/*	$NetBSD: bha.c,v 1.70 2008/04/08 12:07:25 cegger Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Originally written by Julian Elischer (julian@tfs.com)
 * for TRW Financial Systems for use under the MACH(2.5) operating system.
 *
 * TRW Financial Systems, in accordance with their agreement with Carnegie
 * Mellon University, makes this software available to CMU to distribute
 * or use in any manner that they see fit as long as this message is kept with
 * the software. For this reason TFS also grants any other persons or
 * organisations permission to use or modify this software.
 *
 * TFS supplies this software to be publicly redistributed
 * on the understanding that TFS is not responsible for the correct
 * functioning of this software in any circumstances.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bha.c,v 1.70 2008/04/08 12:07:25 cegger Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <uvm/uvm_extern.h>

#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/bhareg.h>
#include <dev/ic/bhavar.h>

#ifndef DDB
#define	Debugger() panic("should call debugger here (bha.c)")
#endif /* ! DDB */

#define	BHA_MAXXFER	((BHA_NSEG - 1) << PGSHIFT)

#ifdef BHADEBUG
int	bha_debug = 0;
#endif /* BHADEBUG */

static int	bha_cmd(bus_space_tag_t, bus_space_handle_t, const char *, int,
		    u_char *, int, u_char *);

static void	bha_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static void	bha_minphys(struct buf *);

static void	bha_get_xfer_mode(struct bha_softc *,
		    struct scsipi_xfer_mode *);

static void	bha_done(struct bha_softc *, struct bha_ccb *);
static int	bha_poll(struct bha_softc *, struct scsipi_xfer *, int);
static void	bha_timeout(void *arg);

static int	bha_init(struct bha_softc *);

static int	bha_create_mailbox(struct bha_softc *);
static void	bha_collect_mbo(struct bha_softc *);

static void	bha_queue_ccb(struct bha_softc *, struct bha_ccb *);
static void	bha_start_ccbs(struct bha_softc *);
static void	bha_finish_ccbs(struct bha_softc *);

static struct bha_ccb *bha_ccb_phys_kv(struct bha_softc *, bus_addr_t);
static void	bha_create_ccbs(struct bha_softc *, int);
static int	bha_init_ccb(struct bha_softc *, struct bha_ccb *);
static struct bha_ccb *bha_get_ccb(struct bha_softc *);
static void	bha_free_ccb(struct bha_softc *, struct bha_ccb *);

#define	BHA_RESET_TIMEOUT	2000	/* time to wait for reset (mSec) */
#define	BHA_ABORT_TIMEOUT	2000	/* time to wait for abort (mSec) */

/*
 * Number of CCBs in an allocation group; must be computed at run-time.
 */
static int	bha_ccbs_per_group;

/*
 * Return the next mailbox-out slot, wrapping around the ring.
 */
static inline struct bha_mbx_out *
bha_nextmbo(struct bha_softc *sc, struct bha_mbx_out *mbo)
{

	if (mbo == &sc->sc_mbo[sc->sc_mbox_count - 1])
		return (&sc->sc_mbo[0]);
	return (mbo + 1);
}

/*
 * Return the next mailbox-in slot, wrapping around the ring.
 */
static inline struct bha_mbx_in *
bha_nextmbi(struct bha_softc *sc, struct bha_mbx_in *mbi)
{
	if (mbi == &sc->sc_mbi[sc->sc_mbox_count - 1])
		return (&sc->sc_mbi[0]);
	return (mbi + 1);
}

/*
 * bha_attach:
 *
 *	Finish attaching a Buslogic controller, and configure children.
 */
void
bha_attach(struct bha_softc *sc)
{
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	int initial_ccbs;

	/*
	 * Initialize the number of CCBs per group.
	 */
	if (bha_ccbs_per_group == 0)
		bha_ccbs_per_group = BHA_CCBS_PER_GROUP;

	initial_ccbs = bha_info(sc);
	if (initial_ccbs == 0) {
		aprint_error_dev(&sc->sc_dev, "unable to get adapter info\n");
		return;
	}

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &sc->sc_dev;
	adapt->adapt_nchannels = 1;
	/* adapt_openings initialized below */
	adapt->adapt_max_periph = sc->sc_mbox_count;
	adapt->adapt_request = bha_scsipi_request;
	adapt->adapt_minphys = bha_minphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_flags = SCSIPI_CHAN_CANGROW;
	chan->chan_ntargets = (sc->sc_flags & BHAF_WIDE) ? 16 : 8;
	chan->chan_nluns = (sc->sc_flags & BHAF_WIDE_LUN) ? 32 : 8;
	chan->chan_id = sc->sc_scsi_id;

	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	TAILQ_INIT(&sc->sc_allocating_ccbs);

	if (bha_create_mailbox(sc) != 0)
		return;

	bha_create_ccbs(sc, initial_ccbs);
	if (sc->sc_cur_ccbs < 2) {
		aprint_error_dev(&sc->sc_dev, "not enough CCBs to run\n");
		return;
	}

	adapt->adapt_openings = sc->sc_cur_ccbs;

	if (bha_init(sc) != 0)
		return;

	(void) config_found(&sc->sc_dev, &sc->sc_channel, scsiprint);
}

/*
 * bha_intr:
 *
 *	Interrupt service routine.
 */
int
bha_intr(void *arg)
{
	struct bha_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_char sts;

#ifdef BHADEBUG
	printf("%s: bha_intr ", device_xname(&sc->sc_dev));
#endif /* BHADEBUG */

	/*
	 * First acknowledge the interrupt.  Then, if it's not telling us
	 * about a completed operation, just return.
	 */
	sts = bus_space_read_1(iot, ioh, BHA_INTR_PORT);
	if ((sts & BHA_INTR_ANYINTR) == 0)
		return (0);
	bus_space_write_1(iot, ioh, BHA_CTRL_PORT, BHA_CTRL_IRST);

#ifdef BHADIAG
	/* Make sure we clear CCB_SENDING before finishing a CCB. */
	bha_collect_mbo(sc);
#endif

	/* Mail box out empty? */
	if (sts & BHA_INTR_MBOA) {
		struct bha_toggle toggle;

		toggle.cmd.opcode = BHA_MBO_INTR_EN;
		toggle.cmd.enable = 0;
		bha_cmd(iot, ioh, device_xname(&sc->sc_dev),
		    sizeof(toggle.cmd), (u_char *)&toggle.cmd,
		    0, (u_char *)0);
		bha_start_ccbs(sc);
	}

	/* Mail box in full? */
	if (sts & BHA_INTR_MBIF)
		bha_finish_ccbs(sc);

	return (1);
}

/*****************************************************************************
 * SCSI interface routines
 *****************************************************************************/

/*
 * bha_scsipi_request:
 *
 *	Perform a request for the SCSIPI layer.
 */
static void
bha_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct bha_softc *sc = (void *)adapt->adapt_dev;
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	bus_dma_tag_t dmat = sc->sc_dmat;
	struct bha_ccb *ccb;
	int error, seg, flags, s;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		flags = xs->xs_control;

		SC_DEBUG(periph, SCSIPI_DB2, ("bha_scsipi_request\n"));

		/* Get a CCB to use. */
		ccb = bha_get_ccb(sc);
#ifdef DIAGNOSTIC
		/*
		 * This should never happen as we track the resources
		 * in the mid-layer.
		 */
		if (ccb == NULL) {
			scsipi_printaddr(periph);
			printf("unable to allocate ccb\n");
			panic("bha_scsipi_request");
		}
#endif

		ccb->xs = xs;
		ccb->timeout = xs->timeout;

		/*
		 * Put all the arguments for the xfer in the ccb
		 */
		if (flags & XS_CTL_RESET) {
			ccb->opcode = BHA_RESET_CCB;
			ccb->scsi_cmd_length = 0;
		} else {
			/* can't use S/G if zero length */
			if (xs->cmdlen > sizeof(ccb->scsi_cmd)) {
				printf("%s: cmdlen %d too large for CCB\n",
				    device_xname(&sc->sc_dev), xs->cmdlen);
				xs->error = XS_DRIVER_STUFFUP;
				goto out_bad;
			}
			ccb->opcode = (xs->datalen ? BHA_INIT_SCAT_GATH_CCB
			    : BHA_INITIATOR_CCB);
			memcpy(&ccb->scsi_cmd, xs->cmd,
			    ccb->scsi_cmd_length = xs->cmdlen);
		}

		if (xs->datalen) {
			/*
			 * Map the DMA transfer.
			 */
#ifdef TFS
			if (flags & XS_CTL_DATA_UIO) {
				error = bus_dmamap_load_uio(dmat,
				    ccb->dmamap_xfer, (struct uio *)xs->data,
				    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
				     BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
				    ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
				     BUS_DMA_WRITE));
			} else
#endif /* TFS */
			{
				error = bus_dmamap_load(dmat,
				    ccb->dmamap_xfer, xs->data, xs->datalen,
				    NULL,
				    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
				     BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
				    ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
				     BUS_DMA_WRITE));
			}

			switch (error) {
			case 0:
				break;

			case ENOMEM:
			case EAGAIN:
				xs->error = XS_RESOURCE_SHORTAGE;
				goto out_bad;

			default:
				xs->error = XS_DRIVER_STUFFUP;
				aprint_error_dev(&sc->sc_dev,
				    "error %d loading DMA map\n", error);
 out_bad:
				bha_free_ccb(sc, ccb);
				scsipi_done(xs);
				return;
			}

			bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
			    ccb->dmamap_xfer->dm_mapsize,
			    (flags & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
			    BUS_DMASYNC_PREWRITE);

			/*
			 * Load the hardware scatter/gather map with the
			 * contents of the DMA map.
			 */
			for (seg = 0; seg < ccb->dmamap_xfer->dm_nsegs; seg++) {
				ltophys(ccb->dmamap_xfer->dm_segs[seg].ds_addr,
				    ccb->scat_gath[seg].seg_addr);
				ltophys(ccb->dmamap_xfer->dm_segs[seg].ds_len,
				    ccb->scat_gath[seg].seg_len);
			}

			ltophys(ccb->hashkey + offsetof(struct bha_ccb,
			    scat_gath), ccb->data_addr);
			ltophys(ccb->dmamap_xfer->dm_nsegs *
			    sizeof(struct bha_scat_gath), ccb->data_length);
		} else {
			/*
			 * No data xfer, use non S/G values.
			 */
			ltophys(0, ccb->data_addr);
			ltophys(0, ccb->data_length);
		}

		if (XS_CTL_TAGTYPE(xs) != 0) {
			ccb->tag_enable = 1;
			ccb->tag_type = xs->xs_tag_type & 0x03;
		} else {
			ccb->tag_enable = 0;
			ccb->tag_type = 0;
		}

		ccb->data_out = 0;
		ccb->data_in = 0;
		ccb->target = periph->periph_target;
		ccb->lun = periph->periph_lun;
		ltophys(ccb->hashkey + offsetof(struct bha_ccb, scsi_sense),
		    ccb->sense_ptr);
		ccb->req_sense_length = sizeof(ccb->scsi_sense);
		ccb->host_stat = 0x00;
		ccb->target_stat = 0x00;
		ccb->link_id = 0;
		ltophys(0, ccb->link_addr);

		BHA_CCB_SYNC(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		s = splbio();
		bha_queue_ccb(sc, ccb);
		splx(s);

		SC_DEBUG(periph, SCSIPI_DB3, ("cmd_sent\n"));
		if ((flags & XS_CTL_POLL) == 0)
			return;

		/*
		 * If we can't use interrupts, poll on completion
		 */
		if (bha_poll(sc, xs, ccb->timeout)) {
			bha_timeout(ccb);
			if (bha_poll(sc, xs, ccb->timeout))
				bha_timeout(ccb);
		}
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		if (sc->sc_cur_ccbs == sc->sc_max_ccbs) {
			chan->chan_flags &= ~SCSIPI_CHAN_CANGROW;
			return;
		}
		seg = sc->sc_cur_ccbs;
		bha_create_ccbs(sc, bha_ccbs_per_group);
		adapt->adapt_openings += sc->sc_cur_ccbs - seg;
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		/*
		 * Can't really do this on the Buslogic.  It has its
		 * own setup info.  But we do know how to query what
		 * the settings are.
		 */
		bha_get_xfer_mode(sc, (struct scsipi_xfer_mode *)arg);
		return;
	}
}

/*
 * bha_minphys:
 *
 *	Limit a transfer to our maximum transfer size.
 */
void
bha_minphys(struct buf *bp)
{

	if (bp->b_bcount > BHA_MAXXFER)
		bp->b_bcount = BHA_MAXXFER;
	minphys(bp);
}

/*****************************************************************************
 * SCSI job execution helper routines
 *****************************************************************************/

/*
 * bha_get_xfer_mode:
 *
 *	Negotiate the xfer mode for the specified periph, and report
 *	back the mode to the midlayer.
 *
 *	NOTE: we must be called at splbio().
 */
static void
bha_get_xfer_mode(struct bha_softc *sc, struct scsipi_xfer_mode *xm)
{
	struct bha_setup hwsetup;
	struct bha_period hwperiod;
	struct bha_sync *bs;
	int toff = xm->xm_target & 7, tmask = (1 << toff);
	int wide, period, offset, rlen;

	/*
	 * Issue an Inquire Setup Information.  We can extract
	 * sync and wide information from here.
	 */
	rlen = sizeof(hwsetup.reply) +
	    ((sc->sc_flags & BHAF_WIDE) ? sizeof(hwsetup.reply_w) : 0);
	hwsetup.cmd.opcode = BHA_INQUIRE_SETUP;
	hwsetup.cmd.len = rlen;
	bha_cmd(sc->sc_iot, sc->sc_ioh, device_xname(&sc->sc_dev),
	    sizeof(hwsetup.cmd), (u_char *)&hwsetup.cmd,
	    rlen, (u_char *)&hwsetup.reply);

	xm->xm_mode = 0;
	xm->xm_period = 0;
	xm->xm_offset = 0;

	/*
	 * First check for wide.  On later boards, we can check
	 * directly in the setup info if wide is currently active.
	 *
	 * On earlier boards, we have to make an educated guess.
	 */
	if (sc->sc_flags & BHAF_WIDE) {
		if (strcmp(sc->sc_firmware, "5.06L") >= 0) {
			if (xm->xm_target > 7) {
				wide =
				    hwsetup.reply_w.high_wide_active & tmask;
			} else {
				wide =
				    hwsetup.reply_w.low_wide_active & tmask;
			}
			if (wide)
				xm->xm_mode |= PERIPH_CAP_WIDE16;
		} else {
			/* XXX Check `wide permitted' in the config info. */
			xm->xm_mode |= PERIPH_CAP_WIDE16;
		}
	}

	/*
	 * Now get basic sync info.
	 */
	bs = (xm->xm_target > 7) ?
	    &hwsetup.reply_w.sync_high[toff] :
	    &hwsetup.reply.sync_low[toff];

	if (bs->valid) {
		xm->xm_mode |= PERIPH_CAP_SYNC;
		period = (bs->period * 50) + 20;
		offset = bs->offset;

		/*
		 * On boards that can do Fast and Ultra, use the Inquire Period
		 * command to get the period.
		 */
		if (sc->sc_firmware[0] >= '3') {
			rlen = sizeof(hwperiod.reply) +
			    ((sc->sc_flags & BHAF_WIDE) ?
			    sizeof(hwperiod.reply_w) : 0);
			hwperiod.cmd.opcode = BHA_INQUIRE_PERIOD;
			hwperiod.cmd.len = rlen;
			bha_cmd(sc->sc_iot, sc->sc_ioh,
			    device_xname(&sc->sc_dev),
			    sizeof(hwperiod.cmd), (u_char *)&hwperiod.cmd,
			    rlen, (u_char *)&hwperiod.reply);

			if (xm->xm_target > 7)
				period = hwperiod.reply_w.period[toff];
			else
				period = hwperiod.reply.period[toff];

			period *= 10;
		}

		xm->xm_period =
		    scsipi_sync_period_to_factor(period * 100);
		xm->xm_offset = offset;
	}

	/*
	 * Now check for tagged queueing support.
	 *
	 * XXX Check `tags permitted' in the config info.
	 */
	if (sc->sc_flags & BHAF_TAGGED_QUEUEING)
		xm->xm_mode |= PERIPH_CAP_TQING;

	scsipi_async_event(&sc->sc_channel, ASYNC_EVENT_XFER_MODE, xm);
}

/*
 * bha_done:
 *
 *	A CCB has completed execution.  Pass the status back to the
 *	upper layer.
 */
static void
bha_done(struct bha_softc *sc, struct bha_ccb *ccb)
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	struct scsipi_xfer *xs = ccb->xs;

	SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("bha_done\n"));

#ifdef BHADIAG
	if (ccb->flags & CCB_SENDING) {
		printf("%s: exiting ccb still in transit!\n",
		    device_xname(&sc->sc_dev));
		Debugger();
		return;
	}
#endif
	if ((ccb->flags & CCB_ALLOC) == 0) {
		aprint_error_dev(&sc->sc_dev, "exiting ccb not allocated!\n");
		Debugger();
		return;
	}

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}

	if (xs->error == XS_NOERROR) {
		if (ccb->host_stat != BHA_OK) {
			switch (ccb->host_stat) {
			case BHA_SEL_TIMEOUT:	/* No response */
				xs->error = XS_SELTIMEOUT;
				break;
			default:	/* Other scsi protocol messes */
				printf("%s: host_stat %x\n",
				    device_xname(&sc->sc_dev), ccb->host_stat);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else if (ccb->target_stat != SCSI_OK) {
			switch (ccb->target_stat) {
			case SCSI_CHECK:
				memcpy(&xs->sense.scsi_sense,
				    &ccb->scsi_sense,
				    sizeof(xs->sense.scsi_sense));
				xs->error = XS_SENSE;
				break;
			case SCSI_BUSY:
				xs->error = XS_BUSY;
				break;
			default:
				printf("%s: target_stat %x\n",
				    device_xname(&sc->sc_dev),
				    ccb->target_stat);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else
			xs->resid = 0;
	}

	bha_free_ccb(sc, ccb);
	scsipi_done(xs);
}

/*
 * bha_poll:
 *
 *	Poll for completion of the specified job.
 */
static int
bha_poll(struct bha_softc *sc, struct scsipi_xfer *xs, int count)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;

	/* timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		/*
		 * If we had interrupts enabled, would we
		 * have got an interrupt?
		 */
		if (bus_space_read_1(iot, ioh, BHA_INTR_PORT) &
		    BHA_INTR_ANYINTR)
			bha_intr(sc);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);	/* only happens in boot so ok */
		count--;
	}
	return (1);
}

/*
 * bha_timeout:
 *
 *	CCB timeout handler.
 */
static void
bha_timeout(void *arg)
{
	struct bha_ccb *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	struct bha_softc *sc =
	    (void *)periph->periph_channel->chan_adapter->adapt_dev;
	int s;

	scsipi_printaddr(periph);
	printf("timed out");

	s = splbio();

#ifdef BHADIAG
	/*
	 * If the ccb's mbx is not free, then the board has gone Far East?
	 */
	bha_collect_mbo(sc);
	if (ccb->flags & CCB_SENDING) {
		aprint_error_dev(&sc->sc_dev, "not taking commands!\n");
		Debugger();
	}
#endif

	/*
	 * If it has been through here before, then a previous
	 * abort has already failed; don't try to abort again.
	 */
	if (ccb->flags & CCB_ABORT) {
		/* abort timed out */
		printf(" AGAIN\n");
		/* XXX Must reset! */
	} else {
		/* abort the operation that has timed out */
		printf("\n");
		ccb->xs->error = XS_TIMEOUT;
		ccb->timeout = BHA_ABORT_TIMEOUT;
		ccb->flags |= CCB_ABORT;
		bha_queue_ccb(sc, ccb);
	}

	splx(s);
}

/*****************************************************************************
 * Misc. subroutines.
 *****************************************************************************/

/*
 * bha_cmd:
 *
 *	Send a command to the Buslogic controller.
 */
static int
bha_cmd(bus_space_tag_t iot, bus_space_handle_t ioh, const char *name,
    int icnt, u_char *ibuf, int ocnt, u_char *obuf)
{
	int i;
	int wait;
	u_char sts;
	u_char opcode = ibuf[0];

	/*
	 * Calculate a reasonable timeout for the command.
	 */
	switch (opcode) {
	case BHA_INQUIRE_DEVICES:
	case BHA_INQUIRE_DEVICES_2:
		wait = 90 * 20000;
		break;
	default:
		wait = 1 * 20000;
		break;
	}

	/*
	 * Wait for the adapter to go idle, unless it's one of
	 * the commands which don't need this.
	 */
	if (opcode != BHA_MBO_INTR_EN) {
		for (i = 20000; i; i--) {	/* 1 sec? */
			sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
			if (sts & BHA_STAT_IDLE)
				break;
			delay(50);
		}
		if (!i) {
			printf("%s: bha_cmd, host not idle(0x%x)\n",
			    name, sts);
			return (1);
		}
	}

	/*
	 * Now that it is idle, if we expect output, preflush the
	 * queue feeding to us.
	 */
	if (ocnt) {
		while ((bus_space_read_1(iot, ioh, BHA_STAT_PORT)) &
		    BHA_STAT_DF)
			(void)bus_space_read_1(iot, ioh, BHA_DATA_PORT);
	}

	/*
	 * Output the command and the number of argument bytes given;
	 * for each byte, first check that the port is empty.
	 */
	while (icnt--) {
		for (i = wait; i; i--) {
			sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
			if (!(sts & BHA_STAT_CDF))
				break;
			delay(50);
		}
		if (!i) {
			if (opcode != BHA_INQUIRE_REVISION)
				printf("%s: bha_cmd, cmd/data port full\n",
				    name);
			goto bad;
		}
		bus_space_write_1(iot, ioh, BHA_CMD_PORT, *ibuf++);
	}

	/*
	 * If we expect input, loop that many times, each time
	 * looking for the data register to have valid data.
	 */
	while (ocnt--) {
		for (i = wait; i; i--) {
			sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
			if (sts & BHA_STAT_DF)
				break;
			delay(50);
		}
		if (!i) {
#ifdef BHADEBUG
			if (opcode != BHA_INQUIRE_REVISION)
				printf("%s: bha_cmd, cmd/data port empty %d\n",
				    name, ocnt);
#endif /* BHADEBUG */
			goto bad;
		}
		*obuf++ = bus_space_read_1(iot, ioh, BHA_DATA_PORT);
	}

	/*
	 * Wait for the board to report a finished instruction.
	 * We may get an extra interrupt for the HACC signal, but this is
	 * unimportant.
	 */
	if (opcode != BHA_MBO_INTR_EN && opcode != BHA_MODIFY_IOPORT) {
		for (i = 20000; i; i--) {	/* 1 sec? */
			sts = bus_space_read_1(iot, ioh, BHA_INTR_PORT);
			/* XXX Need to save this in the interrupt handler? */
			if (sts & BHA_INTR_HACC)
				break;
			delay(50);
		}
		if (!i) {
			printf("%s: bha_cmd, host not finished(0x%x)\n",
			    name, sts);
			return (1);
		}
	}
	bus_space_write_1(iot, ioh, BHA_CTRL_PORT, BHA_CTRL_IRST);
	return (0);

bad:
	bus_space_write_1(iot, ioh, BHA_CTRL_PORT, BHA_CTRL_SRST);
	return (1);
}

/*
 * bha_find:
 *
 *	Find the board.
 */
int
bha_find(bus_space_tag_t iot, bus_space_handle_t ioh)
{
	int i;
	u_char sts;
	struct bha_extended_inquire inquire;

	/* Check something is at the ports we need to access */
	sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
	if (sts == 0xFF)
		return (0);

	/*
	 * Reset the board.  If it doesn't respond, assume that it's
	 * not there; good enough for the probe.
	 */

	bus_space_write_1(iot, ioh, BHA_CTRL_PORT,
	    BHA_CTRL_HRST | BHA_CTRL_SRST);

	delay(100);
	for (i = BHA_RESET_TIMEOUT; i; i--) {
		sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
		if (sts == (BHA_STAT_IDLE | BHA_STAT_INIT))
			break;
		delay(1000);
	}
	if (!i) {
#ifdef BHADEBUG
		if (bha_debug)
			printf("bha_find: No answer from buslogic board\n");
#endif /* BHADEBUG */
		return (0);
	}

	/*
	 * The BusLogic cards implement an Adaptec 1542 (aha)-compatible
	 * interface.  The native bha interface is not compatible with
	 * an aha 1542.  We need to ensure that we never match an
	 * Adaptec 1542.  We must also avoid sending Adaptec-compatible
	 * commands to a real bha, lest it go into 1542 emulation mode.
	 * (On an indirect bus like ISA, we should always probe for BusLogic
	 * interfaces before Adaptec interfaces).
	 */

	/*
	 * Make sure we don't match an AHA-1542A or AHA-1542B, by checking
	 * for an extended-geometry register.  The 1542[AB] don't have one.
	 */
	sts = bus_space_read_1(iot, ioh, BHA_EXTGEOM_PORT);
	if (sts == 0xFF)
		return (0);

	/*
	 * Check that we actually know how to use this board.
	 */
	delay(1000);
	inquire.cmd.opcode = BHA_INQUIRE_EXTENDED;
	inquire.cmd.len = sizeof(inquire.reply);
	i = bha_cmd(iot, ioh, "(bha_find)",
	    sizeof(inquire.cmd), (u_char *)&inquire.cmd,
	    sizeof(inquire.reply), (u_char *)&inquire.reply);

	/*
	 * Some 1542Cs (CP, perhaps not CF, may depend on firmware rev)
	 * have the extended-geometry register and also respond to
	 * BHA_INQUIRE_EXTENDED.  Make sure we never match such cards,
	 * by checking that the size of the reply is what a BusLogic
	 * card returns.
	 */
	if (i) {
#ifdef BHADEBUG
		printf("bha_find: board returned %d instead of %zu to %s\n",
		    i, sizeof(inquire.reply), "INQUIRE_EXTENDED");
#endif
		return (0);
	}

	/* OK, we know we've found a buslogic adaptor. */

	switch (inquire.reply.bus_type) {
	case BHA_BUS_TYPE_24BIT:
	case BHA_BUS_TYPE_32BIT:
		break;
	case BHA_BUS_TYPE_MCA:
		/* We don't grok MicroChannel (yet). */
		return (0);
	default:
		printf("bha_find: illegal bus type %c\n",
		    inquire.reply.bus_type);
		return (0);
	}

	return (1);
}


/*
 * bha_inquire_config:
 *
 *	Determine irq/drq.
 */
int
bha_inquire_config(bus_space_tag_t iot, bus_space_handle_t ioh,
    struct bha_probe_data *sc)
{
	int irq, drq;
	struct bha_config config;

	/*
	 * Assume we have a board at this stage; set up the DMA channel
	 * from the jumpers and save the interrupt level.
	 */
	delay(1000);
	config.cmd.opcode = BHA_INQUIRE_CONFIG;
	bha_cmd(iot, ioh, "(bha_inquire_config)",
	    sizeof(config.cmd), (u_char *)&config.cmd,
	    sizeof(config.reply), (u_char *)&config.reply);
	switch (config.reply.chan) {
	case EISADMA:
		drq = -1;
		break;
	case CHAN0:
		drq = 0;
		break;
	case CHAN5:
		drq = 5;
		break;
	case CHAN6:
		drq = 6;
		break;
	case CHAN7:
		drq = 7;
		break;
	default:
		printf("bha: illegal drq setting %x\n",
		    config.reply.chan);
		return (0);
	}

	switch (config.reply.intr) {
	case INT9:
		irq = 9;
		break;
	case INT10:
		irq = 10;
		break;
	case INT11:
		irq = 11;
		break;
	case INT12:
		irq = 12;
		break;
	case INT14:
		irq = 14;
		break;
	case INT15:
		irq = 15;
		break;
	default:
		printf("bha: illegal irq setting %x\n",
		    config.reply.intr);
		return (0);
	}

	/* if we want to fill in softc, do so now */
	if (sc != NULL) {
		sc->sc_irq = irq;
		sc->sc_drq = drq;
	}

	return (1);
}

/*
 * bha_probe_inquiry:
 *
 *	Probe for the board and, if found, fetch its irq/drq configuration.
 */
int
bha_probe_inquiry(bus_space_tag_t iot, bus_space_handle_t ioh,
    struct bha_probe_data *bpd)
{
	return bha_find(iot, ioh) && bha_inquire_config(iot, ioh, bpd);
}

/*
 * bha_disable_isacompat:
 *
 *	Disable the ISA-compatibility ioports on PCI bha devices,
 *	to ensure they're not autoconfigured a second time as an ISA bha.
 */
int
bha_disable_isacompat(struct bha_softc *sc)
{
	struct bha_isadisable isa_disable;

	isa_disable.cmd.opcode = BHA_MODIFY_IOPORT;
	isa_disable.cmd.modifier = BHA_IOMODIFY_DISABLE1;
	bha_cmd(sc->sc_iot, sc->sc_ioh, device_xname(&sc->sc_dev),
	    sizeof(isa_disable.cmd), (u_char*)&isa_disable.cmd,
	    0, (u_char *)0);
	return (0);
}

/*
 * bha_info:
 *
 *	Get information about the board, and report it.  We
 *	return the initial number of CCBs, 0 if we failed.
 */
int
bha_info(struct bha_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct bha_extended_inquire inquire;
	struct bha_config config;
	struct bha_devices devices;
	struct bha_setup setup;
	struct bha_model model;
	struct bha_revision revision;
	struct bha_digit digit;
	int i, j, initial_ccbs, rlen;
	const char *name = device_xname(&sc->sc_dev);
	char *p;

	/*
	 * Fetch the extended inquire information.
	 */
	inquire.cmd.opcode = BHA_INQUIRE_EXTENDED;
	inquire.cmd.len = sizeof(inquire.reply);
	bha_cmd(iot, ioh, name,
	    sizeof(inquire.cmd), (u_char *)&inquire.cmd,
	    sizeof(inquire.reply), (u_char *)&inquire.reply);

	/*
	 * Fetch the configuration information.
	 */
	config.cmd.opcode = BHA_INQUIRE_CONFIG;
	bha_cmd(iot, ioh, name,
	    sizeof(config.cmd), (u_char *)&config.cmd,
	    sizeof(config.reply), (u_char *)&config.reply);

	sc->sc_scsi_id = config.reply.scsi_dev;

	/*
	 * Get the firmware revision.
	 */
	p = sc->sc_firmware;
	revision.cmd.opcode = BHA_INQUIRE_REVISION;
	bha_cmd(iot, ioh, name,
	    sizeof(revision.cmd), (u_char *)&revision.cmd,
	    sizeof(revision.reply), (u_char *)&revision.reply);
	*p++ = revision.reply.firm_revision;
	*p++ = '.';
	*p++ = revision.reply.firm_version;
	digit.cmd.opcode = BHA_INQUIRE_REVISION_3;
	bha_cmd(iot, ioh, name,
	    sizeof(digit.cmd), (u_char *)&digit.cmd,
	    sizeof(digit.reply), (u_char *)&digit.reply);
	*p++ = digit.reply.digit;
	if (revision.reply.firm_revision >= '3' ||
	    (revision.reply.firm_revision == '3' &&
	     revision.reply.firm_version >= '3')) {
		digit.cmd.opcode = BHA_INQUIRE_REVISION_4;
		bha_cmd(iot, ioh, name,
		    sizeof(digit.cmd), (u_char *)&digit.cmd,
		    sizeof(digit.reply), (u_char *)&digit.reply);
		*p++ = digit.reply.digit;
	}
	while (p > sc->sc_firmware && (p[-1] == ' ' || p[-1] == '\0'))
		p--;
	*p = '\0';

	/*
	 * Get the model number.
	 *
	 * Some boards do not handle the Inquire Board Model Number
	 * command correctly, or don't give correct information.
	 *
	 * So, we use the Firmware Revision and Extended Setup
	 * information to fixup the model number in these cases.
	 *
	 * The firmware version indicates:
	 *
	 *	5.xx	BusLogic "W" Series Host Adapters
	 *		BT-948/958/958D
	 *
	 *	4.xx	BusLogic "C" Series Host Adapters
	 *		BT-946C/956C/956CD/747C/757C/757CD/445C/545C/540CF
	 *
	 *	3.xx	BusLogic "S" Series Host Adapters
	 *		BT-747S/747D/757S/757D/445S/545S/542D
	 *		BT-542B/742A (revision H)
	 *
	 *	2.xx	BusLogic "A" Series Host Adapters
	 *		BT-542B/742A (revision G and below)
	 *
	 *	0.xx	AMI FastDisk VLB/EISA BusLogic Clone Host Adapter
	 */
	if (inquire.reply.bus_type == BHA_BUS_TYPE_24BIT &&
	    sc->sc_firmware[0] < '3')
		snprintf(sc->sc_model, sizeof(sc->sc_model), "542B");
	else if (inquire.reply.bus_type == BHA_BUS_TYPE_32BIT &&
	    sc->sc_firmware[0] == '2' &&
	    (sc->sc_firmware[2] == '1' ||
	     (sc->sc_firmware[2] == '2' && sc->sc_firmware[3] == '0')))
		snprintf(sc->sc_model, sizeof(sc->sc_model), "742A");
	else if (inquire.reply.bus_type == BHA_BUS_TYPE_32BIT &&
	    sc->sc_firmware[0] == '0')
		snprintf(sc->sc_model, sizeof(sc->sc_model), "747A");
	else {
		p = sc->sc_model;
		model.cmd.opcode = BHA_INQUIRE_MODEL;
		model.cmd.len = sizeof(model.reply);
		bha_cmd(iot, ioh, name,
		    sizeof(model.cmd), (u_char *)&model.cmd,
		    sizeof(model.reply), (u_char *)&model.reply);
		*p++ = model.reply.id[0];
		*p++ = model.reply.id[1];
		*p++ = model.reply.id[2];
		*p++ = model.reply.id[3];
		while (p > sc->sc_model && (p[-1] == ' ' || p[-1] == '\0'))
			p--;
		*p++ = model.reply.version[0];
		*p++ = model.reply.version[1];
		while (p > sc->sc_model && (p[-1] == ' ' || p[-1] == '\0'))
			p--;
		*p = '\0';
	}

	/* Enable round-robin scheme - appeared at firmware rev. 3.31. */
	if (strcmp(sc->sc_firmware, "3.31") >= 0)
		sc->sc_flags |= BHAF_STRICT_ROUND_ROBIN;

	/*
	 * Determine some characteristics about our bus.
	 */
	if (inquire.reply.scsi_flags & BHA_SCSI_WIDE)
		sc->sc_flags |= BHAF_WIDE;
	if (inquire.reply.scsi_flags & BHA_SCSI_DIFFERENTIAL)
		sc->sc_flags |= BHAF_DIFFERENTIAL;
	if (inquire.reply.scsi_flags & BHA_SCSI_ULTRA)
		sc->sc_flags |= BHAF_ULTRA;

	/*
	 * Determine some characteristics of the board.
	 */
	sc->sc_max_dmaseg = inquire.reply.sg_limit;

	/*
	 * Determine the maximum CCB count and whether or not
	 * tagged queueing is available on this host adapter.
	 *
	 * Tagged queueing works on:
	 *
	 *	"W" Series adapters
	 *	"C" Series adapters with firmware >= 4.22
	 *	"S" Series adapters with firmware >= 3.35
	 *
	 * The internal CCB counts are:
	 *
	 *	192	BT-948/958/958D
	 *	100	BT-946C/956C/956CD/747C/757C/757CD/445C
	 *	 50	BT-545C/540CF
	 *	 30	BT-747S/747D/757S/757D/445S/545S/542D/542B/742A
	 */
	switch (sc->sc_firmware[0]) {
	case '5':
		sc->sc_max_ccbs = 192;
		sc->sc_flags |= BHAF_TAGGED_QUEUEING;
		break;

	case '4':
		if (sc->sc_model[0] == '5')
			sc->sc_max_ccbs = 50;
		else
			sc->sc_max_ccbs = 100;
		if (strcmp(sc->sc_firmware, "4.22") >= 0)
			sc->sc_flags |= BHAF_TAGGED_QUEUEING;
		break;

	case '3':
		if (strcmp(sc->sc_firmware, "3.35") >= 0)
			sc->sc_flags |= BHAF_TAGGED_QUEUEING;
		/* FALLTHROUGH */

	default:
		sc->sc_max_ccbs = 30;
	}

	/*
	 * Set the mailbox count to precisely the number of HW CCBs
	 * available.  A mailbox isn't required while a CCB is executing,
	 * but this allows us to actually enqueue up to our resource
	 * limit.
	 *
	 * This will keep the mailbox count small on boards which don't
	 * have strict round-robin (they have to scan the entire set of
	 * mailboxes each time they run a command).
	 */
	sc->sc_mbox_count = sc->sc_max_ccbs;

	/*
	 * Obtain setup information.
	 */
	rlen = sizeof(setup.reply) +
	    ((sc->sc_flags & BHAF_WIDE) ? sizeof(setup.reply_w) : 0);
	setup.cmd.opcode = BHA_INQUIRE_SETUP;
	setup.cmd.len = rlen;
	bha_cmd(iot, ioh, name,
	    sizeof(setup.cmd), (u_char *)&setup.cmd,
	    rlen, (u_char *)&setup.reply);

	aprint_normal_dev(&sc->sc_dev, "model BT-%s, firmware %s\n",
	    sc->sc_model, sc->sc_firmware);

	aprint_normal_dev(&sc->sc_dev, "%d H/W CCBs", sc->sc_max_ccbs);
	if (setup.reply.sync_neg)
		aprint_normal(", sync");
	if (setup.reply.parity)
		aprint_normal(", parity");
	if (sc->sc_flags & BHAF_TAGGED_QUEUEING)
		aprint_normal(", tagged queueing");
	if (sc->sc_flags & BHAF_WIDE_LUN)
		aprint_normal(", wide LUN support");
	aprint_normal("\n");

	/*
	 * Poll targets 0 - 7.
	 */
	devices.cmd.opcode = BHA_INQUIRE_DEVICES;
	bha_cmd(iot, ioh, name,
	    sizeof(devices.cmd), (u_char *)&devices.cmd,
	    sizeof(devices.reply), (u_char *)&devices.reply);

	/* Count installed units. */
	initial_ccbs = 0;
	for (i = 0; i < 8; i++) {
		for (j = 0; j < 8; j++) {
			if (((devices.reply.lun_map[i] >> j) & 1) == 1)
				initial_ccbs++;
		}
	}

	/*
	 * Poll targets 8 - 15 if we have a wide bus.
	 */
	if (sc->sc_flags & BHAF_WIDE) {
		devices.cmd.opcode = BHA_INQUIRE_DEVICES_2;
		bha_cmd(iot, ioh, name,
		    sizeof(devices.cmd), (u_char *)&devices.cmd,
		    sizeof(devices.reply), (u_char *)&devices.reply);

		for (i = 0; i < 8; i++) {
			for (j = 0; j < 8; j++) {
				if (((devices.reply.lun_map[i] >> j) & 1) == 1)
					initial_ccbs++;
			}
		}
	}

	/*
	 * Double the initial CCB count, for good measure.
	 */
	initial_ccbs *= 2;

	/*
	 * Sanity check the initial CCB count; don't create more than
	 * we can enqueue (sc_max_ccbs), and make sure there are some
	 * at all.
	 */
	if (initial_ccbs > sc->sc_max_ccbs)
		initial_ccbs = sc->sc_max_ccbs;
	if (initial_ccbs == 0)
		initial_ccbs = 2;

	return (initial_ccbs);
}

/*
 * bha_init:
 *
 *	Initialize the board.
 */
static int
bha_init(struct bha_softc *sc)
{
	const char *name = device_xname(&sc->sc_dev);
	struct bha_toggle toggle;
	struct bha_mailbox mailbox;
	struct bha_mbx_out *mbo;
	struct bha_mbx_in *mbi;
	int i;

	/*
	 * Set up the mailbox.  We always run the mailbox in round-robin.
	 */
	for (i = 0; i < sc->sc_mbox_count; i++) {
		mbo = &sc->sc_mbo[i];
		mbi = &sc->sc_mbi[i];

		mbo->cmd = BHA_MBO_FREE;
		BHA_MBO_SYNC(sc, mbo, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		mbi->comp_stat = BHA_MBI_FREE;
		BHA_MBI_SYNC(sc, mbi, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	sc->sc_cmbo = sc->sc_tmbo = &sc->sc_mbo[0];
	sc->sc_tmbi = &sc->sc_mbi[0];

	sc->sc_mbofull = 0;

	/*
	 * If the board supports strict round-robin, enable that.
	 */
	if (sc->sc_flags & BHAF_STRICT_ROUND_ROBIN) {
		toggle.cmd.opcode = BHA_ROUND_ROBIN;
		toggle.cmd.enable = 1;
		bha_cmd(sc->sc_iot, sc->sc_ioh, name,
		    sizeof(toggle.cmd), (u_char *)&toggle.cmd,
		    0, NULL);
	}

	/*
	 * Give the mailbox to the board.
	 */
	mailbox.cmd.opcode = BHA_MBX_INIT_EXTENDED;
	mailbox.cmd.nmbx = sc->sc_mbox_count;
	ltophys(sc->sc_dmamap_mbox->dm_segs[0].ds_addr, mailbox.cmd.addr);
	bha_cmd(sc->sc_iot, sc->sc_ioh, name,
	    sizeof(mailbox.cmd), (u_char *)&mailbox.cmd,
	    0, (u_char *)0);

	return (0);
}

/*****************************************************************************
 * CCB execution engine
 *****************************************************************************/

/*
 * bha_queue_ccb:
 *
 *	Queue a CCB to be sent to the controller, and send it if possible.
 */
static void
bha_queue_ccb(struct bha_softc *sc, struct bha_ccb *ccb)
{

	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);
	bha_start_ccbs(sc);
}

/*
 * bha_start_ccbs:
 *
 *	Send as many CCBs as we have empty mailboxes for.
 */
static void
bha_start_ccbs(struct bha_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct bha_ccb_group *bcg;
	struct bha_mbx_out *mbo;
	struct bha_ccb *ccb;

	mbo = sc->sc_tmbo;

	while ((ccb = TAILQ_FIRST(&sc->sc_waiting_ccb)) != NULL) {
		if (sc->sc_mbofull >= sc->sc_mbox_count) {
#ifdef DIAGNOSTIC
			if (sc->sc_mbofull > sc->sc_mbox_count)
				panic("bha_start_ccbs: mbofull > mbox_count");
#endif
			/*
			 * No mailboxes available; attempt to collect ones
			 * that have already been used.
			 */
			bha_collect_mbo(sc);
			if (sc->sc_mbofull == sc->sc_mbox_count) {
				/*
				 * Still no more available; have the
				 * controller interrupt us when it
				 * frees one.
				 */
				struct bha_toggle toggle;

				toggle.cmd.opcode = BHA_MBO_INTR_EN;
				toggle.cmd.enable = 1;
				bha_cmd(iot, ioh, device_xname(&sc->sc_dev),
				    sizeof(toggle.cmd), (u_char *)&toggle.cmd,
				    0, (u_char *)0);
				break;
			}
		}

		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
#ifdef BHADIAG
		ccb->flags |= CCB_SENDING;
#endif

		/*
		 * Put the CCB in the mailbox.
		 */
		bcg = BHA_CCB_GROUP(ccb);
		ltophys(bcg->bcg_dmamap->dm_segs[0].ds_addr +
		    BHA_CCB_OFFSET(ccb), mbo->ccb_addr);
		if (ccb->flags & CCB_ABORT)
			mbo->cmd = BHA_MBO_ABORT;
		else
			mbo->cmd = BHA_MBO_START;

		BHA_MBO_SYNC(sc, mbo,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Tell the card to poll immediately. */
		bus_space_write_1(iot, ioh, BHA_CMD_PORT, BHA_START_SCSI);

		if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
			callout_reset(&ccb->xs->xs_callout,
			    mstohz(ccb->timeout), bha_timeout, ccb);

		++sc->sc_mbofull;
		mbo = bha_nextmbo(sc, mbo);
	}

	sc->sc_tmbo = mbo;
}

/*
 * bha_finish_ccbs:
 *
 *	Finalize the execution of CCBs in our incoming mailbox.
 */
static void
bha_finish_ccbs(struct bha_softc *sc)
{
	struct bha_mbx_in *mbi;
	struct bha_ccb *ccb;
	int i;

	mbi = sc->sc_tmbi;

	BHA_MBI_SYNC(sc, mbi, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	if (mbi->comp_stat == BHA_MBI_FREE) {
		for (i = 0; i < sc->sc_mbox_count; i++) {
			if (mbi->comp_stat != BHA_MBI_FREE) {
#ifdef BHADIAG
				/*
				 * This can happen in normal operation if
				 * we use all mailbox slots.
				 */
				printf("%s: mbi not in round-robin order\n",
				    device_xname(&sc->sc_dev));
#endif
				goto again;
			}
			mbi = bha_nextmbi(sc, mbi);
			BHA_MBI_SYNC(sc, mbi,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		}
#ifdef BHADIAGnot
		printf("%s: mbi interrupt with no full mailboxes\n",
		    device_xname(&sc->sc_dev));
#endif
		return;
	}

 again:
	do {
		ccb = bha_ccb_phys_kv(sc, phystol(mbi->ccb_addr));
		if (ccb == NULL) {
			aprint_error_dev(&sc->sc_dev,
			    "bad mbi ccb pointer 0x%08x; skipping\n",
			    phystol(mbi->ccb_addr));
			goto next;
		}

		BHA_CCB_SYNC(sc, ccb,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

#ifdef BHADEBUG
		if (bha_debug) {
			u_char *cp = ccb->scsi_cmd;
			printf("op=%x %x %x %x %x %x\n",
			    cp[0], cp[1], cp[2], cp[3], cp[4], cp[5]);
			printf("comp_stat %x for mbi addr = %p, ",
			    mbi->comp_stat, mbi);
			printf("ccb addr = %p\n", ccb);
		}
#endif /* BHADEBUG */

		switch (mbi->comp_stat) {
		case BHA_MBI_OK:
		case BHA_MBI_ERROR:
			if ((ccb->flags & CCB_ABORT) != 0) {
				/*
				 * If we already started an abort, wait for it
				 * to complete before clearing the CCB.  We
				 * could instead just clear CCB_SENDING, but
				 * what if the mailbox was already received?
				 * The worst that happens here is that we clear
				 * the CCB a bit later than we need to.  BFD.
				 */
				goto next;
			}
			break;

		case BHA_MBI_ABORT:
		case BHA_MBI_UNKNOWN:
			/*
			 * Even if the CCB wasn't found, we clear it anyway.
			 * See preceding comment.
			 */
			break;

		default:
			aprint_error_dev(&sc->sc_dev,
			    "bad mbi comp_stat %02x; skipping\n",
			    mbi->comp_stat);
			goto next;
		}

		callout_stop(&ccb->xs->xs_callout);
		bha_done(sc, ccb);

 next:
		mbi->comp_stat = BHA_MBI_FREE;
		BHA_CCB_SYNC(sc, ccb,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		mbi = bha_nextmbi(sc, mbi);
		BHA_MBI_SYNC(sc, mbi,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	} while (mbi->comp_stat != BHA_MBI_FREE);

	sc->sc_tmbi = mbi;
}

/*****************************************************************************
 * Mailbox management functions.
 *****************************************************************************/

/*
 * bha_create_mailbox:
 *
 *	Create the mailbox structures.  Helper function for bha_attach().
 *
 *	NOTE: The Buslogic hardware only gets one DMA address for the
 *	mailbox!  It expects:
 *
 *		mailbox_out[mailbox_size]
 *		mailbox_in[mailbox_size]
 */
static int
bha_create_mailbox(struct bha_softc *sc)
{
	bus_dma_segment_t seg;
	size_t size;
	int error, rseg;

	size = (sizeof(struct bha_mbx_out) * sc->sc_mbox_count) +
	    (sizeof(struct bha_mbx_in) * sc->sc_mbox_count);

	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg,
	    1, &rseg, sc->sc_dmaflags);
	if (error) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to allocate mailboxes, error = %d\n", error);
		goto bad_0;
	}

	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
	    (void **)&sc->sc_mbo, sc->sc_dmaflags | BUS_DMA_COHERENT);
	if (error) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to map mailboxes, error = %d\n", error);
		goto bad_1;
	}

	memset(sc->sc_mbo, 0, size);

	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    sc->sc_dmaflags, &sc->sc_dmamap_mbox);
	if (error) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to create mailbox DMA map, error = %d\n",
		    error);
		goto bad_2;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_mbox,
	    sc->sc_mbo, size, NULL, 0);
	if (error) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to load mailbox DMA map, error = %d\n", error);
		goto bad_3;
	}

	sc->sc_mbi = (struct bha_mbx_in *)(sc->sc_mbo + sc->sc_mbox_count);

	return (0);

 bad_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap_mbox);
 bad_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_mbo, size);
 bad_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 bad_0:
	return (error);
}

/*
 * bha_collect_mbo:
 *
 *	Garbage collect mailboxes that are no longer in use.
 */
static void
bha_collect_mbo(struct bha_softc *sc)
{
	struct bha_mbx_out *mbo;
#ifdef BHADIAG
	struct bha_ccb *ccb;
#endif

	mbo = sc->sc_cmbo;

	while (sc->sc_mbofull > 0) {
		BHA_MBO_SYNC(sc, mbo,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		if (mbo->cmd != BHA_MBO_FREE)
			break;

#ifdef BHADIAG
		ccb = bha_ccb_phys_kv(sc, phystol(mbo->ccb_addr));
		ccb->flags &= ~CCB_SENDING;
#endif

		--sc->sc_mbofull;
		mbo = bha_nextmbo(sc, mbo);
	}

	sc->sc_cmbo = mbo;
}

/*****************************************************************************
 * CCB management functions
 *****************************************************************************/

static inline void
bha_reset_ccb(struct bha_ccb *ccb)
{

	ccb->flags = 0;
}

/*
 * bha_create_ccbs:
 *
 *	Create a set of CCBs.
 *
 *	We determine the target CCB count, and then keep creating them
 *	until we reach the target, or fail.  CCBs that are allocated
 *	but not "created" are left on the allocating list.
 *
 *	XXX AB_QUIET/AB_SILENT lossage here; this is called during
 *	boot as well as at run-time.
 */
static void
bha_create_ccbs(struct bha_softc *sc, int count)
{
	struct bha_ccb_group *bcg;
	struct bha_ccb *ccb;
	bus_dma_segment_t seg;
	bus_dmamap_t ccbmap;
	int target, i, error, rseg;

	/*
	 * If the current CCB count is already the max number we're
	 * allowed to have, bail out now.
	 */
	if (sc->sc_cur_ccbs == sc->sc_max_ccbs)
		return;

	/*
	 * Compute our target count, and clamp it down to the max
	 * number we're allowed to have.
	 */
	target = sc->sc_cur_ccbs + count;
	if (target > sc->sc_max_ccbs)
		target = sc->sc_max_ccbs;

	/*
	 * If there are CCBs on the allocating list, don't allocate a
	 * CCB group yet.
	 */
	if (TAILQ_FIRST(&sc->sc_allocating_ccbs) != NULL)
		goto have_allocating_ccbs;

 allocate_group:
	error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE,
	    PAGE_SIZE, 0, &seg, 1, &rseg, sc->sc_dmaflags | BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to allocate CCB group, error = %d\n", error);
		goto bad_0;
	}

	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
	    (void *)&bcg,
	    sc->sc_dmaflags | BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to map CCB group, error = %d\n", error);
		goto bad_1;
	}

	memset(bcg, 0, PAGE_SIZE);

	error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE,
	    1, PAGE_SIZE, 0, sc->sc_dmaflags | BUS_DMA_NOWAIT, &ccbmap);
	if (error) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to create CCB group DMA map, error = %d\n",
		    error);
		goto bad_2;
	}

	error = bus_dmamap_load(sc->sc_dmat, ccbmap, bcg, PAGE_SIZE, NULL,
	    sc->sc_dmaflags | BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to load CCB group DMA map, error = %d\n",
		    error);
		goto bad_3;
	}

	bcg->bcg_dmamap = ccbmap;

#ifdef DIAGNOSTIC
	if (BHA_CCB_GROUP(&bcg->bcg_ccbs[0]) !=
	    BHA_CCB_GROUP(&bcg->bcg_ccbs[bha_ccbs_per_group - 1]))
		panic("bha_create_ccbs: CCB group size botch");
#endif

	/*
	 * Add all of the CCBs in this group to the allocating list.
	 */
	for (i = 0; i < bha_ccbs_per_group; i++) {
		ccb = &bcg->bcg_ccbs[i];
		TAILQ_INSERT_TAIL(&sc->sc_allocating_ccbs, ccb, chain);
	}

 have_allocating_ccbs:
	/*
	 * Loop over the allocating list until we reach our CCB target.
	 * If we run out on the list, we'll allocate another group's
	 * worth.
	 */
	while (sc->sc_cur_ccbs < target) {
		ccb = TAILQ_FIRST(&sc->sc_allocating_ccbs);
		if (ccb == NULL)
			goto allocate_group;
		if (bha_init_ccb(sc, ccb) != 0) {
			/*
			 * We were unable to initialize the CCB.
			 * This is likely due to a resource shortage,
			 * so bail out now.
			 */
			return;
		}
	}

	/*
	 * If we got here, we've reached our target!
	 */
	return;

 bad_3:
	bus_dmamap_destroy(sc->sc_dmat, ccbmap);
 bad_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)bcg, PAGE_SIZE);
 bad_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 bad_0:
	return;
}

/*
 * bha_init_ccb:
 *
 *	Initialize a CCB; helper function for bha_create_ccbs().
 */
static int
bha_init_ccb(struct bha_softc *sc, struct bha_ccb *ccb)
{
	struct bha_ccb_group *bcg = BHA_CCB_GROUP(ccb);
	int hashnum, error;

	/*
	 * Create the DMA map for this CCB.
	 *
	 * XXX ALLOCNOW is a hack to prevent bounce buffer shortages
	 * XXX in the ISA case.  A better solution is needed.
	 */
	error = bus_dmamap_create(sc->sc_dmat, BHA_MAXXFER, BHA_NSEG,
	    BHA_MAXXFER, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | sc->sc_dmaflags,
	    &ccb->dmamap_xfer);
	if (error) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to create CCB DMA map, error = %d\n", error);
		return (error);
	}

	TAILQ_REMOVE(&sc->sc_allocating_ccbs, ccb, chain);

	/*
	 * Put the CCB into the phystokv hash table.
	 */
	ccb->hashkey = bcg->bcg_dmamap->dm_segs[0].ds_addr +
	    BHA_CCB_OFFSET(ccb);
	hashnum = CCB_HASH(ccb->hashkey);
	ccb->nexthash = sc->sc_ccbhash[hashnum];
	sc->sc_ccbhash[hashnum] = ccb;
	bha_reset_ccb(ccb);

	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
	sc->sc_cur_ccbs++;

	return (0);
}

/*
 * bha_get_ccb:
 *
 *	Get a CCB for the SCSI operation.  If there are none left,
 *	wait until one becomes available, if we can.
 */
static struct bha_ccb *
bha_get_ccb(struct bha_softc *sc)
{
	struct bha_ccb *ccb;
	int s;

	s = splbio();
	ccb = TAILQ_FIRST(&sc->sc_free_ccb);
	if (ccb != NULL) {
		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
		ccb->flags |= CCB_ALLOC;
	}
	splx(s);
	return (ccb);
}

/*
 * bha_free_ccb:
 *
 *	Put a CCB back onto the free list.
 */
static void
bha_free_ccb(struct bha_softc *sc, struct bha_ccb *ccb)
{
	int s;

	s = splbio();
	bha_reset_ccb(ccb);
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
	splx(s);
}

/*
 * bha_ccb_phys_kv:
 *
 *	Given a CCB DMA address, locate the CCB in kernel virtual space.
 */
static struct bha_ccb *
bha_ccb_phys_kv(struct bha_softc *sc, bus_addr_t ccb_phys)
{
	int hashnum = CCB_HASH(ccb_phys);
	struct bha_ccb *ccb = sc->sc_ccbhash[hashnum];

	while (ccb) {
		if (ccb->hashkey == ccb_phys)
			break;
		ccb = ccb->nexthash;
	}
	return (ccb);
}