/*	$NetBSD: bha.c,v 1.45 2001/07/19 16:25:24 thorpej Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Originally written by Julian Elischer (julian@tfs.com)
 * for TRW Financial Systems for use under the MACH(2.5) operating system.
 *
 * TRW Financial Systems, in accordance with their agreement with Carnegie
 * Mellon University, makes this software available to CMU to distribute
 * or use in any manner that they see fit as long as this message is kept with
 * the software.  For this reason TFS also grants any other persons or
 * organisations permission to use or modify this software.
 *
 * TFS supplies this software to be publicly redistributed
 * on the understanding that TFS is not responsible for the correct
 * functioning of this software in any circumstances.
 */

#include "opt_ddb.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/bhareg.h>
#include <dev/ic/bhavar.h>

#ifndef DDB
#define Debugger() panic("should call debugger here (bha.c)")
#endif /* ! DDB */

#define	BHA_MAXXFER	((BHA_NSEG - 1) << PGSHIFT)

#ifdef BHADEBUG
int	bha_debug = 0;
#endif /* BHADEBUG */

static int	bha_cmd __P((bus_space_tag_t, bus_space_handle_t, char *, int,
		    u_char *, int, u_char *));

static void	bha_scsipi_request __P((struct scsipi_channel *,
		    scsipi_adapter_req_t, void *));
static void	bha_minphys __P((struct buf *));

static void	bha_get_xfer_mode __P((struct bha_softc *,
		    struct scsipi_xfer_mode *));

static void	bha_done __P((struct bha_softc *, struct bha_ccb *));
int		bha_poll __P((struct bha_softc *, struct scsipi_xfer *, int));
static void	bha_timeout __P((void *arg));

static int	bha_init __P((struct bha_softc *));

static int	bha_create_mailbox __P((struct bha_softc *));
static void	bha_collect_mbo __P((struct bha_softc *));

static void	bha_queue_ccb __P((struct bha_softc *, struct bha_ccb *));
static void	bha_start_ccbs __P((struct bha_softc *));
static void	bha_finish_ccbs __P((struct bha_softc *));

struct bha_ccb	*bha_ccb_phys_kv __P((struct bha_softc *, bus_addr_t));
void		bha_create_ccbs __P((struct bha_softc *, int));
int		bha_init_ccb __P((struct bha_softc *, struct bha_ccb *));
struct bha_ccb	*bha_get_ccb __P((struct bha_softc *));
void		bha_free_ccb __P((struct bha_softc *, struct bha_ccb *));

#define	BHA_RESET_TIMEOUT	2000	/* time to wait for reset (mSec) */
#define	BHA_ABORT_TIMEOUT	2000	/* time to wait for abort (mSec) */

/*
 * Number of CCBs in an allocation group; must be computed at run-time.
 */
int	bha_ccbs_per_group;

__inline struct bha_mbx_out *bha_nextmbo __P((struct bha_softc *,
	struct bha_mbx_out *));
__inline struct bha_mbx_in *bha_nextmbi __P((struct bha_softc *,
	struct bha_mbx_in *));

__inline struct bha_mbx_out *
bha_nextmbo(sc, mbo)
	struct bha_softc *sc;
	struct bha_mbx_out *mbo;
{

	if (mbo == &sc->sc_mbo[sc->sc_mbox_count - 1])
		return (&sc->sc_mbo[0]);
	return (mbo + 1);
}

__inline struct bha_mbx_in *
bha_nextmbi(sc, mbi)
	struct bha_softc *sc;
	struct bha_mbx_in *mbi;
{
	if (mbi == &sc->sc_mbi[sc->sc_mbox_count - 1])
		return (&sc->sc_mbi[0]);
	return (mbi + 1);
}
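
/*
 * The outgoing (sc_mbo[]) and incoming (sc_mbi[]) mailbox arrays are
 * treated as rings of sc_mbox_count entries each; the helpers above
 * simply return the next slot, wrapping back to the start of the array.
 */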

/*
 * bha_attach:
 *
 *	Finish attaching a Buslogic controller, and configure children.
 */
void
bha_attach(sc)
	struct bha_softc *sc;
{
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	int initial_ccbs;

	/*
	 * Initialize the number of CCBs per group.
	 */
	if (bha_ccbs_per_group == 0)
		bha_ccbs_per_group = BHA_CCBS_PER_GROUP;

	initial_ccbs = bha_info(sc);
	if (initial_ccbs == 0) {
		printf("%s: unable to get adapter info\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &sc->sc_dev;
	adapt->adapt_nchannels = 1;
	/* adapt_openings initialized below */
	adapt->adapt_max_periph = sc->sc_mbox_count;
	adapt->adapt_request = bha_scsipi_request;
	adapt->adapt_minphys = bha_minphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_flags = SCSIPI_CHAN_CANGROW;
	chan->chan_ntargets = (sc->sc_flags & BHAF_WIDE) ? 16 : 8;
	chan->chan_nluns = (sc->sc_flags & BHAF_WIDE_LUN) ? 32 : 8;
	chan->chan_id = sc->sc_scsi_id;

	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	TAILQ_INIT(&sc->sc_allocating_ccbs);

	if (bha_create_mailbox(sc) != 0)
		return;

	bha_create_ccbs(sc, initial_ccbs);
	if (sc->sc_cur_ccbs < 2) {
		printf("%s: not enough CCBs to run\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	adapt->adapt_openings = sc->sc_cur_ccbs;

	if (bha_init(sc) != 0)
		return;

	(void) config_found(&sc->sc_dev, &sc->sc_channel, scsiprint);
}

/*
 * bha_intr:
 *
 *	Interrupt service routine.
 */
int
bha_intr(arg)
	void *arg;
{
	struct bha_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_char sts;

#ifdef BHADEBUG
	printf("%s: bha_intr ", sc->sc_dev.dv_xname);
#endif /* BHADEBUG */

	/*
	 * First acknowledge the interrupt.  Then, if it's not telling us
	 * about a completed operation, just return.
	 */
	sts = bus_space_read_1(iot, ioh, BHA_INTR_PORT);
	if ((sts & BHA_INTR_ANYINTR) == 0)
		return (0);
	bus_space_write_1(iot, ioh, BHA_CTRL_PORT, BHA_CTRL_IRST);

#ifdef BHADIAG
	/* Make sure we clear CCB_SENDING before finishing a CCB. */
	bha_collect_mbo(sc);
#endif

	/* Mail box out empty? */
	if (sts & BHA_INTR_MBOA) {
		struct bha_toggle toggle;

		toggle.cmd.opcode = BHA_MBO_INTR_EN;
		toggle.cmd.enable = 0;
		bha_cmd(iot, ioh, sc->sc_dev.dv_xname,
		    sizeof(toggle.cmd), (u_char *)&toggle.cmd,
		    0, (u_char *)0);
		bha_start_ccbs(sc);
	}

	/* Mail box in full? */
	if (sts & BHA_INTR_MBIF)
		bha_finish_ccbs(sc);

	return (1);
}

/*****************************************************************************
 * SCSI interface routines
 *****************************************************************************/

/*
 * bha_scsipi_request:
 *
 *	Perform a request for the SCSIPI layer.
 */
void
bha_scsipi_request(chan, req, arg)
	struct scsipi_channel *chan;
	scsipi_adapter_req_t req;
	void *arg;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct bha_softc *sc = (void *)adapt->adapt_dev;
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	bus_dma_tag_t dmat = sc->sc_dmat;
	struct bha_ccb *ccb;
	int error, seg, flags, s;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		flags = xs->xs_control;

		SC_DEBUG(periph, SCSIPI_DB2, ("bha_scsipi_request\n"));

		/* Get a CCB to use. */
		ccb = bha_get_ccb(sc);
#ifdef DIAGNOSTIC
		/*
		 * This should never happen as we track the resources
		 * in the mid-layer.
		 */
		if (ccb == NULL) {
			scsipi_printaddr(periph);
			printf("unable to allocate ccb\n");
			panic("bha_scsipi_request");
		}
#endif

		ccb->xs = xs;
		ccb->timeout = xs->timeout;

		/*
		 * Put all the arguments for the xfer in the ccb
		 */
		if (flags & XS_CTL_RESET) {
			ccb->opcode = BHA_RESET_CCB;
			ccb->scsi_cmd_length = 0;
		} else {
			/* can't use S/G if zero length */
			ccb->opcode = (xs->datalen ? BHA_INIT_SCAT_GATH_CCB
						   : BHA_INITIATOR_CCB);
			memcpy(&ccb->scsi_cmd, xs->cmd,
			    ccb->scsi_cmd_length = xs->cmdlen);
		}

		if (xs->datalen) {
			/*
			 * Map the DMA transfer.
			 */
#ifdef TFS
			if (flags & XS_CTL_DATA_UIO) {
				error = bus_dmamap_load_uio(dmat,
				    ccb->dmamap_xfer, (struct uio *)xs->data,
				    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
				     BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
				    ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
				     BUS_DMA_WRITE));
			} else
#endif /* TFS */
			{
				error = bus_dmamap_load(dmat,
				    ccb->dmamap_xfer, xs->data, xs->datalen,
				    NULL,
				    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
				     BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
				    ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
				     BUS_DMA_WRITE));
			}

			switch (error) {
			case 0:
				break;

			case ENOMEM:
			case EAGAIN:
				xs->error = XS_RESOURCE_SHORTAGE;
				goto out_bad;

			default:
				xs->error = XS_DRIVER_STUFFUP;
				printf("%s: error %d loading DMA map\n",
				    sc->sc_dev.dv_xname, error);
 out_bad:
				bha_free_ccb(sc, ccb);
				scsipi_done(xs);
				return;
			}

			bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
			    ccb->dmamap_xfer->dm_mapsize,
			    (flags & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
			    BUS_DMASYNC_PREWRITE);

			/*
			 * Load the hardware scatter/gather map with the
			 * contents of the DMA map.
			 */
			for (seg = 0; seg < ccb->dmamap_xfer->dm_nsegs; seg++) {
				ltophys(ccb->dmamap_xfer->dm_segs[seg].ds_addr,
				    ccb->scat_gath[seg].seg_addr);
				ltophys(ccb->dmamap_xfer->dm_segs[seg].ds_len,
				    ccb->scat_gath[seg].seg_len);
			}

			ltophys(ccb->hashkey + offsetof(struct bha_ccb,
			    scat_gath), ccb->data_addr);
			ltophys(ccb->dmamap_xfer->dm_nsegs *
			    sizeof(struct bha_scat_gath), ccb->data_length);
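
			/*
			 * At this point data_addr is the bus address of
			 * the CCB's own scat_gath[] array (derived from
			 * the CCB's hashkey, which caches the CCB's bus
			 * address), and data_length covers one
			 * bha_scat_gath entry per DMA segment.
			 */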
		} else {
			/*
			 * No data xfer, use non S/G values.
			 */
			ltophys(0, ccb->data_addr);
			ltophys(0, ccb->data_length);
		}

		if (XS_CTL_TAGTYPE(xs) != 0) {
			ccb->tag_enable = 1;
			ccb->tag_type = xs->xs_tag_type & 0x03;
		} else {
			ccb->tag_enable = 0;
			ccb->tag_type = 0;
		}

		ccb->data_out = 0;
		ccb->data_in = 0;
		ccb->target = periph->periph_target;
		ccb->lun = periph->periph_lun;
		ltophys(ccb->hashkey + offsetof(struct bha_ccb, scsi_sense),
		    ccb->sense_ptr);
		ccb->req_sense_length = sizeof(ccb->scsi_sense);
		ccb->host_stat = 0x00;
		ccb->target_stat = 0x00;
		ccb->link_id = 0;
		ltophys(0, ccb->link_addr);

		BHA_CCB_SYNC(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		s = splbio();
		bha_queue_ccb(sc, ccb);
		splx(s);

		SC_DEBUG(periph, SCSIPI_DB3, ("cmd_sent\n"));
		if ((flags & XS_CTL_POLL) == 0)
			return;

		/*
		 * If we can't use interrupts, poll on completion
		 */
		if (bha_poll(sc, xs, ccb->timeout)) {
			bha_timeout(ccb);
			if (bha_poll(sc, xs, ccb->timeout))
				bha_timeout(ccb);
		}
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		if (sc->sc_cur_ccbs == sc->sc_max_ccbs) {
			chan->chan_flags &= ~SCSIPI_CHAN_CANGROW;
			return;
		}
		seg = sc->sc_cur_ccbs;
		bha_create_ccbs(sc, bha_ccbs_per_group);
		adapt->adapt_openings += sc->sc_cur_ccbs - seg;
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		/*
		 * Can't really do this on the Buslogic.  It has its
		 * own setup info.  But we do know how to query what
		 * the settings are.
		 */
		bha_get_xfer_mode(sc, (struct scsipi_xfer_mode *)arg);
		return;
	}
}

/*
 * bha_minphys:
 *
 *	Limit a transfer to our maximum transfer size.
 */
void
bha_minphys(bp)
	struct buf *bp;
{

	if (bp->b_bcount > BHA_MAXXFER)
		bp->b_bcount = BHA_MAXXFER;
	minphys(bp);
}

/*****************************************************************************
 * SCSI job execution helper routines
 *****************************************************************************/

/*
 * bha_get_xfer_mode:
 *
 *	Negotiate the xfer mode for the specified periph, and report
 *	back the mode to the midlayer.
 *
 *	NOTE: we must be called at splbio().
 */
void
bha_get_xfer_mode(sc, xm)
	struct bha_softc *sc;
	struct scsipi_xfer_mode *xm;
{
	struct bha_setup hwsetup;
	struct bha_period hwperiod;
	struct bha_sync *bs;
	int toff = xm->xm_target & 7, tmask = (1 << toff);
	int wide, period, offset, rlen;

	/*
	 * Issue an Inquire Setup Information.  We can extract
	 * sync and wide information from here.
	 */
	rlen = sizeof(hwsetup.reply) +
	    ((sc->sc_flags & BHAF_WIDE) ? sizeof(hwsetup.reply_w) : 0);
	hwsetup.cmd.opcode = BHA_INQUIRE_SETUP;
	hwsetup.cmd.len = rlen;
	bha_cmd(sc->sc_iot, sc->sc_ioh, sc->sc_dev.dv_xname,
	    sizeof(hwsetup.cmd), (u_char *)&hwsetup.cmd,
	    rlen, (u_char *)&hwsetup.reply);

	xm->xm_mode = 0;
	xm->xm_period = 0;
	xm->xm_offset = 0;

	/*
	 * First check for wide.  On later boards, we can check
	 * directly in the setup info if wide is currently active.
	 *
	 * On earlier boards, we have to make an educated guess.
	 */
	if (sc->sc_flags & BHAF_WIDE) {
		if (strcmp(sc->sc_firmware, "5.06L") >= 0) {
			if (xm->xm_target > 7) {
				wide =
				    hwsetup.reply_w.high_wide_active & tmask;
			} else {
				wide =
				    hwsetup.reply_w.low_wide_active & tmask;
			}
			if (wide)
				xm->xm_mode |= PERIPH_CAP_WIDE16;
		} else {
			/* XXX Check `wide permitted' in the config info. */
			xm->xm_mode |= PERIPH_CAP_WIDE16;
		}
	}

	/*
	 * Now get basic sync info.
	 */
	bs = (xm->xm_target > 7) ?
	     &hwsetup.reply_w.sync_high[toff] :
	     &hwsetup.reply.sync_low[toff];

	if (bs->valid) {
		xm->xm_mode |= PERIPH_CAP_SYNC;
		period = (bs->period * 50) + 20;
		offset = bs->offset;

		/*
		 * On boards that can do Fast and Ultra, use the Inquire Period
		 * command to get the period.
		 */
		if (sc->sc_firmware[0] >= '3') {
			rlen = sizeof(hwperiod.reply) +
			    ((sc->sc_flags & BHAF_WIDE) ?
			     sizeof(hwperiod.reply_w) : 0);
			hwperiod.cmd.opcode = BHA_INQUIRE_PERIOD;
			hwperiod.cmd.len = rlen;
			bha_cmd(sc->sc_iot, sc->sc_ioh, sc->sc_dev.dv_xname,
			    sizeof(hwperiod.cmd), (u_char *)&hwperiod.cmd,
			    rlen, (u_char *)&hwperiod.reply);

			if (xm->xm_target > 7)
				period = hwperiod.reply_w.period[toff];
			else
				period = hwperiod.reply.period[toff];

			period *= 10;
		}

		xm->xm_period =
		    scsipi_sync_period_to_factor(period * 10);
		xm->xm_offset = offset;
	}

	/*
	 * Now check for tagged queueing support.
	 *
	 * XXX Check `tags permitted' in the config info.
	 */
	if (sc->sc_flags & BHAF_TAGGED_QUEUEING)
		xm->xm_mode |= PERIPH_CAP_TQING;

	scsipi_async_event(&sc->sc_channel, ASYNC_EVENT_XFER_MODE, xm);
}

/*
 * bha_done:
 *
 *	A CCB has completed execution.  Pass the status back to the
 *	upper layer.
 */
void
bha_done(sc, ccb)
	struct bha_softc *sc;
	struct bha_ccb *ccb;
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	struct scsipi_xfer *xs = ccb->xs;

	SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("bha_done\n"));

#ifdef BHADIAG
	if (ccb->flags & CCB_SENDING) {
		printf("%s: exiting ccb still in transit!\n",
		    sc->sc_dev.dv_xname);
		Debugger();
		return;
	}
#endif
	if ((ccb->flags & CCB_ALLOC) == 0) {
		printf("%s: exiting ccb not allocated!\n",
		    sc->sc_dev.dv_xname);
		Debugger();
		return;
	}

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}

	if (xs->error == XS_NOERROR) {
		if (ccb->host_stat != BHA_OK) {
			switch (ccb->host_stat) {
			case BHA_SEL_TIMEOUT:	/* No response */
				xs->error = XS_SELTIMEOUT;
				break;
			default:	/* Other scsi protocol messes */
				printf("%s: host_stat %x\n",
				    sc->sc_dev.dv_xname, ccb->host_stat);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else if (ccb->target_stat != SCSI_OK) {
			switch (ccb->target_stat) {
			case SCSI_CHECK:
				memcpy(&xs->sense.scsi_sense,
				    &ccb->scsi_sense,
				    sizeof(xs->sense.scsi_sense));
				xs->error = XS_SENSE;
				break;
			case SCSI_BUSY:
				xs->error = XS_BUSY;
				break;
			default:
				printf("%s: target_stat %x\n",
				    sc->sc_dev.dv_xname, ccb->target_stat);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else
			xs->resid = 0;
	}

	bha_free_ccb(sc, ccb);
	scsipi_done(xs);
}

/*
 * bha_poll:
 *
 *	Poll for completion of the specified job.
 */
int
bha_poll(sc, xs, count)
	struct bha_softc *sc;
	struct scsipi_xfer *xs;
	int count;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;

	/* timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		/*
		 * If we had interrupts enabled, would we
		 * have got an interrupt?
		 */
		if (bus_space_read_1(iot, ioh, BHA_INTR_PORT) &
		    BHA_INTR_ANYINTR)
			bha_intr(sc);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);	/* only happens in boot so ok */
		count--;
	}
	return (1);
}

/*
 * bha_timeout:
 *
 *	CCB timeout handler.
 */
void
bha_timeout(arg)
	void *arg;
{
	struct bha_ccb *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	struct bha_softc *sc =
	    (void *)periph->periph_channel->chan_adapter->adapt_dev;
	int s;

	scsipi_printaddr(periph);
	printf("timed out");

	s = splbio();

#ifdef BHADIAG
	/*
	 * If the ccb's mbx is not free, then the board has gone Far East?
	 */
	bha_collect_mbo(sc);
	if (ccb->flags & CCB_SENDING) {
		printf("%s: not taking commands!\n", sc->sc_dev.dv_xname);
		Debugger();
	}
#endif

	/*
	 * If it has been through before, then
	 * a previous abort has failed, don't
	 * try abort again
	 */
	if (ccb->flags & CCB_ABORT) {
		/* abort timed out */
		printf(" AGAIN\n");
		/* XXX Must reset! */
	} else {
		/* abort the operation that has timed out */
		printf("\n");
		ccb->xs->error = XS_TIMEOUT;
		ccb->timeout = BHA_ABORT_TIMEOUT;
		ccb->flags |= CCB_ABORT;
		bha_queue_ccb(sc, ccb);
	}

	splx(s);
}

/*****************************************************************************
 * Misc. subroutines.
 *****************************************************************************/

/*
 * bha_cmd:
 *
 *	Send a command to the Buslogic controller.
 */
int
bha_cmd(iot, ioh, name, icnt, ibuf, ocnt, obuf)
	bus_space_tag_t iot;
	bus_space_handle_t ioh;
	char *name;
	int icnt, ocnt;
	u_char *ibuf, *obuf;
{
	int i;
	int wait;
	u_char sts;
	u_char opcode = ibuf[0];
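
	/*
	 * The exchange below is fully polled: wait for the adapter to go
	 * idle, write the command and its arguments a byte at a time
	 * whenever the command/data-out port drains (BHA_STAT_CDF clear),
	 * read any reply bytes whenever data is available (BHA_STAT_DF
	 * set), then (for most opcodes) wait for command-complete
	 * (BHA_INTR_HACC) and clear the interrupt.
	 */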

	/*
	 * Calculate a reasonable timeout for the command.
	 */
	switch (opcode) {
	case BHA_INQUIRE_DEVICES:
	case BHA_INQUIRE_DEVICES_2:
		wait = 90 * 20000;
		break;
	default:
		wait = 1 * 20000;
		break;
	}

	/*
	 * Wait for the adapter to go idle, unless it's one of
	 * the commands which don't need this
	 */
	if (opcode != BHA_MBO_INTR_EN) {
		for (i = 20000; i; i--) {	/* 1 sec? */
			sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
			if (sts & BHA_STAT_IDLE)
				break;
			delay(50);
		}
		if (!i) {
			printf("%s: bha_cmd, host not idle(0x%x)\n",
			    name, sts);
			return (1);
		}
	}

	/*
	 * Now that it is idle, if we expect output, preflush the
	 * queue feeding to us.
	 */
	if (ocnt) {
		while ((bus_space_read_1(iot, ioh, BHA_STAT_PORT)) &
		    BHA_STAT_DF)
			bus_space_read_1(iot, ioh, BHA_DATA_PORT);
	}

	/*
	 * Output the command and its arguments; for each byte, first
	 * check that the command/data port is empty.
	 */
	while (icnt--) {
		for (i = wait; i; i--) {
			sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
			if (!(sts & BHA_STAT_CDF))
				break;
			delay(50);
		}
		if (!i) {
			if (opcode != BHA_INQUIRE_REVISION)
				printf("%s: bha_cmd, cmd/data port full\n",
				    name);
			goto bad;
		}
		bus_space_write_1(iot, ioh, BHA_CMD_PORT, *ibuf++);
	}

	/*
	 * If we expect input, loop that many times, each time
	 * looking for the data register to have valid data.
	 */
	while (ocnt--) {
		for (i = wait; i; i--) {
			sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
			if (sts & BHA_STAT_DF)
				break;
			delay(50);
		}
		if (!i) {
#ifdef BHADEBUG
			if (opcode != BHA_INQUIRE_REVISION)
				printf("%s: bha_cmd, cmd/data port empty %d\n",
				    name, ocnt);
#endif /* BHADEBUG */
			goto bad;
		}
		*obuf++ = bus_space_read_1(iot, ioh, BHA_DATA_PORT);
	}

	/*
	 * Wait for the board to report a finished instruction.
	 * We may get an extra interrupt for the HACC signal, but this is
	 * unimportant.
	 */
	if (opcode != BHA_MBO_INTR_EN && opcode != BHA_MODIFY_IOPORT) {
		for (i = 20000; i; i--) {	/* 1 sec? */
			sts = bus_space_read_1(iot, ioh, BHA_INTR_PORT);
			/* XXX Need to save this in the interrupt handler? */
			if (sts & BHA_INTR_HACC)
				break;
			delay(50);
		}
		if (!i) {
			printf("%s: bha_cmd, host not finished(0x%x)\n",
			    name, sts);
			return (1);
		}
	}
	bus_space_write_1(iot, ioh, BHA_CTRL_PORT, BHA_CTRL_IRST);
	return (0);

bad:
	bus_space_write_1(iot, ioh, BHA_CTRL_PORT, BHA_CTRL_SRST);
	return (1);
}

/*
 * bha_find:
 *
 *	Find the board.
 */
int
bha_find(iot, ioh)
	bus_space_tag_t iot;
	bus_space_handle_t ioh;
{
	int i;
	u_char sts;
	struct bha_extended_inquire inquire;

	/* Check something is at the ports we need to access */
	sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
	if (sts == 0xFF)
		return (0);

	/*
	 * Reset board.  If it doesn't respond, assume that it's not
	 * there; good for the probe.
	 */

	bus_space_write_1(iot, ioh, BHA_CTRL_PORT,
	    BHA_CTRL_HRST | BHA_CTRL_SRST);

	delay(100);
	for (i = BHA_RESET_TIMEOUT; i; i--) {
		sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
		if (sts == (BHA_STAT_IDLE | BHA_STAT_INIT))
			break;
		delay(1000);
	}
	if (!i) {
#ifdef BHADEBUG
		if (bha_debug)
			printf("bha_find: No answer from buslogic board\n");
#endif /* BHADEBUG */
		return (0);
	}

	/*
	 * The BusLogic cards implement an Adaptec 1542 (aha)-compatible
	 * interface.  The native bha interface is not compatible with
	 * an aha 1542.  We need to ensure that we never match an
	 * Adaptec 1542.  We must also avoid sending Adaptec-compatible
	 * commands to a real bha, lest it go into 1542 emulation mode.
	 * (On an indirect bus like ISA, we should always probe for BusLogic
	 * interfaces before Adaptec interfaces).
	 */

	/*
	 * Make sure we don't match an AHA-1542A or AHA-1542B, by checking
	 * for an extended-geometry register.  The 1542[AB] don't have one.
	 */
	sts = bus_space_read_1(iot, ioh, BHA_EXTGEOM_PORT);
	if (sts == 0xFF)
		return (0);

	/*
	 * Check that we actually know how to use this board.
	 */
	delay(1000);
	inquire.cmd.opcode = BHA_INQUIRE_EXTENDED;
	inquire.cmd.len = sizeof(inquire.reply);
	i = bha_cmd(iot, ioh, "(bha_find)",
	    sizeof(inquire.cmd), (u_char *)&inquire.cmd,
	    sizeof(inquire.reply), (u_char *)&inquire.reply);

	/*
	 * Some 1542Cs (CP, perhaps not CF, may depend on firmware rev)
	 * have the extended-geometry register and also respond to
	 * BHA_INQUIRE_EXTENDED.  Make sure we never match such cards,
	 * by checking that the size of the reply is what a BusLogic
	 * card returns.
	 */
	if (i) {
#ifdef BHADEBUG
		printf("bha_find: board returned %d instead of %d to %s\n",
		    i, sizeof(inquire.reply), "INQUIRE_EXTENDED");
#endif
		return (0);
	}

	/* OK, we know we've found a buslogic adaptor. */

	switch (inquire.reply.bus_type) {
	case BHA_BUS_TYPE_24BIT:
	case BHA_BUS_TYPE_32BIT:
		break;
	case BHA_BUS_TYPE_MCA:
		/* We don't grok MicroChannel (yet). */
		return (0);
	default:
		printf("bha_find: illegal bus type %c\n",
		    inquire.reply.bus_type);
		return (0);
	}

	return (1);
}

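/*
 * The probe is split in two: bha_find() above verifies that a BusLogic
 * board (and not an Adaptec 1542) is present, while bha_inquire_config()
 * below reads the jumper-selected DRQ and IRQ; bha_probe_inquiry()
 * simply chains the two.
 */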

/*
 * bha_inquire_config:
 *
 *	Determine irq/drq.
 */
int
bha_inquire_config(bus_space_tag_t iot, bus_space_handle_t ioh,
    struct bha_probe_data *sc)
{
	int irq, drq;
	struct bha_config config;

	/*
	 * Assume we have a board at this stage.  Set up the DMA channel
	 * from the jumpers and save the interrupt level.
	 */
	delay(1000);
	config.cmd.opcode = BHA_INQUIRE_CONFIG;
	bha_cmd(iot, ioh, "(bha_inquire_config)",
	    sizeof(config.cmd), (u_char *)&config.cmd,
	    sizeof(config.reply), (u_char *)&config.reply);
	switch (config.reply.chan) {
	case EISADMA:
		drq = -1;
		break;
	case CHAN0:
		drq = 0;
		break;
	case CHAN5:
		drq = 5;
		break;
	case CHAN6:
		drq = 6;
		break;
	case CHAN7:
		drq = 7;
		break;
	default:
		printf("bha: illegal drq setting %x\n",
		    config.reply.chan);
		return (0);
	}

	switch (config.reply.intr) {
	case INT9:
		irq = 9;
		break;
	case INT10:
		irq = 10;
		break;
	case INT11:
		irq = 11;
		break;
	case INT12:
		irq = 12;
		break;
	case INT14:
		irq = 14;
		break;
	case INT15:
		irq = 15;
		break;
	default:
		printf("bha: illegal irq setting %x\n",
		    config.reply.intr);
		return (0);
	}

	/* if we want to fill in softc, do so now */
	if (sc != NULL) {
		sc->sc_irq = irq;
		sc->sc_drq = drq;
	}

	return (1);
}

int
bha_probe_inquiry(bus_space_tag_t iot, bus_space_handle_t ioh,
    struct bha_probe_data *bpd)
{
	return bha_find(iot, ioh) && bha_inquire_config(iot, ioh, bpd);
}

/*
 * bha_disable_isacompat:
 *
 *	Disable the ISA-compatibility ioports on PCI bha devices,
 *	to ensure they're not autoconfigured a second time as an ISA bha.
 */
int
bha_disable_isacompat(sc)
	struct bha_softc *sc;
{
	struct bha_isadisable isa_disable;

	isa_disable.cmd.opcode = BHA_MODIFY_IOPORT;
	isa_disable.cmd.modifier = BHA_IOMODIFY_DISABLE1;
	bha_cmd(sc->sc_iot, sc->sc_ioh, sc->sc_dev.dv_xname,
	    sizeof(isa_disable.cmd), (u_char*)&isa_disable.cmd,
	    0, (u_char *)0);
	return (0);
}

/*
 * bha_info:
 *
 *	Get information about the board, and report it.  We
 *	return the initial number of CCBs, 0 if we failed.
 */
int
bha_info(sc)
	struct bha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct bha_extended_inquire inquire;
	struct bha_config config;
	struct bha_devices devices;
	struct bha_setup setup;
	struct bha_model model;
	struct bha_revision revision;
	struct bha_digit digit;
	int i, j, initial_ccbs, rlen;
	char *name = sc->sc_dev.dv_xname;
	char *p;

	/*
	 * Fetch the extended inquire information.
	 */
	inquire.cmd.opcode = BHA_INQUIRE_EXTENDED;
	inquire.cmd.len = sizeof(inquire.reply);
	bha_cmd(iot, ioh, name,
	    sizeof(inquire.cmd), (u_char *)&inquire.cmd,
	    sizeof(inquire.reply), (u_char *)&inquire.reply);

	/*
	 * Fetch the configuration information.
	 */
	config.cmd.opcode = BHA_INQUIRE_CONFIG;
	bha_cmd(iot, ioh, name,
	    sizeof(config.cmd), (u_char *)&config.cmd,
	    sizeof(config.reply), (u_char *)&config.reply);

	sc->sc_scsi_id = config.reply.scsi_dev;
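
	/*
	 * The firmware revision string built below (sc->sc_firmware) is
	 * assembled from several inquire commands: the two-byte
	 * revision/version reply, a third digit, and on newer firmware a
	 * fourth digit.  It is used further down to deduce the model
	 * number, tagged-queueing support, and CCB limits.
	 */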
	/*
	 * Get the firmware revision.
	 */
	p = sc->sc_firmware;
	revision.cmd.opcode = BHA_INQUIRE_REVISION;
	bha_cmd(iot, ioh, name,
	    sizeof(revision.cmd), (u_char *)&revision.cmd,
	    sizeof(revision.reply), (u_char *)&revision.reply);
	*p++ = revision.reply.firm_revision;
	*p++ = '.';
	*p++ = revision.reply.firm_version;
	digit.cmd.opcode = BHA_INQUIRE_REVISION_3;
	bha_cmd(iot, ioh, name,
	    sizeof(digit.cmd), (u_char *)&digit.cmd,
	    sizeof(digit.reply), (u_char *)&digit.reply);
	*p++ = digit.reply.digit;
	if (revision.reply.firm_revision >= '3' ||
	    (revision.reply.firm_revision == '3' &&
	     revision.reply.firm_version >= '3')) {
		digit.cmd.opcode = BHA_INQUIRE_REVISION_4;
		bha_cmd(iot, ioh, name,
		    sizeof(digit.cmd), (u_char *)&digit.cmd,
		    sizeof(digit.reply), (u_char *)&digit.reply);
		*p++ = digit.reply.digit;
	}
	while (p > sc->sc_firmware && (p[-1] == ' ' || p[-1] == '\0'))
		p--;
	*p = '\0';

	/*
	 * Get the model number.
	 *
	 * Some boards do not handle the Inquire Board Model Number
	 * command correctly, or don't give correct information.
	 *
	 * So, we use the Firmware Revision and Extended Setup
	 * information to fix up the model number in these cases.
	 *
	 * The firmware version indicates:
	 *
	 *	5.xx	BusLogic "W" Series Host Adapters
	 *		BT-948/958/958D
	 *
	 *	4.xx	BusLogic "C" Series Host Adapters
	 *		BT-946C/956C/956CD/747C/757C/757CD/445C/545C/540CF
	 *
	 *	3.xx	BusLogic "S" Series Host Adapters
	 *		BT-747S/747D/757S/757D/445S/545S/542D
	 *		BT-542B/742A (revision H)
	 *
	 *	2.xx	BusLogic "A" Series Host Adapters
	 *		BT-542B/742A (revision G and below)
	 *
	 *	0.xx	AMI FastDisk VLB/EISA BusLogic Clone Host Adapter
	 */
	if (inquire.reply.bus_type == BHA_BUS_TYPE_24BIT &&
	    sc->sc_firmware[0] < '3')
		sprintf(sc->sc_model, "542B");
	else if (inquire.reply.bus_type == BHA_BUS_TYPE_32BIT &&
	    sc->sc_firmware[0] == '2' &&
	    (sc->sc_firmware[2] == '1' ||
	     (sc->sc_firmware[2] == '2' && sc->sc_firmware[3] == '0')))
		sprintf(sc->sc_model, "742A");
	else if (inquire.reply.bus_type == BHA_BUS_TYPE_32BIT &&
	    sc->sc_firmware[0] == '0')
		sprintf(sc->sc_model, "747A");
	else {
		p = sc->sc_model;
		model.cmd.opcode = BHA_INQUIRE_MODEL;
		model.cmd.len = sizeof(model.reply);
		bha_cmd(iot, ioh, name,
		    sizeof(model.cmd), (u_char *)&model.cmd,
		    sizeof(model.reply), (u_char *)&model.reply);
		*p++ = model.reply.id[0];
		*p++ = model.reply.id[1];
		*p++ = model.reply.id[2];
		*p++ = model.reply.id[3];
		while (p > sc->sc_model && (p[-1] == ' ' || p[-1] == '\0'))
			p--;
		*p++ = model.reply.version[0];
		*p++ = model.reply.version[1];
		while (p > sc->sc_model && (p[-1] == ' ' || p[-1] == '\0'))
			p--;
		*p = '\0';
	}

	/* Enable round-robin scheme - appeared at firmware rev. 3.31. */
	if (strcmp(sc->sc_firmware, "3.31") >= 0)
		sc->sc_flags |= BHAF_STRICT_ROUND_ROBIN;

	/*
	 * Determine some characteristics about our bus.
	 */
	if (inquire.reply.scsi_flags & BHA_SCSI_WIDE)
		sc->sc_flags |= BHAF_WIDE;
	if (inquire.reply.scsi_flags & BHA_SCSI_DIFFERENTIAL)
		sc->sc_flags |= BHAF_DIFFERENTIAL;
	if (inquire.reply.scsi_flags & BHA_SCSI_ULTRA)
		sc->sc_flags |= BHAF_ULTRA;

	/*
	 * Determine some characteristics of the board.
	 */
	sc->sc_max_dmaseg = inquire.reply.sg_limit;

	/*
	 * Determine the maximum CCB count and whether or not
	 * tagged queueing is available on this host adapter.
	 *
	 * Tagged queueing works on:
	 *
	 *	"W" Series adapters
	 *	"C" Series adapters with firmware >= 4.22
	 *	"S" Series adapters with firmware >= 3.35
	 *
	 * The internal CCB counts are:
	 *
	 *	192	BT-948/958/958D
	 *	100	BT-946C/956C/956CD/747C/757C/757CD/445C
	 *	 50	BT-545C/540CF
	 *	 30	BT-747S/747D/757S/757D/445S/545S/542D/542B/742A
	 */
	switch (sc->sc_firmware[0]) {
	case '5':
		sc->sc_max_ccbs = 192;
		sc->sc_flags |= BHAF_TAGGED_QUEUEING;
		break;

	case '4':
		if (sc->sc_model[0] == '5')
			sc->sc_max_ccbs = 50;
		else
			sc->sc_max_ccbs = 100;
		if (strcmp(sc->sc_firmware, "4.22") >= 0)
			sc->sc_flags |= BHAF_TAGGED_QUEUEING;
		break;

	case '3':
		if (strcmp(sc->sc_firmware, "3.35") >= 0)
			sc->sc_flags |= BHAF_TAGGED_QUEUEING;
		/* FALLTHROUGH */

	default:
		sc->sc_max_ccbs = 30;
	}

	/*
	 * Set the mailbox count to precisely the number of HW CCBs
	 * available.  A mailbox isn't required while a CCB is executing,
	 * but this allows us to actually enqueue up to our resource
	 * limit.
	 *
	 * This will keep the mailbox count small on boards which don't
	 * have strict round-robin (they have to scan the entire set of
	 * mailboxes each time they run a command).
	 */
	sc->sc_mbox_count = sc->sc_max_ccbs;

	/*
	 * Obtain setup information.
	 */
	rlen = sizeof(setup.reply) +
	    ((sc->sc_flags & BHAF_WIDE) ? sizeof(setup.reply_w) : 0);
	setup.cmd.opcode = BHA_INQUIRE_SETUP;
	setup.cmd.len = rlen;
	bha_cmd(iot, ioh, name,
	    sizeof(setup.cmd), (u_char *)&setup.cmd,
	    rlen, (u_char *)&setup.reply);

	printf("%s: model BT-%s, firmware %s\n", sc->sc_dev.dv_xname,
	    sc->sc_model, sc->sc_firmware);

	printf("%s: %d H/W CCBs", sc->sc_dev.dv_xname, sc->sc_max_ccbs);
	if (setup.reply.sync_neg)
		printf(", sync");
	if (setup.reply.parity)
		printf(", parity");
	if (sc->sc_flags & BHAF_TAGGED_QUEUEING)
		printf(", tagged queueing");
	if (sc->sc_flags & BHAF_WIDE_LUN)
		printf(", wide LUN support");
	printf("\n");

	/*
	 * Poll targets 0 - 7.
	 */
	devices.cmd.opcode = BHA_INQUIRE_DEVICES;
	bha_cmd(iot, ioh, name,
	    sizeof(devices.cmd), (u_char *)&devices.cmd,
	    sizeof(devices.reply), (u_char *)&devices.reply);

	/* Count installed units. */
	initial_ccbs = 0;
	for (i = 0; i < 8; i++) {
		for (j = 0; j < 8; j++) {
			if (((devices.reply.lun_map[i] >> j) & 1) == 1)
				initial_ccbs++;
		}
	}

	/*
	 * Poll targets 8 - 15 if we have a wide bus.
	 */
	if (sc->sc_flags & BHAF_WIDE) {
		devices.cmd.opcode = BHA_INQUIRE_DEVICES_2;
		bha_cmd(iot, ioh, name,
		    sizeof(devices.cmd), (u_char *)&devices.cmd,
		    sizeof(devices.reply), (u_char *)&devices.reply);

		for (i = 0; i < 8; i++) {
			for (j = 0; j < 8; j++) {
				if (((devices.reply.lun_map[i] >> j) & 1) == 1)
					initial_ccbs++;
			}
		}
	}

	/*
	 * Double the initial CCB count, for good measure.
	 */
	initial_ccbs *= 2;

	/*
	 * Sanity check the initial CCB count; don't create more than
	 * we can enqueue (sc_max_ccbs), and make sure there are some
	 * at all.
	 */
	if (initial_ccbs > sc->sc_max_ccbs)
		initial_ccbs = sc->sc_max_ccbs;
	if (initial_ccbs == 0)
		initial_ccbs = 2;

	return (initial_ccbs);
}

/*
 * bha_init:
 *
 *	Initialize the board.
 */
int
bha_init(sc)
	struct bha_softc *sc;
{
	char *name = sc->sc_dev.dv_xname;
	struct bha_toggle toggle;
	struct bha_mailbox mailbox;
	struct bha_mbx_out *mbo;
	struct bha_mbx_in *mbi;
	int i;

	/*
	 * Set up the mailbox.  We always run the mailbox in round-robin.
	 */
	for (i = 0; i < sc->sc_mbox_count; i++) {
		mbo = &sc->sc_mbo[i];
		mbi = &sc->sc_mbi[i];

		mbo->cmd = BHA_MBO_FREE;
		BHA_MBO_SYNC(sc, mbo, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		mbi->comp_stat = BHA_MBI_FREE;
		BHA_MBI_SYNC(sc, mbi, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	sc->sc_cmbo = sc->sc_tmbo = &sc->sc_mbo[0];
	sc->sc_tmbi = &sc->sc_mbi[0];

	sc->sc_mbofull = 0;

	/*
	 * If the board supports strict round-robin, enable that.
	 */
	if (sc->sc_flags & BHAF_STRICT_ROUND_ROBIN) {
		toggle.cmd.opcode = BHA_ROUND_ROBIN;
		toggle.cmd.enable = 1;
		bha_cmd(sc->sc_iot, sc->sc_ioh, name,
		    sizeof(toggle.cmd), (u_char *)&toggle.cmd,
		    0, NULL);
	}

	/*
	 * Give the mailbox to the board.
	 */
	mailbox.cmd.opcode = BHA_MBX_INIT_EXTENDED;
	mailbox.cmd.nmbx = sc->sc_mbox_count;
	ltophys(sc->sc_dmamap_mbox->dm_segs[0].ds_addr, mailbox.cmd.addr);
	bha_cmd(sc->sc_iot, sc->sc_ioh, name,
	    sizeof(mailbox.cmd), (u_char *)&mailbox.cmd,
	    0, (u_char *)0);

	return (0);
}

/*****************************************************************************
 * CCB execution engine
 *****************************************************************************/

/*
 * bha_queue_ccb:
 *
 *	Queue a CCB to be sent to the controller, and send it if possible.
 */
void
bha_queue_ccb(sc, ccb)
	struct bha_softc *sc;
	struct bha_ccb *ccb;
{

	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);
	bha_start_ccbs(sc);
}

/*
 * bha_start_ccbs:
 *
 *	Send as many CCBs as we have empty mailboxes for.
 */
void
bha_start_ccbs(sc)
	struct bha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct bha_ccb_group *bcg;
	struct bha_mbx_out *mbo;
	struct bha_ccb *ccb;

	mbo = sc->sc_tmbo;

	while ((ccb = TAILQ_FIRST(&sc->sc_waiting_ccb)) != NULL) {
		if (sc->sc_mbofull >= sc->sc_mbox_count) {
#ifdef DIAGNOSTIC
			if (sc->sc_mbofull > sc->sc_mbox_count)
				panic("bha_start_ccbs: mbofull > mbox_count");
#endif
			/*
			 * No mailboxes available; attempt to collect ones
			 * that have already been used.
			 */
			bha_collect_mbo(sc);
			if (sc->sc_mbofull == sc->sc_mbox_count) {
				/*
				 * Still no more available; have the
				 * controller interrupt us when it
				 * frees one.
				 */
				struct bha_toggle toggle;

				toggle.cmd.opcode = BHA_MBO_INTR_EN;
				toggle.cmd.enable = 1;
				bha_cmd(iot, ioh, sc->sc_dev.dv_xname,
				    sizeof(toggle.cmd), (u_char *)&toggle.cmd,
				    0, (u_char *)0);
				break;
			}
		}

		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
#ifdef BHADIAG
		ccb->flags |= CCB_SENDING;
#endif

		/*
		 * Put the CCB in the mailbox.
		 */
		bcg = BHA_CCB_GROUP(ccb);
		ltophys(bcg->bcg_dmamap->dm_segs[0].ds_addr +
		    BHA_CCB_OFFSET(ccb), mbo->ccb_addr);
		if (ccb->flags & CCB_ABORT)
			mbo->cmd = BHA_MBO_ABORT;
		else
			mbo->cmd = BHA_MBO_START;

		BHA_MBO_SYNC(sc, mbo,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Tell the card to poll immediately. */
		bus_space_write_1(iot, ioh, BHA_CMD_PORT, BHA_START_SCSI);

		if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
			callout_reset(&ccb->xs->xs_callout,
			    (ccb->timeout * hz) / 1000, bha_timeout, ccb);

		++sc->sc_mbofull;
		mbo = bha_nextmbo(sc, mbo);
	}

	sc->sc_tmbo = mbo;
}

/*
 * bha_finish_ccbs:
 *
 *	Finalize the execution of CCBs in our incoming mailbox.
 */
void
bha_finish_ccbs(sc)
	struct bha_softc *sc;
{
	struct bha_mbx_in *mbi;
	struct bha_ccb *ccb;
	int i;

	mbi = sc->sc_tmbi;

	BHA_MBI_SYNC(sc, mbi, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	if (mbi->comp_stat == BHA_MBI_FREE) {
		for (i = 0; i < sc->sc_mbox_count; i++) {
			if (mbi->comp_stat != BHA_MBI_FREE) {
#ifdef BHADIAG
				/*
				 * This can happen in normal operation if
				 * we use all mailbox slots.
				 */
				printf("%s: mbi not in round-robin order\n",
				    sc->sc_dev.dv_xname);
#endif
				goto again;
			}
			mbi = bha_nextmbi(sc, mbi);
			BHA_MBI_SYNC(sc, mbi,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		}
#ifdef BHADIAGnot
		printf("%s: mbi interrupt with no full mailboxes\n",
		    sc->sc_dev.dv_xname);
#endif
		return;
	}

again:
	do {
		ccb = bha_ccb_phys_kv(sc, phystol(mbi->ccb_addr));
		if (ccb == NULL) {
			printf("%s: bad mbi ccb pointer 0x%08x; skipping\n",
			    sc->sc_dev.dv_xname, phystol(mbi->ccb_addr));
			goto next;
		}

		BHA_CCB_SYNC(sc, ccb,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

#ifdef BHADEBUG
		if (bha_debug) {
			struct scsi_generic *cmd = &ccb->scsi_cmd;
			printf("op=%x %x %x %x %x %x\n",
			    cmd->opcode, cmd->bytes[0], cmd->bytes[1],
			    cmd->bytes[2], cmd->bytes[3], cmd->bytes[4]);
			printf("comp_stat %x for mbi addr = 0x%p, ",
			    mbi->comp_stat, mbi);
			printf("ccb addr = %p\n", ccb);
		}
#endif /* BHADEBUG */

		switch (mbi->comp_stat) {
		case BHA_MBI_OK:
		case BHA_MBI_ERROR:
			if ((ccb->flags & CCB_ABORT) != 0) {
				/*
				 * If we already started an abort, wait for it
				 * to complete before clearing the CCB.  We
				 * could instead just clear CCB_SENDING, but
				 * what if the mailbox was already received?
				 * The worst that happens here is that we clear
				 * the CCB a bit later than we need to.  BFD.
				 */
				goto next;
			}
			break;

		case BHA_MBI_ABORT:
		case BHA_MBI_UNKNOWN:
			/*
			 * Even if the CCB wasn't found, we clear it anyway.
			 * See preceding comment.
			 */
			break;

		default:
			printf("%s: bad mbi comp_stat %02x; skipping\n",
			    sc->sc_dev.dv_xname, mbi->comp_stat);
			goto next;
		}

		callout_stop(&ccb->xs->xs_callout);
		bha_done(sc, ccb);

 next:
		mbi->comp_stat = BHA_MBI_FREE;
		BHA_CCB_SYNC(sc, ccb,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		mbi = bha_nextmbi(sc, mbi);
		BHA_MBI_SYNC(sc, mbi,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	} while (mbi->comp_stat != BHA_MBI_FREE);

	sc->sc_tmbi = mbi;
}

/*****************************************************************************
 * Mailbox management functions.
 *****************************************************************************/

/*
 * bha_create_mailbox:
 *
 *	Create the mailbox structures.  Helper function for bha_attach().
 *
 *	NOTE: The Buslogic hardware only gets one DMA address for the
 *	mailbox!  It expects:
 *
 *		mailbox_out[mailbox_size]
 *		mailbox_in[mailbox_size]
 */
int
bha_create_mailbox(sc)
	struct bha_softc *sc;
{
	bus_dma_segment_t seg;
	size_t size;
	int error, rseg;

	size = (sizeof(struct bha_mbx_out) * sc->sc_mbox_count) +
	       (sizeof(struct bha_mbx_in) * sc->sc_mbox_count);

	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg,
	    1, &rseg, sc->sc_dmaflags);
	if (error) {
		printf("%s: unable to allocate mailboxes, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto bad_0;
	}

	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
	    (caddr_t *)&sc->sc_mbo, sc->sc_dmaflags | BUS_DMA_COHERENT);
	if (error) {
		printf("%s: unable to map mailboxes, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto bad_1;
	}

	memset(sc->sc_mbo, 0, size);

	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    sc->sc_dmaflags, &sc->sc_dmamap_mbox);
	if (error) {
		printf("%s: unable to create mailbox DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto bad_2;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_mbox,
	    sc->sc_mbo, size, NULL, 0);
	if (error) {
		printf("%s: unable to load mailbox DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto bad_3;
	}

	sc->sc_mbi = (struct bha_mbx_in *)(sc->sc_mbo + sc->sc_mbox_count);

	return (0);

 bad_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap_mbox);
 bad_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_mbo, size);
 bad_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 bad_0:
	return (error);
}
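
/*
 * Note that the controller is handed just one base address for both
 * mailbox arrays: the allocation above lays out sc_mbox_count outgoing
 * mailboxes immediately followed by sc_mbox_count incoming ones, so
 * sc_mbi simply points just past the end of the sc_mbo array.
 */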

/*
 * bha_collect_mbo:
 *
 *	Garbage collect mailboxes that are no longer in use.
 */
void
bha_collect_mbo(sc)
	struct bha_softc *sc;
{
	struct bha_mbx_out *mbo;
#ifdef BHADIAG
	struct bha_ccb *ccb;
#endif

	mbo = sc->sc_cmbo;

	while (sc->sc_mbofull > 0) {
		BHA_MBO_SYNC(sc, mbo,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		if (mbo->cmd != BHA_MBO_FREE)
			break;

#ifdef BHADIAG
		ccb = bha_ccb_phys_kv(sc, phystol(mbo->ccb_addr));
		ccb->flags &= ~CCB_SENDING;
#endif

		--sc->sc_mbofull;
		mbo = bha_nextmbo(sc, mbo);
	}

	sc->sc_cmbo = mbo;
}

/*****************************************************************************
 * CCB management functions
 *****************************************************************************/

__inline void	bha_reset_ccb __P((struct bha_ccb *));

__inline void
bha_reset_ccb(ccb)
	struct bha_ccb *ccb;
{

	ccb->flags = 0;
}

/*
 * bha_create_ccbs:
 *
 *	Create a set of CCBs.
 *
 *	We determine the target CCB count, and then keep creating them
 *	until we reach the target, or fail.  CCBs that are allocated
 *	but not "created" are left on the allocating list.
 */
void
bha_create_ccbs(sc, count)
	struct bha_softc *sc;
	int count;
{
	struct bha_ccb_group *bcg;
	struct bha_ccb *ccb;
	bus_dma_segment_t seg;
	bus_dmamap_t ccbmap;
	int target, i, error, rseg;

	/*
	 * If the current CCB count is already the max number we're
	 * allowed to have, bail out now.
	 */
	if (sc->sc_cur_ccbs == sc->sc_max_ccbs)
		return;

	/*
	 * Compute our target count, and clamp it down to the max
	 * number we're allowed to have.
	 */
	target = sc->sc_cur_ccbs + count;
	if (target > sc->sc_max_ccbs)
		target = sc->sc_max_ccbs;

	/*
	 * If there are CCBs on the allocating list, don't allocate a
	 * CCB group yet.
	 */
	if (TAILQ_FIRST(&sc->sc_allocating_ccbs) != NULL)
		goto have_allocating_ccbs;

 allocate_group:
	error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE,
	    PAGE_SIZE, 0, &seg, 1, &rseg, sc->sc_dmaflags | BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: unable to allocate CCB group, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto bad_0;
	}

	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
	    (caddr_t *)&bcg,
	    sc->sc_dmaflags | BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		printf("%s: unable to map CCB group, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto bad_1;
	}

	memset(bcg, 0, PAGE_SIZE);

	error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE,
	    1, PAGE_SIZE, 0, sc->sc_dmaflags | BUS_DMA_NOWAIT, &ccbmap);
	if (error) {
		printf("%s: unable to create CCB group DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto bad_2;
	}

	error = bus_dmamap_load(sc->sc_dmat, ccbmap, bcg, PAGE_SIZE, NULL,
	    sc->sc_dmaflags | BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: unable to load CCB group DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto bad_3;
	}

	bcg->bcg_dmamap = ccbmap;

#ifdef DIAGNOSTIC
	if (BHA_CCB_GROUP(&bcg->bcg_ccbs[0]) !=
	    BHA_CCB_GROUP(&bcg->bcg_ccbs[bha_ccbs_per_group - 1]))
		panic("bha_create_ccbs: CCB group size botch");
#endif
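
	/*
	 * The DIAGNOSTIC check above guards the layout assumption that
	 * every CCB in this page-sized group resolves back to the same
	 * group header via BHA_CCB_GROUP(); bha_init_ccb() and
	 * bha_start_ccbs() rely on that to find the group's DMA map
	 * from a bare CCB pointer.
	 */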

	/*
	 * Add all of the CCBs in this group to the allocating list.
	 */
	for (i = 0; i < bha_ccbs_per_group; i++) {
		ccb = &bcg->bcg_ccbs[i];
		TAILQ_INSERT_TAIL(&sc->sc_allocating_ccbs, ccb, chain);
	}

 have_allocating_ccbs:
	/*
	 * Loop over the allocating list until we reach our CCB target.
	 * If we run out on the list, we'll allocate another group's
	 * worth.
	 */
	while (sc->sc_cur_ccbs < target) {
		ccb = TAILQ_FIRST(&sc->sc_allocating_ccbs);
		if (ccb == NULL)
			goto allocate_group;
		if (bha_init_ccb(sc, ccb) != 0) {
			/*
			 * We were unable to initialize the CCB.
			 * This is likely due to a resource shortage,
			 * so bail out now.
			 */
			return;
		}
	}

	/*
	 * If we got here, we've reached our target!
	 */
	return;

 bad_3:
	bus_dmamap_destroy(sc->sc_dmat, ccbmap);
 bad_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)bcg, PAGE_SIZE);
 bad_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 bad_0:
	return;
}

/*
 * bha_init_ccb:
 *
 *	Initialize a CCB; helper function for bha_create_ccbs().
 */
int
bha_init_ccb(sc, ccb)
	struct bha_softc *sc;
	struct bha_ccb *ccb;
{
	struct bha_ccb_group *bcg = BHA_CCB_GROUP(ccb);
	int hashnum, error;

	/*
	 * Create the DMA map for this CCB.
	 *
	 * XXX ALLOCNOW is a hack to prevent bounce buffer shortages
	 * XXX in the ISA case.  A better solution is needed.
	 */
	error = bus_dmamap_create(sc->sc_dmat, BHA_MAXXFER, BHA_NSEG,
	    BHA_MAXXFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | sc->sc_dmaflags,
	    &ccb->dmamap_xfer);
	if (error) {
		printf("%s: unable to create CCB DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}

	TAILQ_REMOVE(&sc->sc_allocating_ccbs, ccb, chain);

	/*
	 * Put the CCB into the phystokv hash table.
	 */
	ccb->hashkey = bcg->bcg_dmamap->dm_segs[0].ds_addr +
	    BHA_CCB_OFFSET(ccb);
	hashnum = CCB_HASH(ccb->hashkey);
	ccb->nexthash = sc->sc_ccbhash[hashnum];
	sc->sc_ccbhash[hashnum] = ccb;
	bha_reset_ccb(ccb);

	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
	sc->sc_cur_ccbs++;

	return (0);
}

/*
 * bha_get_ccb:
 *
 *	Get a CCB for the SCSI operation.  If there are none left,
 *	wait until one becomes available, if we can.
 */
struct bha_ccb *
bha_get_ccb(sc)
	struct bha_softc *sc;
{
	struct bha_ccb *ccb;
	int s;

	s = splbio();
	ccb = TAILQ_FIRST(&sc->sc_free_ccb);
	if (ccb != NULL) {
		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
		ccb->flags |= CCB_ALLOC;
	}
	splx(s);
	return (ccb);
}

/*
 * bha_free_ccb:
 *
 *	Put a CCB back onto the free list.
 */
void
bha_free_ccb(sc, ccb)
	struct bha_softc *sc;
	struct bha_ccb *ccb;
{
	int s;

	s = splbio();
	bha_reset_ccb(ccb);
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
	splx(s);
}

/*
 * bha_ccb_phys_kv:
 *
 *	Given a CCB DMA address, locate the CCB in kernel virtual space.
 */
struct bha_ccb *
bha_ccb_phys_kv(sc, ccb_phys)
	struct bha_softc *sc;
	bus_addr_t ccb_phys;
{
	int hashnum = CCB_HASH(ccb_phys);
	struct bha_ccb *ccb = sc->sc_ccbhash[hashnum];

	while (ccb) {
		if (ccb->hashkey == ccb_phys)
			break;
		ccb = ccb->nexthash;
	}
	return (ccb);
}