1 /* $NetBSD: aic7xxx_osm.c,v 1.20 2005/12/05 18:29:45 bouyer Exp $ */ 2 3 /* 4 * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers 5 * 6 * Copyright (c) 1994-2001 Justin T. Gibbs. 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions, and the following disclaimer, 14 * without modification. 15 * 2. The name of the author may not be used to endorse or promote products 16 * derived from this software without specific prior written permission. 17 * 18 * Alternatively, this software may be distributed under the terms of the 19 * GNU Public License ("GPL"). 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 25 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 * 33 * //depot/aic7xxx/freebsd/dev/aic7xxx/aic7xxx_osm.c#12 $ 34 * 35 * $FreeBSD: src/sys/dev/aic7xxx/aic7xxx_osm.c,v 1.31 2002/11/30 19:08:58 scottl Exp $ 36 */ 37 /* 38 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc. 
 * - April 2003
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aic7xxx_osm.c,v 1.20 2005/12/05 18:29:45 bouyer Exp $");

#include <dev/ic/aic7xxx_osm.h>
#include <dev/ic/aic7xxx_inline.h>

#ifndef AHC_TMODE_ENABLE
#define AHC_TMODE_ENABLE 0
#endif


static void	ahc_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg);
static void	ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments);
static int	ahc_poll(struct ahc_softc *ahc, int wait);
static void	ahc_setup_data(struct ahc_softc *ahc,
			       struct scsipi_xfer *xs, struct scb *scb);
static void	ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);
static int	ahc_ioctl(struct scsipi_channel *channel, u_long cmd, caddr_t addr, int flag,
			  struct proc *p);


/*
 * Attach all the sub-devices we can find.
 *
 * Fills in the scsipi adapter/channel structures from the controller's
 * feature flags, attaches the child scsibus(es) (two for twin-channel
 * parts), enables interrupts and optionally resets the bus(es).
 * Returns 1 (attach succeeded).
 */
int
ahc_attach(struct ahc_softc *ahc)
{
	u_long s;
	int i;
	char ahc_info[256];

	LIST_INIT(&ahc->pending_scbs);
	for (i = 0; i < AHC_NUM_TARGETS; i++)
		TAILQ_INIT(&ahc->untagged_queues[i]);

	ahc_lock(ahc, &s);

	ahc->sc_adapter.adapt_dev = &ahc->sc_dev;
	ahc->sc_adapter.adapt_nchannels = (ahc->features & AHC_TWIN) ? 2 : 1;

	/* One SCB is held back; see ahc_get_scb() failure handling below. */
	ahc->sc_adapter.adapt_openings = ahc->scb_data->numscbs - 1;
	ahc->sc_adapter.adapt_max_periph = 16;

	ahc->sc_adapter.adapt_ioctl = ahc_ioctl;
	ahc->sc_adapter.adapt_minphys = ahc_minphys;
	ahc->sc_adapter.adapt_request = ahc_action;

	ahc->sc_channel.chan_adapter = &ahc->sc_adapter;
	ahc->sc_channel.chan_bustype = &scsi_bustype;
	ahc->sc_channel.chan_channel = 0;
	ahc->sc_channel.chan_ntargets = (ahc->features & AHC_WIDE) ? 16 : 8;
	ahc->sc_channel.chan_nluns = 8 /*AHC_NUM_LUNS*/;
	ahc->sc_channel.chan_id = ahc->our_id;
	/* More SCBs can be allocated on demand; see ADAPTER_REQ_GROW_RESOURCES. */
	ahc->sc_channel.chan_flags |= SCSIPI_CHAN_CANGROW;

	if (ahc->features & AHC_TWIN) {
		/* Channel B is a clone of A with its own id/channel number. */
		ahc->sc_channel_b = ahc->sc_channel;
		ahc->sc_channel_b.chan_id = ahc->our_id_b;
		ahc->sc_channel_b.chan_channel = 1;
	}

	ahc_controller_info(ahc, ahc_info, sizeof(ahc_info));
	printf("%s: %s\n", ahc->sc_dev.dv_xname, ahc_info);

	/*
	 * Attach the channel that is the "primary" one first so that it
	 * gets the lower scsibus unit number.
	 */
	if ((ahc->flags & AHC_PRIMARY_CHANNEL) == 0) {
		ahc->sc_child = config_found((void *)&ahc->sc_dev,
		    &ahc->sc_channel, scsiprint);
		if (ahc->features & AHC_TWIN)
			ahc->sc_child_b = config_found((void *)&ahc->sc_dev,
			    &ahc->sc_channel_b, scsiprint);
	} else {
		if (ahc->features & AHC_TWIN)
			ahc->sc_child = config_found((void *)&ahc->sc_dev,
			    &ahc->sc_channel_b, scsiprint);
		ahc->sc_child_b = config_found((void *)&ahc->sc_dev,
		    &ahc->sc_channel, scsiprint);
	}

	ahc_intr_enable(ahc, TRUE);

	if (ahc->flags & AHC_RESET_BUS_A)
		ahc_reset_channel(ahc, 'A', TRUE);
	if ((ahc->features & AHC_TWIN) && ahc->flags & AHC_RESET_BUS_B)
		ahc_reset_channel(ahc, 'B', TRUE);

	ahc_unlock(ahc, &s);
	return (1);
}

/*
 * Catch an interrupt from the adapter.
 * Thin wrapper so the bus front-ends can register a void (*)(void *)
 * handler; simply forwards to the core ahc_intr().
 */
void
ahc_platform_intr(void *arg)
{
	struct ahc_softc *ahc;

	ahc = (struct ahc_softc *)arg;
	ahc_intr(ahc);
}

/*
 * We have an scb which has been processed by the
 * adaptor, now we look to see how the operation
 * went.
 *
 * Removes the SCB from the pending/untagged queues, stops its timeout,
 * syncs and unloads the data DMA map, propagates autosense data and
 * error status into the scsipi_xfer, frees the SCB and finally calls
 * scsipi_done().
 */
void
ahc_done(struct ahc_softc *ahc, struct scb *scb)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	u_long s;

	xs = scb->xs;
	periph = xs->xs_periph;
	LIST_REMOVE(scb, pending_links);
	if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		/* This SCB headed its target's untagged queue; start the next one. */
		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &ahc->untagged_queues[target_offset];
		TAILQ_REMOVE(untagged_q, scb, links.tqe);
		scb->flags &= ~SCB_UNTAGGEDQ;
		ahc_run_untagged_queue(ahc, untagged_q);
	}

	callout_stop(&scb->xs->xs_callout);

	if (xs->datalen) {
		int op;

		if (xs->xs_control & XS_CTL_DATA_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
		    scb->dmamap->dm_mapsize, op);
		bus_dmamap_unload(ahc->parent_dmat, scb->dmamap);
	}

	/*
	 * If the recovery SCB completes, we have to be
	 * out of our timeout.
	 */
	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		struct scb *list_scb;

		/*
		 * We were able to complete the command successfully,
		 * so reinstate the timeouts for all other pending
		 * commands.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			if (!(list_scb->xs->xs_control & XS_CTL_POLL)) {
				/*
				 * timeout is in ms; the split ternary avoids
				 * integer overflow in (timeout * hz) for very
				 * large timeouts.
				 */
				callout_reset(&list_scb->xs->xs_callout,
				    (list_scb->xs->timeout > 1000000) ?
					(list_scb->xs->timeout / 1000) * hz :
					(list_scb->xs->timeout * hz) / 1000,
				    ahc_timeout, list_scb);
			}
		}

		if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
		    || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
			ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		scsipi_printaddr(xs->xs_periph);
		printf("%s: no longer in timeout, status = %x\n",
		    ahc_name(ahc), xs->status);
	}

	/* Don't clobber any existing error state */
	if (xs->error != XS_NOERROR) {
		/* Nothing to do: keep the earlier, more specific error. */
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero any sense not transferred by the
		 * device. The SCSI spec mandates that any
		 * untransferred data should be assumed to be
		 * zero. Complete the 'bounce' of sense information
		 * through buffers accessible via bus-space by
		 * copying it into the clients csio.
		 */
		memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
		memcpy(&xs->sense.scsi_sense,
		    ahc_get_sense_buf(ahc, scb),
		    sizeof(xs->sense.scsi_sense));
		xs->error = XS_SENSE;
	}
	if (scb->flags & SCB_FREEZE_QUEUE) {
		scsipi_periph_thaw(periph, 1);
		scb->flags &= ~SCB_FREEZE_QUEUE;
	}

	ahc_lock(ahc, &s);
	ahc_free_scb(ahc, scb);
	ahc_unlock(ahc, &s);

	scsipi_done(xs);
}

/*
 * Adapter ioctl entry point; only SCBUSIORESET (force a bus reset on
 * the given channel) is supported.  Everything else returns ENOTTY.
 */
static int
ahc_ioctl(struct scsipi_channel *channel, u_long cmd, caddr_t addr, int flag,
	  struct proc *p)
{
	struct ahc_softc *ahc = (void *)channel->chan_adapter->adapt_dev;
	int s, ret = ENOTTY;

	switch (cmd) {
	case SCBUSIORESET:
		s = splbio();
		ahc_reset_channel(ahc, channel->chan_channel == 1 ? 'B' : 'A',
		    TRUE);
		splx(s);
		ret = 0;
		break;
	default:
		break;
	}

	return ret;
}

/*
 * scsipi request dispatcher for this adapter.
 *
 * ADAPTER_REQ_RUN_XFER:	allocate an SCB, fill it in and start the
 *				command (via ahc_setup_data/ahc_execute_scb).
 * ADAPTER_REQ_GROW_RESOURCES:	allocate more SCBs on demand.
 * ADAPTER_REQ_SET_XFER_MODE:	program negotiation goals (width/sync/PPR/
 *				tagging) from the scsipi_xfer_mode request.
 */
static void
ahc_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct ahc_softc *ahc;
	int s;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;

	ahc = (void *)chan->chan_adapter->adapt_dev;

	switch (req) {

	case ADAPTER_REQ_RUN_XFER:
	{
		struct scsipi_xfer *xs;
		struct scsipi_periph *periph;
		struct scb *scb;
		struct hardware_scb *hscb;
		u_int target_id;
		u_int our_id;
		u_long ss;

		xs = arg;
		periph = xs->xs_periph;

		target_id = periph->periph_target;
		our_id = ahc->our_id;

		SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("ahc_action\n"));

		/*
		 * get an scb to use.
		 */
		ahc_lock(ahc, &ss);
		if ((scb = ahc_get_scb(ahc)) == NULL) {
			/* Out of SCBs; let scsipi retry the request later. */
			xs->error = XS_RESOURCE_SHORTAGE;
			ahc_unlock(ahc, &ss);
			scsipi_done(xs);
			return;
		}
		ahc_unlock(ahc, &ss);

		hscb = scb->hscb;

		SC_DEBUG(periph, SCSIPI_DB3, ("start scb(%p)\n", scb));
		scb->xs = xs;

		/*
		 * Put all the arguments for the xfer in the scb
		 */
		hscb->control = 0;
		hscb->scsiid = BUILD_SCSIID(ahc, 0, target_id, our_id);
		hscb->lun = periph->periph_lun;
		if (xs->xs_control & XS_CTL_RESET) {
			/* Device reset: no CDB, just a reset message. */
			hscb->cdb_len = 0;
			scb->flags |= SCB_DEVICE_RESET;
			hscb->control |= MK_MESSAGE;
			ahc_execute_scb(scb, NULL, 0);
			/*
			 * NOTE(review): execution continues into
			 * ahc_setup_data() below even though the reset SCB
			 * has already been handed to ahc_execute_scb() —
			 * confirm this is intended and not a missing break.
			 */
		}

		ahc_setup_data(ahc, xs, scb);

		break;
	}
	case ADAPTER_REQ_GROW_RESOURCES:
#ifdef AHC_DEBUG
		printf("%s: ADAPTER_REQ_GROW_RESOURCES\n", ahc_name(ahc));
#endif
		chan->chan_adapter->adapt_openings += ahc_alloc_scbs(ahc);
		if (ahc->scb_data->numscbs >= AHC_SCB_MAX_ALLOC)
			chan->chan_flags &= ~SCSIPI_CHAN_CANGROW;
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		struct ahc_devinfo devinfo;
		int target_id, our_id, first;
		u_int width;
		char channel;
		u_int ppr_options, period, offset;
		struct ahc_syncrate *syncrate;
		uint16_t old_autoneg;

		target_id = xm->xm_target;
		our_id = chan->chan_id;
		channel = (chan->chan_channel == 1) ? 'B' : 'A';
		s = splbio();
		tinfo = ahc_fetch_transinfo(ahc, channel, our_id, target_id,
		    &tstate);
		ahc_compile_devinfo(&devinfo, our_id, target_id,
		    0, channel, ROLE_INITIATOR);

		old_autoneg = tstate->auto_negotiate;

		/*
		 * XXX since the period and offset are not provided here,
		 * fake things by forcing a renegotiation using the user
		 * settings if this is called for the first time (i.e.
		 * during probe). Also, cap various values at the user
		 * values, assuming that the user set it up that way.
		 */
		if (ahc->inited_target[target_id] == 0) {
			period = tinfo->user.period;
			offset = tinfo->user.offset;
			ppr_options = tinfo->user.ppr_options;
			width = tinfo->user.width;
			tstate->tagenable |=
			    (ahc->user_tagenable & devinfo.target_mask);
			tstate->discenable |=
			    (ahc->user_discenable & devinfo.target_mask);
			ahc->inited_target[target_id] = 1;
			first = 1;
		} else
			first = 0;

		if (xm->xm_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
			width = MSG_EXT_WDTR_BUS_16_BIT;
		else
			width = MSG_EXT_WDTR_BUS_8_BIT;

		/* Clamp the requested width to both hardware and user limits. */
		ahc_validate_width(ahc, NULL, &width, ROLE_UNKNOWN);
		if (width > tinfo->user.width)
			width = tinfo->user.width;
		ahc_set_width(ahc, &devinfo, width, AHC_TRANS_GOAL, FALSE);

		if (!(xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT))) {
			/* Async transfers only. */
			period = 0;
			offset = 0;
			ppr_options = 0;
		}

		if ((xm->xm_mode & PERIPH_CAP_DT) &&
		    (ppr_options & MSG_EXT_PPR_DT_REQ))
			ppr_options |= MSG_EXT_PPR_DT_REQ;
		else
			ppr_options &= ~MSG_EXT_PPR_DT_REQ;
		/* IU requires both disconnection and tagged queuing. */
		if ((tstate->discenable & devinfo.target_mask) == 0 ||
		    (tstate->tagenable & devinfo.target_mask) == 0)
			ppr_options &= ~MSG_EXT_PPR_IU_REQ;

		if ((xm->xm_mode & PERIPH_CAP_TQING) &&
		    (ahc->user_tagenable & devinfo.target_mask))
			tstate->tagenable |= devinfo.target_mask;
		else
			tstate->tagenable &= ~devinfo.target_mask;

		syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
		    AHC_SYNCRATE_MAX);
		ahc_validate_offset(ahc, NULL, syncrate, &offset,
		    width, ROLE_UNKNOWN);

		if (offset == 0) {
			/* No offset means async; drop sync/PPR goals. */
			period = 0;
			ppr_options = 0;
		}

		if (ppr_options != 0
		    && tinfo->user.transport_version >= 3) {
			tinfo->goal.transport_version =
			    tinfo->user.transport_version;
			tinfo->curr.transport_version =
			    tinfo->user.transport_version;
		}

		ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset,
		    ppr_options, AHC_TRANS_GOAL, FALSE);

		/*
		 * If this is the first request, and no negotiation is
		 * needed, just confirm the state to the scsipi layer,
		 * so that it can print a message.
		 */
		if (old_autoneg == tstate->auto_negotiate && first) {
			xm->xm_mode = 0;
			xm->xm_period = tinfo->curr.period;
			xm->xm_offset = tinfo->curr.offset;
			if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
				xm->xm_mode |= PERIPH_CAP_WIDE16;
			if (tinfo->curr.period)
				xm->xm_mode |= PERIPH_CAP_SYNC;
			if (tstate->tagenable & devinfo.target_mask)
				xm->xm_mode |= PERIPH_CAP_TQING;
			if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
				xm->xm_mode |= PERIPH_CAP_DT;
			scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		}
		splx(s);
	}
	}

	return;
}

/*
 * Second half of command submission (also the bus_dmamap_load callback
 * shape, hence the (void *, bus_dma_segment_t *, int) signature).
 *
 * Copies the DMA segments into the SCB's SG list, fills in the
 * negotiation/queueing control bits, arms the command timeout, queues
 * the SCB to the controller and, for polled transfers, busy-waits for
 * completion.
 */
static void
ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
{
	struct scb *scb;
	struct scsipi_xfer *xs;
	struct ahc_softc *ahc;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;

	u_int mask;
	long s;

	scb = (struct scb *)arg;
	xs = scb->xs;
	xs->error = 0;
	xs->status = 0;
	xs->xs_status = 0;
	ahc = (void *)xs->xs_periph->periph_channel->chan_adapter->adapt_dev;

	if (nsegments != 0) {
		struct ahc_dma_seg *sg;
		bus_dma_segment_t *end_seg;
		int op;

		end_seg = dm_segs + nsegments;

		/* Copy the segments into our SG list */
		sg = scb->sg_list;
		while (dm_segs < end_seg) {
			uint32_t len;

			sg->addr = ahc_htole32(dm_segs->ds_addr);
			/* High address bits ride in the top of the length word. */
			len = dm_segs->ds_len
			    | ((dm_segs->ds_addr >> 8) & AHC_SG_HIGH_ADDR_MASK);
			sg->len = ahc_htole32(len);
			sg++;
			dm_segs++;
		}

		/*
		 * Note where to find the SG entries in bus space.
		 * We also set the full residual flag which the
		 * sequencer will clear as soon as a data transfer
		 * occurs.
		 */
		scb->hscb->sgptr = ahc_htole32(scb->sg_list_phys|SG_FULL_RESID);

		if (xs->xs_control & XS_CTL_DATA_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
		    scb->dmamap->dm_mapsize, op);

		/* Mark the final segment for the sequencer. */
		sg--;
		sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);

		/* Copy the first SG into the "current" data pointer area */
		scb->hscb->dataptr = scb->sg_list->addr;
		scb->hscb->datacnt = scb->sg_list->len;
	} else {
		scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
		scb->hscb->dataptr = 0;
		scb->hscb->datacnt = 0;
	}

	scb->sg_count = nsegments;

	ahc_lock(ahc, &s);

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (xs->xs_status & XS_STS_DONE) {
		if (nsegments != 0)
			/*
			 * NOTE(review): this unload uses buffer_dmat while
			 * the load/sync above use parent_dmat — verify the
			 * two tags are the same or fix the mismatch.
			 */
			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		scsipi_done(xs);
		return;
	}

	tinfo = ahc_fetch_transinfo(ahc, ahc->channel,
	    SCSIID_OUR_ID(scb->hscb->scsiid),
	    SCSIID_TARGET(ahc, scb->hscb->scsiid),
	    &tstate);

	mask = SCB_GET_TARGET_MASK(ahc, scb);
	scb->hscb->scsirate = tinfo->scsirate;
	scb->hscb->scsioffset = tinfo->curr.offset;

	if ((tstate->ultraenb & mask) != 0)
		scb->hscb->control |= ULTRAENB;

	if ((tstate->discenable & mask) != 0)
		scb->hscb->control |= DISCENB;

	if (xs->xs_tag_type)
		scb->hscb->control |= xs->xs_tag_type;

#if 1	/* This looks like it makes sense at first, but it can loop */
	if ((xs->xs_control & XS_CTL_DISCOVERY) && (tinfo->goal.width == 0
	    && tinfo->goal.offset == 0
	    && tinfo->goal.ppr_options == 0)) {
		scb->flags |= SCB_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	} else
#endif
	if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);

	if (!(xs->xs_control & XS_CTL_POLL)) {
		/* ms -> ticks; split to avoid overflow on huge timeouts. */
		callout_reset(&scb->xs->xs_callout, xs->timeout > 1000000 ?
		    (xs->timeout / 1000) * hz : (xs->timeout * hz) / 1000,
		    ahc_timeout, scb);
	}

	/*
	 * We only allow one untagged transaction
	 * per target in the initiator role unless
	 * we are storing a full busy target *lun*
	 * table in SCB space.
	 */
	if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
	    && (ahc->flags & AHC_SCB_BTT) == 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &(ahc->untagged_queues[target_offset]);
		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
		scb->flags |= SCB_UNTAGGEDQ;
		if (TAILQ_FIRST(untagged_q) != scb) {
			/*
			 * Another untagged command is already outstanding;
			 * this one starts when ahc_run_untagged_queue()
			 * reaches it.
			 */
			ahc_unlock(ahc, &s);
			return;
		}
	}
	scb->flags |= SCB_ACTIVE;

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Define a mapping from our tag to the SCB. */
		ahc->scb_data->scbindex[scb->hscb->tag] = scb;
		ahc_pause(ahc);
		if ((ahc->flags & AHC_PAGESCBS) == 0)
			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
		ahc_outb(ahc, TARG_IMMEDIATE_SCB, scb->hscb->tag);
		ahc_unpause(ahc);
	} else {
		ahc_queue_scb(ahc, scb);
	}

	if (!(xs->xs_control & XS_CTL_POLL)) {
		ahc_unlock(ahc, &s);
		return;
	}

	/*
	 * If we can't use interrupts, poll for completion
	 */
	SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("cmd_poll\n"));
	do {
		if (ahc_poll(ahc, xs->timeout)) {
			if (!(xs->xs_control & XS_CTL_SILENT))
				printf("cmd fail\n");
			ahc_timeout(scb);
			break;
		}
	} while (!(xs->xs_status & XS_STS_DONE));
	ahc_unlock(ahc, &s);

	return;
}

/*
 * Busy-wait up to `wait' milliseconds for the chip to raise an
 * interrupt, then service it via ahc_intr().  Returns EIO on timeout,
 * 0 once the pending interrupt has been handled.
 */
static int
ahc_poll(struct ahc_softc *ahc, int wait)
{
	while (--wait) {
		DELAY(1000);
		if (ahc_inb(ahc, INTSTAT) & INT_PEND)
			break;
	}

	if (wait == 0) {
		printf("%s: board is not responding\n", ahc_name(ahc));
		return (EIO);
	}

	ahc_intr((void *)ahc);
	return (0);
}

/*
 * Copy the CDB into the hardware SCB and map the data buffer for DMA,
 * then hand off to ahc_execute_scb() (directly, since bus_dmamap_load
 * here completes synchronously).  Oversized CDBs and DMA map failures
 * complete the xfer with an error instead.
 */
static void
ahc_setup_data(struct ahc_softc *ahc, struct scsipi_xfer *xs,
	       struct scb *scb)
{
	struct hardware_scb *hscb;

	hscb = scb->hscb;
	xs->resid = xs->status = 0;

	hscb->cdb_len = xs->cmdlen;
	if (hscb->cdb_len > sizeof(hscb->cdb32)) {
		u_long s;

		/* CDB won't fit in the hardware SCB; fail the request. */
		ahc_set_transaction_status(scb, CAM_REQ_INVALID);
		ahc_lock(ahc, &s);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		scsipi_done(xs);
		return;
	}

	if (hscb->cdb_len > 12) {
		/* Long CDBs live in the dedicated cdb32 area. */
		memcpy(hscb->cdb32, xs->cmd, hscb->cdb_len);
		scb->flags |= SCB_CDB32_PTR;
	} else {
		memcpy(hscb->shared_data.cdb, xs->cmd, hscb->cdb_len);
	}

	/* Only use S/G if there is a transfer */
	if (xs->datalen) {
		int error;

		error = bus_dmamap_load(ahc->parent_dmat,
		    scb->dmamap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ?
			BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ?
			BUS_DMA_READ : BUS_DMA_WRITE));
		if (error) {
#ifdef AHC_DEBUG
			printf("%s: in ahc_setup_data(): bus_dmamap_load() "
			    "= %d\n",
			    ahc_name(ahc), error);
#endif
			/*
			 * NOTE(review): the SCB is not freed on this path,
			 * unlike the oversized-CDB path above — confirm
			 * whether this leaks the SCB.
			 */
			xs->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(xs);
			return;
		}
		ahc_execute_scb(scb,
		    scb->dmamap->dm_segs,
		    scb->dmamap->dm_nsegs);
	} else {
		ahc_execute_scb(scb, NULL, 0);
	}
}

/*
 * Mark `scb' as the recovery SCB: freeze the channel(s) so no new
 * commands are queued and stop the timeouts of all pending SCBs while
 * error recovery is in progress.  Idempotent — does nothing if the SCB
 * is already the recovery SCB.
 */
static void
ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb) {

	if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
		struct scb *list_scb;

		scb->flags |= SCB_RECOVERY_SCB;

		/*
		 * Take all queued, but not sent SCBs out of the equation.
		 * Also ensure that no new CCBs are queued to us while we
		 * try to fix this problem.
		 */
		scsipi_channel_freeze(&ahc->sc_channel, 1);
		if (ahc->features & AHC_TWIN)
			scsipi_channel_freeze(&ahc->sc_channel_b, 1);

		/*
		 * Go through all of our pending SCBs and remove
		 * any scheduled timeouts for them. We will reschedule
		 * them after we've successfully fixed this problem.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			callout_stop(&list_scb->xs->xs_callout);
		}
	}
}

/*
 * Command timeout handler (callout).  Escalates recovery:
 * other-SCB-active -> wait for that SCB's timeout; first timeout ->
 * queue a bus device reset (BDR) to the target; repeat offender or
 * hung selection -> full channel reset.
 */
void
ahc_timeout(void *arg)
{
	struct scb *scb;
	struct ahc_softc *ahc;
	long s;
	int found;
	u_int last_phase;
	int target;
	int lun;
	int i;
	char channel;

	scb = (struct scb *)arg;
	ahc = (struct ahc_softc *)scb->ahc_softc;

	ahc_lock(ahc, &s);

	ahc_pause_and_flushwork(ahc);

	if ((scb->flags & SCB_ACTIVE) == 0) {
		/* Previous timeout took care of me already */
		printf("%s: Timedout SCB already complete. "
		    "Interrupts may not be functioning.\n", ahc_name(ahc));
		ahc_unpause(ahc);
		ahc_unlock(ahc, &s);
		return;
	}

	target = SCB_GET_TARGET(ahc, scb);
	channel = SCB_GET_CHANNEL(ahc, scb);
	lun = SCB_GET_LUN(scb);

	ahc_print_path(ahc, scb);
	printf("SCB 0x%x - timed out\n", scb->hscb->tag);
	ahc_dump_card_state(ahc);
	last_phase = ahc_inb(ahc, LASTPHASE);
	if (scb->sg_count > 0) {
		for (i = 0; i < scb->sg_count; i++) {
			printf("sg[%d] - Addr 0x%x : Length %d\n",
			    i,
			    scb->sg_list[i].addr,
			    scb->sg_list[i].len & AHC_SG_LEN_MASK);
		}
	}
	if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
		/*
		 * Been down this road before.
		 * Do a full bus reset.
		 */
bus_reset:
		ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
		printf("%s: Issued Channel %c Bus Reset. "
		    "%d SCBs aborted\n", ahc_name(ahc), channel, found);
	} else {
		/*
		 * If we are a target, transition to bus free and report
		 * the timeout.
		 *
		 * The target/initiator that is holding up the bus may not
		 * be the same as the one that triggered this timeout
		 * (different commands have different timeout lengths).
		 * If the bus is idle and we are acting as the initiator
		 * for this request, queue a BDR message to the timed out
		 * target.  Otherwise, if the timed out transaction is
		 * active:
		 *   Initiator transaction:
		 *	Stuff the message buffer with a BDR message and assert
		 *	ATN in the hopes that the target will let go of the bus
		 *	and go to the mesgout phase.  If this fails, we'll
		 *	get another timeout 2 seconds later which will attempt
		 *	a bus reset.
		 *
		 *   Target transaction:
		 *	Transition to BUS FREE and report the error.
		 *	It's good to be the target!
		 */
		u_int active_scb_index;
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		active_scb_index = ahc_inb(ahc, SCB_TAG);

		if ((ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0
		    && (active_scb_index < ahc->scb_data->numscbs)) {
			struct scb *active_scb;

			/*
			 * If the active SCB is not us, assume that
			 * the active SCB has a longer timeout than
			 * the timedout SCB, and wait for the active
			 * SCB to timeout.
			 */
			active_scb = ahc_lookup_scb(ahc, active_scb_index);
			if (active_scb != scb) {
				uint64_t newtimeout;

				ahc_print_path(ahc, scb);
				printf("Other SCB Timeout%s",
				    (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
				    ? " again\n" : "\n");
				scb->flags |= SCB_OTHERTCL_TIMEOUT;
				newtimeout = MAX(active_scb->xs->timeout,
				    scb->xs->timeout);
				callout_reset(&scb->xs->xs_callout,
				    newtimeout > 1000000 ?
					(newtimeout / 1000) * hz :
					(newtimeout * hz) / 1000,
				    ahc_timeout, scb);
				ahc_unpause(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/* It's us */
			if ((scb->flags & SCB_TARGET_SCB) != 0) {

				/*
				 * Send back any queued up transactions
				 * and properly record the error condition.
				 */
				ahc_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb),
				    SCB_GET_CHANNEL(ahc, scb),
				    SCB_GET_LUN(scb),
				    scb->hscb->tag,
				    ROLE_TARGET,
				    CAM_CMD_TIMEOUT);

				/* Will clear us from the bus */
				ahc_restart(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			ahc_set_recoveryscb(ahc, active_scb);
			ahc_outb(ahc, MSG_OUT, HOST_MSG);
			ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
			ahc_print_path(ahc, active_scb);
			printf("BDR message in message buffer\n");
			active_scb->flags |= SCB_DEVICE_RESET;
			/* Give the BDR two seconds before escalating. */
			callout_reset(&active_scb->xs->xs_callout,
			    2 * hz, ahc_timeout, active_scb);
			ahc_unpause(ahc);
		} else {
			int disconnected;

			/* XXX Shouldn't panic.  Just punt instead? */
			if ((scb->flags & SCB_TARGET_SCB) != 0)
				panic("Timed-out target SCB but bus idle");

			if (last_phase != P_BUSFREE
			    && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
				/* XXX What happened to the SCB? */
				/* Hung target selection.  Goto busfree */
				printf("%s: Hung target selection\n",
				    ahc_name(ahc));
				ahc_restart(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			if (ahc_search_qinfifo(ahc, target, channel, lun,
			    scb->hscb->tag, ROLE_INITIATOR,
			    /*status*/0, SEARCH_COUNT) > 0) {
				disconnected = FALSE;
			} else {
				disconnected = TRUE;
			}

			if (disconnected) {

				ahc_set_recoveryscb(ahc, scb);
				/*
				 * Actually re-queue this SCB in an attempt
				 * to select the device before it reconnects.
				 * In either case (selection or reselection),
				 * we will now issue a target reset to the
				 * timed-out device.
				 *
				 * Set the MK_MESSAGE control bit indicating
				 * that we desire to send a message.  We
				 * also set the disconnected flag since
				 * in the paging case there is no guarantee
				 * that our SCB control byte matches the
				 * version on the card.  We don't want the
				 * sequencer to abort the command thinking
				 * an unsolicited reselection occurred.
				 */
				scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
				scb->flags |= SCB_DEVICE_RESET;

				/*
				 * Remove any cached copy of this SCB in the
				 * disconnected list in preparation for the
				 * queuing of our abort SCB.  We use the
				 * same element in the SCB, SCB_NEXT, for
				 * both the qinfifo and the disconnected list.
				 */
				ahc_search_disc_list(ahc, target, channel,
				    lun, scb->hscb->tag,
				    /*stop_on_first*/TRUE,
				    /*remove*/TRUE,
				    /*save_state*/FALSE);

				/*
				 * In the non-paging case, the sequencer will
				 * never re-reference the in-core SCB.
				 * To make sure we are notified during
				 * reslection, set the MK_MESSAGE flag in
				 * the card's copy of the SCB.
				 */
				if ((ahc->flags & AHC_PAGESCBS) == 0) {
					ahc_outb(ahc, SCBPTR, scb->hscb->tag);
					ahc_outb(ahc, SCB_CONTROL,
					    ahc_inb(ahc, SCB_CONTROL)
					    | MK_MESSAGE);
				}

				/*
				 * Clear out any entries in the QINFIFO first
				 * so we are the next SCB for this target
				 * to run.
				 */
				ahc_search_qinfifo(ahc,
				    SCB_GET_TARGET(ahc, scb),
				    channel, SCB_GET_LUN(scb),
				    SCB_LIST_NULL,
				    ROLE_INITIATOR,
				    CAM_REQUEUE_REQ,
				    SEARCH_COMPLETE);
				ahc_print_path(ahc, scb);
				printf("Queuing a BDR SCB\n");
				ahc_qinfifo_requeue_tail(ahc, scb);
				ahc_outb(ahc, SCBPTR, saved_scbptr);
				callout_reset(&scb->xs->xs_callout, 2 * hz,
				    ahc_timeout, scb);
				ahc_unpause(ahc);
			} else {
				/* Go "immediatly" to the bus reset */
				/* This shouldn't happen */
				ahc_set_recoveryscb(ahc, scb);
				ahc_print_path(ahc, scb);
				printf("SCB %d: Immediate reset.  "
				    "Flags = 0x%x\n", scb->hscb->tag,
				    scb->flags);
				goto bus_reset;
			}
		}
	}
	ahc_unlock(ahc, &s);
}

/*
 * Core-layer hook: record the tagged-queuing enable state for a device
 * in its transfer-mode state.
 */
void
ahc_platform_set_tags(struct ahc_softc *ahc,
		      struct ahc_devinfo *devinfo, int enable)
{
	struct ahc_tmode_tstate *tstate;

	ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
	    devinfo->target, &tstate);

	if (enable)
		tstate->tagenable |= devinfo->target_mask;
	else
		tstate->tagenable &= ~devinfo->target_mask;
}

/*
 * Allocate the platform-private data hung off the softc.
 * Returns 0 on success (or when the structure is empty), ENOMEM on
 * allocation failure.
 */
int
ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
{
	/* Nothing to allocate if the platform data is empty. */
	if (sizeof(struct ahc_platform_data) == 0)
		return 0;
	ahc->platform_data = malloc(sizeof(struct ahc_platform_data), M_DEVBUF,
	    M_NOWAIT);
	if (ahc->platform_data == NULL)
		return (ENOMEM);
	return (0);
}

/*
 * Release the platform-private data allocated by ahc_platform_alloc().
 */
void
ahc_platform_free(struct ahc_softc *ahc)
{
	if (sizeof(struct ahc_platform_data) == 0)
		return;
	free(ahc->platform_data, M_DEVBUF);
}

/*
 * Core-layer comparison hook for softc ordering; this platform imposes
 * no additional ordering, so always report "equal".
 */
int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	return (0);
}

/*
 * autoconf detach entry point: quiesce interrupts, detach the child
 * scsibus(es), remove the shutdown hook and free the softc resources.
 * Returns 0 on success or the error from config_detach().
 */
int
ahc_detach(struct device *self, int flags)
{
	int rv = 0;

	struct ahc_softc *ahc = (struct ahc_softc*)self;

	ahc_intr_enable(ahc, FALSE);
	if (ahc->sc_child != NULL)
		rv = config_detach(ahc->sc_child, flags);
	if (rv == 0 && ahc->sc_child_b != NULL)
		rv = config_detach(ahc->sc_child_b, flags);

	shutdownhook_disestablish(ahc->shutdown_hook);

	ahc_free(ahc);

	return (rv);
}


/*
 * Forward asynchronous events from the core driver to the scsipi layer:
 * completed transfer negotiations become ASYNC_EVENT_XFER_MODE
 * notifications, bus resets become ASYNC_EVENT_RESET.
 */
void
ahc_send_async(struct ahc_softc *ahc, char channel, u_int target, u_int lun,
	       ac_code code, void *opt_arg)
{
	struct ahc_tmode_tstate *tstate;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_devinfo devinfo;
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode xm;

	chan = channel == 'B' ? &ahc->sc_channel_b : &ahc->sc_channel;
	switch (code) {
	case AC_TRANSFER_NEG:
		tinfo = ahc_fetch_transinfo(ahc, channel, ahc->our_id, target,
		    &tstate);
		ahc_compile_devinfo(&devinfo, ahc->our_id, target, lun,
		    channel, ROLE_UNKNOWN);
		/*
		 * Don't bother if negotiating. XXX?
		 */
		if (tinfo->curr.period != tinfo->goal.period
		    || tinfo->curr.width != tinfo->goal.width
		    || tinfo->curr.offset != tinfo->goal.offset
		    || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
			break;
		xm.xm_target = target;
		xm.xm_mode = 0;
		xm.xm_period = tinfo->curr.period;
		xm.xm_offset = tinfo->curr.offset;
		if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
			xm.xm_mode |= PERIPH_CAP_WIDE16;
		if (tinfo->curr.period)
			xm.xm_mode |= PERIPH_CAP_SYNC;
		if (tstate->tagenable & devinfo.target_mask)
			xm.xm_mode |= PERIPH_CAP_TQING;
		if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
			xm.xm_mode |= PERIPH_CAP_DT;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
		break;
	case AC_BUS_RESET:
		scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
		/* FALLTHROUGH -- NOTE(review): no break; confirm intended. */
	case AC_SENT_BDR:
	default:
		break;
	}
}