/*	$NetBSD: aic79xx_osm.c,v 1.18 2007/03/04 06:01:48 christos Exp $	*/

/*
 * Bus independent NetBSD shim for the aic7xxx based adaptec SCSI controllers
 *
 * Copyright (c) 1994-2002 Justin T. Gibbs.
 * Copyright (c) 2001-2002 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU Public License ("GPL").
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * //depot/aic7xxx/freebsd/dev/aic7xxx/aic79xx_osm.c#26 $
 *
 * $FreeBSD: src/sys/dev/aic7xxx/aic79xx_osm.c,v 1.11 2003/05/04 00:20:07 gibbs Exp $
 */
/*
 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc.
 * - April 2003
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aic79xx_osm.c,v 1.18 2007/03/04 06:01:48 christos Exp $");

#include <dev/ic/aic79xx_osm.h>
#include <dev/ic/aic7xxx_cam.h>
#include <dev/ic/aic79xx_inline.h>

#ifndef AHD_TMODE_ENABLE
#define AHD_TMODE_ENABLE 0
#endif

static int	ahd_ioctl(struct scsipi_channel *channel, u_long cmd,
			  void *addr, int flag, struct proc *p);
static void	ahd_action(struct scsipi_channel *chan,
			   scsipi_adapter_req_t req, void *arg);
static void	ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
				int nsegments);
static int	ahd_poll(struct ahd_softc *ahd, int wait);
static void	ahd_setup_data(struct ahd_softc *ahd, struct scsipi_xfer *xs,
			       struct scb *scb);

#if NOT_YET
static void	ahd_set_recoveryscb(struct ahd_softc *ahd, struct scb *scb);
#endif

/*
 * Attach all the sub-devices we can find
 */
int
ahd_attach(struct ahd_softc *ahd)
{
	int s;
	char ahd_info[256];

	ahd_controller_info(ahd, ahd_info, sizeof(ahd_info));
	printf("%s: %s\n", ahd->sc_dev.dv_xname, ahd_info);

	ahd_lock(ahd, &s);

	ahd->sc_adapter.adapt_dev = &ahd->sc_dev;
	ahd->sc_adapter.adapt_nchannels = 1;

	ahd->sc_adapter.adapt_openings = ahd->scb_data.numscbs - 1;
	ahd->sc_adapter.adapt_max_periph = 32;

	ahd->sc_adapter.adapt_ioctl = ahd_ioctl;
	ahd->sc_adapter.adapt_minphys = ahd_minphys;
	ahd->sc_adapter.adapt_request = ahd_action;

	ahd->sc_channel.chan_adapter = &ahd->sc_adapter;
	ahd->sc_channel.chan_bustype = &scsi_bustype;
	ahd->sc_channel.chan_channel = 0;
	ahd->sc_channel.chan_ntargets = AHD_NUM_TARGETS;
	ahd->sc_channel.chan_nluns = 8 /*AHD_NUM_LUNS*/;
	ahd->sc_channel.chan_id = ahd->our_id;
	ahd->sc_channel.chan_flags |= SCSIPI_CHAN_CANGROW;

	ahd->sc_child = config_found((void *)ahd, &ahd->sc_channel, scsiprint);

	ahd_intr_enable(ahd, TRUE);

	if (ahd->flags & AHD_RESET_BUS_A)
		ahd_reset_channel(ahd, 'A', TRUE);

	ahd_unlock(ahd, &s);

	return (1);
}

static int
ahd_ioctl(struct scsipi_channel *channel, u_long cmd,
	  void *addr, int flag, struct proc *p)
{
	struct ahd_softc *ahd = (void *)channel->chan_adapter->adapt_dev;
	int s, ret = ENOTTY;

	switch (cmd) {
	case SCBUSIORESET:
		s = splbio();
		ahd_reset_channel(ahd, channel->chan_channel == 1 ? 'B' : 'A', TRUE);
		splx(s);
		ret = 0;
		break;
	default:
		break;
	}

	return ret;
}

/*
 * Catch an interrupt from the adapter
 */
void
ahd_platform_intr(void *arg)
{
	struct ahd_softc *ahd;

	ahd = (struct ahd_softc *)arg;

	printf("%s; ahd_platform_intr\n", ahd_name(ahd));

	ahd_intr(ahd);
}

/*
 * We have an scb which has been processed by the
 * adaptor, now we look to see how the operation
 * went.
 */
void
ahd_done(struct ahd_softc *ahd, struct scb *scb)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	LIST_REMOVE(scb, pending_links);

	xs = scb->xs;
	periph = xs->xs_periph;

	callout_stop(&scb->xs->xs_callout);

	if (xs->datalen) {
		int op;

		if (xs->xs_control & XS_CTL_DATA_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;

		bus_dmamap_sync(ahd->parent_dmat, scb->dmamap, 0,
		    scb->dmamap->dm_mapsize, op);
		bus_dmamap_unload(ahd->parent_dmat, scb->dmamap);
	}

	/*
	 * If the recovery SCB completes, we have to be
	 * out of our timeout.
	 */
	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		struct scb *list_scb;

		/*
		 * We were able to complete the command successfully,
		 * so reinstate the timeouts for all other pending
		 * commands.
		 */
		LIST_FOREACH(list_scb, &ahd->pending_scbs, pending_links) {
			struct scsipi_xfer *txs = list_scb->xs;

			if (!(txs->xs_control & XS_CTL_POLL)) {
				callout_reset(&txs->xs_callout,
				    (txs->timeout > 1000000) ?
				    (txs->timeout / 1000) * hz :
				    (txs->timeout * hz) / 1000,
				    ahd_timeout, list_scb);
			}
		}

		if (ahd_get_transaction_status(scb) != XS_NOERROR)
			ahd_set_transaction_status(scb, XS_TIMEOUT);
		scsipi_printaddr(xs->xs_periph);
		printf("%s: no longer in timeout, status = %x\n",
		    ahd_name(ahd), xs->status);
	}

	if (xs->error != XS_NOERROR) {
		/* Don't clobber any existing error state */
	} else if ((xs->status == SCSI_STATUS_BUSY) ||
	    (xs->status == SCSI_STATUS_QUEUE_FULL)) {
		ahd_set_transaction_status(scb, XS_BUSY);
		printf("%s: drive (ID %d, LUN %d) queue full (SCB 0x%x)\n",
		    ahd_name(ahd), SCB_GET_TARGET(ahd,scb), SCB_GET_LUN(scb), SCB_GET_TAG(scb));
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * zero the sense data before having
		 * the drive fill it. The SCSI spec mandates
		 * that any untransferred data should be
		 * assumed to be zero. Complete the 'bounce'
		 * of sense information through buffers accessible
		 * via bus-space by copying it into the clients
		 * csio.
		 */
		memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
		memcpy(&xs->sense.scsi_sense, ahd_get_sense_buf(ahd, scb),
		    sizeof(struct scsi_sense_data));

		ahd_set_transaction_status(scb, XS_SENSE);
	} else if ((scb->flags & SCB_PKT_SENSE) != 0) {
		struct scsi_status_iu_header *siu;
		u_int sense_len;
#ifdef AHD_DEBUG
		int i;
#endif
		/*
		 * Copy only the sense data into the provided buffer.
		 */
		siu = (struct scsi_status_iu_header *)scb->sense_data;
		sense_len = MIN(scsi_4btoul(siu->sense_length),
		    sizeof(xs->sense.scsi_sense));
		memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
		memcpy(&xs->sense.scsi_sense,
		    scb->sense_data + SIU_SENSE_OFFSET(siu), sense_len);
#ifdef AHD_DEBUG
		printf("Copied %d bytes of sense data offset %d:", sense_len,
		    SIU_SENSE_OFFSET(siu));
		for (i = 0; i < sense_len; i++)
			printf(" 0x%x", ((uint8_t *)&xs->sense.scsi_sense)[i]);
		printf("\n");
#endif
		ahd_set_transaction_status(scb, XS_SENSE);
	}

	if (scb->flags & SCB_FREEZE_QUEUE) {
		scsipi_periph_thaw(periph, 1);
		scb->flags &= ~SCB_FREEZE_QUEUE;
	}

	if (scb->flags & SCB_REQUEUE)
		ahd_set_transaction_status(scb, XS_REQUEUE);

	ahd_lock(ahd, &s);
	ahd_free_scb(ahd, scb);
	ahd_unlock(ahd, &s);

	scsipi_done(xs);
}

static void
ahd_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct ahd_softc *ahd;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;

	ahd = (void *)chan->chan_adapter->adapt_dev;

	switch(req) {

	case ADAPTER_REQ_RUN_XFER:
	  {
		struct scsipi_xfer *xs;
		struct scsipi_periph *periph;
		struct scb *scb;
		struct hardware_scb *hscb;
		u_int target_id;
		u_int our_id;
		u_int col_idx;
		char channel;
		int s;

		xs = arg;
		periph = xs->xs_periph;

		SC_DEBUG(periph, SCSIPI_DB3, ("ahd_action\n"));

		target_id = periph->periph_target;
		our_id = ahd->our_id;
		channel = (chan->chan_channel == 1) ? 'B' : 'A';

		/*
		 * get an scb to use.
		 */
		ahd_lock(ahd, &s);
		tinfo = ahd_fetch_transinfo(ahd, channel, our_id,
		    target_id, &tstate);

		if (xs->xs_tag_type != 0 ||
		    (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0)
			col_idx = AHD_NEVER_COL_IDX;
		else
			col_idx = AHD_BUILD_COL_IDX(target_id,
			    periph->periph_lun);

		if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
			xs->error = XS_RESOURCE_SHORTAGE;
			ahd_unlock(ahd, &s);
			scsipi_done(xs);
			return;
		}
		ahd_unlock(ahd, &s);

		hscb = scb->hscb;

		SC_DEBUG(periph, SCSIPI_DB3, ("start scb(%p)\n", scb));
		scb->xs = xs;

		/*
		 * Put all the arguments for the xfer in the scb
		 */
		hscb->control = 0;
		hscb->scsiid = BUILD_SCSIID(ahd, sim, target_id, our_id);
		hscb->lun = periph->periph_lun;
		if (xs->xs_control & XS_CTL_RESET) {
			hscb->cdb_len = 0;
			scb->flags |= SCB_DEVICE_RESET;
			hscb->control |= MK_MESSAGE;
			hscb->task_management = SIU_TASKMGMT_LUN_RESET;
			ahd_execute_scb(scb, NULL, 0);
		} else {
			hscb->task_management = 0;
		}

		ahd_setup_data(ahd, xs, scb);
		break;
	  }

	case ADAPTER_REQ_GROW_RESOURCES:
#ifdef AHC_DEBUG
		printf("%s: ADAPTER_REQ_GROW_RESOURCES\n", ahd_name(ahd));
#endif
		chan->chan_adapter->adapt_openings += ahd_alloc_scbs(ahd);
		if (ahd->scb_data.numscbs >= AHD_SCB_MAX_ALLOC)
			chan->chan_flags &= ~SCSIPI_CHAN_CANGROW;
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
	  {
		struct scsipi_xfer_mode *xm = arg;
		struct ahd_devinfo devinfo;
		int target_id, our_id, first;
		u_int width;
		int s;
		char channel;
		u_int ppr_options = 0, period, offset;
		uint16_t old_autoneg;

		target_id = xm->xm_target;
		our_id = chan->chan_id;
		channel = 'A';
		s = splbio();
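		/*
		 * Look up the current settings for this target and
		 * remember whether auto-negotiation was already pending,
		 * so we can tell below whether this request changed
		 * anything worth reporting.
		 */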
		tinfo = ahd_fetch_transinfo(ahd, channel, our_id, target_id,
		    &tstate);
		ahd_compile_devinfo(&devinfo, our_id, target_id,
		    0, channel, ROLE_INITIATOR);

		old_autoneg = tstate->auto_negotiate;

		/*
		 * XXX since the period and offset are not provided here,
		 * fake things by forcing a renegotiation using the user
		 * settings if this is called for the first time (i.e.
		 * during probe). Also, cap various values at the user
		 * values, assuming that the user set it up that way.
		 */
		if (ahd->inited_target[target_id] == 0) {
			period = tinfo->user.period;
			offset = tinfo->user.offset;
			ppr_options = tinfo->user.ppr_options;
			width = tinfo->user.width;
			tstate->tagenable |=
			    (ahd->user_tagenable & devinfo.target_mask);
			tstate->discenable |=
			    (ahd->user_discenable & devinfo.target_mask);
			ahd->inited_target[target_id] = 1;
			first = 1;
		} else
			first = 0;

		if (xm->xm_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
			width = MSG_EXT_WDTR_BUS_16_BIT;
		else
			width = MSG_EXT_WDTR_BUS_8_BIT;

		ahd_validate_width(ahd, NULL, &width, ROLE_UNKNOWN);
		if (width > tinfo->user.width)
			width = tinfo->user.width;
		ahd_set_width(ahd, &devinfo, width, AHD_TRANS_GOAL, FALSE);

		if (!(xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT))) {
			period = 0;
			offset = 0;
			ppr_options = 0;
		}

		if ((xm->xm_mode & PERIPH_CAP_DT) &&
		    (tinfo->user.ppr_options & MSG_EXT_PPR_DT_REQ))
			ppr_options |= MSG_EXT_PPR_DT_REQ;
		else
			ppr_options &= ~MSG_EXT_PPR_DT_REQ;

		if ((tstate->discenable & devinfo.target_mask) == 0 ||
		    (tstate->tagenable & devinfo.target_mask) == 0)
			ppr_options &= ~MSG_EXT_PPR_IU_REQ;

		if ((xm->xm_mode & PERIPH_CAP_TQING) &&
		    (ahd->user_tagenable & devinfo.target_mask))
			tstate->tagenable |= devinfo.target_mask;
		else
			tstate->tagenable &= ~devinfo.target_mask;

		ahd_find_syncrate(ahd, &period, &ppr_options, AHD_SYNCRATE_MAX);
		ahd_validate_offset(ahd, NULL, period, &offset,
		    MSG_EXT_WDTR_BUS_8_BIT, ROLE_UNKNOWN);
		if (offset == 0) {
			period = 0;
			ppr_options = 0;
		}
		if (ppr_options != 0
		    && tinfo->user.transport_version >= 3) {
			tinfo->goal.transport_version =
			    tinfo->user.transport_version;
			tinfo->curr.transport_version =
			    tinfo->user.transport_version;
		}

		ahd_set_syncrate(ahd, &devinfo, period, offset,
		    ppr_options, AHD_TRANS_GOAL, FALSE);

		/*
		 * If this is the first request, and no negotiation is
		 * needed, just confirm the state to the scsipi layer,
		 * so that it can print a message.
		 */
		if (old_autoneg == tstate->auto_negotiate && first) {
			xm->xm_mode = 0;
			xm->xm_period = tinfo->curr.period;
			xm->xm_offset = tinfo->curr.offset;
			if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
				xm->xm_mode |= PERIPH_CAP_WIDE16;
			if (tinfo->curr.period)
				xm->xm_mode |= PERIPH_CAP_SYNC;
			if (tstate->tagenable & devinfo.target_mask)
				xm->xm_mode |= PERIPH_CAP_TQING;
			if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
				xm->xm_mode |= PERIPH_CAP_DT;
			scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		}
		splx(s);
	  }
	}

	return;
}

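/*
 * Fill in the hardware SCB from the DMA segments set up for this
 * transfer (if any), apply the per-target disconnect/tag/packetized
 * options, and hand the SCB to the controller.  For polled transfers
 * we also wait here for the command to complete.
 */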
static void
ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
{
	struct scb *scb;
	struct scsipi_xfer *xs;
	struct ahd_softc *ahd;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	u_int mask;
	int s;

	scb = (struct scb *)arg;
	xs = scb->xs;
	xs->error = 0;
	xs->status = 0;
	xs->xs_status = 0;
	ahd = (void *)xs->xs_periph->periph_channel->chan_adapter->adapt_dev;

	scb->sg_count = 0;
	if (nsegments != 0) {
		void *sg;
		int op;
		u_int i;

		ahd_setup_data_scb(ahd, scb);

		/* Copy the segments into our SG list */
		for (i = nsegments, sg = scb->sg_list; i > 0; i--) {

			sg = ahd_sg_setup(ahd, scb, sg, dm_segs->ds_addr,
			    dm_segs->ds_len,
			    /*last*/i == 1);
			dm_segs++;
		}

		if (xs->xs_control & XS_CTL_DATA_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahd->parent_dmat, scb->dmamap, 0,
		    scb->dmamap->dm_mapsize, op);
	}

	ahd_lock(ahd, &s);

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (ahd_get_scsi_status(scb) == XS_STS_DONE) {
		if (nsegments != 0)
			bus_dmamap_unload(ahd->parent_dmat,
			    scb->dmamap);
		ahd_free_scb(ahd, scb);
		ahd_unlock(ahd, &s);
		return;
	}

	tinfo = ahd_fetch_transinfo(ahd, SCSIID_CHANNEL(ahd, scb->hscb->scsiid),
	    SCSIID_OUR_ID(scb->hscb->scsiid),
	    SCSIID_TARGET(ahd, scb->hscb->scsiid),
	    &tstate);

	mask = SCB_GET_TARGET_MASK(ahd, scb);

	if ((tstate->discenable & mask) != 0)
		scb->hscb->control |= DISCENB;

	if ((tstate->tagenable & mask) != 0)
		scb->hscb->control |= xs->xs_tag_type|TAG_ENB;

	if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU) != 0) {
		scb->flags |= SCB_PACKETIZED;
		if (scb->hscb->task_management != 0)
			scb->hscb->control &= ~MK_MESSAGE;
	}

#if 0	/* This looks like it makes sense at first, but it can loop */
	if ((xs->xs_control & XS_CTL_DISCOVERY) &&
	    (tinfo->goal.width != 0
	     || tinfo->goal.period != 0
	     || tinfo->goal.ppr_options != 0)) {
		scb->flags |= SCB_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	} else
#endif
	if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);

	scb->flags |= SCB_ACTIVE;

	if (!(xs->xs_control & XS_CTL_POLL)) {
		callout_reset(&scb->xs->xs_callout, xs->timeout > 1000000 ?
		    (xs->timeout / 1000) * hz : (xs->timeout * hz) / 1000,
		    ahd_timeout, scb);
	}

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Define a mapping from our tag to the SCB. */
		ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
		ahd_pause(ahd);
		ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
		ahd_unpause(ahd);
	} else {
		ahd_queue_scb(ahd, scb);
	}

	if (!(xs->xs_control & XS_CTL_POLL)) {
		ahd_unlock(ahd, &s);
		return;
	}
	/*
	 * If we can't use interrupts, poll for completion
	 */
	SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("cmd_poll\n"));
	do {
		if (ahd_poll(ahd, xs->timeout)) {
			if (!(xs->xs_control & XS_CTL_SILENT))
				printf("cmd fail\n");
			ahd_timeout(scb);
			break;
		}
	} while (!(xs->xs_status & XS_STS_DONE));

	ahd_unlock(ahd, &s);
}

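/*
 * Busy-wait, in one-millisecond steps, for up to 'wait' milliseconds
 * for the controller to post an interrupt, then service it.  Returns
 * EIO if the controller never responds.
 */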
static int
ahd_poll(struct ahd_softc *ahd, int wait)
{

	while (--wait) {
		DELAY(1000);
		if (ahd_inb(ahd, INTSTAT) & INT_PEND)
			break;
	}

	if (wait == 0) {
		printf("%s: board is not responding\n", ahd_name(ahd));
		return (EIO);
	}

	ahd_intr((void *)ahd);
	return (0);
}


static void
ahd_setup_data(struct ahd_softc *ahd, struct scsipi_xfer *xs,
	       struct scb *scb)
{
	struct hardware_scb *hscb;

	hscb = scb->hscb;
	xs->resid = xs->status = 0;

	hscb->cdb_len = xs->cmdlen;
	if (hscb->cdb_len > MAX_CDB_LEN) {
		int s;
		/*
		 * Should CAM start to support CDB sizes
		 * greater than 16 bytes, we could use
		 * the sense buffer to store the CDB.
		 */
		ahd_set_transaction_status(scb,
		    XS_DRIVER_STUFFUP);

		ahd_lock(ahd, &s);
		ahd_free_scb(ahd, scb);
		ahd_unlock(ahd, &s);
		scsipi_done(xs);
		/* The SCB has been freed and the xfer completed. */
		return;
	}
	memcpy(hscb->shared_data.idata.cdb, xs->cmd, hscb->cdb_len);

	/* Only use S/G if there is a transfer */
	if (xs->datalen) {
		int error;

		error = bus_dmamap_load(ahd->parent_dmat,
		    scb->dmamap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ?
		     BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ?
		     BUS_DMA_READ : BUS_DMA_WRITE));
		if (error) {
#ifdef AHD_DEBUG
			printf("%s: in ahd_setup_data(): bus_dmamap_load() "
			    "= %d\n",
			    ahd_name(ahd), error);
#endif
			xs->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(xs);
			return;
		}
		ahd_execute_scb(scb,
		    scb->dmamap->dm_segs,
		    scb->dmamap->dm_nsegs);
	} else {
		ahd_execute_scb(scb, NULL, 0);
	}
}

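/*
 * A queued command failed to complete within its timeout.  Pause the
 * controller, dump its state for diagnostics, and recover by resetting
 * the SCSI channel.
 */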
" 719 "Interrupts may not be functioning.\n", ahd_name(ahd)); 720 ahd_unpause(ahd); 721 ahd_unlock(ahd, &s); 722 return; 723 } 724 725 ahd_print_path(ahd, scb); 726 printf("SCB 0x%x - timed out\n", SCB_GET_TAG(scb)); 727 ahd_dump_card_state(ahd); 728 ahd_reset_channel(ahd, SIM_CHANNEL(ahd, sim), 729 /*initiate reset*/TRUE); 730 ahd_unlock(ahd, &s); 731 return; 732 } 733 734 int 735 ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg) 736 { 737 ahd->platform_data = malloc(sizeof(struct ahd_platform_data), M_DEVBUF, 738 M_NOWAIT /*| M_ZERO*/); 739 if (ahd->platform_data == NULL) 740 return (ENOMEM); 741 742 memset(ahd->platform_data, 0, sizeof(struct ahd_platform_data)); 743 744 return (0); 745 } 746 747 void 748 ahd_platform_free(struct ahd_softc *ahd) 749 { 750 free(ahd->platform_data, M_DEVBUF); 751 } 752 753 int 754 ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd) 755 { 756 /* We don't sort softcs under NetBSD so report equal always */ 757 return (0); 758 } 759 760 int 761 ahd_detach(struct device *self, int flags) 762 { 763 int rv = 0; 764 765 struct ahd_softc *ahd = (struct ahd_softc*)self; 766 767 if (ahd->sc_child != NULL) 768 rv = config_detach((void *)ahd->sc_child, flags); 769 770 shutdownhook_disestablish(ahd->shutdown_hook); 771 772 ahd_free(ahd); 773 774 return rv; 775 } 776 777 void 778 ahd_platform_set_tags(struct ahd_softc *ahd, 779 struct ahd_devinfo *devinfo, ahd_queue_alg alg) 780 { 781 struct ahd_tmode_tstate *tstate; 782 783 ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, 784 devinfo->target, &tstate); 785 786 if (alg != AHD_QUEUE_NONE) 787 tstate->tagenable |= devinfo->target_mask; 788 else 789 tstate->tagenable &= ~devinfo->target_mask; 790 } 791 792 void 793 ahd_send_async(struct ahd_softc *ahc, char channel, u_int target, u_int lun, 794 ac_code code, void *opt_arg) 795 { 796 struct ahd_tmode_tstate *tstate; 797 struct ahd_initiator_tinfo *tinfo; 798 struct ahd_devinfo devinfo; 799 struct scsipi_channel *chan; 800 struct scsipi_xfer_mode xm; 801 802 #ifdef DIAGNOSTIC 803 if (channel != 'A') 804 panic("ahd_send_async: not channel A"); 805 #endif 806 chan = &ahc->sc_channel; 807 switch (code) { 808 case AC_TRANSFER_NEG: 809 tinfo = ahd_fetch_transinfo(ahc, channel, ahc->our_id, target, 810 &tstate); 811 ahd_compile_devinfo(&devinfo, ahc->our_id, target, lun, 812 channel, ROLE_UNKNOWN); 813 /* 814 * Don't bother if negotiating. XXX? 815 */ 816 if (tinfo->curr.period != tinfo->goal.period 817 || tinfo->curr.width != tinfo->goal.width 818 || tinfo->curr.offset != tinfo->goal.offset 819 || tinfo->curr.ppr_options != tinfo->goal.ppr_options) 820 break; 821 xm.xm_target = target; 822 xm.xm_mode = 0; 823 xm.xm_period = tinfo->curr.period; 824 xm.xm_offset = tinfo->curr.offset; 825 if (tinfo->goal.ppr_options & MSG_EXT_PPR_DT_REQ) 826 xm.xm_mode |= PERIPH_CAP_DT; 827 if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT) 828 xm.xm_mode |= PERIPH_CAP_WIDE16; 829 if (tinfo->curr.period) 830 xm.xm_mode |= PERIPH_CAP_SYNC; 831 if (tstate->tagenable & devinfo.target_mask) 832 xm.xm_mode |= PERIPH_CAP_TQING; 833 scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm); 834 break; 835 case AC_BUS_RESET: 836 scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL); 837 case AC_SENT_BDR: 838 default: 839 break; 840 } 841 } 842