1 /* $NetBSD: aic7xxx.c,v 1.131 2014/03/27 18:28:26 christos Exp $ */ 2 3 /* 4 * Core routines and tables shareable across OS platforms. 5 * 6 * Copyright (c) 1994-2002 Justin T. Gibbs. 7 * Copyright (c) 2000-2002 Adaptec Inc. 8 * All rights reserved. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions, and the following disclaimer, 15 * without modification. 16 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 17 * substantially similar to the "NO WARRANTY" disclaimer below 18 * ("Disclaimer") and any redistribution must be conditioned upon 19 * including a substantially similar Disclaimer requirement for further 20 * binary redistribution. 21 * 3. Neither the names of the above-listed copyright holders nor the names 22 * of any contributors may be used to endorse or promote products derived 23 * from this software without specific prior written permission. 24 * 25 * Alternatively, this software may be distributed under the terms of the 26 * GNU General Public License ("GPL") version 2 as published by the Free 27 * Software Foundation. 28 * 29 * NO WARRANTY 30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 31 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 32 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 33 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 34 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 35 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 36 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 37 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 38 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 39 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 40 * POSSIBILITY OF SUCH DAMAGES. 41 * 42 * $Id: aic7xxx.c,v 1.131 2014/03/27 18:28:26 christos Exp $ 43 * 44 * //depot/aic7xxx/aic7xxx/aic7xxx.c#112 $ 45 * 46 * $FreeBSD: /repoman/r/ncvs/src/sys/dev/aic7xxx/aic7xxx.c,v 1.88 2003/01/20 20:44:55 gibbs Exp $ 47 */ 48 /* 49 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc. - April 2003 50 */ 51 52 #include <sys/cdefs.h> 53 __KERNEL_RCSID(0, "$NetBSD: aic7xxx.c,v 1.131 2014/03/27 18:28:26 christos Exp $"); 54 55 #include <dev/ic/aic7xxx_osm.h> 56 #include <dev/ic/aic7xxx_inline.h> 57 #include <dev/ic/aic7xxx_cam.h> 58 59 /****************************** Softc Data ************************************/ 60 struct ahc_softc_tailq ahc_tailq = TAILQ_HEAD_INITIALIZER(ahc_tailq); 61 62 /***************************** Lookup Tables **********************************/ 63 const char *ahc_chip_names[] = 64 { 65 "NONE", 66 "aic7770", 67 "aic7850", 68 "aic7855", 69 "aic7859", 70 "aic7860", 71 "aic7870", 72 "aic7880", 73 "aic7895", 74 "aic7895C", 75 "aic7890/91", 76 "aic7896/97", 77 "aic7892", 78 "aic7899" 79 }; 80 81 /* 82 * Hardware error codes. 
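 * Each entry pairs a status bit in the ERROR register with the message
 * that ahc_handle_brkadrint() prints when the sequencer halts on that
 * error.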
83 */ 84 struct ahc_hard_error_entry { 85 uint8_t errno; 86 const char *errmesg; 87 }; 88 89 static struct ahc_hard_error_entry ahc_hard_errors[] = { 90 { ILLHADDR, "Illegal Host Access" }, 91 { ILLSADDR, "Illegal Sequencer Address referenced" }, 92 { ILLOPCODE, "Illegal Opcode in sequencer program" }, 93 { SQPARERR, "Sequencer Parity Error" }, 94 { DPARERR, "Data-path Parity Error" }, 95 { MPARERR, "Scratch or SCB Memory Parity Error" }, 96 { PCIERRSTAT, "PCI Error detected" }, 97 { CIOPARERR, "CIOBUS Parity Error" }, 98 }; 99 static const u_int num_errors = NUM_ELEMENTS(ahc_hard_errors); 100 101 static struct ahc_phase_table_entry ahc_phase_table[] = 102 { 103 { P_DATAOUT, MSG_NOOP, "in Data-out phase" }, 104 { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" }, 105 { P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" }, 106 { P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" }, 107 { P_COMMAND, MSG_NOOP, "in Command phase" }, 108 { P_MESGOUT, MSG_NOOP, "in Message-out phase" }, 109 { P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" }, 110 { P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" }, 111 { P_BUSFREE, MSG_NOOP, "while idle" }, 112 { 0, MSG_NOOP, "in unknown phase" } 113 }; 114 115 /* 116 * In most cases we only wish to iterate over real phases, so 117 * exclude the last element from the count. 118 */ 119 static const u_int num_phases = NUM_ELEMENTS(ahc_phase_table) - 1; 120 121 /* 122 * Valid SCSIRATE values. (p. 3-17) 123 * Provides a mapping of transfer periods in ns to the proper value to 124 * stick in the scsixfer reg. 125 */ 126 static struct ahc_syncrate ahc_syncrates[] = 127 { 128 /* ultra2 fast/ultra period rate */ 129 { 0x42, 0x000, 9, "80.0" }, 130 { 0x03, 0x000, 10, "40.0" }, 131 { 0x04, 0x000, 11, "33.0" }, 132 { 0x05, 0x100, 12, "20.0" }, 133 { 0x06, 0x110, 15, "16.0" }, 134 { 0x07, 0x120, 18, "13.4" }, 135 { 0x08, 0x000, 25, "10.0" }, 136 { 0x19, 0x010, 31, "8.0" }, 137 { 0x1a, 0x020, 37, "6.67" }, 138 { 0x1b, 0x030, 43, "5.7" }, 139 { 0x1c, 0x040, 50, "5.0" }, 140 { 0x00, 0x050, 56, "4.4" }, 141 { 0x00, 0x060, 62, "4.0" }, 142 { 0x00, 0x070, 68, "3.6" }, 143 { 0x00, 0x000, 0, NULL } 144 }; 145 146 /* Our Sequencer Program */ 147 #include <dev/microcode/aic7xxx/aic7xxx_seq.h> 148 149 /**************************** Function Declarations ***************************/ 150 static void ahc_force_renegotiation(struct ahc_softc *ahc); 151 static struct ahc_tmode_tstate* 152 ahc_alloc_tstate(struct ahc_softc *ahc, 153 u_int scsi_id, char channel); 154 #ifdef AHC_TARGET_MODE 155 static void ahc_free_tstate(struct ahc_softc *ahc, 156 u_int scsi_id, char channel, int force); 157 #endif 158 static struct ahc_syncrate* 159 ahc_devlimited_syncrate(struct ahc_softc *ahc, 160 struct ahc_initiator_tinfo *, 161 u_int *period, 162 u_int *ppr_options, 163 role_t role); 164 static void ahc_update_pending_scbs(struct ahc_softc *ahc); 165 static void ahc_fetch_devinfo(struct ahc_softc *ahc, 166 struct ahc_devinfo *devinfo); 167 static void ahc_scb_devinfo(struct ahc_softc *ahc, 168 struct ahc_devinfo *devinfo, 169 struct scb *scb); 170 static void ahc_assert_atn(struct ahc_softc *ahc); 171 static void ahc_setup_initiator_msgout(struct ahc_softc *ahc, 172 struct ahc_devinfo *devinfo, 173 struct scb *scb); 174 static void ahc_build_transfer_msg(struct ahc_softc *ahc, 175 struct ahc_devinfo *devinfo); 176 static void ahc_construct_sdtr(struct ahc_softc *ahc, 177 struct ahc_devinfo *devinfo, 178 u_int period, u_int offset); 179 static void ahc_construct_wdtr(struct ahc_softc *ahc,
180 struct ahc_devinfo *devinfo, 181 u_int bus_width); 182 static void ahc_construct_ppr(struct ahc_softc *ahc, 183 struct ahc_devinfo *devinfo, 184 u_int period, u_int offset, 185 u_int bus_width, u_int ppr_options); 186 static void ahc_clear_msg_state(struct ahc_softc *ahc); 187 static void ahc_handle_proto_violation(struct ahc_softc *ahc); 188 static void ahc_handle_message_phase(struct ahc_softc *ahc); 189 typedef enum { 190 AHCMSG_1B, 191 AHCMSG_2B, 192 AHCMSG_EXT 193 } ahc_msgtype; 194 static int ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, 195 u_int msgval, int full); 196 static int ahc_parse_msg(struct ahc_softc *ahc, 197 struct ahc_devinfo *devinfo); 198 static int ahc_handle_msg_reject(struct ahc_softc *ahc, 199 struct ahc_devinfo *devinfo); 200 static void ahc_handle_ign_wide_residue(struct ahc_softc *ahc, 201 struct ahc_devinfo *devinfo); 202 static void ahc_reinitialize_dataptrs(struct ahc_softc *ahc); 203 static void ahc_handle_devreset(struct ahc_softc *ahc, 204 struct ahc_devinfo *devinfo, 205 cam_status status, 206 const char *message, 207 int verbose_level); 208 #if AHC_TARGET_MODE 209 static void ahc_setup_target_msgin(struct ahc_softc *ahc, 210 struct ahc_devinfo *devinfo, 211 struct scb *scb); 212 #endif 213 214 #if 0 215 static bus_dmamap_callback_t ahc_dmamap_cb; 216 #endif 217 static void ahc_build_free_scb_list(struct ahc_softc *ahc); 218 static int ahc_init_scbdata(struct ahc_softc *ahc); 219 static void ahc_fini_scbdata(struct ahc_softc *ahc); 220 static void ahc_qinfifo_requeue(struct ahc_softc *ahc, 221 struct scb *prev_scb, 222 struct scb *scb); 223 static int ahc_qinfifo_count(struct ahc_softc *ahc); 224 static u_int ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, 225 u_int prev, u_int scbptr); 226 static void ahc_add_curscb_to_free_list(struct ahc_softc *ahc); 227 static u_int ahc_rem_wscb(struct ahc_softc *ahc, 228 u_int scbpos, u_int prev); 229 static void ahc_reset_current_bus(struct ahc_softc *ahc); 230 #ifdef AHC_DUMP_SEQ 231 static void ahc_dumpseq(struct ahc_softc *ahc); 232 #endif 233 static void ahc_loadseq(struct ahc_softc *ahc); 234 static int ahc_check_patch(struct ahc_softc *ahc, 235 struct patch **start_patch, 236 u_int start_instr, u_int *skip_addr); 237 static void ahc_download_instr(struct ahc_softc *ahc, 238 u_int instrptr, uint8_t *dconsts); 239 #ifdef AHC_TARGET_MODE 240 static void ahc_queue_lstate_event(struct ahc_softc *ahc, 241 struct ahc_tmode_lstate *lstate, 242 u_int initiator_id, 243 u_int event_type, 244 u_int event_arg); 245 static void ahc_update_scsiid(struct ahc_softc *ahc, 246 u_int targid_mask); 247 static int ahc_handle_target_cmd(struct ahc_softc *ahc, 248 struct target_cmd *cmd); 249 #endif 250 251 /************************** Added for porting to NetBSD ***********************/ 252 static int ahc_createdmamem(bus_dma_tag_t tag, 253 int size, 254 int flags, 255 bus_dmamap_t *mapp, 256 void **vaddr, 257 bus_addr_t *baddr, 258 bus_dma_segment_t *seg, 259 int *nseg, 260 const char *myname, const char *what); 261 static void ahc_freedmamem(bus_dma_tag_t tag, 262 int size, 263 bus_dmamap_t map, 264 void *vaddr, 265 bus_dma_segment_t *seg, 266 int nseg); 267 268 /************************* Sequencer Execution Control ************************/ 269 /* 270 * Restart the sequencer program from address zero 271 */ 272 void 273 ahc_restart(struct ahc_softc *ahc) 274 { 275 276 ahc_pause(ahc); 277 278 /* No more pending messages. 
*/ 279 ahc_clear_msg_state(ahc); 280 281 ahc_outb(ahc, SCSISIGO, 0); /* De-assert BSY */ 282 ahc_outb(ahc, MSG_OUT, MSG_NOOP); /* No message to send */ 283 ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET); 284 ahc_outb(ahc, LASTPHASE, P_BUSFREE); 285 ahc_outb(ahc, SAVED_SCSIID, 0xFF); 286 ahc_outb(ahc, SAVED_LUN, 0xFF); 287 288 /* 289 * Ensure that the sequencer's idea of TQINPOS 290 * matches our own. The sequencer increments TQINPOS 291 * only after it sees a DMA complete and a reset could 292 * occur before the increment leaving the kernel to believe 293 * the command arrived but the sequencer to not. 294 */ 295 ahc_outb(ahc, TQINPOS, ahc->tqinfifonext); 296 297 /* Always allow reselection */ 298 ahc_outb(ahc, SCSISEQ, 299 ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP)); 300 if ((ahc->features & AHC_CMD_CHAN) != 0) { 301 /* Ensure that no DMA operations are in progress */ 302 ahc_outb(ahc, CCSCBCNT, 0); 303 ahc_outb(ahc, CCSGCTL, 0); 304 ahc_outb(ahc, CCSCBCTL, 0); 305 } 306 /* 307 * If we were in the process of DMA'ing SCB data into 308 * an SCB, replace that SCB on the free list. This prevents 309 * an SCB leak. 310 */ 311 if ((ahc_inb(ahc, SEQ_FLAGS2) & SCB_DMA) != 0) { 312 ahc_add_curscb_to_free_list(ahc); 313 ahc_outb(ahc, SEQ_FLAGS2, 314 ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA); 315 } 316 ahc_outb(ahc, MWI_RESIDUAL, 0); 317 ahc_outb(ahc, SEQCTL, FASTMODE); 318 ahc_outb(ahc, SEQADDR0, 0); 319 ahc_outb(ahc, SEQADDR1, 0); 320 ahc_unpause(ahc); 321 } 322 323 /************************* Input/Output Queues ********************************/ 324 void 325 ahc_run_qoutfifo(struct ahc_softc *ahc) 326 { 327 struct scb *scb; 328 u_int scb_index; 329 330 ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD); 331 while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) { 332 333 scb_index = ahc->qoutfifo[ahc->qoutfifonext]; 334 if ((ahc->qoutfifonext & 0x03) == 0x03) { 335 u_int modnext; 336 337 /* 338 * Clear 32bits of QOUTFIFO at a time 339 * so that we don't clobber an incoming 340 * byte DMA to the array on architectures 341 * that only support 32bit load and store 342 * operations. 343 */ 344 modnext = ahc->qoutfifonext & ~0x3; 345 *((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL; 346 ahc_dmamap_sync(ahc, 347 ahc->parent_dmat /*shared_data_dmat*/, 348 ahc->shared_data_dmamap, 349 /*offset*/modnext, /*len*/4, 350 BUS_DMASYNC_PREREAD); 351 } 352 ahc->qoutfifonext++; 353 354 scb = ahc_lookup_scb(ahc, scb_index); 355 if (scb == NULL) { 356 printf("%s: WARNING no command for scb %d " 357 "(cmdcmplt)\nQOUTPOS = %d\n", 358 ahc_name(ahc), scb_index, 359 (ahc->qoutfifonext - 1) & 0xFF); 360 continue; 361 } 362 363 /* 364 * Save off the residual 365 * if there is one. 
366 */ 367 ahc_update_residual(ahc, scb); 368 ahc_done(ahc, scb); 369 } 370 } 371 372 void 373 ahc_run_untagged_queues(struct ahc_softc *ahc) 374 { 375 int i; 376 377 for (i = 0; i < 16; i++) 378 ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]); 379 } 380 381 void 382 ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue) 383 { 384 struct scb *scb; 385 386 if (ahc->untagged_queue_lock != 0) 387 return; 388 389 if ((scb = TAILQ_FIRST(queue)) != NULL 390 && (scb->flags & SCB_ACTIVE) == 0) { 391 scb->flags |= SCB_ACTIVE; 392 ahc_queue_scb(ahc, scb); 393 } 394 } 395 396 /************************* Interrupt Handling *********************************/ 397 void 398 ahc_handle_brkadrint(struct ahc_softc *ahc) 399 { 400 /* 401 * We upset the sequencer :-( 402 * Lookup the error message 403 */ 404 int i; 405 int error; 406 407 error = ahc_inb(ahc, ERROR); 408 for (i = 0; error != 1 && i < num_errors; i++) 409 error >>= 1; 410 printf("%s: brkadrint, %s at seqaddr = 0x%x\n", 411 ahc_name(ahc), ahc_hard_errors[i].errmesg, 412 ahc_inb(ahc, SEQADDR0) | 413 (ahc_inb(ahc, SEQADDR1) << 8)); 414 415 ahc_dump_card_state(ahc); 416 417 /* Tell everyone that this HBA is no longer available */ 418 ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS, 419 CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, 420 CAM_NO_HBA); 421 422 /* Disable all interrupt sources by resetting the controller */ 423 ahc_shutdown(ahc); 424 } 425 426 void 427 ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat) 428 { 429 struct scb *scb; 430 struct ahc_devinfo devinfo; 431 432 ahc_fetch_devinfo(ahc, &devinfo); 433 434 /* 435 * Clear the upper byte that holds SEQINT status 436 * codes and clear the SEQINT bit. We will unpause 437 * the sequencer, if appropriate, after servicing 438 * the request. 439 */ 440 ahc_outb(ahc, CLRINT, CLRSEQINT); 441 switch (intstat & SEQINT_MASK) { 442 case BAD_STATUS: 443 { 444 u_int scb_index; 445 struct hardware_scb *hscb; 446 447 /* 448 * Set the default return value to 0 (don't 449 * send sense). The sense code will change 450 * this if needed. 451 */ 452 ahc_outb(ahc, RETURN_1, 0); 453 454 /* 455 * The sequencer will notify us when a command 456 * has an error that would be of interest to 457 * the kernel. This allows us to leave the sequencer 458 * running in the common case of command completes 459 * without error. The sequencer will already have 460 * DMA'd the SCB back up to us, so we can reference 461 * the in kernel copy directly. 462 */ 463 scb_index = ahc_inb(ahc, SCB_TAG); 464 scb = ahc_lookup_scb(ahc, scb_index); 465 if (scb == NULL) { 466 ahc_print_devinfo(ahc, &devinfo); 467 printf("ahc_intr - referenced scb " 468 "not valid during seqint 0x%x scb(%d)\n", 469 intstat, scb_index); 470 ahc_dump_card_state(ahc); 471 panic("for safety"); 472 goto unpause; 473 } 474 475 hscb = scb->hscb; 476 477 /* Don't want to clobber the original sense code */ 478 if ((scb->flags & SCB_SENSE) != 0) { 479 /* 480 * Clear the SCB_SENSE Flag and have 481 * the sequencer do a normal command 482 * complete. 483 */ 484 scb->flags &= ~SCB_SENSE; 485 break; 486 } 487 /* Freeze the queue until the client sees the error. 
*/ 488 ahc_freeze_devq(ahc, scb); 489 ahc_freeze_scb(scb); 490 ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status); 491 switch (hscb->shared_data.status.scsi_status) { 492 case SCSI_STATUS_OK: 493 printf("%s: Interrupted for status of 0 (?)\n", 494 ahc_name(ahc)); 495 break; 496 case SCSI_STATUS_CMD_TERMINATED: 497 case SCSI_STATUS_CHECK_COND: 498 { 499 struct ahc_dma_seg *sg; 500 struct scsi_request_sense *sc; 501 struct ahc_initiator_tinfo *targ_info; 502 struct ahc_tmode_tstate *tstate; 503 struct ahc_transinfo *tinfo; 504 uint32_t len; 505 #ifdef AHC_DEBUG 506 if (ahc_debug & AHC_SHOW_SENSE) { 507 ahc_print_path(ahc, scb); 508 printf("SCB %d: requests Check Status\n", 509 scb->hscb->tag); 510 } 511 #endif 512 513 if (ahc_perform_autosense(scb) == 0) 514 break; 515 516 targ_info = ahc_fetch_transinfo(ahc, 517 devinfo.channel, 518 devinfo.our_scsiid, 519 devinfo.target, 520 &tstate); 521 tinfo = &targ_info->curr; 522 sg = scb->sg_list; 523 sc = (struct scsi_request_sense *) 524 (&hscb->shared_data.cdb); 525 /* 526 * Save off the residual if there is one. 527 */ 528 ahc_update_residual(ahc, scb); 529 #ifdef AHC_DEBUG 530 if (ahc_debug & AHC_SHOW_SENSE) { 531 ahc_print_path(ahc, scb); 532 printf("Sending Sense\n"); 533 } 534 #endif 535 sg->addr = ahc_htole32(ahc_get_sense_bufaddr(ahc, scb)); 536 len = ahc_get_sense_bufsize(ahc, scb); 537 sg->len = ahc_htole32(len | AHC_DMA_LAST_SEG); 538 539 memset(sc, 0, sizeof(*sc)); 540 sc->opcode = SCSI_REQUEST_SENSE; 541 if (tinfo->protocol_version <= SCSI_REV_2 542 && SCB_GET_LUN(scb) < 8) 543 sc->byte2 = SCB_GET_LUN(scb) << 5; 544 sc->length = len; 545 546 /* 547 * We can't allow the target to disconnect. 548 * This will be an untagged transaction and 549 * having the target disconnect will make this 550 * transaction indistinguishable from outstanding 551 * tagged transactions. 552 */ 553 hscb->control = 0; 554 555 /* 556 * This request sense could be because the 557 * device lost power or in some other 558 * way has lost our transfer negotiations. 559 * Renegotiate if appropriate. Unit attention 560 * errors will be reported before any data 561 * phases occur. 562 */ 563 if (ahc_get_residual(scb) 564 == ahc_get_transfer_length(scb)) { 565 ahc_update_neg_request(ahc, &devinfo, 566 tstate, targ_info, 567 AHC_NEG_IF_NON_ASYNC); 568 } 569 if (tstate->auto_negotiate & devinfo.target_mask) { 570 hscb->control |= MK_MESSAGE; 571 scb->flags &= ~SCB_NEGOTIATE; 572 scb->flags |= SCB_AUTO_NEGOTIATE; 573 } 574 hscb->cdb_len = sizeof(*sc); 575 hscb->dataptr = sg->addr; 576 hscb->datacnt = sg->len; 577 hscb->sgptr = 578 ahc_htole32(scb->sg_list_phys | SG_FULL_RESID); 579 scb->sg_count = 1; 580 scb->flags |= SCB_SENSE; 581 ahc_qinfifo_requeue_tail(ahc, scb); 582 ahc_outb(ahc, RETURN_1, SEND_SENSE); 583 /* 584 * Ensure we have enough time to actually 585 * retrieve the sense.
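 * The SCB timer is rearmed below (presumably to about five seconds,
 * if the 5 * 1000000 argument is in microseconds) so that the REQUEST
 * SENSE itself is not timed out prematurely.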
586 */ 587 ahc_scb_timer_reset(scb, 5 * 1000000); 588 break; 589 } 590 default: 591 break; 592 } 593 break; 594 } 595 case NO_MATCH: 596 { 597 /* Ensure we don't leave the selection hardware on */ 598 ahc_outb(ahc, SCSISEQ, 599 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 600 601 printf("%s:%c:%d: no active SCB for reconnecting " 602 "target - issuing BUS DEVICE RESET\n", 603 ahc_name(ahc), devinfo.channel, devinfo.target); 604 printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " 605 "ARG_1 == 0x%x ACCUM = 0x%x\n", 606 ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), 607 ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); 608 printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " 609 "SINDEX == 0x%x\n", 610 ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), 611 ahc_index_busy_tcl(ahc, 612 BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), 613 ahc_inb(ahc, SAVED_LUN))), 614 ahc_inb(ahc, SINDEX)); 615 printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " 616 "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", 617 ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), 618 ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), 619 ahc_inb(ahc, SCB_CONTROL)); 620 printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", 621 ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); 622 printf("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0)); 623 printf("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL)); 624 ahc_dump_card_state(ahc); 625 ahc->msgout_buf[0] = MSG_BUS_DEV_RESET; 626 ahc->msgout_len = 1; 627 ahc->msgout_index = 0; 628 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 629 ahc_outb(ahc, MSG_OUT, HOST_MSG); 630 ahc_assert_atn(ahc); 631 break; 632 } 633 case SEND_REJECT: 634 { 635 u_int rejbyte = ahc_inb(ahc, ACCUM); 636 printf("%s:%c:%d: Warning - unknown message received from " 637 "target (0x%x). Rejecting\n", 638 ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte); 639 break; 640 } 641 case PROTO_VIOLATION: 642 { 643 ahc_handle_proto_violation(ahc); 644 break; 645 } 646 case IGN_WIDE_RES: 647 ahc_handle_ign_wide_residue(ahc, &devinfo); 648 break; 649 case PDATA_REINIT: 650 ahc_reinitialize_dataptrs(ahc); 651 break; 652 case BAD_PHASE: 653 { 654 u_int lastphase; 655 656 lastphase = ahc_inb(ahc, LASTPHASE); 657 printf("%s:%c:%d: unknown scsi bus phase %x, " 658 "lastphase = 0x%x. Attempting to continue\n", 659 ahc_name(ahc), devinfo.channel, devinfo.target, 660 lastphase, ahc_inb(ahc, SCSISIGI)); 661 break; 662 } 663 case MISSED_BUSFREE: 664 { 665 u_int lastphase; 666 667 lastphase = ahc_inb(ahc, LASTPHASE); 668 printf("%s:%c:%d: Missed busfree. " 669 "Lastphase = 0x%x, Curphase = 0x%x\n", 670 ahc_name(ahc), devinfo.channel, devinfo.target, 671 lastphase, ahc_inb(ahc, SCSISIGI)); 672 ahc_restart(ahc); 673 return; 674 } 675 case HOST_MSG_LOOP: 676 { 677 /* 678 * The sequencer has encountered a message phase 679 * that requires host assistance for completion. 680 * While handling the message phase(s), we will be 681 * notified by the sequencer after each byte is 682 * transferred so we can track bus phase changes. 683 * 684 * If this is the first time we've seen a HOST_MSG_LOOP 685 * interrupt, initialize the state of the host message 686 * loop. 687 */ 688 if (ahc->msg_type == MSG_TYPE_NONE) { 689 struct scb *scb1; 690 u_int scb_index; 691 u_int bus_phase; 692 693 bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 694 if (bus_phase != P_MESGIN 695 && bus_phase != P_MESGOUT) { 696 printf("ahc_intr: HOST_MSG_LOOP bad " 697 "phase 0x%x\n", 698 bus_phase); 699 /* 700 * Probably transitioned to bus free before 701 * we got here. Just punt the message. 
702 */ 703 ahc_clear_intstat(ahc); 704 ahc_restart(ahc); 705 return; 706 } 707 708 scb_index = ahc_inb(ahc, SCB_TAG); 709 scb1 = ahc_lookup_scb(ahc, scb_index); 710 if (devinfo.role == ROLE_INITIATOR) { 711 if (scb1 == NULL) 712 panic("HOST_MSG_LOOP with " 713 "invalid SCB %x\n", scb_index); 714 715 if (bus_phase == P_MESGOUT) 716 ahc_setup_initiator_msgout(ahc, 717 &devinfo, 718 scb1); 719 else { 720 ahc->msg_type = 721 MSG_TYPE_INITIATOR_MSGIN; 722 ahc->msgin_index = 0; 723 } 724 } 725 #if AHC_TARGET_MODE 726 else { 727 if (bus_phase == P_MESGOUT) { 728 ahc->msg_type = 729 MSG_TYPE_TARGET_MSGOUT; 730 ahc->msgin_index = 0; 731 } 732 else 733 ahc_setup_target_msgin(ahc, 734 &devinfo, 735 scb1); 736 } 737 #endif 738 } 739 740 ahc_handle_message_phase(ahc); 741 break; 742 } 743 case PERR_DETECTED: 744 { 745 /* 746 * If we've cleared the parity error interrupt 747 * but the sequencer still believes that SCSIPERR 748 * is true, it must be that the parity error is 749 * for the currently presented byte on the bus, 750 * and we are not in a phase (data-in) where we will 751 * eventually ack this byte. Ack the byte and 752 * throw it away in the hope that the target will 753 * take us to message out to deliver the appropriate 754 * error message. 755 */ 756 if ((intstat & SCSIINT) == 0 757 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) { 758 759 if ((ahc->features & AHC_DT) == 0) { 760 u_int curphase; 761 762 /* 763 * The hardware will only let you ack bytes 764 * if the expected phase in SCSISIGO matches 765 * the current phase. Make sure this is 766 * currently the case. 767 */ 768 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 769 ahc_outb(ahc, LASTPHASE, curphase); 770 ahc_outb(ahc, SCSISIGO, curphase); 771 } 772 if ((ahc_inb(ahc, SCSISIGI) & (CDI|MSGI)) == 0) { 773 int wait; 774 775 /* 776 * In a data phase. Faster to bitbucket 777 * the data than to individually ack each 778 * byte. This is also the only strategy 779 * that will work with AUTOACK enabled. 780 */ 781 ahc_outb(ahc, SXFRCTL1, 782 ahc_inb(ahc, SXFRCTL1) | BITBUCKET); 783 wait = 5000; 784 while (--wait != 0) { 785 if ((ahc_inb(ahc, SCSISIGI) 786 & (CDI|MSGI)) != 0) 787 break; 788 ahc_delay(100); 789 } 790 ahc_outb(ahc, SXFRCTL1, 791 ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET); 792 if (wait == 0) { 793 struct scb *scb1; 794 u_int scb_index; 795 796 ahc_print_devinfo(ahc, &devinfo); 797 printf("Unable to clear parity error. " 798 "Resetting bus.\n"); 799 scb_index = ahc_inb(ahc, SCB_TAG); 800 scb1 = ahc_lookup_scb(ahc, scb_index); 801 if (scb1 != NULL) 802 ahc_set_transaction_status(scb1, 803 CAM_UNCOR_PARITY); 804 ahc_reset_channel(ahc, devinfo.channel, 805 /*init reset*/TRUE); 806 } 807 } else { 808 (void)ahc_inb(ahc, SCSIDATL); 809 } 810 } 811 break; 812 } 813 case DATA_OVERRUN: 814 { 815 /* 816 * When the sequencer detects an overrun, it 817 * places the controller in "BITBUCKET" mode 818 * and allows the target to complete its transfer. 819 * Unfortunately, none of the counters get updated 820 * when the controller is in this mode, so we have 821 * no way of knowing how large the overrun was. 822 */ 823 u_int scbindex = ahc_inb(ahc, SCB_TAG); 824 u_int lastphase = ahc_inb(ahc, LASTPHASE); 825 u_int i; 826 827 scb = ahc_lookup_scb(ahc, scbindex); 828 for (i = 0; i < num_phases; i++) { 829 if (lastphase == ahc_phase_table[i].phase) 830 break; 831 } 832 ahc_print_path(ahc, scb); 833 printf("data overrun detected %s." 
834 " Tag == 0x%x.\n", 835 ahc_phase_table[i].phasemsg, 836 scb->hscb->tag); 837 ahc_print_path(ahc, scb); 838 printf("%s seen Data Phase. Length = %ld. NumSGs = %d.\n", 839 ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't", 840 ahc_get_transfer_length(scb), scb->sg_count); 841 if (scb->sg_count > 0) { 842 for (i = 0; i < scb->sg_count; i++) { 843 844 printf("sg[%d] - Addr 0x%x%x : Length %d\n", 845 i, 846 (ahc_le32toh(scb->sg_list[i].len) >> 24 847 & SG_HIGH_ADDR_BITS), 848 ahc_le32toh(scb->sg_list[i].addr), 849 ahc_le32toh(scb->sg_list[i].len) 850 & AHC_SG_LEN_MASK); 851 } 852 } 853 /* 854 * Set this and it will take effect when the 855 * target does a command complete. 856 */ 857 ahc_freeze_devq(ahc, scb); 858 if ((scb->flags & SCB_SENSE) == 0) { 859 ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR); 860 } else { 861 scb->flags &= ~SCB_SENSE; 862 ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); 863 } 864 ahc_freeze_scb(scb); 865 866 if ((ahc->features & AHC_ULTRA2) != 0) { 867 /* 868 * Clear the channel in case we return 869 * to data phase later. 870 */ 871 ahc_outb(ahc, SXFRCTL0, 872 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN); 873 ahc_outb(ahc, SXFRCTL0, 874 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN); 875 } 876 if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { 877 u_int dscommand1; 878 879 /* Ensure HHADDR is 0 for future DMA operations. */ 880 dscommand1 = ahc_inb(ahc, DSCOMMAND1); 881 ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0); 882 ahc_outb(ahc, HADDR, 0); 883 ahc_outb(ahc, DSCOMMAND1, dscommand1); 884 } 885 break; 886 } 887 case MKMSG_FAILED: 888 { 889 u_int scbindex; 890 891 printf("%s:%c:%d:%d: Attempt to issue message failed\n", 892 ahc_name(ahc), devinfo.channel, devinfo.target, 893 devinfo.lun); 894 scbindex = ahc_inb(ahc, SCB_TAG); 895 scb = ahc_lookup_scb(ahc, scbindex); 896 if (scb != NULL 897 && (scb->flags & SCB_RECOVERY_SCB) != 0) 898 /* 899 * Ensure that we didn't put a second instance of this 900 * SCB into the QINFIFO. 901 */ 902 ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb), 903 SCB_GET_CHANNEL(ahc, scb), 904 SCB_GET_LUN(scb), scb->hscb->tag, 905 ROLE_INITIATOR, /*status*/0, 906 SEARCH_REMOVE); 907 break; 908 } 909 case NO_FREE_SCB: 910 { 911 printf("%s: No free or disconnected SCBs\n", ahc_name(ahc)); 912 ahc_dump_card_state(ahc); 913 panic("for safety"); 914 break; 915 } 916 case SCB_MISMATCH: 917 { 918 u_int scbptr; 919 920 scbptr = ahc_inb(ahc, SCBPTR); 921 printf("Bogus TAG after DMA. 
SCBPTR %d, tag %d, our tag %d\n", 922 scbptr, ahc_inb(ahc, ARG_1), 923 ahc->scb_data->hscbs[scbptr].tag); 924 ahc_dump_card_state(ahc); 925 panic("for safety"); 926 break; 927 } 928 case OUT_OF_RANGE: 929 { 930 printf("%s: BTT calculation out of range\n", ahc_name(ahc)); 931 printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " 932 "ARG_1 == 0x%x ACCUM = 0x%x\n", 933 ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), 934 ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); 935 printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " 936 "SINDEX == 0x%x\n, A == 0x%x\n", 937 ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), 938 ahc_index_busy_tcl(ahc, 939 BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), 940 ahc_inb(ahc, SAVED_LUN))), 941 ahc_inb(ahc, SINDEX), 942 ahc_inb(ahc, ACCUM)); 943 printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " 944 "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", 945 ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), 946 ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), 947 ahc_inb(ahc, SCB_CONTROL)); 948 printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", 949 ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); 950 ahc_dump_card_state(ahc); 951 panic("for safety"); 952 break; 953 } 954 default: 955 printf("ahc_intr: seqint, " 956 "intstat == 0x%x, scsisigi = 0x%x\n", 957 intstat, ahc_inb(ahc, SCSISIGI)); 958 break; 959 } 960 unpause: 961 /* 962 * The sequencer is paused immediately on 963 * a SEQINT, so we should restart it when 964 * we're done. 965 */ 966 ahc_unpause(ahc); 967 } 968 969 void 970 ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat) 971 { 972 u_int scb_index; 973 u_int status0; 974 u_int status; 975 struct scb *scb; 976 char cur_channel; 977 char intr_channel; 978 979 if ((ahc->features & AHC_TWIN) != 0 980 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0)) 981 cur_channel = 'B'; 982 else 983 cur_channel = 'A'; 984 intr_channel = cur_channel; 985 986 if ((ahc->features & AHC_ULTRA2) != 0) 987 status0 = ahc_inb(ahc, SSTAT0) & IOERR; 988 else 989 status0 = 0; 990 status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); 991 if (status == 0 && status0 == 0) { 992 if ((ahc->features & AHC_TWIN) != 0) { 993 /* Try the other channel */ 994 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); 995 status = ahc_inb(ahc, SSTAT1) 996 & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); 997 intr_channel = (cur_channel == 'A') ? 'B' : 'A'; 998 } 999 if (status == 0) { 1000 printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc)); 1001 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1002 ahc_unpause(ahc); 1003 return; 1004 } 1005 } 1006 1007 /* Make sure the sequencer is in a safe location. */ 1008 ahc_clear_critical_section(ahc); 1009 1010 scb_index = ahc_inb(ahc, SCB_TAG); 1011 scb = ahc_lookup_scb(ahc, scb_index); 1012 if (scb != NULL 1013 && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) != 0) 1014 scb = NULL; 1015 1016 if ((ahc->features & AHC_ULTRA2) != 0 1017 && (status0 & IOERR) != 0) { 1018 int now_lvd; 1019 1020 now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40; 1021 printf("%s: Transceiver State Has Changed to %s mode\n", 1022 ahc_name(ahc), now_lvd ? "LVD" : "SE"); 1023 ahc_outb(ahc, CLRSINT0, CLRIOERR); 1024 /* 1025 * When transitioning to SE mode, the reset line 1026 * glitches, triggering an arbitration bug in some 1027 * Ultra2 controllers. This bug is cleared when we 1028 * assert the reset line. Since a reset glitch has 1029 * already occurred with this transition and a 1030 * transceiver state change is handled just like 1031 * a bus reset anyway, asserting the reset line 1032 * ourselves is safe.
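 * Hence the channel reset below only initiates a bus reset when the
 * new state is single-ended (now_lvd == 0); a transition to LVD resets
 * the channel without asserting SCSI RST ourselves.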
1033 */ 1034 ahc_reset_channel(ahc, intr_channel, 1035 /*Initiate Reset*/now_lvd == 0); 1036 } else if ((status & SCSIRSTI) != 0) { 1037 printf("%s: Someone reset channel %c\n", 1038 ahc_name(ahc), intr_channel); 1039 if (intr_channel != cur_channel) 1040 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); 1041 ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE); 1042 } else if ((status & SCSIPERR) != 0) { 1043 /* 1044 * Determine the bus phase and queue an appropriate message. 1045 * SCSIPERR is latched true as soon as a parity error 1046 * occurs. If the sequencer acked the transfer that 1047 * caused the parity error and the currently presented 1048 * transfer on the bus has correct parity, SCSIPERR will 1049 * be cleared by CLRSCSIPERR. Use this to determine if 1050 * we should look at the last phase the sequencer recorded, 1051 * or the current phase presented on the bus. 1052 */ 1053 u_int mesg_out; 1054 u_int curphase; 1055 u_int errorphase; 1056 u_int lastphase; 1057 u_int scsirate; 1058 u_int i; 1059 u_int sstat2; 1060 int silent; 1061 1062 lastphase = ahc_inb(ahc, LASTPHASE); 1063 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 1064 sstat2 = ahc_inb(ahc, SSTAT2); 1065 ahc_outb(ahc, CLRSINT1, CLRSCSIPERR); 1066 /* 1067 * For all phases save DATA, the sequencer won't 1068 * automatically ack a byte that has a parity error 1069 * in it. So the only way that the current phase 1070 * could be 'data-in' is if the parity error is for 1071 * an already acked byte in the data phase. During 1072 * synchronous data-in transfers, we may actually 1073 * ack bytes before latching the current phase in 1074 * LASTPHASE, leading to the discrepancy between 1075 * curphase and lastphase. 1076 */ 1077 if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0 1078 || curphase == P_DATAIN || curphase == P_DATAIN_DT) 1079 errorphase = curphase; 1080 else 1081 errorphase = lastphase; 1082 1083 for (i = 0; i < num_phases; i++) { 1084 if (errorphase == ahc_phase_table[i].phase) 1085 break; 1086 } 1087 mesg_out = ahc_phase_table[i].mesg_out; 1088 silent = FALSE; 1089 if (scb != NULL) { 1090 if (SCB_IS_SILENT(scb)) 1091 silent = TRUE; 1092 else 1093 ahc_print_path(ahc, scb); 1094 scb->flags |= SCB_TRANSMISSION_ERROR; 1095 } else 1096 printf("%s:%c:%d: ", ahc_name(ahc), intr_channel, 1097 SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID))); 1098 scsirate = ahc_inb(ahc, SCSIRATE); 1099 if (silent == FALSE) { 1100 printf("parity error detected %s. " 1101 "SEQADDR(0x%x) SCSIRATE(0x%x)\n", 1102 ahc_phase_table[i].phasemsg, 1103 ahc_inw(ahc, SEQADDR0), 1104 scsirate); 1105 if ((ahc->features & AHC_DT) != 0) { 1106 if ((sstat2 & CRCVALERR) != 0) 1107 printf("\tCRC Value Mismatch\n"); 1108 if ((sstat2 & CRCENDERR) != 0) 1109 printf("\tNo terminal CRC packet " 1110 "received\n"); 1111 if ((sstat2 & CRCREQERR) != 0) 1112 printf("\tIllegal CRC packet " 1113 "request\n"); 1114 if ((sstat2 & DUAL_EDGE_ERR) != 0) 1115 printf("\tUnexpected %sDT Data Phase\n", 1116 (scsirate & SINGLE_EDGE) 1117 ? "" : "non-"); 1118 } 1119 } 1120 1121 if ((ahc->features & AHC_DT) != 0 1122 && (sstat2 & DUAL_EDGE_ERR) != 0) { 1123 /* 1124 * This error applies regardless of 1125 * data direction, so ignore the value 1126 * in the phase table. 1127 */ 1128 mesg_out = MSG_INITIATOR_DET_ERR; 1129 } 1130 1131 /* 1132 * We've set the hardware to assert ATN if we 1133 * get a parity error on "in" phases, so all we 1134 * need to do is stuff the message buffer with 1135 * the appropriate message.
"In" phases have set 1136 * mesg_out to something other than MSG_NOOP. 1137 */ 1138 if (mesg_out != MSG_NOOP) { 1139 if (ahc->msg_type != MSG_TYPE_NONE) 1140 ahc->send_msg_perror = TRUE; 1141 else 1142 ahc_outb(ahc, MSG_OUT, mesg_out); 1143 } 1144 /* 1145 * Force a renegotiation with this target just in 1146 * case we are out of sync for some external reason 1147 * unknown (or unreported) by the target. 1148 */ 1149 ahc_force_renegotiation(ahc); 1150 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1151 ahc_unpause(ahc); 1152 } else if ((status & SELTO) != 0) { 1153 u_int scbptr; 1154 1155 /* Stop the selection */ 1156 ahc_outb(ahc, SCSISEQ, 0); 1157 1158 /* No more pending messages */ 1159 ahc_clear_msg_state(ahc); 1160 1161 /* Clear interrupt state */ 1162 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); 1163 ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR); 1164 1165 /* 1166 * Although the driver does not care about the 1167 * 'Selection in Progress' status bit, the busy 1168 * LED does. SELINGO is only cleared by a successful 1169 * selection, so we must manually clear it to ensure 1170 * the LED turns off just in case no future successful 1171 * selections occur (e.g. no devices on the bus). 1172 */ 1173 ahc_outb(ahc, CLRSINT0, CLRSELINGO); 1174 1175 scbptr = ahc_inb(ahc, WAITING_SCBH); 1176 ahc_outb(ahc, SCBPTR, scbptr); 1177 scb_index = ahc_inb(ahc, SCB_TAG); 1178 1179 scb = ahc_lookup_scb(ahc, scb_index); 1180 if (scb == NULL) { 1181 printf("%s: ahc_intr - referenced scb not " 1182 "valid during SELTO scb(%d, %d)\n", 1183 ahc_name(ahc), scbptr, scb_index); 1184 ahc_dump_card_state(ahc); 1185 } else { 1186 #ifdef AHC_DEBUG 1187 if ((ahc_debug & AHC_SHOW_SELTO) != 0) { 1188 ahc_print_path(ahc, scb); 1189 printf("Saw Selection Timeout for SCB 0x%x\n", 1190 scb_index); 1191 } 1192 #endif 1193 /* 1194 * Force a renegotiation with this target just in 1195 * case the cable was pulled and will later be 1196 * re-attached. The target may forget its negotiation 1197 * settings with us should it attempt to reselect 1198 * during the interruption. The target will not issue 1199 * a unit attention in this case, so we must always 1200 * renegotiate. 1201 */ 1202 ahc_force_renegotiation(ahc); 1203 ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT); 1204 ahc_freeze_devq(ahc, scb); 1205 } 1206 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1207 ahc_restart(ahc); 1208 } else if ((status & BUSFREE) != 0 1209 && (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) { 1210 u_int lastphase; 1211 u_int saved_scsiid; 1212 u_int saved_lun; 1213 u_int target; 1214 u_int initiator_role_id; 1215 char channel; 1216 int printerror; 1217 1218 /* 1219 * Clear our selection hardware as soon as possible. 1220 * We may have an entry in the waiting Q for this target, 1221 * that is affected by this busfree and we don't want to 1222 * go about selecting the target while we handle the event. 1223 */ 1224 ahc_outb(ahc, SCSISEQ, 1225 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 1226 1227 /* 1228 * Disable busfree interrupts and clear the busfree 1229 * interrupt status. We do this here so that several 1230 * bus transactions occur prior to clearing the SCSIINT 1231 * latch. It can take a bit for the clearing to take effect. 1232 */ 1233 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); 1234 ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR); 1235 1236 /* 1237 * Look at what phase we were last in. 1238 * If it's message out, chances are pretty good 1239 * that the busfree was in response to one of 1240 * our abort requests.
1241 */ 1242 lastphase = ahc_inb(ahc, LASTPHASE); 1243 saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); 1244 saved_lun = ahc_inb(ahc, SAVED_LUN); 1245 target = SCSIID_TARGET(ahc, saved_scsiid); 1246 initiator_role_id = SCSIID_OUR_ID(saved_scsiid); 1247 channel = SCSIID_CHANNEL(ahc, saved_scsiid); 1248 printerror = 1; 1249 1250 if (lastphase == P_MESGOUT) { 1251 struct ahc_devinfo devinfo; 1252 u_int tag; 1253 1254 ahc_fetch_devinfo(ahc, &devinfo); 1255 tag = SCB_LIST_NULL; 1256 if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT_TAG, TRUE) 1257 || ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT, TRUE)) { 1258 if (ahc->msgout_buf[ahc->msgout_index - 1] 1259 == MSG_ABORT_TAG) 1260 tag = scb->hscb->tag; 1261 ahc_print_path(ahc, scb); 1262 printf("SCB %d - Abort%s Completed.\n", 1263 scb->hscb->tag, tag == SCB_LIST_NULL ? 1264 "" : " Tag"); 1265 ahc_abort_scbs(ahc, target, channel, 1266 saved_lun, tag, 1267 ROLE_INITIATOR, 1268 CAM_REQ_ABORTED); 1269 printerror = 0; 1270 } else if (ahc_sent_msg(ahc, AHCMSG_1B, 1271 MSG_BUS_DEV_RESET, TRUE)) { 1272 #ifdef __FreeBSD__ 1273 /* 1274 * Don't mark the user's request for this BDR 1275 * as completing with CAM_BDR_SENT. CAM3 1276 * specifies CAM_REQ_CMP. 1277 */ 1278 if (scb != NULL 1279 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV 1280 && ahc_match_scb(ahc, scb, target, channel, 1281 CAM_LUN_WILDCARD, 1282 SCB_LIST_NULL, 1283 ROLE_INITIATOR)) { 1284 ahc_set_transaction_status(scb, 1285 CAM_REQ_CMP); 1286 } 1287 #endif 1288 ahc_compile_devinfo(&devinfo, 1289 initiator_role_id, 1290 target, 1291 CAM_LUN_WILDCARD, 1292 channel, 1293 ROLE_INITIATOR); 1294 ahc_handle_devreset(ahc, &devinfo, 1295 CAM_BDR_SENT, 1296 "Bus Device Reset", 1297 /*verbose_level*/0); 1298 printerror = 0; 1299 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, 1300 MSG_EXT_PPR, FALSE)) { 1301 struct ahc_initiator_tinfo *tinfo; 1302 struct ahc_tmode_tstate *tstate; 1303 1304 /* 1305 * PPR Rejected. Try non-ppr negotiation 1306 * and retry command. 1307 */ 1308 tinfo = ahc_fetch_transinfo(ahc, 1309 devinfo.channel, 1310 devinfo.our_scsiid, 1311 devinfo.target, 1312 &tstate); 1313 tinfo->curr.transport_version = 2; 1314 tinfo->goal.transport_version = 2; 1315 tinfo->goal.ppr_options = 0; 1316 ahc_qinfifo_requeue_tail(ahc, scb); 1317 printerror = 0; 1318 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, 1319 MSG_EXT_WDTR, FALSE) 1320 || ahc_sent_msg(ahc, AHCMSG_EXT, 1321 MSG_EXT_SDTR, FALSE)) { 1322 /* 1323 * Negotiation Rejected. Go-async and 1324 * retry command. 1325 */ 1326 ahc_set_width(ahc, &devinfo, 1327 MSG_EXT_WDTR_BUS_8_BIT, 1328 AHC_TRANS_CUR|AHC_TRANS_GOAL, 1329 /*paused*/TRUE); 1330 ahc_set_syncrate(ahc, &devinfo, 1331 /*syncrate*/NULL, 1332 /*period*/0, /*offset*/0, 1333 /*ppr_options*/0, 1334 AHC_TRANS_CUR|AHC_TRANS_GOAL, 1335 /*paused*/TRUE); 1336 ahc_qinfifo_requeue_tail(ahc, scb); 1337 printerror = 0; 1338 } 1339 } 1340 if (printerror != 0) { 1341 u_int i; 1342 1343 if (scb != NULL) { 1344 u_int tag; 1345 1346 if ((scb->hscb->control & TAG_ENB) != 0) 1347 tag = scb->hscb->tag; 1348 else 1349 tag = SCB_LIST_NULL; 1350 ahc_print_path(ahc, scb); 1351 ahc_abort_scbs(ahc, target, channel, 1352 SCB_GET_LUN(scb), tag, 1353 ROLE_INITIATOR, 1354 CAM_UNEXP_BUSFREE); 1355 } else { 1356 /* 1357 * We had not fully identified this connection, 1358 * so we cannot abort anything. 
1359 */ 1360 printf("%s: ", ahc_name(ahc)); 1361 } 1362 for (i = 0; i < num_phases; i++) { 1363 if (lastphase == ahc_phase_table[i].phase) 1364 break; 1365 } 1366 /* 1367 * Renegotiate with this device at the 1368 * next opportunity just in case this busfree 1369 * is due to a negotiation mismatch with the 1370 * device. 1371 */ 1372 ahc_force_renegotiation(ahc); 1373 printf("Unexpected busfree %s\n" 1374 "SEQADDR == 0x%x\n", 1375 ahc_phase_table[i].phasemsg, 1376 ahc_inb(ahc, SEQADDR0) 1377 | (ahc_inb(ahc, SEQADDR1) << 8)); 1378 } 1379 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1380 ahc_restart(ahc); 1381 } else { 1382 printf("%s: Missing case in ahc_handle_scsiint. status = %x\n", 1383 ahc_name(ahc), status); 1384 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1385 } 1386 } 1387 1388 /* 1389 * Force renegotiation to occur the next time we initiate 1390 * a command to the current device. 1391 */ 1392 static void 1393 ahc_force_renegotiation(struct ahc_softc *ahc) 1394 { 1395 struct ahc_devinfo devinfo; 1396 struct ahc_initiator_tinfo *targ_info; 1397 struct ahc_tmode_tstate *tstate; 1398 1399 ahc_fetch_devinfo(ahc, &devinfo); 1400 targ_info = ahc_fetch_transinfo(ahc, 1401 devinfo.channel, 1402 devinfo.our_scsiid, 1403 devinfo.target, 1404 &tstate); 1405 ahc_update_neg_request(ahc, &devinfo, tstate, 1406 targ_info, AHC_NEG_IF_NON_ASYNC); 1407 } 1408 1409 #define AHC_MAX_STEPS 2000 1410 void 1411 ahc_clear_critical_section(struct ahc_softc *ahc) 1412 { 1413 int stepping; 1414 int steps; 1415 u_int simode0; 1416 u_int simode1; 1417 1418 if (ahc->num_critical_sections == 0) 1419 return; 1420 1421 stepping = FALSE; 1422 steps = 0; 1423 simode0 = 0; 1424 simode1 = 0; 1425 for (;;) { 1426 struct cs *cs; 1427 u_int seqaddr; 1428 u_int i; 1429 1430 seqaddr = ahc_inb(ahc, SEQADDR0) 1431 | (ahc_inb(ahc, SEQADDR1) << 8); 1432 1433 /* 1434 * Seqaddr represents the next instruction to execute, 1435 * so we are really executing the instruction just 1436 * before it. 1437 */ 1438 if (seqaddr != 0) 1439 seqaddr -= 1; 1440 cs = ahc->critical_sections; 1441 for (i = 0; i < ahc->num_critical_sections; i++, cs++) { 1442 1443 if (cs->begin < seqaddr && cs->end >= seqaddr) 1444 break; 1445 } 1446 1447 if (i == ahc->num_critical_sections) 1448 break; 1449 1450 if (steps > AHC_MAX_STEPS) { 1451 printf("%s: Infinite loop in critical section\n", 1452 ahc_name(ahc)); 1453 ahc_dump_card_state(ahc); 1454 panic("critical section loop"); 1455 } 1456 1457 steps++; 1458 if (stepping == FALSE) { 1459 1460 /* 1461 * Disable all interrupt sources so that the 1462 * sequencer will not be stuck by a pausing 1463 * interrupt condition while we attempt to 1464 * leave a critical section. 1465 */ 1466 simode0 = ahc_inb(ahc, SIMODE0); 1467 ahc_outb(ahc, SIMODE0, 0); 1468 simode1 = ahc_inb(ahc, SIMODE1); 1469 if ((ahc->features & AHC_DT) != 0) 1470 /* 1471 * On DT class controllers, we 1472 * use the enhanced busfree logic. 1473 * Unfortunately we cannot re-enable 1474 * busfree detection within the 1475 * current connection, so we must 1476 * leave it on while single stepping. 
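 * Instead, the latched busfree status (CLRBUSFREE) and SCSIINT are
 * cleared on every pass through the single-step loop below.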
1477 */ 1478 ahc_outb(ahc, SIMODE1, ENBUSFREE); 1479 else 1480 ahc_outb(ahc, SIMODE1, 0); 1481 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1482 ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) | STEP); 1483 stepping = TRUE; 1484 } 1485 if ((ahc->features & AHC_DT) != 0) { 1486 ahc_outb(ahc, CLRSINT1, CLRBUSFREE); 1487 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1488 } 1489 ahc_outb(ahc, HCNTRL, ahc->unpause); 1490 while (!ahc_is_paused(ahc)) 1491 ahc_delay(200); 1492 } 1493 if (stepping) { 1494 ahc_outb(ahc, SIMODE0, simode0); 1495 ahc_outb(ahc, SIMODE1, simode1); 1496 ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) & ~STEP); 1497 } 1498 } 1499 1500 /* 1501 * Clear any pending interrupt status. 1502 */ 1503 void 1504 ahc_clear_intstat(struct ahc_softc *ahc) 1505 { 1506 /* Clear any interrupt conditions this may have caused */ 1507 ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI 1508 |CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG| 1509 CLRREQINIT); 1510 ahc_flush_device_writes(ahc); 1511 ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO); 1512 ahc_flush_device_writes(ahc); 1513 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1514 ahc_flush_device_writes(ahc); 1515 } 1516 1517 /**************************** Debugging Routines ******************************/ 1518 #ifdef AHC_DEBUG 1519 uint32_t ahc_debug = 0; /* AHC_SHOW_MISC|AHC_SHOW_SENSE|AHC_DEBUG_OPTS;*/ 1520 #endif 1521 1522 void 1523 ahc_print_scb(struct scb *scb) 1524 { 1525 int i; 1526 1527 struct hardware_scb *hscb = scb->hscb; 1528 1529 printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n", 1530 (void *)scb, 1531 hscb->control, 1532 hscb->scsiid, 1533 hscb->lun, 1534 hscb->cdb_len); 1535 printf("Shared Data: "); 1536 for (i = 0; i < sizeof(hscb->shared_data.cdb); i++) 1537 printf("%#02x", hscb->shared_data.cdb[i]); 1538 printf(" dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n", 1539 ahc_le32toh(hscb->dataptr), 1540 ahc_le32toh(hscb->datacnt), 1541 ahc_le32toh(hscb->sgptr), 1542 hscb->tag); 1543 if (scb->sg_count > 0) { 1544 for (i = 0; i < scb->sg_count; i++) { 1545 printf("sg[%d] - Addr 0x%x%x : Length %d\n", 1546 i, 1547 (ahc_le32toh(scb->sg_list[i].len) >> 24 1548 & SG_HIGH_ADDR_BITS), 1549 ahc_le32toh(scb->sg_list[i].addr), 1550 ahc_le32toh(scb->sg_list[i].len)); 1551 } 1552 } 1553 } 1554 1555 /************************* Transfer Negotiation *******************************/ 1556 /* 1557 * Allocate per target mode instance (ID we respond to as a target) 1558 * transfer negotiation data structures. 1559 */ 1560 static struct ahc_tmode_tstate * 1561 ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel) 1562 { 1563 struct ahc_tmode_tstate *master_tstate; 1564 struct ahc_tmode_tstate *tstate; 1565 int i; 1566 1567 master_tstate = ahc->enabled_targets[ahc->our_id]; 1568 if (channel == 'B') { 1569 scsi_id += 8; 1570 master_tstate = ahc->enabled_targets[ahc->our_id_b + 8]; 1571 } 1572 if (ahc->enabled_targets[scsi_id] != NULL 1573 && ahc->enabled_targets[scsi_id] != master_tstate) 1574 panic("%s: ahc_alloc_tstate - Target already allocated", 1575 ahc_name(ahc)); 1576 tstate = malloc(sizeof(*tstate), M_DEVBUF, M_NOWAIT); 1577 if (tstate == NULL) 1578 return (NULL); 1579 1580 /* 1581 * If we have allocated a master tstate, copy user settings from 1582 * the master tstate (taken from SRAM or the EEPROM) for this 1583 * channel, but reset our current and goal settings to async/narrow 1584 * until an initiator talks to us. 
1585 */ 1586 if (master_tstate != NULL) { 1587 memcpy(tstate, master_tstate, sizeof(*tstate)); 1588 memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns)); 1589 tstate->ultraenb = 0; 1590 for (i = 0; i < AHC_NUM_TARGETS; i++) { 1591 memset(&tstate->transinfo[i].curr, 0, 1592 sizeof(tstate->transinfo[i].curr)); 1593 memset(&tstate->transinfo[i].goal, 0, 1594 sizeof(tstate->transinfo[i].goal)); 1595 } 1596 } else 1597 memset(tstate, 0, sizeof(*tstate)); 1598 ahc->enabled_targets[scsi_id] = tstate; 1599 return (tstate); 1600 } 1601 1602 #ifdef AHC_TARGET_MODE 1603 /* 1604 * Free per target mode instance (ID we respond to as a target) 1605 * transfer negotiation data structures. 1606 */ 1607 static void 1608 ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force) 1609 { 1610 struct ahc_tmode_tstate *tstate; 1611 1612 /* 1613 * Don't clean up our "master" tstate. 1614 * It has our default user settings. 1615 */ 1616 if (((channel == 'B' && scsi_id == ahc->our_id_b) 1617 || (channel == 'A' && scsi_id == ahc->our_id)) 1618 && force == FALSE) 1619 return; 1620 1621 if (channel == 'B') 1622 scsi_id += 8; 1623 tstate = ahc->enabled_targets[scsi_id]; 1624 if (tstate != NULL) 1625 free(tstate, M_DEVBUF); 1626 ahc->enabled_targets[scsi_id] = NULL; 1627 } 1628 #endif 1629 1630 /* 1631 * Called when we have an active connection to a target on the bus, 1632 * this function finds the nearest syncrate to the input period limited 1633 * by the capabilities of the bus connectivity of and sync settings for 1634 * the target. 1635 */ 1636 struct ahc_syncrate * 1637 ahc_devlimited_syncrate(struct ahc_softc *ahc, 1638 struct ahc_initiator_tinfo *tinfo, 1639 u_int *period, u_int *ppr_options, role_t role) 1640 { 1641 struct ahc_transinfo *transinfo; 1642 u_int maxsync; 1643 1644 if ((ahc->features & AHC_ULTRA2) != 0) { 1645 if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0 1646 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) { 1647 maxsync = AHC_SYNCRATE_DT; 1648 } else { 1649 maxsync = AHC_SYNCRATE_ULTRA; 1650 /* Can't do DT on an SE bus */ 1651 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1652 } 1653 } else if ((ahc->features & AHC_ULTRA) != 0) { 1654 maxsync = AHC_SYNCRATE_ULTRA; 1655 } else { 1656 maxsync = AHC_SYNCRATE_FAST; 1657 } 1658 /* 1659 * Never allow a value higher than our current goal 1660 * period otherwise we may allow a target initiated 1661 * negotiation to go above the limit as set by the 1662 * user. In the case of an initiator initiated 1663 * sync negotiation, we limit based on the user 1664 * setting. This allows the system to still accept 1665 * incoming negotiations even if target initiated 1666 * negotiation is not performed. 1667 */ 1668 if (role == ROLE_TARGET) 1669 transinfo = &tinfo->user; 1670 else 1671 transinfo = &tinfo->goal; 1672 *ppr_options &= transinfo->ppr_options; 1673 if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) { 1674 maxsync = MAX(maxsync, AHC_SYNCRATE_ULTRA2); 1675 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1676 } 1677 if (transinfo->period == 0) { 1678 *period = 0; 1679 *ppr_options = 0; 1680 return (NULL); 1681 } 1682 *period = MAX(*period, transinfo->period); 1683 return (ahc_find_syncrate(ahc, period, ppr_options, maxsync)); 1684 } 1685 1686 /* 1687 * Look up the valid period to SCSIRATE conversion in our table. 1688 * Return the period and offset that should be sent to the target 1689 * if this was the beginning of an SDTR. 
1690 */ 1691 struct ahc_syncrate * 1692 ahc_find_syncrate(struct ahc_softc *ahc, u_int *period, 1693 u_int *ppr_options, u_int maxsync) 1694 { 1695 struct ahc_syncrate *syncrate; 1696 1697 if ((ahc->features & AHC_DT) == 0) 1698 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1699 1700 /* Skip all DT only entries if DT is not available */ 1701 if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0 1702 && maxsync < AHC_SYNCRATE_ULTRA2) 1703 maxsync = AHC_SYNCRATE_ULTRA2; 1704 1705 for (syncrate = &ahc_syncrates[maxsync]; 1706 syncrate->rate != NULL; 1707 syncrate++) { 1708 1709 /* 1710 * The Ultra2 table doesn't go as low 1711 * as for the Fast/Ultra cards. 1712 */ 1713 if ((ahc->features & AHC_ULTRA2) != 0 1714 && (syncrate->sxfr_u2 == 0)) 1715 break; 1716 1717 if (*period <= syncrate->period) { 1718 /* 1719 * When responding to a target that requests 1720 * sync, the requested rate may fall between 1721 * two rates that we can output, but still be 1722 * a rate that we can receive. Because of this, 1723 * we want to respond to the target with 1724 * the same rate that it sent to us even 1725 * if the period we use to send data to it 1726 * is lower. Only lower the response period 1727 * if we must. 1728 */ 1729 if (syncrate == &ahc_syncrates[maxsync]) 1730 *period = syncrate->period; 1731 1732 /* 1733 * At some speeds, we only support 1734 * ST transfers. 1735 */ 1736 if ((syncrate->sxfr_u2 & ST_SXFR) != 0) 1737 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1738 break; 1739 } 1740 } 1741 1742 if ((*period == 0) 1743 || (syncrate->rate == NULL) 1744 || ((ahc->features & AHC_ULTRA2) != 0 1745 && (syncrate->sxfr_u2 == 0))) { 1746 /* Use asynchronous transfers. */ 1747 *period = 0; 1748 syncrate = NULL; 1749 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1750 } 1751 return (syncrate); 1752 } 1753 1754 /* 1755 * Convert from an entry in our syncrate table to the SCSI equivalent 1756 * sync "period" factor. 1757 */ 1758 u_int 1759 ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync) 1760 { 1761 struct ahc_syncrate *syncrate; 1762 1763 if ((ahc->features & AHC_ULTRA2) != 0) 1764 scsirate &= SXFR_ULTRA2; 1765 else 1766 scsirate &= SXFR; 1767 1768 syncrate = &ahc_syncrates[maxsync]; 1769 while (syncrate->rate != NULL) { 1770 1771 if ((ahc->features & AHC_ULTRA2) != 0) { 1772 if (syncrate->sxfr_u2 == 0) 1773 break; 1774 else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2)) 1775 return (syncrate->period); 1776 } else if (scsirate == (syncrate->sxfr & SXFR)) { 1777 return (syncrate->period); 1778 } 1779 syncrate++; 1780 } 1781 return (0); /* async */ 1782 } 1783 1784 /* 1785 * Truncate the given synchronous offset to a value the 1786 * current adapter type and syncrate are capable of. 
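 * A NULL syncrate means the connection is asynchronous, in which case
 * the offset is forced to zero.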
1787 */ 1788 void 1789 ahc_validate_offset(struct ahc_softc *ahc, 1790 struct ahc_initiator_tinfo *tinfo, 1791 struct ahc_syncrate *syncrate, 1792 u_int *offset, int wide, role_t role) 1793 { 1794 u_int maxoffset; 1795 1796 /* Limit offset to what we can do */ 1797 if (syncrate == NULL) { 1798 maxoffset = 0; 1799 } else if ((ahc->features & AHC_ULTRA2) != 0) { 1800 maxoffset = MAX_OFFSET_ULTRA2; 1801 } else { 1802 if (wide) 1803 maxoffset = MAX_OFFSET_16BIT; 1804 else 1805 maxoffset = MAX_OFFSET_8BIT; 1806 } 1807 *offset = MIN(*offset, maxoffset); 1808 if (tinfo != NULL) { 1809 if (role == ROLE_TARGET) 1810 *offset = MIN(*offset, tinfo->user.offset); 1811 else 1812 *offset = MIN(*offset, tinfo->goal.offset); 1813 } 1814 } 1815 1816 /* 1817 * Truncate the given transfer width parameter to a value the 1818 * current adapter type is capable of. 1819 */ 1820 void 1821 ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, 1822 u_int *bus_width, role_t role) 1823 { 1824 switch (*bus_width) { 1825 default: 1826 if (ahc->features & AHC_WIDE) { 1827 /* Respond Wide */ 1828 *bus_width = MSG_EXT_WDTR_BUS_16_BIT; 1829 break; 1830 } 1831 /* FALLTHROUGH */ 1832 case MSG_EXT_WDTR_BUS_8_BIT: 1833 *bus_width = MSG_EXT_WDTR_BUS_8_BIT; 1834 break; 1835 } 1836 if (tinfo != NULL) { 1837 if (role == ROLE_TARGET) 1838 *bus_width = MIN(tinfo->user.width, *bus_width); 1839 else 1840 *bus_width = MIN(tinfo->goal.width, *bus_width); 1841 } 1842 } 1843 1844 /* 1845 * Update the bitmask of targets for which the controller should 1846 * negotiate with at the next convenient opportunity. This currently 1847 * means the next time we send the initial identify messages for 1848 * a new transaction. 1849 */ 1850 int 1851 ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 1852 struct ahc_tmode_tstate *tstate, 1853 struct ahc_initiator_tinfo *tinfo, ahc_neg_type neg_type) 1854 { 1855 u_int auto_negotiate_orig; 1856 1857 auto_negotiate_orig = tstate->auto_negotiate; 1858 if (neg_type == AHC_NEG_ALWAYS) { 1859 /* 1860 * Force our "current" settings to be 1861 * unknown so that unless a bus reset 1862 * occurs the need to renegotiate is 1863 * recorded persistently. 1864 */ 1865 if ((ahc->features & AHC_WIDE) != 0) 1866 tinfo->curr.width = AHC_WIDTH_UNKNOWN; 1867 tinfo->curr.period = AHC_PERIOD_UNKNOWN; 1868 tinfo->curr.offset = AHC_OFFSET_UNKNOWN; 1869 } 1870 if (tinfo->curr.period != tinfo->goal.period 1871 || tinfo->curr.width != tinfo->goal.width 1872 || tinfo->curr.offset != tinfo->goal.offset 1873 || tinfo->curr.ppr_options != tinfo->goal.ppr_options 1874 || (neg_type == AHC_NEG_IF_NON_ASYNC 1875 && (tinfo->goal.offset != 0 1876 || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT 1877 || tinfo->goal.ppr_options != 0))) 1878 tstate->auto_negotiate |= devinfo->target_mask; 1879 else 1880 tstate->auto_negotiate &= ~devinfo->target_mask; 1881 1882 return (auto_negotiate_orig != tstate->auto_negotiate); 1883 } 1884 1885 /* 1886 * Update the user/goal/curr tables of synchronous negotiation 1887 * parameters as well as, in the case of a current or active update, 1888 * any data structures on the host controller. In the case of an 1889 * active update, the specified target is currently talking to us on 1890 * the bus, so the transfer parameter update must take effect 1891 * immediately. 
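 * The 'type' argument selects which of those tables are updated
 * (AHC_TRANS_USER, AHC_TRANS_GOAL, AHC_TRANS_CUR); for an active update
 * the new settings are also written to SCSIRATE (and SCSIOFFSET on
 * Ultra2 parts) before we return.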
1892 */ 1893 void 1894 ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 1895 struct ahc_syncrate *syncrate, u_int period, 1896 u_int offset, u_int ppr_options, u_int type, int paused) 1897 { 1898 struct ahc_initiator_tinfo *tinfo; 1899 struct ahc_tmode_tstate *tstate; 1900 u_int old_period; 1901 u_int old_offset; 1902 u_int old_ppr; 1903 int active; 1904 int update_needed; 1905 1906 active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; 1907 update_needed = 0; 1908 1909 if (syncrate == NULL) { 1910 period = 0; 1911 offset = 0; 1912 } 1913 1914 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 1915 devinfo->target, &tstate); 1916 1917 if ((type & AHC_TRANS_USER) != 0) { 1918 tinfo->user.period = period; 1919 tinfo->user.offset = offset; 1920 tinfo->user.ppr_options = ppr_options; 1921 } 1922 1923 if ((type & AHC_TRANS_GOAL) != 0) { 1924 tinfo->goal.period = period; 1925 tinfo->goal.offset = offset; 1926 tinfo->goal.ppr_options = ppr_options; 1927 } 1928 1929 old_period = tinfo->curr.period; 1930 old_offset = tinfo->curr.offset; 1931 old_ppr = tinfo->curr.ppr_options; 1932 1933 if ((type & AHC_TRANS_CUR) != 0 1934 && (old_period != period 1935 || old_offset != offset 1936 || old_ppr != ppr_options)) { 1937 u_int scsirate; 1938 1939 update_needed++; 1940 scsirate = tinfo->scsirate; 1941 if ((ahc->features & AHC_ULTRA2) != 0) { 1942 1943 scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC); 1944 if (syncrate != NULL) { 1945 scsirate |= syncrate->sxfr_u2; 1946 if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) 1947 scsirate |= ENABLE_CRC; 1948 else 1949 scsirate |= SINGLE_EDGE; 1950 } 1951 } else { 1952 1953 scsirate &= ~(SXFR|SOFS); 1954 /* 1955 * Ensure Ultra mode is set properly for 1956 * this target. 1957 */ 1958 tstate->ultraenb &= ~devinfo->target_mask; 1959 if (syncrate != NULL) { 1960 if (syncrate->sxfr & ULTRA_SXFR) { 1961 tstate->ultraenb |= 1962 devinfo->target_mask; 1963 } 1964 scsirate |= syncrate->sxfr & SXFR; 1965 scsirate |= offset & SOFS; 1966 } 1967 if (active) { 1968 u_int sxfrctl0; 1969 1970 sxfrctl0 = ahc_inb(ahc, SXFRCTL0); 1971 sxfrctl0 &= ~FAST20; 1972 if (tstate->ultraenb & devinfo->target_mask) 1973 sxfrctl0 |= FAST20; 1974 ahc_outb(ahc, SXFRCTL0, sxfrctl0); 1975 } 1976 } 1977 if (active) { 1978 ahc_outb(ahc, SCSIRATE, scsirate); 1979 if ((ahc->features & AHC_ULTRA2) != 0) 1980 ahc_outb(ahc, SCSIOFFSET, offset); 1981 } 1982 1983 tinfo->scsirate = scsirate; 1984 tinfo->curr.period = period; 1985 tinfo->curr.offset = offset; 1986 tinfo->curr.ppr_options = ppr_options; 1987 1988 ahc_send_async(ahc, devinfo->channel, devinfo->target, 1989 CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL); 1990 if (bootverbose) { 1991 if (offset != 0) { 1992 printf("%s: target %d synchronous at %sMHz%s, " 1993 "offset = 0x%x\n", ahc_name(ahc), 1994 devinfo->target, syncrate->rate, 1995 (ppr_options & MSG_EXT_PPR_DT_REQ) 1996 ? " DT" : "", offset); 1997 } else { 1998 printf("%s: target %d using " 1999 "asynchronous transfers\n", 2000 ahc_name(ahc), devinfo->target); 2001 } 2002 } 2003 } 2004 2005 update_needed += ahc_update_neg_request(ahc, devinfo, tstate, 2006 tinfo, AHC_NEG_TO_GOAL); 2007 2008 if (update_needed) 2009 ahc_update_pending_scbs(ahc); 2010 } 2011 2012 /* 2013 * Update the user/goal/curr tables of wide negotiation 2014 * parameters as well as, in the case of a current or active update, 2015 * any data structures on the host controller. 
In the case of an 2016 * active update, the specified target is currently talking to us on 2017 * the bus, so the transfer parameter update must take effect 2018 * immediately. 2019 */ 2020 void 2021 ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2022 u_int width, u_int type, int paused) 2023 { 2024 struct ahc_initiator_tinfo *tinfo; 2025 struct ahc_tmode_tstate *tstate; 2026 u_int oldwidth; 2027 int active; 2028 int update_needed; 2029 2030 active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; 2031 update_needed = 0; 2032 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 2033 devinfo->target, &tstate); 2034 2035 if ((type & AHC_TRANS_USER) != 0) 2036 tinfo->user.width = width; 2037 2038 if ((type & AHC_TRANS_GOAL) != 0) 2039 tinfo->goal.width = width; 2040 2041 oldwidth = tinfo->curr.width; 2042 if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) { 2043 u_int scsirate; 2044 2045 update_needed++; 2046 scsirate = tinfo->scsirate; 2047 scsirate &= ~WIDEXFER; 2048 if (width == MSG_EXT_WDTR_BUS_16_BIT) 2049 scsirate |= WIDEXFER; 2050 2051 tinfo->scsirate = scsirate; 2052 2053 if (active) 2054 ahc_outb(ahc, SCSIRATE, scsirate); 2055 2056 tinfo->curr.width = width; 2057 2058 ahc_send_async(ahc, devinfo->channel, devinfo->target, 2059 CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL); 2060 if (bootverbose) { 2061 printf("%s: target %d using %dbit transfers\n", 2062 ahc_name(ahc), devinfo->target, 2063 8 * (0x01 << width)); 2064 } 2065 } 2066 2067 update_needed += ahc_update_neg_request(ahc, devinfo, tstate, 2068 tinfo, AHC_NEG_TO_GOAL); 2069 if (update_needed) 2070 ahc_update_pending_scbs(ahc); 2071 } 2072 2073 /* 2074 * Update the current state of tagged queuing for a given target. 2075 */ 2076 void 2077 ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2078 ahc_queue_alg alg) 2079 { 2080 ahc_platform_set_tags(ahc, devinfo, alg); 2081 } 2082 2083 /* 2084 * When the transfer settings for a connection change, update any 2085 * in-transit SCBs to contain the new data so the hardware will 2086 * be set correctly during future (re)selections. 2087 */ 2088 static void 2089 ahc_update_pending_scbs(struct ahc_softc *ahc) 2090 { 2091 struct scb *pending_scb; 2092 int pending_scb_count; 2093 int i; 2094 int paused; 2095 u_int saved_scbptr; 2096 2097 /* 2098 * Traverse the pending SCB list and ensure that all of the 2099 * SCBs there have the proper settings. 
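 * "Proper settings" means the ULTRAENB control bit, the SCSIRATE value and
 * the synchronous offset carried in each hardware SCB, all re-derived below
 * from the per-target transfer info; the MK_MESSAGE/auto-negotiate request is
 * also dropped for targets that no longer need to renegotiate.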
2100 */ 2101 pending_scb_count = 0; 2102 LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) { 2103 struct ahc_devinfo devinfo; 2104 struct hardware_scb *pending_hscb; 2105 struct ahc_initiator_tinfo *tinfo; 2106 struct ahc_tmode_tstate *tstate; 2107 2108 ahc_scb_devinfo(ahc, &devinfo, pending_scb); 2109 tinfo = ahc_fetch_transinfo(ahc, devinfo.channel, 2110 devinfo.our_scsiid, 2111 devinfo.target, &tstate); 2112 pending_hscb = pending_scb->hscb; 2113 pending_hscb->control &= ~ULTRAENB; 2114 if ((tstate->ultraenb & devinfo.target_mask) != 0) 2115 pending_hscb->control |= ULTRAENB; 2116 pending_hscb->scsirate = tinfo->scsirate; 2117 pending_hscb->scsioffset = tinfo->curr.offset; 2118 if ((tstate->auto_negotiate & devinfo.target_mask) == 0 2119 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) { 2120 pending_scb->flags &= ~SCB_AUTO_NEGOTIATE; 2121 pending_hscb->control &= ~MK_MESSAGE; 2122 } 2123 ahc_sync_scb(ahc, pending_scb, 2124 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 2125 pending_scb_count++; 2126 } 2127 2128 if (pending_scb_count == 0) 2129 return; 2130 2131 if (ahc_is_paused(ahc)) { 2132 paused = 1; 2133 } else { 2134 paused = 0; 2135 ahc_pause(ahc); 2136 } 2137 2138 saved_scbptr = ahc_inb(ahc, SCBPTR); 2139 /* Ensure that the hscbs down on the card match the new information */ 2140 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 2141 struct hardware_scb *pending_hscb; 2142 u_int control; 2143 u_int scb_tag; 2144 2145 ahc_outb(ahc, SCBPTR, i); 2146 scb_tag = ahc_inb(ahc, SCB_TAG); 2147 pending_scb = ahc_lookup_scb(ahc, scb_tag); 2148 if (pending_scb == NULL) 2149 continue; 2150 2151 pending_hscb = pending_scb->hscb; 2152 control = ahc_inb(ahc, SCB_CONTROL); 2153 control &= ~(ULTRAENB|MK_MESSAGE); 2154 control |= pending_hscb->control & (ULTRAENB|MK_MESSAGE); 2155 ahc_outb(ahc, SCB_CONTROL, control); 2156 ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate); 2157 ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset); 2158 } 2159 ahc_outb(ahc, SCBPTR, saved_scbptr); 2160 2161 if (paused == 0) 2162 ahc_unpause(ahc); 2163 } 2164 2165 /**************************** Pathing Information *****************************/ 2166 static void 2167 ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2168 { 2169 u_int saved_scsiid; 2170 role_t role; 2171 int our_id; 2172 2173 if (ahc_inb(ahc, SSTAT0) & TARGET) 2174 role = ROLE_TARGET; 2175 else 2176 role = ROLE_INITIATOR; 2177 2178 if (role == ROLE_TARGET 2179 && (ahc->features & AHC_MULTI_TID) != 0 2180 && (ahc_inb(ahc, SEQ_FLAGS) 2181 & (CMDPHASE_PENDING|TARG_CMD_PENDING|NO_DISCONNECT)) != 0) { 2182 /* We were selected, so pull our id from TARGIDIN */ 2183 our_id = ahc_inb(ahc, TARGIDIN) & OID; 2184 } else if ((ahc->features & AHC_ULTRA2) != 0) 2185 our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID; 2186 else 2187 our_id = ahc_inb(ahc, SCSIID) & OID; 2188 2189 saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); 2190 ahc_compile_devinfo(devinfo, 2191 our_id, 2192 SCSIID_TARGET(ahc, saved_scsiid), 2193 ahc_inb(ahc, SAVED_LUN), 2194 SCSIID_CHANNEL(ahc, saved_scsiid), 2195 role); 2196 } 2197 2198 struct ahc_phase_table_entry* 2199 ahc_lookup_phase_entry(int phase) 2200 { 2201 struct ahc_phase_table_entry *entry; 2202 struct ahc_phase_table_entry *last_entry; 2203 2204 /* 2205 * num_phases doesn't include the default entry which 2206 * will be returned if the phase doesn't match. 
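 * Callers typically only want the message text, e.g.
 *
 *	ahc_lookup_phase_entry(curphase)->phasemsg
 *
 * which is safe even for an unrecognized phase value since the catch-all
 * entry is returned in that case.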
2207 */ 2208 last_entry = &ahc_phase_table[num_phases]; 2209 for (entry = ahc_phase_table; entry < last_entry; entry++) { 2210 if (phase == entry->phase) 2211 break; 2212 } 2213 return (entry); 2214 } 2215 2216 void 2217 ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target, 2218 u_int lun, char channel, role_t role) 2219 { 2220 devinfo->our_scsiid = our_id; 2221 devinfo->target = target; 2222 devinfo->lun = lun; 2223 devinfo->target_offset = target; 2224 devinfo->channel = channel; 2225 devinfo->role = role; 2226 if (channel == 'B') 2227 devinfo->target_offset += 8; 2228 devinfo->target_mask = (0x01 << devinfo->target_offset); 2229 } 2230 2231 void 2232 ahc_print_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2233 { 2234 printf("%s:%c:%d:%d: ", ahc_name(ahc), devinfo->channel, 2235 devinfo->target, devinfo->lun); 2236 } 2237 2238 static void 2239 ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2240 struct scb *scb) 2241 { 2242 role_t role; 2243 int our_id; 2244 2245 our_id = SCSIID_OUR_ID(scb->hscb->scsiid); 2246 role = ROLE_INITIATOR; 2247 if ((scb->flags & SCB_TARGET_SCB) != 0) 2248 role = ROLE_TARGET; 2249 ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb), 2250 SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role); 2251 } 2252 2253 2254 /************************ Message Phase Processing ****************************/ 2255 static void 2256 ahc_assert_atn(struct ahc_softc *ahc) 2257 { 2258 u_int scsisigo; 2259 2260 scsisigo = ATNO; 2261 if ((ahc->features & AHC_DT) == 0) 2262 scsisigo |= ahc_inb(ahc, SCSISIGI); 2263 ahc_outb(ahc, SCSISIGO, scsisigo); 2264 } 2265 2266 /* 2267 * When an initiator transaction with the MK_MESSAGE flag either reconnects 2268 * or enters the initial message out phase, we are interrupted. Fill our 2269 * outgoing message buffer with the appropriate message and begin handing 2270 * the message phase(s) manually. 2271 */ 2272 static void 2273 ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2274 struct scb *scb) 2275 { 2276 /* 2277 * To facilitate adding multiple messages together, 2278 * each routine should increment the index and len 2279 * variables instead of setting them explicitly. 2280 */ 2281 ahc->msgout_index = 0; 2282 ahc->msgout_len = 0; 2283 2284 if ((scb->flags & SCB_DEVICE_RESET) == 0 2285 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) { 2286 u_int identify_msg; 2287 2288 identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb); 2289 if ((scb->hscb->control & DISCENB) != 0) 2290 identify_msg |= MSG_IDENTIFY_DISCFLAG; 2291 ahc->msgout_buf[ahc->msgout_index++] = identify_msg; 2292 ahc->msgout_len++; 2293 2294 if ((scb->hscb->control & TAG_ENB) != 0) { 2295 ahc->msgout_buf[ahc->msgout_index++] = 2296 scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE); 2297 ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag; 2298 ahc->msgout_len += 2; 2299 } 2300 } 2301 2302 if (scb->flags & SCB_DEVICE_RESET) { 2303 ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET; 2304 ahc->msgout_len++; 2305 ahc_print_path(ahc, scb); 2306 printf("Bus Device Reset Message Sent\n"); 2307 /* 2308 * Clear our selection hardware in advance of 2309 * the busfree. We may have an entry in the waiting 2310 * Q for this target, and we don't want to go about 2311 * selecting while we handle the busfree and blow it 2312 * away. 
2313 */ 2314 ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); 2315 } else if ((scb->flags & SCB_ABORT) != 0) { 2316 if ((scb->hscb->control & TAG_ENB) != 0) 2317 ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG; 2318 else 2319 ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT; 2320 ahc->msgout_len++; 2321 ahc_print_path(ahc, scb); 2322 printf("Abort%s Message Sent\n", 2323 (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : ""); 2324 /* 2325 * Clear our selection hardware in advance of 2326 * the busfree. We may have an entry in the waiting 2327 * Q for this target, and we don't want to go about 2328 * selecting while we handle the busfree and blow it 2329 * away. 2330 */ 2331 ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); 2332 } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) { 2333 ahc_build_transfer_msg(ahc, devinfo); 2334 } else { 2335 printf("ahc_intr: AWAITING_MSG for an SCB that " 2336 "does not have a waiting message\n"); 2337 printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid, 2338 devinfo->target_mask); 2339 panic("SCB = %d, SCB Control = %x, MSG_OUT = %x " 2340 "SCB flags = %x", scb->hscb->tag, scb->hscb->control, 2341 ahc_inb(ahc, MSG_OUT), scb->flags); 2342 } 2343 2344 /* 2345 * Clear the MK_MESSAGE flag from the SCB so we aren't 2346 * asked to send this message again. 2347 */ 2348 ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE); 2349 scb->hscb->control &= ~MK_MESSAGE; 2350 ahc->msgout_index = 0; 2351 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 2352 } 2353 2354 /* 2355 * Build an appropriate transfer negotiation message for the 2356 * currently active target. 2357 */ 2358 static void 2359 ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2360 { 2361 /* 2362 * We need to initiate transfer negotiations. 2363 * If our current and goal settings are identical, 2364 * we want to renegotiate due to a check condition. 2365 */ 2366 struct ahc_initiator_tinfo *tinfo; 2367 struct ahc_tmode_tstate *tstate; 2368 struct ahc_syncrate *rate; 2369 int dowide; 2370 int dosync; 2371 int doppr; 2372 u_int period; 2373 u_int ppr_options; 2374 u_int offset; 2375 2376 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 2377 devinfo->target, &tstate); 2378 /* 2379 * Filter our period based on the current connection. 2380 * If we can't perform DT transfers on this segment (not in LVD 2381 * mode for instance), then our decision to issue a PPR message 2382 * may change. 2383 */ 2384 period = tinfo->goal.period; 2385 ppr_options = tinfo->goal.ppr_options; 2386 /* Target initiated PPR is not allowed in the SCSI spec */ 2387 if (devinfo->role == ROLE_TARGET) 2388 ppr_options = 0; 2389 rate = ahc_devlimited_syncrate(ahc, tinfo, &period, 2390 &ppr_options, devinfo->role); 2391 dowide = tinfo->curr.width != tinfo->goal.width; 2392 dosync = tinfo->curr.period != period; 2393 /* 2394 * Only use PPR if we have options that need it, even if the device 2395 * claims to support it. There might be an expander in the way 2396 * that doesn't. 2397 */ 2398 doppr = ppr_options != 0; 2399 2400 if (!dowide && !dosync && !doppr) { 2401 dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT; 2402 dosync = tinfo->goal.offset != 0; 2403 } 2404 2405 if (!dowide && !dosync && !doppr) { 2406 /* 2407 * Force async with a WDTR message if we have a wide bus, 2408 * or just issue an SDTR with a 0 offset. 
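 * Either form leaves the target running asynchronously: a zero offset
 * explicitly requests async, and a wide negotiation nullifies any prior sync
 * agreement (see the WDTR handling further down, which drops back to async
 * after the exchange).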
2409 */ 2410 if ((ahc->features & AHC_WIDE) != 0) 2411 dowide = 1; 2412 else 2413 dosync = 1; 2414 2415 if (bootverbose) { 2416 ahc_print_devinfo(ahc, devinfo); 2417 printf("Ensuring async\n"); 2418 } 2419 } 2420 2421 /* Target initiated PPR is not allowed in the SCSI spec */ 2422 if (devinfo->role == ROLE_TARGET) 2423 doppr = 0; 2424 2425 /* 2426 * Both the PPR message and SDTR message require the 2427 * goal syncrate to be limited to what the target device 2428 * is capable of handling (based on whether an LVD->SE 2429 * expander is on the bus), so combine these two cases. 2430 * Regardless, guarantee that if we are using WDTR and SDTR 2431 * messages that WDTR comes first. 2432 */ 2433 if (doppr || (dosync && !dowide)) { 2434 2435 offset = tinfo->goal.offset; 2436 ahc_validate_offset(ahc, tinfo, rate, &offset, 2437 doppr ? tinfo->goal.width 2438 : tinfo->curr.width, 2439 devinfo->role); 2440 if (doppr) { 2441 ahc_construct_ppr(ahc, devinfo, period, offset, 2442 tinfo->goal.width, ppr_options); 2443 } else { 2444 ahc_construct_sdtr(ahc, devinfo, period, offset); 2445 } 2446 } else { 2447 ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width); 2448 } 2449 } 2450 2451 /* 2452 * Build a synchronous negotiation message in our message 2453 * buffer based on the input parameters. 2454 */ 2455 static void 2456 ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2457 u_int period, u_int offset) 2458 { 2459 if (offset == 0) 2460 period = AHC_ASYNC_XFER_PERIOD; 2461 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED; 2462 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR_LEN; 2463 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR; 2464 ahc->msgout_buf[ahc->msgout_index++] = period; 2465 ahc->msgout_buf[ahc->msgout_index++] = offset; 2466 ahc->msgout_len += 5; 2467 if (bootverbose) { 2468 printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n", 2469 ahc_name(ahc), devinfo->channel, devinfo->target, 2470 devinfo->lun, period, offset); 2471 } 2472 } 2473 2474 /* 2475 * Build a wide negotiation message in our message 2476 * buffer based on the input parameters. 2477 */ 2478 static void 2479 ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2480 u_int bus_width) 2481 { 2482 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED; 2483 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR_LEN; 2484 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR; 2485 ahc->msgout_buf[ahc->msgout_index++] = bus_width; 2486 ahc->msgout_len += 4; 2487 if (bootverbose) { 2488 printf("(%s:%c:%d:%d): Sending WDTR %x\n", 2489 ahc_name(ahc), devinfo->channel, devinfo->target, 2490 devinfo->lun, bus_width); 2491 } 2492 } 2493 2494 /* 2495 * Build a parallel protocol request message in our message 2496 * buffer based on the input parameters. 
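 * For reference, the message is appended to msgout_buf as eight bytes:
 *
 *	MSG_EXTENDED, MSG_EXT_PPR_LEN, MSG_EXT_PPR, <period factor>,
 *	0 (reserved), <REQ/ACK offset>, <bus width exponent>, <ppr_options>
 *
 * As with the SDTR case above, a zero offset forces the period factor to
 * AHC_ASYNC_XFER_PERIOD.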
2497 */ 2498 static void 2499 ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2500 u_int period, u_int offset, u_int bus_width, 2501 u_int ppr_options) 2502 { 2503 if (offset == 0) 2504 period = AHC_ASYNC_XFER_PERIOD; 2505 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED; 2506 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR_LEN; 2507 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR; 2508 ahc->msgout_buf[ahc->msgout_index++] = period; 2509 ahc->msgout_buf[ahc->msgout_index++] = 0; 2510 ahc->msgout_buf[ahc->msgout_index++] = offset; 2511 ahc->msgout_buf[ahc->msgout_index++] = bus_width; 2512 ahc->msgout_buf[ahc->msgout_index++] = ppr_options; 2513 ahc->msgout_len += 8; 2514 if (bootverbose) { 2515 printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, " 2516 "offset %x, ppr_options %x\n", ahc_name(ahc), 2517 devinfo->channel, devinfo->target, devinfo->lun, 2518 bus_width, period, offset, ppr_options); 2519 } 2520 } 2521 2522 /* 2523 * Clear any active message state. 2524 */ 2525 static void 2526 ahc_clear_msg_state(struct ahc_softc *ahc) 2527 { 2528 ahc->msgout_len = 0; 2529 ahc->msgin_index = 0; 2530 ahc->msg_type = MSG_TYPE_NONE; 2531 if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0) { 2532 /* 2533 * The target didn't care to respond to our 2534 * message request, so clear ATN. 2535 */ 2536 ahc_outb(ahc, CLRSINT1, CLRATNO); 2537 } 2538 ahc_outb(ahc, MSG_OUT, MSG_NOOP); 2539 ahc_outb(ahc, SEQ_FLAGS2, 2540 ahc_inb(ahc, SEQ_FLAGS2) & ~TARGET_MSG_PENDING); 2541 } 2542 2543 static void 2544 ahc_handle_proto_violation(struct ahc_softc *ahc) 2545 { 2546 struct ahc_devinfo devinfo; 2547 struct scb *scb; 2548 u_int scbid; 2549 u_int seq_flags; 2550 u_int curphase; 2551 u_int lastphase; 2552 int found; 2553 2554 ahc_fetch_devinfo(ahc, &devinfo); 2555 scbid = ahc_inb(ahc, SCB_TAG); 2556 scb = ahc_lookup_scb(ahc, scbid); 2557 seq_flags = ahc_inb(ahc, SEQ_FLAGS); 2558 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 2559 lastphase = ahc_inb(ahc, LASTPHASE); 2560 if ((seq_flags & NOT_IDENTIFIED) != 0) { 2561 2562 /* 2563 * The reconnecting target either did not send an 2564 * identify message, or did, but we didn't find an SCB 2565 * to match. 2566 */ 2567 ahc_print_devinfo(ahc, &devinfo); 2568 printf("Target did not send an IDENTIFY message. " 2569 "LASTPHASE = 0x%x.\n", lastphase); 2570 scb = NULL; 2571 } else if (scb == NULL) { 2572 /* 2573 * We don't seem to have an SCB active for this 2574 * transaction. Print an error and reset the bus. 2575 */ 2576 ahc_print_devinfo(ahc, &devinfo); 2577 printf("No SCB found during protocol violation\n"); 2578 goto proto_violation_reset; 2579 } else { 2580 ahc_set_transaction_status(scb, CAM_SEQUENCE_FAIL); 2581 if ((seq_flags & NO_CDB_SENT) != 0) { 2582 ahc_print_path(ahc, scb); 2583 printf("No or incomplete CDB sent to device.\n"); 2584 } else if ((ahc_inb(ahc, SCB_CONTROL) & STATUS_RCVD) == 0) { 2585 /* 2586 * The target never bothered to provide status to 2587 * us prior to completing the command. Since we don't 2588 * know the disposition of this command, we must attempt 2589 * to abort it. Assert ATN and prepare to send an abort 2590 * message. 
2591 */ 2592 ahc_print_path(ahc, scb); 2593 printf("Completed command without status.\n"); 2594 } else { 2595 ahc_print_path(ahc, scb); 2596 printf("Unknown protocol violation.\n"); 2597 ahc_dump_card_state(ahc); 2598 } 2599 } 2600 if ((lastphase & ~P_DATAIN_DT) == 0 2601 || lastphase == P_COMMAND) { 2602 proto_violation_reset: 2603 /* 2604 * Target either went directly to data/command 2605 * phase or didn't respond to our ATN. 2606 * The only safe thing to do is to blow 2607 * it away with a bus reset. 2608 */ 2609 found = ahc_reset_channel(ahc, 'A', TRUE); 2610 printf("%s: Issued Channel %c Bus Reset. " 2611 "%d SCBs aborted\n", ahc_name(ahc), 'A', found); 2612 } else { 2613 /* 2614 * Leave the selection hardware off in case 2615 * this abort attempt will affect yet to 2616 * be sent commands. 2617 */ 2618 ahc_outb(ahc, SCSISEQ, 2619 ahc_inb(ahc, SCSISEQ) & ~ENSELO); 2620 ahc_assert_atn(ahc); 2621 ahc_outb(ahc, MSG_OUT, HOST_MSG); 2622 if (scb == NULL) { 2623 ahc_print_devinfo(ahc, &devinfo); 2624 ahc->msgout_buf[0] = MSG_ABORT_TASK; 2625 ahc->msgout_len = 1; 2626 ahc->msgout_index = 0; 2627 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 2628 } else { 2629 ahc_print_path(ahc, scb); 2630 scb->flags |= SCB_ABORT; 2631 } 2632 printf("Protocol violation %s. Attempting to abort.\n", 2633 ahc_lookup_phase_entry(curphase)->phasemsg); 2634 } 2635 } 2636 2637 /* 2638 * Manual message loop handler. 2639 */ 2640 static void 2641 ahc_handle_message_phase(struct ahc_softc *ahc) 2642 { 2643 struct ahc_devinfo devinfo; 2644 u_int bus_phase; 2645 int end_session; 2646 2647 ahc_fetch_devinfo(ahc, &devinfo); 2648 end_session = FALSE; 2649 bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 2650 2651 reswitch: 2652 switch (ahc->msg_type) { 2653 case MSG_TYPE_INITIATOR_MSGOUT: 2654 { 2655 int lastbyte; 2656 int phasemis; 2657 int msgdone; 2658 2659 if (ahc->msgout_len == 0) 2660 panic("HOST_MSG_LOOP interrupt with no active message"); 2661 2662 #ifdef AHC_DEBUG 2663 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { 2664 ahc_print_devinfo(ahc, &devinfo); 2665 printf("INITIATOR_MSG_OUT"); 2666 } 2667 #endif 2668 phasemis = bus_phase != P_MESGOUT; 2669 if (phasemis) { 2670 #ifdef AHC_DEBUG 2671 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { 2672 printf(" PHASEMIS %s\n", 2673 ahc_lookup_phase_entry(bus_phase) 2674 ->phasemsg); 2675 } 2676 #endif 2677 if (bus_phase == P_MESGIN) { 2678 /* 2679 * Change gears and see if 2680 * this messages is of interest to 2681 * us or should be passed back to 2682 * the sequencer. 2683 */ 2684 ahc_outb(ahc, CLRSINT1, CLRATNO); 2685 ahc->send_msg_perror = FALSE; 2686 ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN; 2687 ahc->msgin_index = 0; 2688 goto reswitch; 2689 } 2690 end_session = TRUE; 2691 break; 2692 } 2693 2694 if (ahc->send_msg_perror) { 2695 ahc_outb(ahc, CLRSINT1, CLRATNO); 2696 ahc_outb(ahc, CLRSINT1, CLRREQINIT); 2697 #ifdef AHC_DEBUG 2698 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) 2699 printf(" byte 0x%x\n", ahc->send_msg_perror); 2700 #endif 2701 ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR); 2702 break; 2703 } 2704 2705 msgdone = ahc->msgout_index == ahc->msgout_len; 2706 if (msgdone) { 2707 /* 2708 * The target has requested a retry. 2709 * Re-assert ATN, reset our message index to 2710 * 0, and try again. 
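 * (Being REQed for another message-out byte after the entire message has
 * already gone out is how a retry request shows up here.)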
2711 */ 2712 ahc->msgout_index = 0; 2713 ahc_assert_atn(ahc); 2714 } 2715 2716 lastbyte = ahc->msgout_index == (ahc->msgout_len - 1); 2717 if (lastbyte) { 2718 /* Last byte is signified by dropping ATN */ 2719 ahc_outb(ahc, CLRSINT1, CLRATNO); 2720 } 2721 2722 /* 2723 * Clear our interrupt status and present 2724 * the next byte on the bus. 2725 */ 2726 ahc_outb(ahc, CLRSINT1, CLRREQINIT); 2727 #ifdef AHC_DEBUG 2728 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) 2729 printf(" byte 0x%x\n", 2730 ahc->msgout_buf[ahc->msgout_index]); 2731 #endif 2732 ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]); 2733 break; 2734 } 2735 case MSG_TYPE_INITIATOR_MSGIN: 2736 { 2737 int phasemis; 2738 int message_done; 2739 2740 #ifdef AHC_DEBUG 2741 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { 2742 ahc_print_devinfo(ahc, &devinfo); 2743 printf("INITIATOR_MSG_IN"); 2744 } 2745 #endif 2746 phasemis = bus_phase != P_MESGIN; 2747 if (phasemis) { 2748 #ifdef AHC_DEBUG 2749 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { 2750 printf(" PHASEMIS %s\n", 2751 ahc_lookup_phase_entry(bus_phase) 2752 ->phasemsg); 2753 } 2754 #endif 2755 ahc->msgin_index = 0; 2756 if (bus_phase == P_MESGOUT 2757 && (ahc->send_msg_perror == TRUE 2758 || (ahc->msgout_len != 0 2759 && ahc->msgout_index == 0))) { 2760 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 2761 goto reswitch; 2762 } 2763 end_session = TRUE; 2764 break; 2765 } 2766 2767 /* Pull the byte in without acking it */ 2768 ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL); 2769 #ifdef AHC_DEBUG 2770 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) 2771 printf(" byte 0x%x\n", 2772 ahc->msgin_buf[ahc->msgin_index]); 2773 #endif 2774 2775 message_done = ahc_parse_msg(ahc, &devinfo); 2776 2777 if (message_done) { 2778 /* 2779 * Clear our incoming message buffer in case there 2780 * is another message following this one. 2781 */ 2782 ahc->msgin_index = 0; 2783 2784 /* 2785 * If this message illicited a response, 2786 * assert ATN so the target takes us to the 2787 * message out phase. 2788 */ 2789 if (ahc->msgout_len != 0) { 2790 #ifdef AHC_DEBUG 2791 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { 2792 ahc_print_devinfo(ahc, &devinfo); 2793 printf("Asserting ATN for response\n"); 2794 } 2795 #endif 2796 ahc_assert_atn(ahc); 2797 } 2798 } else 2799 ahc->msgin_index++; 2800 2801 if (message_done == MSGLOOP_TERMINATED) { 2802 end_session = TRUE; 2803 } else { 2804 /* Ack the byte */ 2805 ahc_outb(ahc, CLRSINT1, CLRREQINIT); 2806 (void)ahc_inb(ahc, SCSIDATL); 2807 } 2808 break; 2809 } 2810 case MSG_TYPE_TARGET_MSGIN: 2811 { 2812 int msgdone; 2813 int msgout_request; 2814 2815 if (ahc->msgout_len == 0) 2816 panic("Target MSGIN with no active message"); 2817 2818 /* 2819 * If we interrupted a mesgout session, the initiator 2820 * will not know this until our first REQ. So, we 2821 * only honor mesgout requests after we've sent our 2822 * first byte. 2823 */ 2824 if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0 2825 && ahc->msgout_index > 0) 2826 msgout_request = TRUE; 2827 else 2828 msgout_request = FALSE; 2829 2830 if (msgout_request) { 2831 2832 /* 2833 * Change gears and see if 2834 * this messages is of interest to 2835 * us or should be passed back to 2836 * the sequencer. 
2837 */
2838 ahc->msg_type = MSG_TYPE_TARGET_MSGOUT;
2839 ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO);
2840 ahc->msgin_index = 0;
2841 /* Dummy read to REQ for first byte */
2842 (void)ahc_inb(ahc, SCSIDATL);
2843 ahc_outb(ahc, SXFRCTL0,
2844 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2845 break;
2846 }
2847 
2848 msgdone = ahc->msgout_index == ahc->msgout_len;
2849 if (msgdone) {
2850 ahc_outb(ahc, SXFRCTL0,
2851 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
2852 end_session = TRUE;
2853 break;
2854 }
2855 
2856 /*
2857 * Present the next byte on the bus.
2858 */
2859 ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2860 ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
2861 break;
2862 }
2863 case MSG_TYPE_TARGET_MSGOUT:
2864 {
2865 int lastbyte;
2866 int msgdone;
2867 
2868 /*
2869 * The initiator signals that this is
2870 * the last byte by dropping ATN.
2871 */
2872 lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0;
2873 
2874 /*
2875 * Read the latched byte, but turn off SPIOEN first
2876 * so that we don't inadvertently cause a REQ for the
2877 * next byte.
2878 */
2879 ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
2880 ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL);
2881 msgdone = ahc_parse_msg(ahc, &devinfo);
2882 if (msgdone == MSGLOOP_TERMINATED) {
2883 /*
2884 * The message is *really* done in that it caused
2885 * us to go to bus free. The sequencer has already
2886 * been reset at this point, so pull the ejection
2887 * handle.
2888 */
2889 return;
2890 }
2891 
2892 ahc->msgin_index++;
2893 
2894 /*
2895 * XXX Read spec about initiator dropping ATN too soon
2896 * and use msgdone to detect it.
2897 */
2898 if (msgdone == MSGLOOP_MSGCOMPLETE) {
2899 ahc->msgin_index = 0;
2900 
2901 /*
2902 * If this message elicited a response, transition
2903 * to the Message in phase and send it.
2904 */
2905 if (ahc->msgout_len != 0) {
2906 ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO);
2907 ahc_outb(ahc, SXFRCTL0,
2908 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2909 ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
2910 ahc->msgin_index = 0;
2911 break;
2912 }
2913 }
2914 
2915 if (lastbyte)
2916 end_session = TRUE;
2917 else {
2918 /* Ask for the next byte. */
2919 ahc_outb(ahc, SXFRCTL0,
2920 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2921 }
2922 
2923 break;
2924 }
2925 default:
2926 panic("Unknown REQINIT message type");
2927 }
2928 
2929 if (end_session) {
2930 ahc_clear_msg_state(ahc);
2931 ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP);
2932 } else
2933 ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
2934 }
2935 
2936 /*
2937 * See if we sent a particular extended message to the target.
2938 * If "full" is true, return true only if the target saw the full
2939 * message. If "full" is false, return true if the target saw at
2940 * least the first byte of the message.
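 * For example, the message reject handler below asks
 *
 *	ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)
 *
 * to decide whether a MESSAGE REJECT refers to a PPR negotiation that we
 * started.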
2941 */ 2942 static int 2943 ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full) 2944 { 2945 int found; 2946 u_int index; 2947 2948 found = FALSE; 2949 index = 0; 2950 2951 while (index < ahc->msgout_len) { 2952 if (ahc->msgout_buf[index] == MSG_EXTENDED) { 2953 u_int end_index; 2954 2955 end_index = index + 1 + ahc->msgout_buf[index + 1]; 2956 if (ahc->msgout_buf[index+2] == msgval 2957 && type == AHCMSG_EXT) { 2958 2959 if (full) { 2960 if (ahc->msgout_index > end_index) 2961 found = TRUE; 2962 } else if (ahc->msgout_index > index) 2963 found = TRUE; 2964 } 2965 index = end_index; 2966 } else if (ahc->msgout_buf[index] >= MSG_SIMPLE_TASK 2967 && ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) { 2968 2969 /* Skip tag type and tag id or residue param*/ 2970 index += 2; 2971 } else { 2972 /* Single byte message */ 2973 if (type == AHCMSG_1B 2974 && ahc->msgout_buf[index] == msgval 2975 && ahc->msgout_index > index) 2976 found = TRUE; 2977 index++; 2978 } 2979 2980 if (found) 2981 break; 2982 } 2983 return (found); 2984 } 2985 2986 /* 2987 * Wait for a complete incoming message, parse it, and respond accordingly. 2988 */ 2989 static int 2990 ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2991 { 2992 struct ahc_initiator_tinfo *tinfo; 2993 struct ahc_tmode_tstate *tstate; 2994 int reject; 2995 int done; 2996 int response; 2997 u_int targ_scsirate; 2998 2999 done = MSGLOOP_IN_PROG; 3000 response = FALSE; 3001 reject = FALSE; 3002 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 3003 devinfo->target, &tstate); 3004 targ_scsirate = tinfo->scsirate; 3005 3006 /* 3007 * Parse as much of the message as is available, 3008 * rejecting it if we don't support it. When 3009 * the entire message is available and has been 3010 * handled, return MSGLOOP_MSGCOMPLETE, indicating 3011 * that we have parsed an entire message. 3012 * 3013 * In the case of extended messages, we accept the length 3014 * byte outright and perform more checking once we know the 3015 * extended message type. 3016 */ 3017 switch (ahc->msgin_buf[0]) { 3018 case MSG_DISCONNECT: 3019 case MSG_SAVEDATAPOINTER: 3020 case MSG_CMDCOMPLETE: 3021 case MSG_RESTOREPOINTERS: 3022 case MSG_IGN_WIDE_RESIDUE: 3023 /* 3024 * End our message loop as these are messages 3025 * the sequencer handles on its own. 3026 */ 3027 done = MSGLOOP_TERMINATED; 3028 break; 3029 case MSG_MESSAGE_REJECT: 3030 response = ahc_handle_msg_reject(ahc, devinfo); 3031 /* FALLTHROUGH */ 3032 case MSG_NOOP: 3033 done = MSGLOOP_MSGCOMPLETE; 3034 break; 3035 case MSG_EXTENDED: 3036 { 3037 /* Wait for enough of the message to begin validation */ 3038 if (ahc->msgin_index < 2) 3039 break; 3040 switch (ahc->msgin_buf[2]) { 3041 case MSG_EXT_SDTR: 3042 { 3043 struct ahc_syncrate *syncrate; 3044 u_int period; 3045 u_int ppr_options; 3046 u_int offset; 3047 u_int saved_offset; 3048 3049 if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) { 3050 reject = TRUE; 3051 break; 3052 } 3053 3054 /* 3055 * Wait until we have both args before validating 3056 * and acting on this message. 3057 * 3058 * Add one to MSG_EXT_SDTR_LEN to account for 3059 * the extended message preamble. 
3060 */ 3061 if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1)) 3062 break; 3063 3064 period = ahc->msgin_buf[3]; 3065 ppr_options = 0; 3066 saved_offset = offset = ahc->msgin_buf[4]; 3067 syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period, 3068 &ppr_options, 3069 devinfo->role); 3070 ahc_validate_offset(ahc, tinfo, syncrate, &offset, 3071 targ_scsirate & WIDEXFER, 3072 devinfo->role); 3073 if (bootverbose) { 3074 printf("(%s:%c:%d:%d): Received " 3075 "SDTR period %x, offset %x\n\t" 3076 "Filtered to period %x, offset %x\n", 3077 ahc_name(ahc), devinfo->channel, 3078 devinfo->target, devinfo->lun, 3079 ahc->msgin_buf[3], saved_offset, 3080 period, offset); 3081 } 3082 ahc_set_syncrate(ahc, devinfo, 3083 syncrate, period, 3084 offset, ppr_options, 3085 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3086 /*paused*/TRUE); 3087 3088 /* 3089 * See if we initiated Sync Negotiation 3090 * and didn't have to fall down to async 3091 * transfers. 3092 */ 3093 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) { 3094 /* We started it */ 3095 if (saved_offset != offset) { 3096 /* Went too low - force async */ 3097 reject = TRUE; 3098 } 3099 } else { 3100 /* 3101 * Send our own SDTR in reply 3102 */ 3103 if (bootverbose 3104 && devinfo->role == ROLE_INITIATOR) { 3105 printf("(%s:%c:%d:%d): Target " 3106 "Initiated SDTR\n", 3107 ahc_name(ahc), devinfo->channel, 3108 devinfo->target, devinfo->lun); 3109 } 3110 ahc->msgout_index = 0; 3111 ahc->msgout_len = 0; 3112 ahc_construct_sdtr(ahc, devinfo, 3113 period, offset); 3114 ahc->msgout_index = 0; 3115 response = TRUE; 3116 } 3117 done = MSGLOOP_MSGCOMPLETE; 3118 break; 3119 } 3120 case MSG_EXT_WDTR: 3121 { 3122 u_int bus_width; 3123 u_int saved_width; 3124 u_int sending_reply; 3125 3126 sending_reply = FALSE; 3127 if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) { 3128 reject = TRUE; 3129 break; 3130 } 3131 3132 /* 3133 * Wait until we have our arg before validating 3134 * and acting on this message. 3135 * 3136 * Add one to MSG_EXT_WDTR_LEN to account for 3137 * the extended message preamble. 3138 */ 3139 if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1)) 3140 break; 3141 3142 bus_width = ahc->msgin_buf[3]; 3143 saved_width = bus_width; 3144 ahc_validate_width(ahc, tinfo, &bus_width, 3145 devinfo->role); 3146 if (bootverbose) { 3147 printf("(%s:%c:%d:%d): Received WDTR " 3148 "%x filtered to %x\n", 3149 ahc_name(ahc), devinfo->channel, 3150 devinfo->target, devinfo->lun, 3151 saved_width, bus_width); 3152 } 3153 3154 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, TRUE)) { 3155 /* 3156 * Don't send a WDTR back to the 3157 * target, since we asked first. 3158 * If the width went higher than our 3159 * request, reject it. 3160 */ 3161 if (saved_width > bus_width) { 3162 reject = TRUE; 3163 printf("(%s:%c:%d:%d): requested %dBit " 3164 "transfers. 
Rejecting...\n", 3165 ahc_name(ahc), devinfo->channel, 3166 devinfo->target, devinfo->lun, 3167 8 * (0x01 << bus_width)); 3168 bus_width = 0; 3169 } 3170 } else { 3171 /* 3172 * Send our own WDTR in reply 3173 */ 3174 if (bootverbose 3175 && devinfo->role == ROLE_INITIATOR) { 3176 printf("(%s:%c:%d:%d): Target " 3177 "Initiated WDTR\n", 3178 ahc_name(ahc), devinfo->channel, 3179 devinfo->target, devinfo->lun); 3180 } 3181 ahc->msgout_index = 0; 3182 ahc->msgout_len = 0; 3183 ahc_construct_wdtr(ahc, devinfo, bus_width); 3184 ahc->msgout_index = 0; 3185 response = TRUE; 3186 sending_reply = TRUE; 3187 } 3188 ahc_set_width(ahc, devinfo, bus_width, 3189 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3190 /*paused*/TRUE); 3191 /* After a wide message, we are async */ 3192 ahc_set_syncrate(ahc, devinfo, 3193 /*syncrate*/NULL, /*period*/0, 3194 /*offset*/0, /*ppr_options*/0, 3195 AHC_TRANS_ACTIVE, /*paused*/TRUE); 3196 if (sending_reply == FALSE && reject == FALSE) { 3197 3198 if (tinfo->goal.offset) { 3199 ahc->msgout_index = 0; 3200 ahc->msgout_len = 0; 3201 ahc_build_transfer_msg(ahc, devinfo); 3202 ahc->msgout_index = 0; 3203 response = TRUE; 3204 } 3205 } 3206 done = MSGLOOP_MSGCOMPLETE; 3207 break; 3208 } 3209 case MSG_EXT_PPR: 3210 { 3211 struct ahc_syncrate *syncrate; 3212 u_int period; 3213 u_int offset; 3214 u_int bus_width; 3215 u_int ppr_options; 3216 u_int saved_width; 3217 u_int saved_offset; 3218 u_int saved_ppr_options; 3219 3220 if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) { 3221 reject = TRUE; 3222 break; 3223 } 3224 3225 /* 3226 * Wait until we have all args before validating 3227 * and acting on this message. 3228 * 3229 * Add one to MSG_EXT_PPR_LEN to account for 3230 * the extended message preamble. 3231 */ 3232 if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1)) 3233 break; 3234 3235 period = ahc->msgin_buf[3]; 3236 offset = ahc->msgin_buf[5]; 3237 bus_width = ahc->msgin_buf[6]; 3238 saved_width = bus_width; 3239 ppr_options = ahc->msgin_buf[7]; 3240 /* 3241 * According to the spec, a DT only 3242 * period factor with no DT option 3243 * set implies async. 3244 */ 3245 if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0 3246 && period == 9) 3247 offset = 0; 3248 saved_ppr_options = ppr_options; 3249 saved_offset = offset; 3250 3251 /* 3252 * Mask out any options we don't support 3253 * on any controller. Transfer options are 3254 * only available if we are negotiating wide. 3255 */ 3256 ppr_options &= MSG_EXT_PPR_DT_REQ; 3257 if (bus_width == 0) 3258 ppr_options = 0; 3259 3260 ahc_validate_width(ahc, tinfo, &bus_width, 3261 devinfo->role); 3262 syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period, 3263 &ppr_options, 3264 devinfo->role); 3265 ahc_validate_offset(ahc, tinfo, syncrate, 3266 &offset, bus_width, 3267 devinfo->role); 3268 3269 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, TRUE)) { 3270 /* 3271 * If we are unable to do any of the 3272 * requested options (we went too low), 3273 * then we'll have to reject the message. 
3274 */ 3275 if (saved_width > bus_width 3276 || saved_offset != offset 3277 || saved_ppr_options != ppr_options) { 3278 reject = TRUE; 3279 period = 0; 3280 offset = 0; 3281 bus_width = 0; 3282 ppr_options = 0; 3283 syncrate = NULL; 3284 } 3285 } else { 3286 if (devinfo->role != ROLE_TARGET) 3287 printf("(%s:%c:%d:%d): Target " 3288 "Initiated PPR\n", 3289 ahc_name(ahc), devinfo->channel, 3290 devinfo->target, devinfo->lun); 3291 else 3292 printf("(%s:%c:%d:%d): Initiator " 3293 "Initiated PPR\n", 3294 ahc_name(ahc), devinfo->channel, 3295 devinfo->target, devinfo->lun); 3296 ahc->msgout_index = 0; 3297 ahc->msgout_len = 0; 3298 ahc_construct_ppr(ahc, devinfo, period, offset, 3299 bus_width, ppr_options); 3300 ahc->msgout_index = 0; 3301 response = TRUE; 3302 } 3303 if (bootverbose) { 3304 printf("(%s:%c:%d:%d): Received PPR width %x, " 3305 "period %x, offset %x,options %x\n" 3306 "\tFiltered to width %x, period %x, " 3307 "offset %x, options %x\n", 3308 ahc_name(ahc), devinfo->channel, 3309 devinfo->target, devinfo->lun, 3310 saved_width, ahc->msgin_buf[3], 3311 saved_offset, saved_ppr_options, 3312 bus_width, period, offset, ppr_options); 3313 } 3314 ahc_set_width(ahc, devinfo, bus_width, 3315 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3316 /*paused*/TRUE); 3317 ahc_set_syncrate(ahc, devinfo, 3318 syncrate, period, 3319 offset, ppr_options, 3320 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3321 /*paused*/TRUE); 3322 done = MSGLOOP_MSGCOMPLETE; 3323 break; 3324 } 3325 default: 3326 /* Unknown extended message. Reject it. */ 3327 reject = TRUE; 3328 break; 3329 } 3330 break; 3331 } 3332 #ifdef AHC_TARGET_MODE 3333 case MSG_BUS_DEV_RESET: 3334 ahc_handle_devreset(ahc, devinfo, 3335 CAM_BDR_SENT, 3336 "Bus Device Reset Received", 3337 /*verbose_level*/0); 3338 ahc_restart(ahc); 3339 done = MSGLOOP_TERMINATED; 3340 break; 3341 case MSG_ABORT_TAG: 3342 case MSG_ABORT: 3343 case MSG_CLEAR_QUEUE: 3344 { 3345 int tag; 3346 3347 /* Target mode messages */ 3348 if (devinfo->role != ROLE_TARGET) { 3349 reject = TRUE; 3350 break; 3351 } 3352 tag = SCB_LIST_NULL; 3353 if (ahc->msgin_buf[0] == MSG_ABORT_TAG) 3354 tag = ahc_inb(ahc, INITIATOR_TAG); 3355 ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, 3356 devinfo->lun, tag, ROLE_TARGET, 3357 CAM_REQ_ABORTED); 3358 3359 tstate = ahc->enabled_targets[devinfo->our_scsiid]; 3360 if (tstate != NULL) { 3361 struct ahc_tmode_lstate* lstate; 3362 3363 lstate = tstate->enabled_luns[devinfo->lun]; 3364 if (lstate != NULL) { 3365 ahc_queue_lstate_event(ahc, lstate, 3366 devinfo->our_scsiid, 3367 ahc->msgin_buf[0], 3368 /*arg*/tag); 3369 ahc_send_lstate_events(ahc, lstate); 3370 } 3371 } 3372 ahc_restart(ahc); 3373 done = MSGLOOP_TERMINATED; 3374 break; 3375 } 3376 #endif 3377 case MSG_TERM_IO_PROC: 3378 default: 3379 reject = TRUE; 3380 break; 3381 } 3382 3383 if (reject) { 3384 /* 3385 * Setup to reject the message. 3386 */ 3387 ahc->msgout_index = 0; 3388 ahc->msgout_len = 1; 3389 ahc->msgout_buf[0] = MSG_MESSAGE_REJECT; 3390 done = MSGLOOP_MSGCOMPLETE; 3391 response = TRUE; 3392 } 3393 3394 if (done != MSGLOOP_IN_PROG && !response) 3395 /* Clear the outgoing message buffer */ 3396 ahc->msgout_len = 0; 3397 3398 return (done); 3399 } 3400 3401 /* 3402 * Process a message reject message. 3403 */ 3404 static int 3405 ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 3406 { 3407 /* 3408 * What we care about here is if we had an 3409 * outstanding SDTR or WDTR message for this 3410 * target. 
If we did, this is a signal that 3411 * the target is refusing negotiation. 3412 */ 3413 struct scb *scb; 3414 struct ahc_initiator_tinfo *tinfo; 3415 struct ahc_tmode_tstate *tstate; 3416 u_int scb_index; 3417 u_int last_msg; 3418 int response = 0; 3419 3420 scb_index = ahc_inb(ahc, SCB_TAG); 3421 scb = ahc_lookup_scb(ahc, scb_index); 3422 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, 3423 devinfo->our_scsiid, 3424 devinfo->target, &tstate); 3425 /* Might be necessary */ 3426 last_msg = ahc_inb(ahc, LAST_MSG); 3427 3428 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) { 3429 /* 3430 * Target does not support the PPR message. 3431 * Attempt to negotiate SPI-2 style. 3432 */ 3433 if (bootverbose) { 3434 printf("(%s:%c:%d:%d): PPR Rejected. " 3435 "Trying WDTR/SDTR\n", 3436 ahc_name(ahc), devinfo->channel, 3437 devinfo->target, devinfo->lun); 3438 } 3439 tinfo->goal.ppr_options = 0; 3440 tinfo->curr.transport_version = 2; 3441 tinfo->goal.transport_version = 2; 3442 ahc->msgout_index = 0; 3443 ahc->msgout_len = 0; 3444 ahc_build_transfer_msg(ahc, devinfo); 3445 ahc->msgout_index = 0; 3446 response = 1; 3447 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) { 3448 3449 /* note 8bit xfers */ 3450 if (bootverbose) 3451 printf("(%s:%c:%d:%d): refuses WIDE negotiation. Using " 3452 "8bit transfers\n", ahc_name(ahc), 3453 devinfo->channel, devinfo->target, devinfo->lun); 3454 ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, 3455 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3456 /*paused*/TRUE); 3457 /* 3458 * No need to clear the sync rate. If the target 3459 * did not accept the command, our syncrate is 3460 * unaffected. If the target started the negotiation, 3461 * but rejected our response, we already cleared the 3462 * sync rate before sending our WDTR. 3463 */ 3464 if (tinfo->goal.offset != tinfo->curr.offset) { 3465 3466 /* Start the sync negotiation */ 3467 ahc->msgout_index = 0; 3468 ahc->msgout_len = 0; 3469 ahc_build_transfer_msg(ahc, devinfo); 3470 ahc->msgout_index = 0; 3471 response = 1; 3472 } 3473 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) { 3474 /* note asynch xfers and clear flag */ 3475 ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0, 3476 /*offset*/0, /*ppr_options*/0, 3477 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3478 /*paused*/TRUE); 3479 if (bootverbose) 3480 printf("(%s:%c:%d:%d): refuses synchronous negotiation." 3481 " Using asynchronous transfers\n", 3482 ahc_name(ahc), devinfo->channel, 3483 devinfo->target, devinfo->lun); 3484 } else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) { 3485 int tag_type; 3486 int mask; 3487 3488 tag_type = (scb->hscb->control & MSG_SIMPLE_TASK); 3489 3490 if (tag_type == MSG_SIMPLE_TASK) { 3491 if (bootverbose) 3492 printf("(%s:%c:%d:%d): refuses tagged commands." 3493 " Performing non-tagged I/O\n", 3494 ahc_name(ahc), devinfo->channel, 3495 devinfo->target, devinfo->lun); 3496 ahc_set_tags(ahc, devinfo, AHC_QUEUE_NONE); 3497 mask = ~0x23; 3498 } else { 3499 if (bootverbose) 3500 printf("(%s:%c:%d:%d): refuses %s tagged " 3501 "commands. Performing simple queue " 3502 "tagged I/O only\n", 3503 ahc_name(ahc), devinfo->channel, 3504 devinfo->target, devinfo->lun, 3505 tag_type == MSG_ORDERED_TASK 3506 ? "ordered" : "head of queue"); 3507 ahc_set_tags(ahc, devinfo, AHC_QUEUE_BASIC); 3508 mask = ~0x03; 3509 } 3510 3511 /* 3512 * Resend the identify for this CCB as the target 3513 * may believe that the selection is invalid otherwise. 
3514 */
3515 ahc_outb(ahc, SCB_CONTROL,
3516 ahc_inb(ahc, SCB_CONTROL) & mask);
3517 scb->hscb->control &= mask;
3518 ahc_set_transaction_tag(scb, /*enabled*/FALSE,
3519 /*type*/MSG_SIMPLE_TASK);
3520 ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG);
3521 ahc_assert_atn(ahc);
3522 
3523 /*
3524 * This transaction is now at the head of
3525 * the untagged queue for this target.
3526 */
3527 if ((ahc->flags & AHC_SCB_BTT) == 0) {
3528 struct scb_tailq *untagged_q;
3529 
3530 untagged_q =
3531 &(ahc->untagged_queues[devinfo->target_offset]);
3532 TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe);
3533 scb->flags |= SCB_UNTAGGEDQ;
3534 }
3535 ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
3536 scb->hscb->tag);
3537 
3538 /*
3539 * Requeue all tagged commands for this target
3540 * currently in our possession so they can be
3541 * converted to untagged commands.
3542 */
3543 ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
3544 SCB_GET_CHANNEL(ahc, scb),
3545 SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
3546 ROLE_INITIATOR, CAM_REQUEUE_REQ,
3547 SEARCH_COMPLETE);
3548 } else {
3549 /*
3550 * Otherwise, we ignore it.
3551 */
3552 if (bootverbose)
3553 printf("%s:%c:%d: Message reject for %x -- ignored\n",
3554 ahc_name(ahc), devinfo->channel, devinfo->target,
3555 last_msg);
3556 }
3557 return (response);
3558 }
3559 
3560 /*
3561 * Process an ignore wide residue message.
3562 */
3563 static void
3564 ahc_handle_ign_wide_residue(struct ahc_softc *ahc,
3565 struct ahc_devinfo *devinfo)
3566 {
3567 u_int scb_index;
3568 struct scb *scb;
3569 
3570 scb_index = ahc_inb(ahc, SCB_TAG);
3571 scb = ahc_lookup_scb(ahc, scb_index);
3572 /*
3573 * XXX Actually check data direction in the sequencer?
3574 * Perhaps add datadir to some spare bits in the hscb?
3575 */
3576 if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0
3577 || ahc_get_transfer_dir(scb) != CAM_DIR_IN) {
3578 /*
3579 * Ignore the message if we haven't
3580 * seen an appropriate data phase yet.
3581 */
3582 } else {
3583 /*
3584 * If the residual occurred on the last
3585 * transfer and the transfer request was
3586 * expected to end on an odd count, do
3587 * nothing. Otherwise, subtract a byte
3588 * and update the residual count accordingly.
3589 */
3590 uint32_t sgptr;
3591 
3592 sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
3593 if ((sgptr & SG_LIST_NULL) != 0
3594 && ahc_inb(ahc, DATA_COUNT_ODD) == 1) {
3595 /*
3596 * If the residual occurred on the last
3597 * transfer and the transfer request was
3598 * expected to end on an odd count, do
3599 * nothing.
3600 */
3601 } else {
3602 struct ahc_dma_seg *sg;
3603 uint32_t data_cnt;
3604 uint32_t data_addr;
3605 uint32_t sglen;
3606 
3607 /* Pull in the rest of the sgptr */
3608 sgptr |= (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24)
3609 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16)
3610 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8);
3611 sgptr &= SG_PTR_MASK;
3612 data_cnt = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+3) << 24)
3613 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+2) << 16)
3614 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+1) << 8)
3615 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT));
3616 
3617 data_addr = (ahc_inb(ahc, SHADDR + 3) << 24)
3618 | (ahc_inb(ahc, SHADDR + 2) << 16)
3619 | (ahc_inb(ahc, SHADDR + 1) << 8)
3620 | (ahc_inb(ahc, SHADDR));
3621 
3622 data_cnt += 1;
3623 data_addr -= 1;
3624 
3625 sg = ahc_sg_bus_to_virt(scb, sgptr);
3626 /*
3627 * The residual sg ptr points to the next S/G
3628 * to load so we must go back one.
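 * Backing up means adding the ignored byte back into the residual count and
 * stepping the shadow address back one; if that byte actually belonged to the
 * end of the previous segment, the residual S/G pointer and count are rebuilt
 * to point back into it, as done below.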
3629 */ 3630 sg--; 3631 sglen = ahc_le32toh(sg->len) & AHC_SG_LEN_MASK; 3632 if (sg != scb->sg_list 3633 && sglen < (data_cnt & AHC_SG_LEN_MASK)) { 3634 3635 sg--; 3636 sglen = ahc_le32toh(sg->len); 3637 /* 3638 * Preserve High Address and SG_LIST bits 3639 * while setting the count to 1. 3640 */ 3641 data_cnt = 1 | (sglen & (~AHC_SG_LEN_MASK)); 3642 data_addr = ahc_le32toh(sg->addr) 3643 + (sglen & AHC_SG_LEN_MASK) - 1; 3644 3645 /* 3646 * Increment sg so it points to the 3647 * "next" sg. 3648 */ 3649 sg++; 3650 sgptr = ahc_sg_virt_to_bus(scb, sg); 3651 ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 3, 3652 sgptr >> 24); 3653 ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 2, 3654 sgptr >> 16); 3655 ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 1, 3656 sgptr >> 8); 3657 ahc_outb(ahc, SCB_RESIDUAL_SGPTR, sgptr); 3658 } 3659 3660 ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 3, data_cnt >> 24); 3661 ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 2, data_cnt >> 16); 3662 ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 1, data_cnt >> 8); 3663 ahc_outb(ahc, SCB_RESIDUAL_DATACNT, data_cnt); 3664 } 3665 } 3666 } 3667 3668 3669 /* 3670 * Reinitialize the data pointers for the active transfer 3671 * based on its current residual. 3672 */ 3673 static void 3674 ahc_reinitialize_dataptrs(struct ahc_softc *ahc) 3675 { 3676 struct scb *scb; 3677 struct ahc_dma_seg *sg; 3678 u_int scb_index; 3679 uint32_t sgptr; 3680 uint32_t resid; 3681 uint32_t dataptr; 3682 3683 scb_index = ahc_inb(ahc, SCB_TAG); 3684 scb = ahc_lookup_scb(ahc, scb_index); 3685 sgptr = (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24) 3686 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16) 3687 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8) 3688 | ahc_inb(ahc, SCB_RESIDUAL_SGPTR); 3689 3690 sgptr &= SG_PTR_MASK; 3691 sg = ahc_sg_bus_to_virt(scb, sgptr); 3692 3693 /* The residual sg_ptr always points to the next sg */ 3694 sg--; 3695 3696 resid = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 2) << 16) 3697 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 1) << 8) 3698 | ahc_inb(ahc, SCB_RESIDUAL_DATACNT); 3699 3700 dataptr = ahc_le32toh(sg->addr) 3701 + (ahc_le32toh(sg->len) & AHC_SG_LEN_MASK) 3702 - resid; 3703 if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { 3704 u_int dscommand1; 3705 3706 dscommand1 = ahc_inb(ahc, DSCOMMAND1); 3707 ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0); 3708 ahc_outb(ahc, HADDR, 3709 (ahc_le32toh(sg->len) >> 24) & SG_HIGH_ADDR_BITS); 3710 ahc_outb(ahc, DSCOMMAND1, dscommand1); 3711 } 3712 ahc_outb(ahc, HADDR + 3, dataptr >> 24); 3713 ahc_outb(ahc, HADDR + 2, dataptr >> 16); 3714 ahc_outb(ahc, HADDR + 1, dataptr >> 8); 3715 ahc_outb(ahc, HADDR, dataptr); 3716 ahc_outb(ahc, HCNT + 2, resid >> 16); 3717 ahc_outb(ahc, HCNT + 1, resid >> 8); 3718 ahc_outb(ahc, HCNT, resid); 3719 if ((ahc->features & AHC_ULTRA2) == 0) { 3720 ahc_outb(ahc, STCNT + 2, resid >> 16); 3721 ahc_outb(ahc, STCNT + 1, resid >> 8); 3722 ahc_outb(ahc, STCNT, resid); 3723 } 3724 } 3725 3726 /* 3727 * Handle the effects of issuing a bus device reset message. 3728 */ 3729 static void 3730 ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 3731 cam_status status, const char *message, int verbose_level) 3732 { 3733 #ifdef AHC_TARGET_MODE 3734 struct ahc_tmode_tstate* tstate; 3735 u_int lun; 3736 #endif 3737 int found; 3738 3739 found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, 3740 CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role, 3741 status); 3742 3743 #ifdef AHC_TARGET_MODE 3744 /* 3745 * Send an immediate notify ccb to all target mord peripheral 3746 * drivers affected by this action. 
3747 */ 3748 tstate = ahc->enabled_targets[devinfo->our_scsiid]; 3749 if (tstate != NULL) { 3750 for (lun = 0; lun < AHC_NUM_LUNS; lun++) { 3751 struct ahc_tmode_lstate* lstate; 3752 3753 lstate = tstate->enabled_luns[lun]; 3754 if (lstate == NULL) 3755 continue; 3756 3757 ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid, 3758 MSG_BUS_DEV_RESET, /*arg*/0); 3759 ahc_send_lstate_events(ahc, lstate); 3760 } 3761 } 3762 #endif 3763 3764 /* 3765 * Go back to async/narrow transfers and renegotiate. 3766 */ 3767 ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, 3768 AHC_TRANS_CUR, /*paused*/TRUE); 3769 ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, 3770 /*period*/0, /*offset*/0, /*ppr_options*/0, 3771 AHC_TRANS_CUR, /*paused*/TRUE); 3772 3773 ahc_send_async(ahc, devinfo->channel, devinfo->target, 3774 CAM_LUN_WILDCARD, AC_SENT_BDR, NULL); 3775 3776 if (message != NULL 3777 && (verbose_level <= bootverbose)) 3778 printf("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc), 3779 message, devinfo->channel, devinfo->target, found); 3780 } 3781 3782 #ifdef AHC_TARGET_MODE 3783 static void 3784 ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 3785 struct scb *scb) 3786 { 3787 3788 /* 3789 * To facilitate adding multiple messages together, 3790 * each routine should increment the index and len 3791 * variables instead of setting them explicitly. 3792 */ 3793 ahc->msgout_index = 0; 3794 ahc->msgout_len = 0; 3795 3796 if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0) 3797 ahc_build_transfer_msg(ahc, devinfo); 3798 else 3799 panic("ahc_intr: AWAITING target message with no message"); 3800 3801 ahc->msgout_index = 0; 3802 ahc->msg_type = MSG_TYPE_TARGET_MSGIN; 3803 } 3804 #endif 3805 3806 int 3807 ahc_softc_init(struct ahc_softc *ahc) 3808 { 3809 3810 /* The IRQMS bit is only valid on VL and EISA chips */ 3811 if ((ahc->chip & AHC_PCI) == 0) 3812 ahc->unpause = ahc_inb(ahc, HCNTRL) & IRQMS; 3813 else 3814 ahc->unpause = 0; 3815 ahc->pause = ahc->unpause | PAUSE; 3816 /* XXX The shared scb data stuff should be deprecated */ 3817 if (ahc->scb_data == NULL) { 3818 ahc->scb_data = malloc(sizeof(*ahc->scb_data), 3819 M_DEVBUF, M_NOWAIT); 3820 if (ahc->scb_data == NULL) 3821 return (ENOMEM); 3822 memset(ahc->scb_data, 0, sizeof(*ahc->scb_data)); 3823 } 3824 3825 return (0); 3826 } 3827 3828 void 3829 ahc_softc_insert(struct ahc_softc *ahc) 3830 { 3831 struct ahc_softc *list_ahc; 3832 3833 #if AHC_PCI_CONFIG > 0 3834 /* 3835 * Second Function PCI devices need to inherit some 3836 * settings from function 0. 3837 */ 3838 if ((ahc->chip & AHC_BUS_MASK) == AHC_PCI 3839 && (ahc->features & AHC_MULTI_FUNC) != 0) { 3840 TAILQ_FOREACH(list_ahc, &ahc_tailq, links) { 3841 ahc_dev_softc_t list_pci; 3842 ahc_dev_softc_t pci; 3843 3844 list_pci = list_ahc->dev_softc; 3845 pci = ahc->dev_softc; 3846 if (ahc_get_pci_slot(list_pci) == ahc_get_pci_slot(pci) 3847 && ahc_get_pci_bus(list_pci) == ahc_get_pci_bus(pci)) { 3848 struct ahc_softc *master; 3849 struct ahc_softc *slave; 3850 3851 if (ahc_get_pci_function(list_pci) == 0) { 3852 master = list_ahc; 3853 slave = ahc; 3854 } else { 3855 master = ahc; 3856 slave = list_ahc; 3857 } 3858 slave->flags &= ~AHC_BIOS_ENABLED; 3859 slave->flags |= 3860 master->flags & AHC_BIOS_ENABLED; 3861 slave->flags &= ~AHC_PRIMARY_CHANNEL; 3862 slave->flags |= 3863 master->flags & AHC_PRIMARY_CHANNEL; 3864 break; 3865 } 3866 } 3867 } 3868 #endif 3869 3870 /* 3871 * Insertion sort into our list of softcs. 
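 * The list stays ordered by ahc_softc_comp(): walk until the first entry that
 * sorts after the new softc and insert in front of it, otherwise append at
 * the tail.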
3872 */ 3873 list_ahc = TAILQ_FIRST(&ahc_tailq); 3874 while (list_ahc != NULL 3875 && ahc_softc_comp(list_ahc, ahc) <= 0) 3876 list_ahc = TAILQ_NEXT(list_ahc, links); 3877 if (list_ahc != NULL) 3878 TAILQ_INSERT_BEFORE(list_ahc, ahc, links); 3879 else 3880 TAILQ_INSERT_TAIL(&ahc_tailq, ahc, links); 3881 ahc->init_level++; 3882 } 3883 3884 /* 3885 * Verify that the passed in softc pointer is for a 3886 * controller that is still configured. 3887 */ 3888 struct ahc_softc * 3889 ahc_find_softc(struct ahc_softc *ahc) 3890 { 3891 struct ahc_softc *list_ahc; 3892 3893 TAILQ_FOREACH(list_ahc, &ahc_tailq, links) { 3894 if (list_ahc == ahc) 3895 return (ahc); 3896 } 3897 return (NULL); 3898 } 3899 3900 void 3901 ahc_set_unit(struct ahc_softc *ahc, int unit) 3902 { 3903 ahc->unit = unit; 3904 } 3905 3906 void 3907 ahc_set_name(struct ahc_softc *ahc, const char *name) 3908 { 3909 ahc->name = name; 3910 } 3911 3912 void 3913 ahc_free(struct ahc_softc *ahc) 3914 { 3915 int i; 3916 3917 ahc_fini_scbdata(ahc); 3918 switch (ahc->init_level) { 3919 default: 3920 case 2: 3921 ahc_shutdown(ahc); 3922 /* TAILQ_REMOVE(&ahc_tailq, ahc, links); XXX */ 3923 /* FALLTHROUGH */ 3924 case 1: 3925 bus_dmamap_unload(ahc->parent_dmat, ahc->shared_data_dmamap); 3926 bus_dmamap_destroy(ahc->parent_dmat, ahc->shared_data_dmamap); 3927 bus_dmamem_unmap(ahc->parent_dmat, (void *)ahc->qoutfifo, 3928 ahc->shared_data_size); 3929 bus_dmamem_free(ahc->parent_dmat, &ahc->shared_data_seg, 3930 ahc->shared_data_nseg); 3931 break; 3932 case 0: 3933 break; 3934 } 3935 3936 ahc_platform_free(ahc); 3937 for (i = 0; i < AHC_NUM_TARGETS; i++) { 3938 struct ahc_tmode_tstate *tstate; 3939 3940 tstate = ahc->enabled_targets[i]; 3941 if (tstate != NULL) { 3942 #if AHC_TARGET_MODE 3943 int j; 3944 3945 for (j = 0; j < AHC_NUM_LUNS; j++) { 3946 struct ahc_tmode_lstate *lstate; 3947 3948 lstate = tstate->enabled_luns[j]; 3949 if (lstate != NULL) { 3950 /*xpt_free_path(lstate->path);*/ 3951 free(lstate, M_DEVBUF); 3952 } 3953 } 3954 #endif 3955 free(tstate, M_DEVBUF); 3956 } 3957 } 3958 #if AHC_TARGET_MODE 3959 if (ahc->black_hole != NULL) { 3960 /*xpt_free_path(ahc->black_hole->path);*/ 3961 free(ahc->black_hole, M_DEVBUF); 3962 } 3963 #endif 3964 #ifndef __NetBSD__ 3965 if (ahc->name != NULL) 3966 free(ahc->name, M_DEVBUF); 3967 #endif 3968 if (ahc->seep_config != NULL) 3969 free(ahc->seep_config, M_DEVBUF); 3970 #if !defined(__FreeBSD__) && !defined(__NetBSD__) 3971 free(ahc, M_DEVBUF); 3972 #endif 3973 return; 3974 } 3975 3976 void 3977 ahc_shutdown(void *arg) 3978 { 3979 struct ahc_softc *ahc; 3980 int i; 3981 3982 ahc = arg; 3983 3984 /* This will reset most registers to 0, but not all */ 3985 ahc_reset(ahc); 3986 ahc_outb(ahc, SCSISEQ, 0); 3987 ahc_outb(ahc, SXFRCTL0, 0); 3988 ahc_outb(ahc, DSPCISTATUS, 0); 3989 3990 for (i = TARG_SCSIRATE; i < SCSICONF; i++) 3991 ahc_outb(ahc, i, 0); 3992 } 3993 3994 /* 3995 * Reset the controller and record some information about it 3996 * that is only available just after a reset. 3997 */ 3998 int 3999 ahc_reset(struct ahc_softc *ahc) 4000 { 4001 u_int sblkctl; 4002 u_int sxfrctl1_a, sxfrctl1_b; 4003 int wait; 4004 4005 /* 4006 * Preserve the value of the SXFRCTL1 register for all channels. 4007 * It contains settings that affect termination and we don't want 4008 * to disturb the integrity of the bus. 4009 */ 4010 ahc_pause(ahc); 4011 if ((ahc_inb(ahc, HCNTRL) & CHIPRST) != 0) { 4012 /* 4013 * The chip has not been initialized since 4014 * PCI/EISA/VLB bus reset. 
Don't trust 4015 * "left over BIOS data". 4016 */ 4017 ahc->flags |= AHC_NO_BIOS_INIT; 4018 } 4019 sxfrctl1_b = 0; 4020 if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) { 4021 u_int sblkctl1; 4022 4023 /* 4024 * Save channel B's settings in case this chip 4025 * is setup for TWIN channel operation. 4026 */ 4027 sblkctl1 = ahc_inb(ahc, SBLKCTL); 4028 ahc_outb(ahc, SBLKCTL, sblkctl1 | SELBUSB); 4029 sxfrctl1_b = ahc_inb(ahc, SXFRCTL1); 4030 ahc_outb(ahc, SBLKCTL, sblkctl1 & ~SELBUSB); 4031 } 4032 sxfrctl1_a = ahc_inb(ahc, SXFRCTL1); 4033 4034 ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause); 4035 4036 /* 4037 * Ensure that the reset has finished. We delay 1000us 4038 * prior to reading the register to make sure the chip 4039 * has sufficiently completed its reset to handle register 4040 * accesses. 4041 */ 4042 wait = 1000; 4043 do { 4044 ahc_delay(1000); 4045 } while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK)); 4046 4047 if (wait == 0) { 4048 printf("%s: WARNING - Failed chip reset! " 4049 "Trying to initialize anyway.\n", ahc_name(ahc)); 4050 } 4051 ahc_outb(ahc, HCNTRL, ahc->pause); 4052 4053 /* Determine channel configuration */ 4054 sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE); 4055 /* No Twin Channel PCI cards */ 4056 if ((ahc->chip & AHC_PCI) != 0) 4057 sblkctl &= ~SELBUSB; 4058 switch (sblkctl) { 4059 case 0: 4060 /* Single Narrow Channel */ 4061 break; 4062 case 2: 4063 /* Wide Channel */ 4064 ahc->features |= AHC_WIDE; 4065 break; 4066 case 8: 4067 /* Twin Channel */ 4068 ahc->features |= AHC_TWIN; 4069 break; 4070 default: 4071 printf(" Unsupported adapter type (0x%x). Ignoring\n", 4072 sblkctl); 4073 return(-1); 4074 } 4075 4076 /* 4077 * Reload sxfrctl1. 4078 * 4079 * We must always initialize STPWEN to 1 before we 4080 * restore the saved values. STPWEN is initialized 4081 * to a tri-state condition which can only be cleared 4082 * by turning it on. 4083 */ 4084 if ((ahc->features & AHC_TWIN) != 0) { 4085 u_int sblkctl1; 4086 4087 sblkctl1 = ahc_inb(ahc, SBLKCTL); 4088 ahc_outb(ahc, SBLKCTL, sblkctl1 | SELBUSB); 4089 ahc_outb(ahc, SXFRCTL1, sxfrctl1_b); 4090 ahc_outb(ahc, SBLKCTL, sblkctl1 & ~SELBUSB); 4091 } 4092 ahc_outb(ahc, SXFRCTL1, sxfrctl1_a); 4093 4094 #ifdef AHC_DUMP_SEQ 4095 if (ahc->init_level == 0) 4096 ahc_dumpseq(ahc); 4097 #endif 4098 4099 return (0); 4100 } 4101 4102 /* 4103 * Determine the number of SCBs available on the controller 4104 */ 4105 int 4106 ahc_probe_scbs(struct ahc_softc *ahc) { 4107 int i; 4108 4109 for (i = 0; i < AHC_SCB_MAX; i++) { 4110 4111 ahc_outb(ahc, SCBPTR, i); 4112 ahc_outb(ahc, SCB_BASE, i); 4113 if (ahc_inb(ahc, SCB_BASE) != i) 4114 break; 4115 ahc_outb(ahc, SCBPTR, 0); 4116 if (ahc_inb(ahc, SCB_BASE) != 0) 4117 break; 4118 } 4119 return (i); 4120 } 4121 4122 #if 0 4123 static void 4124 ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 4125 { 4126 bus_addr_t *baddr; 4127 4128 baddr = (bus_addr_t *)arg; 4129 *baddr = segs->ds_addr; 4130 } 4131 #endif 4132 4133 static void 4134 ahc_build_free_scb_list(struct ahc_softc *ahc) 4135 { 4136 int scbsize; 4137 int i; 4138 4139 scbsize = 32; 4140 if ((ahc->flags & AHC_LSCBS_ENABLED) != 0) 4141 scbsize = 64; 4142 4143 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 4144 int j; 4145 4146 ahc_outb(ahc, SCBPTR, i); 4147 4148 /* 4149 * Touch all SCB bytes to avoid parity errors 4150 * should one of our debugging routines read 4151 * an otherwise uninitialized byte.
4152 */ 4153 for (j = 0; j < scbsize; j++) 4154 ahc_outb(ahc, SCB_BASE+j, 0xFF); 4155 4156 /* Clear the control byte. */ 4157 ahc_outb(ahc, SCB_CONTROL, 0); 4158 4159 /* Set the next pointer */ 4160 if ((ahc->flags & AHC_PAGESCBS) != 0) 4161 ahc_outb(ahc, SCB_NEXT, i+1); 4162 else 4163 ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL); 4164 4165 /* Make the tag number, SCSIID, and lun invalid */ 4166 ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL); 4167 ahc_outb(ahc, SCB_SCSIID, 0xFF); 4168 ahc_outb(ahc, SCB_LUN, 0xFF); 4169 } 4170 4171 /* Make sure that the last SCB terminates the free list */ 4172 ahc_outb(ahc, SCBPTR, i-1); 4173 ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL); 4174 } 4175 4176 static int 4177 ahc_init_scbdata(struct ahc_softc *ahc) 4178 { 4179 struct scb_data *scb_data; 4180 4181 scb_data = ahc->scb_data; 4182 SLIST_INIT(&scb_data->free_scbs); 4183 SLIST_INIT(&scb_data->sg_maps); 4184 4185 /* Allocate SCB resources */ 4186 scb_data->scbarray = malloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC, 4187 M_DEVBUF, M_NOWAIT); 4188 if (scb_data->scbarray == NULL) 4189 return (ENOMEM); 4190 memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC); 4191 4192 /* Determine the number of hardware SCBs and initialize them */ 4193 4194 scb_data->maxhscbs = ahc_probe_scbs(ahc); 4195 if ((ahc->flags & AHC_PAGESCBS) != 0) { 4196 /* SCB 0 heads the free list */ 4197 ahc_outb(ahc, FREE_SCBH, 0); 4198 } else { 4199 ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL); 4200 } 4201 4202 if (ahc->scb_data->maxhscbs == 0) { 4203 printf("%s: No SCB space found\n", ahc_name(ahc)); 4204 return (ENXIO); 4205 } 4206 4207 ahc_build_free_scb_list(ahc); 4208 4209 /* 4210 * Create our DMA tags. These tags define the kinds of device 4211 * accessible memory allocations and memory mappings we will 4212 * need to perform during normal operation. 4213 * 4214 * Unless we need to further restrict the allocation, we rely 4215 * on the restrictions of the parent dmat, hence the common 4216 * use of MAXADDR and MAXSIZE. 4217 */ 4218 4219 if (ahc_createdmamem(ahc->parent_dmat, 4220 AHC_SCB_MAX * sizeof(struct hardware_scb), ahc->sc_dmaflags, 4221 &scb_data->hscb_dmamap, 4222 (void **)&scb_data->hscbs, &scb_data->hscb_busaddr, 4223 &scb_data->hscb_seg, &scb_data->hscb_nseg, ahc_name(ahc), 4224 "hardware SCB structures") < 0) 4225 goto error_exit; 4226 4227 scb_data->init_level++; 4228 4229 if (ahc_createdmamem(ahc->parent_dmat, 4230 AHC_SCB_MAX * sizeof(struct scsi_sense_data), ahc->sc_dmaflags, 4231 &scb_data->sense_dmamap, (void **)&scb_data->sense, 4232 &scb_data->sense_busaddr, &scb_data->sense_seg, 4233 &scb_data->sense_nseg, ahc_name(ahc), "sense buffers") < 0) 4234 goto error_exit; 4235 4236 scb_data->init_level++; 4237 4238 /* Perform initial CCB allocation */ 4239 memset(scb_data->hscbs, 0, 4240 AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb)); 4241 ahc_alloc_scbs(ahc); 4242 scb_data->init_level++; 4243 4244 if (scb_data->numscbs == 0) { 4245 printf("%s: ahc_init_scbdata - " 4246 "Unable to allocate initial scbs\n", 4247 ahc_name(ahc)); 4248 goto error_exit; 4249 } 4250 4251 /* 4252 * Tell the sequencer which SCB will be the next one it receives. 
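 * A free SCB is reserved here and its tag is written to the NEXT_QUEUED_SCB scratch location so the sequencer can DMA that hardware SCB when the next command is queued.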
4253 */ 4254 ahc->next_queued_scb = ahc_get_scb(ahc); 4255 ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag); 4256 4257 /* 4258 * Note that we were successful 4259 */ 4260 return (0); 4261 4262 error_exit: 4263 4264 return (ENOMEM); 4265 } 4266 4267 static void 4268 ahc_fini_scbdata(struct ahc_softc *ahc) 4269 { 4270 struct scb_data *scb_data; 4271 4272 scb_data = ahc->scb_data; 4273 if (scb_data == NULL) 4274 return; 4275 4276 switch (scb_data->init_level) { 4277 default: 4278 case 5: 4279 { 4280 struct sg_map_node *sg_map; 4281 4282 while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) { 4283 SLIST_REMOVE_HEAD(&scb_data->sg_maps, links); 4284 ahc_freedmamem(ahc->parent_dmat, PAGE_SIZE, 4285 sg_map->sg_dmamap, (void *)sg_map->sg_vaddr, 4286 &sg_map->sg_dmasegs, sg_map->sg_nseg); 4287 free(sg_map, M_DEVBUF); 4288 } 4289 } 4290 /*FALLTHROUGH*/ 4291 case 4: 4292 ahc_freedmamem(ahc->parent_dmat, 4293 AHC_SCB_MAX * sizeof(struct scsi_sense_data), 4294 scb_data->sense_dmamap, (void *)scb_data->sense, 4295 &scb_data->sense_seg, scb_data->sense_nseg); 4296 /*FALLTHROUGH*/ 4297 case 3: 4298 ahc_freedmamem(ahc->parent_dmat, 4299 AHC_SCB_MAX * sizeof(struct hardware_scb), 4300 scb_data->hscb_dmamap, (void *)scb_data->hscbs, 4301 &scb_data->hscb_seg, scb_data->hscb_nseg); 4302 /*FALLTHROUGH*/ 4303 case 2: 4304 case 1: 4305 case 0: 4306 break; 4307 } 4308 if (scb_data->scbarray != NULL) 4309 free(scb_data->scbarray, M_DEVBUF); 4310 } 4311 4312 int 4313 ahc_alloc_scbs(struct ahc_softc *ahc) 4314 { 4315 struct scb_data *scb_data; 4316 struct scb *next_scb; 4317 struct sg_map_node *sg_map; 4318 bus_addr_t physaddr; 4319 struct ahc_dma_seg *segs; 4320 int newcount; 4321 int i; 4322 4323 scb_data = ahc->scb_data; 4324 if (scb_data->numscbs >= AHC_SCB_MAX_ALLOC) 4325 /* Can't allocate any more */ 4326 return (0); 4327 4328 next_scb = &scb_data->scbarray[scb_data->numscbs]; 4329 4330 sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_WAITOK); 4331 4332 if (sg_map == NULL) 4333 return (0); 4334 4335 /* Allocate S/G space for the next batch of SCBS */ 4336 if (ahc_createdmamem(ahc->parent_dmat, PAGE_SIZE, ahc->sc_dmaflags, 4337 &sg_map->sg_dmamap, 4338 (void **)&sg_map->sg_vaddr, &sg_map->sg_physaddr, 4339 &sg_map->sg_dmasegs, &sg_map->sg_nseg, 4340 ahc_name(ahc), 4341 "SG space") < 0) { 4342 free(sg_map, M_DEVBUF); 4343 return (0); 4344 } 4345 4346 SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links); 4347 4348 segs = sg_map->sg_vaddr; 4349 physaddr = sg_map->sg_physaddr; 4350 4351 newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg))); 4352 newcount = MIN(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs)); 4353 for (i = 0; i < newcount; i++) { 4354 struct scb_platform_data *pdata; 4355 int error; 4356 4357 pdata = malloc(sizeof(*pdata), M_DEVBUF, M_WAITOK); 4358 if (pdata == NULL) 4359 break; 4360 next_scb->platform_data = pdata; 4361 next_scb->sg_map = sg_map; 4362 next_scb->sg_list = segs; 4363 /* 4364 * The sequencer always starts with the second entry. 4365 * The first entry is embedded in the scb.
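 * sg_list_phys therefore points one ahc_dma_seg past the start of this SCB's S/G allocation.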
4366 */ 4367 next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg); 4368 next_scb->ahc_softc = ahc; 4369 next_scb->flags = SCB_FREE; 4370 4371 error = bus_dmamap_create(ahc->parent_dmat, 4372 AHC_MAXTRANSFER_SIZE, AHC_NSEG, MAXPHYS, 0, 4373 BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW|ahc->sc_dmaflags, 4374 &next_scb->dmamap); 4375 if (error != 0) 4376 break; 4377 4378 next_scb->hscb = &scb_data->hscbs[scb_data->numscbs]; 4379 next_scb->hscb->tag = ahc->scb_data->numscbs; 4380 SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, 4381 next_scb, links.sle); 4382 segs += AHC_NSEG; 4383 physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg)); 4384 next_scb++; 4385 ahc->scb_data->numscbs++; 4386 } 4387 return (newcount); 4388 } 4389 4390 void 4391 ahc_controller_info(struct ahc_softc *ahc, char *tbuf, size_t l) 4392 { 4393 size_t len; 4394 4395 len = snprintf(tbuf, l, "%s: ", 4396 ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]); 4397 if (len > l) 4398 return; 4399 if ((ahc->features & AHC_TWIN) != 0) 4400 len += snprintf(tbuf + len, l - len, 4401 "Twin Channel, A SCSI Id=%d, B SCSI Id=%d, primary %c, ", 4402 ahc->our_id, ahc->our_id_b, 4403 (ahc->flags & AHC_PRIMARY_CHANNEL) + 'A'); 4404 else { 4405 const char *speed; 4406 const char *type; 4407 4408 speed = ""; 4409 if ((ahc->features & AHC_ULTRA) != 0) { 4410 speed = "Ultra "; 4411 } else if ((ahc->features & AHC_DT) != 0) { 4412 speed = "Ultra160 "; 4413 } else if ((ahc->features & AHC_ULTRA2) != 0) { 4414 speed = "Ultra2 "; 4415 } 4416 if ((ahc->features & AHC_WIDE) != 0) { 4417 type = "Wide"; 4418 } else { 4419 type = "Single"; 4420 } 4421 len += snprintf(tbuf + len, l - len, "%s%s Channel %c, SCSI Id=%d, ", 4422 speed, type, ahc->channel, ahc->our_id); 4423 } 4424 if (len > l) 4425 return; 4426 4427 if ((ahc->flags & AHC_PAGESCBS) != 0) 4428 snprintf(tbuf + len, l - len, "%d/%d SCBs", 4429 ahc->scb_data->maxhscbs, AHC_MAX_QUEUE); 4430 else 4431 snprintf(tbuf + len, l - len, "%d SCBs", ahc->scb_data->maxhscbs); 4432 } 4433 4434 /* 4435 * Start the board, ready for normal operation 4436 */ 4437 int 4438 ahc_init(struct ahc_softc *ahc) 4439 { 4440 int max_targ; 4441 int i; 4442 int term; 4443 u_int scsi_conf; 4444 u_int scsiseq_template; 4445 u_int ultraenb; 4446 u_int discenable; 4447 u_int tagenable; 4448 size_t driver_data_size; 4449 uint32_t physaddr; 4450 4451 #ifdef AHC_DEBUG 4452 if ((ahc_debug & AHC_DEBUG_SEQUENCER) != 0) 4453 ahc->flags |= AHC_SEQUENCER_DEBUG; 4454 #endif 4455 4456 #ifdef AHC_PRINT_SRAM 4457 printf("Scratch Ram:"); 4458 for (i = 0x20; i < 0x5f; i++) { 4459 if (((i % 8) == 0) && (i != 0)) { 4460 printf ("\n "); 4461 } 4462 printf (" 0x%x", ahc_inb(ahc, i)); 4463 } 4464 if ((ahc->features & AHC_MORE_SRAM) != 0) { 4465 for (i = 0x70; i < 0x7f; i++) { 4466 if (((i % 8) == 0) && (i != 0)) { 4467 printf ("\n "); 4468 } 4469 printf (" 0x%x", ahc_inb(ahc, i)); 4470 } 4471 } 4472 printf ("\n"); 4473 /* 4474 * Reading uninitialized scratch ram may 4475 * generate parity errors. 4476 */ 4477 ahc_outb(ahc, CLRINT, CLRPARERR); 4478 ahc_outb(ahc, CLRINT, CLRBRKADRINT); 4479 #endif 4480 max_targ = 15; 4481 4482 /* 4483 * Assume we have a board at this stage and it has been reset. 4484 */ 4485 if ((ahc->flags & AHC_USEDEFAULTS) != 0) 4486 ahc->our_id = ahc->our_id_b = 7; 4487 4488 /* 4489 * Default to allowing initiator operations. 4490 */ 4491 ahc->flags |= AHC_INITIATORROLE; 4492 4493 /* 4494 * Only allow target mode features if this unit has them enabled. 
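 * Note that the per-unit enable test below is commented out, so target mode is currently stripped unconditionally.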
4495 */ 4496 //if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0) 4497 ahc->features &= ~AHC_TARGETMODE; 4498 4499 /* 4500 * DMA tag for our command fifos and other data in system memory 4501 * the card's sequencer must be able to access. For initiator 4502 * roles, we need to allocate space for the qinfifo and qoutfifo. 4503 * The qinfifo and qoutfifo are composed of 256 1 byte elements. 4504 * When providing for the target mode role, we must additionally 4505 * provide space for the incoming target command fifo and an extra 4506 * byte to deal with a DMA bug in some chip versions. 4507 */ 4508 driver_data_size = 2 * 256 * sizeof(uint8_t); 4509 if ((ahc->features & AHC_TARGETMODE) != 0) 4510 driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd) 4511 + /*DMA WideOdd Bug Buffer*/1; 4512 4513 if (ahc_createdmamem(ahc->parent_dmat, driver_data_size, 4514 ahc->sc_dmaflags, 4515 &ahc->shared_data_dmamap, (void **)&ahc->qoutfifo, 4516 &ahc->shared_data_busaddr, &ahc->shared_data_seg, 4517 &ahc->shared_data_nseg, ahc_name(ahc), 4518 "shared data") < 0) 4519 return (ENOMEM); 4520 4521 ahc->init_level++; 4522 4523 if ((ahc->features & AHC_TARGETMODE) != 0) { 4524 ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo; 4525 ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS]; 4526 ahc->dma_bug_buf = ahc->shared_data_busaddr 4527 + driver_data_size - 1; 4528 /* All target command blocks start out invalid. */ 4529 for (i = 0; i < AHC_TMODE_CMDS; i++) 4530 ahc->targetcmds[i].cmd_valid = 0; 4531 ahc_sync_tqinfifo(ahc, BUS_DMASYNC_PREREAD); 4532 ahc->tqinfifonext = 1; 4533 ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1); 4534 ahc_outb(ahc, TQINPOS, ahc->tqinfifonext); 4535 ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256]; 4536 } 4537 ahc->qinfifo = &ahc->qoutfifo[256]; 4538 4539 ahc->init_level++; 4540 4541 /* Allocate SCB data now that buffer_dmat is initialized */ 4542 if (ahc->scb_data->maxhscbs == 0) 4543 if (ahc_init_scbdata(ahc) != 0) 4544 return (ENOMEM); 4545 4546 if (bootverbose) 4547 printf("%s: found %d SCBs\n", ahc_name(ahc), 4548 ahc->scb_data->maxhscbs); 4549 4550 /* 4551 * Allocate a tstate to house information for our 4552 * initiator presence on the bus as well as the user 4553 * data for any target mode initiator. 4554 */ 4555 if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) { 4556 printf("%s: unable to allocate ahc_tmode_tstate. " 4557 "Failing attach\n", ahc_name(ahc)); 4558 return (ENOMEM); 4559 } 4560 4561 if ((ahc->features & AHC_TWIN) != 0) { 4562 if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) { 4563 printf("%s: unable to allocate ahc_tmode_tstate. 
" 4564 "Failing attach\n", ahc_name(ahc)); 4565 return (ENOMEM); 4566 } 4567 } 4568 4569 ahc_outb(ahc, SEQ_FLAGS, 0); 4570 ahc_outb(ahc, SEQ_FLAGS2, 0); 4571 4572 if (ahc->scb_data->maxhscbs < AHC_SCB_MAX_ALLOC) { 4573 ahc->flags |= AHC_PAGESCBS; 4574 } else { 4575 ahc->flags &= ~AHC_PAGESCBS; 4576 } 4577 4578 #ifdef AHC_DEBUG 4579 if (ahc_debug & AHC_SHOW_MISC) { 4580 printf("%s: hardware scb %lu bytes; kernel scb %lu bytes; " 4581 "ahc_dma %lu bytes\n", 4582 ahc_name(ahc), 4583 (u_long)sizeof(struct hardware_scb), 4584 (u_long)sizeof(struct scb), 4585 (u_long)sizeof(struct ahc_dma_seg)); 4586 } 4587 #endif /* AHC_DEBUG */ 4588 4589 /* 4590 * Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels 4591 */ 4592 if (ahc->features & AHC_TWIN) { 4593 4594 /* 4595 * The device is gated to channel B after a chip reset, 4596 * so set those values first 4597 */ 4598 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB); 4599 term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0; 4600 ahc_outb(ahc, SCSIID, ahc->our_id_b); 4601 scsi_conf = ahc_inb(ahc, SCSICONF + 1); 4602 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) 4603 |term|ahc->seltime_b|ENSTIMER|ACTNEGEN); 4604 if ((ahc->features & AHC_ULTRA2) != 0) 4605 ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR); 4606 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); 4607 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); 4608 4609 if ((scsi_conf & RESET_SCSI) != 0 4610 && (ahc->flags & AHC_INITIATORROLE) != 0) 4611 ahc->flags |= AHC_RESET_BUS_B; 4612 4613 /* Select Channel A */ 4614 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB); 4615 } 4616 4617 term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0; 4618 if ((ahc->features & AHC_ULTRA2) != 0) 4619 ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id); 4620 else 4621 ahc_outb(ahc, SCSIID, ahc->our_id); 4622 scsi_conf = ahc_inb(ahc, SCSICONF); 4623 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) 4624 |term|ahc->seltime 4625 |ENSTIMER|ACTNEGEN); 4626 if ((ahc->features & AHC_ULTRA2) != 0) 4627 ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR); 4628 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); 4629 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); 4630 4631 if ((scsi_conf & RESET_SCSI) != 0 4632 && (ahc->flags & AHC_INITIATORROLE) != 0) 4633 ahc->flags |= AHC_RESET_BUS_A; 4634 4635 /* 4636 * Look at the information that board initialization or 4637 * the board bios has left us. 4638 */ 4639 ultraenb = 0; 4640 tagenable = ALL_TARGETS_MASK; 4641 4642 /* Grab the disconnection disable table and invert it for our needs */ 4643 if ((ahc->flags & AHC_USEDEFAULTS) != 0) { 4644 printf("%s: Host Adapter BIOS disabled. Using default SCSI " 4645 "host and target device parameters\n", ahc_name(ahc)); 4646 ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B| 4647 AHC_TERM_ENB_A|AHC_TERM_ENB_B; 4648 discenable = ALL_TARGETS_MASK; 4649 if ((ahc->features & AHC_ULTRA) != 0) 4650 ultraenb = ALL_TARGETS_MASK; 4651 } else if ((ahc->flags & AHC_USETARGETDEFAULTS) != 0) { 4652 printf("%s: Host Adapter has no SEEPROM. 
Using default SCSI" 4653 " target parameters\n", ahc_name(ahc)); 4654 ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B; 4655 discenable = ALL_TARGETS_MASK; 4656 if ((ahc->features & AHC_ULTRA) != 0) 4657 ultraenb = ALL_TARGETS_MASK; 4658 } else { 4659 discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8) 4660 | ahc_inb(ahc, DISC_DSB)); 4661 if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0) 4662 ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8) 4663 | ahc_inb(ahc, ULTRA_ENB); 4664 } 4665 4666 if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0) 4667 max_targ = 7; 4668 4669 for (i = 0; i <= max_targ; i++) { 4670 struct ahc_initiator_tinfo *tinfo; 4671 struct ahc_tmode_tstate *tstate; 4672 u_int our_id; 4673 u_int target_id; 4674 char channel; 4675 4676 channel = 'A'; 4677 our_id = ahc->our_id; 4678 target_id = i; 4679 if (i > 7 && (ahc->features & AHC_TWIN) != 0) { 4680 channel = 'B'; 4681 our_id = ahc->our_id_b; 4682 target_id = i % 8; 4683 } 4684 tinfo = ahc_fetch_transinfo(ahc, channel, our_id, 4685 target_id, &tstate); 4686 /* Default to async narrow across the board */ 4687 memset(tinfo, 0, sizeof(*tinfo)); 4688 if (ahc->flags & (AHC_USEDEFAULTS | AHC_USETARGETDEFAULTS)) { 4689 if ((ahc->features & AHC_WIDE) != 0) 4690 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 4691 4692 /* 4693 * These will be truncated when we determine the 4694 * connection type we have with the target. 4695 */ 4696 tinfo->user.period = ahc_syncrates->period; 4697 tinfo->user.offset = ~0; 4698 } else { 4699 u_int scsirate; 4700 uint16_t mask; 4701 4702 /* Take the settings leftover in scratch RAM. */ 4703 scsirate = ahc_inb(ahc, TARG_SCSIRATE + i); 4704 mask = (0x01 << i); 4705 if ((ahc->features & AHC_ULTRA2) != 0) { 4706 u_int offset; 4707 u_int maxsync; 4708 4709 if ((scsirate & SOFS) == 0x0F) { 4710 /* 4711 * Haven't negotiated yet, 4712 * so the format is different. 4713 */ 4714 scsirate = (scsirate & SXFR) >> 4 4715 | ((ultraenb & mask) 4716 ? 0x08 : 0x0) 4717 | (scsirate & WIDEXFER); 4718 offset = MAX_OFFSET_ULTRA2; 4719 } else 4720 offset = ahc_inb(ahc, TARG_OFFSET + i); 4721 if ((scsirate & ~WIDEXFER) == 0 && offset != 0) 4722 /* Set to the lowest sync rate, 5MHz */ 4723 scsirate |= 0x1c; 4724 maxsync = AHC_SYNCRATE_ULTRA2; 4725 if ((ahc->features & AHC_DT) != 0) 4726 maxsync = AHC_SYNCRATE_DT; 4727 tinfo->user.period = 4728 ahc_find_period(ahc, scsirate, maxsync); 4729 if (offset == 0) 4730 tinfo->user.period = 0; 4731 else 4732 tinfo->user.offset = ~0; 4733 if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/ 4734 && (ahc->features & AHC_DT) != 0) 4735 tinfo->user.ppr_options = 4736 MSG_EXT_PPR_DT_REQ; 4737 } else if ((scsirate & SOFS) != 0) { 4738 if ((scsirate & SXFR) == 0x40 4739 && (ultraenb & mask) != 0) { 4740 /* Treat 10MHz as a non-ultra speed */ 4741 scsirate &= ~SXFR; 4742 ultraenb &= ~mask; 4743 } 4744 tinfo->user.period = 4745 ahc_find_period(ahc, scsirate, 4746 (ultraenb & mask) 4747 ?
AHC_SYNCRATE_ULTRA 4748 : AHC_SYNCRATE_FAST); 4749 if (tinfo->user.period != 0) 4750 tinfo->user.offset = ~0; 4751 } 4752 if (tinfo->user.period == 0) 4753 tinfo->user.offset = 0; 4754 if ((scsirate & WIDEXFER) != 0 4755 && (ahc->features & AHC_WIDE) != 0) 4756 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 4757 tinfo->user.protocol_version = 4; 4758 if ((ahc->features & AHC_DT) != 0) 4759 tinfo->user.transport_version = 3; 4760 else 4761 tinfo->user.transport_version = 2; 4762 tinfo->goal.protocol_version = 2; 4763 tinfo->goal.transport_version = 2; 4764 tinfo->curr.protocol_version = 2; 4765 tinfo->curr.transport_version = 2; 4766 } 4767 tstate->ultraenb = 0; 4768 tstate->discenable = discenable; 4769 } 4770 ahc->user_discenable = discenable; 4771 ahc->user_tagenable = tagenable; 4772 4773 /* There are no untagged SCBs active yet. */ 4774 for (i = 0; i < 16; i++) { 4775 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0)); 4776 if ((ahc->flags & AHC_SCB_BTT) != 0) { 4777 int lun; 4778 4779 /* 4780 * The SCB based BTT allows an entry per 4781 * target and lun pair. 4782 */ 4783 for (lun = 1; lun < AHC_NUM_LUNS; lun++) 4784 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun)); 4785 } 4786 } 4787 4788 /* All of our queues are empty */ 4789 for (i = 0; i < 256; i++) 4790 ahc->qoutfifo[i] = SCB_LIST_NULL; 4791 4792 ahc_sync_qoutfifo(ahc, BUS_DMASYNC_PREREAD); 4793 4794 for (i = 0; i < 256; i++) 4795 ahc->qinfifo[i] = SCB_LIST_NULL; 4796 4797 if ((ahc->features & AHC_MULTI_TID) != 0) { 4798 ahc_outb(ahc, TARGID, 0); 4799 ahc_outb(ahc, TARGID + 1, 0); 4800 } 4801 4802 /* 4803 * Tell the sequencer where it can find our arrays in memory. 4804 */ 4805 physaddr = ahc->scb_data->hscb_busaddr; 4806 ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF); 4807 ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF); 4808 ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF); 4809 ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF); 4810 4811 physaddr = ahc->shared_data_busaddr; 4812 ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF); 4813 ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF); 4814 ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF); 4815 ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF); 4816 4817 /* 4818 * Initialize the group code to command length table. 4819 * This overrides the values in TARG_SCSIRATE, so only 4820 * setup the table after we have processed that information. 4821 */ 4822 ahc_outb(ahc, CMDSIZE_TABLE, 5); 4823 ahc_outb(ahc, CMDSIZE_TABLE + 1, 9); 4824 ahc_outb(ahc, CMDSIZE_TABLE + 2, 9); 4825 ahc_outb(ahc, CMDSIZE_TABLE + 3, 0); 4826 ahc_outb(ahc, CMDSIZE_TABLE + 4, 15); 4827 ahc_outb(ahc, CMDSIZE_TABLE + 5, 11); 4828 ahc_outb(ahc, CMDSIZE_TABLE + 6, 0); 4829 ahc_outb(ahc, CMDSIZE_TABLE + 7, 0); 4830 4831 /* Tell the sequencer of our initial queue positions */ 4832 ahc_outb(ahc, KERNEL_QINPOS, 0); 4833 ahc_outb(ahc, QINPOS, 0); 4834 ahc_outb(ahc, QOUTPOS, 0); 4835 4836 /* 4837 * Use the built in queue management registers 4838 * if they are available. 
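 * On controllers with AHC_QUEUE_REGS the queue offsets are maintained in hardware (SNSCB/SDSCB/HNSCB_QOFF); they only need to be zeroed here.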
4839 */ 4840 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 4841 ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256); 4842 ahc_outb(ahc, SDSCB_QOFF, 0); 4843 ahc_outb(ahc, SNSCB_QOFF, 0); 4844 ahc_outb(ahc, HNSCB_QOFF, 0); 4845 } 4846 4847 4848 /* We don't have any waiting selections */ 4849 ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL); 4850 4851 /* Our disconnection list is empty too */ 4852 ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL); 4853 4854 /* Message out buffer starts empty */ 4855 ahc_outb(ahc, MSG_OUT, MSG_NOOP); 4856 4857 /* 4858 * Setup the allowed SCSI Sequences based on operational mode. 4859 * If we are a target, we'll enable select in operations once 4860 * we've had a lun enabled. 4861 */ 4862 scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP; 4863 if ((ahc->flags & AHC_INITIATORROLE) != 0) 4864 scsiseq_template |= ENRSELI; 4865 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template); 4866 4867 /* 4868 * Load the Sequencer program and Enable the adapter 4869 * in "fast" mode. 4870 */ 4871 if (bootverbose) 4872 printf("%s: Downloading Sequencer Program...", 4873 ahc_name(ahc)); 4874 4875 ahc_loadseq(ahc); 4876 4877 if ((ahc->features & AHC_ULTRA2) != 0) { 4878 int wait; 4879 4880 /* 4881 * Wait for up to 500ms for our transceivers 4882 * to settle. If the adapter does not have 4883 * a cable attached, the transceivers may 4884 * never settle, so don't complain if we 4885 * fail here. 4886 */ 4887 ahc_pause(ahc); 4888 for (wait = 5000; 4889 (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait; 4890 wait--) 4891 ahc_delay(100); 4892 ahc_unpause(ahc); 4893 } 4894 4895 return (0); 4896 } 4897 4898 void 4899 ahc_intr_enable(struct ahc_softc *ahc, int enable) 4900 { 4901 u_int hcntrl; 4902 4903 hcntrl = ahc_inb(ahc, HCNTRL); 4904 hcntrl &= ~INTEN; 4905 ahc->pause &= ~INTEN; 4906 ahc->unpause &= ~INTEN; 4907 if (enable) { 4908 hcntrl |= INTEN; 4909 ahc->pause |= INTEN; 4910 ahc->unpause |= INTEN; 4911 } 4912 ahc_outb(ahc, HCNTRL, hcntrl); 4913 } 4914 4915 /* 4916 * Ensure that the card is paused in a location 4917 * outside of all critical sections and that all 4918 * pending work is completed prior to returning. 4919 * This routine should only be called from outside 4920 * an interrupt context. 4921 */ 4922 void 4923 ahc_pause_and_flushwork(struct ahc_softc *ahc) 4924 { 4925 int intstat; 4926 int maxloops; 4927 int paused; 4928 4929 maxloops = 1000; 4930 ahc->flags |= AHC_ALL_INTERRUPTS; 4931 intstat = 0; 4932 paused = FALSE; 4933 do { 4934 if (paused) 4935 ahc_unpause(ahc); 4936 ahc_intr(ahc); 4937 ahc_pause(ahc); 4938 paused = TRUE; 4939 ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO); 4940 ahc_clear_critical_section(ahc); 4941 if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) 4942 break; 4943 } while (--maxloops 4944 && (((intstat = ahc_inb(ahc, INTSTAT)) & INT_PEND) != 0 4945 || (ahc_inb(ahc, SSTAT0) & (SELDO|SELINGO)))); 4946 if (maxloops == 0) { 4947 printf("Infinite interrupt loop, INTSTAT = %x", 4948 ahc_inb(ahc, INTSTAT)); 4949 } 4950 ahc_platform_flushwork(ahc); 4951 ahc->flags &= ~AHC_ALL_INTERRUPTS; 4952 } 4953 4954 int 4955 ahc_suspend(struct ahc_softc *ahc) 4956 { 4957 uint8_t *ptr; 4958 int i; 4959 4960 ahc_pause_and_flushwork(ahc); 4961 4962 if (LIST_FIRST(&ahc->pending_scbs) != NULL) 4963 return (EBUSY); 4964 4965 #if AHC_TARGET_MODE 4966 /* 4967 * XXX What about ATIOs that have not yet been serviced? 4968 * Perhaps we should just refuse to be suspended if we 4969 * are acting in a target role. 
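 * For now a non-NULL pending_device is treated as reason enough to refuse the suspend.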
4970 */ 4971 if (ahc->pending_device != NULL) 4972 return (EBUSY); 4973 #endif 4974 4975 /* Save volatile registers */ 4976 if ((ahc->features & AHC_TWIN) != 0) { 4977 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB); 4978 ahc->suspend_state.channel[1].scsiseq = ahc_inb(ahc, SCSISEQ); 4979 ahc->suspend_state.channel[1].sxfrctl0 = ahc_inb(ahc, SXFRCTL0); 4980 ahc->suspend_state.channel[1].sxfrctl1 = ahc_inb(ahc, SXFRCTL1); 4981 ahc->suspend_state.channel[1].simode0 = ahc_inb(ahc, SIMODE0); 4982 ahc->suspend_state.channel[1].simode1 = ahc_inb(ahc, SIMODE1); 4983 ahc->suspend_state.channel[1].seltimer = ahc_inb(ahc, SELTIMER); 4984 ahc->suspend_state.channel[1].seqctl = ahc_inb(ahc, SEQCTL); 4985 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB); 4986 } 4987 ahc->suspend_state.channel[0].scsiseq = ahc_inb(ahc, SCSISEQ); 4988 ahc->suspend_state.channel[0].sxfrctl0 = ahc_inb(ahc, SXFRCTL0); 4989 ahc->suspend_state.channel[0].sxfrctl1 = ahc_inb(ahc, SXFRCTL1); 4990 ahc->suspend_state.channel[0].simode0 = ahc_inb(ahc, SIMODE0); 4991 ahc->suspend_state.channel[0].simode1 = ahc_inb(ahc, SIMODE1); 4992 ahc->suspend_state.channel[0].seltimer = ahc_inb(ahc, SELTIMER); 4993 ahc->suspend_state.channel[0].seqctl = ahc_inb(ahc, SEQCTL); 4994 4995 if ((ahc->chip & AHC_PCI) != 0) { 4996 ahc->suspend_state.dscommand0 = ahc_inb(ahc, DSCOMMAND0); 4997 ahc->suspend_state.dspcistatus = ahc_inb(ahc, DSPCISTATUS); 4998 } 4999 5000 if ((ahc->features & AHC_DT) != 0) { 5001 u_int sfunct; 5002 5003 sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE; 5004 ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE); 5005 ahc->suspend_state.optionmode = ahc_inb(ahc, OPTIONMODE); 5006 ahc_outb(ahc, SFUNCT, sfunct); 5007 ahc->suspend_state.crccontrol1 = ahc_inb(ahc, CRCCONTROL1); 5008 } 5009 5010 if ((ahc->features & AHC_MULTI_FUNC) != 0) 5011 ahc->suspend_state.scbbaddr = ahc_inb(ahc, SCBBADDR); 5012 5013 if ((ahc->features & AHC_ULTRA2) != 0) 5014 ahc->suspend_state.dff_thrsh = ahc_inb(ahc, DFF_THRSH); 5015 5016 ptr = ahc->suspend_state.scratch_ram; 5017 for (i = 0; i < 64; i++) 5018 *ptr++ = ahc_inb(ahc, SRAM_BASE + i); 5019 5020 if ((ahc->features & AHC_MORE_SRAM) != 0) { 5021 for (i = 0; i < 16; i++) 5022 *ptr++ = ahc_inb(ahc, TARG_OFFSET + i); 5023 } 5024 5025 ptr = ahc->suspend_state.btt; 5026 if ((ahc->flags & AHC_SCB_BTT) != 0) { 5027 for (i = 0;i < AHC_NUM_TARGETS; i++) { 5028 int j; 5029 5030 for (j = 0;j < AHC_NUM_LUNS; j++) { 5031 u_int tcl; 5032 5033 tcl = BUILD_TCL(i << 4, j); 5034 *ptr = ahc_index_busy_tcl(ahc, tcl); 5035 } 5036 } 5037 } 5038 ahc_shutdown(ahc); 5039 return (0); 5040 } 5041 5042 int 5043 ahc_resume(struct ahc_softc *ahc) 5044 { 5045 uint8_t *ptr; 5046 int i; 5047 5048 ahc_reset(ahc); 5049 5050 ahc_build_free_scb_list(ahc); 5051 5052 /* Restore volatile registers */ 5053 if ((ahc->features & AHC_TWIN) != 0) { 5054 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB); 5055 ahc_outb(ahc, SCSIID, ahc->our_id); 5056 ahc_outb(ahc, SCSISEQ, ahc->suspend_state.channel[1].scsiseq); 5057 ahc_outb(ahc, SXFRCTL0, ahc->suspend_state.channel[1].sxfrctl0); 5058 ahc_outb(ahc, SXFRCTL1, ahc->suspend_state.channel[1].sxfrctl1); 5059 ahc_outb(ahc, SIMODE0, ahc->suspend_state.channel[1].simode0); 5060 ahc_outb(ahc, SIMODE1, ahc->suspend_state.channel[1].simode1); 5061 ahc_outb(ahc, SELTIMER, ahc->suspend_state.channel[1].seltimer); 5062 ahc_outb(ahc, SEQCTL, ahc->suspend_state.channel[1].seqctl); 5063 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB); 5064 } 5065 ahc_outb(ahc, SCSISEQ, 
ahc->suspend_state.channel[0].scsiseq); 5066 ahc_outb(ahc, SXFRCTL0, ahc->suspend_state.channel[0].sxfrctl0); 5067 ahc_outb(ahc, SXFRCTL1, ahc->suspend_state.channel[0].sxfrctl1); 5068 ahc_outb(ahc, SIMODE0, ahc->suspend_state.channel[0].simode0); 5069 ahc_outb(ahc, SIMODE1, ahc->suspend_state.channel[0].simode1); 5070 ahc_outb(ahc, SELTIMER, ahc->suspend_state.channel[0].seltimer); 5071 ahc_outb(ahc, SEQCTL, ahc->suspend_state.channel[0].seqctl); 5072 if ((ahc->features & AHC_ULTRA2) != 0) 5073 ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id); 5074 else 5075 ahc_outb(ahc, SCSIID, ahc->our_id); 5076 5077 if ((ahc->chip & AHC_PCI) != 0) { 5078 ahc_outb(ahc, DSCOMMAND0, ahc->suspend_state.dscommand0); 5079 ahc_outb(ahc, DSPCISTATUS, ahc->suspend_state.dspcistatus); 5080 } 5081 5082 if ((ahc->features & AHC_DT) != 0) { 5083 u_int sfunct; 5084 5085 sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE; 5086 ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE); 5087 ahc_outb(ahc, OPTIONMODE, ahc->suspend_state.optionmode); 5088 ahc_outb(ahc, SFUNCT, sfunct); 5089 ahc_outb(ahc, CRCCONTROL1, ahc->suspend_state.crccontrol1); 5090 } 5091 5092 if ((ahc->features & AHC_MULTI_FUNC) != 0) 5093 ahc_outb(ahc, SCBBADDR, ahc->suspend_state.scbbaddr); 5094 5095 if ((ahc->features & AHC_ULTRA2) != 0) 5096 ahc_outb(ahc, DFF_THRSH, ahc->suspend_state.dff_thrsh); 5097 5098 ptr = ahc->suspend_state.scratch_ram; 5099 for (i = 0; i < 64; i++) 5100 ahc_outb(ahc, SRAM_BASE + i, *ptr++); 5101 5102 if ((ahc->features & AHC_MORE_SRAM) != 0) { 5103 for (i = 0; i < 16; i++) 5104 ahc_outb(ahc, TARG_OFFSET + i, *ptr++); 5105 } 5106 5107 ptr = ahc->suspend_state.btt; 5108 if ((ahc->flags & AHC_SCB_BTT) != 0) { 5109 for (i = 0;i < AHC_NUM_TARGETS; i++) { 5110 int j; 5111 5112 for (j = 0;j < AHC_NUM_LUNS; j++) { 5113 u_int tcl; 5114 5115 tcl = BUILD_TCL(i << 4, j); 5116 ahc_busy_tcl(ahc, tcl, *ptr); 5117 } 5118 } 5119 } 5120 return (0); 5121 } 5122 5123 /************************** Busy Target Table *********************************/ 5124 /* 5125 * Return the untagged transaction id for a given target/channel lun. 5126 * Optionally, clear the entry. 
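 * With an SCB-based busy target table (AHC_SCB_BTT) the entry lives in the hardware SCB selected by the lun; otherwise it is kept in the BUSY_TARGETS scratch array indexed by target offset.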
5127 */ 5128 u_int 5129 ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl) 5130 { 5131 u_int scbid; 5132 u_int target_offset; 5133 5134 if ((ahc->flags & AHC_SCB_BTT) != 0) { 5135 u_int saved_scbptr; 5136 5137 saved_scbptr = ahc_inb(ahc, SCBPTR); 5138 ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); 5139 scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl)); 5140 ahc_outb(ahc, SCBPTR, saved_scbptr); 5141 } else { 5142 target_offset = TCL_TARGET_OFFSET(tcl); 5143 scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset); 5144 } 5145 5146 return (scbid); 5147 } 5148 5149 void 5150 ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl) 5151 { 5152 u_int target_offset; 5153 5154 if ((ahc->flags & AHC_SCB_BTT) != 0) { 5155 u_int saved_scbptr; 5156 5157 saved_scbptr = ahc_inb(ahc, SCBPTR); 5158 ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); 5159 ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL); 5160 ahc_outb(ahc, SCBPTR, saved_scbptr); 5161 } else { 5162 target_offset = TCL_TARGET_OFFSET(tcl); 5163 ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL); 5164 } 5165 } 5166 5167 void 5168 ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid) 5169 { 5170 u_int target_offset; 5171 5172 if ((ahc->flags & AHC_SCB_BTT) != 0) { 5173 u_int saved_scbptr; 5174 5175 saved_scbptr = ahc_inb(ahc, SCBPTR); 5176 ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); 5177 ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid); 5178 ahc_outb(ahc, SCBPTR, saved_scbptr); 5179 } else { 5180 target_offset = TCL_TARGET_OFFSET(tcl); 5181 ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid); 5182 } 5183 } 5184 5185 /************************** SCB and SCB queue management **********************/ 5186 int 5187 ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target, 5188 char channel, int lun, u_int tag, role_t role) 5189 { 5190 int targ = SCB_GET_TARGET(ahc, scb); 5191 char chan = SCB_GET_CHANNEL(ahc, scb); 5192 int slun = SCB_GET_LUN(scb); 5193 int match; 5194 5195 match = ((chan == channel) || (channel == ALL_CHANNELS)); 5196 if (match != 0) 5197 match = ((targ == target) || (target == CAM_TARGET_WILDCARD)); 5198 if (match != 0) 5199 match = ((lun == slun) || (lun == CAM_LUN_WILDCARD)); 5200 if (match != 0) { 5201 #if 0 5202 #if AHC_TARGET_MODE 5203 int group; 5204 5205 group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code); 5206 if (role == ROLE_INITIATOR) { 5207 match = (group != XPT_FC_GROUP_TMODE) 5208 && ((tag == scb->hscb->tag) 5209 || (tag == SCB_LIST_NULL)); 5210 } else if (role == ROLE_TARGET) { 5211 match = (group == XPT_FC_GROUP_TMODE) 5212 && ((tag == scb->io_ctx->csio.tag_id) 5213 || (tag == SCB_LIST_NULL)); 5214 } 5215 #else /* !AHC_TARGET_MODE */ 5216 match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL)); 5217 #endif /* AHC_TARGET_MODE */ 5218 #endif 5219 } 5220 5221 return match; 5222 } 5223 5224 void 5225 ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb) 5226 { 5227 int target; 5228 char channel; 5229 int lun; 5230 5231 target = SCB_GET_TARGET(ahc, scb); 5232 lun = SCB_GET_LUN(scb); 5233 channel = SCB_GET_CHANNEL(ahc, scb); 5234 5235 ahc_search_qinfifo(ahc, target, channel, lun, 5236 /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN, 5237 CAM_REQUEUE_REQ, SEARCH_COMPLETE); 5238 5239 ahc_platform_freeze_devq(ahc, scb); 5240 } 5241 5242 void 5243 ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb) 5244 { 5245 struct scb *prev_scb; 5246 5247 prev_scb = NULL; 5248 if (ahc_qinfifo_count(ahc) != 0) { 5249 u_int prev_tag; 5250 uint8_t prev_pos; 5251 5252 prev_pos = ahc->qinfifonext - 1; 5253 prev_tag = 
ahc->qinfifo[prev_pos]; 5254 prev_scb = ahc_lookup_scb(ahc, prev_tag); 5255 } 5256 ahc_qinfifo_requeue(ahc, prev_scb, scb); 5257 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 5258 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); 5259 } else { 5260 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); 5261 } 5262 } 5263 5264 static void 5265 ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb, 5266 struct scb *scb) 5267 { 5268 if (prev_scb == NULL) { 5269 ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag); 5270 } else { 5271 prev_scb->hscb->next = scb->hscb->tag; 5272 ahc_sync_scb(ahc, prev_scb, 5273 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 5274 } 5275 ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag; 5276 scb->hscb->next = ahc->next_queued_scb->hscb->tag; 5277 ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 5278 } 5279 5280 static int 5281 ahc_qinfifo_count(struct ahc_softc *ahc) 5282 { 5283 uint8_t qinpos; 5284 uint8_t diff; 5285 5286 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 5287 qinpos = ahc_inb(ahc, SNSCB_QOFF); 5288 ahc_outb(ahc, SNSCB_QOFF, qinpos); 5289 } else 5290 qinpos = ahc_inb(ahc, QINPOS); 5291 diff = ahc->qinfifonext - qinpos; 5292 return (diff); 5293 } 5294 5295 int 5296 ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel, 5297 int lun, u_int tag, role_t role, uint32_t status, 5298 ahc_search_action action) 5299 { 5300 struct scb *scb; 5301 struct scb *prev_scb; 5302 uint8_t qinstart; 5303 uint8_t qinpos; 5304 uint8_t qintail; 5305 uint8_t next; 5306 uint8_t prev; 5307 uint8_t curscbptr; 5308 int found; 5309 int have_qregs; 5310 5311 qintail = ahc->qinfifonext; 5312 have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0; 5313 if (have_qregs) { 5314 qinstart = ahc_inb(ahc, SNSCB_QOFF); 5315 ahc_outb(ahc, SNSCB_QOFF, qinstart); 5316 } else 5317 qinstart = ahc_inb(ahc, QINPOS); 5318 qinpos = qinstart; 5319 found = 0; 5320 prev_scb = NULL; 5321 5322 if (action == SEARCH_COMPLETE) { 5323 /* 5324 * Don't attempt to run any queued untagged transactions 5325 * until we are done with the abort process. 5326 */ 5327 ahc_freeze_untagged_queues(ahc); 5328 } 5329 5330 /* 5331 * Start with an empty queue. Entries that are not chosen 5332 * for removal will be re-added to the queue as we go. 5333 */ 5334 ahc->qinfifonext = qinpos; 5335 ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag); 5336 5337 while (qinpos != qintail) { 5338 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]); 5339 if (scb == NULL) { 5340 printf("qinpos = %d, SCB index = %d\n", 5341 qinpos, ahc->qinfifo[qinpos]); 5342 panic("Loop 1\n"); 5343 } 5344 5345 if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) { 5346 /* 5347 * We found an scb that needs to be acted on. 
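 * SEARCH_COMPLETE finishes the command, SEARCH_REMOVE simply drops it from the queue, and SEARCH_COUNT requeues it untouched.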
5348 */ 5349 found++; 5350 switch (action) { 5351 case SEARCH_COMPLETE: 5352 { 5353 cam_status ostat; 5354 cam_status cstat; 5355 5356 ostat = ahc_get_transaction_status(scb); 5357 if (ostat == CAM_REQ_INPROG) 5358 ahc_set_transaction_status(scb, status); 5359 cstat = ahc_get_transaction_status(scb); 5360 if (cstat != CAM_REQ_CMP) 5361 ahc_freeze_scb(scb); 5362 if ((scb->flags & SCB_ACTIVE) == 0) 5363 printf("Inactive SCB in qinfifo\n"); 5364 ahc_done(ahc, scb); 5365 5366 /* FALLTHROUGH */ 5367 } 5368 case SEARCH_REMOVE: 5369 break; 5370 case SEARCH_COUNT: 5371 ahc_qinfifo_requeue(ahc, prev_scb, scb); 5372 prev_scb = scb; 5373 break; 5374 } 5375 } else { 5376 ahc_qinfifo_requeue(ahc, prev_scb, scb); 5377 prev_scb = scb; 5378 } 5379 qinpos++; 5380 } 5381 5382 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 5383 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); 5384 } else { 5385 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); 5386 } 5387 5388 if (action != SEARCH_COUNT 5389 && (found != 0) 5390 && (qinstart != ahc->qinfifonext)) { 5391 /* 5392 * The sequencer may be in the process of DMA'ing 5393 * down the SCB at the beginning of the queue. 5394 * This could be problematic if either the first, 5395 * or the second SCB is removed from the queue 5396 * (the first SCB includes a pointer to the "next" 5397 * SCB to DMA). If we have removed any entries, swap 5398 * the first element in the queue with the next HSCB 5399 * so the sequencer will notice that NEXT_QUEUED_SCB 5400 * has changed during its DMA attempt and will retry 5401 * the DMA. 5402 */ 5403 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]); 5404 5405 if (scb == NULL) { 5406 printf("found = %d, qinstart = %d, qinfifionext = %d\n", 5407 found, qinstart, ahc->qinfifonext); 5408 panic("First/Second Qinfifo fixup\n"); 5409 } 5410 /* 5411 * ahc_swap_with_next_hscb forces our next pointer to 5412 * point to the reserved SCB for future commands. Save 5413 * and restore our original next pointer to maintain 5414 * queue integrity. 5415 */ 5416 next = scb->hscb->next; 5417 ahc->scb_data->scbindex[scb->hscb->tag] = NULL; 5418 ahc_swap_with_next_hscb(ahc, scb); 5419 scb->hscb->next = next; 5420 ahc->qinfifo[qinstart] = scb->hscb->tag; 5421 5422 /* Tell the card about the new head of the qinfifo. */ 5423 ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag); 5424 5425 /* Fixup the tail "next" pointer. */ 5426 qintail = ahc->qinfifonext - 1; 5427 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]); 5428 scb->hscb->next = ahc->next_queued_scb->hscb->tag; 5429 } 5430 5431 /* 5432 * Search waiting for selection list. 5433 */ 5434 curscbptr = ahc_inb(ahc, SCBPTR); 5435 next = ahc_inb(ahc, WAITING_SCBH); /* Start at head of list. */ 5436 prev = SCB_LIST_NULL; 5437 5438 while (next != SCB_LIST_NULL) { 5439 uint8_t scb_index; 5440 5441 ahc_outb(ahc, SCBPTR, next); 5442 scb_index = ahc_inb(ahc, SCB_TAG); 5443 if (scb_index >= ahc->scb_data->numscbs) { 5444 printf("Waiting List inconsistency. " 5445 "SCB index == %d, yet numscbs == %d.", 5446 scb_index, ahc->scb_data->numscbs); 5447 ahc_dump_card_state(ahc); 5448 panic("for safety"); 5449 } 5450 scb = ahc_lookup_scb(ahc, scb_index); 5451 if (scb == NULL) { 5452 printf("scb_index = %d, next = %d\n", 5453 scb_index, next); 5454 panic("Waiting List traversal\n"); 5455 } 5456 if (ahc_match_scb(ahc, scb, target, channel, 5457 lun, SCB_LIST_NULL, role)) { 5458 /* 5459 * We found an scb that needs to be acted on. 
5460 */ 5461 found++; 5462 switch (action) { 5463 case SEARCH_COMPLETE: 5464 { 5465 cam_status ostat; 5466 cam_status cstat; 5467 5468 ostat = ahc_get_transaction_status(scb); 5469 if (ostat == CAM_REQ_INPROG) 5470 ahc_set_transaction_status(scb, status); 5471 cstat = ahc_get_transaction_status(scb); 5472 if (cstat != CAM_REQ_CMP) 5473 ahc_freeze_scb(scb); 5474 if ((scb->flags & SCB_ACTIVE) == 0) 5475 printf("Inactive SCB in " 5476 "Waiting List\n"); 5477 ahc_done(ahc, scb); 5478 /* FALLTHROUGH */ 5479 } 5480 case SEARCH_REMOVE: 5481 next = ahc_rem_wscb(ahc, next, prev); 5482 break; 5483 case SEARCH_COUNT: 5484 prev = next; 5485 next = ahc_inb(ahc, SCB_NEXT); 5486 break; 5487 } 5488 } else { 5489 5490 prev = next; 5491 next = ahc_inb(ahc, SCB_NEXT); 5492 } 5493 } 5494 ahc_outb(ahc, SCBPTR, curscbptr); 5495 5496 found += ahc_search_untagged_queues(ahc, /*ahc_io_ctx_t*/NULL, target, 5497 channel, lun, status, action); 5498 5499 if (action == SEARCH_COMPLETE) 5500 ahc_release_untagged_queues(ahc); 5501 return (found); 5502 } 5503 5504 int 5505 ahc_search_untagged_queues(struct ahc_softc *ahc, 5506 struct scsipi_xfer *xs, int target, char channel, int lun, 5507 uint32_t status, ahc_search_action action) 5508 { 5509 struct scb *scb; 5510 int maxtarget; 5511 int found; 5512 int i; 5513 5514 if (action == SEARCH_COMPLETE) { 5515 /* 5516 * Don't attempt to run any queued untagged transactions 5517 * until we are done with the abort process. 5518 */ 5519 ahc_freeze_untagged_queues(ahc); 5520 } 5521 5522 found = 0; 5523 i = 0; 5524 if ((ahc->flags & AHC_SCB_BTT) == 0) { 5525 5526 maxtarget = 16; 5527 if (target != CAM_TARGET_WILDCARD) { 5528 5529 i = target; 5530 if (channel == 'B') 5531 i += 8; 5532 maxtarget = i + 1; 5533 } 5534 } else { 5535 maxtarget = 0; 5536 } 5537 5538 for (; i < maxtarget; i++) { 5539 struct scb_tailq *untagged_q; 5540 struct scb *next_scb; 5541 5542 untagged_q = &(ahc->untagged_queues[i]); 5543 next_scb = TAILQ_FIRST(untagged_q); 5544 while (next_scb != NULL) { 5545 5546 scb = next_scb; 5547 next_scb = TAILQ_NEXT(scb, links.tqe); 5548 5549 /* 5550 * The head of the list may be the currently 5551 * active untagged command for a device. 5552 * We're only searching for commands that 5553 * have not been started. A transaction 5554 * marked active but still in the qinfifo 5555 * is removed by the qinfifo scanning code 5556 * above. 5557 */ 5558 if ((scb->flags & SCB_ACTIVE) != 0) 5559 continue; 5560 5561 if (ahc_match_scb(ahc, scb, target, channel, lun, 5562 SCB_LIST_NULL, ROLE_INITIATOR) == 0 5563 /*|| (ctx != NULL && ctx != scb->io_ctx)*/) 5564 continue; 5565 5566 /* 5567 * We found an scb that needs to be acted on. 
5568 */ 5569 found++; 5570 switch (action) { 5571 case SEARCH_COMPLETE: 5572 { 5573 cam_status ostat; 5574 cam_status cstat; 5575 5576 ostat = ahc_get_transaction_status(scb); 5577 if (ostat == CAM_REQ_INPROG) 5578 ahc_set_transaction_status(scb, status); 5579 cstat = ahc_get_transaction_status(scb); 5580 if (cstat != CAM_REQ_CMP) 5581 ahc_freeze_scb(scb); 5582 if ((scb->flags & SCB_ACTIVE) == 0) 5583 printf("Inactive SCB in untaggedQ\n"); 5584 ahc_done(ahc, scb); 5585 break; 5586 } 5587 case SEARCH_REMOVE: 5588 scb->flags &= ~SCB_UNTAGGEDQ; 5589 TAILQ_REMOVE(untagged_q, scb, links.tqe); 5590 break; 5591 case SEARCH_COUNT: 5592 break; 5593 } 5594 } 5595 } 5596 5597 if (action == SEARCH_COMPLETE) 5598 ahc_release_untagged_queues(ahc); 5599 return (found); 5600 } 5601 5602 int 5603 ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel, 5604 int lun, u_int tag, int stop_on_first, int remove, 5605 int save_state) 5606 { 5607 struct scb *scbp; 5608 u_int next; 5609 u_int prev; 5610 u_int count; 5611 u_int active_scb; 5612 5613 count = 0; 5614 next = ahc_inb(ahc, DISCONNECTED_SCBH); 5615 prev = SCB_LIST_NULL; 5616 5617 if (save_state) { 5618 /* restore this when we're done */ 5619 active_scb = ahc_inb(ahc, SCBPTR); 5620 } else 5621 /* Silence compiler */ 5622 active_scb = SCB_LIST_NULL; 5623 5624 while (next != SCB_LIST_NULL) { 5625 u_int scb_index; 5626 5627 ahc_outb(ahc, SCBPTR, next); 5628 scb_index = ahc_inb(ahc, SCB_TAG); 5629 if (scb_index >= ahc->scb_data->numscbs) { 5630 printf("Disconnected List inconsistency. " 5631 "SCB index == %d, yet numscbs == %d.", 5632 scb_index, ahc->scb_data->numscbs); 5633 ahc_dump_card_state(ahc); 5634 panic("for safety"); 5635 } 5636 5637 if (next == prev) { 5638 panic("Disconnected List Loop. " 5639 "cur SCBPTR == %x, prev SCBPTR == %x.", 5640 next, prev); 5641 } 5642 scbp = ahc_lookup_scb(ahc, scb_index); 5643 if (ahc_match_scb(ahc, scbp, target, channel, lun, 5644 tag, ROLE_INITIATOR)) { 5645 count++; 5646 if (remove) { 5647 next = 5648 ahc_rem_scb_from_disc_list(ahc, prev, next); 5649 } else { 5650 prev = next; 5651 next = ahc_inb(ahc, SCB_NEXT); 5652 } 5653 if (stop_on_first) 5654 break; 5655 } else { 5656 prev = next; 5657 next = ahc_inb(ahc, SCB_NEXT); 5658 } 5659 } 5660 if (save_state) 5661 ahc_outb(ahc, SCBPTR, active_scb); 5662 return (count); 5663 } 5664 5665 /* 5666 * Remove an SCB from the on chip list of disconnected transactions. 5667 * This is empty/unused if we are not performing SCB paging. 5668 */ 5669 static u_int 5670 ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr) 5671 { 5672 u_int next; 5673 5674 ahc_outb(ahc, SCBPTR, scbptr); 5675 next = ahc_inb(ahc, SCB_NEXT); 5676 5677 ahc_outb(ahc, SCB_CONTROL, 0); 5678 5679 ahc_add_curscb_to_free_list(ahc); 5680 5681 if (prev != SCB_LIST_NULL) { 5682 ahc_outb(ahc, SCBPTR, prev); 5683 ahc_outb(ahc, SCB_NEXT, next); 5684 } else 5685 ahc_outb(ahc, DISCONNECTED_SCBH, next); 5686 5687 return (next); 5688 } 5689 5690 /* 5691 * Add the SCB as selected by SCBPTR onto the on chip list of 5692 * free hardware SCBs. This list is empty/unused if we are not 5693 * performing SCB paging. 5694 */ 5695 static void 5696 ahc_add_curscb_to_free_list(struct ahc_softc *ahc) 5697 { 5698 /* 5699 * Invalidate the tag so that our abort 5700 * routines don't think it's active. 
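 * The SCB is then linked onto the FREE_SCBH list, but only when SCB paging is in use; otherwise the free list is not maintained.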
5701 */ 5702 ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL); 5703 5704 if ((ahc->flags & AHC_PAGESCBS) != 0) { 5705 ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH)); 5706 ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR)); 5707 } 5708 } 5709 5710 /* 5711 * Manipulate the waiting for selection list and return the 5712 * scb that follows the one that we remove. 5713 */ 5714 static u_int 5715 ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev) 5716 { 5717 u_int curscb, next; 5718 5719 /* 5720 * Select the SCB we want to abort and 5721 * pull the next pointer out of it. 5722 */ 5723 curscb = ahc_inb(ahc, SCBPTR); 5724 ahc_outb(ahc, SCBPTR, scbpos); 5725 next = ahc_inb(ahc, SCB_NEXT); 5726 5727 /* Clear the necessary fields */ 5728 ahc_outb(ahc, SCB_CONTROL, 0); 5729 5730 ahc_add_curscb_to_free_list(ahc); 5731 5732 /* update the waiting list */ 5733 if (prev == SCB_LIST_NULL) { 5734 /* First in the list */ 5735 ahc_outb(ahc, WAITING_SCBH, next); 5736 5737 /* 5738 * Ensure we aren't attempting to perform 5739 * selection for this entry. 5740 */ 5741 ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); 5742 } else { 5743 /* 5744 * Select the scb that pointed to us 5745 * and update its next pointer. 5746 */ 5747 ahc_outb(ahc, SCBPTR, prev); 5748 ahc_outb(ahc, SCB_NEXT, next); 5749 } 5750 5751 /* 5752 * Point us back at the original scb position. 5753 */ 5754 ahc_outb(ahc, SCBPTR, curscb); 5755 return next; 5756 } 5757 5758 /******************************** Error Handling ******************************/ 5759 /* 5760 * Abort all SCBs that match the given description (target/channel/lun/tag), 5761 * setting their status to the passed in status if the status has not already 5762 * been modified from CAM_REQ_INPROG. This routine assumes that the sequencer 5763 * is paused before it is called. 5764 */ 5765 int 5766 ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel, 5767 int lun, u_int tag, role_t role, uint32_t status) 5768 { 5769 struct scb *scbp; 5770 struct scb *scbp_next; 5771 u_int active_scb; 5772 int i, j; 5773 int maxtarget; 5774 int minlun; 5775 int maxlun; 5776 5777 int found; 5778 5779 /* 5780 * Don't attempt to run any queued untagged transactions 5781 * until we are done with the abort process. 5782 */ 5783 ahc_freeze_untagged_queues(ahc); 5784 5785 /* restore this when we're done */ 5786 active_scb = ahc_inb(ahc, SCBPTR); 5787 5788 found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL, 5789 role, CAM_REQUEUE_REQ, SEARCH_COMPLETE); 5790 5791 /* 5792 * Clean out the busy target table for any untagged commands. 5793 */ 5794 i = 0; 5795 maxtarget = 16; 5796 if (target != CAM_TARGET_WILDCARD) { 5797 i = target; 5798 if (channel == 'B') 5799 i += 8; 5800 maxtarget = i + 1; 5801 } 5802 5803 if (lun == CAM_LUN_WILDCARD) { 5804 5805 /* 5806 * Unless we are using an SCB based 5807 * busy targets table, there is only 5808 * one table entry for all luns of 5809 * a target. 
5810 */ 5811 minlun = 0; 5812 maxlun = 1; 5813 if ((ahc->flags & AHC_SCB_BTT) != 0) 5814 maxlun = AHC_NUM_LUNS; 5815 } else { 5816 minlun = lun; 5817 maxlun = lun + 1; 5818 } 5819 5820 if (role != ROLE_TARGET) { 5821 for (;i < maxtarget; i++) { 5822 for (j = minlun;j < maxlun; j++) { 5823 u_int scbid; 5824 u_int tcl; 5825 5826 tcl = BUILD_TCL(i << 4, j); 5827 scbid = ahc_index_busy_tcl(ahc, tcl); 5828 scbp = ahc_lookup_scb(ahc, scbid); 5829 if (scbp == NULL 5830 || ahc_match_scb(ahc, scbp, target, channel, 5831 lun, tag, role) == 0) 5832 continue; 5833 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j)); 5834 } 5835 } 5836 5837 /* 5838 * Go through the disconnected list and remove any entries we 5839 * have queued for completion, 0'ing their control byte too. 5840 * We save the active SCB and restore it ourselves, so there 5841 * is no reason for this search to restore it too. 5842 */ 5843 ahc_search_disc_list(ahc, target, channel, lun, tag, 5844 /*stop_on_first*/FALSE, /*remove*/TRUE, 5845 /*save_state*/FALSE); 5846 } 5847 5848 /* 5849 * Go through the hardware SCB array looking for commands that 5850 * were active but not on any list. In some cases, these remnants 5851 * might not still have mappings in the scbindex array (e.g. unexpected 5852 * bus free with the same scb queued for an abort). Don't hold this 5853 * against them. 5854 */ 5855 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 5856 u_int scbid; 5857 5858 ahc_outb(ahc, SCBPTR, i); 5859 scbid = ahc_inb(ahc, SCB_TAG); 5860 scbp = ahc_lookup_scb(ahc, scbid); 5861 if ((scbp == NULL && scbid != SCB_LIST_NULL) 5862 || (scbp != NULL 5863 && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role))) 5864 ahc_add_curscb_to_free_list(ahc); 5865 } 5866 5867 /* 5868 * Go through the pending CCB list and look for 5869 * commands for this target that are still active. 5870 * These are other tagged commands that were 5871 * disconnected when the reset occurred. 
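 * Each match is completed with the supplied status and frozen if it did not finish with CAM_REQ_CMP.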
 */
	scbp_next = LIST_FIRST(&ahc->pending_scbs);
	while (scbp_next != NULL) {
		scbp = scbp_next;
		scbp_next = LIST_NEXT(scbp, pending_links);
		if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) {
			cam_status ostat;

			ostat = ahc_get_transaction_status(scbp);
			if (ostat == CAM_REQ_INPROG)
				ahc_set_transaction_status(scbp, status);
			if (ahc_get_transaction_status(scbp) != CAM_REQ_CMP)
				ahc_freeze_scb(scbp);
			if ((scbp->flags & SCB_ACTIVE) == 0)
				printf("Inactive SCB on pending list\n");
			ahc_done(ahc, scbp);
			found++;
		}
	}
	ahc_outb(ahc, SCBPTR, active_scb);
	ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status);
	ahc_release_untagged_queues(ahc);
	return found;
}

static void
ahc_reset_current_bus(struct ahc_softc *ahc)
{
	uint8_t scsiseq;

	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST);
	scsiseq = ahc_inb(ahc, SCSISEQ);
	ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO);
	ahc_flush_device_writes(ahc);
	ahc_delay(AHC_BUSRESET_DELAY);
	/* Turn off the bus reset */
	ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO);

	ahc_clear_intstat(ahc);

	/* Re-enable reset interrupts */
	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST);
}

int
ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
{
	struct ahc_devinfo dinfo;
	u_int initiator, target, max_scsiid;
	u_int sblkctl;
	u_int scsiseq;
	u_int simode1;
	int found;
	int restart_needed;
	char cur_channel;

	ahc->pending_device = NULL;

	ahc_compile_devinfo(&dinfo,
			    CAM_TARGET_WILDCARD,
			    CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD,
			    channel, ROLE_UNKNOWN);
	ahc_pause(ahc);

	/* Make sure the sequencer is in a safe location. */
	ahc_clear_critical_section(ahc);

	/*
	 * Run our command complete fifos to ensure that we perform
	 * completion processing on any commands that 'completed'
	 * before the reset occurred.
	 */
	ahc_run_qoutfifo(ahc);
#if AHC_TARGET_MODE
	/*
	 * XXX - In Twin mode, the tqinfifo may have commands
	 *	 for an unaffected channel in it.  However, if
	 *	 we have run out of ATIO resources to drain that
	 *	 queue, we may not get them all out here.  Further,
	 *	 the blocked transactions for the reset channel
	 *	 should just be killed off, irrespective of whether
	 *	 we are blocked on ATIO resources.  Write a routine
	 *	 to compact the tqinfifo appropriately.
	 */
	if ((ahc->flags & AHC_TARGETROLE) != 0) {
		ahc_run_tqinfifo(ahc, /*paused*/TRUE);
	}
#endif

	/*
	 * Reset the bus if we are initiating this reset
	 */
	sblkctl = ahc_inb(ahc, SBLKCTL);
	cur_channel = 'A';
	if ((ahc->features & AHC_TWIN) != 0
	 && ((sblkctl & SELBUSB) != 0))
		cur_channel = 'B';
	scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
	if (cur_channel != channel) {
		/* Case 1: Command for another bus is active
		 * Stealthily reset the other bus without
		 * upsetting the current bus.
		 */
		ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB);
		simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
#if AHC_TARGET_MODE
		/*
		 * Bus resets clear ENSELI, so we cannot
		 * defer re-enabling bus reset interrupts
		 * if we are in target mode.
 */
		if ((ahc->flags & AHC_TARGETROLE) != 0)
			simode1 |= ENSCSIRST;
#endif
		ahc_outb(ahc, SIMODE1, simode1);
		if (initiate_reset)
			ahc_reset_current_bus(ahc);
		ahc_clear_intstat(ahc);
		ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
		ahc_outb(ahc, SBLKCTL, sblkctl);
		restart_needed = FALSE;
	} else {
		/* Case 2: A command from this bus is active or we're idle */
		simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
#if AHC_TARGET_MODE
		/*
		 * Bus resets clear ENSELI, so we cannot
		 * defer re-enabling bus reset interrupts
		 * if we are in target mode.
		 */
		if ((ahc->flags & AHC_TARGETROLE) != 0)
			simode1 |= ENSCSIRST;
#endif
		ahc_outb(ahc, SIMODE1, simode1);
		if (initiate_reset)
			ahc_reset_current_bus(ahc);
		ahc_clear_intstat(ahc);
		ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
		restart_needed = TRUE;
	}

	/*
	 * Clean up all the state information for the
	 * pending transactions on this bus.
	 */
	found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel,
			       CAM_LUN_WILDCARD, SCB_LIST_NULL,
			       ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);

	max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7;

#ifdef AHC_TARGET_MODE
	/*
	 * Send an immediate notify ccb to all target mode peripheral
	 * drivers affected by this action.
	 */
	for (target = 0; target <= max_scsiid; target++) {
		struct ahc_tmode_tstate* tstate;
		u_int lun;

		tstate = ahc->enabled_targets[target];
		if (tstate == NULL)
			continue;
		for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
			struct ahc_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[lun];
			if (lstate == NULL)
				continue;

			ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD,
					       EVENT_TYPE_BUS_RESET, /*arg*/0);
			ahc_send_lstate_events(ahc, lstate);
		}
	}
#endif
	/*
	 * Revert to async/narrow transfers until we renegotiate.
	 */
	for (target = 0; target <= max_scsiid; target++) {

		if (ahc->enabled_targets[target] == NULL)
			continue;
		for (initiator = 0; initiator <= max_scsiid; initiator++) {
			struct ahc_devinfo devinfo;

			ahc_compile_devinfo(&devinfo, target, initiator,
					    CAM_LUN_WILDCARD,
					    channel, ROLE_UNKNOWN);
			ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
				      AHC_TRANS_CUR, /*paused*/TRUE);
			ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
					 /*period*/0, /*offset*/0,
					 /*ppr_options*/0, AHC_TRANS_CUR,
					 /*paused*/TRUE);
		}
	}

	if (restart_needed)
		ahc_restart(ahc);
	else
		ahc_unpause(ahc);
	return found;
}


/***************************** Residual Processing ****************************/
/*
 * Calculate the residual for a just completed SCB.
 */
void
ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb)
{
	struct hardware_scb *hscb;
	struct status_pkt *spkt;
	uint32_t sgptr;
	uint32_t resid_sgptr;
	uint32_t resid;

	/*
	 * 5 cases.
	 * 1) No residual.
	 *    SG_RESID_VALID clear in sgptr.
	 * 2) Transferless command
	 * 3) Never performed any transfers.
	 *    sgptr has SG_FULL_RESID set.
	 * 4) No residual but target did not
	 *    save data pointers after the
	 *    last transfer, so sgptr was
	 *    never updated.
	 * 5) We have a partial residual.
6104 * Use residual_sgptr to determine 6105 * where we are. 6106 */ 6107 6108 hscb = scb->hscb; 6109 sgptr = ahc_le32toh(hscb->sgptr); 6110 if ((sgptr & SG_RESID_VALID) == 0) 6111 /* Case 1 */ 6112 return; 6113 sgptr &= ~SG_RESID_VALID; 6114 6115 if ((sgptr & SG_LIST_NULL) != 0) 6116 /* Case 2 */ 6117 return; 6118 6119 spkt = &hscb->shared_data.status; 6120 resid_sgptr = ahc_le32toh(spkt->residual_sg_ptr); 6121 if ((sgptr & SG_FULL_RESID) != 0) { 6122 /* Case 3 */ 6123 resid = ahc_get_transfer_length(scb); 6124 } else if ((resid_sgptr & SG_LIST_NULL) != 0) { 6125 /* Case 4 */ 6126 return; 6127 } else if ((resid_sgptr & ~SG_PTR_MASK) != 0) { 6128 panic("Bogus resid sgptr value 0x%x\n", resid_sgptr); 6129 } else { 6130 struct ahc_dma_seg *sg; 6131 6132 /* 6133 * Remainder of the SG where the transfer 6134 * stopped. 6135 */ 6136 resid = ahc_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK; 6137 sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK); 6138 6139 /* The residual sg_ptr always points to the next sg */ 6140 sg--; 6141 6142 /* 6143 * Add up the contents of all residual 6144 * SG segments that are after the SG where 6145 * the transfer stopped. 6146 */ 6147 while ((ahc_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) { 6148 sg++; 6149 resid += ahc_le32toh(sg->len) & AHC_SG_LEN_MASK; 6150 } 6151 } 6152 if ((scb->flags & SCB_SENSE) == 0) 6153 ahc_set_residual(scb, resid); 6154 else 6155 ahc_set_sense_residual(scb, resid); 6156 6157 #ifdef AHC_DEBUG 6158 if ((ahc_debug & AHC_SHOW_MISC) != 0) { 6159 ahc_print_path(ahc, scb); 6160 printf("Handled %sResidual of %d bytes\n", 6161 (scb->flags & SCB_SENSE) ? "Sense " : "", resid); 6162 } 6163 #endif 6164 } 6165 6166 /******************************* Target Mode **********************************/ 6167 #ifdef AHC_TARGET_MODE 6168 /* 6169 * Add a target mode event to this lun's queue 6170 */ 6171 static void 6172 ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate, 6173 u_int initiator_id, u_int event_type, u_int event_arg) 6174 { 6175 struct ahc_tmode_event *event; 6176 int pending; 6177 6178 xpt_freeze_devq(lstate->path, /*count*/1); 6179 if (lstate->event_w_idx >= lstate->event_r_idx) 6180 pending = lstate->event_w_idx - lstate->event_r_idx; 6181 else 6182 pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1 6183 - (lstate->event_r_idx - lstate->event_w_idx); 6184 6185 if (event_type == EVENT_TYPE_BUS_RESET 6186 || event_type == MSG_BUS_DEV_RESET) { 6187 /* 6188 * Any earlier events are irrelevant, so reset our buffer. 6189 * This has the effect of allowing us to deal with reset 6190 * floods (an external device holding down the reset line) 6191 * without losing the event that is really interesting. 
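		 * The device queue freezes held on behalf of the
		 * discarded events are released along with the buffer.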
6192 */ 6193 lstate->event_r_idx = 0; 6194 lstate->event_w_idx = 0; 6195 xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE); 6196 } 6197 6198 if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) { 6199 xpt_print_path(lstate->path); 6200 printf("immediate event %x:%x lost\n", 6201 lstate->event_buffer[lstate->event_r_idx].event_type, 6202 lstate->event_buffer[lstate->event_r_idx].event_arg); 6203 lstate->event_r_idx++; 6204 if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE) 6205 lstate->event_r_idx = 0; 6206 xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE); 6207 } 6208 6209 event = &lstate->event_buffer[lstate->event_w_idx]; 6210 event->initiator_id = initiator_id; 6211 event->event_type = event_type; 6212 event->event_arg = event_arg; 6213 lstate->event_w_idx++; 6214 if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE) 6215 lstate->event_w_idx = 0; 6216 } 6217 6218 /* 6219 * Send any target mode events queued up waiting 6220 * for immediate notify resources. 6221 */ 6222 void 6223 ahc_send_lstate_events(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate) 6224 { 6225 struct ccb_hdr *ccbh; 6226 struct ccb_immed_notify *inot; 6227 6228 while (lstate->event_r_idx != lstate->event_w_idx 6229 && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) { 6230 struct ahc_tmode_event *event; 6231 6232 event = &lstate->event_buffer[lstate->event_r_idx]; 6233 SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle); 6234 inot = (struct ccb_immed_notify *)ccbh; 6235 switch (event->event_type) { 6236 case EVENT_TYPE_BUS_RESET: 6237 ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN; 6238 break; 6239 default: 6240 ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; 6241 inot->message_args[0] = event->event_type; 6242 inot->message_args[1] = event->event_arg; 6243 break; 6244 } 6245 inot->initiator_id = event->initiator_id; 6246 inot->sense_len = 0; 6247 xpt_done((union ccb *)inot); 6248 lstate->event_r_idx++; 6249 if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE) 6250 lstate->event_r_idx = 0; 6251 } 6252 } 6253 #endif 6254 6255 /******************** Sequencer Program Patching/Download *********************/ 6256 6257 #ifdef AHC_DUMP_SEQ 6258 void 6259 ahc_dumpseq(struct ahc_softc* ahc) 6260 { 6261 int i; 6262 int max_prog; 6263 6264 if ((ahc->chip & AHC_BUS_MASK) < AHC_PCI) 6265 max_prog = 448; 6266 else if ((ahc->features & AHC_ULTRA2) != 0) 6267 max_prog = 768; 6268 else 6269 max_prog = 512; 6270 6271 ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); 6272 ahc_outb(ahc, SEQADDR0, 0); 6273 ahc_outb(ahc, SEQADDR1, 0); 6274 for (i = 0; i < max_prog; i++) { 6275 uint8_t ins_bytes[4]; 6276 6277 ahc_insb(ahc, SEQRAM, ins_bytes, 4); 6278 printf("0x%08x\n", ins_bytes[0] << 24 6279 | ins_bytes[1] << 16 6280 | ins_bytes[2] << 8 6281 | ins_bytes[3]); 6282 } 6283 } 6284 #endif 6285 6286 static void 6287 ahc_loadseq(struct ahc_softc *ahc) 6288 { 6289 struct cs cs_table[num_critical_sections]; 6290 u_int begin_set[num_critical_sections]; 6291 u_int end_set[num_critical_sections]; 6292 struct patch *cur_patch; 6293 u_int cs_count; 6294 u_int cur_cs; 6295 u_int i; 6296 int downloaded; 6297 u_int skip_addr; 6298 u_int sg_prefetch_cnt; 6299 uint8_t download_consts[7]; 6300 6301 /* 6302 * Start out with 0 critical sections 6303 * that apply to this firmware load. 
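	 * Critical section boundaries are translated from source
	 * program addresses to downloaded addresses as patched-out
	 * instructions are skipped below.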
 */
	cs_count = 0;
	cur_cs = 0;
	memset(begin_set, 0, sizeof(begin_set));
	memset(end_set, 0, sizeof(end_set));

	/* Setup downloadable constant table */
	download_consts[QOUTFIFO_OFFSET] = 0;
	if (ahc->targetcmds != NULL)
		download_consts[QOUTFIFO_OFFSET] += 32;
	download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1;
	download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1;
	download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1);
	sg_prefetch_cnt = ahc->pci_cachesize;
	if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg)))
		sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg);
	download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
	download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1);
	download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1);

	cur_patch = patches;
	downloaded = 0;
	skip_addr = 0;
	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
	ahc_outb(ahc, SEQADDR0, 0);
	ahc_outb(ahc, SEQADDR1, 0);

	for (i = 0; i < sizeof(seqprog)/4; i++) {
		if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) {
			/*
			 * Don't download this instruction as it
			 * is in a patch that was removed.
			 */
			continue;
		}
		/*
		 * Move through the CS table until we find a CS
		 * that might apply to this instruction.
		 */
		for (; cur_cs < num_critical_sections; cur_cs++) {
			if (critical_sections[cur_cs].end <= i) {
				if (begin_set[cs_count] == TRUE
				 && end_set[cs_count] == FALSE) {
					cs_table[cs_count].end = downloaded;
					end_set[cs_count] = TRUE;
					cs_count++;
				}
				continue;
			}
			if (critical_sections[cur_cs].begin <= i
			 && begin_set[cs_count] == FALSE) {
				cs_table[cs_count].begin = downloaded;
				begin_set[cs_count] = TRUE;
			}
			break;
		}
		ahc_download_instr(ahc, i, download_consts);
		downloaded++;
	}

	ahc->num_critical_sections = cs_count;
	if (cs_count != 0) {

		cs_count *= sizeof(struct cs);
		ahc->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT);
		if (ahc->critical_sections == NULL)
			panic("ahc_loadseq: Could not malloc");
		memcpy(ahc->critical_sections, cs_table, cs_count);
	}
	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);
	ahc_restart(ahc);

	if (bootverbose) {
		printf(" %d instructions downloaded\n", downloaded);
		printf("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n",
		       ahc_name(ahc), ahc->features, ahc->bugs, ahc->flags);
	}
}

static int
ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch,
		u_int start_instr, u_int *skip_addr)
{
	struct patch *cur_patch;
	struct patch *last_patch;
	u_int num_patches;

	num_patches = sizeof(patches)/sizeof(struct patch);
	last_patch = &patches[num_patches];
	cur_patch = *start_patch;

	while (cur_patch < last_patch && start_instr == cur_patch->begin) {

		if (cur_patch->patch_func(ahc) == 0) {

			/* Start rejecting code */
			*skip_addr = start_instr + cur_patch->skip_instr;
			cur_patch += cur_patch->skip_patch;
		} else {
			/* Accepted this patch.  Advance to the next
			 * one and wait for our instruction pointer to
			 * hit this point.
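			 * Several patches may begin at the same
			 * instruction; each is evaluated in turn.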
6406 */ 6407 cur_patch++; 6408 } 6409 } 6410 6411 *start_patch = cur_patch; 6412 if (start_instr < *skip_addr) 6413 /* Still skipping */ 6414 return (0); 6415 6416 return (1); 6417 } 6418 6419 static void 6420 ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts) 6421 { 6422 union ins_formats instr; 6423 struct ins_format1 *fmt1_ins; 6424 struct ins_format3 *fmt3_ins; 6425 u_int opcode; 6426 6427 /* 6428 * The firmware is always compiled into a little endian format. 6429 */ 6430 instr.integer = ahc_le32toh(*(uint32_t*)&seqprog[instrptr * 4]); 6431 6432 fmt1_ins = &instr.format1; 6433 fmt3_ins = NULL; 6434 6435 /* Pull the opcode */ 6436 opcode = instr.format1.opcode; 6437 switch (opcode) { 6438 case AIC_OP_JMP: 6439 case AIC_OP_JC: 6440 case AIC_OP_JNC: 6441 case AIC_OP_CALL: 6442 case AIC_OP_JNE: 6443 case AIC_OP_JNZ: 6444 case AIC_OP_JE: 6445 case AIC_OP_JZ: 6446 { 6447 struct patch *cur_patch; 6448 int address_offset; 6449 u_int address; 6450 u_int skip_addr; 6451 u_int i; 6452 6453 fmt3_ins = &instr.format3; 6454 address_offset = 0; 6455 address = fmt3_ins->address; 6456 cur_patch = patches; 6457 skip_addr = 0; 6458 for (i = 0; i < address;) { 6459 ahc_check_patch(ahc, &cur_patch, i, &skip_addr); 6460 6461 if (skip_addr > i) { 6462 int end_addr; 6463 6464 end_addr = MIN(address, skip_addr); 6465 address_offset += end_addr - i; 6466 i = skip_addr; 6467 } else { 6468 i++; 6469 } 6470 } 6471 address -= address_offset; 6472 fmt3_ins->address = address; 6473 /* FALLTHROUGH */ 6474 } 6475 case AIC_OP_OR: 6476 case AIC_OP_AND: 6477 case AIC_OP_XOR: 6478 case AIC_OP_ADD: 6479 case AIC_OP_ADC: 6480 case AIC_OP_BMOV: 6481 if (fmt1_ins->parity != 0) { 6482 fmt1_ins->immediate = dconsts[fmt1_ins->immediate]; 6483 } 6484 fmt1_ins->parity = 0; 6485 if ((ahc->features & AHC_CMD_CHAN) == 0 6486 && opcode == AIC_OP_BMOV) { 6487 /* 6488 * Block move was added at the same time 6489 * as the command channel. Verify that 6490 * this is only a move of a single element 6491 * and convert the BMOV to a MOV 6492 * (AND with an immediate of FF). 
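			 * ANDing with an immediate of 0xFF copies the
			 * source to the destination unchanged.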
6493 */ 6494 if (fmt1_ins->immediate != 1) 6495 panic("%s: BMOV not supported\n", 6496 ahc_name(ahc)); 6497 fmt1_ins->opcode = AIC_OP_AND; 6498 fmt1_ins->immediate = 0xff; 6499 } 6500 /* FALLTHROUGH */ 6501 case AIC_OP_ROL: 6502 if ((ahc->features & AHC_ULTRA2) != 0) { 6503 int i, count; 6504 6505 /* Calculate odd parity for the instruction */ 6506 for (i = 0, count = 0; i < 31; i++) { 6507 uint32_t mask; 6508 6509 mask = 0x01 << i; 6510 if ((instr.integer & mask) != 0) 6511 count++; 6512 } 6513 if ((count & 0x01) == 0) 6514 instr.format1.parity = 1; 6515 } else { 6516 /* Compress the instruction for older sequencers */ 6517 if (fmt3_ins != NULL) { 6518 instr.integer = 6519 fmt3_ins->immediate 6520 | (fmt3_ins->source << 8) 6521 | (fmt3_ins->address << 16) 6522 | (fmt3_ins->opcode << 25); 6523 } else { 6524 instr.integer = 6525 fmt1_ins->immediate 6526 | (fmt1_ins->source << 8) 6527 | (fmt1_ins->destination << 16) 6528 | (fmt1_ins->ret << 24) 6529 | (fmt1_ins->opcode << 25); 6530 } 6531 } 6532 /* The sequencer is a little endian CPU */ 6533 instr.integer = ahc_htole32(instr.integer); 6534 ahc_outsb(ahc, SEQRAM, instr.bytes, 4); 6535 break; 6536 default: 6537 panic("Unknown opcode encountered in seq program"); 6538 break; 6539 } 6540 } 6541 6542 int 6543 ahc_print_register(ahc_reg_parse_entry_t *table, u_int num_entries, 6544 const char *name, u_int address, u_int value, 6545 u_int *cur_column, u_int wrap_point) 6546 { 6547 size_t printed; 6548 u_int printed_mask; 6549 char line[1024]; 6550 6551 line[0] = 0; 6552 6553 if (cur_column != NULL && *cur_column >= wrap_point) { 6554 printf("\n"); 6555 *cur_column = 0; 6556 } 6557 printed = snprintf(line, sizeof(line), "%s[0x%x]", name, value); 6558 if (printed > sizeof(line)) 6559 printed = sizeof(line); 6560 if (table == NULL) { 6561 printed += snprintf(&line[printed], (sizeof line) - printed, 6562 " "); 6563 if (printed > sizeof(line)) 6564 printed = sizeof(line); 6565 printf("%s", line); 6566 if (cur_column != NULL) 6567 *cur_column += printed; 6568 return (printed); 6569 } 6570 printed_mask = 0; 6571 while (printed_mask != 0xFF) { 6572 int entry; 6573 6574 for (entry = 0; entry < num_entries; entry++) { 6575 if (((value & table[entry].mask) 6576 != table[entry].value) 6577 || ((printed_mask & table[entry].mask) 6578 == table[entry].mask)) 6579 continue; 6580 if (printed > sizeof(line)) 6581 printed = sizeof(line); 6582 printed += snprintf(&line[printed], 6583 (sizeof line) - printed, "%s%s", 6584 printed_mask == 0 ? 
":(" : "|", 6585 table[entry].name); 6586 printed_mask |= table[entry].mask; 6587 6588 break; 6589 } 6590 if (entry >= num_entries) 6591 break; 6592 } 6593 if (printed > sizeof(line)) 6594 printed = sizeof(line); 6595 if (printed_mask != 0) 6596 printed += snprintf(&line[printed], 6597 (sizeof line) - printed, ") "); 6598 else 6599 printed += snprintf(&line[printed], 6600 (sizeof line) - printed, " "); 6601 if (cur_column != NULL) 6602 *cur_column += printed; 6603 printf("%s", line); 6604 6605 return (printed); 6606 } 6607 6608 void 6609 ahc_dump_card_state(struct ahc_softc *ahc) 6610 { 6611 struct scb *scb; 6612 struct scb_tailq *untagged_q; 6613 u_int cur_col; 6614 int paused; 6615 int target; 6616 int maxtarget; 6617 int i; 6618 uint8_t last_phase; 6619 uint8_t qinpos; 6620 uint8_t qintail; 6621 uint8_t qoutpos; 6622 uint8_t scb_index; 6623 uint8_t saved_scbptr; 6624 6625 if (ahc_is_paused(ahc)) { 6626 paused = 1; 6627 } else { 6628 paused = 0; 6629 ahc_pause(ahc); 6630 } 6631 6632 saved_scbptr = ahc_inb(ahc, SCBPTR); 6633 last_phase = ahc_inb(ahc, LASTPHASE); 6634 printf(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n" 6635 "%s: Dumping Card State %s, at SEQADDR 0x%x\n", 6636 ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg, 6637 ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); 6638 if (paused) 6639 printf("Card was paused\n"); 6640 printf("ACCUM = 0x%x, SINDEX = 0x%x, DINDEX = 0x%x, ARG_2 = 0x%x\n", 6641 ahc_inb(ahc, ACCUM), ahc_inb(ahc, SINDEX), ahc_inb(ahc, DINDEX), 6642 ahc_inb(ahc, ARG_2)); 6643 printf("HCNT = 0x%x SCBPTR = 0x%x\n", ahc_inb(ahc, HCNT), 6644 ahc_inb(ahc, SCBPTR)); 6645 cur_col = 0; 6646 if ((ahc->features & AHC_DT) != 0) 6647 ahc_scsiphase_print(ahc_inb(ahc, SCSIPHASE), &cur_col, 50); 6648 ahc_scsisigi_print(ahc_inb(ahc, SCSISIGI), &cur_col, 50); 6649 ahc_error_print(ahc_inb(ahc, ERROR), &cur_col, 50); 6650 ahc_scsibusl_print(ahc_inb(ahc, SCSIBUSL), &cur_col, 50); 6651 ahc_lastphase_print(ahc_inb(ahc, LASTPHASE), &cur_col, 50); 6652 ahc_scsiseq_print(ahc_inb(ahc, SCSISEQ), &cur_col, 50); 6653 ahc_sblkctl_print(ahc_inb(ahc, SBLKCTL), &cur_col, 50); 6654 ahc_scsirate_print(ahc_inb(ahc, SCSIRATE), &cur_col, 50); 6655 ahc_seqctl_print(ahc_inb(ahc, SEQCTL), &cur_col, 50); 6656 ahc_seq_flags_print(ahc_inb(ahc, SEQ_FLAGS), &cur_col, 50); 6657 ahc_sstat0_print(ahc_inb(ahc, SSTAT0), &cur_col, 50); 6658 ahc_sstat1_print(ahc_inb(ahc, SSTAT1), &cur_col, 50); 6659 ahc_sstat2_print(ahc_inb(ahc, SSTAT2), &cur_col, 50); 6660 ahc_sstat3_print(ahc_inb(ahc, SSTAT3), &cur_col, 50); 6661 ahc_simode0_print(ahc_inb(ahc, SIMODE0), &cur_col, 50); 6662 ahc_simode1_print(ahc_inb(ahc, SIMODE1), &cur_col, 50); 6663 ahc_sxfrctl0_print(ahc_inb(ahc, SXFRCTL0), &cur_col, 50); 6664 ahc_dfcntrl_print(ahc_inb(ahc, DFCNTRL), &cur_col, 50); 6665 ahc_dfstatus_print(ahc_inb(ahc, DFSTATUS), &cur_col, 50); 6666 if (cur_col != 0) 6667 printf("\n"); 6668 printf("STACK:"); 6669 for (i = 0; i < STACK_SIZE; i++) 6670 printf(" 0x%x", ahc_inb(ahc, STACK)|(ahc_inb(ahc, STACK) << 8)); 6671 printf("\nSCB count = %d\n", ahc->scb_data->numscbs); 6672 printf("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag); 6673 printf("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB)); 6674 /* QINFIFO */ 6675 printf("QINFIFO entries: "); 6676 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 6677 qinpos = ahc_inb(ahc, SNSCB_QOFF); 6678 ahc_outb(ahc, SNSCB_QOFF, qinpos); 6679 } else 6680 qinpos = ahc_inb(ahc, QINPOS); 6681 qintail = ahc->qinfifonext; 6682 while (qinpos != qintail) { 6683 
printf("%d ", ahc->qinfifo[qinpos]); 6684 qinpos++; 6685 } 6686 printf("\n"); 6687 6688 printf("Waiting Queue entries: "); 6689 scb_index = ahc_inb(ahc, WAITING_SCBH); 6690 i = 0; 6691 while (scb_index != SCB_LIST_NULL && i++ < 256) { 6692 ahc_outb(ahc, SCBPTR, scb_index); 6693 printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG)); 6694 scb_index = ahc_inb(ahc, SCB_NEXT); 6695 } 6696 printf("\n"); 6697 6698 printf("Disconnected Queue entries: "); 6699 scb_index = ahc_inb(ahc, DISCONNECTED_SCBH); 6700 i = 0; 6701 while (scb_index != SCB_LIST_NULL && i++ < 256) { 6702 ahc_outb(ahc, SCBPTR, scb_index); 6703 printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG)); 6704 scb_index = ahc_inb(ahc, SCB_NEXT); 6705 } 6706 printf("\n"); 6707 6708 ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD); 6709 printf("QOUTFIFO entries: "); 6710 qoutpos = ahc->qoutfifonext; 6711 i = 0; 6712 while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) { 6713 printf("%d ", ahc->qoutfifo[qoutpos]); 6714 qoutpos++; 6715 } 6716 printf("\n"); 6717 6718 printf("Sequencer Free SCB List: "); 6719 scb_index = ahc_inb(ahc, FREE_SCBH); 6720 i = 0; 6721 while (scb_index != SCB_LIST_NULL && i++ < 256) { 6722 ahc_outb(ahc, SCBPTR, scb_index); 6723 printf("%d ", scb_index); 6724 scb_index = ahc_inb(ahc, SCB_NEXT); 6725 } 6726 printf("\n"); 6727 6728 printf("Sequencer SCB Info: "); 6729 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 6730 ahc_outb(ahc, SCBPTR, i); 6731 /*cur_col =*/ printf("\n%3d ", i); 6732 6733 ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), &cur_col, 60); 6734 ahc_scb_scsiid_print(ahc_inb(ahc, SCB_SCSIID), &cur_col, 60); 6735 ahc_scb_lun_print(ahc_inb(ahc, SCB_LUN), &cur_col, 60); 6736 ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60); 6737 } 6738 printf("\n"); 6739 6740 printf("Pending list: "); 6741 i = 0; 6742 LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) { 6743 if (i++ > 256) 6744 break; 6745 /*cur_col =*/ printf("\n%3d ", scb->hscb->tag); 6746 ahc_scb_control_print(scb->hscb->control, &cur_col, 60); 6747 ahc_scb_scsiid_print(scb->hscb->scsiid, &cur_col, 60); 6748 ahc_scb_lun_print(scb->hscb->lun, &cur_col, 60); 6749 if ((ahc->flags & AHC_PAGESCBS) == 0) { 6750 ahc_outb(ahc, SCBPTR, scb->hscb->tag); 6751 printf("("); 6752 ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), 6753 &cur_col, 60); 6754 ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60); 6755 printf(")"); 6756 } 6757 } 6758 printf("\n"); 6759 6760 printf("Kernel Free SCB list: "); 6761 i = 0; 6762 SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) { 6763 if (i++ > 256) 6764 break; 6765 printf("%d ", scb->hscb->tag); 6766 } 6767 printf("\n"); 6768 6769 maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 
15 : 7; 6770 for (target = 0; target <= maxtarget; target++) { 6771 untagged_q = &ahc->untagged_queues[target]; 6772 if (TAILQ_FIRST(untagged_q) == NULL) 6773 continue; 6774 printf("Untagged Q(%d): ", target); 6775 i = 0; 6776 TAILQ_FOREACH(scb, untagged_q, links.tqe) { 6777 if (i++ > 256) 6778 break; 6779 printf("%d ", scb->hscb->tag); 6780 } 6781 printf("\n"); 6782 } 6783 6784 ahc_platform_dump_card_state(ahc); 6785 printf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n"); 6786 ahc_outb(ahc, SCBPTR, saved_scbptr); 6787 if (paused == 0) 6788 ahc_unpause(ahc); 6789 } 6790 6791 /************************* Target Mode ****************************************/ 6792 #ifdef AHC_TARGET_MODE 6793 cam_status 6794 ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb, 6795 struct ahc_tmode_tstate **tstate, 6796 struct ahc_tmode_lstate **lstate, 6797 int notfound_failure) 6798 { 6799 6800 if ((ahc->features & AHC_TARGETMODE) == 0) 6801 return (CAM_REQ_INVALID); 6802 6803 /* 6804 * Handle the 'black hole' device that sucks up 6805 * requests to unattached luns on enabled targets. 6806 */ 6807 if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD 6808 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { 6809 *tstate = NULL; 6810 *lstate = ahc->black_hole; 6811 } else { 6812 u_int max_id; 6813 6814 max_id = (ahc->features & AHC_WIDE) ? 15 : 7; 6815 if (ccb->ccb_h.target_id > max_id) 6816 return (CAM_TID_INVALID); 6817 6818 if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS) 6819 return (CAM_LUN_INVALID); 6820 6821 *tstate = ahc->enabled_targets[ccb->ccb_h.target_id]; 6822 *lstate = NULL; 6823 if (*tstate != NULL) 6824 *lstate = 6825 (*tstate)->enabled_luns[ccb->ccb_h.target_lun]; 6826 } 6827 6828 if (notfound_failure != 0 && *lstate == NULL) 6829 return (CAM_PATH_INVALID); 6830 6831 return (CAM_REQ_CMP); 6832 } 6833 6834 void 6835 ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb) 6836 { 6837 struct ahc_tmode_tstate *tstate; 6838 struct ahc_tmode_lstate *lstate; 6839 struct ccb_en_lun *cel; 6840 cam_status status; 6841 u_int target; 6842 u_int lun; 6843 u_int target_mask; 6844 u_int our_id; 6845 u_long s; 6846 char channel; 6847 6848 status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate, 6849 /*notfound_failure*/FALSE); 6850 6851 if (status != CAM_REQ_CMP) { 6852 ccb->ccb_h.status = status; 6853 return; 6854 } 6855 6856 if (cam_sim_bus(sim) == 0) 6857 our_id = ahc->our_id; 6858 else 6859 our_id = ahc->our_id_b; 6860 6861 if (ccb->ccb_h.target_id != our_id) { 6862 /* 6863 * our_id represents our initiator ID, or 6864 * the ID of the first target to have an 6865 * enabled lun in target mode. There are 6866 * two cases that may preclude enabling a 6867 * target id other than our_id. 6868 * 6869 * o our_id is for an active initiator role. 6870 * Since the hardware does not support 6871 * reselections to the initiator role at 6872 * anything other than our_id, and our_id 6873 * is used by the hardware to indicate the 6874 * ID to use for both select-out and 6875 * reselect-out operations, the only target 6876 * ID we can support in this mode is our_id. 6877 * 6878 * o The MULTARGID feature is not available and 6879 * a previous target mode ID has been enabled. 6880 */ 6881 if ((ahc->features & AHC_MULTIROLE) != 0) { 6882 6883 if ((ahc->features & AHC_MULTI_TID) != 0 6884 && (ahc->flags & AHC_INITIATORROLE) != 0) { 6885 /* 6886 * Only allow additional targets if 6887 * the initiator role is disabled. 
6888 * The hardware cannot handle a re-select-in 6889 * on the initiator id during a re-select-out 6890 * on a different target id. 6891 */ 6892 status = CAM_TID_INVALID; 6893 } else if ((ahc->flags & AHC_INITIATORROLE) != 0 6894 || ahc->enabled_luns > 0) { 6895 /* 6896 * Only allow our target id to change 6897 * if the initiator role is not configured 6898 * and there are no enabled luns which 6899 * are attached to the currently registered 6900 * scsi id. 6901 */ 6902 status = CAM_TID_INVALID; 6903 } 6904 } else if ((ahc->features & AHC_MULTI_TID) == 0 6905 && ahc->enabled_luns > 0) { 6906 6907 status = CAM_TID_INVALID; 6908 } 6909 } 6910 6911 if (status != CAM_REQ_CMP) { 6912 ccb->ccb_h.status = status; 6913 return; 6914 } 6915 6916 /* 6917 * We now have an id that is valid. 6918 * If we aren't in target mode, switch modes. 6919 */ 6920 if ((ahc->flags & AHC_TARGETROLE) == 0 6921 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 6922 u_long s; 6923 6924 printf("Configuring Target Mode\n"); 6925 ahc_lock(ahc, &s); 6926 if (LIST_FIRST(&ahc->pending_scbs) != NULL) { 6927 ccb->ccb_h.status = CAM_BUSY; 6928 ahc_unlock(ahc, &s); 6929 return; 6930 } 6931 ahc->flags |= AHC_TARGETROLE; 6932 if ((ahc->features & AHC_MULTIROLE) == 0) 6933 ahc->flags &= ~AHC_INITIATORROLE; 6934 ahc_pause(ahc); 6935 ahc_loadseq(ahc); 6936 ahc_unlock(ahc, &s); 6937 } 6938 cel = &ccb->cel; 6939 target = ccb->ccb_h.target_id; 6940 lun = ccb->ccb_h.target_lun; 6941 channel = SIM_CHANNEL(ahc, sim); 6942 target_mask = 0x01 << target; 6943 if (channel == 'B') 6944 target_mask <<= 8; 6945 6946 if (cel->enable != 0) { 6947 u_int scsiseq; 6948 6949 /* Are we already enabled?? */ 6950 if (lstate != NULL) { 6951 xpt_print_path(ccb->ccb_h.path); 6952 printf("Lun already enabled\n"); 6953 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 6954 return; 6955 } 6956 6957 if (cel->grp6_len != 0 6958 || cel->grp7_len != 0) { 6959 /* 6960 * Don't (yet?) support vendor 6961 * specific commands. 6962 */ 6963 ccb->ccb_h.status = CAM_REQ_INVALID; 6964 printf("Non-zero Group Codes\n"); 6965 return; 6966 } 6967 6968 /* 6969 * Seems to be okay. 6970 * Setup our data structures. 
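		 * A tstate is allocated for real target ids; the
		 * wildcard target uses only the black hole lstate.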
6971 */ 6972 if (target != CAM_TARGET_WILDCARD && tstate == NULL) { 6973 tstate = ahc_alloc_tstate(ahc, target, channel); 6974 if (tstate == NULL) { 6975 xpt_print_path(ccb->ccb_h.path); 6976 printf("Couldn't allocate tstate\n"); 6977 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 6978 return; 6979 } 6980 } 6981 lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT); 6982 if (lstate == NULL) { 6983 xpt_print_path(ccb->ccb_h.path); 6984 printf("Couldn't allocate lstate\n"); 6985 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 6986 return; 6987 } 6988 memset(lstate, 0, sizeof(*lstate)); 6989 status = xpt_create_path(&lstate->path, /*periph*/NULL, 6990 xpt_path_path_id(ccb->ccb_h.path), 6991 xpt_path_target_id(ccb->ccb_h.path), 6992 xpt_path_lun_id(ccb->ccb_h.path)); 6993 if (status != CAM_REQ_CMP) { 6994 free(lstate, M_DEVBUF); 6995 xpt_print_path(ccb->ccb_h.path); 6996 printf("Couldn't allocate path\n"); 6997 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 6998 return; 6999 } 7000 SLIST_INIT(&lstate->accept_tios); 7001 SLIST_INIT(&lstate->immed_notifies); 7002 ahc_lock(ahc, &s); 7003 ahc_pause(ahc); 7004 if (target != CAM_TARGET_WILDCARD) { 7005 tstate->enabled_luns[lun] = lstate; 7006 ahc->enabled_luns++; 7007 7008 if ((ahc->features & AHC_MULTI_TID) != 0) { 7009 u_int targid_mask; 7010 7011 targid_mask = ahc_inb(ahc, TARGID) 7012 | (ahc_inb(ahc, TARGID + 1) << 8); 7013 7014 targid_mask |= target_mask; 7015 ahc_outb(ahc, TARGID, targid_mask); 7016 ahc_outb(ahc, TARGID+1, (targid_mask >> 8)); 7017 7018 ahc_update_scsiid(ahc, targid_mask); 7019 } else { 7020 u_int our_id; 7021 char channel; 7022 7023 channel = SIM_CHANNEL(ahc, sim); 7024 our_id = SIM_SCSI_ID(ahc, sim); 7025 7026 /* 7027 * This can only happen if selections 7028 * are not enabled 7029 */ 7030 if (target != our_id) { 7031 u_int sblkctl; 7032 char cur_channel; 7033 int swap; 7034 7035 sblkctl = ahc_inb(ahc, SBLKCTL); 7036 cur_channel = (sblkctl & SELBUSB) 7037 ? 
'B' : 'A'; 7038 if ((ahc->features & AHC_TWIN) == 0) 7039 cur_channel = 'A'; 7040 swap = cur_channel != channel; 7041 if (channel == 'A') 7042 ahc->our_id = target; 7043 else 7044 ahc->our_id_b = target; 7045 7046 if (swap) 7047 ahc_outb(ahc, SBLKCTL, 7048 sblkctl ^ SELBUSB); 7049 7050 ahc_outb(ahc, SCSIID, target); 7051 7052 if (swap) 7053 ahc_outb(ahc, SBLKCTL, sblkctl); 7054 } 7055 } 7056 } else 7057 ahc->black_hole = lstate; 7058 /* Allow select-in operations */ 7059 if (ahc->black_hole != NULL && ahc->enabled_luns > 0) { 7060 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); 7061 scsiseq |= ENSELI; 7062 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); 7063 scsiseq = ahc_inb(ahc, SCSISEQ); 7064 scsiseq |= ENSELI; 7065 ahc_outb(ahc, SCSISEQ, scsiseq); 7066 } 7067 ahc_unpause(ahc); 7068 ahc_unlock(ahc, &s); 7069 ccb->ccb_h.status = CAM_REQ_CMP; 7070 xpt_print_path(ccb->ccb_h.path); 7071 printf("Lun now enabled for target mode\n"); 7072 } else { 7073 struct scb *scb; 7074 int i, empty; 7075 7076 if (lstate == NULL) { 7077 ccb->ccb_h.status = CAM_LUN_INVALID; 7078 return; 7079 } 7080 7081 ahc_lock(ahc, &s); 7082 7083 ccb->ccb_h.status = CAM_REQ_CMP; 7084 LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) { 7085 struct ccb_hdr *ccbh; 7086 7087 ccbh = &scb->io_ctx->ccb_h; 7088 if (ccbh->func_code == XPT_CONT_TARGET_IO 7089 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){ 7090 printf("CTIO pending\n"); 7091 ccb->ccb_h.status = CAM_REQ_INVALID; 7092 ahc_unlock(ahc, &s); 7093 return; 7094 } 7095 } 7096 7097 if (SLIST_FIRST(&lstate->accept_tios) != NULL) { 7098 printf("ATIOs pending\n"); 7099 ccb->ccb_h.status = CAM_REQ_INVALID; 7100 } 7101 7102 if (SLIST_FIRST(&lstate->immed_notifies) != NULL) { 7103 printf("INOTs pending\n"); 7104 ccb->ccb_h.status = CAM_REQ_INVALID; 7105 } 7106 7107 if (ccb->ccb_h.status != CAM_REQ_CMP) { 7108 ahc_unlock(ahc, &s); 7109 return; 7110 } 7111 7112 xpt_print_path(ccb->ccb_h.path); 7113 printf("Target mode disabled\n"); 7114 xpt_free_path(lstate->path); 7115 free(lstate, M_DEVBUF); 7116 7117 ahc_pause(ahc); 7118 /* Can we clean up the target too? */ 7119 if (target != CAM_TARGET_WILDCARD) { 7120 tstate->enabled_luns[lun] = NULL; 7121 ahc->enabled_luns--; 7122 for (empty = 1, i = 0; i < 8; i++) 7123 if (tstate->enabled_luns[i] != NULL) { 7124 empty = 0; 7125 break; 7126 } 7127 7128 if (empty) { 7129 ahc_free_tstate(ahc, target, channel, 7130 /*force*/FALSE); 7131 if (ahc->features & AHC_MULTI_TID) { 7132 u_int targid_mask; 7133 7134 targid_mask = ahc_inb(ahc, TARGID) 7135 | (ahc_inb(ahc, TARGID + 1) 7136 << 8); 7137 7138 targid_mask &= ~target_mask; 7139 ahc_outb(ahc, TARGID, targid_mask); 7140 ahc_outb(ahc, TARGID+1, 7141 (targid_mask >> 8)); 7142 ahc_update_scsiid(ahc, targid_mask); 7143 } 7144 } 7145 } else { 7146 7147 ahc->black_hole = NULL; 7148 7149 /* 7150 * We can't allow selections without 7151 * our black hole device. 
7152 */ 7153 empty = TRUE; 7154 } 7155 if (ahc->enabled_luns == 0) { 7156 /* Disallow select-in */ 7157 u_int scsiseq; 7158 7159 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); 7160 scsiseq &= ~ENSELI; 7161 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); 7162 scsiseq = ahc_inb(ahc, SCSISEQ); 7163 scsiseq &= ~ENSELI; 7164 ahc_outb(ahc, SCSISEQ, scsiseq); 7165 7166 if ((ahc->features & AHC_MULTIROLE) == 0) { 7167 printf("Configuring Initiator Mode\n"); 7168 ahc->flags &= ~AHC_TARGETROLE; 7169 ahc->flags |= AHC_INITIATORROLE; 7170 ahc_pause(ahc); 7171 ahc_loadseq(ahc); 7172 } 7173 } 7174 ahc_unpause(ahc); 7175 ahc_unlock(ahc, &s); 7176 } 7177 } 7178 7179 static void 7180 ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask) 7181 { 7182 u_int scsiid_mask; 7183 u_int scsiid; 7184 7185 if ((ahc->features & AHC_MULTI_TID) == 0) 7186 panic("ahc_update_scsiid called on non-multitid unit\n"); 7187 7188 /* 7189 * Since we will rely on the TARGID mask 7190 * for selection enables, ensure that OID 7191 * in SCSIID is not set to some other ID 7192 * that we don't want to allow selections on. 7193 */ 7194 if ((ahc->features & AHC_ULTRA2) != 0) 7195 scsiid = ahc_inb(ahc, SCSIID_ULTRA2); 7196 else 7197 scsiid = ahc_inb(ahc, SCSIID); 7198 scsiid_mask = 0x1 << (scsiid & OID); 7199 if ((targid_mask & scsiid_mask) == 0) { 7200 u_int our_id; 7201 7202 /* ffs counts from 1 */ 7203 our_id = ffs(targid_mask); 7204 if (our_id == 0) 7205 our_id = ahc->our_id; 7206 else 7207 our_id--; 7208 scsiid &= TID; 7209 scsiid |= our_id; 7210 } 7211 if ((ahc->features & AHC_ULTRA2) != 0) 7212 ahc_outb(ahc, SCSIID_ULTRA2, scsiid); 7213 else 7214 ahc_outb(ahc, SCSIID, scsiid); 7215 } 7216 7217 void 7218 ahc_run_tqinfifo(struct ahc_softc *ahc, int paused) 7219 { 7220 struct target_cmd *cmd; 7221 7222 /* 7223 * If the card supports auto-access pause, 7224 * we can access the card directly regardless 7225 * of whether it is paused or not. 7226 */ 7227 if ((ahc->features & AHC_AUTOPAUSE) != 0) 7228 paused = TRUE; 7229 7230 ahc_sync_tqinfifo(ahc, BUS_DMASYNC_POSTREAD); 7231 while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) { 7232 7233 /* 7234 * Only advance through the queue if we 7235 * have the resources to process the command. 7236 */ 7237 if (ahc_handle_target_cmd(ahc, cmd) != 0) 7238 break; 7239 7240 cmd->cmd_valid = 0; 7241 ahc_dmamap_sync(ahc, ahc->parent_dmat/*shared_data_dmat*/, 7242 ahc->shared_data_dmamap, 7243 ahc_targetcmd_offset(ahc, ahc->tqinfifonext), 7244 sizeof(struct target_cmd), 7245 BUS_DMASYNC_PREREAD); 7246 ahc->tqinfifonext++; 7247 7248 /* 7249 * Lazily update our position in the target mode incoming 7250 * command queue as seen by the sequencer. 
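		 * The position is only written back once every
		 * HOST_TQINPOS commands to limit register accesses.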
7251 */ 7252 if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) { 7253 if ((ahc->features & AHC_HS_MAILBOX) != 0) { 7254 u_int hs_mailbox; 7255 7256 hs_mailbox = ahc_inb(ahc, HS_MAILBOX); 7257 hs_mailbox &= ~HOST_TQINPOS; 7258 hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS; 7259 ahc_outb(ahc, HS_MAILBOX, hs_mailbox); 7260 } else { 7261 if (!paused) 7262 ahc_pause(ahc); 7263 ahc_outb(ahc, KERNEL_TQINPOS, 7264 ahc->tqinfifonext & HOST_TQINPOS); 7265 if (!paused) 7266 ahc_unpause(ahc); 7267 } 7268 } 7269 } 7270 } 7271 7272 static int 7273 ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd) 7274 { 7275 struct ahc_tmode_tstate *tstate; 7276 struct ahc_tmode_lstate *lstate; 7277 struct ccb_accept_tio *atio; 7278 uint8_t *byte; 7279 int initiator; 7280 int target; 7281 int lun; 7282 7283 initiator = SCSIID_TARGET(ahc, cmd->scsiid); 7284 target = SCSIID_OUR_ID(cmd->scsiid); 7285 lun = (cmd->identify & MSG_IDENTIFY_LUNMASK); 7286 7287 byte = cmd->bytes; 7288 tstate = ahc->enabled_targets[target]; 7289 lstate = NULL; 7290 if (tstate != NULL) 7291 lstate = tstate->enabled_luns[lun]; 7292 7293 /* 7294 * Commands for disabled luns go to the black hole driver. 7295 */ 7296 if (lstate == NULL) 7297 lstate = ahc->black_hole; 7298 7299 atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios); 7300 if (atio == NULL) { 7301 ahc->flags |= AHC_TQINFIFO_BLOCKED; 7302 /* 7303 * Wait for more ATIOs from the peripheral driver for this lun. 7304 */ 7305 if (bootverbose) 7306 printf("%s: ATIOs exhausted\n", ahc_name(ahc)); 7307 return (1); 7308 } else 7309 ahc->flags &= ~AHC_TQINFIFO_BLOCKED; 7310 #if 0 7311 printf("Incoming command from %d for %d:%d%s\n", 7312 initiator, target, lun, 7313 lstate == ahc->black_hole ? "(Black Holed)" : ""); 7314 #endif 7315 SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle); 7316 7317 if (lstate == ahc->black_hole) { 7318 /* Fill in the wildcards */ 7319 atio->ccb_h.target_id = target; 7320 atio->ccb_h.target_lun = lun; 7321 } 7322 7323 /* 7324 * Package it up and send it off to 7325 * whomever has this lun enabled. 7326 */ 7327 atio->sense_len = 0; 7328 atio->init_id = initiator; 7329 if (byte[0] != 0xFF) { 7330 /* Tag was included */ 7331 atio->tag_action = *byte++; 7332 atio->tag_id = *byte++; 7333 atio->ccb_h.flags = CAM_TAG_ACTION_VALID; 7334 } else { 7335 atio->ccb_h.flags = 0; 7336 } 7337 byte++; 7338 7339 /* Okay. Now determine the cdb size based on the command code */ 7340 switch (*byte >> CMD_GROUP_CODE_SHIFT) { 7341 case 0: 7342 atio->cdb_len = 6; 7343 break; 7344 case 1: 7345 case 2: 7346 atio->cdb_len = 10; 7347 break; 7348 case 4: 7349 atio->cdb_len = 16; 7350 break; 7351 case 5: 7352 atio->cdb_len = 12; 7353 break; 7354 case 3: 7355 default: 7356 /* Only copy the opcode. */ 7357 atio->cdb_len = 1; 7358 printf("Reserved or VU command code type encountered\n"); 7359 break; 7360 } 7361 7362 memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len); 7363 7364 atio->ccb_h.status |= CAM_CDB_RECVD; 7365 7366 if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) { 7367 /* 7368 * We weren't allowed to disconnect. 7369 * We're hanging on the bus until a 7370 * continue target I/O comes in response 7371 * to this accept tio. 
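		 * Recording the lun's state in pending_device lets the
		 * eventual continue target I/O be matched to this
		 * bus-held command.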
7372 */ 7373 #if 0 7374 printf("Received Immediate Command %d:%d:%d - %p\n", 7375 initiator, target, lun, ahc->pending_device); 7376 #endif 7377 ahc->pending_device = lstate; 7378 ahc_freeze_ccb((union ccb *)atio); 7379 atio->ccb_h.flags |= CAM_DIS_DISCONNECT; 7380 } 7381 xpt_done((union ccb*)atio); 7382 return (0); 7383 } 7384 #endif 7385 7386 static int 7387 ahc_createdmamem(bus_dma_tag_t tag, int size, int flags, bus_dmamap_t *mapp, 7388 void **vaddr, bus_addr_t *baddr, bus_dma_segment_t *seg, int *nseg, 7389 const char *myname, const char *what) 7390 { 7391 int error, level = 0; 7392 7393 if ((error = bus_dmamem_alloc(tag, size, PAGE_SIZE, 0, 7394 seg, 1, nseg, BUS_DMA_WAITOK)) != 0) { 7395 printf("%s: failed to allocate DMA mem for %s, error = %d\n", 7396 myname, what, error); 7397 goto out; 7398 } 7399 level++; 7400 7401 if ((error = bus_dmamem_map(tag, seg, *nseg, size, vaddr, 7402 BUS_DMA_WAITOK|BUS_DMA_COHERENT)) != 0) { 7403 printf("%s: failed to map DMA mem for %s, error = %d\n", 7404 myname, what, error); 7405 goto out; 7406 } 7407 level++; 7408 7409 if ((error = bus_dmamap_create(tag, size, 1, size, 0, 7410 BUS_DMA_WAITOK | flags, mapp)) != 0) { 7411 printf("%s: failed to create DMA map for %s, error = %d\n", 7412 myname, what, error); 7413 goto out; 7414 } 7415 level++; 7416 7417 7418 if ((error = bus_dmamap_load(tag, *mapp, *vaddr, size, NULL, 7419 BUS_DMA_WAITOK)) != 0) { 7420 printf("%s: failed to load DMA map for %s, error = %d\n", 7421 myname, what, error); 7422 goto out; 7423 } 7424 7425 *baddr = (*mapp)->dm_segs[0].ds_addr; 7426 7427 return 0; 7428 out: 7429 printf("ahc_createdmamem error (%d)\n", level); 7430 switch (level) { 7431 case 3: 7432 bus_dmamap_destroy(tag, *mapp); 7433 /* FALLTHROUGH */ 7434 case 2: 7435 bus_dmamem_unmap(tag, *vaddr, size); 7436 /* FALLTHROUGH */ 7437 case 1: 7438 bus_dmamem_free(tag, seg, *nseg); 7439 break; 7440 default: 7441 break; 7442 } 7443 7444 return -1; 7445 } 7446 7447 static void 7448 ahc_freedmamem(bus_dma_tag_t tag, int size, bus_dmamap_t map, void *vaddr, 7449 bus_dma_segment_t *seg, int nseg) 7450 { 7451 7452 bus_dmamap_unload(tag, map); 7453 bus_dmamap_destroy(tag, map); 7454 bus_dmamem_unmap(tag, vaddr, size); 7455 bus_dmamem_free(tag, seg, nseg); 7456 } 7457
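
/*
 * Illustrative sketch only (never compiled): how a caller might pair
 * ahc_createdmamem() with ahc_freedmamem().  The caller keeps the map,
 * kernel virtual address, bus address, segment and segment count so the
 * teardown path can undo the load/create/map/alloc steps in reverse
 * order.  "size", the zero flags value and the "example buffer"
 * description below are hypothetical.
 */
#if 0
	bus_dmamap_t map;
	void *vaddr;
	bus_addr_t baddr;
	bus_dma_segment_t seg;
	int nseg;

	if (ahc_createdmamem(ahc->parent_dmat, size, /*flags*/0, &map,
	    &vaddr, &baddr, &seg, &nseg, ahc_name(ahc),
	    "example buffer") < 0)
		return (ENOMEM);
	/* ... use vaddr (kernel view) and baddr (device view) ... */
	ahc_freedmamem(ahc->parent_dmat, size, map, vaddr, &seg, nseg);
#endif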