/* $NetBSD: aic7xxx.c,v 1.132 2015/02/07 04:27:54 christos Exp $ */

/*
 * Core routines and tables shareable across OS platforms.
 *
 * Copyright (c) 1994-2002 Justin T. Gibbs.
 * Copyright (c) 2000-2002 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: aic7xxx.c,v 1.132 2015/02/07 04:27:54 christos Exp $
 *
 * //depot/aic7xxx/aic7xxx/aic7xxx.c#112 $
 *
 * $FreeBSD: /repoman/r/ncvs/src/sys/dev/aic7xxx/aic7xxx.c,v 1.88 2003/01/20 20:44:55 gibbs Exp $
 */
/*
 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc. - April 2003
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aic7xxx.c,v 1.132 2015/02/07 04:27:54 christos Exp $");

#include <dev/ic/aic7xxx_osm.h>
#include <dev/ic/aic7xxx_inline.h>
#include <dev/ic/aic7xxx_cam.h>

/****************************** Softc Data ************************************/
struct ahc_softc_tailq ahc_tailq = TAILQ_HEAD_INITIALIZER(ahc_tailq);

/***************************** Lookup Tables **********************************/
const char *ahc_chip_names[] =
{
	"NONE",
	"aic7770",
	"aic7850",
	"aic7855",
	"aic7859",
	"aic7860",
	"aic7870",
	"aic7880",
	"aic7895",
	"aic7895C",
	"aic7890/91",
	"aic7896/97",
	"aic7892",
	"aic7899"
};

/*
 * Hardware error codes.
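 * Each entry pairs one bit of the ERROR register with a human-readable
 * message.  A minimal sketch of how the table is consumed (this mirrors
 * ahc_handle_brkadrint() later in this file, which treats ERROR as a
 * one-hot mask and shifts it down until the set bit reaches bit 0):
 *
 *	error = ahc_inb(ahc, ERROR);
 *	for (i = 0; error != 1 && i < num_errors; i++)
 *		error >>= 1;
 *	printf("%s\n", ahc_hard_errors[i].errmesg);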
 */
struct ahc_hard_error_entry {
	uint8_t errno;
	const char *errmesg;
};

static struct ahc_hard_error_entry ahc_hard_errors[] = {
	{ ILLHADDR,	"Illegal Host Access" },
	{ ILLSADDR,	"Illegal Sequencer Address referenced" },
	{ ILLOPCODE,	"Illegal Opcode in sequencer program" },
	{ SQPARERR,	"Sequencer Parity Error" },
	{ DPARERR,	"Data-path Parity Error" },
	{ MPARERR,	"Scratch or SCB Memory Parity Error" },
	{ PCIERRSTAT,	"PCI Error detected" },
	{ CIOPARERR,	"CIOBUS Parity Error" },
};
static const u_int num_errors = NUM_ELEMENTS(ahc_hard_errors);

static struct ahc_phase_table_entry ahc_phase_table[] =
{
	{ P_DATAOUT,	MSG_NOOP,		"in Data-out phase"	},
	{ P_DATAIN,	MSG_INITIATOR_DET_ERR,	"in Data-in phase"	},
	{ P_DATAOUT_DT,	MSG_NOOP,		"in DT Data-out phase"	},
	{ P_DATAIN_DT,	MSG_INITIATOR_DET_ERR,	"in DT Data-in phase"	},
	{ P_COMMAND,	MSG_NOOP,		"in Command phase"	},
	{ P_MESGOUT,	MSG_NOOP,		"in Message-out phase"	},
	{ P_STATUS,	MSG_INITIATOR_DET_ERR,	"in Status phase"	},
	{ P_MESGIN,	MSG_PARITY_ERROR,	"in Message-in phase"	},
	{ P_BUSFREE,	MSG_NOOP,		"while idle"		},
	{ 0,		MSG_NOOP,		"in unknown phase"	}
};

/*
 * In most cases we only wish to iterate over real phases, so
 * exclude the last element from the count.
 */
static const u_int num_phases = NUM_ELEMENTS(ahc_phase_table) - 1;

/*
 * Valid SCSIRATE values. (p. 3-17)
 * Provides a mapping of transfer periods in ns to the proper value to
 * stick in the scsixfer reg.
 */
static struct ahc_syncrate ahc_syncrates[] =
{
	/* ultra2  fast/ultra  period  rate */
	{ 0x42,    0x000,       9,     "80.0" },
	{ 0x03,    0x000,      10,     "40.0" },
	{ 0x04,    0x000,      11,     "33.0" },
	{ 0x05,    0x100,      12,     "20.0" },
	{ 0x06,    0x110,      15,     "16.0" },
	{ 0x07,    0x120,      18,     "13.4" },
	{ 0x08,    0x000,      25,     "10.0" },
	{ 0x19,    0x010,      31,     "8.0"  },
	{ 0x1a,    0x020,      37,     "6.67" },
	{ 0x1b,    0x030,      43,     "5.7"  },
	{ 0x1c,    0x040,      50,     "5.0"  },
	{ 0x00,    0x050,      56,     "4.4"  },
	{ 0x00,    0x060,      62,     "4.0"  },
	{ 0x00,    0x070,      68,     "3.6"  },
	{ 0x00,    0x000,       0,     NULL   }
};

/* Our Sequencer Program */
#include <dev/microcode/aic7xxx/aic7xxx_seq.h>

/**************************** Function Declarations ***************************/
static void		ahc_force_renegotiation(struct ahc_softc *ahc);
static struct ahc_tmode_tstate*
			ahc_alloc_tstate(struct ahc_softc *ahc,
					 u_int scsi_id, char channel);
#ifdef AHC_TARGET_MODE
static void		ahc_free_tstate(struct ahc_softc *ahc,
					u_int scsi_id, char channel, int force);
#endif
static struct ahc_syncrate*
			ahc_devlimited_syncrate(struct ahc_softc *ahc,
					struct ahc_initiator_tinfo *,
					u_int *period,
					u_int *ppr_options,
					role_t role);
static void		ahc_update_pending_scbs(struct ahc_softc *ahc);
static void		ahc_fetch_devinfo(struct ahc_softc *ahc,
					  struct ahc_devinfo *devinfo);
static void		ahc_scb_devinfo(struct ahc_softc *ahc,
					struct ahc_devinfo *devinfo,
					struct scb *scb);
static void		ahc_assert_atn(struct ahc_softc *ahc);
static void		ahc_setup_initiator_msgout(struct ahc_softc *ahc,
						   struct ahc_devinfo *devinfo,
						   struct scb *scb);
static void		ahc_build_transfer_msg(struct ahc_softc *ahc,
					       struct ahc_devinfo *devinfo);
static void		ahc_construct_sdtr(struct ahc_softc *ahc,
					   struct ahc_devinfo *devinfo,
					   u_int period, u_int offset);
static void		ahc_construct_wdtr(struct ahc_softc *ahc,
180 struct ahc_devinfo *devinfo, 181 u_int bus_width); 182 static void ahc_construct_ppr(struct ahc_softc *ahc, 183 struct ahc_devinfo *devinfo, 184 u_int period, u_int offset, 185 u_int bus_width, u_int ppr_options); 186 static void ahc_clear_msg_state(struct ahc_softc *ahc); 187 static void ahc_handle_proto_violation(struct ahc_softc *ahc); 188 static void ahc_handle_message_phase(struct ahc_softc *ahc); 189 typedef enum { 190 AHCMSG_1B, 191 AHCMSG_2B, 192 AHCMSG_EXT 193 } ahc_msgtype; 194 static int ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, 195 u_int msgval, int full); 196 static int ahc_parse_msg(struct ahc_softc *ahc, 197 struct ahc_devinfo *devinfo); 198 static int ahc_handle_msg_reject(struct ahc_softc *ahc, 199 struct ahc_devinfo *devinfo); 200 static void ahc_handle_ign_wide_residue(struct ahc_softc *ahc, 201 struct ahc_devinfo *devinfo); 202 static void ahc_reinitialize_dataptrs(struct ahc_softc *ahc); 203 static void ahc_handle_devreset(struct ahc_softc *ahc, 204 struct ahc_devinfo *devinfo, 205 cam_status status, 206 const char *message, 207 int verbose_level); 208 #if AHC_TARGET_MODE 209 static void ahc_setup_target_msgin(struct ahc_softc *ahc, 210 struct ahc_devinfo *devinfo, 211 struct scb *scb); 212 #endif 213 214 #if 0 215 static bus_dmamap_callback_t ahc_dmamap_cb; 216 #endif 217 static void ahc_build_free_scb_list(struct ahc_softc *ahc); 218 static int ahc_init_scbdata(struct ahc_softc *ahc); 219 static void ahc_fini_scbdata(struct ahc_softc *ahc); 220 static void ahc_qinfifo_requeue(struct ahc_softc *ahc, 221 struct scb *prev_scb, 222 struct scb *scb); 223 static int ahc_qinfifo_count(struct ahc_softc *ahc); 224 static u_int ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, 225 u_int prev, u_int scbptr); 226 static void ahc_add_curscb_to_free_list(struct ahc_softc *ahc); 227 static u_int ahc_rem_wscb(struct ahc_softc *ahc, 228 u_int scbpos, u_int prev); 229 static void ahc_reset_current_bus(struct ahc_softc *ahc); 230 #ifdef AHC_DUMP_SEQ 231 static void ahc_dumpseq(struct ahc_softc *ahc); 232 #endif 233 static void ahc_loadseq(struct ahc_softc *ahc); 234 static int ahc_check_patch(struct ahc_softc *ahc, 235 struct patch **start_patch, 236 u_int start_instr, u_int *skip_addr); 237 static void ahc_download_instr(struct ahc_softc *ahc, 238 u_int instrptr, uint8_t *dconsts); 239 #ifdef AHC_TARGET_MODE 240 static void ahc_queue_lstate_event(struct ahc_softc *ahc, 241 struct ahc_tmode_lstate *lstate, 242 u_int initiator_id, 243 u_int event_type, 244 u_int event_arg); 245 static void ahc_update_scsiid(struct ahc_softc *ahc, 246 u_int targid_mask); 247 static int ahc_handle_target_cmd(struct ahc_softc *ahc, 248 struct target_cmd *cmd); 249 #endif 250 251 /************************** Added for porting to NetBSD ***********************/ 252 static int ahc_createdmamem(bus_dma_tag_t tag, 253 int size, 254 int flags, 255 bus_dmamap_t *mapp, 256 void **vaddr, 257 bus_addr_t *baddr, 258 bus_dma_segment_t *seg, 259 int *nseg, 260 const char *myname, const char *what); 261 static void ahc_freedmamem(bus_dma_tag_t tag, 262 int size, 263 bus_dmamap_t map, 264 void *vaddr, 265 bus_dma_segment_t *seg, 266 int nseg); 267 268 /************************* Sequencer Execution Control ************************/ 269 /* 270 * Restart the sequencer program from address zero 271 */ 272 void 273 ahc_restart(struct ahc_softc *ahc) 274 { 275 276 ahc_pause(ahc); 277 278 /* No more pending messages. 
*/ 279 ahc_clear_msg_state(ahc); 280 281 ahc_outb(ahc, SCSISIGO, 0); /* De-assert BSY */ 282 ahc_outb(ahc, MSG_OUT, MSG_NOOP); /* No message to send */ 283 ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET); 284 ahc_outb(ahc, LASTPHASE, P_BUSFREE); 285 ahc_outb(ahc, SAVED_SCSIID, 0xFF); 286 ahc_outb(ahc, SAVED_LUN, 0xFF); 287 288 /* 289 * Ensure that the sequencer's idea of TQINPOS 290 * matches our own. The sequencer increments TQINPOS 291 * only after it sees a DMA complete and a reset could 292 * occur before the increment leaving the kernel to believe 293 * the command arrived but the sequencer to not. 294 */ 295 ahc_outb(ahc, TQINPOS, ahc->tqinfifonext); 296 297 /* Always allow reselection */ 298 ahc_outb(ahc, SCSISEQ, 299 ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP)); 300 if ((ahc->features & AHC_CMD_CHAN) != 0) { 301 /* Ensure that no DMA operations are in progress */ 302 ahc_outb(ahc, CCSCBCNT, 0); 303 ahc_outb(ahc, CCSGCTL, 0); 304 ahc_outb(ahc, CCSCBCTL, 0); 305 } 306 /* 307 * If we were in the process of DMA'ing SCB data into 308 * an SCB, replace that SCB on the free list. This prevents 309 * an SCB leak. 310 */ 311 if ((ahc_inb(ahc, SEQ_FLAGS2) & SCB_DMA) != 0) { 312 ahc_add_curscb_to_free_list(ahc); 313 ahc_outb(ahc, SEQ_FLAGS2, 314 ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA); 315 } 316 ahc_outb(ahc, MWI_RESIDUAL, 0); 317 ahc_outb(ahc, SEQCTL, FASTMODE); 318 ahc_outb(ahc, SEQADDR0, 0); 319 ahc_outb(ahc, SEQADDR1, 0); 320 ahc_unpause(ahc); 321 } 322 323 /************************* Input/Output Queues ********************************/ 324 void 325 ahc_run_qoutfifo(struct ahc_softc *ahc) 326 { 327 struct scb *scb; 328 u_int scb_index; 329 330 ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD); 331 while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) { 332 333 scb_index = ahc->qoutfifo[ahc->qoutfifonext]; 334 if ((ahc->qoutfifonext & 0x03) == 0x03) { 335 u_int modnext; 336 337 /* 338 * Clear 32bits of QOUTFIFO at a time 339 * so that we don't clobber an incoming 340 * byte DMA to the array on architectures 341 * that only support 32bit load and store 342 * operations. 343 */ 344 modnext = ahc->qoutfifonext & ~0x3; 345 *((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL; 346 ahc_dmamap_sync(ahc, 347 ahc->parent_dmat /*shared_data_dmat*/, 348 ahc->shared_data_dmamap, 349 /*offset*/modnext, /*len*/4, 350 BUS_DMASYNC_PREREAD); 351 } 352 ahc->qoutfifonext++; 353 354 scb = ahc_lookup_scb(ahc, scb_index); 355 if (scb == NULL) { 356 printf("%s: WARNING no command for scb %d " 357 "(cmdcmplt)\nQOUTPOS = %d\n", 358 ahc_name(ahc), scb_index, 359 (ahc->qoutfifonext - 1) & 0xFF); 360 continue; 361 } 362 363 /* 364 * Save off the residual 365 * if there is one. 
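		 * The residual is the byte count the target did not
		 * transfer; ahc_update_residual() recovers it from the
		 * hardware SCB so ahc_done() can report it upstream.  A
		 * hypothetical consumer-side line (the real hand-off lives
		 * in the OSM code, not in this file):
		 *
		 *	xs->resid = ahc_get_residual(scb);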
366 */ 367 ahc_update_residual(ahc, scb); 368 ahc_done(ahc, scb); 369 } 370 } 371 372 void 373 ahc_run_untagged_queues(struct ahc_softc *ahc) 374 { 375 int i; 376 377 for (i = 0; i < 16; i++) 378 ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]); 379 } 380 381 void 382 ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue) 383 { 384 struct scb *scb; 385 386 if (ahc->untagged_queue_lock != 0) 387 return; 388 389 if ((scb = TAILQ_FIRST(queue)) != NULL 390 && (scb->flags & SCB_ACTIVE) == 0) { 391 scb->flags |= SCB_ACTIVE; 392 ahc_queue_scb(ahc, scb); 393 } 394 } 395 396 /************************* Interrupt Handling *********************************/ 397 void 398 ahc_handle_brkadrint(struct ahc_softc *ahc) 399 { 400 /* 401 * We upset the sequencer :-( 402 * Lookup the error message 403 */ 404 int i; 405 int error; 406 407 error = ahc_inb(ahc, ERROR); 408 for (i = 0; error != 1 && i < num_errors; i++) 409 error >>= 1; 410 printf("%s: brkadrint, %s at seqaddr = 0x%x\n", 411 ahc_name(ahc), ahc_hard_errors[i].errmesg, 412 ahc_inb(ahc, SEQADDR0) | 413 (ahc_inb(ahc, SEQADDR1) << 8)); 414 415 ahc_dump_card_state(ahc); 416 417 /* Tell everyone that this HBA is no longer available */ 418 ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS, 419 CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, 420 CAM_NO_HBA); 421 422 /* Disable all interrupt sources by resetting the controller */ 423 ahc_shutdown(ahc); 424 } 425 426 void 427 ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat) 428 { 429 struct scb *scb; 430 struct ahc_devinfo devinfo; 431 432 ahc_fetch_devinfo(ahc, &devinfo); 433 434 /* 435 * Clear the upper byte that holds SEQINT status 436 * codes and clear the SEQINT bit. We will unpause 437 * the sequencer, if appropriate, after servicing 438 * the request. 439 */ 440 ahc_outb(ahc, CLRINT, CLRSEQINT); 441 switch (intstat & SEQINT_MASK) { 442 case BAD_STATUS: 443 { 444 u_int scb_index; 445 struct hardware_scb *hscb; 446 447 /* 448 * Set the default return value to 0 (don't 449 * send sense). The sense code will change 450 * this if needed. 451 */ 452 ahc_outb(ahc, RETURN_1, 0); 453 454 /* 455 * The sequencer will notify us when a command 456 * has an error that would be of interest to 457 * the kernel. This allows us to leave the sequencer 458 * running in the common case of command completes 459 * without error. The sequencer will already have 460 * DMA'd the SCB back up to us, so we can reference 461 * the in kernel copy directly. 462 */ 463 scb_index = ahc_inb(ahc, SCB_TAG); 464 scb = ahc_lookup_scb(ahc, scb_index); 465 if (scb == NULL) { 466 ahc_print_devinfo(ahc, &devinfo); 467 printf("ahc_intr - referenced scb " 468 "not valid during seqint 0x%x scb(%d)\n", 469 intstat, scb_index); 470 ahc_dump_card_state(ahc); 471 panic("for safety"); 472 goto unpause; 473 } 474 475 hscb = scb->hscb; 476 477 /* Don't want to clobber the original sense code */ 478 if ((scb->flags & SCB_SENSE) != 0) { 479 /* 480 * Clear the SCB_SENSE Flag and have 481 * the sequencer do a normal command 482 * complete. 483 */ 484 scb->flags &= ~SCB_SENSE; 485 break; 486 } 487 /* Freeze the queue until the client sees the error. 
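		 * From here the SCB is reused for an automatic REQUEST SENSE:
		 * the code below points a single S/G element at the per-SCB
		 * sense buffer, forces the command untagged (setting
		 * MK_MESSAGE if a renegotiation is wanted), marks it with
		 * SCB_SENSE, requeues it via ahc_qinfifo_requeue_tail() and
		 * sets RETURN_1 to SEND_SENSE so the sequencer issues the
		 * sense fetch instead of completing the command.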
*/ 488 ahc_freeze_devq(ahc, scb); 489 ahc_freeze_scb(scb); 490 ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status); 491 switch (hscb->shared_data.status.scsi_status) { 492 case SCSI_STATUS_OK: 493 printf("%s: Interrupted for status of 0 (?)\n", 494 ahc_name(ahc)); 495 break; 496 case SCSI_STATUS_CMD_TERMINATED: 497 case SCSI_STATUS_CHECK_COND: 498 { 499 struct ahc_dma_seg *sg; 500 struct scsi_request_sense *sc; 501 struct ahc_initiator_tinfo *targ_info; 502 struct ahc_tmode_tstate *tstate; 503 struct ahc_transinfo *tinfo; 504 uint32_t len; 505 #ifdef AHC_DEBUG 506 if (ahc_debug & AHC_SHOW_SENSE) { 507 ahc_print_path(ahc, scb); 508 printf("SCB %d: requests Check Status\n", 509 scb->hscb->tag); 510 } 511 #endif 512 513 if (ahc_perform_autosense(scb) == 0) 514 break; 515 516 targ_info = ahc_fetch_transinfo(ahc, 517 devinfo.channel, 518 devinfo.our_scsiid, 519 devinfo.target, 520 &tstate); 521 tinfo = &targ_info->curr; 522 sg = scb->sg_list; 523 sc = (struct scsi_request_sense *) 524 (&hscb->shared_data.cdb); 525 /* 526 * Save off the residual if there is one. 527 */ 528 ahc_update_residual(ahc, scb); 529 #ifdef AHC_DEBUG 530 if (ahc_debug & AHC_SHOW_SENSE) { 531 ahc_print_path(ahc, scb); 532 printf("Sending Sense\n"); 533 } 534 #endif 535 sg->addr = ahc_htole32(ahc_get_sense_bufaddr(ahc, scb)); 536 len = ahc_get_sense_bufsize(ahc, scb); 537 sg->len = ahc_htole32(len | AHC_DMA_LAST_SEG); 538 539 memset(sc, 0, sizeof(*sc)); 540 sc->opcode = SCSI_REQUEST_SENSE; 541 if (tinfo->protocol_version <= SCSI_REV_2 542 && SCB_GET_LUN(scb) < 8) 543 sc->byte2 = SCB_GET_LUN(scb) << 5; 544 sc->length = len; 545 546 /* 547 * We can't allow the target to disconnect. 548 * This will be an untagged transaction and 549 * having the target disconnect will make this 550 * transaction indistinguishable from outstanding 551 * tagged transactions. 552 */ 553 hscb->control = 0; 554 555 /* 556 * This request sense could be because the 557 * the device lost power or in some other 558 * way has lost our transfer negotiations. 559 * Renegotiate if appropriate. Unit attention 560 * errors will be reported before any data 561 * phases occur. 562 */ 563 if (ahc_get_residual(scb) 564 == ahc_get_transfer_length(scb)) { 565 ahc_update_neg_request(ahc, &devinfo, 566 tstate, targ_info, 567 AHC_NEG_IF_NON_ASYNC); 568 } 569 if (tstate->auto_negotiate & devinfo.target_mask) { 570 hscb->control |= MK_MESSAGE; 571 scb->flags &= ~SCB_NEGOTIATE; 572 scb->flags |= SCB_AUTO_NEGOTIATE; 573 } 574 hscb->cdb_len = sizeof(*sc); 575 hscb->dataptr = sg->addr; 576 hscb->datacnt = sg->len; 577 hscb->sgptr = 578 ahc_htole32(scb->sg_list_phys | SG_FULL_RESID); 579 scb->sg_count = 1; 580 scb->flags |= SCB_SENSE; 581 ahc_qinfifo_requeue_tail(ahc, scb); 582 ahc_outb(ahc, RETURN_1, SEND_SENSE); 583 /* 584 * Ensure we have enough time to actually 585 * retrieve the sense. 
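			 * (The call below extends the command's timeout to
			 * 5 * 1000000, i.e. roughly five seconds assuming
			 * ahc_scb_timer_reset() takes microseconds, so the
			 * REQUEST SENSE itself does not time out first.)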
586 */ 587 ahc_scb_timer_reset(scb, 5 * 1000000); 588 break; 589 } 590 default: 591 break; 592 } 593 break; 594 } 595 case NO_MATCH: 596 { 597 /* Ensure we don't leave the selection hardware on */ 598 ahc_outb(ahc, SCSISEQ, 599 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 600 601 printf("%s:%c:%d: no active SCB for reconnecting " 602 "target - issuing BUS DEVICE RESET\n", 603 ahc_name(ahc), devinfo.channel, devinfo.target); 604 printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " 605 "ARG_1 == 0x%x ACCUM = 0x%x\n", 606 ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), 607 ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); 608 printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " 609 "SINDEX == 0x%x\n", 610 ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), 611 ahc_index_busy_tcl(ahc, 612 BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), 613 ahc_inb(ahc, SAVED_LUN))), 614 ahc_inb(ahc, SINDEX)); 615 printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " 616 "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", 617 ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), 618 ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), 619 ahc_inb(ahc, SCB_CONTROL)); 620 printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", 621 ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); 622 printf("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0)); 623 printf("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL)); 624 ahc_dump_card_state(ahc); 625 ahc->msgout_buf[0] = MSG_BUS_DEV_RESET; 626 ahc->msgout_len = 1; 627 ahc->msgout_index = 0; 628 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 629 ahc_outb(ahc, MSG_OUT, HOST_MSG); 630 ahc_assert_atn(ahc); 631 break; 632 } 633 case SEND_REJECT: 634 { 635 u_int rejbyte = ahc_inb(ahc, ACCUM); 636 printf("%s:%c:%d: Warning - unknown message received from " 637 "target (0x%x). Rejecting\n", 638 ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte); 639 break; 640 } 641 case PROTO_VIOLATION: 642 { 643 ahc_handle_proto_violation(ahc); 644 break; 645 } 646 case IGN_WIDE_RES: 647 ahc_handle_ign_wide_residue(ahc, &devinfo); 648 break; 649 case PDATA_REINIT: 650 ahc_reinitialize_dataptrs(ahc); 651 break; 652 case BAD_PHASE: 653 { 654 u_int lastphase; 655 656 lastphase = ahc_inb(ahc, LASTPHASE); 657 printf("%s:%c:%d: unknown scsi bus phase %x, " 658 "lastphase = 0x%x. Attempting to continue\n", 659 ahc_name(ahc), devinfo.channel, devinfo.target, 660 lastphase, ahc_inb(ahc, SCSISIGI)); 661 break; 662 } 663 case MISSED_BUSFREE: 664 { 665 u_int lastphase; 666 667 lastphase = ahc_inb(ahc, LASTPHASE); 668 printf("%s:%c:%d: Missed busfree. " 669 "Lastphase = 0x%x, Curphase = 0x%x\n", 670 ahc_name(ahc), devinfo.channel, devinfo.target, 671 lastphase, ahc_inb(ahc, SCSISIGI)); 672 ahc_restart(ahc); 673 return; 674 } 675 case HOST_MSG_LOOP: 676 { 677 /* 678 * The sequencer has encountered a message phase 679 * that requires host assistance for completion. 680 * While handling the message phase(s), we will be 681 * notified by the sequencer after each byte is 682 * transferred so we can track bus phase changes. 683 * 684 * If this is the first time we've seen a HOST_MSG_LOOP 685 * interrupt, initialize the state of the host message 686 * loop. 687 */ 688 if (ahc->msg_type == MSG_TYPE_NONE) { 689 struct scb *scb1; 690 u_int scb_index; 691 u_int bus_phase; 692 693 bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 694 if (bus_phase != P_MESGIN 695 && bus_phase != P_MESGOUT) { 696 printf("ahc_intr: HOST_MSG_LOOP bad " 697 "phase 0x%x\n", 698 bus_phase); 699 /* 700 * Probably transitioned to bus free before 701 * we got here. Just punt the message. 
702 */ 703 ahc_clear_intstat(ahc); 704 ahc_restart(ahc); 705 return; 706 } 707 708 scb_index = ahc_inb(ahc, SCB_TAG); 709 scb1 = ahc_lookup_scb(ahc, scb_index); 710 if (devinfo.role == ROLE_INITIATOR) { 711 if (scb1 == NULL) 712 panic("HOST_MSG_LOOP with " 713 "invalid SCB %x\n", scb_index); 714 715 if (bus_phase == P_MESGOUT) 716 ahc_setup_initiator_msgout(ahc, 717 &devinfo, 718 scb1); 719 else { 720 ahc->msg_type = 721 MSG_TYPE_INITIATOR_MSGIN; 722 ahc->msgin_index = 0; 723 } 724 } 725 #if AHC_TARGET_MODE 726 else { 727 if (bus_phase == P_MESGOUT) { 728 ahc->msg_type = 729 MSG_TYPE_TARGET_MSGOUT; 730 ahc->msgin_index = 0; 731 } 732 else 733 ahc_setup_target_msgin(ahc, 734 &devinfo, 735 scb1); 736 } 737 #endif 738 } 739 740 ahc_handle_message_phase(ahc); 741 break; 742 } 743 case PERR_DETECTED: 744 { 745 /* 746 * If we've cleared the parity error interrupt 747 * but the sequencer still believes that SCSIPERR 748 * is true, it must be that the parity error is 749 * for the currently presented byte on the bus, 750 * and we are not in a phase (data-in) where we will 751 * eventually ack this byte. Ack the byte and 752 * throw it away in the hope that the target will 753 * take us to message out to deliver the appropriate 754 * error message. 755 */ 756 if ((intstat & SCSIINT) == 0 757 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) { 758 759 if ((ahc->features & AHC_DT) == 0) { 760 u_int curphase; 761 762 /* 763 * The hardware will only let you ack bytes 764 * if the expected phase in SCSISIGO matches 765 * the current phase. Make sure this is 766 * currently the case. 767 */ 768 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 769 ahc_outb(ahc, LASTPHASE, curphase); 770 ahc_outb(ahc, SCSISIGO, curphase); 771 } 772 if ((ahc_inb(ahc, SCSISIGI) & (CDI|MSGI)) == 0) { 773 int wait; 774 775 /* 776 * In a data phase. Faster to bitbucket 777 * the data than to individually ack each 778 * byte. This is also the only strategy 779 * that will work with AUTOACK enabled. 780 */ 781 ahc_outb(ahc, SXFRCTL1, 782 ahc_inb(ahc, SXFRCTL1) | BITBUCKET); 783 wait = 5000; 784 while (--wait != 0) { 785 if ((ahc_inb(ahc, SCSISIGI) 786 & (CDI|MSGI)) != 0) 787 break; 788 ahc_delay(100); 789 } 790 ahc_outb(ahc, SXFRCTL1, 791 ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET); 792 if (wait == 0) { 793 struct scb *scb1; 794 u_int scb_index; 795 796 ahc_print_devinfo(ahc, &devinfo); 797 printf("Unable to clear parity error. " 798 "Resetting bus.\n"); 799 scb_index = ahc_inb(ahc, SCB_TAG); 800 scb1 = ahc_lookup_scb(ahc, scb_index); 801 if (scb1 != NULL) 802 ahc_set_transaction_status(scb1, 803 CAM_UNCOR_PARITY); 804 ahc_reset_channel(ahc, devinfo.channel, 805 /*init reset*/TRUE); 806 } 807 } else { 808 (void)ahc_inb(ahc, SCSIDATL); 809 } 810 } 811 break; 812 } 813 case DATA_OVERRUN: 814 { 815 /* 816 * When the sequencer detects an overrun, it 817 * places the controller in "BITBUCKET" mode 818 * and allows the target to complete its transfer. 819 * Unfortunately, none of the counters get updated 820 * when the controller is in this mode, so we have 821 * no way of knowing how large the overrun was. 822 */ 823 u_int scbindex = ahc_inb(ahc, SCB_TAG); 824 u_int lastphase = ahc_inb(ahc, LASTPHASE); 825 u_int i; 826 827 scb = ahc_lookup_scb(ahc, scbindex); 828 for (i = 0; i < num_phases; i++) { 829 if (lastphase == ahc_phase_table[i].phase) 830 break; 831 } 832 ahc_print_path(ahc, scb); 833 printf("data overrun detected %s." 
834 " Tag == 0x%x.\n", 835 ahc_phase_table[i].phasemsg, 836 scb->hscb->tag); 837 ahc_print_path(ahc, scb); 838 printf("%s seen Data Phase. Length = %ld. NumSGs = %d.\n", 839 ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't", 840 ahc_get_transfer_length(scb), scb->sg_count); 841 if (scb->sg_count > 0) { 842 for (i = 0; i < scb->sg_count; i++) { 843 844 printf("sg[%d] - Addr 0x%x%x : Length %d\n", 845 i, 846 (ahc_le32toh(scb->sg_list[i].len) >> 24 847 & SG_HIGH_ADDR_BITS), 848 ahc_le32toh(scb->sg_list[i].addr), 849 ahc_le32toh(scb->sg_list[i].len) 850 & AHC_SG_LEN_MASK); 851 } 852 } 853 /* 854 * Set this and it will take effect when the 855 * target does a command complete. 856 */ 857 ahc_freeze_devq(ahc, scb); 858 if ((scb->flags & SCB_SENSE) == 0) { 859 ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR); 860 } else { 861 scb->flags &= ~SCB_SENSE; 862 ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); 863 } 864 ahc_freeze_scb(scb); 865 866 if ((ahc->features & AHC_ULTRA2) != 0) { 867 /* 868 * Clear the channel in case we return 869 * to data phase later. 870 */ 871 ahc_outb(ahc, SXFRCTL0, 872 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN); 873 ahc_outb(ahc, SXFRCTL0, 874 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN); 875 } 876 if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { 877 u_int dscommand1; 878 879 /* Ensure HHADDR is 0 for future DMA operations. */ 880 dscommand1 = ahc_inb(ahc, DSCOMMAND1); 881 ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0); 882 ahc_outb(ahc, HADDR, 0); 883 ahc_outb(ahc, DSCOMMAND1, dscommand1); 884 } 885 break; 886 } 887 case MKMSG_FAILED: 888 { 889 u_int scbindex; 890 891 printf("%s:%c:%d:%d: Attempt to issue message failed\n", 892 ahc_name(ahc), devinfo.channel, devinfo.target, 893 devinfo.lun); 894 scbindex = ahc_inb(ahc, SCB_TAG); 895 scb = ahc_lookup_scb(ahc, scbindex); 896 if (scb != NULL 897 && (scb->flags & SCB_RECOVERY_SCB) != 0) 898 /* 899 * Ensure that we didn't put a second instance of this 900 * SCB into the QINFIFO. 901 */ 902 ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb), 903 SCB_GET_CHANNEL(ahc, scb), 904 SCB_GET_LUN(scb), scb->hscb->tag, 905 ROLE_INITIATOR, /*status*/0, 906 SEARCH_REMOVE); 907 break; 908 } 909 case NO_FREE_SCB: 910 { 911 printf("%s: No free or disconnected SCBs\n", ahc_name(ahc)); 912 ahc_dump_card_state(ahc); 913 panic("for safety"); 914 break; 915 } 916 case SCB_MISMATCH: 917 { 918 u_int scbptr; 919 920 scbptr = ahc_inb(ahc, SCBPTR); 921 printf("Bogus TAG after DMA. 
SCBPTR %d, tag %d, our tag %d\n", 922 scbptr, ahc_inb(ahc, ARG_1), 923 ahc->scb_data->hscbs[scbptr].tag); 924 ahc_dump_card_state(ahc); 925 panic("for saftey"); 926 break; 927 } 928 case OUT_OF_RANGE: 929 { 930 printf("%s: BTT calculation out of range\n", ahc_name(ahc)); 931 printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " 932 "ARG_1 == 0x%x ACCUM = 0x%x\n", 933 ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), 934 ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); 935 printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " 936 "SINDEX == 0x%x\n, A == 0x%x\n", 937 ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), 938 ahc_index_busy_tcl(ahc, 939 BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), 940 ahc_inb(ahc, SAVED_LUN))), 941 ahc_inb(ahc, SINDEX), 942 ahc_inb(ahc, ACCUM)); 943 printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " 944 "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", 945 ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), 946 ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), 947 ahc_inb(ahc, SCB_CONTROL)); 948 printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", 949 ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); 950 ahc_dump_card_state(ahc); 951 panic("for safety"); 952 break; 953 } 954 default: 955 printf("ahc_intr: seqint, " 956 "intstat == 0x%x, scsisigi = 0x%x\n", 957 intstat, ahc_inb(ahc, SCSISIGI)); 958 break; 959 } 960 unpause: 961 /* 962 * The sequencer is paused immediately on 963 * a SEQINT, so we should restart it when 964 * we're done. 965 */ 966 ahc_unpause(ahc); 967 } 968 969 void 970 ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat) 971 { 972 u_int scb_index; 973 u_int status0; 974 u_int status; 975 struct scb *scb; 976 char cur_channel; 977 char intr_channel; 978 979 if ((ahc->features & AHC_TWIN) != 0 980 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0)) 981 cur_channel = 'B'; 982 else 983 cur_channel = 'A'; 984 intr_channel = cur_channel; 985 986 if ((ahc->features & AHC_ULTRA2) != 0) 987 status0 = ahc_inb(ahc, SSTAT0) & IOERR; 988 else 989 status0 = 0; 990 status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); 991 if (status == 0 && status0 == 0) { 992 if ((ahc->features & AHC_TWIN) != 0) { 993 /* Try the other channel */ 994 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); 995 status = ahc_inb(ahc, SSTAT1) 996 & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); 997 intr_channel = (cur_channel == 'A') ? 'B' : 'A'; 998 } 999 if (status == 0) { 1000 printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc)); 1001 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1002 ahc_unpause(ahc); 1003 return; 1004 } 1005 } 1006 1007 /* Make sure the sequencer is in a safe location. */ 1008 ahc_clear_critical_section(ahc); 1009 1010 scb_index = ahc_inb(ahc, SCB_TAG); 1011 scb = ahc_lookup_scb(ahc, scb_index); 1012 if (scb != NULL 1013 && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) != 0) 1014 scb = NULL; 1015 1016 if ((ahc->features & AHC_ULTRA2) != 0 1017 && (status0 & IOERR) != 0) { 1018 int now_lvd; 1019 1020 now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40; 1021 printf("%s: Transceiver State Has Changed to %s mode\n", 1022 ahc_name(ahc), now_lvd ? "LVD" : "SE"); 1023 ahc_outb(ahc, CLRSINT0, CLRIOERR); 1024 /* 1025 * When transitioning to SE mode, the reset line 1026 * glitches, triggering an arbitration bug in some 1027 * Ultra2 controllers. This bug is cleared when we 1028 * assert the reset line. Since a reset glitch has 1029 * already occurred with this transition and a 1030 * transceiver state change is handled just like 1031 * a bus reset anyway, asserting the reset line 1032 * ourselves is safe. 
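		 * (Hence the call below passes "Initiate Reset" only when
		 * the new mode is single-ended, i.e. now_lvd == 0.)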
1033 */ 1034 ahc_reset_channel(ahc, intr_channel, 1035 /*Initiate Reset*/now_lvd == 0); 1036 } else if ((status & SCSIRSTI) != 0) { 1037 printf("%s: Someone reset channel %c\n", 1038 ahc_name(ahc), intr_channel); 1039 if (intr_channel != cur_channel) 1040 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); 1041 ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE); 1042 } else if ((status & SCSIPERR) != 0) { 1043 /* 1044 * Determine the bus phase and queue an appropriate message. 1045 * SCSIPERR is latched true as soon as a parity error 1046 * occurs. If the sequencer acked the transfer that 1047 * caused the parity error and the currently presented 1048 * transfer on the bus has correct parity, SCSIPERR will 1049 * be cleared by CLRSCSIPERR. Use this to determine if 1050 * we should look at the last phase the sequencer recorded, 1051 * or the current phase presented on the bus. 1052 */ 1053 u_int mesg_out; 1054 u_int curphase; 1055 u_int errorphase; 1056 u_int lastphase; 1057 u_int scsirate; 1058 u_int i; 1059 u_int sstat2; 1060 int silent; 1061 1062 lastphase = ahc_inb(ahc, LASTPHASE); 1063 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 1064 sstat2 = ahc_inb(ahc, SSTAT2); 1065 ahc_outb(ahc, CLRSINT1, CLRSCSIPERR); 1066 /* 1067 * For all phases save DATA, the sequencer won't 1068 * automatically ack a byte that has a parity error 1069 * in it. So the only way that the current phase 1070 * could be 'data-in' is if the parity error is for 1071 * an already acked byte in the data phase. During 1072 * synchronous data-in transfers, we may actually 1073 * ack bytes before latching the current phase in 1074 * LASTPHASE, leading to the discrepancy between 1075 * curphase and lastphase. 1076 */ 1077 if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0 1078 || curphase == P_DATAIN || curphase == P_DATAIN_DT) 1079 errorphase = curphase; 1080 else 1081 errorphase = lastphase; 1082 1083 for (i = 0; i < num_phases; i++) { 1084 if (errorphase == ahc_phase_table[i].phase) 1085 break; 1086 } 1087 mesg_out = ahc_phase_table[i].mesg_out; 1088 silent = FALSE; 1089 if (scb != NULL) { 1090 if (SCB_IS_SILENT(scb)) 1091 silent = TRUE; 1092 else 1093 ahc_print_path(ahc, scb); 1094 scb->flags |= SCB_TRANSMISSION_ERROR; 1095 } else 1096 printf("%s:%c:%d: ", ahc_name(ahc), intr_channel, 1097 SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID))); 1098 scsirate = ahc_inb(ahc, SCSIRATE); 1099 if (silent == FALSE) { 1100 printf("parity error detected %s. " 1101 "SEQADDR(0x%x) SCSIRATE(0x%x)\n", 1102 ahc_phase_table[i].phasemsg, 1103 ahc_inw(ahc, SEQADDR0), 1104 scsirate); 1105 if ((ahc->features & AHC_DT) != 0) { 1106 if ((sstat2 & CRCVALERR) != 0) 1107 printf("\tCRC Value Mismatch\n"); 1108 if ((sstat2 & CRCENDERR) != 0) 1109 printf("\tNo terminal CRC packet " 1110 "recevied\n"); 1111 if ((sstat2 & CRCREQERR) != 0) 1112 printf("\tIllegal CRC packet " 1113 "request\n"); 1114 if ((sstat2 & DUAL_EDGE_ERR) != 0) 1115 printf("\tUnexpected %sDT Data Phase\n", 1116 (scsirate & SINGLE_EDGE) 1117 ? "" : "non-"); 1118 } 1119 } 1120 1121 if ((ahc->features & AHC_DT) != 0 1122 && (sstat2 & DUAL_EDGE_ERR) != 0) { 1123 /* 1124 * This error applies regardless of 1125 * data direction, so ignore the value 1126 * in the phase table. 1127 */ 1128 mesg_out = MSG_INITIATOR_DET_ERR; 1129 } 1130 1131 /* 1132 * We've set the hardware to assert ATN if we 1133 * get a parity error on "in" phases, so all we 1134 * need to do is stuff the message buffer with 1135 * the appropriate message. 
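		 * (Per ahc_phase_table above: Message-in parity errors map
		 * to MSG_PARITY_ERROR, Data-in/DT Data-in/Status map to
		 * MSG_INITIATOR_DET_ERR, and the "out" phases map to
		 * MSG_NOOP, in which case nothing is queued here.)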
"In" phases have set 1136 * mesg_out to something other than MSG_NOP. 1137 */ 1138 if (mesg_out != MSG_NOOP) { 1139 if (ahc->msg_type != MSG_TYPE_NONE) 1140 ahc->send_msg_perror = TRUE; 1141 else 1142 ahc_outb(ahc, MSG_OUT, mesg_out); 1143 } 1144 /* 1145 * Force a renegotiation with this target just in 1146 * case we are out of sync for some external reason 1147 * unknown (or unreported) by the target. 1148 */ 1149 ahc_force_renegotiation(ahc); 1150 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1151 ahc_unpause(ahc); 1152 } else if ((status & SELTO) != 0) { 1153 u_int scbptr; 1154 1155 /* Stop the selection */ 1156 ahc_outb(ahc, SCSISEQ, 0); 1157 1158 /* No more pending messages */ 1159 ahc_clear_msg_state(ahc); 1160 1161 /* Clear interrupt state */ 1162 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); 1163 ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR); 1164 1165 /* 1166 * Although the driver does not care about the 1167 * 'Selection in Progress' status bit, the busy 1168 * LED does. SELINGO is only cleared by a sucessfull 1169 * selection, so we must manually clear it to insure 1170 * the LED turns off just incase no future successful 1171 * selections occur (e.g. no devices on the bus). 1172 */ 1173 ahc_outb(ahc, CLRSINT0, CLRSELINGO); 1174 1175 scbptr = ahc_inb(ahc, WAITING_SCBH); 1176 ahc_outb(ahc, SCBPTR, scbptr); 1177 scb_index = ahc_inb(ahc, SCB_TAG); 1178 1179 scb = ahc_lookup_scb(ahc, scb_index); 1180 if (scb == NULL) { 1181 printf("%s: ahc_intr - referenced scb not " 1182 "valid during SELTO scb(%d, %d)\n", 1183 ahc_name(ahc), scbptr, scb_index); 1184 ahc_dump_card_state(ahc); 1185 } else { 1186 #ifdef AHC_DEBUG 1187 if ((ahc_debug & AHC_SHOW_SELTO) != 0) { 1188 ahc_print_path(ahc, scb); 1189 printf("Saw Selection Timeout for SCB 0x%x\n", 1190 scb_index); 1191 } 1192 #endif 1193 /* 1194 * Force a renegotiation with this target just in 1195 * case the cable was pulled and will later be 1196 * re-attached. The target may forget its negotiation 1197 * settings with us should it attempt to reselect 1198 * during the interruption. The target will not issue 1199 * a unit attention in this case, so we must always 1200 * renegotiate. 1201 */ 1202 ahc_force_renegotiation(ahc); 1203 ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT); 1204 ahc_freeze_devq(ahc, scb); 1205 } 1206 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1207 ahc_restart(ahc); 1208 } else if ((status & BUSFREE) != 0 1209 && (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) { 1210 u_int lastphase; 1211 u_int saved_scsiid; 1212 u_int saved_lun; 1213 u_int target; 1214 u_int initiator_role_id; 1215 char channel; 1216 int printerror; 1217 1218 /* 1219 * Clear our selection hardware as soon as possible. 1220 * We may have an entry in the waiting Q for this target, 1221 * that is affected by this busfree and we don't want to 1222 * go about selecting the target while we handle the event. 1223 */ 1224 ahc_outb(ahc, SCSISEQ, 1225 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 1226 1227 /* 1228 * Disable busfree interrupts and clear the busfree 1229 * interrupt status. We do this here so that several 1230 * bus transactions occur prior to clearing the SCSIINT 1231 * latch. It can take a bit for the clearing to take effect. 1232 */ 1233 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); 1234 ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR); 1235 1236 /* 1237 * Look at what phase we were last in. 1238 * If its message out, chances are pretty good 1239 * that the busfree was in response to one of 1240 * our abort requests. 
1241 */ 1242 lastphase = ahc_inb(ahc, LASTPHASE); 1243 saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); 1244 saved_lun = ahc_inb(ahc, SAVED_LUN); 1245 target = SCSIID_TARGET(ahc, saved_scsiid); 1246 initiator_role_id = SCSIID_OUR_ID(saved_scsiid); 1247 channel = SCSIID_CHANNEL(ahc, saved_scsiid); 1248 printerror = 1; 1249 1250 if (lastphase == P_MESGOUT) { 1251 struct ahc_devinfo devinfo; 1252 u_int tag; 1253 1254 ahc_fetch_devinfo(ahc, &devinfo); 1255 tag = SCB_LIST_NULL; 1256 if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT_TAG, TRUE) 1257 || ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT, TRUE)) { 1258 if (ahc->msgout_buf[ahc->msgout_index - 1] 1259 == MSG_ABORT_TAG) 1260 tag = scb->hscb->tag; 1261 ahc_print_path(ahc, scb); 1262 printf("SCB %d - Abort%s Completed.\n", 1263 scb->hscb->tag, tag == SCB_LIST_NULL ? 1264 "" : " Tag"); 1265 ahc_abort_scbs(ahc, target, channel, 1266 saved_lun, tag, 1267 ROLE_INITIATOR, 1268 CAM_REQ_ABORTED); 1269 printerror = 0; 1270 } else if (ahc_sent_msg(ahc, AHCMSG_1B, 1271 MSG_BUS_DEV_RESET, TRUE)) { 1272 #ifdef __FreeBSD__ 1273 /* 1274 * Don't mark the user's request for this BDR 1275 * as completing with CAM_BDR_SENT. CAM3 1276 * specifies CAM_REQ_CMP. 1277 */ 1278 if (scb != NULL 1279 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV 1280 && ahc_match_scb(ahc, scb, target, channel, 1281 CAM_LUN_WILDCARD, 1282 SCB_LIST_NULL, 1283 ROLE_INITIATOR)) { 1284 ahc_set_transaction_status(scb, 1285 CAM_REQ_CMP); 1286 } 1287 #endif 1288 ahc_compile_devinfo(&devinfo, 1289 initiator_role_id, 1290 target, 1291 CAM_LUN_WILDCARD, 1292 channel, 1293 ROLE_INITIATOR); 1294 ahc_handle_devreset(ahc, &devinfo, 1295 CAM_BDR_SENT, 1296 "Bus Device Reset", 1297 /*verbose_level*/0); 1298 printerror = 0; 1299 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, 1300 MSG_EXT_PPR, FALSE)) { 1301 struct ahc_initiator_tinfo *tinfo; 1302 struct ahc_tmode_tstate *tstate; 1303 1304 /* 1305 * PPR Rejected. Try non-ppr negotiation 1306 * and retry command. 1307 */ 1308 tinfo = ahc_fetch_transinfo(ahc, 1309 devinfo.channel, 1310 devinfo.our_scsiid, 1311 devinfo.target, 1312 &tstate); 1313 tinfo->curr.transport_version = 2; 1314 tinfo->goal.transport_version = 2; 1315 tinfo->goal.ppr_options = 0; 1316 ahc_qinfifo_requeue_tail(ahc, scb); 1317 printerror = 0; 1318 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, 1319 MSG_EXT_WDTR, FALSE) 1320 || ahc_sent_msg(ahc, AHCMSG_EXT, 1321 MSG_EXT_SDTR, FALSE)) { 1322 /* 1323 * Negotiation Rejected. Go-async and 1324 * retry command. 1325 */ 1326 ahc_set_width(ahc, &devinfo, 1327 MSG_EXT_WDTR_BUS_8_BIT, 1328 AHC_TRANS_CUR|AHC_TRANS_GOAL, 1329 /*paused*/TRUE); 1330 ahc_set_syncrate(ahc, &devinfo, 1331 /*syncrate*/NULL, 1332 /*period*/0, /*offset*/0, 1333 /*ppr_options*/0, 1334 AHC_TRANS_CUR|AHC_TRANS_GOAL, 1335 /*paused*/TRUE); 1336 ahc_qinfifo_requeue_tail(ahc, scb); 1337 printerror = 0; 1338 } 1339 } 1340 if (printerror != 0) { 1341 u_int i; 1342 1343 if (scb != NULL) { 1344 u_int tag; 1345 1346 if ((scb->hscb->control & TAG_ENB) != 0) 1347 tag = scb->hscb->tag; 1348 else 1349 tag = SCB_LIST_NULL; 1350 ahc_print_path(ahc, scb); 1351 ahc_abort_scbs(ahc, target, channel, 1352 SCB_GET_LUN(scb), tag, 1353 ROLE_INITIATOR, 1354 CAM_UNEXP_BUSFREE); 1355 } else { 1356 /* 1357 * We had not fully identified this connection, 1358 * so we cannot abort anything. 
1359 */ 1360 printf("%s: ", ahc_name(ahc)); 1361 } 1362 for (i = 0; i < num_phases; i++) { 1363 if (lastphase == ahc_phase_table[i].phase) 1364 break; 1365 } 1366 /* 1367 * Renegotiate with this device at the 1368 * next opportunity just in case this busfree 1369 * is due to a negotiation mismatch with the 1370 * device. 1371 */ 1372 ahc_force_renegotiation(ahc); 1373 printf("Unexpected busfree %s\n" 1374 "SEQADDR == 0x%x\n", 1375 ahc_phase_table[i].phasemsg, 1376 ahc_inb(ahc, SEQADDR0) 1377 | (ahc_inb(ahc, SEQADDR1) << 8)); 1378 } 1379 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1380 ahc_restart(ahc); 1381 } else { 1382 printf("%s: Missing case in ahc_handle_scsiint. status = %x\n", 1383 ahc_name(ahc), status); 1384 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1385 } 1386 } 1387 1388 /* 1389 * Force renegotiation to occur the next time we initiate 1390 * a command to the current device. 1391 */ 1392 static void 1393 ahc_force_renegotiation(struct ahc_softc *ahc) 1394 { 1395 struct ahc_devinfo devinfo; 1396 struct ahc_initiator_tinfo *targ_info; 1397 struct ahc_tmode_tstate *tstate; 1398 1399 ahc_fetch_devinfo(ahc, &devinfo); 1400 targ_info = ahc_fetch_transinfo(ahc, 1401 devinfo.channel, 1402 devinfo.our_scsiid, 1403 devinfo.target, 1404 &tstate); 1405 ahc_update_neg_request(ahc, &devinfo, tstate, 1406 targ_info, AHC_NEG_IF_NON_ASYNC); 1407 } 1408 1409 #define AHC_MAX_STEPS 2000 1410 void 1411 ahc_clear_critical_section(struct ahc_softc *ahc) 1412 { 1413 int stepping; 1414 int steps; 1415 u_int simode0; 1416 u_int simode1; 1417 1418 if (ahc->num_critical_sections == 0) 1419 return; 1420 1421 stepping = FALSE; 1422 steps = 0; 1423 simode0 = 0; 1424 simode1 = 0; 1425 for (;;) { 1426 struct cs *cs; 1427 u_int seqaddr; 1428 u_int i; 1429 1430 seqaddr = ahc_inb(ahc, SEQADDR0) 1431 | (ahc_inb(ahc, SEQADDR1) << 8); 1432 1433 /* 1434 * Seqaddr represents the next instruction to execute, 1435 * so we are really executing the instruction just 1436 * before it. 1437 */ 1438 if (seqaddr != 0) 1439 seqaddr -= 1; 1440 cs = ahc->critical_sections; 1441 for (i = 0; i < ahc->num_critical_sections; i++, cs++) { 1442 1443 if (cs->begin < seqaddr && cs->end >= seqaddr) 1444 break; 1445 } 1446 1447 if (i == ahc->num_critical_sections) 1448 break; 1449 1450 if (steps > AHC_MAX_STEPS) { 1451 printf("%s: Infinite loop in critical section\n", 1452 ahc_name(ahc)); 1453 ahc_dump_card_state(ahc); 1454 panic("critical section loop"); 1455 } 1456 1457 steps++; 1458 if (stepping == FALSE) { 1459 1460 /* 1461 * Disable all interrupt sources so that the 1462 * sequencer will not be stuck by a pausing 1463 * interrupt condition while we attempt to 1464 * leave a critical section. 1465 */ 1466 simode0 = ahc_inb(ahc, SIMODE0); 1467 ahc_outb(ahc, SIMODE0, 0); 1468 simode1 = ahc_inb(ahc, SIMODE1); 1469 if ((ahc->features & AHC_DT) != 0) 1470 /* 1471 * On DT class controllers, we 1472 * use the enhanced busfree logic. 1473 * Unfortunately we cannot re-enable 1474 * busfree detection within the 1475 * current connection, so we must 1476 * leave it on while single stepping. 
1477 */ 1478 ahc_outb(ahc, SIMODE1, ENBUSFREE); 1479 else 1480 ahc_outb(ahc, SIMODE1, 0); 1481 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1482 ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) | STEP); 1483 stepping = TRUE; 1484 } 1485 if ((ahc->features & AHC_DT) != 0) { 1486 ahc_outb(ahc, CLRSINT1, CLRBUSFREE); 1487 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1488 } 1489 ahc_outb(ahc, HCNTRL, ahc->unpause); 1490 while (!ahc_is_paused(ahc)) 1491 ahc_delay(200); 1492 } 1493 if (stepping) { 1494 ahc_outb(ahc, SIMODE0, simode0); 1495 ahc_outb(ahc, SIMODE1, simode1); 1496 ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) & ~STEP); 1497 } 1498 } 1499 1500 /* 1501 * Clear any pending interrupt status. 1502 */ 1503 void 1504 ahc_clear_intstat(struct ahc_softc *ahc) 1505 { 1506 /* Clear any interrupt conditions this may have caused */ 1507 ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI 1508 |CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG| 1509 CLRREQINIT); 1510 ahc_flush_device_writes(ahc); 1511 ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO); 1512 ahc_flush_device_writes(ahc); 1513 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1514 ahc_flush_device_writes(ahc); 1515 } 1516 1517 /**************************** Debugging Routines ******************************/ 1518 #ifdef AHC_DEBUG 1519 uint32_t ahc_debug = 0; /* AHC_SHOW_MISC|AHC_SHOW_SENSE|AHC_DEBUG_OPTS;*/ 1520 #endif 1521 1522 void 1523 ahc_print_scb(struct scb *scb) 1524 { 1525 int i; 1526 1527 struct hardware_scb *hscb = scb->hscb; 1528 1529 printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n", 1530 (void *)scb, 1531 hscb->control, 1532 hscb->scsiid, 1533 hscb->lun, 1534 hscb->cdb_len); 1535 printf("Shared Data: "); 1536 for (i = 0; i < sizeof(hscb->shared_data.cdb); i++) 1537 printf("%#02x", hscb->shared_data.cdb[i]); 1538 printf(" dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n", 1539 ahc_le32toh(hscb->dataptr), 1540 ahc_le32toh(hscb->datacnt), 1541 ahc_le32toh(hscb->sgptr), 1542 hscb->tag); 1543 if (scb->sg_count > 0) { 1544 for (i = 0; i < scb->sg_count; i++) { 1545 printf("sg[%d] - Addr 0x%x%x : Length %d\n", 1546 i, 1547 (ahc_le32toh(scb->sg_list[i].len) >> 24 1548 & SG_HIGH_ADDR_BITS), 1549 ahc_le32toh(scb->sg_list[i].addr), 1550 ahc_le32toh(scb->sg_list[i].len)); 1551 } 1552 } 1553 } 1554 1555 /************************* Transfer Negotiation *******************************/ 1556 /* 1557 * Allocate per target mode instance (ID we respond to as a target) 1558 * transfer negotiation data structures. 1559 */ 1560 static struct ahc_tmode_tstate * 1561 ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel) 1562 { 1563 struct ahc_tmode_tstate *master_tstate; 1564 struct ahc_tmode_tstate *tstate; 1565 int i; 1566 1567 master_tstate = ahc->enabled_targets[ahc->our_id]; 1568 if (channel == 'B') { 1569 scsi_id += 8; 1570 master_tstate = ahc->enabled_targets[ahc->our_id_b + 8]; 1571 } 1572 if (ahc->enabled_targets[scsi_id] != NULL 1573 && ahc->enabled_targets[scsi_id] != master_tstate) 1574 panic("%s: ahc_alloc_tstate - Target already allocated", 1575 ahc_name(ahc)); 1576 tstate = malloc(sizeof(*tstate), M_DEVBUF, M_NOWAIT); 1577 if (tstate == NULL) 1578 return (NULL); 1579 1580 /* 1581 * If we have allocated a master tstate, copy user settings from 1582 * the master tstate (taken from SRAM or the EEPROM) for this 1583 * channel, but reset our current and goal settings to async/narrow 1584 * until an initiator talks to us. 
1585 */ 1586 if (master_tstate != NULL) { 1587 memcpy(tstate, master_tstate, sizeof(*tstate)); 1588 memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns)); 1589 tstate->ultraenb = 0; 1590 for (i = 0; i < AHC_NUM_TARGETS; i++) { 1591 memset(&tstate->transinfo[i].curr, 0, 1592 sizeof(tstate->transinfo[i].curr)); 1593 memset(&tstate->transinfo[i].goal, 0, 1594 sizeof(tstate->transinfo[i].goal)); 1595 } 1596 } else 1597 memset(tstate, 0, sizeof(*tstate)); 1598 ahc->enabled_targets[scsi_id] = tstate; 1599 return (tstate); 1600 } 1601 1602 #ifdef AHC_TARGET_MODE 1603 /* 1604 * Free per target mode instance (ID we respond to as a target) 1605 * transfer negotiation data structures. 1606 */ 1607 static void 1608 ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force) 1609 { 1610 struct ahc_tmode_tstate *tstate; 1611 1612 /* 1613 * Don't clean up our "master" tstate. 1614 * It has our default user settings. 1615 */ 1616 if (((channel == 'B' && scsi_id == ahc->our_id_b) 1617 || (channel == 'A' && scsi_id == ahc->our_id)) 1618 && force == FALSE) 1619 return; 1620 1621 if (channel == 'B') 1622 scsi_id += 8; 1623 tstate = ahc->enabled_targets[scsi_id]; 1624 if (tstate != NULL) 1625 free(tstate, M_DEVBUF); 1626 ahc->enabled_targets[scsi_id] = NULL; 1627 } 1628 #endif 1629 1630 /* 1631 * Called when we have an active connection to a target on the bus, 1632 * this function finds the nearest syncrate to the input period limited 1633 * by the capabilities of the bus connectivity of and sync settings for 1634 * the target. 1635 */ 1636 struct ahc_syncrate * 1637 ahc_devlimited_syncrate(struct ahc_softc *ahc, 1638 struct ahc_initiator_tinfo *tinfo, 1639 u_int *period, u_int *ppr_options, role_t role) 1640 { 1641 struct ahc_transinfo *transinfo; 1642 u_int maxsync; 1643 1644 if ((ahc->features & AHC_ULTRA2) != 0) { 1645 if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0 1646 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) { 1647 maxsync = AHC_SYNCRATE_DT; 1648 } else { 1649 maxsync = AHC_SYNCRATE_ULTRA; 1650 /* Can't do DT on an SE bus */ 1651 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1652 } 1653 } else if ((ahc->features & AHC_ULTRA) != 0) { 1654 maxsync = AHC_SYNCRATE_ULTRA; 1655 } else { 1656 maxsync = AHC_SYNCRATE_FAST; 1657 } 1658 /* 1659 * Never allow a value higher than our current goal 1660 * period otherwise we may allow a target initiated 1661 * negotiation to go above the limit as set by the 1662 * user. In the case of an initiator initiated 1663 * sync negotiation, we limit based on the user 1664 * setting. This allows the system to still accept 1665 * incoming negotiations even if target initiated 1666 * negotiation is not performed. 1667 */ 1668 if (role == ROLE_TARGET) 1669 transinfo = &tinfo->user; 1670 else 1671 transinfo = &tinfo->goal; 1672 *ppr_options &= transinfo->ppr_options; 1673 if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) { 1674 maxsync = MAX(maxsync, AHC_SYNCRATE_ULTRA2); 1675 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1676 } 1677 if (transinfo->period == 0) { 1678 *period = 0; 1679 *ppr_options = 0; 1680 return (NULL); 1681 } 1682 *period = MAX(*period, transinfo->period); 1683 return (ahc_find_syncrate(ahc, period, ppr_options, maxsync)); 1684 } 1685 1686 /* 1687 * Look up the valid period to SCSIRATE conversion in our table. 1688 * Return the period and offset that should be sent to the target 1689 * if this was the beginning of an SDTR. 
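 * A hedged usage sketch (mirroring how the routine is driven from
 * ahc_devlimited_syncrate() above; "maxsync" is whichever AHC_SYNCRATE_*
 * limit was chosen there):
 *
 *	u_int period = 12;		(the "20.0" MHz row of ahc_syncrates)
 *	u_int ppr_options = 0;
 *	struct ahc_syncrate *sr;
 *
 *	sr = ahc_find_syncrate(ahc, &period, &ppr_options, maxsync);
 *
 * A NULL return means asynchronous (the routine zeroes *period itself);
 * if the request was faster than the limiting entry, *period is bumped
 * up to that entry's period.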
1690 */ 1691 struct ahc_syncrate * 1692 ahc_find_syncrate(struct ahc_softc *ahc, u_int *period, 1693 u_int *ppr_options, u_int maxsync) 1694 { 1695 struct ahc_syncrate *syncrate; 1696 1697 if ((ahc->features & AHC_DT) == 0) 1698 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1699 1700 /* Skip all DT only entries if DT is not available */ 1701 if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0 1702 && maxsync < AHC_SYNCRATE_ULTRA2) 1703 maxsync = AHC_SYNCRATE_ULTRA2; 1704 1705 for (syncrate = &ahc_syncrates[maxsync]; 1706 syncrate->rate != NULL; 1707 syncrate++) { 1708 1709 /* 1710 * The Ultra2 table doesn't go as low 1711 * as for the Fast/Ultra cards. 1712 */ 1713 if ((ahc->features & AHC_ULTRA2) != 0 1714 && (syncrate->sxfr_u2 == 0)) 1715 break; 1716 1717 if (*period <= syncrate->period) { 1718 /* 1719 * When responding to a target that requests 1720 * sync, the requested rate may fall between 1721 * two rates that we can output, but still be 1722 * a rate that we can receive. Because of this, 1723 * we want to respond to the target with 1724 * the same rate that it sent to us even 1725 * if the period we use to send data to it 1726 * is lower. Only lower the response period 1727 * if we must. 1728 */ 1729 if (syncrate == &ahc_syncrates[maxsync]) 1730 *period = syncrate->period; 1731 1732 /* 1733 * At some speeds, we only support 1734 * ST transfers. 1735 */ 1736 if ((syncrate->sxfr_u2 & ST_SXFR) != 0) 1737 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1738 break; 1739 } 1740 } 1741 1742 if ((*period == 0) 1743 || (syncrate->rate == NULL) 1744 || ((ahc->features & AHC_ULTRA2) != 0 1745 && (syncrate->sxfr_u2 == 0))) { 1746 /* Use asynchronous transfers. */ 1747 *period = 0; 1748 syncrate = NULL; 1749 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1750 } 1751 return (syncrate); 1752 } 1753 1754 /* 1755 * Convert from an entry in our syncrate table to the SCSI equivalent 1756 * sync "period" factor. 1757 */ 1758 u_int 1759 ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync) 1760 { 1761 struct ahc_syncrate *syncrate; 1762 1763 if ((ahc->features & AHC_ULTRA2) != 0) 1764 scsirate &= SXFR_ULTRA2; 1765 else 1766 scsirate &= SXFR; 1767 1768 syncrate = &ahc_syncrates[maxsync]; 1769 while (syncrate->rate != NULL) { 1770 1771 if ((ahc->features & AHC_ULTRA2) != 0) { 1772 if (syncrate->sxfr_u2 == 0) 1773 break; 1774 else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2)) 1775 return (syncrate->period); 1776 } else if (scsirate == (syncrate->sxfr & SXFR)) { 1777 return (syncrate->period); 1778 } 1779 syncrate++; 1780 } 1781 return (0); /* async */ 1782 } 1783 1784 /* 1785 * Truncate the given synchronous offset to a value the 1786 * current adapter type and syncrate are capable of. 
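 * Typically paired with ahc_find_syncrate() before the result is
 * programmed with ahc_set_syncrate().  A hedged sketch of that pairing
 * (names match routines in this file; the exact arguments vary by
 * call site):
 *
 *	syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, maxsync);
 *	ahc_validate_offset(ahc, tinfo, syncrate, &offset,
 *			    tinfo->curr.width, ROLE_INITIATOR);
 *	ahc_set_syncrate(ahc, devinfo, syncrate, period, offset,
 *			 ppr_options, AHC_TRANS_GOAL, paused);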
1787 */ 1788 void 1789 ahc_validate_offset(struct ahc_softc *ahc, 1790 struct ahc_initiator_tinfo *tinfo, 1791 struct ahc_syncrate *syncrate, 1792 u_int *offset, int wide, role_t role) 1793 { 1794 u_int maxoffset; 1795 1796 /* Limit offset to what we can do */ 1797 if (syncrate == NULL) { 1798 maxoffset = 0; 1799 } else if ((ahc->features & AHC_ULTRA2) != 0) { 1800 maxoffset = MAX_OFFSET_ULTRA2; 1801 } else { 1802 if (wide) 1803 maxoffset = MAX_OFFSET_16BIT; 1804 else 1805 maxoffset = MAX_OFFSET_8BIT; 1806 } 1807 *offset = MIN(*offset, maxoffset); 1808 if (tinfo != NULL) { 1809 if (role == ROLE_TARGET) 1810 *offset = MIN(*offset, tinfo->user.offset); 1811 else 1812 *offset = MIN(*offset, tinfo->goal.offset); 1813 } 1814 } 1815 1816 /* 1817 * Truncate the given transfer width parameter to a value the 1818 * current adapter type is capable of. 1819 */ 1820 void 1821 ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, 1822 u_int *bus_width, role_t role) 1823 { 1824 switch (*bus_width) { 1825 default: 1826 if (ahc->features & AHC_WIDE) { 1827 /* Respond Wide */ 1828 *bus_width = MSG_EXT_WDTR_BUS_16_BIT; 1829 break; 1830 } 1831 /* FALLTHROUGH */ 1832 case MSG_EXT_WDTR_BUS_8_BIT: 1833 *bus_width = MSG_EXT_WDTR_BUS_8_BIT; 1834 break; 1835 } 1836 if (tinfo != NULL) { 1837 if (role == ROLE_TARGET) 1838 *bus_width = MIN(tinfo->user.width, *bus_width); 1839 else 1840 *bus_width = MIN(tinfo->goal.width, *bus_width); 1841 } 1842 } 1843 1844 /* 1845 * Update the bitmask of targets for which the controller should 1846 * negotiate with at the next convenient opportunity. This currently 1847 * means the next time we send the initial identify messages for 1848 * a new transaction. 1849 */ 1850 int 1851 ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 1852 struct ahc_tmode_tstate *tstate, 1853 struct ahc_initiator_tinfo *tinfo, ahc_neg_type neg_type) 1854 { 1855 u_int auto_negotiate_orig; 1856 1857 auto_negotiate_orig = tstate->auto_negotiate; 1858 if (neg_type == AHC_NEG_ALWAYS) { 1859 /* 1860 * Force our "current" settings to be 1861 * unknown so that unless a bus reset 1862 * occurs the need to renegotiate is 1863 * recorded persistently. 1864 */ 1865 if ((ahc->features & AHC_WIDE) != 0) 1866 tinfo->curr.width = AHC_WIDTH_UNKNOWN; 1867 tinfo->curr.period = AHC_PERIOD_UNKNOWN; 1868 tinfo->curr.offset = AHC_OFFSET_UNKNOWN; 1869 } 1870 if (tinfo->curr.period != tinfo->goal.period 1871 || tinfo->curr.width != tinfo->goal.width 1872 || tinfo->curr.offset != tinfo->goal.offset 1873 || tinfo->curr.ppr_options != tinfo->goal.ppr_options 1874 || (neg_type == AHC_NEG_IF_NON_ASYNC 1875 && (tinfo->goal.offset != 0 1876 || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT 1877 || tinfo->goal.ppr_options != 0))) 1878 tstate->auto_negotiate |= devinfo->target_mask; 1879 else 1880 tstate->auto_negotiate &= ~devinfo->target_mask; 1881 1882 return (auto_negotiate_orig != tstate->auto_negotiate); 1883 } 1884 1885 /* 1886 * Update the user/goal/curr tables of synchronous negotiation 1887 * parameters as well as, in the case of a current or active update, 1888 * any data structures on the host controller. In the case of an 1889 * active update, the specified target is currently talking to us on 1890 * the bus, so the transfer parameter update must take effect 1891 * immediately. 
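 * For example, the busfree handler earlier in this file drops a device
 * back to asynchronous transfers after a rejected SDTR/WDTR with:
 *
 *	ahc_set_syncrate(ahc, &devinfo, NULL, 0, 0, 0,
 *			 AHC_TRANS_CUR|AHC_TRANS_GOAL, TRUE);
 *
 * A NULL syncrate (or a zero period) always means async.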
1892 */ 1893 void 1894 ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 1895 struct ahc_syncrate *syncrate, u_int period, 1896 u_int offset, u_int ppr_options, u_int type, int paused) 1897 { 1898 struct ahc_initiator_tinfo *tinfo; 1899 struct ahc_tmode_tstate *tstate; 1900 u_int old_period; 1901 u_int old_offset; 1902 u_int old_ppr; 1903 int active; 1904 int update_needed; 1905 1906 active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; 1907 update_needed = 0; 1908 1909 if (syncrate == NULL) { 1910 period = 0; 1911 offset = 0; 1912 } 1913 1914 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 1915 devinfo->target, &tstate); 1916 1917 if ((type & AHC_TRANS_USER) != 0) { 1918 tinfo->user.period = period; 1919 tinfo->user.offset = offset; 1920 tinfo->user.ppr_options = ppr_options; 1921 } 1922 1923 if ((type & AHC_TRANS_GOAL) != 0) { 1924 tinfo->goal.period = period; 1925 tinfo->goal.offset = offset; 1926 tinfo->goal.ppr_options = ppr_options; 1927 } 1928 1929 old_period = tinfo->curr.period; 1930 old_offset = tinfo->curr.offset; 1931 old_ppr = tinfo->curr.ppr_options; 1932 1933 if ((type & AHC_TRANS_CUR) != 0 1934 && (old_period != period 1935 || old_offset != offset 1936 || old_ppr != ppr_options)) { 1937 u_int scsirate; 1938 1939 update_needed++; 1940 scsirate = tinfo->scsirate; 1941 if ((ahc->features & AHC_ULTRA2) != 0) { 1942 1943 scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC); 1944 if (syncrate != NULL) { 1945 scsirate |= syncrate->sxfr_u2; 1946 if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) 1947 scsirate |= ENABLE_CRC; 1948 else 1949 scsirate |= SINGLE_EDGE; 1950 } 1951 } else { 1952 1953 scsirate &= ~(SXFR|SOFS); 1954 /* 1955 * Ensure Ultra mode is set properly for 1956 * this target. 1957 */ 1958 tstate->ultraenb &= ~devinfo->target_mask; 1959 if (syncrate != NULL) { 1960 if (syncrate->sxfr & ULTRA_SXFR) { 1961 tstate->ultraenb |= 1962 devinfo->target_mask; 1963 } 1964 scsirate |= syncrate->sxfr & SXFR; 1965 scsirate |= offset & SOFS; 1966 } 1967 if (active) { 1968 u_int sxfrctl0; 1969 1970 sxfrctl0 = ahc_inb(ahc, SXFRCTL0); 1971 sxfrctl0 &= ~FAST20; 1972 if (tstate->ultraenb & devinfo->target_mask) 1973 sxfrctl0 |= FAST20; 1974 ahc_outb(ahc, SXFRCTL0, sxfrctl0); 1975 } 1976 } 1977 if (active) { 1978 ahc_outb(ahc, SCSIRATE, scsirate); 1979 if ((ahc->features & AHC_ULTRA2) != 0) 1980 ahc_outb(ahc, SCSIOFFSET, offset); 1981 } 1982 1983 tinfo->scsirate = scsirate; 1984 tinfo->curr.period = period; 1985 tinfo->curr.offset = offset; 1986 tinfo->curr.ppr_options = ppr_options; 1987 1988 ahc_send_async(ahc, devinfo->channel, devinfo->target, 1989 CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL); 1990 if (bootverbose) { 1991 if (offset != 0) { 1992 printf("%s: target %d synchronous at %sMHz%s, " 1993 "offset = 0x%x\n", ahc_name(ahc), 1994 devinfo->target, syncrate->rate, 1995 (ppr_options & MSG_EXT_PPR_DT_REQ) 1996 ? " DT" : "", offset); 1997 } else { 1998 printf("%s: target %d using " 1999 "asynchronous transfers\n", 2000 ahc_name(ahc), devinfo->target); 2001 } 2002 } 2003 } 2004 2005 update_needed += ahc_update_neg_request(ahc, devinfo, tstate, 2006 tinfo, AHC_NEG_TO_GOAL); 2007 2008 if (update_needed) 2009 ahc_update_pending_scbs(ahc); 2010 } 2011 2012 /* 2013 * Update the user/goal/curr tables of wide negotiation 2014 * parameters as well as, in the case of a current or active update, 2015 * any data structures on the host controller. 
In the case of an 2016 * active update, the specified target is currently talking to us on 2017 * the bus, so the transfer parameter update must take effect 2018 * immediately. 2019 */ 2020 void 2021 ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2022 u_int width, u_int type, int paused) 2023 { 2024 struct ahc_initiator_tinfo *tinfo; 2025 struct ahc_tmode_tstate *tstate; 2026 u_int oldwidth; 2027 int active; 2028 int update_needed; 2029 2030 active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; 2031 update_needed = 0; 2032 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 2033 devinfo->target, &tstate); 2034 2035 if ((type & AHC_TRANS_USER) != 0) 2036 tinfo->user.width = width; 2037 2038 if ((type & AHC_TRANS_GOAL) != 0) 2039 tinfo->goal.width = width; 2040 2041 oldwidth = tinfo->curr.width; 2042 if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) { 2043 u_int scsirate; 2044 2045 update_needed++; 2046 scsirate = tinfo->scsirate; 2047 scsirate &= ~WIDEXFER; 2048 if (width == MSG_EXT_WDTR_BUS_16_BIT) 2049 scsirate |= WIDEXFER; 2050 2051 tinfo->scsirate = scsirate; 2052 2053 if (active) 2054 ahc_outb(ahc, SCSIRATE, scsirate); 2055 2056 tinfo->curr.width = width; 2057 2058 ahc_send_async(ahc, devinfo->channel, devinfo->target, 2059 CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL); 2060 if (bootverbose) { 2061 printf("%s: target %d using %dbit transfers\n", 2062 ahc_name(ahc), devinfo->target, 2063 8 * (0x01 << width)); 2064 } 2065 } 2066 2067 update_needed += ahc_update_neg_request(ahc, devinfo, tstate, 2068 tinfo, AHC_NEG_TO_GOAL); 2069 if (update_needed) 2070 ahc_update_pending_scbs(ahc); 2071 } 2072 2073 /* 2074 * Update the current state of tagged queuing for a given target. 2075 */ 2076 void 2077 ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2078 ahc_queue_alg alg) 2079 { 2080 ahc_platform_set_tags(ahc, devinfo, alg); 2081 } 2082 2083 /* 2084 * When the transfer settings for a connection change, update any 2085 * in-transit SCBs to contain the new data so the hardware will 2086 * be set correctly during future (re)selections. 2087 */ 2088 static void 2089 ahc_update_pending_scbs(struct ahc_softc *ahc) 2090 { 2091 struct scb *pending_scb; 2092 int pending_scb_count; 2093 int i; 2094 int paused; 2095 u_int saved_scbptr; 2096 2097 /* 2098 * Traverse the pending SCB list and ensure that all of the 2099 * SCBs there have the proper settings. 
2100 */ 2101 pending_scb_count = 0; 2102 LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) { 2103 struct ahc_devinfo devinfo; 2104 struct hardware_scb *pending_hscb; 2105 struct ahc_initiator_tinfo *tinfo; 2106 struct ahc_tmode_tstate *tstate; 2107 2108 ahc_scb_devinfo(ahc, &devinfo, pending_scb); 2109 tinfo = ahc_fetch_transinfo(ahc, devinfo.channel, 2110 devinfo.our_scsiid, 2111 devinfo.target, &tstate); 2112 pending_hscb = pending_scb->hscb; 2113 pending_hscb->control &= ~ULTRAENB; 2114 if ((tstate->ultraenb & devinfo.target_mask) != 0) 2115 pending_hscb->control |= ULTRAENB; 2116 pending_hscb->scsirate = tinfo->scsirate; 2117 pending_hscb->scsioffset = tinfo->curr.offset; 2118 if ((tstate->auto_negotiate & devinfo.target_mask) == 0 2119 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) { 2120 pending_scb->flags &= ~SCB_AUTO_NEGOTIATE; 2121 pending_hscb->control &= ~MK_MESSAGE; 2122 } 2123 ahc_sync_scb(ahc, pending_scb, 2124 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 2125 pending_scb_count++; 2126 } 2127 2128 if (pending_scb_count == 0) 2129 return; 2130 2131 if (ahc_is_paused(ahc)) { 2132 paused = 1; 2133 } else { 2134 paused = 0; 2135 ahc_pause(ahc); 2136 } 2137 2138 saved_scbptr = ahc_inb(ahc, SCBPTR); 2139 /* Ensure that the hscbs down on the card match the new information */ 2140 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 2141 struct hardware_scb *pending_hscb; 2142 u_int control; 2143 u_int scb_tag; 2144 2145 ahc_outb(ahc, SCBPTR, i); 2146 scb_tag = ahc_inb(ahc, SCB_TAG); 2147 pending_scb = ahc_lookup_scb(ahc, scb_tag); 2148 if (pending_scb == NULL) 2149 continue; 2150 2151 pending_hscb = pending_scb->hscb; 2152 control = ahc_inb(ahc, SCB_CONTROL); 2153 control &= ~(ULTRAENB|MK_MESSAGE); 2154 control |= pending_hscb->control & (ULTRAENB|MK_MESSAGE); 2155 ahc_outb(ahc, SCB_CONTROL, control); 2156 ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate); 2157 ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset); 2158 } 2159 ahc_outb(ahc, SCBPTR, saved_scbptr); 2160 2161 if (paused == 0) 2162 ahc_unpause(ahc); 2163 } 2164 2165 /**************************** Pathing Information *****************************/ 2166 static void 2167 ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2168 { 2169 u_int saved_scsiid; 2170 role_t role; 2171 int our_id; 2172 2173 if (ahc_inb(ahc, SSTAT0) & TARGET) 2174 role = ROLE_TARGET; 2175 else 2176 role = ROLE_INITIATOR; 2177 2178 if (role == ROLE_TARGET 2179 && (ahc->features & AHC_MULTI_TID) != 0 2180 && (ahc_inb(ahc, SEQ_FLAGS) 2181 & (CMDPHASE_PENDING|TARG_CMD_PENDING|NO_DISCONNECT)) != 0) { 2182 /* We were selected, so pull our id from TARGIDIN */ 2183 our_id = ahc_inb(ahc, TARGIDIN) & OID; 2184 } else if ((ahc->features & AHC_ULTRA2) != 0) 2185 our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID; 2186 else 2187 our_id = ahc_inb(ahc, SCSIID) & OID; 2188 2189 saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); 2190 ahc_compile_devinfo(devinfo, 2191 our_id, 2192 SCSIID_TARGET(ahc, saved_scsiid), 2193 ahc_inb(ahc, SAVED_LUN), 2194 SCSIID_CHANNEL(ahc, saved_scsiid), 2195 role); 2196 } 2197 2198 struct ahc_phase_table_entry* 2199 ahc_lookup_phase_entry(int phase) 2200 { 2201 struct ahc_phase_table_entry *entry; 2202 struct ahc_phase_table_entry *last_entry; 2203 2204 /* 2205 * num_phases doesn't include the default entry which 2206 * will be returned if the phase doesn't match. 
2207 */ 2208 last_entry = &ahc_phase_table[num_phases]; 2209 for (entry = ahc_phase_table; entry < last_entry; entry++) { 2210 if (phase == entry->phase) 2211 break; 2212 } 2213 return (entry); 2214 } 2215 2216 void 2217 ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target, 2218 u_int lun, char channel, role_t role) 2219 { 2220 devinfo->our_scsiid = our_id; 2221 devinfo->target = target; 2222 devinfo->lun = lun; 2223 devinfo->target_offset = target; 2224 devinfo->channel = channel; 2225 devinfo->role = role; 2226 if (channel == 'B') 2227 devinfo->target_offset += 8; 2228 devinfo->target_mask = (0x01 << devinfo->target_offset); 2229 } 2230 2231 void 2232 ahc_print_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2233 { 2234 printf("%s:%c:%d:%d: ", ahc_name(ahc), devinfo->channel, 2235 devinfo->target, devinfo->lun); 2236 } 2237 2238 static void 2239 ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2240 struct scb *scb) 2241 { 2242 role_t role; 2243 int our_id; 2244 2245 our_id = SCSIID_OUR_ID(scb->hscb->scsiid); 2246 role = ROLE_INITIATOR; 2247 if ((scb->flags & SCB_TARGET_SCB) != 0) 2248 role = ROLE_TARGET; 2249 ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb), 2250 SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role); 2251 } 2252 2253 2254 /************************ Message Phase Processing ****************************/ 2255 static void 2256 ahc_assert_atn(struct ahc_softc *ahc) 2257 { 2258 u_int scsisigo; 2259 2260 scsisigo = ATNO; 2261 if ((ahc->features & AHC_DT) == 0) 2262 scsisigo |= ahc_inb(ahc, SCSISIGI); 2263 ahc_outb(ahc, SCSISIGO, scsisigo); 2264 } 2265 2266 /* 2267 * When an initiator transaction with the MK_MESSAGE flag either reconnects 2268 * or enters the initial message out phase, we are interrupted. Fill our 2269 * outgoing message buffer with the appropriate message and begin handing 2270 * the message phase(s) manually. 2271 */ 2272 static void 2273 ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2274 struct scb *scb) 2275 { 2276 /* 2277 * To facilitate adding multiple messages together, 2278 * each routine should increment the index and len 2279 * variables instead of setting them explicitly. 2280 */ 2281 ahc->msgout_index = 0; 2282 ahc->msgout_len = 0; 2283 2284 if ((scb->flags & SCB_DEVICE_RESET) == 0 2285 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) { 2286 u_int identify_msg; 2287 2288 identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb); 2289 if ((scb->hscb->control & DISCENB) != 0) 2290 identify_msg |= MSG_IDENTIFY_DISCFLAG; 2291 ahc->msgout_buf[ahc->msgout_index++] = identify_msg; 2292 ahc->msgout_len++; 2293 2294 if ((scb->hscb->control & TAG_ENB) != 0) { 2295 ahc->msgout_buf[ahc->msgout_index++] = 2296 scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE); 2297 ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag; 2298 ahc->msgout_len += 2; 2299 } 2300 } 2301 2302 if (scb->flags & SCB_DEVICE_RESET) { 2303 ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET; 2304 ahc->msgout_len++; 2305 ahc_print_path(ahc, scb); 2306 printf("Bus Device Reset Message Sent\n"); 2307 /* 2308 * Clear our selection hardware in advance of 2309 * the busfree. We may have an entry in the waiting 2310 * Q for this target, and we don't want to go about 2311 * selecting while we handle the busfree and blow it 2312 * away. 
2313 */ 2314 ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); 2315 } else if ((scb->flags & SCB_ABORT) != 0) { 2316 if ((scb->hscb->control & TAG_ENB) != 0) 2317 ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG; 2318 else 2319 ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT; 2320 ahc->msgout_len++; 2321 ahc_print_path(ahc, scb); 2322 printf("Abort%s Message Sent\n", 2323 (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : ""); 2324 /* 2325 * Clear our selection hardware in advance of 2326 * the busfree. We may have an entry in the waiting 2327 * Q for this target, and we don't want to go about 2328 * selecting while we handle the busfree and blow it 2329 * away. 2330 */ 2331 ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); 2332 } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) { 2333 ahc_build_transfer_msg(ahc, devinfo); 2334 } else { 2335 printf("ahc_intr: AWAITING_MSG for an SCB that " 2336 "does not have a waiting message\n"); 2337 printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid, 2338 devinfo->target_mask); 2339 panic("SCB = %d, SCB Control = %x, MSG_OUT = %x " 2340 "SCB flags = %x", scb->hscb->tag, scb->hscb->control, 2341 ahc_inb(ahc, MSG_OUT), scb->flags); 2342 } 2343 2344 /* 2345 * Clear the MK_MESSAGE flag from the SCB so we aren't 2346 * asked to send this message again. 2347 */ 2348 ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE); 2349 scb->hscb->control &= ~MK_MESSAGE; 2350 ahc->msgout_index = 0; 2351 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 2352 } 2353 2354 /* 2355 * Build an appropriate transfer negotiation message for the 2356 * currently active target. 2357 */ 2358 static void 2359 ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2360 { 2361 /* 2362 * We need to initiate transfer negotiations. 2363 * If our current and goal settings are identical, 2364 * we want to renegotiate due to a check condition. 2365 */ 2366 struct ahc_initiator_tinfo *tinfo; 2367 struct ahc_tmode_tstate *tstate; 2368 struct ahc_syncrate *rate; 2369 int dowide; 2370 int dosync; 2371 int doppr; 2372 u_int period; 2373 u_int ppr_options; 2374 u_int offset; 2375 2376 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 2377 devinfo->target, &tstate); 2378 /* 2379 * Filter our period based on the current connection. 2380 * If we can't perform DT transfers on this segment (not in LVD 2381 * mode for instance), then our decision to issue a PPR message 2382 * may change. 2383 */ 2384 period = tinfo->goal.period; 2385 ppr_options = tinfo->goal.ppr_options; 2386 /* Target initiated PPR is not allowed in the SCSI spec */ 2387 if (devinfo->role == ROLE_TARGET) 2388 ppr_options = 0; 2389 rate = ahc_devlimited_syncrate(ahc, tinfo, &period, 2390 &ppr_options, devinfo->role); 2391 dowide = tinfo->curr.width != tinfo->goal.width; 2392 dosync = tinfo->curr.period != period; 2393 /* 2394 * Only use PPR if we have options that need it, even if the device 2395 * claims to support it. There might be an expander in the way 2396 * that doesn't. 2397 */ 2398 doppr = ppr_options != 0; 2399 2400 if (!dowide && !dosync && !doppr) { 2401 dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT; 2402 dosync = tinfo->goal.offset != 0; 2403 } 2404 2405 if (!dowide && !dosync && !doppr) { 2406 /* 2407 * Force async with a WDTR message if we have a wide bus, 2408 * or just issue an SDTR with a 0 offset. 
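 *
 * (ahc_construct_sdtr() below maps a zero offset to
 * AHC_ASYNC_XFER_PERIOD, so the resulting SDTR explicitly advertises
 * asynchronous transfers.)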
2409 */ 2410 if ((ahc->features & AHC_WIDE) != 0) 2411 dowide = 1; 2412 else 2413 dosync = 1; 2414 2415 if (bootverbose) { 2416 ahc_print_devinfo(ahc, devinfo); 2417 printf("Ensuring async\n"); 2418 } 2419 } 2420 2421 /* Target initiated PPR is not allowed in the SCSI spec */ 2422 if (devinfo->role == ROLE_TARGET) 2423 doppr = 0; 2424 2425 /* 2426 * Both the PPR message and SDTR message require the 2427 * goal syncrate to be limited to what the target device 2428 * is capable of handling (based on whether an LVD->SE 2429 * expander is on the bus), so combine these two cases. 2430 * Regardless, guarantee that if we are using WDTR and SDTR 2431 * messages that WDTR comes first. 2432 */ 2433 if (doppr || (dosync && !dowide)) { 2434 2435 offset = tinfo->goal.offset; 2436 ahc_validate_offset(ahc, tinfo, rate, &offset, 2437 doppr ? tinfo->goal.width 2438 : tinfo->curr.width, 2439 devinfo->role); 2440 if (doppr) { 2441 ahc_construct_ppr(ahc, devinfo, period, offset, 2442 tinfo->goal.width, ppr_options); 2443 } else { 2444 ahc_construct_sdtr(ahc, devinfo, period, offset); 2445 } 2446 } else { 2447 ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width); 2448 } 2449 } 2450 2451 /* 2452 * Build a synchronous negotiation message in our message 2453 * buffer based on the input parameters. 2454 */ 2455 static void 2456 ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2457 u_int period, u_int offset) 2458 { 2459 if (offset == 0) 2460 period = AHC_ASYNC_XFER_PERIOD; 2461 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED; 2462 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR_LEN; 2463 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR; 2464 ahc->msgout_buf[ahc->msgout_index++] = period; 2465 ahc->msgout_buf[ahc->msgout_index++] = offset; 2466 ahc->msgout_len += 5; 2467 if (bootverbose) { 2468 printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n", 2469 ahc_name(ahc), devinfo->channel, devinfo->target, 2470 devinfo->lun, period, offset); 2471 } 2472 } 2473 2474 /* 2475 * Build a wide negotiation message in our message 2476 * buffer based on the input parameters. 2477 */ 2478 static void 2479 ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2480 u_int bus_width) 2481 { 2482 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED; 2483 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR_LEN; 2484 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR; 2485 ahc->msgout_buf[ahc->msgout_index++] = bus_width; 2486 ahc->msgout_len += 4; 2487 if (bootverbose) { 2488 printf("(%s:%c:%d:%d): Sending WDTR %x\n", 2489 ahc_name(ahc), devinfo->channel, devinfo->target, 2490 devinfo->lun, bus_width); 2491 } 2492 } 2493 2494 /* 2495 * Build a parallel protocol request message in our message 2496 * buffer based on the input parameters. 
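 *
 * The stores below append an eight byte extended message to
 * msgout_buf with the layout:
 *
 *	MSG_EXTENDED, MSG_EXT_PPR_LEN, MSG_EXT_PPR,
 *	period, 0 (reserved), offset, bus_width, ppr_options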
2497 */ 2498 static void 2499 ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2500 u_int period, u_int offset, u_int bus_width, 2501 u_int ppr_options) 2502 { 2503 if (offset == 0) 2504 period = AHC_ASYNC_XFER_PERIOD; 2505 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED; 2506 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR_LEN; 2507 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR; 2508 ahc->msgout_buf[ahc->msgout_index++] = period; 2509 ahc->msgout_buf[ahc->msgout_index++] = 0; 2510 ahc->msgout_buf[ahc->msgout_index++] = offset; 2511 ahc->msgout_buf[ahc->msgout_index++] = bus_width; 2512 ahc->msgout_buf[ahc->msgout_index++] = ppr_options; 2513 ahc->msgout_len += 8; 2514 if (bootverbose) { 2515 printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, " 2516 "offset %x, ppr_options %x\n", ahc_name(ahc), 2517 devinfo->channel, devinfo->target, devinfo->lun, 2518 bus_width, period, offset, ppr_options); 2519 } 2520 } 2521 2522 /* 2523 * Clear any active message state. 2524 */ 2525 static void 2526 ahc_clear_msg_state(struct ahc_softc *ahc) 2527 { 2528 ahc->msgout_len = 0; 2529 ahc->msgin_index = 0; 2530 ahc->msg_type = MSG_TYPE_NONE; 2531 if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0) { 2532 /* 2533 * The target didn't care to respond to our 2534 * message request, so clear ATN. 2535 */ 2536 ahc_outb(ahc, CLRSINT1, CLRATNO); 2537 } 2538 ahc_outb(ahc, MSG_OUT, MSG_NOOP); 2539 ahc_outb(ahc, SEQ_FLAGS2, 2540 ahc_inb(ahc, SEQ_FLAGS2) & ~TARGET_MSG_PENDING); 2541 } 2542 2543 static void 2544 ahc_handle_proto_violation(struct ahc_softc *ahc) 2545 { 2546 struct ahc_devinfo devinfo; 2547 struct scb *scb; 2548 u_int scbid; 2549 u_int seq_flags; 2550 u_int curphase; 2551 u_int lastphase; 2552 int found; 2553 2554 ahc_fetch_devinfo(ahc, &devinfo); 2555 scbid = ahc_inb(ahc, SCB_TAG); 2556 scb = ahc_lookup_scb(ahc, scbid); 2557 seq_flags = ahc_inb(ahc, SEQ_FLAGS); 2558 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 2559 lastphase = ahc_inb(ahc, LASTPHASE); 2560 if ((seq_flags & NOT_IDENTIFIED) != 0) { 2561 2562 /* 2563 * The reconnecting target either did not send an 2564 * identify message, or did, but we didn't find an SCB 2565 * to match. 2566 */ 2567 ahc_print_devinfo(ahc, &devinfo); 2568 printf("Target did not send an IDENTIFY message. " 2569 "LASTPHASE = 0x%x.\n", lastphase); 2570 scb = NULL; 2571 } else if (scb == NULL) { 2572 /* 2573 * We don't seem to have an SCB active for this 2574 * transaction. Print an error and reset the bus. 2575 */ 2576 ahc_print_devinfo(ahc, &devinfo); 2577 printf("No SCB found during protocol violation\n"); 2578 goto proto_violation_reset; 2579 } else { 2580 ahc_set_transaction_status(scb, CAM_SEQUENCE_FAIL); 2581 if ((seq_flags & NO_CDB_SENT) != 0) { 2582 ahc_print_path(ahc, scb); 2583 printf("No or incomplete CDB sent to device.\n"); 2584 } else if ((ahc_inb(ahc, SCB_CONTROL) & STATUS_RCVD) == 0) { 2585 /* 2586 * The target never bothered to provide status to 2587 * us prior to completing the command. Since we don't 2588 * know the disposition of this command, we must attempt 2589 * to abort it. Assert ATN and prepare to send an abort 2590 * message. 
2591 */ 2592 ahc_print_path(ahc, scb); 2593 printf("Completed command without status.\n"); 2594 } else { 2595 ahc_print_path(ahc, scb); 2596 printf("Unknown protocol violation.\n"); 2597 ahc_dump_card_state(ahc); 2598 } 2599 } 2600 if ((lastphase & ~P_DATAIN_DT) == 0 2601 || lastphase == P_COMMAND) { 2602 proto_violation_reset: 2603 /* 2604 * Target either went directly to data/command 2605 * phase or didn't respond to our ATN. 2606 * The only safe thing to do is to blow 2607 * it away with a bus reset. 2608 */ 2609 found = ahc_reset_channel(ahc, 'A', TRUE); 2610 printf("%s: Issued Channel %c Bus Reset. " 2611 "%d SCBs aborted\n", ahc_name(ahc), 'A', found); 2612 } else { 2613 /* 2614 * Leave the selection hardware off in case 2615 * this abort attempt will affect yet to 2616 * be sent commands. 2617 */ 2618 ahc_outb(ahc, SCSISEQ, 2619 ahc_inb(ahc, SCSISEQ) & ~ENSELO); 2620 ahc_assert_atn(ahc); 2621 ahc_outb(ahc, MSG_OUT, HOST_MSG); 2622 if (scb == NULL) { 2623 ahc_print_devinfo(ahc, &devinfo); 2624 ahc->msgout_buf[0] = MSG_ABORT_TASK; 2625 ahc->msgout_len = 1; 2626 ahc->msgout_index = 0; 2627 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 2628 } else { 2629 ahc_print_path(ahc, scb); 2630 scb->flags |= SCB_ABORT; 2631 } 2632 printf("Protocol violation %s. Attempting to abort.\n", 2633 ahc_lookup_phase_entry(curphase)->phasemsg); 2634 } 2635 } 2636 2637 /* 2638 * Manual message loop handler. 2639 */ 2640 static void 2641 ahc_handle_message_phase(struct ahc_softc *ahc) 2642 { 2643 struct ahc_devinfo devinfo; 2644 u_int bus_phase; 2645 int end_session; 2646 2647 ahc_fetch_devinfo(ahc, &devinfo); 2648 end_session = FALSE; 2649 bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 2650 2651 reswitch: 2652 switch (ahc->msg_type) { 2653 case MSG_TYPE_INITIATOR_MSGOUT: 2654 { 2655 int lastbyte; 2656 int phasemis; 2657 int msgdone; 2658 2659 if (ahc->msgout_len == 0) 2660 panic("HOST_MSG_LOOP interrupt with no active message"); 2661 2662 #ifdef AHC_DEBUG 2663 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { 2664 ahc_print_devinfo(ahc, &devinfo); 2665 printf("INITIATOR_MSG_OUT"); 2666 } 2667 #endif 2668 phasemis = bus_phase != P_MESGOUT; 2669 if (phasemis) { 2670 #ifdef AHC_DEBUG 2671 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { 2672 printf(" PHASEMIS %s\n", 2673 ahc_lookup_phase_entry(bus_phase) 2674 ->phasemsg); 2675 } 2676 #endif 2677 if (bus_phase == P_MESGIN) { 2678 /* 2679 * Change gears and see if 2680 * this messages is of interest to 2681 * us or should be passed back to 2682 * the sequencer. 2683 */ 2684 ahc_outb(ahc, CLRSINT1, CLRATNO); 2685 ahc->send_msg_perror = FALSE; 2686 ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN; 2687 ahc->msgin_index = 0; 2688 goto reswitch; 2689 } 2690 end_session = TRUE; 2691 break; 2692 } 2693 2694 if (ahc->send_msg_perror) { 2695 ahc_outb(ahc, CLRSINT1, CLRATNO); 2696 ahc_outb(ahc, CLRSINT1, CLRREQINIT); 2697 #ifdef AHC_DEBUG 2698 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) 2699 printf(" byte 0x%x\n", ahc->send_msg_perror); 2700 #endif 2701 ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR); 2702 break; 2703 } 2704 2705 msgdone = ahc->msgout_index == ahc->msgout_len; 2706 if (msgdone) { 2707 /* 2708 * The target has requested a retry. 2709 * Re-assert ATN, reset our message index to 2710 * 0, and try again. 
2711 */
2712 ahc->msgout_index = 0;
2713 ahc_assert_atn(ahc);
2714 }
2715
2716 lastbyte = ahc->msgout_index == (ahc->msgout_len - 1);
2717 if (lastbyte) {
2718 /* Last byte is signified by dropping ATN */
2719 ahc_outb(ahc, CLRSINT1, CLRATNO);
2720 }
2721
2722 /*
2723 * Clear our interrupt status and present
2724 * the next byte on the bus.
2725 */
2726 ahc_outb(ahc, CLRSINT1, CLRREQINIT);
2727 #ifdef AHC_DEBUG
2728 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
2729 printf(" byte 0x%x\n",
2730 ahc->msgout_buf[ahc->msgout_index]);
2731 #endif
2732 ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
2733 break;
2734 }
2735 case MSG_TYPE_INITIATOR_MSGIN:
2736 {
2737 int phasemis;
2738 int message_done;
2739
2740 #ifdef AHC_DEBUG
2741 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
2742 ahc_print_devinfo(ahc, &devinfo);
2743 printf("INITIATOR_MSG_IN");
2744 }
2745 #endif
2746 phasemis = bus_phase != P_MESGIN;
2747 if (phasemis) {
2748 #ifdef AHC_DEBUG
2749 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
2750 printf(" PHASEMIS %s\n",
2751 ahc_lookup_phase_entry(bus_phase)
2752 ->phasemsg);
2753 }
2754 #endif
2755 ahc->msgin_index = 0;
2756 if (bus_phase == P_MESGOUT
2757 && (ahc->send_msg_perror == TRUE
2758 || (ahc->msgout_len != 0
2759 && ahc->msgout_index == 0))) {
2760 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
2761 goto reswitch;
2762 }
2763 end_session = TRUE;
2764 break;
2765 }
2766
2767 /* Pull the byte in without acking it */
2768 ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL);
2769 #ifdef AHC_DEBUG
2770 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
2771 printf(" byte 0x%x\n",
2772 ahc->msgin_buf[ahc->msgin_index]);
2773 #endif
2774
2775 message_done = ahc_parse_msg(ahc, &devinfo);
2776
2777 if (message_done) {
2778 /*
2779 * Clear our incoming message buffer in case there
2780 * is another message following this one.
2781 */
2782 ahc->msgin_index = 0;
2783
2784 /*
2785 * If this message elicited a response,
2786 * assert ATN so the target takes us to the
2787 * message out phase.
2788 */
2789 if (ahc->msgout_len != 0) {
2790 #ifdef AHC_DEBUG
2791 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
2792 ahc_print_devinfo(ahc, &devinfo);
2793 printf("Asserting ATN for response\n");
2794 }
2795 #endif
2796 ahc_assert_atn(ahc);
2797 }
2798 } else
2799 ahc->msgin_index++;
2800
2801 if (message_done == MSGLOOP_TERMINATED) {
2802 end_session = TRUE;
2803 } else {
2804 /* Ack the byte */
2805 ahc_outb(ahc, CLRSINT1, CLRREQINIT);
2806 (void)ahc_inb(ahc, SCSIDATL);
2807 }
2808 break;
2809 }
2810 case MSG_TYPE_TARGET_MSGIN:
2811 {
2812 int msgdone;
2813 int msgout_request;
2814
2815 if (ahc->msgout_len == 0)
2816 panic("Target MSGIN with no active message");
2817
2818 /*
2819 * If we interrupted a mesgout session, the initiator
2820 * will not know this until our first REQ. So, we
2821 * only honor mesgout requests after we've sent our
2822 * first byte.
2823 */
2824 if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0
2825 && ahc->msgout_index > 0)
2826 msgout_request = TRUE;
2827 else
2828 msgout_request = FALSE;
2829
2830 if (msgout_request) {
2831
2832 /*
2833 * Change gears and see if
2834 * this message is of interest to
2835 * us or should be passed back to
2836 * the sequencer.
2837 */
2838 ahc->msg_type = MSG_TYPE_TARGET_MSGOUT;
2839 ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO);
2840 ahc->msgin_index = 0;
2841 /* Dummy read to REQ for first byte */
2842 (void)ahc_inb(ahc, SCSIDATL);
2843 ahc_outb(ahc, SXFRCTL0,
2844 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2845 break;
2846 }
2847
2848 msgdone = ahc->msgout_index == ahc->msgout_len;
2849 if (msgdone) {
2850 ahc_outb(ahc, SXFRCTL0,
2851 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
2852 end_session = TRUE;
2853 break;
2854 }
2855
2856 /*
2857 * Present the next byte on the bus.
2858 */
2859 ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2860 ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
2861 break;
2862 }
2863 case MSG_TYPE_TARGET_MSGOUT:
2864 {
2865 int lastbyte;
2866 int msgdone;
2867
2868 /*
2869 * The initiator signals that this is
2870 * the last byte by dropping ATN.
2871 */
2872 lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0;
2873
2874 /*
2875 * Read the latched byte, but turn off SPIOEN first
2876 * so that we don't inadvertently cause a REQ for the
2877 * next byte.
2878 */
2879 ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
2880 ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL);
2881 msgdone = ahc_parse_msg(ahc, &devinfo);
2882 if (msgdone == MSGLOOP_TERMINATED) {
2883 /*
2884 * The message is *really* done in that it caused
2885 * us to go to bus free. The sequencer has already
2886 * been reset at this point, so pull the ejection
2887 * handle.
2888 */
2889 return;
2890 }
2891
2892 ahc->msgin_index++;
2893
2894 /*
2895 * XXX Read spec about initiator dropping ATN too soon
2896 * and use msgdone to detect it.
2897 */
2898 if (msgdone == MSGLOOP_MSGCOMPLETE) {
2899 ahc->msgin_index = 0;
2900
2901 /*
2902 * If this message elicited a response, transition
2903 * to the Message in phase and send it.
2904 */
2905 if (ahc->msgout_len != 0) {
2906 ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO);
2907 ahc_outb(ahc, SXFRCTL0,
2908 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2909 ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
2910 ahc->msgin_index = 0;
2911 break;
2912 }
2913 }
2914
2915 if (lastbyte)
2916 end_session = TRUE;
2917 else {
2918 /* Ask for the next byte. */
2919 ahc_outb(ahc, SXFRCTL0,
2920 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2921 }
2922
2923 break;
2924 }
2925 default:
2926 panic("Unknown REQINIT message type");
2927 }
2928
2929 if (end_session) {
2930 ahc_clear_msg_state(ahc);
2931 ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP);
2932 } else
2933 ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
2934 }
2935
2936 /*
2937 * See if we sent a particular extended message to the target.
2938 * If "full" is true, return true only if the target saw the full
2939 * message. If "full" is false, return true if the target saw at
2940 * least the first byte of the message.
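 *
 * For example, ahc_handle_msg_reject() below uses
 * ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, FALSE) to decide whether
 * a rejected message was a PPR we had at least started to send, and
 * falls back to WDTR/SDTR negotiation if so.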
2941 */ 2942 static int 2943 ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full) 2944 { 2945 int found; 2946 u_int index; 2947 2948 found = FALSE; 2949 index = 0; 2950 2951 while (index < ahc->msgout_len) { 2952 if (ahc->msgout_buf[index] == MSG_EXTENDED) { 2953 u_int end_index; 2954 2955 end_index = index + 1 + ahc->msgout_buf[index + 1]; 2956 if (ahc->msgout_buf[index+2] == msgval 2957 && type == AHCMSG_EXT) { 2958 2959 if (full) { 2960 if (ahc->msgout_index > end_index) 2961 found = TRUE; 2962 } else if (ahc->msgout_index > index) 2963 found = TRUE; 2964 } 2965 index = end_index; 2966 } else if (ahc->msgout_buf[index] >= MSG_SIMPLE_TASK 2967 && ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) { 2968 2969 /* Skip tag type and tag id or residue param*/ 2970 index += 2; 2971 } else { 2972 /* Single byte message */ 2973 if (type == AHCMSG_1B 2974 && ahc->msgout_buf[index] == msgval 2975 && ahc->msgout_index > index) 2976 found = TRUE; 2977 index++; 2978 } 2979 2980 if (found) 2981 break; 2982 } 2983 return (found); 2984 } 2985 2986 /* 2987 * Wait for a complete incoming message, parse it, and respond accordingly. 2988 */ 2989 static int 2990 ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2991 { 2992 struct ahc_initiator_tinfo *tinfo; 2993 struct ahc_tmode_tstate *tstate; 2994 int reject; 2995 int done; 2996 int response; 2997 u_int targ_scsirate; 2998 2999 done = MSGLOOP_IN_PROG; 3000 response = FALSE; 3001 reject = FALSE; 3002 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 3003 devinfo->target, &tstate); 3004 targ_scsirate = tinfo->scsirate; 3005 3006 /* 3007 * Parse as much of the message as is available, 3008 * rejecting it if we don't support it. When 3009 * the entire message is available and has been 3010 * handled, return MSGLOOP_MSGCOMPLETE, indicating 3011 * that we have parsed an entire message. 3012 * 3013 * In the case of extended messages, we accept the length 3014 * byte outright and perform more checking once we know the 3015 * extended message type. 3016 */ 3017 switch (ahc->msgin_buf[0]) { 3018 case MSG_DISCONNECT: 3019 case MSG_SAVEDATAPOINTER: 3020 case MSG_CMDCOMPLETE: 3021 case MSG_RESTOREPOINTERS: 3022 case MSG_IGN_WIDE_RESIDUE: 3023 /* 3024 * End our message loop as these are messages 3025 * the sequencer handles on its own. 3026 */ 3027 done = MSGLOOP_TERMINATED; 3028 break; 3029 case MSG_MESSAGE_REJECT: 3030 response = ahc_handle_msg_reject(ahc, devinfo); 3031 /* FALLTHROUGH */ 3032 case MSG_NOOP: 3033 done = MSGLOOP_MSGCOMPLETE; 3034 break; 3035 case MSG_EXTENDED: 3036 { 3037 /* Wait for enough of the message to begin validation */ 3038 if (ahc->msgin_index < 2) 3039 break; 3040 switch (ahc->msgin_buf[2]) { 3041 case MSG_EXT_SDTR: 3042 { 3043 struct ahc_syncrate *syncrate; 3044 u_int period; 3045 u_int ppr_options; 3046 u_int offset; 3047 u_int saved_offset; 3048 3049 if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) { 3050 reject = TRUE; 3051 break; 3052 } 3053 3054 /* 3055 * Wait until we have both args before validating 3056 * and acting on this message. 3057 * 3058 * Add one to MSG_EXT_SDTR_LEN to account for 3059 * the extended message preamble. 
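 *
 * At that point the complete message is in the buffer:
 *
 *	msgin_buf[] = { MSG_EXTENDED, MSG_EXT_SDTR_LEN, MSG_EXT_SDTR,
 *	                period, offset }
 *
 * which is why the period and offset are read from msgin_buf[3] and
 * msgin_buf[4] below.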
3060 */ 3061 if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1)) 3062 break; 3063 3064 period = ahc->msgin_buf[3]; 3065 ppr_options = 0; 3066 saved_offset = offset = ahc->msgin_buf[4]; 3067 syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period, 3068 &ppr_options, 3069 devinfo->role); 3070 ahc_validate_offset(ahc, tinfo, syncrate, &offset, 3071 targ_scsirate & WIDEXFER, 3072 devinfo->role); 3073 if (bootverbose) { 3074 printf("(%s:%c:%d:%d): Received " 3075 "SDTR period %x, offset %x\n\t" 3076 "Filtered to period %x, offset %x\n", 3077 ahc_name(ahc), devinfo->channel, 3078 devinfo->target, devinfo->lun, 3079 ahc->msgin_buf[3], saved_offset, 3080 period, offset); 3081 } 3082 ahc_set_syncrate(ahc, devinfo, 3083 syncrate, period, 3084 offset, ppr_options, 3085 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3086 /*paused*/TRUE); 3087 3088 /* 3089 * See if we initiated Sync Negotiation 3090 * and didn't have to fall down to async 3091 * transfers. 3092 */ 3093 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) { 3094 /* We started it */ 3095 if (saved_offset != offset) { 3096 /* Went too low - force async */ 3097 reject = TRUE; 3098 } 3099 } else { 3100 /* 3101 * Send our own SDTR in reply 3102 */ 3103 if (bootverbose 3104 && devinfo->role == ROLE_INITIATOR) { 3105 printf("(%s:%c:%d:%d): Target " 3106 "Initiated SDTR\n", 3107 ahc_name(ahc), devinfo->channel, 3108 devinfo->target, devinfo->lun); 3109 } 3110 ahc->msgout_index = 0; 3111 ahc->msgout_len = 0; 3112 ahc_construct_sdtr(ahc, devinfo, 3113 period, offset); 3114 ahc->msgout_index = 0; 3115 response = TRUE; 3116 } 3117 done = MSGLOOP_MSGCOMPLETE; 3118 break; 3119 } 3120 case MSG_EXT_WDTR: 3121 { 3122 u_int bus_width; 3123 u_int saved_width; 3124 u_int sending_reply; 3125 3126 sending_reply = FALSE; 3127 if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) { 3128 reject = TRUE; 3129 break; 3130 } 3131 3132 /* 3133 * Wait until we have our arg before validating 3134 * and acting on this message. 3135 * 3136 * Add one to MSG_EXT_WDTR_LEN to account for 3137 * the extended message preamble. 3138 */ 3139 if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1)) 3140 break; 3141 3142 bus_width = ahc->msgin_buf[3]; 3143 saved_width = bus_width; 3144 ahc_validate_width(ahc, tinfo, &bus_width, 3145 devinfo->role); 3146 if (bootverbose) { 3147 printf("(%s:%c:%d:%d): Received WDTR " 3148 "%x filtered to %x\n", 3149 ahc_name(ahc), devinfo->channel, 3150 devinfo->target, devinfo->lun, 3151 saved_width, bus_width); 3152 } 3153 3154 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, TRUE)) { 3155 /* 3156 * Don't send a WDTR back to the 3157 * target, since we asked first. 3158 * If the width went higher than our 3159 * request, reject it. 3160 */ 3161 if (saved_width > bus_width) { 3162 reject = TRUE; 3163 printf("(%s:%c:%d:%d): requested %dBit " 3164 "transfers. 
Rejecting...\n", 3165 ahc_name(ahc), devinfo->channel, 3166 devinfo->target, devinfo->lun, 3167 8 * (0x01 << bus_width)); 3168 bus_width = 0; 3169 } 3170 } else { 3171 /* 3172 * Send our own WDTR in reply 3173 */ 3174 if (bootverbose 3175 && devinfo->role == ROLE_INITIATOR) { 3176 printf("(%s:%c:%d:%d): Target " 3177 "Initiated WDTR\n", 3178 ahc_name(ahc), devinfo->channel, 3179 devinfo->target, devinfo->lun); 3180 } 3181 ahc->msgout_index = 0; 3182 ahc->msgout_len = 0; 3183 ahc_construct_wdtr(ahc, devinfo, bus_width); 3184 ahc->msgout_index = 0; 3185 response = TRUE; 3186 sending_reply = TRUE; 3187 } 3188 ahc_set_width(ahc, devinfo, bus_width, 3189 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3190 /*paused*/TRUE); 3191 /* After a wide message, we are async */ 3192 ahc_set_syncrate(ahc, devinfo, 3193 /*syncrate*/NULL, /*period*/0, 3194 /*offset*/0, /*ppr_options*/0, 3195 AHC_TRANS_ACTIVE, /*paused*/TRUE); 3196 if (sending_reply == FALSE && reject == FALSE) { 3197 3198 if (tinfo->goal.offset) { 3199 ahc->msgout_index = 0; 3200 ahc->msgout_len = 0; 3201 ahc_build_transfer_msg(ahc, devinfo); 3202 ahc->msgout_index = 0; 3203 response = TRUE; 3204 } 3205 } 3206 done = MSGLOOP_MSGCOMPLETE; 3207 break; 3208 } 3209 case MSG_EXT_PPR: 3210 { 3211 struct ahc_syncrate *syncrate; 3212 u_int period; 3213 u_int offset; 3214 u_int bus_width; 3215 u_int ppr_options; 3216 u_int saved_width; 3217 u_int saved_offset; 3218 u_int saved_ppr_options; 3219 3220 if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) { 3221 reject = TRUE; 3222 break; 3223 } 3224 3225 /* 3226 * Wait until we have all args before validating 3227 * and acting on this message. 3228 * 3229 * Add one to MSG_EXT_PPR_LEN to account for 3230 * the extended message preamble. 3231 */ 3232 if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1)) 3233 break; 3234 3235 period = ahc->msgin_buf[3]; 3236 offset = ahc->msgin_buf[5]; 3237 bus_width = ahc->msgin_buf[6]; 3238 saved_width = bus_width; 3239 ppr_options = ahc->msgin_buf[7]; 3240 /* 3241 * According to the spec, a DT only 3242 * period factor with no DT option 3243 * set implies async. 3244 */ 3245 if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0 3246 && period == 9) 3247 offset = 0; 3248 saved_ppr_options = ppr_options; 3249 saved_offset = offset; 3250 3251 /* 3252 * Mask out any options we don't support 3253 * on any controller. Transfer options are 3254 * only available if we are negotiating wide. 3255 */ 3256 ppr_options &= MSG_EXT_PPR_DT_REQ; 3257 if (bus_width == 0) 3258 ppr_options = 0; 3259 3260 ahc_validate_width(ahc, tinfo, &bus_width, 3261 devinfo->role); 3262 syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period, 3263 &ppr_options, 3264 devinfo->role); 3265 ahc_validate_offset(ahc, tinfo, syncrate, 3266 &offset, bus_width, 3267 devinfo->role); 3268 3269 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, TRUE)) { 3270 /* 3271 * If we are unable to do any of the 3272 * requested options (we went too low), 3273 * then we'll have to reject the message. 
3274 */ 3275 if (saved_width > bus_width 3276 || saved_offset != offset 3277 || saved_ppr_options != ppr_options) { 3278 reject = TRUE; 3279 period = 0; 3280 offset = 0; 3281 bus_width = 0; 3282 ppr_options = 0; 3283 syncrate = NULL; 3284 } 3285 } else { 3286 if (devinfo->role != ROLE_TARGET) 3287 printf("(%s:%c:%d:%d): Target " 3288 "Initiated PPR\n", 3289 ahc_name(ahc), devinfo->channel, 3290 devinfo->target, devinfo->lun); 3291 else 3292 printf("(%s:%c:%d:%d): Initiator " 3293 "Initiated PPR\n", 3294 ahc_name(ahc), devinfo->channel, 3295 devinfo->target, devinfo->lun); 3296 ahc->msgout_index = 0; 3297 ahc->msgout_len = 0; 3298 ahc_construct_ppr(ahc, devinfo, period, offset, 3299 bus_width, ppr_options); 3300 ahc->msgout_index = 0; 3301 response = TRUE; 3302 } 3303 if (bootverbose) { 3304 printf("(%s:%c:%d:%d): Received PPR width %x, " 3305 "period %x, offset %x,options %x\n" 3306 "\tFiltered to width %x, period %x, " 3307 "offset %x, options %x\n", 3308 ahc_name(ahc), devinfo->channel, 3309 devinfo->target, devinfo->lun, 3310 saved_width, ahc->msgin_buf[3], 3311 saved_offset, saved_ppr_options, 3312 bus_width, period, offset, ppr_options); 3313 } 3314 ahc_set_width(ahc, devinfo, bus_width, 3315 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3316 /*paused*/TRUE); 3317 ahc_set_syncrate(ahc, devinfo, 3318 syncrate, period, 3319 offset, ppr_options, 3320 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3321 /*paused*/TRUE); 3322 done = MSGLOOP_MSGCOMPLETE; 3323 break; 3324 } 3325 default: 3326 /* Unknown extended message. Reject it. */ 3327 reject = TRUE; 3328 break; 3329 } 3330 break; 3331 } 3332 #ifdef AHC_TARGET_MODE 3333 case MSG_BUS_DEV_RESET: 3334 ahc_handle_devreset(ahc, devinfo, 3335 CAM_BDR_SENT, 3336 "Bus Device Reset Received", 3337 /*verbose_level*/0); 3338 ahc_restart(ahc); 3339 done = MSGLOOP_TERMINATED; 3340 break; 3341 case MSG_ABORT_TAG: 3342 case MSG_ABORT: 3343 case MSG_CLEAR_QUEUE: 3344 { 3345 int tag; 3346 3347 /* Target mode messages */ 3348 if (devinfo->role != ROLE_TARGET) { 3349 reject = TRUE; 3350 break; 3351 } 3352 tag = SCB_LIST_NULL; 3353 if (ahc->msgin_buf[0] == MSG_ABORT_TAG) 3354 tag = ahc_inb(ahc, INITIATOR_TAG); 3355 ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, 3356 devinfo->lun, tag, ROLE_TARGET, 3357 CAM_REQ_ABORTED); 3358 3359 tstate = ahc->enabled_targets[devinfo->our_scsiid]; 3360 if (tstate != NULL) { 3361 struct ahc_tmode_lstate* lstate; 3362 3363 lstate = tstate->enabled_luns[devinfo->lun]; 3364 if (lstate != NULL) { 3365 ahc_queue_lstate_event(ahc, lstate, 3366 devinfo->our_scsiid, 3367 ahc->msgin_buf[0], 3368 /*arg*/tag); 3369 ahc_send_lstate_events(ahc, lstate); 3370 } 3371 } 3372 ahc_restart(ahc); 3373 done = MSGLOOP_TERMINATED; 3374 break; 3375 } 3376 #endif 3377 case MSG_TERM_IO_PROC: 3378 default: 3379 reject = TRUE; 3380 break; 3381 } 3382 3383 if (reject) { 3384 /* 3385 * Setup to reject the message. 3386 */ 3387 ahc->msgout_index = 0; 3388 ahc->msgout_len = 1; 3389 ahc->msgout_buf[0] = MSG_MESSAGE_REJECT; 3390 done = MSGLOOP_MSGCOMPLETE; 3391 response = TRUE; 3392 } 3393 3394 if (done != MSGLOOP_IN_PROG && !response) 3395 /* Clear the outgoing message buffer */ 3396 ahc->msgout_len = 0; 3397 3398 return (done); 3399 } 3400 3401 /* 3402 * Process a message reject message. 3403 */ 3404 static int 3405 ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 3406 { 3407 /* 3408 * What we care about here is if we had an 3409 * outstanding SDTR or WDTR message for this 3410 * target. 
If we did, this is a signal that 3411 * the target is refusing negotiation. 3412 */ 3413 struct scb *scb; 3414 struct ahc_initiator_tinfo *tinfo; 3415 struct ahc_tmode_tstate *tstate; 3416 u_int scb_index; 3417 u_int last_msg; 3418 int response = 0; 3419 3420 scb_index = ahc_inb(ahc, SCB_TAG); 3421 scb = ahc_lookup_scb(ahc, scb_index); 3422 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, 3423 devinfo->our_scsiid, 3424 devinfo->target, &tstate); 3425 /* Might be necessary */ 3426 last_msg = ahc_inb(ahc, LAST_MSG); 3427 3428 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) { 3429 /* 3430 * Target does not support the PPR message. 3431 * Attempt to negotiate SPI-2 style. 3432 */ 3433 if (bootverbose) { 3434 printf("(%s:%c:%d:%d): PPR Rejected. " 3435 "Trying WDTR/SDTR\n", 3436 ahc_name(ahc), devinfo->channel, 3437 devinfo->target, devinfo->lun); 3438 } 3439 tinfo->goal.ppr_options = 0; 3440 tinfo->curr.transport_version = 2; 3441 tinfo->goal.transport_version = 2; 3442 ahc->msgout_index = 0; 3443 ahc->msgout_len = 0; 3444 ahc_build_transfer_msg(ahc, devinfo); 3445 ahc->msgout_index = 0; 3446 response = 1; 3447 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) { 3448 3449 /* note 8bit xfers */ 3450 if (bootverbose) 3451 printf("(%s:%c:%d:%d): refuses WIDE negotiation. Using " 3452 "8bit transfers\n", ahc_name(ahc), 3453 devinfo->channel, devinfo->target, devinfo->lun); 3454 ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, 3455 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3456 /*paused*/TRUE); 3457 /* 3458 * No need to clear the sync rate. If the target 3459 * did not accept the command, our syncrate is 3460 * unaffected. If the target started the negotiation, 3461 * but rejected our response, we already cleared the 3462 * sync rate before sending our WDTR. 3463 */ 3464 if (tinfo->goal.offset != tinfo->curr.offset) { 3465 3466 /* Start the sync negotiation */ 3467 ahc->msgout_index = 0; 3468 ahc->msgout_len = 0; 3469 ahc_build_transfer_msg(ahc, devinfo); 3470 ahc->msgout_index = 0; 3471 response = 1; 3472 } 3473 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) { 3474 /* note asynch xfers and clear flag */ 3475 ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0, 3476 /*offset*/0, /*ppr_options*/0, 3477 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3478 /*paused*/TRUE); 3479 if (bootverbose) 3480 printf("(%s:%c:%d:%d): refuses synchronous negotiation." 3481 " Using asynchronous transfers\n", 3482 ahc_name(ahc), devinfo->channel, 3483 devinfo->target, devinfo->lun); 3484 } else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) { 3485 int tag_type; 3486 int mask; 3487 3488 tag_type = (scb->hscb->control & MSG_SIMPLE_TASK); 3489 3490 if (tag_type == MSG_SIMPLE_TASK) { 3491 if (bootverbose) 3492 printf("(%s:%c:%d:%d): refuses tagged commands." 3493 " Performing non-tagged I/O\n", 3494 ahc_name(ahc), devinfo->channel, 3495 devinfo->target, devinfo->lun); 3496 ahc_set_tags(ahc, devinfo, AHC_QUEUE_NONE); 3497 mask = ~0x23; 3498 } else { 3499 if (bootverbose) 3500 printf("(%s:%c:%d:%d): refuses %s tagged " 3501 "commands. Performing simple queue " 3502 "tagged I/O only\n", 3503 ahc_name(ahc), devinfo->channel, 3504 devinfo->target, devinfo->lun, 3505 tag_type == MSG_ORDERED_TASK 3506 ? "ordered" : "head of queue"); 3507 ahc_set_tags(ahc, devinfo, AHC_QUEUE_BASIC); 3508 mask = ~0x03; 3509 } 3510 3511 /* 3512 * Resend the identify for this CCB as the target 3513 * may believe that the selection is invalid otherwise. 
3514 */
3515 ahc_outb(ahc, SCB_CONTROL,
3516 ahc_inb(ahc, SCB_CONTROL) & mask);
3517 scb->hscb->control &= mask;
3518 ahc_set_transaction_tag(scb, /*enabled*/FALSE,
3519 /*type*/MSG_SIMPLE_TASK);
3520 ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG);
3521 ahc_assert_atn(ahc);
3522
3523 /*
3524 * This transaction is now at the head of
3525 * the untagged queue for this target.
3526 */
3527 if ((ahc->flags & AHC_SCB_BTT) == 0) {
3528 struct scb_tailq *untagged_q;
3529
3530 untagged_q =
3531 &(ahc->untagged_queues[devinfo->target_offset]);
3532 TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe);
3533 scb->flags |= SCB_UNTAGGEDQ;
3534 }
3535 ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
3536 scb->hscb->tag);
3537
3538 /*
3539 * Requeue all tagged commands for this target
3540 * currently in our possession so they can be
3541 * converted to untagged commands.
3542 */
3543 ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
3544 SCB_GET_CHANNEL(ahc, scb),
3545 SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
3546 ROLE_INITIATOR, CAM_REQUEUE_REQ,
3547 SEARCH_COMPLETE);
3548 } else {
3549 /*
3550 * Otherwise, we ignore it.
3551 */
3552 if (bootverbose)
3553 printf("%s:%c:%d: Message reject for %x -- ignored\n",
3554 ahc_name(ahc), devinfo->channel, devinfo->target,
3555 last_msg);
3556 }
3557 return (response);
3558 }
3559
3560 /*
3561 * Process an ignore wide residue message.
3562 */
3563 static void
3564 ahc_handle_ign_wide_residue(struct ahc_softc *ahc,
3565 struct ahc_devinfo *devinfo)
3566 {
3567 u_int scb_index;
3568 struct scb *scb;
3569
3570 scb_index = ahc_inb(ahc, SCB_TAG);
3571 scb = ahc_lookup_scb(ahc, scb_index);
3572 /*
3573 * XXX Actually check data direction in the sequencer?
3574 * Perhaps add datadir to some spare bits in the hscb?
3575 */
3576 if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0
3577 || ahc_get_transfer_dir(scb) != CAM_DIR_IN) {
3578 /*
3579 * Ignore the message if we haven't
3580 * seen an appropriate data phase yet.
3581 */
3582 } else {
3583 /*
3584 * If the residual occurred on the last
3585 * transfer and the transfer request was
3586 * expected to end on an odd count, do
3587 * nothing. Otherwise, subtract a byte
3588 * and update the residual count accordingly.
3589 */
3590 uint32_t sgptr;
3591
3592 sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
3593 if ((sgptr & SG_LIST_NULL) != 0
3594 && ahc_inb(ahc, DATA_COUNT_ODD) == 1) {
3595 /*
3596 * If the residual occurred on the last
3597 * transfer and the transfer request was
3598 * expected to end on an odd count, do
3599 * nothing.
3600 */
3601 } else {
3602 struct ahc_dma_seg *sg;
3603 uint32_t data_cnt;
3604 uint32_t data_addr;
3605 uint32_t sglen;
3606
3607 /* Pull in the rest of the sgptr */
3608 sgptr |= (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24)
3609 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16)
3610 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8);
3611 sgptr &= SG_PTR_MASK;
3612 data_cnt = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+3) << 24)
3613 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+2) << 16)
3614 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+1) << 8)
3615 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT));
3616
3617 data_addr = (ahc_inb(ahc, SHADDR + 3) << 24)
3618 | (ahc_inb(ahc, SHADDR + 2) << 16)
3619 | (ahc_inb(ahc, SHADDR + 1) << 8)
3620 | (ahc_inb(ahc, SHADDR));
3621
3622 data_cnt += 1;
3623 data_addr -= 1;
3624
3625 sg = ahc_sg_bus_to_virt(scb, sgptr);
3626 /*
3627 * The residual sg ptr points to the next S/G
3628 * to load so we must go back one.
3629 */
3630 sg--;
3631 sglen = ahc_le32toh(sg->len) & AHC_SG_LEN_MASK;
3632 if (sg != scb->sg_list
3633 && sglen < (data_cnt & AHC_SG_LEN_MASK)) {
3634
3635 sg--;
3636 sglen = ahc_le32toh(sg->len);
3637 /*
3638 * Preserve High Address and SG_LIST bits
3639 * while setting the count to 1.
3640 */
3641 data_cnt = 1 | (sglen & (~AHC_SG_LEN_MASK));
3642 data_addr = ahc_le32toh(sg->addr)
3643 + (sglen & AHC_SG_LEN_MASK) - 1;
3644
3645 /*
3646 * Increment sg so it points to the
3647 * "next" sg.
3648 */
3649 sg++;
3650 sgptr = ahc_sg_virt_to_bus(scb, sg);
3651 ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 3,
3652 sgptr >> 24);
3653 ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 2,
3654 sgptr >> 16);
3655 ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 1,
3656 sgptr >> 8);
3657 ahc_outb(ahc, SCB_RESIDUAL_SGPTR, sgptr);
3658 }
3659
3660 ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 3, data_cnt >> 24);
3661 ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 2, data_cnt >> 16);
3662 ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 1, data_cnt >> 8);
3663 ahc_outb(ahc, SCB_RESIDUAL_DATACNT, data_cnt);
3664 }
3665 }
3666 }
3667
3668
3669 /*
3670 * Reinitialize the data pointers for the active transfer
3671 * based on its current residual.
3672 */
3673 static void
3674 ahc_reinitialize_dataptrs(struct ahc_softc *ahc)
3675 {
3676 struct scb *scb;
3677 struct ahc_dma_seg *sg;
3678 u_int scb_index;
3679 uint32_t sgptr;
3680 uint32_t resid;
3681 uint32_t dataptr;
3682
3683 scb_index = ahc_inb(ahc, SCB_TAG);
3684 scb = ahc_lookup_scb(ahc, scb_index);
3685 sgptr = (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24)
3686 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16)
3687 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8)
3688 | ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
3689
3690 sgptr &= SG_PTR_MASK;
3691 sg = ahc_sg_bus_to_virt(scb, sgptr);
3692
3693 /* The residual sg_ptr always points to the next sg */
3694 sg--;
3695
3696 resid = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 2) << 16)
3697 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 1) << 8)
3698 | ahc_inb(ahc, SCB_RESIDUAL_DATACNT);
3699
3700 dataptr = ahc_le32toh(sg->addr)
3701 + (ahc_le32toh(sg->len) & AHC_SG_LEN_MASK)
3702 - resid;
3703 if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
3704 u_int dscommand1;
3705
3706 dscommand1 = ahc_inb(ahc, DSCOMMAND1);
3707 ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0);
3708 ahc_outb(ahc, HADDR,
3709 (ahc_le32toh(sg->len) >> 24) & SG_HIGH_ADDR_BITS);
3710 ahc_outb(ahc, DSCOMMAND1, dscommand1);
3711 }
3712 ahc_outb(ahc, HADDR + 3, dataptr >> 24);
3713 ahc_outb(ahc, HADDR + 2, dataptr >> 16);
3714 ahc_outb(ahc, HADDR + 1, dataptr >> 8);
3715 ahc_outb(ahc, HADDR, dataptr);
3716 ahc_outb(ahc, HCNT + 2, resid >> 16);
3717 ahc_outb(ahc, HCNT + 1, resid >> 8);
3718 ahc_outb(ahc, HCNT, resid);
3719 if ((ahc->features & AHC_ULTRA2) == 0) {
3720 ahc_outb(ahc, STCNT + 2, resid >> 16);
3721 ahc_outb(ahc, STCNT + 1, resid >> 8);
3722 ahc_outb(ahc, STCNT, resid);
3723 }
3724 }
3725
3726 /*
3727 * Handle the effects of issuing a bus device reset message.
3728 */
3729 static void
3730 ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
3731 cam_status status, const char *message, int verbose_level)
3732 {
3733 #ifdef AHC_TARGET_MODE
3734 struct ahc_tmode_tstate* tstate;
3735 u_int lun;
3736 #endif
3737 int found;
3738
3739 found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
3740 CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role,
3741 status);
3742
3743 #ifdef AHC_TARGET_MODE
3744 /*
3745 * Send an immediate notify ccb to all target mode peripheral
3746 * drivers affected by this action.
3747 */ 3748 tstate = ahc->enabled_targets[devinfo->our_scsiid]; 3749 if (tstate != NULL) { 3750 for (lun = 0; lun < AHC_NUM_LUNS; lun++) { 3751 struct ahc_tmode_lstate* lstate; 3752 3753 lstate = tstate->enabled_luns[lun]; 3754 if (lstate == NULL) 3755 continue; 3756 3757 ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid, 3758 MSG_BUS_DEV_RESET, /*arg*/0); 3759 ahc_send_lstate_events(ahc, lstate); 3760 } 3761 } 3762 #endif 3763 3764 /* 3765 * Go back to async/narrow transfers and renegotiate. 3766 */ 3767 ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, 3768 AHC_TRANS_CUR, /*paused*/TRUE); 3769 ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, 3770 /*period*/0, /*offset*/0, /*ppr_options*/0, 3771 AHC_TRANS_CUR, /*paused*/TRUE); 3772 3773 ahc_send_async(ahc, devinfo->channel, devinfo->target, 3774 CAM_LUN_WILDCARD, AC_SENT_BDR, NULL); 3775 3776 if (message != NULL 3777 && (verbose_level <= bootverbose)) 3778 printf("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc), 3779 message, devinfo->channel, devinfo->target, found); 3780 } 3781 3782 #ifdef AHC_TARGET_MODE 3783 static void 3784 ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 3785 struct scb *scb) 3786 { 3787 3788 /* 3789 * To facilitate adding multiple messages together, 3790 * each routine should increment the index and len 3791 * variables instead of setting them explicitly. 3792 */ 3793 ahc->msgout_index = 0; 3794 ahc->msgout_len = 0; 3795 3796 if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0) 3797 ahc_build_transfer_msg(ahc, devinfo); 3798 else 3799 panic("ahc_intr: AWAITING target message with no message"); 3800 3801 ahc->msgout_index = 0; 3802 ahc->msg_type = MSG_TYPE_TARGET_MSGIN; 3803 } 3804 #endif 3805 3806 int 3807 ahc_softc_init(struct ahc_softc *ahc) 3808 { 3809 3810 /* The IRQMS bit is only valid on VL and EISA chips */ 3811 if ((ahc->chip & AHC_PCI) == 0) 3812 ahc->unpause = ahc_inb(ahc, HCNTRL) & IRQMS; 3813 else 3814 ahc->unpause = 0; 3815 ahc->pause = ahc->unpause | PAUSE; 3816 /* XXX The shared scb data stuff should be deprecated */ 3817 if (ahc->scb_data == NULL) { 3818 ahc->scb_data = malloc(sizeof(*ahc->scb_data), 3819 M_DEVBUF, M_NOWAIT); 3820 if (ahc->scb_data == NULL) 3821 return (ENOMEM); 3822 memset(ahc->scb_data, 0, sizeof(*ahc->scb_data)); 3823 } 3824 3825 return (0); 3826 } 3827 3828 void 3829 ahc_softc_insert(struct ahc_softc *ahc) 3830 { 3831 struct ahc_softc *list_ahc; 3832 3833 #if AHC_PCI_CONFIG > 0 3834 /* 3835 * Second Function PCI devices need to inherit some 3836 * settings from function 0. 3837 */ 3838 if ((ahc->chip & AHC_BUS_MASK) == AHC_PCI 3839 && (ahc->features & AHC_MULTI_FUNC) != 0) { 3840 TAILQ_FOREACH(list_ahc, &ahc_tailq, links) { 3841 ahc_dev_softc_t list_pci; 3842 ahc_dev_softc_t pci; 3843 3844 list_pci = list_ahc->dev_softc; 3845 pci = ahc->dev_softc; 3846 if (ahc_get_pci_slot(list_pci) == ahc_get_pci_slot(pci) 3847 && ahc_get_pci_bus(list_pci) == ahc_get_pci_bus(pci)) { 3848 struct ahc_softc *master; 3849 struct ahc_softc *slave; 3850 3851 if (ahc_get_pci_function(list_pci) == 0) { 3852 master = list_ahc; 3853 slave = ahc; 3854 } else { 3855 master = ahc; 3856 slave = list_ahc; 3857 } 3858 slave->flags &= ~AHC_BIOS_ENABLED; 3859 slave->flags |= 3860 master->flags & AHC_BIOS_ENABLED; 3861 slave->flags &= ~AHC_PRIMARY_CHANNEL; 3862 slave->flags |= 3863 master->flags & AHC_PRIMARY_CHANNEL; 3864 break; 3865 } 3866 } 3867 } 3868 #endif 3869 3870 /* 3871 * Insertion sort into our list of softcs. 
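 *
 * The list stays ordered by ahc_softc_comp(): the loop below advances
 * past every existing softc that compares less than or equal to the
 * new one, then inserts in front of the first that does not, or at the
 * tail if none does.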
3872 */ 3873 list_ahc = TAILQ_FIRST(&ahc_tailq); 3874 while (list_ahc != NULL 3875 && ahc_softc_comp(list_ahc, ahc) <= 0) 3876 list_ahc = TAILQ_NEXT(list_ahc, links); 3877 if (list_ahc != NULL) 3878 TAILQ_INSERT_BEFORE(list_ahc, ahc, links); 3879 else 3880 TAILQ_INSERT_TAIL(&ahc_tailq, ahc, links); 3881 ahc->init_level++; 3882 } 3883 3884 /* 3885 * Verify that the passed in softc pointer is for a 3886 * controller that is still configured. 3887 */ 3888 struct ahc_softc * 3889 ahc_find_softc(struct ahc_softc *ahc) 3890 { 3891 struct ahc_softc *list_ahc; 3892 3893 TAILQ_FOREACH(list_ahc, &ahc_tailq, links) { 3894 if (list_ahc == ahc) 3895 return (ahc); 3896 } 3897 return (NULL); 3898 } 3899 3900 void 3901 ahc_set_unit(struct ahc_softc *ahc, int unit) 3902 { 3903 ahc->unit = unit; 3904 } 3905 3906 void 3907 ahc_set_name(struct ahc_softc *ahc, const char *name) 3908 { 3909 ahc->name = name; 3910 } 3911 3912 void 3913 ahc_free(struct ahc_softc *ahc) 3914 { 3915 int i; 3916 3917 ahc_fini_scbdata(ahc); 3918 switch (ahc->init_level) { 3919 default: 3920 case 2: 3921 ahc_shutdown(ahc); 3922 /* TAILQ_REMOVE(&ahc_tailq, ahc, links); XXX */ 3923 /* FALLTHROUGH */ 3924 case 1: 3925 bus_dmamap_unload(ahc->parent_dmat, ahc->shared_data_dmamap); 3926 bus_dmamap_destroy(ahc->parent_dmat, ahc->shared_data_dmamap); 3927 bus_dmamem_unmap(ahc->parent_dmat, (void *)ahc->qoutfifo, 3928 ahc->shared_data_size); 3929 bus_dmamem_free(ahc->parent_dmat, &ahc->shared_data_seg, 3930 ahc->shared_data_nseg); 3931 break; 3932 case 0: 3933 break; 3934 } 3935 3936 ahc_platform_free(ahc); 3937 for (i = 0; i < AHC_NUM_TARGETS; i++) { 3938 struct ahc_tmode_tstate *tstate; 3939 3940 tstate = ahc->enabled_targets[i]; 3941 if (tstate != NULL) { 3942 #if AHC_TARGET_MODE 3943 int j; 3944 3945 for (j = 0; j < AHC_NUM_LUNS; j++) { 3946 struct ahc_tmode_lstate *lstate; 3947 3948 lstate = tstate->enabled_luns[j]; 3949 if (lstate != NULL) { 3950 /*xpt_free_path(lstate->path);*/ 3951 free(lstate, M_DEVBUF); 3952 } 3953 } 3954 #endif 3955 free(tstate, M_DEVBUF); 3956 } 3957 } 3958 #if AHC_TARGET_MODE 3959 if (ahc->black_hole != NULL) { 3960 /*xpt_free_path(ahc->black_hole->path);*/ 3961 free(ahc->black_hole, M_DEVBUF); 3962 } 3963 #endif 3964 #ifndef __NetBSD__ 3965 if (ahc->name != NULL) 3966 free(ahc->name, M_DEVBUF); 3967 #endif 3968 if (ahc->seep_config != NULL) 3969 free(ahc->seep_config, M_DEVBUF); 3970 #if !defined(__FreeBSD__) && !defined(__NetBSD__) 3971 free(ahc, M_DEVBUF); 3972 #endif 3973 return; 3974 } 3975 3976 void 3977 ahc_shutdown(void *arg) 3978 { 3979 struct ahc_softc *ahc; 3980 int i; 3981 3982 ahc = arg; 3983 3984 /* This will reset most registers to 0, but not all */ 3985 ahc_reset(ahc); 3986 ahc_outb(ahc, SCSISEQ, 0); 3987 ahc_outb(ahc, SXFRCTL0, 0); 3988 ahc_outb(ahc, DSPCISTATUS, 0); 3989 3990 for (i = TARG_SCSIRATE; i < SCSICONF; i++) 3991 ahc_outb(ahc, i, 0); 3992 } 3993 3994 /* 3995 * Reset the controller and record some information about it 3996 * that is only available just after a reset. 3997 */ 3998 int 3999 ahc_reset(struct ahc_softc *ahc) 4000 { 4001 u_int sblkctl; 4002 u_int sxfrctl1_a, sxfrctl1_b; 4003 int wait; 4004 4005 /* 4006 * Preserve the value of the SXFRCTL1 register for all channels. 4007 * It contains settings that affect termination and we don't want 4008 * to disturb the integrity of the bus. 4009 */ 4010 ahc_pause(ahc); 4011 if ((ahc_inb(ahc, HCNTRL) & CHIPRST) != 0) { 4012 /* 4013 * The chip has not been initialized since 4014 * PCI/EISA/VLB bus reset. 
Don't trust 4015 * "left over BIOS data". 4016 */ 4017 ahc->flags |= AHC_NO_BIOS_INIT; 4018 } 4019 sxfrctl1_b = 0; 4020 if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) { 4021 u_int sblkctl1; 4022 4023 /* 4024 * Save channel B's settings in case this chip 4025 * is setup for TWIN channel operation. 4026 */ 4027 sblkctl1 = ahc_inb(ahc, SBLKCTL); 4028 ahc_outb(ahc, SBLKCTL, sblkctl1 | SELBUSB); 4029 sxfrctl1_b = ahc_inb(ahc, SXFRCTL1); 4030 ahc_outb(ahc, SBLKCTL, sblkctl1 & ~SELBUSB); 4031 } 4032 sxfrctl1_a = ahc_inb(ahc, SXFRCTL1); 4033 4034 ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause); 4035 4036 /* 4037 * Ensure that the reset has finished. We delay 1000us 4038 * prior to reading the register to make sure the chip 4039 * has sufficiently completed its reset to handle register 4040 * accesses. 4041 */ 4042 wait = 1000; 4043 do { 4044 ahc_delay(1000); 4045 } while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK)); 4046 4047 if (wait == 0) { 4048 printf("%s: WARNING - Failed chip reset! " 4049 "Trying to initialize anyway.\n", ahc_name(ahc)); 4050 } 4051 ahc_outb(ahc, HCNTRL, ahc->pause); 4052 4053 /* Determine channel configuration */ 4054 sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE); 4055 /* No Twin Channel PCI cards */ 4056 if ((ahc->chip & AHC_PCI) != 0) 4057 sblkctl &= ~SELBUSB; 4058 switch (sblkctl) { 4059 case 0: 4060 /* Single Narrow Channel */ 4061 break; 4062 case 2: 4063 /* Wide Channel */ 4064 ahc->features |= AHC_WIDE; 4065 break; 4066 case 8: 4067 /* Twin Channel */ 4068 ahc->features |= AHC_TWIN; 4069 break; 4070 default: 4071 printf(" Unsupported adapter type (0x%x). Ignoring\n", 4072 sblkctl); 4073 return(-1); 4074 } 4075 4076 /* 4077 * Reload sxfrctl1. 4078 * 4079 * We must always initialize STPWEN to 1 before we 4080 * restore the saved values. STPWEN is initialized 4081 * to a tri-state condition which can only be cleared 4082 * by turning it on. 4083 */ 4084 if ((ahc->features & AHC_TWIN) != 0) { 4085 u_int sblkctl1; 4086 4087 sblkctl1 = ahc_inb(ahc, SBLKCTL); 4088 ahc_outb(ahc, SBLKCTL, sblkctl1 | SELBUSB); 4089 ahc_outb(ahc, SXFRCTL1, sxfrctl1_b); 4090 ahc_outb(ahc, SBLKCTL, sblkctl1 & ~SELBUSB); 4091 } 4092 ahc_outb(ahc, SXFRCTL1, sxfrctl1_a); 4093 4094 #ifdef AHC_DUMP_SEQ 4095 if (ahc->init_level == 0) 4096 ahc_dumpseq(ahc); 4097 #endif 4098 4099 return (0); 4100 } 4101 4102 /* 4103 * Determine the number of SCBs available on the controller 4104 */ 4105 int 4106 ahc_probe_scbs(struct ahc_softc *ahc) { 4107 int i; 4108 4109 for (i = 0; i < AHC_SCB_MAX; i++) { 4110 4111 ahc_outb(ahc, SCBPTR, i); 4112 ahc_outb(ahc, SCB_BASE, i); 4113 if (ahc_inb(ahc, SCB_BASE) != i) 4114 break; 4115 ahc_outb(ahc, SCBPTR, 0); 4116 if (ahc_inb(ahc, SCB_BASE) != 0) 4117 break; 4118 } 4119 return (i); 4120 } 4121 4122 #if 0 4123 static void 4124 ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 4125 { 4126 bus_addr_t *baddr; 4127 4128 baddr = (bus_addr_t *)arg; 4129 *baddr = segs->ds_addr; 4130 } 4131 #endif 4132 4133 static void 4134 ahc_build_free_scb_list(struct ahc_softc *ahc) 4135 { 4136 int scbsize; 4137 int i; 4138 4139 scbsize = 32; 4140 if ((ahc->flags & AHC_LSCBS_ENABLED) != 0) 4141 scbsize = 64; 4142 4143 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 4144 int j; 4145 4146 ahc_outb(ahc, SCBPTR, i); 4147 4148 /* 4149 * Touch all SCB bytes to avoid parity errors 4150 * should one of our debugging routines read 4151 * an otherwise uninitiatlized byte. 
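 * Writing 0xFF to each of the scbsize bytes (32, or 64 when
 * large SCBs are enabled) gives every location valid parity
 * before the fields we actually use are initialized below.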
4152 */ 4153 for (j = 0; j < scbsize; j++) 4154 ahc_outb(ahc, SCB_BASE+j, 0xFF); 4155 4156 /* Clear the control byte. */ 4157 ahc_outb(ahc, SCB_CONTROL, 0); 4158 4159 /* Set the next pointer */ 4160 if ((ahc->flags & AHC_PAGESCBS) != 0) 4161 ahc_outb(ahc, SCB_NEXT, i+1); 4162 else 4163 ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL); 4164 4165 /* Make the tag number, SCSIID, and lun invalid */ 4166 ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL); 4167 ahc_outb(ahc, SCB_SCSIID, 0xFF); 4168 ahc_outb(ahc, SCB_LUN, 0xFF); 4169 } 4170 4171 /* Make sure that the last SCB terminates the free list */ 4172 ahc_outb(ahc, SCBPTR, i-1); 4173 ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL); 4174 } 4175 4176 static int 4177 ahc_init_scbdata(struct ahc_softc *ahc) 4178 { 4179 struct scb_data *scb_data; 4180 4181 scb_data = ahc->scb_data; 4182 SLIST_INIT(&scb_data->free_scbs); 4183 SLIST_INIT(&scb_data->sg_maps); 4184 4185 /* Allocate SCB resources */ 4186 scb_data->scbarray = malloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC, 4187 M_DEVBUF, M_NOWAIT); 4188 if (scb_data->scbarray == NULL) 4189 return (ENOMEM); 4190 memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC); 4191 4192 /* Determine the number of hardware SCBs and initialize them */ 4193 4194 scb_data->maxhscbs = ahc_probe_scbs(ahc); 4195 if ((ahc->flags & AHC_PAGESCBS) != 0) { 4196 /* SCB 0 heads the free list */ 4197 ahc_outb(ahc, FREE_SCBH, 0); 4198 } else { 4199 ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL); 4200 } 4201 4202 if (ahc->scb_data->maxhscbs == 0) { 4203 printf("%s: No SCB space found\n", ahc_name(ahc)); 4204 return (ENXIO); 4205 } 4206 4207 ahc_build_free_scb_list(ahc); 4208 4209 /* 4210 * Create our DMA tags. These tags define the kinds of device 4211 * accessible memory allocations and memory mappings we will 4212 * need to perform during normal operation. 4213 * 4214 * Unless we need to further restrict the allocation, we rely 4215 * on the restrictions of the parent dmat, hence the common 4216 * use of MAXADDR and MAXSIZE. 4217 */ 4218 4219 if (ahc_createdmamem(ahc->parent_dmat, 4220 AHC_SCB_MAX * sizeof(struct hardware_scb), ahc->sc_dmaflags, 4221 &scb_data->hscb_dmamap, 4222 (void **)&scb_data->hscbs, &scb_data->hscb_busaddr, 4223 &scb_data->hscb_seg, &scb_data->hscb_nseg, ahc_name(ahc), 4224 "hardware SCB structures") < 0) 4225 goto error_exit; 4226 4227 scb_data->init_level++; 4228 4229 if (ahc_createdmamem(ahc->parent_dmat, 4230 AHC_SCB_MAX * sizeof(struct scsi_sense_data), ahc->sc_dmaflags, 4231 &scb_data->sense_dmamap, (void **)&scb_data->sense, 4232 &scb_data->sense_busaddr, &scb_data->sense_seg, 4233 &scb_data->sense_nseg, ahc_name(ahc), "sense buffers") < 0) 4234 goto error_exit; 4235 4236 scb_data->init_level++; 4237 4238 /* Perform initial CCB allocation */ 4239 memset(scb_data->hscbs, 0, 4240 AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb)); 4241 ahc_alloc_scbs(ahc); 4242 scb_data->init_level++; 4243 4244 if (scb_data->numscbs == 0) { 4245 printf("%s: ahc_init_scbdata - " 4246 "Unable to allocate initial scbs\n", 4247 ahc_name(ahc)); 4248 goto error_exit; 4249 } 4250 4251 /* 4252 * Tell the sequencer which SCB will be the next one it receives. 
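 * (One SCB is reserved here and its tag written to
 * NEXT_QUEUED_SCB so the sequencer knows which host SCB to
 * pick up for the next command placed in the qinfifo.)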
4253 */ 4254 ahc->next_queued_scb = ahc_get_scb(ahc); 4255 ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag); 4256 4257 /* 4258 * Note that we were successfull 4259 */ 4260 return (0); 4261 4262 error_exit: 4263 4264 return (ENOMEM); 4265 } 4266 4267 static void 4268 ahc_fini_scbdata(struct ahc_softc *ahc) 4269 { 4270 struct scb_data *scb_data; 4271 4272 scb_data = ahc->scb_data; 4273 if (scb_data == NULL) 4274 return; 4275 4276 switch (scb_data->init_level) { 4277 default: 4278 case 5: 4279 { 4280 struct sg_map_node *sg_map; 4281 4282 while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) { 4283 SLIST_REMOVE_HEAD(&scb_data->sg_maps, links); 4284 ahc_freedmamem(ahc->parent_dmat, PAGE_SIZE, 4285 sg_map->sg_dmamap, (void *)sg_map->sg_vaddr, 4286 &sg_map->sg_dmasegs, sg_map->sg_nseg); 4287 free(sg_map, M_DEVBUF); 4288 } 4289 } 4290 /*FALLTHROUGH*/ 4291 case 4: 4292 ahc_freedmamem(ahc->parent_dmat, 4293 AHC_SCB_MAX * sizeof(struct scsi_sense_data), 4294 scb_data->sense_dmamap, (void *)scb_data->sense, 4295 &scb_data->sense_seg, scb_data->sense_nseg); 4296 /*FALLTHROUGH*/ 4297 case 3: 4298 ahc_freedmamem(ahc->parent_dmat, 4299 AHC_SCB_MAX * sizeof(struct hardware_scb), 4300 scb_data->hscb_dmamap, (void *)scb_data->hscbs, 4301 &scb_data->hscb_seg, scb_data->hscb_nseg); 4302 /*FALLTHROUGH*/ 4303 case 2: 4304 case 1: 4305 case 0: 4306 break; 4307 } 4308 if (scb_data->scbarray != NULL) 4309 free(scb_data->scbarray, M_DEVBUF); 4310 } 4311 4312 int 4313 ahc_alloc_scbs(struct ahc_softc *ahc) 4314 { 4315 struct scb_data *scb_data; 4316 struct scb *next_scb; 4317 struct sg_map_node *sg_map; 4318 bus_addr_t physaddr; 4319 struct ahc_dma_seg *segs; 4320 int newcount; 4321 int i; 4322 4323 scb_data = ahc->scb_data; 4324 if (scb_data->numscbs >= AHC_SCB_MAX_ALLOC) 4325 /* Can't allocate any more */ 4326 return (0); 4327 4328 next_scb = &scb_data->scbarray[scb_data->numscbs]; 4329 4330 sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_WAITOK); 4331 4332 if (sg_map == NULL) 4333 return (0); 4334 4335 /* Allocate S/G space for the next batch of SCBS */ 4336 if (ahc_createdmamem(ahc->parent_dmat, PAGE_SIZE, ahc->sc_dmaflags, 4337 &sg_map->sg_dmamap, 4338 (void **)&sg_map->sg_vaddr, &sg_map->sg_physaddr, 4339 &sg_map->sg_dmasegs, &sg_map->sg_nseg, 4340 ahc_name(ahc), 4341 "SG space") < 0) { 4342 free(sg_map, M_DEVBUF); 4343 return (0); 4344 } 4345 4346 SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links); 4347 4348 segs = sg_map->sg_vaddr; 4349 physaddr = sg_map->sg_physaddr; 4350 4351 newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg))); 4352 newcount = MIN(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs)); 4353 for (i = 0; i < newcount; i++) { 4354 struct scb_platform_data *pdata; 4355 int error; 4356 4357 pdata = malloc(sizeof(*pdata), M_DEVBUF, M_WAITOK); 4358 if (pdata == NULL) 4359 break; 4360 next_scb->platform_data = pdata; 4361 next_scb->sg_map = sg_map; 4362 next_scb->sg_list = segs; 4363 /* 4364 * The sequencer always starts with the second entry. 4365 * The first entry is embedded in the scb. 
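 * Hence sg_list_phys below is advanced by one
 * sizeof(struct ahc_dma_seg): the hardware SCB carries the
 * first segment inline, and any additional segments are
 * fetched from this address.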
4366 */ 4367 next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg); 4368 next_scb->ahc_softc = ahc; 4369 next_scb->flags = SCB_FREE; 4370 4371 error = bus_dmamap_create(ahc->parent_dmat, 4372 AHC_MAXTRANSFER_SIZE, AHC_NSEG, MAXPHYS, 0, 4373 BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW|ahc->sc_dmaflags, 4374 &next_scb->dmamap); 4375 if (error != 0) { 4376 free(pdata, M_DEVBUF); 4377 break; 4378 } 4379 4380 next_scb->hscb = &scb_data->hscbs[scb_data->numscbs]; 4381 next_scb->hscb->tag = ahc->scb_data->numscbs; 4382 SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, 4383 next_scb, links.sle); 4384 segs += AHC_NSEG; 4385 physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg)); 4386 next_scb++; 4387 ahc->scb_data->numscbs++; 4388 } 4389 return (newcount); 4390 } 4391 4392 void 4393 ahc_controller_info(struct ahc_softc *ahc, char *tbuf, size_t l) 4394 { 4395 size_t len; 4396 4397 len = snprintf(tbuf, l, "%s: ", 4398 ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]); 4399 if (len > l) 4400 return; 4401 if ((ahc->features & AHC_TWIN) != 0) 4402 len += snprintf(tbuf + len, l - len, 4403 "Twin Channel, A SCSI Id=%d, B SCSI Id=%d, primary %c, ", 4404 ahc->our_id, ahc->our_id_b, 4405 (ahc->flags & AHC_PRIMARY_CHANNEL) + 'A'); 4406 else { 4407 const char *speed; 4408 const char *type; 4409 4410 speed = ""; 4411 if ((ahc->features & AHC_ULTRA) != 0) { 4412 speed = "Ultra "; 4413 } else if ((ahc->features & AHC_DT) != 0) { 4414 speed = "Ultra160 "; 4415 } else if ((ahc->features & AHC_ULTRA2) != 0) { 4416 speed = "Ultra2 "; 4417 } 4418 if ((ahc->features & AHC_WIDE) != 0) { 4419 type = "Wide"; 4420 } else { 4421 type = "Single"; 4422 } 4423 len += snprintf(tbuf + len, l - len, "%s%s Channel %c, SCSI Id=%d, ", 4424 speed, type, ahc->channel, ahc->our_id); 4425 } 4426 if (len > l) 4427 return; 4428 4429 if ((ahc->flags & AHC_PAGESCBS) != 0) 4430 snprintf(tbuf + len, l - len, "%d/%d SCBs", 4431 ahc->scb_data->maxhscbs, AHC_MAX_QUEUE); 4432 else 4433 snprintf(tbuf + len, l - len, "%d SCBs", ahc->scb_data->maxhscbs); 4434 } 4435 4436 /* 4437 * Start the board, ready for normal operation 4438 */ 4439 int 4440 ahc_init(struct ahc_softc *ahc) 4441 { 4442 int max_targ; 4443 int i; 4444 int term; 4445 u_int scsi_conf; 4446 u_int scsiseq_template; 4447 u_int ultraenb; 4448 u_int discenable; 4449 u_int tagenable; 4450 size_t driver_data_size; 4451 uint32_t physaddr; 4452 4453 #ifdef AHC_DEBUG 4454 if ((ahc_debug & AHC_DEBUG_SEQUENCER) != 0) 4455 ahc->flags |= AHC_SEQUENCER_DEBUG; 4456 #endif 4457 4458 #ifdef AHC_PRINT_SRAM 4459 printf("Scratch Ram:"); 4460 for (i = 0x20; i < 0x5f; i++) { 4461 if (((i % 8) == 0) && (i != 0)) { 4462 printf ("\n "); 4463 } 4464 printf (" 0x%x", ahc_inb(ahc, i)); 4465 } 4466 if ((ahc->features & AHC_MORE_SRAM) != 0) { 4467 for (i = 0x70; i < 0x7f; i++) { 4468 if (((i % 8) == 0) && (i != 0)) { 4469 printf ("\n "); 4470 } 4471 printf (" 0x%x", ahc_inb(ahc, i)); 4472 } 4473 } 4474 printf ("\n"); 4475 /* 4476 * Reading uninitialized scratch ram may 4477 * generate parity errors. 4478 */ 4479 ahc_outb(ahc, CLRINT, CLRPARERR); 4480 ahc_outb(ahc, CLRINT, CLRBRKADRINT); 4481 #endif 4482 max_targ = 15; 4483 4484 /* 4485 * Assume we have a board at this stage and it has been reset. 4486 */ 4487 if ((ahc->flags & AHC_USEDEFAULTS) != 0) 4488 ahc->our_id = ahc->our_id_b = 7; 4489 4490 /* 4491 * Default to allowing initiator operations. 4492 */ 4493 ahc->flags |= AHC_INITIATORROLE; 4494 4495 /* 4496 * Only allow target mode features if this unit has them enabled. 
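 * (As written, the per-unit AHC_TMODE_ENABLE test below is
 * commented out, so AHC_TARGETMODE is stripped unconditionally
 * in this port.)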
4497 */ 4498 //if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0) 4499 ahc->features &= ~AHC_TARGETMODE; 4500 4501 /* 4502 * DMA tag for our command fifos and other data in system memory 4503 * the card's sequencer must be able to access. For initiator 4504 * roles, we need to allocate space for the qinfifo and qoutfifo. 4505 * The qinfifo and qoutfifo are composed of 256 1 byte elements. 4506 * When providing for the target mode role, we must additionally 4507 * provide space for the incoming target command fifo and an extra 4508 * byte to deal with a DMA bug in some chip versions. 4509 */ 4510 driver_data_size = 2 * 256 * sizeof(uint8_t); 4511 if ((ahc->features & AHC_TARGETMODE) != 0) 4512 driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd) 4513 + /*DMA WideOdd Bug Buffer*/1; 4514 4515 if (ahc_createdmamem(ahc->parent_dmat, driver_data_size, 4516 ahc->sc_dmaflags, 4517 &ahc->shared_data_dmamap, (void **)&ahc->qoutfifo, 4518 &ahc->shared_data_busaddr, &ahc->shared_data_seg, 4519 &ahc->shared_data_nseg, ahc_name(ahc), 4520 "shared data") < 0) 4521 return (ENOMEM); 4522 4523 ahc->init_level++; 4524 4525 if ((ahc->features & AHC_TARGETMODE) != 0) { 4526 ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo; 4527 ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS]; 4528 ahc->dma_bug_buf = ahc->shared_data_busaddr 4529 + driver_data_size - 1; 4530 /* All target command blocks start out invalid. */ 4531 for (i = 0; i < AHC_TMODE_CMDS; i++) 4532 ahc->targetcmds[i].cmd_valid = 0; 4533 ahc_sync_tqinfifo(ahc, BUS_DMASYNC_PREREAD); 4534 ahc->tqinfifonext = 1; 4535 ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1); 4536 ahc_outb(ahc, TQINPOS, ahc->tqinfifonext); 4537 ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256]; 4538 } 4539 ahc->qinfifo = &ahc->qoutfifo[256]; 4540 4541 ahc->init_level++; 4542 4543 /* Allocate SCB data now that buffer_dmat is initialized */ 4544 if (ahc->scb_data->maxhscbs == 0) 4545 if (ahc_init_scbdata(ahc) != 0) 4546 return (ENOMEM); 4547 4548 if (bootverbose) 4549 printf("%s: found %d SCBs\n", ahc_name(ahc), 4550 ahc->scb_data->maxhscbs); 4551 4552 /* 4553 * Allocate a tstate to house information for our 4554 * initiator presence on the bus as well as the user 4555 * data for any target mode initiator. 4556 */ 4557 if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) { 4558 printf("%s: unable to allocate ahc_tmode_tstate. " 4559 "Failing attach\n", ahc_name(ahc)); 4560 return (ENOMEM); 4561 } 4562 4563 if ((ahc->features & AHC_TWIN) != 0) { 4564 if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) { 4565 printf("%s: unable to allocate ahc_tmode_tstate. 
" 4566 "Failing attach\n", ahc_name(ahc)); 4567 return (ENOMEM); 4568 } 4569 } 4570 4571 ahc_outb(ahc, SEQ_FLAGS, 0); 4572 ahc_outb(ahc, SEQ_FLAGS2, 0); 4573 4574 if (ahc->scb_data->maxhscbs < AHC_SCB_MAX_ALLOC) { 4575 ahc->flags |= AHC_PAGESCBS; 4576 } else { 4577 ahc->flags &= ~AHC_PAGESCBS; 4578 } 4579 4580 #ifdef AHC_DEBUG 4581 if (ahc_debug & AHC_SHOW_MISC) { 4582 printf("%s: hardware scb %lu bytes; kernel scb %lu bytes; " 4583 "ahc_dma %lu bytes\n", 4584 ahc_name(ahc), 4585 (u_long)sizeof(struct hardware_scb), 4586 (u_long)sizeof(struct scb), 4587 (u_long)sizeof(struct ahc_dma_seg)); 4588 } 4589 #endif /* AHC_DEBUG */ 4590 4591 /* 4592 * Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels 4593 */ 4594 if (ahc->features & AHC_TWIN) { 4595 4596 /* 4597 * The device is gated to channel B after a chip reset, 4598 * so set those values first 4599 */ 4600 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB); 4601 term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0; 4602 ahc_outb(ahc, SCSIID, ahc->our_id_b); 4603 scsi_conf = ahc_inb(ahc, SCSICONF + 1); 4604 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) 4605 |term|ahc->seltime_b|ENSTIMER|ACTNEGEN); 4606 if ((ahc->features & AHC_ULTRA2) != 0) 4607 ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR); 4608 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); 4609 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); 4610 4611 if ((scsi_conf & RESET_SCSI) != 0 4612 && (ahc->flags & AHC_INITIATORROLE) != 0) 4613 ahc->flags |= AHC_RESET_BUS_B; 4614 4615 /* Select Channel A */ 4616 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB); 4617 } 4618 4619 term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0; 4620 if ((ahc->features & AHC_ULTRA2) != 0) 4621 ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id); 4622 else 4623 ahc_outb(ahc, SCSIID, ahc->our_id); 4624 scsi_conf = ahc_inb(ahc, SCSICONF); 4625 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) 4626 |term|ahc->seltime 4627 |ENSTIMER|ACTNEGEN); 4628 if ((ahc->features & AHC_ULTRA2) != 0) 4629 ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR); 4630 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); 4631 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); 4632 4633 if ((scsi_conf & RESET_SCSI) != 0 4634 && (ahc->flags & AHC_INITIATORROLE) != 0) 4635 ahc->flags |= AHC_RESET_BUS_A; 4636 4637 /* 4638 * Look at the information that board initialization or 4639 * the board bios has left us. 4640 */ 4641 ultraenb = 0; 4642 tagenable = ALL_TARGETS_MASK; 4643 4644 /* Grab the disconnection disable table and invert it for our needs */ 4645 if ((ahc->flags & AHC_USEDEFAULTS) != 0) { 4646 printf("%s: Host Adapter BIOS disabled. Using default SCSI " 4647 "host and target device parameters\n", ahc_name(ahc)); 4648 ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B| 4649 AHC_TERM_ENB_A|AHC_TERM_ENB_B; 4650 discenable = ALL_TARGETS_MASK; 4651 if ((ahc->features & AHC_ULTRA) != 0) 4652 ultraenb = ALL_TARGETS_MASK; 4653 } else if ((ahc->flags & AHC_USETARGETDEFAULTS) != 0) { 4654 printf("%s: Host Adapter has no SEEPROM. 
Using default SCSI" 4655 " target parameters\n", ahc_name(ahc)); 4656 ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B; 4657 discenable = ALL_TARGETS_MASK; 4658 if ((ahc->features & AHC_ULTRA) != 0) 4659 ultraenb = ALL_TARGETS_MASK; 4660 } else { 4661 discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8) 4662 | ahc_inb(ahc, DISC_DSB)); 4663 if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0) 4664 ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8) 4665 | ahc_inb(ahc, ULTRA_ENB); 4666 } 4667 4668 if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0) 4669 max_targ = 7; 4670 4671 for (i = 0; i <= max_targ; i++) { 4672 struct ahc_initiator_tinfo *tinfo; 4673 struct ahc_tmode_tstate *tstate; 4674 u_int our_id; 4675 u_int target_id; 4676 char channel; 4677 4678 channel = 'A'; 4679 our_id = ahc->our_id; 4680 target_id = i; 4681 if (i > 7 && (ahc->features & AHC_TWIN) != 0) { 4682 channel = 'B'; 4683 our_id = ahc->our_id_b; 4684 target_id = i % 8; 4685 } 4686 tinfo = ahc_fetch_transinfo(ahc, channel, our_id, 4687 target_id, &tstate); 4688 /* Default to async narrow across the board */ 4689 memset(tinfo, 0, sizeof(*tinfo)); 4690 if (ahc->flags & (AHC_USEDEFAULTS | AHC_USETARGETDEFAULTS)) { 4691 if ((ahc->features & AHC_WIDE) != 0) 4692 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 4693 4694 /* 4695 * These will be truncated when we determine the 4696 * connection type we have with the target. 4697 */ 4698 tinfo->user.period = ahc_syncrates->period; 4699 tinfo->user.offset = ~0; 4700 } else { 4701 u_int scsirate; 4702 uint16_t mask; 4703 4704 /* Take the settings leftover in scratch RAM. */ 4705 scsirate = ahc_inb(ahc, TARG_SCSIRATE + i); 4706 mask = (0x01 << i); 4707 if ((ahc->features & AHC_ULTRA2) != 0) { 4708 u_int offset; 4709 u_int maxsync; 4710 4711 if ((scsirate & SOFS) == 0x0F) { 4712 /* 4713 * Haven't negotiated yet, 4714 * so the format is different. 4715 */ 4716 scsirate = (scsirate & SXFR) >> 4 4717 | ((ultraenb & mask) 4718 ? 0x08 : 0x0) 4719 | (scsirate & WIDEXFER); 4720 offset = MAX_OFFSET_ULTRA2; 4721 } else 4722 offset = ahc_inb(ahc, TARG_OFFSET + i); 4723 if ((scsirate & ~WIDEXFER) == 0 && offset != 0) 4724 /* Set to the lowest sync rate, 5MHz */ 4725 scsirate |= 0x1c; 4726 maxsync = AHC_SYNCRATE_ULTRA2; 4727 if ((ahc->features & AHC_DT) != 0) 4728 maxsync = AHC_SYNCRATE_DT; 4729 tinfo->user.period = 4730 ahc_find_period(ahc, scsirate, maxsync); 4731 if (offset == 0) 4732 tinfo->user.period = 0; 4733 else 4734 tinfo->user.offset = ~0; 4735 if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/ 4736 && (ahc->features & AHC_DT) != 0) 4737 tinfo->user.ppr_options = 4738 MSG_EXT_PPR_DT_REQ; 4739 } else if ((scsirate & SOFS) != 0) { 4740 if ((scsirate & SXFR) == 0x40 4741 && (ultraenb & mask) != 0) { 4742 /* Treat 10MHz as a non-ultra speed */ 4743 scsirate &= ~SXFR; 4744 ultraenb &= ~mask; 4745 } 4746 tinfo->user.period = 4747 ahc_find_period(ahc, scsirate, 4748 (ultraenb & mask) 4749 ?
AHC_SYNCRATE_ULTRA 4750 : AHC_SYNCRATE_FAST); 4751 if (tinfo->user.period != 0) 4752 tinfo->user.offset = ~0; 4753 } 4754 if (tinfo->user.period == 0) 4755 tinfo->user.offset = 0; 4756 if ((scsirate & WIDEXFER) != 0 4757 && (ahc->features & AHC_WIDE) != 0) 4758 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 4759 tinfo->user.protocol_version = 4; 4760 if ((ahc->features & AHC_DT) != 0) 4761 tinfo->user.transport_version = 3; 4762 else 4763 tinfo->user.transport_version = 2; 4764 tinfo->goal.protocol_version = 2; 4765 tinfo->goal.transport_version = 2; 4766 tinfo->curr.protocol_version = 2; 4767 tinfo->curr.transport_version = 2; 4768 } 4769 tstate->ultraenb = 0; 4770 tstate->discenable = discenable; 4771 } 4772 ahc->user_discenable = discenable; 4773 ahc->user_tagenable = tagenable; 4774 4775 /* There are no untagged SCBs active yet. */ 4776 for (i = 0; i < 16; i++) { 4777 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0)); 4778 if ((ahc->flags & AHC_SCB_BTT) != 0) { 4779 int lun; 4780 4781 /* 4782 * The SCB based BTT allows an entry per 4783 * target and lun pair. 4784 */ 4785 for (lun = 1; lun < AHC_NUM_LUNS; lun++) 4786 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun)); 4787 } 4788 } 4789 4790 /* All of our queues are empty */ 4791 for (i = 0; i < 256; i++) 4792 ahc->qoutfifo[i] = SCB_LIST_NULL; 4793 4794 ahc_sync_qoutfifo(ahc, BUS_DMASYNC_PREREAD); 4795 4796 for (i = 0; i < 256; i++) 4797 ahc->qinfifo[i] = SCB_LIST_NULL; 4798 4799 if ((ahc->features & AHC_MULTI_TID) != 0) { 4800 ahc_outb(ahc, TARGID, 0); 4801 ahc_outb(ahc, TARGID + 1, 0); 4802 } 4803 4804 /* 4805 * Tell the sequencer where it can find our arrays in memory. 4806 */ 4807 physaddr = ahc->scb_data->hscb_busaddr; 4808 ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF); 4809 ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF); 4810 ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF); 4811 ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF); 4812 4813 physaddr = ahc->shared_data_busaddr; 4814 ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF); 4815 ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF); 4816 ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF); 4817 ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF); 4818 4819 /* 4820 * Initialize the group code to command length table. 4821 * This overrides the values in TARG_SCSIRATE, so only 4822 * setup the table after we have processed that information. 4823 */ 4824 ahc_outb(ahc, CMDSIZE_TABLE, 5); 4825 ahc_outb(ahc, CMDSIZE_TABLE + 1, 9); 4826 ahc_outb(ahc, CMDSIZE_TABLE + 2, 9); 4827 ahc_outb(ahc, CMDSIZE_TABLE + 3, 0); 4828 ahc_outb(ahc, CMDSIZE_TABLE + 4, 15); 4829 ahc_outb(ahc, CMDSIZE_TABLE + 5, 11); 4830 ahc_outb(ahc, CMDSIZE_TABLE + 6, 0); 4831 ahc_outb(ahc, CMDSIZE_TABLE + 7, 0); 4832 4833 /* Tell the sequencer of our initial queue positions */ 4834 ahc_outb(ahc, KERNEL_QINPOS, 0); 4835 ahc_outb(ahc, QINPOS, 0); 4836 ahc_outb(ahc, QOUTPOS, 0); 4837 4838 /* 4839 * Use the built in queue management registers 4840 * if they are available. 
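 * On chips with AHC_QUEUE_REGS we select a 256 entry queue size
 * and zero the host/sequencer queue offset registers; other
 * chips fall back to the scratch-ram KERNEL_QINPOS/QINPOS
 * values programmed above.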
4841 */ 4842 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 4843 ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256); 4844 ahc_outb(ahc, SDSCB_QOFF, 0); 4845 ahc_outb(ahc, SNSCB_QOFF, 0); 4846 ahc_outb(ahc, HNSCB_QOFF, 0); 4847 } 4848 4849 4850 /* We don't have any waiting selections */ 4851 ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL); 4852 4853 /* Our disconnection list is empty too */ 4854 ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL); 4855 4856 /* Message out buffer starts empty */ 4857 ahc_outb(ahc, MSG_OUT, MSG_NOOP); 4858 4859 /* 4860 * Setup the allowed SCSI Sequences based on operational mode. 4861 * If we are a target, we'll enable select in operations once 4862 * we've had a lun enabled. 4863 */ 4864 scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP; 4865 if ((ahc->flags & AHC_INITIATORROLE) != 0) 4866 scsiseq_template |= ENRSELI; 4867 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template); 4868 4869 /* 4870 * Load the Sequencer program and Enable the adapter 4871 * in "fast" mode. 4872 */ 4873 if (bootverbose) 4874 printf("%s: Downloading Sequencer Program...", 4875 ahc_name(ahc)); 4876 4877 ahc_loadseq(ahc); 4878 4879 if ((ahc->features & AHC_ULTRA2) != 0) { 4880 int wait; 4881 4882 /* 4883 * Wait for up to 500ms for our transceivers 4884 * to settle. If the adapter does not have 4885 * a cable attached, the transceivers may 4886 * never settle, so don't complain if we 4887 * fail here. 4888 */ 4889 ahc_pause(ahc); 4890 for (wait = 5000; 4891 (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait; 4892 wait--) 4893 ahc_delay(100); 4894 ahc_unpause(ahc); 4895 } 4896 4897 return (0); 4898 } 4899 4900 void 4901 ahc_intr_enable(struct ahc_softc *ahc, int enable) 4902 { 4903 u_int hcntrl; 4904 4905 hcntrl = ahc_inb(ahc, HCNTRL); 4906 hcntrl &= ~INTEN; 4907 ahc->pause &= ~INTEN; 4908 ahc->unpause &= ~INTEN; 4909 if (enable) { 4910 hcntrl |= INTEN; 4911 ahc->pause |= INTEN; 4912 ahc->unpause |= INTEN; 4913 } 4914 ahc_outb(ahc, HCNTRL, hcntrl); 4915 } 4916 4917 /* 4918 * Ensure that the card is paused in a location 4919 * outside of all critical sections and that all 4920 * pending work is completed prior to returning. 4921 * This routine should only be called from outside 4922 * an interrupt context. 4923 */ 4924 void 4925 ahc_pause_and_flushwork(struct ahc_softc *ahc) 4926 { 4927 int intstat; 4928 int maxloops; 4929 int paused; 4930 4931 maxloops = 1000; 4932 ahc->flags |= AHC_ALL_INTERRUPTS; 4933 intstat = 0; 4934 paused = FALSE; 4935 do { 4936 if (paused) 4937 ahc_unpause(ahc); 4938 ahc_intr(ahc); 4939 ahc_pause(ahc); 4940 paused = TRUE; 4941 ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO); 4942 ahc_clear_critical_section(ahc); 4943 if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) 4944 break; 4945 } while (--maxloops 4946 && (((intstat = ahc_inb(ahc, INTSTAT)) & INT_PEND) != 0 4947 || (ahc_inb(ahc, SSTAT0) & (SELDO|SELINGO)))); 4948 if (maxloops == 0) { 4949 printf("Infinite interrupt loop, INTSTAT = %x", 4950 ahc_inb(ahc, INTSTAT)); 4951 } 4952 ahc_platform_flushwork(ahc); 4953 ahc->flags &= ~AHC_ALL_INTERRUPTS; 4954 } 4955 4956 int 4957 ahc_suspend(struct ahc_softc *ahc) 4958 { 4959 uint8_t *ptr; 4960 int i; 4961 4962 ahc_pause_and_flushwork(ahc); 4963 4964 if (LIST_FIRST(&ahc->pending_scbs) != NULL) 4965 return (EBUSY); 4966 4967 #if AHC_TARGET_MODE 4968 /* 4969 * XXX What about ATIOs that have not yet been serviced? 4970 * Perhaps we should just refuse to be suspended if we 4971 * are acting in a target role. 
4972 */ 4973 if (ahc->pending_device != NULL) 4974 return (EBUSY); 4975 #endif 4976 4977 /* Save volatile registers */ 4978 if ((ahc->features & AHC_TWIN) != 0) { 4979 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB); 4980 ahc->suspend_state.channel[1].scsiseq = ahc_inb(ahc, SCSISEQ); 4981 ahc->suspend_state.channel[1].sxfrctl0 = ahc_inb(ahc, SXFRCTL0); 4982 ahc->suspend_state.channel[1].sxfrctl1 = ahc_inb(ahc, SXFRCTL1); 4983 ahc->suspend_state.channel[1].simode0 = ahc_inb(ahc, SIMODE0); 4984 ahc->suspend_state.channel[1].simode1 = ahc_inb(ahc, SIMODE1); 4985 ahc->suspend_state.channel[1].seltimer = ahc_inb(ahc, SELTIMER); 4986 ahc->suspend_state.channel[1].seqctl = ahc_inb(ahc, SEQCTL); 4987 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB); 4988 } 4989 ahc->suspend_state.channel[0].scsiseq = ahc_inb(ahc, SCSISEQ); 4990 ahc->suspend_state.channel[0].sxfrctl0 = ahc_inb(ahc, SXFRCTL0); 4991 ahc->suspend_state.channel[0].sxfrctl1 = ahc_inb(ahc, SXFRCTL1); 4992 ahc->suspend_state.channel[0].simode0 = ahc_inb(ahc, SIMODE0); 4993 ahc->suspend_state.channel[0].simode1 = ahc_inb(ahc, SIMODE1); 4994 ahc->suspend_state.channel[0].seltimer = ahc_inb(ahc, SELTIMER); 4995 ahc->suspend_state.channel[0].seqctl = ahc_inb(ahc, SEQCTL); 4996 4997 if ((ahc->chip & AHC_PCI) != 0) { 4998 ahc->suspend_state.dscommand0 = ahc_inb(ahc, DSCOMMAND0); 4999 ahc->suspend_state.dspcistatus = ahc_inb(ahc, DSPCISTATUS); 5000 } 5001 5002 if ((ahc->features & AHC_DT) != 0) { 5003 u_int sfunct; 5004 5005 sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE; 5006 ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE); 5007 ahc->suspend_state.optionmode = ahc_inb(ahc, OPTIONMODE); 5008 ahc_outb(ahc, SFUNCT, sfunct); 5009 ahc->suspend_state.crccontrol1 = ahc_inb(ahc, CRCCONTROL1); 5010 } 5011 5012 if ((ahc->features & AHC_MULTI_FUNC) != 0) 5013 ahc->suspend_state.scbbaddr = ahc_inb(ahc, SCBBADDR); 5014 5015 if ((ahc->features & AHC_ULTRA2) != 0) 5016 ahc->suspend_state.dff_thrsh = ahc_inb(ahc, DFF_THRSH); 5017 5018 ptr = ahc->suspend_state.scratch_ram; 5019 for (i = 0; i < 64; i++) 5020 *ptr++ = ahc_inb(ahc, SRAM_BASE + i); 5021 5022 if ((ahc->features & AHC_MORE_SRAM) != 0) { 5023 for (i = 0; i < 16; i++) 5024 *ptr++ = ahc_inb(ahc, TARG_OFFSET + i); 5025 } 5026 5027 ptr = ahc->suspend_state.btt; 5028 if ((ahc->flags & AHC_SCB_BTT) != 0) { 5029 for (i = 0;i < AHC_NUM_TARGETS; i++) { 5030 int j; 5031 5032 for (j = 0;j < AHC_NUM_LUNS; j++) { 5033 u_int tcl; 5034 5035 tcl = BUILD_TCL(i << 4, j); 5036 *ptr = ahc_index_busy_tcl(ahc, tcl); 5037 } 5038 } 5039 } 5040 ahc_shutdown(ahc); 5041 return (0); 5042 } 5043 5044 int 5045 ahc_resume(struct ahc_softc *ahc) 5046 { 5047 uint8_t *ptr; 5048 int i; 5049 5050 ahc_reset(ahc); 5051 5052 ahc_build_free_scb_list(ahc); 5053 5054 /* Restore volatile registers */ 5055 if ((ahc->features & AHC_TWIN) != 0) { 5056 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB); 5057 ahc_outb(ahc, SCSIID, ahc->our_id); 5058 ahc_outb(ahc, SCSISEQ, ahc->suspend_state.channel[1].scsiseq); 5059 ahc_outb(ahc, SXFRCTL0, ahc->suspend_state.channel[1].sxfrctl0); 5060 ahc_outb(ahc, SXFRCTL1, ahc->suspend_state.channel[1].sxfrctl1); 5061 ahc_outb(ahc, SIMODE0, ahc->suspend_state.channel[1].simode0); 5062 ahc_outb(ahc, SIMODE1, ahc->suspend_state.channel[1].simode1); 5063 ahc_outb(ahc, SELTIMER, ahc->suspend_state.channel[1].seltimer); 5064 ahc_outb(ahc, SEQCTL, ahc->suspend_state.channel[1].seqctl); 5065 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB); 5066 } 5067 ahc_outb(ahc, SCSISEQ, 
ahc->suspend_state.channel[0].scsiseq); 5068 ahc_outb(ahc, SXFRCTL0, ahc->suspend_state.channel[0].sxfrctl0); 5069 ahc_outb(ahc, SXFRCTL1, ahc->suspend_state.channel[0].sxfrctl1); 5070 ahc_outb(ahc, SIMODE0, ahc->suspend_state.channel[0].simode0); 5071 ahc_outb(ahc, SIMODE1, ahc->suspend_state.channel[0].simode1); 5072 ahc_outb(ahc, SELTIMER, ahc->suspend_state.channel[0].seltimer); 5073 ahc_outb(ahc, SEQCTL, ahc->suspend_state.channel[0].seqctl); 5074 if ((ahc->features & AHC_ULTRA2) != 0) 5075 ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id); 5076 else 5077 ahc_outb(ahc, SCSIID, ahc->our_id); 5078 5079 if ((ahc->chip & AHC_PCI) != 0) { 5080 ahc_outb(ahc, DSCOMMAND0, ahc->suspend_state.dscommand0); 5081 ahc_outb(ahc, DSPCISTATUS, ahc->suspend_state.dspcistatus); 5082 } 5083 5084 if ((ahc->features & AHC_DT) != 0) { 5085 u_int sfunct; 5086 5087 sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE; 5088 ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE); 5089 ahc_outb(ahc, OPTIONMODE, ahc->suspend_state.optionmode); 5090 ahc_outb(ahc, SFUNCT, sfunct); 5091 ahc_outb(ahc, CRCCONTROL1, ahc->suspend_state.crccontrol1); 5092 } 5093 5094 if ((ahc->features & AHC_MULTI_FUNC) != 0) 5095 ahc_outb(ahc, SCBBADDR, ahc->suspend_state.scbbaddr); 5096 5097 if ((ahc->features & AHC_ULTRA2) != 0) 5098 ahc_outb(ahc, DFF_THRSH, ahc->suspend_state.dff_thrsh); 5099 5100 ptr = ahc->suspend_state.scratch_ram; 5101 for (i = 0; i < 64; i++) 5102 ahc_outb(ahc, SRAM_BASE + i, *ptr++); 5103 5104 if ((ahc->features & AHC_MORE_SRAM) != 0) { 5105 for (i = 0; i < 16; i++) 5106 ahc_outb(ahc, TARG_OFFSET + i, *ptr++); 5107 } 5108 5109 ptr = ahc->suspend_state.btt; 5110 if ((ahc->flags & AHC_SCB_BTT) != 0) { 5111 for (i = 0;i < AHC_NUM_TARGETS; i++) { 5112 int j; 5113 5114 for (j = 0;j < AHC_NUM_LUNS; j++) { 5115 u_int tcl; 5116 5117 tcl = BUILD_TCL(i << 4, j); 5118 ahc_busy_tcl(ahc, tcl, *ptr); 5119 } 5120 } 5121 } 5122 return (0); 5123 } 5124 5125 /************************** Busy Target Table *********************************/ 5126 /* 5127 * Return the untagged transaction id for a given target/channel lun. 5128 * Optionally, clear the entry. 
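 * With an SCB based busy target table (AHC_SCB_BTT) the entry
 * lives in the hardware SCB selected by the lun; otherwise a
 * single scratch-ram byte per target in BUSY_TARGETS holds the
 * untagged SCB id.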
5129 */ 5130 u_int 5131 ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl) 5132 { 5133 u_int scbid; 5134 u_int target_offset; 5135 5136 if ((ahc->flags & AHC_SCB_BTT) != 0) { 5137 u_int saved_scbptr; 5138 5139 saved_scbptr = ahc_inb(ahc, SCBPTR); 5140 ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); 5141 scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl)); 5142 ahc_outb(ahc, SCBPTR, saved_scbptr); 5143 } else { 5144 target_offset = TCL_TARGET_OFFSET(tcl); 5145 scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset); 5146 } 5147 5148 return (scbid); 5149 } 5150 5151 void 5152 ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl) 5153 { 5154 u_int target_offset; 5155 5156 if ((ahc->flags & AHC_SCB_BTT) != 0) { 5157 u_int saved_scbptr; 5158 5159 saved_scbptr = ahc_inb(ahc, SCBPTR); 5160 ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); 5161 ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL); 5162 ahc_outb(ahc, SCBPTR, saved_scbptr); 5163 } else { 5164 target_offset = TCL_TARGET_OFFSET(tcl); 5165 ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL); 5166 } 5167 } 5168 5169 void 5170 ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid) 5171 { 5172 u_int target_offset; 5173 5174 if ((ahc->flags & AHC_SCB_BTT) != 0) { 5175 u_int saved_scbptr; 5176 5177 saved_scbptr = ahc_inb(ahc, SCBPTR); 5178 ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); 5179 ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid); 5180 ahc_outb(ahc, SCBPTR, saved_scbptr); 5181 } else { 5182 target_offset = TCL_TARGET_OFFSET(tcl); 5183 ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid); 5184 } 5185 } 5186 5187 /************************** SCB and SCB queue management **********************/ 5188 int 5189 ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target, 5190 char channel, int lun, u_int tag, role_t role) 5191 { 5192 int targ = SCB_GET_TARGET(ahc, scb); 5193 char chan = SCB_GET_CHANNEL(ahc, scb); 5194 int slun = SCB_GET_LUN(scb); 5195 int match; 5196 5197 match = ((chan == channel) || (channel == ALL_CHANNELS)); 5198 if (match != 0) 5199 match = ((targ == target) || (target == CAM_TARGET_WILDCARD)); 5200 if (match != 0) 5201 match = ((lun == slun) || (lun == CAM_LUN_WILDCARD)); 5202 if (match != 0) { 5203 #if 0 5204 #if AHC_TARGET_MODE 5205 int group; 5206 5207 group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code); 5208 if (role == ROLE_INITIATOR) { 5209 match = (group != XPT_FC_GROUP_TMODE) 5210 && ((tag == scb->hscb->tag) 5211 || (tag == SCB_LIST_NULL)); 5212 } else if (role == ROLE_TARGET) { 5213 match = (group == XPT_FC_GROUP_TMODE) 5214 && ((tag == scb->io_ctx->csio.tag_id) 5215 || (tag == SCB_LIST_NULL)); 5216 } 5217 #else /* !AHC_TARGET_MODE */ 5218 match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL)); 5219 #endif /* AHC_TARGET_MODE */ 5220 #endif 5221 } 5222 5223 return match; 5224 } 5225 5226 void 5227 ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb) 5228 { 5229 int target; 5230 char channel; 5231 int lun; 5232 5233 target = SCB_GET_TARGET(ahc, scb); 5234 lun = SCB_GET_LUN(scb); 5235 channel = SCB_GET_CHANNEL(ahc, scb); 5236 5237 ahc_search_qinfifo(ahc, target, channel, lun, 5238 /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN, 5239 CAM_REQUEUE_REQ, SEARCH_COMPLETE); 5240 5241 ahc_platform_freeze_devq(ahc, scb); 5242 } 5243 5244 void 5245 ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb) 5246 { 5247 struct scb *prev_scb; 5248 5249 prev_scb = NULL; 5250 if (ahc_qinfifo_count(ahc) != 0) { 5251 u_int prev_tag; 5252 uint8_t prev_pos; 5253 5254 prev_pos = ahc->qinfifonext - 1; 5255 prev_tag = 
ahc->qinfifo[prev_pos]; 5256 prev_scb = ahc_lookup_scb(ahc, prev_tag); 5257 } 5258 ahc_qinfifo_requeue(ahc, prev_scb, scb); 5259 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 5260 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); 5261 } else { 5262 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); 5263 } 5264 } 5265 5266 static void 5267 ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb, 5268 struct scb *scb) 5269 { 5270 if (prev_scb == NULL) { 5271 ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag); 5272 } else { 5273 prev_scb->hscb->next = scb->hscb->tag; 5274 ahc_sync_scb(ahc, prev_scb, 5275 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 5276 } 5277 ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag; 5278 scb->hscb->next = ahc->next_queued_scb->hscb->tag; 5279 ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 5280 } 5281 5282 static int 5283 ahc_qinfifo_count(struct ahc_softc *ahc) 5284 { 5285 uint8_t qinpos; 5286 uint8_t diff; 5287 5288 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 5289 qinpos = ahc_inb(ahc, SNSCB_QOFF); 5290 ahc_outb(ahc, SNSCB_QOFF, qinpos); 5291 } else 5292 qinpos = ahc_inb(ahc, QINPOS); 5293 diff = ahc->qinfifonext - qinpos; 5294 return (diff); 5295 } 5296 5297 int 5298 ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel, 5299 int lun, u_int tag, role_t role, uint32_t status, 5300 ahc_search_action action) 5301 { 5302 struct scb *scb; 5303 struct scb *prev_scb; 5304 uint8_t qinstart; 5305 uint8_t qinpos; 5306 uint8_t qintail; 5307 uint8_t next; 5308 uint8_t prev; 5309 uint8_t curscbptr; 5310 int found; 5311 int have_qregs; 5312 5313 qintail = ahc->qinfifonext; 5314 have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0; 5315 if (have_qregs) { 5316 qinstart = ahc_inb(ahc, SNSCB_QOFF); 5317 ahc_outb(ahc, SNSCB_QOFF, qinstart); 5318 } else 5319 qinstart = ahc_inb(ahc, QINPOS); 5320 qinpos = qinstart; 5321 found = 0; 5322 prev_scb = NULL; 5323 5324 if (action == SEARCH_COMPLETE) { 5325 /* 5326 * Don't attempt to run any queued untagged transactions 5327 * until we are done with the abort process. 5328 */ 5329 ahc_freeze_untagged_queues(ahc); 5330 } 5331 5332 /* 5333 * Start with an empty queue. Entries that are not chosen 5334 * for removal will be re-added to the queue as we go. 5335 */ 5336 ahc->qinfifonext = qinpos; 5337 ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag); 5338 5339 while (qinpos != qintail) { 5340 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]); 5341 if (scb == NULL) { 5342 printf("qinpos = %d, SCB index = %d\n", 5343 qinpos, ahc->qinfifo[qinpos]); 5344 panic("Loop 1\n"); 5345 } 5346 5347 if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) { 5348 /* 5349 * We found an scb that needs to be acted on. 
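 * SEARCH_COMPLETE finishes the command with the caller's status
 * and then, like SEARCH_REMOVE, leaves it out of the rebuilt
 * queue; only SEARCH_COUNT requeues the entry untouched.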
5350 */ 5351 found++; 5352 switch (action) { 5353 case SEARCH_COMPLETE: 5354 { 5355 cam_status ostat; 5356 cam_status cstat; 5357 5358 ostat = ahc_get_transaction_status(scb); 5359 if (ostat == CAM_REQ_INPROG) 5360 ahc_set_transaction_status(scb, status); 5361 cstat = ahc_get_transaction_status(scb); 5362 if (cstat != CAM_REQ_CMP) 5363 ahc_freeze_scb(scb); 5364 if ((scb->flags & SCB_ACTIVE) == 0) 5365 printf("Inactive SCB in qinfifo\n"); 5366 ahc_done(ahc, scb); 5367 5368 /* FALLTHROUGH */ 5369 } 5370 case SEARCH_REMOVE: 5371 break; 5372 case SEARCH_COUNT: 5373 ahc_qinfifo_requeue(ahc, prev_scb, scb); 5374 prev_scb = scb; 5375 break; 5376 } 5377 } else { 5378 ahc_qinfifo_requeue(ahc, prev_scb, scb); 5379 prev_scb = scb; 5380 } 5381 qinpos++; 5382 } 5383 5384 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 5385 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); 5386 } else { 5387 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); 5388 } 5389 5390 if (action != SEARCH_COUNT 5391 && (found != 0) 5392 && (qinstart != ahc->qinfifonext)) { 5393 /* 5394 * The sequencer may be in the process of DMA'ing 5395 * down the SCB at the beginning of the queue. 5396 * This could be problematic if either the first, 5397 * or the second SCB is removed from the queue 5398 * (the first SCB includes a pointer to the "next" 5399 * SCB to DMA). If we have removed any entries, swap 5400 * the first element in the queue with the next HSCB 5401 * so the sequencer will notice that NEXT_QUEUED_SCB 5402 * has changed during its DMA attempt and will retry 5403 * the DMA. 5404 */ 5405 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]); 5406 5407 if (scb == NULL) { 5408 printf("found = %d, qinstart = %d, qinfifionext = %d\n", 5409 found, qinstart, ahc->qinfifonext); 5410 panic("First/Second Qinfifo fixup\n"); 5411 } 5412 /* 5413 * ahc_swap_with_next_hscb forces our next pointer to 5414 * point to the reserved SCB for future commands. Save 5415 * and restore our original next pointer to maintain 5416 * queue integrity. 5417 */ 5418 next = scb->hscb->next; 5419 ahc->scb_data->scbindex[scb->hscb->tag] = NULL; 5420 ahc_swap_with_next_hscb(ahc, scb); 5421 scb->hscb->next = next; 5422 ahc->qinfifo[qinstart] = scb->hscb->tag; 5423 5424 /* Tell the card about the new head of the qinfifo. */ 5425 ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag); 5426 5427 /* Fixup the tail "next" pointer. */ 5428 qintail = ahc->qinfifonext - 1; 5429 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]); 5430 scb->hscb->next = ahc->next_queued_scb->hscb->tag; 5431 } 5432 5433 /* 5434 * Search waiting for selection list. 5435 */ 5436 curscbptr = ahc_inb(ahc, SCBPTR); 5437 next = ahc_inb(ahc, WAITING_SCBH); /* Start at head of list. */ 5438 prev = SCB_LIST_NULL; 5439 5440 while (next != SCB_LIST_NULL) { 5441 uint8_t scb_index; 5442 5443 ahc_outb(ahc, SCBPTR, next); 5444 scb_index = ahc_inb(ahc, SCB_TAG); 5445 if (scb_index >= ahc->scb_data->numscbs) { 5446 printf("Waiting List inconsistency. " 5447 "SCB index == %d, yet numscbs == %d.", 5448 scb_index, ahc->scb_data->numscbs); 5449 ahc_dump_card_state(ahc); 5450 panic("for safety"); 5451 } 5452 scb = ahc_lookup_scb(ahc, scb_index); 5453 if (scb == NULL) { 5454 printf("scb_index = %d, next = %d\n", 5455 scb_index, next); 5456 panic("Waiting List traversal\n"); 5457 } 5458 if (ahc_match_scb(ahc, scb, target, channel, 5459 lun, SCB_LIST_NULL, role)) { 5460 /* 5461 * We found an scb that needs to be acted on. 
5462 */ 5463 found++; 5464 switch (action) { 5465 case SEARCH_COMPLETE: 5466 { 5467 cam_status ostat; 5468 cam_status cstat; 5469 5470 ostat = ahc_get_transaction_status(scb); 5471 if (ostat == CAM_REQ_INPROG) 5472 ahc_set_transaction_status(scb, status); 5473 cstat = ahc_get_transaction_status(scb); 5474 if (cstat != CAM_REQ_CMP) 5475 ahc_freeze_scb(scb); 5476 if ((scb->flags & SCB_ACTIVE) == 0) 5477 printf("Inactive SCB in " 5478 "Waiting List\n"); 5479 ahc_done(ahc, scb); 5480 /* FALLTHROUGH */ 5481 } 5482 case SEARCH_REMOVE: 5483 next = ahc_rem_wscb(ahc, next, prev); 5484 break; 5485 case SEARCH_COUNT: 5486 prev = next; 5487 next = ahc_inb(ahc, SCB_NEXT); 5488 break; 5489 } 5490 } else { 5491 5492 prev = next; 5493 next = ahc_inb(ahc, SCB_NEXT); 5494 } 5495 } 5496 ahc_outb(ahc, SCBPTR, curscbptr); 5497 5498 found += ahc_search_untagged_queues(ahc, /*ahc_io_ctx_t*/NULL, target, 5499 channel, lun, status, action); 5500 5501 if (action == SEARCH_COMPLETE) 5502 ahc_release_untagged_queues(ahc); 5503 return (found); 5504 } 5505 5506 int 5507 ahc_search_untagged_queues(struct ahc_softc *ahc, 5508 struct scsipi_xfer *xs, int target, char channel, int lun, 5509 uint32_t status, ahc_search_action action) 5510 { 5511 struct scb *scb; 5512 int maxtarget; 5513 int found; 5514 int i; 5515 5516 if (action == SEARCH_COMPLETE) { 5517 /* 5518 * Don't attempt to run any queued untagged transactions 5519 * until we are done with the abort process. 5520 */ 5521 ahc_freeze_untagged_queues(ahc); 5522 } 5523 5524 found = 0; 5525 i = 0; 5526 if ((ahc->flags & AHC_SCB_BTT) == 0) { 5527 5528 maxtarget = 16; 5529 if (target != CAM_TARGET_WILDCARD) { 5530 5531 i = target; 5532 if (channel == 'B') 5533 i += 8; 5534 maxtarget = i + 1; 5535 } 5536 } else { 5537 maxtarget = 0; 5538 } 5539 5540 for (; i < maxtarget; i++) { 5541 struct scb_tailq *untagged_q; 5542 struct scb *next_scb; 5543 5544 untagged_q = &(ahc->untagged_queues[i]); 5545 next_scb = TAILQ_FIRST(untagged_q); 5546 while (next_scb != NULL) { 5547 5548 scb = next_scb; 5549 next_scb = TAILQ_NEXT(scb, links.tqe); 5550 5551 /* 5552 * The head of the list may be the currently 5553 * active untagged command for a device. 5554 * We're only searching for commands that 5555 * have not been started. A transaction 5556 * marked active but still in the qinfifo 5557 * is removed by the qinfifo scanning code 5558 * above. 5559 */ 5560 if ((scb->flags & SCB_ACTIVE) != 0) 5561 continue; 5562 5563 if (ahc_match_scb(ahc, scb, target, channel, lun, 5564 SCB_LIST_NULL, ROLE_INITIATOR) == 0 5565 /*|| (ctx != NULL && ctx != scb->io_ctx)*/) 5566 continue; 5567 5568 /* 5569 * We found an scb that needs to be acted on. 
5570 */ 5571 found++; 5572 switch (action) { 5573 case SEARCH_COMPLETE: 5574 { 5575 cam_status ostat; 5576 cam_status cstat; 5577 5578 ostat = ahc_get_transaction_status(scb); 5579 if (ostat == CAM_REQ_INPROG) 5580 ahc_set_transaction_status(scb, status); 5581 cstat = ahc_get_transaction_status(scb); 5582 if (cstat != CAM_REQ_CMP) 5583 ahc_freeze_scb(scb); 5584 if ((scb->flags & SCB_ACTIVE) == 0) 5585 printf("Inactive SCB in untaggedQ\n"); 5586 ahc_done(ahc, scb); 5587 break; 5588 } 5589 case SEARCH_REMOVE: 5590 scb->flags &= ~SCB_UNTAGGEDQ; 5591 TAILQ_REMOVE(untagged_q, scb, links.tqe); 5592 break; 5593 case SEARCH_COUNT: 5594 break; 5595 } 5596 } 5597 } 5598 5599 if (action == SEARCH_COMPLETE) 5600 ahc_release_untagged_queues(ahc); 5601 return (found); 5602 } 5603 5604 int 5605 ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel, 5606 int lun, u_int tag, int stop_on_first, int remove, 5607 int save_state) 5608 { 5609 struct scb *scbp; 5610 u_int next; 5611 u_int prev; 5612 u_int count; 5613 u_int active_scb; 5614 5615 count = 0; 5616 next = ahc_inb(ahc, DISCONNECTED_SCBH); 5617 prev = SCB_LIST_NULL; 5618 5619 if (save_state) { 5620 /* restore this when we're done */ 5621 active_scb = ahc_inb(ahc, SCBPTR); 5622 } else 5623 /* Silence compiler */ 5624 active_scb = SCB_LIST_NULL; 5625 5626 while (next != SCB_LIST_NULL) { 5627 u_int scb_index; 5628 5629 ahc_outb(ahc, SCBPTR, next); 5630 scb_index = ahc_inb(ahc, SCB_TAG); 5631 if (scb_index >= ahc->scb_data->numscbs) { 5632 printf("Disconnected List inconsistency. " 5633 "SCB index == %d, yet numscbs == %d.", 5634 scb_index, ahc->scb_data->numscbs); 5635 ahc_dump_card_state(ahc); 5636 panic("for safety"); 5637 } 5638 5639 if (next == prev) { 5640 panic("Disconnected List Loop. " 5641 "cur SCBPTR == %x, prev SCBPTR == %x.", 5642 next, prev); 5643 } 5644 scbp = ahc_lookup_scb(ahc, scb_index); 5645 if (ahc_match_scb(ahc, scbp, target, channel, lun, 5646 tag, ROLE_INITIATOR)) { 5647 count++; 5648 if (remove) { 5649 next = 5650 ahc_rem_scb_from_disc_list(ahc, prev, next); 5651 } else { 5652 prev = next; 5653 next = ahc_inb(ahc, SCB_NEXT); 5654 } 5655 if (stop_on_first) 5656 break; 5657 } else { 5658 prev = next; 5659 next = ahc_inb(ahc, SCB_NEXT); 5660 } 5661 } 5662 if (save_state) 5663 ahc_outb(ahc, SCBPTR, active_scb); 5664 return (count); 5665 } 5666 5667 /* 5668 * Remove an SCB from the on chip list of disconnected transactions. 5669 * This is empty/unused if we are not performing SCB paging. 5670 */ 5671 static u_int 5672 ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr) 5673 { 5674 u_int next; 5675 5676 ahc_outb(ahc, SCBPTR, scbptr); 5677 next = ahc_inb(ahc, SCB_NEXT); 5678 5679 ahc_outb(ahc, SCB_CONTROL, 0); 5680 5681 ahc_add_curscb_to_free_list(ahc); 5682 5683 if (prev != SCB_LIST_NULL) { 5684 ahc_outb(ahc, SCBPTR, prev); 5685 ahc_outb(ahc, SCB_NEXT, next); 5686 } else 5687 ahc_outb(ahc, DISCONNECTED_SCBH, next); 5688 5689 return (next); 5690 } 5691 5692 /* 5693 * Add the SCB as selected by SCBPTR onto the on chip list of 5694 * free hardware SCBs. This list is empty/unused if we are not 5695 * performing SCB paging. 5696 */ 5697 static void 5698 ahc_add_curscb_to_free_list(struct ahc_softc *ahc) 5699 { 5700 /* 5701 * Invalidate the tag so that our abort 5702 * routines don't think it's active. 
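 * Storing SCB_LIST_NULL in SCB_TAG marks the hardware SCB free;
 * when SCB paging is in use it is also pushed onto the FREE_SCBH
 * list so the sequencer can hand it out again.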
5703 */ 5704 ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL); 5705 5706 if ((ahc->flags & AHC_PAGESCBS) != 0) { 5707 ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH)); 5708 ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR)); 5709 } 5710 } 5711 5712 /* 5713 * Manipulate the waiting for selection list and return the 5714 * scb that follows the one that we remove. 5715 */ 5716 static u_int 5717 ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev) 5718 { 5719 u_int curscb, next; 5720 5721 /* 5722 * Select the SCB we want to abort and 5723 * pull the next pointer out of it. 5724 */ 5725 curscb = ahc_inb(ahc, SCBPTR); 5726 ahc_outb(ahc, SCBPTR, scbpos); 5727 next = ahc_inb(ahc, SCB_NEXT); 5728 5729 /* Clear the necessary fields */ 5730 ahc_outb(ahc, SCB_CONTROL, 0); 5731 5732 ahc_add_curscb_to_free_list(ahc); 5733 5734 /* update the waiting list */ 5735 if (prev == SCB_LIST_NULL) { 5736 /* First in the list */ 5737 ahc_outb(ahc, WAITING_SCBH, next); 5738 5739 /* 5740 * Ensure we aren't attempting to perform 5741 * selection for this entry. 5742 */ 5743 ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); 5744 } else { 5745 /* 5746 * Select the scb that pointed to us 5747 * and update its next pointer. 5748 */ 5749 ahc_outb(ahc, SCBPTR, prev); 5750 ahc_outb(ahc, SCB_NEXT, next); 5751 } 5752 5753 /* 5754 * Point us back at the original scb position. 5755 */ 5756 ahc_outb(ahc, SCBPTR, curscb); 5757 return next; 5758 } 5759 5760 /******************************** Error Handling ******************************/ 5761 /* 5762 * Abort all SCBs that match the given description (target/channel/lun/tag), 5763 * setting their status to the passed in status if the status has not already 5764 * been modified from CAM_REQ_INPROG. This routine assumes that the sequencer 5765 * is paused before it is called. 5766 */ 5767 int 5768 ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel, 5769 int lun, u_int tag, role_t role, uint32_t status) 5770 { 5771 struct scb *scbp; 5772 struct scb *scbp_next; 5773 u_int active_scb; 5774 int i, j; 5775 int maxtarget; 5776 int minlun; 5777 int maxlun; 5778 5779 int found; 5780 5781 /* 5782 * Don't attempt to run any queued untagged transactions 5783 * until we are done with the abort process. 5784 */ 5785 ahc_freeze_untagged_queues(ahc); 5786 5787 /* restore this when we're done */ 5788 active_scb = ahc_inb(ahc, SCBPTR); 5789 5790 found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL, 5791 role, CAM_REQUEUE_REQ, SEARCH_COMPLETE); 5792 5793 /* 5794 * Clean out the busy target table for any untagged commands. 5795 */ 5796 i = 0; 5797 maxtarget = 16; 5798 if (target != CAM_TARGET_WILDCARD) { 5799 i = target; 5800 if (channel == 'B') 5801 i += 8; 5802 maxtarget = i + 1; 5803 } 5804 5805 if (lun == CAM_LUN_WILDCARD) { 5806 5807 /* 5808 * Unless we are using an SCB based 5809 * busy targets table, there is only 5810 * one table entry for all luns of 5811 * a target. 
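 * So a wildcard lun clears only entry 0 for each target unless
 * AHC_SCB_BTT is set, in which case every lun slot from 0 to
 * AHC_NUM_LUNS - 1 is scanned.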
5812 */ 5813 minlun = 0; 5814 maxlun = 1; 5815 if ((ahc->flags & AHC_SCB_BTT) != 0) 5816 maxlun = AHC_NUM_LUNS; 5817 } else { 5818 minlun = lun; 5819 maxlun = lun + 1; 5820 } 5821 5822 if (role != ROLE_TARGET) { 5823 for (;i < maxtarget; i++) { 5824 for (j = minlun;j < maxlun; j++) { 5825 u_int scbid; 5826 u_int tcl; 5827 5828 tcl = BUILD_TCL(i << 4, j); 5829 scbid = ahc_index_busy_tcl(ahc, tcl); 5830 scbp = ahc_lookup_scb(ahc, scbid); 5831 if (scbp == NULL 5832 || ahc_match_scb(ahc, scbp, target, channel, 5833 lun, tag, role) == 0) 5834 continue; 5835 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j)); 5836 } 5837 } 5838 5839 /* 5840 * Go through the disconnected list and remove any entries we 5841 * have queued for completion, 0'ing their control byte too. 5842 * We save the active SCB and restore it ourselves, so there 5843 * is no reason for this search to restore it too. 5844 */ 5845 ahc_search_disc_list(ahc, target, channel, lun, tag, 5846 /*stop_on_first*/FALSE, /*remove*/TRUE, 5847 /*save_state*/FALSE); 5848 } 5849 5850 /* 5851 * Go through the hardware SCB array looking for commands that 5852 * were active but not on any list. In some cases, these remnants 5853 * might not still have mappings in the scbindex array (e.g. unexpected 5854 * bus free with the same scb queued for an abort). Don't hold this 5855 * against them. 5856 */ 5857 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 5858 u_int scbid; 5859 5860 ahc_outb(ahc, SCBPTR, i); 5861 scbid = ahc_inb(ahc, SCB_TAG); 5862 scbp = ahc_lookup_scb(ahc, scbid); 5863 if ((scbp == NULL && scbid != SCB_LIST_NULL) 5864 || (scbp != NULL 5865 && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role))) 5866 ahc_add_curscb_to_free_list(ahc); 5867 } 5868 5869 /* 5870 * Go through the pending CCB list and look for 5871 * commands for this target that are still active. 5872 * These are other tagged commands that were 5873 * disconnected when the reset occurred. 
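 * Each matching SCB is given the caller's status (if it is
 * still CAM_REQ_INPROG), frozen when it did not complete
 * normally, and then finished via ahc_done().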
5874 */
5875 scbp_next = LIST_FIRST(&ahc->pending_scbs);
5876 while (scbp_next != NULL) {
5877 scbp = scbp_next;
5878 scbp_next = LIST_NEXT(scbp, pending_links);
5879 if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) {
5880 cam_status ostat;
5881
5882 ostat = ahc_get_transaction_status(scbp);
5883 if (ostat == CAM_REQ_INPROG)
5884 ahc_set_transaction_status(scbp, status);
5885 if (ahc_get_transaction_status(scbp) != CAM_REQ_CMP)
5886 ahc_freeze_scb(scbp);
5887 if ((scbp->flags & SCB_ACTIVE) == 0)
5888 printf("Inactive SCB on pending list\n");
5889 ahc_done(ahc, scbp);
5890 found++;
5891 }
5892 }
5893 ahc_outb(ahc, SCBPTR, active_scb);
5894 ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status);
5895 ahc_release_untagged_queues(ahc);
5896 return found;
5897 }
5898
5899 static void
5900 ahc_reset_current_bus(struct ahc_softc *ahc)
5901 {
5902 uint8_t scsiseq;
5903
5904 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST);
5905 scsiseq = ahc_inb(ahc, SCSISEQ);
5906 ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO);
5907 ahc_flush_device_writes(ahc);
5908 ahc_delay(AHC_BUSRESET_DELAY);
5909 /* Turn off the bus reset */
5910 ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO);
5911
5912 ahc_clear_intstat(ahc);
5913
5914 /* Re-enable reset interrupts */
5915 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST);
5916 }
5917
5918 int
5919 ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
5920 {
5921 struct ahc_devinfo dinfo;
5922 u_int initiator, target, max_scsiid;
5923 u_int sblkctl;
5924 u_int scsiseq;
5925 u_int simode1;
5926 int found;
5927 int restart_needed;
5928 char cur_channel;
5929
5930 ahc->pending_device = NULL;
5931
5932 ahc_compile_devinfo(&dinfo,
5933 CAM_TARGET_WILDCARD,
5934 CAM_TARGET_WILDCARD,
5935 CAM_LUN_WILDCARD,
5936 channel, ROLE_UNKNOWN);
5937 ahc_pause(ahc);
5938
5939 /* Make sure the sequencer is in a safe location. */
5940 ahc_clear_critical_section(ahc);
5941
5942 /*
5943 * Run our command complete fifos to ensure that we perform
5944 * completion processing on any commands that 'completed'
5945 * before the reset occurred.
5946 */
5947 ahc_run_qoutfifo(ahc);
5948 #ifdef AHC_TARGET_MODE
5949 /*
5950 * XXX - In Twin mode, the tqinfifo may have commands
5951 * for an unaffected channel in it. However, if
5952 * we have run out of ATIO resources to drain that
5953 * queue, we may not get them all out here. Further,
5954 * the blocked transactions for the reset channel
5955 * should just be killed off, irrespective of whether
5956 * we are blocked on ATIO resources. Write a routine
5957 * to compact the tqinfifo appropriately.
5958 */
5959 if ((ahc->flags & AHC_TARGETROLE) != 0) {
5960 ahc_run_tqinfifo(ahc, /*paused*/TRUE);
5961 }
5962 #endif
5963
5964 /*
5965 * Reset the bus if we are initiating this reset
5966 */
5967 sblkctl = ahc_inb(ahc, SBLKCTL);
5968 cur_channel = 'A';
5969 if ((ahc->features & AHC_TWIN) != 0
5970 && ((sblkctl & SELBUSB) != 0))
5971 cur_channel = 'B';
5972 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
5973 if (cur_channel != channel) {
5974 /* Case 1: Command for another bus is active
5975 * Stealthily reset the other bus without
5976 * upsetting the current bus.
5977 */
5978 ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB);
5979 simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
5980 #ifdef AHC_TARGET_MODE
5981 /*
5982 * Bus resets clear ENSELI, so we cannot
5983 * defer re-enabling bus reset interrupts
5984 * if we are in target mode.
5985 */
5986 if ((ahc->flags & AHC_TARGETROLE) != 0)
5987 simode1 |= ENSCSIRST;
5988 #endif
5989 ahc_outb(ahc, SIMODE1, simode1);
5990 if (initiate_reset)
5991 ahc_reset_current_bus(ahc);
5992 ahc_clear_intstat(ahc);
5993 ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
5994 ahc_outb(ahc, SBLKCTL, sblkctl);
5995 restart_needed = FALSE;
5996 } else {
5997 /* Case 2: A command from this bus is active or we're idle */
5998 simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
5999 #ifdef AHC_TARGET_MODE
6000 /*
6001 * Bus resets clear ENSELI, so we cannot
6002 * defer re-enabling bus reset interrupts
6003 * if we are in target mode.
6004 */
6005 if ((ahc->flags & AHC_TARGETROLE) != 0)
6006 simode1 |= ENSCSIRST;
6007 #endif
6008 ahc_outb(ahc, SIMODE1, simode1);
6009 if (initiate_reset)
6010 ahc_reset_current_bus(ahc);
6011 ahc_clear_intstat(ahc);
6012 ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
6013 restart_needed = TRUE;
6014 }
6015
6016 /*
6017 * Clean up all the state information for the
6018 * pending transactions on this bus.
6019 */
6020 found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel,
6021 CAM_LUN_WILDCARD, SCB_LIST_NULL,
6022 ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);
6023
6024 max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7;
6025
6026 #ifdef AHC_TARGET_MODE
6027 /*
6028 * Send an immediate notify ccb to all target mode peripheral
6029 * drivers affected by this action.
6030 */
6031 for (target = 0; target <= max_scsiid; target++) {
6032 struct ahc_tmode_tstate* tstate;
6033 u_int lun;
6034
6035 tstate = ahc->enabled_targets[target];
6036 if (tstate == NULL)
6037 continue;
6038 for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
6039 struct ahc_tmode_lstate* lstate;
6040
6041 lstate = tstate->enabled_luns[lun];
6042 if (lstate == NULL)
6043 continue;
6044
6045 ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD,
6046 EVENT_TYPE_BUS_RESET, /*arg*/0);
6047 ahc_send_lstate_events(ahc, lstate);
6048 }
6049 }
6050 #endif
6051 /*
6052 * Revert to async/narrow transfers until we renegotiate.
6053 */
6054 for (target = 0; target <= max_scsiid; target++) {
6055
6056 if (ahc->enabled_targets[target] == NULL)
6057 continue;
6058 for (initiator = 0; initiator <= max_scsiid; initiator++) {
6059 struct ahc_devinfo devinfo;
6060
6061 ahc_compile_devinfo(&devinfo, target, initiator,
6062 CAM_LUN_WILDCARD,
6063 channel, ROLE_UNKNOWN);
6064 ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
6065 AHC_TRANS_CUR, /*paused*/TRUE);
6066 ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
6067 /*period*/0, /*offset*/0,
6068 /*ppr_options*/0, AHC_TRANS_CUR,
6069 /*paused*/TRUE);
6070 }
6071 }
6072
6073 if (restart_needed)
6074 ahc_restart(ahc);
6075 else
6076 ahc_unpause(ahc);
6077 return found;
6078 }
6079
6080
6081 /***************************** Residual Processing ****************************/
6082 /*
6083 * Calculate the residual for a just completed SCB.
6084 */
6085 void
6086 ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb)
6087 {
6088 struct hardware_scb *hscb;
6089 struct status_pkt *spkt;
6090 uint32_t sgptr;
6091 uint32_t resid_sgptr;
6092 uint32_t resid;
6093
6094 /*
6095 * 5 cases.
6096 * 1) No residual.
6097 * SG_RESID_VALID clear in sgptr.
6098 * 2) Transferless command
6099 * 3) Never performed any transfers.
6100 * sgptr has SG_FULL_RESID set.
6101 * 4) No residual but target did not
6102 * save data pointers after the
6103 * last transfer, so sgptr was
6104 * never updated.
6105 * 5) We have a partial residual.
6106 * Use residual_sgptr to determine 6107 * where we are. 6108 */ 6109 6110 hscb = scb->hscb; 6111 sgptr = ahc_le32toh(hscb->sgptr); 6112 if ((sgptr & SG_RESID_VALID) == 0) 6113 /* Case 1 */ 6114 return; 6115 sgptr &= ~SG_RESID_VALID; 6116 6117 if ((sgptr & SG_LIST_NULL) != 0) 6118 /* Case 2 */ 6119 return; 6120 6121 spkt = &hscb->shared_data.status; 6122 resid_sgptr = ahc_le32toh(spkt->residual_sg_ptr); 6123 if ((sgptr & SG_FULL_RESID) != 0) { 6124 /* Case 3 */ 6125 resid = ahc_get_transfer_length(scb); 6126 } else if ((resid_sgptr & SG_LIST_NULL) != 0) { 6127 /* Case 4 */ 6128 return; 6129 } else if ((resid_sgptr & ~SG_PTR_MASK) != 0) { 6130 panic("Bogus resid sgptr value 0x%x\n", resid_sgptr); 6131 } else { 6132 struct ahc_dma_seg *sg; 6133 6134 /* 6135 * Remainder of the SG where the transfer 6136 * stopped. 6137 */ 6138 resid = ahc_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK; 6139 sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK); 6140 6141 /* The residual sg_ptr always points to the next sg */ 6142 sg--; 6143 6144 /* 6145 * Add up the contents of all residual 6146 * SG segments that are after the SG where 6147 * the transfer stopped. 6148 */ 6149 while ((ahc_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) { 6150 sg++; 6151 resid += ahc_le32toh(sg->len) & AHC_SG_LEN_MASK; 6152 } 6153 } 6154 if ((scb->flags & SCB_SENSE) == 0) 6155 ahc_set_residual(scb, resid); 6156 else 6157 ahc_set_sense_residual(scb, resid); 6158 6159 #ifdef AHC_DEBUG 6160 if ((ahc_debug & AHC_SHOW_MISC) != 0) { 6161 ahc_print_path(ahc, scb); 6162 printf("Handled %sResidual of %d bytes\n", 6163 (scb->flags & SCB_SENSE) ? "Sense " : "", resid); 6164 } 6165 #endif 6166 } 6167 6168 /******************************* Target Mode **********************************/ 6169 #ifdef AHC_TARGET_MODE 6170 /* 6171 * Add a target mode event to this lun's queue 6172 */ 6173 static void 6174 ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate, 6175 u_int initiator_id, u_int event_type, u_int event_arg) 6176 { 6177 struct ahc_tmode_event *event; 6178 int pending; 6179 6180 xpt_freeze_devq(lstate->path, /*count*/1); 6181 if (lstate->event_w_idx >= lstate->event_r_idx) 6182 pending = lstate->event_w_idx - lstate->event_r_idx; 6183 else 6184 pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1 6185 - (lstate->event_r_idx - lstate->event_w_idx); 6186 6187 if (event_type == EVENT_TYPE_BUS_RESET 6188 || event_type == MSG_BUS_DEV_RESET) { 6189 /* 6190 * Any earlier events are irrelevant, so reset our buffer. 6191 * This has the effect of allowing us to deal with reset 6192 * floods (an external device holding down the reset line) 6193 * without losing the event that is really interesting. 
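 * Resetting both indices empties the ring; the devq freeze
 * counts taken for the discarded events are given back below.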
6194 */ 6195 lstate->event_r_idx = 0; 6196 lstate->event_w_idx = 0; 6197 xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE); 6198 } 6199 6200 if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) { 6201 xpt_print_path(lstate->path); 6202 printf("immediate event %x:%x lost\n", 6203 lstate->event_buffer[lstate->event_r_idx].event_type, 6204 lstate->event_buffer[lstate->event_r_idx].event_arg); 6205 lstate->event_r_idx++; 6206 if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE) 6207 lstate->event_r_idx = 0; 6208 xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE); 6209 } 6210 6211 event = &lstate->event_buffer[lstate->event_w_idx]; 6212 event->initiator_id = initiator_id; 6213 event->event_type = event_type; 6214 event->event_arg = event_arg; 6215 lstate->event_w_idx++; 6216 if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE) 6217 lstate->event_w_idx = 0; 6218 } 6219 6220 /* 6221 * Send any target mode events queued up waiting 6222 * for immediate notify resources. 6223 */ 6224 void 6225 ahc_send_lstate_events(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate) 6226 { 6227 struct ccb_hdr *ccbh; 6228 struct ccb_immed_notify *inot; 6229 6230 while (lstate->event_r_idx != lstate->event_w_idx 6231 && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) { 6232 struct ahc_tmode_event *event; 6233 6234 event = &lstate->event_buffer[lstate->event_r_idx]; 6235 SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle); 6236 inot = (struct ccb_immed_notify *)ccbh; 6237 switch (event->event_type) { 6238 case EVENT_TYPE_BUS_RESET: 6239 ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN; 6240 break; 6241 default: 6242 ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; 6243 inot->message_args[0] = event->event_type; 6244 inot->message_args[1] = event->event_arg; 6245 break; 6246 } 6247 inot->initiator_id = event->initiator_id; 6248 inot->sense_len = 0; 6249 xpt_done((union ccb *)inot); 6250 lstate->event_r_idx++; 6251 if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE) 6252 lstate->event_r_idx = 0; 6253 } 6254 } 6255 #endif 6256 6257 /******************** Sequencer Program Patching/Download *********************/ 6258 6259 #ifdef AHC_DUMP_SEQ 6260 void 6261 ahc_dumpseq(struct ahc_softc* ahc) 6262 { 6263 int i; 6264 int max_prog; 6265 6266 if ((ahc->chip & AHC_BUS_MASK) < AHC_PCI) 6267 max_prog = 448; 6268 else if ((ahc->features & AHC_ULTRA2) != 0) 6269 max_prog = 768; 6270 else 6271 max_prog = 512; 6272 6273 ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); 6274 ahc_outb(ahc, SEQADDR0, 0); 6275 ahc_outb(ahc, SEQADDR1, 0); 6276 for (i = 0; i < max_prog; i++) { 6277 uint8_t ins_bytes[4]; 6278 6279 ahc_insb(ahc, SEQRAM, ins_bytes, 4); 6280 printf("0x%08x\n", ins_bytes[0] << 24 6281 | ins_bytes[1] << 16 6282 | ins_bytes[2] << 8 6283 | ins_bytes[3]); 6284 } 6285 } 6286 #endif 6287 6288 static void 6289 ahc_loadseq(struct ahc_softc *ahc) 6290 { 6291 struct cs cs_table[num_critical_sections]; 6292 u_int begin_set[num_critical_sections]; 6293 u_int end_set[num_critical_sections]; 6294 struct patch *cur_patch; 6295 u_int cs_count; 6296 u_int cur_cs; 6297 u_int i; 6298 int downloaded; 6299 u_int skip_addr; 6300 u_int sg_prefetch_cnt; 6301 uint8_t download_consts[7]; 6302 6303 /* 6304 * Start out with 0 critical sections 6305 * that apply to this firmware load. 
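 * Critical sections that survive the patch process are recorded
 * with begin/end offsets relative to the instructions actually
 * downloaded, not to the original program listing.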
6306 */
6307 cs_count = 0;
6308 cur_cs = 0;
6309 memset(begin_set, 0, sizeof(begin_set));
6310 memset(end_set, 0, sizeof(end_set));
6311
6312 /* Setup downloadable constant table */
6313 download_consts[QOUTFIFO_OFFSET] = 0;
6314 if (ahc->targetcmds != NULL)
6315 download_consts[QOUTFIFO_OFFSET] += 32;
6316 download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1;
6317 download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1;
6318 download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1);
6319 sg_prefetch_cnt = ahc->pci_cachesize;
6320 if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg)))
6321 sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg);
6322 download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
6323 download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1);
6324 download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1);
6325
6326 cur_patch = patches;
6327 downloaded = 0;
6328 skip_addr = 0;
6329 ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
6330 ahc_outb(ahc, SEQADDR0, 0);
6331 ahc_outb(ahc, SEQADDR1, 0);
6332
6333 for (i = 0; i < sizeof(seqprog)/4; i++) {
6334 if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) {
6335 /*
6336 * Don't download this instruction as it
6337 * is in a patch that was removed.
6338 */
6339 continue;
6340 }
6341 /*
6342 * Move through the CS table until we find a CS
6343 * that might apply to this instruction.
6344 */
6345 for (; cur_cs < num_critical_sections; cur_cs++) {
6346 if (critical_sections[cur_cs].end <= i) {
6347 if (begin_set[cs_count] == TRUE
6348 && end_set[cs_count] == FALSE) {
6349 cs_table[cs_count].end = downloaded;
6350 end_set[cs_count] = TRUE;
6351 cs_count++;
6352 }
6353 continue;
6354 }
6355 if (critical_sections[cur_cs].begin <= i
6356 && begin_set[cs_count] == FALSE) {
6357 cs_table[cs_count].begin = downloaded;
6358 begin_set[cs_count] = TRUE;
6359 }
6360 break;
6361 }
6362 ahc_download_instr(ahc, i, download_consts);
6363 downloaded++;
6364 }
6365
6366 ahc->num_critical_sections = cs_count;
6367 if (cs_count != 0) {
6368
6369 cs_count *= sizeof(struct cs);
6370 ahc->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT);
6371 if (ahc->critical_sections == NULL)
6372 panic("ahc_loadseq: Could not malloc");
6373 memcpy(ahc->critical_sections, cs_table, cs_count);
6374 }
6375 ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);
6376 ahc_restart(ahc);
6377
6378 if (bootverbose) {
6379 printf(" %d instructions downloaded\n", downloaded);
6380 printf("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n",
6381 ahc_name(ahc), ahc->features, ahc->bugs, ahc->flags);
6382 }
6383 }
6384
6385 static int
6386 ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch,
6387 u_int start_instr, u_int *skip_addr)
6388 {
6389 struct patch *cur_patch;
6390 struct patch *last_patch;
6391 u_int num_patches;
6392
6393 num_patches = sizeof(patches)/sizeof(struct patch);
6394 last_patch = &patches[num_patches];
6395 cur_patch = *start_patch;
6396
6397 while (cur_patch < last_patch && start_instr == cur_patch->begin) {
6398
6399 if (cur_patch->patch_func(ahc) == 0) {
6400
6401 /* Start rejecting code */
6402 *skip_addr = start_instr + cur_patch->skip_instr;
6403 cur_patch += cur_patch->skip_patch;
6404 } else {
6405 /* Accepted this patch. Advance to the next
6406 * one and wait for our instruction pointer to
6407 * hit this point.
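 * (Several patches may begin at the same instruction; the
 * enclosing loop consumes each of them, accepted or skipped,
 * until the begin address no longer matches.)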
6408 */ 6409 cur_patch++; 6410 } 6411 } 6412 6413 *start_patch = cur_patch; 6414 if (start_instr < *skip_addr) 6415 /* Still skipping */ 6416 return (0); 6417 6418 return (1); 6419 } 6420 6421 static void 6422 ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts) 6423 { 6424 union ins_formats instr; 6425 struct ins_format1 *fmt1_ins; 6426 struct ins_format3 *fmt3_ins; 6427 u_int opcode; 6428 6429 /* 6430 * The firmware is always compiled into a little endian format. 6431 */ 6432 instr.integer = ahc_le32toh(*(uint32_t*)&seqprog[instrptr * 4]); 6433 6434 fmt1_ins = &instr.format1; 6435 fmt3_ins = NULL; 6436 6437 /* Pull the opcode */ 6438 opcode = instr.format1.opcode; 6439 switch (opcode) { 6440 case AIC_OP_JMP: 6441 case AIC_OP_JC: 6442 case AIC_OP_JNC: 6443 case AIC_OP_CALL: 6444 case AIC_OP_JNE: 6445 case AIC_OP_JNZ: 6446 case AIC_OP_JE: 6447 case AIC_OP_JZ: 6448 { 6449 struct patch *cur_patch; 6450 int address_offset; 6451 u_int address; 6452 u_int skip_addr; 6453 u_int i; 6454 6455 fmt3_ins = &instr.format3; 6456 address_offset = 0; 6457 address = fmt3_ins->address; 6458 cur_patch = patches; 6459 skip_addr = 0; 6460 for (i = 0; i < address;) { 6461 ahc_check_patch(ahc, &cur_patch, i, &skip_addr); 6462 6463 if (skip_addr > i) { 6464 int end_addr; 6465 6466 end_addr = MIN(address, skip_addr); 6467 address_offset += end_addr - i; 6468 i = skip_addr; 6469 } else { 6470 i++; 6471 } 6472 } 6473 address -= address_offset; 6474 fmt3_ins->address = address; 6475 /* FALLTHROUGH */ 6476 } 6477 case AIC_OP_OR: 6478 case AIC_OP_AND: 6479 case AIC_OP_XOR: 6480 case AIC_OP_ADD: 6481 case AIC_OP_ADC: 6482 case AIC_OP_BMOV: 6483 if (fmt1_ins->parity != 0) { 6484 fmt1_ins->immediate = dconsts[fmt1_ins->immediate]; 6485 } 6486 fmt1_ins->parity = 0; 6487 if ((ahc->features & AHC_CMD_CHAN) == 0 6488 && opcode == AIC_OP_BMOV) { 6489 /* 6490 * Block move was added at the same time 6491 * as the command channel. Verify that 6492 * this is only a move of a single element 6493 * and convert the BMOV to a MOV 6494 * (AND with an immediate of FF). 
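 * A multi-byte block move has no single-instruction equivalent
 * on these chips, so anything other than a single element move
 * is treated as fatal below.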
6495 */ 6496 if (fmt1_ins->immediate != 1) 6497 panic("%s: BMOV not supported\n", 6498 ahc_name(ahc)); 6499 fmt1_ins->opcode = AIC_OP_AND; 6500 fmt1_ins->immediate = 0xff; 6501 } 6502 /* FALLTHROUGH */ 6503 case AIC_OP_ROL: 6504 if ((ahc->features & AHC_ULTRA2) != 0) { 6505 int i, count; 6506 6507 /* Calculate odd parity for the instruction */ 6508 for (i = 0, count = 0; i < 31; i++) { 6509 uint32_t mask; 6510 6511 mask = 0x01 << i; 6512 if ((instr.integer & mask) != 0) 6513 count++; 6514 } 6515 if ((count & 0x01) == 0) 6516 instr.format1.parity = 1; 6517 } else { 6518 /* Compress the instruction for older sequencers */ 6519 if (fmt3_ins != NULL) { 6520 instr.integer = 6521 fmt3_ins->immediate 6522 | (fmt3_ins->source << 8) 6523 | (fmt3_ins->address << 16) 6524 | (fmt3_ins->opcode << 25); 6525 } else { 6526 instr.integer = 6527 fmt1_ins->immediate 6528 | (fmt1_ins->source << 8) 6529 | (fmt1_ins->destination << 16) 6530 | (fmt1_ins->ret << 24) 6531 | (fmt1_ins->opcode << 25); 6532 } 6533 } 6534 /* The sequencer is a little endian CPU */ 6535 instr.integer = ahc_htole32(instr.integer); 6536 ahc_outsb(ahc, SEQRAM, instr.bytes, 4); 6537 break; 6538 default: 6539 panic("Unknown opcode encountered in seq program"); 6540 break; 6541 } 6542 } 6543 6544 int 6545 ahc_print_register(ahc_reg_parse_entry_t *table, u_int num_entries, 6546 const char *name, u_int address, u_int value, 6547 u_int *cur_column, u_int wrap_point) 6548 { 6549 size_t printed; 6550 u_int printed_mask; 6551 char line[1024]; 6552 6553 line[0] = 0; 6554 6555 if (cur_column != NULL && *cur_column >= wrap_point) { 6556 printf("\n"); 6557 *cur_column = 0; 6558 } 6559 printed = snprintf(line, sizeof(line), "%s[0x%x]", name, value); 6560 if (printed > sizeof(line)) 6561 printed = sizeof(line); 6562 if (table == NULL) { 6563 printed += snprintf(&line[printed], (sizeof line) - printed, 6564 " "); 6565 if (printed > sizeof(line)) 6566 printed = sizeof(line); 6567 printf("%s", line); 6568 if (cur_column != NULL) 6569 *cur_column += printed; 6570 return (printed); 6571 } 6572 printed_mask = 0; 6573 while (printed_mask != 0xFF) { 6574 int entry; 6575 6576 for (entry = 0; entry < num_entries; entry++) { 6577 if (((value & table[entry].mask) 6578 != table[entry].value) 6579 || ((printed_mask & table[entry].mask) 6580 == table[entry].mask)) 6581 continue; 6582 if (printed > sizeof(line)) 6583 printed = sizeof(line); 6584 printed += snprintf(&line[printed], 6585 (sizeof line) - printed, "%s%s", 6586 printed_mask == 0 ? 
":(" : "|", 6587 table[entry].name); 6588 printed_mask |= table[entry].mask; 6589 6590 break; 6591 } 6592 if (entry >= num_entries) 6593 break; 6594 } 6595 if (printed > sizeof(line)) 6596 printed = sizeof(line); 6597 if (printed_mask != 0) 6598 printed += snprintf(&line[printed], 6599 (sizeof line) - printed, ") "); 6600 else 6601 printed += snprintf(&line[printed], 6602 (sizeof line) - printed, " "); 6603 if (cur_column != NULL) 6604 *cur_column += printed; 6605 printf("%s", line); 6606 6607 return (printed); 6608 } 6609 6610 void 6611 ahc_dump_card_state(struct ahc_softc *ahc) 6612 { 6613 struct scb *scb; 6614 struct scb_tailq *untagged_q; 6615 u_int cur_col; 6616 int paused; 6617 int target; 6618 int maxtarget; 6619 int i; 6620 uint8_t last_phase; 6621 uint8_t qinpos; 6622 uint8_t qintail; 6623 uint8_t qoutpos; 6624 uint8_t scb_index; 6625 uint8_t saved_scbptr; 6626 6627 if (ahc_is_paused(ahc)) { 6628 paused = 1; 6629 } else { 6630 paused = 0; 6631 ahc_pause(ahc); 6632 } 6633 6634 saved_scbptr = ahc_inb(ahc, SCBPTR); 6635 last_phase = ahc_inb(ahc, LASTPHASE); 6636 printf(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n" 6637 "%s: Dumping Card State %s, at SEQADDR 0x%x\n", 6638 ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg, 6639 ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); 6640 if (paused) 6641 printf("Card was paused\n"); 6642 printf("ACCUM = 0x%x, SINDEX = 0x%x, DINDEX = 0x%x, ARG_2 = 0x%x\n", 6643 ahc_inb(ahc, ACCUM), ahc_inb(ahc, SINDEX), ahc_inb(ahc, DINDEX), 6644 ahc_inb(ahc, ARG_2)); 6645 printf("HCNT = 0x%x SCBPTR = 0x%x\n", ahc_inb(ahc, HCNT), 6646 ahc_inb(ahc, SCBPTR)); 6647 cur_col = 0; 6648 if ((ahc->features & AHC_DT) != 0) 6649 ahc_scsiphase_print(ahc_inb(ahc, SCSIPHASE), &cur_col, 50); 6650 ahc_scsisigi_print(ahc_inb(ahc, SCSISIGI), &cur_col, 50); 6651 ahc_error_print(ahc_inb(ahc, ERROR), &cur_col, 50); 6652 ahc_scsibusl_print(ahc_inb(ahc, SCSIBUSL), &cur_col, 50); 6653 ahc_lastphase_print(ahc_inb(ahc, LASTPHASE), &cur_col, 50); 6654 ahc_scsiseq_print(ahc_inb(ahc, SCSISEQ), &cur_col, 50); 6655 ahc_sblkctl_print(ahc_inb(ahc, SBLKCTL), &cur_col, 50); 6656 ahc_scsirate_print(ahc_inb(ahc, SCSIRATE), &cur_col, 50); 6657 ahc_seqctl_print(ahc_inb(ahc, SEQCTL), &cur_col, 50); 6658 ahc_seq_flags_print(ahc_inb(ahc, SEQ_FLAGS), &cur_col, 50); 6659 ahc_sstat0_print(ahc_inb(ahc, SSTAT0), &cur_col, 50); 6660 ahc_sstat1_print(ahc_inb(ahc, SSTAT1), &cur_col, 50); 6661 ahc_sstat2_print(ahc_inb(ahc, SSTAT2), &cur_col, 50); 6662 ahc_sstat3_print(ahc_inb(ahc, SSTAT3), &cur_col, 50); 6663 ahc_simode0_print(ahc_inb(ahc, SIMODE0), &cur_col, 50); 6664 ahc_simode1_print(ahc_inb(ahc, SIMODE1), &cur_col, 50); 6665 ahc_sxfrctl0_print(ahc_inb(ahc, SXFRCTL0), &cur_col, 50); 6666 ahc_dfcntrl_print(ahc_inb(ahc, DFCNTRL), &cur_col, 50); 6667 ahc_dfstatus_print(ahc_inb(ahc, DFSTATUS), &cur_col, 50); 6668 if (cur_col != 0) 6669 printf("\n"); 6670 printf("STACK:"); 6671 for (i = 0; i < STACK_SIZE; i++) 6672 printf(" 0x%x", ahc_inb(ahc, STACK)|(ahc_inb(ahc, STACK) << 8)); 6673 printf("\nSCB count = %d\n", ahc->scb_data->numscbs); 6674 printf("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag); 6675 printf("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB)); 6676 /* QINFIFO */ 6677 printf("QINFIFO entries: "); 6678 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 6679 qinpos = ahc_inb(ahc, SNSCB_QOFF); 6680 ahc_outb(ahc, SNSCB_QOFF, qinpos); 6681 } else 6682 qinpos = ahc_inb(ahc, QINPOS); 6683 qintail = ahc->qinfifonext; 6684 while (qinpos != qintail) { 6685 
printf("%d ", ahc->qinfifo[qinpos]); 6686 qinpos++; 6687 } 6688 printf("\n"); 6689 6690 printf("Waiting Queue entries: "); 6691 scb_index = ahc_inb(ahc, WAITING_SCBH); 6692 i = 0; 6693 while (scb_index != SCB_LIST_NULL && i++ < 256) { 6694 ahc_outb(ahc, SCBPTR, scb_index); 6695 printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG)); 6696 scb_index = ahc_inb(ahc, SCB_NEXT); 6697 } 6698 printf("\n"); 6699 6700 printf("Disconnected Queue entries: "); 6701 scb_index = ahc_inb(ahc, DISCONNECTED_SCBH); 6702 i = 0; 6703 while (scb_index != SCB_LIST_NULL && i++ < 256) { 6704 ahc_outb(ahc, SCBPTR, scb_index); 6705 printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG)); 6706 scb_index = ahc_inb(ahc, SCB_NEXT); 6707 } 6708 printf("\n"); 6709 6710 ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD); 6711 printf("QOUTFIFO entries: "); 6712 qoutpos = ahc->qoutfifonext; 6713 i = 0; 6714 while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) { 6715 printf("%d ", ahc->qoutfifo[qoutpos]); 6716 qoutpos++; 6717 } 6718 printf("\n"); 6719 6720 printf("Sequencer Free SCB List: "); 6721 scb_index = ahc_inb(ahc, FREE_SCBH); 6722 i = 0; 6723 while (scb_index != SCB_LIST_NULL && i++ < 256) { 6724 ahc_outb(ahc, SCBPTR, scb_index); 6725 printf("%d ", scb_index); 6726 scb_index = ahc_inb(ahc, SCB_NEXT); 6727 } 6728 printf("\n"); 6729 6730 printf("Sequencer SCB Info: "); 6731 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 6732 ahc_outb(ahc, SCBPTR, i); 6733 /*cur_col =*/ printf("\n%3d ", i); 6734 6735 ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), &cur_col, 60); 6736 ahc_scb_scsiid_print(ahc_inb(ahc, SCB_SCSIID), &cur_col, 60); 6737 ahc_scb_lun_print(ahc_inb(ahc, SCB_LUN), &cur_col, 60); 6738 ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60); 6739 } 6740 printf("\n"); 6741 6742 printf("Pending list: "); 6743 i = 0; 6744 LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) { 6745 if (i++ > 256) 6746 break; 6747 /*cur_col =*/ printf("\n%3d ", scb->hscb->tag); 6748 ahc_scb_control_print(scb->hscb->control, &cur_col, 60); 6749 ahc_scb_scsiid_print(scb->hscb->scsiid, &cur_col, 60); 6750 ahc_scb_lun_print(scb->hscb->lun, &cur_col, 60); 6751 if ((ahc->flags & AHC_PAGESCBS) == 0) { 6752 ahc_outb(ahc, SCBPTR, scb->hscb->tag); 6753 printf("("); 6754 ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), 6755 &cur_col, 60); 6756 ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60); 6757 printf(")"); 6758 } 6759 } 6760 printf("\n"); 6761 6762 printf("Kernel Free SCB list: "); 6763 i = 0; 6764 SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) { 6765 if (i++ > 256) 6766 break; 6767 printf("%d ", scb->hscb->tag); 6768 } 6769 printf("\n"); 6770 6771 maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 
15 : 7; 6772 for (target = 0; target <= maxtarget; target++) { 6773 untagged_q = &ahc->untagged_queues[target]; 6774 if (TAILQ_FIRST(untagged_q) == NULL) 6775 continue; 6776 printf("Untagged Q(%d): ", target); 6777 i = 0; 6778 TAILQ_FOREACH(scb, untagged_q, links.tqe) { 6779 if (i++ > 256) 6780 break; 6781 printf("%d ", scb->hscb->tag); 6782 } 6783 printf("\n"); 6784 } 6785 6786 ahc_platform_dump_card_state(ahc); 6787 printf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n"); 6788 ahc_outb(ahc, SCBPTR, saved_scbptr); 6789 if (paused == 0) 6790 ahc_unpause(ahc); 6791 } 6792 6793 /************************* Target Mode ****************************************/ 6794 #ifdef AHC_TARGET_MODE 6795 cam_status 6796 ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb, 6797 struct ahc_tmode_tstate **tstate, 6798 struct ahc_tmode_lstate **lstate, 6799 int notfound_failure) 6800 { 6801 6802 if ((ahc->features & AHC_TARGETMODE) == 0) 6803 return (CAM_REQ_INVALID); 6804 6805 /* 6806 * Handle the 'black hole' device that sucks up 6807 * requests to unattached luns on enabled targets. 6808 */ 6809 if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD 6810 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { 6811 *tstate = NULL; 6812 *lstate = ahc->black_hole; 6813 } else { 6814 u_int max_id; 6815 6816 max_id = (ahc->features & AHC_WIDE) ? 15 : 7; 6817 if (ccb->ccb_h.target_id > max_id) 6818 return (CAM_TID_INVALID); 6819 6820 if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS) 6821 return (CAM_LUN_INVALID); 6822 6823 *tstate = ahc->enabled_targets[ccb->ccb_h.target_id]; 6824 *lstate = NULL; 6825 if (*tstate != NULL) 6826 *lstate = 6827 (*tstate)->enabled_luns[ccb->ccb_h.target_lun]; 6828 } 6829 6830 if (notfound_failure != 0 && *lstate == NULL) 6831 return (CAM_PATH_INVALID); 6832 6833 return (CAM_REQ_CMP); 6834 } 6835 6836 void 6837 ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb) 6838 { 6839 struct ahc_tmode_tstate *tstate; 6840 struct ahc_tmode_lstate *lstate; 6841 struct ccb_en_lun *cel; 6842 cam_status status; 6843 u_int target; 6844 u_int lun; 6845 u_int target_mask; 6846 u_int our_id; 6847 u_long s; 6848 char channel; 6849 6850 status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate, 6851 /*notfound_failure*/FALSE); 6852 6853 if (status != CAM_REQ_CMP) { 6854 ccb->ccb_h.status = status; 6855 return; 6856 } 6857 6858 if (cam_sim_bus(sim) == 0) 6859 our_id = ahc->our_id; 6860 else 6861 our_id = ahc->our_id_b; 6862 6863 if (ccb->ccb_h.target_id != our_id) { 6864 /* 6865 * our_id represents our initiator ID, or 6866 * the ID of the first target to have an 6867 * enabled lun in target mode. There are 6868 * two cases that may preclude enabling a 6869 * target id other than our_id. 6870 * 6871 * o our_id is for an active initiator role. 6872 * Since the hardware does not support 6873 * reselections to the initiator role at 6874 * anything other than our_id, and our_id 6875 * is used by the hardware to indicate the 6876 * ID to use for both select-out and 6877 * reselect-out operations, the only target 6878 * ID we can support in this mode is our_id. 6879 * 6880 * o The MULTARGID feature is not available and 6881 * a previous target mode ID has been enabled. 6882 */ 6883 if ((ahc->features & AHC_MULTIROLE) != 0) { 6884 6885 if ((ahc->features & AHC_MULTI_TID) != 0 6886 && (ahc->flags & AHC_INITIATORROLE) != 0) { 6887 /* 6888 * Only allow additional targets if 6889 * the initiator role is disabled. 
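 * (In this driver that means the AHC_INITIATORROLE flag
 * must be clear.)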
6890 * The hardware cannot handle a re-select-in 6891 * on the initiator id during a re-select-out 6892 * on a different target id. 6893 */ 6894 status = CAM_TID_INVALID; 6895 } else if ((ahc->flags & AHC_INITIATORROLE) != 0 6896 || ahc->enabled_luns > 0) { 6897 /* 6898 * Only allow our target id to change 6899 * if the initiator role is not configured 6900 * and there are no enabled luns which 6901 * are attached to the currently registered 6902 * scsi id. 6903 */ 6904 status = CAM_TID_INVALID; 6905 } 6906 } else if ((ahc->features & AHC_MULTI_TID) == 0 6907 && ahc->enabled_luns > 0) { 6908 6909 status = CAM_TID_INVALID; 6910 } 6911 } 6912 6913 if (status != CAM_REQ_CMP) { 6914 ccb->ccb_h.status = status; 6915 return; 6916 } 6917 6918 /* 6919 * We now have an id that is valid. 6920 * If we aren't in target mode, switch modes. 6921 */ 6922 if ((ahc->flags & AHC_TARGETROLE) == 0 6923 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 6924 u_long s; 6925 6926 printf("Configuring Target Mode\n"); 6927 ahc_lock(ahc, &s); 6928 if (LIST_FIRST(&ahc->pending_scbs) != NULL) { 6929 ccb->ccb_h.status = CAM_BUSY; 6930 ahc_unlock(ahc, &s); 6931 return; 6932 } 6933 ahc->flags |= AHC_TARGETROLE; 6934 if ((ahc->features & AHC_MULTIROLE) == 0) 6935 ahc->flags &= ~AHC_INITIATORROLE; 6936 ahc_pause(ahc); 6937 ahc_loadseq(ahc); 6938 ahc_unlock(ahc, &s); 6939 } 6940 cel = &ccb->cel; 6941 target = ccb->ccb_h.target_id; 6942 lun = ccb->ccb_h.target_lun; 6943 channel = SIM_CHANNEL(ahc, sim); 6944 target_mask = 0x01 << target; 6945 if (channel == 'B') 6946 target_mask <<= 8; 6947 6948 if (cel->enable != 0) { 6949 u_int scsiseq; 6950 6951 /* Are we already enabled?? */ 6952 if (lstate != NULL) { 6953 xpt_print_path(ccb->ccb_h.path); 6954 printf("Lun already enabled\n"); 6955 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 6956 return; 6957 } 6958 6959 if (cel->grp6_len != 0 6960 || cel->grp7_len != 0) { 6961 /* 6962 * Don't (yet?) support vendor 6963 * specific commands. 6964 */ 6965 ccb->ccb_h.status = CAM_REQ_INVALID; 6966 printf("Non-zero Group Codes\n"); 6967 return; 6968 } 6969 6970 /* 6971 * Seems to be okay. 6972 * Setup our data structures. 
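 * A tstate is allocated per enabled target (except for the
 * wildcard "black hole" target) and an lstate per lun to hold
 * the CAM path plus the accept-TIO and immediate-notify queues.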
6973 */ 6974 if (target != CAM_TARGET_WILDCARD && tstate == NULL) { 6975 tstate = ahc_alloc_tstate(ahc, target, channel); 6976 if (tstate == NULL) { 6977 xpt_print_path(ccb->ccb_h.path); 6978 printf("Couldn't allocate tstate\n"); 6979 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 6980 return; 6981 } 6982 } 6983 lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT); 6984 if (lstate == NULL) { 6985 xpt_print_path(ccb->ccb_h.path); 6986 printf("Couldn't allocate lstate\n"); 6987 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 6988 return; 6989 } 6990 memset(lstate, 0, sizeof(*lstate)); 6991 status = xpt_create_path(&lstate->path, /*periph*/NULL, 6992 xpt_path_path_id(ccb->ccb_h.path), 6993 xpt_path_target_id(ccb->ccb_h.path), 6994 xpt_path_lun_id(ccb->ccb_h.path)); 6995 if (status != CAM_REQ_CMP) { 6996 free(lstate, M_DEVBUF); 6997 xpt_print_path(ccb->ccb_h.path); 6998 printf("Couldn't allocate path\n"); 6999 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 7000 return; 7001 } 7002 SLIST_INIT(&lstate->accept_tios); 7003 SLIST_INIT(&lstate->immed_notifies); 7004 ahc_lock(ahc, &s); 7005 ahc_pause(ahc); 7006 if (target != CAM_TARGET_WILDCARD) { 7007 tstate->enabled_luns[lun] = lstate; 7008 ahc->enabled_luns++; 7009 7010 if ((ahc->features & AHC_MULTI_TID) != 0) { 7011 u_int targid_mask; 7012 7013 targid_mask = ahc_inb(ahc, TARGID) 7014 | (ahc_inb(ahc, TARGID + 1) << 8); 7015 7016 targid_mask |= target_mask; 7017 ahc_outb(ahc, TARGID, targid_mask); 7018 ahc_outb(ahc, TARGID+1, (targid_mask >> 8)); 7019 7020 ahc_update_scsiid(ahc, targid_mask); 7021 } else { 7022 u_int our_id; 7023 char channel; 7024 7025 channel = SIM_CHANNEL(ahc, sim); 7026 our_id = SIM_SCSI_ID(ahc, sim); 7027 7028 /* 7029 * This can only happen if selections 7030 * are not enabled 7031 */ 7032 if (target != our_id) { 7033 u_int sblkctl; 7034 char cur_channel; 7035 int swap; 7036 7037 sblkctl = ahc_inb(ahc, SBLKCTL); 7038 cur_channel = (sblkctl & SELBUSB) 7039 ? 
'B' : 'A'; 7040 if ((ahc->features & AHC_TWIN) == 0) 7041 cur_channel = 'A'; 7042 swap = cur_channel != channel; 7043 if (channel == 'A') 7044 ahc->our_id = target; 7045 else 7046 ahc->our_id_b = target; 7047 7048 if (swap) 7049 ahc_outb(ahc, SBLKCTL, 7050 sblkctl ^ SELBUSB); 7051 7052 ahc_outb(ahc, SCSIID, target); 7053 7054 if (swap) 7055 ahc_outb(ahc, SBLKCTL, sblkctl); 7056 } 7057 } 7058 } else 7059 ahc->black_hole = lstate; 7060 /* Allow select-in operations */ 7061 if (ahc->black_hole != NULL && ahc->enabled_luns > 0) { 7062 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); 7063 scsiseq |= ENSELI; 7064 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); 7065 scsiseq = ahc_inb(ahc, SCSISEQ); 7066 scsiseq |= ENSELI; 7067 ahc_outb(ahc, SCSISEQ, scsiseq); 7068 } 7069 ahc_unpause(ahc); 7070 ahc_unlock(ahc, &s); 7071 ccb->ccb_h.status = CAM_REQ_CMP; 7072 xpt_print_path(ccb->ccb_h.path); 7073 printf("Lun now enabled for target mode\n"); 7074 } else { 7075 struct scb *scb; 7076 int i, empty; 7077 7078 if (lstate == NULL) { 7079 ccb->ccb_h.status = CAM_LUN_INVALID; 7080 return; 7081 } 7082 7083 ahc_lock(ahc, &s); 7084 7085 ccb->ccb_h.status = CAM_REQ_CMP; 7086 LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) { 7087 struct ccb_hdr *ccbh; 7088 7089 ccbh = &scb->io_ctx->ccb_h; 7090 if (ccbh->func_code == XPT_CONT_TARGET_IO 7091 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){ 7092 printf("CTIO pending\n"); 7093 ccb->ccb_h.status = CAM_REQ_INVALID; 7094 ahc_unlock(ahc, &s); 7095 return; 7096 } 7097 } 7098 7099 if (SLIST_FIRST(&lstate->accept_tios) != NULL) { 7100 printf("ATIOs pending\n"); 7101 ccb->ccb_h.status = CAM_REQ_INVALID; 7102 } 7103 7104 if (SLIST_FIRST(&lstate->immed_notifies) != NULL) { 7105 printf("INOTs pending\n"); 7106 ccb->ccb_h.status = CAM_REQ_INVALID; 7107 } 7108 7109 if (ccb->ccb_h.status != CAM_REQ_CMP) { 7110 ahc_unlock(ahc, &s); 7111 return; 7112 } 7113 7114 xpt_print_path(ccb->ccb_h.path); 7115 printf("Target mode disabled\n"); 7116 xpt_free_path(lstate->path); 7117 free(lstate, M_DEVBUF); 7118 7119 ahc_pause(ahc); 7120 /* Can we clean up the target too? */ 7121 if (target != CAM_TARGET_WILDCARD) { 7122 tstate->enabled_luns[lun] = NULL; 7123 ahc->enabled_luns--; 7124 for (empty = 1, i = 0; i < 8; i++) 7125 if (tstate->enabled_luns[i] != NULL) { 7126 empty = 0; 7127 break; 7128 } 7129 7130 if (empty) { 7131 ahc_free_tstate(ahc, target, channel, 7132 /*force*/FALSE); 7133 if (ahc->features & AHC_MULTI_TID) { 7134 u_int targid_mask; 7135 7136 targid_mask = ahc_inb(ahc, TARGID) 7137 | (ahc_inb(ahc, TARGID + 1) 7138 << 8); 7139 7140 targid_mask &= ~target_mask; 7141 ahc_outb(ahc, TARGID, targid_mask); 7142 ahc_outb(ahc, TARGID+1, 7143 (targid_mask >> 8)); 7144 ahc_update_scsiid(ahc, targid_mask); 7145 } 7146 } 7147 } else { 7148 7149 ahc->black_hole = NULL; 7150 7151 /* 7152 * We can't allow selections without 7153 * our black hole device. 
7154 */ 7155 empty = TRUE; 7156 } 7157 if (ahc->enabled_luns == 0) { 7158 /* Disallow select-in */ 7159 u_int scsiseq; 7160 7161 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); 7162 scsiseq &= ~ENSELI; 7163 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); 7164 scsiseq = ahc_inb(ahc, SCSISEQ); 7165 scsiseq &= ~ENSELI; 7166 ahc_outb(ahc, SCSISEQ, scsiseq); 7167 7168 if ((ahc->features & AHC_MULTIROLE) == 0) { 7169 printf("Configuring Initiator Mode\n"); 7170 ahc->flags &= ~AHC_TARGETROLE; 7171 ahc->flags |= AHC_INITIATORROLE; 7172 ahc_pause(ahc); 7173 ahc_loadseq(ahc); 7174 } 7175 } 7176 ahc_unpause(ahc); 7177 ahc_unlock(ahc, &s); 7178 } 7179 } 7180 7181 static void 7182 ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask) 7183 { 7184 u_int scsiid_mask; 7185 u_int scsiid; 7186 7187 if ((ahc->features & AHC_MULTI_TID) == 0) 7188 panic("ahc_update_scsiid called on non-multitid unit\n"); 7189 7190 /* 7191 * Since we will rely on the TARGID mask 7192 * for selection enables, ensure that OID 7193 * in SCSIID is not set to some other ID 7194 * that we don't want to allow selections on. 7195 */ 7196 if ((ahc->features & AHC_ULTRA2) != 0) 7197 scsiid = ahc_inb(ahc, SCSIID_ULTRA2); 7198 else 7199 scsiid = ahc_inb(ahc, SCSIID); 7200 scsiid_mask = 0x1 << (scsiid & OID); 7201 if ((targid_mask & scsiid_mask) == 0) { 7202 u_int our_id; 7203 7204 /* ffs counts from 1 */ 7205 our_id = ffs(targid_mask); 7206 if (our_id == 0) 7207 our_id = ahc->our_id; 7208 else 7209 our_id--; 7210 scsiid &= TID; 7211 scsiid |= our_id; 7212 } 7213 if ((ahc->features & AHC_ULTRA2) != 0) 7214 ahc_outb(ahc, SCSIID_ULTRA2, scsiid); 7215 else 7216 ahc_outb(ahc, SCSIID, scsiid); 7217 } 7218 7219 void 7220 ahc_run_tqinfifo(struct ahc_softc *ahc, int paused) 7221 { 7222 struct target_cmd *cmd; 7223 7224 /* 7225 * If the card supports auto-access pause, 7226 * we can access the card directly regardless 7227 * of whether it is paused or not. 7228 */ 7229 if ((ahc->features & AHC_AUTOPAUSE) != 0) 7230 paused = TRUE; 7231 7232 ahc_sync_tqinfifo(ahc, BUS_DMASYNC_POSTREAD); 7233 while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) { 7234 7235 /* 7236 * Only advance through the queue if we 7237 * have the resources to process the command. 7238 */ 7239 if (ahc_handle_target_cmd(ahc, cmd) != 0) 7240 break; 7241 7242 cmd->cmd_valid = 0; 7243 ahc_dmamap_sync(ahc, ahc->parent_dmat/*shared_data_dmat*/, 7244 ahc->shared_data_dmamap, 7245 ahc_targetcmd_offset(ahc, ahc->tqinfifonext), 7246 sizeof(struct target_cmd), 7247 BUS_DMASYNC_PREREAD); 7248 ahc->tqinfifonext++; 7249 7250 /* 7251 * Lazily update our position in the target mode incoming 7252 * command queue as seen by the sequencer. 
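 * Chips with the HS_MAILBOX register can be updated directly;
 * otherwise KERNEL_TQINPOS is only touched while the sequencer
 * is paused.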
7253 */ 7254 if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) { 7255 if ((ahc->features & AHC_HS_MAILBOX) != 0) { 7256 u_int hs_mailbox; 7257 7258 hs_mailbox = ahc_inb(ahc, HS_MAILBOX); 7259 hs_mailbox &= ~HOST_TQINPOS; 7260 hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS; 7261 ahc_outb(ahc, HS_MAILBOX, hs_mailbox); 7262 } else { 7263 if (!paused) 7264 ahc_pause(ahc); 7265 ahc_outb(ahc, KERNEL_TQINPOS, 7266 ahc->tqinfifonext & HOST_TQINPOS); 7267 if (!paused) 7268 ahc_unpause(ahc); 7269 } 7270 } 7271 } 7272 } 7273 7274 static int 7275 ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd) 7276 { 7277 struct ahc_tmode_tstate *tstate; 7278 struct ahc_tmode_lstate *lstate; 7279 struct ccb_accept_tio *atio; 7280 uint8_t *byte; 7281 int initiator; 7282 int target; 7283 int lun; 7284 7285 initiator = SCSIID_TARGET(ahc, cmd->scsiid); 7286 target = SCSIID_OUR_ID(cmd->scsiid); 7287 lun = (cmd->identify & MSG_IDENTIFY_LUNMASK); 7288 7289 byte = cmd->bytes; 7290 tstate = ahc->enabled_targets[target]; 7291 lstate = NULL; 7292 if (tstate != NULL) 7293 lstate = tstate->enabled_luns[lun]; 7294 7295 /* 7296 * Commands for disabled luns go to the black hole driver. 7297 */ 7298 if (lstate == NULL) 7299 lstate = ahc->black_hole; 7300 7301 atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios); 7302 if (atio == NULL) { 7303 ahc->flags |= AHC_TQINFIFO_BLOCKED; 7304 /* 7305 * Wait for more ATIOs from the peripheral driver for this lun. 7306 */ 7307 if (bootverbose) 7308 printf("%s: ATIOs exhausted\n", ahc_name(ahc)); 7309 return (1); 7310 } else 7311 ahc->flags &= ~AHC_TQINFIFO_BLOCKED; 7312 #if 0 7313 printf("Incoming command from %d for %d:%d%s\n", 7314 initiator, target, lun, 7315 lstate == ahc->black_hole ? "(Black Holed)" : ""); 7316 #endif 7317 SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle); 7318 7319 if (lstate == ahc->black_hole) { 7320 /* Fill in the wildcards */ 7321 atio->ccb_h.target_id = target; 7322 atio->ccb_h.target_lun = lun; 7323 } 7324 7325 /* 7326 * Package it up and send it off to 7327 * whomever has this lun enabled. 7328 */ 7329 atio->sense_len = 0; 7330 atio->init_id = initiator; 7331 if (byte[0] != 0xFF) { 7332 /* Tag was included */ 7333 atio->tag_action = *byte++; 7334 atio->tag_id = *byte++; 7335 atio->ccb_h.flags = CAM_TAG_ACTION_VALID; 7336 } else { 7337 atio->ccb_h.flags = 0; 7338 } 7339 byte++; 7340 7341 /* Okay. Now determine the cdb size based on the command code */ 7342 switch (*byte >> CMD_GROUP_CODE_SHIFT) { 7343 case 0: 7344 atio->cdb_len = 6; 7345 break; 7346 case 1: 7347 case 2: 7348 atio->cdb_len = 10; 7349 break; 7350 case 4: 7351 atio->cdb_len = 16; 7352 break; 7353 case 5: 7354 atio->cdb_len = 12; 7355 break; 7356 case 3: 7357 default: 7358 /* Only copy the opcode. */ 7359 atio->cdb_len = 1; 7360 printf("Reserved or VU command code type encountered\n"); 7361 break; 7362 } 7363 7364 memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len); 7365 7366 atio->ccb_h.status |= CAM_CDB_RECVD; 7367 7368 if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) { 7369 /* 7370 * We weren't allowed to disconnect. 7371 * We're hanging on the bus until a 7372 * continue target I/O comes in response 7373 * to this accept tio. 
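 * Record the lun so the continue target I/O can be matched
 * with this bus-held command, and freeze the device queue
 * until it arrives.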
7374 */ 7375 #if 0 7376 printf("Received Immediate Command %d:%d:%d - %p\n", 7377 initiator, target, lun, ahc->pending_device); 7378 #endif 7379 ahc->pending_device = lstate; 7380 ahc_freeze_ccb((union ccb *)atio); 7381 atio->ccb_h.flags |= CAM_DIS_DISCONNECT; 7382 } 7383 xpt_done((union ccb*)atio); 7384 return (0); 7385 } 7386 #endif 7387 7388 static int 7389 ahc_createdmamem(bus_dma_tag_t tag, int size, int flags, bus_dmamap_t *mapp, 7390 void **vaddr, bus_addr_t *baddr, bus_dma_segment_t *seg, int *nseg, 7391 const char *myname, const char *what) 7392 { 7393 int error, level = 0; 7394 7395 if ((error = bus_dmamem_alloc(tag, size, PAGE_SIZE, 0, 7396 seg, 1, nseg, BUS_DMA_WAITOK)) != 0) { 7397 printf("%s: failed to allocate DMA mem for %s, error = %d\n", 7398 myname, what, error); 7399 goto out; 7400 } 7401 level++; 7402 7403 if ((error = bus_dmamem_map(tag, seg, *nseg, size, vaddr, 7404 BUS_DMA_WAITOK|BUS_DMA_COHERENT)) != 0) { 7405 printf("%s: failed to map DMA mem for %s, error = %d\n", 7406 myname, what, error); 7407 goto out; 7408 } 7409 level++; 7410 7411 if ((error = bus_dmamap_create(tag, size, 1, size, 0, 7412 BUS_DMA_WAITOK | flags, mapp)) != 0) { 7413 printf("%s: failed to create DMA map for %s, error = %d\n", 7414 myname, what, error); 7415 goto out; 7416 } 7417 level++; 7418 7419 7420 if ((error = bus_dmamap_load(tag, *mapp, *vaddr, size, NULL, 7421 BUS_DMA_WAITOK)) != 0) { 7422 printf("%s: failed to load DMA map for %s, error = %d\n", 7423 myname, what, error); 7424 goto out; 7425 } 7426 7427 *baddr = (*mapp)->dm_segs[0].ds_addr; 7428 7429 return 0; 7430 out: 7431 printf("ahc_createdmamem error (%d)\n", level); 7432 switch (level) { 7433 case 3: 7434 bus_dmamap_destroy(tag, *mapp); 7435 /* FALLTHROUGH */ 7436 case 2: 7437 bus_dmamem_unmap(tag, *vaddr, size); 7438 /* FALLTHROUGH */ 7439 case 1: 7440 bus_dmamem_free(tag, seg, *nseg); 7441 break; 7442 default: 7443 break; 7444 } 7445 7446 return -1; 7447 } 7448 7449 static void 7450 ahc_freedmamem(bus_dma_tag_t tag, int size, bus_dmamap_t map, void *vaddr, 7451 bus_dma_segment_t *seg, int nseg) 7452 { 7453 7454 bus_dmamap_unload(tag, map); 7455 bus_dmamap_destroy(tag, map); 7456 bus_dmamem_unmap(tag, vaddr, size); 7457 bus_dmamem_free(tag, seg, nseg); 7458 } 7459