1 /* $NetBSD: aic7xxx.c,v 1.114 2005/05/30 04:43:46 christos Exp $ */ 2 3 /* 4 * Core routines and tables shareable across OS platforms. 5 * 6 * Copyright (c) 1994-2002 Justin T. Gibbs. 7 * Copyright (c) 2000-2002 Adaptec Inc. 8 * All rights reserved. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions, and the following disclaimer, 15 * without modification. 16 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 17 * substantially similar to the "NO WARRANTY" disclaimer below 18 * ("Disclaimer") and any redistribution must be conditioned upon 19 * including a substantially similar Disclaimer requirement for further 20 * binary redistribution. 21 * 3. Neither the names of the above-listed copyright holders nor the names 22 * of any contributors may be used to endorse or promote products derived 23 * from this software without specific prior written permission. 24 * 25 * Alternatively, this software may be distributed under the terms of the 26 * GNU General Public License ("GPL") version 2 as published by the Free 27 * Software Foundation. 28 * 29 * NO WARRANTY 30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 31 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 32 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 33 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 34 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 35 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 36 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 37 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 38 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 39 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 40 * POSSIBILITY OF SUCH DAMAGES. 41 * 42 * $Id: aic7xxx.c,v 1.114 2005/05/30 04:43:46 christos Exp $ 43 * 44 * //depot/aic7xxx/aic7xxx/aic7xxx.c#112 $ 45 * 46 * $FreeBSD: /repoman/r/ncvs/src/sys/dev/aic7xxx/aic7xxx.c,v 1.88 2003/01/20 20:44:55 gibbs Exp $ 47 */ 48 /* 49 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc. - April 2003 50 */ 51 52 #include <sys/cdefs.h> 53 __KERNEL_RCSID(0, "$NetBSD: aic7xxx.c,v 1.114 2005/05/30 04:43:46 christos Exp $"); 54 55 #include <dev/ic/aic7xxx_osm.h> 56 #include <dev/ic/aic7xxx_inline.h> 57 #include <dev/ic/aic7xxx_cam.h> 58 59 /****************************** Softc Data ************************************/ 60 struct ahc_softc_tailq ahc_tailq = TAILQ_HEAD_INITIALIZER(ahc_tailq); 61 62 /***************************** Lookup Tables **********************************/ 63 const char *ahc_chip_names[] = 64 { 65 "NONE", 66 "aic7770", 67 "aic7850", 68 "aic7855", 69 "aic7859", 70 "aic7860", 71 "aic7870", 72 "aic7880", 73 "aic7895", 74 "aic7895C", 75 "aic7890/91", 76 "aic7896/97", 77 "aic7892", 78 "aic7899" 79 }; 80 81 /* 82 * Hardware error codes. 
83 */ 84 struct ahc_hard_error_entry { 85 uint8_t errno; 86 const char *errmesg; 87 }; 88 89 static struct ahc_hard_error_entry ahc_hard_errors[] = { 90 { ILLHADDR, "Illegal Host Access" }, 91 { ILLSADDR, "Illegal Sequencer Address referenced" }, 92 { ILLOPCODE, "Illegal Opcode in sequencer program" }, 93 { SQPARERR, "Sequencer Parity Error" }, 94 { DPARERR, "Data-path Parity Error" }, 95 { MPARERR, "Scratch or SCB Memory Parity Error" }, 96 { PCIERRSTAT, "PCI Error detected" }, 97 { CIOPARERR, "CIOBUS Parity Error" }, 98 }; 99 static const u_int num_errors = NUM_ELEMENTS(ahc_hard_errors); 100 101 static struct ahc_phase_table_entry ahc_phase_table[] = 102 { 103 { P_DATAOUT, MSG_NOOP, "in Data-out phase" }, 104 { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" }, 105 { P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" }, 106 { P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" }, 107 { P_COMMAND, MSG_NOOP, "in Command phase" }, 108 { P_MESGOUT, MSG_NOOP, "in Message-out phase" }, 109 { P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" }, 110 { P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" }, 111 { P_BUSFREE, MSG_NOOP, "while idle" }, 112 { 0, MSG_NOOP, "in unknown phase" } 113 }; 114 115 /* 116 * In most cases we only wish to iterate over real phases, so 117 * exclude the last element from the count. 118 */ 119 static const u_int num_phases = NUM_ELEMENTS(ahc_phase_table) - 1; 120 121 /* 122 * Valid SCSIRATE values. (p. 3-17) 123 * Provides a mapping of transfer periods in ns to the proper value to 124 * stick in the scsixfer reg. 125 */ 126 static struct ahc_syncrate ahc_syncrates[] = 127 { 128 /* ultra2 fast/ultra period rate */ 129 { 0x42, 0x000, 9, "80.0" }, 130 { 0x03, 0x000, 10, "40.0" }, 131 { 0x04, 0x000, 11, "33.0" }, 132 { 0x05, 0x100, 12, "20.0" }, 133 { 0x06, 0x110, 15, "16.0" }, 134 { 0x07, 0x120, 18, "13.4" }, 135 { 0x08, 0x000, 25, "10.0" }, 136 { 0x19, 0x010, 31, "8.0" }, 137 { 0x1a, 0x020, 37, "6.67" }, 138 { 0x1b, 0x030, 43, "5.7" }, 139 { 0x1c, 0x040, 50, "5.0" }, 140 { 0x00, 0x050, 56, "4.4" }, 141 { 0x00, 0x060, 62, "4.0" }, 142 { 0x00, 0x070, 68, "3.6" }, 143 { 0x00, 0x000, 0, NULL } 144 }; 145 146 /* Our Sequencer Program */ 147 #include <dev/microcode/aic7xxx/aic7xxx_seq.h> 148 149 /**************************** Function Declarations ***************************/ 150 static void ahc_force_renegotiation(struct ahc_softc *ahc); 151 static struct ahc_tmode_tstate* 152 ahc_alloc_tstate(struct ahc_softc *ahc, 153 u_int scsi_id, char channel); 154 #ifdef AHC_TARGET_MODE 155 static void ahc_free_tstate(struct ahc_softc *ahc, 156 u_int scsi_id, char channel, int force); 157 #endif 158 static struct ahc_syncrate* 159 ahc_devlimited_syncrate(struct ahc_softc *ahc, 160 struct ahc_initiator_tinfo *, 161 u_int *period, 162 u_int *ppr_options, 163 role_t role); 164 static void ahc_update_pending_scbs(struct ahc_softc *ahc); 165 static void ahc_fetch_devinfo(struct ahc_softc *ahc, 166 struct ahc_devinfo *devinfo); 167 static void ahc_scb_devinfo(struct ahc_softc *ahc, 168 struct ahc_devinfo *devinfo, 169 struct scb *scb); 170 static void ahc_assert_atn(struct ahc_softc *ahc); 171 static void ahc_setup_initiator_msgout(struct ahc_softc *ahc, 172 struct ahc_devinfo *devinfo, 173 struct scb *scb); 174 static void ahc_build_transfer_msg(struct ahc_softc *ahc, 175 struct ahc_devinfo *devinfo); 176 static void ahc_construct_sdtr(struct ahc_softc *ahc, 177 struct ahc_devinfo *devinfo, 178 u_int period, u_int offset); 179 static void ahc_construct_wdtr(struct ahc_softc *ahc,
180 struct ahc_devinfo *devinfo, 181 u_int bus_width); 182 static void ahc_construct_ppr(struct ahc_softc *ahc, 183 struct ahc_devinfo *devinfo, 184 u_int period, u_int offset, 185 u_int bus_width, u_int ppr_options); 186 static void ahc_clear_msg_state(struct ahc_softc *ahc); 187 static void ahc_handle_proto_violation(struct ahc_softc *ahc); 188 static void ahc_handle_message_phase(struct ahc_softc *ahc); 189 typedef enum { 190 AHCMSG_1B, 191 AHCMSG_2B, 192 AHCMSG_EXT 193 } ahc_msgtype; 194 static int ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, 195 u_int msgval, int full); 196 static int ahc_parse_msg(struct ahc_softc *ahc, 197 struct ahc_devinfo *devinfo); 198 static int ahc_handle_msg_reject(struct ahc_softc *ahc, 199 struct ahc_devinfo *devinfo); 200 static void ahc_handle_ign_wide_residue(struct ahc_softc *ahc, 201 struct ahc_devinfo *devinfo); 202 static void ahc_reinitialize_dataptrs(struct ahc_softc *ahc); 203 static void ahc_handle_devreset(struct ahc_softc *ahc, 204 struct ahc_devinfo *devinfo, 205 cam_status status, 206 const char *message, 207 int verbose_level); 208 #if AHC_TARGET_MODE 209 static void ahc_setup_target_msgin(struct ahc_softc *ahc, 210 struct ahc_devinfo *devinfo, 211 struct scb *scb); 212 #endif 213 214 //static bus_dmamap_callback_t ahc_dmamap_cb; 215 static void ahc_build_free_scb_list(struct ahc_softc *ahc); 216 static int ahc_init_scbdata(struct ahc_softc *ahc); 217 static void ahc_fini_scbdata(struct ahc_softc *ahc); 218 static void ahc_qinfifo_requeue(struct ahc_softc *ahc, 219 struct scb *prev_scb, 220 struct scb *scb); 221 static int ahc_qinfifo_count(struct ahc_softc *ahc); 222 static u_int ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, 223 u_int prev, u_int scbptr); 224 static void ahc_add_curscb_to_free_list(struct ahc_softc *ahc); 225 static u_int ahc_rem_wscb(struct ahc_softc *ahc, 226 u_int scbpos, u_int prev); 227 static void ahc_reset_current_bus(struct ahc_softc *ahc); 228 #ifdef AHC_DUMP_SEQ 229 static void ahc_dumpseq(struct ahc_softc *ahc); 230 #endif 231 static void ahc_loadseq(struct ahc_softc *ahc); 232 static int ahc_check_patch(struct ahc_softc *ahc, 233 struct patch **start_patch, 234 u_int start_instr, u_int *skip_addr); 235 static void ahc_download_instr(struct ahc_softc *ahc, 236 u_int instrptr, uint8_t *dconsts); 237 #ifdef AHC_TARGET_MODE 238 static void ahc_queue_lstate_event(struct ahc_softc *ahc, 239 struct ahc_tmode_lstate *lstate, 240 u_int initiator_id, 241 u_int event_type, 242 u_int event_arg); 243 static void ahc_update_scsiid(struct ahc_softc *ahc, 244 u_int targid_mask); 245 static int ahc_handle_target_cmd(struct ahc_softc *ahc, 246 struct target_cmd *cmd); 247 #endif 248 249 /************************** Added for porting to NetBSD ***********************/ 250 static int ahc_createdmamem(bus_dma_tag_t tag, 251 int size, 252 int flags, 253 bus_dmamap_t *mapp, 254 caddr_t *vaddr, 255 bus_addr_t *baddr, 256 bus_dma_segment_t *seg, 257 int *nseg, 258 const char *myname, const char *what); 259 static void ahc_freedmamem(bus_dma_tag_t tag, 260 int size, 261 bus_dmamap_t map, 262 caddr_t vaddr, 263 bus_dma_segment_t *seg, 264 int nseg); 265 266 /************************* Sequencer Execution Control ************************/ 267 /* 268 * Restart the sequencer program from address zero 269 */ 270 void 271 ahc_restart(struct ahc_softc *ahc) 272 { 273 274 ahc_pause(ahc); 275 276 /* No more pending messages. 
*/ 277 ahc_clear_msg_state(ahc); 278 279 ahc_outb(ahc, SCSISIGO, 0); /* De-assert BSY */ 280 ahc_outb(ahc, MSG_OUT, MSG_NOOP); /* No message to send */ 281 ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET); 282 ahc_outb(ahc, LASTPHASE, P_BUSFREE); 283 ahc_outb(ahc, SAVED_SCSIID, 0xFF); 284 ahc_outb(ahc, SAVED_LUN, 0xFF); 285 286 /* 287 * Ensure that the sequencer's idea of TQINPOS 288 * matches our own. The sequencer increments TQINPOS 289 * only after it sees a DMA complete and a reset could 290 * occur before the increment leaving the kernel to believe 291 * the command arrived but the sequencer to not. 292 */ 293 ahc_outb(ahc, TQINPOS, ahc->tqinfifonext); 294 295 /* Always allow reselection */ 296 ahc_outb(ahc, SCSISEQ, 297 ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP)); 298 if ((ahc->features & AHC_CMD_CHAN) != 0) { 299 /* Ensure that no DMA operations are in progress */ 300 ahc_outb(ahc, CCSCBCNT, 0); 301 ahc_outb(ahc, CCSGCTL, 0); 302 ahc_outb(ahc, CCSCBCTL, 0); 303 } 304 /* 305 * If we were in the process of DMA'ing SCB data into 306 * an SCB, replace that SCB on the free list. This prevents 307 * an SCB leak. 308 */ 309 if ((ahc_inb(ahc, SEQ_FLAGS2) & SCB_DMA) != 0) { 310 ahc_add_curscb_to_free_list(ahc); 311 ahc_outb(ahc, SEQ_FLAGS2, 312 ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA); 313 } 314 ahc_outb(ahc, MWI_RESIDUAL, 0); 315 ahc_outb(ahc, SEQCTL, FASTMODE); 316 ahc_outb(ahc, SEQADDR0, 0); 317 ahc_outb(ahc, SEQADDR1, 0); 318 ahc_unpause(ahc); 319 } 320 321 /************************* Input/Output Queues ********************************/ 322 void 323 ahc_run_qoutfifo(struct ahc_softc *ahc) 324 { 325 struct scb *scb; 326 u_int scb_index; 327 328 ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD); 329 while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) { 330 331 scb_index = ahc->qoutfifo[ahc->qoutfifonext]; 332 if ((ahc->qoutfifonext & 0x03) == 0x03) { 333 u_int modnext; 334 335 /* 336 * Clear 32bits of QOUTFIFO at a time 337 * so that we don't clobber an incoming 338 * byte DMA to the array on architectures 339 * that only support 32bit load and store 340 * operations. 341 */ 342 modnext = ahc->qoutfifonext & ~0x3; 343 *((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL; 344 ahc_dmamap_sync(ahc, ahc->parent_dmat /*shared_data_dmat*/, 345 ahc->shared_data_dmamap, 346 /*offset*/modnext, /*len*/4, 347 BUS_DMASYNC_PREREAD); 348 } 349 ahc->qoutfifonext++; 350 351 scb = ahc_lookup_scb(ahc, scb_index); 352 if (scb == NULL) { 353 printf("%s: WARNING no command for scb %d " 354 "(cmdcmplt)\nQOUTPOS = %d\n", 355 ahc_name(ahc), scb_index, 356 (ahc->qoutfifonext - 1) & 0xFF); 357 continue; 358 } 359 360 /* 361 * Save off the residual 362 * if there is one. 
363 */ 364 ahc_update_residual(ahc, scb); 365 ahc_done(ahc, scb); 366 } 367 } 368 369 void 370 ahc_run_untagged_queues(struct ahc_softc *ahc) 371 { 372 int i; 373 374 for (i = 0; i < 16; i++) 375 ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]); 376 } 377 378 void 379 ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue) 380 { 381 struct scb *scb; 382 383 if (ahc->untagged_queue_lock != 0) 384 return; 385 386 if ((scb = TAILQ_FIRST(queue)) != NULL 387 && (scb->flags & SCB_ACTIVE) == 0) { 388 scb->flags |= SCB_ACTIVE; 389 ahc_queue_scb(ahc, scb); 390 } 391 } 392 393 /************************* Interrupt Handling *********************************/ 394 void 395 ahc_handle_brkadrint(struct ahc_softc *ahc) 396 { 397 /* 398 * We upset the sequencer :-( 399 * Lookup the error message 400 */ 401 int i; 402 int error; 403 404 error = ahc_inb(ahc, ERROR); 405 for (i = 0; error != 1 && i < num_errors; i++) 406 error >>= 1; 407 printf("%s: brkadrint, %s at seqaddr = 0x%x\n", 408 ahc_name(ahc), ahc_hard_errors[i].errmesg, 409 ahc_inb(ahc, SEQADDR0) | 410 (ahc_inb(ahc, SEQADDR1) << 8)); 411 412 ahc_dump_card_state(ahc); 413 414 /* Tell everyone that this HBA is no longer available */ 415 ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS, 416 CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, 417 CAM_NO_HBA); 418 419 /* Disable all interrupt sources by resetting the controller */ 420 ahc_shutdown(ahc); 421 } 422 423 void 424 ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat) 425 { 426 struct scb *scb; 427 struct ahc_devinfo devinfo; 428 429 ahc_fetch_devinfo(ahc, &devinfo); 430 431 /* 432 * Clear the upper byte that holds SEQINT status 433 * codes and clear the SEQINT bit. We will unpause 434 * the sequencer, if appropriate, after servicing 435 * the request. 436 */ 437 ahc_outb(ahc, CLRINT, CLRSEQINT); 438 switch (intstat & SEQINT_MASK) { 439 case BAD_STATUS: 440 { 441 u_int scb_index; 442 struct hardware_scb *hscb; 443 444 /* 445 * Set the default return value to 0 (don't 446 * send sense). The sense code will change 447 * this if needed. 448 */ 449 ahc_outb(ahc, RETURN_1, 0); 450 451 /* 452 * The sequencer will notify us when a command 453 * has an error that would be of interest to 454 * the kernel. This allows us to leave the sequencer 455 * running in the common case of command completes 456 * without error. The sequencer will already have 457 * DMA'd the SCB back up to us, so we can reference 458 * the in kernel copy directly. 459 */ 460 scb_index = ahc_inb(ahc, SCB_TAG); 461 scb = ahc_lookup_scb(ahc, scb_index); 462 if (scb == NULL) { 463 ahc_print_devinfo(ahc, &devinfo); 464 printf("ahc_intr - referenced scb " 465 "not valid during seqint 0x%x scb(%d)\n", 466 intstat, scb_index); 467 ahc_dump_card_state(ahc); 468 panic("for safety"); 469 goto unpause; 470 } 471 472 hscb = scb->hscb; 473 474 /* Don't want to clobber the original sense code */ 475 if ((scb->flags & SCB_SENSE) != 0) { 476 /* 477 * Clear the SCB_SENSE Flag and have 478 * the sequencer do a normal command 479 * complete. 480 */ 481 scb->flags &= ~SCB_SENSE; 482 break; 483 } 484 /* Freeze the queue until the client sees the error. 
*/ 485 ahc_freeze_devq(ahc, scb); 486 ahc_freeze_scb(scb); 487 ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status); 488 switch (hscb->shared_data.status.scsi_status) { 489 case SCSI_STATUS_OK: 490 printf("%s: Interrupted for status of 0 (?)\n", 491 ahc_name(ahc)); 492 break; 493 case SCSI_STATUS_CMD_TERMINATED: 494 case SCSI_STATUS_CHECK_COND: 495 { 496 struct ahc_dma_seg *sg; 497 struct scsi_request_sense *sc; 498 struct ahc_initiator_tinfo *targ_info; 499 struct ahc_tmode_tstate *tstate; 500 struct ahc_transinfo *tinfo; 501 #ifdef AHC_DEBUG 502 if (ahc_debug & AHC_SHOW_SENSE) { 503 ahc_print_path(ahc, scb); 504 printf("SCB %d: requests Check Status\n", 505 scb->hscb->tag); 506 } 507 #endif 508 509 if (ahc_perform_autosense(scb) == 0) 510 break; 511 512 targ_info = ahc_fetch_transinfo(ahc, 513 devinfo.channel, 514 devinfo.our_scsiid, 515 devinfo.target, 516 &tstate); 517 tinfo = &targ_info->curr; 518 sg = scb->sg_list; 519 sc = (struct scsi_request_sense *)(&hscb->shared_data.cdb); 520 /* 521 * Save off the residual if there is one. 522 */ 523 ahc_update_residual(ahc, scb); 524 #ifdef AHC_DEBUG 525 if (ahc_debug & AHC_SHOW_SENSE) { 526 ahc_print_path(ahc, scb); 527 printf("Sending Sense\n"); 528 } 529 #endif 530 sg->addr = ahc_get_sense_bufaddr(ahc, scb); 531 sg->len = ahc_get_sense_bufsize(ahc, scb); 532 sg->len |= AHC_DMA_LAST_SEG; 533 534 /* Fixup byte order */ 535 sg->addr = ahc_htole32(sg->addr); 536 sg->len = ahc_htole32(sg->len); 537 538 memset(sc, 0, sizeof(*sc)); 539 sc->opcode = SCSI_REQUEST_SENSE; 540 if (tinfo->protocol_version <= SCSI_REV_2 541 && SCB_GET_LUN(scb) < 8) 542 sc->byte2 = SCB_GET_LUN(scb) << 5; 543 sc->length = sg->len; 544 545 /* 546 * We can't allow the target to disconnect. 547 * This will be an untagged transaction and 548 * having the target disconnect will make this 549 * transaction indistinguishable from outstanding 550 * tagged transactions. 551 */ 552 hscb->control = 0; 553 554 /* 555 * This request sense could be because the 556 * the device lost power or in some other 557 * way has lost our transfer negotiations. 558 * Renegotiate if appropriate. Unit attention 559 * errors will be reported before any data 560 * phases occur. 561 */ 562 if (ahc_get_residual(scb) 563 == ahc_get_transfer_length(scb)) { 564 ahc_update_neg_request(ahc, &devinfo, 565 tstate, targ_info, 566 AHC_NEG_IF_NON_ASYNC); 567 } 568 if (tstate->auto_negotiate & devinfo.target_mask) { 569 hscb->control |= MK_MESSAGE; 570 scb->flags &= ~SCB_NEGOTIATE; 571 scb->flags |= SCB_AUTO_NEGOTIATE; 572 } 573 hscb->cdb_len = sizeof(*sc); 574 hscb->dataptr = sg->addr; 575 hscb->datacnt = sg->len; 576 hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID; 577 hscb->sgptr = ahc_htole32(hscb->sgptr); 578 scb->sg_count = 1; 579 scb->flags |= SCB_SENSE; 580 ahc_qinfifo_requeue_tail(ahc, scb); 581 ahc_outb(ahc, RETURN_1, SEND_SENSE); 582 /* 583 * Ensure we have enough time to actually 584 * retrieve the sense. 
585 */ 586 ahc_scb_timer_reset(scb, 5 * 1000000); 587 break; 588 } 589 default: 590 break; 591 } 592 break; 593 } 594 case NO_MATCH: 595 { 596 /* Ensure we don't leave the selection hardware on */ 597 ahc_outb(ahc, SCSISEQ, 598 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 599 600 printf("%s:%c:%d: no active SCB for reconnecting " 601 "target - issuing BUS DEVICE RESET\n", 602 ahc_name(ahc), devinfo.channel, devinfo.target); 603 printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " 604 "ARG_1 == 0x%x ACCUM = 0x%x\n", 605 ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), 606 ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); 607 printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " 608 "SINDEX == 0x%x\n", 609 ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), 610 ahc_index_busy_tcl(ahc, 611 BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), 612 ahc_inb(ahc, SAVED_LUN))), 613 ahc_inb(ahc, SINDEX)); 614 printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " 615 "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", 616 ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), 617 ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), 618 ahc_inb(ahc, SCB_CONTROL)); 619 printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", 620 ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); 621 printf("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0)); 622 printf("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL)); 623 ahc_dump_card_state(ahc); 624 ahc->msgout_buf[0] = MSG_BUS_DEV_RESET; 625 ahc->msgout_len = 1; 626 ahc->msgout_index = 0; 627 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 628 ahc_outb(ahc, MSG_OUT, HOST_MSG); 629 ahc_assert_atn(ahc); 630 break; 631 } 632 case SEND_REJECT: 633 { 634 u_int rejbyte = ahc_inb(ahc, ACCUM); 635 printf("%s:%c:%d: Warning - unknown message received from " 636 "target (0x%x). Rejecting\n", 637 ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte); 638 break; 639 } 640 case PROTO_VIOLATION: 641 { 642 ahc_handle_proto_violation(ahc); 643 break; 644 } 645 case IGN_WIDE_RES: 646 ahc_handle_ign_wide_residue(ahc, &devinfo); 647 break; 648 case PDATA_REINIT: 649 ahc_reinitialize_dataptrs(ahc); 650 break; 651 case BAD_PHASE: 652 { 653 u_int lastphase; 654 655 lastphase = ahc_inb(ahc, LASTPHASE); 656 printf("%s:%c:%d: unknown scsi bus phase %x, " 657 "lastphase = 0x%x. Attempting to continue\n", 658 ahc_name(ahc), devinfo.channel, devinfo.target, 659 lastphase, ahc_inb(ahc, SCSISIGI)); 660 break; 661 } 662 case MISSED_BUSFREE: 663 { 664 u_int lastphase; 665 666 lastphase = ahc_inb(ahc, LASTPHASE); 667 printf("%s:%c:%d: Missed busfree. " 668 "Lastphase = 0x%x, Curphase = 0x%x\n", 669 ahc_name(ahc), devinfo.channel, devinfo.target, 670 lastphase, ahc_inb(ahc, SCSISIGI)); 671 ahc_restart(ahc); 672 return; 673 } 674 case HOST_MSG_LOOP: 675 { 676 /* 677 * The sequencer has encountered a message phase 678 * that requires host assistance for completion. 679 * While handling the message phase(s), we will be 680 * notified by the sequencer after each byte is 681 * transferred so we can track bus phase changes. 682 * 683 * If this is the first time we've seen a HOST_MSG_LOOP 684 * interrupt, initialize the state of the host message 685 * loop. 686 */ 687 if (ahc->msg_type == MSG_TYPE_NONE) { 688 struct scb *scb1; 689 u_int scb_index; 690 u_int bus_phase; 691 692 bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 693 if (bus_phase != P_MESGIN 694 && bus_phase != P_MESGOUT) { 695 printf("ahc_intr: HOST_MSG_LOOP bad " 696 "phase 0x%x\n", 697 bus_phase); 698 /* 699 * Probably transitioned to bus free before 700 * we got here. Just punt the message. 
701 */ 702 ahc_clear_intstat(ahc); 703 ahc_restart(ahc); 704 return; 705 } 706 707 scb_index = ahc_inb(ahc, SCB_TAG); 708 scb1 = ahc_lookup_scb(ahc, scb_index); 709 if (devinfo.role == ROLE_INITIATOR) { 710 if (scb1 == NULL) 711 panic("HOST_MSG_LOOP with " 712 "invalid SCB %x\n", scb_index); 713 714 if (bus_phase == P_MESGOUT) 715 ahc_setup_initiator_msgout(ahc, 716 &devinfo, 717 scb1); 718 else { 719 ahc->msg_type = 720 MSG_TYPE_INITIATOR_MSGIN; 721 ahc->msgin_index = 0; 722 } 723 } 724 #if AHC_TARGET_MODE 725 else { 726 if (bus_phase == P_MESGOUT) { 727 ahc->msg_type = 728 MSG_TYPE_TARGET_MSGOUT; 729 ahc->msgin_index = 0; 730 } 731 else 732 ahc_setup_target_msgin(ahc, 733 &devinfo, 734 scb1); 735 } 736 #endif 737 } 738 739 ahc_handle_message_phase(ahc); 740 break; 741 } 742 case PERR_DETECTED: 743 { 744 /* 745 * If we've cleared the parity error interrupt 746 * but the sequencer still believes that SCSIPERR 747 * is true, it must be that the parity error is 748 * for the currently presented byte on the bus, 749 * and we are not in a phase (data-in) where we will 750 * eventually ack this byte. Ack the byte and 751 * throw it away in the hope that the target will 752 * take us to message out to deliver the appropriate 753 * error message. 754 */ 755 if ((intstat & SCSIINT) == 0 756 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) { 757 758 if ((ahc->features & AHC_DT) == 0) { 759 u_int curphase; 760 761 /* 762 * The hardware will only let you ack bytes 763 * if the expected phase in SCSISIGO matches 764 * the current phase. Make sure this is 765 * currently the case. 766 */ 767 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 768 ahc_outb(ahc, LASTPHASE, curphase); 769 ahc_outb(ahc, SCSISIGO, curphase); 770 } 771 if ((ahc_inb(ahc, SCSISIGI) & (CDI|MSGI)) == 0) { 772 int wait; 773 774 /* 775 * In a data phase. Faster to bitbucket 776 * the data than to individually ack each 777 * byte. This is also the only strategy 778 * that will work with AUTOACK enabled. 779 */ 780 ahc_outb(ahc, SXFRCTL1, 781 ahc_inb(ahc, SXFRCTL1) | BITBUCKET); 782 wait = 5000; 783 while (--wait != 0) { 784 if ((ahc_inb(ahc, SCSISIGI) 785 & (CDI|MSGI)) != 0) 786 break; 787 ahc_delay(100); 788 } 789 ahc_outb(ahc, SXFRCTL1, 790 ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET); 791 if (wait == 0) { 792 struct scb *scb1; 793 u_int scb_index; 794 795 ahc_print_devinfo(ahc, &devinfo); 796 printf("Unable to clear parity error. " 797 "Resetting bus.\n"); 798 scb_index = ahc_inb(ahc, SCB_TAG); 799 scb1 = ahc_lookup_scb(ahc, scb_index); 800 if (scb1 != NULL) 801 ahc_set_transaction_status(scb1, 802 CAM_UNCOR_PARITY); 803 ahc_reset_channel(ahc, devinfo.channel, 804 /*init reset*/TRUE); 805 } 806 } else { 807 ahc_inb(ahc, SCSIDATL); 808 } 809 } 810 break; 811 } 812 case DATA_OVERRUN: 813 { 814 /* 815 * When the sequencer detects an overrun, it 816 * places the controller in "BITBUCKET" mode 817 * and allows the target to complete its transfer. 818 * Unfortunately, none of the counters get updated 819 * when the controller is in this mode, so we have 820 * no way of knowing how large the overrun was. 821 */ 822 u_int scbindex = ahc_inb(ahc, SCB_TAG); 823 u_int lastphase = ahc_inb(ahc, LASTPHASE); 824 u_int i; 825 826 scb = ahc_lookup_scb(ahc, scbindex); 827 for (i = 0; i < num_phases; i++) { 828 if (lastphase == ahc_phase_table[i].phase) 829 break; 830 } 831 ahc_print_path(ahc, scb); 832 printf("data overrun detected %s." 
833 " Tag == 0x%x.\n", 834 ahc_phase_table[i].phasemsg, 835 scb->hscb->tag); 836 ahc_print_path(ahc, scb); 837 printf("%s seen Data Phase. Length = %ld. NumSGs = %d.\n", 838 ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't", 839 ahc_get_transfer_length(scb), scb->sg_count); 840 if (scb->sg_count > 0) { 841 for (i = 0; i < scb->sg_count; i++) { 842 843 printf("sg[%d] - Addr 0x%x%x : Length %d\n", 844 i, 845 (ahc_le32toh(scb->sg_list[i].len) >> 24 846 & SG_HIGH_ADDR_BITS), 847 ahc_le32toh(scb->sg_list[i].addr), 848 ahc_le32toh(scb->sg_list[i].len) 849 & AHC_SG_LEN_MASK); 850 } 851 } 852 /* 853 * Set this and it will take effect when the 854 * target does a command complete. 855 */ 856 ahc_freeze_devq(ahc, scb); 857 if ((scb->flags & SCB_SENSE) == 0) { 858 ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR); 859 } else { 860 scb->flags &= ~SCB_SENSE; 861 ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); 862 } 863 ahc_freeze_scb(scb); 864 865 if ((ahc->features & AHC_ULTRA2) != 0) { 866 /* 867 * Clear the channel in case we return 868 * to data phase later. 869 */ 870 ahc_outb(ahc, SXFRCTL0, 871 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN); 872 ahc_outb(ahc, SXFRCTL0, 873 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN); 874 } 875 if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { 876 u_int dscommand1; 877 878 /* Ensure HHADDR is 0 for future DMA operations. */ 879 dscommand1 = ahc_inb(ahc, DSCOMMAND1); 880 ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0); 881 ahc_outb(ahc, HADDR, 0); 882 ahc_outb(ahc, DSCOMMAND1, dscommand1); 883 } 884 break; 885 } 886 case MKMSG_FAILED: 887 { 888 u_int scbindex; 889 890 printf("%s:%c:%d:%d: Attempt to issue message failed\n", 891 ahc_name(ahc), devinfo.channel, devinfo.target, 892 devinfo.lun); 893 scbindex = ahc_inb(ahc, SCB_TAG); 894 scb = ahc_lookup_scb(ahc, scbindex); 895 if (scb != NULL 896 && (scb->flags & SCB_RECOVERY_SCB) != 0) 897 /* 898 * Ensure that we didn't put a second instance of this 899 * SCB into the QINFIFO. 900 */ 901 ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb), 902 SCB_GET_CHANNEL(ahc, scb), 903 SCB_GET_LUN(scb), scb->hscb->tag, 904 ROLE_INITIATOR, /*status*/0, 905 SEARCH_REMOVE); 906 break; 907 } 908 case NO_FREE_SCB: 909 { 910 printf("%s: No free or disconnected SCBs\n", ahc_name(ahc)); 911 ahc_dump_card_state(ahc); 912 panic("for safety"); 913 break; 914 } 915 case SCB_MISMATCH: 916 { 917 u_int scbptr; 918 919 scbptr = ahc_inb(ahc, SCBPTR); 920 printf("Bogus TAG after DMA. 
SCBPTR %d, tag %d, our tag %d\n", 921 scbptr, ahc_inb(ahc, ARG_1), 922 ahc->scb_data->hscbs[scbptr].tag); 923 ahc_dump_card_state(ahc); 924 panic("for safety"); 925 break; 926 } 927 case OUT_OF_RANGE: 928 { 929 printf("%s: BTT calculation out of range\n", ahc_name(ahc)); 930 printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " 931 "ARG_1 == 0x%x ACCUM = 0x%x\n", 932 ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), 933 ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); 934 printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " 935 "SINDEX == 0x%x, A == 0x%x\n", 936 ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), 937 ahc_index_busy_tcl(ahc, 938 BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), 939 ahc_inb(ahc, SAVED_LUN))), 940 ahc_inb(ahc, SINDEX), 941 ahc_inb(ahc, ACCUM)); 942 printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " 943 "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", 944 ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), 945 ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), 946 ahc_inb(ahc, SCB_CONTROL)); 947 printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", 948 ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); 949 ahc_dump_card_state(ahc); 950 panic("for safety"); 951 break; 952 } 953 default: 954 printf("ahc_intr: seqint, " 955 "intstat == 0x%x, scsisigi = 0x%x\n", 956 intstat, ahc_inb(ahc, SCSISIGI)); 957 break; 958 } 959 unpause: 960 /* 961 * The sequencer is paused immediately on 962 * a SEQINT, so we should restart it when 963 * we're done. 964 */ 965 ahc_unpause(ahc); 966 } 967 968 void 969 ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat) 970 { 971 u_int scb_index; 972 u_int status0; 973 u_int status; 974 struct scb *scb; 975 char cur_channel; 976 char intr_channel; 977 978 if ((ahc->features & AHC_TWIN) != 0 979 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0)) 980 cur_channel = 'B'; 981 else 982 cur_channel = 'A'; 983 intr_channel = cur_channel; 984 985 if ((ahc->features & AHC_ULTRA2) != 0) 986 status0 = ahc_inb(ahc, SSTAT0) & IOERR; 987 else 988 status0 = 0; 989 status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); 990 if (status == 0 && status0 == 0) { 991 if ((ahc->features & AHC_TWIN) != 0) { 992 /* Try the other channel */ 993 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); 994 status = ahc_inb(ahc, SSTAT1) 995 & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); 996 intr_channel = (cur_channel == 'A') ? 'B' : 'A'; 997 } 998 if (status == 0) { 999 printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc)); 1000 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1001 ahc_unpause(ahc); 1002 return; 1003 } 1004 } 1005 1006 /* Make sure the sequencer is in a safe location. */ 1007 ahc_clear_critical_section(ahc); 1008 1009 scb_index = ahc_inb(ahc, SCB_TAG); 1010 scb = ahc_lookup_scb(ahc, scb_index); 1011 if (scb != NULL 1012 && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) != 0) 1013 scb = NULL; 1014 1015 if ((ahc->features & AHC_ULTRA2) != 0 1016 && (status0 & IOERR) != 0) { 1017 int now_lvd; 1018 1019 now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40; 1020 printf("%s: Transceiver State Has Changed to %s mode\n", 1021 ahc_name(ahc), now_lvd ? "LVD" : "SE"); 1022 ahc_outb(ahc, CLRSINT0, CLRIOERR); 1023 /* 1024 * When transitioning to SE mode, the reset line 1025 * glitches, triggering an arbitration bug in some 1026 * Ultra2 controllers. This bug is cleared when we 1027 * assert the reset line. Since a reset glitch has 1028 * already occurred with this transition and a 1029 * transceiver state change is handled just like 1030 * a bus reset anyway, asserting the reset line 1031 * ourselves is safe.
1032 */ 1033 ahc_reset_channel(ahc, intr_channel, 1034 /*Initiate Reset*/now_lvd == 0); 1035 } else if ((status & SCSIRSTI) != 0) { 1036 printf("%s: Someone reset channel %c\n", 1037 ahc_name(ahc), intr_channel); 1038 if (intr_channel != cur_channel) 1039 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); 1040 ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE); 1041 } else if ((status & SCSIPERR) != 0) { 1042 /* 1043 * Determine the bus phase and queue an appropriate message. 1044 * SCSIPERR is latched true as soon as a parity error 1045 * occurs. If the sequencer acked the transfer that 1046 * caused the parity error and the currently presented 1047 * transfer on the bus has correct parity, SCSIPERR will 1048 * be cleared by CLRSCSIPERR. Use this to determine if 1049 * we should look at the last phase the sequencer recorded, 1050 * or the current phase presented on the bus. 1051 */ 1052 u_int mesg_out; 1053 u_int curphase; 1054 u_int errorphase; 1055 u_int lastphase; 1056 u_int scsirate; 1057 u_int i; 1058 u_int sstat2; 1059 int silent; 1060 1061 lastphase = ahc_inb(ahc, LASTPHASE); 1062 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 1063 sstat2 = ahc_inb(ahc, SSTAT2); 1064 ahc_outb(ahc, CLRSINT1, CLRSCSIPERR); 1065 /* 1066 * For all phases save DATA, the sequencer won't 1067 * automatically ack a byte that has a parity error 1068 * in it. So the only way that the current phase 1069 * could be 'data-in' is if the parity error is for 1070 * an already acked byte in the data phase. During 1071 * synchronous data-in transfers, we may actually 1072 * ack bytes before latching the current phase in 1073 * LASTPHASE, leading to the discrepancy between 1074 * curphase and lastphase. 1075 */ 1076 if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0 1077 || curphase == P_DATAIN || curphase == P_DATAIN_DT) 1078 errorphase = curphase; 1079 else 1080 errorphase = lastphase; 1081 1082 for (i = 0; i < num_phases; i++) { 1083 if (errorphase == ahc_phase_table[i].phase) 1084 break; 1085 } 1086 mesg_out = ahc_phase_table[i].mesg_out; 1087 silent = FALSE; 1088 if (scb != NULL) { 1089 if (SCB_IS_SILENT(scb)) 1090 silent = TRUE; 1091 else 1092 ahc_print_path(ahc, scb); 1093 scb->flags |= SCB_TRANSMISSION_ERROR; 1094 } else 1095 printf("%s:%c:%d: ", ahc_name(ahc), intr_channel, 1096 SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID))); 1097 scsirate = ahc_inb(ahc, SCSIRATE); 1098 if (silent == FALSE) { 1099 printf("parity error detected %s. " 1100 "SEQADDR(0x%x) SCSIRATE(0x%x)\n", 1101 ahc_phase_table[i].phasemsg, 1102 ahc_inw(ahc, SEQADDR0), 1103 scsirate); 1104 if ((ahc->features & AHC_DT) != 0) { 1105 if ((sstat2 & CRCVALERR) != 0) 1106 printf("\tCRC Value Mismatch\n"); 1107 if ((sstat2 & CRCENDERR) != 0) 1108 printf("\tNo terminal CRC packet " 1109 "received\n"); 1110 if ((sstat2 & CRCREQERR) != 0) 1111 printf("\tIllegal CRC packet " 1112 "request\n"); 1113 if ((sstat2 & DUAL_EDGE_ERR) != 0) 1114 printf("\tUnexpected %sDT Data Phase\n", 1115 (scsirate & SINGLE_EDGE) 1116 ? "" : "non-"); 1117 } 1118 } 1119 1120 if ((ahc->features & AHC_DT) != 0 1121 && (sstat2 & DUAL_EDGE_ERR) != 0) { 1122 /* 1123 * This error applies regardless of 1124 * data direction, so ignore the value 1125 * in the phase table. 1126 */ 1127 mesg_out = MSG_INITIATOR_DET_ERR; 1128 } 1129 1130 /* 1131 * We've set the hardware to assert ATN if we 1132 * get a parity error on "in" phases, so all we 1133 * need to do is stuff the message buffer with 1134 * the appropriate message.
"In" phases have set 1135 * mesg_out to something other than MSG_NOOP. 1136 */ 1137 if (mesg_out != MSG_NOOP) { 1138 if (ahc->msg_type != MSG_TYPE_NONE) 1139 ahc->send_msg_perror = TRUE; 1140 else 1141 ahc_outb(ahc, MSG_OUT, mesg_out); 1142 } 1143 /* 1144 * Force a renegotiation with this target just in 1145 * case we are out of sync for some external reason 1146 * unknown (or unreported) by the target. 1147 */ 1148 ahc_force_renegotiation(ahc); 1149 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1150 ahc_unpause(ahc); 1151 } else if ((status & SELTO) != 0) { 1152 u_int scbptr; 1153 1154 /* Stop the selection */ 1155 ahc_outb(ahc, SCSISEQ, 0); 1156 1157 /* No more pending messages */ 1158 ahc_clear_msg_state(ahc); 1159 1160 /* Clear interrupt state */ 1161 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); 1162 ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR); 1163 1164 /* 1165 * Although the driver does not care about the 1166 * 'Selection in Progress' status bit, the busy 1167 * LED does. SELINGO is only cleared by a successful 1168 * selection, so we must manually clear it to ensure 1169 * the LED turns off just in case no future successful 1170 * selections occur (e.g. no devices on the bus). 1171 */ 1172 ahc_outb(ahc, CLRSINT0, CLRSELINGO); 1173 1174 scbptr = ahc_inb(ahc, WAITING_SCBH); 1175 ahc_outb(ahc, SCBPTR, scbptr); 1176 scb_index = ahc_inb(ahc, SCB_TAG); 1177 1178 scb = ahc_lookup_scb(ahc, scb_index); 1179 if (scb == NULL) { 1180 printf("%s: ahc_intr - referenced scb not " 1181 "valid during SELTO scb(%d, %d)\n", 1182 ahc_name(ahc), scbptr, scb_index); 1183 ahc_dump_card_state(ahc); 1184 } else { 1185 #ifdef AHC_DEBUG 1186 if ((ahc_debug & AHC_SHOW_SELTO) != 0) { 1187 ahc_print_path(ahc, scb); 1188 printf("Saw Selection Timeout for SCB 0x%x\n", 1189 scb_index); 1190 } 1191 #endif 1192 /* 1193 * Force a renegotiation with this target just in 1194 * case the cable was pulled and will later be 1195 * re-attached. The target may forget its negotiation 1196 * settings with us should it attempt to reselect 1197 * during the interruption. The target will not issue 1198 * a unit attention in this case, so we must always 1199 * renegotiate. 1200 */ 1201 ahc_force_renegotiation(ahc); 1202 ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT); 1203 ahc_freeze_devq(ahc, scb); 1204 } 1205 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1206 ahc_restart(ahc); 1207 } else if ((status & BUSFREE) != 0 1208 && (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) { 1209 u_int lastphase; 1210 u_int saved_scsiid; 1211 u_int saved_lun; 1212 u_int target; 1213 u_int initiator_role_id; 1214 char channel; 1215 int printerror; 1216 1217 /* 1218 * Clear our selection hardware as soon as possible. 1219 * We may have an entry in the waiting Q for this target, 1220 * that is affected by this busfree and we don't want to 1221 * go about selecting the target while we handle the event. 1222 */ 1223 ahc_outb(ahc, SCSISEQ, 1224 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 1225 1226 /* 1227 * Disable busfree interrupts and clear the busfree 1228 * interrupt status. We do this here so that several 1229 * bus transactions occur prior to clearing the SCSIINT 1230 * latch. It can take a bit for the clearing to take effect. 1231 */ 1232 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); 1233 ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR); 1234 1235 /* 1236 * Look at what phase we were last in. 1237 * If it's message out, chances are pretty good 1238 * that the busfree was in response to one of 1239 * our abort requests.
1240 */ 1241 lastphase = ahc_inb(ahc, LASTPHASE); 1242 saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); 1243 saved_lun = ahc_inb(ahc, SAVED_LUN); 1244 target = SCSIID_TARGET(ahc, saved_scsiid); 1245 initiator_role_id = SCSIID_OUR_ID(saved_scsiid); 1246 channel = SCSIID_CHANNEL(ahc, saved_scsiid); 1247 printerror = 1; 1248 1249 if (lastphase == P_MESGOUT) { 1250 struct ahc_devinfo devinfo; 1251 u_int tag; 1252 1253 ahc_fetch_devinfo(ahc, &devinfo); 1254 tag = SCB_LIST_NULL; 1255 if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT_TAG, TRUE) 1256 || ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT, TRUE)) { 1257 if (ahc->msgout_buf[ahc->msgout_index - 1] 1258 == MSG_ABORT_TAG) 1259 tag = scb->hscb->tag; 1260 ahc_print_path(ahc, scb); 1261 printf("SCB %d - Abort%s Completed.\n", 1262 scb->hscb->tag, tag == SCB_LIST_NULL ? 1263 "" : " Tag"); 1264 ahc_abort_scbs(ahc, target, channel, 1265 saved_lun, tag, 1266 ROLE_INITIATOR, 1267 CAM_REQ_ABORTED); 1268 printerror = 0; 1269 } else if (ahc_sent_msg(ahc, AHCMSG_1B, 1270 MSG_BUS_DEV_RESET, TRUE)) { 1271 #ifdef __FreeBSD__ 1272 /* 1273 * Don't mark the user's request for this BDR 1274 * as completing with CAM_BDR_SENT. CAM3 1275 * specifies CAM_REQ_CMP. 1276 */ 1277 if (scb != NULL 1278 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV 1279 && ahc_match_scb(ahc, scb, target, channel, 1280 CAM_LUN_WILDCARD, 1281 SCB_LIST_NULL, 1282 ROLE_INITIATOR)) { 1283 ahc_set_transaction_status(scb, CAM_REQ_CMP); 1284 } 1285 #endif 1286 ahc_compile_devinfo(&devinfo, 1287 initiator_role_id, 1288 target, 1289 CAM_LUN_WILDCARD, 1290 channel, 1291 ROLE_INITIATOR); 1292 ahc_handle_devreset(ahc, &devinfo, 1293 CAM_BDR_SENT, 1294 "Bus Device Reset", 1295 /*verbose_level*/0); 1296 printerror = 0; 1297 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, 1298 MSG_EXT_PPR, FALSE)) { 1299 struct ahc_initiator_tinfo *tinfo; 1300 struct ahc_tmode_tstate *tstate; 1301 1302 /* 1303 * PPR Rejected. Try non-ppr negotiation 1304 * and retry command. 1305 */ 1306 tinfo = ahc_fetch_transinfo(ahc, 1307 devinfo.channel, 1308 devinfo.our_scsiid, 1309 devinfo.target, 1310 &tstate); 1311 tinfo->curr.transport_version = 2; 1312 tinfo->goal.transport_version = 2; 1313 tinfo->goal.ppr_options = 0; 1314 ahc_qinfifo_requeue_tail(ahc, scb); 1315 printerror = 0; 1316 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, 1317 MSG_EXT_WDTR, FALSE) 1318 || ahc_sent_msg(ahc, AHCMSG_EXT, 1319 MSG_EXT_SDTR, FALSE)) { 1320 /* 1321 * Negotiation Rejected. Go-async and 1322 * retry command. 1323 */ 1324 ahc_set_width(ahc, &devinfo, 1325 MSG_EXT_WDTR_BUS_8_BIT, 1326 AHC_TRANS_CUR|AHC_TRANS_GOAL, 1327 /*paused*/TRUE); 1328 ahc_set_syncrate(ahc, &devinfo, 1329 /*syncrate*/NULL, 1330 /*period*/0, /*offset*/0, 1331 /*ppr_options*/0, 1332 AHC_TRANS_CUR|AHC_TRANS_GOAL, 1333 /*paused*/TRUE); 1334 ahc_qinfifo_requeue_tail(ahc, scb); 1335 printerror = 0; 1336 } 1337 } 1338 if (printerror != 0) { 1339 u_int i; 1340 1341 if (scb != NULL) { 1342 u_int tag; 1343 1344 if ((scb->hscb->control & TAG_ENB) != 0) 1345 tag = scb->hscb->tag; 1346 else 1347 tag = SCB_LIST_NULL; 1348 ahc_print_path(ahc, scb); 1349 ahc_abort_scbs(ahc, target, channel, 1350 SCB_GET_LUN(scb), tag, 1351 ROLE_INITIATOR, 1352 CAM_UNEXP_BUSFREE); 1353 } else { 1354 /* 1355 * We had not fully identified this connection, 1356 * so we cannot abort anything. 
1357 */ 1358 printf("%s: ", ahc_name(ahc)); 1359 } 1360 for (i = 0; i < num_phases; i++) { 1361 if (lastphase == ahc_phase_table[i].phase) 1362 break; 1363 } 1364 /* 1365 * Renegotiate with this device at the 1366 * next opportunity just in case this busfree 1367 * is due to a negotiation mismatch with the 1368 * device. 1369 */ 1370 ahc_force_renegotiation(ahc); 1371 printf("Unexpected busfree %s\n" 1372 "SEQADDR == 0x%x\n", 1373 ahc_phase_table[i].phasemsg, 1374 ahc_inb(ahc, SEQADDR0) 1375 | (ahc_inb(ahc, SEQADDR1) << 8)); 1376 } 1377 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1378 ahc_restart(ahc); 1379 } else { 1380 printf("%s: Missing case in ahc_handle_scsiint. status = %x\n", 1381 ahc_name(ahc), status); 1382 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1383 } 1384 } 1385 1386 /* 1387 * Force renegotiation to occur the next time we initiate 1388 * a command to the current device. 1389 */ 1390 static void 1391 ahc_force_renegotiation(struct ahc_softc *ahc) 1392 { 1393 struct ahc_devinfo devinfo; 1394 struct ahc_initiator_tinfo *targ_info; 1395 struct ahc_tmode_tstate *tstate; 1396 1397 ahc_fetch_devinfo(ahc, &devinfo); 1398 targ_info = ahc_fetch_transinfo(ahc, 1399 devinfo.channel, 1400 devinfo.our_scsiid, 1401 devinfo.target, 1402 &tstate); 1403 ahc_update_neg_request(ahc, &devinfo, tstate, 1404 targ_info, AHC_NEG_IF_NON_ASYNC); 1405 } 1406 1407 #define AHC_MAX_STEPS 2000 1408 void 1409 ahc_clear_critical_section(struct ahc_softc *ahc) 1410 { 1411 int stepping; 1412 int steps; 1413 u_int simode0; 1414 u_int simode1; 1415 1416 if (ahc->num_critical_sections == 0) 1417 return; 1418 1419 stepping = FALSE; 1420 steps = 0; 1421 simode0 = 0; 1422 simode1 = 0; 1423 for (;;) { 1424 struct cs *cs; 1425 u_int seqaddr; 1426 u_int i; 1427 1428 seqaddr = ahc_inb(ahc, SEQADDR0) 1429 | (ahc_inb(ahc, SEQADDR1) << 8); 1430 1431 /* 1432 * Seqaddr represents the next instruction to execute, 1433 * so we are really executing the instruction just 1434 * before it. 1435 */ 1436 if (seqaddr != 0) 1437 seqaddr -= 1; 1438 cs = ahc->critical_sections; 1439 for (i = 0; i < ahc->num_critical_sections; i++, cs++) { 1440 1441 if (cs->begin < seqaddr && cs->end >= seqaddr) 1442 break; 1443 } 1444 1445 if (i == ahc->num_critical_sections) 1446 break; 1447 1448 if (steps > AHC_MAX_STEPS) { 1449 printf("%s: Infinite loop in critical section\n", 1450 ahc_name(ahc)); 1451 ahc_dump_card_state(ahc); 1452 panic("critical section loop"); 1453 } 1454 1455 steps++; 1456 if (stepping == FALSE) { 1457 1458 /* 1459 * Disable all interrupt sources so that the 1460 * sequencer will not be stuck by a pausing 1461 * interrupt condition while we attempt to 1462 * leave a critical section. 1463 */ 1464 simode0 = ahc_inb(ahc, SIMODE0); 1465 ahc_outb(ahc, SIMODE0, 0); 1466 simode1 = ahc_inb(ahc, SIMODE1); 1467 if ((ahc->features & AHC_DT) != 0) 1468 /* 1469 * On DT class controllers, we 1470 * use the enhanced busfree logic. 1471 * Unfortunately we cannot re-enable 1472 * busfree detection within the 1473 * current connection, so we must 1474 * leave it on while single stepping. 
1475 */ 1476 ahc_outb(ahc, SIMODE1, ENBUSFREE); 1477 else 1478 ahc_outb(ahc, SIMODE1, 0); 1479 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1480 ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) | STEP); 1481 stepping = TRUE; 1482 } 1483 if ((ahc->features & AHC_DT) != 0) { 1484 ahc_outb(ahc, CLRSINT1, CLRBUSFREE); 1485 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1486 } 1487 ahc_outb(ahc, HCNTRL, ahc->unpause); 1488 while (!ahc_is_paused(ahc)) 1489 ahc_delay(200); 1490 } 1491 if (stepping) { 1492 ahc_outb(ahc, SIMODE0, simode0); 1493 ahc_outb(ahc, SIMODE1, simode1); 1494 ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) & ~STEP); 1495 } 1496 } 1497 1498 /* 1499 * Clear any pending interrupt status. 1500 */ 1501 void 1502 ahc_clear_intstat(struct ahc_softc *ahc) 1503 { 1504 /* Clear any interrupt conditions this may have caused */ 1505 ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI 1506 |CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG| 1507 CLRREQINIT); 1508 ahc_flush_device_writes(ahc); 1509 ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO); 1510 ahc_flush_device_writes(ahc); 1511 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1512 ahc_flush_device_writes(ahc); 1513 } 1514 1515 /**************************** Debugging Routines ******************************/ 1516 #ifdef AHC_DEBUG 1517 uint32_t ahc_debug = 0; /* AHC_SHOW_MISC|AHC_SHOW_SENSE|AHC_DEBUG_OPTS;*/ 1518 #endif 1519 1520 void 1521 ahc_print_scb(struct scb *scb) 1522 { 1523 int i; 1524 1525 struct hardware_scb *hscb = scb->hscb; 1526 1527 printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n", 1528 (void *)scb, 1529 hscb->control, 1530 hscb->scsiid, 1531 hscb->lun, 1532 hscb->cdb_len); 1533 printf("Shared Data: "); 1534 for (i = 0; i < sizeof(hscb->shared_data.cdb); i++) 1535 printf("%#02x", hscb->shared_data.cdb[i]); 1536 printf(" dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n", 1537 ahc_le32toh(hscb->dataptr), 1538 ahc_le32toh(hscb->datacnt), 1539 ahc_le32toh(hscb->sgptr), 1540 hscb->tag); 1541 if (scb->sg_count > 0) { 1542 for (i = 0; i < scb->sg_count; i++) { 1543 printf("sg[%d] - Addr 0x%x%x : Length %d\n", 1544 i, 1545 (ahc_le32toh(scb->sg_list[i].len) >> 24 1546 & SG_HIGH_ADDR_BITS), 1547 ahc_le32toh(scb->sg_list[i].addr), 1548 ahc_le32toh(scb->sg_list[i].len)); 1549 } 1550 } 1551 } 1552 1553 /************************* Transfer Negotiation *******************************/ 1554 /* 1555 * Allocate per target mode instance (ID we respond to as a target) 1556 * transfer negotiation data structures. 1557 */ 1558 static struct ahc_tmode_tstate * 1559 ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel) 1560 { 1561 struct ahc_tmode_tstate *master_tstate; 1562 struct ahc_tmode_tstate *tstate; 1563 int i; 1564 1565 master_tstate = ahc->enabled_targets[ahc->our_id]; 1566 if (channel == 'B') { 1567 scsi_id += 8; 1568 master_tstate = ahc->enabled_targets[ahc->our_id_b + 8]; 1569 } 1570 if (ahc->enabled_targets[scsi_id] != NULL 1571 && ahc->enabled_targets[scsi_id] != master_tstate) 1572 panic("%s: ahc_alloc_tstate - Target already allocated", 1573 ahc_name(ahc)); 1574 tstate = (struct ahc_tmode_tstate*)malloc(sizeof(*tstate), 1575 M_DEVBUF, M_NOWAIT); 1576 if (tstate == NULL) 1577 return (NULL); 1578 1579 /* 1580 * If we have allocated a master tstate, copy user settings from 1581 * the master tstate (taken from SRAM or the EEPROM) for this 1582 * channel, but reset our current and goal settings to async/narrow 1583 * until an initiator talks to us. 
1584 */ 1585 if (master_tstate != NULL) { 1586 memcpy(tstate, master_tstate, sizeof(*tstate)); 1587 memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns)); 1588 tstate->ultraenb = 0; 1589 for (i = 0; i < AHC_NUM_TARGETS; i++) { 1590 memset(&tstate->transinfo[i].curr, 0, 1591 sizeof(tstate->transinfo[i].curr)); 1592 memset(&tstate->transinfo[i].goal, 0, 1593 sizeof(tstate->transinfo[i].goal)); 1594 } 1595 } else 1596 memset(tstate, 0, sizeof(*tstate)); 1597 ahc->enabled_targets[scsi_id] = tstate; 1598 return (tstate); 1599 } 1600 1601 #ifdef AHC_TARGET_MODE 1602 /* 1603 * Free per target mode instance (ID we respond to as a target) 1604 * transfer negotiation data structures. 1605 */ 1606 static void 1607 ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force) 1608 { 1609 struct ahc_tmode_tstate *tstate; 1610 1611 /* 1612 * Don't clean up our "master" tstate. 1613 * It has our default user settings. 1614 */ 1615 if (((channel == 'B' && scsi_id == ahc->our_id_b) 1616 || (channel == 'A' && scsi_id == ahc->our_id)) 1617 && force == FALSE) 1618 return; 1619 1620 if (channel == 'B') 1621 scsi_id += 8; 1622 tstate = ahc->enabled_targets[scsi_id]; 1623 if (tstate != NULL) 1624 free(tstate, M_DEVBUF); 1625 ahc->enabled_targets[scsi_id] = NULL; 1626 } 1627 #endif 1628 1629 /* 1630 * Called when we have an active connection to a target on the bus, 1631 * this function finds the nearest syncrate to the input period limited 1632 * by the capabilities of the bus connectivity of and sync settings for 1633 * the target. 1634 */ 1635 struct ahc_syncrate * 1636 ahc_devlimited_syncrate(struct ahc_softc *ahc, 1637 struct ahc_initiator_tinfo *tinfo, 1638 u_int *period, u_int *ppr_options, role_t role) 1639 { 1640 struct ahc_transinfo *transinfo; 1641 u_int maxsync; 1642 1643 if ((ahc->features & AHC_ULTRA2) != 0) { 1644 if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0 1645 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) { 1646 maxsync = AHC_SYNCRATE_DT; 1647 } else { 1648 maxsync = AHC_SYNCRATE_ULTRA; 1649 /* Can't do DT on an SE bus */ 1650 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1651 } 1652 } else if ((ahc->features & AHC_ULTRA) != 0) { 1653 maxsync = AHC_SYNCRATE_ULTRA; 1654 } else { 1655 maxsync = AHC_SYNCRATE_FAST; 1656 } 1657 /* 1658 * Never allow a value higher than our current goal 1659 * period otherwise we may allow a target initiated 1660 * negotiation to go above the limit as set by the 1661 * user. In the case of an initiator initiated 1662 * sync negotiation, we limit based on the user 1663 * setting. This allows the system to still accept 1664 * incoming negotiations even if target initiated 1665 * negotiation is not performed. 1666 */ 1667 if (role == ROLE_TARGET) 1668 transinfo = &tinfo->user; 1669 else 1670 transinfo = &tinfo->goal; 1671 *ppr_options &= transinfo->ppr_options; 1672 if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) { 1673 maxsync = MAX(maxsync, AHC_SYNCRATE_ULTRA2); 1674 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1675 } 1676 if (transinfo->period == 0) { 1677 *period = 0; 1678 *ppr_options = 0; 1679 return (NULL); 1680 } 1681 *period = MAX(*period, transinfo->period); 1682 return (ahc_find_syncrate(ahc, period, ppr_options, maxsync)); 1683 } 1684 1685 /* 1686 * Look up the valid period to SCSIRATE conversion in our table. 1687 * Return the period and offset that should be sent to the target 1688 * if this was the beginning of an SDTR. 
1689 */ 1690 struct ahc_syncrate * 1691 ahc_find_syncrate(struct ahc_softc *ahc, u_int *period, 1692 u_int *ppr_options, u_int maxsync) 1693 { 1694 struct ahc_syncrate *syncrate; 1695 1696 if ((ahc->features & AHC_DT) == 0) 1697 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1698 1699 /* Skip all DT only entries if DT is not available */ 1700 if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0 1701 && maxsync < AHC_SYNCRATE_ULTRA2) 1702 maxsync = AHC_SYNCRATE_ULTRA2; 1703 1704 for (syncrate = &ahc_syncrates[maxsync]; 1705 syncrate->rate != NULL; 1706 syncrate++) { 1707 1708 /* 1709 * The Ultra2 table doesn't go as low 1710 * as for the Fast/Ultra cards. 1711 */ 1712 if ((ahc->features & AHC_ULTRA2) != 0 1713 && (syncrate->sxfr_u2 == 0)) 1714 break; 1715 1716 if (*period <= syncrate->period) { 1717 /* 1718 * When responding to a target that requests 1719 * sync, the requested rate may fall between 1720 * two rates that we can output, but still be 1721 * a rate that we can receive. Because of this, 1722 * we want to respond to the target with 1723 * the same rate that it sent to us even 1724 * if the period we use to send data to it 1725 * is lower. Only lower the response period 1726 * if we must. 1727 */ 1728 if (syncrate == &ahc_syncrates[maxsync]) 1729 *period = syncrate->period; 1730 1731 /* 1732 * At some speeds, we only support 1733 * ST transfers. 1734 */ 1735 if ((syncrate->sxfr_u2 & ST_SXFR) != 0) 1736 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1737 break; 1738 } 1739 } 1740 1741 if ((*period == 0) 1742 || (syncrate->rate == NULL) 1743 || ((ahc->features & AHC_ULTRA2) != 0 1744 && (syncrate->sxfr_u2 == 0))) { 1745 /* Use asynchronous transfers. */ 1746 *period = 0; 1747 syncrate = NULL; 1748 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1749 } 1750 return (syncrate); 1751 } 1752 1753 /* 1754 * Convert from an entry in our syncrate table to the SCSI equivalent 1755 * sync "period" factor. 1756 */ 1757 u_int 1758 ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync) 1759 { 1760 struct ahc_syncrate *syncrate; 1761 1762 if ((ahc->features & AHC_ULTRA2) != 0) 1763 scsirate &= SXFR_ULTRA2; 1764 else 1765 scsirate &= SXFR; 1766 1767 syncrate = &ahc_syncrates[maxsync]; 1768 while (syncrate->rate != NULL) { 1769 1770 if ((ahc->features & AHC_ULTRA2) != 0) { 1771 if (syncrate->sxfr_u2 == 0) 1772 break; 1773 else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2)) 1774 return (syncrate->period); 1775 } else if (scsirate == (syncrate->sxfr & SXFR)) { 1776 return (syncrate->period); 1777 } 1778 syncrate++; 1779 } 1780 return (0); /* async */ 1781 } 1782 1783 /* 1784 * Truncate the given synchronous offset to a value the 1785 * current adapter type and syncrate are capable of. 
1786 */ 1787 void 1788 ahc_validate_offset(struct ahc_softc *ahc, 1789 struct ahc_initiator_tinfo *tinfo, 1790 struct ahc_syncrate *syncrate, 1791 u_int *offset, int wide, role_t role) 1792 { 1793 u_int maxoffset; 1794 1795 /* Limit offset to what we can do */ 1796 if (syncrate == NULL) { 1797 maxoffset = 0; 1798 } else if ((ahc->features & AHC_ULTRA2) != 0) { 1799 maxoffset = MAX_OFFSET_ULTRA2; 1800 } else { 1801 if (wide) 1802 maxoffset = MAX_OFFSET_16BIT; 1803 else 1804 maxoffset = MAX_OFFSET_8BIT; 1805 } 1806 *offset = MIN(*offset, maxoffset); 1807 if (tinfo != NULL) { 1808 if (role == ROLE_TARGET) 1809 *offset = MIN(*offset, tinfo->user.offset); 1810 else 1811 *offset = MIN(*offset, tinfo->goal.offset); 1812 } 1813 } 1814 1815 /* 1816 * Truncate the given transfer width parameter to a value the 1817 * current adapter type is capable of. 1818 */ 1819 void 1820 ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, 1821 u_int *bus_width, role_t role) 1822 { 1823 switch (*bus_width) { 1824 default: 1825 if (ahc->features & AHC_WIDE) { 1826 /* Respond Wide */ 1827 *bus_width = MSG_EXT_WDTR_BUS_16_BIT; 1828 break; 1829 } 1830 /* FALLTHROUGH */ 1831 case MSG_EXT_WDTR_BUS_8_BIT: 1832 *bus_width = MSG_EXT_WDTR_BUS_8_BIT; 1833 break; 1834 } 1835 if (tinfo != NULL) { 1836 if (role == ROLE_TARGET) 1837 *bus_width = MIN(tinfo->user.width, *bus_width); 1838 else 1839 *bus_width = MIN(tinfo->goal.width, *bus_width); 1840 } 1841 } 1842 1843 /* 1844 * Update the bitmask of targets for which the controller should 1845 * negotiate with at the next convenient opportunity. This currently 1846 * means the next time we send the initial identify messages for 1847 * a new transaction. 1848 */ 1849 int 1850 ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 1851 struct ahc_tmode_tstate *tstate, 1852 struct ahc_initiator_tinfo *tinfo, ahc_neg_type neg_type) 1853 { 1854 u_int auto_negotiate_orig; 1855 1856 auto_negotiate_orig = tstate->auto_negotiate; 1857 if (neg_type == AHC_NEG_ALWAYS) { 1858 /* 1859 * Force our "current" settings to be 1860 * unknown so that unless a bus reset 1861 * occurs the need to renegotiate is 1862 * recorded persistently. 1863 */ 1864 if ((ahc->features & AHC_WIDE) != 0) 1865 tinfo->curr.width = AHC_WIDTH_UNKNOWN; 1866 tinfo->curr.period = AHC_PERIOD_UNKNOWN; 1867 tinfo->curr.offset = AHC_OFFSET_UNKNOWN; 1868 } 1869 if (tinfo->curr.period != tinfo->goal.period 1870 || tinfo->curr.width != tinfo->goal.width 1871 || tinfo->curr.offset != tinfo->goal.offset 1872 || tinfo->curr.ppr_options != tinfo->goal.ppr_options 1873 || (neg_type == AHC_NEG_IF_NON_ASYNC 1874 && (tinfo->goal.offset != 0 1875 || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT 1876 || tinfo->goal.ppr_options != 0))) 1877 tstate->auto_negotiate |= devinfo->target_mask; 1878 else 1879 tstate->auto_negotiate &= ~devinfo->target_mask; 1880 1881 return (auto_negotiate_orig != tstate->auto_negotiate); 1882 } 1883 1884 /* 1885 * Update the user/goal/curr tables of synchronous negotiation 1886 * parameters as well as, in the case of a current or active update, 1887 * any data structures on the host controller. In the case of an 1888 * active update, the specified target is currently talking to us on 1889 * the bus, so the transfer parameter update must take effect 1890 * immediately. 
1891 */ 1892 void 1893 ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 1894 struct ahc_syncrate *syncrate, u_int period, 1895 u_int offset, u_int ppr_options, u_int type, int paused) 1896 { 1897 struct ahc_initiator_tinfo *tinfo; 1898 struct ahc_tmode_tstate *tstate; 1899 u_int old_period; 1900 u_int old_offset; 1901 u_int old_ppr; 1902 int active; 1903 int update_needed; 1904 1905 active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; 1906 update_needed = 0; 1907 1908 if (syncrate == NULL) { 1909 period = 0; 1910 offset = 0; 1911 } 1912 1913 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 1914 devinfo->target, &tstate); 1915 1916 if ((type & AHC_TRANS_USER) != 0) { 1917 tinfo->user.period = period; 1918 tinfo->user.offset = offset; 1919 tinfo->user.ppr_options = ppr_options; 1920 } 1921 1922 if ((type & AHC_TRANS_GOAL) != 0) { 1923 tinfo->goal.period = period; 1924 tinfo->goal.offset = offset; 1925 tinfo->goal.ppr_options = ppr_options; 1926 } 1927 1928 old_period = tinfo->curr.period; 1929 old_offset = tinfo->curr.offset; 1930 old_ppr = tinfo->curr.ppr_options; 1931 1932 if ((type & AHC_TRANS_CUR) != 0 1933 && (old_period != period 1934 || old_offset != offset 1935 || old_ppr != ppr_options)) { 1936 u_int scsirate; 1937 1938 update_needed++; 1939 scsirate = tinfo->scsirate; 1940 if ((ahc->features & AHC_ULTRA2) != 0) { 1941 1942 scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC); 1943 if (syncrate != NULL) { 1944 scsirate |= syncrate->sxfr_u2; 1945 if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) 1946 scsirate |= ENABLE_CRC; 1947 else 1948 scsirate |= SINGLE_EDGE; 1949 } 1950 } else { 1951 1952 scsirate &= ~(SXFR|SOFS); 1953 /* 1954 * Ensure Ultra mode is set properly for 1955 * this target. 1956 */ 1957 tstate->ultraenb &= ~devinfo->target_mask; 1958 if (syncrate != NULL) { 1959 if (syncrate->sxfr & ULTRA_SXFR) { 1960 tstate->ultraenb |= 1961 devinfo->target_mask; 1962 } 1963 scsirate |= syncrate->sxfr & SXFR; 1964 scsirate |= offset & SOFS; 1965 } 1966 if (active) { 1967 u_int sxfrctl0; 1968 1969 sxfrctl0 = ahc_inb(ahc, SXFRCTL0); 1970 sxfrctl0 &= ~FAST20; 1971 if (tstate->ultraenb & devinfo->target_mask) 1972 sxfrctl0 |= FAST20; 1973 ahc_outb(ahc, SXFRCTL0, sxfrctl0); 1974 } 1975 } 1976 if (active) { 1977 ahc_outb(ahc, SCSIRATE, scsirate); 1978 if ((ahc->features & AHC_ULTRA2) != 0) 1979 ahc_outb(ahc, SCSIOFFSET, offset); 1980 } 1981 1982 tinfo->scsirate = scsirate; 1983 tinfo->curr.period = period; 1984 tinfo->curr.offset = offset; 1985 tinfo->curr.ppr_options = ppr_options; 1986 1987 ahc_send_async(ahc, devinfo->channel, devinfo->target, 1988 CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL); 1989 if (bootverbose) { 1990 if (offset != 0) { 1991 printf("%s: target %d synchronous at %sMHz%s, " 1992 "offset = 0x%x\n", ahc_name(ahc), 1993 devinfo->target, syncrate->rate, 1994 (ppr_options & MSG_EXT_PPR_DT_REQ) 1995 ? " DT" : "", offset); 1996 } else { 1997 printf("%s: target %d using " 1998 "asynchronous transfers\n", 1999 ahc_name(ahc), devinfo->target); 2000 } 2001 } 2002 } 2003 2004 update_needed += ahc_update_neg_request(ahc, devinfo, tstate, 2005 tinfo, AHC_NEG_TO_GOAL); 2006 2007 if (update_needed) 2008 ahc_update_pending_scbs(ahc); 2009 } 2010 2011 /* 2012 * Update the user/goal/curr tables of wide negotiation 2013 * parameters as well as, in the case of a current or active update, 2014 * any data structures on the host controller. 
In the case of an 2015 * active update, the specified target is currently talking to us on 2016 * the bus, so the transfer parameter update must take effect 2017 * immediately. 2018 */ 2019 void 2020 ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2021 u_int width, u_int type, int paused) 2022 { 2023 struct ahc_initiator_tinfo *tinfo; 2024 struct ahc_tmode_tstate *tstate; 2025 u_int oldwidth; 2026 int active; 2027 int update_needed; 2028 2029 active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; 2030 update_needed = 0; 2031 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 2032 devinfo->target, &tstate); 2033 2034 if ((type & AHC_TRANS_USER) != 0) 2035 tinfo->user.width = width; 2036 2037 if ((type & AHC_TRANS_GOAL) != 0) 2038 tinfo->goal.width = width; 2039 2040 oldwidth = tinfo->curr.width; 2041 if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) { 2042 u_int scsirate; 2043 2044 update_needed++; 2045 scsirate = tinfo->scsirate; 2046 scsirate &= ~WIDEXFER; 2047 if (width == MSG_EXT_WDTR_BUS_16_BIT) 2048 scsirate |= WIDEXFER; 2049 2050 tinfo->scsirate = scsirate; 2051 2052 if (active) 2053 ahc_outb(ahc, SCSIRATE, scsirate); 2054 2055 tinfo->curr.width = width; 2056 2057 ahc_send_async(ahc, devinfo->channel, devinfo->target, 2058 CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL); 2059 if (bootverbose) { 2060 printf("%s: target %d using %dbit transfers\n", 2061 ahc_name(ahc), devinfo->target, 2062 8 * (0x01 << width)); 2063 } 2064 } 2065 2066 update_needed += ahc_update_neg_request(ahc, devinfo, tstate, 2067 tinfo, AHC_NEG_TO_GOAL); 2068 if (update_needed) 2069 ahc_update_pending_scbs(ahc); 2070 } 2071 2072 /* 2073 * Update the current state of tagged queuing for a given target. 2074 */ 2075 void 2076 ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2077 ahc_queue_alg alg) 2078 { 2079 ahc_platform_set_tags(ahc, devinfo, alg); 2080 } 2081 2082 /* 2083 * When the transfer settings for a connection change, update any 2084 * in-transit SCBs to contain the new data so the hardware will 2085 * be set correctly during future (re)selections. 2086 */ 2087 static void 2088 ahc_update_pending_scbs(struct ahc_softc *ahc) 2089 { 2090 struct scb *pending_scb; 2091 int pending_scb_count; 2092 int i; 2093 int paused; 2094 u_int saved_scbptr; 2095 2096 /* 2097 * Traverse the pending SCB list and ensure that all of the 2098 * SCBs there have the proper settings. 
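 * Copies of SCBs that have already been downloaded to the card are
 * corrected afterwards with the sequencer paused.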
2099 */ 2100 pending_scb_count = 0; 2101 LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) { 2102 struct ahc_devinfo devinfo; 2103 struct hardware_scb *pending_hscb; 2104 struct ahc_initiator_tinfo *tinfo; 2105 struct ahc_tmode_tstate *tstate; 2106 2107 ahc_scb_devinfo(ahc, &devinfo, pending_scb); 2108 tinfo = ahc_fetch_transinfo(ahc, devinfo.channel, 2109 devinfo.our_scsiid, 2110 devinfo.target, &tstate); 2111 pending_hscb = pending_scb->hscb; 2112 pending_hscb->control &= ~ULTRAENB; 2113 if ((tstate->ultraenb & devinfo.target_mask) != 0) 2114 pending_hscb->control |= ULTRAENB; 2115 pending_hscb->scsirate = tinfo->scsirate; 2116 pending_hscb->scsioffset = tinfo->curr.offset; 2117 if ((tstate->auto_negotiate & devinfo.target_mask) == 0 2118 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) { 2119 pending_scb->flags &= ~SCB_AUTO_NEGOTIATE; 2120 pending_hscb->control &= ~MK_MESSAGE; 2121 } 2122 ahc_sync_scb(ahc, pending_scb, 2123 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 2124 pending_scb_count++; 2125 } 2126 2127 if (pending_scb_count == 0) 2128 return; 2129 2130 if (ahc_is_paused(ahc)) { 2131 paused = 1; 2132 } else { 2133 paused = 0; 2134 ahc_pause(ahc); 2135 } 2136 2137 saved_scbptr = ahc_inb(ahc, SCBPTR); 2138 /* Ensure that the hscbs down on the card match the new information */ 2139 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 2140 struct hardware_scb *pending_hscb; 2141 u_int control; 2142 u_int scb_tag; 2143 2144 ahc_outb(ahc, SCBPTR, i); 2145 scb_tag = ahc_inb(ahc, SCB_TAG); 2146 pending_scb = ahc_lookup_scb(ahc, scb_tag); 2147 if (pending_scb == NULL) 2148 continue; 2149 2150 pending_hscb = pending_scb->hscb; 2151 control = ahc_inb(ahc, SCB_CONTROL); 2152 control &= ~(ULTRAENB|MK_MESSAGE); 2153 control |= pending_hscb->control & (ULTRAENB|MK_MESSAGE); 2154 ahc_outb(ahc, SCB_CONTROL, control); 2155 ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate); 2156 ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset); 2157 } 2158 ahc_outb(ahc, SCBPTR, saved_scbptr); 2159 2160 if (paused == 0) 2161 ahc_unpause(ahc); 2162 } 2163 2164 /**************************** Pathing Information *****************************/ 2165 static void 2166 ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2167 { 2168 u_int saved_scsiid; 2169 role_t role; 2170 int our_id; 2171 2172 if (ahc_inb(ahc, SSTAT0) & TARGET) 2173 role = ROLE_TARGET; 2174 else 2175 role = ROLE_INITIATOR; 2176 2177 if (role == ROLE_TARGET 2178 && (ahc->features & AHC_MULTI_TID) != 0 2179 && (ahc_inb(ahc, SEQ_FLAGS) 2180 & (CMDPHASE_PENDING|TARG_CMD_PENDING|NO_DISCONNECT)) != 0) { 2181 /* We were selected, so pull our id from TARGIDIN */ 2182 our_id = ahc_inb(ahc, TARGIDIN) & OID; 2183 } else if ((ahc->features & AHC_ULTRA2) != 0) 2184 our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID; 2185 else 2186 our_id = ahc_inb(ahc, SCSIID) & OID; 2187 2188 saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); 2189 ahc_compile_devinfo(devinfo, 2190 our_id, 2191 SCSIID_TARGET(ahc, saved_scsiid), 2192 ahc_inb(ahc, SAVED_LUN), 2193 SCSIID_CHANNEL(ahc, saved_scsiid), 2194 role); 2195 } 2196 2197 struct ahc_phase_table_entry* 2198 ahc_lookup_phase_entry(int phase) 2199 { 2200 struct ahc_phase_table_entry *entry; 2201 struct ahc_phase_table_entry *last_entry; 2202 2203 /* 2204 * num_phases doesn't include the default entry which 2205 * will be returned if the phase doesn't match. 
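 * The lookup below therefore always terminates on a valid entry.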
2206 */ 2207 last_entry = &ahc_phase_table[num_phases]; 2208 for (entry = ahc_phase_table; entry < last_entry; entry++) { 2209 if (phase == entry->phase) 2210 break; 2211 } 2212 return (entry); 2213 } 2214 2215 void 2216 ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target, 2217 u_int lun, char channel, role_t role) 2218 { 2219 devinfo->our_scsiid = our_id; 2220 devinfo->target = target; 2221 devinfo->lun = lun; 2222 devinfo->target_offset = target; 2223 devinfo->channel = channel; 2224 devinfo->role = role; 2225 if (channel == 'B') 2226 devinfo->target_offset += 8; 2227 devinfo->target_mask = (0x01 << devinfo->target_offset); 2228 } 2229 2230 void 2231 ahc_print_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2232 { 2233 printf("%s:%c:%d:%d: ", ahc_name(ahc), devinfo->channel, 2234 devinfo->target, devinfo->lun); 2235 } 2236 2237 static void 2238 ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2239 struct scb *scb) 2240 { 2241 role_t role; 2242 int our_id; 2243 2244 our_id = SCSIID_OUR_ID(scb->hscb->scsiid); 2245 role = ROLE_INITIATOR; 2246 if ((scb->flags & SCB_TARGET_SCB) != 0) 2247 role = ROLE_TARGET; 2248 ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb), 2249 SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role); 2250 } 2251 2252 2253 /************************ Message Phase Processing ****************************/ 2254 static void 2255 ahc_assert_atn(struct ahc_softc *ahc) 2256 { 2257 u_int scsisigo; 2258 2259 scsisigo = ATNO; 2260 if ((ahc->features & AHC_DT) == 0) 2261 scsisigo |= ahc_inb(ahc, SCSISIGI); 2262 ahc_outb(ahc, SCSISIGO, scsisigo); 2263 } 2264 2265 /* 2266 * When an initiator transaction with the MK_MESSAGE flag either reconnects 2267 * or enters the initial message out phase, we are interrupted. Fill our 2268 * outgoing message buffer with the appropriate message and begin handing 2269 * the message phase(s) manually. 2270 */ 2271 static void 2272 ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2273 struct scb *scb) 2274 { 2275 /* 2276 * To facilitate adding multiple messages together, 2277 * each routine should increment the index and len 2278 * variables instead of setting them explicitly. 2279 */ 2280 ahc->msgout_index = 0; 2281 ahc->msgout_len = 0; 2282 2283 if ((scb->flags & SCB_DEVICE_RESET) == 0 2284 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) { 2285 u_int identify_msg; 2286 2287 identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb); 2288 if ((scb->hscb->control & DISCENB) != 0) 2289 identify_msg |= MSG_IDENTIFY_DISCFLAG; 2290 ahc->msgout_buf[ahc->msgout_index++] = identify_msg; 2291 ahc->msgout_len++; 2292 2293 if ((scb->hscb->control & TAG_ENB) != 0) { 2294 ahc->msgout_buf[ahc->msgout_index++] = 2295 scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE); 2296 ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag; 2297 ahc->msgout_len += 2; 2298 } 2299 } 2300 2301 if (scb->flags & SCB_DEVICE_RESET) { 2302 ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET; 2303 ahc->msgout_len++; 2304 ahc_print_path(ahc, scb); 2305 printf("Bus Device Reset Message Sent\n"); 2306 /* 2307 * Clear our selection hardware in advance of 2308 * the busfree. We may have an entry in the waiting 2309 * Q for this target, and we don't want to go about 2310 * selecting while we handle the busfree and blow it 2311 * away. 
2312 */ 2313 ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); 2314 } else if ((scb->flags & SCB_ABORT) != 0) { 2315 if ((scb->hscb->control & TAG_ENB) != 0) 2316 ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG; 2317 else 2318 ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT; 2319 ahc->msgout_len++; 2320 ahc_print_path(ahc, scb); 2321 printf("Abort%s Message Sent\n", 2322 (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : ""); 2323 /* 2324 * Clear our selection hardware in advance of 2325 * the busfree. We may have an entry in the waiting 2326 * Q for this target, and we don't want to go about 2327 * selecting while we handle the busfree and blow it 2328 * away. 2329 */ 2330 ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); 2331 } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) { 2332 ahc_build_transfer_msg(ahc, devinfo); 2333 } else { 2334 printf("ahc_intr: AWAITING_MSG for an SCB that " 2335 "does not have a waiting message\n"); 2336 printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid, 2337 devinfo->target_mask); 2338 panic("SCB = %d, SCB Control = %x, MSG_OUT = %x " 2339 "SCB flags = %x", scb->hscb->tag, scb->hscb->control, 2340 ahc_inb(ahc, MSG_OUT), scb->flags); 2341 } 2342 2343 /* 2344 * Clear the MK_MESSAGE flag from the SCB so we aren't 2345 * asked to send this message again. 2346 */ 2347 ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE); 2348 scb->hscb->control &= ~MK_MESSAGE; 2349 ahc->msgout_index = 0; 2350 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 2351 } 2352 2353 /* 2354 * Build an appropriate transfer negotiation message for the 2355 * currently active target. 2356 */ 2357 static void 2358 ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2359 { 2360 /* 2361 * We need to initiate transfer negotiations. 2362 * If our current and goal settings are identical, 2363 * we want to renegotiate due to a check condition. 2364 */ 2365 struct ahc_initiator_tinfo *tinfo; 2366 struct ahc_tmode_tstate *tstate; 2367 struct ahc_syncrate *rate; 2368 int dowide; 2369 int dosync; 2370 int doppr; 2371 u_int period; 2372 u_int ppr_options; 2373 u_int offset; 2374 2375 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 2376 devinfo->target, &tstate); 2377 /* 2378 * Filter our period based on the current connection. 2379 * If we can't perform DT transfers on this segment (not in LVD 2380 * mode for instance), then our decision to issue a PPR message 2381 * may change. 2382 */ 2383 period = tinfo->goal.period; 2384 ppr_options = tinfo->goal.ppr_options; 2385 /* Target initiated PPR is not allowed in the SCSI spec */ 2386 if (devinfo->role == ROLE_TARGET) 2387 ppr_options = 0; 2388 rate = ahc_devlimited_syncrate(ahc, tinfo, &period, 2389 &ppr_options, devinfo->role); 2390 dowide = tinfo->curr.width != tinfo->goal.width; 2391 dosync = tinfo->curr.period != period; 2392 /* 2393 * Only use PPR if we have options that need it, even if the device 2394 * claims to support it. There might be an expander in the way 2395 * that doesn't. 2396 */ 2397 doppr = ppr_options != 0; 2398 2399 if (!dowide && !dosync && !doppr) { 2400 dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT; 2401 dosync = tinfo->goal.offset != 0; 2402 } 2403 2404 if (!dowide && !dosync && !doppr) { 2405 /* 2406 * Force async with a WDTR message if we have a wide bus, 2407 * or just issue an SDTR with a 0 offset. 
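 * A WDTR alone is enough on a wide bus since any width negotiation
 * also resets the synchronous agreement to async.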
2408 */ 2409 if ((ahc->features & AHC_WIDE) != 0) 2410 dowide = 1; 2411 else 2412 dosync = 1; 2413 2414 if (bootverbose) { 2415 ahc_print_devinfo(ahc, devinfo); 2416 printf("Ensuring async\n"); 2417 } 2418 } 2419 2420 /* Target initiated PPR is not allowed in the SCSI spec */ 2421 if (devinfo->role == ROLE_TARGET) 2422 doppr = 0; 2423 2424 /* 2425 * Both the PPR message and SDTR message require the 2426 * goal syncrate to be limited to what the target device 2427 * is capable of handling (based on whether an LVD->SE 2428 * expander is on the bus), so combine these two cases. 2429 * Regardless, guarantee that if we are using WDTR and SDTR 2430 * messages that WDTR comes first. 2431 */ 2432 if (doppr || (dosync && !dowide)) { 2433 2434 offset = tinfo->goal.offset; 2435 ahc_validate_offset(ahc, tinfo, rate, &offset, 2436 doppr ? tinfo->goal.width 2437 : tinfo->curr.width, 2438 devinfo->role); 2439 if (doppr) { 2440 ahc_construct_ppr(ahc, devinfo, period, offset, 2441 tinfo->goal.width, ppr_options); 2442 } else { 2443 ahc_construct_sdtr(ahc, devinfo, period, offset); 2444 } 2445 } else { 2446 ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width); 2447 } 2448 } 2449 2450 /* 2451 * Build a synchronous negotiation message in our message 2452 * buffer based on the input parameters. 2453 */ 2454 static void 2455 ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2456 u_int period, u_int offset) 2457 { 2458 if (offset == 0) 2459 period = AHC_ASYNC_XFER_PERIOD; 2460 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED; 2461 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR_LEN; 2462 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR; 2463 ahc->msgout_buf[ahc->msgout_index++] = period; 2464 ahc->msgout_buf[ahc->msgout_index++] = offset; 2465 ahc->msgout_len += 5; 2466 if (bootverbose) { 2467 printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n", 2468 ahc_name(ahc), devinfo->channel, devinfo->target, 2469 devinfo->lun, period, offset); 2470 } 2471 } 2472 2473 /* 2474 * Build a wide negotiation message in our message 2475 * buffer based on the input parameters. 2476 */ 2477 static void 2478 ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2479 u_int bus_width) 2480 { 2481 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED; 2482 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR_LEN; 2483 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR; 2484 ahc->msgout_buf[ahc->msgout_index++] = bus_width; 2485 ahc->msgout_len += 4; 2486 if (bootverbose) { 2487 printf("(%s:%c:%d:%d): Sending WDTR %x\n", 2488 ahc_name(ahc), devinfo->channel, devinfo->target, 2489 devinfo->lun, bus_width); 2490 } 2491 } 2492 2493 /* 2494 * Build a parallel protocol request message in our message 2495 * buffer based on the input parameters. 
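 * A zero offset forces the period to the async value before the
 * message is queued.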
2496 */ 2497 static void 2498 ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2499 u_int period, u_int offset, u_int bus_width, 2500 u_int ppr_options) 2501 { 2502 if (offset == 0) 2503 period = AHC_ASYNC_XFER_PERIOD; 2504 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED; 2505 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR_LEN; 2506 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR; 2507 ahc->msgout_buf[ahc->msgout_index++] = period; 2508 ahc->msgout_buf[ahc->msgout_index++] = 0; 2509 ahc->msgout_buf[ahc->msgout_index++] = offset; 2510 ahc->msgout_buf[ahc->msgout_index++] = bus_width; 2511 ahc->msgout_buf[ahc->msgout_index++] = ppr_options; 2512 ahc->msgout_len += 8; 2513 if (bootverbose) { 2514 printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, " 2515 "offset %x, ppr_options %x\n", ahc_name(ahc), 2516 devinfo->channel, devinfo->target, devinfo->lun, 2517 bus_width, period, offset, ppr_options); 2518 } 2519 } 2520 2521 /* 2522 * Clear any active message state. 2523 */ 2524 static void 2525 ahc_clear_msg_state(struct ahc_softc *ahc) 2526 { 2527 ahc->msgout_len = 0; 2528 ahc->msgin_index = 0; 2529 ahc->msg_type = MSG_TYPE_NONE; 2530 if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0) { 2531 /* 2532 * The target didn't care to respond to our 2533 * message request, so clear ATN. 2534 */ 2535 ahc_outb(ahc, CLRSINT1, CLRATNO); 2536 } 2537 ahc_outb(ahc, MSG_OUT, MSG_NOOP); 2538 ahc_outb(ahc, SEQ_FLAGS2, 2539 ahc_inb(ahc, SEQ_FLAGS2) & ~TARGET_MSG_PENDING); 2540 } 2541 2542 static void 2543 ahc_handle_proto_violation(struct ahc_softc *ahc) 2544 { 2545 struct ahc_devinfo devinfo; 2546 struct scb *scb; 2547 u_int scbid; 2548 u_int seq_flags; 2549 u_int curphase; 2550 u_int lastphase; 2551 int found; 2552 2553 ahc_fetch_devinfo(ahc, &devinfo); 2554 scbid = ahc_inb(ahc, SCB_TAG); 2555 scb = ahc_lookup_scb(ahc, scbid); 2556 seq_flags = ahc_inb(ahc, SEQ_FLAGS); 2557 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 2558 lastphase = ahc_inb(ahc, LASTPHASE); 2559 if ((seq_flags & NOT_IDENTIFIED) != 0) { 2560 2561 /* 2562 * The reconnecting target either did not send an 2563 * identify message, or did, but we didn't find an SCB 2564 * to match. 2565 */ 2566 ahc_print_devinfo(ahc, &devinfo); 2567 printf("Target did not send an IDENTIFY message. " 2568 "LASTPHASE = 0x%x.\n", lastphase); 2569 scb = NULL; 2570 } else if (scb == NULL) { 2571 /* 2572 * We don't seem to have an SCB active for this 2573 * transaction. Print an error and reset the bus. 2574 */ 2575 ahc_print_devinfo(ahc, &devinfo); 2576 printf("No SCB found during protocol violation\n"); 2577 goto proto_violation_reset; 2578 } else { 2579 ahc_set_transaction_status(scb, CAM_SEQUENCE_FAIL); 2580 if ((seq_flags & NO_CDB_SENT) != 0) { 2581 ahc_print_path(ahc, scb); 2582 printf("No or incomplete CDB sent to device.\n"); 2583 } else if ((ahc_inb(ahc, SCB_CONTROL) & STATUS_RCVD) == 0) { 2584 /* 2585 * The target never bothered to provide status to 2586 * us prior to completing the command. Since we don't 2587 * know the disposition of this command, we must attempt 2588 * to abort it. Assert ATN and prepare to send an abort 2589 * message. 
2590 */ 2591 ahc_print_path(ahc, scb); 2592 printf("Completed command without status.\n"); 2593 } else { 2594 ahc_print_path(ahc, scb); 2595 printf("Unknown protocol violation.\n"); 2596 ahc_dump_card_state(ahc); 2597 } 2598 } 2599 if ((lastphase & ~P_DATAIN_DT) == 0 2600 || lastphase == P_COMMAND) { 2601 proto_violation_reset: 2602 /* 2603 * Target either went directly to data/command 2604 * phase or didn't respond to our ATN. 2605 * The only safe thing to do is to blow 2606 * it away with a bus reset. 2607 */ 2608 found = ahc_reset_channel(ahc, 'A', TRUE); 2609 printf("%s: Issued Channel %c Bus Reset. " 2610 "%d SCBs aborted\n", ahc_name(ahc), 'A', found); 2611 } else { 2612 /* 2613 * Leave the selection hardware off in case 2614 * this abort attempt will affect yet to 2615 * be sent commands. 2616 */ 2617 ahc_outb(ahc, SCSISEQ, 2618 ahc_inb(ahc, SCSISEQ) & ~ENSELO); 2619 ahc_assert_atn(ahc); 2620 ahc_outb(ahc, MSG_OUT, HOST_MSG); 2621 if (scb == NULL) { 2622 ahc_print_devinfo(ahc, &devinfo); 2623 ahc->msgout_buf[0] = MSG_ABORT_TASK; 2624 ahc->msgout_len = 1; 2625 ahc->msgout_index = 0; 2626 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 2627 } else { 2628 ahc_print_path(ahc, scb); 2629 scb->flags |= SCB_ABORT; 2630 } 2631 printf("Protocol violation %s. Attempting to abort.\n", 2632 ahc_lookup_phase_entry(curphase)->phasemsg); 2633 } 2634 } 2635 2636 /* 2637 * Manual message loop handler. 2638 */ 2639 static void 2640 ahc_handle_message_phase(struct ahc_softc *ahc) 2641 { 2642 struct ahc_devinfo devinfo; 2643 u_int bus_phase; 2644 int end_session; 2645 2646 ahc_fetch_devinfo(ahc, &devinfo); 2647 end_session = FALSE; 2648 bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 2649 2650 reswitch: 2651 switch (ahc->msg_type) { 2652 case MSG_TYPE_INITIATOR_MSGOUT: 2653 { 2654 int lastbyte; 2655 int phasemis; 2656 int msgdone; 2657 2658 if (ahc->msgout_len == 0) 2659 panic("HOST_MSG_LOOP interrupt with no active message"); 2660 2661 #ifdef AHC_DEBUG 2662 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { 2663 ahc_print_devinfo(ahc, &devinfo); 2664 printf("INITIATOR_MSG_OUT"); 2665 } 2666 #endif 2667 phasemis = bus_phase != P_MESGOUT; 2668 if (phasemis) { 2669 #ifdef AHC_DEBUG 2670 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { 2671 printf(" PHASEMIS %s\n", 2672 ahc_lookup_phase_entry(bus_phase) 2673 ->phasemsg); 2674 } 2675 #endif 2676 if (bus_phase == P_MESGIN) { 2677 /* 2678 * Change gears and see if 2679 * this messages is of interest to 2680 * us or should be passed back to 2681 * the sequencer. 2682 */ 2683 ahc_outb(ahc, CLRSINT1, CLRATNO); 2684 ahc->send_msg_perror = FALSE; 2685 ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN; 2686 ahc->msgin_index = 0; 2687 goto reswitch; 2688 } 2689 end_session = TRUE; 2690 break; 2691 } 2692 2693 if (ahc->send_msg_perror) { 2694 ahc_outb(ahc, CLRSINT1, CLRATNO); 2695 ahc_outb(ahc, CLRSINT1, CLRREQINIT); 2696 #ifdef AHC_DEBUG 2697 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) 2698 printf(" byte 0x%x\n", ahc->send_msg_perror); 2699 #endif 2700 ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR); 2701 break; 2702 } 2703 2704 msgdone = ahc->msgout_index == ahc->msgout_len; 2705 if (msgdone) { 2706 /* 2707 * The target has requested a retry. 2708 * Re-assert ATN, reset our message index to 2709 * 0, and try again. 
2710 */ 2711 ahc->msgout_index = 0; 2712 ahc_assert_atn(ahc); 2713 } 2714 2715 lastbyte = ahc->msgout_index == (ahc->msgout_len - 1); 2716 if (lastbyte) { 2717 /* Last byte is signified by dropping ATN */ 2718 ahc_outb(ahc, CLRSINT1, CLRATNO); 2719 } 2720 2721 /* 2722 * Clear our interrupt status and present 2723 * the next byte on the bus. 2724 */ 2725 ahc_outb(ahc, CLRSINT1, CLRREQINIT); 2726 #ifdef AHC_DEBUG 2727 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) 2728 printf(" byte 0x%x\n", 2729 ahc->msgout_buf[ahc->msgout_index]); 2730 #endif 2731 ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]); 2732 break; 2733 } 2734 case MSG_TYPE_INITIATOR_MSGIN: 2735 { 2736 int phasemis; 2737 int message_done; 2738 2739 #ifdef AHC_DEBUG 2740 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { 2741 ahc_print_devinfo(ahc, &devinfo); 2742 printf("INITIATOR_MSG_IN"); 2743 } 2744 #endif 2745 phasemis = bus_phase != P_MESGIN; 2746 if (phasemis) { 2747 #ifdef AHC_DEBUG 2748 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { 2749 printf(" PHASEMIS %s\n", 2750 ahc_lookup_phase_entry(bus_phase) 2751 ->phasemsg); 2752 } 2753 #endif 2754 ahc->msgin_index = 0; 2755 if (bus_phase == P_MESGOUT 2756 && (ahc->send_msg_perror == TRUE 2757 || (ahc->msgout_len != 0 2758 && ahc->msgout_index == 0))) { 2759 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 2760 goto reswitch; 2761 } 2762 end_session = TRUE; 2763 break; 2764 } 2765 2766 /* Pull the byte in without acking it */ 2767 ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL); 2768 #ifdef AHC_DEBUG 2769 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) 2770 printf(" byte 0x%x\n", 2771 ahc->msgin_buf[ahc->msgin_index]); 2772 #endif 2773 2774 message_done = ahc_parse_msg(ahc, &devinfo); 2775 2776 if (message_done) { 2777 /* 2778 * Clear our incoming message buffer in case there 2779 * is another message following this one. 2780 */ 2781 ahc->msgin_index = 0; 2782 2783 /* 2784 * If this message illicited a response, 2785 * assert ATN so the target takes us to the 2786 * message out phase. 2787 */ 2788 if (ahc->msgout_len != 0) { 2789 #ifdef AHC_DEBUG 2790 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { 2791 ahc_print_devinfo(ahc, &devinfo); 2792 printf("Asserting ATN for response\n"); 2793 } 2794 #endif 2795 ahc_assert_atn(ahc); 2796 } 2797 } else 2798 ahc->msgin_index++; 2799 2800 if (message_done == MSGLOOP_TERMINATED) { 2801 end_session = TRUE; 2802 } else { 2803 /* Ack the byte */ 2804 ahc_outb(ahc, CLRSINT1, CLRREQINIT); 2805 ahc_inb(ahc, SCSIDATL); 2806 } 2807 break; 2808 } 2809 case MSG_TYPE_TARGET_MSGIN: 2810 { 2811 int msgdone; 2812 int msgout_request; 2813 2814 if (ahc->msgout_len == 0) 2815 panic("Target MSGIN with no active message"); 2816 2817 /* 2818 * If we interrupted a mesgout session, the initiator 2819 * will not know this until our first REQ. So, we 2820 * only honor mesgout requests after we've sent our 2821 * first byte. 2822 */ 2823 if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0 2824 && ahc->msgout_index > 0) 2825 msgout_request = TRUE; 2826 else 2827 msgout_request = FALSE; 2828 2829 if (msgout_request) { 2830 2831 /* 2832 * Change gears and see if 2833 * this messages is of interest to 2834 * us or should be passed back to 2835 * the sequencer. 
2836 */ 2837 ahc->msg_type = MSG_TYPE_TARGET_MSGOUT; 2838 ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO); 2839 ahc->msgin_index = 0; 2840 /* Dummy read to REQ for first byte */ 2841 ahc_inb(ahc, SCSIDATL); 2842 ahc_outb(ahc, SXFRCTL0, 2843 ahc_inb(ahc, SXFRCTL0) | SPIOEN); 2844 break; 2845 } 2846 2847 msgdone = ahc->msgout_index == ahc->msgout_len; 2848 if (msgdone) { 2849 ahc_outb(ahc, SXFRCTL0, 2850 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN); 2851 end_session = TRUE; 2852 break; 2853 } 2854 2855 /* 2856 * Present the next byte on the bus. 2857 */ 2858 ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN); 2859 ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]); 2860 break; 2861 } 2862 case MSG_TYPE_TARGET_MSGOUT: 2863 { 2864 int lastbyte; 2865 int msgdone; 2866 2867 /* 2868 * The initiator signals that this is 2869 * the last byte by dropping ATN. 2870 */ 2871 lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0; 2872 2873 /* 2874 * Read the latched byte, but turn off SPIOEN first 2875 * so that we don't inadvertently cause a REQ for the 2876 * next byte. 2877 */ 2878 ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN); 2879 ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL); 2880 msgdone = ahc_parse_msg(ahc, &devinfo); 2881 if (msgdone == MSGLOOP_TERMINATED) { 2882 /* 2883 * The message is *really* done in that it caused 2884 * us to go to bus free. The sequencer has already 2885 * been reset at this point, so pull the ejection 2886 * handle. 2887 */ 2888 return; 2889 } 2890 2891 ahc->msgin_index++; 2892 2893 /* 2894 * XXX Read spec about initiator dropping ATN too soon 2895 * and use msgdone to detect it. 2896 */ 2897 if (msgdone == MSGLOOP_MSGCOMPLETE) { 2898 ahc->msgin_index = 0; 2899 2900 /* 2901 * If this message illicited a response, transition 2902 * to the Message in phase and send it. 2903 */ 2904 if (ahc->msgout_len != 0) { 2905 ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO); 2906 ahc_outb(ahc, SXFRCTL0, 2907 ahc_inb(ahc, SXFRCTL0) | SPIOEN); 2908 ahc->msg_type = MSG_TYPE_TARGET_MSGIN; 2909 ahc->msgin_index = 0; 2910 break; 2911 } 2912 } 2913 2914 if (lastbyte) 2915 end_session = TRUE; 2916 else { 2917 /* Ask for the next byte. */ 2918 ahc_outb(ahc, SXFRCTL0, 2919 ahc_inb(ahc, SXFRCTL0) | SPIOEN); 2920 } 2921 2922 break; 2923 } 2924 default: 2925 panic("Unknown REQINIT message type"); 2926 } 2927 2928 if (end_session) { 2929 ahc_clear_msg_state(ahc); 2930 ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP); 2931 } else 2932 ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP); 2933 } 2934 2935 /* 2936 * See if we sent a particular extended message to the target. 2937 * If "full" is true, return true only if the target saw the full 2938 * message. If "full" is false, return true if the target saw at 2939 * least the first byte of the message. 
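 * Whether the target saw a byte is judged by how far msgout_index
 * has advanced past that message's position in the outgoing buffer.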
2940 */ 2941 static int 2942 ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full) 2943 { 2944 int found; 2945 u_int index; 2946 2947 found = FALSE; 2948 index = 0; 2949 2950 while (index < ahc->msgout_len) { 2951 if (ahc->msgout_buf[index] == MSG_EXTENDED) { 2952 u_int end_index; 2953 2954 end_index = index + 1 + ahc->msgout_buf[index + 1]; 2955 if (ahc->msgout_buf[index+2] == msgval 2956 && type == AHCMSG_EXT) { 2957 2958 if (full) { 2959 if (ahc->msgout_index > end_index) 2960 found = TRUE; 2961 } else if (ahc->msgout_index > index) 2962 found = TRUE; 2963 } 2964 index = end_index; 2965 } else if (ahc->msgout_buf[index] >= MSG_SIMPLE_TASK 2966 && ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) { 2967 2968 /* Skip tag type and tag id or residue param*/ 2969 index += 2; 2970 } else { 2971 /* Single byte message */ 2972 if (type == AHCMSG_1B 2973 && ahc->msgout_buf[index] == msgval 2974 && ahc->msgout_index > index) 2975 found = TRUE; 2976 index++; 2977 } 2978 2979 if (found) 2980 break; 2981 } 2982 return (found); 2983 } 2984 2985 /* 2986 * Wait for a complete incoming message, parse it, and respond accordingly. 2987 */ 2988 static int 2989 ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2990 { 2991 struct ahc_initiator_tinfo *tinfo; 2992 struct ahc_tmode_tstate *tstate; 2993 int reject; 2994 int done; 2995 int response; 2996 u_int targ_scsirate; 2997 2998 done = MSGLOOP_IN_PROG; 2999 response = FALSE; 3000 reject = FALSE; 3001 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 3002 devinfo->target, &tstate); 3003 targ_scsirate = tinfo->scsirate; 3004 3005 /* 3006 * Parse as much of the message as is available, 3007 * rejecting it if we don't support it. When 3008 * the entire message is available and has been 3009 * handled, return MSGLOOP_MSGCOMPLETE, indicating 3010 * that we have parsed an entire message. 3011 * 3012 * In the case of extended messages, we accept the length 3013 * byte outright and perform more checking once we know the 3014 * extended message type. 3015 */ 3016 switch (ahc->msgin_buf[0]) { 3017 case MSG_DISCONNECT: 3018 case MSG_SAVEDATAPOINTER: 3019 case MSG_CMDCOMPLETE: 3020 case MSG_RESTOREPOINTERS: 3021 case MSG_IGN_WIDE_RESIDUE: 3022 /* 3023 * End our message loop as these are messages 3024 * the sequencer handles on its own. 3025 */ 3026 done = MSGLOOP_TERMINATED; 3027 break; 3028 case MSG_MESSAGE_REJECT: 3029 response = ahc_handle_msg_reject(ahc, devinfo); 3030 /* FALLTHROUGH */ 3031 case MSG_NOOP: 3032 done = MSGLOOP_MSGCOMPLETE; 3033 break; 3034 case MSG_EXTENDED: 3035 { 3036 /* Wait for enough of the message to begin validation */ 3037 if (ahc->msgin_index < 2) 3038 break; 3039 switch (ahc->msgin_buf[2]) { 3040 case MSG_EXT_SDTR: 3041 { 3042 struct ahc_syncrate *syncrate; 3043 u_int period; 3044 u_int ppr_options; 3045 u_int offset; 3046 u_int saved_offset; 3047 3048 if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) { 3049 reject = TRUE; 3050 break; 3051 } 3052 3053 /* 3054 * Wait until we have both args before validating 3055 * and acting on this message. 3056 * 3057 * Add one to MSG_EXT_SDTR_LEN to account for 3058 * the extended message preamble. 
3059 */ 3060 if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1)) 3061 break; 3062 3063 period = ahc->msgin_buf[3]; 3064 ppr_options = 0; 3065 saved_offset = offset = ahc->msgin_buf[4]; 3066 syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period, 3067 &ppr_options, 3068 devinfo->role); 3069 ahc_validate_offset(ahc, tinfo, syncrate, &offset, 3070 targ_scsirate & WIDEXFER, 3071 devinfo->role); 3072 if (bootverbose) { 3073 printf("(%s:%c:%d:%d): Received " 3074 "SDTR period %x, offset %x\n\t" 3075 "Filtered to period %x, offset %x\n", 3076 ahc_name(ahc), devinfo->channel, 3077 devinfo->target, devinfo->lun, 3078 ahc->msgin_buf[3], saved_offset, 3079 period, offset); 3080 } 3081 ahc_set_syncrate(ahc, devinfo, 3082 syncrate, period, 3083 offset, ppr_options, 3084 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3085 /*paused*/TRUE); 3086 3087 /* 3088 * See if we initiated Sync Negotiation 3089 * and didn't have to fall down to async 3090 * transfers. 3091 */ 3092 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) { 3093 /* We started it */ 3094 if (saved_offset != offset) { 3095 /* Went too low - force async */ 3096 reject = TRUE; 3097 } 3098 } else { 3099 /* 3100 * Send our own SDTR in reply 3101 */ 3102 if (bootverbose 3103 && devinfo->role == ROLE_INITIATOR) { 3104 printf("(%s:%c:%d:%d): Target " 3105 "Initiated SDTR\n", 3106 ahc_name(ahc), devinfo->channel, 3107 devinfo->target, devinfo->lun); 3108 } 3109 ahc->msgout_index = 0; 3110 ahc->msgout_len = 0; 3111 ahc_construct_sdtr(ahc, devinfo, 3112 period, offset); 3113 ahc->msgout_index = 0; 3114 response = TRUE; 3115 } 3116 done = MSGLOOP_MSGCOMPLETE; 3117 break; 3118 } 3119 case MSG_EXT_WDTR: 3120 { 3121 u_int bus_width; 3122 u_int saved_width; 3123 u_int sending_reply; 3124 3125 sending_reply = FALSE; 3126 if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) { 3127 reject = TRUE; 3128 break; 3129 } 3130 3131 /* 3132 * Wait until we have our arg before validating 3133 * and acting on this message. 3134 * 3135 * Add one to MSG_EXT_WDTR_LEN to account for 3136 * the extended message preamble. 3137 */ 3138 if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1)) 3139 break; 3140 3141 bus_width = ahc->msgin_buf[3]; 3142 saved_width = bus_width; 3143 ahc_validate_width(ahc, tinfo, &bus_width, 3144 devinfo->role); 3145 if (bootverbose) { 3146 printf("(%s:%c:%d:%d): Received WDTR " 3147 "%x filtered to %x\n", 3148 ahc_name(ahc), devinfo->channel, 3149 devinfo->target, devinfo->lun, 3150 saved_width, bus_width); 3151 } 3152 3153 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, TRUE)) { 3154 /* 3155 * Don't send a WDTR back to the 3156 * target, since we asked first. 3157 * If the width went higher than our 3158 * request, reject it. 3159 */ 3160 if (saved_width > bus_width) { 3161 reject = TRUE; 3162 printf("(%s:%c:%d:%d): requested %dBit " 3163 "transfers. 
Rejecting...\n", 3164 ahc_name(ahc), devinfo->channel, 3165 devinfo->target, devinfo->lun, 3166 8 * (0x01 << bus_width)); 3167 bus_width = 0; 3168 } 3169 } else { 3170 /* 3171 * Send our own WDTR in reply 3172 */ 3173 if (bootverbose 3174 && devinfo->role == ROLE_INITIATOR) { 3175 printf("(%s:%c:%d:%d): Target " 3176 "Initiated WDTR\n", 3177 ahc_name(ahc), devinfo->channel, 3178 devinfo->target, devinfo->lun); 3179 } 3180 ahc->msgout_index = 0; 3181 ahc->msgout_len = 0; 3182 ahc_construct_wdtr(ahc, devinfo, bus_width); 3183 ahc->msgout_index = 0; 3184 response = TRUE; 3185 sending_reply = TRUE; 3186 } 3187 ahc_set_width(ahc, devinfo, bus_width, 3188 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3189 /*paused*/TRUE); 3190 /* After a wide message, we are async */ 3191 ahc_set_syncrate(ahc, devinfo, 3192 /*syncrate*/NULL, /*period*/0, 3193 /*offset*/0, /*ppr_options*/0, 3194 AHC_TRANS_ACTIVE, /*paused*/TRUE); 3195 if (sending_reply == FALSE && reject == FALSE) { 3196 3197 if (tinfo->goal.offset) { 3198 ahc->msgout_index = 0; 3199 ahc->msgout_len = 0; 3200 ahc_build_transfer_msg(ahc, devinfo); 3201 ahc->msgout_index = 0; 3202 response = TRUE; 3203 } 3204 } 3205 done = MSGLOOP_MSGCOMPLETE; 3206 break; 3207 } 3208 case MSG_EXT_PPR: 3209 { 3210 struct ahc_syncrate *syncrate; 3211 u_int period; 3212 u_int offset; 3213 u_int bus_width; 3214 u_int ppr_options; 3215 u_int saved_width; 3216 u_int saved_offset; 3217 u_int saved_ppr_options; 3218 3219 if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) { 3220 reject = TRUE; 3221 break; 3222 } 3223 3224 /* 3225 * Wait until we have all args before validating 3226 * and acting on this message. 3227 * 3228 * Add one to MSG_EXT_PPR_LEN to account for 3229 * the extended message preamble. 3230 */ 3231 if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1)) 3232 break; 3233 3234 period = ahc->msgin_buf[3]; 3235 offset = ahc->msgin_buf[5]; 3236 bus_width = ahc->msgin_buf[6]; 3237 saved_width = bus_width; 3238 ppr_options = ahc->msgin_buf[7]; 3239 /* 3240 * According to the spec, a DT only 3241 * period factor with no DT option 3242 * set implies async. 3243 */ 3244 if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0 3245 && period == 9) 3246 offset = 0; 3247 saved_ppr_options = ppr_options; 3248 saved_offset = offset; 3249 3250 /* 3251 * Mask out any options we don't support 3252 * on any controller. Transfer options are 3253 * only available if we are negotiating wide. 3254 */ 3255 ppr_options &= MSG_EXT_PPR_DT_REQ; 3256 if (bus_width == 0) 3257 ppr_options = 0; 3258 3259 ahc_validate_width(ahc, tinfo, &bus_width, 3260 devinfo->role); 3261 syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period, 3262 &ppr_options, 3263 devinfo->role); 3264 ahc_validate_offset(ahc, tinfo, syncrate, 3265 &offset, bus_width, 3266 devinfo->role); 3267 3268 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, TRUE)) { 3269 /* 3270 * If we are unable to do any of the 3271 * requested options (we went too low), 3272 * then we'll have to reject the message. 
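 * In that case the transfer settings are forced back to async/narrow
 * and a MESSAGE REJECT is returned instead of a new PPR.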
3273 */ 3274 if (saved_width > bus_width 3275 || saved_offset != offset 3276 || saved_ppr_options != ppr_options) { 3277 reject = TRUE; 3278 period = 0; 3279 offset = 0; 3280 bus_width = 0; 3281 ppr_options = 0; 3282 syncrate = NULL; 3283 } 3284 } else { 3285 if (devinfo->role != ROLE_TARGET) 3286 printf("(%s:%c:%d:%d): Target " 3287 "Initiated PPR\n", 3288 ahc_name(ahc), devinfo->channel, 3289 devinfo->target, devinfo->lun); 3290 else 3291 printf("(%s:%c:%d:%d): Initiator " 3292 "Initiated PPR\n", 3293 ahc_name(ahc), devinfo->channel, 3294 devinfo->target, devinfo->lun); 3295 ahc->msgout_index = 0; 3296 ahc->msgout_len = 0; 3297 ahc_construct_ppr(ahc, devinfo, period, offset, 3298 bus_width, ppr_options); 3299 ahc->msgout_index = 0; 3300 response = TRUE; 3301 } 3302 if (bootverbose) { 3303 printf("(%s:%c:%d:%d): Received PPR width %x, " 3304 "period %x, offset %x,options %x\n" 3305 "\tFiltered to width %x, period %x, " 3306 "offset %x, options %x\n", 3307 ahc_name(ahc), devinfo->channel, 3308 devinfo->target, devinfo->lun, 3309 saved_width, ahc->msgin_buf[3], 3310 saved_offset, saved_ppr_options, 3311 bus_width, period, offset, ppr_options); 3312 } 3313 ahc_set_width(ahc, devinfo, bus_width, 3314 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3315 /*paused*/TRUE); 3316 ahc_set_syncrate(ahc, devinfo, 3317 syncrate, period, 3318 offset, ppr_options, 3319 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3320 /*paused*/TRUE); 3321 done = MSGLOOP_MSGCOMPLETE; 3322 break; 3323 } 3324 default: 3325 /* Unknown extended message. Reject it. */ 3326 reject = TRUE; 3327 break; 3328 } 3329 break; 3330 } 3331 #ifdef AHC_TARGET_MODE 3332 case MSG_BUS_DEV_RESET: 3333 ahc_handle_devreset(ahc, devinfo, 3334 CAM_BDR_SENT, 3335 "Bus Device Reset Received", 3336 /*verbose_level*/0); 3337 ahc_restart(ahc); 3338 done = MSGLOOP_TERMINATED; 3339 break; 3340 case MSG_ABORT_TAG: 3341 case MSG_ABORT: 3342 case MSG_CLEAR_QUEUE: 3343 { 3344 int tag; 3345 3346 /* Target mode messages */ 3347 if (devinfo->role != ROLE_TARGET) { 3348 reject = TRUE; 3349 break; 3350 } 3351 tag = SCB_LIST_NULL; 3352 if (ahc->msgin_buf[0] == MSG_ABORT_TAG) 3353 tag = ahc_inb(ahc, INITIATOR_TAG); 3354 ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, 3355 devinfo->lun, tag, ROLE_TARGET, 3356 CAM_REQ_ABORTED); 3357 3358 tstate = ahc->enabled_targets[devinfo->our_scsiid]; 3359 if (tstate != NULL) { 3360 struct ahc_tmode_lstate* lstate; 3361 3362 lstate = tstate->enabled_luns[devinfo->lun]; 3363 if (lstate != NULL) { 3364 ahc_queue_lstate_event(ahc, lstate, 3365 devinfo->our_scsiid, 3366 ahc->msgin_buf[0], 3367 /*arg*/tag); 3368 ahc_send_lstate_events(ahc, lstate); 3369 } 3370 } 3371 ahc_restart(ahc); 3372 done = MSGLOOP_TERMINATED; 3373 break; 3374 } 3375 #endif 3376 case MSG_TERM_IO_PROC: 3377 default: 3378 reject = TRUE; 3379 break; 3380 } 3381 3382 if (reject) { 3383 /* 3384 * Setup to reject the message. 3385 */ 3386 ahc->msgout_index = 0; 3387 ahc->msgout_len = 1; 3388 ahc->msgout_buf[0] = MSG_MESSAGE_REJECT; 3389 done = MSGLOOP_MSGCOMPLETE; 3390 response = TRUE; 3391 } 3392 3393 if (done != MSGLOOP_IN_PROG && !response) 3394 /* Clear the outgoing message buffer */ 3395 ahc->msgout_len = 0; 3396 3397 return (done); 3398 } 3399 3400 /* 3401 * Process a message reject message. 3402 */ 3403 static int 3404 ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 3405 { 3406 /* 3407 * What we care about here is if we had an 3408 * outstanding SDTR or WDTR message for this 3409 * target. 
If we did, this is a signal that 3410 * the target is refusing negotiation. 3411 */ 3412 struct scb *scb; 3413 struct ahc_initiator_tinfo *tinfo; 3414 struct ahc_tmode_tstate *tstate; 3415 u_int scb_index; 3416 u_int last_msg; 3417 int response = 0; 3418 3419 scb_index = ahc_inb(ahc, SCB_TAG); 3420 scb = ahc_lookup_scb(ahc, scb_index); 3421 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, 3422 devinfo->our_scsiid, 3423 devinfo->target, &tstate); 3424 /* Might be necessary */ 3425 last_msg = ahc_inb(ahc, LAST_MSG); 3426 3427 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) { 3428 /* 3429 * Target does not support the PPR message. 3430 * Attempt to negotiate SPI-2 style. 3431 */ 3432 if (bootverbose) { 3433 printf("(%s:%c:%d:%d): PPR Rejected. " 3434 "Trying WDTR/SDTR\n", 3435 ahc_name(ahc), devinfo->channel, 3436 devinfo->target, devinfo->lun); 3437 } 3438 tinfo->goal.ppr_options = 0; 3439 tinfo->curr.transport_version = 2; 3440 tinfo->goal.transport_version = 2; 3441 ahc->msgout_index = 0; 3442 ahc->msgout_len = 0; 3443 ahc_build_transfer_msg(ahc, devinfo); 3444 ahc->msgout_index = 0; 3445 response = 1; 3446 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) { 3447 3448 /* note 8bit xfers */ 3449 if (bootverbose) 3450 printf("(%s:%c:%d:%d): refuses WIDE negotiation. Using " 3451 "8bit transfers\n", ahc_name(ahc), 3452 devinfo->channel, devinfo->target, devinfo->lun); 3453 ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, 3454 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3455 /*paused*/TRUE); 3456 /* 3457 * No need to clear the sync rate. If the target 3458 * did not accept the command, our syncrate is 3459 * unaffected. If the target started the negotiation, 3460 * but rejected our response, we already cleared the 3461 * sync rate before sending our WDTR. 3462 */ 3463 if (tinfo->goal.offset != tinfo->curr.offset) { 3464 3465 /* Start the sync negotiation */ 3466 ahc->msgout_index = 0; 3467 ahc->msgout_len = 0; 3468 ahc_build_transfer_msg(ahc, devinfo); 3469 ahc->msgout_index = 0; 3470 response = 1; 3471 } 3472 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) { 3473 /* note asynch xfers and clear flag */ 3474 ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0, 3475 /*offset*/0, /*ppr_options*/0, 3476 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3477 /*paused*/TRUE); 3478 if (bootverbose) 3479 printf("(%s:%c:%d:%d): refuses synchronous negotiation." 3480 " Using asynchronous transfers\n", 3481 ahc_name(ahc), devinfo->channel, 3482 devinfo->target, devinfo->lun); 3483 } else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) { 3484 int tag_type; 3485 int mask; 3486 3487 tag_type = (scb->hscb->control & MSG_SIMPLE_TASK); 3488 3489 if (tag_type == MSG_SIMPLE_TASK) { 3490 if (bootverbose) 3491 printf("(%s:%c:%d:%d): refuses tagged commands." 3492 " Performing non-tagged I/O\n", 3493 ahc_name(ahc), devinfo->channel, 3494 devinfo->target, devinfo->lun); 3495 ahc_set_tags(ahc, devinfo, AHC_QUEUE_NONE); 3496 mask = ~0x23; 3497 } else { 3498 if (bootverbose) 3499 printf("(%s:%c:%d:%d): refuses %s tagged " 3500 "commands. Performing simple queue " 3501 "tagged I/O only\n", 3502 ahc_name(ahc), devinfo->channel, 3503 devinfo->target, devinfo->lun, 3504 tag_type == MSG_ORDERED_TASK 3505 ? "ordered" : "head of queue"); 3506 ahc_set_tags(ahc, devinfo, AHC_QUEUE_BASIC); 3507 mask = ~0x03; 3508 } 3509 3510 /* 3511 * Resend the identify for this CCB as the target 3512 * may believe that the selection is invalid otherwise. 
 */
3513 */ 3514 ahc_outb(ahc, SCB_CONTROL, 3515 ahc_inb(ahc, SCB_CONTROL) & mask); 3516 scb->hscb->control &= mask; 3517 ahc_set_transaction_tag(scb, /*enabled*/FALSE, 3518 /*type*/MSG_SIMPLE_TASK); 3519 ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG); 3520 ahc_assert_atn(ahc); 3521 3522 /* 3523 * This transaction is now at the head of 3524 * the untagged queue for this target. 3525 */ 3526 if ((ahc->flags & AHC_SCB_BTT) == 0) { 3527 struct scb_tailq *untagged_q; 3528 3529 untagged_q = 3530 &(ahc->untagged_queues[devinfo->target_offset]); 3531 TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe); 3532 scb->flags |= SCB_UNTAGGEDQ; 3533 } 3534 ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun), 3535 scb->hscb->tag); 3536 3537 /* 3538 * Requeue all tagged commands for this target 3539 * currently in our possession so they can be 3540 * converted to untagged commands. 3541 */ 3542 ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb), 3543 SCB_GET_CHANNEL(ahc, scb), 3544 SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL, 3545 ROLE_INITIATOR, CAM_REQUEUE_REQ, 3546 SEARCH_COMPLETE); 3547 } else { 3548 /* 3549 * Otherwise, we ignore it. 3550 */ 3551 if (bootverbose) 3552 printf("%s:%c:%d: Message reject for %x -- ignored\n", 3553 ahc_name(ahc), devinfo->channel, devinfo->target, 3554 last_msg); 3555 } 3556 return (response); 3557 } 3558 3559 /* 3560 * Process an ingnore wide residue message. 3561 */ 3562 static void 3563 ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 3564 { 3565 u_int scb_index; 3566 struct scb *scb; 3567 3568 scb_index = ahc_inb(ahc, SCB_TAG); 3569 scb = ahc_lookup_scb(ahc, scb_index); 3570 /* 3571 * XXX Actually check data direction in the sequencer? 3572 * Perhaps add datadir to some spare bits in the hscb? 3573 */ 3574 if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0 3575 || ahc_get_transfer_dir(scb) != CAM_DIR_IN) { 3576 /* 3577 * Ignore the message if we haven't 3578 * seen an appropriate data phase yet. 3579 */ 3580 } else { 3581 /* 3582 * If the residual occurred on the last 3583 * transfer and the transfer request was 3584 * expected to end on an odd count, do 3585 * nothing. Otherwise, subtract a byte 3586 * and update the residual count accordingly. 3587 */ 3588 uint32_t sgptr; 3589 3590 sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR); 3591 if ((sgptr & SG_LIST_NULL) != 0 3592 && ahc_inb(ahc, DATA_COUNT_ODD) == 1) { 3593 /* 3594 * If the residual occurred on the last 3595 * transfer and the transfer request was 3596 * expected to end on an odd count, do 3597 * nothing. 3598 */ 3599 } else { 3600 struct ahc_dma_seg *sg; 3601 uint32_t data_cnt; 3602 uint32_t data_addr; 3603 uint32_t sglen; 3604 3605 /* Pull in the rest of the sgptr */ 3606 sgptr |= (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24) 3607 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16) 3608 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8); 3609 sgptr &= SG_PTR_MASK; 3610 data_cnt = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+3) << 24) 3611 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+2) << 16) 3612 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+1) << 8) 3613 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT)); 3614 3615 data_addr = (ahc_inb(ahc, SHADDR + 3) << 24) 3616 | (ahc_inb(ahc, SHADDR + 2) << 16) 3617 | (ahc_inb(ahc, SHADDR + 1) << 8) 3618 | (ahc_inb(ahc, SHADDR)); 3619 3620 data_cnt += 1; 3621 data_addr -= 1; 3622 3623 sg = ahc_sg_bus_to_virt(scb, sgptr); 3624 /* 3625 * The residual sg ptr points to the next S/G 3626 * to load so we must go back one. 
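 * If the untransferred byte actually lies in the previous S/G
 * segment, we back up one more entry and rebuild the residual
 * scatter/gather pointer to match.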
3627 */ 3628 sg--; 3629 sglen = ahc_le32toh(sg->len) & AHC_SG_LEN_MASK; 3630 if (sg != scb->sg_list 3631 && sglen < (data_cnt & AHC_SG_LEN_MASK)) { 3632 3633 sg--; 3634 sglen = ahc_le32toh(sg->len); 3635 /* 3636 * Preserve High Address and SG_LIST bits 3637 * while setting the count to 1. 3638 */ 3639 data_cnt = 1 | (sglen & (~AHC_SG_LEN_MASK)); 3640 data_addr = ahc_le32toh(sg->addr) 3641 + (sglen & AHC_SG_LEN_MASK) - 1; 3642 3643 /* 3644 * Increment sg so it points to the 3645 * "next" sg. 3646 */ 3647 sg++; 3648 sgptr = ahc_sg_virt_to_bus(scb, sg); 3649 ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 3, 3650 sgptr >> 24); 3651 ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 2, 3652 sgptr >> 16); 3653 ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 1, 3654 sgptr >> 8); 3655 ahc_outb(ahc, SCB_RESIDUAL_SGPTR, sgptr); 3656 } 3657 3658 ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 3, data_cnt >> 24); 3659 ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 2, data_cnt >> 16); 3660 ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 1, data_cnt >> 8); 3661 ahc_outb(ahc, SCB_RESIDUAL_DATACNT, data_cnt); 3662 } 3663 } 3664 } 3665 3666 3667 /* 3668 * Reinitialize the data pointers for the active transfer 3669 * based on its current residual. 3670 */ 3671 static void 3672 ahc_reinitialize_dataptrs(struct ahc_softc *ahc) 3673 { 3674 struct scb *scb; 3675 struct ahc_dma_seg *sg; 3676 u_int scb_index; 3677 uint32_t sgptr; 3678 uint32_t resid; 3679 uint32_t dataptr; 3680 3681 scb_index = ahc_inb(ahc, SCB_TAG); 3682 scb = ahc_lookup_scb(ahc, scb_index); 3683 sgptr = (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24) 3684 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16) 3685 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8) 3686 | ahc_inb(ahc, SCB_RESIDUAL_SGPTR); 3687 3688 sgptr &= SG_PTR_MASK; 3689 sg = ahc_sg_bus_to_virt(scb, sgptr); 3690 3691 /* The residual sg_ptr always points to the next sg */ 3692 sg--; 3693 3694 resid = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 2) << 16) 3695 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 1) << 8) 3696 | ahc_inb(ahc, SCB_RESIDUAL_DATACNT); 3697 3698 dataptr = ahc_le32toh(sg->addr) 3699 + (ahc_le32toh(sg->len) & AHC_SG_LEN_MASK) 3700 - resid; 3701 if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { 3702 u_int dscommand1; 3703 3704 dscommand1 = ahc_inb(ahc, DSCOMMAND1); 3705 ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0); 3706 ahc_outb(ahc, HADDR, 3707 (ahc_le32toh(sg->len) >> 24) & SG_HIGH_ADDR_BITS); 3708 ahc_outb(ahc, DSCOMMAND1, dscommand1); 3709 } 3710 ahc_outb(ahc, HADDR + 3, dataptr >> 24); 3711 ahc_outb(ahc, HADDR + 2, dataptr >> 16); 3712 ahc_outb(ahc, HADDR + 1, dataptr >> 8); 3713 ahc_outb(ahc, HADDR, dataptr); 3714 ahc_outb(ahc, HCNT + 2, resid >> 16); 3715 ahc_outb(ahc, HCNT + 1, resid >> 8); 3716 ahc_outb(ahc, HCNT, resid); 3717 if ((ahc->features & AHC_ULTRA2) == 0) { 3718 ahc_outb(ahc, STCNT + 2, resid >> 16); 3719 ahc_outb(ahc, STCNT + 1, resid >> 8); 3720 ahc_outb(ahc, STCNT, resid); 3721 } 3722 } 3723 3724 /* 3725 * Handle the effects of issuing a bus device reset message. 3726 */ 3727 static void 3728 ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 3729 cam_status status, const char *message, int verbose_level) 3730 { 3731 #ifdef AHC_TARGET_MODE 3732 struct ahc_tmode_tstate* tstate; 3733 u_int lun; 3734 #endif 3735 int found; 3736 3737 found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, 3738 CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role, 3739 status); 3740 3741 #ifdef AHC_TARGET_MODE 3742 /* 3743 * Send an immediate notify ccb to all target mord peripheral 3744 * drivers affected by this action. 
3745 */ 3746 tstate = ahc->enabled_targets[devinfo->our_scsiid]; 3747 if (tstate != NULL) { 3748 for (lun = 0; lun < AHC_NUM_LUNS; lun++) { 3749 struct ahc_tmode_lstate* lstate; 3750 3751 lstate = tstate->enabled_luns[lun]; 3752 if (lstate == NULL) 3753 continue; 3754 3755 ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid, 3756 MSG_BUS_DEV_RESET, /*arg*/0); 3757 ahc_send_lstate_events(ahc, lstate); 3758 } 3759 } 3760 #endif 3761 3762 /* 3763 * Go back to async/narrow transfers and renegotiate. 3764 */ 3765 ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, 3766 AHC_TRANS_CUR, /*paused*/TRUE); 3767 ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, 3768 /*period*/0, /*offset*/0, /*ppr_options*/0, 3769 AHC_TRANS_CUR, /*paused*/TRUE); 3770 3771 ahc_send_async(ahc, devinfo->channel, devinfo->target, 3772 CAM_LUN_WILDCARD, AC_SENT_BDR, NULL); 3773 3774 if (message != NULL 3775 && (verbose_level <= bootverbose)) 3776 printf("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc), 3777 message, devinfo->channel, devinfo->target, found); 3778 } 3779 3780 #ifdef AHC_TARGET_MODE 3781 static void 3782 ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 3783 struct scb *scb) 3784 { 3785 3786 /* 3787 * To facilitate adding multiple messages together, 3788 * each routine should increment the index and len 3789 * variables instead of setting them explicitly. 3790 */ 3791 ahc->msgout_index = 0; 3792 ahc->msgout_len = 0; 3793 3794 if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0) 3795 ahc_build_transfer_msg(ahc, devinfo); 3796 else 3797 panic("ahc_intr: AWAITING target message with no message"); 3798 3799 ahc->msgout_index = 0; 3800 ahc->msg_type = MSG_TYPE_TARGET_MSGIN; 3801 } 3802 #endif 3803 3804 int 3805 ahc_softc_init(struct ahc_softc *ahc) 3806 { 3807 3808 /* The IRQMS bit is only valid on VL and EISA chips */ 3809 if ((ahc->chip & AHC_PCI) == 0) 3810 ahc->unpause = ahc_inb(ahc, HCNTRL) & IRQMS; 3811 else 3812 ahc->unpause = 0; 3813 ahc->pause = ahc->unpause | PAUSE; 3814 /* XXX The shared scb data stuff should be deprecated */ 3815 if (ahc->scb_data == NULL) { 3816 ahc->scb_data = malloc(sizeof(*ahc->scb_data), 3817 M_DEVBUF, M_NOWAIT); 3818 if (ahc->scb_data == NULL) 3819 return (ENOMEM); 3820 memset(ahc->scb_data, 0, sizeof(*ahc->scb_data)); 3821 } 3822 3823 return (0); 3824 } 3825 3826 void 3827 ahc_softc_insert(struct ahc_softc *ahc) 3828 { 3829 struct ahc_softc *list_ahc; 3830 3831 #if AHC_PCI_CONFIG > 0 3832 /* 3833 * Second Function PCI devices need to inherit some 3834 * settings from function 0. 3835 */ 3836 if ((ahc->chip & AHC_BUS_MASK) == AHC_PCI 3837 && (ahc->features & AHC_MULTI_FUNC) != 0) { 3838 TAILQ_FOREACH(list_ahc, &ahc_tailq, links) { 3839 ahc_dev_softc_t list_pci; 3840 ahc_dev_softc_t pci; 3841 3842 list_pci = list_ahc->dev_softc; 3843 pci = ahc->dev_softc; 3844 if (ahc_get_pci_slot(list_pci) == ahc_get_pci_slot(pci) 3845 && ahc_get_pci_bus(list_pci) == ahc_get_pci_bus(pci)) { 3846 struct ahc_softc *master; 3847 struct ahc_softc *slave; 3848 3849 if (ahc_get_pci_function(list_pci) == 0) { 3850 master = list_ahc; 3851 slave = ahc; 3852 } else { 3853 master = ahc; 3854 slave = list_ahc; 3855 } 3856 slave->flags &= ~AHC_BIOS_ENABLED; 3857 slave->flags |= 3858 master->flags & AHC_BIOS_ENABLED; 3859 slave->flags &= ~AHC_PRIMARY_CHANNEL; 3860 slave->flags |= 3861 master->flags & AHC_PRIMARY_CHANNEL; 3862 break; 3863 } 3864 } 3865 } 3866 #endif 3867 3868 /* 3869 * Insertion sort into our list of softcs. 
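 * Entries are ordered by ahc_softc_comp() so controllers always
 * appear in a consistent order.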
3870 */ 3871 list_ahc = TAILQ_FIRST(&ahc_tailq); 3872 while (list_ahc != NULL 3873 && ahc_softc_comp(list_ahc, ahc) <= 0) 3874 list_ahc = TAILQ_NEXT(list_ahc, links); 3875 if (list_ahc != NULL) 3876 TAILQ_INSERT_BEFORE(list_ahc, ahc, links); 3877 else 3878 TAILQ_INSERT_TAIL(&ahc_tailq, ahc, links); 3879 ahc->init_level++; 3880 } 3881 3882 /* 3883 * Verify that the passed in softc pointer is for a 3884 * controller that is still configured. 3885 */ 3886 struct ahc_softc * 3887 ahc_find_softc(struct ahc_softc *ahc) 3888 { 3889 struct ahc_softc *list_ahc; 3890 3891 TAILQ_FOREACH(list_ahc, &ahc_tailq, links) { 3892 if (list_ahc == ahc) 3893 return (ahc); 3894 } 3895 return (NULL); 3896 } 3897 3898 void 3899 ahc_set_unit(struct ahc_softc *ahc, int unit) 3900 { 3901 ahc->unit = unit; 3902 } 3903 3904 void 3905 ahc_set_name(struct ahc_softc *ahc, char *name) 3906 { 3907 if (ahc->name != NULL) 3908 free(ahc->name, M_DEVBUF); 3909 ahc->name = name; 3910 } 3911 3912 void 3913 ahc_free(struct ahc_softc *ahc) 3914 { 3915 int i; 3916 3917 ahc_fini_scbdata(ahc); 3918 switch (ahc->init_level) { 3919 default: 3920 case 2: 3921 ahc_shutdown(ahc); 3922 /* TAILQ_REMOVE(&ahc_tailq, ahc, links); XXX */ 3923 /* FALLTHROUGH */ 3924 case 1: 3925 bus_dmamap_unload(ahc->parent_dmat, ahc->shared_data_dmamap); 3926 bus_dmamap_destroy(ahc->parent_dmat, ahc->shared_data_dmamap); 3927 bus_dmamem_unmap(ahc->parent_dmat, (caddr_t)ahc->qoutfifo, ahc->shared_data_size); 3928 bus_dmamem_free(ahc->parent_dmat, &ahc->shared_data_seg, ahc->shared_data_nseg); 3929 break; 3930 case 0: 3931 break; 3932 } 3933 3934 ahc_platform_free(ahc); 3935 for (i = 0; i < AHC_NUM_TARGETS; i++) { 3936 struct ahc_tmode_tstate *tstate; 3937 3938 tstate = ahc->enabled_targets[i]; 3939 if (tstate != NULL) { 3940 #if AHC_TARGET_MODE 3941 int j; 3942 3943 for (j = 0; j < AHC_NUM_LUNS; j++) { 3944 struct ahc_tmode_lstate *lstate; 3945 3946 lstate = tstate->enabled_luns[j]; 3947 if (lstate != NULL) { 3948 /*xpt_free_path(lstate->path);*/ 3949 free(lstate, M_DEVBUF); 3950 } 3951 } 3952 #endif 3953 free(tstate, M_DEVBUF); 3954 } 3955 } 3956 #if AHC_TARGET_MODE 3957 if (ahc->black_hole != NULL) { 3958 /*xpt_free_path(ahc->black_hole->path);*/ 3959 free(ahc->black_hole, M_DEVBUF); 3960 } 3961 #endif 3962 #ifndef __NetBSD__ 3963 if (ahc->name != NULL) 3964 free(ahc->name, M_DEVBUF); 3965 #endif 3966 if (ahc->seep_config != NULL) 3967 free(ahc->seep_config, M_DEVBUF); 3968 #ifndef __FreeBSD__ 3969 free(ahc, M_DEVBUF); 3970 #endif 3971 return; 3972 } 3973 3974 void 3975 ahc_shutdown(void *arg) 3976 { 3977 struct ahc_softc *ahc; 3978 int i; 3979 3980 ahc = (struct ahc_softc *)arg; 3981 3982 /* This will reset most registers to 0, but not all */ 3983 ahc_reset(ahc); 3984 ahc_outb(ahc, SCSISEQ, 0); 3985 ahc_outb(ahc, SXFRCTL0, 0); 3986 ahc_outb(ahc, DSPCISTATUS, 0); 3987 3988 for (i = TARG_SCSIRATE; i < SCSICONF; i++) 3989 ahc_outb(ahc, i, 0); 3990 } 3991 3992 /* 3993 * Reset the controller and record some information about it 3994 * that is only available just after a reset. 3995 */ 3996 int 3997 ahc_reset(struct ahc_softc *ahc) 3998 { 3999 u_int sblkctl; 4000 u_int sxfrctl1_a, sxfrctl1_b; 4001 int wait; 4002 4003 /* 4004 * Preserve the value of the SXFRCTL1 register for all channels. 4005 * It contains settings that affect termination and we don't want 4006 * to disturb the integrity of the bus. 4007 */ 4008 ahc_pause(ahc); 4009 if ((ahc_inb(ahc, HCNTRL) & CHIPRST) != 0) { 4010 /* 4011 * The chip has not been initialized since 4012 * PCI/EISA/VLB bus reset. 
Don't trust 4013 * "left over BIOS data". 4014 */ 4015 ahc->flags |= AHC_NO_BIOS_INIT; 4016 } 4017 sxfrctl1_b = 0; 4018 if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) { 4019 u_int sblkctl1; 4020 4021 /* 4022 * Save channel B's settings in case this chip 4023 * is setup for TWIN channel operation. 4024 */ 4025 sblkctl1 = ahc_inb(ahc, SBLKCTL); 4026 ahc_outb(ahc, SBLKCTL, sblkctl1 | SELBUSB); 4027 sxfrctl1_b = ahc_inb(ahc, SXFRCTL1); 4028 ahc_outb(ahc, SBLKCTL, sblkctl1 & ~SELBUSB); 4029 } 4030 sxfrctl1_a = ahc_inb(ahc, SXFRCTL1); 4031 4032 ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause); 4033 4034 /* 4035 * Ensure that the reset has finished. We delay 1000us 4036 * prior to reading the register to make sure the chip 4037 * has sufficiently completed its reset to handle register 4038 * accesses. 4039 */ 4040 wait = 1000; 4041 do { 4042 ahc_delay(1000); 4043 } while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK)); 4044 4045 if (wait == 0) { 4046 printf("%s: WARNING - Failed chip reset! " 4047 "Trying to initialize anyway.\n", ahc_name(ahc)); 4048 } 4049 ahc_outb(ahc, HCNTRL, ahc->pause); 4050 4051 /* Determine channel configuration */ 4052 sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE); 4053 /* No Twin Channel PCI cards */ 4054 if ((ahc->chip & AHC_PCI) != 0) 4055 sblkctl &= ~SELBUSB; 4056 switch (sblkctl) { 4057 case 0: 4058 /* Single Narrow Channel */ 4059 break; 4060 case 2: 4061 /* Wide Channel */ 4062 ahc->features |= AHC_WIDE; 4063 break; 4064 case 8: 4065 /* Twin Channel */ 4066 ahc->features |= AHC_TWIN; 4067 break; 4068 default: 4069 printf(" Unsupported adapter type (0x%x). Ignoring\n", sblkctl); 4070 return(-1); 4071 } 4072 4073 /* 4074 * Reload sxfrctl1. 4075 * 4076 * We must always initialize STPWEN to 1 before we 4077 * restore the saved values. STPWEN is initialized 4078 * to a tri-state condition which can only be cleared 4079 * by turning it on. 4080 */ 4081 if ((ahc->features & AHC_TWIN) != 0) { 4082 u_int sblkctl1; 4083 4084 sblkctl1 = ahc_inb(ahc, SBLKCTL); 4085 ahc_outb(ahc, SBLKCTL, sblkctl1 | SELBUSB); 4086 ahc_outb(ahc, SXFRCTL1, sxfrctl1_b); 4087 ahc_outb(ahc, SBLKCTL, sblkctl1 & ~SELBUSB); 4088 } 4089 ahc_outb(ahc, SXFRCTL1, sxfrctl1_a); 4090 4091 #ifdef AHC_DUMP_SEQ 4092 if (ahc->init_level == 0) 4093 ahc_dumpseq(ahc); 4094 #endif 4095 4096 return (0); 4097 } 4098 4099 /* 4100 * Determine the number of SCBs available on the controller 4101 */ 4102 int 4103 ahc_probe_scbs(struct ahc_softc *ahc) { 4104 int i; 4105 4106 for (i = 0; i < AHC_SCB_MAX; i++) { 4107 4108 ahc_outb(ahc, SCBPTR, i); 4109 ahc_outb(ahc, SCB_BASE, i); 4110 if (ahc_inb(ahc, SCB_BASE) != i) 4111 break; 4112 ahc_outb(ahc, SCBPTR, 0); 4113 if (ahc_inb(ahc, SCB_BASE) != 0) 4114 break; 4115 } 4116 return (i); 4117 } 4118 4119 #if 0 4120 static void 4121 ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 4122 { 4123 bus_addr_t *baddr; 4124 4125 baddr = (bus_addr_t *)arg; 4126 *baddr = segs->ds_addr; 4127 } 4128 #endif 4129 4130 static void 4131 ahc_build_free_scb_list(struct ahc_softc *ahc) 4132 { 4133 int scbsize; 4134 int i; 4135 4136 scbsize = 32; 4137 if ((ahc->flags & AHC_LSCBS_ENABLED) != 0) 4138 scbsize = 64; 4139 4140 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 4141 int j; 4142 4143 ahc_outb(ahc, SCBPTR, i); 4144 4145 /* 4146 * Touch all SCB bytes to avoid parity errors 4147 * should one of our debugging routines read 4148 * an otherwise uninitiatlized byte. 
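 * Any value will do; the write simply establishes valid parity for
 * each SCB location before it is first read.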
4149 */ 4150 for (j = 0; j < scbsize; j++) 4151 ahc_outb(ahc, SCB_BASE+j, 0xFF); 4152 4153 /* Clear the control byte. */ 4154 ahc_outb(ahc, SCB_CONTROL, 0); 4155 4156 /* Set the next pointer */ 4157 if ((ahc->flags & AHC_PAGESCBS) != 0) 4158 ahc_outb(ahc, SCB_NEXT, i+1); 4159 else 4160 ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL); 4161 4162 /* Make the tag number, SCSIID, and lun invalid */ 4163 ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL); 4164 ahc_outb(ahc, SCB_SCSIID, 0xFF); 4165 ahc_outb(ahc, SCB_LUN, 0xFF); 4166 } 4167 4168 /* Make sure that the last SCB terminates the free list */ 4169 ahc_outb(ahc, SCBPTR, i-1); 4170 ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL); 4171 } 4172 4173 static int 4174 ahc_init_scbdata(struct ahc_softc *ahc) 4175 { 4176 struct scb_data *scb_data; 4177 4178 scb_data = ahc->scb_data; 4179 SLIST_INIT(&scb_data->free_scbs); 4180 SLIST_INIT(&scb_data->sg_maps); 4181 4182 /* Allocate SCB resources */ 4183 scb_data->scbarray = 4184 (struct scb *)malloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC, 4185 M_DEVBUF, M_NOWAIT); 4186 if (scb_data->scbarray == NULL) 4187 return (ENOMEM); 4188 memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC); 4189 4190 /* Determine the number of hardware SCBs and initialize them */ 4191 4192 scb_data->maxhscbs = ahc_probe_scbs(ahc); 4193 if ((ahc->flags & AHC_PAGESCBS) != 0) { 4194 /* SCB 0 heads the free list */ 4195 ahc_outb(ahc, FREE_SCBH, 0); 4196 } else { 4197 ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL); 4198 } 4199 4200 if (ahc->scb_data->maxhscbs == 0) { 4201 printf("%s: No SCB space found\n", ahc_name(ahc)); 4202 return (ENXIO); 4203 } 4204 4205 ahc_build_free_scb_list(ahc); 4206 4207 /* 4208 * Create our DMA tags. These tags define the kinds of device 4209 * accessible memory allocations and memory mappings we will 4210 * need to perform during normal operation. 4211 * 4212 * Unless we need to further restrict the allocation, we rely 4213 * on the restrictions of the parent dmat, hence the common 4214 * use of MAXADDR and MAXSIZE. 4215 */ 4216 4217 if (ahc_createdmamem(ahc->parent_dmat, 4218 AHC_SCB_MAX * sizeof(struct hardware_scb), ahc->sc_dmaflags, 4219 &scb_data->hscb_dmamap, 4220 (caddr_t *)&scb_data->hscbs, &scb_data->hscb_busaddr, 4221 &scb_data->hscb_seg, &scb_data->hscb_nseg, ahc_name(ahc), 4222 "hardware SCB structures") < 0) 4223 goto error_exit; 4224 4225 scb_data->init_level++; 4226 4227 if (ahc_createdmamem(ahc->parent_dmat, 4228 AHC_SCB_MAX * sizeof(struct scsi_sense_data), ahc->sc_dmaflags, 4229 &scb_data->sense_dmamap, (caddr_t *)&scb_data->sense, 4230 &scb_data->sense_busaddr, &scb_data->sense_seg, 4231 &scb_data->sense_nseg, ahc_name(ahc), "sense buffers") < 0) 4232 goto error_exit; 4233 4234 scb_data->init_level++; 4235 4236 /* Perform initial CCB allocation */ 4237 memset(scb_data->hscbs, 0, 4238 AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb)); 4239 ahc_alloc_scbs(ahc); 4240 scb_data->init_level++; 4241 4242 if (scb_data->numscbs == 0) { 4243 printf("%s: ahc_init_scbdata - " 4244 "Unable to allocate initial scbs\n", 4245 ahc_name(ahc)); 4246 goto error_exit; 4247 } 4248 4249 /* 4250 * Tell the sequencer which SCB will be the next one it receives. 
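 * next_queued_scb is a reserved SCB whose tag is programmed into
 * NEXT_QUEUED_SCB so the sequencer always knows which host SCB to DMA
 * next; ahc_swap_with_next_hscb() keeps this reservation current as
 * commands are queued.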
4251 */ 4252 ahc->next_queued_scb = ahc_get_scb(ahc); 4253 ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag); 4254 4255 /* 4256 * Note that we were successfull 4257 */ 4258 return (0); 4259 4260 error_exit: 4261 4262 return (ENOMEM); 4263 } 4264 4265 static void 4266 ahc_fini_scbdata(struct ahc_softc *ahc) 4267 { 4268 struct scb_data *scb_data; 4269 4270 scb_data = ahc->scb_data; 4271 if (scb_data == NULL) 4272 return; 4273 4274 switch (scb_data->init_level) { 4275 default: 4276 case 5: 4277 { 4278 struct sg_map_node *sg_map; 4279 4280 while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) { 4281 SLIST_REMOVE_HEAD(&scb_data->sg_maps, links); 4282 ahc_freedmamem(ahc->parent_dmat, PAGE_SIZE, 4283 sg_map->sg_dmamap, (caddr_t)sg_map->sg_vaddr, 4284 &sg_map->sg_dmasegs, sg_map->sg_nseg); 4285 free(sg_map, M_DEVBUF); 4286 } 4287 } 4288 /*FALLTHROUGH*/ 4289 case 4: 4290 ahc_freedmamem(ahc->parent_dmat, 4291 AHC_SCB_MAX * sizeof(struct scsi_sense_data), 4292 scb_data->sense_dmamap, (caddr_t)scb_data->sense, 4293 &scb_data->sense_seg, scb_data->sense_nseg); 4294 /*FALLTHROUGH*/ 4295 case 3: 4296 ahc_freedmamem(ahc->parent_dmat, 4297 AHC_SCB_MAX * sizeof(struct hardware_scb), 4298 scb_data->hscb_dmamap, (caddr_t)scb_data->hscbs, 4299 &scb_data->hscb_seg, scb_data->hscb_nseg); 4300 /*FALLTHROUGH*/ 4301 case 2: 4302 case 1: 4303 case 0: 4304 break; 4305 } 4306 if (scb_data->scbarray != NULL) 4307 free(scb_data->scbarray, M_DEVBUF); 4308 } 4309 4310 void 4311 ahc_alloc_scbs(struct ahc_softc *ahc) 4312 { 4313 struct scb_data *scb_data; 4314 struct scb *next_scb; 4315 struct sg_map_node *sg_map; 4316 bus_addr_t physaddr; 4317 struct ahc_dma_seg *segs; 4318 int newcount; 4319 int i; 4320 4321 scb_data = ahc->scb_data; 4322 if (scb_data->numscbs >= AHC_SCB_MAX_ALLOC) 4323 /* Can't allocate any more */ 4324 return; 4325 4326 next_scb = &scb_data->scbarray[scb_data->numscbs]; 4327 4328 sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT); 4329 4330 if (sg_map == NULL) 4331 return; 4332 4333 /* Allocate S/G space for the next batch of SCBS */ 4334 if (ahc_createdmamem(ahc->parent_dmat, PAGE_SIZE, ahc->sc_dmaflags, 4335 &sg_map->sg_dmamap, 4336 (caddr_t *)&sg_map->sg_vaddr, &sg_map->sg_physaddr, 4337 &sg_map->sg_dmasegs, &sg_map->sg_nseg, ahc_name(ahc), 4338 "SG space") < 0) { 4339 free(sg_map, M_DEVBUF); 4340 return; 4341 } 4342 4343 SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links); 4344 4345 segs = sg_map->sg_vaddr; 4346 physaddr = sg_map->sg_physaddr; 4347 4348 newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg))); 4349 newcount = MIN(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs)); 4350 for (i = 0; i < newcount; i++) { 4351 struct scb_platform_data *pdata; 4352 int error; 4353 4354 pdata = (struct scb_platform_data *)malloc(sizeof(*pdata), 4355 M_DEVBUF, M_NOWAIT); 4356 if (pdata == NULL) 4357 break; 4358 next_scb->platform_data = pdata; 4359 next_scb->sg_map = sg_map; 4360 next_scb->sg_list = segs; 4361 /* 4362 * The sequencer always starts with the second entry. 4363 * The first entry is embedded in the scb. 
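 * This is why sg_list_phys below is advanced one ahc_dma_seg past the
 * start of this SCB's slice of the S/G area.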
4364 */ 4365 next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg); 4366 next_scb->ahc_softc = ahc; 4367 next_scb->flags = SCB_FREE; 4368 4369 error = bus_dmamap_create(ahc->parent_dmat, 4370 AHC_MAXTRANSFER_SIZE, AHC_NSEG, MAXPHYS, 0, 4371 BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW|ahc->sc_dmaflags, 4372 &next_scb->dmamap); 4373 if (error != 0) 4374 break; 4375 4376 next_scb->hscb = &scb_data->hscbs[scb_data->numscbs]; 4377 next_scb->hscb->tag = ahc->scb_data->numscbs; 4378 SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, 4379 next_scb, links.sle); 4380 segs += AHC_NSEG; 4381 physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg)); 4382 next_scb++; 4383 ahc->scb_data->numscbs++; 4384 } 4385 } 4386 4387 void 4388 ahc_controller_info(struct ahc_softc *ahc, char *tbuf, size_t l) 4389 { 4390 int len; 4391 char *ep; 4392 4393 ep = tbuf + l; 4394 4395 len = snprintf(tbuf, ep - tbuf, "%s: ", 4396 ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]); 4397 tbuf += len; 4398 if ((ahc->features & AHC_TWIN) != 0) 4399 len = snprintf(tbuf, ep - tbuf, "Twin Channel, A SCSI Id=%d, " 4400 "B SCSI Id=%d, primary %c, ", 4401 ahc->our_id, ahc->our_id_b, 4402 (ahc->flags & AHC_PRIMARY_CHANNEL) + 'A'); 4403 else { 4404 const char *speed; 4405 const char *type; 4406 4407 speed = ""; 4408 if ((ahc->features & AHC_ULTRA) != 0) { 4409 speed = "Ultra "; 4410 } else if ((ahc->features & AHC_DT) != 0) { 4411 speed = "Ultra160 "; 4412 } else if ((ahc->features & AHC_ULTRA2) != 0) { 4413 speed = "Ultra2 "; 4414 } 4415 if ((ahc->features & AHC_WIDE) != 0) { 4416 type = "Wide"; 4417 } else { 4418 type = "Single"; 4419 } 4420 len = snprintf(tbuf, ep - tbuf, "%s%s Channel %c, SCSI Id=%d, ", 4421 speed, type, ahc->channel, ahc->our_id); 4422 } 4423 tbuf += len; 4424 4425 if ((ahc->flags & AHC_PAGESCBS) != 0) 4426 snprintf(tbuf, ep - tbuf, "%d/%d SCBs", 4427 ahc->scb_data->maxhscbs, AHC_MAX_QUEUE); 4428 else 4429 snprintf(tbuf, ep - tbuf, "%d SCBs", ahc->scb_data->maxhscbs); 4430 } 4431 4432 /* 4433 * Start the board, ready for normal operation 4434 */ 4435 int 4436 ahc_init(struct ahc_softc *ahc) 4437 { 4438 int max_targ; 4439 int i; 4440 int term; 4441 u_int scsi_conf; 4442 u_int scsiseq_template; 4443 u_int ultraenb; 4444 u_int discenable; 4445 u_int tagenable; 4446 size_t driver_data_size; 4447 uint32_t physaddr; 4448 4449 #ifdef AHC_DEBUG 4450 if ((ahc_debug & AHC_DEBUG_SEQUENCER) != 0) 4451 ahc->flags |= AHC_SEQUENCER_DEBUG; 4452 #endif 4453 4454 #ifdef AHC_PRINT_SRAM 4455 printf("Scratch Ram:"); 4456 for (i = 0x20; i < 0x5f; i++) { 4457 if (((i % 8) == 0) && (i != 0)) { 4458 printf ("\n "); 4459 } 4460 printf (" 0x%x", ahc_inb(ahc, i)); 4461 } 4462 if ((ahc->features & AHC_MORE_SRAM) != 0) { 4463 for (i = 0x70; i < 0x7f; i++) { 4464 if (((i % 8) == 0) && (i != 0)) { 4465 printf ("\n "); 4466 } 4467 printf (" 0x%x", ahc_inb(ahc, i)); 4468 } 4469 } 4470 printf ("\n"); 4471 /* 4472 * Reading uninitialized scratch ram may 4473 * generate parity errors. 4474 */ 4475 ahc_outb(ahc, CLRINT, CLRPARERR); 4476 ahc_outb(ahc, CLRINT, CLRBRKADRINT); 4477 #endif 4478 max_targ = 15; 4479 4480 /* 4481 * Assume we have a board at this stage and it has been reset. 4482 */ 4483 if ((ahc->flags & AHC_USEDEFAULTS) != 0) 4484 ahc->our_id = ahc->our_id_b = 7; 4485 4486 /* 4487 * Default to allowing initiator operations. 4488 */ 4489 ahc->flags |= AHC_INITIATORROLE; 4490 4491 /* 4492 * Only allow target mode features if this unit has them enabled. 
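 * (In this port the per-unit AHC_TMODE_ENABLE test below is commented
 * out, so the AHC_TARGETMODE feature is always cleared here.)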
4493 */ 4494 //if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0) 4495 ahc->features &= ~AHC_TARGETMODE; 4496 4497 /* 4498 * DMA tag for our command fifos and other data in system memory 4499 * the card's sequencer must be able to access. For initiator 4500 * roles, we need to allocate space for the qinfifo and qoutfifo. 4501 * The qinfifo and qoutfifo are composed of 256 1 byte elements. 4502 * When providing for the target mode role, we must additionally 4503 * provide space for the incoming target command fifo and an extra 4504 * byte to deal with a DMA bug in some chip versions. 4505 */ 4506 driver_data_size = 2 * 256 * sizeof(uint8_t); 4507 if ((ahc->features & AHC_TARGETMODE) != 0) 4508 driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd) 4509 + /*DMA WideOdd Bug Buffer*/1; 4510 4511 if (ahc_createdmamem(ahc->parent_dmat, driver_data_size, 4512 ahc->sc_dmaflags, 4513 &ahc->shared_data_dmamap, (caddr_t *)&ahc->qoutfifo, 4514 &ahc->shared_data_busaddr, &ahc->shared_data_seg, 4515 &ahc->shared_data_nseg, ahc_name(ahc), "shared data") < 0) 4516 return (ENOMEM); 4517 4518 ahc->init_level++; 4519 4520 if ((ahc->features & AHC_TARGETMODE) != 0) { 4521 ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo; 4522 ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS]; 4523 ahc->dma_bug_buf = ahc->shared_data_busaddr 4524 + driver_data_size - 1; 4525 /* All target command blocks start out invalid. */ 4526 for (i = 0; i < AHC_TMODE_CMDS; i++) 4527 ahc->targetcmds[i].cmd_valid = 0; 4528 ahc_sync_tqinfifo(ahc, BUS_DMASYNC_PREREAD); 4529 ahc->tqinfifonext = 1; 4530 ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1); 4531 ahc_outb(ahc, TQINPOS, ahc->tqinfifonext); 4532 ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256]; 4533 } 4534 ahc->qinfifo = &ahc->qoutfifo[256]; 4535 4536 ahc->init_level++; 4537 4538 /* Allocate SCB data now that buffer_dmat is initialized */ 4539 if (ahc->scb_data->maxhscbs == 0) 4540 if (ahc_init_scbdata(ahc) != 0) 4541 return (ENOMEM); 4542 4543 if (bootverbose) 4544 printf("%s: found %d SCBs\n", ahc_name(ahc), 4545 ahc->scb_data->maxhscbs); 4546 4547 /* 4548 * Allocate a tstate to house information for our 4549 * initiator presence on the bus as well as the user 4550 * data for any target mode initiator. 4551 */ 4552 if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) { 4553 printf("%s: unable to allocate ahc_tmode_tstate. " 4554 "Failing attach\n", ahc_name(ahc)); 4555 return (ENOMEM); 4556 } 4557 4558 if ((ahc->features & AHC_TWIN) != 0) { 4559 if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) { 4560 printf("%s: unable to allocate ahc_tmode_tstate. 
" 4561 "Failing attach\n", ahc_name(ahc)); 4562 return (ENOMEM); 4563 } 4564 } 4565 4566 ahc_outb(ahc, SEQ_FLAGS, 0); 4567 ahc_outb(ahc, SEQ_FLAGS2, 0); 4568 4569 if (ahc->scb_data->maxhscbs < AHC_SCB_MAX_ALLOC) { 4570 ahc->flags |= AHC_PAGESCBS; 4571 } else { 4572 ahc->flags &= ~AHC_PAGESCBS; 4573 } 4574 4575 #ifdef AHC_DEBUG 4576 if (ahc_debug & AHC_SHOW_MISC) { 4577 printf("%s: hardware scb %lu bytes; kernel scb %lu bytes; " 4578 "ahc_dma %lu bytes\n", 4579 ahc_name(ahc), 4580 (u_long)sizeof(struct hardware_scb), 4581 (u_long)sizeof(struct scb), 4582 (u_long)sizeof(struct ahc_dma_seg)); 4583 } 4584 #endif /* AHC_DEBUG */ 4585 4586 /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/ 4587 if (ahc->features & AHC_TWIN) { 4588 4589 /* 4590 * The device is gated to channel B after a chip reset, 4591 * so set those values first 4592 */ 4593 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB); 4594 term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0; 4595 ahc_outb(ahc, SCSIID, ahc->our_id_b); 4596 scsi_conf = ahc_inb(ahc, SCSICONF + 1); 4597 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) 4598 |term|ahc->seltime_b|ENSTIMER|ACTNEGEN); 4599 if ((ahc->features & AHC_ULTRA2) != 0) 4600 ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR); 4601 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); 4602 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); 4603 4604 if ((scsi_conf & RESET_SCSI) != 0 4605 && (ahc->flags & AHC_INITIATORROLE) != 0) 4606 ahc->flags |= AHC_RESET_BUS_B; 4607 4608 /* Select Channel A */ 4609 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB); 4610 } 4611 4612 term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0; 4613 if ((ahc->features & AHC_ULTRA2) != 0) 4614 ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id); 4615 else 4616 ahc_outb(ahc, SCSIID, ahc->our_id); 4617 scsi_conf = ahc_inb(ahc, SCSICONF); 4618 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) 4619 |term|ahc->seltime 4620 |ENSTIMER|ACTNEGEN); 4621 if ((ahc->features & AHC_ULTRA2) != 0) 4622 ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR); 4623 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); 4624 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); 4625 4626 if ((scsi_conf & RESET_SCSI) != 0 4627 && (ahc->flags & AHC_INITIATORROLE) != 0) 4628 ahc->flags |= AHC_RESET_BUS_A; 4629 4630 /* 4631 * Look at the information that board initialization or 4632 * the board bios has left us. 4633 */ 4634 ultraenb = 0; 4635 tagenable = ALL_TARGETS_MASK; 4636 4637 /* Grab the disconnection disable table and invert it for our needs */ 4638 if ((ahc->flags & AHC_USEDEFAULTS) != 0) { 4639 printf("%s: Host Adapter Bios disabled. 
Using default SCSI " 4640 "device parameters\n", ahc_name(ahc)); 4641 ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B| 4642 AHC_TERM_ENB_A|AHC_TERM_ENB_B; 4643 discenable = ALL_TARGETS_MASK; 4644 if ((ahc->features & AHC_ULTRA) != 0) 4645 ultraenb = ALL_TARGETS_MASK; 4646 } else { 4647 discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8) 4648 | ahc_inb(ahc, DISC_DSB)); 4649 if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0) 4650 ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8) 4651 | ahc_inb(ahc, ULTRA_ENB); 4652 } 4653 4654 if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0) 4655 max_targ = 7; 4656 4657 for (i = 0; i <= max_targ; i++) { 4658 struct ahc_initiator_tinfo *tinfo; 4659 struct ahc_tmode_tstate *tstate; 4660 u_int our_id; 4661 u_int target_id; 4662 char channel; 4663 4664 channel = 'A'; 4665 our_id = ahc->our_id; 4666 target_id = i; 4667 if (i > 7 && (ahc->features & AHC_TWIN) != 0) { 4668 channel = 'B'; 4669 our_id = ahc->our_id_b; 4670 target_id = i % 8; 4671 } 4672 tinfo = ahc_fetch_transinfo(ahc, channel, our_id, 4673 target_id, &tstate); 4674 /* Default to async narrow across the board */ 4675 memset(tinfo, 0, sizeof(*tinfo)); 4676 if (ahc->flags & AHC_USEDEFAULTS) { 4677 if ((ahc->features & AHC_WIDE) != 0) 4678 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 4679 4680 /* 4681 * These will be truncated when we determine the 4682 * connection type we have with the target. 4683 */ 4684 tinfo->user.period = ahc_syncrates->period; 4685 tinfo->user.offset = ~0; 4686 } else { 4687 u_int scsirate; 4688 uint16_t mask; 4689 4690 /* Take the settings leftover in scratch RAM. */ 4691 scsirate = ahc_inb(ahc, TARG_SCSIRATE + i); 4692 mask = (0x01 << i); 4693 if ((ahc->features & AHC_ULTRA2) != 0) { 4694 u_int offset; 4695 u_int maxsync; 4696 4697 if ((scsirate & SOFS) == 0x0F) { 4698 /* 4699 * Haven't negotiated yet, 4700 * so the format is different. 4701 */ 4702 scsirate = (scsirate & SXFR) >> 4 4703 | (ultraenb & mask) 4704 ? 0x08 : 0x0 4705 | (scsirate & WIDEXFER); 4706 offset = MAX_OFFSET_ULTRA2; 4707 } else 4708 offset = ahc_inb(ahc, TARG_OFFSET + i); 4709 if ((scsirate & ~WIDEXFER) == 0 && offset != 0) 4710 /* Set to the lowest sync rate, 5MHz */ 4711 scsirate |= 0x1c; 4712 maxsync = AHC_SYNCRATE_ULTRA2; 4713 if ((ahc->features & AHC_DT) != 0) 4714 maxsync = AHC_SYNCRATE_DT; 4715 tinfo->user.period = 4716 ahc_find_period(ahc, scsirate, maxsync); 4717 if (offset == 0) 4718 tinfo->user.period = 0; 4719 else 4720 tinfo->user.offset = ~0; 4721 if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/ 4722 && (ahc->features & AHC_DT) != 0) 4723 tinfo->user.ppr_options = 4724 MSG_EXT_PPR_DT_REQ; 4725 } else if ((scsirate & SOFS) != 0) { 4726 if ((scsirate & SXFR) == 0x40 4727 && (ultraenb & mask) != 0) { 4728 /* Treat 10MHz as a non-ultra speed */ 4729 scsirate &= ~SXFR; 4730 ultraenb &= ~mask; 4731 } 4732 tinfo->user.period = 4733 ahc_find_period(ahc, scsirate, 4734 (ultraenb & mask) 4735 ? 
AHC_SYNCRATE_ULTRA 4736 : AHC_SYNCRATE_FAST); 4737 if (tinfo->user.period != 0) 4738 tinfo->user.offset = ~0; 4739 } 4740 if (tinfo->user.period == 0) 4741 tinfo->user.offset = 0; 4742 if ((scsirate & WIDEXFER) != 0 4743 && (ahc->features & AHC_WIDE) != 0) 4744 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 4745 tinfo->user.protocol_version = 4; 4746 if ((ahc->features & AHC_DT) != 0) 4747 tinfo->user.transport_version = 3; 4748 else 4749 tinfo->user.transport_version = 2; 4750 tinfo->goal.protocol_version = 2; 4751 tinfo->goal.transport_version = 2; 4752 tinfo->curr.protocol_version = 2; 4753 tinfo->curr.transport_version = 2; 4754 } 4755 tstate->ultraenb = 0; 4756 tstate->discenable = discenable; 4757 } 4758 ahc->user_discenable = discenable; 4759 ahc->user_tagenable = tagenable; 4760 4761 /* There are no untagged SCBs active yet. */ 4762 for (i = 0; i < 16; i++) { 4763 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0)); 4764 if ((ahc->flags & AHC_SCB_BTT) != 0) { 4765 int lun; 4766 4767 /* 4768 * The SCB based BTT allows an entry per 4769 * target and lun pair. 4770 */ 4771 for (lun = 1; lun < AHC_NUM_LUNS; lun++) 4772 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun)); 4773 } 4774 } 4775 4776 /* All of our queues are empty */ 4777 for (i = 0; i < 256; i++) 4778 ahc->qoutfifo[i] = SCB_LIST_NULL; 4779 4780 ahc_sync_qoutfifo(ahc, BUS_DMASYNC_PREREAD); 4781 4782 for (i = 0; i < 256; i++) 4783 ahc->qinfifo[i] = SCB_LIST_NULL; 4784 4785 if ((ahc->features & AHC_MULTI_TID) != 0) { 4786 ahc_outb(ahc, TARGID, 0); 4787 ahc_outb(ahc, TARGID + 1, 0); 4788 } 4789 4790 /* 4791 * Tell the sequencer where it can find our arrays in memory. 4792 */ 4793 physaddr = ahc->scb_data->hscb_busaddr; 4794 ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF); 4795 ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF); 4796 ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF); 4797 ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF); 4798 4799 physaddr = ahc->shared_data_busaddr; 4800 ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF); 4801 ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF); 4802 ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF); 4803 ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF); 4804 4805 /* 4806 * Initialize the group code to command length table. 4807 * This overrides the values in TARG_SCSIRATE, so only 4808 * setup the table after we have processed that information. 4809 */ 4810 ahc_outb(ahc, CMDSIZE_TABLE, 5); 4811 ahc_outb(ahc, CMDSIZE_TABLE + 1, 9); 4812 ahc_outb(ahc, CMDSIZE_TABLE + 2, 9); 4813 ahc_outb(ahc, CMDSIZE_TABLE + 3, 0); 4814 ahc_outb(ahc, CMDSIZE_TABLE + 4, 15); 4815 ahc_outb(ahc, CMDSIZE_TABLE + 5, 11); 4816 ahc_outb(ahc, CMDSIZE_TABLE + 6, 0); 4817 ahc_outb(ahc, CMDSIZE_TABLE + 7, 0); 4818 4819 /* Tell the sequencer of our initial queue positions */ 4820 ahc_outb(ahc, KERNEL_QINPOS, 0); 4821 ahc_outb(ahc, QINPOS, 0); 4822 ahc_outb(ahc, QOUTPOS, 0); 4823 4824 /* 4825 * Use the built in queue management registers 4826 * if they are available. 
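 * On AHC_QUEUE_REGS controllers the host and sequencer exchange queue
 * positions through the SNSCB/HNSCB/SDSCB offset registers rather than
 * the scratch RAM locations, e.g.
 *
 *	ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
 *
 * in place of a KERNEL_QINPOS update (see ahc_qinfifo_requeue_tail()
 * below).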
4827 */ 4828 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 4829 ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256); 4830 ahc_outb(ahc, SDSCB_QOFF, 0); 4831 ahc_outb(ahc, SNSCB_QOFF, 0); 4832 ahc_outb(ahc, HNSCB_QOFF, 0); 4833 } 4834 4835 4836 /* We don't have any waiting selections */ 4837 ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL); 4838 4839 /* Our disconnection list is empty too */ 4840 ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL); 4841 4842 /* Message out buffer starts empty */ 4843 ahc_outb(ahc, MSG_OUT, MSG_NOOP); 4844 4845 /* 4846 * Setup the allowed SCSI Sequences based on operational mode. 4847 * If we are a target, we'll enalbe select in operations once 4848 * we've had a lun enabled. 4849 */ 4850 scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP; 4851 if ((ahc->flags & AHC_INITIATORROLE) != 0) 4852 scsiseq_template |= ENRSELI; 4853 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template); 4854 4855 /* 4856 * Load the Sequencer program and Enable the adapter 4857 * in "fast" mode. 4858 */ 4859 if (bootverbose) 4860 printf("%s: Downloading Sequencer Program...", 4861 ahc_name(ahc)); 4862 4863 ahc_loadseq(ahc); 4864 4865 if ((ahc->features & AHC_ULTRA2) != 0) { 4866 int wait; 4867 4868 /* 4869 * Wait for up to 500ms for our transceivers 4870 * to settle. If the adapter does not have 4871 * a cable attached, the transceivers may 4872 * never settle, so don't complain if we 4873 * fail here. 4874 */ 4875 ahc_pause(ahc); 4876 for (wait = 5000; 4877 (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait; 4878 wait--) 4879 ahc_delay(100); 4880 ahc_unpause(ahc); 4881 } 4882 4883 /* We have to wait until after any system dumps... */ 4884 ahc->shutdown_hook = shutdownhook_establish(ahc_shutdown, ahc); 4885 4886 return (0); 4887 } 4888 4889 void 4890 ahc_intr_enable(struct ahc_softc *ahc, int enable) 4891 { 4892 u_int hcntrl; 4893 4894 hcntrl = ahc_inb(ahc, HCNTRL); 4895 hcntrl &= ~INTEN; 4896 ahc->pause &= ~INTEN; 4897 ahc->unpause &= ~INTEN; 4898 if (enable) { 4899 hcntrl |= INTEN; 4900 ahc->pause |= INTEN; 4901 ahc->unpause |= INTEN; 4902 } 4903 ahc_outb(ahc, HCNTRL, hcntrl); 4904 } 4905 4906 /* 4907 * Ensure that the card is paused in a location 4908 * outside of all critical sections and that all 4909 * pending work is completed prior to returning. 4910 * This routine should only be called from outside 4911 * an interrupt context. 4912 */ 4913 void 4914 ahc_pause_and_flushwork(struct ahc_softc *ahc) 4915 { 4916 int intstat; 4917 int maxloops; 4918 int paused; 4919 4920 maxloops = 1000; 4921 ahc->flags |= AHC_ALL_INTERRUPTS; 4922 intstat = 0; 4923 paused = FALSE; 4924 do { 4925 if (paused) 4926 ahc_unpause(ahc); 4927 ahc_intr(ahc); 4928 ahc_pause(ahc); 4929 paused = TRUE; 4930 ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO); 4931 ahc_clear_critical_section(ahc); 4932 if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) 4933 break; 4934 } while (--maxloops 4935 && (((intstat = ahc_inb(ahc, INTSTAT)) & INT_PEND) != 0 4936 || (ahc_inb(ahc, SSTAT0) & (SELDO|SELINGO)))); 4937 if (maxloops == 0) { 4938 printf("Infinite interrupt loop, INTSTAT = %x", 4939 ahc_inb(ahc, INTSTAT)); 4940 } 4941 ahc_platform_flushwork(ahc); 4942 ahc->flags &= ~AHC_ALL_INTERRUPTS; 4943 } 4944 4945 int 4946 ahc_suspend(struct ahc_softc *ahc) 4947 { 4948 uint8_t *ptr; 4949 int i; 4950 4951 ahc_pause_and_flushwork(ahc); 4952 4953 if (LIST_FIRST(&ahc->pending_scbs) != NULL) 4954 return (EBUSY); 4955 4956 #if AHC_TARGET_MODE 4957 /* 4958 * XXX What about ATIOs that have not yet been serviced? 
4959 * Perhaps we should just refuse to be suspended if we 4960 * are acting in a target role. 4961 */ 4962 if (ahc->pending_device != NULL) 4963 return (EBUSY); 4964 #endif 4965 4966 /* Save volatile registers */ 4967 if ((ahc->features & AHC_TWIN) != 0) { 4968 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB); 4969 ahc->suspend_state.channel[1].scsiseq = ahc_inb(ahc, SCSISEQ); 4970 ahc->suspend_state.channel[1].sxfrctl0 = ahc_inb(ahc, SXFRCTL0); 4971 ahc->suspend_state.channel[1].sxfrctl1 = ahc_inb(ahc, SXFRCTL1); 4972 ahc->suspend_state.channel[1].simode0 = ahc_inb(ahc, SIMODE0); 4973 ahc->suspend_state.channel[1].simode1 = ahc_inb(ahc, SIMODE1); 4974 ahc->suspend_state.channel[1].seltimer = ahc_inb(ahc, SELTIMER); 4975 ahc->suspend_state.channel[1].seqctl = ahc_inb(ahc, SEQCTL); 4976 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB); 4977 } 4978 ahc->suspend_state.channel[0].scsiseq = ahc_inb(ahc, SCSISEQ); 4979 ahc->suspend_state.channel[0].sxfrctl0 = ahc_inb(ahc, SXFRCTL0); 4980 ahc->suspend_state.channel[0].sxfrctl1 = ahc_inb(ahc, SXFRCTL1); 4981 ahc->suspend_state.channel[0].simode0 = ahc_inb(ahc, SIMODE0); 4982 ahc->suspend_state.channel[0].simode1 = ahc_inb(ahc, SIMODE1); 4983 ahc->suspend_state.channel[0].seltimer = ahc_inb(ahc, SELTIMER); 4984 ahc->suspend_state.channel[0].seqctl = ahc_inb(ahc, SEQCTL); 4985 4986 if ((ahc->chip & AHC_PCI) != 0) { 4987 ahc->suspend_state.dscommand0 = ahc_inb(ahc, DSCOMMAND0); 4988 ahc->suspend_state.dspcistatus = ahc_inb(ahc, DSPCISTATUS); 4989 } 4990 4991 if ((ahc->features & AHC_DT) != 0) { 4992 u_int sfunct; 4993 4994 sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE; 4995 ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE); 4996 ahc->suspend_state.optionmode = ahc_inb(ahc, OPTIONMODE); 4997 ahc_outb(ahc, SFUNCT, sfunct); 4998 ahc->suspend_state.crccontrol1 = ahc_inb(ahc, CRCCONTROL1); 4999 } 5000 5001 if ((ahc->features & AHC_MULTI_FUNC) != 0) 5002 ahc->suspend_state.scbbaddr = ahc_inb(ahc, SCBBADDR); 5003 5004 if ((ahc->features & AHC_ULTRA2) != 0) 5005 ahc->suspend_state.dff_thrsh = ahc_inb(ahc, DFF_THRSH); 5006 5007 ptr = ahc->suspend_state.scratch_ram; 5008 for (i = 0; i < 64; i++) 5009 *ptr++ = ahc_inb(ahc, SRAM_BASE + i); 5010 5011 if ((ahc->features & AHC_MORE_SRAM) != 0) { 5012 for (i = 0; i < 16; i++) 5013 *ptr++ = ahc_inb(ahc, TARG_OFFSET + i); 5014 } 5015 5016 ptr = ahc->suspend_state.btt; 5017 if ((ahc->flags & AHC_SCB_BTT) != 0) { 5018 for (i = 0;i < AHC_NUM_TARGETS; i++) { 5019 int j; 5020 5021 for (j = 0;j < AHC_NUM_LUNS; j++) { 5022 u_int tcl; 5023 5024 tcl = BUILD_TCL(i << 4, j); 5025 *ptr = ahc_index_busy_tcl(ahc, tcl); 5026 } 5027 } 5028 } 5029 ahc_shutdown(ahc); 5030 return (0); 5031 } 5032 5033 int 5034 ahc_resume(struct ahc_softc *ahc) 5035 { 5036 uint8_t *ptr; 5037 int i; 5038 5039 ahc_reset(ahc); 5040 5041 ahc_build_free_scb_list(ahc); 5042 5043 /* Restore volatile registers */ 5044 if ((ahc->features & AHC_TWIN) != 0) { 5045 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB); 5046 ahc_outb(ahc, SCSIID, ahc->our_id); 5047 ahc_outb(ahc, SCSISEQ, ahc->suspend_state.channel[1].scsiseq); 5048 ahc_outb(ahc, SXFRCTL0, ahc->suspend_state.channel[1].sxfrctl0); 5049 ahc_outb(ahc, SXFRCTL1, ahc->suspend_state.channel[1].sxfrctl1); 5050 ahc_outb(ahc, SIMODE0, ahc->suspend_state.channel[1].simode0); 5051 ahc_outb(ahc, SIMODE1, ahc->suspend_state.channel[1].simode1); 5052 ahc_outb(ahc, SELTIMER, ahc->suspend_state.channel[1].seltimer); 5053 ahc_outb(ahc, SEQCTL, ahc->suspend_state.channel[1].seqctl); 5054 ahc_outb(ahc, SBLKCTL, 
ahc_inb(ahc, SBLKCTL) & ~SELBUSB); 5055 } 5056 ahc_outb(ahc, SCSISEQ, ahc->suspend_state.channel[0].scsiseq); 5057 ahc_outb(ahc, SXFRCTL0, ahc->suspend_state.channel[0].sxfrctl0); 5058 ahc_outb(ahc, SXFRCTL1, ahc->suspend_state.channel[0].sxfrctl1); 5059 ahc_outb(ahc, SIMODE0, ahc->suspend_state.channel[0].simode0); 5060 ahc_outb(ahc, SIMODE1, ahc->suspend_state.channel[0].simode1); 5061 ahc_outb(ahc, SELTIMER, ahc->suspend_state.channel[0].seltimer); 5062 ahc_outb(ahc, SEQCTL, ahc->suspend_state.channel[0].seqctl); 5063 if ((ahc->features & AHC_ULTRA2) != 0) 5064 ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id); 5065 else 5066 ahc_outb(ahc, SCSIID, ahc->our_id); 5067 5068 if ((ahc->chip & AHC_PCI) != 0) { 5069 ahc_outb(ahc, DSCOMMAND0, ahc->suspend_state.dscommand0); 5070 ahc_outb(ahc, DSPCISTATUS, ahc->suspend_state.dspcistatus); 5071 } 5072 5073 if ((ahc->features & AHC_DT) != 0) { 5074 u_int sfunct; 5075 5076 sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE; 5077 ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE); 5078 ahc_outb(ahc, OPTIONMODE, ahc->suspend_state.optionmode); 5079 ahc_outb(ahc, SFUNCT, sfunct); 5080 ahc_outb(ahc, CRCCONTROL1, ahc->suspend_state.crccontrol1); 5081 } 5082 5083 if ((ahc->features & AHC_MULTI_FUNC) != 0) 5084 ahc_outb(ahc, SCBBADDR, ahc->suspend_state.scbbaddr); 5085 5086 if ((ahc->features & AHC_ULTRA2) != 0) 5087 ahc_outb(ahc, DFF_THRSH, ahc->suspend_state.dff_thrsh); 5088 5089 ptr = ahc->suspend_state.scratch_ram; 5090 for (i = 0; i < 64; i++) 5091 ahc_outb(ahc, SRAM_BASE + i, *ptr++); 5092 5093 if ((ahc->features & AHC_MORE_SRAM) != 0) { 5094 for (i = 0; i < 16; i++) 5095 ahc_outb(ahc, TARG_OFFSET + i, *ptr++); 5096 } 5097 5098 ptr = ahc->suspend_state.btt; 5099 if ((ahc->flags & AHC_SCB_BTT) != 0) { 5100 for (i = 0;i < AHC_NUM_TARGETS; i++) { 5101 int j; 5102 5103 for (j = 0;j < AHC_NUM_LUNS; j++) { 5104 u_int tcl; 5105 5106 tcl = BUILD_TCL(i << 4, j); 5107 ahc_busy_tcl(ahc, tcl, *ptr); 5108 } 5109 } 5110 } 5111 return (0); 5112 } 5113 5114 /************************** Busy Target Table *********************************/ 5115 /* 5116 * Return the untagged transaction id for a given target/channel lun. 5117 * Optionally, clear the entry. 
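 * In this driver the routine only reads the table; clearing and
 * setting an entry are done separately by ahc_unbusy_tcl() and
 * ahc_busy_tcl() below.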
5118 */ 5119 u_int 5120 ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl) 5121 { 5122 u_int scbid; 5123 u_int target_offset; 5124 5125 if ((ahc->flags & AHC_SCB_BTT) != 0) { 5126 u_int saved_scbptr; 5127 5128 saved_scbptr = ahc_inb(ahc, SCBPTR); 5129 ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); 5130 scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl)); 5131 ahc_outb(ahc, SCBPTR, saved_scbptr); 5132 } else { 5133 target_offset = TCL_TARGET_OFFSET(tcl); 5134 scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset); 5135 } 5136 5137 return (scbid); 5138 } 5139 5140 void 5141 ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl) 5142 { 5143 u_int target_offset; 5144 5145 if ((ahc->flags & AHC_SCB_BTT) != 0) { 5146 u_int saved_scbptr; 5147 5148 saved_scbptr = ahc_inb(ahc, SCBPTR); 5149 ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); 5150 ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL); 5151 ahc_outb(ahc, SCBPTR, saved_scbptr); 5152 } else { 5153 target_offset = TCL_TARGET_OFFSET(tcl); 5154 ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL); 5155 } 5156 } 5157 5158 void 5159 ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid) 5160 { 5161 u_int target_offset; 5162 5163 if ((ahc->flags & AHC_SCB_BTT) != 0) { 5164 u_int saved_scbptr; 5165 5166 saved_scbptr = ahc_inb(ahc, SCBPTR); 5167 ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); 5168 ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid); 5169 ahc_outb(ahc, SCBPTR, saved_scbptr); 5170 } else { 5171 target_offset = TCL_TARGET_OFFSET(tcl); 5172 ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid); 5173 } 5174 } 5175 5176 /************************** SCB and SCB queue management **********************/ 5177 int 5178 ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target, 5179 char channel, int lun, u_int tag, role_t role) 5180 { 5181 int targ = SCB_GET_TARGET(ahc, scb); 5182 char chan = SCB_GET_CHANNEL(ahc, scb); 5183 int slun = SCB_GET_LUN(scb); 5184 int match; 5185 5186 match = ((chan == channel) || (channel == ALL_CHANNELS)); 5187 if (match != 0) 5188 match = ((targ == target) || (target == CAM_TARGET_WILDCARD)); 5189 if (match != 0) 5190 match = ((lun == slun) || (lun == CAM_LUN_WILDCARD)); 5191 if (match != 0) { 5192 #if 0 5193 #if AHC_TARGET_MODE 5194 int group; 5195 5196 group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code); 5197 if (role == ROLE_INITIATOR) { 5198 match = (group != XPT_FC_GROUP_TMODE) 5199 && ((tag == scb->hscb->tag) 5200 || (tag == SCB_LIST_NULL)); 5201 } else if (role == ROLE_TARGET) { 5202 match = (group == XPT_FC_GROUP_TMODE) 5203 && ((tag == scb->io_ctx->csio.tag_id) 5204 || (tag == SCB_LIST_NULL)); 5205 } 5206 #else /* !AHC_TARGET_MODE */ 5207 match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL)); 5208 #endif /* AHC_TARGET_MODE */ 5209 #endif 5210 } 5211 5212 return match; 5213 } 5214 5215 void 5216 ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb) 5217 { 5218 int target; 5219 char channel; 5220 int lun; 5221 5222 target = SCB_GET_TARGET(ahc, scb); 5223 lun = SCB_GET_LUN(scb); 5224 channel = SCB_GET_CHANNEL(ahc, scb); 5225 5226 ahc_search_qinfifo(ahc, target, channel, lun, 5227 /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN, 5228 CAM_REQUEUE_REQ, SEARCH_COMPLETE); 5229 5230 ahc_platform_freeze_devq(ahc, scb); 5231 } 5232 5233 void 5234 ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb) 5235 { 5236 struct scb *prev_scb; 5237 5238 prev_scb = NULL; 5239 if (ahc_qinfifo_count(ahc) != 0) { 5240 u_int prev_tag; 5241 uint8_t prev_pos; 5242 5243 prev_pos = ahc->qinfifonext - 1; 5244 prev_tag = 
ahc->qinfifo[prev_pos]; 5245 prev_scb = ahc_lookup_scb(ahc, prev_tag); 5246 } 5247 ahc_qinfifo_requeue(ahc, prev_scb, scb); 5248 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 5249 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); 5250 } else { 5251 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); 5252 } 5253 } 5254 5255 static void 5256 ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb, 5257 struct scb *scb) 5258 { 5259 if (prev_scb == NULL) { 5260 ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag); 5261 } else { 5262 prev_scb->hscb->next = scb->hscb->tag; 5263 ahc_sync_scb(ahc, prev_scb, 5264 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 5265 } 5266 ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag; 5267 scb->hscb->next = ahc->next_queued_scb->hscb->tag; 5268 ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 5269 } 5270 5271 static int 5272 ahc_qinfifo_count(struct ahc_softc *ahc) 5273 { 5274 uint8_t qinpos; 5275 uint8_t diff; 5276 5277 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 5278 qinpos = ahc_inb(ahc, SNSCB_QOFF); 5279 ahc_outb(ahc, SNSCB_QOFF, qinpos); 5280 } else 5281 qinpos = ahc_inb(ahc, QINPOS); 5282 diff = ahc->qinfifonext - qinpos; 5283 return (diff); 5284 } 5285 5286 int 5287 ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel, 5288 int lun, u_int tag, role_t role, uint32_t status, 5289 ahc_search_action action) 5290 { 5291 struct scb *scb; 5292 struct scb *prev_scb; 5293 uint8_t qinstart; 5294 uint8_t qinpos; 5295 uint8_t qintail; 5296 uint8_t next; 5297 uint8_t prev; 5298 uint8_t curscbptr; 5299 int found; 5300 int have_qregs; 5301 5302 qintail = ahc->qinfifonext; 5303 have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0; 5304 if (have_qregs) { 5305 qinstart = ahc_inb(ahc, SNSCB_QOFF); 5306 ahc_outb(ahc, SNSCB_QOFF, qinstart); 5307 } else 5308 qinstart = ahc_inb(ahc, QINPOS); 5309 qinpos = qinstart; 5310 found = 0; 5311 prev_scb = NULL; 5312 5313 if (action == SEARCH_COMPLETE) { 5314 /* 5315 * Don't attempt to run any queued untagged transactions 5316 * until we are done with the abort process. 5317 */ 5318 ahc_freeze_untagged_queues(ahc); 5319 } 5320 5321 /* 5322 * Start with an empty queue. Entries that are not chosen 5323 * for removal will be re-added to the queue as we go. 5324 */ 5325 ahc->qinfifonext = qinpos; 5326 ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag); 5327 5328 while (qinpos != qintail) { 5329 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]); 5330 if (scb == NULL) { 5331 printf("qinpos = %d, SCB index = %d\n", 5332 qinpos, ahc->qinfifo[qinpos]); 5333 panic("Loop 1\n"); 5334 } 5335 5336 if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) { 5337 /* 5338 * We found an scb that needs to be acted on. 
5339 */ 5340 found++; 5341 switch (action) { 5342 case SEARCH_COMPLETE: 5343 { 5344 cam_status ostat; 5345 cam_status cstat; 5346 5347 ostat = ahc_get_transaction_status(scb); 5348 if (ostat == CAM_REQ_INPROG) 5349 ahc_set_transaction_status(scb, status); 5350 cstat = ahc_get_transaction_status(scb); 5351 if (cstat != CAM_REQ_CMP) 5352 ahc_freeze_scb(scb); 5353 if ((scb->flags & SCB_ACTIVE) == 0) 5354 printf("Inactive SCB in qinfifo\n"); 5355 ahc_done(ahc, scb); 5356 5357 /* FALLTHROUGH */ 5358 } 5359 case SEARCH_REMOVE: 5360 break; 5361 case SEARCH_COUNT: 5362 ahc_qinfifo_requeue(ahc, prev_scb, scb); 5363 prev_scb = scb; 5364 break; 5365 } 5366 } else { 5367 ahc_qinfifo_requeue(ahc, prev_scb, scb); 5368 prev_scb = scb; 5369 } 5370 qinpos++; 5371 } 5372 5373 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 5374 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); 5375 } else { 5376 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); 5377 } 5378 5379 if (action != SEARCH_COUNT 5380 && (found != 0) 5381 && (qinstart != ahc->qinfifonext)) { 5382 /* 5383 * The sequencer may be in the process of DMA'ing 5384 * down the SCB at the beginning of the queue. 5385 * This could be problematic if either the first, 5386 * or the second SCB is removed from the queue 5387 * (the first SCB includes a pointer to the "next" 5388 * SCB to DMA). If we have removed any entries, swap 5389 * the first element in the queue with the next HSCB 5390 * so the sequencer will notice that NEXT_QUEUED_SCB 5391 * has changed during its DMA attempt and will retry 5392 * the DMA. 5393 */ 5394 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]); 5395 5396 if (scb == NULL) { 5397 printf("found = %d, qinstart = %d, qinfifionext = %d\n", 5398 found, qinstart, ahc->qinfifonext); 5399 panic("First/Second Qinfifo fixup\n"); 5400 } 5401 /* 5402 * ahc_swap_with_next_hscb forces our next pointer to 5403 * point to the reserved SCB for future commands. Save 5404 * and restore our original next pointer to maintain 5405 * queue integrity. 5406 */ 5407 next = scb->hscb->next; 5408 ahc->scb_data->scbindex[scb->hscb->tag] = NULL; 5409 ahc_swap_with_next_hscb(ahc, scb); 5410 scb->hscb->next = next; 5411 ahc->qinfifo[qinstart] = scb->hscb->tag; 5412 5413 /* Tell the card about the new head of the qinfifo. */ 5414 ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag); 5415 5416 /* Fixup the tail "next" pointer. */ 5417 qintail = ahc->qinfifonext - 1; 5418 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]); 5419 scb->hscb->next = ahc->next_queued_scb->hscb->tag; 5420 } 5421 5422 /* 5423 * Search waiting for selection list. 5424 */ 5425 curscbptr = ahc_inb(ahc, SCBPTR); 5426 next = ahc_inb(ahc, WAITING_SCBH); /* Start at head of list. */ 5427 prev = SCB_LIST_NULL; 5428 5429 while (next != SCB_LIST_NULL) { 5430 uint8_t scb_index; 5431 5432 ahc_outb(ahc, SCBPTR, next); 5433 scb_index = ahc_inb(ahc, SCB_TAG); 5434 if (scb_index >= ahc->scb_data->numscbs) { 5435 printf("Waiting List inconsistency. " 5436 "SCB index == %d, yet numscbs == %d.", 5437 scb_index, ahc->scb_data->numscbs); 5438 ahc_dump_card_state(ahc); 5439 panic("for safety"); 5440 } 5441 scb = ahc_lookup_scb(ahc, scb_index); 5442 if (scb == NULL) { 5443 printf("scb_index = %d, next = %d\n", 5444 scb_index, next); 5445 panic("Waiting List traversal\n"); 5446 } 5447 if (ahc_match_scb(ahc, scb, target, channel, 5448 lun, SCB_LIST_NULL, role)) { 5449 /* 5450 * We found an scb that needs to be acted on. 
5451 */ 5452 found++; 5453 switch (action) { 5454 case SEARCH_COMPLETE: 5455 { 5456 cam_status ostat; 5457 cam_status cstat; 5458 5459 ostat = ahc_get_transaction_status(scb); 5460 if (ostat == CAM_REQ_INPROG) 5461 ahc_set_transaction_status(scb, status); 5462 cstat = ahc_get_transaction_status(scb); 5463 if (cstat != CAM_REQ_CMP) 5464 ahc_freeze_scb(scb); 5465 if ((scb->flags & SCB_ACTIVE) == 0) 5466 printf("Inactive SCB in Waiting List\n"); 5467 ahc_done(ahc, scb); 5468 /* FALLTHROUGH */ 5469 } 5470 case SEARCH_REMOVE: 5471 next = ahc_rem_wscb(ahc, next, prev); 5472 break; 5473 case SEARCH_COUNT: 5474 prev = next; 5475 next = ahc_inb(ahc, SCB_NEXT); 5476 break; 5477 } 5478 } else { 5479 5480 prev = next; 5481 next = ahc_inb(ahc, SCB_NEXT); 5482 } 5483 } 5484 ahc_outb(ahc, SCBPTR, curscbptr); 5485 5486 found += ahc_search_untagged_queues(ahc, /*ahc_io_ctx_t*/NULL, target, 5487 channel, lun, status, action); 5488 5489 if (action == SEARCH_COMPLETE) 5490 ahc_release_untagged_queues(ahc); 5491 return (found); 5492 } 5493 5494 int 5495 ahc_search_untagged_queues(struct ahc_softc *ahc, struct scsipi_xfer *xs, /*ahc_io_ctx_t ctx,*/ 5496 int target, char channel, int lun, uint32_t status, 5497 ahc_search_action action) 5498 { 5499 struct scb *scb; 5500 int maxtarget; 5501 int found; 5502 int i; 5503 5504 if (action == SEARCH_COMPLETE) { 5505 /* 5506 * Don't attempt to run any queued untagged transactions 5507 * until we are done with the abort process. 5508 */ 5509 ahc_freeze_untagged_queues(ahc); 5510 } 5511 5512 found = 0; 5513 i = 0; 5514 if ((ahc->flags & AHC_SCB_BTT) == 0) { 5515 5516 maxtarget = 16; 5517 if (target != CAM_TARGET_WILDCARD) { 5518 5519 i = target; 5520 if (channel == 'B') 5521 i += 8; 5522 maxtarget = i + 1; 5523 } 5524 } else { 5525 maxtarget = 0; 5526 } 5527 5528 for (; i < maxtarget; i++) { 5529 struct scb_tailq *untagged_q; 5530 struct scb *next_scb; 5531 5532 untagged_q = &(ahc->untagged_queues[i]); 5533 next_scb = TAILQ_FIRST(untagged_q); 5534 while (next_scb != NULL) { 5535 5536 scb = next_scb; 5537 next_scb = TAILQ_NEXT(scb, links.tqe); 5538 5539 /* 5540 * The head of the list may be the currently 5541 * active untagged command for a device. 5542 * We're only searching for commands that 5543 * have not been started. A transaction 5544 * marked active but still in the qinfifo 5545 * is removed by the qinfifo scanning code 5546 * above. 5547 */ 5548 if ((scb->flags & SCB_ACTIVE) != 0) 5549 continue; 5550 5551 if (ahc_match_scb(ahc, scb, target, channel, lun, 5552 SCB_LIST_NULL, ROLE_INITIATOR) == 0 5553 /*|| (ctx != NULL && ctx != scb->io_ctx)*/) 5554 continue; 5555 5556 /* 5557 * We found an scb that needs to be acted on. 
5558 */ 5559 found++; 5560 switch (action) { 5561 case SEARCH_COMPLETE: 5562 { 5563 cam_status ostat; 5564 cam_status cstat; 5565 5566 ostat = ahc_get_transaction_status(scb); 5567 if (ostat == CAM_REQ_INPROG) 5568 ahc_set_transaction_status(scb, status); 5569 cstat = ahc_get_transaction_status(scb); 5570 if (cstat != CAM_REQ_CMP) 5571 ahc_freeze_scb(scb); 5572 if ((scb->flags & SCB_ACTIVE) == 0) 5573 printf("Inactive SCB in untaggedQ\n"); 5574 ahc_done(ahc, scb); 5575 break; 5576 } 5577 case SEARCH_REMOVE: 5578 scb->flags &= ~SCB_UNTAGGEDQ; 5579 TAILQ_REMOVE(untagged_q, scb, links.tqe); 5580 break; 5581 case SEARCH_COUNT: 5582 break; 5583 } 5584 } 5585 } 5586 5587 if (action == SEARCH_COMPLETE) 5588 ahc_release_untagged_queues(ahc); 5589 return (found); 5590 } 5591 5592 int 5593 ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel, 5594 int lun, u_int tag, int stop_on_first, int remove, 5595 int save_state) 5596 { 5597 struct scb *scbp; 5598 u_int next; 5599 u_int prev; 5600 u_int count; 5601 u_int active_scb; 5602 5603 count = 0; 5604 next = ahc_inb(ahc, DISCONNECTED_SCBH); 5605 prev = SCB_LIST_NULL; 5606 5607 if (save_state) { 5608 /* restore this when we're done */ 5609 active_scb = ahc_inb(ahc, SCBPTR); 5610 } else 5611 /* Silence compiler */ 5612 active_scb = SCB_LIST_NULL; 5613 5614 while (next != SCB_LIST_NULL) { 5615 u_int scb_index; 5616 5617 ahc_outb(ahc, SCBPTR, next); 5618 scb_index = ahc_inb(ahc, SCB_TAG); 5619 if (scb_index >= ahc->scb_data->numscbs) { 5620 printf("Disconnected List inconsistency. " 5621 "SCB index == %d, yet numscbs == %d.", 5622 scb_index, ahc->scb_data->numscbs); 5623 ahc_dump_card_state(ahc); 5624 panic("for safety"); 5625 } 5626 5627 if (next == prev) { 5628 panic("Disconnected List Loop. " 5629 "cur SCBPTR == %x, prev SCBPTR == %x.", 5630 next, prev); 5631 } 5632 scbp = ahc_lookup_scb(ahc, scb_index); 5633 if (ahc_match_scb(ahc, scbp, target, channel, lun, 5634 tag, ROLE_INITIATOR)) { 5635 count++; 5636 if (remove) { 5637 next = 5638 ahc_rem_scb_from_disc_list(ahc, prev, next); 5639 } else { 5640 prev = next; 5641 next = ahc_inb(ahc, SCB_NEXT); 5642 } 5643 if (stop_on_first) 5644 break; 5645 } else { 5646 prev = next; 5647 next = ahc_inb(ahc, SCB_NEXT); 5648 } 5649 } 5650 if (save_state) 5651 ahc_outb(ahc, SCBPTR, active_scb); 5652 return (count); 5653 } 5654 5655 /* 5656 * Remove an SCB from the on chip list of disconnected transactions. 5657 * This is empty/unused if we are not performing SCB paging. 5658 */ 5659 static u_int 5660 ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr) 5661 { 5662 u_int next; 5663 5664 ahc_outb(ahc, SCBPTR, scbptr); 5665 next = ahc_inb(ahc, SCB_NEXT); 5666 5667 ahc_outb(ahc, SCB_CONTROL, 0); 5668 5669 ahc_add_curscb_to_free_list(ahc); 5670 5671 if (prev != SCB_LIST_NULL) { 5672 ahc_outb(ahc, SCBPTR, prev); 5673 ahc_outb(ahc, SCB_NEXT, next); 5674 } else 5675 ahc_outb(ahc, DISCONNECTED_SCBH, next); 5676 5677 return (next); 5678 } 5679 5680 /* 5681 * Add the SCB as selected by SCBPTR onto the on chip list of 5682 * free hardware SCBs. This list is empty/unused if we are not 5683 * performing SCB paging. 5684 */ 5685 static void 5686 ahc_add_curscb_to_free_list(struct ahc_softc *ahc) 5687 { 5688 /* 5689 * Invalidate the tag so that our abort 5690 * routines don't think it's active. 
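 * Once the tag reads SCB_LIST_NULL, the hardware SCB scan in
 * ahc_abort_scbs() will skip this slot. The SCB itself is linked onto
 * FREE_SCBH below only when SCB paging is active.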
5691 */ 5692 ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL); 5693 5694 if ((ahc->flags & AHC_PAGESCBS) != 0) { 5695 ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH)); 5696 ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR)); 5697 } 5698 } 5699 5700 /* 5701 * Manipulate the waiting for selection list and return the 5702 * scb that follows the one that we remove. 5703 */ 5704 static u_int 5705 ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev) 5706 { 5707 u_int curscb, next; 5708 5709 /* 5710 * Select the SCB we want to abort and 5711 * pull the next pointer out of it. 5712 */ 5713 curscb = ahc_inb(ahc, SCBPTR); 5714 ahc_outb(ahc, SCBPTR, scbpos); 5715 next = ahc_inb(ahc, SCB_NEXT); 5716 5717 /* Clear the necessary fields */ 5718 ahc_outb(ahc, SCB_CONTROL, 0); 5719 5720 ahc_add_curscb_to_free_list(ahc); 5721 5722 /* update the waiting list */ 5723 if (prev == SCB_LIST_NULL) { 5724 /* First in the list */ 5725 ahc_outb(ahc, WAITING_SCBH, next); 5726 5727 /* 5728 * Ensure we aren't attempting to perform 5729 * selection for this entry. 5730 */ 5731 ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); 5732 } else { 5733 /* 5734 * Select the scb that pointed to us 5735 * and update its next pointer. 5736 */ 5737 ahc_outb(ahc, SCBPTR, prev); 5738 ahc_outb(ahc, SCB_NEXT, next); 5739 } 5740 5741 /* 5742 * Point us back at the original scb position. 5743 */ 5744 ahc_outb(ahc, SCBPTR, curscb); 5745 return next; 5746 } 5747 5748 /******************************** Error Handling ******************************/ 5749 /* 5750 * Abort all SCBs that match the given description (target/channel/lun/tag), 5751 * setting their status to the passed in status if the status has not already 5752 * been modified from CAM_REQ_INPROG. This routine assumes that the sequencer 5753 * is paused before it is called. 5754 */ 5755 int 5756 ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel, 5757 int lun, u_int tag, role_t role, uint32_t status) 5758 { 5759 struct scb *scbp; 5760 struct scb *scbp_next; 5761 u_int active_scb; 5762 int i, j; 5763 int maxtarget; 5764 int minlun; 5765 int maxlun; 5766 5767 int found; 5768 5769 /* 5770 * Don't attempt to run any queued untagged transactions 5771 * until we are done with the abort process. 5772 */ 5773 ahc_freeze_untagged_queues(ahc); 5774 5775 /* restore this when we're done */ 5776 active_scb = ahc_inb(ahc, SCBPTR); 5777 5778 found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL, 5779 role, CAM_REQUEUE_REQ, SEARCH_COMPLETE); 5780 5781 /* 5782 * Clean out the busy target table for any untagged commands. 5783 */ 5784 i = 0; 5785 maxtarget = 16; 5786 if (target != CAM_TARGET_WILDCARD) { 5787 i = target; 5788 if (channel == 'B') 5789 i += 8; 5790 maxtarget = i + 1; 5791 } 5792 5793 if (lun == CAM_LUN_WILDCARD) { 5794 5795 /* 5796 * Unless we are using an SCB based 5797 * busy targets table, there is only 5798 * one table entry for all luns of 5799 * a target. 
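 * With the SCB based table (AHC_SCB_BTT) each target/lun pair has its
 * own entry, so every one of the AHC_NUM_LUNS slots must be visited.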
5800 */ 5801 minlun = 0; 5802 maxlun = 1; 5803 if ((ahc->flags & AHC_SCB_BTT) != 0) 5804 maxlun = AHC_NUM_LUNS; 5805 } else { 5806 minlun = lun; 5807 maxlun = lun + 1; 5808 } 5809 5810 if (role != ROLE_TARGET) { 5811 for (;i < maxtarget; i++) { 5812 for (j = minlun;j < maxlun; j++) { 5813 u_int scbid; 5814 u_int tcl; 5815 5816 tcl = BUILD_TCL(i << 4, j); 5817 scbid = ahc_index_busy_tcl(ahc, tcl); 5818 scbp = ahc_lookup_scb(ahc, scbid); 5819 if (scbp == NULL 5820 || ahc_match_scb(ahc, scbp, target, channel, 5821 lun, tag, role) == 0) 5822 continue; 5823 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j)); 5824 } 5825 } 5826 5827 /* 5828 * Go through the disconnected list and remove any entries we 5829 * have queued for completion, 0'ing their control byte too. 5830 * We save the active SCB and restore it ourselves, so there 5831 * is no reason for this search to restore it too. 5832 */ 5833 ahc_search_disc_list(ahc, target, channel, lun, tag, 5834 /*stop_on_first*/FALSE, /*remove*/TRUE, 5835 /*save_state*/FALSE); 5836 } 5837 5838 /* 5839 * Go through the hardware SCB array looking for commands that 5840 * were active but not on any list. In some cases, these remnants 5841 * might not still have mappings in the scbindex array (e.g. unexpected 5842 * bus free with the same scb queued for an abort). Don't hold this 5843 * against them. 5844 */ 5845 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 5846 u_int scbid; 5847 5848 ahc_outb(ahc, SCBPTR, i); 5849 scbid = ahc_inb(ahc, SCB_TAG); 5850 scbp = ahc_lookup_scb(ahc, scbid); 5851 if ((scbp == NULL && scbid != SCB_LIST_NULL) 5852 || (scbp != NULL 5853 && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role))) 5854 ahc_add_curscb_to_free_list(ahc); 5855 } 5856 5857 /* 5858 * Go through the pending CCB list and look for 5859 * commands for this target that are still active. 5860 * These are other tagged commands that were 5861 * disconnected when the reset occurred. 
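 * Each match is completed with the caller supplied status so the
 * request is returned to the upper layers rather than left dangling
 * after the reset.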
5862 */
5863 scbp_next = LIST_FIRST(&ahc->pending_scbs);
5864 while (scbp_next != NULL) {
5865 scbp = scbp_next;
5866 scbp_next = LIST_NEXT(scbp, pending_links);
5867 if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) {
5868 cam_status ostat;
5869
5870 ostat = ahc_get_transaction_status(scbp);
5871 if (ostat == CAM_REQ_INPROG)
5872 ahc_set_transaction_status(scbp, status);
5873 if (ahc_get_transaction_status(scbp) != CAM_REQ_CMP)
5874 ahc_freeze_scb(scbp);
5875 if ((scbp->flags & SCB_ACTIVE) == 0)
5876 printf("Inactive SCB on pending list\n");
5877 ahc_done(ahc, scbp);
5878 found++;
5879 }
5880 }
5881 ahc_outb(ahc, SCBPTR, active_scb);
5882 ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status);
5883 ahc_release_untagged_queues(ahc);
5884 return found;
5885 }
5886
5887 static void
5888 ahc_reset_current_bus(struct ahc_softc *ahc)
5889 {
5890 uint8_t scsiseq;
5891
5892 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST);
5893 scsiseq = ahc_inb(ahc, SCSISEQ);
5894 ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO);
5895 ahc_flush_device_writes(ahc);
5896 ahc_delay(AHC_BUSRESET_DELAY);
5897 /* Turn off the bus reset */
5898 ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO);
5899
5900 ahc_clear_intstat(ahc);
5901
5902 /* Re-enable reset interrupts */
5903 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST);
5904 }
5905
5906 int
5907 ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
5908 {
5909 struct ahc_devinfo dinfo;
5910 u_int initiator, target, max_scsiid;
5911 u_int sblkctl;
5912 u_int scsiseq;
5913 u_int simode1;
5914 int found;
5915 int restart_needed;
5916 char cur_channel;
5917
5918 ahc->pending_device = NULL;
5919
5920 ahc_compile_devinfo(&dinfo,
5921 CAM_TARGET_WILDCARD,
5922 CAM_TARGET_WILDCARD,
5923 CAM_LUN_WILDCARD,
5924 channel, ROLE_UNKNOWN);
5925 ahc_pause(ahc);
5926
5927 /* Make sure the sequencer is in a safe location. */
5928 ahc_clear_critical_section(ahc);
5929
5930 /*
5931 * Run our command complete fifos to ensure that we perform
5932 * completion processing on any commands that 'completed'
5933 * before the reset occurred.
5934 */
5935 ahc_run_qoutfifo(ahc);
5936 #ifdef AHC_TARGET_MODE
5937 /*
5938 * XXX - In Twin mode, the tqinfifo may have commands
5939 * for an unaffected channel in it. However, if
5940 * we have run out of ATIO resources to drain that
5941 * queue, we may not get them all out here. Further,
5942 * the blocked transactions for the reset channel
5943 * should just be killed off, irrespective of whether
5944 * we are blocked on ATIO resources. Write a routine
5945 * to compact the tqinfifo appropriately.
5946 */
5947 if ((ahc->flags & AHC_TARGETROLE) != 0) {
5948 ahc_run_tqinfifo(ahc, /*paused*/TRUE);
5949 }
5950 #endif
5951
5952 /*
5953 * Reset the bus if we are initiating this reset
5954 */
5955 sblkctl = ahc_inb(ahc, SBLKCTL);
5956 cur_channel = 'A';
5957 if ((ahc->features & AHC_TWIN) != 0
5958 && ((sblkctl & SELBUSB) != 0))
5959 cur_channel = 'B';
5960 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
5961 if (cur_channel != channel) {
5962 /* Case 1: Command for another bus is active
5963 * Stealthily reset the other bus without
5964 * upsetting the current bus.
5965 */
5966 ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB);
5967 simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
5968 #ifdef AHC_TARGET_MODE
5969 /*
5970 * Bus resets clear ENSELI, so we cannot
5971 * defer re-enabling bus reset interrupts
5972 * if we are in target mode.
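 *
 * Added note (illustrative): in both cases SCSISEQ is rebuilt from
 * the saved SCSISEQ_TEMPLATE value with only the selection and
 * reselection enables preserved, e.g.
 *
 *	ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
 *
 * so anything else that happened to be set in SCSISEQ is dropped as
 * part of the reset.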
5973 */
5974 if ((ahc->flags & AHC_TARGETROLE) != 0)
5975 simode1 |= ENSCSIRST;
5976 #endif
5977 ahc_outb(ahc, SIMODE1, simode1);
5978 if (initiate_reset)
5979 ahc_reset_current_bus(ahc);
5980 ahc_clear_intstat(ahc);
5981 ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
5982 ahc_outb(ahc, SBLKCTL, sblkctl);
5983 restart_needed = FALSE;
5984 } else {
5985 /* Case 2: A command from this bus is active or we're idle */
5986 simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
5987 #ifdef AHC_TARGET_MODE
5988 /*
5989 * Bus resets clear ENSELI, so we cannot
5990 * defer re-enabling bus reset interrupts
5991 * if we are in target mode.
5992 */
5993 if ((ahc->flags & AHC_TARGETROLE) != 0)
5994 simode1 |= ENSCSIRST;
5995 #endif
5996 ahc_outb(ahc, SIMODE1, simode1);
5997 if (initiate_reset)
5998 ahc_reset_current_bus(ahc);
5999 ahc_clear_intstat(ahc);
6000 ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
6001 restart_needed = TRUE;
6002 }
6003
6004 /*
6005 * Clean up all the state information for the
6006 * pending transactions on this bus.
6007 */
6008 found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel,
6009 CAM_LUN_WILDCARD, SCB_LIST_NULL,
6010 ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);
6011
6012 max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7;
6013
6014 #ifdef AHC_TARGET_MODE
6015 /*
6016 * Send an immediate notify ccb to all target mode peripheral
6017 * drivers affected by this action.
6018 */
6019 for (target = 0; target <= max_scsiid; target++) {
6020 struct ahc_tmode_tstate* tstate;
6021 u_int lun;
6022
6023 tstate = ahc->enabled_targets[target];
6024 if (tstate == NULL)
6025 continue;
6026 for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
6027 struct ahc_tmode_lstate* lstate;
6028
6029 lstate = tstate->enabled_luns[lun];
6030 if (lstate == NULL)
6031 continue;
6032
6033 ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD,
6034 EVENT_TYPE_BUS_RESET, /*arg*/0);
6035 ahc_send_lstate_events(ahc, lstate);
6036 }
6037 }
6038 #endif
6039 /*
6040 * Revert to async/narrow transfers until we renegotiate.
6041 */
6042 for (target = 0; target <= max_scsiid; target++) {
6043
6044 if (ahc->enabled_targets[target] == NULL)
6045 continue;
6046 for (initiator = 0; initiator <= max_scsiid; initiator++) {
6047 struct ahc_devinfo devinfo;
6048
6049 ahc_compile_devinfo(&devinfo, target, initiator,
6050 CAM_LUN_WILDCARD,
6051 channel, ROLE_UNKNOWN);
6052 ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
6053 AHC_TRANS_CUR, /*paused*/TRUE);
6054 ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
6055 /*period*/0, /*offset*/0,
6056 /*ppr_options*/0, AHC_TRANS_CUR,
6057 /*paused*/TRUE);
6058 }
6059 }
6060
6061 if (restart_needed)
6062 ahc_restart(ahc);
6063 else
6064 ahc_unpause(ahc);
6065 return found;
6066 }
6067
6068
6069 /***************************** Residual Processing ****************************/
6070 /*
6071 * Calculate the residual for a just completed SCB.
6072 */
6073 void
6074 ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb)
6075 {
6076 struct hardware_scb *hscb;
6077 struct status_pkt *spkt;
6078 uint32_t sgptr;
6079 uint32_t resid_sgptr;
6080 uint32_t resid;
6081
6082 /*
6083 * 5 cases.
6084 * 1) No residual.
6085 * SG_RESID_VALID clear in sgptr.
6086 * 2) Transferless command
6087 * 3) Never performed any transfers.
6088 * sgptr has SG_FULL_RESID set.
6089 * 4) No residual but target did not
6090 * save data pointers after the
6091 * last transfer, so sgptr was
6092 * never updated.
6093 * 5) We have a partial residual.
6094 * Use residual_sgptr to determine 6095 * where we are. 6096 */ 6097 6098 hscb = scb->hscb; 6099 sgptr = ahc_le32toh(hscb->sgptr); 6100 if ((sgptr & SG_RESID_VALID) == 0) 6101 /* Case 1 */ 6102 return; 6103 sgptr &= ~SG_RESID_VALID; 6104 6105 if ((sgptr & SG_LIST_NULL) != 0) 6106 /* Case 2 */ 6107 return; 6108 6109 spkt = &hscb->shared_data.status; 6110 resid_sgptr = ahc_le32toh(spkt->residual_sg_ptr); 6111 if ((sgptr & SG_FULL_RESID) != 0) { 6112 /* Case 3 */ 6113 resid = ahc_get_transfer_length(scb); 6114 } else if ((resid_sgptr & SG_LIST_NULL) != 0) { 6115 /* Case 4 */ 6116 return; 6117 } else if ((resid_sgptr & ~SG_PTR_MASK) != 0) { 6118 panic("Bogus resid sgptr value 0x%x\n", resid_sgptr); 6119 } else { 6120 struct ahc_dma_seg *sg; 6121 6122 /* 6123 * Remainder of the SG where the transfer 6124 * stopped. 6125 */ 6126 resid = ahc_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK; 6127 sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK); 6128 6129 /* The residual sg_ptr always points to the next sg */ 6130 sg--; 6131 6132 /* 6133 * Add up the contents of all residual 6134 * SG segments that are after the SG where 6135 * the transfer stopped. 6136 */ 6137 while ((ahc_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) { 6138 sg++; 6139 resid += ahc_le32toh(sg->len) & AHC_SG_LEN_MASK; 6140 } 6141 } 6142 if ((scb->flags & SCB_SENSE) == 0) 6143 ahc_set_residual(scb, resid); 6144 else 6145 ahc_set_sense_residual(scb, resid); 6146 6147 #ifdef AHC_DEBUG 6148 if ((ahc_debug & AHC_SHOW_MISC) != 0) { 6149 ahc_print_path(ahc, scb); 6150 printf("Handled %sResidual of %d bytes\n", 6151 (scb->flags & SCB_SENSE) ? "Sense " : "", resid); 6152 } 6153 #endif 6154 } 6155 6156 /******************************* Target Mode **********************************/ 6157 #ifdef AHC_TARGET_MODE 6158 /* 6159 * Add a target mode event to this lun's queue 6160 */ 6161 static void 6162 ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate, 6163 u_int initiator_id, u_int event_type, u_int event_arg) 6164 { 6165 struct ahc_tmode_event *event; 6166 int pending; 6167 6168 xpt_freeze_devq(lstate->path, /*count*/1); 6169 if (lstate->event_w_idx >= lstate->event_r_idx) 6170 pending = lstate->event_w_idx - lstate->event_r_idx; 6171 else 6172 pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1 6173 - (lstate->event_r_idx - lstate->event_w_idx); 6174 6175 if (event_type == EVENT_TYPE_BUS_RESET 6176 || event_type == MSG_BUS_DEV_RESET) { 6177 /* 6178 * Any earlier events are irrelevant, so reset our buffer. 6179 * This has the effect of allowing us to deal with reset 6180 * floods (an external device holding down the reset line) 6181 * without losing the event that is really interesting. 
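 *
 * Hypothetical trace (added for illustration; the buffer holds
 * AHC_TMODE_EVENT_BUFFER_SIZE entries):
 *
 *	r_idx == 2, w_idx == 5, a bus reset arrives
 *	    -> both indices return to 0, the deferred devq freezes are
 *	       released, and the reset is queued in slot 0
 *	a second reset arrives before the first is reported
 *	    -> the buffer is cleared again, so exactly one reset event
 *	       stays pending no matter how long the flood lasts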
6182 */ 6183 lstate->event_r_idx = 0; 6184 lstate->event_w_idx = 0; 6185 xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE); 6186 } 6187 6188 if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) { 6189 xpt_print_path(lstate->path); 6190 printf("immediate event %x:%x lost\n", 6191 lstate->event_buffer[lstate->event_r_idx].event_type, 6192 lstate->event_buffer[lstate->event_r_idx].event_arg); 6193 lstate->event_r_idx++; 6194 if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE) 6195 lstate->event_r_idx = 0; 6196 xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE); 6197 } 6198 6199 event = &lstate->event_buffer[lstate->event_w_idx]; 6200 event->initiator_id = initiator_id; 6201 event->event_type = event_type; 6202 event->event_arg = event_arg; 6203 lstate->event_w_idx++; 6204 if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE) 6205 lstate->event_w_idx = 0; 6206 } 6207 6208 /* 6209 * Send any target mode events queued up waiting 6210 * for immediate notify resources. 6211 */ 6212 void 6213 ahc_send_lstate_events(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate) 6214 { 6215 struct ccb_hdr *ccbh; 6216 struct ccb_immed_notify *inot; 6217 6218 while (lstate->event_r_idx != lstate->event_w_idx 6219 && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) { 6220 struct ahc_tmode_event *event; 6221 6222 event = &lstate->event_buffer[lstate->event_r_idx]; 6223 SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle); 6224 inot = (struct ccb_immed_notify *)ccbh; 6225 switch (event->event_type) { 6226 case EVENT_TYPE_BUS_RESET: 6227 ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN; 6228 break; 6229 default: 6230 ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; 6231 inot->message_args[0] = event->event_type; 6232 inot->message_args[1] = event->event_arg; 6233 break; 6234 } 6235 inot->initiator_id = event->initiator_id; 6236 inot->sense_len = 0; 6237 xpt_done((union ccb *)inot); 6238 lstate->event_r_idx++; 6239 if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE) 6240 lstate->event_r_idx = 0; 6241 } 6242 } 6243 #endif 6244 6245 /******************** Sequencer Program Patching/Download *********************/ 6246 6247 #ifdef AHC_DUMP_SEQ 6248 void 6249 ahc_dumpseq(struct ahc_softc* ahc) 6250 { 6251 int i; 6252 int max_prog; 6253 6254 if ((ahc->chip & AHC_BUS_MASK) < AHC_PCI) 6255 max_prog = 448; 6256 else if ((ahc->features & AHC_ULTRA2) != 0) 6257 max_prog = 768; 6258 else 6259 max_prog = 512; 6260 6261 ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); 6262 ahc_outb(ahc, SEQADDR0, 0); 6263 ahc_outb(ahc, SEQADDR1, 0); 6264 for (i = 0; i < max_prog; i++) { 6265 uint8_t ins_bytes[4]; 6266 6267 ahc_insb(ahc, SEQRAM, ins_bytes, 4); 6268 printf("0x%08x\n", ins_bytes[0] << 24 6269 | ins_bytes[1] << 16 6270 | ins_bytes[2] << 8 6271 | ins_bytes[3]); 6272 } 6273 } 6274 #endif 6275 6276 static void 6277 ahc_loadseq(struct ahc_softc *ahc) 6278 { 6279 struct cs cs_table[num_critical_sections]; 6280 u_int begin_set[num_critical_sections]; 6281 u_int end_set[num_critical_sections]; 6282 struct patch *cur_patch; 6283 u_int cs_count; 6284 u_int cur_cs; 6285 u_int i; 6286 int downloaded; 6287 u_int skip_addr; 6288 u_int sg_prefetch_cnt; 6289 uint8_t download_consts[7]; 6290 6291 /* 6292 * Start out with 0 critical sections 6293 * that apply to this firmware load. 
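 *
 * Added note (illustrative, hypothetical numbers): the table built
 * below translates critical-section boundaries from source-program
 * addresses into downloaded addresses.  If a section spans source
 * instructions 100-110 but four earlier instructions were patched
 * out of this build, the downloaded copy records begin == 96 and
 * end == 106.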
6294 */
6295 cs_count = 0;
6296 cur_cs = 0;
6297 memset(begin_set, 0, sizeof(begin_set));
6298 memset(end_set, 0, sizeof(end_set));
6299
6300 /* Setup downloadable constant table */
6301 download_consts[QOUTFIFO_OFFSET] = 0;
6302 if (ahc->targetcmds != NULL)
6303 download_consts[QOUTFIFO_OFFSET] += 32;
6304 download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1;
6305 download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1;
6306 download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1);
6307 sg_prefetch_cnt = ahc->pci_cachesize;
6308 if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg)))
6309 sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg);
6310 download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
6311 download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1);
6312 download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1);
6313
6314 cur_patch = patches;
6315 downloaded = 0;
6316 skip_addr = 0;
6317 ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
6318 ahc_outb(ahc, SEQADDR0, 0);
6319 ahc_outb(ahc, SEQADDR1, 0);
6320
6321 for (i = 0; i < sizeof(seqprog)/4; i++) {
6322 if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) {
6323 /*
6324 * Don't download this instruction as it
6325 * is in a patch that was removed.
6326 */
6327 continue;
6328 }
6329 /*
6330 * Move through the CS table until we find a CS
6331 * that might apply to this instruction.
6332 */
6333 for (; cur_cs < num_critical_sections; cur_cs++) {
6334 if (critical_sections[cur_cs].end <= i) {
6335 if (begin_set[cs_count] == TRUE
6336 && end_set[cs_count] == FALSE) {
6337 cs_table[cs_count].end = downloaded;
6338 end_set[cs_count] = TRUE;
6339 cs_count++;
6340 }
6341 continue;
6342 }
6343 if (critical_sections[cur_cs].begin <= i
6344 && begin_set[cs_count] == FALSE) {
6345 cs_table[cs_count].begin = downloaded;
6346 begin_set[cs_count] = TRUE;
6347 }
6348 break;
6349 }
6350 ahc_download_instr(ahc, i, download_consts);
6351 downloaded++;
6352 }
6353
6354 ahc->num_critical_sections = cs_count;
6355 if (cs_count != 0) {
6356
6357 cs_count *= sizeof(struct cs);
6358 ahc->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT);
6359 if (ahc->critical_sections == NULL)
6360 panic("ahc_loadseq: Could not malloc");
6361 memcpy(ahc->critical_sections, cs_table, cs_count);
6362 }
6363 ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);
6364 ahc_restart(ahc);
6365
6366 if (bootverbose) {
6367 printf(" %d instructions downloaded\n", downloaded);
6368 printf("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n",
6369 ahc_name(ahc), ahc->features, ahc->bugs, ahc->flags);
6370 }
6371 }
6372
6373 static int
6374 ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch,
6375 u_int start_instr, u_int *skip_addr)
6376 {
6377 struct patch *cur_patch;
6378 struct patch *last_patch;
6379 u_int num_patches;
6380
6381 num_patches = sizeof(patches)/sizeof(struct patch);
6382 last_patch = &patches[num_patches];
6383 cur_patch = *start_patch;
6384
6385 while (cur_patch < last_patch && start_instr == cur_patch->begin) {
6386
6387 if (cur_patch->patch_func(ahc) == 0) {
6388
6389 /* Start rejecting code */
6390 *skip_addr = start_instr + cur_patch->skip_instr;
6391 cur_patch += cur_patch->skip_patch;
6392 } else {
6393 /* Accepted this patch. Advance to the next
6394 * one and wait for our instruction pointer to
6395 * hit this point.
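 *
 * Worked example (added, hypothetical numbers): if a patch rooted
 * at instruction 40 is rejected and carries skip_instr == 5, then
 * skip_addr becomes 45, instructions 40-44 are never downloaded,
 * and this routine keeps returning 0 until start_instr reaches 45.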
6396 */ 6397 cur_patch++; 6398 } 6399 } 6400 6401 *start_patch = cur_patch; 6402 if (start_instr < *skip_addr) 6403 /* Still skipping */ 6404 return (0); 6405 6406 return (1); 6407 } 6408 6409 static void 6410 ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts) 6411 { 6412 union ins_formats instr; 6413 struct ins_format1 *fmt1_ins; 6414 struct ins_format3 *fmt3_ins; 6415 u_int opcode; 6416 6417 /* 6418 * The firmware is always compiled into a little endian format. 6419 */ 6420 instr.integer = ahc_le32toh(*(uint32_t*)&seqprog[instrptr * 4]); 6421 6422 fmt1_ins = &instr.format1; 6423 fmt3_ins = NULL; 6424 6425 /* Pull the opcode */ 6426 opcode = instr.format1.opcode; 6427 switch (opcode) { 6428 case AIC_OP_JMP: 6429 case AIC_OP_JC: 6430 case AIC_OP_JNC: 6431 case AIC_OP_CALL: 6432 case AIC_OP_JNE: 6433 case AIC_OP_JNZ: 6434 case AIC_OP_JE: 6435 case AIC_OP_JZ: 6436 { 6437 struct patch *cur_patch; 6438 int address_offset; 6439 u_int address; 6440 u_int skip_addr; 6441 u_int i; 6442 6443 fmt3_ins = &instr.format3; 6444 address_offset = 0; 6445 address = fmt3_ins->address; 6446 cur_patch = patches; 6447 skip_addr = 0; 6448 for (i = 0; i < address;) { 6449 ahc_check_patch(ahc, &cur_patch, i, &skip_addr); 6450 6451 if (skip_addr > i) { 6452 int end_addr; 6453 6454 end_addr = MIN(address, skip_addr); 6455 address_offset += end_addr - i; 6456 i = skip_addr; 6457 } else { 6458 i++; 6459 } 6460 } 6461 address -= address_offset; 6462 fmt3_ins->address = address; 6463 /* FALLTHROUGH */ 6464 } 6465 case AIC_OP_OR: 6466 case AIC_OP_AND: 6467 case AIC_OP_XOR: 6468 case AIC_OP_ADD: 6469 case AIC_OP_ADC: 6470 case AIC_OP_BMOV: 6471 if (fmt1_ins->parity != 0) { 6472 fmt1_ins->immediate = dconsts[fmt1_ins->immediate]; 6473 } 6474 fmt1_ins->parity = 0; 6475 if ((ahc->features & AHC_CMD_CHAN) == 0 6476 && opcode == AIC_OP_BMOV) { 6477 /* 6478 * Block move was added at the same time 6479 * as the command channel. Verify that 6480 * this is only a move of a single element 6481 * and convert the BMOV to a MOV 6482 * (AND with an immediate of FF). 
6483 */ 6484 if (fmt1_ins->immediate != 1) 6485 panic("%s: BMOV not supported\n", 6486 ahc_name(ahc)); 6487 fmt1_ins->opcode = AIC_OP_AND; 6488 fmt1_ins->immediate = 0xff; 6489 } 6490 /* FALLTHROUGH */ 6491 case AIC_OP_ROL: 6492 if ((ahc->features & AHC_ULTRA2) != 0) { 6493 int i, count; 6494 6495 /* Calculate odd parity for the instruction */ 6496 for (i = 0, count = 0; i < 31; i++) { 6497 uint32_t mask; 6498 6499 mask = 0x01 << i; 6500 if ((instr.integer & mask) != 0) 6501 count++; 6502 } 6503 if ((count & 0x01) == 0) 6504 instr.format1.parity = 1; 6505 } else { 6506 /* Compress the instruction for older sequencers */ 6507 if (fmt3_ins != NULL) { 6508 instr.integer = 6509 fmt3_ins->immediate 6510 | (fmt3_ins->source << 8) 6511 | (fmt3_ins->address << 16) 6512 | (fmt3_ins->opcode << 25); 6513 } else { 6514 instr.integer = 6515 fmt1_ins->immediate 6516 | (fmt1_ins->source << 8) 6517 | (fmt1_ins->destination << 16) 6518 | (fmt1_ins->ret << 24) 6519 | (fmt1_ins->opcode << 25); 6520 } 6521 } 6522 /* The sequencer is a little endian CPU */ 6523 instr.integer = ahc_htole32(instr.integer); 6524 ahc_outsb(ahc, SEQRAM, instr.bytes, 4); 6525 break; 6526 default: 6527 panic("Unknown opcode encountered in seq program"); 6528 break; 6529 } 6530 } 6531 6532 int 6533 ahc_print_register(ahc_reg_parse_entry_t *table, u_int num_entries, 6534 const char *name, u_int address, u_int value, 6535 u_int *cur_column, u_int wrap_point) 6536 { 6537 int printed; 6538 u_int printed_mask; 6539 char line[1024]; 6540 6541 line[0] = 0; 6542 6543 if (cur_column != NULL && *cur_column >= wrap_point) { 6544 printf("\n"); 6545 *cur_column = 0; 6546 } 6547 printed = snprintf(line, sizeof(line), "%s[0x%x]", name, value); 6548 if (table == NULL) { 6549 printed += snprintf(&line[printed], (sizeof line) - printed, 6550 " "); 6551 printf("%s", line); 6552 if (cur_column != NULL) 6553 *cur_column += printed; 6554 return (printed); 6555 } 6556 printed_mask = 0; 6557 while (printed_mask != 0xFF) { 6558 int entry; 6559 6560 for (entry = 0; entry < num_entries; entry++) { 6561 if (((value & table[entry].mask) 6562 != table[entry].value) 6563 || ((printed_mask & table[entry].mask) 6564 == table[entry].mask)) 6565 continue; 6566 printed += snprintf(&line[printed], 6567 (sizeof line) - printed, "%s%s", 6568 printed_mask == 0 ? 
":(" : "|", 6569 table[entry].name); 6570 printed_mask |= table[entry].mask; 6571 6572 break; 6573 } 6574 if (entry >= num_entries) 6575 break; 6576 } 6577 if (printed_mask != 0) 6578 printed += snprintf(&line[printed], 6579 (sizeof line) - printed, ") "); 6580 else 6581 printed += snprintf(&line[printed], 6582 (sizeof line) - printed, " "); 6583 if (cur_column != NULL) 6584 *cur_column += printed; 6585 printf("%s", line); 6586 6587 return (printed); 6588 } 6589 6590 void 6591 ahc_dump_card_state(struct ahc_softc *ahc) 6592 { 6593 struct scb *scb; 6594 struct scb_tailq *untagged_q; 6595 u_int cur_col; 6596 int paused; 6597 int target; 6598 int maxtarget; 6599 int i; 6600 uint8_t last_phase; 6601 uint8_t qinpos; 6602 uint8_t qintail; 6603 uint8_t qoutpos; 6604 uint8_t scb_index; 6605 uint8_t saved_scbptr; 6606 6607 if (ahc_is_paused(ahc)) { 6608 paused = 1; 6609 } else { 6610 paused = 0; 6611 ahc_pause(ahc); 6612 } 6613 6614 saved_scbptr = ahc_inb(ahc, SCBPTR); 6615 last_phase = ahc_inb(ahc, LASTPHASE); 6616 printf(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n" 6617 "%s: Dumping Card State %s, at SEQADDR 0x%x\n", 6618 ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg, 6619 ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); 6620 if (paused) 6621 printf("Card was paused\n"); 6622 printf("ACCUM = 0x%x, SINDEX = 0x%x, DINDEX = 0x%x, ARG_2 = 0x%x\n", 6623 ahc_inb(ahc, ACCUM), ahc_inb(ahc, SINDEX), ahc_inb(ahc, DINDEX), 6624 ahc_inb(ahc, ARG_2)); 6625 printf("HCNT = 0x%x SCBPTR = 0x%x\n", ahc_inb(ahc, HCNT), 6626 ahc_inb(ahc, SCBPTR)); 6627 cur_col = 0; 6628 if ((ahc->features & AHC_DT) != 0) 6629 ahc_scsiphase_print(ahc_inb(ahc, SCSIPHASE), &cur_col, 50); 6630 ahc_scsisigi_print(ahc_inb(ahc, SCSISIGI), &cur_col, 50); 6631 ahc_error_print(ahc_inb(ahc, ERROR), &cur_col, 50); 6632 ahc_scsibusl_print(ahc_inb(ahc, SCSIBUSL), &cur_col, 50); 6633 ahc_lastphase_print(ahc_inb(ahc, LASTPHASE), &cur_col, 50); 6634 ahc_scsiseq_print(ahc_inb(ahc, SCSISEQ), &cur_col, 50); 6635 ahc_sblkctl_print(ahc_inb(ahc, SBLKCTL), &cur_col, 50); 6636 ahc_scsirate_print(ahc_inb(ahc, SCSIRATE), &cur_col, 50); 6637 ahc_seqctl_print(ahc_inb(ahc, SEQCTL), &cur_col, 50); 6638 ahc_seq_flags_print(ahc_inb(ahc, SEQ_FLAGS), &cur_col, 50); 6639 ahc_sstat0_print(ahc_inb(ahc, SSTAT0), &cur_col, 50); 6640 ahc_sstat1_print(ahc_inb(ahc, SSTAT1), &cur_col, 50); 6641 ahc_sstat2_print(ahc_inb(ahc, SSTAT2), &cur_col, 50); 6642 ahc_sstat3_print(ahc_inb(ahc, SSTAT3), &cur_col, 50); 6643 ahc_simode0_print(ahc_inb(ahc, SIMODE0), &cur_col, 50); 6644 ahc_simode1_print(ahc_inb(ahc, SIMODE1), &cur_col, 50); 6645 ahc_sxfrctl0_print(ahc_inb(ahc, SXFRCTL0), &cur_col, 50); 6646 ahc_dfcntrl_print(ahc_inb(ahc, DFCNTRL), &cur_col, 50); 6647 ahc_dfstatus_print(ahc_inb(ahc, DFSTATUS), &cur_col, 50); 6648 if (cur_col != 0) 6649 printf("\n"); 6650 printf("STACK:"); 6651 for (i = 0; i < STACK_SIZE; i++) 6652 printf(" 0x%x", ahc_inb(ahc, STACK)|(ahc_inb(ahc, STACK) << 8)); 6653 printf("\nSCB count = %d\n", ahc->scb_data->numscbs); 6654 printf("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag); 6655 printf("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB)); 6656 /* QINFIFO */ 6657 printf("QINFIFO entries: "); 6658 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 6659 qinpos = ahc_inb(ahc, SNSCB_QOFF); 6660 ahc_outb(ahc, SNSCB_QOFF, qinpos); 6661 } else 6662 qinpos = ahc_inb(ahc, QINPOS); 6663 qintail = ahc->qinfifonext; 6664 while (qinpos != qintail) { 6665 printf("%d ", ahc->qinfifo[qinpos]); 6666 qinpos++; 6667 } 6668 
printf("\n"); 6669 6670 printf("Waiting Queue entries: "); 6671 scb_index = ahc_inb(ahc, WAITING_SCBH); 6672 i = 0; 6673 while (scb_index != SCB_LIST_NULL && i++ < 256) { 6674 ahc_outb(ahc, SCBPTR, scb_index); 6675 printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG)); 6676 scb_index = ahc_inb(ahc, SCB_NEXT); 6677 } 6678 printf("\n"); 6679 6680 printf("Disconnected Queue entries: "); 6681 scb_index = ahc_inb(ahc, DISCONNECTED_SCBH); 6682 i = 0; 6683 while (scb_index != SCB_LIST_NULL && i++ < 256) { 6684 ahc_outb(ahc, SCBPTR, scb_index); 6685 printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG)); 6686 scb_index = ahc_inb(ahc, SCB_NEXT); 6687 } 6688 printf("\n"); 6689 6690 ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD); 6691 printf("QOUTFIFO entries: "); 6692 qoutpos = ahc->qoutfifonext; 6693 i = 0; 6694 while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) { 6695 printf("%d ", ahc->qoutfifo[qoutpos]); 6696 qoutpos++; 6697 } 6698 printf("\n"); 6699 6700 printf("Sequencer Free SCB List: "); 6701 scb_index = ahc_inb(ahc, FREE_SCBH); 6702 i = 0; 6703 while (scb_index != SCB_LIST_NULL && i++ < 256) { 6704 ahc_outb(ahc, SCBPTR, scb_index); 6705 printf("%d ", scb_index); 6706 scb_index = ahc_inb(ahc, SCB_NEXT); 6707 } 6708 printf("\n"); 6709 6710 printf("Sequencer SCB Info: "); 6711 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 6712 ahc_outb(ahc, SCBPTR, i); 6713 /*cur_col =*/ printf("\n%3d ", i); 6714 6715 ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), &cur_col, 60); 6716 ahc_scb_scsiid_print(ahc_inb(ahc, SCB_SCSIID), &cur_col, 60); 6717 ahc_scb_lun_print(ahc_inb(ahc, SCB_LUN), &cur_col, 60); 6718 ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60); 6719 } 6720 printf("\n"); 6721 6722 printf("Pending list: "); 6723 i = 0; 6724 LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) { 6725 if (i++ > 256) 6726 break; 6727 /*cur_col =*/ printf("\n%3d ", scb->hscb->tag); 6728 ahc_scb_control_print(scb->hscb->control, &cur_col, 60); 6729 ahc_scb_scsiid_print(scb->hscb->scsiid, &cur_col, 60); 6730 ahc_scb_lun_print(scb->hscb->lun, &cur_col, 60); 6731 if ((ahc->flags & AHC_PAGESCBS) == 0) { 6732 ahc_outb(ahc, SCBPTR, scb->hscb->tag); 6733 printf("("); 6734 ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), 6735 &cur_col, 60); 6736 ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60); 6737 printf(")"); 6738 } 6739 } 6740 printf("\n"); 6741 6742 printf("Kernel Free SCB list: "); 6743 i = 0; 6744 SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) { 6745 if (i++ > 256) 6746 break; 6747 printf("%d ", scb->hscb->tag); 6748 } 6749 printf("\n"); 6750 6751 maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 
15 : 7; 6752 for (target = 0; target <= maxtarget; target++) { 6753 untagged_q = &ahc->untagged_queues[target]; 6754 if (TAILQ_FIRST(untagged_q) == NULL) 6755 continue; 6756 printf("Untagged Q(%d): ", target); 6757 i = 0; 6758 TAILQ_FOREACH(scb, untagged_q, links.tqe) { 6759 if (i++ > 256) 6760 break; 6761 printf("%d ", scb->hscb->tag); 6762 } 6763 printf("\n"); 6764 } 6765 6766 ahc_platform_dump_card_state(ahc); 6767 printf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n"); 6768 ahc_outb(ahc, SCBPTR, saved_scbptr); 6769 if (paused == 0) 6770 ahc_unpause(ahc); 6771 } 6772 6773 /************************* Target Mode ****************************************/ 6774 #ifdef AHC_TARGET_MODE 6775 cam_status 6776 ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb, 6777 struct ahc_tmode_tstate **tstate, 6778 struct ahc_tmode_lstate **lstate, 6779 int notfound_failure) 6780 { 6781 6782 if ((ahc->features & AHC_TARGETMODE) == 0) 6783 return (CAM_REQ_INVALID); 6784 6785 /* 6786 * Handle the 'black hole' device that sucks up 6787 * requests to unattached luns on enabled targets. 6788 */ 6789 if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD 6790 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { 6791 *tstate = NULL; 6792 *lstate = ahc->black_hole; 6793 } else { 6794 u_int max_id; 6795 6796 max_id = (ahc->features & AHC_WIDE) ? 15 : 7; 6797 if (ccb->ccb_h.target_id > max_id) 6798 return (CAM_TID_INVALID); 6799 6800 if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS) 6801 return (CAM_LUN_INVALID); 6802 6803 *tstate = ahc->enabled_targets[ccb->ccb_h.target_id]; 6804 *lstate = NULL; 6805 if (*tstate != NULL) 6806 *lstate = 6807 (*tstate)->enabled_luns[ccb->ccb_h.target_lun]; 6808 } 6809 6810 if (notfound_failure != 0 && *lstate == NULL) 6811 return (CAM_PATH_INVALID); 6812 6813 return (CAM_REQ_CMP); 6814 } 6815 6816 void 6817 ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb) 6818 { 6819 struct ahc_tmode_tstate *tstate; 6820 struct ahc_tmode_lstate *lstate; 6821 struct ccb_en_lun *cel; 6822 cam_status status; 6823 u_int target; 6824 u_int lun; 6825 u_int target_mask; 6826 u_int our_id; 6827 u_long s; 6828 char channel; 6829 6830 status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate, 6831 /*notfound_failure*/FALSE); 6832 6833 if (status != CAM_REQ_CMP) { 6834 ccb->ccb_h.status = status; 6835 return; 6836 } 6837 6838 if (cam_sim_bus(sim) == 0) 6839 our_id = ahc->our_id; 6840 else 6841 our_id = ahc->our_id_b; 6842 6843 if (ccb->ccb_h.target_id != our_id) { 6844 /* 6845 * our_id represents our initiator ID, or 6846 * the ID of the first target to have an 6847 * enabled lun in target mode. There are 6848 * two cases that may preclude enabling a 6849 * target id other than our_id. 6850 * 6851 * o our_id is for an active initiator role. 6852 * Since the hardware does not support 6853 * reselections to the initiator role at 6854 * anything other than our_id, and our_id 6855 * is used by the hardware to indicate the 6856 * ID to use for both select-out and 6857 * reselect-out operations, the only target 6858 * ID we can support in this mode is our_id. 6859 * 6860 * o The MULTARGID feature is not available and 6861 * a previous target mode ID has been enabled. 6862 */ 6863 if ((ahc->features & AHC_MULTIROLE) != 0) { 6864 6865 if ((ahc->features & AHC_MULTI_TID) != 0 6866 && (ahc->flags & AHC_INITIATORROLE) != 0) { 6867 /* 6868 * Only allow additional targets if 6869 * the initiator role is disabled. 
6870 * The hardware cannot handle a re-select-in 6871 * on the initiator id during a re-select-out 6872 * on a different target id. 6873 */ 6874 status = CAM_TID_INVALID; 6875 } else if ((ahc->flags & AHC_INITIATORROLE) != 0 6876 || ahc->enabled_luns > 0) { 6877 /* 6878 * Only allow our target id to change 6879 * if the initiator role is not configured 6880 * and there are no enabled luns which 6881 * are attached to the currently registered 6882 * scsi id. 6883 */ 6884 status = CAM_TID_INVALID; 6885 } 6886 } else if ((ahc->features & AHC_MULTI_TID) == 0 6887 && ahc->enabled_luns > 0) { 6888 6889 status = CAM_TID_INVALID; 6890 } 6891 } 6892 6893 if (status != CAM_REQ_CMP) { 6894 ccb->ccb_h.status = status; 6895 return; 6896 } 6897 6898 /* 6899 * We now have an id that is valid. 6900 * If we aren't in target mode, switch modes. 6901 */ 6902 if ((ahc->flags & AHC_TARGETROLE) == 0 6903 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 6904 u_long s; 6905 6906 printf("Configuring Target Mode\n"); 6907 ahc_lock(ahc, &s); 6908 if (LIST_FIRST(&ahc->pending_scbs) != NULL) { 6909 ccb->ccb_h.status = CAM_BUSY; 6910 ahc_unlock(ahc, &s); 6911 return; 6912 } 6913 ahc->flags |= AHC_TARGETROLE; 6914 if ((ahc->features & AHC_MULTIROLE) == 0) 6915 ahc->flags &= ~AHC_INITIATORROLE; 6916 ahc_pause(ahc); 6917 ahc_loadseq(ahc); 6918 ahc_unlock(ahc, &s); 6919 } 6920 cel = &ccb->cel; 6921 target = ccb->ccb_h.target_id; 6922 lun = ccb->ccb_h.target_lun; 6923 channel = SIM_CHANNEL(ahc, sim); 6924 target_mask = 0x01 << target; 6925 if (channel == 'B') 6926 target_mask <<= 8; 6927 6928 if (cel->enable != 0) { 6929 u_int scsiseq; 6930 6931 /* Are we already enabled?? */ 6932 if (lstate != NULL) { 6933 xpt_print_path(ccb->ccb_h.path); 6934 printf("Lun already enabled\n"); 6935 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 6936 return; 6937 } 6938 6939 if (cel->grp6_len != 0 6940 || cel->grp7_len != 0) { 6941 /* 6942 * Don't (yet?) support vendor 6943 * specific commands. 6944 */ 6945 ccb->ccb_h.status = CAM_REQ_INVALID; 6946 printf("Non-zero Group Codes\n"); 6947 return; 6948 } 6949 6950 /* 6951 * Seems to be okay. 6952 * Setup our data structures. 
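 *
 * Added note (illustrative, hypothetical values): on MULTI_TID
 * capable chips the code below ORs this target's bit into the
 * 16-bit TARGID register pair.  Enabling target 3 on channel A,
 * for example, amounts to:
 *
 *	target_mask = 0x0008;
 *	targid_mask |= target_mask;
 *	ahc_outb(ahc, TARGID, targid_mask);
 *	ahc_outb(ahc, TARGID + 1, targid_mask >> 8);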
6953 */ 6954 if (target != CAM_TARGET_WILDCARD && tstate == NULL) { 6955 tstate = ahc_alloc_tstate(ahc, target, channel); 6956 if (tstate == NULL) { 6957 xpt_print_path(ccb->ccb_h.path); 6958 printf("Couldn't allocate tstate\n"); 6959 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 6960 return; 6961 } 6962 } 6963 lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT); 6964 if (lstate == NULL) { 6965 xpt_print_path(ccb->ccb_h.path); 6966 printf("Couldn't allocate lstate\n"); 6967 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 6968 return; 6969 } 6970 memset(lstate, 0, sizeof(*lstate)); 6971 status = xpt_create_path(&lstate->path, /*periph*/NULL, 6972 xpt_path_path_id(ccb->ccb_h.path), 6973 xpt_path_target_id(ccb->ccb_h.path), 6974 xpt_path_lun_id(ccb->ccb_h.path)); 6975 if (status != CAM_REQ_CMP) { 6976 free(lstate, M_DEVBUF); 6977 xpt_print_path(ccb->ccb_h.path); 6978 printf("Couldn't allocate path\n"); 6979 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 6980 return; 6981 } 6982 SLIST_INIT(&lstate->accept_tios); 6983 SLIST_INIT(&lstate->immed_notifies); 6984 ahc_lock(ahc, &s); 6985 ahc_pause(ahc); 6986 if (target != CAM_TARGET_WILDCARD) { 6987 tstate->enabled_luns[lun] = lstate; 6988 ahc->enabled_luns++; 6989 6990 if ((ahc->features & AHC_MULTI_TID) != 0) { 6991 u_int targid_mask; 6992 6993 targid_mask = ahc_inb(ahc, TARGID) 6994 | (ahc_inb(ahc, TARGID + 1) << 8); 6995 6996 targid_mask |= target_mask; 6997 ahc_outb(ahc, TARGID, targid_mask); 6998 ahc_outb(ahc, TARGID+1, (targid_mask >> 8)); 6999 7000 ahc_update_scsiid(ahc, targid_mask); 7001 } else { 7002 u_int our_id; 7003 char channel; 7004 7005 channel = SIM_CHANNEL(ahc, sim); 7006 our_id = SIM_SCSI_ID(ahc, sim); 7007 7008 /* 7009 * This can only happen if selections 7010 * are not enabled 7011 */ 7012 if (target != our_id) { 7013 u_int sblkctl; 7014 char cur_channel; 7015 int swap; 7016 7017 sblkctl = ahc_inb(ahc, SBLKCTL); 7018 cur_channel = (sblkctl & SELBUSB) 7019 ? 
'B' : 'A'; 7020 if ((ahc->features & AHC_TWIN) == 0) 7021 cur_channel = 'A'; 7022 swap = cur_channel != channel; 7023 if (channel == 'A') 7024 ahc->our_id = target; 7025 else 7026 ahc->our_id_b = target; 7027 7028 if (swap) 7029 ahc_outb(ahc, SBLKCTL, 7030 sblkctl ^ SELBUSB); 7031 7032 ahc_outb(ahc, SCSIID, target); 7033 7034 if (swap) 7035 ahc_outb(ahc, SBLKCTL, sblkctl); 7036 } 7037 } 7038 } else 7039 ahc->black_hole = lstate; 7040 /* Allow select-in operations */ 7041 if (ahc->black_hole != NULL && ahc->enabled_luns > 0) { 7042 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); 7043 scsiseq |= ENSELI; 7044 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); 7045 scsiseq = ahc_inb(ahc, SCSISEQ); 7046 scsiseq |= ENSELI; 7047 ahc_outb(ahc, SCSISEQ, scsiseq); 7048 } 7049 ahc_unpause(ahc); 7050 ahc_unlock(ahc, &s); 7051 ccb->ccb_h.status = CAM_REQ_CMP; 7052 xpt_print_path(ccb->ccb_h.path); 7053 printf("Lun now enabled for target mode\n"); 7054 } else { 7055 struct scb *scb; 7056 int i, empty; 7057 7058 if (lstate == NULL) { 7059 ccb->ccb_h.status = CAM_LUN_INVALID; 7060 return; 7061 } 7062 7063 ahc_lock(ahc, &s); 7064 7065 ccb->ccb_h.status = CAM_REQ_CMP; 7066 LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) { 7067 struct ccb_hdr *ccbh; 7068 7069 ccbh = &scb->io_ctx->ccb_h; 7070 if (ccbh->func_code == XPT_CONT_TARGET_IO 7071 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){ 7072 printf("CTIO pending\n"); 7073 ccb->ccb_h.status = CAM_REQ_INVALID; 7074 ahc_unlock(ahc, &s); 7075 return; 7076 } 7077 } 7078 7079 if (SLIST_FIRST(&lstate->accept_tios) != NULL) { 7080 printf("ATIOs pending\n"); 7081 ccb->ccb_h.status = CAM_REQ_INVALID; 7082 } 7083 7084 if (SLIST_FIRST(&lstate->immed_notifies) != NULL) { 7085 printf("INOTs pending\n"); 7086 ccb->ccb_h.status = CAM_REQ_INVALID; 7087 } 7088 7089 if (ccb->ccb_h.status != CAM_REQ_CMP) { 7090 ahc_unlock(ahc, &s); 7091 return; 7092 } 7093 7094 xpt_print_path(ccb->ccb_h.path); 7095 printf("Target mode disabled\n"); 7096 xpt_free_path(lstate->path); 7097 free(lstate, M_DEVBUF); 7098 7099 ahc_pause(ahc); 7100 /* Can we clean up the target too? */ 7101 if (target != CAM_TARGET_WILDCARD) { 7102 tstate->enabled_luns[lun] = NULL; 7103 ahc->enabled_luns--; 7104 for (empty = 1, i = 0; i < 8; i++) 7105 if (tstate->enabled_luns[i] != NULL) { 7106 empty = 0; 7107 break; 7108 } 7109 7110 if (empty) { 7111 ahc_free_tstate(ahc, target, channel, 7112 /*force*/FALSE); 7113 if (ahc->features & AHC_MULTI_TID) { 7114 u_int targid_mask; 7115 7116 targid_mask = ahc_inb(ahc, TARGID) 7117 | (ahc_inb(ahc, TARGID + 1) 7118 << 8); 7119 7120 targid_mask &= ~target_mask; 7121 ahc_outb(ahc, TARGID, targid_mask); 7122 ahc_outb(ahc, TARGID+1, 7123 (targid_mask >> 8)); 7124 ahc_update_scsiid(ahc, targid_mask); 7125 } 7126 } 7127 } else { 7128 7129 ahc->black_hole = NULL; 7130 7131 /* 7132 * We can't allow selections without 7133 * our black hole device. 
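 *
 * Added note (illustrative): once ahc->enabled_luns reaches zero the
 * block below clears ENSELI in both SCSISEQ_TEMPLATE and SCSISEQ,
 * e.g.
 *
 *	scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE) & ~ENSELI;
 *	ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
 *
 * the template copy matters because the driver rebuilds SCSISEQ from
 * it when the sequencer is restarted.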
7134 */ 7135 empty = TRUE; 7136 } 7137 if (ahc->enabled_luns == 0) { 7138 /* Disallow select-in */ 7139 u_int scsiseq; 7140 7141 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); 7142 scsiseq &= ~ENSELI; 7143 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); 7144 scsiseq = ahc_inb(ahc, SCSISEQ); 7145 scsiseq &= ~ENSELI; 7146 ahc_outb(ahc, SCSISEQ, scsiseq); 7147 7148 if ((ahc->features & AHC_MULTIROLE) == 0) { 7149 printf("Configuring Initiator Mode\n"); 7150 ahc->flags &= ~AHC_TARGETROLE; 7151 ahc->flags |= AHC_INITIATORROLE; 7152 ahc_pause(ahc); 7153 ahc_loadseq(ahc); 7154 } 7155 } 7156 ahc_unpause(ahc); 7157 ahc_unlock(ahc, &s); 7158 } 7159 } 7160 7161 static void 7162 ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask) 7163 { 7164 u_int scsiid_mask; 7165 u_int scsiid; 7166 7167 if ((ahc->features & AHC_MULTI_TID) == 0) 7168 panic("ahc_update_scsiid called on non-multitid unit\n"); 7169 7170 /* 7171 * Since we will rely on the TARGID mask 7172 * for selection enables, ensure that OID 7173 * in SCSIID is not set to some other ID 7174 * that we don't want to allow selections on. 7175 */ 7176 if ((ahc->features & AHC_ULTRA2) != 0) 7177 scsiid = ahc_inb(ahc, SCSIID_ULTRA2); 7178 else 7179 scsiid = ahc_inb(ahc, SCSIID); 7180 scsiid_mask = 0x1 << (scsiid & OID); 7181 if ((targid_mask & scsiid_mask) == 0) { 7182 u_int our_id; 7183 7184 /* ffs counts from 1 */ 7185 our_id = ffs(targid_mask); 7186 if (our_id == 0) 7187 our_id = ahc->our_id; 7188 else 7189 our_id--; 7190 scsiid &= TID; 7191 scsiid |= our_id; 7192 } 7193 if ((ahc->features & AHC_ULTRA2) != 0) 7194 ahc_outb(ahc, SCSIID_ULTRA2, scsiid); 7195 else 7196 ahc_outb(ahc, SCSIID, scsiid); 7197 } 7198 7199 void 7200 ahc_run_tqinfifo(struct ahc_softc *ahc, int paused) 7201 { 7202 struct target_cmd *cmd; 7203 7204 /* 7205 * If the card supports auto-access pause, 7206 * we can access the card directly regardless 7207 * of whether it is paused or not. 7208 */ 7209 if ((ahc->features & AHC_AUTOPAUSE) != 0) 7210 paused = TRUE; 7211 7212 ahc_sync_tqinfifo(ahc, BUS_DMASYNC_POSTREAD); 7213 while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) { 7214 7215 /* 7216 * Only advance through the queue if we 7217 * have the resources to process the command. 7218 */ 7219 if (ahc_handle_target_cmd(ahc, cmd) != 0) 7220 break; 7221 7222 cmd->cmd_valid = 0; 7223 ahc_dmamap_sync(ahc, ahc->parent_dmat/*shared_data_dmat*/, 7224 ahc->shared_data_dmamap, 7225 ahc_targetcmd_offset(ahc, ahc->tqinfifonext), 7226 sizeof(struct target_cmd), 7227 BUS_DMASYNC_PREREAD); 7228 ahc->tqinfifonext++; 7229 7230 /* 7231 * Lazily update our position in the target mode incoming 7232 * command queue as seen by the sequencer. 
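 *
 * Added note (illustrative; the real value of HOST_TQINPOS comes
 * from the firmware headers): the test below pushes our index to the
 * sequencer only once per HOST_TQINPOS commands.  If HOST_TQINPOS
 * were 0x80, for instance, the update happens whenever
 * (tqinfifonext & 0x7f) == 1, i.e. roughly every 128 incoming
 * commands instead of after every single one.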
7233 */ 7234 if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) { 7235 if ((ahc->features & AHC_HS_MAILBOX) != 0) { 7236 u_int hs_mailbox; 7237 7238 hs_mailbox = ahc_inb(ahc, HS_MAILBOX); 7239 hs_mailbox &= ~HOST_TQINPOS; 7240 hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS; 7241 ahc_outb(ahc, HS_MAILBOX, hs_mailbox); 7242 } else { 7243 if (!paused) 7244 ahc_pause(ahc); 7245 ahc_outb(ahc, KERNEL_TQINPOS, 7246 ahc->tqinfifonext & HOST_TQINPOS); 7247 if (!paused) 7248 ahc_unpause(ahc); 7249 } 7250 } 7251 } 7252 } 7253 7254 static int 7255 ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd) 7256 { 7257 struct ahc_tmode_tstate *tstate; 7258 struct ahc_tmode_lstate *lstate; 7259 struct ccb_accept_tio *atio; 7260 uint8_t *byte; 7261 int initiator; 7262 int target; 7263 int lun; 7264 7265 initiator = SCSIID_TARGET(ahc, cmd->scsiid); 7266 target = SCSIID_OUR_ID(cmd->scsiid); 7267 lun = (cmd->identify & MSG_IDENTIFY_LUNMASK); 7268 7269 byte = cmd->bytes; 7270 tstate = ahc->enabled_targets[target]; 7271 lstate = NULL; 7272 if (tstate != NULL) 7273 lstate = tstate->enabled_luns[lun]; 7274 7275 /* 7276 * Commands for disabled luns go to the black hole driver. 7277 */ 7278 if (lstate == NULL) 7279 lstate = ahc->black_hole; 7280 7281 atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios); 7282 if (atio == NULL) { 7283 ahc->flags |= AHC_TQINFIFO_BLOCKED; 7284 /* 7285 * Wait for more ATIOs from the peripheral driver for this lun. 7286 */ 7287 if (bootverbose) 7288 printf("%s: ATIOs exhausted\n", ahc_name(ahc)); 7289 return (1); 7290 } else 7291 ahc->flags &= ~AHC_TQINFIFO_BLOCKED; 7292 #if 0 7293 printf("Incoming command from %d for %d:%d%s\n", 7294 initiator, target, lun, 7295 lstate == ahc->black_hole ? "(Black Holed)" : ""); 7296 #endif 7297 SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle); 7298 7299 if (lstate == ahc->black_hole) { 7300 /* Fill in the wildcards */ 7301 atio->ccb_h.target_id = target; 7302 atio->ccb_h.target_lun = lun; 7303 } 7304 7305 /* 7306 * Package it up and send it off to 7307 * whomever has this lun enabled. 7308 */ 7309 atio->sense_len = 0; 7310 atio->init_id = initiator; 7311 if (byte[0] != 0xFF) { 7312 /* Tag was included */ 7313 atio->tag_action = *byte++; 7314 atio->tag_id = *byte++; 7315 atio->ccb_h.flags = CAM_TAG_ACTION_VALID; 7316 } else { 7317 atio->ccb_h.flags = 0; 7318 } 7319 byte++; 7320 7321 /* Okay. Now determine the cdb size based on the command code */ 7322 switch (*byte >> CMD_GROUP_CODE_SHIFT) { 7323 case 0: 7324 atio->cdb_len = 6; 7325 break; 7326 case 1: 7327 case 2: 7328 atio->cdb_len = 10; 7329 break; 7330 case 4: 7331 atio->cdb_len = 16; 7332 break; 7333 case 5: 7334 atio->cdb_len = 12; 7335 break; 7336 case 3: 7337 default: 7338 /* Only copy the opcode. */ 7339 atio->cdb_len = 1; 7340 printf("Reserved or VU command code type encountered\n"); 7341 break; 7342 } 7343 7344 memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len); 7345 7346 atio->ccb_h.status |= CAM_CDB_RECVD; 7347 7348 if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) { 7349 /* 7350 * We weren't allowed to disconnect. 7351 * We're hanging on the bus until a 7352 * continue target I/O comes in response 7353 * to this accept tio. 
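 *
 * Added note (illustrative; MSG_IDENTIFY_DISCFLAG is the standard
 * IDENTIFY disconnect-privilege bit): an IDENTIFY byte of 0xC0 from
 * the initiator permits disconnection, while 0x80 does not.  The
 * latter is the case handled here: ahc->pending_device latches this
 * lun's lstate until the matching continue target I/O arrives.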
7354 */ 7355 #if 0 7356 printf("Received Immediate Command %d:%d:%d - %p\n", 7357 initiator, target, lun, ahc->pending_device); 7358 #endif 7359 ahc->pending_device = lstate; 7360 ahc_freeze_ccb((union ccb *)atio); 7361 atio->ccb_h.flags |= CAM_DIS_DISCONNECT; 7362 } 7363 xpt_done((union ccb*)atio); 7364 return (0); 7365 } 7366 #endif 7367 7368 static int 7369 ahc_createdmamem(tag, size, flags, mapp, vaddr, baddr, seg, nseg, myname, what) 7370 bus_dma_tag_t tag; 7371 int size; 7372 int flags; 7373 bus_dmamap_t *mapp; 7374 caddr_t *vaddr; 7375 bus_addr_t *baddr; 7376 bus_dma_segment_t *seg; 7377 int *nseg; 7378 const char *myname, *what; 7379 { 7380 int error, level = 0; 7381 7382 if ((error = bus_dmamem_alloc(tag, size, PAGE_SIZE, 0, 7383 seg, 1, nseg, BUS_DMA_NOWAIT)) != 0) { 7384 printf("%s: failed to allocate DMA mem for %s, error = %d\n", 7385 myname, what, error); 7386 goto out; 7387 } 7388 level++; 7389 7390 if ((error = bus_dmamem_map(tag, seg, *nseg, size, vaddr, 7391 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { 7392 printf("%s: failed to map DMA mem for %s, error = %d\n", 7393 myname, what, error); 7394 goto out; 7395 } 7396 level++; 7397 7398 if ((error = bus_dmamap_create(tag, size, 1, size, 0, 7399 BUS_DMA_NOWAIT | flags, mapp)) != 0) { 7400 printf("%s: failed to create DMA map for %s, error = %d\n", 7401 myname, what, error); 7402 goto out; 7403 } 7404 level++; 7405 7406 7407 if ((error = bus_dmamap_load(tag, *mapp, *vaddr, size, NULL, 7408 BUS_DMA_NOWAIT)) != 0) { 7409 printf("%s: failed to load DMA map for %s, error = %d\n", 7410 myname, what, error); 7411 goto out; 7412 } 7413 7414 *baddr = (*mapp)->dm_segs[0].ds_addr; 7415 7416 return 0; 7417 out: 7418 printf("ahc_createdmamem error (%d)\n", level); 7419 switch (level) { 7420 case 3: 7421 bus_dmamap_destroy(tag, *mapp); 7422 /* FALLTHROUGH */ 7423 case 2: 7424 bus_dmamem_unmap(tag, *vaddr, size); 7425 /* FALLTHROUGH */ 7426 case 1: 7427 bus_dmamem_free(tag, seg, *nseg); 7428 break; 7429 default: 7430 break; 7431 } 7432 7433 return -1; 7434 } 7435 7436 static void 7437 ahc_freedmamem(tag, size, map, vaddr, seg, nseg) 7438 bus_dma_tag_t tag; 7439 int size; 7440 bus_dmamap_t map; 7441 caddr_t vaddr; 7442 bus_dma_segment_t *seg; 7443 int nseg; 7444 { 7445 7446 bus_dmamap_unload(tag, map); 7447 bus_dmamap_destroy(tag, map); 7448 bus_dmamem_unmap(tag, vaddr, size); 7449 bus_dmamem_free(tag, seg, nseg); 7450 } 7451
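
/*
 * Illustrative usage sketch (added commentary; nothing in the driver
 * calls this).  It shows how ahc_createdmamem() and ahc_freedmamem()
 * pair up for a hypothetical one-page shared buffer.  The error
 * handling and the zero "flags" argument are simplifications for the
 * example only.
 */
#if 0
static int
ahc_example_shared_buffer(struct ahc_softc *ahc)
{
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	bus_addr_t busaddr;
	caddr_t vaddr;
	int nseg;

	if (ahc_createdmamem(ahc->parent_dmat, PAGE_SIZE, /*flags*/0,
	    &map, &vaddr, &busaddr, &seg, &nseg, ahc_name(ahc),
	    "example buffer") != 0)
		return (-1);

	/* vaddr is the kernel view, busaddr is what the chip DMAs to. */

	ahc_freedmamem(ahc->parent_dmat, PAGE_SIZE, map, vaddr, &seg, nseg);
	return (0);
}
#endif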