1 /* $NetBSD: iha.c,v 1.14 2001/12/16 04:18:12 tsutsui Exp $ */ 2 /* 3 * Initio INI-9xxxU/UW SCSI Device Driver 4 * 5 * Copyright (c) 2000 Ken Westerback 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer, 13 * without modification, immediately at the beginning of the file. 14 * 2. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT, 21 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 22 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 23 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 25 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 26 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 27 * THE POSSIBILITY OF SUCH DAMAGE. 28 * 29 *------------------------------------------------------------------------- 30 * 31 * Ported from i91u.c, provided by Initio Corporation, which credits: 32 * 33 * Device driver for the INI-9XXXU/UW or INIC-940/950 PCI SCSI Controller. 34 * 35 * FreeBSD 36 * 37 * Written for 386bsd and FreeBSD by 38 * Winston Hung <winstonh@initio.com> 39 * 40 * Copyright (c) 1997-99 Initio Corp. All rights reserved. 41 * 42 *------------------------------------------------------------------------- 43 */ 44 45 /* 46 * Ported to NetBSD by Izumi Tsutsui <tsutsui@ceres.dti.ne.jp> from OpenBSD: 47 * $OpenBSD: iha.c,v 1.3 2001/02/20 00:47:33 krw Exp $ 48 */ 49 50 #include <sys/cdefs.h> 51 __KERNEL_RCSID(0, "$NetBSD: iha.c,v 1.14 2001/12/16 04:18:12 tsutsui Exp $"); 52 53 #include <sys/param.h> 54 #include <sys/systm.h> 55 #include <sys/kernel.h> 56 #include <sys/buf.h> 57 #include <sys/device.h> 58 #include <sys/malloc.h> 59 60 #include <uvm/uvm_extern.h> 61 62 #include <machine/bus.h> 63 #include <machine/intr.h> 64 65 #include <dev/scsipi/scsi_all.h> 66 #include <dev/scsipi/scsipi_all.h> 67 #include <dev/scsipi/scsiconf.h> 68 #include <dev/scsipi/scsi_message.h> 69 70 #include <dev/ic/ihareg.h> 71 #include <dev/ic/ihavar.h> 72 73 /* 74 * SCSI Rate Table, indexed by FLAG_SCSI_RATE field of 75 * tcs flags. 
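 *
 * Each entry is a synchronous transfer period in units of 4 ns, as the
 * per-entry comments below show; for illustration, entry 0 holds 12,
 * i.e. a period of roughly 50 ns, which corresponds to the 20 MHz
 * fast-20 rate.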
76 */ 77 static const u_int8_t iha_rate_tbl[] = { 78 /* fast 20 */ 79 /* nanosecond divide by 4 */ 80 12, /* 50ns, 20M */ 81 18, /* 75ns, 13.3M */ 82 25, /* 100ns, 10M */ 83 31, /* 125ns, 8M */ 84 37, /* 150ns, 6.6M */ 85 43, /* 175ns, 5.7M */ 86 50, /* 200ns, 5M */ 87 62 /* 250ns, 4M */ 88 }; 89 #define IHA_MAX_PERIOD 62 90 91 #ifdef notused 92 static u_int16_t eeprom_default[EEPROM_SIZE] = { 93 /* -- Header ------------------------------------ */ 94 /* signature */ 95 EEP_SIGNATURE, 96 /* size, revision */ 97 EEP_WORD(EEPROM_SIZE * 2, 0x01), 98 /* -- Host Adapter Structure -------------------- */ 99 /* model */ 100 0x0095, 101 /* model info, number of channel */ 102 EEP_WORD(0x00, 1), 103 /* BIOS config */ 104 EEP_BIOSCFG_DEFAULT, 105 /* host adapter config */ 106 0, 107 108 /* -- eeprom_adapter[0] ------------------------------- */ 109 /* ID, adapter config 1 */ 110 EEP_WORD(7, CFG_DEFAULT), 111 /* adapter config 2, number of targets */ 112 EEP_WORD(0x00, 8), 113 /* target flags */ 114 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 115 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 116 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 117 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 118 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 119 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 120 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 121 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 122 123 /* -- eeprom_adapter[1] ------------------------------- */ 124 /* ID, adapter config 1 */ 125 EEP_WORD(7, CFG_DEFAULT), 126 /* adapter config 2, number of targets */ 127 EEP_WORD(0x00, 8), 128 /* target flags */ 129 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 130 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 131 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 132 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 133 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 134 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 135 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 136 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 137 /* reserved[5] */ 138 0, 0, 0, 0, 0, 139 /* checksum */ 140 0 141 }; 142 #endif 143 144 static u_int8_t iha_data_over_run(struct iha_scsi_req_q *); 145 146 static int iha_push_sense_request(struct iha_softc *, struct iha_scsi_req_q *); 147 static void iha_timeout(void *); 148 static int iha_alloc_sglist(struct iha_softc *); 149 150 static void iha_read_eeprom(struct iha_softc *, struct iha_eeprom *); 151 static int iha_se2_rd_all(struct iha_softc *, u_int16_t *); 152 static void iha_se2_instr(struct iha_softc *, int); 153 static u_int16_t iha_se2_rd(struct iha_softc *, int); 154 #ifdef notused 155 static void iha_se2_update_all(struct iha_softc *); 156 static void iha_se2_wr(struct iha_softc *, int, u_int16_t); 157 #endif 158 159 static void iha_reset_scsi_bus(struct iha_softc *); 160 static void iha_reset_chip(struct iha_softc *); 161 static void iha_reset_dma(struct iha_softc *); 162 163 static void iha_reset_tcs(struct tcs *, u_int8_t); 164 165 static void iha_done_scb(struct iha_softc *, struct iha_scsi_req_q *); 166 static void iha_exec_scb(struct iha_softc *, struct iha_scsi_req_q *); 167 168 static void iha_main(struct iha_softc *); 169 static void iha_scsi(struct iha_softc *); 170 171 static int iha_wait(struct iha_softc *, u_int8_t); 172 173 static __inline void iha_mark_busy_scb(struct iha_scsi_req_q *); 174 175 static void iha_append_free_scb(struct iha_softc *, struct iha_scsi_req_q *); 176 static void iha_append_done_scb(struct iha_softc *, struct iha_scsi_req_q *, 177 u_int8_t); 178 static __inline struct iha_scsi_req_q *iha_pop_done_scb(struct iha_softc *); 179 180 static __inline void iha_append_pend_scb(struct 
iha_softc *, 181 struct iha_scsi_req_q *); 182 static __inline void iha_push_pend_scb(struct iha_softc *, 183 struct iha_scsi_req_q *); 184 static __inline void iha_del_pend_scb(struct iha_softc *, 185 struct iha_scsi_req_q *); 186 static struct iha_scsi_req_q *iha_find_pend_scb(struct iha_softc *); 187 188 static void iha_sync_done(struct iha_softc *); 189 static void iha_wide_done(struct iha_softc *); 190 static void iha_bad_seq(struct iha_softc *); 191 192 static int iha_next_state(struct iha_softc *); 193 static int iha_state_1(struct iha_softc *); 194 static int iha_state_2(struct iha_softc *); 195 static int iha_state_3(struct iha_softc *); 196 static int iha_state_4(struct iha_softc *); 197 static int iha_state_5(struct iha_softc *); 198 static int iha_state_6(struct iha_softc *); 199 static int iha_state_8(struct iha_softc *); 200 201 static void iha_set_ssig(struct iha_softc *, u_int8_t, u_int8_t); 202 203 static int iha_xpad_in(struct iha_softc *); 204 static int iha_xpad_out(struct iha_softc *); 205 206 static int iha_xfer_data(struct iha_softc *, struct iha_scsi_req_q *, 207 int direction); 208 209 static int iha_status_msg(struct iha_softc *); 210 211 static int iha_msgin(struct iha_softc *); 212 static int iha_msgin_sdtr(struct iha_softc *); 213 static int iha_msgin_extended(struct iha_softc *); 214 static int iha_msgin_ignore_wid_resid(struct iha_softc *); 215 216 static int iha_msgout(struct iha_softc *, u_int8_t); 217 static int iha_msgout_extended(struct iha_softc *); 218 static void iha_msgout_abort(struct iha_softc *, u_int8_t); 219 static int iha_msgout_reject(struct iha_softc *); 220 static int iha_msgout_sdtr(struct iha_softc *); 221 static int iha_msgout_wdtr(struct iha_softc *); 222 223 static void iha_select(struct iha_softc *, struct iha_scsi_req_q *, u_int8_t); 224 225 static void iha_busfree(struct iha_softc *); 226 static int iha_resel(struct iha_softc *); 227 228 static void iha_abort_xs(struct iha_softc *, struct scsipi_xfer *, u_int8_t); 229 230 void iha_scsipi_request(struct scsipi_channel *, scsipi_adapter_req_t, 231 void *arg); 232 void iha_update_xfer_mode(struct iha_softc *, int); 233 234 /* 235 * iha_intr - the interrupt service routine for the iha driver 236 */ 237 int 238 iha_intr(arg) 239 void *arg; 240 { 241 bus_space_tag_t iot; 242 bus_space_handle_t ioh; 243 struct iha_softc *sc; 244 int s; 245 246 sc = (struct iha_softc *)arg; 247 iot = sc->sc_iot; 248 ioh = sc->sc_ioh; 249 250 if ((bus_space_read_1(iot, ioh, TUL_STAT0) & INTPD) == 0) 251 return (0); 252 253 s = splbio(); /* XXX - Or are interrupts off when ISR's are called? */ 254 255 if (sc->sc_semaph != SEMAPH_IN_MAIN) { 256 /* XXX - need these inside a splbio()/splx()? 
*/ 257 bus_space_write_1(iot, ioh, TUL_IMSK, MASK_ALL); 258 sc->sc_semaph = SEMAPH_IN_MAIN; 259 260 iha_main(sc); 261 262 sc->sc_semaph = ~SEMAPH_IN_MAIN; 263 bus_space_write_1(iot, ioh, TUL_IMSK, (MASK_ALL & ~MSCMP)); 264 } 265 266 splx(s); 267 268 return (1); 269 } 270 271 void 272 iha_scsipi_request(chan, req, arg) 273 struct scsipi_channel *chan; 274 scsipi_adapter_req_t req; 275 void *arg; 276 { 277 struct scsipi_xfer *xs; 278 struct scsipi_periph *periph; 279 struct iha_scsi_req_q *scb; 280 struct iha_softc *sc; 281 int error, s; 282 283 sc = (struct iha_softc *)chan->chan_adapter->adapt_dev; 284 285 switch (req) { 286 case ADAPTER_REQ_RUN_XFER: 287 xs = arg; 288 periph = xs->xs_periph; 289 290 if (xs->cmdlen > sizeof(struct scsi_generic) || 291 periph->periph_target >= IHA_MAX_TARGETS) { 292 xs->error = XS_DRIVER_STUFFUP; 293 return; 294 } 295 296 s = splbio(); 297 scb = TAILQ_FIRST(&sc->sc_freescb); 298 if (scb != NULL) { 299 scb->status = STATUS_RENT; 300 TAILQ_REMOVE(&sc->sc_freescb, scb, chain); 301 } 302 #ifdef DIAGNOSTIC 303 else { 304 scsipi_printaddr(periph); 305 printf("unable to allocate scb\n"); 306 panic("iha_scsipi_request"); 307 } 308 #endif 309 splx(s); 310 311 scb->target = periph->periph_target; 312 scb->lun = periph->periph_lun; 313 scb->tcs = &sc->sc_tcs[scb->target]; 314 scb->scb_id = MSG_IDENTIFY(periph->periph_lun, 315 (xs->xs_control & XS_CTL_REQSENSE) == 0); 316 317 scb->xs = xs; 318 scb->cmdlen = xs->cmdlen; 319 memcpy(&scb->cmd, xs->cmd, xs->cmdlen); 320 scb->buflen = xs->datalen; 321 scb->flags = 0; 322 if (xs->xs_control & XS_CTL_DATA_OUT) 323 scb->flags |= FLAG_DATAOUT; 324 if (xs->xs_control & XS_CTL_DATA_IN) 325 scb->flags |= FLAG_DATAIN; 326 327 if (scb->flags & (FLAG_DATAIN | FLAG_DATAOUT)) { 328 error = bus_dmamap_load(sc->sc_dmat, scb->dmap, 329 xs->data, scb->buflen, NULL, 330 ((xs->xs_control & XS_CTL_NOSLEEP) ? 331 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | 332 BUS_DMA_STREAMING | 333 ((scb->flags & FLAG_DATAIN) ? 334 BUS_DMA_READ : BUS_DMA_WRITE)); 335 336 if (error) { 337 printf("%s: error %d loading dma map\n", 338 sc->sc_dev.dv_xname, error); 339 iha_append_free_scb(sc, scb); 340 xs->error = XS_DRIVER_STUFFUP; 341 scsipi_done(xs); 342 return; 343 } 344 bus_dmamap_sync(sc->sc_dmat, scb->dmap, 345 0, scb->dmap->dm_mapsize, 346 (scb->flags & FLAG_DATAIN) ? 347 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 348 } 349 350 iha_exec_scb(sc, scb); 351 return; 352 353 case ADAPTER_REQ_GROW_RESOURCES: 354 return; /* XXX */ 355 356 case ADAPTER_REQ_SET_XFER_MODE: 357 { 358 struct tcs *tcs; 359 struct scsipi_xfer_mode *xm = arg; 360 361 tcs = &sc->sc_tcs[xm->xm_target]; 362 363 if ((xm->xm_mode & PERIPH_CAP_WIDE16) != 0 && 364 (tcs->flags & FLAG_NO_WIDE) == 0) 365 tcs->flags &= ~(FLAG_WIDE_DONE|FLAG_SYNC_DONE); 366 367 if ((xm->xm_mode & PERIPH_CAP_SYNC) != 0 && 368 (tcs->flags & FLAG_NO_SYNC) == 0) 369 tcs->flags &= ~FLAG_SYNC_DONE; 370 371 /* 372 * If we're not going to negotiate, send the 373 * notification now, since it won't happen later. 
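                 * (Both FLAG_WIDE_DONE and FLAG_SYNC_DONE still being set
                 * here means neither WDTR nor SDTR will be renegotiated, so
                 * the iha_update_xfer_mode() call below is the only
                 * notification the upper layer will get.)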
374 */ 375 if ((tcs->flags & (FLAG_WIDE_DONE|FLAG_SYNC_DONE)) == 376 (FLAG_WIDE_DONE|FLAG_SYNC_DONE)) 377 iha_update_xfer_mode(sc, xm->xm_target); 378 379 return; 380 } 381 } 382 } 383 384 void 385 iha_attach(sc) 386 struct iha_softc *sc; 387 { 388 bus_space_tag_t iot = sc->sc_iot; 389 bus_space_handle_t ioh = sc->sc_ioh; 390 struct iha_scsi_req_q *scb; 391 struct iha_eeprom eeprom; 392 struct eeprom_adapter *conf; 393 int i, error, reg; 394 395 iha_read_eeprom(sc, &eeprom); 396 397 conf = &eeprom.adapter[0]; 398 399 /* 400 * fill in the rest of the iha_softc fields 401 */ 402 sc->sc_id = CFG_ID(conf->config1); 403 sc->sc_semaph = ~SEMAPH_IN_MAIN; 404 sc->sc_status0 = 0; 405 sc->sc_actscb = NULL; 406 407 TAILQ_INIT(&sc->sc_freescb); 408 TAILQ_INIT(&sc->sc_pendscb); 409 TAILQ_INIT(&sc->sc_donescb); 410 error = iha_alloc_sglist(sc); 411 if (error != 0) { 412 printf(": cannot allocate sglist\n"); 413 return; 414 } 415 416 sc->sc_scb = malloc(sizeof(struct iha_scsi_req_q) * IHA_MAX_SCB, 417 M_DEVBUF, M_NOWAIT|M_ZERO); 418 if (sc->sc_scb == NULL) { 419 printf(": cannot allocate SCB\n"); 420 return; 421 } 422 423 for (i = 0, scb = sc->sc_scb; i < IHA_MAX_SCB; i++, scb++) { 424 scb->scb_tagid = i; 425 scb->sgoffset = IHA_SG_SIZE * i; 426 scb->sglist = sc->sc_sglist + IHA_MAX_SG_ENTRIES * i; 427 scb->sg_addr = 428 sc->sc_dmamap->dm_segs[0].ds_addr + scb->sgoffset; 429 430 error = bus_dmamap_create(sc->sc_dmat, 431 MAXPHYS, IHA_MAX_SG_ENTRIES, MAXPHYS, 0, 432 BUS_DMA_NOWAIT, &scb->dmap); 433 434 if (error != 0) { 435 printf(": couldn't create SCB DMA map, error = %d\n", 436 error); 437 return; 438 } 439 TAILQ_INSERT_TAIL(&sc->sc_freescb, scb, chain); 440 } 441 442 /* Mask all the interrupts */ 443 bus_space_write_1(iot, ioh, TUL_IMSK, MASK_ALL); 444 445 /* Stop any I/O and reset the scsi module */ 446 iha_reset_dma(sc); 447 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSMOD); 448 449 /* Program HBA's SCSI ID */ 450 bus_space_write_1(iot, ioh, TUL_SID, sc->sc_id << 4); 451 452 /* 453 * Configure the channel as requested by the NVRAM settings read 454 * by iha_read_eeprom() above. 455 */ 456 457 sc->sc_sconf1 = SCONFIG0DEFAULT; 458 if ((conf->config1 & CFG_EN_PAR) != 0) 459 sc->sc_sconf1 |= SPCHK; 460 bus_space_write_1(iot, ioh, TUL_SCONFIG0, sc->sc_sconf1); 461 462 /* set selection time out 250 ms */ 463 bus_space_write_1(iot, ioh, TUL_STIMO, STIMO_250MS); 464 465 /* Enable desired SCSI termination configuration read from eeprom */ 466 reg = 0; 467 if (conf->config1 & CFG_ACT_TERM1) 468 reg |= ENTMW; 469 if (conf->config1 & CFG_ACT_TERM2) 470 reg |= ENTM; 471 bus_space_write_1(iot, ioh, TUL_DCTRL0, reg); 472 473 reg = bus_space_read_1(iot, ioh, TUL_GCTRL1) & ~ATDEN; 474 if (conf->config1 & CFG_AUTO_TERM) 475 reg |= ATDEN; 476 bus_space_write_1(iot, ioh, TUL_GCTRL1, reg); 477 478 for (i = 0; i < IHA_MAX_TARGETS / 2; i++) { 479 sc->sc_tcs[i * 2 ].flags = EEP_LBYTE(conf->tflags[i]); 480 sc->sc_tcs[i * 2 + 1].flags = EEP_HBYTE(conf->tflags[i]); 481 iha_reset_tcs(&sc->sc_tcs[i * 2 ], sc->sc_sconf1); 482 iha_reset_tcs(&sc->sc_tcs[i * 2 + 1], sc->sc_sconf1); 483 } 484 485 iha_reset_chip(sc); 486 bus_space_write_1(iot, ioh, TUL_SIEN, ALL_INTERRUPTS); 487 488 /* 489 * fill in the adapter. 
490 */ 491 sc->sc_adapter.adapt_dev = &sc->sc_dev; 492 sc->sc_adapter.adapt_nchannels = 1; 493 sc->sc_adapter.adapt_openings = IHA_MAX_SCB; 494 sc->sc_adapter.adapt_max_periph = IHA_MAX_SCB; 495 sc->sc_adapter.adapt_ioctl = NULL; 496 sc->sc_adapter.adapt_minphys = minphys; 497 sc->sc_adapter.adapt_request = iha_scsipi_request; 498 499 /* 500 * fill in the channel. 501 */ 502 sc->sc_channel.chan_adapter = &sc->sc_adapter; 503 sc->sc_channel.chan_bustype = &scsi_bustype; 504 sc->sc_channel.chan_channel = 0; 505 sc->sc_channel.chan_ntargets = CFG_TARGET(conf->config2); 506 sc->sc_channel.chan_nluns = 8; 507 sc->sc_channel.chan_id = sc->sc_id; 508 509 /* 510 * Now try to attach all the sub devices. 511 */ 512 config_found(&sc->sc_dev, &sc->sc_channel, scsiprint); 513 } 514 515 /* 516 * iha_reset_dma - abort any active DMA xfer, reset tulip FIFO. 517 */ 518 static void 519 iha_reset_dma(sc) 520 struct iha_softc *sc; 521 { 522 bus_space_tag_t iot = sc->sc_iot; 523 bus_space_handle_t ioh = sc->sc_ioh; 524 525 if ((bus_space_read_1(iot, ioh, TUL_ISTUS1) & XPEND) != 0) { 526 /* if DMA xfer is pending, abort DMA xfer */ 527 bus_space_write_1(iot, ioh, TUL_DCMD, ABTXFR); 528 /* wait Abort DMA xfer done */ 529 while ((bus_space_read_1(iot, ioh, TUL_ISTUS0) & DABT) == 0) 530 ; 531 } 532 533 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO); 534 } 535 536 /* 537 * iha_append_free_scb - append the supplied SCB to the tail of the 538 * sc_freescb queue after clearing and resetting 539 * everything possible. 540 */ 541 static void 542 iha_append_free_scb(sc, scb) 543 struct iha_softc *sc; 544 struct iha_scsi_req_q *scb; 545 { 546 int s; 547 548 s = splbio(); 549 550 if (scb == sc->sc_actscb) 551 sc->sc_actscb = NULL; 552 553 scb->status = STATUS_QUEUED; 554 scb->ha_stat = HOST_OK; 555 scb->ta_stat = SCSI_OK; 556 557 scb->nextstat = 0; 558 scb->scb_tagmsg = 0; 559 560 scb->xs = NULL; 561 scb->tcs = NULL; 562 563 /* 564 * scb_tagid, sg_addr, sglist 565 * SCB_SensePtr are set at initialization 566 * and never change 567 */ 568 569 TAILQ_INSERT_TAIL(&sc->sc_freescb, scb, chain); 570 571 splx(s); 572 } 573 574 static __inline void 575 iha_append_pend_scb(sc, scb) 576 struct iha_softc *sc; 577 struct iha_scsi_req_q *scb; 578 { 579 /* ASSUMPTION: only called within a splbio()/splx() pair */ 580 581 if (scb == sc->sc_actscb) 582 sc->sc_actscb = NULL; 583 584 scb->status = STATUS_QUEUED; 585 586 TAILQ_INSERT_TAIL(&sc->sc_pendscb, scb, chain); 587 } 588 589 static __inline void 590 iha_push_pend_scb(sc, scb) 591 struct iha_softc *sc; 592 struct iha_scsi_req_q *scb; 593 { 594 int s; 595 596 s = splbio(); 597 598 if (scb == sc->sc_actscb) 599 sc->sc_actscb = NULL; 600 601 scb->status = STATUS_QUEUED; 602 603 TAILQ_INSERT_HEAD(&sc->sc_pendscb, scb, chain); 604 605 splx(s); 606 } 607 608 /* 609 * iha_find_pend_scb - scan the pending queue for a SCB that can be 610 * processed immediately. Return NULL if none found 611 * and a pointer to the SCB if one is found. If there 612 * is an active SCB, return NULL! 613 */ 614 static struct iha_scsi_req_q * 615 iha_find_pend_scb(sc) 616 struct iha_softc *sc; 617 { 618 struct iha_scsi_req_q *scb; 619 struct tcs *tcs; 620 int s; 621 622 s = splbio(); 623 624 if (sc->sc_actscb != NULL) 625 scb = NULL; 626 627 else 628 TAILQ_FOREACH(scb, &sc->sc_pendscb, chain) { 629 if ((scb->xs->xs_control & XS_CTL_RESET) != 0) 630 /* ALWAYS willing to reset a device */ 631 break; 632 633 tcs = scb->tcs; 634 635 if ((scb->scb_tagmsg) != 0) { 636 /* 637 * A Tagged I/O. 
OK to start If no 638 * non-tagged I/O is active on the same 639 * target 640 */ 641 if (tcs->ntagscb == NULL) 642 break; 643 644 } else if (scb->cmd[0] == REQUEST_SENSE) { 645 /* 646 * OK to do a non-tagged request sense 647 * even if a non-tagged I/O has been 648 * started, 'cuz we don't allow any 649 * disconnect during a request sense op 650 */ 651 break; 652 653 } else if (tcs->tagcnt == 0) { 654 /* 655 * No tagged I/O active on this target, 656 * ok to start a non-tagged one if one 657 * is not already active 658 */ 659 if (tcs->ntagscb == NULL) 660 break; 661 } 662 } 663 664 splx(s); 665 666 return (scb); 667 } 668 669 /* 670 * iha_del_pend_scb - remove scb from sc_pendscb 671 */ 672 static __inline void 673 iha_del_pend_scb(sc, scb) 674 struct iha_softc *sc; 675 struct iha_scsi_req_q *scb; 676 { 677 int s; 678 679 s = splbio(); 680 681 TAILQ_REMOVE(&sc->sc_pendscb, scb, chain); 682 683 splx(s); 684 } 685 686 static __inline void 687 iha_mark_busy_scb(scb) 688 struct iha_scsi_req_q *scb; 689 { 690 int s; 691 692 s = splbio(); 693 694 scb->status = STATUS_BUSY; 695 696 if (scb->scb_tagmsg == 0) 697 scb->tcs->ntagscb = scb; 698 else 699 scb->tcs->tagcnt++; 700 701 splx(s); 702 } 703 704 static void 705 iha_append_done_scb(sc, scb, hastat) 706 struct iha_softc *sc; 707 struct iha_scsi_req_q *scb; 708 u_int8_t hastat; 709 { 710 struct tcs *tcs; 711 int s; 712 713 s = splbio(); 714 715 if (scb->xs != NULL) 716 callout_stop(&scb->xs->xs_callout); 717 718 if (scb == sc->sc_actscb) 719 sc->sc_actscb = NULL; 720 721 tcs = scb->tcs; 722 723 if (scb->scb_tagmsg != 0) { 724 if (tcs->tagcnt) 725 tcs->tagcnt--; 726 } else if (tcs->ntagscb == scb) 727 tcs->ntagscb = NULL; 728 729 scb->status = STATUS_QUEUED; 730 scb->ha_stat = hastat; 731 732 TAILQ_INSERT_TAIL(&sc->sc_donescb, scb, chain); 733 734 splx(s); 735 } 736 737 static __inline struct iha_scsi_req_q * 738 iha_pop_done_scb(sc) 739 struct iha_softc *sc; 740 { 741 struct iha_scsi_req_q *scb; 742 int s; 743 744 s = splbio(); 745 746 scb = TAILQ_FIRST(&sc->sc_donescb); 747 748 if (scb != NULL) { 749 scb->status = STATUS_RENT; 750 TAILQ_REMOVE(&sc->sc_donescb, scb, chain); 751 } 752 753 splx(s); 754 755 return (scb); 756 } 757 758 /* 759 * iha_abort_xs - find the SCB associated with the supplied xs and 760 * stop all processing on it, moving it to the done 761 * queue with the supplied host status value. 762 */ 763 static void 764 iha_abort_xs(sc, xs, hastat) 765 struct iha_softc *sc; 766 struct scsipi_xfer *xs; 767 u_int8_t hastat; 768 { 769 struct iha_scsi_req_q *scb; 770 int i, s; 771 772 s = splbio(); 773 774 /* Check the pending queue for the SCB pointing to xs */ 775 776 TAILQ_FOREACH(scb, &sc->sc_pendscb, chain) 777 if (scb->xs == xs) { 778 iha_del_pend_scb(sc, scb); 779 iha_append_done_scb(sc, scb, hastat); 780 splx(s); 781 return; 782 } 783 784 /* 785 * If that didn't work, check all BUSY/SELECTING SCB's for one 786 * pointing to xs 787 */ 788 789 for (i = 0, scb = sc->sc_scb; i < IHA_MAX_SCB; i++, scb++) 790 switch (scb->status) { 791 case STATUS_BUSY: 792 case STATUS_SELECT: 793 if (scb->xs == xs) { 794 iha_append_done_scb(sc, scb, hastat); 795 splx(s); 796 return; 797 } 798 break; 799 default: 800 break; 801 } 802 803 splx(s); 804 } 805 806 /* 807 * iha_bad_seq - a SCSI bus phase was encountered out of the 808 * correct/expected sequence. Reset the SCSI bus. 
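 *               Any SCB that was active when the bad phase was seen is
 *               completed with HOST_BAD_PHAS before the SCSI bus and the
 *               chip itself are reset, as the function body below shows.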
 */
static void
iha_bad_seq(sc)
        struct iha_softc *sc;
{
        struct iha_scsi_req_q *scb = sc->sc_actscb;

        if (scb != NULL)
                iha_append_done_scb(sc, scb, HOST_BAD_PHAS);

        iha_reset_scsi_bus(sc);
        iha_reset_chip(sc);
}

/*
 * iha_push_sense_request - obtain auto sense data by pushing the
 *                          SCB needing it back onto the pending
 *                          queue with a REQUEST_SENSE CDB.
 */
static int
iha_push_sense_request(sc, scb)
        struct iha_softc *sc;
        struct iha_scsi_req_q *scb;
{
        struct scsipi_xfer *xs = scb->xs;
        struct scsipi_periph *periph = xs->xs_periph;
        struct scsipi_sense *ss = (struct scsipi_sense *)scb->cmd;
        int lun = periph->periph_lun;
        int err;

        ss->opcode = REQUEST_SENSE;
        ss->byte2 = lun << SCSI_CMD_LUN_SHIFT;
        ss->unused[0] = ss->unused[1] = 0;
        ss->length = sizeof(struct scsipi_sense_data);
        ss->control = 0;

        scb->flags = FLAG_RSENS | FLAG_DATAIN;

        scb->scb_id &= ~MSG_IDENTIFY_DISCFLAG;

        scb->scb_tagmsg = 0;
        scb->ta_stat = SCSI_OK;

        scb->cmdlen = sizeof(struct scsipi_sense);
        scb->buflen = ss->length;

        err = bus_dmamap_load(sc->sc_dmat, scb->dmap,
            &xs->sense.scsi_sense, scb->buflen, NULL,
            BUS_DMA_READ|BUS_DMA_NOWAIT);
        if (err != 0) {
                printf("iha_push_sense_request: cannot bus_dmamap_load()\n");
                xs->error = XS_DRIVER_STUFFUP;
                return 1;
        }
        bus_dmamap_sync(sc->sc_dmat, scb->dmap,
            0, scb->buflen, BUS_DMASYNC_PREREAD);

        /* XXX What about queued command? */
        iha_exec_scb(sc, scb);

        return 0;
}

/*
 * iha_main - process the active SCB, taking one off pending and making it
 *            active if necessary, and any done SCB's created as
 *            a result until there are no interrupts pending and no pending
 *            SCB's that can be started.
 */
static void
iha_main(sc)
        struct iha_softc *sc;
{
        bus_space_tag_t iot = sc->sc_iot;
        bus_space_handle_t ioh = sc->sc_ioh;
        struct iha_scsi_req_q *scb;

        for (;;) {
                iha_scsi(sc);

                while ((scb = iha_pop_done_scb(sc)) != NULL)
                        iha_done_scb(sc, scb);

                /*
                 * If there are no interrupts pending, or we can't start
                 * a pending scb, break out of the for(;;). Otherwise
                 * continue the good work with another call to
                 * iha_scsi().
                 */
                if (((bus_space_read_1(iot, ioh, TUL_STAT0) & INTPD) == 0)
                    && (iha_find_pend_scb(sc) == NULL))
                        break;
        }
}

/*
 * iha_scsi - service any outstanding interrupts. If there are none, try to
 *            start another SCB currently in the pending queue.
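 *            Interrupt conditions are handled in priority order: SCSI bus
 *            reset, reselection, timeout/disconnect, then ordinary phase
 *            interrupts, which are passed on to iha_next_state().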
 */
static void
iha_scsi(sc)
        struct iha_softc *sc;
{
        bus_space_tag_t iot = sc->sc_iot;
        bus_space_handle_t ioh = sc->sc_ioh;
        struct iha_scsi_req_q *scb;
        struct tcs *tcs;
        u_int8_t stat;

        /* service pending interrupts asap */

        stat = bus_space_read_1(iot, ioh, TUL_STAT0);
        if ((stat & INTPD) != 0) {
                sc->sc_status0 = stat;
                sc->sc_status1 = bus_space_read_1(iot, ioh, TUL_STAT1);
                sc->sc_sistat = bus_space_read_1(iot, ioh, TUL_SISTAT);

                sc->sc_phase = sc->sc_status0 & PH_MASK;

                if ((sc->sc_sistat & SRSTD) != 0) {
                        iha_reset_scsi_bus(sc);
                        return;
                }

                if ((sc->sc_sistat & RSELED) != 0) {
                        iha_resel(sc);
                        return;
                }

                if ((sc->sc_sistat & (STIMEO | DISCD)) != 0) {
                        iha_busfree(sc);
                        return;
                }

                if ((sc->sc_sistat & (SCMDN | SBSRV)) != 0) {
                        iha_next_state(sc);
                        return;
                }

                if ((sc->sc_sistat & SELED) != 0)
                        iha_set_ssig(sc, 0, 0);
        }

        /*
         * There were no interrupts pending which required action elsewhere,
         * so see if it is possible to start the selection phase on a
         * pending SCB
         */
        if ((scb = iha_find_pend_scb(sc)) == NULL)
                return;

        tcs = scb->tcs;

        /* program HBA's SCSI ID & target SCSI ID */
        bus_space_write_1(iot, ioh, TUL_SID, (sc->sc_id << 4) | scb->target);

        if ((scb->xs->xs_control & XS_CTL_RESET) == 0) {
                bus_space_write_1(iot, ioh, TUL_SYNCM, tcs->syncm);

                if ((tcs->flags & FLAG_NO_NEG_SYNC) == 0 ||
                    (tcs->flags & FLAG_NO_NEG_WIDE) == 0)
                        iha_select(sc, scb, SELATNSTOP);

                else if (scb->scb_tagmsg != 0)
                        iha_select(sc, scb, SEL_ATN3);

                else
                        iha_select(sc, scb, SEL_ATN);

        } else {
                iha_select(sc, scb, SELATNSTOP);
                scb->nextstat = 8;
        }

        if ((scb->xs->xs_control & XS_CTL_POLL) != 0) {
                int timeout;
                for (timeout = scb->xs->timeout; timeout > 0; timeout--) {
                        if (iha_wait(sc, NO_OP) == -1)
                                break;
                        if (iha_next_state(sc) == -1)
                                break;
                        delay(1000); /* Only happens in boot, so it's ok */
                }

                /*
                 * Since done queue processing not done until AFTER this
                 * function returns, scb is on the done queue, not
                 * the free queue at this point and still has valid data
                 *
                 * Conversely, xs->error has not been set yet
                 */
                if (timeout == 0)
                        iha_timeout(scb);
        }
}

/*
 * iha_data_over_run - return HOST_OK for all SCSI opcodes where BufLen
 *                     is an 'Allocation Length'. All other SCSI opcodes
 *                     get HOST_DO_DU as they SHOULD have xferred all the
 *                     data requested.
 *
 *                     The list of opcodes using 'Allocation Length' was
 *                     found by scanning all the SCSI-3 T10 drafts. See
 *                     www.t10.org for the curious with a .pdf reader.
 */
static u_int8_t
iha_data_over_run(scb)
        struct iha_scsi_req_q *scb;
{
        switch (scb->cmd[0]) {
        case 0x03: /* Request Sense                    SPC-2 */
        case 0x12: /* Inquiry                          SPC-2 */
        case 0x1a: /* Mode Sense (6 byte version)      SPC-2 */
        case 0x1c: /* Receive Diagnostic Results       SPC-2 */
        case 0x23: /* Read Format Capacities           MMC-2 */
        case 0x29: /* Read Generation                  SBC   */
        case 0x34: /* Read Position                    SSC-2 */
        case 0x37: /* Read Defect Data                 SBC   */
        case 0x3c: /* Read Buffer                      SPC-2 */
        case 0x42: /* Read Sub Channel                 MMC-2 */
        case 0x43: /* Read TOC/PMA/ATIP                MMC   */

        /* XXX - 2 with same opcode of 0x44? */
        case 0x44: /* Read Header/Read Density Suprt   MMC/SSC */

        case 0x46: /* Get Configuration                MMC-2 */
        case 0x4a: /* Get Event/Status Notification    MMC-2 */
        case 0x4d: /* Log Sense                        SPC-2 */
        case 0x51: /* Read Disc Information            MMC   */
        case 0x52: /* Read Track Information           MMC   */
        case 0x59: /* Read Master CUE                  MMC   */
        case 0x5a: /* Mode Sense (10 byte version)     SPC-2 */
        case 0x5c: /* Read Buffer Capacity             MMC   */
        case 0x5e: /* Persistent Reserve In            SPC-2 */
        case 0x84: /* Receive Copy Results             SPC-2 */
        case 0xa0: /* Report LUNs                      SPC-2 */
        case 0xa3: /* Various Report requests          SBC-2/SCC-2 */
        case 0xa4: /* Report Key                       MMC-2 */
        case 0xad: /* Read DVD Structure               MMC-2 */
        case 0xb4: /* Read Element Status (Attached)   SMC   */
        case 0xb5: /* Request Volume Element Address   SMC   */
        case 0xb7: /* Read Defect Data (12 byte ver.)  SBC   */
        case 0xb8: /* Read Element Status (Independ.)  SMC   */
        case 0xba: /* Report Redundancy                SCC-2 */
        case 0xbd: /* Mechanism Status                 MMC   */
        case 0xbe: /* Report Basic Redundancy          SCC-2 */

                return (HOST_OK);
                break;

        default:
                return (HOST_DO_DU);
                break;
        }
}

/*
 * iha_next_state - process the current SCB as requested in its
 *                  nextstat member.
 */
static int
iha_next_state(sc)
        struct iha_softc *sc;
{

        if (sc->sc_actscb == NULL)
                return (-1);

        switch (sc->sc_actscb->nextstat) {
        case 1:
                if (iha_state_1(sc) == 3)
                        goto state_3;
                break;

        case 2:
                switch (iha_state_2(sc)) {
                case 3:
                        goto state_3;
                case 4:
                        goto state_4;
                default:
                        break;
                }
                break;

        case 3:
        state_3:
                if (iha_state_3(sc) == 4)
                        goto state_4;
                break;

        case 4:
        state_4:
                switch (iha_state_4(sc)) {
                case 0:
                        return (0);
                case 6:
                        goto state_6;
                default:
                        break;
                }
                break;

        case 5:
                switch (iha_state_5(sc)) {
                case 4:
                        goto state_4;
                case 6:
                        goto state_6;
                default:
                        break;
                }
                break;

        case 6:
        state_6:
                iha_state_6(sc);
                break;

        case 8:
                iha_state_8(sc);
                break;

        default:
#ifdef IHA_DEBUG_STATE
                printf("[debug] -unknown state: %i-\n",
                    sc->sc_actscb->nextstat);
#endif
                iha_bad_seq(sc);
                break;
        }

        return (-1);
}

/*
 * iha_state_1 - selection is complete after a SELATNSTOP. If the target
 *               has put the bus into MSG_OUT phase start wide/sync
 *               negotiation. Otherwise clear the FIFO and go to state 3,
 *               which will send the SCSI CDB to the target.
 */
static int
iha_state_1(sc)
        struct iha_softc *sc;
{
        bus_space_tag_t iot = sc->sc_iot;
        bus_space_handle_t ioh = sc->sc_ioh;
        struct iha_scsi_req_q *scb = sc->sc_actscb;
        struct tcs *tcs;
        int flags;

        iha_mark_busy_scb(scb);

        tcs = scb->tcs;

        bus_space_write_1(iot, ioh, TUL_SCONFIG0, tcs->sconfig0);

        /*
         * If we are in PHASE_MSG_OUT, send
         *     a) IDENT message (with tags if appropriate)
         *     b) WDTR if the target is configured to negotiate wide xfers
         *        ** OR **
         *     c) SDTR if the target is configured to negotiate sync xfers
         *        but not wide ones
         *
         * If we are NOT, then the target is not asking for anything but
         * the data/command, so go straight to state 3.
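         *
         * Either way the function returns 3 so that iha_next_state() falls
         * through to iha_state_3(), or -1 if one of the message-out calls
         * below fails.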
         */
        if (sc->sc_phase == PHASE_MSG_OUT) {
                bus_space_write_1(iot, ioh, TUL_SCTRL1, (ESBUSIN | EHRSL));
                bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_id);

                if (scb->scb_tagmsg != 0) {
                        bus_space_write_1(iot, ioh, TUL_SFIFO,
                            scb->scb_tagmsg);
                        bus_space_write_1(iot, ioh, TUL_SFIFO,
                            scb->scb_tagid);
                }

                flags = tcs->flags;
                if ((flags & FLAG_NO_NEG_WIDE) == 0) {
                        if (iha_msgout_wdtr(sc) == -1)
                                return (-1);
                } else if ((flags & FLAG_NO_NEG_SYNC) == 0) {
                        if (iha_msgout_sdtr(sc) == -1)
                                return (-1);
                }

        } else {
                bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
                iha_set_ssig(sc, REQ | BSY | SEL | ATN, 0);
        }

        return (3);
}

/*
 * iha_state_2 - selection is complete after a SEL_ATN or SEL_ATN3. If the
 *               SCSI CDB has already been sent, go to state 4 to start the
 *               data xfer. Otherwise reset the FIFO and go to state 3,
 *               sending the SCSI CDB.
 */
static int
iha_state_2(sc)
        struct iha_softc *sc;
{
        bus_space_tag_t iot = sc->sc_iot;
        bus_space_handle_t ioh = sc->sc_ioh;
        struct iha_scsi_req_q *scb = sc->sc_actscb;

        iha_mark_busy_scb(scb);

        bus_space_write_1(iot, ioh, TUL_SCONFIG0, scb->tcs->sconfig0);

        if ((sc->sc_status1 & CPDNE) != 0)
                return (4);

        bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);

        iha_set_ssig(sc, REQ | BSY | SEL | ATN, 0);

        return (3);
}

/*
 * iha_state_3 - send the SCSI CDB to the target, processing any status
 *               or other messages received until that is done or
 *               abandoned.
 */
static int
iha_state_3(sc)
        struct iha_softc *sc;
{
        bus_space_tag_t iot = sc->sc_iot;
        bus_space_handle_t ioh = sc->sc_ioh;
        struct iha_scsi_req_q *scb = sc->sc_actscb;
        int flags;

        for (;;) {
                switch (sc->sc_phase) {
                case PHASE_CMD_OUT:
                        bus_space_write_multi_1(iot, ioh, TUL_SFIFO,
                            scb->cmd, scb->cmdlen);
                        if (iha_wait(sc, XF_FIFO_OUT) == -1)
                                return (-1);
                        else if (sc->sc_phase == PHASE_CMD_OUT) {
                                iha_bad_seq(sc);
                                return (-1);
                        } else
                                return (4);

                case PHASE_MSG_IN:
                        scb->nextstat = 3;
                        if (iha_msgin(sc) == -1)
                                return (-1);
                        break;

                case PHASE_STATUS_IN:
                        if (iha_status_msg(sc) == -1)
                                return (-1);
                        break;

                case PHASE_MSG_OUT:
                        flags = scb->tcs->flags;
                        if ((flags & FLAG_NO_NEG_SYNC) != 0) {
                                if (iha_msgout(sc, MSG_NOOP) == -1)
                                        return (-1);
                        } else if (iha_msgout_sdtr(sc) == -1)
                                return (-1);
                        break;

                default:
                        printf("[debug] -s3- bad phase = %d\n", sc->sc_phase);
                        iha_bad_seq(sc);
                        return (-1);
                }
        }
}

/*
 * iha_state_4 - start a data xfer. Handle any bus state
 *               transitions until PHASE_DATA_IN/_OUT
 *               or the attempt is abandoned. If there is
 *               no data to xfer, go to state 6 and finish
 *               processing the current SCB.
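 *               (An SCB with both FLAG_DATAIN and FLAG_DATAOUT set marks
 *               a command for which no transfer was requested, which is
 *               why the first check below goes straight to state 6.)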
1294 */ 1295 static int 1296 iha_state_4(sc) 1297 struct iha_softc *sc; 1298 { 1299 struct iha_scsi_req_q *scb = sc->sc_actscb; 1300 1301 if ((scb->flags & (FLAG_DATAIN | FLAG_DATAOUT)) == 1302 (FLAG_DATAIN | FLAG_DATAOUT)) 1303 return (6); /* Both dir flags set => NO xfer was requested */ 1304 1305 for (;;) { 1306 if (scb->buflen == 0) 1307 return (6); 1308 1309 switch (sc->sc_phase) { 1310 case PHASE_STATUS_IN: 1311 if ((scb->flags & (FLAG_DATAIN | FLAG_DATAOUT)) != 0) 1312 scb->ha_stat = iha_data_over_run(scb); 1313 if ((iha_status_msg(sc)) == -1) 1314 return (-1); 1315 break; 1316 1317 case PHASE_MSG_IN: 1318 scb->nextstat = 4; 1319 if (iha_msgin(sc) == -1) 1320 return (-1); 1321 break; 1322 1323 case PHASE_MSG_OUT: 1324 if ((sc->sc_status0 & SPERR) != 0) { 1325 scb->buflen = 0; 1326 scb->ha_stat = HOST_SPERR; 1327 if (iha_msgout(sc, MSG_INITIATOR_DET_ERR) == -1) 1328 return (-1); 1329 else 1330 return (6); 1331 } else { 1332 if (iha_msgout(sc, MSG_NOOP) == -1) 1333 return (-1); 1334 } 1335 break; 1336 1337 case PHASE_DATA_IN: 1338 return (iha_xfer_data(sc, scb, FLAG_DATAIN)); 1339 1340 case PHASE_DATA_OUT: 1341 return (iha_xfer_data(sc, scb, FLAG_DATAOUT)); 1342 1343 default: 1344 iha_bad_seq(sc); 1345 return (-1); 1346 } 1347 } 1348 } 1349 1350 /* 1351 * iha_state_5 - handle the partial or final completion of the current 1352 * data xfer. If DMA is still active stop it. If there is 1353 * more data to xfer, go to state 4 and start the xfer. 1354 * If not go to state 6 and finish the SCB. 1355 */ 1356 static int 1357 iha_state_5(sc) 1358 struct iha_softc *sc; 1359 { 1360 bus_space_tag_t iot = sc->sc_iot; 1361 bus_space_handle_t ioh = sc->sc_ioh; 1362 struct iha_scsi_req_q *scb = sc->sc_actscb; 1363 struct iha_sg_element *sg; 1364 u_int32_t cnt; 1365 u_int8_t period, stat; 1366 long xcnt; /* cannot use unsigned!! see code: if (xcnt < 0) */ 1367 int i; 1368 1369 cnt = bus_space_read_4(iot, ioh, TUL_STCNT0) & TCNT; 1370 1371 /* 1372 * Stop any pending DMA activity and check for parity error. 
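         * Once the transfer engine has been quiesced, 'cnt' is the number
         * of bytes still to be moved and 'xcnt' (buflen - cnt) the number
         * already transferred; the code below advances the scatter/gather
         * list or bufaddr by xcnt and returns 4 to restart the transfer.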
1373 */ 1374 1375 if ((bus_space_read_1(iot, ioh, TUL_DCMD) & XDIR) != 0) { 1376 /* Input Operation */ 1377 if ((sc->sc_status0 & SPERR) != 0) 1378 scb->ha_stat = HOST_SPERR; 1379 1380 if ((bus_space_read_1(iot, ioh, TUL_ISTUS1) & XPEND) != 0) { 1381 bus_space_write_1(iot, ioh, TUL_DCTRL0, 1382 bus_space_read_1(iot, ioh, TUL_DCTRL0) | SXSTP); 1383 while (bus_space_read_1(iot, ioh, TUL_ISTUS1) & XPEND) 1384 ; 1385 } 1386 1387 } else { 1388 /* Output Operation */ 1389 if ((sc->sc_status1 & SXCMP) == 0) { 1390 period = scb->tcs->syncm; 1391 if ((period & PERIOD_WIDE_SCSI) != 0) 1392 cnt += (bus_space_read_1(iot, ioh, 1393 TUL_SFIFOCNT) & FIFOC) * 2; 1394 else 1395 cnt += bus_space_read_1(iot, ioh, 1396 TUL_SFIFOCNT) & FIFOC; 1397 } 1398 1399 if ((bus_space_read_1(iot, ioh, TUL_ISTUS1) & XPEND) != 0) { 1400 bus_space_write_1(iot, ioh, TUL_DCMD, ABTXFR); 1401 do 1402 stat = bus_space_read_1(iot, ioh, TUL_ISTUS0); 1403 while ((stat & DABT) == 0); 1404 } 1405 1406 if ((cnt == 1) && (sc->sc_phase == PHASE_DATA_OUT)) { 1407 if (iha_wait(sc, XF_FIFO_OUT) == -1) 1408 return (-1); 1409 cnt = 0; 1410 1411 } else if ((sc->sc_status1 & SXCMP) == 0) 1412 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO); 1413 } 1414 1415 if (cnt == 0) { 1416 scb->buflen = 0; 1417 return (6); 1418 } 1419 1420 /* Update active data pointer and restart the I/O at the new point */ 1421 1422 xcnt = scb->buflen - cnt; /* xcnt == bytes xferred */ 1423 scb->buflen = cnt; /* cnt == bytes left */ 1424 1425 if ((scb->flags & FLAG_SG) != 0) { 1426 sg = &scb->sglist[scb->sg_index]; 1427 for (i = scb->sg_index; i < scb->sg_max; sg++, i++) { 1428 xcnt -= le32toh(sg->sg_len); 1429 if (xcnt < 0) { 1430 xcnt += le32toh(sg->sg_len); 1431 1432 sg->sg_addr = 1433 htole32(le32toh(sg->sg_addr) + xcnt); 1434 sg->sg_len = 1435 htole32(le32toh(sg->sg_len) - xcnt); 1436 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1437 scb->sgoffset, IHA_SG_SIZE, 1438 BUS_DMASYNC_PREWRITE); 1439 1440 scb->bufaddr += (i - scb->sg_index) * 1441 sizeof(struct iha_sg_element); 1442 scb->sg_size = scb->sg_max - i; 1443 scb->sg_index = i; 1444 1445 return (4); 1446 } 1447 } 1448 return (6); 1449 1450 } else 1451 scb->bufaddr += xcnt; 1452 1453 return (4); 1454 } 1455 1456 /* 1457 * iha_state_6 - finish off the active scb (may require several 1458 * iterations if PHASE_MSG_IN) and return -1 to indicate 1459 * the bus is free. 
1460 */ 1461 static int 1462 iha_state_6(sc) 1463 struct iha_softc *sc; 1464 { 1465 1466 for (;;) { 1467 switch (sc->sc_phase) { 1468 case PHASE_STATUS_IN: 1469 if (iha_status_msg(sc) == -1) 1470 return (-1); 1471 break; 1472 1473 case PHASE_MSG_IN: 1474 sc->sc_actscb->nextstat = 6; 1475 if ((iha_msgin(sc)) == -1) 1476 return (-1); 1477 break; 1478 1479 case PHASE_MSG_OUT: 1480 if ((iha_msgout(sc, MSG_NOOP)) == -1) 1481 return (-1); 1482 break; 1483 1484 case PHASE_DATA_IN: 1485 if (iha_xpad_in(sc) == -1) 1486 return (-1); 1487 break; 1488 1489 case PHASE_DATA_OUT: 1490 if (iha_xpad_out(sc) == -1) 1491 return (-1); 1492 break; 1493 1494 default: 1495 iha_bad_seq(sc); 1496 return (-1); 1497 } 1498 } 1499 } 1500 1501 /* 1502 * iha_state_8 - reset the active device and all busy SCBs using it 1503 */ 1504 static int 1505 iha_state_8(sc) 1506 struct iha_softc *sc; 1507 { 1508 bus_space_tag_t iot = sc->sc_iot; 1509 bus_space_handle_t ioh = sc->sc_ioh; 1510 struct iha_scsi_req_q *scb; 1511 int i; 1512 u_int8_t tar; 1513 1514 if (sc->sc_phase == PHASE_MSG_OUT) { 1515 bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_BUS_DEV_RESET); 1516 1517 scb = sc->sc_actscb; 1518 1519 /* This SCB finished correctly -- resetting the device */ 1520 iha_append_done_scb(sc, scb, HOST_OK); 1521 1522 iha_reset_tcs(scb->tcs, sc->sc_sconf1); 1523 1524 tar = scb->target; 1525 for (i = 0, scb = sc->sc_scb; i < IHA_MAX_SCB; i++, scb++) 1526 if (scb->target == tar) 1527 switch (scb->status) { 1528 case STATUS_BUSY: 1529 iha_append_done_scb(sc, 1530 scb, HOST_DEV_RST); 1531 break; 1532 1533 case STATUS_SELECT: 1534 iha_push_pend_scb(sc, scb); 1535 break; 1536 1537 default: 1538 break; 1539 } 1540 1541 sc->sc_flags |= FLAG_EXPECT_DISC; 1542 1543 if (iha_wait(sc, XF_FIFO_OUT) == -1) 1544 return (-1); 1545 } 1546 1547 iha_bad_seq(sc); 1548 return (-1); 1549 } 1550 1551 /* 1552 * iha_xfer_data - initiate the DMA xfer of the data 1553 */ 1554 static int 1555 iha_xfer_data(sc, scb, direction) 1556 struct iha_softc *sc; 1557 struct iha_scsi_req_q *scb; 1558 int direction; 1559 { 1560 bus_space_tag_t iot = sc->sc_iot; 1561 bus_space_handle_t ioh = sc->sc_ioh; 1562 u_int32_t xferlen; 1563 u_int8_t xfercmd; 1564 1565 if ((scb->flags & (FLAG_DATAIN | FLAG_DATAOUT)) != direction) 1566 return (6); /* wrong direction, abandon I/O */ 1567 1568 bus_space_write_4(iot, ioh, TUL_STCNT0, scb->buflen); 1569 1570 xfercmd = STRXFR; 1571 if (direction == FLAG_DATAIN) 1572 xfercmd |= XDIR; 1573 1574 if (scb->flags & FLAG_SG) { 1575 xferlen = scb->sg_size * sizeof(struct iha_sg_element); 1576 xfercmd |= SGXFR; 1577 } else 1578 xferlen = scb->buflen; 1579 1580 bus_space_write_4(iot, ioh, TUL_DXC, xferlen); 1581 bus_space_write_4(iot, ioh, TUL_DXPA, scb->bufaddr); 1582 bus_space_write_1(iot, ioh, TUL_DCMD, xfercmd); 1583 1584 bus_space_write_1(iot, ioh, TUL_SCMD, 1585 (direction == FLAG_DATAIN) ? 
XF_DMA_IN : XF_DMA_OUT); 1586 1587 scb->nextstat = 5; 1588 1589 return (0); 1590 } 1591 1592 static int 1593 iha_xpad_in(sc) 1594 struct iha_softc *sc; 1595 { 1596 bus_space_tag_t iot = sc->sc_iot; 1597 bus_space_handle_t ioh = sc->sc_ioh; 1598 struct iha_scsi_req_q *scb = sc->sc_actscb; 1599 1600 if ((scb->flags & (FLAG_DATAIN | FLAG_DATAOUT)) != 0) 1601 scb->ha_stat = HOST_DO_DU; 1602 1603 for (;;) { 1604 if ((scb->tcs->syncm & PERIOD_WIDE_SCSI) != 0) 1605 bus_space_write_4(iot, ioh, TUL_STCNT0, 2); 1606 else 1607 bus_space_write_4(iot, ioh, TUL_STCNT0, 1); 1608 1609 switch (iha_wait(sc, XF_FIFO_IN)) { 1610 case -1: 1611 return (-1); 1612 1613 case PHASE_DATA_IN: 1614 bus_space_read_1(iot, ioh, TUL_SFIFO); 1615 break; 1616 1617 default: 1618 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO); 1619 return (6); 1620 } 1621 } 1622 } 1623 1624 static int 1625 iha_xpad_out(sc) 1626 struct iha_softc *sc; 1627 { 1628 bus_space_tag_t iot = sc->sc_iot; 1629 bus_space_handle_t ioh = sc->sc_ioh; 1630 struct iha_scsi_req_q *scb = sc->sc_actscb; 1631 1632 if ((scb->flags & (FLAG_DATAIN | FLAG_DATAOUT)) != 0) 1633 scb->ha_stat = HOST_DO_DU; 1634 1635 for (;;) { 1636 if ((scb->tcs->syncm & PERIOD_WIDE_SCSI) != 0) 1637 bus_space_write_4(iot, ioh, TUL_STCNT0, 2); 1638 else 1639 bus_space_write_4(iot, ioh, TUL_STCNT0, 1); 1640 1641 bus_space_write_1(iot, ioh, TUL_SFIFO, 0); 1642 1643 switch (iha_wait(sc, XF_FIFO_OUT)) { 1644 case -1: 1645 return (-1); 1646 1647 case PHASE_DATA_OUT: 1648 break; 1649 1650 default: 1651 /* Disable wide CPU to allow read 16 bits */ 1652 bus_space_write_1(iot, ioh, TUL_SCTRL1, EHRSL); 1653 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO); 1654 return (6); 1655 } 1656 } 1657 } 1658 1659 static int 1660 iha_status_msg(sc) 1661 struct iha_softc *sc; 1662 { 1663 bus_space_tag_t iot = sc->sc_iot; 1664 bus_space_handle_t ioh = sc->sc_ioh; 1665 struct iha_scsi_req_q *scb; 1666 u_int8_t msg; 1667 int phase; 1668 1669 if ((phase = iha_wait(sc, CMD_COMP)) == -1) 1670 return (-1); 1671 1672 scb = sc->sc_actscb; 1673 1674 scb->ta_stat = bus_space_read_1(iot, ioh, TUL_SFIFO); 1675 1676 if (phase == PHASE_MSG_OUT) { 1677 if ((sc->sc_status0 & SPERR) == 0) 1678 bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_NOOP); 1679 else 1680 bus_space_write_1(iot, ioh, TUL_SFIFO, 1681 MSG_PARITY_ERROR); 1682 1683 return (iha_wait(sc, XF_FIFO_OUT)); 1684 1685 } else if (phase == PHASE_MSG_IN) { 1686 msg = bus_space_read_1(iot, ioh, TUL_SFIFO); 1687 1688 if ((sc->sc_status0 & SPERR) != 0) 1689 switch (iha_wait(sc, MSG_ACCEPT)) { 1690 case -1: 1691 return (-1); 1692 case PHASE_MSG_OUT: 1693 bus_space_write_1(iot, ioh, TUL_SFIFO, 1694 MSG_PARITY_ERROR); 1695 return (iha_wait(sc, XF_FIFO_OUT)); 1696 default: 1697 iha_bad_seq(sc); 1698 return (-1); 1699 } 1700 1701 if (msg == MSG_CMDCOMPLETE) { 1702 if ((scb->ta_stat & 1703 (SCSI_INTERM | SCSI_BUSY)) == SCSI_INTERM) { 1704 iha_bad_seq(sc); 1705 return (-1); 1706 } 1707 sc->sc_flags |= FLAG_EXPECT_DONE_DISC; 1708 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO); 1709 return (iha_wait(sc, MSG_ACCEPT)); 1710 } 1711 1712 if ((msg == MSG_LINK_CMD_COMPLETE) 1713 || (msg == MSG_LINK_CMD_COMPLETEF)) { 1714 if ((scb->ta_stat & 1715 (SCSI_INTERM | SCSI_BUSY)) == SCSI_INTERM) 1716 return (iha_wait(sc, MSG_ACCEPT)); 1717 } 1718 } 1719 1720 iha_bad_seq(sc); 1721 return (-1); 1722 } 1723 1724 /* 1725 * iha_busfree - SCSI bus free detected as a result of a TIMEOUT or 1726 * DISCONNECT interrupt. Reset the tulip FIFO and 1727 * SCONFIG0 and enable hardware reselect. 
 *               Move any active
 *               SCB to sc_donescb list. Return an appropriate host status
 *               if an I/O was active.
 */
static void
iha_busfree(sc)
        struct iha_softc *sc;
{
        bus_space_tag_t iot = sc->sc_iot;
        bus_space_handle_t ioh = sc->sc_ioh;
        struct iha_scsi_req_q *scb;

        bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
        bus_space_write_1(iot, ioh, TUL_SCONFIG0, SCONFIG0DEFAULT);
        bus_space_write_1(iot, ioh, TUL_SCTRL1, EHRSL);

        scb = sc->sc_actscb;

        if (scb != NULL) {
                if (scb->status == STATUS_SELECT)
                        /* selection timeout */
                        iha_append_done_scb(sc, scb, HOST_SEL_TOUT);
                else
                        /* Unexpected bus free */
                        iha_append_done_scb(sc, scb, HOST_BAD_PHAS);
        }
}

static void
iha_reset_scsi_bus(sc)
        struct iha_softc *sc;
{
        struct iha_scsi_req_q *scb;
        struct tcs *tcs;
        int i, s;

        s = splbio();

        iha_reset_dma(sc);

        for (i = 0, scb = sc->sc_scb; i < IHA_MAX_SCB; i++, scb++)
                switch (scb->status) {
                case STATUS_BUSY:
                        iha_append_done_scb(sc, scb, HOST_SCSI_RST);
                        break;

                case STATUS_SELECT:
                        iha_push_pend_scb(sc, scb);
                        break;

                default:
                        break;
                }

        for (i = 0, tcs = sc->sc_tcs; i < IHA_MAX_TARGETS; i++, tcs++)
                iha_reset_tcs(tcs, sc->sc_sconf1);

        splx(s);
}

/*
 * iha_resel - handle a detected SCSI bus reselection request.
 */
static int
iha_resel(sc)
        struct iha_softc *sc;
{
        bus_space_tag_t iot = sc->sc_iot;
        bus_space_handle_t ioh = sc->sc_ioh;
        struct iha_scsi_req_q *scb;
        struct tcs *tcs;
        u_int8_t tag, target, lun, msg, abortmsg;

        if (sc->sc_actscb != NULL) {
                if ((sc->sc_actscb->status == STATUS_SELECT))
                        iha_push_pend_scb(sc, sc->sc_actscb);
                sc->sc_actscb = NULL;
        }

        target = bus_space_read_1(iot, ioh, TUL_SBID);
        lun = bus_space_read_1(iot, ioh, TUL_SALVC) & MSG_IDENTIFY_LUNMASK;

        tcs = &sc->sc_tcs[target];

        bus_space_write_1(iot, ioh, TUL_SCONFIG0, tcs->sconfig0);
        bus_space_write_1(iot, ioh, TUL_SYNCM, tcs->syncm);

        abortmsg = MSG_ABORT; /* until a valid tag has been obtained */

        if (tcs->ntagscb != NULL)
                /* There is a non-tagged I/O active on the target */
                scb = tcs->ntagscb;

        else {
                /*
                 * Since there is no active non-tagged operation
                 * read the tag type, the tag itself, and find
                 * the appropriate scb by indexing sc_scb with
                 * the tag.
                 */

                switch (iha_wait(sc, MSG_ACCEPT)) {
                case -1:
                        return (-1);
                case PHASE_MSG_IN:
                        bus_space_write_4(iot, ioh, TUL_STCNT0, 1);
                        if ((iha_wait(sc, XF_FIFO_IN)) == -1)
                                return (-1);
                        break;
                default:
                        goto abort;
                }

                msg = bus_space_read_1(iot, ioh, TUL_SFIFO); /* Read Tag Msg */

                if ((msg < MSG_SIMPLE_Q_TAG) || (msg > MSG_ORDERED_Q_TAG))
                        goto abort;

                switch (iha_wait(sc, MSG_ACCEPT)) {
                case -1:
                        return (-1);
                case PHASE_MSG_IN:
                        bus_space_write_4(iot, ioh, TUL_STCNT0, 1);
                        if ((iha_wait(sc, XF_FIFO_IN)) == -1)
                                return (-1);
                        break;
                default:
                        goto abort;
                }

                tag = bus_space_read_1(iot, ioh, TUL_SFIFO); /* Read Tag ID */
                scb = &sc->sc_scb[tag];

                abortmsg = MSG_ABORT_TAG; /* now that we have a valid tag!
*/ 1861 } 1862 1863 if ((scb->target != target) 1864 || (scb->lun != lun) 1865 || (scb->status != STATUS_BUSY)) { 1866 abort: 1867 iha_msgout_abort(sc, abortmsg); 1868 return (-1); 1869 } 1870 1871 sc->sc_actscb = scb; 1872 1873 if (iha_wait(sc, MSG_ACCEPT) == -1) 1874 return (-1); 1875 1876 return (iha_next_state(sc)); 1877 } 1878 1879 static int 1880 iha_msgin(sc) 1881 struct iha_softc *sc; 1882 { 1883 bus_space_tag_t iot = sc->sc_iot; 1884 bus_space_handle_t ioh = sc->sc_ioh; 1885 int flags; 1886 int phase; 1887 u_int8_t msg; 1888 1889 for (;;) { 1890 if ((bus_space_read_1(iot, ioh, TUL_SFIFOCNT) & FIFOC) > 0) 1891 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO); 1892 1893 bus_space_write_4(iot, ioh, TUL_STCNT0, 1); 1894 1895 phase = iha_wait(sc, XF_FIFO_IN); 1896 msg = bus_space_read_1(iot, ioh, TUL_SFIFO); 1897 1898 switch (msg) { 1899 case MSG_DISCONNECT: 1900 sc->sc_flags |= FLAG_EXPECT_DISC; 1901 if (iha_wait(sc, MSG_ACCEPT) != -1) 1902 iha_bad_seq(sc); 1903 phase = -1; 1904 break; 1905 case MSG_SAVEDATAPOINTER: 1906 case MSG_RESTOREPOINTERS: 1907 case MSG_NOOP: 1908 phase = iha_wait(sc, MSG_ACCEPT); 1909 break; 1910 case MSG_MESSAGE_REJECT: 1911 /* XXX - need to clear FIFO like other 'Clear ATN'?*/ 1912 iha_set_ssig(sc, REQ | BSY | SEL | ATN, 0); 1913 flags = sc->sc_actscb->tcs->flags; 1914 if ((flags & FLAG_NO_NEG_SYNC) == 0) 1915 iha_set_ssig(sc, REQ | BSY | SEL, ATN); 1916 phase = iha_wait(sc, MSG_ACCEPT); 1917 break; 1918 case MSG_EXTENDED: 1919 phase = iha_msgin_extended(sc); 1920 break; 1921 case MSG_IGN_WIDE_RESIDUE: 1922 phase = iha_msgin_ignore_wid_resid(sc); 1923 break; 1924 case MSG_CMDCOMPLETE: 1925 sc->sc_flags |= FLAG_EXPECT_DONE_DISC; 1926 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO); 1927 phase = iha_wait(sc, MSG_ACCEPT); 1928 if (phase != -1) { 1929 iha_bad_seq(sc); 1930 return (-1); 1931 } 1932 break; 1933 default: 1934 printf("[debug] iha_msgin: bad msg type: %d\n", msg); 1935 phase = iha_msgout_reject(sc); 1936 break; 1937 } 1938 1939 if (phase != PHASE_MSG_IN) 1940 return (phase); 1941 } 1942 /* NOTREACHED */ 1943 } 1944 1945 static int 1946 iha_msgin_ignore_wid_resid(sc) 1947 struct iha_softc *sc; 1948 { 1949 bus_space_tag_t iot = sc->sc_iot; 1950 bus_space_handle_t ioh = sc->sc_ioh; 1951 int phase; 1952 1953 phase = iha_wait(sc, MSG_ACCEPT); 1954 1955 if (phase == PHASE_MSG_IN) { 1956 phase = iha_wait(sc, XF_FIFO_IN); 1957 1958 if (phase != -1) { 1959 bus_space_write_1(iot, ioh, TUL_SFIFO, 0); 1960 bus_space_read_1(iot, ioh, TUL_SFIFO); 1961 bus_space_read_1(iot, ioh, TUL_SFIFO); 1962 1963 phase = iha_wait(sc, MSG_ACCEPT); 1964 } 1965 } 1966 1967 return (phase); 1968 } 1969 1970 static int 1971 iha_msgin_extended(sc) 1972 struct iha_softc *sc; 1973 { 1974 bus_space_tag_t iot = sc->sc_iot; 1975 bus_space_handle_t ioh = sc->sc_ioh; 1976 int flags, i, phase, msglen, msgcode; 1977 1978 /* 1979 * XXX - can we just stop reading and reject, or do we have to 1980 * read all input, discarding the excess, and then reject 1981 */ 1982 for (i = 0; i < IHA_MAX_EXTENDED_MSG; i++) { 1983 phase = iha_wait(sc, MSG_ACCEPT); 1984 1985 if (phase != PHASE_MSG_IN) 1986 return (phase); 1987 1988 bus_space_write_4(iot, ioh, TUL_STCNT0, 1); 1989 1990 if (iha_wait(sc, XF_FIFO_IN) == -1) 1991 return (-1); 1992 1993 sc->sc_msg[i] = bus_space_read_1(iot, ioh, TUL_SFIFO); 1994 1995 if (sc->sc_msg[0] == i) 1996 break; 1997 } 1998 1999 msglen = sc->sc_msg[0]; 2000 msgcode = sc->sc_msg[1]; 2001 2002 if ((msglen == MSG_EXT_SDTR_LEN) && (msgcode == MSG_EXT_SDTR)) { 2003 if 
                    (iha_msgin_sdtr(sc) == 0) {
                        iha_sync_done(sc);
                        return (iha_wait(sc, MSG_ACCEPT));
                }

                iha_set_ssig(sc, REQ | BSY | SEL, ATN);

                phase = iha_wait(sc, MSG_ACCEPT);
                if (phase != PHASE_MSG_OUT)
                        return (phase);

                /* Clear FIFO for important message - final SYNC offer */
                bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);

                iha_sync_done(sc); /* This is our final offer */

        } else if ((msglen == MSG_EXT_WDTR_LEN) && (msgcode == MSG_EXT_WDTR)) {

                flags = sc->sc_actscb->tcs->flags;

                if ((flags & FLAG_NO_WIDE) != 0)
                        /* Offer 8bit xfers only */
                        sc->sc_msg[2] = MSG_EXT_WDTR_BUS_8_BIT;

                else if (sc->sc_msg[2] > MSG_EXT_WDTR_BUS_32_BIT)
                        /* BAD MSG */
                        return (iha_msgout_reject(sc));

                else if (sc->sc_msg[2] == MSG_EXT_WDTR_BUS_32_BIT)
                        /* Offer 16bit instead */
                        sc->sc_msg[2] = MSG_EXT_WDTR_BUS_16_BIT;

                else {
                        iha_wide_done(sc);
                        if ((flags & FLAG_NO_NEG_SYNC) == 0)
                                iha_set_ssig(sc, REQ | BSY | SEL, ATN);
                        return (iha_wait(sc, MSG_ACCEPT));
                }

                iha_set_ssig(sc, REQ | BSY | SEL, ATN);

                phase = iha_wait(sc, MSG_ACCEPT);
                if (phase != PHASE_MSG_OUT)
                        return (phase);
        } else
                return (iha_msgout_reject(sc));

        return (iha_msgout_extended(sc));
}

/*
 * iha_msgin_sdtr - check SDTR msg in sc_msg. If the offer is
 *                  acceptable leave sc_msg as is and return 0.
 *                  If the negotiation must continue, modify sc_msg
 *                  as needed and return 1.
 */
static int
iha_msgin_sdtr(sc)
        struct iha_softc *sc;
{
        int flags;
        int newoffer;
        u_int8_t default_period;

        flags = sc->sc_actscb->tcs->flags;

        default_period = iha_rate_tbl[flags & FLAG_SCSI_RATE];

        if (sc->sc_msg[3] == 0)
                /* target offered async only. Accept it.
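                   (sc_msg[2] holds the SDTR transfer period and sc_msg[3]
                   the REQ/ACK offset from the extended message; an offset
                   of zero selects asynchronous transfers.)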
*/ 2073 return (0); 2074 2075 newoffer = 0; 2076 2077 if ((flags & FLAG_NO_SYNC) != 0) { 2078 sc->sc_msg[3] = 0; 2079 newoffer = 1; 2080 } 2081 2082 if (sc->sc_msg[3] > IHA_MAX_OFFSET) { 2083 sc->sc_msg[3] = IHA_MAX_OFFSET; 2084 newoffer = 1; 2085 } 2086 2087 if (sc->sc_msg[2] < default_period) { 2088 sc->sc_msg[2] = default_period; 2089 newoffer = 1; 2090 } 2091 2092 if (sc->sc_msg[2] > IHA_MAX_PERIOD) { 2093 /* Use async */ 2094 sc->sc_msg[3] = 0; 2095 newoffer = 1; 2096 } 2097 2098 return (newoffer); 2099 } 2100 2101 static int 2102 iha_msgout(sc, msg) 2103 struct iha_softc *sc; 2104 u_int8_t msg; 2105 { 2106 2107 bus_space_write_1(sc->sc_iot, sc->sc_ioh, TUL_SFIFO, msg); 2108 2109 return (iha_wait(sc, XF_FIFO_OUT)); 2110 } 2111 2112 static void 2113 iha_msgout_abort(sc, aborttype) 2114 struct iha_softc *sc; 2115 u_int8_t aborttype; 2116 { 2117 2118 iha_set_ssig(sc, REQ | BSY | SEL, ATN); 2119 2120 switch (iha_wait(sc, MSG_ACCEPT)) { 2121 case -1: 2122 break; 2123 2124 case PHASE_MSG_OUT: 2125 sc->sc_flags |= FLAG_EXPECT_DISC; 2126 if (iha_msgout(sc, aborttype) != -1) 2127 iha_bad_seq(sc); 2128 break; 2129 2130 default: 2131 iha_bad_seq(sc); 2132 break; 2133 } 2134 } 2135 2136 static int 2137 iha_msgout_reject(sc) 2138 struct iha_softc *sc; 2139 { 2140 2141 iha_set_ssig(sc, REQ | BSY | SEL, ATN); 2142 2143 if (iha_wait(sc, MSG_ACCEPT) == PHASE_MSG_OUT) 2144 return (iha_msgout(sc, MSG_MESSAGE_REJECT)); 2145 2146 return (-1); 2147 } 2148 2149 static int 2150 iha_msgout_extended(sc) 2151 struct iha_softc *sc; 2152 { 2153 bus_space_tag_t iot = sc->sc_iot; 2154 bus_space_handle_t ioh = sc->sc_ioh; 2155 int phase; 2156 2157 bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_EXTENDED); 2158 2159 bus_space_write_multi_1(iot, ioh, TUL_SFIFO, 2160 sc->sc_msg, sc->sc_msg[0] + 1); 2161 2162 phase = iha_wait(sc, XF_FIFO_OUT); 2163 2164 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO); 2165 iha_set_ssig(sc, REQ | BSY | SEL | ATN, 0); 2166 2167 return (phase); 2168 } 2169 2170 static int 2171 iha_msgout_wdtr(sc) 2172 struct iha_softc *sc; 2173 { 2174 2175 sc->sc_actscb->tcs->flags |= FLAG_WIDE_DONE; 2176 2177 sc->sc_msg[0] = MSG_EXT_WDTR_LEN; 2178 sc->sc_msg[1] = MSG_EXT_WDTR; 2179 sc->sc_msg[2] = MSG_EXT_WDTR_BUS_16_BIT; 2180 2181 return (iha_msgout_extended(sc)); 2182 } 2183 2184 static int 2185 iha_msgout_sdtr(sc) 2186 struct iha_softc *sc; 2187 { 2188 struct tcs *tcs = sc->sc_actscb->tcs; 2189 2190 tcs->flags |= FLAG_SYNC_DONE; 2191 2192 sc->sc_msg[0] = MSG_EXT_SDTR_LEN; 2193 sc->sc_msg[1] = MSG_EXT_SDTR; 2194 sc->sc_msg[2] = iha_rate_tbl[tcs->flags & FLAG_SCSI_RATE]; 2195 sc->sc_msg[3] = IHA_MAX_OFFSET; /* REQ/ACK */ 2196 2197 return (iha_msgout_extended(sc)); 2198 } 2199 2200 static void 2201 iha_wide_done(sc) 2202 struct iha_softc *sc; 2203 { 2204 bus_space_tag_t iot = sc->sc_iot; 2205 bus_space_handle_t ioh = sc->sc_ioh; 2206 struct tcs *tcs = sc->sc_actscb->tcs; 2207 2208 tcs->syncm = 0; 2209 tcs->period = 0; 2210 tcs->offset = 0; 2211 2212 if (sc->sc_msg[2] != 0) 2213 tcs->syncm |= PERIOD_WIDE_SCSI; 2214 2215 tcs->sconfig0 &= ~ALTPD; 2216 tcs->flags &= ~FLAG_SYNC_DONE; 2217 tcs->flags |= FLAG_WIDE_DONE; 2218 2219 iha_update_xfer_mode(sc, sc->sc_actscb->target); 2220 2221 bus_space_write_1(iot, ioh, TUL_SCONFIG0, tcs->sconfig0); 2222 bus_space_write_1(iot, ioh, TUL_SYNCM, tcs->syncm); 2223 } 2224 2225 static void 2226 iha_sync_done(sc) 2227 struct iha_softc *sc; 2228 { 2229 bus_space_tag_t iot = sc->sc_iot; 2230 bus_space_handle_t ioh = sc->sc_ioh; 2231 struct tcs *tcs = sc->sc_actscb->tcs; 2232 int 
static void
iha_sync_done(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct tcs *tcs = sc->sc_actscb->tcs;
	int i;

	tcs->period = sc->sc_msg[2];
	tcs->offset = sc->sc_msg[3];
	if (tcs->offset != 0) {
		tcs->syncm |= tcs->offset;

		/* pick the highest possible rate */
		for (i = 0; i < sizeof(iha_rate_tbl); i++)
			if (iha_rate_tbl[i] >= tcs->period)
				break;

		tcs->syncm |= (i << 4);
		tcs->sconfig0 |= ALTPD;
	}

	tcs->flags |= FLAG_SYNC_DONE;

	iha_update_xfer_mode(sc, sc->sc_actscb->target);

	bus_space_write_1(iot, ioh, TUL_SCONFIG0, tcs->sconfig0);
	bus_space_write_1(iot, ioh, TUL_SYNCM, tcs->syncm);
}

void
iha_reset_chip(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;

	/* reset tulip chip */

	bus_space_write_1(iot, ioh, TUL_SCTRL0, RSCSI);

	do {
		sc->sc_sistat = bus_space_read_1(iot, ioh, TUL_SISTAT);
	} while ((sc->sc_sistat & SRSTD) == 0);

	iha_set_ssig(sc, 0, 0);

	bus_space_read_1(iot, ioh, TUL_SISTAT); /* Clear any active interrupt */
}

/*
 * iha_select - remove scb from the pending queue, mark it as the active
 *		SCB and issue the requested selection command.  SEL_ATN
 *		preloads the FIFO with scb_id and the CDB, SEL_ATN3
 *		additionally preloads the two tag queueing message bytes,
 *		and SELATNSTOP preloads nothing; scb->nextstat records
 *		which state handler runs next.
 */
static void
iha_select(sc, scb, select_type)
	struct iha_softc *sc;
	struct iha_scsi_req_q *scb;
	u_int8_t select_type;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;

	switch (select_type) {
	case SEL_ATN:
		bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_id);
		bus_space_write_multi_1(iot, ioh, TUL_SFIFO,
		    scb->cmd, scb->cmdlen);

		scb->nextstat = 2;
		break;

	case SELATNSTOP:
		scb->nextstat = 1;
		break;

	case SEL_ATN3:
		bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_id);
		bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_tagmsg);
		bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_tagid);

		bus_space_write_multi_1(iot, ioh, TUL_SFIFO, scb->cmd,
		    scb->cmdlen);

		scb->nextstat = 2;
		break;

	default:
		printf("[debug] iha_select() - unknown select type = 0x%02x\n",
		    select_type);
		return;
	}

	iha_del_pend_scb(sc, scb);
	scb->status = STATUS_SELECT;

	sc->sc_actscb = scb;

	bus_space_write_1(iot, ioh, TUL_SCMD, select_type);
}

/*
 * iha_wait - wait for an interrupt to service or a SCSI bus phase change
 *	      after writing the supplied command to the tulip chip. If
 *	      the command is NO_OP, skip the command writing.  Returns
 *	      the resulting bus phase, or -1 if the interrupt was a bus
 *	      reset, selection timeout or disconnect, all of which are
 *	      handled here.
 */
static int
iha_wait(sc, cmd)
	struct iha_softc *sc;
	u_int8_t cmd;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;

	if (cmd != NO_OP)
		bus_space_write_1(iot, ioh, TUL_SCMD, cmd);

	/*
	 * Have to do this here, in addition to in iha_isr, because
	 * interrupts might be turned off when we get here.
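	 * The loop below therefore polls TUL_STAT0 until INTPD is set
	 * before the interrupt status registers are read.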
	 */
	do {
		sc->sc_status0 = bus_space_read_1(iot, ioh, TUL_STAT0);
	} while ((sc->sc_status0 & INTPD) == 0);

	sc->sc_status1 = bus_space_read_1(iot, ioh, TUL_STAT1);
	sc->sc_sistat = bus_space_read_1(iot, ioh, TUL_SISTAT);

	sc->sc_phase = sc->sc_status0 & PH_MASK;

	if ((sc->sc_sistat & SRSTD) != 0) {
		/* SCSI bus reset interrupt */
		iha_reset_scsi_bus(sc);
		return (-1);
	}

	if ((sc->sc_sistat & RSELED) != 0)
		/* Reselection interrupt */
		return (iha_resel(sc));

	if ((sc->sc_sistat & STIMEO) != 0) {
		/* selected/reselected timeout interrupt */
		iha_busfree(sc);
		return (-1);
	}

	if ((sc->sc_sistat & DISCD) != 0) {
		/* BUS disconnection interrupt */
		if ((sc->sc_flags & FLAG_EXPECT_DONE_DISC) != 0) {
			bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
			bus_space_write_1(iot, ioh, TUL_SCONFIG0,
			    SCONFIG0DEFAULT);
			bus_space_write_1(iot, ioh, TUL_SCTRL1, EHRSL);
			iha_append_done_scb(sc, sc->sc_actscb, HOST_OK);
			sc->sc_flags &= ~FLAG_EXPECT_DONE_DISC;

		} else if ((sc->sc_flags & FLAG_EXPECT_DISC) != 0) {
			bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
			bus_space_write_1(iot, ioh, TUL_SCONFIG0,
			    SCONFIG0DEFAULT);
			bus_space_write_1(iot, ioh, TUL_SCTRL1, EHRSL);
			sc->sc_actscb = NULL;
			sc->sc_flags &= ~FLAG_EXPECT_DISC;

		} else
			iha_busfree(sc);

		return (-1);
	}

	return (sc->sc_phase);
}

/*
 * iha_done_scb - We have a scb which has been processed by the
 *		  adaptor, now we look to see how the operation went.
 */
static void
iha_done_scb(sc, scb)
	struct iha_softc *sc;
	struct iha_scsi_req_q *scb;
{
	struct scsipi_xfer *xs = scb->xs;

	if (xs != NULL) {
		/* Cancel the timeout. */
		callout_stop(&xs->xs_callout);

		if (scb->flags & (FLAG_DATAIN | FLAG_DATAOUT)) {
			bus_dmamap_sync(sc->sc_dmat, scb->dmap,
			    0, scb->dmap->dm_mapsize,
			    (scb->flags & FLAG_DATAIN) ?
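			    /* data-in xfers modified memory, data-out only read it */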
			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, scb->dmap);
		}

		xs->status = scb->ta_stat;

		switch (scb->ha_stat) {
		case HOST_OK:
			switch (scb->ta_stat) {
			case SCSI_OK:
			case SCSI_CONDITION_MET:
			case SCSI_INTERM:
			case SCSI_INTERM_COND_MET:
				xs->resid = scb->buflen;
				xs->error = XS_NOERROR;
				if ((scb->flags & FLAG_RSENS) != 0)
					xs->error = XS_SENSE;
				break;

			case SCSI_RESV_CONFLICT:
			case SCSI_BUSY:
			case SCSI_QUEUE_FULL:
				xs->error = XS_BUSY;
				break;

			case SCSI_TERMINATED:
			case SCSI_ACA_ACTIVE:
			case SCSI_CHECK:
				/*
				 * Force renegotiation and recycle the SCB
				 * into a REQUEST SENSE; completion is
				 * deferred until the sense data arrives.
				 */
				scb->tcs->flags &=
				    ~(FLAG_SYNC_DONE | FLAG_WIDE_DONE);

				if ((scb->flags & FLAG_RSENS) != 0 ||
				    iha_push_sense_request(sc, scb) != 0) {
					scb->flags &= ~FLAG_RSENS;
					printf("%s: request sense failed\n",
					    sc->sc_dev.dv_xname);
					xs->error = XS_DRIVER_STUFFUP;
					break;
				}

				xs->error = XS_SENSE;
				return;

			default:
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
			break;

		case HOST_SEL_TOUT:
			xs->error = XS_SELTIMEOUT;
			break;

		case HOST_SCSI_RST:
		case HOST_DEV_RST:
			xs->error = XS_RESET;
			break;

		case HOST_SPERR:
			printf("%s: SCSI Parity error detected\n",
			    sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case HOST_TIMED_OUT:
			xs->error = XS_TIMEOUT;
			break;

		case HOST_DO_DU:
		case HOST_BAD_PHAS:
		default:
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}

		scsipi_done(xs);
	}

	iha_append_free_scb(sc, scb);
}

static void
iha_timeout(arg)
	void *arg;
{
	struct iha_scsi_req_q *scb = (struct iha_scsi_req_q *)arg;
	struct scsipi_xfer *xs = scb->xs;
	struct scsipi_periph *periph;
	struct iha_softc *sc;

	if (xs == NULL) {
		printf("[debug] iha_timeout called with xs == NULL\n");
		return;
	}

	/* Only dereference xs once we know it is valid. */
	periph = xs->xs_periph;
	sc = (void *)periph->periph_channel->chan_adapter->adapt_dev;

	scsipi_printaddr(periph);
	printf("SCSI OpCode 0x%02x timed out\n", xs->cmd->opcode);

	iha_abort_xs(sc, xs, HOST_TIMED_OUT);
}

static void
iha_exec_scb(sc, scb)
	struct iha_softc *sc;
	struct iha_scsi_req_q *scb;
{
	bus_space_tag_t iot;
	bus_space_handle_t ioh;
	bus_dmamap_t dm;
	struct scsipi_xfer *xs = scb->xs;
	int nseg, s;

	dm = scb->dmap;
	nseg = dm->dm_nsegs;

	if (nseg > 1) {
		struct iha_sg_element *sg = scb->sglist;
		int i;

		for (i = 0; i < nseg; i++) {
			sg[i].sg_len = htole32(dm->dm_segs[i].ds_len);
			sg[i].sg_addr = htole32(dm->dm_segs[i].ds_addr);
		}
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    scb->sgoffset, IHA_SG_SIZE,
		    BUS_DMASYNC_PREWRITE);

		scb->flags |= FLAG_SG;
		scb->sg_size = scb->sg_max = nseg;
		scb->sg_index = 0;

		scb->bufaddr = scb->sg_addr;
	} else
		scb->bufaddr = dm->dm_segs[0].ds_addr;

	if ((xs->xs_control & XS_CTL_POLL) == 0) {
		int timeout = xs->timeout;
		timeout = (timeout > 100000) ?
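		    /* divide first so that timeout * hz cannot overflow */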
		    timeout / 1000 * hz : timeout * hz / 1000;
		if (timeout == 0)
			timeout = 1;
		callout_reset(&xs->xs_callout, timeout, iha_timeout, scb);
	}

	s = splbio();

	if (((scb->xs->xs_control & XS_CTL_RESET) != 0) ||
	    (scb->cmd[0] == REQUEST_SENSE))
		iha_push_pend_scb(sc, scb);   /* Insert SCB at head of Pend */
	else
		iha_append_pend_scb(sc, scb); /* Append SCB to tail of Pend */

	/*
	 * Run through iha_main() to ensure something is active, if
	 * only this new SCB.
	 */
	if (sc->sc_semaph != SEMAPH_IN_MAIN) {
		iot = sc->sc_iot;
		ioh = sc->sc_ioh;

		bus_space_write_1(iot, ioh, TUL_IMSK, MASK_ALL);
		sc->sc_semaph = SEMAPH_IN_MAIN;

		splx(s);
		iha_main(sc);
		s = splbio();

		sc->sc_semaph = ~SEMAPH_IN_MAIN;
		bus_space_write_1(iot, ioh, TUL_IMSK, (MASK_ALL & ~MSCMP));
	}

	splx(s);
}

/*
 * iha_set_ssig - read the current scsi signal mask, then write a new
 *		  one which turns off/on the specified signals.
 */
static void
iha_set_ssig(sc, offsigs, onsigs)
	struct iha_softc *sc;
	u_int8_t offsigs, onsigs;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int8_t currsigs;

	currsigs = bus_space_read_1(iot, ioh, TUL_SSIGI);
	bus_space_write_1(iot, ioh, TUL_SSIGO, (currsigs & ~offsigs) | onsigs);
}

/*
 * iha_alloc_sglist - allocate and map sglist for SCB's
 */
static int
iha_alloc_sglist(sc)
	struct iha_softc *sc;
{
	bus_dma_segment_t seg;
	int error, rseg;

	/*
	 * Allocate dma-safe memory for the SCB's sglist
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    IHA_SG_SIZE * IHA_MAX_SCB,
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate sglist, error = %d\n", error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    IHA_SG_SIZE * IHA_MAX_SCB, (caddr_t *)&sc->sc_sglist,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map sglist, error = %d\n", error);
		return (error);
	}

	/*
	 * Create and load the DMA map used for the SCBs
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat,
	    IHA_SG_SIZE * IHA_MAX_SCB, 1, IHA_SG_SIZE * IHA_MAX_SCB,
	    0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		printf(": unable to create control DMA map, error = %d\n",
		    error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
	    sc->sc_sglist, IHA_SG_SIZE * IHA_MAX_SCB,
	    NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load control DMA map, error = %d\n", error);
		return (error);
	}

	memset(sc->sc_sglist, 0, IHA_SG_SIZE * IHA_MAX_SCB);

	return (0);
}

/*
 * iha_read_eeprom - read Serial EEPROM value & set to defaults
 *		     if required. XXX - Writing does NOT work!
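 *		     Values are read through the bit-banged TUL_NVRAM
 *		     interface below, with EEPRG set in TUL_GCTRL0 for
 *		     the duration of the access.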
 */
void
iha_read_eeprom(sc, eeprom)
	struct iha_softc *sc;
	struct iha_eeprom *eeprom;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int16_t *buf = (u_int16_t *)eeprom;
	u_int8_t gctrl;

	/* Enable EEProm programming */
	gctrl = bus_space_read_1(iot, ioh, TUL_GCTRL0) | EEPRG;
	bus_space_write_1(iot, ioh, TUL_GCTRL0, gctrl);

	/* Read EEProm */
	if (iha_se2_rd_all(sc, buf) == 0)
		panic("%s: cannot read EEPROM\n", sc->sc_dev.dv_xname);

	/* Disable EEProm programming */
	gctrl = bus_space_read_1(iot, ioh, TUL_GCTRL0) & ~EEPRG;
	bus_space_write_1(iot, ioh, TUL_GCTRL0, gctrl);
}

#ifdef notused
/*
 * iha_se2_update_all - write the eeprom_default setup pattern, followed
 *			by a freshly computed checksum, out to the serial
 *			EEPROM.
 */
void
iha_se2_update_all(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int16_t *np;
	u_int32_t chksum;
	int i;

	/* Enable erase/write state of EEPROM */
	iha_se2_instr(sc, ENABLE_ERASE);
	bus_space_write_1(iot, ioh, TUL_NVRAM, 0);
	EEP_WAIT();

	np = (u_int16_t *)&eeprom_default;

	for (i = 0, chksum = 0; i < EEPROM_SIZE - 1; i++) {
		iha_se2_wr(sc, i, *np);
		chksum += *np++;
	}

	chksum &= 0x0000ffff;
	iha_se2_wr(sc, 31, chksum);

	/* Disable erase/write state of EEPROM */
	iha_se2_instr(sc, 0);
	bus_space_write_1(iot, ioh, TUL_NVRAM, 0);
	EEP_WAIT();
}

/*
 * iha_se2_wr - write the given 16 bit value into the Serial EEPROM
 *		at the specified offset
 */
void
iha_se2_wr(sc, addr, writeword)
	struct iha_softc *sc;
	int addr;
	u_int16_t writeword;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i, bit;

	/* send 'WRITE' Instruction == address | WRITE bit */
	iha_se2_instr(sc, addr | WRITE);

	for (i = 16; i > 0; i--) {
		if (writeword & (1 << (i - 1)))
			bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS | NVRDO);
		else
			bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
		EEP_WAIT();
		bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS | NVRCK);
		EEP_WAIT();
	}

	bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
	EEP_WAIT();
	bus_space_write_1(iot, ioh, TUL_NVRAM, 0);
	EEP_WAIT();
	bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
	EEP_WAIT();

	for (;;) {
		bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS | NVRCK);
		EEP_WAIT();
		bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
		EEP_WAIT();
		bit = bus_space_read_1(iot, ioh, TUL_NVRAM) & NVRDI;
		EEP_WAIT();
		if (bit != 0)
			break;	/* write complete */
	}

	bus_space_write_1(iot, ioh, TUL_NVRAM, 0);
}
#endif

/*
 * iha_se2_rd - read & return the 16 bit value at the specified
 *		offset in the Serial E2PROM
 */
u_int16_t
iha_se2_rd(sc, addr)
	struct iha_softc *sc;
	int addr;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i, bit;
	u_int16_t readword;

	/* Send 'READ' instruction == address | READ bit */
	iha_se2_instr(sc, addr | READ);

	readword = 0;
	for (i = 16; i > 0; i--) {
		bus_space_write_1(iot, ioh,
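		    /* raise the clock with chip select asserted */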
		    TUL_NVRAM, NVRCS | NVRCK);
		EEP_WAIT();
		bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
		EEP_WAIT();
		/* sample data after the falling edge of the clock */
		bit = bus_space_read_1(iot, ioh, TUL_NVRAM) & NVRDI ? 1 : 0;
		EEP_WAIT();

		readword |= bit << (i - 1);
	}

	bus_space_write_1(iot, ioh, TUL_NVRAM, 0);

	return (readword);
}

/*
 * iha_se2_rd_all - Read SCSI H/A config parameters from serial EEPROM
 */
int
iha_se2_rd_all(sc, buf)
	struct iha_softc *sc;
	u_int16_t *buf;
{
	struct iha_eeprom *eeprom = (struct iha_eeprom *)buf;
	u_int32_t chksum;
	int i;

	for (i = 0, chksum = 0; i < EEPROM_SIZE - 1; i++) {
		*buf = iha_se2_rd(sc, i);
		chksum += *buf++;
	}
	*buf = iha_se2_rd(sc, 31);	/* read checksum from EEPROM */

	chksum &= 0x0000ffff;		/* lower 16 bits */

	return ((eeprom->signature == EEP_SIGNATURE) &&
	    (eeprom->checksum == chksum));
}

/*
 * iha_se2_instr - shift the start bit and an 8-bit instruction out to
 *		   the serial E2PROM one bit at a time
 */
void
iha_se2_instr(sc, instr)
	struct iha_softc *sc;
	int instr;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int b, i;

	b = NVRCS | NVRDO;	/* Write the start bit (== 1) */

	bus_space_write_1(iot, ioh, TUL_NVRAM, b);
	EEP_WAIT();
	bus_space_write_1(iot, ioh, TUL_NVRAM, b | NVRCK);
	EEP_WAIT();

	for (i = 8; i > 0; i--) {
		if (instr & (1 << (i - 1)))
			b = NVRCS | NVRDO;	/* Write a 1 bit */
		else
			b = NVRCS;		/* Write a 0 bit */

		bus_space_write_1(iot, ioh, TUL_NVRAM, b);
		EEP_WAIT();
		bus_space_write_1(iot, ioh, TUL_NVRAM, b | NVRCK);
		EEP_WAIT();
	}

	bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
}

/*
 * iha_reset_tcs - reset the target control structure pointed
 *		   to by tcs to default values.  tcs flags
 *		   only has the negotiation done bits reset as
 *		   the other bits are fixed at initialization.
 */
void
iha_reset_tcs(tcs, config0)
	struct tcs *tcs;
	u_int8_t config0;
{

	tcs->flags &= ~(FLAG_SYNC_DONE | FLAG_WIDE_DONE);
	tcs->period = 0;
	tcs->offset = 0;
	tcs->tagcnt = 0;
	tcs->ntagscb = NULL;
	tcs->syncm = 0;
	tcs->sconfig0 = config0;
}

void
iha_update_xfer_mode(sc, target)
	struct iha_softc *sc;
	int target;
{
	struct tcs *tcs = &sc->sc_tcs[target];
	struct scsipi_xfer_mode xm;

	xm.xm_target = target;
	xm.xm_mode = 0;
	xm.xm_period = 0;
	xm.xm_offset = 0;

	if (tcs->syncm & PERIOD_WIDE_SCSI)
		xm.xm_mode |= PERIPH_CAP_WIDE16;

	if (tcs->period) {
		xm.xm_mode |= PERIPH_CAP_SYNC;
		xm.xm_period = tcs->period;
		xm.xm_offset = tcs->offset;
	}

	scsipi_async_event(&sc->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
}