/*	$NetBSD: iha.c,v 1.13 2001/11/18 14:33:10 tsutsui Exp $ */

/*
 * Initio INI-9xxxU/UW SCSI Device Driver
 *
 * Copyright (c) 2000 Ken Westerback
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 *-------------------------------------------------------------------------
 *
 * Ported from i91u.c, provided by Initio Corporation, which credits:
 *
 * Device driver for the INI-9XXXU/UW or INIC-940/950 PCI SCSI Controller.
 *
 * FreeBSD
 *
 * Written for 386bsd and FreeBSD by
 *	Winston Hung	<winstonh@initio.com>
 *
 * Copyright (c) 1997-99 Initio Corp.  All rights reserved.
 *
 *-------------------------------------------------------------------------
 */

/*
 * Ported to NetBSD by Izumi Tsutsui <tsutsui@ceres.dti.ne.jp> from OpenBSD:
 * $OpenBSD: iha.c,v 1.3 2001/02/20 00:47:33 krw Exp $
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iha.c,v 1.13 2001/11/18 14:33:10 tsutsui Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>
#include <dev/scsipi/scsi_message.h>

#include <dev/ic/ihareg.h>
#include <dev/ic/ihavar.h>

/*
 * SCSI Rate Table, indexed by FLAG_SCSI_RATE field of
 * tcs flags.
 */
static const u_int8_t iha_rate_tbl[] = {
	/* fast 20 */
	/* nanosecond divide by 4 */
	12,	/* 50ns,  20M	*/
	18,	/* 75ns,  13.3M	*/
	25,	/* 100ns, 10M	*/
	31,	/* 125ns, 8M	*/
	37,	/* 150ns, 6.6M	*/
	43,	/* 175ns, 5.7M	*/
	50,	/* 200ns, 5M	*/
	62	/* 250ns, 4M	*/
};
#define IHA_MAX_PERIOD	62
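/*
 * Each entry is a synchronous transfer period in units of 4 ns.
 * iha_msgout_sdtr() and iha_msgin_sdtr() use the entry selected by the
 * FLAG_SCSI_RATE bits of the target's flags as the fastest period the
 * driver will offer or accept, and iha_sync_done() scans the table to
 * turn a negotiated period back into the rate index programmed into
 * tcs->syncm.
 */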

#ifdef notused
static u_int16_t eeprom_default[EEPROM_SIZE] = {
	/* -- Header ------------------------------------ */
	/* signature */
	EEP_SIGNATURE,
	/* size, revision */
	EEP_WORD(EEPROM_SIZE * 2, 0x01),
	/* -- Host Adapter Structure -------------------- */
	/* model */
	0x0095,
	/* model info, number of channel */
	EEP_WORD(0x00, 1),
	/* BIOS config */
	EEP_BIOSCFG_DEFAULT,
	/* host adapter config */
	0,

	/* -- eeprom_adapter[0] ------------------------------- */
	/* ID, adapter config 1 */
	EEP_WORD(7, CFG_DEFAULT),
	/* adapter config 2, number of targets */
	EEP_WORD(0x00, 8),
	/* target flags */
	EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
	EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
	EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
	EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
	EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
	EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
	EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
	EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),

	/* -- eeprom_adapter[1] ------------------------------- */
	/* ID, adapter config 1 */
	EEP_WORD(7, CFG_DEFAULT),
	/* adapter config 2, number of targets */
	EEP_WORD(0x00, 8),
	/* target flags */
	EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
	EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
	EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
	EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
	EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
	EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
	EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
	EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
	/* reserved[5] */
	0, 0, 0, 0, 0,
	/* checksum */
	0
};
#endif

static u_int8_t iha_data_over_run(struct iha_scsi_req_q *);

static int  iha_push_sense_request(struct iha_softc *, struct iha_scsi_req_q *);
static void iha_timeout(void *);
static int  iha_alloc_sglist(struct iha_softc *);

static void iha_read_eeprom(struct iha_softc *, struct iha_eeprom *);
static int  iha_se2_rd_all(struct iha_softc *, u_int16_t *);
static void iha_se2_instr(struct iha_softc *, int);
static u_int16_t iha_se2_rd(struct iha_softc *, int);
#ifdef notused
static void iha_se2_update_all(struct iha_softc *);
static void iha_se2_wr(struct iha_softc *, int, u_int16_t);
#endif

static void iha_reset_scsi_bus(struct iha_softc *);
static void iha_reset_chip(struct iha_softc *);
static void iha_reset_dma(struct iha_softc *);

static void iha_reset_tcs(struct tcs *, u_int8_t);

static void iha_done_scb(struct iha_softc *, struct iha_scsi_req_q *);
static void iha_exec_scb(struct iha_softc *, struct iha_scsi_req_q *);

static void iha_main(struct iha_softc *);
static void iha_scsi(struct iha_softc *);

static int  iha_wait(struct iha_softc *, u_int8_t);

static __inline void iha_mark_busy_scb(struct iha_scsi_req_q *);

static void iha_append_free_scb(struct iha_softc *, struct iha_scsi_req_q *);
static void iha_append_done_scb(struct iha_softc *, struct iha_scsi_req_q *,
    u_int8_t);
static __inline struct iha_scsi_req_q *iha_pop_done_scb(struct iha_softc *);

static __inline void iha_append_pend_scb(struct iha_softc *,
    struct iha_scsi_req_q *);
static __inline void iha_push_pend_scb(struct iha_softc *,
    struct iha_scsi_req_q *);
static __inline void iha_del_pend_scb(struct iha_softc *,
    struct iha_scsi_req_q *);
static struct iha_scsi_req_q *iha_find_pend_scb(struct iha_softc *);

static void iha_sync_done(struct iha_softc *);
static void iha_wide_done(struct iha_softc *);
static void iha_bad_seq(struct iha_softc *);

static int  iha_next_state(struct iha_softc *);
static int  iha_state_1(struct iha_softc *);
static int  iha_state_2(struct iha_softc *);
static int  iha_state_3(struct iha_softc *);
static int  iha_state_4(struct iha_softc *);
static int  iha_state_5(struct iha_softc *);
static int  iha_state_6(struct iha_softc *);
static int  iha_state_8(struct iha_softc *);

static void iha_set_ssig(struct iha_softc *, u_int8_t, u_int8_t);

static int  iha_xpad_in(struct iha_softc *);
static int  iha_xpad_out(struct iha_softc *);

static int  iha_xfer_data(struct iha_softc *, struct iha_scsi_req_q *,
    int direction);

static int  iha_status_msg(struct iha_softc *);

static int  iha_msgin(struct iha_softc *);
static int  iha_msgin_sdtr(struct iha_softc *);
static int  iha_msgin_extended(struct iha_softc *);
static int  iha_msgin_ignore_wid_resid(struct iha_softc *);

static int  iha_msgout(struct iha_softc *, u_int8_t);
static int  iha_msgout_extended(struct iha_softc *);
static void iha_msgout_abort(struct iha_softc *, u_int8_t);
static int  iha_msgout_reject(struct iha_softc *);
static int  iha_msgout_sdtr(struct iha_softc *);
static int  iha_msgout_wdtr(struct iha_softc *);

static void iha_select(struct iha_softc *, struct iha_scsi_req_q *, u_int8_t);

static void iha_busfree(struct iha_softc *);
static int  iha_resel(struct iha_softc *);

static void iha_abort_xs(struct iha_softc *, struct scsipi_xfer *, u_int8_t);

void iha_scsipi_request(struct scsipi_channel *, scsipi_adapter_req_t,
    void *arg);
void iha_update_xfer_mode(struct iha_softc *, int);

/*
 * iha_intr - the interrupt service routine for the iha driver
 */
int
iha_intr(arg)
	void *arg;
{
	bus_space_tag_t iot;
	bus_space_handle_t ioh;
	struct iha_softc *sc;
	int s;

	sc  = (struct iha_softc *)arg;
	iot = sc->sc_iot;
	ioh = sc->sc_ioh;

	if ((bus_space_read_1(iot, ioh, TUL_STAT0) & INTPD) == 0)
		return (0);

	s = splbio(); /* XXX - Or are interrupts off when ISR's are called? */

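	/*
	 * Enter iha_main() only if it is not already running;
	 * sc_semaph keeps a nested interrupt from re-entering it while
	 * the previous invocation is still in progress.
	 */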
	if (sc->sc_semaph != SEMAPH_IN_MAIN) {
		/* XXX - need these inside a splbio()/splx()? */
		bus_space_write_1(iot, ioh, TUL_IMSK, MASK_ALL);
		sc->sc_semaph = SEMAPH_IN_MAIN;

		iha_main(sc);

		sc->sc_semaph = ~SEMAPH_IN_MAIN;
		bus_space_write_1(iot, ioh, TUL_IMSK, (MASK_ALL & ~MSCMP));
	}

	splx(s);

	return (1);
}

void
iha_scsipi_request(chan, req, arg)
	struct scsipi_channel *chan;
	scsipi_adapter_req_t req;
	void *arg;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct iha_scsi_req_q *scb;
	struct iha_softc *sc;
	int error, s;

	sc = (struct iha_softc *)chan->chan_adapter->adapt_dev;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;

		if (xs->cmdlen > sizeof(struct scsi_generic) ||
		    periph->periph_target >= IHA_MAX_TARGETS) {
			xs->error = XS_DRIVER_STUFFUP;
			return;
		}

		s = splbio();
		scb = TAILQ_FIRST(&sc->sc_freescb);
		if (scb != NULL) {
			scb->status = STATUS_RENT;
			TAILQ_REMOVE(&sc->sc_freescb, scb, chain);
		}
#ifdef DIAGNOSTIC
		else {
			scsipi_printaddr(periph);
			printf("unable to allocate scb\n");
			panic("iha_scsipi_request");
		}
#endif
		splx(s);

		scb->target = periph->periph_target;
		scb->lun = periph->periph_lun;
		scb->tcs = &sc->sc_tcs[scb->target];
		scb->scb_id = MSG_IDENTIFY(periph->periph_lun,
		    (xs->xs_control & XS_CTL_REQSENSE) == 0);

		scb->xs = xs;
		scb->cmdlen = xs->cmdlen;
		memcpy(&scb->cmd, xs->cmd, xs->cmdlen);
		scb->buflen = xs->datalen;
		scb->flags = 0;
		if (xs->xs_control & XS_CTL_DATA_OUT)
			scb->flags |= FLAG_DATAOUT;
		if (xs->xs_control & XS_CTL_DATA_IN)
			scb->flags |= FLAG_DATAIN;

		if (scb->flags & (FLAG_DATAIN | FLAG_DATAOUT)) {
			error = bus_dmamap_load(sc->sc_dmat, scb->dmap,
			    xs->data, scb->buflen, NULL,
			    ((xs->xs_control & XS_CTL_NOSLEEP) ?
			     BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
			    BUS_DMA_STREAMING |
			    ((scb->flags & FLAG_DATAIN) ?
			     BUS_DMA_READ : BUS_DMA_WRITE));

			if (error) {
				printf("%s: error %d loading dma map\n",
				    sc->sc_dev.dv_xname, error);
				iha_append_free_scb(sc, scb);
				xs->error = XS_DRIVER_STUFFUP;
				scsipi_done(xs);
				return;
			}
			bus_dmamap_sync(sc->sc_dmat, scb->dmap,
			    0, scb->dmap->dm_mapsize,
			    (scb->flags & FLAG_DATAIN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		}

		iha_exec_scb(sc, scb);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		return; /* XXX */

	case ADAPTER_REQ_SET_XFER_MODE:
	    {
		struct tcs *tcs;
		struct scsipi_xfer_mode *xm = arg;

		tcs = &sc->sc_tcs[xm->xm_target];

		if ((xm->xm_mode & PERIPH_CAP_WIDE16) != 0 &&
		    (tcs->flags & FLAG_NO_WIDE) == 0)
			tcs->flags &= ~(FLAG_WIDE_DONE|FLAG_SYNC_DONE);

		if ((xm->xm_mode & PERIPH_CAP_SYNC) != 0 &&
		    (tcs->flags & FLAG_NO_SYNC) == 0)
			tcs->flags &= ~FLAG_SYNC_DONE;

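		/*
		 * Clearing FLAG_WIDE_DONE/FLAG_SYNC_DONE marks the target
		 * as needing (re)negotiation; once that completes,
		 * iha_wide_done()/iha_sync_done() report the result
		 * through iha_update_xfer_mode().
		 */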
		/*
		 * If we're not going to negotiate, send the
		 * notification now, since it won't happen later.
		 */
		if ((tcs->flags & (FLAG_WIDE_DONE|FLAG_SYNC_DONE)) ==
		    (FLAG_WIDE_DONE|FLAG_SYNC_DONE))
			iha_update_xfer_mode(sc, xm->xm_target);

		return;
	    }
	}
}

void
iha_attach(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct iha_scsi_req_q *scb;
	struct iha_eeprom eeprom;
	struct eeprom_adapter *conf;
	int i, error, reg;

	iha_read_eeprom(sc, &eeprom);

	conf = &eeprom.adapter[0];

	/*
	 * fill in the rest of the iha_softc fields
	 */
	sc->sc_id = CFG_ID(conf->config1);
	sc->sc_semaph = ~SEMAPH_IN_MAIN;
	sc->sc_status0 = 0;
	sc->sc_actscb = NULL;

	TAILQ_INIT(&sc->sc_freescb);
	TAILQ_INIT(&sc->sc_pendscb);
	TAILQ_INIT(&sc->sc_donescb);
	error = iha_alloc_sglist(sc);
	if (error != 0) {
		printf(": cannot allocate sglist\n");
		return;
	}

	sc->sc_scb = malloc(sizeof(struct iha_scsi_req_q) * IHA_MAX_SCB,
	    M_DEVBUF, M_NOWAIT);
	if (sc->sc_scb == NULL) {
		printf(": cannot allocate SCB\n");
		return;
	}
	memset(sc->sc_scb, 0, sizeof(struct iha_scsi_req_q) * IHA_MAX_SCB);

	for (i = 0, scb = sc->sc_scb; i < IHA_MAX_SCB; i++, scb++) {
		scb->scb_tagid = i;
		scb->sgoffset = IHA_SG_SIZE * i;
		scb->sglist = sc->sc_sglist + IHA_MAX_SG_ENTRIES * i;
		scb->sg_addr =
		    sc->sc_dmamap->dm_segs[0].ds_addr + scb->sgoffset;

		error = bus_dmamap_create(sc->sc_dmat,
		    MAXPHYS, IHA_MAX_SG_ENTRIES, MAXPHYS, 0,
		    BUS_DMA_NOWAIT, &scb->dmap);

		if (error != 0) {
			printf(": couldn't create SCB DMA map, error = %d\n",
			    error);
			return;
		}
		TAILQ_INSERT_TAIL(&sc->sc_freescb, scb, chain);
	}

	/* Mask all the interrupts */
	bus_space_write_1(iot, ioh, TUL_IMSK, MASK_ALL);

	/* Stop any I/O and reset the scsi module */
	iha_reset_dma(sc);
	bus_space_write_1(iot, ioh, TUL_SCTRL0, RSMOD);

	/* Program HBA's SCSI ID */
	bus_space_write_1(iot, ioh, TUL_SID, sc->sc_id << 4);

	/*
	 * Configure the channel as requested by the NVRAM settings read
	 * by iha_read_eeprom() above.
	 */

	sc->sc_sconf1 = SCONFIG0DEFAULT;
	if ((conf->config1 & CFG_EN_PAR) != 0)
		sc->sc_sconf1 |= SPCHK;
	bus_space_write_1(iot, ioh, TUL_SCONFIG0, sc->sc_sconf1);

	/* set selection time out 250 ms */
	bus_space_write_1(iot, ioh, TUL_STIMO, STIMO_250MS);

	/* Enable desired SCSI termination configuration read from eeprom */
	reg = 0;
	if (conf->config1 & CFG_ACT_TERM1)
		reg |= ENTMW;
	if (conf->config1 & CFG_ACT_TERM2)
		reg |= ENTM;
	bus_space_write_1(iot, ioh, TUL_DCTRL0, reg);

	reg = bus_space_read_1(iot, ioh, TUL_GCTRL1) & ~ATDEN;
	if (conf->config1 & CFG_AUTO_TERM)
		reg |= ATDEN;
	bus_space_write_1(iot, ioh, TUL_GCTRL1, reg);

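	/*
	 * Each 16-bit tflags word in the EEPROM packs the flags for two
	 * targets: the low byte for the even-numbered target, the high
	 * byte for the odd-numbered one.
	 */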
	for (i = 0; i < IHA_MAX_TARGETS / 2; i++) {
		sc->sc_tcs[i * 2    ].flags = EEP_LBYTE(conf->tflags[i]);
		sc->sc_tcs[i * 2 + 1].flags = EEP_HBYTE(conf->tflags[i]);
		iha_reset_tcs(&sc->sc_tcs[i * 2    ], sc->sc_sconf1);
		iha_reset_tcs(&sc->sc_tcs[i * 2 + 1], sc->sc_sconf1);
	}

	iha_reset_chip(sc);
	bus_space_write_1(iot, ioh, TUL_SIEN, ALL_INTERRUPTS);

	/*
	 * fill in the adapter.
	 */
	sc->sc_adapter.adapt_dev = &sc->sc_dev;
	sc->sc_adapter.adapt_nchannels = 1;
	sc->sc_adapter.adapt_openings = IHA_MAX_SCB;
	sc->sc_adapter.adapt_max_periph = IHA_MAX_SCB;
	sc->sc_adapter.adapt_ioctl = NULL;
	sc->sc_adapter.adapt_minphys = minphys;
	sc->sc_adapter.adapt_request = iha_scsipi_request;

	/*
	 * fill in the channel.
	 */
	sc->sc_channel.chan_adapter = &sc->sc_adapter;
	sc->sc_channel.chan_bustype = &scsi_bustype;
	sc->sc_channel.chan_channel = 0;
	sc->sc_channel.chan_ntargets = CFG_TARGET(conf->config2);
	sc->sc_channel.chan_nluns = 8;
	sc->sc_channel.chan_id = sc->sc_id;

	/*
	 * Now try to attach all the sub devices.
	 */
	config_found(&sc->sc_dev, &sc->sc_channel, scsiprint);
}

/*
 * iha_reset_dma - abort any active DMA xfer, reset tulip FIFO.
 */
static void
iha_reset_dma(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;

	if ((bus_space_read_1(iot, ioh, TUL_ISTUS1) & XPEND) != 0) {
		/* if DMA xfer is pending, abort DMA xfer */
		bus_space_write_1(iot, ioh, TUL_DCMD, ABTXFR);
		/* wait for the DMA abort to complete */
		while ((bus_space_read_1(iot, ioh, TUL_ISTUS0) & DABT) == 0)
			;
	}

	bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
}

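/*
 * SCB life cycle: iha_scsipi_request() takes an SCB off sc_freescb,
 * it is queued on sc_pendscb, iha_select() makes it the active SCB
 * (STATUS_SELECT, then STATUS_BUSY once selection completes), and
 * iha_append_done_scb() moves it to sc_donescb for iha_done_scb() to
 * finish and return it to sc_freescb.
 */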
/*
 * iha_append_free_scb - append the supplied SCB to the tail of the
 *                       sc_freescb queue after clearing and resetting
 *                       everything possible.
 */
static void
iha_append_free_scb(sc, scb)
	struct iha_softc *sc;
	struct iha_scsi_req_q *scb;
{
	int s;

	s = splbio();

	if (scb == sc->sc_actscb)
		sc->sc_actscb = NULL;

	scb->status = STATUS_QUEUED;
	scb->ha_stat = HOST_OK;
	scb->ta_stat = SCSI_OK;

	scb->nextstat = 0;
	scb->scb_tagmsg = 0;

	scb->xs = NULL;
	scb->tcs = NULL;

	/*
	 * scb_tagid, sg_addr, sglist
	 * SCB_SensePtr are set at initialization
	 * and never change
	 */

	TAILQ_INSERT_TAIL(&sc->sc_freescb, scb, chain);

	splx(s);
}

static __inline void
iha_append_pend_scb(sc, scb)
	struct iha_softc *sc;
	struct iha_scsi_req_q *scb;
{
	/* ASSUMPTION: only called within a splbio()/splx() pair */

	if (scb == sc->sc_actscb)
		sc->sc_actscb = NULL;

	scb->status = STATUS_QUEUED;

	TAILQ_INSERT_TAIL(&sc->sc_pendscb, scb, chain);
}

static __inline void
iha_push_pend_scb(sc, scb)
	struct iha_softc *sc;
	struct iha_scsi_req_q *scb;
{
	int s;

	s = splbio();

	if (scb == sc->sc_actscb)
		sc->sc_actscb = NULL;

	scb->status = STATUS_QUEUED;

	TAILQ_INSERT_HEAD(&sc->sc_pendscb, scb, chain);

	splx(s);
}

/*
 * iha_find_pend_scb - scan the pending queue for a SCB that can be
 *                     processed immediately.  Return NULL if none found
 *                     and a pointer to the SCB if one is found.  If there
 *                     is an active SCB, return NULL!
 */
static struct iha_scsi_req_q *
iha_find_pend_scb(sc)
	struct iha_softc *sc;
{
	struct iha_scsi_req_q *scb;
	struct tcs *tcs;
	int s;

	s = splbio();

	if (sc->sc_actscb != NULL)
		scb = NULL;

	else
		TAILQ_FOREACH(scb, &sc->sc_pendscb, chain) {
			if ((scb->xs->xs_control & XS_CTL_RESET) != 0)
				/* ALWAYS willing to reset a device */
				break;

			tcs = scb->tcs;

			if ((scb->scb_tagmsg) != 0) {
				/*
				 * A Tagged I/O.  OK to start if no
				 * non-tagged I/O is active on the same
				 * target
				 */
				if (tcs->ntagscb == NULL)
					break;

			} else if (scb->cmd[0] == REQUEST_SENSE) {
				/*
				 * OK to do a non-tagged request sense
				 * even if a non-tagged I/O has been
				 * started, 'cuz we don't allow any
				 * disconnect during a request sense op
				 */
				break;

			} else if (tcs->tagcnt == 0) {
				/*
				 * No tagged I/O active on this target,
				 * ok to start a non-tagged one if one
				 * is not already active
				 */
				if (tcs->ntagscb == NULL)
					break;
			}
		}

	splx(s);

	return (scb);
}

/*
 * iha_del_pend_scb - remove scb from sc_pendscb
 */
static __inline void
iha_del_pend_scb(sc, scb)
	struct iha_softc *sc;
	struct iha_scsi_req_q *scb;
{
	int s;

	s = splbio();

	TAILQ_REMOVE(&sc->sc_pendscb, scb, chain);

	splx(s);
}

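/*
 * Per-target accounting: tcs->ntagscb points to the single outstanding
 * non-tagged SCB, if any, and tcs->tagcnt counts outstanding tagged
 * SCBs.  iha_find_pend_scb() consults both to decide when a pending
 * SCB may be started.
 */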
static __inline void
iha_mark_busy_scb(scb)
	struct iha_scsi_req_q *scb;
{
	int s;

	s = splbio();

	scb->status = STATUS_BUSY;

	if (scb->scb_tagmsg == 0)
		scb->tcs->ntagscb = scb;
	else
		scb->tcs->tagcnt++;

	splx(s);
}

static void
iha_append_done_scb(sc, scb, hastat)
	struct iha_softc *sc;
	struct iha_scsi_req_q *scb;
	u_int8_t hastat;
{
	struct tcs *tcs;
	int s;

	s = splbio();

	if (scb->xs != NULL)
		callout_stop(&scb->xs->xs_callout);

	if (scb == sc->sc_actscb)
		sc->sc_actscb = NULL;

	tcs = scb->tcs;

	if (scb->scb_tagmsg != 0) {
		if (tcs->tagcnt)
			tcs->tagcnt--;
	} else if (tcs->ntagscb == scb)
		tcs->ntagscb = NULL;

	scb->status = STATUS_QUEUED;
	scb->ha_stat = hastat;

	TAILQ_INSERT_TAIL(&sc->sc_donescb, scb, chain);

	splx(s);
}

static __inline struct iha_scsi_req_q *
iha_pop_done_scb(sc)
	struct iha_softc *sc;
{
	struct iha_scsi_req_q *scb;
	int s;

	s = splbio();

	scb = TAILQ_FIRST(&sc->sc_donescb);

	if (scb != NULL) {
		scb->status = STATUS_RENT;
		TAILQ_REMOVE(&sc->sc_donescb, scb, chain);
	}

	splx(s);

	return (scb);
}

/*
 * iha_abort_xs - find the SCB associated with the supplied xs and
 *                stop all processing on it, moving it to the done
 *                queue with the supplied host status value.
 */
static void
iha_abort_xs(sc, xs, hastat)
	struct iha_softc *sc;
	struct scsipi_xfer *xs;
	u_int8_t hastat;
{
	struct iha_scsi_req_q *scb;
	int i, s;

	s = splbio();

	/* Check the pending queue for the SCB pointing to xs */

	TAILQ_FOREACH(scb, &sc->sc_pendscb, chain)
		if (scb->xs == xs) {
			iha_del_pend_scb(sc, scb);
			iha_append_done_scb(sc, scb, hastat);
			splx(s);
			return;
		}

	/*
	 * If that didn't work, check all BUSY/SELECTING SCB's for one
	 * pointing to xs
	 */

	for (i = 0, scb = sc->sc_scb; i < IHA_MAX_SCB; i++, scb++)
		switch (scb->status) {
		case STATUS_BUSY:
		case STATUS_SELECT:
			if (scb->xs == xs) {
				iha_append_done_scb(sc, scb, hastat);
				splx(s);
				return;
			}
			break;
		default:
			break;
		}

	splx(s);
}

/*
 * iha_bad_seq - a SCSI bus phase was encountered out of the
 *               correct/expected sequence.  Reset the SCSI bus.
 */
static void
iha_bad_seq(sc)
	struct iha_softc *sc;
{
	struct iha_scsi_req_q *scb = sc->sc_actscb;

	if (scb != NULL)
		iha_append_done_scb(sc, scb, HOST_BAD_PHAS);

	iha_reset_scsi_bus(sc);
	iha_reset_chip(sc);
}

/*
 * iha_push_sense_request - obtain auto sense data by pushing the
 *                          SCB needing it back onto the pending
 *                          queue with a REQUEST_SENSE CDB.
 */
static int
iha_push_sense_request(sc, scb)
	struct iha_softc *sc;
	struct iha_scsi_req_q *scb;
{
	struct scsipi_xfer *xs = scb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_sense *ss = (struct scsipi_sense *)scb->cmd;
	int lun = periph->periph_lun;
	int err;

	ss->opcode = REQUEST_SENSE;
	ss->byte2 = lun << SCSI_CMD_LUN_SHIFT;
	ss->unused[0] = ss->unused[1] = 0;
	ss->length = sizeof(struct scsipi_sense_data);
	ss->control = 0;

	scb->flags = FLAG_RSENS | FLAG_DATAIN;

	scb->scb_id &= ~MSG_IDENTIFY_DISCFLAG;

	scb->scb_tagmsg = 0;
	scb->ta_stat = SCSI_OK;

	scb->cmdlen = sizeof(struct scsipi_sense);
	scb->buflen = ss->length;

	err = bus_dmamap_load(sc->sc_dmat, scb->dmap,
	    &xs->sense.scsi_sense, scb->buflen, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (err != 0) {
		printf("iha_push_sense_request: cannot bus_dmamap_load()\n");
		xs->error = XS_DRIVER_STUFFUP;
		return 1;
	}
	bus_dmamap_sync(sc->sc_dmat, scb->dmap,
	    0, scb->buflen, BUS_DMASYNC_PREREAD);

	/* XXX What about queued command? */
	iha_exec_scb(sc, scb);

	return 0;
}

/*
 * iha_main - process the active SCB, taking one off pending and making it
 *            active if necessary, and any done SCB's created as
 *            a result until there are no interrupts pending and no pending
 *            SCB's that can be started.
 */
static void
iha_main(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct iha_scsi_req_q *scb;

	for (;;) {
		iha_scsi(sc);

		while ((scb = iha_pop_done_scb(sc)) != NULL)
			iha_done_scb(sc, scb);

		/*
		 * If there are no interrupts pending, or we can't start
		 * a pending SCB, break out of the for(;;).  Otherwise
		 * continue the good work with another call to
		 * iha_scsi().
		 */
		if (((bus_space_read_1(iot, ioh, TUL_STAT0) & INTPD) == 0)
		    && (iha_find_pend_scb(sc) == NULL))
			break;
	}
}

/*
 * iha_scsi - service any outstanding interrupts.  If there are none, try to
 *            start another SCB currently in the pending queue.
 */
static void
iha_scsi(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct iha_scsi_req_q *scb;
	struct tcs *tcs;
	u_int8_t stat;

	/* service pending interrupts asap */

	stat = bus_space_read_1(iot, ioh, TUL_STAT0);
	if ((stat & INTPD) != 0) {
		sc->sc_status0 = stat;
		sc->sc_status1 = bus_space_read_1(iot, ioh, TUL_STAT1);
		sc->sc_sistat = bus_space_read_1(iot, ioh, TUL_SISTAT);

		sc->sc_phase = sc->sc_status0 & PH_MASK;

		if ((sc->sc_sistat & SRSTD) != 0) {
			iha_reset_scsi_bus(sc);
			return;
		}

		if ((sc->sc_sistat & RSELED) != 0) {
			iha_resel(sc);
			return;
		}

		if ((sc->sc_sistat & (STIMEO | DISCD)) != 0) {
			iha_busfree(sc);
			return;
		}

		if ((sc->sc_sistat & (SCMDN | SBSRV)) != 0) {
			iha_next_state(sc);
			return;
		}

		if ((sc->sc_sistat & SELED) != 0)
			iha_set_ssig(sc, 0, 0);
	}

	/*
	 * There were no interrupts pending which required action elsewhere, so
	 * see if it is possible to start the selection phase on a pending SCB
	 */
	if ((scb = iha_find_pend_scb(sc)) == NULL)
		return;

	tcs = scb->tcs;

	/* program HBA's SCSI ID & target SCSI ID */
	bus_space_write_1(iot, ioh, TUL_SID, (sc->sc_id << 4) | scb->target);

	if ((scb->xs->xs_control & XS_CTL_RESET) == 0) {
		bus_space_write_1(iot, ioh, TUL_SYNCM, tcs->syncm);

		if ((tcs->flags & FLAG_NO_NEG_SYNC) == 0 ||
		    (tcs->flags & FLAG_NO_NEG_WIDE) == 0)
			iha_select(sc, scb, SELATNSTOP);

		else if (scb->scb_tagmsg != 0)
			iha_select(sc, scb, SEL_ATN3);

		else
			iha_select(sc, scb, SEL_ATN);

	} else {
		iha_select(sc, scb, SELATNSTOP);
		scb->nextstat = 8;
	}

	if ((scb->xs->xs_control & XS_CTL_POLL) != 0) {
		int timeout;
		for (timeout = scb->xs->timeout; timeout > 0; timeout--) {
			if (iha_wait(sc, NO_OP) == -1)
				break;
			if (iha_next_state(sc) == -1)
				break;
			delay(1000); /* Only happens in boot, so it's ok */
		}

		/*
		 * Since done queue processing not done until AFTER this
		 * function returns, scb is on the done queue, not
		 * the free queue at this point and still has valid data
		 *
		 * Conversely, xs->error has not been set yet
		 */
		if (timeout == 0)
			iha_timeout(scb);
	}
}

/*
 * iha_data_over_run - return HOST_OK for all SCSI opcodes where BufLen
 *                     is an 'Allocation Length'.  All other SCSI opcodes
 *                     get HOST_DO_DU as they SHOULD have xferred all the
 *                     data requested.
 *
 *                     The list of opcodes using 'Allocation Length' was
 *                     found by scanning all the SCSI-3 T10 drafts.  See
 *                     www.t10.org for the curious with a .pdf reader.
 */
static u_int8_t
iha_data_over_run(scb)
	struct iha_scsi_req_q *scb;
{
	switch (scb->cmd[0]) {
	case 0x03: /* Request Sense                   SPC-2 */
	case 0x12: /* Inquiry                         SPC-2 */
	case 0x1a: /* Mode Sense (6 byte version)     SPC-2 */
	case 0x1c: /* Receive Diagnostic Results      SPC-2 */
	case 0x23: /* Read Format Capacities          MMC-2 */
	case 0x29: /* Read Generation                 SBC   */
	case 0x34: /* Read Position                   SSC-2 */
	case 0x37: /* Read Defect Data                SBC   */
	case 0x3c: /* Read Buffer                     SPC-2 */
	case 0x42: /* Read Sub Channel                MMC-2 */
	case 0x43: /* Read TOC/PMA/ATIP               MMC   */

	/* XXX - 2 with same opcode of 0x44? */
	case 0x44: /* Read Header/Read Density Suprt  MMC/SSC */

	case 0x46: /* Get Configuration               MMC-2 */
	case 0x4a: /* Get Event/Status Notification   MMC-2 */
	case 0x4d: /* Log Sense                       SPC-2 */
	case 0x51: /* Read Disc Information           MMC   */
	case 0x52: /* Read Track Information          MMC   */
	case 0x59: /* Read Master CUE                 MMC   */
	case 0x5a: /* Mode Sense (10 byte version)    SPC-2 */
	case 0x5c: /* Read Buffer Capacity            MMC   */
	case 0x5e: /* Persistent Reserve In           SPC-2 */
	case 0x84: /* Receive Copy Results            SPC-2 */
	case 0xa0: /* Report LUNs                     SPC-2 */
	case 0xa3: /* Various Report requests         SBC-2/SCC-2 */
	case 0xa4: /* Report Key                      MMC-2 */
	case 0xad: /* Read DVD Structure              MMC-2 */
	case 0xb4: /* Read Element Status (Attached)  SMC   */
	case 0xb5: /* Request Volume Element Address  SMC   */
	case 0xb7: /* Read Defect Data (12 byte ver.) SBC   */
	case 0xb8: /* Read Element Status (Independ.) SMC   */
	case 0xba: /* Report Redundancy               SCC-2 */
	case 0xbd: /* Mechanism Status                MMC   */
	case 0xbe: /* Report Basic Redundancy         SCC-2 */

		return (HOST_OK);
		break;

	default:
		return (HOST_DO_DU);
		break;
	}
}

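/*
 * The nextstat member of the active SCB selects which iha_state_N()
 * handler iha_next_state() dispatches to.  Each handler returns the
 * number of the state to enter next, -1 once the bus has been given
 * up, or 0 when a DMA transfer has been started and the next interrupt
 * will drive the state machine again.
 */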
/*
 * iha_next_state - process the current SCB as requested in its
 *                  nextstat member.
 */
static int
iha_next_state(sc)
	struct iha_softc *sc;
{

	if (sc->sc_actscb == NULL)
		return (-1);

	switch (sc->sc_actscb->nextstat) {
	case 1:
		if (iha_state_1(sc) == 3)
			goto state_3;
		break;

	case 2:
		switch (iha_state_2(sc)) {
		case 3:
			goto state_3;
		case 4:
			goto state_4;
		default:
			break;
		}
		break;

	case 3:
	state_3:
		if (iha_state_3(sc) == 4)
			goto state_4;
		break;

	case 4:
	state_4:
		switch (iha_state_4(sc)) {
		case 0:
			return (0);
		case 6:
			goto state_6;
		default:
			break;
		}
		break;

	case 5:
		switch (iha_state_5(sc)) {
		case 4:
			goto state_4;
		case 6:
			goto state_6;
		default:
			break;
		}
		break;

	case 6:
	state_6:
		iha_state_6(sc);
		break;

	case 8:
		iha_state_8(sc);
		break;

	default:
#ifdef IHA_DEBUG_STATE
		printf("[debug] -unknown state: %i-\n",
		    sc->sc_actscb->nextstat);
#endif
		iha_bad_seq(sc);
		break;
	}

	return (-1);
}

/*
 * iha_state_1 - selection is complete after a SELATNSTOP.  If the target
 *               has put the bus into MSG_OUT phase start wide/sync
 *               negotiation.  Otherwise clear the FIFO and go to state 3,
 *               which will send the SCSI CDB to the target.
 */
static int
iha_state_1(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct iha_scsi_req_q *scb = sc->sc_actscb;
	struct tcs *tcs;
	int flags;

	iha_mark_busy_scb(scb);

	tcs = scb->tcs;

	bus_space_write_1(iot, ioh, TUL_SCONFIG0, tcs->sconfig0);

	/*
	 * If we are in PHASE_MSG_OUT, send
	 *     a) IDENT message (with tags if appropriate)
	 *     b) WDTR if the target is configured to negotiate wide xfers
	 *        ** OR **
	 *     c) SDTR if the target is configured to negotiate sync xfers
	 *        but not wide ones
	 *
	 * If we are NOT, then the target is not asking for anything but
	 * the data/command, so go straight to state 3.
	 */
	if (sc->sc_phase == PHASE_MSG_OUT) {
		bus_space_write_1(iot, ioh, TUL_SCTRL1, (ESBUSIN | EHRSL));
		bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_id);

		if (scb->scb_tagmsg != 0) {
			bus_space_write_1(iot, ioh, TUL_SFIFO,
			    scb->scb_tagmsg);
			bus_space_write_1(iot, ioh, TUL_SFIFO,
			    scb->scb_tagid);
		}

		flags = tcs->flags;
		if ((flags & FLAG_NO_NEG_WIDE) == 0) {
			if (iha_msgout_wdtr(sc) == -1)
				return (-1);
		} else if ((flags & FLAG_NO_NEG_SYNC) == 0) {
			if (iha_msgout_sdtr(sc) == -1)
				return (-1);
		}

	} else {
		bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
		iha_set_ssig(sc, REQ | BSY | SEL | ATN, 0);
	}

	return (3);
}

/*
 * iha_state_2 - selection is complete after a SEL_ATN or SEL_ATN3.  If the
 *               SCSI CDB has already been sent, go to state 4 to start the
 *               data xfer.  Otherwise reset the FIFO and go to state 3,
 *               sending the SCSI CDB.
 */
static int
iha_state_2(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct iha_scsi_req_q *scb = sc->sc_actscb;

	iha_mark_busy_scb(scb);

	bus_space_write_1(iot, ioh, TUL_SCONFIG0, scb->tcs->sconfig0);

	if ((sc->sc_status1 & CPDNE) != 0)
		return (4);

	bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);

	iha_set_ssig(sc, REQ | BSY | SEL | ATN, 0);

	return (3);
}

/*
 * iha_state_3 - send the SCSI CDB to the target, processing any status
 *               or other messages received until that is done or
 *               abandoned.
 */
static int
iha_state_3(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct iha_scsi_req_q *scb = sc->sc_actscb;
	int flags;

	for (;;) {
		switch (sc->sc_phase) {
		case PHASE_CMD_OUT:
			bus_space_write_multi_1(iot, ioh, TUL_SFIFO,
			    scb->cmd, scb->cmdlen);
			if (iha_wait(sc, XF_FIFO_OUT) == -1)
				return (-1);
			else if (sc->sc_phase == PHASE_CMD_OUT) {
				iha_bad_seq(sc);
				return (-1);
			} else
				return (4);

		case PHASE_MSG_IN:
			scb->nextstat = 3;
			if (iha_msgin(sc) == -1)
				return (-1);
			break;

		case PHASE_STATUS_IN:
			if (iha_status_msg(sc) == -1)
				return (-1);
			break;

		case PHASE_MSG_OUT:
			flags = scb->tcs->flags;
			if ((flags & FLAG_NO_NEG_SYNC) != 0) {
				if (iha_msgout(sc, MSG_NOOP) == -1)
					return (-1);
			} else if (iha_msgout_sdtr(sc) == -1)
				return (-1);
			break;

		default:
			printf("[debug] -s3- bad phase = %d\n", sc->sc_phase);
			iha_bad_seq(sc);
			return (-1);
		}
	}
}

/*
 * iha_state_4 - start a data xfer.  Handle any bus state
 *               transitions until PHASE_DATA_IN/_OUT
 *               or the attempt is abandoned.  If there is
 *               no data to xfer, go to state 6 and finish
 *               processing the current SCB.
 */
static int
iha_state_4(sc)
	struct iha_softc *sc;
{
	struct iha_scsi_req_q *scb = sc->sc_actscb;

	if ((scb->flags & (FLAG_DATAIN | FLAG_DATAOUT)) ==
	    (FLAG_DATAIN | FLAG_DATAOUT))
		return (6); /* Both dir flags set => NO xfer was requested */

	for (;;) {
		if (scb->buflen == 0)
			return (6);

		switch (sc->sc_phase) {
		case PHASE_STATUS_IN:
			if ((scb->flags & (FLAG_DATAIN | FLAG_DATAOUT)) != 0)
				scb->ha_stat = iha_data_over_run(scb);
			if ((iha_status_msg(sc)) == -1)
				return (-1);
			break;

		case PHASE_MSG_IN:
			scb->nextstat = 4;
			if (iha_msgin(sc) == -1)
				return (-1);
			break;

		case PHASE_MSG_OUT:
			if ((sc->sc_status0 & SPERR) != 0) {
				scb->buflen = 0;
				scb->ha_stat = HOST_SPERR;
				if (iha_msgout(sc, MSG_INITIATOR_DET_ERR) == -1)
					return (-1);
				else
					return (6);
			} else {
				if (iha_msgout(sc, MSG_NOOP) == -1)
					return (-1);
			}
			break;

		case PHASE_DATA_IN:
			return (iha_xfer_data(sc, scb, FLAG_DATAIN));

		case PHASE_DATA_OUT:
			return (iha_xfer_data(sc, scb, FLAG_DATAOUT));

		default:
			iha_bad_seq(sc);
			return (-1);
		}
	}
}

/*
 * iha_state_5 - handle the partial or final completion of the current
 *               data xfer.  If DMA is still active stop it.  If there is
 *               more data to xfer, go to state 4 and start the xfer.
 *               If not go to state 6 and finish the SCB.
 */
static int
iha_state_5(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct iha_scsi_req_q *scb = sc->sc_actscb;
	struct iha_sg_element *sg;
	u_int32_t cnt;
	u_int8_t period, stat;
	long xcnt; /* cannot use unsigned!! see code: if (xcnt < 0) */
	int i;

	cnt = bus_space_read_4(iot, ioh, TUL_STCNT0) & TCNT;

	/*
	 * Stop any pending DMA activity and check for parity error.
	 */

	if ((bus_space_read_1(iot, ioh, TUL_DCMD) & XDIR) != 0) {
		/* Input Operation */
		if ((sc->sc_status0 & SPERR) != 0)
			scb->ha_stat = HOST_SPERR;

		if ((bus_space_read_1(iot, ioh, TUL_ISTUS1) & XPEND) != 0) {
			bus_space_write_1(iot, ioh, TUL_DCTRL0,
			    bus_space_read_1(iot, ioh, TUL_DCTRL0) | SXSTP);
			while (bus_space_read_1(iot, ioh, TUL_ISTUS1) & XPEND)
				;
		}

	} else {
		/* Output Operation */
		if ((sc->sc_status1 & SXCMP) == 0) {
			period = scb->tcs->syncm;
			if ((period & PERIOD_WIDE_SCSI) != 0)
				cnt += (bus_space_read_1(iot, ioh,
				    TUL_SFIFOCNT) & FIFOC) * 2;
			else
				cnt += bus_space_read_1(iot, ioh,
				    TUL_SFIFOCNT) & FIFOC;
		}

		if ((bus_space_read_1(iot, ioh, TUL_ISTUS1) & XPEND) != 0) {
			bus_space_write_1(iot, ioh, TUL_DCMD, ABTXFR);
			do
				stat = bus_space_read_1(iot, ioh, TUL_ISTUS0);
			while ((stat & DABT) == 0);
		}

		if ((cnt == 1) && (sc->sc_phase == PHASE_DATA_OUT)) {
			if (iha_wait(sc, XF_FIFO_OUT) == -1)
				return (-1);
			cnt = 0;

		} else if ((sc->sc_status1 & SXCMP) == 0)
			bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
	}

	if (cnt == 0) {
		scb->buflen = 0;
		return (6);
	}

	/* Update active data pointer and restart the I/O at the new point */

	xcnt = scb->buflen - cnt;	/* xcnt == bytes xferred */
	scb->buflen = cnt;		/* cnt  == bytes left    */

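	/*
	 * For a scatter/gather transfer, walk the SG list to find the
	 * element in which the transfer stopped, trim the bytes already
	 * transferred from it, and resume the I/O from that element
	 * (state 4).  If the whole list has been consumed, finish the
	 * SCB (state 6).
	 */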
	if ((scb->flags & FLAG_SG) != 0) {
		sg = &scb->sglist[scb->sg_index];
		for (i = scb->sg_index; i < scb->sg_max; sg++, i++) {
			xcnt -= le32toh(sg->sg_len);
			if (xcnt < 0) {
				xcnt += le32toh(sg->sg_len);

				sg->sg_addr =
				    htole32(le32toh(sg->sg_addr) + xcnt);
				sg->sg_len =
				    htole32(le32toh(sg->sg_len) - xcnt);
				bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
				    scb->sgoffset, IHA_SG_SIZE,
				    BUS_DMASYNC_PREWRITE);

				scb->bufaddr += (i - scb->sg_index) *
				    sizeof(struct iha_sg_element);
				scb->sg_size = scb->sg_max - i;
				scb->sg_index = i;

				return (4);
			}
		}
		return (6);

	} else
		scb->bufaddr += xcnt;

	return (4);
}

/*
 * iha_state_6 - finish off the active scb (may require several
 *               iterations if PHASE_MSG_IN) and return -1 to indicate
 *               the bus is free.
 */
static int
iha_state_6(sc)
	struct iha_softc *sc;
{

	for (;;) {
		switch (sc->sc_phase) {
		case PHASE_STATUS_IN:
			if (iha_status_msg(sc) == -1)
				return (-1);
			break;

		case PHASE_MSG_IN:
			sc->sc_actscb->nextstat = 6;
			if ((iha_msgin(sc)) == -1)
				return (-1);
			break;

		case PHASE_MSG_OUT:
			if ((iha_msgout(sc, MSG_NOOP)) == -1)
				return (-1);
			break;

		case PHASE_DATA_IN:
			if (iha_xpad_in(sc) == -1)
				return (-1);
			break;

		case PHASE_DATA_OUT:
			if (iha_xpad_out(sc) == -1)
				return (-1);
			break;

		default:
			iha_bad_seq(sc);
			return (-1);
		}
	}
}

/*
 * iha_state_8 - reset the active device and all busy SCBs using it
 */
static int
iha_state_8(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct iha_scsi_req_q *scb;
	int i;
	u_int8_t tar;

	if (sc->sc_phase == PHASE_MSG_OUT) {
		bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_BUS_DEV_RESET);

		scb = sc->sc_actscb;

		/* This SCB finished correctly -- resetting the device */
		iha_append_done_scb(sc, scb, HOST_OK);

		iha_reset_tcs(scb->tcs, sc->sc_sconf1);

		tar = scb->target;
		for (i = 0, scb = sc->sc_scb; i < IHA_MAX_SCB; i++, scb++)
			if (scb->target == tar)
				switch (scb->status) {
				case STATUS_BUSY:
					iha_append_done_scb(sc,
					    scb, HOST_DEV_RST);
					break;

				case STATUS_SELECT:
					iha_push_pend_scb(sc, scb);
					break;

				default:
					break;
				}

		sc->sc_flags |= FLAG_EXPECT_DISC;

		if (iha_wait(sc, XF_FIFO_OUT) == -1)
			return (-1);
	}

	iha_bad_seq(sc);
	return (-1);
}

/*
 * iha_xfer_data - initiate the DMA xfer of the data
 */
static int
iha_xfer_data(sc, scb, direction)
	struct iha_softc *sc;
	struct iha_scsi_req_q *scb;
	int direction;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int32_t xferlen;
	u_int8_t xfercmd;

	if ((scb->flags & (FLAG_DATAIN | FLAG_DATAOUT)) != direction)
		return (6); /* wrong direction, abandon I/O */

	bus_space_write_4(iot, ioh, TUL_STCNT0, scb->buflen);

	xfercmd = STRXFR;
	if (direction == FLAG_DATAIN)
		xfercmd |= XDIR;

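	/*
	 * With FLAG_SG set the DMA engine is handed the scatter/gather
	 * descriptor block itself (SGXFR), so xferlen is the size of that
	 * block rather than the data length, which has already been
	 * written to TUL_STCNT0 above.
	 */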
	if (scb->flags & FLAG_SG) {
		xferlen = scb->sg_size * sizeof(struct iha_sg_element);
		xfercmd |= SGXFR;
	} else
		xferlen = scb->buflen;

	bus_space_write_4(iot, ioh, TUL_DXC,  xferlen);
	bus_space_write_4(iot, ioh, TUL_DXPA, scb->bufaddr);
	bus_space_write_1(iot, ioh, TUL_DCMD, xfercmd);

	bus_space_write_1(iot, ioh, TUL_SCMD,
	    (direction == FLAG_DATAIN) ? XF_DMA_IN : XF_DMA_OUT);

	scb->nextstat = 5;

	return (0);
}

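/*
 * iha_xpad_in/iha_xpad_out - transfer pad bytes when the target asks
 * for more data than the SCB supplies, flagging the SCB with a data
 * over/under run (HOST_DO_DU) when a data transfer had been requested.
 */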
static int
iha_xpad_in(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct iha_scsi_req_q *scb = sc->sc_actscb;

	if ((scb->flags & (FLAG_DATAIN | FLAG_DATAOUT)) != 0)
		scb->ha_stat = HOST_DO_DU;

	for (;;) {
		if ((scb->tcs->syncm & PERIOD_WIDE_SCSI) != 0)
			bus_space_write_4(iot, ioh, TUL_STCNT0, 2);
		else
			bus_space_write_4(iot, ioh, TUL_STCNT0, 1);

		switch (iha_wait(sc, XF_FIFO_IN)) {
		case -1:
			return (-1);

		case PHASE_DATA_IN:
			bus_space_read_1(iot, ioh, TUL_SFIFO);
			break;

		default:
			bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
			return (6);
		}
	}
}

static int
iha_xpad_out(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct iha_scsi_req_q *scb = sc->sc_actscb;

	if ((scb->flags & (FLAG_DATAIN | FLAG_DATAOUT)) != 0)
		scb->ha_stat = HOST_DO_DU;

	for (;;) {
		if ((scb->tcs->syncm & PERIOD_WIDE_SCSI) != 0)
			bus_space_write_4(iot, ioh, TUL_STCNT0, 2);
		else
			bus_space_write_4(iot, ioh, TUL_STCNT0, 1);

		bus_space_write_1(iot, ioh, TUL_SFIFO, 0);

		switch (iha_wait(sc, XF_FIFO_OUT)) {
		case -1:
			return (-1);

		case PHASE_DATA_OUT:
			break;

		default:
			/* Disable wide CPU to allow read 16 bits */
			bus_space_write_1(iot, ioh, TUL_SCTRL1, EHRSL);
			bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
			return (6);
		}
	}
}

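/*
 * iha_status_msg - read the status byte during PHASE_STATUS_IN and
 *                  handle the message that follows it (normally CMD
 *                  COMPLETE, possibly a LINKED CMD COMPLETE variant),
 *                  accepting it or reporting a parity error as needed.
 */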
static int
iha_status_msg(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct iha_scsi_req_q *scb;
	u_int8_t msg;
	int phase;

	if ((phase = iha_wait(sc, CMD_COMP)) == -1)
		return (-1);

	scb = sc->sc_actscb;

	scb->ta_stat = bus_space_read_1(iot, ioh, TUL_SFIFO);

	if (phase == PHASE_MSG_OUT) {
		if ((sc->sc_status0 & SPERR) == 0)
			bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_NOOP);
		else
			bus_space_write_1(iot, ioh, TUL_SFIFO,
			    MSG_PARITY_ERROR);

		return (iha_wait(sc, XF_FIFO_OUT));

	} else if (phase == PHASE_MSG_IN) {
		msg = bus_space_read_1(iot, ioh, TUL_SFIFO);

		if ((sc->sc_status0 & SPERR) != 0)
			switch (iha_wait(sc, MSG_ACCEPT)) {
			case -1:
				return (-1);
			case PHASE_MSG_OUT:
				bus_space_write_1(iot, ioh, TUL_SFIFO,
				    MSG_PARITY_ERROR);
				return (iha_wait(sc, XF_FIFO_OUT));
			default:
				iha_bad_seq(sc);
				return (-1);
			}

		if (msg == MSG_CMDCOMPLETE) {
			if ((scb->ta_stat &
			    (SCSI_INTERM | SCSI_BUSY)) == SCSI_INTERM) {
				iha_bad_seq(sc);
				return (-1);
			}
			sc->sc_flags |= FLAG_EXPECT_DONE_DISC;
			bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
			return (iha_wait(sc, MSG_ACCEPT));
		}

		if ((msg == MSG_LINK_CMD_COMPLETE)
		    || (msg == MSG_LINK_CMD_COMPLETEF)) {
			if ((scb->ta_stat &
			    (SCSI_INTERM | SCSI_BUSY)) == SCSI_INTERM)
				return (iha_wait(sc, MSG_ACCEPT));
		}
	}

	iha_bad_seq(sc);
	return (-1);
}

/*
 * iha_busfree - SCSI bus free detected as a result of a TIMEOUT or
 *               DISCONNECT interrupt.  Reset the tulip FIFO and
 *               SCONFIG0 and enable hardware reselect.  Move any active
 *               SCB to sc_donescb list.  Return an appropriate host status
 *               if an I/O was active.
 */
static void
iha_busfree(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct iha_scsi_req_q *scb;

	bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
	bus_space_write_1(iot, ioh, TUL_SCONFIG0, SCONFIG0DEFAULT);
	bus_space_write_1(iot, ioh, TUL_SCTRL1, EHRSL);

	scb = sc->sc_actscb;

	if (scb != NULL) {
		if (scb->status == STATUS_SELECT)
			/* selection timeout */
			iha_append_done_scb(sc, scb, HOST_SEL_TOUT);
		else
			/* Unexpected bus free */
			iha_append_done_scb(sc, scb, HOST_BAD_PHAS);
	}
}

static void
iha_reset_scsi_bus(sc)
	struct iha_softc *sc;
{
	struct iha_scsi_req_q *scb;
	struct tcs *tcs;
	int i, s;

	s = splbio();

	iha_reset_dma(sc);

	for (i = 0, scb = sc->sc_scb; i < IHA_MAX_SCB; i++, scb++)
		switch (scb->status) {
		case STATUS_BUSY:
			iha_append_done_scb(sc, scb, HOST_SCSI_RST);
			break;

		case STATUS_SELECT:
			iha_push_pend_scb(sc, scb);
			break;

		default:
			break;
		}

	for (i = 0, tcs = sc->sc_tcs; i < IHA_MAX_TARGETS; i++, tcs++)
		iha_reset_tcs(tcs, sc->sc_sconf1);

	splx(s);
}

/*
 * iha_resel - handle a detected SCSI bus reselection request.
 */
static int
iha_resel(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct iha_scsi_req_q *scb;
	struct tcs *tcs;
	u_int8_t tag, target, lun, msg, abortmsg;

	if (sc->sc_actscb != NULL) {
		if ((sc->sc_actscb->status == STATUS_SELECT))
			iha_push_pend_scb(sc, sc->sc_actscb);
		sc->sc_actscb = NULL;
	}

	target = bus_space_read_1(iot, ioh, TUL_SBID);
	lun = bus_space_read_1(iot, ioh, TUL_SALVC) & MSG_IDENTIFY_LUNMASK;

	tcs = &sc->sc_tcs[target];

	bus_space_write_1(iot, ioh, TUL_SCONFIG0, tcs->sconfig0);
	bus_space_write_1(iot, ioh, TUL_SYNCM, tcs->syncm);

	abortmsg = MSG_ABORT; /* until a valid tag has been obtained */

	if (tcs->ntagscb != NULL)
		/* There is a non-tagged I/O active on the target */
		scb = tcs->ntagscb;

	else {
		/*
		 * Since there is no active non-tagged operation
		 * read the tag type, the tag itself, and find
		 * the appropriate scb by indexing sc_scb with
		 * the tag.
		 */

		switch (iha_wait(sc, MSG_ACCEPT)) {
		case -1:
			return (-1);
		case PHASE_MSG_IN:
			bus_space_write_4(iot, ioh, TUL_STCNT0, 1);
			if ((iha_wait(sc, XF_FIFO_IN)) == -1)
				return (-1);
			break;
		default:
			goto abort;
		}

		msg = bus_space_read_1(iot, ioh, TUL_SFIFO); /* Read Tag Msg */

		if ((msg < MSG_SIMPLE_Q_TAG) || (msg > MSG_ORDERED_Q_TAG))
			goto abort;

		switch (iha_wait(sc, MSG_ACCEPT)) {
		case -1:
			return (-1);
		case PHASE_MSG_IN:
			bus_space_write_4(iot, ioh, TUL_STCNT0, 1);
			if ((iha_wait(sc, XF_FIFO_IN)) == -1)
				return (-1);
			break;
		default:
			goto abort;
		}

		tag = bus_space_read_1(iot, ioh, TUL_SFIFO); /* Read Tag ID */
		scb = &sc->sc_scb[tag];

		abortmsg = MSG_ABORT_TAG; /* Now that we have a valid tag! */
	}

	if ((scb->target != target)
	    || (scb->lun != lun)
	    || (scb->status != STATUS_BUSY)) {
abort:
		iha_msgout_abort(sc, abortmsg);
		return (-1);
	}

	sc->sc_actscb = scb;

	if (iha_wait(sc, MSG_ACCEPT) == -1)
		return (-1);

	return (iha_next_state(sc));
}

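/*
 * iha_msgin - read and act on single byte messages from the target
 *             until the bus leaves PHASE_MSG_IN.  Extended messages are
 *             handed to iha_msgin_extended() and unrecognized messages
 *             are rejected.
 */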
static int
iha_msgin(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int flags;
	int phase;
	u_int8_t msg;

	for (;;) {
		if ((bus_space_read_1(iot, ioh, TUL_SFIFOCNT) & FIFOC) > 0)
			bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);

		bus_space_write_4(iot, ioh, TUL_STCNT0, 1);

		phase = iha_wait(sc, XF_FIFO_IN);
		msg = bus_space_read_1(iot, ioh, TUL_SFIFO);

		switch (msg) {
		case MSG_DISCONNECT:
			sc->sc_flags |= FLAG_EXPECT_DISC;
			if (iha_wait(sc, MSG_ACCEPT) != -1)
				iha_bad_seq(sc);
			phase = -1;
			break;
		case MSG_SAVEDATAPOINTER:
		case MSG_RESTOREPOINTERS:
		case MSG_NOOP:
			phase = iha_wait(sc, MSG_ACCEPT);
			break;
		case MSG_MESSAGE_REJECT:
			/* XXX - need to clear FIFO like other 'Clear ATN'? */
			iha_set_ssig(sc, REQ | BSY | SEL | ATN, 0);
			flags = sc->sc_actscb->tcs->flags;
			if ((flags & FLAG_NO_NEG_SYNC) == 0)
				iha_set_ssig(sc, REQ | BSY | SEL, ATN);
			phase = iha_wait(sc, MSG_ACCEPT);
			break;
		case MSG_EXTENDED:
			phase = iha_msgin_extended(sc);
			break;
		case MSG_IGN_WIDE_RESIDUE:
			phase = iha_msgin_ignore_wid_resid(sc);
			break;
		case MSG_CMDCOMPLETE:
			sc->sc_flags |= FLAG_EXPECT_DONE_DISC;
			bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
			phase = iha_wait(sc, MSG_ACCEPT);
			if (phase != -1) {
				iha_bad_seq(sc);
				return (-1);
			}
			break;
		default:
			printf("[debug] iha_msgin: bad msg type: %d\n", msg);
			phase = iha_msgout_reject(sc);
			break;
		}

		if (phase != PHASE_MSG_IN)
			return (phase);
	}
	/* NOTREACHED */
}

static int
iha_msgin_ignore_wid_resid(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int phase;

	phase = iha_wait(sc, MSG_ACCEPT);

	if (phase == PHASE_MSG_IN) {
		phase = iha_wait(sc, XF_FIFO_IN);

		if (phase != -1) {
			bus_space_write_1(iot, ioh, TUL_SFIFO, 0);
			bus_space_read_1(iot, ioh, TUL_SFIFO);
			bus_space_read_1(iot, ioh, TUL_SFIFO);

			phase = iha_wait(sc, MSG_ACCEPT);
		}
	}

	return (phase);
}

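/*
 * iha_msgin_extended - read an extended message into sc_msg[] and
 *                      respond to SDTR/WDTR negotiation messages;
 *                      anything else is rejected.
 */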
static int
iha_msgin_extended(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int flags, i, phase, msglen, msgcode;

	/*
	 * XXX - can we just stop reading and reject, or do we have to
	 *       read all input, discarding the excess, and then reject
	 */
	for (i = 0; i < IHA_MAX_EXTENDED_MSG; i++) {
		phase = iha_wait(sc, MSG_ACCEPT);

		if (phase != PHASE_MSG_IN)
			return (phase);

		bus_space_write_4(iot, ioh, TUL_STCNT0, 1);

		if (iha_wait(sc, XF_FIFO_IN) == -1)
			return (-1);

		sc->sc_msg[i] = bus_space_read_1(iot, ioh, TUL_SFIFO);

		if (sc->sc_msg[0] == i)
			break;
	}

	msglen  = sc->sc_msg[0];
	msgcode = sc->sc_msg[1];

	if ((msglen == MSG_EXT_SDTR_LEN) && (msgcode == MSG_EXT_SDTR)) {
		if (iha_msgin_sdtr(sc) == 0) {
			iha_sync_done(sc);
			return (iha_wait(sc, MSG_ACCEPT));
		}

		iha_set_ssig(sc, REQ | BSY | SEL, ATN);

		phase = iha_wait(sc, MSG_ACCEPT);
		if (phase != PHASE_MSG_OUT)
			return (phase);

		/* Clear FIFO for important message - final SYNC offer */
		bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);

		iha_sync_done(sc); /* This is our final offer */

	} else if ((msglen == MSG_EXT_WDTR_LEN) && (msgcode == MSG_EXT_WDTR)) {

		flags = sc->sc_actscb->tcs->flags;

		if ((flags & FLAG_NO_WIDE) != 0)
			/* Offer 8bit xfers only */
			sc->sc_msg[2] = MSG_EXT_WDTR_BUS_8_BIT;

		else if (sc->sc_msg[2] > MSG_EXT_WDTR_BUS_32_BIT)
			/* BAD MSG */
			return (iha_msgout_reject(sc));

		else if (sc->sc_msg[2] == MSG_EXT_WDTR_BUS_32_BIT)
			/* Offer 16bit instead */
			sc->sc_msg[2] = MSG_EXT_WDTR_BUS_16_BIT;

		else {
			iha_wide_done(sc);
			if ((flags & FLAG_NO_NEG_SYNC) == 0)
				iha_set_ssig(sc, REQ | BSY | SEL, ATN);
			return (iha_wait(sc, MSG_ACCEPT));
		}

		iha_set_ssig(sc, REQ | BSY | SEL, ATN);

		phase = iha_wait(sc, MSG_ACCEPT);
		if (phase != PHASE_MSG_OUT)
			return (phase);
	} else
		return (iha_msgout_reject(sc));

	return (iha_msgout_extended(sc));
}

/*
 * iha_msgin_sdtr - check the SDTR msg in sc_msg.  If the offer is
 *                  acceptable, leave sc_msg as is and return 0.
 *                  Otherwise modify sc_msg with a counter-offer and
 *                  return 1 so the negotiation continues.
 */
static int
iha_msgin_sdtr(sc)
	struct iha_softc *sc;
{
	int flags;
	int newoffer;
	u_int8_t default_period;

	flags = sc->sc_actscb->tcs->flags;

	default_period = iha_rate_tbl[flags & FLAG_SCSI_RATE];

	if (sc->sc_msg[3] == 0)
		/* target offered async only.  Accept it. */
		return (0);

	newoffer = 0;

	if ((flags & FLAG_NO_SYNC) != 0) {
		sc->sc_msg[3] = 0;
		newoffer = 1;
	}

	if (sc->sc_msg[3] > IHA_MAX_OFFSET) {
		sc->sc_msg[3] = IHA_MAX_OFFSET;
		newoffer = 1;
	}

	if (sc->sc_msg[2] < default_period) {
		sc->sc_msg[2] = default_period;
		newoffer = 1;
	}

	if (sc->sc_msg[2] > IHA_MAX_PERIOD) {
		/* Use async */
		sc->sc_msg[3] = 0;
		newoffer = 1;
	}

	return (newoffer);
}

static int
iha_msgout(sc, msg)
	struct iha_softc *sc;
	u_int8_t msg;
{

	bus_space_write_1(sc->sc_iot, sc->sc_ioh, TUL_SFIFO, msg);

	return (iha_wait(sc, XF_FIFO_OUT));
}

static void
iha_msgout_abort(sc, aborttype)
	struct iha_softc *sc;
	u_int8_t aborttype;
{

	iha_set_ssig(sc, REQ | BSY | SEL, ATN);

	switch (iha_wait(sc, MSG_ACCEPT)) {
	case -1:
		break;

	case PHASE_MSG_OUT:
		sc->sc_flags |= FLAG_EXPECT_DISC;
		if (iha_msgout(sc, aborttype) != -1)
			iha_bad_seq(sc);
		break;

	default:
		iha_bad_seq(sc);
		break;
	}
}

static int
iha_msgout_reject(sc)
	struct iha_softc *sc;
{

	iha_set_ssig(sc, REQ | BSY | SEL, ATN);

	if (iha_wait(sc, MSG_ACCEPT) == PHASE_MSG_OUT)
		return (iha_msgout(sc, MSG_MESSAGE_REJECT));

	return (-1);
}

static int
iha_msgout_extended(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int phase;

	bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_EXTENDED);

	bus_space_write_multi_1(iot, ioh, TUL_SFIFO,
	    sc->sc_msg, sc->sc_msg[0] + 1);

	phase = iha_wait(sc, XF_FIFO_OUT);

	bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
	iha_set_ssig(sc, REQ | BSY | SEL | ATN, 0);

	return (phase);
}

static int
iha_msgout_wdtr(sc)
	struct iha_softc *sc;
{

	sc->sc_actscb->tcs->flags |= FLAG_WIDE_DONE;

	sc->sc_msg[0] = MSG_EXT_WDTR_LEN;
	sc->sc_msg[1] = MSG_EXT_WDTR;
	sc->sc_msg[2] = MSG_EXT_WDTR_BUS_16_BIT;

	return (iha_msgout_extended(sc));
}

static int
iha_msgout_sdtr(sc)
	struct iha_softc *sc;
{
	struct tcs *tcs = sc->sc_actscb->tcs;

	tcs->flags |= FLAG_SYNC_DONE;

	sc->sc_msg[0] = MSG_EXT_SDTR_LEN;
	sc->sc_msg[1] = MSG_EXT_SDTR;
	sc->sc_msg[2] = iha_rate_tbl[tcs->flags & FLAG_SCSI_RATE];
	sc->sc_msg[3] = IHA_MAX_OFFSET; /* REQ/ACK */

	return (iha_msgout_extended(sc));
}

static void
iha_wide_done(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct tcs *tcs = sc->sc_actscb->tcs;

	tcs->syncm = 0;
	tcs->period = 0;
	tcs->offset = 0;

	if (sc->sc_msg[2] != 0)
		tcs->syncm |= PERIOD_WIDE_SCSI;

	tcs->sconfig0 &= ~ALTPD;
	tcs->flags &= ~FLAG_SYNC_DONE;
	tcs->flags |=  FLAG_WIDE_DONE;

	iha_update_xfer_mode(sc, sc->sc_actscb->target);

	bus_space_write_1(iot, ioh, TUL_SCONFIG0, tcs->sconfig0);
	bus_space_write_1(iot, ioh, TUL_SYNCM, tcs->syncm);
}

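/*
 * iha_sync_done - record the negotiated sync parameters in tcs->syncm:
 *                 the offset occupies the low bits and the rate table
 *                 index bits 4-7.  The new mode is then reported via
 *                 iha_update_xfer_mode().
 */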

void
iha_reset_chip(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;

	/* reset tulip chip */

	bus_space_write_1(iot, ioh, TUL_SCTRL0, RSCSI);

	do {
		sc->sc_sistat = bus_space_read_1(iot, ioh, TUL_SISTAT);
	} while ((sc->sc_sistat & SRSTD) == 0);

	iha_set_ssig(sc, 0, 0);

	bus_space_read_1(iot, ioh, TUL_SISTAT); /* Clear any active interrupt */
}

static void
iha_select(sc, scb, select_type)
	struct iha_softc *sc;
	struct iha_scsi_req_q *scb;
	u_int8_t select_type;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;

	switch (select_type) {
	case SEL_ATN:
		bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_id);
		bus_space_write_multi_1(iot, ioh, TUL_SFIFO,
		    scb->cmd, scb->cmdlen);

		scb->nextstat = 2;
		break;

	case SELATNSTOP:
		scb->nextstat = 1;
		break;

	case SEL_ATN3:
		bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_id);
		bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_tagmsg);
		bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_tagid);

		bus_space_write_multi_1(iot, ioh, TUL_SFIFO, scb->cmd,
		    scb->cmdlen);

		scb->nextstat = 2;
		break;

	default:
		printf("[debug] iha_select() - unknown select type = 0x%02x\n",
		    select_type);
		return;
	}

	iha_del_pend_scb(sc, scb);
	scb->status = STATUS_SELECT;

	sc->sc_actscb = scb;

	bus_space_write_1(iot, ioh, TUL_SCMD, select_type);
}

/*
 * iha_wait - wait for an interrupt to service or a SCSI bus phase change
 *            after writing the supplied command to the tulip chip. If
 *            the command is NO_OP, skip the command writing.
 */
static int
iha_wait(sc, cmd)
	struct iha_softc *sc;
	u_int8_t cmd;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;

	if (cmd != NO_OP)
		bus_space_write_1(iot, ioh, TUL_SCMD, cmd);

	/*
	 * Have to do this here, in addition to in iha_isr, because
	 * interrupts might be turned off when we get here.
	 */
	do {
		sc->sc_status0 = bus_space_read_1(iot, ioh, TUL_STAT0);
	} while ((sc->sc_status0 & INTPD) == 0);

	sc->sc_status1 = bus_space_read_1(iot, ioh, TUL_STAT1);
	sc->sc_sistat = bus_space_read_1(iot, ioh, TUL_SISTAT);

	sc->sc_phase = sc->sc_status0 & PH_MASK;

	if ((sc->sc_sistat & SRSTD) != 0) {
		/* SCSI bus reset interrupt */
		iha_reset_scsi_bus(sc);
		return (-1);
	}

	if ((sc->sc_sistat & RSELED) != 0)
		/* Reselection interrupt */
		return (iha_resel(sc));

	if ((sc->sc_sistat & STIMEO) != 0) {
		/* selected/reselected timeout interrupt */
		iha_busfree(sc);
		return (-1);
	}

	if ((sc->sc_sistat & DISCD) != 0) {
		/* BUS disconnection interrupt */
		if ((sc->sc_flags & FLAG_EXPECT_DONE_DISC) != 0) {
			bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
			bus_space_write_1(iot, ioh, TUL_SCONFIG0,
			    SCONFIG0DEFAULT);
			bus_space_write_1(iot, ioh, TUL_SCTRL1, EHRSL);
			iha_append_done_scb(sc, sc->sc_actscb, HOST_OK);
			sc->sc_flags &= ~FLAG_EXPECT_DONE_DISC;

		} else if ((sc->sc_flags & FLAG_EXPECT_DISC) != 0) {
			bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
			bus_space_write_1(iot, ioh, TUL_SCONFIG0,
			    SCONFIG0DEFAULT);
			bus_space_write_1(iot, ioh, TUL_SCTRL1, EHRSL);
			sc->sc_actscb = NULL;
			sc->sc_flags &= ~FLAG_EXPECT_DISC;

		} else
			iha_busfree(sc);

		return (-1);
	}

	return (sc->sc_phase);
}
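
/*
 * Typical caller pattern for iha_wait() (sketch, as used elsewhere in this
 * file): issue the chip command and dispatch on the resulting phase; a
 * negative return means the bus was reset, disconnected or timed out and
 * the caller simply unwinds.
 *
 *	phase = iha_wait(sc, MSG_ACCEPT);
 *	if (phase != PHASE_MSG_OUT)
 *		return (phase);
 */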

/*
 * iha_done_scb - We have a scb which has been processed by the
 *                adaptor, now we look to see how the operation went.
 */
static void
iha_done_scb(sc, scb)
	struct iha_softc *sc;
	struct iha_scsi_req_q *scb;
{
	struct scsipi_xfer *xs = scb->xs;

	if (xs != NULL) {
		/* Cancel the timeout. */
		callout_stop(&xs->xs_callout);

		if (scb->flags & (FLAG_DATAIN | FLAG_DATAOUT)) {
			bus_dmamap_sync(sc->sc_dmat, scb->dmap,
			    0, scb->dmap->dm_mapsize,
			    (scb->flags & FLAG_DATAIN) ?
			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, scb->dmap);
		}

		xs->status = scb->ta_stat;

		switch (scb->ha_stat) {
		case HOST_OK:
			switch (scb->ta_stat) {
			case SCSI_OK:
			case SCSI_CONDITION_MET:
			case SCSI_INTERM:
			case SCSI_INTERM_COND_MET:
				xs->resid = scb->buflen;
				xs->error = XS_NOERROR;
				if ((scb->flags & FLAG_RSENS) != 0)
					xs->error = XS_SENSE;
				break;

			case SCSI_RESV_CONFLICT:
			case SCSI_BUSY:
			case SCSI_QUEUE_FULL:
				xs->error = XS_BUSY;
				break;

			case SCSI_TERMINATED:
			case SCSI_ACA_ACTIVE:
			case SCSI_CHECK:
				scb->tcs->flags &=
				    ~(FLAG_SYNC_DONE | FLAG_WIDE_DONE);

				if ((scb->flags & FLAG_RSENS) != 0 ||
				    iha_push_sense_request(sc, scb) != 0) {
					scb->flags &= ~FLAG_RSENS;
					printf("%s: request sense failed\n",
					    sc->sc_dev.dv_xname);
					xs->error = XS_DRIVER_STUFFUP;
					break;
				}

				xs->error = XS_SENSE;
				return;

			default:
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
			break;

		case HOST_SEL_TOUT:
			xs->error = XS_SELTIMEOUT;
			break;

		case HOST_SCSI_RST:
		case HOST_DEV_RST:
			xs->error = XS_RESET;
			break;

		case HOST_SPERR:
			printf("%s: SCSI Parity error detected\n",
			    sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case HOST_TIMED_OUT:
			xs->error = XS_TIMEOUT;
			break;

		case HOST_DO_DU:
		case HOST_BAD_PHAS:
		default:
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}

		scsipi_done(xs);
	}

	iha_append_free_scb(sc, scb);
}
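
/*
 * Note on the CHECK CONDITION path above: the first time through,
 * iha_push_sense_request() re-queues the same scb as a REQUEST SENSE
 * command and iha_done_scb() returns without finishing or freeing it.
 * When that inner command completes, FLAG_RSENS is found set and the
 * original transfer is completed with XS_SENSE.
 */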

static void
iha_timeout(arg)
	void *arg;
{
	struct iha_scsi_req_q *scb = (struct iha_scsi_req_q *)arg;
	struct scsipi_xfer *xs = scb->xs;
	struct scsipi_periph *periph;
	struct iha_softc *sc;

	if (xs == NULL) {
		printf("[debug] iha_timeout called with xs == NULL\n");
		return;
	}

	/* Only dereference xs once we know it is valid. */
	periph = xs->xs_periph;
	sc = (void *)periph->periph_channel->chan_adapter->adapt_dev;

	scsipi_printaddr(periph);
	printf("SCSI OpCode 0x%02x timed out\n", xs->cmd->opcode);

	iha_abort_xs(sc, xs, HOST_TIMED_OUT);
}

static void
iha_exec_scb(sc, scb)
	struct iha_softc *sc;
	struct iha_scsi_req_q *scb;
{
	bus_space_tag_t iot;
	bus_space_handle_t ioh;
	bus_dmamap_t dm;
	struct scsipi_xfer *xs = scb->xs;
	int nseg, s;

	dm = scb->dmap;
	nseg = dm->dm_nsegs;

	if (nseg > 1) {
		struct iha_sg_element *sg = scb->sglist;
		int i;

		for (i = 0; i < nseg; i++) {
			sg[i].sg_len = htole32(dm->dm_segs[i].ds_len);
			sg[i].sg_addr = htole32(dm->dm_segs[i].ds_addr);
		}
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    scb->sgoffset, IHA_SG_SIZE,
		    BUS_DMASYNC_PREWRITE);

		scb->flags |= FLAG_SG;
		scb->sg_size = scb->sg_max = nseg;
		scb->sg_index = 0;

		scb->bufaddr = scb->sg_addr;
	} else
		scb->bufaddr = dm->dm_segs[0].ds_addr;

	if ((xs->xs_control & XS_CTL_POLL) == 0) {
		int timeout = xs->timeout;

		timeout = (timeout > 100000) ?
		    timeout / 1000 * hz : timeout * hz / 1000;
		if (timeout == 0)
			timeout = 1;
		callout_reset(&xs->xs_callout, timeout, iha_timeout, scb);
	}

	s = splbio();

	if (((scb->xs->xs_control & XS_CTL_RESET) != 0) ||
	    (scb->cmd[0] == REQUEST_SENSE))
		iha_push_pend_scb(sc, scb);   /* Insert SCB at head of Pend */
	else
		iha_append_pend_scb(sc, scb); /* Append SCB to tail of Pend */

	/*
	 * Run through iha_main() to ensure something is active, if
	 * only this new SCB.
	 */
	if (sc->sc_semaph != SEMAPH_IN_MAIN) {
		iot = sc->sc_iot;
		ioh = sc->sc_ioh;

		bus_space_write_1(iot, ioh, TUL_IMSK, MASK_ALL);
		sc->sc_semaph = SEMAPH_IN_MAIN;

		splx(s);
		iha_main(sc);
		s = splbio();

		sc->sc_semaph = ~SEMAPH_IN_MAIN;
		bus_space_write_1(iot, ioh, TUL_IMSK, (MASK_ALL & ~MSCMP));
	}

	splx(s);
}
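
/*
 * Note on the timeout conversion in iha_exec_scb(): xs->timeout is in
 * milliseconds while callout_reset() takes clock ticks.  Large values
 * are scaled as (ms / 1000) * hz so the intermediate product cannot
 * overflow an int; e.g. with hz == 100 a 30000 ms timeout becomes
 * 30000 * 100 / 1000 == 30 * 100 == 3000 ticks either way.
 */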

/*
 * iha_set_ssig - read the current scsi signal mask, then write a new
 *                one which turns off/on the specified signals.
 */
static void
iha_set_ssig(sc, offsigs, onsigs)
	struct iha_softc *sc;
	u_int8_t offsigs, onsigs;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int8_t currsigs;

	currsigs = bus_space_read_1(iot, ioh, TUL_SSIGI);
	bus_space_write_1(iot, ioh, TUL_SSIGO, (currsigs & ~offsigs) | onsigs);
}

/*
 * iha_alloc_sglist - allocate and map sglist for SCB's
 */
static int
iha_alloc_sglist(sc)
	struct iha_softc *sc;
{
	bus_dma_segment_t seg;
	int error, rseg;

	/*
	 * Allocate dma-safe memory for the SCB's sglist
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    IHA_SG_SIZE * IHA_MAX_SCB,
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate sglist, error = %d\n", error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    IHA_SG_SIZE * IHA_MAX_SCB, (caddr_t *)&sc->sc_sglist,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map sglist, error = %d\n", error);
		return (error);
	}

	/*
	 * Create and load the DMA map used for the SCBs
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat,
	    IHA_SG_SIZE * IHA_MAX_SCB, 1, IHA_SG_SIZE * IHA_MAX_SCB,
	    0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		printf(": unable to create control DMA map, error = %d\n",
		    error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
	    sc->sc_sglist, IHA_SG_SIZE * IHA_MAX_SCB,
	    NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load control DMA map, error = %d\n", error);
		return (error);
	}

	memset(sc->sc_sglist, 0, IHA_SG_SIZE * IHA_MAX_SCB);

	return (0);
}

/*
 * iha_read_eeprom - read Serial EEPROM value & set to defaults
 *                   if required. XXX - Writing does NOT work!
 */
void
iha_read_eeprom(sc, eeprom)
	struct iha_softc *sc;
	struct iha_eeprom *eeprom;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int16_t *buf = (u_int16_t *)eeprom;
	u_int8_t gctrl;

	/* Enable EEProm programming */
	gctrl = bus_space_read_1(iot, ioh, TUL_GCTRL0) | EEPRG;
	bus_space_write_1(iot, ioh, TUL_GCTRL0, gctrl);

	/* Read EEProm */
	if (iha_se2_rd_all(sc, buf) == 0)
		panic("%s: cannot read EEPROM\n", sc->sc_dev.dv_xname);

	/* Disable EEProm programming */
	gctrl = bus_space_read_1(iot, ioh, TUL_GCTRL0) & ~EEPRG;
	bus_space_write_1(iot, ioh, TUL_GCTRL0, gctrl);
}
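
/*
 * Layout note (inferred from iha_alloc_sglist() and iha_exec_scb()): the
 * scatter/gather lists for all SCBs live in one DMA-safe allocation of
 * IHA_SG_SIZE * IHA_MAX_SCB bytes; each SCB uses the IHA_SG_SIZE slice at
 * its scb->sgoffset, which is the only part synced when that SCB starts.
 */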

#ifdef notused
/*
 * iha_se2_update_all - Update SCSI H/A configuration parameters from
 *                      serial EEPROM Setup default pattern. Only
 *                      change those values different from the values
 *                      in iha_eeprom.
 */
void
iha_se2_update_all(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int16_t *np;
	u_int32_t chksum;
	int i;

	/* Enable erase/write state of EEPROM */
	iha_se2_instr(sc, ENABLE_ERASE);
	bus_space_write_1(iot, ioh, TUL_NVRAM, 0);
	EEP_WAIT();

	np = (u_int16_t *)&eeprom_default;

	for (i = 0, chksum = 0; i < EEPROM_SIZE - 1; i++) {
		iha_se2_wr(sc, i, *np);
		chksum += *np++;
	}

	chksum &= 0x0000ffff;
	iha_se2_wr(sc, 31, chksum);

	/* Disable erase/write state of EEPROM */
	iha_se2_instr(sc, 0);
	bus_space_write_1(iot, ioh, TUL_NVRAM, 0);
	EEP_WAIT();
}

/*
 * iha_se2_wr - write the given 16 bit value into the Serial EEPROM
 *              at the specified offset
 */
void
iha_se2_wr(sc, addr, writeword)
	struct iha_softc *sc;
	int addr;
	u_int16_t writeword;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i, bit;

	/* send 'WRITE' Instruction == address | WRITE bit */
	iha_se2_instr(sc, addr | WRITE);

	for (i = 16; i > 0; i--) {
		if (writeword & (1 << (i - 1)))
			bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS | NVRDO);
		else
			bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
		EEP_WAIT();
		bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS | NVRCK);
		EEP_WAIT();
	}

	bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
	EEP_WAIT();
	bus_space_write_1(iot, ioh, TUL_NVRAM, 0);
	EEP_WAIT();
	bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
	EEP_WAIT();

	for (;;) {
		bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS | NVRCK);
		EEP_WAIT();
		bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
		EEP_WAIT();
		bit = bus_space_read_1(iot, ioh, TUL_NVRAM) & NVRDI;
		EEP_WAIT();
		if (bit != 0)
			break; /* write complete */
	}

	bus_space_write_1(iot, ioh, TUL_NVRAM, 0);
}
#endif

/*
 * iha_se2_rd - read & return the 16 bit value at the specified
 *              offset in the Serial E2PROM
 */
u_int16_t
iha_se2_rd(sc, addr)
	struct iha_softc *sc;
	int addr;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i, bit;
	u_int16_t readword;

	/* Send 'READ' instruction == address | READ bit */
	iha_se2_instr(sc, addr | READ);

	readword = 0;
	for (i = 16; i > 0; i--) {
		bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS | NVRCK);
		EEP_WAIT();
		bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
		EEP_WAIT();
		/* sample the data bit after the clock has been dropped */
		bit = (bus_space_read_1(iot, ioh, TUL_NVRAM) & NVRDI) ? 1 : 0;
		EEP_WAIT();

		readword |= bit << (i - 1);
	}

	bus_space_write_1(iot, ioh, TUL_NVRAM, 0);

	return (readword);
}
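
/*
 * Serial EEPROM access summary (as implemented above): NVRCS stays
 * asserted while bits are being transferred, and every bit is moved MSB
 * first by raising NVRCK and then dropping it again; on reads the data-in
 * bit (NVRDI) is sampled after the falling clock edge.  iha_se2_instr()
 * below clocks out a start bit followed by the 8-bit opcode/address
 * before any data word is transferred.
 */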

/*
 * iha_se2_rd_all - Read SCSI H/A config parameters from serial EEPROM
 */
int
iha_se2_rd_all(sc, buf)
	struct iha_softc *sc;
	u_int16_t *buf;
{
	struct iha_eeprom *eeprom = (struct iha_eeprom *)buf;
	u_int32_t chksum;
	int i;

	for (i = 0, chksum = 0; i < EEPROM_SIZE - 1; i++) {
		*buf = iha_se2_rd(sc, i);
		chksum += *buf++;
	}
	*buf = iha_se2_rd(sc, 31); /* read checksum from EEPROM */

	chksum &= 0x0000ffff; /* lower 16 bits */

	return (eeprom->signature == EEP_SIGNATURE) &&
	    (eeprom->checksum == chksum);
}

/*
 * iha_se2_instr - write an octet to serial E2PROM one bit at a time
 */
void
iha_se2_instr(sc, instr)
	struct iha_softc *sc;
	int instr;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int b, i;

	b = NVRCS | NVRDO; /* Write the start bit (== 1) */

	bus_space_write_1(iot, ioh, TUL_NVRAM, b);
	EEP_WAIT();
	bus_space_write_1(iot, ioh, TUL_NVRAM, b | NVRCK);
	EEP_WAIT();

	for (i = 8; i > 0; i--) {
		if (instr & (1 << (i - 1)))
			b = NVRCS | NVRDO; /* Write a 1 bit */
		else
			b = NVRCS;	   /* Write a 0 bit */

		bus_space_write_1(iot, ioh, TUL_NVRAM, b);
		EEP_WAIT();
		bus_space_write_1(iot, ioh, TUL_NVRAM, b | NVRCK);
		EEP_WAIT();
	}

	bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
}

/*
 * iha_reset_tcs - reset the target control structure pointed
 *                 to by tcs to default values. tcs flags
 *                 only has the negotiation done bits reset as
 *                 the other bits are fixed at initialization.
 */
void
iha_reset_tcs(tcs, config0)
	struct tcs *tcs;
	u_int8_t config0;
{

	tcs->flags &= ~(FLAG_SYNC_DONE | FLAG_WIDE_DONE);
	tcs->period = 0;
	tcs->offset = 0;
	tcs->tagcnt = 0;
	tcs->ntagscb = NULL;
	tcs->syncm = 0;
	tcs->sconfig0 = config0;
}

void
iha_update_xfer_mode(sc, target)
	struct iha_softc *sc;
	int target;
{
	struct tcs *tcs = &sc->sc_tcs[target];
	struct scsipi_xfer_mode xm;

	xm.xm_target = target;
	xm.xm_mode = 0;
	xm.xm_period = 0;
	xm.xm_offset = 0;

	if (tcs->syncm & PERIOD_WIDE_SCSI)
		xm.xm_mode |= PERIPH_CAP_WIDE16;

	if (tcs->period) {
		xm.xm_mode |= PERIPH_CAP_SYNC;
		xm.xm_period = tcs->period;
		xm.xm_offset = tcs->offset;
	}

	scsipi_async_event(&sc->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
}
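
/*
 * Illustrative sketch (compiled out): the consistency check performed by
 * iha_se2_rd_all(), expressed over a buffer that has already been read.
 * The checksum stored in the last EEPROM word is the low 16 bits of the
 * sum of all preceding words.  The helper name is hypothetical.
 */
#if 0
static int
iha_eeprom_ok(const u_int16_t *words)
{
	const struct iha_eeprom *eep = (const struct iha_eeprom *)words;
	u_int32_t sum;
	int i;

	for (i = 0, sum = 0; i < EEPROM_SIZE - 1; i++)
		sum += words[i];

	return (eep->signature == EEP_SIGNATURE &&
	    eep->checksum == (sum & 0xffff));
}
#endif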