/*	$NetBSD: siop_common.c,v 1.32 2003/01/31 00:26:31 thorpej Exp $	*/

/*
 * Copyright (c) 2000, 2002 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Manuel Bouyer.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/* SYM53c7/8xx PCI-SCSI I/O Processors driver */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: siop_common.c,v 1.32 2003/01/31 00:26:31 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/scsiio.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>
#include <machine/bus.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>
#include <dev/scsipi/scsipi_all.h>

#include <dev/scsipi/scsiconf.h>

#include <dev/ic/siopreg.h>
#include <dev/ic/siopvar_common.h>

#include "opt_siop.h"

#undef DEBUG
#undef DEBUG_DR
#undef DEBUG_NEG

int
siop_common_attach(sc)
	struct siop_common_softc *sc;
{
	int error, i;
	bus_dma_segment_t seg;
	int rseg;

	/*
	 * Allocate DMA-safe memory for the script and map it.
	 */
	if ((sc->features & SF_CHIP_RAM) == 0) {
		error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE,
		    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (error) {
			aprint_error(
			    "%s: unable to allocate script DMA memory, "
			    "error = %d\n", sc->sc_dev.dv_xname, error);
			return error;
		}
		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
		    (caddr_t *)&sc->sc_script,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
		if (error) {
			aprint_error("%s: unable to map script DMA memory, "
			    "error = %d\n", sc->sc_dev.dv_xname, error);
			return error;
		}
		error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1,
		    PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_scriptdma);
		if (error) {
			aprint_error("%s: unable to create script DMA map, "
			    "error = %d\n", sc->sc_dev.dv_xname, error);
			return error;
		}
		error = bus_dmamap_load(sc->sc_dmat, sc->sc_scriptdma,
		    sc->sc_script, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
		if (error) {
			aprint_error("%s: unable to load script DMA map, "
			    "error = %d\n", sc->sc_dev.dv_xname, error);
			return error;
		}
		sc->sc_scriptaddr =
		    sc->sc_scriptdma->dm_segs[0].ds_addr;
		sc->ram_size = PAGE_SIZE;
	}

	sc->sc_adapt.adapt_dev = &sc->sc_dev;
	sc->sc_adapt.adapt_nchannels = 1;
	sc->sc_adapt.adapt_openings = 0;
	sc->sc_adapt.adapt_ioctl = siop_ioctl;
	sc->sc_adapt.adapt_minphys = minphys;

	memset(&sc->sc_chan, 0, sizeof(sc->sc_chan));
	sc->sc_chan.chan_adapter = &sc->sc_adapt;
	sc->sc_chan.chan_bustype = &scsi_bustype;
	sc->sc_chan.chan_channel = 0;
	sc->sc_chan.chan_flags = SCSIPI_CHAN_CANGROW;
	sc->sc_chan.chan_ntargets =
	    (sc->features & SF_BUS_WIDE) ? 16 : 8;
	sc->sc_chan.chan_nluns = 8;
	sc->sc_chan.chan_id =
	    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCID);
	if (sc->sc_chan.chan_id == 0 ||
	    sc->sc_chan.chan_id >= sc->sc_chan.chan_ntargets)
		sc->sc_chan.chan_id = SIOP_DEFAULT_TARGET;

	for (i = 0; i < 16; i++)
		sc->targets[i] = NULL;

	/* find min/max sync period for this chip */
	sc->st_maxsync = 0;
	sc->dt_maxsync = 0;
	sc->st_minsync = 255;
	sc->dt_minsync = 255;
	for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]); i++) {
		if (sc->clock_period != scf_period[i].clock)
			continue;
		if (sc->st_maxsync < scf_period[i].period)
			sc->st_maxsync = scf_period[i].period;
		if (sc->st_minsync > scf_period[i].period)
			sc->st_minsync = scf_period[i].period;
	}
	if (sc->st_maxsync == 255 || sc->st_minsync == 0)
		panic("siop: can't find my sync parameters");
	for (i = 0; i < sizeof(dt_scf_period) / sizeof(dt_scf_period[0]); i++) {
		if (sc->clock_period != dt_scf_period[i].clock)
			continue;
		if (sc->dt_maxsync < dt_scf_period[i].period)
			sc->dt_maxsync = dt_scf_period[i].period;
		if (sc->dt_minsync > dt_scf_period[i].period)
			sc->dt_minsync = dt_scf_period[i].period;
	}
	if (sc->dt_maxsync == 255 || sc->dt_minsync == 0)
		panic("siop: can't find my sync parameters");
	return 0;
}

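/*
 * Reset the chip and reprogram the adapter-wide registers (arbitration,
 * interrupt enables, selection timeout, host ID, clock doubler/quadrupler,
 * prefetch, large FIFO, LED control) to the driver defaults, then call the
 * controller-specific sc_reset() hook.
 */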
void
siop_common_reset(sc)
	struct siop_common_softc *sc;
{
	u_int32_t stest3;

	/* reset the chip */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_SRST);
	delay(1000);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, 0);

	/* init registers */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL0,
	    SCNTL0_ARB_MASK | SCNTL0_EPC | SCNTL0_AAP);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, 0);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3, sc->clock_div);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER, 0);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DIEN, 0xff);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN0,
	    0xff & ~(SIEN0_CMP | SIEN0_SEL | SIEN0_RSL));
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN1,
	    0xff & ~(SIEN1_HTH | SIEN1_GEN));
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2, 0);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, STEST3_TE);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STIME0,
	    (0xb << STIME0_SEL_SHIFT));
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCID,
	    sc->sc_chan.chan_id | SCID_RRE);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_RESPID0,
	    1 << sc->sc_chan.chan_id);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DCNTL,
	    (sc->features & SF_CHIP_PF) ? DCNTL_COM | DCNTL_PFEN : DCNTL_COM);

	/* enable clock doubler or quadrupler if appropriate */
	if (sc->features & (SF_CHIP_DBLR | SF_CHIP_QUAD)) {
		stest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
		    STEST1_DBLEN);
		if (sc->features & SF_CHIP_QUAD) {
			/* wait for the PLL to lock */
			while ((bus_space_read_1(sc->sc_rt, sc->sc_rh,
			    SIOP_STEST4) & STEST4_LOCK) == 0)
				delay(10);
		} else {
			/* data sheet says 20us - more won't hurt */
			delay(100);
		}
		/* halt scsi clock, select doubler/quad, restart clock */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3,
		    stest3 | STEST3_HSC);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
		    STEST1_DBLEN | STEST1_DBLSEL);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, stest3);
	} else {
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, 0);
	}
	if (sc->features & SF_CHIP_FIFO)
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5,
		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5) |
		    CTEST5_DFS);
	if (sc->features & SF_CHIP_LED0) {
		/* Set GPIO0 as output if software LED control is required */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL,
		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL) & 0xfe);
	}
	if (sc->features & SF_BUS_ULTRA3) {
		/* reset SCNTL4 */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4, 0);
	}
	sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
	    STEST4_MODE_MASK;

	/*
	 * initialise the RAM. Without this we may get scsi gross errors on
	 * the 1010
	 */
	if (sc->features & SF_CHIP_RAM)
		bus_space_set_region_4(sc->sc_ramt, sc->sc_ramh,
		    0, 0, sc->ram_size / 4);
	sc->sc_reset(sc);
}

/* prepare tables before sending a cmd */
void
siop_setuptables(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	int i;
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct scsipi_xfer *xs = siop_cmd->xs;
	int target = xs->xs_periph->periph_target;
	int lun = xs->xs_periph->periph_lun;
	int msgoffset = 1;

	siop_cmd->siop_tables->id = htole32(sc->targets[target]->id);
	memset(siop_cmd->siop_tables->msg_out, 0,
	    sizeof(siop_cmd->siop_tables->msg_out));
	/* request sense doesn't disconnect */
	if (xs->xs_control & XS_CTL_REQSENSE)
		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
	else if ((sc->features & SF_CHIP_GEBUG) &&
	    (sc->targets[target]->flags & TARF_ISWIDE) == 0)
		/*
		 * 1010 bug: it seems that the 1010 has problems with reselect
		 * when not in wide mode (generates false SCSI gross errors).
		 * The FreeBSD sym driver has comments about it but their
		 * workaround (disable SCSI gross error reporting) doesn't
		 * work with my adapter. So disable disconnect when not
		 * wide.
		 */
		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
	else
		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 1);
	if (xs->xs_tag_type != 0) {
		if ((sc->targets[target]->flags & TARF_TAG) == 0) {
			scsipi_printaddr(xs->xs_periph);
			printf(": tagged command type %d id %d\n",
			    siop_cmd->xs->xs_tag_type, siop_cmd->xs->xs_tag_id);
			panic("tagged command for non-tagging device");
		}
		siop_cmd->flags |= CMDFL_TAG;
		siop_cmd->siop_tables->msg_out[1] = siop_cmd->xs->xs_tag_type;
		/*
		 * use siop_cmd->tag, not xs->xs_tag_id: the caller may want
		 * a different one
		 */
		siop_cmd->siop_tables->msg_out[2] = siop_cmd->tag;
		msgoffset = 3;
	}
	siop_cmd->siop_tables->t_msgout.count = htole32(msgoffset);
	if (sc->targets[target]->status == TARST_ASYNC) {
		if ((sc->targets[target]->flags & TARF_DT) &&
		    (sc->mode == STEST4_MODE_LVD)) {
			sc->targets[target]->status = TARST_PPR_NEG;
			siop_ppr_msg(siop_cmd, msgoffset, sc->dt_minsync,
			    sc->maxoff);
		} else if (sc->targets[target]->flags & TARF_WIDE) {
			sc->targets[target]->status = TARST_WIDE_NEG;
			siop_wdtr_msg(siop_cmd, msgoffset,
			    MSG_EXT_WDTR_BUS_16_BIT);
		} else if (sc->targets[target]->flags & TARF_SYNC) {
			sc->targets[target]->status = TARST_SYNC_NEG;
			siop_sdtr_msg(siop_cmd, msgoffset, sc->st_minsync,
			    (sc->maxoff > 31) ? 31 : sc->maxoff);
		} else {
			sc->targets[target]->status = TARST_OK;
			siop_update_xfer_mode(sc, target);
		}
	}
	siop_cmd->siop_tables->status =
	    htole32(SCSI_SIOP_NOSTATUS); /* set invalid status */

	siop_cmd->siop_tables->cmd.count =
	    htole32(siop_cmd->dmamap_cmd->dm_segs[0].ds_len);
	siop_cmd->siop_tables->cmd.addr =
	    htole32(siop_cmd->dmamap_cmd->dm_segs[0].ds_addr);
	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
		for (i = 0; i < siop_cmd->dmamap_data->dm_nsegs; i++) {
			siop_cmd->siop_tables->data[i].count =
			    htole32(siop_cmd->dmamap_data->dm_segs[i].ds_len);
			siop_cmd->siop_tables->data[i].addr =
			    htole32(siop_cmd->dmamap_data->dm_segs[i].ds_addr);
		}
	}
}

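/*
 * The siop_{wdtr,ppr,sdtr}_neg() handlers below process the answer to a
 * negotiation we started, or a negotiation initiated by the target.  They
 * return SIOP_NEG_MSGOUT when a reply has been built in msg_out and must
 * be sent to the target, or SIOP_NEG_ACK when the incoming message only
 * needs to be acknowledged.
 */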
int
siop_wdtr_neg(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct siop_common_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->xs_periph->periph_target;
	struct siop_common_xfer *tables = siop_cmd->siop_tables;

	if (siop_target->status == TARST_WIDE_NEG) {
		/* we initiated wide negotiation */
		switch (tables->msg_in[3]) {
		case MSG_EXT_WDTR_BUS_8_BIT:
			siop_target->flags &= ~TARF_ISWIDE;
			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
			break;
		case MSG_EXT_WDTR_BUS_16_BIT:
			if (siop_target->flags & TARF_WIDE) {
				siop_target->flags |= TARF_ISWIDE;
				sc->targets[target]->id |= (SCNTL3_EWS << 24);
				break;
			}
			/* FALLTHROUGH */
		default:
			/*
			 * hum, we got more than we can handle, shouldn't
			 * happen. Reject, and stay async.
			 */
			siop_target->flags &= ~TARF_ISWIDE;
			siop_target->status = TARST_OK;
			siop_target->offset = siop_target->period = 0;
			siop_update_xfer_mode(sc, target);
			printf("%s: rejecting invalid wide negotiation from "
			    "target %d (%d)\n", sc->sc_dev.dv_xname, target,
			    tables->msg_in[3]);
			tables->t_msgout.count = htole32(1);
			tables->msg_out[0] = MSG_MESSAGE_REJECT;
			return SIOP_NEG_MSGOUT;
		}
		tables->id = htole32(sc->targets[target]->id);
		bus_space_write_1(sc->sc_rt, sc->sc_rh,
		    SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		/* we now need to do sync */
		if (siop_target->flags & TARF_SYNC) {
			siop_target->status = TARST_SYNC_NEG;
			siop_sdtr_msg(siop_cmd, 0, sc->st_minsync,
			    (sc->maxoff > 31) ? 31 : sc->maxoff);
			return SIOP_NEG_MSGOUT;
		} else {
			siop_target->status = TARST_OK;
			siop_update_xfer_mode(sc, target);
			return SIOP_NEG_ACK;
		}
	} else {
		/* target initiated wide negotiation */
		if (tables->msg_in[3] >= MSG_EXT_WDTR_BUS_16_BIT
		    && (siop_target->flags & TARF_WIDE)) {
			siop_target->flags |= TARF_ISWIDE;
			sc->targets[target]->id |= SCNTL3_EWS << 24;
		} else {
			siop_target->flags &= ~TARF_ISWIDE;
			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
		}
		tables->id = htole32(sc->targets[target]->id);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		/*
		 * we did reset wide parameters, so fall back to async,
		 * but don't schedule a sync neg, the target should initiate it
		 */
		siop_target->status = TARST_OK;
		siop_target->offset = siop_target->period = 0;
		siop_update_xfer_mode(sc, target);
		siop_wdtr_msg(siop_cmd, 0, (siop_target->flags & TARF_ISWIDE) ?
		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT);
		return SIOP_NEG_MSGOUT;
	}
}

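/*
 * Note on sc->targets[target]->id: this 32-bit word caches the per-target
 * register values programmed by the negotiation code, SCNTL3 in bits 31-24,
 * SXFER (sync offset) in bits 15-8 and SCNTL4 in bits 7-0, which is why the
 * handlers mask with SCNTL3_*, SXFER_MO_MASK and ~0xff before updating it
 * and writing the individual registers.
 */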
int
siop_ppr_neg(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct siop_common_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->xs_periph->periph_target;
	struct siop_common_xfer *tables = siop_cmd->siop_tables;
	int sync, offset, options, scf = 0;
	int i;

#ifdef DEBUG_NEG
	printf("%s: answer on ppr negotiation:", sc->sc_dev.dv_xname);
	for (i = 0; i < 8; i++)
		printf(" 0x%x", tables->msg_in[i]);
	printf("\n");
#endif

	if (siop_target->status == TARST_PPR_NEG) {
		/* we initiated PPR negotiation */
		sync = tables->msg_in[3];
		offset = tables->msg_in[5];
		options = tables->msg_in[7];
		if (options != MSG_EXT_PPR_DT) {
			/* shouldn't happen */
			printf("%s: ppr negotiation for target %d: "
			    "no DT option\n", sc->sc_dev.dv_xname, target);
			siop_target->status = TARST_ASYNC;
			siop_target->flags &= ~(TARF_DT | TARF_ISDT);
			siop_target->offset = 0;
			siop_target->period = 0;
			goto reject;
		}

		if (offset > sc->maxoff || sync < sc->dt_minsync ||
		    sync > sc->dt_maxsync) {
			printf("%s: ppr negotiation for target %d: "
			    "offset (%d) or sync (%d) out of range\n",
			    sc->sc_dev.dv_xname, target, offset, sync);
			/* should not happen */
			siop_target->offset = 0;
			siop_target->period = 0;
			goto reject;
		} else {
			for (i = 0; i <
			    sizeof(dt_scf_period) / sizeof(dt_scf_period[0]);
			    i++) {
				if (sc->clock_period != dt_scf_period[i].clock)
					continue;
				if (dt_scf_period[i].period == sync) {
					/* ok, found it. we now are sync. */
					siop_target->offset = offset;
					siop_target->period = sync;
					scf = dt_scf_period[i].scf;
					siop_target->flags |= TARF_ISDT;
				}
			}
			if ((siop_target->flags & TARF_ISDT) == 0) {
				printf("%s: ppr negotiation for target %d: "
				    "sync (%d) incompatible with adapter\n",
				    sc->sc_dev.dv_xname, target, sync);
				/*
				 * we didn't find it in our table, do async,
				 * send reject msg, start SDTR/WDTR neg
				 */
				siop_target->status = TARST_ASYNC;
				siop_target->flags &= ~(TARF_DT | TARF_ISDT);
				siop_target->offset = 0;
				siop_target->period = 0;
				goto reject;
			}
		}
		if (tables->msg_in[6] != 1) {
			printf("%s: ppr negotiation for target %d: "
			    "transfer width (%d) incompatible with dt\n",
			    sc->sc_dev.dv_xname, target, tables->msg_in[6]);
			/* DT mode can only be done with wide transfers */
			siop_target->status = TARST_ASYNC;
			goto reject;
		}
		siop_target->flags |= TARF_ISWIDE;
		sc->targets[target]->id |= (SCNTL3_EWS << 24);
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id |= scf << (24 + SCNTL3_SCF_SHIFT);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
		sc->targets[target]->id |=
		    (siop_target->offset & SXFER_MO_MASK) << 8;
		sc->targets[target]->id &= ~0xff;
		sc->targets[target]->id |= SCNTL4_U3EN;
		siop_target->status = TARST_OK;
		siop_update_xfer_mode(sc, target);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
		    (sc->targets[target]->id >> 8) & 0xff);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4,
		    sc->targets[target]->id & 0xff);
		return SIOP_NEG_ACK;
	} else {
		/* target initiated PPR negotiation, shouldn't happen */
		printf("%s: rejecting invalid PPR negotiation from "
		    "target %d\n", sc->sc_dev.dv_xname, target);
reject:
		tables->t_msgout.count = htole32(1);
		tables->msg_out[0] = MSG_MESSAGE_REJECT;
		return SIOP_NEG_MSGOUT;
	}
}

int
siop_sdtr_neg(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct siop_common_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->xs_periph->periph_target;
	int sync, maxoffset, offset, i;
	int send_msgout = 0;
	struct siop_common_xfer *tables = siop_cmd->siop_tables;

	/* limit to Ultra/2 parameters, need PPR for Ultra/3 */
	maxoffset = (sc->maxoff > 31) ? 31 : sc->maxoff;

	sync = tables->msg_in[3];
	offset = tables->msg_in[4];

	if (siop_target->status == TARST_SYNC_NEG) {
		/* we initiated sync negotiation */
		siop_target->status = TARST_OK;
#ifdef DEBUG
		printf("sdtr: sync %d offset %d\n", sync, offset);
#endif
		if (offset > maxoffset || sync < sc->st_minsync ||
		    sync > sc->st_maxsync)
			goto reject;
		for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
		    i++) {
			if (sc->clock_period != scf_period[i].clock)
				continue;
			if (scf_period[i].period == sync) {
				/* ok, found it. we now are sync. */
				siop_target->offset = offset;
				siop_target->period = sync;
				sc->targets[target]->id &=
				    ~(SCNTL3_SCF_MASK << 24);
				sc->targets[target]->id |= scf_period[i].scf
				    << (24 + SCNTL3_SCF_SHIFT);
				if (sync < 25 && /* Ultra */
				    (sc->features & SF_BUS_ULTRA3) == 0)
					sc->targets[target]->id |=
					    SCNTL3_ULTRA << 24;
				else
					sc->targets[target]->id &=
					    ~(SCNTL3_ULTRA << 24);
				sc->targets[target]->id &=
				    ~(SXFER_MO_MASK << 8);
				sc->targets[target]->id |=
				    (offset & SXFER_MO_MASK) << 8;
				sc->targets[target]->id &= ~0xff; /* scntl4 */
				goto end;
			}
		}
		/*
		 * we didn't find it in our table, do async and send reject
		 * msg
		 */
reject:
		send_msgout = 1;
		tables->t_msgout.count = htole32(1);
		tables->msg_out[0] = MSG_MESSAGE_REJECT;
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
		sc->targets[target]->id &= ~0xff; /* scntl4 */
		siop_target->offset = siop_target->period = 0;
	} else { /* target initiated sync neg */
#ifdef DEBUG
		printf("sdtr (target): sync %d offset %d\n", sync, offset);
#endif
		if (offset == 0 || sync > sc->st_maxsync) { /* async */
			goto async;
		}
		if (offset > maxoffset)
			offset = maxoffset;
		if (sync < sc->st_minsync)
			sync = sc->st_minsync;
		/* look for sync period */
		for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
		    i++) {
			if (sc->clock_period != scf_period[i].clock)
				continue;
			if (scf_period[i].period == sync) {
				/* ok, found it. we now are sync. */
				siop_target->offset = offset;
				siop_target->period = sync;
				sc->targets[target]->id &=
				    ~(SCNTL3_SCF_MASK << 24);
				sc->targets[target]->id |= scf_period[i].scf
				    << (24 + SCNTL3_SCF_SHIFT);
				if (sync < 25 && /* Ultra */
				    (sc->features & SF_BUS_ULTRA3) == 0)
					sc->targets[target]->id |=
					    SCNTL3_ULTRA << 24;
				else
					sc->targets[target]->id &=
					    ~(SCNTL3_ULTRA << 24);
				sc->targets[target]->id &=
				    ~(SXFER_MO_MASK << 8);
				sc->targets[target]->id |=
				    (offset & SXFER_MO_MASK) << 8;
				sc->targets[target]->id &= ~0xff; /* scntl4 */
				siop_sdtr_msg(siop_cmd, 0, sync, offset);
				send_msgout = 1;
				goto end;
			}
		}
async:
		siop_target->offset = siop_target->period = 0;
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
		sc->targets[target]->id &= ~0xff; /* scntl4 */
		siop_sdtr_msg(siop_cmd, 0, 0, 0);
		send_msgout = 1;
	}
end:
	if (siop_target->status == TARST_OK)
		siop_update_xfer_mode(sc, target);
#ifdef DEBUG
	printf("id now 0x%x\n", sc->targets[target]->id);
#endif
	tables->id = htole32(sc->targets[target]->id);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
	    (sc->targets[target]->id >> 24) & 0xff);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
	    (sc->targets[target]->id >> 8) & 0xff);
	if (send_msgout) {
		return SIOP_NEG_MSGOUT;
	} else {
		return SIOP_NEG_ACK;
	}
}

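/*
 * Build the extended negotiation messages in msg_out at the given byte
 * offset, following the SCSI extended message format: MSG_EXTENDED, length,
 * message code, then the arguments (period/offset for SDTR, width for WDTR,
 * period/offset/width/options for PPR).  For example, an SDTR built at
 * offset 0 is the 5-byte sequence
 * { MSG_EXTENDED, MSG_EXT_SDTR_LEN, MSG_EXT_SDTR, ssync, soff }.
 * t_msgout.count is updated to cover everything up to and including the
 * new message.
 */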
void
siop_sdtr_msg(siop_cmd, offset, ssync, soff)
	struct siop_common_cmd *siop_cmd;
	int offset;
	int ssync, soff;
{
	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_SDTR_LEN;
	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_SDTR;
	siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
	siop_cmd->siop_tables->msg_out[offset + 4] = soff;
	siop_cmd->siop_tables->t_msgout.count =
	    htole32(offset + MSG_EXT_SDTR_LEN + 2);
}

void
siop_wdtr_msg(siop_cmd, offset, wide)
	struct siop_common_cmd *siop_cmd;
	int offset;
	int wide;
{
	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_WDTR_LEN;
	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_WDTR;
	siop_cmd->siop_tables->msg_out[offset + 3] = wide;
	siop_cmd->siop_tables->t_msgout.count =
	    htole32(offset + MSG_EXT_WDTR_LEN + 2);
}

void
siop_ppr_msg(siop_cmd, offset, ssync, soff)
	struct siop_common_cmd *siop_cmd;
	int offset;
	int ssync, soff;
{
	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_PPR_LEN;
	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_PPR;
	siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
	siop_cmd->siop_tables->msg_out[offset + 4] = 0; /* reserved */
	siop_cmd->siop_tables->msg_out[offset + 5] = soff;
	siop_cmd->siop_tables->msg_out[offset + 6] = 1; /* wide */
	siop_cmd->siop_tables->msg_out[offset + 7] = MSG_EXT_PPR_DT;
	siop_cmd->siop_tables->t_msgout.count =
	    htole32(offset + MSG_EXT_PPR_LEN + 2);
}

void
siop_minphys(bp)
	struct buf *bp;
{
	minphys(bp);
}

int
siop_ioctl(chan, cmd, arg, flag, p)
	struct scsipi_channel *chan;
	u_long cmd;
	caddr_t arg;
	int flag;
	struct proc *p;
{
	struct siop_common_softc *sc = (void *)chan->chan_adapter->adapt_dev;

	switch (cmd) {
	case SCBUSIORESET:
		/*
		 * abort the script. This will trigger an interrupt, which will
		 * trigger a bus reset.
		 * We can't safely trigger the reset here as we can't access
		 * the required register while the script is running.
		 */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_ABRT);
		return (0);
	default:
		return (ENOTTY);
	}
}

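/*
 * siop_sdp() patches the active scatter/gather entry (its index is kept by
 * the script in SCRATCHA1) so that addr/count describe only the bytes not
 * yet transferred.  For data-out, the residue read from DBC is corrected
 * for data still sitting in the DMA and SCSI FIFOs.
 */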
void
siop_sdp(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	/* save data pointer. Handle async only for now */
	int offset, dbc, sstat;
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	scr_table_t *table; /* table to patch */

	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
	    == 0)
		return; /* no data pointers to save */
	offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
	if (offset >= SIOP_NSG) {
		printf("%s: bad offset in siop_sdp (%d)\n",
		    sc->sc_dev.dv_xname, offset);
		return;
	}
	table = &siop_cmd->siop_tables->data[offset];
#ifdef DEBUG_DR
	printf("sdp: offset %d count=%d addr=0x%x ", offset,
	    table->count, table->addr);
#endif
	dbc = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DBC) & 0x00ffffff;
	if (siop_cmd->xs->xs_control & XS_CTL_DATA_OUT) {
		if (sc->features & SF_CHIP_DFBC) {
			dbc +=
			    bus_space_read_2(sc->sc_rt, sc->sc_rh, SIOP_DFBC);
		} else {
			/* need to account for stale data in the FIFO */
			int dfifo =
			    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_DFIFO);
			if (sc->features & SF_CHIP_FIFO) {
				dfifo |= (bus_space_read_1(sc->sc_rt, sc->sc_rh,
				    SIOP_CTEST5) & CTEST5_BOMASK) << 8;
				dbc += (dfifo - (dbc & 0x3ff)) & 0x3ff;
			} else {
				dbc += (dfifo - (dbc & 0x7f)) & 0x7f;
			}
		}
		sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SSTAT0);
		if (sstat & SSTAT0_OLF)
			dbc++;
		if ((sstat & SSTAT0_ORF) && (sc->features & SF_CHIP_DFBC) == 0)
			dbc++;
		if (siop_cmd->siop_target->flags & TARF_ISWIDE) {
			sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh,
			    SIOP_SSTAT2);
			if (sstat & SSTAT2_OLF1)
				dbc++;
			if ((sstat & SSTAT2_ORF1) &&
			    (sc->features & SF_CHIP_DFBC) == 0)
				dbc++;
		}
		/* clear the FIFO */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) |
		    CTEST3_CLF);
	}
	table->addr =
	    htole32(le32toh(table->addr) + le32toh(table->count) - dbc);
	table->count = htole32(dbc);
#ifdef DEBUG_DR
	printf("now count=%d addr=0x%x\n", table->count, table->addr);
#endif
}

void
siop_clearfifo(sc)
	struct siop_common_softc *sc;
{
	int timeout = 0;
	int ctest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3);

#ifdef DEBUG_INTR
	printf("DMA fifo not empty !\n");
#endif
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
	    ctest3 | CTEST3_CLF);
	while ((bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) &
	    CTEST3_CLF) != 0) {
		delay(1);
		if (++timeout > 1000) {
			printf("clear fifo failed\n");
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
			    bus_space_read_1(sc->sc_rt, sc->sc_rh,
			    SIOP_CTEST3) & ~CTEST3_CLF);
			return;
		}
	}
}

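/*
 * Handle a SCSI bus mode change (SE/LVD/HVD): wait for DIFFSENSE to
 * stabilise, then program STEST2 for the new mode.  Returns 1 on success,
 * 0 if the mode is invalid or DIFFSENSE never stabilised.
 */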
int
siop_modechange(sc)
	struct siop_common_softc *sc;
{
	int retry;
	int sist0, sist1, stest2;

	for (retry = 0; retry < 5; retry++) {
		/*
		 * datasheet says to wait 100ms and re-read SIST1,
		 * to check that DIFFSENSE is stable.
		 * We may delay() 5 times for 100ms at interrupt time;
		 * hopefully this will not happen often.
		 */
		delay(100000);
		sist0 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST0);
		sist1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST1);
		if (sist1 & SIEN1_SBMC)
			continue; /* we got an irq again */
		sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
		    STEST4_MODE_MASK;
		stest2 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2);
		switch (sc->mode) {
		case STEST4_MODE_DIF:
			printf("%s: switching to differential mode\n",
			    sc->sc_dev.dv_xname);
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 | STEST2_DIF);
			break;
		case STEST4_MODE_SE:
			printf("%s: switching to single-ended mode\n",
			    sc->sc_dev.dv_xname);
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 & ~STEST2_DIF);
			break;
		case STEST4_MODE_LVD:
			printf("%s: switching to LVD mode\n",
			    sc->sc_dev.dv_xname);
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 & ~STEST2_DIF);
			break;
		default:
			printf("%s: invalid SCSI mode 0x%x\n",
			    sc->sc_dev.dv_xname, sc->mode);
			return 0;
		}
		return 1;
	}
	printf("%s: timeout waiting for DIFFSENSE to stabilise\n",
	    sc->sc_dev.dv_xname);
	return 0;
}

void
siop_resetbus(sc)
	struct siop_common_softc *sc;
{
	int scntl1;

	scntl1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1,
	    scntl1 | SCNTL1_RST);
	/* minimum 25 us, more time won't hurt */
	delay(100);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, scntl1);
}

void
siop_update_xfer_mode(sc, target)
	struct siop_common_softc *sc;
	int target;
{
	struct siop_common_target *siop_target = sc->targets[target];
	struct scsipi_xfer_mode xm;

	xm.xm_target = target;
	xm.xm_mode = 0;
	xm.xm_period = 0;
	xm.xm_offset = 0;

	if (siop_target->flags & TARF_ISWIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;
	if (siop_target->period) {
		xm.xm_period = siop_target->period;
		xm.xm_offset = siop_target->offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}
	if (siop_target->flags & TARF_TAG) {
		/*
		 * 1010 workaround: can't do disconnect if not wide,
		 * so can't do tag
		 */
		if ((sc->features & SF_CHIP_GEBUG) == 0 ||
		    (sc->targets[target]->flags & TARF_ISWIDE))
			xm.xm_mode |= PERIPH_CAP_TQING;
	}

	scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, &xm);
}