/*	$OpenBSD: siop_common.c,v 1.31 2008/08/31 17:21:57 miod Exp $ */
/*	$NetBSD: siop_common.c,v 1.37 2005/02/27 00:27:02 perry Exp $	*/

/*
 * Copyright (c) 2000, 2002 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Manuel Bouyer.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/* SYM53c7/8xx PCI-SCSI I/O Processors driver */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/scsiio.h>

#include <machine/endian.h>
#include <machine/bus.h>

#include <scsi/scsi_all.h>
#include <scsi/scsi_message.h>
#include <scsi/scsiconf.h>

#define SIOP_NEEDS_PERIOD_TABLES
#include <dev/ic/siopreg.h>
#include <dev/ic/siopvar_common.h>
#include <dev/ic/siopvar.h>

#undef DEBUG
#undef DEBUG_DR
#undef DEBUG_NEG

int
siop_common_attach(sc)
        struct siop_common_softc *sc;
{
        int error, i;
        bus_dma_segment_t seg;
        int rseg;

        /*
         * Allocate DMA-safe memory for the script and map it.
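         * The SCRIPTS program is executed from this host memory on chips
         * that have no on-board RAM (SF_CHIP_RAM not set).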
         */
        if ((sc->features & SF_CHIP_RAM) == 0) {
                error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE,
                    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
                if (error) {
                        printf("%s: unable to allocate script DMA memory, "
                            "error = %d\n", sc->sc_dev.dv_xname, error);
                        return error;
                }
                error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
                    (caddr_t *)&sc->sc_script,
                    BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
                if (error) {
                        printf("%s: unable to map script DMA memory, "
                            "error = %d\n", sc->sc_dev.dv_xname, error);
                        return error;
                }
                error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1,
                    PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_scriptdma);
                if (error) {
                        printf("%s: unable to create script DMA map, "
                            "error = %d\n", sc->sc_dev.dv_xname, error);
                        return error;
                }
                error = bus_dmamap_load(sc->sc_dmat, sc->sc_scriptdma,
                    sc->sc_script, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
                if (error) {
                        printf("%s: unable to load script DMA map, "
                            "error = %d\n", sc->sc_dev.dv_xname, error);
                        return error;
                }
                sc->sc_scriptaddr =
                    sc->sc_scriptdma->dm_segs[0].ds_addr;
                sc->ram_size = PAGE_SIZE;
        }

        /*
         * sc->sc_link is the template for all device sc_link's
         * for devices attached to this adapter. It is passed to
         * the upper layers in config_found().
         */
        sc->sc_link.adapter_softc = sc;
        sc->sc_link.adapter_buswidth =
            (sc->features & SF_BUS_WIDE) ? 16 : 8;
        sc->sc_link.adapter_target =
            bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCID);
        if (sc->sc_link.adapter_target == 0 ||
            sc->sc_link.adapter_target >=
            sc->sc_link.adapter_buswidth)
                sc->sc_link.adapter_target = SIOP_DEFAULT_TARGET;

        for (i = 0; i < 16; i++)
                sc->targets[i] = NULL;

        /* find min/max sync period for this chip */
        sc->st_maxsync = 0;
        sc->dt_maxsync = 0;
        sc->st_minsync = 255;
        sc->dt_minsync = 255;
        for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]); i++) {
                if (sc->clock_period != scf_period[i].clock)
                        continue;
                if (sc->st_maxsync < scf_period[i].period)
                        sc->st_maxsync = scf_period[i].period;
                if (sc->st_minsync > scf_period[i].period)
                        sc->st_minsync = scf_period[i].period;
        }
        if (sc->st_maxsync == 0 || sc->st_minsync == 255)
                panic("siop: can't find my sync parameters");
        for (i = 0; i < sizeof(dt_scf_period) / sizeof(dt_scf_period[0]); i++) {
                if (sc->clock_period != dt_scf_period[i].clock)
                        continue;
                if (sc->dt_maxsync < dt_scf_period[i].period)
                        sc->dt_maxsync = dt_scf_period[i].period;
                if (sc->dt_minsync > dt_scf_period[i].period)
                        sc->dt_minsync = dt_scf_period[i].period;
        }
        if (sc->dt_maxsync == 0 || sc->dt_minsync == 255)
                panic("siop: can't find my sync parameters");
        return 0;
}

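/*
 * siop_common_reset: soft-reset the chip and reprogram its basic operating
 * registers (interrupt enables, clocking, selection timeout, host ID),
 * then call the chip-specific sc_reset hook.
 */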
void
siop_common_reset(sc)
        struct siop_common_softc *sc;
{
        u_int32_t stest3;

        /* reset the chip */
        bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_SRST);
        delay(1000);
        bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, 0);

        /* init registers */
        bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL0,
            SCNTL0_ARB_MASK | SCNTL0_EPC | SCNTL0_AAP);
        bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, 0);
        bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3, sc->clock_div);
        bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER, 0);
        bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DIEN, 0xff);
        bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN0,
            0xff & ~(SIEN0_CMP | SIEN0_SEL | SIEN0_RSL));
        bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN1,
            0xff & ~(SIEN1_HTH | SIEN1_GEN));
        bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2, 0);
        bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, STEST3_TE);
        bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STIME0,
            (0xb << STIME0_SEL_SHIFT));
        bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCID,
            sc->sc_link.adapter_target | SCID_RRE);
        bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_RESPID0,
            1 << sc->sc_link.adapter_target);
        bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DCNTL,
            (sc->features & SF_CHIP_PF) ? DCNTL_COM | DCNTL_PFEN : DCNTL_COM);
        if (sc->features & SF_CHIP_AAIP)
                bus_space_write_1(sc->sc_rt, sc->sc_rh,
                    SIOP_AIPCNTL1, AIPCNTL1_DIS);

        /* enable clock doubler or quadrupler if appropriate */
        if (sc->features & (SF_CHIP_DBLR | SF_CHIP_QUAD)) {
                stest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3);
                bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
                    STEST1_DBLEN);
                if (sc->features & SF_CHIP_QUAD) {
                        /* wait for PLL to lock */
                        while ((bus_space_read_1(sc->sc_rt, sc->sc_rh,
                            SIOP_STEST4) & STEST4_LOCK) == 0)
                                delay(10);
                } else {
                        /* data sheet says 20us - more won't hurt */
                        delay(100);
                }
                /* halt scsi clock, select doubler/quad, restart clock */
                bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3,
                    stest3 | STEST3_HSC);
                bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
                    STEST1_DBLEN | STEST1_DBLSEL);
                bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, stest3);
        } else {
                bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, 0);
        }
        if (sc->features & SF_CHIP_FIFO)
                bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5,
                    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5) |
                    CTEST5_DFS);
        if (sc->features & SF_CHIP_LED0) {
                /* Set GPIO0 as output if software LED control is required */
                bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL,
                    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL) & 0xfe);
        }
        if (sc->features & SF_BUS_ULTRA3) {
                /* reset SCNTL4 */
                bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4, 0);
        }
        sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
            STEST4_MODE_MASK;

        /*
         * initialise the RAM. Without this we may get SCSI gross errors on
         * the 1010
         */
        if (sc->features & SF_CHIP_RAM)
                bus_space_set_region_4(sc->sc_ramt, sc->sc_ramh,
                    0, 0, sc->ram_size / 4);
        sc->sc_reset(sc);
}

/* prepare tables before sending a cmd */
void
siop_setuptables(siop_cmd)
        struct siop_common_cmd *siop_cmd;
{
        int i;
        struct siop_common_softc *sc = siop_cmd->siop_sc;
        struct scsi_xfer *xs = siop_cmd->xs;
        int target = xs->sc_link->target;
        int lun = xs->sc_link->lun;
        int msgoffset = 1;
        int *targ_flags = &sc->targets[target]->flags;
        int quirks;

        siop_cmd->siop_tables->id = siop_htoc32(sc, sc->targets[target]->id);
        memset(siop_cmd->siop_tables->msg_out, 0,
            sizeof(siop_cmd->siop_tables->msg_out));
        /* request sense doesn't disconnect */
        if (siop_cmd->status == CMDST_SENSE)
                siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
        else if ((sc->features & SF_CHIP_GEBUG) &&
            (sc->targets[target]->flags & TARF_ISWIDE) == 0)
                /*
                 * 1010 bug: it seems that the 1010 has problems with reselect
                 * when not in wide mode (generates false SCSI gross errors).
                 * The FreeBSD sym driver has comments about it but their
                 * workaround (disable SCSI gross error reporting) doesn't
                 * work with my adapter. So disable disconnect when not
                 * wide.
                 */
                siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
        else
                siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 1);
        siop_cmd->siop_tables->t_msgout.count = siop_htoc32(sc, msgoffset);
        if (sc->targets[target]->status == TARST_ASYNC) {
                /* preserve TARF_DT; we don't set it here */
                *targ_flags &= TARF_DT;
                quirks = xs->sc_link->quirks;

                if ((quirks & SDEV_NOTAGS) == 0)
                        *targ_flags |= TARF_TAG;
                if (((quirks & SDEV_NOWIDE) == 0) &&
                    (sc->features & SF_BUS_WIDE))
                        *targ_flags |= TARF_WIDE;
                if ((quirks & SDEV_NOSYNC) == 0)
                        *targ_flags |= TARF_SYNC;

                if ((sc->features & SF_CHIP_GEBUG) &&
                    (*targ_flags & TARF_WIDE) == 0)
                        /*
                         * 1010 workaround: can't do disconnect if not wide,
                         * so can't do tag
                         */
                        *targ_flags &= ~TARF_TAG;

                /* Safe to call siop_add_dev() multiple times */
                siop_add_dev((struct siop_softc *)sc, target, lun);

                if ((*targ_flags & TARF_DT) &&
                    (sc->mode == STEST4_MODE_LVD)) {
                        sc->targets[target]->status = TARST_PPR_NEG;
                        siop_ppr_msg(siop_cmd, msgoffset, sc->dt_minsync,
                            sc->maxoff);
                } else if (*targ_flags & TARF_WIDE) {
                        sc->targets[target]->status = TARST_WIDE_NEG;
                        siop_wdtr_msg(siop_cmd, msgoffset,
                            MSG_EXT_WDTR_BUS_16_BIT);
                } else if (*targ_flags & TARF_SYNC) {
                        sc->targets[target]->status = TARST_SYNC_NEG;
                        siop_sdtr_msg(siop_cmd, msgoffset, sc->st_minsync,
                            (sc->maxoff > 31) ? 31 : sc->maxoff);
                } else {
                        sc->targets[target]->status = TARST_OK;
                        siop_update_xfer_mode(sc, target);
                }
        } else if (sc->targets[target]->status == TARST_OK &&
            (*targ_flags & TARF_TAG) &&
            siop_cmd->status != CMDST_SENSE) {
                siop_cmd->flags |= CMDFL_TAG;
        }
        siop_cmd->siop_tables->status =
            siop_htoc32(sc, SCSI_SIOP_NOSTATUS); /* set invalid status */

        if ((xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) ||
            siop_cmd->status == CMDST_SENSE) {
                bzero(siop_cmd->siop_tables->data,
                    sizeof(siop_cmd->siop_tables->data));
                for (i = 0; i < siop_cmd->dmamap_data->dm_nsegs; i++) {
                        siop_cmd->siop_tables->data[i].count =
                            siop_htoc32(sc,
                            siop_cmd->dmamap_data->dm_segs[i].ds_len);
                        siop_cmd->siop_tables->data[i].addr =
                            siop_htoc32(sc,
                            siop_cmd->dmamap_data->dm_segs[i].ds_addr);
                }
        }
}

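/*
 * Handle a WDTR message from the target, either as the answer to a
 * negotiation we started or as a target-initiated negotiation, and
 * update the wide transfer parameters accordingly.
 */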
int
siop_wdtr_neg(siop_cmd)
        struct siop_common_cmd *siop_cmd;
{
        struct siop_common_softc *sc = siop_cmd->siop_sc;
        struct siop_common_target *siop_target = siop_cmd->siop_target;
        int target = siop_cmd->xs->sc_link->target;
        struct siop_common_xfer *tables = siop_cmd->siop_tables;

        if (siop_target->status == TARST_WIDE_NEG) {
                /* we initiated wide negotiation */
                switch (tables->msg_in[3]) {
                case MSG_EXT_WDTR_BUS_8_BIT:
                        siop_target->flags &= ~TARF_ISWIDE;
                        sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
                        break;
                case MSG_EXT_WDTR_BUS_16_BIT:
                        if (siop_target->flags & TARF_WIDE) {
                                siop_target->flags |= TARF_ISWIDE;
                                sc->targets[target]->id |= (SCNTL3_EWS << 24);
                                break;
                        }
                        /* FALLTHROUGH */
                default:
                        /*
                         * hum, we got more than what we can handle, shouldn't
                         * happen. Reject, and stay async.
                         */
                        siop_target->flags &= ~TARF_ISWIDE;
                        siop_target->status = TARST_OK;
                        siop_target->offset = siop_target->period = 0;
                        siop_update_xfer_mode(sc, target);
                        printf("%s: rejecting invalid wide negotiation from "
                            "target %d (%d)\n", sc->sc_dev.dv_xname, target,
                            tables->msg_in[3]);
                        tables->t_msgout.count = siop_htoc32(sc, 1);
                        tables->msg_out[0] = MSG_MESSAGE_REJECT;
                        return SIOP_NEG_MSGOUT;
                }
                tables->id = siop_htoc32(sc, sc->targets[target]->id);
                bus_space_write_1(sc->sc_rt, sc->sc_rh,
                    SIOP_SCNTL3,
                    (sc->targets[target]->id >> 24) & 0xff);
                /* we now need to do sync */
                if (siop_target->flags & TARF_SYNC) {
                        siop_target->status = TARST_SYNC_NEG;
                        siop_sdtr_msg(siop_cmd, 0, sc->st_minsync,
                            (sc->maxoff > 31) ? 31 : sc->maxoff);
                        return SIOP_NEG_MSGOUT;
                } else {
                        siop_target->status = TARST_OK;
                        siop_update_xfer_mode(sc, target);
                        return SIOP_NEG_ACK;
                }
        } else {
                /* target initiated wide negotiation */
                if (tables->msg_in[3] >= MSG_EXT_WDTR_BUS_16_BIT
                    && (siop_target->flags & TARF_WIDE)) {
                        siop_target->flags |= TARF_ISWIDE;
                        sc->targets[target]->id |= SCNTL3_EWS << 24;
                } else {
                        siop_target->flags &= ~TARF_ISWIDE;
                        sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
                }
                tables->id = siop_htoc32(sc, sc->targets[target]->id);
                bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
                    (sc->targets[target]->id >> 24) & 0xff);
                /*
                 * we did reset wide parameters, so fall back to async,
                 * but don't schedule a sync neg, target should initiate it
                 */
                siop_target->status = TARST_OK;
                siop_target->offset = siop_target->period = 0;
                siop_update_xfer_mode(sc, target);
                siop_wdtr_msg(siop_cmd, 0, (siop_target->flags & TARF_ISWIDE) ?
                    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT);
                return SIOP_NEG_MSGOUT;
        }
}

/*
 * Handle a PPR (parallel protocol request) answer from the target;
 * this is how DT (Ultra/3) transfers get negotiated.
 */
int
siop_ppr_neg(siop_cmd)
        struct siop_common_cmd *siop_cmd;
{
        struct siop_common_softc *sc = siop_cmd->siop_sc;
        struct siop_common_target *siop_target = siop_cmd->siop_target;
        int target = siop_cmd->xs->sc_link->target;
        struct siop_common_xfer *tables = siop_cmd->siop_tables;
        int sync, offset, options, scf = 0;
        int i;

#ifdef DEBUG_NEG
        printf("%s: answer on ppr negotiation:", sc->sc_dev.dv_xname);
        for (i = 0; i < 8; i++)
                printf(" 0x%x", tables->msg_in[i]);
        printf("\n");
#endif

        if (siop_target->status == TARST_PPR_NEG) {
                /* we initiated PPR negotiation */
                sync = tables->msg_in[3];
                offset = tables->msg_in[5];
                options = tables->msg_in[7];
                if (options != MSG_EXT_PPR_PROT_DT) {
                        /* shouldn't happen */
                        printf("%s: ppr negotiation for target %d: "
                            "no DT option\n", sc->sc_dev.dv_xname, target);
                        siop_target->status = TARST_ASYNC;
                        siop_target->flags &= ~(TARF_DT | TARF_ISDT);
                        siop_target->offset = 0;
                        siop_target->period = 0;
                        goto reject;
                }

                if (offset > sc->maxoff || sync < sc->dt_minsync ||
                    sync > sc->dt_maxsync) {
                        printf("%s: ppr negotiation for target %d: "
                            "offset (%d) or sync (%d) out of range\n",
                            sc->sc_dev.dv_xname, target, offset, sync);
                        /* should not happen */
                        siop_target->status = TARST_ASYNC;
                        siop_target->flags &= ~(TARF_DT | TARF_ISDT);
                        siop_target->offset = 0;
                        siop_target->period = 0;
                        goto reject;
                } else {
                        for (i = 0; i <
                            sizeof(dt_scf_period) / sizeof(dt_scf_period[0]);
                            i++) {
                                if (sc->clock_period != dt_scf_period[i].clock)
                                        continue;
                                if (dt_scf_period[i].period == sync) {
                                        /* ok, found it. we are now in sync. */
                                        siop_target->offset = offset;
                                        siop_target->period = sync;
                                        scf = dt_scf_period[i].scf;
                                        siop_target->flags |= TARF_ISDT;
                                }
                        }
                        if ((siop_target->flags & TARF_ISDT) == 0) {
                                printf("%s: ppr negotiation for target %d: "
                                    "sync (%d) incompatible with adapter\n",
                                    sc->sc_dev.dv_xname, target, sync);
                                /*
                                 * we didn't find it in our table, do async,
                                 * send reject msg, start SDTR/WDTR neg
                                 */
                                siop_target->status = TARST_ASYNC;
                                siop_target->flags &= ~(TARF_DT | TARF_ISDT);
                                siop_target->offset = 0;
                                siop_target->period = 0;
                                goto reject;
                        }
                }
                if (tables->msg_in[6] != 1) {
                        printf("%s: ppr negotiation for target %d: "
                            "transfer width (%d) incompatible with dt\n",
                            sc->sc_dev.dv_xname, target, tables->msg_in[6]);
                        /* DT mode can only be done with wide transfers */
                        siop_target->status = TARST_ASYNC;
                        siop_target->flags &= ~(TARF_DT | TARF_ISDT);
                        siop_target->offset = 0;
                        siop_target->period = 0;
                        goto reject;
                }
                siop_target->flags |= TARF_ISWIDE;
                sc->targets[target]->id |= (SCNTL3_EWS << 24);
                sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
                sc->targets[target]->id |= scf << (24 + SCNTL3_SCF_SHIFT);
                sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
                sc->targets[target]->id |=
                    (siop_target->offset & SXFER_MO_MASK) << 8;
                sc->targets[target]->id &= ~0xff;
                sc->targets[target]->id |= SCNTL4_U3EN;
                siop_target->status = TARST_OK;
                siop_update_xfer_mode(sc, target);
                bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
                    (sc->targets[target]->id >> 24) & 0xff);
                bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
                    (sc->targets[target]->id >> 8) & 0xff);
                bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4,
                    sc->targets[target]->id & 0xff);
                return SIOP_NEG_ACK;
        } else {
                /* target initiated PPR negotiation, shouldn't happen */
                printf("%s: rejecting invalid PPR negotiation from "
                    "target %d\n", sc->sc_dev.dv_xname, target);
reject:
                tables->t_msgout.count = siop_htoc32(sc, 1);
                tables->msg_out[0] = MSG_MESSAGE_REJECT;
                return SIOP_NEG_MSGOUT;
        }
}

/*
 * Handle an SDTR message from the target, either as the answer to a
 * negotiation we started or as a target-initiated negotiation.
 */
int
siop_sdtr_neg(siop_cmd)
        struct siop_common_cmd *siop_cmd;
{
        struct siop_common_softc *sc = siop_cmd->siop_sc;
        struct siop_common_target *siop_target = siop_cmd->siop_target;
        int target = siop_cmd->xs->sc_link->target;
        int sync, maxoffset, offset, i;
        int send_msgout = 0;
        struct siop_common_xfer *tables = siop_cmd->siop_tables;

        /* limit to Ultra/2 parameters, need PPR for Ultra/3 */
        maxoffset = (sc->maxoff > 31) ? 31 : sc->maxoff;

        sync = tables->msg_in[3];
        offset = tables->msg_in[4];

        if (siop_target->status == TARST_SYNC_NEG) {
                /* we initiated sync negotiation */
                siop_target->status = TARST_OK;
#ifdef DEBUG
                printf("sdtr: sync %d offset %d\n", sync, offset);
#endif
                if (offset > maxoffset || sync < sc->st_minsync ||
                    sync > sc->st_maxsync)
                        goto reject;
                for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
                    i++) {
                        if (sc->clock_period != scf_period[i].clock)
                                continue;
                        if (scf_period[i].period == sync) {
                                /* ok, found it. we are now in sync. */
                                siop_target->offset = offset;
                                siop_target->period = sync;
                                sc->targets[target]->id &=
                                    ~(SCNTL3_SCF_MASK << 24);
                                sc->targets[target]->id |= scf_period[i].scf
                                    << (24 + SCNTL3_SCF_SHIFT);
                                if (sync < 25 && /* Ultra */
                                    (sc->features & SF_BUS_ULTRA3) == 0)
                                        sc->targets[target]->id |=
                                            SCNTL3_ULTRA << 24;
                                else
                                        sc->targets[target]->id &=
                                            ~(SCNTL3_ULTRA << 24);
                                sc->targets[target]->id &=
                                    ~(SXFER_MO_MASK << 8);
                                sc->targets[target]->id |=
                                    (offset & SXFER_MO_MASK) << 8;
                                sc->targets[target]->id &= ~0xff; /* scntl4 */
                                goto end;
                        }
                }
                /*
                 * we didn't find it in our table, do async and send reject
                 * msg
                 */
reject:
                send_msgout = 1;
                tables->t_msgout.count = siop_htoc32(sc, 1);
                tables->msg_out[0] = MSG_MESSAGE_REJECT;
                sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
                sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
                sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
                sc->targets[target]->id &= ~0xff; /* scntl4 */
                siop_target->offset = siop_target->period = 0;
        } else { /* target initiated sync neg */
#ifdef DEBUG
                printf("sdtr (target): sync %d offset %d\n", sync, offset);
#endif
                if (offset == 0 || sync > sc->st_maxsync) { /* async */
                        goto async;
                }
                if (offset > maxoffset)
                        offset = maxoffset;
                if (sync < sc->st_minsync)
                        sync = sc->st_minsync;
                /* look for sync period */
                for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
                    i++) {
                        if (sc->clock_period != scf_period[i].clock)
                                continue;
                        if (scf_period[i].period == sync) {
                                /* ok, found it. we are now in sync. */
                                siop_target->offset = offset;
                                siop_target->period = sync;
                                sc->targets[target]->id &=
                                    ~(SCNTL3_SCF_MASK << 24);
                                sc->targets[target]->id |= scf_period[i].scf
                                    << (24 + SCNTL3_SCF_SHIFT);
                                if (sync < 25 && /* Ultra */
                                    (sc->features & SF_BUS_ULTRA3) == 0)
                                        sc->targets[target]->id |=
                                            SCNTL3_ULTRA << 24;
                                else
                                        sc->targets[target]->id &=
                                            ~(SCNTL3_ULTRA << 24);
                                sc->targets[target]->id &=
                                    ~(SXFER_MO_MASK << 8);
                                sc->targets[target]->id |=
                                    (offset & SXFER_MO_MASK) << 8;
                                sc->targets[target]->id &= ~0xff; /* scntl4 */
                                siop_sdtr_msg(siop_cmd, 0, sync, offset);
                                send_msgout = 1;
                                goto end;
                        }
                }
async:
                siop_target->offset = siop_target->period = 0;
                sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
                sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
                sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
                sc->targets[target]->id &= ~0xff; /* scntl4 */
                siop_sdtr_msg(siop_cmd, 0, 0, 0);
                send_msgout = 1;
        }
end:
        if (siop_target->status == TARST_OK)
                siop_update_xfer_mode(sc, target);
#ifdef DEBUG
        printf("id now 0x%x\n", sc->targets[target]->id);
#endif
        tables->id = siop_htoc32(sc, sc->targets[target]->id);
        bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
            (sc->targets[target]->id >> 24) & 0xff);
        bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
            (sc->targets[target]->id >> 8) & 0xff);
        if (send_msgout) {
                return SIOP_NEG_MSGOUT;
        } else {
                return SIOP_NEG_ACK;
        }
}

void
siop_sdtr_msg(siop_cmd, offset, ssync, soff)
        struct siop_common_cmd *siop_cmd;
        int offset;
        int ssync, soff;
{
        siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
        siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_SDTR_LEN;
        siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_SDTR;
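        /* sync period factor and REQ/ACK offset */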
        siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
        siop_cmd->siop_tables->msg_out[offset + 4] = soff;
        siop_cmd->siop_tables->t_msgout.count =
            siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_SDTR_LEN + 2);
}

void
siop_wdtr_msg(siop_cmd, offset, wide)
        struct siop_common_cmd *siop_cmd;
        int offset;
        int wide;
{
        siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
        siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_WDTR_LEN;
        siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_WDTR;
        siop_cmd->siop_tables->msg_out[offset + 3] = wide;
        siop_cmd->siop_tables->t_msgout.count =
            siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_WDTR_LEN + 2);
}

void
siop_ppr_msg(siop_cmd, offset, ssync, soff)
        struct siop_common_cmd *siop_cmd;
        int offset;
        int ssync, soff;
{
        siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
        siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_PPR_LEN;
        siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_PPR;
        siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
        siop_cmd->siop_tables->msg_out[offset + 4] = 0; /* reserved */
        siop_cmd->siop_tables->msg_out[offset + 5] = soff;
        siop_cmd->siop_tables->msg_out[offset + 6] = 1; /* wide */
        siop_cmd->siop_tables->msg_out[offset + 7] = MSG_EXT_PPR_PROT_DT;
        siop_cmd->siop_tables->t_msgout.count =
            siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_PPR_LEN + 2);
}

void
siop_minphys(bp)
        struct buf *bp;
{
        if (bp->b_bcount > SIOP_MAXFER)
                bp->b_bcount = SIOP_MAXFER;

        minphys(bp);
}

void
siop_ma(siop_cmd)
        struct siop_common_cmd *siop_cmd;
{
        int offset, dbc, sstat;
        struct siop_common_softc *sc = siop_cmd->siop_sc;
        scr_table_t *table; /* table with partial xfer */

        /*
         * compute how much of the current table didn't get handled when
         * a phase mismatch occurs
         */
        if ((siop_cmd->xs->flags & (SCSI_DATA_OUT | SCSI_DATA_IN))
            == 0)
                return; /* no valid data transfer */

        offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
        if (offset >= SIOP_NSG) {
                printf("%s: bad offset in siop_ma (%d)\n",
                    sc->sc_dev.dv_xname, offset);
                return;
        }
        table = &siop_cmd->siop_tables->data[offset];
#ifdef DEBUG_DR
        printf("siop_ma: offset %d count=%d addr=0x%x ", offset,
            table->count, table->addr);
#endif
        dbc = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DBC) & 0x00ffffff;
        if (siop_cmd->xs->flags & SCSI_DATA_OUT) {
                if (sc->features & SF_CHIP_DFBC) {
                        dbc +=
                            bus_space_read_2(sc->sc_rt, sc->sc_rh, SIOP_DFBC);
                } else {
                        /* need to account for stale data in the FIFO */
                        int dfifo =
                            bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_DFIFO);
                        if (sc->features & SF_CHIP_FIFO) {
                                dfifo |= (bus_space_read_1(sc->sc_rt, sc->sc_rh,
                                    SIOP_CTEST5) & CTEST5_BOMASK) << 8;
                                dbc += (dfifo - (dbc & 0x3ff)) & 0x3ff;
                        } else {
                                dbc += (dfifo - (dbc & 0x7f)) & 0x7f;
                        }
                }
                sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SSTAT0);
                if (sstat & SSTAT0_OLF)
                        dbc++;
                if ((sstat & SSTAT0_ORF) && (sc->features & SF_CHIP_DFBC) == 0)
                        dbc++;
                if (siop_cmd->siop_target->flags & TARF_ISWIDE) {
                        sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh,
                            SIOP_SSTAT2);
                        if (sstat & SSTAT2_OLF1)
                                dbc++;
                        if ((sstat & SSTAT2_ORF1) &&
                            (sc->features & SF_CHIP_DFBC) == 0)
                                dbc++;
                }
                /* clear the FIFO */
                bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
                    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) |
                    CTEST3_CLF);
        }
        siop_cmd->flags |= CMDFL_RESID;
        siop_cmd->resid = dbc;
}

void
siop_sdp(siop_cmd, offset)
        struct siop_common_cmd *siop_cmd;
        int offset;
{
        struct siop_common_softc *sc = siop_cmd->siop_sc;
        scr_table_t *table;

        if ((siop_cmd->xs->flags & (SCSI_DATA_OUT | SCSI_DATA_IN))
            == 0)
                return; /* no data pointers to save */

        /*
         * offset == SIOP_NSG may be a valid condition if we get a Save data
         * pointer when the xfer is done. Just ignore the Save data pointer
         * in this case
         */
        if (offset == SIOP_NSG)
                return;
#ifdef DIAGNOSTIC
        if (offset > SIOP_NSG) {
                sc_print_addr(siop_cmd->xs->sc_link);
                printf("offset %d > %d\n", offset, SIOP_NSG);
                panic("siop_sdp: offset");
        }
#endif
        /*
         * Save data pointer. We do this by adjusting the tables to point
         * at the beginning of the data not yet transferred.
         * offset points to the first table with untransferred data.
         */

        /*
         * before doing that we decrease resid by the amount of data which
         * has already been transferred.
         */
        siop_update_resid(siop_cmd, offset);

        /*
         * First let's see if we have a resid from a phase mismatch. If so,
         * we have to adjust the table at offset to remove transferred data.
         */
        if (siop_cmd->flags & CMDFL_RESID) {
                siop_cmd->flags &= ~CMDFL_RESID;
                table = &siop_cmd->siop_tables->data[offset];
                /* "cut" already transferred data from this table */
                table->addr =
                    siop_htoc32(sc, siop_ctoh32(sc, table->addr) +
                    siop_ctoh32(sc, table->count) - siop_cmd->resid);
                table->count = siop_htoc32(sc, siop_cmd->resid);
        }

        /*
         * now we can remove entries which have been transferred.
         * We just move the entries with data left to the beginning of the
         * tables
         */
        bcopy(&siop_cmd->siop_tables->data[offset],
            &siop_cmd->siop_tables->data[0],
            (SIOP_NSG - offset) * sizeof(scr_table_t));
}

void
siop_update_resid(siop_cmd, offset)
        struct siop_common_cmd *siop_cmd;
        int offset;
{
        struct siop_common_softc *sc = siop_cmd->siop_sc;
        scr_table_t *table;
        int i;

        if ((siop_cmd->xs->flags & (SCSI_DATA_OUT | SCSI_DATA_IN))
            == 0)
                return; /* no data to transfer */

        /*
         * update resid. First account for the table entries which have
         * been fully completed.
         */
        for (i = 0; i < offset; i++)
                siop_cmd->xs->resid -=
                    siop_ctoh32(sc, siop_cmd->siop_tables->data[i].count);
        /*
         * if CMDFL_RESID is set, the last table (pointed to by offset) is a
         * partial transfer. If not, offset points to the entry following
         * the last full transfer.
         */
        if (siop_cmd->flags & CMDFL_RESID) {
                table = &siop_cmd->siop_tables->data[offset];
                siop_cmd->xs->resid -=
                    siop_ctoh32(sc, table->count) - siop_cmd->resid;
        }
}

int
siop_iwr(siop_cmd)
        struct siop_common_cmd *siop_cmd;
{
        int offset;
        scr_table_t *table; /* table with IWR */
        struct siop_common_softc *sc = siop_cmd->siop_sc;
        /* handle ignore wide residue messages */

        /* if target isn't wide, reject */
        if ((siop_cmd->siop_target->flags & TARF_ISWIDE) == 0) {
                siop_cmd->siop_tables->t_msgout.count = siop_htoc32(sc, 1);
                siop_cmd->siop_tables->msg_out[0] = MSG_MESSAGE_REJECT;
                return SIOP_NEG_MSGOUT;
        }
        /* get index of current command in table */
        offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
        /*
         * if the current table did complete, we're now pointing at the
         * next one. Go back one if we didn't see a phase mismatch.
         */
        if ((siop_cmd->flags & CMDFL_RESID) == 0)
                offset--;
        table = &siop_cmd->siop_tables->data[offset];

        if ((siop_cmd->flags & CMDFL_RESID) == 0) {
                if (siop_ctoh32(sc, table->count) & 1) {
                        /* we really got the number of bytes we expected */
                        return SIOP_NEG_ACK;
                } else {
                        /*
                         * now we really had a short xfer, by one byte.
                         * handle it just as if we had a phase mismatch
                         * (there is a resid of one for this table).
                         * Update scratcha1 to reflect the fact that
                         * this xfer isn't complete.
                         */
                        siop_cmd->flags |= CMDFL_RESID;
                        siop_cmd->resid = 1;
                        bus_space_write_1(sc->sc_rt, sc->sc_rh,
                            SIOP_SCRATCHA + 1, offset);
                        return SIOP_NEG_ACK;
                }
        } else {
                /*
                 * we already have a short xfer for this table; it's
                 * just one byte less than we thought it was
                 */
                siop_cmd->resid--;
                return SIOP_NEG_ACK;
        }
}

void
siop_clearfifo(sc)
        struct siop_common_softc *sc;
{
        int timeout = 0;
        int ctest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3);

#ifdef DEBUG_INTR
        printf("DMA fifo not empty !\n");
#endif
        bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
            ctest3 | CTEST3_CLF);
        while ((bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) &
            CTEST3_CLF) != 0) {
                delay(1);
                if (++timeout > 1000) {
                        printf("clear fifo failed\n");
                        bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
                            bus_space_read_1(sc->sc_rt, sc->sc_rh,
                            SIOP_CTEST3) & ~CTEST3_CLF);
                        return;
                }
        }
}

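/*
 * Handle a bus mode change: wait for DIFFSENSE to stabilise, then switch
 * the chip between single-ended, LVD and differential operation.
 * Returns 1 on success, 0 if the mode is invalid or never stabilises.
 */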
int
siop_modechange(sc)
        struct siop_common_softc *sc;
{
        int retry;
        int sist0, sist1, stest2;
        for (retry = 0; retry < 5; retry++) {
                /*
                 * datasheet says to wait 100ms and re-read SIST1,
                 * to check that DIFFSENSE is stable.
                 * We may delay() 5 times for 100ms at interrupt time;
                 * hopefully this will not happen often.
                 */
                delay(100000);
                sist0 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST0);
                sist1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST1);
                if (sist1 & SIEN1_SBMC)
                        continue; /* we got an irq again */
                sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
                    STEST4_MODE_MASK;
                stest2 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2);
                switch(sc->mode) {
                case STEST4_MODE_DIF:
                        printf("%s: switching to differential mode\n",
                            sc->sc_dev.dv_xname);
                        bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
                            stest2 | STEST2_DIF);
                        break;
                case STEST4_MODE_SE:
                        printf("%s: switching to single-ended mode\n",
                            sc->sc_dev.dv_xname);
                        bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
                            stest2 & ~STEST2_DIF);
                        break;
                case STEST4_MODE_LVD:
                        printf("%s: switching to LVD mode\n",
                            sc->sc_dev.dv_xname);
                        bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
                            stest2 & ~STEST2_DIF);
                        break;
                default:
                        printf("%s: invalid SCSI mode 0x%x\n",
                            sc->sc_dev.dv_xname, sc->mode);
                        return 0;
                }
                return 1;
        }
        printf("%s: timeout waiting for DIFFSENSE to stabilise\n",
            sc->sc_dev.dv_xname);
        return 0;
}

void
siop_resetbus(sc)
        struct siop_common_softc *sc;
{
        int scntl1;
        scntl1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1);
        bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1,
            scntl1 | SCNTL1_RST);
        /* minimum 25 us, more time won't hurt */
        delay(100);
        bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, scntl1);
}

void
siop_update_xfer_mode(sc, target)
        struct siop_common_softc *sc;
        int target;
{
        struct siop_common_target *siop_target;

        siop_target = sc->targets[target];

        printf("%s: target %d now using %s%s%d bit ",
            sc->sc_dev.dv_xname, target,
            (siop_target->flags & TARF_TAG) ? "tagged " : "",
            (siop_target->flags & TARF_ISDT) ? "DT " : "",
            (siop_target->flags & TARF_ISWIDE) ? 16 : 8);

        if (siop_target->offset == 0)
                printf("async ");
        else {
                switch (siop_target->period) {
                case 9: /* 12.5ns cycle */
                        printf("80.0");
                        break;
                case 10: /* 25 ns cycle */
                        printf("40.0");
                        break;
                case 12: /* 48 ns cycle */
                        printf("20.0");
                        break;
                case 18: /* 72 ns cycle */
                        printf("13.3");
                        break;
                case 25: /* 100 ns cycle */
                        printf("10.0");
                        break;
                case 37: /* 118 ns cycle */
                        printf("6.67");
                        break;
                case 50: /* 200 ns cycle */
                        printf("5.0");
                        break;
                case 75: /* 300 ns cycle */
                        printf("3.33");
                        break;
                default:
                        printf("??");
                        break;
                }
                printf(" MHz %d REQ/ACK offset ", siop_target->offset);
        }

        printf("xfers\n");

        if ((sc->features & SF_CHIP_GEBUG) &&
            (siop_target->flags & TARF_ISWIDE) == 0)
                /*
                 * 1010 workaround: can't do disconnect if not wide,
                 * so can't do tag
                 */
                siop_target->flags &= ~TARF_TAG;
}