1 /* $NetBSD: siop_common.c,v 1.45 2008/04/08 12:07:27 cegger Exp $ */ 2 3 /* 4 * Copyright (c) 2000, 2002 Manuel Bouyer. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. All advertising materials mentioning features or use of this software 15 * must display the following acknowledgement: 16 * This product includes software developed by Manuel Bouyer. 17 * 4. The name of the author may not be used to endorse or promote products 18 * derived from this software without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
30 * 31 */ 32 33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */ 34 35 #include <sys/cdefs.h> 36 __KERNEL_RCSID(0, "$NetBSD: siop_common.c,v 1.45 2008/04/08 12:07:27 cegger Exp $"); 37 38 #include <sys/param.h> 39 #include <sys/systm.h> 40 #include <sys/device.h> 41 #include <sys/malloc.h> 42 #include <sys/buf.h> 43 #include <sys/kernel.h> 44 #include <sys/scsiio.h> 45 46 #include <uvm/uvm_extern.h> 47 48 #include <machine/endian.h> 49 #include <sys/bus.h> 50 51 #include <dev/scsipi/scsi_all.h> 52 #include <dev/scsipi/scsi_message.h> 53 #include <dev/scsipi/scsipi_all.h> 54 55 #include <dev/scsipi/scsiconf.h> 56 57 #include <dev/ic/siopreg.h> 58 #include <dev/ic/siopvar_common.h> 59 60 #include "opt_siop.h" 61 62 #undef DEBUG 63 #undef DEBUG_DR 64 #undef DEBUG_NEG 65 66 int 67 siop_common_attach(sc) 68 struct siop_common_softc *sc; 69 { 70 int error, i; 71 bus_dma_segment_t seg; 72 int rseg; 73 74 /* 75 * Allocate DMA-safe memory for the script and map it. 76 */ 77 if ((sc->features & SF_CHIP_RAM) == 0) { 78 error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, 79 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT); 80 if (error) { 81 aprint_error_dev(&sc->sc_dev, 82 "unable to allocate script DMA memory, " 83 "error = %d\n", error); 84 return error; 85 } 86 error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE, 87 (void **)&sc->sc_script, 88 BUS_DMA_NOWAIT|BUS_DMA_COHERENT); 89 if (error) { 90 aprint_error_dev(&sc->sc_dev, "unable to map script DMA memory, " 91 "error = %d\n", error); 92 return error; 93 } 94 error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, 95 PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_scriptdma); 96 if (error) { 97 aprint_error_dev(&sc->sc_dev, "unable to create script DMA map, " 98 "error = %d\n", error); 99 return error; 100 } 101 error = bus_dmamap_load(sc->sc_dmat, sc->sc_scriptdma, 102 sc->sc_script, PAGE_SIZE, NULL, BUS_DMA_NOWAIT); 103 if (error) { 104 aprint_error_dev(&sc->sc_dev, "unable to load script DMA map, " 105 "error = %d\n", error); 106 
return error; 107 } 108 sc->sc_scriptaddr = 109 sc->sc_scriptdma->dm_segs[0].ds_addr; 110 sc->ram_size = PAGE_SIZE; 111 } 112 113 sc->sc_adapt.adapt_dev = &sc->sc_dev; 114 sc->sc_adapt.adapt_nchannels = 1; 115 sc->sc_adapt.adapt_openings = 0; 116 sc->sc_adapt.adapt_ioctl = siop_ioctl; 117 sc->sc_adapt.adapt_minphys = minphys; 118 119 memset(&sc->sc_chan, 0, sizeof(sc->sc_chan)); 120 sc->sc_chan.chan_adapter = &sc->sc_adapt; 121 sc->sc_chan.chan_bustype = &scsi_bustype; 122 sc->sc_chan.chan_channel = 0; 123 sc->sc_chan.chan_flags = SCSIPI_CHAN_CANGROW; 124 sc->sc_chan.chan_ntargets = 125 (sc->features & SF_BUS_WIDE) ? 16 : 8; 126 sc->sc_chan.chan_nluns = 8; 127 sc->sc_chan.chan_id = 128 bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCID); 129 if (sc->sc_chan.chan_id == 0 || 130 sc->sc_chan.chan_id >= sc->sc_chan.chan_ntargets) 131 sc->sc_chan.chan_id = SIOP_DEFAULT_TARGET; 132 133 for (i = 0; i < 16; i++) 134 sc->targets[i] = NULL; 135 136 /* find min/max sync period for this chip */ 137 sc->st_maxsync = 0; 138 sc->dt_maxsync = 0; 139 sc->st_minsync = 255; 140 sc->dt_minsync = 255; 141 for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]); i++) { 142 if (sc->clock_period != scf_period[i].clock) 143 continue; 144 if (sc->st_maxsync < scf_period[i].period) 145 sc->st_maxsync = scf_period[i].period; 146 if (sc->st_minsync > scf_period[i].period) 147 sc->st_minsync = scf_period[i].period; 148 } 149 if (sc->st_maxsync == 255 || sc->st_minsync == 0) 150 panic("siop: can't find my sync parameters"); 151 for (i = 0; i < sizeof(dt_scf_period) / sizeof(dt_scf_period[0]); i++) { 152 if (sc->clock_period != dt_scf_period[i].clock) 153 continue; 154 if (sc->dt_maxsync < dt_scf_period[i].period) 155 sc->dt_maxsync = dt_scf_period[i].period; 156 if (sc->dt_minsync > dt_scf_period[i].period) 157 sc->dt_minsync = dt_scf_period[i].period; 158 } 159 if (sc->dt_maxsync == 255 || sc->dt_minsync == 0) 160 panic("siop: can't find my sync parameters"); 161 return 0; 162 } 163 164 void 
siop_common_reset(sc)
	struct siop_common_softc *sc;
{
	u_int32_t stest3;

	/*
	 * Software-reset the chip, reprogram its operating registers,
	 * set up the SCLK doubler/quadrupler when present, latch the
	 * current bus mode (SE/LVD/HVD) from STEST4, then hand over to
	 * the bus front-end's reset hook (sc->sc_reset).
	 */

	/* reset the chip */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_SRST);
	delay(1000);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, 0);

	/* init registers */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL0,
	    SCNTL0_ARB_MASK | SCNTL0_EPC | SCNTL0_AAP);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, 0);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3, sc->clock_div);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER, 0);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DIEN, 0xff);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN0,
	    0xff & ~(SIEN0_CMP | SIEN0_SEL | SIEN0_RSL));
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN1,
	    0xff & ~(SIEN1_HTH | SIEN1_GEN));
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2, 0);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, STEST3_TE);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STIME0,
	    (0xb << STIME0_SEL_SHIFT));
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCID,
	    sc->sc_chan.chan_id | SCID_RRE);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_RESPID0,
	    1 << sc->sc_chan.chan_id);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DCNTL,
	    (sc->features & SF_CHIP_PF) ?
	    DCNTL_COM | DCNTL_PFEN : DCNTL_COM);
	if (sc->features & SF_CHIP_AAIP)
		bus_space_write_1(sc->sc_rt, sc->sc_rh,
		    SIOP_AIPCNTL1, AIPCNTL1_DIS);

	/* enable clock doubler or quadrupler if appropriate */
	if (sc->features & (SF_CHIP_DBLR | SF_CHIP_QUAD)) {
		stest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
		    STEST1_DBLEN);
		if (sc->features & SF_CHIP_QUAD) {
			/* wait for PLL to lock */
			while ((bus_space_read_1(sc->sc_rt, sc->sc_rh,
			    SIOP_STEST4) & STEST4_LOCK) == 0)
				delay(10);
		} else {
			/* data sheet says 20us - more won't hurt */
			delay(100);
		}
		/* halt scsi clock, select doubler/quad, restart clock */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3,
		    stest3 | STEST3_HSC);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
		    STEST1_DBLEN | STEST1_DBLSEL);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, stest3);
	} else {
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, 0);
	}
	if (sc->features & SF_CHIP_FIFO)
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5,
		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5) |
		    CTEST5_DFS);
	if (sc->features & SF_CHIP_LED0) {
		/* Set GPIO0 as output if software LED control is required */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL,
		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL) & 0xfe);
	}
	if (sc->features & SF_BUS_ULTRA3) {
		/* reset SCNTL4 */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4, 0);
	}
	/* remember the current bus mode (SE/LVD/HVD) */
	sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
	    STEST4_MODE_MASK;

	/*
	 * initialise the RAM. Without this we may get scsi gross errors on
	 * the 1010
	 */
	if (sc->features & SF_CHIP_RAM)
		bus_space_set_region_4(sc->sc_ramt, sc->sc_ramh,
		    0, 0, sc->ram_size / 4);
	sc->sc_reset(sc);
}

/* prepare tables before sending a cmd */
void
siop_setuptables(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	int i;
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct scsipi_xfer *xs = siop_cmd->xs;
	int target = xs->xs_periph->periph_target;
	int lun = xs->xs_periph->periph_lun;
	int msgoffset = 1;	/* bytes of msg_out used so far */

	siop_cmd->siop_tables->id = siop_htoc32(sc, sc->targets[target]->id);
	memset(siop_cmd->siop_tables->msg_out, 0,
	    sizeof(siop_cmd->siop_tables->msg_out));
	/* request sense doesn't disconnect */
	if (xs->xs_control & XS_CTL_REQSENSE)
		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
	else if ((sc->features & SF_CHIP_GEBUG) &&
	    (sc->targets[target]->flags & TARF_ISWIDE) == 0)
		/*
		 * 1010 bug: it seems that the 1010 has problems with reselect
		 * when not in wide mode (generate false SCSI gross error).
		 * The FreeBSD sym driver has comments about it but their
		 * workaround (disable SCSI gross error reporting) doesn't
		 * work with my adapter. So disable disconnect when not
		 * wide.
		 */
		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
	else
		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 1);
	if (xs->xs_tag_type != 0) {
		if ((sc->targets[target]->flags & TARF_TAG) == 0) {
			scsipi_printaddr(xs->xs_periph);
			printf(": tagged command type %d id %d\n",
			    siop_cmd->xs->xs_tag_type, siop_cmd->xs->xs_tag_id);
			panic("tagged command for non-tagging device");
		}
		siop_cmd->flags |= CMDFL_TAG;
		siop_cmd->siop_tables->msg_out[1] = siop_cmd->xs->xs_tag_type;
		/*
		 * use siop_cmd->tag not xs->xs_tag_id, caller may want a
		 * different one
		 */
		siop_cmd->siop_tables->msg_out[2] = siop_cmd->tag;
		msgoffset = 3;
	}
	siop_cmd->siop_tables->t_msgout.count = siop_htoc32(sc, msgoffset);
	if (sc->targets[target]->status == TARST_ASYNC) {
		/*
		 * transfer mode not negotiated yet: append a PPR, WDTR or
		 * SDTR message depending on the target's capabilities
		 */
		if ((sc->targets[target]->flags & TARF_DT) &&
		    (sc->mode == STEST4_MODE_LVD)) {
			sc->targets[target]->status = TARST_PPR_NEG;
			siop_ppr_msg(siop_cmd, msgoffset, sc->dt_minsync,
			    sc->maxoff);
		} else if (sc->targets[target]->flags & TARF_WIDE) {
			sc->targets[target]->status = TARST_WIDE_NEG;
			siop_wdtr_msg(siop_cmd, msgoffset,
			    MSG_EXT_WDTR_BUS_16_BIT);
		} else if (sc->targets[target]->flags & TARF_SYNC) {
			sc->targets[target]->status = TARST_SYNC_NEG;
			siop_sdtr_msg(siop_cmd, msgoffset, sc->st_minsync,
			    (sc->maxoff > 31) ? 31 : sc->maxoff);
		} else {
			sc->targets[target]->status = TARST_OK;
			siop_update_xfer_mode(sc, target);
		}
	}
	siop_cmd->siop_tables->status =
	    siop_htoc32(sc, SCSI_SIOP_NOSTATUS); /* set invalid status */

	siop_cmd->siop_tables->cmd.count =
	    siop_htoc32(sc, siop_cmd->dmamap_cmd->dm_segs[0].ds_len);
	siop_cmd->siop_tables->cmd.addr =
	    siop_htoc32(sc, siop_cmd->dmamap_cmd->dm_segs[0].ds_addr);
	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
		/* copy the data DMA segments into the script tables */
		for (i = 0; i < siop_cmd->dmamap_data->dm_nsegs; i++) {
			siop_cmd->siop_tables->data[i].count =
			    siop_htoc32(sc,
				siop_cmd->dmamap_data->dm_segs[i].ds_len);
			siop_cmd->siop_tables->data[i].addr =
			    siop_htoc32(sc,
				siop_cmd->dmamap_data->dm_segs[i].ds_addr);
		}
	}
}

/*
 * Handle an incoming WDTR (wide transfer) negotiation message.
 * Returns SIOP_NEG_MSGOUT when an answer has been built in msg_out and
 * must be sent to the target, or SIOP_NEG_ACK when the message only has
 * to be acknowledged.
 */
int
siop_wdtr_neg(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct siop_common_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->xs_periph->periph_target;
	struct siop_common_xfer *tables = siop_cmd->siop_tables;

	if (siop_target->status == TARST_WIDE_NEG) {
		/* we initiated wide negotiation */
		switch (tables->msg_in[3]) {
		case MSG_EXT_WDTR_BUS_8_BIT:
			siop_target->flags &= ~TARF_ISWIDE;
			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
			break;
		case MSG_EXT_WDTR_BUS_16_BIT:
			if (siop_target->flags & TARF_WIDE) {
				siop_target->flags |= TARF_ISWIDE;
				sc->targets[target]->id |= (SCNTL3_EWS << 24);
				break;
			}
		/* FALLTHROUGH */
		default:
			/*
			 * hum, we got more than what we can handle, shouldn't
			 * happen. Reject, and stay async
			 */
			siop_target->flags &= ~TARF_ISWIDE;
			siop_target->status = TARST_OK;
			siop_target->offset = siop_target->period = 0;
			siop_update_xfer_mode(sc, target);
			printf("%s: rejecting invalid wide negotiation from "
			    "target %d (%d)\n", device_xname(&sc->sc_dev),
			    target,
			    tables->msg_in[3]);
			tables->t_msgout.count = siop_htoc32(sc, 1);
			tables->msg_out[0] = MSG_MESSAGE_REJECT;
			return SIOP_NEG_MSGOUT;
		}
		tables->id = siop_htoc32(sc, sc->targets[target]->id);
		/* top byte of targets[]->id is the SCNTL3 value */
		bus_space_write_1(sc->sc_rt, sc->sc_rh,
		    SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		/* we now need to do sync */
		if (siop_target->flags & TARF_SYNC) {
			siop_target->status = TARST_SYNC_NEG;
			siop_sdtr_msg(siop_cmd, 0, sc->st_minsync,
			    (sc->maxoff > 31) ? 31 : sc->maxoff);
			return SIOP_NEG_MSGOUT;
		} else {
			siop_target->status = TARST_OK;
			siop_update_xfer_mode(sc, target);
			return SIOP_NEG_ACK;
		}
	} else {
		/* target initiated wide negotiation */
		if (tables->msg_in[3] >= MSG_EXT_WDTR_BUS_16_BIT
		    && (siop_target->flags & TARF_WIDE)) {
			siop_target->flags |= TARF_ISWIDE;
			sc->targets[target]->id |= SCNTL3_EWS << 24;
		} else {
			siop_target->flags &= ~TARF_ISWIDE;
			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
		}
		tables->id = siop_htoc32(sc, sc->targets[target]->id);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		/*
		 * we did reset wide parameters, so fall back to async,
		 * but don't schedule a sync neg, target should initiate it
		 */
		siop_target->status = TARST_OK;
		siop_target->offset = siop_target->period = 0;
		siop_update_xfer_mode(sc, target);
		siop_wdtr_msg(siop_cmd, 0, (siop_target->flags & TARF_ISWIDE) ?
		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT);
		return SIOP_NEG_MSGOUT;
	}
}

/*
 * Handle an incoming PPR (parallel protocol request, used for DT/Ultra3
 * transfers) negotiation message.  Same return convention as
 * siop_wdtr_neg().
 */
int
siop_ppr_neg(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct siop_common_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->xs_periph->periph_target;
	struct siop_common_xfer *tables = siop_cmd->siop_tables;
	int sync, offset, options, scf = 0;
	int i;

#ifdef DEBUG_NEG
	printf("%s: answer on ppr negotiation:", device_xname(&sc->sc_dev));
	for (i = 0; i < 8; i++)
		printf(" 0x%x", tables->msg_in[i]);
	printf("\n");
#endif

	if (siop_target->status == TARST_PPR_NEG) {
		/* we initiated PPR negotiation */
		sync = tables->msg_in[3];
		offset = tables->msg_in[5];
		options = tables->msg_in[7];
		if (options != MSG_EXT_PPR_DT) {
			/* shouldn't happen */
			printf("%s: ppr negotiation for target %d: "
			    "no DT option\n", device_xname(&sc->sc_dev),
			    target);
			siop_target->status = TARST_ASYNC;
			siop_target->flags &= ~(TARF_DT | TARF_ISDT);
			siop_target->offset = 0;
			siop_target->period = 0;
			goto reject;
		}

		if (offset > sc->maxoff || sync < sc->dt_minsync ||
		    sync > sc->dt_maxsync) {
			printf("%s: ppr negotiation for target %d: "
			    "offset (%d) or sync (%d) out of range\n",
			    device_xname(&sc->sc_dev), target, offset, sync);
			/* should not happen */
			siop_target->offset = 0;
			siop_target->period = 0;
			goto reject;
		} else {
			/* look the period up in the DT clock-factor table */
			for (i = 0; i <
			    sizeof(dt_scf_period) / sizeof(dt_scf_period[0]);
			    i++) {
				if (sc->clock_period != dt_scf_period[i].clock)
					continue;
				if (dt_scf_period[i].period == sync) {
					/* ok, found it. we now are sync. */
					siop_target->offset = offset;
					siop_target->period = sync;
					scf = dt_scf_period[i].scf;
					siop_target->flags |= TARF_ISDT;
				}
			}
			if ((siop_target->flags & TARF_ISDT) == 0) {
				printf("%s: ppr negotiation for target %d: "
				    "sync (%d) incompatible with adapter\n",
				    device_xname(&sc->sc_dev), target, sync);
				/*
				 * we didn't find it in our table, do async
				 * send reject msg, start SDTR/WDTR neg
				 */
				siop_target->status = TARST_ASYNC;
				siop_target->flags &= ~(TARF_DT | TARF_ISDT);
				siop_target->offset = 0;
				siop_target->period = 0;
				goto reject;
			}
		}
		if (tables->msg_in[6] != 1) {
			printf("%s: ppr negotiation for target %d: "
			    "transfer width (%d) incompatible with dt\n",
			    device_xname(&sc->sc_dev), target,
			    tables->msg_in[6]);
			/* DT mode can only be done with wide transfers */
			siop_target->status = TARST_ASYNC;
			goto reject;
		}
		/*
		 * negotiation succeeded: rebuild the cached per-target
		 * register image (SCNTL3 in byte 3, SXFER in byte 1,
		 * SCNTL4 in byte 0 of targets[]->id)
		 */
		siop_target->flags |= TARF_ISWIDE;
		sc->targets[target]->id |= (SCNTL3_EWS << 24);
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id |= scf << (24 + SCNTL3_SCF_SHIFT);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
		sc->targets[target]->id |=
		    (siop_target->offset & SXFER_MO_MASK) << 8;
		sc->targets[target]->id &= ~0xff;
		sc->targets[target]->id |= SCNTL4_U3EN;
		siop_target->status = TARST_OK;
		siop_update_xfer_mode(sc, target);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
		    (sc->targets[target]->id >> 8) & 0xff);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4,
		    sc->targets[target]->id & 0xff);
		return SIOP_NEG_ACK;
	} else {
		/* target initiated PPR negotiation, shouldn't happen */
		printf("%s: rejecting invalid PPR negotiation from "
		    "target %d\n", device_xname(&sc->sc_dev), target);
reject:
		tables->t_msgout.count = siop_htoc32(sc, 1);
		tables->msg_out[0] = MSG_MESSAGE_REJECT;
		return SIOP_NEG_MSGOUT;
	}
}

/*
 * Handle an incoming SDTR (synchronous transfer) negotiation message,
 * either as an answer to our request or as a target-initiated
 * negotiation.  Same return convention as siop_wdtr_neg().
 */
int
siop_sdtr_neg(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct siop_common_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->xs_periph->periph_target;
	int sync, maxoffset, offset, i;
	int send_msgout = 0;
	struct siop_common_xfer *tables = siop_cmd->siop_tables;

	/* limit to Ultra/2 parameters, need PPR for Ultra/3 */
	maxoffset = (sc->maxoff > 31) ? 31 : sc->maxoff;

	sync = tables->msg_in[3];
	offset = tables->msg_in[4];

	if (siop_target->status == TARST_SYNC_NEG) {
		/* we initiated sync negotiation */
		siop_target->status = TARST_OK;
#ifdef DEBUG
		printf("sdtr: sync %d offset %d\n", sync, offset);
#endif
		if (offset > maxoffset || sync < sc->st_minsync ||
		    sync > sc->st_maxsync)
			goto reject;
		for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
		    i++) {
			if (sc->clock_period != scf_period[i].clock)
				continue;
			if (scf_period[i].period == sync) {
				/* ok, found it. we now are sync. */
				siop_target->offset = offset;
				siop_target->period = sync;
				sc->targets[target]->id &=
				    ~(SCNTL3_SCF_MASK << 24);
				sc->targets[target]->id |= scf_period[i].scf
				    << (24 + SCNTL3_SCF_SHIFT);
				if (sync < 25 && /* Ultra */
				    (sc->features & SF_BUS_ULTRA3) == 0)
					sc->targets[target]->id |=
					    SCNTL3_ULTRA << 24;
				else
					sc->targets[target]->id &=
					    ~(SCNTL3_ULTRA << 24);
				sc->targets[target]->id &=
				    ~(SXFER_MO_MASK << 8);
				sc->targets[target]->id |=
				    (offset & SXFER_MO_MASK) << 8;
				sc->targets[target]->id &= ~0xff; /* scntl4 */
				goto end;
			}
		}
		/*
		 * we didn't find it in our table, do async and send reject
		 * msg
		 */
reject:
		send_msgout = 1;
		tables->t_msgout.count = siop_htoc32(sc, 1);
		tables->msg_out[0] = MSG_MESSAGE_REJECT;
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
		sc->targets[target]->id &= ~0xff; /* scntl4 */
		siop_target->offset = siop_target->period = 0;
	} else { /* target initiated sync neg */
#ifdef DEBUG
		printf("sdtr (target): sync %d offset %d\n", sync, offset);
#endif
		if (offset == 0 || sync > sc->st_maxsync) { /* async */
			goto async;
		}
		/* clamp the target's request to what we can do */
		if (offset > maxoffset)
			offset = maxoffset;
		if (sync < sc->st_minsync)
			sync = sc->st_minsync;
		/* look for sync period */
		for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
		    i++) {
			if (sc->clock_period != scf_period[i].clock)
				continue;
			if (scf_period[i].period == sync) {
				/* ok, found it. we now are sync. */
				siop_target->offset = offset;
				siop_target->period = sync;
				sc->targets[target]->id &=
				    ~(SCNTL3_SCF_MASK << 24);
				sc->targets[target]->id |= scf_period[i].scf
				    << (24 + SCNTL3_SCF_SHIFT);
				if (sync < 25 && /* Ultra */
				    (sc->features & SF_BUS_ULTRA3) == 0)
					sc->targets[target]->id |=
					    SCNTL3_ULTRA << 24;
				else
					sc->targets[target]->id &=
					    ~(SCNTL3_ULTRA << 24);
				sc->targets[target]->id &=
				    ~(SXFER_MO_MASK << 8);
				sc->targets[target]->id |=
				    (offset & SXFER_MO_MASK) << 8;
				sc->targets[target]->id &= ~0xff; /* scntl4 */
				siop_sdtr_msg(siop_cmd, 0, sync, offset);
				send_msgout = 1;
				goto end;
			}
		}
async:
		siop_target->offset = siop_target->period = 0;
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
		sc->targets[target]->id &= ~0xff; /* scntl4 */
		siop_sdtr_msg(siop_cmd, 0, 0, 0);
		send_msgout = 1;
	}
end:
	if (siop_target->status == TARST_OK)
		siop_update_xfer_mode(sc, target);
#ifdef DEBUG
	printf("id now 0x%x\n", sc->targets[target]->id);
#endif
	/* push the new SCNTL3/SXFER values to the chip */
	tables->id = siop_htoc32(sc, sc->targets[target]->id);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
	    (sc->targets[target]->id >> 24) & 0xff);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
	    (sc->targets[target]->id >> 8) & 0xff);
	if (send_msgout) {
		return SIOP_NEG_MSGOUT;
	} else {
		return SIOP_NEG_ACK;
	}
}

/*
 * Build an extended SDTR message at byte 'offset' of the msg_out table
 * and update the message-out byte count accordingly.
 */
void
siop_sdtr_msg(siop_cmd, offset, ssync, soff)
	struct siop_common_cmd *siop_cmd;
	int offset;
	int ssync, soff;
{
	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_SDTR_LEN;
	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_SDTR;
	siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
	siop_cmd->siop_tables->msg_out[offset + 4] = soff;
	siop_cmd->siop_tables->t_msgout.count =
	    siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_SDTR_LEN + 2);
}

/*
 * Build an extended WDTR message at byte 'offset' of the msg_out table
 * and update the message-out byte count accordingly.
 */
void
siop_wdtr_msg(siop_cmd, offset, wide)
	struct siop_common_cmd *siop_cmd;
	int offset;
	int wide;
{
	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_WDTR_LEN;
	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_WDTR;
	siop_cmd->siop_tables->msg_out[offset + 3] = wide;
	siop_cmd->siop_tables->t_msgout.count =
	    siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_WDTR_LEN + 2);
}

/*
 * Build an extended PPR message (requesting wide DT transfers) at byte
 * 'offset' of the msg_out table and update the message-out byte count.
 */
void
siop_ppr_msg(siop_cmd, offset, ssync, soff)
	struct siop_common_cmd *siop_cmd;
	int offset;
	int ssync, soff;
{
	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_PPR_LEN;
	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_PPR;
	siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
	siop_cmd->siop_tables->msg_out[offset + 4] = 0; /* reserved */
	siop_cmd->siop_tables->msg_out[offset + 5] = soff;
	siop_cmd->siop_tables->msg_out[offset + 6] = 1; /* wide */
	siop_cmd->siop_tables->msg_out[offset + 7] = MSG_EXT_PPR_DT;
	siop_cmd->siop_tables->t_msgout.count =
	    siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_PPR_LEN + 2);
}

/* adapter minphys hook: just the default bound */
void
siop_minphys(bp)
	struct buf *bp;
{
	minphys(bp);
}

/*
 * scsipi ioctl hook.  Only supports SCBUSIORESET; the actual bus reset
 * is deferred to the interrupt handler (see comment below).
 */
int
siop_ioctl(struct scsipi_channel *chan, u_long cmd, void *arg,
    int flag, struct proc *p)
{
	struct siop_common_softc *sc = (void *)chan->chan_adapter->adapt_dev;

	switch (cmd) {
	case SCBUSIORESET:
		/*
		 * abort the script. This will trigger an interrupt, which will
		 * trigger a bus reset.
		 * We can't safely trigger the reset here as we can't access
		 * the required register while the script is running.
		 */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_ABRT);
		return (0);
	default:
		return (ENOTTY);
	}
}

/*
 * Phase-mismatch handling: compute how many bytes of the current data
 * table were NOT transferred and record them in siop_cmd->resid (with
 * CMDFL_RESID set) so a later save-data-pointer can adjust the tables.
 */
void
siop_ma(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	int offset, dbc, sstat;
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	scr_table_t *table; /* table with partial xfer */

	/*
	 * compute how much of the current table didn't get handled when
	 * a phase mismatch occurs
	 */
	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
	    == 0)
		return; /* no valid data transfer */

	/* SCRATCHA+1 is maintained by the script: current data table index */
	offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
	if (offset >= SIOP_NSG) {
		aprint_error_dev(&sc->sc_dev, "bad offset in siop_sdp (%d)\n",
		    offset);
		return;
	}
	table = &siop_cmd->siop_tables->data[offset];
#ifdef DEBUG_DR
	printf("siop_ma: offset %d count=%d addr=0x%x ", offset,
	    table->count, table->addr);
#endif
	/* DBC holds the byte count remaining for the interrupted move */
	dbc = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DBC) & 0x00ffffff;
	if (siop_cmd->xs->xs_control & XS_CTL_DATA_OUT) {
		if (sc->features & SF_CHIP_DFBC) {
			dbc +=
			    bus_space_read_2(sc->sc_rt, sc->sc_rh, SIOP_DFBC);
		} else {
			/* need to account stale data in FIFO */
			int dfifo =
			    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_DFIFO);
			if (sc->features & SF_CHIP_FIFO) {
				dfifo |= (bus_space_read_1(sc->sc_rt,
				    sc->sc_rh, SIOP_CTEST5) &
				    CTEST5_BOMASK) << 8;
				dbc += (dfifo - (dbc & 0x3ff)) & 0x3ff;
			} else {
				dbc += (dfifo - (dbc & 0x7f)) & 0x7f;
			}
		}
		/* bytes still sitting in the SCSI output latches count too */
		sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SSTAT0);
		if (sstat & SSTAT0_OLF)
			dbc++;
		if ((sstat & SSTAT0_ORF) && (sc->features & SF_CHIP_DFBC) == 0)
			dbc++;
		if (siop_cmd->siop_target->flags & TARF_ISWIDE) {
			sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh,
			    SIOP_SSTAT2);
			if (sstat & SSTAT2_OLF1)
				dbc++;
			if ((sstat & SSTAT2_ORF1) &&
			    (sc->features & SF_CHIP_DFBC) == 0)
				dbc++;
		}
		/* clear the FIFO */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) |
		    CTEST3_CLF);
	}
	siop_cmd->flags |= CMDFL_RESID;
	siop_cmd->resid = dbc;
}

/*
 * Save-data-pointer handling: rewrite the data tables so they start at
 * the first byte not yet transferred.  'offset' is the index of the
 * first table with untransferred data.
 */
void
siop_sdp(siop_cmd, offset)
	struct siop_common_cmd *siop_cmd;
	int offset;
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	scr_table_t *table;

	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
	    == 0)
		return; /* no data pointers to save */

	/*
	 * offset == SIOP_NSG may be a valid condition if we get a Save data
	 * pointer when the xfer is done. Just ignore the Save data pointer
	 * in this case
	 */
	if (offset == SIOP_NSG)
		return;
#ifdef DIAGNOSTIC
	if (offset > SIOP_NSG) {
		scsipi_printaddr(siop_cmd->xs->xs_periph);
		printf(": offset %d > %d\n", offset, SIOP_NSG);
		panic("siop_sdp: offset");
	}
#endif
	/*
	 * Save data pointer. We do this by adjusting the tables to point
	 * at the beginning of the data not yet transferred.
	 * offset points to the first table with untransferred data.
	 */

	/*
	 * before doing that we decrease resid from the amount of data which
	 * has been transferred.
	 */
	siop_update_resid(siop_cmd, offset);

	/*
	 * First let see if we have a resid from a phase mismatch. If so,
	 * we have to adjust the table at offset to remove transferred data.
	 */
	if (siop_cmd->flags & CMDFL_RESID) {
		siop_cmd->flags &= ~CMDFL_RESID;
		table = &siop_cmd->siop_tables->data[offset];
		/* "cut" already transferred data from this table */
		table->addr =
		    siop_htoc32(sc, siop_ctoh32(sc, table->addr) +
		    siop_ctoh32(sc, table->count) - siop_cmd->resid);
		table->count = siop_htoc32(sc, siop_cmd->resid);
	}

	/*
	 * now we can remove entries which have been transferred.
	 * We just move the entries with data left at the beginning of the
	 * tables
	 */
	memmove(&siop_cmd->siop_tables->data[0],
	    &siop_cmd->siop_tables->data[offset],
	    (SIOP_NSG - offset) * sizeof(scr_table_t));
}

/*
 * Subtract the data already transferred (all tables before 'offset',
 * plus the partially-done table if CMDFL_RESID is set) from xs->resid.
 */
void
siop_update_resid(siop_cmd, offset)
	struct siop_common_cmd *siop_cmd;
	int offset;
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	scr_table_t *table;
	int i;

	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
	    == 0)
		return; /* no data to transfer */

	/*
	 * update resid. First account for the table entries which have
	 * been fully completed.
	 */
	for (i = 0; i < offset; i++)
		siop_cmd->xs->resid -=
		    siop_ctoh32(sc, siop_cmd->siop_tables->data[i].count);
	/*
	 * if CMDFL_RESID is set, the last table (pointed by offset) is a
	 * partial transfer. If not, offset points to the entry following
	 * the last full transfer.
	 */
	if (siop_cmd->flags & CMDFL_RESID) {
		table = &siop_cmd->siop_tables->data[offset];
		siop_cmd->xs->resid -=
		    siop_ctoh32(sc, table->count) - siop_cmd->resid;
	}
}

/*
 * Handle an "ignore wide residue" message from a wide target: the last
 * data byte of an odd-length wide transfer must not be counted.
 * Returns SIOP_NEG_MSGOUT (reject built) or SIOP_NEG_ACK.
 */
int
siop_iwr(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	int offset;
	scr_table_t *table; /* table with IWR */
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	/* handle ignore wide residue messages */

	/* if target isn't wide, reject */
	if ((siop_cmd->siop_target->flags & TARF_ISWIDE) == 0) {
		siop_cmd->siop_tables->t_msgout.count = siop_htoc32(sc, 1);
		siop_cmd->siop_tables->msg_out[0] = MSG_MESSAGE_REJECT;
		return SIOP_NEG_MSGOUT;
	}
	/* get index of current command in table */
	offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
	/*
	 * if the current table did complete, we're now pointing at the
	 * next one. Go back one if we didn't see a phase mismatch.
	 */
	if ((siop_cmd->flags & CMDFL_RESID) == 0)
		offset--;
	table = &siop_cmd->siop_tables->data[offset];

	if ((siop_cmd->flags & CMDFL_RESID) == 0) {
		if (siop_ctoh32(sc, table->count) & 1) {
			/* we really got the number of bytes we expected */
			return SIOP_NEG_ACK;
		} else {
			/*
			 * now we really had a short xfer, by one byte.
			 * handle it just as if we had a phase mismatch
			 * (there is a resid of one for this table).
			 * Update scratcha1 to reflect the fact that
			 * this xfer isn't complete.
			 */
			siop_cmd->flags |= CMDFL_RESID;
			siop_cmd->resid = 1;
			bus_space_write_1(sc->sc_rt, sc->sc_rh,
			    SIOP_SCRATCHA + 1, offset);
			return SIOP_NEG_ACK;
		}
	} else {
		/*
		 * we already have a short xfer for this table; it's
		 * just one byte less than we thought it was
		 */
		siop_cmd->resid--;
		return SIOP_NEG_ACK;
	}
}

/*
 * Flush the DMA FIFO via CTEST3_CLF and wait (bounded, ~1ms) for the
 * chip to acknowledge the flush.
 */
void
siop_clearfifo(sc)
	struct siop_common_softc *sc;
{
	int timeout = 0;
	int ctest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3);

#ifdef DEBUG_INTR
	printf("DMA fifo not empty !\n");
#endif
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
	    ctest3 | CTEST3_CLF);
	while ((bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) &
	    CTEST3_CLF) != 0) {
		delay(1);
		if (++timeout > 1000) {
			printf("clear fifo failed\n");
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
			    bus_space_read_1(sc->sc_rt, sc->sc_rh,
			    SIOP_CTEST3) & ~CTEST3_CLF);
			return;
		}
	}
}

/*
 * Handle a SCSI bus mode change (SBMC interrupt): wait for DIFFSENSE to
 * stabilise, then program STEST2 for the new mode.  Returns 1 if the
 * mode was switched successfully, 0 on invalid mode or timeout.
 */
int
siop_modechange(sc)
	struct siop_common_softc *sc;
{
	int retry;
	int sist0, sist1, stest2;

	for (retry = 0; retry < 5; retry++) {
		/*
		 * datasheet says to wait 100ms and re-read SIST1,
		 * to check that DIFFSENSE is stable.
		 * We may delay() 5 times for 100ms at interrupt time;
		 * hopefully this will not happen often.
		 */
		delay(100000);
		sist0 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST0);
		sist1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST1);
		if (sist1 & SIEN1_SBMC)
			continue; /* we got an irq again */
		sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
		    STEST4_MODE_MASK;
		stest2 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2);
		switch(sc->mode) {
		case STEST4_MODE_DIF:
			printf("%s: switching to differential mode\n",
			    device_xname(&sc->sc_dev));
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 | STEST2_DIF);
			break;
		case STEST4_MODE_SE:
			printf("%s: switching to single-ended mode\n",
			    device_xname(&sc->sc_dev));
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 & ~STEST2_DIF);
			break;
		case STEST4_MODE_LVD:
			printf("%s: switching to LVD mode\n",
			    device_xname(&sc->sc_dev));
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 & ~STEST2_DIF);
			break;
		default:
			aprint_error_dev(&sc->sc_dev,
			    "invalid SCSI mode 0x%x\n",
			    sc->mode);
			return 0;
		}
		return 1;
	}
	printf("%s: timeout waiting for DIFFSENSE to stabilise\n",
	    device_xname(&sc->sc_dev));
	return 0;
}

/*
 * Assert SCSI bus reset (RST) for at least the mandated 25us, then
 * restore SCNTL1.
 */
void
siop_resetbus(sc)
	struct siop_common_softc *sc;
{
	int scntl1;

	scntl1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1,
	    scntl1 | SCNTL1_RST);
	/* minimum 25 us, more time won't hurt */
	delay(100);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, scntl1);
}

/*
 * Report the negotiated transfer mode (wide/sync/tagged queueing) for
 * 'target' to the scsipi layer via an ASYNC_EVENT_XFER_MODE event.
 */
void
siop_update_xfer_mode(sc, target)
	struct siop_common_softc *sc;
	int target;
{
	struct siop_common_target *siop_target = sc->targets[target];
	struct scsipi_xfer_mode xm;

	xm.xm_target = target;
	xm.xm_mode = 0;
	xm.xm_period = 0;
	xm.xm_offset = 0;

	if (siop_target->flags & TARF_ISWIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;
	if (siop_target->period) {
		xm.xm_period = siop_target->period;
		xm.xm_offset = siop_target->offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}
	if (siop_target->flags & TARF_TAG) {
		/* 1010 workaround: can't do disconnect if not wide, so can't do tag */
		if ((sc->features & SF_CHIP_GEBUG) == 0 ||
		    (sc->targets[target]->flags & TARF_ISWIDE))
			xm.xm_mode |= PERIPH_CAP_TQING;
	}

	scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, &xm);
}