1 /* $NetBSD: siop_common.c,v 1.51 2009/09/04 18:29:52 tsutsui Exp $ */ 2 3 /* 4 * Copyright (c) 2000, 2002 Manuel Bouyer. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. All advertising materials mentioning features or use of this software 15 * must display the following acknowledgement: 16 * This product includes software developed by Manuel Bouyer. 17 * 4. The name of the author may not be used to endorse or promote products 18 * derived from this software without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
30 * 31 */ 32 33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */ 34 35 #include <sys/cdefs.h> 36 __KERNEL_RCSID(0, "$NetBSD: siop_common.c,v 1.51 2009/09/04 18:29:52 tsutsui Exp $"); 37 38 #include <sys/param.h> 39 #include <sys/systm.h> 40 #include <sys/device.h> 41 #include <sys/malloc.h> 42 #include <sys/buf.h> 43 #include <sys/kernel.h> 44 #include <sys/scsiio.h> 45 46 #include <uvm/uvm_extern.h> 47 48 #include <machine/endian.h> 49 #include <sys/bus.h> 50 51 #include <dev/scsipi/scsi_all.h> 52 #include <dev/scsipi/scsi_message.h> 53 #include <dev/scsipi/scsipi_all.h> 54 55 #include <dev/scsipi/scsiconf.h> 56 57 #include <dev/ic/siopreg.h> 58 #include <dev/ic/siopvar_common.h> 59 60 #include "opt_siop.h" 61 62 #undef DEBUG 63 #undef DEBUG_DR 64 #undef DEBUG_NEG 65 66 int 67 siop_common_attach(struct siop_common_softc *sc) 68 { 69 int error, i; 70 bus_dma_segment_t seg; 71 int rseg; 72 73 /* 74 * Allocate DMA-safe memory for the script and map it. 75 */ 76 if ((sc->features & SF_CHIP_RAM) == 0) { 77 error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, 78 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT); 79 if (error) { 80 aprint_error_dev(sc->sc_dev, 81 "unable to allocate script DMA memory, " 82 "error = %d\n", error); 83 return error; 84 } 85 error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE, 86 (void **)&sc->sc_script, 87 BUS_DMA_NOWAIT|BUS_DMA_COHERENT); 88 if (error) { 89 aprint_error_dev(sc->sc_dev, 90 "unable to map script DMA memory, " 91 "error = %d\n", error); 92 return error; 93 } 94 error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, 95 PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_scriptdma); 96 if (error) { 97 aprint_error_dev(sc->sc_dev, 98 "unable to create script DMA map, " 99 "error = %d\n", error); 100 return error; 101 } 102 error = bus_dmamap_load(sc->sc_dmat, sc->sc_scriptdma, 103 sc->sc_script, PAGE_SIZE, NULL, BUS_DMA_NOWAIT); 104 if (error) { 105 aprint_error_dev(sc->sc_dev, 106 "unable to load script DMA map, " 107 "error = %d\n", error); 
108 return error; 109 } 110 sc->sc_scriptaddr = 111 sc->sc_scriptdma->dm_segs[0].ds_addr; 112 sc->ram_size = PAGE_SIZE; 113 } 114 115 sc->sc_adapt.adapt_dev = sc->sc_dev; 116 sc->sc_adapt.adapt_nchannels = 1; 117 sc->sc_adapt.adapt_openings = 0; 118 sc->sc_adapt.adapt_ioctl = siop_ioctl; 119 sc->sc_adapt.adapt_minphys = minphys; 120 121 memset(&sc->sc_chan, 0, sizeof(sc->sc_chan)); 122 sc->sc_chan.chan_adapter = &sc->sc_adapt; 123 sc->sc_chan.chan_bustype = &scsi_bustype; 124 sc->sc_chan.chan_channel = 0; 125 sc->sc_chan.chan_flags = SCSIPI_CHAN_CANGROW; 126 sc->sc_chan.chan_ntargets = 127 (sc->features & SF_BUS_WIDE) ? 16 : 8; 128 sc->sc_chan.chan_nluns = 8; 129 sc->sc_chan.chan_id = 130 bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCID); 131 if (sc->sc_chan.chan_id == 0 || 132 sc->sc_chan.chan_id >= sc->sc_chan.chan_ntargets) 133 sc->sc_chan.chan_id = SIOP_DEFAULT_TARGET; 134 135 for (i = 0; i < 16; i++) 136 sc->targets[i] = NULL; 137 138 /* find min/max sync period for this chip */ 139 sc->st_maxsync = 0; 140 sc->dt_maxsync = 0; 141 sc->st_minsync = 255; 142 sc->dt_minsync = 255; 143 for (i = 0; i < __arraycount(scf_period); i++) { 144 if (sc->clock_period != scf_period[i].clock) 145 continue; 146 if (sc->st_maxsync < scf_period[i].period) 147 sc->st_maxsync = scf_period[i].period; 148 if (sc->st_minsync > scf_period[i].period) 149 sc->st_minsync = scf_period[i].period; 150 } 151 if (sc->st_maxsync == 255 || sc->st_minsync == 0) 152 panic("siop: can't find my sync parameters"); 153 for (i = 0; i < __arraycount(dt_scf_period); i++) { 154 if (sc->clock_period != dt_scf_period[i].clock) 155 continue; 156 if (sc->dt_maxsync < dt_scf_period[i].period) 157 sc->dt_maxsync = dt_scf_period[i].period; 158 if (sc->dt_minsync > dt_scf_period[i].period) 159 sc->dt_minsync = dt_scf_period[i].period; 160 } 161 if (sc->dt_maxsync == 255 || sc->dt_minsync == 0) 162 panic("siop: can't find my sync parameters"); 163 return 0; 164 } 165 166 void 167 siop_common_reset(struct 
siop_common_softc *sc) 168 { 169 u_int32_t stest1, stest3; 170 171 /* reset the chip */ 172 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_SRST); 173 delay(1000); 174 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, 0); 175 176 /* init registers */ 177 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL0, 178 SCNTL0_ARB_MASK | SCNTL0_EPC | SCNTL0_AAP); 179 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, 0); 180 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3, sc->clock_div); 181 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER, 0); 182 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DIEN, 0xff); 183 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN0, 184 0xff & ~(SIEN0_CMP | SIEN0_SEL | SIEN0_RSL)); 185 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN1, 186 0xff & ~(SIEN1_HTH | SIEN1_GEN)); 187 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2, 0); 188 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, STEST3_TE); 189 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STIME0, 190 (0xb << STIME0_SEL_SHIFT)); 191 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCID, 192 sc->sc_chan.chan_id | SCID_RRE); 193 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_RESPID0, 194 1 << sc->sc_chan.chan_id); 195 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DCNTL, 196 (sc->features & SF_CHIP_PF) ? 
DCNTL_COM | DCNTL_PFEN : DCNTL_COM); 197 if (sc->features & SF_CHIP_AAIP) 198 bus_space_write_1(sc->sc_rt, sc->sc_rh, 199 SIOP_AIPCNTL1, AIPCNTL1_DIS); 200 201 /* enable clock doubler or quadruler if appropriate */ 202 if (sc->features & (SF_CHIP_DBLR | SF_CHIP_QUAD)) { 203 stest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3); 204 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, 205 STEST1_DBLEN); 206 if (sc->features & SF_CHIP_QUAD) { 207 /* wait for PPL to lock */ 208 while ((bus_space_read_1(sc->sc_rt, sc->sc_rh, 209 SIOP_STEST4) & STEST4_LOCK) == 0) 210 delay(10); 211 } else { 212 /* data sheet says 20us - more won't hurt */ 213 delay(100); 214 } 215 /* halt scsi clock, select doubler/quad, restart clock */ 216 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, 217 stest3 | STEST3_HSC); 218 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, 219 STEST1_DBLEN | STEST1_DBLSEL); 220 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, stest3); 221 } else { 222 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, 0); 223 } 224 225 if (sc->features & SF_CHIP_USEPCIC) { 226 stest1 = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_STEST1); 227 stest1 |= STEST1_SCLK; 228 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, stest1); 229 } 230 231 if (sc->features & SF_CHIP_FIFO) 232 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5, 233 bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5) | 234 CTEST5_DFS); 235 if (sc->features & SF_CHIP_LED0) { 236 /* Set GPIO0 as output if software LED control is required */ 237 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL, 238 bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL) & 0xfe); 239 } 240 if (sc->features & SF_BUS_ULTRA3) { 241 /* reset SCNTL4 */ 242 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4, 0); 243 } 244 sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) & 245 STEST4_MODE_MASK; 246 247 /* 248 * initialise the RAM. 
Without this we may get scsi gross errors on 249 * the 1010 250 */ 251 if (sc->features & SF_CHIP_RAM) 252 bus_space_set_region_4(sc->sc_ramt, sc->sc_ramh, 253 0, 0, sc->ram_size / 4); 254 sc->sc_reset(sc); 255 } 256 257 /* prepare tables before sending a cmd */ 258 void 259 siop_setuptables(struct siop_common_cmd *siop_cmd) 260 { 261 int i; 262 struct siop_common_softc *sc = siop_cmd->siop_sc; 263 struct scsipi_xfer *xs = siop_cmd->xs; 264 int target = xs->xs_periph->periph_target; 265 int lun = xs->xs_periph->periph_lun; 266 int msgoffset = 1; 267 268 siop_cmd->siop_tables->id = siop_htoc32(sc, sc->targets[target]->id); 269 memset(siop_cmd->siop_tables->msg_out, 0, 270 sizeof(siop_cmd->siop_tables->msg_out)); 271 /* request sense doesn't disconnect */ 272 if (xs->xs_control & XS_CTL_REQSENSE) 273 siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0); 274 else if ((sc->features & SF_CHIP_GEBUG) && 275 (sc->targets[target]->flags & TARF_ISWIDE) == 0) 276 /* 277 * 1010 bug: it seems that the 1010 has problems with reselect 278 * when not in wide mode (generate false SCSI gross error). 279 * The FreeBSD sym driver has comments about it but their 280 * workaround (disable SCSI gross error reporting) doesn't 281 * work with my adapter. So disable disconnect when not 282 * wide. 
		 */
		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
	else
		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 1);
	if (xs->xs_tag_type != 0) {
		if ((sc->targets[target]->flags & TARF_TAG) == 0) {
			scsipi_printaddr(xs->xs_periph);
			printf(": tagged command type %d id %d\n",
			    siop_cmd->xs->xs_tag_type, siop_cmd->xs->xs_tag_id);
			panic("tagged command for non-tagging device");
		}
		siop_cmd->flags |= CMDFL_TAG;
		siop_cmd->siop_tables->msg_out[1] = siop_cmd->xs->xs_tag_type;
		/*
		 * use siop_cmd->tag not xs->xs_tag_id, caller may want a
		 * different one
		 */
		siop_cmd->siop_tables->msg_out[2] = siop_cmd->tag;
		msgoffset = 3;
	}
	siop_cmd->siop_tables->t_msgout.count = siop_htoc32(sc, msgoffset);
	/* if this target still needs negotiation, piggy-back the message */
	if (sc->targets[target]->status == TARST_ASYNC) {
		if ((sc->targets[target]->flags & TARF_DT) &&
		    (sc->mode == STEST4_MODE_LVD)) {
			/* DT transfers need PPR, and only work on a LVD bus */
			sc->targets[target]->status = TARST_PPR_NEG;
			siop_ppr_msg(siop_cmd, msgoffset, sc->dt_minsync,
			    sc->maxoff);
		} else if (sc->targets[target]->flags & TARF_WIDE) {
			sc->targets[target]->status = TARST_WIDE_NEG;
			siop_wdtr_msg(siop_cmd, msgoffset,
			    MSG_EXT_WDTR_BUS_16_BIT);
		} else if (sc->targets[target]->flags & TARF_SYNC) {
			sc->targets[target]->status = TARST_SYNC_NEG;
			/* offset is limited to 31 without PPR (Ultra/2) */
			siop_sdtr_msg(siop_cmd, msgoffset, sc->st_minsync,
			    (sc->maxoff > 31) ? 31 : sc->maxoff);
		} else {
			sc->targets[target]->status = TARST_OK;
			siop_update_xfer_mode(sc, target);
		}
	}
	siop_cmd->siop_tables->status =
	    siop_htoc32(sc, SCSI_SIOP_NOSTATUS); /* set invalid status */

	/* point the script at the CDB and the data segments */
	siop_cmd->siop_tables->cmd.count =
	    siop_htoc32(sc, siop_cmd->dmamap_cmd->dm_segs[0].ds_len);
	siop_cmd->siop_tables->cmd.addr =
	    siop_htoc32(sc, siop_cmd->dmamap_cmd->dm_segs[0].ds_addr);
	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
		for (i = 0; i < siop_cmd->dmamap_data->dm_nsegs; i++) {
			siop_cmd->siop_tables->data[i].count =
			    siop_htoc32(sc,
			    siop_cmd->dmamap_data->dm_segs[i].ds_len);
			siop_cmd->siop_tables->data[i].addr =
			    siop_htoc32(sc,
			    siop_cmd->dmamap_data->dm_segs[i].ds_addr);
		}
	}
}

/*
 * Handle a WDTR (wide transfer) message from the target.
 * Returns SIOP_NEG_MSGOUT when an answer/reject message has to be sent
 * back, SIOP_NEG_ACK when the message just has to be acknowledged.
 */
int
siop_wdtr_neg(struct siop_common_cmd *siop_cmd)
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct siop_common_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->xs_periph->periph_target;
	struct siop_common_xfer *tables = siop_cmd->siop_tables;

	if (siop_target->status == TARST_WIDE_NEG) {
		/* we initiated wide negotiation */
		switch (tables->msg_in[3]) {
		case MSG_EXT_WDTR_BUS_8_BIT:
			siop_target->flags &= ~TARF_ISWIDE;
			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
			break;
		case MSG_EXT_WDTR_BUS_16_BIT:
			if (siop_target->flags & TARF_WIDE) {
				siop_target->flags |= TARF_ISWIDE;
				sc->targets[target]->id |= (SCNTL3_EWS << 24);
				break;
			}
			/* FALLTHROUGH */
		default:
			/*
			 * hum, we got more than what we can handle, shouldn't
			 * happen. Reject, and stay async
			 */
			siop_target->flags &= ~TARF_ISWIDE;
			siop_target->status = TARST_OK;
			siop_target->offset = siop_target->period = 0;
			siop_update_xfer_mode(sc, target);
			printf("%s: rejecting invalid wide negotiation from "
			    "target %d (%d)\n", device_xname(sc->sc_dev),
			    target,
			    tables->msg_in[3]);
			tables->t_msgout.count = siop_htoc32(sc, 1);
			tables->msg_out[0] = MSG_MESSAGE_REJECT;
			return SIOP_NEG_MSGOUT;
		}
		/* byte 3 of the per-target id word is the SCNTL3 value */
		tables->id = siop_htoc32(sc, sc->targets[target]->id);
		bus_space_write_1(sc->sc_rt, sc->sc_rh,
		    SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		/* we now need to do sync */
		if (siop_target->flags & TARF_SYNC) {
			siop_target->status = TARST_SYNC_NEG;
			siop_sdtr_msg(siop_cmd, 0, sc->st_minsync,
			    (sc->maxoff > 31) ? 31 : sc->maxoff);
			return SIOP_NEG_MSGOUT;
		} else {
			siop_target->status = TARST_OK;
			siop_update_xfer_mode(sc, target);
			return SIOP_NEG_ACK;
		}
	} else {
		/* target initiated wide negotiation */
		if (tables->msg_in[3] >= MSG_EXT_WDTR_BUS_16_BIT
		    && (siop_target->flags & TARF_WIDE)) {
			siop_target->flags |= TARF_ISWIDE;
			sc->targets[target]->id |= SCNTL3_EWS << 24;
		} else {
			siop_target->flags &= ~TARF_ISWIDE;
			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
		}
		tables->id = siop_htoc32(sc, sc->targets[target]->id);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		/*
		 * we did reset wide parameters, so fall back to async,
		 * but don't schedule a sync neg, target should initiate it
		 */
		siop_target->status = TARST_OK;
		siop_target->offset = siop_target->period = 0;
		siop_update_xfer_mode(sc, target);
		siop_wdtr_msg(siop_cmd, 0, (siop_target->flags & TARF_ISWIDE) ?
		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT);
		return SIOP_NEG_MSGOUT;
	}
}

/*
 * Handle a PPR (parallel protocol request, used for DT transfers) answer
 * from the target.  Same return convention as siop_wdtr_neg().
 */
int
siop_ppr_neg(struct siop_common_cmd *siop_cmd)
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct siop_common_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->xs_periph->periph_target;
	struct siop_common_xfer *tables = siop_cmd->siop_tables;
	int sync, offset, options, scf = 0;
	int i;

#ifdef DEBUG_NEG
	printf("%s: answer on ppr negotiation:", device_xname(sc->sc_dev));
	for (i = 0; i < 8; i++)
		printf(" 0x%x", tables->msg_in[i]);
	printf("\n");
#endif

	if (siop_target->status == TARST_PPR_NEG) {
		/* we initiated PPR negotiation */
		sync = tables->msg_in[3];
		offset = tables->msg_in[5];
		options = tables->msg_in[7];
		if (options != MSG_EXT_PPR_DT) {
			/* shouldn't happen */
			printf("%s: ppr negotiation for target %d: "
			    "no DT option\n", device_xname(sc->sc_dev), target);
			siop_target->status = TARST_ASYNC;
			siop_target->flags &= ~(TARF_DT | TARF_ISDT);
			siop_target->offset = 0;
			siop_target->period = 0;
			goto reject;
		}

		if (offset > sc->maxoff || sync < sc->dt_minsync ||
		    sync > sc->dt_maxsync) {
			printf("%s: ppr negotiation for target %d: "
			    "offset (%d) or sync (%d) out of range\n",
			    device_xname(sc->sc_dev), target, offset, sync);
			/* should not happen */
			siop_target->offset = 0;
			siop_target->period = 0;
			goto reject;
		} else {
			/* look the period up in the DT clock-scale table */
			for (i = 0; i < __arraycount(dt_scf_period); i++) {
				if (sc->clock_period != dt_scf_period[i].clock)
					continue;
				if (dt_scf_period[i].period == sync) {
					/* ok, found it. we now are sync.
					 */
					siop_target->offset = offset;
					siop_target->period = sync;
					scf = dt_scf_period[i].scf;
					siop_target->flags |= TARF_ISDT;
				}
			}
			if ((siop_target->flags & TARF_ISDT) == 0) {
				printf("%s: ppr negotiation for target %d: "
				    "sync (%d) incompatible with adapter\n",
				    device_xname(sc->sc_dev), target, sync);
				/*
				 * we didn't find it in our table, do async
				 * send reject msg, start SDTR/WDTR neg
				 */
				siop_target->status = TARST_ASYNC;
				siop_target->flags &= ~(TARF_DT | TARF_ISDT);
				siop_target->offset = 0;
				siop_target->period = 0;
				goto reject;
			}
		}
		if (tables->msg_in[6] != 1) {
			printf("%s: ppr negotiation for target %d: "
			    "transfer width (%d) incompatible with dt\n",
			    device_xname(sc->sc_dev),
			    target, tables->msg_in[6]);
			/* DT mode can only be done with wide transfers */
			siop_target->status = TARST_ASYNC;
			goto reject;
		}
		/*
		 * Negotiation succeeded: pack the new parameters into the
		 * per-target id word (byte 3 = SCNTL3, byte 1 = SXFER,
		 * byte 0 = SCNTL4) and program the chip accordingly.
		 */
		siop_target->flags |= TARF_ISWIDE;
		sc->targets[target]->id |= (SCNTL3_EWS << 24);
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id |= scf << (24 + SCNTL3_SCF_SHIFT);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
		sc->targets[target]->id |=
		    (siop_target->offset & SXFER_MO_MASK) << 8;
		sc->targets[target]->id &= ~0xff;
		sc->targets[target]->id |= SCNTL4_U3EN;
		siop_target->status = TARST_OK;
		siop_update_xfer_mode(sc, target);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
		    (sc->targets[target]->id >> 8) & 0xff);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4,
		    sc->targets[target]->id & 0xff);
		return SIOP_NEG_ACK;
	} else {
		/* target initiated PPR negotiation, shouldn't happen */
		printf("%s: rejecting invalid PPR negotiation from "
		    "target %d\n", device_xname(sc->sc_dev), target);
reject:
		tables->t_msgout.count = siop_htoc32(sc, 1);
		tables->msg_out[0] = MSG_MESSAGE_REJECT;
		return SIOP_NEG_MSGOUT;
	}
}

/*
 * Handle a SDTR (sync transfer) message from the target.
 * Same return convention as siop_wdtr_neg().
 */
int
siop_sdtr_neg(struct siop_common_cmd *siop_cmd)
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct siop_common_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->xs_periph->periph_target;
	int sync, maxoffset, offset, i;
	int send_msgout = 0;
	struct siop_common_xfer *tables = siop_cmd->siop_tables;

	/* limit to Ultra/2 parameters, need PPR for Ultra/3 */
	maxoffset = (sc->maxoff > 31) ? 31 : sc->maxoff;

	sync = tables->msg_in[3];
	offset = tables->msg_in[4];

	if (siop_target->status == TARST_SYNC_NEG) {
		/* we initiated sync negotiation */
		siop_target->status = TARST_OK;
#ifdef DEBUG
		printf("sdtr: sync %d offset %d\n", sync, offset);
#endif
		if (offset > maxoffset || sync < sc->st_minsync ||
		    sync > sc->st_maxsync)
			goto reject;
		for (i = 0; i < __arraycount(scf_period); i++) {
			if (sc->clock_period != scf_period[i].clock)
				continue;
			if (scf_period[i].period == sync) {
				/* ok, found it. we now are sync. */
				siop_target->offset = offset;
				siop_target->period = sync;
				/* byte 3 of id is SCNTL3, byte 1 is SXFER */
				sc->targets[target]->id &=
				    ~(SCNTL3_SCF_MASK << 24);
				sc->targets[target]->id |= scf_period[i].scf
				    << (24 + SCNTL3_SCF_SHIFT);
				if (sync < 25 && /* Ultra */
				    (sc->features & SF_BUS_ULTRA3) == 0)
					sc->targets[target]->id |=
					    SCNTL3_ULTRA << 24;
				else
					sc->targets[target]->id &=
					    ~(SCNTL3_ULTRA << 24);
				sc->targets[target]->id &=
				    ~(SXFER_MO_MASK << 8);
				sc->targets[target]->id |=
				    (offset & SXFER_MO_MASK) << 8;
				sc->targets[target]->id &= ~0xff; /* scntl4 */
				goto end;
			}
		}
		/*
		 * we didn't find it in our table, do async and send reject
		 * msg
		 */
reject:
		send_msgout = 1;
		tables->t_msgout.count = siop_htoc32(sc, 1);
		tables->msg_out[0] = MSG_MESSAGE_REJECT;
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
		sc->targets[target]->id &= ~0xff; /* scntl4 */
		siop_target->offset = siop_target->period = 0;
	} else { /* target initiated sync neg */
#ifdef DEBUG
		printf("sdtr (target): sync %d offset %d\n", sync, offset);
#endif
		if (offset == 0 || sync > sc->st_maxsync) { /* async */
			goto async;
		}
		/* clamp the target's request to what we can do */
		if (offset > maxoffset)
			offset = maxoffset;
		if (sync < sc->st_minsync)
			sync = sc->st_minsync;
		/* look for sync period */
		for (i = 0; i < __arraycount(scf_period); i++) {
			if (sc->clock_period != scf_period[i].clock)
				continue;
			if (scf_period[i].period == sync) {
				/* ok, found it. we now are sync. */
				siop_target->offset = offset;
				siop_target->period = sync;
				sc->targets[target]->id &=
				    ~(SCNTL3_SCF_MASK << 24);
				sc->targets[target]->id |= scf_period[i].scf
				    << (24 + SCNTL3_SCF_SHIFT);
				if (sync < 25 && /* Ultra */
				    (sc->features & SF_BUS_ULTRA3) == 0)
					sc->targets[target]->id |=
					    SCNTL3_ULTRA << 24;
				else
					sc->targets[target]->id &=
					    ~(SCNTL3_ULTRA << 24);
				sc->targets[target]->id &=
				    ~(SXFER_MO_MASK << 8);
				sc->targets[target]->id |=
				    (offset & SXFER_MO_MASK) << 8;
				sc->targets[target]->id &= ~0xff; /* scntl4 */
				siop_sdtr_msg(siop_cmd, 0, sync, offset);
				send_msgout = 1;
				goto end;
			}
		}
async:
		/* fall back to async and tell the target so */
		siop_target->offset = siop_target->period = 0;
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
		sc->targets[target]->id &= ~0xff; /* scntl4 */
		siop_sdtr_msg(siop_cmd, 0, 0, 0);
		send_msgout = 1;
	}
end:
	if (siop_target->status == TARST_OK)
		siop_update_xfer_mode(sc, target);
#ifdef DEBUG
	printf("id now 0x%x\n", sc->targets[target]->id);
#endif
	tables->id = siop_htoc32(sc, sc->targets[target]->id);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
	    (sc->targets[target]->id >> 24) & 0xff);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
	    (sc->targets[target]->id >> 8) & 0xff);
	if (send_msgout) {
		return SIOP_NEG_MSGOUT;
	} else {
		return SIOP_NEG_ACK;
	}
}

/*
 * Build an extended SDTR message at the given byte offset in msg_out.
 */
void
siop_sdtr_msg(struct siop_common_cmd *siop_cmd, int offset, int ssync, int soff)
{

	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_SDTR_LEN;
	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_SDTR;
	siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
	siop_cmd->siop_tables->msg_out[offset + 4] = soff;
	siop_cmd->siop_tables->t_msgout.count =
	    siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_SDTR_LEN + 2);
}

/*
 * Build an extended WDTR message at the given byte offset in msg_out.
 */
void
siop_wdtr_msg(struct siop_common_cmd *siop_cmd, int offset, int wide)
{

	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_WDTR_LEN;
	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_WDTR;
	siop_cmd->siop_tables->msg_out[offset + 3] = wide;
	siop_cmd->siop_tables->t_msgout.count =
	    siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_WDTR_LEN + 2);
}

/*
 * Build an extended PPR message (requesting DT, wide transfers) at the
 * given byte offset in msg_out.
 */
void
siop_ppr_msg(struct siop_common_cmd *siop_cmd, int offset, int ssync, int soff)
{

	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_PPR_LEN;
	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_PPR;
	siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
	siop_cmd->siop_tables->msg_out[offset + 4] = 0; /* reserved */
	siop_cmd->siop_tables->msg_out[offset + 5] = soff;
	siop_cmd->siop_tables->msg_out[offset + 6] = 1; /* wide */
	siop_cmd->siop_tables->msg_out[offset + 7] = MSG_EXT_PPR_DT;
	siop_cmd->siop_tables->t_msgout.count =
	    siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_PPR_LEN + 2);
}

/* adapt_minphys hook: no driver-specific limit beyond the default */
void
siop_minphys(struct buf *bp)
{

	minphys(bp);
}

/* adapter ioctl entry point; only SCBUSIORESET is supported */
int
siop_ioctl(struct scsipi_channel *chan, u_long cmd, void *arg,
    int flag, struct proc *p)
{
	struct siop_common_softc *sc;

	sc = device_private(chan->chan_adapter->adapt_dev);

	switch (cmd) {
	case SCBUSIORESET:
		/*
		 * abort the script. This will trigger an interrupt, which will
		 * trigger a bus reset.
		 * We can't safely trigger the reset here as we can't access
		 * the required register while the script is running.
		 */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_ABRT);
		return (0);
	default:
		return (ENOTTY);
	}
}

/*
 * Phase-mismatch handler: compute how many bytes of the current data
 * table were left untransferred and record them in siop_cmd->resid
 * (CMDFL_RESID) for siop_sdp()/siop_update_resid() to consume.
 */
void
siop_ma(struct siop_common_cmd *siop_cmd)
{
	int offset, dbc, sstat;
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	scr_table_t *table; /* table with partial xfer */

	/*
	 * compute how much of the current table didn't get handled when
	 * a phase mismatch occurs
	 */
	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
	    == 0)
		return; /* no valid data transfer */

	/* the script keeps the current data table index in SCRATCHA1 */
	offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
	if (offset >= SIOP_NSG) {
		aprint_error_dev(sc->sc_dev, "bad offset in siop_sdp (%d)\n",
		    offset);
		return;
	}
	table = &siop_cmd->siop_tables->data[offset];
#ifdef DEBUG_DR
	printf("siop_ma: offset %d count=%d addr=0x%x ", offset,
	    table->count, table->addr);
#endif
	/* DBC holds the byte count not yet moved by the DMA engine */
	dbc = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DBC) & 0x00ffffff;
	if (siop_cmd->xs->xs_control & XS_CTL_DATA_OUT) {
		if (sc->features & SF_CHIP_DFBC) {
			dbc +=
			    bus_space_read_2(sc->sc_rt, sc->sc_rh, SIOP_DFBC);
		} else {
			/* need to account stale data in FIFO */
			int dfifo =
			    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_DFIFO);
			if (sc->features & SF_CHIP_FIFO) {
				/* large (10-bit) FIFO counter */
				dfifo |= (bus_space_read_1(sc->sc_rt, sc->sc_rh,
				    SIOP_CTEST5) & CTEST5_BOMASK) << 8;
				dbc += (dfifo - (dbc & 0x3ff)) & 0x3ff;
			} else {
				dbc += (dfifo - (dbc & 0x7f)) & 0x7f;
			}
		}
		/* bytes still latched in the SCSI output registers */
		sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SSTAT0);
		if (sstat & SSTAT0_OLF)
			dbc++;
		if ((sstat & SSTAT0_ORF) && (sc->features & SF_CHIP_DFBC) == 0)
			dbc++;
		if (siop_cmd->siop_target->flags & TARF_ISWIDE) {
			sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh,
			    SIOP_SSTAT2);
			if (sstat & SSTAT2_OLF1)
				dbc++;
			if ((sstat & SSTAT2_ORF1) &&
			    (sc->features & SF_CHIP_DFBC) == 0)
				dbc++;
		}
		/* clear the FIFO */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) |
		    CTEST3_CLF);
	}
	siop_cmd->flags |= CMDFL_RESID;
	siop_cmd->resid = dbc;
}

/*
 * Handle a SCSI "save data pointer" at data table index 'offset':
 * account for what has been transferred and rewrite the data tables so
 * they describe only the data still to be moved.
 */
void
siop_sdp(struct siop_common_cmd *siop_cmd, int offset)
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	scr_table_t *table;

	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
	    == 0)
		return; /* no data pointers to save */

	/*
	 * offset == SIOP_NSG may be a valid condition if we get a Save data
	 * pointer when the xfer is done. Just ignore the Save data pointer
	 * in this case
	 */
	if (offset == SIOP_NSG)
		return;
#ifdef DIAGNOSTIC
	if (offset > SIOP_NSG) {
		scsipi_printaddr(siop_cmd->xs->xs_periph);
		printf(": offset %d > %d\n", offset, SIOP_NSG);
		panic("siop_sdp: offset");
	}
#endif
	/*
	 * Save data pointer. We do this by adjusting the tables to point
	 * at the beginning of the data not yet transferred.
	 * offset points to the first table with untransferred data.
	 */

	/*
	 * before doing that we decrease resid from the amount of data which
	 * has been transferred.
	 */
	siop_update_resid(siop_cmd, offset);

	/*
	 * First let see if we have a resid from a phase mismatch. If so,
	 * we have to adjust the table at offset to remove transferred data.
	 */
	if (siop_cmd->flags & CMDFL_RESID) {
		siop_cmd->flags &= ~CMDFL_RESID;
		table = &siop_cmd->siop_tables->data[offset];
		/* "cut" already transferred data from this table */
		table->addr =
		    siop_htoc32(sc, siop_ctoh32(sc, table->addr) +
		    siop_ctoh32(sc, table->count) - siop_cmd->resid);
		table->count = siop_htoc32(sc, siop_cmd->resid);
	}

	/*
	 * now we can remove entries which have been transferred.
	 * We just move the entries with data left at the beginning of the
	 * tables
	 */
	memmove(&siop_cmd->siop_tables->data[0],
	    &siop_cmd->siop_tables->data[offset],
	    (SIOP_NSG - offset) * sizeof(scr_table_t));
}

/*
 * Subtract the data already transferred (all tables before 'offset',
 * plus the partial table recorded by siop_ma(), if any) from xs->resid.
 */
void
siop_update_resid(struct siop_common_cmd *siop_cmd, int offset)
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	scr_table_t *table;
	int i;

	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
	    == 0)
		return; /* no data to transfer */

	/*
	 * update resid. First account for the table entries which have
	 * been fully completed.
	 */
	for (i = 0; i < offset; i++)
		siop_cmd->xs->resid -=
		    siop_ctoh32(sc, siop_cmd->siop_tables->data[i].count);
	/*
	 * if CMDFL_RESID is set, the last table (pointed by offset) is a
	 * partial transfer. If not, offset points to the entry following
	 * the last full transfer.
	 */
	if (siop_cmd->flags & CMDFL_RESID) {
		table = &siop_cmd->siop_tables->data[offset];
		siop_cmd->xs->resid -=
		    siop_ctoh32(sc, table->count) - siop_cmd->resid;
	}
}

/*
 * Handle an "ignore wide residue" message from a wide target: one byte
 * of the last word transferred is to be discarded.  Returns
 * SIOP_NEG_MSGOUT (reject, target isn't wide) or SIOP_NEG_ACK.
 */
int
siop_iwr(struct siop_common_cmd *siop_cmd)
{
	int offset;
	scr_table_t *table; /* table with IWR */
	struct siop_common_softc *sc = siop_cmd->siop_sc;

	/* handle ignore wide residue messages */

	/* if target isn't wide, reject */
	if ((siop_cmd->siop_target->flags & TARF_ISWIDE) == 0) {
		siop_cmd->siop_tables->t_msgout.count = siop_htoc32(sc, 1);
		siop_cmd->siop_tables->msg_out[0] = MSG_MESSAGE_REJECT;
		return SIOP_NEG_MSGOUT;
	}
	/* get index of current command in table */
	offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
	/*
	 * if the current table did complete, we're now pointing at the
	 * next one. Go back one if we didn't see a phase mismatch.
	 */
	if ((siop_cmd->flags & CMDFL_RESID) == 0)
		offset--;
	table = &siop_cmd->siop_tables->data[offset];

	if ((siop_cmd->flags & CMDFL_RESID) == 0) {
		if (siop_ctoh32(sc, table->count) & 1) {
			/* we really got the number of bytes we expected */
			return SIOP_NEG_ACK;
		} else {
			/*
			 * now we really had a short xfer, by one byte.
			 * handle it just as if we had a phase mismatch
			 * (there is a resid of one for this table).
			 * Update scratcha1 to reflect the fact that
			 * this xfer isn't complete.
			 */
			siop_cmd->flags |= CMDFL_RESID;
			siop_cmd->resid = 1;
			bus_space_write_1(sc->sc_rt, sc->sc_rh,
			    SIOP_SCRATCHA + 1, offset);
			return SIOP_NEG_ACK;
		}
	} else {
		/*
		 * we already have a short xfer for this table; it's
		 * just one byte less than we thought it was
		 */
		siop_cmd->resid--;
		return SIOP_NEG_ACK;
	}
}

/*
 * Force-drain the DMA FIFO (CTEST3_CLF), giving up after ~1ms if the
 * chip never reports the FIFO as flushed.
 */
void
siop_clearfifo(struct siop_common_softc *sc)
{
	int timeout = 0;
	int ctest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3);

#ifdef DEBUG_INTR
	printf("DMA fifo not empty !\n");
#endif
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
	    ctest3 | CTEST3_CLF);
	while ((bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) &
	    CTEST3_CLF) != 0) {
		delay(1);
		if (++timeout > 1000) {
			printf("clear fifo failed\n");
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
			    bus_space_read_1(sc->sc_rt, sc->sc_rh,
			    SIOP_CTEST3) & ~CTEST3_CLF);
			return;
		}
	}
}

/*
 * Handle a SCSI bus mode change (SBMC) interrupt: wait for DIFFSENSE to
 * stabilise, then reprogram STEST2 for the new SE/HVD/LVD bus mode.
 * Returns 1 when the mode was switched, 0 on failure/timeout.
 */
int
siop_modechange(struct siop_common_softc *sc)
{
	int retry;
	int sist0, sist1, stest2;

	for (retry = 0; retry < 5; retry++) {
		/*
		 * datasheet says to wait 100ms and re-read SIST1,
		 * to check that DIFFSENSE is stable.
		 * We may delay() 5 times for 100ms at interrupt time;
		 * hopefully this will not happen often.
		 */
		delay(100000);
		/* reading SIST0/SIST1 also acknowledges the interrupt */
		sist0 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST0);
		sist1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST1);
		if (sist1 & SIEN1_SBMC)
			continue; /* we got an irq again */
		sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
		    STEST4_MODE_MASK;
		stest2 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2);
		switch(sc->mode) {
		case STEST4_MODE_DIF:
			printf("%s: switching to differential mode\n",
			    device_xname(sc->sc_dev));
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 | STEST2_DIF);
			break;
		case STEST4_MODE_SE:
			printf("%s: switching to single-ended mode\n",
			    device_xname(sc->sc_dev));
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 & ~STEST2_DIF);
			break;
		case STEST4_MODE_LVD:
			printf("%s: switching to LVD mode\n",
			    device_xname(sc->sc_dev));
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 & ~STEST2_DIF);
			break;
		default:
			aprint_error_dev(sc->sc_dev, "invalid SCSI mode 0x%x\n",
			    sc->mode);
			return 0;
		}
		return 1;
	}
	printf("%s: timeout waiting for DIFFSENSE to stabilise\n",
	    device_xname(sc->sc_dev));
	return 0;
}

/*
 * Assert the SCSI bus reset line for 100us (spec minimum is 25us),
 * then release it.
 */
void
siop_resetbus(struct siop_common_softc *sc)
{
	int scntl1;

	scntl1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1,
	    scntl1 | SCNTL1_RST);
	/* minimum 25 us, more time won't hurt */
	delay(100);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, scntl1);
}

/*
 * Report the negotiated transfer mode (wide/sync/tagged queuing) for
 * 'target' to the scsipi layer via an async event.
 */
void
siop_update_xfer_mode(struct siop_common_softc *sc, int target)
{
	struct siop_common_target *siop_target = sc->targets[target];
	struct scsipi_xfer_mode xm;

	xm.xm_target = target;
	xm.xm_mode = 0;
	xm.xm_period = 0;
	xm.xm_offset = 0;

	if (siop_target->flags & TARF_ISWIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;
	if (siop_target->period) {
		xm.xm_period = siop_target->period;
		xm.xm_offset = siop_target->offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}
	if (siop_target->flags & TARF_TAG) {
		/* 1010 workaround: can't do disconnect if not wide, so can't do tag */
		if ((sc->features & SF_CHIP_GEBUG) == 0 ||
		    (sc->targets[target]->flags & TARF_ISWIDE))
			xm.xm_mode |= PERIPH_CAP_TQING;
	}

	scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, &xm);
}