/*	$NetBSD: siop.c,v 1.75 2004/05/17 20:12:34 bouyer Exp $	*/

/*
 * Copyright (c) 2000 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Manuel Bouyer.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/* SYM53c7/8xx PCI-SCSI I/O Processors driver */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: siop.c,v 1.75 2004/05/17 20:12:34 bouyer Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/kernel.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>
#include <machine/bus.h>

#include <dev/microcode/siop/siop.out>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>
#include <dev/scsipi/scsipi_all.h>

#include <dev/scsipi/scsiconf.h>

#include <dev/ic/siopreg.h>
#include <dev/ic/siopvar_common.h>
#include <dev/ic/siopvar.h>

#include "opt_siop.h"

#ifndef DEBUG
#undef DEBUG
#endif
#undef SIOP_DEBUG
#undef SIOP_DEBUG_DR
#undef SIOP_DEBUG_INTR
#undef SIOP_DEBUG_SCHED
#undef DUMP_SCRIPT

#define SIOP_STATS

#ifndef SIOP_DEFAULT_TARGET
#define SIOP_DEFAULT_TARGET 7
#endif

/* number of cmd descriptors per block */
#define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct siop_xfer))

/* Number of scheduler slots (needs to match the script) */
#define SIOP_NSLOTS 40

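/*
 * Illustration (the figures here are an assumption, not taken from the
 * headers): with a 4 KB PAGE_SIZE and a struct siop_xfer of, say, 256 bytes,
 * SIOP_NCMDPB would be 16, i.e. each page-sized block allocated by
 * siop_morecbd() below carries 16 command descriptors.  The real value
 * depends on sizeof(struct siop_xfer).
 */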
void	siop_reset __P((struct siop_softc *));
void	siop_handle_reset __P((struct siop_softc *));
int	siop_handle_qtag_reject __P((struct siop_cmd *));
void	siop_scsicmd_end __P((struct siop_cmd *));
void	siop_unqueue __P((struct siop_softc *, int, int));
static void	siop_start __P((struct siop_softc *, struct siop_cmd *));
void	siop_timeout __P((void *));
int	siop_scsicmd __P((struct scsipi_xfer *));
void	siop_scsipi_request __P((struct scsipi_channel *,
		scsipi_adapter_req_t, void *));
void	siop_dump_script __P((struct siop_softc *));
void	siop_morecbd __P((struct siop_softc *));
struct siop_lunsw *siop_get_lunsw __P((struct siop_softc *));
void	siop_add_reselsw __P((struct siop_softc *, int));
void	siop_update_scntl3 __P((struct siop_softc *,
		struct siop_common_target *));

#ifdef SIOP_STATS
static int siop_stat_intr = 0;
static int siop_stat_intr_shortxfer = 0;
static int siop_stat_intr_sdp = 0;
static int siop_stat_intr_done = 0;
static int siop_stat_intr_xferdisc = 0;
static int siop_stat_intr_lunresel = 0;
static int siop_stat_intr_qfull = 0;
void siop_printstats __P((void));
#define INCSTAT(x) x++
#else
#define INCSTAT(x)
#endif

static __inline__ void siop_script_sync __P((struct siop_softc *, int));
static __inline__ void
siop_script_sync(sc, ops)
	struct siop_softc *sc;
	int ops;
{
	if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
		bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
		    PAGE_SIZE, ops);
}

static __inline__ u_int32_t siop_script_read __P((struct siop_softc *, u_int));
static __inline__ u_int32_t
siop_script_read(sc, offset)
	struct siop_softc *sc;
	u_int offset;
{
	if (sc->sc_c.features & SF_CHIP_RAM) {
		return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
		    offset * 4);
	} else {
		return le32toh(sc->sc_c.sc_script[offset]);
	}
}

static __inline__ void siop_script_write __P((struct siop_softc *, u_int,
	u_int32_t));
static __inline__ void
siop_script_write(sc, offset, val)
	struct siop_softc *sc;
	u_int offset;
	u_int32_t val;
{
	if (sc->sc_c.features & SF_CHIP_RAM) {
		bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
		    offset * 4, val);
	} else {
		sc->sc_c.sc_script[offset] = htole32(val);
	}
}

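/*
 * Note on units: siop_script_read()/siop_script_write() take an offset
 * counted in 32-bit script words, hence the "offset * 4" for the
 * byte-addressed bus_space case and the plain array index for the in-memory
 * copy.  The Ent_* labels from the included siop.out are byte offsets,
 * which is why callers divide them by 4 before using them here.
 */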
void
siop_attach(sc)
	struct siop_softc *sc;
{
	if (siop_common_attach(&sc->sc_c) != 0)
		return;

	TAILQ_INIT(&sc->free_list);
	TAILQ_INIT(&sc->cmds);
	TAILQ_INIT(&sc->lunsw_list);
	sc->sc_currschedslot = 0;
#ifdef SIOP_DEBUG
	printf("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
	    sc->sc_c.sc_dev.dv_xname, (int)sizeof(siop_script),
	    (u_int32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
#endif

	sc->sc_c.sc_adapt.adapt_max_periph = SIOP_NTAG - 1;
	sc->sc_c.sc_adapt.adapt_request = siop_scsipi_request;

	/* Do a bus reset, so that devices fall back to narrow/async */
	siop_resetbus(&sc->sc_c);
	/*
	 * siop_reset() will reset the chip, thus clearing pending interrupts
	 */
	siop_reset(sc);
#ifdef DUMP_SCRIPT
	siop_dump_script(sc);
#endif

	config_found((struct device*)sc, &sc->sc_c.sc_chan, scsiprint);
}

void
siop_reset(sc)
	struct siop_softc *sc;
{
	int i, j;
	struct siop_lunsw *lunsw;

	siop_common_reset(&sc->sc_c);

	/* copy and patch the script */
	if (sc->sc_c.features & SF_CHIP_RAM) {
		bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
		    siop_script, sizeof(siop_script) / sizeof(siop_script[0]));
		for (j = 0; j <
		    (sizeof(E_abs_msgin_Used) / sizeof(E_abs_msgin_Used[0]));
		    j++) {
			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
			    E_abs_msgin_Used[j] * 4,
			    sc->sc_c.sc_scriptaddr + Ent_msgin_space);
		}
		if (sc->sc_c.features & SF_CHIP_LED0) {
			bus_space_write_region_4(sc->sc_c.sc_ramt,
			    sc->sc_c.sc_ramh,
			    Ent_led_on1, siop_led_on,
			    sizeof(siop_led_on) / sizeof(siop_led_on[0]));
			bus_space_write_region_4(sc->sc_c.sc_ramt,
			    sc->sc_c.sc_ramh,
			    Ent_led_on2, siop_led_on,
			    sizeof(siop_led_on) / sizeof(siop_led_on[0]));
			bus_space_write_region_4(sc->sc_c.sc_ramt,
			    sc->sc_c.sc_ramh,
			    Ent_led_off, siop_led_off,
			    sizeof(siop_led_off) / sizeof(siop_led_off[0]));
		}
	} else {
		for (j = 0;
		    j < (sizeof(siop_script) / sizeof(siop_script[0])); j++) {
			sc->sc_c.sc_script[j] = htole32(siop_script[j]);
		}
		for (j = 0; j <
		    (sizeof(E_abs_msgin_Used) / sizeof(E_abs_msgin_Used[0]));
		    j++) {
			sc->sc_c.sc_script[E_abs_msgin_Used[j]] =
			    htole32(sc->sc_c.sc_scriptaddr + Ent_msgin_space);
		}
		if (sc->sc_c.features & SF_CHIP_LED0) {
			for (j = 0; j < (sizeof(siop_led_on) /
			    sizeof(siop_led_on[0])); j++)
				sc->sc_c.sc_script[
				    Ent_led_on1 / sizeof(siop_led_on[0]) + j
				    ] = htole32(siop_led_on[j]);
			for (j = 0; j < (sizeof(siop_led_on) /
			    sizeof(siop_led_on[0])); j++)
				sc->sc_c.sc_script[
				    Ent_led_on2 / sizeof(siop_led_on[0]) + j
				    ] = htole32(siop_led_on[j]);
			for (j = 0; j < (sizeof(siop_led_off) /
			    sizeof(siop_led_off[0])); j++)
				sc->sc_c.sc_script[
				    Ent_led_off / sizeof(siop_led_off[0]) + j
				    ] = htole32(siop_led_off[j]);
		}
	}
	sc->script_free_lo = sizeof(siop_script) / sizeof(siop_script[0]);
	sc->script_free_hi = sc->sc_c.ram_size / 4;
	sc->sc_ntargets = 0;

	/* free used and unused lun switches */
	while((lunsw = TAILQ_FIRST(&sc->lunsw_list)) != NULL) {
#ifdef SIOP_DEBUG
		printf("%s: free lunsw at offset %d\n",
		    sc->sc_c.sc_dev.dv_xname, lunsw->lunsw_off);
#endif
		TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
		free(lunsw, M_DEVBUF);
	}
	TAILQ_INIT(&sc->lunsw_list);
	/* restore reselect switch */
	for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
		struct siop_target *target;
		if (sc->sc_c.targets[i] == NULL)
			continue;
#ifdef SIOP_DEBUG
		printf("%s: restore sw for target %d\n",
		    sc->sc_c.sc_dev.dv_xname, i);
#endif
		target = (struct siop_target *)sc->sc_c.targets[i];
		free(target->lunsw, M_DEVBUF);
		target->lunsw = siop_get_lunsw(sc);
		if (target->lunsw == NULL) {
			printf("%s: can't alloc lunsw for target %d\n",
			    sc->sc_c.sc_dev.dv_xname, i);
			break;
		}
		siop_add_reselsw(sc, i);
	}

	/* start script */
	if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
		bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
		    PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
	    sc->sc_c.sc_scriptaddr + Ent_reselect);
}

#if 0
#define CALL_SCRIPT(ent) do {\
	printf ("start script DSA 0x%lx DSP 0x%lx\n", \
	    siop_cmd->cmd_c.dsa, \
	    sc->sc_c.sc_scriptaddr + ent); \
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
} while (0)
#else
#define CALL_SCRIPT(ent) do {\
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
} while (0)
#endif

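/*
 * Interrupt handler overview (a summary of the code below): read ISTAT to
 * see whether the DMA core (ISTAT_DIP -> DSTAT) and/or the SCSI core
 * (ISTAT_SIP -> SIST0/1, SSTAT1) is posting an interrupt, locate the active
 * siop_cmd from the DSA register, handle fatal conditions by resetting the
 * bus, and finally dispatch script-generated interrupts (DSTAT_SIR) on the
 * A_int_* code found in DSPS.
 */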
int
siop_intr(v)
	void *v;
{
	struct siop_softc *sc = v;
	struct siop_target *siop_target;
	struct siop_cmd *siop_cmd;
	struct siop_lun *siop_lun;
	struct scsipi_xfer *xs;
	int istat, sist, sstat1, dstat = 0; /* XXX: gcc */
	u_int32_t irqcode;
	int need_reset = 0;
	int offset, target, lun, tag;
	bus_addr_t dsa;
	struct siop_cbd *cbdp;
	int freetarget = 0;
	int restart = 0;

	istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
	if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0)
		return 0;
	INCSTAT(siop_stat_intr);
	if (istat & ISTAT_INTF) {
		printf("INTRF\n");
		bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_ISTAT, ISTAT_INTF);
	}
	if ((istat &(ISTAT_DIP | ISTAT_SIP | ISTAT_ABRT)) ==
	    (ISTAT_DIP | ISTAT_ABRT)) {
		/* clear abort */
		bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_ISTAT, 0);
	}
	/* use DSA to find the current siop_cmd */
	siop_cmd = NULL;
	dsa = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA);
	TAILQ_FOREACH(cbdp, &sc->cmds, next) {
		if (dsa >= cbdp->xferdma->dm_segs[0].ds_addr &&
		    dsa < cbdp->xferdma->dm_segs[0].ds_addr + PAGE_SIZE) {
			dsa -= cbdp->xferdma->dm_segs[0].ds_addr;
			siop_cmd = &cbdp->cmds[dsa / sizeof(struct siop_xfer)];
			siop_table_sync(siop_cmd,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			break;
		}
	}
	if (siop_cmd) {
		xs = siop_cmd->cmd_c.xs;
		siop_target = (struct siop_target *)siop_cmd->cmd_c.siop_target;
		target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
		lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
		tag = siop_cmd->cmd_c.tag;
		siop_lun = siop_target->siop_lun[lun];
#ifdef DIAGNOSTIC
		if (siop_cmd->cmd_c.status != CMDST_ACTIVE) {
			printf("siop_cmd (lun %d) for DSA 0x%x "
			    "not active (%d)\n", lun, (u_int)dsa,
			    siop_cmd->cmd_c.status);
			xs = NULL;
			siop_target = NULL;
			target = -1;
			lun = -1;
			tag = -1;
			siop_lun = NULL;
			siop_cmd = NULL;
		} else if (siop_lun->siop_tag[tag].active != siop_cmd) {
			printf("siop_cmd (lun %d tag %d) not in siop_lun "
			    "active (%p != %p)\n", lun, tag, siop_cmd,
			    siop_lun->siop_tag[tag].active);
		}
#endif
	} else {
		xs = NULL;
		siop_target = NULL;
		target = -1;
		lun = -1;
		tag = -1;
		siop_lun = NULL;
	}
	if (istat & ISTAT_DIP) {
		dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_DSTAT);
		if (dstat & DSTAT_ABRT) {
			/* was probably generated by a bus reset IOCTL */
			if ((dstat & DSTAT_DFE) == 0)
				siop_clearfifo(&sc->sc_c);
			goto reset;
		}
		if (dstat & DSTAT_SSI) {
			printf("single step dsp 0x%08x dsa 0x%08x\n",
			    (int)(bus_space_read_4(sc->sc_c.sc_rt,
			    sc->sc_c.sc_rh, SIOP_DSP) -
			    sc->sc_c.sc_scriptaddr),
			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			    SIOP_DSA));
			if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
			    (istat & ISTAT_SIP) == 0) {
				bus_space_write_1(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh, SIOP_DCNTL,
				    bus_space_read_1(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
			}
			return 1;
		}

		if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
			printf("DMA IRQ:");
			if (dstat & DSTAT_IID)
				printf(" Illegal instruction");
			if (dstat & DSTAT_BF)
				printf(" bus fault");
			if (dstat & DSTAT_MDPE)
				printf(" parity");
			if (dstat & DSTAT_DFE)
				printf(" DMA fifo empty");
			else
				siop_clearfifo(&sc->sc_c);
			printf(", DSP=0x%x DSA=0x%x: ",
			    (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			    SIOP_DSP) - sc->sc_c.sc_scriptaddr),
			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
			if (siop_cmd)
				printf("last msg_in=0x%x status=0x%x\n",
				    siop_cmd->cmd_tables->msg_in[0],
				    le32toh(siop_cmd->cmd_tables->status));
			else
				printf("%s: current DSA invalid\n",
				    sc->sc_c.sc_dev.dv_xname);
			need_reset = 1;
		}
	}
	if (istat & ISTAT_SIP) {
		if (istat & ISTAT_DIP)
			delay(10);
		/*
		 * Can't read sist0 & sist1 independently, or we have to
		 * insert delay
		 */
		sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_SIST0);
		sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_SSTAT1);
#ifdef SIOP_DEBUG_INTR
		printf("scsi interrupt, sist=0x%x sstat1=0x%x "
		    "DSA=0x%x DSP=0x%lx\n", sist,
		    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_SSTAT1),
		    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
		    (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_DSP) -
		    sc->sc_c.sc_scriptaddr));
#endif
		if (sist & SIST0_RST) {
			siop_handle_reset(sc);
			/* no table to flush here */
			return 1;
		}
		if (sist & SIST0_SGE) {
			if (siop_cmd)
				scsipi_printaddr(xs->xs_periph);
			else
				printf("%s:", sc->sc_c.sc_dev.dv_xname);
			printf("scsi gross error\n");
			goto reset;
		}
		if ((sist & SIST0_MA) && need_reset == 0) {
			if (siop_cmd) {
				int scratcha0;
				dstat = bus_space_read_1(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh, SIOP_DSTAT);
				/*
				 * first restore DSA, in case we were in a S/G
				 * operation.
				 */
				bus_space_write_4(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh,
				    SIOP_DSA, siop_cmd->cmd_c.dsa);
				scratcha0 = bus_space_read_1(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh, SIOP_SCRATCHA);
				switch (sstat1 & SSTAT1_PHASE_MASK) {
				case SSTAT1_PHASE_STATUS:
				/*
				 * previous phase may be aborted for any reason
				 * ( for example, the target has less data to
				 * transfer than requested). Compute resid and
				 * just go to status, the command should
				 * terminate.
				 */
					INCSTAT(siop_stat_intr_shortxfer);
					if (scratcha0 & A_flag_data)
						siop_ma(&siop_cmd->cmd_c);
					else if ((dstat & DSTAT_DFE) == 0)
						siop_clearfifo(&sc->sc_c);
					CALL_SCRIPT(Ent_status);
					return 1;
				case SSTAT1_PHASE_MSGIN:
				/*
				 * target may be ready to disconnect
				 * Compute resid which would be used later
				 * if a save data pointer is needed.
				 */
					INCSTAT(siop_stat_intr_xferdisc);
					if (scratcha0 & A_flag_data)
						siop_ma(&siop_cmd->cmd_c);
					else if ((dstat & DSTAT_DFE) == 0)
						siop_clearfifo(&sc->sc_c);
					bus_space_write_1(sc->sc_c.sc_rt,
					    sc->sc_c.sc_rh, SIOP_SCRATCHA,
					    scratcha0 & ~A_flag_data);
					CALL_SCRIPT(Ent_msgin);
					return 1;
				}
				printf("%s: unexpected phase mismatch %d\n",
				    sc->sc_c.sc_dev.dv_xname,
				    sstat1 & SSTAT1_PHASE_MASK);
			} else {
				printf("%s: phase mismatch without command\n",
				    sc->sc_c.sc_dev.dv_xname);
			}
			need_reset = 1;
		}
		if (sist & SIST0_PAR) {
			/* parity error, reset */
			if (siop_cmd)
				scsipi_printaddr(xs->xs_periph);
			else
				printf("%s:", sc->sc_c.sc_dev.dv_xname);
			printf("parity error\n");
			goto reset;
		}
		if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
			/* selection time out, assume there's no device here */
			if (siop_cmd) {
				siop_cmd->cmd_c.status = CMDST_DONE;
				xs->error = XS_SELTIMEOUT;
				freetarget = 1;
				goto end;
			} else {
				printf("%s: selection timeout without "
				    "command\n", sc->sc_c.sc_dev.dv_xname);
				need_reset = 1;
			}
		}
		if (sist & SIST0_UDC) {
			/*
			 * unexpected disconnect. Usually the target signals
			 * a fatal condition this way. Attempt to get sense.
			 */
			if (siop_cmd) {
				siop_cmd->cmd_tables->status =
				    htole32(SCSI_CHECK);
				goto end;
			}
			printf("%s: unexpected disconnect without "
			    "command\n", sc->sc_c.sc_dev.dv_xname);
			goto reset;
		}
		if (sist & (SIST1_SBMC << 8)) {
			/* SCSI bus mode change */
			if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
				goto reset;
			if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
				/*
				 * we have a script interrupt, it will
				 * restart the script.
				 */
				goto scintr;
			}
			/*
			 * else we have to restart it ourselves, at the
			 * interrupted instruction.
			 */
			bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			    SIOP_DSP,
			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			    SIOP_DSP) - 8);
			return 1;
		}
		/* Else it's an unhandled exception (for now). */
		printf("%s: unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
		    "DSA=0x%x DSP=0x%x\n", sc->sc_c.sc_dev.dv_xname, sist,
		    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_SSTAT1),
		    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
		    (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_DSP) - sc->sc_c.sc_scriptaddr));
		if (siop_cmd) {
			siop_cmd->cmd_c.status = CMDST_DONE;
			xs->error = XS_SELTIMEOUT;
			goto end;
		}
		need_reset = 1;
	}
	if (need_reset) {
reset:
		/* fatal error, reset the bus */
		siop_resetbus(&sc->sc_c);
		/* no table to flush here */
		return 1;
	}

scintr:
	if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
		irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_DSPS);
#ifdef SIOP_DEBUG_INTR
		printf("script interrupt 0x%x\n", irqcode);
#endif
		/*
		 * no command, or an inactive command is only valid for a
		 * reselect interrupt
		 */
		if ((irqcode & 0x80) == 0) {
			if (siop_cmd == NULL) {
				printf(
			"%s: script interrupt (0x%x) with invalid DSA !!!\n",
				    sc->sc_c.sc_dev.dv_xname, irqcode);
				goto reset;
			}
			if (siop_cmd->cmd_c.status != CMDST_ACTIVE) {
				printf("%s: command with invalid status "
				    "(IRQ code 0x%x current status %d) !\n",
				    sc->sc_c.sc_dev.dv_xname,
				    irqcode, siop_cmd->cmd_c.status);
				xs = NULL;
			}
		}
		switch(irqcode) {
		case A_int_err:
			printf("error, DSP=0x%x\n",
			    (int)(bus_space_read_4(sc->sc_c.sc_rt,
			    sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr));
			if (xs) {
				xs->error = XS_SELTIMEOUT;
				goto end;
			} else {
				goto reset;
			}
		case A_int_reseltarg:
			printf("%s: reselect with invalid target\n",
			    sc->sc_c.sc_dev.dv_xname);
			goto reset;
		case A_int_resellun:
			INCSTAT(siop_stat_intr_lunresel);
			target = bus_space_read_1(sc->sc_c.sc_rt,
			    sc->sc_c.sc_rh, SIOP_SCRATCHA) & 0xf;
			lun = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			    SIOP_SCRATCHA + 1);
			tag = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			    SIOP_SCRATCHA + 2);
			siop_target =
			    (struct siop_target *)sc->sc_c.targets[target];
			if (siop_target == NULL) {
				printf("%s: reselect with invalid target %d\n",
				    sc->sc_c.sc_dev.dv_xname, target);
				goto reset;
			}
			siop_lun = siop_target->siop_lun[lun];
			if (siop_lun == NULL) {
				printf("%s: target %d reselect with invalid "
				    "lun %d\n", sc->sc_c.sc_dev.dv_xname,
				    target, lun);
				goto reset;
			}
			if (siop_lun->siop_tag[tag].active == NULL) {
				printf("%s: target %d lun %d tag %d reselect "
				    "without command\n",
				    sc->sc_c.sc_dev.dv_xname,
				    target, lun, tag);
				goto reset;
			}
			siop_cmd = siop_lun->siop_tag[tag].active;
			bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			    SIOP_DSP, siop_cmd->cmd_c.dsa +
			    sizeof(struct siop_common_xfer) +
			    Ent_ldsa_reload_dsa);
			siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
			return 1;
		case A_int_reseltag:
			printf("%s: reselect with invalid tag\n",
			    sc->sc_c.sc_dev.dv_xname);
			goto reset;
		case A_int_msgin:
		{
			int msgin = bus_space_read_1(sc->sc_c.sc_rt,
			    sc->sc_c.sc_rh, SIOP_SFBR);
			if (msgin == MSG_MESSAGE_REJECT) {
				int msg, extmsg;
				if (siop_cmd->cmd_tables->msg_out[0] & 0x80) {
					/*
					 * message was part of an identify +
					 * something else. Identify shouldn't
					 * have been rejected.
					 */
					msg =
					    siop_cmd->cmd_tables->msg_out[1];
					extmsg =
					    siop_cmd->cmd_tables->msg_out[3];
				} else {
					msg = siop_cmd->cmd_tables->msg_out[0];
					extmsg =
					    siop_cmd->cmd_tables->msg_out[2];
				}
				if (msg == MSG_MESSAGE_REJECT) {
					/* MSG_REJECT for a MSG_REJECT !*/
					if (xs)
						scsipi_printaddr(xs->xs_periph);
					else
						printf("%s: ",
						    sc->sc_c.sc_dev.dv_xname);
					printf("our reject message was "
					    "rejected\n");
					goto reset;
				}
				if (msg == MSG_EXTENDED &&
				    extmsg == MSG_EXT_WDTR) {
					/* WDTR rejected, initiate sync */
					if ((siop_target->target_c.flags &
					    TARF_SYNC) == 0) {
						siop_target->target_c.status =
						    TARST_OK;
						siop_update_xfer_mode(&sc->sc_c,
						    target);
						/* no table to flush here */
						CALL_SCRIPT(Ent_msgin_ack);
						return 1;
					}
					siop_target->target_c.status =
					    TARST_SYNC_NEG;
					siop_sdtr_msg(&siop_cmd->cmd_c, 0,
					    sc->sc_c.st_minsync,
					    sc->sc_c.maxoff);
					siop_table_sync(siop_cmd,
					    BUS_DMASYNC_PREREAD |
					    BUS_DMASYNC_PREWRITE);
					CALL_SCRIPT(Ent_send_msgout);
					return 1;
				} else if (msg == MSG_EXTENDED &&
				    extmsg == MSG_EXT_SDTR) {
					/* sync rejected */
					siop_target->target_c.offset = 0;
					siop_target->target_c.period = 0;
					siop_target->target_c.status = TARST_OK;
					siop_update_xfer_mode(&sc->sc_c,
					    target);
					/* no table to flush here */
					CALL_SCRIPT(Ent_msgin_ack);
					return 1;
				} else if (msg == MSG_SIMPLE_Q_TAG ||
				    msg == MSG_HEAD_OF_Q_TAG ||
				    msg == MSG_ORDERED_Q_TAG) {
					if (siop_handle_qtag_reject(
					    siop_cmd) == -1)
						goto reset;
					CALL_SCRIPT(Ent_msgin_ack);
					return 1;
				}
				if (xs)
					scsipi_printaddr(xs->xs_periph);
				else
					printf("%s: ",
					    sc->sc_c.sc_dev.dv_xname);
				if (msg == MSG_EXTENDED) {
					printf("scsi message reject, extended "
					    "message sent was 0x%x\n", extmsg);
				} else {
					printf("scsi message reject, message "
					    "sent was 0x%x\n", msg);
				}
				/* no table to flush here */
				CALL_SCRIPT(Ent_msgin_ack);
				return 1;
			}
			if (msgin == MSG_IGN_WIDE_RESIDUE) {
			/* use the extmsgdata table to get the second byte */
				siop_cmd->cmd_tables->t_extmsgdata.count =
				    htole32(1);
				siop_table_sync(siop_cmd,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				CALL_SCRIPT(Ent_get_extmsgdata);
				return 1;
			}
			if (xs)
				scsipi_printaddr(xs->xs_periph);
			else
				printf("%s: ", sc->sc_c.sc_dev.dv_xname);
			printf("unhandled message 0x%x\n",
			    siop_cmd->cmd_tables->msg_in[0]);
			siop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
			siop_cmd->cmd_tables->t_msgout.count= htole32(1);
			siop_table_sync(siop_cmd,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			CALL_SCRIPT(Ent_send_msgout);
			return 1;
		}
		case A_int_extmsgin:
#ifdef SIOP_DEBUG_INTR
			printf("extended message: msg 0x%x len %d\n",
			    siop_cmd->cmd_tables->msg_in[2],
			    siop_cmd->cmd_tables->msg_in[1]);
#endif
			if (siop_cmd->cmd_tables->msg_in[1] >
			    sizeof(siop_cmd->cmd_tables->msg_in) - 2)
				printf("%s: extended message too big (%d)\n",
				    sc->sc_c.sc_dev.dv_xname,
				    siop_cmd->cmd_tables->msg_in[1]);
			siop_cmd->cmd_tables->t_extmsgdata.count =
			    htole32(siop_cmd->cmd_tables->msg_in[1] - 1);
			siop_table_sync(siop_cmd,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			CALL_SCRIPT(Ent_get_extmsgdata);
			return 1;
		case A_int_extmsgdata:
#ifdef SIOP_DEBUG_INTR
			{
				int i;
				printf("extended message: 0x%x, data:",
				    siop_cmd->cmd_tables->msg_in[2]);
				for (i = 3; i < 2 + siop_cmd->cmd_tables->msg_in[1];
				    i++)
					printf(" 0x%x",
					    siop_cmd->cmd_tables->msg_in[i]);
				printf("\n");
			}
#endif
			if (siop_cmd->cmd_tables->msg_in[0] ==
			    MSG_IGN_WIDE_RESIDUE) {
			/* we got the second byte of MSG_IGN_WIDE_RESIDUE */
				if (siop_cmd->cmd_tables->msg_in[3] != 1)
					printf("MSG_IGN_WIDE_RESIDUE: "
					    "bad len %d\n",
					    siop_cmd->cmd_tables->msg_in[3]);
				switch (siop_iwr(&siop_cmd->cmd_c)) {
				case SIOP_NEG_MSGOUT:
					siop_table_sync(siop_cmd,
					    BUS_DMASYNC_PREREAD |
					    BUS_DMASYNC_PREWRITE);
					CALL_SCRIPT(Ent_send_msgout);
					return(1);
				case SIOP_NEG_ACK:
					CALL_SCRIPT(Ent_msgin_ack);
					return(1);
				default:
					panic("invalid retval from "
					    "siop_iwr()");
				}
				return(1);
			}
			if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
				switch (siop_wdtr_neg(&siop_cmd->cmd_c)) {
				case SIOP_NEG_MSGOUT:
					siop_update_scntl3(sc,
					    siop_cmd->cmd_c.siop_target);
					siop_table_sync(siop_cmd,
					    BUS_DMASYNC_PREREAD |
					    BUS_DMASYNC_PREWRITE);
					CALL_SCRIPT(Ent_send_msgout);
					return(1);
				case SIOP_NEG_ACK:
					siop_update_scntl3(sc,
					    siop_cmd->cmd_c.siop_target);
					CALL_SCRIPT(Ent_msgin_ack);
					return(1);
				default:
					panic("invalid retval from "
					    "siop_wdtr_neg()");
				}
				return(1);
			}
			if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
				switch (siop_sdtr_neg(&siop_cmd->cmd_c)) {
				case SIOP_NEG_MSGOUT:
					siop_update_scntl3(sc,
					    siop_cmd->cmd_c.siop_target);
					siop_table_sync(siop_cmd,
					    BUS_DMASYNC_PREREAD |
					    BUS_DMASYNC_PREWRITE);
					CALL_SCRIPT(Ent_send_msgout);
					return(1);
				case SIOP_NEG_ACK:
					siop_update_scntl3(sc,
					    siop_cmd->cmd_c.siop_target);
					CALL_SCRIPT(Ent_msgin_ack);
					return(1);
				default:
					panic("invalid retval from "
					    "siop_sdtr_neg()");
				}
				return(1);
			}
			/* send a message reject */
			siop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
			siop_cmd->cmd_tables->t_msgout.count = htole32(1);
			siop_table_sync(siop_cmd,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			CALL_SCRIPT(Ent_send_msgout);
			return 1;
		case A_int_disc:
			INCSTAT(siop_stat_intr_sdp);
			offset = bus_space_read_1(sc->sc_c.sc_rt,
			    sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
#ifdef SIOP_DEBUG_DR
			printf("disconnect offset %d\n", offset);
#endif
			siop_sdp(&siop_cmd->cmd_c, offset);
			siop_table_sync(siop_cmd,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			CALL_SCRIPT(Ent_script_sched);
			return 1;
		case A_int_resfail:
			printf("reselect failed\n");
			CALL_SCRIPT(Ent_script_sched);
			return 1;
		case A_int_done:
			if (xs == NULL) {
				printf("%s: done without command, DSA=0x%lx\n",
				    sc->sc_c.sc_dev.dv_xname,
				    (u_long)siop_cmd->cmd_c.dsa);
				siop_cmd->cmd_c.status = CMDST_FREE;
				CALL_SCRIPT(Ent_script_sched);
				return 1;
			}
#ifdef SIOP_DEBUG_INTR
			printf("done, DSA=0x%lx target id 0x%x last msg "
			    "in=0x%x status=0x%x\n", (u_long)siop_cmd->cmd_c.dsa,
			    le32toh(siop_cmd->cmd_tables->id),
			    siop_cmd->cmd_tables->msg_in[0],
			    le32toh(siop_cmd->cmd_tables->status));
#endif
			INCSTAT(siop_stat_intr_done);
			/* update resid. */
			offset = bus_space_read_1(sc->sc_c.sc_rt,
			    sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
			siop_update_resid(&siop_cmd->cmd_c, offset);
			siop_cmd->cmd_c.status = CMDST_DONE;
			goto end;
		default:
			printf("unknown irqcode %x\n", irqcode);
			if (xs) {
				xs->error = XS_SELTIMEOUT;
				goto end;
			}
			goto reset;
		}
		return 1;
	}
	/* We just shouldn't get there */
	panic("siop_intr: I shouldn't be there !");

end:
	/*
	 * restart the script now if command completed properly
	 * Otherwise wait for siop_scsicmd_end(), we may need to cleanup the
	 * queue
	 */
	xs->status = le32toh(siop_cmd->cmd_tables->status);
	if (xs->status == SCSI_OK)
		CALL_SCRIPT(Ent_script_sched);
	else
		restart = 1;
	siop_lun->siop_tag[tag].active = NULL;
	siop_scsicmd_end(siop_cmd);
	if (freetarget && siop_target->target_c.status == TARST_PROBING)
		siop_del_dev(sc, target, lun);
	if (restart)
		CALL_SCRIPT(Ent_script_sched);
	if (sc->sc_flags & SCF_CHAN_NOSLOT) {
		/* a command terminated, so we have free slots now */
		sc->sc_flags &= ~SCF_CHAN_NOSLOT;
		scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
	}

	return 1;
}

void
siop_scsicmd_end(siop_cmd)
	struct siop_cmd *siop_cmd;
{
	struct scsipi_xfer *xs = siop_cmd->cmd_c.xs;
	struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;

	switch(xs->status) {
	case SCSI_OK:
		xs->error = XS_NOERROR;
		break;
	case SCSI_BUSY:
		xs->error = XS_BUSY;
		break;
	case SCSI_CHECK:
		xs->error = XS_BUSY;
		/* remove commands in the queue and scheduler */
		siop_unqueue(sc, xs->xs_periph->periph_target,
		    xs->xs_periph->periph_lun);
		break;
	case SCSI_QUEUE_FULL:
		INCSTAT(siop_stat_intr_qfull);
#ifdef SIOP_DEBUG
		printf("%s:%d:%d: queue full (tag %d)\n",
		    sc->sc_c.sc_dev.dv_xname,
		    xs->xs_periph->periph_target,
		    xs->xs_periph->periph_lun, siop_cmd->cmd_c.tag);
#endif
		xs->error = XS_BUSY;
		break;
	case SCSI_SIOP_NOCHECK:
		/*
		 * don't check status, xs->error is already valid
		 */
		break;
	case SCSI_SIOP_NOSTATUS:
		/*
		 * the status byte was not updated, cmd was
		 * aborted
		 */
		xs->error = XS_SELTIMEOUT;
		break;
	default:
		scsipi_printaddr(xs->xs_periph);
		printf("invalid status code %d\n", xs->status);
		xs->error = XS_DRIVER_STUFFUP;
	}
	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
		bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_data, 0,
		    siop_cmd->cmd_c.dmamap_data->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_data);
	}
	bus_dmamap_unload(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_cmd);
	callout_stop(&siop_cmd->cmd_c.xs->xs_callout);
	siop_cmd->cmd_c.status = CMDST_FREE;
	TAILQ_INSERT_TAIL(&sc->free_list, siop_cmd, next);
#if 0
	if (xs->resid != 0)
		printf("resid %d datalen %d\n", xs->resid, xs->datalen);
#endif
	scsipi_done (xs);
}

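/*
 * How the lookup below identifies queued commands (a summary of the script
 * conventions used in this file): a command still waiting in a scheduler
 * slot has the slot's second word pointing at dsa +
 * sizeof(struct siop_common_xfer) + Ent_ldsa_select.  Once the script has
 * started the command it rewrites the slot to the free pattern 0x80000000
 * with a MOVE MEMORY, so such slots are skipped here.
 */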
void
siop_unqueue(sc, target, lun)
	struct siop_softc *sc;
	int target;
	int lun;
{
	int slot, tag;
	struct siop_cmd *siop_cmd;
	struct siop_lun *siop_lun =
	    ((struct siop_target *)sc->sc_c.targets[target])->siop_lun[lun];

	/* first make sure to read valid data */
	siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (tag = 1; tag < SIOP_NTAG; tag++) {
		/* look for commands in the scheduler, not yet started */
		if (siop_lun->siop_tag[tag].active == NULL)
			continue;
		siop_cmd = siop_lun->siop_tag[tag].active;
		for (slot = 0; slot <= sc->sc_currschedslot; slot++) {
			if (siop_script_read(sc,
			    (Ent_script_sched_slot0 / 4) + slot * 2 + 1) ==
			    siop_cmd->cmd_c.dsa +
			    sizeof(struct siop_common_xfer) +
			    Ent_ldsa_select)
				break;
		}
		if (slot > sc->sc_currschedslot)
			continue; /* didn't find it */
		if (siop_script_read(sc,
		    (Ent_script_sched_slot0 / 4) + slot * 2) == 0x80000000)
			continue; /* already started */
		/* clear the slot */
		siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
		    0x80000000);
		/* ask to requeue */
		siop_cmd->cmd_c.xs->error = XS_REQUEUE;
		siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
		siop_lun->siop_tag[tag].active = NULL;
		siop_scsicmd_end(siop_cmd);
	}
	/* update sc_currschedslot */
	sc->sc_currschedslot = 0;
	for (slot = SIOP_NSLOTS - 1; slot >= 0; slot--) {
		if (siop_script_read(sc,
		    (Ent_script_sched_slot0 / 4) + slot * 2) != 0x80000000)
			sc->sc_currschedslot = slot;
	}
}

/*
 * handle a rejected queue tag message: the command will run untagged,
 * so we have to adjust the reselect script.
 */
int
siop_handle_qtag_reject(siop_cmd)
	struct siop_cmd *siop_cmd;
{
	struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
	int target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
	int lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
	int tag = siop_cmd->cmd_tables->msg_out[2];
	struct siop_lun *siop_lun =
	    ((struct siop_target*)sc->sc_c.targets[target])->siop_lun[lun];

#ifdef SIOP_DEBUG
	printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
	    sc->sc_c.sc_dev.dv_xname, target, lun, tag, siop_cmd->cmd_c.tag,
	    siop_cmd->cmd_c.status);
#endif

	if (siop_lun->siop_tag[0].active != NULL) {
		printf("%s: untagged command already running for target %d "
		    "lun %d (status %d)\n", sc->sc_c.sc_dev.dv_xname,
		    target, lun, siop_lun->siop_tag[0].active->cmd_c.status);
		return -1;
	}
	/* clear tag slot */
	siop_lun->siop_tag[tag].active = NULL;
	/* add command to non-tagged slot */
	siop_lun->siop_tag[0].active = siop_cmd;
	siop_cmd->cmd_c.tag = 0;
	/* adjust reselect script if there is one */
	if (siop_lun->siop_tag[0].reseloff > 0) {
		siop_script_write(sc,
		    siop_lun->siop_tag[0].reseloff + 1,
		    siop_cmd->cmd_c.dsa + sizeof(struct siop_common_xfer) +
		    Ent_ldsa_reload_dsa);
		siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
	}
	return 0;
}

/*
 * handle a bus reset: reset chip, unqueue all active commands, free all
 * target structs and report lossage to upper layer.
 * As the upper layer may requeue immediately we have to first store
 * all active commands in a temporary queue.
 */
void
siop_handle_reset(sc)
	struct siop_softc *sc;
{
	struct siop_cmd *siop_cmd;
	struct siop_lun *siop_lun;
	int target, lun, tag;
	/*
	 * scsi bus reset. reset the chip and restart
	 * the queue. Need to clean up all active commands
	 */
	printf("%s: scsi bus reset\n", sc->sc_c.sc_dev.dv_xname);
	/* stop, reset and restart the chip */
	siop_reset(sc);
	if (sc->sc_flags & SCF_CHAN_NOSLOT) {
		/* chip has been reset, all slots are free now */
		sc->sc_flags &= ~SCF_CHAN_NOSLOT;
		scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
	}
	/*
	 * Process all commands: first commands being executed
	 */
	for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
	    target++) {
		if (sc->sc_c.targets[target] == NULL)
			continue;
		for (lun = 0; lun < 8; lun++) {
			struct siop_target *siop_target =
			    (struct siop_target *)sc->sc_c.targets[target];
			siop_lun = siop_target->siop_lun[lun];
			if (siop_lun == NULL)
				continue;
			for (tag = 0; tag <
			    ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
			    SIOP_NTAG : 1);
			    tag++) {
				siop_cmd = siop_lun->siop_tag[tag].active;
				if (siop_cmd == NULL)
					continue;
				scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
				printf("command with tag id %d reset\n", tag);
				siop_cmd->cmd_c.xs->error =
				    (siop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
				    XS_TIMEOUT : XS_RESET;
				siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
				siop_lun->siop_tag[tag].active = NULL;
				siop_cmd->cmd_c.status = CMDST_DONE;
				siop_scsicmd_end(siop_cmd);
			}
		}
		sc->sc_c.targets[target]->status = TARST_ASYNC;
		sc->sc_c.targets[target]->flags &= ~TARF_ISWIDE;
		sc->sc_c.targets[target]->period =
		    sc->sc_c.targets[target]->offset = 0;
		siop_update_xfer_mode(&sc->sc_c, target);
	}

	scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
}

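/*
 * scsipi glue: adapt_request entry point.  Summary of the three request
 * types handled below: ADAPTER_REQ_RUN_XFER allocates a siop_cmd and the
 * per-target/per-lun structures on first use, loads the DMA maps and hands
 * the command to siop_start(); ADAPTER_REQ_GROW_RESOURCES adds a page of
 * command descriptors via siop_morecbd(); ADAPTER_REQ_SET_XFER_MODE records
 * the negotiated capabilities (tags, wide, sync) in the target flags and
 * allocates lun switch entries for the known devices.
 */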
void
siop_scsipi_request(chan, req, arg)
	struct scsipi_channel *chan;
	scsipi_adapter_req_t req;
	void *arg;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct siop_softc *sc = (void *)chan->chan_adapter->adapt_dev;
	struct siop_cmd *siop_cmd;
	struct siop_target *siop_target;
	int s, error, i;
	int target;
	int lun;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		target = periph->periph_target;
		lun = periph->periph_lun;

		s = splbio();
#ifdef SIOP_DEBUG_SCHED
		printf("starting cmd for %d:%d\n", target, lun);
#endif
		siop_cmd = TAILQ_FIRST(&sc->free_list);
		if (siop_cmd == NULL) {
			xs->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(xs);
			splx(s);
			return;
		}
		TAILQ_REMOVE(&sc->free_list, siop_cmd, next);
#ifdef DIAGNOSTIC
		if (siop_cmd->cmd_c.status != CMDST_FREE)
			panic("siop_scsicmd: new cmd not free");
#endif
		siop_target = (struct siop_target*)sc->sc_c.targets[target];
		if (siop_target == NULL) {
#ifdef SIOP_DEBUG
			printf("%s: alloc siop_target for target %d\n",
			    sc->sc_c.sc_dev.dv_xname, target);
#endif
			sc->sc_c.targets[target] =
			    malloc(sizeof(struct siop_target),
				M_DEVBUF, M_NOWAIT);
			if (sc->sc_c.targets[target] == NULL) {
				printf("%s: can't malloc memory for "
				    "target %d\n", sc->sc_c.sc_dev.dv_xname,
				    target);
				xs->error = XS_RESOURCE_SHORTAGE;
				scsipi_done(xs);
				splx(s);
				return;
			}
			siop_target =
			    (struct siop_target*)sc->sc_c.targets[target];
			siop_target->target_c.status = TARST_PROBING;
			siop_target->target_c.flags = 0;
			siop_target->target_c.id =
			    sc->sc_c.clock_div << 24; /* scntl3 */
			siop_target->target_c.id |= target << 16; /* id */
			/* siop_target->target_c.id |= 0x0 << 8; scxfer is 0 */

			/* get a lun switch script */
			siop_target->lunsw = siop_get_lunsw(sc);
			if (siop_target->lunsw == NULL) {
				printf("%s: can't alloc lunsw for target %d\n",
				    sc->sc_c.sc_dev.dv_xname, target);
				xs->error = XS_RESOURCE_SHORTAGE;
				scsipi_done(xs);
				splx(s);
				return;
			}
			for (i=0; i < 8; i++)
				siop_target->siop_lun[i] = NULL;
			siop_add_reselsw(sc, target);
		}
		if (siop_target->siop_lun[lun] == NULL) {
			siop_target->siop_lun[lun] =
			    malloc(sizeof(struct siop_lun), M_DEVBUF,
			    M_NOWAIT|M_ZERO);
			if (siop_target->siop_lun[lun] == NULL) {
				printf("%s: can't alloc siop_lun for "
				    "target %d lun %d\n",
				    sc->sc_c.sc_dev.dv_xname, target, lun);
				xs->error = XS_RESOURCE_SHORTAGE;
				scsipi_done(xs);
				splx(s);
				return;
			}
		}
		siop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
		siop_cmd->cmd_c.xs = xs;
		siop_cmd->cmd_c.flags = 0;
		siop_cmd->cmd_c.status = CMDST_READY;

		/* load the DMA maps */
		error = bus_dmamap_load(sc->sc_c.sc_dmat,
		    siop_cmd->cmd_c.dmamap_cmd,
		    xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
		if (error) {
			printf("%s: unable to load cmd DMA map: %d\n",
			    sc->sc_c.sc_dev.dv_xname, error);
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			splx(s);
			return;
		}
		if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
			error = bus_dmamap_load(sc->sc_c.sc_dmat,
			    siop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
			    NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
			    ((xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMA_READ : BUS_DMA_WRITE));
			if (error) {
				printf("%s: unable to load data DMA map: %d\n",
				    sc->sc_c.sc_dev.dv_xname, error);
				xs->error = XS_DRIVER_STUFFUP;
				scsipi_done(xs);
				bus_dmamap_unload(sc->sc_c.sc_dmat,
				    siop_cmd->cmd_c.dmamap_cmd);
				splx(s);
				return;
			}
			bus_dmamap_sync(sc->sc_c.sc_dmat,
			    siop_cmd->cmd_c.dmamap_data, 0,
			    siop_cmd->cmd_c.dmamap_data->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		}
		bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_cmd, 0,
		    siop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		if (xs->xs_tag_type) {
			/* use tag_id + 1, tag 0 is reserved for untagged cmds*/
			siop_cmd->cmd_c.tag = xs->xs_tag_id + 1;
		} else {
			siop_cmd->cmd_c.tag = 0;
		}
		siop_setuptables(&siop_cmd->cmd_c);
		siop_table_sync(siop_cmd,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		siop_start(sc, siop_cmd);
		if (xs->xs_control & XS_CTL_POLL) {
			/* poll for command completion */
			while ((xs->xs_status & XS_STS_DONE) == 0) {
				delay(1000);
				siop_intr(sc);
			}
		}
		splx(s);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
#ifdef SIOP_DEBUG
		printf("%s grow resources (%d)\n", sc->sc_c.sc_dev.dv_xname,
		    sc->sc_c.sc_adapt.adapt_openings);
#endif
		siop_morecbd(sc);
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		if (sc->sc_c.targets[xm->xm_target] == NULL)
			return;
		s = splbio();
		if (xm->xm_mode & PERIPH_CAP_TQING)
			sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
		if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
		    (sc->sc_c.features & SF_BUS_WIDE))
			sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
		if (xm->xm_mode & PERIPH_CAP_SYNC)
			sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
		if ((xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16)) ||
		    sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
			sc->sc_c.targets[xm->xm_target]->status =
			    TARST_ASYNC;

		for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
			if (scsipi_lookup_periph(chan,
			    xm->xm_target, lun) != NULL) {
				/* allocate a lun sw entry for this device */
				siop_add_dev(sc, xm->xm_target, lun);
			}
		}

		splx(s);
	}
	}
}

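/*
 * Scheduler slot layout, as used by siop_start() and siop_unqueue() (a
 * sketch inferred from the script accesses in this file, not from the
 * script source itself): each of the SIOP_NSLOTS slots is two 32-bit words
 * at Ent_script_sched_slot0 + slot * 8.  Word 0 is a JUMP instruction,
 * 0x80000000 (JUMP foo, IF FALSE) when the slot is free and 0x80080000 once
 * armed; word 1 is patched with dsa + sizeof(struct siop_common_xfer) +
 * Ent_ldsa_select, so an armed slot jumps into the per-command script that
 * loads the DSA and selects the device.
 */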
static void
siop_start(sc, siop_cmd)
	struct siop_softc *sc;
	struct siop_cmd *siop_cmd;
{
	struct siop_lun *siop_lun;
	struct siop_xfer *siop_xfer;
	u_int32_t dsa;
	int timeout;
	int target, lun, slot;

	/*
	 * first make sure to read valid data
	 */
	siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * The queue management here is a bit tricky: the script always looks
	 * at the slot from first to last, so if we always use the first
	 * free slot commands can stay at the tail of the queue ~forever.
	 * The algorithm used here is to restart from the head when we know
	 * that the queue is empty, and only add commands after the last one.
	 * When we're at the end of the queue wait for the script to clear it.
	 * The best thing to do here would be to implement a circular queue,
	 * but using only 53c720 features this can be "interesting".
	 * A mid-way solution could be to implement 2 queues and swap orders.
	 */
	slot = sc->sc_currschedslot;
	/*
	 * If the instruction is 0x80000000 (JUMP foo, IF FALSE) the slot is
	 * free. As this is the last used slot, all previous slots are free,
	 * we can restart from 0.
	 */
	if (siop_script_read(sc, (Ent_script_sched_slot0 / 4) + slot * 2) ==
	    0x80000000) {
		slot = sc->sc_currschedslot = 0;
	} else {
		slot++;
	}
	target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
	lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
	siop_lun =
	    ((struct siop_target*)sc->sc_c.targets[target])->siop_lun[lun];
	/* if non-tagged command active, panic: this shouldn't happen */
	if (siop_lun->siop_tag[0].active != NULL) {
		panic("siop_start: tagged cmd while untagged running");
	}
#ifdef DIAGNOSTIC
	/* sanity check the tag if needed */
	if (siop_cmd->cmd_c.flags & CMDFL_TAG) {
		if (siop_lun->siop_tag[siop_cmd->cmd_c.tag].active != NULL)
			panic("siop_start: tag not free");
		if (siop_cmd->cmd_c.tag >= SIOP_NTAG) {
			scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
			printf(": tag id %d\n", siop_cmd->cmd_c.tag);
			panic("siop_start: invalid tag id");
		}
	}
#endif
	/*
	 * find a free scheduler slot and load it.
	 */
	for (; slot < SIOP_NSLOTS; slot++) {
		/*
		 * If cmd is 0x80000000 the slot is free
		 */
		if (siop_script_read(sc,
		    (Ent_script_sched_slot0 / 4) + slot * 2) ==
		    0x80000000)
			break;
	}
	if (slot == SIOP_NSLOTS) {
		/*
		 * no more free slot, no need to continue. freeze the queue
		 * and requeue this command.
		 */
		scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
		sc->sc_flags |= SCF_CHAN_NOSLOT;
		siop_cmd->cmd_c.xs->error = XS_REQUEUE;
		siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
		siop_scsicmd_end(siop_cmd);
		return;
	}
#ifdef SIOP_DEBUG_SCHED
	printf("using slot %d for DSA 0x%lx\n", slot,
	    (u_long)siop_cmd->cmd_c.dsa);
#endif
	/* mark command as active */
	if (siop_cmd->cmd_c.status == CMDST_READY)
		siop_cmd->cmd_c.status = CMDST_ACTIVE;
	else
		panic("siop_start: bad status");
	siop_lun->siop_tag[siop_cmd->cmd_c.tag].active = siop_cmd;
	/* patch scripts with DSA addr */
	dsa = siop_cmd->cmd_c.dsa;
	/* first reselect switch, if we have an entry */
	if (siop_lun->siop_tag[siop_cmd->cmd_c.tag].reseloff > 0)
		siop_script_write(sc,
		    siop_lun->siop_tag[siop_cmd->cmd_c.tag].reseloff + 1,
		    dsa + sizeof(struct siop_common_xfer) +
		    Ent_ldsa_reload_dsa);
	/* CMD script: MOVE MEMORY addr */
	siop_xfer = (struct siop_xfer*)siop_cmd->cmd_tables;
	siop_xfer->resel[E_ldsa_abs_slot_Used[0]] =
	    htole32(sc->sc_c.sc_scriptaddr + Ent_script_sched_slot0 + slot * 8);
	siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
	/* scheduler slot: JUMP ldsa_select */
	siop_script_write(sc,
	    (Ent_script_sched_slot0 / 4) + slot * 2 + 1,
	    dsa + sizeof(struct siop_common_xfer) + Ent_ldsa_select);
	/* handle timeout */
	if ((siop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
		/* start expire timer */
		timeout = mstohz(siop_cmd->cmd_c.xs->timeout);
		if (timeout == 0)
			timeout = 1;
		callout_reset( &siop_cmd->cmd_c.xs->xs_callout,
		    timeout, siop_timeout, siop_cmd);
	}
	/*
	 * Change JUMP cmd so that this slot will be handled
	 */
	siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
	    0x80080000);
	sc->sc_currschedslot = slot;

	/* make sure SCRIPT processor will read valid data */
	siop_script_sync(sc,BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/* Signal script it has some work to do */
	bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
	    SIOP_ISTAT, ISTAT_SIGP);
	/* and wait for IRQ */
	return;
}

void
siop_timeout(v)
	void *v;
{
	struct siop_cmd *siop_cmd = v;
	struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
	int s;

	scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
	printf("command timeout, CDB: ");
	scsipi_print_cdb(siop_cmd->cmd_c.xs->cmd);
	printf("\n");

	s = splbio();
	/* reset the scsi bus */
	siop_resetbus(&sc->sc_c);

	/* deactivate callout */
	callout_stop(&siop_cmd->cmd_c.xs->xs_callout);
	/*
	 * mark command as having timed out and just return;
	 * the bus reset will generate an interrupt,
	 * it will be handled in siop_intr()
	 */
	siop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
	splx(s);
	return;

}

void
siop_dump_script(sc)
	struct siop_softc *sc;
{
	int i;
	for (i = 0; i < PAGE_SIZE / 4; i += 2) {
		printf("0x%04x: 0x%08x 0x%08x", i * 4,
		    le32toh(sc->sc_c.sc_script[i]),
		    le32toh(sc->sc_c.sc_script[i+1]));
		if ((le32toh(sc->sc_c.sc_script[i]) & 0xe0000000) ==
		    0xc0000000) {
			i++;
			printf(" 0x%08x", le32toh(sc->sc_c.sc_script[i+1]));
		}
		printf("\n");
	}
}

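/*
 * siop_morecbd() grows the command pool by one page: a page of struct
 * siop_xfer is allocated and each descriptor gets its DMA maps plus a small
 * per-command script copied from load_dsa.  The rdsa0..rdsa3 MOVE
 * instructions in that script are patched with the four bytes of the
 * command's DSA (0x78... is "MOVE data8 to register", as the inline comment
 * below notes), which lets the script reload the DSA when a target
 * reselects.
 */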
void
siop_morecbd(sc)
	struct siop_softc *sc;
{
	int error, i, j, s;
	bus_dma_segment_t seg;
	int rseg;
	struct siop_cbd *newcbd;
	struct siop_xfer *xfer;
	bus_addr_t dsa;
	u_int32_t *scr;

	/* allocate a new list head */
	newcbd = malloc(sizeof(struct siop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (newcbd == NULL) {
		printf("%s: can't allocate memory for command descriptors "
		    "head\n", sc->sc_c.sc_dev.dv_xname);
		return;
	}

	/* allocate cmd list */
	newcbd->cmds = malloc(sizeof(struct siop_cmd) * SIOP_NCMDPB,
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (newcbd->cmds == NULL) {
		printf("%s: can't allocate memory for command descriptors\n",
		    sc->sc_c.sc_dev.dv_xname);
		goto bad3;
	}
	error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0, &seg,
	    1, &rseg, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: unable to allocate cbd DMA memory, error = %d\n",
		    sc->sc_c.sc_dev.dv_xname, error);
		goto bad2;
	}
	error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
	    (caddr_t *)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error) {
		printf("%s: unable to map cbd DMA memory, error = %d\n",
		    sc->sc_c.sc_dev.dv_xname, error);
		goto bad2;
	}
	error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT, &newcbd->xferdma);
	if (error) {
		printf("%s: unable to create cbd DMA map, error = %d\n",
		    sc->sc_c.sc_dev.dv_xname, error);
		goto bad1;
	}
	error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma, newcbd->xfers,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: unable to load cbd DMA map, error = %d\n",
		    sc->sc_c.sc_dev.dv_xname, error);
		goto bad0;
	}
#ifdef DEBUG
	printf("%s: alloc newcbd at PHY addr 0x%lx\n", sc->sc_c.sc_dev.dv_xname,
	    (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
#endif
	for (i = 0; i < SIOP_NCMDPB; i++) {
		error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
		    MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &newcbd->cmds[i].cmd_c.dmamap_data);
		if (error) {
			printf("%s: unable to create data DMA map for cbd: "
			    "error %d\n",
			    sc->sc_c.sc_dev.dv_xname, error);
			goto bad0;
		}
		error = bus_dmamap_create(sc->sc_c.sc_dmat,
		    sizeof(struct scsipi_generic), 1,
		    sizeof(struct scsipi_generic), 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &newcbd->cmds[i].cmd_c.dmamap_cmd);
		if (error) {
			printf("%s: unable to create cmd DMA map for cbd %d\n",
			    sc->sc_c.sc_dev.dv_xname, error);
			goto bad0;
		}
		newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
		newcbd->cmds[i].siop_cbdp = newcbd;
		xfer = &newcbd->xfers[i];
		newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
		memset(newcbd->cmds[i].cmd_tables, 0, sizeof(struct siop_xfer));
		dsa = newcbd->xferdma->dm_segs[0].ds_addr +
		    i * sizeof(struct siop_xfer);
		newcbd->cmds[i].cmd_c.dsa = dsa;
		newcbd->cmds[i].cmd_c.status = CMDST_FREE;
		xfer->siop_tables.t_msgout.count= htole32(1);
		xfer->siop_tables.t_msgout.addr = htole32(dsa);
		xfer->siop_tables.t_msgin.count= htole32(1);
		xfer->siop_tables.t_msgin.addr = htole32(dsa +
		    offsetof(struct siop_common_xfer, msg_in));
		xfer->siop_tables.t_extmsgin.count= htole32(2);
		xfer->siop_tables.t_extmsgin.addr = htole32(dsa +
		    offsetof(struct siop_common_xfer, msg_in) + 1);
		xfer->siop_tables.t_extmsgdata.addr = htole32(dsa +
		    offsetof(struct siop_common_xfer, msg_in) + 3);
		xfer->siop_tables.t_status.count= htole32(1);
		xfer->siop_tables.t_status.addr = htole32(dsa +
		    offsetof(struct siop_common_xfer, status));
		/* The select/reselect script */
		scr = &xfer->resel[0];
		for (j = 0; j < sizeof(load_dsa) / sizeof(load_dsa[0]); j++)
			scr[j] = htole32(load_dsa[j]);
		/*
		 * 0x78000000 is a 'move data8 to reg'. data8 is the second
		 * octet, reg offset is the third.
		 */
		scr[Ent_rdsa0 / 4] =
		    htole32(0x78100000 | ((dsa & 0x000000ff) << 8));
		scr[Ent_rdsa1 / 4] =
		    htole32(0x78110000 | ( dsa & 0x0000ff00 ));
		scr[Ent_rdsa2 / 4] =
		    htole32(0x78120000 | ((dsa & 0x00ff0000) >> 8));
		scr[Ent_rdsa3 / 4] =
		    htole32(0x78130000 | ((dsa & 0xff000000) >> 16));
		scr[E_ldsa_abs_reselected_Used[0]] =
		    htole32(sc->sc_c.sc_scriptaddr + Ent_reselected);
		scr[E_ldsa_abs_reselect_Used[0]] =
		    htole32(sc->sc_c.sc_scriptaddr + Ent_reselect);
		scr[E_ldsa_abs_selected_Used[0]] =
		    htole32(sc->sc_c.sc_scriptaddr + Ent_selected);
		scr[E_ldsa_abs_data_Used[0]] =
		    htole32(dsa + sizeof(struct siop_common_xfer) +
		    Ent_ldsa_data);
		/* JUMP foo, IF FALSE - used by MOVE MEMORY to clear the slot */
		scr[Ent_ldsa_data / 4] = htole32(0x80000000);
		s = splbio();
		TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
		splx(s);
#ifdef SIOP_DEBUG
		printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
		    le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr),
		    le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr),
		    le32toh(newcbd->cmds[i].cmd_tables->t_status.addr));
#endif
	}
	s = splbio();
	TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
	sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
	splx(s);
	return;
bad0:
	bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
	bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
bad1:
	bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
bad2:
	free(newcbd->cmds, M_DEVBUF);
bad3:
	free(newcbd, M_DEVBUF);
	return;
}

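/*
 * Script memory management, as implemented by the routines below: the
 * space left after the main script is handed out from both ends.
 * script_free_lo grows upward and holds the per-target lun switches,
 * each extended by two words per lun in siop_add_dev(); script_free_hi
 * grows downward and holds the per-lun tag switch tables for tagged-queuing
 * devices.  When the two meet, the allocation routines simply return
 * without adding an entry.
 */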
struct siop_lunsw *
siop_get_lunsw(sc)
	struct siop_softc *sc;
{
	struct siop_lunsw *lunsw;
	int i;

	if (sc->script_free_lo + (sizeof(lun_switch) / sizeof(lun_switch[0])) >=
	    sc->script_free_hi)
		return NULL;
	lunsw = TAILQ_FIRST(&sc->lunsw_list);
	if (lunsw != NULL) {
#ifdef SIOP_DEBUG
		printf("siop_get_lunsw got lunsw at offset %d\n",
		    lunsw->lunsw_off);
#endif
		TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
		return lunsw;
	}
	lunsw = malloc(sizeof(struct siop_lunsw), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (lunsw == NULL)
		return NULL;
#ifdef SIOP_DEBUG
	printf("allocating lunsw at offset %d\n", sc->script_free_lo);
#endif
	if (sc->sc_c.features & SF_CHIP_RAM) {
		bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
		    sc->script_free_lo * 4, lun_switch,
		    sizeof(lun_switch) / sizeof(lun_switch[0]));
		bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
		    (sc->script_free_lo + E_abs_lunsw_return_Used[0]) * 4,
		    sc->sc_c.sc_scriptaddr + Ent_lunsw_return);
	} else {
		for (i = 0; i < sizeof(lun_switch) / sizeof(lun_switch[0]);
		    i++)
			sc->sc_c.sc_script[sc->script_free_lo + i] =
			    htole32(lun_switch[i]);
		sc->sc_c.sc_script[
		    sc->script_free_lo + E_abs_lunsw_return_Used[0]] =
		    htole32(sc->sc_c.sc_scriptaddr + Ent_lunsw_return);
	}
	lunsw->lunsw_off = sc->script_free_lo;
	lunsw->lunsw_size = sizeof(lun_switch) / sizeof(lun_switch[0]);
	sc->script_free_lo += lunsw->lunsw_size;
	siop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return lunsw;
}

void
siop_add_reselsw(sc, target)
	struct siop_softc *sc;
	int target;
{
	int i, j;
	struct siop_target *siop_target;
	struct siop_lun *siop_lun;

	siop_target = (struct siop_target *)sc->sc_c.targets[target];
	/*
	 * add an entry to resel switch
	 */
	siop_script_sync(sc, BUS_DMASYNC_POSTWRITE);
	for (i = 0; i < 15; i++) {
		siop_target->reseloff = Ent_resel_targ0 / 4 + i * 2;
		if ((siop_script_read(sc, siop_target->reseloff) & 0xff)
		    == 0xff) { /* it's free */
#ifdef SIOP_DEBUG
			printf("siop: target %d slot %d offset %d\n",
			    target, i, siop_target->reseloff);
#endif
			/* JUMP abs_foo, IF target | 0x80; */
			siop_script_write(sc, siop_target->reseloff,
			    0x800c0080 | target);
			siop_script_write(sc, siop_target->reseloff + 1,
			    sc->sc_c.sc_scriptaddr +
			    siop_target->lunsw->lunsw_off * 4 +
			    Ent_lun_switch_entry);
			break;
		}
	}
	if (i == 15) /* no free slot, shouldn't happen */
		panic("siop: resel switch full");

	sc->sc_ntargets++;
	for (i = 0; i < 8; i++) {
		siop_lun = siop_target->siop_lun[i];
		if (siop_lun == NULL)
			continue;
		if (siop_lun->reseloff > 0) {
			siop_lun->reseloff = 0;
			for (j = 0; j < SIOP_NTAG; j++)
				siop_lun->siop_tag[j].reseloff = 0;
			siop_add_dev(sc, target, i);
		}
	}
	siop_update_scntl3(sc, sc->sc_c.targets[target]);
	siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
}

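/*
 * target_c.id packs the per-target register values set up in
 * siop_scsipi_request(): scntl3 in bits 31-24, the SCSI id in bits 23-16
 * and sxfer in bits 15-8.  The two MOVE-to-register instructions patched
 * below copy the scntl3 and sxfer bytes into the target's lun switch so
 * that the script can restore them on reselection.
 */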
void
siop_add_dev(sc, target, lun)
	struct siop_softc *sc;
	int target;
	int lun;
{
	struct siop_lunsw *lunsw;
	struct siop_target *siop_target =
	    (struct siop_target *)sc->sc_c.targets[target];
	struct siop_lun *siop_lun = siop_target->siop_lun[lun];
	int i, ntargets;

	if (siop_lun->reseloff > 0)
		return;
	lunsw = siop_target->lunsw;
	if ((lunsw->lunsw_off + lunsw->lunsw_size) < sc->script_free_lo) {
		/*
		 * can't extend this slot. Probably not worth trying to deal
		 * with this case
		 */
#ifdef DEBUG
		printf("%s:%d:%d: can't allocate a lun sw slot\n",
		    sc->sc_c.sc_dev.dv_xname, target, lun);
#endif
		return;
	}
	/* count how many free targets we still have to probe */
	ntargets = sc->sc_c.sc_chan.chan_ntargets - 1 - sc->sc_ntargets;

	/*
	 * we need 8 bytes for the lun sw additional entry, and
	 * possibly sizeof(tag_switch) for the tag switch entry.
	 * Keep enough free space for the free targets that could be
	 * probed later.
	 */
	if (sc->script_free_lo + 2 +
	    (ntargets * sizeof(lun_switch) / sizeof(lun_switch[0])) >=
	    ((siop_target->target_c.flags & TARF_TAG) ?
	    sc->script_free_hi - (sizeof(tag_switch) / sizeof(tag_switch[0])) :
	    sc->script_free_hi)) {
		/*
		 * not enough space, probably not worth dealing with it.
		 * We can hold 13 tagged-queuing capable devices in the 4k RAM.
		 */
#ifdef DEBUG
		printf("%s:%d:%d: not enough memory for a lun sw slot\n",
		    sc->sc_c.sc_dev.dv_xname, target, lun);
#endif
		return;
	}
#ifdef SIOP_DEBUG
	printf("%s:%d:%d: allocate lun sw entry\n",
	    sc->sc_c.sc_dev.dv_xname, target, lun);
#endif
	/* INT int_resellun */
	siop_script_write(sc, sc->script_free_lo, 0x98080000);
	siop_script_write(sc, sc->script_free_lo + 1, A_int_resellun);
	/* Now the slot entry: JUMP abs_foo, IF lun */
	siop_script_write(sc, sc->script_free_lo - 2,
	    0x800c0000 | lun);
	siop_script_write(sc, sc->script_free_lo - 1, 0);
	siop_lun->reseloff = sc->script_free_lo - 2;
	lunsw->lunsw_size += 2;
	sc->script_free_lo += 2;
	if (siop_target->target_c.flags & TARF_TAG) {
		/* we need a tag switch */
		sc->script_free_hi -=
		    sizeof(tag_switch) / sizeof(tag_switch[0]);
		if (sc->sc_c.features & SF_CHIP_RAM) {
			bus_space_write_region_4(sc->sc_c.sc_ramt,
			    sc->sc_c.sc_ramh,
			    sc->script_free_hi * 4, tag_switch,
			    sizeof(tag_switch) / sizeof(tag_switch[0]));
		} else {
			for (i = 0;
			    i < sizeof(tag_switch) / sizeof(tag_switch[0]);
			    i++) {
				sc->sc_c.sc_script[sc->script_free_hi + i] =
				    htole32(tag_switch[i]);
			}
		}
		siop_script_write(sc,
		    siop_lun->reseloff + 1,
		    sc->sc_c.sc_scriptaddr + sc->script_free_hi * 4 +
		    Ent_tag_switch_entry);

		for (i = 0; i < SIOP_NTAG; i++) {
			siop_lun->siop_tag[i].reseloff =
			    sc->script_free_hi + (Ent_resel_tag0 / 4) + i * 2;
		}
	} else {
		/* non-tag case; just work with the lun switch */
		siop_lun->siop_tag[0].reseloff =
		    siop_target->siop_lun[lun]->reseloff;
	}
	siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
}

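/*
 * siop_del_dev():
 * Forget a lun. When the last lun of a target goes away, mark the target's
 * resel switch slot free again (0x800c00ff), put its lun switch back on
 * lunsw_list for reuse and free the target structure.
 */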
void
siop_del_dev(sc, target, lun)
	struct siop_softc *sc;
	int target;
	int lun;
{
	int i;
	struct siop_target *siop_target;
#ifdef SIOP_DEBUG
	printf("%s:%d:%d: free lun sw entry\n",
	    sc->sc_c.sc_dev.dv_xname, target, lun);
#endif
	if (sc->sc_c.targets[target] == NULL)
		return;
	siop_target = (struct siop_target *)sc->sc_c.targets[target];
	free(siop_target->siop_lun[lun], M_DEVBUF);
	siop_target->siop_lun[lun] = NULL;
	/* XXX compact sw entry too ? */
	/* check if we can free the whole target */
	for (i = 0; i < 8; i++) {
		if (siop_target->siop_lun[i] != NULL)
			return;
	}
#ifdef SIOP_DEBUG
	printf("%s: free siop_target for target %d lun %d lunsw offset %d\n",
	    sc->sc_c.sc_dev.dv_xname, target, lun,
	    sc->sc_c.targets[target]->lunsw->lunsw_off);
#endif
	/*
	 * nothing here, free the target struct and resel
	 * switch entry
	 */
	siop_script_write(sc, siop_target->reseloff, 0x800c00ff);
	siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
	TAILQ_INSERT_TAIL(&sc->lunsw_list, siop_target->lunsw, next);
	free(sc->sc_c.targets[target], M_DEVBUF);
	sc->sc_c.targets[target] = NULL;
	sc->sc_ntargets--;
}

#ifdef SIOP_STATS
void
siop_printstats()
{
	printf("siop_stat_intr %d\n", siop_stat_intr);
	printf("siop_stat_intr_shortxfer %d\n", siop_stat_intr_shortxfer);
	printf("siop_stat_intr_xferdisc %d\n", siop_stat_intr_xferdisc);
	printf("siop_stat_intr_sdp %d\n", siop_stat_intr_sdp);
	printf("siop_stat_intr_done %d\n", siop_stat_intr_done);
	printf("siop_stat_intr_lunresel %d\n", siop_stat_intr_lunresel);
	printf("siop_stat_intr_qfull %d\n", siop_stat_intr_qfull);
}
#endif