1 /* $OpenBSD: siop.c,v 1.74 2020/02/17 02:50:23 krw Exp $ */ 2 /* $NetBSD: siop.c,v 1.79 2005/11/18 23:10:32 bouyer Exp $ */ 3 4 /* 5 * Copyright (c) 2000 Manuel Bouyer. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 * 27 */ 28 29 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */ 30 31 #include <sys/param.h> 32 #include <sys/systm.h> 33 #include <sys/device.h> 34 #include <sys/malloc.h> 35 #include <sys/kernel.h> 36 #include <sys/endian.h> 37 38 #include <machine/bus.h> 39 40 #include <dev/microcode/siop/siop.out> 41 42 #include <scsi/scsi_all.h> 43 #include <scsi/scsi_message.h> 44 #include <scsi/scsiconf.h> 45 46 #include <dev/ic/siopreg.h> 47 #include <dev/ic/siopvar_common.h> 48 #include <dev/ic/siopvar.h> 49 50 #ifndef SIOP_DEBUG 51 #undef SIOP_DEBUG 52 #undef SIOP_DEBUG_DR 53 #undef SIOP_DEBUG_INTR 54 #undef SIOP_DEBUG_SCHED 55 #undef DUMP_SCRIPT 56 #else 57 #define SIOP_DEBUG_DR 58 #define SIOP_DEBUG_INTR 59 #define SIOP_DEBUG_SCHED 60 #define DUMP_SCRIPT 61 #endif 62 63 64 #undef SIOP_STATS 65 66 #ifndef SIOP_DEFAULT_TARGET 67 #define SIOP_DEFAULT_TARGET 7 68 #endif 69 70 /* number of cmd descriptors per block */ 71 #define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct siop_xfer)) 72 73 /* Number of scheduler slot (needs to match script) */ 74 #define SIOP_NSLOTS 40 75 76 void siop_table_sync(struct siop_cmd *, int); 77 void siop_script_sync(struct siop_softc *, int); 78 u_int32_t siop_script_read(struct siop_softc *, u_int); 79 void siop_script_write(struct siop_softc *, u_int, u_int32_t); 80 void siop_reset(struct siop_softc *); 81 void siop_handle_reset(struct siop_softc *); 82 int siop_handle_qtag_reject(struct siop_cmd *); 83 void siop_scsicmd_end(struct siop_cmd *); 84 void siop_start(struct siop_softc *); 85 void siop_timeout(void *); 86 void siop_scsicmd(struct scsi_xfer *); 87 void * siop_cmd_get(void *); 88 void siop_cmd_put(void *, void *); 89 int siop_scsiprobe(struct scsi_link *); 90 void siop_scsifree(struct scsi_link *); 91 #ifdef DUMP_SCRIPT 92 void siop_dump_script(struct siop_softc *); 93 #endif 94 void siop_morecbd(struct siop_softc *); 95 struct siop_lunsw *siop_get_lunsw(struct siop_softc *); 96 void siop_add_reselsw(struct siop_softc *, int); 97 void siop_update_scntl3(struct siop_softc *, struct siop_common_target *); 98 99 struct siop_dmamem 
*siop_dmamem_alloc(struct siop_softc *, size_t); 100 void siop_dmamem_free(struct siop_softc *, struct siop_dmamem *); 101 102 struct cfdriver siop_cd = { 103 NULL, "siop", DV_DULL 104 }; 105 106 struct scsi_adapter siop_switch = { 107 siop_scsicmd, NULL, siop_scsiprobe, siop_scsifree, NULL 108 }; 109 110 #ifdef SIOP_STATS 111 static int siop_stat_intr = 0; 112 static int siop_stat_intr_shortxfer = 0; 113 static int siop_stat_intr_sdp = 0; 114 static int siop_stat_intr_saveoffset = 0; 115 static int siop_stat_intr_done = 0; 116 static int siop_stat_intr_xferdisc = 0; 117 static int siop_stat_intr_lunresel = 0; 118 static int siop_stat_intr_qfull = 0; 119 void siop_printstats(void); 120 #define INCSTAT(x) x++ 121 #else 122 #define INCSTAT(x) 123 #endif 124 125 void 126 siop_table_sync(siop_cmd, ops) 127 struct siop_cmd *siop_cmd; 128 int ops; 129 { 130 struct siop_common_softc *sc = siop_cmd->cmd_c.siop_sc; 131 bus_addr_t offset; 132 133 offset = siop_cmd->cmd_c.dsa - 134 SIOP_DMA_DVA(siop_cmd->siop_cbdp->xfers); 135 bus_dmamap_sync(sc->sc_dmat, 136 SIOP_DMA_MAP(siop_cmd->siop_cbdp->xfers), offset, 137 sizeof(struct siop_xfer), ops); 138 } 139 140 void 141 siop_script_sync(sc, ops) 142 struct siop_softc *sc; 143 int ops; 144 { 145 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) 146 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0, 147 PAGE_SIZE, ops); 148 } 149 150 u_int32_t 151 siop_script_read(sc, offset) 152 struct siop_softc *sc; 153 u_int offset; 154 { 155 if (sc->sc_c.features & SF_CHIP_RAM) { 156 return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 157 offset * 4); 158 } else { 159 return siop_ctoh32(&sc->sc_c, sc->sc_c.sc_script[offset]); 160 } 161 } 162 163 void 164 siop_script_write(sc, offset, val) 165 struct siop_softc *sc; 166 u_int offset; 167 u_int32_t val; 168 { 169 if (sc->sc_c.features & SF_CHIP_RAM) { 170 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 171 offset * 4, val); 172 } else { 173 sc->sc_c.sc_script[offset] = siop_htoc32(&sc->sc_c, val); 174 } 175 } 176 177 void 178 siop_attach(sc) 179 struct siop_softc *sc; 180 { 181 struct scsibus_attach_args saa; 182 183 if (siop_common_attach(&sc->sc_c) != 0) 184 return; 185 186 TAILQ_INIT(&sc->free_list); 187 TAILQ_INIT(&sc->ready_list); 188 TAILQ_INIT(&sc->urgent_list); 189 TAILQ_INIT(&sc->cmds); 190 TAILQ_INIT(&sc->lunsw_list); 191 scsi_iopool_init(&sc->iopool, sc, siop_cmd_get, siop_cmd_put); 192 sc->sc_currschedslot = 0; 193 sc->sc_c.sc_link.adapter = &siop_switch; 194 sc->sc_c.sc_link.openings = SIOP_NTAG; 195 sc->sc_c.sc_link.pool = &sc->iopool; 196 197 /* Start with one page worth of commands */ 198 siop_morecbd(sc); 199 200 #ifdef SIOP_DEBUG 201 printf("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n", 202 sc->sc_c.sc_dev.dv_xname, (int)sizeof(siop_script), 203 (u_int32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script); 204 #endif 205 206 /* Do a bus reset, so that devices fall back to narrow/async */ 207 siop_resetbus(&sc->sc_c); 208 /* 209 * siop_reset() will reset the chip, thus clearing pending interrupts 210 */ 211 siop_reset(sc); 212 #ifdef DUMP_SCRIPT 213 siop_dump_script(sc); 214 #endif 215 216 bzero(&saa, sizeof(saa)); 217 saa.saa_sc_link = &sc->sc_c.sc_link; 218 219 config_found((struct device*)sc, &saa, scsiprint); 220 } 221 222 void 223 siop_reset(sc) 224 struct siop_softc *sc; 225 { 226 int i, j; 227 struct siop_lunsw *lunsw; 228 229 siop_common_reset(&sc->sc_c); 230 231 /* copy and patch the script */ 232 if (sc->sc_c.features & SF_CHIP_RAM) { 233 
bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0, 234 siop_script, sizeof(siop_script) / sizeof(siop_script[0])); 235 for (j = 0; j < 236 (sizeof(E_abs_msgin_Used) / sizeof(E_abs_msgin_Used[0])); 237 j++) { 238 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 239 E_abs_msgin_Used[j] * 4, 240 sc->sc_c.sc_scriptaddr + Ent_msgin_space); 241 } 242 if (sc->sc_c.features & SF_CHIP_LED0) { 243 bus_space_write_region_4(sc->sc_c.sc_ramt, 244 sc->sc_c.sc_ramh, 245 Ent_led_on1, siop_led_on, 246 sizeof(siop_led_on) / sizeof(siop_led_on[0])); 247 bus_space_write_region_4(sc->sc_c.sc_ramt, 248 sc->sc_c.sc_ramh, 249 Ent_led_on2, siop_led_on, 250 sizeof(siop_led_on) / sizeof(siop_led_on[0])); 251 bus_space_write_region_4(sc->sc_c.sc_ramt, 252 sc->sc_c.sc_ramh, 253 Ent_led_off, siop_led_off, 254 sizeof(siop_led_off) / sizeof(siop_led_off[0])); 255 } 256 } else { 257 for (j = 0; 258 j < (sizeof(siop_script) / sizeof(siop_script[0])); j++) { 259 sc->sc_c.sc_script[j] = 260 siop_htoc32(&sc->sc_c, siop_script[j]); 261 } 262 for (j = 0; j < 263 (sizeof(E_abs_msgin_Used) / sizeof(E_abs_msgin_Used[0])); 264 j++) { 265 sc->sc_c.sc_script[E_abs_msgin_Used[j]] = 266 siop_htoc32(&sc->sc_c, 267 sc->sc_c.sc_scriptaddr + Ent_msgin_space); 268 } 269 if (sc->sc_c.features & SF_CHIP_LED0) { 270 for (j = 0; j < (sizeof(siop_led_on) / 271 sizeof(siop_led_on[0])); j++) 272 sc->sc_c.sc_script[ 273 Ent_led_on1 / sizeof(siop_led_on[0]) + j 274 ] = siop_htoc32(&sc->sc_c, siop_led_on[j]); 275 for (j = 0; j < (sizeof(siop_led_on) / 276 sizeof(siop_led_on[0])); j++) 277 sc->sc_c.sc_script[ 278 Ent_led_on2 / sizeof(siop_led_on[0]) + j 279 ] = siop_htoc32(&sc->sc_c, siop_led_on[j]); 280 for (j = 0; j < (sizeof(siop_led_off) / 281 sizeof(siop_led_off[0])); j++) 282 sc->sc_c.sc_script[ 283 Ent_led_off / sizeof(siop_led_off[0]) + j 284 ] = siop_htoc32(&sc->sc_c, siop_led_off[j]); 285 } 286 } 287 sc->script_free_lo = sizeof(siop_script) / sizeof(siop_script[0]); 288 sc->script_free_hi = sc->sc_c.ram_size / 4; 289 sc->sc_ntargets = 0; 290 291 /* free used and unused lun switches */ 292 while((lunsw = TAILQ_FIRST(&sc->lunsw_list)) != NULL) { 293 #ifdef SIOP_DEBUG 294 printf("%s: free lunsw at offset %d\n", 295 sc->sc_c.sc_dev.dv_xname, lunsw->lunsw_off); 296 #endif 297 TAILQ_REMOVE(&sc->lunsw_list, lunsw, next); 298 free(lunsw, M_DEVBUF, 0); 299 } 300 TAILQ_INIT(&sc->lunsw_list); 301 /* restore reselect switch */ 302 for (i = 0; i < sc->sc_c.sc_link.adapter_buswidth; i++) { 303 struct siop_target *target; 304 if (sc->sc_c.targets[i] == NULL) 305 continue; 306 #ifdef SIOP_DEBUG 307 printf("%s: restore sw for target %d\n", 308 sc->sc_c.sc_dev.dv_xname, i); 309 #endif 310 target = (struct siop_target *)sc->sc_c.targets[i]; 311 free(target->lunsw, M_DEVBUF, 0); 312 target->lunsw = siop_get_lunsw(sc); 313 if (target->lunsw == NULL) { 314 printf("%s: can't alloc lunsw for target %d\n", 315 sc->sc_c.sc_dev.dv_xname, i); 316 break; 317 } 318 siop_add_reselsw(sc, i); 319 } 320 321 /* start script */ 322 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) { 323 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0, 324 PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 325 } 326 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, 327 sc->sc_c.sc_scriptaddr + Ent_reselect); 328 } 329 330 #if 0 331 #define CALL_SCRIPT(ent) do {\ 332 printf ("start script DSA 0x%lx DSP 0x%lx\n", \ 333 siop_cmd->cmd_c.dsa, \ 334 sc->sc_c.sc_scriptaddr + ent); \ 335 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, 
sc->sc_c.sc_scriptaddr + ent); \ 336 } while (0) 337 #else 338 #define CALL_SCRIPT(ent) do {\ 339 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \ 340 } while (0) 341 #endif 342 343 int 344 siop_intr(v) 345 void *v; 346 { 347 struct siop_softc *sc = v; 348 struct siop_target *siop_target; 349 struct siop_cmd *siop_cmd; 350 struct siop_lun *siop_lun; 351 struct scsi_xfer *xs; 352 int istat, sist, sstat1, dstat = 0; 353 u_int32_t irqcode; 354 int need_reset = 0; 355 int offset, target, lun, tag; 356 bus_addr_t dsa; 357 struct siop_cbd *cbdp; 358 int restart = 0; 359 360 istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT); 361 if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0) 362 return 0; 363 INCSTAT(siop_stat_intr); 364 if (istat & ISTAT_INTF) { 365 printf("INTRF\n"); 366 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, 367 SIOP_ISTAT, ISTAT_INTF); 368 } 369 if ((istat &(ISTAT_DIP | ISTAT_SIP | ISTAT_ABRT)) == 370 (ISTAT_DIP | ISTAT_ABRT)) { 371 /* clear abort */ 372 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, 373 SIOP_ISTAT, 0); 374 } 375 /* use DSA to find the current siop_cmd */ 376 siop_cmd = NULL; 377 dsa = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA); 378 TAILQ_FOREACH(cbdp, &sc->cmds, next) { 379 if (dsa >= SIOP_DMA_DVA(cbdp->xfers) && 380 dsa < SIOP_DMA_DVA(cbdp->xfers) + PAGE_SIZE) { 381 dsa -= SIOP_DMA_DVA(cbdp->xfers); 382 siop_cmd = &cbdp->cmds[dsa / sizeof(struct siop_xfer)]; 383 siop_table_sync(siop_cmd, 384 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 385 break; 386 } 387 } 388 if (siop_cmd) { 389 xs = siop_cmd->cmd_c.xs; 390 siop_target = (struct siop_target *)siop_cmd->cmd_c.siop_target; 391 target = siop_cmd->cmd_c.xs->sc_link->target; 392 lun = siop_cmd->cmd_c.xs->sc_link->lun; 393 tag = siop_cmd->cmd_c.tag; 394 siop_lun = siop_target->siop_lun[lun]; 395 #ifdef DIAGNOSTIC 396 if (siop_cmd->cmd_c.status != CMDST_ACTIVE && 397 siop_cmd->cmd_c.status != CMDST_SENSE_ACTIVE) { 398 printf("siop_cmd (lun %d) for DSA 0x%x " 399 "not active (%d)\n", lun, (u_int)dsa, 400 siop_cmd->cmd_c.status); 401 xs = NULL; 402 siop_target = NULL; 403 target = -1; 404 lun = -1; 405 tag = -1; 406 siop_lun = NULL; 407 siop_cmd = NULL; 408 } else if (siop_lun->siop_tag[tag].active != siop_cmd) { 409 printf("siop_cmd (lun %d tag %d) not in siop_lun " 410 "active (%p != %p)\n", lun, tag, siop_cmd, 411 siop_lun->siop_tag[tag].active); 412 } 413 #endif 414 } else { 415 xs = NULL; 416 siop_target = NULL; 417 target = -1; 418 lun = -1; 419 tag = -1; 420 siop_lun = NULL; 421 } 422 if (istat & ISTAT_DIP) { 423 dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, 424 SIOP_DSTAT); 425 if (dstat & DSTAT_ABRT) { 426 /* was probably generated by a bus reset IOCTL */ 427 if ((dstat & DSTAT_DFE) == 0) 428 siop_clearfifo(&sc->sc_c); 429 goto reset; 430 } 431 if (dstat & DSTAT_SSI) { 432 printf("single step dsp 0x%08x dsa 0x08%x\n", 433 (int)(bus_space_read_4(sc->sc_c.sc_rt, 434 sc->sc_c.sc_rh, SIOP_DSP) - 435 sc->sc_c.sc_scriptaddr), 436 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, 437 SIOP_DSA)); 438 if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 && 439 (istat & ISTAT_SIP) == 0) { 440 bus_space_write_1(sc->sc_c.sc_rt, 441 sc->sc_c.sc_rh, SIOP_DCNTL, 442 bus_space_read_1(sc->sc_c.sc_rt, 443 sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD); 444 } 445 return 1; 446 } 447 448 if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) { 449 printf("%s: DMA IRQ:", sc->sc_c.sc_dev.dv_xname); 450 if (dstat & DSTAT_IID) 451 printf(" illegal 
instruction"); 452 if (dstat & DSTAT_BF) 453 printf(" bus fault"); 454 if (dstat & DSTAT_MDPE) 455 printf(" parity"); 456 if (dstat & DSTAT_DFE) 457 printf(" DMA fifo empty"); 458 else 459 siop_clearfifo(&sc->sc_c); 460 printf(", DSP=0x%x DSA=0x%x: ", 461 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, 462 SIOP_DSP) - sc->sc_c.sc_scriptaddr), 463 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA)); 464 if (siop_cmd) 465 printf("last msg_in=0x%x status=0x%x\n", 466 siop_cmd->cmd_tables->msg_in[0], 467 siop_ctoh32(&sc->sc_c, 468 siop_cmd->cmd_tables->status)); 469 else 470 printf("current DSA invalid\n"); 471 need_reset = 1; 472 } 473 } 474 if (istat & ISTAT_SIP) { 475 if (istat & ISTAT_DIP) 476 delay(10); 477 /* 478 * Can't read sist0 & sist1 independently, or we have to 479 * insert delay 480 */ 481 sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh, 482 SIOP_SIST0); 483 sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, 484 SIOP_SSTAT1); 485 #ifdef SIOP_DEBUG_INTR 486 printf("scsi interrupt, sist=0x%x sstat1=0x%x " 487 "DSA=0x%x DSP=0x%lx\n", sist, sstat1, 488 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA), 489 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, 490 SIOP_DSP) - 491 sc->sc_c.sc_scriptaddr)); 492 #endif 493 if (sist & SIST0_RST) { 494 siop_handle_reset(sc); 495 siop_start(sc); 496 /* no table to flush here */ 497 return 1; 498 } 499 if (sist & SIST0_SGE) { 500 if (siop_cmd) 501 sc_print_addr(xs->sc_link); 502 else 503 printf("%s: ", sc->sc_c.sc_dev.dv_xname); 504 printf("scsi gross error\n"); 505 goto reset; 506 } 507 if ((sist & SIST0_MA) && need_reset == 0) { 508 if (siop_cmd) { 509 int scratcha0; 510 /* XXX Why read DSTAT again? */ 511 dstat = bus_space_read_1(sc->sc_c.sc_rt, 512 sc->sc_c.sc_rh, SIOP_DSTAT); 513 /* 514 * first restore DSA, in case we were in a S/G 515 * operation. 516 */ 517 bus_space_write_4(sc->sc_c.sc_rt, 518 sc->sc_c.sc_rh, 519 SIOP_DSA, siop_cmd->cmd_c.dsa); 520 scratcha0 = bus_space_read_1(sc->sc_c.sc_rt, 521 sc->sc_c.sc_rh, SIOP_SCRATCHA); 522 switch (sstat1 & SSTAT1_PHASE_MASK) { 523 case SSTAT1_PHASE_STATUS: 524 /* 525 * previous phase may be aborted for any reason 526 * ( for example, the target has less data to 527 * transfer than requested). Compute resid and 528 * just go to status, the command should 529 * terminate. 530 */ 531 INCSTAT(siop_stat_intr_shortxfer); 532 if (scratcha0 & A_flag_data) 533 siop_ma(&siop_cmd->cmd_c); 534 else if ((dstat & DSTAT_DFE) == 0) 535 siop_clearfifo(&sc->sc_c); 536 CALL_SCRIPT(Ent_status); 537 return 1; 538 case SSTAT1_PHASE_MSGIN: 539 /* 540 * target may be ready to disconnect 541 * Compute resid which would be used later 542 * if a save data pointer is needed. 
543 */ 544 INCSTAT(siop_stat_intr_xferdisc); 545 if (scratcha0 & A_flag_data) 546 siop_ma(&siop_cmd->cmd_c); 547 else if ((dstat & DSTAT_DFE) == 0) 548 siop_clearfifo(&sc->sc_c); 549 bus_space_write_1(sc->sc_c.sc_rt, 550 sc->sc_c.sc_rh, SIOP_SCRATCHA, 551 scratcha0 & ~A_flag_data); 552 CALL_SCRIPT(Ent_msgin); 553 return 1; 554 } 555 printf("%s: unexpected phase mismatch %d\n", 556 sc->sc_c.sc_dev.dv_xname, 557 sstat1 & SSTAT1_PHASE_MASK); 558 } else { 559 printf("%s: phase mismatch without command\n", 560 sc->sc_c.sc_dev.dv_xname); 561 } 562 need_reset = 1; 563 } 564 if (sist & SIST0_PAR) { 565 /* parity error, reset */ 566 if (siop_cmd) 567 sc_print_addr(xs->sc_link); 568 else 569 printf("%s: ", sc->sc_c.sc_dev.dv_xname); 570 printf("parity error\n"); 571 goto reset; 572 } 573 if ((sist & (SIST1_STO << 8)) && need_reset == 0) { 574 /* selection time out, assume there's no device here */ 575 if (siop_cmd) { 576 siop_cmd->cmd_c.status = CMDST_DONE; 577 xs->error = XS_SELTIMEOUT; 578 goto end; 579 } else { 580 printf("%s: selection timeout without " 581 "command\n", sc->sc_c.sc_dev.dv_xname); 582 need_reset = 1; 583 } 584 } 585 if (sist & SIST0_UDC) { 586 /* 587 * unexpected disconnect. Usually the target signals 588 * a fatal condition this way. Attempt to get sense. 589 */ 590 if (siop_cmd) { 591 siop_cmd->cmd_tables->status = 592 siop_htoc32(&sc->sc_c, SCSI_CHECK); 593 goto end; 594 } 595 printf("%s: unexpected disconnect without " 596 "command\n", sc->sc_c.sc_dev.dv_xname); 597 goto reset; 598 } 599 if (sist & (SIST1_SBMC << 8)) { 600 /* SCSI bus mode change */ 601 if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1) 602 goto reset; 603 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { 604 /* 605 * we have a script interrupt, it will 606 * restart the script. 607 */ 608 goto scintr; 609 } 610 /* 611 * else we have to restart it ourselve, at the 612 * interrupted instruction. 613 */ 614 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, 615 SIOP_DSP, 616 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, 617 SIOP_DSP) - 8); 618 return 1; 619 } 620 /* Else it's an unhandled exception (for now). 
*/ 621 printf("%s: unhandled scsi interrupt, sist=0x%x sstat1=0x%x " 622 "DSA=0x%x DSP=0x%x\n", sc->sc_c.sc_dev.dv_xname, 623 sist, sstat1, 624 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA), 625 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, 626 SIOP_DSP) - sc->sc_c.sc_scriptaddr)); 627 if (siop_cmd) { 628 siop_cmd->cmd_c.status = CMDST_DONE; 629 xs->error = XS_SELTIMEOUT; 630 goto end; 631 } 632 need_reset = 1; 633 } else { 634 sist = sstat1 = 0; 635 } 636 if (need_reset) { 637 reset: 638 /* fatal error, reset the bus */ 639 siop_resetbus(&sc->sc_c); 640 /* no table to flush here */ 641 return 1; 642 } 643 644 scintr: 645 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */ 646 irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, 647 SIOP_DSPS); 648 #ifdef SIOP_DEBUG_INTR 649 printf("script interrupt 0x%x\n", irqcode); 650 #endif 651 /* 652 * no command, or an inactive command is only valid for a 653 * reselect interrupt 654 */ 655 if ((irqcode & 0x80) == 0) { 656 if (siop_cmd == NULL) { 657 printf( 658 "%s: script interrupt (0x%x) with invalid DSA !!!\n", 659 sc->sc_c.sc_dev.dv_xname, irqcode); 660 goto reset; 661 } 662 if (siop_cmd->cmd_c.status != CMDST_ACTIVE && 663 siop_cmd->cmd_c.status != CMDST_SENSE_ACTIVE) { 664 printf("%s: command with invalid status " 665 "(IRQ code 0x%x current status %d) !\n", 666 sc->sc_c.sc_dev.dv_xname, 667 irqcode, siop_cmd->cmd_c.status); 668 xs = NULL; 669 } 670 } 671 switch(irqcode) { 672 case A_int_err: 673 printf("error, DSP=0x%x\n", 674 (int)(bus_space_read_4(sc->sc_c.sc_rt, 675 sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr)); 676 if (xs) { 677 xs->error = XS_SELTIMEOUT; 678 goto end; 679 } else { 680 goto reset; 681 } 682 case A_int_reseltarg: 683 printf("%s: reselect with invalid target\n", 684 sc->sc_c.sc_dev.dv_xname); 685 goto reset; 686 case A_int_resellun: 687 INCSTAT(siop_stat_intr_lunresel); 688 target = bus_space_read_1(sc->sc_c.sc_rt, 689 sc->sc_c.sc_rh, SIOP_SCRATCHA) & 0xf; 690 lun = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, 691 SIOP_SCRATCHA + 1); 692 tag = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, 693 SIOP_SCRATCHA + 2); 694 siop_target = 695 (struct siop_target *)sc->sc_c.targets[target]; 696 if (siop_target == NULL) { 697 printf("%s: reselect with invalid target %d\n", 698 sc->sc_c.sc_dev.dv_xname, target); 699 goto reset; 700 } 701 siop_lun = siop_target->siop_lun[lun]; 702 if (siop_lun == NULL) { 703 printf("%s: target %d reselect with invalid " 704 "lun %d\n", sc->sc_c.sc_dev.dv_xname, 705 target, lun); 706 goto reset; 707 } 708 if (siop_lun->siop_tag[tag].active == NULL) { 709 printf("%s: target %d lun %d tag %d reselect " 710 "without command\n", 711 sc->sc_c.sc_dev.dv_xname, 712 target, lun, tag); 713 goto reset; 714 } 715 siop_cmd = siop_lun->siop_tag[tag].active; 716 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, 717 SIOP_DSP, siop_cmd->cmd_c.dsa + 718 sizeof(struct siop_common_xfer) + 719 Ent_ldsa_reload_dsa); 720 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE); 721 return 1; 722 case A_int_reseltag: 723 printf("%s: reselect with invalid tag\n", 724 sc->sc_c.sc_dev.dv_xname); 725 goto reset; 726 case A_int_msgin: 727 { 728 int msgin = bus_space_read_1(sc->sc_c.sc_rt, 729 sc->sc_c.sc_rh, SIOP_SFBR); 730 if (msgin == MSG_MESSAGE_REJECT) { 731 int msg, extmsg; 732 if (siop_cmd->cmd_tables->msg_out[0] & 0x80) { 733 /* 734 * message was part of a identify + 735 * something else. Identify shouldn't 736 * have been rejected. 
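					 *
					 * (Editorial note, illustrative only:
					 * msg_out[] holds either the bare message
					 * or an identify followed by the message
					 * that was actually rejected, i.e.
					 *
					 *	msg_out[0] = identify (bit 7 set)
					 *	msg_out[1] = MSG_EXTENDED
					 *	msg_out[2] = extended message length
					 *	msg_out[3] = e.g. MSG_EXT_WDTR
					 *
					 * which is why msg/extmsg are taken from
					 * offsets 1/3 here and from offsets 0/2 in
					 * the else branch below.)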
737 */ 738 msg = 739 siop_cmd->cmd_tables->msg_out[1]; 740 extmsg = 741 siop_cmd->cmd_tables->msg_out[3]; 742 } else { 743 msg = siop_cmd->cmd_tables->msg_out[0]; 744 extmsg = 745 siop_cmd->cmd_tables->msg_out[2]; 746 } 747 if (msg == MSG_MESSAGE_REJECT) { 748 /* MSG_REJECT for a MSG_REJECT !*/ 749 if (xs) 750 sc_print_addr(xs->sc_link); 751 else 752 printf("%s: ", 753 sc->sc_c.sc_dev.dv_xname); 754 printf("our reject message was " 755 "rejected\n"); 756 goto reset; 757 } 758 if (msg == MSG_EXTENDED && 759 extmsg == MSG_EXT_WDTR) { 760 /* WDTR rejected, initiate sync */ 761 if ((siop_target->target_c.flags & 762 TARF_SYNC) == 0) { 763 siop_target->target_c.status = 764 TARST_OK; 765 siop_update_xfer_mode(&sc->sc_c, 766 target); 767 /* no table to flush here */ 768 CALL_SCRIPT(Ent_msgin_ack); 769 return 1; 770 } 771 siop_target->target_c.status = 772 TARST_SYNC_NEG; 773 siop_sdtr_msg(&siop_cmd->cmd_c, 0, 774 sc->sc_c.st_minsync, 775 sc->sc_c.maxoff); 776 siop_table_sync(siop_cmd, 777 BUS_DMASYNC_PREREAD | 778 BUS_DMASYNC_PREWRITE); 779 CALL_SCRIPT(Ent_send_msgout); 780 return 1; 781 } else if (msg == MSG_EXTENDED && 782 extmsg == MSG_EXT_SDTR) { 783 /* sync rejected */ 784 siop_target->target_c.offset = 0; 785 siop_target->target_c.period = 0; 786 siop_target->target_c.status = TARST_OK; 787 siop_update_xfer_mode(&sc->sc_c, 788 target); 789 /* no table to flush here */ 790 CALL_SCRIPT(Ent_msgin_ack); 791 return 1; 792 } else if (msg == MSG_EXTENDED && 793 extmsg == MSG_EXT_PPR) { 794 /* PPR negotiation rejected */ 795 siop_target->target_c.offset = 0; 796 siop_target->target_c.period = 0; 797 siop_target->target_c.status = TARST_ASYNC; 798 siop_target->target_c.flags &= ~(TARF_DT | TARF_ISDT); 799 CALL_SCRIPT(Ent_msgin_ack); 800 return 1; 801 } else if (msg == MSG_SIMPLE_Q_TAG || 802 msg == MSG_HEAD_OF_Q_TAG || 803 msg == MSG_ORDERED_Q_TAG) { 804 if (siop_handle_qtag_reject( 805 siop_cmd) == -1) 806 goto reset; 807 CALL_SCRIPT(Ent_msgin_ack); 808 return 1; 809 } 810 if (xs) 811 sc_print_addr(xs->sc_link); 812 else 813 printf("%s: ", 814 sc->sc_c.sc_dev.dv_xname); 815 if (msg == MSG_EXTENDED) { 816 printf("scsi message reject, extended " 817 "message sent was 0x%x\n", extmsg); 818 } else { 819 printf("scsi message reject, message " 820 "sent was 0x%x\n", msg); 821 } 822 /* no table to flush here */ 823 CALL_SCRIPT(Ent_msgin_ack); 824 return 1; 825 } 826 if (msgin == MSG_IGN_WIDE_RESIDUE) { 827 /* use the extmsgdata table to get the second byte */ 828 siop_cmd->cmd_tables->t_extmsgdata.count = 829 siop_htoc32(&sc->sc_c, 1); 830 siop_table_sync(siop_cmd, 831 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 832 CALL_SCRIPT(Ent_get_extmsgdata); 833 return 1; 834 } 835 if (xs) 836 sc_print_addr(xs->sc_link); 837 else 838 printf("%s: ", sc->sc_c.sc_dev.dv_xname); 839 printf("unhandled message 0x%x\n", 840 siop_cmd->cmd_tables->msg_in[0]); 841 siop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT; 842 siop_cmd->cmd_tables->t_msgout.count = 843 siop_htoc32(&sc->sc_c, 1); 844 siop_table_sync(siop_cmd, 845 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 846 CALL_SCRIPT(Ent_send_msgout); 847 return 1; 848 } 849 case A_int_extmsgin: 850 #ifdef SIOP_DEBUG_INTR 851 printf("extended message: msg 0x%x len %d\n", 852 siop_cmd->cmd_tables->msg_in[2], 853 siop_cmd->cmd_tables->msg_in[1]); 854 #endif 855 if (siop_cmd->cmd_tables->msg_in[1] > 856 sizeof(siop_cmd->cmd_tables->msg_in) - 2) 857 printf("%s: extended message too big (%d)\n", 858 sc->sc_c.sc_dev.dv_xname, 859 siop_cmd->cmd_tables->msg_in[1]); 860 
siop_cmd->cmd_tables->t_extmsgdata.count = 861 siop_htoc32(&sc->sc_c, 862 siop_cmd->cmd_tables->msg_in[1] - 1); 863 siop_table_sync(siop_cmd, 864 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 865 CALL_SCRIPT(Ent_get_extmsgdata); 866 return 1; 867 case A_int_extmsgdata: 868 #ifdef SIOP_DEBUG_INTR 869 { 870 int i; 871 printf("extended message: 0x%x, data:", 872 siop_cmd->cmd_tables->msg_in[2]); 873 for (i = 3; i < 2 + siop_cmd->cmd_tables->msg_in[1]; 874 i++) 875 printf(" 0x%x", 876 siop_cmd->cmd_tables->msg_in[i]); 877 printf("\n"); 878 } 879 #endif 880 if (siop_cmd->cmd_tables->msg_in[0] == 881 MSG_IGN_WIDE_RESIDUE) { 882 /* we got the second byte of MSG_IGN_WIDE_RESIDUE */ 883 if (siop_cmd->cmd_tables->msg_in[3] != 1) 884 printf("MSG_IGN_WIDE_RESIDUE: " 885 "bad len %d\n", 886 siop_cmd->cmd_tables->msg_in[3]); 887 switch (siop_iwr(&siop_cmd->cmd_c)) { 888 case SIOP_NEG_MSGOUT: 889 siop_table_sync(siop_cmd, 890 BUS_DMASYNC_PREREAD | 891 BUS_DMASYNC_PREWRITE); 892 CALL_SCRIPT(Ent_send_msgout); 893 return(1); 894 case SIOP_NEG_ACK: 895 CALL_SCRIPT(Ent_msgin_ack); 896 return(1); 897 default: 898 panic("invalid retval from " 899 "siop_iwr()"); 900 } 901 return(1); 902 } 903 if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) { 904 switch (siop_wdtr_neg(&siop_cmd->cmd_c)) { 905 case SIOP_NEG_MSGOUT: 906 siop_update_scntl3(sc, 907 siop_cmd->cmd_c.siop_target); 908 siop_table_sync(siop_cmd, 909 BUS_DMASYNC_PREREAD | 910 BUS_DMASYNC_PREWRITE); 911 CALL_SCRIPT(Ent_send_msgout); 912 return(1); 913 case SIOP_NEG_ACK: 914 siop_update_scntl3(sc, 915 siop_cmd->cmd_c.siop_target); 916 CALL_SCRIPT(Ent_msgin_ack); 917 return(1); 918 default: 919 panic("invalid retval from " 920 "siop_wdtr_neg()"); 921 } 922 return(1); 923 } 924 if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) { 925 switch (siop_sdtr_neg(&siop_cmd->cmd_c)) { 926 case SIOP_NEG_MSGOUT: 927 siop_update_scntl3(sc, 928 siop_cmd->cmd_c.siop_target); 929 siop_table_sync(siop_cmd, 930 BUS_DMASYNC_PREREAD | 931 BUS_DMASYNC_PREWRITE); 932 CALL_SCRIPT(Ent_send_msgout); 933 return(1); 934 case SIOP_NEG_ACK: 935 siop_update_scntl3(sc, 936 siop_cmd->cmd_c.siop_target); 937 CALL_SCRIPT(Ent_msgin_ack); 938 return(1); 939 default: 940 panic("invalid retval from " 941 "siop_sdtr_neg()"); 942 } 943 return(1); 944 } 945 if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_PPR) { 946 switch (siop_ppr_neg(&siop_cmd->cmd_c)) { 947 case SIOP_NEG_MSGOUT: 948 siop_update_scntl3(sc, 949 siop_cmd->cmd_c.siop_target); 950 siop_table_sync(siop_cmd, 951 BUS_DMASYNC_PREREAD | 952 BUS_DMASYNC_PREWRITE); 953 CALL_SCRIPT(Ent_send_msgout); 954 return(1); 955 case SIOP_NEG_ACK: 956 siop_update_scntl3(sc, 957 siop_cmd->cmd_c.siop_target); 958 CALL_SCRIPT(Ent_msgin_ack); 959 return(1); 960 default: 961 panic("invalid retval from " 962 "siop_wdtr_neg()"); 963 } 964 return(1); 965 } 966 /* send a message reject */ 967 siop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT; 968 siop_cmd->cmd_tables->t_msgout.count = 969 siop_htoc32(&sc->sc_c, 1); 970 siop_table_sync(siop_cmd, 971 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 972 CALL_SCRIPT(Ent_send_msgout); 973 return 1; 974 case A_int_disc: 975 INCSTAT(siop_stat_intr_sdp); 976 offset = bus_space_read_1(sc->sc_c.sc_rt, 977 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1); 978 #ifdef SIOP_DEBUG_DR 979 printf("disconnect offset %d\n", offset); 980 #endif 981 siop_sdp(&siop_cmd->cmd_c, offset); 982 /* we start again with no offset */ 983 siop_cmd->saved_offset = SIOP_NOOFFSET; 984 siop_table_sync(siop_cmd, 985 BUS_DMASYNC_PREREAD | 
BUS_DMASYNC_PREWRITE); 986 CALL_SCRIPT(Ent_script_sched); 987 return 1; 988 case A_int_saveoffset: 989 INCSTAT(siop_stat_intr_saveoffset); 990 offset = bus_space_read_1(sc->sc_c.sc_rt, 991 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1); 992 #ifdef SIOP_DEBUG_DR 993 printf("saveoffset offset %d\n", offset); 994 #endif 995 siop_cmd->saved_offset = offset; 996 CALL_SCRIPT(Ent_script_sched); 997 return 1; 998 case A_int_resfail: 999 printf("reselect failed\n"); 1000 /* check if we can put some command in scheduler */ 1001 siop_start(sc); 1002 CALL_SCRIPT(Ent_script_sched); 1003 return 1; 1004 case A_int_done: 1005 if (xs == NULL) { 1006 printf("%s: done without command, DSA=0x%lx\n", 1007 sc->sc_c.sc_dev.dv_xname, 1008 (u_long)siop_cmd->cmd_c.dsa); 1009 siop_cmd->cmd_c.status = CMDST_FREE; 1010 siop_start(sc); 1011 CALL_SCRIPT(Ent_script_sched); 1012 return 1; 1013 } 1014 #ifdef SIOP_DEBUG_INTR 1015 printf("done, DSA=0x%lx target id 0x%x last msg " 1016 "in=0x%x status=0x%x\n", (u_long)siop_cmd->cmd_c.dsa, 1017 siop_ctoh32(&sc->sc_c, siop_cmd->cmd_tables->id), 1018 siop_cmd->cmd_tables->msg_in[0], 1019 siop_ctoh32(&sc->sc_c, 1020 siop_cmd->cmd_tables->status)); 1021 #endif 1022 INCSTAT(siop_stat_intr_done); 1023 /* update resid. */ 1024 offset = bus_space_read_1(sc->sc_c.sc_rt, 1025 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1); 1026 /* 1027 * if we got a disconnect between the last data phase 1028 * and the status phase, offset will be 0. In this 1029 * case, siop_cmd->saved_offset will have the proper 1030 * value if it got updated by the controller 1031 */ 1032 if (offset == 0 && 1033 siop_cmd->saved_offset != SIOP_NOOFFSET) 1034 offset = siop_cmd->saved_offset; 1035 siop_update_resid(&siop_cmd->cmd_c, offset); 1036 if (siop_cmd->cmd_c.status == CMDST_SENSE_ACTIVE) 1037 siop_cmd->cmd_c.status = CMDST_SENSE_DONE; 1038 else 1039 siop_cmd->cmd_c.status = CMDST_DONE; 1040 goto end; 1041 default: 1042 printf("unknown irqcode %x\n", irqcode); 1043 if (xs) { 1044 xs->error = XS_SELTIMEOUT; 1045 goto end; 1046 } 1047 goto reset; 1048 } 1049 return 1; 1050 } else 1051 irqcode = 0; 1052 /* We can get here if ISTAT_DIP and DSTAT_DFE are the only bits set. */ 1053 /* But that *SHOULDN'T* happen. It does on powerpc (at least). */ 1054 printf("%s: siop_intr() - we should not be here!\n" 1055 " istat = 0x%x, dstat = 0x%x, sist = 0x%x, sstat1 = 0x%x\n" 1056 " need_reset = %x, irqcode = %x, siop_cmd %s\n", 1057 sc->sc_c.sc_dev.dv_xname, 1058 istat, dstat, sist, sstat1, need_reset, irqcode, 1059 (siop_cmd == NULL) ? "== NULL" : "!= NULL"); 1060 goto reset; /* Where we should have gone in the first place! 
*/ 1061 end: 1062 /* 1063 * restart the script now if command completed properly 1064 * Otherwise wait for siop_scsicmd_end(), we may need to cleanup the 1065 * queue 1066 */ 1067 xs->status = siop_ctoh32(&sc->sc_c, siop_cmd->cmd_tables->status); 1068 if (xs->status == SCSI_OK) 1069 CALL_SCRIPT(Ent_script_sched); 1070 else 1071 restart = 1; 1072 siop_lun->siop_tag[tag].active = NULL; 1073 siop_scsicmd_end(siop_cmd); 1074 siop_start(sc); 1075 if (restart) 1076 CALL_SCRIPT(Ent_script_sched); 1077 return 1; 1078 } 1079 1080 void 1081 siop_scsicmd_end(siop_cmd) 1082 struct siop_cmd *siop_cmd; 1083 { 1084 struct scsi_xfer *xs = siop_cmd->cmd_c.xs; 1085 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc; 1086 struct siop_lun *siop_lun = 1087 ((struct siop_target*)sc->sc_c.targets[xs->sc_link->target])->siop_lun[xs->sc_link->lun]; 1088 1089 /* 1090 * If the command is re-queued (SENSE, QUEUE_FULL) it 1091 * must get a new timeout, so delete existing timeout now. 1092 */ 1093 timeout_del(&siop_cmd->cmd_c.xs->stimeout); 1094 1095 switch(xs->status) { 1096 case SCSI_OK: 1097 xs->error = (siop_cmd->cmd_c.status == CMDST_DONE) ? 1098 XS_NOERROR : XS_SENSE; 1099 break; 1100 case SCSI_BUSY: 1101 xs->error = XS_BUSY; 1102 break; 1103 case SCSI_CHECK: 1104 if (siop_cmd->cmd_c.status == CMDST_SENSE_DONE) { 1105 /* request sense on a request sense ? */ 1106 printf("%s: request sense failed\n", 1107 sc->sc_c.sc_dev.dv_xname); 1108 xs->error = XS_DRIVER_STUFFUP; 1109 } else { 1110 siop_cmd->cmd_c.status = CMDST_SENSE; 1111 } 1112 break; 1113 case SCSI_QUEUE_FULL: 1114 /* 1115 * Device didn't queue the command. We have to retry 1116 * it. We insert it into the urgent list, hoping to 1117 * preserve order. But unfortunately, commands already 1118 * in the scheduler may be accepted before this one. 1119 * Also remember the condition, to avoid starting new 1120 * commands for this device before one is done. 1121 */ 1122 INCSTAT(siop_stat_intr_qfull); 1123 #ifdef SIOP_DEBUG 1124 printf("%s:%d:%d: queue full (tag %d)\n", sc->sc_c.sc_dev.dv_xname, 1125 xs->sc_link->target, 1126 xs->sc_link->lun, siop_cmd->cmd_c.tag); 1127 #endif 1128 siop_lun->lun_flags |= SIOP_LUNF_FULL; 1129 siop_cmd->cmd_c.status = CMDST_READY; 1130 siop_setuptables(&siop_cmd->cmd_c); 1131 siop_table_sync(siop_cmd, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1132 TAILQ_INSERT_TAIL(&sc->urgent_list, siop_cmd, next); 1133 return; 1134 case SCSI_SIOP_NOCHECK: 1135 /* 1136 * don't check status, xs->error is already valid 1137 */ 1138 break; 1139 case SCSI_SIOP_NOSTATUS: 1140 /* 1141 * the status byte was not updated, cmd was 1142 * aborted 1143 */ 1144 xs->error = XS_SELTIMEOUT; 1145 break; 1146 default: 1147 xs->error = XS_DRIVER_STUFFUP; 1148 } 1149 if (siop_cmd->cmd_c.status != CMDST_SENSE_DONE && 1150 xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) { 1151 bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_data, 0, 1152 siop_cmd->cmd_c.dmamap_data->dm_mapsize, 1153 (xs->flags & SCSI_DATA_IN) ? 
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_data);
	}
	if (siop_cmd->cmd_c.status == CMDST_SENSE) {
		/* issue a request sense for this target */
		struct scsi_sense *cmd = (struct scsi_sense *)&siop_cmd->cmd_c.siop_tables->xscmd;
		int error;
		bzero(cmd, sizeof(*cmd));
		siop_cmd->cmd_c.siop_tables->cmd.count =
		    siop_htoc32(&sc->sc_c, sizeof(struct scsi_sense));
		cmd->opcode = REQUEST_SENSE;
		cmd->byte2 = xs->sc_link->lun << 5;
		cmd->unused[0] = cmd->unused[1] = 0;
		cmd->length = sizeof(struct scsi_sense_data);
		cmd->control = 0;
		siop_cmd->cmd_c.flags &= ~CMDFL_TAG;
		error = bus_dmamap_load(sc->sc_c.sc_dmat,
		    siop_cmd->cmd_c.dmamap_data,
		    siop_cmd->cmd_c.sense, sizeof(struct scsi_sense_data),
		    NULL, BUS_DMA_NOWAIT);
		if (error) {
			printf("%s: unable to load data DMA map "
			    "(for SENSE): %d\n",
			    sc->sc_c.sc_dev.dv_xname, error);
			xs->error = XS_DRIVER_STUFFUP;
			goto out;
		}
		bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_data,
		    0, siop_cmd->cmd_c.dmamap_data->dm_mapsize,
		    BUS_DMASYNC_PREREAD);

		siop_setuptables(&siop_cmd->cmd_c);
		siop_table_sync(siop_cmd, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/* arrange for the cmd to be handled now */
		TAILQ_INSERT_HEAD(&sc->urgent_list, siop_cmd, next);
		return;
	} else if (siop_cmd->cmd_c.status == CMDST_SENSE_DONE) {
		bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_data,
		    0, siop_cmd->cmd_c.dmamap_data->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_data);
		bcopy(siop_cmd->cmd_c.sense, &xs->sense, sizeof(xs->sense));
	}
 out:
	siop_lun->lun_flags &= ~SIOP_LUNF_FULL;
#if 0
	if (xs->resid != 0)
		printf("resid %d datalen %d\n", xs->resid, xs->datalen);
#endif
	scsi_done(xs);
}

/*
 * handle a rejected queue tag message: the command will run untagged,
 * so we have to adjust the reselect script.
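 *
 * (Editorial note, illustrative only: each per-tag entry in the reselect
 * switch is a two-word script instruction whose second word holds the jump
 * address, so writing
 *
 *	siop_script_write(sc, siop_lun->siop_tag[0].reseloff + 1,
 *	    siop_cmd->cmd_c.dsa + sizeof(struct siop_common_xfer) +
 *	    Ent_ldsa_reload_dsa);
 *
 * as done below redirects a reselection of this nexus to the command's
 * private load-DSA stub; siop_start() patches the tagged entries the same
 * way.)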
1209 */ 1210 int 1211 siop_handle_qtag_reject(siop_cmd) 1212 struct siop_cmd *siop_cmd; 1213 { 1214 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc; 1215 int target = siop_cmd->cmd_c.xs->sc_link->target; 1216 int lun = siop_cmd->cmd_c.xs->sc_link->lun; 1217 int tag = siop_cmd->cmd_tables->msg_out[2]; 1218 struct siop_lun *siop_lun = 1219 ((struct siop_target*)sc->sc_c.targets[target])->siop_lun[lun]; 1220 1221 #ifdef SIOP_DEBUG 1222 printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n", 1223 sc->sc_c.sc_dev.dv_xname, target, lun, tag, siop_cmd->cmd_c.tag, 1224 siop_cmd->cmd_c.status); 1225 #endif 1226 1227 if (siop_lun->siop_tag[0].active != NULL) { 1228 printf("%s: untagged command already running for target %d " 1229 "lun %d (status %d)\n", sc->sc_c.sc_dev.dv_xname, 1230 target, lun, siop_lun->siop_tag[0].active->cmd_c.status); 1231 return -1; 1232 } 1233 /* clear tag slot */ 1234 siop_lun->siop_tag[tag].active = NULL; 1235 /* add command to non-tagged slot */ 1236 siop_lun->siop_tag[0].active = siop_cmd; 1237 siop_cmd->cmd_c.tag = 0; 1238 /* adjust reselect script if there is one */ 1239 if (siop_lun->siop_tag[0].reseloff > 0) { 1240 siop_script_write(sc, 1241 siop_lun->siop_tag[0].reseloff + 1, 1242 siop_cmd->cmd_c.dsa + sizeof(struct siop_common_xfer) + 1243 Ent_ldsa_reload_dsa); 1244 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE); 1245 } 1246 return 0; 1247 } 1248 1249 /* 1250 * handle a bus reset: reset chip, unqueue all active commands, free all 1251 * target struct and report lossage to upper layer. 1252 * As the upper layer may requeue immediately we have to first store 1253 * all active commands in a temporary queue. 1254 */ 1255 void 1256 siop_handle_reset(sc) 1257 struct siop_softc *sc; 1258 { 1259 struct cmd_list reset_list; 1260 struct siop_cmd *siop_cmd, *next_siop_cmd; 1261 struct siop_lun *siop_lun; 1262 int target, lun, tag; 1263 /* 1264 * scsi bus reset. reset the chip and restart 1265 * the queue. Need to clean up all active commands 1266 */ 1267 printf("%s: scsi bus reset\n", sc->sc_c.sc_dev.dv_xname); 1268 /* stop, reset and restart the chip */ 1269 siop_reset(sc); 1270 TAILQ_INIT(&reset_list); 1271 /* 1272 * Process all commands: first commands being executed 1273 */ 1274 for (target = 0; target < sc->sc_c.sc_link.adapter_buswidth; 1275 target++) { 1276 if (sc->sc_c.targets[target] == NULL) 1277 continue; 1278 for (lun = 0; lun < 8; lun++) { 1279 struct siop_target *siop_target = 1280 (struct siop_target *)sc->sc_c.targets[target]; 1281 siop_lun = siop_target->siop_lun[lun]; 1282 if (siop_lun == NULL) 1283 continue; 1284 siop_lun->lun_flags &= ~SIOP_LUNF_FULL; 1285 for (tag = 0; tag < 1286 ((sc->sc_c.targets[target]->flags & TARF_TAG) ? 
1287 SIOP_NTAG : 1); 1288 tag++) { 1289 siop_cmd = siop_lun->siop_tag[tag].active; 1290 if (siop_cmd == NULL) 1291 continue; 1292 siop_lun->siop_tag[tag].active = NULL; 1293 TAILQ_INSERT_TAIL(&reset_list, siop_cmd, next); 1294 sc_print_addr(siop_cmd->cmd_c.xs->sc_link); 1295 printf("cmd %p (tag %d) added to reset list\n", 1296 siop_cmd, tag); 1297 } 1298 } 1299 if (sc->sc_c.targets[target]->status != TARST_PROBING) { 1300 sc->sc_c.targets[target]->status = TARST_ASYNC; 1301 sc->sc_c.targets[target]->flags &= ~TARF_ISWIDE; 1302 sc->sc_c.targets[target]->period = 1303 sc->sc_c.targets[target]->offset = 0; 1304 siop_update_xfer_mode(&sc->sc_c, target); 1305 } 1306 } 1307 /* Next commands from the urgent list */ 1308 for (siop_cmd = TAILQ_FIRST(&sc->urgent_list); siop_cmd != NULL; 1309 siop_cmd = next_siop_cmd) { 1310 next_siop_cmd = TAILQ_NEXT(siop_cmd, next); 1311 TAILQ_REMOVE(&sc->urgent_list, siop_cmd, next); 1312 TAILQ_INSERT_TAIL(&reset_list, siop_cmd, next); 1313 sc_print_addr(siop_cmd->cmd_c.xs->sc_link); 1314 printf("cmd %p added to reset list from urgent list\n", 1315 siop_cmd); 1316 } 1317 /* Then commands waiting in the input list. */ 1318 for (siop_cmd = TAILQ_FIRST(&sc->ready_list); siop_cmd != NULL; 1319 siop_cmd = next_siop_cmd) { 1320 next_siop_cmd = TAILQ_NEXT(siop_cmd, next); 1321 TAILQ_REMOVE(&sc->ready_list, siop_cmd, next); 1322 TAILQ_INSERT_TAIL(&reset_list, siop_cmd, next); 1323 sc_print_addr(siop_cmd->cmd_c.xs->sc_link); 1324 printf("cmd %p added to reset list from ready list\n", 1325 siop_cmd); 1326 } 1327 1328 for (siop_cmd = TAILQ_FIRST(&reset_list); siop_cmd != NULL; 1329 siop_cmd = next_siop_cmd) { 1330 next_siop_cmd = TAILQ_NEXT(siop_cmd, next); 1331 siop_cmd->cmd_c.flags &= ~CMDFL_TAG; 1332 siop_cmd->cmd_c.xs->error = 1333 (siop_cmd->cmd_c.flags & CMDFL_TIMEOUT) 1334 ? XS_TIMEOUT : XS_RESET; 1335 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK; 1336 sc_print_addr(siop_cmd->cmd_c.xs->sc_link); 1337 printf("cmd %p (status %d) reset", 1338 siop_cmd, siop_cmd->cmd_c.status); 1339 if (siop_cmd->cmd_c.status == CMDST_SENSE || 1340 siop_cmd->cmd_c.status == CMDST_SENSE_ACTIVE) 1341 siop_cmd->cmd_c.status = CMDST_SENSE_DONE; 1342 else 1343 siop_cmd->cmd_c.status = CMDST_DONE; 1344 printf(" with status %d, xs->error %d\n", 1345 siop_cmd->cmd_c.status, siop_cmd->cmd_c.xs->error); 1346 TAILQ_REMOVE(&reset_list, siop_cmd, next); 1347 siop_scsicmd_end(siop_cmd); 1348 } 1349 } 1350 1351 void * 1352 siop_cmd_get(void *cookie) 1353 { 1354 struct siop_softc *sc = cookie; 1355 struct siop_cmd *siop_cmd; 1356 int s; 1357 1358 /* Look if a ccb is available. 
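	 *
	 * (Editorial note, illustrative only: this is the "get" side of the
	 * scsi_iopool registered in siop_attach():
	 *
	 *	scsi_iopool_init(&sc->iopool, sc, siop_cmd_get, siop_cmd_put);
	 *
	 * Returning NULL simply tells the midlayer that no ccb is free right
	 * now; siop_cmd_put() below is the matching release side.)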
*/ 1359 s = splbio(); 1360 siop_cmd = TAILQ_FIRST(&sc->free_list); 1361 if (siop_cmd != NULL) { 1362 TAILQ_REMOVE(&sc->free_list, siop_cmd, next); 1363 #ifdef DIAGNOSTIC 1364 if (siop_cmd->cmd_c.status != CMDST_FREE) 1365 panic("siop_scsicmd: new cmd not free"); 1366 #endif 1367 siop_cmd->cmd_c.status = CMDST_READY; 1368 } 1369 splx(s); 1370 1371 return (siop_cmd); 1372 } 1373 1374 void 1375 siop_cmd_put(void *cookie, void *io) 1376 { 1377 struct siop_softc *sc = cookie; 1378 struct siop_cmd *siop_cmd = io; 1379 int s; 1380 1381 s = splbio(); 1382 siop_cmd->cmd_c.status = CMDST_FREE; 1383 TAILQ_INSERT_TAIL(&sc->free_list, siop_cmd, next); 1384 splx(s); 1385 } 1386 1387 int 1388 siop_scsiprobe(struct scsi_link *link) 1389 { 1390 struct siop_softc *sc = (struct siop_softc *)link->adapter_softc; 1391 struct siop_target *siop_target; 1392 const int target = link->target; 1393 const int lun = link->lun; 1394 int i; 1395 1396 #ifdef SIOP_DEBUG 1397 printf("%s:%d:%d: probe\n", 1398 sc->sc_c.sc_dev.dv_xname, target, lun); 1399 #endif 1400 1401 /* XXX locking */ 1402 1403 siop_target = (struct siop_target*)sc->sc_c.targets[target]; 1404 if (siop_target == NULL) { 1405 siop_target = malloc(sizeof(*siop_target), M_DEVBUF, 1406 M_WAITOK | M_CANFAIL | M_ZERO); 1407 if (siop_target == NULL) { 1408 printf("%s: can't malloc memory for target %d\n", 1409 sc->sc_c.sc_dev.dv_xname, target); 1410 return (ENOMEM); 1411 } 1412 1413 siop_target->target_c.status = TARST_PROBING; 1414 siop_target->target_c.flags = 0; 1415 siop_target->target_c.id = 1416 sc->sc_c.clock_div << 24; /* scntl3 */ 1417 siop_target->target_c.id |= target << 16; /* id */ 1418 /* siop_target->target_c.id |= 0x0 << 8; scxfer is 0 */ 1419 1420 /* get a lun switch script */ 1421 siop_target->lunsw = siop_get_lunsw(sc); 1422 if (siop_target->lunsw == NULL) { 1423 printf("%s: can't alloc lunsw for target %d\n", 1424 sc->sc_c.sc_dev.dv_xname, target); 1425 free(siop_target, M_DEVBUF, sizeof *siop_target); 1426 return (ENOMEM); 1427 } 1428 for (i = 0; i < 8; i++) 1429 siop_target->siop_lun[i] = NULL; 1430 1431 sc->sc_c.targets[target] = 1432 (struct siop_common_target *)siop_target; 1433 1434 siop_add_reselsw(sc, target); 1435 } 1436 1437 if (siop_target->siop_lun[lun] == NULL) { 1438 siop_target->siop_lun[lun] = 1439 malloc(sizeof(struct siop_lun), M_DEVBUF, 1440 M_WAITOK | M_CANFAIL | M_ZERO); 1441 if (siop_target->siop_lun[lun] == NULL) { 1442 printf("%s: can't alloc siop_lun for " 1443 "target %d lun %d\n", 1444 sc->sc_c.sc_dev.dv_xname, target, lun); 1445 return (ENOMEM); 1446 } 1447 } 1448 1449 return (0); 1450 } 1451 1452 void 1453 siop_scsicmd(xs) 1454 struct scsi_xfer *xs; 1455 { 1456 struct siop_softc *sc = (struct siop_softc *)xs->sc_link->adapter_softc; 1457 struct siop_cmd *siop_cmd; 1458 struct siop_target *siop_target; 1459 int s, error, i, j; 1460 const int target = xs->sc_link->target; 1461 const int lun = xs->sc_link->lun; 1462 1463 #ifdef SIOP_DEBUG_SCHED 1464 printf("starting cmd for %d:%d\n", target, lun); 1465 #endif 1466 1467 siop_target = (struct siop_target*)sc->sc_c.targets[target]; 1468 siop_cmd = xs->io; 1469 1470 /* 1471 * The xs may have been restarted by the scsi layer, so ensure the ccb 1472 * starts in the proper state. 
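	 *
	 * (Editorial note: a requeued xs reuses the same siop_cmd, so
	 * everything derived from it is rebuilt below: the status goes back
	 * to CMDST_READY, the stimeout handler is reset with timeout_set(),
	 * the CDB is copied into the command tables again and the data DMA
	 * map is reloaded.  Nothing from a previous run of this ccb is
	 * assumed to still be valid.)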
1473 */ 1474 siop_cmd->cmd_c.status = CMDST_READY; 1475 1476 /* Always reset xs->stimeout, lest we timeout_del() with trash */ 1477 timeout_set(&xs->stimeout, siop_timeout, siop_cmd); 1478 1479 siop_cmd->cmd_c.siop_target = sc->sc_c.targets[target]; 1480 siop_cmd->cmd_c.xs = xs; 1481 siop_cmd->cmd_c.flags = 0; 1482 1483 bzero(&siop_cmd->cmd_c.siop_tables->xscmd, 1484 sizeof(siop_cmd->cmd_c.siop_tables->xscmd)); 1485 bcopy(xs->cmd, &siop_cmd->cmd_c.siop_tables->xscmd, xs->cmdlen); 1486 siop_cmd->cmd_c.siop_tables->cmd.count = 1487 siop_htoc32(&sc->sc_c, xs->cmdlen); 1488 1489 /* load the DMA maps */ 1490 if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) { 1491 error = bus_dmamap_load(sc->sc_c.sc_dmat, 1492 siop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen, 1493 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING | 1494 ((xs->flags & SCSI_DATA_IN) ? 1495 BUS_DMA_READ : BUS_DMA_WRITE)); 1496 if (error) { 1497 printf("%s: unable to load data DMA map: %d\n", 1498 sc->sc_c.sc_dev.dv_xname, error); 1499 xs->error = XS_DRIVER_STUFFUP; 1500 scsi_done(xs); 1501 return; 1502 } 1503 bus_dmamap_sync(sc->sc_c.sc_dmat, 1504 siop_cmd->cmd_c.dmamap_data, 0, 1505 siop_cmd->cmd_c.dmamap_data->dm_mapsize, 1506 (xs->flags & SCSI_DATA_IN) ? 1507 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 1508 } 1509 1510 siop_setuptables(&siop_cmd->cmd_c); 1511 siop_cmd->saved_offset = SIOP_NOOFFSET; 1512 siop_table_sync(siop_cmd, 1513 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1514 1515 /* Negotiate transfer parameters on first non-polling command. */ 1516 if (((xs->flags & SCSI_POLL) == 0) && 1517 siop_target->target_c.status == TARST_PROBING) 1518 siop_target->target_c.status = TARST_ASYNC; 1519 1520 s = splbio(); 1521 TAILQ_INSERT_TAIL(&sc->ready_list, siop_cmd, next); 1522 siop_start(sc); 1523 if ((xs->flags & SCSI_POLL) == 0) { 1524 splx(s); 1525 return; 1526 } 1527 1528 /* Poll for command completion. */ 1529 for(i = xs->timeout; i > 0; i--) { 1530 siop_intr(sc); 1531 if ((xs->flags & ITSDONE) == 0) { 1532 delay(1000); 1533 continue; 1534 } 1535 if (xs->cmd->opcode == INQUIRY && xs->error == XS_NOERROR) { 1536 struct scsi_inquiry_data *inqbuf = 1537 (struct scsi_inquiry_data *)xs->data; 1538 if ((inqbuf->device & SID_QUAL) == SID_QUAL_BAD_LU) 1539 break; 1540 /* 1541 * Allocate cbd's to hold maximum openings worth of 1542 * commands. Do this now because doing it dynamically in 1543 * siop_startcmd may cause calls to bus_dma* functions 1544 * in interrupt context. 1545 */ 1546 for (j = 0; j < SIOP_NTAG; j += SIOP_NCMDPB) 1547 siop_morecbd(sc); 1548 1549 /* 1550 * Set TARF_DT here because if it is turned off during 1551 * PPR, it must STAY off! 1552 */ 1553 if ((lun == 0) && (sc->sc_c.features & SF_BUS_ULTRA3)) 1554 sc->sc_c.targets[target]->flags |= TARF_DT; 1555 /* 1556 * Can't do lun 0 here, because flags are not set yet. 1557 * But have to do other lun's here because they never go 1558 * through TARST_ASYNC. 
1559 */ 1560 if (lun > 0) 1561 siop_add_dev(sc, target, lun); 1562 } 1563 break; 1564 } 1565 if (i == 0) { 1566 siop_timeout(siop_cmd); 1567 while ((xs->flags & ITSDONE) == 0) 1568 siop_intr(sc); 1569 } 1570 1571 splx(s); 1572 } 1573 1574 void 1575 siop_start(sc) 1576 struct siop_softc *sc; 1577 { 1578 struct siop_cmd *siop_cmd, *next_siop_cmd; 1579 struct siop_lun *siop_lun; 1580 struct siop_xfer *siop_xfer; 1581 u_int32_t dsa; 1582 int target, lun, tag, slot; 1583 int newcmd = 0; 1584 int doingready = 0; 1585 1586 /* 1587 * first make sure to read valid data 1588 */ 1589 siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1590 1591 /* 1592 * The queue management here is a bit tricky: the script always looks 1593 * at the slot from first to last, so if we always use the first 1594 * free slot commands can stay at the tail of the queue ~forever. 1595 * The algorithm used here is to restart from the head when we know 1596 * that the queue is empty, and only add commands after the last one. 1597 * When we're at the end of the queue wait for the script to clear it. 1598 * The best thing to do here would be to implement a circular queue, 1599 * but using only 53c720 features this can be "interesting". 1600 * A mid-way solution could be to implement 2 queues and swap orders. 1601 */ 1602 slot = sc->sc_currschedslot; 1603 /* 1604 * If the instruction is 0x80000000 (JUMP foo, IF FALSE) the slot is 1605 * free. As this is the last used slot, all previous slots are free, 1606 * we can restart from 1. 1607 * slot 0 is reserved for request sense commands. 1608 */ 1609 if (siop_script_read(sc, (Ent_script_sched_slot0 / 4) + slot * 2) == 1610 0x80000000) { 1611 slot = sc->sc_currschedslot = 1; 1612 } else { 1613 slot++; 1614 } 1615 /* first handle commands from the urgent list */ 1616 siop_cmd = TAILQ_FIRST(&sc->urgent_list); 1617 again: 1618 for (; siop_cmd != NULL; siop_cmd = next_siop_cmd) { 1619 next_siop_cmd = TAILQ_NEXT(siop_cmd, next); 1620 #ifdef DIAGNOSTIC 1621 if (siop_cmd->cmd_c.status != CMDST_READY && 1622 siop_cmd->cmd_c.status != CMDST_SENSE) 1623 panic("siop: non-ready cmd in ready list"); 1624 #endif 1625 target = siop_cmd->cmd_c.xs->sc_link->target; 1626 lun = siop_cmd->cmd_c.xs->sc_link->lun; 1627 siop_lun = 1628 ((struct siop_target*)sc->sc_c.targets[target])->siop_lun[lun]; 1629 /* if non-tagged command active, wait */ 1630 if (siop_lun->siop_tag[0].active != NULL) 1631 continue; 1632 /* 1633 * if we're in a queue full condition don't start a new 1634 * command, unless it's a request sense 1635 */ 1636 if ((siop_lun->lun_flags & SIOP_LUNF_FULL) && 1637 siop_cmd->cmd_c.status == CMDST_READY) 1638 continue; 1639 /* find a free tag if needed */ 1640 if (siop_cmd->cmd_c.flags & CMDFL_TAG) { 1641 for (tag = 1; tag < SIOP_NTAG; tag++) { 1642 if (siop_lun->siop_tag[tag].active == NULL) 1643 break; 1644 } 1645 if (tag == SIOP_NTAG) /* no free tag */ 1646 continue; 1647 } else { 1648 tag = 0; 1649 } 1650 siop_cmd->cmd_c.tag = tag; 1651 /* 1652 * find a free scheduler slot and load it. If it's a request 1653 * sense we need to use slot 0. 
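		 *
		 * (Editorial note, illustrative only: a scheduler slot is one
		 * two-word script instruction at script offset
		 * Ent_script_sched_slot0 / 4 + slot * 2.  A free slot reads
		 *
		 *	0x80000000 xxxxxxxx	(JUMP xxx, IF FALSE: never taken)
		 *
		 * and loading a command rewrites it, as done below, to
		 *
		 *	0x80080000 dsa + sizeof(struct siop_common_xfer) +
		 *		   Ent_ldsa_select
		 *
		 * so the JUMP is now taken into the command's private
		 * ldsa_select code.  When the script picks the command up it
		 * uses a MOVE MEMORY to put 0x80000000 back, which is why a
		 * 0x80000000 first word means the slot is free.)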
1654 */ 1655 if (siop_cmd->cmd_c.status != CMDST_SENSE) { 1656 for (; slot < SIOP_NSLOTS; slot++) { 1657 /* 1658 * If cmd if 0x80000000 the slot is free 1659 */ 1660 if (siop_script_read(sc, 1661 (Ent_script_sched_slot0 / 4) + slot * 2) == 1662 0x80000000) 1663 break; 1664 } 1665 /* no more free slots, no need to continue */ 1666 if (slot == SIOP_NSLOTS) { 1667 goto end; 1668 } 1669 } else { 1670 slot = 0; 1671 if (siop_script_read(sc, Ent_script_sched_slot0 / 4) 1672 != 0x80000000) 1673 goto end; 1674 } 1675 1676 #ifdef SIOP_DEBUG_SCHED 1677 printf("using slot %d for DSA 0x%lx\n", slot, 1678 (u_long)siop_cmd->cmd_c.dsa); 1679 #endif 1680 /* Ok, we can add the tag message */ 1681 if (tag > 0) { 1682 #ifdef DIAGNOSTIC 1683 int msgcount = siop_ctoh32(&sc->sc_c, 1684 siop_cmd->cmd_tables->t_msgout.count); 1685 if (msgcount != 1) 1686 printf("%s:%d:%d: tag %d with msgcount %d\n", 1687 sc->sc_c.sc_dev.dv_xname, target, lun, tag, 1688 msgcount); 1689 #endif 1690 siop_cmd->cmd_tables->msg_out[1] = MSG_SIMPLE_Q_TAG; 1691 siop_cmd->cmd_tables->msg_out[2] = tag; 1692 siop_cmd->cmd_tables->t_msgout.count = 1693 siop_htoc32(&sc->sc_c, 3); 1694 } 1695 /* note that we started a new command */ 1696 newcmd = 1; 1697 /* mark command as active */ 1698 if (siop_cmd->cmd_c.status == CMDST_READY) { 1699 siop_cmd->cmd_c.status = CMDST_ACTIVE; 1700 } else if (siop_cmd->cmd_c.status == CMDST_SENSE) { 1701 siop_cmd->cmd_c.status = CMDST_SENSE_ACTIVE; 1702 } else 1703 panic("siop_start: bad status"); 1704 if (doingready) 1705 TAILQ_REMOVE(&sc->ready_list, siop_cmd, next); 1706 else 1707 TAILQ_REMOVE(&sc->urgent_list, siop_cmd, next); 1708 siop_lun->siop_tag[tag].active = siop_cmd; 1709 /* patch scripts with DSA addr */ 1710 dsa = siop_cmd->cmd_c.dsa; 1711 /* first reselect switch, if we have an entry */ 1712 if (siop_lun->siop_tag[tag].reseloff > 0) 1713 siop_script_write(sc, 1714 siop_lun->siop_tag[tag].reseloff + 1, 1715 dsa + sizeof(struct siop_common_xfer) + 1716 Ent_ldsa_reload_dsa); 1717 /* CMD script: MOVE MEMORY addr */ 1718 siop_xfer = (struct siop_xfer*)siop_cmd->cmd_tables; 1719 siop_xfer->resel[E_ldsa_abs_slot_Used[0]] = 1720 siop_htoc32(&sc->sc_c, sc->sc_c.sc_scriptaddr + 1721 Ent_script_sched_slot0 + slot * 8); 1722 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE); 1723 /* scheduler slot: JUMP ldsa_select */ 1724 siop_script_write(sc, 1725 (Ent_script_sched_slot0 / 4) + slot * 2 + 1, 1726 dsa + sizeof(struct siop_common_xfer) + Ent_ldsa_select); 1727 /* handle timeout */ 1728 if (siop_cmd->cmd_c.status == CMDST_ACTIVE) { 1729 if ((siop_cmd->cmd_c.xs->flags & SCSI_POLL) == 0) { 1730 /* start expire timer */ 1731 timeout_add_msec(&siop_cmd->cmd_c.xs->stimeout, 1732 siop_cmd->cmd_c.xs->timeout); 1733 } 1734 } 1735 /* 1736 * Change JUMP cmd so that this slot will be handled 1737 */ 1738 siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2, 1739 0x80080000); 1740 /* if we're using the request sense slot, stop here */ 1741 if (slot == 0) 1742 goto end; 1743 sc->sc_currschedslot = slot; 1744 slot++; 1745 } 1746 if (doingready == 0) { 1747 /* now process ready list */ 1748 doingready = 1; 1749 siop_cmd = TAILQ_FIRST(&sc->ready_list); 1750 goto again; 1751 } 1752 1753 end: 1754 /* if nothing changed no need to flush cache and wakeup script */ 1755 if (newcmd == 0) 1756 return; 1757 /* make sure SCRIPT processor will read valid data */ 1758 siop_script_sync(sc,BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1759 /* Signal script it has some work to do */ 1760 bus_space_write_1(sc->sc_c.sc_rt, 
sc->sc_c.sc_rh, 1761 SIOP_ISTAT, ISTAT_SIGP); 1762 /* and wait for IRQ */ 1763 } 1764 1765 void 1766 siop_timeout(v) 1767 void *v; 1768 { 1769 struct siop_cmd *siop_cmd = v; 1770 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc; 1771 int s; 1772 1773 /* deactivate callout */ 1774 timeout_del(&siop_cmd->cmd_c.xs->stimeout); 1775 1776 sc_print_addr(siop_cmd->cmd_c.xs->sc_link); 1777 printf("timeout on SCSI command 0x%x\n", 1778 siop_cmd->cmd_c.xs->cmd->opcode); 1779 1780 s = splbio(); 1781 /* reset the scsi bus */ 1782 siop_resetbus(&sc->sc_c); 1783 siop_cmd->cmd_c.flags |= CMDFL_TIMEOUT; 1784 siop_handle_reset(sc); 1785 splx(s); 1786 } 1787 1788 #ifdef DUMP_SCRIPT 1789 void 1790 siop_dump_script(sc) 1791 struct siop_softc *sc; 1792 { 1793 int i; 1794 for (i = 0; i < PAGE_SIZE / 4; i += 2) { 1795 printf("0x%04x: 0x%08x 0x%08x", i * 4, 1796 siop_ctoh32(&sc->sc_c, sc->sc_c.sc_script[i]), 1797 siop_ctoh32(&sc->sc_c, sc->sc_c.sc_script[i+1])); 1798 if ((siop_ctoh32(&sc->sc_c, 1799 sc->sc_c.sc_script[i]) & 0xe0000000) == 0xc0000000) { 1800 i++; 1801 printf(" 0x%08x", siop_ctoh32(&sc->sc_c, 1802 sc->sc_c.sc_script[i+1])); 1803 } 1804 printf("\n"); 1805 } 1806 } 1807 #endif 1808 1809 void 1810 siop_morecbd(sc) 1811 struct siop_softc *sc; 1812 { 1813 int error, off, i, j, s; 1814 struct siop_cbd *newcbd; 1815 struct siop_xfer *xfers, *xfer; 1816 bus_addr_t dsa; 1817 u_int32_t *scr; 1818 size_t sense_size = roundup(sizeof(struct scsi_sense_data), 16); 1819 1820 /* allocate a new list head */ 1821 newcbd = malloc(sizeof(struct siop_cbd), M_DEVBUF, M_NOWAIT | M_ZERO); 1822 if (newcbd == NULL) { 1823 printf("%s: can't allocate memory for command descriptors " 1824 "head\n", sc->sc_c.sc_dev.dv_xname); 1825 return; 1826 } 1827 1828 /* allocate cmd list */ 1829 newcbd->cmds = mallocarray(SIOP_NCMDPB, sizeof(struct siop_cmd), 1830 M_DEVBUF, M_NOWAIT | M_ZERO); 1831 if (newcbd->cmds == NULL) { 1832 printf("%s: can't allocate memory for command descriptors\n", 1833 sc->sc_c.sc_dev.dv_xname); 1834 goto bad3; 1835 } 1836 1837 newcbd->xfers = siop_dmamem_alloc(sc, PAGE_SIZE); 1838 if (newcbd->xfers == NULL) { 1839 printf("%s: unable to allocate cbd xfer DMA memory\n", 1840 sc->sc_c.sc_dev.dv_xname); 1841 goto bad2; 1842 } 1843 xfers = SIOP_DMA_KVA(newcbd->xfers); 1844 1845 newcbd->sense = siop_dmamem_alloc(sc, sense_size * SIOP_NCMDPB); 1846 if (newcbd->sense == NULL) { 1847 printf("%s: unable to allocate cbd sense DMA memory\n", 1848 sc->sc_c.sc_dev.dv_xname); 1849 goto bad1; 1850 } 1851 1852 for (i = 0; i < SIOP_NCMDPB; i++) { 1853 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG, 1854 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 1855 &newcbd->cmds[i].cmd_c.dmamap_data); 1856 if (error) { 1857 printf("%s: unable to create data DMA map for cbd: " 1858 "error %d\n", 1859 sc->sc_c.sc_dev.dv_xname, error); 1860 goto bad0; 1861 } 1862 } 1863 1864 /* Use two loops since bailing out above releases allocated memory */ 1865 off = (sc->sc_c.features & SF_CHIP_BE) ? 
void
siop_morecbd(sc)
	struct siop_softc *sc;
{
	int error, off, i, j, s;
	struct siop_cbd *newcbd;
	struct siop_xfer *xfers, *xfer;
	bus_addr_t dsa;
	u_int32_t *scr;
	size_t sense_size = roundup(sizeof(struct scsi_sense_data), 16);

	/* allocate a new list head */
	newcbd = malloc(sizeof(struct siop_cbd), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (newcbd == NULL) {
		printf("%s: can't allocate memory for command descriptors "
		    "head\n", sc->sc_c.sc_dev.dv_xname);
		return;
	}

	/* allocate cmd list */
	newcbd->cmds = mallocarray(SIOP_NCMDPB, sizeof(struct siop_cmd),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (newcbd->cmds == NULL) {
		printf("%s: can't allocate memory for command descriptors\n",
		    sc->sc_c.sc_dev.dv_xname);
		goto bad3;
	}

	newcbd->xfers = siop_dmamem_alloc(sc, PAGE_SIZE);
	if (newcbd->xfers == NULL) {
		printf("%s: unable to allocate cbd xfer DMA memory\n",
		    sc->sc_c.sc_dev.dv_xname);
		goto bad2;
	}
	xfers = SIOP_DMA_KVA(newcbd->xfers);

	newcbd->sense = siop_dmamem_alloc(sc, sense_size * SIOP_NCMDPB);
	if (newcbd->sense == NULL) {
		printf("%s: unable to allocate cbd sense DMA memory\n",
		    sc->sc_c.sc_dev.dv_xname);
		goto bad1;
	}

	for (i = 0; i < SIOP_NCMDPB; i++) {
		error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
		    MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &newcbd->cmds[i].cmd_c.dmamap_data);
		if (error) {
			printf("%s: unable to create data DMA map for cbd: "
			    "error %d\n",
			    sc->sc_c.sc_dev.dv_xname, error);
			goto bad0;
		}
	}

	/* Use two loops since bailing out above releases allocated memory */
	off = (sc->sc_c.features & SF_CHIP_BE) ? 3 : 0;
	for (i = 0; i < SIOP_NCMDPB; i++) {
		newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
		newcbd->cmds[i].siop_cbdp = newcbd;
		xfer = &xfers[i];
		newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
		bzero(newcbd->cmds[i].cmd_tables, sizeof(struct siop_xfer));
		dsa = SIOP_DMA_DVA(newcbd->xfers) +
		    i * sizeof(struct siop_xfer);
		newcbd->cmds[i].cmd_c.dsa = dsa;
		newcbd->cmds[i].cmd_c.status = CMDST_FREE;
		newcbd->cmds[i].cmd_c.sense = (struct scsi_sense_data *)(
		    i * sense_size +
		    (u_int8_t *)SIOP_DMA_KVA(newcbd->sense));
		xfer->siop_tables.t_msgout.count = siop_htoc32(&sc->sc_c, 1);
		xfer->siop_tables.t_msgout.addr = siop_htoc32(&sc->sc_c, dsa);
		xfer->siop_tables.t_msgin.count = siop_htoc32(&sc->sc_c, 1);
		xfer->siop_tables.t_msgin.addr = siop_htoc32(&sc->sc_c,
		    dsa + offsetof(struct siop_common_xfer, msg_in));
		xfer->siop_tables.t_extmsgin.count = siop_htoc32(&sc->sc_c, 2);
		xfer->siop_tables.t_extmsgin.addr = siop_htoc32(&sc->sc_c,
		    dsa + offsetof(struct siop_common_xfer, msg_in) + 1);
		xfer->siop_tables.t_extmsgdata.addr = siop_htoc32(&sc->sc_c,
		    dsa + offsetof(struct siop_common_xfer, msg_in) + 3);
		xfer->siop_tables.t_status.count = siop_htoc32(&sc->sc_c, 1);
		xfer->siop_tables.t_status.addr = siop_htoc32(&sc->sc_c,
		    dsa + offsetof(struct siop_common_xfer, status) + off);
		xfer->siop_tables.cmd.count = siop_htoc32(&sc->sc_c, 0);
		xfer->siop_tables.cmd.addr = siop_htoc32(&sc->sc_c,
		    dsa + offsetof(struct siop_common_xfer, xscmd));
		/* The select/reselect script */
		scr = &xfer->resel[0];
		for (j = 0; j < sizeof(load_dsa) / sizeof(load_dsa[0]); j++)
			scr[j] = siop_htoc32(&sc->sc_c, load_dsa[j]);
		/*
		 * 0x78000000 is a 'move data8 to reg'. data8 is the second
		 * octet, reg offset is the third.
		 */
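		/*
		 * Worked example (illustrative only): with dsa == 0x12345678
		 * the four stores below become 0x78107800, 0x78115600,
		 * 0x78123400 and 0x78131200, i.e. "move 0x78 to reg 0x10",
		 * "move 0x56 to reg 0x11", and so on, assuming registers
		 * 0x10-0x13 are DSA0-DSA3 as on the 53c8xx.  Re-running the
		 * patched script therefore reloads this command's full
		 * 32-bit DSA one byte at a time.
		 */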
		scr[Ent_rdsa0 / 4] = siop_htoc32(&sc->sc_c,
		    0x78100000 | ((dsa & 0x000000ff) << 8));
		scr[Ent_rdsa1 / 4] = siop_htoc32(&sc->sc_c,
		    0x78110000 | (dsa & 0x0000ff00));
		scr[Ent_rdsa2 / 4] = siop_htoc32(&sc->sc_c,
		    0x78120000 | ((dsa & 0x00ff0000) >> 8));
		scr[Ent_rdsa3 / 4] = siop_htoc32(&sc->sc_c,
		    0x78130000 | ((dsa & 0xff000000) >> 16));
		scr[E_ldsa_abs_reselected_Used[0]] = siop_htoc32(&sc->sc_c,
		    sc->sc_c.sc_scriptaddr + Ent_reselected);
		scr[E_ldsa_abs_reselect_Used[0]] = siop_htoc32(&sc->sc_c,
		    sc->sc_c.sc_scriptaddr + Ent_reselect);
		scr[E_ldsa_abs_selected_Used[0]] = siop_htoc32(&sc->sc_c,
		    sc->sc_c.sc_scriptaddr + Ent_selected);
		scr[E_ldsa_abs_data_Used[0]] = siop_htoc32(&sc->sc_c,
		    dsa + sizeof(struct siop_common_xfer) + Ent_ldsa_data);
		/* JUMP foo, IF FALSE - used by MOVE MEMORY to clear the slot */
		scr[Ent_ldsa_data / 4] = siop_htoc32(&sc->sc_c, 0x80000000);
		s = splbio();
		TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
		splx(s);
#ifdef SIOP_DEBUG
		printf("tables[%d]: in=0x%x out=0x%x status=0x%x "
		    "offset=0x%x\n", i,
		    siop_ctoh32(&sc->sc_c,
			newcbd->cmds[i].cmd_tables->t_msgin.addr),
		    siop_ctoh32(&sc->sc_c,
			newcbd->cmds[i].cmd_tables->t_msgout.addr),
		    siop_ctoh32(&sc->sc_c,
			newcbd->cmds[i].cmd_tables->t_status.addr));
#endif
	}
	s = splbio();
	TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
	splx(s);
	return;
bad0:
	while (--i >= 0) {
		bus_dmamap_destroy(sc->sc_c.sc_dmat,
		    newcbd->cmds[i].cmd_c.dmamap_data);
	}
	siop_dmamem_free(sc, newcbd->sense);
bad1:
	siop_dmamem_free(sc, newcbd->xfers);
bad2:
	free(newcbd->cmds, M_DEVBUF, SIOP_NCMDPB * sizeof(struct siop_cmd));
bad3:
	free(newcbd, M_DEVBUF, sizeof *newcbd);
}

struct siop_lunsw *
siop_get_lunsw(sc)
	struct siop_softc *sc;
{
	struct siop_lunsw *lunsw;
	int i;

	if (sc->script_free_lo + (sizeof(lun_switch) / sizeof(lun_switch[0])) >=
	    sc->script_free_hi)
		return NULL;
	lunsw = TAILQ_FIRST(&sc->lunsw_list);
	if (lunsw != NULL) {
#ifdef SIOP_DEBUG
		printf("siop_get_lunsw got lunsw at offset %d\n",
		    lunsw->lunsw_off);
#endif
		TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
		return lunsw;
	}
	lunsw = malloc(sizeof(struct siop_lunsw), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (lunsw == NULL)
		return NULL;
#ifdef SIOP_DEBUG
	printf("allocating lunsw at offset %d\n", sc->script_free_lo);
#endif
	if (sc->sc_c.features & SF_CHIP_RAM) {
		bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
		    sc->script_free_lo * 4, lun_switch,
		    sizeof(lun_switch) / sizeof(lun_switch[0]));
		bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
		    (sc->script_free_lo + E_abs_lunsw_return_Used[0]) * 4,
		    sc->sc_c.sc_scriptaddr + Ent_lunsw_return);
	} else {
		for (i = 0; i < sizeof(lun_switch) / sizeof(lun_switch[0]);
		    i++)
			sc->sc_c.sc_script[sc->script_free_lo + i] =
			    siop_htoc32(&sc->sc_c, lun_switch[i]);
		sc->sc_c.sc_script[
		    sc->script_free_lo + E_abs_lunsw_return_Used[0]] =
		    siop_htoc32(&sc->sc_c,
			sc->sc_c.sc_scriptaddr + Ent_lunsw_return);
	}
	lunsw->lunsw_off = sc->script_free_lo;
	lunsw->lunsw_size = sizeof(lun_switch) / sizeof(lun_switch[0]);
	sc->script_free_lo += lunsw->lunsw_size;
	siop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return lunsw;
}
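
/*
 * Rough sketch of the script space bookkeeping used above and below
 * (illustrative, not a definitive layout): lun switches are carved from the
 * bottom of the free script area and grow upward, tag switches are carved
 * from the top and grow downward, and retired lun switches are recycled
 * through lunsw_list rather than returned to the free area.
 *
 *	script_free_lo -> lun_switch copies + per-lun JUMP entries
 *		(grows up)
 *		... free script words ...
 *		(grows down)
 *	script_free_hi -> tag_switch copies
 */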

void
siop_add_reselsw(sc, target)
	struct siop_softc *sc;
	int target;
{
	int i, j;
	struct siop_target *siop_target;
	struct siop_lun *siop_lun;

	siop_target = (struct siop_target *)sc->sc_c.targets[target];
	/*
	 * add an entry to resel switch
	 */
	siop_script_sync(sc, BUS_DMASYNC_POSTWRITE);
	for (i = 0; i < 15; i++) {
		siop_target->reseloff = Ent_resel_targ0 / 4 + i * 2;
		if ((siop_script_read(sc, siop_target->reseloff) & 0xff)
		    == 0xff) { /* it's free */
#ifdef SIOP_DEBUG
			printf("siop: target %d slot %d offset %d\n",
			    target, i, siop_target->reseloff);
#endif
			/* JUMP abs_foo, IF target | 0x80; */
			siop_script_write(sc, siop_target->reseloff,
			    0x800c0080 | target);
			siop_script_write(sc, siop_target->reseloff + 1,
			    sc->sc_c.sc_scriptaddr +
			    siop_target->lunsw->lunsw_off * 4 +
			    Ent_lun_switch_entry);
			break;
		}
	}
	if (i == 15) /* no free slot, shouldn't happen */
		panic("siop: resel switch full");

	sc->sc_ntargets++;
	for (i = 0; i < 8; i++) {
		siop_lun = siop_target->siop_lun[i];
		if (siop_lun == NULL)
			continue;
		if (siop_lun->reseloff > 0) {
			siop_lun->reseloff = 0;
			for (j = 0; j < SIOP_NTAG; j++)
				siop_lun->siop_tag[j].reseloff = 0;
			siop_add_dev(sc, target, i);
		}
	}
	siop_update_scntl3(sc, sc->sc_c.targets[target]);
	siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
}

void
siop_update_scntl3(sc, _siop_target)
	struct siop_softc *sc;
	struct siop_common_target *_siop_target;
{
	struct siop_target *siop_target = (struct siop_target *)_siop_target;

	/* MOVE target->id >> 24 TO SCNTL3 */
	siop_script_write(sc,
	    siop_target->lunsw->lunsw_off + (Ent_restore_scntl3 / 4),
	    0x78030000 | ((siop_target->target_c.id >> 16) & 0x0000ff00));
	/* MOVE target->id >> 8 TO SXFER */
	siop_script_write(sc,
	    siop_target->lunsw->lunsw_off + (Ent_restore_scntl3 / 4) + 2,
	    0x78050000 | (siop_target->target_c.id & 0x0000ff00));
	siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
}
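
/*
 * Worked example (illustrative only): the target_c.id word apparently keeps
 * the SCNTL3 value in its top byte and the SXFER value in bits 15-8, as the
 * shifts above imply.  With id == 0xb5001000 the two patched instructions
 * become 0x7803b500 and 0x78051000, i.e. "move 0xb5 to reg 0x03 (SCNTL3)"
 * and "move 0x10 to reg 0x05 (SXFER)", using the same data8-to-register
 * form as the DSA reload in siop_morecbd().
 */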

void
siop_add_dev(sc, target, lun)
	struct siop_softc *sc;
	int target;
	int lun;
{
	struct siop_lunsw *lunsw;
	struct siop_target *siop_target =
	    (struct siop_target *)sc->sc_c.targets[target];
	struct siop_lun *siop_lun = siop_target->siop_lun[lun];
	int i, ntargets;

	if (siop_lun->reseloff > 0)
		return;
	lunsw = siop_target->lunsw;
	if ((lunsw->lunsw_off + lunsw->lunsw_size) < sc->script_free_lo) {
		/*
		 * can't extend this slot. Probably not worth trying to deal
		 * with this case
		 */
#ifdef SIOP_DEBUG
		printf("%s:%d:%d: can't allocate a lun sw slot\n",
		    sc->sc_c.sc_dev.dv_xname, target, lun);
#endif
		return;
	}
	/* count how many free targets we still have to probe */
	ntargets = (sc->sc_c.sc_link.adapter_buswidth - 1) - 1 -
	    sc->sc_ntargets;

	/*
	 * we need 8 bytes for the lun sw additional entry, and
	 * possibly sizeof(tag_switch) for the tag switch entry.
	 * Keep enough free space for the free targets that could be
	 * probed later.
	 */
	if (sc->script_free_lo + 2 +
	    (ntargets * sizeof(lun_switch) / sizeof(lun_switch[0])) >=
	    ((siop_target->target_c.flags & TARF_TAG) ?
	    sc->script_free_hi - (sizeof(tag_switch) / sizeof(tag_switch[0])) :
	    sc->script_free_hi)) {
		/*
		 * not enough space, probably not worth dealing with it.
		 * We can hold 13 tagged-queuing capable devices in the 4k RAM.
		 */
#ifdef SIOP_DEBUG
		printf("%s:%d:%d: not enough memory for a lun sw slot\n",
		    sc->sc_c.sc_dev.dv_xname, target, lun);
#endif
		return;
	}
#ifdef SIOP_DEBUG
	printf("%s:%d:%d: allocate lun sw entry\n",
	    sc->sc_c.sc_dev.dv_xname, target, lun);
#endif
	/* INT int_resellun */
	siop_script_write(sc, sc->script_free_lo, 0x98080000);
	siop_script_write(sc, sc->script_free_lo + 1, A_int_resellun);
	/* Now the slot entry: JUMP abs_foo, IF lun */
	siop_script_write(sc, sc->script_free_lo - 2,
	    0x800c0000 | lun);
	siop_script_write(sc, sc->script_free_lo - 1, 0);
	siop_lun->reseloff = sc->script_free_lo - 2;
	lunsw->lunsw_size += 2;
	sc->script_free_lo += 2;
	if (siop_target->target_c.flags & TARF_TAG) {
		/* we need a tag switch */
		sc->script_free_hi -=
		    sizeof(tag_switch) / sizeof(tag_switch[0]);
		if (sc->sc_c.features & SF_CHIP_RAM) {
			bus_space_write_region_4(sc->sc_c.sc_ramt,
			    sc->sc_c.sc_ramh,
			    sc->script_free_hi * 4, tag_switch,
			    sizeof(tag_switch) / sizeof(tag_switch[0]));
		} else {
			for (i = 0;
			    i < sizeof(tag_switch) / sizeof(tag_switch[0]);
			    i++) {
				sc->sc_c.sc_script[sc->script_free_hi + i] =
				    siop_htoc32(&sc->sc_c, tag_switch[i]);
			}
		}
		siop_script_write(sc,
		    siop_lun->reseloff + 1,
		    sc->sc_c.sc_scriptaddr + sc->script_free_hi * 4 +
		    Ent_tag_switch_entry);

		for (i = 0; i < SIOP_NTAG; i++) {
			siop_lun->siop_tag[i].reseloff =
			    sc->script_free_hi + (Ent_resel_tag0 / 4) + i * 2;
		}
	} else {
		/* non-tag case; just work with the lun switch */
		siop_lun->siop_tag[0].reseloff =
		    siop_target->siop_lun[lun]->reseloff;
	}
	siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
}
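
/*
 * Putting the pieces above together (a sketch, not normative): on
 * reselection the main script jumps through the resel_targ table
 * ("JUMP lun_switch_entry, IF target | 0x80", written by
 * siop_add_reselsw()), then through the per-lun entry added here
 * ("JUMP ..., IF lun", pointing at either the tag switch or the command's
 * ldsa_reload_dsa as patched in siop_start()), and finally reloads the DSA
 * of the matching command via the per-command load_dsa script set up in
 * siop_morecbd().
 */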

void
siop_scsifree(struct scsi_link *link)
{
	struct siop_softc *sc = link->adapter_softc;
	int target = link->target;
	int lun = link->lun;
	int i;
	struct siop_target *siop_target;

#ifdef SIOP_DEBUG
	printf("%s:%d:%d: free lun sw entry\n",
	    sc->sc_c.sc_dev.dv_xname, target, lun);
#endif

	siop_target = (struct siop_target *)sc->sc_c.targets[target];
	free(siop_target->siop_lun[lun], M_DEVBUF, 0);
	siop_target->siop_lun[lun] = NULL;
	/* XXX compact sw entry too ? */
	/* check if we can free the whole target */
	for (i = 0; i < 8; i++) {
		if (siop_target->siop_lun[i] != NULL)
			return;
	}
#ifdef SIOP_DEBUG
	printf("%s: free siop_target for target %d lun %d lunsw offset %d\n",
	    sc->sc_c.sc_dev.dv_xname, target, lun,
	    siop_target->lunsw->lunsw_off);
#endif
	/*
	 * nothing here, free the target struct and resel
	 * switch entry
	 */
	siop_script_write(sc, siop_target->reseloff, 0x800c00ff);
	siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
	TAILQ_INSERT_TAIL(&sc->lunsw_list, siop_target->lunsw, next);
	free(sc->sc_c.targets[target], M_DEVBUF, 0);
	sc->sc_c.targets[target] = NULL;
	sc->sc_ntargets--;
}

#ifdef SIOP_STATS
void
siop_printstats(void)
{
	printf("siop_stat_intr %d\n", siop_stat_intr);
	printf("siop_stat_intr_shortxfer %d\n", siop_stat_intr_shortxfer);
	printf("siop_stat_intr_xferdisc %d\n", siop_stat_intr_xferdisc);
	printf("siop_stat_intr_sdp %d\n", siop_stat_intr_sdp);
	printf("siop_stat_intr_saveoffset %d\n", siop_stat_intr_saveoffset);
	printf("siop_stat_intr_done %d\n", siop_stat_intr_done);
	printf("siop_stat_intr_lunresel %d\n", siop_stat_intr_lunresel);
	printf("siop_stat_intr_qfull %d\n", siop_stat_intr_qfull);
}
#endif

struct siop_dmamem *
siop_dmamem_alloc(struct siop_softc *sc, size_t size)
{
	struct siop_dmamem *sdm;
	int nsegs;

	sdm = malloc(sizeof(*sdm), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sdm == NULL)
		return (NULL);

	sdm->sdm_size = size;

	if (bus_dmamap_create(sc->sc_c.sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sdm->sdm_map) != 0)
		goto sdmfree;

	if (bus_dmamem_alloc(sc->sc_c.sc_dmat, size, PAGE_SIZE, 0,
	    &sdm->sdm_seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_c.sc_dmat, &sdm->sdm_seg, nsegs, size,
	    &sdm->sdm_kva, BUS_DMA_NOWAIT | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_c.sc_dmat, sdm->sdm_map, sdm->sdm_kva,
	    size, NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (sdm);

unmap:
	bus_dmamem_unmap(sc->sc_c.sc_dmat, sdm->sdm_kva, size);
free:
	bus_dmamem_free(sc->sc_c.sc_dmat, &sdm->sdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_c.sc_dmat, sdm->sdm_map);
sdmfree:
	free(sdm, M_DEVBUF, sizeof *sdm);

	return (NULL);
}

void
siop_dmamem_free(struct siop_softc *sc, struct siop_dmamem *sdm)
{
	bus_dmamap_unload(sc->sc_c.sc_dmat, sdm->sdm_map);
	bus_dmamem_unmap(sc->sc_c.sc_dmat, sdm->sdm_kva, sdm->sdm_size);
	bus_dmamem_free(sc->sc_c.sc_dmat, &sdm->sdm_seg, 1);
	bus_dmamap_destroy(sc->sc_c.sc_dmat, sdm->sdm_map);
	free(sdm, M_DEVBUF, sizeof *sdm);
}
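
#if 0
/*
 * Usage sketch only (never compiled): how the two helpers above pair up.
 * siop_dmamem_alloc() hands back a buffer that is already allocated, mapped
 * into kernel virtual memory and loaded into a DMA map, so the caller only
 * needs the SIOP_DMA_KVA()/SIOP_DMA_DVA() accessors used elsewhere in this
 * file; siop_dmamem_free() undoes every step.  The function name below is
 * hypothetical.
 */
static void
siop_dmamem_example(struct siop_softc *sc)
{
	struct siop_dmamem *sdm;

	sdm = siop_dmamem_alloc(sc, PAGE_SIZE);
	if (sdm == NULL)
		return;
	/* CPU view of the buffer ... */
	bzero(SIOP_DMA_KVA(sdm), PAGE_SIZE);
	/* ... and the bus address the chip would be handed. */
	(void)SIOP_DMA_DVA(sdm);
	siop_dmamem_free(sc, sdm);
}
#endif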