1 /* $OpenBSD: twe.c,v 1.46 2016/01/22 00:40:25 jsg Exp $ */ 2 3 /* 4 * Copyright (c) 2000-2002 Michael Shalayeff. All rights reserved. 5 * 6 * The SCSI emulation layer is derived from gdt(4) driver, 7 * Copyright (c) 1999, 2000 Niklas Hallqvist. All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT, 22 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 23 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 26 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 27 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 
 */

/* #define TWE_DEBUG */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/kthread.h>

#include <machine/bus.h>

#include <scsi/scsi_all.h>
#include <scsi/scsi_disk.h>
#include <scsi/scsiconf.h>

#include <dev/ic/twereg.h>
#include <dev/ic/twevar.h>

/* Debug printing is compiled in only with TWE_DEBUG; selected by twe_debug bits. */
#ifdef TWE_DEBUG
#define TWE_DPRINTF(m,a)	if (twe_debug & (m)) printf a
#define TWE_D_CMD	0x0001
#define TWE_D_INTR	0x0002
#define TWE_D_MISC	0x0004
#define TWE_D_DMA	0x0008
#define TWE_D_AEN	0x0010
int twe_debug = 0;
#else
#define TWE_DPRINTF(m,a)	/* m, a */
#endif

struct cfdriver twe_cd = {
	NULL, "twe", DV_DULL
};

void	twe_scsi_cmd(struct scsi_xfer *);

/* Glue seen by the midlayer: command entry point and transfer clamp. */
struct scsi_adapter twe_switch = {
	twe_scsi_cmd, tweminphys, 0, 0,
};

void	*twe_get_ccb(void *);
void	twe_put_ccb(void *, void *);
void	twe_dispose(struct twe_softc *sc);
int	twe_cmd(struct twe_ccb *ccb, int flags, int wait);
int	twe_start(struct twe_ccb *ccb, int wait);
int	twe_complete(struct twe_ccb *ccb);
int	twe_done(struct twe_softc *sc, struct twe_ccb *ccb);
void	twe_copy_internal_data(struct scsi_xfer *xs, void *v, size_t size);
void	twe_thread_create(void *v);
void	twe_thread(void *v);
void	twe_aen(void *, void *);

/*
 * scsi_iopool "get" callback: pop the most recently freed ccb off the
 * free list under sc_ccb_mtx.  Returns NULL when the pool is empty.
 */
void *
twe_get_ccb(void *xsc)
{
	struct twe_softc *sc = xsc;
	struct twe_ccb *ccb;

	mtx_enter(&sc->sc_ccb_mtx);
	ccb = TAILQ_LAST(&sc->sc_free_ccb, twe_queue_head);
	if (ccb != NULL)
		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, ccb_link);
	mtx_leave(&sc->sc_ccb_mtx);

	return (ccb);
}

/*
 * scsi_iopool "put" callback: mark the ccb free and return it to the
 * free list under sc_ccb_mtx.
 */
void
twe_put_ccb(void *xsc, void *xccb)
{
	struct twe_softc *sc = xsc;
	struct twe_ccb *ccb = xccb;

	ccb->ccb_state = TWE_CCB_FREE;
	mtx_enter(&sc->sc_ccb_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
	mtx_leave(&sc->sc_ccb_mtx);
}

void
twe_dispose(sc) 112 struct twe_softc *sc; 113 { 114 register struct twe_ccb *ccb; 115 if (sc->sc_cmdmap != NULL) { 116 bus_dmamap_destroy(sc->dmat, sc->sc_cmdmap); 117 /* traverse the ccbs and destroy the maps */ 118 for (ccb = &sc->sc_ccbs[TWE_MAXCMDS - 1]; ccb >= sc->sc_ccbs; ccb--) 119 if (ccb->ccb_dmamap) 120 bus_dmamap_destroy(sc->dmat, ccb->ccb_dmamap); 121 } 122 bus_dmamem_unmap(sc->dmat, sc->sc_cmds, 123 sizeof(struct twe_cmd) * TWE_MAXCMDS); 124 bus_dmamem_free(sc->dmat, sc->sc_cmdseg, 1); 125 } 126 127 int 128 twe_attach(sc) 129 struct twe_softc *sc; 130 { 131 struct scsibus_attach_args saa; 132 /* this includes a buffer for drive config req, and a capacity req */ 133 u_int8_t param_buf[2 * TWE_SECTOR_SIZE + TWE_ALIGN - 1]; 134 struct twe_param *pb = (void *) 135 (((u_long)param_buf + TWE_ALIGN - 1) & ~(TWE_ALIGN - 1)); 136 struct twe_param *cap = (void *)((u_int8_t *)pb + TWE_SECTOR_SIZE); 137 struct twe_ccb *ccb; 138 struct twe_cmd *cmd; 139 u_int32_t status; 140 int error, i, retry, nunits, nseg; 141 const char *errstr; 142 twe_lock_t lock; 143 paddr_t pa; 144 145 error = bus_dmamem_alloc(sc->dmat, sizeof(struct twe_cmd) * TWE_MAXCMDS, 146 PAGE_SIZE, 0, sc->sc_cmdseg, 1, &nseg, BUS_DMA_NOWAIT); 147 if (error) { 148 printf(": cannot allocate commands (%d)\n", error); 149 return (1); 150 } 151 152 error = bus_dmamem_map(sc->dmat, sc->sc_cmdseg, nseg, 153 sizeof(struct twe_cmd) * TWE_MAXCMDS, 154 (caddr_t *)&sc->sc_cmds, BUS_DMA_NOWAIT); 155 if (error) { 156 printf(": cannot map commands (%d)\n", error); 157 bus_dmamem_free(sc->dmat, sc->sc_cmdseg, 1); 158 return (1); 159 } 160 161 error = bus_dmamap_create(sc->dmat, 162 sizeof(struct twe_cmd) * TWE_MAXCMDS, TWE_MAXCMDS, 163 sizeof(struct twe_cmd) * TWE_MAXCMDS, 0, 164 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_cmdmap); 165 if (error) { 166 printf(": cannot create ccb cmd dmamap (%d)\n", error); 167 twe_dispose(sc); 168 return (1); 169 } 170 error = bus_dmamap_load(sc->dmat, sc->sc_cmdmap, sc->sc_cmds, 
171 sizeof(struct twe_cmd) * TWE_MAXCMDS, NULL, BUS_DMA_NOWAIT); 172 if (error) { 173 printf(": cannot load command dma map (%d)\n", error); 174 twe_dispose(sc); 175 return (1); 176 } 177 178 TAILQ_INIT(&sc->sc_ccb2q); 179 TAILQ_INIT(&sc->sc_ccbq); 180 TAILQ_INIT(&sc->sc_free_ccb); 181 TAILQ_INIT(&sc->sc_done_ccb); 182 mtx_init(&sc->sc_ccb_mtx, IPL_BIO); 183 scsi_iopool_init(&sc->sc_iopool, sc, twe_get_ccb, twe_put_ccb); 184 185 scsi_ioh_set(&sc->sc_aen, &sc->sc_iopool, twe_aen, sc); 186 187 pa = sc->sc_cmdmap->dm_segs[0].ds_addr + 188 sizeof(struct twe_cmd) * (TWE_MAXCMDS - 1); 189 for (cmd = (struct twe_cmd *)sc->sc_cmds + TWE_MAXCMDS - 1; 190 cmd >= (struct twe_cmd *)sc->sc_cmds; cmd--, pa -= sizeof(*cmd)) { 191 192 cmd->cmd_index = cmd - (struct twe_cmd *)sc->sc_cmds; 193 ccb = &sc->sc_ccbs[cmd->cmd_index]; 194 error = bus_dmamap_create(sc->dmat, 195 TWE_MAXFER, TWE_MAXOFFSETS, TWE_MAXFER, 0, 196 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap); 197 if (error) { 198 printf(": cannot create ccb dmamap (%d)\n", error); 199 twe_dispose(sc); 200 return (1); 201 } 202 ccb->ccb_sc = sc; 203 ccb->ccb_cmd = cmd; 204 ccb->ccb_cmdpa = pa; 205 ccb->ccb_state = TWE_CCB_FREE; 206 TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link); 207 } 208 209 for (errstr = NULL, retry = 3; retry--; ) { 210 int veseen_srst; 211 u_int16_t aen; 212 213 if (errstr) 214 TWE_DPRINTF(TWE_D_MISC, ("%s ", errstr)); 215 216 for (i = 350000; i--; DELAY(100)) { 217 status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS); 218 if (status & TWE_STAT_CPURDY) 219 break; 220 } 221 222 if (!(status & TWE_STAT_CPURDY)) { 223 errstr = ": card CPU is not ready\n"; 224 continue; 225 } 226 227 /* soft reset, disable ints */ 228 bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL, 229 TWE_CTRL_SRST | 230 TWE_CTRL_CHOSTI | TWE_CTRL_CATTNI | TWE_CTRL_CERR | 231 TWE_CTRL_MCMDI | TWE_CTRL_MRDYI | 232 TWE_CTRL_MINT); 233 234 for (i = 350000; i--; DELAY(100)) { 235 status = bus_space_read_4(sc->iot, sc->ioh, 
TWE_STATUS); 236 if (status & TWE_STAT_ATTNI) 237 break; 238 } 239 240 if (!(status & TWE_STAT_ATTNI)) { 241 errstr = ": cannot get card's attention\n"; 242 continue; 243 } 244 245 /* drain aen queue */ 246 for (veseen_srst = 0, aen = -1; aen != TWE_AEN_QEMPTY; ) { 247 248 ccb = scsi_io_get(&sc->sc_iopool, 0); 249 if (ccb == NULL) { 250 errstr = ": out of ccbs\n"; 251 break; 252 } 253 254 ccb->ccb_xs = NULL; 255 ccb->ccb_data = pb; 256 ccb->ccb_length = TWE_SECTOR_SIZE; 257 ccb->ccb_state = TWE_CCB_READY; 258 cmd = ccb->ccb_cmd; 259 cmd->cmd_unit_host = TWE_UNITHOST(0, 0); 260 cmd->cmd_op = TWE_CMD_GPARAM; 261 cmd->cmd_param.count = 1; 262 263 pb->table_id = TWE_PARAM_AEN; 264 pb->param_id = 2; 265 pb->param_size = 2; 266 267 error = twe_cmd(ccb, BUS_DMA_NOWAIT, 1); 268 scsi_io_put(&sc->sc_iopool, ccb); 269 if (error) { 270 errstr = ": error draining attention queue\n"; 271 break; 272 } 273 274 aen = *(u_int16_t *)pb->data; 275 TWE_DPRINTF(TWE_D_AEN, ("aen=%x ", aen)); 276 if (aen == TWE_AEN_SRST) 277 veseen_srst++; 278 } 279 280 if (!veseen_srst) { 281 errstr = ": we don't get it\n"; 282 continue; 283 } 284 285 if (status & TWE_STAT_CPUERR) { 286 errstr = ": card CPU error detected\n"; 287 continue; 288 } 289 290 if (status & TWE_STAT_PCIPAR) { 291 errstr = ": PCI parity error detected\n"; 292 continue; 293 } 294 295 if (status & TWE_STAT_QUEUEE ) { 296 errstr = ": queuing error detected\n"; 297 continue; 298 } 299 300 if (status & TWE_STAT_PCIABR) { 301 errstr = ": PCI abort\n"; 302 continue; 303 } 304 305 while (!(status & TWE_STAT_RQE)) { 306 bus_space_read_4(sc->iot, sc->ioh, TWE_READYQUEUE); 307 status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS); 308 } 309 310 break; 311 } 312 313 if (retry < 0) { 314 printf("%s", errstr); 315 twe_dispose(sc); 316 return 1; 317 } 318 319 ccb = scsi_io_get(&sc->sc_iopool, 0); 320 if (ccb == NULL) { 321 printf(": out of ccbs\n"); 322 twe_dispose(sc); 323 return 1; 324 } 325 326 ccb->ccb_xs = NULL; 327 ccb->ccb_data = pb; 
328 ccb->ccb_length = TWE_SECTOR_SIZE; 329 ccb->ccb_state = TWE_CCB_READY; 330 cmd = ccb->ccb_cmd; 331 cmd->cmd_unit_host = TWE_UNITHOST(0, 0); 332 cmd->cmd_op = TWE_CMD_GPARAM; 333 cmd->cmd_param.count = 1; 334 335 pb->table_id = TWE_PARAM_UC; 336 pb->param_id = TWE_PARAM_UC; 337 pb->param_size = TWE_MAX_UNITS; 338 339 error = twe_cmd(ccb, BUS_DMA_NOWAIT, 1); 340 scsi_io_put(&sc->sc_iopool, ccb); 341 if (error) { 342 printf(": failed to fetch unit parameters\n"); 343 twe_dispose(sc); 344 return 1; 345 } 346 347 /* we are assuming last read status was good */ 348 printf(": Escalade V%d.%d\n", TWE_MAJV(status), TWE_MINV(status)); 349 350 for (nunits = i = 0; i < TWE_MAX_UNITS; i++) { 351 if (pb->data[i] == 0) 352 continue; 353 354 ccb = scsi_io_get(&sc->sc_iopool, 0); 355 if (ccb == NULL) { 356 printf(": out of ccbs\n"); 357 twe_dispose(sc); 358 return 1; 359 } 360 361 ccb->ccb_xs = NULL; 362 ccb->ccb_data = cap; 363 ccb->ccb_length = TWE_SECTOR_SIZE; 364 ccb->ccb_state = TWE_CCB_READY; 365 cmd = ccb->ccb_cmd; 366 cmd->cmd_unit_host = TWE_UNITHOST(0, 0); 367 cmd->cmd_op = TWE_CMD_GPARAM; 368 cmd->cmd_param.count = 1; 369 370 cap->table_id = TWE_PARAM_UI + i; 371 cap->param_id = 4; 372 cap->param_size = 4; /* 4 bytes */ 373 374 lock = TWE_LOCK(sc); 375 twe_cmd(ccb, BUS_DMA_NOWAIT, 1); 376 TWE_UNLOCK(sc, lock); 377 scsi_io_put(&sc->sc_iopool, ccb); 378 if (error) { 379 printf("%s: error fetching capacity for unit %d\n", 380 sc->sc_dev.dv_xname, i); 381 continue; 382 } 383 384 nunits++; 385 sc->sc_hdr[i].hd_present = 1; 386 sc->sc_hdr[i].hd_devtype = 0; 387 sc->sc_hdr[i].hd_size = letoh32(*(u_int32_t *)cap->data); 388 TWE_DPRINTF(TWE_D_MISC, ("twed%d: size=%d\n", 389 i, sc->sc_hdr[i].hd_size)); 390 } 391 392 if (!nunits) 393 nunits++; 394 395 /* TODO: fetch & print cache params? 
*/ 396 397 sc->sc_link.adapter_softc = sc; 398 sc->sc_link.adapter = &twe_switch; 399 sc->sc_link.adapter_target = TWE_MAX_UNITS; 400 sc->sc_link.openings = TWE_MAXCMDS / nunits; 401 sc->sc_link.adapter_buswidth = TWE_MAX_UNITS; 402 sc->sc_link.pool = &sc->sc_iopool; 403 404 bzero(&saa, sizeof(saa)); 405 saa.saa_sc_link = &sc->sc_link; 406 407 config_found(&sc->sc_dev, &saa, scsiprint); 408 409 kthread_create_deferred(twe_thread_create, sc); 410 411 return (0); 412 } 413 414 void 415 twe_thread_create(void *v) 416 { 417 struct twe_softc *sc = v; 418 419 if (kthread_create(twe_thread, sc, &sc->sc_thread, 420 sc->sc_dev.dv_xname)) { 421 /* TODO disable twe */ 422 printf("%s: failed to create kernel thread, disabled\n", 423 sc->sc_dev.dv_xname); 424 return; 425 } 426 427 TWE_DPRINTF(TWE_D_CMD, ("stat=%b ", 428 bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS), TWE_STAT_BITS)); 429 /* 430 * ack all before enable, cannot be done in one 431 * operation as it seems clear is not processed 432 * if enable is specified. 
/*
 * Worker thread: completes ccbs queued by the interrupt handler
 * (sc_done_ccb) and feeds pre-queued commands (sc_ccb2q) to the card
 * while its command queue has room.  Sleeps until woken by twe_intr()
 * or twe_start().
 */
void
twe_thread(v)
	void *v;
{
	struct twe_softc *sc = v;
	struct twe_ccb *ccb;
	twe_lock_t lock;
	u_int32_t status;
	int err;

	for (;;) {
		lock = TWE_LOCK(sc);

		/* finish everything the interrupt handler collected */
		while (!TAILQ_EMPTY(&sc->sc_done_ccb)) {
			ccb = TAILQ_FIRST(&sc->sc_done_ccb);
			TAILQ_REMOVE(&sc->sc_done_ccb, ccb, ccb_link);
			if ((err = twe_done(sc, ccb)))
				printf("%s: done failed (%d)\n",
				    sc->sc_dev.dv_xname, err);
		}

		status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
		TWE_DPRINTF(TWE_D_INTR, ("twe_thread stat=%b ",
		    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
		/* push pre-queued commands while the card queue is not full */
		while (!(status & TWE_STAT_CQF) &&
		    !TAILQ_EMPTY(&sc->sc_ccb2q)) {

			ccb = TAILQ_LAST(&sc->sc_ccb2q, twe_queue_head);
			TAILQ_REMOVE(&sc->sc_ccb2q, ccb, ccb_link);

			ccb->ccb_state = TWE_CCB_QUEUED;
			TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, ccb_link);
			bus_space_write_4(sc->iot, sc->ioh, TWE_COMMANDQUEUE,
			    ccb->ccb_cmdpa);

			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
			TWE_DPRINTF(TWE_D_INTR, ("twe_thread stat=%b ",
			    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
		}

		/* commands left over: ask for a "queue not full" interrupt */
		if (!TAILQ_EMPTY(&sc->sc_ccb2q))
			bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
			    TWE_CTRL_ECMDI);

		TWE_UNLOCK(sc, lock);
		sc->sc_thread_on = 1;
		tsleep(sc, PWAIT, "twespank", 0);
	}
}

/*
 * Prepare and submit one command: bounce-buffer unaligned data, load the
 * scatter/gather list into the command packet, sync DMA maps, then hand
 * the ccb to twe_start().  With wait != 0 the command is polled to
 * completion via twe_complete().  Returns 0 or an errno-style value.
 */
int
twe_cmd(ccb, flags, wait)
	struct twe_ccb *ccb;
	int flags, wait;
{
	struct twe_softc *sc = ccb->ccb_sc;
	bus_dmamap_t dmap;
	struct twe_cmd *cmd;
	struct twe_segs *sgp;
	int error, i;

	/* card requires TWE_ALIGN alignment; bounce if the buffer lacks it */
	if (ccb->ccb_data && ((u_long)ccb->ccb_data & (TWE_ALIGN - 1))) {
		TWE_DPRINTF(TWE_D_DMA, ("data=%p is unaligned ",ccb->ccb_data));
		ccb->ccb_realdata = ccb->ccb_data;

		error = bus_dmamem_alloc(sc->dmat, ccb->ccb_length, PAGE_SIZE,
		    0, ccb->ccb_2bseg, TWE_MAXOFFSETS, &ccb->ccb_2nseg,
		    BUS_DMA_NOWAIT);
		if (error) {
			TWE_DPRINTF(TWE_D_DMA, ("2buf alloc failed(%d) ", error));
			return (ENOMEM);
		}

		error = bus_dmamem_map(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg,
		    ccb->ccb_length, (caddr_t *)&ccb->ccb_data, BUS_DMA_NOWAIT);
		if (error) {
			TWE_DPRINTF(TWE_D_DMA, ("2buf map failed(%d) ", error));
			bus_dmamem_free(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg);
			return (ENOMEM);
		}
		/* copy out unconditionally; twe_done() copies back for reads */
		bcopy(ccb->ccb_realdata, ccb->ccb_data, ccb->ccb_length);
	} else
		ccb->ccb_realdata = NULL;

	dmap = ccb->ccb_dmamap;
	cmd = ccb->ccb_cmd;
	cmd->cmd_status = 0;

	if (ccb->ccb_data) {
		error = bus_dmamap_load(sc->dmat, dmap, ccb->ccb_data,
		    ccb->ccb_length, NULL, flags);
		if (error) {
			if (error == EFBIG)
				printf("more than %d dma segs\n", TWE_MAXOFFSETS);
			else
				printf("error %d loading dma map\n", error);

			if (ccb->ccb_realdata) {
				bus_dmamem_unmap(sc->dmat, ccb->ccb_data,
				    ccb->ccb_length);
				bus_dmamem_free(sc->dmat, ccb->ccb_2bseg,
				    ccb->ccb_2nseg);
			}
			return error;
		}
		/* load addresses into command */
		switch (cmd->cmd_op) {
		case TWE_CMD_GPARAM:
		case TWE_CMD_SPARAM:
			sgp = cmd->cmd_param.segs;
			break;
		case TWE_CMD_READ:
		case TWE_CMD_WRITE:
			sgp = cmd->cmd_io.segs;
			break;
		default:
			/* no data transfer */
			TWE_DPRINTF(TWE_D_DMA, ("twe_cmd: unknown sgp op=%x\n",
			    cmd->cmd_op));
			sgp = NULL;
			break;
		}
		TWE_DPRINTF(TWE_D_DMA, ("data=%p<", ccb->ccb_data));
		if (sgp) {
			/*
			 * we know that size is in the upper byte,
			 * and we do not worry about overflow
			 */
			cmd->cmd_op += (2 * dmap->dm_nsegs) << 8;
			bzero (sgp, TWE_MAXOFFSETS * sizeof(*sgp));
			for (i = 0; i < dmap->dm_nsegs; i++, sgp++) {
				sgp->twes_addr = htole32(dmap->dm_segs[i].ds_addr);
				sgp->twes_len = htole32(dmap->dm_segs[i].ds_len);
				TWE_DPRINTF(TWE_D_DMA, ("%x[%x] ",
				    dmap->dm_segs[i].ds_addr,
				    dmap->dm_segs[i].ds_len));
			}
		}
		TWE_DPRINTF(TWE_D_DMA, ("> "));
		bus_dmamap_sync(sc->dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
	}
	/* the command packet itself also goes to the card via DMA */
	bus_dmamap_sync(sc->dmat, sc->sc_cmdmap, 0, sc->sc_cmdmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	if ((error = twe_start(ccb, wait))) {
		bus_dmamap_unload(sc->dmat, dmap);
		if (ccb->ccb_realdata) {
			bus_dmamem_unmap(sc->dmat, ccb->ccb_data,
			    ccb->ccb_length);
			bus_dmamem_free(sc->dmat, ccb->ccb_2bseg,
			    ccb->ccb_2nseg);
		}
		return (error);
	}

	return wait? twe_complete(ccb) : 0;
}

/*
 * Submit a prepared ccb.  Async (wait == 0): put it on sc_ccb2q and wake
 * the worker thread.  Sync: poll until the card's command queue is not
 * full, then write the command's bus address to the command queue
 * register.  Returns 0 on success, EPERM on queue-full timeout.
 */
int
twe_start(ccb, wait)
	struct twe_ccb *ccb;
	int wait;
{
	struct twe_softc*sc = ccb->ccb_sc;
	struct twe_cmd *cmd = ccb->ccb_cmd;
	u_int32_t status;
	int i;

	/* opcode (with embedded sg count) is little-endian on the wire */
	cmd->cmd_op = htole16(cmd->cmd_op);

	if (!wait) {

		TWE_DPRINTF(TWE_D_CMD, ("prequeue(%d) ", cmd->cmd_index));
		ccb->ccb_state = TWE_CCB_PREQUEUED;
		TAILQ_INSERT_TAIL(&sc->sc_ccb2q, ccb, ccb_link);
		wakeup(sc);
		return 0;
	}

	for (i = 1000; i--; DELAY(10)) {

		status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
		if (!(status & TWE_STAT_CQF))
			break;
		TWE_DPRINTF(TWE_D_CMD, ("twe_start stat=%b ",
		    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
	}

	if (!(status & TWE_STAT_CQF)) {
		bus_space_write_4(sc->iot, sc->ioh, TWE_COMMANDQUEUE,
		    ccb->ccb_cmdpa);

		TWE_DPRINTF(TWE_D_CMD, ("queue(%d) ", cmd->cmd_index));
		ccb->ccb_state = TWE_CCB_QUEUED;
		TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, ccb_link);
		return 0;

	} else {

		printf("%s: twe_start(%d) timed out\n",
		    sc->sc_dev.dv_xname, cmd->cmd_index);

		/* NOTE(review): EPERM is an odd errno for a timeout here */
		return EPERM;
	}
}
/*
 * Poll the ready queue until the given ccb completes (or the timeout,
 * derived from xs->timeout when attached to a scsi_xfer, expires).
 * Other ccbs that surface first are completed via twe_done() as well.
 * Returns 0 when the target ccb completed, 1 on timeout.
 */
int
twe_complete(ccb)
	struct twe_ccb *ccb;
{
	struct twe_softc *sc = ccb->ccb_sc;
	struct scsi_xfer *xs = ccb->ccb_xs;
	int i;

	for (i = 100 * (xs? xs->timeout : 35000); i--; DELAY(10)) {
		u_int32_t status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);

		/* TWE_DPRINTF(TWE_D_CMD, ("twe_intr stat=%b ",
		    status & TWE_STAT_FLAGS, TWE_STAT_BITS)); */

		while (!(status & TWE_STAT_RQE)) {
			struct twe_ccb *ccb1;
			u_int32_t ready;

			ready = bus_space_read_4(sc->iot, sc->ioh,
			    TWE_READYQUEUE);

			TWE_DPRINTF(TWE_D_CMD, ("ready=%x ", ready));

			/* the ready word identifies which ccb finished */
			ccb1 = &sc->sc_ccbs[TWE_READYID(ready)];
			TAILQ_REMOVE(&sc->sc_ccbq, ccb1, ccb_link);
			ccb1->ccb_state = TWE_CCB_DONE;
			if (!twe_done(sc, ccb1) && ccb1 == ccb) {
				TWE_DPRINTF(TWE_D_CMD, ("complete\n"));
				return 0;
			}

			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
			/* TWE_DPRINTF(TWE_D_CMD, ("twe_intr stat=%b ",
			    status & TWE_STAT_FLAGS, TWE_STAT_BITS)); */
		}
	}

	return 1;
}

/*
 * Finish a completed ccb: post-sync and unload its data DMA map, copy
 * bounce-buffer contents back to the caller's buffer, and call
 * scsi_done() (under the lock) when a scsi_xfer is attached.
 * Returns 0 on success, 1 if the ccb was not in the DONE state.
 */
int
twe_done(sc, ccb)
	struct twe_softc *sc;
	struct twe_ccb *ccb;
{
	struct twe_cmd *cmd = ccb->ccb_cmd;
	struct scsi_xfer *xs = ccb->ccb_xs;
	bus_dmamap_t	dmap;
	twe_lock_t	lock;

	TWE_DPRINTF(TWE_D_CMD, ("done(%d) ", cmd->cmd_index));

	if (ccb->ccb_state != TWE_CCB_DONE) {
		printf("%s: undone ccb %d ready\n",
		     sc->sc_dev.dv_xname, cmd->cmd_index);
		return 1;
	}

	dmap = ccb->ccb_dmamap;
	if (xs) {
		/* PREVENT_ALLOW/SYNCHRONIZE_CACHE carried no data map */
		if (xs->cmd->opcode != PREVENT_ALLOW &&
		    xs->cmd->opcode != SYNCHRONIZE_CACHE) {
			bus_dmamap_sync(sc->dmat, dmap, 0,
			    dmap->dm_mapsize, (xs->flags & SCSI_DATA_IN) ?
			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->dmat, dmap);
		}
	} else {
		/* internal command: direction inferred from the opcode */
		switch (letoh16(cmd->cmd_op)) {
		case TWE_CMD_GPARAM:
		case TWE_CMD_READ:
			bus_dmamap_sync(sc->dmat, dmap, 0,
			    dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->dmat, dmap);
			break;
		case TWE_CMD_SPARAM:
		case TWE_CMD_WRITE:
			bus_dmamap_sync(sc->dmat, dmap, 0,
			    dmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->dmat, dmap);
			break;
		default:
			/* no data */
			break;
		}
	}

	/* undo the bounce buffering set up by twe_cmd() */
	if (ccb->ccb_realdata) {
		bcopy(ccb->ccb_data, ccb->ccb_realdata, ccb->ccb_length);
		bus_dmamem_unmap(sc->dmat, ccb->ccb_data, ccb->ccb_length);
		bus_dmamem_free(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg);
	}

	lock = TWE_LOCK(sc);

	if (xs) {
		xs->resid = 0;
		scsi_done(xs);
	}
	TWE_UNLOCK(sc, lock);

	return 0;
}
/*
 * Clamp a transfer to the controller's maximum I/O size.
 */
void
tweminphys(struct buf *bp, struct scsi_link *sl)
{
	if (bp->b_bcount > TWE_MAXFER)
		bp->b_bcount = TWE_MAXFER;
	minphys(bp);
}

/*
 * Copy emulated-command response data (sense, inquiry, capacity) into
 * the scsi_xfer's buffer, bounded by the smaller of the two sizes.
 */
void
twe_copy_internal_data(xs, v, size)
	struct scsi_xfer *xs;
	void *v;
	size_t size;
{
	size_t copy_cnt;

	TWE_DPRINTF(TWE_D_MISC, ("twe_copy_internal_data "));

	if (!xs->datalen)
		printf("uio move is not yet supported\n");
	else {
		copy_cnt = MIN(size, xs->datalen);
		bcopy(v, xs->data, copy_cnt);
	}
}

/*
 * SCSI emulation entry point: answer INQUIRY/SENSE/CAPACITY and friends
 * locally from sc_hdr[], translate READ/WRITE/SYNCHRONIZE_CACHE into
 * firmware commands submitted via twe_cmd().
 */
void
twe_scsi_cmd(xs)
	struct scsi_xfer *xs;
{
	struct scsi_link *link = xs->sc_link;
	struct twe_softc *sc = link->adapter_softc;
	struct twe_ccb *ccb = xs->io;
	struct twe_cmd *cmd;
	struct scsi_inquiry_data inq;
	struct scsi_sense_data sd;
	struct scsi_read_cap_data rcd;
	u_int8_t target = link->target;
	u_int32_t blockno, blockcnt;
	struct scsi_rw *rw;
	struct scsi_rw_big *rwb;
	int error, op, flags, wait;
	twe_lock_t lock;


	/* only LUN 0 of present units is addressable */
	if (target >= TWE_MAX_UNITS || !sc->sc_hdr[target].hd_present ||
	    link->lun != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		return;
	}

	TWE_DPRINTF(TWE_D_CMD, ("twe_scsi_cmd "));

	xs->error = XS_NOERROR;

	switch (xs->cmd->opcode) {
	case TEST_UNIT_READY:
	case START_STOP:
#if 0
	case VERIFY:
#endif
		/* emulated: always succeeds */
		TWE_DPRINTF(TWE_D_CMD, ("opc %d tgt %d ", xs->cmd->opcode,
		    target));
		break;

	case REQUEST_SENSE:
		/* emulated: no sense pending */
		TWE_DPRINTF(TWE_D_CMD, ("REQUEST SENSE tgt %d ", target));
		bzero(&sd, sizeof sd);
		sd.error_code = SSD_ERRCODE_CURRENT;
		sd.segment = 0;
		sd.flags = SKEY_NO_SENSE;
		*(u_int32_t*)sd.info = htole32(0);
		sd.extra_len = 0;
		twe_copy_internal_data(xs, &sd, sizeof sd);
		break;

	case INQUIRY:
		/* emulated from the per-unit header */
		TWE_DPRINTF(TWE_D_CMD, ("INQUIRY tgt %d devtype %x ", target,
		    sc->sc_hdr[target].hd_devtype));
		bzero(&inq, sizeof inq);
		inq.device =
		    (sc->sc_hdr[target].hd_devtype & 4) ? T_CDROM : T_DIRECT;
		inq.dev_qual2 =
		    (sc->sc_hdr[target].hd_devtype & 1) ? SID_REMOVABLE : 0;
		inq.version = 2;
		inq.response_format = 2;
		inq.additional_length = 32;
		strlcpy(inq.vendor, "3WARE ", sizeof inq.vendor);
		snprintf(inq.product, sizeof inq.product, "Host drive #%02d",
		    target);
		strlcpy(inq.revision, " ", sizeof inq.revision);
		twe_copy_internal_data(xs, &inq, sizeof inq);
		break;

	case READ_CAPACITY:
		TWE_DPRINTF(TWE_D_CMD, ("READ CAPACITY tgt %d ", target));
		bzero(&rcd, sizeof rcd);
		/* last addressable block, in TWE_SECTOR_SIZE units */
		_lto4b(sc->sc_hdr[target].hd_size - 1, rcd.addr);
		_lto4b(TWE_SECTOR_SIZE, rcd.length);
		twe_copy_internal_data(xs, &rcd, sizeof rcd);
		break;

	case PREVENT_ALLOW:
		TWE_DPRINTF(TWE_D_CMD, ("PREVENT/ALLOW "));
		scsi_done(xs);
		return;

	case READ_COMMAND:
	case READ_BIG:
	case WRITE_COMMAND:
	case WRITE_BIG:
	case SYNCHRONIZE_CACHE:
		lock = TWE_LOCK(sc);

		flags = 0;
		if (xs->cmd->opcode == SYNCHRONIZE_CACHE) {
			blockno = blockcnt = 0;
		} else {
			/* A read or write operation. */
			if (xs->cmdlen == 6) {
				rw = (struct scsi_rw *)xs->cmd;
				blockno = _3btol(rw->addr) &
				    (SRW_TOPADDR << 16 | 0xffff);
				/* length 0 means 256 blocks in rw(6) */
				blockcnt = rw->length ? rw->length : 0x100;
			} else {
				rwb = (struct scsi_rw_big *)xs->cmd;
				blockno = _4btol(rwb->addr);
				blockcnt = _2btol(rwb->length);
				/* reflect DPO & FUA flags */
				if (xs->cmd->opcode == WRITE_BIG &&
				    rwb->byte2 & 0x18)
					flags = TWE_FLAGS_CACHEDISABLE;
			}
			if (blockno >= sc->sc_hdr[target].hd_size ||
			    blockno + blockcnt > sc->sc_hdr[target].hd_size) {
				printf("%s: out of bounds %u-%u >= %u\n",
				    sc->sc_dev.dv_xname, blockno, blockcnt,
				    sc->sc_hdr[target].hd_size);
				xs->error = XS_DRIVER_STUFFUP;
				scsi_done(xs);
				TWE_UNLOCK(sc, lock);
				return;
			}
		}

		switch (xs->cmd->opcode) {
		case READ_COMMAND:	op = TWE_CMD_READ;	break;
		case READ_BIG:		op = TWE_CMD_READ;	break;
		case WRITE_COMMAND:	op = TWE_CMD_WRITE;	break;
		case WRITE_BIG:		op = TWE_CMD_WRITE;	break;
		default:		op = TWE_CMD_NOP;	break;
		}

		ccb->ccb_xs = xs;
		ccb->ccb_data = xs->data;
		ccb->ccb_length = xs->datalen;
		ccb->ccb_state = TWE_CCB_READY;
		cmd = ccb->ccb_cmd;
		cmd->cmd_unit_host = TWE_UNITHOST(target, 0); /* XXX why 0? */
		cmd->cmd_op = op;
		cmd->cmd_flags = flags;
		cmd->cmd_io.count = htole16(blockcnt);
		cmd->cmd_io.lba = htole32(blockno);
		wait = xs->flags & SCSI_POLL;
		/* must poll until the worker thread exists */
		if (!sc->sc_thread_on)
			wait |= SCSI_POLL;

		if ((error = twe_cmd(ccb, ((xs->flags & SCSI_NOSLEEP)?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK), wait))) {

			TWE_DPRINTF(TWE_D_CMD, ("failed %p ", xs));
			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
		}

		TWE_UNLOCK(sc, lock);
		return;

	default:
		TWE_DPRINTF(TWE_D_CMD, ("unsupported scsi command %#x tgt %d ",
		    xs->cmd->opcode, target));
		xs->error = XS_DRIVER_STUFFUP;
	}

	scsi_done(xs);
}
/*
 * Interrupt handler: drain the ready queue onto sc_done_ccb (actual
 * completion happens in the worker thread), mask the "command queue not
 * full" interrupt once it fires, and schedule AEN draining on attention
 * interrupts.  Returns nonzero when the interrupt was ours.
 */
int
twe_intr(v)
	void *v;
{
	struct twe_softc *sc = v;
	struct twe_ccb *ccb;
	u_int32_t status;
	int rv = 0;

	status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
	TWE_DPRINTF(TWE_D_INTR, ("twe_intr stat=%b ",
	    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
#if 0
	if (status & TWE_STAT_HOSTI) {

		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
		    TWE_CTRL_CHOSTI);
	}
#endif

	if (status & TWE_STAT_RDYI) {

		while (!(status & TWE_STAT_RQE)) {

			u_int32_t ready;

			/*
			 * it seems that reading ready queue
			 * we get all the status bits in each ready word.
			 * i wonder if it's legal to use those for
			 * status and avoid extra read below
			 */
			ready = bus_space_read_4(sc->iot, sc->ioh,
			    TWE_READYQUEUE);

			ccb = &sc->sc_ccbs[TWE_READYID(ready)];
			TAILQ_REMOVE(&sc->sc_ccbq, ccb, ccb_link);
			ccb->ccb_state = TWE_CCB_DONE;
			TAILQ_INSERT_TAIL(&sc->sc_done_ccb, ccb, ccb_link);
			rv++;

			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
			TWE_DPRINTF(TWE_D_INTR, ("twe_intr stat=%b ",
			    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
		}
	}

	if (status & TWE_STAT_CMDI) {
		rv++;
		/* mask it again; the thread re-enables when it has backlog */
		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
		    TWE_CTRL_MCMDI);
	}

	/* let the worker thread process sc_done_ccb / sc_ccb2q */
	if (rv)
		wakeup(sc);

	if (status & TWE_STAT_ATTNI) {
		/*
		 * we know no attentions of interest right now.
		 * one of those would be mirror degradation i think.
		 * or, what else exists in there?
		 * maybe 3ware can answer that?
		 */
		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
		    TWE_CTRL_CATTNI);

		scsi_ioh_add(&sc->sc_aen);
	}

	return rv;
}

/*
 * Deferred AEN drain (scheduled from twe_intr via sc_aen): read one AEN
 * code with a GPARAM command and reschedule itself until the card
 * reports TWE_AEN_QEMPTY.  Runs with a ccb supplied by the iopool.
 */
void
twe_aen(void *cookie, void *io)
{
	struct twe_softc *sc = cookie;
	struct twe_ccb *ccb = io;
	struct twe_cmd *cmd = ccb->ccb_cmd;

	u_int8_t param_buf[2 * TWE_SECTOR_SIZE + TWE_ALIGN - 1];
	/* align the on-stack parameter buffer to TWE_ALIGN for DMA */
	struct twe_param *pb = (void *) (((u_long)param_buf +
	    TWE_ALIGN - 1) & ~(TWE_ALIGN - 1));
	u_int16_t aen;

	twe_lock_t lock;
	int error;

	ccb->ccb_xs = NULL;
	ccb->ccb_data = pb;
	ccb->ccb_length = TWE_SECTOR_SIZE;
	ccb->ccb_state = TWE_CCB_READY;
	cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
	cmd->cmd_op = TWE_CMD_GPARAM;
	cmd->cmd_flags = 0;
	cmd->cmd_param.count = 1;

	pb->table_id = TWE_PARAM_AEN;
	pb->param_id = 2;
	pb->param_size = 2;

	lock = TWE_LOCK(sc);
	error = twe_cmd(ccb, BUS_DMA_NOWAIT, 1);
	TWE_UNLOCK(sc, lock);
	scsi_io_put(&sc->sc_iopool, ccb);

	if (error) {
		printf("%s: error draining attention queue\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	aen = *(u_int16_t *)pb->data;
	/* more events pending: schedule another pass */
	if (aen != TWE_AEN_QEMPTY)
		scsi_ioh_add(&sc->sc_aen);
}