/*	$OpenBSD: ami.c,v 1.23 2003/06/28 23:55:50 avsm Exp $	*/

/*
 * Copyright (c) 2001 Michael Shalayeff
 * All rights reserved.
 *
 * The SCSI emulation layer is derived from gdt(4) driver,
 * Copyright (c) 1999, 2000 Niklas Hallqvist. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * American Megatrends Inc. MegaRAID controllers driver
 *
 * This driver was made because these ppl and organizations
 * donated hardware and provided documentation:
 *
 * - 428 model card
 *	John Kerbawy, Stephan Matis, Mark Stovall;
 *
 * - 467 and 475 model cards, docs
 *	American Megatrends Inc.;
 *
 * - uninterruptable electric power for cvs
 *	Theo de Raadt.
 */

/* #define AMI_DEBUG */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <machine/bus.h>

#include <uvm/uvm_extern.h>

#include <scsi/scsi_all.h>
#include <scsi/scsi_disk.h>
#include <scsi/scsiconf.h>

#include <dev/ic/amireg.h>
#include <dev/ic/amivar.h>

#ifdef AMI_DEBUG
#define	AMI_DPRINTF(m,a)	if (ami_debug & (m)) printf a
#define	AMI_D_CMD	0x0001
#define	AMI_D_INTR	0x0002
#define	AMI_D_MISC	0x0004
#define	AMI_D_DMA	0x0008
int ami_debug = 0
	| AMI_D_CMD
	| AMI_D_INTR
/*	| AMI_D_MISC */
/*	| AMI_D_DMA */
	;
#else
#define	AMI_DPRINTF(m,a)	/* m, a */
#endif

struct cfdriver ami_cd = {
	NULL, "ami", DV_DULL
};

int	ami_scsi_cmd(struct scsi_xfer *xs);
void	amiminphys(struct buf *bp);

struct scsi_adapter ami_switch = {
	ami_scsi_cmd, amiminphys, 0, 0,
};

struct scsi_device ami_dev = {
	NULL, NULL, NULL, NULL
};

int	ami_scsi_raw_cmd(struct scsi_xfer *xs);

struct scsi_adapter ami_raw_switch = {
	ami_scsi_raw_cmd, amiminphys, 0, 0,
};

struct scsi_device ami_raw_dev = {
	NULL, NULL, NULL, NULL
};

static __inline struct ami_ccb *ami_get_ccb(struct ami_softc *sc);
static __inline void ami_put_ccb(struct ami_ccb *ccb);
void ami_copyhds(struct ami_softc *sc, const u_int32_t *sizes,
    const u_int8_t *props, const u_int8_t *stats);
void *ami_allocmem(bus_dma_tag_t dmat, bus_dmamap_t *map,
    bus_dma_segment_t *segp, size_t isize, size_t nent, const char *iname);
void ami_freemem(bus_dma_tag_t dmat, bus_dmamap_t *map,
    bus_dma_segment_t *segp, size_t isize, size_t nent, const char *iname);
void ami_dispose(struct ami_softc *sc);
void ami_stimeout(void *v);
int  ami_cmd(struct ami_ccb *ccb, int flags, int wait);
int  ami_start(struct ami_ccb *ccb, int wait);
int  ami_complete(struct ami_ccb *ccb);
int  ami_done(struct ami_softc *sc, int idx);
void ami_copy_internal_data(struct scsi_xfer *xs, void *v, size_t size);
int  ami_inquire(struct ami_softc *sc, u_int8_t op);


static __inline struct ami_ccb *
ami_get_ccb(sc)
	struct ami_softc *sc;
{
	struct ami_ccb *ccb;

	ccb = TAILQ_LAST(&sc->sc_free_ccb, ami_queue_head);
	if (ccb) {
		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, ccb_link);
		ccb->ccb_state = AMI_CCB_READY;
	}
	return ccb;
}

static __inline void
ami_put_ccb(ccb)
	struct ami_ccb *ccb;
{
	struct ami_softc *sc = ccb->ccb_sc;

	ccb->ccb_state = AMI_CCB_FREE;
	TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
}

void *
ami_allocmem(dmat, map, segp, isize, nent, iname)
	bus_dma_tag_t dmat;
	bus_dmamap_t *map;
	bus_dma_segment_t *segp;
	size_t isize, nent;
	const char *iname;
{
	size_t total = isize * nent;
	caddr_t p;
	int error, rseg;

	/* XXX this is because we might have no dmamem_load_raw */
	if ((error = bus_dmamem_alloc(dmat, total, PAGE_SIZE, 0, segp, 1,
	    &rseg, BUS_DMA_NOWAIT))) {
		printf(": cannot allocate %s%s (%d)\n",
		    iname, nent==1? "": "s", error);
		return (NULL);
	}

	if ((error = bus_dmamem_map(dmat, segp, rseg, total, &p,
	    BUS_DMA_NOWAIT))) {
		printf(": cannot map %s%s (%d)\n",
		    iname, nent==1? "": "s", error);
		return (NULL);
	}

	bzero(p, total);
	if ((error = bus_dmamap_create(dmat, total, 1,
	    total, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, map))) {
		printf(": cannot create %s dmamap (%d)\n", iname, error);
		return (NULL);
	}
	if ((error = bus_dmamap_load(dmat, *map, p, total, NULL,
	    BUS_DMA_NOWAIT))) {
		printf(": cannot load %s dma map (%d)\n", iname, error);
		return (NULL);
	}

	return (p);
}

void
ami_freemem(dmat, map, segp, isize, nent, iname)
	bus_dma_tag_t dmat;
	bus_dmamap_t *map;
	bus_dma_segment_t *segp;
	size_t isize, nent;
	const char *iname;
{
	bus_dmamem_free(dmat, segp, 1);
	bus_dmamap_destroy(dmat, *map);
	*map = NULL;
}

void
ami_dispose(sc)
	struct ami_softc *sc;
{
	register struct ami_ccb *ccb;

	/* traverse the ccbs and destroy the maps */
	for (ccb = &sc->sc_ccbs[AMI_MAXCMDS - 1]; ccb > sc->sc_ccbs; ccb--)
		if (ccb->ccb_dmamap)
			bus_dmamap_destroy(sc->dmat, ccb->ccb_dmamap);
	ami_freemem(sc->dmat, &sc->sc_sgmap, sc->sc_sgseg,
	    sizeof(struct ami_sgent) * AMI_SGEPERCMD, AMI_MAXCMDS, "sglist");
	ami_freemem(sc->dmat, &sc->sc_cmdmap, sc->sc_cmdseg,
	    sizeof(struct ami_iocmd), AMI_MAXCMDS + 1, "command");
}


void
ami_copyhds(sc, sizes, props, stats)
	struct ami_softc *sc;
	const u_int32_t *sizes;
	const u_int8_t *props, *stats;
{
	int i;

	for (i = 0; i < sc->sc_nunits; i++) {
		sc->sc_hdr[i].hd_present = 1;
		sc->sc_hdr[i].hd_is_logdrv = 1;
		sc->sc_hdr[i].hd_size = letoh32(sizes[i]);
		sc->sc_hdr[i].hd_prop = props[i];
		sc->sc_hdr[i].hd_stat = stats[i];
		if (sc->sc_hdr[i].hd_size > 0x200000) {
			sc->sc_hdr[i].hd_heads = 255;
			sc->sc_hdr[i].hd_secs = 63;
		} else {
			sc->sc_hdr[i].hd_heads = 64;
			sc->sc_hdr[i].hd_secs = 32;
		}
	}
}

int
ami_attach(sc)
	struct ami_softc *sc;
{
	/* struct ami_rawsoftc *rsc; */
	struct ami_ccb *ccb;
	struct ami_iocmd *cmd;
	struct ami_sgent *sg;
	bus_dmamap_t idatamap;
	bus_dma_segment_t idataseg[1];
	const char *p;
	void *idata;
	int error;

	if (!(idata = ami_allocmem(sc->dmat, &idatamap, idataseg,
	    NBPG, 1, "init data"))) {
		ami_freemem(sc->dmat, &idatamap, idataseg,
		    NBPG, 1, "init data");
		return 1;
	}

	sc->sc_cmds = ami_allocmem(sc->dmat, &sc->sc_cmdmap, sc->sc_cmdseg,
	    sizeof(struct ami_iocmd), AMI_MAXCMDS+1, "command");
	if (!sc->sc_cmds) {
		ami_dispose(sc);
		ami_freemem(sc->dmat, &idatamap,
		    idataseg, NBPG, 1, "init data");
		return 1;
	}
	sc->sc_sgents = ami_allocmem(sc->dmat, &sc->sc_sgmap, sc->sc_sgseg,
	    sizeof(struct ami_sgent) * AMI_SGEPERCMD, AMI_MAXCMDS+1, "sglist");
	if (!sc->sc_sgents) {
		ami_dispose(sc);
		ami_freemem(sc->dmat, &idatamap,
		    idataseg, NBPG, 1, "init data");
		return 1;
	}

	TAILQ_INIT(&sc->sc_ccbq);
	TAILQ_INIT(&sc->sc_ccbdone);
	TAILQ_INIT(&sc->sc_free_ccb);

	/* 0th command is a mailbox */
	for (ccb = &sc->sc_ccbs[AMI_MAXCMDS-1],
	     cmd = sc->sc_cmds + sizeof(*cmd) * AMI_MAXCMDS,
	     sg = sc->sc_sgents + sizeof(*sg) * AMI_MAXCMDS * AMI_SGEPERCMD;
	    cmd >= (struct ami_iocmd *)sc->sc_cmds;
	    cmd--, ccb--, sg -= AMI_SGEPERCMD) {

		cmd->acc_id = cmd - (struct ami_iocmd *)sc->sc_cmds;
		if (cmd->acc_id) {
			error = bus_dmamap_create(sc->dmat,
			    AMI_MAXFER, AMI_MAXOFFSETS, AMI_MAXFER, 0,
			    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
			    &ccb->ccb_dmamap);
			if (error) {
				printf(": cannot create ccb dmamap (%d)\n",
				    error);
				ami_dispose(sc);
				ami_freemem(sc->dmat, &idatamap,
				    idataseg, NBPG, 1, "init data");
				return (1);
			}
			ccb->ccb_sc = sc;
			ccb->ccb_cmd = cmd;
			ccb->ccb_state = AMI_CCB_FREE;
			ccb->ccb_cmdpa = htole32(sc->sc_cmdseg[0].ds_addr +
			    cmd->acc_id * sizeof(*cmd));
			ccb->ccb_sglist = sg;
			ccb->ccb_sglistpa = htole32(sc->sc_sgseg[0].ds_addr +
			    cmd->acc_id * sizeof(*sg) * AMI_SGEPERCMD);
			TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
		} else {
			sc->sc_mbox = cmd;
			sc->sc_mbox_pa = sc->sc_cmdseg[0].ds_addr;
			AMI_DPRINTF(AMI_D_CMD, ("mbox_pa=%llx ",
			    sc->sc_mbox_pa));
		}
	}

	timeout_set(&sc->sc_poll_tmo, (void (*)(void *))ami_intr, sc);

	(sc->sc_init)(sc);
	{
		paddr_t	pa = idataseg[0].ds_addr;
		ami_lock_t lock;

		lock = AMI_LOCK_AMI(sc);

		ccb = ami_get_ccb(sc);
		cmd = ccb->ccb_cmd;

		/* try FC inquiry first */
		cmd->acc_cmd = AMI_FCOP;
		cmd->acc_io.aio_channel = AMI_FC_EINQ3;
		cmd->acc_io.aio_param = AMI_FC_EINQ3_SOLICITED_FULL;
		cmd->acc_io.aio_data = htole32(pa);
		if (ami_cmd(ccb, 0, 1) == 0) {
			struct ami_fc_einquiry *einq = idata;
			struct ami_fc_prodinfo *pi = idata;

			sc->sc_nunits = einq->ain_nlogdrv;
			ami_copyhds(sc, einq->ain_ldsize, einq->ain_ldprop,
			    einq->ain_ldstat);

			ccb = ami_get_ccb(sc);
			cmd = ccb->ccb_cmd;

			cmd->acc_cmd = AMI_FCOP;
			cmd->acc_io.aio_channel = AMI_FC_PRODINF;
			cmd->acc_io.aio_param = 0;
			cmd->acc_io.aio_data = htole32(pa);
			if (ami_cmd(ccb, 0, 1) == 0) {
				sc->sc_maxunits = AMI_BIG_MAX_LDRIVES;

				bcopy (pi->api_fwver, sc->sc_fwver, 16);
				sc->sc_fwver[15] = '\0';
				bcopy (pi->api_biosver, sc->sc_biosver, 16);
				sc->sc_biosver[15] = '\0';
				sc->sc_channels = pi->api_channels;
				sc->sc_targets = pi->api_fcloops;
				sc->sc_memory = letoh16(pi->api_ramsize);
				sc->sc_maxcmds = pi->api_maxcmd;
				p = "FC loop";
			}
		}

		if (sc->sc_maxunits == 0) {
			struct ami_inquiry *inq = idata;

			ccb = ami_get_ccb(sc);
			cmd = ccb->ccb_cmd;

			cmd->acc_cmd = AMI_EINQUIRY;
			cmd->acc_io.aio_channel = 0;
			cmd->acc_io.aio_param = 0;
			cmd->acc_io.aio_data = htole32(pa);
			if (ami_cmd(ccb, 0, 1) != 0) {
				ccb = ami_get_ccb(sc);
				cmd = ccb->ccb_cmd;

				cmd->acc_cmd = AMI_INQUIRY;
				cmd->acc_io.aio_channel = 0;
				cmd->acc_io.aio_param = 0;
				cmd->acc_io.aio_data = htole32(pa);
				if (ami_cmd(ccb, 0, 1) != 0) {
					AMI_UNLOCK_AMI(sc, lock);
					printf(": cannot do inquiry\n");
					ami_dispose(sc);
					ami_freemem(sc->dmat, &idatamap,
					    idataseg, NBPG, 1, "init data");
					return (1);
				}
			}

			sc->sc_maxunits = AMI_MAX_LDRIVES;
			sc->sc_nunits = inq->ain_nlogdrv;
			ami_copyhds(sc, inq->ain_ldsize, inq->ain_ldprop,
			    inq->ain_ldstat);

			bcopy (inq->ain_fwver, sc->sc_fwver, 4);
			sc->sc_fwver[4] = '\0';
			bcopy (inq->ain_biosver, sc->sc_biosver, 4);
			sc->sc_biosver[4] = '\0';
			sc->sc_channels = inq->ain_channels;
			sc->sc_targets = inq->ain_targets;
			sc->sc_memory = inq->ain_ramsize;
			sc->sc_maxcmds = inq->ain_maxcmd;
			p = "target";
		}

		AMI_UNLOCK_AMI(sc, lock);

		if (sc->sc_maxcmds > AMI_MAXCMDS)
			sc->sc_maxcmds = 1 /* AMI_MAXCMDS */;
	}
	ami_freemem(sc->dmat, &idatamap, idataseg, NBPG, 1, "init data");

	/* hack for hp netraid version encoding */
	if ('A' <= sc->sc_fwver[2] && sc->sc_fwver[2] <= 'Z' &&
	    sc->sc_fwver[1] < ' ' && sc->sc_fwver[0] < ' ' &&
	    'A' <= sc->sc_biosver[2] && sc->sc_biosver[2] <= 'Z' &&
	    sc->sc_biosver[1] < ' ' && sc->sc_biosver[0] < ' ') {

		snprintf(sc->sc_fwver, sizeof sc->sc_fwver, "%c.%02d.%02d",
		    sc->sc_fwver[2], sc->sc_fwver[1], sc->sc_fwver[0]);
		snprintf(sc->sc_biosver, sizeof sc->sc_biosver, "%c.%02d.%02d",
		    sc->sc_biosver[2], sc->sc_biosver[1], sc->sc_biosver[0]);
	}

	printf(": FW %s, BIOS v%s, %dMB RAM\n"
	    "%s: %d channels, %d %ss, %d logical drives\n",
	    sc->sc_fwver, sc->sc_biosver, sc->sc_memory,
	    sc->sc_dev.dv_xname,
	    sc->sc_channels, sc->sc_targets, p, sc->sc_nunits);

	/* TODO: fetch & print cache strategy */
	/* TODO: fetch & print scsi and raid info */

	sc->sc_link.device = &ami_dev;
	sc->sc_link.openings = sc->sc_maxcmds;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.adapter = &ami_switch;
	sc->sc_link.adapter_target = sc->sc_maxunits;
	sc->sc_link.adapter_buswidth = sc->sc_maxunits;

	config_found(&sc->sc_dev, &sc->sc_link, scsiprint);
#if 0
	rsc = malloc(sizeof(struct ami_rawsoftc) * sc->sc_channels,
	    M_DEVBUF, M_NOWAIT);
	if (!rsc) {
		printf("%s: no memory for raw interface\n",
		    sc->sc_dev.dv_xname);
		return (0);
	}

	bzero(rsc, sizeof(struct ami_rawsoftc) * sc->sc_channels);
	for (sc->sc_rawsoftcs = rsc;
	     rsc < &sc->sc_rawsoftcs[sc->sc_channels]; rsc++) {

		/* TODO fetch and print channel properties */

		rsc->sc_softc = sc;
		rsc->sc_channel = rsc - sc->sc_rawsoftcs;
		rsc->sc_link.device = &ami_raw_dev;
		rsc->sc_link.openings = sc->sc_maxcmds;
		rsc->sc_link.adapter_softc = rsc;
		rsc->sc_link.adapter = &ami_raw_switch;
		/* TODO fetch it from the controller */
		rsc->sc_link.adapter_target = sc->sc_targets;
		rsc->sc_link.adapter_buswidth = sc->sc_targets;

		config_found(&sc->sc_dev, &rsc->sc_link, scsiprint);
	}
#endif
	return 0;
}

int
ami_quartz_init(sc)
	struct ami_softc *sc;
{
	bus_space_write_4(sc->iot, sc->ioh, AMI_QIDB, 0);
	bus_space_barrier(sc->iot, sc->ioh,
	    AMI_QIDB, 4, BUS_SPACE_BARRIER_WRITE);

	return 0;
}

int
ami_quartz_exec(sc, cmd)
	struct ami_softc *sc;
	struct ami_iocmd *cmd;
{
	u_int32_t qidb;

	bus_space_barrier(sc->iot, sc->ioh,
	    AMI_QIDB, 4, BUS_SPACE_BARRIER_READ);
	qidb = bus_space_read_4(sc->iot, sc->ioh, AMI_QIDB);
	if (qidb & (AMI_QIDB_EXEC | AMI_QIDB_ACK)) {
		AMI_DPRINTF(AMI_D_CMD, ("qidb1=%x ", qidb));
		return (EBUSY);
	}

	/* do not scramble the busy mailbox */
	if (sc->sc_mbox->acc_busy) {
		AMI_DPRINTF(AMI_D_CMD, ("mbox_busy "));
		return (EBUSY);
	}

	*sc->sc_mbox = *cmd;
	bus_dmamap_sync(sc->dmat, sc->sc_cmdmap, 0, sizeof(*cmd),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	qidb = sc->sc_mbox_pa | AMI_QIDB_EXEC;
	AMI_DPRINTF(AMI_D_CMD, ("qidb2=%x ", qidb));
	bus_space_write_4(sc->iot, sc->ioh, AMI_QIDB, qidb);
	bus_space_barrier(sc->iot, sc->ioh,
	    AMI_QIDB, 4, BUS_SPACE_BARRIER_WRITE);
	return (0);
}

int
ami_quartz_done(sc, mbox)
	struct ami_softc *sc;
	struct ami_iocmd *mbox;
{
	u_int32_t qdb;

	bus_space_barrier(sc->iot, sc->ioh,
	    AMI_QIDB, 4, BUS_SPACE_BARRIER_READ);
	qdb = bus_space_read_4(sc->iot, sc->ioh, AMI_QIDB);
	if (qdb & (AMI_QIDB_EXEC | AMI_QIDB_ACK)) {
		AMI_DPRINTF(AMI_D_CMD, ("qidb3=%x ", qdb));
		return (0);
	}

	/* do not scramble the busy mailbox */
	if (sc->sc_mbox->acc_busy) {
		AMI_DPRINTF(AMI_D_CMD, ("mbox_busy "));
		return (0);
	}

	bus_space_barrier(sc->iot, sc->ioh,
	    AMI_QODB, 4, BUS_SPACE_BARRIER_READ);
	qdb = bus_space_read_4(sc->iot, sc->ioh, AMI_QODB);
	if (qdb == AMI_QODB_READY) {

		bus_dmamap_sync(sc->dmat, sc->sc_cmdmap, 0, sizeof(*mbox),
		    BUS_DMASYNC_POSTWRITE);
		*mbox = *sc->sc_mbox;

		/* ack interrupt */
		bus_space_write_4(sc->iot, sc->ioh, AMI_QODB, AMI_QODB_READY);
		bus_space_barrier(sc->iot, sc->ioh,
		    AMI_QODB, 4, BUS_SPACE_BARRIER_WRITE);

		qdb = sc->sc_mbox_pa | AMI_QIDB_ACK;
		bus_space_write_4(sc->iot, sc->ioh, AMI_QIDB, qdb);
		bus_space_barrier(sc->iot, sc->ioh,
		    AMI_QIDB, 4, BUS_SPACE_BARRIER_WRITE);
		return (1);
	}

	AMI_DPRINTF(AMI_D_CMD, ("qodb=%x ", qdb));

	return (0);
}

int
ami_schwartz_init(sc)
	struct ami_softc *sc;
{
	u_int32_t a = (u_int32_t)sc->sc_mbox_pa;

	bus_space_write_4(sc->iot, sc->ioh, AMI_SMBADDR, a);
	/* XXX 40bit address ??? */
	bus_space_write_1(sc->iot, sc->ioh, AMI_SMBENA, 0);

	bus_space_write_1(sc->iot, sc->ioh, AMI_SCMD, AMI_SCMD_ACK);
	bus_space_write_1(sc->iot, sc->ioh, AMI_SIEM, AMI_SEIM_ENA |
	    bus_space_read_1(sc->iot, sc->ioh, AMI_SIEM));

	return 0;
}

int
ami_schwartz_exec(sc, cmd)
	struct ami_softc *sc;
	struct ami_iocmd *cmd;
{
	if (bus_space_read_1(sc->iot, sc->ioh, AMI_SMBSTAT) & AMI_SMBST_BUSY)
		return EBUSY;

	*sc->sc_mbox = *cmd;
	bus_space_write_1(sc->iot, sc->ioh, AMI_SCMD, AMI_SCMD_EXEC);
	return 0;
}

int
ami_schwartz_done(sc, mbox)
	struct ami_softc *sc;
	struct ami_iocmd *mbox;
{
	u_int8_t stat;
#if 0
	/* do not scramble the busy mailbox */
	if (sc->sc_mbox->acc_busy)
		return (0);
#endif
	if (bus_space_read_1(sc->iot, sc->ioh, AMI_SMBSTAT) & AMI_SMBST_BUSY)
		return 0;

	stat = bus_space_read_1(sc->iot, sc->ioh, AMI_ISTAT);
	if (stat & AMI_ISTAT_PEND) {
		bus_space_write_1(sc->iot, sc->ioh, AMI_ISTAT, stat);

		*mbox = *sc->sc_mbox;

		bus_space_write_1(sc->iot, sc->ioh, AMI_SCMD, AMI_SCMD_ACK);

		return 1;
	}

	return 0;
}

int
ami_cmd(ccb, flags, wait)
	struct ami_ccb *ccb;
	int flags, wait;
{
	struct ami_softc *sc = ccb->ccb_sc;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	int error = 0, i, s;

	if (ccb->ccb_data) {
		struct ami_iocmd *cmd = ccb->ccb_cmd;
		bus_dma_segment_t *sgd;

		error = bus_dmamap_load(sc->dmat, dmap, ccb->ccb_data,
		    ccb->ccb_len, NULL, flags);
		if (error) {
			if (error == EFBIG)
				printf("more than %d dma segs\n", AMI_MAXOFFSETS);
			else
				printf("error %d loading dma map\n", error);

			ami_put_ccb(ccb);
			return (error);
		}

		sgd = dmap->dm_segs;
		AMI_DPRINTF(AMI_D_DMA, ("data=%p/%u<0x%lx/%u",
		    ccb->ccb_data, ccb->ccb_len,
		    sgd->ds_addr, sgd->ds_len));

		if(dmap->dm_nsegs > 1) {
			struct ami_sgent *sgl = ccb->ccb_sglist;

			cmd->acc_mbox.amb_nsge = htole32(dmap->dm_nsegs);
			cmd->acc_mbox.amb_data = ccb->ccb_sglistpa;

			for (i = 0; i < dmap->dm_nsegs; i++, sgd++) {
				sgl[i].asg_addr = htole32(sgd->ds_addr);
				sgl[i].asg_len = htole32(sgd->ds_len);
				if (i)
					AMI_DPRINTF(AMI_D_DMA, (",0x%lx/%u",
					    sgd->ds_addr, sgd->ds_len));
			}
		} else {
			cmd->acc_mbox.amb_nsge = htole32(0);
			cmd->acc_mbox.amb_data = htole32(sgd->ds_addr);
		}
		AMI_DPRINTF(AMI_D_DMA, ("> "));

		bus_dmamap_sync(sc->dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
	} else
		ccb->ccb_cmd->acc_mbox.amb_nsge = htole32(0);
	bus_dmamap_sync(sc->dmat, sc->sc_cmdmap, 0, sc->sc_cmdmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	s = splimp();
	if ((error = ami_start(ccb, wait))) {
		AMI_DPRINTF(AMI_D_DMA, ("error=%d ", error));
		__asm __volatile(".globl _bpamierr\n_bpamierr:");
		if (ccb->ccb_data)
			bus_dmamap_unload(sc->dmat, dmap);
		ami_put_ccb(ccb);
	} else if (wait) {
		AMI_DPRINTF(AMI_D_DMA, ("waiting "));
		if ((error = ami_complete(ccb)))
			ami_put_ccb(ccb);
	}
	splx(s);

	return (error);
}

int
ami_start(ccb, wait)
	struct ami_ccb *ccb;
	int wait;
{
	struct ami_softc *sc = ccb->ccb_sc;
	struct ami_iocmd *cmd = ccb->ccb_cmd;
	struct scsi_xfer *xs = ccb->ccb_xs;
	volatile struct ami_iocmd *mbox = sc->sc_mbox;
	int i;

	AMI_DPRINTF(AMI_D_CMD, ("start(%d) ", cmd->acc_id));

	if (ccb->ccb_state != AMI_CCB_READY) {
		printf("%s: ccb %d not ready <%d>\n",
		    sc->sc_dev.dv_xname, cmd->acc_id, ccb->ccb_state);
		return (EINVAL);
	}

	if (xs)
		timeout_set(&xs->stimeout, ami_stimeout, ccb);

	if (wait && mbox->acc_busy) {

		for (i = 100000; i-- && mbox->acc_busy; DELAY(10));

		if (mbox->acc_busy) {
			AMI_DPRINTF(AMI_D_CMD, ("mbox_busy "));
			return (EAGAIN);
		}
	}

	AMI_DPRINTF(AMI_D_CMD, ("exec "));

	cmd->acc_busy = 1;
	cmd->acc_poll = 0;
	cmd->acc_ack = 0;

	if (!(i = (sc->sc_exec)(sc, cmd))) {
		ccb->ccb_state = AMI_CCB_QUEUED;
		TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, ccb_link);
		if (!wait) {
#ifdef AMI_POLLING
			if (!timeout_pending(&sc->sc_poll_tmo))
				timeout_add(&sc->sc_poll_tmo, 1);
#endif
			if (xs) {
				struct timeval tv;
				tv.tv_sec = xs->timeout / 1000;
				tv.tv_usec = 1000 * (xs->timeout % 1000);
				timeout_add(&xs->stimeout, tvtohz(&tv));
			}
		}
	} else if (!wait && xs) {
		AMI_DPRINTF(AMI_D_CMD, ("2queue1(%d) ", cmd->acc_id));
		ccb->ccb_state = AMI_CCB_PREQUEUED;
		timeout_add(&xs->stimeout, 1);
		return (0);
	}

	return (i);
}

void
ami_stimeout(v)
	void *v;
{
	struct ami_ccb *ccb = v;
	struct ami_softc *sc = ccb->ccb_sc;
	struct scsi_xfer *xs = ccb->ccb_xs;
	struct ami_iocmd *cmd = ccb->ccb_cmd;
	volatile struct ami_iocmd *mbox = sc->sc_mbox;
	ami_lock_t lock, s;

	lock = AMI_LOCK_AMI(sc);
	switch (ccb->ccb_state) {
	case AMI_CCB_PREQUEUED:
		if (mbox->acc_busy) {
			timeout_add(&xs->stimeout, 1);
			break;
		}

		AMI_DPRINTF(AMI_D_CMD, ("requeue(%d) ", cmd->acc_id));

		ccb->ccb_state = AMI_CCB_READY;
		if (ami_start(ccb, 0)) {
			AMI_DPRINTF(AMI_D_CMD, ("requeue(%d) again\n", cmd->acc_id));
			ccb->ccb_state = AMI_CCB_PREQUEUED;
			timeout_add(&xs->stimeout, 1);
		}
		break;

	case AMI_CCB_QUEUED:
		/* XXX need to kill all cmds in the queue and reset the card */
		printf("%s: timeout ccb %d\n",
		    sc->sc_dev.dv_xname, cmd->acc_id);
		AMI_DPRINTF(AMI_D_CMD, ("timeout(%d) ", cmd->acc_id));
		if (xs->cmd->opcode != PREVENT_ALLOW &&
		    xs->cmd->opcode != SYNCHRONIZE_CACHE) {
			bus_dmamap_sync(sc->dmat, ccb->ccb_dmamap, 0,
			    ccb->ccb_dmamap->dm_mapsize,
			    (xs->flags & SCSI_DATA_IN) ?
			    BUS_DMASYNC_POSTREAD :
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->dmat, ccb->ccb_dmamap);
		}
		s = splimp();
		TAILQ_REMOVE(&sc->sc_ccbq, ccb, ccb_link);
		ami_put_ccb(ccb);
		splx(s);
		xs->error = XS_TIMEOUT;
		xs->flags |= ITSDONE;
		scsi_done(xs);
		break;
	case AMI_CCB_FREE:
	case AMI_CCB_READY:
		panic("ami_stimeout(%d) botch", cmd->acc_id);
	}
	AMI_UNLOCK_AMI(sc, lock);
}

int
ami_complete(ccb)
	struct ami_ccb *ccb;
{
	struct ami_softc *sc = ccb->ccb_sc;
	struct scsi_xfer *xs = ccb->ccb_xs;
	struct ami_iocmd mbox;
	int i, j, rv, status;

	i = 1 * (xs? xs->timeout: 1000);
	AMI_DPRINTF(AMI_D_CMD, ("%d ", i));
	for (rv = 1, status = 0; !status && rv && i--; DELAY(1000))
		if ((sc->sc_done)(sc, &mbox)) {
			AMI_DPRINTF(AMI_D_CMD, ("got#%d ", mbox.acc_nstat));
			status = mbox.acc_status;
			for (j = 0; j < mbox.acc_nstat; j++ ) {
				int ready = mbox.acc_cmplidl[j];

				AMI_DPRINTF(AMI_D_CMD, ("ready=%x ", ready));

				if (!ami_done(sc, ready) &&
				    ccb->ccb_cmd->acc_id == ready)
					rv = 0;
			}
		}

	if (status) {
		AMI_DPRINTF(AMI_D_CMD, ("aborted\n"));
	} else if (!rv) {
		AMI_DPRINTF(AMI_D_CMD, ("complete\n"));
	} else if (i < 0) {
		AMI_DPRINTF(AMI_D_CMD, ("timeout\n"));
	} else
		AMI_DPRINTF(AMI_D_CMD, ("screwed\n"));

	return rv? rv : status;
}

int
ami_done(sc, idx)
	struct ami_softc *sc;
	int idx;
{
	struct ami_ccb *ccb = &sc->sc_ccbs[idx - 1];
	struct scsi_xfer *xs = ccb->ccb_xs;
	ami_lock_t lock, s;

	AMI_DPRINTF(AMI_D_CMD, ("done(%d) ", ccb->ccb_cmd->acc_id));

	if (ccb->ccb_state != AMI_CCB_QUEUED) {
		printf("%s: unqueued ccb %d ready, state = %d\n",
		    sc->sc_dev.dv_xname, idx, ccb->ccb_state);
		return (1);
	}

	lock = AMI_LOCK_AMI(sc);
	s = splimp();
	ccb->ccb_state = AMI_CCB_READY;
	TAILQ_REMOVE(&sc->sc_ccbq, ccb, ccb_link);

	if (xs) {
		timeout_del(&xs->stimeout);
		if (xs->cmd->opcode != PREVENT_ALLOW &&
		    xs->cmd->opcode != SYNCHRONIZE_CACHE) {
			bus_dmamap_sync(sc->dmat, ccb->ccb_dmamap, 0,
			    ccb->ccb_dmamap->dm_mapsize,
			    (xs->flags & SCSI_DATA_IN) ?
			    BUS_DMASYNC_POSTREAD :
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->dmat, ccb->ccb_dmamap);
		}
		ccb->ccb_xs = NULL;
	} else {
		struct ami_iocmd *cmd = ccb->ccb_cmd;

		switch (cmd->acc_cmd) {
		case AMI_INQUIRY:
		case AMI_EINQUIRY:
		case AMI_EINQUIRY3:
			bus_dmamap_sync(sc->dmat, ccb->ccb_dmamap, 0,
			    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->dmat, ccb->ccb_dmamap);
			break;
		default:
			/* no data */
			break;
		}
	}

	ami_put_ccb(ccb);
	splx(s);

	if (xs) {
		xs->resid = 0;
		xs->flags |= ITSDONE;
		AMI_DPRINTF(AMI_D_CMD, ("scsi_done(%d) ", idx));
		scsi_done(xs);
	}

	AMI_UNLOCK_AMI(sc, lock);

	return (0);
}

void
amiminphys(bp)
	struct buf *bp;
{
	if (bp->b_bcount > AMI_MAXFER)
		bp->b_bcount = AMI_MAXFER;
	minphys(bp);
}

void
ami_copy_internal_data(xs, v, size)
	struct scsi_xfer *xs;
	void *v;
	size_t size;
{
	size_t copy_cnt;

	AMI_DPRINTF(AMI_D_MISC, ("ami_copy_internal_data "));

	if (!xs->datalen)
		printf("uio move not yet supported\n");
	else {
		copy_cnt = MIN(size, xs->datalen);
		bcopy(v, xs->data, copy_cnt);
	}
}

int
ami_scsi_raw_cmd(xs)
	struct scsi_xfer *xs;
{
	struct scsi_link *link = xs->sc_link;
	struct ami_rawsoftc *rsc = link->adapter_softc;
	struct ami_softc *sc = rsc->sc_softc;
	u_int8_t channel = rsc->sc_channel, target = link->target;
	struct ami_ccb *ccb, *ccb1;
	struct ami_iocmd *cmd;
	struct ami_passthrough *ps;
	int error;
	ami_lock_t lock;

	AMI_DPRINTF(AMI_D_CMD, ("ami_scsi_raw_cmd "));

	lock = AMI_LOCK_AMI(sc);

	if (xs->cmdlen > AMI_MAX_CDB) {
		AMI_DPRINTF(AMI_D_CMD, ("CDB too big %p ", xs));
		bzero(&xs->sense, sizeof(xs->sense));
		xs->sense.error_code = SSD_ERRCODE_VALID | 0x70;
		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.add_sense_code = 0x20; /* illcmd, 0x24 illfield */
		xs->error = XS_SENSE;
		scsi_done(xs);
		AMI_UNLOCK_AMI(sc, lock);
		return (COMPLETE);
	}

	xs->error = XS_NOERROR;

	if ((ccb = ami_get_ccb(sc)) == NULL) {
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		AMI_UNLOCK_AMI(sc, lock);
		return (COMPLETE);
	}

	if ((ccb1 = ami_get_ccb(sc)) == NULL) {
		ami_put_ccb(ccb);
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		AMI_UNLOCK_AMI(sc, lock);
		return (COMPLETE);
	}

	ccb->ccb_xs = xs;
	ccb->ccb_ccb1 = ccb1;
	ccb->ccb_len = xs->datalen;
	ccb->ccb_data = xs->data;

	ps = (struct ami_passthrough *)ccb1->ccb_cmd;
	ps->apt_param = AMI_PTPARAM(AMI_TIMEOUT_6,1,0);
	ps->apt_channel = channel;
	ps->apt_target = target;
	bcopy(xs->cmd, ps->apt_cdb, AMI_MAX_CDB);
	ps->apt_ncdb = xs->cmdlen;
	ps->apt_nsense = AMI_MAX_SENSE;

	cmd = ccb->ccb_cmd;
	cmd->acc_cmd = AMI_PASSTHRU;
	cmd->acc_passthru.apt_data = ccb1->ccb_cmdpa;

	if ((error = ami_cmd(ccb, ((xs->flags & SCSI_NOSLEEP)?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK), xs->flags & SCSI_POLL))) {

		AMI_DPRINTF(AMI_D_CMD, ("failed %p ", xs));
		if (xs->flags & SCSI_POLL) {
			xs->error = XS_TIMEOUT;
			AMI_UNLOCK_AMI(sc, lock);
			return (TRY_AGAIN_LATER);
		} else {
			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
			AMI_UNLOCK_AMI(sc, lock);
			return (COMPLETE);
		}
	}


	if (xs->flags & SCSI_POLL) {
		scsi_done(xs);
		AMI_UNLOCK_AMI(sc, lock);
		return (COMPLETE);
	}

	AMI_UNLOCK_AMI(sc, lock);
	return (SUCCESSFULLY_QUEUED);
}

int
ami_scsi_cmd(xs)
	struct scsi_xfer *xs;
{
	struct scsi_link *link = xs->sc_link;
	struct ami_softc *sc = link->adapter_softc;
	struct ami_ccb *ccb;
	struct ami_iocmd *cmd;
	struct scsi_inquiry_data inq;
	struct scsi_sense_data sd;
	struct {
		struct scsi_mode_header hd;
		struct scsi_blk_desc bd;
		union scsi_disk_pages dp;
	} mpd;
	struct scsi_read_cap_data rcd;
	u_int8_t target = link->target;
	u_int32_t blockno, blockcnt;
	struct scsi_rw *rw;
	struct scsi_rw_big *rwb;
	int error, flags;
	ami_lock_t lock;

	AMI_DPRINTF(AMI_D_CMD, ("ami_scsi_cmd "));

	lock = AMI_LOCK_AMI(sc);
	if (target >= sc->sc_nunits || !sc->sc_hdr[target].hd_present ||
	    link->lun != 0) {
		AMI_DPRINTF(AMI_D_CMD, ("no target %d ", target));
		/* XXX should be XS_SENSE and sense filled out */
		xs->error = XS_DRIVER_STUFFUP;
		xs->flags |= ITSDONE;
		scsi_done(xs);
		AMI_UNLOCK_AMI(sc, lock);
		return (COMPLETE);
	}

	error = 0;
	xs->error = XS_NOERROR;

	switch (xs->cmd->opcode) {
	case TEST_UNIT_READY:
	case START_STOP:
#if 0
	case VERIFY:
#endif
		AMI_DPRINTF(AMI_D_CMD, ("opc %d tgt %d ", xs->cmd->opcode,
		    target));
		break;

	case REQUEST_SENSE:
		AMI_DPRINTF(AMI_D_CMD, ("REQUEST SENSE tgt %d ", target));
		bzero(&sd, sizeof sd);
		sd.error_code = 0x70;
		sd.segment = 0;
		sd.flags = SKEY_NO_SENSE;
		*(u_int32_t*)sd.info = htole32(0);
		sd.extra_len = 0;
		ami_copy_internal_data(xs, &sd, sizeof sd);
		break;

	case INQUIRY:
		AMI_DPRINTF(AMI_D_CMD, ("INQUIRY tgt %d ", target));
		bzero(&inq, sizeof inq);
		inq.device = T_DIRECT;
		inq.dev_qual2 = 0;
		inq.version = 2;
		inq.response_format = 2;
		inq.additional_length = 32;
		strlcpy(inq.vendor, "AMI ", sizeof inq.vendor);
		snprintf(inq.product, sizeof inq.product, "Host drive #%02d",
		    target);
		strlcpy(inq.revision, " ", sizeof inq.revision);
		ami_copy_internal_data(xs, &inq, sizeof inq);
		break;

	case MODE_SENSE:
		AMI_DPRINTF(AMI_D_CMD, ("MODE SENSE tgt %d ", target));

		bzero(&mpd, sizeof mpd);
		switch (((struct scsi_mode_sense *)xs->cmd)->page) {
		case 4:
			/* scsi_disk.h says this should be 0x16 */
			mpd.dp.rigid_geometry.pg_length = 0x16;
			mpd.hd.data_length = sizeof mpd.hd + sizeof mpd.bd +
			    mpd.dp.rigid_geometry.pg_length;
			mpd.hd.blk_desc_len = sizeof mpd.bd;

			mpd.hd.dev_spec = 0;	/* writeprotect ? XXX */
			_lto3b(AMI_SECTOR_SIZE, mpd.bd.blklen);
			mpd.dp.rigid_geometry.pg_code = 4;
			_lto3b(sc->sc_hdr[target].hd_size /
			    sc->sc_hdr[target].hd_heads /
			    sc->sc_hdr[target].hd_secs,
			    mpd.dp.rigid_geometry.ncyl);
			mpd.dp.rigid_geometry.nheads =
			    sc->sc_hdr[target].hd_heads;
			ami_copy_internal_data(xs, (u_int8_t *)&mpd,
			    sizeof mpd);
			break;

		default:
			printf("%s: mode sense page %d not simulated\n",
			    sc->sc_dev.dv_xname,
			    ((struct scsi_mode_sense *)xs->cmd)->page);
			xs->error = XS_DRIVER_STUFFUP;
		}
		break;

	case READ_CAPACITY:
		AMI_DPRINTF(AMI_D_CMD, ("READ CAPACITY tgt %d ", target));
		bzero(&rcd, sizeof rcd);
		_lto4b(sc->sc_hdr[target].hd_size - 1, rcd.addr);
		_lto4b(AMI_SECTOR_SIZE, rcd.length);
		ami_copy_internal_data(xs, &rcd, sizeof rcd);
		break;

	case PREVENT_ALLOW:
		AMI_DPRINTF(AMI_D_CMD, ("PREVENT/ALLOW "));
		AMI_UNLOCK_AMI(sc, lock);
		return (COMPLETE);

	case SYNCHRONIZE_CACHE:
		AMI_DPRINTF(AMI_D_CMD, ("SYNCHRONIZE CACHE "));
		error++;
	case READ_COMMAND:
		if (!error) {
			AMI_DPRINTF(AMI_D_CMD, ("READ "));
			error++;
		}
	case READ_BIG:
		if (!error) {
			AMI_DPRINTF(AMI_D_CMD, ("READ BIG "));
			error++;
		}
	case WRITE_COMMAND:
		if (!error) {
			AMI_DPRINTF(AMI_D_CMD, ("WRITE "));
			error++;
		}
	case WRITE_BIG:
		if (!error) {
			AMI_DPRINTF(AMI_D_CMD, ("WRITE BIG "));
			error++;
		}

		flags = xs->flags;
		if (xs->cmd->opcode != SYNCHRONIZE_CACHE) {
			/* A read or write operation. */
			if (xs->cmdlen == 6) {
				rw = (struct scsi_rw *)xs->cmd;
				blockno = _3btol(rw->addr) &
				    (SRW_TOPADDR << 16 | 0xffff);
				blockcnt = rw->length ? rw->length : 0x100;
			} else {
				rwb = (struct scsi_rw_big *)xs->cmd;
				blockno = _4btol(rwb->addr);
				blockcnt = _2btol(rwb->length);
				/* TODO: reflect DPO & FUA flags */
				if (xs->cmd->opcode == WRITE_BIG &&
				    rwb->byte2 & 0x18)
					flags |= 0;
			}
			if (blockno >= sc->sc_hdr[target].hd_size ||
			    blockno + blockcnt > sc->sc_hdr[target].hd_size) {
				printf("%s: out of bounds %u-%u >= %u\n",
				    sc->sc_dev.dv_xname, blockno, blockcnt,
				    sc->sc_hdr[target].hd_size);
				xs->error = XS_DRIVER_STUFFUP;
				scsi_done(xs);
				AMI_UNLOCK_AMI(sc, lock);
				return (COMPLETE);
			}
		}

		if ((ccb = ami_get_ccb(sc)) == NULL) {
			AMI_DPRINTF(AMI_D_CMD, ("no more ccbs "));
			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
			AMI_UNLOCK_AMI(sc, lock);
			__asm __volatile(".globl _bpamiccb\n_bpamiccb:");
			return (COMPLETE);
		}

		ccb->ccb_xs = xs;
		ccb->ccb_ccb1 = NULL;
		ccb->ccb_len = xs->datalen;
		ccb->ccb_data = xs->data;
		cmd = ccb->ccb_cmd;
		cmd->acc_mbox.amb_nsect = htole16(blockcnt);
		cmd->acc_mbox.amb_lba = htole32(blockno);
		cmd->acc_mbox.amb_ldn = target;
		cmd->acc_mbox.amb_data = 0;

		switch (xs->cmd->opcode) {
		case SYNCHRONIZE_CACHE:
			cmd->acc_cmd = AMI_FLUSH;
			if (xs->timeout < 30000)
				xs->timeout = 30000;	/* at least 30sec */
			break;
		case READ_COMMAND: case READ_BIG:
			cmd->acc_cmd = AMI_READ;
			break;
		case WRITE_COMMAND: case WRITE_BIG:
			cmd->acc_cmd = AMI_WRITE;
			break;
		}

		if ((error = ami_cmd(ccb, ((flags & SCSI_NOSLEEP)?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK), flags & SCSI_POLL))) {

			AMI_DPRINTF(AMI_D_CMD, ("failed %p ", xs));
			__asm __volatile(".globl _bpamifail\n_bpamifail:");
			if (flags & SCSI_POLL) {
				xs->error = XS_TIMEOUT;
				AMI_UNLOCK_AMI(sc, lock);
				return (TRY_AGAIN_LATER);
			} else {
				xs->error = XS_DRIVER_STUFFUP;
				scsi_done(xs);
				AMI_UNLOCK_AMI(sc, lock);
				return (COMPLETE);
			}
		}

		AMI_UNLOCK_AMI(sc, lock);
		if (flags & SCSI_POLL)
			return (COMPLETE);
		else
			return (SUCCESSFULLY_QUEUED);

	default:
		AMI_DPRINTF(AMI_D_CMD, ("unknown opc %d ", xs->cmd->opcode));
		xs->error = XS_DRIVER_STUFFUP;
	}

	AMI_UNLOCK_AMI(sc, lock);
	return (COMPLETE);
}

int
ami_intr(v)
	void *v;
{
	struct ami_softc *sc = v;
	struct ami_iocmd mbox;
	int i, s, rv = 0;
	ami_lock_t lock;

	if (TAILQ_EMPTY(&sc->sc_ccbq))
		return (0);

	AMI_DPRINTF(AMI_D_INTR, ("intr "));

	lock = AMI_LOCK_AMI(sc);
	s = splimp();	/* XXX need to do this to mask timeouts */
	while ((sc->sc_done)(sc, &mbox)) {
		AMI_DPRINTF(AMI_D_CMD, ("got#%d ", mbox.acc_nstat));
		for (i = 0; i < mbox.acc_nstat; i++ ) {
			int ready = mbox.acc_cmplidl[i];

			AMI_DPRINTF(AMI_D_CMD, ("ready=%d ", ready));

			if (!ami_done(sc, ready))
				rv |= 1;
		}
	}

#ifdef AMI_POLLING
	if (!TAILQ_EMPTY(&sc->sc_ccbq) && !timeout_pending(&sc->sc_poll_tmo)) {
		AMI_DPRINTF(AMI_D_INTR, ("tmo "));
		timeout_add(&sc->sc_poll_tmo, 2);
	}
#endif

	splx(s);
	AMI_UNLOCK_AMI(sc, lock);
	AMI_DPRINTF(AMI_D_INTR, ("exit "));
	return (rv);
}