/*	$NetBSD: mly.c,v 1.30 2006/10/12 01:31:32 christos Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, Thor Lancelot Simon, and Eric Haszlakiewicz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2000, 2001 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from FreeBSD: mly.c,v 1.8 2001/07/14 00:12:22 msmith Exp
 */

/*
 * Driver for the Mylex AcceleRAID and eXtremeRAID family with v6 firmware.
 *
 * TODO:
 *
 * o Make mly->mly_btl a hash, then MLY_BTL_RESCAN becomes a SIMPLEQ.
 * o Handle FC and multiple LUNs.
 * o Fix mmbox usage.
 * o Fix transfer speed fudge.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mly.c,v 1.30 2006/10/12 01:31:32 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/scsiio.h>
#include <sys/kthread.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/mlyreg.h>
#include <dev/pci/mlyio.h>
#include <dev/pci/mlyvar.h>
#include <dev/pci/mly_tables.h>

static void	mly_attach(struct device *, struct device *, void *);
static int	mly_match(struct device *, struct cfdata *, void *);
static const struct mly_ident *mly_find_ident(struct pci_attach_args *);
static int	mly_fwhandshake(struct mly_softc *);
static int	mly_flush(struct mly_softc *);
static int	mly_intr(void *);
static void	mly_shutdown(void *);

static int	mly_alloc_ccbs(struct mly_softc *);
static void	mly_check_event(struct mly_softc *);
static void	mly_complete_event(struct mly_softc *, struct mly_ccb *);
static void	mly_complete_rescan(struct mly_softc *, struct mly_ccb *);
static int	mly_dmamem_alloc(struct mly_softc *, int, bus_dmamap_t *,
		    caddr_t *, bus_addr_t *, bus_dma_segment_t *);
static void	mly_dmamem_free(struct mly_softc *, int, bus_dmamap_t,
		    caddr_t, bus_dma_segment_t *);
static int	mly_enable_mmbox(struct mly_softc *);
static void	mly_fetch_event(struct mly_softc *);
static int	mly_get_controllerinfo(struct mly_softc *);
static int	mly_get_eventstatus(struct mly_softc *);
static int	mly_ioctl(struct mly_softc *, struct mly_cmd_ioctl *,
		    void **, size_t, void *, size_t *);
static void	mly_padstr(char *, const char *, int);
static void	mly_process_event(struct mly_softc *, struct mly_event *);
static void	mly_release_ccbs(struct mly_softc *);
static int	mly_scan_btl(struct mly_softc *, int, int);
static void	mly_scan_channel(struct mly_softc *, int);
static void	mly_thread(void *);
static void	mly_thread_create(void *);

static int	mly_ccb_alloc(struct mly_softc *, struct mly_ccb **);
static void	mly_ccb_complete(struct mly_softc *, struct mly_ccb *);
static void	mly_ccb_enqueue(struct mly_softc *, struct mly_ccb *);
static void	mly_ccb_free(struct mly_softc *, struct mly_ccb *);
static int	mly_ccb_map(struct mly_softc *, struct mly_ccb *);
static int	mly_ccb_poll(struct mly_softc *, struct mly_ccb *, int);
static int	mly_ccb_submit(struct mly_softc *, struct mly_ccb *);
static void	mly_ccb_unmap(struct mly_softc *, struct mly_ccb *);
static int	mly_ccb_wait(struct mly_softc *, struct mly_ccb *, int);

static void	mly_get_xfer_mode(struct mly_softc *, int,
		    struct scsipi_xfer_mode *);
static void	mly_scsipi_complete(struct mly_softc *, struct mly_ccb *);
static int	mly_scsipi_ioctl(struct scsipi_channel *, u_long, caddr_t,
		    int, struct proc *);
static void	mly_scsipi_minphys(struct buf *);
static void	mly_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);

static int	mly_user_command(struct mly_softc *, struct mly_user_command *);
static int	mly_user_health(struct mly_softc *, struct mly_user_health *);

extern struct cfdriver mly_cd;

CFATTACH_DECL(mly, sizeof(struct mly_softc),
    mly_match, mly_attach, NULL, NULL);

dev_type_open(mlyopen);
dev_type_close(mlyclose);
dev_type_ioctl(mlyioctl);

const struct cdevsw mly_cdevsw = {
	mlyopen, mlyclose, noread, nowrite, mlyioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
};

static struct mly_ident {
	u_short	vendor;
	u_short	product;
	u_short	subvendor;
	u_short	subproduct;
	int	hwif;
	const char	*desc;
} const mly_ident[] = {
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_EXTREMERAID,
		PCI_VENDOR_MYLEX,
		0x0040,
		MLY_HWIF_STRONGARM,
		"eXtremeRAID 2000"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_EXTREMERAID,
		PCI_VENDOR_MYLEX,
		0x0030,
		MLY_HWIF_STRONGARM,
		"eXtremeRAID 3000"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_ACCELERAID,
		PCI_VENDOR_MYLEX,
		0x0050,
		MLY_HWIF_I960RX,
		"AcceleRAID 352"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_ACCELERAID,
		PCI_VENDOR_MYLEX,
		0x0052,
		MLY_HWIF_I960RX,
		"AcceleRAID 170"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_ACCELERAID,
		PCI_VENDOR_MYLEX,
		0x0054,
		MLY_HWIF_I960RX,
		"AcceleRAID 160"
	},
};

static void	*mly_sdh;

/*
 * Try to find a `mly_ident' entry corresponding to this board.
 */
static const struct mly_ident *
mly_find_ident(struct pci_attach_args *pa)
{
	const struct mly_ident *mpi, *maxmpi;
	pcireg_t reg;

	mpi = mly_ident;
	maxmpi = mpi + sizeof(mly_ident) / sizeof(mly_ident[0]);

	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_I2O)
		return (NULL);

	for (; mpi < maxmpi; mpi++) {
		if (PCI_VENDOR(pa->pa_id) != mpi->vendor ||
		    PCI_PRODUCT(pa->pa_id) != mpi->product)
			continue;

		if (mpi->subvendor == 0x0000)
			return (mpi);

		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

		if (PCI_VENDOR(reg) == mpi->subvendor &&
		    PCI_PRODUCT(reg) == mpi->subproduct)
			return (mpi);
	}

	return (NULL);
}

/*
 * Match a supported board.
 */
static int
mly_match(struct device *parent __unused, struct cfdata *cfdata __unused,
	  void *aux)
{

	return (mly_find_ident(aux) != NULL);
}

/*
 * Attach a supported board.
 */
static void
mly_attach(struct device *parent __unused, struct device *self, void *aux)
{
	struct pci_attach_args *pa;
	struct mly_softc *mly;
	struct mly_ioctl_getcontrollerinfo *mi;
	const struct mly_ident *ident;
	pci_chipset_tag_t pc;
	pci_intr_handle_t ih;
	bus_space_handle_t memh, ioh;
	bus_space_tag_t memt, iot;
	pcireg_t reg;
	const char *intrstr;
	int ior, memr, i, rv, state;
	struct scsipi_adapter *adapt;
	struct scsipi_channel *chan;

	mly = (struct mly_softc *)self;
	pa = aux;
	pc = pa->pa_pc;
	ident = mly_find_ident(pa);
	state = 0;

	mly->mly_dmat = pa->pa_dmat;
	mly->mly_hwif = ident->hwif;

	printf(": Mylex %s\n", ident->desc);

	/*
	 * Map the PCI register window.
	 */
	memr = -1;
	ior = -1;

	for (i = 0x10; i <= 0x14; i += 4) {
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, i);

		if (PCI_MAPREG_TYPE(reg) == PCI_MAPREG_TYPE_IO) {
			if (ior == -1 && PCI_MAPREG_IO_SIZE(reg) != 0)
				ior = i;
		} else {
			if (memr == -1 && PCI_MAPREG_MEM_SIZE(reg) != 0)
				memr = i;
		}
	}

	if (memr != -1)
		if (pci_mapreg_map(pa, memr, PCI_MAPREG_TYPE_MEM, 0,
		    &memt, &memh, NULL, NULL))
			memr = -1;
	if (ior != -1)
		if (pci_mapreg_map(pa, ior, PCI_MAPREG_TYPE_IO, 0,
		    &iot, &ioh, NULL, NULL))
			ior = -1;

	if (memr != -1) {
		mly->mly_iot = memt;
		mly->mly_ioh = memh;
	} else if (ior != -1) {
		mly->mly_iot = iot;
		mly->mly_ioh = ioh;
	} else {
		printf("%s: can't map i/o or memory space\n", self->dv_xname);
		return;
	}

	/*
	 * Enable the device.
	 */
	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    reg | PCI_COMMAND_MASTER_ENABLE);

	/*
	 * Map and establish the interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf("%s: can't map interrupt\n", self->dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	mly->mly_ih = pci_intr_establish(pc, ih, IPL_BIO, mly_intr, mly);
	if (mly->mly_ih == NULL) {
		printf("%s: can't establish interrupt", self->dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", mly->mly_dv.dv_xname,
		    intrstr);

	/*
	 * Take care of interface-specific tasks.
	 */
	switch (mly->mly_hwif) {
	case MLY_HWIF_I960RX:
		mly->mly_doorbell_true = 0x00;
		mly->mly_cmd_mailbox = MLY_I960RX_COMMAND_MAILBOX;
		mly->mly_status_mailbox = MLY_I960RX_STATUS_MAILBOX;
		mly->mly_idbr = MLY_I960RX_IDBR;
		mly->mly_odbr = MLY_I960RX_ODBR;
		mly->mly_error_status = MLY_I960RX_ERROR_STATUS;
		mly->mly_interrupt_status = MLY_I960RX_INTERRUPT_STATUS;
		mly->mly_interrupt_mask = MLY_I960RX_INTERRUPT_MASK;
		break;

	case MLY_HWIF_STRONGARM:
		mly->mly_doorbell_true = 0xff;
		mly->mly_cmd_mailbox = MLY_STRONGARM_COMMAND_MAILBOX;
		mly->mly_status_mailbox = MLY_STRONGARM_STATUS_MAILBOX;
		mly->mly_idbr = MLY_STRONGARM_IDBR;
		mly->mly_odbr = MLY_STRONGARM_ODBR;
		mly->mly_error_status = MLY_STRONGARM_ERROR_STATUS;
		mly->mly_interrupt_status = MLY_STRONGARM_INTERRUPT_STATUS;
		mly->mly_interrupt_mask = MLY_STRONGARM_INTERRUPT_MASK;
		break;
	}
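
	/*
	 * Note on the register accessors: mly_inb(), mly_outb(),
	 * mly_idbr_true() and friends (defined in mlyvar.h) take the
	 * per-interface offsets initialised above, conceptually along the
	 * lines of (sketch only; see mlyvar.h for the real definitions):
	 *
	 *	#define mly_outb(mly, off, val)				\
	 *	    bus_space_write_1((mly)->mly_iot, (mly)->mly_ioh,	\
	 *		(off), (val))
	 *
	 * so the rest of the driver is independent of whether an i960RX or
	 * StrongARM bridge is fitted.
	 */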

	/*
	 * Allocate and map the scatter/gather lists.
	 */
	rv = mly_dmamem_alloc(mly, MLY_SGL_SIZE * MLY_MAX_CCBS,
	    &mly->mly_sg_dmamap, (caddr_t *)&mly->mly_sg,
	    &mly->mly_sg_busaddr, &mly->mly_sg_seg);
	if (rv) {
		printf("%s: unable to allocate S/G maps\n",
		    mly->mly_dv.dv_xname);
		goto bad;
	}
	state++;

	/*
	 * Allocate and map the memory mailbox.
	 */
	rv = mly_dmamem_alloc(mly, sizeof(struct mly_mmbox),
	    &mly->mly_mmbox_dmamap, (caddr_t *)&mly->mly_mmbox,
	    &mly->mly_mmbox_busaddr, &mly->mly_mmbox_seg);
	if (rv) {
		printf("%s: unable to allocate mailboxes\n",
		    mly->mly_dv.dv_xname);
		goto bad;
	}
	state++;

	/*
	 * Initialise per-controller queues.
	 */
	SLIST_INIT(&mly->mly_ccb_free);
	SIMPLEQ_INIT(&mly->mly_ccb_queue);

	/*
	 * Disable interrupts before we start talking to the controller.
	 */
	mly_outb(mly, mly->mly_interrupt_mask, MLY_INTERRUPT_MASK_DISABLE);

	/*
	 * Wait for the controller to come ready, handshaking with the
	 * firmware if required.  This is typically only necessary on
	 * platforms where the controller BIOS does not run.
	 */
	if (mly_fwhandshake(mly)) {
		printf("%s: unable to bring controller online\n",
		    mly->mly_dv.dv_xname);
		goto bad;
	}

	/*
	 * Allocate initial command buffers, obtain controller feature
	 * information, and then reallocate command buffers, since we'll
	 * know how many we want.
	 */
	if (mly_alloc_ccbs(mly)) {
		printf("%s: unable to allocate CCBs\n",
		    mly->mly_dv.dv_xname);
		goto bad;
	}
	state++;
	if (mly_get_controllerinfo(mly)) {
		printf("%s: unable to retrieve controller info\n",
		    mly->mly_dv.dv_xname);
		goto bad;
	}
	mly_release_ccbs(mly);
	if (mly_alloc_ccbs(mly)) {
		printf("%s: unable to allocate CCBs\n",
		    mly->mly_dv.dv_xname);
		state--;
		goto bad;
	}

	/*
	 * Get the current event counter for health purposes, populate the
	 * initial health status buffer.
	 */
	if (mly_get_eventstatus(mly)) {
		printf("%s: unable to retrieve event status\n",
		    mly->mly_dv.dv_xname);
		goto bad;
	}

	/*
	 * Enable memory-mailbox mode.
	 */
	if (mly_enable_mmbox(mly)) {
		printf("%s: unable to enable memory mailbox\n",
		    mly->mly_dv.dv_xname);
		goto bad;
	}

	/*
	 * Print a little information about the controller.
	 */
	mi = mly->mly_controllerinfo;

	printf("%s: %d physical channel%s, firmware %d.%02d-%d-%02d "
	    "(%02d%02d%02d%02d), %dMB RAM\n", mly->mly_dv.dv_xname,
	    mi->physical_channels_present,
	    mi->physical_channels_present > 1 ? "s" : "",
	    mi->fw_major, mi->fw_minor, mi->fw_turn, mi->fw_build,
	    mi->fw_century, mi->fw_year, mi->fw_month, mi->fw_day,
	    le16toh(mi->memory_size));

	/*
	 * Register our `shutdownhook'.
	 */
	if (mly_sdh == NULL)
		mly_sdh = shutdownhook_establish(mly_shutdown, NULL);

	/*
	 * Clear any previous BTL information.  For each bus that scsipi
	 * wants to scan, we'll receive the SCBUSIOLLSCAN ioctl and retrieve
	 * all BTL info at that point.
	 */
	memset(&mly->mly_btl, 0, sizeof(mly->mly_btl));

	mly->mly_nchans = mly->mly_controllerinfo->physical_channels_present +
	    mly->mly_controllerinfo->virtual_channels_present;
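
	/*
	 * Physical channels are numbered before virtual (logical device)
	 * channels; MLY_BUS_IS_VIRTUAL() relies on this ordering when it
	 * classifies a bus number.
	 */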

	/*
	 * Attach to scsipi.
	 */
	adapt = &mly->mly_adapt;
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &mly->mly_dv;
	adapt->adapt_nchannels = mly->mly_nchans;
	adapt->adapt_openings = mly->mly_ncmds - MLY_CCBS_RESV;
	adapt->adapt_max_periph = mly->mly_ncmds - MLY_CCBS_RESV;
	adapt->adapt_request = mly_scsipi_request;
	adapt->adapt_minphys = mly_scsipi_minphys;
	adapt->adapt_ioctl = mly_scsipi_ioctl;

	for (i = 0; i < mly->mly_nchans; i++) {
		chan = &mly->mly_chans[i];
		memset(chan, 0, sizeof(*chan));
		chan->chan_adapter = adapt;
		chan->chan_bustype = &scsi_bustype;
		chan->chan_channel = i;
		chan->chan_ntargets = MLY_MAX_TARGETS;
		chan->chan_nluns = MLY_MAX_LUNS;
		chan->chan_id = mly->mly_controllerparam->initiator_id;
		chan->chan_flags = SCSIPI_CHAN_NOSETTLE;
		config_found(&mly->mly_dv, chan, scsiprint);
	}

	/*
	 * Now enable interrupts...
	 */
	mly_outb(mly, mly->mly_interrupt_mask, MLY_INTERRUPT_MASK_ENABLE);

	/*
	 * Finally, create our monitoring thread.
	 */
	kthread_create(mly_thread_create, mly);

	mly->mly_state |= MLY_STATE_INITOK;
	return;

 bad:
	if (state > 2)
		mly_release_ccbs(mly);
	if (state > 1)
		mly_dmamem_free(mly, sizeof(struct mly_mmbox),
		    mly->mly_mmbox_dmamap, (caddr_t)mly->mly_mmbox,
		    &mly->mly_mmbox_seg);
	if (state > 0)
		mly_dmamem_free(mly, MLY_SGL_SIZE * MLY_MAX_CCBS,
		    mly->mly_sg_dmamap, (caddr_t)mly->mly_sg,
		    &mly->mly_sg_seg);
}

/*
 * Scan all possible devices on the specified channel.
 */
static void
mly_scan_channel(struct mly_softc *mly, int bus)
{
	int s, target;

	for (target = 0; target < MLY_MAX_TARGETS; target++) {
		s = splbio();
		if (!mly_scan_btl(mly, bus, target)) {
			tsleep(&mly->mly_btl[bus][target], PRIBIO, "mlyscan",
			    0);
		}
		splx(s);
	}
}

/*
 * Shut down all configured `mly' devices.
 */
static void
mly_shutdown(void *cookie __unused)
{
	struct mly_softc *mly;
	int i;

	for (i = 0; i < mly_cd.cd_ndevs; i++) {
		if ((mly = device_lookup(&mly_cd, i)) == NULL)
			continue;

		if (mly_flush(mly))
			printf("%s: unable to flush cache\n",
			    mly->mly_dv.dv_xname);
	}
}

/*
 * Fill in the mly_controllerinfo and mly_controllerparam fields in the
 * softc.
 */
static int
mly_get_controllerinfo(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;
	int rv;

	/*
	 * Build the getcontrollerinfo ioctl and send it.
	 */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_GETCONTROLLERINFO;
	rv = mly_ioctl(mly, &mci, (void **)&mly->mly_controllerinfo,
	    sizeof(*mly->mly_controllerinfo), NULL, NULL);
	if (rv != 0)
		return (rv);

	/*
	 * Build the getcontrollerparameter ioctl and send it.
	 */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_GETCONTROLLERPARAMETER;
	rv = mly_ioctl(mly, &mci, (void **)&mly->mly_controllerparam,
	    sizeof(*mly->mly_controllerparam), NULL, NULL);

	return (rv);
}
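
/*
 * Illustrative only: mly_scan_btl() below expects its callers to block
 * interrupts around the request and, if they need the result, to sleep on
 * the BTL entry, roughly as mly_scan_channel() (above) does:
 *
 *	s = splbio();
 *	if (mly_scan_btl(mly, bus, target) == 0)
 *		tsleep(&mly->mly_btl[bus][target], PRIBIO, "mlyscan", 0);
 *	splx(s);
 */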

/*
 * Rescan a device, possibly as a consequence of getting an event which
 * suggests that it may have changed.  Must be called with interrupts
 * blocked.
 */
static int
mly_scan_btl(struct mly_softc *mly, int bus, int target)
{
	struct mly_ccb *mc;
	struct mly_cmd_ioctl *mci;
	int rv;

	if (target == mly->mly_controllerparam->initiator_id) {
		mly->mly_btl[bus][target].mb_flags = MLY_BTL_PROTECTED;
		return (EIO);
	}

	/* Don't re-scan if a scan is already in progress. */
	if ((mly->mly_btl[bus][target].mb_flags & MLY_BTL_SCANNING) != 0)
		return (EBUSY);

	/* Get a command. */
	if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
		return (rv);

	/* Set up the data buffer. */
	mc->mc_data = malloc(sizeof(union mly_devinfo),
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	mc->mc_flags |= MLY_CCB_DATAIN;
	mc->mc_complete = mly_complete_rescan;

	/*
	 * Build the ioctl.
	 */
	mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
	mci->opcode = MDACMD_IOCTL;
	mci->timeout = 30 | MLY_TIMEOUT_SECONDS;
	memset(&mci->param, 0, sizeof(mci->param));

	if (MLY_BUS_IS_VIRTUAL(mly, bus)) {
		mc->mc_length = sizeof(struct mly_ioctl_getlogdevinfovalid);
		mci->data_size = htole32(mc->mc_length);
		mci->sub_ioctl = MDACIOCTL_GETLOGDEVINFOVALID;
		_lto3l(MLY_LOGADDR(0, MLY_LOGDEV_ID(mly, bus, target)),
		    mci->addr);
	} else {
		mc->mc_length = sizeof(struct mly_ioctl_getphysdevinfovalid);
		mci->data_size = htole32(mc->mc_length);
		mci->sub_ioctl = MDACIOCTL_GETPHYSDEVINFOVALID;
		_lto3l(MLY_PHYADDR(0, bus, target, 0), mci->addr);
	}

	/*
	 * Dispatch the command.
	 */
	if ((rv = mly_ccb_map(mly, mc)) != 0) {
		free(mc->mc_data, M_DEVBUF);
		mly_ccb_free(mly, mc);
		return (rv);
	}

	mly->mly_btl[bus][target].mb_flags |= MLY_BTL_SCANNING;
	mly_ccb_enqueue(mly, mc);
	return (0);
}

/*
 * Handle the completion of a rescan operation.
 */
static void
mly_complete_rescan(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct mly_ioctl_getlogdevinfovalid *ldi;
	struct mly_ioctl_getphysdevinfovalid *pdi;
	struct mly_cmd_ioctl *mci;
	struct mly_btl btl, *btlp;
	struct scsipi_xfer_mode xm;
	int bus, target, rescan;
	u_int tmp;

	mly_ccb_unmap(mly, mc);

	/*
	 * Recover the bus and target from the command.  We need these even
	 * in the case where we don't have a useful response.
	 */
	mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
	tmp = _3ltol(mci->addr);
	rescan = 0;

	if (mci->sub_ioctl == MDACIOCTL_GETLOGDEVINFOVALID) {
		bus = MLY_LOGDEV_BUS(mly, MLY_LOGADDR_DEV(tmp));
		target = MLY_LOGDEV_TARGET(mly, MLY_LOGADDR_DEV(tmp));
	} else {
		bus = MLY_PHYADDR_CHANNEL(tmp);
		target = MLY_PHYADDR_TARGET(tmp);
	}

	btlp = &mly->mly_btl[bus][target];

	/* The default result is 'no device'. */
	memset(&btl, 0, sizeof(btl));
	btl.mb_flags = MLY_BTL_PROTECTED;
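
	/*
	 * Defaulting to MLY_BTL_PROTECTED means that a failed or bogus
	 * rescan leaves the device invisible to scsipi (I/O attempts get
	 * XS_SELTIMEOUT in mly_scsipi_request()) rather than leaving stale
	 * state behind.
	 */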

	/* If the rescan completed OK, we have possibly-new BTL data. */
	if (mc->mc_status != 0)
		goto out;

	if (mc->mc_length == sizeof(*ldi)) {
		ldi = (struct mly_ioctl_getlogdevinfovalid *)mc->mc_data;
		tmp = le32toh(ldi->logical_device_number);

		if (MLY_LOGDEV_BUS(mly, tmp) != bus ||
		    MLY_LOGDEV_TARGET(mly, tmp) != target) {
#ifdef MLYDEBUG
			printf("%s: WARNING: BTL rescan (logical) for %d:%d "
			    "returned data for %d:%d instead\n",
			    mly->mly_dv.dv_xname, bus, target,
			    MLY_LOGDEV_BUS(mly, tmp),
			    MLY_LOGDEV_TARGET(mly, tmp));
#endif
			goto out;
		}

		btl.mb_flags = MLY_BTL_LOGICAL | MLY_BTL_TQING;
		btl.mb_type = ldi->raid_level;
		btl.mb_state = ldi->state;
	} else if (mc->mc_length == sizeof(*pdi)) {
		pdi = (struct mly_ioctl_getphysdevinfovalid *)mc->mc_data;

		if (pdi->channel != bus || pdi->target != target) {
#ifdef MLYDEBUG
			printf("%s: WARNING: BTL rescan (physical) for %d:%d "
			    "returned data for %d:%d instead\n",
			    mly->mly_dv.dv_xname,
			    bus, target, pdi->channel, pdi->target);
#endif
			goto out;
		}

		btl.mb_flags = MLY_BTL_PHYSICAL;
		btl.mb_type = MLY_DEVICE_TYPE_PHYSICAL;
		btl.mb_state = pdi->state;
		btl.mb_speed = pdi->speed;
		btl.mb_width = pdi->width;

		if (pdi->state != MLY_DEVICE_STATE_UNCONFIGURED)
			btl.mb_flags |= MLY_BTL_PROTECTED;
		if (pdi->command_tags != 0)
			btl.mb_flags |= MLY_BTL_TQING;
	} else {
		printf("%s: BTL rescan result invalid\n", mly->mly_dv.dv_xname);
		goto out;
	}

	/* Decide whether we need to rescan the device. */
	if (btl.mb_flags != btlp->mb_flags ||
	    btl.mb_speed != btlp->mb_speed ||
	    btl.mb_width != btlp->mb_width)
		rescan = 1;

 out:
	*btlp = btl;

	if (rescan && (btl.mb_flags & MLY_BTL_PROTECTED) == 0) {
		xm.xm_target = target;
		mly_get_xfer_mode(mly, bus, &xm);
		/* XXX SCSI mid-layer rescan goes here. */
	}

	/* Wake anybody waiting on the device to be rescanned. */
	wakeup(btlp);

	free(mc->mc_data, M_DEVBUF);
	mly_ccb_free(mly, mc);
}

/*
 * Get the current health status and set the 'next event' counter to suit.
 */
static int
mly_get_eventstatus(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;
	struct mly_health_status *mh;
	int rv;

	/* Build the gethealthstatus ioctl and send it. */
	memset(&mci, 0, sizeof(mci));
	mh = NULL;
	mci.sub_ioctl = MDACIOCTL_GETHEALTHSTATUS;

	rv = mly_ioctl(mly, &mci, (void *)&mh, sizeof(*mh), NULL, NULL);
	if (rv)
		return (rv);

	/* Get the event counter. */
	mly->mly_event_change = le32toh(mh->change_counter);
	mly->mly_event_waiting = le32toh(mh->next_event);
	mly->mly_event_counter = le32toh(mh->next_event);

	/* Save the health status into the memory mailbox. */
	memcpy(&mly->mly_mmbox->mmm_health.status, mh, sizeof(*mh));

	bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
	    offsetof(struct mly_mmbox, mmm_health),
	    sizeof(mly->mly_mmbox->mmm_health),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	free(mh, M_DEVBUF);
	return (0);
}

/*
 * Enable memory mailbox mode.
 */
static int
mly_enable_mmbox(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;
	u_int8_t *sp;
	u_int64_t tmp;
	int rv;

	/* Build the ioctl and send it. */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_SETMEMORYMAILBOX;
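
	/*
	 * All three regions handed to the controller below are members of
	 * the one physically contiguous struct mly_mmbox allocated at
	 * attach time, so each address is a fixed offset from
	 * mly_mmbox_busaddr.
	 */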

	/* Set buffer addresses. */
	tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_command);
	mci.param.setmemorymailbox.command_mailbox_physaddr = htole64(tmp);

	tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_status);
	mci.param.setmemorymailbox.status_mailbox_physaddr = htole64(tmp);

	tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_health);
	mci.param.setmemorymailbox.health_buffer_physaddr = htole64(tmp);

	/* Set buffer sizes - abuse of data_size field is revolting. */
	sp = (u_int8_t *)&mci.data_size;
	sp[0] = (sizeof(union mly_cmd_packet) * MLY_MMBOX_COMMANDS) >> 10;
	sp[1] = (sizeof(union mly_status_packet) * MLY_MMBOX_STATUS) >> 10;
	mci.param.setmemorymailbox.health_buffer_size =
	    sizeof(union mly_health_region) >> 10;

	rv = mly_ioctl(mly, &mci, NULL, 0, NULL, NULL);
	if (rv)
		return (rv);

	mly->mly_state |= MLY_STATE_MMBOX_ACTIVE;
	return (0);
}

/*
 * Flush all pending I/O from the controller.
 */
static int
mly_flush(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;

	/* Build the ioctl. */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_FLUSHDEVICEDATA;
	mci.param.deviceoperation.operation_device =
	    MLY_OPDEVICE_PHYSICAL_CONTROLLER;

	/* Pass it off to the controller. */
	return (mly_ioctl(mly, &mci, NULL, 0, NULL, NULL));
}

/*
 * Perform an ioctl command.
 *
 * If (data) is not NULL, the command involves a data transfer.  If
 * (*data) is not NULL, data is transferred to the controller; if (*data)
 * is NULL, the command transfers data from the controller, and we will
 * allocate a buffer for it.
 */
static int
mly_ioctl(struct mly_softc *mly, struct mly_cmd_ioctl *ioctl, void **data,
	  size_t datasize, void *sense_buffer,
	  size_t *sense_length)
{
	struct mly_ccb *mc;
	struct mly_cmd_ioctl *mci;
	u_int8_t status;
	int rv;

	mc = NULL;
	if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
		goto bad;

	/*
	 * Copy the ioctl structure, but save some important fields and then
	 * fixup.
	 */
	mci = &mc->mc_packet->ioctl;
	ioctl->sense_buffer_address = htole64(mci->sense_buffer_address);
	ioctl->maximum_sense_size = mci->maximum_sense_size;
	*mci = *ioctl;
	mci->opcode = MDACMD_IOCTL;
	mci->timeout = 30 | MLY_TIMEOUT_SECONDS;

	/* Handle the data buffer. */
	if (data != NULL) {
		if (*data == NULL) {
			/* Allocate data buffer. */
			mc->mc_data = malloc(datasize, M_DEVBUF, M_NOWAIT);
			mc->mc_flags |= MLY_CCB_DATAIN;
		} else {
			mc->mc_data = *data;
			mc->mc_flags |= MLY_CCB_DATAOUT;
		}
		mc->mc_length = datasize;
		mc->mc_packet->generic.data_size = htole32(datasize);
	}

	/* Run the command. */
	if (datasize > 0)
		if ((rv = mly_ccb_map(mly, mc)) != 0)
			goto bad;
	rv = mly_ccb_poll(mly, mc, 30000);
	if (datasize > 0)
		mly_ccb_unmap(mly, mc);
	if (rv != 0)
		goto bad;

	/* Clean up and return any data. */
	status = mc->mc_status;

	if (status != 0)
		printf("mly_ioctl: command status %d\n", status);

	if (mc->mc_sense > 0 && sense_buffer != NULL) {
		memcpy(sense_buffer, mc->mc_packet, mc->mc_sense);
		*sense_length = mc->mc_sense;
		goto bad;
	}

	/* Should we return a data pointer? */
	if (data != NULL && *data == NULL)
		*data = mc->mc_data;

	/* Command completed OK. */
	rv = (status != 0 ? EIO : 0);
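
	/* Fall through: the cleanup path below is shared with success. */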

 bad:
	if (mc != NULL) {
		/* Do we need to free a data buffer we allocated? */
		if (rv != 0 && mc->mc_data != NULL &&
		    (data == NULL || *data == NULL))
			free(mc->mc_data, M_DEVBUF);
		mly_ccb_free(mly, mc);
	}

	return (rv);
}

/*
 * Check for event(s) outstanding in the controller.
 */
static void
mly_check_event(struct mly_softc *mly)
{

	bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
	    offsetof(struct mly_mmbox, mmm_health),
	    sizeof(mly->mly_mmbox->mmm_health),
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

	/*
	 * The controller may have updated the health status information, so
	 * check for it here.  Note that the counters are all in host
	 * memory, so this check is very cheap.  Also note that we depend on
	 * this check being made regularly: it runs from the periodic thread
	 * and again on completion of each event fetch.
	 */
	if (le32toh(mly->mly_mmbox->mmm_health.status.change_counter) !=
	    mly->mly_event_change) {
		mly->mly_event_change =
		    le32toh(mly->mly_mmbox->mmm_health.status.change_counter);
		mly->mly_event_waiting =
		    le32toh(mly->mly_mmbox->mmm_health.status.next_event);

		/* Wake up anyone that might be interested in this. */
		wakeup(&mly->mly_event_change);
	}

	bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
	    offsetof(struct mly_mmbox, mmm_health),
	    sizeof(mly->mly_mmbox->mmm_health),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	if (mly->mly_event_counter != mly->mly_event_waiting)
		mly_fetch_event(mly);
}

/*
 * Fetch one event from the controller.  If we fail due to resource
 * starvation, we'll be retried the next time a command completes.
 */
static void
mly_fetch_event(struct mly_softc *mly)
{
	struct mly_ccb *mc;
	struct mly_cmd_ioctl *mci;
	int s;
	u_int32_t event;

	/* Get a command. */
	if (mly_ccb_alloc(mly, &mc))
		return;

	/* Set up the data buffer. */
	mc->mc_data = malloc(sizeof(struct mly_event), M_DEVBUF,
	    M_NOWAIT|M_ZERO);

	mc->mc_length = sizeof(struct mly_event);
	mc->mc_flags |= MLY_CCB_DATAIN;
	mc->mc_complete = mly_complete_event;

	/*
	 * Get an event number to fetch.  It's possible that we've raced
	 * with another context for the last event, in which case there will
	 * be no more events.
	 */
	s = splbio();
	if (mly->mly_event_counter == mly->mly_event_waiting) {
		splx(s);
		free(mc->mc_data, M_DEVBUF);
		mly_ccb_free(mly, mc);
		return;
	}
	event = mly->mly_event_counter++;
	splx(s);

	/*
	 * Build the ioctl.
	 *
	 * At this point we are committed to sending this request, as it
	 * will be the only one constructed for this particular event
	 * number.
	 */
	mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
	mci->opcode = MDACMD_IOCTL;
	mci->data_size = htole32(sizeof(struct mly_event));
	_lto3l(MLY_PHYADDR(0, 0, (event >> 16) & 0xff, (event >> 24) & 0xff),
	    mci->addr);
	mci->timeout = 30 | MLY_TIMEOUT_SECONDS;
	mci->sub_ioctl = MDACIOCTL_GETEVENT;
	mci->param.getevent.sequence_number_low = htole16(event & 0xffff);
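
	/*
	 * Note the unusual addressing above: the 32-bit event sequence
	 * number is split between the getevent parameter (low 16 bits) and
	 * the target/LUN fields of the 3-byte addr (bits 16-31).
	 */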

	/*
	 * Submit the command.
	 */
	if (mly_ccb_map(mly, mc) != 0)
		goto bad;
	mly_ccb_enqueue(mly, mc);
	return;

 bad:
	printf("%s: couldn't fetch event %u\n", mly->mly_dv.dv_xname, event);
	free(mc->mc_data, M_DEVBUF);
	mly_ccb_free(mly, mc);
}

/*
 * Handle the completion of an event poll.
 */
static void
mly_complete_event(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct mly_event *me;

	me = (struct mly_event *)mc->mc_data;
	mly_ccb_unmap(mly, mc);
	mly_ccb_free(mly, mc);

	/* If the event was successfully fetched, process it. */
	if (mc->mc_status == SCSI_OK)
		mly_process_event(mly, me);
	else
		printf("%s: unable to fetch event; status = 0x%x\n",
		    mly->mly_dv.dv_xname, mc->mc_status);

	free(me, M_DEVBUF);

	/* Check for another event. */
	mly_check_event(mly);
}

/*
 * Process a controller event.  Called with interrupts blocked (i.e., at
 * interrupt time).
 */
static void
mly_process_event(struct mly_softc *mly, struct mly_event *me)
{
	struct scsi_sense_data *ssd;
	int bus, target, event, class, action;
	const char *fp, *tp;

	ssd = (struct scsi_sense_data *)&me->sense[0];

	/*
	 * Errors can be reported using vendor-unique sense data.  In this
	 * case, the event code will be 0x1c (Request sense data present),
	 * the sense key will be 0x09 (vendor specific), the MSB of the ASC
	 * will be set, and the actual event code will be a 16-bit value
	 * comprised of the ASCQ (low byte) and the low seven bits of the
	 * ASC (low seven bits of the high byte).
	 */
	if (le32toh(me->code) == 0x1c &&
	    SSD_SENSE_KEY(ssd->flags) == SKEY_VENDOR_SPECIFIC &&
	    (ssd->asc & 0x80) != 0) {
		event = ((int)(ssd->asc & ~0x80) << 8) +
		    ssd->ascq;
	} else
		event = le32toh(me->code);

	/* Look up event, get codes. */
	fp = mly_describe_code(mly_table_event, event);

	/* Quiet event? */
	class = fp[0];
#ifdef notyet
	if (isupper(class) && bootverbose)
		class = tolower(class);
#endif

	/* Get action code, text string. */
	action = fp[1];
	tp = fp + 3;

	/*
	 * Print some information about the event.
	 *
	 * This code uses a table derived from the corresponding portion of
	 * the Linux driver, and thus the parser is very similar.
	 */
	switch (class) {
	case 'p':
		/*
		 * Error on physical drive.
		 */
		printf("%s: physical device %d:%d %s\n", mly->mly_dv.dv_xname,
		    me->channel, me->target, tp);
		if (action == 'r')
			mly->mly_btl[me->channel][me->target].mb_flags |=
			    MLY_BTL_RESCAN;
		break;

	case 'l':
	case 'm':
		/*
		 * Error on logical unit, or message about logical unit.
		 */
		bus = MLY_LOGDEV_BUS(mly, me->lun);
		target = MLY_LOGDEV_TARGET(mly, me->lun);
		printf("%s: logical device %d:%d %s\n", mly->mly_dv.dv_xname,
		    bus, target, tp);
		if (action == 'r')
			mly->mly_btl[bus][target].mb_flags |= MLY_BTL_RESCAN;
		break;
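
	/*
	 * (The event table strings encode the class in fp[0] and the
	 * action in fp[1], with the message text starting at fp + 3; an
	 * entry with class 'p' and action 'r', for example, is a
	 * physical-drive error that requests a rescan, as handled above.)
	 */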

	case 's':
		/*
		 * Report of sense data.
		 */
		if ((SSD_SENSE_KEY(ssd->flags) == SKEY_NO_SENSE ||
		     SSD_SENSE_KEY(ssd->flags) == SKEY_NOT_READY) &&
		    ssd->asc == 0x04 &&
		    (ssd->ascq == 0x01 ||
		     ssd->ascq == 0x02)) {
			/* Ignore NO_SENSE or NOT_READY in one case. */
			break;
		}

		/*
		 * XXX Should translate this if SCSIVERBOSE.
		 */
		printf("%s: physical device %d:%d %s\n", mly->mly_dv.dv_xname,
		    me->channel, me->target, tp);
		printf("%s: sense key %d asc %02x ascq %02x\n",
		    mly->mly_dv.dv_xname, SSD_SENSE_KEY(ssd->flags),
		    ssd->asc, ssd->ascq);
		printf("%s: info %x%x%x%x csi %x%x%x%x\n",
		    mly->mly_dv.dv_xname, ssd->info[0], ssd->info[1],
		    ssd->info[2], ssd->info[3], ssd->csi[0],
		    ssd->csi[1], ssd->csi[2],
		    ssd->csi[3]);
		if (action == 'r')
			mly->mly_btl[me->channel][me->target].mb_flags |=
			    MLY_BTL_RESCAN;
		break;

	case 'e':
		printf("%s: ", mly->mly_dv.dv_xname);
		printf(tp, me->target, me->lun);
		break;

	case 'c':
		printf("%s: controller %s\n", mly->mly_dv.dv_xname, tp);
		break;

	case '?':
		printf("%s: %s - %d\n", mly->mly_dv.dv_xname, tp, event);
		break;

	default:
		/* Probably a 'noisy' event being ignored. */
		break;
	}
}

/*
 * Create the monitoring thread.  Called after the standard kernel threads
 * have been created.
 */
static void
mly_thread_create(void *cookie)
{
	struct mly_softc *mly;
	int rv;

	mly = cookie;

	rv = kthread_create1(mly_thread, mly, &mly->mly_thread, "%s",
	    mly->mly_dv.dv_xname);
	if (rv != 0)
		printf("%s: unable to create thread (%d)\n",
		    mly->mly_dv.dv_xname, rv);
}

/*
 * Perform periodic activities.
 */
static void
mly_thread(void *cookie)
{
	struct mly_softc *mly;
	struct mly_btl *btl;
	int s, bus, target, done;

	mly = (struct mly_softc *)cookie;

	for (;;) {
		/* Check for new events. */
		mly_check_event(mly);

		/* Re-scan up to 1 device. */
		s = splbio();
		done = 0;
		for (bus = 0; bus < mly->mly_nchans && !done; bus++) {
			for (target = 0; target < MLY_MAX_TARGETS; target++) {
				/* Perform device rescan? */
				btl = &mly->mly_btl[bus][target];
				if ((btl->mb_flags & MLY_BTL_RESCAN) != 0) {
					btl->mb_flags ^= MLY_BTL_RESCAN;
					mly_scan_btl(mly, bus, target);
					done = 1;
					break;
				}
			}
		}
		splx(s);

		/* Sleep for N seconds. */
		tsleep(mly_thread, PWAIT, "mlyzzz",
		    hz * MLY_PERIODIC_INTERVAL);
	}
}

/*
 * Submit a command to the controller and poll on completion.  Return
 * non-zero on timeout.
 */
static int
mly_ccb_poll(struct mly_softc *mly, struct mly_ccb *mc, int timo)
{
	int rv;

	if ((rv = mly_ccb_submit(mly, mc)) != 0)
		return (rv);

	for (timo *= 10; timo != 0; timo--) {
		if ((mc->mc_flags & MLY_CCB_COMPLETE) != 0)
			break;
		mly_intr(mly);
		DELAY(100);
	}

	return (timo == 0);
}

/*
 * Submit a command to the controller and sleep on completion.  Return
 * non-zero on timeout.
 */
static int
mly_ccb_wait(struct mly_softc *mly, struct mly_ccb *mc, int timo)
{
	int rv, s;

	mly_ccb_enqueue(mly, mc);

	s = splbio();
	if ((mc->mc_flags & MLY_CCB_COMPLETE) != 0) {
		splx(s);
		return (0);
	}
	rv = tsleep(mc, PRIBIO, "mlywccb", timo * hz / 1000);
	splx(s);

	return (rv);
}
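
/*
 * Typical synchronous usage (sketch only; cf. mly_ioctl() above): allocate
 * a CCB and build the packet, then at attach/shutdown time poll:
 *
 *	rv = mly_ccb_poll(mly, mc, 30000);
 *
 * or, once interrupts are running, use mly_ccb_wait().  Both take a
 * timeout in milliseconds and return non-zero on timeout.
 */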

/*
 * If a CCB is specified, enqueue it.  Pull CCBs off the software queue in
 * the order that they were enqueued and try to submit their command blocks
 * to the controller for execution.
 */
void
mly_ccb_enqueue(struct mly_softc *mly, struct mly_ccb *mc)
{
	int s;

	s = splbio();

	if (mc != NULL)
		SIMPLEQ_INSERT_TAIL(&mly->mly_ccb_queue, mc, mc_link.simpleq);

	while ((mc = SIMPLEQ_FIRST(&mly->mly_ccb_queue)) != NULL) {
		if (mly_ccb_submit(mly, mc))
			break;
		SIMPLEQ_REMOVE_HEAD(&mly->mly_ccb_queue, mc_link.simpleq);
	}

	splx(s);
}

/*
 * Deliver a command to the controller.
 */
static int
mly_ccb_submit(struct mly_softc *mly, struct mly_ccb *mc)
{
	union mly_cmd_packet *pkt;
	int s, off;

	mc->mc_packet->generic.command_id = htole16(mc->mc_slot);

	bus_dmamap_sync(mly->mly_dmat, mly->mly_pkt_dmamap,
	    mc->mc_packetphys - mly->mly_pkt_busaddr,
	    sizeof(union mly_cmd_packet),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	s = splbio();

	/*
	 * Do we have to use the hardware mailbox?
	 */
	if ((mly->mly_state & MLY_STATE_MMBOX_ACTIVE) == 0) {
		/*
		 * Check to see if the controller is ready for us.
		 */
		if (mly_idbr_true(mly, MLY_HM_CMDSENT)) {
			splx(s);
			return (EBUSY);
		}

		/*
		 * It's ready, send the command.
		 */
		mly_outl(mly, mly->mly_cmd_mailbox,
		    (u_int64_t)mc->mc_packetphys & 0xffffffff);
		mly_outl(mly, mly->mly_cmd_mailbox + 4,
		    (u_int64_t)mc->mc_packetphys >> 32);
		mly_outb(mly, mly->mly_idbr, MLY_HM_CMDSENT);
	} else {
		pkt = &mly->mly_mmbox->mmm_command[mly->mly_mmbox_cmd_idx];
		off = (caddr_t)pkt - (caddr_t)mly->mly_mmbox;

		bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
		    off, sizeof(mly->mly_mmbox->mmm_command[0]),
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

		/* Check to see if the next index is free yet. */
		if (pkt->mmbox.flag != 0) {
			splx(s);
			return (EBUSY);
		}

		/* Copy in new command. */
		memcpy(pkt->mmbox.data, mc->mc_packet->mmbox.data,
		    sizeof(pkt->mmbox.data));

		/* Copy flag last. */
		pkt->mmbox.flag = mc->mc_packet->mmbox.flag;

		bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
		    off, sizeof(mly->mly_mmbox->mmm_command[0]),
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		/* Signal controller and update index. */
		mly_outb(mly, mly->mly_idbr, MLY_AM_CMDSENT);
		mly->mly_mmbox_cmd_idx =
		    (mly->mly_mmbox_cmd_idx + 1) % MLY_MMBOX_COMMANDS;
	}

	splx(s);
	return (0);
}

/*
 * Pick up completed commands from the controller and handle accordingly.
 */
int
mly_intr(void *cookie)
{
	struct mly_ccb *mc;
	union mly_status_packet *sp;
	u_int16_t slot;
	int forus, off;
	struct mly_softc *mly;

	mly = cookie;
	forus = 0;
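
	/*
	 * Completions arrive either via the hardware mailbox (one at a
	 * time; the only path until memory-mailbox mode is enabled) or via
	 * the memory-mailbox status ring.  Both paths feed
	 * mly_ccb_complete().
	 */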

	/*
	 * Pick up hardware-mailbox commands.
	 */
	if (mly_odbr_true(mly, MLY_HM_STSREADY)) {
		slot = mly_inw(mly, mly->mly_status_mailbox);

		if (slot < MLY_SLOT_MAX) {
			mc = mly->mly_ccbs + (slot - MLY_SLOT_START);
			mc->mc_status =
			    mly_inb(mly, mly->mly_status_mailbox + 2);
			mc->mc_sense =
			    mly_inb(mly, mly->mly_status_mailbox + 3);
			mc->mc_resid =
			    mly_inl(mly, mly->mly_status_mailbox + 4);

			mly_ccb_complete(mly, mc);
		} else {
			/* Slot 0xffff may mean "extremely bogus command". */
			printf("%s: got HM completion for illegal slot %u\n",
			    mly->mly_dv.dv_xname, slot);
		}

		/* Unconditionally acknowledge status. */
		mly_outb(mly, mly->mly_odbr, MLY_HM_STSREADY);
		mly_outb(mly, mly->mly_idbr, MLY_HM_STSACK);
		forus = 1;
	}

	/*
	 * Pick up memory-mailbox commands.
	 */
	if (mly_odbr_true(mly, MLY_AM_STSREADY)) {
		for (;;) {
			sp = &mly->mly_mmbox->mmm_status[mly->mly_mmbox_sts_idx];
			off = (caddr_t)sp - (caddr_t)mly->mly_mmbox;

			bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
			    off, sizeof(mly->mly_mmbox->mmm_status[0]),
			    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

			/* Check for more status. */
			if (sp->mmbox.flag == 0)
				break;

			/* Get slot number. */
			slot = le16toh(sp->status.command_id);
			if (slot < MLY_SLOT_MAX) {
				mc = mly->mly_ccbs + (slot - MLY_SLOT_START);
				mc->mc_status = sp->status.status;
				mc->mc_sense = sp->status.sense_length;
				mc->mc_resid = le32toh(sp->status.residue);
				mly_ccb_complete(mly, mc);
			} else {
				/*
				 * Slot 0xffff may mean "extremely bogus
				 * command".
				 */
				printf("%s: got AM completion for illegal "
				    "slot %u at %d\n", mly->mly_dv.dv_xname,
				    slot, mly->mly_mmbox_sts_idx);
			}

			/* Clear and move to next index. */
			sp->mmbox.flag = 0;
			mly->mly_mmbox_sts_idx =
			    (mly->mly_mmbox_sts_idx + 1) % MLY_MMBOX_STATUS;
		}

		/* Acknowledge that we have collected status value(s). */
		mly_outb(mly, mly->mly_odbr, MLY_AM_STSREADY);
		forus = 1;
	}

	/*
	 * Run the queue.
	 */
	if (forus && !SIMPLEQ_EMPTY(&mly->mly_ccb_queue))
		mly_ccb_enqueue(mly, NULL);

	return (forus);
}

/*
 * Process completed commands.
 */
static void
mly_ccb_complete(struct mly_softc *mly, struct mly_ccb *mc)
{
	void (*complete)(struct mly_softc *, struct mly_ccb *);

	bus_dmamap_sync(mly->mly_dmat, mly->mly_pkt_dmamap,
	    mc->mc_packetphys - mly->mly_pkt_busaddr,
	    sizeof(union mly_cmd_packet),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	complete = mc->mc_complete;
	mc->mc_flags |= MLY_CCB_COMPLETE;

	/*
	 * Call completion handler or wake up sleeping consumer.
	 */
	if (complete != NULL)
		(*complete)(mly, mc);
	else
		wakeup(mc);
}

/*
 * Allocate a command.
 */
int
mly_ccb_alloc(struct mly_softc *mly, struct mly_ccb **mcp)
{
	struct mly_ccb *mc;
	int s;

	s = splbio();
	mc = SLIST_FIRST(&mly->mly_ccb_free);
	if (mc != NULL)
		SLIST_REMOVE_HEAD(&mly->mly_ccb_free, mc_link.slist);
	splx(s);

	*mcp = mc;
	return (mc == NULL ? EAGAIN : 0);
}

/*
 * Release a command back to the freelist.
 */
void
mly_ccb_free(struct mly_softc *mly, struct mly_ccb *mc)
{
	int s;

	/*
	 * Clear out parts of the command that may cause confusion if a
	 * future consumer doesn't initialize them when the CCB is
	 * reallocated.
	 */
	mc->mc_data = NULL;
	mc->mc_flags = 0;
	mc->mc_complete = NULL;
	mc->mc_private = NULL;
	mc->mc_packet->generic.command_control = 0;

	/*
	 * By default, we set up to overwrite the command packet with sense
	 * information.
	 */
	mc->mc_packet->generic.sense_buffer_address =
	    htole64(mc->mc_packetphys);
	mc->mc_packet->generic.maximum_sense_size =
	    sizeof(union mly_cmd_packet);

	s = splbio();
	SLIST_INSERT_HEAD(&mly->mly_ccb_free, mc, mc_link.slist);
	splx(s);
}
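
/*
 * Because of the defaults set up in mly_ccb_free() above, sense data for
 * a failed command lands in the command packet itself; see the SCSI_CHECK
 * case in mly_scsipi_complete() and the sense copy in mly_ioctl(), both of
 * which read it from mc->mc_packet.
 */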

/*
 * Allocate and initialize command and packet structures.
 *
 * If the controller supports fewer than MLY_MAX_CCBS commands, limit our
 * allocation to that number.  If we don't yet know how many commands the
 * controller supports, allocate a very small set (suitable for
 * initialization purposes only).
 */
static int
mly_alloc_ccbs(struct mly_softc *mly)
{
	struct mly_ccb *mc;
	int i, rv;

	if (mly->mly_controllerinfo == NULL)
		mly->mly_ncmds = MLY_CCBS_RESV;
	else {
		i = le16toh(mly->mly_controllerinfo->maximum_parallel_commands);
		mly->mly_ncmds = min(MLY_MAX_CCBS, i);
	}

	/*
	 * Allocate enough space for all the command packets in one chunk
	 * and map them permanently into controller-visible space.
	 */
	rv = mly_dmamem_alloc(mly,
	    mly->mly_ncmds * sizeof(union mly_cmd_packet),
	    &mly->mly_pkt_dmamap, (caddr_t *)&mly->mly_pkt,
	    &mly->mly_pkt_busaddr, &mly->mly_pkt_seg);
	if (rv)
		return (rv);

	mly->mly_ccbs = malloc(sizeof(struct mly_ccb) * mly->mly_ncmds,
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	for (i = 0; i < mly->mly_ncmds; i++) {
		mc = mly->mly_ccbs + i;
		mc->mc_slot = MLY_SLOT_START + i;
		mc->mc_packet = mly->mly_pkt + i;
		mc->mc_packetphys = mly->mly_pkt_busaddr +
		    (i * sizeof(union mly_cmd_packet));

		rv = bus_dmamap_create(mly->mly_dmat, MLY_MAX_XFER,
		    MLY_MAX_SEGS, MLY_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &mc->mc_datamap);
		if (rv) {
			mly_release_ccbs(mly);
			return (rv);
		}

		mly_ccb_free(mly, mc);
	}

	return (0);
}

/*
 * Free all the storage held by commands.
 *
 * Must be called with all commands on the free list.
 */
static void
mly_release_ccbs(struct mly_softc *mly)
{
	struct mly_ccb *mc;

	/* Throw away command buffer DMA maps. */
	while (mly_ccb_alloc(mly, &mc) == 0)
		bus_dmamap_destroy(mly->mly_dmat, mc->mc_datamap);

	/* Release CCB storage. */
	free(mly->mly_ccbs, M_DEVBUF);

	/* Release the packet storage. */
	mly_dmamem_free(mly, mly->mly_ncmds * sizeof(union mly_cmd_packet),
	    mly->mly_pkt_dmamap, (caddr_t)mly->mly_pkt, &mly->mly_pkt_seg);
}

/*
 * Map a command into controller-visible space.
 */
static int
mly_ccb_map(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct mly_cmd_generic *gen;
	struct mly_sg_entry *sg;
	bus_dma_segment_t *ds;
	int flg, nseg, rv;

#ifdef DIAGNOSTIC
	/* Don't map more than once. */
	if ((mc->mc_flags & MLY_CCB_MAPPED) != 0)
		panic("mly_ccb_map: already mapped");
	mc->mc_flags |= MLY_CCB_MAPPED;

	/* Does the command have a data buffer? */
	if (mc->mc_data == NULL)
		panic("mly_ccb_map: no data buffer");
#endif

	rv = bus_dmamap_load(mly->mly_dmat, mc->mc_datamap, mc->mc_data,
	    mc->mc_length, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((mc->mc_flags & MLY_CCB_DATAIN) != 0 ?
	    BUS_DMA_READ : BUS_DMA_WRITE));
	if (rv != 0)
		return (rv);

	gen = &mc->mc_packet->generic;

	/*
	 * Can we use the transfer structure directly?
	 */
	if ((nseg = mc->mc_datamap->dm_nsegs) <= 2) {
		mc->mc_sgoff = -1;
		sg = &gen->transfer.direct.sg[0];
	} else {
		mc->mc_sgoff = (mc->mc_slot - MLY_SLOT_START) *
		    MLY_MAX_SEGS;
		sg = mly->mly_sg + mc->mc_sgoff;
		gen->command_control |= MLY_CMDCTL_EXTENDED_SG_TABLE;
		gen->transfer.indirect.entries[0] = htole16(nseg);
		gen->transfer.indirect.table_physaddr[0] =
		    htole64(mly->mly_sg_busaddr +
		    (mc->mc_sgoff * sizeof(struct mly_sg_entry)));
	}

	/*
	 * Fill the S/G table.
	 */
	for (ds = mc->mc_datamap->dm_segs; nseg != 0; nseg--, sg++, ds++) {
		sg->physaddr = htole64(ds->ds_addr);
		sg->length = htole64(ds->ds_len);
	}

	/*
	 * Sync up the data map.
	 */
	if ((mc->mc_flags & MLY_CCB_DATAIN) != 0)
		flg = BUS_DMASYNC_PREREAD;
	else /* if ((mc->mc_flags & MLY_CCB_DATAOUT) != 0) */ {
		gen->command_control |= MLY_CMDCTL_DATA_DIRECTION;
		flg = BUS_DMASYNC_PREWRITE;
	}

	bus_dmamap_sync(mly->mly_dmat, mc->mc_datamap, 0, mc->mc_length, flg);

	/*
	 * Sync up the chained S/G table, if we're using one.
	 */
	if (mc->mc_sgoff == -1)
		return (0);

	bus_dmamap_sync(mly->mly_dmat, mly->mly_sg_dmamap, mc->mc_sgoff,
	    MLY_SGL_SIZE, BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Unmap a command from controller-visible space.
 */
static void
mly_ccb_unmap(struct mly_softc *mly, struct mly_ccb *mc)
{
	int flg;

#ifdef DIAGNOSTIC
	if ((mc->mc_flags & MLY_CCB_MAPPED) == 0)
		panic("mly_ccb_unmap: not mapped");
	mc->mc_flags &= ~MLY_CCB_MAPPED;
#endif

	if ((mc->mc_flags & MLY_CCB_DATAIN) != 0)
		flg = BUS_DMASYNC_POSTREAD;
	else /* if ((mc->mc_flags & MLY_CCB_DATAOUT) != 0) */
		flg = BUS_DMASYNC_POSTWRITE;

	bus_dmamap_sync(mly->mly_dmat, mc->mc_datamap, 0, mc->mc_length, flg);
	bus_dmamap_unload(mly->mly_dmat, mc->mc_datamap);

	if (mc->mc_sgoff == -1)
		return;

	bus_dmamap_sync(mly->mly_dmat, mly->mly_sg_dmamap, mc->mc_sgoff,
	    MLY_SGL_SIZE, BUS_DMASYNC_POSTWRITE);
}

/*
 * Adjust the size of each I/O before it passes to the SCSI layer.
 */
static void
mly_scsipi_minphys(struct buf *bp)
{

	if (bp->b_bcount > MLY_MAX_XFER)
		bp->b_bcount = MLY_MAX_XFER;
	minphys(bp);
}

/*
 * Start a SCSI command.
 */
static void
mly_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
		   void *arg)
{
	struct mly_ccb *mc;
	struct mly_cmd_scsi_small *ss;
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct mly_softc *mly;
	struct mly_btl *btl;
	int s, tmp;

	mly = (void *)chan->chan_adapter->adapt_dev;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		btl = &mly->mly_btl[chan->chan_channel][periph->periph_target];
		s = splbio();
		tmp = btl->mb_flags;
		splx(s);
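
		/*
		 * (mb_flags can be updated from interrupt context when a
		 * rescan completes, hence the splbio() protection around
		 * taking the snapshot above.)
		 */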

		/*
		 * Check for I/O attempt to a protected or non-existent
		 * device.
		 */
		if ((tmp & MLY_BTL_PROTECTED) != 0) {
			xs->error = XS_SELTIMEOUT;
			scsipi_done(xs);
			break;
		}

#ifdef DIAGNOSTIC
		/* XXX Increase if/when we support large SCSI commands. */
		if (xs->cmdlen > MLY_CMD_SCSI_SMALL_CDB) {
			printf("%s: cmd too large\n", mly->mly_dv.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			break;
		}
#endif

		if (mly_ccb_alloc(mly, &mc)) {
			xs->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(xs);
			break;
		}

		/* Build the command. */
		mc->mc_data = xs->data;
		mc->mc_length = xs->datalen;
		mc->mc_complete = mly_scsipi_complete;
		mc->mc_private = xs;

		/* Build the packet for the controller. */
		ss = &mc->mc_packet->scsi_small;
		ss->opcode = MDACMD_SCSI;
#ifdef notdef
		/*
		 * XXX FreeBSD does this, but it doesn't fix anything,
		 * XXX and appears potentially harmful.
		 */
		ss->command_control |= MLY_CMDCTL_DISABLE_DISCONNECT;
#endif

		ss->data_size = htole32(xs->datalen);
		_lto3l(MLY_PHYADDR(0, chan->chan_channel,
		    periph->periph_target, periph->periph_lun), ss->addr);

		if (xs->timeout < 60 * 1000)
			ss->timeout = xs->timeout / 1000 |
			    MLY_TIMEOUT_SECONDS;
		else if (xs->timeout < 60 * 60 * 1000)
			ss->timeout = xs->timeout / (60 * 1000) |
			    MLY_TIMEOUT_MINUTES;
		else
			ss->timeout = xs->timeout / (60 * 60 * 1000) |
			    MLY_TIMEOUT_HOURS;

		ss->maximum_sense_size = sizeof(xs->sense);
		ss->cdb_length = xs->cmdlen;
		memcpy(ss->cdb, xs->cmd, xs->cmdlen);

		if (mc->mc_length != 0) {
			if ((xs->xs_control & XS_CTL_DATA_OUT) != 0)
				mc->mc_flags |= MLY_CCB_DATAOUT;
			else /* if ((xs->xs_control & XS_CTL_DATA_IN) != 0) */
				mc->mc_flags |= MLY_CCB_DATAIN;

			if (mly_ccb_map(mly, mc) != 0) {
				xs->error = XS_DRIVER_STUFFUP;
				mly_ccb_free(mly, mc);
				scsipi_done(xs);
				break;
			}
		}

		/*
		 * Give the command to the controller.
		 */
		if ((xs->xs_control & XS_CTL_POLL) != 0) {
			if (mly_ccb_poll(mly, mc, xs->timeout + 5000)) {
				xs->error = XS_REQUEUE;
				if (mc->mc_length != 0)
					mly_ccb_unmap(mly, mc);
				mly_ccb_free(mly, mc);
				scsipi_done(xs);
			}
		} else
			mly_ccb_enqueue(mly, mc);

		break;

	case ADAPTER_REQ_GROW_RESOURCES:
		/*
		 * Not supported.
		 */
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
		/*
		 * We can't change the transfer mode, but at least let
		 * scsipi know what the adapter has negotiated.
		 */
		mly_get_xfer_mode(mly, chan->chan_channel, arg);
		break;
	}
}
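
/*
 * (Worked example for the timeout conversion above: a 90000ms scsipi
 * timeout is under an hour, so it is encoded as 90000 / 60000 = 1 with
 * MLY_TIMEOUT_MINUTES or'ed in; the scheme packs a value and a unit into
 * the single timeout byte, rounding down.)
 */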
/*
 * Handle completion of a SCSI command.
 */
static void
mly_scsipi_complete(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct scsipi_xfer *xs;
	struct scsipi_channel *chan;
	struct scsipi_inquiry_data *inq;
	struct mly_btl *btl;
	int target, sl, s;
	const char *p;

	xs = mc->mc_private;
	xs->status = mc->mc_status;

	/*
	 * XXX The `resid' value as returned by the controller appears to
	 * be bogus, so we always set it to zero.  Is it perhaps the
	 * transfer count?
	 */
	xs->resid = 0;	/* mc->mc_resid; */

	if (mc->mc_length != 0)
		mly_ccb_unmap(mly, mc);

	switch (mc->mc_status) {
	case SCSI_OK:
		/*
		 * In order to report logical device type and status, we
		 * overwrite the result of the INQUIRY command to logical
		 * devices.
		 */
		if (xs->cmd->opcode == INQUIRY) {
			chan = xs->xs_periph->periph_channel;
			target = xs->xs_periph->periph_target;
			btl = &mly->mly_btl[chan->chan_channel][target];

			s = splbio();
			if ((btl->mb_flags & MLY_BTL_LOGICAL) != 0) {
				inq = (struct scsipi_inquiry_data *)xs->data;
				mly_padstr(inq->vendor, "MYLEX", 8);
				p = mly_describe_code(mly_table_device_type,
				    btl->mb_type);
				mly_padstr(inq->product, p, 16);
				p = mly_describe_code(mly_table_device_state,
				    btl->mb_state);
				mly_padstr(inq->revision, p, 4);
			}
			splx(s);
		}

		xs->error = XS_NOERROR;
		break;

	case SCSI_CHECK:
		sl = mc->mc_sense;
		if (sl > sizeof(xs->sense.scsi_sense))
			sl = sizeof(xs->sense.scsi_sense);
		memcpy(&xs->sense.scsi_sense, mc->mc_packet, sl);
		xs->error = XS_SENSE;
		break;

	case SCSI_BUSY:
	case SCSI_QUEUE_FULL:
		xs->error = XS_BUSY;
		break;

	default:
		printf("%s: unknown SCSI status 0x%x\n",
		    mly->mly_dv.dv_xname, xs->status);
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	mly_ccb_free(mly, mc);
	scsipi_done(xs);
}

/*
 * Notify scsipi about a target's transfer mode.
 */
static void
mly_get_xfer_mode(struct mly_softc *mly, int bus, struct scsipi_xfer_mode *xm)
{
	struct mly_btl *btl;
	int s;

	btl = &mly->mly_btl[bus][xm->xm_target];
	xm->xm_mode = 0;

	s = splbio();

	if ((btl->mb_flags & MLY_BTL_PHYSICAL) != 0) {
		if (btl->mb_speed == 0) {
			xm->xm_period = 0;
			xm->xm_offset = 0;
		} else {
			xm->xm_period = 12;		/* XXX */
			xm->xm_offset = 8;		/* XXX */
			xm->xm_mode |= PERIPH_CAP_SYNC;	/* XXX */
		}

		/* Accumulate width with any sync capability noted above. */
		switch (btl->mb_width) {
		case 32:
			xm->xm_mode |= PERIPH_CAP_WIDE32;
			break;
		case 16:
			xm->xm_mode |= PERIPH_CAP_WIDE16;
			break;
		default:
			break;
		}
	} else /* ((btl->mb_flags & MLY_BTL_LOGICAL) != 0) */ {
		xm->xm_mode = PERIPH_CAP_WIDE16 | PERIPH_CAP_SYNC;
		xm->xm_period = 12;
		xm->xm_offset = 8;
	}

	if ((btl->mb_flags & MLY_BTL_TQING) != 0)
		xm->xm_mode |= PERIPH_CAP_TQING;

	splx(s);

	scsipi_async_event(&mly->mly_chans[bus], ASYNC_EVENT_XFER_MODE, xm);
}

/*
 * ioctl hook; used here only to initiate low-level rescans.
 */
static int
mly_scsipi_ioctl(struct scsipi_channel *chan, u_long cmd, caddr_t data __unused,
    int flag __unused, struct proc *p __unused)
{
	struct mly_softc *mly;
	int rv;

	mly = (struct mly_softc *)chan->chan_adapter->adapt_dev;

	switch (cmd) {
	case SCBUSIOLLSCAN:
		mly_scan_channel(mly, chan->chan_channel);
		rv = 0;
		break;
	default:
		rv = ENOTTY;
		break;
	}

	return (rv);
}
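/*
 * Hypothetical userland usage of the rescan hook above: SCBUSIOLLSCAN is
 * issued against a scsibus device node and ends up in mly_scsipi_ioctl(),
 * which kicks off a low-level scan of that channel.  The device path and
 * error handling are illustrative only.
 */
#if 0
#include <sys/ioctl.h>
#include <sys/scsiio.h>
#include <fcntl.h>
#include <unistd.h>

	int fd;

	if ((fd = open("/dev/scsibus0", O_RDWR)) != -1) {
		(void)ioctl(fd, SCBUSIOLLSCAN, NULL);
		close(fd);
	}
#endif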
/*
 * Handshake with the firmware while the card is being initialized.
 */
static int
mly_fwhandshake(struct mly_softc *mly)
{
	u_int8_t error, param0, param1;
	int spinup;

	spinup = 0;

	/* Set HM_STSACK and let the firmware initialize. */
	mly_outb(mly, mly->mly_idbr, MLY_HM_STSACK);
	DELAY(1000);	/* too short? */

	/* If HM_STSACK is still true, the controller is initializing. */
	if (!mly_idbr_true(mly, MLY_HM_STSACK))
		return (0);

	printf("%s: controller initialization started\n",
	    mly->mly_dv.dv_xname);

	/*
	 * Spin waiting for initialization to finish, or for a message to
	 * be delivered.
	 */
	while (mly_idbr_true(mly, MLY_HM_STSACK)) {
		/* Check for a message. */
		if (!mly_error_valid(mly))
			continue;

		error = mly_inb(mly, mly->mly_error_status) & ~MLY_MSG_EMPTY;
		param0 = mly_inb(mly, mly->mly_cmd_mailbox);
		param1 = mly_inb(mly, mly->mly_cmd_mailbox + 1);

		switch (error) {
		case MLY_MSG_SPINUP:
			if (!spinup) {
				printf("%s: drive spinup in progress\n",
				    mly->mly_dv.dv_xname);
				spinup = 1;
			}
			break;

		case MLY_MSG_RACE_RECOVERY_FAIL:
			printf("%s: mirror race recovery failed - \n",
			    mly->mly_dv.dv_xname);
			printf("%s: one or more drives offline\n",
			    mly->mly_dv.dv_xname);
			break;

		case MLY_MSG_RACE_IN_PROGRESS:
			printf("%s: mirror race recovery in progress\n",
			    mly->mly_dv.dv_xname);
			break;

		case MLY_MSG_RACE_ON_CRITICAL:
			printf("%s: mirror race recovery on critical drive\n",
			    mly->mly_dv.dv_xname);
			break;

		case MLY_MSG_PARITY_ERROR:
			printf("%s: FATAL MEMORY PARITY ERROR\n",
			    mly->mly_dv.dv_xname);
			return (ENXIO);

		default:
			printf("%s: unknown initialization code 0x%x\n",
			    mly->mly_dv.dv_xname, error);
			break;
		}
	}

	return (0);
}

/*
 * Space-fill a character string.
 */
static void
mly_padstr(char *dst, const char *src, int len)
{

	while (len-- > 0) {
		if (*src != '\0')
			*dst++ = *src++;
		else
			*dst++ = ' ';
	}
}

/*
 * Allocate DMA safe memory.
 */
static int
mly_dmamem_alloc(struct mly_softc *mly, int size, bus_dmamap_t *dmamap,
    caddr_t *kva, bus_addr_t *paddr, bus_dma_segment_t *seg)
{
	int rseg, rv, state;

	state = 0;

	if ((rv = bus_dmamem_alloc(mly->mly_dmat, size, PAGE_SIZE, 0,
	    seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: dmamem_alloc = %d\n", mly->mly_dv.dv_xname, rv);
		goto bad;
	}

	state++;

	if ((rv = bus_dmamem_map(mly->mly_dmat, seg, 1, size, kva,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: dmamem_map = %d\n", mly->mly_dv.dv_xname, rv);
		goto bad;
	}

	state++;

	if ((rv = bus_dmamap_create(mly->mly_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, dmamap)) != 0) {
		printf("%s: dmamap_create = %d\n", mly->mly_dv.dv_xname, rv);
		goto bad;
	}

	state++;

	if ((rv = bus_dmamap_load(mly->mly_dmat, *dmamap, *kva, size,
	    NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: dmamap_load = %d\n", mly->mly_dv.dv_xname, rv);
		goto bad;
	}

	*paddr = (*dmamap)->dm_segs[0].ds_addr;
	memset(*kva, 0, size);
	return (0);

 bad:
	if (state > 2)
		bus_dmamap_destroy(mly->mly_dmat, *dmamap);
	if (state > 1)
		bus_dmamem_unmap(mly->mly_dmat, *kva, size);
	if (state > 0)
		bus_dmamem_free(mly->mly_dmat, seg, 1);

	return (rv);
}
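/*
 * Sketch of how callers pair the helpers above and below (hypothetical
 * local names; the real callers live in the attach path).  The returned
 * kva/paddr pair gives the kernel and controller views of one buffer.
 */
#if 0
	bus_dmamap_t map;
	bus_dma_segment_t seg;
	bus_addr_t paddr;
	caddr_t kva;

	if (mly_dmamem_alloc(mly, PAGE_SIZE, &map, &kva, &paddr, &seg) == 0) {
		/* ... hand paddr to the controller, use kva locally ... */
		mly_dmamem_free(mly, PAGE_SIZE, map, kva, &seg);
	}
#endif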
/*
 * Free DMA safe memory.
 */
static void
mly_dmamem_free(struct mly_softc *mly, int size, bus_dmamap_t dmamap,
    caddr_t kva, bus_dma_segment_t *seg)
{

	bus_dmamap_unload(mly->mly_dmat, dmamap);
	bus_dmamap_destroy(mly->mly_dmat, dmamap);
	bus_dmamem_unmap(mly->mly_dmat, kva, size);
	bus_dmamem_free(mly->mly_dmat, seg, 1);
}

/*
 * Accept an open operation on the control device.
 */
int
mlyopen(dev_t dev, int flag __unused, int mode __unused, struct lwp *l __unused)
{
	struct mly_softc *mly;

	if ((mly = device_lookup(&mly_cd, minor(dev))) == NULL)
		return (ENXIO);
	if ((mly->mly_state & MLY_STATE_INITOK) == 0)
		return (ENXIO);
	if ((mly->mly_state & MLY_STATE_OPEN) != 0)
		return (EBUSY);

	mly->mly_state |= MLY_STATE_OPEN;
	return (0);
}

/*
 * Accept the last close on the control device.
 */
int
mlyclose(dev_t dev, int flag __unused, int mode __unused,
    struct lwp *l __unused)
{
	struct mly_softc *mly;

	mly = device_lookup(&mly_cd, minor(dev));
	mly->mly_state &= ~MLY_STATE_OPEN;
	return (0);
}

/*
 * Handle control operations.
 */
int
mlyioctl(dev_t dev, u_long cmd, caddr_t data, int flag __unused,
    struct lwp *l __unused)
{
	struct mly_softc *mly;
	int rv;

	mly = device_lookup(&mly_cd, minor(dev));

	switch (cmd) {
	case MLYIO_COMMAND:
		if (securelevel >= 2)
			rv = EPERM;
		else
			rv = mly_user_command(mly, (void *)data);
		break;
	case MLYIO_HEALTH:
		rv = mly_user_health(mly, (void *)data);
		break;
	default:
		rv = ENOTTY;
		break;
	}

	return (rv);
}
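/*
 * Hypothetical userland sketch of driving MLYIO_COMMAND through the
 * control device.  The device path and mailbox fill-in are illustrative;
 * the structure fields match those consumed by mly_user_command() below.
 * A positive DataTransferLength requests a controller-to-host transfer,
 * a negative one host-to-controller.
 */
#if 0
#include <sys/ioctl.h>
#include <dev/pci/mlyio.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

	struct mly_user_command uc;
	char buf[1024];
	int fd;

	memset(&uc, 0, sizeof(uc));
	/* ... fill in uc.CommandMailbox with the desired controller op ... */
	uc.DataTransferBuffer = buf;
	uc.DataTransferLength = sizeof(buf);	/* controller-to-host */

	if ((fd = open("/dev/mly0", O_RDWR)) != -1 &&
	    ioctl(fd, MLYIO_COMMAND, &uc) == 0)
		printf("status 0x%x, residual %d\n",
		    uc.CommandStatus, uc.DataTransferLength);
#endif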
/*
 * Execute a command passed in from userspace.
 *
 * The control structure contains the actual command for the controller, as
 * well as the user-space data pointer and data size, and an optional sense
 * buffer size/pointer.  On completion, the data size is adjusted to the
 * command residual, and the sense buffer size to the size of the returned
 * sense data.
 */
static int
mly_user_command(struct mly_softc *mly, struct mly_user_command *uc)
{
	struct mly_ccb *mc;
	int rv, mapped;

	if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
		return (rv);

	mapped = 0;
	mc->mc_data = NULL;

	/*
	 * Handle data size/direction.
	 */
	if ((mc->mc_length = abs(uc->DataTransferLength)) != 0) {
		if (mc->mc_length > MAXPHYS) {
			rv = EINVAL;
			goto out;
		}

		mc->mc_data = malloc(mc->mc_length, M_DEVBUF, M_WAITOK);
		if (mc->mc_data == NULL) {
			rv = ENOMEM;
			goto out;
		}

		if (uc->DataTransferLength > 0) {
			mc->mc_flags |= MLY_CCB_DATAIN;
			memset(mc->mc_data, 0, mc->mc_length);
		}

		if (uc->DataTransferLength < 0) {
			mc->mc_flags |= MLY_CCB_DATAOUT;
			rv = copyin(uc->DataTransferBuffer, mc->mc_data,
			    mc->mc_length);
			if (rv != 0)
				goto out;
		}

		if ((rv = mly_ccb_map(mly, mc)) != 0)
			goto out;
		mapped = 1;
	}

	/* Copy in the command and execute it. */
	memcpy(mc->mc_packet, &uc->CommandMailbox, sizeof(uc->CommandMailbox));

	if ((rv = mly_ccb_wait(mly, mc, 60000)) != 0)
		goto out;

	/* Return the data to userspace. */
	if (uc->DataTransferLength > 0) {
		rv = copyout(mc->mc_data, uc->DataTransferBuffer,
		    mc->mc_length);
		if (rv != 0)
			goto out;
	}

	/* Return the sense buffer to userspace. */
	if (uc->RequestSenseLength > 0 && mc->mc_sense > 0) {
		rv = copyout(mc->mc_packet, uc->RequestSenseBuffer,
		    min(uc->RequestSenseLength, mc->mc_sense));
		if (rv != 0)
			goto out;
	}

	/* Return command results to userspace (caller will copy out). */
	uc->DataTransferLength = mc->mc_resid;
	uc->RequestSenseLength = min(uc->RequestSenseLength, mc->mc_sense);
	uc->CommandStatus = mc->mc_status;
	rv = 0;

 out:
	if (mapped)
		mly_ccb_unmap(mly, mc);
	if (mc->mc_data != NULL)
		free(mc->mc_data, M_DEVBUF);
	mly_ccb_free(mly, mc);

	return (rv);
}

/*
 * Return health status to userspace.  If the health change index in the
 * user structure does not match that currently exported by the controller,
 * we return the current status immediately.  Otherwise, we block until
 * either interrupted or new status is delivered.
 */
static int
mly_user_health(struct mly_softc *mly, struct mly_user_health *uh)
{
	struct mly_health_status mh;
	int rv, s;

	/* Fetch the user's snapshot of the health status. */
	rv = copyin(uh->HealthStatusBuffer, &mh, sizeof(mh));
	if (rv != 0)
		return (rv);

	/* Block until the health status changes, or we are interrupted. */
	s = splbio();
	if (mly->mly_event_change == mh.change_counter)
		rv = tsleep(&mly->mly_event_change, PRIBIO | PCATCH,
		    "mlyhealth", 0);
	splx(s);

	if (rv == 0) {
		/*
		 * Copy the controller's health status buffer out (there is
		 * a race here if it changes again).
		 */
		rv = copyout(&mly->mly_mmbox->mmm_health.status,
		    uh->HealthStatusBuffer,
		    sizeof(mly->mly_mmbox->mmm_health.status));
	}

	return (rv);
}
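/*
 * Hypothetical userland counterpart to mly_user_health() above: a monitor
 * passes in the last change counter it saw and the ioctl blocks until the
 * controller posts new status.  Assumes fd is an open descriptor for the
 * control device.
 */
#if 0
#include <sys/ioctl.h>
#include <dev/pci/mlyio.h>
#include <string.h>

	struct mly_user_health uh;
	struct mly_health_status mh;

	memset(&mh, 0, sizeof(mh));
	uh.HealthStatusBuffer = &mh;

	for (;;) {
		/* Blocks while mh.change_counter still matches. */
		if (ioctl(fd, MLYIO_HEALTH, &uh) != 0)
			break;
		/* ... act on the updated mly_health_status ... */
	}
#endif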