/*	$NetBSD: mly.c,v 1.27 2006/05/14 21:45:00 elad Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, Thor Lancelot Simon, and Eric Haszlakiewicz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2000, 2001 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from FreeBSD: mly.c,v 1.8 2001/07/14 00:12:22 msmith Exp
 */

/*
 * Driver for the Mylex AcceleRAID and eXtremeRAID family with v6 firmware.
 *
 * TODO:
 *
 * o Make mly->mly_btl a hash, then MLY_BTL_RESCAN becomes a SIMPLEQ.
 * o Handle FC and multiple LUNs.
 * o Fix mmbox usage.
 * o Fix transfer speed fudge.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mly.c,v 1.27 2006/05/14 21:45:00 elad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/scsiio.h>
#include <sys/kthread.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/mlyreg.h>
#include <dev/pci/mlyio.h>
#include <dev/pci/mlyvar.h>
#include <dev/pci/mly_tables.h>

static void	mly_attach(struct device *, struct device *, void *);
static int	mly_match(struct device *, struct cfdata *, void *);
static const	struct mly_ident *mly_find_ident(struct pci_attach_args *);
static int	mly_fwhandshake(struct mly_softc *);
static int	mly_flush(struct mly_softc *);
static int	mly_intr(void *);
static void	mly_shutdown(void *);

static int	mly_alloc_ccbs(struct mly_softc *);
static void	mly_check_event(struct mly_softc *);
static void	mly_complete_event(struct mly_softc *, struct mly_ccb *);
static void	mly_complete_rescan(struct mly_softc *, struct mly_ccb *);
static int	mly_dmamem_alloc(struct mly_softc *, int, bus_dmamap_t *,
		    caddr_t *, bus_addr_t *, bus_dma_segment_t *);
static void	mly_dmamem_free(struct mly_softc *, int, bus_dmamap_t,
		    caddr_t, bus_dma_segment_t *);
static int	mly_enable_mmbox(struct mly_softc *);
static void	mly_fetch_event(struct mly_softc *);
static int	mly_get_controllerinfo(struct mly_softc *);
static int	mly_get_eventstatus(struct mly_softc *);
static int	mly_ioctl(struct mly_softc *, struct mly_cmd_ioctl *,
		    void **, size_t, void *, size_t *);
static void	mly_padstr(char *, const char *, int);
static void	mly_process_event(struct mly_softc *, struct mly_event *);
static void	mly_release_ccbs(struct mly_softc *);
static int	mly_scan_btl(struct mly_softc *, int, int);
static void	mly_scan_channel(struct mly_softc *, int);
static void	mly_thread(void *);
static void	mly_thread_create(void *);

static int	mly_ccb_alloc(struct mly_softc *, struct mly_ccb **);
static void	mly_ccb_complete(struct mly_softc *, struct mly_ccb *);
static void	mly_ccb_enqueue(struct mly_softc *, struct mly_ccb *);
static void	mly_ccb_free(struct mly_softc *, struct mly_ccb *);
static int	mly_ccb_map(struct mly_softc *, struct mly_ccb *);
static int	mly_ccb_poll(struct mly_softc *, struct mly_ccb *, int);
static int	mly_ccb_submit(struct mly_softc *, struct mly_ccb *);
static void	mly_ccb_unmap(struct mly_softc *, struct mly_ccb *);
static int	mly_ccb_wait(struct mly_softc *, struct mly_ccb *, int);

static void	mly_get_xfer_mode(struct mly_softc *, int,
		    struct scsipi_xfer_mode *);
static void	mly_scsipi_complete(struct mly_softc *, struct mly_ccb *);
static int	mly_scsipi_ioctl(struct scsipi_channel *, u_long, caddr_t,
		    int, struct proc *);
static void	mly_scsipi_minphys(struct buf *);
static void	mly_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);

static int	mly_user_command(struct mly_softc *, struct mly_user_command *);
static int	mly_user_health(struct mly_softc *, struct mly_user_health *);

extern struct cfdriver mly_cd;

CFATTACH_DECL(mly, sizeof(struct mly_softc),
    mly_match, mly_attach, NULL, NULL);

dev_type_open(mlyopen);
dev_type_close(mlyclose);
dev_type_ioctl(mlyioctl);

const struct cdevsw mly_cdevsw = {
	mlyopen, mlyclose, noread, nowrite, mlyioctl,
	nostop, notty, nopoll, nommap, nokqfilter,
};

struct mly_ident {
	u_short	vendor;
	u_short	product;
	u_short	subvendor;
	u_short	subproduct;
	int	hwif;
	const char	*desc;
} static const mly_ident[] = {
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_EXTREMERAID,
		PCI_VENDOR_MYLEX,
		0x0040,
		MLY_HWIF_STRONGARM,
		"eXtremeRAID 2000"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_EXTREMERAID,
		PCI_VENDOR_MYLEX,
		0x0030,
		MLY_HWIF_STRONGARM,
		"eXtremeRAID 3000"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_ACCELERAID,
		PCI_VENDOR_MYLEX,
		0x0050,
		MLY_HWIF_I960RX,
		"AcceleRAID 352"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_ACCELERAID,
		PCI_VENDOR_MYLEX,
		0x0052,
		MLY_HWIF_I960RX,
		"AcceleRAID 170"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_ACCELERAID,
		PCI_VENDOR_MYLEX,
		0x0054,
		MLY_HWIF_I960RX,
		"AcceleRAID 160"
	},
};

static void	*mly_sdh;

/*
 * Try to find a `mly_ident' entry corresponding to this board.
 */
static const struct mly_ident *
mly_find_ident(struct pci_attach_args *pa)
{
	const struct mly_ident *mpi, *maxmpi;
	pcireg_t reg;

	mpi = mly_ident;
	maxmpi = mpi + sizeof(mly_ident) / sizeof(mly_ident[0]);

	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_I2O)
		return (NULL);

	for (; mpi < maxmpi; mpi++) {
		if (PCI_VENDOR(pa->pa_id) != mpi->vendor ||
		    PCI_PRODUCT(pa->pa_id) != mpi->product)
			continue;

		if (mpi->subvendor == 0x0000)
			return (mpi);

		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

		if (PCI_VENDOR(reg) == mpi->subvendor &&
		    PCI_PRODUCT(reg) == mpi->subproduct)
			return (mpi);
	}

	return (NULL);
}

/*
 * Match a supported board.
 */
static int
mly_match(struct device *parent, struct cfdata *cfdata, void *aux)
{

	return (mly_find_ident(aux) != NULL);
}

/*
 * Attach a supported board.
 */
static void
mly_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa;
	struct mly_softc *mly;
	struct mly_ioctl_getcontrollerinfo *mi;
	const struct mly_ident *ident;
	pci_chipset_tag_t pc;
	pci_intr_handle_t ih;
	bus_space_handle_t memh, ioh;
	bus_space_tag_t memt, iot;
	pcireg_t reg;
	const char *intrstr;
	int ior, memr, i, rv, state;
	struct scsipi_adapter *adapt;
	struct scsipi_channel *chan;

	mly = (struct mly_softc *)self;
	pa = aux;
	pc = pa->pa_pc;
	ident = mly_find_ident(pa);
	state = 0;

	mly->mly_dmat = pa->pa_dmat;
	mly->mly_hwif = ident->hwif;

	printf(": Mylex %s\n", ident->desc);

	/*
	 * Map the PCI register window.  Find the first I/O and the first
	 * memory base address register, if present.
	 */
	memr = -1;
	ior = -1;

	for (i = 0x10; i <= 0x14; i += 4) {
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, i);

		if (PCI_MAPREG_TYPE(reg) == PCI_MAPREG_TYPE_IO) {
			if (ior == -1 && PCI_MAPREG_IO_SIZE(reg) != 0)
				ior = i;
		} else {
			if (memr == -1 && PCI_MAPREG_MEM_SIZE(reg) != 0)
				memr = i;
		}
	}

	/* Prefer the memory-space mapping; fall back to I/O space. */
	if (memr != -1)
		if (pci_mapreg_map(pa, memr, PCI_MAPREG_TYPE_MEM, 0,
		    &memt, &memh, NULL, NULL))
			memr = -1;
	if (ior != -1)
		if (pci_mapreg_map(pa, ior, PCI_MAPREG_TYPE_IO, 0,
		    &iot, &ioh, NULL, NULL))
			ior = -1;

	if (memr != -1) {
		mly->mly_iot = memt;
		mly->mly_ioh = memh;
	} else if (ior != -1) {
		mly->mly_iot = iot;
		mly->mly_ioh = ioh;
	} else {
		printf("%s: can't map i/o or memory space\n", self->dv_xname);
		return;
	}

	/*
	 * Enable the device.
	 */
	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    reg | PCI_COMMAND_MASTER_ENABLE);

	/*
	 * Map and establish the interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf("%s: can't map interrupt\n", self->dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	mly->mly_ih = pci_intr_establish(pc, ih, IPL_BIO, mly_intr, mly);
	if (mly->mly_ih == NULL) {
		printf("%s: can't establish interrupt", self->dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", mly->mly_dv.dv_xname,
		    intrstr);

	/*
	 * Take care of interface-specific tasks.
	 */
	switch (mly->mly_hwif) {
	case MLY_HWIF_I960RX:
		mly->mly_doorbell_true = 0x00;
		mly->mly_cmd_mailbox = MLY_I960RX_COMMAND_MAILBOX;
		mly->mly_status_mailbox = MLY_I960RX_STATUS_MAILBOX;
		mly->mly_idbr = MLY_I960RX_IDBR;
		mly->mly_odbr = MLY_I960RX_ODBR;
		mly->mly_error_status = MLY_I960RX_ERROR_STATUS;
		mly->mly_interrupt_status = MLY_I960RX_INTERRUPT_STATUS;
		mly->mly_interrupt_mask = MLY_I960RX_INTERRUPT_MASK;
		break;

	case MLY_HWIF_STRONGARM:
		mly->mly_doorbell_true = 0xff;
		mly->mly_cmd_mailbox = MLY_STRONGARM_COMMAND_MAILBOX;
		mly->mly_status_mailbox = MLY_STRONGARM_STATUS_MAILBOX;
		mly->mly_idbr = MLY_STRONGARM_IDBR;
		mly->mly_odbr = MLY_STRONGARM_ODBR;
		mly->mly_error_status = MLY_STRONGARM_ERROR_STATUS;
		mly->mly_interrupt_status = MLY_STRONGARM_INTERRUPT_STATUS;
		mly->mly_interrupt_mask = MLY_STRONGARM_INTERRUPT_MASK;
		break;
	}

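	/*
	 * Note that both interface types speak the same doorbell/mailbox
	 * protocol; they differ only in register offsets and, presumably,
	 * in the polarity of the doorbell bits (mly_doorbell_true records
	 * which raw value reads as `true': 0x00 on i960RX boards, 0xff on
	 * StrongARM boards).
	 */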

	/*
	 * Allocate and map the scatter/gather lists.
	 */
	rv = mly_dmamem_alloc(mly, MLY_SGL_SIZE * MLY_MAX_CCBS,
	    &mly->mly_sg_dmamap, (caddr_t *)&mly->mly_sg,
	    &mly->mly_sg_busaddr, &mly->mly_sg_seg);
	if (rv) {
		printf("%s: unable to allocate S/G maps\n",
		    mly->mly_dv.dv_xname);
		goto bad;
	}
	state++;

	/*
	 * Allocate and map the memory mailbox.
	 */
	rv = mly_dmamem_alloc(mly, sizeof(struct mly_mmbox),
	    &mly->mly_mmbox_dmamap, (caddr_t *)&mly->mly_mmbox,
	    &mly->mly_mmbox_busaddr, &mly->mly_mmbox_seg);
	if (rv) {
		printf("%s: unable to allocate mailboxes\n",
		    mly->mly_dv.dv_xname);
		goto bad;
	}
	state++;

	/*
	 * Initialise per-controller queues.
	 */
	SLIST_INIT(&mly->mly_ccb_free);
	SIMPLEQ_INIT(&mly->mly_ccb_queue);

	/*
	 * Disable interrupts before we start talking to the controller.
	 */
	mly_outb(mly, mly->mly_interrupt_mask, MLY_INTERRUPT_MASK_DISABLE);

	/*
	 * Wait for the controller to come ready, handshaking with the
	 * firmware if required.  This is typically only necessary on
	 * platforms where the controller BIOS does not run.
	 */
	if (mly_fwhandshake(mly)) {
		printf("%s: unable to bring controller online\n",
		    mly->mly_dv.dv_xname);
		goto bad;
	}

	/*
	 * Allocate initial command buffers, obtain controller feature
	 * information, and then reallocate command buffers, since we'll
	 * then know how many we want.
	 */
	if (mly_alloc_ccbs(mly)) {
		printf("%s: unable to allocate CCBs\n",
		    mly->mly_dv.dv_xname);
		goto bad;
	}
	state++;
	if (mly_get_controllerinfo(mly)) {
		printf("%s: unable to retrieve controller info\n",
		    mly->mly_dv.dv_xname);
		goto bad;
	}
	mly_release_ccbs(mly);
	if (mly_alloc_ccbs(mly)) {
		printf("%s: unable to allocate CCBs\n",
		    mly->mly_dv.dv_xname);
		state--;
		goto bad;
	}

	/*
	 * Get the current event counter for health purposes, populate the
	 * initial health status buffer.
	 */
	if (mly_get_eventstatus(mly)) {
		printf("%s: unable to retrieve event status\n",
		    mly->mly_dv.dv_xname);
		goto bad;
	}

	/*
	 * Enable memory-mailbox mode.
	 */
	if (mly_enable_mmbox(mly)) {
		printf("%s: unable to enable memory mailbox\n",
		    mly->mly_dv.dv_xname);
		goto bad;
	}

	/*
	 * Print a little information about the controller.
	 */
	mi = mly->mly_controllerinfo;

	printf("%s: %d physical channel%s, firmware %d.%02d-%d-%02d "
	    "(%02d%02d%02d%02d), %dMB RAM\n", mly->mly_dv.dv_xname,
	    mi->physical_channels_present,
	    (mi->physical_channels_present) > 1 ? "s" : "",
	    mi->fw_major, mi->fw_minor, mi->fw_turn, mi->fw_build,
	    mi->fw_century, mi->fw_year, mi->fw_month, mi->fw_day,
	    le16toh(mi->memory_size));

	/*
	 * Register our `shutdownhook'.
	 */
	if (mly_sdh == NULL)
		mly_sdh = shutdownhook_establish(mly_shutdown, NULL);

	/*
	 * Clear any previous BTL information.  For each bus that scsipi
	 * wants to scan, we'll receive the SCBUSIOLLSCAN ioctl and retrieve
	 * all BTL info at that point.
	 */
	memset(&mly->mly_btl, 0, sizeof(mly->mly_btl));

	mly->mly_nchans = mly->mly_controllerinfo->physical_channels_present +
	    mly->mly_controllerinfo->virtual_channels_present;

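	/*
	 * Each physical channel corresponds to a real SCSI bus; the
	 * virtual channels that follow it are where the controller
	 * presents its logical (RAID) drives.  One scsipi channel is
	 * registered for each of them below.
	 */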

	/*
	 * Attach to scsipi.
	 */
	adapt = &mly->mly_adapt;
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &mly->mly_dv;
	adapt->adapt_nchannels = mly->mly_nchans;
	adapt->adapt_openings = mly->mly_ncmds - MLY_CCBS_RESV;
	adapt->adapt_max_periph = mly->mly_ncmds - MLY_CCBS_RESV;
	adapt->adapt_request = mly_scsipi_request;
	adapt->adapt_minphys = mly_scsipi_minphys;
	adapt->adapt_ioctl = mly_scsipi_ioctl;

	for (i = 0; i < mly->mly_nchans; i++) {
		chan = &mly->mly_chans[i];
		memset(chan, 0, sizeof(*chan));
		chan->chan_adapter = adapt;
		chan->chan_bustype = &scsi_bustype;
		chan->chan_channel = i;
		chan->chan_ntargets = MLY_MAX_TARGETS;
		chan->chan_nluns = MLY_MAX_LUNS;
		chan->chan_id = mly->mly_controllerparam->initiator_id;
		chan->chan_flags = SCSIPI_CHAN_NOSETTLE;
		config_found(&mly->mly_dv, chan, scsiprint);
	}

	/*
	 * Now enable interrupts...
	 */
	mly_outb(mly, mly->mly_interrupt_mask, MLY_INTERRUPT_MASK_ENABLE);

	/*
	 * Finally, create our monitoring thread.
	 */
	kthread_create(mly_thread_create, mly);

	mly->mly_state |= MLY_STATE_INITOK;
	return;

 bad:
	if (state > 2)
		mly_release_ccbs(mly);
	if (state > 1)
		mly_dmamem_free(mly, sizeof(struct mly_mmbox),
		    mly->mly_mmbox_dmamap, (caddr_t)mly->mly_mmbox,
		    &mly->mly_mmbox_seg);
	if (state > 0)
		mly_dmamem_free(mly, MLY_SGL_SIZE * MLY_MAX_CCBS,
		    mly->mly_sg_dmamap, (caddr_t)mly->mly_sg,
		    &mly->mly_sg_seg);
}

/*
 * Scan all possible devices on the specified channel.
 */
static void
mly_scan_channel(struct mly_softc *mly, int bus)
{
	int s, target;

	for (target = 0; target < MLY_MAX_TARGETS; target++) {
		s = splbio();
		if (!mly_scan_btl(mly, bus, target)) {
			tsleep(&mly->mly_btl[bus][target], PRIBIO, "mlyscan",
			    0);
		}
		splx(s);
	}
}

/*
 * Shut down all configured `mly' devices.
 */
static void
mly_shutdown(void *cookie)
{
	struct mly_softc *mly;
	int i;

	for (i = 0; i < mly_cd.cd_ndevs; i++) {
		if ((mly = device_lookup(&mly_cd, i)) == NULL)
			continue;

		if (mly_flush(mly))
			printf("%s: unable to flush cache\n",
			    mly->mly_dv.dv_xname);
	}
}

/*
 * Fill in the mly_controllerinfo and mly_controllerparam fields in the
 * softc.
 */
static int
mly_get_controllerinfo(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;
	int rv;

	/*
	 * Build the getcontrollerinfo ioctl and send it.
	 */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_GETCONTROLLERINFO;
	rv = mly_ioctl(mly, &mci, (void **)&mly->mly_controllerinfo,
	    sizeof(*mly->mly_controllerinfo), NULL, NULL);
	if (rv != 0)
		return (rv);

	/*
	 * Build the getcontrollerparameter ioctl and send it.
	 */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_GETCONTROLLERPARAMETER;
	rv = mly_ioctl(mly, &mci, (void **)&mly->mly_controllerparam,
	    sizeof(*mly->mly_controllerparam), NULL, NULL);

	return (rv);
}

/*
 * Rescan a device, possibly as a consequence of getting an event which
 * suggests that it may have changed.  Must be called with interrupts
 * blocked.
 */
static int
mly_scan_btl(struct mly_softc *mly, int bus, int target)
{
	struct mly_ccb *mc;
	struct mly_cmd_ioctl *mci;
	int rv;

	if (target == mly->mly_controllerparam->initiator_id) {
		mly->mly_btl[bus][target].mb_flags = MLY_BTL_PROTECTED;
		return (EIO);
	}

	/* Don't re-scan if a scan is already in progress. */
	if ((mly->mly_btl[bus][target].mb_flags & MLY_BTL_SCANNING) != 0)
		return (EBUSY);

	/* Get a command. */
	if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
		return (rv);

	/* Set up the data buffer. */
	mc->mc_data = malloc(sizeof(union mly_devinfo),
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	mc->mc_flags |= MLY_CCB_DATAIN;
	mc->mc_complete = mly_complete_rescan;

	/*
	 * Build the ioctl.
	 */
	mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
	mci->opcode = MDACMD_IOCTL;
	mci->timeout = 30 | MLY_TIMEOUT_SECONDS;
	memset(&mci->param, 0, sizeof(mci->param));

	if (MLY_BUS_IS_VIRTUAL(mly, bus)) {
		mc->mc_length = sizeof(struct mly_ioctl_getlogdevinfovalid);
		mci->data_size = htole32(mc->mc_length);
		mci->sub_ioctl = MDACIOCTL_GETLOGDEVINFOVALID;
		_lto3l(MLY_LOGADDR(0, MLY_LOGDEV_ID(mly, bus, target)),
		    mci->addr);
	} else {
		mc->mc_length = sizeof(struct mly_ioctl_getphysdevinfovalid);
		mci->data_size = htole32(mc->mc_length);
		mci->sub_ioctl = MDACIOCTL_GETPHYSDEVINFOVALID;
		_lto3l(MLY_PHYADDR(0, bus, target, 0), mci->addr);
	}

	/*
	 * Dispatch the command.
	 */
	if ((rv = mly_ccb_map(mly, mc)) != 0) {
		free(mc->mc_data, M_DEVBUF);
		mly_ccb_free(mly, mc);
		return (rv);
	}

	mly->mly_btl[bus][target].mb_flags |= MLY_BTL_SCANNING;
	mly_ccb_enqueue(mly, mc);
	return (0);
}

/*
 * Handle the completion of a rescan operation.
 */
static void
mly_complete_rescan(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct mly_ioctl_getlogdevinfovalid *ldi;
	struct mly_ioctl_getphysdevinfovalid *pdi;
	struct mly_cmd_ioctl *mci;
	struct mly_btl btl, *btlp;
	struct scsipi_xfer_mode xm;
	int bus, target, rescan;
	u_int tmp;

	mly_ccb_unmap(mly, mc);

	/*
	 * Recover the bus and target from the command.  We need these even
	 * in the case where we don't have a useful response.
	 */
	mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
	tmp = _3ltol(mci->addr);
	rescan = 0;

	if (mci->sub_ioctl == MDACIOCTL_GETLOGDEVINFOVALID) {
		bus = MLY_LOGDEV_BUS(mly, MLY_LOGADDR_DEV(tmp));
		target = MLY_LOGDEV_TARGET(mly, MLY_LOGADDR_DEV(tmp));
	} else {
		bus = MLY_PHYADDR_CHANNEL(tmp);
		target = MLY_PHYADDR_TARGET(tmp);
	}

	btlp = &mly->mly_btl[bus][target];

	/* The default result is 'no device'. */
	memset(&btl, 0, sizeof(btl));
	btl.mb_flags = MLY_BTL_PROTECTED;

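	/*
	 * (MLY_BTL_PROTECTED keeps scsipi away from the address: I/O
	 * attempts to protected devices fail with XS_SELTIMEOUT.)
	 */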

	/* If the rescan completed OK, we have possibly-new BTL data. */
	if (mc->mc_status != 0)
		goto out;

	if (mc->mc_length == sizeof(*ldi)) {
		ldi = (struct mly_ioctl_getlogdevinfovalid *)mc->mc_data;
		tmp = le32toh(ldi->logical_device_number);

		if (MLY_LOGDEV_BUS(mly, tmp) != bus ||
		    MLY_LOGDEV_TARGET(mly, tmp) != target) {
#ifdef MLYDEBUG
			printf("%s: WARNING: BTL rescan (logical) for %d:%d "
			    "returned data for %d:%d instead\n",
			    mly->mly_dv.dv_xname, bus, target,
			    MLY_LOGDEV_BUS(mly, tmp),
			    MLY_LOGDEV_TARGET(mly, tmp));
#endif
			goto out;
		}

		btl.mb_flags = MLY_BTL_LOGICAL | MLY_BTL_TQING;
		btl.mb_type = ldi->raid_level;
		btl.mb_state = ldi->state;
	} else if (mc->mc_length == sizeof(*pdi)) {
		pdi = (struct mly_ioctl_getphysdevinfovalid *)mc->mc_data;

		if (pdi->channel != bus || pdi->target != target) {
#ifdef MLYDEBUG
			printf("%s: WARNING: BTL rescan (physical) for %d:%d "
			    "returned data for %d:%d instead\n",
			    mly->mly_dv.dv_xname,
			    bus, target, pdi->channel, pdi->target);
#endif
			goto out;
		}

		btl.mb_flags = MLY_BTL_PHYSICAL;
		btl.mb_type = MLY_DEVICE_TYPE_PHYSICAL;
		btl.mb_state = pdi->state;
		btl.mb_speed = pdi->speed;
		btl.mb_width = pdi->width;

		if (pdi->state != MLY_DEVICE_STATE_UNCONFIGURED)
			btl.mb_flags |= MLY_BTL_PROTECTED;
		if (pdi->command_tags != 0)
			btl.mb_flags |= MLY_BTL_TQING;
	} else {
		printf("%s: BTL rescan result invalid\n", mly->mly_dv.dv_xname);
		goto out;
	}

	/* Decide whether we need to rescan the device. */
	if (btl.mb_flags != btlp->mb_flags ||
	    btl.mb_speed != btlp->mb_speed ||
	    btl.mb_width != btlp->mb_width)
		rescan = 1;

 out:
	*btlp = btl;

	if (rescan && (btl.mb_flags & MLY_BTL_PROTECTED) == 0) {
		xm.xm_target = target;
		mly_get_xfer_mode(mly, bus, &xm);
		/* XXX SCSI mid-layer rescan goes here. */
	}

	/* Wake anybody waiting on the device to be rescanned. */
	wakeup(btlp);

	free(mc->mc_data, M_DEVBUF);
	mly_ccb_free(mly, mc);
}

/*
 * Get the current health status and set the 'next event' counter to suit.
 */
static int
mly_get_eventstatus(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;
	struct mly_health_status *mh;
	int rv;

	/* Build the gethealthstatus ioctl and send it. */
	memset(&mci, 0, sizeof(mci));
	mh = NULL;
	mci.sub_ioctl = MDACIOCTL_GETHEALTHSTATUS;

	rv = mly_ioctl(mly, &mci, (void *)&mh, sizeof(*mh), NULL, NULL);
	if (rv)
		return (rv);

	/* Get the event counter. */
	mly->mly_event_change = le32toh(mh->change_counter);
	mly->mly_event_waiting = le32toh(mh->next_event);
	mly->mly_event_counter = le32toh(mh->next_event);

	/* Save the health status into the memory mailbox. */
	memcpy(&mly->mly_mmbox->mmm_health.status, mh, sizeof(*mh));

	bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
	    offsetof(struct mly_mmbox, mmm_health),
	    sizeof(mly->mly_mmbox->mmm_health),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	free(mh, M_DEVBUF);
	return (0);
}

/*
 * Enable memory mailbox mode.
 */
static int
mly_enable_mmbox(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;
	u_int8_t *sp;
	u_int64_t tmp;
	int rv;

	/* Build the ioctl and send it. */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_SETMEMORYMAILBOX;

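	/*
	 * The command, status and health regions all live within the
	 * single DMA-safe mly_mmbox allocation, so the physical addresses
	 * handed to the controller below are simply offsets from
	 * mly_mmbox_busaddr.
	 */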

	/* Set buffer addresses. */
	tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_command);
	mci.param.setmemorymailbox.command_mailbox_physaddr = htole64(tmp);

	tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_status);
	mci.param.setmemorymailbox.status_mailbox_physaddr = htole64(tmp);

	tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_health);
	mci.param.setmemorymailbox.health_buffer_physaddr = htole64(tmp);

	/* Set buffer sizes - abuse of data_size field is revolting. */
	sp = (u_int8_t *)&mci.data_size;
	sp[0] = (sizeof(union mly_cmd_packet) * MLY_MMBOX_COMMANDS) >> 10;
	sp[1] = (sizeof(union mly_status_packet) * MLY_MMBOX_STATUS) >> 10;
	mci.param.setmemorymailbox.health_buffer_size =
	    sizeof(union mly_health_region) >> 10;

	rv = mly_ioctl(mly, &mci, NULL, 0, NULL, NULL);
	if (rv)
		return (rv);

	mly->mly_state |= MLY_STATE_MMBOX_ACTIVE;
	return (0);
}

/*
 * Flush all pending I/O from the controller.
 */
static int
mly_flush(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;

	/* Build the ioctl. */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_FLUSHDEVICEDATA;
	mci.param.deviceoperation.operation_device =
	    MLY_OPDEVICE_PHYSICAL_CONTROLLER;

	/* Pass it off to the controller. */
	return (mly_ioctl(mly, &mci, NULL, 0, NULL, NULL));
}

/*
 * Perform an ioctl command.
 *
 * If (data) is not NULL, the command requires data transfer to the
 * controller.  If (*data) is NULL the command requires data transfer from
 * the controller, and we will allocate a buffer for it.
 */
static int
mly_ioctl(struct mly_softc *mly, struct mly_cmd_ioctl *ioctl, void **data,
    size_t datasize, void *sense_buffer, size_t *sense_length)
{
	struct mly_ccb *mc;
	struct mly_cmd_ioctl *mci;
	u_int8_t status;
	int rv;

	mc = NULL;
	if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
		goto bad;

	/*
	 * Copy the ioctl structure, but save some important fields and then
	 * fixup.
	 */
	mci = &mc->mc_packet->ioctl;
	ioctl->sense_buffer_address = htole64(mci->sense_buffer_address);
	ioctl->maximum_sense_size = mci->maximum_sense_size;
	*mci = *ioctl;
	mci->opcode = MDACMD_IOCTL;
	mci->timeout = 30 | MLY_TIMEOUT_SECONDS;

	/* Handle the data buffer. */
	if (data != NULL) {
		if (*data == NULL) {
			/* Allocate a data buffer. */
			mc->mc_data = malloc(datasize, M_DEVBUF, M_NOWAIT);
			mc->mc_flags |= MLY_CCB_DATAIN;
		} else {
			mc->mc_data = *data;
			mc->mc_flags |= MLY_CCB_DATAOUT;
		}
		mc->mc_length = datasize;
		mc->mc_packet->generic.data_size = htole32(datasize);
	}

	/* Run the command. */
	if (datasize > 0)
		if ((rv = mly_ccb_map(mly, mc)) != 0)
			goto bad;
	rv = mly_ccb_poll(mly, mc, 30000);
	if (datasize > 0)
		mly_ccb_unmap(mly, mc);
	if (rv != 0)
		goto bad;

	/* Clean up and return any data. */
	status = mc->mc_status;

	if (status != 0)
		printf("mly_ioctl: command status %d\n", status);

	if (mc->mc_sense > 0 && sense_buffer != NULL) {
		memcpy(sense_buffer, mc->mc_packet, mc->mc_sense);
		*sense_length = mc->mc_sense;
		goto bad;
	}

	/* Should we return a data pointer? */
	if (data != NULL && *data == NULL)
		*data = mc->mc_data;

	/* Command completed OK. */
	rv = (status != 0 ? EIO : 0);

 bad:
	if (mc != NULL) {
		/* Do we need to free a data buffer we allocated? */
		if (rv != 0 && mc->mc_data != NULL &&
		    (data == NULL || *data == NULL))
			free(mc->mc_data, M_DEVBUF);
		mly_ccb_free(mly, mc);
	}

	return (rv);
}

/*
 * Check for event(s) outstanding in the controller.
 */
static void
mly_check_event(struct mly_softc *mly)
{

	bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
	    offsetof(struct mly_mmbox, mmm_health),
	    sizeof(mly->mly_mmbox->mmm_health),
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

	/*
	 * The controller may have updated the health status information,
	 * so check for it here.  Note that the counters are all in host
	 * memory, so this check is very cheap.  Also note that we depend
	 * on this check being made at command completion time as well.
	 */
	if (le32toh(mly->mly_mmbox->mmm_health.status.change_counter) !=
	    mly->mly_event_change) {
		mly->mly_event_change =
		    le32toh(mly->mly_mmbox->mmm_health.status.change_counter);
		mly->mly_event_waiting =
		    le32toh(mly->mly_mmbox->mmm_health.status.next_event);

		/* Wake up anyone that might be interested in this. */
		wakeup(&mly->mly_event_change);
	}

	bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
	    offsetof(struct mly_mmbox, mmm_health),
	    sizeof(mly->mly_mmbox->mmm_health),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	if (mly->mly_event_counter != mly->mly_event_waiting)
		mly_fetch_event(mly);
}

/*
 * Fetch one event from the controller.  If we fail due to resource
 * starvation, we'll be retried the next time a command completes.
 */
static void
mly_fetch_event(struct mly_softc *mly)
{
	struct mly_ccb *mc;
	struct mly_cmd_ioctl *mci;
	int s;
	u_int32_t event;

	/* Get a command. */
	if (mly_ccb_alloc(mly, &mc))
		return;

	/* Set up the data buffer. */
	mc->mc_data = malloc(sizeof(struct mly_event), M_DEVBUF,
	    M_NOWAIT|M_ZERO);

	mc->mc_length = sizeof(struct mly_event);
	mc->mc_flags |= MLY_CCB_DATAIN;
	mc->mc_complete = mly_complete_event;

	/*
	 * Get an event number to fetch.  It's possible that we've raced
	 * with another context for the last event, in which case there will
	 * be no more events.
	 */
	s = splbio();
	if (mly->mly_event_counter == mly->mly_event_waiting) {
		splx(s);
		free(mc->mc_data, M_DEVBUF);
		mly_ccb_free(mly, mc);
		return;
	}
	event = mly->mly_event_counter++;
	splx(s);

	/*
	 * Build the ioctl.
	 *
	 * At this point we are committed to sending this request, as it
	 * will be the only one constructed for this particular event
	 * number.
	 */
	mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
	mci->opcode = MDACMD_IOCTL;
	mci->data_size = htole32(sizeof(struct mly_event));
	_lto3l(MLY_PHYADDR(0, 0, (event >> 16) & 0xff, (event >> 24) & 0xff),
	    mci->addr);
	mci->timeout = 30 | MLY_TIMEOUT_SECONDS;
	mci->sub_ioctl = MDACIOCTL_GETEVENT;
	mci->param.getevent.sequence_number_low = htole16(event & 0xffff);


	/*
	 * Submit the command.
	 */
	if (mly_ccb_map(mly, mc) != 0)
		goto bad;
	mly_ccb_enqueue(mly, mc);
	return;

 bad:
	printf("%s: couldn't fetch event %u\n", mly->mly_dv.dv_xname, event);
	free(mc->mc_data, M_DEVBUF);
	mly_ccb_free(mly, mc);
}

/*
 * Handle the completion of an event poll.
 */
static void
mly_complete_event(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct mly_event *me;

	me = (struct mly_event *)mc->mc_data;
	mly_ccb_unmap(mly, mc);
	mly_ccb_free(mly, mc);

	/* If the event was successfully fetched, process it. */
	if (mc->mc_status == SCSI_OK)
		mly_process_event(mly, me);
	else
		printf("%s: unable to fetch event; status = 0x%x\n",
		    mly->mly_dv.dv_xname, mc->mc_status);

	free(me, M_DEVBUF);

	/* Check for another event. */
	mly_check_event(mly);
}

/*
 * Process a controller event.  Called with interrupts blocked (i.e., at
 * interrupt time).
 */
static void
mly_process_event(struct mly_softc *mly, struct mly_event *me)
{
	struct scsi_sense_data *ssd;
	int bus, target, event, class, action;
	const char *fp, *tp;

	ssd = (struct scsi_sense_data *)&me->sense[0];

	/*
	 * Errors can be reported using vendor-unique sense data.  In this
	 * case, the event code will be 0x1c (Request sense data present),
	 * the sense key will be 0x09 (vendor specific), the MSB of the ASC
	 * will be set, and the actual event code will be a 16-bit value
	 * comprised of the ASCQ (low byte) and low seven bits of the ASC
	 * (low seven bits of the high byte).
	 */
	if (le32toh(me->code) == 0x1c &&
	    SSD_SENSE_KEY(ssd->flags) == SKEY_VENDOR_SPECIFIC &&
	    (ssd->asc & 0x80) != 0) {
		event = ((int)(ssd->asc & ~0x80) << 8) + ssd->ascq;
	} else
		event = le32toh(me->code);

	/* Look up event, get codes. */
	fp = mly_describe_code(mly_table_event, event);

	/* Quiet event? */
	class = fp[0];
#ifdef notyet
	if (isupper(class) && bootverbose)
		class = tolower(class);
#endif

	/* Get action code, text string. */
	action = fp[1];
	tp = fp + 3;

	/*
	 * Print some information about the event.
	 *
	 * This code uses a table derived from the corresponding portion of
	 * the Linux driver, and thus the parser is very similar.
	 */
	switch (class) {
	case 'p':
		/*
		 * Error on physical drive.
		 */
		printf("%s: physical device %d:%d %s\n", mly->mly_dv.dv_xname,
		    me->channel, me->target, tp);
		if (action == 'r')
			mly->mly_btl[me->channel][me->target].mb_flags |=
			    MLY_BTL_RESCAN;
		break;

	case 'l':
	case 'm':
		/*
		 * Error on logical unit, or message about logical unit.
		 */
		bus = MLY_LOGDEV_BUS(mly, me->lun);
		target = MLY_LOGDEV_TARGET(mly, me->lun);
		printf("%s: logical device %d:%d %s\n", mly->mly_dv.dv_xname,
		    bus, target, tp);
		if (action == 'r')
			mly->mly_btl[bus][target].mb_flags |= MLY_BTL_RESCAN;
		break;

	case 's':
		/*
		 * Report of sense data.
		 */
		if ((SSD_SENSE_KEY(ssd->flags) == SKEY_NO_SENSE ||
		     SSD_SENSE_KEY(ssd->flags) == SKEY_NOT_READY) &&
		    ssd->asc == 0x04 &&
		    (ssd->ascq == 0x01 || ssd->ascq == 0x02)) {
			/* Ignore NO_SENSE or NOT_READY in one case. */
			break;
		}

		/*
		 * XXX Should translate this if SCSIVERBOSE.
		 */
		printf("%s: physical device %d:%d %s\n", mly->mly_dv.dv_xname,
		    me->channel, me->target, tp);
		printf("%s: sense key %d asc %02x ascq %02x\n",
		    mly->mly_dv.dv_xname, SSD_SENSE_KEY(ssd->flags),
		    ssd->asc, ssd->ascq);
		printf("%s: info %x%x%x%x csi %x%x%x%x\n",
		    mly->mly_dv.dv_xname, ssd->info[0], ssd->info[1],
		    ssd->info[2], ssd->info[3], ssd->csi[0],
		    ssd->csi[1], ssd->csi[2], ssd->csi[3]);
		if (action == 'r')
			mly->mly_btl[me->channel][me->target].mb_flags |=
			    MLY_BTL_RESCAN;
		break;

	case 'e':
		printf("%s: ", mly->mly_dv.dv_xname);
		printf(tp, me->target, me->lun);
		break;

	case 'c':
		printf("%s: controller %s\n", mly->mly_dv.dv_xname, tp);
		break;

	case '?':
		printf("%s: %s - %d\n", mly->mly_dv.dv_xname, tp, event);
		break;

	default:
		/* Probably a 'noisy' event being ignored. */
		break;
	}
}

/*
 * Create the monitoring thread.  Called after the standard kernel threads
 * have been created.
 */
static void
mly_thread_create(void *cookie)
{
	struct mly_softc *mly;
	int rv;

	mly = cookie;

	rv = kthread_create1(mly_thread, mly, &mly->mly_thread, "%s",
	    mly->mly_dv.dv_xname);
	if (rv != 0)
		printf("%s: unable to create thread (%d)\n",
		    mly->mly_dv.dv_xname, rv);
}

/*
 * Perform periodic activities.
 */
static void
mly_thread(void *cookie)
{
	struct mly_softc *mly;
	struct mly_btl *btl;
	int s, bus, target, done;

	mly = (struct mly_softc *)cookie;

	for (;;) {
		/* Check for new events. */
		mly_check_event(mly);

		/* Re-scan up to 1 device. */
		s = splbio();
		done = 0;
		for (bus = 0; bus < mly->mly_nchans && !done; bus++) {
			for (target = 0; target < MLY_MAX_TARGETS; target++) {
				/* Perform device rescan? */
				btl = &mly->mly_btl[bus][target];
				if ((btl->mb_flags & MLY_BTL_RESCAN) != 0) {
					btl->mb_flags ^= MLY_BTL_RESCAN;
					mly_scan_btl(mly, bus, target);
					done = 1;
					break;
				}
			}
		}
		splx(s);

		/* Sleep for N seconds. */
		tsleep(mly_thread, PWAIT, "mlyzzz",
		    hz * MLY_PERIODIC_INTERVAL);
	}
}

/*
 * Submit a command to the controller and poll on completion.  Return
 * non-zero on timeout.
 */
static int
mly_ccb_poll(struct mly_softc *mly, struct mly_ccb *mc, int timo)
{
	int rv;

	if ((rv = mly_ccb_submit(mly, mc)) != 0)
		return (rv);

	for (timo *= 10; timo != 0; timo--) {
		if ((mc->mc_flags & MLY_CCB_COMPLETE) != 0)
			break;
		mly_intr(mly);
		DELAY(100);
	}

	return (timo == 0);
}

/*
 * Submit a command to the controller and sleep on completion.  Return
 * non-zero on timeout.
 */
static int
mly_ccb_wait(struct mly_softc *mly, struct mly_ccb *mc, int timo)
{
	int rv, s;

	mly_ccb_enqueue(mly, mc);

	s = splbio();
	if ((mc->mc_flags & MLY_CCB_COMPLETE) != 0) {
		splx(s);
		return (0);
	}
	rv = tsleep(mc, PRIBIO, "mlywccb", timo * hz / 1000);
	splx(s);

	return (rv);
}

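/*
 * Note that the timeout passed to both mly_ccb_poll() and mly_ccb_wait()
 * is in milliseconds; mly_ccb_wait() returns tsleep()'s error code, which
 * is the non-zero EWOULDBLOCK if the sleep timed out.
 */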

/*
 * If a CCB is specified, enqueue it.  Pull CCBs off the software queue in
 * the order that they were enqueued and try to submit their command blocks
 * to the controller for execution.
 */
void
mly_ccb_enqueue(struct mly_softc *mly, struct mly_ccb *mc)
{
	int s;

	s = splbio();

	if (mc != NULL)
		SIMPLEQ_INSERT_TAIL(&mly->mly_ccb_queue, mc, mc_link.simpleq);

	while ((mc = SIMPLEQ_FIRST(&mly->mly_ccb_queue)) != NULL) {
		if (mly_ccb_submit(mly, mc))
			break;
		SIMPLEQ_REMOVE_HEAD(&mly->mly_ccb_queue, mc_link.simpleq);
	}

	splx(s);
}

/*
 * Deliver a command to the controller.
 */
static int
mly_ccb_submit(struct mly_softc *mly, struct mly_ccb *mc)
{
	union mly_cmd_packet *pkt;
	int s, off;

	mc->mc_packet->generic.command_id = htole16(mc->mc_slot);

	bus_dmamap_sync(mly->mly_dmat, mly->mly_pkt_dmamap,
	    mc->mc_packetphys - mly->mly_pkt_busaddr,
	    sizeof(union mly_cmd_packet),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	s = splbio();

	/*
	 * Do we have to use the hardware mailbox?
	 */
	if ((mly->mly_state & MLY_STATE_MMBOX_ACTIVE) == 0) {
		/*
		 * Check to see if the controller is ready for us.
		 */
		if (mly_idbr_true(mly, MLY_HM_CMDSENT)) {
			splx(s);
			return (EBUSY);
		}

		/*
		 * It's ready, send the command.
		 */
		mly_outl(mly, mly->mly_cmd_mailbox,
		    (u_int64_t)mc->mc_packetphys & 0xffffffff);
		mly_outl(mly, mly->mly_cmd_mailbox + 4,
		    (u_int64_t)mc->mc_packetphys >> 32);
		mly_outb(mly, mly->mly_idbr, MLY_HM_CMDSENT);
	} else {
		pkt = &mly->mly_mmbox->mmm_command[mly->mly_mmbox_cmd_idx];
		off = (caddr_t)pkt - (caddr_t)mly->mly_mmbox;

		bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
		    off, sizeof(mly->mly_mmbox->mmm_command[0]),
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

		/* Check to see if the next index is free yet. */
		if (pkt->mmbox.flag != 0) {
			splx(s);
			return (EBUSY);
		}

		/* Copy in new command. */
		memcpy(pkt->mmbox.data, mc->mc_packet->mmbox.data,
		    sizeof(pkt->mmbox.data));

		/* Copy flag last. */
		pkt->mmbox.flag = mc->mc_packet->mmbox.flag;

		bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
		    off, sizeof(mly->mly_mmbox->mmm_command[0]),
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		/* Signal controller and update index. */
		mly_outb(mly, mly->mly_idbr, MLY_AM_CMDSENT);
		mly->mly_mmbox_cmd_idx =
		    (mly->mly_mmbox_cmd_idx + 1) % MLY_MMBOX_COMMANDS;
	}

	splx(s);
	return (0);
}

/*
 * Pick up completed commands from the controller and handle accordingly.
 */
int
mly_intr(void *cookie)
{
	struct mly_ccb *mc;
	union mly_status_packet *sp;
	u_int16_t slot;
	int forus, off;
	struct mly_softc *mly;

	mly = cookie;
	forus = 0;

	/*
	 * Pick up hardware-mailbox commands.
	 */
	if (mly_odbr_true(mly, MLY_HM_STSREADY)) {
		slot = mly_inw(mly, mly->mly_status_mailbox);

		if (slot < MLY_SLOT_MAX) {
			mc = mly->mly_ccbs + (slot - MLY_SLOT_START);
			mc->mc_status =
			    mly_inb(mly, mly->mly_status_mailbox + 2);
			mc->mc_sense =
			    mly_inb(mly, mly->mly_status_mailbox + 3);
			mc->mc_resid =
			    mly_inl(mly, mly->mly_status_mailbox + 4);

			mly_ccb_complete(mly, mc);
		} else {
			/* Slot 0xffff may mean "extremely bogus command". */
			printf("%s: got HM completion for illegal slot %u\n",
			    mly->mly_dv.dv_xname, slot);
		}

		/* Unconditionally acknowledge status. */
		mly_outb(mly, mly->mly_odbr, MLY_HM_STSREADY);
		mly_outb(mly, mly->mly_idbr, MLY_HM_STSACK);
		forus = 1;
	}

	/*
	 * Pick up memory-mailbox commands.
	 */
	if (mly_odbr_true(mly, MLY_AM_STSREADY)) {
		for (;;) {
			sp = &mly->mly_mmbox->mmm_status[mly->mly_mmbox_sts_idx];
			off = (caddr_t)sp - (caddr_t)mly->mly_mmbox;

			bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
			    off, sizeof(mly->mly_mmbox->mmm_command[0]),
			    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

			/* Check for more status. */
			if (sp->mmbox.flag == 0)
				break;

			/* Get slot number. */
			slot = le16toh(sp->status.command_id);
			if (slot < MLY_SLOT_MAX) {
				mc = mly->mly_ccbs + (slot - MLY_SLOT_START);
				mc->mc_status = sp->status.status;
				mc->mc_sense = sp->status.sense_length;
				mc->mc_resid = le32toh(sp->status.residue);
				mly_ccb_complete(mly, mc);
			} else {
				/*
				 * Slot 0xffff may mean "extremely bogus
				 * command".
				 */
				printf("%s: got AM completion for illegal "
				    "slot %u at %d\n", mly->mly_dv.dv_xname,
				    slot, mly->mly_mmbox_sts_idx);
			}

			/* Clear and move to next index. */
			sp->mmbox.flag = 0;
			mly->mly_mmbox_sts_idx =
			    (mly->mly_mmbox_sts_idx + 1) % MLY_MMBOX_STATUS;
		}

		/* Acknowledge that we have collected status value(s). */
		mly_outb(mly, mly->mly_odbr, MLY_AM_STSREADY);
		forus = 1;
	}

	/*
	 * Run the queue.
	 */
	if (forus && !SIMPLEQ_EMPTY(&mly->mly_ccb_queue))
		mly_ccb_enqueue(mly, NULL);

	return (forus);
}

/*
 * Process completed commands.
 */
static void
mly_ccb_complete(struct mly_softc *mly, struct mly_ccb *mc)
{
	void (*complete)(struct mly_softc *, struct mly_ccb *);

	bus_dmamap_sync(mly->mly_dmat, mly->mly_pkt_dmamap,
	    mc->mc_packetphys - mly->mly_pkt_busaddr,
	    sizeof(union mly_cmd_packet),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	complete = mc->mc_complete;
	mc->mc_flags |= MLY_CCB_COMPLETE;

	/*
	 * Call completion handler or wake up sleeping consumer.
	 */
	if (complete != NULL)
		(*complete)(mly, mc);
	else
		wakeup(mc);
}

/*
 * Allocate a command.
 */
int
mly_ccb_alloc(struct mly_softc *mly, struct mly_ccb **mcp)
{
	struct mly_ccb *mc;
	int s;

	s = splbio();
	mc = SLIST_FIRST(&mly->mly_ccb_free);
	if (mc != NULL)
		SLIST_REMOVE_HEAD(&mly->mly_ccb_free, mc_link.slist);
	splx(s);

	*mcp = mc;
	return (mc == NULL ? EAGAIN : 0);
}

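/*
 * Note that allocation can fail only by exhaustion of the freelist;
 * callers either fail the request (e.g. with XS_RESOURCE_SHORTAGE) or
 * simply retry later, as the event code does on the next command
 * completion.
 */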

/*
 * Release a command back to the freelist.
 */
void
mly_ccb_free(struct mly_softc *mly, struct mly_ccb *mc)
{
	int s;

	/*
	 * Reset parts of the command that could cause confusion if a
	 * consumer failed to clean up after itself, since the CCB will
	 * later be reallocated.
	 */
	mc->mc_data = NULL;
	mc->mc_flags = 0;
	mc->mc_complete = NULL;
	mc->mc_private = NULL;
	mc->mc_packet->generic.command_control = 0;

	/*
	 * By default, we set up to overwrite the command packet with sense
	 * information.
	 */
	mc->mc_packet->generic.sense_buffer_address =
	    htole64(mc->mc_packetphys);
	mc->mc_packet->generic.maximum_sense_size =
	    sizeof(union mly_cmd_packet);

	s = splbio();
	SLIST_INSERT_HEAD(&mly->mly_ccb_free, mc, mc_link.slist);
	splx(s);
}

/*
 * Allocate and initialize command and packet structures.
 *
 * If the controller supports fewer than MLY_MAX_CCBS commands, limit our
 * allocation to that number.  If we don't yet know how many commands the
 * controller supports, allocate a very small set (suitable for
 * initialization purposes only).
 */
static int
mly_alloc_ccbs(struct mly_softc *mly)
{
	struct mly_ccb *mc;
	int i, rv;

	if (mly->mly_controllerinfo == NULL)
		mly->mly_ncmds = MLY_CCBS_RESV;
	else {
		i = le16toh(mly->mly_controllerinfo->maximum_parallel_commands);
		mly->mly_ncmds = min(MLY_MAX_CCBS, i);
	}

	/*
	 * Allocate enough space for all the command packets in one chunk
	 * and map them permanently into controller-visible space.
	 */
	rv = mly_dmamem_alloc(mly,
	    mly->mly_ncmds * sizeof(union mly_cmd_packet),
	    &mly->mly_pkt_dmamap, (caddr_t *)&mly->mly_pkt,
	    &mly->mly_pkt_busaddr, &mly->mly_pkt_seg);
	if (rv)
		return (rv);

	mly->mly_ccbs = malloc(sizeof(struct mly_ccb) * mly->mly_ncmds,
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	for (i = 0; i < mly->mly_ncmds; i++) {
		mc = mly->mly_ccbs + i;
		mc->mc_slot = MLY_SLOT_START + i;
		mc->mc_packet = mly->mly_pkt + i;
		mc->mc_packetphys = mly->mly_pkt_busaddr +
		    (i * sizeof(union mly_cmd_packet));

		rv = bus_dmamap_create(mly->mly_dmat, MLY_MAX_XFER,
		    MLY_MAX_SEGS, MLY_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &mc->mc_datamap);
		if (rv) {
			mly_release_ccbs(mly);
			return (rv);
		}

		mly_ccb_free(mly, mc);
	}

	return (0);
}

/*
 * Free all the storage held by commands.
 *
 * Must be called with all commands on the free list.
 */
static void
mly_release_ccbs(struct mly_softc *mly)
{
	struct mly_ccb *mc;

	/* Throw away command buffer DMA maps. */
	while (mly_ccb_alloc(mly, &mc) == 0)
		bus_dmamap_destroy(mly->mly_dmat, mc->mc_datamap);

	/* Release CCB storage. */
	free(mly->mly_ccbs, M_DEVBUF);

	/* Release the packet storage. */
	mly_dmamem_free(mly, mly->mly_ncmds * sizeof(union mly_cmd_packet),
	    mly->mly_pkt_dmamap, (caddr_t)mly->mly_pkt, &mly->mly_pkt_seg);
}

/*
 * Map a command into controller-visible space.
 */
static int
mly_ccb_map(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct mly_cmd_generic *gen;
	struct mly_sg_entry *sg;
	bus_dma_segment_t *ds;
	int flg, nseg, rv;

#ifdef DIAGNOSTIC
	/* Don't map more than once. */
	if ((mc->mc_flags & MLY_CCB_MAPPED) != 0)
		panic("mly_ccb_map: already mapped");
	mc->mc_flags |= MLY_CCB_MAPPED;

	/* Does the command have a data buffer? */
	if (mc->mc_data == NULL)
		panic("mly_ccb_map: no data buffer");
#endif

	rv = bus_dmamap_load(mly->mly_dmat, mc->mc_datamap, mc->mc_data,
	    mc->mc_length, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((mc->mc_flags & MLY_CCB_DATAIN) != 0 ?
	    BUS_DMA_READ : BUS_DMA_WRITE));
	if (rv != 0)
		return (rv);

	gen = &mc->mc_packet->generic;

	/*
	 * Can we use the transfer structure directly?
	 */
	if ((nseg = mc->mc_datamap->dm_nsegs) <= 2) {
		mc->mc_sgoff = -1;
		sg = &gen->transfer.direct.sg[0];
	} else {
		mc->mc_sgoff = (mc->mc_slot - MLY_SLOT_START) *
		    MLY_MAX_SEGS;
		sg = mly->mly_sg + mc->mc_sgoff;
		gen->command_control |= MLY_CMDCTL_EXTENDED_SG_TABLE;
		gen->transfer.indirect.entries[0] = htole16(nseg);
		gen->transfer.indirect.table_physaddr[0] =
		    htole64(mly->mly_sg_busaddr +
		    (mc->mc_sgoff * sizeof(struct mly_sg_entry)));
	}

	/*
	 * Fill the S/G table.
	 */
	for (ds = mc->mc_datamap->dm_segs; nseg != 0; nseg--, sg++, ds++) {
		sg->physaddr = htole64(ds->ds_addr);
		sg->length = htole64(ds->ds_len);
	}

	/*
	 * Sync up the data map.
	 */
	if ((mc->mc_flags & MLY_CCB_DATAIN) != 0)
		flg = BUS_DMASYNC_PREREAD;
	else /* if ((mc->mc_flags & MLY_CCB_DATAOUT) != 0) */ {
		gen->command_control |= MLY_CMDCTL_DATA_DIRECTION;
		flg = BUS_DMASYNC_PREWRITE;
	}

	bus_dmamap_sync(mly->mly_dmat, mc->mc_datamap, 0, mc->mc_length, flg);

	/*
	 * Sync up the chained S/G table, if we're using one.
	 */
	if (mc->mc_sgoff == -1)
		return (0);

	bus_dmamap_sync(mly->mly_dmat, mly->mly_sg_dmamap, mc->mc_sgoff,
	    MLY_SGL_SIZE, BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Unmap a command from controller-visible space.
 */
static void
mly_ccb_unmap(struct mly_softc *mly, struct mly_ccb *mc)
{
	int flg;

#ifdef DIAGNOSTIC
	if ((mc->mc_flags & MLY_CCB_MAPPED) == 0)
		panic("mly_ccb_unmap: not mapped");
	mc->mc_flags &= ~MLY_CCB_MAPPED;
#endif

	if ((mc->mc_flags & MLY_CCB_DATAIN) != 0)
		flg = BUS_DMASYNC_POSTREAD;
	else /* if ((mc->mc_flags & MLY_CCB_DATAOUT) != 0) */
		flg = BUS_DMASYNC_POSTWRITE;

	bus_dmamap_sync(mly->mly_dmat, mc->mc_datamap, 0, mc->mc_length, flg);
	bus_dmamap_unload(mly->mly_dmat, mc->mc_datamap);

	if (mc->mc_sgoff == -1)
		return;

	bus_dmamap_sync(mly->mly_dmat, mly->mly_sg_dmamap, mc->mc_sgoff,
	    MLY_SGL_SIZE, BUS_DMASYNC_POSTWRITE);
}

/*
 * Adjust the size of each I/O before it passes to the SCSI layer.
 */
static void
mly_scsipi_minphys(struct buf *bp)
{

	if (bp->b_bcount > MLY_MAX_XFER)
		bp->b_bcount = MLY_MAX_XFER;
	minphys(bp);
}

/*
 * Start a SCSI command.
 */
static void
mly_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct mly_ccb *mc;
	struct mly_cmd_scsi_small *ss;
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct mly_softc *mly;
	struct mly_btl *btl;
	int s, tmp;

	mly = (void *)chan->chan_adapter->adapt_dev;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		btl = &mly->mly_btl[chan->chan_channel][periph->periph_target];
		s = splbio();
		tmp = btl->mb_flags;
		splx(s);

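		/*
		 * (mb_flags is sampled at splbio() above because the
		 * rescan code updates it from interrupt context.)
		 */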
		/*
		 * Check for I/O attempt to a protected or non-existent
		 * device.
		 */
		if ((tmp & MLY_BTL_PROTECTED) != 0) {
			xs->error = XS_SELTIMEOUT;
			scsipi_done(xs);
			break;
		}

#ifdef DIAGNOSTIC
		/* XXX Increase if/when we support large SCSI commands. */
		if (xs->cmdlen > MLY_CMD_SCSI_SMALL_CDB) {
			printf("%s: cmd too large\n", mly->mly_dv.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			break;
		}
#endif

		if (mly_ccb_alloc(mly, &mc)) {
			xs->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(xs);
			break;
		}

		/* Build the command. */
		mc->mc_data = xs->data;
		mc->mc_length = xs->datalen;
		mc->mc_complete = mly_scsipi_complete;
		mc->mc_private = xs;

		/* Build the packet for the controller. */
		ss = &mc->mc_packet->scsi_small;
		ss->opcode = MDACMD_SCSI;
#ifdef notdef
		/*
		 * XXX FreeBSD does this, but it doesn't fix anything,
		 * XXX and appears potentially harmful.
		 */
		ss->command_control |= MLY_CMDCTL_DISABLE_DISCONNECT;
#endif

		ss->data_size = htole32(xs->datalen);
		_lto3l(MLY_PHYADDR(0, chan->chan_channel,
		    periph->periph_target, periph->periph_lun), ss->addr);

		/* Convert the timeout into the controller's units. */
		if (xs->timeout < 60 * 1000)
			ss->timeout = xs->timeout / 1000 |
			    MLY_TIMEOUT_SECONDS;
		else if (xs->timeout < 60 * 60 * 1000)
			ss->timeout = xs->timeout / (60 * 1000) |
			    MLY_TIMEOUT_MINUTES;
		else
			ss->timeout = xs->timeout / (60 * 60 * 1000) |
			    MLY_TIMEOUT_HOURS;

		ss->maximum_sense_size = sizeof(xs->sense);
		ss->cdb_length = xs->cmdlen;
		memcpy(ss->cdb, xs->cmd, xs->cmdlen);

		if (mc->mc_length != 0) {
			if ((xs->xs_control & XS_CTL_DATA_OUT) != 0)
				mc->mc_flags |= MLY_CCB_DATAOUT;
			else /* if ((xs->xs_control & XS_CTL_DATA_IN) != 0) */
				mc->mc_flags |= MLY_CCB_DATAIN;

			if (mly_ccb_map(mly, mc) != 0) {
				xs->error = XS_DRIVER_STUFFUP;
				mly_ccb_free(mly, mc);
				scsipi_done(xs);
				break;
			}
		}

		/*
		 * Give the command to the controller.
		 */
		if ((xs->xs_control & XS_CTL_POLL) != 0) {
			if (mly_ccb_poll(mly, mc, xs->timeout + 5000)) {
				xs->error = XS_REQUEUE;
				if (mc->mc_length != 0)
					mly_ccb_unmap(mly, mc);
				mly_ccb_free(mly, mc);
				scsipi_done(xs);
			}
		} else
			mly_ccb_enqueue(mly, mc);

		break;

	case ADAPTER_REQ_GROW_RESOURCES:
		/*
		 * Not supported.
		 */
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
		/*
		 * We can't change the transfer mode, but at least let
		 * scsipi know what the adapter has negotiated.
		 */
		mly_get_xfer_mode(mly, chan->chan_channel, arg);
		break;
	}
}

/*
 * Handle completion of a SCSI command.
 */
static void
mly_scsipi_complete(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct scsipi_xfer *xs;
	struct scsipi_channel *chan;
	struct scsipi_inquiry_data *inq;
	struct mly_btl *btl;
	int target, sl, s;
	const char *p;

	xs = mc->mc_private;
	xs->status = mc->mc_status;

	/*
	 * XXX The `resid' value as returned by the controller appears to be
	 * bogus, so we always set it to zero.  Is it perhaps the transfer
	 * count?
	 */
	 */
	xs->resid = 0;	/* mc->mc_resid; */

	if (mc->mc_length != 0)
		mly_ccb_unmap(mly, mc);

	switch (mc->mc_status) {
	case SCSI_OK:
		/*
		 * In order to report logical device type and status, we
		 * overwrite the result of the INQUIRY command to logical
		 * devices.
		 */
		if (xs->cmd->opcode == INQUIRY) {
			chan = xs->xs_periph->periph_channel;
			target = xs->xs_periph->periph_target;
			btl = &mly->mly_btl[chan->chan_channel][target];

			s = splbio();
			if ((btl->mb_flags & MLY_BTL_LOGICAL) != 0) {
				inq = (struct scsipi_inquiry_data *)xs->data;
				mly_padstr(inq->vendor, "MYLEX", 8);
				p = mly_describe_code(mly_table_device_type,
				    btl->mb_type);
				mly_padstr(inq->product, p, 16);
				p = mly_describe_code(mly_table_device_state,
				    btl->mb_state);
				mly_padstr(inq->revision, p, 4);
			}
			splx(s);
		}

		xs->error = XS_NOERROR;
		break;

	case SCSI_CHECK:
		sl = mc->mc_sense;
		if (sl > sizeof(xs->sense.scsi_sense))
			sl = sizeof(xs->sense.scsi_sense);
		memcpy(&xs->sense.scsi_sense, mc->mc_packet, sl);
		xs->error = XS_SENSE;
		break;

	case SCSI_BUSY:
	case SCSI_QUEUE_FULL:
		xs->error = XS_BUSY;
		break;

	default:
		printf("%s: unknown SCSI status 0x%x\n",
		    mly->mly_dv.dv_xname, xs->status);
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	mly_ccb_free(mly, mc);
	scsipi_done(xs);
}

/*
 * Notify scsipi about a target's transfer mode.
 */
static void
mly_get_xfer_mode(struct mly_softc *mly, int bus, struct scsipi_xfer_mode *xm)
{
	struct mly_btl *btl;
	int s;

	btl = &mly->mly_btl[bus][xm->xm_target];
	xm->xm_mode = 0;

	s = splbio();

	if ((btl->mb_flags & MLY_BTL_PHYSICAL) != 0) {
		if (btl->mb_speed == 0) {
			xm->xm_period = 0;
			xm->xm_offset = 0;
		} else {
			xm->xm_period = 12;			/* XXX */
			xm->xm_offset = 8;			/* XXX */
			xm->xm_mode |= PERIPH_CAP_SYNC;		/* XXX */
		}

		/* OR in the width so as not to clobber the sync bit. */
		switch (btl->mb_width) {
		case 32:
			xm->xm_mode |= PERIPH_CAP_WIDE32;
			break;
		case 16:
			xm->xm_mode |= PERIPH_CAP_WIDE16;
			break;
		default:
			break;
		}
	} else /* ((btl->mb_flags & MLY_BTL_LOGICAL) != 0) */ {
		xm->xm_mode = PERIPH_CAP_WIDE16 | PERIPH_CAP_SYNC;
		xm->xm_period = 12;
		xm->xm_offset = 8;
	}

	if ((btl->mb_flags & MLY_BTL_TQING) != 0)
		xm->xm_mode |= PERIPH_CAP_TQING;

	splx(s);

	scsipi_async_event(&mly->mly_chans[bus], ASYNC_EVENT_XFER_MODE, xm);
}

/*
 * ioctl hook; used here only to initiate low-level rescans.
 */
static int
mly_scsipi_ioctl(struct scsipi_channel *chan, u_long cmd, caddr_t data,
    int flag, struct proc *p)
{
	struct mly_softc *mly;
	int rv;

	mly = (struct mly_softc *)chan->chan_adapter->adapt_dev;

	switch (cmd) {
	case SCBUSIOLLSCAN:
		mly_scan_channel(mly, chan->chan_channel);
		rv = 0;
		break;
	default:
		rv = ENOTTY;
		break;
	}

	return (rv);
}

/*
 * Handshake with the firmware while the card is being initialized.
 */
static int
mly_fwhandshake(struct mly_softc *mly)
{
	u_int8_t error, param0, param1;
	int spinup;

	spinup = 0;

	/*
	 * Set HM_STSACK and let the firmware initialize.
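	 * If the bit remains asserted after the write, initialization is
	 * still in progress and the firmware will post the progress
	 * messages decoded below.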
	 */
	mly_outb(mly, mly->mly_idbr, MLY_HM_STSACK);
	DELAY(1000);	/* too short? */

	/* If HM_STSACK is still true, the controller is initializing. */
	if (!mly_idbr_true(mly, MLY_HM_STSACK))
		return (0);

	printf("%s: controller initialization started\n",
	    mly->mly_dv.dv_xname);

	/*
	 * Spin waiting for initialization to finish, or for a message to be
	 * delivered.
	 */
	while (mly_idbr_true(mly, MLY_HM_STSACK)) {
		/* Check for a message. */
		if (!mly_error_valid(mly))
			continue;

		error = mly_inb(mly, mly->mly_error_status) & ~MLY_MSG_EMPTY;
		param0 = mly_inb(mly, mly->mly_cmd_mailbox);
		param1 = mly_inb(mly, mly->mly_cmd_mailbox + 1);

		switch (error) {
		case MLY_MSG_SPINUP:
			if (!spinup) {
				printf("%s: drive spinup in progress\n",
				    mly->mly_dv.dv_xname);
				spinup = 1;
			}
			break;

		case MLY_MSG_RACE_RECOVERY_FAIL:
			printf("%s: mirror race recovery failed -\n",
			    mly->mly_dv.dv_xname);
			printf("%s: one or more drives offline\n",
			    mly->mly_dv.dv_xname);
			break;

		case MLY_MSG_RACE_IN_PROGRESS:
			printf("%s: mirror race recovery in progress\n",
			    mly->mly_dv.dv_xname);
			break;

		case MLY_MSG_RACE_ON_CRITICAL:
			printf("%s: mirror race recovery on critical drive\n",
			    mly->mly_dv.dv_xname);
			break;

		case MLY_MSG_PARITY_ERROR:
			printf("%s: FATAL MEMORY PARITY ERROR\n",
			    mly->mly_dv.dv_xname);
			return (ENXIO);

		default:
			printf("%s: unknown initialization code 0x%x\n",
			    mly->mly_dv.dv_xname, error);
			break;
		}
	}

	return (0);
}

/*
 * Space-fill a character string.
 */
static void
mly_padstr(char *dst, const char *src, int len)
{

	while (len-- > 0) {
		if (*src != '\0')
			*dst++ = *src++;
		else
			*dst++ = ' ';
	}
}

/*
 * Allocate DMA-safe memory.
 */
static int
mly_dmamem_alloc(struct mly_softc *mly, int size, bus_dmamap_t *dmamap,
    caddr_t *kva, bus_addr_t *paddr, bus_dma_segment_t *seg)
{
	int rseg, rv, state;

	state = 0;

	if ((rv = bus_dmamem_alloc(mly->mly_dmat, size, PAGE_SIZE, 0,
	    seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: dmamem_alloc = %d\n", mly->mly_dv.dv_xname, rv);
		goto bad;
	}

	state++;

	if ((rv = bus_dmamem_map(mly->mly_dmat, seg, 1, size, kva,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: dmamem_map = %d\n", mly->mly_dv.dv_xname, rv);
		goto bad;
	}

	state++;

	if ((rv = bus_dmamap_create(mly->mly_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, dmamap)) != 0) {
		printf("%s: dmamap_create = %d\n", mly->mly_dv.dv_xname, rv);
		goto bad;
	}

	state++;

	if ((rv = bus_dmamap_load(mly->mly_dmat, *dmamap, *kva, size,
	    NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: dmamap_load = %d\n", mly->mly_dv.dv_xname, rv);
		goto bad;
	}

	*paddr = (*dmamap)->dm_segs[0].ds_addr;
	memset(*kva, 0, size);
	return (0);

 bad:
	if (state > 2)
		bus_dmamap_destroy(mly->mly_dmat, *dmamap);
	if (state > 1)
		bus_dmamem_unmap(mly->mly_dmat, *kva, size);
	if (state > 0)
		bus_dmamem_free(mly->mly_dmat, seg, 1);

	return (rv);
}

/*
 * Free DMA-safe memory.
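 * This undoes mly_dmamem_alloc() in reverse order: unload the map,
 * destroy it, unmap the KVA, then free the segment.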
 */
static void
mly_dmamem_free(struct mly_softc *mly, int size, bus_dmamap_t dmamap,
    caddr_t kva, bus_dma_segment_t *seg)
{

	bus_dmamap_unload(mly->mly_dmat, dmamap);
	bus_dmamap_destroy(mly->mly_dmat, dmamap);
	bus_dmamem_unmap(mly->mly_dmat, kva, size);
	bus_dmamem_free(mly->mly_dmat, seg, 1);
}

/*
 * Accept an open operation on the control device.
 */
int
mlyopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct mly_softc *mly;

	if ((mly = device_lookup(&mly_cd, minor(dev))) == NULL)
		return (ENXIO);
	if ((mly->mly_state & MLY_STATE_INITOK) == 0)
		return (ENXIO);
	if ((mly->mly_state & MLY_STATE_OPEN) != 0)
		return (EBUSY);

	mly->mly_state |= MLY_STATE_OPEN;
	return (0);
}

/*
 * Accept the last close on the control device.
 */
int
mlyclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct mly_softc *mly;

	mly = device_lookup(&mly_cd, minor(dev));
	mly->mly_state &= ~MLY_STATE_OPEN;
	return (0);
}

/*
 * Handle control operations.
 */
int
mlyioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct lwp *l)
{
	struct mly_softc *mly;
	int rv;

	if (securelevel >= 2)
		return (EPERM);

	mly = device_lookup(&mly_cd, minor(dev));

	switch (cmd) {
	case MLYIO_COMMAND:
		rv = mly_user_command(mly, (void *)data);
		break;
	case MLYIO_HEALTH:
		rv = mly_user_health(mly, (void *)data);
		break;
	default:
		rv = ENOTTY;
		break;
	}

	return (rv);
}

/*
 * Execute a command passed in from userspace.
 *
 * The control structure contains the actual command for the controller, as
 * well as the user-space data pointer and data size, and an optional sense
 * buffer size/pointer.  On completion, the data size is adjusted to the
 * command residual, and the sense buffer size to the size of the returned
 * sense data.
 */
static int
mly_user_command(struct mly_softc *mly, struct mly_user_command *uc)
{
	struct mly_ccb *mc;
	int rv, mapped;

	if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
		return (rv);

	mapped = 0;
	mc->mc_data = NULL;

	/*
	 * Handle data size/direction.  The sign of DataTransferLength
	 * encodes the direction: positive means data-in, negative
	 * data-out.
	 */
	if ((mc->mc_length = abs(uc->DataTransferLength)) != 0) {
		if (mc->mc_length > MAXPHYS) {
			rv = EINVAL;
			goto out;
		}

		mc->mc_data = malloc(mc->mc_length, M_DEVBUF, M_WAITOK);
		if (mc->mc_data == NULL) {
			rv = ENOMEM;
			goto out;
		}

		if (uc->DataTransferLength > 0) {
			mc->mc_flags |= MLY_CCB_DATAIN;
			memset(mc->mc_data, 0, mc->mc_length);
		}

		if (uc->DataTransferLength < 0) {
			mc->mc_flags |= MLY_CCB_DATAOUT;
			rv = copyin(uc->DataTransferBuffer, mc->mc_data,
			    mc->mc_length);
			if (rv != 0)
				goto out;
		}

		if ((rv = mly_ccb_map(mly, mc)) != 0)
			goto out;
		mapped = 1;
	}

	/* Copy in the command and execute it. */
	memcpy(mc->mc_packet, &uc->CommandMailbox, sizeof(uc->CommandMailbox));

	if ((rv = mly_ccb_wait(mly, mc, 60000)) != 0)
		goto out;

	/*
	 * Return the data to userspace.
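	 * Only data-in transfers (a positive DataTransferLength) are
	 * copied out; data-out buffers were copied in before the command
	 * was submitted.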
	 */
	if (uc->DataTransferLength > 0) {
		rv = copyout(mc->mc_data, uc->DataTransferBuffer,
		    mc->mc_length);
		if (rv != 0)
			goto out;
	}

	/* Return the sense buffer to userspace. */
	if (uc->RequestSenseLength > 0 && mc->mc_sense > 0) {
		rv = copyout(mc->mc_packet, uc->RequestSenseBuffer,
		    min(uc->RequestSenseLength, mc->mc_sense));
		if (rv != 0)
			goto out;
	}

	/* Return command results to userspace (caller will copy out). */
	uc->DataTransferLength = mc->mc_resid;
	uc->RequestSenseLength = min(uc->RequestSenseLength, mc->mc_sense);
	uc->CommandStatus = mc->mc_status;
	rv = 0;

 out:
	if (mapped)
		mly_ccb_unmap(mly, mc);
	if (mc->mc_data != NULL)
		free(mc->mc_data, M_DEVBUF);
	mly_ccb_free(mly, mc);

	return (rv);
}

/*
 * Return health status to userspace.  If the health change index in the
 * user structure does not match that currently exported by the controller,
 * we return the current status immediately.  Otherwise, we block until
 * either interrupted or new status is delivered.
 */
static int
mly_user_health(struct mly_softc *mly, struct mly_user_health *uh)
{
	struct mly_health_status mh;
	int rv, s;

	/* Fetch the current health status from userspace. */
	rv = copyin(uh->HealthStatusBuffer, &mh, sizeof(mh));
	if (rv != 0)
		return (rv);

	/* Sleep waiting for a status update. */
	s = splbio();
	if (mly->mly_event_change == mh.change_counter)
		rv = tsleep(&mly->mly_event_change, PRIBIO | PCATCH,
		    "mlyhealth", 0);
	splx(s);

	if (rv == 0) {
		/*
		 * Copy the controller's health status buffer out (there is
		 * a race here if it changes again).
		 */
		rv = copyout(&mly->mly_mmbox->mmm_health.status,
		    uh->HealthStatusBuffer, sizeof(mh));
	}

	return (rv);
}
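
/*
 * Illustrative use of MLYIO_HEALTH from userspace (a sketch, not part of
 * the driver; the /dev/mly0 node name is an assumption, and headers and
 * error handling are elided).  The first call ordinarily returns the
 * current status; each subsequent call blocks until the change counter
 * last seen in `mh' is superseded, then overwrites `mh' with the new
 * status:
 *
 *	struct mly_user_health uh;
 *	struct mly_health_status mh;
 *	int fd;
 *
 *	fd = open("/dev/mly0", O_RDWR);
 *	memset(&uh, 0, sizeof(uh));
 *	memset(&mh, 0, sizeof(mh));
 *	uh.HealthStatusBuffer = &mh;
 *	while (ioctl(fd, MLYIO_HEALTH, &uh) == 0)
 *		report_health(&mh);
 *
 * report_health() is a stand-in for whatever the monitoring tool does
 * with the returned status.
 */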