/* $NetBSD: mly.c,v 1.46 2013/10/17 21:06:15 christos Exp $ */

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, Thor Lancelot Simon, and Eric Haszlakiewicz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2000, 2001 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from FreeBSD: mly.c,v 1.8 2001/07/14 00:12:22 msmith Exp
 */

/*
 * Driver for the Mylex AcceleRAID and eXtremeRAID family with v6 firmware.
 *
 * TODO:
 *
 * o Make mly->mly_btl a hash, then MLY_BTL_RESCAN becomes a SIMPLEQ.
 * o Handle FC and multiple LUNs.
 * o Fix mmbox usage.
 * o Fix transfer speed fudge.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mly.c,v 1.46 2013/10/17 21:06:15 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/scsiio.h>
#include <sys/kthread.h>
#include <sys/kauth.h>

#include <sys/bus.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/mlyreg.h>
#include <dev/pci/mlyio.h>
#include <dev/pci/mlyvar.h>
#include <dev/pci/mly_tables.h>

static void	mly_attach(device_t, device_t, void *);
static int	mly_match(device_t, cfdata_t, void *);
static const struct mly_ident *mly_find_ident(struct pci_attach_args *);
static int	mly_fwhandshake(struct mly_softc *);
static int	mly_flush(struct mly_softc *);
static int	mly_intr(void *);
static void	mly_shutdown(void *);

static int	mly_alloc_ccbs(struct mly_softc *);
static void	mly_check_event(struct mly_softc *);
static void	mly_complete_event(struct mly_softc *, struct mly_ccb *);
static void	mly_complete_rescan(struct mly_softc *, struct mly_ccb *);
static int	mly_dmamem_alloc(struct mly_softc *, int, bus_dmamap_t *,
				 void **, bus_addr_t *, bus_dma_segment_t *);
static void	mly_dmamem_free(struct mly_softc *, int, bus_dmamap_t,
				void *, bus_dma_segment_t *);
static int	mly_enable_mmbox(struct mly_softc *);
static void	mly_fetch_event(struct mly_softc *);
static int	mly_get_controllerinfo(struct mly_softc *);
static int	mly_get_eventstatus(struct mly_softc *);
static int	mly_ioctl(struct mly_softc *, struct mly_cmd_ioctl *,
			  void **, size_t, void *, size_t *);
static void	mly_padstr(char *, const char *, int);
static void	mly_process_event(struct mly_softc *, struct mly_event *);
static void	mly_release_ccbs(struct mly_softc *);
static int	mly_scan_btl(struct mly_softc *, int, int);
static void	mly_scan_channel(struct mly_softc *, int);
static void	mly_thread(void *);

static int	mly_ccb_alloc(struct mly_softc *, struct mly_ccb **);
static void	mly_ccb_complete(struct mly_softc *, struct mly_ccb *);
static void	mly_ccb_enqueue(struct mly_softc *, struct mly_ccb *);
static void	mly_ccb_free(struct mly_softc *, struct mly_ccb *);
static int	mly_ccb_map(struct mly_softc *, struct mly_ccb *);
static int	mly_ccb_poll(struct mly_softc *, struct mly_ccb *, int);
static int	mly_ccb_submit(struct mly_softc *, struct mly_ccb *);
static void	mly_ccb_unmap(struct mly_softc *, struct mly_ccb *);
static int	mly_ccb_wait(struct mly_softc *, struct mly_ccb *, int);

static void	mly_get_xfer_mode(struct mly_softc *, int,
				  struct scsipi_xfer_mode *);
static void	mly_scsipi_complete(struct mly_softc *, struct mly_ccb *);
static int	mly_scsipi_ioctl(struct scsipi_channel *, u_long, void *,
				 int, struct proc *);
static void	mly_scsipi_minphys(struct buf *);
static void	mly_scsipi_request(struct scsipi_channel *,
				   scsipi_adapter_req_t, void *);

static int	mly_user_command(struct mly_softc *, struct mly_user_command *);
static int	mly_user_health(struct mly_softc *, struct mly_user_health *);

extern struct cfdriver mly_cd;

CFATTACH_DECL_NEW(mly, sizeof(struct mly_softc),
    mly_match, mly_attach, NULL, NULL);

dev_type_open(mlyopen);
dev_type_close(mlyclose);
dev_type_ioctl(mlyioctl);

const struct cdevsw mly_cdevsw = {
	mlyopen, mlyclose, noread, nowrite, mlyioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
};

static struct mly_ident {
	u_short	vendor;
	u_short	product;
	u_short	subvendor;
	u_short	subproduct;
	int	hwif;
	const char	*desc;
} const mly_ident[] = {
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_EXTREMERAID,
		PCI_VENDOR_MYLEX,
		0x0040,
		MLY_HWIF_STRONGARM,
		"eXtremeRAID 2000"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_EXTREMERAID,
		PCI_VENDOR_MYLEX,
		0x0030,
		MLY_HWIF_STRONGARM,
		"eXtremeRAID 3000"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_ACCELERAID,
		PCI_VENDOR_MYLEX,
		0x0050,
		MLY_HWIF_I960RX,
		"AcceleRAID 352"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_ACCELERAID,
		PCI_VENDOR_MYLEX,
		0x0052,
		MLY_HWIF_I960RX,
		"AcceleRAID 170"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_ACCELERAID,
		PCI_VENDOR_MYLEX,
		0x0054,
		MLY_HWIF_I960RX,
		"AcceleRAID 160"
	},
};

static void	*mly_sdh;

/*
 * Try to find a `mly_ident' entry corresponding to this board.  An entry
 * with a zero subvendor matches on vendor/product alone.
 */
static const struct mly_ident *
mly_find_ident(struct pci_attach_args *pa)
{
	const struct mly_ident *mpi, *maxmpi;
	pcireg_t reg;

	mpi = mly_ident;
	maxmpi = mpi + sizeof(mly_ident) / sizeof(mly_ident[0]);

	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_I2O)
		return (NULL);

	for (; mpi < maxmpi; mpi++) {
		if (PCI_VENDOR(pa->pa_id) != mpi->vendor ||
		    PCI_PRODUCT(pa->pa_id) != mpi->product)
			continue;

		if (mpi->subvendor == 0x0000)
			return (mpi);

		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

		if (PCI_VENDOR(reg) == mpi->subvendor &&
		    PCI_PRODUCT(reg) == mpi->subproduct)
			return (mpi);
	}

	return (NULL);
}

/*
 * Match a supported board.
 */
static int
mly_match(device_t parent, cfdata_t cfdata, void *aux)
{

	return (mly_find_ident(aux) != NULL);
}

/*
 * Attach a supported board.
 */
static void
mly_attach(device_t parent, device_t self, void *aux)
{
	struct pci_attach_args *pa;
	struct mly_softc *mly;
	struct mly_ioctl_getcontrollerinfo *mi;
	const struct mly_ident *ident;
	pci_chipset_tag_t pc;
	pci_intr_handle_t ih;
	bus_space_handle_t memh, ioh;
	bus_space_tag_t memt, iot;
	pcireg_t reg;
	const char *intrstr;
	int ior, memr, i, rv, state;
	struct scsipi_adapter *adapt;
	struct scsipi_channel *chan;

	mly = device_private(self);
	mly->mly_dv = self;
	pa = aux;
	pc = pa->pa_pc;
	ident = mly_find_ident(pa);
	state = 0;

	mly->mly_dmat = pa->pa_dmat;
	mly->mly_hwif = ident->hwif;

	printf(": Mylex %s\n", ident->desc);

	/*
	 * Map the PCI register window.
	 */
	memr = -1;
	ior = -1;

	for (i = 0x10; i <= 0x14; i += 4) {
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, i);

		if (PCI_MAPREG_TYPE(reg) == PCI_MAPREG_TYPE_IO) {
			if (ior == -1 && PCI_MAPREG_IO_SIZE(reg) != 0)
				ior = i;
		} else {
			if (memr == -1 && PCI_MAPREG_MEM_SIZE(reg) != 0)
				memr = i;
		}
	}

	if (memr != -1)
		if (pci_mapreg_map(pa, memr, PCI_MAPREG_TYPE_MEM, 0,
		    &memt, &memh, NULL, NULL))
			memr = -1;
	if (ior != -1)
		if (pci_mapreg_map(pa, ior, PCI_MAPREG_TYPE_IO, 0,
		    &iot, &ioh, NULL, NULL))
			ior = -1;

	if (memr != -1) {
		mly->mly_iot = memt;
		mly->mly_ioh = memh;
	} else if (ior != -1) {
		mly->mly_iot = iot;
		mly->mly_ioh = ioh;
	} else {
		aprint_error_dev(self, "can't map i/o or memory space\n");
		return;
	}

	/*
	 * Enable the device.
	 */
	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    reg | PCI_COMMAND_MASTER_ENABLE);

	/*
	 * Map and establish the interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "can't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	mly->mly_ih = pci_intr_establish(pc, ih, IPL_BIO, mly_intr, mly);
	if (mly->mly_ih == NULL) {
		aprint_error_dev(self, "can't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}

	if (intrstr != NULL)
		aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	/*
	 * Take care of interface-specific tasks.
	 */
	switch (mly->mly_hwif) {
	case MLY_HWIF_I960RX:
		mly->mly_doorbell_true = 0x00;
		mly->mly_cmd_mailbox = MLY_I960RX_COMMAND_MAILBOX;
		mly->mly_status_mailbox = MLY_I960RX_STATUS_MAILBOX;
		mly->mly_idbr = MLY_I960RX_IDBR;
		mly->mly_odbr = MLY_I960RX_ODBR;
		mly->mly_error_status = MLY_I960RX_ERROR_STATUS;
		mly->mly_interrupt_status = MLY_I960RX_INTERRUPT_STATUS;
		mly->mly_interrupt_mask = MLY_I960RX_INTERRUPT_MASK;
		break;

	case MLY_HWIF_STRONGARM:
		mly->mly_doorbell_true = 0xff;
		mly->mly_cmd_mailbox = MLY_STRONGARM_COMMAND_MAILBOX;
		mly->mly_status_mailbox = MLY_STRONGARM_STATUS_MAILBOX;
		mly->mly_idbr = MLY_STRONGARM_IDBR;
		mly->mly_odbr = MLY_STRONGARM_ODBR;
		mly->mly_error_status = MLY_STRONGARM_ERROR_STATUS;
		mly->mly_interrupt_status = MLY_STRONGARM_INTERRUPT_STATUS;
		mly->mly_interrupt_mask = MLY_STRONGARM_INTERRUPT_MASK;
		break;
	}
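
	/*
	 * The StrongARM interface inverts the sense of the doorbell
	 * registers, so mly_doorbell_true (0x00 or 0xff) records the
	 * polarity that the mly_idbr_true()/mly_odbr_true() helpers are
	 * assumed to XOR against before testing doorbell bits.
	 */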

	/*
	 * Allocate and map the scatter/gather lists.
	 */
	rv = mly_dmamem_alloc(mly, MLY_SGL_SIZE * MLY_MAX_CCBS,
	    &mly->mly_sg_dmamap, (void **)&mly->mly_sg,
	    &mly->mly_sg_busaddr, &mly->mly_sg_seg);
	if (rv) {
		printf("%s: unable to allocate S/G maps\n",
		    device_xname(self));
		goto bad;
	}
	state++;

	/*
	 * Allocate and map the memory mailbox.
	 */
	rv = mly_dmamem_alloc(mly, sizeof(struct mly_mmbox),
	    &mly->mly_mmbox_dmamap, (void **)&mly->mly_mmbox,
	    &mly->mly_mmbox_busaddr, &mly->mly_mmbox_seg);
	if (rv) {
		aprint_error_dev(self, "unable to allocate mailboxes\n");
		goto bad;
	}
	state++;

	/*
	 * Initialise per-controller queues.
	 */
	SLIST_INIT(&mly->mly_ccb_free);
	SIMPLEQ_INIT(&mly->mly_ccb_queue);

	/*
	 * Disable interrupts before we start talking to the controller.
	 */
	mly_outb(mly, mly->mly_interrupt_mask, MLY_INTERRUPT_MASK_DISABLE);

	/*
	 * Wait for the controller to come ready, handshaking with the
	 * firmware if required.  This is typically only necessary on
	 * platforms where the controller BIOS does not run.
	 */
	if (mly_fwhandshake(mly)) {
		aprint_error_dev(self, "unable to bring controller online\n");
		goto bad;
	}

	/*
	 * Allocate initial command buffers, obtain controller feature
	 * information, and then reallocate command buffers, now that we
	 * know how many the controller supports.
	 */
	if (mly_alloc_ccbs(mly)) {
		aprint_error_dev(self, "unable to allocate CCBs\n");
		goto bad;
	}
	state++;
	if (mly_get_controllerinfo(mly)) {
		aprint_error_dev(self, "unable to retrieve controller info\n");
		goto bad;
	}
	mly_release_ccbs(mly);
	if (mly_alloc_ccbs(mly)) {
		aprint_error_dev(self, "unable to allocate CCBs\n");
		state--;
		goto bad;
	}

	/*
	 * Get the current event counter for health purposes, populate the
	 * initial health status buffer.
	 */
	if (mly_get_eventstatus(mly)) {
		aprint_error_dev(self, "unable to retrieve event status\n");
		goto bad;
	}

	/*
	 * Enable memory-mailbox mode.
	 */
	if (mly_enable_mmbox(mly)) {
		aprint_error_dev(self, "unable to enable memory mailbox\n");
		goto bad;
	}

	/*
	 * Print a little information about the controller.
	 */
	mi = mly->mly_controllerinfo;

	printf("%s: %d physical channel%s, firmware %d.%02d-%d-%02d "
	    "(%02d%02d%02d%02d), %dMB RAM\n", device_xname(self),
	    mi->physical_channels_present,
	    (mi->physical_channels_present) > 1 ? "s" : "",
	    mi->fw_major, mi->fw_minor, mi->fw_turn, mi->fw_build,
	    mi->fw_century, mi->fw_year, mi->fw_month, mi->fw_day,
	    le16toh(mi->memory_size));

	/*
	 * Register our `shutdownhook'.
	 */
	if (mly_sdh == NULL)
		mly_sdh = shutdownhook_establish(mly_shutdown, NULL);

	/*
	 * Clear any previous BTL information.  For each bus that scsipi
	 * wants to scan, we'll receive the SCBUSIOLLSCAN ioctl and retrieve
	 * all BTL info at that point.
	 */
	memset(&mly->mly_btl, 0, sizeof(mly->mly_btl));

	mly->mly_nchans = mly->mly_controllerinfo->physical_channels_present +
	    mly->mly_controllerinfo->virtual_channels_present;

	/*
	 * Attach to scsipi.
	 */
	adapt = &mly->mly_adapt;
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = self;
	adapt->adapt_nchannels = mly->mly_nchans;
	adapt->adapt_openings = mly->mly_ncmds - MLY_CCBS_RESV;
	adapt->adapt_max_periph = mly->mly_ncmds - MLY_CCBS_RESV;
	adapt->adapt_request = mly_scsipi_request;
	adapt->adapt_minphys = mly_scsipi_minphys;
	adapt->adapt_ioctl = mly_scsipi_ioctl;
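
	/*
	 * Note that MLY_CCBS_RESV command buffers are withheld from
	 * scsipi's openings above; presumably this keeps CCBs available
	 * for the driver's own use (event fetches and BTL rescans).
	 */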

	for (i = 0; i < mly->mly_nchans; i++) {
		chan = &mly->mly_chans[i];
		memset(chan, 0, sizeof(*chan));
		chan->chan_adapter = adapt;
		chan->chan_bustype = &scsi_bustype;
		chan->chan_channel = i;
		chan->chan_ntargets = MLY_MAX_TARGETS;
		chan->chan_nluns = MLY_MAX_LUNS;
		chan->chan_id = mly->mly_controllerparam->initiator_id;
		chan->chan_flags = SCSIPI_CHAN_NOSETTLE;
		config_found(self, chan, scsiprint);
	}

	/*
	 * Now enable interrupts...
	 */
	mly_outb(mly, mly->mly_interrupt_mask, MLY_INTERRUPT_MASK_ENABLE);

	/*
	 * Finally, create our monitoring thread.
	 */
	mly->mly_state |= MLY_STATE_INITOK;
	rv = kthread_create(PRI_NONE, 0, NULL, mly_thread, mly,
	    &mly->mly_thread, "%s", device_xname(self));
	if (rv != 0)
		aprint_error_dev(self, "unable to create thread (%d)\n", rv);
	return;

 bad:
	if (state > 2)
		mly_release_ccbs(mly);
	if (state > 1)
		mly_dmamem_free(mly, sizeof(struct mly_mmbox),
		    mly->mly_mmbox_dmamap, (void *)mly->mly_mmbox,
		    &mly->mly_mmbox_seg);
	if (state > 0)
		mly_dmamem_free(mly, MLY_SGL_SIZE * MLY_MAX_CCBS,
		    mly->mly_sg_dmamap, (void *)mly->mly_sg,
		    &mly->mly_sg_seg);
}

/*
 * Scan all possible devices on the specified channel.
 */
static void
mly_scan_channel(struct mly_softc *mly, int bus)
{
	int s, target;

	for (target = 0; target < MLY_MAX_TARGETS; target++) {
		s = splbio();
		if (!mly_scan_btl(mly, bus, target)) {
			tsleep(&mly->mly_btl[bus][target], PRIBIO, "mlyscan",
			    0);
		}
		splx(s);
	}
}

/*
 * Shut down all configured `mly' devices.
 */
static void
mly_shutdown(void *cookie)
{
	struct mly_softc *mly;
	int i;

	for (i = 0; i < mly_cd.cd_ndevs; i++) {
		if ((mly = device_lookup_private(&mly_cd, i)) == NULL)
			continue;

		if (mly_flush(mly))
			aprint_error_dev(mly->mly_dv,
			    "unable to flush cache\n");
	}
}

/*
 * Fill in the mly_controllerinfo and mly_controllerparam fields in the
 * softc.
 */
static int
mly_get_controllerinfo(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;
	int rv;

	/*
	 * Build the getcontrollerinfo ioctl and send it.
	 */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_GETCONTROLLERINFO;
	rv = mly_ioctl(mly, &mci, (void **)&mly->mly_controllerinfo,
	    sizeof(*mly->mly_controllerinfo), NULL, NULL);
	if (rv != 0)
		return (rv);

	/*
	 * Build the getcontrollerparameter ioctl and send it.
	 */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_GETCONTROLLERPARAMETER;
	rv = mly_ioctl(mly, &mci, (void **)&mly->mly_controllerparam,
	    sizeof(*mly->mly_controllerparam), NULL, NULL);

	return (rv);
}

/*
 * Rescan a device, possibly as a consequence of getting an event which
 * suggests that it may have changed.  Must be called with interrupts
 * blocked.
 */
static int
mly_scan_btl(struct mly_softc *mly, int bus, int target)
{
	struct mly_ccb *mc;
	struct mly_cmd_ioctl *mci;
	int rv;

	if (target == mly->mly_controllerparam->initiator_id) {
		mly->mly_btl[bus][target].mb_flags = MLY_BTL_PROTECTED;
		return (EIO);
	}

	/* Don't re-scan if a scan is already in progress. */
	if ((mly->mly_btl[bus][target].mb_flags & MLY_BTL_SCANNING) != 0)
		return (EBUSY);

	/* Get a command. */
	if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
		return (rv);

	/* Set up the data buffer. */
	mc->mc_data = malloc(sizeof(union mly_devinfo),
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	mc->mc_flags |= MLY_CCB_DATAIN;
	mc->mc_complete = mly_complete_rescan;
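
	/*
	 * The buffer is a union mly_devinfo, which is assumed large
	 * enough for either reply format; the ioctl built below selects
	 * the logical or physical variant according to the bus type.
	 */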

	/*
	 * Build the ioctl.
	 */
	mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
	mci->opcode = MDACMD_IOCTL;
	mci->timeout = 30 | MLY_TIMEOUT_SECONDS;
	memset(&mci->param, 0, sizeof(mci->param));

	if (MLY_BUS_IS_VIRTUAL(mly, bus)) {
		mc->mc_length = sizeof(struct mly_ioctl_getlogdevinfovalid);
		mci->data_size = htole32(mc->mc_length);
		mci->sub_ioctl = MDACIOCTL_GETLOGDEVINFOVALID;
		_lto3l(MLY_LOGADDR(0, MLY_LOGDEV_ID(mly, bus, target)),
		    mci->addr);
	} else {
		mc->mc_length = sizeof(struct mly_ioctl_getphysdevinfovalid);
		mci->data_size = htole32(mc->mc_length);
		mci->sub_ioctl = MDACIOCTL_GETPHYSDEVINFOVALID;
		_lto3l(MLY_PHYADDR(0, bus, target, 0), mci->addr);
	}

	/*
	 * Dispatch the command.
	 */
	if ((rv = mly_ccb_map(mly, mc)) != 0) {
		free(mc->mc_data, M_DEVBUF);
		mly_ccb_free(mly, mc);
		return (rv);
	}

	mly->mly_btl[bus][target].mb_flags |= MLY_BTL_SCANNING;
	mly_ccb_enqueue(mly, mc);
	return (0);
}

/*
 * Handle the completion of a rescan operation.
 */
static void
mly_complete_rescan(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct mly_ioctl_getlogdevinfovalid *ldi;
	struct mly_ioctl_getphysdevinfovalid *pdi;
	struct mly_cmd_ioctl *mci;
	struct mly_btl btl, *btlp;
	struct scsipi_xfer_mode xm;
	int bus, target, rescan;
	u_int tmp;

	mly_ccb_unmap(mly, mc);

	/*
	 * Recover the bus and target from the command.  We need these even
	 * in the case where we don't have a useful response.
	 */
	mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
	tmp = _3ltol(mci->addr);
	rescan = 0;

	if (mci->sub_ioctl == MDACIOCTL_GETLOGDEVINFOVALID) {
		bus = MLY_LOGDEV_BUS(mly, MLY_LOGADDR_DEV(tmp));
		target = MLY_LOGDEV_TARGET(mly, MLY_LOGADDR_DEV(tmp));
	} else {
		bus = MLY_PHYADDR_CHANNEL(tmp);
		target = MLY_PHYADDR_TARGET(tmp);
	}

	btlp = &mly->mly_btl[bus][target];

	/* The default result is 'no device'. */
	memset(&btl, 0, sizeof(btl));
	btl.mb_flags = MLY_BTL_PROTECTED;
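
	/*
	 * mc_length was set at submit time according to which device-info
	 * structure was requested; since the logical and physical variants
	 * differ in size, it also tells us below how to interpret the
	 * reply.
	 */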

	/* If the rescan completed OK, we have possibly-new BTL data. */
	if (mc->mc_status != 0)
		goto out;

	if (mc->mc_length == sizeof(*ldi)) {
		ldi = (struct mly_ioctl_getlogdevinfovalid *)mc->mc_data;
		tmp = le32toh(ldi->logical_device_number);

		if (MLY_LOGDEV_BUS(mly, tmp) != bus ||
		    MLY_LOGDEV_TARGET(mly, tmp) != target) {
#ifdef MLYDEBUG
			printf("%s: WARNING: BTL rescan (logical) for %d:%d "
			    "returned data for %d:%d instead\n",
			    device_xname(mly->mly_dv), bus, target,
			    MLY_LOGDEV_BUS(mly, tmp),
			    MLY_LOGDEV_TARGET(mly, tmp));
#endif
			goto out;
		}

		btl.mb_flags = MLY_BTL_LOGICAL | MLY_BTL_TQING;
		btl.mb_type = ldi->raid_level;
		btl.mb_state = ldi->state;
	} else if (mc->mc_length == sizeof(*pdi)) {
		pdi = (struct mly_ioctl_getphysdevinfovalid *)mc->mc_data;

		if (pdi->channel != bus || pdi->target != target) {
#ifdef MLYDEBUG
			printf("%s: WARNING: BTL rescan (physical) for %d:%d "
			    "returned data for %d:%d instead\n",
			    device_xname(mly->mly_dv),
			    bus, target, pdi->channel, pdi->target);
#endif
			goto out;
		}

		btl.mb_flags = MLY_BTL_PHYSICAL;
		btl.mb_type = MLY_DEVICE_TYPE_PHYSICAL;
		btl.mb_state = pdi->state;
		btl.mb_speed = pdi->speed;
		btl.mb_width = pdi->width;

		if (pdi->state != MLY_DEVICE_STATE_UNCONFIGURED)
			btl.mb_flags |= MLY_BTL_PROTECTED;
		if (pdi->command_tags != 0)
			btl.mb_flags |= MLY_BTL_TQING;
	} else {
		printf("%s: BTL rescan result invalid\n",
		    device_xname(mly->mly_dv));
		goto out;
	}

	/* Decide whether we need to rescan the device. */
	if (btl.mb_flags != btlp->mb_flags ||
	    btl.mb_speed != btlp->mb_speed ||
	    btl.mb_width != btlp->mb_width)
		rescan = 1;

 out:
	*btlp = btl;

	if (rescan && (btl.mb_flags & MLY_BTL_PROTECTED) == 0) {
		xm.xm_target = target;
		mly_get_xfer_mode(mly, bus, &xm);
		/* XXX SCSI mid-layer rescan goes here. */
	}

	/* Wake anybody waiting on the device to be rescanned. */
	wakeup(btlp);

	free(mc->mc_data, M_DEVBUF);
	mly_ccb_free(mly, mc);
}

/*
 * Get the current health status and set the 'next event' counter to suit.
 */
static int
mly_get_eventstatus(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;
	struct mly_health_status *mh;
	int rv;

	/* Build the gethealthstatus ioctl and send it. */
	memset(&mci, 0, sizeof(mci));
	mh = NULL;
	mci.sub_ioctl = MDACIOCTL_GETHEALTHSTATUS;

	rv = mly_ioctl(mly, &mci, (void *)&mh, sizeof(*mh), NULL, NULL);
	if (rv)
		return (rv);

	/* Get the event counter. */
	mly->mly_event_change = le32toh(mh->change_counter);
	mly->mly_event_waiting = le32toh(mh->next_event);
	mly->mly_event_counter = le32toh(mh->next_event);

	/* Save the health status into the memory mailbox */
	memcpy(&mly->mly_mmbox->mmm_health.status, mh, sizeof(*mh));

	bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
	    offsetof(struct mly_mmbox, mmm_health),
	    sizeof(mly->mly_mmbox->mmm_health),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	free(mh, M_DEVBUF);
	return (0);
}

/*
 * Enable memory mailbox mode.
 */
static int
mly_enable_mmbox(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;
	u_int8_t *sp;
	u_int64_t tmp;
	int rv;

	/* Build the ioctl and send it. */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_SETMEMORYMAILBOX;

	/* Set buffer addresses. */
	tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_command);
	mci.param.setmemorymailbox.command_mailbox_physaddr = htole64(tmp);

	tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_status);
	mci.param.setmemorymailbox.status_mailbox_physaddr = htole64(tmp);

	tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_health);
	mci.param.setmemorymailbox.health_buffer_physaddr = htole64(tmp);

	/* Set buffer sizes - abuse of data_size field is revolting. */
	sp = (u_int8_t *)&mci.data_size;
	sp[0] = (sizeof(union mly_cmd_packet) * MLY_MMBOX_COMMANDS) >> 10;
	sp[1] = (sizeof(union mly_status_packet) * MLY_MMBOX_STATUS) >> 10;
	mci.param.setmemorymailbox.health_buffer_size =
	    sizeof(union mly_health_region) >> 10;
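
	/*
	 * To spell out the packing above: all sizes are in KB (hence the
	 * >> 10), with the low byte of data_size carrying the command
	 * mailbox size and the next byte the status mailbox size.  For
	 * example, a 64KB command area and a 16KB status area would be
	 * encoded as sp[0] = 64, sp[1] = 16 (illustrative figures only;
	 * the real values follow from MLY_MMBOX_COMMANDS and
	 * MLY_MMBOX_STATUS).
	 */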

	rv = mly_ioctl(mly, &mci, NULL, 0, NULL, NULL);
	if (rv)
		return (rv);

	mly->mly_state |= MLY_STATE_MMBOX_ACTIVE;
	return (0);
}

/*
 * Flush all pending I/O from the controller.
 */
static int
mly_flush(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;

	/* Build the ioctl */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_FLUSHDEVICEDATA;
	mci.param.deviceoperation.operation_device =
	    MLY_OPDEVICE_PHYSICAL_CONTROLLER;

	/* Pass it off to the controller */
	return (mly_ioctl(mly, &mci, NULL, 0, NULL, NULL));
}

/*
 * Perform an ioctl command.
 *
 * If (data) is not NULL, the command requires data transfer to the
 * controller.  If (*data) is NULL the command requires data transfer from
 * the controller, and we will allocate a buffer for it.
 */
static int
mly_ioctl(struct mly_softc *mly, struct mly_cmd_ioctl *ioctl, void **data,
	  size_t datasize, void *sense_buffer, size_t *sense_length)
{
	struct mly_ccb *mc;
	struct mly_cmd_ioctl *mci;
	u_int8_t status;
	int rv;

	mc = NULL;
	if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
		goto bad;

	/*
	 * Copy the ioctl structure, but save some important fields and then
	 * fixup.
	 */
	mci = &mc->mc_packet->ioctl;
	ioctl->sense_buffer_address = htole64(mci->sense_buffer_address);
	ioctl->maximum_sense_size = mci->maximum_sense_size;
	*mci = *ioctl;
	mci->opcode = MDACMD_IOCTL;
	mci->timeout = 30 | MLY_TIMEOUT_SECONDS;

	/* Handle the data buffer. */
	if (data != NULL) {
		if (*data == NULL) {
			/* Allocate data buffer */
			mc->mc_data = malloc(datasize, M_DEVBUF, M_NOWAIT);
			mc->mc_flags |= MLY_CCB_DATAIN;
		} else {
			mc->mc_data = *data;
			mc->mc_flags |= MLY_CCB_DATAOUT;
		}
		mc->mc_length = datasize;
		mc->mc_packet->generic.data_size = htole32(datasize);
	}

	/* Run the command. */
	if (datasize > 0)
		if ((rv = mly_ccb_map(mly, mc)) != 0)
			goto bad;
	rv = mly_ccb_poll(mly, mc, 30000);
	if (datasize > 0)
		mly_ccb_unmap(mly, mc);
	if (rv != 0)
		goto bad;

	/* Clean up and return any data. */
	status = mc->mc_status;

	if (status != 0)
		printf("mly_ioctl: command status %d\n", status);

	if (mc->mc_sense > 0 && sense_buffer != NULL) {
		memcpy(sense_buffer, mc->mc_packet, mc->mc_sense);
		*sense_length = mc->mc_sense;
		goto bad;
	}

	/* Should we return a data pointer? */
	if (data != NULL && *data == NULL)
		*data = mc->mc_data;

	/* Command completed OK. */
	rv = (status != 0 ? EIO : 0);

 bad:
	if (mc != NULL) {
		/* Do we need to free a data buffer we allocated? */
		if (rv != 0 && mc->mc_data != NULL &&
		    (data == NULL || *data == NULL))
			free(mc->mc_data, M_DEVBUF);
		mly_ccb_free(mly, mc);
	}

	return (rv);
}

/*
 * Check for event(s) outstanding in the controller.
 */
static void
mly_check_event(struct mly_softc *mly)
{

	bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
	    offsetof(struct mly_mmbox, mmm_health),
	    sizeof(mly->mly_mmbox->mmm_health),
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

	/*
	 * The controller may have updated the health status information,
	 * so check for it here.  Note that the counters are all in host
	 * memory, so this check is very cheap.  Also note that we depend
	 * on checking on completion
	 */
	if (le32toh(mly->mly_mmbox->mmm_health.status.change_counter) !=
	    mly->mly_event_change) {
		mly->mly_event_change =
		    le32toh(mly->mly_mmbox->mmm_health.status.change_counter);
		mly->mly_event_waiting =
		    le32toh(mly->mly_mmbox->mmm_health.status.next_event);

		/* Wake up anyone that might be interested in this. */
		wakeup(&mly->mly_event_change);
	}

	bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
	    offsetof(struct mly_mmbox, mmm_health),
	    sizeof(mly->mly_mmbox->mmm_health),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	if (mly->mly_event_counter != mly->mly_event_waiting)
		mly_fetch_event(mly);
}

/*
 * Fetch one event from the controller.  If we fail due to resource
 * starvation, we'll be retried the next time a command completes.
 */
static void
mly_fetch_event(struct mly_softc *mly)
{
	struct mly_ccb *mc;
	struct mly_cmd_ioctl *mci;
	int s;
	u_int32_t event;

	/* Get a command. */
	if (mly_ccb_alloc(mly, &mc))
		return;

	/* Set up the data buffer. */
	mc->mc_data = malloc(sizeof(struct mly_event), M_DEVBUF,
	    M_NOWAIT|M_ZERO);

	mc->mc_length = sizeof(struct mly_event);
	mc->mc_flags |= MLY_CCB_DATAIN;
	mc->mc_complete = mly_complete_event;

	/*
	 * Get an event number to fetch.  It's possible that we've raced
	 * with another context for the last event, in which case there
	 * will be no more events.
	 */
	s = splbio();
	if (mly->mly_event_counter == mly->mly_event_waiting) {
		splx(s);
		free(mc->mc_data, M_DEVBUF);
		mly_ccb_free(mly, mc);
		return;
	}
	event = mly->mly_event_counter++;
	splx(s);

	/*
	 * Build the ioctl.
	 *
	 * At this point we are committed to sending this request, as it
	 * will be the only one constructed for this particular event
	 * number.
	 */
	mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
	mci->opcode = MDACMD_IOCTL;
	mci->data_size = htole32(sizeof(struct mly_event));
	_lto3l(MLY_PHYADDR(0, 0, (event >> 16) & 0xff, (event >> 24) & 0xff),
	    mci->addr);
	mci->timeout = 30 | MLY_TIMEOUT_SECONDS;
	mci->sub_ioctl = MDACIOCTL_GETEVENT;
	mci->param.getevent.sequence_number_low = htole16(event & 0xffff);
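
	/*
	 * Note how the 32-bit sequence number travels: the low 16 bits go
	 * in sequence_number_low, while bytes 2 and 3 ride in the target
	 * and LUN fields of the 3-byte address built by MLY_PHYADDR()
	 * above.
	 */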

	/*
	 * Submit the command.
	 */
	if (mly_ccb_map(mly, mc) != 0)
		goto bad;
	mly_ccb_enqueue(mly, mc);
	return;

 bad:
	printf("%s: couldn't fetch event %u\n", device_xname(mly->mly_dv),
	    event);
	free(mc->mc_data, M_DEVBUF);
	mly_ccb_free(mly, mc);
}

/*
 * Handle the completion of an event poll.
 */
static void
mly_complete_event(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct mly_event *me;

	me = (struct mly_event *)mc->mc_data;
	mly_ccb_unmap(mly, mc);
	mly_ccb_free(mly, mc);

	/* If the event was successfully fetched, process it. */
	if (mc->mc_status == SCSI_OK)
		mly_process_event(mly, me);
	else
		aprint_error_dev(mly->mly_dv,
		    "unable to fetch event; status = 0x%x\n", mc->mc_status);

	free(me, M_DEVBUF);

	/* Check for another event. */
	mly_check_event(mly);
}

/*
 * Process a controller event.  Called with interrupts blocked (i.e., at
 * interrupt time).
 */
static void
mly_process_event(struct mly_softc *mly, struct mly_event *me)
{
	struct scsi_sense_data *ssd;
	int bus, target, event, class, action;
	const char *fp, *tp;

	ssd = (struct scsi_sense_data *)&me->sense[0];

	/*
	 * Errors can be reported using vendor-unique sense data.  In this
	 * case, the event code will be 0x1c (Request sense data present),
	 * the sense key will be 0x09 (vendor specific), the MSB of the ASC
	 * will be set, and the actual event code will be a 16-bit value
	 * comprised of the ASCQ (low byte) and low seven bits of the ASC
	 * (low seven bits of the high byte).
	 */
	if (le32toh(me->code) == 0x1c &&
	    SSD_SENSE_KEY(ssd->flags) == SKEY_VENDOR_SPECIFIC &&
	    (ssd->asc & 0x80) != 0) {
		event = ((int)(ssd->asc & ~0x80) << 8) + ssd->ascq;
	} else
		event = le32toh(me->code);
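
	/*
	 * A worked example of the encoding above: ASC 0x90 with ASCQ 0x02
	 * yields event ((0x90 & ~0x80) << 8) + 0x02 = 0x1002.  (The values
	 * are hypothetical, chosen only to illustrate the arithmetic.)
	 */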

	/* Look up event, get codes. */
	fp = mly_describe_code(mly_table_event, event);

	/* Quiet event? */
	class = fp[0];
#ifdef notyet
	if (isupper(class) && bootverbose)
		class = tolower(class);
#endif

	/* Get action code, text string. */
	action = fp[1];
	tp = fp + 3;

	/*
	 * Print some information about the event.
	 *
	 * This code uses a table derived from the corresponding portion of
	 * the Linux driver, and thus the parser is very similar.
	 */
	switch (class) {
	case 'p':
		/*
		 * Error on physical drive.
		 */
		printf("%s: physical device %d:%d %s\n",
		    device_xname(mly->mly_dv), me->channel, me->target, tp);
		if (action == 'r')
			mly->mly_btl[me->channel][me->target].mb_flags |=
			    MLY_BTL_RESCAN;
		break;

	case 'l':
	case 'm':
		/*
		 * Error on logical unit, or message about logical unit.
		 */
		bus = MLY_LOGDEV_BUS(mly, me->lun);
		target = MLY_LOGDEV_TARGET(mly, me->lun);
		printf("%s: logical device %d:%d %s\n",
		    device_xname(mly->mly_dv), bus, target, tp);
		if (action == 'r')
			mly->mly_btl[bus][target].mb_flags |= MLY_BTL_RESCAN;
		break;

	case 's':
		/*
		 * Report of sense data.
		 */
		if ((SSD_SENSE_KEY(ssd->flags) == SKEY_NO_SENSE ||
		    SSD_SENSE_KEY(ssd->flags) == SKEY_NOT_READY) &&
		    ssd->asc == 0x04 &&
		    (ssd->ascq == 0x01 || ssd->ascq == 0x02)) {
			/* Ignore NO_SENSE or NOT_READY in one case */
			break;
		}

		/*
		 * XXX Should translate this if SCSIVERBOSE.
		 */
		printf("%s: physical device %d:%d %s\n",
		    device_xname(mly->mly_dv), me->channel, me->target, tp);
		printf("%s: sense key %d asc %02x ascq %02x\n",
		    device_xname(mly->mly_dv), SSD_SENSE_KEY(ssd->flags),
		    ssd->asc, ssd->ascq);
		printf("%s: info %x%x%x%x csi %x%x%x%x\n",
		    device_xname(mly->mly_dv), ssd->info[0], ssd->info[1],
		    ssd->info[2], ssd->info[3], ssd->csi[0],
		    ssd->csi[1], ssd->csi[2], ssd->csi[3]);
		if (action == 'r')
			mly->mly_btl[me->channel][me->target].mb_flags |=
			    MLY_BTL_RESCAN;
		break;

	case 'e':
		printf("%s: ", device_xname(mly->mly_dv));
		printf(tp, me->target, me->lun);
		break;

	case 'c':
		printf("%s: controller %s\n", device_xname(mly->mly_dv), tp);
		break;

	case '?':
		printf("%s: %s - %d\n", device_xname(mly->mly_dv), tp, event);
		break;

	default:
		/* Probably a 'noisy' event being ignored. */
		break;
	}
}

/*
 * Perform periodic activities.
 */
static void
mly_thread(void *cookie)
{
	struct mly_softc *mly;
	struct mly_btl *btl;
	int s, bus, target, done;

	mly = (struct mly_softc *)cookie;

	for (;;) {
		/* Check for new events. */
		mly_check_event(mly);

		/* Re-scan up to 1 device. */
		s = splbio();
		done = 0;
		for (bus = 0; bus < mly->mly_nchans && !done; bus++) {
			for (target = 0; target < MLY_MAX_TARGETS; target++) {
				/* Perform device rescan? */
				btl = &mly->mly_btl[bus][target];
				if ((btl->mb_flags & MLY_BTL_RESCAN) != 0) {
					btl->mb_flags ^= MLY_BTL_RESCAN;
					mly_scan_btl(mly, bus, target);
					done = 1;
					break;
				}
			}
		}
		splx(s);

		/* Sleep for N seconds. */
		tsleep(mly_thread, PWAIT, "mlyzzz",
		    hz * MLY_PERIODIC_INTERVAL);
	}
}

/*
 * Submit a command to the controller and poll on completion.  The timeout
 * is expressed in milliseconds.  Return non-zero on timeout.
 */
static int
mly_ccb_poll(struct mly_softc *mly, struct mly_ccb *mc, int timo)
{
	int rv;

	if ((rv = mly_ccb_submit(mly, mc)) != 0)
		return (rv);

	for (timo *= 10; timo != 0; timo--) {
		if ((mc->mc_flags & MLY_CCB_COMPLETE) != 0)
			break;
		mly_intr(mly);
		DELAY(100);
	}

	return (timo == 0);
}

/*
 * Submit a command to the controller and sleep on completion.  The timeout
 * is expressed in milliseconds.  Return non-zero on timeout.
 */
static int
mly_ccb_wait(struct mly_softc *mly, struct mly_ccb *mc, int timo)
{
	int rv, s;

	mly_ccb_enqueue(mly, mc);

	s = splbio();
	if ((mc->mc_flags & MLY_CCB_COMPLETE) != 0) {
		splx(s);
		return (0);
	}
	rv = tsleep(mc, PRIBIO, "mlywccb", timo * hz / 1000);
	splx(s);

	return (rv);
}

/*
 * If a CCB is specified, enqueue it.  Pull CCBs off the software queue in
 * the order that they were enqueued and try to submit their command blocks
 * to the controller for execution.
 */
void
mly_ccb_enqueue(struct mly_softc *mly, struct mly_ccb *mc)
{
	int s;

	s = splbio();

	if (mc != NULL)
		SIMPLEQ_INSERT_TAIL(&mly->mly_ccb_queue, mc, mc_link.simpleq);

	while ((mc = SIMPLEQ_FIRST(&mly->mly_ccb_queue)) != NULL) {
		if (mly_ccb_submit(mly, mc))
			break;
		SIMPLEQ_REMOVE_HEAD(&mly->mly_ccb_queue, mc_link.simpleq);
	}

	splx(s);
}

/*
 * Deliver a command to the controller.
 */
static int
mly_ccb_submit(struct mly_softc *mly, struct mly_ccb *mc)
{
	union mly_cmd_packet *pkt;
	int s, off;

	mc->mc_packet->generic.command_id = htole16(mc->mc_slot);

	bus_dmamap_sync(mly->mly_dmat, mly->mly_pkt_dmamap,
	    mc->mc_packetphys - mly->mly_pkt_busaddr,
	    sizeof(union mly_cmd_packet),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	s = splbio();

	/*
	 * Do we have to use the hardware mailbox?
	 */
	if ((mly->mly_state & MLY_STATE_MMBOX_ACTIVE) == 0) {
		/*
		 * Check to see if the controller is ready for us.
		 */
		if (mly_idbr_true(mly, MLY_HM_CMDSENT)) {
			splx(s);
			return (EBUSY);
		}

		/*
		 * It's ready, send the command.
		 */
		mly_outl(mly, mly->mly_cmd_mailbox,
		    (u_int64_t)mc->mc_packetphys & 0xffffffff);
		mly_outl(mly, mly->mly_cmd_mailbox + 4,
		    (u_int64_t)mc->mc_packetphys >> 32);
		mly_outb(mly, mly->mly_idbr, MLY_HM_CMDSENT);
	} else {
		pkt = &mly->mly_mmbox->mmm_command[mly->mly_mmbox_cmd_idx];
		off = (char *)pkt - (char *)mly->mly_mmbox;

		bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
		    off, sizeof(mly->mly_mmbox->mmm_command[0]),
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

		/* Check to see if the next index is free yet. */
		if (pkt->mmbox.flag != 0) {
			splx(s);
			return (EBUSY);
		}

		/* Copy in new command */
		memcpy(pkt->mmbox.data, mc->mc_packet->mmbox.data,
		    sizeof(pkt->mmbox.data));

		/* Copy flag last. */
		pkt->mmbox.flag = mc->mc_packet->mmbox.flag;

		bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
		    off, sizeof(mly->mly_mmbox->mmm_command[0]),
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		/* Signal controller and update index. */
		mly_outb(mly, mly->mly_idbr, MLY_AM_CMDSENT);
		mly->mly_mmbox_cmd_idx =
		    (mly->mly_mmbox_cmd_idx + 1) % MLY_MMBOX_COMMANDS;
	}

	splx(s);
	return (0);
}
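
/*
 * Both memory-mailbox rings follow the same ownership convention, as seen
 * in mly_ccb_submit() above and mly_intr() below: a non-zero flag byte
 * marks a slot as owned by the consumer.  The host writes the flag last
 * when posting a command and clears it once status has been collected;
 * the controller is assumed to do the converse on its side.
 */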

/*
 * Pick up completed commands from the controller and handle accordingly.
 */
int
mly_intr(void *cookie)
{
	struct mly_ccb *mc;
	union mly_status_packet *sp;
	u_int16_t slot;
	int forus, off;
	struct mly_softc *mly;

	mly = cookie;
	forus = 0;

	/*
	 * Pick up hardware-mailbox commands.
	 */
	if (mly_odbr_true(mly, MLY_HM_STSREADY)) {
		slot = mly_inw(mly, mly->mly_status_mailbox);

		if (slot < MLY_SLOT_MAX) {
			mc = mly->mly_ccbs + (slot - MLY_SLOT_START);
			mc->mc_status =
			    mly_inb(mly, mly->mly_status_mailbox + 2);
			mc->mc_sense =
			    mly_inb(mly, mly->mly_status_mailbox + 3);
			mc->mc_resid =
			    mly_inl(mly, mly->mly_status_mailbox + 4);

			mly_ccb_complete(mly, mc);
		} else {
			/* Slot 0xffff may mean "extremely bogus command". */
			printf("%s: got HM completion for illegal slot %u\n",
			    device_xname(mly->mly_dv), slot);
		}

		/* Unconditionally acknowledge status. */
		mly_outb(mly, mly->mly_odbr, MLY_HM_STSREADY);
		mly_outb(mly, mly->mly_idbr, MLY_HM_STSACK);
		forus = 1;
	}

	/*
	 * Pick up memory-mailbox commands.
	 */
	if (mly_odbr_true(mly, MLY_AM_STSREADY)) {
		for (;;) {
			sp = &mly->mly_mmbox->mmm_status[
			    mly->mly_mmbox_sts_idx];
			off = (char *)sp - (char *)mly->mly_mmbox;

			bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
			    off, sizeof(mly->mly_mmbox->mmm_command[0]),
			    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

			/* Check for more status. */
			if (sp->mmbox.flag == 0)
				break;

			/* Get slot number. */
			slot = le16toh(sp->status.command_id);
			if (slot < MLY_SLOT_MAX) {
				mc = mly->mly_ccbs + (slot - MLY_SLOT_START);
				mc->mc_status = sp->status.status;
				mc->mc_sense = sp->status.sense_length;
				mc->mc_resid = le32toh(sp->status.residue);
				mly_ccb_complete(mly, mc);
			} else {
				/*
				 * Slot 0xffff may mean "extremely bogus
				 * command".
				 */
				printf("%s: got AM completion for illegal "
				    "slot %u at %d\n",
				    device_xname(mly->mly_dv), slot,
				    mly->mly_mmbox_sts_idx);
			}

			/* Clear and move to next index. */
			sp->mmbox.flag = 0;
			mly->mly_mmbox_sts_idx =
			    (mly->mly_mmbox_sts_idx + 1) % MLY_MMBOX_STATUS;
		}

		/* Acknowledge that we have collected status value(s). */
		mly_outb(mly, mly->mly_odbr, MLY_AM_STSREADY);
		forus = 1;
	}

	/*
	 * Run the queue.
	 */
	if (forus && !SIMPLEQ_EMPTY(&mly->mly_ccb_queue))
		mly_ccb_enqueue(mly, NULL);

	return (forus);
}

/*
 * Process completed commands
 */
static void
mly_ccb_complete(struct mly_softc *mly, struct mly_ccb *mc)
{
	void (*complete)(struct mly_softc *, struct mly_ccb *);

	bus_dmamap_sync(mly->mly_dmat, mly->mly_pkt_dmamap,
	    mc->mc_packetphys - mly->mly_pkt_busaddr,
	    sizeof(union mly_cmd_packet),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	complete = mc->mc_complete;
	mc->mc_flags |= MLY_CCB_COMPLETE;

	/*
	 * Call completion handler or wake up sleeping consumer.
	 */
	if (complete != NULL)
		(*complete)(mly, mc);
	else
		wakeup(mc);
}

/*
 * Allocate a command.
 */
int
mly_ccb_alloc(struct mly_softc *mly, struct mly_ccb **mcp)
{
	struct mly_ccb *mc;
	int s;

	s = splbio();
	mc = SLIST_FIRST(&mly->mly_ccb_free);
	if (mc != NULL)
		SLIST_REMOVE_HEAD(&mly->mly_ccb_free, mc_link.slist);
	splx(s);

	*mcp = mc;
	return (mc == NULL ? EAGAIN : 0);
}

/*
 * Release a command back to the freelist.
 */
void
mly_ccb_free(struct mly_softc *mly, struct mly_ccb *mc)
{
	int s;

	/*
	 * Reset parts of the command that could confuse a consumer that
	 * doesn't initialize them when this CCB is next allocated.
	 */
	mc->mc_data = NULL;
	mc->mc_flags = 0;
	mc->mc_complete = NULL;
	mc->mc_private = NULL;
	mc->mc_packet->generic.command_control = 0;

	/*
	 * By default, we set up to overwrite the command packet with sense
	 * information.
	 */
	mc->mc_packet->generic.sense_buffer_address =
	    htole64(mc->mc_packetphys);
	mc->mc_packet->generic.maximum_sense_size =
	    sizeof(union mly_cmd_packet);

	s = splbio();
	SLIST_INSERT_HEAD(&mly->mly_ccb_free, mc, mc_link.slist);
	splx(s);
}

/*
 * Allocate and initialize command and packet structures.
 *
 * If the controller supports fewer than MLY_MAX_CCBS commands, limit our
 * allocation to that number.  If we don't yet know how many commands the
 * controller supports, allocate a very small set (suitable for
 * initialization purposes only).
 */
static int
mly_alloc_ccbs(struct mly_softc *mly)
{
	struct mly_ccb *mc;
	int i, rv;

	if (mly->mly_controllerinfo == NULL)
		mly->mly_ncmds = MLY_CCBS_RESV;
	else {
		i = le16toh(mly->mly_controllerinfo->maximum_parallel_commands);
		mly->mly_ncmds = min(MLY_MAX_CCBS, i);
	}

	/*
	 * Allocate enough space for all the command packets in one chunk
	 * and map them permanently into controller-visible space.
	 */
	rv = mly_dmamem_alloc(mly,
	    mly->mly_ncmds * sizeof(union mly_cmd_packet),
	    &mly->mly_pkt_dmamap, (void **)&mly->mly_pkt,
	    &mly->mly_pkt_busaddr, &mly->mly_pkt_seg);
	if (rv)
		return (rv);

	mly->mly_ccbs = malloc(sizeof(struct mly_ccb) * mly->mly_ncmds,
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	for (i = 0; i < mly->mly_ncmds; i++) {
		mc = mly->mly_ccbs + i;
		mc->mc_slot = MLY_SLOT_START + i;
		mc->mc_packet = mly->mly_pkt + i;
		mc->mc_packetphys = mly->mly_pkt_busaddr +
		    (i * sizeof(union mly_cmd_packet));

		rv = bus_dmamap_create(mly->mly_dmat, MLY_MAX_XFER,
		    MLY_MAX_SEGS, MLY_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &mc->mc_datamap);
		if (rv) {
			mly_release_ccbs(mly);
			return (rv);
		}

		mly_ccb_free(mly, mc);
	}

	return (0);
}

/*
 * Free all the storage held by commands.
 *
 * Must be called with all commands on the free list.
 */
static void
mly_release_ccbs(struct mly_softc *mly)
{
	struct mly_ccb *mc;

	/* Throw away command buffer DMA maps. */
	while (mly_ccb_alloc(mly, &mc) == 0)
		bus_dmamap_destroy(mly->mly_dmat, mc->mc_datamap);

	/* Release CCB storage. */
	free(mly->mly_ccbs, M_DEVBUF);

	/* Release the packet storage. */
	mly_dmamem_free(mly, mly->mly_ncmds * sizeof(union mly_cmd_packet),
	    mly->mly_pkt_dmamap, (void *)mly->mly_pkt, &mly->mly_pkt_seg);
}
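
/*
 * A note on scatter/gather bookkeeping: each CCB owns a fixed window of
 * MLY_MAX_SEGS entries in the mly_sg area allocated at attach time,
 * located at (slot - MLY_SLOT_START) * MLY_MAX_SEGS.  mly_ccb_map() below
 * only uses that window when a transfer needs more than the two segments
 * that fit directly in the command packet.
 */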

/*
 * Map a command into controller-visible space.
 */
static int
mly_ccb_map(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct mly_cmd_generic *gen;
	struct mly_sg_entry *sg;
	bus_dma_segment_t *ds;
	int flg, nseg, rv;

#ifdef DIAGNOSTIC
	/* Don't map more than once. */
	if ((mc->mc_flags & MLY_CCB_MAPPED) != 0)
		panic("mly_ccb_map: already mapped");
	mc->mc_flags |= MLY_CCB_MAPPED;

	/* Does the command have a data buffer? */
	if (mc->mc_data == NULL)
		panic("mly_ccb_map: no data buffer");
#endif

	rv = bus_dmamap_load(mly->mly_dmat, mc->mc_datamap, mc->mc_data,
	    mc->mc_length, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((mc->mc_flags & MLY_CCB_DATAIN) != 0 ?
	    BUS_DMA_READ : BUS_DMA_WRITE));
	if (rv != 0)
		return (rv);

	gen = &mc->mc_packet->generic;

	/*
	 * Can we use the transfer structure directly?
	 */
	if ((nseg = mc->mc_datamap->dm_nsegs) <= 2) {
		mc->mc_sgoff = -1;
		sg = &gen->transfer.direct.sg[0];
	} else {
		mc->mc_sgoff = (mc->mc_slot - MLY_SLOT_START) *
		    MLY_MAX_SEGS;
		sg = mly->mly_sg + mc->mc_sgoff;
		gen->command_control |= MLY_CMDCTL_EXTENDED_SG_TABLE;
		gen->transfer.indirect.entries[0] = htole16(nseg);
		gen->transfer.indirect.table_physaddr[0] =
		    htole64(mly->mly_sg_busaddr +
		    (mc->mc_sgoff * sizeof(struct mly_sg_entry)));
	}

	/*
	 * Fill the S/G table.
	 */
	for (ds = mc->mc_datamap->dm_segs; nseg != 0; nseg--, sg++, ds++) {
		sg->physaddr = htole64(ds->ds_addr);
		sg->length = htole64(ds->ds_len);
	}

	/*
	 * Sync up the data map.
	 */
	if ((mc->mc_flags & MLY_CCB_DATAIN) != 0)
		flg = BUS_DMASYNC_PREREAD;
	else /* if ((mc->mc_flags & MLY_CCB_DATAOUT) != 0) */ {
		gen->command_control |= MLY_CMDCTL_DATA_DIRECTION;
		flg = BUS_DMASYNC_PREWRITE;
	}

	bus_dmamap_sync(mly->mly_dmat, mc->mc_datamap, 0, mc->mc_length, flg);

	/*
	 * Sync up the chained S/G table, if we're using one.
	 */
	if (mc->mc_sgoff == -1)
		return (0);

	bus_dmamap_sync(mly->mly_dmat, mly->mly_sg_dmamap, mc->mc_sgoff,
	    MLY_SGL_SIZE, BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Unmap a command from controller-visible space.
 */
static void
mly_ccb_unmap(struct mly_softc *mly, struct mly_ccb *mc)
{
	int flg;

#ifdef DIAGNOSTIC
	if ((mc->mc_flags & MLY_CCB_MAPPED) == 0)
		panic("mly_ccb_unmap: not mapped");
	mc->mc_flags &= ~MLY_CCB_MAPPED;
#endif

	if ((mc->mc_flags & MLY_CCB_DATAIN) != 0)
		flg = BUS_DMASYNC_POSTREAD;
	else /* if ((mc->mc_flags & MLY_CCB_DATAOUT) != 0) */
		flg = BUS_DMASYNC_POSTWRITE;

	bus_dmamap_sync(mly->mly_dmat, mc->mc_datamap, 0, mc->mc_length, flg);
	bus_dmamap_unload(mly->mly_dmat, mc->mc_datamap);

	if (mc->mc_sgoff == -1)
		return;

	bus_dmamap_sync(mly->mly_dmat, mly->mly_sg_dmamap, mc->mc_sgoff,
	    MLY_SGL_SIZE, BUS_DMASYNC_POSTWRITE);
}

/*
 * Adjust the size of each I/O before it passes to the SCSI layer.
 */
static void
mly_scsipi_minphys(struct buf *bp)
{

	if (bp->b_bcount > MLY_MAX_XFER)
		bp->b_bcount = MLY_MAX_XFER;
	minphys(bp);
}
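
/*
 * The clamp above matches the maximum transfer size used when the per-CCB
 * DMA maps are created in mly_alloc_ccbs(), so a request that passes
 * mly_scsipi_minphys() should always be mappable.
 */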

/*
 * Start a SCSI command.
 */
static void
mly_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
		   void *arg)
{
	struct mly_ccb *mc;
	struct mly_cmd_scsi_small *ss;
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct mly_softc *mly;
	struct mly_btl *btl;
	int s, tmp;

	mly = device_private(chan->chan_adapter->adapt_dev);

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		btl = &mly->mly_btl[chan->chan_channel][periph->periph_target];
		s = splbio();
		tmp = btl->mb_flags;
		splx(s);

		/*
		 * Check for I/O attempt to a protected or non-existent
		 * device.
		 */
		if ((tmp & MLY_BTL_PROTECTED) != 0) {
			xs->error = XS_SELTIMEOUT;
			scsipi_done(xs);
			break;
		}

#ifdef DIAGNOSTIC
		/* XXX Increase if/when we support large SCSI commands. */
		if (xs->cmdlen > MLY_CMD_SCSI_SMALL_CDB) {
			printf("%s: cmd too large\n",
			    device_xname(mly->mly_dv));
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			break;
		}
#endif

		if (mly_ccb_alloc(mly, &mc)) {
			xs->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(xs);
			break;
		}

		/* Build the command. */
		mc->mc_data = xs->data;
		mc->mc_length = xs->datalen;
		mc->mc_complete = mly_scsipi_complete;
		mc->mc_private = xs;

		/* Build the packet for the controller. */
		ss = &mc->mc_packet->scsi_small;
		ss->opcode = MDACMD_SCSI;
#ifdef notdef
		/*
		 * XXX FreeBSD does this, but it doesn't fix anything,
		 * XXX and appears potentially harmful.
		 */
		ss->command_control |= MLY_CMDCTL_DISABLE_DISCONNECT;
#endif

		ss->data_size = htole32(xs->datalen);
		_lto3l(MLY_PHYADDR(0, chan->chan_channel,
		    periph->periph_target, periph->periph_lun), ss->addr);
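
		/*
		 * The controller expresses timeouts as a small count
		 * tagged with a unit flag.  xs->timeout is in
		 * milliseconds, so convert below to the coarsest unit
		 * (seconds, minutes or hours) that keeps the count in
		 * range.
		 */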
		if (xs->timeout < 60 * 1000)
			ss->timeout = xs->timeout / 1000 |
			    MLY_TIMEOUT_SECONDS;
		else if (xs->timeout < 60 * 60 * 1000)
			ss->timeout = xs->timeout / (60 * 1000) |
			    MLY_TIMEOUT_MINUTES;
		else
			ss->timeout = xs->timeout / (60 * 60 * 1000) |
			    MLY_TIMEOUT_HOURS;

		ss->maximum_sense_size = sizeof(xs->sense);
		ss->cdb_length = xs->cmdlen;
		memcpy(ss->cdb, xs->cmd, xs->cmdlen);

		if (mc->mc_length != 0) {
			if ((xs->xs_control & XS_CTL_DATA_OUT) != 0)
				mc->mc_flags |= MLY_CCB_DATAOUT;
			else /* if ((xs->xs_control & XS_CTL_DATA_IN) != 0) */
				mc->mc_flags |= MLY_CCB_DATAIN;

			if (mly_ccb_map(mly, mc) != 0) {
				xs->error = XS_DRIVER_STUFFUP;
				mly_ccb_free(mly, mc);
				scsipi_done(xs);
				break;
			}
		}

		/*
		 * Give the command to the controller.
		 */
		if ((xs->xs_control & XS_CTL_POLL) != 0) {
			if (mly_ccb_poll(mly, mc, xs->timeout + 5000)) {
				xs->error = XS_REQUEUE;
				if (mc->mc_length != 0)
					mly_ccb_unmap(mly, mc);
				mly_ccb_free(mly, mc);
				scsipi_done(xs);
			}
		} else
			mly_ccb_enqueue(mly, mc);

		break;

	case ADAPTER_REQ_GROW_RESOURCES:
		/*
		 * Not supported.
		 */
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
		/*
		 * We can't change the transfer mode, but at least let
		 * scsipi know what the adapter has negotiated.
		 */
		mly_get_xfer_mode(mly, chan->chan_channel, arg);
		break;
	}
}

/*
 * Handle completion of a SCSI command.
 */
static void
mly_scsipi_complete(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct scsipi_xfer *xs;
	struct scsipi_channel *chan;
	struct scsipi_inquiry_data *inq;
	struct mly_btl *btl;
	int target, sl, s;
	const char *p;

	xs = mc->mc_private;
	xs->status = mc->mc_status;

	/*
	 * XXX The `resid' value as returned by the controller appears to
	 * be bogus, so we always set it to zero.  Is it perhaps the
	 * transfer count?
	 */
	xs->resid = 0;		/* mc->mc_resid; */

	if (mc->mc_length != 0)
		mly_ccb_unmap(mly, mc);

	switch (mc->mc_status) {
	case SCSI_OK:
		/*
		 * In order to report logical device type and status, we
		 * overwrite the result of the INQUIRY command to logical
		 * devices.
		 */
		if (xs->cmd->opcode == INQUIRY) {
			chan = xs->xs_periph->periph_channel;
			target = xs->xs_periph->periph_target;
			btl = &mly->mly_btl[chan->chan_channel][target];

			s = splbio();
			if ((btl->mb_flags & MLY_BTL_LOGICAL) != 0) {
				inq = (struct scsipi_inquiry_data *)xs->data;
				mly_padstr(inq->vendor, "MYLEX", 8);
				p = mly_describe_code(mly_table_device_type,
				    btl->mb_type);
				mly_padstr(inq->product, p, 16);
				p = mly_describe_code(mly_table_device_state,
				    btl->mb_state);
				mly_padstr(inq->revision, p, 4);
			}
			splx(s);
		}

		xs->error = XS_NOERROR;
		break;

	case SCSI_CHECK:
		sl = mc->mc_sense;
		if (sl > sizeof(xs->sense.scsi_sense))
			sl = sizeof(xs->sense.scsi_sense);
		memcpy(&xs->sense.scsi_sense, mc->mc_packet, sl);
		xs->error = XS_SENSE;
		break;

	case SCSI_BUSY:
	case SCSI_QUEUE_FULL:
		xs->error = XS_BUSY;
		break;

	default:
		printf("%s: unknown SCSI status 0x%x\n",
		    device_xname(mly->mly_dv), xs->status);
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	mly_ccb_free(mly, mc);
	scsipi_done(xs);
}

/*
 * Notify scsipi about a target's transfer mode.
 */
static void
mly_get_xfer_mode(struct mly_softc *mly, int bus, struct scsipi_xfer_mode *xm)
{
	struct mly_btl *btl;
	int s;

	btl = &mly->mly_btl[bus][xm->xm_target];
	xm->xm_mode = 0;

	s = splbio();

	if ((btl->mb_flags & MLY_BTL_PHYSICAL) != 0) {
		if (btl->mb_speed == 0) {
			xm->xm_period = 0;
			xm->xm_offset = 0;
		} else {
			xm->xm_period = 12;		/* XXX */
			xm->xm_offset = 8;		/* XXX */
			xm->xm_mode |= PERIPH_CAP_SYNC;	/* XXX */
		}

		switch (btl->mb_width) {
		case 32:
			xm->xm_mode |= PERIPH_CAP_WIDE32;
			break;
		case 16:
			xm->xm_mode |= PERIPH_CAP_WIDE16;
			break;
		default:
			break;
		}
	} else /* ((btl->mb_flags & MLY_BTL_LOGICAL) != 0) */ {
		xm->xm_mode = PERIPH_CAP_WIDE16 | PERIPH_CAP_SYNC;
		xm->xm_period = 12;
		xm->xm_offset = 8;
	}

	if ((btl->mb_flags & MLY_BTL_TQING) != 0)
		xm->xm_mode |= PERIPH_CAP_TQING;

	splx(s);

	scsipi_async_event(&mly->mly_chans[bus], ASYNC_EVENT_XFER_MODE, xm);
}

/*
 * ioctl hook; used here only to initiate low-level rescans.
 */
static int
mly_scsipi_ioctl(struct scsipi_channel *chan, u_long cmd, void *data,
		 int flag, struct proc *p)
{
	struct mly_softc *mly;
	int rv;

	mly = device_private(chan->chan_adapter->adapt_dev);

	switch (cmd) {
	case SCBUSIOLLSCAN:
		mly_scan_channel(mly, chan->chan_channel);
		rv = 0;
		break;
	default:
		rv = ENOTTY;
		break;
	}

	return (rv);
}

/*
 * Handshake with the firmware while the card is being initialized.
 */
static int
mly_fwhandshake(struct mly_softc *mly)
{
	u_int8_t error;
	int spinup;

	spinup = 0;

	/* Set HM_STSACK and let the firmware initialize. */
/*
 * Handshake with the firmware while the card is being initialized.
 */
static int
mly_fwhandshake(struct mly_softc *mly)
{
	u_int8_t error;
	int spinup;

	spinup = 0;

	/* Set HM_STSACK and let the firmware initialize. */
	mly_outb(mly, mly->mly_idbr, MLY_HM_STSACK);
	DELAY(1000);	/* too short? */

	/* If HM_STSACK is still true, the controller is initializing. */
	if (!mly_idbr_true(mly, MLY_HM_STSACK))
		return (0);

	printf("%s: controller initialization started\n",
	    device_xname(mly->mly_dv));

	/*
	 * Spin waiting for initialization to finish, or for a message to
	 * be delivered.
	 */
	while (mly_idbr_true(mly, MLY_HM_STSACK)) {
		/* Check for a message */
		if (!mly_error_valid(mly))
			continue;

		error = mly_inb(mly, mly->mly_error_status) & ~MLY_MSG_EMPTY;
		(void)mly_inb(mly, mly->mly_cmd_mailbox);
		(void)mly_inb(mly, mly->mly_cmd_mailbox + 1);

		switch (error) {
		case MLY_MSG_SPINUP:
			if (!spinup) {
				printf("%s: drive spinup in progress\n",
				    device_xname(mly->mly_dv));
				spinup = 1;
			}
			break;

		case MLY_MSG_RACE_RECOVERY_FAIL:
			printf("%s: mirror race recovery failed -\n",
			    device_xname(mly->mly_dv));
			printf("%s: one or more drives offline\n",
			    device_xname(mly->mly_dv));
			break;

		case MLY_MSG_RACE_IN_PROGRESS:
			printf("%s: mirror race recovery in progress\n",
			    device_xname(mly->mly_dv));
			break;

		case MLY_MSG_RACE_ON_CRITICAL:
			printf("%s: mirror race recovery on critical drive\n",
			    device_xname(mly->mly_dv));
			break;

		case MLY_MSG_PARITY_ERROR:
			printf("%s: FATAL MEMORY PARITY ERROR\n",
			    device_xname(mly->mly_dv));
			return (ENXIO);

		default:
			printf("%s: unknown initialization code 0x%x\n",
			    device_xname(mly->mly_dv), error);
			break;
		}
	}

	return (0);
}

/*
 * Space-fill a character string.
 */
static void
mly_padstr(char *dst, const char *src, int len)
{

	while (len-- > 0) {
		if (*src != '\0')
			*dst++ = *src++;
		else
			*dst++ = ' ';
	}
}

/*
 * Allocate DMA safe memory.
 */
static int
mly_dmamem_alloc(struct mly_softc *mly, int size, bus_dmamap_t *dmamap,
    void **kva, bus_addr_t *paddr, bus_dma_segment_t *seg)
{
	int rseg, rv, state;

	state = 0;

	if ((rv = bus_dmamem_alloc(mly->mly_dmat, size, PAGE_SIZE, 0,
	    seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(mly->mly_dv, "dmamem_alloc = %d\n", rv);
		goto bad;
	}

	state++;

	if ((rv = bus_dmamem_map(mly->mly_dmat, seg, 1, size, kva,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(mly->mly_dv, "dmamem_map = %d\n", rv);
		goto bad;
	}

	state++;

	if ((rv = bus_dmamap_create(mly->mly_dmat, size, size, 1, 0,
	    BUS_DMA_NOWAIT, dmamap)) != 0) {
		aprint_error_dev(mly->mly_dv, "dmamap_create = %d\n", rv);
		goto bad;
	}

	state++;

	if ((rv = bus_dmamap_load(mly->mly_dmat, *dmamap, *kva, size,
	    NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(mly->mly_dv, "dmamap_load = %d\n", rv);
		goto bad;
	}

	*paddr = (*dmamap)->dm_segs[0].ds_addr;
	memset(*kva, 0, size);
	return (0);

 bad:
	/* `state' counts completed steps; unwind them in reverse order. */
	if (state > 2)
		bus_dmamap_destroy(mly->mly_dmat, *dmamap);
	if (state > 1)
		bus_dmamem_unmap(mly->mly_dmat, *kva, size);
	if (state > 0)
		bus_dmamem_free(mly->mly_dmat, seg, 1);

	return (rv);
}
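#if 0
/*
 * Illustrative sketch (hypothetical caller, never compiled): typical
 * pairing of mly_dmamem_alloc() above with mly_dmamem_free() below.
 * The helper hands back both a kernel mapping (kva) and the bus
 * address (pa) that is given to the controller.
 */
static void
mly_dmamem_example(struct mly_softc *mly)
{
	bus_dmamap_t map;
	bus_dma_segment_t seg;
	bus_addr_t pa;
	void *kva;

	if (mly_dmamem_alloc(mly, PAGE_SIZE, &map, &kva, &pa, &seg) != 0)
		return;

	/* ... the driver uses kva; the controller is told about pa ... */

	mly_dmamem_free(mly, PAGE_SIZE, map, kva, &seg);
}
#endif
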
/*
 * Free DMA safe memory.
 */
static void
mly_dmamem_free(struct mly_softc *mly, int size, bus_dmamap_t dmamap,
    void *kva, bus_dma_segment_t *seg)
{

	bus_dmamap_unload(mly->mly_dmat, dmamap);
	bus_dmamap_destroy(mly->mly_dmat, dmamap);
	bus_dmamem_unmap(mly->mly_dmat, kva, size);
	bus_dmamem_free(mly->mly_dmat, seg, 1);
}

/*
 * Accept an open operation on the control device.
 */
int
mlyopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct mly_softc *mly;

	if ((mly = device_lookup_private(&mly_cd, minor(dev))) == NULL)
		return (ENXIO);
	if ((mly->mly_state & MLY_STATE_INITOK) == 0)
		return (ENXIO);
	if ((mly->mly_state & MLY_STATE_OPEN) != 0)
		return (EBUSY);

	mly->mly_state |= MLY_STATE_OPEN;
	return (0);
}

/*
 * Accept the last close on the control device.
 */
int
mlyclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct mly_softc *mly;

	mly = device_lookup_private(&mly_cd, minor(dev));
	mly->mly_state &= ~MLY_STATE_OPEN;
	return (0);
}

/*
 * Handle control operations.
 */
int
mlyioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct mly_softc *mly;
	int rv;

	mly = device_lookup_private(&mly_cd, minor(dev));

	switch (cmd) {
	case MLYIO_COMMAND:
		rv = kauth_authorize_device_passthru(l->l_cred, dev,
		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
		if (rv)
			break;

		rv = mly_user_command(mly, (void *)data);
		break;
	case MLYIO_HEALTH:
		rv = mly_user_health(mly, (void *)data);
		break;
	default:
		rv = ENOTTY;
		break;
	}

	return (rv);
}
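#if 0
/*
 * Userland sketch (not part of the driver, never compiled): issue a
 * pass-through command with the MLYIO_COMMAND ioctl dispatched above.
 * Field names follow struct mly_user_command in <dev/pci/mlyio.h>;
 * the "/dev/mly0" path is hypothetical.  A positive DataTransferLength
 * requests a transfer from the controller, a negative one a transfer
 * to it - see mly_user_command() below.
 */
#include <sys/ioctl.h>
#include <dev/pci/mlyio.h>
#include <fcntl.h>
#include <string.h>

int
command_example(void *buf, size_t len)
{
	struct mly_user_command uc;
	int fd;

	if ((fd = open("/dev/mly0", O_RDWR)) < 0)
		return (-1);

	memset(&uc, 0, sizeof(uc));
	/* ... fill in uc.CommandMailbox with a controller command ... */
	uc.DataTransferLength = len;	/* positive: controller -> host */
	uc.DataTransferBuffer = buf;

	if (ioctl(fd, MLYIO_COMMAND, &uc) != 0)
		return (-1);

	/* The driver has now filled in uc.CommandStatus and left the */
	/* command residual in uc.DataTransferLength. */
	return (uc.CommandStatus);
}
#endif
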
/*
 * Execute a command passed in from userspace.
 *
 * The control structure contains the actual command for the controller,
 * as well as the user-space data pointer and data size, and an optional
 * sense buffer size/pointer.  On completion, the data size is adjusted
 * to the command residual, and the sense buffer size to the size of the
 * returned sense data.
 */
static int
mly_user_command(struct mly_softc *mly, struct mly_user_command *uc)
{
	struct mly_ccb *mc;
	int rv, mapped;

	if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
		return (rv);

	mapped = 0;
	mc->mc_data = NULL;

	/*
	 * Handle data size/direction.
	 */
	if ((mc->mc_length = abs(uc->DataTransferLength)) != 0) {
		if (mc->mc_length > MAXPHYS) {
			rv = EINVAL;
			goto out;
		}

		mc->mc_data = malloc(mc->mc_length, M_DEVBUF, M_WAITOK);
		if (mc->mc_data == NULL) {
			rv = ENOMEM;
			goto out;
		}

		if (uc->DataTransferLength > 0) {
			mc->mc_flags |= MLY_CCB_DATAIN;
			memset(mc->mc_data, 0, mc->mc_length);
		}

		if (uc->DataTransferLength < 0) {
			mc->mc_flags |= MLY_CCB_DATAOUT;
			rv = copyin(uc->DataTransferBuffer, mc->mc_data,
			    mc->mc_length);
			if (rv != 0)
				goto out;
		}

		if ((rv = mly_ccb_map(mly, mc)) != 0)
			goto out;
		mapped = 1;
	}

	/* Copy in the command and execute it. */
	memcpy(mc->mc_packet, &uc->CommandMailbox, sizeof(uc->CommandMailbox));

	if ((rv = mly_ccb_wait(mly, mc, 60000)) != 0)
		goto out;

	/* Return the data to userspace. */
	if (uc->DataTransferLength > 0) {
		rv = copyout(mc->mc_data, uc->DataTransferBuffer,
		    mc->mc_length);
		if (rv != 0)
			goto out;
	}

	/* Return the sense buffer to userspace. */
	if (uc->RequestSenseLength > 0 && mc->mc_sense > 0) {
		rv = copyout(mc->mc_packet, uc->RequestSenseBuffer,
		    min(uc->RequestSenseLength, mc->mc_sense));
		if (rv != 0)
			goto out;
	}

	/* Return command results to userspace (caller will copy out). */
	uc->DataTransferLength = mc->mc_resid;
	uc->RequestSenseLength = min(uc->RequestSenseLength, mc->mc_sense);
	uc->CommandStatus = mc->mc_status;
	rv = 0;

 out:
	if (mapped)
		mly_ccb_unmap(mly, mc);
	if (mc->mc_data != NULL)
		free(mc->mc_data, M_DEVBUF);
	mly_ccb_free(mly, mc);

	return (rv);
}

/*
 * Return health status to userspace.  If the health change index in the
 * user structure does not match that currently exported by the
 * controller, we return the current status immediately.  Otherwise, we
 * block until either interrupted or new status is delivered.
 */
static int
mly_user_health(struct mly_softc *mly, struct mly_user_health *uh)
{
	struct mly_health_status mh;
	int rv, s;

	/* Fetch the current health status from userspace. */
	rv = copyin(uh->HealthStatusBuffer, &mh, sizeof(mh));
	if (rv != 0)
		return (rv);

	/* Sleep waiting for a status update. */
	s = splbio();
	if (mly->mly_event_change == mh.change_counter)
		rv = tsleep(&mly->mly_event_change, PRIBIO | PCATCH,
		    "mlyhealth", 0);
	splx(s);

	if (rv == 0) {
		/*
		 * Copy the controller's health status buffer out (there
		 * is a race here if it changes again).  Copy the full
		 * status structure, not just sizeof() the user's buffer
		 * pointer.
		 */
		rv = copyout(&mly->mly_mmbox->mmm_health.status,
		    uh->HealthStatusBuffer,
		    sizeof(mly->mly_mmbox->mmm_health.status));
	}

	return (rv);
}
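#if 0
/*
 * Userland sketch (not part of the driver, never compiled): monitor
 * controller health with the MLYIO_HEALTH ioctl.  mly_user_health()
 * above blocks while the caller's change_counter still matches the
 * controller's, so this loop does not busy-wait.  Field names follow
 * <dev/pci/mlyio.h>; the callback is hypothetical.
 */
#include <sys/ioctl.h>
#include <dev/pci/mlyio.h>
#include <string.h>

void
health_watch_example(int fd, void (*on_change)(struct mly_health_status *))
{
	struct mly_user_health uh;
	struct mly_health_status hs;

	memset(&hs, 0, sizeof(hs));
	uh.HealthStatusBuffer = &hs;

	/* Each successful call returns a status with a new change_counter. */
	while (ioctl(fd, MLYIO_HEALTH, &uh) == 0)
		(*on_change)(&hs);
}
#endif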