/*	$NetBSD: mly.c,v 1.43 2009/11/26 15:17:10 njoly Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, Thor Lancelot Simon, and Eric Haszlakiewicz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2000, 2001 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from FreeBSD: mly.c,v 1.8 2001/07/14 00:12:22 msmith Exp
 */

/*
 * Driver for the Mylex AcceleRAID and eXtremeRAID family with v6 firmware.
 *
 * TODO:
 *
 * o Make mly->mly_btl a hash, then MLY_BTL_RESCAN becomes a SIMPLEQ.
 * o Handle FC and multiple LUNs.
 * o Fix mmbox usage.
 * o Fix transfer speed fudge.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mly.c,v 1.43 2009/11/26 15:17:10 njoly Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/scsiio.h>
#include <sys/kthread.h>
#include <sys/kauth.h>

#include <uvm/uvm_extern.h>

#include <sys/bus.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/mlyreg.h>
#include <dev/pci/mlyio.h>
#include <dev/pci/mlyvar.h>
#include <dev/pci/mly_tables.h>

static void	mly_attach(device_t, device_t, void *);
static int	mly_match(device_t, cfdata_t, void *);
static const	struct mly_ident *mly_find_ident(struct pci_attach_args *);
static int	mly_fwhandshake(struct mly_softc *);
static int	mly_flush(struct mly_softc *);
static int	mly_intr(void *);
static void	mly_shutdown(void *);

static int	mly_alloc_ccbs(struct mly_softc *);
static void	mly_check_event(struct mly_softc *);
static void	mly_complete_event(struct mly_softc *, struct mly_ccb *);
static void	mly_complete_rescan(struct mly_softc *, struct mly_ccb *);
static int	mly_dmamem_alloc(struct mly_softc *, int, bus_dmamap_t *,
				 void **, bus_addr_t *, bus_dma_segment_t *);
static void	mly_dmamem_free(struct mly_softc *, int, bus_dmamap_t,
				void *, bus_dma_segment_t *);
static int	mly_enable_mmbox(struct mly_softc *);
static void	mly_fetch_event(struct mly_softc *);
static int	mly_get_controllerinfo(struct mly_softc *);
static int	mly_get_eventstatus(struct mly_softc *);
static int	mly_ioctl(struct mly_softc *, struct mly_cmd_ioctl *,
			  void **, size_t, void *, size_t *);
static void	mly_padstr(char *, const char *, int);
static void	mly_process_event(struct mly_softc *, struct mly_event *);
static void	mly_release_ccbs(struct mly_softc *);
static int	mly_scan_btl(struct mly_softc *, int, int);
static void	mly_scan_channel(struct mly_softc *, int);
static void	mly_thread(void *);

static int	mly_ccb_alloc(struct mly_softc *, struct mly_ccb **);
static void	mly_ccb_complete(struct mly_softc *, struct mly_ccb *);
static void	mly_ccb_enqueue(struct mly_softc *, struct mly_ccb *);
static void	mly_ccb_free(struct mly_softc *, struct mly_ccb *);
static int	mly_ccb_map(struct mly_softc *, struct mly_ccb *);
static int	mly_ccb_poll(struct mly_softc *, struct mly_ccb *, int);
static int	mly_ccb_submit(struct mly_softc *, struct mly_ccb *);
static void	mly_ccb_unmap(struct mly_softc *, struct mly_ccb *);
static int	mly_ccb_wait(struct mly_softc *, struct mly_ccb *, int);

static void	mly_get_xfer_mode(struct mly_softc *, int,
				  struct scsipi_xfer_mode *);
static void	mly_scsipi_complete(struct mly_softc *, struct mly_ccb *);
static int	mly_scsipi_ioctl(struct scsipi_channel *, u_long, void *,
				 int, struct proc *);
static void	mly_scsipi_minphys(struct buf *);
static void	mly_scsipi_request(struct scsipi_channel *,
				   scsipi_adapter_req_t, void *);

static int	mly_user_command(struct mly_softc *, struct mly_user_command *);
static int	mly_user_health(struct mly_softc *, struct mly_user_health *);
extern struct cfdriver mly_cd;

CFATTACH_DECL(mly, sizeof(struct mly_softc),
    mly_match, mly_attach, NULL, NULL);

dev_type_open(mlyopen);
dev_type_close(mlyclose);
dev_type_ioctl(mlyioctl);

const struct cdevsw mly_cdevsw = {
	mlyopen, mlyclose, noread, nowrite, mlyioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
};

static struct mly_ident {
	u_short	vendor;
	u_short	product;
	u_short	subvendor;
	u_short	subproduct;
	int	hwif;
	const char	*desc;
} const mly_ident[] = {
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_EXTREMERAID,
		PCI_VENDOR_MYLEX,
		0x0040,
		MLY_HWIF_STRONGARM,
		"eXtremeRAID 2000"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_EXTREMERAID,
		PCI_VENDOR_MYLEX,
		0x0030,
		MLY_HWIF_STRONGARM,
		"eXtremeRAID 3000"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_ACCELERAID,
		PCI_VENDOR_MYLEX,
		0x0050,
		MLY_HWIF_I960RX,
		"AcceleRAID 352"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_ACCELERAID,
		PCI_VENDOR_MYLEX,
		0x0052,
		MLY_HWIF_I960RX,
		"AcceleRAID 170"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_ACCELERAID,
		PCI_VENDOR_MYLEX,
		0x0054,
		MLY_HWIF_I960RX,
		"AcceleRAID 160"
	},
};

static void	*mly_sdh;

/*
 * Try to find a `mly_ident' entry corresponding to this board.
 */
static const struct mly_ident *
mly_find_ident(struct pci_attach_args *pa)
{
	const struct mly_ident *mpi, *maxmpi;
	pcireg_t reg;

	mpi = mly_ident;
	maxmpi = mpi + sizeof(mly_ident) / sizeof(mly_ident[0]);

	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_I2O)
		return (NULL);

	for (; mpi < maxmpi; mpi++) {
		if (PCI_VENDOR(pa->pa_id) != mpi->vendor ||
		    PCI_PRODUCT(pa->pa_id) != mpi->product)
			continue;

		if (mpi->subvendor == 0x0000)
			return (mpi);

		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

		if (PCI_VENDOR(reg) == mpi->subvendor &&
		    PCI_PRODUCT(reg) == mpi->subproduct)
			return (mpi);
	}

	return (NULL);
}

/*
 * Match a supported board.
 */
static int
mly_match(device_t parent, cfdata_t cfdata, void *aux)
{

	return (mly_find_ident(aux) != NULL);
}

/*
 * Attach a supported board.
 */
static void
mly_attach(device_t parent, device_t self, void *aux)
{
	struct pci_attach_args *pa;
	struct mly_softc *mly;
	struct mly_ioctl_getcontrollerinfo *mi;
	const struct mly_ident *ident;
	pci_chipset_tag_t pc;
	pci_intr_handle_t ih;
	bus_space_handle_t memh, ioh;
	bus_space_tag_t memt, iot;
	pcireg_t reg;
	const char *intrstr;
	int ior, memr, i, rv, state;
	struct scsipi_adapter *adapt;
	struct scsipi_channel *chan;

	mly = device_private(self);
	pa = aux;
	pc = pa->pa_pc;
	ident = mly_find_ident(pa);
	state = 0;

	mly->mly_dmat = pa->pa_dmat;
	mly->mly_hwif = ident->hwif;

	printf(": Mylex %s\n", ident->desc);
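	/*
	 * The controller's registers may be decoded through either a
	 * memory or an I/O BAR (the first two base address registers,
	 * at config offsets 0x10 and 0x14).  Probe both below; a usable
	 * memory window is preferred over I/O space.
	 */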
	/*
	 * Map the PCI register window.
	 */
	memr = -1;
	ior = -1;

	for (i = 0x10; i <= 0x14; i += 4) {
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, i);

		if (PCI_MAPREG_TYPE(reg) == PCI_MAPREG_TYPE_IO) {
			if (ior == -1 && PCI_MAPREG_IO_SIZE(reg) != 0)
				ior = i;
		} else {
			if (memr == -1 && PCI_MAPREG_MEM_SIZE(reg) != 0)
				memr = i;
		}
	}

	if (memr != -1)
		if (pci_mapreg_map(pa, memr, PCI_MAPREG_TYPE_MEM, 0,
		    &memt, &memh, NULL, NULL))
			memr = -1;
	if (ior != -1)
		if (pci_mapreg_map(pa, ior, PCI_MAPREG_TYPE_IO, 0,
		    &iot, &ioh, NULL, NULL))
			ior = -1;

	if (memr != -1) {
		mly->mly_iot = memt;
		mly->mly_ioh = memh;
	} else if (ior != -1) {
		mly->mly_iot = iot;
		mly->mly_ioh = ioh;
	} else {
		aprint_error_dev(self, "can't map i/o or memory space\n");
		return;
	}

	/*
	 * Enable the device.
	 */
	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    reg | PCI_COMMAND_MASTER_ENABLE);

	/*
	 * Map and establish the interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "can't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	mly->mly_ih = pci_intr_establish(pc, ih, IPL_BIO, mly_intr, mly);
	if (mly->mly_ih == NULL) {
		aprint_error_dev(self, "can't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}

	if (intrstr != NULL)
		aprint_normal_dev(&mly->mly_dv, "interrupting at %s\n",
		    intrstr);
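	/*
	 * The i960RX and StrongARM based boards signal doorbell "true"
	 * with opposite polarity (0x00 vs 0xff below), presumably so the
	 * doorbell test helpers can compare against mly_doorbell_true
	 * rather than a fixed bit pattern.
	 */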
	/*
	 * Take care of interface-specific tasks.
	 */
	switch (mly->mly_hwif) {
	case MLY_HWIF_I960RX:
		mly->mly_doorbell_true = 0x00;
		mly->mly_cmd_mailbox = MLY_I960RX_COMMAND_MAILBOX;
		mly->mly_status_mailbox = MLY_I960RX_STATUS_MAILBOX;
		mly->mly_idbr = MLY_I960RX_IDBR;
		mly->mly_odbr = MLY_I960RX_ODBR;
		mly->mly_error_status = MLY_I960RX_ERROR_STATUS;
		mly->mly_interrupt_status = MLY_I960RX_INTERRUPT_STATUS;
		mly->mly_interrupt_mask = MLY_I960RX_INTERRUPT_MASK;
		break;

	case MLY_HWIF_STRONGARM:
		mly->mly_doorbell_true = 0xff;
		mly->mly_cmd_mailbox = MLY_STRONGARM_COMMAND_MAILBOX;
		mly->mly_status_mailbox = MLY_STRONGARM_STATUS_MAILBOX;
		mly->mly_idbr = MLY_STRONGARM_IDBR;
		mly->mly_odbr = MLY_STRONGARM_ODBR;
		mly->mly_error_status = MLY_STRONGARM_ERROR_STATUS;
		mly->mly_interrupt_status = MLY_STRONGARM_INTERRUPT_STATUS;
		mly->mly_interrupt_mask = MLY_STRONGARM_INTERRUPT_MASK;
		break;
	}

	/*
	 * Allocate and map the scatter/gather lists.
	 */
	rv = mly_dmamem_alloc(mly, MLY_SGL_SIZE * MLY_MAX_CCBS,
	    &mly->mly_sg_dmamap, (void **)&mly->mly_sg,
	    &mly->mly_sg_busaddr, &mly->mly_sg_seg);
	if (rv) {
		printf("%s: unable to allocate S/G maps\n",
		    device_xname(&mly->mly_dv));
		goto bad;
	}
	state++;

	/*
	 * Allocate and map the memory mailbox.
	 */
	rv = mly_dmamem_alloc(mly, sizeof(struct mly_mmbox),
	    &mly->mly_mmbox_dmamap, (void **)&mly->mly_mmbox,
	    &mly->mly_mmbox_busaddr, &mly->mly_mmbox_seg);
	if (rv) {
		aprint_error_dev(&mly->mly_dv, "unable to allocate mailboxes\n");
		goto bad;
	}
	state++;

	/*
	 * Initialise per-controller queues.
	 */
	SLIST_INIT(&mly->mly_ccb_free);
	SIMPLEQ_INIT(&mly->mly_ccb_queue);

	/*
	 * Disable interrupts before we start talking to the controller.
	 */
	mly_outb(mly, mly->mly_interrupt_mask, MLY_INTERRUPT_MASK_DISABLE);

	/*
	 * Wait for the controller to come ready, handshaking with the
	 * firmware if required.  This is typically only necessary on
	 * platforms where the controller BIOS does not run.
	 */
	if (mly_fwhandshake(mly)) {
		aprint_error_dev(&mly->mly_dv, "unable to bring controller online\n");
		goto bad;
	}

	/*
	 * Allocate initial command buffers, obtain controller feature
	 * information, and then reallocate command buffers, since we'll
	 * know how many we want.
	 */
	if (mly_alloc_ccbs(mly)) {
		aprint_error_dev(&mly->mly_dv, "unable to allocate CCBs\n");
		goto bad;
	}
	state++;
	if (mly_get_controllerinfo(mly)) {
		aprint_error_dev(&mly->mly_dv, "unable to retrieve controller info\n");
		goto bad;
	}
	mly_release_ccbs(mly);
	if (mly_alloc_ccbs(mly)) {
		aprint_error_dev(&mly->mly_dv, "unable to allocate CCBs\n");
		state--;
		goto bad;
	}

	/*
	 * Get the current event counter for health purposes, populate the
	 * initial health status buffer.
	 */
	if (mly_get_eventstatus(mly)) {
		aprint_error_dev(&mly->mly_dv, "unable to retrieve event status\n");
		goto bad;
	}
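	/*
	 * In memory-mailbox mode the controller fetches commands from and
	 * posts status to rings in host memory (struct mly_mmbox) instead
	 * of the hardware mailbox registers; both paths are implemented
	 * in mly_ccb_submit() and mly_intr().
	 */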
	/*
	 * Enable memory-mailbox mode.
	 */
	if (mly_enable_mmbox(mly)) {
		aprint_error_dev(&mly->mly_dv, "unable to enable memory mailbox\n");
		goto bad;
	}

	/*
	 * Print a little information about the controller.
	 */
	mi = mly->mly_controllerinfo;

	printf("%s: %d physical channel%s, firmware %d.%02d-%d-%02d "
	    "(%02d%02d%02d%02d), %dMB RAM\n", device_xname(&mly->mly_dv),
	    mi->physical_channels_present,
	    (mi->physical_channels_present) > 1 ? "s" : "",
	    mi->fw_major, mi->fw_minor, mi->fw_turn, mi->fw_build,
	    mi->fw_century, mi->fw_year, mi->fw_month, mi->fw_day,
	    le16toh(mi->memory_size));

	/*
	 * Register our `shutdownhook'.
	 */
	if (mly_sdh == NULL)
		mly_sdh = shutdownhook_establish(mly_shutdown, NULL);

	/*
	 * Clear any previous BTL information.  For each bus that scsipi
	 * wants to scan, we'll receive the SCBUSIOLLSCAN ioctl and retrieve
	 * all BTL info at that point.
	 */
	memset(&mly->mly_btl, 0, sizeof(mly->mly_btl));

	mly->mly_nchans = mly->mly_controllerinfo->physical_channels_present +
	    mly->mly_controllerinfo->virtual_channels_present;

	/*
	 * Attach to scsipi.
	 */
	adapt = &mly->mly_adapt;
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &mly->mly_dv;
	adapt->adapt_nchannels = mly->mly_nchans;
	adapt->adapt_openings = mly->mly_ncmds - MLY_CCBS_RESV;
	adapt->adapt_max_periph = mly->mly_ncmds - MLY_CCBS_RESV;
	adapt->adapt_request = mly_scsipi_request;
	adapt->adapt_minphys = mly_scsipi_minphys;
	adapt->adapt_ioctl = mly_scsipi_ioctl;

	for (i = 0; i < mly->mly_nchans; i++) {
		chan = &mly->mly_chans[i];
		memset(chan, 0, sizeof(*chan));
		chan->chan_adapter = adapt;
		chan->chan_bustype = &scsi_bustype;
		chan->chan_channel = i;
		chan->chan_ntargets = MLY_MAX_TARGETS;
		chan->chan_nluns = MLY_MAX_LUNS;
		chan->chan_id = mly->mly_controllerparam->initiator_id;
		chan->chan_flags = SCSIPI_CHAN_NOSETTLE;
		config_found(&mly->mly_dv, chan, scsiprint);
	}

	/*
	 * Now enable interrupts...
	 */
	mly_outb(mly, mly->mly_interrupt_mask, MLY_INTERRUPT_MASK_ENABLE);

	/*
	 * Finally, create our monitoring thread.
	 */
	mly->mly_state |= MLY_STATE_INITOK;
	rv = kthread_create(PRI_NONE, 0, NULL, mly_thread, mly,
	    &mly->mly_thread, "%s", device_xname(&mly->mly_dv));
	if (rv != 0)
		aprint_error_dev(&mly->mly_dv, "unable to create thread (%d)\n",
		    rv);
	return;

bad:
	if (state > 2)
		mly_release_ccbs(mly);
	if (state > 1)
		mly_dmamem_free(mly, sizeof(struct mly_mmbox),
		    mly->mly_mmbox_dmamap, (void *)mly->mly_mmbox,
		    &mly->mly_mmbox_seg);
	if (state > 0)
		mly_dmamem_free(mly, MLY_SGL_SIZE * MLY_MAX_CCBS,
		    mly->mly_sg_dmamap, (void *)mly->mly_sg,
		    &mly->mly_sg_seg);
}

/*
 * Scan all possible devices on the specified channel.
 */
static void
mly_scan_channel(struct mly_softc *mly, int bus)
{
	int s, target;

	for (target = 0; target < MLY_MAX_TARGETS; target++) {
		s = splbio();
		if (!mly_scan_btl(mly, bus, target)) {
			tsleep(&mly->mly_btl[bus][target], PRIBIO, "mlyscan",
			    0);
		}
		splx(s);
	}
}

/*
 * Shut down all configured `mly' devices.
 */
static void
mly_shutdown(void *cookie)
{
	struct mly_softc *mly;
	int i;

	for (i = 0; i < mly_cd.cd_ndevs; i++) {
		if ((mly = device_lookup_private(&mly_cd, i)) == NULL)
			continue;

		if (mly_flush(mly))
			aprint_error_dev(&mly->mly_dv, "unable to flush cache\n");
	}
}

/*
 * Fill in the mly_controllerinfo and mly_controllerparam fields in the
 * softc.
 */
static int
mly_get_controllerinfo(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;
	int rv;

	/*
	 * Build the getcontrollerinfo ioctl and send it.
	 */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_GETCONTROLLERINFO;
	rv = mly_ioctl(mly, &mci, (void **)&mly->mly_controllerinfo,
	    sizeof(*mly->mly_controllerinfo), NULL, NULL);
	if (rv != 0)
		return (rv);

	/*
	 * Build the getcontrollerparameter ioctl and send it.
	 */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_GETCONTROLLERPARAMETER;
	rv = mly_ioctl(mly, &mci, (void **)&mly->mly_controllerparam,
	    sizeof(*mly->mly_controllerparam), NULL, NULL);

	return (rv);
}

/*
 * Rescan a device, possibly as a consequence of getting an event which
 * suggests that it may have changed.  Must be called with interrupts
 * blocked.
 */
static int
mly_scan_btl(struct mly_softc *mly, int bus, int target)
{
	struct mly_ccb *mc;
	struct mly_cmd_ioctl *mci;
	int rv;

	if (target == mly->mly_controllerparam->initiator_id) {
		mly->mly_btl[bus][target].mb_flags = MLY_BTL_PROTECTED;
		return (EIO);
	}

	/* Don't re-scan if a scan is already in progress. */
	if ((mly->mly_btl[bus][target].mb_flags & MLY_BTL_SCANNING) != 0)
		return (EBUSY);

	/* Get a command. */
	if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
		return (rv);

	/* Set up the data buffer. */
	mc->mc_data = malloc(sizeof(union mly_devinfo),
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (mc->mc_data == NULL) {
		mly_ccb_free(mly, mc);
		return (ENOMEM);
	}

	mc->mc_flags |= MLY_CCB_DATAIN;
	mc->mc_complete = mly_complete_rescan;
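	/*
	 * Note: firmware timeouts are encoded as a count ORed with a
	 * units flag, so (30 | MLY_TIMEOUT_SECONDS) below requests a
	 * 30 second timeout.  mly_scsipi_request() uses the same scheme
	 * with MLY_TIMEOUT_MINUTES and MLY_TIMEOUT_HOURS for longer
	 * timeouts.
	 */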
	/*
	 * Build the ioctl.
	 */
	mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
	mci->opcode = MDACMD_IOCTL;
	mci->timeout = 30 | MLY_TIMEOUT_SECONDS;
	memset(&mci->param, 0, sizeof(mci->param));

	if (MLY_BUS_IS_VIRTUAL(mly, bus)) {
		mc->mc_length = sizeof(struct mly_ioctl_getlogdevinfovalid);
		mci->data_size = htole32(mc->mc_length);
		mci->sub_ioctl = MDACIOCTL_GETLOGDEVINFOVALID;
		_lto3l(MLY_LOGADDR(0, MLY_LOGDEV_ID(mly, bus, target)),
		    mci->addr);
	} else {
		mc->mc_length = sizeof(struct mly_ioctl_getphysdevinfovalid);
		mci->data_size = htole32(mc->mc_length);
		mci->sub_ioctl = MDACIOCTL_GETPHYSDEVINFOVALID;
		_lto3l(MLY_PHYADDR(0, bus, target, 0), mci->addr);
	}

	/*
	 * Dispatch the command.
	 */
	if ((rv = mly_ccb_map(mly, mc)) != 0) {
		free(mc->mc_data, M_DEVBUF);
		mly_ccb_free(mly, mc);
		return (rv);
	}

	mly->mly_btl[bus][target].mb_flags |= MLY_BTL_SCANNING;
	mly_ccb_enqueue(mly, mc);
	return (0);
}
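/*
 * A rescan is asynchronous: mly_scan_btl() queues the appropriate
 * GETLOGDEVINFOVALID/GETPHYSDEVINFOVALID ioctl, and the handler below
 * updates the cached BTL entry and wakes anyone (e.g. mly_scan_channel())
 * sleeping on it.
 */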
/*
 * Handle the completion of a rescan operation.
 */
static void
mly_complete_rescan(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct mly_ioctl_getlogdevinfovalid *ldi;
	struct mly_ioctl_getphysdevinfovalid *pdi;
	struct mly_cmd_ioctl *mci;
	struct mly_btl btl, *btlp;
	struct scsipi_xfer_mode xm;
	int bus, target, rescan;
	u_int tmp;

	mly_ccb_unmap(mly, mc);

	/*
	 * Recover the bus and target from the command.  We need these even
	 * in the case where we don't have a useful response.
	 */
	mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
	tmp = _3ltol(mci->addr);
	rescan = 0;

	if (mci->sub_ioctl == MDACIOCTL_GETLOGDEVINFOVALID) {
		bus = MLY_LOGDEV_BUS(mly, MLY_LOGADDR_DEV(tmp));
		target = MLY_LOGDEV_TARGET(mly, MLY_LOGADDR_DEV(tmp));
	} else {
		bus = MLY_PHYADDR_CHANNEL(tmp);
		target = MLY_PHYADDR_TARGET(tmp);
	}

	btlp = &mly->mly_btl[bus][target];

	/* The default result is 'no device'. */
	memset(&btl, 0, sizeof(btl));
	btl.mb_flags = MLY_BTL_PROTECTED;

	/* If the rescan completed OK, we have possibly-new BTL data. */
	if (mc->mc_status != 0)
		goto out;

	if (mc->mc_length == sizeof(*ldi)) {
		ldi = (struct mly_ioctl_getlogdevinfovalid *)mc->mc_data;
		tmp = le32toh(ldi->logical_device_number);

		if (MLY_LOGDEV_BUS(mly, tmp) != bus ||
		    MLY_LOGDEV_TARGET(mly, tmp) != target) {
#ifdef MLYDEBUG
			printf("%s: WARNING: BTL rescan (logical) for %d:%d "
			    "returned data for %d:%d instead\n",
			    device_xname(&mly->mly_dv), bus, target,
			    MLY_LOGDEV_BUS(mly, tmp),
			    MLY_LOGDEV_TARGET(mly, tmp));
#endif
			goto out;
		}

		btl.mb_flags = MLY_BTL_LOGICAL | MLY_BTL_TQING;
		btl.mb_type = ldi->raid_level;
		btl.mb_state = ldi->state;
	} else if (mc->mc_length == sizeof(*pdi)) {
		pdi = (struct mly_ioctl_getphysdevinfovalid *)mc->mc_data;

		if (pdi->channel != bus || pdi->target != target) {
#ifdef MLYDEBUG
			printf("%s: WARNING: BTL rescan (physical) for %d:%d "
			    "returned data for %d:%d instead\n",
			    device_xname(&mly->mly_dv),
			    bus, target, pdi->channel, pdi->target);
#endif
			goto out;
		}

		btl.mb_flags = MLY_BTL_PHYSICAL;
		btl.mb_type = MLY_DEVICE_TYPE_PHYSICAL;
		btl.mb_state = pdi->state;
		btl.mb_speed = pdi->speed;
		btl.mb_width = pdi->width;

		if (pdi->state != MLY_DEVICE_STATE_UNCONFIGURED)
			btl.mb_flags |= MLY_BTL_PROTECTED;
		if (pdi->command_tags != 0)
			btl.mb_flags |= MLY_BTL_TQING;
	} else {
		printf("%s: BTL rescan result invalid\n", device_xname(&mly->mly_dv));
		goto out;
	}

	/* Decide whether we need to rescan the device. */
	if (btl.mb_flags != btlp->mb_flags ||
	    btl.mb_speed != btlp->mb_speed ||
	    btl.mb_width != btlp->mb_width)
		rescan = 1;

out:
	*btlp = btl;

	if (rescan && (btl.mb_flags & MLY_BTL_PROTECTED) == 0) {
		xm.xm_target = target;
		mly_get_xfer_mode(mly, bus, &xm);
		/* XXX SCSI mid-layer rescan goes here. */
	}

	/* Wake anybody waiting on the device to be rescanned. */
	wakeup(btlp);

	free(mc->mc_data, M_DEVBUF);
	mly_ccb_free(mly, mc);
}

/*
 * Get the current health status and set the 'next event' counter to suit.
 */
static int
mly_get_eventstatus(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;
	struct mly_health_status *mh;
	int rv;

	/* Build the gethealthstatus ioctl and send it. */
	memset(&mci, 0, sizeof(mci));
	mh = NULL;
	mci.sub_ioctl = MDACIOCTL_GETHEALTHSTATUS;

	rv = mly_ioctl(mly, &mci, (void *)&mh, sizeof(*mh), NULL, NULL);
	if (rv)
		return (rv);

	/* Get the event counter. */
	mly->mly_event_change = le32toh(mh->change_counter);
	mly->mly_event_waiting = le32toh(mh->next_event);
	mly->mly_event_counter = le32toh(mh->next_event);

	/* Save the health status into the memory mailbox. */
	memcpy(&mly->mly_mmbox->mmm_health.status, mh, sizeof(*mh));

	bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
	    offsetof(struct mly_mmbox, mmm_health),
	    sizeof(mly->mly_mmbox->mmm_health),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	free(mh, M_DEVBUF);
	return (0);
}

/*
 * Enable memory mailbox mode.
 */
static int
mly_enable_mmbox(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;
	u_int8_t *sp;
	u_int64_t tmp;
	int rv;

	/* Build the ioctl and send it. */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_SETMEMORYMAILBOX;

	/* Set buffer addresses. */
	tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_command);
	mci.param.setmemorymailbox.command_mailbox_physaddr = htole64(tmp);

	tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_status);
	mci.param.setmemorymailbox.status_mailbox_physaddr = htole64(tmp);

	tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_health);
	mci.param.setmemorymailbox.health_buffer_physaddr = htole64(tmp);
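	/*
	 * The sizes handed to the controller are apparently expressed in
	 * kilobyte units (hence the >> 10 below), with the command and
	 * status queue sizes packed into the two low bytes of data_size.
	 */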
	/* Set buffer sizes - abuse of data_size field is revolting. */
	sp = (u_int8_t *)&mci.data_size;
	sp[0] = (sizeof(union mly_cmd_packet) * MLY_MMBOX_COMMANDS) >> 10;
	sp[1] = (sizeof(union mly_status_packet) * MLY_MMBOX_STATUS) >> 10;
	mci.param.setmemorymailbox.health_buffer_size =
	    sizeof(union mly_health_region) >> 10;

	rv = mly_ioctl(mly, &mci, NULL, 0, NULL, NULL);
	if (rv)
		return (rv);

	mly->mly_state |= MLY_STATE_MMBOX_ACTIVE;
	return (0);
}

/*
 * Flush all pending I/O from the controller.
 */
static int
mly_flush(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;

	/* Build the ioctl. */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_FLUSHDEVICEDATA;
	mci.param.deviceoperation.operation_device =
	    MLY_OPDEVICE_PHYSICAL_CONTROLLER;

	/* Pass it off to the controller. */
	return (mly_ioctl(mly, &mci, NULL, 0, NULL, NULL));
}
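/*
 * Typical usage of mly_ioctl() below (cf. mly_get_controllerinfo()):
 * zero a struct mly_cmd_ioctl, set sub_ioctl, and pass a pointer to a
 * NULL data pointer to have a DATAIN buffer allocated and handed back.
 */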
/*
 * Perform an ioctl command.
 *
 * If (data) is not NULL, the command requires data transfer to the
 * controller.  If (*data) is NULL the command requires data transfer from
 * the controller, and we will allocate a buffer for it.
 */
static int
mly_ioctl(struct mly_softc *mly, struct mly_cmd_ioctl *ioctl, void **data,
	  size_t datasize, void *sense_buffer,
	  size_t *sense_length)
{
	struct mly_ccb *mc;
	struct mly_cmd_ioctl *mci;
	u_int8_t status;
	int rv;

	mc = NULL;
	if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
		goto bad;

	/*
	 * Copy the ioctl structure, but save some important fields and then
	 * fixup.
	 */
	mci = &mc->mc_packet->ioctl;
	ioctl->sense_buffer_address = htole64(mci->sense_buffer_address);
	ioctl->maximum_sense_size = mci->maximum_sense_size;
	*mci = *ioctl;
	mci->opcode = MDACMD_IOCTL;
	mci->timeout = 30 | MLY_TIMEOUT_SECONDS;

	/* Handle the data buffer. */
	if (data != NULL) {
		if (*data == NULL) {
			/* Allocate data buffer. */
			mc->mc_data = malloc(datasize, M_DEVBUF, M_NOWAIT);
			if (mc->mc_data == NULL) {
				rv = ENOMEM;
				goto bad;
			}
			mc->mc_flags |= MLY_CCB_DATAIN;
		} else {
			mc->mc_data = *data;
			mc->mc_flags |= MLY_CCB_DATAOUT;
		}
		mc->mc_length = datasize;
		mc->mc_packet->generic.data_size = htole32(datasize);
	}

	/* Run the command. */
	if (datasize > 0)
		if ((rv = mly_ccb_map(mly, mc)) != 0)
			goto bad;
	rv = mly_ccb_poll(mly, mc, 30000);
	if (datasize > 0)
		mly_ccb_unmap(mly, mc);
	if (rv != 0)
		goto bad;

	/* Clean up and return any data. */
	status = mc->mc_status;

	if (status != 0)
		printf("mly_ioctl: command status %d\n", status);

	if (mc->mc_sense > 0 && sense_buffer != NULL) {
		memcpy(sense_buffer, mc->mc_packet, mc->mc_sense);
		*sense_length = mc->mc_sense;
		goto bad;
	}

	/* Should we return a data pointer? */
	if (data != NULL && *data == NULL)
		*data = mc->mc_data;

	/* Command completed OK. */
	rv = (status != 0 ? EIO : 0);

bad:
	if (mc != NULL) {
		/* Do we need to free a data buffer we allocated? */
		if (rv != 0 && mc->mc_data != NULL &&
		    (data == NULL || *data == NULL))
			free(mc->mc_data, M_DEVBUF);
		mly_ccb_free(mly, mc);
	}

	return (rv);
}

/*
 * Check for event(s) outstanding in the controller.
 */
static void
mly_check_event(struct mly_softc *mly)
{

	bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
	    offsetof(struct mly_mmbox, mmm_health),
	    sizeof(mly->mly_mmbox->mmm_health),
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

	/*
	 * The controller may have updated the health status information, so
	 * check for it here.  Note that the counters are all in host
	 * memory, so this check is very cheap.  Also note that we depend on
	 * this check being repeated from mly_complete_event(), so that any
	 * further outstanding events are fetched.
	 */
	if (le32toh(mly->mly_mmbox->mmm_health.status.change_counter) !=
	    mly->mly_event_change) {
		mly->mly_event_change =
		    le32toh(mly->mly_mmbox->mmm_health.status.change_counter);
		mly->mly_event_waiting =
		    le32toh(mly->mly_mmbox->mmm_health.status.next_event);

		/* Wake up anyone that might be interested in this. */
		wakeup(&mly->mly_event_change);
	}

	bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
	    offsetof(struct mly_mmbox, mmm_health),
	    sizeof(mly->mly_mmbox->mmm_health),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	if (mly->mly_event_counter != mly->mly_event_waiting)
		mly_fetch_event(mly);
}
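/*
 * Note that the 32-bit event sequence number is split across the request
 * built in mly_fetch_event() below: the low 16 bits travel in
 * param.getevent.sequence_number_low, while the upper bytes are packed
 * into the 3-byte addr field via MLY_PHYADDR() -- see the _lto3l() call.
 */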
/*
 * Fetch one event from the controller.  If we fail due to resource
 * starvation, we'll be retried the next time a command completes.
 */
static void
mly_fetch_event(struct mly_softc *mly)
{
	struct mly_ccb *mc;
	struct mly_cmd_ioctl *mci;
	int s;
	u_int32_t event;

	/* Get a command. */
	if (mly_ccb_alloc(mly, &mc))
		return;

	/* Set up the data buffer. */
	mc->mc_data = malloc(sizeof(struct mly_event), M_DEVBUF,
	    M_NOWAIT|M_ZERO);
	if (mc->mc_data == NULL) {
		mly_ccb_free(mly, mc);
		return;
	}

	mc->mc_length = sizeof(struct mly_event);
	mc->mc_flags |= MLY_CCB_DATAIN;
	mc->mc_complete = mly_complete_event;

	/*
	 * Get an event number to fetch.  It's possible that we've raced
	 * with another context for the last event, in which case there will
	 * be no more events.
	 */
	s = splbio();
	if (mly->mly_event_counter == mly->mly_event_waiting) {
		splx(s);
		free(mc->mc_data, M_DEVBUF);
		mly_ccb_free(mly, mc);
		return;
	}
	event = mly->mly_event_counter++;
	splx(s);

	/*
	 * Build the ioctl.
	 *
	 * At this point we are committed to sending this request, as it
	 * will be the only one constructed for this particular event
	 * number.
	 */
	mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
	mci->opcode = MDACMD_IOCTL;
	mci->data_size = htole32(sizeof(struct mly_event));
	_lto3l(MLY_PHYADDR(0, 0, (event >> 16) & 0xff, (event >> 24) & 0xff),
	    mci->addr);
	mci->timeout = 30 | MLY_TIMEOUT_SECONDS;
	mci->sub_ioctl = MDACIOCTL_GETEVENT;
	mci->param.getevent.sequence_number_low = htole16(event & 0xffff);

	/*
	 * Submit the command.
	 */
	if (mly_ccb_map(mly, mc) != 0)
		goto bad;
	mly_ccb_enqueue(mly, mc);
	return;

bad:
	printf("%s: couldn't fetch event %u\n", device_xname(&mly->mly_dv), event);
	free(mc->mc_data, M_DEVBUF);
	mly_ccb_free(mly, mc);
}

/*
 * Handle the completion of an event poll.
 */
static void
mly_complete_event(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct mly_event *me;

	me = (struct mly_event *)mc->mc_data;
	mly_ccb_unmap(mly, mc);
	mly_ccb_free(mly, mc);

	/* If the event was successfully fetched, process it. */
	if (mc->mc_status == SCSI_OK)
		mly_process_event(mly, me);
	else
		aprint_error_dev(&mly->mly_dv, "unable to fetch event; status = 0x%x\n",
		    mc->mc_status);

	free(me, M_DEVBUF);

	/* Check for another event. */
	mly_check_event(mly);
}
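/*
 * Entries in mly_table_event encode a class character, an action
 * character and the message text: fp[0] selects a case in the switch
 * in mly_process_event() below, fp[1] == 'r' requests a BTL rescan,
 * and the human-readable string begins at fp + 3.
 */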
/*
 * Process a controller event.  Called with interrupts blocked (i.e., at
 * interrupt time).
 */
static void
mly_process_event(struct mly_softc *mly, struct mly_event *me)
{
	struct scsi_sense_data *ssd;
	int bus, target, event, class, action;
	const char *fp, *tp;

	ssd = (struct scsi_sense_data *)&me->sense[0];

	/*
	 * Errors can be reported using vendor-unique sense data.  In this
	 * case, the event code will be 0x1c (Request sense data present),
	 * the sense key will be 0x09 (vendor specific), the MSB of the ASC
	 * will be set, and the actual event code will be a 16-bit value
	 * comprised of the ASCQ (low byte) and low seven bits of the ASC
	 * (low seven bits of the high byte).
	 */
	if (le32toh(me->code) == 0x1c &&
	    SSD_SENSE_KEY(ssd->flags) == SKEY_VENDOR_SPECIFIC &&
	    (ssd->asc & 0x80) != 0) {
		event = ((int)(ssd->asc & ~0x80) << 8) +
		    ssd->ascq;
	} else
		event = le32toh(me->code);

	/* Look up event, get codes. */
	fp = mly_describe_code(mly_table_event, event);

	/* Quiet event? */
	class = fp[0];
#ifdef notyet
	if (isupper(class) && bootverbose)
		class = tolower(class);
#endif

	/* Get action code, text string. */
	action = fp[1];
	tp = fp + 3;

	/*
	 * Print some information about the event.
	 *
	 * This code uses a table derived from the corresponding portion of
	 * the Linux driver, and thus the parser is very similar.
	 */
	switch (class) {
	case 'p':
		/*
		 * Error on physical drive.
		 */
		printf("%s: physical device %d:%d %s\n", device_xname(&mly->mly_dv),
		    me->channel, me->target, tp);
		if (action == 'r')
			mly->mly_btl[me->channel][me->target].mb_flags |=
			    MLY_BTL_RESCAN;
		break;

	case 'l':
	case 'm':
		/*
		 * Error on logical unit, or message about logical unit.
		 */
		bus = MLY_LOGDEV_BUS(mly, me->lun);
		target = MLY_LOGDEV_TARGET(mly, me->lun);
		printf("%s: logical device %d:%d %s\n", device_xname(&mly->mly_dv),
		    bus, target, tp);
		if (action == 'r')
			mly->mly_btl[bus][target].mb_flags |= MLY_BTL_RESCAN;
		break;

	case 's':
		/*
		 * Report of sense data.
		 */
		if ((SSD_SENSE_KEY(ssd->flags) == SKEY_NO_SENSE ||
		     SSD_SENSE_KEY(ssd->flags) == SKEY_NOT_READY) &&
		    ssd->asc == 0x04 &&
		    (ssd->ascq == 0x01 ||
		     ssd->ascq == 0x02)) {
			/* Ignore NO_SENSE or NOT_READY in one case. */
			break;
		}

		/*
		 * XXX Should translate this if SCSIVERBOSE.
		 */
		printf("%s: physical device %d:%d %s\n", device_xname(&mly->mly_dv),
		    me->channel, me->target, tp);
		printf("%s: sense key %d asc %02x ascq %02x\n",
		    device_xname(&mly->mly_dv), SSD_SENSE_KEY(ssd->flags),
		    ssd->asc, ssd->ascq);
		printf("%s: info %x%x%x%x csi %x%x%x%x\n",
		    device_xname(&mly->mly_dv), ssd->info[0], ssd->info[1],
		    ssd->info[2], ssd->info[3], ssd->csi[0],
		    ssd->csi[1], ssd->csi[2],
		    ssd->csi[3]);
		if (action == 'r')
			mly->mly_btl[me->channel][me->target].mb_flags |=
			    MLY_BTL_RESCAN;
		break;

	case 'e':
		printf("%s: ", device_xname(&mly->mly_dv));
		printf(tp, me->target, me->lun);
		break;

	case 'c':
		printf("%s: controller %s\n", device_xname(&mly->mly_dv), tp);
		break;

	case '?':
		printf("%s: %s - %d\n", device_xname(&mly->mly_dv), tp, event);
		break;

	default:
		/* Probably a 'noisy' event being ignored. */
		break;
	}
}

/*
 * Perform periodic activities.
 */
static void
mly_thread(void *cookie)
{
	struct mly_softc *mly;
	struct mly_btl *btl;
	int s, bus, target, done;

	mly = (struct mly_softc *)cookie;

	for (;;) {
		/* Check for new events. */
		mly_check_event(mly);

		/* Re-scan up to 1 device. */
		s = splbio();
		done = 0;
		for (bus = 0; bus < mly->mly_nchans && !done; bus++) {
			for (target = 0; target < MLY_MAX_TARGETS; target++) {
				/* Perform device rescan? */
				btl = &mly->mly_btl[bus][target];
				if ((btl->mb_flags & MLY_BTL_RESCAN) != 0) {
					btl->mb_flags ^= MLY_BTL_RESCAN;
					mly_scan_btl(mly, bus, target);
					done = 1;
					break;
				}
			}
		}
		splx(s);

		/* Sleep for N seconds. */
		tsleep(mly_thread, PWAIT, "mlyzzz",
		    hz * MLY_PERIODIC_INTERVAL);
	}
}
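/*
 * Note that mly_ccb_poll()'s timeout argument is in milliseconds: the
 * loop below converts it to 100us polls (timo *= 10, DELAY(100)) and
 * calls mly_intr() directly, since this path is used during attach
 * before the controller's interrupt is unmasked.
 */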
/*
 * Submit a command to the controller and poll on completion.  Return
 * non-zero on timeout.
 */
static int
mly_ccb_poll(struct mly_softc *mly, struct mly_ccb *mc, int timo)
{
	int rv;

	if ((rv = mly_ccb_submit(mly, mc)) != 0)
		return (rv);

	for (timo *= 10; timo != 0; timo--) {
		if ((mc->mc_flags & MLY_CCB_COMPLETE) != 0)
			break;
		mly_intr(mly);
		DELAY(100);
	}

	return (timo == 0);
}

/*
 * Submit a command to the controller and sleep on completion.  Return
 * non-zero on timeout.
 */
static int
mly_ccb_wait(struct mly_softc *mly, struct mly_ccb *mc, int timo)
{
	int rv, s;

	mly_ccb_enqueue(mly, mc);

	s = splbio();
	if ((mc->mc_flags & MLY_CCB_COMPLETE) != 0) {
		splx(s);
		return (0);
	}
	rv = tsleep(mc, PRIBIO, "mlywccb", timo * hz / 1000);
	splx(s);

	return (rv);
}

/*
 * If a CCB is specified, enqueue it.  Pull CCBs off the software queue in
 * the order that they were enqueued and try to submit their command blocks
 * to the controller for execution.
 */
void
mly_ccb_enqueue(struct mly_softc *mly, struct mly_ccb *mc)
{
	int s;

	s = splbio();

	if (mc != NULL)
		SIMPLEQ_INSERT_TAIL(&mly->mly_ccb_queue, mc, mc_link.simpleq);

	while ((mc = SIMPLEQ_FIRST(&mly->mly_ccb_queue)) != NULL) {
		if (mly_ccb_submit(mly, mc))
			break;
		SIMPLEQ_REMOVE_HEAD(&mly->mly_ccb_queue, mc_link.simpleq);
	}

	splx(s);
}
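/*
 * Two delivery paths exist.  Before memory-mailbox mode is enabled, the
 * 64-bit packet address is written to the hardware mailbox as two 32-bit
 * halves and MLY_HM_CMDSENT is rung on the inbound doorbell.  Afterwards,
 * the packet is copied into the next host-memory command slot, with the
 * slot's flag byte written last so the controller never sees a partially
 * filled entry, and MLY_AM_CMDSENT is rung instead.
 */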
/*
 * Deliver a command to the controller.
 */
static int
mly_ccb_submit(struct mly_softc *mly, struct mly_ccb *mc)
{
	union mly_cmd_packet *pkt;
	int s, off;

	mc->mc_packet->generic.command_id = htole16(mc->mc_slot);

	bus_dmamap_sync(mly->mly_dmat, mly->mly_pkt_dmamap,
	    mc->mc_packetphys - mly->mly_pkt_busaddr,
	    sizeof(union mly_cmd_packet),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	s = splbio();

	/*
	 * Do we have to use the hardware mailbox?
	 */
	if ((mly->mly_state & MLY_STATE_MMBOX_ACTIVE) == 0) {
		/*
		 * Check to see if the controller is ready for us.
		 */
		if (mly_idbr_true(mly, MLY_HM_CMDSENT)) {
			splx(s);
			return (EBUSY);
		}

		/*
		 * It's ready, send the command.
		 */
		mly_outl(mly, mly->mly_cmd_mailbox,
		    (u_int64_t)mc->mc_packetphys & 0xffffffff);
		mly_outl(mly, mly->mly_cmd_mailbox + 4,
		    (u_int64_t)mc->mc_packetphys >> 32);
		mly_outb(mly, mly->mly_idbr, MLY_HM_CMDSENT);
	} else {
		pkt = &mly->mly_mmbox->mmm_command[mly->mly_mmbox_cmd_idx];
		off = (char *)pkt - (char *)mly->mly_mmbox;

		bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
		    off, sizeof(mly->mly_mmbox->mmm_command[0]),
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

		/* Check to see if the next index is free yet. */
		if (pkt->mmbox.flag != 0) {
			splx(s);
			return (EBUSY);
		}

		/* Copy in new command. */
		memcpy(pkt->mmbox.data, mc->mc_packet->mmbox.data,
		    sizeof(pkt->mmbox.data));

		/* Copy flag last. */
		pkt->mmbox.flag = mc->mc_packet->mmbox.flag;

		bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
		    off, sizeof(mly->mly_mmbox->mmm_command[0]),
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		/* Signal controller and update index. */
		mly_outb(mly, mly->mly_idbr, MLY_AM_CMDSENT);
		mly->mly_mmbox_cmd_idx =
		    (mly->mly_mmbox_cmd_idx + 1) % MLY_MMBOX_COMMANDS;
	}

	splx(s);
	return (0);
}

/*
 * Pick up completed commands from the controller and handle accordingly.
 */
int
mly_intr(void *cookie)
{
	struct mly_ccb *mc;
	union mly_status_packet *sp;
	u_int16_t slot;
	int forus, off;
	struct mly_softc *mly;

	mly = cookie;
	forus = 0;

	/*
	 * Pick up hardware-mailbox commands.
	 */
	if (mly_odbr_true(mly, MLY_HM_STSREADY)) {
		slot = mly_inw(mly, mly->mly_status_mailbox);

		if (slot < MLY_SLOT_MAX) {
			mc = mly->mly_ccbs + (slot - MLY_SLOT_START);
			mc->mc_status =
			    mly_inb(mly, mly->mly_status_mailbox + 2);
			mc->mc_sense =
			    mly_inb(mly, mly->mly_status_mailbox + 3);
			mc->mc_resid =
			    mly_inl(mly, mly->mly_status_mailbox + 4);

			mly_ccb_complete(mly, mc);
		} else {
			/* Slot 0xffff may mean "extremely bogus command". */
			printf("%s: got HM completion for illegal slot %u\n",
			    device_xname(&mly->mly_dv), slot);
		}

		/* Unconditionally acknowledge status. */
		mly_outb(mly, mly->mly_odbr, MLY_HM_STSREADY);
		mly_outb(mly, mly->mly_idbr, MLY_HM_STSACK);
		forus = 1;
	}

	/*
	 * Pick up memory-mailbox commands.
	 */
	if (mly_odbr_true(mly, MLY_AM_STSREADY)) {
		for (;;) {
			sp = &mly->mly_mmbox->mmm_status[mly->mly_mmbox_sts_idx];
			off = (char *)sp - (char *)mly->mly_mmbox;

			bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
			    off, sizeof(mly->mly_mmbox->mmm_command[0]),
			    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

			/* Check for more status. */
			if (sp->mmbox.flag == 0)
				break;

			/* Get slot number. */
			slot = le16toh(sp->status.command_id);
			if (slot < MLY_SLOT_MAX) {
				mc = mly->mly_ccbs + (slot - MLY_SLOT_START);
				mc->mc_status = sp->status.status;
				mc->mc_sense = sp->status.sense_length;
				mc->mc_resid = le32toh(sp->status.residue);
				mly_ccb_complete(mly, mc);
			} else {
				/*
				 * Slot 0xffff may mean "extremely bogus
				 * command".
				 */
				printf("%s: got AM completion for illegal "
				    "slot %u at %d\n", device_xname(&mly->mly_dv),
				    slot, mly->mly_mmbox_sts_idx);
			}

			/* Clear and move to next index. */
			sp->mmbox.flag = 0;
			mly->mly_mmbox_sts_idx =
			    (mly->mly_mmbox_sts_idx + 1) % MLY_MMBOX_STATUS;
		}

		/* Acknowledge that we have collected status value(s). */
		mly_outb(mly, mly->mly_odbr, MLY_AM_STSREADY);
		forus = 1;
	}

	/*
	 * Run the queue.
	 */
	if (forus && ! SIMPLEQ_EMPTY(&mly->mly_ccb_queue))
		mly_ccb_enqueue(mly, NULL);

	return (forus);
}
/*
 * Process completed commands.
 */
static void
mly_ccb_complete(struct mly_softc *mly, struct mly_ccb *mc)
{
	void (*complete)(struct mly_softc *, struct mly_ccb *);

	bus_dmamap_sync(mly->mly_dmat, mly->mly_pkt_dmamap,
	    mc->mc_packetphys - mly->mly_pkt_busaddr,
	    sizeof(union mly_cmd_packet),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	complete = mc->mc_complete;
	mc->mc_flags |= MLY_CCB_COMPLETE;

	/*
	 * Call completion handler or wake up sleeping consumer.
	 */
	if (complete != NULL)
		(*complete)(mly, mc);
	else
		wakeup(mc);
}

/*
 * Allocate a command.
 */
int
mly_ccb_alloc(struct mly_softc *mly, struct mly_ccb **mcp)
{
	struct mly_ccb *mc;
	int s;

	s = splbio();
	mc = SLIST_FIRST(&mly->mly_ccb_free);
	if (mc != NULL)
		SLIST_REMOVE_HEAD(&mly->mly_ccb_free, mc_link.slist);
	splx(s);

	*mcp = mc;
	return (mc == NULL ? EAGAIN : 0);
}

/*
 * Release a command back to the freelist.
 */
void
mly_ccb_free(struct mly_softc *mly, struct mly_ccb *mc)
{
	int s;

	/*
	 * Reset fields that may cause confusion if this CCB's next
	 * consumer doesn't initialise them.
	 */
	mc->mc_data = NULL;
	mc->mc_flags = 0;
	mc->mc_complete = NULL;
	mc->mc_private = NULL;
	mc->mc_packet->generic.command_control = 0;

	/*
	 * By default, we set up to overwrite the command packet with sense
	 * information.
	 */
	mc->mc_packet->generic.sense_buffer_address =
	    htole64(mc->mc_packetphys);
	mc->mc_packet->generic.maximum_sense_size =
	    sizeof(union mly_cmd_packet);

	s = splbio();
	SLIST_INSERT_HEAD(&mly->mly_ccb_free, mc, mc_link.slist);
	splx(s);
}

/*
 * Allocate and initialize command and packet structures.
 *
 * If the controller supports fewer than MLY_MAX_CCBS commands, limit our
 * allocation to that number.  If we don't yet know how many commands the
 * controller supports, allocate a very small set (suitable for
 * initialization purposes only).
 */
static int
mly_alloc_ccbs(struct mly_softc *mly)
{
	struct mly_ccb *mc;
	int i, rv;

	if (mly->mly_controllerinfo == NULL)
		mly->mly_ncmds = MLY_CCBS_RESV;
	else {
		i = le16toh(mly->mly_controllerinfo->maximum_parallel_commands);
		mly->mly_ncmds = min(MLY_MAX_CCBS, i);
	}

	/*
	 * Allocate enough space for all the command packets in one chunk
	 * and map them permanently into controller-visible space.
	 */
	rv = mly_dmamem_alloc(mly,
	    mly->mly_ncmds * sizeof(union mly_cmd_packet),
	    &mly->mly_pkt_dmamap, (void **)&mly->mly_pkt,
	    &mly->mly_pkt_busaddr, &mly->mly_pkt_seg);
	if (rv)
		return (rv);

	mly->mly_ccbs = malloc(sizeof(struct mly_ccb) * mly->mly_ncmds,
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (mly->mly_ccbs == NULL) {
		mly_dmamem_free(mly,
		    mly->mly_ncmds * sizeof(union mly_cmd_packet),
		    mly->mly_pkt_dmamap, (void *)mly->mly_pkt,
		    &mly->mly_pkt_seg);
		return (ENOMEM);
	}

	for (i = 0; i < mly->mly_ncmds; i++) {
		mc = mly->mly_ccbs + i;
		mc->mc_slot = MLY_SLOT_START + i;
		mc->mc_packet = mly->mly_pkt + i;
		mc->mc_packetphys = mly->mly_pkt_busaddr +
		    (i * sizeof(union mly_cmd_packet));

		rv = bus_dmamap_create(mly->mly_dmat, MLY_MAX_XFER,
		    MLY_MAX_SEGS, MLY_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &mc->mc_datamap);
		if (rv) {
			mly_release_ccbs(mly);
			return (rv);
		}

		mly_ccb_free(mly, mc);
	}

	return (0);
}

/*
 * Free all the storage held by commands.
 *
 * Must be called with all commands on the free list.
 */
static void
mly_release_ccbs(struct mly_softc *mly)
{
	struct mly_ccb *mc;

	/* Throw away command buffer DMA maps. */
	while (mly_ccb_alloc(mly, &mc) == 0)
		bus_dmamap_destroy(mly->mly_dmat, mc->mc_datamap);

	/* Release CCB storage. */
	free(mly->mly_ccbs, M_DEVBUF);

	/* Release the packet storage. */
	mly_dmamem_free(mly, mly->mly_ncmds * sizeof(union mly_cmd_packet),
	    mly->mly_pkt_dmamap, (void *)mly->mly_pkt, &mly->mly_pkt_seg);
}
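/*
 * Commands with at most two data segments use the S/G entries embedded
 * in the command packet; larger transfers spill into this command's
 * slice of the permanently mapped S/G pool (mc_sgoff) and set
 * MLY_CMDCTL_EXTENDED_SG_TABLE so the controller fetches the chained
 * table from host memory.
 */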
/*
 * Map a command into controller-visible space.
 */
static int
mly_ccb_map(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct mly_cmd_generic *gen;
	struct mly_sg_entry *sg;
	bus_dma_segment_t *ds;
	int flg, nseg, rv;

#ifdef DIAGNOSTIC
	/* Don't map more than once. */
	if ((mc->mc_flags & MLY_CCB_MAPPED) != 0)
		panic("mly_ccb_map: already mapped");
	mc->mc_flags |= MLY_CCB_MAPPED;

	/* Does the command have a data buffer? */
	if (mc->mc_data == NULL)
		panic("mly_ccb_map: no data buffer");
#endif

	rv = bus_dmamap_load(mly->mly_dmat, mc->mc_datamap, mc->mc_data,
	    mc->mc_length, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((mc->mc_flags & MLY_CCB_DATAIN) != 0 ?
	    BUS_DMA_READ : BUS_DMA_WRITE));
	if (rv != 0)
		return (rv);

	gen = &mc->mc_packet->generic;

	/*
	 * Can we use the transfer structure directly?
	 */
	if ((nseg = mc->mc_datamap->dm_nsegs) <= 2) {
		mc->mc_sgoff = -1;
		sg = &gen->transfer.direct.sg[0];
	} else {
		mc->mc_sgoff = (mc->mc_slot - MLY_SLOT_START) *
		    MLY_MAX_SEGS;
		sg = mly->mly_sg + mc->mc_sgoff;
		gen->command_control |= MLY_CMDCTL_EXTENDED_SG_TABLE;
		gen->transfer.indirect.entries[0] = htole16(nseg);
		gen->transfer.indirect.table_physaddr[0] =
		    htole64(mly->mly_sg_busaddr +
		    (mc->mc_sgoff * sizeof(struct mly_sg_entry)));
	}

	/*
	 * Fill the S/G table.
	 */
	for (ds = mc->mc_datamap->dm_segs; nseg != 0; nseg--, sg++, ds++) {
		sg->physaddr = htole64(ds->ds_addr);
		sg->length = htole64(ds->ds_len);
	}

	/*
	 * Sync up the data map.
	 */
	if ((mc->mc_flags & MLY_CCB_DATAIN) != 0)
		flg = BUS_DMASYNC_PREREAD;
	else /* if ((mc->mc_flags & MLY_CCB_DATAOUT) != 0) */ {
		gen->command_control |= MLY_CMDCTL_DATA_DIRECTION;
		flg = BUS_DMASYNC_PREWRITE;
	}

	bus_dmamap_sync(mly->mly_dmat, mc->mc_datamap, 0, mc->mc_length, flg);

	/*
	 * Sync up the chained S/G table, if we're using one.
	 */
	if (mc->mc_sgoff == -1)
		return (0);

	bus_dmamap_sync(mly->mly_dmat, mly->mly_sg_dmamap, mc->mc_sgoff,
	    MLY_SGL_SIZE, BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Unmap a command from controller-visible space.
 */
static void
mly_ccb_unmap(struct mly_softc *mly, struct mly_ccb *mc)
{
	int flg;

#ifdef DIAGNOSTIC
	if ((mc->mc_flags & MLY_CCB_MAPPED) == 0)
		panic("mly_ccb_unmap: not mapped");
	mc->mc_flags &= ~MLY_CCB_MAPPED;
#endif

	if ((mc->mc_flags & MLY_CCB_DATAIN) != 0)
		flg = BUS_DMASYNC_POSTREAD;
	else /* if ((mc->mc_flags & MLY_CCB_DATAOUT) != 0) */
		flg = BUS_DMASYNC_POSTWRITE;

	bus_dmamap_sync(mly->mly_dmat, mc->mc_datamap, 0, mc->mc_length, flg);
	bus_dmamap_unload(mly->mly_dmat, mc->mc_datamap);

	if (mc->mc_sgoff == -1)
		return;

	bus_dmamap_sync(mly->mly_dmat, mly->mly_sg_dmamap, mc->mc_sgoff,
	    MLY_SGL_SIZE, BUS_DMASYNC_POSTWRITE);
}

/*
 * Adjust the size of each I/O before it passes to the SCSI layer.
 */
static void
mly_scsipi_minphys(struct buf *bp)
{

	if (bp->b_bcount > MLY_MAX_XFER)
		bp->b_bcount = MLY_MAX_XFER;
	minphys(bp);
}
/*
 * Start a SCSI command.
 */
static void
mly_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
		   void *arg)
{
	struct mly_ccb *mc;
	struct mly_cmd_scsi_small *ss;
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct mly_softc *mly;
	struct mly_btl *btl;
	int s, tmp;

	mly = device_private(chan->chan_adapter->adapt_dev);

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		btl = &mly->mly_btl[chan->chan_channel][periph->periph_target];
		s = splbio();
		tmp = btl->mb_flags;
		splx(s);

		/*
		 * Check for I/O attempt to a protected or non-existent
		 * device.
		 */
		if ((tmp & MLY_BTL_PROTECTED) != 0) {
			xs->error = XS_SELTIMEOUT;
			scsipi_done(xs);
			break;
		}

#ifdef DIAGNOSTIC
		/* XXX Increase if/when we support large SCSI commands. */
		if (xs->cmdlen > MLY_CMD_SCSI_SMALL_CDB) {
			printf("%s: cmd too large\n", device_xname(&mly->mly_dv));
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			break;
		}
#endif

		if (mly_ccb_alloc(mly, &mc)) {
			xs->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(xs);
			break;
		}

		/* Build the command. */
		mc->mc_data = xs->data;
		mc->mc_length = xs->datalen;
		mc->mc_complete = mly_scsipi_complete;
		mc->mc_private = xs;

		/* Build the packet for the controller. */
		ss = &mc->mc_packet->scsi_small;
		ss->opcode = MDACMD_SCSI;
#ifdef notdef
		/*
		 * XXX FreeBSD does this, but it doesn't fix anything,
		 * XXX and appears potentially harmful.
		 */
		ss->command_control |= MLY_CMDCTL_DISABLE_DISCONNECT;
#endif

		ss->data_size = htole32(xs->datalen);
		_lto3l(MLY_PHYADDR(0, chan->chan_channel,
		    periph->periph_target, periph->periph_lun), ss->addr);

		if (xs->timeout < 60 * 1000)
			ss->timeout = xs->timeout / 1000 |
			    MLY_TIMEOUT_SECONDS;
		else if (xs->timeout < 60 * 60 * 1000)
			ss->timeout = xs->timeout / (60 * 1000) |
			    MLY_TIMEOUT_MINUTES;
		else
			ss->timeout = xs->timeout / (60 * 60 * 1000) |
			    MLY_TIMEOUT_HOURS;

		ss->maximum_sense_size = sizeof(xs->sense);
		ss->cdb_length = xs->cmdlen;
		memcpy(ss->cdb, xs->cmd, xs->cmdlen);

		if (mc->mc_length != 0) {
			if ((xs->xs_control & XS_CTL_DATA_OUT) != 0)
				mc->mc_flags |= MLY_CCB_DATAOUT;
			else /* if ((xs->xs_control & XS_CTL_DATA_IN) != 0) */
				mc->mc_flags |= MLY_CCB_DATAIN;

			if (mly_ccb_map(mly, mc) != 0) {
				xs->error = XS_DRIVER_STUFFUP;
				mly_ccb_free(mly, mc);
				scsipi_done(xs);
				break;
			}
		}

		/*
		 * Give the command to the controller.
		 */
		if ((xs->xs_control & XS_CTL_POLL) != 0) {
			if (mly_ccb_poll(mly, mc, xs->timeout + 5000)) {
				xs->error = XS_REQUEUE;
				if (mc->mc_length != 0)
					mly_ccb_unmap(mly, mc);
				mly_ccb_free(mly, mc);
				scsipi_done(xs);
			}
		} else
			mly_ccb_enqueue(mly, mc);

		break;

	case ADAPTER_REQ_GROW_RESOURCES:
		/*
		 * Not supported.
		 */
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
		/*
		 * We can't change the transfer mode, but at least let
		 * scsipi know what the adapter has negotiated.
		 */
		mly_get_xfer_mode(mly, chan->chan_channel, arg);
		break;
	}
}
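/*
 * Note that mly_ccb_free() points each command's sense buffer at the
 * command packet itself, which is why the SCSI_CHECK case below copies
 * sense bytes out of mc->mc_packet.
 */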
/*
 * Handle completion of a SCSI command.
 */
static void
mly_scsipi_complete(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct scsipi_xfer *xs;
	struct scsipi_channel *chan;
	struct scsipi_inquiry_data *inq;
	struct mly_btl *btl;
	int target, sl, s;
	const char *p;

	xs = mc->mc_private;
	xs->status = mc->mc_status;

	/*
	 * XXX The `resid' value as returned by the controller appears to be
	 * bogus, so we always set it to zero.  Is it perhaps the transfer
	 * count?
	 */
	xs->resid = 0;	/* mc->mc_resid; */

	if (mc->mc_length != 0)
		mly_ccb_unmap(mly, mc);

	switch (mc->mc_status) {
	case SCSI_OK:
		/*
		 * In order to report logical device type and status, we
		 * overwrite the result of the INQUIRY command to logical
		 * devices.
		 */
		if (xs->cmd->opcode == INQUIRY) {
			chan = xs->xs_periph->periph_channel;
			target = xs->xs_periph->periph_target;
			btl = &mly->mly_btl[chan->chan_channel][target];

			s = splbio();
			if ((btl->mb_flags & MLY_BTL_LOGICAL) != 0) {
				inq = (struct scsipi_inquiry_data *)xs->data;
				mly_padstr(inq->vendor, "MYLEX", 8);
				p = mly_describe_code(mly_table_device_type,
				    btl->mb_type);
				mly_padstr(inq->product, p, 16);
				p = mly_describe_code(mly_table_device_state,
				    btl->mb_state);
				mly_padstr(inq->revision, p, 4);
			}
			splx(s);
		}

		xs->error = XS_NOERROR;
		break;

	case SCSI_CHECK:
		sl = mc->mc_sense;
		if (sl > sizeof(xs->sense.scsi_sense))
			sl = sizeof(xs->sense.scsi_sense);
		memcpy(&xs->sense.scsi_sense, mc->mc_packet, sl);
		xs->error = XS_SENSE;
		break;

	case SCSI_BUSY:
	case SCSI_QUEUE_FULL:
		xs->error = XS_BUSY;
		break;

	default:
		printf("%s: unknown SCSI status 0x%x\n",
		    device_xname(&mly->mly_dv), xs->status);
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	mly_ccb_free(mly, mc);
	scsipi_done(xs);
}

/*
 * Notify scsipi about a target's transfer mode.
 */
static void
mly_get_xfer_mode(struct mly_softc *mly, int bus, struct scsipi_xfer_mode *xm)
{
	struct mly_btl *btl;
	int s;

	btl = &mly->mly_btl[bus][xm->xm_target];
	xm->xm_mode = 0;

	s = splbio();

	if ((btl->mb_flags & MLY_BTL_PHYSICAL) != 0) {
		if (btl->mb_speed == 0) {
			xm->xm_period = 0;
			xm->xm_offset = 0;
		} else {
			xm->xm_period = 12;		/* XXX */
			xm->xm_offset = 8;		/* XXX */
			xm->xm_mode |= PERIPH_CAP_SYNC;	/* XXX */
		}

		switch (btl->mb_width) {
		case 32:
			xm->xm_mode |= PERIPH_CAP_WIDE32;
			break;
		case 16:
			xm->xm_mode |= PERIPH_CAP_WIDE16;
			break;
		default:
			break;
		}
	} else /* ((btl->mb_flags & MLY_BTL_LOGICAL) != 0) */ {
		xm->xm_mode = PERIPH_CAP_WIDE16 | PERIPH_CAP_SYNC;
		xm->xm_period = 12;
		xm->xm_offset = 8;
	}

	if ((btl->mb_flags & MLY_BTL_TQING) != 0)
		xm->xm_mode |= PERIPH_CAP_TQING;

	splx(s);

	scsipi_async_event(&mly->mly_chans[bus], ASYNC_EVENT_XFER_MODE, xm);
}

/*
 * ioctl hook; used here only to initiate low-level rescans.
 */
static int
mly_scsipi_ioctl(struct scsipi_channel *chan, u_long cmd, void *data,
		 int flag, struct proc *p)
{
	struct mly_softc *mly;
	int rv;

	mly = device_private(chan->chan_adapter->adapt_dev);

	switch (cmd) {
	case SCBUSIOLLSCAN:
		mly_scan_channel(mly, chan->chan_channel);
		rv = 0;
		break;
	default:
		rv = ENOTTY;
		break;
	}

	return (rv);
}

/*
 * Handshake with the firmware while the card is being initialized.
 */
static int
mly_fwhandshake(struct mly_softc *mly)
{
	u_int8_t error, param0, param1;
	int spinup;

	spinup = 0;

	/* Set HM_STSACK and let the firmware initialize. */
	mly_outb(mly, mly->mly_idbr, MLY_HM_STSACK);
	DELAY(1000);	/* too short? */

	/* If HM_STSACK is still true, the controller is initializing. */
	if (!mly_idbr_true(mly, MLY_HM_STSACK))
		return (0);

	printf("%s: controller initialization started\n",
	    device_xname(&mly->mly_dv));

	/*
	 * Spin waiting for initialization to finish, or for a message to be
	 * delivered.
	 */
	while (mly_idbr_true(mly, MLY_HM_STSACK)) {
		/* Check for a message. */
		if (!mly_error_valid(mly))
			continue;

		error = mly_inb(mly, mly->mly_error_status) & ~MLY_MSG_EMPTY;
		param0 = mly_inb(mly, mly->mly_cmd_mailbox);
		param1 = mly_inb(mly, mly->mly_cmd_mailbox + 1);

		switch (error) {
		case MLY_MSG_SPINUP:
			if (!spinup) {
				printf("%s: drive spinup in progress\n",
				    device_xname(&mly->mly_dv));
				spinup = 1;
			}
			break;

		case MLY_MSG_RACE_RECOVERY_FAIL:
			printf("%s: mirror race recovery failed -\n",
			    device_xname(&mly->mly_dv));
			printf("%s: one or more drives offline\n",
			    device_xname(&mly->mly_dv));
			break;

		case MLY_MSG_RACE_IN_PROGRESS:
			printf("%s: mirror race recovery in progress\n",
			    device_xname(&mly->mly_dv));
			break;

		case MLY_MSG_RACE_ON_CRITICAL:
			printf("%s: mirror race recovery on critical drive\n",
			    device_xname(&mly->mly_dv));
			break;

		case MLY_MSG_PARITY_ERROR:
			printf("%s: FATAL MEMORY PARITY ERROR\n",
			    device_xname(&mly->mly_dv));
			return (ENXIO);

		default:
			printf("%s: unknown initialization code 0x%x\n",
			    device_xname(&mly->mly_dv), error);
			break;
		}
	}

	return (0);
}

/*
 * Space-fill a character string.
 */
static void
mly_padstr(char *dst, const char *src, int len)
{

	while (len-- > 0) {
		if (*src != '\0')
			*dst++ = *src++;
		else
			*dst++ = ' ';
	}
}

/*
 * Allocate DMA safe memory.
 */
static int
mly_dmamem_alloc(struct mly_softc *mly, int size, bus_dmamap_t *dmamap,
    void **kva, bus_addr_t *paddr, bus_dma_segment_t *seg)
{
	int rseg, rv, state;

	state = 0;

	if ((rv = bus_dmamem_alloc(mly->mly_dmat, size, PAGE_SIZE, 0,
	    seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&mly->mly_dv, "dmamem_alloc = %d\n", rv);
		goto bad;
	}

	state++;

	if ((rv = bus_dmamem_map(mly->mly_dmat, seg, 1, size, kva,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(&mly->mly_dv, "dmamem_map = %d\n", rv);
		goto bad;
	}

	state++;

	if ((rv = bus_dmamap_create(mly->mly_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, dmamap)) != 0) {
		aprint_error_dev(&mly->mly_dv, "dmamap_create = %d\n", rv);
		goto bad;
	}

	state++;

	if ((rv = bus_dmamap_load(mly->mly_dmat, *dmamap, *kva, size,
	    NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&mly->mly_dv, "dmamap_load = %d\n", rv);
		goto bad;
	}

	*paddr = (*dmamap)->dm_segs[0].ds_addr;
	memset(*kva, 0, size);
	return (0);

 bad:
	if (state > 2)
		bus_dmamap_destroy(mly->mly_dmat, *dmamap);
	if (state > 1)
		bus_dmamem_unmap(mly->mly_dmat, *kva, size);
	if (state > 0)
		bus_dmamem_free(mly->mly_dmat, seg, 1);

	return (rv);
}
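
/*
 * Illustrative sketch, never compiled: callers pair mly_dmamem_alloc()
 * with mly_dmamem_free(), handing `paddr' to the controller and using
 * `kva' from the kernel.  The function below is hypothetical and exists
 * only to show the pairing and the error convention.
 */
#ifdef notdef
static int
mly_dmamem_example(struct mly_softc *mly)
{
	bus_dmamap_t dmamap;
	bus_dma_segment_t seg;
	bus_addr_t paddr;
	void *kva;
	int rv;

	if ((rv = mly_dmamem_alloc(mly, PAGE_SIZE, &dmamap, &kva,
	    &paddr, &seg)) != 0)
		return (rv);

	/* ... give `paddr' to the controller, touch the buffer via `kva' ... */

	mly_dmamem_free(mly, PAGE_SIZE, dmamap, kva, &seg);
	return (0);
}
#endif	/* notdef */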

/*
 * Free DMA safe memory.
 */
static void
mly_dmamem_free(struct mly_softc *mly, int size, bus_dmamap_t dmamap,
    void *kva, bus_dma_segment_t *seg)
{

	bus_dmamap_unload(mly->mly_dmat, dmamap);
	bus_dmamap_destroy(mly->mly_dmat, dmamap);
	bus_dmamem_unmap(mly->mly_dmat, kva, size);
	bus_dmamem_free(mly->mly_dmat, seg, 1);
}

/*
 * Accept an open operation on the control device.
 */
int
mlyopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct mly_softc *mly;

	if ((mly = device_lookup_private(&mly_cd, minor(dev))) == NULL)
		return (ENXIO);
	if ((mly->mly_state & MLY_STATE_INITOK) == 0)
		return (ENXIO);
	if ((mly->mly_state & MLY_STATE_OPEN) != 0)
		return (EBUSY);

	mly->mly_state |= MLY_STATE_OPEN;
	return (0);
}

/*
 * Accept the last close on the control device.
 */
int
mlyclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct mly_softc *mly;

	mly = device_lookup_private(&mly_cd, minor(dev));
	mly->mly_state &= ~MLY_STATE_OPEN;
	return (0);
}

/*
 * Handle control operations.
 */
int
mlyioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct mly_softc *mly;
	int rv;

	mly = device_lookup_private(&mly_cd, minor(dev));

	switch (cmd) {
	case MLYIO_COMMAND:
		rv = kauth_authorize_device_passthru(l->l_cred, dev,
		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
		if (rv)
			break;

		rv = mly_user_command(mly, (void *)data);
		break;
	case MLYIO_HEALTH:
		rv = mly_user_health(mly, (void *)data);
		break;
	default:
		rv = ENOTTY;
		break;
	}

	return (rv);
}
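
/*
 * Illustrative sketch, never compiled: an MLYIO_COMMAND caller (see
 * mly_user_command() below) encodes the transfer direction in the sign
 * of DataTransferLength - positive for data returned by the controller,
 * negative for data sent to it.  The mailbox contents are left
 * unspecified on purpose; a real program would include <sys/ioctl.h>,
 * <string.h> and <dev/pci/mlyio.h>.
 */
#ifdef notdef
int
example_command(int fd, void *buf, int len)
{
	struct mly_user_command uc;

	memset(&uc, 0, sizeof(uc));
	/* ... fill in uc.CommandMailbox for the desired command ... */
	uc.DataTransferBuffer = buf;
	uc.DataTransferLength = len;	/* positive: controller -> user */

	if (ioctl(fd, MLYIO_COMMAND, &uc) != 0)
		return (-1);

	/* uc.DataTransferLength now holds the command residual. */
	return (uc.CommandStatus);
}
#endif	/* notdef */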

/*
 * Execute a command passed in from userspace.
 *
 * The control structure contains the actual command for the controller, as
 * well as the user-space data pointer and data size, and an optional sense
 * buffer size/pointer.  On completion, the data size is adjusted to the
 * command residual, and the sense buffer size to the size of the returned
 * sense data.
 */
static int
mly_user_command(struct mly_softc *mly, struct mly_user_command *uc)
{
	struct mly_ccb *mc;
	int rv, mapped;

	if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
		return (rv);

	mapped = 0;
	mc->mc_data = NULL;

	/*
	 * Handle data size/direction.
	 */
	if ((mc->mc_length = abs(uc->DataTransferLength)) != 0) {
		if (mc->mc_length > MAXPHYS) {
			rv = EINVAL;
			goto out;
		}

		mc->mc_data = malloc(mc->mc_length, M_DEVBUF, M_WAITOK);
		if (mc->mc_data == NULL) {
			rv = ENOMEM;
			goto out;
		}

		if (uc->DataTransferLength > 0) {
			mc->mc_flags |= MLY_CCB_DATAIN;
			memset(mc->mc_data, 0, mc->mc_length);
		}

		if (uc->DataTransferLength < 0) {
			mc->mc_flags |= MLY_CCB_DATAOUT;
			rv = copyin(uc->DataTransferBuffer, mc->mc_data,
			    mc->mc_length);
			if (rv != 0)
				goto out;
		}

		if ((rv = mly_ccb_map(mly, mc)) != 0)
			goto out;
		mapped = 1;
	}

	/* Copy in the command and execute it. */
	memcpy(mc->mc_packet, &uc->CommandMailbox, sizeof(uc->CommandMailbox));

	if ((rv = mly_ccb_wait(mly, mc, 60000)) != 0)
		goto out;

	/* Return the data to userspace. */
	if (uc->DataTransferLength > 0) {
		rv = copyout(mc->mc_data, uc->DataTransferBuffer,
		    mc->mc_length);
		if (rv != 0)
			goto out;
	}

	/* Return the sense buffer to userspace. */
	if (uc->RequestSenseLength > 0 && mc->mc_sense > 0) {
		rv = copyout(mc->mc_packet, uc->RequestSenseBuffer,
		    min(uc->RequestSenseLength, mc->mc_sense));
		if (rv != 0)
			goto out;
	}

	/* Return command results to userspace (caller will copy out). */
	uc->DataTransferLength = mc->mc_resid;
	uc->RequestSenseLength = min(uc->RequestSenseLength, mc->mc_sense);
	uc->CommandStatus = mc->mc_status;
	rv = 0;

 out:
	if (mapped)
		mly_ccb_unmap(mly, mc);
	if (mc->mc_data != NULL)
		free(mc->mc_data, M_DEVBUF);
	mly_ccb_free(mly, mc);

	return (rv);
}

/*
 * Return health status to userspace.  If the health change index in the
 * user structure does not match that currently exported by the controller,
 * we return the current status immediately.  Otherwise, we block until
 * either interrupted or new status is delivered.
 */
static int
mly_user_health(struct mly_softc *mly, struct mly_user_health *uh)
{
	struct mly_health_status mh;
	int rv, s;

	/* Fetch the current health status from userspace. */
	rv = copyin(uh->HealthStatusBuffer, &mh, sizeof(mh));
	if (rv != 0)
		return (rv);

	/* Block waiting for a status update. */
	s = splbio();
	if (mly->mly_event_change == mh.change_counter)
		rv = tsleep(&mly->mly_event_change, PRIBIO | PCATCH,
		    "mlyhealth", 0);
	splx(s);

	if (rv == 0) {
		/*
		 * Copy the controller's health status buffer out (there is
		 * a race here if it changes again).
		 */
		rv = copyout(&mly->mly_mmbox->mmm_health.status,
		    uh->HealthStatusBuffer, sizeof(mh));
	}

	return (rv);
}
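
/*
 * Illustrative sketch, never compiled: a monitoring program would call
 * MLYIO_HEALTH in a loop, passing back the change counter it last saw so
 * that the ioctl blocks until new status arrives.  Only the
 * change_counter field of struct mly_health_status is assumed here; a
 * real program would include <sys/ioctl.h>, <string.h> and
 * <dev/pci/mlyio.h>.
 */
#ifdef notdef
int
example_health(int fd)
{
	struct mly_user_health uh;
	struct mly_health_status mh;

	memset(&mh, 0, sizeof(mh));
	uh.HealthStatusBuffer = &mh;

	for (;;) {
		/* Blocks while mh.change_counter matches the controller's. */
		if (ioctl(fd, MLYIO_HEALTH, &uh) != 0)
			return (-1);
		/* `mh' now holds the latest status; act on it here. */
	}
}
#endif	/* notdef */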