/* $NetBSD: arcmsr.c,v 1.21 2008/06/24 10:14:41 gmcgarry Exp $ */
/* $OpenBSD: arc.c,v 1.68 2007/10/27 03:28:27 dlg Exp $ */

/*
 * Copyright (c) 2007, 2008 Juan Romero Pardines <xtraeme@netbsd.org>
 * Copyright (c) 2006 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bio.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: arcmsr.c,v 1.21 2008/06/24 10:14:41 gmcgarry Exp $");

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/rwlock.h>

#if NBIO > 0
#include <sys/ioctl.h>
#include <dev/biovar.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/sysmon/sysmonvar.h>

#include <sys/bus.h>

#include <uvm/uvm_extern.h>	/* for PAGE_SIZE */

#include <dev/pci/arcmsrvar.h>

/* #define ARC_DEBUG */
#ifdef ARC_DEBUG
#define ARC_D_INIT	(1<<0)
#define ARC_D_RW	(1<<1)
#define ARC_D_DB	(1<<2)

int arcdebug = 0;

#define DPRINTF(p...)		do { if (arcdebug) printf(p); } while (0)
#define DNPRINTF(n, p...)	do { if ((n) & arcdebug) printf(p); } while (0)

#else
#define DPRINTF(p, ...)		/* p */
#define DNPRINTF(n, p, ...)	/* n, p */
#endif

/*
 * the fw header must always equal this.
 */
static struct arc_fw_hdr arc_fw_hdr = { 0x5e, 0x01, 0x61 };

/*
 * autoconf(9) glue.
 */
static int	arc_match(device_t, cfdata_t, void *);
static void	arc_attach(device_t, device_t, void *);
static int	arc_detach(device_t, int);
static bool	arc_shutdown(device_t, int);
static int	arc_intr(void *);
static void	arc_minphys(struct buf *);

CFATTACH_DECL_NEW(arcmsr, sizeof(struct arc_softc),
	arc_match, arc_attach, arc_detach, NULL);

/*
 * bio(4) and sysmon_envsys(9) glue.
 */
#if NBIO > 0
static int	arc_bioctl(device_t, u_long, void *);
static int	arc_bio_inq(struct arc_softc *, struct bioc_inq *);
static int	arc_bio_vol(struct arc_softc *, struct bioc_vol *);
static int	arc_bio_disk_volume(struct arc_softc *, struct bioc_disk *);
static int	arc_bio_disk_novol(struct arc_softc *, struct bioc_disk *);
static void	arc_bio_disk_filldata(struct arc_softc *, struct bioc_disk *,
			struct arc_fw_diskinfo *, int);
static int	arc_bio_alarm(struct arc_softc *, struct bioc_alarm *);
static int	arc_bio_alarm_state(struct arc_softc *, struct bioc_alarm *);
static int	arc_bio_getvol(struct arc_softc *, int,
			struct arc_fw_volinfo *);
static int	arc_bio_setstate(struct arc_softc *, struct bioc_setstate *);
static int	arc_bio_volops(struct arc_softc *, struct bioc_volops *);
static void	arc_create_sensors(void *);
static void	arc_refresh_sensors(struct sysmon_envsys *, envsys_data_t *);
static int	arc_fw_parse_status_code(struct arc_softc *, uint8_t *);
#endif

static int
arc_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ARECA) {
		switch (PCI_PRODUCT(pa->pa_id)) {
		case PCI_PRODUCT_ARECA_ARC1110:
		case PCI_PRODUCT_ARECA_ARC1120:
		case PCI_PRODUCT_ARECA_ARC1130:
		case PCI_PRODUCT_ARECA_ARC1160:
		case PCI_PRODUCT_ARECA_ARC1170:
		case PCI_PRODUCT_ARECA_ARC1200:
		case PCI_PRODUCT_ARECA_ARC1202:
		case PCI_PRODUCT_ARECA_ARC1210:
		case PCI_PRODUCT_ARECA_ARC1220:
		case PCI_PRODUCT_ARECA_ARC1230:
		case PCI_PRODUCT_ARECA_ARC1260:
		case PCI_PRODUCT_ARECA_ARC1270:
		case PCI_PRODUCT_ARECA_ARC1280:
		case PCI_PRODUCT_ARECA_ARC1380:
		case PCI_PRODUCT_ARECA_ARC1381:
		case PCI_PRODUCT_ARECA_ARC1680:
		case PCI_PRODUCT_ARECA_ARC1681:
			return 1;
		default:
			break;
		}
	}

	return 0;
}

static void
arc_attach(device_t parent, device_t self, void *aux)
{
	struct arc_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_chan;

	sc->sc_dev = self;
	sc->sc_talking = 0;
	rw_init(&sc->sc_rwlock);
	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_condvar, "arcdb");

	if (arc_map_pci_resources(self, pa) != 0) {
		/* error message printed by arc_map_pci_resources */
		return;
	}

	if (arc_query_firmware(self) != 0) {
		/* error message printed by arc_query_firmware */
		goto unmap_pci;
	}

	if (arc_alloc_ccbs(self) != 0) {
		/* error message printed by arc_alloc_ccbs */
		goto unmap_pci;
	}

	if (!pmf_device_register1(self, NULL, NULL, arc_shutdown))
		panic("%s: couldn't establish shutdown handler\n",
		    device_xname(self));

	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = self;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = sc->sc_req_count / ARC_MAX_TARGET;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_minphys = arc_minphys;
	adapt->adapt_request = arc_scsi_cmd;

	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_nluns = ARC_MAX_LUN;
	chan->chan_ntargets = ARC_MAX_TARGET;
	chan->chan_id = ARC_MAX_TARGET;
	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;

	/*
	 * Save the device_t returned, because we could need it to attach
	 * devices via the management interface.
	 */
	sc->sc_scsibus_dv = config_found(self, &sc->sc_chan, scsiprint);

	/* enable interrupts */
	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRSTAT_DOORBELL));

#if NBIO > 0
	/*
	 * Register the driver to bio(4) and setup the sensors.
	 */
	if (bio_register(self, arc_bioctl) != 0)
		panic("%s: bioctl registration failed\n", device_xname(self));

	/*
	 * you need to talk to the firmware to get volume info. our firmware
	 * interface relies on being able to sleep, so we need to use a thread
	 * to do the work.
	 */
	if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
	    arc_create_sensors, sc, &sc->sc_lwp, "arcmsr_sensors") != 0)
		panic("%s: unable to create a kernel thread for sensors\n",
		    device_xname(self));
#endif

	return;

unmap_pci:
	arc_unmap_pci_resources(sc);
}

static int
arc_detach(device_t self, int flags)
{
	struct arc_softc *sc = device_private(self);

	if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
		aprint_error_dev(self, "timeout waiting to stop bg rebuild\n");

	if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
		aprint_error_dev(self, "timeout waiting to flush cache\n");

	return 0;
}

static bool
arc_shutdown(device_t self, int how)
{
	struct arc_softc *sc = device_private(self);

	if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
		aprint_error_dev(self, "timeout waiting to stop bg rebuild\n");

	if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
		aprint_error_dev(self, "timeout waiting to flush cache\n");

	return true;
}

static void
arc_minphys(struct buf *bp)
{
	if (bp->b_bcount > MAXPHYS)
		bp->b_bcount = MAXPHYS;
	minphys(bp);
}
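
/*
 * Completed commands are pulled off the outbound post queue with
 * arc_pop().  The popped value appears to be the bus address of the
 * command frame shifted right by ARC_REG_REPLY_QUEUE_ADDR_SHIFT (plus
 * status bits in the low bits), so the matching kernel virtual address
 * is recovered as:
 *
 *	kva + ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) - dva)
 *
 * where kva and dva are the base addresses of the sc_requests DMA
 * region.  arc_intr() and arc_complete() both rely on this.
 */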
static int
arc_intr(void *arg)
{
	struct arc_softc *sc = arg;
	struct arc_ccb *ccb = NULL;
	char *kva = ARC_DMA_KVA(sc->sc_requests);
	struct arc_io_cmd *cmd;
	uint32_t reg, intrstat;

	mutex_spin_enter(&sc->sc_mutex);
	intrstat = arc_read(sc, ARC_REG_INTRSTAT);
	if (intrstat == 0x0) {
		mutex_spin_exit(&sc->sc_mutex);
		return 0;
	}

	intrstat &= ARC_REG_INTRSTAT_POSTQUEUE | ARC_REG_INTRSTAT_DOORBELL;
	arc_write(sc, ARC_REG_INTRSTAT, intrstat);

	if (intrstat & ARC_REG_INTRSTAT_DOORBELL) {
		if (sc->sc_talking) {
			arc_write(sc, ARC_REG_INTRMASK,
			    ~ARC_REG_INTRMASK_POSTQUEUE);
			cv_broadcast(&sc->sc_condvar);
		} else {
			/* otherwise drop it */
			reg = arc_read(sc, ARC_REG_OUTB_DOORBELL);
			arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
			if (reg & ARC_REG_OUTB_DOORBELL_WRITE_OK)
				arc_write(sc, ARC_REG_INB_DOORBELL,
				    ARC_REG_INB_DOORBELL_READ_OK);
		}
	}
	mutex_spin_exit(&sc->sc_mutex);

	while ((reg = arc_pop(sc)) != 0xffffffff) {
		cmd = (struct arc_io_cmd *)(kva +
		    ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
		    (uint32_t)ARC_DMA_DVA(sc->sc_requests)));
		ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];

		bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
		    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		arc_scsi_cmd_done(sc, ccb, reg);
	}

	return 1;
}

void
arc_scsi_cmd(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct arc_softc *sc = device_private(adapt->adapt_dev);
	struct arc_ccb *ccb;
	struct arc_msg_scsicmd *cmd;
	uint32_t reg;
	uint8_t target;

	switch (req) {
	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
		/* Not supported. */
		return;
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	mutex_spin_enter(&sc->sc_mutex);

	xs = arg;
	periph = xs->xs_periph;
	target = periph->periph_target;

	if (xs->cmdlen > ARC_MSG_CDBLEN) {
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.scsi_sense.response_code = SSD_RCODE_VALID | 0x70;
		xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.scsi_sense.asc = 0x20;
		xs->error = XS_SENSE;
		xs->status = SCSI_CHECK;
		mutex_spin_exit(&sc->sc_mutex);
		scsipi_done(xs);
		return;
	}

	ccb = arc_get_ccb(sc);
	if (ccb == NULL) {
		xs->error = XS_RESOURCE_SHORTAGE;
		mutex_spin_exit(&sc->sc_mutex);
		scsipi_done(xs);
		return;
	}

	ccb->ccb_xs = xs;

	if (arc_load_xs(ccb) != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		arc_put_ccb(sc, ccb);
		mutex_spin_exit(&sc->sc_mutex);
		scsipi_done(xs);
		return;
	}

	cmd = &ccb->ccb_cmd->cmd;
	reg = ccb->ccb_cmd_post;

	/* bus is always 0 */
	cmd->target = target;
	cmd->lun = periph->periph_lun;
	cmd->function = 1;	/* XXX magic number */

	cmd->cdb_len = xs->cmdlen;
	cmd->sgl_len = ccb->ccb_dmamap->dm_nsegs;
	if (xs->xs_control & XS_CTL_DATA_OUT)
		cmd->flags = ARC_MSG_SCSICMD_FLAG_WRITE;
	if (ccb->ccb_dmamap->dm_nsegs > ARC_SGL_256LEN) {
		cmd->flags |= ARC_MSG_SCSICMD_FLAG_SGL_BSIZE_512;
		reg |= ARC_REG_POST_QUEUE_BIGFRAME;
	}

	cmd->context = htole32(ccb->ccb_id);
	cmd->data_len = htole32(xs->datalen);

	memcpy(cmd->cdb, xs->cmd, xs->cmdlen);

	/* we've built the command, let's put it on the hw */
	bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	arc_push(sc, reg);
	if (xs->xs_control & XS_CTL_POLL) {
		if (arc_complete(sc, ccb, xs->timeout) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			mutex_spin_exit(&sc->sc_mutex);
			scsipi_done(xs);
			return;
		}
	}

	mutex_spin_exit(&sc->sc_mutex);
}
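
/*
 * Build the scatter/gather list for an xfer.  Each DMA segment is
 * described by a 64-bit SGE: the header word seems to carry the
 * ARC_SGE_64BIT flag ored with the segment length, followed by the
 * high and low 32 bits of the segment bus address, all little-endian.
 */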
int
arc_load_xs(struct arc_ccb *ccb)
{
	struct arc_softc *sc = ccb->ccb_sc;
	struct scsipi_xfer *xs = ccb->ccb_xs;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	struct arc_sge *sgl = ccb->ccb_cmd->sgl, *sge;
	uint64_t addr;
	int i, error;

	if (xs->datalen == 0)
		return 0;

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    xs->data, xs->datalen, NULL,
	    (xs->xs_control & XS_CTL_NOSLEEP) ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error != 0) {
		aprint_error("%s: error %d loading dmamap\n",
		    device_xname(sc->sc_dev), error);
		return 1;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		sge = &sgl[i];

		sge->sg_hdr = htole32(ARC_SGE_64BIT | dmap->dm_segs[i].ds_len);
		addr = dmap->dm_segs[i].ds_addr;
		sge->sg_hi_addr = htole32((uint32_t)(addr >> 32));
		sge->sg_lo_addr = htole32((uint32_t)addr);
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

void
arc_scsi_cmd_done(struct arc_softc *sc, struct arc_ccb *ccb, uint32_t reg)
{
	struct scsipi_xfer *xs = ccb->ccb_xs;
	struct arc_msg_scsicmd *cmd;

	if (xs->datalen != 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	/* timeout_del */
	xs->status |= XS_STS_DONE;

	if (reg & ARC_REG_REPLY_QUEUE_ERR) {
		cmd = &ccb->ccb_cmd->cmd;

		switch (cmd->status) {
		case ARC_MSG_STATUS_SELTIMEOUT:
		case ARC_MSG_STATUS_ABORTED:
		case ARC_MSG_STATUS_INIT_FAIL:
			xs->status = SCSI_OK;
			xs->error = XS_SELTIMEOUT;
			break;

		case SCSI_CHECK:
			memset(&xs->sense, 0, sizeof(xs->sense));
			memcpy(&xs->sense, cmd->sense_data,
			    min(ARC_MSG_SENSELEN, sizeof(xs->sense)));
			xs->sense.scsi_sense.response_code =
			    SSD_RCODE_VALID | 0x70;
			xs->status = SCSI_CHECK;
			xs->error = XS_SENSE;
			xs->resid = 0;
			break;

		default:
			/* unknown device status */
			xs->error = XS_BUSY;	/* try again later? */
			xs->status = SCSI_BUSY;
			break;
		}
	} else {
		xs->status = SCSI_OK;
		xs->error = XS_NOERROR;
		xs->resid = 0;
	}

	arc_put_ccb(sc, ccb);
	scsipi_done(xs);
}

int
arc_complete(struct arc_softc *sc, struct arc_ccb *nccb, int timeout)
{
	struct arc_ccb *ccb = NULL;
	char *kva = ARC_DMA_KVA(sc->sc_requests);
	struct arc_io_cmd *cmd;
	uint32_t reg;

	do {
		reg = arc_pop(sc);
		if (reg == 0xffffffff) {
			if (timeout-- == 0)
				return 1;

			delay(1000);
			continue;
		}

		cmd = (struct arc_io_cmd *)(kva +
		    ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
		    ARC_DMA_DVA(sc->sc_requests)));
		ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];

		bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
		    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		arc_scsi_cmd_done(sc, ccb, reg);
	} while (nccb != ccb);

	return 0;
}

int
arc_map_pci_resources(device_t self, struct pci_attach_args *pa)
{
	struct arc_softc *sc = device_private(self);
	pcireg_t memtype;
	pci_intr_handle_t ih;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, ARC_PCI_BAR);
	if (pci_mapreg_map(pa, ARC_PCI_BAR, memtype, 0, &sc->sc_iot,
	    &sc->sc_ioh, NULL, &sc->sc_ios) != 0) {
		aprint_error(": unable to map system interface register\n");
		return 1;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error(": unable to map interrupt\n");
		goto unmap;
	}

	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
	    arc_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error(": unable to map interrupt [2]\n");
		goto unmap;
	}

	aprint_normal("\n");
	aprint_normal_dev(self, "interrupting at %s\n",
	    pci_intr_string(pa->pa_pc, ih));

	return 0;

unmap:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;
	return 1;
}

void
arc_unmap_pci_resources(struct arc_softc *sc)
{
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;
}

int
arc_query_firmware(device_t self)
{
	struct arc_softc *sc = device_private(self);
	struct arc_msg_firmware_info fwinfo;
	char string[81];	/* sizeof(vendor)*2+1 */

	if (arc_wait_eq(sc, ARC_REG_OUTB_ADDR1, ARC_REG_OUTB_ADDR1_FIRMWARE_OK,
	    ARC_REG_OUTB_ADDR1_FIRMWARE_OK) != 0) {
		aprint_debug_dev(self, "timeout waiting for firmware ok\n");
		return 1;
	}

	if (arc_msg0(sc, ARC_REG_INB_MSG0_GET_CONFIG) != 0) {
		aprint_debug_dev(self, "timeout waiting for get config\n");
		return 1;
	}

	if (arc_msg0(sc, ARC_REG_INB_MSG0_START_BGRB) != 0) {
		aprint_debug_dev(self, "timeout waiting to start bg rebuild\n");
		return 1;
	}

	arc_read_region(sc, ARC_REG_MSGBUF, &fwinfo, sizeof(fwinfo));

	DNPRINTF(ARC_D_INIT, "%s: signature: 0x%08x\n",
	    device_xname(self), htole32(fwinfo.signature));

	if (htole32(fwinfo.signature) != ARC_FWINFO_SIGNATURE_GET_CONFIG) {
		aprint_error_dev(self, "invalid firmware info from iop\n");
		return 1;
	}

	DNPRINTF(ARC_D_INIT, "%s: request_len: %d\n",
	    device_xname(self), htole32(fwinfo.request_len));
	DNPRINTF(ARC_D_INIT, "%s: queue_len: %d\n",
	    device_xname(self), htole32(fwinfo.queue_len));
	DNPRINTF(ARC_D_INIT, "%s: sdram_size: %d\n",
	    device_xname(self), htole32(fwinfo.sdram_size));
	DNPRINTF(ARC_D_INIT, "%s: sata_ports: %d\n",
	    device_xname(self), htole32(fwinfo.sata_ports));

	scsipi_strvis(string, 81, fwinfo.vendor, sizeof(fwinfo.vendor));
	DNPRINTF(ARC_D_INIT, "%s: vendor: \"%s\"\n",
	    device_xname(self), string);

	scsipi_strvis(string, 17, fwinfo.model, sizeof(fwinfo.model));
	aprint_normal_dev(self, "Areca %s Host Adapter RAID controller\n",
	    string);

	scsipi_strvis(string, 33, fwinfo.fw_version, sizeof(fwinfo.fw_version));
	DNPRINTF(ARC_D_INIT, "%s: version: \"%s\"\n",
	    device_xname(self), string);

	aprint_normal_dev(self, "%d ports, %dMB SDRAM, firmware <%s>\n",
	    htole32(fwinfo.sata_ports), htole32(fwinfo.sdram_size), string);

	if (htole32(fwinfo.request_len) != ARC_MAX_IOCMDLEN) {
		aprint_error_dev(self,
		    "unexpected request frame size (%d != %d)\n",
		    htole32(fwinfo.request_len), ARC_MAX_IOCMDLEN);
		return 1;
	}

	sc->sc_req_count = htole32(fwinfo.queue_len);

	return 0;
}

#if NBIO > 0
static int
arc_bioctl(device_t self, u_long cmd, void *addr)
{
	struct arc_softc *sc = device_private(self);
	int error = 0;

	switch (cmd) {
	case BIOCINQ:
		error = arc_bio_inq(sc, (struct bioc_inq *)addr);
		break;

	case BIOCVOL:
		error = arc_bio_vol(sc, (struct bioc_vol *)addr);
		break;

	case BIOCDISK:
		error = arc_bio_disk_volume(sc, (struct bioc_disk *)addr);
		break;

	case BIOCDISK_NOVOL:
		error = arc_bio_disk_novol(sc, (struct bioc_disk *)addr);
		break;

	case BIOCALARM:
		error = arc_bio_alarm(sc, (struct bioc_alarm *)addr);
		break;

	case BIOCSETSTATE:
		error = arc_bio_setstate(sc, (struct bioc_setstate *)addr);
		break;

	case BIOCVOLOPS:
		error = arc_bio_volops(sc, (struct bioc_volops *)addr);
		break;

	default:
		error = ENOTTY;
		break;
	}

	return error;
}

static int
arc_fw_parse_status_code(struct arc_softc *sc, uint8_t *reply)
{
	switch (*reply) {
	case ARC_FW_CMD_RAIDINVAL:
		printf("%s: firmware error (invalid raid set)\n",
		    device_xname(sc->sc_dev));
		return EINVAL;
	case ARC_FW_CMD_VOLINVAL:
		printf("%s: firmware error (invalid volume set)\n",
		    device_xname(sc->sc_dev));
		return EINVAL;
	case ARC_FW_CMD_NORAID:
		printf("%s: firmware error (nonexistent raid set)\n",
		    device_xname(sc->sc_dev));
		return ENODEV;
	case ARC_FW_CMD_NOVOLUME:
		printf("%s: firmware error (nonexistent volume set)\n",
		    device_xname(sc->sc_dev));
		return ENODEV;
	case ARC_FW_CMD_NOPHYSDRV:
		printf("%s: firmware error (nonexistent physical drive)\n",
		    device_xname(sc->sc_dev));
		return ENODEV;
	case ARC_FW_CMD_PARAM_ERR:
		printf("%s: firmware error (parameter error)\n",
		    device_xname(sc->sc_dev));
		return EINVAL;
	case ARC_FW_CMD_UNSUPPORTED:
		printf("%s: firmware error (unsupported command)\n",
		    device_xname(sc->sc_dev));
		return EOPNOTSUPP;
	case ARC_FW_CMD_DISKCFG_CHGD:
		printf("%s: firmware error (disk configuration changed)\n",
		    device_xname(sc->sc_dev));
		return EINVAL;
	case ARC_FW_CMD_PASS_INVAL:
		printf("%s: firmware error (invalid password)\n",
		    device_xname(sc->sc_dev));
		return EINVAL;
	case ARC_FW_CMD_NODISKSPACE:
		printf("%s: firmware error (no disk space available)\n",
		    device_xname(sc->sc_dev));
		return EOPNOTSUPP;
	case ARC_FW_CMD_CHECKSUM_ERR:
		printf("%s: firmware error (checksum error)\n",
		    device_xname(sc->sc_dev));
		return EINVAL;
	case ARC_FW_CMD_PASS_REQD:
		printf("%s: firmware error (password required)\n",
		    device_xname(sc->sc_dev));
		return EPERM;
	case ARC_FW_CMD_OK:
	default:
		return 0;
	}
}

static int
arc_bio_alarm(struct arc_softc *sc, struct bioc_alarm *ba)
{
	uint8_t request[2], reply[1];
	size_t len;
	int error = 0;

	switch (ba->ba_opcode) {
	case BIOC_SAENABLE:
	case BIOC_SADISABLE:
		request[0] = ARC_FW_SET_ALARM;
		request[1] = (ba->ba_opcode == BIOC_SAENABLE) ?
		    ARC_FW_SET_ALARM_ENABLE : ARC_FW_SET_ALARM_DISABLE;
		len = sizeof(request);

		break;

	case BIOC_SASILENCE:
		request[0] = ARC_FW_MUTE_ALARM;
		len = 1;

		break;

	case BIOC_GASTATUS:
		/* system info is too big/ugly to deal with here */
		return arc_bio_alarm_state(sc, ba);

	default:
		return EOPNOTSUPP;
	}

	error = arc_msgbuf(sc, request, len, reply, sizeof(reply));
	if (error != 0)
		return error;

	return arc_fw_parse_status_code(sc, &reply[0]);
}

static int
arc_bio_alarm_state(struct arc_softc *sc, struct bioc_alarm *ba)
{
	struct arc_fw_sysinfo *sysinfo;
	uint8_t request;
	int error = 0;

	sysinfo = kmem_zalloc(sizeof(*sysinfo), KM_SLEEP);

	request = ARC_FW_SYSINFO;
	error = arc_msgbuf(sc, &request, sizeof(request),
	    sysinfo, sizeof(struct arc_fw_sysinfo));

	if (error != 0)
		goto out;

	ba->ba_status = sysinfo->alarm;

out:
	kmem_free(sysinfo, sizeof(*sysinfo));
	return error;
}

static int
arc_bio_volops(struct arc_softc *sc, struct bioc_volops *bc)
{
	/* to create a raid set */
	struct req_craidset {
		uint8_t		cmdcode;
		uint32_t	devmask;
		uint8_t		raidset_name[16];
	} __packed;

	/* to create a volume set */
	struct req_cvolset {
		uint8_t		cmdcode;
		uint8_t		raidset;
		uint8_t		volset_name[16];
		uint64_t	capacity;
		uint8_t		raidlevel;
		uint8_t		stripe;
		uint8_t		scsi_chan;
		uint8_t		scsi_target;
		uint8_t		scsi_lun;
		uint8_t		tagqueue;
		uint8_t		cache;
		uint8_t		speed;
		uint8_t		quick_init;
	} __packed;

	struct scsibus_softc *scsibus_sc = NULL;
	struct req_craidset req_craidset;
	struct req_cvolset req_cvolset;
	uint8_t request[2];
	uint8_t reply[1];
	int error = 0;

	switch (bc->bc_opcode) {
	case BIOC_VCREATE_VOLUME:
	    {
		/*
		 * Zero out the structs so that we use some defaults
		 * in raid and volume sets.
		 */
		memset(&req_craidset, 0, sizeof(req_craidset));
		memset(&req_cvolset, 0, sizeof(req_cvolset));

		/*
		 * First we have to create the raid set and
		 * use the default name for all of them.
		 */
		req_craidset.cmdcode = ARC_FW_CREATE_RAIDSET;
		req_craidset.devmask = bc->bc_devmask;
		error = arc_msgbuf(sc, &req_craidset, sizeof(req_craidset),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		error = arc_fw_parse_status_code(sc, &reply[0]);
		if (error) {
			printf("%s: create raidset%d failed\n",
			    device_xname(sc->sc_dev), bc->bc_volid);
			return error;
		}

		/*
		 * At this point the raid set was created, so it's
		 * time to create the volume set.
		 */
		req_cvolset.cmdcode = ARC_FW_CREATE_VOLUME;
		req_cvolset.raidset = bc->bc_volid;
		req_cvolset.capacity = bc->bc_size * ARC_BLOCKSIZE;

		/*
		 * Set the RAID level.
		 */
		switch (bc->bc_level) {
		case 0:
		case 1:
			req_cvolset.raidlevel = bc->bc_level;
			break;
		case BIOC_SVOL_RAID10:
			req_cvolset.raidlevel = 1;
			break;
		case 3:
			req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_3;
			break;
		case 5:
			req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_5;
			break;
		case 6:
			req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_6;
			break;
		default:
			return EOPNOTSUPP;
		}

		/*
		 * Set the stripe size.
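		 * The firmware appears to encode the stripe size as
		 * log2(size in KB) - 2, i.e. 4KB -> 0 up to 128KB -> 5.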
		 */
		switch (bc->bc_stripe) {
		case 4:
			req_cvolset.stripe = 0;
			break;
		case 8:
			req_cvolset.stripe = 1;
			break;
		case 16:
			req_cvolset.stripe = 2;
			break;
		case 32:
			req_cvolset.stripe = 3;
			break;
		case 64:
			req_cvolset.stripe = 4;
			break;
		case 128:
			req_cvolset.stripe = 5;
			break;
		default:
			req_cvolset.stripe = 4;	/* by default 64K */
			break;
		}

		req_cvolset.scsi_chan = bc->bc_channel;
		req_cvolset.scsi_target = bc->bc_target;
		req_cvolset.scsi_lun = bc->bc_lun;
		req_cvolset.tagqueue = 1;	/* always enabled */
		req_cvolset.cache = 1;		/* always enabled */
		req_cvolset.speed = 4;		/* always max speed */

		/* RAID 1 and 1+0 levels need foreground initialization */
		if (bc->bc_level == 1 || bc->bc_level == BIOC_SVOL_RAID10)
			req_cvolset.quick_init = 1;	/* foreground init */

		error = arc_msgbuf(sc, &req_cvolset, sizeof(req_cvolset),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		error = arc_fw_parse_status_code(sc, &reply[0]);
		if (error) {
			printf("%s: create volumeset%d failed\n",
			    device_xname(sc->sc_dev), bc->bc_volid);
			return error;
		}

		/*
		 * If we are creating a RAID 1 or RAID 1+0 volume,
		 * the volume will be created immediately but it won't
		 * be available until the initialization is done... so
		 * don't bother attaching the sd(4) device.
		 */
		if (bc->bc_level == 1 || bc->bc_level == BIOC_SVOL_RAID10)
			break;

		/*
		 * Do a rescan on the bus to attach the device associated
		 * with the new volume.
		 */
		scsibus_sc = device_private(sc->sc_scsibus_dv);
		(void)scsi_probe_bus(scsibus_sc, bc->bc_target, bc->bc_lun);

		break;
	    }
	case BIOC_VREMOVE_VOLUME:
	    {
		/*
		 * Remove the volume set specified in bc_volid.
		 */
		request[0] = ARC_FW_DELETE_VOLUME;
		request[1] = bc->bc_volid;
		error = arc_msgbuf(sc, request, sizeof(request),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		error = arc_fw_parse_status_code(sc, &reply[0]);
		if (error) {
			printf("%s: delete volumeset%d failed\n",
			    device_xname(sc->sc_dev), bc->bc_volid);
			return error;
		}

		/*
		 * Detach the sd(4) device associated with the volume,
		 * but if there's an error don't make it a priority.
		 */
		error = scsipi_target_detach(&sc->sc_chan, bc->bc_target,
		    bc->bc_lun, 0);
		if (error)
			printf("%s: couldn't detach sd device for volume %d "
			    "at %u:%u.%u (error=%d)\n",
			    device_xname(sc->sc_dev), bc->bc_volid,
			    bc->bc_channel, bc->bc_target, bc->bc_lun, error);

		/*
		 * and remove the raid set specified in bc_volid,
		 * we only care about volumes.
		 */
		request[0] = ARC_FW_DELETE_RAIDSET;
		request[1] = bc->bc_volid;
		error = arc_msgbuf(sc, request, sizeof(request),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		error = arc_fw_parse_status_code(sc, &reply[0]);
		if (error) {
			printf("%s: delete raidset%d failed\n",
			    device_xname(sc->sc_dev), bc->bc_volid);
			return error;
		}

		break;
	    }
	default:
		return EOPNOTSUPP;
	}

	return error;
}

static int
arc_bio_setstate(struct arc_softc *sc, struct bioc_setstate *bs)
{
	/* for a hotspare disk */
	struct request_hs {
		uint8_t		cmdcode;
		uint32_t	devmask;
	} __packed;

	/* for a pass-through disk */
	struct request_pt {
		uint8_t		cmdcode;
		uint8_t		devid;
		uint8_t		scsi_chan;
		uint8_t		scsi_id;
		uint8_t		scsi_lun;
		uint8_t		tagged_queue;
		uint8_t		cache_mode;
		uint8_t		max_speed;
	} __packed;

	struct scsibus_softc *scsibus_sc = NULL;
	struct request_hs req_hs;	/* to add/remove hotspare */
	struct request_pt req_pt;	/* to add a pass-through */
	uint8_t req_gen[2];
	uint8_t reply[1];
	int error = 0;

	switch (bs->bs_status) {
	case BIOC_SSHOTSPARE:
	    {
		req_hs.cmdcode = ARC_FW_CREATE_HOTSPARE;
		req_hs.devmask = (1 << bs->bs_target);
		goto hotspare;
	    }
	case BIOC_SSDELHOTSPARE:
	    {
		req_hs.cmdcode = ARC_FW_DELETE_HOTSPARE;
		req_hs.devmask = (1 << bs->bs_target);
		goto hotspare;
	    }
	case BIOC_SSPASSTHRU:
	    {
		req_pt.cmdcode = ARC_FW_CREATE_PASSTHRU;
		req_pt.devid = bs->bs_other_id;	/* this wants device# */
		req_pt.scsi_chan = bs->bs_channel;
		req_pt.scsi_id = bs->bs_target;
		req_pt.scsi_lun = bs->bs_lun;
		req_pt.tagged_queue = 1;	/* always enabled */
		req_pt.cache_mode = 1;		/* always enabled */
		req_pt.max_speed = 4;		/* always max speed */

		error = arc_msgbuf(sc, &req_pt, sizeof(req_pt),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		/*
		 * Do a rescan on the bus to attach the new device
		 * associated with the pass-through disk.
		 */
		scsibus_sc = device_private(sc->sc_scsibus_dv);
		(void)scsi_probe_bus(scsibus_sc, bs->bs_target, bs->bs_lun);

		goto out;
	    }
	case BIOC_SSDELPASSTHRU:
	    {
		req_gen[0] = ARC_FW_DELETE_PASSTHRU;
		req_gen[1] = bs->bs_target;
		error = arc_msgbuf(sc, &req_gen, sizeof(req_gen),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		/*
		 * Detach the sd device associated with this pass-through disk.
		 */
		error = scsipi_target_detach(&sc->sc_chan, bs->bs_target,
		    bs->bs_lun, 0);
		if (error)
			printf("%s: couldn't detach sd device for the "
			    "pass-through disk at %u:%u.%u (error=%d)\n",
			    device_xname(sc->sc_dev),
			    bs->bs_channel, bs->bs_target, bs->bs_lun, error);

		goto out;
	    }
	case BIOC_SSCHECKSTART_VOL:
	    {
		req_gen[0] = ARC_FW_START_CHECKVOL;
		req_gen[1] = bs->bs_volid;
		error = arc_msgbuf(sc, &req_gen, sizeof(req_gen),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		goto out;
	    }
	case BIOC_SSCHECKSTOP_VOL:
	    {
		uint8_t req = ARC_FW_STOP_CHECKVOL;
		error = arc_msgbuf(sc, &req, 1, reply, sizeof(reply));
		if (error != 0)
			return error;

		goto out;
	    }
	default:
		return EOPNOTSUPP;
	}

hotspare:
	error = arc_msgbuf(sc, &req_hs, sizeof(req_hs),
	    reply, sizeof(reply));
	if (error != 0)
		return error;

out:
	return arc_fw_parse_status_code(sc, &reply[0]);
}

static int
arc_bio_inq(struct arc_softc *sc, struct bioc_inq *bi)
{
	uint8_t request[2];
	struct arc_fw_sysinfo *sysinfo = NULL;
	struct arc_fw_raidinfo *raidinfo;
	int nvols = 0, i;
	int error = 0;

	raidinfo = kmem_zalloc(sizeof(*raidinfo), KM_SLEEP);

	if (!sc->sc_maxraidset || !sc->sc_maxvolset || !sc->sc_cchans) {
		sysinfo = kmem_zalloc(sizeof(*sysinfo), KM_SLEEP);

		request[0] = ARC_FW_SYSINFO;
		error = arc_msgbuf(sc, request, 1, sysinfo,
		    sizeof(struct arc_fw_sysinfo));
		if (error != 0)
			goto out;

		sc->sc_maxraidset = sysinfo->max_raid_set;
		sc->sc_maxvolset = sysinfo->max_volume_set;
		sc->sc_cchans = sysinfo->ide_channels;
	}

	request[0] = ARC_FW_RAIDINFO;
	for (i = 0; i < sc->sc_maxraidset; i++) {
		request[1] = i;
		error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
		    sizeof(struct arc_fw_raidinfo));
		if (error != 0)
			goto out;

		if (raidinfo->volumes)
			nvols++;
	}

	strlcpy(bi->bi_dev, device_xname(sc->sc_dev), sizeof(bi->bi_dev));
	bi->bi_novol = nvols;
	bi->bi_nodisk = sc->sc_cchans;

out:
	if (sysinfo)
		kmem_free(sysinfo, sizeof(*sysinfo));
	kmem_free(raidinfo, sizeof(*raidinfo));
	return error;
}
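
/*
 * Look up the bioc volume "vol" by walking the firmware volume sets and
 * skipping the empty ones; only volume sets with a non-zero capacity
 * seem to count as real volumes here.
 */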
static int
arc_bio_getvol(struct arc_softc *sc, int vol, struct arc_fw_volinfo *volinfo)
{
	uint8_t request[2];
	int error = 0;
	int nvols = 0, i;

	request[0] = ARC_FW_VOLINFO;
	for (i = 0; i < sc->sc_maxvolset; i++) {
		request[1] = i;
		error = arc_msgbuf(sc, request, sizeof(request), volinfo,
		    sizeof(struct arc_fw_volinfo));
		if (error != 0)
			goto out;

		if (volinfo->capacity == 0 && volinfo->capacity2 == 0)
			continue;

		if (nvols == vol)
			break;

		nvols++;
	}

	if (nvols != vol ||
	    (volinfo->capacity == 0 && volinfo->capacity2 == 0)) {
		error = ENODEV;
		goto out;
	}

out:
	return error;
}

static int
arc_bio_vol(struct arc_softc *sc, struct bioc_vol *bv)
{
	struct arc_fw_volinfo *volinfo;
	uint64_t blocks;
	uint32_t status;
	int error = 0;

	volinfo = kmem_zalloc(sizeof(*volinfo), KM_SLEEP);

	error = arc_bio_getvol(sc, bv->bv_volid, volinfo);
	if (error != 0)
		goto out;

	bv->bv_percent = -1;
	bv->bv_seconds = 0;

	status = htole32(volinfo->volume_status);
	if (status == 0x0) {
		if (htole32(volinfo->fail_mask) == 0x0)
			bv->bv_status = BIOC_SVONLINE;
		else
			bv->bv_status = BIOC_SVDEGRADED;
	} else if (status & ARC_FW_VOL_STATUS_NEED_REGEN) {
		bv->bv_status = BIOC_SVDEGRADED;
	} else if (status & ARC_FW_VOL_STATUS_FAILED) {
		bv->bv_status = BIOC_SVOFFLINE;
	} else if (status & ARC_FW_VOL_STATUS_INITTING) {
		bv->bv_status = BIOC_SVBUILDING;
		bv->bv_percent = htole32(volinfo->progress);
	} else if (status & ARC_FW_VOL_STATUS_REBUILDING) {
		bv->bv_status = BIOC_SVREBUILD;
		bv->bv_percent = htole32(volinfo->progress);
	} else if (status & ARC_FW_VOL_STATUS_MIGRATING) {
		bv->bv_status = BIOC_SVMIGRATING;
		bv->bv_percent = htole32(volinfo->progress);
	} else if (status & ARC_FW_VOL_STATUS_CHECKING) {
		bv->bv_status = BIOC_SVCHECKING;
		bv->bv_percent = htole32(volinfo->progress);
	} else if (status & ARC_FW_VOL_STATUS_NEED_INIT) {
		bv->bv_status = BIOC_SVOFFLINE;
	} else {
		printf("%s: volume %d status 0x%x\n",
		    device_xname(sc->sc_dev), bv->bv_volid, status);
	}

	blocks = (uint64_t)htole32(volinfo->capacity2) << 32;
	blocks += (uint64_t)htole32(volinfo->capacity);
	bv->bv_size = blocks * ARC_BLOCKSIZE;	/* XXX */

	switch (volinfo->raid_level) {
	case ARC_FW_VOL_RAIDLEVEL_0:
		bv->bv_level = 0;
		break;
	case ARC_FW_VOL_RAIDLEVEL_1:
		if (volinfo->member_disks > 2)
			bv->bv_level = BIOC_SVOL_RAID10;
		else
			bv->bv_level = 1;
		break;
	case ARC_FW_VOL_RAIDLEVEL_3:
		bv->bv_level = 3;
		break;
	case ARC_FW_VOL_RAIDLEVEL_5:
		bv->bv_level = 5;
		break;
	case ARC_FW_VOL_RAIDLEVEL_6:
		bv->bv_level = 6;
		break;
	case ARC_FW_VOL_RAIDLEVEL_PASSTHRU:
		bv->bv_level = BIOC_SVOL_PASSTHRU;
		break;
	default:
		bv->bv_level = -1;
		break;
	}

	bv->bv_nodisk = volinfo->member_disks;
	bv->bv_stripe_size = volinfo->stripe_size / 2;
	snprintf(bv->bv_dev, sizeof(bv->bv_dev), "sd%d", bv->bv_volid);
	scsipi_strvis(bv->bv_vendor, sizeof(bv->bv_vendor), volinfo->set_name,
	    sizeof(volinfo->set_name));

out:
	kmem_free(volinfo, sizeof(*volinfo));
	return error;
}

static int
arc_bio_disk_novol(struct arc_softc *sc, struct bioc_disk *bd)
{
	struct arc_fw_diskinfo *diskinfo;
	uint8_t request[2];
	int error = 0;

	diskinfo = kmem_zalloc(sizeof(*diskinfo), KM_SLEEP);

	if (bd->bd_diskid >= sc->sc_cchans) {
		error = ENODEV;
		goto out;
	}

	request[0] = ARC_FW_DISKINFO;
	request[1] = bd->bd_diskid;
	error = arc_msgbuf(sc, request, sizeof(request),
	    diskinfo, sizeof(struct arc_fw_diskinfo));
	if (error != 0)
		goto out;

	/* skip disks with no capacity */
	if (htole32(diskinfo->capacity) == 0 &&
	    htole32(diskinfo->capacity2) == 0)
		goto out;

	bd->bd_disknovol = true;
	arc_bio_disk_filldata(sc, bd, diskinfo, bd->bd_diskid);

out:
	kmem_free(diskinfo, sizeof(*diskinfo));
	return error;
}

static void
arc_bio_disk_filldata(struct arc_softc *sc, struct bioc_disk *bd,
    struct arc_fw_diskinfo *diskinfo, int diskid)
{
	uint64_t blocks;
	char model[81];
	char serial[41];
	char rev[17];

	switch (diskinfo->device_state) {
	case ARC_FW_DISK_PASSTHRU:
		bd->bd_status = BIOC_SDPASSTHRU;
		break;
	case ARC_FW_DISK_INITIALIZED:
	case ARC_FW_DISK_RAIDMEMBER:
		bd->bd_status = BIOC_SDONLINE;
		break;
	case ARC_FW_DISK_HOTSPARE:
		bd->bd_status = BIOC_SDHOTSPARE;
		break;
	case ARC_FW_DISK_UNUSED:
		bd->bd_status = BIOC_SDUNUSED;
		break;
	case 0:
		/* disk has been disconnected */
		bd->bd_status = BIOC_SDOFFLINE;
		bd->bd_channel = 1;
		bd->bd_target = 0;
		bd->bd_lun = 0;
		strlcpy(bd->bd_vendor, "disk missing", sizeof(bd->bd_vendor));
		break;
	default:
		printf("%s: unknown disk device_state: 0x%x\n", __func__,
		    diskinfo->device_state);
		bd->bd_status = BIOC_SDINVALID;
		return;
	}

	blocks = (uint64_t)htole32(diskinfo->capacity2) << 32;
	blocks += (uint64_t)htole32(diskinfo->capacity);
	bd->bd_size = blocks * ARC_BLOCKSIZE;	/* XXX */

	scsipi_strvis(model, 81, diskinfo->model, sizeof(diskinfo->model));
	scsipi_strvis(serial, 41, diskinfo->serial, sizeof(diskinfo->serial));
	scsipi_strvis(rev, 17, diskinfo->firmware_rev,
	    sizeof(diskinfo->firmware_rev));

	snprintf(bd->bd_vendor, sizeof(bd->bd_vendor), "%s %s", model, rev);
	strlcpy(bd->bd_serial, serial, sizeof(bd->bd_serial));

#if 0
	bd->bd_channel = diskinfo->scsi_attr.channel;
	bd->bd_target = diskinfo->scsi_attr.target;
	bd->bd_lun = diskinfo->scsi_attr.lun;
#endif

	/*
	 * the firmware doesn't seem to fill scsi_attr in, so fake it with
	 * the diskid.
	 */
	bd->bd_channel = 0;
	bd->bd_target = diskid;
	bd->bd_lun = 0;
}

static int
arc_bio_disk_volume(struct arc_softc *sc, struct bioc_disk *bd)
{
	struct arc_fw_raidinfo *raidinfo;
	struct arc_fw_volinfo *volinfo;
	struct arc_fw_diskinfo *diskinfo;
	uint8_t request[2];
	int error = 0;

	volinfo = kmem_zalloc(sizeof(*volinfo), KM_SLEEP);
	raidinfo = kmem_zalloc(sizeof(*raidinfo), KM_SLEEP);
	diskinfo = kmem_zalloc(sizeof(*diskinfo), KM_SLEEP);

	error = arc_bio_getvol(sc, bd->bd_volid, volinfo);
	if (error != 0)
		goto out;

	request[0] = ARC_FW_RAIDINFO;
	request[1] = volinfo->raid_set_number;

	error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
	    sizeof(struct arc_fw_raidinfo));
	if (error != 0)
		goto out;

	if (bd->bd_diskid >= sc->sc_cchans ||
	    bd->bd_diskid >= raidinfo->member_devices) {
		error = ENODEV;
		goto out;
	}

	if (raidinfo->device_array[bd->bd_diskid] == 0xff) {
		/*
		 * The disk has been disconnected, mark it offline
		 * and put it on another bus.
		 */
		bd->bd_channel = 1;
		bd->bd_target = 0;
		bd->bd_lun = 0;
		bd->bd_status = BIOC_SDOFFLINE;
		strlcpy(bd->bd_vendor, "disk missing", sizeof(bd->bd_vendor));
		goto out;
	}

	request[0] = ARC_FW_DISKINFO;
	request[1] = raidinfo->device_array[bd->bd_diskid];
	error = arc_msgbuf(sc, request, sizeof(request), diskinfo,
	    sizeof(struct arc_fw_diskinfo));
	if (error != 0)
		goto out;

	/* now fill our bio disk with data from the firmware */
	arc_bio_disk_filldata(sc, bd, diskinfo,
	    raidinfo->device_array[bd->bd_diskid]);

out:
	kmem_free(raidinfo, sizeof(*raidinfo));
	kmem_free(volinfo, sizeof(*volinfo));
	kmem_free(diskinfo, sizeof(*diskinfo));
	return error;
}
#endif /* NBIO > 0 */

uint8_t
arc_msg_cksum(void *cmd, uint16_t len)
{
	uint8_t *buf = cmd;
	uint8_t cksum;
	int i;

	cksum = (uint8_t)(len >> 8) + (uint8_t)len;
	for (i = 0; i < len; i++)
		cksum += buf[i];

	return cksum;
}
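
/*
 * Exchange a request/reply pair with the firmware through the IOC
 * read/write buffers, using the doorbell registers as a handshake.
 * The request (a bufhdr, the payload and a trailing checksum byte) is
 * pushed out in chunks of at most sizeof(rwbuf) bytes, and the reply is
 * collected the same way until a full reply buffer has been read; the
 * reply header and checksum are then verified before the payload is
 * copied back to the caller.  This describes the code below rather than
 * any official firmware documentation.
 */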
int
arc_msgbuf(struct arc_softc *sc, void *wptr, size_t wbuflen, void *rptr,
    size_t rbuflen)
{
	uint8_t rwbuf[ARC_REG_IOC_RWBUF_MAXLEN];
	uint8_t *wbuf, *rbuf;
	int wlen, wdone = 0, rlen, rdone = 0;
	struct arc_fw_bufhdr *bufhdr;
	uint32_t reg, rwlen;
	int error = 0;
#ifdef ARC_DEBUG
	int i;
#endif

	wbuf = rbuf = NULL;

	DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wbuflen: %d rbuflen: %d\n",
	    device_xname(sc->sc_dev), wbuflen, rbuflen);

	wlen = sizeof(struct arc_fw_bufhdr) + wbuflen + 1; /* 1 for cksum */
	wbuf = kmem_alloc(wlen, KM_SLEEP);

	rlen = sizeof(struct arc_fw_bufhdr) + rbuflen + 1; /* 1 for cksum */
	rbuf = kmem_alloc(rlen, KM_SLEEP);

	DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wlen: %d rlen: %d\n",
	    device_xname(sc->sc_dev), wlen, rlen);

	bufhdr = (struct arc_fw_bufhdr *)wbuf;
	bufhdr->hdr = arc_fw_hdr;
	bufhdr->len = htole16(wbuflen);
	memcpy(wbuf + sizeof(struct arc_fw_bufhdr), wptr, wbuflen);
	wbuf[wlen - 1] = arc_msg_cksum(wptr, wbuflen);

	arc_lock(sc);
	if (arc_read(sc, ARC_REG_OUTB_DOORBELL) != 0) {
		error = EBUSY;
		goto out;
	}

	reg = ARC_REG_OUTB_DOORBELL_READ_OK;

	do {
		if ((reg & ARC_REG_OUTB_DOORBELL_READ_OK) && wdone < wlen) {
			memset(rwbuf, 0, sizeof(rwbuf));
			rwlen = (wlen - wdone) % sizeof(rwbuf);
			memcpy(rwbuf, &wbuf[wdone], rwlen);

#ifdef ARC_DEBUG
			if (arcdebug & ARC_D_DB) {
				printf("%s: write %d:",
				    device_xname(sc->sc_dev), rwlen);
				for (i = 0; i < rwlen; i++)
					printf(" 0x%02x", rwbuf[i]);
				printf("\n");
			}
#endif

			/* copy the chunk to the hw */
			arc_write(sc, ARC_REG_IOC_WBUF_LEN, rwlen);
			arc_write_region(sc, ARC_REG_IOC_WBUF, rwbuf,
			    sizeof(rwbuf));

			/* say we have a buffer for the hw */
			arc_write(sc, ARC_REG_INB_DOORBELL,
			    ARC_REG_INB_DOORBELL_WRITE_OK);

			wdone += rwlen;
		}

		while ((reg = arc_read(sc, ARC_REG_OUTB_DOORBELL)) == 0)
			arc_wait(sc);

		arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);

		DNPRINTF(ARC_D_DB, "%s: reg: 0x%08x\n",
		    device_xname(sc->sc_dev), reg);

		if ((reg & ARC_REG_OUTB_DOORBELL_WRITE_OK) && rdone < rlen) {
			rwlen = arc_read(sc, ARC_REG_IOC_RBUF_LEN);
			if (rwlen > sizeof(rwbuf)) {
				DNPRINTF(ARC_D_DB, "%s: rwlen too big\n",
				    device_xname(sc->sc_dev));
				error = EIO;
				goto out;
			}

			arc_read_region(sc, ARC_REG_IOC_RBUF, rwbuf,
			    sizeof(rwbuf));

			arc_write(sc, ARC_REG_INB_DOORBELL,
			    ARC_REG_INB_DOORBELL_READ_OK);

#ifdef ARC_DEBUG
			printf("%s: len: %d+%d=%d/%d\n",
			    device_xname(sc->sc_dev),
			    rwlen, rdone, rwlen + rdone, rlen);
			if (arcdebug & ARC_D_DB) {
				printf("%s: read:",
				    device_xname(sc->sc_dev));
				for (i = 0; i < rwlen; i++)
					printf(" 0x%02x", rwbuf[i]);
				printf("\n");
			}
#endif

			if ((rdone + rwlen) > rlen) {
				DNPRINTF(ARC_D_DB, "%s: rwbuf too big\n",
				    device_xname(sc->sc_dev));
				error = EIO;
				goto out;
			}

			memcpy(&rbuf[rdone], rwbuf, rwlen);
			rdone += rwlen;
		}
	} while (rdone != rlen);

	bufhdr = (struct arc_fw_bufhdr *)rbuf;
	if (memcmp(&bufhdr->hdr, &arc_fw_hdr, sizeof(bufhdr->hdr)) != 0 ||
	    bufhdr->len != htole16(rbuflen)) {
		DNPRINTF(ARC_D_DB, "%s: rbuf hdr is wrong\n",
		    device_xname(sc->sc_dev));
		error = EIO;
		goto out;
	}

	memcpy(rptr, rbuf + sizeof(struct arc_fw_bufhdr), rbuflen);

	if (rbuf[rlen - 1] != arc_msg_cksum(rptr, rbuflen)) {
		DNPRINTF(ARC_D_DB, "%s: invalid cksum\n",
		    device_xname(sc->sc_dev));
		error = EIO;
		goto out;
	}

out:
	arc_unlock(sc);
	kmem_free(wbuf, wlen);
	kmem_free(rbuf, rlen);

	return error;
}

void
arc_lock(struct arc_softc *sc)
{
	rw_enter(&sc->sc_rwlock, RW_WRITER);
	mutex_spin_enter(&sc->sc_mutex);
	arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
	sc->sc_talking = 1;
}

void
arc_unlock(struct arc_softc *sc)
{
	KASSERT(mutex_owned(&sc->sc_mutex));

	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
	sc->sc_talking = 0;
	mutex_spin_exit(&sc->sc_mutex);
	rw_exit(&sc->sc_rwlock);
}

void
arc_wait(struct arc_softc *sc)
{
	KASSERT(mutex_owned(&sc->sc_mutex));

	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
	if (cv_timedwait(&sc->sc_condvar, &sc->sc_mutex, hz) == EWOULDBLOCK)
		arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
}

#if NBIO > 0
static void
arc_create_sensors(void *arg)
{
	struct arc_softc *sc = arg;
	struct bioc_inq bi;
	struct bioc_vol bv;
	int i, j;
	size_t slen, count = 0;

	memset(&bi, 0, sizeof(bi));
	if (arc_bio_inq(sc, &bi) != 0) {
		aprint_error("%s: unable to query firmware for sensor info\n",
		    device_xname(sc->sc_dev));
		kthread_exit(0);
	}

	/* There's no point in continuing if there are no volumes */
	if (!bi.bi_novol)
		kthread_exit(0);

	for (i = 0; i < bi.bi_novol; i++) {
		memset(&bv, 0, sizeof(bv));
		bv.bv_volid = i;
		if (arc_bio_vol(sc, &bv) != 0)
			kthread_exit(0);

		/* Skip passthrough volumes */
		if (bv.bv_level == BIOC_SVOL_PASSTHRU)
			continue;

		/* new volume found */
		sc->sc_nsensors++;
		/* new disk in a volume found */
		sc->sc_nsensors += bv.bv_nodisk;
	}

	/* No valid volumes */
	if (!sc->sc_nsensors)
		kthread_exit(0);

	sc->sc_sme = sysmon_envsys_create();
	slen = sizeof(envsys_data_t) * sc->sc_nsensors;
	sc->sc_sensors = kmem_zalloc(slen, KM_SLEEP);

	/* Attach sensors for volumes and disks */
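	/*
	 * Each sensor stores the volume id it belongs to in value_max,
	 * and the disk index within that volume plus 10 in value_avg
	 * (0 meaning the sensor is for the volume itself).
	 * arc_refresh_sensors() relies on this encoding.
	 */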
	for (i = 0; i < bi.bi_novol; i++) {
		memset(&bv, 0, sizeof(bv));
		bv.bv_volid = i;
		if (arc_bio_vol(sc, &bv) != 0)
			goto bad;

		sc->sc_sensors[count].units = ENVSYS_DRIVE;
		sc->sc_sensors[count].monitor = true;
		sc->sc_sensors[count].flags = ENVSYS_FMONSTCHANGED;

		/* Skip passthrough volumes */
		if (bv.bv_level == BIOC_SVOL_PASSTHRU)
			continue;

		if (bv.bv_level == BIOC_SVOL_RAID10)
			snprintf(sc->sc_sensors[count].desc,
			    sizeof(sc->sc_sensors[count].desc),
			    "RAID 1+0 volume%d (%s)", i, bv.bv_dev);
		else
			snprintf(sc->sc_sensors[count].desc,
			    sizeof(sc->sc_sensors[count].desc),
			    "RAID %d volume%d (%s)", bv.bv_level, i,
			    bv.bv_dev);

		sc->sc_sensors[count].value_max = i;

		if (sysmon_envsys_sensor_attach(sc->sc_sme,
		    &sc->sc_sensors[count]))
			goto bad;

		count++;

		/* Attach disk sensors for this volume */
		for (j = 0; j < bv.bv_nodisk; j++) {
			sc->sc_sensors[count].units = ENVSYS_DRIVE;
			sc->sc_sensors[count].monitor = true;
			sc->sc_sensors[count].flags = ENVSYS_FMONSTCHANGED;

			snprintf(sc->sc_sensors[count].desc,
			    sizeof(sc->sc_sensors[count].desc),
			    "disk%d volume%d (%s)", j, i, bv.bv_dev);
			sc->sc_sensors[count].value_max = i;
			sc->sc_sensors[count].value_avg = j + 10;

			if (sysmon_envsys_sensor_attach(sc->sc_sme,
			    &sc->sc_sensors[count]))
				goto bad;

			count++;
		}
	}

	/*
	 * Register our envsys driver with the framework now that the
	 * sensors were all attached.
	 */
	sc->sc_sme->sme_name = device_xname(sc->sc_dev);
	sc->sc_sme->sme_cookie = sc;
	sc->sc_sme->sme_refresh = arc_refresh_sensors;

	if (sysmon_envsys_register(sc->sc_sme)) {
		aprint_debug("%s: unable to register with sysmon\n",
		    device_xname(sc->sc_dev));
		goto bad;
	}
	kthread_exit(0);

bad:
	kmem_free(sc->sc_sensors, slen);
	sysmon_envsys_destroy(sc->sc_sme);
	kthread_exit(0);
}

static void
arc_refresh_sensors(struct sysmon_envsys *sme, envsys_data_t *edata)
{
	struct arc_softc *sc = sme->sme_cookie;
	struct bioc_vol bv;
	struct bioc_disk bd;

	/* sanity check */
	if (edata->units != ENVSYS_DRIVE)
		return;

	memset(&bv, 0, sizeof(bv));
	bv.bv_volid = edata->value_max;

	if (arc_bio_vol(sc, &bv)) {
		edata->value_cur = ENVSYS_DRIVE_EMPTY;
		edata->state = ENVSYS_SINVALID;
		return;
	}

	/* Current sensor is handling a disk volume member */
	if (edata->value_avg) {
		memset(&bd, 0, sizeof(bd));
		bd.bd_volid = edata->value_max;
		bd.bd_diskid = edata->value_avg - 10;

		if (arc_bio_disk_volume(sc, &bd)) {
			edata->value_cur = ENVSYS_DRIVE_OFFLINE;
			edata->state = ENVSYS_SCRITICAL;
			return;
		}

		switch (bd.bd_status) {
		case BIOC_SDONLINE:
			edata->value_cur = ENVSYS_DRIVE_ONLINE;
			edata->state = ENVSYS_SVALID;
			break;
		case BIOC_SDOFFLINE:
			edata->value_cur = ENVSYS_DRIVE_OFFLINE;
			edata->state = ENVSYS_SCRITICAL;
			break;
		default:
			edata->value_cur = ENVSYS_DRIVE_FAIL;
			edata->state = ENVSYS_SCRITICAL;
			break;
		}

		return;
	}

	/* Current sensor is handling a volume */
	switch (bv.bv_status) {
	case BIOC_SVOFFLINE:
		edata->value_cur = ENVSYS_DRIVE_OFFLINE;
		edata->state = ENVSYS_SCRITICAL;
		break;
	case BIOC_SVDEGRADED:
		edata->value_cur = ENVSYS_DRIVE_PFAIL;
		edata->state = ENVSYS_SCRITICAL;
		break;
	case BIOC_SVBUILDING:
		edata->value_cur = ENVSYS_DRIVE_BUILD;
		edata->state = ENVSYS_SVALID;
		break;
	case BIOC_SVMIGRATING:
		edata->value_cur = ENVSYS_DRIVE_MIGRATING;
		edata->state = ENVSYS_SVALID;
		break;
	case BIOC_SVCHECKING:
		edata->value_cur = ENVSYS_DRIVE_CHECK;
		edata->state = ENVSYS_SVALID;
		break;
	case BIOC_SVREBUILD:
		edata->value_cur = ENVSYS_DRIVE_REBUILD;
		edata->state = ENVSYS_SCRITICAL;
		break;
	case BIOC_SVSCRUB:
	case BIOC_SVONLINE:
		edata->value_cur = ENVSYS_DRIVE_ONLINE;
		edata->state = ENVSYS_SVALID;
		break;
	case BIOC_SVINVALID:
		/* FALLTHROUGH */
	default:
		edata->value_cur = ENVSYS_DRIVE_EMPTY; /* unknown state */
		edata->state = ENVSYS_SINVALID;
		break;
	}
}
#endif /* NBIO > 0 */

uint32_t
arc_read(struct arc_softc *sc, bus_size_t r)
{
	uint32_t v;

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);

	DNPRINTF(ARC_D_RW, "%s: arc_read 0x%lx 0x%08x\n",
	    device_xname(sc->sc_dev), r, v);

	return v;
}

void
arc_read_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
	    BUS_SPACE_BARRIER_READ);
	bus_space_read_region_4(sc->sc_iot, sc->sc_ioh, r,
	    (uint32_t *)buf, len >> 2);
}

void
arc_write(struct arc_softc *sc, bus_size_t r, uint32_t v)
{
	DNPRINTF(ARC_D_RW, "%s: arc_write 0x%lx 0x%08x\n",
	    device_xname(sc->sc_dev), r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

void
arc_write_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
{
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, r,
	    (const uint32_t *)buf, len >> 2);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
	    BUS_SPACE_BARRIER_WRITE);
}

int
arc_wait_eq(struct arc_softc *sc, bus_size_t r, uint32_t mask,
    uint32_t target)
{
	int i;

	DNPRINTF(ARC_D_RW, "%s: arc_wait_eq 0x%lx 0x%08x 0x%08x\n",
	    device_xname(sc->sc_dev), r, mask, target);

	for (i = 0; i < 10000; i++) {
		if ((arc_read(sc, r) & mask) == target)
			return 0;
		delay(1000);
	}

	return 1;
}

int
arc_wait_ne(struct arc_softc *sc, bus_size_t r, uint32_t mask,
    uint32_t target)
{
	int i;

	DNPRINTF(ARC_D_RW, "%s: arc_wait_ne 0x%lx 0x%08x 0x%08x\n",
	    device_xname(sc->sc_dev), r, mask, target);

	for (i = 0; i < 10000; i++) {
		if ((arc_read(sc, r) & mask) != target)
			return 0;
		delay(1000);
	}

	return 1;
}

int
arc_msg0(struct arc_softc *sc, uint32_t m)
{
	/* post message */
	arc_write(sc, ARC_REG_INB_MSG0, m);
	/* wait for the fw to do it */
	if (arc_wait_eq(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0,
	    ARC_REG_INTRSTAT_MSG0) != 0)
		return 1;

	/* ack it */
	arc_write(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0);

	return 0;
}

struct arc_dmamem *
arc_dmamem_alloc(struct arc_softc *sc, size_t size)
{
	struct arc_dmamem *adm;
	int nsegs;

	adm = kmem_zalloc(sizeof(*adm), KM_NOSLEEP);
	if (adm == NULL)
		return NULL;

	adm->adm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &adm->adm_map) != 0)
		goto admfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &adm->adm_seg,
	    1, &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &adm->adm_seg, nsegs, size,
	    &adm->adm_kva, BUS_DMA_NOWAIT|BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, adm->adm_map, adm->adm_kva, size,
	    NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	memset(adm->adm_kva, 0, size);

	return adm;

unmap:
	bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
admfree:
	kmem_free(adm, sizeof(*adm));

	return NULL;
}

void
arc_dmamem_free(struct arc_softc *sc, struct arc_dmamem *adm)
{
	bus_dmamap_unload(sc->sc_dmat, adm->adm_map);
	bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, adm->adm_size);
	bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
	kmem_free(adm, sizeof(*adm));
}
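
/*
 * All command frames live in a single DMA-safe region of
 * ARC_MAX_IOCMDLEN * sc_req_count bytes: ccb N uses the slot at offset
 * N * ARC_MAX_IOCMDLEN, and ccb_cmd_post caches that slot's bus address
 * shifted right by ARC_REG_POST_QUEUE_ADDR_SHIFT, which is the value
 * arc_scsi_cmd() later writes to the post queue.
 */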
int
arc_alloc_ccbs(device_t self)
{
	struct arc_softc *sc = device_private(self);
	struct arc_ccb *ccb;
	uint8_t *cmd;
	int i;
	size_t ccbslen;

	TAILQ_INIT(&sc->sc_ccb_free);

	ccbslen = sizeof(struct arc_ccb) * sc->sc_req_count;
	sc->sc_ccbs = kmem_zalloc(ccbslen, KM_SLEEP);

	sc->sc_requests = arc_dmamem_alloc(sc,
	    ARC_MAX_IOCMDLEN * sc->sc_req_count);
	if (sc->sc_requests == NULL) {
		aprint_error_dev(self, "unable to allocate ccb dmamem\n");
		goto free_ccbs;
	}
	cmd = ARC_DMA_KVA(sc->sc_requests);

	for (i = 0; i < sc->sc_req_count; i++) {
		ccb = &sc->sc_ccbs[i];

		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, ARC_SGL_MAXLEN,
		    MAXPHYS, 0, 0, &ccb->ccb_dmamap) != 0) {
			aprint_error_dev(self,
			    "unable to create dmamap for ccb %d\n", i);
			goto free_maps;
		}

		ccb->ccb_sc = sc;
		ccb->ccb_id = i;
		ccb->ccb_offset = ARC_MAX_IOCMDLEN * i;

		ccb->ccb_cmd = (struct arc_io_cmd *)&cmd[ccb->ccb_offset];
		ccb->ccb_cmd_post = (ARC_DMA_DVA(sc->sc_requests) +
		    ccb->ccb_offset) >> ARC_REG_POST_QUEUE_ADDR_SHIFT;

		arc_put_ccb(sc, ccb);
	}

	return 0;

free_maps:
	while ((ccb = arc_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
	arc_dmamem_free(sc, sc->sc_requests);

free_ccbs:
	kmem_free(sc->sc_ccbs, ccbslen);

	return 1;
}

struct arc_ccb *
arc_get_ccb(struct arc_softc *sc)
{
	struct arc_ccb *ccb;

	ccb = TAILQ_FIRST(&sc->sc_ccb_free);
	if (ccb != NULL)
		TAILQ_REMOVE(&sc->sc_ccb_free, ccb, ccb_link);

	return ccb;
}

void
arc_put_ccb(struct arc_softc *sc, struct arc_ccb *ccb)
{
	ccb->ccb_xs = NULL;
	memset(ccb->ccb_cmd, 0, ARC_MAX_IOCMDLEN);
	TAILQ_INSERT_TAIL(&sc->sc_ccb_free, ccb, ccb_link);
}