/*	$NetBSD: arcmsr.c,v 1.25 2010/04/03 17:54:24 jruoho Exp $ */
/*	$OpenBSD: arc.c,v 1.68 2007/10/27 03:28:27 dlg Exp $ */

/*
 * Copyright (c) 2007, 2008 Juan Romero Pardines <xtraeme@netbsd.org>
 * Copyright (c) 2006 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bio.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: arcmsr.c,v 1.25 2010/04/03 17:54:24 jruoho Exp $");

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/rwlock.h>

#if NBIO > 0
#include <sys/ioctl.h>
#include <dev/biovar.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/sysmon/sysmonvar.h>

#include <sys/bus.h>

#include <uvm/uvm_extern.h>	/* for PAGE_SIZE */

#include <dev/pci/arcmsrvar.h>

/* #define ARC_DEBUG */
#ifdef ARC_DEBUG
#define ARC_D_INIT	(1<<0)
#define ARC_D_RW	(1<<1)
#define ARC_D_DB	(1<<2)

int arcdebug = 0;

#define DPRINTF(p...)		do { if (arcdebug) printf(p); } while (0)
#define DNPRINTF(n, p...)	do { if ((n) & arcdebug) printf(p); } while (0)

#else
#define DPRINTF(p, ...)		/* p */
#define DNPRINTF(n, p, ...)	/* n, p */
#endif

/*
 * the fw header must always equal this.
 */
static struct arc_fw_hdr arc_fw_hdr = { 0x5e, 0x01, 0x61 };

/*
 * autoconf(9) glue.
 */
static int	arc_match(device_t, cfdata_t, void *);
static void	arc_attach(device_t, device_t, void *);
static int	arc_detach(device_t, int);
static bool	arc_shutdown(device_t, int);
static int	arc_intr(void *);
static void	arc_minphys(struct buf *);

CFATTACH_DECL_NEW(arcmsr, sizeof(struct arc_softc),
	arc_match, arc_attach, arc_detach, NULL);
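
/*
 * CFATTACH_DECL_NEW() above glues the driver into autoconf(9) under the
 * name "arcmsr": softc size first, then the match/attach/detach hooks
 * declared above.  The trailing NULL is the optional activate hook,
 * which this driver does not provide.
 */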

/*
 * bio(4) and sysmon_envsys(9) glue.
 */
#if NBIO > 0
static int	arc_bioctl(device_t, u_long, void *);
static int	arc_bio_inq(struct arc_softc *, struct bioc_inq *);
static int	arc_bio_vol(struct arc_softc *, struct bioc_vol *);
static int	arc_bio_disk_volume(struct arc_softc *, struct bioc_disk *);
static int	arc_bio_disk_novol(struct arc_softc *, struct bioc_disk *);
static void	arc_bio_disk_filldata(struct arc_softc *, struct bioc_disk *,
			struct arc_fw_diskinfo *, int);
static int	arc_bio_alarm(struct arc_softc *, struct bioc_alarm *);
static int	arc_bio_alarm_state(struct arc_softc *, struct bioc_alarm *);
static int	arc_bio_getvol(struct arc_softc *, int,
			struct arc_fw_volinfo *);
static int	arc_bio_setstate(struct arc_softc *, struct bioc_setstate *);
static int	arc_bio_volops(struct arc_softc *, struct bioc_volops *);
static void	arc_create_sensors(void *);
static void	arc_refresh_sensors(struct sysmon_envsys *, envsys_data_t *);
static int	arc_fw_parse_status_code(struct arc_softc *, uint8_t *);
#endif

static int
arc_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ARECA) {
		switch (PCI_PRODUCT(pa->pa_id)) {
		case PCI_PRODUCT_ARECA_ARC1110:
		case PCI_PRODUCT_ARECA_ARC1120:
		case PCI_PRODUCT_ARECA_ARC1130:
		case PCI_PRODUCT_ARECA_ARC1160:
		case PCI_PRODUCT_ARECA_ARC1170:
		case PCI_PRODUCT_ARECA_ARC1200:
		case PCI_PRODUCT_ARECA_ARC1202:
		case PCI_PRODUCT_ARECA_ARC1210:
		case PCI_PRODUCT_ARECA_ARC1220:
		case PCI_PRODUCT_ARECA_ARC1230:
		case PCI_PRODUCT_ARECA_ARC1260:
		case PCI_PRODUCT_ARECA_ARC1270:
		case PCI_PRODUCT_ARECA_ARC1280:
		case PCI_PRODUCT_ARECA_ARC1380:
		case PCI_PRODUCT_ARECA_ARC1381:
		case PCI_PRODUCT_ARECA_ARC1680:
		case PCI_PRODUCT_ARECA_ARC1681:
			return 1;
		default:
			break;
		}
	}

	return 0;
}

static void
arc_attach(device_t parent, device_t self, void *aux)
{
	struct arc_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_chan;

	sc->sc_dev = self;
	sc->sc_talking = 0;
	rw_init(&sc->sc_rwlock);
	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_condvar, "arcdb");

	if (arc_map_pci_resources(self, pa) != 0) {
		/* error message printed by arc_map_pci_resources */
		return;
	}

	if (arc_query_firmware(self) != 0) {
		/* error message printed by arc_query_firmware */
		goto unmap_pci;
	}

	if (arc_alloc_ccbs(self) != 0) {
		/* error message printed by arc_alloc_ccbs */
		goto unmap_pci;
	}

	if (!pmf_device_register1(self, NULL, NULL, arc_shutdown))
		panic("%s: couldn't establish shutdown handler\n",
		    device_xname(self));

	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = self;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = sc->sc_req_count / ARC_MAX_TARGET;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_minphys = arc_minphys;
	adapt->adapt_request = arc_scsi_cmd;

	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_nluns = ARC_MAX_LUN;
	chan->chan_ntargets = ARC_MAX_TARGET;
	chan->chan_id = ARC_MAX_TARGET;
	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;

	/*
	 * Save the device_t returned, because we could need it to attach
	 * devices via the management interface.
	 */
	sc->sc_scsibus_dv = config_found(self, &sc->sc_chan, scsiprint);

	/* enable interrupts */
	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRSTAT_DOORBELL));

#if NBIO > 0
	/*
	 * Register the driver to bio(4) and setup the sensors.
	 */
	if (bio_register(self, arc_bioctl) != 0)
		panic("%s: bioctl registration failed\n", device_xname(self));

	/*
	 * you need to talk to the firmware to get volume info. our firmware
	 * interface relies on being able to sleep, so we need to use a thread
	 * to do the work.
	 */
	if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
	    arc_create_sensors, sc, &sc->sc_lwp, "arcmsr_sensors") != 0)
		panic("%s: unable to create a kernel thread for sensors\n",
		    device_xname(self));
#endif

	return;

unmap_pci:
	arc_unmap_pci_resources(sc);
}

static int
arc_detach(device_t self, int flags)
{
	struct arc_softc *sc = device_private(self);

	if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
		aprint_error_dev(self, "timeout waiting to stop bg rebuild\n");

	if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
		aprint_error_dev(self, "timeout waiting to flush cache\n");

	if (sc->sc_sme != NULL)
		sysmon_envsys_unregister(sc->sc_sme);

	return 0;
}

static bool
arc_shutdown(device_t self, int how)
{
	struct arc_softc *sc = device_private(self);

	if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
		aprint_error_dev(self, "timeout waiting to stop bg rebuild\n");

	if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
		aprint_error_dev(self, "timeout waiting to flush cache\n");

	return true;
}

static void
arc_minphys(struct buf *bp)
{
	if (bp->b_bcount > MAXPHYS)
		bp->b_bcount = MAXPHYS;
	minphys(bp);
}

static int
arc_intr(void *arg)
{
	struct arc_softc *sc = arg;
	struct arc_ccb *ccb = NULL;
	char *kva = ARC_DMA_KVA(sc->sc_requests);
	struct arc_io_cmd *cmd;
	uint32_t reg, intrstat;

	mutex_spin_enter(&sc->sc_mutex);
	intrstat = arc_read(sc, ARC_REG_INTRSTAT);
	if (intrstat == 0x0) {
		mutex_spin_exit(&sc->sc_mutex);
		return 0;
	}

	intrstat &= ARC_REG_INTRSTAT_POSTQUEUE | ARC_REG_INTRSTAT_DOORBELL;
	arc_write(sc, ARC_REG_INTRSTAT, intrstat);

	if (intrstat & ARC_REG_INTRSTAT_DOORBELL) {
		if (sc->sc_talking) {
			arc_write(sc, ARC_REG_INTRMASK,
			    ~ARC_REG_INTRMASK_POSTQUEUE);
			cv_broadcast(&sc->sc_condvar);
		} else {
			/* otherwise drop it */
			reg = arc_read(sc, ARC_REG_OUTB_DOORBELL);
			arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
			if (reg & ARC_REG_OUTB_DOORBELL_WRITE_OK)
				arc_write(sc, ARC_REG_INB_DOORBELL,
				    ARC_REG_INB_DOORBELL_READ_OK);
		}
	}
	mutex_spin_exit(&sc->sc_mutex);

	while ((reg = arc_pop(sc)) != 0xffffffff) {
		cmd = (struct arc_io_cmd *)(kva +
		    ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
		    (uint32_t)ARC_DMA_DVA(sc->sc_requests)));
		ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];

		bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
		    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		arc_scsi_cmd_done(sc, ccb, reg);
	}

	return 1;
}
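
/*
 * A note on the command post/reply queues (a sketch based on this file;
 * see arc_alloc_ccbs() and arc_intr() above): every CCB's command frame
 * lives inside the sc_requests DMA region, and the value posted to the
 * hardware is that frame's bus address shifted right by
 * ARC_REG_POST_QUEUE_ADDR_SHIFT.  On completion the driver undoes the
 * shift and subtracts the region's bus address to get back to the
 * frame, whose "context" field indexes sc_ccbs[]:
 *
 *	offset = (reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
 *	    ARC_DMA_DVA(sc->sc_requests);
 *	ccb = &sc->sc_ccbs[cmd->cmd.context];
 */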

void
arc_scsi_cmd(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct arc_softc *sc = device_private(adapt->adapt_dev);
	struct arc_ccb *ccb;
	struct arc_msg_scsicmd *cmd;
	uint32_t reg;
	uint8_t target;

	switch (req) {
	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
		/* Not supported. */
		return;
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	mutex_spin_enter(&sc->sc_mutex);

	xs = arg;
	periph = xs->xs_periph;
	target = periph->periph_target;

	if (xs->cmdlen > ARC_MSG_CDBLEN) {
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.scsi_sense.response_code = SSD_RCODE_VALID | 0x70;
		xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.scsi_sense.asc = 0x20;
		xs->error = XS_SENSE;
		xs->status = SCSI_CHECK;
		mutex_spin_exit(&sc->sc_mutex);
		scsipi_done(xs);
		return;
	}

	ccb = arc_get_ccb(sc);
	if (ccb == NULL) {
		xs->error = XS_RESOURCE_SHORTAGE;
		mutex_spin_exit(&sc->sc_mutex);
		scsipi_done(xs);
		return;
	}

	ccb->ccb_xs = xs;

	if (arc_load_xs(ccb) != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		arc_put_ccb(sc, ccb);
		mutex_spin_exit(&sc->sc_mutex);
		scsipi_done(xs);
		return;
	}

	cmd = &ccb->ccb_cmd->cmd;
	reg = ccb->ccb_cmd_post;

	/* bus is always 0 */
	cmd->target = target;
	cmd->lun = periph->periph_lun;
	cmd->function = 1; /* XXX magic number */

	cmd->cdb_len = xs->cmdlen;
	cmd->sgl_len = ccb->ccb_dmamap->dm_nsegs;
	if (xs->xs_control & XS_CTL_DATA_OUT)
		cmd->flags = ARC_MSG_SCSICMD_FLAG_WRITE;
	if (ccb->ccb_dmamap->dm_nsegs > ARC_SGL_256LEN) {
		cmd->flags |= ARC_MSG_SCSICMD_FLAG_SGL_BSIZE_512;
		reg |= ARC_REG_POST_QUEUE_BIGFRAME;
	}

	cmd->context = htole32(ccb->ccb_id);
	cmd->data_len = htole32(xs->datalen);

	memcpy(cmd->cdb, xs->cmd, xs->cmdlen);

	/* we've built the command, let's put it on the hw */
	bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	arc_push(sc, reg);
	if (xs->xs_control & XS_CTL_POLL) {
		if (arc_complete(sc, ccb, xs->timeout) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			mutex_spin_exit(&sc->sc_mutex);
			scsipi_done(xs);
			return;
		}
	}

	mutex_spin_exit(&sc->sc_mutex);
}

int
arc_load_xs(struct arc_ccb *ccb)
{
	struct arc_softc *sc = ccb->ccb_sc;
	struct scsipi_xfer *xs = ccb->ccb_xs;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	struct arc_sge *sgl = ccb->ccb_cmd->sgl, *sge;
	uint64_t addr;
	int i, error;

	if (xs->datalen == 0)
		return 0;

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    xs->data, xs->datalen, NULL,
	    (xs->xs_control & XS_CTL_NOSLEEP) ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error != 0) {
		aprint_error("%s: error %d loading dmamap\n",
		    device_xname(sc->sc_dev), error);
		return 1;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		sge = &sgl[i];

		sge->sg_hdr = htole32(ARC_SGE_64BIT | dmap->dm_segs[i].ds_len);
		addr = dmap->dm_segs[i].ds_addr;
		sge->sg_hi_addr = htole32((uint32_t)(addr >> 32));
		sge->sg_lo_addr = htole32((uint32_t)addr);
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
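
/*
 * Scatter/gather format, as arc_load_xs() above fills it in: each
 * element carries a little-endian header word (the ARC_SGE_64BIT flag
 * OR'ed with the segment length) followed by the high and low 32 bits
 * of the segment's bus address.  The struct arc_sge layout itself lives
 * in the driver's header (presumably arcmsrvar.h, included above).
 */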

void
arc_scsi_cmd_done(struct arc_softc *sc, struct arc_ccb *ccb, uint32_t reg)
{
	struct scsipi_xfer *xs = ccb->ccb_xs;
	struct arc_msg_scsicmd *cmd;

	if (xs->datalen != 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	/* timeout_del */
	xs->status |= XS_STS_DONE;

	if (reg & ARC_REG_REPLY_QUEUE_ERR) {
		cmd = &ccb->ccb_cmd->cmd;

		switch (cmd->status) {
		case ARC_MSG_STATUS_SELTIMEOUT:
		case ARC_MSG_STATUS_ABORTED:
		case ARC_MSG_STATUS_INIT_FAIL:
			xs->status = SCSI_OK;
			xs->error = XS_SELTIMEOUT;
			break;

		case SCSI_CHECK:
			memset(&xs->sense, 0, sizeof(xs->sense));
			memcpy(&xs->sense, cmd->sense_data,
			    min(ARC_MSG_SENSELEN, sizeof(xs->sense)));
			xs->sense.scsi_sense.response_code =
			    SSD_RCODE_VALID | 0x70;
			xs->status = SCSI_CHECK;
			xs->error = XS_SENSE;
			xs->resid = 0;
			break;

		default:
			/* unknown device status */
			xs->error = XS_BUSY; /* try again later? */
			xs->status = SCSI_BUSY;
			break;
		}
	} else {
		xs->status = SCSI_OK;
		xs->error = XS_NOERROR;
		xs->resid = 0;
	}

	arc_put_ccb(sc, ccb);
	scsipi_done(xs);
}

int
arc_complete(struct arc_softc *sc, struct arc_ccb *nccb, int timeout)
{
	struct arc_ccb *ccb = NULL;
	char *kva = ARC_DMA_KVA(sc->sc_requests);
	struct arc_io_cmd *cmd;
	uint32_t reg;

	do {
		reg = arc_pop(sc);
		if (reg == 0xffffffff) {
			if (timeout-- == 0)
				return 1;

			delay(1000);
			continue;
		}

		cmd = (struct arc_io_cmd *)(kva +
		    ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
		    ARC_DMA_DVA(sc->sc_requests)));
		ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];

		bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
		    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		arc_scsi_cmd_done(sc, ccb, reg);
	} while (nccb != ccb);

	return 0;
}

int
arc_map_pci_resources(device_t self, struct pci_attach_args *pa)
{
	struct arc_softc *sc = device_private(self);
	pcireg_t memtype;
	pci_intr_handle_t ih;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, ARC_PCI_BAR);
	if (pci_mapreg_map(pa, ARC_PCI_BAR, memtype, 0, &sc->sc_iot,
	    &sc->sc_ioh, NULL, &sc->sc_ios) != 0) {
		aprint_error(": unable to map system interface register\n");
		return 1;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error(": unable to map interrupt\n");
		goto unmap;
	}

	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
	    arc_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error(": unable to map interrupt [2]\n");
		goto unmap;
	}

	aprint_normal("\n");
	aprint_normal_dev(self, "interrupting at %s\n",
	    pci_intr_string(pa->pa_pc, ih));

	return 0;

unmap:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;
	return 1;
}

void
arc_unmap_pci_resources(struct arc_softc *sc)
{
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;
}
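
/*
 * arc_query_firmware() below is the boot-time handshake with the IOP:
 * wait for the FIRMWARE_OK bit, issue GET_CONFIG and START_BGRB through
 * message register 0, then read the firmware info block out of the
 * message buffer window.  The queue_len it reports becomes sc_req_count
 * and sizes the CCB pool in arc_alloc_ccbs().
 */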

int
arc_query_firmware(device_t self)
{
	struct arc_softc *sc = device_private(self);
	struct arc_msg_firmware_info fwinfo;
	char string[81]; /* sizeof(vendor)*2+1 */

	if (arc_wait_eq(sc, ARC_REG_OUTB_ADDR1, ARC_REG_OUTB_ADDR1_FIRMWARE_OK,
	    ARC_REG_OUTB_ADDR1_FIRMWARE_OK) != 0) {
		aprint_debug_dev(self, "timeout waiting for firmware ok\n");
		return 1;
	}

	if (arc_msg0(sc, ARC_REG_INB_MSG0_GET_CONFIG) != 0) {
		aprint_debug_dev(self, "timeout waiting for get config\n");
		return 1;
	}

	if (arc_msg0(sc, ARC_REG_INB_MSG0_START_BGRB) != 0) {
		aprint_debug_dev(self, "timeout waiting to start bg rebuild\n");
		return 1;
	}

	arc_read_region(sc, ARC_REG_MSGBUF, &fwinfo, sizeof(fwinfo));

	DNPRINTF(ARC_D_INIT, "%s: signature: 0x%08x\n",
	    device_xname(self), htole32(fwinfo.signature));

	if (htole32(fwinfo.signature) != ARC_FWINFO_SIGNATURE_GET_CONFIG) {
		aprint_error_dev(self, "invalid firmware info from iop\n");
		return 1;
	}

	DNPRINTF(ARC_D_INIT, "%s: request_len: %d\n",
	    device_xname(self), htole32(fwinfo.request_len));
	DNPRINTF(ARC_D_INIT, "%s: queue_len: %d\n",
	    device_xname(self), htole32(fwinfo.queue_len));
	DNPRINTF(ARC_D_INIT, "%s: sdram_size: %d\n",
	    device_xname(self), htole32(fwinfo.sdram_size));
	DNPRINTF(ARC_D_INIT, "%s: sata_ports: %d\n",
	    device_xname(self), htole32(fwinfo.sata_ports));

	scsipi_strvis(string, 81, fwinfo.vendor, sizeof(fwinfo.vendor));
	DNPRINTF(ARC_D_INIT, "%s: vendor: \"%s\"\n",
	    device_xname(self), string);

	scsipi_strvis(string, 17, fwinfo.model, sizeof(fwinfo.model));
	aprint_normal_dev(self, "Areca %s Host Adapter RAID controller\n",
	    string);

	scsipi_strvis(string, 33, fwinfo.fw_version, sizeof(fwinfo.fw_version));
	DNPRINTF(ARC_D_INIT, "%s: version: \"%s\"\n",
	    device_xname(self), string);

	aprint_normal_dev(self, "%d ports, %dMB SDRAM, firmware <%s>\n",
	    htole32(fwinfo.sata_ports), htole32(fwinfo.sdram_size), string);

	if (htole32(fwinfo.request_len) != ARC_MAX_IOCMDLEN) {
		aprint_error_dev(self,
		    "unexpected request frame size (%d != %d)\n",
		    htole32(fwinfo.request_len), ARC_MAX_IOCMDLEN);
		return 1;
	}

	sc->sc_req_count = htole32(fwinfo.queue_len);

	return 0;
}

#if NBIO > 0
static int
arc_bioctl(device_t self, u_long cmd, void *addr)
{
	struct arc_softc *sc = device_private(self);
	int error = 0;

	switch (cmd) {
	case BIOCINQ:
		error = arc_bio_inq(sc, (struct bioc_inq *)addr);
		break;

	case BIOCVOL:
		error = arc_bio_vol(sc, (struct bioc_vol *)addr);
		break;

	case BIOCDISK:
		error = arc_bio_disk_volume(sc, (struct bioc_disk *)addr);
		break;

	case BIOCDISK_NOVOL:
		error = arc_bio_disk_novol(sc, (struct bioc_disk *)addr);
		break;

	case BIOCALARM:
		error = arc_bio_alarm(sc, (struct bioc_alarm *)addr);
		break;

	case BIOCSETSTATE:
		error = arc_bio_setstate(sc, (struct bioc_setstate *)addr);
		break;

	case BIOCVOLOPS:
		error = arc_bio_volops(sc, (struct bioc_volops *)addr);
		break;

	default:
		error = ENOTTY;
		break;
	}

	return error;
}
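
/*
 * The handlers below are only reachable through bio(4): the bioctl(8)
 * utility opens the bio pseudo-device, names this adapter (e.g.
 * "arcmsr0"), and the framework forwards the BIOC* ioctls to
 * arc_bioctl() above.  Which operations bioctl(8) actually exposes
 * varies between releases, so the switch above is the authoritative
 * list for this driver.
 */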

static int
arc_fw_parse_status_code(struct arc_softc *sc, uint8_t *reply)
{
	switch (*reply) {
	case ARC_FW_CMD_RAIDINVAL:
		printf("%s: firmware error (invalid raid set)\n",
		    device_xname(sc->sc_dev));
		return EINVAL;
	case ARC_FW_CMD_VOLINVAL:
		printf("%s: firmware error (invalid volume set)\n",
		    device_xname(sc->sc_dev));
		return EINVAL;
	case ARC_FW_CMD_NORAID:
		printf("%s: firmware error (nonexistent raid set)\n",
		    device_xname(sc->sc_dev));
		return ENODEV;
	case ARC_FW_CMD_NOVOLUME:
		printf("%s: firmware error (nonexistent volume set)\n",
		    device_xname(sc->sc_dev));
		return ENODEV;
	case ARC_FW_CMD_NOPHYSDRV:
		printf("%s: firmware error (nonexistent physical drive)\n",
		    device_xname(sc->sc_dev));
		return ENODEV;
	case ARC_FW_CMD_PARAM_ERR:
		printf("%s: firmware error (parameter error)\n",
		    device_xname(sc->sc_dev));
		return EINVAL;
	case ARC_FW_CMD_UNSUPPORTED:
		printf("%s: firmware error (unsupported command)\n",
		    device_xname(sc->sc_dev));
		return EOPNOTSUPP;
	case ARC_FW_CMD_DISKCFG_CHGD:
		printf("%s: firmware error (disk configuration changed)\n",
		    device_xname(sc->sc_dev));
		return EINVAL;
	case ARC_FW_CMD_PASS_INVAL:
		printf("%s: firmware error (invalid password)\n",
		    device_xname(sc->sc_dev));
		return EINVAL;
	case ARC_FW_CMD_NODISKSPACE:
		printf("%s: firmware error (no disk space available)\n",
		    device_xname(sc->sc_dev));
		return EOPNOTSUPP;
	case ARC_FW_CMD_CHECKSUM_ERR:
		printf("%s: firmware error (checksum error)\n",
		    device_xname(sc->sc_dev));
		return EINVAL;
	case ARC_FW_CMD_PASS_REQD:
		printf("%s: firmware error (password required)\n",
		    device_xname(sc->sc_dev));
		return EPERM;
	case ARC_FW_CMD_OK:
	default:
		return 0;
	}
}
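
/*
 * All of the bio(4) handlers below talk to the firmware the same way:
 * build a small request where byte 0 is the command code and any
 * arguments follow, push it through arc_msgbuf(), and feed the one-byte
 * status reply to arc_fw_parse_status_code().  A minimal sketch, using
 * an opcode already used below:
 *
 *	uint8_t request[2], reply[1];
 *
 *	request[0] = ARC_FW_SET_ALARM;
 *	request[1] = ARC_FW_SET_ALARM_ENABLE;
 *	error = arc_msgbuf(sc, request, sizeof(request),
 *	    reply, sizeof(reply));
 *	if (error == 0)
 *		error = arc_fw_parse_status_code(sc, &reply[0]);
 */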

static int
arc_bio_alarm(struct arc_softc *sc, struct bioc_alarm *ba)
{
	uint8_t request[2], reply[1];
	size_t len;
	int error = 0;

	switch (ba->ba_opcode) {
	case BIOC_SAENABLE:
	case BIOC_SADISABLE:
		request[0] = ARC_FW_SET_ALARM;
		request[1] = (ba->ba_opcode == BIOC_SAENABLE) ?
		    ARC_FW_SET_ALARM_ENABLE : ARC_FW_SET_ALARM_DISABLE;
		len = sizeof(request);

		break;

	case BIOC_SASILENCE:
		request[0] = ARC_FW_MUTE_ALARM;
		len = 1;

		break;

	case BIOC_GASTATUS:
		/* system info is too big/ugly to deal with here */
		return arc_bio_alarm_state(sc, ba);

	default:
		return EOPNOTSUPP;
	}

	error = arc_msgbuf(sc, request, len, reply, sizeof(reply));
	if (error != 0)
		return error;

	return arc_fw_parse_status_code(sc, &reply[0]);
}

static int
arc_bio_alarm_state(struct arc_softc *sc, struct bioc_alarm *ba)
{
	struct arc_fw_sysinfo *sysinfo;
	uint8_t request;
	int error = 0;

	sysinfo = kmem_zalloc(sizeof(*sysinfo), KM_SLEEP);

	request = ARC_FW_SYSINFO;
	error = arc_msgbuf(sc, &request, sizeof(request),
	    sysinfo, sizeof(struct arc_fw_sysinfo));

	if (error != 0)
		goto out;

	ba->ba_status = sysinfo->alarm;

out:
	kmem_free(sysinfo, sizeof(*sysinfo));
	return error;
}

static int
arc_bio_volops(struct arc_softc *sc, struct bioc_volops *bc)
{
	/* to create a raid set */
	struct req_craidset {
		uint8_t cmdcode;
		uint32_t devmask;
		uint8_t raidset_name[16];
	} __packed;

	/* to create a volume set */
	struct req_cvolset {
		uint8_t cmdcode;
		uint8_t raidset;
		uint8_t volset_name[16];
		uint64_t capacity;
		uint8_t raidlevel;
		uint8_t stripe;
		uint8_t scsi_chan;
		uint8_t scsi_target;
		uint8_t scsi_lun;
		uint8_t tagqueue;
		uint8_t cache;
		uint8_t speed;
		uint8_t quick_init;
	} __packed;

	struct scsibus_softc *scsibus_sc = NULL;
	struct req_craidset req_craidset;
	struct req_cvolset req_cvolset;
	uint8_t request[2];
	uint8_t reply[1];
	int error = 0;

	switch (bc->bc_opcode) {
	case BIOC_VCREATE_VOLUME:
	    {
		/*
		 * Zero out the structs so that we use some defaults
		 * in raid and volume sets.
		 */
		memset(&req_craidset, 0, sizeof(req_craidset));
		memset(&req_cvolset, 0, sizeof(req_cvolset));

		/*
		 * Firstly we have to create the raid set and
		 * use the default name for all of them.
		 */
		req_craidset.cmdcode = ARC_FW_CREATE_RAIDSET;
		req_craidset.devmask = bc->bc_devmask;
		error = arc_msgbuf(sc, &req_craidset, sizeof(req_craidset),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		error = arc_fw_parse_status_code(sc, &reply[0]);
		if (error) {
			printf("%s: create raidset%d failed\n",
			    device_xname(sc->sc_dev), bc->bc_volid);
			return error;
		}

		/*
		 * At this point the raid set was created, so it's
		 * time to create the volume set.
		 */
		req_cvolset.cmdcode = ARC_FW_CREATE_VOLUME;
		req_cvolset.raidset = bc->bc_volid;
		req_cvolset.capacity = bc->bc_size * ARC_BLOCKSIZE;

		/*
		 * Set the RAID level.
		 */
		switch (bc->bc_level) {
		case 0:
		case 1:
			req_cvolset.raidlevel = bc->bc_level;
			break;
		case BIOC_SVOL_RAID10:
			req_cvolset.raidlevel = 1;
			break;
		case 3:
			req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_3;
			break;
		case 5:
			req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_5;
			break;
		case 6:
			req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_6;
			break;
		default:
			return EOPNOTSUPP;
		}
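
		/*
		 * Note that BIOC_SVOL_RAID10 is requested as firmware
		 * level 1 here; the controller presumably tells plain
		 * RAID 1 and RAID 1+0 apart by the number of member
		 * disks (see the reverse mapping in arc_bio_vol()).
		 */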

		/*
		 * Set the stripe size.
		 */
		switch (bc->bc_stripe) {
		case 4:
			req_cvolset.stripe = 0;
			break;
		case 8:
			req_cvolset.stripe = 1;
			break;
		case 16:
			req_cvolset.stripe = 2;
			break;
		case 32:
			req_cvolset.stripe = 3;
			break;
		case 64:
			req_cvolset.stripe = 4;
			break;
		case 128:
			req_cvolset.stripe = 5;
			break;
		default:
			req_cvolset.stripe = 4; /* by default 64K */
			break;
		}

		req_cvolset.scsi_chan = bc->bc_channel;
		req_cvolset.scsi_target = bc->bc_target;
		req_cvolset.scsi_lun = bc->bc_lun;
		req_cvolset.tagqueue = 1; /* always enabled */
		req_cvolset.cache = 1; /* always enabled */
		req_cvolset.speed = 4; /* always max speed */

		/* RAID 1 and 1+0 levels need foreground initialization */
		if (bc->bc_level == 1 || bc->bc_level == BIOC_SVOL_RAID10)
			req_cvolset.quick_init = 1; /* foreground init */

		error = arc_msgbuf(sc, &req_cvolset, sizeof(req_cvolset),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		error = arc_fw_parse_status_code(sc, &reply[0]);
		if (error) {
			printf("%s: create volumeset%d failed\n",
			    device_xname(sc->sc_dev), bc->bc_volid);
			return error;
		}

		/*
		 * If we are creating a RAID 1 or RAID 1+0 volume,
		 * the volume will be created immediately but it won't
		 * be available until the initialization is done... so
		 * don't bother attaching the sd(4) device.
		 */
		if (bc->bc_level == 1 || bc->bc_level == BIOC_SVOL_RAID10)
			break;

		/*
		 * Do a rescan on the bus to attach the device associated
		 * with the new volume.
		 */
		scsibus_sc = device_private(sc->sc_scsibus_dv);
		(void)scsi_probe_bus(scsibus_sc, bc->bc_target, bc->bc_lun);

		break;
	    }
	case BIOC_VREMOVE_VOLUME:
	    {
		/*
		 * Remove the volume set specified in bc_volid.
		 */
		request[0] = ARC_FW_DELETE_VOLUME;
		request[1] = bc->bc_volid;
		error = arc_msgbuf(sc, request, sizeof(request),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		error = arc_fw_parse_status_code(sc, &reply[0]);
		if (error) {
			printf("%s: delete volumeset%d failed\n",
			    device_xname(sc->sc_dev), bc->bc_volid);
			return error;
		}

		/*
		 * Detach the sd(4) device associated with the volume,
		 * but if there's an error don't make it a priority.
		 */
		error = scsipi_target_detach(&sc->sc_chan, bc->bc_target,
		    bc->bc_lun, 0);
		if (error)
			printf("%s: couldn't detach sd device for volume %d "
			    "at %u:%u.%u (error=%d)\n",
			    device_xname(sc->sc_dev), bc->bc_volid,
			    bc->bc_channel, bc->bc_target, bc->bc_lun, error);

		/*
		 * and remove the raid set specified in bc_volid,
		 * we only care about volumes.
		 */
		request[0] = ARC_FW_DELETE_RAIDSET;
		request[1] = bc->bc_volid;
		error = arc_msgbuf(sc, request, sizeof(request),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		error = arc_fw_parse_status_code(sc, &reply[0]);
		if (error) {
			printf("%s: delete raidset%d failed\n",
			    device_xname(sc->sc_dev), bc->bc_volid);
			return error;
		}

		break;
	    }
	default:
		return EOPNOTSUPP;
	}

	return error;
}

static int
arc_bio_setstate(struct arc_softc *sc, struct bioc_setstate *bs)
{
	/* for a hotspare disk */
	struct request_hs {
		uint8_t cmdcode;
		uint32_t devmask;
	} __packed;

	/* for a pass-through disk */
	struct request_pt {
		uint8_t cmdcode;
		uint8_t devid;
		uint8_t scsi_chan;
		uint8_t scsi_id;
		uint8_t scsi_lun;
		uint8_t tagged_queue;
		uint8_t cache_mode;
		uint8_t max_speed;
	} __packed;

	struct scsibus_softc *scsibus_sc = NULL;
	struct request_hs req_hs;	/* to add/remove hotspare */
	struct request_pt req_pt;	/* to add a pass-through */
	uint8_t req_gen[2];
	uint8_t reply[1];
	int error = 0;

	switch (bs->bs_status) {
	case BIOC_SSHOTSPARE:
	    {
		req_hs.cmdcode = ARC_FW_CREATE_HOTSPARE;
		req_hs.devmask = (1 << bs->bs_target);
		goto hotspare;
	    }
	case BIOC_SSDELHOTSPARE:
	    {
		req_hs.cmdcode = ARC_FW_DELETE_HOTSPARE;
		req_hs.devmask = (1 << bs->bs_target);
		goto hotspare;
	    }
	case BIOC_SSPASSTHRU:
	    {
		req_pt.cmdcode = ARC_FW_CREATE_PASSTHRU;
		req_pt.devid = bs->bs_other_id; /* this wants device# */
		req_pt.scsi_chan = bs->bs_channel;
		req_pt.scsi_id = bs->bs_target;
		req_pt.scsi_lun = bs->bs_lun;
		req_pt.tagged_queue = 1; /* always enabled */
		req_pt.cache_mode = 1; /* always enabled */
		req_pt.max_speed = 4; /* always max speed */

		error = arc_msgbuf(sc, &req_pt, sizeof(req_pt),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		/*
		 * Do a rescan on the bus to attach the new device
		 * associated with the pass-through disk.
		 */
		scsibus_sc = device_private(sc->sc_scsibus_dv);
		(void)scsi_probe_bus(scsibus_sc, bs->bs_target, bs->bs_lun);

		goto out;
	    }
	case BIOC_SSDELPASSTHRU:
	    {
		req_gen[0] = ARC_FW_DELETE_PASSTHRU;
		req_gen[1] = bs->bs_target;
		error = arc_msgbuf(sc, &req_gen, sizeof(req_gen),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		/*
		 * Detach the sd device associated with this pass-through disk.
		 */
		error = scsipi_target_detach(&sc->sc_chan, bs->bs_target,
		    bs->bs_lun, 0);
		if (error)
			printf("%s: couldn't detach sd device for the "
			    "pass-through disk at %u:%u.%u (error=%d)\n",
			    device_xname(sc->sc_dev),
			    bs->bs_channel, bs->bs_target, bs->bs_lun, error);

		goto out;
	    }
	case BIOC_SSCHECKSTART_VOL:
	    {
		req_gen[0] = ARC_FW_START_CHECKVOL;
		req_gen[1] = bs->bs_volid;
		error = arc_msgbuf(sc, &req_gen, sizeof(req_gen),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		goto out;
	    }
	case BIOC_SSCHECKSTOP_VOL:
	    {
		uint8_t req = ARC_FW_STOP_CHECKVOL;
		error = arc_msgbuf(sc, &req, 1, reply, sizeof(reply));
		if (error != 0)
			return error;

		goto out;
	    }
	default:
		return EOPNOTSUPP;
	}

hotspare:
	error = arc_msgbuf(sc, &req_hs, sizeof(req_hs),
	    reply, sizeof(reply));
	if (error != 0)
		return error;

out:
	return arc_fw_parse_status_code(sc, &reply[0]);
}

static int
arc_bio_inq(struct arc_softc *sc, struct bioc_inq *bi)
{
	uint8_t request[2];
	struct arc_fw_sysinfo *sysinfo = NULL;
	struct arc_fw_raidinfo *raidinfo;
	int nvols = 0, i;
	int error = 0;

	raidinfo = kmem_zalloc(sizeof(*raidinfo), KM_SLEEP);

	if (!sc->sc_maxraidset || !sc->sc_maxvolset || !sc->sc_cchans) {
		sysinfo = kmem_zalloc(sizeof(*sysinfo), KM_SLEEP);

		request[0] = ARC_FW_SYSINFO;
		error = arc_msgbuf(sc, request, 1, sysinfo,
		    sizeof(struct arc_fw_sysinfo));
		if (error != 0)
			goto out;

		sc->sc_maxraidset = sysinfo->max_raid_set;
		sc->sc_maxvolset = sysinfo->max_volume_set;
		sc->sc_cchans = sysinfo->ide_channels;
	}

	request[0] = ARC_FW_RAIDINFO;
	for (i = 0; i < sc->sc_maxraidset; i++) {
		request[1] = i;
		error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
		    sizeof(struct arc_fw_raidinfo));
		if (error != 0)
			goto out;

		nvols += raidinfo->volumes;
	}

	strlcpy(bi->bi_dev, device_xname(sc->sc_dev), sizeof(bi->bi_dev));
	bi->bi_novol = nvols;
	bi->bi_nodisk = sc->sc_cchans;

out:
	if (sysinfo)
		kmem_free(sysinfo, sizeof(*sysinfo));
	kmem_free(raidinfo, sizeof(*raidinfo));
	return error;
}

static int
arc_bio_getvol(struct arc_softc *sc, int vol, struct arc_fw_volinfo *volinfo)
{
	uint8_t request[2];
	int error = 0;
	int nvols = 0, i;

	request[0] = ARC_FW_VOLINFO;
	for (i = 0; i < sc->sc_maxvolset; i++) {
		request[1] = i;
		error = arc_msgbuf(sc, request, sizeof(request), volinfo,
		    sizeof(struct arc_fw_volinfo));
		if (error != 0)
			goto out;

		if (volinfo->capacity == 0 && volinfo->capacity2 == 0)
			continue;

		if (nvols == vol)
			break;

		nvols++;
	}

	if (nvols != vol ||
	    (volinfo->capacity == 0 && volinfo->capacity2 == 0)) {
		error = ENODEV;
		goto out;
	}

out:
	return error;
}

static int
arc_bio_vol(struct arc_softc *sc, struct bioc_vol *bv)
{
	struct arc_fw_volinfo *volinfo;
	uint64_t blocks;
	uint32_t status;
	int error = 0;

	volinfo = kmem_zalloc(sizeof(*volinfo), KM_SLEEP);

	error = arc_bio_getvol(sc, bv->bv_volid, volinfo);
	if (error != 0)
		goto out;

	bv->bv_percent = -1;
	bv->bv_seconds = 0;

	status = htole32(volinfo->volume_status);
	if (status == 0x0) {
		if (htole32(volinfo->fail_mask) == 0x0)
			bv->bv_status = BIOC_SVONLINE;
		else
			bv->bv_status = BIOC_SVDEGRADED;
	} else if (status & ARC_FW_VOL_STATUS_NEED_REGEN) {
		bv->bv_status = BIOC_SVDEGRADED;
	} else if (status & ARC_FW_VOL_STATUS_FAILED) {
		bv->bv_status = BIOC_SVOFFLINE;
	} else if (status & ARC_FW_VOL_STATUS_INITTING) {
		bv->bv_status = BIOC_SVBUILDING;
		bv->bv_percent = htole32(volinfo->progress);
	} else if (status & ARC_FW_VOL_STATUS_REBUILDING) {
		bv->bv_status = BIOC_SVREBUILD;
		bv->bv_percent = htole32(volinfo->progress);
	} else if (status & ARC_FW_VOL_STATUS_MIGRATING) {
		bv->bv_status = BIOC_SVMIGRATING;
		bv->bv_percent = htole32(volinfo->progress);
	} else if (status & ARC_FW_VOL_STATUS_CHECKING) {
		bv->bv_status = BIOC_SVCHECKING;
		bv->bv_percent = htole32(volinfo->progress);
	} else if (status & ARC_FW_VOL_STATUS_NEED_INIT) {
		bv->bv_status = BIOC_SVOFFLINE;
	} else {
		printf("%s: volume %d status 0x%x\n",
		    device_xname(sc->sc_dev), bv->bv_volid, status);
	}

	blocks = (uint64_t)htole32(volinfo->capacity2) << 32;
	blocks += (uint64_t)htole32(volinfo->capacity);
	bv->bv_size = blocks * ARC_BLOCKSIZE; /* XXX */

	switch (volinfo->raid_level) {
	case ARC_FW_VOL_RAIDLEVEL_0:
		bv->bv_level = 0;
		break;
	case ARC_FW_VOL_RAIDLEVEL_1:
		if (volinfo->member_disks > 2)
			bv->bv_level = BIOC_SVOL_RAID10;
		else
			bv->bv_level = 1;
		break;
	case ARC_FW_VOL_RAIDLEVEL_3:
		bv->bv_level = 3;
		break;
	case ARC_FW_VOL_RAIDLEVEL_5:
		bv->bv_level = 5;
		break;
	case ARC_FW_VOL_RAIDLEVEL_6:
		bv->bv_level = 6;
		break;
	case ARC_FW_VOL_RAIDLEVEL_PASSTHRU:
		bv->bv_level = BIOC_SVOL_PASSTHRU;
		break;
	default:
		bv->bv_level = -1;
		break;
	}

	bv->bv_nodisk = volinfo->member_disks;
	bv->bv_stripe_size = volinfo->stripe_size / 2;
	snprintf(bv->bv_dev, sizeof(bv->bv_dev), "sd%d", bv->bv_volid);
	scsipi_strvis(bv->bv_vendor, sizeof(bv->bv_vendor), volinfo->set_name,
	    sizeof(volinfo->set_name));

out:
	kmem_free(volinfo, sizeof(*volinfo));
	return error;
}

static int
arc_bio_disk_novol(struct arc_softc *sc, struct bioc_disk *bd)
{
	struct arc_fw_diskinfo *diskinfo;
	uint8_t request[2];
	int error = 0;

	diskinfo = kmem_zalloc(sizeof(*diskinfo), KM_SLEEP);

	if (bd->bd_diskid >= sc->sc_cchans) {
		error = ENODEV;
		goto out;
	}

	request[0] = ARC_FW_DISKINFO;
	request[1] = bd->bd_diskid;
	error = arc_msgbuf(sc, request, sizeof(request),
	    diskinfo, sizeof(struct arc_fw_diskinfo));
	if (error != 0)
		goto out;

	/* skip disks with no capacity */
	if (htole32(diskinfo->capacity) == 0 &&
	    htole32(diskinfo->capacity2) == 0)
		goto out;

	bd->bd_disknovol = true;
	arc_bio_disk_filldata(sc, bd, diskinfo, bd->bd_diskid);

out:
	kmem_free(diskinfo, sizeof(*diskinfo));
	return error;
}
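
/*
 * arc_bio_disk_filldata() translates a firmware diskinfo record into
 * bioc_disk fields: device_state maps to a BIOC_SD* status, the
 * capacity2:capacity pair combines into a 64-bit sector count scaled by
 * ARC_BLOCKSIZE, and the model/serial/firmware strings are made
 * printable with scsipi_strvis().
 */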

static void
arc_bio_disk_filldata(struct arc_softc *sc, struct bioc_disk *bd,
    struct arc_fw_diskinfo *diskinfo, int diskid)
{
	uint64_t blocks;
	char model[81];
	char serial[41];
	char rev[17];

	/* Ignore bit zero for now, we don't know what it means */
	diskinfo->device_state &= ~0x1;

	switch (diskinfo->device_state) {
	case ARC_FW_DISK_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;
	case ARC_FW_DISK_PASSTHRU:
		bd->bd_status = BIOC_SDPASSTHRU;
		break;
	case ARC_FW_DISK_NORMAL:
		bd->bd_status = BIOC_SDONLINE;
		break;
	case ARC_FW_DISK_HOTSPARE:
		bd->bd_status = BIOC_SDHOTSPARE;
		break;
	case ARC_FW_DISK_UNUSED:
		bd->bd_status = BIOC_SDUNUSED;
		break;
	case 0:
		/* disk has been disconnected */
		bd->bd_status = BIOC_SDOFFLINE;
		bd->bd_channel = 1;
		bd->bd_target = 0;
		bd->bd_lun = 0;
		strlcpy(bd->bd_vendor, "disk missing", sizeof(bd->bd_vendor));
		break;
	default:
		printf("%s: unknown disk device_state: 0x%x\n", __func__,
		    diskinfo->device_state);
		bd->bd_status = BIOC_SDINVALID;
		return;
	}

	blocks = (uint64_t)htole32(diskinfo->capacity2) << 32;
	blocks += (uint64_t)htole32(diskinfo->capacity);
	bd->bd_size = blocks * ARC_BLOCKSIZE; /* XXX */

	scsipi_strvis(model, 81, diskinfo->model, sizeof(diskinfo->model));
	scsipi_strvis(serial, 41, diskinfo->serial, sizeof(diskinfo->serial));
	scsipi_strvis(rev, 17, diskinfo->firmware_rev,
	    sizeof(diskinfo->firmware_rev));

	snprintf(bd->bd_vendor, sizeof(bd->bd_vendor), "%s %s", model, rev);
	strlcpy(bd->bd_serial, serial, sizeof(bd->bd_serial));

#if 0
	bd->bd_channel = diskinfo->scsi_attr.channel;
	bd->bd_target = diskinfo->scsi_attr.target;
	bd->bd_lun = diskinfo->scsi_attr.lun;
#endif

	/*
	 * the firmware doesn't seem to fill scsi_attr in, so fake it with
	 * the diskid.
	 */
	bd->bd_channel = 0;
	bd->bd_target = diskid;
	bd->bd_lun = 0;
}
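
/*
 * For BIOCDISK the lookup goes volume -> raid set -> physical device:
 * arc_bio_disk_volume() below resolves bd_volid to a volinfo, asks the
 * firmware for that volume's raid set, and then uses
 * raidinfo->device_array[bd_diskid] as the device number for the
 * ARC_FW_DISKINFO query (0xff in that slot means the member is gone).
 */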

static int
arc_bio_disk_volume(struct arc_softc *sc, struct bioc_disk *bd)
{
	struct arc_fw_raidinfo *raidinfo;
	struct arc_fw_volinfo *volinfo;
	struct arc_fw_diskinfo *diskinfo;
	uint8_t request[2];
	int error = 0;

	volinfo = kmem_zalloc(sizeof(*volinfo), KM_SLEEP);
	raidinfo = kmem_zalloc(sizeof(*raidinfo), KM_SLEEP);
	diskinfo = kmem_zalloc(sizeof(*diskinfo), KM_SLEEP);

	error = arc_bio_getvol(sc, bd->bd_volid, volinfo);
	if (error != 0)
		goto out;

	request[0] = ARC_FW_RAIDINFO;
	request[1] = volinfo->raid_set_number;

	error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
	    sizeof(struct arc_fw_raidinfo));
	if (error != 0)
		goto out;

	if (bd->bd_diskid >= sc->sc_cchans ||
	    bd->bd_diskid >= raidinfo->member_devices) {
		error = ENODEV;
		goto out;
	}

	if (raidinfo->device_array[bd->bd_diskid] == 0xff) {
		/*
		 * The disk has been disconnected, mark it offline
		 * and put it on another bus.
		 */
		bd->bd_channel = 1;
		bd->bd_target = 0;
		bd->bd_lun = 0;
		bd->bd_status = BIOC_SDOFFLINE;
		strlcpy(bd->bd_vendor, "disk missing", sizeof(bd->bd_vendor));
		goto out;
	}

	request[0] = ARC_FW_DISKINFO;
	request[1] = raidinfo->device_array[bd->bd_diskid];
	error = arc_msgbuf(sc, request, sizeof(request), diskinfo,
	    sizeof(struct arc_fw_diskinfo));
	if (error != 0)
		goto out;

	/* now fill our bio disk with data from the firmware */
	arc_bio_disk_filldata(sc, bd, diskinfo,
	    raidinfo->device_array[bd->bd_diskid]);

out:
	kmem_free(raidinfo, sizeof(*raidinfo));
	kmem_free(volinfo, sizeof(*volinfo));
	kmem_free(diskinfo, sizeof(*diskinfo));
	return error;
}
#endif /* NBIO > 0 */

uint8_t
arc_msg_cksum(void *cmd, uint16_t len)
{
	uint8_t *buf = cmd;
	uint8_t cksum;
	int i;

	cksum = (uint8_t)(len >> 8) + (uint8_t)len;
	for (i = 0; i < len; i++)
		cksum += buf[i];

	return cksum;
}
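
/*
 * Firmware message buffer framing, as arc_msgbuf() below uses it (a
 * sketch based on this file; the exact field layout is struct
 * arc_fw_bufhdr):
 *
 *	[ arc_fw_hdr 0x5e 0x01 0x61 ][ le16 payload len ][ payload ][ cksum ]
 *
 * The trailing byte is arc_msg_cksum() over the payload and its length,
 * and the reply coming back from the IOP is framed the same way.  The
 * whole buffer is shuttled through the IOC read/write windows a chunk
 * at a time, with the doorbell registers providing flow control and
 * arc_lock()/arc_wait() providing exclusion and sleep.
 */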

int
arc_msgbuf(struct arc_softc *sc, void *wptr, size_t wbuflen, void *rptr,
    size_t rbuflen)
{
	uint8_t rwbuf[ARC_REG_IOC_RWBUF_MAXLEN];
	uint8_t *wbuf, *rbuf;
	int wlen, wdone = 0, rlen, rdone = 0;
	struct arc_fw_bufhdr *bufhdr;
	uint32_t reg, rwlen;
	int error = 0;
#ifdef ARC_DEBUG
	int i;
#endif

	wbuf = rbuf = NULL;

	DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wbuflen: %d rbuflen: %d\n",
	    device_xname(sc->sc_dev), wbuflen, rbuflen);

	wlen = sizeof(struct arc_fw_bufhdr) + wbuflen + 1; /* 1 for cksum */
	wbuf = kmem_alloc(wlen, KM_SLEEP);

	rlen = sizeof(struct arc_fw_bufhdr) + rbuflen + 1; /* 1 for cksum */
	rbuf = kmem_alloc(rlen, KM_SLEEP);

	DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wlen: %d rlen: %d\n",
	    device_xname(sc->sc_dev), wlen, rlen);

	bufhdr = (struct arc_fw_bufhdr *)wbuf;
	bufhdr->hdr = arc_fw_hdr;
	bufhdr->len = htole16(wbuflen);
	memcpy(wbuf + sizeof(struct arc_fw_bufhdr), wptr, wbuflen);
	wbuf[wlen - 1] = arc_msg_cksum(wptr, wbuflen);

	arc_lock(sc);
	if (arc_read(sc, ARC_REG_OUTB_DOORBELL) != 0) {
		error = EBUSY;
		goto out;
	}

	reg = ARC_REG_OUTB_DOORBELL_READ_OK;

	do {
		if ((reg & ARC_REG_OUTB_DOORBELL_READ_OK) && wdone < wlen) {
			memset(rwbuf, 0, sizeof(rwbuf));
			rwlen = (wlen - wdone) % sizeof(rwbuf);
			memcpy(rwbuf, &wbuf[wdone], rwlen);

#ifdef ARC_DEBUG
			if (arcdebug & ARC_D_DB) {
				printf("%s: write %d:",
				    device_xname(sc->sc_dev), rwlen);
				for (i = 0; i < rwlen; i++)
					printf(" 0x%02x", rwbuf[i]);
				printf("\n");
			}
#endif

			/* copy the chunk to the hw */
			arc_write(sc, ARC_REG_IOC_WBUF_LEN, rwlen);
			arc_write_region(sc, ARC_REG_IOC_WBUF, rwbuf,
			    sizeof(rwbuf));

			/* say we have a buffer for the hw */
			arc_write(sc, ARC_REG_INB_DOORBELL,
			    ARC_REG_INB_DOORBELL_WRITE_OK);

			wdone += rwlen;
		}

		while ((reg = arc_read(sc, ARC_REG_OUTB_DOORBELL)) == 0)
			arc_wait(sc);

		arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);

		DNPRINTF(ARC_D_DB, "%s: reg: 0x%08x\n",
		    device_xname(sc->sc_dev), reg);

		if ((reg & ARC_REG_OUTB_DOORBELL_WRITE_OK) && rdone < rlen) {
			rwlen = arc_read(sc, ARC_REG_IOC_RBUF_LEN);
			if (rwlen > sizeof(rwbuf)) {
				DNPRINTF(ARC_D_DB, "%s: rwlen too big\n",
				    device_xname(sc->sc_dev));
				error = EIO;
				goto out;
			}

			arc_read_region(sc, ARC_REG_IOC_RBUF, rwbuf,
			    sizeof(rwbuf));

			arc_write(sc, ARC_REG_INB_DOORBELL,
			    ARC_REG_INB_DOORBELL_READ_OK);

#ifdef ARC_DEBUG
			printf("%s: len: %d+%d=%d/%d\n",
			    device_xname(sc->sc_dev),
			    rwlen, rdone, rwlen + rdone, rlen);
			if (arcdebug & ARC_D_DB) {
				printf("%s: read:",
				    device_xname(sc->sc_dev));
				for (i = 0; i < rwlen; i++)
					printf(" 0x%02x", rwbuf[i]);
				printf("\n");
			}
#endif

			if ((rdone + rwlen) > rlen) {
				DNPRINTF(ARC_D_DB, "%s: rwbuf too big\n",
				    device_xname(sc->sc_dev));
				error = EIO;
				goto out;
			}

			memcpy(&rbuf[rdone], rwbuf, rwlen);
			rdone += rwlen;
		}
	} while (rdone != rlen);

	bufhdr = (struct arc_fw_bufhdr *)rbuf;
	if (memcmp(&bufhdr->hdr, &arc_fw_hdr, sizeof(bufhdr->hdr)) != 0 ||
	    bufhdr->len != htole16(rbuflen)) {
		DNPRINTF(ARC_D_DB, "%s: rbuf hdr is wrong\n",
		    device_xname(sc->sc_dev));
		error = EIO;
		goto out;
	}

	memcpy(rptr, rbuf + sizeof(struct arc_fw_bufhdr), rbuflen);

	if (rbuf[rlen - 1] != arc_msg_cksum(rptr, rbuflen)) {
		DNPRINTF(ARC_D_DB, "%s: invalid cksum\n",
		    device_xname(sc->sc_dev));
		error = EIO;
		goto out;
	}

out:
	arc_unlock(sc);
	kmem_free(wbuf, wlen);
	kmem_free(rbuf, rlen);

	return error;
}

void
arc_lock(struct arc_softc *sc)
{
	rw_enter(&sc->sc_rwlock, RW_WRITER);
	mutex_spin_enter(&sc->sc_mutex);
	arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
	sc->sc_talking = 1;
}

void
arc_unlock(struct arc_softc *sc)
{
	KASSERT(mutex_owned(&sc->sc_mutex));

	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
	sc->sc_talking = 0;
	mutex_spin_exit(&sc->sc_mutex);
	rw_exit(&sc->sc_rwlock);
}

void
arc_wait(struct arc_softc *sc)
{
	KASSERT(mutex_owned(&sc->sc_mutex));

	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
	if (cv_timedwait(&sc->sc_condvar, &sc->sc_mutex, hz) == EWOULDBLOCK)
		arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
}

#if NBIO > 0
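/*
 * Sensor layout, as arc_create_sensors() and arc_refresh_sensors()
 * below use it: one ENVSYS_DRIVE sensor per volume plus one per member
 * disk.  The volume id is stashed in value_max and, for disk sensors,
 * the disk index plus 10 in value_avg, so the refresh callback can tell
 * the two apart and re-query the firmware accordingly.
 */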
static void
arc_create_sensors(void *arg)
{
	struct arc_softc *sc = arg;
	struct bioc_inq bi;
	struct bioc_vol bv;
	int i, j;
	size_t slen, count = 0;

	memset(&bi, 0, sizeof(bi));
	if (arc_bio_inq(sc, &bi) != 0) {
		aprint_error("%s: unable to query firmware for sensor info\n",
		    device_xname(sc->sc_dev));
		kthread_exit(0);
	}

	/* There's no point to continue if there are no volumes */
	if (!bi.bi_novol)
		kthread_exit(0);

	for (i = 0; i < bi.bi_novol; i++) {
		memset(&bv, 0, sizeof(bv));
		bv.bv_volid = i;
		if (arc_bio_vol(sc, &bv) != 0)
			kthread_exit(0);

		/* Skip passthrough volumes */
		if (bv.bv_level == BIOC_SVOL_PASSTHRU)
			continue;

		/* new volume found */
		sc->sc_nsensors++;
		/* new disk in a volume found */
		sc->sc_nsensors += bv.bv_nodisk;
	}

	/* No valid volumes */
	if (!sc->sc_nsensors)
		kthread_exit(0);

	sc->sc_sme = sysmon_envsys_create();
	slen = sizeof(envsys_data_t) * sc->sc_nsensors;
	sc->sc_sensors = kmem_zalloc(slen, KM_SLEEP);

	/* Attach sensors for volumes and disks */
	for (i = 0; i < bi.bi_novol; i++) {
		memset(&bv, 0, sizeof(bv));
		bv.bv_volid = i;
		if (arc_bio_vol(sc, &bv) != 0)
			goto bad;

		sc->sc_sensors[count].units = ENVSYS_DRIVE;
		sc->sc_sensors[count].flags = ENVSYS_FMONSTCHANGED;

		/* Skip passthrough volumes */
		if (bv.bv_level == BIOC_SVOL_PASSTHRU)
			continue;

		if (bv.bv_level == BIOC_SVOL_RAID10)
			snprintf(sc->sc_sensors[count].desc,
			    sizeof(sc->sc_sensors[count].desc),
			    "RAID 1+0 volume%d (%s)", i, bv.bv_dev);
		else
			snprintf(sc->sc_sensors[count].desc,
			    sizeof(sc->sc_sensors[count].desc),
			    "RAID %d volume%d (%s)", bv.bv_level, i,
			    bv.bv_dev);

		sc->sc_sensors[count].value_max = i;

		if (sysmon_envsys_sensor_attach(sc->sc_sme,
		    &sc->sc_sensors[count]))
			goto bad;

		count++;

		/* Attach disk sensors for this volume */
		for (j = 0; j < bv.bv_nodisk; j++) {
			sc->sc_sensors[count].units = ENVSYS_DRIVE;
			sc->sc_sensors[count].flags = ENVSYS_FMONSTCHANGED;

			snprintf(sc->sc_sensors[count].desc,
			    sizeof(sc->sc_sensors[count].desc),
			    "disk%d volume%d (%s)", j, i, bv.bv_dev);
			sc->sc_sensors[count].value_max = i;
			sc->sc_sensors[count].value_avg = j + 10;

			if (sysmon_envsys_sensor_attach(sc->sc_sme,
			    &sc->sc_sensors[count]))
				goto bad;

			count++;
		}
	}

	/*
	 * Register our envsys driver with the framework now that the
	 * sensors were all attached.
	 */
	sc->sc_sme->sme_name = device_xname(sc->sc_dev);
	sc->sc_sme->sme_cookie = sc;
	sc->sc_sme->sme_refresh = arc_refresh_sensors;

	if (sysmon_envsys_register(sc->sc_sme)) {
		aprint_debug("%s: unable to register with sysmon\n",
		    device_xname(sc->sc_dev));
		goto bad;
	}
	kthread_exit(0);

bad:
	sysmon_envsys_destroy(sc->sc_sme);
	kmem_free(sc->sc_sensors, slen);

	sc->sc_sme = NULL;
	sc->sc_sensors = NULL;

	kthread_exit(0);
}

static void
arc_refresh_sensors(struct sysmon_envsys *sme, envsys_data_t *edata)
{
	struct arc_softc *sc = sme->sme_cookie;
	struct bioc_vol bv;
	struct bioc_disk bd;

	/* sanity check */
	if (edata->units != ENVSYS_DRIVE)
		return;

	memset(&bv, 0, sizeof(bv));
	bv.bv_volid = edata->value_max;

	if (arc_bio_vol(sc, &bv)) {
		edata->value_cur = ENVSYS_DRIVE_EMPTY;
		edata->state = ENVSYS_SINVALID;
		return;
	}

	/* Current sensor is handling a disk volume member */
	if (edata->value_avg) {
		memset(&bd, 0, sizeof(bd));
		bd.bd_volid = edata->value_max;
		bd.bd_diskid = edata->value_avg - 10;

		if (arc_bio_disk_volume(sc, &bd)) {
			edata->value_cur = ENVSYS_DRIVE_OFFLINE;
			edata->state = ENVSYS_SCRITICAL;
			return;
		}

		switch (bd.bd_status) {
		case BIOC_SDONLINE:
			edata->value_cur = ENVSYS_DRIVE_ONLINE;
			edata->state = ENVSYS_SVALID;
			break;
		case BIOC_SDOFFLINE:
			edata->value_cur = ENVSYS_DRIVE_OFFLINE;
			edata->state = ENVSYS_SCRITICAL;
			break;
		default:
			edata->value_cur = ENVSYS_DRIVE_FAIL;
			edata->state = ENVSYS_SCRITICAL;
			break;
		}

		return;
	}

	/* Current sensor is handling a volume */
	switch (bv.bv_status) {
	case BIOC_SVOFFLINE:
		edata->value_cur = ENVSYS_DRIVE_OFFLINE;
		edata->state = ENVSYS_SCRITICAL;
		break;
	case BIOC_SVDEGRADED:
		edata->value_cur = ENVSYS_DRIVE_PFAIL;
		edata->state = ENVSYS_SCRITICAL;
		break;
	case BIOC_SVBUILDING:
		edata->value_cur = ENVSYS_DRIVE_BUILD;
		edata->state = ENVSYS_SVALID;
		break;
	case BIOC_SVMIGRATING:
		edata->value_cur = ENVSYS_DRIVE_MIGRATING;
		edata->state = ENVSYS_SVALID;
		break;
	case BIOC_SVCHECKING:
		edata->value_cur = ENVSYS_DRIVE_CHECK;
		edata->state = ENVSYS_SVALID;
		break;
	case BIOC_SVREBUILD:
		edata->value_cur = ENVSYS_DRIVE_REBUILD;
		edata->state = ENVSYS_SCRITICAL;
		break;
	case BIOC_SVSCRUB:
	case BIOC_SVONLINE:
		edata->value_cur = ENVSYS_DRIVE_ONLINE;
		edata->state = ENVSYS_SVALID;
		break;
	case BIOC_SVINVALID:
		/* FALLTHROUGH */
	default:
		edata->value_cur = ENVSYS_DRIVE_EMPTY; /* unknown state */
		edata->state = ENVSYS_SINVALID;
		break;
	}
}
#endif /* NBIO > 0 */

uint32_t
arc_read(struct arc_softc *sc, bus_size_t r)
{
	uint32_t v;

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);

	DNPRINTF(ARC_D_RW, "%s: arc_read 0x%lx 0x%08x\n",
	    device_xname(sc->sc_dev), r, v);

	return v;
}

void
arc_read_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
	    BUS_SPACE_BARRIER_READ);
	bus_space_read_region_4(sc->sc_iot, sc->sc_ioh, r,
	    (uint32_t *)buf, len >> 2);
}

void
arc_write(struct arc_softc *sc, bus_size_t r, uint32_t v)
{
	DNPRINTF(ARC_D_RW, "%s: arc_write 0x%lx 0x%08x\n",
	    device_xname(sc->sc_dev), r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

void
arc_write_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
{
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, r,
	    (const uint32_t *)buf, len >> 2);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
	    BUS_SPACE_BARRIER_WRITE);
}

int
arc_wait_eq(struct arc_softc *sc, bus_size_t r, uint32_t mask,
    uint32_t target)
{
	int i;

	DNPRINTF(ARC_D_RW, "%s: arc_wait_eq 0x%lx 0x%08x 0x%08x\n",
	    device_xname(sc->sc_dev), r, mask, target);

	for (i = 0; i < 10000; i++) {
		if ((arc_read(sc, r) & mask) == target)
			return 0;
		delay(1000);
	}

	return 1;
}

int
arc_wait_ne(struct arc_softc *sc, bus_size_t r, uint32_t mask,
    uint32_t target)
{
	int i;

	DNPRINTF(ARC_D_RW, "%s: arc_wait_ne 0x%lx 0x%08x 0x%08x\n",
	    device_xname(sc->sc_dev), r, mask, target);

	for (i = 0; i < 10000; i++) {
		if ((arc_read(sc, r) & mask) != target)
			return 0;
		delay(1000);
	}

	return 1;
}

int
arc_msg0(struct arc_softc *sc, uint32_t m)
{
	/* post message */
	arc_write(sc, ARC_REG_INB_MSG0, m);
	/* wait for the fw to do it */
	if (arc_wait_eq(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0,
	    ARC_REG_INTRSTAT_MSG0) != 0)
		return 1;

	/* ack it */
	arc_write(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0);

	return 0;
}
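
/*
 * arc_dmamem_alloc() below is the usual bus_dma(9) four-step:
 * bus_dmamap_create(), bus_dmamem_alloc(), bus_dmamem_map() and
 * bus_dmamap_load(), with the error paths unwinding in reverse order.
 * The resulting arc_dmamem wraps the map, segment, kva and size so that
 * the ARC_DMA_* accessors can hand out the kva and bus address used
 * elsewhere in the driver.
 */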
struct arc_dmamem *
arc_dmamem_alloc(struct arc_softc *sc, size_t size)
{
	struct arc_dmamem *adm;
	int nsegs;

	adm = kmem_zalloc(sizeof(*adm), KM_NOSLEEP);
	if (adm == NULL)
		return NULL;

	adm->adm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &adm->adm_map) != 0)
		goto admfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &adm->adm_seg,
	    1, &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &adm->adm_seg, nsegs, size,
	    &adm->adm_kva, BUS_DMA_NOWAIT|BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, adm->adm_map, adm->adm_kva, size,
	    NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	memset(adm->adm_kva, 0, size);

	return adm;

unmap:
	bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
admfree:
	kmem_free(adm, sizeof(*adm));

	return NULL;
}

void
arc_dmamem_free(struct arc_softc *sc, struct arc_dmamem *adm)
{
	bus_dmamap_unload(sc->sc_dmat, adm->adm_map);
	bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, adm->adm_size);
	bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
	kmem_free(adm, sizeof(*adm));
}

int
arc_alloc_ccbs(device_t self)
{
	struct arc_softc *sc = device_private(self);
	struct arc_ccb *ccb;
	uint8_t *cmd;
	int i;
	size_t ccbslen;

	TAILQ_INIT(&sc->sc_ccb_free);

	ccbslen = sizeof(struct arc_ccb) * sc->sc_req_count;
	sc->sc_ccbs = kmem_zalloc(ccbslen, KM_SLEEP);

	sc->sc_requests = arc_dmamem_alloc(sc,
	    ARC_MAX_IOCMDLEN * sc->sc_req_count);
	if (sc->sc_requests == NULL) {
		aprint_error_dev(self, "unable to allocate ccb dmamem\n");
		goto free_ccbs;
	}
	cmd = ARC_DMA_KVA(sc->sc_requests);

	for (i = 0; i < sc->sc_req_count; i++) {
		ccb = &sc->sc_ccbs[i];

		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, ARC_SGL_MAXLEN,
		    MAXPHYS, 0, 0, &ccb->ccb_dmamap) != 0) {
			aprint_error_dev(self,
			    "unable to create dmamap for ccb %d\n", i);
			goto free_maps;
		}

		ccb->ccb_sc = sc;
		ccb->ccb_id = i;
		ccb->ccb_offset = ARC_MAX_IOCMDLEN * i;

		ccb->ccb_cmd = (struct arc_io_cmd *)&cmd[ccb->ccb_offset];
		ccb->ccb_cmd_post = (ARC_DMA_DVA(sc->sc_requests) +
		    ccb->ccb_offset) >> ARC_REG_POST_QUEUE_ADDR_SHIFT;

		arc_put_ccb(sc, ccb);
	}

	return 0;

free_maps:
	while ((ccb = arc_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
	arc_dmamem_free(sc, sc->sc_requests);

free_ccbs:
	kmem_free(sc->sc_ccbs, ccbslen);

	return 1;
}

struct arc_ccb *
arc_get_ccb(struct arc_softc *sc)
{
	struct arc_ccb *ccb;

	ccb = TAILQ_FIRST(&sc->sc_ccb_free);
	if (ccb != NULL)
		TAILQ_REMOVE(&sc->sc_ccb_free, ccb, ccb_link);

	return ccb;
}

void
arc_put_ccb(struct arc_softc *sc, struct arc_ccb *ccb)
{
	ccb->ccb_xs = NULL;
	memset(ccb->ccb_cmd, 0, ARC_MAX_IOCMDLEN);
	TAILQ_INSERT_TAIL(&sc->sc_ccb_free, ccb, ccb_link);
}