/*	$NetBSD: cac.c,v 1.56 2016/07/07 06:55:41 msaitoh Exp $	*/

/*-
 * Copyright (c) 2000, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for Compaq array controllers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cac.c,v 1.56 2016/07/07 06:55:41 msaitoh Exp $");

#include "bio.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/pool.h>

#include <sys/bswap.h>
#include <sys/bus.h>

#include <dev/ic/cacreg.h>
#include <dev/ic/cacvar.h>

#if NBIO > 0
#include <dev/biovar.h>
#endif /* NBIO > 0 */

#include "locators.h"

static struct cac_ccb *cac_ccb_alloc(struct cac_softc *, int);
static void	cac_ccb_done(struct cac_softc *, struct cac_ccb *);
static void	cac_ccb_free(struct cac_softc *, struct cac_ccb *);
static int	cac_ccb_poll(struct cac_softc *, struct cac_ccb *, int);
static int	cac_ccb_start(struct cac_softc *, struct cac_ccb *);
static int	cac_print(void *, const char *);
static void	cac_shutdown(void *);

static struct cac_ccb *cac_l0_completed(struct cac_softc *);
static int	cac_l0_fifo_full(struct cac_softc *);
static void	cac_l0_intr_enable(struct cac_softc *, int);
static int	cac_l0_intr_pending(struct cac_softc *);
static void	cac_l0_submit(struct cac_softc *, struct cac_ccb *);

static void	*cac_sdh;	/* shutdown hook */

#if NBIO > 0
int	cac_ioctl(device_t, u_long, void *);
int	cac_ioctl_vol(struct cac_softc *, struct bioc_vol *);
int	cac_create_sensors(struct cac_softc *);
void	cac_sensor_refresh(struct sysmon_envsys *, envsys_data_t *);
#endif /* NBIO > 0 */

const struct cac_linkage cac_l0 = {
	cac_l0_completed,
	cac_l0_fifo_full,
	cac_l0_intr_enable,
	cac_l0_intr_pending,
	cac_l0_submit
};

/*
 * Initialise our interface to the controller.
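 *
 * Allocate the DMA-safe CCB pool, query the controller for its
 * configuration, attach one logical drive sub-device per configured
 * unit and finally enable controller interrupts.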
 */
int
cac_init(struct cac_softc *sc, const char *intrstr, int startfw)
{
	struct cac_controller_info cinfo;
	struct cac_attach_args caca;
	int error, rseg, size, i;
	bus_dma_segment_t seg;
	struct cac_ccb *ccb;
	int locs[CACCF_NLOCS];
	char firm[8];

	if (intrstr != NULL)
		aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	SIMPLEQ_INIT(&sc->sc_ccb_free);
	SIMPLEQ_INIT(&sc->sc_ccb_queue);
	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_VM);
	cv_init(&sc->sc_ccb_cv, "cacccb");

	size = sizeof(struct cac_ccb) * CAC_MAX_CCBS;

	if ((error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg, 1,
	    &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate CCBs, error = %d\n", error);
		return (-1);
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
	    (void **)&sc->sc_ccbs,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map CCBs, error = %d\n", error);
		return (-1);
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create CCB DMA map, error = %d\n", error);
		return (-1);
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, sc->sc_ccbs,
	    size, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load CCB DMA map, error = %d\n", error);
		return (-1);
	}

	sc->sc_ccbs_paddr = sc->sc_dmamap->dm_segs[0].ds_addr;
	memset(sc->sc_ccbs, 0, size);
	ccb = (struct cac_ccb *)sc->sc_ccbs;

	for (i = 0; i < CAC_MAX_CCBS; i++, ccb++) {
		/* Create the DMA map for this CCB's data */
		error = bus_dmamap_create(sc->sc_dmat, CAC_MAX_XFER,
		    CAC_SG_SIZE, CAC_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap_xfer);

		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create ccb dmamap (%d)\n", error);
			break;
		}

		ccb->ccb_flags = 0;
		ccb->ccb_paddr = sc->sc_ccbs_paddr + i * sizeof(struct cac_ccb);
		SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_free, ccb, ccb_chain);
	}

	/* Start firmware background tasks, if needed. */
	if (startfw) {
		if (cac_cmd(sc, CAC_CMD_START_FIRMWARE, &cinfo, sizeof(cinfo),
		    0, 0, CAC_CCB_DATA_IN, NULL)) {
			aprint_error_dev(sc->sc_dev,
			    "CAC_CMD_START_FIRMWARE failed\n");
			return (-1);
		}
	}

	if (cac_cmd(sc, CAC_CMD_GET_CTRL_INFO, &cinfo, sizeof(cinfo), 0, 0,
	    CAC_CCB_DATA_IN, NULL)) {
		aprint_error_dev(sc->sc_dev, "CAC_CMD_GET_CTRL_INFO failed\n");
		return (-1);
	}

	strlcpy(firm, cinfo.firm_rev, 4+1);
	printf("%s: %d channels, firmware <%s>\n", device_xname(sc->sc_dev),
	    cinfo.scsi_chips, firm);

	sc->sc_nunits = cinfo.num_drvs;
	for (i = 0; i < cinfo.num_drvs; i++) {
		caca.caca_unit = i;

		locs[CACCF_UNIT] = i;

		config_found_sm_loc(sc->sc_dev, "cac", locs, &caca,
		    cac_print, config_stdsubmatch);
	}

	/* Set our `shutdownhook' before we start any device activity. */
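	/* One hook flushes every controller, so it is established only once. */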
	if (cac_sdh == NULL)
		cac_sdh = shutdownhook_establish(cac_shutdown, NULL);

	mutex_enter(&sc->sc_mutex);
	(*sc->sc_cl.cl_intr_enable)(sc, CAC_INTR_ENABLE);
	mutex_exit(&sc->sc_mutex);

#if NBIO > 0
	if (bio_register(sc->sc_dev, cac_ioctl) != 0)
		aprint_error_dev(sc->sc_dev,
		    "controller registration failed\n");
	else
		sc->sc_ioctl = cac_ioctl;
	if (cac_create_sensors(sc) != 0)
		aprint_error_dev(sc->sc_dev, "unable to create sensors\n");
#endif

	return (0);
}

/*
 * Shut down all `cac' controllers.
 */
static void
cac_shutdown(void *cookie)
{
	extern struct cfdriver cac_cd;
	struct cac_softc *sc;
	u_int8_t tbuf[512];
	int i;

	for (i = 0; i < cac_cd.cd_ndevs; i++) {
		if ((sc = device_lookup_private(&cac_cd, i)) == NULL)
			continue;
		memset(tbuf, 0, sizeof(tbuf));
		tbuf[0] = 1;
		cac_cmd(sc, CAC_CMD_FLUSH_CACHE, tbuf, sizeof(tbuf), 0, 0,
		    CAC_CCB_DATA_OUT, NULL);
	}
}

/*
 * Print autoconfiguration message for a sub-device.
 */
static int
cac_print(void *aux, const char *pnp)
{
	struct cac_attach_args *caca;

	caca = (struct cac_attach_args *)aux;

	if (pnp != NULL)
		aprint_normal("block device at %s", pnp);
	aprint_normal(" unit %d", caca->caca_unit);
	return (UNCONF);
}

/*
 * Handle an interrupt from the controller: process finished CCBs and
 * dequeue any waiting CCBs.
 */
int
cac_intr(void *cookie)
{
	struct cac_softc *sc;
	struct cac_ccb *ccb;
	int rv;

	sc = cookie;

	mutex_enter(&sc->sc_mutex);

	if ((*sc->sc_cl.cl_intr_pending)(sc)) {
		while ((ccb = (*sc->sc_cl.cl_completed)(sc)) != NULL) {
			cac_ccb_done(sc, ccb);
			cac_ccb_start(sc, NULL);
		}
		rv = 1;
	} else
		rv = 0;

	mutex_exit(&sc->sc_mutex);

	return (rv);
}

/*
 * Execute a command; if no completion context is supplied, poll for
 * completion before returning.
 */
int
cac_cmd(struct cac_softc *sc, int command, void *data, int datasize,
    int drive, int blkno, int flags, struct cac_context *context)
{
	struct cac_ccb *ccb;
	struct cac_sgb *sgb;
	int i, rv, size, nsegs;

	size = 0;

	if ((ccb = cac_ccb_alloc(sc, 1)) == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to alloc CCB\n");
		return (EAGAIN);
	}

	if ((flags & (CAC_CCB_DATA_IN | CAC_CCB_DATA_OUT)) != 0) {
		bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap_xfer,
		    (void *)data, datasize, NULL, BUS_DMA_NOWAIT |
		    BUS_DMA_STREAMING | ((flags & CAC_CCB_DATA_IN) ?
		    BUS_DMA_READ : BUS_DMA_WRITE));

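		/*
		 * Make the buffer visible to the controller before the
		 * command is submitted: PREREAD when the device will write
		 * into it, PREWRITE when the device will read from it.
		 */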
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0, datasize,
		    (flags & CAC_CCB_DATA_IN) != 0 ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE);

		sgb = ccb->ccb_seg;
		nsegs = min(ccb->ccb_dmamap_xfer->dm_nsegs, CAC_SG_SIZE);

		for (i = 0; i < nsegs; i++, sgb++) {
			size += ccb->ccb_dmamap_xfer->dm_segs[i].ds_len;
			sgb->length =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len);
			sgb->addr =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr);
		}
	} else {
		size = datasize;
		nsegs = 0;
	}

	ccb->ccb_hdr.drive = drive;
	ccb->ccb_hdr.priority = 0;
	ccb->ccb_hdr.size = htole16((sizeof(struct cac_req) +
	    sizeof(struct cac_sgb) * CAC_SG_SIZE) >> 2);

	ccb->ccb_req.next = 0;
	ccb->ccb_req.error = 0;
	ccb->ccb_req.reserved = 0;
	ccb->ccb_req.bcount = htole16(howmany(size, DEV_BSIZE));
	ccb->ccb_req.command = command;
	ccb->ccb_req.sgcount = nsegs;
	ccb->ccb_req.blkno = htole32(blkno);

	ccb->ccb_flags = flags;
	ccb->ccb_datasize = size;

	mutex_enter(&sc->sc_mutex);

	if (context == NULL) {
		memset(&ccb->ccb_context, 0, sizeof(struct cac_context));

		/* Synchronous commands mustn't wait. */
		if ((*sc->sc_cl.cl_fifo_full)(sc)) {
			cac_ccb_free(sc, ccb);
			rv = EAGAIN;
		} else {
#ifdef DIAGNOSTIC
			ccb->ccb_flags |= CAC_CCB_ACTIVE;
#endif
			(*sc->sc_cl.cl_submit)(sc, ccb);
			rv = cac_ccb_poll(sc, ccb, 2000);
			cac_ccb_free(sc, ccb);
		}
	} else {
		memcpy(&ccb->ccb_context, context, sizeof(struct cac_context));
		(void)cac_ccb_start(sc, ccb);
		rv = 0;
	}

	mutex_exit(&sc->sc_mutex);
	return (rv);
}

/*
 * Wait for the specified CCB to complete.
 */
static int
cac_ccb_poll(struct cac_softc *sc, struct cac_ccb *wantccb, int timo)
{
	struct cac_ccb *ccb;

	KASSERT(mutex_owned(&sc->sc_mutex));

	timo *= 1000;

	do {
		for (; timo != 0; timo--) {
			ccb = (*sc->sc_cl.cl_completed)(sc);
			if (ccb != NULL)
				break;
			DELAY(1);
		}

		if (timo == 0) {
			printf("%s: timeout\n", device_xname(sc->sc_dev));
			return (EBUSY);
		}
		cac_ccb_done(sc, ccb);
	} while (ccb != wantccb);

	return (0);
}

/*
 * Enqueue the specified command (if any) and attempt to start all enqueued
 * commands.
 */
static int
cac_ccb_start(struct cac_softc *sc, struct cac_ccb *ccb)
{

	KASSERT(mutex_owned(&sc->sc_mutex));

	if (ccb != NULL)
		SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_queue, ccb, ccb_chain);

	while ((ccb = SIMPLEQ_FIRST(&sc->sc_ccb_queue)) != NULL) {
		if ((*sc->sc_cl.cl_fifo_full)(sc))
			return (EAGAIN);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_queue, ccb_chain);
#ifdef DIAGNOSTIC
		ccb->ccb_flags |= CAC_CCB_ACTIVE;
#endif
		(*sc->sc_cl.cl_submit)(sc, ccb);
	}

	return (0);
}

/*
 * Process a finished CCB.
 */
static void
cac_ccb_done(struct cac_softc *sc, struct cac_ccb *ccb)
{
	device_t dv;
	void *context;
	int error;

	error = 0;

	KASSERT(mutex_owned(&sc->sc_mutex));

#ifdef DIAGNOSTIC
	if ((ccb->ccb_flags & CAC_CCB_ACTIVE) == 0)
		panic("cac_ccb_done: CCB not active");
	ccb->ccb_flags &= ~CAC_CCB_ACTIVE;
#endif

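	/* Finish and unmap the data transfer, if this CCB carried one. */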
	if ((ccb->ccb_flags & (CAC_CCB_DATA_IN | CAC_CCB_DATA_OUT)) != 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
		    ccb->ccb_datasize, ccb->ccb_flags & CAC_CCB_DATA_IN ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);
	}

	error = ccb->ccb_req.error;
	if (ccb->ccb_context.cc_handler != NULL) {
		dv = ccb->ccb_context.cc_dv;
		context = ccb->ccb_context.cc_context;
		cac_ccb_free(sc, ccb);
		(*ccb->ccb_context.cc_handler)(dv, context, error);
	} else {
		if ((error & CAC_RET_SOFT_ERROR) != 0)
			aprint_error_dev(sc->sc_dev,
			    "soft error; array may be degraded\n");
		if ((error & CAC_RET_HARD_ERROR) != 0)
			aprint_error_dev(sc->sc_dev, "hard error\n");
		if ((error & CAC_RET_CMD_REJECTED) != 0) {
			error = 1;
			aprint_error_dev(sc->sc_dev, "invalid request\n");
		}
	}
}

/*
 * Allocate a CCB.
 */
static struct cac_ccb *
cac_ccb_alloc(struct cac_softc *sc, int nosleep)
{
	struct cac_ccb *ccb;

	mutex_enter(&sc->sc_mutex);

	for (;;) {
		if ((ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free)) != NULL) {
			SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, ccb_chain);
			break;
		}
		if (nosleep) {
			ccb = NULL;
			break;
		}
		cv_wait(&sc->sc_ccb_cv, &sc->sc_mutex);
	}

	mutex_exit(&sc->sc_mutex);
	return (ccb);
}

/*
 * Put a CCB onto the freelist.
 */
static void
cac_ccb_free(struct cac_softc *sc, struct cac_ccb *ccb)
{

	KASSERT(mutex_owned(&sc->sc_mutex));

	ccb->ccb_flags = 0;
	if (SIMPLEQ_EMPTY(&sc->sc_ccb_free))
		cv_signal(&sc->sc_ccb_cv);
	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
}

/*
 * Board specific linkage shared between multiple bus types.
 */
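/*
 * These "l0" routines talk to the command and done FIFO registers directly;
 * bus front-ends that use this interface point sc_cl at the cac_l0 table.
 */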

static int
cac_l0_fifo_full(struct cac_softc *sc)
{

	KASSERT(mutex_owned(&sc->sc_mutex));

	return (cac_inl(sc, CAC_REG_CMD_FIFO) == 0);
}

static void
cac_l0_submit(struct cac_softc *sc, struct cac_ccb *ccb)
{

	KASSERT(mutex_owned(&sc->sc_mutex));

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
	    (char *)ccb - (char *)sc->sc_ccbs,
	    sizeof(struct cac_ccb), BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	cac_outl(sc, CAC_REG_CMD_FIFO, ccb->ccb_paddr);
}

static struct cac_ccb *
cac_l0_completed(struct cac_softc *sc)
{
	struct cac_ccb *ccb;
	paddr_t off;
	int rejected;

	KASSERT(mutex_owned(&sc->sc_mutex));

	if ((off = cac_inl(sc, CAC_REG_DONE_FIFO)) == 0)
		return (NULL);

	/* The controller tags a failed command list in the low two bits. */
	rejected = ((off & 3) != 0);
	if (rejected)
		aprint_error_dev(sc->sc_dev,
		    "failed command list returned: %lx\n", (long)off);

	off = (off & ~3) - sc->sc_ccbs_paddr;
	ccb = (struct cac_ccb *)((char *)sc->sc_ccbs + off);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, off,
	    sizeof(struct cac_ccb),
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

	if (rejected && ccb->ccb_req.error == 0)
		ccb->ccb_req.error = CAC_RET_CMD_REJECTED;

	return (ccb);
}

static int
cac_l0_intr_pending(struct cac_softc *sc)
{

	KASSERT(mutex_owned(&sc->sc_mutex));

	return (cac_inl(sc, CAC_REG_INTR_PENDING) & CAC_INTR_ENABLE);
}

static void
cac_l0_intr_enable(struct cac_softc *sc, int state)
{

	KASSERT(mutex_owned(&sc->sc_mutex));

	cac_outl(sc, CAC_REG_INTR_MASK,
	    state ? CAC_INTR_ENABLE : CAC_INTR_DISABLE);
}

#if NBIO > 0
const int cac_level[] = { 0, 4, 1, 5, 51, 7 };
const int cac_stat[] = { BIOC_SVONLINE, BIOC_SVOFFLINE, BIOC_SVOFFLINE,
    BIOC_SVDEGRADED, BIOC_SVREBUILD, BIOC_SVREBUILD, BIOC_SVDEGRADED,
    BIOC_SVDEGRADED, BIOC_SVINVALID, BIOC_SVINVALID, BIOC_SVBUILDING,
    BIOC_SVOFFLINE, BIOC_SVBUILDING };

int
cac_ioctl(device_t dev, u_long cmd, void *addr)
{
	struct cac_softc *sc = device_private(dev);
	struct bioc_inq *bi;
	struct bioc_disk *bd;
	cac_lock_t lock;
	int error = 0;

	lock = CAC_LOCK(sc);
	switch (cmd) {
	case BIOCINQ:
		bi = (struct bioc_inq *)addr;
		strlcpy(bi->bi_dev, device_xname(sc->sc_dev),
		    sizeof(bi->bi_dev));
		bi->bi_novol = sc->sc_nunits;
		bi->bi_nodisk = 0;
		break;

	case BIOCVOL:
		error = cac_ioctl_vol(sc, (struct bioc_vol *)addr);
		break;

	case BIOCDISK:
	case BIOCDISK_NOVOL:
		bd = (struct bioc_disk *)addr;
		if (bd->bd_volid >= sc->sc_nunits) {
			error = EINVAL;
			break;
		}
		/* No disk information yet */
		break;

	case BIOCBLINK:
	case BIOCALARM:
	case BIOCSETSTATE:
	default:
		error = EINVAL;
	}
	CAC_UNLOCK(sc, lock);

	return (error);
}

int
cac_ioctl_vol(struct cac_softc *sc, struct bioc_vol *bv)
{
	struct cac_drive_info dinfo;
	struct cac_drive_status dstatus;
	u_int32_t blks;

	if (bv->bv_volid >= sc->sc_nunits) {
		return EINVAL;
	}
	if (cac_cmd(sc, CAC_CMD_GET_LOG_DRV_INFO, &dinfo, sizeof(dinfo),
	    bv->bv_volid, 0, CAC_CCB_DATA_IN, NULL)) {
		return EIO;
	}
	if (cac_cmd(sc, CAC_CMD_SENSE_DRV_STATUS, &dstatus, sizeof(dstatus),
	    bv->bv_volid, 0, CAC_CCB_DATA_IN, NULL)) {
		return EIO;
	}
	blks = CAC_GET2(dinfo.ncylinders) * CAC_GET1(dinfo.nheads) *
	    CAC_GET1(dinfo.nsectors);
	bv->bv_size = (off_t)blks * CAC_GET2(dinfo.secsize);
	bv->bv_level = cac_level[CAC_GET1(dinfo.mirror)];	/* XXX limit check */
	bv->bv_nodisk = 0;	/* XXX */
	bv->bv_status = 0;	/* XXX */
	bv->bv_percent = -1;
	bv->bv_seconds = 0;
	if (dstatus.stat < sizeof(cac_stat)/sizeof(cac_stat[0]))
		bv->bv_status = cac_stat[dstatus.stat];
	if (bv->bv_status == BIOC_SVREBUILD ||
	    bv->bv_status == BIOC_SVBUILDING)
		bv->bv_percent = ((blks - CAC_GET4(dstatus.prog)) * 1000ULL) /
		    blks;
	return 0;
}

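/*
 * Attach one envsys(4) drive sensor per logical drive so that sysmon can
 * monitor and report array status changes.
 */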
sysmon\n"); 702 return(1); 703 } 704 return (0); 705 706 out: 707 free(sc->sc_sensor, M_DEVBUF); 708 sysmon_envsys_destroy(sc->sc_sme); 709 return EINVAL; 710 } 711 712 void 713 cac_sensor_refresh(struct sysmon_envsys *sme, envsys_data_t *edata) 714 { 715 struct cac_softc *sc = sme->sme_cookie; 716 struct bioc_vol bv; 717 int s; 718 719 if (edata->sensor >= sc->sc_nunits) 720 return; 721 722 memset(&bv, 0, sizeof(bv)); 723 bv.bv_volid = edata->sensor; 724 s = splbio(); 725 if (cac_ioctl_vol(sc, &bv)) 726 bv.bv_status = BIOC_SVINVALID; 727 splx(s); 728 729 bio_vol_to_envsys(edata, &bv); 730 } 731 #endif /* NBIO > 0 */ 732