/*	$NetBSD: cac.c,v 1.61 2019/11/10 21:16:35 chs Exp $	*/

/*-
 * Copyright (c) 2000, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for Compaq array controllers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cac.c,v 1.61 2019/11/10 21:16:35 chs Exp $");

#if defined(_KERNEL_OPT)
#include "bio.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/module.h>
#include <sys/bswap.h>
#include <sys/bus.h>

#include <dev/ic/cacreg.h>
#include <dev/ic/cacvar.h>

#if NBIO > 0
#include <dev/biovar.h>
#endif /* NBIO > 0 */

#include "ioconf.h"
#include "locators.h"

static struct	cac_ccb *cac_ccb_alloc(struct cac_softc *, int);
static void	cac_ccb_done(struct cac_softc *, struct cac_ccb *);
static void	cac_ccb_free(struct cac_softc *, struct cac_ccb *);
static int	cac_ccb_poll(struct cac_softc *, struct cac_ccb *, int);
static int	cac_ccb_start(struct cac_softc *, struct cac_ccb *);
static int	cac_print(void *, const char *);
static void	cac_shutdown(void *);

static struct	cac_ccb *cac_l0_completed(struct cac_softc *);
static int	cac_l0_fifo_full(struct cac_softc *);
static void	cac_l0_intr_enable(struct cac_softc *, int);
static int	cac_l0_intr_pending(struct cac_softc *);
static void	cac_l0_submit(struct cac_softc *, struct cac_ccb *);

static void	*cac_sdh;	/* shutdown hook */

#if NBIO > 0
int		cac_ioctl(device_t, u_long, void *);
int		cac_ioctl_vol(struct cac_softc *, struct bioc_vol *);
int		cac_create_sensors(struct cac_softc *);
void		cac_sensor_refresh(struct sysmon_envsys *, envsys_data_t *);
#endif /* NBIO > 0 */

const struct cac_linkage cac_l0 = {
	cac_l0_completed,
	cac_l0_fifo_full,
	cac_l0_intr_enable,
	cac_l0_intr_pending,
	cac_l0_submit
};

/*
 * Initialise our interface to the controller.
 */
int
cac_init(struct cac_softc *sc, const char *intrstr, int startfw)
{
	struct cac_controller_info cinfo;
	int error, rseg, size, i;
	bus_dma_segment_t seg;
	struct cac_ccb *ccb;
	char firm[8];

	if (intrstr != NULL)
		aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	SIMPLEQ_INIT(&sc->sc_ccb_free);
	SIMPLEQ_INIT(&sc->sc_ccb_queue);
	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_VM);
	cv_init(&sc->sc_ccb_cv, "cacccb");

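	/*
	 * Allocate all CCBs in one DMA-safe region: the controller is handed
	 * the bus address of each CCB when it is submitted, so every CCB
	 * needs a fixed physical address inside this single mapping.
	 */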
	size = sizeof(struct cac_ccb) * CAC_MAX_CCBS;

	if ((error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg, 1,
	    &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to allocate CCBs, error = %d\n",
		    error);
		return (-1);
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
	    (void **)&sc->sc_ccbs,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to map CCBs, error = %d\n",
		    error);
		return (-1);
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to create CCB DMA map, error = %d\n",
		    error);
		return (-1);
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, sc->sc_ccbs,
	    size, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to load CCB DMA map, error = %d\n",
		    error);
		return (-1);
	}

	sc->sc_ccbs_paddr = sc->sc_dmamap->dm_segs[0].ds_addr;
	memset(sc->sc_ccbs, 0, size);
	ccb = (struct cac_ccb *)sc->sc_ccbs;

	for (i = 0; i < CAC_MAX_CCBS; i++, ccb++) {
		/* Create the DMA map for this CCB's data */
		error = bus_dmamap_create(sc->sc_dmat, CAC_MAX_XFER,
		    CAC_SG_SIZE, CAC_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap_xfer);

		if (error) {
			aprint_error_dev(sc->sc_dev, "can't create ccb dmamap (%d)\n",
			    error);
			break;
		}

		ccb->ccb_flags = 0;
		ccb->ccb_paddr = sc->sc_ccbs_paddr + i * sizeof(struct cac_ccb);
		SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_free, ccb, ccb_chain);
	}

	/* Start firmware background tasks, if needed. */
	if (startfw) {
		if (cac_cmd(sc, CAC_CMD_START_FIRMWARE, &cinfo, sizeof(cinfo),
		    0, 0, CAC_CCB_DATA_IN, NULL)) {
			aprint_error_dev(sc->sc_dev, "CAC_CMD_START_FIRMWARE failed\n");
			return (-1);
		}
	}

	if (cac_cmd(sc, CAC_CMD_GET_CTRL_INFO, &cinfo, sizeof(cinfo), 0, 0,
	    CAC_CCB_DATA_IN, NULL)) {
		aprint_error_dev(sc->sc_dev, "CAC_CMD_GET_CTRL_INFO failed\n");
		return (-1);
	}

	strlcpy(firm, cinfo.firm_rev, 4+1);
	printf("%s: %d channels, firmware <%s>\n", device_xname(sc->sc_dev),
	    cinfo.scsi_chips, firm);

	/* Limit number of units to size of our sc_unitmask */
	sc->sc_nunits = cinfo.num_drvs;
	if (sc->sc_nunits > sizeof(sc->sc_unitmask) * NBBY)
		sc->sc_nunits = sizeof(sc->sc_unitmask) * NBBY;

	/* Attach our units */
	sc->sc_unitmask = 0;
	cac_rescan(sc->sc_dev, "cac", 0);

	/* Set our `shutdownhook' before we start any device activity. */
	if (cac_sdh == NULL)
		cac_sdh = shutdownhook_establish(cac_shutdown, NULL);

	mutex_enter(&sc->sc_mutex);
	(*sc->sc_cl.cl_intr_enable)(sc, CAC_INTR_ENABLE);
	mutex_exit(&sc->sc_mutex);

#if NBIO > 0
	if (bio_register(sc->sc_dev, cac_ioctl) != 0)
		aprint_error_dev(sc->sc_dev, "controller registration failed\n");
	else
		sc->sc_ioctl = cac_ioctl;
	if (cac_create_sensors(sc) != 0)
		aprint_error_dev(sc->sc_dev, "unable to create sensors\n");
#endif

	return (0);
}

int
cac_rescan(device_t self, const char *attr, const int *flags)
{
	struct cac_softc *sc;
	struct cac_attach_args caca;
	int locs[CACCF_NLOCS];
	int i;

	sc = device_private(self);
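	/* Attach only those logical drives that are not yet configured. */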
	for (i = 0; i < sc->sc_nunits; i++) {
		if (sc->sc_unitmask & (1 << i))
			continue;
		caca.caca_unit = i;

		locs[CACCF_UNIT] = i;

		if (config_found_sm_loc(self, attr, locs, &caca, cac_print,
			    config_stdsubmatch))
			sc->sc_unitmask |= 1 << i;
	}
	return 0;
}

/*
 * Shut down all `cac' controllers.
 */
static void
cac_shutdown(void *cookie)
{
	struct cac_softc *sc;
	u_int8_t tbuf[512];
	int i;

	for (i = 0; i < cac_cd.cd_ndevs; i++) {
		if ((sc = device_lookup_private(&cac_cd, i)) == NULL)
			continue;
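		/*
		 * Ask the controller to flush its write cache so no dirty
		 * data is lost when power goes away.
		 */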
		memset(tbuf, 0, sizeof(tbuf));
		tbuf[0] = 1;
		cac_cmd(sc, CAC_CMD_FLUSH_CACHE, tbuf, sizeof(tbuf), 0, 0,
		    CAC_CCB_DATA_OUT, NULL);
	}
}

/*
 * Print autoconfiguration message for a sub-device.
 */
static int
cac_print(void *aux, const char *pnp)
{
	struct cac_attach_args *caca;

	caca = (struct cac_attach_args *)aux;

	if (pnp != NULL)
		aprint_normal("block device at %s", pnp);
	aprint_normal(" unit %d", caca->caca_unit);
	return (UNCONF);
}

/*
 * Handle an interrupt from the controller: process finished CCBs and
 * dequeue any waiting CCBs.
 */
int
cac_intr(void *cookie)
{
	struct cac_softc *sc;
	struct cac_ccb *ccb;
	int rv;

	sc = cookie;

	mutex_enter(&sc->sc_mutex);

	if ((*sc->sc_cl.cl_intr_pending)(sc)) {
		while ((ccb = (*sc->sc_cl.cl_completed)(sc)) != NULL) {
			cac_ccb_done(sc, ccb);
			cac_ccb_start(sc, NULL);
		}
		rv = 1;
	} else
		rv = 0;

	mutex_exit(&sc->sc_mutex);

	return (rv);
}

/*
 * Execute a [polled] command.
 */
int
cac_cmd(struct cac_softc *sc, int command, void *data, int datasize,
	int drive, int blkno, int flags, struct cac_context *context)
{
	struct cac_ccb *ccb;
	struct cac_sgb *sgb;
	int i, rv, size, nsegs;

	size = 0;

	if ((ccb = cac_ccb_alloc(sc, 1)) == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to alloc CCB\n");
		return (EAGAIN);
	}

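	/*
	 * For commands that transfer data, map the caller's buffer and
	 * build the scatter/gather list in the little-endian format the
	 * controller expects.
	 */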
	if ((flags & (CAC_CCB_DATA_IN | CAC_CCB_DATA_OUT)) != 0) {
		bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap_xfer,
		    (void *)data, datasize, NULL, BUS_DMA_NOWAIT |
		    BUS_DMA_STREAMING | ((flags & CAC_CCB_DATA_IN) ?
		    BUS_DMA_READ : BUS_DMA_WRITE));

		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0, datasize,
		    (flags & CAC_CCB_DATA_IN) != 0 ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE);

		sgb = ccb->ccb_seg;
		nsegs = uimin(ccb->ccb_dmamap_xfer->dm_nsegs, CAC_SG_SIZE);

		for (i = 0; i < nsegs; i++, sgb++) {
			size += ccb->ccb_dmamap_xfer->dm_segs[i].ds_len;
			sgb->length =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len);
			sgb->addr =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr);
		}
	} else {
		size = datasize;
		nsegs = 0;
	}

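	/*
	 * Fill in the command header and request block.  The header size is
	 * expressed in 32-bit words and always reserves room for a full
	 * scatter/gather list; bcount is the transfer length in DEV_BSIZE
	 * blocks.
	 */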
	ccb->ccb_hdr.drive = drive;
	ccb->ccb_hdr.priority = 0;
	ccb->ccb_hdr.size = htole16((sizeof(struct cac_req) +
	    sizeof(struct cac_sgb) * CAC_SG_SIZE) >> 2);

	ccb->ccb_req.next = 0;
	ccb->ccb_req.error = 0;
	ccb->ccb_req.reserved = 0;
	ccb->ccb_req.bcount = htole16(howmany(size, DEV_BSIZE));
	ccb->ccb_req.command = command;
	ccb->ccb_req.sgcount = nsegs;
	ccb->ccb_req.blkno = htole32(blkno);

	ccb->ccb_flags = flags;
	ccb->ccb_datasize = size;

	mutex_enter(&sc->sc_mutex);

	if (context == NULL) {
		memset(&ccb->ccb_context, 0, sizeof(struct cac_context));

		/* Synchronous (polled) commands mustn't wait for a free FIFO slot. */
		if ((*sc->sc_cl.cl_fifo_full)(sc)) {
			cac_ccb_free(sc, ccb);
			rv = EAGAIN;
		} else {
#ifdef DIAGNOSTIC
			ccb->ccb_flags |= CAC_CCB_ACTIVE;
#endif
			(*sc->sc_cl.cl_submit)(sc, ccb);
			rv = cac_ccb_poll(sc, ccb, 2000);
			cac_ccb_free(sc, ccb);
		}
	} else {
		memcpy(&ccb->ccb_context, context, sizeof(struct cac_context));
		(void)cac_ccb_start(sc, ccb);
		rv = 0;
	}

	mutex_exit(&sc->sc_mutex);
	return (rv);
}

/*
 * Wait for the specified CCB to complete.
 */
static int
cac_ccb_poll(struct cac_softc *sc, struct cac_ccb *wantccb, int timo)
{
	struct cac_ccb *ccb;

	KASSERT(mutex_owned(&sc->sc_mutex));

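	/*
	 * The timeout is given in milliseconds, but the DELAY(1) in the
	 * loop below waits one microsecond per iteration, so scale it.
	 */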
	timo *= 1000;

	do {
		for (; timo != 0; timo--) {
			ccb = (*sc->sc_cl.cl_completed)(sc);
			if (ccb != NULL)
				break;
			DELAY(1);
		}

		if (timo == 0) {
			printf("%s: timeout\n", device_xname(sc->sc_dev));
			return (EBUSY);
		}
		cac_ccb_done(sc, ccb);
	} while (ccb != wantccb);

	return (0);
}

/*
 * Enqueue the specified command (if any) and attempt to start all enqueued
 * commands.
 */
static int
cac_ccb_start(struct cac_softc *sc, struct cac_ccb *ccb)
{

	KASSERT(mutex_owned(&sc->sc_mutex));

	if (ccb != NULL)
		SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_queue, ccb, ccb_chain);

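	/*
	 * Feed queued commands to the controller until its command FIFO
	 * fills up; anything left over is restarted later from cac_intr().
	 */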
	while ((ccb = SIMPLEQ_FIRST(&sc->sc_ccb_queue)) != NULL) {
		if ((*sc->sc_cl.cl_fifo_full)(sc))
			return (EAGAIN);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_queue, ccb_chain);
#ifdef DIAGNOSTIC
		ccb->ccb_flags |= CAC_CCB_ACTIVE;
#endif
		(*sc->sc_cl.cl_submit)(sc, ccb);
	}

	return (0);
}

/*
 * Process a finished CCB.
 */
static void
cac_ccb_done(struct cac_softc *sc, struct cac_ccb *ccb)
{
	device_t dv;
	void *context;
	int error;

	error = 0;

	KASSERT(mutex_owned(&sc->sc_mutex));

#ifdef DIAGNOSTIC
	if ((ccb->ccb_flags & CAC_CCB_ACTIVE) == 0)
		panic("cac_ccb_done: CCB not active");
	ccb->ccb_flags &= ~CAC_CCB_ACTIVE;
#endif

	if ((ccb->ccb_flags & (CAC_CCB_DATA_IN | CAC_CCB_DATA_OUT)) != 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
		    ccb->ccb_datasize, ccb->ccb_flags & CAC_CCB_DATA_IN ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);
	}

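	/*
	 * Commands issued with a completion context are finished via the
	 * requester's callback; polled commands have no handler, so just
	 * report any error bits the controller returned.
	 */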
	error = ccb->ccb_req.error;
	if (ccb->ccb_context.cc_handler != NULL) {
		dv = ccb->ccb_context.cc_dv;
		context = ccb->ccb_context.cc_context;
		cac_ccb_free(sc, ccb);
		(*ccb->ccb_context.cc_handler)(dv, context, error);
	} else {
		if ((error & CAC_RET_SOFT_ERROR) != 0)
			aprint_error_dev(sc->sc_dev, "soft error; array may be degraded\n");
		if ((error & CAC_RET_HARD_ERROR) != 0)
			aprint_error_dev(sc->sc_dev, "hard error\n");
		if ((error & CAC_RET_CMD_REJECTED) != 0) {
			error = 1;
			aprint_error_dev(sc->sc_dev, "invalid request\n");
		}
	}
}

/*
 * Allocate a CCB.
 */
static struct cac_ccb *
cac_ccb_alloc(struct cac_softc *sc, int nosleep)
{
	struct cac_ccb *ccb;

	mutex_enter(&sc->sc_mutex);

	for (;;) {
		if ((ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free)) != NULL) {
			SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, ccb_chain);
			break;
		}
		if (nosleep) {
			ccb = NULL;
			break;
		}
		cv_wait(&sc->sc_ccb_cv, &sc->sc_mutex);
	}

	mutex_exit(&sc->sc_mutex);
	return (ccb);
}

/*
 * Put a CCB onto the freelist.
 */
static void
cac_ccb_free(struct cac_softc *sc, struct cac_ccb *ccb)
{

	KASSERT(mutex_owned(&sc->sc_mutex));

	ccb->ccb_flags = 0;
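	/* Wake a waiter in cac_ccb_alloc() if the free list was empty. */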
	if (SIMPLEQ_EMPTY(&sc->sc_ccb_free))
		cv_signal(&sc->sc_ccb_cv);
	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
}

/*
 * Board specific linkage shared between multiple bus types.
 */

static int
cac_l0_fifo_full(struct cac_softc *sc)
{

	KASSERT(mutex_owned(&sc->sc_mutex));

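	/*
	 * Reading zero from the command FIFO register means the controller
	 * cannot accept another command list right now.
	 */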
	return (cac_inl(sc, CAC_REG_CMD_FIFO) == 0);
}

static void
cac_l0_submit(struct cac_softc *sc, struct cac_ccb *ccb)
{

	KASSERT(mutex_owned(&sc->sc_mutex));

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
	    (char *)ccb - (char *)sc->sc_ccbs,
	    sizeof(struct cac_ccb), BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	cac_outl(sc, CAC_REG_CMD_FIFO, ccb->ccb_paddr);
}

static struct cac_ccb *
cac_l0_completed(struct cac_softc *sc)
{
	struct cac_ccb *ccb;
	paddr_t off;
	int failed;

	KASSERT(mutex_owned(&sc->sc_mutex));

	if ((off = cac_inl(sc, CAC_REG_DONE_FIFO)) == 0)
		return (NULL);

	/* The low two bits of the completion address flag a failed command. */
	failed = (off & 3) != 0;
	if (failed)
		aprint_error_dev(sc->sc_dev, "failed command list returned: %lx\n",
		    (long)off);

	off = (off & ~3) - sc->sc_ccbs_paddr;
	ccb = (struct cac_ccb *)((char *)sc->sc_ccbs + off);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, off, sizeof(struct cac_ccb),
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

	if (failed && ccb->ccb_req.error == 0)
		ccb->ccb_req.error = CAC_RET_CMD_REJECTED;

	return (ccb);
}

static int
cac_l0_intr_pending(struct cac_softc *sc)
{

	KASSERT(mutex_owned(&sc->sc_mutex));

	return (cac_inl(sc, CAC_REG_INTR_PENDING) & CAC_INTR_ENABLE);
}

static void
cac_l0_intr_enable(struct cac_softc *sc, int state)
{

	KASSERT(mutex_owned(&sc->sc_mutex));

	cac_outl(sc, CAC_REG_INTR_MASK,
	    state ? CAC_INTR_ENABLE : CAC_INTR_DISABLE);
}

#if NBIO > 0
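/*
 * Map the controller's fault-tolerance mode to a RAID level, and its
 * logical drive status codes to bioctl(8) volume status values.
 */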
const int cac_level[] = { 0, 4, 1, 5, 51, 7 };
const int cac_stat[] = { BIOC_SVONLINE, BIOC_SVOFFLINE, BIOC_SVOFFLINE,
    BIOC_SVDEGRADED, BIOC_SVREBUILD, BIOC_SVREBUILD, BIOC_SVDEGRADED,
    BIOC_SVDEGRADED, BIOC_SVINVALID, BIOC_SVINVALID, BIOC_SVBUILDING,
    BIOC_SVOFFLINE, BIOC_SVBUILDING };

int
cac_ioctl(device_t dev, u_long cmd, void *addr)
{
	struct cac_softc *sc = device_private(dev);
	struct bioc_inq *bi;
	struct bioc_disk *bd;
	cac_lock_t lock;
	int error = 0;

	lock = CAC_LOCK(sc);
	switch (cmd) {
	case BIOCINQ:
		bi = (struct bioc_inq *)addr;
		strlcpy(bi->bi_dev, device_xname(sc->sc_dev), sizeof(bi->bi_dev));
		bi->bi_novol = sc->sc_nunits;
		bi->bi_nodisk = 0;
		break;

	case BIOCVOL:
		error = cac_ioctl_vol(sc, (struct bioc_vol *)addr);
		break;

	case BIOCDISK:
	case BIOCDISK_NOVOL:
		bd = (struct bioc_disk *)addr;
		if (bd->bd_volid >= sc->sc_nunits) {
			error = EINVAL;
			break;
		}
		/* No disk information yet */
		break;

	default:
		error = EINVAL;
	}
	CAC_UNLOCK(sc, lock);

	return (error);
}

int
cac_ioctl_vol(struct cac_softc *sc, struct bioc_vol *bv)
{
	struct cac_drive_info dinfo;
	struct cac_drive_status dstatus;
	u_int32_t blks;

	if (bv->bv_volid >= sc->sc_nunits) {
		return EINVAL;
	}
	if (cac_cmd(sc, CAC_CMD_GET_LOG_DRV_INFO, &dinfo, sizeof(dinfo),
	    bv->bv_volid, 0, CAC_CCB_DATA_IN, NULL)) {
		return EIO;
	}
	if (cac_cmd(sc, CAC_CMD_SENSE_DRV_STATUS, &dstatus, sizeof(dstatus),
	    bv->bv_volid, 0, CAC_CCB_DATA_IN, NULL)) {
		return EIO;
	}
	blks = CAC_GET2(dinfo.ncylinders) * CAC_GET1(dinfo.nheads) *
	    CAC_GET1(dinfo.nsectors);
	bv->bv_size = (off_t)blks * CAC_GET2(dinfo.secsize);
	bv->bv_level = cac_level[CAC_GET1(dinfo.mirror)];	/*XXX limit check */
	bv->bv_nodisk = 0;		/* XXX */
	bv->bv_status = 0;		/* XXX */
	bv->bv_percent = -1;
	bv->bv_seconds = 0;
	if (dstatus.stat < sizeof(cac_stat)/sizeof(cac_stat[0]))
		bv->bv_status = cac_stat[dstatus.stat];
	if (bv->bv_status == BIOC_SVREBUILD ||
	    bv->bv_status == BIOC_SVBUILDING)
		bv->bv_percent = ((blks - CAC_GET4(dstatus.prog)) * 1000ULL) /
		    blks;
	return 0;
}

int
cac_create_sensors(struct cac_softc *sc)
{
	int			i;
	int nsensors = sc->sc_nunits;

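	/*
	 * Register one ENVSYS_DRIVE sensor per logical drive; its state is
	 * derived from the bioctl volume status in cac_sensor_refresh().
	 */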
	sc->sc_sme = sysmon_envsys_create();
	sc->sc_sensor = malloc(sizeof(envsys_data_t) * nsensors,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < nsensors; i++) {
		sc->sc_sensor[i].units = ENVSYS_DRIVE;
		sc->sc_sensor[i].state = ENVSYS_SINVALID;
		sc->sc_sensor[i].value_cur = ENVSYS_DRIVE_EMPTY;
		/* Enable monitoring for drive state changes */
		sc->sc_sensor[i].flags |= ENVSYS_FMONSTCHANGED;
		/* logical drives */
		snprintf(sc->sc_sensor[i].desc,
		    sizeof(sc->sc_sensor[i].desc), "%s:%d",
		    device_xname(sc->sc_dev), i);
		if (sysmon_envsys_sensor_attach(sc->sc_sme,
		    &sc->sc_sensor[i]))
			goto out;
	}
	sc->sc_sme->sme_name = device_xname(sc->sc_dev);
	sc->sc_sme->sme_cookie = sc;
	sc->sc_sme->sme_refresh = cac_sensor_refresh;
	if (sysmon_envsys_register(sc->sc_sme)) {
		aprint_error_dev(sc->sc_dev, "unable to register with sysmon\n");
		return (1);
	}
	return (0);

out:
	free(sc->sc_sensor, M_DEVBUF);
	sysmon_envsys_destroy(sc->sc_sme);
	return EINVAL;
}

void
cac_sensor_refresh(struct sysmon_envsys *sme, envsys_data_t *edata)
{
	struct cac_softc	*sc = sme->sme_cookie;
	struct bioc_vol		bv;
	int s;

	if (edata->sensor >= sc->sc_nunits)
		return;

	memset(&bv, 0, sizeof(bv));
	bv.bv_volid = edata->sensor;
	s = splbio();
	if (cac_ioctl_vol(sc, &bv))
		bv.bv_status = BIOC_SVINVALID;
	splx(s);

	bio_vol_to_envsys(edata, &bv);
}
#endif /* NBIO > 0 */

MODULE(MODULE_CLASS_DRIVER, cac, NULL);

#ifdef _MODULE
CFDRIVER_DECL(cac, DV_DISK, NULL);
#endif

static int
cac_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_cfdriver_attach(&cac_cd);
		break;
	case MODULE_CMD_FINI:
		error = config_cfdriver_detach(&cac_cd);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif
	return error;
}
773