xref: /netbsd-src/sys/dev/pci/arcmsr.c (revision ba65fde2d7fefa7d39838fa5fa855e62bd606b5e)
1 /*	$NetBSD: arcmsr.c,v 1.30 2011/06/20 22:03:16 pgoyette Exp $ */
2 /*	$OpenBSD: arc.c,v 1.68 2007/10/27 03:28:27 dlg Exp $ */
3 
4 /*
5  * Copyright (c) 2007, 2008 Juan Romero Pardines <xtraeme@netbsd.org>
6  * Copyright (c) 2006 David Gwynne <dlg@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include "bio.h"
22 
23 #include <sys/cdefs.h>
24 __KERNEL_RCSID(0, "$NetBSD: arcmsr.c,v 1.30 2011/06/20 22:03:16 pgoyette Exp $");
25 
26 #include <sys/param.h>
27 #include <sys/buf.h>
28 #include <sys/kernel.h>
29 #include <sys/malloc.h>
30 #include <sys/device.h>
31 #include <sys/kmem.h>
32 #include <sys/kthread.h>
33 #include <sys/mutex.h>
34 #include <sys/condvar.h>
35 #include <sys/rwlock.h>
36 
37 #if NBIO > 0
38 #include <sys/ioctl.h>
39 #include <dev/biovar.h>
40 #endif
41 
42 #include <dev/pci/pcireg.h>
43 #include <dev/pci/pcivar.h>
44 #include <dev/pci/pcidevs.h>
45 
46 #include <dev/scsipi/scsipi_all.h>
47 #include <dev/scsipi/scsi_all.h>
48 #include <dev/scsipi/scsiconf.h>
49 
50 #include <dev/sysmon/sysmonvar.h>
51 
52 #include <sys/bus.h>
53 
54 #include <dev/pci/arcmsrvar.h>
55 
56 /* #define ARC_DEBUG */
57 #ifdef ARC_DEBUG
58 #define ARC_D_INIT	(1<<0)
59 #define ARC_D_RW	(1<<1)
60 #define ARC_D_DB	(1<<2)
61 
62 int arcdebug = 0;
63 
64 #define DPRINTF(p...)		do { if (arcdebug) printf(p); } while (0)
65 #define DNPRINTF(n, p...)	do { if ((n) & arcdebug) printf(p); } while (0)
66 
67 #else
68 #define DPRINTF(p, ...)		/* p */
69 #define DNPRINTF(n, p, ...)	/* n, p */
70 #endif
71 
72 /*
73  * the fw header must always equal this.
74  */
75 static struct arc_fw_hdr arc_fw_hdr = { 0x5e, 0x01, 0x61 };
76 
77 /*
78  * autoconf(9) glue.
79  */
80 static int 	arc_match(device_t, cfdata_t, void *);
81 static void 	arc_attach(device_t, device_t, void *);
82 static int 	arc_detach(device_t, int);
83 static bool 	arc_shutdown(device_t, int);
84 static int 	arc_intr(void *);
85 static void	arc_minphys(struct buf *);
86 
87 CFATTACH_DECL_NEW(arcmsr, sizeof(struct arc_softc),
88 	arc_match, arc_attach, arc_detach, NULL);
89 
90 /*
91  * bio(4) and sysmon_envsys(9) glue.
92  */
93 #if NBIO > 0
94 static int 	arc_bioctl(device_t, u_long, void *);
95 static int 	arc_bio_inq(struct arc_softc *, struct bioc_inq *);
96 static int 	arc_bio_vol(struct arc_softc *, struct bioc_vol *);
97 static int	arc_bio_disk_volume(struct arc_softc *, struct bioc_disk *);
98 static int	arc_bio_disk_novol(struct arc_softc *, struct bioc_disk *);
99 static void	arc_bio_disk_filldata(struct arc_softc *, struct bioc_disk *,
100 				      struct arc_fw_diskinfo *, int);
101 static int 	arc_bio_alarm(struct arc_softc *, struct bioc_alarm *);
102 static int 	arc_bio_alarm_state(struct arc_softc *, struct bioc_alarm *);
103 static int 	arc_bio_getvol(struct arc_softc *, int,
104 			       struct arc_fw_volinfo *);
105 static int	arc_bio_setstate(struct arc_softc *, struct bioc_setstate *);
106 static int 	arc_bio_volops(struct arc_softc *, struct bioc_volops *);
107 static void 	arc_create_sensors(void *);
108 static void 	arc_refresh_sensors(struct sysmon_envsys *, envsys_data_t *);
109 static int	arc_fw_parse_status_code(struct arc_softc *, uint8_t *);
110 #endif
111 
112 static int
113 arc_match(device_t parent, cfdata_t match, void *aux)
114 {
115 	struct pci_attach_args *pa = aux;
116 
117 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ARECA) {
118 		switch (PCI_PRODUCT(pa->pa_id)) {
119 		case PCI_PRODUCT_ARECA_ARC1110:
120 		case PCI_PRODUCT_ARECA_ARC1120:
121 		case PCI_PRODUCT_ARECA_ARC1130:
122 		case PCI_PRODUCT_ARECA_ARC1160:
123 		case PCI_PRODUCT_ARECA_ARC1170:
124 		case PCI_PRODUCT_ARECA_ARC1200:
125 		case PCI_PRODUCT_ARECA_ARC1202:
126 		case PCI_PRODUCT_ARECA_ARC1210:
127 		case PCI_PRODUCT_ARECA_ARC1220:
128 		case PCI_PRODUCT_ARECA_ARC1230:
129 		case PCI_PRODUCT_ARECA_ARC1260:
130 		case PCI_PRODUCT_ARECA_ARC1270:
131 		case PCI_PRODUCT_ARECA_ARC1280:
132 		case PCI_PRODUCT_ARECA_ARC1380:
133 		case PCI_PRODUCT_ARECA_ARC1381:
134 		case PCI_PRODUCT_ARECA_ARC1680:
135 		case PCI_PRODUCT_ARECA_ARC1681:
136 			return 1;
137 		default:
138 			break;
139 		}
140 	}
141 
142 	return 0;
143 }
144 
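/*
 * Attach: map the PCI BAR and interrupt, query the controller firmware,
 * allocate the ccb pool, attach the scsipi bus and enable interrupts.
 * With bio(4) we also register the ioctl handler and spawn a thread
 * that creates the envsys sensors.
 */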
145 static void
146 arc_attach(device_t parent, device_t self, void *aux)
147 {
148 	struct arc_softc	*sc = device_private(self);
149 	struct pci_attach_args	*pa = aux;
150 	struct scsipi_adapter	*adapt = &sc->sc_adapter;
151 	struct scsipi_channel	*chan = &sc->sc_chan;
152 
153 	sc->sc_dev = self;
154 	sc->sc_talking = 0;
155 	rw_init(&sc->sc_rwlock);
156 	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_BIO);
157 	cv_init(&sc->sc_condvar, "arcdb");
158 
159 	if (arc_map_pci_resources(self, pa) != 0) {
160 		/* error message printed by arc_map_pci_resources */
161 		return;
162 	}
163 
164 	if (arc_query_firmware(self) != 0) {
165 		/* error message printed by arc_query_firmware */
166 		goto unmap_pci;
167 	}
168 
169 	if (arc_alloc_ccbs(self) != 0) {
170 		/* error message printed by arc_alloc_ccbs */
171 		goto unmap_pci;
172 	}
173 
174 	if (!pmf_device_register1(self, NULL, NULL, arc_shutdown))
175 		panic("%s: couldn't establish shutdown handler\n",
176 		    device_xname(self));
177 
178 	memset(adapt, 0, sizeof(*adapt));
179 	adapt->adapt_dev = self;
180 	adapt->adapt_nchannels = 1;
181 	adapt->adapt_openings = sc->sc_req_count / ARC_MAX_TARGET;
182 	adapt->adapt_max_periph = adapt->adapt_openings;
183 	adapt->adapt_minphys = arc_minphys;
184 	adapt->adapt_request = arc_scsi_cmd;
185 
186 	memset(chan, 0, sizeof(*chan));
187 	chan->chan_adapter = adapt;
188 	chan->chan_bustype = &scsi_bustype;
189 	chan->chan_nluns = ARC_MAX_LUN;
190 	chan->chan_ntargets = ARC_MAX_TARGET;
191 	chan->chan_id = ARC_MAX_TARGET;
192 	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;
193 
194 	/*
195 	 * Save the device_t returned, because we may need to attach
196 	 * devices via the management interface.
197 	 */
198 	sc->sc_scsibus_dv = config_found(self, &sc->sc_chan, scsiprint);
199 
200 	/* enable interrupts */
201 	arc_write(sc, ARC_REG_INTRMASK,
202 	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
203 
204 #if NBIO > 0
205 	/*
206 	 * Register the driver to bio(4) and setup the sensors.
207 	 * Register the driver with bio(4) and set up the sensors.
208 	if (bio_register(self, arc_bioctl) != 0)
209 		panic("%s: bioctl registration failed\n", device_xname(self));
210 
211 	/*
212 	 * We need to talk to the firmware to get volume info. Our firmware
213 	 * interface relies on being able to sleep, so we need to use a thread
214 	 * to do the work.
215 	 */
216 	if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
217 	    arc_create_sensors, sc, &sc->sc_lwp, "arcmsr_sensors") != 0)
218 		panic("%s: unable to create a kernel thread for sensors\n",
219 		    device_xname(self));
220 #endif
221 
222 	return;
223 
224 unmap_pci:
225 	arc_unmap_pci_resources(sc);
226 }
227 
228 static int
229 arc_detach(device_t self, int flags)
230 {
231 	struct arc_softc		*sc = device_private(self);
232 
233 	if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
234 		aprint_error_dev(self, "timeout waiting to stop bg rebuild\n");
235 
236 	if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
237 		aprint_error_dev(self, "timeout waiting to flush cache\n");
238 
239 	if (sc->sc_sme != NULL)
240 		sysmon_envsys_unregister(sc->sc_sme);
241 
242 	return 0;
243 }
244 
245 static bool
246 arc_shutdown(device_t self, int how)
247 {
248 	struct arc_softc		*sc = device_private(self);
249 
250 	if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
251 		aprint_error_dev(self, "timeout waiting to stop bg rebuild\n");
252 
253 	if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
254 		aprint_error_dev(self, "timeout waiting to flush cache\n");
255 
256 	return true;
257 }
258 
259 static void
260 arc_minphys(struct buf *bp)
261 {
262 	if (bp->b_bcount > MAXPHYS)
263 		bp->b_bcount = MAXPHYS;
264 	minphys(bp);
265 }
266 
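/*
 * Interrupt handler: acknowledge doorbell interrupts (waking up
 * arc_msgbuf() if we are talking to the firmware, otherwise dropping
 * the message) and complete any I/O commands found on the reply queue.
 */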
267 static int
268 arc_intr(void *arg)
269 {
270 	struct arc_softc		*sc = arg;
271 	struct arc_ccb			*ccb = NULL;
272 	char				*kva = ARC_DMA_KVA(sc->sc_requests);
273 	struct arc_io_cmd		*cmd;
274 	uint32_t			reg, intrstat;
275 
276 	mutex_spin_enter(&sc->sc_mutex);
277 	intrstat = arc_read(sc, ARC_REG_INTRSTAT);
278 	if (intrstat == 0x0) {
279 		mutex_spin_exit(&sc->sc_mutex);
280 		return 0;
281 	}
282 
283 	intrstat &= ARC_REG_INTRSTAT_POSTQUEUE | ARC_REG_INTRSTAT_DOORBELL;
284 	arc_write(sc, ARC_REG_INTRSTAT, intrstat);
285 
286 	if (intrstat & ARC_REG_INTRSTAT_DOORBELL) {
287 		if (sc->sc_talking) {
288 			arc_write(sc, ARC_REG_INTRMASK,
289 			    ~ARC_REG_INTRMASK_POSTQUEUE);
290 			cv_broadcast(&sc->sc_condvar);
291 		} else {
292 			/* otherwise drop it */
293 			reg = arc_read(sc, ARC_REG_OUTB_DOORBELL);
294 			arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
295 			if (reg & ARC_REG_OUTB_DOORBELL_WRITE_OK)
296 				arc_write(sc, ARC_REG_INB_DOORBELL,
297 				    ARC_REG_INB_DOORBELL_READ_OK);
298 		}
299 	}
300 	mutex_spin_exit(&sc->sc_mutex);
301 
302 	while ((reg = arc_pop(sc)) != 0xffffffff) {
303 		cmd = (struct arc_io_cmd *)(kva +
304 		    ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
305 		    (uint32_t)ARC_DMA_DVA(sc->sc_requests)));
306 		ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];
307 
308 		bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
309 		    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
310 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
311 
312 		arc_scsi_cmd_done(sc, ccb, reg);
313 	}
314 
315 
316 	return 1;
317 }
318 
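/*
 * scsipi adapter request entry point: build an arc_io_cmd for the xfer,
 * load its scatter/gather list and post it to the inbound queue.  Polled
 * commands are completed synchronously via arc_complete().
 */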
319 void
320 arc_scsi_cmd(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
321 {
322 	struct scsipi_periph		*periph;
323 	struct scsipi_xfer		*xs;
324 	struct scsipi_adapter		*adapt = chan->chan_adapter;
325 	struct arc_softc		*sc = device_private(adapt->adapt_dev);
326 	struct arc_ccb			*ccb;
327 	struct arc_msg_scsicmd		*cmd;
328 	uint32_t			reg;
329 	uint8_t				target;
330 
331 	switch (req) {
332 	case ADAPTER_REQ_GROW_RESOURCES:
333 		/* Not supported. */
334 		return;
335 	case ADAPTER_REQ_SET_XFER_MODE:
336 		/* Not supported. */
337 		return;
338 	case ADAPTER_REQ_RUN_XFER:
339 		break;
340 	}
341 
342 	mutex_spin_enter(&sc->sc_mutex);
343 
344 	xs = arg;
345 	periph = xs->xs_periph;
346 	target = periph->periph_target;
347 
348 	if (xs->cmdlen > ARC_MSG_CDBLEN) {
349 		memset(&xs->sense, 0, sizeof(xs->sense));
350 		xs->sense.scsi_sense.response_code = SSD_RCODE_VALID | 0x70;
351 		xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
352 		xs->sense.scsi_sense.asc = 0x20;
353 		xs->error = XS_SENSE;
354 		xs->status = SCSI_CHECK;
355 		mutex_spin_exit(&sc->sc_mutex);
356 		scsipi_done(xs);
357 		return;
358 	}
359 
360 	ccb = arc_get_ccb(sc);
361 	if (ccb == NULL) {
362 		xs->error = XS_RESOURCE_SHORTAGE;
363 		mutex_spin_exit(&sc->sc_mutex);
364 		scsipi_done(xs);
365 		return;
366 	}
367 
368 	ccb->ccb_xs = xs;
369 
370 	if (arc_load_xs(ccb) != 0) {
371 		xs->error = XS_DRIVER_STUFFUP;
372 		arc_put_ccb(sc, ccb);
373 		mutex_spin_exit(&sc->sc_mutex);
374 		scsipi_done(xs);
375 		return;
376 	}
377 
378 	cmd = &ccb->ccb_cmd->cmd;
379 	reg = ccb->ccb_cmd_post;
380 
381 	/* bus is always 0 */
382 	cmd->target = target;
383 	cmd->lun = periph->periph_lun;
384 	cmd->function = 1; /* XXX magic number */
385 
386 	cmd->cdb_len = xs->cmdlen;
387 	cmd->sgl_len = ccb->ccb_dmamap->dm_nsegs;
388 	if (xs->xs_control & XS_CTL_DATA_OUT)
389 		cmd->flags = ARC_MSG_SCSICMD_FLAG_WRITE;
390 	if (ccb->ccb_dmamap->dm_nsegs > ARC_SGL_256LEN) {
391 		cmd->flags |= ARC_MSG_SCSICMD_FLAG_SGL_BSIZE_512;
392 		reg |= ARC_REG_POST_QUEUE_BIGFRAME;
393 	}
394 
395 	cmd->context = htole32(ccb->ccb_id);
396 	cmd->data_len = htole32(xs->datalen);
397 
398 	memcpy(cmd->cdb, xs->cmd, xs->cmdlen);
399 
400 	/* we've built the command, let's put it on the hw */
401 	bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
402 	    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
403 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
404 
405 	arc_push(sc, reg);
406 	if (xs->xs_control & XS_CTL_POLL) {
407 		if (arc_complete(sc, ccb, xs->timeout) != 0) {
408 			xs->error = XS_DRIVER_STUFFUP;
409 			mutex_spin_exit(&sc->sc_mutex);
410 			scsipi_done(xs);
411 			return;
412 		}
413 	}
414 
415 	mutex_spin_exit(&sc->sc_mutex);
416 }
417 
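/*
 * Load the data buffer of an xfer into the ccb dmamap and translate the
 * DMA segments into the firmware's 64-bit scatter/gather entries.
 */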
418 int
419 arc_load_xs(struct arc_ccb *ccb)
420 {
421 	struct arc_softc		*sc = ccb->ccb_sc;
422 	struct scsipi_xfer		*xs = ccb->ccb_xs;
423 	bus_dmamap_t			dmap = ccb->ccb_dmamap;
424 	struct arc_sge			*sgl = ccb->ccb_cmd->sgl, *sge;
425 	uint64_t			addr;
426 	int				i, error;
427 
428 	if (xs->datalen == 0)
429 		return 0;
430 
431 	error = bus_dmamap_load(sc->sc_dmat, dmap,
432 	    xs->data, xs->datalen, NULL,
433 	    (xs->xs_control & XS_CTL_NOSLEEP) ?
434 	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
435 	if (error != 0) {
436 		aprint_error("%s: error %d loading dmamap\n",
437 		    device_xname(sc->sc_dev), error);
438 		return 1;
439 	}
440 
441 	for (i = 0; i < dmap->dm_nsegs; i++) {
442 		sge = &sgl[i];
443 
444 		sge->sg_hdr = htole32(ARC_SGE_64BIT | dmap->dm_segs[i].ds_len);
445 		addr = dmap->dm_segs[i].ds_addr;
446 		sge->sg_hi_addr = htole32((uint32_t)(addr >> 32));
447 		sge->sg_lo_addr = htole32((uint32_t)addr);
448 	}
449 
450 	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
451 	    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
452 	    BUS_DMASYNC_PREWRITE);
453 
454 	return 0;
455 }
456 
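/*
 * Completion of a posted command: sync and unload the data dmamap and
 * translate the firmware status (reply queue error bit plus the message
 * status byte) into scsipi status and error codes.
 */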
457 void
458 arc_scsi_cmd_done(struct arc_softc *sc, struct arc_ccb *ccb, uint32_t reg)
459 {
460 	struct scsipi_xfer		*xs = ccb->ccb_xs;
461 	struct arc_msg_scsicmd		*cmd;
462 
463 	if (xs->datalen != 0) {
464 		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
465 		    ccb->ccb_dmamap->dm_mapsize,
466 		    (xs->xs_control & XS_CTL_DATA_IN) ?
467 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
468 		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
469 	}
470 
471 	/* timeout_del */
472 	xs->status |= XS_STS_DONE;
473 
474 	if (reg & ARC_REG_REPLY_QUEUE_ERR) {
475 		cmd = &ccb->ccb_cmd->cmd;
476 
477 		switch (cmd->status) {
478 		case ARC_MSG_STATUS_SELTIMEOUT:
479 		case ARC_MSG_STATUS_ABORTED:
480 		case ARC_MSG_STATUS_INIT_FAIL:
481 			xs->status = SCSI_OK;
482 			xs->error = XS_SELTIMEOUT;
483 			break;
484 
485 		case SCSI_CHECK:
486 			memset(&xs->sense, 0, sizeof(xs->sense));
487 			memcpy(&xs->sense, cmd->sense_data,
488 			    min(ARC_MSG_SENSELEN, sizeof(xs->sense)));
489 			xs->sense.scsi_sense.response_code =
490 			    SSD_RCODE_VALID | 0x70;
491 			xs->status = SCSI_CHECK;
492 			xs->error = XS_SENSE;
493 			xs->resid = 0;
494 			break;
495 
496 		default:
497 			/* unknown device status */
498 			xs->error = XS_BUSY; /* try again later? */
499 			xs->status = SCSI_BUSY;
500 			break;
501 		}
502 	} else {
503 		xs->status = SCSI_OK;
504 		xs->error = XS_NOERROR;
505 		xs->resid = 0;
506 	}
507 
508 	arc_put_ccb(sc, ccb);
509 	scsipi_done(xs);
510 }
511 
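/*
 * Poll the reply queue until the given ccb completes, with a timeout in
 * milliseconds.  Any other commands that complete in the meantime are
 * finished off as well.  Only used for XS_CTL_POLL commands.
 */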
512 int
513 arc_complete(struct arc_softc *sc, struct arc_ccb *nccb, int timeout)
514 {
515 	struct arc_ccb			*ccb = NULL;
516 	char				*kva = ARC_DMA_KVA(sc->sc_requests);
517 	struct arc_io_cmd		*cmd;
518 	uint32_t			reg;
519 
520 	do {
521 		reg = arc_pop(sc);
522 		if (reg == 0xffffffff) {
523 			if (timeout-- == 0)
524 				return 1;
525 
526 			delay(1000);
527 			continue;
528 		}
529 
530 		cmd = (struct arc_io_cmd *)(kva +
531 		    ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
532 		    ARC_DMA_DVA(sc->sc_requests)));
533 		ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];
534 
535 		bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
536 		    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
537 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
538 
539 		arc_scsi_cmd_done(sc, ccb, reg);
540 	} while (nccb != ccb);
541 
542 	return 0;
543 }
544 
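/*
 * Map the controller's register BAR and establish the interrupt handler.
 */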
545 int
546 arc_map_pci_resources(device_t self, struct pci_attach_args *pa)
547 {
548 	struct arc_softc		*sc = device_private(self);
549 	pcireg_t			memtype;
550 	pci_intr_handle_t		ih;
551 
552 	sc->sc_pc = pa->pa_pc;
553 	sc->sc_tag = pa->pa_tag;
554 	sc->sc_dmat = pa->pa_dmat;
555 
556 	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, ARC_PCI_BAR);
557 	if (pci_mapreg_map(pa, ARC_PCI_BAR, memtype, 0, &sc->sc_iot,
558 	    &sc->sc_ioh, NULL, &sc->sc_ios) != 0) {
559 		aprint_error(": unable to map system interface register\n");
560 		return 1;
561 	}
562 
563 	if (pci_intr_map(pa, &ih) != 0) {
564 		aprint_error(": unable to map interrupt\n");
565 		goto unmap;
566 	}
567 
568 	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
569 	    arc_intr, sc);
570 	if (sc->sc_ih == NULL) {
571 		aprint_error(": unable to map interrupt [2]\n");
572 		goto unmap;
573 	}
574 
575 	aprint_normal("\n");
576 	aprint_normal_dev(self, "interrupting at %s\n",
577 	    pci_intr_string(pa->pa_pc, ih));
578 
579 	return 0;
580 
581 unmap:
582 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
583 	sc->sc_ios = 0;
584 	return 1;
585 }
586 
587 void
588 arc_unmap_pci_resources(struct arc_softc *sc)
589 {
590 	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
591 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
592 	sc->sc_ios = 0;
593 }
594 
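/*
 * Wait for the firmware to come up, issue the GET_CONFIG and START_BGRB
 * messages and parse the firmware information in the message buffer to
 * find the number of ports, the SDRAM size and the request queue depth.
 */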
595 int
596 arc_query_firmware(device_t self)
597 {
598 	struct arc_softc 		*sc = device_private(self);
599 	struct arc_msg_firmware_info	fwinfo;
600 	char				string[81]; /* sizeof(vendor)*2+1 */
601 
602 	if (arc_wait_eq(sc, ARC_REG_OUTB_ADDR1, ARC_REG_OUTB_ADDR1_FIRMWARE_OK,
603 	    ARC_REG_OUTB_ADDR1_FIRMWARE_OK) != 0) {
604 		aprint_debug_dev(self, "timeout waiting for firmware ok\n");
605 		return 1;
606 	}
607 
608 	if (arc_msg0(sc, ARC_REG_INB_MSG0_GET_CONFIG) != 0) {
609 		aprint_debug_dev(self, "timeout waiting for get config\n");
610 		return 1;
611 	}
612 
613 	if (arc_msg0(sc, ARC_REG_INB_MSG0_START_BGRB) != 0) {
614 		aprint_debug_dev(self, "timeout waiting to start bg rebuild\n");
615 		return 1;
616 	}
617 
618 	arc_read_region(sc, ARC_REG_MSGBUF, &fwinfo, sizeof(fwinfo));
619 
620 	DNPRINTF(ARC_D_INIT, "%s: signature: 0x%08x\n",
621 	    device_xname(self), htole32(fwinfo.signature));
622 
623 	if (htole32(fwinfo.signature) != ARC_FWINFO_SIGNATURE_GET_CONFIG) {
624 		aprint_error_dev(self, "invalid firmware info from iop\n");
625 		return 1;
626 	}
627 
628 	DNPRINTF(ARC_D_INIT, "%s: request_len: %d\n",
629 	    device_xname(self), htole32(fwinfo.request_len));
630 	DNPRINTF(ARC_D_INIT, "%s: queue_len: %d\n",
631 	    device_xname(self), htole32(fwinfo.queue_len));
632 	DNPRINTF(ARC_D_INIT, "%s: sdram_size: %d\n",
633 	    device_xname(self), htole32(fwinfo.sdram_size));
634 	DNPRINTF(ARC_D_INIT, "%s: sata_ports: %d\n",
635 	    device_xname(self), htole32(fwinfo.sata_ports));
636 
637 	scsipi_strvis(string, 81, fwinfo.vendor, sizeof(fwinfo.vendor));
638 	DNPRINTF(ARC_D_INIT, "%s: vendor: \"%s\"\n",
639 	    device_xname(self), string);
640 
641 	scsipi_strvis(string, 17, fwinfo.model, sizeof(fwinfo.model));
642 	aprint_normal_dev(self, "Areca %s Host Adapter RAID controller\n",
643 	    string);
644 
645 	scsipi_strvis(string, 33, fwinfo.fw_version, sizeof(fwinfo.fw_version));
646 	DNPRINTF(ARC_D_INIT, "%s: version: \"%s\"\n",
647 	    device_xname(self), string);
648 
649 	aprint_normal_dev(self, "%d ports, %dMB SDRAM, firmware <%s>\n",
650 	    htole32(fwinfo.sata_ports), htole32(fwinfo.sdram_size), string);
651 
652 	if (htole32(fwinfo.request_len) != ARC_MAX_IOCMDLEN) {
653 		aprint_error_dev(self,
654 		    "unexpected request frame size (%d != %d)\n",
655 		    htole32(fwinfo.request_len), ARC_MAX_IOCMDLEN);
656 		return 1;
657 	}
658 
659 	sc->sc_req_count = htole32(fwinfo.queue_len);
660 
661 	return 0;
662 }
663 
664 #if NBIO > 0
665 static int
666 arc_bioctl(device_t self, u_long cmd, void *addr)
667 {
668 	struct arc_softc *sc = device_private(self);
669 	int error = 0;
670 
671 	switch (cmd) {
672 	case BIOCINQ:
673 		error = arc_bio_inq(sc, (struct bioc_inq *)addr);
674 		break;
675 
676 	case BIOCVOL:
677 		error = arc_bio_vol(sc, (struct bioc_vol *)addr);
678 		break;
679 
680 	case BIOCDISK:
681 		error = arc_bio_disk_volume(sc, (struct bioc_disk *)addr);
682 		break;
683 
684 	case BIOCDISK_NOVOL:
685 		error = arc_bio_disk_novol(sc, (struct bioc_disk *)addr);
686 		break;
687 
688 	case BIOCALARM:
689 		error = arc_bio_alarm(sc, (struct bioc_alarm *)addr);
690 		break;
691 
692 	case BIOCSETSTATE:
693 		error = arc_bio_setstate(sc, (struct bioc_setstate *)addr);
694 		break;
695 
696 	case BIOCVOLOPS:
697 		error = arc_bio_volops(sc, (struct bioc_volops *)addr);
698 		break;
699 
700 	default:
701 		error = ENOTTY;
702 		break;
703 	}
704 
705 	return error;
706 }
707 
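/*
 * Translate a firmware status byte from a management command reply into
 * an errno value, logging a description of the error.
 */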
708 static int
709 arc_fw_parse_status_code(struct arc_softc *sc, uint8_t *reply)
710 {
711 	switch (*reply) {
712 	case ARC_FW_CMD_RAIDINVAL:
713 		printf("%s: firmware error (invalid raid set)\n",
714 		    device_xname(sc->sc_dev));
715 		return EINVAL;
716 	case ARC_FW_CMD_VOLINVAL:
717 		printf("%s: firmware error (invalid volume set)\n",
718 		    device_xname(sc->sc_dev));
719 		return EINVAL;
720 	case ARC_FW_CMD_NORAID:
721 		printf("%s: firmware error (nonexistent raid set)\n",
722 		    device_xname(sc->sc_dev));
723 		return ENODEV;
724 	case ARC_FW_CMD_NOVOLUME:
725 		printf("%s: firmware error (nonexistent volume set)\n",
726 		    device_xname(sc->sc_dev));
727 		return ENODEV;
728 	case ARC_FW_CMD_NOPHYSDRV:
729 		printf("%s: firmware error (nonexistent physical drive)\n",
730 		    device_xname(sc->sc_dev));
731 		return ENODEV;
732 	case ARC_FW_CMD_PARAM_ERR:
733 		printf("%s: firmware error (parameter error)\n",
734 		    device_xname(sc->sc_dev));
735 		return EINVAL;
736 	case ARC_FW_CMD_UNSUPPORTED:
737 		printf("%s: firmware error (unsupported command)\n",
738 		    device_xname(sc->sc_dev));
739 		return EOPNOTSUPP;
740 	case ARC_FW_CMD_DISKCFG_CHGD:
741 		printf("%s: firmware error (disk configuration changed)\n",
742 		    device_xname(sc->sc_dev));
743 		return EINVAL;
744 	case ARC_FW_CMD_PASS_INVAL:
745 		printf("%s: firmware error (invalid password)\n",
746 		    device_xname(sc->sc_dev));
747 		return EINVAL;
748 	case ARC_FW_CMD_NODISKSPACE:
749 		printf("%s: firmware error (no disk space available)\n",
750 		    device_xname(sc->sc_dev));
751 		return EOPNOTSUPP;
752 	case ARC_FW_CMD_CHECKSUM_ERR:
753 		printf("%s: firmware error (checksum error)\n",
754 		    device_xname(sc->sc_dev));
755 		return EINVAL;
756 	case ARC_FW_CMD_PASS_REQD:
757 		printf("%s: firmware error (password required)\n",
758 		    device_xname(sc->sc_dev));
759 		return EPERM;
760 	case ARC_FW_CMD_OK:
761 	default:
762 		return 0;
763 	}
764 }
765 
766 static int
767 arc_bio_alarm(struct arc_softc *sc, struct bioc_alarm *ba)
768 {
769 	uint8_t	request[2], reply[1];
770 	size_t	len;
771 	int	error = 0;
772 
773 	switch (ba->ba_opcode) {
774 	case BIOC_SAENABLE:
775 	case BIOC_SADISABLE:
776 		request[0] = ARC_FW_SET_ALARM;
777 		request[1] = (ba->ba_opcode == BIOC_SAENABLE) ?
778 		    ARC_FW_SET_ALARM_ENABLE : ARC_FW_SET_ALARM_DISABLE;
779 		len = sizeof(request);
780 
781 		break;
782 
783 	case BIOC_SASILENCE:
784 		request[0] = ARC_FW_MUTE_ALARM;
785 		len = 1;
786 
787 		break;
788 
789 	case BIOC_GASTATUS:
790 		/* system info is too big/ugly to deal with here */
791 		return arc_bio_alarm_state(sc, ba);
792 
793 	default:
794 		return EOPNOTSUPP;
795 	}
796 
797 	error = arc_msgbuf(sc, request, len, reply, sizeof(reply));
798 	if (error != 0)
799 		return error;
800 
801 	return arc_fw_parse_status_code(sc, &reply[0]);
802 }
803 
804 static int
805 arc_bio_alarm_state(struct arc_softc *sc, struct bioc_alarm *ba)
806 {
807 	struct arc_fw_sysinfo	*sysinfo;
808 	uint8_t			request;
809 	int			error = 0;
810 
811 	sysinfo = kmem_zalloc(sizeof(*sysinfo), KM_SLEEP);
812 
813 	request = ARC_FW_SYSINFO;
814 	error = arc_msgbuf(sc, &request, sizeof(request),
815 	    sysinfo, sizeof(struct arc_fw_sysinfo));
816 
817 	if (error != 0)
818 		goto out;
819 
820 	ba->ba_status = sysinfo->alarm;
821 
822 out:
823 	kmem_free(sysinfo, sizeof(*sysinfo));
824 	return error;
825 }
826 
827 static int
828 arc_bio_volops(struct arc_softc *sc, struct bioc_volops *bc)
829 {
830 	/* to create a raid set */
831 	struct req_craidset {
832 		uint8_t		cmdcode;
833 		uint32_t	devmask;
834 		uint8_t 	raidset_name[16];
835 	} __packed;
836 
837 	/* to create a volume set */
838 	struct req_cvolset {
839 		uint8_t 	cmdcode;
840 		uint8_t 	raidset;
841 		uint8_t 	volset_name[16];
842 		uint64_t	capacity;
843 		uint8_t 	raidlevel;
844 		uint8_t 	stripe;
845 		uint8_t 	scsi_chan;
846 		uint8_t 	scsi_target;
847 		uint8_t 	scsi_lun;
848 		uint8_t 	tagqueue;
849 		uint8_t 	cache;
850 		uint8_t 	speed;
851 		uint8_t 	quick_init;
852 	} __packed;
853 
854 	struct scsibus_softc	*scsibus_sc = NULL;
855 	struct req_craidset	req_craidset;
856 	struct req_cvolset 	req_cvolset;
857 	uint8_t 		request[2];
858 	uint8_t 		reply[1];
859 	int 			error = 0;
860 
861 	switch (bc->bc_opcode) {
862 	case BIOC_VCREATE_VOLUME:
863 	    {
864 		/*
865 		 * Zero out the structs so that we use some defaults
866 		 * in raid and volume sets.
867 		 */
868 		memset(&req_craidset, 0, sizeof(req_craidset));
869 		memset(&req_cvolset, 0, sizeof(req_cvolset));
870 
871 		/*
872 		 * First we have to create the raid set, using
873 		 * the default name for all of them.
874 		 */
875 		req_craidset.cmdcode = ARC_FW_CREATE_RAIDSET;
876 		req_craidset.devmask = bc->bc_devmask;
877 		error = arc_msgbuf(sc, &req_craidset, sizeof(req_craidset),
878 		    reply, sizeof(reply));
879 		if (error != 0)
880 			return error;
881 
882 		error = arc_fw_parse_status_code(sc, &reply[0]);
883 		if (error) {
884 			printf("%s: create raidset%d failed\n",
885 			    device_xname(sc->sc_dev), bc->bc_volid);
886 			return error;
887 		}
888 
889 		/*
890 		 * At this point the raid set was created, so it's
891 		 * time to create the volume set.
892 		 */
893 		req_cvolset.cmdcode = ARC_FW_CREATE_VOLUME;
894 		req_cvolset.raidset = bc->bc_volid;
895 		req_cvolset.capacity = bc->bc_size * ARC_BLOCKSIZE;
896 
897 		/*
898 		 * Set the RAID level.
899 		 */
900 		switch (bc->bc_level) {
901 		case 0:
902 		case 1:
903 			req_cvolset.raidlevel = bc->bc_level;
904 			break;
905 		case BIOC_SVOL_RAID10:
906 			req_cvolset.raidlevel = 1;
907 			break;
908 		case 3:
909 			req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_3;
910 			break;
911 		case 5:
912 			req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_5;
913 			break;
914 		case 6:
915 			req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_6;
916 			break;
917 		default:
918 			return EOPNOTSUPP;
919 		}
920 
921 		/*
922 		 * Set the stripe size.
923 		 */
924 		switch (bc->bc_stripe) {
925 		case 4:
926 			req_cvolset.stripe = 0;
927 			break;
928 		case 8:
929 			req_cvolset.stripe = 1;
930 			break;
931 		case 16:
932 			req_cvolset.stripe = 2;
933 			break;
934 		case 32:
935 			req_cvolset.stripe = 3;
936 			break;
937 		case 64:
938 			req_cvolset.stripe = 4;
939 			break;
940 		case 128:
941 			req_cvolset.stripe = 5;
942 			break;
943 		default:
944 			req_cvolset.stripe = 4; /* by default 64K */
945 			break;
946 		}
947 
948 		req_cvolset.scsi_chan = bc->bc_channel;
949 		req_cvolset.scsi_target = bc->bc_target;
950 		req_cvolset.scsi_lun = bc->bc_lun;
951 		req_cvolset.tagqueue = 1; /* always enabled */
952 		req_cvolset.cache = 1; /* always enabled */
953 		req_cvolset.speed = 4; /* always max speed */
954 
955 		/* RAID 1 and 1+0 levels need foreground initialization */
956 		if (bc->bc_level == 1 || bc->bc_level == BIOC_SVOL_RAID10)
957 			req_cvolset.quick_init = 1; /* foreground init */
958 
959 		error = arc_msgbuf(sc, &req_cvolset, sizeof(req_cvolset),
960 		    reply, sizeof(reply));
961 		if (error != 0)
962 			return error;
963 
964 		error = arc_fw_parse_status_code(sc, &reply[0]);
965 		if (error) {
966 			printf("%s: create volumeset%d failed\n",
967 			    device_xname(sc->sc_dev), bc->bc_volid);
968 			return error;
969 		}
970 
971 		/*
972 		 * If we are creating a RAID 1 or RAID 1+0 volume,
973 		 * the volume will be created immediately but it won't
974 		 * be available until the initialization is done... so
975 		 * don't bother attaching the sd(4) device.
976 		 */
977 		if (bc->bc_level == 1 || bc->bc_level == BIOC_SVOL_RAID10)
978 			break;
979 
980 		/*
981 		 * Do a rescan on the bus to attach the device associated
982 		 * with the new volume.
983 		 */
984 		scsibus_sc = device_private(sc->sc_scsibus_dv);
985 		(void)scsi_probe_bus(scsibus_sc, bc->bc_target, bc->bc_lun);
986 
987 		break;
988 	    }
989 	case BIOC_VREMOVE_VOLUME:
990 	    {
991 		/*
992 		 * Remove the volume set specified in bc_volid.
993 		 */
994 		request[0] = ARC_FW_DELETE_VOLUME;
995 		request[1] = bc->bc_volid;
996 		error = arc_msgbuf(sc, request, sizeof(request),
997 		    reply, sizeof(reply));
998 		if (error != 0)
999 			return error;
1000 
1001 		error = arc_fw_parse_status_code(sc, &reply[0]);
1002 		if (error) {
1003 			printf("%s: delete volumeset%d failed\n",
1004 			    device_xname(sc->sc_dev), bc->bc_volid);
1005 			return error;
1006 		}
1007 
1008 		/*
1009 		 * Detach the sd(4) device associated with the volume,
1010 		 * but if there's an error don't make it a priority.
1011 		 */
1012 		error = scsipi_target_detach(&sc->sc_chan, bc->bc_target,
1013 					     bc->bc_lun, 0);
1014 		if (error)
1015 			printf("%s: couldn't detach sd device for volume %d "
1016 			    "at %u:%u.%u (error=%d)\n",
1017 			    device_xname(sc->sc_dev), bc->bc_volid,
1018 			    bc->bc_channel, bc->bc_target, bc->bc_lun, error);
1019 
1020 		/*
1021 		 * Also remove the raid set specified in bc_volid;
1022 		 * we only care about volumes.
1023 		 */
1024 		request[0] = ARC_FW_DELETE_RAIDSET;
1025 		request[1] = bc->bc_volid;
1026 		error = arc_msgbuf(sc, request, sizeof(request),
1027 		    reply, sizeof(reply));
1028 		if (error != 0)
1029 			return error;
1030 
1031 		error = arc_fw_parse_status_code(sc, &reply[0]);
1032 		if (error) {
1033 			printf("%s: delete raidset%d failed\n",
1034 			    device_xname(sc->sc_dev), bc->bc_volid);
1035 			return error;
1036 		}
1037 
1038 		break;
1039 	    }
1040 	default:
1041 		return EOPNOTSUPP;
1042 	}
1043 
1044 	return error;
1045 }
1046 
1047 static int
1048 arc_bio_setstate(struct arc_softc *sc, struct bioc_setstate *bs)
1049 {
1050 	/* for a hotspare disk */
1051 	struct request_hs {
1052 		uint8_t		cmdcode;
1053 		uint32_t	devmask;
1054 	} __packed;
1055 
1056 	/* for a pass-through disk */
1057 	struct request_pt {
1058 		uint8_t 	cmdcode;
1059 		uint8_t		devid;
1060 		uint8_t		scsi_chan;
1061 		uint8_t 	scsi_id;
1062 		uint8_t 	scsi_lun;
1063 		uint8_t 	tagged_queue;
1064 		uint8_t 	cache_mode;
1065 		uint8_t 	max_speed;
1066 	} __packed;
1067 
1068 	struct scsibus_softc	*scsibus_sc = NULL;
1069 	struct request_hs	req_hs; /* to add/remove hotspare */
1070 	struct request_pt	req_pt;	/* to add a pass-through */
1071 	uint8_t			req_gen[2];
1072 	uint8_t			reply[1];
1073 	int			error = 0;
1074 
1075 	switch (bs->bs_status) {
1076 	case BIOC_SSHOTSPARE:
1077 	    {
1078 		req_hs.cmdcode = ARC_FW_CREATE_HOTSPARE;
1079 		req_hs.devmask = (1 << bs->bs_target);
1080 		goto hotspare;
1081 	    }
1082 	case BIOC_SSDELHOTSPARE:
1083 	    {
1084 		req_hs.cmdcode = ARC_FW_DELETE_HOTSPARE;
1085 		req_hs.devmask = (1 << bs->bs_target);
1086 		goto hotspare;
1087 	    }
1088 	case BIOC_SSPASSTHRU:
1089 	    {
1090 		req_pt.cmdcode = ARC_FW_CREATE_PASSTHRU;
1091 		req_pt.devid = bs->bs_other_id; /* this wants device# */
1092 		req_pt.scsi_chan = bs->bs_channel;
1093 		req_pt.scsi_id = bs->bs_target;
1094 		req_pt.scsi_lun = bs->bs_lun;
1095 		req_pt.tagged_queue = 1; /* always enabled */
1096 		req_pt.cache_mode = 1; /* always enabled */
1097 		req_pt.max_speed = 4; /* always max speed */
1098 
1099 		error = arc_msgbuf(sc, &req_pt, sizeof(req_pt),
1100 		    reply, sizeof(reply));
1101 		if (error != 0)
1102 			return error;
1103 
1104 		/*
1105 		 * Do a rescan on the bus to attach the new device
1106 		 * associated with the pass-through disk.
1107 		 */
1108 		scsibus_sc = device_private(sc->sc_scsibus_dv);
1109 		(void)scsi_probe_bus(scsibus_sc, bs->bs_target, bs->bs_lun);
1110 
1111 		goto out;
1112 	    }
1113 	case BIOC_SSDELPASSTHRU:
1114 	    {
1115 		req_gen[0] = ARC_FW_DELETE_PASSTHRU;
1116 		req_gen[1] = bs->bs_target;
1117 		error = arc_msgbuf(sc, &req_gen, sizeof(req_gen),
1118 		    reply, sizeof(reply));
1119 		if (error != 0)
1120 			return error;
1121 
1122 		/*
1123 		 * Detach the sd device associated with this pass-through disk.
1124 		 */
1125 		error = scsipi_target_detach(&sc->sc_chan, bs->bs_target,
1126 					     bs->bs_lun, 0);
1127 		if (error)
1128 			printf("%s: couldn't detach sd device for the "
1129 			    "pass-through disk at %u:%u.%u (error=%d)\n",
1130 			    device_xname(sc->sc_dev),
1131 			    bs->bs_channel, bs->bs_target, bs->bs_lun, error);
1132 
1133 		goto out;
1134 	    }
1135 	case BIOC_SSCHECKSTART_VOL:
1136 	    {
1137 		req_gen[0] = ARC_FW_START_CHECKVOL;
1138 		req_gen[1] = bs->bs_volid;
1139 		error = arc_msgbuf(sc, &req_gen, sizeof(req_gen),
1140 		    reply, sizeof(reply));
1141 		if (error != 0)
1142 			return error;
1143 
1144 		goto out;
1145 	    }
1146 	case BIOC_SSCHECKSTOP_VOL:
1147 	    {
1148 		uint8_t req = ARC_FW_STOP_CHECKVOL;
1149 		error = arc_msgbuf(sc, &req, 1, reply, sizeof(reply));
1150 		if (error != 0)
1151 			return error;
1152 
1153 		goto out;
1154 	    }
1155 	default:
1156 		return EOPNOTSUPP;
1157 	}
1158 
1159 hotspare:
1160 	error = arc_msgbuf(sc, &req_hs, sizeof(req_hs),
1161 	    reply, sizeof(reply));
1162 	if (error != 0)
1163 		return error;
1164 
1165 out:
1166 	return arc_fw_parse_status_code(sc, &reply[0]);
1167 }
1168 
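/*
 * BIOCINQ: fetch the controller limits from the firmware system info
 * (cached in the softc) and count the volumes over all raid sets.
 */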
1169 static int
1170 arc_bio_inq(struct arc_softc *sc, struct bioc_inq *bi)
1171 {
1172 	uint8_t			request[2];
1173 	struct arc_fw_sysinfo	*sysinfo = NULL;
1174 	struct arc_fw_raidinfo	*raidinfo;
1175 	int			nvols = 0, i;
1176 	int			error = 0;
1177 
1178 	raidinfo = kmem_zalloc(sizeof(*raidinfo), KM_SLEEP);
1179 
1180 	if (!sc->sc_maxraidset || !sc->sc_maxvolset || !sc->sc_cchans) {
1181 		sysinfo = kmem_zalloc(sizeof(*sysinfo), KM_SLEEP);
1182 
1183 		request[0] = ARC_FW_SYSINFO;
1184 		error = arc_msgbuf(sc, request, 1, sysinfo,
1185 		    sizeof(struct arc_fw_sysinfo));
1186 		if (error != 0)
1187 			goto out;
1188 
1189 		sc->sc_maxraidset = sysinfo->max_raid_set;
1190 		sc->sc_maxvolset = sysinfo->max_volume_set;
1191 		sc->sc_cchans = sysinfo->ide_channels;
1192 	}
1193 
1194 	request[0] = ARC_FW_RAIDINFO;
1195 	for (i = 0; i < sc->sc_maxraidset; i++) {
1196 		request[1] = i;
1197 		error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
1198 		    sizeof(struct arc_fw_raidinfo));
1199 		if (error != 0)
1200 			goto out;
1201 
1202 		nvols += raidinfo->volumes;
1203 	}
1204 
1205 	strlcpy(bi->bi_dev, device_xname(sc->sc_dev), sizeof(bi->bi_dev));
1206 	bi->bi_novol = nvols;
1207 	bi->bi_nodisk = sc->sc_cchans;
1208 
1209 out:
1210 	if (sysinfo)
1211 		kmem_free(sysinfo, sizeof(*sysinfo));
1212 	kmem_free(raidinfo, sizeof(*raidinfo));
1213 	return error;
1214 }
1215 
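/*
 * Look up the firmware volume info that corresponds to the bio(4) volume
 * index "vol", skipping over empty volume set slots.
 */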
1216 static int
1217 arc_bio_getvol(struct arc_softc *sc, int vol, struct arc_fw_volinfo *volinfo)
1218 {
1219 	uint8_t			request[2];
1220 	int			error = 0;
1221 	int			nvols = 0, i;
1222 
1223 	request[0] = ARC_FW_VOLINFO;
1224 	for (i = 0; i < sc->sc_maxvolset; i++) {
1225 		request[1] = i;
1226 		error = arc_msgbuf(sc, request, sizeof(request), volinfo,
1227 		    sizeof(struct arc_fw_volinfo));
1228 		if (error != 0)
1229 			goto out;
1230 
1231 		if (volinfo->capacity == 0 && volinfo->capacity2 == 0)
1232 			continue;
1233 
1234 		if (nvols == vol)
1235 			break;
1236 
1237 		nvols++;
1238 	}
1239 
1240 	if (nvols != vol ||
1241 	    (volinfo->capacity == 0 && volinfo->capacity2 == 0)) {
1242 		error = ENODEV;
1243 		goto out;
1244 	}
1245 
1246 out:
1247 	return error;
1248 }
1249 
1250 static int
1251 arc_bio_vol(struct arc_softc *sc, struct bioc_vol *bv)
1252 {
1253 	struct arc_fw_volinfo	*volinfo;
1254 	uint64_t		blocks;
1255 	uint32_t		status;
1256 	int			error = 0;
1257 
1258 	volinfo = kmem_zalloc(sizeof(*volinfo), KM_SLEEP);
1259 
1260 	error = arc_bio_getvol(sc, bv->bv_volid, volinfo);
1261 	if (error != 0)
1262 		goto out;
1263 
1264 	bv->bv_percent = -1;
1265 	bv->bv_seconds = 0;
1266 
1267 	status = htole32(volinfo->volume_status);
1268 	if (status == 0x0) {
1269 		if (htole32(volinfo->fail_mask) == 0x0)
1270 			bv->bv_status = BIOC_SVONLINE;
1271 		else
1272 			bv->bv_status = BIOC_SVDEGRADED;
1273 	} else if (status & ARC_FW_VOL_STATUS_NEED_REGEN) {
1274 		bv->bv_status = BIOC_SVDEGRADED;
1275 	} else if (status & ARC_FW_VOL_STATUS_FAILED) {
1276 		bv->bv_status = BIOC_SVOFFLINE;
1277 	} else if (status & ARC_FW_VOL_STATUS_INITTING) {
1278 		bv->bv_status = BIOC_SVBUILDING;
1279 		bv->bv_percent = htole32(volinfo->progress);
1280 	} else if (status & ARC_FW_VOL_STATUS_REBUILDING) {
1281 		bv->bv_status = BIOC_SVREBUILD;
1282 		bv->bv_percent = htole32(volinfo->progress);
1283 	} else if (status & ARC_FW_VOL_STATUS_MIGRATING) {
1284 		bv->bv_status = BIOC_SVMIGRATING;
1285 		bv->bv_percent = htole32(volinfo->progress);
1286 	} else if (status & ARC_FW_VOL_STATUS_CHECKING) {
1287 		bv->bv_status = BIOC_SVCHECKING;
1288 		bv->bv_percent = htole32(volinfo->progress);
1289 	} else if (status & ARC_FW_VOL_STATUS_NEED_INIT) {
1290 		bv->bv_status = BIOC_SVOFFLINE;
1291 	} else {
1292 		printf("%s: volume %d status 0x%x\n",
1293 		    device_xname(sc->sc_dev), bv->bv_volid, status);
1294 	}
1295 
1296 	blocks = (uint64_t)htole32(volinfo->capacity2) << 32;
1297 	blocks += (uint64_t)htole32(volinfo->capacity);
1298 	bv->bv_size = blocks * ARC_BLOCKSIZE; /* XXX */
1299 
1300 	switch (volinfo->raid_level) {
1301 	case ARC_FW_VOL_RAIDLEVEL_0:
1302 		bv->bv_level = 0;
1303 		break;
1304 	case ARC_FW_VOL_RAIDLEVEL_1:
1305 		if (volinfo->member_disks > 2)
1306 			bv->bv_level = BIOC_SVOL_RAID10;
1307 		else
1308 			bv->bv_level = 1;
1309 		break;
1310 	case ARC_FW_VOL_RAIDLEVEL_3:
1311 		bv->bv_level = 3;
1312 		break;
1313 	case ARC_FW_VOL_RAIDLEVEL_5:
1314 		bv->bv_level = 5;
1315 		break;
1316 	case ARC_FW_VOL_RAIDLEVEL_6:
1317 		bv->bv_level = 6;
1318 		break;
1319 	case ARC_FW_VOL_RAIDLEVEL_PASSTHRU:
1320 		bv->bv_level = BIOC_SVOL_PASSTHRU;
1321 		break;
1322 	default:
1323 		bv->bv_level = -1;
1324 		break;
1325 	}
1326 
1327 	bv->bv_nodisk = volinfo->member_disks;
1328 	bv->bv_stripe_size = volinfo->stripe_size / 2;
1329 	snprintf(bv->bv_dev, sizeof(bv->bv_dev), "sd%d", bv->bv_volid);
1330 	scsipi_strvis(bv->bv_vendor, sizeof(bv->bv_vendor), volinfo->set_name,
1331 	    sizeof(volinfo->set_name));
1332 
1333 out:
1334 	kmem_free(volinfo, sizeof(*volinfo));
1335 	return error;
1336 }
1337 
1338 static int
1339 arc_bio_disk_novol(struct arc_softc *sc, struct bioc_disk *bd)
1340 {
1341 	struct arc_fw_diskinfo	*diskinfo;
1342 	uint8_t			request[2];
1343 	int			error = 0;
1344 
1345 	diskinfo = kmem_zalloc(sizeof(*diskinfo), KM_SLEEP);
1346 
1347 	if (bd->bd_diskid >= sc->sc_cchans) {
1348 		error = ENODEV;
1349 		goto out;
1350 	}
1351 
1352 	request[0] = ARC_FW_DISKINFO;
1353 	request[1] = bd->bd_diskid;
1354 	error = arc_msgbuf(sc, request, sizeof(request),
1355 	    diskinfo, sizeof(struct arc_fw_diskinfo));
1356 	if (error != 0)
1357 		goto out;
1358 
1359 	/* skip disks with no capacity */
1360 	if (htole32(diskinfo->capacity) == 0 &&
1361 	    htole32(diskinfo->capacity2) == 0)
1362 		goto out;
1363 
1364 	bd->bd_disknovol = true;
1365 	arc_bio_disk_filldata(sc, bd, diskinfo, bd->bd_diskid);
1366 
1367 out:
1368 	kmem_free(diskinfo, sizeof(*diskinfo));
1369 	return error;
1370 }
1371 
1372 static void
1373 arc_bio_disk_filldata(struct arc_softc *sc, struct bioc_disk *bd,
1374 		     struct arc_fw_diskinfo *diskinfo, int diskid)
1375 {
1376 	uint64_t		blocks;
1377 	char			model[81];
1378 	char			serial[41];
1379 	char			rev[17];
1380 
1381 	/* Ignore bit zero for now, we don't know what it means */
1382 	diskinfo->device_state &= ~0x1;
1383 
1384 	switch (diskinfo->device_state) {
1385 	case ARC_FW_DISK_FAILED:
1386 		bd->bd_status = BIOC_SDFAILED;
1387 		break;
1388 	case ARC_FW_DISK_PASSTHRU:
1389 		bd->bd_status = BIOC_SDPASSTHRU;
1390 		break;
1391 	case ARC_FW_DISK_NORMAL:
1392 		bd->bd_status = BIOC_SDONLINE;
1393 		break;
1394 	case ARC_FW_DISK_HOTSPARE:
1395 		bd->bd_status = BIOC_SDHOTSPARE;
1396 		break;
1397 	case ARC_FW_DISK_UNUSED:
1398 		bd->bd_status = BIOC_SDUNUSED;
1399 		break;
1400 	case 0:
1401 		/* disk has been disconnected */
1402 		bd->bd_status = BIOC_SDOFFLINE;
1403 		bd->bd_channel = 1;
1404 		bd->bd_target = 0;
1405 		bd->bd_lun = 0;
1406 		strlcpy(bd->bd_vendor, "disk missing", sizeof(bd->bd_vendor));
1407 		break;
1408 	default:
1409 		printf("%s: unknown disk device_state: 0x%x\n", __func__,
1410 		    diskinfo->device_state);
1411 		bd->bd_status = BIOC_SDINVALID;
1412 		return;
1413 	}
1414 
1415 	blocks = (uint64_t)htole32(diskinfo->capacity2) << 32;
1416 	blocks += (uint64_t)htole32(diskinfo->capacity);
1417 	bd->bd_size = blocks * ARC_BLOCKSIZE; /* XXX */
1418 
1419 	scsipi_strvis(model, 81, diskinfo->model, sizeof(diskinfo->model));
1420 	scsipi_strvis(serial, 41, diskinfo->serial, sizeof(diskinfo->serial));
1421 	scsipi_strvis(rev, 17, diskinfo->firmware_rev,
1422 	    sizeof(diskinfo->firmware_rev));
1423 
1424 	snprintf(bd->bd_vendor, sizeof(bd->bd_vendor), "%s %s", model, rev);
1425 	strlcpy(bd->bd_serial, serial, sizeof(bd->bd_serial));
1426 
1427 #if 0
1428 	bd->bd_channel = diskinfo->scsi_attr.channel;
1429 	bd->bd_target = diskinfo->scsi_attr.target;
1430 	bd->bd_lun = diskinfo->scsi_attr.lun;
1431 #endif
1432 
1433 	/*
1434 	 * The firmware doesn't seem to fill scsi_attr in, so fake it with
1435 	 * the diskid.
1436 	 */
1437 	bd->bd_channel = 0;
1438 	bd->bd_target = diskid;
1439 	bd->bd_lun = 0;
1440 }
1441 
1442 static int
1443 arc_bio_disk_volume(struct arc_softc *sc, struct bioc_disk *bd)
1444 {
1445 	struct arc_fw_raidinfo	*raidinfo;
1446 	struct arc_fw_volinfo	*volinfo;
1447 	struct arc_fw_diskinfo	*diskinfo;
1448 	uint8_t			request[2];
1449 	int			error = 0;
1450 
1451 	volinfo = kmem_zalloc(sizeof(*volinfo), KM_SLEEP);
1452 	raidinfo = kmem_zalloc(sizeof(*raidinfo), KM_SLEEP);
1453 	diskinfo = kmem_zalloc(sizeof(*diskinfo), KM_SLEEP);
1454 
1455 	error = arc_bio_getvol(sc, bd->bd_volid, volinfo);
1456 	if (error != 0)
1457 		goto out;
1458 
1459 	request[0] = ARC_FW_RAIDINFO;
1460 	request[1] = volinfo->raid_set_number;
1461 
1462 	error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
1463 	    sizeof(struct arc_fw_raidinfo));
1464 	if (error != 0)
1465 		goto out;
1466 
1467 	if (bd->bd_diskid >= sc->sc_cchans ||
1468 	    bd->bd_diskid >= raidinfo->member_devices) {
1469 		error = ENODEV;
1470 		goto out;
1471 	}
1472 
1473 	if (raidinfo->device_array[bd->bd_diskid] == 0xff) {
1474 		/*
1475 		 * The disk has been disconnected, mark it offline
1476 		 * and put it on another bus.
1477 		 */
1478 		bd->bd_channel = 1;
1479 		bd->bd_target = 0;
1480 		bd->bd_lun = 0;
1481 		bd->bd_status = BIOC_SDOFFLINE;
1482 		strlcpy(bd->bd_vendor, "disk missing", sizeof(bd->bd_vendor));
1483 		goto out;
1484 	}
1485 
1486 	request[0] = ARC_FW_DISKINFO;
1487 	request[1] = raidinfo->device_array[bd->bd_diskid];
1488 	error = arc_msgbuf(sc, request, sizeof(request), diskinfo,
1489 	    sizeof(struct arc_fw_diskinfo));
1490 	if (error != 0)
1491 		goto out;
1492 
1493 	/* now fill our bio disk with data from the firmware */
1494 	arc_bio_disk_filldata(sc, bd, diskinfo,
1495 	    raidinfo->device_array[bd->bd_diskid]);
1496 
1497 out:
1498 	kmem_free(raidinfo, sizeof(*raidinfo));
1499 	kmem_free(volinfo, sizeof(*volinfo));
1500 	kmem_free(diskinfo, sizeof(*diskinfo));
1501 	return error;
1502 }
1503 #endif /* NBIO > 0 */
1504 
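/*
 * Checksum used by the firmware message buffer protocol: the sum of both
 * length bytes and every byte of the payload.
 */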
1505 uint8_t
1506 arc_msg_cksum(void *cmd, uint16_t len)
1507 {
1508 	uint8_t	*buf = cmd;
1509 	uint8_t	cksum;
1510 	int	i;
1511 
1512 	cksum = (uint8_t)(len >> 8) + (uint8_t)len;
1513 	for (i = 0; i < len; i++)
1514 		cksum += buf[i];
1515 
1516 	return cksum;
1517 }
1518 
1519 
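/*
 * Exchange a management command with the firmware through the IOC
 * read/write buffers.  The request and reply are wrapped in an
 * arc_fw_bufhdr plus a trailing checksum and are moved in chunks of at
 * most ARC_REG_IOC_RWBUF_MAXLEN bytes, with the doorbell registers used
 * to hand each chunk back and forth.
 */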
1520 int
1521 arc_msgbuf(struct arc_softc *sc, void *wptr, size_t wbuflen, void *rptr,
1522 	   size_t rbuflen)
1523 {
1524 	uint8_t			rwbuf[ARC_REG_IOC_RWBUF_MAXLEN];
1525 	uint8_t			*wbuf, *rbuf;
1526 	int			wlen, wdone = 0, rlen, rdone = 0;
1527 	struct arc_fw_bufhdr	*bufhdr;
1528 	uint32_t		reg, rwlen;
1529 	int			error = 0;
1530 #ifdef ARC_DEBUG
1531 	int			i;
1532 #endif
1533 
1534 	wbuf = rbuf = NULL;
1535 
1536 	DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wbuflen: %d rbuflen: %d\n",
1537 	    device_xname(sc->sc_dev), wbuflen, rbuflen);
1538 
1539 	wlen = sizeof(struct arc_fw_bufhdr) + wbuflen + 1; /* 1 for cksum */
1540 	wbuf = kmem_alloc(wlen, KM_SLEEP);
1541 
1542 	rlen = sizeof(struct arc_fw_bufhdr) + rbuflen + 1; /* 1 for cksum */
1543 	rbuf = kmem_alloc(rlen, KM_SLEEP);
1544 
1545 	DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wlen: %d rlen: %d\n",
1546 	    device_xname(sc->sc_dev), wlen, rlen);
1547 
1548 	bufhdr = (struct arc_fw_bufhdr *)wbuf;
1549 	bufhdr->hdr = arc_fw_hdr;
1550 	bufhdr->len = htole16(wbuflen);
1551 	memcpy(wbuf + sizeof(struct arc_fw_bufhdr), wptr, wbuflen);
1552 	wbuf[wlen - 1] = arc_msg_cksum(wptr, wbuflen);
1553 
1554 	arc_lock(sc);
1555 	if (arc_read(sc, ARC_REG_OUTB_DOORBELL) != 0) {
1556 		error = EBUSY;
1557 		goto out;
1558 	}
1559 
1560 	reg = ARC_REG_OUTB_DOORBELL_READ_OK;
1561 
1562 	do {
1563 		if ((reg & ARC_REG_OUTB_DOORBELL_READ_OK) && wdone < wlen) {
1564 			memset(rwbuf, 0, sizeof(rwbuf));
1565 			rwlen = (wlen - wdone) % sizeof(rwbuf);
1566 			memcpy(rwbuf, &wbuf[wdone], rwlen);
1567 
1568 #ifdef ARC_DEBUG
1569 			if (arcdebug & ARC_D_DB) {
1570 				printf("%s: write %d:",
1571 				    device_xname(sc->sc_dev), rwlen);
1572 				for (i = 0; i < rwlen; i++)
1573 					printf(" 0x%02x", rwbuf[i]);
1574 				printf("\n");
1575 			}
1576 #endif
1577 
1578 			/* copy the chunk to the hw */
1579 			arc_write(sc, ARC_REG_IOC_WBUF_LEN, rwlen);
1580 			arc_write_region(sc, ARC_REG_IOC_WBUF, rwbuf,
1581 			    sizeof(rwbuf));
1582 
1583 			/* say we have a buffer for the hw */
1584 			arc_write(sc, ARC_REG_INB_DOORBELL,
1585 			    ARC_REG_INB_DOORBELL_WRITE_OK);
1586 
1587 			wdone += rwlen;
1588 		}
1589 
1590 		while ((reg = arc_read(sc, ARC_REG_OUTB_DOORBELL)) == 0)
1591 			arc_wait(sc);
1592 
1593 		arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
1594 
1595 		DNPRINTF(ARC_D_DB, "%s: reg: 0x%08x\n",
1596 		    device_xname(sc->sc_dev), reg);
1597 
1598 		if ((reg & ARC_REG_OUTB_DOORBELL_WRITE_OK) && rdone < rlen) {
1599 			rwlen = arc_read(sc, ARC_REG_IOC_RBUF_LEN);
1600 			if (rwlen > sizeof(rwbuf)) {
1601 				DNPRINTF(ARC_D_DB, "%s:  rwlen too big\n",
1602 				    device_xname(sc->sc_dev));
1603 				error = EIO;
1604 				goto out;
1605 			}
1606 
1607 			arc_read_region(sc, ARC_REG_IOC_RBUF, rwbuf,
1608 			    sizeof(rwbuf));
1609 
1610 			arc_write(sc, ARC_REG_INB_DOORBELL,
1611 			    ARC_REG_INB_DOORBELL_READ_OK);
1612 
1613 #ifdef ARC_DEBUG
1614 			printf("%s:  len: %d+%d=%d/%d\n",
1615 			    device_xname(sc->sc_dev),
1616 			    rwlen, rdone, rwlen + rdone, rlen);
1617 			if (arcdebug & ARC_D_DB) {
1618 				printf("%s: read:",
1619 				    device_xname(sc->sc_dev));
1620 				for (i = 0; i < rwlen; i++)
1621 					printf(" 0x%02x", rwbuf[i]);
1622 				printf("\n");
1623 			}
1624 #endif
1625 
1626 			if ((rdone + rwlen) > rlen) {
1627 				DNPRINTF(ARC_D_DB, "%s:  rwbuf too big\n",
1628 				    device_xname(sc->sc_dev));
1629 				error = EIO;
1630 				goto out;
1631 			}
1632 
1633 			memcpy(&rbuf[rdone], rwbuf, rwlen);
1634 			rdone += rwlen;
1635 		}
1636 	} while (rdone != rlen);
1637 
1638 	bufhdr = (struct arc_fw_bufhdr *)rbuf;
1639 	if (memcmp(&bufhdr->hdr, &arc_fw_hdr, sizeof(bufhdr->hdr)) != 0 ||
1640 	    bufhdr->len != htole16(rbuflen)) {
1641 		DNPRINTF(ARC_D_DB, "%s:  rbuf hdr is wrong\n",
1642 		    device_xname(sc->sc_dev));
1643 		error = EIO;
1644 		goto out;
1645 	}
1646 
1647 	memcpy(rptr, rbuf + sizeof(struct arc_fw_bufhdr), rbuflen);
1648 
1649 	if (rbuf[rlen - 1] != arc_msg_cksum(rptr, rbuflen)) {
1650 		DNPRINTF(ARC_D_DB, "%s:  invalid cksum\n",
1651 		    device_xname(sc->sc_dev));
1652 		error = EIO;
1653 		goto out;
1654 	}
1655 
1656 out:
1657 	arc_unlock(sc);
1658 	kmem_free(wbuf, wlen);
1659 	kmem_free(rbuf, rlen);
1660 
1661 	return error;
1662 }
1663 
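/*
 * arc_lock/arc_unlock serialise access to the firmware message buffer:
 * while sc_talking is set, doorbell interrupts wake up arc_msgbuf() via
 * the condvar instead of being handled in arc_intr().  arc_wait() enables
 * the doorbell interrupt and sleeps until it fires (or a second passes).
 */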
1664 void
1665 arc_lock(struct arc_softc *sc)
1666 {
1667 	rw_enter(&sc->sc_rwlock, RW_WRITER);
1668 	mutex_spin_enter(&sc->sc_mutex);
1669 	arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
1670 	sc->sc_talking = 1;
1671 }
1672 
1673 void
1674 arc_unlock(struct arc_softc *sc)
1675 {
1676 	KASSERT(mutex_owned(&sc->sc_mutex));
1677 
1678 	arc_write(sc, ARC_REG_INTRMASK,
1679 	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
1680 	sc->sc_talking = 0;
1681 	mutex_spin_exit(&sc->sc_mutex);
1682 	rw_exit(&sc->sc_rwlock);
1683 }
1684 
1685 void
1686 arc_wait(struct arc_softc *sc)
1687 {
1688 	KASSERT(mutex_owned(&sc->sc_mutex));
1689 
1690 	arc_write(sc, ARC_REG_INTRMASK,
1691 	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
1692 	if (cv_timedwait(&sc->sc_condvar, &sc->sc_mutex, hz) == EWOULDBLOCK)
1693 		arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
1694 }
1695 
1696 #if NBIO > 0
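/*
 * Kernel thread that queries the firmware for the volumes and disks
 * present and attaches one ENVSYS_DRIVE sensor for each of them, then
 * registers the envsys driver.
 */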
1697 static void
1698 arc_create_sensors(void *arg)
1699 {
1700 	struct arc_softc	*sc = arg;
1701 	struct bioc_inq		bi;
1702 	struct bioc_vol		bv;
1703 	int			i, j;
1704 	size_t			slen, count = 0;
1705 
1706 	memset(&bi, 0, sizeof(bi));
1707 	if (arc_bio_inq(sc, &bi) != 0) {
1708 		aprint_error("%s: unable to query firmware for sensor info\n",
1709 		    device_xname(sc->sc_dev));
1710 		kthread_exit(0);
1711 	}
1712 
1713 	/* There's no point in continuing if there are no volumes */
1714 	if (!bi.bi_novol)
1715 		kthread_exit(0);
1716 
1717 	for (i = 0; i < bi.bi_novol; i++) {
1718 		memset(&bv, 0, sizeof(bv));
1719 		bv.bv_volid = i;
1720 		if (arc_bio_vol(sc, &bv) != 0)
1721 			kthread_exit(0);
1722 
1723 		/* Skip passthrough volumes */
1724 		if (bv.bv_level == BIOC_SVOL_PASSTHRU)
1725 			continue;
1726 
1727 		/* new volume found */
1728 		sc->sc_nsensors++;
1729 		/* plus one sensor per disk in this volume */
1730 		sc->sc_nsensors += bv.bv_nodisk;
1731 	}
1732 
1733 	/* No valid volumes */
1734 	if (!sc->sc_nsensors)
1735 		kthread_exit(0);
1736 
1737 	sc->sc_sme = sysmon_envsys_create();
1738 	slen = sizeof(arc_edata_t) * sc->sc_nsensors;
1739 	sc->sc_arc_sensors = kmem_zalloc(slen, KM_SLEEP);
1740 
1741 	/* Attach sensors for volumes and disks */
1742 	for (i = 0; i < bi.bi_novol; i++) {
1743 		memset(&bv, 0, sizeof(bv));
1744 		bv.bv_volid = i;
1745 		if (arc_bio_vol(sc, &bv) != 0)
1746 			goto bad;
1747 
1748 		sc->sc_arc_sensors[count].arc_sensor.units = ENVSYS_DRIVE;
1749 		sc->sc_arc_sensors[count].arc_sensor.state = ENVSYS_SINVALID;
1750 		sc->sc_arc_sensors[count].arc_sensor.value_cur =
1751 		    ENVSYS_DRIVE_EMPTY;
1752 		sc->sc_arc_sensors[count].arc_sensor.flags =
1753 		    ENVSYS_FMONSTCHANGED;
1754 
1755 		/* Skip passthrough volumes */
1756 		if (bv.bv_level == BIOC_SVOL_PASSTHRU)
1757 			continue;
1758 
1759 		if (bv.bv_level == BIOC_SVOL_RAID10)
1760 			snprintf(sc->sc_arc_sensors[count].arc_sensor.desc,
1761 			    sizeof(sc->sc_arc_sensors[count].arc_sensor.desc),
1762 			    "RAID 1+0 volume%d (%s)", i, bv.bv_dev);
1763 		else
1764 			snprintf(sc->sc_arc_sensors[count].arc_sensor.desc,
1765 			    sizeof(sc->sc_arc_sensors[count].arc_sensor.desc),
1766 			    "RAID %d volume%d (%s)", bv.bv_level, i,
1767 			    bv.bv_dev);
1768 
1769 		sc->sc_arc_sensors[count].arc_volid = i;
1770 
1771 		if (sysmon_envsys_sensor_attach(sc->sc_sme,
1772 		    &sc->sc_arc_sensors[count].arc_sensor))
1773 			goto bad;
1774 
1775 		count++;
1776 
1777 		/* Attach disk sensors for this volume */
1778 		for (j = 0; j < bv.bv_nodisk; j++) {
1779 			sc->sc_arc_sensors[count].arc_sensor.state =
1780 			    ENVSYS_SINVALID;
1781 			sc->sc_arc_sensors[count].arc_sensor.units =
1782 			    ENVSYS_DRIVE;
1783 			sc->sc_arc_sensors[count].arc_sensor.value_cur =
1784 			    ENVSYS_DRIVE_EMPTY;
1785 			sc->sc_arc_sensors[count].arc_sensor.flags =
1786 			    ENVSYS_FMONSTCHANGED;
1787 
1788 			snprintf(sc->sc_arc_sensors[count].arc_sensor.desc,
1789 			    sizeof(sc->sc_arc_sensors[count].arc_sensor.desc),
1790 			    "disk%d volume%d (%s)", j, i, bv.bv_dev);
1791 			sc->sc_arc_sensors[count].arc_volid = i;
1792 			sc->sc_arc_sensors[count].arc_diskid = j + 10;
1793 
1794 			if (sysmon_envsys_sensor_attach(sc->sc_sme,
1795 			    &sc->sc_arc_sensors[count].arc_sensor))
1796 				goto bad;
1797 
1798 			count++;
1799 		}
1800 	}
1801 
1802 	/*
1803 	 * Register our envsys driver with the framework now that the
1804 	 * sensors were all attached.
1805 	 */
1806 	sc->sc_sme->sme_name = device_xname(sc->sc_dev);
1807 	sc->sc_sme->sme_cookie = sc;
1808 	sc->sc_sme->sme_refresh = arc_refresh_sensors;
1809 
1810 	if (sysmon_envsys_register(sc->sc_sme)) {
1811 		aprint_debug("%s: unable to register with sysmon\n",
1812 		    device_xname(sc->sc_dev));
1813 		goto bad;
1814 	}
1815 	kthread_exit(0);
1816 
1817 bad:
1818 	sysmon_envsys_destroy(sc->sc_sme);
1819 	kmem_free(sc->sc_arc_sensors, slen);
1820 
1821 	sc->sc_sme = NULL;
1822 	sc->sc_arc_sensors = NULL;
1823 
1824 	kthread_exit(0);
1825 }
1826 
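/*
 * envsys refresh callback: requery the volume (or disk) state behind the
 * sensor and map the bio(4) status to an envsys drive state.
 */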
1827 static void
1828 arc_refresh_sensors(struct sysmon_envsys *sme, envsys_data_t *edata)
1829 {
1830 	struct arc_softc	*sc = sme->sme_cookie;
1831 	struct bioc_vol		bv;
1832 	struct bioc_disk	bd;
1833 	arc_edata_t		*arcdata = (arc_edata_t *)edata;
1834 
1835 	/* sanity check */
1836 	if (edata->units != ENVSYS_DRIVE)
1837 		return;
1838 
1839 	memset(&bv, 0, sizeof(bv));
1840 	bv.bv_volid = arcdata->arc_volid;
1841 
1842 	if (arc_bio_vol(sc, &bv)) {
1843 		edata->value_cur = ENVSYS_DRIVE_EMPTY;
1844 		edata->state = ENVSYS_SINVALID;
1845 		return;
1846 	}
1847 
1848 	/* Current sensor is handling a disk volume member */
1849 	if (arcdata->arc_diskid) {
1850 		memset(&bd, 0, sizeof(bd));
1851 		bd.bd_volid = arcdata->arc_volid;
1852 		bd.bd_diskid = arcdata->arc_diskid - 10;
1853 
1854 		if (arc_bio_disk_volume(sc, &bd)) {
1855 			edata->value_cur = ENVSYS_DRIVE_OFFLINE;
1856 			edata->state = ENVSYS_SCRITICAL;
1857 			return;
1858 		}
1859 
1860 		switch (bd.bd_status) {
1861 		case BIOC_SDONLINE:
1862 			edata->value_cur = ENVSYS_DRIVE_ONLINE;
1863 			edata->state = ENVSYS_SVALID;
1864 			break;
1865 		case BIOC_SDOFFLINE:
1866 			edata->value_cur = ENVSYS_DRIVE_OFFLINE;
1867 			edata->state = ENVSYS_SCRITICAL;
1868 			break;
1869 		default:
1870 			edata->value_cur = ENVSYS_DRIVE_FAIL;
1871 			edata->state = ENVSYS_SCRITICAL;
1872 			break;
1873 		}
1874 
1875 		return;
1876 	}
1877 
1878 	/* Current sensor is handling a volume */
1879 	switch (bv.bv_status) {
1880 	case BIOC_SVOFFLINE:
1881 		edata->value_cur = ENVSYS_DRIVE_OFFLINE;
1882 		edata->state = ENVSYS_SCRITICAL;
1883 		break;
1884 	case BIOC_SVDEGRADED:
1885 		edata->value_cur = ENVSYS_DRIVE_PFAIL;
1886 		edata->state = ENVSYS_SCRITICAL;
1887 		break;
1888 	case BIOC_SVBUILDING:
1889 		edata->value_cur = ENVSYS_DRIVE_BUILD;
1890 		edata->state = ENVSYS_SVALID;
1891 		break;
1892 	case BIOC_SVMIGRATING:
1893 		edata->value_cur = ENVSYS_DRIVE_MIGRATING;
1894 		edata->state = ENVSYS_SVALID;
1895 		break;
1896 	case BIOC_SVCHECKING:
1897 		edata->value_cur = ENVSYS_DRIVE_CHECK;
1898 		edata->state = ENVSYS_SVALID;
1899 		break;
1900 	case BIOC_SVREBUILD:
1901 		edata->value_cur = ENVSYS_DRIVE_REBUILD;
1902 		edata->state = ENVSYS_SCRITICAL;
1903 		break;
1904 	case BIOC_SVSCRUB:
1905 	case BIOC_SVONLINE:
1906 		edata->value_cur = ENVSYS_DRIVE_ONLINE;
1907 		edata->state = ENVSYS_SVALID;
1908 		break;
1909 	case BIOC_SVINVALID:
1910 		/* FALLTHROUGH */
1911 	default:
1912 		edata->value_cur = ENVSYS_DRIVE_EMPTY; /* unknown state */
1913 		edata->state = ENVSYS_SINVALID;
1914 		break;
1915 	}
1916 }
1917 #endif /* NBIO > 0 */
1918 
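/*
 * Register space accessors; each read/write is paired with a bus space
 * barrier.
 */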
1919 uint32_t
1920 arc_read(struct arc_softc *sc, bus_size_t r)
1921 {
1922 	uint32_t			v;
1923 
1924 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1925 	    BUS_SPACE_BARRIER_READ);
1926 	v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
1927 
1928 	DNPRINTF(ARC_D_RW, "%s: arc_read 0x%lx 0x%08x\n",
1929 	    device_xname(sc->sc_dev), r, v);
1930 
1931 	return v;
1932 }
1933 
1934 void
1935 arc_read_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
1936 {
1937 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
1938 	    BUS_SPACE_BARRIER_READ);
1939 	bus_space_read_region_4(sc->sc_iot, sc->sc_ioh, r,
1940 	    (uint32_t *)buf, len >> 2);
1941 }
1942 
1943 void
1944 arc_write(struct arc_softc *sc, bus_size_t r, uint32_t v)
1945 {
1946 	DNPRINTF(ARC_D_RW, "%s: arc_write 0x%lx 0x%08x\n",
1947 	    device_xname(sc->sc_dev), r, v);
1948 
1949 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
1950 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1951 	    BUS_SPACE_BARRIER_WRITE);
1952 }
1953 
1954 void
1955 arc_write_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
1956 {
1957 	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, r,
1958 	    (const uint32_t *)buf, len >> 2);
1959 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
1960 	    BUS_SPACE_BARRIER_WRITE);
1961 }
1962 
1963 int
1964 arc_wait_eq(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1965 	    uint32_t target)
1966 {
1967 	int i;
1968 
1969 	DNPRINTF(ARC_D_RW, "%s: arc_wait_eq 0x%lx 0x%08x 0x%08x\n",
1970 	    device_xname(sc->sc_dev), r, mask, target);
1971 
1972 	for (i = 0; i < 10000; i++) {
1973 		if ((arc_read(sc, r) & mask) == target)
1974 			return 0;
1975 		delay(1000);
1976 	}
1977 
1978 	return 1;
1979 }
1980 
1981 int
1982 arc_wait_ne(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1983 	    uint32_t target)
1984 {
1985 	int i;
1986 
1987 	DNPRINTF(ARC_D_RW, "%s: arc_wait_ne 0x%lx 0x%08x 0x%08x\n",
1988 	    device_xname(sc->sc_dev), r, mask, target);
1989 
1990 	for (i = 0; i < 10000; i++) {
1991 		if ((arc_read(sc, r) & mask) != target)
1992 			return 0;
1993 		delay(1000);
1994 	}
1995 
1996 	return 1;
1997 }
1998 
1999 int
2000 arc_msg0(struct arc_softc *sc, uint32_t m)
2001 {
2002 	/* post message */
2003 	arc_write(sc, ARC_REG_INB_MSG0, m);
2004 	/* wait for the fw to do it */
2005 	if (arc_wait_eq(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0,
2006 	    ARC_REG_INTRSTAT_MSG0) != 0)
2007 		return 1;
2008 
2009 	/* ack it */
2010 	arc_write(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0);
2011 
2012 	return 0;
2013 }
2014 
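/*
 * Allocate a physically contiguous, kernel mapped and zeroed chunk of
 * DMA-safe memory together with the map that loads it.
 */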
2015 struct arc_dmamem *
2016 arc_dmamem_alloc(struct arc_softc *sc, size_t size)
2017 {
2018 	struct arc_dmamem		*adm;
2019 	int				nsegs;
2020 
2021 	adm = kmem_zalloc(sizeof(*adm), KM_NOSLEEP);
2022 	if (adm == NULL)
2023 		return NULL;
2024 
2025 	adm->adm_size = size;
2026 
2027 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
2028 	    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &adm->adm_map) != 0)
2029 		goto admfree;
2030 
2031 	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &adm->adm_seg,
2032 	    1, &nsegs, BUS_DMA_NOWAIT) != 0)
2033 		goto destroy;
2034 
2035 	if (bus_dmamem_map(sc->sc_dmat, &adm->adm_seg, nsegs, size,
2036 	    &adm->adm_kva, BUS_DMA_NOWAIT|BUS_DMA_COHERENT) != 0)
2037 		goto free;
2038 
2039 	if (bus_dmamap_load(sc->sc_dmat, adm->adm_map, adm->adm_kva, size,
2040 	    NULL, BUS_DMA_NOWAIT) != 0)
2041 		goto unmap;
2042 
2043 	memset(adm->adm_kva, 0, size);
2044 
2045 	return adm;
2046 
2047 unmap:
2048 	bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, size);
2049 free:
2050 	bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
2051 destroy:
2052 	bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
2053 admfree:
2054 	kmem_free(adm, sizeof(*adm));
2055 
2056 	return NULL;
2057 }
2058 
2059 void
2060 arc_dmamem_free(struct arc_softc *sc, struct arc_dmamem *adm)
2061 {
2062 	bus_dmamap_unload(sc->sc_dmat, adm->adm_map);
2063 	bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, adm->adm_size);
2064 	bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
2065 	bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
2066 	kmem_free(adm, sizeof(*adm));
2067 }
2068 
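/*
 * Allocate the ccb array and the DMA memory that backs the command
 * frames, set up a dmamap per ccb and put every ccb on the free list.
 */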
2069 int
2070 arc_alloc_ccbs(device_t self)
2071 {
2072 	struct arc_softc 	*sc = device_private(self);
2073 	struct arc_ccb		*ccb;
2074 	uint8_t			*cmd;
2075 	int			i;
2076 	size_t			ccbslen;
2077 
2078 	TAILQ_INIT(&sc->sc_ccb_free);
2079 
2080 	ccbslen = sizeof(struct arc_ccb) * sc->sc_req_count;
2081 	sc->sc_ccbs = kmem_zalloc(ccbslen, KM_SLEEP);
2082 
2083 	sc->sc_requests = arc_dmamem_alloc(sc,
2084 	    ARC_MAX_IOCMDLEN * sc->sc_req_count);
2085 	if (sc->sc_requests == NULL) {
2086 		aprint_error_dev(self, "unable to allocate ccb dmamem\n");
2087 		goto free_ccbs;
2088 	}
2089 	cmd = ARC_DMA_KVA(sc->sc_requests);
2090 
2091 	for (i = 0; i < sc->sc_req_count; i++) {
2092 		ccb = &sc->sc_ccbs[i];
2093 
2094 		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, ARC_SGL_MAXLEN,
2095 		    MAXPHYS, 0, 0, &ccb->ccb_dmamap) != 0) {
2096 			aprint_error_dev(self,
2097 			    "unable to create dmamap for ccb %d\n", i);
2098 			goto free_maps;
2099 		}
2100 
2101 		ccb->ccb_sc = sc;
2102 		ccb->ccb_id = i;
2103 		ccb->ccb_offset = ARC_MAX_IOCMDLEN * i;
2104 
2105 		ccb->ccb_cmd = (struct arc_io_cmd *)&cmd[ccb->ccb_offset];
2106 		ccb->ccb_cmd_post = (ARC_DMA_DVA(sc->sc_requests) +
2107 		    ccb->ccb_offset) >> ARC_REG_POST_QUEUE_ADDR_SHIFT;
2108 
2109 		arc_put_ccb(sc, ccb);
2110 	}
2111 
2112 	return 0;
2113 
2114 free_maps:
2115 	while ((ccb = arc_get_ccb(sc)) != NULL)
2116 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
2117 	arc_dmamem_free(sc, sc->sc_requests);
2118 
2119 free_ccbs:
2120 	kmem_free(sc->sc_ccbs, ccbslen);
2121 
2122 	return 1;
2123 }
2124 
2125 struct arc_ccb *
2126 arc_get_ccb(struct arc_softc *sc)
2127 {
2128 	struct arc_ccb			*ccb;
2129 
2130 	ccb = TAILQ_FIRST(&sc->sc_ccb_free);
2131 	if (ccb != NULL)
2132 		TAILQ_REMOVE(&sc->sc_ccb_free, ccb, ccb_link);
2133 
2134 	return ccb;
2135 }
2136 
2137 void
2138 arc_put_ccb(struct arc_softc *sc, struct arc_ccb *ccb)
2139 {
2140 	ccb->ccb_xs = NULL;
2141 	memset(ccb->ccb_cmd, 0, ARC_MAX_IOCMDLEN);
2142 	TAILQ_INSERT_TAIL(&sc->sc_ccb_free, ccb, ccb_link);
2143 }
2144