xref: /netbsd-src/sys/dev/pci/arcmsr.c (revision b5677b36047b601b9addaaa494a58ceae82c2a6c)
1 /*	$NetBSD: arcmsr.c,v 1.22 2008/09/23 22:22:41 christos Exp $ */
2 /*	$OpenBSD: arc.c,v 1.68 2007/10/27 03:28:27 dlg Exp $ */
3 
4 /*
5  * Copyright (c) 2007, 2008 Juan Romero Pardines <xtraeme@netbsd.org>
6  * Copyright (c) 2006 David Gwynne <dlg@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include "bio.h"
22 
23 #include <sys/cdefs.h>
24 __KERNEL_RCSID(0, "$NetBSD: arcmsr.c,v 1.22 2008/09/23 22:22:41 christos Exp $");
25 
26 #include <sys/param.h>
27 #include <sys/buf.h>
28 #include <sys/kernel.h>
29 #include <sys/malloc.h>
30 #include <sys/device.h>
31 #include <sys/kmem.h>
32 #include <sys/kthread.h>
33 #include <sys/mutex.h>
34 #include <sys/condvar.h>
35 #include <sys/rwlock.h>
36 
37 #if NBIO > 0
38 #include <sys/ioctl.h>
39 #include <dev/biovar.h>
40 #endif
41 
42 #include <dev/pci/pcireg.h>
43 #include <dev/pci/pcivar.h>
44 #include <dev/pci/pcidevs.h>
45 
46 #include <dev/scsipi/scsipi_all.h>
47 #include <dev/scsipi/scsi_all.h>
48 #include <dev/scsipi/scsiconf.h>
49 
50 #include <dev/sysmon/sysmonvar.h>
51 
52 #include <sys/bus.h>
53 
54 #include <uvm/uvm_extern.h>	/* for PAGE_SIZE */
55 
56 #include <dev/pci/arcmsrvar.h>
57 
58 /* #define ARC_DEBUG */
59 #ifdef ARC_DEBUG
60 #define ARC_D_INIT	(1<<0)
61 #define ARC_D_RW	(1<<1)
62 #define ARC_D_DB	(1<<2)
63 
64 int arcdebug = 0;
65 
66 #define DPRINTF(p...)		do { if (arcdebug) printf(p); } while (0)
67 #define DNPRINTF(n, p...)	do { if ((n) & arcdebug) printf(p); } while (0)
68 
69 #else
70 #define DPRINTF(p, ...)		/* p */
71 #define DNPRINTF(n, p, ...)	/* n, p */
72 #endif
73 
74 /*
75  * the fw header must always equal this.
76  */
77 static struct arc_fw_hdr arc_fw_hdr = { 0x5e, 0x01, 0x61 };
78 
79 /*
80  * autoconf(9) glue.
81  */
82 static int 	arc_match(device_t, cfdata_t, void *);
83 static void 	arc_attach(device_t, device_t, void *);
84 static int 	arc_detach(device_t, int);
85 static bool 	arc_shutdown(device_t, int);
86 static int 	arc_intr(void *);
87 static void	arc_minphys(struct buf *);
88 
89 CFATTACH_DECL_NEW(arcmsr, sizeof(struct arc_softc),
90 	arc_match, arc_attach, arc_detach, NULL);
91 
92 /*
93  * bio(4) and sysmon_envsys(9) glue.
94  */
95 #if NBIO > 0
96 static int 	arc_bioctl(device_t, u_long, void *);
97 static int 	arc_bio_inq(struct arc_softc *, struct bioc_inq *);
98 static int 	arc_bio_vol(struct arc_softc *, struct bioc_vol *);
99 static int	arc_bio_disk_volume(struct arc_softc *, struct bioc_disk *);
100 static int	arc_bio_disk_novol(struct arc_softc *, struct bioc_disk *);
101 static void	arc_bio_disk_filldata(struct arc_softc *, struct bioc_disk *,
102 				      struct arc_fw_diskinfo *, int);
103 static int 	arc_bio_alarm(struct arc_softc *, struct bioc_alarm *);
104 static int 	arc_bio_alarm_state(struct arc_softc *, struct bioc_alarm *);
105 static int 	arc_bio_getvol(struct arc_softc *, int,
106 			       struct arc_fw_volinfo *);
107 static int	arc_bio_setstate(struct arc_softc *, struct bioc_setstate *);
108 static int 	arc_bio_volops(struct arc_softc *, struct bioc_volops *);
109 static void 	arc_create_sensors(void *);
110 static void 	arc_refresh_sensors(struct sysmon_envsys *, envsys_data_t *);
111 static int	arc_fw_parse_status_code(struct arc_softc *, uint8_t *);
112 #endif
113 
114 static int
115 arc_match(device_t parent, cfdata_t match, void *aux)
116 {
117 	struct pci_attach_args *pa = aux;
118 
119 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ARECA) {
120 		switch (PCI_PRODUCT(pa->pa_id)) {
121 		case PCI_PRODUCT_ARECA_ARC1110:
122 		case PCI_PRODUCT_ARECA_ARC1120:
123 		case PCI_PRODUCT_ARECA_ARC1130:
124 		case PCI_PRODUCT_ARECA_ARC1160:
125 		case PCI_PRODUCT_ARECA_ARC1170:
126 		case PCI_PRODUCT_ARECA_ARC1200:
127 		case PCI_PRODUCT_ARECA_ARC1202:
128 		case PCI_PRODUCT_ARECA_ARC1210:
129 		case PCI_PRODUCT_ARECA_ARC1220:
130 		case PCI_PRODUCT_ARECA_ARC1230:
131 		case PCI_PRODUCT_ARECA_ARC1260:
132 		case PCI_PRODUCT_ARECA_ARC1270:
133 		case PCI_PRODUCT_ARECA_ARC1280:
134 		case PCI_PRODUCT_ARECA_ARC1380:
135 		case PCI_PRODUCT_ARECA_ARC1381:
136 		case PCI_PRODUCT_ARECA_ARC1680:
137 		case PCI_PRODUCT_ARECA_ARC1681:
138 			return 1;
139 		default:
140 			break;
141 		}
142 	}
143 
144 	return 0;
145 }
146 
147 static void
148 arc_attach(device_t parent, device_t self, void *aux)
149 {
150 	struct arc_softc	*sc = device_private(self);
151 	struct pci_attach_args	*pa = aux;
152 	struct scsipi_adapter	*adapt = &sc->sc_adapter;
153 	struct scsipi_channel	*chan = &sc->sc_chan;
154 
155 	sc->sc_dev = self;
156 	sc->sc_talking = 0;
157 	rw_init(&sc->sc_rwlock);
158 	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_BIO);
159 	cv_init(&sc->sc_condvar, "arcdb");
160 
161 	if (arc_map_pci_resources(self, pa) != 0) {
162 		/* error message printed by arc_map_pci_resources */
163 		return;
164 	}
165 
166 	if (arc_query_firmware(self) != 0) {
167 		/* error message printed by arc_query_firmware */
168 		goto unmap_pci;
169 	}
170 
171 	if (arc_alloc_ccbs(self) != 0) {
172 		/* error message printed by arc_alloc_ccbs */
173 		goto unmap_pci;
174 	}
175 
176 	if (!pmf_device_register1(self, NULL, NULL, arc_shutdown))
177 		panic("%s: couldn't establish shutdown handler\n",
178 		    device_xname(self));
179 
180 	memset(adapt, 0, sizeof(*adapt));
181 	adapt->adapt_dev = self;
182 	adapt->adapt_nchannels = 1;
183 	adapt->adapt_openings = sc->sc_req_count / ARC_MAX_TARGET;
184 	adapt->adapt_max_periph = adapt->adapt_openings;
185 	adapt->adapt_minphys = arc_minphys;
186 	adapt->adapt_request = arc_scsi_cmd;
187 
188 	memset(chan, 0, sizeof(*chan));
189 	chan->chan_adapter = adapt;
190 	chan->chan_bustype = &scsi_bustype;
191 	chan->chan_nluns = ARC_MAX_LUN;
192 	chan->chan_ntargets = ARC_MAX_TARGET;
193 	chan->chan_id = ARC_MAX_TARGET;
194 	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;
195 
196 	/*
197 	 * Save the device_t returned; we may need to attach devices
198 	 * via the management (bio(4)) interface later.
199 	 */
200 	sc->sc_scsibus_dv = config_found(self, &sc->sc_chan, scsiprint);
201 
202 	/* enable interrupts */
203 	arc_write(sc, ARC_REG_INTRMASK,
204 	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRSTAT_DOORBELL));
205 
206 #if NBIO > 0
207 	/*
208 	 * Register the driver to bio(4) and setup the sensors.
209 	 */
210 	if (bio_register(self, arc_bioctl) != 0)
211 		panic("%s: bioctl registration failed\n", device_xname(self));
212 
213 	/*
214 	 * We need to talk to the firmware to get volume info, and our
215 	 * firmware interface relies on being able to sleep, so use a
216 	 * kernel thread to do the work.
217 	 */
218 	if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
219 	    arc_create_sensors, sc, &sc->sc_lwp, "arcmsr_sensors") != 0)
220 		panic("%s: unable to create a kernel thread for sensors\n",
221 		    device_xname(self));
222 #endif
223 
224 	return;
225 
226 unmap_pci:
227 	arc_unmap_pci_resources(sc);
228 }
229 
230 static int
231 arc_detach(device_t self, int flags)
232 {
233 	struct arc_softc		*sc = device_private(self);
234 
235 	if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
236 		aprint_error_dev(self, "timeout waiting to stop bg rebuild\n");
237 
238 	if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
239 		aprint_error_dev(self, "timeout waiting to flush cache\n");
240 
241 	return 0;
242 }
243 
244 static bool
245 arc_shutdown(device_t self, int how)
246 {
247 	struct arc_softc		*sc = device_private(self);
248 
249 	if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
250 		aprint_error_dev(self, "timeout waiting to stop bg rebuild\n");
251 
252 	if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
253 		aprint_error_dev(self, "timeout waiting to flush cache\n");
254 
255 	return true;
256 }
257 
258 static void
259 arc_minphys(struct buf *bp)
260 {
261 	if (bp->b_bcount > MAXPHYS)
262 		bp->b_bcount = MAXPHYS;
263 	minphys(bp);
264 }
265 
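/*
 * Interrupt handler: acknowledge doorbell and post-queue interrupts,
 * wake up a thread waiting in arc_msgbuf() when a doorbell arrives
 * while we are "talking" to the firmware, and drain the reply queue,
 * completing the ccbs it returns.
 */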
266 static int
267 arc_intr(void *arg)
268 {
269 	struct arc_softc		*sc = arg;
270 	struct arc_ccb			*ccb = NULL;
271 	char				*kva = ARC_DMA_KVA(sc->sc_requests);
272 	struct arc_io_cmd		*cmd;
273 	uint32_t			reg, intrstat;
274 
275 	mutex_spin_enter(&sc->sc_mutex);
276 	intrstat = arc_read(sc, ARC_REG_INTRSTAT);
277 	if (intrstat == 0x0) {
278 		mutex_spin_exit(&sc->sc_mutex);
279 		return 0;
280 	}
281 
282 	intrstat &= ARC_REG_INTRSTAT_POSTQUEUE | ARC_REG_INTRSTAT_DOORBELL;
283 	arc_write(sc, ARC_REG_INTRSTAT, intrstat);
284 
285 	if (intrstat & ARC_REG_INTRSTAT_DOORBELL) {
286 		if (sc->sc_talking) {
287 			arc_write(sc, ARC_REG_INTRMASK,
288 			    ~ARC_REG_INTRMASK_POSTQUEUE);
289 			cv_broadcast(&sc->sc_condvar);
290 		} else {
291 			/* otherwise drop it */
292 			reg = arc_read(sc, ARC_REG_OUTB_DOORBELL);
293 			arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
294 			if (reg & ARC_REG_OUTB_DOORBELL_WRITE_OK)
295 				arc_write(sc, ARC_REG_INB_DOORBELL,
296 				    ARC_REG_INB_DOORBELL_READ_OK);
297 		}
298 	}
299 	mutex_spin_exit(&sc->sc_mutex);
300 
301 	while ((reg = arc_pop(sc)) != 0xffffffff) {
302 		cmd = (struct arc_io_cmd *)(kva +
303 		    ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
304 		    (uint32_t)ARC_DMA_DVA(sc->sc_requests)));
305 		ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];
306 
307 		bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
308 		    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
309 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
310 
311 		arc_scsi_cmd_done(sc, ccb, reg);
312 	}
313 
314 
315 	return 1;
316 }
317 
318 void
319 arc_scsi_cmd(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
320 {
321 	struct scsipi_periph		*periph;
322 	struct scsipi_xfer		*xs;
323 	struct scsipi_adapter		*adapt = chan->chan_adapter;
324 	struct arc_softc		*sc = device_private(adapt->adapt_dev);
325 	struct arc_ccb			*ccb;
326 	struct arc_msg_scsicmd		*cmd;
327 	uint32_t			reg;
328 	uint8_t				target;
329 
330 	switch (req) {
331 	case ADAPTER_REQ_GROW_RESOURCES:
332 		/* Not supported. */
333 		return;
334 	case ADAPTER_REQ_SET_XFER_MODE:
335 		/* Not supported. */
336 		return;
337 	case ADAPTER_REQ_RUN_XFER:
338 		break;
339 	}
340 
341 	mutex_spin_enter(&sc->sc_mutex);
342 
343 	xs = arg;
344 	periph = xs->xs_periph;
345 	target = periph->periph_target;
346 
347 	if (xs->cmdlen > ARC_MSG_CDBLEN) {
348 		memset(&xs->sense, 0, sizeof(xs->sense));
349 		xs->sense.scsi_sense.response_code = SSD_RCODE_VALID | 0x70;
350 		xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
351 		xs->sense.scsi_sense.asc = 0x20;
352 		xs->error = XS_SENSE;
353 		xs->status = SCSI_CHECK;
354 		mutex_spin_exit(&sc->sc_mutex);
355 		scsipi_done(xs);
356 		return;
357 	}
358 
359 	ccb = arc_get_ccb(sc);
360 	if (ccb == NULL) {
361 		xs->error = XS_RESOURCE_SHORTAGE;
362 		mutex_spin_exit(&sc->sc_mutex);
363 		scsipi_done(xs);
364 		return;
365 	}
366 
367 	ccb->ccb_xs = xs;
368 
369 	if (arc_load_xs(ccb) != 0) {
370 		xs->error = XS_DRIVER_STUFFUP;
371 		arc_put_ccb(sc, ccb);
372 		mutex_spin_exit(&sc->sc_mutex);
373 		scsipi_done(xs);
374 		return;
375 	}
376 
377 	cmd = &ccb->ccb_cmd->cmd;
378 	reg = ccb->ccb_cmd_post;
379 
380 	/* bus is always 0 */
381 	cmd->target = target;
382 	cmd->lun = periph->periph_lun;
383 	cmd->function = 1; /* XXX magic number */
384 
385 	cmd->cdb_len = xs->cmdlen;
386 	cmd->sgl_len = ccb->ccb_dmamap->dm_nsegs;
387 	if (xs->xs_control & XS_CTL_DATA_OUT)
388 		cmd->flags = ARC_MSG_SCSICMD_FLAG_WRITE;
389 	if (ccb->ccb_dmamap->dm_nsegs > ARC_SGL_256LEN) {
390 		cmd->flags |= ARC_MSG_SCSICMD_FLAG_SGL_BSIZE_512;
391 		reg |= ARC_REG_POST_QUEUE_BIGFRAME;
392 	}
393 
394 	cmd->context = htole32(ccb->ccb_id);
395 	cmd->data_len = htole32(xs->datalen);
396 
397 	memcpy(cmd->cdb, xs->cmd, xs->cmdlen);
398 
399 	/* we've built the command, let's put it on the hw */
400 	bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
401 	    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
402 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
403 
404 	arc_push(sc, reg);
405 	if (xs->xs_control & XS_CTL_POLL) {
406 		if (arc_complete(sc, ccb, xs->timeout) != 0) {
407 			xs->error = XS_DRIVER_STUFFUP;
408 			mutex_spin_exit(&sc->sc_mutex);
409 			scsipi_done(xs);
410 			return;
411 		}
412 	}
413 
414 	mutex_spin_exit(&sc->sc_mutex);
415 }
416 
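/*
 * Map the data buffer of a transfer and build the 64-bit scatter/gather
 * list in the command frame. Returns non-zero if the DMA map could not
 * be loaded.
 */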
417 int
418 arc_load_xs(struct arc_ccb *ccb)
419 {
420 	struct arc_softc		*sc = ccb->ccb_sc;
421 	struct scsipi_xfer		*xs = ccb->ccb_xs;
422 	bus_dmamap_t			dmap = ccb->ccb_dmamap;
423 	struct arc_sge			*sgl = ccb->ccb_cmd->sgl, *sge;
424 	uint64_t			addr;
425 	int				i, error;
426 
427 	if (xs->datalen == 0)
428 		return 0;
429 
430 	error = bus_dmamap_load(sc->sc_dmat, dmap,
431 	    xs->data, xs->datalen, NULL,
432 	    (xs->xs_control & XS_CTL_NOSLEEP) ?
433 	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
434 	if (error != 0) {
435 		aprint_error("%s: error %d loading dmamap\n",
436 		    device_xname(sc->sc_dev), error);
437 		return 1;
438 	}
439 
440 	for (i = 0; i < dmap->dm_nsegs; i++) {
441 		sge = &sgl[i];
442 
443 		sge->sg_hdr = htole32(ARC_SGE_64BIT | dmap->dm_segs[i].ds_len);
444 		addr = dmap->dm_segs[i].ds_addr;
445 		sge->sg_hi_addr = htole32((uint32_t)(addr >> 32));
446 		sge->sg_lo_addr = htole32((uint32_t)addr);
447 	}
448 
449 	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
450 	    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
451 	    BUS_DMASYNC_PREWRITE);
452 
453 	return 0;
454 }
455 
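/*
 * Complete a transfer: sync and unload the data DMA map, translate the
 * firmware status into scsipi status/sense data, return the ccb to the
 * free list and notify the midlayer via scsipi_done().
 */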
456 void
457 arc_scsi_cmd_done(struct arc_softc *sc, struct arc_ccb *ccb, uint32_t reg)
458 {
459 	struct scsipi_xfer		*xs = ccb->ccb_xs;
460 	struct arc_msg_scsicmd		*cmd;
461 
462 	if (xs->datalen != 0) {
463 		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
464 		    ccb->ccb_dmamap->dm_mapsize,
465 		    (xs->xs_control & XS_CTL_DATA_IN) ?
466 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
467 		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
468 	}
469 
470 	/* timeout_del */
471 	xs->status |= XS_STS_DONE;
472 
473 	if (reg & ARC_REG_REPLY_QUEUE_ERR) {
474 		cmd = &ccb->ccb_cmd->cmd;
475 
476 		switch (cmd->status) {
477 		case ARC_MSG_STATUS_SELTIMEOUT:
478 		case ARC_MSG_STATUS_ABORTED:
479 		case ARC_MSG_STATUS_INIT_FAIL:
480 			xs->status = SCSI_OK;
481 			xs->error = XS_SELTIMEOUT;
482 			break;
483 
484 		case SCSI_CHECK:
485 			memset(&xs->sense, 0, sizeof(xs->sense));
486 			memcpy(&xs->sense, cmd->sense_data,
487 			    min(ARC_MSG_SENSELEN, sizeof(xs->sense)));
488 			xs->sense.scsi_sense.response_code =
489 			    SSD_RCODE_VALID | 0x70;
490 			xs->status = SCSI_CHECK;
491 			xs->error = XS_SENSE;
492 			xs->resid = 0;
493 			break;
494 
495 		default:
496 			/* unknown device status */
497 			xs->error = XS_BUSY; /* try again later? */
498 			xs->status = SCSI_BUSY;
499 			break;
500 		}
501 	} else {
502 		xs->status = SCSI_OK;
503 		xs->error = XS_NOERROR;
504 		xs->resid = 0;
505 	}
506 
507 	arc_put_ccb(sc, ccb);
508 	scsipi_done(xs);
509 }
510 
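/*
 * Polled completion for XS_CTL_POLL commands: pop the reply queue until
 * the nominated ccb completes or the timeout (in milliseconds) expires,
 * completing any other ccbs drained along the way. Called with
 * sc_mutex held.
 */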
511 int
512 arc_complete(struct arc_softc *sc, struct arc_ccb *nccb, int timeout)
513 {
514 	struct arc_ccb			*ccb = NULL;
515 	char				*kva = ARC_DMA_KVA(sc->sc_requests);
516 	struct arc_io_cmd		*cmd;
517 	uint32_t			reg;
518 
519 	do {
520 		reg = arc_pop(sc);
521 		if (reg == 0xffffffff) {
522 			if (timeout-- == 0)
523 				return 1;
524 
525 			delay(1000);
526 			continue;
527 		}
528 
529 		cmd = (struct arc_io_cmd *)(kva +
530 		    ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
531 		    ARC_DMA_DVA(sc->sc_requests)));
532 		ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];
533 
534 		bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
535 		    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
536 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
537 
538 		arc_scsi_cmd_done(sc, ccb, reg);
539 	} while (nccb != ccb);
540 
541 	return 0;
542 }
543 
544 int
545 arc_map_pci_resources(device_t self, struct pci_attach_args *pa)
546 {
547 	struct arc_softc		*sc = device_private(self);
548 	pcireg_t			memtype;
549 	pci_intr_handle_t		ih;
550 
551 	sc->sc_pc = pa->pa_pc;
552 	sc->sc_tag = pa->pa_tag;
553 	sc->sc_dmat = pa->pa_dmat;
554 
555 	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, ARC_PCI_BAR);
556 	if (pci_mapreg_map(pa, ARC_PCI_BAR, memtype, 0, &sc->sc_iot,
557 	    &sc->sc_ioh, NULL, &sc->sc_ios) != 0) {
558 		aprint_error(": unable to map system interface register\n");
559 		return 1;
560 	}
561 
562 	if (pci_intr_map(pa, &ih) != 0) {
563 		aprint_error(": unable to map interrupt\n");
564 		goto unmap;
565 	}
566 
567 	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
568 	    arc_intr, sc);
569 	if (sc->sc_ih == NULL) {
570 		aprint_error(": unable to map interrupt [2]\n");
571 		goto unmap;
572 	}
573 
574 	aprint_normal("\n");
575 	aprint_normal_dev(self, "interrupting at %s\n",
576 	    pci_intr_string(pa->pa_pc, ih));
577 
578 	return 0;
579 
580 unmap:
581 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
582 	sc->sc_ios = 0;
583 	return 1;
584 }
585 
586 void
587 arc_unmap_pci_resources(struct arc_softc *sc)
588 {
589 	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
590 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
591 	sc->sc_ios = 0;
592 }
593 
594 int
595 arc_query_firmware(device_t self)
596 {
597 	struct arc_softc 		*sc = device_private(self);
598 	struct arc_msg_firmware_info	fwinfo;
599 	char				string[81]; /* sizeof(vendor)*2+1 */
600 
601 	if (arc_wait_eq(sc, ARC_REG_OUTB_ADDR1, ARC_REG_OUTB_ADDR1_FIRMWARE_OK,
602 	    ARC_REG_OUTB_ADDR1_FIRMWARE_OK) != 0) {
603 		aprint_debug_dev(self, "timeout waiting for firmware ok\n");
604 		return 1;
605 	}
606 
607 	if (arc_msg0(sc, ARC_REG_INB_MSG0_GET_CONFIG) != 0) {
608 		aprint_debug_dev(self, "timeout waiting for get config\n");
609 		return 1;
610 	}
611 
612 	if (arc_msg0(sc, ARC_REG_INB_MSG0_START_BGRB) != 0) {
613 		aprint_debug_dev(self, "timeout waiting to start bg rebuild\n");
614 		return 1;
615 	}
616 
617 	arc_read_region(sc, ARC_REG_MSGBUF, &fwinfo, sizeof(fwinfo));
618 
619 	DNPRINTF(ARC_D_INIT, "%s: signature: 0x%08x\n",
620 	    device_xname(self), htole32(fwinfo.signature));
621 
622 	if (htole32(fwinfo.signature) != ARC_FWINFO_SIGNATURE_GET_CONFIG) {
623 		aprint_error_dev(self, "invalid firmware info from iop\n");
624 		return 1;
625 	}
626 
627 	DNPRINTF(ARC_D_INIT, "%s: request_len: %d\n",
628 	    device_xname(self), htole32(fwinfo.request_len));
629 	DNPRINTF(ARC_D_INIT, "%s: queue_len: %d\n",
630 	    device_xname(self), htole32(fwinfo.queue_len));
631 	DNPRINTF(ARC_D_INIT, "%s: sdram_size: %d\n",
632 	    device_xname(self), htole32(fwinfo.sdram_size));
633 	DNPRINTF(ARC_D_INIT, "%s: sata_ports: %d\n",
634 	    device_xname(self), htole32(fwinfo.sata_ports));
635 
636 	scsipi_strvis(string, 81, fwinfo.vendor, sizeof(fwinfo.vendor));
637 	DNPRINTF(ARC_D_INIT, "%s: vendor: \"%s\"\n",
638 	    device_xname(self), string);
639 
640 	scsipi_strvis(string, 17, fwinfo.model, sizeof(fwinfo.model));
641 	aprint_normal_dev(self, "Areca %s Host Adapter RAID controller\n",
642 	    string);
643 
644 	scsipi_strvis(string, 33, fwinfo.fw_version, sizeof(fwinfo.fw_version));
645 	DNPRINTF(ARC_D_INIT, "%s: version: \"%s\"\n",
646 	    device_xname(self), string);
647 
648 	aprint_normal_dev(self, "%d ports, %dMB SDRAM, firmware <%s>\n",
649 	    htole32(fwinfo.sata_ports), htole32(fwinfo.sdram_size), string);
650 
651 	if (htole32(fwinfo.request_len) != ARC_MAX_IOCMDLEN) {
652 		aprint_error_dev(self,
653 		    "unexpected request frame size (%d != %d)\n",
654 		    htole32(fwinfo.request_len), ARC_MAX_IOCMDLEN);
655 		return 1;
656 	}
657 
658 	sc->sc_req_count = htole32(fwinfo.queue_len);
659 
660 	return 0;
661 }
662 
663 #if NBIO > 0
664 static int
665 arc_bioctl(device_t self, u_long cmd, void *addr)
666 {
667 	struct arc_softc *sc = device_private(self);
668 	int error = 0;
669 
670 	switch (cmd) {
671 	case BIOCINQ:
672 		error = arc_bio_inq(sc, (struct bioc_inq *)addr);
673 		break;
674 
675 	case BIOCVOL:
676 		error = arc_bio_vol(sc, (struct bioc_vol *)addr);
677 		break;
678 
679 	case BIOCDISK:
680 		error = arc_bio_disk_volume(sc, (struct bioc_disk *)addr);
681 		break;
682 
683 	case BIOCDISK_NOVOL:
684 		error = arc_bio_disk_novol(sc, (struct bioc_disk *)addr);
685 		break;
686 
687 	case BIOCALARM:
688 		error = arc_bio_alarm(sc, (struct bioc_alarm *)addr);
689 		break;
690 
691 	case BIOCSETSTATE:
692 		error = arc_bio_setstate(sc, (struct bioc_setstate *)addr);
693 		break;
694 
695 	case BIOCVOLOPS:
696 		error = arc_bio_volops(sc, (struct bioc_volops *)addr);
697 		break;
698 
699 	default:
700 		error = ENOTTY;
701 		break;
702 	}
703 
704 	return error;
705 }
706 
707 static int
708 arc_fw_parse_status_code(struct arc_softc *sc, uint8_t *reply)
709 {
710 	switch (*reply) {
711 	case ARC_FW_CMD_RAIDINVAL:
712 		printf("%s: firmware error (invalid raid set)\n",
713 		    device_xname(sc->sc_dev));
714 		return EINVAL;
715 	case ARC_FW_CMD_VOLINVAL:
716 		printf("%s: firmware error (invalid volume set)\n",
717 		    device_xname(sc->sc_dev));
718 		return EINVAL;
719 	case ARC_FW_CMD_NORAID:
720 		printf("%s: firmware error (nonexistent raid set)\n",
721 		    device_xname(sc->sc_dev));
722 		return ENODEV;
723 	case ARC_FW_CMD_NOVOLUME:
724 		printf("%s: firmware error (nonexistent volume set)\n",
725 		    device_xname(sc->sc_dev));
726 		return ENODEV;
727 	case ARC_FW_CMD_NOPHYSDRV:
728 		printf("%s: firmware error (nonexistent physical drive)\n",
729 		    device_xname(sc->sc_dev));
730 		return ENODEV;
731 	case ARC_FW_CMD_PARAM_ERR:
732 		printf("%s: firmware error (parameter error)\n",
733 		    device_xname(sc->sc_dev));
734 		return EINVAL;
735 	case ARC_FW_CMD_UNSUPPORTED:
736 		printf("%s: firmware error (unsupported command)\n",
737 		    device_xname(sc->sc_dev));
738 		return EOPNOTSUPP;
739 	case ARC_FW_CMD_DISKCFG_CHGD:
740 		printf("%s: firmware error (disk configuration changed)\n",
741 		    device_xname(sc->sc_dev));
742 		return EINVAL;
743 	case ARC_FW_CMD_PASS_INVAL:
744 		printf("%s: firmware error (invalid password)\n",
745 		    device_xname(sc->sc_dev));
746 		return EINVAL;
747 	case ARC_FW_CMD_NODISKSPACE:
748 		printf("%s: firmware error (no disk space available)\n",
749 		    device_xname(sc->sc_dev));
750 		return EOPNOTSUPP;
751 	case ARC_FW_CMD_CHECKSUM_ERR:
752 		printf("%s: firmware error (checksum error)\n",
753 		    device_xname(sc->sc_dev));
754 		return EINVAL;
755 	case ARC_FW_CMD_PASS_REQD:
756 		printf("%s: firmware error (password required)\n",
757 		    device_xname(sc->sc_dev));
758 		return EPERM;
759 	case ARC_FW_CMD_OK:
760 	default:
761 		return 0;
762 	}
763 }
764 
765 static int
766 arc_bio_alarm(struct arc_softc *sc, struct bioc_alarm *ba)
767 {
768 	uint8_t	request[2], reply[1];
769 	size_t	len;
770 	int	error = 0;
771 
772 	switch (ba->ba_opcode) {
773 	case BIOC_SAENABLE:
774 	case BIOC_SADISABLE:
775 		request[0] = ARC_FW_SET_ALARM;
776 		request[1] = (ba->ba_opcode == BIOC_SAENABLE) ?
777 		    ARC_FW_SET_ALARM_ENABLE : ARC_FW_SET_ALARM_DISABLE;
778 		len = sizeof(request);
779 
780 		break;
781 
782 	case BIOC_SASILENCE:
783 		request[0] = ARC_FW_MUTE_ALARM;
784 		len = 1;
785 
786 		break;
787 
788 	case BIOC_GASTATUS:
789 		/* system info is too big/ugly to deal with here */
790 		return arc_bio_alarm_state(sc, ba);
791 
792 	default:
793 		return EOPNOTSUPP;
794 	}
795 
796 	error = arc_msgbuf(sc, request, len, reply, sizeof(reply));
797 	if (error != 0)
798 		return error;
799 
800 	return arc_fw_parse_status_code(sc, &reply[0]);
801 }
802 
803 static int
804 arc_bio_alarm_state(struct arc_softc *sc, struct bioc_alarm *ba)
805 {
806 	struct arc_fw_sysinfo	*sysinfo;
807 	uint8_t			request;
808 	int			error = 0;
809 
810 	sysinfo = kmem_zalloc(sizeof(*sysinfo), KM_SLEEP);
811 
812 	request = ARC_FW_SYSINFO;
813 	error = arc_msgbuf(sc, &request, sizeof(request),
814 	    sysinfo, sizeof(struct arc_fw_sysinfo));
815 
816 	if (error != 0)
817 		goto out;
818 
819 	ba->ba_status = sysinfo->alarm;
820 
821 out:
822 	kmem_free(sysinfo, sizeof(*sysinfo));
823 	return error;
824 }
825 
826 static int
827 arc_bio_volops(struct arc_softc *sc, struct bioc_volops *bc)
828 {
829 	/* to create a raid set */
830 	struct req_craidset {
831 		uint8_t		cmdcode;
832 		uint32_t	devmask;
833 		uint8_t 	raidset_name[16];
834 	} __packed;
835 
836 	/* to create a volume set */
837 	struct req_cvolset {
838 		uint8_t 	cmdcode;
839 		uint8_t 	raidset;
840 		uint8_t 	volset_name[16];
841 		uint64_t	capacity;
842 		uint8_t 	raidlevel;
843 		uint8_t 	stripe;
844 		uint8_t 	scsi_chan;
845 		uint8_t 	scsi_target;
846 		uint8_t 	scsi_lun;
847 		uint8_t 	tagqueue;
848 		uint8_t 	cache;
849 		uint8_t 	speed;
850 		uint8_t 	quick_init;
851 	} __packed;
852 
853 	struct scsibus_softc	*scsibus_sc = NULL;
854 	struct req_craidset	req_craidset;
855 	struct req_cvolset 	req_cvolset;
856 	uint8_t 		request[2];
857 	uint8_t 		reply[1];
858 	int 			error = 0;
859 
860 	switch (bc->bc_opcode) {
861 	case BIOC_VCREATE_VOLUME:
862 	    {
863 		/*
864 		 * Zero out the structs so that we use some defaults
865 		 * in raid and volume sets.
866 		 */
867 		memset(&req_craidset, 0, sizeof(req_craidset));
868 		memset(&req_cvolset, 0, sizeof(req_cvolset));
869 
870 		/*
871 		 * First we have to create the raid set, using the
872 		 * default name for all of them.
873 		 */
874 		req_craidset.cmdcode = ARC_FW_CREATE_RAIDSET;
875 		req_craidset.devmask = bc->bc_devmask;
876 		error = arc_msgbuf(sc, &req_craidset, sizeof(req_craidset),
877 		    reply, sizeof(reply));
878 		if (error != 0)
879 			return error;
880 
881 		error = arc_fw_parse_status_code(sc, &reply[0]);
882 		if (error) {
883 			printf("%s: create raidset%d failed\n",
884 			    device_xname(sc->sc_dev), bc->bc_volid);
885 			return error;
886 		}
887 
888 		/*
889 		 * At this point the raid set has been created, so it's
890 		 * time to create the volume set.
891 		 */
892 		req_cvolset.cmdcode = ARC_FW_CREATE_VOLUME;
893 		req_cvolset.raidset = bc->bc_volid;
894 		req_cvolset.capacity = bc->bc_size * ARC_BLOCKSIZE;
895 
896 		/*
897 		 * Set the RAID level.
898 		 */
899 		switch (bc->bc_level) {
900 		case 0:
901 		case 1:
902 			req_cvolset.raidlevel = bc->bc_level;
903 			break;
904 		case BIOC_SVOL_RAID10:
905 			req_cvolset.raidlevel = 1;
906 			break;
907 		case 3:
908 			req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_3;
909 			break;
910 		case 5:
911 			req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_5;
912 			break;
913 		case 6:
914 			req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_6;
915 			break;
916 		default:
917 			return EOPNOTSUPP;
918 		}
919 
920 		/*
921 		 * Set the stripe size.
922 		 */
923 		switch (bc->bc_stripe) {
924 		case 4:
925 			req_cvolset.stripe = 0;
926 			break;
927 		case 8:
928 			req_cvolset.stripe = 1;
929 			break;
930 		case 16:
931 			req_cvolset.stripe = 2;
932 			break;
933 		case 32:
934 			req_cvolset.stripe = 3;
935 			break;
936 		case 64:
937 			req_cvolset.stripe = 4;
938 			break;
939 		case 128:
940 			req_cvolset.stripe = 5;
941 			break;
942 		default:
943 			req_cvolset.stripe = 4; /* by default 64K */
944 			break;
945 		}
946 
947 		req_cvolset.scsi_chan = bc->bc_channel;
948 		req_cvolset.scsi_target = bc->bc_target;
949 		req_cvolset.scsi_lun = bc->bc_lun;
950 		req_cvolset.tagqueue = 1; /* always enabled */
951 		req_cvolset.cache = 1; /* always enabled */
952 		req_cvolset.speed = 4; /* always max speed */
953 
954 		/* RAID 1 and 1+0 levels need foreground initialization */
955 		if (bc->bc_level == 1 || bc->bc_level == BIOC_SVOL_RAID10)
956 			req_cvolset.quick_init = 1; /* foreground init */
957 
958 		error = arc_msgbuf(sc, &req_cvolset, sizeof(req_cvolset),
959 		    reply, sizeof(reply));
960 		if (error != 0)
961 			return error;
962 
963 		error = arc_fw_parse_status_code(sc, &reply[0]);
964 		if (error) {
965 			printf("%s: create volumeset%d failed\n",
966 			    device_xname(sc->sc_dev), bc->bc_volid);
967 			return error;
968 		}
969 
970 		/*
971 		 * If we are creating a RAID 1 or RAID 1+0 volume,
972 		 * the volume will be created immediately but it won't
973 		 * be available until the initialization is done... so
974 		 * don't bother attaching the sd(4) device.
975 		 */
976 		if (bc->bc_level == 1 || bc->bc_level == BIOC_SVOL_RAID10)
977 			break;
978 
979 		/*
980 		 * Do a rescan on the bus to attach the device associated
981 		 * with the new volume.
982 		 */
983 		scsibus_sc = device_private(sc->sc_scsibus_dv);
984 		(void)scsi_probe_bus(scsibus_sc, bc->bc_target, bc->bc_lun);
985 
986 		break;
987 	    }
988 	case BIOC_VREMOVE_VOLUME:
989 	    {
990 		/*
991 		 * Remove the volume set specified in bc_volid.
992 		 */
993 		request[0] = ARC_FW_DELETE_VOLUME;
994 		request[1] = bc->bc_volid;
995 		error = arc_msgbuf(sc, request, sizeof(request),
996 		    reply, sizeof(reply));
997 		if (error != 0)
998 			return error;
999 
1000 		error = arc_fw_parse_status_code(sc, &reply[0]);
1001 		if (error) {
1002 			printf("%s: delete volumeset%d failed\n",
1003 			    device_xname(sc->sc_dev), bc->bc_volid);
1004 			return error;
1005 		}
1006 
1007 		/*
1008 		 * Detach the sd(4) device associated with the volume,
1009 		 * but if there's an error don't make it a priority.
1010 		 */
1011 		error = scsipi_target_detach(&sc->sc_chan, bc->bc_target,
1012 					     bc->bc_lun, 0);
1013 		if (error)
1014 			printf("%s: couldn't detach sd device for volume %d "
1015 			    "at %u:%u.%u (error=%d)\n",
1016 			    device_xname(sc->sc_dev), bc->bc_volid,
1017 			    bc->bc_channel, bc->bc_target, bc->bc_lun, error);
1018 
1019 		/*
1020 		 * Also remove the raid set specified in bc_volid;
1021 		 * we only care about volumes.
1022 		 */
1023 		request[0] = ARC_FW_DELETE_RAIDSET;
1024 		request[1] = bc->bc_volid;
1025 		error = arc_msgbuf(sc, request, sizeof(request),
1026 		    reply, sizeof(reply));
1027 		if (error != 0)
1028 			return error;
1029 
1030 		error = arc_fw_parse_status_code(sc, &reply[0]);
1031 		if (error) {
1032 			printf("%s: delete raidset%d failed\n",
1033 			    device_xname(sc->sc_dev), bc->bc_volid);
1034 			return error;
1035 		}
1036 
1037 		break;
1038 	    }
1039 	default:
1040 		return EOPNOTSUPP;
1041 	}
1042 
1043 	return error;
1044 }
1045 
1046 static int
1047 arc_bio_setstate(struct arc_softc *sc, struct bioc_setstate *bs)
1048 {
1049 	/* for a hotspare disk */
1050 	struct request_hs {
1051 		uint8_t		cmdcode;
1052 		uint32_t	devmask;
1053 	} __packed;
1054 
1055 	/* for a pass-through disk */
1056 	struct request_pt {
1057 		uint8_t 	cmdcode;
1058 		uint8_t		devid;
1059 		uint8_t		scsi_chan;
1060 		uint8_t 	scsi_id;
1061 		uint8_t 	scsi_lun;
1062 		uint8_t 	tagged_queue;
1063 		uint8_t 	cache_mode;
1064 		uint8_t 	max_speed;
1065 	} __packed;
1066 
1067 	struct scsibus_softc	*scsibus_sc = NULL;
1068 	struct request_hs	req_hs; /* to add/remove hotspare */
1069 	struct request_pt	req_pt;	/* to add a pass-through */
1070 	uint8_t			req_gen[2];
1071 	uint8_t			reply[1];
1072 	int			error = 0;
1073 
1074 	switch (bs->bs_status) {
1075 	case BIOC_SSHOTSPARE:
1076 	    {
1077 		req_hs.cmdcode = ARC_FW_CREATE_HOTSPARE;
1078 		req_hs.devmask = (1 << bs->bs_target);
1079 		goto hotspare;
1080 	    }
1081 	case BIOC_SSDELHOTSPARE:
1082 	    {
1083 		req_hs.cmdcode = ARC_FW_DELETE_HOTSPARE;
1084 		req_hs.devmask = (1 << bs->bs_target);
1085 		goto hotspare;
1086 	    }
1087 	case BIOC_SSPASSTHRU:
1088 	    {
1089 		req_pt.cmdcode = ARC_FW_CREATE_PASSTHRU;
1090 		req_pt.devid = bs->bs_other_id; /* this wants device# */
1091 		req_pt.scsi_chan = bs->bs_channel;
1092 		req_pt.scsi_id = bs->bs_target;
1093 		req_pt.scsi_lun = bs->bs_lun;
1094 		req_pt.tagged_queue = 1; /* always enabled */
1095 		req_pt.cache_mode = 1; /* always enabled */
1096 		req_pt.max_speed = 4; /* always max speed */
1097 
1098 		error = arc_msgbuf(sc, &req_pt, sizeof(req_pt),
1099 		    reply, sizeof(reply));
1100 		if (error != 0)
1101 			return error;
1102 
1103 		/*
1104 		 * Do a rescan on the bus to attach the new device
1105 		 * associated with the pass-through disk.
1106 		 */
1107 		scsibus_sc = device_private(sc->sc_scsibus_dv);
1108 		(void)scsi_probe_bus(scsibus_sc, bs->bs_target, bs->bs_lun);
1109 
1110 		goto out;
1111 	    }
1112 	case BIOC_SSDELPASSTHRU:
1113 	    {
1114 		req_gen[0] = ARC_FW_DELETE_PASSTHRU;
1115 		req_gen[1] = bs->bs_target;
1116 		error = arc_msgbuf(sc, &req_gen, sizeof(req_gen),
1117 		    reply, sizeof(reply));
1118 		if (error != 0)
1119 			return error;
1120 
1121 		/*
1122 		 * Detach the sd device associated with this pass-through disk.
1123 		 */
1124 		error = scsipi_target_detach(&sc->sc_chan, bs->bs_target,
1125 					     bs->bs_lun, 0);
1126 		if (error)
1127 			printf("%s: couldn't detach sd device for the "
1128 			    "pass-through disk at %u:%u.%u (error=%d)\n",
1129 			    device_xname(sc->sc_dev),
1130 			    bs->bs_channel, bs->bs_target, bs->bs_lun, error);
1131 
1132 		goto out;
1133 	    }
1134 	case BIOC_SSCHECKSTART_VOL:
1135 	    {
1136 		req_gen[0] = ARC_FW_START_CHECKVOL;
1137 		req_gen[1] = bs->bs_volid;
1138 		error = arc_msgbuf(sc, &req_gen, sizeof(req_gen),
1139 		    reply, sizeof(reply));
1140 		if (error != 0)
1141 			return error;
1142 
1143 		goto out;
1144 	    }
1145 	case BIOC_SSCHECKSTOP_VOL:
1146 	    {
1147 		uint8_t req = ARC_FW_STOP_CHECKVOL;
1148 		error = arc_msgbuf(sc, &req, 1, reply, sizeof(reply));
1149 		if (error != 0)
1150 			return error;
1151 
1152 		goto out;
1153 	    }
1154 	default:
1155 		return EOPNOTSUPP;
1156 	}
1157 
1158 hotspare:
1159 	error = arc_msgbuf(sc, &req_hs, sizeof(req_hs),
1160 	    reply, sizeof(reply));
1161 	if (error != 0)
1162 		return error;
1163 
1164 out:
1165 	return arc_fw_parse_status_code(sc, &reply[0]);
1166 }
1167 
1168 static int
1169 arc_bio_inq(struct arc_softc *sc, struct bioc_inq *bi)
1170 {
1171 	uint8_t			request[2];
1172 	struct arc_fw_sysinfo	*sysinfo = NULL;
1173 	struct arc_fw_raidinfo	*raidinfo;
1174 	int			nvols = 0, i;
1175 	int			error = 0;
1176 
1177 	raidinfo = kmem_zalloc(sizeof(*raidinfo), KM_SLEEP);
1178 
1179 	if (!sc->sc_maxraidset || !sc->sc_maxvolset || !sc->sc_cchans) {
1180 		sysinfo = kmem_zalloc(sizeof(*sysinfo), KM_SLEEP);
1181 
1182 		request[0] = ARC_FW_SYSINFO;
1183 		error = arc_msgbuf(sc, request, 1, sysinfo,
1184 		    sizeof(struct arc_fw_sysinfo));
1185 		if (error != 0)
1186 			goto out;
1187 
1188 		sc->sc_maxraidset = sysinfo->max_raid_set;
1189 		sc->sc_maxvolset = sysinfo->max_volume_set;
1190 		sc->sc_cchans = sysinfo->ide_channels;
1191 	}
1192 
1193 	request[0] = ARC_FW_RAIDINFO;
1194 	for (i = 0; i < sc->sc_maxraidset; i++) {
1195 		request[1] = i;
1196 		error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
1197 		    sizeof(struct arc_fw_raidinfo));
1198 		if (error != 0)
1199 			goto out;
1200 
1201 		nvols += raidinfo->volumes;
1202 	}
1203 
1204 	strlcpy(bi->bi_dev, device_xname(sc->sc_dev), sizeof(bi->bi_dev));
1205 	bi->bi_novol = nvols;
1206 	bi->bi_nodisk = sc->sc_cchans;
1207 
1208 out:
1209 	if (sysinfo)
1210 		kmem_free(sysinfo, sizeof(*sysinfo));
1211 	kmem_free(raidinfo, sizeof(*raidinfo));
1212 	return error;
1213 }
1214 
1215 static int
1216 arc_bio_getvol(struct arc_softc *sc, int vol, struct arc_fw_volinfo *volinfo)
1217 {
1218 	uint8_t			request[2];
1219 	int			error = 0;
1220 	int			nvols = 0, i;
1221 
1222 	request[0] = ARC_FW_VOLINFO;
1223 	for (i = 0; i < sc->sc_maxvolset; i++) {
1224 		request[1] = i;
1225 		error = arc_msgbuf(sc, request, sizeof(request), volinfo,
1226 		    sizeof(struct arc_fw_volinfo));
1227 		if (error != 0)
1228 			goto out;
1229 
1230 		if (volinfo->capacity == 0 && volinfo->capacity2 == 0)
1231 			continue;
1232 
1233 		if (nvols == vol)
1234 			break;
1235 
1236 		nvols++;
1237 	}
1238 
1239 	if (nvols != vol ||
1240 	    (volinfo->capacity == 0 && volinfo->capacity2 == 0)) {
1241 		error = ENODEV;
1242 		goto out;
1243 	}
1244 
1245 out:
1246 	return error;
1247 }
1248 
1249 static int
1250 arc_bio_vol(struct arc_softc *sc, struct bioc_vol *bv)
1251 {
1252 	struct arc_fw_volinfo	*volinfo;
1253 	uint64_t		blocks;
1254 	uint32_t		status;
1255 	int			error = 0;
1256 
1257 	volinfo = kmem_zalloc(sizeof(*volinfo), KM_SLEEP);
1258 
1259 	error = arc_bio_getvol(sc, bv->bv_volid, volinfo);
1260 	if (error != 0)
1261 		goto out;
1262 
1263 	bv->bv_percent = -1;
1264 	bv->bv_seconds = 0;
1265 
1266 	status = htole32(volinfo->volume_status);
1267 	if (status == 0x0) {
1268 		if (htole32(volinfo->fail_mask) == 0x0)
1269 			bv->bv_status = BIOC_SVONLINE;
1270 		else
1271 			bv->bv_status = BIOC_SVDEGRADED;
1272 	} else if (status & ARC_FW_VOL_STATUS_NEED_REGEN) {
1273 		bv->bv_status = BIOC_SVDEGRADED;
1274 	} else if (status & ARC_FW_VOL_STATUS_FAILED) {
1275 		bv->bv_status = BIOC_SVOFFLINE;
1276 	} else if (status & ARC_FW_VOL_STATUS_INITTING) {
1277 		bv->bv_status = BIOC_SVBUILDING;
1278 		bv->bv_percent = htole32(volinfo->progress);
1279 	} else if (status & ARC_FW_VOL_STATUS_REBUILDING) {
1280 		bv->bv_status = BIOC_SVREBUILD;
1281 		bv->bv_percent = htole32(volinfo->progress);
1282 	} else if (status & ARC_FW_VOL_STATUS_MIGRATING) {
1283 		bv->bv_status = BIOC_SVMIGRATING;
1284 		bv->bv_percent = htole32(volinfo->progress);
1285 	} else if (status & ARC_FW_VOL_STATUS_CHECKING) {
1286 		bv->bv_status = BIOC_SVCHECKING;
1287 		bv->bv_percent = htole32(volinfo->progress);
1288 	} else if (status & ARC_FW_VOL_STATUS_NEED_INIT) {
1289 		bv->bv_status = BIOC_SVOFFLINE;
1290 	} else {
1291 		printf("%s: volume %d status 0x%x\n",
1292 		    device_xname(sc->sc_dev), bv->bv_volid, status);
1293 	}
1294 
1295 	blocks = (uint64_t)htole32(volinfo->capacity2) << 32;
1296 	blocks += (uint64_t)htole32(volinfo->capacity);
1297 	bv->bv_size = blocks * ARC_BLOCKSIZE; /* XXX */
1298 
1299 	switch (volinfo->raid_level) {
1300 	case ARC_FW_VOL_RAIDLEVEL_0:
1301 		bv->bv_level = 0;
1302 		break;
1303 	case ARC_FW_VOL_RAIDLEVEL_1:
1304 		if (volinfo->member_disks > 2)
1305 			bv->bv_level = BIOC_SVOL_RAID10;
1306 		else
1307 			bv->bv_level = 1;
1308 		break;
1309 	case ARC_FW_VOL_RAIDLEVEL_3:
1310 		bv->bv_level = 3;
1311 		break;
1312 	case ARC_FW_VOL_RAIDLEVEL_5:
1313 		bv->bv_level = 5;
1314 		break;
1315 	case ARC_FW_VOL_RAIDLEVEL_6:
1316 		bv->bv_level = 6;
1317 		break;
1318 	case ARC_FW_VOL_RAIDLEVEL_PASSTHRU:
1319 		bv->bv_level = BIOC_SVOL_PASSTHRU;
1320 		break;
1321 	default:
1322 		bv->bv_level = -1;
1323 		break;
1324 	}
1325 
1326 	bv->bv_nodisk = volinfo->member_disks;
1327 	bv->bv_stripe_size = volinfo->stripe_size / 2;
1328 	snprintf(bv->bv_dev, sizeof(bv->bv_dev), "sd%d", bv->bv_volid);
1329 	scsipi_strvis(bv->bv_vendor, sizeof(bv->bv_vendor), volinfo->set_name,
1330 	    sizeof(volinfo->set_name));
1331 
1332 out:
1333 	kmem_free(volinfo, sizeof(*volinfo));
1334 	return error;
1335 }
1336 
1337 static int
1338 arc_bio_disk_novol(struct arc_softc *sc, struct bioc_disk *bd)
1339 {
1340 	struct arc_fw_diskinfo	*diskinfo;
1341 	uint8_t			request[2];
1342 	int			error = 0;
1343 
1344 	diskinfo = kmem_zalloc(sizeof(*diskinfo), KM_SLEEP);
1345 
1346 	if (bd->bd_diskid >= sc->sc_cchans) {
1347 		error = ENODEV;
1348 		goto out;
1349 	}
1350 
1351 	request[0] = ARC_FW_DISKINFO;
1352 	request[1] = bd->bd_diskid;
1353 	error = arc_msgbuf(sc, request, sizeof(request),
1354 	    diskinfo, sizeof(struct arc_fw_diskinfo));
1355 	if (error != 0)
1356 		goto out;
1357 
1358 	/* skip disks with no capacity */
1359 	if (htole32(diskinfo->capacity) == 0 &&
1360 	    htole32(diskinfo->capacity2) == 0)
1361 		goto out;
1362 
1363 	bd->bd_disknovol = true;
1364 	arc_bio_disk_filldata(sc, bd, diskinfo, bd->bd_diskid);
1365 
1366 out:
1367 	kmem_free(diskinfo, sizeof(*diskinfo));
1368 	return error;
1369 }
1370 
1371 static void
1372 arc_bio_disk_filldata(struct arc_softc *sc, struct bioc_disk *bd,
1373 		     struct arc_fw_diskinfo *diskinfo, int diskid)
1374 {
1375 	uint64_t		blocks;
1376 	char			model[81];
1377 	char			serial[41];
1378 	char			rev[17];
1379 
1380 	/* Ignore bit zero for now, we don't know what it means */
1381 	diskinfo->device_state &= ~0x1;
1382 
1383 	switch (diskinfo->device_state) {
1384 	case ARC_FW_DISK_FAILED:
1385 		bd->bd_status = BIOC_SDFAILED;
1386 		break;
1387 	case ARC_FW_DISK_PASSTHRU:
1388 		bd->bd_status = BIOC_SDPASSTHRU;
1389 		break;
1390 	case ARC_FW_DISK_NORMAL:
1391 		bd->bd_status = BIOC_SDONLINE;
1392 		break;
1393 	case ARC_FW_DISK_HOTSPARE:
1394 		bd->bd_status = BIOC_SDHOTSPARE;
1395 		break;
1396 	case ARC_FW_DISK_UNUSED:
1397 		bd->bd_status = BIOC_SDUNUSED;
1398 		break;
1399 	case 0:
1400 		/* disk has been disconnected */
1401 		bd->bd_status = BIOC_SDOFFLINE;
1402 		bd->bd_channel = 1;
1403 		bd->bd_target = 0;
1404 		bd->bd_lun = 0;
1405 		strlcpy(bd->bd_vendor, "disk missing", sizeof(bd->bd_vendor));
1406 		break;
1407 	default:
1408 		printf("%s: unknown disk device_state: 0x%x\n", __func__,
1409 		    diskinfo->device_state);
1410 		bd->bd_status = BIOC_SDINVALID;
1411 		return;
1412 	}
1413 
1414 	blocks = (uint64_t)htole32(diskinfo->capacity2) << 32;
1415 	blocks += (uint64_t)htole32(diskinfo->capacity);
1416 	bd->bd_size = blocks * ARC_BLOCKSIZE; /* XXX */
1417 
1418 	scsipi_strvis(model, 81, diskinfo->model, sizeof(diskinfo->model));
1419 	scsipi_strvis(serial, 41, diskinfo->serial, sizeof(diskinfo->serial));
1420 	scsipi_strvis(rev, 17, diskinfo->firmware_rev,
1421 	    sizeof(diskinfo->firmware_rev));
1422 
1423 	snprintf(bd->bd_vendor, sizeof(bd->bd_vendor), "%s %s", model, rev);
1424 	strlcpy(bd->bd_serial, serial, sizeof(bd->bd_serial));
1425 
1426 #if 0
1427 	bd->bd_channel = diskinfo->scsi_attr.channel;
1428 	bd->bd_target = diskinfo->scsi_attr.target;
1429 	bd->bd_lun = diskinfo->scsi_attr.lun;
1430 #endif
1431 
1432 	/*
1433 	 * the firmware doesn't seem to fill scsi_attr in, so fake it with
1434 	 * the diskid.
1435 	 */
1436 	bd->bd_channel = 0;
1437 	bd->bd_target = diskid;
1438 	bd->bd_lun = 0;
1439 }
1440 
1441 static int
1442 arc_bio_disk_volume(struct arc_softc *sc, struct bioc_disk *bd)
1443 {
1444 	struct arc_fw_raidinfo	*raidinfo;
1445 	struct arc_fw_volinfo	*volinfo;
1446 	struct arc_fw_diskinfo	*diskinfo;
1447 	uint8_t			request[2];
1448 	int			error = 0;
1449 
1450 	volinfo = kmem_zalloc(sizeof(*volinfo), KM_SLEEP);
1451 	raidinfo = kmem_zalloc(sizeof(*raidinfo), KM_SLEEP);
1452 	diskinfo = kmem_zalloc(sizeof(*diskinfo), KM_SLEEP);
1453 
1454 	error = arc_bio_getvol(sc, bd->bd_volid, volinfo);
1455 	if (error != 0)
1456 		goto out;
1457 
1458 	request[0] = ARC_FW_RAIDINFO;
1459 	request[1] = volinfo->raid_set_number;
1460 
1461 	error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
1462 	    sizeof(struct arc_fw_raidinfo));
1463 	if (error != 0)
1464 		goto out;
1465 
1466 	if (bd->bd_diskid >= sc->sc_cchans ||
1467 	    bd->bd_diskid >= raidinfo->member_devices) {
1468 		error = ENODEV;
1469 		goto out;
1470 	}
1471 
1472 	if (raidinfo->device_array[bd->bd_diskid] == 0xff) {
1473 		/*
1474 		 * The disk has been disconnected, mark it offline
1475 		 * and put it on another bus.
1476 		 */
1477 		bd->bd_channel = 1;
1478 		bd->bd_target = 0;
1479 		bd->bd_lun = 0;
1480 		bd->bd_status = BIOC_SDOFFLINE;
1481 		strlcpy(bd->bd_vendor, "disk missing", sizeof(bd->bd_vendor));
1482 		goto out;
1483 	}
1484 
1485 	request[0] = ARC_FW_DISKINFO;
1486 	request[1] = raidinfo->device_array[bd->bd_diskid];
1487 	error = arc_msgbuf(sc, request, sizeof(request), diskinfo,
1488 	    sizeof(struct arc_fw_diskinfo));
1489 	if (error != 0)
1490 		goto out;
1491 
1492 	/* now fill our bio disk with data from the firmware */
1493 	arc_bio_disk_filldata(sc, bd, diskinfo,
1494 	    raidinfo->device_array[bd->bd_diskid]);
1495 
1496 out:
1497 	kmem_free(raidinfo, sizeof(*raidinfo));
1498 	kmem_free(volinfo, sizeof(*volinfo));
1499 	kmem_free(diskinfo, sizeof(*diskinfo));
1500 	return error;
1501 }
1502 #endif /* NBIO > 0 */
1503 
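/*
 * Firmware message checksum: an 8-bit sum of the two length bytes plus
 * every byte of the payload.
 */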
1504 uint8_t
1505 arc_msg_cksum(void *cmd, uint16_t len)
1506 {
1507 	uint8_t	*buf = cmd;
1508 	uint8_t	cksum;
1509 	int	i;
1510 
1511 	cksum = (uint8_t)(len >> 8) + (uint8_t)len;
1512 	for (i = 0; i < len; i++)
1513 		cksum += buf[i];
1514 
1515 	return cksum;
1516 }
1517 
1518 
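/*
 * Exchange a message with the firmware through the IOC read/write
 * buffer windows: a header, the payload and a trailing checksum are
 * streamed out via ARC_REG_IOC_WBUF and the reply is read back from
 * ARC_REG_IOC_RBUF, using the doorbell registers to hand each chunk
 * back and forth. The reply header and checksum are verified before
 * the payload is copied out to the caller. Note that the write loop's
 * modulo arithmetic assumes the whole request is smaller than one
 * window.
 */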
1519 int
1520 arc_msgbuf(struct arc_softc *sc, void *wptr, size_t wbuflen, void *rptr,
1521 	   size_t rbuflen)
1522 {
1523 	uint8_t			rwbuf[ARC_REG_IOC_RWBUF_MAXLEN];
1524 	uint8_t			*wbuf, *rbuf;
1525 	int			wlen, wdone = 0, rlen, rdone = 0;
1526 	struct arc_fw_bufhdr	*bufhdr;
1527 	uint32_t		reg, rwlen;
1528 	int			error = 0;
1529 #ifdef ARC_DEBUG
1530 	int			i;
1531 #endif
1532 
1533 	wbuf = rbuf = NULL;
1534 
1535 	DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wbuflen: %zu rbuflen: %zu\n",
1536 	    device_xname(sc->sc_dev), wbuflen, rbuflen);
1537 
1538 	wlen = sizeof(struct arc_fw_bufhdr) + wbuflen + 1; /* 1 for cksum */
1539 	wbuf = kmem_alloc(wlen, KM_SLEEP);
1540 
1541 	rlen = sizeof(struct arc_fw_bufhdr) + rbuflen + 1; /* 1 for cksum */
1542 	rbuf = kmem_alloc(rlen, KM_SLEEP);
1543 
1544 	DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wlen: %d rlen: %d\n",
1545 	    device_xname(sc->sc_dev), wlen, rlen);
1546 
1547 	bufhdr = (struct arc_fw_bufhdr *)wbuf;
1548 	bufhdr->hdr = arc_fw_hdr;
1549 	bufhdr->len = htole16(wbuflen);
1550 	memcpy(wbuf + sizeof(struct arc_fw_bufhdr), wptr, wbuflen);
1551 	wbuf[wlen - 1] = arc_msg_cksum(wptr, wbuflen);
1552 
1553 	arc_lock(sc);
1554 	if (arc_read(sc, ARC_REG_OUTB_DOORBELL) != 0) {
1555 		error = EBUSY;
1556 		goto out;
1557 	}
1558 
1559 	reg = ARC_REG_OUTB_DOORBELL_READ_OK;
1560 
1561 	do {
1562 		if ((reg & ARC_REG_OUTB_DOORBELL_READ_OK) && wdone < wlen) {
1563 			memset(rwbuf, 0, sizeof(rwbuf));
1564 			rwlen = (wlen - wdone) % sizeof(rwbuf);
1565 			memcpy(rwbuf, &wbuf[wdone], rwlen);
1566 
1567 #ifdef ARC_DEBUG
1568 			if (arcdebug & ARC_D_DB) {
1569 				printf("%s: write %d:",
1570 				    device_xname(sc->sc_dev), rwlen);
1571 				for (i = 0; i < rwlen; i++)
1572 					printf(" 0x%02x", rwbuf[i]);
1573 				printf("\n");
1574 			}
1575 #endif
1576 
1577 			/* copy the chunk to the hw */
1578 			arc_write(sc, ARC_REG_IOC_WBUF_LEN, rwlen);
1579 			arc_write_region(sc, ARC_REG_IOC_WBUF, rwbuf,
1580 			    sizeof(rwbuf));
1581 
1582 			/* say we have a buffer for the hw */
1583 			arc_write(sc, ARC_REG_INB_DOORBELL,
1584 			    ARC_REG_INB_DOORBELL_WRITE_OK);
1585 
1586 			wdone += rwlen;
1587 		}
1588 
1589 		while ((reg = arc_read(sc, ARC_REG_OUTB_DOORBELL)) == 0)
1590 			arc_wait(sc);
1591 
1592 		arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
1593 
1594 		DNPRINTF(ARC_D_DB, "%s: reg: 0x%08x\n",
1595 		    device_xname(sc->sc_dev), reg);
1596 
1597 		if ((reg & ARC_REG_OUTB_DOORBELL_WRITE_OK) && rdone < rlen) {
1598 			rwlen = arc_read(sc, ARC_REG_IOC_RBUF_LEN);
1599 			if (rwlen > sizeof(rwbuf)) {
1600 				DNPRINTF(ARC_D_DB, "%s:  rwlen too big\n",
1601 				    device_xname(sc->sc_dev));
1602 				error = EIO;
1603 				goto out;
1604 			}
1605 
1606 			arc_read_region(sc, ARC_REG_IOC_RBUF, rwbuf,
1607 			    sizeof(rwbuf));
1608 
1609 			arc_write(sc, ARC_REG_INB_DOORBELL,
1610 			    ARC_REG_INB_DOORBELL_READ_OK);
1611 
1612 #ifdef ARC_DEBUG
1613 			printf("%s:  len: %d+%d=%d/%d\n",
1614 			    device_xname(sc->sc_dev),
1615 			    rwlen, rdone, rwlen + rdone, rlen);
1616 			if (arcdebug & ARC_D_DB) {
1617 				printf("%s: read:",
1618 				    device_xname(sc->sc_dev));
1619 				for (i = 0; i < rwlen; i++)
1620 					printf(" 0x%02x", rwbuf[i]);
1621 				printf("\n");
1622 			}
1623 #endif
1624 
1625 			if ((rdone + rwlen) > rlen) {
1626 				DNPRINTF(ARC_D_DB, "%s:  rwbuf too big\n",
1627 				    device_xname(sc->sc_dev));
1628 				error = EIO;
1629 				goto out;
1630 			}
1631 
1632 			memcpy(&rbuf[rdone], rwbuf, rwlen);
1633 			rdone += rwlen;
1634 		}
1635 	} while (rdone != rlen);
1636 
1637 	bufhdr = (struct arc_fw_bufhdr *)rbuf;
1638 	if (memcmp(&bufhdr->hdr, &arc_fw_hdr, sizeof(bufhdr->hdr)) != 0 ||
1639 	    bufhdr->len != htole16(rbuflen)) {
1640 		DNPRINTF(ARC_D_DB, "%s:  rbuf hdr is wrong\n",
1641 		    device_xname(sc->sc_dev));
1642 		error = EIO;
1643 		goto out;
1644 	}
1645 
1646 	memcpy(rptr, rbuf + sizeof(struct arc_fw_bufhdr), rbuflen);
1647 
1648 	if (rbuf[rlen - 1] != arc_msg_cksum(rptr, rbuflen)) {
1649 		DNPRINTF(ARC_D_DB, "%s:  invalid cksum\n",
1650 		    device_xname(sc->sc_dev));
1651 		error = EIO;
1652 		goto out;
1653 	}
1654 
1655 out:
1656 	arc_unlock(sc);
1657 	kmem_free(wbuf, wlen);
1658 	kmem_free(rbuf, rlen);
1659 
1660 	return error;
1661 }
1662 
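/*
 * arc_lock/arc_unlock serialize access to the firmware message
 * interface: the rwlock keeps other message exchanges out, the spin
 * mutex protects the doorbell state, and sc_talking tells the
 * interrupt handler to wake us instead of dropping doorbell events.
 * arc_wait() sleeps for up to a second waiting for the firmware to
 * ring the doorbell.
 */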
1663 void
1664 arc_lock(struct arc_softc *sc)
1665 {
1666 	rw_enter(&sc->sc_rwlock, RW_WRITER);
1667 	mutex_spin_enter(&sc->sc_mutex);
1668 	arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
1669 	sc->sc_talking = 1;
1670 }
1671 
1672 void
1673 arc_unlock(struct arc_softc *sc)
1674 {
1675 	KASSERT(mutex_owned(&sc->sc_mutex));
1676 
1677 	arc_write(sc, ARC_REG_INTRMASK,
1678 	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
1679 	sc->sc_talking = 0;
1680 	mutex_spin_exit(&sc->sc_mutex);
1681 	rw_exit(&sc->sc_rwlock);
1682 }
1683 
1684 void
1685 arc_wait(struct arc_softc *sc)
1686 {
1687 	KASSERT(mutex_owned(&sc->sc_mutex));
1688 
1689 	arc_write(sc, ARC_REG_INTRMASK,
1690 	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
1691 	if (cv_timedwait(&sc->sc_condvar, &sc->sc_mutex, hz) == EWOULDBLOCK)
1692 		arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
1693 }
1694 
1695 #if NBIO > 0
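/*
 * Kernel thread entry point: query the firmware for the configured
 * volumes and attach one ENVSYS_DRIVE sensor per volume plus one per
 * member disk (pass-through volumes are skipped). The volume id is
 * stashed in value_max and the disk index (offset by 10) in value_avg
 * so arc_refresh_sensors() can map a sensor back to the firmware
 * object it monitors.
 */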
1696 static void
1697 arc_create_sensors(void *arg)
1698 {
1699 	struct arc_softc	*sc = arg;
1700 	struct bioc_inq		bi;
1701 	struct bioc_vol		bv;
1702 	int			i, j;
1703 	size_t			slen, count = 0;
1704 
1705 	memset(&bi, 0, sizeof(bi));
1706 	if (arc_bio_inq(sc, &bi) != 0) {
1707 		aprint_error("%s: unable to query firmware for sensor info\n",
1708 		    device_xname(sc->sc_dev));
1709 		kthread_exit(0);
1710 	}
1711 
1712 	/* There's no point in continuing if there are no volumes */
1713 	if (!bi.bi_novol)
1714 		kthread_exit(0);
1715 
1716 	for (i = 0; i < bi.bi_novol; i++) {
1717 		memset(&bv, 0, sizeof(bv));
1718 		bv.bv_volid = i;
1719 		if (arc_bio_vol(sc, &bv) != 0)
1720 			kthread_exit(0);
1721 
1722 		/* Skip passthrough volumes */
1723 		if (bv.bv_level == BIOC_SVOL_PASSTHRU)
1724 			continue;
1725 
1726 		/* one sensor for the volume itself */
1727 		sc->sc_nsensors++;
1728 		/* plus one sensor per member disk */
1729 		sc->sc_nsensors += bv.bv_nodisk;
1730 	}
1731 
1732 	/* No valid volumes */
1733 	if (!sc->sc_nsensors)
1734 		kthread_exit(0);
1735 
1736 	sc->sc_sme = sysmon_envsys_create();
1737 	slen = sizeof(envsys_data_t) * sc->sc_nsensors;
1738 	sc->sc_sensors = kmem_zalloc(slen, KM_SLEEP);
1739 
1740 	/* Attach sensors for volumes and disks */
1741 	for (i = 0; i < bi.bi_novol; i++) {
1742 		memset(&bv, 0, sizeof(bv));
1743 		bv.bv_volid = i;
1744 		if (arc_bio_vol(sc, &bv) != 0)
1745 			goto bad;
1746 
1747 		sc->sc_sensors[count].units = ENVSYS_DRIVE;
1748 		sc->sc_sensors[count].monitor = true;
1749 		sc->sc_sensors[count].flags = ENVSYS_FMONSTCHANGED;
1750 
1751 		/* Skip passthrough volumes */
1752 		if (bv.bv_level == BIOC_SVOL_PASSTHRU)
1753 			continue;
1754 
1755 		if (bv.bv_level == BIOC_SVOL_RAID10)
1756 			snprintf(sc->sc_sensors[count].desc,
1757 			    sizeof(sc->sc_sensors[count].desc),
1758 			    "RAID 1+0 volume%d (%s)", i, bv.bv_dev);
1759 		else
1760 			snprintf(sc->sc_sensors[count].desc,
1761 			    sizeof(sc->sc_sensors[count].desc),
1762 			    "RAID %d volume%d (%s)", bv.bv_level, i,
1763 			    bv.bv_dev);
1764 
1765 		sc->sc_sensors[count].value_max = i;
1766 
1767 		if (sysmon_envsys_sensor_attach(sc->sc_sme,
1768 		    &sc->sc_sensors[count]))
1769 			goto bad;
1770 
1771 		count++;
1772 
1773 		/* Attach disk sensors for this volume */
1774 		for (j = 0; j < bv.bv_nodisk; j++) {
1775 			sc->sc_sensors[count].units = ENVSYS_DRIVE;
1776 			sc->sc_sensors[count].monitor = true;
1777 			sc->sc_sensors[count].flags = ENVSYS_FMONSTCHANGED;
1778 
1779 			snprintf(sc->sc_sensors[count].desc,
1780 			    sizeof(sc->sc_sensors[count].desc),
1781 			    "disk%d volume%d (%s)", j, i, bv.bv_dev);
1782 			sc->sc_sensors[count].value_max = i;
1783 			sc->sc_sensors[count].value_avg = j + 10;
1784 
1785 			if (sysmon_envsys_sensor_attach(sc->sc_sme,
1786 			    &sc->sc_sensors[count]))
1787 				goto bad;
1788 
1789 			count++;
1790 		}
1791 	}
1792 
1793 	/*
1794 	 * Register our envsys driver with the framework now that the
1795 	 * sensors have all been attached.
1796 	 */
1797 	sc->sc_sme->sme_name = device_xname(sc->sc_dev);
1798 	sc->sc_sme->sme_cookie = sc;
1799 	sc->sc_sme->sme_refresh = arc_refresh_sensors;
1800 
1801 	if (sysmon_envsys_register(sc->sc_sme)) {
1802 		aprint_debug("%s: unable to register with sysmon\n",
1803 		    device_xname(sc->sc_dev));
1804 		goto bad;
1805 	}
1806 	kthread_exit(0);
1807 
1808 bad:
1809 	kmem_free(sc->sc_sensors, slen);
1810 	sysmon_envsys_destroy(sc->sc_sme);
1811 	kthread_exit(0);
1812 }
1813 
1814 static void
1815 arc_refresh_sensors(struct sysmon_envsys *sme, envsys_data_t *edata)
1816 {
1817 	struct arc_softc	*sc = sme->sme_cookie;
1818 	struct bioc_vol		bv;
1819 	struct bioc_disk	bd;
1820 
1821 	/* sanity check */
1822 	if (edata->units != ENVSYS_DRIVE)
1823 		return;
1824 
1825 	memset(&bv, 0, sizeof(bv));
1826 	bv.bv_volid = edata->value_max;
1827 
1828 	if (arc_bio_vol(sc, &bv)) {
1829 		edata->value_cur = ENVSYS_DRIVE_EMPTY;
1830 		edata->state = ENVSYS_SINVALID;
1831 		return;
1832 	}
1833 
1834 	/* Current sensor is handling a disk volume member */
1835 	if (edata->value_avg) {
1836 		memset(&bd, 0, sizeof(bd));
1837 		bd.bd_volid = edata->value_max;
1838 		bd.bd_diskid = edata->value_avg - 10;
1839 
1840 		if (arc_bio_disk_volume(sc, &bd)) {
1841 			edata->value_cur = ENVSYS_DRIVE_OFFLINE;
1842 			edata->state = ENVSYS_SCRITICAL;
1843 			return;
1844 		}
1845 
1846 		switch (bd.bd_status) {
1847 		case BIOC_SDONLINE:
1848 			edata->value_cur = ENVSYS_DRIVE_ONLINE;
1849 			edata->state = ENVSYS_SVALID;
1850 			break;
1851 		case BIOC_SDOFFLINE:
1852 			edata->value_cur = ENVSYS_DRIVE_OFFLINE;
1853 			edata->state = ENVSYS_SCRITICAL;
1854 			break;
1855 		default:
1856 			edata->value_cur = ENVSYS_DRIVE_FAIL;
1857 			edata->state = ENVSYS_SCRITICAL;
1858 			break;
1859 		}
1860 
1861 		return;
1862 	}
1863 
1864 	/* Current sensor is handling a volume */
1865 	switch (bv.bv_status) {
1866 	case BIOC_SVOFFLINE:
1867 		edata->value_cur = ENVSYS_DRIVE_OFFLINE;
1868 		edata->state = ENVSYS_SCRITICAL;
1869 		break;
1870 	case BIOC_SVDEGRADED:
1871 		edata->value_cur = ENVSYS_DRIVE_PFAIL;
1872 		edata->state = ENVSYS_SCRITICAL;
1873 		break;
1874 	case BIOC_SVBUILDING:
1875 		edata->value_cur = ENVSYS_DRIVE_BUILD;
1876 		edata->state = ENVSYS_SVALID;
1877 		break;
1878 	case BIOC_SVMIGRATING:
1879 		edata->value_cur = ENVSYS_DRIVE_MIGRATING;
1880 		edata->state = ENVSYS_SVALID;
1881 		break;
1882 	case BIOC_SVCHECKING:
1883 		edata->value_cur = ENVSYS_DRIVE_CHECK;
1884 		edata->state = ENVSYS_SVALID;
1885 		break;
1886 	case BIOC_SVREBUILD:
1887 		edata->value_cur = ENVSYS_DRIVE_REBUILD;
1888 		edata->state = ENVSYS_SCRITICAL;
1889 		break;
1890 	case BIOC_SVSCRUB:
1891 	case BIOC_SVONLINE:
1892 		edata->value_cur = ENVSYS_DRIVE_ONLINE;
1893 		edata->state = ENVSYS_SVALID;
1894 		break;
1895 	case BIOC_SVINVALID:
1896 		/* FALLTHROUGH */
1897 	default:
1898 		edata->value_cur = ENVSYS_DRIVE_EMPTY; /* unknown state */
1899 		edata->state = ENVSYS_SINVALID;
1900 		break;
1901 	}
1902 }
1903 #endif /* NBIO > 0 */
1904 
1905 uint32_t
1906 arc_read(struct arc_softc *sc, bus_size_t r)
1907 {
1908 	uint32_t			v;
1909 
1910 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1911 	    BUS_SPACE_BARRIER_READ);
1912 	v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
1913 
1914 	DNPRINTF(ARC_D_RW, "%s: arc_read 0x%lx 0x%08x\n",
1915 	    device_xname(sc->sc_dev), r, v);
1916 
1917 	return v;
1918 }
1919 
1920 void
1921 arc_read_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
1922 {
1923 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
1924 	    BUS_SPACE_BARRIER_READ);
1925 	bus_space_read_region_4(sc->sc_iot, sc->sc_ioh, r,
1926 	    (uint32_t *)buf, len >> 2);
1927 }
1928 
1929 void
1930 arc_write(struct arc_softc *sc, bus_size_t r, uint32_t v)
1931 {
1932 	DNPRINTF(ARC_D_RW, "%s: arc_write 0x%lx 0x%08x\n",
1933 	    device_xname(sc->sc_dev), r, v);
1934 
1935 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
1936 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1937 	    BUS_SPACE_BARRIER_WRITE);
1938 }
1939 
1940 void
1941 arc_write_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
1942 {
1943 	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, r,
1944 	    (const uint32_t *)buf, len >> 2);
1945 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
1946 	    BUS_SPACE_BARRIER_WRITE);
1947 }
1948 
1949 int
1950 arc_wait_eq(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1951 	    uint32_t target)
1952 {
1953 	int i;
1954 
1955 	DNPRINTF(ARC_D_RW, "%s: arc_wait_eq 0x%lx 0x%08x 0x%08x\n",
1956 	    device_xname(sc->sc_dev), r, mask, target);
1957 
1958 	for (i = 0; i < 10000; i++) {
1959 		if ((arc_read(sc, r) & mask) == target)
1960 			return 0;
1961 		delay(1000);
1962 	}
1963 
1964 	return 1;
1965 }
1966 
1967 int
1968 arc_wait_ne(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1969 	    uint32_t target)
1970 {
1971 	int i;
1972 
1973 	DNPRINTF(ARC_D_RW, "%s: arc_wait_ne 0x%lx 0x%08x 0x%08x\n",
1974 	    device_xname(sc->sc_dev), r, mask, target);
1975 
1976 	for (i = 0; i < 10000; i++) {
1977 		if ((arc_read(sc, r) & mask) != target)
1978 			return 0;
1979 		delay(1000);
1980 	}
1981 
1982 	return 1;
1983 }
1984 
1985 int
1986 arc_msg0(struct arc_softc *sc, uint32_t m)
1987 {
1988 	/* post message */
1989 	arc_write(sc, ARC_REG_INB_MSG0, m);
1990 	/* wait for the fw to do it */
1991 	if (arc_wait_eq(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0,
1992 	    ARC_REG_INTRSTAT_MSG0) != 0)
1993 		return 1;
1994 
1995 	/* ack it */
1996 	arc_write(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0);
1997 
1998 	return 0;
1999 }
2000 
2001 struct arc_dmamem *
2002 arc_dmamem_alloc(struct arc_softc *sc, size_t size)
2003 {
2004 	struct arc_dmamem		*adm;
2005 	int				nsegs;
2006 
2007 	adm = kmem_zalloc(sizeof(*adm), KM_NOSLEEP);
2008 	if (adm == NULL)
2009 		return NULL;
2010 
2011 	adm->adm_size = size;
2012 
2013 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
2014 	    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &adm->adm_map) != 0)
2015 		goto admfree;
2016 
2017 	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &adm->adm_seg,
2018 	    1, &nsegs, BUS_DMA_NOWAIT) != 0)
2019 		goto destroy;
2020 
2021 	if (bus_dmamem_map(sc->sc_dmat, &adm->adm_seg, nsegs, size,
2022 	    &adm->adm_kva, BUS_DMA_NOWAIT|BUS_DMA_COHERENT) != 0)
2023 		goto free;
2024 
2025 	if (bus_dmamap_load(sc->sc_dmat, adm->adm_map, adm->adm_kva, size,
2026 	    NULL, BUS_DMA_NOWAIT) != 0)
2027 		goto unmap;
2028 
2029 	memset(adm->adm_kva, 0, size);
2030 
2031 	return adm;
2032 
2033 unmap:
2034 	bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, size);
2035 free:
2036 	bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
2037 destroy:
2038 	bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
2039 admfree:
2040 	kmem_free(adm, sizeof(*adm));
2041 
2042 	return NULL;
2043 }
2044 
2045 void
2046 arc_dmamem_free(struct arc_softc *sc, struct arc_dmamem *adm)
2047 {
2048 	bus_dmamap_unload(sc->sc_dmat, adm->adm_map);
2049 	bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, adm->adm_size);
2050 	bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
2051 	bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
2052 	kmem_free(adm, sizeof(*adm));
2053 }
2054 
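/*
 * Allocate the ccb array and the shared DMA region that backs the
 * command frames: each ccb gets an ARC_MAX_IOCMDLEN slot in the region
 * and a precomputed post-queue address for that slot.
 */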
2055 int
2056 arc_alloc_ccbs(device_t self)
2057 {
2058 	struct arc_softc 	*sc = device_private(self);
2059 	struct arc_ccb		*ccb;
2060 	uint8_t			*cmd;
2061 	int			i;
2062 	size_t			ccbslen;
2063 
2064 	TAILQ_INIT(&sc->sc_ccb_free);
2065 
2066 	ccbslen = sizeof(struct arc_ccb) * sc->sc_req_count;
2067 	sc->sc_ccbs = kmem_zalloc(ccbslen, KM_SLEEP);
2068 
2069 	sc->sc_requests = arc_dmamem_alloc(sc,
2070 	    ARC_MAX_IOCMDLEN * sc->sc_req_count);
2071 	if (sc->sc_requests == NULL) {
2072 		aprint_error_dev(self, "unable to allocate ccb dmamem\n");
2073 		goto free_ccbs;
2074 	}
2075 	cmd = ARC_DMA_KVA(sc->sc_requests);
2076 
2077 	for (i = 0; i < sc->sc_req_count; i++) {
2078 		ccb = &sc->sc_ccbs[i];
2079 
2080 		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, ARC_SGL_MAXLEN,
2081 		    MAXPHYS, 0, 0, &ccb->ccb_dmamap) != 0) {
2082 			aprint_error_dev(self,
2083 			    "unable to create dmamap for ccb %d\n", i);
2084 			goto free_maps;
2085 		}
2086 
2087 		ccb->ccb_sc = sc;
2088 		ccb->ccb_id = i;
2089 		ccb->ccb_offset = ARC_MAX_IOCMDLEN * i;
2090 
2091 		ccb->ccb_cmd = (struct arc_io_cmd *)&cmd[ccb->ccb_offset];
2092 		ccb->ccb_cmd_post = (ARC_DMA_DVA(sc->sc_requests) +
2093 		    ccb->ccb_offset) >> ARC_REG_POST_QUEUE_ADDR_SHIFT;
2094 
2095 		arc_put_ccb(sc, ccb);
2096 	}
2097 
2098 	return 0;
2099 
2100 free_maps:
2101 	while ((ccb = arc_get_ccb(sc)) != NULL)
2102 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
2103 	arc_dmamem_free(sc, sc->sc_requests);
2104 
2105 free_ccbs:
2106 	kmem_free(sc->sc_ccbs, ccbslen);
2107 
2108 	return 1;
2109 }
2110 
2111 struct arc_ccb *
2112 arc_get_ccb(struct arc_softc *sc)
2113 {
2114 	struct arc_ccb			*ccb;
2115 
2116 	ccb = TAILQ_FIRST(&sc->sc_ccb_free);
2117 	if (ccb != NULL)
2118 		TAILQ_REMOVE(&sc->sc_ccb_free, ccb, ccb_link);
2119 
2120 	return ccb;
2121 }
2122 
2123 void
2124 arc_put_ccb(struct arc_softc *sc, struct arc_ccb *ccb)
2125 {
2126 	ccb->ccb_xs = NULL;
2127 	memset(ccb->ccb_cmd, 0, ARC_MAX_IOCMDLEN);
2128 	TAILQ_INSERT_TAIL(&sc->sc_ccb_free, ccb, ccb_link);
2129 }
2130