1 /* $OpenBSD: mfi.c,v 1.154 2014/07/13 23:10:23 deraadt Exp $ */
2 /*
3  * Copyright (c) 2006 Marco Peereboom <marco@peereboom.us>
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include "bio.h"
19 
20 #include <sys/types.h>
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/buf.h>
24 #include <sys/ioctl.h>
25 #include <sys/device.h>
26 #include <sys/kernel.h>
27 #include <sys/malloc.h>
28 #include <sys/proc.h>
29 #include <sys/rwlock.h>
30 #include <sys/sensors.h>
31 #include <sys/pool.h>
32 
33 #include <machine/bus.h>
34 
35 #include <scsi/scsi_all.h>
36 #include <scsi/scsi_disk.h>
37 #include <scsi/scsiconf.h>
38 
39 #include <dev/biovar.h>
40 #include <dev/ic/mfireg.h>
41 #include <dev/ic/mfivar.h>
42 
43 #ifdef MFI_DEBUG
44 uint32_t	mfi_debug = 0
45 /*		    | MFI_D_CMD */
46 /*		    | MFI_D_INTR */
47 /*		    | MFI_D_MISC */
48 /*		    | MFI_D_DMA */
49 /*		    | MFI_D_IOCTL */
50 /*		    | MFI_D_RW */
51 /*		    | MFI_D_MEM */
52 /*		    | MFI_D_CCB */
53 		;
54 #endif
55 
56 struct cfdriver mfi_cd = {
57 	NULL, "mfi", DV_DULL
58 };
59 
60 void	mfi_scsi_cmd(struct scsi_xfer *);
61 int	mfi_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
62 void	mfiminphys(struct buf *bp, struct scsi_link *sl);
63 
64 void	mfi_pd_scsi_cmd(struct scsi_xfer *);
65 int	mfi_pd_scsi_probe(struct scsi_link *);
66 
67 struct scsi_adapter mfi_switch = {
68 	mfi_scsi_cmd, mfiminphys, 0, 0, mfi_scsi_ioctl
69 };
70 
71 struct scsi_adapter mfi_pd_switch = {
72 	mfi_pd_scsi_cmd,
73 	mfiminphys,
74 	mfi_pd_scsi_probe,
75 	0,
76 	mfi_scsi_ioctl
77 };
78 
79 void *		mfi_get_ccb(void *);
80 void		mfi_put_ccb(void *, void *);
81 void		mfi_scrub_ccb(struct mfi_ccb *);
82 int		mfi_init_ccb(struct mfi_softc *);
83 
84 struct mfi_mem	*mfi_allocmem(struct mfi_softc *, size_t);
85 void		mfi_freemem(struct mfi_softc *, struct mfi_mem *);
86 
87 int		mfi_transition_firmware(struct mfi_softc *);
88 int		mfi_initialize_firmware(struct mfi_softc *);
89 int		mfi_get_info(struct mfi_softc *);
90 uint32_t	mfi_read(struct mfi_softc *, bus_size_t);
91 void		mfi_write(struct mfi_softc *, bus_size_t, uint32_t);
92 void		mfi_poll(struct mfi_softc *, struct mfi_ccb *);
93 void		mfi_exec(struct mfi_softc *, struct mfi_ccb *);
94 void		mfi_exec_done(struct mfi_softc *, struct mfi_ccb *);
95 int		mfi_create_sgl(struct mfi_softc *, struct mfi_ccb *, int);
96 int		mfi_syspd(struct mfi_softc *);
97 
98 /* commands */
99 int		mfi_scsi_ld(struct mfi_softc *sc, struct mfi_ccb *,
100 		    struct scsi_xfer *);
101 int		mfi_scsi_io(struct mfi_softc *sc, struct mfi_ccb *,
102 		    struct scsi_xfer *, uint64_t, uint32_t);
103 void		mfi_scsi_xs_done(struct mfi_softc *sc, struct mfi_ccb *);
104 int		mfi_mgmt(struct mfi_softc *, uint32_t, uint32_t, uint32_t,
105 		    void *, uint8_t *);
106 int		mfi_do_mgmt(struct mfi_softc *, struct mfi_ccb * , uint32_t,
107 		    uint32_t, uint32_t, void *, uint8_t *);
108 void		mfi_empty_done(struct mfi_softc *, struct mfi_ccb *);
109 
110 #if NBIO > 0
111 int		mfi_ioctl(struct device *, u_long, caddr_t);
112 int		mfi_bio_getitall(struct mfi_softc *);
113 int		mfi_ioctl_inq(struct mfi_softc *, struct bioc_inq *);
114 int		mfi_ioctl_vol(struct mfi_softc *, struct bioc_vol *);
115 int		mfi_ioctl_disk(struct mfi_softc *, struct bioc_disk *);
116 int		mfi_ioctl_alarm(struct mfi_softc *, struct bioc_alarm *);
117 int		mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *);
118 int		mfi_ioctl_setstate(struct mfi_softc *, struct bioc_setstate *);
119 int		mfi_bio_hs(struct mfi_softc *, int, int, void *);
120 #ifndef SMALL_KERNEL
121 int		mfi_create_sensors(struct mfi_softc *);
122 void		mfi_refresh_sensors(void *);
123 int		mfi_bbu(struct mfi_softc *);
124 #endif /* SMALL_KERNEL */
125 #endif /* NBIO > 0 */
126 
127 void		mfi_start(struct mfi_softc *, struct mfi_ccb *);
128 void		mfi_done(struct mfi_softc *, struct mfi_ccb *);
129 u_int32_t	mfi_xscale_fw_state(struct mfi_softc *);
130 void		mfi_xscale_intr_ena(struct mfi_softc *);
131 int		mfi_xscale_intr(struct mfi_softc *);
132 void		mfi_xscale_post(struct mfi_softc *, struct mfi_ccb *);
133 
134 static const struct mfi_iop_ops mfi_iop_xscale = {
135 	mfi_xscale_fw_state,
136 	mfi_xscale_intr_ena,
137 	mfi_xscale_intr,
138 	mfi_xscale_post,
139 	0,
140 };
141 
142 u_int32_t	mfi_ppc_fw_state(struct mfi_softc *);
143 void		mfi_ppc_intr_ena(struct mfi_softc *);
144 int		mfi_ppc_intr(struct mfi_softc *);
145 void		mfi_ppc_post(struct mfi_softc *, struct mfi_ccb *);
146 
147 static const struct mfi_iop_ops mfi_iop_ppc = {
148 	mfi_ppc_fw_state,
149 	mfi_ppc_intr_ena,
150 	mfi_ppc_intr,
151 	mfi_ppc_post,
152 	MFI_IDB,
153 	0
154 };
155 
156 u_int32_t	mfi_gen2_fw_state(struct mfi_softc *);
157 void		mfi_gen2_intr_ena(struct mfi_softc *);
158 int		mfi_gen2_intr(struct mfi_softc *);
159 void		mfi_gen2_post(struct mfi_softc *, struct mfi_ccb *);
160 
161 static const struct mfi_iop_ops mfi_iop_gen2 = {
162 	mfi_gen2_fw_state,
163 	mfi_gen2_intr_ena,
164 	mfi_gen2_intr,
165 	mfi_gen2_post,
166 	MFI_IDB,
167 	0
168 };
169 
170 u_int32_t	mfi_skinny_fw_state(struct mfi_softc *);
171 void		mfi_skinny_intr_ena(struct mfi_softc *);
172 int		mfi_skinny_intr(struct mfi_softc *);
173 void		mfi_skinny_post(struct mfi_softc *, struct mfi_ccb *);
174 
175 static const struct mfi_iop_ops mfi_iop_skinny = {
176 	mfi_skinny_fw_state,
177 	mfi_skinny_intr_ena,
178 	mfi_skinny_intr,
179 	mfi_skinny_post,
180 	MFI_SKINNY_IDB,
181 	MFI_IOP_F_SYSPD
182 };
183 
184 #define mfi_fw_state(_s)	((_s)->sc_iop->mio_fw_state(_s))
185 #define mfi_intr_enable(_s)	((_s)->sc_iop->mio_intr_ena(_s))
186 #define mfi_my_intr(_s)		((_s)->sc_iop->mio_intr(_s))
187 #define mfi_post(_s, _c)	((_s)->sc_iop->mio_post((_s), (_c)))
188 
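/*
 * ccb allocation for the scsi_iopool: mfi_get_ccb() pops a command off
 * the free list under sc_ccb_mtx (returning NULL when the list is empty)
 * and mfi_put_ccb() puts it back.
 */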
189 void *
190 mfi_get_ccb(void *cookie)
191 {
192 	struct mfi_softc	*sc = cookie;
193 	struct mfi_ccb		*ccb;
194 
195 	mtx_enter(&sc->sc_ccb_mtx);
196 	ccb = SLIST_FIRST(&sc->sc_ccb_freeq);
197 	if (ccb != NULL) {
198 		SLIST_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
199 		ccb->ccb_state = MFI_CCB_READY;
200 	}
201 	mtx_leave(&sc->sc_ccb_mtx);
202 
203 	DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb);
204 
205 	return (ccb);
206 }
207 
208 void
209 mfi_put_ccb(void *cookie, void *io)
210 {
211 	struct mfi_softc	*sc = cookie;
212 	struct mfi_ccb		*ccb = io;
213 
214 	DNPRINTF(MFI_D_CCB, "%s: mfi_put_ccb: %p\n", DEVNAME(sc), ccb);
215 
216 	mtx_enter(&sc->sc_ccb_mtx);
217 	SLIST_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
218 	mtx_leave(&sc->sc_ccb_mtx);
219 }
220 
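/*
 * Reset a ccb and its hardware frame header to a clean state before it
 * is reused for a new command.
 */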
221 void
222 mfi_scrub_ccb(struct mfi_ccb *ccb)
223 {
224 	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
225 
226 	hdr->mfh_cmd_status = 0x0;
227 	hdr->mfh_flags = 0x0;
228 	ccb->ccb_state = MFI_CCB_FREE;
229 	ccb->ccb_cookie = NULL;
230 	ccb->ccb_flags = 0;
231 	ccb->ccb_done = NULL;
232 	ccb->ccb_direction = 0;
233 	ccb->ccb_frame_size = 0;
234 	ccb->ccb_extra_frames = 0;
235 	ccb->ccb_sgl = NULL;
236 	ccb->ccb_data = NULL;
237 	ccb->ccb_len = 0;
238 }
239 
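/*
 * Carve the preallocated frame and sense DMA memory into per-command
 * slices, create a data dmamap for every ccb and put the ccbs on the
 * free list.
 */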
240 int
241 mfi_init_ccb(struct mfi_softc *sc)
242 {
243 	struct mfi_ccb		*ccb;
244 	uint32_t		i;
245 	int			error;
246 
247 	DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc));
248 
249 	sc->sc_ccb = mallocarray(sc->sc_max_cmds, sizeof(struct mfi_ccb),
250 	    M_DEVBUF, M_WAITOK|M_ZERO);
251 
252 	for (i = 0; i < sc->sc_max_cmds; i++) {
253 		ccb = &sc->sc_ccb[i];
254 
255 		/* select i'th frame */
256 		ccb->ccb_frame = (union mfi_frame *)
257 		    (MFIMEM_KVA(sc->sc_frames) + sc->sc_frames_size * i);
258 		ccb->ccb_pframe =
259 		    MFIMEM_DVA(sc->sc_frames) + sc->sc_frames_size * i;
260 		ccb->ccb_pframe_offset = sc->sc_frames_size * i;
261 		ccb->ccb_frame->mfr_header.mfh_context = i;
262 
263 		/* select i'th sense */
264 		ccb->ccb_sense = (struct mfi_sense *)
265 		    (MFIMEM_KVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
266 		ccb->ccb_psense =
267 		    (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
268 
269 		/* create a dma map for transfer */
270 		error = bus_dmamap_create(sc->sc_dmat,
271 		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
272 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
273 		if (error) {
274 			printf("%s: cannot create ccb dmamap (%d)\n",
275 			    DEVNAME(sc), error);
276 			goto destroy;
277 		}
278 
279 		DNPRINTF(MFI_D_CCB,
280 		    "ccb(%d): %p frame: %#x (%#x) sense: %#x (%#x) map: %#x\n",
281 		    ccb->ccb_frame->mfr_header.mfh_context, ccb,
282 		    ccb->ccb_frame, ccb->ccb_pframe,
283 		    ccb->ccb_sense, ccb->ccb_psense,
284 		    ccb->ccb_dmamap);
285 
286 		/* add ccb to queue */
287 		mfi_put_ccb(sc, ccb);
288 	}
289 
290 	return (0);
291 destroy:
292 	/* free dma maps and ccb memory */
293 	while ((ccb = mfi_get_ccb(sc)) != NULL)
294 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
295 
296 	free(sc->sc_ccb, M_DEVBUF, 0);
297 
298 	return (1);
299 }
300 
301 uint32_t
302 mfi_read(struct mfi_softc *sc, bus_size_t r)
303 {
304 	uint32_t rv;
305 
306 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
307 	    BUS_SPACE_BARRIER_READ);
308 	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
309 
310 	DNPRINTF(MFI_D_RW, "%s: mr 0x%x 0x%08x ", DEVNAME(sc), r, rv);
311 	return (rv);
312 }
313 
314 void
315 mfi_write(struct mfi_softc *sc, bus_size_t r, uint32_t v)
316 {
317 	DNPRINTF(MFI_D_RW, "%s: mw 0x%x 0x%08x", DEVNAME(sc), r, v);
318 
319 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
320 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
321 	    BUS_SPACE_BARRIER_WRITE);
322 }
323 
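/*
 * Allocate a single-segment DMA buffer that is created, allocated,
 * mapped and loaded in one go; mfi_freemem() tears it down again.
 */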
324 struct mfi_mem *
325 mfi_allocmem(struct mfi_softc *sc, size_t size)
326 {
327 	struct mfi_mem		*mm;
328 	int			nsegs;
329 
330 	DNPRINTF(MFI_D_MEM, "%s: mfi_allocmem: %d\n", DEVNAME(sc),
331 	    size);
332 
333 	mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_NOWAIT|M_ZERO);
334 	if (mm == NULL)
335 		return (NULL);
336 
337 	mm->am_size = size;
338 
339 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
340 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
341 		goto amfree;
342 
343 	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1,
344 	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
345 		goto destroy;
346 
347 	if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva,
348 	    BUS_DMA_NOWAIT) != 0)
349 		goto free;
350 
351 	if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL,
352 	    BUS_DMA_NOWAIT) != 0)
353 		goto unmap;
354 
355 	DNPRINTF(MFI_D_MEM, "  kva: %p  dva: %p  map: %p\n",
356 	    mm->am_kva, mm->am_map->dm_segs[0].ds_addr, mm->am_map);
357 
358 	return (mm);
359 
360 unmap:
361 	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
362 free:
363 	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
364 destroy:
365 	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
366 amfree:
367 	free(mm, M_DEVBUF, 0);
368 
369 	return (NULL);
370 }
371 
372 void
373 mfi_freemem(struct mfi_softc *sc, struct mfi_mem *mm)
374 {
375 	DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm);
376 
377 	bus_dmamap_unload(sc->sc_dmat, mm->am_map);
378 	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size);
379 	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
380 	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
381 	free(mm, M_DEVBUF, 0);
382 }
383 
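/*
 * Walk the controller firmware to the READY state, acknowledging the
 * handshake and operational states via the inbound doorbell register.
 * Returns 0 once the firmware is ready, 1 on a fault, an unknown state
 * or a timeout.
 */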
384 int
385 mfi_transition_firmware(struct mfi_softc *sc)
386 {
387 	int32_t			fw_state, cur_state;
388 	u_int32_t		idb = sc->sc_iop->mio_idb;
389 	int			max_wait, i;
390 
391 	fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
392 
393 	DNPRINTF(MFI_D_CMD, "%s: mfi_transition_firmware: %#x\n", DEVNAME(sc),
394 	    fw_state);
395 
396 	while (fw_state != MFI_STATE_READY) {
397 		DNPRINTF(MFI_D_MISC,
398 		    "%s: waiting for firmware to become ready\n",
399 		    DEVNAME(sc));
400 		cur_state = fw_state;
401 		switch (fw_state) {
402 		case MFI_STATE_FAULT:
403 			printf("%s: firmware fault\n", DEVNAME(sc));
404 			return (1);
405 		case MFI_STATE_WAIT_HANDSHAKE:
406 			mfi_write(sc, idb, MFI_INIT_CLEAR_HANDSHAKE);
407 			max_wait = 2;
408 			break;
409 		case MFI_STATE_OPERATIONAL:
410 			mfi_write(sc, idb, MFI_INIT_READY);
411 			max_wait = 10;
412 			break;
413 		case MFI_STATE_UNDEFINED:
414 		case MFI_STATE_BB_INIT:
415 			max_wait = 2;
416 			break;
417 		case MFI_STATE_FW_INIT:
418 		case MFI_STATE_DEVICE_SCAN:
419 		case MFI_STATE_FLUSH_CACHE:
420 			max_wait = 20;
421 			break;
422 		default:
423 			printf("%s: unknown firmware state %d\n",
424 			    DEVNAME(sc), fw_state);
425 			return (1);
426 		}
427 		for (i = 0; i < (max_wait * 10); i++) {
428 			fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
429 			if (fw_state == cur_state)
430 				DELAY(100000);
431 			else
432 				break;
433 		}
434 		if (fw_state == cur_state) {
435 			printf("%s: firmware stuck in state %#x\n",
436 			    DEVNAME(sc), fw_state);
437 			return (1);
438 		}
439 	}
440 
441 	return (0);
442 }
443 
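/*
 * Hand the reply queue, producer and consumer addresses to the firmware
 * with an INIT frame; the queue info block is built in the frame slot
 * directly behind the init frame itself.
 */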
444 int
445 mfi_initialize_firmware(struct mfi_softc *sc)
446 {
447 	struct mfi_ccb		*ccb;
448 	struct mfi_init_frame	*init;
449 	struct mfi_init_qinfo	*qinfo;
450 	int			rv = 0;
451 
452 	DNPRINTF(MFI_D_MISC, "%s: mfi_initialize_firmware\n", DEVNAME(sc));
453 
454 	ccb = scsi_io_get(&sc->sc_iopool, 0);
455 	mfi_scrub_ccb(ccb);
456 
457 	init = &ccb->ccb_frame->mfr_init;
458 	qinfo = (struct mfi_init_qinfo *)((uint8_t *)init + MFI_FRAME_SIZE);
459 
460 	memset(qinfo, 0, sizeof(*qinfo));
461 	qinfo->miq_rq_entries = htole32(sc->sc_max_cmds + 1);
462 
463 	qinfo->miq_rq_addr = htole64(MFIMEM_DVA(sc->sc_pcq) +
464 	    offsetof(struct mfi_prod_cons, mpc_reply_q));
465 
466 	qinfo->miq_pi_addr = htole64(MFIMEM_DVA(sc->sc_pcq) +
467 	    offsetof(struct mfi_prod_cons, mpc_producer));
468 
469 	qinfo->miq_ci_addr = htole64(MFIMEM_DVA(sc->sc_pcq) +
470 	    offsetof(struct mfi_prod_cons, mpc_consumer));
471 
472 	init->mif_header.mfh_cmd = MFI_CMD_INIT;
473 	init->mif_header.mfh_data_len = htole32(sizeof(*qinfo));
474 	init->mif_qinfo_new_addr = htole64(ccb->ccb_pframe + MFI_FRAME_SIZE);
475 
476 	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq),
477 	    0, MFIMEM_LEN(sc->sc_pcq),
478 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
479 
480 	ccb->ccb_done = mfi_empty_done;
481 	mfi_poll(sc, ccb);
482 	if (init->mif_header.mfh_cmd_status != MFI_STAT_OK)
483 		rv = 1;
484 
485 	mfi_put_ccb(sc, ccb);
486 
487 	return (rv);
488 }
489 
490 void
491 mfi_empty_done(struct mfi_softc *sc, struct mfi_ccb *ccb)
492 {
493 	/* nop */
494 }
495 
496 int
497 mfi_get_info(struct mfi_softc *sc)
498 {
499 #ifdef MFI_DEBUG
500 	int i;
501 #endif
502 	DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc));
503 
504 	if (mfi_mgmt(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN,
505 	    sizeof(sc->sc_info), &sc->sc_info, NULL))
506 		return (1);
507 
508 #ifdef MFI_DEBUG
509 	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
510 		printf("%s: active FW %s Version %s date %s time %s\n",
511 		    DEVNAME(sc),
512 		    sc->sc_info.mci_image_component[i].mic_name,
513 		    sc->sc_info.mci_image_component[i].mic_version,
514 		    sc->sc_info.mci_image_component[i].mic_build_date,
515 		    sc->sc_info.mci_image_component[i].mic_build_time);
516 	}
517 
518 	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
519 		printf("%s: pending FW %s Version %s date %s time %s\n",
520 		    DEVNAME(sc),
521 		    sc->sc_info.mci_pending_image_component[i].mic_name,
522 		    sc->sc_info.mci_pending_image_component[i].mic_version,
523 		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
524 		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
525 	}
526 
527 	printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
528 	    DEVNAME(sc),
529 	    sc->sc_info.mci_max_arms,
530 	    sc->sc_info.mci_max_spans,
531 	    sc->sc_info.mci_max_arrays,
532 	    sc->sc_info.mci_max_lds,
533 	    sc->sc_info.mci_product_name);
534 
535 	printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
536 	    DEVNAME(sc),
537 	    sc->sc_info.mci_serial_number,
538 	    sc->sc_info.mci_hw_present,
539 	    sc->sc_info.mci_current_fw_time,
540 	    sc->sc_info.mci_max_cmds,
541 	    sc->sc_info.mci_max_sg_elements);
542 
543 	printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
544 	    DEVNAME(sc),
545 	    sc->sc_info.mci_max_request_size,
546 	    sc->sc_info.mci_lds_present,
547 	    sc->sc_info.mci_lds_degraded,
548 	    sc->sc_info.mci_lds_offline,
549 	    sc->sc_info.mci_pd_present);
550 
551 	printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
552 	    DEVNAME(sc),
553 	    sc->sc_info.mci_pd_disks_present,
554 	    sc->sc_info.mci_pd_disks_pred_failure,
555 	    sc->sc_info.mci_pd_disks_failed);
556 
557 	printf("%s: nvram %d mem %d flash %d\n",
558 	    DEVNAME(sc),
559 	    sc->sc_info.mci_nvram_size,
560 	    sc->sc_info.mci_memory_size,
561 	    sc->sc_info.mci_flash_size);
562 
563 	printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
564 	    DEVNAME(sc),
565 	    sc->sc_info.mci_ram_correctable_errors,
566 	    sc->sc_info.mci_ram_uncorrectable_errors,
567 	    sc->sc_info.mci_cluster_allowed,
568 	    sc->sc_info.mci_cluster_active);
569 
570 	printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
571 	    DEVNAME(sc),
572 	    sc->sc_info.mci_max_strips_per_io,
573 	    sc->sc_info.mci_raid_levels,
574 	    sc->sc_info.mci_adapter_ops,
575 	    sc->sc_info.mci_ld_ops);
576 
577 	printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
578 	    DEVNAME(sc),
579 	    sc->sc_info.mci_stripe_sz_ops.min,
580 	    sc->sc_info.mci_stripe_sz_ops.max,
581 	    sc->sc_info.mci_pd_ops,
582 	    sc->sc_info.mci_pd_mix_support);
583 
584 	printf("%s: ecc_bucket %d pckg_prop %s\n",
585 	    DEVNAME(sc),
586 	    sc->sc_info.mci_ecc_bucket_count,
587 	    sc->sc_info.mci_package_version);
588 
589 	printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
590 	    DEVNAME(sc),
591 	    sc->sc_info.mci_properties.mcp_seq_num,
592 	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
593 	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
594 	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);
595 
596 	printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
597 	    DEVNAME(sc),
598 	    sc->sc_info.mci_properties.mcp_rebuild_rate,
599 	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
600 	    sc->sc_info.mci_properties.mcp_bgi_rate,
601 	    sc->sc_info.mci_properties.mcp_cc_rate);
602 
603 	printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
604 	    DEVNAME(sc),
605 	    sc->sc_info.mci_properties.mcp_recon_rate,
606 	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
607 	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
608 	    sc->sc_info.mci_properties.mcp_spinup_delay,
609 	    sc->sc_info.mci_properties.mcp_cluster_enable);
610 
611 	printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
612 	    DEVNAME(sc),
613 	    sc->sc_info.mci_properties.mcp_coercion_mode,
614 	    sc->sc_info.mci_properties.mcp_alarm_enable,
615 	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
616 	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
617 	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);
618 
619 	printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
620 	    DEVNAME(sc),
621 	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
622 	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
623 	    sc->sc_info.mci_properties.mcp_expose_encl_devices);
624 
625 	printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
626 	    DEVNAME(sc),
627 	    sc->sc_info.mci_pci.mip_vendor,
628 	    sc->sc_info.mci_pci.mip_device,
629 	    sc->sc_info.mci_pci.mip_subvendor,
630 	    sc->sc_info.mci_pci.mip_subdevice);
631 
632 	printf("%s: type %#x port_count %d port_addr ",
633 	    DEVNAME(sc),
634 	    sc->sc_info.mci_host.mih_type,
635 	    sc->sc_info.mci_host.mih_port_count);
636 
637 	for (i = 0; i < 8; i++)
638 		printf("%.0llx ", sc->sc_info.mci_host.mih_port_addr[i]);
639 	printf("\n");
640 
641 	printf("%s: type %#x port_count %d port_addr ",
642 	    DEVNAME(sc),
643 	    sc->sc_info.mci_device.mid_type,
644 	    sc->sc_info.mci_device.mid_port_count);
645 
646 	for (i = 0; i < 8; i++)
647 		printf("%.0llx ", sc->sc_info.mci_device.mid_port_addr[i]);
648 	printf("\n");
649 #endif /* MFI_DEBUG */
650 
651 	return (0);
652 }
653 
654 void
655 mfiminphys(struct buf *bp, struct scsi_link *sl)
656 {
657 	DNPRINTF(MFI_D_MISC, "mfiminphys: %d\n", bp->b_bcount);
658 
659 	/* XXX currently using MFI_MAXFER = MAXPHYS */
660 	if (bp->b_bcount > MFI_MAXFER)
661 		bp->b_bcount = MFI_MAXFER;
662 	minphys(bp);
663 }
664 
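/*
 * Bring the controller up: pick the IOP backend, wait for the firmware
 * to become ready, size and allocate the reply queue, frame and sense
 * memory, initialize the ccbs and the firmware, then attach the logical
 * disk bus (plus the pass-through bus and bio/sensor hooks where
 * applicable).
 */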
665 int
666 mfi_attach(struct mfi_softc *sc, enum mfi_iop iop)
667 {
668 	struct scsibus_attach_args saa;
669 	uint32_t		status, frames, max_sgl;
670 	int			i;
671 
672 	switch (iop) {
673 	case MFI_IOP_XSCALE:
674 		sc->sc_iop = &mfi_iop_xscale;
675 		break;
676 	case MFI_IOP_PPC:
677 		sc->sc_iop = &mfi_iop_ppc;
678 		break;
679 	case MFI_IOP_GEN2:
680 		sc->sc_iop = &mfi_iop_gen2;
681 		break;
682 	case MFI_IOP_SKINNY:
683 		sc->sc_iop = &mfi_iop_skinny;
684 		break;
685 	default:
686 		panic("%s: unknown iop %d", DEVNAME(sc), iop);
687 	}
688 
689 	DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc));
690 
691 	if (mfi_transition_firmware(sc))
692 		return (1);
693 
694 	SLIST_INIT(&sc->sc_ccb_freeq);
695 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
696 	scsi_iopool_init(&sc->sc_iopool, sc, mfi_get_ccb, mfi_put_ccb);
697 
698 	rw_init(&sc->sc_lock, "mfi_lock");
699 
700 	status = mfi_fw_state(sc);
701 	sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
702 	max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
703 	if (sc->sc_64bit_dma) {
704 		sc->sc_max_sgl = min(max_sgl, (128 * 1024) / PAGE_SIZE + 1);
705 		sc->sc_sgl_size = sizeof(struct mfi_sg64);
706 		sc->sc_sgl_flags = MFI_FRAME_SGL64;
707 	} else {
708 		sc->sc_max_sgl = max_sgl;
709 		sc->sc_sgl_size = sizeof(struct mfi_sg32);
710 		sc->sc_sgl_flags = MFI_FRAME_SGL32;
711 	}
712 	DNPRINTF(MFI_D_MISC, "%s: 64bit: %d max commands: %u, max sgl: %u\n",
713 	    DEVNAME(sc), sc->sc_64bit_dma, sc->sc_max_cmds, sc->sc_max_sgl);
714 
715 	/* consumer/producer and reply queue memory */
716 	sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) +
717 	    sizeof(struct mfi_prod_cons));
718 	if (sc->sc_pcq == NULL) {
719 		printf("%s: unable to allocate reply queue memory\n",
720 		    DEVNAME(sc));
721 		goto nopcq;
722 	}
723 
724 	/* frame memory */
725 	/* we are not doing 64 bit IO so only calculate # of 32 bit frames */
726 	frames = (sc->sc_sgl_size * sc->sc_max_sgl + MFI_FRAME_SIZE - 1) /
727 	    MFI_FRAME_SIZE + 1;
728 	sc->sc_frames_size = frames * MFI_FRAME_SIZE;
729 	sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds);
730 	if (sc->sc_frames == NULL) {
731 		printf("%s: unable to allocate frame memory\n", DEVNAME(sc));
732 		goto noframe;
733 	}
734 	/* XXX hack, fix this */
735 	if (MFIMEM_DVA(sc->sc_frames) & 0x3f) {
736 		printf("%s: improper frame alignment (%#lx) FIXME\n",
737 		    DEVNAME(sc), MFIMEM_DVA(sc->sc_frames));
738 		goto noframe;
739 	}
740 
741 	/* sense memory */
742 	sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
743 	if (sc->sc_sense == NULL) {
744 		printf("%s: unable to allocate sense memory\n", DEVNAME(sc));
745 		goto nosense;
746 	}
747 
748 	/* now that we have all memory bits go initialize ccbs */
749 	if (mfi_init_ccb(sc)) {
750 		printf("%s: could not init ccb list\n", DEVNAME(sc));
751 		goto noinit;
752 	}
753 
754 	/* kickstart firmware with all addresses and pointers */
755 	if (mfi_initialize_firmware(sc)) {
756 		printf("%s: could not initialize firmware\n", DEVNAME(sc));
757 		goto noinit;
758 	}
759 
760 	if (mfi_get_info(sc)) {
761 		printf("%s: could not retrieve controller information\n",
762 		    DEVNAME(sc));
763 		goto noinit;
764 	}
765 
766 	printf("%s: \"%s\", firmware %s", DEVNAME(sc),
767 	    sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
768 	if (letoh16(sc->sc_info.mci_memory_size) > 0)
769 		printf(", %uMB cache", letoh16(sc->sc_info.mci_memory_size));
770 	printf("\n");
771 
772 	sc->sc_ld_cnt = sc->sc_info.mci_lds_present;
773 	for (i = 0; i < sc->sc_ld_cnt; i++)
774 		sc->sc_ld[i].ld_present = 1;
775 
776 	sc->sc_link.adapter = &mfi_switch;
777 	sc->sc_link.adapter_softc = sc;
778 	sc->sc_link.adapter_buswidth = sc->sc_info.mci_max_lds;
779 	sc->sc_link.adapter_target = -1;
780 	sc->sc_link.luns = 1;
781 	sc->sc_link.openings = sc->sc_max_cmds - 1;
782 	sc->sc_link.pool = &sc->sc_iopool;
783 
784 	bzero(&saa, sizeof(saa));
785 	saa.saa_sc_link = &sc->sc_link;
786 
787 	sc->sc_scsibus = (struct scsibus_softc *)
788 	    config_found(&sc->sc_dev, &saa, scsiprint);
789 
790 	if (ISSET(sc->sc_iop->mio_flags, MFI_IOP_F_SYSPD))
791 		mfi_syspd(sc);
792 
793 	/* enable interrupts */
794 	mfi_intr_enable(sc);
795 
796 #if NBIO > 0
797 	if (bio_register(&sc->sc_dev, mfi_ioctl) != 0)
798 		panic("%s: controller registration failed", DEVNAME(sc));
799 	else
800 		sc->sc_ioctl = mfi_ioctl;
801 
802 #ifndef SMALL_KERNEL
803 	if (mfi_create_sensors(sc) != 0)
804 		printf("%s: unable to create sensors\n", DEVNAME(sc));
805 #endif
806 #endif /* NBIO > 0 */
807 
808 	return (0);
809 noinit:
810 	mfi_freemem(sc, sc->sc_sense);
811 nosense:
812 	mfi_freemem(sc, sc->sc_frames);
813 noframe:
814 	mfi_freemem(sc, sc->sc_pcq);
815 nopcq:
816 	return (1);
817 }
818 
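/*
 * Attach a second scsibus for pass-through access to the physical disks
 * reported by MR_DCMD_PD_GET_LIST; only IOPs that advertise
 * MFI_IOP_F_SYSPD get this bus.
 */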
819 int
820 mfi_syspd(struct mfi_softc *sc)
821 {
822 	struct scsibus_attach_args saa;
823 	struct scsi_link *link;
824 	struct mfi_pd_link *pl;
825 	struct mfi_pd_list *pd;
826 	u_int npds, i;
827 
828 	sc->sc_pd = malloc(sizeof(*sc->sc_pd), M_DEVBUF, M_WAITOK|M_ZERO);
829 	if (sc->sc_pd == NULL)
830 		return (1);
831 
832 	pd = malloc(sizeof(*pd), M_TEMP, M_WAITOK|M_ZERO);
833 	if (pd == NULL)
834 		goto nopdsc;
835 
836 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
837 	    sizeof(*pd), pd, NULL) != 0)
838 		goto nopd;
839 
840 	npds = letoh32(pd->mpl_no_pd);
841 	for (i = 0; i < npds; i++) {
842 		pl = malloc(sizeof(*pl), M_DEVBUF, M_WAITOK|M_ZERO);
843 		if (pl == NULL)
844 			goto nopl;
845 
846 		pl->pd_id = pd->mpl_address[i].mpa_pd_id;
847 		sc->sc_pd->pd_links[i] = pl;
848 	}
849 
850 	free(pd, M_TEMP, 0);
851 
852 	link = &sc->sc_pd->pd_link;
853 	link->adapter = &mfi_pd_switch;
854 	link->adapter_softc = sc;
855 	link->adapter_buswidth = MFI_MAX_PD;
856 	link->adapter_target = -1;
857 	link->openings = sc->sc_max_cmds - 1;
858 	link->pool = &sc->sc_iopool;
859 
860 	bzero(&saa, sizeof(saa));
861 	saa.saa_sc_link = link;
862 
863 	sc->sc_pd->pd_scsibus = (struct scsibus_softc *)
864 	    config_found(&sc->sc_dev, &saa, scsiprint);
865 
866 	return (0);
867 nopl:
868 	for (i = 0; i < npds; i++) {
869 		pl = sc->sc_pd->pd_links[i];
870 		if (pl == NULL)
871 			break;
872 
873 		free(pl, M_DEVBUF, 0);
874 	}
875 nopd:
876 	free(pd, M_TEMP, 0);
877 nopdsc:
878 	free(sc->sc_pd, M_DEVBUF, 0);
879 	return (1);
880 }
881 
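/*
 * Start a command and poll its frame status for completion instead of
 * taking a reply-queue interrupt; used for polled requests and while the
 * system is still cold.  Gives up after roughly five seconds and marks
 * the ccb with MFI_CCB_F_ERR.
 */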
882 void
883 mfi_poll(struct mfi_softc *sc, struct mfi_ccb *ccb)
884 {
885 	struct mfi_frame_header *hdr;
886 	int to = 0;
887 
888 	DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc));
889 
890 	hdr = &ccb->ccb_frame->mfr_header;
891 	hdr->mfh_cmd_status = 0xff;
892 	hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
893 
894 	mfi_start(sc, ccb);
895 
896 	for (;;) {
897 		delay(1000);
898 
899 		bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
900 		    ccb->ccb_pframe_offset, sc->sc_frames_size,
901 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
902 
903 		if (hdr->mfh_cmd_status != 0xff)
904 			break;
905 
906 		if (to++ > 5000) {
907 			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
908 			    hdr->mfh_context);
909 			ccb->ccb_flags |= MFI_CCB_F_ERR;
910 			break;
911 		}
912 
913 		bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
914 		    ccb->ccb_pframe_offset, sc->sc_frames_size,
915 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
916 	}
917 
918 	if (ccb->ccb_len > 0) {
919 		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
920 		    ccb->ccb_dmamap->dm_mapsize,
921 		    (ccb->ccb_direction & MFI_DATA_IN) ?
922 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
923 
924 		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
925 	}
926 
927 	ccb->ccb_done(sc, ccb);
928 }
929 
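/*
 * Run a command and sleep until mfi_exec_done() signals completion
 * through the mutex stashed in ccb_cookie.
 */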
930 void
931 mfi_exec(struct mfi_softc *sc, struct mfi_ccb *ccb)
932 {
933 	struct mutex m = MUTEX_INITIALIZER(IPL_BIO);
934 
935 #ifdef DIAGNOSTIC
936 	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
937 		panic("mfi_exec called with cookie or done set");
938 #endif
939 
940 	ccb->ccb_cookie = &m;
941 	ccb->ccb_done = mfi_exec_done;
942 
943 	mfi_start(sc, ccb);
944 
945 	mtx_enter(&m);
946 	while (ccb->ccb_cookie != NULL)
947 		msleep(ccb, &m, PRIBIO, "mfiexec", 0);
948 	mtx_leave(&m);
949 }
950 
951 void
952 mfi_exec_done(struct mfi_softc *sc, struct mfi_ccb *ccb)
953 {
954 	struct mutex *m = ccb->ccb_cookie;
955 
956 	mtx_enter(m);
957 	ccb->ccb_cookie = NULL;
958 	wakeup_one(ccb);
959 	mtx_leave(m);
960 }
961 
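/*
 * Interrupt handler: drain the reply queue between the consumer and
 * producer indices and complete the ccb referenced by each context.
 */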
962 int
963 mfi_intr(void *arg)
964 {
965 	struct mfi_softc	*sc = arg;
966 	struct mfi_prod_cons	*pcq = MFIMEM_KVA(sc->sc_pcq);
967 	struct mfi_ccb		*ccb;
968 	uint32_t		producer, consumer, ctx;
969 	int			claimed = 0;
970 
971 	if (!mfi_my_intr(sc))
972 		return (0);
973 
974 	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq),
975 	    0, MFIMEM_LEN(sc->sc_pcq),
976 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
977 
978 	producer = letoh32(pcq->mpc_producer);
979 	consumer = letoh32(pcq->mpc_consumer);
980 
981 	DNPRINTF(MFI_D_INTR, "%s: mfi_intr %#x %#x\n", DEVNAME(sc), sc, pcq);
982 
983 	while (consumer != producer) {
984 		DNPRINTF(MFI_D_INTR, "%s: mfi_intr pi %#x ci %#x\n",
985 		    DEVNAME(sc), producer, consumer);
986 
987 		ctx = pcq->mpc_reply_q[consumer];
988 		pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX;
989 		if (ctx == MFI_INVALID_CTX)
990 			printf("%s: invalid context, p: %d c: %d\n",
991 			    DEVNAME(sc), producer, consumer);
992 		else {
993 			/* XXX remove from queue and call scsi_done */
994 			ccb = &sc->sc_ccb[ctx];
995 			DNPRINTF(MFI_D_INTR, "%s: mfi_intr context %#x\n",
996 			    DEVNAME(sc), ctx);
997 			mfi_done(sc, ccb);
998 
999 			claimed = 1;
1000 		}
1001 		consumer++;
1002 		if (consumer == (sc->sc_max_cmds + 1))
1003 			consumer = 0;
1004 	}
1005 
1006 	pcq->mpc_consumer = htole32(consumer);
1007 
1008 	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq),
1009 	    0, MFIMEM_LEN(sc->sc_pcq),
1010 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1011 
1012 	return (claimed);
1013 }
1014 
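/*
 * Build an LD READ/WRITE frame for a block I/O request and attach the
 * scatter/gather list for its data buffer.
 */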
1015 int
1016 mfi_scsi_io(struct mfi_softc *sc, struct mfi_ccb *ccb,
1017     struct scsi_xfer *xs, uint64_t blockno, uint32_t blockcnt)
1018 {
1019 	struct scsi_link	*link = xs->sc_link;
1020 	struct mfi_io_frame	*io;
1021 
1022 	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_io: %d\n",
1023 	    DEVNAME((struct mfi_softc *)link->adapter_softc), link->target);
1024 
1025 	if (!xs->data)
1026 		return (1);
1027 
1028 	io = &ccb->ccb_frame->mfr_io;
1029 	if (xs->flags & SCSI_DATA_IN) {
1030 		io->mif_header.mfh_cmd = MFI_CMD_LD_READ;
1031 		ccb->ccb_direction = MFI_DATA_IN;
1032 	} else {
1033 		io->mif_header.mfh_cmd = MFI_CMD_LD_WRITE;
1034 		ccb->ccb_direction = MFI_DATA_OUT;
1035 	}
1036 	io->mif_header.mfh_target_id = link->target;
1037 	io->mif_header.mfh_timeout = 0;
1038 	io->mif_header.mfh_flags = 0;
1039 	io->mif_header.mfh_sense_len = MFI_SENSE_SIZE;
1040 	io->mif_header.mfh_data_len = htole32(blockcnt);
1041 	io->mif_lba = htole64(blockno);
1042 	io->mif_sense_addr = htole64(ccb->ccb_psense);
1043 
1044 	ccb->ccb_done = mfi_scsi_xs_done;
1045 	ccb->ccb_cookie = xs;
1046 	ccb->ccb_frame_size = MFI_IO_FRAME_SIZE;
1047 	ccb->ccb_sgl = &io->mif_sgl;
1048 	ccb->ccb_data = xs->data;
1049 	ccb->ccb_len = xs->datalen;
1050 
1051 	if (mfi_create_sgl(sc, ccb, (xs->flags & SCSI_NOSLEEP) ?
1052 	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
1053 		return (1);
1054 
1055 	return (0);
1056 }
1057 
1058 void
1059 mfi_scsi_xs_done(struct mfi_softc *sc, struct mfi_ccb *ccb)
1060 {
1061 	struct scsi_xfer	*xs = ccb->ccb_cookie;
1062 	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
1063 
1064 	DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %#x %#x\n",
1065 	    DEVNAME(sc), ccb, ccb->ccb_frame);
1066 
1067 	switch (hdr->mfh_cmd_status) {
1068 	case MFI_STAT_OK:
1069 		xs->resid = 0;
1070 		break;
1071 
1072 	case MFI_STAT_SCSI_DONE_WITH_ERROR:
1073 		xs->error = XS_SENSE;
1074 		xs->resid = 0;
1075 		memset(&xs->sense, 0, sizeof(xs->sense));
1076 		memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
1077 		break;
1078 
1079 	case MFI_STAT_DEVICE_NOT_FOUND:
1080 		xs->error = XS_SELTIMEOUT;
1081 		break;
1082 
1083 	default:
1084 		xs->error = XS_DRIVER_STUFFUP;
1085 		DNPRINTF(MFI_D_CMD,
1086 		    "%s: mfi_scsi_xs_done stuffup %02x on %02x\n",
1087 		    DEVNAME(sc), hdr->mfh_cmd_status, xs->cmd->opcode);
1088 
1089 		if (hdr->mfh_scsi_status != 0) {
1090 			DNPRINTF(MFI_D_INTR,
1091 			    "%s: mfi_scsi_xs_done sense %#x %x %x\n",
1092 			    DEVNAME(sc), hdr->mfh_scsi_status,
1093 			    &xs->sense, ccb->ccb_sense);
1094 			memset(&xs->sense, 0, sizeof(xs->sense));
1095 			memcpy(&xs->sense, ccb->ccb_sense,
1096 			    sizeof(struct scsi_sense_data));
1097 			xs->error = XS_SENSE;
1098 		}
1099 		break;
1100 	}
1101 
1102 	scsi_done(xs);
1103 }
1104 
1105 int
1106 mfi_scsi_ld(struct mfi_softc *sc, struct mfi_ccb *ccb, struct scsi_xfer *xs)
1107 {
1108 	struct scsi_link	*link = xs->sc_link;
1109 	struct mfi_pass_frame	*pf;
1110 
1111 	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld: %d\n",
1112 	    DEVNAME((struct mfi_softc *)link->adapter_softc), link->target);
1113 
1114 	pf = &ccb->ccb_frame->mfr_pass;
1115 	pf->mpf_header.mfh_cmd = MFI_CMD_LD_SCSI_IO;
1116 	pf->mpf_header.mfh_target_id = link->target;
1117 	pf->mpf_header.mfh_lun_id = 0;
1118 	pf->mpf_header.mfh_cdb_len = xs->cmdlen;
1119 	pf->mpf_header.mfh_timeout = 0;
1120 	pf->mpf_header.mfh_data_len = htole32(xs->datalen); /* XXX */
1121 	pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;
1122 
1123 	pf->mpf_sense_addr = htole64(ccb->ccb_psense);
1124 
1125 	memset(pf->mpf_cdb, 0, 16);
1126 	memcpy(pf->mpf_cdb, xs->cmd, xs->cmdlen);
1127 
1128 	ccb->ccb_done = mfi_scsi_xs_done;
1129 	ccb->ccb_cookie = xs;
1130 	ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
1131 	ccb->ccb_sgl = &pf->mpf_sgl;
1132 
1133 	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
1134 		ccb->ccb_direction = xs->flags & SCSI_DATA_IN ?
1135 		    MFI_DATA_IN : MFI_DATA_OUT;
1136 	else
1137 		ccb->ccb_direction = MFI_DATA_NONE;
1138 
1139 	if (xs->data) {
1140 		ccb->ccb_data = xs->data;
1141 		ccb->ccb_len = xs->datalen;
1142 
1143 		if (mfi_create_sgl(sc, ccb, (xs->flags & SCSI_NOSLEEP) ?
1144 		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
1145 			return (1);
1146 	}
1147 
1148 	return (0);
1149 }
1150 
1151 void
1152 mfi_scsi_cmd(struct scsi_xfer *xs)
1153 {
1154 	struct scsi_link	*link = xs->sc_link;
1155 	struct mfi_softc	*sc = link->adapter_softc;
1156 	struct mfi_ccb		*ccb = xs->io;
1157 	struct scsi_rw		*rw;
1158 	struct scsi_rw_big	*rwb;
1159 	struct scsi_rw_16	*rw16;
1160 	uint64_t		blockno;
1161 	uint32_t		blockcnt;
1162 	uint8_t			target = link->target;
1163 	uint8_t			mbox[MFI_MBOX_SIZE];
1164 
1165 	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_cmd opcode: %#x\n",
1166 	    DEVNAME(sc), xs->cmd->opcode);
1167 
1168 	if (!sc->sc_ld[target].ld_present) {
1169 		DNPRINTF(MFI_D_CMD, "%s: invalid target %d\n",
1170 		    DEVNAME(sc), target);
1171 		goto stuffup;
1172 	}
1173 
1174 	mfi_scrub_ccb(ccb);
1175 
1176 	xs->error = XS_NOERROR;
1177 
1178 	switch (xs->cmd->opcode) {
1179 	/* IO path */
1180 	case READ_BIG:
1181 	case WRITE_BIG:
1182 		rwb = (struct scsi_rw_big *)xs->cmd;
1183 		blockno = (uint64_t)_4btol(rwb->addr);
1184 		blockcnt = _2btol(rwb->length);
1185 		if (mfi_scsi_io(sc, ccb, xs, blockno, blockcnt))
1186 			goto stuffup;
1187 		break;
1188 
1189 	case READ_COMMAND:
1190 	case WRITE_COMMAND:
1191 		rw = (struct scsi_rw *)xs->cmd;
1192 		blockno =
1193 		    (uint64_t)(_3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff));
1194 		blockcnt = rw->length ? rw->length : 0x100;
1195 		if (mfi_scsi_io(sc, ccb, xs, blockno, blockcnt))
1196 			goto stuffup;
1197 		break;
1198 
1199 	case READ_16:
1200 	case WRITE_16:
1201 		rw16 = (struct scsi_rw_16 *)xs->cmd;
1202 		blockno = _8btol(rw16->addr);
1203 		blockcnt = _4btol(rw16->length);
1204 		if (mfi_scsi_io(sc, ccb, xs, blockno, blockcnt))
1205 			goto stuffup;
1206 		break;
1207 
1208 	case SYNCHRONIZE_CACHE:
1209 		mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
1210 		if (mfi_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH,
1211 		    MFI_DATA_NONE, 0, NULL, mbox))
1212 			goto stuffup;
1213 
1214 		goto complete;
1215 		/* NOTREACHED */
1216 
1217 	default:
1218 		if (mfi_scsi_ld(sc, ccb, xs))
1219 			goto stuffup;
1220 		break;
1221 	}
1222 
1223 	DNPRINTF(MFI_D_CMD, "%s: start io %d\n", DEVNAME(sc), target);
1224 
1225 	if (xs->flags & SCSI_POLL)
1226 		mfi_poll(sc, ccb);
1227 	else
1228 		mfi_start(sc, ccb);
1229 
1230 	return;
1231 
1232 stuffup:
1233 	xs->error = XS_DRIVER_STUFFUP;
1234 complete:
1235 	scsi_done(xs);
1236 }
1237 
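/*
 * Load the ccb data buffer into its dmamap and fill in the 32- or 64-bit
 * scatter/gather list, adjusting the frame size and extra frame count to
 * cover the entries.
 */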
1238 int
1239 mfi_create_sgl(struct mfi_softc *sc, struct mfi_ccb *ccb, int flags)
1240 {
1241 	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
1242 	bus_dma_segment_t	*sgd;
1243 	union mfi_sgl		*sgl;
1244 	int			error, i;
1245 
1246 	DNPRINTF(MFI_D_DMA, "%s: mfi_create_sgl %#x\n", DEVNAME(sc),
1247 	    ccb->ccb_data);
1248 
1249 	if (!ccb->ccb_data) {
1250 		hdr->mfh_sg_count = 0;
1251 		return (1);
1252 	}
1253 
1254 	error = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap,
1255 	    ccb->ccb_data, ccb->ccb_len, NULL, flags);
1256 	if (error) {
1257 		if (error == EFBIG)
1258 			printf("more than %d dma segs\n",
1259 			    sc->sc_max_sgl);
1260 		else
1261 			printf("error %d loading dma map\n", error);
1262 		return (1);
1263 	}
1264 
1265 	sgl = ccb->ccb_sgl;
1266 	sgd = ccb->ccb_dmamap->dm_segs;
1267 	for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
1268 		if (sc->sc_64bit_dma) {
1269 			sgl->sg64[i].addr = htole64(sgd[i].ds_addr);
1270 			sgl->sg64[i].len = htole32(sgd[i].ds_len);
1271 			DNPRINTF(MFI_D_DMA, "%s: addr: %#x  len: %#x\n",
1272 			    DEVNAME(sc), sgl->sg64[i].addr, sgl->sg64[i].len);
1273 		} else {
1274 			sgl->sg32[i].addr = htole32(sgd[i].ds_addr);
1275 			sgl->sg32[i].len = htole32(sgd[i].ds_len);
1276 			DNPRINTF(MFI_D_DMA, "%s: addr: %#x  len: %#x\n",
1277 			    DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len);
1278 		}
1279 	}
1280 
1281 	if (ccb->ccb_direction == MFI_DATA_IN) {
1282 		hdr->mfh_flags |= MFI_FRAME_DIR_READ;
1283 		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1284 		    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1285 	} else {
1286 		hdr->mfh_flags |= MFI_FRAME_DIR_WRITE;
1287 		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1288 		    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1289 	}
1290 
1291 	hdr->mfh_flags |= sc->sc_sgl_flags;
1292 	hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;
1293 	ccb->ccb_frame_size += sc->sc_sgl_size * ccb->ccb_dmamap->dm_nsegs;
1294 	ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE;
1295 
1296 	DNPRINTF(MFI_D_DMA, "%s: sg_count: %d  frame_size: %d  frames_size: %d"
1297 	    "  dm_nsegs: %d  extra_frames: %d\n",
1298 	    DEVNAME(sc),
1299 	    hdr->mfh_sg_count,
1300 	    ccb->ccb_frame_size,
1301 	    sc->sc_frames_size,
1302 	    ccb->ccb_dmamap->dm_nsegs,
1303 	    ccb->ccb_extra_frames);
1304 
1305 	return (0);
1306 }
1307 
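/*
 * mfi_mgmt() grabs a ccb from the iopool and runs a DCMD through
 * mfi_do_mgmt(), which bounces the caller's buffer through dma_alloc()
 * memory and either polls or sleeps depending on whether the system is
 * still cold.
 */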
1308 int
1309 mfi_mgmt(struct mfi_softc *sc, uint32_t opc, uint32_t dir, uint32_t len,
1310     void *buf, uint8_t *mbox)
1311 {
1312 	struct mfi_ccb *ccb;
1313 	int rv;
1314 
1315 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1316 	mfi_scrub_ccb(ccb);
1317 	rv = mfi_do_mgmt(sc, ccb, opc, dir, len, buf, mbox);
1318 	scsi_io_put(&sc->sc_iopool, ccb);
1319 
1320 	return (rv);
1321 }
1322 
1323 int
1324 mfi_do_mgmt(struct mfi_softc *sc, struct mfi_ccb *ccb, uint32_t opc,
1325     uint32_t dir, uint32_t len, void *buf, uint8_t *mbox)
1326 {
1327 	struct mfi_dcmd_frame *dcmd;
1328 	uint8_t *dma_buf = NULL;
1329 	int rv = EINVAL;
1330 
1331 	DNPRINTF(MFI_D_MISC, "%s: mfi_do_mgmt %#x\n", DEVNAME(sc), opc);
1332 
1333 	dma_buf = dma_alloc(len, PR_WAITOK);
1334 	if (dma_buf == NULL)
1335 		goto done;
1336 
1337 	dcmd = &ccb->ccb_frame->mfr_dcmd;
1338 	memset(dcmd->mdf_mbox, 0, MFI_MBOX_SIZE);
1339 	dcmd->mdf_header.mfh_cmd = MFI_CMD_DCMD;
1340 	dcmd->mdf_header.mfh_timeout = 0;
1341 
1342 	dcmd->mdf_opcode = opc;
1343 	dcmd->mdf_header.mfh_data_len = 0;
1344 	ccb->ccb_direction = dir;
1345 
1346 	ccb->ccb_frame_size = MFI_DCMD_FRAME_SIZE;
1347 
1348 	/* handle special opcodes */
1349 	if (mbox)
1350 		memcpy(dcmd->mdf_mbox, mbox, MFI_MBOX_SIZE);
1351 
1352 	if (dir != MFI_DATA_NONE) {
1353 		if (dir == MFI_DATA_OUT)
1354 			bcopy(buf, dma_buf, len);
1355 		dcmd->mdf_header.mfh_data_len = len;
1356 		ccb->ccb_data = dma_buf;
1357 		ccb->ccb_len = len;
1358 		ccb->ccb_sgl = &dcmd->mdf_sgl;
1359 
1360 		if (mfi_create_sgl(sc, ccb, BUS_DMA_WAITOK)) {
1361 			rv = EINVAL;
1362 			goto done;
1363 		}
1364 	}
1365 
1366 	if (cold) {
1367 		ccb->ccb_done = mfi_empty_done;
1368 		mfi_poll(sc, ccb);
1369 	} else
1370 		mfi_exec(sc, ccb);
1371 
1372 	if (dcmd->mdf_header.mfh_cmd_status != MFI_STAT_OK) {
1373 		rv = EIO;
1374 		goto done;
1375 	}
1376 
1377 	if (dir == MFI_DATA_IN)
1378 		bcopy(dma_buf, buf, len);
1379 
1380 	rv = 0;
1381 done:
1382 	if (dma_buf)
1383 		dma_free(dma_buf, len);
1384 
1385 	return (rv);
1386 }
1387 
1388 int
1389 mfi_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
1390 {
1391 	struct mfi_softc	*sc = (struct mfi_softc *)link->adapter_softc;
1392 
1393 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_scsi_ioctl\n", DEVNAME(sc));
1394 
1395 	if (sc->sc_ioctl)
1396 		return (sc->sc_ioctl(link->adapter_softc, cmd, addr));
1397 	else
1398 		return (ENOTTY);
1399 }
1400 
1401 #if NBIO > 0
1402 int
1403 mfi_ioctl(struct device *dev, u_long cmd, caddr_t addr)
1404 {
1405 	struct mfi_softc	*sc = (struct mfi_softc *)dev;
1406 	int error = 0;
1407 
1408 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl ", DEVNAME(sc));
1409 
1410 	rw_enter_write(&sc->sc_lock);
1411 
1412 	switch (cmd) {
1413 	case BIOCINQ:
1414 		DNPRINTF(MFI_D_IOCTL, "inq\n");
1415 		error = mfi_ioctl_inq(sc, (struct bioc_inq *)addr);
1416 		break;
1417 
1418 	case BIOCVOL:
1419 		DNPRINTF(MFI_D_IOCTL, "vol\n");
1420 		error = mfi_ioctl_vol(sc, (struct bioc_vol *)addr);
1421 		break;
1422 
1423 	case BIOCDISK:
1424 		DNPRINTF(MFI_D_IOCTL, "disk\n");
1425 		error = mfi_ioctl_disk(sc, (struct bioc_disk *)addr);
1426 		break;
1427 
1428 	case BIOCALARM:
1429 		DNPRINTF(MFI_D_IOCTL, "alarm\n");
1430 		error = mfi_ioctl_alarm(sc, (struct bioc_alarm *)addr);
1431 		break;
1432 
1433 	case BIOCBLINK:
1434 		DNPRINTF(MFI_D_IOCTL, "blink\n");
1435 		error = mfi_ioctl_blink(sc, (struct bioc_blink *)addr);
1436 		break;
1437 
1438 	case BIOCSETSTATE:
1439 		DNPRINTF(MFI_D_IOCTL, "setstate\n");
1440 		error = mfi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
1441 		break;
1442 
1443 	default:
1444 		DNPRINTF(MFI_D_IOCTL, " invalid ioctl\n");
1445 		error = EINVAL;
1446 	}
1447 
1448 	rw_exit_write(&sc->sc_lock);
1449 
1450 	return (error);
1451 }
1452 
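/*
 * Refresh the cached controller configuration, logical disk list and
 * per-LD details used by the bio(4) ioctls, and recount the physical
 * disks that are members of arrays.
 */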
1453 int
1454 mfi_bio_getitall(struct mfi_softc *sc)
1455 {
1456 	int			i, d, size, rv = EINVAL;
1457 	uint8_t			mbox[MFI_MBOX_SIZE];
1458 	struct mfi_conf		*cfg = NULL;
1459 	struct mfi_ld_details	*ld_det = NULL;
1460 
1461 	/* get info */
1462 	if (mfi_get_info(sc)) {
1463 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_get_info failed\n",
1464 		    DEVNAME(sc));
1465 		goto done;
1466 	}
1467 
1468 	/* send single element command to retrieve size for full structure */
1469 	cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
1470 	if (cfg == NULL)
1471 		goto done;
1472 	if (mfi_mgmt(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg,
1473 	    NULL)) {
1474 		free(cfg, M_DEVBUF, 0);
1475 		goto done;
1476 	}
1477 
1478 	size = cfg->mfc_size;
1479 	free(cfg, M_DEVBUF, 0);
1480 
1481 	/* memory for read config */
1482 	cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
1483 	if (cfg == NULL)
1484 		goto done;
1485 	if (mfi_mgmt(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, size, cfg, NULL)) {
1486 		free(cfg, M_DEVBUF, 0);
1487 		goto done;
1488 	}
1489 
1490 	/* replace current pointer with new one */
1491 	if (sc->sc_cfg)
1492 		free(sc->sc_cfg, M_DEVBUF, 0);
1493 	sc->sc_cfg = cfg;
1494 
1495 	/* get all ld info */
1496 	if (mfi_mgmt(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
1497 	    sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL))
1498 		goto done;
1499 
1500 	/* get memory for all ld structures */
1501 	size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
1502 	if (sc->sc_ld_sz != size) {
1503 		if (sc->sc_ld_details)
1504 			free(sc->sc_ld_details, M_DEVBUF, 0);
1505 
1506 		ld_det = malloc( size, M_DEVBUF, M_NOWAIT | M_ZERO);
1507 		if (ld_det == NULL)
1508 			goto done;
1509 		sc->sc_ld_sz = size;
1510 		sc->sc_ld_details = ld_det;
1511 	}
1512 
1513 	/* find used physical disks */
1514 	size = sizeof(struct mfi_ld_details);
1515 	for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
1516 		mbox[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
1517 		if (mfi_mgmt(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN, size,
1518 		    &sc->sc_ld_details[i], mbox))
1519 			goto done;
1520 
1521 		d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
1522 		    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
1523 	}
1524 	sc->sc_no_pd = d;
1525 
1526 	rv = 0;
1527 done:
1528 	return (rv);
1529 }
1530 
1531 int
1532 mfi_ioctl_inq(struct mfi_softc *sc, struct bioc_inq *bi)
1533 {
1534 	int			rv = EINVAL;
1535 	struct mfi_conf		*cfg = NULL;
1536 
1537 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq\n", DEVNAME(sc));
1538 
1539 	if (mfi_bio_getitall(sc)) {
1540 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_bio_getitall failed\n",
1541 		    DEVNAME(sc));
1542 		goto done;
1543 	}
1544 
1545 	/* count unused disks as volumes */
1546 	if (sc->sc_cfg == NULL)
1547 		goto done;
1548 	cfg = sc->sc_cfg;
1549 
1550 	bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
1551 	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
1552 #if notyet
1553 	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
1554 	    (bi->bi_nodisk - sc->sc_no_pd);
1555 #endif
1556 	/* tell bio who we are */
1557 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
1558 
1559 	rv = 0;
1560 done:
1561 	return (rv);
1562 }
1563 
1564 int
1565 mfi_ioctl_vol(struct mfi_softc *sc, struct bioc_vol *bv)
1566 {
1567 	int			i, per, rv = EINVAL;
1568 	struct scsi_link	*link;
1569 	struct device		*dev;
1570 
1571 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol %#x\n",
1572 	    DEVNAME(sc), bv->bv_volid);
1573 
1574 	/* we really could skip and expect that inq took care of it */
1575 	if (mfi_bio_getitall(sc)) {
1576 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_bio_getitall failed\n",
1577 		    DEVNAME(sc));
1578 		goto done;
1579 	}
1580 
1581 	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
1582 		/* go do hotspares & unused disks */
1583 		rv = mfi_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
1584 		goto done;
1585 	}
1586 
1587 	i = bv->bv_volid;
1588 	link = scsi_get_link(sc->sc_scsibus, i, 0);
1589 	if (link != NULL && link->device_softc != NULL) {
1590 		dev = link->device_softc;
1591 		strlcpy(bv->bv_dev, dev->dv_xname, sizeof(bv->bv_dev));
1592 	}
1593 
1594 	switch(sc->sc_ld_list.mll_list[i].mll_state) {
1595 	case MFI_LD_OFFLINE:
1596 		bv->bv_status = BIOC_SVOFFLINE;
1597 		break;
1598 
1599 	case MFI_LD_PART_DEGRADED:
1600 	case MFI_LD_DEGRADED:
1601 		bv->bv_status = BIOC_SVDEGRADED;
1602 		break;
1603 
1604 	case MFI_LD_ONLINE:
1605 		bv->bv_status = BIOC_SVONLINE;
1606 		break;
1607 
1608 	default:
1609 		bv->bv_status = BIOC_SVINVALID;
1610 		DNPRINTF(MFI_D_IOCTL, "%s: invalid logical disk state %#x\n",
1611 		    DEVNAME(sc),
1612 		    sc->sc_ld_list.mll_list[i].mll_state);
1613 	}
1614 
1615 	/* additional status can modify MFI status */
1616 	switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
1617 	case MFI_LD_PROG_CC:
1618 	case MFI_LD_PROG_BGI:
1619 		bv->bv_status = BIOC_SVSCRUB;
1620 		per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
1621 		bv->bv_percent = (per * 100) / 0xffff;
1622 		bv->bv_seconds =
1623 		    sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
1624 		break;
1625 
1626 	case MFI_LD_PROG_FGI:
1627 	case MFI_LD_PROG_RECONSTRUCT:
1628 		/* nothing yet */
1629 		break;
1630 	}
1631 
1632 	if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01)
1633 		bv->bv_cache = BIOC_CVWRITEBACK;
1634 	else
1635 		bv->bv_cache = BIOC_CVWRITETHROUGH;
1636 
1637 	/*
1638 	 * The RAID levels are determined per the SNIA DDF spec, this is only
1639 	 * a subset that is valid for the MFI controller.
1640 	 */
1641 	bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
1642 	if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_sec_raid ==
1643 	    MFI_DDF_SRL_SPANNED)
1644 		bv->bv_level *= 10;
1645 
1646 	bv->bv_nodisk = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
1647 	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
1648 
1649 	bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */
1650 
1651 	rv = 0;
1652 done:
1653 	return (rv);
1654 }
1655 
1656 int
1657 mfi_ioctl_disk(struct mfi_softc *sc, struct bioc_disk *bd)
1658 {
1659 	struct mfi_conf		*cfg;
1660 	struct mfi_array	*ar;
1661 	struct mfi_ld_cfg	*ld;
1662 	struct mfi_pd_details	*pd;
1663 	struct scsi_inquiry_data *inqbuf;
1664 	char			vend[8+16+4+1], *vendp;
1665 	int			rv = EINVAL;
1666 	int			arr, vol, disk, span;
1667 	uint8_t			mbox[MFI_MBOX_SIZE];
1668 
1669 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_disk %#x\n",
1670 	    DEVNAME(sc), bd->bd_diskid);
1671 
1672 	/* we really could skip and expect that inq took care of it */
1673 	if (mfi_bio_getitall(sc)) {
1674 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_bio_getitall failed\n",
1675 		    DEVNAME(sc));
1676 		return (rv);
1677 	}
1678 	cfg = sc->sc_cfg;
1679 
1680 	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
1681 
1682 	ar = cfg->mfc_array;
1683 	vol = bd->bd_volid;
1684 	if (vol >= cfg->mfc_no_ld) {
1685 		/* do hotspares */
1686 		rv = mfi_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
1687 		goto freeme;
1688 	}
1689 
1690 	/* calculate offset to ld structure */
1691 	ld = (struct mfi_ld_cfg *)(
1692 	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
1693 	    cfg->mfc_array_size * cfg->mfc_no_array);
1694 
1695 	/* use span 0 only when raid group is not spanned */
1696 	if (ld[vol].mlc_parm.mpa_span_depth > 1)
1697 		span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
1698 	else
1699 		span = 0;
1700 	arr = ld[vol].mlc_span[span].mls_index;
1701 
1702 	/* offset disk into pd list */
1703 	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;
1704 	bd->bd_target = ar[arr].pd[disk].mar_enc_slot;
1705 
1706 	/* get status */
1707 	switch (ar[arr].pd[disk].mar_pd_state){
1708 	case MFI_PD_UNCONFIG_GOOD:
1709 	case MFI_PD_FAILED:
1710 		bd->bd_status = BIOC_SDFAILED;
1711 		break;
1712 
1713 	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
1714 		bd->bd_status = BIOC_SDHOTSPARE;
1715 		break;
1716 
1717 	case MFI_PD_OFFLINE:
1718 		bd->bd_status = BIOC_SDOFFLINE;
1719 		break;
1720 
1721 	case MFI_PD_REBUILD:
1722 		bd->bd_status = BIOC_SDREBUILD;
1723 		break;
1724 
1725 	case MFI_PD_ONLINE:
1726 		bd->bd_status = BIOC_SDONLINE;
1727 		break;
1728 
1729 	case MFI_PD_UNCONFIG_BAD: /* XXX define new state in bio */
1730 	default:
1731 		bd->bd_status = BIOC_SDINVALID;
1732 		break;
1733 	}
1734 
1735 	/* get the remaining fields */
1736 	*((uint16_t *)&mbox) = ar[arr].pd[disk].mar_pd.mfp_id;
1737 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
1738 	    sizeof *pd, pd, mbox)) {
1739 		/* disk is missing but succeed command */
1740 		rv = 0;
1741 		goto freeme;
1742 	}
1743 
1744 	bd->bd_size = pd->mpd_size * 512; /* bytes per block */
1745 
1746 	/* if pd->mpd_enc_idx is 0 then it is not in an enclosure */
1747 	bd->bd_channel = pd->mpd_enc_idx;
1748 
1749 	inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
1750 	vendp = inqbuf->vendor;
1751 	memcpy(vend, vendp, sizeof vend - 1);
1752 	vend[sizeof vend - 1] = '\0';
1753 	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));
1754 
1755 	/* XXX find a way to retrieve serial nr from drive */
1756 	/* XXX find a way to get bd_procdev */
1757 
1758 	rv = 0;
1759 freeme:
1760 	free(pd, M_DEVBUF, 0);
1761 
1762 	return (rv);
1763 }
1764 
1765 int
1766 mfi_ioctl_alarm(struct mfi_softc *sc, struct bioc_alarm *ba)
1767 {
1768 	uint32_t		opc, dir = MFI_DATA_NONE;
1769 	int			rv = 0;
1770 	int8_t			ret;
1771 
1772 	switch(ba->ba_opcode) {
1773 	case BIOC_SADISABLE:
1774 		opc = MR_DCMD_SPEAKER_DISABLE;
1775 		break;
1776 
1777 	case BIOC_SAENABLE:
1778 		opc = MR_DCMD_SPEAKER_ENABLE;
1779 		break;
1780 
1781 	case BIOC_SASILENCE:
1782 		opc = MR_DCMD_SPEAKER_SILENCE;
1783 		break;
1784 
1785 	case BIOC_GASTATUS:
1786 		opc = MR_DCMD_SPEAKER_GET;
1787 		dir = MFI_DATA_IN;
1788 		break;
1789 
1790 	case BIOC_SATEST:
1791 		opc = MR_DCMD_SPEAKER_TEST;
1792 		break;
1793 
1794 	default:
1795 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_alarm biocalarm invalid "
1796 		    "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
1797 		return (EINVAL);
1798 	}
1799 
1800 	if (mfi_mgmt(sc, opc, dir, sizeof(ret), &ret, NULL))
1801 		rv = EINVAL;
1802 	else
1803 		if (ba->ba_opcode == BIOC_GASTATUS)
1804 			ba->ba_status = ret;
1805 		else
1806 			ba->ba_status = 0;
1807 
1808 	return (rv);
1809 }
1810 
1811 int
1812 mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *bb)
1813 {
1814 	int			i, found, rv = EINVAL;
1815 	uint8_t			mbox[MFI_MBOX_SIZE];
1816 	uint32_t		cmd;
1817 	struct mfi_pd_list	*pd;
1818 
1819 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink %x\n", DEVNAME(sc),
1820 	    bb->bb_status);
1821 
1822 	/* channel 0 means not in an enclosure so can't be blinked */
1823 	if (bb->bb_channel == 0)
1824 		return (EINVAL);
1825 
1826 	pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
1827 
1828 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1829 	    sizeof(*pd), pd, NULL))
1830 		goto done;
1831 
1832 	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1833 		if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
1834 		    bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
1835 			found = 1;
1836 			break;
1837 		}
1838 
1839 	if (!found)
1840 		goto done;
1841 
1842 	memset(mbox, 0, sizeof mbox);
1843 
1844 	*((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
1845 
1846 	switch (bb->bb_status) {
1847 	case BIOC_SBUNBLINK:
1848 		cmd = MR_DCMD_PD_UNBLINK;
1849 		break;
1850 
1851 	case BIOC_SBBLINK:
1852 		cmd = MR_DCMD_PD_BLINK;
1853 		break;
1854 
1855 	case BIOC_SBALARM:
1856 	default:
1857 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink biocblink invalid "
1858 		    "opcode %x\n", DEVNAME(sc), bb->bb_status);
1859 		goto done;
1860 	}
1861 
1862 
1863 	if (mfi_mgmt(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox))
1864 		goto done;
1865 
1866 	rv = 0;
1867 done:
1868 	free(pd, M_DEVBUF, 0);
1869 	return (rv);
1870 }
1871 
1872 int
1873 mfi_ioctl_setstate(struct mfi_softc *sc, struct bioc_setstate *bs)
1874 {
1875 	struct mfi_pd_list	*pd;
1876 	int			i, found, rv = EINVAL;
1877 	uint8_t			mbox[MFI_MBOX_SIZE];
1878 
1879 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate %x\n", DEVNAME(sc),
1880 	    bs->bs_status);
1881 
1882 	pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
1883 
1884 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1885 	    sizeof(*pd), pd, NULL))
1886 		goto done;
1887 
1888 	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1889 		if (bs->bs_channel == pd->mpl_address[i].mpa_enc_index &&
1890 		    bs->bs_target == pd->mpl_address[i].mpa_enc_slot) {
1891 			found = 1;
1892 			break;
1893 		}
1894 
1895 	if (!found)
1896 		goto done;
1897 
1898 	memset(mbox, 0, sizeof mbox);
1899 
1900 	*((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
1901 
1902 	switch (bs->bs_status) {
1903 	case BIOC_SSONLINE:
1904 		mbox[2] = MFI_PD_ONLINE;
1905 		break;
1906 
1907 	case BIOC_SSOFFLINE:
1908 		mbox[2] = MFI_PD_OFFLINE;
1909 		break;
1910 
1911 	case BIOC_SSHOTSPARE:
1912 		mbox[2] = MFI_PD_HOTSPARE;
1913 		break;
1914 /*
1915 	case BIOC_SSREBUILD:
1916 		break;
1917 */
1918 	default:
1919 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate invalid "
1920 		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
1921 		goto done;
1922 	}
1923 
1925 	if (mfi_mgmt(sc, MR_DCMD_PD_SET_STATE, MFI_DATA_NONE, 0, NULL, mbox))
1926 		goto done;
1927 
1928 	rv = 0;
1929 done:
1930 	free(pd, M_DEVBUF, 0);
1931 	return (rv);
1932 }
1933 
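/*
 * Describe a hotspare to bioctl, either as a volume (MFI_MGMT_VD) or as a
 * disk (MFI_MGMT_SD).  The firmware configuration is fetched twice: once
 * at its minimum size to learn the full size, then in full.  The hotspare
 * records sit directly behind the array and logical drive records in that
 * buffer, so the requested volid is translated into an index into them.
 */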
1934 int
1935 mfi_bio_hs(struct mfi_softc *sc, int volid, int type, void *bio_hs)
1936 {
1937 	struct mfi_conf		*cfg;
1938 	struct mfi_hotspare	*hs;
1939 	struct mfi_pd_details	*pd;
1940 	struct bioc_disk	*sdhs;
1941 	struct bioc_vol		*vdhs;
1942 	struct scsi_inquiry_data *inqbuf;
1943 	char			vend[8+16+4+1], *vendp;
1944 	int			i, rv = EINVAL;
1945 	uint32_t		size;
1946 	uint8_t			mbox[MFI_MBOX_SIZE];
1947 
1948 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs %d\n", DEVNAME(sc), volid);
1949 
1950 	if (!bio_hs)
1951 		return (EINVAL);
1952 
1953 	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
1954 
1955 	/* send single element command to retrieve size for full structure */
1956 	cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1957 	if (mfi_mgmt(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg, NULL))
1958 		goto freeme;
1959 
1960 	size = cfg->mfc_size;
1961 	free(cfg, M_DEVBUF, 0);
1962 
1963 	/* memory for read config */
1964 	cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
1965 	if (mfi_mgmt(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, size, cfg, NULL))
1966 		goto freeme;
1967 
1968 	/* calculate offset to hs structure */
1969 	hs = (struct mfi_hotspare *)(
1970 	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
1971 	    cfg->mfc_array_size * cfg->mfc_no_array +
1972 	    cfg->mfc_ld_size * cfg->mfc_no_ld);
1973 
1974 	if (volid < cfg->mfc_no_ld)
1975 		goto freeme; /* not a hotspare */
1976 
1977 	if (volid >= (cfg->mfc_no_ld + cfg->mfc_no_hs))
1978 		goto freeme; /* not a hotspare */
1979 
1980 	/* offset into hotspare structure */
1981 	i = volid - cfg->mfc_no_ld;
1982 
1983 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs i %d volid %d no_ld %d no_hs %d "
1984 	    "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
1985 	    cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
1986 
1987 	/* get pd fields */
1988 	memset(mbox, 0, sizeof mbox);
1989 	*((uint16_t *)&mbox) = hs[i].mhs_pd.mfp_id;
1990 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
1991 	    sizeof *pd, pd, mbox)) {
1992 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs illegal PD\n",
1993 		    DEVNAME(sc));
1994 		goto freeme;
1995 	}
1996 
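	/*
	 * mpd_size is apparently reported in 512-byte sectors; the
	 * "/ 2 * 1024" below is just a roundabout way of multiplying
	 * by 512 to report the size in bytes.
	 */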
1997 	switch (type) {
1998 	case MFI_MGMT_VD:
1999 		vdhs = bio_hs;
2000 		vdhs->bv_status = BIOC_SVONLINE;
2001 		vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
2002 		vdhs->bv_level = -1; /* hotspare */
2003 		vdhs->bv_nodisk = 1;
2004 		break;
2005 
2006 	case MFI_MGMT_SD:
2007 		sdhs = bio_hs;
2008 		sdhs->bd_status = BIOC_SDHOTSPARE;
2009 		sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
2010 		sdhs->bd_channel = pd->mpd_enc_idx;
2011 		sdhs->bd_target = pd->mpd_enc_slot;
2012 		inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
2013 		vendp = inqbuf->vendor;
2014 		memcpy(vend, vendp, sizeof vend - 1);
2015 		vend[sizeof vend - 1] = '\0';
2016 		strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
2017 		break;
2018 
2019 	default:
2020 		goto freeme;
2021 	}
2022 
2023 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs 6\n", DEVNAME(sc));
2024 	rv = 0;
2025 freeme:
2026 	free(pd, M_DEVBUF, 0);
2027 	free(cfg, M_DEVBUF, 0);
2028 
2029 	return (rv);
2030 }
2031 
2032 #ifndef SMALL_KERNEL
2033 
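/*
 * Human readable names for the individual bits of the BBU firmware status
 * word; each bit is exported as its own indicator sensor by mfi_bbu().
 */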
2034 static const char *mfi_bbu_indicators[] = {
2035 	"pack missing",
2036 	"voltage low",
2037 	"temp high",
2038 	"charge active",
2039 	"discharge active",
2040 	"learn cycle req'd",
2041 	"learn cycle active",
2042 	"learn cycle failed",
2043 	"learn cycle timeout",
2044 	"I2C errors",
2045 	"replace pack",
2046 	"low capacity",
2047 	"periodic learn req'd"
2048 };
2049 
2050 #define MFI_BBU_SENSORS 4
2051 
2052 int
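/*
 * Refresh the battery backup unit sensors.  If the status DCMD fails, all
 * BBU sensors are marked unknown.  Otherwise the first sensor reports
 * overall battery health, derived from the battery type specific "bad"
 * mask (and the state-of-health flag for BBU packs), followed by voltage,
 * current and temperature readings and the per-bit status indicators.
 */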
2053 mfi_bbu(struct mfi_softc *sc)
2054 {
2055 	struct mfi_bbu_status bbu;
2056 	u_int32_t status;
2057 	u_int32_t mask;
2058 	u_int32_t soh_bad;
2059 	int i;
2060 
2061 	if (mfi_mgmt(sc, MR_DCMD_BBU_GET_STATUS, MFI_DATA_IN,
2062 	    sizeof(bbu), &bbu, NULL) != 0) {
2063 		for (i = 0; i < MFI_BBU_SENSORS; i++) {
2064 			sc->sc_bbu[i].value = 0;
2065 			sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
2066 		}
2067 		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
2068 			sc->sc_bbu_status[i].value = 0;
2069 			sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
2070 		}
2071 		return (-1);
2072 	}
2073 
2074 	switch (bbu.battery_type) {
2075 	case MFI_BBU_TYPE_IBBU:
2076 		mask = MFI_BBU_STATE_BAD_IBBU;
2077 		soh_bad = 0;
2078 		break;
2079 	case MFI_BBU_TYPE_BBU:
2080 		mask = MFI_BBU_STATE_BAD_BBU;
2081 		soh_bad = (bbu.detail.bbu.is_SOH_good == 0);
2082 		break;
2083 
2084 	case MFI_BBU_TYPE_NONE:
2085 	default:
2086 		sc->sc_bbu[0].value = 0;
2087 		sc->sc_bbu[0].status = SENSOR_S_CRIT;
2088 		for (i = 1; i < MFI_BBU_SENSORS; i++) {
2089 			sc->sc_bbu[i].value = 0;
2090 			sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
2091 		}
2092 		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
2093 			sc->sc_bbu_status[i].value = 0;
2094 			sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
2095 		}
2096 		return (0);
2097 	}
2098 
2099 	status = letoh32(bbu.fw_status);
2100 
2101 	sc->sc_bbu[0].value = ((status & mask) || soh_bad) ? 0 : 1;
2102 	sc->sc_bbu[0].status = ((status & mask) || soh_bad) ? SENSOR_S_CRIT :
2103 	    SENSOR_S_OK;
2104 
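	/*
	 * The firmware appears to report voltage in mV, current in mA and
	 * temperature in degrees Celsius; scale the values to the sensor
	 * framework's uV, uA and uK.
	 */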
2105 	sc->sc_bbu[1].value = letoh16(bbu.voltage) * 1000;
2106 	sc->sc_bbu[2].value = (int16_t)letoh16(bbu.current) * 1000;
2107 	sc->sc_bbu[3].value = letoh16(bbu.temperature) * 1000000 + 273150000;
2108 	for (i = 1; i < MFI_BBU_SENSORS; i++)
2109 		sc->sc_bbu[i].status = SENSOR_S_UNSPEC;
2110 
2111 	for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
2112 		sc->sc_bbu_status[i].value = (status & (1 << i)) ? 1 : 0;
2113 		sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
2114 	}
2115 
2116 	return (0);
2117 }
2118 
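/*
 * Attach the sensor tree.  If the adapter advertises a BBU, a health
 * indicator, voltage, current and temperature sensors and one indicator
 * per firmware status bit are created.  Every logical drive also gets a
 * drive status sensor named after the attached disk device.  The sensors
 * are refreshed every 10 seconds by mfi_refresh_sensors().
 */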
2119 int
2120 mfi_create_sensors(struct mfi_softc *sc)
2121 {
2122 	struct device		*dev;
2123 	struct scsi_link	*link;
2124 	int			i;
2125 
2126 	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
2127 	    sizeof(sc->sc_sensordev.xname));
2128 
2129 	if (ISSET(letoh32(sc->sc_info.mci_adapter_ops), MFI_INFO_AOPS_BBU)) {
2130 		sc->sc_bbu = mallocarray(MFI_BBU_SENSORS, sizeof(*sc->sc_bbu),
2131 		    M_DEVBUF, M_WAITOK | M_ZERO);
2132 
2133 		sc->sc_bbu[0].type = SENSOR_INDICATOR;
2134 		sc->sc_bbu[0].status = SENSOR_S_UNKNOWN;
2135 		strlcpy(sc->sc_bbu[0].desc, "bbu ok",
2136 		    sizeof(sc->sc_bbu[0].desc));
2137 		sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[0]);
2138 
2139 		sc->sc_bbu[1].type = SENSOR_VOLTS_DC;
2140 		sc->sc_bbu[1].status = SENSOR_S_UNSPEC;
2141 		sc->sc_bbu[2].type = SENSOR_AMPS;
2142 		sc->sc_bbu[2].status = SENSOR_S_UNSPEC;
2143 		sc->sc_bbu[3].type = SENSOR_TEMP;
2144 		sc->sc_bbu[3].status = SENSOR_S_UNSPEC;
2145 		for (i = 1; i < MFI_BBU_SENSORS; i++) {
2146 			strlcpy(sc->sc_bbu[i].desc, "bbu",
2147 			    sizeof(sc->sc_bbu[i].desc));
2148 			sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[i]);
2149 		}
2150 
2151 		sc->sc_bbu_status = mallocarray(nitems(mfi_bbu_indicators),
2152 		    sizeof(*sc->sc_bbu_status), M_DEVBUF, M_WAITOK | M_ZERO);
2153 
2154 		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
2155 			sc->sc_bbu_status[i].type = SENSOR_INDICATOR;
2156 			sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
2157 			strlcpy(sc->sc_bbu_status[i].desc,
2158 			    mfi_bbu_indicators[i],
2159 			    sizeof(sc->sc_bbu_status[i].desc));
2160 
2161 			sensor_attach(&sc->sc_sensordev, &sc->sc_bbu_status[i]);
2162 		}
2163 	}
2164 
2165 	sc->sc_sensors = mallocarray(sc->sc_ld_cnt, sizeof(struct ksensor),
2166 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2167 	if (sc->sc_sensors == NULL)
2168 		return (1);
2169 
2170 	for (i = 0; i < sc->sc_ld_cnt; i++) {
2171 		link = scsi_get_link(sc->sc_scsibus, i, 0);
2172 		if (link == NULL)
2173 			goto bad;
2174 
2175 		dev = link->device_softc;
2176 
2177 		sc->sc_sensors[i].type = SENSOR_DRIVE;
2178 		sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
2179 
2180 		strlcpy(sc->sc_sensors[i].desc, dev->dv_xname,
2181 		    sizeof(sc->sc_sensors[i].desc));
2182 
2183 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
2184 	}
2185 
2186 	if (sensor_task_register(sc, mfi_refresh_sensors, 10) == NULL)
2187 		goto bad;
2188 
2189 	sensordev_install(&sc->sc_sensordev);
2190 
2191 	return (0);
2192 
2193 bad:
2194 	free(sc->sc_sensors, M_DEVBUF, 0);
2195 
2196 	return (1);
2197 }
2198 
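/*
 * Periodic sensor update: refresh the BBU sensors first, then translate
 * each logical drive's bioctl volume status into a drive sensor state.
 * The bio ioctl path is serialized with sc_lock.
 */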
2199 void
2200 mfi_refresh_sensors(void *arg)
2201 {
2202 	struct mfi_softc	*sc = arg;
2203 	int			i, rv;
2204 	struct bioc_vol		bv;
2205 
2206 	if (sc->sc_bbu != NULL && mfi_bbu(sc) != 0)
2207 		return;
2208 
2209 	for (i = 0; i < sc->sc_ld_cnt; i++) {
2210 		bzero(&bv, sizeof(bv));
2211 		bv.bv_volid = i;
2212 
2213 		rw_enter_write(&sc->sc_lock);
2214 		rv = mfi_ioctl_vol(sc, &bv);
2215 		rw_exit_write(&sc->sc_lock);
2216 
2217 		if (rv != 0)
2218 			return;
2219 
2220 		switch (bv.bv_status) {
2221 		case BIOC_SVOFFLINE:
2222 			sc->sc_sensors[i].value = SENSOR_DRIVE_FAIL;
2223 			sc->sc_sensors[i].status = SENSOR_S_CRIT;
2224 			break;
2225 
2226 		case BIOC_SVDEGRADED:
2227 			sc->sc_sensors[i].value = SENSOR_DRIVE_PFAIL;
2228 			sc->sc_sensors[i].status = SENSOR_S_WARN;
2229 			break;
2230 
2231 		case BIOC_SVSCRUB:
2232 		case BIOC_SVONLINE:
2233 			sc->sc_sensors[i].value = SENSOR_DRIVE_ONLINE;
2234 			sc->sc_sensors[i].status = SENSOR_S_OK;
2235 			break;
2236 
2237 		case BIOC_SVINVALID:
2238 			/* FALLTHROUGH */
2239 		default:
2240 			sc->sc_sensors[i].value = 0; /* unknown */
2241 			sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
2242 			break;
2243 		}
2244 	}
2245 }
2246 #endif /* SMALL_KERNEL */
2247 #endif /* NBIO > 0 */
2248 
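/*
 * Hand a command to the controller: sync the frame memory and let the
 * hardware specific post routine write the frame's physical address to
 * the inbound queue port.
 */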
2249 void
2250 mfi_start(struct mfi_softc *sc, struct mfi_ccb *ccb)
2251 {
2252 	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
2253 	    ccb->ccb_pframe_offset, sc->sc_frames_size,
2254 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2255 
2256 	mfi_post(sc, ccb);
2257 }
2258 
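/*
 * Command completion: sync the frame back, tear down the data DMA map if
 * the command carried data, then call the per-command done handler.
 */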
2259 void
2260 mfi_done(struct mfi_softc *sc, struct mfi_ccb *ccb)
2261 {
2262 	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
2263 	    ccb->ccb_pframe_offset, sc->sc_frames_size,
2264 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2265 
2266 	if (ccb->ccb_len > 0) {
2267 		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
2268 		    0, ccb->ccb_dmamap->dm_mapsize,
2269 		    (ccb->ccb_direction == MFI_DATA_IN) ?
2270 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
2271 
2272 		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
2273 	}
2274 
2275 	ccb->ccb_done(sc, ccb);
2276 }
2277 
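/*
 * Per-IOP register access routines.  Each supported I/O processor family
 * (xscale, ppc, gen2, skinny) has its own way of reading the firmware
 * state, enabling and acknowledging interrupts and posting a frame to the
 * inbound queue; the matching set is selected when the controller attaches.
 */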
2278 u_int32_t
2279 mfi_xscale_fw_state(struct mfi_softc *sc)
2280 {
2281 	return (mfi_read(sc, MFI_OMSG0));
2282 }
2283 
2284 void
2285 mfi_xscale_intr_ena(struct mfi_softc *sc)
2286 {
2287 	mfi_write(sc, MFI_OMSK, MFI_ENABLE_INTR);
2288 }
2289 
2290 int
2291 mfi_xscale_intr(struct mfi_softc *sc)
2292 {
2293 	u_int32_t status;
2294 
2295 	status = mfi_read(sc, MFI_OSTS);
2296 	if (!ISSET(status, MFI_OSTS_INTR_VALID))
2297 		return (0);
2298 
2299 	/* write status back to acknowledge interrupt */
2300 	mfi_write(sc, MFI_OSTS, status);
2301 
2302 	return (1);
2303 }
2304 
2305 void
2306 mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2307 {
2308 	mfi_write(sc, MFI_IQP, (ccb->ccb_pframe >> 3) |
2309 	    ccb->ccb_extra_frames);
2310 }
2311 
2312 u_int32_t
2313 mfi_ppc_fw_state(struct mfi_softc *sc)
2314 {
2315 	return (mfi_read(sc, MFI_OSP));
2316 }
2317 
2318 void
2319 mfi_ppc_intr_ena(struct mfi_softc *sc)
2320 {
2321 	mfi_write(sc, MFI_ODC, 0xffffffff);
2322 	mfi_write(sc, MFI_OMSK, ~0x80000004);
2323 }
2324 
2325 int
2326 mfi_ppc_intr(struct mfi_softc *sc)
2327 {
2328 	u_int32_t status;
2329 
2330 	status = mfi_read(sc, MFI_OSTS);
2331 	if (!ISSET(status, MFI_OSTS_PPC_INTR_VALID))
2332 		return (0);
2333 
2334 	/* write status back to acknowledge interrupt */
2335 	mfi_write(sc, MFI_ODC, status);
2336 
2337 	return (1);
2338 }
2339 
2340 void
2341 mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2342 {
2343 	mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
2344 	    (ccb->ccb_extra_frames << 1));
2345 }
2346 
2347 u_int32_t
2348 mfi_gen2_fw_state(struct mfi_softc *sc)
2349 {
2350 	return (mfi_read(sc, MFI_OSP));
2351 }
2352 
2353 void
2354 mfi_gen2_intr_ena(struct mfi_softc *sc)
2355 {
2356 	mfi_write(sc, MFI_ODC, 0xffffffff);
2357 	mfi_write(sc, MFI_OMSK, ~MFI_OSTS_GEN2_INTR_VALID);
2358 }
2359 
2360 int
2361 mfi_gen2_intr(struct mfi_softc *sc)
2362 {
2363 	u_int32_t status;
2364 
2365 	status = mfi_read(sc, MFI_OSTS);
2366 	if (!ISSET(status, MFI_OSTS_GEN2_INTR_VALID))
2367 		return (0);
2368 
2369 	/* write status back to acknowledge interrupt */
2370 	mfi_write(sc, MFI_ODC, status);
2371 
2372 	return (1);
2373 }
2374 
2375 void
2376 mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2377 {
2378 	mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
2379 	    (ccb->ccb_extra_frames << 1));
2380 }
2381 
2382 u_int32_t
2383 mfi_skinny_fw_state(struct mfi_softc *sc)
2384 {
2385 	return (mfi_read(sc, MFI_OSP));
2386 }
2387 
2388 void
2389 mfi_skinny_intr_ena(struct mfi_softc *sc)
2390 {
2391 	mfi_write(sc, MFI_OMSK, ~0x00000001);
2392 }
2393 
2394 int
2395 mfi_skinny_intr(struct mfi_softc *sc)
2396 {
2397 	u_int32_t status;
2398 
2399 	status = mfi_read(sc, MFI_OSTS);
2400 	if (!ISSET(status, MFI_OSTS_SKINNY_INTR_VALID))
2401 		return (0);
2402 
2403 	/* write status back to acknowledge interrupt */
2404 	mfi_write(sc, MFI_OSTS, status);
2405 
2406 	return (1);
2407 }
2408 
2409 void
2410 mfi_skinny_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2411 {
2412 	mfi_write(sc, MFI_IQPL, 0x1 | ccb->ccb_pframe |
2413 	    (ccb->ccb_extra_frames << 1));
2414 	mfi_write(sc, MFI_IQPH, 0x00000000);
2415 }
2416 
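/*
 * Probe a target on the pass-through bus: fetch the physical disk's
 * firmware info and attach the target only if the disk is in the system
 * (JBOD) state.
 */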
2417 int
2418 mfi_pd_scsi_probe(struct scsi_link *link)
2419 {
2420 	uint8_t mbox[MFI_MBOX_SIZE];
2421 	struct mfi_softc *sc = link->adapter_softc;
2422 	struct mfi_pd_link *pl = sc->sc_pd->pd_links[link->target];
2423 
2424 	if (link->lun > 0)
2425 		return (0);
2426 
2427 	if (pl == NULL)
2428 		return (ENXIO);
2429 
2430 	bzero(mbox, sizeof(mbox));
2431 	bcopy(&pl->pd_id, &mbox[0], sizeof(pl->pd_id));
2432 
2433 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
2434 	    sizeof(pl->pd_info), &pl->pd_info, mbox))
2435 		return (EIO);
2436 
2437 	if (letoh16(pl->pd_info.mpd_fw_state) != MFI_PD_SYSTEM)
2438 		return (ENXIO);
2439 
2440 	return (0);
2441 }
2442 
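/*
 * Issue a SCSI command to a pass-through physical disk by wrapping the
 * CDB in an MFI pass frame addressed to the disk's device id.  Data is
 * described by a scatter/gather list built in mfi_create_sgl(), and
 * SCSI_POLL commands are completed synchronously via mfi_poll().
 */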
2443 void
2444 mfi_pd_scsi_cmd(struct scsi_xfer *xs)
2445 {
2446 	struct scsi_link *link = xs->sc_link;
2447 	struct mfi_softc *sc = link->adapter_softc;
2448 	struct mfi_ccb *ccb = xs->io;
2449 	struct mfi_pass_frame *pf = &ccb->ccb_frame->mfr_pass;
2450 	struct mfi_pd_link *pl = sc->sc_pd->pd_links[link->target];
2451 
2452 	mfi_scrub_ccb(ccb);
2453 	xs->error = XS_NOERROR;
2454 
2455 	pf->mpf_header.mfh_cmd = MFI_CMD_PD_SCSI_IO;
2456 	pf->mpf_header.mfh_target_id = pl->pd_id;
2457 	pf->mpf_header.mfh_lun_id = link->lun;
2458 	pf->mpf_header.mfh_cdb_len = xs->cmdlen;
2459 	pf->mpf_header.mfh_timeout = 0;
2460 	pf->mpf_header.mfh_data_len = htole32(xs->datalen); /* XXX */
2461 	pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;
2462 	pf->mpf_sense_addr = htole64(ccb->ccb_psense);
2463 
2464 	memset(pf->mpf_cdb, 0, sizeof(pf->mpf_cdb));
2465 	memcpy(pf->mpf_cdb, xs->cmd, xs->cmdlen);
2466 
2467 	ccb->ccb_done = mfi_scsi_xs_done;
2468 	ccb->ccb_cookie = xs;
2469 	ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
2470 	ccb->ccb_sgl = &pf->mpf_sgl;
2471 
2472 	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
2473 		ccb->ccb_direction = xs->flags & SCSI_DATA_IN ?
2474 		    MFI_DATA_IN : MFI_DATA_OUT;
2475 	else
2476 		ccb->ccb_direction = MFI_DATA_NONE;
2477 
2478 	if (xs->data) {
2479 		ccb->ccb_data = xs->data;
2480 		ccb->ccb_len = xs->datalen;
2481 
2482 		if (mfi_create_sgl(sc, ccb, (xs->flags & SCSI_NOSLEEP) ?
2483 		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
2484 			goto stuffup;
2485 	}
2486 
2487 	if (xs->flags & SCSI_POLL)
2488 		mfi_poll(sc, ccb);
2489 	else
2490 		mfi_start(sc, ccb);
2491 
2492 	return;
2493 
2494 stuffup:
2495 	xs->error = XS_DRIVER_STUFFUP;
2496 	scsi_done(xs);
2497 }
2498