xref: /openbsd-src/sys/dev/ic/mfi.c (revision 99fd087599a8791921855f21bd7e36130f39aadc)
1 /* $OpenBSD: mfi.c,v 1.175 2020/02/13 15:11:32 krw Exp $ */
2 /*
3  * Copyright (c) 2006 Marco Peereboom <marco@peereboom.us>
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include "bio.h"
19 
20 #include <sys/param.h>
21 #include <sys/systm.h>
22 #include <sys/buf.h>
23 #include <sys/device.h>
24 #include <sys/kernel.h>
25 #include <sys/malloc.h>
26 #include <sys/rwlock.h>
27 #include <sys/sensors.h>
28 #include <sys/dkio.h>
29 #include <sys/pool.h>
30 
31 #include <machine/bus.h>
32 
33 #include <scsi/scsi_all.h>
34 #include <scsi/scsi_disk.h>
35 #include <scsi/scsiconf.h>
36 
37 #include <dev/biovar.h>
38 #include <dev/ic/mfireg.h>
39 #include <dev/ic/mfivar.h>
40 
#ifdef MFI_DEBUG
/*
 * Runtime debug mask consumed by DNPRINTF(); uncomment individual
 * MFI_D_* flags below to enable that category of debug output.
 */
uint32_t	mfi_debug = 0
/*		    | MFI_D_CMD */
/*		    | MFI_D_INTR */
/*		    | MFI_D_MISC */
/*		    | MFI_D_DMA */
/*		    | MFI_D_IOCTL */
/*		    | MFI_D_RW */
/*		    | MFI_D_MEM */
/*		    | MFI_D_CCB */
		;
#endif
53 
/* Autoconf glue: device class entry for mfi(4). */
struct cfdriver mfi_cd = {
	NULL, "mfi", DV_DULL
};
57 
58 void	mfi_scsi_cmd(struct scsi_xfer *);
59 int	mfi_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
60 int	mfi_ioctl_cache(struct scsi_link *, u_long,  struct dk_cache *);
61 
62 void	mfi_pd_scsi_cmd(struct scsi_xfer *);
63 int	mfi_pd_scsi_probe(struct scsi_link *);
64 
/* scsi_adapter entry points for the logical-disk (RAID volume) bus. */
struct scsi_adapter mfi_switch = {
	mfi_scsi_cmd, NULL, NULL, NULL, mfi_scsi_ioctl
};
68 
/* scsi_adapter entry points for the physical-disk (JBOD/syspd) bus. */
struct scsi_adapter mfi_pd_switch = {
	mfi_pd_scsi_cmd, NULL, mfi_pd_scsi_probe, NULL, mfi_scsi_ioctl
};
72 
73 void *		mfi_get_ccb(void *);
74 void		mfi_put_ccb(void *, void *);
75 void		mfi_scrub_ccb(struct mfi_ccb *);
76 int		mfi_init_ccb(struct mfi_softc *);
77 
78 struct mfi_mem	*mfi_allocmem(struct mfi_softc *, size_t);
79 void		mfi_freemem(struct mfi_softc *, struct mfi_mem *);
80 
81 int		mfi_transition_firmware(struct mfi_softc *);
82 int		mfi_initialize_firmware(struct mfi_softc *);
83 int		mfi_get_info(struct mfi_softc *);
84 uint32_t	mfi_read(struct mfi_softc *, bus_size_t);
85 void		mfi_write(struct mfi_softc *, bus_size_t, uint32_t);
86 void		mfi_poll(struct mfi_softc *, struct mfi_ccb *);
87 void		mfi_exec(struct mfi_softc *, struct mfi_ccb *);
88 void		mfi_exec_done(struct mfi_softc *, struct mfi_ccb *);
89 int		mfi_create_sgl(struct mfi_softc *, struct mfi_ccb *, int);
90 u_int		mfi_default_sgd_load(struct mfi_softc *, struct mfi_ccb *);
91 int		mfi_syspd(struct mfi_softc *);
92 
93 /* commands */
94 int		mfi_scsi_ld(struct mfi_softc *sc, struct mfi_ccb *,
95 		    struct scsi_xfer *);
96 int		mfi_scsi_io(struct mfi_softc *sc, struct mfi_ccb *,
97 		    struct scsi_xfer *, uint64_t, uint32_t);
98 void		mfi_scsi_xs_done(struct mfi_softc *sc, struct mfi_ccb *);
99 int		mfi_mgmt(struct mfi_softc *, uint32_t, uint32_t, uint32_t,
100 		    void *, const union mfi_mbox *);
101 int		mfi_do_mgmt(struct mfi_softc *, struct mfi_ccb * , uint32_t,
102 		    uint32_t, uint32_t, void *, const union mfi_mbox *);
103 void		mfi_empty_done(struct mfi_softc *, struct mfi_ccb *);
104 
105 #if NBIO > 0
106 int		mfi_ioctl(struct device *, u_long, caddr_t);
107 int		mfi_bio_getitall(struct mfi_softc *);
108 int		mfi_ioctl_inq(struct mfi_softc *, struct bioc_inq *);
109 int		mfi_ioctl_vol(struct mfi_softc *, struct bioc_vol *);
110 int		mfi_ioctl_disk(struct mfi_softc *, struct bioc_disk *);
111 int		mfi_ioctl_alarm(struct mfi_softc *, struct bioc_alarm *);
112 int		mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *);
113 int		mfi_ioctl_setstate(struct mfi_softc *, struct bioc_setstate *);
114 int		mfi_ioctl_patrol(struct mfi_softc *sc, struct bioc_patrol *);
115 int		mfi_bio_hs(struct mfi_softc *, int, int, void *);
116 #ifndef SMALL_KERNEL
117 int		mfi_create_sensors(struct mfi_softc *);
118 void		mfi_refresh_sensors(void *);
119 int		mfi_bbu(struct mfi_softc *);
120 #endif /* SMALL_KERNEL */
121 #endif /* NBIO > 0 */
122 
123 void		mfi_start(struct mfi_softc *, struct mfi_ccb *);
124 void		mfi_done(struct mfi_softc *, struct mfi_ccb *);
125 u_int32_t	mfi_xscale_fw_state(struct mfi_softc *);
126 void		mfi_xscale_intr_ena(struct mfi_softc *);
127 int		mfi_xscale_intr(struct mfi_softc *);
128 void		mfi_xscale_post(struct mfi_softc *, struct mfi_ccb *);
129 
/* Register access backend for xscale-based controllers. */
static const struct mfi_iop_ops mfi_iop_xscale = {
	mfi_xscale_fw_state,
	mfi_xscale_intr_ena,
	mfi_xscale_intr,
	mfi_xscale_post,
	mfi_default_sgd_load,
	0,
};
138 
139 u_int32_t	mfi_ppc_fw_state(struct mfi_softc *);
140 void		mfi_ppc_intr_ena(struct mfi_softc *);
141 int		mfi_ppc_intr(struct mfi_softc *);
142 void		mfi_ppc_post(struct mfi_softc *, struct mfi_ccb *);
143 
/* Register access backend for ppc-based controllers. */
static const struct mfi_iop_ops mfi_iop_ppc = {
	mfi_ppc_fw_state,
	mfi_ppc_intr_ena,
	mfi_ppc_intr,
	mfi_ppc_post,
	mfi_default_sgd_load,
	MFI_IDB,
	0
};
153 
154 u_int32_t	mfi_gen2_fw_state(struct mfi_softc *);
155 void		mfi_gen2_intr_ena(struct mfi_softc *);
156 int		mfi_gen2_intr(struct mfi_softc *);
157 void		mfi_gen2_post(struct mfi_softc *, struct mfi_ccb *);
158 
/* Register access backend for gen2 controllers. */
static const struct mfi_iop_ops mfi_iop_gen2 = {
	mfi_gen2_fw_state,
	mfi_gen2_intr_ena,
	mfi_gen2_intr,
	mfi_gen2_post,
	mfi_default_sgd_load,
	MFI_IDB,
	0
};
168 
169 u_int32_t	mfi_skinny_fw_state(struct mfi_softc *);
170 void		mfi_skinny_intr_ena(struct mfi_softc *);
171 int		mfi_skinny_intr(struct mfi_softc *);
172 void		mfi_skinny_post(struct mfi_softc *, struct mfi_ccb *);
173 u_int		mfi_skinny_sgd_load(struct mfi_softc *, struct mfi_ccb *);
174 
/*
 * Register access backend for "skinny" controllers; these use their
 * own sgl loader and doorbell offset, and can expose physical disks
 * directly (MFI_IOP_F_SYSPD).
 */
static const struct mfi_iop_ops mfi_iop_skinny = {
	mfi_skinny_fw_state,
	mfi_skinny_intr_ena,
	mfi_skinny_intr,
	mfi_skinny_post,
	mfi_skinny_sgd_load,
	MFI_SKINNY_IDB,
	MFI_IOP_F_SYSPD
};
184 
/* Convenience wrappers dispatching through the per-iop ops table. */
#define mfi_fw_state(_s)	((_s)->sc_iop->mio_fw_state(_s))
#define mfi_intr_enable(_s)	((_s)->sc_iop->mio_intr_ena(_s))
#define mfi_my_intr(_s)		((_s)->sc_iop->mio_intr(_s))
#define mfi_post(_s, _c)	((_s)->sc_iop->mio_post((_s), (_c)))
#define mfi_sgd_load(_s, _c)	((_s)->sc_iop->mio_sgd_load((_s), (_c)))
190 
191 void *
192 mfi_get_ccb(void *cookie)
193 {
194 	struct mfi_softc	*sc = cookie;
195 	struct mfi_ccb		*ccb;
196 
197 	KERNEL_UNLOCK();
198 
199 	mtx_enter(&sc->sc_ccb_mtx);
200 	ccb = SLIST_FIRST(&sc->sc_ccb_freeq);
201 	if (ccb != NULL) {
202 		SLIST_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
203 		ccb->ccb_state = MFI_CCB_READY;
204 	}
205 	mtx_leave(&sc->sc_ccb_mtx);
206 
207 	DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb);
208 	KERNEL_LOCK();
209 
210 	return (ccb);
211 }
212 
/*
 * scsi_iopool "put" backend: return a ccb to the adapter's free list.
 */
void
mfi_put_ccb(void *cookie, void *io)
{
	struct mfi_softc	*sc = cookie;
	struct mfi_ccb		*ccb = io;

	DNPRINTF(MFI_D_CCB, "%s: mfi_put_ccb: %p\n", DEVNAME(sc), ccb);

	KERNEL_UNLOCK();
	mtx_enter(&sc->sc_ccb_mtx);
	SLIST_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
	mtx_leave(&sc->sc_ccb_mtx);
	KERNEL_LOCK();
}
227 
/*
 * Reset a ccb to a pristine state before reuse: clear the hardware
 * frame header status/flags and all per-command software bookkeeping.
 */
void
mfi_scrub_ccb(struct mfi_ccb *ccb)
{
	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;

	hdr->mfh_cmd_status = 0x0;
	hdr->mfh_flags = 0x0;
	ccb->ccb_state = MFI_CCB_FREE;
	ccb->ccb_cookie = NULL;
	ccb->ccb_flags = 0;
	ccb->ccb_done = NULL;
	ccb->ccb_direction = 0;
	ccb->ccb_frame_size = 0;
	ccb->ccb_extra_frames = 0;
	ccb->ccb_sgl = NULL;
	ccb->ccb_data = NULL;
	ccb->ccb_len = 0;
}
246 
247 int
248 mfi_init_ccb(struct mfi_softc *sc)
249 {
250 	struct mfi_ccb		*ccb;
251 	uint32_t		i;
252 	int			error;
253 
254 	DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc));
255 
256 	sc->sc_ccb = mallocarray(sc->sc_max_cmds, sizeof(struct mfi_ccb),
257 	    M_DEVBUF, M_WAITOK|M_ZERO);
258 
259 	for (i = 0; i < sc->sc_max_cmds; i++) {
260 		ccb = &sc->sc_ccb[i];
261 
262 		/* select i'th frame */
263 		ccb->ccb_frame = (union mfi_frame *)
264 		    (MFIMEM_KVA(sc->sc_frames) + sc->sc_frames_size * i);
265 		ccb->ccb_pframe =
266 		    MFIMEM_DVA(sc->sc_frames) + sc->sc_frames_size * i;
267 		ccb->ccb_pframe_offset = sc->sc_frames_size * i;
268 		ccb->ccb_frame->mfr_header.mfh_context = i;
269 
270 		/* select i'th sense */
271 		ccb->ccb_sense = (struct mfi_sense *)
272 		    (MFIMEM_KVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
273 		ccb->ccb_psense =
274 		    (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
275 
276 		/* create a dma map for transfer */
277 		error = bus_dmamap_create(sc->sc_dmat,
278 		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
279 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
280 		if (error) {
281 			printf("%s: cannot create ccb dmamap (%d)\n",
282 			    DEVNAME(sc), error);
283 			goto destroy;
284 		}
285 
286 		DNPRINTF(MFI_D_CCB,
287 		    "ccb(%d): %p frame: %#x (%#x) sense: %#x (%#x) map: %#x\n",
288 		    ccb->ccb_frame->mfr_header.mfh_context, ccb,
289 		    ccb->ccb_frame, ccb->ccb_pframe,
290 		    ccb->ccb_sense, ccb->ccb_psense,
291 		    ccb->ccb_dmamap);
292 
293 		/* add ccb to queue */
294 		mfi_put_ccb(sc, ccb);
295 	}
296 
297 	return (0);
298 destroy:
299 	/* free dma maps and ccb memory */
300 	while ((ccb = mfi_get_ccb(sc)) != NULL)
301 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
302 
303 	free(sc->sc_ccb, M_DEVBUF, 0);
304 
305 	return (1);
306 }
307 
308 uint32_t
309 mfi_read(struct mfi_softc *sc, bus_size_t r)
310 {
311 	uint32_t rv;
312 
313 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
314 	    BUS_SPACE_BARRIER_READ);
315 	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
316 
317 	DNPRINTF(MFI_D_RW, "%s: mr 0x%x 0x08%x ", DEVNAME(sc), r, rv);
318 	return (rv);
319 }
320 
/*
 * Write a 32bit controller register at offset r, followed by a write
 * barrier so the store is pushed out before subsequent accesses.
 */
void
mfi_write(struct mfi_softc *sc, bus_size_t r, uint32_t v)
{
	DNPRINTF(MFI_D_RW, "%s: mw 0x%x 0x%08x", DEVNAME(sc), r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
330 
/*
 * Allocate "size" bytes of zeroed, physically contiguous DMA memory
 * and map it for both kernel (am_kva) and device (am_map) access.
 * Returns NULL on failure; partially acquired resources are released
 * through the goto chain in reverse order of acquisition.  The caller
 * releases the result with mfi_freemem().
 */
struct mfi_mem *
mfi_allocmem(struct mfi_softc *sc, size_t size)
{
	struct mfi_mem		*mm;
	int			nsegs;

	DNPRINTF(MFI_D_MEM, "%s: mfi_allocmem: %d\n", DEVNAME(sc),
	    size);

	mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (mm == NULL)
		return (NULL);

	mm->am_size = size;

	/* single segment so the device sees one contiguous range */
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
		goto amfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	DNPRINTF(MFI_D_MEM, "  kva: %p  dva: %p  map: %p\n",
	    mm->am_kva, mm->am_map->dm_segs[0].ds_addr, mm->am_map);

	return (mm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
amfree:
	free(mm, M_DEVBUF, sizeof *mm);

	return (NULL);
}
378 
/*
 * Release DMA memory allocated with mfi_allocmem(); teardown happens
 * in strict reverse order of acquisition (unload, unmap, free,
 * destroy).
 */
void
mfi_freemem(struct mfi_softc *sc, struct mfi_mem *mm)
{
	DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm);

	bus_dmamap_unload(sc->sc_dmat, mm->am_map);
	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size);
	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
	free(mm, M_DEVBUF, sizeof *mm);
}
390 
/*
 * Drive the firmware state machine until it reports MFI_STATE_READY.
 * Some states require a doorbell write to make progress (handshake
 * clear, init ready); others are transient and are simply waited out.
 * Each state is polled at 100ms intervals for max_wait seconds.
 * Returns 0 when the firmware is ready, 1 on fault, unknown state or
 * timeout.
 */
int
mfi_transition_firmware(struct mfi_softc *sc)
{
	int32_t			fw_state, cur_state;
	u_int32_t		idb = sc->sc_iop->mio_idb;
	int			max_wait, i;

	fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;

	DNPRINTF(MFI_D_CMD, "%s: mfi_transition_firmware: %#x\n", DEVNAME(sc),
	    fw_state);

	while (fw_state != MFI_STATE_READY) {
		DNPRINTF(MFI_D_MISC,
		    "%s: waiting for firmware to become ready\n",
		    DEVNAME(sc));
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			printf("%s: firmware fault\n", DEVNAME(sc));
			return (1);
		case MFI_STATE_WAIT_HANDSHAKE:
			/* ack the handshake to let firmware proceed */
			mfi_write(sc, idb, MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			/* ask firmware to transition back to ready */
			mfi_write(sc, idb, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		default:
			printf("%s: unknown firmware state %d\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
		/* poll every 100ms for up to max_wait seconds */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
	}

	return (0);
}
450 
/*
 * Send the MFI_CMD_INIT frame handing the firmware the physical
 * addresses of the reply queue and the producer/consumer indices.
 * The queue info structure is built in the frame slot directly after
 * the init frame (ccb_pframe + MFI_FRAME_SIZE).  Returns 0 on
 * success, 1 if the firmware rejected the command.
 */
int
mfi_initialize_firmware(struct mfi_softc *sc)
{
	struct mfi_ccb		*ccb;
	struct mfi_init_frame	*init;
	struct mfi_init_qinfo	*qinfo;
	int			rv = 0;

	DNPRINTF(MFI_D_MISC, "%s: mfi_initialize_firmware\n", DEVNAME(sc));

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	mfi_scrub_ccb(ccb);

	init = &ccb->ccb_frame->mfr_init;
	/* qinfo lives in the extra frame right behind the init frame */
	qinfo = (struct mfi_init_qinfo *)((uint8_t *)init + MFI_FRAME_SIZE);

	memset(qinfo, 0, sizeof(*qinfo));
	/* one spare entry so producer == consumer only when empty */
	qinfo->miq_rq_entries = htole32(sc->sc_max_cmds + 1);

	qinfo->miq_rq_addr = htole64(MFIMEM_DVA(sc->sc_pcq) +
	    offsetof(struct mfi_prod_cons, mpc_reply_q));

	qinfo->miq_pi_addr = htole64(MFIMEM_DVA(sc->sc_pcq) +
	    offsetof(struct mfi_prod_cons, mpc_producer));

	qinfo->miq_ci_addr = htole64(MFIMEM_DVA(sc->sc_pcq) +
	    offsetof(struct mfi_prod_cons, mpc_consumer));

	init->mif_header.mfh_cmd = MFI_CMD_INIT;
	init->mif_header.mfh_data_len = htole32(sizeof(*qinfo));
	init->mif_qinfo_new_addr = htole64(ccb->ccb_pframe + MFI_FRAME_SIZE);

	/* make the queue memory visible to the device before posting */
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq),
	    0, MFIMEM_LEN(sc->sc_pcq),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ccb->ccb_done = mfi_empty_done;
	mfi_poll(sc, ccb);
	if (init->mif_header.mfh_cmd_status != MFI_STAT_OK)
		rv = 1;

	mfi_put_ccb(sc, ccb);

	return (rv);
}
496 
/*
 * No-op completion handler for commands whose status is inspected
 * directly by the submitter (e.g. mfi_initialize_firmware()).
 */
void
mfi_empty_done(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	/* nop */
}
502 
503 int
504 mfi_get_info(struct mfi_softc *sc)
505 {
506 #ifdef MFI_DEBUG
507 	int i;
508 #endif
509 	DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc));
510 
511 	if (mfi_mgmt(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN,
512 	    sizeof(sc->sc_info), &sc->sc_info, NULL))
513 		return (1);
514 
515 #ifdef MFI_DEBUG
516 	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
517 		printf("%s: active FW %s Version %s date %s time %s\n",
518 		    DEVNAME(sc),
519 		    sc->sc_info.mci_image_component[i].mic_name,
520 		    sc->sc_info.mci_image_component[i].mic_version,
521 		    sc->sc_info.mci_image_component[i].mic_build_date,
522 		    sc->sc_info.mci_image_component[i].mic_build_time);
523 	}
524 
525 	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
526 		printf("%s: pending FW %s Version %s date %s time %s\n",
527 		    DEVNAME(sc),
528 		    sc->sc_info.mci_pending_image_component[i].mic_name,
529 		    sc->sc_info.mci_pending_image_component[i].mic_version,
530 		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
531 		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
532 	}
533 
534 	printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
535 	    DEVNAME(sc),
536 	    sc->sc_info.mci_max_arms,
537 	    sc->sc_info.mci_max_spans,
538 	    sc->sc_info.mci_max_arrays,
539 	    sc->sc_info.mci_max_lds,
540 	    sc->sc_info.mci_product_name);
541 
542 	printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
543 	    DEVNAME(sc),
544 	    sc->sc_info.mci_serial_number,
545 	    sc->sc_info.mci_hw_present,
546 	    sc->sc_info.mci_current_fw_time,
547 	    sc->sc_info.mci_max_cmds,
548 	    sc->sc_info.mci_max_sg_elements);
549 
550 	printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
551 	    DEVNAME(sc),
552 	    sc->sc_info.mci_max_request_size,
553 	    sc->sc_info.mci_lds_present,
554 	    sc->sc_info.mci_lds_degraded,
555 	    sc->sc_info.mci_lds_offline,
556 	    sc->sc_info.mci_pd_present);
557 
558 	printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
559 	    DEVNAME(sc),
560 	    sc->sc_info.mci_pd_disks_present,
561 	    sc->sc_info.mci_pd_disks_pred_failure,
562 	    sc->sc_info.mci_pd_disks_failed);
563 
564 	printf("%s: nvram %d mem %d flash %d\n",
565 	    DEVNAME(sc),
566 	    sc->sc_info.mci_nvram_size,
567 	    sc->sc_info.mci_memory_size,
568 	    sc->sc_info.mci_flash_size);
569 
570 	printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
571 	    DEVNAME(sc),
572 	    sc->sc_info.mci_ram_correctable_errors,
573 	    sc->sc_info.mci_ram_uncorrectable_errors,
574 	    sc->sc_info.mci_cluster_allowed,
575 	    sc->sc_info.mci_cluster_active);
576 
577 	printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
578 	    DEVNAME(sc),
579 	    sc->sc_info.mci_max_strips_per_io,
580 	    sc->sc_info.mci_raid_levels,
581 	    sc->sc_info.mci_adapter_ops,
582 	    sc->sc_info.mci_ld_ops);
583 
584 	printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
585 	    DEVNAME(sc),
586 	    sc->sc_info.mci_stripe_sz_ops.min,
587 	    sc->sc_info.mci_stripe_sz_ops.max,
588 	    sc->sc_info.mci_pd_ops,
589 	    sc->sc_info.mci_pd_mix_support);
590 
591 	printf("%s: ecc_bucket %d pckg_prop %s\n",
592 	    DEVNAME(sc),
593 	    sc->sc_info.mci_ecc_bucket_count,
594 	    sc->sc_info.mci_package_version);
595 
596 	printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
597 	    DEVNAME(sc),
598 	    sc->sc_info.mci_properties.mcp_seq_num,
599 	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
600 	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
601 	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);
602 
603 	printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
604 	    DEVNAME(sc),
605 	    sc->sc_info.mci_properties.mcp_rebuild_rate,
606 	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
607 	    sc->sc_info.mci_properties.mcp_bgi_rate,
608 	    sc->sc_info.mci_properties.mcp_cc_rate);
609 
610 	printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
611 	    DEVNAME(sc),
612 	    sc->sc_info.mci_properties.mcp_recon_rate,
613 	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
614 	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
615 	    sc->sc_info.mci_properties.mcp_spinup_delay,
616 	    sc->sc_info.mci_properties.mcp_cluster_enable);
617 
618 	printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
619 	    DEVNAME(sc),
620 	    sc->sc_info.mci_properties.mcp_coercion_mode,
621 	    sc->sc_info.mci_properties.mcp_alarm_enable,
622 	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
623 	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
624 	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);
625 
626 	printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
627 	    DEVNAME(sc),
628 	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
629 	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
630 	    sc->sc_info.mci_properties.mcp_expose_encl_devices);
631 
632 	printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
633 	    DEVNAME(sc),
634 	    sc->sc_info.mci_pci.mip_vendor,
635 	    sc->sc_info.mci_pci.mip_device,
636 	    sc->sc_info.mci_pci.mip_subvendor,
637 	    sc->sc_info.mci_pci.mip_subdevice);
638 
639 	printf("%s: type %#x port_count %d port_addr ",
640 	    DEVNAME(sc),
641 	    sc->sc_info.mci_host.mih_type,
642 	    sc->sc_info.mci_host.mih_port_count);
643 
644 	for (i = 0; i < 8; i++)
645 		printf("%.0llx ", sc->sc_info.mci_host.mih_port_addr[i]);
646 	printf("\n");
647 
648 	printf("%s: type %.x port_count %d port_addr ",
649 	    DEVNAME(sc),
650 	    sc->sc_info.mci_device.mid_type,
651 	    sc->sc_info.mci_device.mid_port_count);
652 
653 	for (i = 0; i < 8; i++)
654 		printf("%.0llx ", sc->sc_info.mci_device.mid_port_addr[i]);
655 	printf("\n");
656 #endif /* MFI_DEBUG */
657 
658 	return (0);
659 }
660 
/*
 * Attach the controller: select the iop register backend, bring the
 * firmware to READY, size and allocate the DMA resources (reply
 * queue, frame pool, sense pool), initialize the ccbs and the
 * firmware, then attach the scsibus glue plus the optional physical
 * disk bus, bio(4) ioctl hook and sensors.  Returns 0 on success, 1
 * on failure with all DMA memory released.
 */
int
mfi_attach(struct mfi_softc *sc, enum mfi_iop iop)
{
	struct scsibus_attach_args saa;
	uint32_t		status, frames, max_sgl;
	int			i;

	switch (iop) {
	case MFI_IOP_XSCALE:
		sc->sc_iop = &mfi_iop_xscale;
		break;
	case MFI_IOP_PPC:
		sc->sc_iop = &mfi_iop_ppc;
		break;
	case MFI_IOP_GEN2:
		sc->sc_iop = &mfi_iop_gen2;
		break;
	case MFI_IOP_SKINNY:
		sc->sc_iop = &mfi_iop_skinny;
		break;
	default:
		panic("%s: unknown iop %d", DEVNAME(sc), iop);
	}

	DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc));

	if (mfi_transition_firmware(sc))
		return (1);

	SLIST_INIT(&sc->sc_ccb_freeq);
	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
	scsi_iopool_init(&sc->sc_iopool, sc, mfi_get_ccb, mfi_put_ccb);

	rw_init(&sc->sc_lock, "mfi_lock");

	/* command and sgl limits are encoded in the fw status register */
	status = mfi_fw_state(sc);
	sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
	max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
	if (sc->sc_64bit_dma) {
		sc->sc_max_sgl = min(max_sgl, (128 * 1024) / PAGE_SIZE + 1);
		sc->sc_sgl_size = sizeof(struct mfi_sg64);
		sc->sc_sgl_flags = MFI_FRAME_SGL64;
	} else {
		sc->sc_max_sgl = max_sgl;
		sc->sc_sgl_size = sizeof(struct mfi_sg32);
		sc->sc_sgl_flags = MFI_FRAME_SGL32;
	}
	if (iop == MFI_IOP_SKINNY)
		sc->sc_sgl_size = sizeof(struct mfi_sg_skinny);
	DNPRINTF(MFI_D_MISC, "%s: 64bit: %d max commands: %u, max sgl: %u\n",
	    DEVNAME(sc), sc->sc_64bit_dma, sc->sc_max_cmds, sc->sc_max_sgl);

	/* consumer/producer and reply queue memory */
	sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) +
	    sizeof(struct mfi_prod_cons));
	if (sc->sc_pcq == NULL) {
		printf("%s: unable to allocate reply queue memory\n",
		    DEVNAME(sc));
		goto nopcq;
	}

	/* frame memory */
	/* we are not doing 64 bit IO so only calculate # of 32 bit frames */
	frames = (sc->sc_sgl_size * sc->sc_max_sgl + MFI_FRAME_SIZE - 1) /
	    MFI_FRAME_SIZE + 1;
	sc->sc_frames_size = frames * MFI_FRAME_SIZE;
	sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds);
	if (sc->sc_frames == NULL) {
		printf("%s: unable to allocate frame memory\n", DEVNAME(sc));
		goto noframe;
	}
	/* XXX hack, fix this */
	if (MFIMEM_DVA(sc->sc_frames) & 0x3f) {
		/* hardware requires 64-byte aligned frames */
		printf("%s: improper frame alignment (%#lx) FIXME\n",
		    DEVNAME(sc), MFIMEM_DVA(sc->sc_frames));
		goto noframe;
	}

	/* sense memory */
	sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
	if (sc->sc_sense == NULL) {
		printf("%s: unable to allocate sense memory\n", DEVNAME(sc));
		goto nosense;
	}

	/* now that we have all memory bits go initialize ccbs */
	if (mfi_init_ccb(sc)) {
		printf("%s: could not init ccb list\n", DEVNAME(sc));
		goto noinit;
	}

	/* kickstart firmware with all addresses and pointers */
	if (mfi_initialize_firmware(sc)) {
		printf("%s: could not initialize firmware\n", DEVNAME(sc));
		goto noinit;
	}

	if (mfi_get_info(sc)) {
		printf("%s: could not retrieve controller information\n",
		    DEVNAME(sc));
		goto noinit;
	}

	printf("%s: \"%s\", firmware %s", DEVNAME(sc),
	    sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
	if (letoh16(sc->sc_info.mci_memory_size) > 0)
		printf(", %uMB cache", letoh16(sc->sc_info.mci_memory_size));
	printf("\n");

	sc->sc_ld_cnt = sc->sc_info.mci_lds_present;
	for (i = 0; i < sc->sc_ld_cnt; i++)
		sc->sc_ld[i].ld_present = 1;

	sc->sc_link.adapter = &mfi_switch;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.adapter_buswidth = sc->sc_info.mci_max_lds;
	sc->sc_link.adapter_target = -1;
	sc->sc_link.luns = 1;
	/* keep one ccb in reserve for management commands */
	sc->sc_link.openings = sc->sc_max_cmds - 1;
	sc->sc_link.pool = &sc->sc_iopool;

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = &sc->sc_link;

	sc->sc_scsibus = (struct scsibus_softc *)
	    config_found(&sc->sc_dev, &saa, scsiprint);

	/* attach the physical disk bus on controllers that support it */
	if (ISSET(sc->sc_iop->mio_flags, MFI_IOP_F_SYSPD))
		mfi_syspd(sc);

	/* enable interrupts */
	mfi_intr_enable(sc);

#if NBIO > 0
	if (bio_register(&sc->sc_dev, mfi_ioctl) != 0)
		panic("%s: controller registration failed", DEVNAME(sc));
	else
		sc->sc_ioctl = mfi_ioctl;

#ifndef SMALL_KERNEL
	if (mfi_create_sensors(sc) != 0)
		printf("%s: unable to create sensors\n", DEVNAME(sc));
#endif
#endif /* NBIO > 0 */

	return (0);
noinit:
	mfi_freemem(sc, sc->sc_sense);
nosense:
	mfi_freemem(sc, sc->sc_frames);
noframe:
	mfi_freemem(sc, sc->sc_pcq);
nopcq:
	return (1);
}
816 
/*
 * Attach a second scsibus exposing the controller's physical disks
 * (JBOD/"syspd" mode): query the firmware's physical disk list,
 * record each disk's device id in sc_pd->pd_links[], and configure
 * the bus.  Returns 0 on success, 1 on failure with all allocations
 * released.
 */
int
mfi_syspd(struct mfi_softc *sc)
{
	struct scsibus_attach_args saa;
	struct scsi_link *link;
	struct mfi_pd_link *pl;
	struct mfi_pd_list *pd;
	u_int npds, i;

	sc->sc_pd = malloc(sizeof(*sc->sc_pd), M_DEVBUF, M_WAITOK|M_ZERO);
	if (sc->sc_pd == NULL)
		return (1);

	pd = malloc(sizeof(*pd), M_TEMP, M_WAITOK|M_ZERO);
	if (pd == NULL)
		goto nopdsc;

	if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
	    sizeof(*pd), pd, NULL) != 0)
		goto nopd;

	/*
	 * NOTE(review): npds comes straight from the firmware;
	 * presumably it cannot exceed the pd_links[] capacity
	 * (MFI_MAX_PD) — confirm, otherwise this loop can overflow.
	 */
	npds = letoh32(pd->mpl_no_pd);
	for (i = 0; i < npds; i++) {
		pl = malloc(sizeof(*pl), M_DEVBUF, M_WAITOK|M_ZERO);
		if (pl == NULL)
			goto nopl;

		pl->pd_id = pd->mpl_address[i].mpa_pd_id;
		sc->sc_pd->pd_links[i] = pl;
	}

	free(pd, M_TEMP, sizeof *pd);

	link = &sc->sc_pd->pd_link;
	link->adapter = &mfi_pd_switch;
	link->adapter_softc = sc;
	link->adapter_buswidth = MFI_MAX_PD;
	link->adapter_target = -1;
	link->openings = sc->sc_max_cmds - 1;
	link->pool = &sc->sc_iopool;

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = link;

	sc->sc_pd->pd_scsibus = (struct scsibus_softc *)
	    config_found(&sc->sc_dev, &saa, scsiprint);

	return (0);
nopl:
	/* free the links allocated so far; NULL marks the first gap */
	for (i = 0; i < npds; i++) {
		pl = sc->sc_pd->pd_links[i];
		if (pl == NULL)
			break;

		free(pl, M_DEVBUF, sizeof *pl);
	}
nopd:
	free(pd, M_TEMP, sizeof *pd);
nopdsc:
	free(sc->sc_pd, M_DEVBUF, sizeof *sc->sc_pd);
	return (1);
}
879 
/*
 * Submit a command and busy-wait (up to ~5 seconds, 1ms steps) for
 * the firmware to update its status byte, bypassing the reply queue
 * (MFI_FRAME_DONT_POST_IN_REPLY_QUEUE).  Used before interrupts are
 * available.  On timeout the ccb is flagged MFI_CCB_F_ERR; the ccb's
 * done handler is always invoked at the end.
 */
void
mfi_poll(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	struct mfi_frame_header *hdr;
	int to = 0;

	DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc));

	hdr = &ccb->ccb_frame->mfr_header;
	/* 0xff is the "not yet completed" sentinel we poll against */
	hdr->mfh_cmd_status = 0xff;
	hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	mfi_start(sc, ccb);

	for (;;) {
		delay(1000);

		/* pull the frame back from the device before reading it */
		bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
		    ccb->ccb_pframe_offset, sc->sc_frames_size,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (hdr->mfh_cmd_status != 0xff)
			break;

		if (to++ > 5000) {
			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
			    hdr->mfh_context);
			ccb->ccb_flags |= MFI_CCB_F_ERR;
			break;
		}

		/* give the frame back to the device for the next check */
		bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
		    ccb->ccb_pframe_offset, sc->sc_frames_size,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction & MFI_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	ccb->ccb_done(sc, ccb);
}
927 
/*
 * Submit a command and sleep until it completes.  A stack-local mutex
 * is handed to mfi_exec_done() via ccb_cookie; completion is signalled
 * by clearing the cookie and waking up the sleeper.
 */
void
mfi_exec(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	struct mutex m = MUTEX_INITIALIZER(IPL_BIO);

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfi_exec called with cookie or done set");
#endif

	ccb->ccb_cookie = &m;
	ccb->ccb_done = mfi_exec_done;

	mfi_start(sc, ccb);

	mtx_enter(&m);
	/* ccb_cookie doubles as the "still in flight" flag */
	while (ccb->ccb_cookie != NULL)
		msleep_nsec(ccb, &m, PRIBIO, "mfiexec", INFSLP);
	mtx_leave(&m);
}
948 
/*
 * Completion handler paired with mfi_exec(): clear the cookie under
 * the submitter's mutex and wake the sleeping thread.
 */
void
mfi_exec_done(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	struct mutex *m = ccb->ccb_cookie;

	mtx_enter(m);
	ccb->ccb_cookie = NULL;
	wakeup_one(ccb);
	mtx_leave(m);
}
959 
/*
 * Interrupt handler: drain the firmware's reply ring between the
 * consumer and producer indices, completing each referenced ccb via
 * mfi_done().  The ring has sc_max_cmds + 1 entries (see
 * mfi_initialize_firmware()).  Returns 1 if any command was claimed,
 * 0 if the interrupt was not ours.
 */
int
mfi_intr(void *arg)
{
	struct mfi_softc	*sc = arg;
	struct mfi_prod_cons	*pcq = MFIMEM_KVA(sc->sc_pcq);
	struct mfi_ccb		*ccb;
	uint32_t		producer, consumer, ctx;
	int			claimed = 0;

	if (!mfi_my_intr(sc))
		return (0);

	/* pull the ring from the device before inspecting it */
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq),
	    0, MFIMEM_LEN(sc->sc_pcq),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	producer = letoh32(pcq->mpc_producer);
	consumer = letoh32(pcq->mpc_consumer);

	DNPRINTF(MFI_D_INTR, "%s: mfi_intr %#x %#x\n", DEVNAME(sc), sc, pcq);

	while (consumer != producer) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_intr pi %#x ci %#x\n",
		    DEVNAME(sc), producer, consumer);

		/* consume the entry and mark the slot empty again */
		ctx = pcq->mpc_reply_q[consumer];
		pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX;
		if (ctx == MFI_INVALID_CTX)
			printf("%s: invalid context, p: %d c: %d\n",
			    DEVNAME(sc), producer, consumer);
		else {
			/* XXX remove from queue and call scsi_done */
			ccb = &sc->sc_ccb[ctx];
			DNPRINTF(MFI_D_INTR, "%s: mfi_intr context %#x\n",
			    DEVNAME(sc), ctx);
			mfi_done(sc, ccb);

			claimed = 1;
		}
		consumer++;
		if (consumer == (sc->sc_max_cmds + 1))
			consumer = 0;
	}

	/* publish the new consumer index back to the device */
	pcq->mpc_consumer = htole32(consumer);

	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq),
	    0, MFIMEM_LEN(sc->sc_pcq),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (claimed);
}
1012 
/*
 * Build a logical-disk READ/WRITE I/O frame for the given transfer
 * (blockno/blockcnt already decoded from the CDB by the caller) and
 * attach the data buffer via an sgl.  Returns 0 on success, 1 if the
 * xfer has no data buffer or the sgl could not be created.
 */
int
mfi_scsi_io(struct mfi_softc *sc, struct mfi_ccb *ccb,
    struct scsi_xfer *xs, uint64_t blockno, uint32_t blockcnt)
{
	struct scsi_link	*link = xs->sc_link;
	struct mfi_io_frame	*io;

	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_io: %d\n",
	    DEVNAME((struct mfi_softc *)link->adapter_softc), link->target);

	if (!xs->data)
		return (1);

	io = &ccb->ccb_frame->mfr_io;
	if (xs->flags & SCSI_DATA_IN) {
		io->mif_header.mfh_cmd = MFI_CMD_LD_READ;
		ccb->ccb_direction = MFI_DATA_IN;
	} else {
		io->mif_header.mfh_cmd = MFI_CMD_LD_WRITE;
		ccb->ccb_direction = MFI_DATA_OUT;
	}
	io->mif_header.mfh_target_id = link->target;
	io->mif_header.mfh_timeout = 0;
	io->mif_header.mfh_flags = 0;
	io->mif_header.mfh_sense_len = MFI_SENSE_SIZE;
	/* data_len for LD I/O is in blocks, not bytes */
	io->mif_header.mfh_data_len = htole32(blockcnt);
	io->mif_lba = htole64(blockno);
	io->mif_sense_addr = htole64(ccb->ccb_psense);

	ccb->ccb_done = mfi_scsi_xs_done;
	ccb->ccb_cookie = xs;
	ccb->ccb_frame_size = MFI_IO_FRAME_SIZE;
	ccb->ccb_sgl = &io->mif_sgl;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	if (mfi_create_sgl(sc, ccb, (xs->flags & SCSI_NOSLEEP) ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
		return (1);

	return (0);
}
1055 
1056 void
1057 mfi_scsi_xs_done(struct mfi_softc *sc, struct mfi_ccb *ccb)
1058 {
1059 	struct scsi_xfer	*xs = ccb->ccb_cookie;
1060 	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
1061 
1062 	DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %#x %#x\n",
1063 	    DEVNAME(sc), ccb, ccb->ccb_frame);
1064 
1065 	switch (hdr->mfh_cmd_status) {
1066 	case MFI_STAT_OK:
1067 		xs->resid = 0;
1068 		break;
1069 
1070 	case MFI_STAT_SCSI_DONE_WITH_ERROR:
1071 		xs->error = XS_SENSE;
1072 		xs->resid = 0;
1073 		memset(&xs->sense, 0, sizeof(xs->sense));
1074 		memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
1075 		break;
1076 
1077 	case MFI_STAT_DEVICE_NOT_FOUND:
1078 		xs->error = XS_SELTIMEOUT;
1079 		break;
1080 
1081 	default:
1082 		xs->error = XS_DRIVER_STUFFUP;
1083 		DPRINTF(MFI_D_CMD,
1084 		    "%s: mfi_scsi_xs_done stuffup %02x on %02x\n",
1085 		    DEVNAME(sc), hdr->mfh_cmd_status, xs->cmd->opcode);
1086 
1087 		if (hdr->mfh_scsi_status != 0) {
1088 			DNPRINTF(MFI_D_INTR,
1089 			    "%s: mfi_scsi_xs_done sense %#x %x %x\n",
1090 			    DEVNAME(sc), hdr->mfh_scsi_status,
1091 			    &xs->sense, ccb->ccb_sense);
1092 			memset(&xs->sense, 0, sizeof(xs->sense));
1093 			memcpy(&xs->sense, ccb->ccb_sense,
1094 			    sizeof(struct scsi_sense_data));
1095 			xs->error = XS_SENSE;
1096 		}
1097 		break;
1098 	}
1099 
1100 	KERNEL_LOCK();
1101 	scsi_done(xs);
1102 	KERNEL_UNLOCK();
1103 }
1104 
1105 int
1106 mfi_scsi_ld(struct mfi_softc *sc, struct mfi_ccb *ccb, struct scsi_xfer *xs)
1107 {
1108 	struct scsi_link	*link = xs->sc_link;
1109 	struct mfi_pass_frame	*pf;
1110 
1111 	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld: %d\n",
1112 	    DEVNAME((struct mfi_softc *)link->adapter_softc), link->target);
1113 
1114 	pf = &ccb->ccb_frame->mfr_pass;
1115 	pf->mpf_header.mfh_cmd = MFI_CMD_LD_SCSI_IO;
1116 	pf->mpf_header.mfh_target_id = link->target;
1117 	pf->mpf_header.mfh_lun_id = 0;
1118 	pf->mpf_header.mfh_cdb_len = xs->cmdlen;
1119 	pf->mpf_header.mfh_timeout = 0;
1120 	pf->mpf_header.mfh_data_len = htole32(xs->datalen); /* XXX */
1121 	pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;
1122 
1123 	pf->mpf_sense_addr = htole64(ccb->ccb_psense);
1124 
1125 	memset(pf->mpf_cdb, 0, 16);
1126 	memcpy(pf->mpf_cdb, xs->cmd, xs->cmdlen);
1127 
1128 	ccb->ccb_done = mfi_scsi_xs_done;
1129 	ccb->ccb_cookie = xs;
1130 	ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
1131 	ccb->ccb_sgl = &pf->mpf_sgl;
1132 
1133 	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
1134 		ccb->ccb_direction = xs->flags & SCSI_DATA_IN ?
1135 		    MFI_DATA_IN : MFI_DATA_OUT;
1136 	else
1137 		ccb->ccb_direction = MFI_DATA_NONE;
1138 
1139 	if (xs->data) {
1140 		ccb->ccb_data = xs->data;
1141 		ccb->ccb_len = xs->datalen;
1142 
1143 		if (mfi_create_sgl(sc, ccb, (xs->flags & SCSI_NOSLEEP) ?
1144 		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
1145 			return (1);
1146 	}
1147 
1148 	return (0);
1149 }
1150 
1151 void
1152 mfi_scsi_cmd(struct scsi_xfer *xs)
1153 {
1154 	struct scsi_link	*link = xs->sc_link;
1155 	struct mfi_softc	*sc = link->adapter_softc;
1156 	struct mfi_ccb		*ccb = xs->io;
1157 	struct scsi_rw		*rw;
1158 	struct scsi_rw_big	*rwb;
1159 	struct scsi_rw_16	*rw16;
1160 	uint64_t		blockno;
1161 	uint32_t		blockcnt;
1162 	uint8_t			target = link->target;
1163 	union mfi_mbox		mbox;
1164 
1165 	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_cmd opcode: %#x\n",
1166 	    DEVNAME(sc), xs->cmd->opcode);
1167 
1168 	KERNEL_UNLOCK();
1169 
1170 	if (!sc->sc_ld[target].ld_present) {
1171 		DNPRINTF(MFI_D_CMD, "%s: invalid target %d\n",
1172 		    DEVNAME(sc), target);
1173 		goto stuffup;
1174 	}
1175 
1176 	mfi_scrub_ccb(ccb);
1177 
1178 	xs->error = XS_NOERROR;
1179 
1180 	switch (xs->cmd->opcode) {
1181 	/* IO path */
1182 	case READ_BIG:
1183 	case WRITE_BIG:
1184 		rwb = (struct scsi_rw_big *)xs->cmd;
1185 		blockno = (uint64_t)_4btol(rwb->addr);
1186 		blockcnt = _2btol(rwb->length);
1187 		if (mfi_scsi_io(sc, ccb, xs, blockno, blockcnt))
1188 			goto stuffup;
1189 		break;
1190 
1191 	case READ_COMMAND:
1192 	case WRITE_COMMAND:
1193 		rw = (struct scsi_rw *)xs->cmd;
1194 		blockno =
1195 		    (uint64_t)(_3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff));
1196 		blockcnt = rw->length ? rw->length : 0x100;
1197 		if (mfi_scsi_io(sc, ccb, xs, blockno, blockcnt))
1198 			goto stuffup;
1199 		break;
1200 
1201 	case READ_16:
1202 	case WRITE_16:
1203 		rw16 = (struct scsi_rw_16 *)xs->cmd;
1204 		blockno = _8btol(rw16->addr);
1205 		blockcnt = _4btol(rw16->length);
1206 		if (mfi_scsi_io(sc, ccb, xs, blockno, blockcnt))
1207 			goto stuffup;
1208 		break;
1209 
1210 	case SYNCHRONIZE_CACHE:
1211 		mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
1212 		if (mfi_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH,
1213 		    MFI_DATA_NONE, 0, NULL, &mbox))
1214 			goto stuffup;
1215 
1216 		goto complete;
1217 		/* NOTREACHED */
1218 
1219 	default:
1220 		if (mfi_scsi_ld(sc, ccb, xs))
1221 			goto stuffup;
1222 		break;
1223 	}
1224 
1225 	DNPRINTF(MFI_D_CMD, "%s: start io %d\n", DEVNAME(sc), target);
1226 
1227 	if (xs->flags & SCSI_POLL)
1228 		mfi_poll(sc, ccb);
1229 	else
1230 		mfi_start(sc, ccb);
1231 
1232 	KERNEL_LOCK();
1233 	return;
1234 
1235 stuffup:
1236 	xs->error = XS_DRIVER_STUFFUP;
1237 complete:
1238 	KERNEL_LOCK();
1239 	scsi_done(xs);
1240 }
1241 
1242 u_int
1243 mfi_default_sgd_load(struct mfi_softc *sc, struct mfi_ccb *ccb)
1244 {
1245 	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
1246 	union mfi_sgl		*sgl = ccb->ccb_sgl;
1247 	bus_dma_segment_t	*sgd = ccb->ccb_dmamap->dm_segs;
1248 	int			 i;
1249 
1250 	hdr->mfh_flags |= sc->sc_sgl_flags;
1251 
1252 	for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
1253 		if (sc->sc_64bit_dma) {
1254 			sgl->sg64[i].addr = htole64(sgd[i].ds_addr);
1255 			sgl->sg64[i].len = htole32(sgd[i].ds_len);
1256 			DNPRINTF(MFI_D_DMA, "%s: addr: %#x  len: %#x\n",
1257 			    DEVNAME(sc), sgl->sg64[i].addr, sgl->sg64[i].len);
1258 		} else {
1259 			sgl->sg32[i].addr = htole32(sgd[i].ds_addr);
1260 			sgl->sg32[i].len = htole32(sgd[i].ds_len);
1261 			DNPRINTF(MFI_D_DMA, "%s: addr: %#x  len: %#x\n",
1262 			    DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len);
1263 		}
1264 	}
1265 
1266 	return (ccb->ccb_dmamap->dm_nsegs *
1267 	    (sc->sc_64bit_dma ? sizeof(sgl->sg64) : sizeof(sgl->sg32)));
1268 }
1269 
/*
 * Load ccb_data into the ccb's dmamap, append the SGL to the command
 * frame via the chip-specific sgd_load hook and pre-sync the buffer
 * for the transfer direction.  Returns 0 on success, 1 on failure
 * (including when the ccb carries no data pointer).
 */
int
mfi_create_sgl(struct mfi_softc *sc, struct mfi_ccb *ccb, int flags)
{
	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
	int			error;

	DNPRINTF(MFI_D_DMA, "%s: mfi_create_sgl %#x\n", DEVNAME(sc),
	    ccb->ccb_data);

	/* nothing to map: report it as a failure to the caller */
	if (!ccb->ccb_data) {
		hdr->mfh_sg_count = 0;
		return (1);
	}

	error = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap,
	    ccb->ccb_data, ccb->ccb_len, NULL, flags);
	if (error) {
		/* EFBIG: buffer needs more segments than the map allows */
		if (error == EFBIG)
			printf("more than %d dma segs\n",
			    sc->sc_max_sgl);
		else
			printf("error %d loading dma map\n", error);
		return (1);
	}

	/* chip-specific hook appends the SGL and returns its size */
	ccb->ccb_frame_size += mfi_sgd_load(sc, ccb);

	if (ccb->ccb_direction == MFI_DATA_IN) {
		hdr->mfh_flags |= MFI_FRAME_DIR_READ;
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
	} else {
		hdr->mfh_flags |= MFI_FRAME_DIR_WRITE;
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	}

	hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;
	/* number of additional MFI_FRAME_SIZE frames the SGL spills into */
	ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE;

	DNPRINTF(MFI_D_DMA, "%s: sg_count: %d  frame_size: %d  frames_size: %d"
	    "  dm_nsegs: %d  extra_frames: %d\n",
	    DEVNAME(sc),
	    hdr->mfh_sg_count,
	    ccb->ccb_frame_size,
	    sc->sc_frames_size,
	    ccb->ccb_dmamap->dm_nsegs,
	    ccb->ccb_extra_frames);

	return (0);
}
1321 
1322 int
1323 mfi_mgmt(struct mfi_softc *sc, uint32_t opc, uint32_t dir, uint32_t len,
1324     void *buf, const union mfi_mbox *mbox)
1325 {
1326 	struct mfi_ccb *ccb;
1327 	int rv;
1328 
1329 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1330 	mfi_scrub_ccb(ccb);
1331 	rv = mfi_do_mgmt(sc, ccb, opc, dir, len, buf, mbox);
1332 	scsi_io_put(&sc->sc_iopool, ccb);
1333 
1334 	return (rv);
1335 }
1336 
/*
 * Issue a DCMD management command on a caller-supplied ccb.  The
 * caller's buffer is bounced through dma_alloc() memory: copied in
 * before the command for MFI_DATA_OUT and copied back afterwards for
 * MFI_DATA_IN.  While the system is cold the command is polled;
 * otherwise it sleeps in mfi_exec().  Returns 0 on success, ENXIO if
 * the firmware reports MFI_STAT_WRONG_STATE, EIO on any other
 * firmware error, and EINVAL on local failures.
 */
int
mfi_do_mgmt(struct mfi_softc *sc, struct mfi_ccb *ccb, uint32_t opc,
    uint32_t dir, uint32_t len, void *buf, const union mfi_mbox *mbox)
{
	struct mfi_dcmd_frame *dcmd;
	uint8_t *dma_buf = NULL;
	int rv = EINVAL;

	DNPRINTF(MFI_D_MISC, "%s: mfi_do_mgmt %#x\n", DEVNAME(sc), opc);

	/* bounce buffer; PR_NOWAIT when cold since we cannot sleep yet */
	dma_buf = dma_alloc(len, cold ? PR_NOWAIT : PR_WAITOK);
	if (dma_buf == NULL)
		goto done;

	dcmd = &ccb->ccb_frame->mfr_dcmd;
	memset(&dcmd->mdf_mbox, 0, sizeof(dcmd->mdf_mbox));
	dcmd->mdf_header.mfh_cmd = MFI_CMD_DCMD;
	dcmd->mdf_header.mfh_timeout = 0;

	dcmd->mdf_opcode = opc;
	dcmd->mdf_header.mfh_data_len = 0;
	ccb->ccb_direction = dir;

	ccb->ccb_frame_size = MFI_DCMD_FRAME_SIZE;

	/* handle special opcodes */
	if (mbox != NULL)
		memcpy(&dcmd->mdf_mbox, mbox, sizeof(dcmd->mdf_mbox));

	if (dir != MFI_DATA_NONE) {
		/* outbound data must be staged before the SGL is built */
		if (dir == MFI_DATA_OUT)
			memcpy(dma_buf, buf, len);
		dcmd->mdf_header.mfh_data_len = len;
		ccb->ccb_data = dma_buf;
		ccb->ccb_len = len;
		ccb->ccb_sgl = &dcmd->mdf_sgl;

		if (mfi_create_sgl(sc, ccb, cold ? BUS_DMA_NOWAIT :
		    BUS_DMA_WAITOK)) {
			rv = EINVAL;
			goto done;
		}
	}

	/* no interrupts while cold, so poll for completion instead */
	if (cold) {
		ccb->ccb_done = mfi_empty_done;
		mfi_poll(sc, ccb);
	} else
		mfi_exec(sc, ccb);

	if (dcmd->mdf_header.mfh_cmd_status != MFI_STAT_OK) {
		if (dcmd->mdf_header.mfh_cmd_status == MFI_STAT_WRONG_STATE)
			rv = ENXIO;
		else
			rv = EIO;
		goto done;
	}

	/* copy the firmware's answer back to the caller's buffer */
	if (dir == MFI_DATA_IN)
		memcpy(buf, dma_buf, len);

	rv = 0;
done:
	if (dma_buf)
		dma_free(dma_buf, len);

	return (rv);
}
1405 
1406 int
1407 mfi_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
1408 {
1409 	struct mfi_softc	*sc = (struct mfi_softc *)link->adapter_softc;
1410 
1411 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_scsi_ioctl\n", DEVNAME(sc));
1412 
1413 	switch (cmd) {
1414 	case DIOCGCACHE:
1415 	case DIOCSCACHE:
1416 		return (mfi_ioctl_cache(link, cmd, (struct dk_cache *)addr));
1417 		break;
1418 
1419 	default:
1420 		if (sc->sc_ioctl)
1421 			return (sc->sc_ioctl(link->adapter_softc, cmd, addr));
1422 		break;
1423 	}
1424 
1425 	return (ENOTTY);
1426 }
1427 
/*
 * DIOCGCACHE/DIOCSCACHE backend.  On controllers with cache memory
 * the logical disk's own read/write cache policy is reported or
 * changed; on cacheless controllers the physical disk write cache is
 * used instead and read caching is unsupported.
 */
int
mfi_ioctl_cache(struct scsi_link *link, u_long cmd,  struct dk_cache *dc)
{
	struct mfi_softc	*sc = (struct mfi_softc *)link->adapter_softc;
	int			 rv, wrenable, rdenable;
	struct mfi_ld_prop	 ldp;
	union mfi_mbox		 mbox;

	/* refresh sc_info; mci_memory_size is consulted below */
	if (mfi_get_info(sc)) {
		rv = EIO;
		goto done;
	}

	if (!sc->sc_ld[link->target].ld_present) {
		rv = EIO;
		goto done;
	}

	/* mailbox byte 0 selects the target logical disk */
	memset(&mbox, 0, sizeof(mbox));
	mbox.b[0] = link->target;
	if ((rv = mfi_mgmt(sc, MR_DCMD_LD_GET_PROPERTIES, MFI_DATA_IN,
	    sizeof(ldp), &ldp, &mbox)) != 0)
		goto done;

	/* derive current cache state from the relevant policy field */
	if (sc->sc_info.mci_memory_size > 0) {
		wrenable = ISSET(ldp.mlp_cur_cache_policy,
		    MR_LD_CACHE_ALLOW_WRITE_CACHE)? 1 : 0;
		rdenable = ISSET(ldp.mlp_cur_cache_policy,
		    MR_LD_CACHE_ALLOW_READ_CACHE)? 1 : 0;
	} else {
		wrenable = ISSET(ldp.mlp_diskcache_policy,
		    MR_LD_DISK_CACHE_ENABLE)? 1 : 0;
		rdenable = 0;
	}

	if (cmd == DIOCGCACHE) {
		dc->wrcache = wrenable;
		dc->rdcache = rdenable;
		goto done;
	} /* else DIOCSCACHE */

	/* nothing to do if the requested state matches the current one */
	if (((dc->wrcache) ? 1 : 0) == wrenable &&
	    ((dc->rdcache) ? 1 : 0) == rdenable)
		goto done;

	/* SET_PROPERTIES wants target, reserved byte and sequence number */
	memset(&mbox, 0, sizeof(mbox));
	mbox.b[0] = ldp.mlp_ld.mld_target;
	mbox.b[1] = ldp.mlp_ld.mld_res;
	mbox.s[1] = ldp.mlp_ld.mld_seq;

	if (sc->sc_info.mci_memory_size > 0) {
		if (dc->rdcache)
			SET(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_READ_CACHE);
		else
			CLR(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_READ_CACHE);
		if (dc->wrcache)
			SET(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_WRITE_CACHE);
		else
			CLR(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_WRITE_CACHE);
	} else {
		/* no controller cache: read caching cannot be enabled */
		if (dc->rdcache) {
			rv = EOPNOTSUPP;
			goto done;
		}
		if (dc->wrcache)
			ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_ENABLE;
		else
			ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_DISABLE;
	}

	if ((rv = mfi_mgmt(sc, MR_DCMD_LD_SET_PROPERTIES, MFI_DATA_OUT,
	    sizeof(ldp), &ldp, &mbox)) != 0)
		goto done;
done:
	return (rv);
}
1508 
1509 #if NBIO > 0
1510 int
1511 mfi_ioctl(struct device *dev, u_long cmd, caddr_t addr)
1512 {
1513 	struct mfi_softc	*sc = (struct mfi_softc *)dev;
1514 	int error = 0;
1515 
1516 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl ", DEVNAME(sc));
1517 
1518 	rw_enter_write(&sc->sc_lock);
1519 
1520 	switch (cmd) {
1521 	case BIOCINQ:
1522 		DNPRINTF(MFI_D_IOCTL, "inq\n");
1523 		error = mfi_ioctl_inq(sc, (struct bioc_inq *)addr);
1524 		break;
1525 
1526 	case BIOCVOL:
1527 		DNPRINTF(MFI_D_IOCTL, "vol\n");
1528 		error = mfi_ioctl_vol(sc, (struct bioc_vol *)addr);
1529 		break;
1530 
1531 	case BIOCDISK:
1532 		DNPRINTF(MFI_D_IOCTL, "disk\n");
1533 		error = mfi_ioctl_disk(sc, (struct bioc_disk *)addr);
1534 		break;
1535 
1536 	case BIOCALARM:
1537 		DNPRINTF(MFI_D_IOCTL, "alarm\n");
1538 		error = mfi_ioctl_alarm(sc, (struct bioc_alarm *)addr);
1539 		break;
1540 
1541 	case BIOCBLINK:
1542 		DNPRINTF(MFI_D_IOCTL, "blink\n");
1543 		error = mfi_ioctl_blink(sc, (struct bioc_blink *)addr);
1544 		break;
1545 
1546 	case BIOCSETSTATE:
1547 		DNPRINTF(MFI_D_IOCTL, "setstate\n");
1548 		error = mfi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
1549 		break;
1550 
1551 	case BIOCPATROL:
1552 		DNPRINTF(MFI_D_IOCTL, "patrol\n");
1553 		error = mfi_ioctl_patrol(sc, (struct bioc_patrol *)addr);
1554 		break;
1555 
1556 	default:
1557 		DNPRINTF(MFI_D_IOCTL, " invalid ioctl\n");
1558 		error = ENOTTY;
1559 	}
1560 
1561 	rw_exit_write(&sc->sc_lock);
1562 
1563 	return (error);
1564 }
1565 
1566 int
1567 mfi_bio_getitall(struct mfi_softc *sc)
1568 {
1569 	int			i, d, size, rv = EINVAL;
1570 	union mfi_mbox		mbox;
1571 	struct mfi_conf		*cfg = NULL;
1572 	struct mfi_ld_details	*ld_det = NULL;
1573 
1574 	/* get info */
1575 	if (mfi_get_info(sc)) {
1576 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_get_info failed\n",
1577 		    DEVNAME(sc));
1578 		goto done;
1579 	}
1580 
1581 	/* send single element command to retrieve size for full structure */
1582 	cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
1583 	if (cfg == NULL)
1584 		goto done;
1585 	if (mfi_mgmt(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg,
1586 	    NULL)) {
1587 		free(cfg, M_DEVBUF, sizeof *cfg);
1588 		goto done;
1589 	}
1590 
1591 	size = cfg->mfc_size;
1592 	free(cfg, M_DEVBUF, sizeof *cfg);
1593 
1594 	/* memory for read config */
1595 	cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
1596 	if (cfg == NULL)
1597 		goto done;
1598 	if (mfi_mgmt(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, size, cfg, NULL)) {
1599 		free(cfg, M_DEVBUF, size);
1600 		goto done;
1601 	}
1602 
1603 	/* replace current pointer with new one */
1604 	if (sc->sc_cfg)
1605 		free(sc->sc_cfg, M_DEVBUF, 0);
1606 	sc->sc_cfg = cfg;
1607 
1608 	/* get all ld info */
1609 	if (mfi_mgmt(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
1610 	    sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL))
1611 		goto done;
1612 
1613 	/* get memory for all ld structures */
1614 	size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
1615 	if (sc->sc_ld_sz != size) {
1616 		if (sc->sc_ld_details)
1617 			free(sc->sc_ld_details, M_DEVBUF, 0);
1618 
1619 		ld_det = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
1620 		if (ld_det == NULL)
1621 			goto done;
1622 		sc->sc_ld_sz = size;
1623 		sc->sc_ld_details = ld_det;
1624 	}
1625 
1626 	/* find used physical disks */
1627 	size = sizeof(struct mfi_ld_details);
1628 	for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
1629 		memset(&mbox, 0, sizeof(mbox));
1630 		mbox.b[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
1631 		if (mfi_mgmt(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN, size,
1632 		    &sc->sc_ld_details[i], &mbox))
1633 			goto done;
1634 
1635 		d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
1636 		    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
1637 	}
1638 	sc->sc_no_pd = d;
1639 
1640 	rv = 0;
1641 done:
1642 	return (rv);
1643 }
1644 
1645 int
1646 mfi_ioctl_inq(struct mfi_softc *sc, struct bioc_inq *bi)
1647 {
1648 	int			rv = EINVAL;
1649 	struct mfi_conf		*cfg = NULL;
1650 
1651 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq\n", DEVNAME(sc));
1652 
1653 	if (mfi_bio_getitall(sc)) {
1654 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_bio_getitall failed\n",
1655 		    DEVNAME(sc));
1656 		goto done;
1657 	}
1658 
1659 	/* count unused disks as volumes */
1660 	if (sc->sc_cfg == NULL)
1661 		goto done;
1662 	cfg = sc->sc_cfg;
1663 
1664 	bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
1665 	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
1666 #if notyet
1667 	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
1668 	    (bi->bi_nodisk - sc->sc_no_pd);
1669 #endif
1670 	/* tell bio who we are */
1671 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
1672 
1673 	rv = 0;
1674 done:
1675 	return (rv);
1676 }
1677 
/*
 * BIOCVOL: fill in a bioc_vol for a logical disk (status, RAID level,
 * size, disk count, cache mode) from the cached LD list/details.
 * Volume ids beyond the LD list are treated as hotspares or unused
 * disks and handled by mfi_bio_hs().
 */
int
mfi_ioctl_vol(struct mfi_softc *sc, struct bioc_vol *bv)
{
	int			i, per, rv = EINVAL;
	struct scsi_link	*link;
	struct device		*dev;

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol %#x\n",
	    DEVNAME(sc), bv->bv_volid);

	/* we really could skip and expect that inq took care of it */
	if (mfi_bio_getitall(sc)) {
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
		/* go do hotspares & unused disks */
		rv = mfi_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
		goto done;
	}

	/* report the attached device name (e.g. sdN) if it exists */
	i = bv->bv_volid;
	link = scsi_get_link(sc->sc_scsibus, i, 0);
	if (link != NULL && link->device_softc != NULL) {
		dev = link->device_softc;
		strlcpy(bv->bv_dev, dev->dv_xname, sizeof(bv->bv_dev));
	}

	switch(sc->sc_ld_list.mll_list[i].mll_state) {
	case MFI_LD_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;

	case MFI_LD_PART_DEGRADED:
	case MFI_LD_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;

	case MFI_LD_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;

	default:
		bv->bv_status = BIOC_SVINVALID;
		DNPRINTF(MFI_D_IOCTL, "%s: invalid logical disk state %#x\n",
		    DEVNAME(sc),
		    sc->sc_ld_list.mll_list[i].mll_state);
	}

	/* additional status can modify MFI status */
	switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
	case MFI_LD_PROG_CC:
		/* consistency check: progress is a 16-bit fraction */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_BGI:
		/* background initialization, same encoding as CC */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_FGI:
	case MFI_LD_PROG_RECONSTRUCT:
		/* nothing yet */
		break;
	}

	if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01)
		bv->bv_cache = BIOC_CVWRITEBACK;
	else
		bv->bv_cache = BIOC_CVWRITETHROUGH;

	/*
	 * The RAID levels are determined per the SNIA DDF spec, this is only
	 * a subset that is valid for the MFI controller.
	 */
	bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
	if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_sec_raid ==
	    MFI_DDF_SRL_SPANNED)
		bv->bv_level *= 10;

	bv->bv_nodisk = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;

	bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */

	rv = 0;
done:
	return (rv);
}
1776 
/*
 * BIOCDISK: fill in a bioc_disk for one member disk of a logical
 * volume.  The (volume, diskid) pair is mapped through the span table
 * to an array/slot, then PD_GET_INFO supplies size, enclosure and
 * inquiry data.  A missing disk is not treated as an error.
 */
int
mfi_ioctl_disk(struct mfi_softc *sc, struct bioc_disk *bd)
{
	struct mfi_conf		*cfg;
	struct mfi_array	*ar;
	struct mfi_ld_cfg	*ld;
	struct mfi_pd_details	*pd;
	struct mfi_pd_progress	*mfp;
	struct mfi_progress	*mp;
	struct scsi_inquiry_data *inqbuf;
	char			vend[8+16+4+1], *vendp;
	int			rv = EINVAL;
	int			arr, vol, disk, span;
	union mfi_mbox		mbox;

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_disk %#x\n",
	    DEVNAME(sc), bd->bd_diskid);

	/* we really could skip and expect that inq took care of it */
	if (mfi_bio_getitall(sc)) {
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	cfg = sc->sc_cfg;

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	ar = cfg->mfc_array;
	vol = bd->bd_volid;
	if (vol >= cfg->mfc_no_ld) {
		/* do hotspares */
		rv = mfi_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
		goto freeme;
	}

	/* calculate offset to ld structure */
	ld = (struct mfi_ld_cfg *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array);

	/* use span 0 only when raid group is not spanned */
	if (ld[vol].mlc_parm.mpa_span_depth > 1)
		span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
	else
		span = 0;
	arr = ld[vol].mlc_span[span].mls_index;

	/* offset disk into pd list */
	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;
	bd->bd_target = ar[arr].pd[disk].mar_enc_slot;

	/* get status */
	switch (ar[arr].pd[disk].mar_pd_state){
	case MFI_PD_UNCONFIG_GOOD:
	case MFI_PD_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;

	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
		bd->bd_status = BIOC_SDHOTSPARE;
		break;

	case MFI_PD_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;

	case MFI_PD_REBUILD:
		bd->bd_status = BIOC_SDREBUILD;
		break;

	case MFI_PD_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;

	case MFI_PD_UNCONFIG_BAD: /* XXX define new state in bio */
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	/* get the remaining fields */
	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = ar[arr].pd[disk].mar_pd.mfp_id;
	if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
	    sizeof *pd, pd, &mbox)) {
		/* disk is missing but succeed command */
		rv = 0;
		goto freeme;
	}

	bd->bd_size = pd->mpd_size * 512; /* bytes per block */

	/* if pd->mpd_enc_idx is 0 then it is not in an enclosure */
	bd->bd_channel = pd->mpd_enc_idx;

	/*
	 * vend is sized 8+16+4+1: the copy deliberately spans the
	 * vendor, product and revision fields, which are laid out
	 * contiguously in the inquiry data.
	 */
	inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
	vendp = inqbuf->vendor;
	memcpy(vend, vendp, sizeof vend - 1);
	vend[sizeof vend - 1] = '\0';
	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));

	/* XXX find a way to retrieve serial nr from drive */
	/* XXX find a way to get bd_procdev */

	/* patrol read progress, if one is running on this disk */
	mfp = &pd->mpd_progress;
	if (mfp->mfp_in_prog & MFI_PD_PROG_PR) {
		mp = &mfp->mfp_patrol_read;
		bd->bd_patrol.bdp_percent = (mp->mp_progress * 100) / 0xffff;
		bd->bd_patrol.bdp_seconds = mp->mp_elapsed_seconds;
	}

	rv = 0;
freeme:
	free(pd, M_DEVBUF, sizeof *pd);

	return (rv);
}
1895 
1896 int
1897 mfi_ioctl_alarm(struct mfi_softc *sc, struct bioc_alarm *ba)
1898 {
1899 	uint32_t		opc, dir = MFI_DATA_NONE;
1900 	int			rv = 0;
1901 	int8_t			ret;
1902 
1903 	switch(ba->ba_opcode) {
1904 	case BIOC_SADISABLE:
1905 		opc = MR_DCMD_SPEAKER_DISABLE;
1906 		break;
1907 
1908 	case BIOC_SAENABLE:
1909 		opc = MR_DCMD_SPEAKER_ENABLE;
1910 		break;
1911 
1912 	case BIOC_SASILENCE:
1913 		opc = MR_DCMD_SPEAKER_SILENCE;
1914 		break;
1915 
1916 	case BIOC_GASTATUS:
1917 		opc = MR_DCMD_SPEAKER_GET;
1918 		dir = MFI_DATA_IN;
1919 		break;
1920 
1921 	case BIOC_SATEST:
1922 		opc = MR_DCMD_SPEAKER_TEST;
1923 		break;
1924 
1925 	default:
1926 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_alarm biocalarm invalid "
1927 		    "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
1928 		return (EINVAL);
1929 	}
1930 
1931 	if (mfi_mgmt(sc, opc, dir, sizeof(ret), &ret, NULL))
1932 		rv = EINVAL;
1933 	else
1934 		if (ba->ba_opcode == BIOC_GASTATUS)
1935 			ba->ba_status = ret;
1936 		else
1937 			ba->ba_status = 0;
1938 
1939 	return (rv);
1940 }
1941 
1942 int
1943 mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *bb)
1944 {
1945 	int			i, found, rv = EINVAL;
1946 	union mfi_mbox		mbox;
1947 	uint32_t		cmd;
1948 	struct mfi_pd_list	*pd;
1949 
1950 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink %x\n", DEVNAME(sc),
1951 	    bb->bb_status);
1952 
1953 	/* channel 0 means not in an enclosure so can't be blinked */
1954 	if (bb->bb_channel == 0)
1955 		return (EINVAL);
1956 
1957 	pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
1958 
1959 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1960 	    sizeof(*pd), pd, NULL))
1961 		goto done;
1962 
1963 	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1964 		if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
1965 		    bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
1966 			found = 1;
1967 			break;
1968 		}
1969 
1970 	if (!found)
1971 		goto done;
1972 
1973 	memset(&mbox, 0, sizeof(mbox));
1974 	mbox.s[0] = pd->mpl_address[i].mpa_pd_id;
1975 
1976 	switch (bb->bb_status) {
1977 	case BIOC_SBUNBLINK:
1978 		cmd = MR_DCMD_PD_UNBLINK;
1979 		break;
1980 
1981 	case BIOC_SBBLINK:
1982 		cmd = MR_DCMD_PD_BLINK;
1983 		break;
1984 
1985 	case BIOC_SBALARM:
1986 	default:
1987 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink biocblink invalid "
1988 		    "opcode %x\n", DEVNAME(sc), bb->bb_status);
1989 		goto done;
1990 	}
1991 
1992 
1993 	if (mfi_mgmt(sc, cmd, MFI_DATA_NONE, 0, NULL, &mbox))
1994 		goto done;
1995 
1996 	rv = 0;
1997 done:
1998 	free(pd, M_DEVBUF, sizeof *pd);
1999 	return (rv);
2000 }
2001 
2002 int
2003 mfi_ioctl_setstate(struct mfi_softc *sc, struct bioc_setstate *bs)
2004 {
2005 	struct mfi_pd_list	*pd;
2006 	struct mfi_pd_details	*info;
2007 	int			i, found, rv = EINVAL;
2008 	union mfi_mbox		mbox;
2009 
2010 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate %x\n", DEVNAME(sc),
2011 	    bs->bs_status);
2012 
2013 	pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
2014 	info = malloc(sizeof *info, M_DEVBUF, M_WAITOK);
2015 
2016 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
2017 	    sizeof(*pd), pd, NULL))
2018 		goto done;
2019 
2020 	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
2021 		if (bs->bs_channel == pd->mpl_address[i].mpa_enc_index &&
2022 		    bs->bs_target == pd->mpl_address[i].mpa_enc_slot) {
2023 			found = 1;
2024 			break;
2025 		}
2026 
2027 	if (!found)
2028 		goto done;
2029 
2030 	memset(&mbox, 0, sizeof(mbox));
2031 	mbox.s[0] = pd->mpl_address[i].mpa_pd_id;
2032 
2033 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
2034 	    sizeof *info, info, &mbox))
2035 		goto done;
2036 
2037 	mbox.s[0] = pd->mpl_address[i].mpa_pd_id;
2038 	mbox.s[1] = info->mpd_pd.mfp_seq;
2039 
2040 	switch (bs->bs_status) {
2041 	case BIOC_SSONLINE:
2042 		mbox.b[4] = MFI_PD_ONLINE;
2043 		break;
2044 
2045 	case BIOC_SSOFFLINE:
2046 		mbox.b[4] = MFI_PD_OFFLINE;
2047 		break;
2048 
2049 	case BIOC_SSHOTSPARE:
2050 		mbox.b[4] = MFI_PD_HOTSPARE;
2051 		break;
2052 
2053 	case BIOC_SSREBUILD:
2054 		mbox.b[4] = MFI_PD_REBUILD;
2055 		break;
2056 
2057 	default:
2058 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate invalid "
2059 		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
2060 		goto done;
2061 	}
2062 
2063 
2064 	if ((rv = mfi_mgmt(sc, MR_DCMD_PD_SET_STATE, MFI_DATA_NONE, 0, NULL,
2065 	    &mbox)))
2066 		goto done;
2067 
2068 	rv = 0;
2069 done:
2070 	free(pd, M_DEVBUF, sizeof *pd);
2071 	free(info, M_DEVBUF, sizeof *info);
2072 	return (rv);
2073 }
2074 
/*
 * Handle BIOCPATROL ioctls: start/stop a patrol read, configure the
 * patrol-read schedule (manual/disabled/auto), or report the current
 * patrol-read properties and state back to userland.
 * Returns 0 on success, EINVAL on a bad opcode/argument or when a
 * firmware management command fails.
 */
int
mfi_ioctl_patrol(struct mfi_softc *sc, struct bioc_patrol *bp)
{
	uint32_t		opc, dir = MFI_DATA_NONE;
	int			rv = 0;
	struct mfi_pr_properties prop;
	struct mfi_pr_status	status;
	uint32_t		time, exec_freq;

	switch (bp->bp_opcode) {
	case BIOC_SPSTOP:
	case BIOC_SPSTART:
		if (bp->bp_opcode == BIOC_SPSTART)
			opc = MR_DCMD_PR_START;
		else
			opc = MR_DCMD_PR_STOP;
		dir = MFI_DATA_IN;
		/* no payload; the DCMD opcode alone carries the request */
		if (mfi_mgmt(sc, opc, dir, 0, NULL, NULL))
			return (EINVAL);
		break;

	case BIOC_SPMANUAL:
	case BIOC_SPDISABLE:
	case BIOC_SPAUTO:
		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		dir = MFI_DATA_IN;
		if (mfi_mgmt(sc, opc, dir, sizeof(time), &time, NULL))
			return (EINVAL);

		/* read-modify-write of the patrol properties */
		opc = MR_DCMD_PR_GET_PROPERTIES;
		dir = MFI_DATA_IN;
		if (mfi_mgmt(sc, opc, dir, sizeof(prop), &prop, NULL))
			return (EINVAL);

		switch (bp->bp_opcode) {
		case BIOC_SPMANUAL:
			prop.op_mode = MFI_PR_OPMODE_MANUAL;
			break;
		case BIOC_SPDISABLE:
			prop.op_mode = MFI_PR_OPMODE_DISABLED;
			break;
		case BIOC_SPAUTO:
			/* an interval of 0 keeps the current setting */
			if (bp->bp_autoival != 0) {
				if (bp->bp_autoival == -1)
					/* continuously */
					exec_freq = 0xffffffffU;
				else if (bp->bp_autoival > 0)
					exec_freq = bp->bp_autoival;
				else
					return (EINVAL);
				prop.exec_freq = exec_freq;
			}
			/* next run is an offset from the controller's clock */
			if (bp->bp_autonext != 0) {
				if (bp->bp_autonext < 0)
					return (EINVAL);
				else
					prop.next_exec = time + bp->bp_autonext;
			}
			prop.op_mode = MFI_PR_OPMODE_AUTO;
			break;
		}

		/* write the updated properties back to the firmware */
		opc = MR_DCMD_PR_SET_PROPERTIES;
		dir = MFI_DATA_OUT;
		if (mfi_mgmt(sc, opc, dir, sizeof(prop), &prop, NULL))
			return (EINVAL);

		break;

	case BIOC_GPSTATUS:
		opc = MR_DCMD_PR_GET_PROPERTIES;
		dir = MFI_DATA_IN;
		if (mfi_mgmt(sc, opc, dir, sizeof(prop), &prop, NULL))
			return (EINVAL);

		opc = MR_DCMD_PR_GET_STATUS;
		dir = MFI_DATA_IN;
		if (mfi_mgmt(sc, opc, dir, sizeof(status), &status, NULL))
			return (EINVAL);

		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		dir = MFI_DATA_IN;
		if (mfi_mgmt(sc, opc, dir, sizeof(time), &time, NULL))
			return (EINVAL);

		/* translate firmware op mode to bioc mode */
		switch (prop.op_mode) {
		case MFI_PR_OPMODE_AUTO:
			bp->bp_mode = BIOC_SPMAUTO;
			bp->bp_autoival = prop.exec_freq;
			bp->bp_autonext = prop.next_exec;
			bp->bp_autonow = time;
			break;
		case MFI_PR_OPMODE_MANUAL:
			bp->bp_mode = BIOC_SPMMANUAL;
			break;
		case MFI_PR_OPMODE_DISABLED:
			bp->bp_mode = BIOC_SPMDISABLED;
			break;
		default:
			printf("%s: unknown patrol mode %d\n",
			    DEVNAME(sc), prop.op_mode);
			break;
		}

		/* translate firmware patrol state to bioc status */
		switch (status.state) {
		case MFI_PR_STATE_STOPPED:
			bp->bp_status = BIOC_SPSSTOPPED;
			break;
		case MFI_PR_STATE_READY:
			bp->bp_status = BIOC_SPSREADY;
			break;
		case MFI_PR_STATE_ACTIVE:
			bp->bp_status = BIOC_SPSACTIVE;
			break;
		case MFI_PR_STATE_ABORTED:
			bp->bp_status = BIOC_SPSABORTED;
			break;
		default:
			printf("%s: unknown patrol state %d\n",
			    DEVNAME(sc), status.state);
			break;
		}

		break;

	default:
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_patrol biocpatrol invalid "
		    "opcode %x\n", DEVNAME(sc), bp->bp_opcode);
		return (EINVAL);
	}

	return (rv);
}
2210 
2211 int
2212 mfi_bio_hs(struct mfi_softc *sc, int volid, int type, void *bio_hs)
2213 {
2214 	struct mfi_conf		*cfg;
2215 	struct mfi_hotspare	*hs;
2216 	struct mfi_pd_details	*pd;
2217 	struct bioc_disk	*sdhs;
2218 	struct bioc_vol		*vdhs;
2219 	struct scsi_inquiry_data *inqbuf;
2220 	char			vend[8+16+4+1], *vendp;
2221 	int			i, rv = EINVAL;
2222 	uint32_t		size;
2223 	union mfi_mbox		mbox;
2224 
2225 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs %d\n", DEVNAME(sc), volid);
2226 
2227 	if (!bio_hs)
2228 		return (EINVAL);
2229 
2230 	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
2231 
2232 	/* send single element command to retrieve size for full structure */
2233 	cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
2234 	if (mfi_mgmt(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg, NULL))
2235 		goto freeme;
2236 
2237 	size = cfg->mfc_size;
2238 	free(cfg, M_DEVBUF, sizeof *cfg);
2239 
2240 	/* memory for read config */
2241 	cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
2242 	if (mfi_mgmt(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, size, cfg, NULL))
2243 		goto freeme;
2244 
2245 	/* calculate offset to hs structure */
2246 	hs = (struct mfi_hotspare *)(
2247 	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
2248 	    cfg->mfc_array_size * cfg->mfc_no_array +
2249 	    cfg->mfc_ld_size * cfg->mfc_no_ld);
2250 
2251 	if (volid < cfg->mfc_no_ld)
2252 		goto freeme; /* not a hotspare */
2253 
2254 	if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
2255 		goto freeme; /* not a hotspare */
2256 
2257 	/* offset into hotspare structure */
2258 	i = volid - cfg->mfc_no_ld;
2259 
2260 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs i %d volid %d no_ld %d no_hs %d "
2261 	    "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
2262 	    cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
2263 
2264 	/* get pd fields */
2265 	memset(&mbox, 0, sizeof(mbox));
2266 	mbox.s[0] = hs[i].mhs_pd.mfp_id;
2267 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
2268 	    sizeof *pd, pd, &mbox)) {
2269 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs illegal PD\n",
2270 		    DEVNAME(sc));
2271 		goto freeme;
2272 	}
2273 
2274 	switch (type) {
2275 	case MFI_MGMT_VD:
2276 		vdhs = bio_hs;
2277 		vdhs->bv_status = BIOC_SVONLINE;
2278 		vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
2279 		vdhs->bv_level = -1; /* hotspare */
2280 		vdhs->bv_nodisk = 1;
2281 		break;
2282 
2283 	case MFI_MGMT_SD:
2284 		sdhs = bio_hs;
2285 		sdhs->bd_status = BIOC_SDHOTSPARE;
2286 		sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
2287 		sdhs->bd_channel = pd->mpd_enc_idx;
2288 		sdhs->bd_target = pd->mpd_enc_slot;
2289 		inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
2290 		vendp = inqbuf->vendor;
2291 		memcpy(vend, vendp, sizeof vend - 1);
2292 		vend[sizeof vend - 1] = '\0';
2293 		strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
2294 		break;
2295 
2296 	default:
2297 		goto freeme;
2298 	}
2299 
2300 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs 6\n", DEVNAME(sc));
2301 	rv = 0;
2302 freeme:
2303 	free(pd, M_DEVBUF, sizeof *pd);
2304 	free(cfg, M_DEVBUF, 0);
2305 
2306 	return (rv);
2307 }
2308 
2309 #ifndef SMALL_KERNEL
2310 
/*
 * Labels for the individual BBU status bits reported by the firmware;
 * entry i corresponds to bit (1 << i) of the fw_status word (see the
 * indicator loop in mfi_bbu()).
 */
static const char *mfi_bbu_indicators[] = {
	"pack missing",
	"voltage low",
	"temp high",
	"charge active",
	"discharge active",
	"learn cycle req'd",
	"learn cycle active",
	"learn cycle failed",
	"learn cycle timeout",
	"I2C errors",
	"replace pack",
	"low capacity",
	"periodic learn req'd"
};

/* number of ksensors in sc_bbu: ok indicator, volts, amps, temperature */
#define MFI_BBU_SENSORS 4
2328 
/*
 * Refresh the battery backup unit sensors from the controller.
 * Returns 0 on success, -1 when the status query fails (in which case
 * every BBU sensor is marked unknown).
 */
int
mfi_bbu(struct mfi_softc *sc)
{
	struct mfi_bbu_status bbu;
	u_int32_t status;
	u_int32_t mask;
	u_int32_t soh_bad;
	int i;

	if (mfi_mgmt(sc, MR_DCMD_BBU_GET_STATUS, MFI_DATA_IN,
	    sizeof(bbu), &bbu, NULL) != 0) {
		/* cannot talk to the BBU: invalidate all sensors */
		for (i = 0; i < MFI_BBU_SENSORS; i++) {
			sc->sc_bbu[i].value = 0;
			sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
		}
		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
			sc->sc_bbu_status[i].value = 0;
			sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
		}
		return (-1);
	}

	/* pick the "battery bad" criteria for this pack type */
	switch (bbu.battery_type) {
	case MFI_BBU_TYPE_IBBU:
		mask = MFI_BBU_STATE_BAD_IBBU;
		soh_bad = 0;
		break;
	case MFI_BBU_TYPE_BBU:
		mask = MFI_BBU_STATE_BAD_BBU;
		/* BBU packs additionally report state-of-health */
		soh_bad = (bbu.detail.bbu.is_SOH_good == 0);
		break;

	case MFI_BBU_TYPE_NONE:
	default:
		/* no battery present: flag the ok-indicator critical */
		sc->sc_bbu[0].value = 0;
		sc->sc_bbu[0].status = SENSOR_S_CRIT;
		for (i = 1; i < MFI_BBU_SENSORS; i++) {
			sc->sc_bbu[i].value = 0;
			sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
		}
		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
			sc->sc_bbu_status[i].value = 0;
			sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
		}
		return (0);
	}

	status = letoh32(bbu.fw_status);

	/* sc_bbu[0] is the overall battery-ok indicator */
	sc->sc_bbu[0].value = ((status & mask) || soh_bad) ? 0 : 1;
	sc->sc_bbu[0].status = ((status & mask) || soh_bad) ? SENSOR_S_CRIT :
	    SENSOR_S_OK;

	/* scale raw values for the sensor framework; fw units assumed
	 * mV / mA / degC -- TODO confirm against firmware docs */
	sc->sc_bbu[1].value = letoh16(bbu.voltage) * 1000;
	sc->sc_bbu[2].value = (int16_t)letoh16(bbu.current) * 1000;
	/* degC -> microkelvin (273150000 == 273.15 * 1e6) */
	sc->sc_bbu[3].value = letoh16(bbu.temperature) * 1000000 + 273150000;
	for (i = 1; i < MFI_BBU_SENSORS; i++)
		sc->sc_bbu[i].status = SENSOR_S_UNSPEC;

	/* mirror each fw_status bit into its indicator sensor */
	for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
		sc->sc_bbu_status[i].value = (status & (1 << i)) ? 1 : 0;
		sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
	}

	return (0);
}
2395 
2396 int
2397 mfi_create_sensors(struct mfi_softc *sc)
2398 {
2399 	struct device		*dev;
2400 	struct scsi_link	*link;
2401 	int			i;
2402 
2403 	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
2404 	    sizeof(sc->sc_sensordev.xname));
2405 
2406 	if (ISSET(letoh32(sc->sc_info.mci_adapter_ops ), MFI_INFO_AOPS_BBU)) {
2407 		sc->sc_bbu = mallocarray(4, sizeof(*sc->sc_bbu),
2408 		    M_DEVBUF, M_WAITOK | M_ZERO);
2409 
2410 		sc->sc_bbu[0].type = SENSOR_INDICATOR;
2411 		sc->sc_bbu[0].status = SENSOR_S_UNKNOWN;
2412 		strlcpy(sc->sc_bbu[0].desc, "bbu ok",
2413 		    sizeof(sc->sc_bbu[0].desc));
2414 		sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[0]);
2415 
2416 		sc->sc_bbu[1].type = SENSOR_VOLTS_DC;
2417 		sc->sc_bbu[1].status = SENSOR_S_UNSPEC;
2418 		sc->sc_bbu[2].type = SENSOR_AMPS;
2419 		sc->sc_bbu[2].status = SENSOR_S_UNSPEC;
2420 		sc->sc_bbu[3].type = SENSOR_TEMP;
2421 		sc->sc_bbu[3].status = SENSOR_S_UNSPEC;
2422 		for (i = 1; i < MFI_BBU_SENSORS; i++) {
2423 			strlcpy(sc->sc_bbu[i].desc, "bbu",
2424 			    sizeof(sc->sc_bbu[i].desc));
2425 			sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[i]);
2426 		}
2427 
2428 		sc->sc_bbu_status = malloc(sizeof(*sc->sc_bbu_status) *
2429 		    sizeof(mfi_bbu_indicators), M_DEVBUF, M_WAITOK | M_ZERO);
2430 
2431 		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
2432 			sc->sc_bbu_status[i].type = SENSOR_INDICATOR;
2433 			sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
2434 			strlcpy(sc->sc_bbu_status[i].desc,
2435 			    mfi_bbu_indicators[i],
2436 			    sizeof(sc->sc_bbu_status[i].desc));
2437 
2438 			sensor_attach(&sc->sc_sensordev, &sc->sc_bbu_status[i]);
2439 		}
2440 	}
2441 
2442 	sc->sc_sensors = mallocarray(sc->sc_ld_cnt, sizeof(struct ksensor),
2443 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2444 	if (sc->sc_sensors == NULL)
2445 		return (1);
2446 
2447 	for (i = 0; i < sc->sc_ld_cnt; i++) {
2448 		link = scsi_get_link(sc->sc_scsibus, i, 0);
2449 		if (link == NULL)
2450 			goto bad;
2451 
2452 		dev = link->device_softc;
2453 
2454 		sc->sc_sensors[i].type = SENSOR_DRIVE;
2455 		sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
2456 
2457 		strlcpy(sc->sc_sensors[i].desc, dev->dv_xname,
2458 		    sizeof(sc->sc_sensors[i].desc));
2459 
2460 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
2461 	}
2462 
2463 	if (sensor_task_register(sc, mfi_refresh_sensors, 10) == NULL)
2464 		goto bad;
2465 
2466 	sensordev_install(&sc->sc_sensordev);
2467 
2468 	return (0);
2469 
2470 bad:
2471 	free(sc->sc_sensors, M_DEVBUF,
2472 	    sc->sc_ld_cnt * sizeof(struct ksensor));
2473 
2474 	return (1);
2475 }
2476 
/*
 * Periodic sensor refresh callback (registered via sensor_task_register
 * in mfi_create_sensors): update the BBU sensors if present, then map
 * each logical disk's bioc volume status onto its drive sensor.
 */
void
mfi_refresh_sensors(void *arg)
{
	struct mfi_softc	*sc = arg;
	int			i, rv;
	struct bioc_vol		bv;

	/* if the BBU query fails, skip the drive sensors this round */
	if (sc->sc_bbu != NULL && mfi_bbu(sc) != 0)
		return;

	for (i = 0; i < sc->sc_ld_cnt; i++) {
		bzero(&bv, sizeof(bv));
		bv.bv_volid = i;

		/* mfi_ioctl_vol requires the ioctl lock */
		rw_enter_write(&sc->sc_lock);
		rv = mfi_ioctl_vol(sc, &bv);
		rw_exit_write(&sc->sc_lock);

		if (rv != 0)
			return;

		switch(bv.bv_status) {
		case BIOC_SVOFFLINE:
			sc->sc_sensors[i].value = SENSOR_DRIVE_FAIL;
			sc->sc_sensors[i].status = SENSOR_S_CRIT;
			break;

		case BIOC_SVDEGRADED:
			sc->sc_sensors[i].value = SENSOR_DRIVE_PFAIL;
			sc->sc_sensors[i].status = SENSOR_S_WARN;
			break;

		case BIOC_SVSCRUB:
		case BIOC_SVONLINE:
			sc->sc_sensors[i].value = SENSOR_DRIVE_ONLINE;
			sc->sc_sensors[i].status = SENSOR_S_OK;
			break;

		case BIOC_SVINVALID:
			/* FALLTHROUGH */
		default:
			sc->sc_sensors[i].value = 0; /* unknown */
			sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
			break;
		}
	}
}
2524 #endif /* SMALL_KERNEL */
2525 #endif /* NBIO > 0 */
2526 
/*
 * Hand a fully built command frame to the adapter: flush the frame
 * memory so the controller sees a coherent frame, then post it.
 */
void
mfi_start(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	/* make the ccb's frame visible to the device before posting */
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
	    ccb->ccb_pframe_offset, sc->sc_frames_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	mfi_post(sc, ccb);
}
2536 
/*
 * Command completion: sync the frame memory back from the device,
 * tear down any data-buffer DMA mapping, and invoke the ccb's
 * completion callback.
 */
void
mfi_done(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	/* pull the completed frame (status, sense pointer) back in */
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
	    ccb->ccb_pframe_offset, sc->sc_frames_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* only commands with a data transfer have a loaded dmamap */
	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
		    0, ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction == MFI_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	ccb->ccb_done(sc, ccb);
}
2555 
/* xscale: read the firmware state from outbound message register 0 */
u_int32_t
mfi_xscale_fw_state(struct mfi_softc *sc)
{
	return (mfi_read(sc, MFI_OMSG0));
}
2561 
/* xscale: unmask adapter interrupts */
void
mfi_xscale_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_OMSK, MFI_ENABLE_INTR);
}
2567 
/*
 * xscale interrupt handler helper: returns 1 and acknowledges the
 * interrupt if this adapter raised it, 0 if the interrupt was not ours.
 */
int
mfi_xscale_intr(struct mfi_softc *sc)
{
	u_int32_t status;

	status = mfi_read(sc, MFI_OSTS);
	if (!ISSET(status, MFI_OSTS_INTR_VALID))
		return (0);

	/* write status back to acknowledge interrupt */
	mfi_write(sc, MFI_OSTS, status);

	return (1);
}
2582 
/*
 * xscale: post a command frame.  The inbound queue port takes the
 * frame's physical address shifted right by 3, with the extra frame
 * count in the low bits.
 */
void
mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	mfi_write(sc, MFI_IQP, (ccb->ccb_pframe >> 3) |
	    ccb->ccb_extra_frames);
}
2589 
/* ppc: read the firmware state from the outbound scratch pad register */
u_int32_t
mfi_ppc_fw_state(struct mfi_softc *sc)
{
	return (mfi_read(sc, MFI_OSP));
}
2595 
/*
 * ppc: clear any pending doorbell state, then unmask the interrupt
 * bits this variant uses (mask register is active-low).
 */
void
mfi_ppc_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_ODC, 0xffffffff);
	mfi_write(sc, MFI_OMSK, ~0x80000004);
}
2602 
/*
 * ppc interrupt handler helper: returns 1 and acknowledges via the
 * doorbell-clear register if this adapter raised the interrupt,
 * 0 otherwise.
 */
int
mfi_ppc_intr(struct mfi_softc *sc)
{
	u_int32_t status;

	status = mfi_read(sc, MFI_OSTS);
	if (!ISSET(status, MFI_OSTS_PPC_INTR_VALID))
		return (0);

	/* write status back to acknowledge interrupt */
	mfi_write(sc, MFI_ODC, status);

	return (1);
}
2617 
/*
 * ppc: post a command frame.  Bit 0 marks the entry valid; the extra
 * frame count is encoded starting at bit 1.
 */
void
mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
	    (ccb->ccb_extra_frames << 1));
}
2624 
/* gen2: read the firmware state from the outbound scratch pad register */
u_int32_t
mfi_gen2_fw_state(struct mfi_softc *sc)
{
	return (mfi_read(sc, MFI_OSP));
}
2630 
/*
 * gen2: clear pending doorbell state, then unmask the gen2 interrupt
 * bit (mask register is active-low).
 */
void
mfi_gen2_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_ODC, 0xffffffff);
	mfi_write(sc, MFI_OMSK, ~MFI_OSTS_GEN2_INTR_VALID);
}
2637 
/*
 * gen2 interrupt handler helper: returns 1 and acknowledges via the
 * doorbell-clear register if this adapter raised the interrupt,
 * 0 otherwise.
 */
int
mfi_gen2_intr(struct mfi_softc *sc)
{
	u_int32_t status;

	status = mfi_read(sc, MFI_OSTS);
	if (!ISSET(status, MFI_OSTS_GEN2_INTR_VALID))
		return (0);

	/* write status back to acknowledge interrupt */
	mfi_write(sc, MFI_ODC, status);

	return (1);
}
2652 
/*
 * gen2: post a command frame; same encoding as ppc (bit 0 valid,
 * extra frame count from bit 1).
 */
void
mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
	    (ccb->ccb_extra_frames << 1));
}
2659 
/* skinny: read the firmware state from the outbound scratch pad register */
u_int32_t
mfi_skinny_fw_state(struct mfi_softc *sc)
{
	return (mfi_read(sc, MFI_OSP));
}
2665 
/* skinny: unmask the interrupt bit (mask register is active-low) */
void
mfi_skinny_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_OMSK, ~0x00000001);
}
2671 
/*
 * skinny interrupt handler helper: returns 1 and acknowledges the
 * interrupt if this adapter raised it, 0 otherwise.
 */
int
mfi_skinny_intr(struct mfi_softc *sc)
{
	u_int32_t status;

	status = mfi_read(sc, MFI_OSTS);
	if (!ISSET(status, MFI_OSTS_SKINNY_INTR_VALID))
		return (0);

	/* write status back to acknowledge interrupt */
	mfi_write(sc, MFI_OSTS, status);

	return (1);
}
2686 
/*
 * skinny: post a command frame via the 64-bit split queue port; the
 * high word is written as zero, so frame addresses are assumed to fit
 * in 32 bits -- NOTE(review): confirm against frame pool allocation.
 */
void
mfi_skinny_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	mfi_write(sc, MFI_IQPL, 0x1 | ccb->ccb_pframe |
	    (ccb->ccb_extra_frames << 1));
	mfi_write(sc, MFI_IQPH, 0x00000000);
}
2694 
/*
 * Build the scatter/gather list for a skinny adapter.  I/O commands
 * use the IEEE 64-bit SGE format; everything else falls back to the
 * default SGL builder.  Returns the size in bytes of the SGL written.
 */
u_int
mfi_skinny_sgd_load(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
	union mfi_sgl		*sgl = ccb->ccb_sgl;
	bus_dma_segment_t	*sgd = ccb->ccb_dmamap->dm_segs;
	int			 i;

	switch (hdr->mfh_cmd) {
	case MFI_CMD_LD_READ:
	case MFI_CMD_LD_WRITE:
	case MFI_CMD_PD_SCSI_IO:
		/* Use MF_FRAME_IEEE for some IO commands on skinny adapters */
		for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
			/* SGEs are little-endian 64-bit addr + 32-bit len */
			sgl->sg_skinny[i].addr = htole64(sgd[i].ds_addr);
			sgl->sg_skinny[i].len = htole32(sgd[i].ds_len);
			sgl->sg_skinny[i].flag = 0;
		}
		/* tell the firmware the SGL is in IEEE 64-bit format */
		hdr->mfh_flags |= MFI_FRAME_IEEE | MFI_FRAME_SGL64;

		return (ccb->ccb_dmamap->dm_nsegs * sizeof(sgl->sg_skinny));
	default:
		return (mfi_default_sgd_load(sc, ccb));
	}
}
2720 
/*
 * Probe a physical disk target on the pass-through bus.  Only LUN 0 is
 * supported, and the disk must be in SYSTEM (JBOD) state to attach.
 * Returns 0 on success, ENXIO if there is no usable disk at the
 * target, EIO if the firmware query fails.
 */
int
mfi_pd_scsi_probe(struct scsi_link *link)
{
	union mfi_mbox mbox;
	struct mfi_softc *sc = link->adapter_softc;
	struct mfi_pd_link *pl = sc->sc_pd->pd_links[link->target];

	if (link->lun > 0)
		return (0);

	if (pl == NULL)
		return (ENXIO);

	/* fetch the pd details for this device id */
	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = pl->pd_id;

	if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
	    sizeof(pl->pd_info), &pl->pd_info, &mbox))
		return (EIO);

	/* only attach disks the firmware exposes as JBOD/system disks */
	if (letoh16(pl->pd_info.mpd_fw_state) != MFI_PD_SYSTEM)
		return (ENXIO);

	return (0);
}
2746 
/*
 * Issue a SCSI command to a physical (pass-through) disk by wrapping
 * it in an MFI pass frame.  Runs without the kernel lock; it is
 * retaken before returning to the midlayer / calling scsi_done().
 */
void
mfi_pd_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfi_softc *sc = link->adapter_softc;
	struct mfi_ccb *ccb = xs->io;
	struct mfi_pass_frame *pf = &ccb->ccb_frame->mfr_pass;
	struct mfi_pd_link *pl = sc->sc_pd->pd_links[link->target];

	KERNEL_UNLOCK();

	mfi_scrub_ccb(ccb);
	xs->error = XS_NOERROR;

	/* build the pass-through frame header around the CDB */
	pf->mpf_header.mfh_cmd = MFI_CMD_PD_SCSI_IO;
	pf->mpf_header.mfh_target_id = pl->pd_id;
	pf->mpf_header.mfh_lun_id = link->lun;
	pf->mpf_header.mfh_cdb_len = xs->cmdlen;
	pf->mpf_header.mfh_timeout = 0;
	pf->mpf_header.mfh_data_len = htole32(xs->datalen); /* XXX */
	pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;
	pf->mpf_sense_addr = htole64(ccb->ccb_psense);

	memset(pf->mpf_cdb, 0, sizeof(pf->mpf_cdb));
	memcpy(pf->mpf_cdb, xs->cmd, xs->cmdlen);

	ccb->ccb_done = mfi_scsi_xs_done;
	ccb->ccb_cookie = xs;
	ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
	ccb->ccb_sgl = &pf->mpf_sgl;

	/* translate the xfer direction for the SGL/DMA setup */
	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
		ccb->ccb_direction = xs->flags & SCSI_DATA_IN ?
		    MFI_DATA_IN : MFI_DATA_OUT;
	else
		ccb->ccb_direction = MFI_DATA_NONE;

	if (xs->data) {
		ccb->ccb_data = xs->data;
		ccb->ccb_len = xs->datalen;

		/* map the data buffer; may sleep unless SCSI_NOSLEEP */
		if (mfi_create_sgl(sc, ccb, (xs->flags & SCSI_NOSLEEP) ?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
			goto stuffup;
	}

	/* polled commands complete before return; others via interrupt */
	if (xs->flags & SCSI_POLL)
		mfi_poll(sc, ccb);
	else
		mfi_start(sc, ccb);

	KERNEL_LOCK();
	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
	KERNEL_LOCK();
	scsi_done(xs);
}
2806