1 /* $OpenBSD: mfi.c,v 1.165 2015/09/09 18:23:55 deraadt Exp $ */
2 /*
3  * Copyright (c) 2006 Marco Peereboom <marco@peereboom.us>
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include "bio.h"
19 
20 #include <sys/types.h>
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/buf.h>
24 #include <sys/device.h>
25 #include <sys/kernel.h>
26 #include <sys/malloc.h>
27 #include <sys/rwlock.h>
28 #include <sys/sensors.h>
29 #include <sys/dkio.h>
30 #include <sys/pool.h>
31 
32 #include <machine/bus.h>
33 
34 #include <scsi/scsi_all.h>
35 #include <scsi/scsi_disk.h>
36 #include <scsi/scsiconf.h>
37 
38 #include <dev/biovar.h>
39 #include <dev/ic/mfireg.h>
40 #include <dev/ic/mfivar.h>
41 
42 #ifdef MFI_DEBUG
43 uint32_t	mfi_debug = 0
44 /*		    | MFI_D_CMD */
45 /*		    | MFI_D_INTR */
46 /*		    | MFI_D_MISC */
47 /*		    | MFI_D_DMA */
48 /*		    | MFI_D_IOCTL */
49 /*		    | MFI_D_RW */
50 /*		    | MFI_D_MEM */
51 /*		    | MFI_D_CCB */
52 		;
53 #endif
54 
55 struct cfdriver mfi_cd = {
56 	NULL, "mfi", DV_DULL
57 };
58 
59 void	mfi_scsi_cmd(struct scsi_xfer *);
60 int	mfi_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
61 int	mfi_ioctl_cache(struct scsi_link *, u_long,  struct dk_cache *);
62 void	mfiminphys(struct buf *bp, struct scsi_link *sl);
63 
64 void	mfi_pd_scsi_cmd(struct scsi_xfer *);
65 int	mfi_pd_scsi_probe(struct scsi_link *);
66 
67 struct scsi_adapter mfi_switch = {
68 	mfi_scsi_cmd, mfiminphys, 0, 0, mfi_scsi_ioctl
69 };
70 
71 struct scsi_adapter mfi_pd_switch = {
72 	mfi_pd_scsi_cmd,
73 	mfiminphys,
74 	mfi_pd_scsi_probe,
75 	0,
76 	mfi_scsi_ioctl
77 };
78 
79 void *		mfi_get_ccb(void *);
80 void		mfi_put_ccb(void *, void *);
81 void		mfi_scrub_ccb(struct mfi_ccb *);
82 int		mfi_init_ccb(struct mfi_softc *);
83 
84 struct mfi_mem	*mfi_allocmem(struct mfi_softc *, size_t);
85 void		mfi_freemem(struct mfi_softc *, struct mfi_mem *);
86 
87 int		mfi_transition_firmware(struct mfi_softc *);
88 int		mfi_initialize_firmware(struct mfi_softc *);
89 int		mfi_get_info(struct mfi_softc *);
90 uint32_t	mfi_read(struct mfi_softc *, bus_size_t);
91 void		mfi_write(struct mfi_softc *, bus_size_t, uint32_t);
92 void		mfi_poll(struct mfi_softc *, struct mfi_ccb *);
93 void		mfi_exec(struct mfi_softc *, struct mfi_ccb *);
94 void		mfi_exec_done(struct mfi_softc *, struct mfi_ccb *);
95 int		mfi_create_sgl(struct mfi_softc *, struct mfi_ccb *, int);
96 u_int		mfi_default_sgd_load(struct mfi_softc *, struct mfi_ccb *);
97 int		mfi_syspd(struct mfi_softc *);
98 
99 /* commands */
100 int		mfi_scsi_ld(struct mfi_softc *sc, struct mfi_ccb *,
101 		    struct scsi_xfer *);
102 int		mfi_scsi_io(struct mfi_softc *sc, struct mfi_ccb *,
103 		    struct scsi_xfer *, uint64_t, uint32_t);
104 void		mfi_scsi_xs_done(struct mfi_softc *sc, struct mfi_ccb *);
105 int		mfi_mgmt(struct mfi_softc *, uint32_t, uint32_t, uint32_t,
106 		    void *, uint8_t *);
107 int		mfi_do_mgmt(struct mfi_softc *, struct mfi_ccb * , uint32_t,
108 		    uint32_t, uint32_t, void *, uint8_t *);
109 void		mfi_empty_done(struct mfi_softc *, struct mfi_ccb *);
110 
111 #if NBIO > 0
112 int		mfi_ioctl(struct device *, u_long, caddr_t);
113 int		mfi_bio_getitall(struct mfi_softc *);
114 int		mfi_ioctl_inq(struct mfi_softc *, struct bioc_inq *);
115 int		mfi_ioctl_vol(struct mfi_softc *, struct bioc_vol *);
116 int		mfi_ioctl_disk(struct mfi_softc *, struct bioc_disk *);
117 int		mfi_ioctl_alarm(struct mfi_softc *, struct bioc_alarm *);
118 int		mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *);
119 int		mfi_ioctl_setstate(struct mfi_softc *, struct bioc_setstate *);
120 int		mfi_ioctl_patrol(struct mfi_softc *sc, struct bioc_patrol *);
121 int		mfi_bio_hs(struct mfi_softc *, int, int, void *);
122 #ifndef SMALL_KERNEL
123 int		mfi_create_sensors(struct mfi_softc *);
124 void		mfi_refresh_sensors(void *);
125 int		mfi_bbu(struct mfi_softc *);
126 #endif /* SMALL_KERNEL */
127 #endif /* NBIO > 0 */
128 
129 void		mfi_start(struct mfi_softc *, struct mfi_ccb *);
130 void		mfi_done(struct mfi_softc *, struct mfi_ccb *);
131 u_int32_t	mfi_xscale_fw_state(struct mfi_softc *);
132 void		mfi_xscale_intr_ena(struct mfi_softc *);
133 int		mfi_xscale_intr(struct mfi_softc *);
134 void		mfi_xscale_post(struct mfi_softc *, struct mfi_ccb *);
135 
136 static const struct mfi_iop_ops mfi_iop_xscale = {
137 	mfi_xscale_fw_state,
138 	mfi_xscale_intr_ena,
139 	mfi_xscale_intr,
140 	mfi_xscale_post,
141 	mfi_default_sgd_load,
142 	0,
143 };
144 
145 u_int32_t	mfi_ppc_fw_state(struct mfi_softc *);
146 void		mfi_ppc_intr_ena(struct mfi_softc *);
147 int		mfi_ppc_intr(struct mfi_softc *);
148 void		mfi_ppc_post(struct mfi_softc *, struct mfi_ccb *);
149 
150 static const struct mfi_iop_ops mfi_iop_ppc = {
151 	mfi_ppc_fw_state,
152 	mfi_ppc_intr_ena,
153 	mfi_ppc_intr,
154 	mfi_ppc_post,
155 	mfi_default_sgd_load,
156 	MFI_IDB,
157 	0
158 };
159 
160 u_int32_t	mfi_gen2_fw_state(struct mfi_softc *);
161 void		mfi_gen2_intr_ena(struct mfi_softc *);
162 int		mfi_gen2_intr(struct mfi_softc *);
163 void		mfi_gen2_post(struct mfi_softc *, struct mfi_ccb *);
164 
165 static const struct mfi_iop_ops mfi_iop_gen2 = {
166 	mfi_gen2_fw_state,
167 	mfi_gen2_intr_ena,
168 	mfi_gen2_intr,
169 	mfi_gen2_post,
170 	mfi_default_sgd_load,
171 	MFI_IDB,
172 	0
173 };
174 
175 u_int32_t	mfi_skinny_fw_state(struct mfi_softc *);
176 void		mfi_skinny_intr_ena(struct mfi_softc *);
177 int		mfi_skinny_intr(struct mfi_softc *);
178 void		mfi_skinny_post(struct mfi_softc *, struct mfi_ccb *);
179 u_int		mfi_skinny_sgd_load(struct mfi_softc *, struct mfi_ccb *);
180 
181 static const struct mfi_iop_ops mfi_iop_skinny = {
182 	mfi_skinny_fw_state,
183 	mfi_skinny_intr_ena,
184 	mfi_skinny_intr,
185 	mfi_skinny_post,
186 	mfi_skinny_sgd_load,
187 	MFI_SKINNY_IDB,
188 	MFI_IOP_F_SYSPD
189 };
190 
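/*
 * Per-IOP operations.  Each controller variant (xscale, ppc, gen2,
 * skinny) supplies its own register accessors via a struct mfi_iop_ops
 * selected in mfi_attach().  The macros below dispatch through
 * sc->sc_iop so the rest of the driver stays variant independent.
 */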
191 #define mfi_fw_state(_s)	((_s)->sc_iop->mio_fw_state(_s))
192 #define mfi_intr_enable(_s)	((_s)->sc_iop->mio_intr_ena(_s))
193 #define mfi_my_intr(_s)		((_s)->sc_iop->mio_intr(_s))
194 #define mfi_post(_s, _c)	((_s)->sc_iop->mio_post((_s), (_c)))
195 #define mfi_sgd_load(_s, _c)	((_s)->sc_iop->mio_sgd_load((_s), (_c)))
196 
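/*
 * CCB allocation for the scsi_iopool interface.  mfi_get_ccb() takes a
 * command control block off the free list under sc_ccb_mtx (returning
 * NULL if none is available) and mfi_put_ccb() puts one back.  The
 * kernel lock is dropped around the free-list manipulation.
 */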
197 void *
198 mfi_get_ccb(void *cookie)
199 {
200 	struct mfi_softc	*sc = cookie;
201 	struct mfi_ccb		*ccb;
202 
203 	KERNEL_UNLOCK();
204 
205 	mtx_enter(&sc->sc_ccb_mtx);
206 	ccb = SLIST_FIRST(&sc->sc_ccb_freeq);
207 	if (ccb != NULL) {
208 		SLIST_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
209 		ccb->ccb_state = MFI_CCB_READY;
210 	}
211 	mtx_leave(&sc->sc_ccb_mtx);
212 
213 	DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb);
214 	KERNEL_LOCK();
215 
216 	return (ccb);
217 }
218 
219 void
220 mfi_put_ccb(void *cookie, void *io)
221 {
222 	struct mfi_softc	*sc = cookie;
223 	struct mfi_ccb		*ccb = io;
224 
225 	DNPRINTF(MFI_D_CCB, "%s: mfi_put_ccb: %p\n", DEVNAME(sc), ccb);
226 
227 	KERNEL_UNLOCK();
228 	mtx_enter(&sc->sc_ccb_mtx);
229 	SLIST_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
230 	mtx_leave(&sc->sc_ccb_mtx);
231 	KERNEL_LOCK();
232 }
233 
234 void
235 mfi_scrub_ccb(struct mfi_ccb *ccb)
236 {
237 	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
238 
239 	hdr->mfh_cmd_status = 0x0;
240 	hdr->mfh_flags = 0x0;
241 	ccb->ccb_state = MFI_CCB_FREE;
242 	ccb->ccb_cookie = NULL;
243 	ccb->ccb_flags = 0;
244 	ccb->ccb_done = NULL;
245 	ccb->ccb_direction = 0;
246 	ccb->ccb_frame_size = 0;
247 	ccb->ccb_extra_frames = 0;
248 	ccb->ccb_sgl = NULL;
249 	ccb->ccb_data = NULL;
250 	ccb->ccb_len = 0;
251 }
252 
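/*
 * Carve per-command resources out of the controller DMA memory: each of
 * the sc_max_cmds CCBs gets its own slice of the frame and sense areas
 * plus a DMA map for data transfers, and is then pushed onto the free
 * list via mfi_put_ccb().
 */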
253 int
254 mfi_init_ccb(struct mfi_softc *sc)
255 {
256 	struct mfi_ccb		*ccb;
257 	uint32_t		i;
258 	int			error;
259 
260 	DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc));
261 
262 	sc->sc_ccb = mallocarray(sc->sc_max_cmds, sizeof(struct mfi_ccb),
263 	    M_DEVBUF, M_WAITOK|M_ZERO);
264 
265 	for (i = 0; i < sc->sc_max_cmds; i++) {
266 		ccb = &sc->sc_ccb[i];
267 
268 		/* select i'th frame */
269 		ccb->ccb_frame = (union mfi_frame *)
270 		    (MFIMEM_KVA(sc->sc_frames) + sc->sc_frames_size * i);
271 		ccb->ccb_pframe =
272 		    MFIMEM_DVA(sc->sc_frames) + sc->sc_frames_size * i;
273 		ccb->ccb_pframe_offset = sc->sc_frames_size * i;
274 		ccb->ccb_frame->mfr_header.mfh_context = i;
275 
276 		/* select i'th sense */
277 		ccb->ccb_sense = (struct mfi_sense *)
278 		    (MFIMEM_KVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
279 		ccb->ccb_psense =
280 		    (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
281 
282 		/* create a dma map for transfer */
283 		error = bus_dmamap_create(sc->sc_dmat,
284 		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
285 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
286 		if (error) {
287 			printf("%s: cannot create ccb dmamap (%d)\n",
288 			    DEVNAME(sc), error);
289 			goto destroy;
290 		}
291 
292 		DNPRINTF(MFI_D_CCB,
293 		    "ccb(%d): %p frame: %#x (%#x) sense: %#x (%#x) map: %#x\n",
294 		    ccb->ccb_frame->mfr_header.mfh_context, ccb,
295 		    ccb->ccb_frame, ccb->ccb_pframe,
296 		    ccb->ccb_sense, ccb->ccb_psense,
297 		    ccb->ccb_dmamap);
298 
299 		/* add ccb to queue */
300 		mfi_put_ccb(sc, ccb);
301 	}
302 
303 	return (0);
304 destroy:
305 	/* free dma maps and ccb memory */
306 	while ((ccb = mfi_get_ccb(sc)) != NULL)
307 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
308 
309 	free(sc->sc_ccb, M_DEVBUF, 0);
310 
311 	return (1);
312 }
313 
314 uint32_t
315 mfi_read(struct mfi_softc *sc, bus_size_t r)
316 {
317 	uint32_t rv;
318 
319 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
320 	    BUS_SPACE_BARRIER_READ);
321 	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
322 
323 	DNPRINTF(MFI_D_RW, "%s: mr 0x%x 0x%08x ", DEVNAME(sc), r, rv);
324 	return (rv);
325 }
326 
327 void
328 mfi_write(struct mfi_softc *sc, bus_size_t r, uint32_t v)
329 {
330 	DNPRINTF(MFI_D_RW, "%s: mw 0x%x 0x%08x", DEVNAME(sc), r, v);
331 
332 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
333 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
334 	    BUS_SPACE_BARRIER_WRITE);
335 }
336 
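/*
 * Allocate a chunk of DMA-safe memory the controller can access: create
 * a map, allocate and map the segment, then load it.  Each step is
 * unwound on failure, so mfi_allocmem() returns either a fully usable
 * mfi_mem or NULL.
 */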
337 struct mfi_mem *
338 mfi_allocmem(struct mfi_softc *sc, size_t size)
339 {
340 	struct mfi_mem		*mm;
341 	int			nsegs;
342 
343 	DNPRINTF(MFI_D_MEM, "%s: mfi_allocmem: %d\n", DEVNAME(sc),
344 	    size);
345 
346 	mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_NOWAIT|M_ZERO);
347 	if (mm == NULL)
348 		return (NULL);
349 
350 	mm->am_size = size;
351 
352 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
353 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
354 		goto amfree;
355 
356 	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1,
357 	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
358 		goto destroy;
359 
360 	if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva,
361 	    BUS_DMA_NOWAIT) != 0)
362 		goto free;
363 
364 	if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL,
365 	    BUS_DMA_NOWAIT) != 0)
366 		goto unmap;
367 
368 	DNPRINTF(MFI_D_MEM, "  kva: %p  dva: %p  map: %p\n",
369 	    mm->am_kva, mm->am_map->dm_segs[0].ds_addr, mm->am_map);
370 
371 	return (mm);
372 
373 unmap:
374 	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
375 free:
376 	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
377 destroy:
378 	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
379 amfree:
380 	free(mm, M_DEVBUF, sizeof *mm);
381 
382 	return (NULL);
383 }
384 
385 void
386 mfi_freemem(struct mfi_softc *sc, struct mfi_mem *mm)
387 {
388 	DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm);
389 
390 	bus_dmamap_unload(sc->sc_dmat, mm->am_map);
391 	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size);
392 	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
393 	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
394 	free(mm, M_DEVBUF, sizeof *mm);
395 }
396 
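/*
 * Walk the firmware state machine until it reports MFI_STATE_READY.
 * Some intermediate states need a kick through the inbound doorbell
 * (clear handshake, MFI_INIT_READY); each state is polled in 100ms
 * steps for up to max_wait seconds before the firmware is declared
 * stuck.
 */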
397 int
398 mfi_transition_firmware(struct mfi_softc *sc)
399 {
400 	int32_t			fw_state, cur_state;
401 	u_int32_t		idb = sc->sc_iop->mio_idb;
402 	int			max_wait, i;
403 
404 	fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
405 
406 	DNPRINTF(MFI_D_CMD, "%s: mfi_transition_firmware: %#x\n", DEVNAME(sc),
407 	    fw_state);
408 
409 	while (fw_state != MFI_STATE_READY) {
410 		DNPRINTF(MFI_D_MISC,
411 		    "%s: waiting for firmware to become ready\n",
412 		    DEVNAME(sc));
413 		cur_state = fw_state;
414 		switch (fw_state) {
415 		case MFI_STATE_FAULT:
416 			printf("%s: firmware fault\n", DEVNAME(sc));
417 			return (1);
418 		case MFI_STATE_WAIT_HANDSHAKE:
419 			mfi_write(sc, idb, MFI_INIT_CLEAR_HANDSHAKE);
420 			max_wait = 2;
421 			break;
422 		case MFI_STATE_OPERATIONAL:
423 			mfi_write(sc, idb, MFI_INIT_READY);
424 			max_wait = 10;
425 			break;
426 		case MFI_STATE_UNDEFINED:
427 		case MFI_STATE_BB_INIT:
428 			max_wait = 2;
429 			break;
430 		case MFI_STATE_FW_INIT:
431 		case MFI_STATE_DEVICE_SCAN:
432 		case MFI_STATE_FLUSH_CACHE:
433 			max_wait = 20;
434 			break;
435 		default:
436 			printf("%s: unknown firmware state %d\n",
437 			    DEVNAME(sc), fw_state);
438 			return (1);
439 		}
440 		for (i = 0; i < (max_wait * 10); i++) {
441 			fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
442 			if (fw_state == cur_state)
443 				DELAY(100000);
444 			else
445 				break;
446 		}
447 		if (fw_state == cur_state) {
448 			printf("%s: firmware stuck in state %#x\n",
449 			    DEVNAME(sc), fw_state);
450 			return (1);
451 		}
452 	}
453 
454 	return (0);
455 }
456 
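/*
 * Hand the firmware the addresses it needs to operate: an MFI_CMD_INIT
 * frame carries a struct mfi_init_qinfo describing the reply queue and
 * the producer/consumer indices inside the sc_pcq allocation.  This is
 * issued polled since interrupts are not enabled yet.
 */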
457 int
458 mfi_initialize_firmware(struct mfi_softc *sc)
459 {
460 	struct mfi_ccb		*ccb;
461 	struct mfi_init_frame	*init;
462 	struct mfi_init_qinfo	*qinfo;
463 	int			rv = 0;
464 
465 	DNPRINTF(MFI_D_MISC, "%s: mfi_initialize_firmware\n", DEVNAME(sc));
466 
467 	ccb = scsi_io_get(&sc->sc_iopool, 0);
468 	mfi_scrub_ccb(ccb);
469 
470 	init = &ccb->ccb_frame->mfr_init;
471 	qinfo = (struct mfi_init_qinfo *)((uint8_t *)init + MFI_FRAME_SIZE);
472 
473 	memset(qinfo, 0, sizeof(*qinfo));
474 	qinfo->miq_rq_entries = htole32(sc->sc_max_cmds + 1);
475 
476 	qinfo->miq_rq_addr = htole64(MFIMEM_DVA(sc->sc_pcq) +
477 	    offsetof(struct mfi_prod_cons, mpc_reply_q));
478 
479 	qinfo->miq_pi_addr = htole64(MFIMEM_DVA(sc->sc_pcq) +
480 	    offsetof(struct mfi_prod_cons, mpc_producer));
481 
482 	qinfo->miq_ci_addr = htole64(MFIMEM_DVA(sc->sc_pcq) +
483 	    offsetof(struct mfi_prod_cons, mpc_consumer));
484 
485 	init->mif_header.mfh_cmd = MFI_CMD_INIT;
486 	init->mif_header.mfh_data_len = htole32(sizeof(*qinfo));
487 	init->mif_qinfo_new_addr = htole64(ccb->ccb_pframe + MFI_FRAME_SIZE);
488 
489 	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq),
490 	    0, MFIMEM_LEN(sc->sc_pcq),
491 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
492 
493 	ccb->ccb_done = mfi_empty_done;
494 	mfi_poll(sc, ccb);
495 	if (init->mif_header.mfh_cmd_status != MFI_STAT_OK)
496 		rv = 1;
497 
498 	mfi_put_ccb(sc, ccb);
499 
500 	return (rv);
501 }
502 
503 void
504 mfi_empty_done(struct mfi_softc *sc, struct mfi_ccb *ccb)
505 {
506 	/* nop */
507 }
508 
509 int
510 mfi_get_info(struct mfi_softc *sc)
511 {
512 #ifdef MFI_DEBUG
513 	int i;
514 #endif
515 	DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc));
516 
517 	if (mfi_mgmt(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN,
518 	    sizeof(sc->sc_info), &sc->sc_info, NULL))
519 		return (1);
520 
521 #ifdef MFI_DEBUG
522 	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
523 		printf("%s: active FW %s Version %s date %s time %s\n",
524 		    DEVNAME(sc),
525 		    sc->sc_info.mci_image_component[i].mic_name,
526 		    sc->sc_info.mci_image_component[i].mic_version,
527 		    sc->sc_info.mci_image_component[i].mic_build_date,
528 		    sc->sc_info.mci_image_component[i].mic_build_time);
529 	}
530 
531 	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
532 		printf("%s: pending FW %s Version %s date %s time %s\n",
533 		    DEVNAME(sc),
534 		    sc->sc_info.mci_pending_image_component[i].mic_name,
535 		    sc->sc_info.mci_pending_image_component[i].mic_version,
536 		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
537 		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
538 	}
539 
540 	printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
541 	    DEVNAME(sc),
542 	    sc->sc_info.mci_max_arms,
543 	    sc->sc_info.mci_max_spans,
544 	    sc->sc_info.mci_max_arrays,
545 	    sc->sc_info.mci_max_lds,
546 	    sc->sc_info.mci_product_name);
547 
548 	printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
549 	    DEVNAME(sc),
550 	    sc->sc_info.mci_serial_number,
551 	    sc->sc_info.mci_hw_present,
552 	    sc->sc_info.mci_current_fw_time,
553 	    sc->sc_info.mci_max_cmds,
554 	    sc->sc_info.mci_max_sg_elements);
555 
556 	printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
557 	    DEVNAME(sc),
558 	    sc->sc_info.mci_max_request_size,
559 	    sc->sc_info.mci_lds_present,
560 	    sc->sc_info.mci_lds_degraded,
561 	    sc->sc_info.mci_lds_offline,
562 	    sc->sc_info.mci_pd_present);
563 
564 	printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
565 	    DEVNAME(sc),
566 	    sc->sc_info.mci_pd_disks_present,
567 	    sc->sc_info.mci_pd_disks_pred_failure,
568 	    sc->sc_info.mci_pd_disks_failed);
569 
570 	printf("%s: nvram %d mem %d flash %d\n",
571 	    DEVNAME(sc),
572 	    sc->sc_info.mci_nvram_size,
573 	    sc->sc_info.mci_memory_size,
574 	    sc->sc_info.mci_flash_size);
575 
576 	printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
577 	    DEVNAME(sc),
578 	    sc->sc_info.mci_ram_correctable_errors,
579 	    sc->sc_info.mci_ram_uncorrectable_errors,
580 	    sc->sc_info.mci_cluster_allowed,
581 	    sc->sc_info.mci_cluster_active);
582 
583 	printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
584 	    DEVNAME(sc),
585 	    sc->sc_info.mci_max_strips_per_io,
586 	    sc->sc_info.mci_raid_levels,
587 	    sc->sc_info.mci_adapter_ops,
588 	    sc->sc_info.mci_ld_ops);
589 
590 	printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
591 	    DEVNAME(sc),
592 	    sc->sc_info.mci_stripe_sz_ops.min,
593 	    sc->sc_info.mci_stripe_sz_ops.max,
594 	    sc->sc_info.mci_pd_ops,
595 	    sc->sc_info.mci_pd_mix_support);
596 
597 	printf("%s: ecc_bucket %d pckg_prop %s\n",
598 	    DEVNAME(sc),
599 	    sc->sc_info.mci_ecc_bucket_count,
600 	    sc->sc_info.mci_package_version);
601 
602 	printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
603 	    DEVNAME(sc),
604 	    sc->sc_info.mci_properties.mcp_seq_num,
605 	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
606 	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
607 	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);
608 
609 	printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
610 	    DEVNAME(sc),
611 	    sc->sc_info.mci_properties.mcp_rebuild_rate,
612 	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
613 	    sc->sc_info.mci_properties.mcp_bgi_rate,
614 	    sc->sc_info.mci_properties.mcp_cc_rate);
615 
616 	printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
617 	    DEVNAME(sc),
618 	    sc->sc_info.mci_properties.mcp_recon_rate,
619 	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
620 	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
621 	    sc->sc_info.mci_properties.mcp_spinup_delay,
622 	    sc->sc_info.mci_properties.mcp_cluster_enable);
623 
624 	printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
625 	    DEVNAME(sc),
626 	    sc->sc_info.mci_properties.mcp_coercion_mode,
627 	    sc->sc_info.mci_properties.mcp_alarm_enable,
628 	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
629 	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
630 	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);
631 
632 	printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
633 	    DEVNAME(sc),
634 	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
635 	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
636 	    sc->sc_info.mci_properties.mcp_expose_encl_devices);
637 
638 	printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
639 	    DEVNAME(sc),
640 	    sc->sc_info.mci_pci.mip_vendor,
641 	    sc->sc_info.mci_pci.mip_device,
642 	    sc->sc_info.mci_pci.mip_subvendor,
643 	    sc->sc_info.mci_pci.mip_subdevice);
644 
645 	printf("%s: type %#x port_count %d port_addr ",
646 	    DEVNAME(sc),
647 	    sc->sc_info.mci_host.mih_type,
648 	    sc->sc_info.mci_host.mih_port_count);
649 
650 	for (i = 0; i < 8; i++)
651 		printf("%.0llx ", sc->sc_info.mci_host.mih_port_addr[i]);
652 	printf("\n");
653 
654 	printf("%s: type %#x port_count %d port_addr ",
655 	    DEVNAME(sc),
656 	    sc->sc_info.mci_device.mid_type,
657 	    sc->sc_info.mci_device.mid_port_count);
658 
659 	for (i = 0; i < 8; i++)
660 		printf("%.0llx ", sc->sc_info.mci_device.mid_port_addr[i]);
661 	printf("\n");
662 #endif /* MFI_DEBUG */
663 
664 	return (0);
665 }
666 
667 void
668 mfiminphys(struct buf *bp, struct scsi_link *sl)
669 {
670 	DNPRINTF(MFI_D_MISC, "mfiminphys: %d\n", bp->b_bcount);
671 
672 	/* XXX currently using MFI_MAXFER = MAXPHYS */
673 	if (bp->b_bcount > MFI_MAXFER)
674 		bp->b_bcount = MFI_MAXFER;
675 	minphys(bp);
676 }
677 
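/*
 * Controller attach: pick the IOP ops for this variant, bring the
 * firmware to the ready state, size the command and SGL limits from the
 * firmware status register, allocate reply queue, frame and sense
 * memory, initialize the CCBs and the firmware, then attach the logical
 * disk SCSI bus (and, on MFI_IOP_F_SYSPD controllers, the physical disk
 * bus) plus the bio(4) and sensor glue.
 */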
678 int
679 mfi_attach(struct mfi_softc *sc, enum mfi_iop iop)
680 {
681 	struct scsibus_attach_args saa;
682 	uint32_t		status, frames, max_sgl;
683 	int			i;
684 
685 	switch (iop) {
686 	case MFI_IOP_XSCALE:
687 		sc->sc_iop = &mfi_iop_xscale;
688 		break;
689 	case MFI_IOP_PPC:
690 		sc->sc_iop = &mfi_iop_ppc;
691 		break;
692 	case MFI_IOP_GEN2:
693 		sc->sc_iop = &mfi_iop_gen2;
694 		break;
695 	case MFI_IOP_SKINNY:
696 		sc->sc_iop = &mfi_iop_skinny;
697 		break;
698 	default:
699 		panic("%s: unknown iop %d", DEVNAME(sc), iop);
700 	}
701 
702 	DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc));
703 
704 	if (mfi_transition_firmware(sc))
705 		return (1);
706 
707 	SLIST_INIT(&sc->sc_ccb_freeq);
708 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
709 	scsi_iopool_init(&sc->sc_iopool, sc, mfi_get_ccb, mfi_put_ccb);
710 
711 	rw_init(&sc->sc_lock, "mfi_lock");
712 
713 	status = mfi_fw_state(sc);
714 	sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
715 	max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
716 	if (sc->sc_64bit_dma) {
717 		sc->sc_max_sgl = min(max_sgl, (128 * 1024) / PAGE_SIZE + 1);
718 		sc->sc_sgl_size = sizeof(struct mfi_sg64);
719 		sc->sc_sgl_flags = MFI_FRAME_SGL64;
720 	} else {
721 		sc->sc_max_sgl = max_sgl;
722 		sc->sc_sgl_size = sizeof(struct mfi_sg32);
723 		sc->sc_sgl_flags = MFI_FRAME_SGL32;
724 	}
725 	if (iop == MFI_IOP_SKINNY)
726 		sc->sc_sgl_size = sizeof(struct mfi_sg_skinny);
727 	DNPRINTF(MFI_D_MISC, "%s: 64bit: %d max commands: %u, max sgl: %u\n",
728 	    DEVNAME(sc), sc->sc_64bit_dma, sc->sc_max_cmds, sc->sc_max_sgl);
729 
730 	/* consumer/producer and reply queue memory */
731 	sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) +
732 	    sizeof(struct mfi_prod_cons));
733 	if (sc->sc_pcq == NULL) {
734 		printf("%s: unable to allocate reply queue memory\n",
735 		    DEVNAME(sc));
736 		goto nopcq;
737 	}
738 
739 	/* frame memory */
740 	/* we are not doing 64 bit IO so only calculate # of 32 bit frames */
741 	frames = (sc->sc_sgl_size * sc->sc_max_sgl + MFI_FRAME_SIZE - 1) /
742 	    MFI_FRAME_SIZE + 1;
743 	sc->sc_frames_size = frames * MFI_FRAME_SIZE;
744 	sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds);
745 	if (sc->sc_frames == NULL) {
746 		printf("%s: unable to allocate frame memory\n", DEVNAME(sc));
747 		goto noframe;
748 	}
749 	/* XXX hack, fix this */
750 	if (MFIMEM_DVA(sc->sc_frames) & 0x3f) {
751 		printf("%s: improper frame alignment (%#lx) FIXME\n",
752 		    DEVNAME(sc), MFIMEM_DVA(sc->sc_frames));
753 		goto noframe;
754 	}
755 
756 	/* sense memory */
757 	sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
758 	if (sc->sc_sense == NULL) {
759 		printf("%s: unable to allocate sense memory\n", DEVNAME(sc));
760 		goto nosense;
761 	}
762 
763 	/* now that we have all memory bits go initialize ccbs */
764 	if (mfi_init_ccb(sc)) {
765 		printf("%s: could not init ccb list\n", DEVNAME(sc));
766 		goto noinit;
767 	}
768 
769 	/* kickstart firmware with all addresses and pointers */
770 	if (mfi_initialize_firmware(sc)) {
771 		printf("%s: could not initialize firmware\n", DEVNAME(sc));
772 		goto noinit;
773 	}
774 
775 	if (mfi_get_info(sc)) {
776 		printf("%s: could not retrieve controller information\n",
777 		    DEVNAME(sc));
778 		goto noinit;
779 	}
780 
781 	printf("%s: \"%s\", firmware %s", DEVNAME(sc),
782 	    sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
783 	if (letoh16(sc->sc_info.mci_memory_size) > 0)
784 		printf(", %uMB cache", letoh16(sc->sc_info.mci_memory_size));
785 	printf("\n");
786 
787 	sc->sc_ld_cnt = sc->sc_info.mci_lds_present;
788 	for (i = 0; i < sc->sc_ld_cnt; i++)
789 		sc->sc_ld[i].ld_present = 1;
790 
791 	sc->sc_link.adapter = &mfi_switch;
792 	sc->sc_link.adapter_softc = sc;
793 	sc->sc_link.adapter_buswidth = sc->sc_info.mci_max_lds;
794 	sc->sc_link.adapter_target = -1;
795 	sc->sc_link.luns = 1;
796 	sc->sc_link.openings = sc->sc_max_cmds - 1;
797 	sc->sc_link.pool = &sc->sc_iopool;
798 
799 	bzero(&saa, sizeof(saa));
800 	saa.saa_sc_link = &sc->sc_link;
801 
802 	sc->sc_scsibus = (struct scsibus_softc *)
803 	    config_found(&sc->sc_dev, &saa, scsiprint);
804 
805 	if (ISSET(sc->sc_iop->mio_flags, MFI_IOP_F_SYSPD))
806 		mfi_syspd(sc);
807 
808 	/* enable interrupts */
809 	mfi_intr_enable(sc);
810 
811 #if NBIO > 0
812 	if (bio_register(&sc->sc_dev, mfi_ioctl) != 0)
813 		panic("%s: controller registration failed", DEVNAME(sc));
814 	else
815 		sc->sc_ioctl = mfi_ioctl;
816 
817 #ifndef SMALL_KERNEL
818 	if (mfi_create_sensors(sc) != 0)
819 		printf("%s: unable to create sensors\n", DEVNAME(sc));
820 #endif
821 #endif /* NBIO > 0 */
822 
823 	return (0);
824 noinit:
825 	mfi_freemem(sc, sc->sc_sense);
826 nosense:
827 	mfi_freemem(sc, sc->sc_frames);
828 noframe:
829 	mfi_freemem(sc, sc->sc_pcq);
830 nopcq:
831 	return (1);
832 }
833 
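/*
 * On controllers that expose unconfigured drives (MFI_IOP_F_SYSPD),
 * fetch the physical disk list and attach a second SCSI bus that
 * addresses those drives directly through mfi_pd_switch.
 */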
834 int
835 mfi_syspd(struct mfi_softc *sc)
836 {
837 	struct scsibus_attach_args saa;
838 	struct scsi_link *link;
839 	struct mfi_pd_link *pl;
840 	struct mfi_pd_list *pd;
841 	u_int npds, i;
842 
843 	sc->sc_pd = malloc(sizeof(*sc->sc_pd), M_DEVBUF, M_WAITOK|M_ZERO);
844 	if (sc->sc_pd == NULL)
845 		return (1);
846 
847 	pd = malloc(sizeof(*pd), M_TEMP, M_WAITOK|M_ZERO);
848 	if (pd == NULL)
849 		goto nopdsc;
850 
851 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
852 	    sizeof(*pd), pd, NULL) != 0)
853 		goto nopd;
854 
855 	npds = letoh32(pd->mpl_no_pd);
856 	for (i = 0; i < npds; i++) {
857 		pl = malloc(sizeof(*pl), M_DEVBUF, M_WAITOK|M_ZERO);
858 		if (pl == NULL)
859 			goto nopl;
860 
861 		pl->pd_id = pd->mpl_address[i].mpa_pd_id;
862 		sc->sc_pd->pd_links[i] = pl;
863 	}
864 
865 	free(pd, M_TEMP, sizeof *pd);
866 
867 	link = &sc->sc_pd->pd_link;
868 	link->adapter = &mfi_pd_switch;
869 	link->adapter_softc = sc;
870 	link->adapter_buswidth = MFI_MAX_PD;
871 	link->adapter_target = -1;
872 	link->openings = sc->sc_max_cmds - 1;
873 	link->pool = &sc->sc_iopool;
874 
875 	bzero(&saa, sizeof(saa));
876 	saa.saa_sc_link = link;
877 
878 	sc->sc_pd->pd_scsibus = (struct scsibus_softc *)
879 	    config_found(&sc->sc_dev, &saa, scsiprint);
880 
881 	return (0);
882 nopl:
883 	for (i = 0; i < npds; i++) {
884 		pl = sc->sc_pd->pd_links[i];
885 		if (pl == NULL)
886 			break;
887 
888 		free(pl, M_DEVBUF, sizeof *pl);
889 	}
890 nopd:
891 	free(pd, M_TEMP, sizeof *pd);
892 nopdsc:
893 	free(sc->sc_pd, M_DEVBUF, sizeof *sc->sc_pd);
894 	return (1);
895 }
896 
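/*
 * Issue a command and busy-wait for its completion.  The frame is
 * flagged so the firmware does not post it to the reply queue; instead
 * the command status byte is polled in 1ms steps for roughly 5 seconds
 * before the CCB is marked with MFI_CCB_F_ERR.
 */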
897 void
898 mfi_poll(struct mfi_softc *sc, struct mfi_ccb *ccb)
899 {
900 	struct mfi_frame_header *hdr;
901 	int to = 0;
902 
903 	DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc));
904 
905 	hdr = &ccb->ccb_frame->mfr_header;
906 	hdr->mfh_cmd_status = 0xff;
907 	hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
908 
909 	mfi_start(sc, ccb);
910 
911 	for (;;) {
912 		delay(1000);
913 
914 		bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
915 		    ccb->ccb_pframe_offset, sc->sc_frames_size,
916 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
917 
918 		if (hdr->mfh_cmd_status != 0xff)
919 			break;
920 
921 		if (to++ > 5000) {
922 			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
923 			    hdr->mfh_context);
924 			ccb->ccb_flags |= MFI_CCB_F_ERR;
925 			break;
926 		}
927 
928 		bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
929 		    ccb->ccb_pframe_offset, sc->sc_frames_size,
930 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
931 	}
932 
933 	if (ccb->ccb_len > 0) {
934 		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
935 		    ccb->ccb_dmamap->dm_mapsize,
936 		    (ccb->ccb_direction & MFI_DATA_IN) ?
937 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
938 
939 		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
940 	}
941 
942 	ccb->ccb_done(sc, ccb);
943 }
944 
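/*
 * Issue a command and sleep until the interrupt path completes it.  A
 * mutex on the stack is stored as the CCB cookie and used as the sleep
 * lock; mfi_exec_done() clears the cookie and wakes the sleeper.
 */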
945 void
946 mfi_exec(struct mfi_softc *sc, struct mfi_ccb *ccb)
947 {
948 	struct mutex m = MUTEX_INITIALIZER(IPL_BIO);
949 
950 #ifdef DIAGNOSTIC
951 	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
952 		panic("mfi_exec called with cookie or done set");
953 #endif
954 
955 	ccb->ccb_cookie = &m;
956 	ccb->ccb_done = mfi_exec_done;
957 
958 	mfi_start(sc, ccb);
959 
960 	mtx_enter(&m);
961 	while (ccb->ccb_cookie != NULL)
962 		msleep(ccb, &m, PRIBIO, "mfiexec", 0);
963 	mtx_leave(&m);
964 }
965 
966 void
967 mfi_exec_done(struct mfi_softc *sc, struct mfi_ccb *ccb)
968 {
969 	struct mutex *m = ccb->ccb_cookie;
970 
971 	mtx_enter(m);
972 	ccb->ccb_cookie = NULL;
973 	wakeup_one(ccb);
974 	mtx_leave(m);
975 }
976 
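/*
 * Interrupt handler: drain the reply queue.  The firmware advances the
 * producer index and deposits completed command contexts in
 * mpc_reply_q[]; we walk from consumer to producer, complete each CCB
 * via mfi_done(), and write the new consumer index back for the
 * firmware.
 */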
977 int
978 mfi_intr(void *arg)
979 {
980 	struct mfi_softc	*sc = arg;
981 	struct mfi_prod_cons	*pcq = MFIMEM_KVA(sc->sc_pcq);
982 	struct mfi_ccb		*ccb;
983 	uint32_t		producer, consumer, ctx;
984 	int			claimed = 0;
985 
986 	if (!mfi_my_intr(sc))
987 		return (0);
988 
989 	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq),
990 	    0, MFIMEM_LEN(sc->sc_pcq),
991 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
992 
993 	producer = letoh32(pcq->mpc_producer);
994 	consumer = letoh32(pcq->mpc_consumer);
995 
996 	DNPRINTF(MFI_D_INTR, "%s: mfi_intr %#x %#x\n", DEVNAME(sc), sc, pcq);
997 
998 	while (consumer != producer) {
999 		DNPRINTF(MFI_D_INTR, "%s: mfi_intr pi %#x ci %#x\n",
1000 		    DEVNAME(sc), producer, consumer);
1001 
1002 		ctx = pcq->mpc_reply_q[consumer];
1003 		pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX;
1004 		if (ctx == MFI_INVALID_CTX)
1005 			printf("%s: invalid context, p: %d c: %d\n",
1006 			    DEVNAME(sc), producer, consumer);
1007 		else {
1008 			/* XXX remove from queue and call scsi_done */
1009 			ccb = &sc->sc_ccb[ctx];
1010 			DNPRINTF(MFI_D_INTR, "%s: mfi_intr context %#x\n",
1011 			    DEVNAME(sc), ctx);
1012 			mfi_done(sc, ccb);
1013 
1014 			claimed = 1;
1015 		}
1016 		consumer++;
1017 		if (consumer == (sc->sc_max_cmds + 1))
1018 			consumer = 0;
1019 	}
1020 
1021 	pcq->mpc_consumer = htole32(consumer);
1022 
1023 	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq),
1024 	    0, MFIMEM_LEN(sc->sc_pcq),
1025 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1026 
1027 	return (claimed);
1028 }
1029 
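/*
 * Fast path for READ/WRITE: fill in an MFI_CMD_LD_READ/LD_WRITE I/O
 * frame with the LBA and block count taken from the CDB and attach the
 * data scatter/gather list.
 */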
1030 int
1031 mfi_scsi_io(struct mfi_softc *sc, struct mfi_ccb *ccb,
1032     struct scsi_xfer *xs, uint64_t blockno, uint32_t blockcnt)
1033 {
1034 	struct scsi_link	*link = xs->sc_link;
1035 	struct mfi_io_frame	*io;
1036 
1037 	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_io: %d\n",
1038 	    DEVNAME((struct mfi_softc *)link->adapter_softc), link->target);
1039 
1040 	if (!xs->data)
1041 		return (1);
1042 
1043 	io = &ccb->ccb_frame->mfr_io;
1044 	if (xs->flags & SCSI_DATA_IN) {
1045 		io->mif_header.mfh_cmd = MFI_CMD_LD_READ;
1046 		ccb->ccb_direction = MFI_DATA_IN;
1047 	} else {
1048 		io->mif_header.mfh_cmd = MFI_CMD_LD_WRITE;
1049 		ccb->ccb_direction = MFI_DATA_OUT;
1050 	}
1051 	io->mif_header.mfh_target_id = link->target;
1052 	io->mif_header.mfh_timeout = 0;
1053 	io->mif_header.mfh_flags = 0;
1054 	io->mif_header.mfh_sense_len = MFI_SENSE_SIZE;
1055 	io->mif_header.mfh_data_len = htole32(blockcnt);
1056 	io->mif_lba = htole64(blockno);
1057 	io->mif_sense_addr = htole64(ccb->ccb_psense);
1058 
1059 	ccb->ccb_done = mfi_scsi_xs_done;
1060 	ccb->ccb_cookie = xs;
1061 	ccb->ccb_frame_size = MFI_IO_FRAME_SIZE;
1062 	ccb->ccb_sgl = &io->mif_sgl;
1063 	ccb->ccb_data = xs->data;
1064 	ccb->ccb_len = xs->datalen;
1065 
1066 	if (mfi_create_sgl(sc, ccb, (xs->flags & SCSI_NOSLEEP) ?
1067 	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
1068 		return (1);
1069 
1070 	return (0);
1071 }
1072 
1073 void
1074 mfi_scsi_xs_done(struct mfi_softc *sc, struct mfi_ccb *ccb)
1075 {
1076 	struct scsi_xfer	*xs = ccb->ccb_cookie;
1077 	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
1078 
1079 	DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %#x %#x\n",
1080 	    DEVNAME(sc), ccb, ccb->ccb_frame);
1081 
1082 	switch (hdr->mfh_cmd_status) {
1083 	case MFI_STAT_OK:
1084 		xs->resid = 0;
1085 		break;
1086 
1087 	case MFI_STAT_SCSI_DONE_WITH_ERROR:
1088 		xs->error = XS_SENSE;
1089 		xs->resid = 0;
1090 		memset(&xs->sense, 0, sizeof(xs->sense));
1091 		memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
1092 		break;
1093 
1094 	case MFI_STAT_DEVICE_NOT_FOUND:
1095 		xs->error = XS_SELTIMEOUT;
1096 		break;
1097 
1098 	default:
1099 		xs->error = XS_DRIVER_STUFFUP;
1100 		DNPRINTF(MFI_D_CMD,
1101 		    "%s: mfi_scsi_xs_done stuffup %02x on %02x\n",
1102 		    DEVNAME(sc), hdr->mfh_cmd_status, xs->cmd->opcode);
1103 
1104 		if (hdr->mfh_scsi_status != 0) {
1105 			DNPRINTF(MFI_D_INTR,
1106 			    "%s: mfi_scsi_xs_done sense %#x %x %x\n",
1107 			    DEVNAME(sc), hdr->mfh_scsi_status,
1108 			    &xs->sense, ccb->ccb_sense);
1109 			memset(&xs->sense, 0, sizeof(xs->sense));
1110 			memcpy(&xs->sense, ccb->ccb_sense,
1111 			    sizeof(struct scsi_sense_data));
1112 			xs->error = XS_SENSE;
1113 		}
1114 		break;
1115 	}
1116 
1117 	KERNEL_LOCK();
1118 	scsi_done(xs);
1119 	KERNEL_UNLOCK();
1120 }
1121 
1122 int
1123 mfi_scsi_ld(struct mfi_softc *sc, struct mfi_ccb *ccb, struct scsi_xfer *xs)
1124 {
1125 	struct scsi_link	*link = xs->sc_link;
1126 	struct mfi_pass_frame	*pf;
1127 
1128 	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld: %d\n",
1129 	    DEVNAME((struct mfi_softc *)link->adapter_softc), link->target);
1130 
1131 	pf = &ccb->ccb_frame->mfr_pass;
1132 	pf->mpf_header.mfh_cmd = MFI_CMD_LD_SCSI_IO;
1133 	pf->mpf_header.mfh_target_id = link->target;
1134 	pf->mpf_header.mfh_lun_id = 0;
1135 	pf->mpf_header.mfh_cdb_len = xs->cmdlen;
1136 	pf->mpf_header.mfh_timeout = 0;
1137 	pf->mpf_header.mfh_data_len = htole32(xs->datalen); /* XXX */
1138 	pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;
1139 
1140 	pf->mpf_sense_addr = htole64(ccb->ccb_psense);
1141 
1142 	memset(pf->mpf_cdb, 0, 16);
1143 	memcpy(pf->mpf_cdb, xs->cmd, xs->cmdlen);
1144 
1145 	ccb->ccb_done = mfi_scsi_xs_done;
1146 	ccb->ccb_cookie = xs;
1147 	ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
1148 	ccb->ccb_sgl = &pf->mpf_sgl;
1149 
1150 	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
1151 		ccb->ccb_direction = xs->flags & SCSI_DATA_IN ?
1152 		    MFI_DATA_IN : MFI_DATA_OUT;
1153 	else
1154 		ccb->ccb_direction = MFI_DATA_NONE;
1155 
1156 	if (xs->data) {
1157 		ccb->ccb_data = xs->data;
1158 		ccb->ccb_len = xs->datalen;
1159 
1160 		if (mfi_create_sgl(sc, ccb, (xs->flags & SCSI_NOSLEEP) ?
1161 		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
1162 			return (1);
1163 	}
1164 
1165 	return (0);
1166 }
1167 
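/*
 * scsi_xfer entry point for logical disks.  READ/WRITE commands go
 * through the dedicated I/O frame path (mfi_scsi_io), SYNCHRONIZE CACHE
 * is translated into a cache flush DCMD, and everything else is sent as
 * a SCSI passthrough frame (mfi_scsi_ld).
 */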
1168 void
1169 mfi_scsi_cmd(struct scsi_xfer *xs)
1170 {
1171 	struct scsi_link	*link = xs->sc_link;
1172 	struct mfi_softc	*sc = link->adapter_softc;
1173 	struct mfi_ccb		*ccb = xs->io;
1174 	struct scsi_rw		*rw;
1175 	struct scsi_rw_big	*rwb;
1176 	struct scsi_rw_16	*rw16;
1177 	uint64_t		blockno;
1178 	uint32_t		blockcnt;
1179 	uint8_t			target = link->target;
1180 	uint8_t			mbox[MFI_MBOX_SIZE];
1181 
1182 	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_cmd opcode: %#x\n",
1183 	    DEVNAME(sc), xs->cmd->opcode);
1184 
1185 	KERNEL_UNLOCK();
1186 
1187 	if (!sc->sc_ld[target].ld_present) {
1188 		DNPRINTF(MFI_D_CMD, "%s: invalid target %d\n",
1189 		    DEVNAME(sc), target);
1190 		goto stuffup;
1191 	}
1192 
1193 	mfi_scrub_ccb(ccb);
1194 
1195 	xs->error = XS_NOERROR;
1196 
1197 	switch (xs->cmd->opcode) {
1198 	/* IO path */
1199 	case READ_BIG:
1200 	case WRITE_BIG:
1201 		rwb = (struct scsi_rw_big *)xs->cmd;
1202 		blockno = (uint64_t)_4btol(rwb->addr);
1203 		blockcnt = _2btol(rwb->length);
1204 		if (mfi_scsi_io(sc, ccb, xs, blockno, blockcnt))
1205 			goto stuffup;
1206 		break;
1207 
1208 	case READ_COMMAND:
1209 	case WRITE_COMMAND:
1210 		rw = (struct scsi_rw *)xs->cmd;
1211 		blockno =
1212 		    (uint64_t)(_3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff));
1213 		blockcnt = rw->length ? rw->length : 0x100;
1214 		if (mfi_scsi_io(sc, ccb, xs, blockno, blockcnt))
1215 			goto stuffup;
1216 		break;
1217 
1218 	case READ_16:
1219 	case WRITE_16:
1220 		rw16 = (struct scsi_rw_16 *)xs->cmd;
1221 		blockno = _8btol(rw16->addr);
1222 		blockcnt = _4btol(rw16->length);
1223 		if (mfi_scsi_io(sc, ccb, xs, blockno, blockcnt))
1224 			goto stuffup;
1225 		break;
1226 
1227 	case SYNCHRONIZE_CACHE:
1228 		mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
1229 		if (mfi_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH,
1230 		    MFI_DATA_NONE, 0, NULL, mbox))
1231 			goto stuffup;
1232 
1233 		goto complete;
1234 		/* NOTREACHED */
1235 
1236 	default:
1237 		if (mfi_scsi_ld(sc, ccb, xs))
1238 			goto stuffup;
1239 		break;
1240 	}
1241 
1242 	DNPRINTF(MFI_D_CMD, "%s: start io %d\n", DEVNAME(sc), target);
1243 
1244 	if (xs->flags & SCSI_POLL)
1245 		mfi_poll(sc, ccb);
1246 	else
1247 		mfi_start(sc, ccb);
1248 
1249 	KERNEL_LOCK();
1250 	return;
1251 
1252 stuffup:
1253 	xs->error = XS_DRIVER_STUFFUP;
1254 complete:
1255 	KERNEL_LOCK();
1256 	scsi_done(xs);
1257 }
1258 
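/*
 * Default scatter/gather loader: copy the bus_dma segment list into the
 * frame as 32-bit or 64-bit SG entries depending on sc_64bit_dma, and
 * return the number of SGL bytes appended to the frame.
 */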
1259 u_int
1260 mfi_default_sgd_load(struct mfi_softc *sc, struct mfi_ccb *ccb)
1261 {
1262 	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
1263 	union mfi_sgl		*sgl = ccb->ccb_sgl;
1264 	bus_dma_segment_t	*sgd = ccb->ccb_dmamap->dm_segs;
1265 	int			 i;
1266 
1267 	hdr->mfh_flags |= sc->sc_sgl_flags;
1268 
1269 	for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
1270 		if (sc->sc_64bit_dma) {
1271 			sgl->sg64[i].addr = htole64(sgd[i].ds_addr);
1272 			sgl->sg64[i].len = htole32(sgd[i].ds_len);
1273 			DNPRINTF(MFI_D_DMA, "%s: addr: %#x  len: %#x\n",
1274 			    DEVNAME(sc), sgl->sg64[i].addr, sgl->sg64[i].len);
1275 		} else {
1276 			sgl->sg32[i].addr = htole32(sgd[i].ds_addr);
1277 			sgl->sg32[i].len = htole32(sgd[i].ds_len);
1278 			DNPRINTF(MFI_D_DMA, "%s: addr: %#x  len: %#x\n",
1279 			    DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len);
1280 		}
1281 	}
1282 
1283 	return (ccb->ccb_dmamap->dm_nsegs *
1284 	    (sc->sc_64bit_dma ? sizeof(sgl->sg64) : sizeof(sgl->sg32)));
1285 }
1286 
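/*
 * Map the CCB data buffer and build its scatter/gather list.  The SGL
 * bytes added by mfi_sgd_load() grow ccb_frame_size, from which the
 * number of extra frames the firmware has to fetch is derived.
 */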
1287 int
1288 mfi_create_sgl(struct mfi_softc *sc, struct mfi_ccb *ccb, int flags)
1289 {
1290 	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
1291 	int			error;
1292 
1293 	DNPRINTF(MFI_D_DMA, "%s: mfi_create_sgl %#x\n", DEVNAME(sc),
1294 	    ccb->ccb_data);
1295 
1296 	if (!ccb->ccb_data) {
1297 		hdr->mfh_sg_count = 0;
1298 		return (1);
1299 	}
1300 
1301 	error = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap,
1302 	    ccb->ccb_data, ccb->ccb_len, NULL, flags);
1303 	if (error) {
1304 		if (error == EFBIG)
1305 			printf("more than %d dma segs\n",
1306 			    sc->sc_max_sgl);
1307 		else
1308 			printf("error %d loading dma map\n", error);
1309 		return (1);
1310 	}
1311 
1312 	ccb->ccb_frame_size += mfi_sgd_load(sc, ccb);
1313 
1314 	if (ccb->ccb_direction == MFI_DATA_IN) {
1315 		hdr->mfh_flags |= MFI_FRAME_DIR_READ;
1316 		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1317 		    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1318 	} else {
1319 		hdr->mfh_flags |= MFI_FRAME_DIR_WRITE;
1320 		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1321 		    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1322 	}
1323 
1324 	hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;
1325 	ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE;
1326 
1327 	DNPRINTF(MFI_D_DMA, "%s: sg_count: %d  frame_size: %d  frames_size: %d"
1328 	    "  dm_nsegs: %d  extra_frames: %d\n",
1329 	    DEVNAME(sc),
1330 	    hdr->mfh_sg_count,
1331 	    ccb->ccb_frame_size,
1332 	    sc->sc_frames_size,
1333 	    ccb->ccb_dmamap->dm_nsegs,
1334 	    ccb->ccb_extra_frames);
1335 
1336 	return (0);
1337 }
1338 
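/*
 * Management (DCMD) commands.  mfi_mgmt() grabs a CCB from the iopool
 * and wraps mfi_do_mgmt(), which builds the DCMD frame, bounces the
 * caller's buffer through dma_alloc() memory, and issues the command
 * polled during autoconf (cold) or sleeping via mfi_exec() otherwise.
 */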
1339 int
1340 mfi_mgmt(struct mfi_softc *sc, uint32_t opc, uint32_t dir, uint32_t len,
1341     void *buf, uint8_t *mbox)
1342 {
1343 	struct mfi_ccb *ccb;
1344 	int rv;
1345 
1346 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1347 	mfi_scrub_ccb(ccb);
1348 	rv = mfi_do_mgmt(sc, ccb, opc, dir, len, buf, mbox);
1349 	scsi_io_put(&sc->sc_iopool, ccb);
1350 
1351 	return (rv);
1352 }
1353 
1354 int
1355 mfi_do_mgmt(struct mfi_softc *sc, struct mfi_ccb *ccb, uint32_t opc,
1356     uint32_t dir, uint32_t len, void *buf, uint8_t *mbox)
1357 {
1358 	struct mfi_dcmd_frame *dcmd;
1359 	uint8_t *dma_buf = NULL;
1360 	int rv = EINVAL;
1361 
1362 	DNPRINTF(MFI_D_MISC, "%s: mfi_do_mgmt %#x\n", DEVNAME(sc), opc);
1363 
1364 	dma_buf = dma_alloc(len, cold ? PR_NOWAIT : PR_WAITOK);
1365 	if (dma_buf == NULL)
1366 		goto done;
1367 
1368 	dcmd = &ccb->ccb_frame->mfr_dcmd;
1369 	memset(dcmd->mdf_mbox, 0, MFI_MBOX_SIZE);
1370 	dcmd->mdf_header.mfh_cmd = MFI_CMD_DCMD;
1371 	dcmd->mdf_header.mfh_timeout = 0;
1372 
1373 	dcmd->mdf_opcode = opc;
1374 	dcmd->mdf_header.mfh_data_len = 0;
1375 	ccb->ccb_direction = dir;
1376 
1377 	ccb->ccb_frame_size = MFI_DCMD_FRAME_SIZE;
1378 
1379 	/* handle special opcodes */
1380 	if (mbox)
1381 		memcpy(dcmd->mdf_mbox, mbox, MFI_MBOX_SIZE);
1382 
1383 	if (dir != MFI_DATA_NONE) {
1384 		if (dir == MFI_DATA_OUT)
1385 			memcpy(dma_buf, buf, len);
1386 		dcmd->mdf_header.mfh_data_len = len;
1387 		ccb->ccb_data = dma_buf;
1388 		ccb->ccb_len = len;
1389 		ccb->ccb_sgl = &dcmd->mdf_sgl;
1390 
1391 		if (mfi_create_sgl(sc, ccb, cold ? BUS_DMA_NOWAIT :
1392 		    BUS_DMA_WAITOK)) {
1393 			rv = EINVAL;
1394 			goto done;
1395 		}
1396 	}
1397 
1398 	if (cold) {
1399 		ccb->ccb_done = mfi_empty_done;
1400 		mfi_poll(sc, ccb);
1401 	} else
1402 		mfi_exec(sc, ccb);
1403 
1404 	if (dcmd->mdf_header.mfh_cmd_status != MFI_STAT_OK) {
1405 		if (dcmd->mdf_header.mfh_cmd_status == MFI_STAT_WRONG_STATE)
1406 			rv = ENXIO;
1407 		else
1408 			rv = EIO;
1409 		goto done;
1410 	}
1411 
1412 	if (dir == MFI_DATA_IN)
1413 		memcpy(buf, dma_buf, len);
1414 
1415 	rv = 0;
1416 done:
1417 	if (dma_buf)
1418 		dma_free(dma_buf, len);
1419 
1420 	return (rv);
1421 }
1422 
1423 int
1424 mfi_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
1425 {
1426 	struct mfi_softc	*sc = (struct mfi_softc *)link->adapter_softc;
1427 
1428 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_scsi_ioctl\n", DEVNAME(sc));
1429 
1430 	switch (cmd) {
1431 	case DIOCGCACHE:
1432 	case DIOCSCACHE:
1433 		return (mfi_ioctl_cache(link, cmd, (struct dk_cache *)addr));
1434 		break;
1435 
1436 	default:
1437 		if (sc->sc_ioctl)
1438 			return (sc->sc_ioctl(link->adapter_softc, cmd, addr));
1439 		break;
1440 	}
1441 
1442 	return (ENOTTY);
1443 }
1444 
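/*
 * DIOCGCACHE/DIOCSCACHE: report or change the cache policy of a logical
 * disk.  Controllers with on-board cache memory toggle the controller
 * read/write cache policy; cacheless controllers can only switch the
 * disk write cache on or off.
 */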
1445 int
1446 mfi_ioctl_cache(struct scsi_link *link, u_long cmd,  struct dk_cache *dc)
1447 {
1448 	struct mfi_softc	*sc = (struct mfi_softc *)link->adapter_softc;
1449 	int			 rv, wrenable, rdenable;
1450 	struct mfi_ld_prop	 ldp;
1451 	uint8_t			 mbox[MFI_MBOX_SIZE];
1452 
1453 	if (mfi_get_info(sc)) {
1454 		rv = EIO;
1455 		goto done;
1456 	}
1457 
1458 	if (!sc->sc_ld[link->target].ld_present) {
1459 		rv = EIO;
1460 		goto done;
1461 	}
1462 
1463 	mbox[0] = link->target;
1464 	if ((rv = mfi_mgmt(sc, MR_DCMD_LD_GET_PROPERTIES, MFI_DATA_IN,
1465 	    sizeof(ldp), &ldp, mbox)) != 0)
1466 		goto done;
1467 
1468 	if (sc->sc_info.mci_memory_size > 0) {
1469 		wrenable = ISSET(ldp.mlp_cur_cache_policy,
1470 		    MR_LD_CACHE_ALLOW_WRITE_CACHE)? 1 : 0;
1471 		rdenable = ISSET(ldp.mlp_cur_cache_policy,
1472 		    MR_LD_CACHE_ALLOW_READ_CACHE)? 1 : 0;
1473 	} else {
1474 		wrenable = ISSET(ldp.mlp_diskcache_policy,
1475 		    MR_LD_DISK_CACHE_ENABLE)? 1 : 0;
1476 		rdenable = 0;
1477 	}
1478 
1479 	if (cmd == DIOCGCACHE) {
1480 		dc->wrcache = wrenable;
1481 		dc->rdcache = rdenable;
1482 		goto done;
1483 	} /* else DIOCSCACHE */
1484 
1485 	if (((dc->wrcache) ? 1 : 0) == wrenable &&
1486 	    ((dc->rdcache) ? 1 : 0) == rdenable)
1487 		goto done;
1488 
1489 	mbox[0] = ldp.mlp_ld.mld_target;
1490 	mbox[1] = ldp.mlp_ld.mld_res;
1491 	*(uint16_t *)&mbox[2] = ldp.mlp_ld.mld_seq;
1492 
1493 	if (sc->sc_info.mci_memory_size > 0) {
1494 		if (dc->rdcache)
1495 			SET(ldp.mlp_cur_cache_policy,
1496 			    MR_LD_CACHE_ALLOW_READ_CACHE);
1497 		else
1498 			CLR(ldp.mlp_cur_cache_policy,
1499 			    MR_LD_CACHE_ALLOW_READ_CACHE);
1500 		if (dc->wrcache)
1501 			SET(ldp.mlp_cur_cache_policy,
1502 			    MR_LD_CACHE_ALLOW_WRITE_CACHE);
1503 		else
1504 			CLR(ldp.mlp_cur_cache_policy,
1505 			    MR_LD_CACHE_ALLOW_WRITE_CACHE);
1506 	} else {
1507 		if (dc->rdcache) {
1508 			rv = EOPNOTSUPP;
1509 			goto done;
1510 		}
1511 		if (dc->wrcache)
1512 			ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_ENABLE;
1513 		else
1514 			ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_DISABLE;
1515 	}
1516 
1517 	if ((rv = mfi_mgmt(sc, MR_DCMD_LD_SET_PROPERTIES, MFI_DATA_OUT,
1518 	    sizeof(ldp), &ldp, mbox)) != 0)
1519 		goto done;
1520 done:
1521 	return (rv);
1522 }
1523 
1524 #if NBIO > 0
1525 int
1526 mfi_ioctl(struct device *dev, u_long cmd, caddr_t addr)
1527 {
1528 	struct mfi_softc	*sc = (struct mfi_softc *)dev;
1529 	int error = 0;
1530 
1531 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl ", DEVNAME(sc));
1532 
1533 	rw_enter_write(&sc->sc_lock);
1534 
1535 	switch (cmd) {
1536 	case BIOCINQ:
1537 		DNPRINTF(MFI_D_IOCTL, "inq\n");
1538 		error = mfi_ioctl_inq(sc, (struct bioc_inq *)addr);
1539 		break;
1540 
1541 	case BIOCVOL:
1542 		DNPRINTF(MFI_D_IOCTL, "vol\n");
1543 		error = mfi_ioctl_vol(sc, (struct bioc_vol *)addr);
1544 		break;
1545 
1546 	case BIOCDISK:
1547 		DNPRINTF(MFI_D_IOCTL, "disk\n");
1548 		error = mfi_ioctl_disk(sc, (struct bioc_disk *)addr);
1549 		break;
1550 
1551 	case BIOCALARM:
1552 		DNPRINTF(MFI_D_IOCTL, "alarm\n");
1553 		error = mfi_ioctl_alarm(sc, (struct bioc_alarm *)addr);
1554 		break;
1555 
1556 	case BIOCBLINK:
1557 		DNPRINTF(MFI_D_IOCTL, "blink\n");
1558 		error = mfi_ioctl_blink(sc, (struct bioc_blink *)addr);
1559 		break;
1560 
1561 	case BIOCSETSTATE:
1562 		DNPRINTF(MFI_D_IOCTL, "setstate\n");
1563 		error = mfi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
1564 		break;
1565 
1566 	case BIOCPATROL:
1567 		DNPRINTF(MFI_D_IOCTL, "patrol\n");
1568 		error = mfi_ioctl_patrol(sc, (struct bioc_patrol *)addr);
1569 		break;
1570 
1571 	default:
1572 		DNPRINTF(MFI_D_IOCTL, " invalid ioctl\n");
1573 		error = EINVAL;
1574 	}
1575 
1576 	rw_exit_write(&sc->sc_lock);
1577 
1578 	return (error);
1579 }
1580 
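/*
 * Refresh the cached controller state used by the bio(4) ioctls: fetch
 * controller info, read the RAID configuration (probing its size with a
 * single-element read first), the logical disk list and per-LD details,
 * and recount the physical disks in use.
 */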
1581 int
1582 mfi_bio_getitall(struct mfi_softc *sc)
1583 {
1584 	int			i, d, size, rv = EINVAL;
1585 	uint8_t			mbox[MFI_MBOX_SIZE];
1586 	struct mfi_conf		*cfg = NULL;
1587 	struct mfi_ld_details	*ld_det = NULL;
1588 
1589 	/* get info */
1590 	if (mfi_get_info(sc)) {
1591 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_get_info failed\n",
1592 		    DEVNAME(sc));
1593 		goto done;
1594 	}
1595 
1596 	/* send single element command to retrieve size for full structure */
1597 	cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
1598 	if (cfg == NULL)
1599 		goto done;
1600 	if (mfi_mgmt(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg,
1601 	    NULL)) {
1602 		free(cfg, M_DEVBUF, sizeof *cfg);
1603 		goto done;
1604 	}
1605 
1606 	size = cfg->mfc_size;
1607 	free(cfg, M_DEVBUF, sizeof *cfg);
1608 
1609 	/* memory for read config */
1610 	cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
1611 	if (cfg == NULL)
1612 		goto done;
1613 	if (mfi_mgmt(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, size, cfg, NULL)) {
1614 		free(cfg, M_DEVBUF, size);
1615 		goto done;
1616 	}
1617 
1618 	/* replace current pointer with new one */
1619 	if (sc->sc_cfg)
1620 		free(sc->sc_cfg, M_DEVBUF, 0);
1621 	sc->sc_cfg = cfg;
1622 
1623 	/* get all ld info */
1624 	if (mfi_mgmt(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
1625 	    sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL))
1626 		goto done;
1627 
1628 	/* get memory for all ld structures */
1629 	size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
1630 	if (sc->sc_ld_sz != size) {
1631 		if (sc->sc_ld_details)
1632 			free(sc->sc_ld_details, M_DEVBUF, 0);
1633 
1634 		ld_det = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
1635 		if (ld_det == NULL)
1636 			goto done;
1637 		sc->sc_ld_sz = size;
1638 		sc->sc_ld_details = ld_det;
1639 	}
1640 
1641 	/* find used physical disks */
1642 	size = sizeof(struct mfi_ld_details);
1643 	for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
1644 		mbox[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
1645 		if (mfi_mgmt(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN, size,
1646 		    &sc->sc_ld_details[i], mbox))
1647 			goto done;
1648 
1649 		d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
1650 		    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
1651 	}
1652 	sc->sc_no_pd = d;
1653 
1654 	rv = 0;
1655 done:
1656 	return (rv);
1657 }
1658 
1659 int
1660 mfi_ioctl_inq(struct mfi_softc *sc, struct bioc_inq *bi)
1661 {
1662 	int			rv = EINVAL;
1663 	struct mfi_conf		*cfg = NULL;
1664 
1665 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq\n", DEVNAME(sc));
1666 
1667 	if (mfi_bio_getitall(sc)) {
1668 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_bio_getitall failed\n",
1669 		    DEVNAME(sc));
1670 		goto done;
1671 	}
1672 
1673 	/* count unused disks as volumes */
1674 	if (sc->sc_cfg == NULL)
1675 		goto done;
1676 	cfg = sc->sc_cfg;
1677 
1678 	bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
1679 	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
1680 #if notyet
1681 	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
1682 	    (bi->bi_nodisk - sc->sc_no_pd);
1683 #endif
1684 	/* tell bio who we are */
1685 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
1686 
1687 	rv = 0;
1688 done:
1689 	return (rv);
1690 }
1691 
1692 int
1693 mfi_ioctl_vol(struct mfi_softc *sc, struct bioc_vol *bv)
1694 {
1695 	int			i, per, rv = EINVAL;
1696 	struct scsi_link	*link;
1697 	struct device		*dev;
1698 
1699 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol %#x\n",
1700 	    DEVNAME(sc), bv->bv_volid);
1701 
1702 	/* we really could skip and expect that inq took care of it */
1703 	if (mfi_bio_getitall(sc)) {
1704 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_bio_getitall failed\n",
1705 		    DEVNAME(sc));
1706 		goto done;
1707 	}
1708 
1709 	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
1710 		/* go do hotspares & unused disks */
1711 		rv = mfi_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
1712 		goto done;
1713 	}
1714 
1715 	i = bv->bv_volid;
1716 	link = scsi_get_link(sc->sc_scsibus, i, 0);
1717 	if (link != NULL && link->device_softc != NULL) {
1718 		dev = link->device_softc;
1719 		strlcpy(bv->bv_dev, dev->dv_xname, sizeof(bv->bv_dev));
1720 	}
1721 
1722 	switch(sc->sc_ld_list.mll_list[i].mll_state) {
1723 	case MFI_LD_OFFLINE:
1724 		bv->bv_status = BIOC_SVOFFLINE;
1725 		break;
1726 
1727 	case MFI_LD_PART_DEGRADED:
1728 	case MFI_LD_DEGRADED:
1729 		bv->bv_status = BIOC_SVDEGRADED;
1730 		break;
1731 
1732 	case MFI_LD_ONLINE:
1733 		bv->bv_status = BIOC_SVONLINE;
1734 		break;
1735 
1736 	default:
1737 		bv->bv_status = BIOC_SVINVALID;
1738 		DNPRINTF(MFI_D_IOCTL, "%s: invalid logical disk state %#x\n",
1739 		    DEVNAME(sc),
1740 		    sc->sc_ld_list.mll_list[i].mll_state);
1741 	}
1742 
1743 	/* additional status can modify MFI status */
1744 	switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
1745 	case MFI_LD_PROG_CC:
1746 	case MFI_LD_PROG_BGI:
1747 		bv->bv_status = BIOC_SVSCRUB;
1748 		per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
1749 		bv->bv_percent = (per * 100) / 0xffff;
1750 		bv->bv_seconds =
1751 		    sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
1752 		break;
1753 
1754 	case MFI_LD_PROG_FGI:
1755 	case MFI_LD_PROG_RECONSTRUCT:
1756 		/* nothing yet */
1757 		break;
1758 	}
1759 
1760 	if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01)
1761 		bv->bv_cache = BIOC_CVWRITEBACK;
1762 	else
1763 		bv->bv_cache = BIOC_CVWRITETHROUGH;
1764 
1765 	/*
1766 	 * The RAID levels are determined per the SNIA DDF spec, this is only
1767 	 * a subset that is valid for the MFI controller.
1768 	 */
1769 	bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
1770 	if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_sec_raid ==
1771 	    MFI_DDF_SRL_SPANNED)
1772 		bv->bv_level *= 10;
1773 
1774 	bv->bv_nodisk = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
1775 	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
1776 
1777 	bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */
1778 
1779 	rv = 0;
1780 done:
1781 	return (rv);
1782 }
1783 
1784 int
1785 mfi_ioctl_disk(struct mfi_softc *sc, struct bioc_disk *bd)
1786 {
1787 	struct mfi_conf		*cfg;
1788 	struct mfi_array	*ar;
1789 	struct mfi_ld_cfg	*ld;
1790 	struct mfi_pd_details	*pd;
1791 	struct mfi_pd_progress	*mfp;
1792 	struct mfi_progress	*mp;
1793 	struct scsi_inquiry_data *inqbuf;
1794 	char			vend[8+16+4+1], *vendp;
1795 	int			rv = EINVAL;
1796 	int			arr, vol, disk, span;
1797 	uint8_t			mbox[MFI_MBOX_SIZE];
1798 
1799 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_disk %#x\n",
1800 	    DEVNAME(sc), bd->bd_diskid);
1801 
1802 	/* we really could skip and expect that inq took care of it */
1803 	if (mfi_bio_getitall(sc)) {
1804 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_bio_getitall failed\n",
1805 		    DEVNAME(sc));
1806 		return (rv);
1807 	}
1808 	cfg = sc->sc_cfg;
1809 
1810 	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
1811 
1812 	ar = cfg->mfc_array;
1813 	vol = bd->bd_volid;
1814 	if (vol >= cfg->mfc_no_ld) {
1815 		/* do hotspares */
1816 		rv = mfi_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
1817 		goto freeme;
1818 	}
1819 
1820 	/* calculate offset to ld structure */
1821 	ld = (struct mfi_ld_cfg *)(
1822 	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
1823 	    cfg->mfc_array_size * cfg->mfc_no_array);
1824 
1825 	/* use span 0 only when raid group is not spanned */
1826 	if (ld[vol].mlc_parm.mpa_span_depth > 1)
1827 		span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
1828 	else
1829 		span = 0;
1830 	arr = ld[vol].mlc_span[span].mls_index;
1831 
1832 	/* offset disk into pd list */
1833 	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;
1834 	bd->bd_target = ar[arr].pd[disk].mar_enc_slot;
1835 
1836 	/* get status */
1837 	switch (ar[arr].pd[disk].mar_pd_state){
1838 	case MFI_PD_UNCONFIG_GOOD:
1839 	case MFI_PD_FAILED:
1840 		bd->bd_status = BIOC_SDFAILED;
1841 		break;
1842 
1843 	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
1844 		bd->bd_status = BIOC_SDHOTSPARE;
1845 		break;
1846 
1847 	case MFI_PD_OFFLINE:
1848 		bd->bd_status = BIOC_SDOFFLINE;
1849 		break;
1850 
1851 	case MFI_PD_REBUILD:
1852 		bd->bd_status = BIOC_SDREBUILD;
1853 		break;
1854 
1855 	case MFI_PD_ONLINE:
1856 		bd->bd_status = BIOC_SDONLINE;
1857 		break;
1858 
1859 	case MFI_PD_UNCONFIG_BAD: /* XXX define new state in bio */
1860 	default:
1861 		bd->bd_status = BIOC_SDINVALID;
1862 		break;
1863 	}
1864 
1865 	/* get the remaining fields */
1866 	*((uint16_t *)&mbox) = ar[arr].pd[disk].mar_pd.mfp_id;
1867 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
1868 	    sizeof *pd, pd, mbox)) {
1869 		/* disk is missing but succeed command */
1870 		rv = 0;
1871 		goto freeme;
1872 	}
1873 
1874 	bd->bd_size = pd->mpd_size * 512; /* bytes per block */
1875 
1876 	/* if pd->mpd_enc_idx is 0 then it is not in an enclosure */
1877 	bd->bd_channel = pd->mpd_enc_idx;
1878 
1879 	inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
1880 	vendp = inqbuf->vendor;
1881 	memcpy(vend, vendp, sizeof vend - 1);
1882 	vend[sizeof vend - 1] = '\0';
1883 	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));
1884 
1885 	/* XXX find a way to retrieve serial nr from drive */
1886 	/* XXX find a way to get bd_procdev */
1887 
1888 	mfp = &pd->mpd_progress;
1889 	if (mfp->mfp_in_prog & MFI_PD_PROG_PR) {
1890 		mp = &mfp->mfp_patrol_read;
1891 		bd->bd_patrol.bdp_percent = (mp->mp_progress * 100) / 0xffff;
1892 		bd->bd_patrol.bdp_seconds = mp->mp_elapsed_seconds;
1893 	}
1894 
1895 	rv = 0;
1896 freeme:
1897 	free(pd, M_DEVBUF, sizeof *pd);
1898 
1899 	return (rv);
1900 }
1901 
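/*
 * Map the bioctl alarm opcodes onto the controller speaker DCMDs.
 * Only BIOC_GASTATUS transfers data back from the controller.
 */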
1902 int
1903 mfi_ioctl_alarm(struct mfi_softc *sc, struct bioc_alarm *ba)
1904 {
1905 	uint32_t		opc, dir = MFI_DATA_NONE;
1906 	int			rv = 0;
1907 	int8_t			ret;
1908 
1909 	switch (ba->ba_opcode) {
1910 	case BIOC_SADISABLE:
1911 		opc = MR_DCMD_SPEAKER_DISABLE;
1912 		break;
1913 
1914 	case BIOC_SAENABLE:
1915 		opc = MR_DCMD_SPEAKER_ENABLE;
1916 		break;
1917 
1918 	case BIOC_SASILENCE:
1919 		opc = MR_DCMD_SPEAKER_SILENCE;
1920 		break;
1921 
1922 	case BIOC_GASTATUS:
1923 		opc = MR_DCMD_SPEAKER_GET;
1924 		dir = MFI_DATA_IN;
1925 		break;
1926 
1927 	case BIOC_SATEST:
1928 		opc = MR_DCMD_SPEAKER_TEST;
1929 		break;
1930 
1931 	default:
1932 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_alarm biocalarm invalid "
1933 		    "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
1934 		return (EINVAL);
1935 	}
1936 
1937 	if (mfi_mgmt(sc, opc, dir, sizeof(ret), &ret, NULL))
1938 		rv = EINVAL;
1939 	else
1940 		if (ba->ba_opcode == BIOC_GASTATUS)
1941 			ba->ba_status = ret;
1942 		else
1943 			ba->ba_status = 0;
1944 
1945 	return (rv);
1946 }
1947 
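/*
 * Locate a physical disk by enclosure index and slot in the PD list and
 * issue a blink or unblink DCMD with its PD id in the mailbox.
 */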
1948 int
1949 mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *bb)
1950 {
1951 	int			i, found, rv = EINVAL;
1952 	uint8_t			mbox[MFI_MBOX_SIZE];
1953 	uint32_t		cmd;
1954 	struct mfi_pd_list	*pd;
1955 
1956 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink %x\n", DEVNAME(sc),
1957 	    bb->bb_status);
1958 
1959 	/* channel 0 means not in an enclosure, so it can't be blinked */
1960 	if (bb->bb_channel == 0)
1961 		return (EINVAL);
1962 
1963 	pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
1964 
1965 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1966 	    sizeof(*pd), pd, NULL))
1967 		goto done;
1968 
1969 	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1970 		if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
1971 		    bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
1972 			found = 1;
1973 			break;
1974 		}
1975 
1976 	if (!found)
1977 		goto done;
1978 
1979 	memset(mbox, 0, sizeof mbox);
1980 
1981 	*((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
1982 
1983 	switch (bb->bb_status) {
1984 	case BIOC_SBUNBLINK:
1985 		cmd = MR_DCMD_PD_UNBLINK;
1986 		break;
1987 
1988 	case BIOC_SBBLINK:
1989 		cmd = MR_DCMD_PD_BLINK;
1990 		break;
1991 
1992 	case BIOC_SBALARM:
1993 	default:
1994 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink biocblink invalid "
1995 		    "opcode %x\n", DEVNAME(sc), bb->bb_status);
1996 		goto done;
1997 	}
1998 
1999 
2000 	if (mfi_mgmt(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox))
2001 		goto done;
2002 
2003 	rv = 0;
2004 done:
2005 	free(pd, M_DEVBUF, sizeof *pd);
2006 	return (rv);
2007 }
2008 
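/*
 * Change the state of a physical disk.  The mailbox for
 * MR_DCMD_PD_SET_STATE carries the PD id in bytes 0-1, the sequence
 * number from the PD info in bytes 2-3 and the new state in byte 4.
 */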
2009 int
2010 mfi_ioctl_setstate(struct mfi_softc *sc, struct bioc_setstate *bs)
2011 {
2012 	struct mfi_pd_list	*pd;
2013 	struct mfi_pd_details	*info;
2014 	int			i, found, rv = EINVAL;
2015 	uint8_t			mbox[MFI_MBOX_SIZE];
2016 
2017 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate %x\n", DEVNAME(sc),
2018 	    bs->bs_status);
2019 
2020 	pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
2021 	info = malloc(sizeof *info, M_DEVBUF, M_WAITOK);
2022 
2023 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
2024 	    sizeof(*pd), pd, NULL))
2025 		goto done;
2026 
2027 	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
2028 		if (bs->bs_channel == pd->mpl_address[i].mpa_enc_index &&
2029 		    bs->bs_target == pd->mpl_address[i].mpa_enc_slot) {
2030 			found = 1;
2031 			break;
2032 		}
2033 
2034 	if (!found)
2035 		goto done;
2036 
2037 	memset(mbox, 0, sizeof mbox);
2038 
2039 	*((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
2040 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
2041 	    sizeof *info, info, mbox))
2042 		goto done;
2043 
2044 	*((uint16_t *)&mbox[0]) = pd->mpl_address[i].mpa_pd_id;
2045 	*((uint16_t *)&mbox[2]) = info->mpd_pd.mfp_seq;
2046 
2047 	switch (bs->bs_status) {
2048 	case BIOC_SSONLINE:
2049 		mbox[4] = MFI_PD_ONLINE;
2050 		break;
2051 
2052 	case BIOC_SSOFFLINE:
2053 		mbox[4] = MFI_PD_OFFLINE;
2054 		break;
2055 
2056 	case BIOC_SSHOTSPARE:
2057 		mbox[4] = MFI_PD_HOTSPARE;
2058 		break;
2059 
2060 	case BIOC_SSREBUILD:
2061 		mbox[4] = MFI_PD_REBUILD;
2062 		break;
2063 
2064 	default:
2065 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate invalid "
2066 		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
2067 		goto done;
2068 	}
2069 
2070 
2071 	if ((rv = mfi_mgmt(sc, MR_DCMD_PD_SET_STATE, MFI_DATA_NONE, 0, NULL,
2072 	    mbox)))
2073 		goto done;
2074 
2075 	rv = 0;
2076 done:
2077 	free(pd, M_DEVBUF, sizeof *pd);
2078 	free(info, M_DEVBUF, sizeof *info);
2079 	return (rv);
2080 }
2081 
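/*
 * Patrol read control: start or stop a run, switch between manual,
 * disabled and automatic operation (automatic mode also programs the
 * interval and the next run relative to the controller clock), or
 * report the current mode and run state.
 */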
2082 int
2083 mfi_ioctl_patrol(struct mfi_softc *sc, struct bioc_patrol *bp)
2084 {
2085 	uint32_t		opc, dir = MFI_DATA_NONE;
2086 	int			rv = 0;
2087 	struct mfi_pr_properties prop;
2088 	struct mfi_pr_status	status;
2089 	uint32_t		time, exec_freq;
2090 
2091 	switch (bp->bp_opcode) {
2092 	case BIOC_SPSTOP:
2093 	case BIOC_SPSTART:
2094 		if (bp->bp_opcode == BIOC_SPSTART)
2095 			opc = MR_DCMD_PR_START;
2096 		else
2097 			opc = MR_DCMD_PR_STOP;
2098 		dir = MFI_DATA_IN;
2099 		if (mfi_mgmt(sc, opc, dir, 0, NULL, NULL))
2100 			return (EINVAL);
2101 		break;
2102 
2103 	case BIOC_SPMANUAL:
2104 	case BIOC_SPDISABLE:
2105 	case BIOC_SPAUTO:
2106 		/* Get device's time. */
2107 		opc = MR_DCMD_TIME_SECS_GET;
2108 		dir = MFI_DATA_IN;
2109 		if (mfi_mgmt(sc, opc, dir, sizeof(time), &time, NULL))
2110 			return (EINVAL);
2111 
2112 		opc = MR_DCMD_PR_GET_PROPERTIES;
2113 		dir = MFI_DATA_IN;
2114 		if (mfi_mgmt(sc, opc, dir, sizeof(prop), &prop, NULL))
2115 			return (EINVAL);
2116 
2117 		switch (bp->bp_opcode) {
2118 		case BIOC_SPMANUAL:
2119 			prop.op_mode = MFI_PR_OPMODE_MANUAL;
2120 			break;
2121 		case BIOC_SPDISABLE:
2122 			prop.op_mode = MFI_PR_OPMODE_DISABLED;
2123 			break;
2124 		case BIOC_SPAUTO:
2125 			if (bp->bp_autoival != 0) {
2126 				if (bp->bp_autoival == -1)
2127 					/* continuously */
2128 					exec_freq = 0xffffffffU;
2129 				else if (bp->bp_autoival > 0)
2130 					exec_freq = bp->bp_autoival;
2131 				else
2132 					return (EINVAL);
2133 				prop.exec_freq = exec_freq;
2134 			}
2135 			if (bp->bp_autonext != 0) {
2136 				if (bp->bp_autonext < 0)
2137 					return (EINVAL);
2138 				else
2139 					prop.next_exec = time + bp->bp_autonext;
2140 			}
2141 			prop.op_mode = MFI_PR_OPMODE_AUTO;
2142 			break;
2143 		}
2144 
2145 		opc = MR_DCMD_PR_SET_PROPERTIES;
2146 		dir = MFI_DATA_OUT;
2147 		if (mfi_mgmt(sc, opc, dir, sizeof(prop), &prop, NULL))
2148 			return (EINVAL);
2149 
2150 		break;
2151 
2152 	case BIOC_GPSTATUS:
2153 		opc = MR_DCMD_PR_GET_PROPERTIES;
2154 		dir = MFI_DATA_IN;
2155 		if (mfi_mgmt(sc, opc, dir, sizeof(prop), &prop, NULL))
2156 			return (EINVAL);
2157 
2158 		opc = MR_DCMD_PR_GET_STATUS;
2159 		dir = MFI_DATA_IN;
2160 		if (mfi_mgmt(sc, opc, dir, sizeof(status), &status, NULL))
2161 			return (EINVAL);
2162 
2163 		/* Get device's time. */
2164 		opc = MR_DCMD_TIME_SECS_GET;
2165 		dir = MFI_DATA_IN;
2166 		if (mfi_mgmt(sc, opc, dir, sizeof(time), &time, NULL))
2167 			return (EINVAL);
2168 
2169 		switch (prop.op_mode) {
2170 		case MFI_PR_OPMODE_AUTO:
2171 			bp->bp_mode = BIOC_SPMAUTO;
2172 			bp->bp_autoival = prop.exec_freq;
2173 			bp->bp_autonext = prop.next_exec;
2174 			bp->bp_autonow = time;
2175 			break;
2176 		case MFI_PR_OPMODE_MANUAL:
2177 			bp->bp_mode = BIOC_SPMMANUAL;
2178 			break;
2179 		case MFI_PR_OPMODE_DISABLED:
2180 			bp->bp_mode = BIOC_SPMDISABLED;
2181 			break;
2182 		default:
2183 			printf("%s: unknown patrol mode %d\n",
2184 			    DEVNAME(sc), prop.op_mode);
2185 			break;
2186 		}
2187 
2188 		switch (status.state) {
2189 		case MFI_PR_STATE_STOPPED:
2190 			bp->bp_status = BIOC_SPSSTOPPED;
2191 			break;
2192 		case MFI_PR_STATE_READY:
2193 			bp->bp_status = BIOC_SPSREADY;
2194 			break;
2195 		case MFI_PR_STATE_ACTIVE:
2196 			bp->bp_status = BIOC_SPSACTIVE;
2197 			break;
2198 		case MFI_PR_STATE_ABORTED:
2199 			bp->bp_status = BIOC_SPSABORTED;
2200 			break;
2201 		default:
2202 			printf("%s: unknown patrol state %d\n",
2203 			    DEVNAME(sc), status.state);
2204 			break;
2205 		}
2206 
2207 		break;
2208 
2209 	default:
2210 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_patrol biocpatrol invalid "
2211 		    "opcode %x\n", DEVNAME(sc), bp->bp_opcode);
2212 		return (EINVAL);
2213 	}
2214 
2215 	return (rv);
2216 }
2217 
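/*
 * Fill in hotspare information.  The config is fetched in two steps, a
 * single-element read to learn its size followed by the full read; the
 * hotspare entries start after the array and logical disk sections and
 * the result is returned as either a bioc_vol or a bioc_disk depending
 * on the requested type.
 */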
2218 int
2219 mfi_bio_hs(struct mfi_softc *sc, int volid, int type, void *bio_hs)
2220 {
2221 	struct mfi_conf		*cfg;
2222 	struct mfi_hotspare	*hs;
2223 	struct mfi_pd_details	*pd;
2224 	struct bioc_disk	*sdhs;
2225 	struct bioc_vol		*vdhs;
2226 	struct scsi_inquiry_data *inqbuf;
2227 	char			vend[8+16+4+1], *vendp;
2228 	int			i, rv = EINVAL;
2229 	uint32_t		size;
2230 	uint8_t			mbox[MFI_MBOX_SIZE];
2231 
2232 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs %d\n", DEVNAME(sc), volid);
2233 
2234 	if (!bio_hs)
2235 		return (EINVAL);
2236 
2237 	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
2238 
2239 	/* send single element command to retrieve size for full structure */
2240 	cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
2241 	if (mfi_mgmt(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg, NULL))
2242 		goto freeme;
2243 
2244 	size = cfg->mfc_size;
2245 	free(cfg, M_DEVBUF, sizeof *cfg);
2246 
2247 	/* memory for read config */
2248 	cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
2249 	if (mfi_mgmt(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, size, cfg, NULL))
2250 		goto freeme;
2251 
2252 	/* calculate offset to hs structure */
2253 	hs = (struct mfi_hotspare *)(
2254 	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
2255 	    cfg->mfc_array_size * cfg->mfc_no_array +
2256 	    cfg->mfc_ld_size * cfg->mfc_no_ld);
2257 
2258 	if (volid < cfg->mfc_no_ld)
2259 		goto freeme; /* not a hotspare */
2260 
2261 	if (volid >= (cfg->mfc_no_ld + cfg->mfc_no_hs))
2262 		goto freeme; /* not a hotspare */
2263 
2264 	/* offset into hotspare structure */
2265 	i = volid - cfg->mfc_no_ld;
2266 
2267 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs i %d volid %d no_ld %d no_hs %d "
2268 	    "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
2269 	    cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
2270 
2271 	/* get pd fields */
2272 	memset(mbox, 0, sizeof mbox);
2273 	*((uint16_t *)&mbox) = hs[i].mhs_pd.mfp_id;
2274 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
2275 	    sizeof *pd, pd, mbox)) {
2276 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs illegal PD\n",
2277 		    DEVNAME(sc));
2278 		goto freeme;
2279 	}
2280 
2281 	switch (type) {
2282 	case MFI_MGMT_VD:
2283 		vdhs = bio_hs;
2284 		vdhs->bv_status = BIOC_SVONLINE;
2285 		vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
2286 		vdhs->bv_level = -1; /* hotspare */
2287 		vdhs->bv_nodisk = 1;
2288 		break;
2289 
2290 	case MFI_MGMT_SD:
2291 		sdhs = bio_hs;
2292 		sdhs->bd_status = BIOC_SDHOTSPARE;
2293 		sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
2294 		sdhs->bd_channel = pd->mpd_enc_idx;
2295 		sdhs->bd_target = pd->mpd_enc_slot;
2296 		inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
2297 		vendp = inqbuf->vendor;
2298 		memcpy(vend, vendp, sizeof vend - 1);
2299 		vend[sizeof vend - 1] = '\0';
2300 		strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
2301 		break;
2302 
2303 	default:
2304 		goto freeme;
2305 	}
2306 
2307 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs 6\n", DEVNAME(sc));
2308 	rv = 0;
2309 freeme:
2310 	free(pd, M_DEVBUF, sizeof *pd);
2311 	free(cfg, M_DEVBUF, 0);
2312 
2313 	return (rv);
2314 }
2315 
2316 #ifndef SMALL_KERNEL
2317 
2318 static const char *mfi_bbu_indicators[] = {
2319 	"pack missing",
2320 	"voltage low",
2321 	"temp high",
2322 	"charge active",
2323 	"discharge active",
2324 	"learn cycle req'd",
2325 	"learn cycle active",
2326 	"learn cycle failed",
2327 	"learn cycle timeout",
2328 	"I2C errors",
2329 	"replace pack",
2330 	"low capacity",
2331 	"periodic learn req'd"
2332 };
2333 
2334 #define MFI_BBU_SENSORS 4
2335 
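/*
 * Refresh the BBU sensors.  If the status DCMD fails everything is
 * marked unknown; if no battery is present the "bbu ok" indicator goes
 * critical.  Otherwise overall health is derived from the firmware
 * status word masked with the per-battery-type bad-state bits (plus the
 * state-of-health flag for BBU packs), and the individual status bits
 * are exported through the indicator sensors.
 */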
2336 int
2337 mfi_bbu(struct mfi_softc *sc)
2338 {
2339 	struct mfi_bbu_status bbu;
2340 	u_int32_t status;
2341 	u_int32_t mask;
2342 	u_int32_t soh_bad;
2343 	int i;
2344 
2345 	if (mfi_mgmt(sc, MR_DCMD_BBU_GET_STATUS, MFI_DATA_IN,
2346 	    sizeof(bbu), &bbu, NULL) != 0) {
2347 		for (i = 0; i < MFI_BBU_SENSORS; i++) {
2348 			sc->sc_bbu[i].value = 0;
2349 			sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
2350 		}
2351 		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
2352 			sc->sc_bbu_status[i].value = 0;
2353 			sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
2354 		}
2355 		return (-1);
2356 	}
2357 
2358 	switch (bbu.battery_type) {
2359 	case MFI_BBU_TYPE_IBBU:
2360 		mask = MFI_BBU_STATE_BAD_IBBU;
2361 		soh_bad = 0;
2362 		break;
2363 	case MFI_BBU_TYPE_BBU:
2364 		mask = MFI_BBU_STATE_BAD_BBU;
2365 		soh_bad = (bbu.detail.bbu.is_SOH_good == 0);
2366 		break;
2367 
2368 	case MFI_BBU_TYPE_NONE:
2369 	default:
2370 		sc->sc_bbu[0].value = 0;
2371 		sc->sc_bbu[0].status = SENSOR_S_CRIT;
2372 		for (i = 1; i < MFI_BBU_SENSORS; i++) {
2373 			sc->sc_bbu[i].value = 0;
2374 			sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
2375 		}
2376 		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
2377 			sc->sc_bbu_status[i].value = 0;
2378 			sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
2379 		}
2380 		return (0);
2381 	}
2382 
2383 	status = letoh32(bbu.fw_status);
2384 
2385 	sc->sc_bbu[0].value = ((status & mask) || soh_bad) ? 0 : 1;
2386 	sc->sc_bbu[0].status = ((status & mask) || soh_bad) ? SENSOR_S_CRIT :
2387 	    SENSOR_S_OK;
2388 
2389 	sc->sc_bbu[1].value = letoh16(bbu.voltage) * 1000;
2390 	sc->sc_bbu[2].value = (int16_t)letoh16(bbu.current) * 1000;
2391 	sc->sc_bbu[3].value = letoh16(bbu.temperature) * 1000000 + 273150000;
2392 	for (i = 1; i < MFI_BBU_SENSORS; i++)
2393 		sc->sc_bbu[i].status = SENSOR_S_UNSPEC;
2394 
2395 	for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
2396 		sc->sc_bbu_status[i].value = (status & (1 << i)) ? 1 : 0;
2397 		sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
2398 	}
2399 
2400 	return (0);
2401 }
2402 
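/*
 * Register the sensors: when the adapter reports a BBU, the health,
 * voltage, current and temperature sensors plus one indicator per
 * status bit; and one drive sensor per logical disk, named after the
 * attached scsi device.  A sensor task refreshes them periodically.
 */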
2403 int
2404 mfi_create_sensors(struct mfi_softc *sc)
2405 {
2406 	struct device		*dev;
2407 	struct scsi_link	*link;
2408 	int			i;
2409 
2410 	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
2411 	    sizeof(sc->sc_sensordev.xname));
2412 
2413 	if (ISSET(letoh32(sc->sc_info.mci_adapter_ops), MFI_INFO_AOPS_BBU)) {
2414 		sc->sc_bbu = mallocarray(MFI_BBU_SENSORS, sizeof(*sc->sc_bbu),
2415 		    M_DEVBUF, M_WAITOK | M_ZERO);
2416 
2417 		sc->sc_bbu[0].type = SENSOR_INDICATOR;
2418 		sc->sc_bbu[0].status = SENSOR_S_UNKNOWN;
2419 		strlcpy(sc->sc_bbu[0].desc, "bbu ok",
2420 		    sizeof(sc->sc_bbu[0].desc));
2421 		sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[0]);
2422 
2423 		sc->sc_bbu[1].type = SENSOR_VOLTS_DC;
2424 		sc->sc_bbu[1].status = SENSOR_S_UNSPEC;
2425 		sc->sc_bbu[2].type = SENSOR_AMPS;
2426 		sc->sc_bbu[2].status = SENSOR_S_UNSPEC;
2427 		sc->sc_bbu[3].type = SENSOR_TEMP;
2428 		sc->sc_bbu[3].status = SENSOR_S_UNSPEC;
2429 		for (i = 1; i < MFI_BBU_SENSORS; i++) {
2430 			strlcpy(sc->sc_bbu[i].desc, "bbu",
2431 			    sizeof(sc->sc_bbu[i].desc));
2432 			sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[i]);
2433 		}
2434 
2435 		sc->sc_bbu_status = mallocarray(nitems(mfi_bbu_indicators),
2436 		    sizeof(*sc->sc_bbu_status), M_DEVBUF, M_WAITOK | M_ZERO);
2437 
2438 		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
2439 			sc->sc_bbu_status[i].type = SENSOR_INDICATOR;
2440 			sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
2441 			strlcpy(sc->sc_bbu_status[i].desc,
2442 			    mfi_bbu_indicators[i],
2443 			    sizeof(sc->sc_bbu_status[i].desc));
2444 
2445 			sensor_attach(&sc->sc_sensordev, &sc->sc_bbu_status[i]);
2446 		}
2447 	}
2448 
2449 	sc->sc_sensors = mallocarray(sc->sc_ld_cnt, sizeof(struct ksensor),
2450 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2451 	if (sc->sc_sensors == NULL)
2452 		return (1);
2453 
2454 	for (i = 0; i < sc->sc_ld_cnt; i++) {
2455 		link = scsi_get_link(sc->sc_scsibus, i, 0);
2456 		if (link == NULL)
2457 			goto bad;
2458 
2459 		dev = link->device_softc;
2460 
2461 		sc->sc_sensors[i].type = SENSOR_DRIVE;
2462 		sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
2463 
2464 		strlcpy(sc->sc_sensors[i].desc, dev->dv_xname,
2465 		    sizeof(sc->sc_sensors[i].desc));
2466 
2467 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
2468 	}
2469 
2470 	if (sensor_task_register(sc, mfi_refresh_sensors, 10) == NULL)
2471 		goto bad;
2472 
2473 	sensordev_install(&sc->sc_sensordev);
2474 
2475 	return (0);
2476 
2477 bad:
2478 	free(sc->sc_sensors, M_DEVBUF,
2479 	    sc->sc_ld_cnt * sizeof(struct ksensor));
2480 
2481 	return (1);
2482 }
2483 
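/*
 * Sensor task: refresh the BBU sensors first (give up if that fails),
 * then map each volume's bioctl status onto its drive sensor.
 */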
2484 void
2485 mfi_refresh_sensors(void *arg)
2486 {
2487 	struct mfi_softc	*sc = arg;
2488 	int			i, rv;
2489 	struct bioc_vol		bv;
2490 
2491 	if (sc->sc_bbu != NULL && mfi_bbu(sc) != 0)
2492 		return;
2493 
2494 	for (i = 0; i < sc->sc_ld_cnt; i++) {
2495 		bzero(&bv, sizeof(bv));
2496 		bv.bv_volid = i;
2497 
2498 		rw_enter_write(&sc->sc_lock);
2499 		rv = mfi_ioctl_vol(sc, &bv);
2500 		rw_exit_write(&sc->sc_lock);
2501 
2502 		if (rv != 0)
2503 			return;
2504 
2505 		switch (bv.bv_status) {
2506 		case BIOC_SVOFFLINE:
2507 			sc->sc_sensors[i].value = SENSOR_DRIVE_FAIL;
2508 			sc->sc_sensors[i].status = SENSOR_S_CRIT;
2509 			break;
2510 
2511 		case BIOC_SVDEGRADED:
2512 			sc->sc_sensors[i].value = SENSOR_DRIVE_PFAIL;
2513 			sc->sc_sensors[i].status = SENSOR_S_WARN;
2514 			break;
2515 
2516 		case BIOC_SVSCRUB:
2517 		case BIOC_SVONLINE:
2518 			sc->sc_sensors[i].value = SENSOR_DRIVE_ONLINE;
2519 			sc->sc_sensors[i].status = SENSOR_S_OK;
2520 			break;
2521 
2522 		case BIOC_SVINVALID:
2523 			/* FALLTHROUGH */
2524 		default:
2525 			sc->sc_sensors[i].value = 0; /* unknown */
2526 			sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
2527 			break;
2528 		}
2529 	}
2530 }
2531 #endif /* SMALL_KERNEL */
2532 #endif /* NBIO > 0 */
2533 
2534 void
2535 mfi_start(struct mfi_softc *sc, struct mfi_ccb *ccb)
2536 {
2537 	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
2538 	    ccb->ccb_pframe_offset, sc->sc_frames_size,
2539 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2540 
2541 	mfi_post(sc, ccb);
2542 }
2543 
2544 void
2545 mfi_done(struct mfi_softc *sc, struct mfi_ccb *ccb)
2546 {
2547 	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
2548 	    ccb->ccb_pframe_offset, sc->sc_frames_size,
2549 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2550 
2551 	if (ccb->ccb_len > 0) {
2552 		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
2553 		    0, ccb->ccb_dmamap->dm_mapsize,
2554 		    (ccb->ccb_direction == MFI_DATA_IN) ?
2555 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
2556 
2557 		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
2558 	}
2559 
2560 	ccb->ccb_done(sc, ccb);
2561 }
2562 
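/*
 * Per-chipset IOP routines.  The xscale, ppc, gen2 and skinny variants
 * differ only in which registers report firmware state, how interrupts
 * are enabled and acknowledged, and how frame addresses are written to
 * the inbound queue port.
 */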
2563 u_int32_t
2564 mfi_xscale_fw_state(struct mfi_softc *sc)
2565 {
2566 	return (mfi_read(sc, MFI_OMSG0));
2567 }
2568 
2569 void
2570 mfi_xscale_intr_ena(struct mfi_softc *sc)
2571 {
2572 	mfi_write(sc, MFI_OMSK, MFI_ENABLE_INTR);
2573 }
2574 
2575 int
2576 mfi_xscale_intr(struct mfi_softc *sc)
2577 {
2578 	u_int32_t status;
2579 
2580 	status = mfi_read(sc, MFI_OSTS);
2581 	if (!ISSET(status, MFI_OSTS_INTR_VALID))
2582 		return (0);
2583 
2584 	/* write status back to acknowledge interrupt */
2585 	mfi_write(sc, MFI_OSTS, status);
2586 
2587 	return (1);
2588 }
2589 
2590 void
2591 mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2592 {
2593 	mfi_write(sc, MFI_IQP, (ccb->ccb_pframe >> 3) |
2594 	    ccb->ccb_extra_frames);
2595 }
2596 
2597 u_int32_t
2598 mfi_ppc_fw_state(struct mfi_softc *sc)
2599 {
2600 	return (mfi_read(sc, MFI_OSP));
2601 }
2602 
2603 void
2604 mfi_ppc_intr_ena(struct mfi_softc *sc)
2605 {
2606 	mfi_write(sc, MFI_ODC, 0xffffffff);
2607 	mfi_write(sc, MFI_OMSK, ~0x80000004);
2608 }
2609 
2610 int
2611 mfi_ppc_intr(struct mfi_softc *sc)
2612 {
2613 	u_int32_t status;
2614 
2615 	status = mfi_read(sc, MFI_OSTS);
2616 	if (!ISSET(status, MFI_OSTS_PPC_INTR_VALID))
2617 		return (0);
2618 
2619 	/* write status back to acknowledge interrupt */
2620 	mfi_write(sc, MFI_ODC, status);
2621 
2622 	return (1);
2623 }
2624 
2625 void
2626 mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2627 {
2628 	mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
2629 	    (ccb->ccb_extra_frames << 1));
2630 }
2631 
2632 u_int32_t
2633 mfi_gen2_fw_state(struct mfi_softc *sc)
2634 {
2635 	return (mfi_read(sc, MFI_OSP));
2636 }
2637 
2638 void
2639 mfi_gen2_intr_ena(struct mfi_softc *sc)
2640 {
2641 	mfi_write(sc, MFI_ODC, 0xffffffff);
2642 	mfi_write(sc, MFI_OMSK, ~MFI_OSTS_GEN2_INTR_VALID);
2643 }
2644 
2645 int
2646 mfi_gen2_intr(struct mfi_softc *sc)
2647 {
2648 	u_int32_t status;
2649 
2650 	status = mfi_read(sc, MFI_OSTS);
2651 	if (!ISSET(status, MFI_OSTS_GEN2_INTR_VALID))
2652 		return (0);
2653 
2654 	/* write status back to acknowledge interrupt */
2655 	mfi_write(sc, MFI_ODC, status);
2656 
2657 	return (1);
2658 }
2659 
2660 void
2661 mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2662 {
2663 	mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
2664 	    (ccb->ccb_extra_frames << 1));
2665 }
2666 
2667 u_int32_t
2668 mfi_skinny_fw_state(struct mfi_softc *sc)
2669 {
2670 	return (mfi_read(sc, MFI_OSP));
2671 }
2672 
2673 void
2674 mfi_skinny_intr_ena(struct mfi_softc *sc)
2675 {
2676 	mfi_write(sc, MFI_OMSK, ~0x00000001);
2677 }
2678 
2679 int
2680 mfi_skinny_intr(struct mfi_softc *sc)
2681 {
2682 	u_int32_t status;
2683 
2684 	status = mfi_read(sc, MFI_OSTS);
2685 	if (!ISSET(status, MFI_OSTS_SKINNY_INTR_VALID))
2686 		return (0);
2687 
2688 	/* write status back to acknowledge interrupt */
2689 	mfi_write(sc, MFI_OSTS, status);
2690 
2691 	return (1);
2692 }
2693 
2694 void
2695 mfi_skinny_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2696 {
2697 	mfi_write(sc, MFI_IQPL, 0x1 | ccb->ccb_pframe |
2698 	    (ccb->ccb_extra_frames << 1));
2699 	mfi_write(sc, MFI_IQPH, 0x00000000);
2700 }
2701 
2702 u_int
2703 mfi_skinny_sgd_load(struct mfi_softc *sc, struct mfi_ccb *ccb)
2704 {
2705 	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
2706 	union mfi_sgl		*sgl = ccb->ccb_sgl;
2707 	bus_dma_segment_t	*sgd = ccb->ccb_dmamap->dm_segs;
2708 	int			 i;
2709 
2710 	switch (hdr->mfh_cmd) {
2711 	case MFI_CMD_LD_READ:
2712 	case MFI_CMD_LD_WRITE:
2713 	case MFI_CMD_PD_SCSI_IO:
2714 		/* Use MFI_FRAME_IEEE for some IO commands on skinny adapters */
2715 		for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
2716 			sgl->sg_skinny[i].addr = htole64(sgd[i].ds_addr);
2717 			sgl->sg_skinny[i].len = htole32(sgd[i].ds_len);
2718 			sgl->sg_skinny[i].flag = 0;
2719 		}
2720 		hdr->mfh_flags |= MFI_FRAME_IEEE | MFI_FRAME_SGL64;
2721 
2722 		return (ccb->ccb_dmamap->dm_nsegs * sizeof(sgl->sg_skinny));
2723 	default:
2724 		return (mfi_default_sgd_load(sc, ccb));
2725 	}
2726 }
2727 
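/*
 * Probe routine for the pass-through bus: only LUN 0 is probed, and
 * only physical disks whose firmware state is MFI_PD_SYSTEM are
 * attached.
 */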
2728 int
2729 mfi_pd_scsi_probe(struct scsi_link *link)
2730 {
2731 	uint8_t mbox[MFI_MBOX_SIZE];
2732 	struct mfi_softc *sc = link->adapter_softc;
2733 	struct mfi_pd_link *pl = sc->sc_pd->pd_links[link->target];
2734 
2735 	if (link->lun > 0)
2736 		return (0);
2737 
2738 	if (pl == NULL)
2739 		return (ENXIO);
2740 
2741 	bzero(mbox, sizeof(mbox));
2742 	memcpy(&mbox[0], &pl->pd_id, sizeof(pl->pd_id));
2743 
2744 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
2745 	    sizeof(pl->pd_info), &pl->pd_info, mbox))
2746 		return (EIO);
2747 
2748 	if (letoh16(pl->pd_info.mpd_fw_state) != MFI_PD_SYSTEM)
2749 		return (ENXIO);
2750 
2751 	return (0);
2752 }
2753 
2754 void
2755 mfi_pd_scsi_cmd(struct scsi_xfer *xs)
2756 {
2757 	struct scsi_link *link = xs->sc_link;
2758 	struct mfi_softc *sc = link->adapter_softc;
2759 	struct mfi_ccb *ccb = xs->io;
2760 	struct mfi_pass_frame *pf = &ccb->ccb_frame->mfr_pass;
2761 	struct mfi_pd_link *pl = sc->sc_pd->pd_links[link->target];
2762 
2763 	KERNEL_UNLOCK();
2764 
2765 	mfi_scrub_ccb(ccb);
2766 	xs->error = XS_NOERROR;
2767 
2768 	pf->mpf_header.mfh_cmd = MFI_CMD_PD_SCSI_IO;
2769 	pf->mpf_header.mfh_target_id = pl->pd_id;
2770 	pf->mpf_header.mfh_lun_id = link->lun;
2771 	pf->mpf_header.mfh_cdb_len = xs->cmdlen;
2772 	pf->mpf_header.mfh_timeout = 0;
2773 	pf->mpf_header.mfh_data_len = htole32(xs->datalen); /* XXX */
2774 	pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;
2775 	pf->mpf_sense_addr = htole64(ccb->ccb_psense);
2776 
2777 	memset(pf->mpf_cdb, 0, sizeof(pf->mpf_cdb));
2778 	memcpy(pf->mpf_cdb, xs->cmd, xs->cmdlen);
2779 
2780 	ccb->ccb_done = mfi_scsi_xs_done;
2781 	ccb->ccb_cookie = xs;
2782 	ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
2783 	ccb->ccb_sgl = &pf->mpf_sgl;
2784 
2785 	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
2786 		ccb->ccb_direction = xs->flags & SCSI_DATA_IN ?
2787 		    MFI_DATA_IN : MFI_DATA_OUT;
2788 	else
2789 		ccb->ccb_direction = MFI_DATA_NONE;
2790 
2791 	if (xs->data) {
2792 		ccb->ccb_data = xs->data;
2793 		ccb->ccb_len = xs->datalen;
2794 
2795 		if (mfi_create_sgl(sc, ccb, (xs->flags & SCSI_NOSLEEP) ?
2796 		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
2797 			goto stuffup;
2798 	}
2799 
2800 	if (xs->flags & SCSI_POLL)
2801 		mfi_poll(sc, ccb);
2802 	else
2803 		mfi_start(sc, ccb);
2804 
2805 	KERNEL_LOCK();
2806 	return;
2807 
2808 stuffup:
2809 	xs->error = XS_DRIVER_STUFFUP;
2810 	KERNEL_LOCK();
2811 	scsi_done(xs);
2812 }
2813