1 /* $OpenBSD: mfi.c,v 1.122 2012/01/12 06:12:30 dlg Exp $ */
2 /*
3  * Copyright (c) 2006 Marco Peereboom <marco@peereboom.us>
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include "bio.h"
19 
20 #include <sys/types.h>
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/buf.h>
24 #include <sys/ioctl.h>
25 #include <sys/device.h>
26 #include <sys/kernel.h>
27 #include <sys/malloc.h>
28 #include <sys/proc.h>
29 #include <sys/rwlock.h>
30 #include <sys/sensors.h>
31 #include <sys/pool.h>
32 
33 #include <machine/bus.h>
34 
35 #include <scsi/scsi_all.h>
36 #include <scsi/scsi_disk.h>
37 #include <scsi/scsiconf.h>
38 
39 #include <dev/biovar.h>
40 #include <dev/ic/mfireg.h>
41 #include <dev/ic/mfivar.h>
42 
43 #ifdef MFI_DEBUG
44 uint32_t	mfi_debug = 0
45 /*		    | MFI_D_CMD */
46 /*		    | MFI_D_INTR */
47 /*		    | MFI_D_MISC */
48 /*		    | MFI_D_DMA */
49 /*		    | MFI_D_IOCTL */
50 /*		    | MFI_D_RW */
51 /*		    | MFI_D_MEM */
52 /*		    | MFI_D_CCB */
53 		;
54 #endif
55 
56 struct cfdriver mfi_cd = {
57 	NULL, "mfi", DV_DULL
58 };
59 
60 void	mfi_scsi_cmd(struct scsi_xfer *);
61 int	mfi_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
62 void	mfiminphys(struct buf *bp, struct scsi_link *sl);
63 
64 struct scsi_adapter mfi_switch = {
65 	mfi_scsi_cmd, mfiminphys, 0, 0, mfi_scsi_ioctl
66 };
67 
68 void *		mfi_get_ccb(void *);
69 void		mfi_put_ccb(void *, void *);
70 int		mfi_init_ccb(struct mfi_softc *);
71 
72 struct mfi_mem	*mfi_allocmem(struct mfi_softc *, size_t);
73 void		mfi_freemem(struct mfi_softc *, struct mfi_mem *);
74 
75 int		mfi_transition_firmware(struct mfi_softc *);
76 int		mfi_initialize_firmware(struct mfi_softc *);
77 int		mfi_get_info(struct mfi_softc *);
78 uint32_t	mfi_read(struct mfi_softc *, bus_size_t);
79 void		mfi_write(struct mfi_softc *, bus_size_t, uint32_t);
80 int		mfi_poll(struct mfi_ccb *);
81 int		mfi_create_sgl(struct mfi_ccb *, int);
82 
83 /* commands */
84 int		mfi_scsi_ld(struct mfi_ccb *, struct scsi_xfer *);
85 int		mfi_scsi_io(struct mfi_ccb *, struct scsi_xfer *, uint64_t,
86 		    uint32_t);
87 void		mfi_scsi_xs_done(struct mfi_ccb *);
88 int		mfi_mgmt(struct mfi_softc *, uint32_t, uint32_t, uint32_t,
89 		    void *, uint8_t *);
90 int		mfi_do_mgmt(struct mfi_softc *, struct mfi_ccb * , uint32_t,
91 		    uint32_t, uint32_t, void *, uint8_t *);
92 void		mfi_mgmt_done(struct mfi_ccb *);
93 
94 #if NBIO > 0
95 int		mfi_ioctl(struct device *, u_long, caddr_t);
96 int		mfi_bio_getitall(struct mfi_softc *);
97 int		mfi_ioctl_inq(struct mfi_softc *, struct bioc_inq *);
98 int		mfi_ioctl_vol(struct mfi_softc *, struct bioc_vol *);
99 int		mfi_ioctl_disk(struct mfi_softc *, struct bioc_disk *);
100 int		mfi_ioctl_alarm(struct mfi_softc *, struct bioc_alarm *);
101 int		mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *);
102 int		mfi_ioctl_setstate(struct mfi_softc *, struct bioc_setstate *);
103 int		mfi_bio_hs(struct mfi_softc *, int, int, void *);
104 #ifndef SMALL_KERNEL
105 int		mfi_create_sensors(struct mfi_softc *);
106 void		mfi_refresh_sensors(void *);
107 #endif /* SMALL_KERNEL */
108 #endif /* NBIO > 0 */
109 
110 void		mfi_start(struct mfi_softc *, struct mfi_ccb *);
111 void		mfi_done(struct mfi_ccb *);
112 u_int32_t	mfi_xscale_fw_state(struct mfi_softc *);
113 void		mfi_xscale_intr_ena(struct mfi_softc *);
114 int		mfi_xscale_intr(struct mfi_softc *);
115 void		mfi_xscale_post(struct mfi_softc *, struct mfi_ccb *);
116 
117 static const struct mfi_iop_ops mfi_iop_xscale = {
118 	mfi_xscale_fw_state,
119 	mfi_xscale_intr_ena,
120 	mfi_xscale_intr,
121 	mfi_xscale_post
122 };
123 
124 u_int32_t	mfi_ppc_fw_state(struct mfi_softc *);
125 void		mfi_ppc_intr_ena(struct mfi_softc *);
126 int		mfi_ppc_intr(struct mfi_softc *);
127 void		mfi_ppc_post(struct mfi_softc *, struct mfi_ccb *);
128 
129 static const struct mfi_iop_ops mfi_iop_ppc = {
130 	mfi_ppc_fw_state,
131 	mfi_ppc_intr_ena,
132 	mfi_ppc_intr,
133 	mfi_ppc_post
134 };
135 
136 u_int32_t	mfi_gen2_fw_state(struct mfi_softc *);
137 void		mfi_gen2_intr_ena(struct mfi_softc *);
138 int		mfi_gen2_intr(struct mfi_softc *);
139 void		mfi_gen2_post(struct mfi_softc *, struct mfi_ccb *);
140 
141 static const struct mfi_iop_ops mfi_iop_gen2 = {
142 	mfi_gen2_fw_state,
143 	mfi_gen2_intr_ena,
144 	mfi_gen2_intr,
145 	mfi_gen2_post
146 };
147 
148 u_int32_t	mfi_skinny_fw_state(struct mfi_softc *);
149 void		mfi_skinny_intr_ena(struct mfi_softc *);
150 int		mfi_skinny_intr(struct mfi_softc *);
151 void		mfi_skinny_post(struct mfi_softc *, struct mfi_ccb *);
152 
153 static const struct mfi_iop_ops mfi_iop_skinny = {
154 	mfi_skinny_fw_state,
155 	mfi_skinny_intr_ena,
156 	mfi_skinny_intr,
157 	mfi_skinny_post
158 };
159 
160 #define mfi_fw_state(_s)	((_s)->sc_iop->mio_fw_state(_s))
161 #define mfi_intr_enable(_s)	((_s)->sc_iop->mio_intr_ena(_s))
162 #define mfi_my_intr(_s)		((_s)->sc_iop->mio_intr(_s))
163 #define mfi_post(_s, _c)	((_s)->sc_iop->mio_post((_s), (_c)))
164 
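/*
 * mfi_get_ccb: scsi_iopool "get" hook.  Take a ccb off the free list
 * under sc_ccb_mtx, or return NULL if the pool is currently empty.
 */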
165 void *
166 mfi_get_ccb(void *cookie)
167 {
168 	struct mfi_softc	*sc = cookie;
169 	struct mfi_ccb		*ccb;
170 
171 	mtx_enter(&sc->sc_ccb_mtx);
172 	ccb = SLIST_FIRST(&sc->sc_ccb_freeq);
173 	if (ccb != NULL) {
174 		SLIST_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
175 		ccb->ccb_state = MFI_CCB_READY;
176 	}
177 	mtx_leave(&sc->sc_ccb_mtx);
178 
179 	DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb);
180 
181 	return (ccb);
182 }
183 
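/*
 * mfi_put_ccb: scsi_iopool "put" hook.  Reset the ccb and its frame
 * header to a pristine state and return it to the free list.
 */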
184 void
185 mfi_put_ccb(void *cookie, void *io)
186 {
187 	struct mfi_softc	*sc = cookie;
188 	struct mfi_ccb		*ccb = io;
189 	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
190 
191 	DNPRINTF(MFI_D_CCB, "%s: mfi_put_ccb: %p\n", DEVNAME(sc), ccb);
192 
193 	hdr->mfh_cmd_status = 0x0;
194 	hdr->mfh_flags = 0x0;
195 	ccb->ccb_state = MFI_CCB_FREE;
196 	ccb->ccb_cookie = NULL;
197 	ccb->ccb_flags = 0;
198 	ccb->ccb_done = NULL;
199 	ccb->ccb_direction = 0;
200 	ccb->ccb_frame_size = 0;
201 	ccb->ccb_extra_frames = 0;
202 	ccb->ccb_sgl = NULL;
203 	ccb->ccb_data = NULL;
204 	ccb->ccb_len = 0;
205 
206 	mtx_enter(&sc->sc_ccb_mtx);
207 	SLIST_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
208 	mtx_leave(&sc->sc_ccb_mtx);
209 }
210 
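/*
 * mfi_init_ccb: allocate the ccb array and point each ccb at its slice
 * of the preallocated frame and sense memory, creating one dma map per
 * ccb for data transfers.
 */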
211 int
212 mfi_init_ccb(struct mfi_softc *sc)
213 {
214 	struct mfi_ccb		*ccb;
215 	uint32_t		i;
216 	int			error;
217 
218 	DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc));
219 
220 	sc->sc_ccb = malloc(sizeof(struct mfi_ccb) * sc->sc_max_cmds,
221 	    M_DEVBUF, M_WAITOK|M_ZERO);
222 
223 	for (i = 0; i < sc->sc_max_cmds; i++) {
224 		ccb = &sc->sc_ccb[i];
225 
226 		ccb->ccb_sc = sc;
227 
228 		/* select i'th frame */
229 		ccb->ccb_frame = (union mfi_frame *)
230 		    (MFIMEM_KVA(sc->sc_frames) + sc->sc_frames_size * i);
231 		ccb->ccb_pframe =
232 		    MFIMEM_DVA(sc->sc_frames) + sc->sc_frames_size * i;
233 		ccb->ccb_pframe_offset = sc->sc_frames_size * i;
234 		ccb->ccb_frame->mfr_header.mfh_context = i;
235 
236 		/* select i'th sense */
237 		ccb->ccb_sense = (struct mfi_sense *)
238 		    (MFIMEM_KVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
239 		ccb->ccb_psense =
240 		    (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
241 
242 		/* create a dma map for transfer */
243 		error = bus_dmamap_create(sc->sc_dmat,
244 		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
245 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
246 		if (error) {
247 			printf("%s: cannot create ccb dmamap (%d)\n",
248 			    DEVNAME(sc), error);
249 			goto destroy;
250 		}
251 
252 		DNPRINTF(MFI_D_CCB,
253 		    "ccb(%d): %p frame: %#x (%#x) sense: %#x (%#x) map: %#x\n",
254 		    ccb->ccb_frame->mfr_header.mfh_context, ccb,
255 		    ccb->ccb_frame, ccb->ccb_pframe,
256 		    ccb->ccb_sense, ccb->ccb_psense,
257 		    ccb->ccb_dmamap);
258 
259 		/* add ccb to queue */
260 		mfi_put_ccb(sc, ccb);
261 	}
262 
263 	return (0);
264 destroy:
265 	/* free dma maps and ccb memory */
266 	while (i > 0) {
267 		i--;
268 		ccb = &sc->sc_ccb[i];
269 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
270 	}
271 
272 	free(sc->sc_ccb, M_DEVBUF);
273 
274 	return (1);
275 }
276 
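/*
 * mfi_read/mfi_write: 32-bit register accessors that bracket the
 * access with a bus_space barrier.
 */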
277 uint32_t
278 mfi_read(struct mfi_softc *sc, bus_size_t r)
279 {
280 	uint32_t rv;
281 
282 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
283 	    BUS_SPACE_BARRIER_READ);
284 	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
285 
286 	DNPRINTF(MFI_D_RW, "%s: mr 0x%x 0x%08x ", DEVNAME(sc), r, rv);
287 	return (rv);
288 }
289 
290 void
291 mfi_write(struct mfi_softc *sc, bus_size_t r, uint32_t v)
292 {
293 	DNPRINTF(MFI_D_RW, "%s: mw 0x%x 0x%08x", DEVNAME(sc), r, v);
294 
295 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
296 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
297 	    BUS_SPACE_BARRIER_WRITE);
298 }
299 
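/*
 * mfi_allocmem: allocate a single-segment chunk of dma-able memory,
 * map it into kva and load it into a dma map.  Used for the reply
 * queue, command frames and sense buffers; mfi_freemem() undoes it.
 */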
300 struct mfi_mem *
301 mfi_allocmem(struct mfi_softc *sc, size_t size)
302 {
303 	struct mfi_mem		*mm;
304 	int			nsegs;
305 
306 	DNPRINTF(MFI_D_MEM, "%s: mfi_allocmem: %d\n", DEVNAME(sc),
307 	    size);
308 
309 	mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_NOWAIT|M_ZERO);
310 	if (mm == NULL)
311 		return (NULL);
312 
313 	mm->am_size = size;
314 
315 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
316 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
317 		goto amfree;
318 
319 	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1,
320 	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
321 		goto destroy;
322 
323 	if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva,
324 	    BUS_DMA_NOWAIT) != 0)
325 		goto free;
326 
327 	if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL,
328 	    BUS_DMA_NOWAIT) != 0)
329 		goto unmap;
330 
331 	DNPRINTF(MFI_D_MEM, "  kva: %p  dva: %p  map: %p\n",
332 	    mm->am_kva, mm->am_map->dm_segs[0].ds_addr, mm->am_map);
333 
334 	return (mm);
335 
336 unmap:
337 	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
338 free:
339 	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
340 destroy:
341 	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
342 amfree:
343 	free(mm, M_DEVBUF);
344 
345 	return (NULL);
346 }
347 
348 void
349 mfi_freemem(struct mfi_softc *sc, struct mfi_mem *mm)
350 {
351 	DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm);
352 
353 	bus_dmamap_unload(sc->sc_dmat, mm->am_map);
354 	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size);
355 	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
356 	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
357 	free(mm, M_DEVBUF);
358 }
359 
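/*
 * mfi_transition_firmware: step the firmware state machine towards
 * MFI_STATE_READY, issuing handshake/ready writes where needed and
 * polling each intermediate state with a bounded wait.
 */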
360 int
361 mfi_transition_firmware(struct mfi_softc *sc)
362 {
363 	int32_t			fw_state, cur_state;
364 	int			max_wait, i;
365 
366 	fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
367 
368 	DNPRINTF(MFI_D_CMD, "%s: mfi_transition_firmware: %#x\n", DEVNAME(sc),
369 	    fw_state);
370 
371 	while (fw_state != MFI_STATE_READY) {
372 		DNPRINTF(MFI_D_MISC,
373 		    "%s: waiting for firmware to become ready\n",
374 		    DEVNAME(sc));
375 		cur_state = fw_state;
376 		switch (fw_state) {
377 		case MFI_STATE_FAULT:
378 			printf("%s: firmware fault\n", DEVNAME(sc));
379 			return (1);
380 		case MFI_STATE_WAIT_HANDSHAKE:
381 			if (sc->sc_flags & MFI_IOP_SKINNY)
382 				mfi_write(sc, MFI_SKINNY_IDB, MFI_INIT_CLEAR_HANDSHAKE);
383 			else
384 				mfi_write(sc, MFI_IDB, MFI_INIT_CLEAR_HANDSHAKE);
385 			max_wait = 2;
386 			break;
387 		case MFI_STATE_OPERATIONAL:
388 			if (sc->sc_flags & MFI_IOP_SKINNY)
389 				mfi_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
390 			else
391 				mfi_write(sc, MFI_IDB, MFI_INIT_READY);
392 			max_wait = 10;
393 			break;
394 		case MFI_STATE_UNDEFINED:
395 		case MFI_STATE_BB_INIT:
396 			max_wait = 2;
397 			break;
398 		case MFI_STATE_FW_INIT:
399 		case MFI_STATE_DEVICE_SCAN:
400 		case MFI_STATE_FLUSH_CACHE:
401 			max_wait = 20;
402 			break;
403 		default:
404 			printf("%s: unknown firmware state %d\n",
405 			    DEVNAME(sc), fw_state);
406 			return (1);
407 		}
408 		for (i = 0; i < (max_wait * 10); i++) {
409 			fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
410 			if (fw_state == cur_state)
411 				DELAY(100000);
412 			else
413 				break;
414 		}
415 		if (fw_state == cur_state) {
416 			printf("%s: firmware stuck in state %#x\n",
417 			    DEVNAME(sc), fw_state);
418 			return (1);
419 		}
420 	}
421 
422 	return (0);
423 }
424 
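/*
 * mfi_initialize_firmware: build an INIT frame whose queue info block
 * carries the bus addresses of the reply queue and producer/consumer
 * indexes in sc_pcq, and hand it to the firmware by polling.
 */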
425 int
426 mfi_initialize_firmware(struct mfi_softc *sc)
427 {
428 	struct mfi_ccb		*ccb;
429 	struct mfi_init_frame	*init;
430 	struct mfi_init_qinfo	*qinfo;
431 	uint64_t		handy;
432 
433 	DNPRINTF(MFI_D_MISC, "%s: mfi_initialize_firmware\n", DEVNAME(sc));
434 
435 	if ((ccb = mfi_get_ccb(sc)) == NULL)
436 		return (1);
437 
438 	init = &ccb->ccb_frame->mfr_init;
439 	qinfo = (struct mfi_init_qinfo *)((uint8_t *)init + MFI_FRAME_SIZE);
440 
441 	memset(qinfo, 0, sizeof *qinfo);
442 	qinfo->miq_rq_entries = sc->sc_max_cmds + 1;
443 
444 	handy = MFIMEM_DVA(sc->sc_pcq) +
445 	    offsetof(struct mfi_prod_cons, mpc_reply_q);
446 	qinfo->miq_rq_addr_hi = htole32(handy >> 32);
447 	qinfo->miq_rq_addr_lo = htole32(handy);
448 
449 	handy = MFIMEM_DVA(sc->sc_pcq) +
450 	    offsetof(struct mfi_prod_cons, mpc_producer);
451 	qinfo->miq_pi_addr_hi = htole32(handy >> 32);
452 	qinfo->miq_pi_addr_lo = htole32(handy);
453 
454 	handy = MFIMEM_DVA(sc->sc_pcq) +
455 	    offsetof(struct mfi_prod_cons, mpc_consumer);
456 	qinfo->miq_ci_addr_hi = htole32(handy >> 32);
457 	qinfo->miq_ci_addr_lo = htole32(handy);
458 
459 	init->mif_header.mfh_cmd = MFI_CMD_INIT;
460 	init->mif_header.mfh_data_len = sizeof *qinfo;
461 	init->mif_qinfo_new_addr_lo = htole32(ccb->ccb_pframe + MFI_FRAME_SIZE);
462 
463 	DNPRINTF(MFI_D_MISC, "%s: entries: %#x rq: %08x%08x pi: %08x%08x "
464 	    "ci: %08x%08x\n",
465 	    DEVNAME(sc),
466 	    qinfo->miq_rq_entries,
467 	    qinfo->miq_rq_addr_hi, qinfo->miq_rq_addr_lo,
468 	    qinfo->miq_pi_addr_hi, qinfo->miq_pi_addr_lo,
469 	    qinfo->miq_ci_addr_hi, qinfo->miq_ci_addr_lo);
470 
471 	if (mfi_poll(ccb)) {
472 		printf("%s: mfi_initialize_firmware failed\n", DEVNAME(sc));
473 		return (1);
474 	}
475 
476 	mfi_put_ccb(sc, ccb);
477 
478 	return (0);
479 }
480 
481 int
482 mfi_get_info(struct mfi_softc *sc)
483 {
484 #ifdef MFI_DEBUG
485 	int i;
486 #endif
487 	DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc));
488 
489 	if (mfi_mgmt(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN,
490 	    sizeof(sc->sc_info), &sc->sc_info, NULL))
491 		return (1);
492 
493 #ifdef MFI_DEBUG
494 	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
495 		printf("%s: active FW %s Version %s date %s time %s\n",
496 		    DEVNAME(sc),
497 		    sc->sc_info.mci_image_component[i].mic_name,
498 		    sc->sc_info.mci_image_component[i].mic_version,
499 		    sc->sc_info.mci_image_component[i].mic_build_date,
500 		    sc->sc_info.mci_image_component[i].mic_build_time);
501 	}
502 
503 	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
504 		printf("%s: pending FW %s Version %s date %s time %s\n",
505 		    DEVNAME(sc),
506 		    sc->sc_info.mci_pending_image_component[i].mic_name,
507 		    sc->sc_info.mci_pending_image_component[i].mic_version,
508 		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
509 		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
510 	}
511 
512 	printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
513 	    DEVNAME(sc),
514 	    sc->sc_info.mci_max_arms,
515 	    sc->sc_info.mci_max_spans,
516 	    sc->sc_info.mci_max_arrays,
517 	    sc->sc_info.mci_max_lds,
518 	    sc->sc_info.mci_product_name);
519 
520 	printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
521 	    DEVNAME(sc),
522 	    sc->sc_info.mci_serial_number,
523 	    sc->sc_info.mci_hw_present,
524 	    sc->sc_info.mci_current_fw_time,
525 	    sc->sc_info.mci_max_cmds,
526 	    sc->sc_info.mci_max_sg_elements);
527 
528 	printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
529 	    DEVNAME(sc),
530 	    sc->sc_info.mci_max_request_size,
531 	    sc->sc_info.mci_lds_present,
532 	    sc->sc_info.mci_lds_degraded,
533 	    sc->sc_info.mci_lds_offline,
534 	    sc->sc_info.mci_pd_present);
535 
536 	printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
537 	    DEVNAME(sc),
538 	    sc->sc_info.mci_pd_disks_present,
539 	    sc->sc_info.mci_pd_disks_pred_failure,
540 	    sc->sc_info.mci_pd_disks_failed);
541 
542 	printf("%s: nvram %d mem %d flash %d\n",
543 	    DEVNAME(sc),
544 	    sc->sc_info.mci_nvram_size,
545 	    sc->sc_info.mci_memory_size,
546 	    sc->sc_info.mci_flash_size);
547 
548 	printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
549 	    DEVNAME(sc),
550 	    sc->sc_info.mci_ram_correctable_errors,
551 	    sc->sc_info.mci_ram_uncorrectable_errors,
552 	    sc->sc_info.mci_cluster_allowed,
553 	    sc->sc_info.mci_cluster_active);
554 
555 	printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
556 	    DEVNAME(sc),
557 	    sc->sc_info.mci_max_strips_per_io,
558 	    sc->sc_info.mci_raid_levels,
559 	    sc->sc_info.mci_adapter_ops,
560 	    sc->sc_info.mci_ld_ops);
561 
562 	printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
563 	    DEVNAME(sc),
564 	    sc->sc_info.mci_stripe_sz_ops.min,
565 	    sc->sc_info.mci_stripe_sz_ops.max,
566 	    sc->sc_info.mci_pd_ops,
567 	    sc->sc_info.mci_pd_mix_support);
568 
569 	printf("%s: ecc_bucket %d pckg_prop %s\n",
570 	    DEVNAME(sc),
571 	    sc->sc_info.mci_ecc_bucket_count,
572 	    sc->sc_info.mci_package_version);
573 
574 	printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
575 	    DEVNAME(sc),
576 	    sc->sc_info.mci_properties.mcp_seq_num,
577 	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
578 	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
579 	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);
580 
581 	printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
582 	    DEVNAME(sc),
583 	    sc->sc_info.mci_properties.mcp_rebuild_rate,
584 	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
585 	    sc->sc_info.mci_properties.mcp_bgi_rate,
586 	    sc->sc_info.mci_properties.mcp_cc_rate);
587 
588 	printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
589 	    DEVNAME(sc),
590 	    sc->sc_info.mci_properties.mcp_recon_rate,
591 	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
592 	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
593 	    sc->sc_info.mci_properties.mcp_spinup_delay,
594 	    sc->sc_info.mci_properties.mcp_cluster_enable);
595 
596 	printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
597 	    DEVNAME(sc),
598 	    sc->sc_info.mci_properties.mcp_coercion_mode,
599 	    sc->sc_info.mci_properties.mcp_alarm_enable,
600 	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
601 	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
602 	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);
603 
604 	printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
605 	    DEVNAME(sc),
606 	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
607 	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
608 	    sc->sc_info.mci_properties.mcp_expose_encl_devices);
609 
610 	printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
611 	    DEVNAME(sc),
612 	    sc->sc_info.mci_pci.mip_vendor,
613 	    sc->sc_info.mci_pci.mip_device,
614 	    sc->sc_info.mci_pci.mip_subvendor,
615 	    sc->sc_info.mci_pci.mip_subdevice);
616 
617 	printf("%s: type %#x port_count %d port_addr ",
618 	    DEVNAME(sc),
619 	    sc->sc_info.mci_host.mih_type,
620 	    sc->sc_info.mci_host.mih_port_count);
621 
622 	for (i = 0; i < 8; i++)
623 		printf("%.0llx ", sc->sc_info.mci_host.mih_port_addr[i]);
624 	printf("\n");
625 
626 	printf("%s: type %.x port_count %d port_addr ",
627 	    DEVNAME(sc),
628 	    sc->sc_info.mci_device.mid_type,
629 	    sc->sc_info.mci_device.mid_port_count);
630 
631 	for (i = 0; i < 8; i++)
632 		printf("%.0llx ", sc->sc_info.mci_device.mid_port_addr[i]);
633 	printf("\n");
634 #endif /* MFI_DEBUG */
635 
636 	return (0);
637 }
638 
639 void
640 mfiminphys(struct buf *bp, struct scsi_link *sl)
641 {
642 	DNPRINTF(MFI_D_MISC, "mfiminphys: %d\n", bp->b_bcount);
643 
644 	/* XXX currently using MFI_MAXFER = MAXPHYS */
645 	if (bp->b_bcount > MFI_MAXFER)
646 		bp->b_bcount = MFI_MAXFER;
647 	minphys(bp);
648 }
649 
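/*
 * mfi_attach: common attach path for all bus frontends.  Select the
 * iop ops, bring the firmware to READY, allocate the reply queue,
 * frame and sense memory, initialize the ccbs, send the INIT frame,
 * fetch the controller info and attach the scsibus on top.
 */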
650 int
651 mfi_attach(struct mfi_softc *sc, enum mfi_iop iop)
652 {
653 	struct scsibus_attach_args saa;
654 	uint32_t		status, frames, max_sgl;
655 	int			i;
656 
657 	switch (iop) {
658 	case MFI_IOP_XSCALE:
659 		sc->sc_iop = &mfi_iop_xscale;
660 		break;
661 	case MFI_IOP_PPC:
662 		sc->sc_iop = &mfi_iop_ppc;
663 		break;
664 	case MFI_IOP_GEN2:
665 		sc->sc_iop = &mfi_iop_gen2;
666 		break;
667 	case MFI_IOP_SKINNY:
668 		sc->sc_iop = &mfi_iop_skinny;
669 		break;
670 	default:
671 		panic("%s: unknown iop %d", DEVNAME(sc), iop);
672 	}
673 
674 	DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc));
675 
676 	if (mfi_transition_firmware(sc))
677 		return (1);
678 
679 	SLIST_INIT(&sc->sc_ccb_freeq);
680 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
681 	scsi_iopool_init(&sc->sc_iopool, sc, mfi_get_ccb, mfi_put_ccb);
682 
683 	rw_init(&sc->sc_lock, "mfi_lock");
684 
685 	status = mfi_fw_state(sc);
686 	sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
687 	max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
688 	if (sc->sc_64bit_dma) {
689 		sc->sc_max_sgl = min(max_sgl, (128 * 1024) / PAGE_SIZE + 1);
690 		sc->sc_sgl_size = sizeof(struct mfi_sg64);
691 		sc->sc_sgl_flags = MFI_FRAME_SGL64;
692 	} else {
693 		sc->sc_max_sgl = max_sgl;
694 		sc->sc_sgl_size = sizeof(struct mfi_sg32);
695 		sc->sc_sgl_flags = MFI_FRAME_SGL32;
696 	}
697 	DNPRINTF(MFI_D_MISC, "%s: 64bit: %d max commands: %u, max sgl: %u\n",
698 	    DEVNAME(sc), sc->sc_64bit_dma, sc->sc_max_cmds, sc->sc_max_sgl);
699 
700 	/* consumer/producer and reply queue memory */
701 	sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) +
702 	    sizeof(struct mfi_prod_cons));
703 	if (sc->sc_pcq == NULL) {
704 		printf("%s: unable to allocate reply queue memory\n",
705 		    DEVNAME(sc));
706 		goto nopcq;
707 	}
708 
709 	/* frame memory */
710 	/* size each command's frame area to fit the largest possible SG list */
711 	frames = (sc->sc_sgl_size * sc->sc_max_sgl + MFI_FRAME_SIZE - 1) /
712 	    MFI_FRAME_SIZE + 1;
713 	sc->sc_frames_size = frames * MFI_FRAME_SIZE;
714 	sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds);
715 	if (sc->sc_frames == NULL) {
716 		printf("%s: unable to allocate frame memory\n", DEVNAME(sc));
717 		goto noframe;
718 	}
719 	/* XXX hack, fix this */
720 	if (MFIMEM_DVA(sc->sc_frames) & 0x3f) {
721 		printf("%s: improper frame alignment (%#x) FIXME\n",
722 		    DEVNAME(sc), MFIMEM_DVA(sc->sc_frames));
723 		goto noframe;
724 	}
725 
726 	/* sense memory */
727 	sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
728 	if (sc->sc_sense == NULL) {
729 		printf("%s: unable to allocate sense memory\n", DEVNAME(sc));
730 		goto nosense;
731 	}
732 
733 	/* now that we have all memory bits go initialize ccbs */
734 	if (mfi_init_ccb(sc)) {
735 		printf("%s: could not init ccb list\n", DEVNAME(sc));
736 		goto noinit;
737 	}
738 
739 	/* kickstart firmware with all addresses and pointers */
740 	if (mfi_initialize_firmware(sc)) {
741 		printf("%s: could not initialize firmware\n", DEVNAME(sc));
742 		goto noinit;
743 	}
744 
745 	if (mfi_get_info(sc)) {
746 		printf("%s: could not retrieve controller information\n",
747 		    DEVNAME(sc));
748 		goto noinit;
749 	}
750 
751 	printf("%s: logical drives %d, version %s, %dMB RAM\n",
752 	    DEVNAME(sc),
753 	    sc->sc_info.mci_lds_present,
754 	    sc->sc_info.mci_package_version,
755 	    sc->sc_info.mci_memory_size);
756 
757 	sc->sc_ld_cnt = sc->sc_info.mci_lds_present;
758 	sc->sc_max_ld = sc->sc_ld_cnt;
759 	for (i = 0; i < sc->sc_ld_cnt; i++)
760 		sc->sc_ld[i].ld_present = 1;
761 
762 	if (sc->sc_ld_cnt)
763 		sc->sc_link.openings = sc->sc_max_cmds / sc->sc_ld_cnt;
764 	else
765 		sc->sc_link.openings = sc->sc_max_cmds;
766 
767 	sc->sc_link.adapter_softc = sc;
768 	sc->sc_link.adapter = &mfi_switch;
769 	sc->sc_link.adapter_target = MFI_MAX_LD;
770 	sc->sc_link.adapter_buswidth = sc->sc_max_ld;
771 	sc->sc_link.pool = &sc->sc_iopool;
772 
773 	bzero(&saa, sizeof(saa));
774 	saa.saa_sc_link = &sc->sc_link;
775 
776 	config_found(&sc->sc_dev, &saa, scsiprint);
777 
778 	/* enable interrupts */
779 	mfi_intr_enable(sc);
780 
781 #if NBIO > 0
782 	if (bio_register(&sc->sc_dev, mfi_ioctl) != 0)
783 		panic("%s: controller registration failed", DEVNAME(sc));
784 	else
785 		sc->sc_ioctl = mfi_ioctl;
786 
787 #ifndef SMALL_KERNEL
788 	if (mfi_create_sensors(sc) != 0)
789 		printf("%s: unable to create sensors\n", DEVNAME(sc));
790 #endif
791 #endif /* NBIO > 0 */
792 
793 	return (0);
794 noinit:
795 	mfi_freemem(sc, sc->sc_sense);
796 nosense:
797 	mfi_freemem(sc, sc->sc_frames);
798 noframe:
799 	mfi_freemem(sc, sc->sc_pcq);
800 nopcq:
801 	return (1);
802 }
803 
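/*
 * mfi_poll: issue a command and busy-wait (up to roughly 5 seconds)
 * for the firmware to update its status byte, then sync and unload
 * the data dma map.  Used during autoconf and for SCSI_POLL requests.
 */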
804 int
805 mfi_poll(struct mfi_ccb *ccb)
806 {
807 	struct mfi_softc *sc = ccb->ccb_sc;
808 	struct mfi_frame_header	*hdr;
809 	int			to = 0, rv = 0;
810 
811 	DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc));
812 
813 	hdr = &ccb->ccb_frame->mfr_header;
814 	hdr->mfh_cmd_status = 0xff;
815 	hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
816 
817 	mfi_start(sc, ccb);
818 
819 	while (hdr->mfh_cmd_status == 0xff) {
820 		delay(1000);
821 		if (to++ > 5000) /* XXX 5 seconds busywait sucks */
822 			break;
823 	}
824 	if (hdr->mfh_cmd_status == 0xff) {
825 		printf("%s: timeout on ccb %d\n", DEVNAME(sc),
826 		    hdr->mfh_context);
827 		ccb->ccb_flags |= MFI_CCB_F_ERR;
828 		rv = 1;
829 	}
830 
831 	if (ccb->ccb_direction != MFI_DATA_NONE) {
832 		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
833 		    ccb->ccb_dmamap->dm_mapsize,
834 		    (ccb->ccb_direction & MFI_DATA_IN) ?
835 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
836 
837 		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
838 	}
839 
840 	return (rv);
841 }
842 
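/*
 * mfi_intr: walk the reply queue between the consumer and producer
 * indexes, completing each posted context via mfi_done(), and advance
 * the consumer index when done.
 */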
843 int
844 mfi_intr(void *arg)
845 {
846 	struct mfi_softc	*sc = arg;
847 	struct mfi_prod_cons	*pcq;
848 	struct mfi_ccb		*ccb;
849 	uint32_t		producer, consumer, ctx;
850 	int			claimed = 0;
851 
852 	if (!mfi_my_intr(sc))
853 		return (0);
854 
855 	pcq = MFIMEM_KVA(sc->sc_pcq);
856 	producer = pcq->mpc_producer;
857 	consumer = pcq->mpc_consumer;
858 
859 	DNPRINTF(MFI_D_INTR, "%s: mfi_intr %#x %#x\n", DEVNAME(sc), sc, pcq);
860 
861 	while (consumer != producer) {
862 		DNPRINTF(MFI_D_INTR, "%s: mfi_intr pi %#x ci %#x\n",
863 		    DEVNAME(sc), producer, consumer);
864 
865 		ctx = pcq->mpc_reply_q[consumer];
866 		pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX;
867 		if (ctx == MFI_INVALID_CTX)
868 			printf("%s: invalid context, p: %d c: %d\n",
869 			    DEVNAME(sc), producer, consumer);
870 		else {
871 			/* XXX remove from queue and call scsi_done */
872 			ccb = &sc->sc_ccb[ctx];
873 			DNPRINTF(MFI_D_INTR, "%s: mfi_intr context %#x\n",
874 			    DEVNAME(sc), ctx);
875 			mfi_done(ccb);
876 
877 			claimed = 1;
878 		}
879 		consumer++;
880 		if (consumer == (sc->sc_max_cmds + 1))
881 			consumer = 0;
882 	}
883 
884 	pcq->mpc_consumer = consumer;
885 
886 	return (claimed);
887 }
888 
889 int
890 mfi_scsi_io(struct mfi_ccb *ccb, struct scsi_xfer *xs, uint64_t blockno,
891     uint32_t blockcnt)
892 {
893 	struct scsi_link	*link = xs->sc_link;
894 	struct mfi_io_frame	*io;
895 	uint64_t		handy;
896 
897 	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_io: %d\n",
898 	    DEVNAME((struct mfi_softc *)link->adapter_softc), link->target);
899 
900 	if (!xs->data)
901 		return (1);
902 
903 	io = &ccb->ccb_frame->mfr_io;
904 	if (xs->flags & SCSI_DATA_IN) {
905 		io->mif_header.mfh_cmd = MFI_CMD_LD_READ;
906 		ccb->ccb_direction = MFI_DATA_IN;
907 	} else {
908 		io->mif_header.mfh_cmd = MFI_CMD_LD_WRITE;
909 		ccb->ccb_direction = MFI_DATA_OUT;
910 	}
911 	io->mif_header.mfh_target_id = link->target;
912 	io->mif_header.mfh_timeout = 0;
913 	io->mif_header.mfh_flags = 0;
914 	io->mif_header.mfh_sense_len = MFI_SENSE_SIZE;
915 	io->mif_header.mfh_data_len = blockcnt;
916 	io->mif_lba_hi = (uint32_t)(blockno >> 32);
917 	io->mif_lba_lo = (uint32_t)(blockno & 0xffffffffull);
918 
919 	handy = ccb->ccb_psense;
920 	io->mif_sense_addr_hi = htole32((u_int32_t)(handy >> 32));
921 	io->mif_sense_addr_lo = htole32(handy);
922 
923 	ccb->ccb_done = mfi_scsi_xs_done;
924 	ccb->ccb_cookie = xs;
925 	ccb->ccb_frame_size = MFI_IO_FRAME_SIZE;
926 	ccb->ccb_sgl = &io->mif_sgl;
927 	ccb->ccb_data = xs->data;
928 	ccb->ccb_len = xs->datalen;
929 
930 	if (mfi_create_sgl(ccb, (xs->flags & SCSI_NOSLEEP) ?
931 	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
932 		return (1);
933 
934 	return (0);
935 }
936 
937 void
938 mfi_scsi_xs_done(struct mfi_ccb *ccb)
939 {
940 	struct scsi_xfer	*xs = ccb->ccb_cookie;
941 	struct mfi_softc	*sc = ccb->ccb_sc;
942 	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
943 
944 	DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %#x %#x\n",
945 	    DEVNAME(sc), ccb, ccb->ccb_frame);
946 
947 	if (xs->data != NULL) {
948 		DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done sync\n",
949 		    DEVNAME(sc));
950 		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
951 		    ccb->ccb_dmamap->dm_mapsize,
952 		    (xs->flags & SCSI_DATA_IN) ?
953 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
954 
955 		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
956 	}
957 
958 	switch (hdr->mfh_cmd_status) {
959 	case MFI_STAT_OK:
960 		xs->resid = 0;
961 		break;
962 
963 	case MFI_STAT_SCSI_DONE_WITH_ERROR:
964 		xs->error = XS_SENSE;
965 		xs->resid = 0;
966 		memset(&xs->sense, 0, sizeof(xs->sense));
967 		memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
968 		break;
969 
970 	default:
971 		xs->error = XS_DRIVER_STUFFUP;
972 		printf("%s: mfi_scsi_xs_done stuffup %#x\n",
973 		    DEVNAME(sc), hdr->mfh_cmd_status);
974 
975 		if (hdr->mfh_scsi_status != 0) {
976 			DNPRINTF(MFI_D_INTR,
977 			    "%s: mfi_scsi_xs_done sense %#x %x %x\n",
978 			    DEVNAME(sc), hdr->mfh_scsi_status,
979 			    &xs->sense, ccb->ccb_sense);
980 			memset(&xs->sense, 0, sizeof(xs->sense));
981 			memcpy(&xs->sense, ccb->ccb_sense,
982 			    sizeof(struct scsi_sense_data));
983 			xs->error = XS_SENSE;
984 		}
985 		break;
986 	}
987 
988 	scsi_done(xs);
989 }
990 
991 int
992 mfi_scsi_ld(struct mfi_ccb *ccb, struct scsi_xfer *xs)
993 {
994 	struct scsi_link	*link = xs->sc_link;
995 	struct mfi_pass_frame	*pf;
996 	uint64_t		handy;
997 
998 	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld: %d\n",
999 	    DEVNAME((struct mfi_softc *)link->adapter_softc), link->target);
1000 
1001 	pf = &ccb->ccb_frame->mfr_pass;
1002 	pf->mpf_header.mfh_cmd = MFI_CMD_LD_SCSI_IO;
1003 	pf->mpf_header.mfh_target_id = link->target;
1004 	pf->mpf_header.mfh_lun_id = 0;
1005 	pf->mpf_header.mfh_cdb_len = xs->cmdlen;
1006 	pf->mpf_header.mfh_timeout = 0;
1007 	pf->mpf_header.mfh_data_len = xs->datalen; /* XXX */
1008 	pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;
1009 
1010 	handy = ccb->ccb_psense;
1011 	pf->mpf_sense_addr_hi = htole32((u_int32_t)(handy >> 32));
1012 	pf->mpf_sense_addr_lo = htole32(handy);
1013 
1014 	memset(pf->mpf_cdb, 0, 16);
1015 	memcpy(pf->mpf_cdb, xs->cmd, xs->cmdlen);
1016 
1017 	ccb->ccb_done = mfi_scsi_xs_done;
1018 	ccb->ccb_cookie = xs;
1019 	ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
1020 	ccb->ccb_sgl = &pf->mpf_sgl;
1021 
1022 	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
1023 		ccb->ccb_direction = xs->flags & SCSI_DATA_IN ?
1024 		    MFI_DATA_IN : MFI_DATA_OUT;
1025 	else
1026 		ccb->ccb_direction = MFI_DATA_NONE;
1027 
1028 	if (xs->data) {
1029 		ccb->ccb_data = xs->data;
1030 		ccb->ccb_len = xs->datalen;
1031 
1032 		if (mfi_create_sgl(ccb, (xs->flags & SCSI_NOSLEEP) ?
1033 		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
1034 			return (1);
1035 	}
1036 
1037 	return (0);
1038 }
1039 
1040 void
1041 mfi_scsi_cmd(struct scsi_xfer *xs)
1042 {
1043 	struct scsi_link	*link = xs->sc_link;
1044 	struct mfi_softc	*sc = link->adapter_softc;
1045 	struct device		*dev = link->device_softc;
1046 	struct mfi_ccb		*ccb = xs->io;
1047 	struct scsi_rw		*rw;
1048 	struct scsi_rw_big	*rwb;
1049 	struct scsi_rw_16	*rw16;
1050 	uint64_t		blockno;
1051 	uint32_t		blockcnt;
1052 	uint8_t			target = link->target;
1053 	uint8_t			mbox[MFI_MBOX_SIZE];
1054 
1055 	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_cmd opcode: %#x\n",
1056 	    DEVNAME(sc), xs->cmd->opcode);
1057 
1058 	if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
1059 	    link->lun != 0) {
1060 		DNPRINTF(MFI_D_CMD, "%s: invalid target %d\n",
1061 		    DEVNAME(sc), target);
1062 		goto stuffup;
1063 	}
1064 
1065 	xs->error = XS_NOERROR;
1066 
1067 	switch (xs->cmd->opcode) {
1068 	/* IO path */
1069 	case READ_BIG:
1070 	case WRITE_BIG:
1071 		rwb = (struct scsi_rw_big *)xs->cmd;
1072 		blockno = (uint64_t)_4btol(rwb->addr);
1073 		blockcnt = _2btol(rwb->length);
1074 		if (mfi_scsi_io(ccb, xs, blockno, blockcnt))
1075 			goto stuffup;
1076 		break;
1077 
1078 	case READ_COMMAND:
1079 	case WRITE_COMMAND:
1080 		rw = (struct scsi_rw *)xs->cmd;
1081 		blockno =
1082 		    (uint64_t)(_3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff));
1083 		blockcnt = rw->length ? rw->length : 0x100;
1084 		if (mfi_scsi_io(ccb, xs, blockno, blockcnt))
1085 			goto stuffup;
1086 		break;
1087 
1088 	case READ_16:
1089 	case WRITE_16:
1090 		rw16 = (struct scsi_rw_16 *)xs->cmd;
1091 		blockno = _8btol(rw16->addr);
1092 		blockcnt = _4btol(rw16->length);
1093 		if (mfi_scsi_io(ccb, xs, blockno, blockcnt))
1094 			goto stuffup;
1095 		break;
1096 
1097 	case SYNCHRONIZE_CACHE:
1098 		mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
1099 		if (mfi_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH,
1100 		    MFI_DATA_NONE, 0, NULL, mbox))
1101 			goto stuffup;
1102 
1103 		goto complete;
1104 		/* NOTREACHED */
1105 
1106 	/* hand it off to the firmware and let it deal with it */
1107 	case TEST_UNIT_READY:
1108 		/* save off sd? after autoconf */
1109 		if (!cold)	/* XXX bogus */
1110 			strlcpy(sc->sc_ld[target].ld_dev, dev->dv_xname,
1111 			    sizeof(sc->sc_ld[target].ld_dev));
1112 		/* FALLTHROUGH */
1113 
1114 	default:
1115 		if (mfi_scsi_ld(ccb, xs))
1116 			goto stuffup;
1117 		break;
1118 	}
1119 
1120 	DNPRINTF(MFI_D_CMD, "%s: start io %d\n", DEVNAME(sc), target);
1121 
1122 	if (xs->flags & SCSI_POLL) {
1123 		if (mfi_poll(ccb)) {
1124 			/* XXX check for sense in ccb->ccb_sense? */
1125 			printf("%s: mfi_scsi_cmd poll failed\n",
1126 			    DEVNAME(sc));
1127 			bzero(&xs->sense, sizeof(xs->sense));
1128 			xs->sense.error_code = SSD_ERRCODE_VALID |
1129 			    SSD_ERRCODE_CURRENT;
1130 			xs->sense.flags = SKEY_ILLEGAL_REQUEST;
1131 			xs->sense.add_sense_code = 0x20; /* invalid opcode */
1132 			xs->error = XS_SENSE;
1133 		}
1134 
1135 		scsi_done(xs);
1136 		return;
1137 	}
1138 
1139 	mfi_start(sc, ccb);
1140 
1141 	DNPRINTF(MFI_D_DMA, "%s: mfi_scsi_cmd queued %d\n", DEVNAME(sc),
1142 	    ccb->ccb_dmamap->dm_nsegs);
1143 
1144 	return;
1145 
1146 stuffup:
1147 	xs->error = XS_DRIVER_STUFFUP;
1148 complete:
1149 	scsi_done(xs);
1150 }
1151 
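/*
 * mfi_create_sgl: load the ccb's data buffer into its dma map and
 * translate the resulting segments into 32- or 64-bit MFI SG entries,
 * updating the frame's flags, SG count and extra frame count.
 */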
1152 int
1153 mfi_create_sgl(struct mfi_ccb *ccb, int flags)
1154 {
1155 	struct mfi_softc	*sc = ccb->ccb_sc;
1156 	struct mfi_frame_header	*hdr;
1157 	bus_dma_segment_t	*sgd;
1158 	union mfi_sgl		*sgl;
1159 	int			error, i;
1160 
1161 	DNPRINTF(MFI_D_DMA, "%s: mfi_create_sgl %#x\n", DEVNAME(sc),
1162 	    ccb->ccb_data);
1163 
1164 	if (!ccb->ccb_data)
1165 		return (1);
1166 
1167 	error = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap,
1168 	    ccb->ccb_data, ccb->ccb_len, NULL, flags);
1169 	if (error) {
1170 		if (error == EFBIG)
1171 			printf("more than %d dma segs\n",
1172 			    sc->sc_max_sgl);
1173 		else
1174 			printf("error %d loading dma map\n", error);
1175 		return (1);
1176 	}
1177 
1178 	hdr = &ccb->ccb_frame->mfr_header;
1179 	sgl = ccb->ccb_sgl;
1180 	sgd = ccb->ccb_dmamap->dm_segs;
1181 	for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
1182 		if (sc->sc_64bit_dma) {
1183 			sgl->sg64[i].addr = htole64(sgd[i].ds_addr);
1184 			sgl->sg64[i].len = htole32(sgd[i].ds_len);
1185 			DNPRINTF(MFI_D_DMA, "%s: addr: %#x  len: %#x\n",
1186 			    DEVNAME(sc), sgl->sg64[i].addr, sgl->sg64[i].len);
1187 		} else {
1188 			sgl->sg32[i].addr = htole32(sgd[i].ds_addr);
1189 			sgl->sg32[i].len = htole32(sgd[i].ds_len);
1190 			DNPRINTF(MFI_D_DMA, "%s: addr: %#x  len: %#x\n",
1191 			    DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len);
1192 		}
1193 	}
1194 
1195 	if (ccb->ccb_direction == MFI_DATA_IN) {
1196 		hdr->mfh_flags |= MFI_FRAME_DIR_READ;
1197 		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1198 		    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1199 	} else {
1200 		hdr->mfh_flags |= MFI_FRAME_DIR_WRITE;
1201 		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1202 		    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1203 	}
1204 
1205 	hdr->mfh_flags |= sc->sc_sgl_flags;
1206 	hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;
1207 	ccb->ccb_frame_size += sc->sc_sgl_size * ccb->ccb_dmamap->dm_nsegs;
1208 	ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE;
1209 
1210 	DNPRINTF(MFI_D_DMA, "%s: sg_count: %d  frame_size: %d  frames_size: %d"
1211 	    "  dm_nsegs: %d  extra_frames: %d\n",
1212 	    DEVNAME(sc),
1213 	    hdr->mfh_sg_count,
1214 	    ccb->ccb_frame_size,
1215 	    sc->sc_frames_size,
1216 	    ccb->ccb_dmamap->dm_nsegs,
1217 	    ccb->ccb_extra_frames);
1218 
1219 	return (0);
1220 }
1221 
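/*
 * mfi_mgmt/mfi_do_mgmt: issue a DCMD management command.  Data is
 * bounced through a dma_alloc() buffer; the command is polled while
 * cold and slept on otherwise.
 */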
1222 int
1223 mfi_mgmt(struct mfi_softc *sc, uint32_t opc, uint32_t dir, uint32_t len,
1224     void *buf, uint8_t *mbox)
1225 {
1226 	struct mfi_ccb *ccb;
1227 	int rv;
1228 
1229 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1230 	rv = mfi_do_mgmt(sc, ccb, opc, dir, len, buf, mbox);
1231 	scsi_io_put(&sc->sc_iopool, ccb);
1232 
1233 	return (rv);
1234 }
1235 
1236 int
1237 mfi_do_mgmt(struct mfi_softc *sc, struct mfi_ccb *ccb, uint32_t opc,
1238     uint32_t dir, uint32_t len, void *buf, uint8_t *mbox)
1239 {
1240 	struct mfi_dcmd_frame	*dcmd;
1241 	int			s, rv = EINVAL;
1242 	uint8_t			*dma_buf = NULL;
1243 
1244 	DNPRINTF(MFI_D_MISC, "%s: mfi_do_mgmt %#x\n", DEVNAME(sc), opc);
1245 
1246 	dma_buf = dma_alloc(len, PR_WAITOK);
1247 	if (dma_buf == NULL)
1248 		goto done;
1249 
1250 	dcmd = &ccb->ccb_frame->mfr_dcmd;
1251 	memset(dcmd->mdf_mbox, 0, MFI_MBOX_SIZE);
1252 	dcmd->mdf_header.mfh_cmd = MFI_CMD_DCMD;
1253 	dcmd->mdf_header.mfh_timeout = 0;
1254 
1255 	dcmd->mdf_opcode = opc;
1256 	dcmd->mdf_header.mfh_data_len = 0;
1257 	ccb->ccb_direction = dir;
1258 	ccb->ccb_done = mfi_mgmt_done;
1259 
1260 	ccb->ccb_frame_size = MFI_DCMD_FRAME_SIZE;
1261 
1262 	/* handle special opcodes */
1263 	if (mbox)
1264 		memcpy(dcmd->mdf_mbox, mbox, MFI_MBOX_SIZE);
1265 
1266 	if (dir != MFI_DATA_NONE) {
1267 		if (dir == MFI_DATA_OUT)
1268 			bcopy(buf, dma_buf, len);
1269 		dcmd->mdf_header.mfh_data_len = len;
1270 		ccb->ccb_data = dma_buf;
1271 		ccb->ccb_len = len;
1272 		ccb->ccb_sgl = &dcmd->mdf_sgl;
1273 
1274 		if (mfi_create_sgl(ccb, BUS_DMA_WAITOK)) {
1275 			rv = EINVAL;
1276 			goto done;
1277 		}
1278 	}
1279 
1280 	if (cold) {
1281 		if (mfi_poll(ccb)) {
1282 			rv = EIO;
1283 			goto done;
1284 		}
1285 	} else {
1286 		s = splbio();
1287 		mfi_start(sc, ccb);
1288 
1289 		DNPRINTF(MFI_D_MISC, "%s: mfi_do_mgmt sleeping\n", DEVNAME(sc));
1290 		while (ccb->ccb_state != MFI_CCB_DONE)
1291 			tsleep(ccb, PRIBIO, "mfimgmt", 0);
1292 		splx(s);
1293 
1294 		if (ccb->ccb_flags & MFI_CCB_F_ERR) {
1295 			rv = EIO;
1296 			goto done;
1297 		}
1298 	}
1299 
1300 	if (dir == MFI_DATA_IN)
1301 		bcopy(dma_buf, buf, len);
1302 
1303 	rv = 0;
1304 done:
1305 	if (dma_buf)
1306 		dma_free(dma_buf, len);
1307 
1308 	return (rv);
1309 }
1310 
1311 void
1312 mfi_mgmt_done(struct mfi_ccb *ccb)
1313 {
1314 	struct mfi_softc	*sc = ccb->ccb_sc;
1315 	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
1316 
1317 	DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done %#x %#x\n",
1318 	    DEVNAME(sc), ccb, ccb->ccb_frame);
1319 
1320 	if (ccb->ccb_data != NULL) {
1321 		DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
1322 		    DEVNAME(sc));
1323 		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1324 		    ccb->ccb_dmamap->dm_mapsize,
1325 		    (ccb->ccb_direction & MFI_DATA_IN) ?
1326 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1327 
1328 		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
1329 	}
1330 
1331 	if (hdr->mfh_cmd_status != MFI_STAT_OK)
1332 		ccb->ccb_flags |= MFI_CCB_F_ERR;
1333 
1334 	ccb->ccb_state = MFI_CCB_DONE;
1335 
1336 	wakeup(ccb);
1337 }
1338 
1339 int
1340 mfi_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
1341 {
1342 	struct mfi_softc	*sc = (struct mfi_softc *)link->adapter_softc;
1343 
1344 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_scsi_ioctl\n", DEVNAME(sc));
1345 
1346 	if (sc->sc_ioctl)
1347 		return (sc->sc_ioctl(link->adapter_softc, cmd, addr));
1348 	else
1349 		return (ENOTTY);
1350 }
1351 
1352 #if NBIO > 0
1353 int
1354 mfi_ioctl(struct device *dev, u_long cmd, caddr_t addr)
1355 {
1356 	struct mfi_softc	*sc = (struct mfi_softc *)dev;
1357 	int error = 0;
1358 
1359 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl ", DEVNAME(sc));
1360 
1361 	rw_enter_write(&sc->sc_lock);
1362 
1363 	switch (cmd) {
1364 	case BIOCINQ:
1365 		DNPRINTF(MFI_D_IOCTL, "inq\n");
1366 		error = mfi_ioctl_inq(sc, (struct bioc_inq *)addr);
1367 		break;
1368 
1369 	case BIOCVOL:
1370 		DNPRINTF(MFI_D_IOCTL, "vol\n");
1371 		error = mfi_ioctl_vol(sc, (struct bioc_vol *)addr);
1372 		break;
1373 
1374 	case BIOCDISK:
1375 		DNPRINTF(MFI_D_IOCTL, "disk\n");
1376 		error = mfi_ioctl_disk(sc, (struct bioc_disk *)addr);
1377 		break;
1378 
1379 	case BIOCALARM:
1380 		DNPRINTF(MFI_D_IOCTL, "alarm\n");
1381 		error = mfi_ioctl_alarm(sc, (struct bioc_alarm *)addr);
1382 		break;
1383 
1384 	case BIOCBLINK:
1385 		DNPRINTF(MFI_D_IOCTL, "blink\n");
1386 		error = mfi_ioctl_blink(sc, (struct bioc_blink *)addr);
1387 		break;
1388 
1389 	case BIOCSETSTATE:
1390 		DNPRINTF(MFI_D_IOCTL, "setstate\n");
1391 		error = mfi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
1392 		break;
1393 
1394 	default:
1395 		DNPRINTF(MFI_D_IOCTL, " invalid ioctl\n");
1396 		error = EINVAL;
1397 	}
1398 
1399 	rw_exit_write(&sc->sc_lock);
1400 
1401 	return (error);
1402 }
1403 
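/*
 * mfi_bio_getitall: refresh the cached controller info, RAID config,
 * logical drive list and per-LD details used by the bio ioctls.
 */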
1404 int
1405 mfi_bio_getitall(struct mfi_softc *sc)
1406 {
1407 	int			i, d, size, rv = EINVAL;
1408 	uint8_t			mbox[MFI_MBOX_SIZE];
1409 	struct mfi_conf		*cfg = NULL;
1410 	struct mfi_ld_details	*ld_det = NULL;
1411 
1412 	/* get info */
1413 	if (mfi_get_info(sc)) {
1414 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_get_info failed\n",
1415 		    DEVNAME(sc));
1416 		goto done;
1417 	}
1418 
1419 	/* send single element command to retrieve size for full structure */
1420 	cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
1421 	if (cfg == NULL)
1422 		goto done;
1423 	if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg,
1424 	    NULL))
1425 		goto done;
1426 
1427 	size = cfg->mfc_size;
1428 	free(cfg, M_DEVBUF);
1429 
1430 	/* memory for read config */
1431 	cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
1432 	if (cfg == NULL)
1433 		goto done;
1434 	if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, size, cfg, NULL))
1435 		goto done;
1436 
1437 	/* replace current pointer with the new one */
1438 	if (sc->sc_cfg)
1439 		free(sc->sc_cfg, M_DEVBUF);
1440 	sc->sc_cfg = cfg;
1441 
1442 	/* get all ld info */
1443 	if (mfi_mgmt(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
1444 	    sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL))
1445 		goto done;
1446 
1447 	/* get memory for all ld structures */
1448 	size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
1449 	if (sc->sc_ld_sz != size) {
1450 		if (sc->sc_ld_details)
1451 			free(sc->sc_ld_details, M_DEVBUF);
1452 
1453 		ld_det = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
1454 		if (ld_det == NULL)
1455 			goto done;
1456 		sc->sc_ld_sz = size;
1457 		sc->sc_ld_details = ld_det;
1458 	}
1459 
1460 	/* find used physical disks */
1461 	size = sizeof(struct mfi_ld_details);
1462 	for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
1463 		mbox[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
1464 		if (mfi_mgmt(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN, size,
1465 		    &sc->sc_ld_details[i], mbox))
1466 			goto done;
1467 
1468 		d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
1469 		    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
1470 	}
1471 	sc->sc_no_pd = d;
1472 
1473 	rv = 0;
1474 done:
1475 	return (rv);
1476 }
1477 
1478 int
1479 mfi_ioctl_inq(struct mfi_softc *sc, struct bioc_inq *bi)
1480 {
1481 	int			rv = EINVAL;
1482 	struct mfi_conf		*cfg = NULL;
1483 
1484 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq\n", DEVNAME(sc));
1485 
1486 	if (mfi_bio_getitall(sc)) {
1487 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_bio_getitall failed\n",
1488 		    DEVNAME(sc));
1489 		goto done;
1490 	}
1491 
1492 	/* count unused disks as volumes */
1493 	if (sc->sc_cfg == NULL)
1494 		goto done;
1495 	cfg = sc->sc_cfg;
1496 
1497 	bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
1498 	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
1499 #if notyet
1500 	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
1501 	    (bi->bi_nodisk - sc->sc_no_pd);
1502 #endif
1503 	/* tell bio who we are */
1504 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
1505 
1506 	rv = 0;
1507 done:
1508 	return (rv);
1509 }
1510 
1511 int
1512 mfi_ioctl_vol(struct mfi_softc *sc, struct bioc_vol *bv)
1513 {
1514 	int			i, per, rv = EINVAL;
1515 
1516 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol %#x\n",
1517 	    DEVNAME(sc), bv->bv_volid);
1518 
1519 	/* we really could skip and expect that inq took care of it */
1520 	if (mfi_bio_getitall(sc)) {
1521 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_bio_getitall failed\n",
1522 		    DEVNAME(sc));
1523 		goto done;
1524 	}
1525 
1526 	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
1527 		/* go do hotspares & unused disks */
1528 		rv = mfi_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
1529 		goto done;
1530 	}
1531 
1532 	i = bv->bv_volid;
1533 	strlcpy(bv->bv_dev, sc->sc_ld[i].ld_dev, sizeof(bv->bv_dev));
1534 
1535 	switch(sc->sc_ld_list.mll_list[i].mll_state) {
1536 	case MFI_LD_OFFLINE:
1537 		bv->bv_status = BIOC_SVOFFLINE;
1538 		break;
1539 
1540 	case MFI_LD_PART_DEGRADED:
1541 	case MFI_LD_DEGRADED:
1542 		bv->bv_status = BIOC_SVDEGRADED;
1543 		break;
1544 
1545 	case MFI_LD_ONLINE:
1546 		bv->bv_status = BIOC_SVONLINE;
1547 		break;
1548 
1549 	default:
1550 		bv->bv_status = BIOC_SVINVALID;
1551 		DNPRINTF(MFI_D_IOCTL, "%s: invalid logical disk state %#x\n",
1552 		    DEVNAME(sc),
1553 		    sc->sc_ld_list.mll_list[i].mll_state);
1554 	}
1555 
1556 	/* additional status can modify MFI status */
1557 	switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
1558 	case MFI_LD_PROG_CC:
1559 	case MFI_LD_PROG_BGI:
1560 		bv->bv_status = BIOC_SVSCRUB;
1561 		per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
1562 		bv->bv_percent = (per * 100) / 0xffff;
1563 		bv->bv_seconds =
1564 		    sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
1565 		break;
1566 
1567 	case MFI_LD_PROG_FGI:
1568 	case MFI_LD_PROG_RECONSTRUCT:
1569 		/* nothing yet */
1570 		break;
1571 	}
1572 
1573 	/*
1574 	 * The RAID levels are determined per the SNIA DDF spec, this is only
1575 	 * a subset that is valid for the MFI controller.
1576 	 */
1577 	bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
1578 	if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_sec_raid ==
1579 	    MFI_DDF_SRL_SPANNED)
1580 		bv->bv_level *= 10;
1581 
1582 	bv->bv_nodisk = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
1583 	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
1584 
1585 	bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */
1586 
1587 	rv = 0;
1588 done:
1589 	return (rv);
1590 }
1591 
1592 int
1593 mfi_ioctl_disk(struct mfi_softc *sc, struct bioc_disk *bd)
1594 {
1595 	struct mfi_conf		*cfg;
1596 	struct mfi_array	*ar;
1597 	struct mfi_ld_cfg	*ld;
1598 	struct mfi_pd_details	*pd;
1599 	struct scsi_inquiry_data *inqbuf;
1600 	char			vend[8+16+4+1], *vendp;
1601 	int			rv = EINVAL;
1602 	int			arr, vol, disk, span;
1603 	uint8_t			mbox[MFI_MBOX_SIZE];
1604 
1605 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_disk %#x\n",
1606 	    DEVNAME(sc), bd->bd_diskid);
1607 
1608 	/* we really could skip and expect that inq took care of it */
1609 	if (mfi_bio_getitall(sc)) {
1610 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_bio_getitall failed\n",
1611 		    DEVNAME(sc));
1612 		return (rv);
1613 	}
1614 	cfg = sc->sc_cfg;
1615 
1616 	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
1617 
1618 	ar = cfg->mfc_array;
1619 	vol = bd->bd_volid;
1620 	if (vol >= cfg->mfc_no_ld) {
1621 		/* do hotspares */
1622 		rv = mfi_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
1623 		goto freeme;
1624 	}
1625 
1626 	/* calculate offset to ld structure */
1627 	ld = (struct mfi_ld_cfg *)(
1628 	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
1629 	    cfg->mfc_array_size * cfg->mfc_no_array);
1630 
1631 	/* use span 0 only when raid group is not spanned */
1632 	if (ld[vol].mlc_parm.mpa_span_depth > 1)
1633 		span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
1634 	else
1635 		span = 0;
1636 	arr = ld[vol].mlc_span[span].mls_index;
1637 
1638 	/* offset disk into pd list */
1639 	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;
1640 	bd->bd_target = ar[arr].pd[disk].mar_enc_slot;
1641 
1642 	/* get status */
1643 	switch (ar[arr].pd[disk].mar_pd_state){
1644 	case MFI_PD_UNCONFIG_GOOD:
1645 	case MFI_PD_FAILED:
1646 		bd->bd_status = BIOC_SDFAILED;
1647 		break;
1648 
1649 	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
1650 		bd->bd_status = BIOC_SDHOTSPARE;
1651 		break;
1652 
1653 	case MFI_PD_OFFLINE:
1654 		bd->bd_status = BIOC_SDOFFLINE;
1655 		break;
1656 
1657 	case MFI_PD_REBUILD:
1658 		bd->bd_status = BIOC_SDREBUILD;
1659 		break;
1660 
1661 	case MFI_PD_ONLINE:
1662 		bd->bd_status = BIOC_SDONLINE;
1663 		break;
1664 
1665 	case MFI_PD_UNCONFIG_BAD: /* XXX define new state in bio */
1666 	default:
1667 		bd->bd_status = BIOC_SDINVALID;
1668 		break;
1669 	}
1670 
1671 	/* get the remaining fields */
1672 	*((uint16_t *)&mbox) = ar[arr].pd[disk].mar_pd.mfp_id;
1673 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
1674 	    sizeof *pd, pd, mbox)) {
1675 		/* disk is missing, but let the command succeed anyway */
1676 		rv = 0;
1677 		goto freeme;
1678 	}
1679 
1680 	bd->bd_size = pd->mpd_size * 512; /* bytes per block */
1681 
1682 	/* if pd->mpd_enc_idx is 0 then it is not in an enclosure */
1683 	bd->bd_channel = pd->mpd_enc_idx;
1684 
1685 	inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
1686 	vendp = inqbuf->vendor;
1687 	memcpy(vend, vendp, sizeof vend - 1);
1688 	vend[sizeof vend - 1] = '\0';
1689 	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));
1690 
1691 	/* XXX find a way to retrieve serial nr from drive */
1692 	/* XXX find a way to get bd_procdev */
1693 
1694 	rv = 0;
1695 freeme:
1696 	free(pd, M_DEVBUF);
1697 
1698 	return (rv);
1699 }
1700 
1701 int
1702 mfi_ioctl_alarm(struct mfi_softc *sc, struct bioc_alarm *ba)
1703 {
1704 	uint32_t		opc, dir = MFI_DATA_NONE;
1705 	int			rv = 0;
1706 	int8_t			ret;
1707 
1708 	switch(ba->ba_opcode) {
1709 	case BIOC_SADISABLE:
1710 		opc = MR_DCMD_SPEAKER_DISABLE;
1711 		break;
1712 
1713 	case BIOC_SAENABLE:
1714 		opc = MR_DCMD_SPEAKER_ENABLE;
1715 		break;
1716 
1717 	case BIOC_SASILENCE:
1718 		opc = MR_DCMD_SPEAKER_SILENCE;
1719 		break;
1720 
1721 	case BIOC_GASTATUS:
1722 		opc = MR_DCMD_SPEAKER_GET;
1723 		dir = MFI_DATA_IN;
1724 		break;
1725 
1726 	case BIOC_SATEST:
1727 		opc = MR_DCMD_SPEAKER_TEST;
1728 		break;
1729 
1730 	default:
1731 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_alarm biocalarm invalid "
1732 		    "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
1733 		return (EINVAL);
1734 	}
1735 
1736 	if (mfi_mgmt(sc, opc, dir, sizeof(ret), &ret, NULL))
1737 		rv = EINVAL;
1738 	else
1739 		if (ba->ba_opcode == BIOC_GASTATUS)
1740 			ba->ba_status = ret;
1741 		else
1742 			ba->ba_status = 0;
1743 
1744 	return (rv);
1745 }
1746 
1747 int
1748 mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *bb)
1749 {
1750 	int			i, found, rv = EINVAL;
1751 	uint8_t			mbox[MFI_MBOX_SIZE];
1752 	uint32_t		cmd;
1753 	struct mfi_pd_list	*pd;
1754 
1755 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink %x\n", DEVNAME(sc),
1756 	    bb->bb_status);
1757 
1758 	/* channel 0 means not in an enclosure so can't be blinked */
1759 	if (bb->bb_channel == 0)
1760 		return (EINVAL);
1761 
1762 	pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
1763 
1764 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1765 	    MFI_PD_LIST_SIZE, pd, NULL))
1766 		goto done;
1767 
1768 	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1769 		if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
1770 		    bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
1771 			found = 1;
1772 			break;
1773 		}
1774 
1775 	if (!found)
1776 		goto done;
1777 
1778 	memset(mbox, 0, sizeof mbox);
1779 
1780 	*((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
1781 
1782 	switch (bb->bb_status) {
1783 	case BIOC_SBUNBLINK:
1784 		cmd = MR_DCMD_PD_UNBLINK;
1785 		break;
1786 
1787 	case BIOC_SBBLINK:
1788 		cmd = MR_DCMD_PD_BLINK;
1789 		break;
1790 
1791 	case BIOC_SBALARM:
1792 	default:
1793 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink biocblink invalid "
1794 		    "opcode %x\n", DEVNAME(sc), bb->bb_status);
1795 		goto done;
1796 	}
1797 
1798 
1799 	if (mfi_mgmt(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox))
1800 		goto done;
1801 
1802 	rv = 0;
1803 done:
1804 	free(pd, M_DEVBUF);
1805 	return (rv);
1806 }
1807 
1808 int
1809 mfi_ioctl_setstate(struct mfi_softc *sc, struct bioc_setstate *bs)
1810 {
1811 	struct mfi_pd_list	*pd;
1812 	int			i, found, rv = EINVAL;
1813 	uint8_t			mbox[MFI_MBOX_SIZE];
1814 
1815 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate %x\n", DEVNAME(sc),
1816 	    bs->bs_status);
1817 
1818 	pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
1819 
1820 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1821 	    MFI_PD_LIST_SIZE, pd, NULL))
1822 		goto done;
1823 
1824 	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1825 		if (bs->bs_channel == pd->mpl_address[i].mpa_enc_index &&
1826 		    bs->bs_target == pd->mpl_address[i].mpa_enc_slot) {
1827 			found = 1;
1828 			break;
1829 		}
1830 
1831 	if (!found)
1832 		goto done;
1833 
1834 	memset(mbox, 0, sizeof mbox);
1835 
1836 	*((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
1837 
1838 	switch (bs->bs_status) {
1839 	case BIOC_SSONLINE:
1840 		mbox[2] = MFI_PD_ONLINE;
1841 		break;
1842 
1843 	case BIOC_SSOFFLINE:
1844 		mbox[2] = MFI_PD_OFFLINE;
1845 		break;
1846 
1847 	case BIOC_SSHOTSPARE:
1848 		mbox[2] = MFI_PD_HOTSPARE;
1849 		break;
1850 /*
1851 	case BIOC_SSREBUILD:
1852 		break;
1853 */
1854 	default:
1855 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate invalid "
1856 		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
1857 		goto done;
1858 	}
1859 
1860 
1861 	if (mfi_mgmt(sc, MD_DCMD_PD_SET_STATE, MFI_DATA_NONE, 0, NULL, mbox))
1862 		goto done;
1863 
1864 	rv = 0;
1865 done:
1866 	free(pd, M_DEVBUF);
1867 	return (rv);
1868 }
1869 
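/*
 * mfi_bio_hs: report a hotspare (volume ids beyond the configured
 * logical drives) as either a bioc_vol or a bioc_disk, depending on
 * the requested type.
 */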
1870 int
1871 mfi_bio_hs(struct mfi_softc *sc, int volid, int type, void *bio_hs)
1872 {
1873 	struct mfi_conf		*cfg;
1874 	struct mfi_hotspare	*hs;
1875 	struct mfi_pd_details	*pd;
1876 	struct bioc_disk	*sdhs;
1877 	struct bioc_vol		*vdhs;
1878 	struct scsi_inquiry_data *inqbuf;
1879 	char			vend[8+16+4+1], *vendp;
1880 	int			i, rv = EINVAL;
1881 	uint32_t		size;
1882 	uint8_t			mbox[MFI_MBOX_SIZE];
1883 
1884 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs %d\n", DEVNAME(sc), volid);
1885 
1886 	if (!bio_hs)
1887 		return (EINVAL);
1888 
1889 	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
1890 
1891 	/* send single element command to retrieve size for full structure */
1892 	cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1893 	if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg, NULL))
1894 		goto freeme;
1895 
1896 	size = cfg->mfc_size;
1897 	free(cfg, M_DEVBUF);
1898 
1899 	/* memory for read config */
1900 	cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
1901 	if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, size, cfg, NULL))
1902 		goto freeme;
1903 
1904 	/* calculate offset to hs structure */
1905 	hs = (struct mfi_hotspare *)(
1906 	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
1907 	    cfg->mfc_array_size * cfg->mfc_no_array +
1908 	    cfg->mfc_ld_size * cfg->mfc_no_ld);
1909 
1910 	if (volid < cfg->mfc_no_ld)
1911 		goto freeme; /* not a hotspare */
1912 
1913 	if (volid >= (cfg->mfc_no_ld + cfg->mfc_no_hs))
1914 		goto freeme; /* not a hotspare */
1915 
1916 	/* offset into hotspare structure */
1917 	i = volid - cfg->mfc_no_ld;
1918 
1919 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs i %d volid %d no_ld %d no_hs %d "
1920 	    "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
1921 	    cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
1922 
1923 	/* get pd fields */
1924 	memset(mbox, 0, sizeof mbox);
1925 	*((uint16_t *)&mbox) = hs[i].mhs_pd.mfp_id;
1926 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
1927 	    sizeof *pd, pd, mbox)) {
1928 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs illegal PD\n",
1929 		    DEVNAME(sc));
1930 		goto freeme;
1931 	}
1932 
1933 	switch (type) {
1934 	case MFI_MGMT_VD:
1935 		vdhs = bio_hs;
1936 		vdhs->bv_status = BIOC_SVONLINE;
1937 		vdhs->bv_size = pd->mpd_size / 2 * 1024; /* 512-byte sectors to bytes */
1938 		vdhs->bv_level = -1; /* hotspare */
1939 		vdhs->bv_nodisk = 1;
1940 		break;
1941 
1942 	case MFI_MGMT_SD:
1943 		sdhs = bio_hs;
1944 		sdhs->bd_status = BIOC_SDHOTSPARE;
1945 		sdhs->bd_size = pd->mpd_size / 2 * 1024; /* 512-byte sectors to bytes */
1946 		sdhs->bd_channel = pd->mpd_enc_idx;
1947 		sdhs->bd_target = pd->mpd_enc_slot;
1948 		inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
1949 		vendp = inqbuf->vendor;
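		/* copy vendor, product and revision (8+16+4 bytes) in one go */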
1950 		memcpy(vend, vendp, sizeof vend - 1);
1951 		vend[sizeof vend - 1] = '\0';
1952 		strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
1953 		break;
1954 
1955 	default:
1956 		goto freeme;
1957 	}
1958 
1959 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs 6\n", DEVNAME(sc));
1960 	rv = 0;
1961 freeme:
1962 	free(pd, M_DEVBUF);
1963 	free(cfg, M_DEVBUF);
1964 
1965 	return (rv);
1966 }
1967 
1968 #ifndef SMALL_KERNEL
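/*
 * Register one drive sensor per logical disk on the scsibus attached to
 * this controller so volume status shows up in the sensor framework.
 */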
1969 int
1970 mfi_create_sensors(struct mfi_softc *sc)
1971 {
1972 	struct device		*dev;
1973 	struct scsibus_softc	*ssc = NULL;
1974 	struct scsi_link	*link;
1975 	int			i;
1976 
1977 	TAILQ_FOREACH(dev, &alldevs, dv_list) {
1978 		if (dev->dv_parent != &sc->sc_dev)
1979 			continue;
1980 
1981 		/* check if this is the scsibus for the logical disks */
1982 		ssc = (struct scsibus_softc *)dev;
1983 		if (ssc->adapter_link == &sc->sc_link)
1984 			break;
1985 	}
1986 
1987 	if (ssc == NULL)
1988 		return (1);
1989 
1990 	sc->sc_sensors = malloc(sizeof(struct ksensor) * sc->sc_ld_cnt,
1991 	    M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
1992 	if (sc->sc_sensors == NULL)
1993 		return (1);
1994 
1995 	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
1996 	    sizeof(sc->sc_sensordev.xname));
1997 
1998 	for (i = 0; i < sc->sc_ld_cnt; i++) {
1999 		link = scsi_get_link(ssc, i, 0);
2000 		if (link == NULL)
2001 			goto bad;
2002 
2003 		dev = link->device_softc;
2004 
2005 		sc->sc_sensors[i].type = SENSOR_DRIVE;
2006 		sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
2007 
2008 		strlcpy(sc->sc_sensors[i].desc, dev->dv_xname,
2009 		    sizeof(sc->sc_sensors[i].desc));
2010 
2011 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
2012 	}
2013 
2014 	if (sensor_task_register(sc, mfi_refresh_sensors, 10) == NULL)
2015 		goto bad;
2016 
2017 	sensordev_install(&sc->sc_sensordev);
2018 
2019 	return (0);
2020 
2021 bad:
2022 	free(sc->sc_sensors, M_DEVBUF);
2023 
2024 	return (1);
2025 }
2026 
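/*
 * Periodic sensor task: translate each volume's bio status into the
 * matching drive sensor value and status.
 */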
2027 void
2028 mfi_refresh_sensors(void *arg)
2029 {
2030 	struct mfi_softc	*sc = arg;
2031 	int			i;
2032 	struct bioc_vol		bv;
2033 
2035 	for (i = 0; i < sc->sc_ld_cnt; i++) {
2036 		bzero(&bv, sizeof(bv));
2037 		bv.bv_volid = i;
2038 		if (mfi_ioctl_vol(sc, &bv))
2039 			return;
2040 
2041 		switch(bv.bv_status) {
2042 		case BIOC_SVOFFLINE:
2043 			sc->sc_sensors[i].value = SENSOR_DRIVE_FAIL;
2044 			sc->sc_sensors[i].status = SENSOR_S_CRIT;
2045 			break;
2046 
2047 		case BIOC_SVDEGRADED:
2048 			sc->sc_sensors[i].value = SENSOR_DRIVE_PFAIL;
2049 			sc->sc_sensors[i].status = SENSOR_S_WARN;
2050 			break;
2051 
2052 		case BIOC_SVSCRUB:
2053 		case BIOC_SVONLINE:
2054 			sc->sc_sensors[i].value = SENSOR_DRIVE_ONLINE;
2055 			sc->sc_sensors[i].status = SENSOR_S_OK;
2056 			break;
2057 
2058 		case BIOC_SVINVALID:
2059 			/* FALLTHROUGH */
2060 		default:
2061 			sc->sc_sensors[i].value = 0; /* unknown */
2062 			sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
2063 		}
2065 	}
2066 }
2067 #endif /* SMALL_KERNEL */
2068 #endif /* NBIO > 0 */
2069 
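/*
 * Hand a ccb to the firmware: sync the frame memory and let the
 * controller-specific post routine write the frame address to the
 * inbound queue port.
 */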
2070 void
2071 mfi_start(struct mfi_softc *sc, struct mfi_ccb *ccb)
2072 {
2073 	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
2074 	    ccb->ccb_pframe_offset, sc->sc_frames_size,
2075 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2076 
2077 	mfi_post(sc, ccb);
2078 }
2079 
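/*
 * Common completion path: sync the frame memory so the status the
 * firmware wrote back is visible, then run the ccb's done callback.
 */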
2080 void
2081 mfi_done(struct mfi_ccb *ccb)
2082 {
2083 	struct mfi_softc	*sc = ccb->ccb_sc;
2084 
2085 	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
2086 	    ccb->ccb_pframe_offset, sc->sc_frames_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2087 
2088 	ccb->ccb_done(ccb);
2089 }
2090 
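/*
 * xscale controller access: firmware state is read from OMSG0,
 * interrupts are acknowledged by writing the outbound status back to
 * OSTS, and frames are posted to IQP as the physical frame address
 * shifted right by three with the extra frame count in the low bits.
 */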
2091 u_int32_t
2092 mfi_xscale_fw_state(struct mfi_softc *sc)
2093 {
2094 	return (mfi_read(sc, MFI_OMSG0));
2095 }
2096 
2097 void
2098 mfi_xscale_intr_ena(struct mfi_softc *sc)
2099 {
2100 	mfi_write(sc, MFI_OMSK, MFI_ENABLE_INTR);
2101 }
2102 
2103 int
2104 mfi_xscale_intr(struct mfi_softc *sc)
2105 {
2106 	u_int32_t status;
2107 
2108 	status = mfi_read(sc, MFI_OSTS);
2109 	if (!ISSET(status, MFI_OSTS_INTR_VALID))
2110 		return (0);
2111 
2112 	/* write status back to acknowledge interrupt */
2113 	mfi_write(sc, MFI_OSTS, status);
2114 
2115 	return (1);
2116 }
2117 
2118 void
2119 mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2120 {
2121 	mfi_write(sc, MFI_IQP, (ccb->ccb_pframe >> 3) |
2122 	    ccb->ccb_extra_frames);
2123 }
2124 
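/*
 * ppc controller access: firmware state is read from OSP and interrupts
 * are acknowledged through ODC instead of OSTS; frames are posted to
 * IQP with bit 0 set and the extra frame count shifted in above it.
 */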
2125 u_int32_t
2126 mfi_ppc_fw_state(struct mfi_softc *sc)
2127 {
2128 	return (mfi_read(sc, MFI_OSP));
2129 }
2130 
2131 void
2132 mfi_ppc_intr_ena(struct mfi_softc *sc)
2133 {
2134 	mfi_write(sc, MFI_ODC, 0xffffffff);
2135 	mfi_write(sc, MFI_OMSK, ~0x80000004);
2136 }
2137 
2138 int
2139 mfi_ppc_intr(struct mfi_softc *sc)
2140 {
2141 	u_int32_t status;
2142 
2143 	status = mfi_read(sc, MFI_OSTS);
2144 	if (!ISSET(status, MFI_OSTS_PPC_INTR_VALID))
2145 		return (0);
2146 
2147 	/* write status back to acknowledge interrupt */
2148 	mfi_write(sc, MFI_ODC, status);
2149 
2150 	return (1);
2151 }
2152 
2153 void
2154 mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2155 {
2156 	mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
2157 	    (ccb->ccb_extra_frames << 1));
2158 }
2159 
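/*
 * gen2 controller access: same flow as ppc, differing only in the
 * interrupt-valid bit tested in OSTS and the mask written to OMSK.
 */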
2160 u_int32_t
2161 mfi_gen2_fw_state(struct mfi_softc *sc)
2162 {
2163 	return (mfi_read(sc, MFI_OSP));
2164 }
2165 
2166 void
2167 mfi_gen2_intr_ena(struct mfi_softc *sc)
2168 {
2169 	mfi_write(sc, MFI_ODC, 0xffffffff);
2170 	mfi_write(sc, MFI_OMSK, ~MFI_OSTS_GEN2_INTR_VALID);
2171 }
2172 
2173 int
2174 mfi_gen2_intr(struct mfi_softc *sc)
2175 {
2176 	u_int32_t status;
2177 
2178 	status = mfi_read(sc, MFI_OSTS);
2179 	if (!ISSET(status, MFI_OSTS_GEN2_INTR_VALID))
2180 		return (0);
2181 
2182 	/* write status back to acknowledge interrupt */
2183 	mfi_write(sc, MFI_ODC, status);
2184 
2185 	return (1);
2186 }
2187 
2188 void
2189 mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2190 {
2191 	mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
2192 	    (ccb->ccb_extra_frames << 1));
2193 }
2194 
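/*
 * skinny controller access: interrupts are acknowledged through OSTS
 * and frames are posted via the 64-bit inbound queue port pair
 * IQPL/IQPH, with the high word always written as zero here.
 */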
2195 u_int32_t
2196 mfi_skinny_fw_state(struct mfi_softc *sc)
2197 {
2198 	return (mfi_read(sc, MFI_OSP));
2199 }
2200 
2201 void
2202 mfi_skinny_intr_ena(struct mfi_softc *sc)
2203 {
2204 	mfi_write(sc, MFI_OMSK, ~0x00000001);
2205 }
2206 
2207 int
2208 mfi_skinny_intr(struct mfi_softc *sc)
2209 {
2210 	u_int32_t status;
2211 
2212 	status = mfi_read(sc, MFI_OSTS);
2213 	if (!ISSET(status, MFI_OSTS_SKINNY_INTR_VALID))
2214 		return (0);
2215 
2216 	/* write status back to acknowledge interrupt */
2217 	mfi_write(sc, MFI_OSTS, status);
2218 
2219 	return (1);
2220 }
2221 
2222 void
2223 mfi_skinny_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2224 {
2225 	mfi_write(sc, MFI_IQPL, 0x1 | ccb->ccb_pframe |
2226 	    (ccb->ccb_extra_frames << 1));
2227 	mfi_write(sc, MFI_IQPH, 0x00000000);
2228 }
2229