1 /* $NetBSD: mfi.c,v 1.38 2012/03/21 14:22:36 sborrill Exp $ */
2 /* $OpenBSD: mfi.c,v 1.66 2006/11/28 23:59:45 dlg Exp $ */
3 /*
4  * Copyright (c) 2006 Marco Peereboom <marco@peereboom.us>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/cdefs.h>
20 __KERNEL_RCSID(0, "$NetBSD: mfi.c,v 1.38 2012/03/21 14:22:36 sborrill Exp $");
21 
22 #include "bio.h"
23 
24 #include <sys/param.h>
25 #include <sys/systm.h>
26 #include <sys/buf.h>
27 #include <sys/ioctl.h>
28 #include <sys/device.h>
29 #include <sys/kernel.h>
30 #include <sys/malloc.h>
31 #include <sys/proc.h>
32 
33 #include <uvm/uvm_param.h>
34 
35 #include <sys/bus.h>
36 
37 #include <dev/scsipi/scsipi_all.h>
38 #include <dev/scsipi/scsi_all.h>
39 #include <dev/scsipi/scsi_spc.h>
40 #include <dev/scsipi/scsipi_disk.h>
41 #include <dev/scsipi/scsi_disk.h>
42 #include <dev/scsipi/scsiconf.h>
43 
44 #include <dev/ic/mfireg.h>
45 #include <dev/ic/mfivar.h>
46 
47 #if NBIO > 0
48 #include <dev/biovar.h>
49 #endif /* NBIO > 0 */
50 
51 #ifdef MFI_DEBUG
52 uint32_t	mfi_debug = 0
53 /*		    | MFI_D_CMD */
54 /*		    | MFI_D_INTR */
55 /*		    | MFI_D_MISC */
56 /*		    | MFI_D_DMA */
57 		    | MFI_D_IOCTL
58 /*		    | MFI_D_RW */
59 /*		    | MFI_D_MEM */
60 /*		    | MFI_D_CCB */
61 		;
62 #endif
63 
64 static void		mfi_scsipi_request(struct scsipi_channel *,
65 				scsipi_adapter_req_t, void *);
66 static void		mfiminphys(struct buf *bp);
67 
68 static struct mfi_ccb	*mfi_get_ccb(struct mfi_softc *);
69 static void		mfi_put_ccb(struct mfi_ccb *);
70 static int		mfi_init_ccb(struct mfi_softc *);
71 
72 static struct mfi_mem	*mfi_allocmem(struct mfi_softc *, size_t);
73 static void		mfi_freemem(struct mfi_softc *, struct mfi_mem **);
74 
75 static int		mfi_transition_firmware(struct mfi_softc *);
76 static int		mfi_initialize_firmware(struct mfi_softc *);
77 static int		mfi_get_info(struct mfi_softc *);
78 static uint32_t		mfi_read(struct mfi_softc *, bus_size_t);
79 static void		mfi_write(struct mfi_softc *, bus_size_t, uint32_t);
80 static int		mfi_poll(struct mfi_ccb *);
81 static int		mfi_create_sgl(struct mfi_ccb *, int);
82 
83 /* commands */
84 static int		mfi_scsi_ld(struct mfi_ccb *, struct scsipi_xfer *);
85 static int		mfi_scsi_io(struct mfi_ccb *, struct scsipi_xfer *,
86 				uint32_t, uint32_t);
87 static void		mfi_scsi_xs_done(struct mfi_ccb *);
88 static int		mfi_mgmt_internal(struct mfi_softc *,
89 			    uint32_t, uint32_t, uint32_t, void *, uint8_t *);
90 static int		mfi_mgmt(struct mfi_ccb *,struct scsipi_xfer *,
91 			    uint32_t, uint32_t, uint32_t, void *, uint8_t *);
92 static void		mfi_mgmt_done(struct mfi_ccb *);
93 
94 #if NBIO > 0
95 static int		mfi_ioctl(device_t, u_long, void *);
96 static int		mfi_ioctl_inq(struct mfi_softc *, struct bioc_inq *);
97 static int		mfi_ioctl_vol(struct mfi_softc *, struct bioc_vol *);
98 static int		mfi_ioctl_disk(struct mfi_softc *, struct bioc_disk *);
99 static int		mfi_ioctl_alarm(struct mfi_softc *,
100 				struct bioc_alarm *);
101 static int		mfi_ioctl_blink(struct mfi_softc *sc,
102 				struct bioc_blink *);
103 static int		mfi_ioctl_setstate(struct mfi_softc *,
104 				struct bioc_setstate *);
105 static int		mfi_bio_hs(struct mfi_softc *, int, int, void *);
106 static int		mfi_create_sensors(struct mfi_softc *);
107 static int		mfi_destroy_sensors(struct mfi_softc *);
108 static void		mfi_sensor_refresh(struct sysmon_envsys *,
109 				envsys_data_t *);
110 #endif /* NBIO > 0 */
111 
112 static uint32_t 	mfi_xscale_fw_state(struct mfi_softc *sc);
113 static void 		mfi_xscale_intr_ena(struct mfi_softc *sc);
114 static void 		mfi_xscale_intr_dis(struct mfi_softc *sc);
115 static int 		mfi_xscale_intr(struct mfi_softc *sc);
116 static void 		mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
117 
118 static const struct mfi_iop_ops mfi_iop_xscale = {
119 	mfi_xscale_fw_state,
120 	mfi_xscale_intr_dis,
121 	mfi_xscale_intr_ena,
122 	mfi_xscale_intr,
123 	mfi_xscale_post
124 };
125 
126 static uint32_t 	mfi_ppc_fw_state(struct mfi_softc *sc);
127 static void 		mfi_ppc_intr_ena(struct mfi_softc *sc);
128 static void 		mfi_ppc_intr_dis(struct mfi_softc *sc);
129 static int 		mfi_ppc_intr(struct mfi_softc *sc);
130 static void 		mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
131 
132 static const struct mfi_iop_ops mfi_iop_ppc = {
133 	mfi_ppc_fw_state,
134 	mfi_ppc_intr_dis,
135 	mfi_ppc_intr_ena,
136 	mfi_ppc_intr,
137 	mfi_ppc_post
138 };
139 
140 uint32_t	mfi_gen2_fw_state(struct mfi_softc *sc);
141 void		mfi_gen2_intr_ena(struct mfi_softc *sc);
142 void		mfi_gen2_intr_dis(struct mfi_softc *sc);
143 int		mfi_gen2_intr(struct mfi_softc *sc);
144 void		mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
145 
146 static const struct mfi_iop_ops mfi_iop_gen2 = {
147 	mfi_gen2_fw_state,
148 	mfi_gen2_intr_dis,
149 	mfi_gen2_intr_ena,
150 	mfi_gen2_intr,
151 	mfi_gen2_post
152 };
153 
154 u_int32_t	mfi_skinny_fw_state(struct mfi_softc *);
155 void		mfi_skinny_intr_dis(struct mfi_softc *);
156 void		mfi_skinny_intr_ena(struct mfi_softc *);
157 int		mfi_skinny_intr(struct mfi_softc *);
158 void		mfi_skinny_post(struct mfi_softc *, struct mfi_ccb *);
159 
160 static const struct mfi_iop_ops mfi_iop_skinny = {
161 	mfi_skinny_fw_state,
162 	mfi_skinny_intr_dis,
163 	mfi_skinny_intr_ena,
164 	mfi_skinny_intr,
165 	mfi_skinny_post
166 };
167 
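/*
 * Convenience wrappers that dispatch to the register access routines of
 * the I/O processor variant (xscale, ppc, gen2, skinny) selected at
 * attach time via sc_iop.
 */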
168 #define mfi_fw_state(_s) 	((_s)->sc_iop->mio_fw_state(_s))
169 #define mfi_intr_enable(_s) 	((_s)->sc_iop->mio_intr_ena(_s))
170 #define mfi_intr_disable(_s) 	((_s)->sc_iop->mio_intr_dis(_s))
171 #define mfi_my_intr(_s) 	((_s)->sc_iop->mio_intr(_s))
172 #define mfi_post(_s, _c) 	((_s)->sc_iop->mio_post((_s), (_c)))
173 
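/*
 * Grab a command control block from the free list, or return NULL if
 * none are available.  Runs at splbio() because the list is also
 * manipulated from interrupt completion handlers.
 */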
174 static struct mfi_ccb *
175 mfi_get_ccb(struct mfi_softc *sc)
176 {
177 	struct mfi_ccb		*ccb;
178 	int			s;
179 
180 	s = splbio();
181 	ccb = TAILQ_FIRST(&sc->sc_ccb_freeq);
182 	if (ccb) {
183 		TAILQ_REMOVE(&sc->sc_ccb_freeq, ccb, ccb_link);
184 		ccb->ccb_state = MFI_CCB_READY;
185 	}
186 	splx(s);
187 
188 	DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb);
189 
190 	return ccb;
191 }
192 
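/*
 * Reset a ccb to its pristine state and return it to the free list.
 */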
193 static void
194 mfi_put_ccb(struct mfi_ccb *ccb)
195 {
196 	struct mfi_softc	*sc = ccb->ccb_sc;
197 	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
198 	int			s;
199 
200 	DNPRINTF(MFI_D_CCB, "%s: mfi_put_ccb: %p\n", DEVNAME(sc), ccb);
201 
202 	hdr->mfh_cmd_status = 0x0;
203 	hdr->mfh_flags = 0x0;
204 	ccb->ccb_state = MFI_CCB_FREE;
205 	ccb->ccb_xs = NULL;
206 	ccb->ccb_flags = 0;
207 	ccb->ccb_done = NULL;
208 	ccb->ccb_direction = 0;
209 	ccb->ccb_frame_size = 0;
210 	ccb->ccb_extra_frames = 0;
211 	ccb->ccb_sgl = NULL;
212 	ccb->ccb_data = NULL;
213 	ccb->ccb_len = 0;
214 
215 	s = splbio();
216 	TAILQ_INSERT_TAIL(&sc->sc_ccb_freeq, ccb, ccb_link);
217 	splx(s);
218 }
219 
220 static int
221 mfi_destroy_ccb(struct mfi_softc *sc)
222 {
223 	struct mfi_ccb		*ccb;
224 	uint32_t		i;
225 
226 	DNPRINTF(MFI_D_CCB, "%s: mfi_destroy_ccb\n", DEVNAME(sc));
227 
228 
229 	for (i = 0; (ccb = mfi_get_ccb(sc)) != NULL; i++) {
230 		/* destroy the dma map created for this transfer */
231 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
232 	}
233 
234 	if (i < sc->sc_max_cmds)
235 		return EBUSY;
236 
237 	free(sc->sc_ccb, M_DEVBUF);
238 
239 	return 0;
240 }
241 
242 static int
243 mfi_init_ccb(struct mfi_softc *sc)
244 {
245 	struct mfi_ccb		*ccb;
246 	uint32_t		i;
247 	int			error;
248 
249 	DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc));
250 
251 	sc->sc_ccb = malloc(sizeof(struct mfi_ccb) * sc->sc_max_cmds,
252 	    M_DEVBUF, M_WAITOK|M_ZERO);
253 
254 	for (i = 0; i < sc->sc_max_cmds; i++) {
255 		ccb = &sc->sc_ccb[i];
256 
257 		ccb->ccb_sc = sc;
258 
259 		/* select i'th frame */
260 		ccb->ccb_frame = (union mfi_frame *)
261 		    ((char*)MFIMEM_KVA(sc->sc_frames) + sc->sc_frames_size * i);
262 		ccb->ccb_pframe =
263 		    MFIMEM_DVA(sc->sc_frames) + sc->sc_frames_size * i;
264 		ccb->ccb_frame->mfr_header.mfh_context = i;
265 
266 		/* select i'th sense */
267 		ccb->ccb_sense = (struct mfi_sense *)
268 		    ((char*)MFIMEM_KVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
269 		ccb->ccb_psense =
270 		    (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
271 
272 		/* create a dma map for transfer */
273 		error = bus_dmamap_create(sc->sc_dmat,
274 		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
275 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
276 		if (error) {
277 			printf("%s: cannot create ccb dmamap (%d)\n",
278 			    DEVNAME(sc), error);
279 			goto destroy;
280 		}
281 
282 		DNPRINTF(MFI_D_CCB,
283 		    "ccb(%d): %p frame: %#lx (%#lx) sense: %#lx (%#lx) map: %#lx\n",
284 		    ccb->ccb_frame->mfr_header.mfh_context, ccb,
285 		    (u_long)ccb->ccb_frame, (u_long)ccb->ccb_pframe,
286 		    (u_long)ccb->ccb_sense, (u_long)ccb->ccb_psense,
287 		    (u_long)ccb->ccb_dmamap);
288 
289 		/* add ccb to queue */
290 		mfi_put_ccb(ccb);
291 	}
292 
293 	return 0;
294 destroy:
295 	/* free dma maps and ccb memory */
296 	while (i) {
297 		i--;
298 		ccb = &sc->sc_ccb[i];
299 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
300 	}
301 
302 	free(sc->sc_ccb, M_DEVBUF);
303 
304 	return 1;
305 }
306 
307 static uint32_t
308 mfi_read(struct mfi_softc *sc, bus_size_t r)
309 {
310 	uint32_t rv;
311 
312 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
313 	    BUS_SPACE_BARRIER_READ);
314 	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
315 
316 	DNPRINTF(MFI_D_RW, "%s: mr 0x%lx 0x%08x ", DEVNAME(sc), (u_long)r, rv);
317 	return rv;
318 }
319 
320 static void
321 mfi_write(struct mfi_softc *sc, bus_size_t r, uint32_t v)
322 {
323 	DNPRINTF(MFI_D_RW, "%s: mw 0x%lx 0x%08x", DEVNAME(sc), (u_long)r, v);
324 
325 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
326 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
327 	    BUS_SPACE_BARRIER_WRITE);
328 }
329 
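/*
 * Allocate, map and DMA-load a physically contiguous chunk of memory
 * for controller data structures.  Returns NULL on failure with all
 * intermediate resources released.
 */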
330 static struct mfi_mem *
331 mfi_allocmem(struct mfi_softc *sc, size_t size)
332 {
333 	struct mfi_mem		*mm;
334 	int			nsegs;
335 
336 	DNPRINTF(MFI_D_MEM, "%s: mfi_allocmem: %ld\n", DEVNAME(sc),
337 	    (long)size);
338 
339 	mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_NOWAIT|M_ZERO);
340 	if (mm == NULL)
341 		return NULL;
342 
343 	mm->am_size = size;
344 
345 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
346 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
347 		goto amfree;
348 
349 	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1,
350 	    &nsegs, BUS_DMA_NOWAIT) != 0)
351 		goto destroy;
352 
353 	if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva,
354 	    BUS_DMA_NOWAIT) != 0)
355 		goto free;
356 
357 	if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL,
358 	    BUS_DMA_NOWAIT) != 0)
359 		goto unmap;
360 
361 	DNPRINTF(MFI_D_MEM, "  kva: %p  dva: %p  map: %p\n",
362 	    mm->am_kva, (void *)mm->am_map->dm_segs[0].ds_addr, mm->am_map);
363 
364 	memset(mm->am_kva, 0, size);
365 	return mm;
366 
367 unmap:
368 	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
369 free:
370 	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
371 destroy:
372 	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
373 amfree:
374 	free(mm, M_DEVBUF);
375 
376 	return NULL;
377 }
378 
379 static void
380 mfi_freemem(struct mfi_softc *sc, struct mfi_mem **mmp)
381 {
382 	struct mfi_mem *mm = *mmp;
383 
384 	if (mm == NULL)
385 		return;
386 
387 	*mmp = NULL;
388 
389 	DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm);
390 
391 	bus_dmamap_unload(sc->sc_dmat, mm->am_map);
392 	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size);
393 	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
394 	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
395 	free(mm, M_DEVBUF);
396 }
397 
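/*
 * Step the firmware state machine until it reports READY, acknowledging
 * handshakes along the way.  Each state gets its own timeout; returns
 * non-zero if the firmware faults or gets stuck in a state.
 */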
398 static int
399 mfi_transition_firmware(struct mfi_softc *sc)
400 {
401 	uint32_t		fw_state, cur_state;
402 	int			max_wait, i;
403 
404 	fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
405 
406 	DNPRINTF(MFI_D_CMD, "%s: mfi_transition_firmware: %#x\n", DEVNAME(sc),
407 	    fw_state);
408 
409 	while (fw_state != MFI_STATE_READY) {
410 		DNPRINTF(MFI_D_MISC,
411 		    "%s: waiting for firmware to become ready\n",
412 		    DEVNAME(sc));
413 		cur_state = fw_state;
414 		switch (fw_state) {
415 		case MFI_STATE_FAULT:
416 			printf("%s: firmware fault\n", DEVNAME(sc));
417 			return 1;
418 		case MFI_STATE_WAIT_HANDSHAKE:
419 			if (sc->sc_flags & MFI_IOP_SKINNY)
420 				mfi_write(sc, MFI_SKINNY_IDB, MFI_INIT_CLEAR_HANDSHAKE);
421 			else
422 				mfi_write(sc, MFI_IDB, MFI_INIT_CLEAR_HANDSHAKE);
423 			max_wait = 2;
424 			break;
425 		case MFI_STATE_OPERATIONAL:
426 			if (sc->sc_flags & MFI_IOP_SKINNY)
427 				mfi_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
428 			else
429 				mfi_write(sc, MFI_IDB, MFI_INIT_READY);
430 			max_wait = 10;
431 			break;
432 		case MFI_STATE_UNDEFINED:
433 		case MFI_STATE_BB_INIT:
434 			max_wait = 2;
435 			break;
436 		case MFI_STATE_FW_INIT:
437 		case MFI_STATE_DEVICE_SCAN:
438 		case MFI_STATE_FLUSH_CACHE:
439 			max_wait = 20;
440 			break;
441 		default:
442 			printf("%s: unknown firmware state %d\n",
443 			    DEVNAME(sc), fw_state);
444 			return 1;
445 		}
446 		for (i = 0; i < (max_wait * 10); i++) {
447 			fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
448 			if (fw_state == cur_state)
449 				DELAY(100000);
450 			else
451 				break;
452 		}
453 		if (fw_state == cur_state) {
454 			printf("%s: firmware stuck in state %#x\n",
455 			    DEVNAME(sc), fw_state);
456 			return 1;
457 		}
458 	}
459 
460 	return 0;
461 }
462 
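/*
 * Hand the firmware the physical addresses of the reply queue and the
 * producer/consumer indices via an INIT frame and poll for completion.
 */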
463 static int
464 mfi_initialize_firmware(struct mfi_softc *sc)
465 {
466 	struct mfi_ccb		*ccb;
467 	struct mfi_init_frame	*init;
468 	struct mfi_init_qinfo	*qinfo;
469 
470 	DNPRINTF(MFI_D_MISC, "%s: mfi_initialize_firmware\n", DEVNAME(sc));
471 
472 	if ((ccb = mfi_get_ccb(sc)) == NULL)
473 		return 1;
474 
475 	init = &ccb->ccb_frame->mfr_init;
476 	qinfo = (struct mfi_init_qinfo *)((uint8_t *)init + MFI_FRAME_SIZE);
477 
478 	memset(qinfo, 0, sizeof *qinfo);
479 	qinfo->miq_rq_entries = sc->sc_max_cmds + 1;
480 	qinfo->miq_rq_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
481 	    offsetof(struct mfi_prod_cons, mpc_reply_q));
482 	qinfo->miq_pi_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
483 	    offsetof(struct mfi_prod_cons, mpc_producer));
484 	qinfo->miq_ci_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
485 	    offsetof(struct mfi_prod_cons, mpc_consumer));
486 
487 	init->mif_header.mfh_cmd = MFI_CMD_INIT;
488 	init->mif_header.mfh_data_len = sizeof *qinfo;
489 	init->mif_qinfo_new_addr_lo = htole32(ccb->ccb_pframe + MFI_FRAME_SIZE);
490 
491 	DNPRINTF(MFI_D_MISC, "%s: entries: %#x rq: %#x pi: %#x ci: %#x\n",
492 	    DEVNAME(sc),
493 	    qinfo->miq_rq_entries, qinfo->miq_rq_addr_lo,
494 	    qinfo->miq_pi_addr_lo, qinfo->miq_ci_addr_lo);
495 
496 	if (mfi_poll(ccb)) {
497 		printf("%s: mfi_initialize_firmware failed\n", DEVNAME(sc));
498 		return 1;
499 	}
500 
501 	mfi_put_ccb(ccb);
502 
503 	return 0;
504 }
505 
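/*
 * Fetch the controller information structure (MR_DCMD_CTRL_GET_INFO)
 * into sc_info; with MFI_DEBUG the result is dumped to the console.
 */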
506 static int
507 mfi_get_info(struct mfi_softc *sc)
508 {
509 #ifdef MFI_DEBUG
510 	int i;
511 #endif
512 	DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc));
513 
514 	if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN,
515 	    sizeof(sc->sc_info), &sc->sc_info, NULL))
516 		return 1;
517 
518 #ifdef MFI_DEBUG
519 
520 	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
521 		printf("%s: active FW %s Version %s date %s time %s\n",
522 		    DEVNAME(sc),
523 		    sc->sc_info.mci_image_component[i].mic_name,
524 		    sc->sc_info.mci_image_component[i].mic_version,
525 		    sc->sc_info.mci_image_component[i].mic_build_date,
526 		    sc->sc_info.mci_image_component[i].mic_build_time);
527 	}
528 
529 	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
530 		printf("%s: pending FW %s Version %s date %s time %s\n",
531 		    DEVNAME(sc),
532 		    sc->sc_info.mci_pending_image_component[i].mic_name,
533 		    sc->sc_info.mci_pending_image_component[i].mic_version,
534 		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
535 		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
536 	}
537 
538 	printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
539 	    DEVNAME(sc),
540 	    sc->sc_info.mci_max_arms,
541 	    sc->sc_info.mci_max_spans,
542 	    sc->sc_info.mci_max_arrays,
543 	    sc->sc_info.mci_max_lds,
544 	    sc->sc_info.mci_product_name);
545 
546 	printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
547 	    DEVNAME(sc),
548 	    sc->sc_info.mci_serial_number,
549 	    sc->sc_info.mci_hw_present,
550 	    sc->sc_info.mci_current_fw_time,
551 	    sc->sc_info.mci_max_cmds,
552 	    sc->sc_info.mci_max_sg_elements);
553 
554 	printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
555 	    DEVNAME(sc),
556 	    sc->sc_info.mci_max_request_size,
557 	    sc->sc_info.mci_lds_present,
558 	    sc->sc_info.mci_lds_degraded,
559 	    sc->sc_info.mci_lds_offline,
560 	    sc->sc_info.mci_pd_present);
561 
562 	printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
563 	    DEVNAME(sc),
564 	    sc->sc_info.mci_pd_disks_present,
565 	    sc->sc_info.mci_pd_disks_pred_failure,
566 	    sc->sc_info.mci_pd_disks_failed);
567 
568 	printf("%s: nvram %d mem %d flash %d\n",
569 	    DEVNAME(sc),
570 	    sc->sc_info.mci_nvram_size,
571 	    sc->sc_info.mci_memory_size,
572 	    sc->sc_info.mci_flash_size);
573 
574 	printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
575 	    DEVNAME(sc),
576 	    sc->sc_info.mci_ram_correctable_errors,
577 	    sc->sc_info.mci_ram_uncorrectable_errors,
578 	    sc->sc_info.mci_cluster_allowed,
579 	    sc->sc_info.mci_cluster_active);
580 
581 	printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
582 	    DEVNAME(sc),
583 	    sc->sc_info.mci_max_strips_per_io,
584 	    sc->sc_info.mci_raid_levels,
585 	    sc->sc_info.mci_adapter_ops,
586 	    sc->sc_info.mci_ld_ops);
587 
588 	printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
589 	    DEVNAME(sc),
590 	    sc->sc_info.mci_stripe_sz_ops.min,
591 	    sc->sc_info.mci_stripe_sz_ops.max,
592 	    sc->sc_info.mci_pd_ops,
593 	    sc->sc_info.mci_pd_mix_support);
594 
595 	printf("%s: ecc_bucket %d pckg_prop %s\n",
596 	    DEVNAME(sc),
597 	    sc->sc_info.mci_ecc_bucket_count,
598 	    sc->sc_info.mci_package_version);
599 
600 	printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
601 	    DEVNAME(sc),
602 	    sc->sc_info.mci_properties.mcp_seq_num,
603 	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
604 	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
605 	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);
606 
607 	printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
608 	    DEVNAME(sc),
609 	    sc->sc_info.mci_properties.mcp_rebuild_rate,
610 	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
611 	    sc->sc_info.mci_properties.mcp_bgi_rate,
612 	    sc->sc_info.mci_properties.mcp_cc_rate);
613 
614 	printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
615 	    DEVNAME(sc),
616 	    sc->sc_info.mci_properties.mcp_recon_rate,
617 	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
618 	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
619 	    sc->sc_info.mci_properties.mcp_spinup_delay,
620 	    sc->sc_info.mci_properties.mcp_cluster_enable);
621 
622 	printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
623 	    DEVNAME(sc),
624 	    sc->sc_info.mci_properties.mcp_coercion_mode,
625 	    sc->sc_info.mci_properties.mcp_alarm_enable,
626 	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
627 	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
628 	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);
629 
630 	printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
631 	    DEVNAME(sc),
632 	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
633 	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
634 	    sc->sc_info.mci_properties.mcp_expose_encl_devices);
635 
636 	printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
637 	    DEVNAME(sc),
638 	    sc->sc_info.mci_pci.mip_vendor,
639 	    sc->sc_info.mci_pci.mip_device,
640 	    sc->sc_info.mci_pci.mip_subvendor,
641 	    sc->sc_info.mci_pci.mip_subdevice);
642 
643 	printf("%s: type %#x port_count %d port_addr ",
644 	    DEVNAME(sc),
645 	    sc->sc_info.mci_host.mih_type,
646 	    sc->sc_info.mci_host.mih_port_count);
647 
648 	for (i = 0; i < 8; i++)
649 		printf("%.0lx ", sc->sc_info.mci_host.mih_port_addr[i]);
650 	printf("\n");
651 
652 	printf("%s: type %#x port_count %d port_addr ",
653 	    DEVNAME(sc),
654 	    sc->sc_info.mci_device.mid_type,
655 	    sc->sc_info.mci_device.mid_port_count);
656 
657 	for (i = 0; i < 8; i++)
658 		printf("%.0lx ", sc->sc_info.mci_device.mid_port_addr[i]);
659 	printf("\n");
660 #endif /* MFI_DEBUG */
661 
662 	return 0;
663 }
664 
665 static void
666 mfiminphys(struct buf *bp)
667 {
668 	DNPRINTF(MFI_D_MISC, "mfiminphys: %d\n", bp->b_bcount);
669 
670 	/* XXX currently using MFI_MAXFER = MAXPHYS */
671 	if (bp->b_bcount > MFI_MAXFER)
672 		bp->b_bcount = MFI_MAXFER;
673 	minphys(bp);
674 }
675 
676 int
677 mfi_rescan(device_t self, const char *ifattr, const int *locators)
678 {
679 	struct mfi_softc *sc = device_private(self);
680 
681 	if (sc->sc_child != NULL)
682 		return 0;
683 
684 	sc->sc_child = config_found_sm_loc(self, ifattr, locators, &sc->sc_chan,
685 	    scsiprint, NULL);
686 
687 	return 0;
688 }
689 
690 void
691 mfi_childdetached(device_t self, device_t child)
692 {
693 	struct mfi_softc *sc = device_private(self);
694 
695 	KASSERT(self == sc->sc_dev);
696 	KASSERT(child == sc->sc_child);
697 
698 	if (child == sc->sc_child)
699 		sc->sc_child = NULL;
700 }
701 
702 int
703 mfi_detach(struct mfi_softc *sc, int flags)
704 {
705 	int			error;
706 
707 	DNPRINTF(MFI_D_MISC, "%s: mfi_detach\n", DEVNAME(sc));
708 
709 	if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
710 		return error;
711 
712 #if NBIO > 0
713 	mfi_destroy_sensors(sc);
714 	bio_unregister(sc->sc_dev);
715 #endif /* NBIO > 0 */
716 
717 	mfi_intr_disable(sc);
718 
719 	/* TBD: shutdown firmware */
720 
721 	if ((error = mfi_destroy_ccb(sc)) != 0)
722 		return error;
723 
724 	mfi_freemem(sc, &sc->sc_sense);
725 
726 	mfi_freemem(sc, &sc->sc_frames);
727 
728 	mfi_freemem(sc, &sc->sc_pcq);
729 
730 	return 0;
731 }
732 
733 int
734 mfi_attach(struct mfi_softc *sc, enum mfi_iop iop)
735 {
736 	struct scsipi_adapter *adapt = &sc->sc_adapt;
737 	struct scsipi_channel *chan = &sc->sc_chan;
738 	uint32_t		status, frames;
739 	int			i;
740 
741 	DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc));
742 
743 	switch (iop) {
744 	case MFI_IOP_XSCALE:
745 		sc->sc_iop = &mfi_iop_xscale;
746 		break;
747 	case MFI_IOP_PPC:
748 		sc->sc_iop = &mfi_iop_ppc;
749 		break;
750 	case MFI_IOP_GEN2:
751 		sc->sc_iop = &mfi_iop_gen2;
752 		break;
753 	case MFI_IOP_SKINNY:
754 		sc->sc_iop = &mfi_iop_skinny;
755 		break;
756 	default:
757 		 panic("%s: unknown iop %d", DEVNAME(sc), iop);
758 	}
759 
760 	if (mfi_transition_firmware(sc))
761 		return 1;
762 
763 	TAILQ_INIT(&sc->sc_ccb_freeq);
764 
765 	status = mfi_fw_state(sc);
766 	sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
767 	sc->sc_max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
768 	DNPRINTF(MFI_D_MISC, "%s: max commands: %u, max sgl: %u\n",
769 	    DEVNAME(sc), sc->sc_max_cmds, sc->sc_max_sgl);
770 
771 	/* consumer/producer and reply queue memory */
772 	sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) +
773 	    sizeof(struct mfi_prod_cons));
774 	if (sc->sc_pcq == NULL) {
775 		aprint_error("%s: unable to allocate reply queue memory\n",
776 		    DEVNAME(sc));
777 		goto nopcq;
778 	}
779 	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
780 	    sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
781 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
782 
783 	/* frame memory */
784 	/* we are not doing 64 bit IO so only calculate # of 32 bit frames */
785 	frames = (sizeof(struct mfi_sg32) * sc->sc_max_sgl +
786 	    MFI_FRAME_SIZE - 1) / MFI_FRAME_SIZE + 1;
787 	sc->sc_frames_size = frames * MFI_FRAME_SIZE;
788 	sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds);
789 	if (sc->sc_frames == NULL) {
790 		aprint_error("%s: unable to allocate frame memory\n",
791 		    DEVNAME(sc));
792 		goto noframe;
793 	}
794 	/* XXX hack, fix this */
795 	if (MFIMEM_DVA(sc->sc_frames) & 0x3f) {
796 		aprint_error("%s: improper frame alignment (%#llx) FIXME\n",
797 		    DEVNAME(sc), (long long int)MFIMEM_DVA(sc->sc_frames));
798 		goto noframe;
799 	}
800 
801 	/* sense memory */
802 	sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
803 	if (sc->sc_sense == NULL) {
804 		aprint_error("%s: unable to allocate sense memory\n",
805 		    DEVNAME(sc));
806 		goto nosense;
807 	}
808 
809 	/* now that we have all memory bits go initialize ccbs */
810 	if (mfi_init_ccb(sc)) {
811 		aprint_error("%s: could not init ccb list\n", DEVNAME(sc));
812 		goto noinit;
813 	}
814 
815 	/* kickstart firmware with all addresses and pointers */
816 	if (mfi_initialize_firmware(sc)) {
817 		aprint_error("%s: could not initialize firmware\n",
818 		    DEVNAME(sc));
819 		goto noinit;
820 	}
821 
822 	if (mfi_get_info(sc)) {
823 		aprint_error("%s: could not retrieve controller information\n",
824 		    DEVNAME(sc));
825 		goto noinit;
826 	}
827 
828 	aprint_normal("%s: logical drives %d, version %s, %dMB RAM\n",
829 	    DEVNAME(sc),
830 	    sc->sc_info.mci_lds_present,
831 	    sc->sc_info.mci_package_version,
832 	    sc->sc_info.mci_memory_size);
833 
834 	sc->sc_ld_cnt = sc->sc_info.mci_lds_present;
835 	sc->sc_max_ld = sc->sc_ld_cnt;
836 	for (i = 0; i < sc->sc_ld_cnt; i++)
837 		sc->sc_ld[i].ld_present = 1;
838 
839 	memset(adapt, 0, sizeof(*adapt));
840 	adapt->adapt_dev = sc->sc_dev;
841 	adapt->adapt_nchannels = 1;
842 	if (sc->sc_ld_cnt)
843 		adapt->adapt_openings = sc->sc_max_cmds / sc->sc_ld_cnt;
844 	else
845 		adapt->adapt_openings = sc->sc_max_cmds;
846 	adapt->adapt_max_periph = adapt->adapt_openings;
847 	adapt->adapt_request = mfi_scsipi_request;
848 	adapt->adapt_minphys = mfiminphys;
849 
850 	memset(chan, 0, sizeof(*chan));
851 	chan->chan_adapter = adapt;
852 	chan->chan_bustype = &scsi_bustype;
853 	chan->chan_channel = 0;
854 	chan->chan_flags = 0;
855 	chan->chan_nluns = 8;
856 	chan->chan_ntargets = MFI_MAX_LD;
857 	chan->chan_id = MFI_MAX_LD;
858 
859 	mfi_rescan(sc->sc_dev, "scsi", NULL);
860 
861 	/* enable interrupts */
862 	mfi_intr_enable(sc);
863 
864 #if NBIO > 0
865 	if (bio_register(sc->sc_dev, mfi_ioctl) != 0)
866 		panic("%s: controller registration failed", DEVNAME(sc));
867 	if (mfi_create_sensors(sc) != 0)
868 		aprint_error("%s: unable to create sensors\n", DEVNAME(sc));
869 #endif /* NBIO > 0 */
870 
871 	return 0;
872 noinit:
873 	mfi_freemem(sc, &sc->sc_sense);
874 nosense:
875 	mfi_freemem(sc, &sc->sc_frames);
876 noframe:
877 	mfi_freemem(sc, &sc->sc_pcq);
878 nopcq:
879 	return 1;
880 }
881 
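/*
 * Busy-wait for a command to complete by watching its status byte,
 * syncing the frame memory between checks.  Used during autoconf and
 * for XS_CTL_POLL requests; gives up after roughly five seconds.
 */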
882 static int
883 mfi_poll(struct mfi_ccb *ccb)
884 {
885 	struct mfi_softc *sc = ccb->ccb_sc;
886 	struct mfi_frame_header	*hdr;
887 	int			to = 0;
888 
889 	DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc));
890 
891 	hdr = &ccb->ccb_frame->mfr_header;
892 	hdr->mfh_cmd_status = 0xff;
893 	hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
894 
895 	mfi_post(sc, ccb);
896 	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
897 	    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
898 	    sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
899 
900 	while (hdr->mfh_cmd_status == 0xff) {
901 		delay(1000);
902 		if (to++ > 5000) /* XXX 5 seconds busywait sucks */
903 			break;
904 		bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
905 		    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
906 		    sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
907 	}
908 	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
909 	    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
910 	    sc->sc_frames_size, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
911 
912 	if (ccb->ccb_data != NULL) {
913 		DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
914 		    DEVNAME(sc));
915 		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
916 		    ccb->ccb_dmamap->dm_mapsize,
917 		    (ccb->ccb_direction & MFI_DATA_IN) ?
918 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
919 
920 		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
921 	}
922 
923 	if (hdr->mfh_cmd_status == 0xff) {
924 		printf("%s: timeout on ccb %d\n", DEVNAME(sc),
925 		    hdr->mfh_context);
926 		ccb->ccb_flags |= MFI_CCB_F_ERR;
927 		return 1;
928 	}
929 
930 	return 0;
931 }
932 
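/*
 * Interrupt handler: walk the reply queue from the consumer index up to
 * the producer index, look up each completed ccb by its context, run
 * its completion callback, then publish the new consumer index.
 */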
933 int
934 mfi_intr(void *arg)
935 {
936 	struct mfi_softc	*sc = arg;
937 	struct mfi_prod_cons	*pcq;
938 	struct mfi_ccb		*ccb;
939 	uint32_t		producer, consumer, ctx;
940 	int			claimed = 0;
941 
942 	if (!mfi_my_intr(sc))
943 		return 0;
944 
945 	pcq = MFIMEM_KVA(sc->sc_pcq);
946 
947 	DNPRINTF(MFI_D_INTR, "%s: mfi_intr %#lx %#lx\n", DEVNAME(sc),
948 	    (u_long)sc, (u_long)pcq);
949 
950 	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
951 	    sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
952 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
953 
954 	producer = pcq->mpc_producer;
955 	consumer = pcq->mpc_consumer;
956 
957 	while (consumer != producer) {
958 		DNPRINTF(MFI_D_INTR, "%s: mfi_intr pi %#x ci %#x\n",
959 		    DEVNAME(sc), producer, consumer);
960 
961 		ctx = pcq->mpc_reply_q[consumer];
962 		pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX;
963 		if (ctx == MFI_INVALID_CTX)
964 			printf("%s: invalid context, p: %d c: %d\n",
965 			    DEVNAME(sc), producer, consumer);
966 		else {
967 			/* XXX remove from queue and call scsi_done */
968 			ccb = &sc->sc_ccb[ctx];
969 			DNPRINTF(MFI_D_INTR, "%s: mfi_intr context %#x\n",
970 			    DEVNAME(sc), ctx);
971 			bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
972 			    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
973 			    sc->sc_frames_size,
974 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
975 			ccb->ccb_done(ccb);
976 
977 			claimed = 1;
978 		}
979 		consumer++;
980 		if (consumer == (sc->sc_max_cmds + 1))
981 			consumer = 0;
982 	}
983 
984 	pcq->mpc_consumer = consumer;
985 	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
986 	    sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
987 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
988 
989 	return claimed;
990 }
991 
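/*
 * Build an LD_READ/LD_WRITE I/O frame for a logical drive and attach
 * the scatter/gather list for the data transfer.
 */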
992 static int
993 mfi_scsi_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs, uint32_t blockno,
994     uint32_t blockcnt)
995 {
996 	struct scsipi_periph *periph = xs->xs_periph;
997 	struct mfi_io_frame   *io;
998 
999 	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_io: %d\n",
1000 	    device_xname(periph->periph_channel->chan_adapter->adapt_dev),
1001 	    periph->periph_target);
1002 
1003 	if (!xs->data)
1004 		return 1;
1005 
1006 	io = &ccb->ccb_frame->mfr_io;
1007 	if (xs->xs_control & XS_CTL_DATA_IN) {
1008 		io->mif_header.mfh_cmd = MFI_CMD_LD_READ;
1009 		ccb->ccb_direction = MFI_DATA_IN;
1010 	} else {
1011 		io->mif_header.mfh_cmd = MFI_CMD_LD_WRITE;
1012 		ccb->ccb_direction = MFI_DATA_OUT;
1013 	}
1014 	io->mif_header.mfh_target_id = periph->periph_target;
1015 	io->mif_header.mfh_timeout = 0;
1016 	io->mif_header.mfh_flags = 0;
1017 	io->mif_header.mfh_sense_len = MFI_SENSE_SIZE;
1018 	io->mif_header.mfh_data_len = blockcnt;
1019 	io->mif_lba_hi = 0;
1020 	io->mif_lba_lo = blockno;
1021 	io->mif_sense_addr_lo = htole32(ccb->ccb_psense);
1022 	io->mif_sense_addr_hi = 0;
1023 
1024 	ccb->ccb_done = mfi_scsi_xs_done;
1025 	ccb->ccb_xs = xs;
1026 	ccb->ccb_frame_size = MFI_IO_FRAME_SIZE;
1027 	ccb->ccb_sgl = &io->mif_sgl;
1028 	ccb->ccb_data = xs->data;
1029 	ccb->ccb_len = xs->datalen;
1030 
1031 	if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
1032 	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
1033 		return 1;
1034 
1035 	return 0;
1036 }
1037 
1038 static void
1039 mfi_scsi_xs_done(struct mfi_ccb *ccb)
1040 {
1041 	struct scsipi_xfer	*xs = ccb->ccb_xs;
1042 	struct mfi_softc	*sc = ccb->ccb_sc;
1043 	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
1044 
1045 	DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %#lx %#lx\n",
1046 	    DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);
1047 
1048 	if (xs->data != NULL) {
1049 		DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done sync\n",
1050 		    DEVNAME(sc));
1051 		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1052 		    ccb->ccb_dmamap->dm_mapsize,
1053 		    (xs->xs_control & XS_CTL_DATA_IN) ?
1054 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1055 
1056 		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
1057 	}
1058 
1059 	if (hdr->mfh_cmd_status != MFI_STAT_OK) {
1060 		xs->error = XS_DRIVER_STUFFUP;
1061 		DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done stuffup %#x\n",
1062 		    DEVNAME(sc), hdr->mfh_cmd_status);
1063 
1064 		if (hdr->mfh_scsi_status != 0) {
1065 			bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
1066 			    ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
1067 			    MFI_SENSE_SIZE, BUS_DMASYNC_POSTREAD);
1068 			DNPRINTF(MFI_D_INTR,
1069 			    "%s: mfi_scsi_xs_done sense %#x %lx %lx\n",
1070 			    DEVNAME(sc), hdr->mfh_scsi_status,
1071 			    (u_long)&xs->sense, (u_long)ccb->ccb_sense);
1072 			memset(&xs->sense, 0, sizeof(xs->sense));
1073 			memcpy(&xs->sense, ccb->ccb_sense,
1074 			    sizeof(struct scsi_sense_data));
1075 			xs->error = XS_SENSE;
1076 		}
1077 	} else {
1078 		xs->error = XS_NOERROR;
1079 		xs->status = SCSI_OK;
1080 		xs->resid = 0;
1081 	}
1082 
1083 	mfi_put_ccb(ccb);
1084 	scsipi_done(xs);
1085 }
1086 
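/*
 * Wrap an arbitrary CDB in a pass-through frame addressed at a logical
 * drive; the firmware interprets the SCSI command itself.
 */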
1087 static int
1088 mfi_scsi_ld(struct mfi_ccb *ccb, struct scsipi_xfer *xs)
1089 {
1090 	struct mfi_pass_frame	*pf;
1091 	struct scsipi_periph *periph = xs->xs_periph;
1092 
1093 	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld: %d\n",
1094 	    device_xname(periph->periph_channel->chan_adapter->adapt_dev),
1095 	    periph->periph_target);
1096 
1097 	pf = &ccb->ccb_frame->mfr_pass;
1098 	pf->mpf_header.mfh_cmd = MFI_CMD_LD_SCSI_IO;
1099 	pf->mpf_header.mfh_target_id = periph->periph_target;
1100 	pf->mpf_header.mfh_lun_id = 0;
1101 	pf->mpf_header.mfh_cdb_len = xs->cmdlen;
1102 	pf->mpf_header.mfh_timeout = 0;
1103 	pf->mpf_header.mfh_data_len = xs->datalen; /* XXX */
1104 	pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;
1105 
1106 	pf->mpf_sense_addr_hi = 0;
1107 	pf->mpf_sense_addr_lo = htole32(ccb->ccb_psense);
1108 
1109 	memset(pf->mpf_cdb, 0, 16);
1110 	memcpy(pf->mpf_cdb, &xs->cmdstore, xs->cmdlen);
1111 
1112 	ccb->ccb_done = mfi_scsi_xs_done;
1113 	ccb->ccb_xs = xs;
1114 	ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
1115 	ccb->ccb_sgl = &pf->mpf_sgl;
1116 
1117 	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT))
1118 		ccb->ccb_direction = (xs->xs_control & XS_CTL_DATA_IN) ?
1119 		    MFI_DATA_IN : MFI_DATA_OUT;
1120 	else
1121 		ccb->ccb_direction = MFI_DATA_NONE;
1122 
1123 	if (xs->data) {
1124 		ccb->ccb_data = xs->data;
1125 		ccb->ccb_len = xs->datalen;
1126 
1127 		if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
1128 		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
1129 			return 1;
1130 	}
1131 
1132 	return 0;
1133 }
1134 
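/*
 * scsipi entry point.  READ/WRITE CDBs are translated onto the fast
 * LD_READ/LD_WRITE path, SYNCHRONIZE CACHE becomes a cache flush DCMD,
 * and everything else is passed through to the firmware.
 */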
1135 static void
1136 mfi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
1137     void *arg)
1138 {
1139 	struct scsipi_periph	*periph;
1140 	struct scsipi_xfer	*xs;
1141 	struct scsipi_adapter	*adapt = chan->chan_adapter;
1142 	struct mfi_softc	*sc = device_private(adapt->adapt_dev);
1143 	struct mfi_ccb		*ccb;
1144 	struct scsi_rw_6	*rw;
1145 	struct scsipi_rw_10	*rwb;
1146 	uint32_t		blockno, blockcnt;
1147 	uint8_t			target;
1148 	uint8_t			mbox[MFI_MBOX_SIZE];
1149 	int			s;
1150 
1151 	switch (req) {
1152 	case ADAPTER_REQ_GROW_RESOURCES:
1153 		/* Not supported. */
1154 		return;
1155 	case ADAPTER_REQ_SET_XFER_MODE:
1156 		/* Not supported. */
1157 		return;
1158 	case ADAPTER_REQ_RUN_XFER:
1159 		break;
1160 	}
1161 
1162 	xs = arg;
1163 
1164 	DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request req %d opcode: %#x\n",
1165 	    DEVNAME(sc), req, xs->cmd->opcode);
1166 
1167 	periph = xs->xs_periph;
1168 	target = periph->periph_target;
1169 
1170 	s = splbio();
1171 	if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
1172 	    periph->periph_lun != 0) {
1173 		DNPRINTF(MFI_D_CMD, "%s: invalid target %d\n",
1174 		    DEVNAME(sc), target);
1175 		xs->error = XS_SELTIMEOUT;
1176 		scsipi_done(xs);
1177 		splx(s);
1178 		return;
1179 	}
1180 
1181 	if ((ccb = mfi_get_ccb(sc)) == NULL) {
1182 		DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request no ccb\n", DEVNAME(sc));
1183 		xs->error = XS_RESOURCE_SHORTAGE;
1184 		scsipi_done(xs);
1185 		splx(s);
1186 		return;
1187 	}
1188 
1189 	switch (xs->cmd->opcode) {
1190 	/* IO path */
1191 	case READ_10:
1192 	case WRITE_10:
1193 		rwb = (struct scsipi_rw_10 *)xs->cmd;
1194 		blockno = _4btol(rwb->addr);
1195 		blockcnt = _2btol(rwb->length);
1196 		if (mfi_scsi_io(ccb, xs, blockno, blockcnt)) {
1197 			mfi_put_ccb(ccb);
1198 			goto stuffup;
1199 		}
1200 		break;
1201 
1202 	case SCSI_READ_6_COMMAND:
1203 	case SCSI_WRITE_6_COMMAND:
1204 		rw = (struct scsi_rw_6 *)xs->cmd;
1205 		blockno = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
1206 		blockcnt = rw->length ? rw->length : 0x100;
1207 		if (mfi_scsi_io(ccb, xs, blockno, blockcnt)) {
1208 			mfi_put_ccb(ccb);
1209 			goto stuffup;
1210 		}
1211 		break;
1212 
1213 	case SCSI_SYNCHRONIZE_CACHE_10:
1214 		mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
1215 		if (mfi_mgmt(ccb, xs,
1216 		    MR_DCMD_CTRL_CACHE_FLUSH, MFI_DATA_NONE, 0, NULL, mbox)) {
1217 			mfi_put_ccb(ccb);
1218 			goto stuffup;
1219 		}
1220 		break;
1221 
1222 	/* hand it off to the firmware and let it deal with it */
1223 	case SCSI_TEST_UNIT_READY:
1224 		/* save off sd? after autoconf */
1225 		if (!cold)	/* XXX bogus */
1226 			strlcpy(sc->sc_ld[target].ld_dev, device_xname(sc->sc_dev),
1227 			    sizeof(sc->sc_ld[target].ld_dev));
1228 		/* FALLTHROUGH */
1229 
1230 	default:
1231 		if (mfi_scsi_ld(ccb, xs)) {
1232 			mfi_put_ccb(ccb);
1233 			goto stuffup;
1234 		}
1235 		break;
1236 	}
1237 
1238 	DNPRINTF(MFI_D_CMD, "%s: start io %d\n", DEVNAME(sc), target);
1239 
1240 	if (xs->xs_control & XS_CTL_POLL) {
1241 		if (mfi_poll(ccb)) {
1242 			/* XXX check for sense in ccb->ccb_sense? */
1243 			printf("%s: mfi_scsipi_request poll failed\n",
1244 			    DEVNAME(sc));
1245 			memset(&xs->sense, 0, sizeof(xs->sense));
1246 			xs->sense.scsi_sense.response_code =
1247 			    SSD_RCODE_VALID | SSD_RCODE_CURRENT;
1248 			xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
1249 			xs->sense.scsi_sense.asc = 0x20; /* invalid opcode */
1250 			xs->error = XS_SENSE;
1251 			xs->status = SCSI_CHECK;
1252 		} else {
1253 			DNPRINTF(MFI_D_DMA,
1254 			    "%s: mfi_scsipi_request poll complete %d\n",
1255 			    DEVNAME(sc), ccb->ccb_dmamap->dm_nsegs);
1256 			xs->error = XS_NOERROR;
1257 			xs->status = SCSI_OK;
1258 			xs->resid = 0;
1259 		}
1260 		mfi_put_ccb(ccb);
1261 		scsipi_done(xs);
1262 		splx(s);
1263 		return;
1264 	}
1265 
1266 	mfi_post(sc, ccb);
1267 
1268 	DNPRINTF(MFI_D_DMA, "%s: mfi_scsipi_request queued %d\n", DEVNAME(sc),
1269 	    ccb->ccb_dmamap->dm_nsegs);
1270 
1271 	splx(s);
1272 	return;
1273 
1274 stuffup:
1275 	xs->error = XS_DRIVER_STUFFUP;
1276 	scsipi_done(xs);
1277 	splx(s);
1278 }
1279 
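/*
 * Load the ccb's data buffer into its DMA map and fill in 32-bit
 * scatter/gather entries behind the frame, updating the frame size and
 * the number of extra frames the list spills into.
 */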
1280 static int
1281 mfi_create_sgl(struct mfi_ccb *ccb, int flags)
1282 {
1283 	struct mfi_softc	*sc = ccb->ccb_sc;
1284 	struct mfi_frame_header	*hdr;
1285 	bus_dma_segment_t	*sgd;
1286 	union mfi_sgl		*sgl;
1287 	int			error, i;
1288 
1289 	DNPRINTF(MFI_D_DMA, "%s: mfi_create_sgl %#lx\n", DEVNAME(sc),
1290 	    (u_long)ccb->ccb_data);
1291 
1292 	if (!ccb->ccb_data)
1293 		return 1;
1294 
1295 	error = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap,
1296 	    ccb->ccb_data, ccb->ccb_len, NULL, flags);
1297 	if (error) {
1298 		if (error == EFBIG)
1299 			printf("more than %d dma segs\n",
1300 			    sc->sc_max_sgl);
1301 		else
1302 			printf("error %d loading dma map\n", error);
1303 		return 1;
1304 	}
1305 
1306 	hdr = &ccb->ccb_frame->mfr_header;
1307 	sgl = ccb->ccb_sgl;
1308 	sgd = ccb->ccb_dmamap->dm_segs;
1309 	for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
1310 		sgl->sg32[i].addr = htole32(sgd[i].ds_addr);
1311 		sgl->sg32[i].len = htole32(sgd[i].ds_len);
1312 		DNPRINTF(MFI_D_DMA, "%s: addr: %#x  len: %#x\n",
1313 		    DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len);
1314 	}
1315 
1316 	if (ccb->ccb_direction == MFI_DATA_IN) {
1317 		hdr->mfh_flags |= MFI_FRAME_DIR_READ;
1318 		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1319 		    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1320 	} else {
1321 		hdr->mfh_flags |= MFI_FRAME_DIR_WRITE;
1322 		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1323 		    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1324 	}
1325 
1326 	hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;
1327 	/* for 64 bit io make the sizeof a variable to hold whatever sg size */
1328 	ccb->ccb_frame_size += sizeof(struct mfi_sg32) *
1329 	    ccb->ccb_dmamap->dm_nsegs;
1330 	ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE;
1331 
1332 	DNPRINTF(MFI_D_DMA, "%s: sg_count: %d  frame_size: %d  frames_size: %d"
1333 	    "  dm_nsegs: %d  extra_frames: %d\n",
1334 	    DEVNAME(sc),
1335 	    hdr->mfh_sg_count,
1336 	    ccb->ccb_frame_size,
1337 	    sc->sc_frames_size,
1338 	    ccb->ccb_dmamap->dm_nsegs,
1339 	    ccb->ccb_extra_frames);
1340 
1341 	return 0;
1342 }
1343 
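/*
 * Issue a management DCMD with a private ccb.  During autoconf (cold)
 * the command is polled; otherwise it is posted and the caller sleeps
 * until mfi_mgmt_done() wakes it up.
 */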
1344 static int
1345 mfi_mgmt_internal(struct mfi_softc *sc, uint32_t opc, uint32_t dir,
1346     uint32_t len, void *buf, uint8_t *mbox)
1347 {
1348 	struct mfi_ccb		*ccb;
1349 	int			rv = 1;
1350 
1351 	if ((ccb = mfi_get_ccb(sc)) == NULL)
1352 		return rv;
1353 	rv = mfi_mgmt(ccb, NULL, opc, dir, len, buf, mbox);
1354 	if (rv)
1355 		return rv;
1356 
1357 	if (cold) {
1358 		if (mfi_poll(ccb))
1359 			goto done;
1360 	} else {
1361 		mfi_post(sc, ccb);
1362 
1363 		DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt_internal sleeping\n",
1364 		    DEVNAME(sc));
1365 		while (ccb->ccb_state != MFI_CCB_DONE)
1366 			tsleep(ccb, PRIBIO, "mfi_mgmt", 0);
1367 
1368 		if (ccb->ccb_flags & MFI_CCB_F_ERR)
1369 			goto done;
1370 	}
1371 	rv = 0;
1372 
1373 done:
1374 	mfi_put_ccb(ccb);
1375 	return rv;
1376 }
1377 
1378 static int
1379 mfi_mgmt(struct mfi_ccb *ccb, struct scsipi_xfer *xs,
1380     uint32_t opc, uint32_t dir, uint32_t len, void *buf, uint8_t *mbox)
1381 {
1382 	struct mfi_dcmd_frame	*dcmd;
1383 
1384 	DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt %#x\n", DEVNAME(ccb->ccb_sc), opc);
1385 
1386 	dcmd = &ccb->ccb_frame->mfr_dcmd;
1387 	memset(dcmd->mdf_mbox, 0, MFI_MBOX_SIZE);
1388 	dcmd->mdf_header.mfh_cmd = MFI_CMD_DCMD;
1389 	dcmd->mdf_header.mfh_timeout = 0;
1390 
1391 	dcmd->mdf_opcode = opc;
1392 	dcmd->mdf_header.mfh_data_len = 0;
1393 	ccb->ccb_direction = dir;
1394 	ccb->ccb_xs = xs;
1395 	ccb->ccb_done = mfi_mgmt_done;
1396 
1397 	ccb->ccb_frame_size = MFI_DCMD_FRAME_SIZE;
1398 
1399 	/* handle special opcodes */
1400 	if (mbox)
1401 		memcpy(dcmd->mdf_mbox, mbox, MFI_MBOX_SIZE);
1402 
1403 	if (dir != MFI_DATA_NONE) {
1404 		dcmd->mdf_header.mfh_data_len = len;
1405 		ccb->ccb_data = buf;
1406 		ccb->ccb_len = len;
1407 		ccb->ccb_sgl = &dcmd->mdf_sgl;
1408 
1409 		if (mfi_create_sgl(ccb, BUS_DMA_WAITOK))
1410 			return 1;
1411 	}
1412 	return 0;
1413 }
1414 
1415 static void
1416 mfi_mgmt_done(struct mfi_ccb *ccb)
1417 {
1418 	struct scsipi_xfer	*xs = ccb->ccb_xs;
1419 	struct mfi_softc	*sc = ccb->ccb_sc;
1420 	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
1421 
1422 	DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done %#lx %#lx\n",
1423 	    DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);
1424 
1425 	if (ccb->ccb_data != NULL) {
1426 		DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
1427 		    DEVNAME(sc));
1428 		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1429 		    ccb->ccb_dmamap->dm_mapsize,
1430 		    (ccb->ccb_direction & MFI_DATA_IN) ?
1431 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1432 
1433 		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
1434 	}
1435 
1436 	if (hdr->mfh_cmd_status != MFI_STAT_OK)
1437 		ccb->ccb_flags |= MFI_CCB_F_ERR;
1438 
1439 	ccb->ccb_state = MFI_CCB_DONE;
1440 	if (xs) {
1441 		if (hdr->mfh_cmd_status != MFI_STAT_OK) {
1442 			xs->error = XS_DRIVER_STUFFUP;
1443 		} else {
1444 			xs->error = XS_NOERROR;
1445 			xs->status = SCSI_OK;
1446 			xs->resid = 0;
1447 		}
1448 		mfi_put_ccb(ccb);
1449 		scsipi_done(xs);
1450 	} else
1451 		wakeup(ccb);
1452 }
1453 
1454 #if NBIO > 0
1455 int
1456 mfi_ioctl(device_t dev, u_long cmd, void *addr)
1457 {
1458 	struct mfi_softc *sc = device_private(dev);
1459 	int error = 0;
1460 	int s;
1461 
1462 	KERNEL_LOCK(1, curlwp);
1463 	s = splbio();
1464 
1465 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl ", DEVNAME(sc));
1466 
1467 	switch (cmd) {
1468 	case BIOCINQ:
1469 		DNPRINTF(MFI_D_IOCTL, "inq\n");
1470 		error = mfi_ioctl_inq(sc, (struct bioc_inq *)addr);
1471 		break;
1472 
1473 	case BIOCVOL:
1474 		DNPRINTF(MFI_D_IOCTL, "vol\n");
1475 		error = mfi_ioctl_vol(sc, (struct bioc_vol *)addr);
1476 		break;
1477 
1478 	case BIOCDISK:
1479 		DNPRINTF(MFI_D_IOCTL, "disk\n");
1480 		error = mfi_ioctl_disk(sc, (struct bioc_disk *)addr);
1481 		break;
1482 
1483 	case BIOCALARM:
1484 		DNPRINTF(MFI_D_IOCTL, "alarm\n");
1485 		error = mfi_ioctl_alarm(sc, (struct bioc_alarm *)addr);
1486 		break;
1487 
1488 	case BIOCBLINK:
1489 		DNPRINTF(MFI_D_IOCTL, "blink\n");
1490 		error = mfi_ioctl_blink(sc, (struct bioc_blink *)addr);
1491 		break;
1492 
1493 	case BIOCSETSTATE:
1494 		DNPRINTF(MFI_D_IOCTL, "setstate\n");
1495 		error = mfi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
1496 		break;
1497 
1498 	default:
1499 		DNPRINTF(MFI_D_IOCTL, " invalid ioctl\n");
1500 		error = EINVAL;
1501 	}
1502 	splx(s);
1503 	KERNEL_UNLOCK_ONE(curlwp);
1504 
1505 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl return %x\n", DEVNAME(sc), error);
1506 	return error;
1507 }
1508 
1509 static int
1510 mfi_ioctl_inq(struct mfi_softc *sc, struct bioc_inq *bi)
1511 {
1512 	struct mfi_conf		*cfg;
1513 	int			rv = EINVAL;
1514 
1515 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq\n", DEVNAME(sc));
1516 
1517 	if (mfi_get_info(sc)) {
1518 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq failed\n",
1519 		    DEVNAME(sc));
1520 		return EIO;
1521 	}
1522 
1523 	/* get figures */
1524 	cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1525 	if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1526 	    sizeof *cfg, cfg, NULL))
1527 		goto freeme;
1528 
1529 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
1530 	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
1531 	bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
1532 
1533 	rv = 0;
1534 freeme:
1535 	free(cfg, M_DEVBUF);
1536 	return rv;
1537 }
1538 
1539 static int
1540 mfi_ioctl_vol(struct mfi_softc *sc, struct bioc_vol *bv)
1541 {
1542 	int			i, per, rv = EINVAL;
1543 	uint8_t			mbox[MFI_MBOX_SIZE];
1544 
1545 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol %#x\n",
1546 	    DEVNAME(sc), bv->bv_volid);
1547 
1548 	if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
1549 	    sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL))
1550 		goto done;
1551 
1552 	i = bv->bv_volid;
1553 	mbox[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
1554 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol target %#x\n",
1555 	    DEVNAME(sc), mbox[0]);
1556 
1557 	if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN,
1558 	    sizeof(sc->sc_ld_details), &sc->sc_ld_details, mbox))
1559 		goto done;
1560 
1561 	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
1562 		/* go do hotspares */
1563 		rv = mfi_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
1564 		goto done;
1565 	}
1566 
1567 	strlcpy(bv->bv_dev, sc->sc_ld[i].ld_dev, sizeof(bv->bv_dev));
1568 
1569 	switch(sc->sc_ld_list.mll_list[i].mll_state) {
1570 	case MFI_LD_OFFLINE:
1571 		bv->bv_status = BIOC_SVOFFLINE;
1572 		break;
1573 
1574 	case MFI_LD_PART_DEGRADED:
1575 	case MFI_LD_DEGRADED:
1576 		bv->bv_status = BIOC_SVDEGRADED;
1577 		break;
1578 
1579 	case MFI_LD_ONLINE:
1580 		bv->bv_status = BIOC_SVONLINE;
1581 		break;
1582 
1583 	default:
1584 		bv->bv_status = BIOC_SVINVALID;
1585 		DNPRINTF(MFI_D_IOCTL, "%s: invalid logical disk state %#x\n",
1586 		    DEVNAME(sc),
1587 		    sc->sc_ld_list.mll_list[i].mll_state);
1588 	}
1589 
1590 	/* additional status can modify MFI status */
1591 	switch (sc->sc_ld_details.mld_progress.mlp_in_prog) {
1592 	case MFI_LD_PROG_CC:
1593 	case MFI_LD_PROG_BGI:
1594 		bv->bv_status = BIOC_SVSCRUB;
1595 		per = (int)sc->sc_ld_details.mld_progress.mlp_cc.mp_progress;
1596 		bv->bv_percent = (per * 100) / 0xffff;
1597 		bv->bv_seconds =
1598 		    sc->sc_ld_details.mld_progress.mlp_cc.mp_elapsed_seconds;
1599 		break;
1600 
1601 	case MFI_LD_PROG_FGI:
1602 	case MFI_LD_PROG_RECONSTRUCT:
1603 		/* nothing yet */
1604 		break;
1605 	}
1606 
1607 	/*
1608 	 * The RAID levels are determined per the SNIA DDF spec; this is only
1609 	 * the subset that is valid for the MFI controller.
1610 	 */
1611 	bv->bv_level = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_pri_raid;
1612 	if (sc->sc_ld_details.mld_cfg.mlc_parm.mpa_sec_raid ==
1613 	    MFI_DDF_SRL_SPANNED)
1614 		bv->bv_level *= 10;
1615 
1616 	bv->bv_nodisk = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_no_drv_per_span *
1617 	    sc->sc_ld_details.mld_cfg.mlc_parm.mpa_span_depth;
1618 
1619 	bv->bv_size = sc->sc_ld_details.mld_size * 512; /* bytes per block */
1620 
1621 	rv = 0;
1622 done:
1623 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol done %x\n",
1624 	    DEVNAME(sc), rv);
1625 	return rv;
1626 }
1627 
1628 static int
1629 mfi_ioctl_disk(struct mfi_softc *sc, struct bioc_disk *bd)
1630 {
1631 	struct mfi_conf		*cfg;
1632 	struct mfi_array	*ar;
1633 	struct mfi_ld_cfg	*ld;
1634 	struct mfi_pd_details	*pd;
1635 	struct scsipi_inquiry_data *inqbuf;
1636 	char			vend[8+16+4+1];
1637 	int			i, rv = EINVAL;
1638 	int			arr, vol, disk;
1639 	uint32_t		size;
1640 	uint8_t			mbox[MFI_MBOX_SIZE];
1641 
1642 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_disk %#x\n",
1643 	    DEVNAME(sc), bd->bd_diskid);
1644 
1645 	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
1646 
1647 	/* send single element command to retrieve size for full structure */
1648 	cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1649 	if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1650 	    sizeof *cfg, cfg, NULL))
1651 		goto freeme;
1652 
1653 	size = cfg->mfc_size;
1654 	free(cfg, M_DEVBUF);
1655 
1656 	/* memory for read config */
1657 	cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
1658 	if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1659 	    size, cfg, NULL))
1660 		goto freeme;
1661 
1662 	ar = cfg->mfc_array;
1663 
1664 	/* calculate offset to ld structure */
1665 	ld = (struct mfi_ld_cfg *)(
1666 	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
1667 	    cfg->mfc_array_size * cfg->mfc_no_array);
1668 
1669 	vol = bd->bd_volid;
1670 
1671 	if (vol >= cfg->mfc_no_ld) {
1672 		/* do hotspares */
1673 		rv = mfi_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
1674 		goto freeme;
1675 	}
1676 
1677 	/* find corresponding array for ld */
1678 	for (i = 0, arr = 0; i < vol; i++)
1679 		arr += ld[i].mlc_parm.mpa_span_depth;
1680 
1681 	/* offset disk into pd list */
1682 	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;
1683 
1684 	/* offset array index into the next spans */
1685 	arr += bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
1686 
1687 	bd->bd_target = ar[arr].pd[disk].mar_enc_slot;
1688 	switch (ar[arr].pd[disk].mar_pd_state){
1689 	case MFI_PD_UNCONFIG_GOOD:
1690 		bd->bd_status = BIOC_SDUNUSED;
1691 		break;
1692 
1693 	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
1694 		bd->bd_status = BIOC_SDHOTSPARE;
1695 		break;
1696 
1697 	case MFI_PD_OFFLINE:
1698 		bd->bd_status = BIOC_SDOFFLINE;
1699 		break;
1700 
1701 	case MFI_PD_FAILED:
1702 		bd->bd_status = BIOC_SDFAILED;
1703 		break;
1704 
1705 	case MFI_PD_REBUILD:
1706 		bd->bd_status = BIOC_SDREBUILD;
1707 		break;
1708 
1709 	case MFI_PD_ONLINE:
1710 		bd->bd_status = BIOC_SDONLINE;
1711 		break;
1712 
1713 	case MFI_PD_UNCONFIG_BAD: /* XXX define new state in bio */
1714 	default:
1715 		bd->bd_status = BIOC_SDINVALID;
1716 		break;
1717 
1718 	}
1719 
1720 	/* get the remaining fields */
1721 	*((uint16_t *)&mbox) = ar[arr].pd[disk].mar_pd.mfp_id;
1722 	memset(pd, 0, sizeof(*pd));
1723 	if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
1724 	    sizeof *pd, pd, mbox))
1725 		goto freeme;
1726 
1727 	bd->bd_size = pd->mpd_size * 512; /* bytes per block */
1728 
1729 	/* if pd->mpd_enc_idx is 0 then it is not in an enclosure */
1730 	bd->bd_channel = pd->mpd_enc_idx;
1731 
1732 	inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
1733 	memcpy(vend, inqbuf->vendor, sizeof vend - 1);
1734 	vend[sizeof vend - 1] = '\0';
1735 	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));
1736 
1737 	/* XXX find a way to retrieve serial nr from drive */
1738 	/* XXX find a way to get bd_procdev */
1739 
1740 	rv = 0;
1741 freeme:
1742 	free(pd, M_DEVBUF);
1743 	free(cfg, M_DEVBUF);
1744 
1745 	return rv;
1746 }
1747 
1748 static int
1749 mfi_ioctl_alarm(struct mfi_softc *sc, struct bioc_alarm *ba)
1750 {
1751 	uint32_t		opc, dir = MFI_DATA_NONE;
1752 	int			rv = 0;
1753 	int8_t			ret;
1754 
1755 	switch(ba->ba_opcode) {
1756 	case BIOC_SADISABLE:
1757 		opc = MR_DCMD_SPEAKER_DISABLE;
1758 		break;
1759 
1760 	case BIOC_SAENABLE:
1761 		opc = MR_DCMD_SPEAKER_ENABLE;
1762 		break;
1763 
1764 	case BIOC_SASILENCE:
1765 		opc = MR_DCMD_SPEAKER_SILENCE;
1766 		break;
1767 
1768 	case BIOC_GASTATUS:
1769 		opc = MR_DCMD_SPEAKER_GET;
1770 		dir = MFI_DATA_IN;
1771 		break;
1772 
1773 	case BIOC_SATEST:
1774 		opc = MR_DCMD_SPEAKER_TEST;
1775 		break;
1776 
1777 	default:
1778 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_alarm biocalarm invalid "
1779 		    "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
1780 		return EINVAL;
1781 	}
1782 
1783 	if (mfi_mgmt_internal(sc, opc, dir, sizeof(ret), &ret, NULL))
1784 		rv = EINVAL;
1785 	else
1786 		if (ba->ba_opcode == BIOC_GASTATUS)
1787 			ba->ba_status = ret;
1788 		else
1789 			ba->ba_status = 0;
1790 
1791 	return rv;
1792 }
1793 
1794 static int
1795 mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *bb)
1796 {
1797 	int			i, found, rv = EINVAL;
1798 	uint8_t			mbox[MFI_MBOX_SIZE];
1799 	uint32_t		cmd;
1800 	struct mfi_pd_list	*pd;
1801 
1802 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink %x\n", DEVNAME(sc),
1803 	    bb->bb_status);
1804 
1805 	/* channel 0 means not in an enclosure so can't be blinked */
1806 	if (bb->bb_channel == 0)
1807 		return EINVAL;
1808 
1809 	pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
1810 
1811 	if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1812 	    MFI_PD_LIST_SIZE, pd, NULL))
1813 		goto done;
1814 
1815 	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1816 		if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
1817 		    bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
1818 			found = 1;
1819 			break;
1820 		}
1821 
1822 	if (!found)
1823 		goto done;
1824 
1825 	memset(mbox, 0, sizeof mbox);
1826 
1827 	*((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
1828 
1829 	switch (bb->bb_status) {
1830 	case BIOC_SBUNBLINK:
1831 		cmd = MR_DCMD_PD_UNBLINK;
1832 		break;
1833 
1834 	case BIOC_SBBLINK:
1835 		cmd = MR_DCMD_PD_BLINK;
1836 		break;
1837 
1838 	case BIOC_SBALARM:
1839 	default:
1840 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink biocblink invalid "
1841 		    "opcode %x\n", DEVNAME(sc), bb->bb_status);
1842 		goto done;
1843 	}
1844 
1845 
1846 	if (mfi_mgmt_internal(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox))
1847 		goto done;
1848 
1849 	rv = 0;
1850 done:
1851 	free(pd, M_DEVBUF);
1852 	return rv;
1853 }
1854 
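/*
 * mfi_ioctl_setstate: translate a bio(4) setstate request into a
 * MD_DCMD_PD_SET_STATE command.  The target disk is resolved from the PD
 * list by enclosure index and slot; the device ID goes into the first two
 * mailbox bytes and the requested state into mbox[2].
 */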
1855 static int
1856 mfi_ioctl_setstate(struct mfi_softc *sc, struct bioc_setstate *bs)
1857 {
1858 	struct mfi_pd_list	*pd;
1859 	int			i, found, rv = EINVAL;
1860 	uint8_t			mbox[MFI_MBOX_SIZE];
1861 	uint32_t		cmd;
1862 
1863 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate %x\n", DEVNAME(sc),
1864 	    bs->bs_status);
1865 
1866 	pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
1867 
1868 	if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1869 	    MFI_PD_LIST_SIZE, pd, NULL))
1870 		goto done;
1871 
1872 	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1873 		if (bs->bs_channel == pd->mpl_address[i].mpa_enc_index &&
1874 		    bs->bs_target == pd->mpl_address[i].mpa_enc_slot) {
1875 		    	found = 1;
1876 			break;
1877 		}
1878 
1879 	if (!found)
1880 		goto done;
1881 
1882 	memset(mbox, 0, sizeof mbox);
1883 
1884 	*((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
1885 
1886 	switch (bs->bs_status) {
1887 	case BIOC_SSONLINE:
1888 		mbox[2] = MFI_PD_ONLINE;
1889 		cmd = MD_DCMD_PD_SET_STATE;
1890 		break;
1891 
1892 	case BIOC_SSOFFLINE:
1893 		mbox[2] = MFI_PD_OFFLINE;
1894 		cmd = MD_DCMD_PD_SET_STATE;
1895 		break;
1896 
1897 	case BIOC_SSHOTSPARE:
1898 		mbox[2] = MFI_PD_HOTSPARE;
1899 		cmd = MD_DCMD_PD_SET_STATE;
1900 		break;
1901 /*
1902 	case BIOC_SSREBUILD:
1903 		cmd = MD_DCMD_PD_REBUILD;
1904 		break;
1905 */
1906 	default:
1907 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate invalid "
1908 		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
1909 		goto done;
1910 	}
1911 
1912 
1913 	if (mfi_mgmt_internal(sc, cmd, MFI_DATA_NONE,
1914 	    0, NULL, mbox))
1915 		goto done;
1916 
1917 	rv = 0;
1918 done:
1919 	free(pd, M_DEVBUF);
1920 	return rv;
1921 }
1922 
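/*
 * mfi_bio_hs: report a hotspare to bio(4).  Hotspares are presented as
 * pseudo-volumes numbered after the logical drives, so volid is range
 * checked against mfc_no_ld/mfc_no_hs and then used to index the hotspare
 * table at the end of the firmware configuration.  Depending on "type" the
 * result is filled into a struct bioc_vol or a struct bioc_disk.
 */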
1923 static int
1924 mfi_bio_hs(struct mfi_softc *sc, int volid, int type, void *bio_hs)
1925 {
1926 	struct mfi_conf		*cfg;
1927 	struct mfi_hotspare	*hs;
1928 	struct mfi_pd_details	*pd;
1929 	struct bioc_disk	*sdhs;
1930 	struct bioc_vol		*vdhs;
1931 	struct scsipi_inquiry_data *inqbuf;
1932 	char			vend[8+16+4+1];
1933 	int			i, rv = EINVAL;
1934 	uint32_t		size;
1935 	uint8_t			mbox[MFI_MBOX_SIZE];
1936 
1937 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs %d\n", DEVNAME(sc), volid);
1938 
1939 	if (!bio_hs)
1940 		return EINVAL;
1941 
1942 	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
1943 
1944 	/* send single element command to retrieve size for full structure */
1945 	cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1946 	if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1947 	    sizeof *cfg, cfg, NULL))
1948 		goto freeme;
1949 
1950 	size = cfg->mfc_size;
1951 	free(cfg, M_DEVBUF);
1952 
1953 	/* memory for read config */
1954 	cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
1955 	if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1956 	    size, cfg, NULL))
1957 		goto freeme;
1958 
1959 	/* calculate offset to hs structure */
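	/*
	 * The configuration buffer is assumed to be laid out as the
	 * struct mfi_conf header followed by mfc_no_array array records,
	 * mfc_no_ld logical drive records and finally the hotspare
	 * records; the pointer arithmetic below skips the first three
	 * parts to reach the hotspare table.
	 */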
1960 	hs = (struct mfi_hotspare *)(
1961 	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
1962 	    cfg->mfc_array_size * cfg->mfc_no_array +
1963 	    cfg->mfc_ld_size * cfg->mfc_no_ld);
1964 
1965 	if (volid < cfg->mfc_no_ld)
1966 		goto freeme; /* not a hotspare */
1967 
1968 	if (volid >= (cfg->mfc_no_ld + cfg->mfc_no_hs))
1969 		goto freeme; /* not a hotspare */
1970 
1971 	/* offset into hotspare structure */
1972 	i = volid - cfg->mfc_no_ld;
1973 
1974 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs i %d volid %d no_ld %d no_hs %d "
1975 	    "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
1976 	    cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
1977 
1978 	/* get pd fields */
1979 	memset(mbox, 0, sizeof mbox);
1980 	*((uint16_t *)&mbox) = hs[i].mhs_pd.mfp_id;
1981 	if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
1982 	    sizeof *pd, pd, mbox)) {
1983 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs illegal PD\n",
1984 		    DEVNAME(sc));
1985 		goto freeme;
1986 	}
1987 
1988 	switch (type) {
1989 	case MFI_MGMT_VD:
1990 		vdhs = bio_hs;
1991 		vdhs->bv_status = BIOC_SVONLINE;
1992 		vdhs->bv_size = pd->mpd_size * 512; /* bytes per block */
1993 		vdhs->bv_level = -1; /* hotspare */
1994 		vdhs->bv_nodisk = 1;
1995 		break;
1996 
1997 	case MFI_MGMT_SD:
1998 		sdhs = bio_hs;
1999 		sdhs->bd_status = BIOC_SDHOTSPARE;
2000 		sdhs->bd_size = pd->mpd_size * 512; /* bytes per block */
2001 		sdhs->bd_channel = pd->mpd_enc_idx;
2002 		sdhs->bd_target = pd->mpd_enc_slot;
2003 		inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
2004 		memcpy(vend, inqbuf->vendor, sizeof(vend) - 1);
2005 		vend[sizeof vend - 1] = '\0';
2006 		strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
2007 		break;
2008 
2009 	default:
2010 		goto freeme;
2011 	}
2012 
2013 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs 6\n", DEVNAME(sc));
2014 	rv = 0;
2015 freeme:
2016 	free(pd, M_DEVBUF);
2017 	free(cfg, M_DEVBUF);
2018 
2019 	return rv;
2020 }
2021 
2022 static int
2023 mfi_destroy_sensors(struct mfi_softc *sc)
2024 {
2025 	if (sc->sc_sme == NULL)
2026 		return 0;
2027 	sysmon_envsys_unregister(sc->sc_sme);
2028 	sc->sc_sme = NULL;
2029 	free(sc->sc_sensor, M_DEVBUF);
2030 	return 0;
2031 }
2032 
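/*
 * mfi_create_sensors: register one envsys(4) drive sensor per logical drive
 * with sysmon so that volume state changes can be monitored.  The sensors
 * are updated on demand by mfi_sensor_refresh().
 */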
2033 static int
2034 mfi_create_sensors(struct mfi_softc *sc)
2035 {
2036 	int i;
2037 	int nsensors = sc->sc_ld_cnt;
2038 	int rv;
2039 
2040 	sc->sc_sensor = malloc(sizeof(envsys_data_t) * nsensors,
2041 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2042 	if (sc->sc_sensor == NULL) {
2043 		aprint_error("%s: can't allocate envsys_data_t\n",
2044 		    DEVNAME(sc));
2045 		return ENOMEM;
2046 	}
2047 	sc->sc_sme = sysmon_envsys_create();
2048 
2049 	for (i = 0; i < nsensors; i++) {
2050 		sc->sc_sensor[i].units = ENVSYS_DRIVE;
2051 		sc->sc_sensor[i].state = ENVSYS_SINVALID;
2052 		sc->sc_sensor[i].value_cur = ENVSYS_DRIVE_EMPTY;
2053 		/* Enable monitoring for drive state changes */
2054 		sc->sc_sensor[i].flags |= ENVSYS_FMONSTCHANGED;
2055 		/* logical drives */
2056 		snprintf(sc->sc_sensor[i].desc,
2057 		    sizeof(sc->sc_sensor[i].desc), "%s:%d",
2058 		    DEVNAME(sc), i);
2059 		if (sysmon_envsys_sensor_attach(sc->sc_sme,
2060 						&sc->sc_sensor[i]))
2061 			goto out;
2062 	}
2063 
2064 	sc->sc_sme->sme_name = DEVNAME(sc);
2065 	sc->sc_sme->sme_cookie = sc;
2066 	sc->sc_sme->sme_refresh = mfi_sensor_refresh;
2067 	rv = sysmon_envsys_register(sc->sc_sme);
2068 	if (rv != 0) {
2069 		aprint_error("%s: unable to register with sysmon (rv = %d)\n",
2070 		    DEVNAME(sc), rv);
2071 		goto out;
2072 	}
2073 	return 0;
2074 
2075 out:
2076 	free(sc->sc_sensor, M_DEVBUF);
2077 	sysmon_envsys_destroy(sc->sc_sme);
2078 	sc->sc_sme = NULL;
2079 	return EINVAL;
2080 }
2081 
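/*
 * mfi_sensor_refresh: sysmon refresh callback.  It reuses the bio(4) volume
 * query to fetch the current volume status and maps it onto envsys drive
 * states.  The query is issued under KERNEL_LOCK and at splbio, which is
 * presumably the protection the rest of the command path expects.
 */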
2082 static void
2083 mfi_sensor_refresh(struct sysmon_envsys *sme, envsys_data_t *edata)
2084 {
2085 	struct mfi_softc	*sc = sme->sme_cookie;
2086 	struct bioc_vol		bv;
2087 	int s;
2088 	int error;
2089 
2090 	if (edata->sensor >= sc->sc_ld_cnt)
2091 		return;
2092 
2093 	memset(&bv, 0, sizeof(bv));
2094 	bv.bv_volid = edata->sensor;
2095 	KERNEL_LOCK(1, curlwp);
2096 	s = splbio();
2097 	error = mfi_ioctl_vol(sc, &bv);
2098 	splx(s);
2099 	KERNEL_UNLOCK_ONE(curlwp);
2100 	if (error)
2101 		return;
2102 
2103 	switch(bv.bv_status) {
2104 	case BIOC_SVOFFLINE:
2105 		edata->value_cur = ENVSYS_DRIVE_FAIL;
2106 		edata->state = ENVSYS_SCRITICAL;
2107 		break;
2108 
2109 	case BIOC_SVDEGRADED:
2110 		edata->value_cur = ENVSYS_DRIVE_PFAIL;
2111 		edata->state = ENVSYS_SCRITICAL;
2112 		break;
2113 
2114 	case BIOC_SVSCRUB:
2115 	case BIOC_SVONLINE:
2116 		edata->value_cur = ENVSYS_DRIVE_ONLINE;
2117 		edata->state = ENVSYS_SVALID;
2118 		break;
2119 
2120 	case BIOC_SVINVALID:
2121 		/* FALLTHROUGH */
2122 	default:
2123 		edata->value_cur = 0; /* unknown */
2124 		edata->state = ENVSYS_SINVALID;
2125 	}
2126 }
2127 
2128 #endif /* NBIO > 0 */
2129 
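/*
 * Per-controller-family register glue.  Each supported family (xscale, ppc,
 * gen2, skinny) provides its own routines for reading the firmware state,
 * enabling, disabling and acknowledging interrupts, and posting frames to
 * the inbound queue; the set matching the controller is presumably selected
 * at attach time.
 */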
2130 static uint32_t
2131 mfi_xscale_fw_state(struct mfi_softc *sc)
2132 {
2133 	return mfi_read(sc, MFI_OMSG0);
2134 }
2135 
2136 static void
2137 mfi_xscale_intr_dis(struct mfi_softc *sc)
2138 {
2139 	mfi_write(sc, MFI_OMSK, 0);
2140 }
2141 
2142 static void
2143 mfi_xscale_intr_ena(struct mfi_softc *sc)
2144 {
2145 	mfi_write(sc, MFI_OMSK, MFI_ENABLE_INTR);
2146 }
2147 
2148 static int
2149 mfi_xscale_intr(struct mfi_softc *sc)
2150 {
2151 	uint32_t status;
2152 
2153 	status = mfi_read(sc, MFI_OSTS);
2154 	if (!ISSET(status, MFI_OSTS_INTR_VALID))
2155 		return 0;
2156 
2157 	/* write status back to acknowledge interrupt */
2158 	mfi_write(sc, MFI_OSTS, status);
2159 	return 1;
2160 }
2161 
2162 static void
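/*
 * mfi_xscale_post: hand a command frame to the firmware.  The frame and
 * sense buffers are synced for device access first; the inbound queue
 * register is then written with the frame's physical address shifted right
 * by 3 and the extra-frame count OR'd into the low bits, which presumably
 * relies on the frames being sufficiently aligned.
 */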
2163 mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2164 {
2165 	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
2166 	    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
2167 	    sc->sc_frames_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2168 	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
2169 	    ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
2170 	    MFI_SENSE_SIZE, BUS_DMASYNC_PREREAD);
2171 
2172 	mfi_write(sc, MFI_IQP, (ccb->ccb_pframe >> 3) |
2173 	    ccb->ccb_extra_frames);
2174 }
2175 
2176 static uint32_t
2177 mfi_ppc_fw_state(struct mfi_softc *sc)
2178 {
2179 	return mfi_read(sc, MFI_OSP);
2180 }
2181 
2182 static void
2183 mfi_ppc_intr_dis(struct mfi_softc *sc)
2184 {
2185 	/* Taking a wild guess --dyoung */
2186 	mfi_write(sc, MFI_OMSK, ~(uint32_t)0x0);
2187 	mfi_write(sc, MFI_ODC, 0xffffffff);
2188 }
2189 
2190 static void
2191 mfi_ppc_intr_ena(struct mfi_softc *sc)
2192 {
2193 	mfi_write(sc, MFI_ODC, 0xffffffff);
2194 	mfi_write(sc, MFI_OMSK, ~0x80000004);
2195 }
2196 
2197 static int
2198 mfi_ppc_intr(struct mfi_softc *sc)
2199 {
2200 	uint32_t status;
2201 
2202 	status = mfi_read(sc, MFI_OSTS);
2203 	if (!ISSET(status, MFI_OSTS_PPC_INTR_VALID))
2204 		return 0;
2205 
2206 	/* write status back to acknowledge interrupt */
2207 	mfi_write(sc, MFI_ODC, status);
2208 	return 1;
2209 }
2210 
2211 static void
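/*
 * mfi_ppc_post: on ppc-class controllers the inbound queue register takes
 * the frame's physical address directly, with bit 0 set and the extra-frame
 * count shifted up by one.  The gen2 handler below uses the same encoding,
 * and the skinny handler writes the equivalent value to the IQPL/IQPH pair.
 */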
2212 mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2213 {
2214 	mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
2215 	    (ccb->ccb_extra_frames << 1));
2216 }
2217 
2218 uint32_t
2219 mfi_gen2_fw_state(struct mfi_softc *sc)
2220 {
2221 	return (mfi_read(sc, MFI_OSP));
2222 }
2223 
2224 void
2225 mfi_gen2_intr_dis(struct mfi_softc *sc)
2226 {
2227 	mfi_write(sc, MFI_OMSK, 0xffffffff);
2228 	mfi_write(sc, MFI_ODC, 0xffffffff);
2229 }
2230 
2231 void
2232 mfi_gen2_intr_ena(struct mfi_softc *sc)
2233 {
2234 	mfi_write(sc, MFI_ODC, 0xffffffff);
2235 	mfi_write(sc, MFI_OMSK, ~MFI_OSTS_GEN2_INTR_VALID);
2236 }
2237 
2238 int
2239 mfi_gen2_intr(struct mfi_softc *sc)
2240 {
2241 	uint32_t status;
2242 
2243 	status = mfi_read(sc, MFI_OSTS);
2244 	if (!ISSET(status, MFI_OSTS_GEN2_INTR_VALID))
2245 		return (0);
2246 
2247 	/* write status back to acknowledge interrupt */
2248 	mfi_write(sc, MFI_ODC, status);
2249 
2250 	return (1);
2251 }
2252 
2253 void
2254 mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2255 {
2256 	mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
2257 	    (ccb->ccb_extra_frames << 1));
2258 }
2259 
2260 uint32_t
2261 mfi_skinny_fw_state(struct mfi_softc *sc)
2262 {
2263 	return (mfi_read(sc, MFI_OSP));
2264 }
2265 
2266 void
2267 mfi_skinny_intr_dis(struct mfi_softc *sc)
2268 {
2269 	mfi_write(sc, MFI_OMSK, 0);
2270 }
2271 
2272 void
2273 mfi_skinny_intr_ena(struct mfi_softc *sc)
2274 {
2275 	mfi_write(sc, MFI_OMSK, ~0x00000001);
2276 }
2277 
2278 int
2279 mfi_skinny_intr(struct mfi_softc *sc)
2280 {
2281 	uint32_t status;
2282 
2283 	status = mfi_read(sc, MFI_OSTS);
2284 	if (!ISSET(status, MFI_OSTS_SKINNY_INTR_VALID))
2285 		return (0);
2286 
2287 	/* write status back to acknowledge interrupt */
2288 	mfi_write(sc, MFI_OSTS, status);
2289 
2290 	return (1);
2291 }
2292 
2293 void
2294 mfi_skinny_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2295 {
2296 	mfi_write(sc, MFI_IQPL, 0x1 | ccb->ccb_pframe |
2297 	    (ccb->ccb_extra_frames << 1));
2298 	mfi_write(sc, MFI_IQPH, 0x00000000);
2299 }
2300