xref: /openbsd-src/sys/dev/pci/mfii.c (revision d59bb9942320b767f2a19aaa7690c8c6e30b724c)
1 /* $OpenBSD: mfii.c,v 1.42 2017/02/11 04:12:28 dlg Exp $ */
2 
3 /*
4  * Copyright (c) 2012 David Gwynne <dlg@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "bio.h"
20 
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/malloc.h>
24 #include <sys/device.h>
25 #include <sys/types.h>
26 #include <sys/pool.h>
27 #include <sys/task.h>
28 #include <sys/atomic.h>
29 
30 #include <dev/pci/pcidevs.h>
31 #include <dev/pci/pcivar.h>
32 
33 #include <machine/bus.h>
34 
35 #include <scsi/scsi_all.h>
36 #include <scsi/scsi_disk.h>
37 #include <scsi/scsiconf.h>
38 
39 #include <dev/ic/mfireg.h>
40 #include <dev/pci/mpiireg.h>
41 
#define	MFII_BAR		0x14	/* register BAR (64-bit mem) */
#define	MFII_PCI_MEMSIZE	0x2000 /* 8k */

#define MFII_OSTS_INTR_VALID	0x00000009
#define MFII_RPI		0x6c /* reply post host index */

/* request descriptor types (encoded in the descriptor flags byte) */
#define MFII_REQ_TYPE_SCSI	MPII_REQ_DESCR_SCSI_IO
#define MFII_REQ_TYPE_LDIO	(0x7 << 1)
#define MFII_REQ_TYPE_MFA	(0x1 << 1)
#define MFII_REQ_TYPE_NO_LOCK	(0x2 << 1)
#define MFII_REQ_TYPE_HI_PRI	(0x6 << 1)

/* build a request descriptor pointing at an mfi frame at dva _a */
#define MFII_REQ_MFA(_a)	htole64((_a) | MFII_REQ_TYPE_MFA)

/* firmware-specific mpii io function codes */
#define MFII_FUNCTION_PASSTHRU_IO			(0xf0)
#define MFII_FUNCTION_LDIO_REQUEST			(0xf1)
58 
/*
 * Request descriptor written to the hardware to post a command.
 * flags carries one of the MFII_REQ_TYPE_* values; smid identifies
 * the request frame slot the command occupies.
 */
struct mfii_request_descr {
	u_int8_t	flags;
	u_int8_t	msix_index;
	u_int16_t	smid;

	u_int16_t	lmid;
	u_int16_t	dev_handle;
} __packed;
67 
/* values or'ed into mfii_raid_context type_nseg */
#define MFII_RAID_CTX_IO_TYPE_SYSPD	(0x1 << 4)
#define MFII_RAID_CTX_TYPE_CUDA		(0x2 << 4)

/*
 * Firmware RAID context that sits between the mpii scsi io request
 * and the sgl inside each request frame.  The layout is dictated by
 * the firmware interface; do not reorder fields.
 */
struct mfii_raid_context {
	u_int8_t	type_nseg;
	u_int8_t	_reserved1;
	u_int16_t	timeout_value;

	u_int8_t	reg_lock_flags;
#define MFII_RAID_CTX_RL_FLAGS_SEQNO_EN	(0x08)
#define MFII_RAID_CTX_RL_FLAGS_CPU0	(0x00)
#define MFII_RAID_CTX_RL_FLAGS_CPU1	(0x10)
#define MFII_RAID_CTX_RL_FLAGS_CUDA	(0x80)
	u_int8_t	_reserved2;
	u_int16_t	virtual_disk_target_id;

	u_int64_t	reg_lock_row_lba;

	u_int32_t	reg_lock_length;

	u_int16_t	next_lm_id;
	u_int8_t	ex_status;
	u_int8_t	status;		/* command status from the firmware */

	u_int8_t	raid_flags;
	u_int8_t	num_sge;
	u_int16_t	config_seq_num;

	u_int8_t	span_arm;
	u_int8_t	_reserved3[3];
} __packed;
99 
/* IEEE-style scatter-gather element used in request frame sgls */
struct mfii_sge {
	u_int64_t	sg_addr;
	u_int32_t	sg_len;
	u_int16_t	_reserved;
	u_int8_t	sg_next_chain_offset;
	u_int8_t	sg_flags;
} __packed;

/* sg_flags: address space selector plus chain/end-of-list markers */
#define MFII_SGE_ADDR_MASK		(0x03)
#define MFII_SGE_ADDR_SYSTEM		(0x00)
#define MFII_SGE_ADDR_IOCDDR		(0x01)
#define MFII_SGE_ADDR_IOCPLB		(0x02)
#define MFII_SGE_ADDR_IOCPLBNTA		(0x03)
#define MFII_SGE_END_OF_LIST		(0x40)
#define MFII_SGE_CHAIN_ELEMENT		(0x80)
115 
/* size of one request frame slot in the sc_requests dma memory */
#define MFII_REQUEST_SIZE	256

#define MR_DCMD_LD_MAP_GET_INFO			0x0300e101

#define MFII_MAX_ROW		32
#define MFII_MAX_ARRAY		128

/*
 * Structures describing the result of MR_DCMD_LD_MAP_GET_INFO;
 * used to translate pd target ids to firmware device handles.
 */
struct mfii_array_map {
	uint16_t		mam_pd[MFII_MAX_ROW];
} __packed;

struct mfii_dev_handle {
	uint16_t		mdh_cur_handle;	/* handle currently in use */
	uint8_t			mdh_valid;
	uint8_t			mdh_reserved;
	uint16_t		mdh_handle[2];
} __packed;

struct mfii_ld_map {
	uint32_t		mlm_total_size;
	uint32_t		mlm_reserved1[5];
	uint32_t		mlm_num_lds;
	uint32_t		mlm_reserved2;
	uint8_t			mlm_tgtid_to_ld[2 * MFI_MAX_LD];
	uint8_t			mlm_pd_timeout;
	uint8_t			mlm_reserved3[7];
	struct mfii_array_map	mlm_am[MFII_MAX_ARRAY];
	struct mfii_dev_handle	mlm_dev_handle[MFI_MAX_PD];
} __packed;
145 
/*
 * Task management frame: the mpii request and its reply share fixed
 * 128-byte slots within one request frame allocation.
 */
struct mfii_task_mgmt {
	union {
		uint8_t			request[128];
		struct mpii_msg_scsi_task_request
					mpii_request;
	} __packed __aligned(8);

	union {
		uint8_t			reply[128];
		uint32_t		flags;
#define MFII_TASK_MGMT_FLAGS_LD				(1 << 0)
#define MFII_TASK_MGMT_FLAGS_PD				(1 << 1)
		struct mpii_msg_scsi_task_reply
					mpii_reply;
	} __packed __aligned(8);
} __packed __aligned(8);
162 
/*
 * Wrapper for one contiguous dma-able allocation: the dmamap, its
 * single backing segment, the size and the kernel mapping.
 */
struct mfii_dmamem {
	bus_dmamap_t		mdm_map;
	bus_dma_segment_t	mdm_seg;
	size_t			mdm_size;
	caddr_t			mdm_kva;
};
/* accessors for the map, length, device address and kernel address */
#define MFII_DMA_MAP(_mdm)	((_mdm)->mdm_map)
#define MFII_DMA_LEN(_mdm)	((_mdm)->mdm_size)
#define MFII_DMA_DVA(_mdm)	((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MFII_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
173 
struct mfii_softc;

/*
 * Per-command control block.  Each ccb owns a slice of the request,
 * sense and sgl dma allocations (identified by ccb_smid) plus a
 * dmamap for the data transfer.
 */
struct mfii_ccb {
	/* kva, device address and offset of this ccb's request frame */
	void			*ccb_request;
	u_int64_t		ccb_request_dva;
	bus_addr_t		ccb_request_offset;

	/* sense buffer slice; also reused to hold dcmd frames */
	struct mfi_sense	*ccb_sense;
	u_int64_t		ccb_sense_dva;
	bus_addr_t		ccb_sense_offset;

	/* external sgl slice for commands with many segments */
	struct mfii_sge		*ccb_sgl;
	u_int64_t		ccb_sgl_dva;
	bus_addr_t		ccb_sgl_offset;
	u_int			ccb_sgl_len;

	/* descriptor posted to the hardware to start this command */
	struct mfii_request_descr ccb_req;

	bus_dmamap_t		ccb_dmamap;

	/* data for sgl */
	void			*ccb_data;
	size_t			ccb_len;

	int			ccb_direction;
#define MFII_DATA_NONE			0
#define MFII_DATA_IN			1
#define MFII_DATA_OUT			2

	/* completion callback and its private argument */
	void			*ccb_cookie;
	void			(*ccb_done)(struct mfii_softc *,
				    struct mfii_ccb *);

	u_int32_t		ccb_flags;
#define MFI_CCB_F_ERR			(1<<0)
	u_int			ccb_smid;
	u_int			ccb_refcnt;
	SIMPLEQ_ENTRY(mfii_ccb)	ccb_link;
};
SIMPLEQ_HEAD(mfii_ccb_list, mfii_ccb);
214 
/*
 * State for the physical disk (syspd) bus attached alongside the
 * logical disk bus.  pd_dev_handles is an srp-published array
 * mapping pd target id to firmware device handle.
 */
struct mfii_pd_softc {
	struct scsi_link	pd_link;
	struct scsibus_softc	*pd_scsibus;
	struct srp		pd_dev_handles;
	uint8_t			pd_timeout;
};

/*
 * Per controller generation parameters: how LD io requests are typed
 * and flagged, and which sge flags mark chain and end-of-list.
 */
struct mfii_iop {
	u_int8_t ldio_req_type;
	u_int8_t ldio_ctx_type_nseg;
	u_int8_t ldio_ctx_reg_lock_flags;
	u_int8_t sge_flag_chain;
	u_int8_t sge_flag_eol;
};
229 
struct mfii_softc {
	struct device		sc_dev;
	const struct mfii_iop	*sc_iop;	/* per-generation parameters */

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	void			*sc_ih;		/* interrupt handle */

	struct mutex		sc_ccb_mtx;
	struct mutex		sc_post_mtx;

	/* limits read from the firmware state register at attach */
	u_int			sc_max_cmds;
	u_int			sc_max_sgl;

	/* reply post queue shared with the firmware */
	u_int			sc_reply_postq_depth;
	u_int			sc_reply_postq_index;
	struct mutex		sc_reply_postq_mtx;
	struct mfii_dmamem	*sc_reply_postq;

	/* dma memory sliced up between the ccbs */
	struct mfii_dmamem	*sc_requests;
	struct mfii_dmamem	*sc_sense;
	struct mfii_dmamem	*sc_sgl;

	struct mfii_ccb		*sc_ccb;
	struct mfii_ccb_list	sc_ccb_freeq;

	/* asynchronous event notification state */
	struct mfii_ccb		*sc_aen_ccb;
	struct task		sc_aen_task;

	/* ccbs queued for task-management abort */
	struct mutex		sc_abort_mtx;
	struct mfii_ccb_list	sc_abort_list;
	struct task		sc_abort_task;

	struct scsi_link	sc_link;	/* logical disk bus */
	struct scsibus_softc	*sc_scsibus;
	struct mfii_pd_softc	*sc_pd;		/* physical disk bus */
	struct scsi_iopool	sc_iopool;

	struct mfi_ctrl_info	sc_info;
};
276 
277 int		mfii_match(struct device *, void *, void *);
278 void		mfii_attach(struct device *, struct device *, void *);
279 int		mfii_detach(struct device *, int);
280 
/* autoconf glue */
struct cfattach mfii_ca = {
	sizeof(struct mfii_softc),
	mfii_match,
	mfii_attach,
	mfii_detach
};

struct cfdriver mfii_cd = {
	NULL,
	"mfii",
	DV_DULL
};
293 
294 void		mfii_scsi_cmd(struct scsi_xfer *);
295 void		mfii_scsi_cmd_done(struct mfii_softc *, struct mfii_ccb *);
296 
/* scsi midlayer entry points for the logical disk bus */
struct scsi_adapter mfii_switch = {
	mfii_scsi_cmd,
	scsi_minphys,
	NULL, /* probe */
	NULL, /* unprobe */
	NULL  /* ioctl */
};
304 
305 void		mfii_pd_scsi_cmd(struct scsi_xfer *);
306 int		mfii_pd_scsi_probe(struct scsi_link *);
307 
/* scsi midlayer entry points for the physical disk (syspd) bus */
struct scsi_adapter mfii_pd_switch = {
	mfii_pd_scsi_cmd,
	scsi_minphys,
	mfii_pd_scsi_probe
};
313 
314 #define DEVNAME(_sc)		((_sc)->sc_dev.dv_xname)
315 
316 u_int32_t		mfii_read(struct mfii_softc *, bus_size_t);
317 void			mfii_write(struct mfii_softc *, bus_size_t, u_int32_t);
318 
319 struct mfii_dmamem *	mfii_dmamem_alloc(struct mfii_softc *, size_t);
320 void			mfii_dmamem_free(struct mfii_softc *,
321 			    struct mfii_dmamem *);
322 
323 void *			mfii_get_ccb(void *);
324 void			mfii_put_ccb(void *, void *);
325 int			mfii_init_ccb(struct mfii_softc *);
326 void			mfii_scrub_ccb(struct mfii_ccb *);
327 
328 int			mfii_transition_firmware(struct mfii_softc *);
329 int			mfii_initialise_firmware(struct mfii_softc *);
330 int			mfii_get_info(struct mfii_softc *);
331 int			mfii_syspd(struct mfii_softc *);
332 
333 void			mfii_start(struct mfii_softc *, struct mfii_ccb *);
334 void			mfii_done(struct mfii_softc *, struct mfii_ccb *);
335 int			mfii_poll(struct mfii_softc *, struct mfii_ccb *);
336 void			mfii_poll_done(struct mfii_softc *, struct mfii_ccb *);
337 int			mfii_exec(struct mfii_softc *, struct mfii_ccb *);
338 void			mfii_exec_done(struct mfii_softc *, struct mfii_ccb *);
339 int			mfii_my_intr(struct mfii_softc *);
340 int			mfii_intr(void *);
341 void			mfii_postq(struct mfii_softc *);
342 
343 int			mfii_load_ccb(struct mfii_softc *, struct mfii_ccb *,
344 			    void *, int);
345 int			mfii_load_mfa(struct mfii_softc *, struct mfii_ccb *,
346 			    void *, int);
347 
348 int			mfii_mfa_poll(struct mfii_softc *, struct mfii_ccb *);
349 
350 int			mfii_mgmt(struct mfii_softc *, struct mfii_ccb *,
351 			    u_int32_t, const union mfi_mbox *,
352 			    void *, size_t, int);
353 
354 int			mfii_scsi_cmd_io(struct mfii_softc *,
355 			    struct scsi_xfer *);
356 int			mfii_scsi_cmd_cdb(struct mfii_softc *,
357 			    struct scsi_xfer *);
358 int			mfii_pd_scsi_cmd_cdb(struct mfii_softc *,
359 			    struct scsi_xfer *);
360 void			mfii_scsi_cmd_tmo(void *);
361 
362 int			mfii_dev_handles_update(struct mfii_softc *sc);
363 void			mfii_dev_handles_dtor(void *, void *);
364 
365 void			mfii_abort_task(void *);
366 void			mfii_abort(struct mfii_softc *, struct mfii_ccb *,
367 			    uint16_t, uint16_t, uint8_t, uint32_t);
368 void			mfii_scsi_cmd_abort_done(struct mfii_softc *,
369 			    struct mfii_ccb *);
370 
371 int			mfii_aen_register(struct mfii_softc *);
372 void			mfii_aen_start(struct mfii_softc *, struct mfii_ccb *,
373 			    struct mfii_dmamem *, uint32_t);
374 void			mfii_aen_done(struct mfii_softc *, struct mfii_ccb *);
375 void			mfii_aen(void *);
376 void			mfii_aen_unregister(struct mfii_softc *);
377 
378 void			mfii_aen_pd_insert(struct mfii_softc *,
379 			    const struct mfi_evtarg_pd_address *);
380 void			mfii_aen_pd_remove(struct mfii_softc *,
381 			    const struct mfi_evtarg_pd_address *);
382 void			mfii_aen_pd_state_change(struct mfii_softc *,
383 			    const struct mfi_evtarg_pd_state *);
384 
385 /*
386  * mfii boards support asynchronous (and non-polled) completion of
387  * dcmds by proxying them through a passthru mpii command that points
388  * at a dcmd frame. since the passthru command is submitted like
389  * the scsi commands using an SMID in the request descriptor,
 * ccb_request memory must contain the passthru command because
391  * that is what the SMID refers to. this means ccb_request cannot
392  * contain the dcmd. rather than allocating separate dma memory to
393  * hold the dcmd, we reuse the sense memory buffer for it.
394  */
395 
396 void			mfii_dcmd_start(struct mfii_softc *,
397 			    struct mfii_ccb *);
398 
/* zero the dcmd frame (stored in the ccb sense buffer) before reuse */
static inline void
mfii_dcmd_scrub(struct mfii_ccb *ccb)
{
	memset(ccb->ccb_sense, 0, sizeof(*ccb->ccb_sense));
}
404 
/*
 * Re-type the ccb sense buffer as a dcmd frame; the CTASSERT proves
 * the frame fits inside the sense allocation.
 */
static inline struct mfi_dcmd_frame *
mfii_dcmd_frame(struct mfii_ccb *ccb)
{
	CTASSERT(sizeof(struct mfi_dcmd_frame) <= sizeof(*ccb->ccb_sense));
	return ((struct mfi_dcmd_frame *)ccb->ccb_sense);
}
411 
/* dma sync for the dcmd frame living in this ccb's sense slice */
static inline void
mfii_dcmd_sync(struct mfii_softc *sc, struct mfii_ccb *ccb, int flags)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sense),
	    ccb->ccb_sense_offset, sizeof(*ccb->ccb_sense), flags);
}
418 
/* read the firmware state/status word from the outbound scratchpad */
#define mfii_fw_state(_sc) mfii_read((_sc), MFI_OSP)
420 
/* iop parameters for thunderbolt (SAS2208) generation controllers */
const struct mfii_iop mfii_iop_thunderbolt = {
	MFII_REQ_TYPE_LDIO,
	0,
	0,
	MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA,
	0
};

/*
 * a lot of these values depend on us not implementing fastpath yet.
 */
const struct mfii_iop mfii_iop_25 = {
	MFII_REQ_TYPE_NO_LOCK,
	MFII_RAID_CTX_TYPE_CUDA | 0x1,
	MFII_RAID_CTX_RL_FLAGS_CPU0, /* | MFII_RAID_CTX_RL_FLAGS_SEQNO_EN */
	MFII_SGE_CHAIN_ELEMENT,
	MFII_SGE_END_OF_LIST
};
439 
/* pci id -> iop parameter mapping used by match/attach */
struct mfii_device {
	pcireg_t		mpd_vendor;
	pcireg_t		mpd_product;
	const struct mfii_iop	*mpd_iop;
};

const struct mfii_device mfii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_2208,
	    &mfii_iop_thunderbolt },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3008,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3108,
	    &mfii_iop_25 }
};
454 
455 const struct mfii_iop *mfii_find_iop(struct pci_attach_args *);
456 
457 const struct mfii_iop *
458 mfii_find_iop(struct pci_attach_args *pa)
459 {
460 	const struct mfii_device *mpd;
461 	int i;
462 
463 	for (i = 0; i < nitems(mfii_devices); i++) {
464 		mpd = &mfii_devices[i];
465 
466 		if (mpd->mpd_vendor == PCI_VENDOR(pa->pa_id) &&
467 		    mpd->mpd_product == PCI_PRODUCT(pa->pa_id))
468 			return (mpd->mpd_iop);
469 	}
470 
471 	return (NULL);
472 }
473 
474 int
475 mfii_match(struct device *parent, void *match, void *aux)
476 {
477 	return ((mfii_find_iop(aux) != NULL) ? 1 : 0);
478 }
479 
480 void
481 mfii_attach(struct device *parent, struct device *self, void *aux)
482 {
483 	struct mfii_softc *sc = (struct mfii_softc *)self;
484 	struct pci_attach_args *pa = aux;
485 	pcireg_t memtype;
486 	pci_intr_handle_t ih;
487 	struct scsibus_attach_args saa;
488 	u_int32_t status;
489 
490 	/* init sc */
491 	sc->sc_iop = mfii_find_iop(aux);
492 	sc->sc_dmat = pa->pa_dmat;
493 	SIMPLEQ_INIT(&sc->sc_ccb_freeq);
494 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
495 	mtx_init(&sc->sc_post_mtx, IPL_BIO);
496 	mtx_init(&sc->sc_reply_postq_mtx, IPL_BIO);
497 	scsi_iopool_init(&sc->sc_iopool, sc, mfii_get_ccb, mfii_put_ccb);
498 
499 	sc->sc_aen_ccb = NULL;
500 	task_set(&sc->sc_aen_task, mfii_aen, sc);
501 
502 	mtx_init(&sc->sc_abort_mtx, IPL_BIO);
503 	SIMPLEQ_INIT(&sc->sc_abort_list);
504 	task_set(&sc->sc_abort_task, mfii_abort_task, sc);
505 
506 	/* wire up the bus shizz */
507 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, MFII_BAR);
508 	if (pci_mapreg_map(pa, MFII_BAR, memtype, 0,
509 	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios, MFII_PCI_MEMSIZE)) {
510 		printf(": unable to map registers\n");
511 		return;
512 	}
513 
514 	/* disable interrupts */
515 	mfii_write(sc, MFI_OMSK, 0xffffffff);
516 
517 	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
518 		printf(": unable to map interrupt\n");
519 		goto pci_unmap;
520 	}
521 	printf(": %s\n", pci_intr_string(pa->pa_pc, ih));
522 
523 	/* lets get started */
524 	if (mfii_transition_firmware(sc))
525 		goto pci_unmap;
526 
527 	status = mfii_fw_state(sc);
528 	sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
529 	sc->sc_max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
530 
531 	/* sense memory */
532 	CTASSERT(sizeof(struct mfi_sense) == MFI_SENSE_SIZE);
533 	sc->sc_sense = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
534 	if (sc->sc_sense == NULL) {
535 		printf("%s: unable to allocate sense memory\n", DEVNAME(sc));
536 		goto pci_unmap;
537 	}
538 
539 	sc->sc_reply_postq_depth = roundup(sc->sc_max_cmds, 16);
540 
541 	sc->sc_reply_postq = mfii_dmamem_alloc(sc,
542 	    sc->sc_reply_postq_depth * sizeof(struct mpii_reply_descr));
543 	if (sc->sc_reply_postq == NULL)
544 		goto free_sense;
545 
546 	memset(MFII_DMA_KVA(sc->sc_reply_postq), 0xff,
547 	    MFII_DMA_LEN(sc->sc_reply_postq));
548 
549 	sc->sc_requests = mfii_dmamem_alloc(sc,
550 	    MFII_REQUEST_SIZE * (sc->sc_max_cmds + 1));
551 	if (sc->sc_requests == NULL)
552 		goto free_reply_postq;
553 
554 	sc->sc_sgl = mfii_dmamem_alloc(sc, sc->sc_max_cmds *
555 	    sizeof(struct mfii_sge) * sc->sc_max_sgl);
556 	if (sc->sc_sgl == NULL)
557 		goto free_requests;
558 
559 	if (mfii_init_ccb(sc) != 0) {
560 		printf("%s: could not init ccb list\n", DEVNAME(sc));
561 		goto free_sgl;
562 	}
563 
564 	/* kickstart firmware with all addresses and pointers */
565 	if (mfii_initialise_firmware(sc) != 0) {
566 		printf("%s: could not initialize firmware\n", DEVNAME(sc));
567 		goto free_sgl;
568 	}
569 
570 	if (mfii_get_info(sc) != 0) {
571 		printf("%s: could not retrieve controller information\n",
572 		    DEVNAME(sc));
573 		goto free_sgl;
574 	}
575 
576 	printf("%s: \"%s\", firmware %s", DEVNAME(sc),
577 	    sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
578 	if (letoh16(sc->sc_info.mci_memory_size) > 0)
579 		printf(", %uMB cache", letoh16(sc->sc_info.mci_memory_size));
580 	printf("\n");
581 
582 	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO,
583 	    mfii_intr, sc, DEVNAME(sc));
584 	if (sc->sc_ih == NULL)
585 		goto free_sgl;
586 
587 	sc->sc_link.openings = sc->sc_max_cmds;
588 	sc->sc_link.adapter_softc = sc;
589 	sc->sc_link.adapter = &mfii_switch;
590 	sc->sc_link.adapter_target = sc->sc_info.mci_max_lds;
591 	sc->sc_link.adapter_buswidth = sc->sc_info.mci_max_lds;
592 	sc->sc_link.pool = &sc->sc_iopool;
593 
594 	memset(&saa, 0, sizeof(saa));
595 	saa.saa_sc_link = &sc->sc_link;
596 
597 	config_found(&sc->sc_dev, &saa, scsiprint);
598 
599 	mfii_syspd(sc);
600 
601 #ifdef notyet
602 	if (mfii_aen_register(sc) != 0) {
603 		/* error printed by mfii_aen_register */
604 		goto intr_disestablish;
605 	}
606 #endif
607 
608 	/* enable interrupts */
609 	mfii_write(sc, MFI_OSTS, 0xffffffff);
610 	mfii_write(sc, MFI_OMSK, ~MFII_OSTS_INTR_VALID);
611 
612 	return;
613 #ifdef notyet
614 intr_disestablish:
615 	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
616 #endif
617 free_sgl:
618 	mfii_dmamem_free(sc, sc->sc_sgl);
619 free_requests:
620 	mfii_dmamem_free(sc, sc->sc_requests);
621 free_reply_postq:
622 	mfii_dmamem_free(sc, sc->sc_reply_postq);
623 free_sense:
624 	mfii_dmamem_free(sc, sc->sc_sense);
625 pci_unmap:
626 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
627 }
628 
/* garbage collector for retired pd dev-handle maps published via srp */
struct srp_gc mfii_dev_handles_gc =
    SRP_GC_INITIALIZER(mfii_dev_handles_dtor, NULL);
631 
/*
 * Translate a pd target id to its current firmware device handle.
 * The srp enter/leave pair keeps the published map alive while we
 * read from it.
 */
static inline uint16_t
mfii_dev_handle(struct mfii_softc *sc, uint16_t target)
{
	struct srp_ref sr;
	uint16_t *map, handle;

	map = srp_enter(&sr, &sc->sc_pd->pd_dev_handles);
	handle = map[target];
	srp_leave(&sr);

	return (handle);
}
644 
645 int
646 mfii_dev_handles_update(struct mfii_softc *sc)
647 {
648 	struct mfii_ld_map *lm;
649 	uint16_t *dev_handles = NULL;
650 	struct mfii_ccb *ccb;
651 	int i;
652 	int rv = 0;
653 
654 	lm = malloc(sizeof(*lm), M_TEMP, M_WAITOK|M_ZERO);
655 	ccb = scsi_io_get(&sc->sc_iopool, 0);
656 
657 	rv = mfii_mgmt(sc, ccb, MR_DCMD_LD_MAP_GET_INFO, NULL,
658 	    lm, sizeof(*lm), SCSI_DATA_IN|SCSI_NOSLEEP);
659 
660 	scsi_io_put(&sc->sc_iopool, ccb);
661 	if (rv != 0) {
662 		rv = EIO;
663 		goto free_lm;
664 	}
665 
666 	dev_handles = mallocarray(MFI_MAX_PD, sizeof(*dev_handles),
667 	    M_DEVBUF, M_WAITOK);
668 
669 	for (i = 0; i < MFI_MAX_PD; i++)
670 		dev_handles[i] = lm->mlm_dev_handle[i].mdh_cur_handle;
671 
672 	/* commit the updated info */
673 	sc->sc_pd->pd_timeout = lm->mlm_pd_timeout;
674 	srp_update_locked(&mfii_dev_handles_gc,
675 	    &sc->sc_pd->pd_dev_handles, dev_handles);
676 
677 free_lm:
678 	free(lm, M_TEMP, sizeof(*lm));
679 
680 	return (rv);
681 }
682 
683 void
684 mfii_dev_handles_dtor(void *null, void *v)
685 {
686 	uint16_t *dev_handles = v;
687 
688 	free(dev_handles, M_DEVBUF, sizeof(*dev_handles) * MFI_MAX_PD);
689 }
690 
691 int
692 mfii_syspd(struct mfii_softc *sc)
693 {
694 	struct scsibus_attach_args saa;
695 	struct scsi_link *link;
696 
697 	sc->sc_pd = malloc(sizeof(*sc->sc_pd), M_DEVBUF, M_WAITOK|M_ZERO);
698 	if (sc->sc_pd == NULL)
699 		return (1);
700 
701 	srp_init(&sc->sc_pd->pd_dev_handles);
702 	if (mfii_dev_handles_update(sc) != 0)
703 		goto free_pdsc;
704 
705 	link = &sc->sc_pd->pd_link;
706 	link->adapter = &mfii_pd_switch;
707 	link->adapter_softc = sc;
708 	link->adapter_buswidth = MFI_MAX_PD;
709 	link->adapter_target = -1;
710 	link->openings = sc->sc_max_cmds - 1;
711 	link->pool = &sc->sc_iopool;
712 
713 	memset(&saa, 0, sizeof(saa));
714 	saa.saa_sc_link = link;
715 
716 	sc->sc_pd->pd_scsibus = (struct scsibus_softc *)
717 	    config_found(&sc->sc_dev, &saa, scsiprint);
718 
719 	return (0);
720 
721 free_pdsc:
722 	free(sc->sc_pd, M_DEVBUF, 0);
723 	return (1);
724 }
725 
/*
 * Detach: undo what mfii_attach() set up.  sc_ih doubles as the
 * "attach completed" flag; if it was never established there is
 * nothing to tear down.
 * NOTE(review): sc_pd and the ccb array are not released here.
 */
int
mfii_detach(struct device *self, int flags)
{
	struct mfii_softc *sc = (struct mfii_softc *)self;

	if (sc->sc_ih == NULL)
		return (0);

	mfii_aen_unregister(sc);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	mfii_dmamem_free(sc, sc->sc_sgl);
	mfii_dmamem_free(sc, sc->sc_requests);
	mfii_dmamem_free(sc, sc->sc_reply_postq);
	mfii_dmamem_free(sc, sc->sc_sense);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);

	return (0);
}
744 
/* read a 32-bit register, with a read barrier beforehand */
u_int32_t
mfii_read(struct mfii_softc *sc, bus_size_t r)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
}
752 
/* write a 32-bit register, with a write barrier afterwards */
void
mfii_write(struct mfii_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
760 
761 struct mfii_dmamem *
762 mfii_dmamem_alloc(struct mfii_softc *sc, size_t size)
763 {
764 	struct mfii_dmamem *m;
765 	int nsegs;
766 
767 	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
768 	if (m == NULL)
769 		return (NULL);
770 
771 	m->mdm_size = size;
772 
773 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
774 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->mdm_map) != 0)
775 		goto mdmfree;
776 
777 	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->mdm_seg, 1,
778 	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
779 		goto destroy;
780 
781 	if (bus_dmamem_map(sc->sc_dmat, &m->mdm_seg, nsegs, size, &m->mdm_kva,
782 	    BUS_DMA_NOWAIT) != 0)
783 		goto free;
784 
785 	if (bus_dmamap_load(sc->sc_dmat, m->mdm_map, m->mdm_kva, size, NULL,
786 	    BUS_DMA_NOWAIT) != 0)
787 		goto unmap;
788 
789 	return (m);
790 
791 unmap:
792 	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
793 free:
794 	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
795 destroy:
796 	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
797 mdmfree:
798 	free(m, M_DEVBUF, 0);
799 
800 	return (NULL);
801 }
802 
803 void
804 mfii_dmamem_free(struct mfii_softc *sc, struct mfii_dmamem *m)
805 {
806 	bus_dmamap_unload(sc->sc_dmat, m->mdm_map);
807 	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
808 	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
809 	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
810 	free(m, M_DEVBUF, 0);
811 }
812 
/*
 * Submit the dcmd frame stashed in the ccb sense buffer as a
 * passthru io request (see the comment above on how dcmds are
 * proxied).  The single chain sge points at the sense buffer
 * holding the mfi frame.
 */
void
mfii_dcmd_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);

	io->function = MFII_FUNCTION_PASSTHRU_IO;
	/* sgl offset is in 32-bit words from the start of the io frame */
	io->sgl_offset0 = (uint32_t *)sge - (uint32_t *)io;

	htolem64(&sge->sg_addr, ccb->ccb_sense_dva);
	htolem32(&sge->sg_len, sizeof(*ccb->ccb_sense));
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	mfii_start(sc, ccb);
}
832 
/*
 * Arm asynchronous event notification: fetch the current event
 * sequence numbers, allocate the buffer the firmware writes event
 * details into, and post the first EVENT_WAIT dcmd.  On success the
 * ccb and buffer stay in flight indefinitely (resubmitted by
 * mfii_aen()).  Returns 0 or an errno.
 */
int
mfii_aen_register(struct mfii_softc *sc)
{
	struct mfi_evt_log_info mel;
	struct mfii_ccb *ccb;
	struct mfii_dmamem *mdm;
	int rv;

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL) {
		printf("%s: unable to allocate ccb for aen\n", DEVNAME(sc));
		return (ENOMEM);
	}

	memset(&mel, 0, sizeof(mel));

	rv = mfii_mgmt(sc, ccb, MR_DCMD_CTRL_EVENT_GET_INFO, NULL,
	    &mel, sizeof(mel), SCSI_DATA_IN|SCSI_NOSLEEP);
	if (rv != 0) {
		scsi_io_put(&sc->sc_iopool, ccb);
		printf("%s: unable to get event info\n", DEVNAME(sc));
		return (EIO);
	}

	mdm = mfii_dmamem_alloc(sc, sizeof(struct mfi_evt_detail));
	if (mdm == NULL) {
		scsi_io_put(&sc->sc_iopool, ccb);
		printf("%s: unable to allocate event data\n", DEVNAME(sc));
		return (ENOMEM);
	}

	/* replay all the events from boot */
	mfii_aen_start(sc, ccb, mdm, lemtoh32(&mel.mel_boot_seq_num));

	return (0);
}
869 
/*
 * Build and post an MR_DCMD_CTRL_EVENT_WAIT dcmd asking for the
 * event with sequence number `seq'; the firmware completes it when
 * such an event occurs, writing the detail into `mdm'.
 */
void
mfii_aen_start(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct mfii_dmamem *mdm, uint32_t seq)
{
	struct mfi_dcmd_frame *dcmd = mfii_dcmd_frame(ccb);
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	union mfi_sgl *sgl = &dcmd->mdf_sgl;
	union mfi_evt_class_locale mec;

	/* reset the ccb and its dcmd frame for reuse */
	mfii_scrub_ccb(ccb);
	mfii_dcmd_scrub(ccb);
	memset(MFII_DMA_KVA(mdm), 0, MFII_DMA_LEN(mdm));

	ccb->ccb_cookie = mdm;
	ccb->ccb_done = mfii_aen_done;
	sc->sc_aen_ccb = ccb;

	/* subscribe to everything: all locales at debug class and up */
	mec.mec_members.class = MFI_EVT_CLASS_DEBUG;
	mec.mec_members.reserved = 0;
	mec.mec_members.locale = htole16(MFI_EVT_LOCALE_ALL);

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_sg_count = 1;
	hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ | MFI_FRAME_SGL64);
	htolem32(&hdr->mfh_data_len, MFII_DMA_LEN(mdm));
	dcmd->mdf_opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	htolem32(&dcmd->mdf_mbox.w[0], seq);
	htolem32(&dcmd->mdf_mbox.w[1], mec.mec_word);
	htolem64(&sgl->sg64[0].addr, MFII_DMA_DVA(mdm));
	htolem32(&sgl->sg64[0].len, MFII_DMA_LEN(mdm));

	/* hand the event buffer and the dcmd frame to the device */
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_PREREAD);

	mfii_dcmd_sync(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	mfii_dcmd_start(sc, ccb);
}
907 
/*
 * Completion for the event wait dcmd; runs from the interrupt path,
 * so punt the real work to a task on the system taskq.
 */
void
mfii_aen_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	KASSERT(sc->sc_aen_ccb == ccb);

	/* defer to a thread with KERNEL_LOCK so we can run autoconf */
	task_add(systq, &sc->sc_aen_task);
}
916 
/*
 * Task context handler for a completed event wait: sync the buffers,
 * dispatch on the event code, then rearm the wait for the next
 * sequence number.
 */
void
mfii_aen(void *arg)
{
	struct mfii_softc *sc = arg;
	struct mfii_ccb *ccb = sc->sc_aen_ccb;
	struct mfii_dmamem *mdm = ccb->ccb_cookie;
	const struct mfi_evt_detail *med = MFII_DMA_KVA(mdm);

	mfii_dcmd_sync(sc, ccb,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_POSTREAD);

#if 0
	printf("%s: %u %08x %02x %s\n", DEVNAME(sc),
	    lemtoh32(&med->med_seq_num), lemtoh32(&med->med_code),
	    med->med_arg_type, med->med_description);
#endif

	/* only pd hotplug/state events are acted on; ignore the rest */
	switch (lemtoh32(&med->med_code)) {
	case MFI_EVT_PD_INSERTED_EXT:
		if (med->med_arg_type != MFI_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_insert(sc, &med->args.pd_address);
		break;
	case MFI_EVT_PD_REMOVED_EXT:
		if (med->med_arg_type != MFI_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_remove(sc, &med->args.pd_address);
		break;

	case MFI_EVT_PD_STATE_CHANGE:
		if (med->med_arg_type != MFI_EVT_ARGS_PD_STATE)
			break;

		mfii_aen_pd_state_change(sc, &med->args.pd_state);
		break;

	default:
		break;
	}

	/* rearm for the event after the one we just consumed */
	mfii_aen_start(sc, ccb, mdm, lemtoh32(&med->med_seq_num) + 1);
}
963 
/*
 * A physical disk appeared: refresh the dev handle map and probe
 * the new target on the syspd bus.
 */
void
mfii_aen_pd_insert(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_address *pd)
{
#if 0
	printf("%s: pd inserted ext\n", DEVNAME(sc));
	printf("%s:  device_id %04x encl_id: %04x type %x\n", DEVNAME(sc),
	    lemtoh16(&pd->device_id), lemtoh16(&pd->encl_id),
	    pd->scsi_dev_type);
	printf("%s:  connected %02x addrs %016llx %016llx\n", DEVNAME(sc),
	    pd->connected.port_bitmap, lemtoh64(&pd->sas_addr[0]),
	    lemtoh64(&pd->sas_addr[1]));
#endif

	if (mfii_dev_handles_update(sc) != 0) /* refresh map */
		return;

	scsi_probe_target(sc->sc_pd->pd_scsibus, lemtoh16(&pd->device_id));
}
983 
/*
 * A physical disk went away: deactivate and force-detach its target
 * from the syspd bus.
 */
void
mfii_aen_pd_remove(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_address *pd)
{
#if 0
	printf("%s: pd removed ext\n", DEVNAME(sc));
	printf("%s:  device_id %04x encl_id: %04x type %u\n", DEVNAME(sc),
	    lemtoh16(&pd->device_id), lemtoh16(&pd->encl_id),
	    pd->scsi_dev_type);
	printf("%s:  connected %02x addrs %016llx %016llx\n", DEVNAME(sc),
	    pd->connected.port_bitmap, lemtoh64(&pd->sas_addr[0]),
	    lemtoh64(&pd->sas_addr[1]));
#endif
	uint16_t target = lemtoh16(&pd->device_id);

	scsi_activate(sc->sc_pd->pd_scsibus, target, -1, DVACT_DEACTIVATE);

	/* the firmware will abort outstanding commands for us */

	scsi_detach_target(sc->sc_pd->pd_scsibus, target, DETACH_FORCE);
}
1005 
/*
 * A physical disk changed state.  We only care about transitions in
 * and out of MFI_PD_SYSTEM, which decide whether the disk belongs
 * to us (syspd) or to the firmware (raid member).
 */
void
mfii_aen_pd_state_change(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_state *state)
{
	uint16_t target = lemtoh16(&state->pd.mep_device_id);

	if (state->prev_state == htole32(MFI_PD_SYSTEM) &&
	    state->new_state != htole32(MFI_PD_SYSTEM)) {
		/* it's been pulled or configured for raid */

		scsi_activate(sc->sc_pd->pd_scsibus, target, -1,
		    DVACT_DEACTIVATE);
		/* outstanding commands will simply complete or get aborted */
		scsi_detach_target(sc->sc_pd->pd_scsibus, target,
		    DETACH_FORCE);

	} else if (state->prev_state == htole32(MFI_PD_UNCONFIG_GOOD) &&
	    state->new_state == htole32(MFI_PD_SYSTEM)) {
		/* the firmware is handing the disk over */

		scsi_probe_target(sc->sc_pd->pd_scsibus, target);
	}
}
1029 
/*
 * Tear down event notification.  Not implemented yet: the event
 * wait dcmd is left outstanding (see XXX).
 */
void
mfii_aen_unregister(struct mfii_softc *sc)
{
	/* XXX */
}
1035 
/*
 * Step the firmware towards MFI_STATE_READY, prodding it out of the
 * handshake and operational states and polling in 100ms ticks with a
 * per-state timeout (max_wait is in seconds).  Returns 0 once ready,
 * 1 on fault, an unknown state, or a timeout.
 */
int
mfii_transition_firmware(struct mfii_softc *sc)
{
	int32_t			fw_state, cur_state;
	int			max_wait, i;

	fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;

	while (fw_state != MFI_STATE_READY) {
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			printf("%s: firmware fault\n", DEVNAME(sc));
			return (1);
		case MFI_STATE_WAIT_HANDSHAKE:
			mfii_write(sc, MFI_SKINNY_IDB,
			    MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		default:
			printf("%s: unknown firmware state %d\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
		/* poll every 100ms until the state moves on */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
	}

	return (0);
}
1089 
1090 int
1091 mfii_get_info(struct mfii_softc *sc)
1092 {
1093 	struct mfii_ccb *ccb;
1094 	int rv;
1095 
1096 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1097 	rv = mfii_mgmt(sc, ccb, MR_DCMD_CTRL_GET_INFO, NULL,
1098 	    &sc->sc_info, sizeof(sc->sc_info), SCSI_DATA_IN|SCSI_NOSLEEP);
1099 	scsi_io_put(&sc->sc_iopool, ccb);
1100 
1101 	if (rv != 0)
1102 		return (rv);
1103 
1104 #ifdef MFI_DEBUG
1105 	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
1106 		printf("%s: active FW %s Version %s date %s time %s\n",
1107 		    DEVNAME(sc),
1108 		    sc->sc_info.mci_image_component[i].mic_name,
1109 		    sc->sc_info.mci_image_component[i].mic_version,
1110 		    sc->sc_info.mci_image_component[i].mic_build_date,
1111 		    sc->sc_info.mci_image_component[i].mic_build_time);
1112 	}
1113 
1114 	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
1115 		printf("%s: pending FW %s Version %s date %s time %s\n",
1116 		    DEVNAME(sc),
1117 		    sc->sc_info.mci_pending_image_component[i].mic_name,
1118 		    sc->sc_info.mci_pending_image_component[i].mic_version,
1119 		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
1120 		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
1121 	}
1122 
1123 	printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
1124 	    DEVNAME(sc),
1125 	    sc->sc_info.mci_max_arms,
1126 	    sc->sc_info.mci_max_spans,
1127 	    sc->sc_info.mci_max_arrays,
1128 	    sc->sc_info.mci_max_lds,
1129 	    sc->sc_info.mci_product_name);
1130 
1131 	printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
1132 	    DEVNAME(sc),
1133 	    sc->sc_info.mci_serial_number,
1134 	    sc->sc_info.mci_hw_present,
1135 	    sc->sc_info.mci_current_fw_time,
1136 	    sc->sc_info.mci_max_cmds,
1137 	    sc->sc_info.mci_max_sg_elements);
1138 
1139 	printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
1140 	    DEVNAME(sc),
1141 	    sc->sc_info.mci_max_request_size,
1142 	    sc->sc_info.mci_lds_present,
1143 	    sc->sc_info.mci_lds_degraded,
1144 	    sc->sc_info.mci_lds_offline,
1145 	    sc->sc_info.mci_pd_present);
1146 
1147 	printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
1148 	    DEVNAME(sc),
1149 	    sc->sc_info.mci_pd_disks_present,
1150 	    sc->sc_info.mci_pd_disks_pred_failure,
1151 	    sc->sc_info.mci_pd_disks_failed);
1152 
1153 	printf("%s: nvram %d mem %d flash %d\n",
1154 	    DEVNAME(sc),
1155 	    sc->sc_info.mci_nvram_size,
1156 	    sc->sc_info.mci_memory_size,
1157 	    sc->sc_info.mci_flash_size);
1158 
1159 	printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
1160 	    DEVNAME(sc),
1161 	    sc->sc_info.mci_ram_correctable_errors,
1162 	    sc->sc_info.mci_ram_uncorrectable_errors,
1163 	    sc->sc_info.mci_cluster_allowed,
1164 	    sc->sc_info.mci_cluster_active);
1165 
1166 	printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
1167 	    DEVNAME(sc),
1168 	    sc->sc_info.mci_max_strips_per_io,
1169 	    sc->sc_info.mci_raid_levels,
1170 	    sc->sc_info.mci_adapter_ops,
1171 	    sc->sc_info.mci_ld_ops);
1172 
1173 	printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
1174 	    DEVNAME(sc),
1175 	    sc->sc_info.mci_stripe_sz_ops.min,
1176 	    sc->sc_info.mci_stripe_sz_ops.max,
1177 	    sc->sc_info.mci_pd_ops,
1178 	    sc->sc_info.mci_pd_mix_support);
1179 
1180 	printf("%s: ecc_bucket %d pckg_prop %s\n",
1181 	    DEVNAME(sc),
1182 	    sc->sc_info.mci_ecc_bucket_count,
1183 	    sc->sc_info.mci_package_version);
1184 
1185 	printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
1186 	    DEVNAME(sc),
1187 	    sc->sc_info.mci_properties.mcp_seq_num,
1188 	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
1189 	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
1190 	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);
1191 
1192 	printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
1193 	    DEVNAME(sc),
1194 	    sc->sc_info.mci_properties.mcp_rebuild_rate,
1195 	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
1196 	    sc->sc_info.mci_properties.mcp_bgi_rate,
1197 	    sc->sc_info.mci_properties.mcp_cc_rate);
1198 
1199 	printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
1200 	    DEVNAME(sc),
1201 	    sc->sc_info.mci_properties.mcp_recon_rate,
1202 	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
1203 	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
1204 	    sc->sc_info.mci_properties.mcp_spinup_delay,
1205 	    sc->sc_info.mci_properties.mcp_cluster_enable);
1206 
1207 	printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
1208 	    DEVNAME(sc),
1209 	    sc->sc_info.mci_properties.mcp_coercion_mode,
1210 	    sc->sc_info.mci_properties.mcp_alarm_enable,
1211 	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
1212 	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
1213 	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);
1214 
1215 	printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
1216 	    DEVNAME(sc),
1217 	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
1218 	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
1219 	    sc->sc_info.mci_properties.mcp_expose_encl_devices);
1220 
1221 	printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
1222 	    DEVNAME(sc),
1223 	    sc->sc_info.mci_pci.mip_vendor,
1224 	    sc->sc_info.mci_pci.mip_device,
1225 	    sc->sc_info.mci_pci.mip_subvendor,
1226 	    sc->sc_info.mci_pci.mip_subdevice);
1227 
1228 	printf("%s: type %#x port_count %d port_addr ",
1229 	    DEVNAME(sc),
1230 	    sc->sc_info.mci_host.mih_type,
1231 	    sc->sc_info.mci_host.mih_port_count);
1232 
1233 	for (i = 0; i < 8; i++)
1234 		printf("%.0llx ", sc->sc_info.mci_host.mih_port_addr[i]);
1235 	printf("\n");
1236 
1237 	printf("%s: type %.x port_count %d port_addr ",
1238 	    DEVNAME(sc),
1239 	    sc->sc_info.mci_device.mid_type,
1240 	    sc->sc_info.mci_device.mid_port_count);
1241 
1242 	for (i = 0; i < 8; i++)
1243 		printf("%.0llx ", sc->sc_info.mci_device.mid_port_addr[i]);
1244 	printf("\n");
1245 #endif /* MFI_DEBUG */
1246 
1247 	return (0);
1248 }
1249 
/*
 * Issue a legacy MFI frame (MFA path) and busy-wait for its completion.
 * Used where sleeping is not possible (attach time, polled management
 * commands).  Returns 0 when the firmware completes the frame, 1 on
 * timeout (after ~5 seconds).
 */
int
mfii_mfa_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mfi_frame_header	*hdr = ccb->ccb_request;
	u_int64_t r;
	int to = 0, rv = 0;

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_mfa_poll called with cookie or done set");
#endif

	hdr->mfh_context = ccb->ccb_smid;
	/* 0xff is a sentinel the firmware overwrites on completion */
	hdr->mfh_cmd_status = 0xff;
	hdr->mfh_flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	/* post the frame address with the MFA request type bits set */
	r = MFII_REQ_MFA(ccb->ccb_request_dva);
	memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));

	mfii_start(sc, ccb);

	for (;;) {
		/* pull the frame back from the device to see the status */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (hdr->mfh_cmd_status != 0xff)
			break;

		if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
			    ccb->ccb_smid);
			ccb->ccb_flags |= MFI_CCB_F_ERR;
			rv = 1;
			break;
		}

		/* hand the frame back to the device before polling again */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		delay(1000);
	}

	if (ccb->ccb_len > 0) {
		/* complete and unload the data transfer mapping, if any */
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
		    0, ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	return (rv);
}
1305 
/*
 * Start a ccb and spin on the reply post queue until it completes.
 * The ccb's real completion handler and cookie are temporarily swapped
 * for mfii_poll_done()/&rv so the loop can detect completion, then the
 * original handler is invoked directly.  Always returns 0.
 */
int
mfii_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	void (*done)(struct mfii_softc *, struct mfii_ccb *);
	void *cookie;
	int rv = 1;

	/* stash the caller's completion state */
	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	/* mfii_poll_done() clears rv via this cookie when the ccb finishes */
	ccb->ccb_done = mfii_poll_done;
	ccb->ccb_cookie = &rv;

	mfii_start(sc, ccb);

	do {
		delay(10);
		mfii_postq(sc);
	} while (rv == 1);

	/* restore the cookie and run the caller's handler ourselves */
	ccb->ccb_cookie = cookie;
	done(sc, ccb);

	return (0);
}
1331 
1332 void
1333 mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1334 {
1335 	int *rv = ccb->ccb_cookie;
1336 
1337 	*rv = 0;
1338 }
1339 
1340 int
1341 mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb)
1342 {
1343 	struct mutex m = MUTEX_INITIALIZER(IPL_BIO);
1344 
1345 #ifdef DIAGNOSTIC
1346 	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
1347 		panic("mfii_exec called with cookie or done set");
1348 #endif
1349 
1350 	ccb->ccb_cookie = &m;
1351 	ccb->ccb_done = mfii_exec_done;
1352 
1353 	mtx_enter(&m);
1354 	while (ccb->ccb_cookie != NULL)
1355 		msleep(ccb, &m, PRIBIO, "mfiiexec", 0);
1356 	mtx_leave(&m);
1357 
1358 	return (0);
1359 }
1360 
/*
 * Completion handler for mfii_exec(): clear the cookie under the
 * sleeper's mutex and wake it.  The cookie must be cleared while the
 * mutex is held so the sleeping thread cannot miss the transition.
 */
void
mfii_exec_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mutex *m = ccb->ccb_cookie;

	mtx_enter(m);
	ccb->ccb_cookie = NULL;
	wakeup_one(ccb);
	mtx_leave(m);
}
1371 
/*
 * Issue a management DCMD (opcode opc) over the legacy MFA path.
 * Data is staged through a freshly dma_alloc'd bounce buffer and copied
 * to/from the caller's buf according to SCSI_DATA_IN/SCSI_DATA_OUT in
 * flags.  With SCSI_NOSLEEP the command is polled, otherwise the caller
 * sleeps in mfii_exec().  Returns 0 on MFI_STAT_OK, ENOMEM on
 * allocation/map failure, EIO otherwise.
 *
 * NOTE(review): dma_alloc() is called with PR_WAITOK even when
 * SCSI_NOSLEEP is set — looks intentional for attach-time use, but
 * verify against the callers.
 */
int
mfii_mgmt(struct mfii_softc *sc, struct mfii_ccb *ccb,
    u_int32_t opc, const union mfi_mbox *mbox, void *buf, size_t len,
    int flags)
{
	struct mfi_dcmd_frame *dcmd = ccb->ccb_request;
	struct mfi_frame_header	*hdr = &dcmd->mdf_header;
	u_int64_t r;
	u_int8_t *dma_buf;
	int rv = EIO;

	dma_buf = dma_alloc(len, PR_WAITOK);
	if (dma_buf == NULL)
		return (ENOMEM);

	mfii_scrub_ccb(ccb);
	ccb->ccb_data = dma_buf;
	ccb->ccb_len = len;
	switch (flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ);
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_WRITE);
		/* stage the caller's data into the bounce buffer */
		memcpy(dma_buf, buf, len);
		break;
	}

	if (mfii_load_mfa(sc, ccb, &dcmd->mdf_sgl,
	    ISSET(flags, SCSI_NOSLEEP)) != 0) {
		rv = ENOMEM;
		goto done;
	}

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_data_len = htole32(len);
	hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;

	dcmd->mdf_opcode = opc;
	/* handle special opcodes */
	if (mbox != NULL)
		memcpy(&dcmd->mdf_mbox, mbox, sizeof(dcmd->mdf_mbox));

	if (ISSET(flags, SCSI_NOSLEEP))
		mfii_mfa_poll(sc, ccb);
	else {
		/* sleeping path: post the frame via the MFA descriptor */
		r = MFII_REQ_MFA(ccb->ccb_request_dva);
		memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));
		mfii_exec(sc, ccb);
	}

	if (hdr->mfh_cmd_status == MFI_STAT_OK) {
		rv = 0;

		/* copy the result back out of the bounce buffer */
		if (ccb->ccb_direction == MFII_DATA_IN)
			memcpy(buf, dma_buf, len);
	}

done:
	dma_free(dma_buf, len);

	return (rv);
}
1438 
/*
 * Load the ccb's data buffer into its dmamap and fill the legacy MFI
 * 32-bit SGL at sglp with the resulting segments.  A zero-length ccb is
 * a successful no-op.  Returns 0 on success, 1 if the map load fails.
 */
int
mfii_load_mfa(struct mfii_softc *sc, struct mfii_ccb *ccb,
    void *sglp, int nosleep)
{
	union mfi_sgl *sgl = sglp;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	int error;
	int i;

	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* 32-bit SG entries; addresses are below 4G in this path */
	for (i = 0; i < dmap->dm_nsegs; i++) {
		sgl->sg32[i].addr = htole32(dmap->dm_segs[i].ds_addr);
		sgl->sg32[i].len = htole32(dmap->dm_segs[i].ds_len);
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	return (0);
}
1470 
/*
 * Post a request descriptor (ccb_req) to the controller's inbound
 * queue ports.  On LP64 the 64-bit descriptor is written in a single
 * raw 8-byte store; on 32-bit platforms it is written as two 4-byte
 * halves (low then high) under sc_post_mtx so descriptors from
 * different cpus cannot interleave.
 */
void
mfii_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	u_long *r = (u_long *)&ccb->ccb_req;

	/* make sure the request frame is visible to the device first */
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#if defined(__LP64__)
	bus_space_write_raw_8(sc->sc_iot, sc->sc_ioh, MFI_IQPL, *r);
#else
	mtx_enter(&sc->sc_post_mtx);
	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh, MFI_IQPL, r[0]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MFI_IQPL, 8, BUS_SPACE_BARRIER_WRITE);

	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh, MFI_IQPH, r[1]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MFI_IQPH, 8, BUS_SPACE_BARRIER_WRITE);
	mtx_leave(&sc->sc_post_mtx);
#endif
}
1494 
/*
 * Complete a ccb pulled off the reply post queue: finish all DMA
 * synchronisation (request frame, chained SGL, data buffer), unload
 * the data map, then call the ccb's completion handler.
 */
void
mfii_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* only synced if mfii_load_ccb() spilled SGEs into the chain */
	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_POSTWRITE);
	}

	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
		    0, ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	ccb->ccb_done(sc, ccb);
}
1519 
/*
 * Perform the MPII IOC_INIT handshake: build an iocinit request in DMA
 * memory describing the request frame pool, reply post queue and sense
 * buffer, then hand its address to the firmware via a polled MFI INIT
 * frame.  Returns 0 on success, non-zero on failure.
 */
int
mfii_initialise_firmware(struct mfii_softc *sc)
{
	struct mpii_msg_iocinit_request *iiq;
	struct mfii_dmamem *m;
	struct mfii_ccb *ccb;
	struct mfi_init_frame *init;
	int rv;

	m = mfii_dmamem_alloc(sc, sizeof(*iiq));
	if (m == NULL)
		return (1);

	iiq = MFII_DMA_KVA(m);
	memset(iiq, 0, sizeof(*iiq));

	iiq->function = MPII_FUNCTION_IOC_INIT;
	iiq->whoinit = MPII_WHOINIT_HOST_DRIVER;

	/* MPI message version 2.0 */
	iiq->msg_version_maj = 0x02;
	iiq->msg_version_min = 0x00;
	iiq->hdr_version_unit = 0x10;
	iiq->hdr_version_dev = 0x0;

	/* frame size is expressed in 32-bit words */
	iiq->system_request_frame_size = htole16(MFII_REQUEST_SIZE / 4);

	iiq->reply_descriptor_post_queue_depth =
	    htole16(sc->sc_reply_postq_depth);
	iiq->reply_free_queue_depth = htole16(0);

	htolem32(&iiq->sense_buffer_address_high,
	    MFII_DMA_DVA(sc->sc_sense) >> 32);

	htolem32(&iiq->reply_descriptor_post_queue_address_lo,
	    MFII_DMA_DVA(sc->sc_reply_postq));
	htolem32(&iiq->reply_descriptor_post_queue_address_hi,
	    MFII_DMA_DVA(sc->sc_reply_postq) >> 32);

	htolem32(&iiq->system_request_frame_base_address_lo,
	    MFII_DMA_DVA(sc->sc_requests));
	htolem32(&iiq->system_request_frame_base_address_hi,
	    MFII_DMA_DVA(sc->sc_requests) >> 32);

	iiq->timestamp = htole64(time_uptime);

	/* wrap the iocinit request in an MFI INIT frame and poll it */
	ccb = scsi_io_get(&sc->sc_iopool, 0);
	mfii_scrub_ccb(ccb);
	init = ccb->ccb_request;

	init->mif_header.mfh_cmd = MFI_CMD_INIT;
	init->mif_header.mfh_data_len = htole32(sizeof(*iiq));
	init->mif_qinfo_new_addr = htole64(MFII_DMA_DVA(m));

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_PREREAD);

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
	    0, sizeof(*iiq), BUS_DMASYNC_PREREAD);

	rv = mfii_mfa_poll(sc, ccb);

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
	    0, sizeof(*iiq), BUS_DMASYNC_POSTREAD);

	scsi_io_put(&sc->sc_iopool, ccb);
	mfii_dmamem_free(sc, m);

	return (rv);
}
1590 
1591 int
1592 mfii_my_intr(struct mfii_softc *sc)
1593 {
1594 	u_int32_t status;
1595 
1596 	status = mfii_read(sc, MFI_OSTS);
1597 	if (ISSET(status, 0x1)) {
1598 		mfii_write(sc, MFI_OSTS, status);
1599 		return (1);
1600 	}
1601 
1602 	return (ISSET(status, MFII_OSTS_INTR_VALID) ? 1 : 0);
1603 }
1604 
/*
 * Interrupt handler: bail if the interrupt isn't ours, otherwise
 * process the reply post queue.  Returns 1 if handled, 0 if not.
 */
int
mfii_intr(void *arg)
{
	struct mfii_softc *sc = arg;

	if (mfii_my_intr(sc) == 0)
		return (0);

	mfii_postq(sc);
	return (1);
}
1617 
/*
 * Drain the reply post queue: collect completed ccbs onto a local list
 * under sc_reply_postq_mtx, then run their completions outside the
 * mutex via mfii_done().  Consumed descriptors are reset to 0xff and
 * the new index is written to the reply post host index register.
 */
void
mfii_postq(struct mfii_softc *sc)
{
	struct mfii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_reply_descr *postq = MFII_DMA_KVA(sc->sc_reply_postq);
	struct mpii_reply_descr *rdp;
	struct mfii_ccb *ccb;
	int rpi = 0;

	mtx_enter(&sc->sc_reply_postq_mtx);

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_POSTREAD);

	for (;;) {
		rdp = &postq[sc->sc_reply_postq_index];
		/* an all-ones (unused) descriptor means we've caught up */
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		/* smid is 1-based; 0 is reserved */
		ccb = &sc->sc_ccb[letoh16(rdp->smid) - 1];
		SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		/* mark the descriptor unused again for the next lap */
		memset(rdp, 0xff, sizeof(*rdp));

		sc->sc_reply_postq_index++;
		sc->sc_reply_postq_index %= sc->sc_reply_postq_depth;
		rpi = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_PREREAD);

	/* only poke the host index register if we consumed something */
	if (rpi)
		mfii_write(sc, MFII_RPI, sc->sc_reply_postq_index);

	mtx_leave(&sc->sc_reply_postq_mtx);

	/* run completions without holding the postq mutex */
	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		mfii_done(sc, ccb);
	}
}
1669 
/*
 * scsi_xfer entry point for logical disks.  Reads/writes go down the
 * LDIO fast path (mfii_scsi_cmd_io), everything else is wrapped as a
 * raw CDB (mfii_scsi_cmd_cdb).  SCSI_POLL xfers are spun on inline;
 * otherwise a per-xfer timeout is armed and the ccb posted.
 */
void
mfii_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	switch (xs->cmd->opcode) {
	case READ_COMMAND:
	case READ_BIG:
	case READ_12:
	case READ_16:
	case WRITE_COMMAND:
	case WRITE_BIG:
	case WRITE_12:
	case WRITE_16:
		if (mfii_scsi_cmd_io(sc, xs) != 0)
			goto stuffup;

		break;

	default:
		if (mfii_scsi_cmd_cdb(sc, xs) != 0)
			goto stuffup;
		break;
	}

	xs->error = XS_NOERROR;
	xs->resid = 0;

	if (ISSET(xs->flags, SCSI_POLL)) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	ccb->ccb_refcnt = 2; /* one for the chip, one for the timeout */
	timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
	scsi_done(xs);
}
1724 
/*
 * Completion for logical-disk and passthru scsi commands: map the RAID
 * context status to an xs error, copy sense data if present, and drop
 * the ccb references.  If the timeout was still pending we also own
 * its reference, so two refs are released; scsi_done() runs only when
 * the count hits zero (the abort path may hold the other ref).
 */
void
mfii_scsi_cmd_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct scsi_xfer *xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	u_int refs = 1;

	/* if the timeout hadn't fired yet, take its reference too */
	if (timeout_del(&xs->stimeout))
		refs = 2;

	switch (ctx->status) {
	case MFI_STAT_OK:
		break;

	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		xs->error = XS_SENSE;
		memset(&xs->sense, 0, sizeof(xs->sense));
		memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
		break;

	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	if (atomic_sub_int_nv(&ccb->ccb_refcnt, refs) == 0)
		scsi_done(xs);
}
1759 
/*
 * Build an LDIO (logical disk fast path) request for a read/write xfer
 * and fill in the ccb request descriptor.  Returns 0 on success, 1 if
 * the data buffer could not be mapped.
 */
int
mfii_scsi_cmd_io(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context sits directly after the MPII IO message */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);

	io->dev_handle = htole16(link->target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* offset (in 32-bit words) of the SGL within the frame */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	/* per-iop LDIO context flavour (2108 vs 3xxx differ here) */
	ctx->type_nseg = sc->sc_iop->ldio_ctx_type_nseg;
	ctx->timeout_value = htole16(0x14); /* XXX */
	ctx->reg_lock_flags = sc->sc_iop->ldio_ctx_reg_lock_flags;
	ctx->virtual_disk_target_id = htole16(link->target);

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (1);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;

	ccb->ccb_req.flags = sc->sc_iop->ldio_req_type;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	return (0);
}
1808 
/*
 * Build a generic (non-read/write) CDB request for a logical disk,
 * still routed through the LDIO function.  Returns 0 on success, 1 if
 * the data buffer could not be mapped.
 */
int
mfii_scsi_cmd_cdb(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context sits directly after the MPII IO message */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);

	io->dev_handle = htole16(link->target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* offset (in 32-bit words) of the SGL within the frame */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(link->target);

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (1);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	return (0);
}
1855 
/*
 * scsi_xfer entry point for the physical disk (passthru) bus.  The
 * request is always built as a raw CDB via mfii_pd_scsi_cmd_cdb();
 * SCSI_POLL xfers are spun on inline, otherwise a timeout is armed and
 * the ccb posted.
 */
void
mfii_pd_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	/* cdb builder returns an XS_* code directly */
	xs->error = mfii_pd_scsi_cmd_cdb(sc, xs);
	if (xs->error != XS_NOERROR)
		goto done;

	xs->resid = 0;

	if (ISSET(xs->flags, SCSI_POLL)) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	ccb->ccb_refcnt = 2; /* one for the chip, one for the timeout */
	timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
done:
	scsi_done(xs);
}
1894 
/*
 * Probe hook for the passthru bus: only lun 0 is supported, and only
 * disks the firmware reports in the MFI_PD_SYSTEM (JBOD) state are
 * exposed.  Returns 0 to allow the attach, EIO/ENXIO to reject it.
 */
int
mfii_pd_scsi_probe(struct scsi_link *link)
{
	struct mfii_softc *sc = link->adapter_softc;
	struct mfii_ccb *ccb;
	struct mfi_pd_details mpd;
	union mfi_mbox mbox;
	int rv;

	if (link->lun > 0)
		return (0);

	/* ask the firmware about this target's physical disk */
	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = htole16(link->target);

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	rv = mfii_mgmt(sc, ccb, MR_DCMD_PD_GET_INFO, &mbox, &mpd, sizeof(mpd),
	    SCSI_DATA_IN|SCSI_NOSLEEP);
	scsi_io_put(&sc->sc_iopool, ccb);
	if (rv != 0)
		return (EIO);

	if (mpd.mpd_fw_state != htole16(MFI_PD_SYSTEM))
		return (ENXIO);

	return (0);
}
1922 
/*
 * Build a passthru (SYSPD) request for a physical disk xfer.  The
 * firmware device handle for the target is looked up first; a handle
 * of 0xffff means the disk is gone.  Returns an XS_* status code
 * (XS_NOERROR on success) for the caller to stick in xs->error.
 */
int
mfii_pd_scsi_cmd_cdb(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context sits directly after the MPII IO message */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	uint16_t dev_handle;

	dev_handle = mfii_dev_handle(sc, link->target);
	if (dev_handle == htole16(0xffff))
		return (XS_SELTIMEOUT);

	io->dev_handle = dev_handle;
	io->function = 0;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* offset (in 32-bit words) of the SGL within the frame */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(link->target);
	/* mark this as a system physical disk (passthru) request */
	ctx->raid_flags = MFII_RAID_CTX_IO_TYPE_SYSPD;
	ctx->timeout_value = sc->sc_pd->pd_timeout;

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (XS_DRIVER_STUFFUP);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;

	ccb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);
	ccb->ccb_req.dev_handle = dev_handle;

	return (XS_NOERROR);
}
1977 
/*
 * Map the ccb's data buffer and build the MPII-style SGL starting at
 * sglp inside the request frame.  If the segments don't all fit in the
 * frame, the last in-frame slot becomes a chain SGE pointing at the
 * ccb's external SGL memory, and the overflow entries are written
 * there.  A zero-length ccb is a successful no-op.  Returns 0 on
 * success, 1 if the map load fails.
 */
int
mfii_load_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb, void *sglp,
    int nosleep)
{
	struct mpii_msg_request *req = ccb->ccb_request;
	struct mfii_sge *sge = NULL, *nsge = sglp;
	struct mfii_sge *ce = NULL;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	u_int space;
	int i;

	int error;

	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* how many SGEs fit between sglp and the end of the frame */
	space = (MFII_REQUEST_SIZE - ((u_int8_t *)nsge - (u_int8_t *)req)) /
	    sizeof(*nsge);
	if (dmap->dm_nsegs > space) {
		/* reserve the last in-frame slot for the chain element */
		space--;

		ccb->ccb_sgl_len = (dmap->dm_nsegs - space) * sizeof(*nsge);
		memset(ccb->ccb_sgl, 0, ccb->ccb_sgl_len);

		ce = nsge + space;
		ce->sg_addr = htole64(ccb->ccb_sgl_dva);
		ce->sg_len = htole32(ccb->ccb_sgl_len);
		ce->sg_flags = sc->sc_iop->sge_flag_chain;

		/* chain offset is in 16-byte units from the frame start */
		req->chain_offset = ((u_int8_t *)ce - (u_int8_t *)req) / 16;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		/* jump to the external SGL when we hit the chain slot */
		if (nsge == ce)
			nsge = ccb->ccb_sgl;

		sge = nsge;

		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_flags = MFII_SGE_ADDR_SYSTEM;

		nsge = sge + 1;
	}
	/* mark the final SGE as end-of-list */
	sge->sg_flags |= sc->sc_iop->sge_flag_eol;

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_PREWRITE);
	}

	return (0);
}
2044 
2045 void
2046 mfii_scsi_cmd_tmo(void *xsp)
2047 {
2048 	struct scsi_xfer *xs = xsp;
2049 	struct scsi_link *link = xs->sc_link;
2050 	struct mfii_softc *sc = link->adapter_softc;
2051 	struct mfii_ccb *ccb = xs->io;
2052 
2053 	mtx_enter(&sc->sc_abort_mtx);
2054 	SIMPLEQ_INSERT_TAIL(&sc->sc_abort_list, ccb, ccb_link);
2055 	mtx_leave(&sc->sc_abort_mtx);
2056 
2057 	task_add(systqmp, &sc->sc_abort_task);
2058 }
2059 
/*
 * Process-context abort task: take the whole abort list in one go
 * under the mutex, then for each timed-out ccb either complete it
 * immediately (device gone) or issue an MPII ABORT_TASK against its
 * smid.  The abort ccb's completion drops the original ccb's last
 * reference.
 */
void
mfii_abort_task(void *scp)
{
	struct mfii_softc *sc = scp;
	struct mfii_ccb *list;

	/* steal the entire list so the mutex isn't held while aborting */
	mtx_enter(&sc->sc_abort_mtx);
	list = SIMPLEQ_FIRST(&sc->sc_abort_list);
	SIMPLEQ_INIT(&sc->sc_abort_list);
	mtx_leave(&sc->sc_abort_mtx);

	while (list != NULL) {
		struct mfii_ccb *ccb = list;
		struct scsi_xfer *xs = ccb->ccb_cookie;
		struct scsi_link *link = xs->sc_link;

		uint16_t dev_handle;
		struct mfii_ccb *accb;

		list = SIMPLEQ_NEXT(ccb, ccb_link);

		dev_handle = mfii_dev_handle(sc, link->target);
		if (dev_handle == htole16(0xffff)) {
			/* device is gone */
			if (atomic_dec_int_nv(&ccb->ccb_refcnt) == 0)
				scsi_done(xs);
			continue;
		}

		/* build and fire an abort request for this ccb's smid */
		accb = scsi_io_get(&sc->sc_iopool, 0);
		mfii_scrub_ccb(accb);
		mfii_abort(sc, accb, dev_handle, ccb->ccb_smid,
		    MPII_SCSI_TASK_ABORT_TASK,
		    htole32(MFII_TASK_MGMT_FLAGS_PD));

		accb->ccb_cookie = ccb;
		accb->ccb_done = mfii_scsi_cmd_abort_done;

		mfii_start(sc, accb);
	}
}
2101 
2102 void
2103 mfii_abort(struct mfii_softc *sc, struct mfii_ccb *accb, uint16_t dev_handle,
2104     uint16_t smid, uint8_t type, uint32_t flags)
2105 {
2106 	struct mfii_task_mgmt *msg;
2107 	struct mpii_msg_scsi_task_request *req;
2108 
2109 	msg = accb->ccb_request;
2110 	req = &msg->mpii_request;
2111 	req->dev_handle = dev_handle;
2112 	req->function = MPII_FUNCTION_SCSI_TASK_MGMT;
2113 	req->task_type = type;
2114 	htolem16(&req->task_mid, smid);
2115 	msg->flags = flags;
2116 
2117 	accb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
2118 	accb->ccb_req.smid = letoh16(accb->ccb_smid);
2119 }
2120 
/*
 * Completion of an abort request: the accb's cookie is the original
 * (timed-out) ccb, whose cookie in turn is the scsi_xfer.  Return the
 * abort ccb to the pool and drop the original ccb's reference; if it
 * was the last one, finish the xfer.
 */
void
mfii_scsi_cmd_abort_done(struct mfii_softc *sc, struct mfii_ccb *accb)
{
	struct mfii_ccb *ccb = accb->ccb_cookie;
	struct scsi_xfer *xs = ccb->ccb_cookie;

	/* XXX check accb completion? */

	scsi_io_put(&sc->sc_iopool, accb);

	if (atomic_dec_int_nv(&ccb->ccb_refcnt) == 0)
		scsi_done(xs);
}
2134 
2135 void *
2136 mfii_get_ccb(void *cookie)
2137 {
2138 	struct mfii_softc *sc = cookie;
2139 	struct mfii_ccb *ccb;
2140 
2141 	mtx_enter(&sc->sc_ccb_mtx);
2142 	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_freeq);
2143 	if (ccb != NULL)
2144 		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
2145 	mtx_leave(&sc->sc_ccb_mtx);
2146 
2147 	return (ccb);
2148 }
2149 
2150 void
2151 mfii_scrub_ccb(struct mfii_ccb *ccb)
2152 {
2153 	ccb->ccb_cookie = NULL;
2154 	ccb->ccb_done = NULL;
2155 	ccb->ccb_flags = 0;
2156 	ccb->ccb_data = NULL;
2157 	ccb->ccb_direction = 0;
2158 	ccb->ccb_len = 0;
2159 	ccb->ccb_sgl_len = 0;
2160 	ccb->ccb_refcnt = 1;
2161 
2162 	memset(&ccb->ccb_req, 0, sizeof(ccb->ccb_req));
2163 	memset(ccb->ccb_request, 0, MFII_REQUEST_SIZE);
2164 }
2165 
2166 void
2167 mfii_put_ccb(void *cookie, void *io)
2168 {
2169 	struct mfii_softc *sc = cookie;
2170 	struct mfii_ccb *ccb = io;
2171 
2172 	mtx_enter(&sc->sc_ccb_mtx);
2173 	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
2174 	mtx_leave(&sc->sc_ccb_mtx);
2175 }
2176 
2177 int
2178 mfii_init_ccb(struct mfii_softc *sc)
2179 {
2180 	struct mfii_ccb *ccb;
2181 	u_int8_t *request = MFII_DMA_KVA(sc->sc_requests);
2182 	u_int8_t *sense = MFII_DMA_KVA(sc->sc_sense);
2183 	u_int8_t *sgl = MFII_DMA_KVA(sc->sc_sgl);
2184 	u_int i;
2185 	int error;
2186 
2187 	sc->sc_ccb = mallocarray(sc->sc_max_cmds, sizeof(struct mfii_ccb),
2188 	    M_DEVBUF, M_WAITOK|M_ZERO);
2189 
2190 	for (i = 0; i < sc->sc_max_cmds; i++) {
2191 		ccb = &sc->sc_ccb[i];
2192 
2193 		/* create a dma map for transfer */
2194 		error = bus_dmamap_create(sc->sc_dmat,
2195 		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2196 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
2197 		if (error) {
2198 			printf("%s: cannot create ccb dmamap (%d)\n",
2199 			    DEVNAME(sc), error);
2200 			goto destroy;
2201 		}
2202 
2203 		/* select i + 1'th request. 0 is reserved for events */
2204 		ccb->ccb_smid = i + 1;
2205 		ccb->ccb_request_offset = MFII_REQUEST_SIZE * (i + 1);
2206 		ccb->ccb_request = request + ccb->ccb_request_offset;
2207 		ccb->ccb_request_dva = MFII_DMA_DVA(sc->sc_requests) +
2208 		    ccb->ccb_request_offset;
2209 
2210 		/* select i'th sense */
2211 		ccb->ccb_sense_offset = MFI_SENSE_SIZE * i;
2212 		ccb->ccb_sense = (struct mfi_sense *)(sense +
2213 		    ccb->ccb_sense_offset);
2214 		ccb->ccb_sense_dva = MFII_DMA_DVA(sc->sc_sense) +
2215 		    ccb->ccb_sense_offset;
2216 
2217 		/* select i'th sgl */
2218 		ccb->ccb_sgl_offset = sizeof(struct mfii_sge) *
2219 		    sc->sc_max_sgl * i;
2220 		ccb->ccb_sgl = (struct mfii_sge *)(sgl + ccb->ccb_sgl_offset);
2221 		ccb->ccb_sgl_dva = MFII_DMA_DVA(sc->sc_sgl) +
2222 		    ccb->ccb_sgl_offset;
2223 
2224 		/* add ccb to queue */
2225 		mfii_put_ccb(sc, ccb);
2226 	}
2227 
2228 	return (0);
2229 
2230 destroy:
2231 	/* free dma maps and ccb memory */
2232 	while ((ccb = mfii_get_ccb(sc)) != NULL)
2233 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
2234 
2235 	free(sc->sc_ccb, M_DEVBUF, 0);
2236 
2237 	return (1);
2238 }
2239 
2240