xref: /openbsd-src/sys/dev/pci/mfii.c (revision 0b7734b3d77bb9b21afec6f4621cae6c805dbd45)
1 /* $OpenBSD: mfii.c,v 1.25 2015/03/14 03:38:48 jsg Exp $ */
2 
3 /*
4  * Copyright (c) 2012 David Gwynne <dlg@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "bio.h"
20 
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/malloc.h>
24 #include <sys/device.h>
25 #include <sys/types.h>
26 #include <sys/pool.h>
27 
28 #include <dev/pci/pcidevs.h>
29 #include <dev/pci/pcivar.h>
30 
31 #include <machine/bus.h>
32 
33 #include <scsi/scsi_all.h>
34 #include <scsi/scsi_disk.h>
35 #include <scsi/scsiconf.h>
36 
37 #include <dev/ic/mfireg.h>
38 #include <dev/pci/mpiireg.h>
39 
40 #define	MFII_BAR		0x14
41 #define	MFII_PCI_MEMSIZE	0x2000 /* 8k */
42 
43 #define MFII_OSTS_INTR_VALID	0x00000009
44 #define MFII_RPI		0x6c /* reply post host index */
45 
46 #define MFII_REQ_TYPE_SCSI	MPII_REQ_DESCR_SCSI_IO
47 #define MFII_REQ_TYPE_LDIO	(0x7 << 1)
48 #define MFII_REQ_TYPE_MFA	(0x1 << 1)
49 #define MFII_REQ_TYPE_NO_LOCK	(0x2 << 1)
50 #define MFII_REQ_TYPE_HI_PRI	(0x6 << 1)
51 
52 #define MFII_REQ_MFA(_a)	htole64((_a) | MFII_REQ_TYPE_MFA)
53 
54 #define MFII_FUNCTION_LDIO_REQUEST			(0xf1)
55 
/*
 * MPI-style request descriptor, written to the controller's inbound
 * queue port to post a command.  The low bits of flags carry one of
 * the MFII_REQ_TYPE_* values; smid identifies the request frame slot.
 */
struct mfii_request_descr {
	u_int8_t	flags;		/* MFII_REQ_TYPE_* request type */
	u_int8_t	msix_index;
	u_int16_t	smid;		/* system message id of the frame */

	u_int16_t	lmid;
	u_int16_t	dev_handle;
} __packed;
64 
65 #define MFII_RAID_CTX_IO_TYPE_SYSPD	(0x1 << 4)
66 #define MFII_RAID_CTX_TYPE_CUDA		(0x2 << 4)
67 
/*
 * Per-command RAID context embedded in an LD I/O request.  The layout
 * is fixed by the firmware interface; all fields are little-endian.
 */
struct mfii_raid_context {
	u_int8_t	type_nseg;	/* MFII_RAID_CTX_TYPE_* | nseg */
	u_int8_t	_reserved1;
	u_int16_t	timeout_value;

	u_int8_t	reg_lock_flags;
#define MFII_RAID_CTX_RL_FLAGS_SEQNO_EN	(0x08)
#define MFII_RAID_CTX_RL_FLAGS_CPU0	(0x00)
#define MFII_RAID_CTX_RL_FLAGS_CPU1	(0x10)
#define MFII_RAID_CTX_RL_FLAGS_CUDA	(0x80)
	u_int8_t	_reserved2;
	u_int16_t	virtual_disk_target_id;

	u_int64_t	reg_lock_row_lba;

	u_int32_t	reg_lock_length;

	u_int16_t	next_lm_id;
	u_int8_t	ex_status;	/* extended status from firmware */
	u_int8_t	status;		/* completion status from firmware */

	u_int8_t	raid_flags;
	u_int8_t	num_sge;
	u_int16_t	config_seq_num;

	u_int8_t	span_arm;
	u_int8_t	_reserved3[3];
} __packed;
96 
/*
 * MPI2-style scatter/gather element; sg_flags carries the
 * MFII_SGE_* bits defined below.
 */
struct mfii_sge {
	u_int64_t	sg_addr;	/* dma address of the segment */
	u_int32_t	sg_len;		/* length of the segment in bytes */
	u_int16_t	_reserved;
	u_int8_t	sg_next_chain_offset;
	u_int8_t	sg_flags;
} __packed;
104 
105 #define MFII_SGE_ADDR_MASK		(0x03)
106 #define MFII_SGE_ADDR_SYSTEM		(0x00)
107 #define MFII_SGE_ADDR_IOCDDR		(0x01)
108 #define MFII_SGE_ADDR_IOCPLB		(0x02)
109 #define MFII_SGE_ADDR_IOCPLBNTA		(0x03)
110 #define MFII_SGE_END_OF_LIST		(0x40)
111 #define MFII_SGE_CHAIN_ELEMENT		(0x80)
112 
113 #define MFII_REQUEST_SIZE	256
114 
115 #define MR_DCMD_LD_MAP_GET_INFO			0x0300e101
116 
117 #define MFII_MAX_ROW		32
118 #define MFII_MAX_ARRAY		128
119 
/* one row of physical disk ids making up an array */
struct mfii_array_map {
	uint16_t		mam_pd[MFII_MAX_ROW];
} __packed;

/* current/alternate device handles for one physical disk */
struct mfii_dev_handle {
	uint16_t		mdh_cur_handle;
	uint8_t			mdh_valid;
	uint8_t			mdh_reserved;
	uint16_t		mdh_handle[2];
} __packed;

/*
 * Layout of the MR_DCMD_LD_MAP_GET_INFO reply: logical disk map plus
 * the per-physical-disk device handles used for pass-through I/O.
 */
struct mfii_ld_map {
	uint32_t		mlm_total_size;
	uint32_t		mlm_reserved1[5];
	uint32_t		mlm_num_lds;
	uint32_t		mlm_reserved2;
	uint8_t			mlm_tgtid_to_ld[2 * MFI_MAX_LD];
	uint8_t			mlm_pd_timeout;
	uint8_t			mlm_reserved3[7];
	struct mfii_array_map	mlm_am[MFII_MAX_ARRAY];
	struct mfii_dev_handle	mlm_dev_handle[MFI_MAX_PD];
} __packed;
142 
/*
 * Bookkeeping for a single contiguous dma-safe memory allocation;
 * created by mfii_dmamem_alloc(), released by mfii_dmamem_free().
 */
struct mfii_dmamem {
	bus_dmamap_t		mdm_map;	/* loaded dma map */
	bus_dma_segment_t	mdm_seg;	/* single backing segment */
	size_t			mdm_size;	/* size of the allocation */
	caddr_t			mdm_kva;	/* kernel mapping */
};
#define MFII_DMA_MAP(_mdm)	((_mdm)->mdm_map)
#define MFII_DMA_LEN(_mdm)	((_mdm)->mdm_size)
#define MFII_DMA_DVA(_mdm)	((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MFII_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
153 
154 struct mfii_softc;
155 
/*
 * Per-command control block.  Each ccb owns a slot in the shared
 * request, sense and sgl dmamem areas; the *_dva/*_offset pairs are
 * the device address and offset of that slot, set up once by
 * mfii_init_ccb().
 */
struct mfii_ccb {
	void			*ccb_request;		/* kva of request frame */
	u_int64_t		ccb_request_dva;
	bus_addr_t		ccb_request_offset;

	struct mfi_sense	*ccb_sense;
	u_int32_t		ccb_sense_dva;
	bus_addr_t		ccb_sense_offset;

	struct mfii_sge		*ccb_sgl;
	u_int64_t		ccb_sgl_dva;
	bus_addr_t		ccb_sgl_offset;
	u_int			ccb_sgl_len;

	struct mfii_request_descr ccb_req;	/* descriptor posted to hw */

	bus_dmamap_t		ccb_dmamap;	/* map for ccb_data */

	/* data for sgl */
	void			*ccb_data;
	size_t			ccb_len;

	int			ccb_direction;
#define MFII_DATA_NONE			0
#define MFII_DATA_IN			1
#define MFII_DATA_OUT			2

	/* completion callback and its argument */
	void			*ccb_cookie;
	void			(*ccb_done)(struct mfii_softc *,
				    struct mfii_ccb *);

	u_int32_t		ccb_flags;
#define MFI_CCB_F_ERR			(1<<0)
	u_int			ccb_smid;	/* slot id, used in descriptors */
	SIMPLEQ_ENTRY(mfii_ccb)	ccb_link;
};
SIMPLEQ_HEAD(mfii_ccb_list, mfii_ccb);
193 
/* state for one pass-through physical disk */
struct mfii_pd_link {
	u_int16_t		pd_id;		/* firmware physical disk id */
	struct mfi_pd_details	pd_info;
	u_int16_t		pd_handle;	/* device handle for I/O */
};

/* state for the pass-through physical disk bus attached by mfii_syspd() */
struct mfii_pd_softc {
	struct scsi_link	pd_link;
	struct scsibus_softc	*pd_scsibus;
	struct mfii_pd_link	*pd_links[MFI_MAX_PD];
	uint8_t			pd_timeout;	/* from mlm_pd_timeout */
};

/*
 * Per-controller-generation personality: request type and sge/context
 * flag values differ between 2208 (thunderbolt) and newer parts.
 */
struct mfii_iop {
	u_int8_t ldio_req_type;
	u_int8_t ldio_ctx_type_nseg;
	u_int8_t ldio_ctx_reg_lock_flags;
	u_int8_t sge_flag_chain;
	u_int8_t sge_flag_eol;
};
214 
/* per-controller softc */
struct mfii_softc {
	struct device		sc_dev;
	const struct mfii_iop	*sc_iop;	/* controller personality */

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	bus_space_tag_t		sc_iot;		/* register window */
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	void			*sc_ih;		/* interrupt handle */

	struct mutex		sc_ccb_mtx;	/* protects sc_ccb_freeq */
	struct mutex		sc_post_mtx;	/* serialises non-LP64 posts */

	u_int			sc_max_cmds;	/* from firmware state reg */
	u_int			sc_max_sgl;

	/* reply post queue, consumed by mfii_postq() */
	u_int			sc_reply_postq_depth;
	u_int			sc_reply_postq_index;
	struct mutex		sc_reply_postq_mtx;
	struct mfii_dmamem	*sc_reply_postq;

	/* shared dma areas carved up between the ccbs */
	struct mfii_dmamem	*sc_requests;
	struct mfii_dmamem	*sc_sense;
	struct mfii_dmamem	*sc_sgl;

	struct mfii_ccb		*sc_ccb;
	struct mfii_ccb_list	sc_ccb_freeq;

	struct scsi_link	sc_link;	/* logical disk bus */
	struct scsibus_softc	*sc_scsibus;
	struct mfii_pd_softc	*sc_pd;		/* physical disk bus, or NULL */
	struct scsi_iopool	sc_iopool;

	struct mfi_ctrl_info	sc_info;	/* cached controller info */
};
254 
/* autoconf(9) glue */
int		mfii_match(struct device *, void *, void *);
void		mfii_attach(struct device *, struct device *, void *);
int		mfii_detach(struct device *, int);

struct cfattach mfii_ca = {
	sizeof(struct mfii_softc),
	mfii_match,
	mfii_attach,
	mfii_detach
};

struct cfdriver mfii_cd = {
	NULL,
	"mfii",
	DV_DULL
};

/* scsi midlayer entry points for the logical disk bus */
void		mfii_scsi_cmd(struct scsi_xfer *);
void		mfii_scsi_cmd_done(struct mfii_softc *, struct mfii_ccb *);

struct scsi_adapter mfii_switch = {
	mfii_scsi_cmd,
	scsi_minphys,
	NULL, /* probe */
	NULL, /* unprobe */
	NULL  /* ioctl */
};

/* scsi midlayer entry points for the pass-through physical disk bus */
void		mfii_pd_scsi_cmd(struct scsi_xfer *);
int		mfii_pd_scsi_probe(struct scsi_link *);

struct scsi_adapter mfii_pd_switch = {
	mfii_pd_scsi_cmd,
	scsi_minphys,
	mfii_pd_scsi_probe
};
291 
292 #define DEVNAME(_sc)		((_sc)->sc_dev.dv_xname)
293 
294 u_int32_t		mfii_read(struct mfii_softc *, bus_size_t);
295 void			mfii_write(struct mfii_softc *, bus_size_t, u_int32_t);
296 
297 struct mfii_dmamem *	mfii_dmamem_alloc(struct mfii_softc *, size_t);
298 void			mfii_dmamem_free(struct mfii_softc *,
299 			    struct mfii_dmamem *);
300 
301 void *			mfii_get_ccb(void *);
302 void			mfii_put_ccb(void *, void *);
303 int			mfii_init_ccb(struct mfii_softc *);
304 void			mfii_scrub_ccb(struct mfii_ccb *);
305 
306 int			mfii_transition_firmware(struct mfii_softc *);
307 int			mfii_initialise_firmware(struct mfii_softc *);
308 int			mfii_get_info(struct mfii_softc *);
309 int			mfii_syspd(struct mfii_softc *);
310 
311 void			mfii_start(struct mfii_softc *, struct mfii_ccb *);
312 void			mfii_done(struct mfii_softc *, struct mfii_ccb *);
313 int			mfii_poll(struct mfii_softc *, struct mfii_ccb *);
314 void			mfii_poll_done(struct mfii_softc *, struct mfii_ccb *);
315 int			mfii_exec(struct mfii_softc *, struct mfii_ccb *);
316 void			mfii_exec_done(struct mfii_softc *, struct mfii_ccb *);
317 int			mfii_my_intr(struct mfii_softc *);
318 int			mfii_intr(void *);
319 void			mfii_postq(struct mfii_softc *);
320 
321 int			mfii_load_ccb(struct mfii_softc *, struct mfii_ccb *,
322 			    void *, int);
323 int			mfii_load_mfa(struct mfii_softc *, struct mfii_ccb *,
324 			    void *, int);
325 
326 int			mfii_mfa_poll(struct mfii_softc *, struct mfii_ccb *);
327 
328 int			mfii_mgmt(struct mfii_softc *, struct mfii_ccb *,
329 			    u_int32_t, u_int8_t *, void *, size_t, int);
330 
331 int			mfii_scsi_cmd_io(struct mfii_softc *,
332 			    struct scsi_xfer *);
333 int			mfii_scsi_cmd_cdb(struct mfii_softc *,
334 			    struct scsi_xfer *);
335 int			mfii_pd_scsi_cmd_cdb(struct mfii_softc *,
336 			    struct scsi_xfer *);
337 
338 
339 #define mfii_fw_state(_sc) mfii_read((_sc), MFI_OSP)
340 
/* request/sge parameters for 2208 (thunderbolt) controllers */
const struct mfii_iop mfii_iop_thunderbolt = {
	MFII_REQ_TYPE_LDIO,
	0,
	0,
	MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA,
	0
};

/*
 * a lot of these values depend on us not implementing fastpath yet.
 */
const struct mfii_iop mfii_iop_25 = {
	MFII_REQ_TYPE_NO_LOCK,
	MFII_RAID_CTX_TYPE_CUDA | 0x1,
	MFII_RAID_CTX_RL_FLAGS_CPU0, /* | MFII_RAID_CTX_RL_FLAGS_SEQNO_EN */
	MFII_SGE_CHAIN_ELEMENT,
	MFII_SGE_END_OF_LIST
};

/* maps a pci vendor/product pair to the iop personality to use */
struct mfii_device {
	pcireg_t		mpd_vendor;
	pcireg_t		mpd_product;
	const struct mfii_iop	*mpd_iop;
};

const struct mfii_device mfii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_2208,
	    &mfii_iop_thunderbolt },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3008,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3108,
	    &mfii_iop_25 }
};
374 
375 const struct mfii_iop *mfii_find_iop(struct pci_attach_args *);
376 
377 const struct mfii_iop *
378 mfii_find_iop(struct pci_attach_args *pa)
379 {
380 	const struct mfii_device *mpd;
381 	int i;
382 
383 	for (i = 0; i < nitems(mfii_devices); i++) {
384 		mpd = &mfii_devices[i];
385 
386 		if (mpd->mpd_vendor == PCI_VENDOR(pa->pa_id) &&
387 		    mpd->mpd_product == PCI_PRODUCT(pa->pa_id))
388 			return (mpd->mpd_iop);
389 	}
390 
391 	return (NULL);
392 }
393 
394 int
395 mfii_match(struct device *parent, void *match, void *aux)
396 {
397 	return ((mfii_find_iop(aux) != NULL) ? 1 : 0);
398 }
399 
400 void
401 mfii_attach(struct device *parent, struct device *self, void *aux)
402 {
403 	struct mfii_softc *sc = (struct mfii_softc *)self;
404 	struct pci_attach_args *pa = aux;
405 	pcireg_t memtype;
406 	pci_intr_handle_t ih;
407 	struct scsibus_attach_args saa;
408 	u_int32_t status;
409 
410 	/* init sc */
411 	sc->sc_iop = mfii_find_iop(aux);
412 	sc->sc_dmat = pa->pa_dmat;
413 	SIMPLEQ_INIT(&sc->sc_ccb_freeq);
414 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
415 	mtx_init(&sc->sc_post_mtx, IPL_BIO);
416 	mtx_init(&sc->sc_reply_postq_mtx, IPL_BIO);
417 	scsi_iopool_init(&sc->sc_iopool, sc, mfii_get_ccb, mfii_put_ccb);
418 
419 	/* wire up the bus shizz */
420 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, MFII_BAR);
421 	if (pci_mapreg_map(pa, MFII_BAR, memtype, 0,
422 	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios, MFII_PCI_MEMSIZE)) {
423 		printf(": unable to map registers\n");
424 		return;
425 	}
426 
427 	/* disable interrupts */
428 	mfii_write(sc, MFI_OMSK, 0xffffffff);
429 
430 	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
431 		printf(": unable to map interrupt\n");
432 		goto pci_unmap;
433 	}
434 	printf(": %s\n", pci_intr_string(pa->pa_pc, ih));
435 
436 	/* lets get started */
437 	if (mfii_transition_firmware(sc))
438 		goto pci_unmap;
439 
440 	status = mfii_fw_state(sc);
441 	sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
442 	sc->sc_max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
443 
444 	/* sense memory */
445 	sc->sc_sense = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
446 	if (sc->sc_sense == NULL) {
447 		printf("%s: unable to allocate sense memory\n", DEVNAME(sc));
448 		goto pci_unmap;
449 	}
450 
451 	sc->sc_reply_postq_depth = roundup(sc->sc_max_cmds, 16);
452 
453 	sc->sc_reply_postq = mfii_dmamem_alloc(sc,
454 	    sc->sc_reply_postq_depth * sizeof(struct mpii_reply_descr));
455 	if (sc->sc_reply_postq == NULL)
456 		goto free_sense;
457 
458 	memset(MFII_DMA_KVA(sc->sc_reply_postq), 0xff,
459 	    MFII_DMA_LEN(sc->sc_reply_postq));
460 
461 	sc->sc_requests = mfii_dmamem_alloc(sc,
462 	    MFII_REQUEST_SIZE * (sc->sc_max_cmds + 1));
463 	if (sc->sc_requests == NULL)
464 		goto free_reply_postq;
465 
466 	sc->sc_sgl = mfii_dmamem_alloc(sc, sc->sc_max_cmds *
467 	    sizeof(struct mfii_sge) * sc->sc_max_sgl);
468 	if (sc->sc_sgl == NULL)
469 		goto free_requests;
470 
471 	if (mfii_init_ccb(sc) != 0) {
472 		printf("%s: could not init ccb list\n", DEVNAME(sc));
473 		goto free_sgl;
474 	}
475 
476 	/* kickstart firmware with all addresses and pointers */
477 	if (mfii_initialise_firmware(sc) != 0) {
478 		printf("%s: could not initialize firmware\n", DEVNAME(sc));
479 		goto free_sgl;
480 	}
481 
482 	if (mfii_get_info(sc) != 0) {
483 		printf("%s: could not retrieve controller information\n",
484 		    DEVNAME(sc));
485 		goto free_sgl;
486 	}
487 
488 	printf("%s: \"%s\", firmware %s", DEVNAME(sc),
489 	    sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
490 	if (letoh16(sc->sc_info.mci_memory_size) > 0)
491 		printf(", %uMB cache", letoh16(sc->sc_info.mci_memory_size));
492 	printf("\n");
493 
494 	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO,
495 	    mfii_intr, sc, DEVNAME(sc));
496 	if (sc->sc_ih == NULL)
497 		goto free_sgl;
498 
499 	sc->sc_link.openings = sc->sc_max_cmds;
500 	sc->sc_link.adapter_softc = sc;
501 	sc->sc_link.adapter = &mfii_switch;
502 	sc->sc_link.adapter_target = sc->sc_info.mci_max_lds;
503 	sc->sc_link.adapter_buswidth = sc->sc_info.mci_max_lds;
504 	sc->sc_link.pool = &sc->sc_iopool;
505 
506 	memset(&saa, 0, sizeof(saa));
507 	saa.saa_sc_link = &sc->sc_link;
508 
509 	config_found(&sc->sc_dev, &saa, scsiprint);
510 
511 	mfii_syspd(sc);
512 
513 	/* enable interrupts */
514 	mfii_write(sc, MFI_OSTS, 0xffffffff);
515 	mfii_write(sc, MFI_OMSK, ~MFII_OSTS_INTR_VALID);
516 
517 	return;
518 free_sgl:
519 	mfii_dmamem_free(sc, sc->sc_sgl);
520 free_requests:
521 	mfii_dmamem_free(sc, sc->sc_requests);
522 free_reply_postq:
523 	mfii_dmamem_free(sc, sc->sc_reply_postq);
524 free_sense:
525 	mfii_dmamem_free(sc, sc->sc_sense);
526 pci_unmap:
527 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
528 }
529 
530 int
531 mfii_syspd(struct mfii_softc *sc)
532 {
533 	struct scsibus_attach_args saa;
534 	struct scsi_link *link;
535 	struct mfii_ld_map *lm;
536 	struct mfii_pd_link *pl;
537 	struct mfi_pd_list *pd;
538 	struct mfii_ccb *ccb;
539 	u_int npds, i;
540 	int rv;
541 
542 	sc->sc_pd = malloc(sizeof(*sc->sc_pd), M_DEVBUF, M_WAITOK|M_ZERO);
543 	if (sc->sc_pd == NULL)
544 		return (1);
545 
546 	lm = malloc(sizeof(*lm), M_TEMP, M_WAITOK|M_ZERO);
547 	if (lm == NULL)
548 		goto free_pdsc;
549 
550 	ccb = scsi_io_get(&sc->sc_iopool, 0);
551 	rv = mfii_mgmt(sc, ccb, MR_DCMD_LD_MAP_GET_INFO, NULL,
552 	    lm, sizeof(*lm), SCSI_DATA_IN|SCSI_NOSLEEP);
553 	scsi_io_put(&sc->sc_iopool, ccb);
554 	if (rv != 0)
555 		goto free_lm;
556 
557 	sc->sc_pd->pd_timeout = lm->mlm_pd_timeout;
558 
559 	pd = malloc(sizeof(*pd), M_TEMP, M_WAITOK|M_ZERO);
560 	if (pd == NULL)
561 		goto free_lm;
562 
563 	ccb = scsi_io_get(&sc->sc_iopool, 0);
564 	rv = mfii_mgmt(sc, ccb, MR_DCMD_PD_GET_LIST, NULL,
565 	    pd, sizeof(*pd), SCSI_DATA_IN|SCSI_NOSLEEP);
566 	scsi_io_put(&sc->sc_iopool, ccb);
567 	if (rv != 0)
568 		goto free_pd;
569 
570 	npds = letoh32(pd->mpl_no_pd);
571 	for (i = 0; i < npds; i++) {
572 		pl = malloc(sizeof(*pl), M_DEVBUF, M_WAITOK|M_ZERO);
573 		if (pl == NULL)
574 			goto free_pl;
575 
576 		pl->pd_id = pd->mpl_address[i].mpa_pd_id;
577 		pl->pd_handle = lm->mlm_dev_handle[i].mdh_cur_handle;
578 		sc->sc_pd->pd_links[i] = pl;
579 	}
580 
581 	free(pd, M_TEMP, 0);
582 	free(lm, M_TEMP, 0);
583 
584 	link = &sc->sc_pd->pd_link;
585 	link->adapter = &mfii_pd_switch;
586 	link->adapter_softc = sc;
587 	link->adapter_buswidth = MFI_MAX_PD;
588 	link->adapter_target = -1;
589 	link->openings = sc->sc_max_cmds - 1;
590 	link->pool = &sc->sc_iopool;
591 
592 	memset(&saa, 0, sizeof(saa));
593 	saa.saa_sc_link = link;
594 
595 	sc->sc_pd->pd_scsibus = (struct scsibus_softc *)
596 	    config_found(&sc->sc_dev, &saa, scsiprint);
597 
598 	return (0);
599 free_pl:
600 	for (i = 0; i < npds; i++) {
601 		pl = sc->sc_pd->pd_links[i];
602 		if (pl == NULL)
603 			break;
604 
605 		free(pl, M_DEVBUF, 0);
606 	}
607 free_pd:
608 	free(pd, M_TEMP, 0);
609 free_lm:
610 	free(lm, M_TEMP, 0);
611 free_pdsc:
612 	free(sc->sc_pd, M_DEVBUF, 0);
613 	return (1);
614 }
615 
/*
 * Detach: tear down the interrupt and release the dma areas and the
 * register mapping.  sc_ih is the marker for how far attach got; if
 * it was never established, attach already cleaned up after itself.
 */
int
mfii_detach(struct device *self, int flags)
{
	struct mfii_softc *sc = (struct mfii_softc *)self;

	if (sc->sc_ih == NULL)
		return (0);

	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	mfii_dmamem_free(sc, sc->sc_sgl);
	mfii_dmamem_free(sc, sc->sc_requests);
	mfii_dmamem_free(sc, sc->sc_reply_postq);
	mfii_dmamem_free(sc, sc->sc_sense);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);

	return (0);
}
633 
/* read a 32bit controller register, with a read barrier first */
u_int32_t
mfii_read(struct mfii_softc *sc, bus_size_t r)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
}
641 
/* write a 32bit controller register, with a write barrier after */
void
mfii_write(struct mfii_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
649 
/*
 * Allocate, map and load a single-segment dma-safe memory area of
 * `size' bytes.  Returns NULL on failure; partially acquired
 * resources are unwound through the goto chain in reverse order.
 */
struct mfii_dmamem *
mfii_dmamem_alloc(struct mfii_softc *sc, size_t size)
{
	struct mfii_dmamem *m;
	int nsegs;

	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (m == NULL)
		return (NULL);

	m->mdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->mdm_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &m->mdm_seg, nsegs, size, &m->mdm_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, m->mdm_map, m->mdm_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (m);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
mdmfree:
	free(m, M_DEVBUF, 0);

	return (NULL);
}
691 
/* release everything mfii_dmamem_alloc() acquired, in reverse order */
void
mfii_dmamem_free(struct mfii_softc *sc, struct mfii_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
	free(m, M_DEVBUF, 0);
}
701 
702 
703 
704 
/*
 * Drive the firmware state machine to MFI_STATE_READY.  For each
 * intermediate state, optionally poke the doorbell and then busy-wait
 * (100ms per tick) up to a state-specific limit for the firmware to
 * move on.  Returns 0 once READY, 1 on fault, unknown state or
 * timeout.
 */
int
mfii_transition_firmware(struct mfii_softc *sc)
{
	int32_t			fw_state, cur_state;
	int			max_wait, i;

	fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;

	while (fw_state != MFI_STATE_READY) {
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			printf("%s: firmware fault\n", DEVNAME(sc));
			return (1);
		case MFI_STATE_WAIT_HANDSHAKE:
			mfii_write(sc, MFI_SKINNY_IDB,
			    MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		default:
			printf("%s: unknown firmware state %d\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
		/* max_wait is in seconds; poll every 100ms */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
	}

	return (0);
}
758 
759 int
760 mfii_get_info(struct mfii_softc *sc)
761 {
762 	struct mfii_ccb *ccb;
763 	int rv;
764 
765 	ccb = scsi_io_get(&sc->sc_iopool, 0);
766 	rv = mfii_mgmt(sc, ccb, MR_DCMD_CTRL_GET_INFO, NULL,
767 	    &sc->sc_info, sizeof(sc->sc_info), SCSI_DATA_IN|SCSI_NOSLEEP);
768 	scsi_io_put(&sc->sc_iopool, ccb);
769 
770 	if (rv != 0)
771 		return (rv);
772 
773 #ifdef MFI_DEBUG
774 	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
775 		printf("%s: active FW %s Version %s date %s time %s\n",
776 		    DEVNAME(sc),
777 		    sc->sc_info.mci_image_component[i].mic_name,
778 		    sc->sc_info.mci_image_component[i].mic_version,
779 		    sc->sc_info.mci_image_component[i].mic_build_date,
780 		    sc->sc_info.mci_image_component[i].mic_build_time);
781 	}
782 
783 	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
784 		printf("%s: pending FW %s Version %s date %s time %s\n",
785 		    DEVNAME(sc),
786 		    sc->sc_info.mci_pending_image_component[i].mic_name,
787 		    sc->sc_info.mci_pending_image_component[i].mic_version,
788 		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
789 		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
790 	}
791 
792 	printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
793 	    DEVNAME(sc),
794 	    sc->sc_info.mci_max_arms,
795 	    sc->sc_info.mci_max_spans,
796 	    sc->sc_info.mci_max_arrays,
797 	    sc->sc_info.mci_max_lds,
798 	    sc->sc_info.mci_product_name);
799 
800 	printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
801 	    DEVNAME(sc),
802 	    sc->sc_info.mci_serial_number,
803 	    sc->sc_info.mci_hw_present,
804 	    sc->sc_info.mci_current_fw_time,
805 	    sc->sc_info.mci_max_cmds,
806 	    sc->sc_info.mci_max_sg_elements);
807 
808 	printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
809 	    DEVNAME(sc),
810 	    sc->sc_info.mci_max_request_size,
811 	    sc->sc_info.mci_lds_present,
812 	    sc->sc_info.mci_lds_degraded,
813 	    sc->sc_info.mci_lds_offline,
814 	    sc->sc_info.mci_pd_present);
815 
816 	printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
817 	    DEVNAME(sc),
818 	    sc->sc_info.mci_pd_disks_present,
819 	    sc->sc_info.mci_pd_disks_pred_failure,
820 	    sc->sc_info.mci_pd_disks_failed);
821 
822 	printf("%s: nvram %d mem %d flash %d\n",
823 	    DEVNAME(sc),
824 	    sc->sc_info.mci_nvram_size,
825 	    sc->sc_info.mci_memory_size,
826 	    sc->sc_info.mci_flash_size);
827 
828 	printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
829 	    DEVNAME(sc),
830 	    sc->sc_info.mci_ram_correctable_errors,
831 	    sc->sc_info.mci_ram_uncorrectable_errors,
832 	    sc->sc_info.mci_cluster_allowed,
833 	    sc->sc_info.mci_cluster_active);
834 
835 	printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
836 	    DEVNAME(sc),
837 	    sc->sc_info.mci_max_strips_per_io,
838 	    sc->sc_info.mci_raid_levels,
839 	    sc->sc_info.mci_adapter_ops,
840 	    sc->sc_info.mci_ld_ops);
841 
842 	printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
843 	    DEVNAME(sc),
844 	    sc->sc_info.mci_stripe_sz_ops.min,
845 	    sc->sc_info.mci_stripe_sz_ops.max,
846 	    sc->sc_info.mci_pd_ops,
847 	    sc->sc_info.mci_pd_mix_support);
848 
849 	printf("%s: ecc_bucket %d pckg_prop %s\n",
850 	    DEVNAME(sc),
851 	    sc->sc_info.mci_ecc_bucket_count,
852 	    sc->sc_info.mci_package_version);
853 
854 	printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
855 	    DEVNAME(sc),
856 	    sc->sc_info.mci_properties.mcp_seq_num,
857 	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
858 	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
859 	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);
860 
861 	printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
862 	    DEVNAME(sc),
863 	    sc->sc_info.mci_properties.mcp_rebuild_rate,
864 	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
865 	    sc->sc_info.mci_properties.mcp_bgi_rate,
866 	    sc->sc_info.mci_properties.mcp_cc_rate);
867 
868 	printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
869 	    DEVNAME(sc),
870 	    sc->sc_info.mci_properties.mcp_recon_rate,
871 	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
872 	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
873 	    sc->sc_info.mci_properties.mcp_spinup_delay,
874 	    sc->sc_info.mci_properties.mcp_cluster_enable);
875 
876 	printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
877 	    DEVNAME(sc),
878 	    sc->sc_info.mci_properties.mcp_coercion_mode,
879 	    sc->sc_info.mci_properties.mcp_alarm_enable,
880 	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
881 	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
882 	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);
883 
884 	printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
885 	    DEVNAME(sc),
886 	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
887 	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
888 	    sc->sc_info.mci_properties.mcp_expose_encl_devices);
889 
890 	printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
891 	    DEVNAME(sc),
892 	    sc->sc_info.mci_pci.mip_vendor,
893 	    sc->sc_info.mci_pci.mip_device,
894 	    sc->sc_info.mci_pci.mip_subvendor,
895 	    sc->sc_info.mci_pci.mip_subdevice);
896 
897 	printf("%s: type %#x port_count %d port_addr ",
898 	    DEVNAME(sc),
899 	    sc->sc_info.mci_host.mih_type,
900 	    sc->sc_info.mci_host.mih_port_count);
901 
902 	for (i = 0; i < 8; i++)
903 		printf("%.0llx ", sc->sc_info.mci_host.mih_port_addr[i]);
904 	printf("\n");
905 
906 	printf("%s: type %.x port_count %d port_addr ",
907 	    DEVNAME(sc),
908 	    sc->sc_info.mci_device.mid_type,
909 	    sc->sc_info.mci_device.mid_port_count);
910 
911 	for (i = 0; i < 8; i++)
912 		printf("%.0llx ", sc->sc_info.mci_device.mid_port_addr[i]);
913 	printf("\n");
914 #endif /* MFI_DEBUG */
915 
916 	return (0);
917 }
918 
/*
 * Issue a legacy MFI frame as an MFA-type descriptor and busy-wait for
 * completion.  The frame's status byte is pre-set to 0xff and the
 * DONT_POST_IN_REPLY_QUEUE flag is set, so completion is detected by
 * the firmware rewriting the status byte rather than via the reply
 * post queue.  Returns 0 on completion, 1 on timeout (~5s).
 */
int
mfii_mfa_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mfi_frame_header	*hdr = ccb->ccb_request;
	u_int64_t r;
	int to = 0, rv = 0;

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_mfa_poll called with cookie or done set");
#endif

	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_cmd_status = 0xff;	/* sentinel: firmware overwrites */
	hdr->mfh_flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	/* build the MFA descriptor around the frame's device address */
	r = MFII_REQ_MFA(ccb->ccb_request_dva);
	memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));

	mfii_start(sc, ccb);

	for (;;) {
		/* sync so we observe the firmware's status update */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (hdr->mfh_cmd_status != 0xff)
			break;

		if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
			    ccb->ccb_smid);
			ccb->ccb_flags |= MFI_CCB_F_ERR;
			rv = 1;
			break;
		}

		/* give the frame back to the device before polling again */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		delay(1000);
	}

	if (ccb->ccb_len > 0) {
		/* complete the data transfer set up by mfii_load_mfa() */
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
		    0, ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	return (rv);
}
974 
975 int
976 mfii_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
977 {
978 	void (*done)(struct mfii_softc *, struct mfii_ccb *);
979 	void *cookie;
980 	int rv = 1;
981 
982 	done = ccb->ccb_done;
983 	cookie = ccb->ccb_cookie;
984 
985 	ccb->ccb_done = mfii_poll_done;
986 	ccb->ccb_cookie = &rv;
987 
988 	mfii_start(sc, ccb);
989 
990 	do {
991 		delay(10);
992 		mfii_postq(sc);
993 	} while (rv == 1);
994 
995 	ccb->ccb_cookie = cookie;
996 	done(sc, ccb);
997 
998 	return (0);
999 }
1000 
1001 void
1002 mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1003 {
1004 	int *rv = ccb->ccb_cookie;
1005 
1006 	*rv = 0;
1007 }
1008 
1009 int
1010 mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb)
1011 {
1012 	struct mutex m = MUTEX_INITIALIZER(IPL_BIO);
1013 
1014 #ifdef DIAGNOSTIC
1015 	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
1016 		panic("mfii_exec called with cookie or done set");
1017 #endif
1018 
1019 	ccb->ccb_cookie = &m;
1020 	ccb->ccb_done = mfii_exec_done;
1021 
1022 	mtx_enter(&m);
1023 	while (ccb->ccb_cookie != NULL)
1024 		msleep(ccb, &m, PRIBIO, "mfiiexec", 0);
1025 	mtx_leave(&m);
1026 
1027 	return (0);
1028 }
1029 
/*
 * Completion callback for mfii_exec(): clearing ccb_cookie under the
 * caller's on-stack mutex is the wakeup condition its msleep() loop
 * tests, so the store must happen with the mutex held, before the
 * wakeup.
 */
void
mfii_exec_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mutex *m = ccb->ccb_cookie;

	mtx_enter(m);
	ccb->ccb_cookie = NULL;
	wakeup_one(ccb);
	mtx_leave(m);
}
1040 
/*
 * Issue a management DCMD.  `buf'/`len' is the caller's data buffer;
 * it is bounced through a freshly dma_alloc()ed buffer in both
 * directions.  `flags' carries SCSI_DATA_IN/OUT for direction and
 * SCSI_NOSLEEP to select polled (mfii_mfa_poll) vs sleeping
 * (mfii_exec) completion.  Returns 0 on MFI_STAT_OK, ENOMEM on
 * allocation failure, EIO otherwise.
 *
 * NOTE(review): dma_alloc() is called with PR_WAITOK even when the
 * caller passed SCSI_NOSLEEP -- looks like it may sleep in a no-sleep
 * context; confirm all SCSI_NOSLEEP callers run where sleeping is ok
 * (currently only autoconf paths).
 */
int
mfii_mgmt(struct mfii_softc *sc, struct mfii_ccb *ccb,
    u_int32_t opc, u_int8_t *mbox, void *buf, size_t len, int flags)
{
	struct mfi_dcmd_frame *dcmd = ccb->ccb_request;
	struct mfi_frame_header	*hdr = &dcmd->mdf_header;
	u_int64_t r;
	u_int8_t *dma_buf;
	int rv = EIO;

	dma_buf = dma_alloc(len, PR_WAITOK);
	if (dma_buf == NULL)
		return (ENOMEM);

	mfii_scrub_ccb(ccb);
	ccb->ccb_data = dma_buf;
	ccb->ccb_len = len;
	/* default direction (no flag set) stays MFII_DATA_NONE */
	switch (flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ);
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_WRITE);
		memcpy(dma_buf, buf, len);
		break;
	}

	if (mfii_load_mfa(sc, ccb, &dcmd->mdf_sgl,
	    ISSET(flags, SCSI_NOSLEEP)) != 0) {
		rv = ENOMEM;
		goto done;
	}

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_data_len = htole32(len);
	hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;

	dcmd->mdf_opcode = opc;
	/* handle special opcodes */
	if (mbox != NULL)
		memcpy(dcmd->mdf_mbox, mbox, MFI_MBOX_SIZE);

	if (ISSET(flags, SCSI_NOSLEEP))
		mfii_mfa_poll(sc, ccb);
	else {
		/* non-polled MFA frames are posted as MFA descriptors too */
		r = MFII_REQ_MFA(ccb->ccb_request_dva);
		memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));
		mfii_exec(sc, ccb);
	}

	if (hdr->mfh_cmd_status == MFI_STAT_OK) {
		rv = 0;

		if (ccb->ccb_direction == MFII_DATA_IN)
			memcpy(buf, dma_buf, len);
	}

done:
	dma_free(dma_buf, len);

	return (rv);
}
1106 
1107 int
1108 mfii_load_mfa(struct mfii_softc *sc, struct mfii_ccb *ccb,
1109     void *sglp, int nosleep)
1110 {
1111 	union mfi_sgl *sgl = sglp;
1112 	bus_dmamap_t dmap = ccb->ccb_dmamap;
1113 	int error;
1114 	int i;
1115 
1116 	if (ccb->ccb_len == 0)
1117 		return (0);
1118 
1119 	error = bus_dmamap_load(sc->sc_dmat, dmap,
1120 	    ccb->ccb_data, ccb->ccb_len, NULL,
1121 	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
1122 	if (error) {
1123 		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
1124 		return (1);
1125 	}
1126 
1127 	for (i = 0; i < dmap->dm_nsegs; i++) {
1128 		sgl->sg32[i].addr = htole32(dmap->dm_segs[i].ds_addr);
1129 		sgl->sg32[i].len = htole32(dmap->dm_segs[i].ds_len);
1130 	}
1131 
1132 	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
1133 	    ccb->ccb_direction == MFII_DATA_OUT ?
1134 	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
1135 
1136 	return (0);
1137 }
1138 
1139 void
1140 mfii_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
1141 {
1142 	u_long *r = (u_long *)&ccb->ccb_req;
1143 
1144 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
1145 	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
1146 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1147 
1148 #if defined(__LP64__)
1149         bus_space_write_raw_8(sc->sc_iot, sc->sc_ioh, MFI_IQPL, *r);
1150 #else
1151 	mtx_enter(&sc->sc_post_mtx);
1152 	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh, MFI_IQPL, r[0]);
1153 	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
1154 	    MFI_IQPL, 8, BUS_SPACE_BARRIER_WRITE);
1155 
1156 	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh, MFI_IQPH, r[1]);
1157 	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
1158 	    MFI_IQPH, 8, BUS_SPACE_BARRIER_WRITE);
1159 	mtx_leave(&sc->sc_post_mtx);
1160 #endif
1161 }
1162 
1163 void
1164 mfii_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1165 {
1166 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
1167 	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
1168 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1169 
1170 	if (ccb->ccb_sgl_len > 0) {
1171 		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
1172 		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
1173 		    BUS_DMASYNC_POSTWRITE);
1174 	}
1175 
1176 	if (ccb->ccb_len > 0) {
1177 		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
1178 		    0, ccb->ccb_dmamap->dm_mapsize,
1179 		    (ccb->ccb_direction == MFII_DATA_IN) ?
1180 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1181 
1182 		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
1183 	}
1184 
1185 	ccb->ccb_done(sc, ccb);
1186 }
1187 
/*
 * Transition the firmware into MPI-style operation: build an IOC INIT
 * message describing the reply post queue and the request frame pool,
 * wrap it in a legacy MFI INIT frame and poll it to completion.
 * Returns 0 on success, non-zero on failure.
 */
int
mfii_initialise_firmware(struct mfii_softc *sc)
{
	struct mpii_msg_iocinit_request *iiq;
	struct mfii_dmamem *m;
	struct mfii_ccb *ccb;
	struct mfi_init_frame *init;
	int rv;

	/* the IOC INIT message is passed to the firmware by dma */
	m = mfii_dmamem_alloc(sc, sizeof(*iiq));
	if (m == NULL)
		return (1);

	iiq = MFII_DMA_KVA(m);
	memset(iiq, 0, sizeof(*iiq));

	iiq->function = MPII_FUNCTION_IOC_INIT;
	iiq->whoinit = MPII_WHOINIT_HOST_DRIVER;

	/* MPI message version 2.0 */
	iiq->msg_version_maj = 0x02;
	iiq->msg_version_min = 0x00;
	iiq->hdr_version_unit = 0x10;
	iiq->hdr_version_dev = 0x0;

	/* frame size is expressed in 32bit words */
	iiq->system_request_frame_size = htole16(MFII_REQUEST_SIZE / 4);

	iiq->reply_descriptor_post_queue_depth =
	    htole16(sc->sc_reply_postq_depth);
	iiq->reply_free_queue_depth = htole16(0);

	/* only the high 32 bits here; the low bits are set per command */
	htolem32(&iiq->sense_buffer_address_high,
	    MFII_DMA_DVA(sc->sc_sense) >> 32);

	htolem32(&iiq->reply_descriptor_post_queue_address_lo,
	    MFII_DMA_DVA(sc->sc_reply_postq));
	htolem32(&iiq->reply_descriptor_post_queue_address_hi,
	    MFII_DMA_DVA(sc->sc_reply_postq) >> 32);

	htolem32(&iiq->system_request_frame_base_address_lo,
	    MFII_DMA_DVA(sc->sc_requests));
	htolem32(&iiq->system_request_frame_base_address_hi,
	    MFII_DMA_DVA(sc->sc_requests) >> 32);

	iiq->timestamp = htole64(time_uptime);

	/* wrap the IOC INIT message in an MFI INIT frame */
	ccb = scsi_io_get(&sc->sc_iopool, 0);
	mfii_scrub_ccb(ccb);
	init = ccb->ccb_request;

	init->mif_header.mfh_cmd = MFI_CMD_INIT;
	init->mif_header.mfh_data_len = htole32(sizeof(*iiq));
	init->mif_qinfo_new_addr = htole64(MFII_DMA_DVA(m));

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_PREREAD);

	/*
	 * NOTE(review): the host has just written iiq for the firmware to
	 * read, so BUS_DMASYNC_PREWRITE looks like the appropriate op for
	 * this sync — confirm against bus_dma(9) before changing.
	 */
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
	    0, sizeof(*iiq), BUS_DMASYNC_PREREAD);

	rv = mfii_mfa_poll(sc, ccb);

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
	    0, sizeof(*iiq), BUS_DMASYNC_POSTREAD);

	scsi_io_put(&sc->sc_iopool, ccb);
	mfii_dmamem_free(sc, m);

	return (rv);
}
1258 
1259 int
1260 mfii_my_intr(struct mfii_softc *sc)
1261 {
1262 	u_int32_t status;
1263 
1264 	status = mfii_read(sc, MFI_OSTS);
1265 	if (ISSET(status, 0x1)) {
1266 		mfii_write(sc, MFI_OSTS, status);
1267 		return (1);
1268 	}
1269 
1270 	return (ISSET(status, MFII_OSTS_INTR_VALID) ? 1 : 0);
1271 }
1272 
/*
 * Interrupt handler: claim the interrupt if it is ours and drain the
 * reply post queue.
 */
int
mfii_intr(void *arg)
{
	struct mfii_softc *sc = arg;

	if (mfii_my_intr(sc) == 0)
		return (0);

	mfii_postq(sc);
	return (1);
}
1285 
/*
 * Drain the reply post queue: collect every completed ccb under the
 * queue mutex, acknowledge the new queue index to the controller, then
 * complete the ccbs outside the lock.
 */
void
mfii_postq(struct mfii_softc *sc)
{
	struct mfii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_reply_descr *postq = MFII_DMA_KVA(sc->sc_reply_postq);
	struct mpii_reply_descr *rdp;
	struct mfii_ccb *ccb;
	int rpi = 0;

	mtx_enter(&sc->sc_reply_postq_mtx);

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_POSTREAD);

	for (;;) {
		rdp = &postq[sc->sc_reply_postq_index];
		/* an unused descriptor means we have caught up */
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		/* smid is 1 based, the ccb array is 0 based */
		ccb = &sc->sc_ccb[letoh16(rdp->smid) - 1];
		SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		/* reset the descriptor to the all-ones "unused" pattern */
		memset(rdp, 0xff, sizeof(*rdp));

		sc->sc_reply_postq_index++;
		sc->sc_reply_postq_index %= sc->sc_reply_postq_depth;
		rpi = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_PREREAD);

	/* only poke the register if descriptors were actually consumed */
	if (rpi)
		mfii_write(sc, MFII_RPI, sc->sc_reply_postq_index);

	mtx_leave(&sc->sc_reply_postq_mtx);

	/* run completions without holding the postq mutex */
	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		mfii_done(sc, ccb);
	}
}
1337 
1338 void
1339 mfii_scsi_cmd(struct scsi_xfer *xs)
1340 {
1341 	struct scsi_link *link = xs->sc_link;
1342 	struct mfii_softc *sc = link->adapter_softc;
1343 	struct mfii_ccb *ccb = xs->io;
1344 
1345 	mfii_scrub_ccb(ccb);
1346 	ccb->ccb_cookie = xs;
1347 	ccb->ccb_done = mfii_scsi_cmd_done;
1348 	ccb->ccb_data = xs->data;
1349 	ccb->ccb_len = xs->datalen;
1350 
1351 	switch (xs->cmd->opcode) {
1352 	case READ_COMMAND:
1353 	case READ_BIG:
1354 	case READ_12:
1355 	case READ_16:
1356 	case WRITE_COMMAND:
1357 	case WRITE_BIG:
1358 	case WRITE_12:
1359 	case WRITE_16:
1360 		if (mfii_scsi_cmd_io(sc, xs) != 0)
1361 			goto stuffup;
1362 
1363 		break;
1364 
1365 	default:
1366 		if (mfii_scsi_cmd_cdb(sc, xs) != 0)
1367 			goto stuffup;
1368 		break;
1369 	}
1370 
1371 	xs->error = XS_NOERROR;
1372 	xs->resid = 0;
1373 
1374 	if (ISSET(xs->flags, SCSI_POLL)) {
1375 		if (mfii_poll(sc, ccb) != 0)
1376 			goto stuffup;
1377 		return;
1378 	}
1379 
1380 	mfii_start(sc, ccb);
1381 	return;
1382 
1383 stuffup:
1384 	xs->error = XS_DRIVER_STUFFUP;
1385 	scsi_done(xs);
1386 }
1387 
1388 void
1389 mfii_scsi_cmd_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1390 {
1391 	struct scsi_xfer *xs = ccb->ccb_cookie;
1392 	struct mpii_msg_scsi_io *io = ccb->ccb_request;
1393 	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
1394 
1395 	switch (ctx->status) {
1396 	case MFI_STAT_OK:
1397 		break;
1398 
1399 	case MFI_STAT_SCSI_DONE_WITH_ERROR:
1400 		xs->error = XS_SENSE;
1401 		memset(&xs->sense, 0, sizeof(xs->sense));
1402 		memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
1403 		break;
1404 
1405 	case MFI_STAT_LD_OFFLINE:
1406 	case MFI_STAT_DEVICE_NOT_FOUND:
1407 		xs->error = XS_SELTIMEOUT;
1408 		break;
1409 
1410 	default:
1411 		xs->error = XS_DRIVER_STUFFUP;
1412 		break;
1413 	}
1414 
1415 	scsi_done(xs);
1416 }
1417 
/*
 * Build a fast-path LD I/O request for a read/write command to a
 * logical disk.  Returns 0 on success, 1 if the data buffer could not
 * be mapped for dma.
 */
int
mfii_scsi_cmd_io(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the raid context sits directly after the scsi io message */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);

	io->dev_handle = htole16(link->target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* sgl offset is expressed in 32bit words */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	/* LDIO context parameters come from the per-chip iop description */
	ctx->type_nseg = sc->sc_iop->ldio_ctx_type_nseg;
	ctx->timeout_value = htole16(0x14); /* XXX */
	ctx->reg_lock_flags = sc->sc_iop->ldio_ctx_reg_lock_flags;
	ctx->virtual_disk_target_id = htole16(link->target);

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (1);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;

	ccb->ccb_req.flags = sc->sc_iop->ldio_req_type;
	/* NOTE(review): letoh16 behaves like htole16; htole16 reads better */
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	return (0);
}
1466 
/*
 * Build a generic CDB pass-through request for a non-read/write
 * command to a logical disk.  Returns 0 on success, 1 if the data
 * buffer could not be mapped for dma.
 */
int
mfii_scsi_cmd_cdb(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the raid context sits directly after the scsi io message */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);

	io->dev_handle = htole16(link->target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* sgl offset is expressed in 32bit words */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(link->target);

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (1);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	/* NOTE(review): letoh16 behaves like htole16; htole16 reads better */
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	return (0);
}
1513 
1514 void
1515 mfii_pd_scsi_cmd(struct scsi_xfer *xs)
1516 {
1517 	struct scsi_link *link = xs->sc_link;
1518 	struct mfii_softc *sc = link->adapter_softc;
1519 	struct mfii_ccb *ccb = xs->io;
1520 
1521 	mfii_scrub_ccb(ccb);
1522 	ccb->ccb_cookie = xs;
1523 	ccb->ccb_done = mfii_scsi_cmd_done;
1524 	ccb->ccb_data = xs->data;
1525 	ccb->ccb_len = xs->datalen;
1526 
1527 	if (mfii_pd_scsi_cmd_cdb(sc, xs) != 0)
1528 		goto stuffup;
1529 
1530 	xs->error = XS_NOERROR;
1531 	xs->resid = 0;
1532 
1533 	if (ISSET(xs->flags, SCSI_POLL)) {
1534 		if (mfii_poll(sc, ccb) != 0)
1535 			goto stuffup;
1536 		return;
1537 	}
1538 
1539 	mfii_start(sc, ccb);
1540 	return;
1541 
1542 stuffup:
1543 	xs->error = XS_DRIVER_STUFFUP;
1544 	scsi_done(xs);
1545 }
1546 
1547 int
1548 mfii_pd_scsi_probe(struct scsi_link *link)
1549 {
1550 	struct mfii_ccb *ccb;
1551 	uint8_t mbox[MFI_MBOX_SIZE];
1552 	struct mfii_softc *sc = link->adapter_softc;
1553 	struct mfii_pd_link *pl = sc->sc_pd->pd_links[link->target];
1554 	int rv;
1555 
1556 	if (link->lun > 0)
1557 		return (0);
1558 
1559 	if (pl == NULL)
1560 		return (ENXIO);
1561 
1562 	memset(mbox, 0, sizeof(mbox));
1563 	memcpy(&mbox[0], &pl->pd_id, sizeof(pl->pd_id));
1564 
1565 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1566 	rv = mfii_mgmt(sc, ccb, MR_DCMD_PD_GET_INFO, mbox, &pl->pd_info,
1567 	    sizeof(pl->pd_info), SCSI_DATA_IN|SCSI_NOSLEEP);
1568 	scsi_io_put(&sc->sc_iopool, ccb);
1569 	if (rv != 0)
1570 		return (EIO);
1571 
1572 	if (letoh16(pl->pd_info.mpd_fw_state) != MFI_PD_SYSTEM)
1573 		return (ENXIO);
1574 
1575 	return (0);
1576 }
1577 
/*
 * Build a pass-through request addressed to a physical (system/JBOD)
 * disk by its firmware device handle.  Returns 0 on success, 1 if the
 * data buffer could not be mapped for dma.
 */
int
mfii_pd_scsi_cmd_cdb(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the raid context sits directly after the scsi io message */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);

	io->dev_handle = sc->sc_pd->pd_links[link->target]->pd_handle;
	/* NOTE(review): 0 here presumably selects plain MPI SCSI IO — confirm */
	io->function = 0;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* sgl offset is expressed in 32bit words */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(link->target);
	/* mark the command as a system pd (pass-through) request */
	ctx->raid_flags = MFII_RAID_CTX_IO_TYPE_SYSPD;
	ctx->timeout_value = sc->sc_pd->pd_timeout;

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (1);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;

	ccb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
	/* NOTE(review): letoh16 behaves like htole16; htole16 reads better */
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);
	ccb->ccb_req.dev_handle = sc->sc_pd->pd_links[link->target]->pd_handle;

	return (0);
}
1627 
/*
 * Map the ccb's data buffer for dma and build the scatter gather list
 * for it.  The sgl starts inline in the request frame at sglp; when it
 * does not fit, the last inline slot becomes a chain entry pointing at
 * the ccb's off-frame sgl and the remaining entries continue there.
 * Returns 0 on success, 1 if the buffer could not be mapped.
 */
int
mfii_load_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb, void *sglp,
    int nosleep)
{
	struct mpii_msg_request *req = ccb->ccb_request;
	struct mfii_sge *sge = NULL, *nsge = sglp;
	struct mfii_sge *ce = NULL;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	u_int space;
	int i;

	int error;

	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* how many sge entries fit between sglp and the end of the frame */
	space = (MFII_REQUEST_SIZE - ((u_int8_t *)nsge - (u_int8_t *)req)) /
	    sizeof(*nsge);
	if (dmap->dm_nsegs > space) {
		/* reserve the last inline slot for the chain entry */
		space--;

		ccb->ccb_sgl_len = (dmap->dm_nsegs - space) * sizeof(*nsge);
		memset(ccb->ccb_sgl, 0, ccb->ccb_sgl_len);

		/* the chain entry points at the off-frame sgl */
		ce = nsge + space;
		ce->sg_addr = htole64(ccb->ccb_sgl_dva);
		ce->sg_len = htole32(ccb->ccb_sgl_len);
		ce->sg_flags = sc->sc_iop->sge_flag_chain;

		/* chain offset is expressed in 16 byte units */
		req->chain_offset = ((u_int8_t *)ce - (u_int8_t *)req) / 16;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		/* jump to the off-frame sgl when we hit the chain entry */
		if (nsge == ce)
			nsge = ccb->ccb_sgl;

		sge = nsge;

		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_flags = MFII_SGE_ADDR_SYSTEM;

		nsge = sge + 1;
	}
	/* mark the last entry written as the end of the list */
	sge->sg_flags |= sc->sc_iop->sge_flag_eol;

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/* flush the off-frame sgl so the controller sees it */
	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_PREWRITE);
	}

	return (0);
}
1694 
1695 void *
1696 mfii_get_ccb(void *cookie)
1697 {
1698 	struct mfii_softc *sc = cookie;
1699 	struct mfii_ccb *ccb;
1700 
1701 	mtx_enter(&sc->sc_ccb_mtx);
1702 	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_freeq);
1703 	if (ccb != NULL)
1704 		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
1705 	mtx_leave(&sc->sc_ccb_mtx);
1706 
1707 	return (ccb);
1708 }
1709 
1710 void
1711 mfii_scrub_ccb(struct mfii_ccb *ccb)
1712 {
1713 	ccb->ccb_cookie = NULL;
1714 	ccb->ccb_done = NULL;
1715 	ccb->ccb_flags = 0;
1716 	ccb->ccb_data = NULL;
1717 	ccb->ccb_direction = 0;
1718 	ccb->ccb_len = 0;
1719 	ccb->ccb_sgl_len = 0;
1720 
1721 	memset(&ccb->ccb_req, 0, sizeof(ccb->ccb_req));
1722 	memset(ccb->ccb_request, 0, MFII_REQUEST_SIZE);
1723 }
1724 
1725 void
1726 mfii_put_ccb(void *cookie, void *io)
1727 {
1728 	struct mfii_softc *sc = cookie;
1729 	struct mfii_ccb *ccb = io;
1730 
1731 	mtx_enter(&sc->sc_ccb_mtx);
1732 	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
1733 	mtx_leave(&sc->sc_ccb_mtx);
1734 }
1735 
1736 int
1737 mfii_init_ccb(struct mfii_softc *sc)
1738 {
1739 	struct mfii_ccb *ccb;
1740 	u_int8_t *request = MFII_DMA_KVA(sc->sc_requests);
1741 	u_int8_t *sense = MFII_DMA_KVA(sc->sc_sense);
1742 	u_int8_t *sgl = MFII_DMA_KVA(sc->sc_sgl);
1743 	u_int i;
1744 	int error;
1745 
1746 	sc->sc_ccb = mallocarray(sc->sc_max_cmds, sizeof(struct mfii_ccb),
1747 	    M_DEVBUF, M_WAITOK|M_ZERO);
1748 
1749 	for (i = 0; i < sc->sc_max_cmds; i++) {
1750 		ccb = &sc->sc_ccb[i];
1751 
1752 		/* create a dma map for transfer */
1753 		error = bus_dmamap_create(sc->sc_dmat,
1754 		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
1755 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
1756 		if (error) {
1757 			printf("%s: cannot create ccb dmamap (%d)\n",
1758 			    DEVNAME(sc), error);
1759 			goto destroy;
1760 		}
1761 
1762 		/* select i + 1'th request. 0 is reserved for events */
1763 		ccb->ccb_smid = i + 1;
1764 		ccb->ccb_request_offset = MFII_REQUEST_SIZE * (i + 1);
1765 		ccb->ccb_request = request + ccb->ccb_request_offset;
1766 		ccb->ccb_request_dva = MFII_DMA_DVA(sc->sc_requests) +
1767 		    ccb->ccb_request_offset;
1768 
1769 		/* select i'th sense */
1770 		ccb->ccb_sense_offset = MFI_SENSE_SIZE * i;
1771 		ccb->ccb_sense = (struct mfi_sense *)(sense +
1772 		    ccb->ccb_sense_offset);
1773 		ccb->ccb_sense_dva = (u_int32_t)(MFII_DMA_DVA(sc->sc_sense) +
1774 		    ccb->ccb_sense_offset);
1775 
1776 		/* select i'th sgl */
1777 		ccb->ccb_sgl_offset = sizeof(struct mfii_sge) *
1778 		    sc->sc_max_sgl * i;
1779 		ccb->ccb_sgl = (struct mfii_sge *)(sgl + ccb->ccb_sgl_offset);
1780 		ccb->ccb_sgl_dva = MFII_DMA_DVA(sc->sc_sgl) +
1781 		    ccb->ccb_sgl_offset;
1782 
1783 		/* add ccb to queue */
1784 		mfii_put_ccb(sc, ccb);
1785 	}
1786 
1787 	return (0);
1788 
1789 destroy:
1790 	/* free dma maps and ccb memory */
1791 	while ((ccb = mfii_get_ccb(sc)) != NULL)
1792 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
1793 
1794 	free(sc->sc_ccb, M_DEVBUF, 0);
1795 
1796 	return (1);
1797 }
1798 
1799