xref: /openbsd-src/sys/dev/pci/mfii.c (revision 50b7afb2c2c0993b0894d4e34bf857cb13ed9c80)
1 /* $OpenBSD: mfii.c,v 1.17 2014/07/13 23:10:23 deraadt Exp $ */
2 
3 /*
4  * Copyright (c) 2012 David Gwynne <dlg@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "bio.h"
20 
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/kernel.h>
24 #include <sys/malloc.h>
25 #include <sys/device.h>
26 #include <sys/types.h>
27 #include <sys/pool.h>
28 
29 #include <dev/pci/pcidevs.h>
30 #include <dev/pci/pcivar.h>
31 
32 #include <machine/bus.h>
33 
34 #include <scsi/scsi_all.h>
35 #include <scsi/scsi_disk.h>
36 #include <scsi/scsiconf.h>
37 
38 #include <dev/ic/mfireg.h>
39 #include <dev/pci/mpiireg.h>
40 
41 #define	MFII_BAR		0x14
42 #define	MFII_PCI_MEMSIZE	0x2000 /* 8k */
43 
44 #define MFII_OSTS_INTR_VALID	0x00000009
45 #define MFII_RPI		0x6c /* reply post host index */
46 
47 #define MFII_REQ_TYPE_SCSI	MPII_REQ_DESCR_SCSI_IO
48 #define MFII_REQ_TYPE_LDIO	(0x7 << 1)
49 #define MFII_REQ_TYPE_MFA	(0x1 << 1)
50 #define MFII_REQ_TYPE_NO_LOCK	(0x2 << 1)
51 
52 #define MFII_REQ_MFA(_a)	htole64((_a) | MFII_REQ_TYPE_MFA)
53 
54 #define MFII_FUNCTION_LDIO_REQUEST			(0xf1)
55 
/*
 * 64-bit request descriptor posted to the controller's inbound queue
 * registers (see mfii_start()) to kick off a command.  The SMID ties
 * the descriptor back to a slot in the sc_requests memory.
 */
struct mfii_request_descr {
	u_int8_t	flags;		/* MFII_REQ_TYPE_* request type */
	u_int8_t	msix_index;
	u_int16_t	smid;		/* system message id (1-based) */

	u_int16_t	lmid;
	u_int16_t	field;
} __packed;
64 
/*
 * Per-command RAID context embedded in fast-path LD I/O requests.
 * Filled in by the driver before posting; status/ex_status are
 * written back by the firmware on completion.
 */
struct mfii_raid_context {
	u_int8_t	type_nseg;
	u_int8_t	_reserved1;
	u_int16_t	timeout_value;

	u_int8_t	reg_lock_flags;
	u_int8_t	_reserved2;
	u_int16_t	virtual_disk_target_id;

	u_int64_t	reg_lock_row_lba;

	u_int32_t	reg_lock_length;

	u_int16_t	next_lm_id;
	u_int8_t	ex_status;	/* extended status from firmware */
	u_int8_t	status;		/* completion status from firmware */

	u_int8_t	raid_flags;
	u_int8_t	num_sge;
	u_int16_t	config_seq_num;

	u_int8_t	span_arm;
	u_int8_t	_reserved3[3];
} __packed;
89 
/*
 * Scatter-gather element used in the fusion request frames.
 * sg_flags takes the MFII_SGE_* bits defined below.
 */
struct mfii_sge {
	u_int64_t	sg_addr;	/* dma address of the segment */
	u_int32_t	sg_len;		/* length of the segment */
	u_int16_t	_reserved;
	u_int8_t	sg_next_chain_offset;
	u_int8_t	sg_flags;	/* MFII_SGE_ADDR_*/END_OF_LIST/CHAIN */
} __packed;
97 
98 #define MFII_SGE_ADDR_MASK		(0x03)
99 #define MFII_SGE_ADDR_SYSTEM		(0x00)
100 #define MFII_SGE_ADDR_IOCDDR		(0x01)
101 #define MFII_SGE_ADDR_IOCPLB		(0x02)
102 #define MFII_SGE_ADDR_IOCPLBNTA		(0x03)
103 #define MFII_SGE_END_OF_LIST		(0x40)
104 #define MFII_SGE_CHAIN_ELEMENT		(0x80)
105 
106 #define MFII_REQUEST_SIZE	256
107 
/*
 * Bundle for a single-segment bus_dma allocation created by
 * mfii_dmamem_alloc() and released by mfii_dmamem_free().
 */
struct mfii_dmamem {
	bus_dmamap_t		mdm_map;	/* loaded dma map */
	bus_dma_segment_t	mdm_seg;	/* single backing segment */
	size_t			mdm_size;	/* allocation size in bytes */
	caddr_t			mdm_kva;	/* kernel mapping */
};
/* accessors: map, byte length, device address and kernel address */
#define MFII_DMA_MAP(_mdm)	((_mdm)->mdm_map)
#define MFII_DMA_LEN(_mdm)	((_mdm)->mdm_size)
#define MFII_DMA_DVA(_mdm)	((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MFII_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
118 
119 struct mfii_softc;
120 
/*
 * Per-command control block.  Each ccb owns a slot in the shared
 * sc_requests, sc_sense and sc_sgl DMA areas; the ccb_*_offset
 * members locate the slot for bus_dmamap_sync() calls.
 */
struct mfii_ccb {
	void			*ccb_request;		/* kva of request slot */
	u_int64_t		ccb_request_dva;	/* dva of request slot */
	bus_addr_t		ccb_request_offset;	/* offset in sc_requests */

	struct mfi_sense	*ccb_sense;
	u_int32_t		ccb_sense_dva;
	bus_addr_t		ccb_sense_offset;

	struct mfii_sge		*ccb_sgl;
	u_int64_t		ccb_sgl_dva;
	bus_addr_t		ccb_sgl_offset;
	u_int			ccb_sgl_len;

	/* descriptor written to the inbound queue by mfii_start() */
	struct mfii_request_descr ccb_req;

	bus_dmamap_t		ccb_dmamap;

	/* data for sgl */
	void			*ccb_data;
	size_t			ccb_len;

	int			ccb_direction;
#define MFII_DATA_NONE			0
#define MFII_DATA_IN			1
#define MFII_DATA_OUT			2

	void			*ccb_cookie;	/* completion context */
	void			(*ccb_done)(struct mfii_softc *,
				    struct mfii_ccb *);

	u_int32_t		ccb_flags;
#define MFI_CCB_F_ERR			(1<<0)
	u_int			ccb_smid;	/* 1-based slot id */
	SIMPLEQ_ENTRY(mfii_ccb)	ccb_link;	/* freeq/completion linkage */
};
SIMPLEQ_HEAD(mfii_ccb_list, mfii_ccb);
158 
/*
 * Per-controller state.
 */
struct mfii_softc {
	struct device		sc_dev;

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	void			*sc_ih;		/* interrupt handle */

	struct mutex		sc_ccb_mtx;	/* protects sc_ccb_freeq */
	struct mutex		sc_post_mtx;	/* serializes IQPL/IQPH writes */

	u_int			sc_max_cmds;	/* from firmware status reg */
	u_int			sc_max_sgl;	/* from firmware status reg */

	/* reply post queue, consumed in mfii_postq() */
	u_int			sc_reply_postq_depth;
	u_int			sc_reply_postq_index;
	struct mutex		sc_reply_postq_mtx;
	struct mfii_dmamem	*sc_reply_postq;

	struct mfii_dmamem	*sc_requests;	/* request frames */
	struct mfii_dmamem	*sc_sense;	/* sense buffers */
	struct mfii_dmamem	*sc_sgl;	/* sg lists */

	struct mfii_ccb		*sc_ccb;	/* array of sc_max_cmds ccbs */
	struct mfii_ccb_list	sc_ccb_freeq;

	struct scsi_link	sc_link;
	struct scsibus_softc	*sc_scsibus;
	struct scsi_iopool	sc_iopool;

	struct mfi_ctrl_info	sc_info;	/* cached controller info */
};
196 
197 int		mfii_match(struct device *, void *, void *);
198 void		mfii_attach(struct device *, struct device *, void *);
199 int		mfii_detach(struct device *, int);
200 
/* autoconf glue */
struct cfattach mfii_ca = {
	sizeof(struct mfii_softc),
	mfii_match,
	mfii_attach,
	mfii_detach
};
207 
struct cfdriver mfii_cd = {
	NULL,
	"mfii",
	DV_DULL
};
213 
214 void		mfii_scsi_cmd(struct scsi_xfer *);
215 void		mfii_scsi_cmd_done(struct mfii_softc *, struct mfii_ccb *);
216 
/* scsi midlayer entry points */
struct scsi_adapter mfii_switch = {
	mfii_scsi_cmd,
	scsi_minphys,
	NULL, /* probe */
	NULL, /* unprobe */
	NULL  /* ioctl */
};
224 
225 #define DEVNAME(_sc)		((_sc)->sc_dev.dv_xname)
226 
227 u_int32_t		mfii_read(struct mfii_softc *, bus_size_t);
228 void			mfii_write(struct mfii_softc *, bus_size_t, u_int32_t);
229 
230 struct mfii_dmamem *	mfii_dmamem_alloc(struct mfii_softc *, size_t);
231 void			mfii_dmamem_free(struct mfii_softc *,
232 			    struct mfii_dmamem *);
233 
234 void *			mfii_get_ccb(void *);
235 void			mfii_put_ccb(void *, void *);
236 int			mfii_init_ccb(struct mfii_softc *);
237 void			mfii_scrub_ccb(struct mfii_ccb *);
238 
239 int			mfii_transition_firmware(struct mfii_softc *);
240 int			mfii_initialise_firmware(struct mfii_softc *);
241 int			mfii_get_info(struct mfii_softc *);
242 
243 void			mfii_start(struct mfii_softc *, struct mfii_ccb *);
244 void			mfii_done(struct mfii_softc *, struct mfii_ccb *);
245 int			mfii_poll(struct mfii_softc *, struct mfii_ccb *);
246 void			mfii_poll_done(struct mfii_softc *, struct mfii_ccb *);
247 int			mfii_exec(struct mfii_softc *, struct mfii_ccb *);
248 void			mfii_exec_done(struct mfii_softc *, struct mfii_ccb *);
249 int			mfii_my_intr(struct mfii_softc *);
250 int			mfii_intr(void *);
251 void			mfii_postq(struct mfii_softc *);
252 
253 int			mfii_load_ccb(struct mfii_softc *, struct mfii_ccb *,
254 			    void *, int);
255 int			mfii_load_mfa(struct mfii_softc *, struct mfii_ccb *,
256 			    void *, int);
257 
258 int			mfii_mfa_poll(struct mfii_softc *, struct mfii_ccb *);
259 
260 int			mfii_mgmt(struct mfii_softc *, struct mfii_ccb *,
261 			    u_int32_t, u_int8_t *, void *, size_t, int);
262 
263 int			mfii_scsi_cmd_io(struct mfii_softc *,
264 			    struct scsi_xfer *);
265 int			mfii_scsi_cmd_cdb(struct mfii_softc *,
266 			    struct scsi_xfer *);
267 
268 
269 #define mfii_fw_state(_sc) mfii_read((_sc), MFI_OSP)
270 
/* PCI ids this driver attaches to */
static const struct pci_matchid mfii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_2208 }
};
274 
275 int
276 mfii_match(struct device *parent, void *match, void *aux)
277 {
278 	return (pci_matchbyid(aux, mfii_devices, nitems(mfii_devices)));
279 }
280 
281 void
282 mfii_attach(struct device *parent, struct device *self, void *aux)
283 {
284 	struct mfii_softc *sc = (struct mfii_softc *)self;
285 	struct pci_attach_args *pa = aux;
286 	pcireg_t memtype;
287 	pci_intr_handle_t ih;
288 	struct scsibus_attach_args saa;
289 	u_int32_t status;
290 
291 	/* init sc */
292 	sc->sc_dmat = pa->pa_dmat;
293 	SIMPLEQ_INIT(&sc->sc_ccb_freeq);
294 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
295 	mtx_init(&sc->sc_post_mtx, IPL_BIO);
296 	mtx_init(&sc->sc_reply_postq_mtx, IPL_BIO);
297 	scsi_iopool_init(&sc->sc_iopool, sc, mfii_get_ccb, mfii_put_ccb);
298 
299 	/* wire up the bus shizz */
300 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, MFII_BAR);
301 	if (pci_mapreg_map(pa, MFII_BAR, memtype, 0,
302 	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios, MFII_PCI_MEMSIZE)) {
303 		printf(": unable to map registers\n");
304 		return;
305 	}
306 
307 	/* disable interrupts */
308 	mfii_write(sc, MFI_OMSK, 0xffffffff);
309 
310 	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
311 		printf(": unable to map interrupt\n");
312 		goto pci_unmap;
313 	}
314 	printf(": %s\n", pci_intr_string(pa->pa_pc, ih));
315 
316 	/* lets get started */
317 	if (mfii_transition_firmware(sc))
318 		goto pci_unmap;
319 
320 	status = mfii_fw_state(sc);
321 	sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
322 	sc->sc_max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
323 
324 	/* sense memory */
325 	sc->sc_sense = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
326 	if (sc->sc_sense == NULL) {
327 		printf("%s: unable to allocate sense memory\n", DEVNAME(sc));
328 		goto pci_unmap;
329 	}
330 
331 	sc->sc_reply_postq_depth = roundup(sc->sc_max_cmds, 16);
332 
333 	sc->sc_reply_postq = mfii_dmamem_alloc(sc,
334 	    sc->sc_reply_postq_depth * sizeof(struct mpii_reply_descr));
335 	if (sc->sc_reply_postq == NULL)
336 		goto free_sense;
337 
338 	memset(MFII_DMA_KVA(sc->sc_reply_postq), 0xff,
339 	    MFII_DMA_LEN(sc->sc_reply_postq));
340 
341 	sc->sc_requests = mfii_dmamem_alloc(sc,
342 	    MFII_REQUEST_SIZE * (sc->sc_max_cmds + 1));
343 	if (sc->sc_requests == NULL)
344 		goto free_reply_postq;
345 
346 	sc->sc_sgl = mfii_dmamem_alloc(sc, sc->sc_max_cmds *
347 	    sizeof(struct mfii_sge) * sc->sc_max_sgl);
348 	if (sc->sc_sgl == NULL)
349 		goto free_requests;
350 
351 	if (mfii_init_ccb(sc) != 0) {
352 		printf("%s: could not init ccb list\n", DEVNAME(sc));
353 		goto free_sgl;
354 	}
355 
356 	/* kickstart firmware with all addresses and pointers */
357 	if (mfii_initialise_firmware(sc) != 0) {
358 		printf("%s: could not initialize firmware\n", DEVNAME(sc));
359 		goto free_sgl;
360 	}
361 
362 	if (mfii_get_info(sc) != 0) {
363 		printf("%s: could not retrieve controller information\n",
364 		    DEVNAME(sc));
365 		goto free_sgl;
366 	}
367 
368 	printf("%s: \"%s\", firmware %s", DEVNAME(sc),
369 	    sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
370 	if (letoh16(sc->sc_info.mci_memory_size) > 0)
371 		printf(", %uMB cache", letoh16(sc->sc_info.mci_memory_size));
372 	printf("\n");
373 
374 	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO,
375 	    mfii_intr, sc, DEVNAME(sc));
376 	if (sc->sc_ih == NULL)
377 		goto free_sgl;
378 
379 	sc->sc_link.openings = sc->sc_max_cmds;
380 	sc->sc_link.adapter_softc = sc;
381 	sc->sc_link.adapter = &mfii_switch;
382 	sc->sc_link.adapter_target = sc->sc_info.mci_max_lds;
383 	sc->sc_link.adapter_buswidth = sc->sc_info.mci_max_lds;
384 	sc->sc_link.pool = &sc->sc_iopool;
385 
386 	bzero(&saa, sizeof(saa));
387 	saa.saa_sc_link = &sc->sc_link;
388 
389 	config_found(&sc->sc_dev, &saa, scsiprint);
390 
391 	/* enable interrupts */
392 	mfii_write(sc, MFI_OSTS, 0xffffffff);
393 	mfii_write(sc, MFI_OMSK, ~MFII_OSTS_INTR_VALID);
394 
395 	return;
396 free_sgl:
397 	mfii_dmamem_free(sc, sc->sc_sgl);
398 free_requests:
399 	mfii_dmamem_free(sc, sc->sc_requests);
400 free_reply_postq:
401 	mfii_dmamem_free(sc, sc->sc_reply_postq);
402 free_sense:
403 	mfii_dmamem_free(sc, sc->sc_sense);
404 pci_unmap:
405 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
406 }
407 
/*
 * Detach: tear down the interrupt and release the DMA areas and
 * register mapping set up by mfii_attach().  A device that failed
 * attach after mapping registers has sc_ih == NULL and already
 * cleaned up after itself, so bail early.
 *
 * NOTE(review): the sc_ccb array and its per-ccb dmamaps allocated by
 * mfii_init_ccb() do not appear to be released here — confirm whether
 * that is intentional (detach only on shutdown) or a leak.
 */
int
mfii_detach(struct device *self, int flags)
{
	struct mfii_softc *sc = (struct mfii_softc *)self;

	if (sc->sc_ih == NULL)
		return (0);

	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	mfii_dmamem_free(sc, sc->sc_sgl);
	mfii_dmamem_free(sc, sc->sc_requests);
	mfii_dmamem_free(sc, sc->sc_reply_postq);
	mfii_dmamem_free(sc, sc->sc_sense);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);

	return (0);
}
425 
/*
 * Read a 32-bit controller register.  The read barrier before the
 * access orders it against earlier register accesses; do not reorder.
 */
u_int32_t
mfii_read(struct mfii_softc *sc, bus_size_t r)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
}
433 
/*
 * Write a 32-bit controller register, then issue a write barrier so
 * the store is pushed out before any subsequent register access.
 */
void
mfii_write(struct mfii_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
441 
/*
 * Allocate, map and load a zeroed, single-segment DMA buffer of the
 * given size.  Returns NULL on failure, unwinding the setup steps in
 * reverse order via the goto chain.
 */
struct mfii_dmamem *
mfii_dmamem_alloc(struct mfii_softc *sc, size_t size)
{
	struct mfii_dmamem *m;
	int nsegs;

	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (m == NULL)
		return (NULL);

	m->mdm_size = size;

	/* one segment only, so MFII_DMA_DVA() can use dm_segs[0] */
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->mdm_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &m->mdm_seg, nsegs, size, &m->mdm_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, m->mdm_map, m->mdm_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (m);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
mdmfree:
	free(m, M_DEVBUF, 0);

	return (NULL);
}
483 
/*
 * Release a buffer from mfii_dmamem_alloc().  Teardown mirrors the
 * setup order: unload, unmap, free the segment, destroy the map.
 */
void
mfii_dmamem_free(struct mfii_softc *sc, struct mfii_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
	free(m, M_DEVBUF, 0);
}
493 
494 
495 
496 
/*
 * Step the firmware state machine towards MFI_STATE_READY, issuing
 * the doorbell commands each intermediate state requires and busy-
 * waiting (100ms polls, up to max_wait seconds per state) for the
 * transition.  Returns non-zero on fault, unknown state or timeout.
 */
int
mfii_transition_firmware(struct mfii_softc *sc)
{
	int32_t			fw_state, cur_state;
	int			max_wait, i;

	fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;

	while (fw_state != MFI_STATE_READY) {
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			printf("%s: firmware fault\n", DEVNAME(sc));
			return (1);
		case MFI_STATE_WAIT_HANDSHAKE:
			mfii_write(sc, MFI_SKINNY_IDB,
			    MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		default:
			printf("%s: unknown firmware state %d\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
		/* poll for the state change, 100ms at a time */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
	}

	return (0);
}
550 
551 int
552 mfii_get_info(struct mfii_softc *sc)
553 {
554 	struct mfii_ccb *ccb;
555 	int rv;
556 
557 	ccb = scsi_io_get(&sc->sc_iopool, 0);
558 	rv = mfii_mgmt(sc, ccb, MR_DCMD_CTRL_GET_INFO, NULL,
559 	    &sc->sc_info, sizeof(sc->sc_info), SCSI_DATA_IN|SCSI_NOSLEEP);
560 	scsi_io_put(&sc->sc_iopool, ccb);
561 
562 	if (rv != 0)
563 		return (rv);
564 
565 #ifdef MFI_DEBUG
566 	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
567 		printf("%s: active FW %s Version %s date %s time %s\n",
568 		    DEVNAME(sc),
569 		    sc->sc_info.mci_image_component[i].mic_name,
570 		    sc->sc_info.mci_image_component[i].mic_version,
571 		    sc->sc_info.mci_image_component[i].mic_build_date,
572 		    sc->sc_info.mci_image_component[i].mic_build_time);
573 	}
574 
575 	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
576 		printf("%s: pending FW %s Version %s date %s time %s\n",
577 		    DEVNAME(sc),
578 		    sc->sc_info.mci_pending_image_component[i].mic_name,
579 		    sc->sc_info.mci_pending_image_component[i].mic_version,
580 		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
581 		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
582 	}
583 
584 	printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
585 	    DEVNAME(sc),
586 	    sc->sc_info.mci_max_arms,
587 	    sc->sc_info.mci_max_spans,
588 	    sc->sc_info.mci_max_arrays,
589 	    sc->sc_info.mci_max_lds,
590 	    sc->sc_info.mci_product_name);
591 
592 	printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
593 	    DEVNAME(sc),
594 	    sc->sc_info.mci_serial_number,
595 	    sc->sc_info.mci_hw_present,
596 	    sc->sc_info.mci_current_fw_time,
597 	    sc->sc_info.mci_max_cmds,
598 	    sc->sc_info.mci_max_sg_elements);
599 
600 	printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
601 	    DEVNAME(sc),
602 	    sc->sc_info.mci_max_request_size,
603 	    sc->sc_info.mci_lds_present,
604 	    sc->sc_info.mci_lds_degraded,
605 	    sc->sc_info.mci_lds_offline,
606 	    sc->sc_info.mci_pd_present);
607 
608 	printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
609 	    DEVNAME(sc),
610 	    sc->sc_info.mci_pd_disks_present,
611 	    sc->sc_info.mci_pd_disks_pred_failure,
612 	    sc->sc_info.mci_pd_disks_failed);
613 
614 	printf("%s: nvram %d mem %d flash %d\n",
615 	    DEVNAME(sc),
616 	    sc->sc_info.mci_nvram_size,
617 	    sc->sc_info.mci_memory_size,
618 	    sc->sc_info.mci_flash_size);
619 
620 	printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
621 	    DEVNAME(sc),
622 	    sc->sc_info.mci_ram_correctable_errors,
623 	    sc->sc_info.mci_ram_uncorrectable_errors,
624 	    sc->sc_info.mci_cluster_allowed,
625 	    sc->sc_info.mci_cluster_active);
626 
627 	printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
628 	    DEVNAME(sc),
629 	    sc->sc_info.mci_max_strips_per_io,
630 	    sc->sc_info.mci_raid_levels,
631 	    sc->sc_info.mci_adapter_ops,
632 	    sc->sc_info.mci_ld_ops);
633 
634 	printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
635 	    DEVNAME(sc),
636 	    sc->sc_info.mci_stripe_sz_ops.min,
637 	    sc->sc_info.mci_stripe_sz_ops.max,
638 	    sc->sc_info.mci_pd_ops,
639 	    sc->sc_info.mci_pd_mix_support);
640 
641 	printf("%s: ecc_bucket %d pckg_prop %s\n",
642 	    DEVNAME(sc),
643 	    sc->sc_info.mci_ecc_bucket_count,
644 	    sc->sc_info.mci_package_version);
645 
646 	printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
647 	    DEVNAME(sc),
648 	    sc->sc_info.mci_properties.mcp_seq_num,
649 	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
650 	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
651 	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);
652 
653 	printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
654 	    DEVNAME(sc),
655 	    sc->sc_info.mci_properties.mcp_rebuild_rate,
656 	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
657 	    sc->sc_info.mci_properties.mcp_bgi_rate,
658 	    sc->sc_info.mci_properties.mcp_cc_rate);
659 
660 	printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
661 	    DEVNAME(sc),
662 	    sc->sc_info.mci_properties.mcp_recon_rate,
663 	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
664 	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
665 	    sc->sc_info.mci_properties.mcp_spinup_delay,
666 	    sc->sc_info.mci_properties.mcp_cluster_enable);
667 
668 	printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
669 	    DEVNAME(sc),
670 	    sc->sc_info.mci_properties.mcp_coercion_mode,
671 	    sc->sc_info.mci_properties.mcp_alarm_enable,
672 	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
673 	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
674 	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);
675 
676 	printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
677 	    DEVNAME(sc),
678 	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
679 	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
680 	    sc->sc_info.mci_properties.mcp_expose_encl_devices);
681 
682 	printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
683 	    DEVNAME(sc),
684 	    sc->sc_info.mci_pci.mip_vendor,
685 	    sc->sc_info.mci_pci.mip_device,
686 	    sc->sc_info.mci_pci.mip_subvendor,
687 	    sc->sc_info.mci_pci.mip_subdevice);
688 
689 	printf("%s: type %#x port_count %d port_addr ",
690 	    DEVNAME(sc),
691 	    sc->sc_info.mci_host.mih_type,
692 	    sc->sc_info.mci_host.mih_port_count);
693 
694 	for (i = 0; i < 8; i++)
695 		printf("%.0llx ", sc->sc_info.mci_host.mih_port_addr[i]);
696 	printf("\n");
697 
698 	printf("%s: type %.x port_count %d port_addr ",
699 	    DEVNAME(sc),
700 	    sc->sc_info.mci_device.mid_type,
701 	    sc->sc_info.mci_device.mid_port_count);
702 
703 	for (i = 0; i < 8; i++)
704 		printf("%.0llx ", sc->sc_info.mci_device.mid_port_addr[i]);
705 	printf("\n");
706 #endif /* MFI_DEBUG */
707 
708 	return (0);
709 }
710 
/*
 * Issue an MFI frame via the MFA backdoor and busy-wait for the
 * firmware to write a completion status into the frame.  Used before
 * interrupts are up (IOC_INIT) and for SCSI_NOSLEEP management
 * commands.  Returns non-zero on timeout (~5 seconds).
 */
int
mfii_mfa_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mfi_frame_header	*hdr = ccb->ccb_request;
	u_int64_t r;
	int to = 0, rv = 0;

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_mfa_poll called with cookie or done set");
#endif

	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_cmd_status = 0xff;	/* firmware overwrites on completion */
	hdr->mfh_flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	/* post the frame address itself, flagged as an MFA request */
	r = MFII_REQ_MFA(ccb->ccb_request_dva);
	memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));

	mfii_start(sc, ccb);

	for (;;) {
		/* pull the frame back from the device to see the status */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (hdr->mfh_cmd_status != 0xff)
			break;

		if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
			    ccb->ccb_smid);
			ccb->ccb_flags |= MFI_CCB_F_ERR;
			rv = 1;
			break;
		}

		/* hand the frame back to the device before polling again */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		delay(1000);
	}

	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
		    0, ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	return (rv);
}
766 
/*
 * Start a ccb and spin on the reply post queue until it completes,
 * temporarily hijacking ccb_done/ccb_cookie so mfii_poll_done() can
 * flag completion.  The caller's done routine is invoked at the end.
 */
int
mfii_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	void (*done)(struct mfii_softc *, struct mfii_ccb *);
	void *cookie;
	int rv = 1;

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mfii_poll_done;
	ccb->ccb_cookie = &rv;	/* cleared to 0 by mfii_poll_done() */

	mfii_start(sc, ccb);

	do {
		delay(10);
		mfii_postq(sc);
	} while (rv == 1);

	/* restore and call the original completion routine */
	ccb->ccb_cookie = cookie;
	done(sc, ccb);

	return (0);
}
792 
793 void
794 mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
795 {
796 	int *rv = ccb->ccb_cookie;
797 
798 	*rv = 0;
799 }
800 
801 int
802 mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb)
803 {
804 	struct mutex m = MUTEX_INITIALIZER(IPL_BIO);
805 
806 #ifdef DIAGNOSTIC
807 	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
808 		panic("mfii_exec called with cookie or done set");
809 #endif
810 
811 	ccb->ccb_cookie = &m;
812 	ccb->ccb_done = mfii_exec_done;
813 
814 	mtx_enter(&m);
815 	while (ccb->ccb_cookie != NULL)
816 		msleep(ccb, &m, PRIBIO, "mfiiexec", 0);
817 	mtx_leave(&m);
818 
819 	return (0);
820 }
821 
822 void
823 mfii_exec_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
824 {
825 	struct mutex *m = ccb->ccb_cookie;
826 
827 	mtx_enter(m);
828 	ccb->ccb_cookie = NULL;
829 	wakeup_one(ccb);
830 	mtx_leave(m);
831 }
832 
833 int
834 mfii_mgmt(struct mfii_softc *sc, struct mfii_ccb *ccb,
835     u_int32_t opc, u_int8_t *mbox, void *buf, size_t len, int flags)
836 {
837 	struct mfi_dcmd_frame *dcmd = ccb->ccb_request;
838 	struct mfi_frame_header	*hdr = &dcmd->mdf_header;
839 	u_int64_t r;
840 	u_int8_t *dma_buf;
841 	int rv = EIO;
842 
843 	dma_buf = dma_alloc(len, PR_WAITOK);
844 	if (dma_buf == NULL)
845 		return (ENOMEM);
846 
847 	mfii_scrub_ccb(ccb);
848 	ccb->ccb_data = dma_buf;
849 	ccb->ccb_len = len;
850 	switch (flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
851 	case SCSI_DATA_IN:
852 		ccb->ccb_direction = MFII_DATA_IN;
853 		hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ);
854 		break;
855 	case SCSI_DATA_OUT:
856 		ccb->ccb_direction = MFII_DATA_OUT;
857 		hdr->mfh_flags = htole16(MFI_FRAME_DIR_WRITE);
858 		bcopy(buf, dma_buf, len);
859 		break;
860 	}
861 
862 	if (mfii_load_mfa(sc, ccb, &dcmd->mdf_sgl,
863 	    ISSET(flags, SCSI_NOSLEEP)) != 0) {
864 		rv = ENOMEM;
865 		goto done;
866 	}
867 
868 	hdr->mfh_cmd = MFI_CMD_DCMD;
869 	hdr->mfh_context = ccb->ccb_smid;
870 	hdr->mfh_data_len = htole32(len);
871 	hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;
872 
873 	dcmd->mdf_opcode = opc;
874 	/* handle special opcodes */
875 	if (mbox != NULL)
876 		memcpy(dcmd->mdf_mbox, mbox, MFI_MBOX_SIZE);
877 
878 	if (ISSET(flags, SCSI_NOSLEEP))
879 		mfii_mfa_poll(sc, ccb);
880 	else {
881 		r = MFII_REQ_MFA(ccb->ccb_request_dva);
882 		memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));
883 		mfii_exec(sc, ccb);
884 	}
885 
886 	if (hdr->mfh_cmd_status == MFI_STAT_OK) {
887 		rv = 0;
888 
889 		if (ccb->ccb_direction == MFII_DATA_IN)
890 			bcopy(dma_buf, buf, len);
891 	}
892 
893 done:
894 	dma_free(dma_buf, len);
895 
896 	return (rv);
897 }
898 
/*
 * Load ccb_data into the ccb's dmamap and fill the MFI frame's sgl
 * with 32-bit entries (the frame header does not set MFI_FRAME_SGL64,
 * so sg32 is the matching format).  Returns non-zero on map failure.
 */
int
mfii_load_mfa(struct mfii_softc *sc, struct mfii_ccb *ccb,
    void *sglp, int nosleep)
{
	union mfi_sgl *sgl = sglp;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	int error;
	int i;

	/* nothing to map for zero-length commands */
	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		sgl->sg32[i].addr = htole32(dmap->dm_segs[i].ds_addr);
		sgl->sg32[i].len = htole32(dmap->dm_segs[i].ds_len);
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	return (0);
}
930 
/*
 * Post a command to the controller by writing the ccb's 64-bit
 * request descriptor to the inbound queue port, low word then high
 * word.  sc_post_mtx keeps the two 32-bit writes of different
 * descriptors from interleaving.
 */
void
mfii_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	u_int32_t *r = (u_int32_t *)&ccb->ccb_req;

	/* make the request frame visible to the device first */
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	mtx_enter(&sc->sc_post_mtx);
	mfii_write(sc, MFI_IQPL, r[0]);
	mfii_write(sc, MFI_IQPH, r[1]);
	mtx_leave(&sc->sc_post_mtx);
}
945 
/*
 * Command completion: sync the request slot, sgl slot and data map
 * back from the device, unload the data map, then hand the ccb to its
 * ccb_done routine.
 */
void
mfii_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_POSTWRITE);
	}

	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
		    0, ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	ccb->ccb_done(sc, ccb);
}
970 
/*
 * Hand the firmware the addresses of the request frames, reply post
 * queue and sense buffers via an MPII IOC_INIT message wrapped in an
 * MFI INIT frame, issued by polling (interrupts are not enabled yet).
 * Returns non-zero on failure.
 */
int
mfii_initialise_firmware(struct mfii_softc *sc)
{
	struct mpii_msg_iocinit_request *iiq;
	struct mfii_dmamem *m;
	struct mfii_ccb *ccb;
	struct mfi_init_frame *init;
	int rv;

	/* temporary buffer holding the iocinit message */
	m = mfii_dmamem_alloc(sc, sizeof(*iiq));
	if (m == NULL)
		return (1);

	iiq = MFII_DMA_KVA(m);
	bzero(iiq, sizeof(*iiq));

	iiq->function = MPII_FUNCTION_IOC_INIT;
	iiq->whoinit = MPII_WHOINIT_HOST_DRIVER;

	iiq->msg_version_maj = 0x02;
	iiq->msg_version_min = 0x00;
	iiq->hdr_version_unit = 0x10;
	iiq->hdr_version_dev = 0x0;

	/* frame size is expressed in 32-bit words */
	iiq->system_request_frame_size = htole16(MFII_REQUEST_SIZE / 4);

	iiq->reply_descriptor_post_queue_depth =
	    htole16(sc->sc_reply_postq_depth);
	iiq->reply_free_queue_depth = htole16(0);

	htolem32(&iiq->sense_buffer_address_high,
	    MFII_DMA_DVA(sc->sc_sense) >> 32);

	htolem32(&iiq->reply_descriptor_post_queue_address_lo,
	    MFII_DMA_DVA(sc->sc_reply_postq));
	htolem32(&iiq->reply_descriptor_post_queue_address_hi,
	    MFII_DMA_DVA(sc->sc_reply_postq) >> 32);

	htolem32(&iiq->system_request_frame_base_address_lo,
	    MFII_DMA_DVA(sc->sc_requests));
	htolem32(&iiq->system_request_frame_base_address_hi,
	    MFII_DMA_DVA(sc->sc_requests) >> 32);

	iiq->timestamp = htole64(time_uptime);

	/* wrap the iocinit message in an MFI INIT frame */
	ccb = scsi_io_get(&sc->sc_iopool, 0);
	mfii_scrub_ccb(ccb);
	init = ccb->ccb_request;

	init->mif_header.mfh_cmd = MFI_CMD_INIT;
	init->mif_header.mfh_data_len = htole32(sizeof(*iiq));
	init->mif_qinfo_new_addr = htole64(MFII_DMA_DVA(m));

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_PREREAD);

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
	    0, sizeof(*iiq), BUS_DMASYNC_PREREAD);

	rv = mfii_mfa_poll(sc, ccb);

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
	    0, sizeof(*iiq), BUS_DMASYNC_POSTREAD);

	scsi_io_put(&sc->sc_iopool, ccb);
	mfii_dmamem_free(sc, m);

	return (rv);
}
1041 
1042 int
1043 mfii_my_intr(struct mfii_softc *sc)
1044 {
1045 	u_int32_t status;
1046 
1047 	status = mfii_read(sc, MFI_OSTS);
1048 	if (ISSET(status, 0x1)) {
1049 		mfii_write(sc, MFI_OSTS, status);
1050 		return (1);
1051 	}
1052 
1053 	return (ISSET(status, MFII_OSTS_INTR_VALID) ? 1 : 0);
1054 }
1055 
/*
 * Interrupt handler: when the interrupt is ours, drain the reply
 * post queue.  Returns whether the interrupt was claimed.
 */
int
mfii_intr(void *arg)
{
	struct mfii_softc *sc = arg;
	int claimed;

	claimed = mfii_my_intr(sc);
	if (claimed)
		mfii_postq(sc);

	return (claimed);
}
1068 
/*
 * Drain the reply post queue.  Completed ccbs are collected on a
 * local list under sc_reply_postq_mtx, consumed entries are reset to
 * the 0xff "unused" pattern, the new index is written to the reply
 * post host index register, and only then are the completions run
 * without the mutex held.
 */
void
mfii_postq(struct mfii_softc *sc)
{
	struct mfii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_reply_descr *postq = MFII_DMA_KVA(sc->sc_reply_postq);
	struct mpii_reply_descr *rdp;
	struct mfii_ccb *ccb;
	int rpi = 0;

	mtx_enter(&sc->sc_reply_postq_mtx);

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_POSTREAD);

	for (;;) {
		rdp = &postq[sc->sc_reply_postq_index];
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		/* smid is 1-based, sc_ccb is 0-based */
		ccb = &sc->sc_ccb[letoh16(rdp->smid) - 1];
		SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		memset(rdp, 0xff, sizeof(*rdp));

		sc->sc_reply_postq_index++;
		sc->sc_reply_postq_index %= sc->sc_reply_postq_depth;
		rpi = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_PREREAD);

	/* tell the controller how far we got */
	if (rpi)
		mfii_write(sc, MFII_RPI, sc->sc_reply_postq_index);

	mtx_leave(&sc->sc_reply_postq_mtx);

	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		mfii_done(sc, ccb);
	}
}
1120 
/*
 * scsi midlayer entry point: build and submit a request for xs.
 * Currently everything goes through the pass-through cdb path; the
 * fast-path LDIO dispatch below is compiled out until it is finished.
 */
void
mfii_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	/* xs->io comes from sc_iopool; reset it for this command */
	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

#if 0
	/* disabled: mfii_scsi_cmd_io is not implemented yet */
	switch (xs->cmd->opcode) {
	case READ_COMMAND:
	case READ_BIG:
	case READ_12:
	case READ_16:
	case WRITE_COMMAND:
	case WRITE_BIG:
	case WRITE_12:
	case WRITE_16:
		if (mfii_scsi_cmd_io(sc, xs) != 0)
			goto stuffup;

		break;

	default:
#endif
		if (mfii_scsi_cmd_cdb(sc, xs) != 0)
			goto stuffup;
#if 0
		break;
	}
#endif

	/* assume success; mfii_scsi_cmd_done overrides on error */
	xs->error = XS_NOERROR;
	xs->resid = 0;

	if (ISSET(xs->flags, SCSI_POLL)) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	mfii_start(sc, ccb);
	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
	scsi_done(xs);
}
1174 
1175 void
1176 mfii_scsi_cmd_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1177 {
1178 	struct scsi_xfer *xs = ccb->ccb_cookie;
1179 	struct mpii_msg_scsi_io *io = ccb->ccb_request;
1180 	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
1181 
1182 	switch (ctx->status) {
1183 	case MFI_STAT_OK:
1184 		break;
1185 
1186 	case MFI_STAT_SCSI_DONE_WITH_ERROR:
1187 		xs->error = XS_SENSE;
1188 		memset(&xs->sense, 0, sizeof(xs->sense));
1189 		memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
1190 		break;
1191 
1192 	case MFI_STAT_LD_OFFLINE:
1193 	case MFI_STAT_DEVICE_NOT_FOUND:
1194 		xs->error = XS_SELTIMEOUT;
1195 		break;
1196 
1197 	default:
1198 		xs->error = XS_DRIVER_STUFFUP;
1199 		break;
1200 	}
1201 
1202 	scsi_done(xs);
1203 }
1204 
1205 int
1206 mfii_scsi_cmd_io(struct mfii_softc *sc, struct scsi_xfer *xs)
1207 {
1208 	struct mfii_ccb *ccb = xs->io;
1209 	u_int64_t blkno;
1210 	u_int32_t nblks;
1211 
1212 	ccb->ccb_req.flags = MFII_REQ_TYPE_LDIO;
1213 	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);
1214 
1215 	scsi_cmd_rw_decode(xs->cmd, &blkno, &nblks);
1216 
1217 	return (1);
1218 }
1219 
1220 int
1221 mfii_scsi_cmd_cdb(struct mfii_softc *sc, struct scsi_xfer *xs)
1222 {
1223 	struct scsi_link *link = xs->sc_link;
1224 	struct mfii_ccb *ccb = xs->io;
1225 	struct mpii_msg_scsi_io *io = ccb->ccb_request;
1226 	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
1227 
1228 	io->dev_handle = htole16(link->target);
1229 	io->function = MFII_FUNCTION_LDIO_REQUEST;
1230 	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
1231 	io->sgl_flags = htole16(0x02); /* XXX */
1232 	io->sense_buffer_length = sizeof(xs->sense);
1233 	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
1234 	io->data_length = htole32(xs->datalen);
1235 	io->io_flags = htole16(xs->cmdlen);
1236 	io->lun[0] = htobe16(link->lun);
1237 	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
1238 	case SCSI_DATA_IN:
1239 		ccb->ccb_direction = MFII_DATA_IN;
1240 		io->direction = MPII_SCSIIO_DIR_READ;
1241 		break;
1242 	case SCSI_DATA_OUT:
1243 		ccb->ccb_direction = MFII_DATA_OUT;
1244 		io->direction = MPII_SCSIIO_DIR_WRITE;
1245 		break;
1246 	default:
1247 		ccb->ccb_direction = MFII_DATA_NONE;
1248 		io->direction = MPII_SCSIIO_DIR_NONE;
1249 		break;
1250 	}
1251 	bcopy(xs->cmd, io->cdb, xs->cmdlen);
1252 
1253 	ctx->virtual_disk_target_id = htole16(link->target);
1254 
1255 	if (mfii_load_ccb(sc, ccb, ctx + 1,
1256 	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
1257 		return (1);
1258 
1259 	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;
1260 
1261 	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
1262 	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);
1263 
1264 	return (0);
1265 }
1266 
/*
 * Map ccb_data for dma and write the scatter-gather list into the
 * request frame at sglp.  If the segments do not all fit in the frame,
 * the last in-frame slot becomes a chain element pointing at the ccb's
 * off-frame sgl, where the remaining entries are written.
 * Returns 0 on success, 1 if the dmamap load fails.
 */
int
mfii_load_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb, void *sglp,
    int nosleep)
{
	struct mpii_msg_request *req = ccb->ccb_request;
	struct mfii_sge *sge = NULL, *nsge = sglp;
	struct mfii_sge *ce = NULL;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	u_int space;
	int i;

	int error;

	/* nothing to map for dataless commands */
	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* number of sge slots left in the request frame after sglp */
	space = (MFII_REQUEST_SIZE - ((u_int8_t *)nsge - (u_int8_t *)req)) /
	    sizeof(*nsge);
	if (dmap->dm_nsegs > space) {
		/* reserve the last in-frame slot for the chain element */
		space--;

		ccb->ccb_sgl_len = (dmap->dm_nsegs - space) * sizeof(*nsge);
		bzero(ccb->ccb_sgl, ccb->ccb_sgl_len);

		/* point the chain element at the off-frame sgl */
		ce = nsge + space;
		ce->sg_addr = htole64(ccb->ccb_sgl_dva);
		ce->sg_len = htole32(ccb->ccb_sgl_len);
		ce->sg_flags = MFII_SGE_CHAIN_ELEMENT |
		    MFII_SGE_ADDR_IOCPLBNTA;

		/* chain offset is expressed in 16 byte units */
		req->chain_offset = ((u_int8_t *)ce - (u_int8_t *)req) / 16;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		/* when we reach the chain element, continue off-frame */
		if (nsge == ce)
			nsge = ccb->ccb_sgl;

		sge = nsge;

		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_flags = MFII_SGE_ADDR_SYSTEM;

		nsge = sge + 1;
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/* flush the off-frame sgl to the controller if we used it */
	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_PREWRITE);
	}

	return (0);
}
1333 
1334 void *
1335 mfii_get_ccb(void *cookie)
1336 {
1337 	struct mfii_softc *sc = cookie;
1338 	struct mfii_ccb *ccb;
1339 
1340 	mtx_enter(&sc->sc_ccb_mtx);
1341 	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_freeq);
1342 	if (ccb != NULL)
1343 		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
1344 	mtx_leave(&sc->sc_ccb_mtx);
1345 
1346 	return (ccb);
1347 }
1348 
1349 void
1350 mfii_scrub_ccb(struct mfii_ccb *ccb)
1351 {
1352 	ccb->ccb_cookie = NULL;
1353 	ccb->ccb_done = NULL;
1354 	ccb->ccb_flags = 0;
1355 	ccb->ccb_data = NULL;
1356 	ccb->ccb_direction = 0;
1357 	ccb->ccb_len = 0;
1358 	ccb->ccb_sgl_len = 0;
1359 
1360 	bzero(&ccb->ccb_req, sizeof(ccb->ccb_req));
1361 	bzero(ccb->ccb_request, MFII_REQUEST_SIZE);
1362 }
1363 
1364 void
1365 mfii_put_ccb(void *cookie, void *io)
1366 {
1367 	struct mfii_softc *sc = cookie;
1368 	struct mfii_ccb *ccb = io;
1369 
1370 	mtx_enter(&sc->sc_ccb_mtx);
1371 	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
1372 	mtx_leave(&sc->sc_ccb_mtx);
1373 }
1374 
1375 int
1376 mfii_init_ccb(struct mfii_softc *sc)
1377 {
1378 	struct mfii_ccb *ccb;
1379 	u_int8_t *request = MFII_DMA_KVA(sc->sc_requests);
1380 	u_int8_t *sense = MFII_DMA_KVA(sc->sc_sense);
1381 	u_int8_t *sgl = MFII_DMA_KVA(sc->sc_sgl);
1382 	u_int i;
1383 	int error;
1384 
1385 	sc->sc_ccb = mallocarray(sc->sc_max_cmds, sizeof(struct mfii_ccb),
1386 	    M_DEVBUF, M_WAITOK|M_ZERO);
1387 
1388 	for (i = 0; i < sc->sc_max_cmds; i++) {
1389 		ccb = &sc->sc_ccb[i];
1390 
1391 		/* create a dma map for transfer */
1392 		error = bus_dmamap_create(sc->sc_dmat,
1393 		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
1394 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
1395 		if (error) {
1396 			printf("%s: cannot create ccb dmamap (%d)\n",
1397 			    DEVNAME(sc), error);
1398 			goto destroy;
1399 		}
1400 
1401 		/* select i + 1'th request. 0 is reserved for events */
1402 		ccb->ccb_smid = i + 1;
1403 		ccb->ccb_request_offset = MFII_REQUEST_SIZE * (i + 1);
1404 		ccb->ccb_request = request + ccb->ccb_request_offset;
1405 		ccb->ccb_request_dva = MFII_DMA_DVA(sc->sc_requests) +
1406 		    ccb->ccb_request_offset;
1407 
1408 		/* select i'th sense */
1409 		ccb->ccb_sense_offset = MFI_SENSE_SIZE * i;
1410 		ccb->ccb_sense = (struct mfi_sense *)(sense +
1411 		    ccb->ccb_sense_offset);
1412 		ccb->ccb_sense_dva = (u_int32_t)(MFII_DMA_DVA(sc->sc_sense) +
1413 		    ccb->ccb_sense_offset);
1414 
1415 		/* select i'th sgl */
1416 		ccb->ccb_sgl_offset = sizeof(struct mfii_sge) *
1417 		    sc->sc_max_sgl * i;
1418 		ccb->ccb_sgl = (struct mfii_sge *)(sgl + ccb->ccb_sgl_offset);
1419 		ccb->ccb_sgl_dva = MFII_DMA_DVA(sc->sc_sgl) +
1420 		    ccb->ccb_sgl_offset;
1421 
1422 		/* add ccb to queue */
1423 		mfii_put_ccb(sc, ccb);
1424 	}
1425 
1426 	return (0);
1427 
1428 destroy:
1429 	/* free dma maps and ccb memory */
1430 	while ((ccb = mfii_get_ccb(sc)) != NULL)
1431 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
1432 
1433 	free(sc->sc_ccb, M_DEVBUF, 0);
1434 
1435 	return (1);
1436 }
1437 
1438