xref: /openbsd-src/sys/dev/pci/mpii.c (revision f2da64fbbbf1b03f09f390ab01267c93dfd77c4c)
1 /*	$OpenBSD: mpii.c,v 1.105 2016/09/14 01:14:54 jmatthew Exp $	*/
2 /*
3  * Copyright (c) 2010, 2012 Mike Belopuhov
4  * Copyright (c) 2009 James Giannoules
5  * Copyright (c) 2005 - 2010 David Gwynne <dlg@openbsd.org>
6  * Copyright (c) 2005 - 2010 Marco Peereboom <marco@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include "bio.h"
22 
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/device.h>
26 #include <sys/ioctl.h>
27 #include <sys/malloc.h>
28 #include <sys/kernel.h>
29 #include <sys/rwlock.h>
30 #include <sys/sensors.h>
31 #include <sys/dkio.h>
32 #include <sys/tree.h>
33 #include <sys/task.h>
34 
35 #include <machine/bus.h>
36 
37 #include <dev/pci/pcireg.h>
38 #include <dev/pci/pcivar.h>
39 #include <dev/pci/pcidevs.h>
40 
41 #include <scsi/scsi_all.h>
42 #include <scsi/scsiconf.h>
43 
44 #include <dev/biovar.h>
45 
46 #include <dev/pci/mpiireg.h>
47 
48 /* #define MPII_DEBUG */
49 #ifdef MPII_DEBUG
50 #define DPRINTF(x...)		do { if (mpii_debug) printf(x); } while(0)
51 #define DNPRINTF(n,x...)	do { if (mpii_debug & (n)) printf(x); } while(0)
52 #define	MPII_D_CMD		(0x0001)
53 #define	MPII_D_INTR		(0x0002)
54 #define	MPII_D_MISC		(0x0004)
55 #define	MPII_D_DMA		(0x0008)
56 #define	MPII_D_IOCTL		(0x0010)
57 #define	MPII_D_RW		(0x0020)
58 #define	MPII_D_MEM		(0x0040)
59 #define	MPII_D_CCB		(0x0080)
60 #define	MPII_D_PPR		(0x0100)
61 #define	MPII_D_RAID		(0x0200)
62 #define	MPII_D_EVT		(0x0400)
63 #define MPII_D_CFG		(0x0800)
64 #define MPII_D_MAP		(0x1000)
65 
66 u_int32_t  mpii_debug = 0
67 		| MPII_D_CMD
68 		| MPII_D_INTR
69 		| MPII_D_MISC
70 		| MPII_D_DMA
71 		| MPII_D_IOCTL
72 		| MPII_D_RW
73 		| MPII_D_MEM
74 		| MPII_D_CCB
75 		| MPII_D_PPR
76 		| MPII_D_RAID
77 		| MPII_D_EVT
78 		| MPII_D_CFG
79 		| MPII_D_MAP
80 	;
81 #else
82 #define DPRINTF(x...)
83 #define DNPRINTF(n,x...)
84 #endif
85 
86 #define MPII_REQUEST_SIZE		(512)
87 #define MPII_REQUEST_CREDIT		(128)
88 
/*
 * A single bus_dma(9) allocation: the dmamap, its backing segment,
 * the size of the allocation and its kernel virtual mapping.
 */
struct mpii_dmamem {
	bus_dmamap_t		mdm_map;	/* dmamap for this memory */
	bus_dma_segment_t	mdm_seg;	/* backing physical segment */
	size_t			mdm_size;	/* size of the allocation */
	caddr_t			mdm_kva;	/* kernel virtual address */
};
/* accessors: dmamap, 64-bit device address of segment 0, and the kva */
#define MPII_DMA_MAP(_mdm) ((_mdm)->mdm_map)
#define MPII_DMA_DVA(_mdm) ((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MPII_DMA_KVA(_mdm) ((void *)(_mdm)->mdm_kva)
98 
99 struct mpii_softc;
100 
/*
 * Reply control block: tracks one reply frame by both its kernel
 * virtual address and its device-visible (DMA) address.
 */
struct mpii_rcb {
	SIMPLEQ_ENTRY(mpii_rcb)	rcb_link;	/* event/free list linkage */
	void			*rcb_reply;	/* kva of the reply frame */
	u_int32_t		rcb_reply_dva;	/* dma address of the frame */
};

SIMPLEQ_HEAD(mpii_rcb_list, mpii_rcb);
108 
/*
 * Per-target device state; looked up through sc_devs by target id.
 */
struct mpii_device {
	int			flags;
#define MPII_DF_ATTACH		(0x0001)
#define MPII_DF_DETACH		(0x0002)
#define MPII_DF_HIDDEN		(0x0004)	/* not probed by scsibus */
#define MPII_DF_UNUSED		(0x0008)	/* not probed by scsibus */
#define MPII_DF_VOLUME		(0x0010)
#define MPII_DF_VOLUME_DISK	(0x0020)
#define MPII_DF_HOT_SPARE	(0x0040)
	short			slot;
	short			percent;	/* presumably resync progress; verify in bio code */
	u_int16_t		dev_handle;	/* IOC device handle */
	u_int16_t		enclosure;
	u_int16_t		expander;
	u_int8_t		phy_num;
	u_int8_t		physical_port;
};
126 
/*
 * Command control block: ties one request frame (ccb_cmd/ccb_cmd_dva,
 * identified to the IOC by ccb_smid) to its data dmamap, completion
 * callback and, once completed, its reply frame.
 */
struct mpii_ccb {
	struct mpii_softc	*ccb_sc;	/* owning controller */

	void *			ccb_cookie;	/* caller context (scsi_xfer) */
	bus_dmamap_t		ccb_dmamap;	/* data transfer map */

	bus_addr_t		ccb_offset;	/* offset into request memory */
	void			*ccb_cmd;	/* kva of the request frame */
	bus_addr_t		ccb_cmd_dva;	/* dma address of the frame */
	u_int16_t		ccb_dev_handle;	/* IOC device handle */
	u_int16_t		ccb_smid;	/* system message id; 1-based,
						 * sc_ccbs[smid - 1] maps back */

	/* command lifecycle; see mpii_intr and the timeout handlers */
	volatile enum {
		MPII_CCB_FREE,
		MPII_CCB_READY,
		MPII_CCB_QUEUED,
		MPII_CCB_TIMEOUT
	}			ccb_state;

	void			(*ccb_done)(struct mpii_ccb *); /* completion */
	struct mpii_rcb		*ccb_rcb;	/* reply for this command */

	SIMPLEQ_ENTRY(mpii_ccb)	ccb_link;	/* free/completion list link */
};

SIMPLEQ_HEAD(mpii_ccb_list, mpii_ccb);
153 
/*
 * Per-controller state, one per attached mpii(4) PCI function.
 */
struct mpii_softc {
	struct device		sc_dev;		/* base device; must be first
						 * (softc is cast from it) */

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	void			*sc_ih;		/* interrupt handler cookie */

	struct scsi_link	sc_link;	/* scsi midlayer glue */

	int			sc_flags;
#define MPII_F_RAID		(1<<1)		/* IR firmware; bio attached */
#define MPII_F_SAS3		(1<<2)		/* SAS3 IOC (IEEE SGE format) */

	struct scsibus_softc	*sc_scsibus;	/* attached scsibus */

	struct mpii_device	**sc_devs;	/* target id -> device map,
						 * sc_max_devices entries */

	bus_space_tag_t		sc_iot;		/* system interface registers */
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;		/* mapped register size */
	bus_dma_tag_t		sc_dmat;

	struct mutex		sc_req_mtx;	/* serializes request posting */
	struct mutex		sc_rep_mtx;	/* serializes reply processing */

	ushort			sc_reply_size;
	ushort			sc_request_size;

	ushort			sc_max_cmds;	/* request credit from IOC */
	ushort			sc_num_reply_frames;
	u_int			sc_reply_free_qdepth;
	u_int			sc_reply_post_qdepth;

	ushort			sc_chain_sge;	/* index of the chain SGE slot
						 * within a request frame */
	ushort			sc_max_sgl;

	u_int8_t		sc_ioc_event_replay;

	u_int8_t		sc_porttype;
	u_int8_t		sc_max_volumes;
	u_int16_t		sc_max_devices;
	u_int16_t		sc_vd_count;
	u_int16_t		sc_vd_id_low;
	u_int16_t		sc_pd_id_start;
	int			sc_ioc_number;
	u_int8_t		sc_vf_id;

	struct mpii_ccb		*sc_ccbs;	/* ccb array, sc_max_cmds long */
	struct mpii_ccb_list	sc_ccb_free;	/* free ccbs */
	struct mutex		sc_ccb_free_mtx;

	struct mutex		sc_ccb_mtx;
				/*
				 * this protects the ccb state and list entry
				 * between mpii_scsi_cmd and scsidone.
				 */

	struct mpii_ccb_list	sc_ccb_tmos;	/* ccbs that timed out */
	struct scsi_iohandler	sc_ccb_tmo_handler;

	struct scsi_iopool	sc_iopool;	/* ccb allocation pool */

	struct mpii_dmamem	*sc_requests;	/* request frame memory */

	struct mpii_dmamem	*sc_replies;	/* reply frame memory */
	struct mpii_rcb		*sc_rcbs;	/* rcb per reply frame */

	struct mpii_dmamem	*sc_reply_postq; /* reply post ring */
	struct mpii_reply_descr	*sc_reply_postq_kva;
	u_int			sc_reply_post_host_index;

	struct mpii_dmamem	*sc_reply_freeq; /* reply free ring */
	u_int			sc_reply_free_host_index;

	struct mpii_rcb_list	sc_evt_sas_queue; /* pending sas events */
	struct mutex		sc_evt_sas_mtx;
	struct task		sc_evt_sas_task;

	struct mpii_rcb_list	sc_evt_ack_queue; /* events awaiting ack */
	struct mutex		sc_evt_ack_mtx;
	struct scsi_iohandler	sc_evt_ack_handler;

	/* scsi ioctl from sd device */
	int			(*sc_ioctl)(struct device *, u_long, caddr_t);

	int			sc_nsensors;	/* volume status sensors */
	struct ksensor		*sc_sensors;
	struct ksensordev	sc_sensordev;
};
244 
245 int	mpii_match(struct device *, void *, void *);
246 void	mpii_attach(struct device *, struct device *, void *);
247 int	mpii_detach(struct device *, int);
248 
249 int	mpii_intr(void *);
250 
/* autoconf(9) attachment glue */
struct cfattach mpii_ca = {
	sizeof(struct mpii_softc),
	mpii_match,
	mpii_attach,
	mpii_detach
};

struct cfdriver mpii_cd = {
	NULL,
	"mpii",
	DV_DULL
};
263 
264 void		mpii_scsi_cmd(struct scsi_xfer *);
265 void		mpii_scsi_cmd_done(struct mpii_ccb *);
266 int		mpii_scsi_probe(struct scsi_link *);
267 int		mpii_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
268 
/* entry points handed to the scsi midlayer via sc_link.adapter */
struct scsi_adapter mpii_switch = {
	mpii_scsi_cmd,
	scsi_minphys,
	mpii_scsi_probe,
	NULL,
	mpii_scsi_ioctl
};
276 
277 struct mpii_dmamem *
278 		mpii_dmamem_alloc(struct mpii_softc *, size_t);
279 void		mpii_dmamem_free(struct mpii_softc *,
280 		    struct mpii_dmamem *);
281 int		mpii_alloc_ccbs(struct mpii_softc *);
282 void *		mpii_get_ccb(void *);
283 void		mpii_put_ccb(void *, void *);
284 int		mpii_alloc_replies(struct mpii_softc *);
285 int		mpii_alloc_queues(struct mpii_softc *);
286 void		mpii_push_reply(struct mpii_softc *, struct mpii_rcb *);
287 void		mpii_push_replies(struct mpii_softc *);
288 
289 void		mpii_scsi_cmd_tmo(void *);
290 void		mpii_scsi_cmd_tmo_handler(void *, void *);
291 void		mpii_scsi_cmd_tmo_done(struct mpii_ccb *);
292 
293 int		mpii_insert_dev(struct mpii_softc *, struct mpii_device *);
294 int		mpii_remove_dev(struct mpii_softc *, struct mpii_device *);
295 struct mpii_device *
296 		mpii_find_dev(struct mpii_softc *, u_int16_t);
297 
298 void		mpii_start(struct mpii_softc *, struct mpii_ccb *);
299 int		mpii_poll(struct mpii_softc *, struct mpii_ccb *);
300 void		mpii_poll_done(struct mpii_ccb *);
301 struct mpii_rcb *
302 		mpii_reply(struct mpii_softc *, struct mpii_reply_descr *);
303 
304 void		mpii_wait(struct mpii_softc *, struct mpii_ccb *);
305 void		mpii_wait_done(struct mpii_ccb *);
306 
307 void		mpii_init_queues(struct mpii_softc *);
308 
309 int		mpii_load_xs(struct mpii_ccb *);
310 int		mpii_load_xs_sas3(struct mpii_ccb *);
311 
312 u_int32_t	mpii_read(struct mpii_softc *, bus_size_t);
313 void		mpii_write(struct mpii_softc *, bus_size_t, u_int32_t);
314 int		mpii_wait_eq(struct mpii_softc *, bus_size_t, u_int32_t,
315 		    u_int32_t);
316 int		mpii_wait_ne(struct mpii_softc *, bus_size_t, u_int32_t,
317 		    u_int32_t);
318 
319 int		mpii_init(struct mpii_softc *);
320 int		mpii_reset_soft(struct mpii_softc *);
321 int		mpii_reset_hard(struct mpii_softc *);
322 
323 int		mpii_handshake_send(struct mpii_softc *, void *, size_t);
324 int		mpii_handshake_recv_dword(struct mpii_softc *,
325 		    u_int32_t *);
326 int		mpii_handshake_recv(struct mpii_softc *, void *, size_t);
327 
328 void		mpii_empty_done(struct mpii_ccb *);
329 
330 int		mpii_iocinit(struct mpii_softc *);
331 int		mpii_iocfacts(struct mpii_softc *);
332 int		mpii_portfacts(struct mpii_softc *);
333 int		mpii_portenable(struct mpii_softc *);
334 int		mpii_cfg_coalescing(struct mpii_softc *);
335 int		mpii_board_info(struct mpii_softc *);
336 int		mpii_target_map(struct mpii_softc *);
337 
338 int		mpii_eventnotify(struct mpii_softc *);
339 void		mpii_eventnotify_done(struct mpii_ccb *);
340 void		mpii_eventack(void *, void *);
341 void		mpii_eventack_done(struct mpii_ccb *);
342 void		mpii_event_process(struct mpii_softc *, struct mpii_rcb *);
343 void		mpii_event_done(struct mpii_softc *, struct mpii_rcb *);
344 void		mpii_event_sas(void *);
345 void		mpii_event_raid(struct mpii_softc *,
346 		    struct mpii_msg_event_reply *);
347 
348 void		mpii_sas_remove_device(struct mpii_softc *, u_int16_t);
349 
350 int		mpii_req_cfg_header(struct mpii_softc *, u_int8_t,
351 		    u_int8_t, u_int32_t, int, void *);
352 int		mpii_req_cfg_page(struct mpii_softc *, u_int32_t, int,
353 		    void *, int, void *, size_t);
354 
355 int		mpii_ioctl_cache(struct scsi_link *, u_long, struct dk_cache *);
356 
357 #if NBIO > 0
358 int		mpii_ioctl(struct device *, u_long, caddr_t);
359 int		mpii_ioctl_inq(struct mpii_softc *, struct bioc_inq *);
360 int		mpii_ioctl_vol(struct mpii_softc *, struct bioc_vol *);
361 int		mpii_ioctl_disk(struct mpii_softc *, struct bioc_disk *);
362 int		mpii_bio_hs(struct mpii_softc *, struct bioc_disk *, int,
363 		    int, int *);
364 int		mpii_bio_disk(struct mpii_softc *, struct bioc_disk *,
365 		    u_int8_t);
366 struct mpii_device *
367 		mpii_find_vol(struct mpii_softc *, int);
368 #ifndef SMALL_KERNEL
369  int		mpii_bio_volstate(struct mpii_softc *, struct bioc_vol *);
370 int		mpii_create_sensors(struct mpii_softc *);
371 void		mpii_refresh_sensors(void *);
372 #endif /* SMALL_KERNEL */
373 #endif /* NBIO > 0 */
374 
375 #define DEVNAME(s)		((s)->sc_dev.dv_xname)
376 
377 #define dwordsof(s)		(sizeof(s) / sizeof(u_int32_t))
378 
379 #define mpii_read_db(s)		mpii_read((s), MPII_DOORBELL)
380 #define mpii_write_db(s, v)	mpii_write((s), MPII_DOORBELL, (v))
381 #define mpii_read_intr(s)	mpii_read((s), MPII_INTR_STATUS)
382 #define mpii_write_intr(s, v)	mpii_write((s), MPII_INTR_STATUS, (v))
383 #define mpii_reply_waiting(s)	((mpii_read_intr((s)) & MPII_INTR_STATUS_REPLY)\
384 				    == MPII_INTR_STATUS_REPLY)
385 
386 #define mpii_write_reply_free(s, v) \
387     bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
388     MPII_REPLY_FREE_HOST_INDEX, (v))
389 #define mpii_write_reply_post(s, v) \
390     bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
391     MPII_REPLY_POST_HOST_INDEX, (v))
392 
393 #define mpii_wait_db_int(s)	mpii_wait_ne((s), MPII_INTR_STATUS, \
394 				    MPII_INTR_STATUS_IOC2SYSDB, 0)
395 #define mpii_wait_db_ack(s)	mpii_wait_eq((s), MPII_INTR_STATUS, \
396 				    MPII_INTR_STATUS_SYS2IOCDB, 0)
397 
398 static inline void
399 mpii_dvatosge(struct mpii_sge *sge, u_int64_t dva)
400 {
401 	htolem32(&sge->sg_addr_lo, dva);
402 	htolem32(&sge->sg_addr_hi, dva >> 32);
403 }
404 
405 #define MPII_PG_EXTENDED	(1<<0)
406 #define MPII_PG_POLL		(1<<1)
407 #define MPII_PG_FMT		"\020" "\002POLL" "\001EXTENDED"
408 
/* PCI ids of the supported LSI SAS2xxx/SAS3xxx controllers */
static const struct pci_matchid mpii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2004 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2008 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_5 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2116_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2116_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_5 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_6 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3004 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3008 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_4 }
};
433 
434 int
435 mpii_match(struct device *parent, void *match, void *aux)
436 {
437 	return (pci_matchbyid(aux, mpii_devices, nitems(mpii_devices)));
438 }
439 
440 void
441 mpii_attach(struct device *parent, struct device *self, void *aux)
442 {
443 	struct mpii_softc		*sc = (struct mpii_softc *)self;
444 	struct pci_attach_args		*pa = aux;
445 	pcireg_t			memtype;
446 	int				r;
447 	pci_intr_handle_t		ih;
448 	struct scsibus_attach_args	saa;
449 	struct mpii_ccb			*ccb;
450 
451 	sc->sc_pc = pa->pa_pc;
452 	sc->sc_tag = pa->pa_tag;
453 	sc->sc_dmat = pa->pa_dmat;
454 
455 	mtx_init(&sc->sc_req_mtx, IPL_BIO);
456 	mtx_init(&sc->sc_rep_mtx, IPL_BIO);
457 
458 	/* find the appropriate memory base */
459 	for (r = PCI_MAPREG_START; r < PCI_MAPREG_END; r += sizeof(memtype)) {
460 		memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, r);
461 		if ((memtype & PCI_MAPREG_TYPE_MASK) == PCI_MAPREG_TYPE_MEM)
462 			break;
463 	}
464 	if (r >= PCI_MAPREG_END) {
465 		printf(": unable to locate system interface registers\n");
466 		return;
467 	}
468 
469 	if (pci_mapreg_map(pa, r, memtype, 0, &sc->sc_iot, &sc->sc_ioh,
470 	    NULL, &sc->sc_ios, 0xFF) != 0) {
471 		printf(": unable to map system interface registers\n");
472 		return;
473 	}
474 
475 	/* disable the expansion rom */
476 	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_ROM_REG,
477 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_ROM_REG) &
478 	    ~PCI_ROM_ENABLE);
479 
480 	/* disable interrupts */
481 	mpii_write(sc, MPII_INTR_MASK,
482 	    MPII_INTR_MASK_RESET | MPII_INTR_MASK_REPLY |
483 	    MPII_INTR_MASK_DOORBELL);
484 
485 	/* hook up the interrupt */
486 	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
487 		printf(": unable to map interrupt\n");
488 		goto unmap;
489 	}
490 	printf(": %s\n", pci_intr_string(sc->sc_pc, ih));
491 
492 	if (mpii_init(sc) != 0) {
493 		printf("%s: unable to initialize ioc\n", DEVNAME(sc));
494 		goto unmap;
495 	}
496 
497 	if (mpii_iocfacts(sc) != 0) {
498 		printf("%s: unable to get iocfacts\n", DEVNAME(sc));
499 		goto unmap;
500 	}
501 
502 	if (mpii_alloc_ccbs(sc) != 0) {
503 		/* error already printed */
504 		goto unmap;
505 	}
506 
507 	if (mpii_alloc_replies(sc) != 0) {
508 		printf("%s: unable to allocated reply space\n", DEVNAME(sc));
509 		goto free_ccbs;
510 	}
511 
512 	if (mpii_alloc_queues(sc) != 0) {
513 		printf("%s: unable to allocate reply queues\n", DEVNAME(sc));
514 		goto free_replies;
515 	}
516 
517 	if (mpii_iocinit(sc) != 0) {
518 		printf("%s: unable to send iocinit\n", DEVNAME(sc));
519 		goto free_queues;
520 	}
521 
522 	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
523 	    MPII_DOORBELL_STATE_OPER) != 0) {
524 		printf("%s: state: 0x%08x\n", DEVNAME(sc),
525 			mpii_read_db(sc) & MPII_DOORBELL_STATE);
526 		printf("%s: operational state timeout\n", DEVNAME(sc));
527 		goto free_queues;
528 	}
529 
530 	mpii_push_replies(sc);
531 	mpii_init_queues(sc);
532 
533 	if (mpii_board_info(sc) != 0) {
534 		printf("%s: unable to get manufacturing page 0\n",
535 		    DEVNAME(sc));
536 		goto free_queues;
537 	}
538 
539 	if (mpii_portfacts(sc) != 0) {
540 		printf("%s: unable to get portfacts\n", DEVNAME(sc));
541 		goto free_queues;
542 	}
543 
544 	if (mpii_target_map(sc) != 0) {
545 		printf("%s: unable to setup target mappings\n", DEVNAME(sc));
546 		goto free_queues;
547 	}
548 
549 	if (mpii_cfg_coalescing(sc) != 0) {
550 		printf("%s: unable to configure coalescing\n", DEVNAME(sc));
551 		goto free_queues;
552 	}
553 
554 	/* XXX bail on unsupported porttype? */
555 	if ((sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_PHYSICAL) ||
556 	    (sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_VIRTUAL)) {
557 		if (mpii_eventnotify(sc) != 0) {
558 			printf("%s: unable to enable events\n", DEVNAME(sc));
559 			goto free_queues;
560 		}
561 	}
562 
563 	sc->sc_devs = mallocarray(sc->sc_max_devices,
564 	    sizeof(struct mpii_device *), M_DEVBUF, M_NOWAIT | M_ZERO);
565 	if (sc->sc_devs == NULL) {
566 		printf("%s: unable to allocate memory for mpii_device\n",
567 		    DEVNAME(sc));
568 		goto free_queues;
569 	}
570 
571 	if (mpii_portenable(sc) != 0) {
572 		printf("%s: unable to enable port\n", DEVNAME(sc));
573 		goto free_devs;
574 	}
575 
576 	/* we should be good to go now, attach scsibus */
577 	sc->sc_link.adapter = &mpii_switch;
578 	sc->sc_link.adapter_softc = sc;
579 	sc->sc_link.adapter_target = -1;
580 	sc->sc_link.adapter_buswidth = sc->sc_max_devices;
581 	sc->sc_link.luns = 1;
582 	sc->sc_link.openings = sc->sc_max_cmds - 1;
583 	sc->sc_link.pool = &sc->sc_iopool;
584 
585 	memset(&saa, 0, sizeof(saa));
586 	saa.saa_sc_link = &sc->sc_link;
587 
588 	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO,
589 	    mpii_intr, sc, sc->sc_dev.dv_xname);
590 	if (sc->sc_ih == NULL)
591 		goto free_devs;
592 
593 	/* config_found() returns the scsibus attached to us */
594 	sc->sc_scsibus = (struct scsibus_softc *) config_found(&sc->sc_dev,
595 	    &saa, scsiprint);
596 
597 	/* enable interrupts */
598 	mpii_write(sc, MPII_INTR_MASK, MPII_INTR_MASK_DOORBELL
599 	    | MPII_INTR_MASK_RESET);
600 
601 #if NBIO > 0
602 	if (ISSET(sc->sc_flags, MPII_F_RAID)) {
603 		if (bio_register(&sc->sc_dev, mpii_ioctl) != 0)
604 			panic("%s: controller registration failed",
605 			    DEVNAME(sc));
606 		else
607 			sc->sc_ioctl = mpii_ioctl;
608 
609 #ifndef SMALL_KERNEL
610 		if (mpii_create_sensors(sc) != 0)
611 			printf("%s: unable to create sensors\n", DEVNAME(sc));
612 #endif
613 	}
614 #endif
615 
616 	return;
617 
618 free_devs:
619 	free(sc->sc_devs, M_DEVBUF, 0);
620 	sc->sc_devs = NULL;
621 
622 free_queues:
623 	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_freeq),
624 	    0, sc->sc_reply_free_qdepth * 4, BUS_DMASYNC_POSTREAD);
625 	mpii_dmamem_free(sc, sc->sc_reply_freeq);
626 
627 	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
628 	    0, sc->sc_reply_post_qdepth * 8, BUS_DMASYNC_POSTREAD);
629 	mpii_dmamem_free(sc, sc->sc_reply_postq);
630 
631 free_replies:
632 	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
633 		0, PAGE_SIZE, BUS_DMASYNC_POSTREAD);
634 	mpii_dmamem_free(sc, sc->sc_replies);
635 
636 free_ccbs:
637 	while ((ccb = mpii_get_ccb(sc)) != NULL)
638 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
639 	mpii_dmamem_free(sc, sc->sc_requests);
640 	free(sc->sc_ccbs, M_DEVBUF, 0);
641 
642 unmap:
643 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
644 	sc->sc_ios = 0;
645 }
646 
/*
 * Detach: tear down the interrupt handler first, then release the
 * register mapping.  Always succeeds.
 */
int
mpii_detach(struct device *self, int flags)
{
	struct mpii_softc		*sc = (struct mpii_softc *)self;

	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}
	if (sc->sc_ios != 0) {
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
		sc->sc_ios = 0;
	}

	return (0);
}
663 
/*
 * Interrupt handler.  Drains the reply post ring under sc_rep_mtx:
 * descriptors with a non-zero smid complete the matching ccb, those
 * with smid == 0 carry an event reply.  Both are collected onto
 * local lists so the completion callbacks and event processing run
 * with the mutex dropped.  Returns 1 if any reply was consumed,
 * 0 for a spurious interrupt.
 */
int
mpii_intr(void *arg)
{
	struct mpii_rcb_list		evts = SIMPLEQ_HEAD_INITIALIZER(evts);
	struct mpii_ccb_list		ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_softc		*sc = arg;
	struct mpii_reply_descr		*postq = sc->sc_reply_postq_kva, *rdp;
	struct mpii_ccb			*ccb;
	struct mpii_rcb			*rcb;
	int				smid;
	u_int				idx;
	int				rv = 0;

	mtx_enter(&sc->sc_rep_mtx);
	bus_dmamap_sync(sc->sc_dmat,
	    MPII_DMA_MAP(sc->sc_reply_postq),
	    0, sc->sc_reply_post_qdepth * sizeof(*rdp),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	idx = sc->sc_reply_post_host_index;
	for (;;) {
		rdp = &postq[idx];
		/* an UNUSED descriptor marks the end of valid entries */
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		smid = lemtoh16(&rdp->smid);
		rcb = mpii_reply(sc, rdp);

		if (smid) {
			/* command completion; smid is 1-based */
			ccb = &sc->sc_ccbs[smid - 1];
			ccb->ccb_state = MPII_CCB_READY;
			ccb->ccb_rcb = rcb;
			SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		} else
			SIMPLEQ_INSERT_TAIL(&evts, rcb, rcb_link);

		/* the post queue is a ring */
		if (++idx >= sc->sc_reply_post_qdepth)
			idx = 0;

		rv = 1;
	}

	bus_dmamap_sync(sc->sc_dmat,
	    MPII_DMA_MAP(sc->sc_reply_postq),
	    0, sc->sc_reply_post_qdepth * sizeof(*rdp),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* tell the ioc how far we have consumed */
	if (rv)
		mpii_write_reply_post(sc, sc->sc_reply_post_host_index = idx);

	mtx_leave(&sc->sc_rep_mtx);

	if (rv == 0)
		return (0);

	/* run completions and events without holding sc_rep_mtx */
	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		ccb->ccb_done(ccb);
	}
	while ((rcb = SIMPLEQ_FIRST(&evts)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&evts, rcb_link);
		mpii_event_process(sc, rcb);
	}

	return (1);
}
738 
/*
 * Build the IEEE SGE list for a SCSI command on SAS3 controllers.
 * The SGL sits directly after the scsi_io request frame.  If the
 * segment list runs into the chain slot (sc_chain_sge entries in),
 * a chain element is inserted that points further into the same
 * request frame where the remaining SGEs continue.  Returns 0 on
 * success, 1 if the dmamap could not be loaded.
 */
int
mpii_load_xs_sas3(struct mpii_ccb *ccb)
{
	struct mpii_softc	*sc = ccb->ccb_sc;
	struct scsi_xfer	*xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io	*io = ccb->ccb_cmd;
	struct mpii_ieee_sge	*csge, *nsge, *sge;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;
	int			i, error;

	/* Request frame structure is described in the mpii_iocfacts */
	nsge = (struct mpii_ieee_sge *)(io + 1);
	csge = nsge + sc->sc_chain_sge;

	/* zero length transfer still requires an SGE */
	if (xs->datalen == 0) {
		nsge->sg_flags = MPII_IEEE_SGE_END_OF_LIST;
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL,
	    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	for (i = 0; i < dmap->dm_nsegs; i++, nsge++) {
		if (nsge == csge) {
			/* skip over the chain slot itself */
			nsge++;
			/* offset to the chain sge from the beginning */
			io->chain_offset = ((caddr_t)csge - (caddr_t)io) / 4;
			csge->sg_flags = MPII_IEEE_SGE_CHAIN_ELEMENT |
			    MPII_IEEE_SGE_ADDR_SYSTEM;
			/* address of the next sge */
			csge->sg_addr = htole64(ccb->ccb_cmd_dva +
			    ((caddr_t)nsge - (caddr_t)io));
			/* length covers all of the remaining sges */
			csge->sg_len = htole32((dmap->dm_nsegs - i) *
			    sizeof(*sge));
		}

		sge = nsge;
		sge->sg_flags = MPII_IEEE_SGE_ADDR_SYSTEM;
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
	}

	/* terminate list */
	sge->sg_flags |= MPII_IEEE_SGE_END_OF_LIST;

	/* make the data buffer visible to the device */
	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
795 
/*
 * Build the MPI SGE list for a SCSI command on SAS2 controllers.
 * Mirrors mpii_load_xs_sas3() but uses the classic 64-bit simple/
 * chain SGE format: when the segment list reaches the chain slot
 * (sc_chain_sge entries in) a chain SGE is written that points
 * further into the same request frame where the list continues.
 * Returns 0 on success, 1 if the dmamap could not be loaded.
 */
int
mpii_load_xs(struct mpii_ccb *ccb)
{
	struct mpii_softc	*sc = ccb->ccb_sc;
	struct scsi_xfer	*xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io	*io = ccb->ccb_cmd;
	struct mpii_sge		*csge, *nsge, *sge;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;
	u_int32_t		flags;
	u_int16_t		len;
	int			i, error;

	/* Request frame structure is described in the mpii_iocfacts */
	nsge = (struct mpii_sge *)(io + 1);
	csge = nsge + sc->sc_chain_sge;

	/* zero length transfer still requires an SGE */
	if (xs->datalen == 0) {
		nsge->sg_hdr = htole32(MPII_SGE_FL_TYPE_SIMPLE |
		    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL);
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL,
	    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* safe default starting flags */
	flags = MPII_SGE_FL_TYPE_SIMPLE | MPII_SGE_FL_SIZE_64;
	if (xs->flags & SCSI_DATA_OUT)
		flags |= MPII_SGE_FL_DIR_OUT;

	for (i = 0; i < dmap->dm_nsegs; i++, nsge++) {
		if (nsge == csge) {
			nsge++;
			/*
			 * NOTE(review): sge here is the SGE written by the
			 * previous iteration; this relies on sc_chain_sge > 0
			 * so the chain slot is never hit when i == 0 —
			 * confirm against mpii_iocfacts().
			 */
			sge->sg_hdr |= htole32(MPII_SGE_FL_LAST);
			/* offset to the chain sge from the beginning */
			io->chain_offset = ((caddr_t)csge - (caddr_t)io) / 4;
			/* length of the sgl segment we're pointing to */
			len = (dmap->dm_nsegs - i) * sizeof(*sge);
			csge->sg_hdr = htole32(MPII_SGE_FL_TYPE_CHAIN |
			    MPII_SGE_FL_SIZE_64 | len);
			/* address of the next sge */
			mpii_dvatosge(csge, ccb->ccb_cmd_dva +
			    ((caddr_t)nsge - (caddr_t)io));
		}

		sge = nsge;
		sge->sg_hdr = htole32(flags | dmap->dm_segs[i].ds_len);
		mpii_dvatosge(sge, dmap->dm_segs[i].ds_addr);
	}

	/* terminate list */
	sge->sg_hdr |= htole32(MPII_SGE_FL_LAST | MPII_SGE_FL_EOB |
	    MPII_SGE_FL_EOL);

	/* make the data buffer visible to the device */
	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
861 
862 int
863 mpii_scsi_probe(struct scsi_link *link)
864 {
865 	struct mpii_softc	*sc = link->adapter_softc;
866 	int			flags;
867 
868 	if ((sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_SAS_PHYSICAL) &&
869 	    (sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_SAS_VIRTUAL))
870 		return (ENXIO);
871 
872 	if (sc->sc_devs[link->target] == NULL)
873 		return (1);
874 
875 	flags = sc->sc_devs[link->target]->flags;
876 	if (ISSET(flags, MPII_DF_HIDDEN) || ISSET(flags, MPII_DF_UNUSED))
877 		return (1);
878 
879 	return (0);
880 }
881 
882 u_int32_t
883 mpii_read(struct mpii_softc *sc, bus_size_t r)
884 {
885 	u_int32_t			rv;
886 
887 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
888 	    BUS_SPACE_BARRIER_READ);
889 	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
890 
891 	DNPRINTF(MPII_D_RW, "%s: mpii_read %#x %#x\n", DEVNAME(sc), r, rv);
892 
893 	return (rv);
894 }
895 
896 void
897 mpii_write(struct mpii_softc *sc, bus_size_t r, u_int32_t v)
898 {
899 	DNPRINTF(MPII_D_RW, "%s: mpii_write %#x %#x\n", DEVNAME(sc), r, v);
900 
901 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
902 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
903 	    BUS_SPACE_BARRIER_WRITE);
904 }
905 
906 
907 int
908 mpii_wait_eq(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
909     u_int32_t target)
910 {
911 	int			i;
912 
913 	DNPRINTF(MPII_D_RW, "%s: mpii_wait_eq %#x %#x %#x\n", DEVNAME(sc), r,
914 	    mask, target);
915 
916 	for (i = 0; i < 15000; i++) {
917 		if ((mpii_read(sc, r) & mask) == target)
918 			return (0);
919 		delay(1000);
920 	}
921 
922 	return (1);
923 }
924 
925 int
926 mpii_wait_ne(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
927     u_int32_t target)
928 {
929 	int			i;
930 
931 	DNPRINTF(MPII_D_RW, "%s: mpii_wait_ne %#x %#x %#x\n", DEVNAME(sc), r,
932 	    mask, target);
933 
934 	for (i = 0; i < 15000; i++) {
935 		if ((mpii_read(sc, r) & mask) != target)
936 			return (0);
937 		delay(1000);
938 	}
939 
940 	return (1);
941 }
942 
/*
 * Drive the IOC to the READY state so it will accept handshake
 * commands.  Depending on the doorbell state this may require a soft
 * (message unit) or hard (diagnostic) reset; up to five state
 * transitions are attempted.  Returns 0 when the IOC is ready (or is
 * owned by a PCI peer), 1 on timeout or failure.
 */
int
mpii_init(struct mpii_softc *sc)
{
	u_int32_t		db;
	int			i;

	/* spin until the ioc leaves the reset state */
	if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_RESET) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_init timeout waiting to leave "
		    "reset state\n", DEVNAME(sc));
		return (1);
	}

	/* check current ownership */
	db = mpii_read_db(sc);
	if ((db & MPII_DOORBELL_WHOINIT) == MPII_DOORBELL_WHOINIT_PCIPEER) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_init initialised by pci peer\n",
		    DEVNAME(sc));
		return (0);
	}

	for (i = 0; i < 5; i++) {
		switch (db & MPII_DOORBELL_STATE) {
		case MPII_DOORBELL_STATE_READY:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is ready\n",
			    DEVNAME(sc));
			return (0);

		case MPII_DOORBELL_STATE_OPER:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is oper\n",
			    DEVNAME(sc));
			/* prefer the soft reset when event replay works */
			if (sc->sc_ioc_event_replay)
				mpii_reset_soft(sc);
			else
				mpii_reset_hard(sc);
			break;

		case MPII_DOORBELL_STATE_FAULT:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is being "
			    "reset hard\n" , DEVNAME(sc));
			mpii_reset_hard(sc);
			break;

		case MPII_DOORBELL_STATE_RESET:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init waiting to come "
			    "out of reset\n", DEVNAME(sc));
			if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
			    MPII_DOORBELL_STATE_RESET) != 0)
				return (1);
			break;
		}
		/* re-sample the state for the next iteration */
		db = mpii_read_db(sc);
	}

	return (1);
}
1000 
/*
 * Soft-reset the IOC through the doorbell message unit reset
 * function.  Fails immediately if the doorbell is in use.  Returns 0
 * once the IOC reaches the READY state, 1 on any failure.
 */
int
mpii_reset_soft(struct mpii_softc *sc)
{
	DNPRINTF(MPII_D_MISC, "%s: mpii_reset_soft\n", DEVNAME(sc));

	if (mpii_read_db(sc) & MPII_DOORBELL_INUSE) {
		return (1);
	}

	mpii_write_db(sc,
	    MPII_DOORBELL_FUNCTION(MPII_FUNCTION_IOC_MESSAGE_UNIT_RESET));

	/* XXX LSI waits 15 sec */
	if (mpii_wait_db_ack(sc) != 0)
		return (1);

	/* XXX LSI waits 15 sec */
	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_READY) != 0)
		return (1);

	/* XXX wait for Sys2IOCDB bit to clear in HIS?? */

	return (0);
}
1026 
/*
 * Hard-reset the IOC through the host diagnostic register: unlock it
 * with the magic write sequence, assert RESET_ADAPTER, poll for the
 * bit to clear, then re-lock the register.  Returns 0 on success, 1
 * if diagnostic write access could not be enabled.
 */
int
mpii_reset_hard(struct mpii_softc *sc)
{
	u_int16_t		i;

	DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard\n", DEVNAME(sc));

	mpii_write_intr(sc, 0);

	/* enable diagnostic register */
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_FLUSH);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_1);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_2);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_3);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_4);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_5);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_6);

	delay(100);

	if ((mpii_read(sc, MPII_HOSTDIAG) & MPII_HOSTDIAG_DWRE) == 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard failure to enable "
		    "diagnostic read/write\n", DEVNAME(sc));
		return(1);
	}

	/* reset ioc */
	mpii_write(sc, MPII_HOSTDIAG, MPII_HOSTDIAG_RESET_ADAPTER);

	/* 240 milliseconds */
	delay(240000);


	/* XXX this whole function should be more robust */

	/* XXX  read the host diagnostic reg until reset adapter bit clears ? */
	for (i = 0; i < 30000; i++) {
		if ((mpii_read(sc, MPII_HOSTDIAG) &
		    MPII_HOSTDIAG_RESET_ADAPTER) == 0)
			break;
		delay(10000);
	}

	/* disable diagnostic register */
	mpii_write(sc, MPII_WRITESEQ, 0xff);

	/* XXX what else? */

	DNPRINTF(MPII_D_MISC, "%s: done with mpii_reset_hard\n", DEVNAME(sc));

	return(0);
}
1079 
1080 int
1081 mpii_handshake_send(struct mpii_softc *sc, void *buf, size_t dwords)
1082 {
1083 	u_int32_t		*query = buf;
1084 	int			i;
1085 
1086 	/* make sure the doorbell is not in use. */
1087 	if (mpii_read_db(sc) & MPII_DOORBELL_INUSE)
1088 		return (1);
1089 
1090 	/* clear pending doorbell interrupts */
1091 	if (mpii_read_intr(sc) & MPII_INTR_STATUS_IOC2SYSDB)
1092 		mpii_write_intr(sc, 0);
1093 
1094 	/*
1095 	 * first write the doorbell with the handshake function and the
1096 	 * dword count.
1097 	 */
1098 	mpii_write_db(sc, MPII_DOORBELL_FUNCTION(MPII_FUNCTION_HANDSHAKE) |
1099 	    MPII_DOORBELL_DWORDS(dwords));
1100 
1101 	/*
1102 	 * the doorbell used bit will be set because a doorbell function has
1103 	 * started. wait for the interrupt and then ack it.
1104 	 */
1105 	if (mpii_wait_db_int(sc) != 0)
1106 		return (1);
1107 	mpii_write_intr(sc, 0);
1108 
1109 	/* poll for the acknowledgement. */
1110 	if (mpii_wait_db_ack(sc) != 0)
1111 		return (1);
1112 
1113 	/* write the query through the doorbell. */
1114 	for (i = 0; i < dwords; i++) {
1115 		mpii_write_db(sc, htole32(query[i]));
1116 		if (mpii_wait_db_ack(sc) != 0)
1117 			return (1);
1118 	}
1119 
1120 	return (0);
1121 }
1122 
1123 int
1124 mpii_handshake_recv_dword(struct mpii_softc *sc, u_int32_t *dword)
1125 {
1126 	u_int16_t		*words = (u_int16_t *)dword;
1127 	int			i;
1128 
1129 	for (i = 0; i < 2; i++) {
1130 		if (mpii_wait_db_int(sc) != 0)
1131 			return (1);
1132 		words[i] = letoh16(mpii_read_db(sc) & MPII_DOORBELL_DATA_MASK);
1133 		mpii_write_intr(sc, 0);
1134 	}
1135 
1136 	return (0);
1137 }
1138 
int
mpii_handshake_recv(struct mpii_softc *sc, void *buf, size_t dwords)
{
	/*
	 * Read a handshake reply from the doorbell into buf.  At most
	 * "dwords" dwords are stored; any excess the IOC sends is
	 * drained and discarded.  Returns 0 on success, 1 on timeout.
	 */
	struct mpii_msg_reply	*reply = buf;
	u_int32_t		*dbuf = buf, dummy;
	int			i;

	/* get the first dword so we can read the length out of the header. */
	if (mpii_handshake_recv_dword(sc, &dbuf[0]) != 0)
		return (1);

	DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dwords: %d reply: %d\n",
	    DEVNAME(sc), dwords, reply->msg_length);

	/*
	 * the total length, in dwords, is in the message length field of the
	 * reply header.
	 */
	for (i = 1; i < MIN(dwords, reply->msg_length); i++) {
		if (mpii_handshake_recv_dword(sc, &dbuf[i]) != 0)
			return (1);
	}

	/* if there's extra stuff to come off the ioc, discard it */
	/* note: i carries over from the loop above on purpose */
	while (i++ < reply->msg_length) {
		if (mpii_handshake_recv_dword(sc, &dummy) != 0)
			return (1);
		DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dummy read: "
		    "0x%08x\n", DEVNAME(sc), dummy);
	}

	/* wait for the doorbell used bit to be reset and clear the intr */
	if (mpii_wait_db_int(sc) != 0)
		return (1);

	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_INUSE, 0) != 0)
		return (1);

	mpii_write_intr(sc, 0);

	return (0);
}
1181 
/*
 * ccb completion callback for requests whose reply is consumed
 * synchronously by the submitter (poll/wait paths).
 */
void
mpii_empty_done(struct mpii_ccb *ccb)
{
	/* nothing to do */
}
1187 
/*
 * Fetch IOC Facts via the doorbell handshake and size the driver's
 * queues and request frames accordingly.  Returns 0 on success, 1 on
 * handshake failure or if the controller's queues are unusably small.
 */
int
mpii_iocfacts(struct mpii_softc *sc)
{
	struct mpii_msg_iocfacts_request	ifq;
	struct mpii_msg_iocfacts_reply		ifp;
	int					irs;
	int					sge_size;
	u_int					qdepth;

	DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts\n", DEVNAME(sc));

	memset(&ifq, 0, sizeof(ifq));
	memset(&ifp, 0, sizeof(ifp));

	ifq.function = MPII_FUNCTION_IOC_FACTS;

	if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	sc->sc_ioc_number = ifp.ioc_number;
	sc->sc_vf_id = ifp.vf_id;

	sc->sc_max_volumes = ifp.max_volumes;
	sc->sc_max_devices = ifp.max_volumes + lemtoh16(&ifp.max_targets);

	if (ISSET(lemtoh32(&ifp.ioc_capabilities),
	    MPII_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
		SET(sc->sc_flags, MPII_F_RAID);

	/* never ask for more credits than the driver is built for */
	sc->sc_max_cmds = MIN(lemtoh16(&ifp.request_credit),
	    MPII_REQUEST_CREDIT);

	/* SAS3 controllers have different sgl layouts */
	if (ifp.msg_version_maj == 2 && ifp.msg_version_min == 5)
		SET(sc->sc_flags, MPII_F_SAS3);

	/*
	 * The host driver must ensure that there is at least one
	 * unused entry in the Reply Free Queue. One way to ensure
	 * that this requirement is met is to never allocate a number
	 * of reply frames that is a multiple of 16.
	 */
	sc->sc_num_reply_frames = sc->sc_max_cmds + 32;
	if (!(sc->sc_num_reply_frames % 16))
		sc->sc_num_reply_frames--;

	/* must be multiple of 16 */
	sc->sc_reply_post_qdepth = sc->sc_max_cmds +
	    sc->sc_num_reply_frames;
	sc->sc_reply_post_qdepth += 16 - (sc->sc_reply_post_qdepth % 16);

	/* clamp to the controller's advertised post queue depth */
	qdepth = lemtoh16(&ifp.max_reply_descriptor_post_queue_depth);
	if (sc->sc_reply_post_qdepth > qdepth) {
		sc->sc_reply_post_qdepth = qdepth;
		if (sc->sc_reply_post_qdepth < 16) {
			printf("%s: RDPQ is too shallow\n", DEVNAME(sc));
			return (1);
		}
		/* rescale commands/frames to fit the smaller queue */
		sc->sc_max_cmds = sc->sc_reply_post_qdepth / 2 - 4;
		sc->sc_num_reply_frames = sc->sc_max_cmds + 4;
	}

	sc->sc_reply_free_qdepth = sc->sc_num_reply_frames +
	    16 - (sc->sc_num_reply_frames % 16);

	/*
	 * Our request frame for an I/O operation looks like this:
	 *
	 * +-------------------+ -.
	 * | mpii_msg_scsi_io  |  |
	 * +-------------------|  |
	 * | mpii_sge          |  |
	 * + - - - - - - - - - +  |
	 * | ...               |  > ioc_request_frame_size
	 * + - - - - - - - - - +  |
	 * | mpii_sge (tail)   |  |
	 * + - - - - - - - - - +  |
	 * | mpii_sge (csge)   |  | --.
	 * + - - - - - - - - - + -'   | chain sge points to the next sge
	 * | mpii_sge          |<-----'
	 * + - - - - - - - - - +
	 * | ...               |
	 * + - - - - - - - - - +
	 * | mpii_sge (tail)   |
	 * +-------------------+
	 * |                   |
	 * ~~~~~~~~~~~~~~~~~~~~~
	 * |                   |
	 * +-------------------+ <- sc_request_size - sizeof(scsi_sense_data)
	 * | scsi_sense_data   |
	 * +-------------------+
	 */

	/* both sizes are in 32-bit words */
	sc->sc_reply_size = ifp.reply_frame_size * 4;
	irs = lemtoh16(&ifp.ioc_request_frame_size) * 4;
	sc->sc_request_size = MPII_REQUEST_SIZE;
	/* make sure we have enough space for scsi sense data */
	if (irs > sc->sc_request_size) {
		sc->sc_request_size = irs + sizeof(struct scsi_sense_data);
		/* keep the request size 16-byte aligned */
		sc->sc_request_size += 16 - (sc->sc_request_size % 16);
	}

	if (ISSET(sc->sc_flags, MPII_F_SAS3)) {
		sge_size = sizeof(struct mpii_ieee_sge);
	} else {
		sge_size = sizeof(struct mpii_sge);
	}

	/* offset to the chain sge */
	sc->sc_chain_sge = (irs - sizeof(struct mpii_msg_scsi_io)) /
	    sge_size - 1;

	/*
	 * A number of simple scatter-gather elements we can fit into the
	 * request buffer after the I/O command minus the chain element.
	 */
	sc->sc_max_sgl = (sc->sc_request_size -
 	    sizeof(struct mpii_msg_scsi_io) - sizeof(struct scsi_sense_data)) /
	    sge_size - 1;

	return (0);
}
1320 
/*
 * Send the IOC Init message via the doorbell handshake, telling the
 * controller where the request frames, reply frames and reply queues
 * live in host memory.  Returns 0 on success, 1 on handshake failure
 * or if the IOC reports an error status.
 */
int
mpii_iocinit(struct mpii_softc *sc)
{
	struct mpii_msg_iocinit_request		iiq;
	struct mpii_msg_iocinit_reply		iip;

	DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit\n", DEVNAME(sc));

	memset(&iiq, 0, sizeof(iiq));
	memset(&iip, 0, sizeof(iip));

	iiq.function = MPII_FUNCTION_IOC_INIT;
	iiq.whoinit = MPII_WHOINIT_HOST_DRIVER;

	/* XXX JPG do something about vf_id */
	iiq.vf_id = 0;

	/* we speak MPI version 2.0 */
	iiq.msg_version_maj = 0x02;
	iiq.msg_version_min = 0x00;

	/* XXX JPG ensure compliance with some level and hard-code? */
	iiq.hdr_version_unit = 0x00;
	iiq.hdr_version_dev = 0x00;

	/* frame size is expressed in dwords */
	htolem16(&iiq.system_request_frame_size, sc->sc_request_size / 4);

	htolem16(&iiq.reply_descriptor_post_queue_depth,
	    sc->sc_reply_post_qdepth);

	htolem16(&iiq.reply_free_queue_depth, sc->sc_reply_free_qdepth);

	/*
	 * sense buffers are carved out of the request frame DMA area,
	 * so they share its high address bits.
	 */
	htolem32(&iiq.sense_buffer_address_high,
	    MPII_DMA_DVA(sc->sc_requests) >> 32);

	htolem32(&iiq.system_reply_address_high,
	    MPII_DMA_DVA(sc->sc_replies) >> 32);

	/* 64-bit DMA addresses are split into lo/hi dwords */
	htolem32(&iiq.system_request_frame_base_address_lo,
	    MPII_DMA_DVA(sc->sc_requests));
	htolem32(&iiq.system_request_frame_base_address_hi,
	    MPII_DMA_DVA(sc->sc_requests) >> 32);

	htolem32(&iiq.reply_descriptor_post_queue_address_lo,
	    MPII_DMA_DVA(sc->sc_reply_postq));
	htolem32(&iiq.reply_descriptor_post_queue_address_hi,
	    MPII_DMA_DVA(sc->sc_reply_postq) >> 32);

	htolem32(&iiq.reply_free_queue_address_lo,
	    MPII_DMA_DVA(sc->sc_reply_freeq));
	htolem32(&iiq.reply_free_queue_address_hi,
	    MPII_DMA_DVA(sc->sc_reply_freeq) >> 32);

	if (mpii_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &iip, dwordsof(iip)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	DNPRINTF(MPII_D_MISC, "%s:  function: 0x%02x msg_length: %d "
	    "whoinit: 0x%02x\n", DEVNAME(sc), iip.function,
	    iip.msg_length, iip.whoinit);
	DNPRINTF(MPII_D_MISC, "%s:  msg_flags: 0x%02x\n", DEVNAME(sc),
	    iip.msg_flags);
	DNPRINTF(MPII_D_MISC, "%s:  vf_id: 0x%02x vp_id: 0x%02x\n", DEVNAME(sc),
	    iip.vf_id, iip.vp_id);
	DNPRINTF(MPII_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
	    letoh16(iip.ioc_status));
	DNPRINTF(MPII_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(iip.ioc_loginfo));

	/* any non-success status or non-zero loginfo is fatal here */
	if (lemtoh16(&iip.ioc_status) != MPII_IOCSTATUS_SUCCESS ||
	    lemtoh32(&iip.ioc_loginfo))
		return (1);

	return (0);
}
1403 
1404 void
1405 mpii_push_reply(struct mpii_softc *sc, struct mpii_rcb *rcb)
1406 {
1407 	u_int32_t		*rfp;
1408 	u_int			idx;
1409 
1410 	if (rcb == NULL)
1411 		return;
1412 
1413 	idx = sc->sc_reply_free_host_index;
1414 
1415 	rfp = MPII_DMA_KVA(sc->sc_reply_freeq);
1416 	htolem32(&rfp[idx], rcb->rcb_reply_dva);
1417 
1418 	if (++idx >= sc->sc_reply_free_qdepth)
1419 		idx = 0;
1420 
1421 	mpii_write_reply_free(sc, sc->sc_reply_free_host_index = idx);
1422 }
1423 
/*
 * Issue a Port Facts request for port 0 and record the port type.
 * Returns 0 on success, 1 on failure.
 */
int
mpii_portfacts(struct mpii_softc *sc)
{
	struct mpii_msg_portfacts_request	*pfq;
	struct mpii_msg_portfacts_reply		*pfp;
	struct mpii_ccb				*ccb;
	int					rv = 1;

	DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts\n", DEVNAME(sc));

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts mpii_get_ccb fail\n",
		    DEVNAME(sc));
		return (rv);
	}

	ccb->ccb_done = mpii_empty_done;
	pfq = ccb->ccb_cmd;

	memset(pfq, 0, sizeof(*pfq));

	pfq->function = MPII_FUNCTION_PORT_FACTS;
	pfq->chain_offset = 0;
	pfq->msg_flags = 0;
	pfq->port_number = 0;
	pfq->vp_id = 0;
	pfq->vf_id = 0;

	if (mpii_poll(sc, ccb) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts poll\n",
		    DEVNAME(sc));
		goto err;
	}

	/* a NULL rcb means the command completed without a reply frame */
	if (ccb->ccb_rcb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: empty portfacts reply\n",
		    DEVNAME(sc));
		goto err;
	}

	pfp = ccb->ccb_rcb->rcb_reply;
	sc->sc_porttype = pfp->port_type;

	/* hand the reply frame back to the IOC */
	mpii_push_reply(sc, ccb->ccb_rcb);
	rv = 0;
err:
	scsi_io_put(&sc->sc_iopool, ccb);

	return (rv);
}
1475 
1476 void
1477 mpii_eventack(void *cookie, void *io)
1478 {
1479 	struct mpii_softc			*sc = cookie;
1480 	struct mpii_ccb				*ccb = io;
1481 	struct mpii_rcb				*rcb, *next;
1482 	struct mpii_msg_event_reply		*enp;
1483 	struct mpii_msg_eventack_request	*eaq;
1484 
1485 	mtx_enter(&sc->sc_evt_ack_mtx);
1486 	rcb = SIMPLEQ_FIRST(&sc->sc_evt_ack_queue);
1487 	if (rcb != NULL) {
1488 		next = SIMPLEQ_NEXT(rcb, rcb_link);
1489 		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_ack_queue, rcb_link);
1490 	}
1491 	mtx_leave(&sc->sc_evt_ack_mtx);
1492 
1493 	if (rcb == NULL) {
1494 		scsi_io_put(&sc->sc_iopool, ccb);
1495 		return;
1496 	}
1497 
1498 	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;
1499 
1500 	ccb->ccb_done = mpii_eventack_done;
1501 	eaq = ccb->ccb_cmd;
1502 
1503 	eaq->function = MPII_FUNCTION_EVENT_ACK;
1504 
1505 	eaq->event = enp->event;
1506 	eaq->event_context = enp->event_context;
1507 
1508 	mpii_push_reply(sc, rcb);
1509 
1510 	mpii_start(sc, ccb);
1511 
1512 	if (next != NULL)
1513 		scsi_ioh_add(&sc->sc_evt_ack_handler);
1514 }
1515 
/*
 * Completion callback for an event ack: recycle the reply frame and
 * the ccb.
 */
void
mpii_eventack_done(struct mpii_ccb *ccb)
{
	struct mpii_softc			*sc = ccb->ccb_sc;

	DNPRINTF(MPII_D_EVT, "%s: event ack done\n", DEVNAME(sc));

	mpii_push_reply(sc, ccb->ccb_rcb);
	scsi_io_put(&sc->sc_iopool, ccb);
}
1526 
1527 int
1528 mpii_portenable(struct mpii_softc *sc)
1529 {
1530 	struct mpii_msg_portenable_request	*peq;
1531 	struct mpii_msg_portenable_repy		*pep;
1532 	struct mpii_ccb				*ccb;
1533 
1534 	DNPRINTF(MPII_D_MISC, "%s: mpii_portenable\n", DEVNAME(sc));
1535 
1536 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1537 	if (ccb == NULL) {
1538 		DNPRINTF(MPII_D_MISC, "%s: mpii_portenable ccb_get\n",
1539 		    DEVNAME(sc));
1540 		return (1);
1541 	}
1542 
1543 	ccb->ccb_done = mpii_empty_done;
1544 	peq = ccb->ccb_cmd;
1545 
1546 	peq->function = MPII_FUNCTION_PORT_ENABLE;
1547 	peq->vf_id = sc->sc_vf_id;
1548 
1549 	if (mpii_poll(sc, ccb) != 0) {
1550 		DNPRINTF(MPII_D_MISC, "%s: mpii_portenable poll\n",
1551 		    DEVNAME(sc));
1552 		return (1);
1553 	}
1554 
1555 	if (ccb->ccb_rcb == NULL) {
1556 		DNPRINTF(MPII_D_MISC, "%s: empty portenable reply\n",
1557 		    DEVNAME(sc));
1558 		return (1);
1559 	}
1560 	pep = ccb->ccb_rcb->rcb_reply;
1561 
1562 	mpii_push_reply(sc, ccb->ccb_rcb);
1563 	scsi_io_put(&sc->sc_iopool, ccb);
1564 
1565 	return (0);
1566 }
1567 
1568 int
1569 mpii_cfg_coalescing(struct mpii_softc *sc)
1570 {
1571 	struct mpii_cfg_hdr			hdr;
1572 	struct mpii_cfg_ioc_pg1			ipg;
1573 
1574 	hdr.page_version = 0;
1575 	hdr.page_length = sizeof(ipg) / 4;
1576 	hdr.page_number = 1;
1577 	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC;
1578 	memset(&ipg, 0, sizeof(ipg));
1579 	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &ipg,
1580 	    sizeof(ipg)) != 0) {
1581 		DNPRINTF(MPII_D_MISC, "%s: unable to fetch IOC page 1\n"
1582 		    "page 1\n", DEVNAME(sc));
1583 		return (1);
1584 	}
1585 
1586 	if (!ISSET(lemtoh32(&ipg.flags), MPII_CFG_IOC_1_REPLY_COALESCING))
1587 		return (0);
1588 
1589 	/* Disable coalescing */
1590 	CLR(ipg.flags, htole32(MPII_CFG_IOC_1_REPLY_COALESCING));
1591 	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 0, &ipg,
1592 	    sizeof(ipg)) != 0) {
1593 		DNPRINTF(MPII_D_MISC, "%s: unable to clear coalescing\n",
1594 		    DEVNAME(sc));
1595 		return (1);
1596 	}
1597 
1598 	return (0);
1599 }
1600 
1601 #define MPII_EVENT_MASKALL(enq)		do {			\
1602 		enq->event_masks[0] = 0xffffffff;		\
1603 		enq->event_masks[1] = 0xffffffff;		\
1604 		enq->event_masks[2] = 0xffffffff;		\
1605 		enq->event_masks[3] = 0xffffffff;		\
1606 	} while (0)
1607 
1608 #define MPII_EVENT_UNMASK(enq, evt)	do {			\
1609 		enq->event_masks[evt / 32] &=			\
1610 		    htole32(~(1 << (evt % 32)));		\
1611 	} while (0)
1612 
/*
 * Set up the event handling machinery (SAS and ack queues, task and
 * iopool handler) and ask the IOC to report the events we care about.
 * Returns 0 on success, 1 if no ccb is available.
 */
int
mpii_eventnotify(struct mpii_softc *sc)
{
	struct mpii_msg_event_request		*enq;
	struct mpii_ccb				*ccb;

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_eventnotify ccb_get\n",
		    DEVNAME(sc));
		return (1);
	}

	/* queue + task for deferred SAS topology change processing */
	SIMPLEQ_INIT(&sc->sc_evt_sas_queue);
	mtx_init(&sc->sc_evt_sas_mtx, IPL_BIO);
	task_set(&sc->sc_evt_sas_task, mpii_event_sas, sc);

	/* queue + iopool handler for acking events that require it */
	SIMPLEQ_INIT(&sc->sc_evt_ack_queue);
	mtx_init(&sc->sc_evt_ack_mtx, IPL_BIO);
	scsi_ioh_set(&sc->sc_evt_ack_handler, &sc->sc_iopool,
	    mpii_eventack, sc);

	ccb->ccb_done = mpii_eventnotify_done;
	enq = ccb->ccb_cmd;

	enq->function = MPII_FUNCTION_EVENT_NOTIFICATION;

	/*
	 * Enable reporting of the following events:
	 *
	 * MPII_EVENT_SAS_DISCOVERY
	 * MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST
	 * MPII_EVENT_SAS_DEVICE_STATUS_CHANGE
	 * MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE
	 * MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST
	 * MPII_EVENT_IR_VOLUME
	 * MPII_EVENT_IR_PHYSICAL_DISK
	 * MPII_EVENT_IR_OPERATION_STATUS
	 */

	MPII_EVENT_MASKALL(enq);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DISCOVERY);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DEVICE_STATUS_CHANGE);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_VOLUME);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_PHYSICAL_DISK);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_OPERATION_STATUS);

	mpii_start(sc, ccb);

	return (0);
}
1667 
/*
 * Completion callback for the event notification request: release the
 * ccb and hand the event reply to the event dispatcher.
 */
void
mpii_eventnotify_done(struct mpii_ccb *ccb)
{
	struct mpii_softc			*sc = ccb->ccb_sc;
	struct mpii_rcb				*rcb = ccb->ccb_rcb;

	DNPRINTF(MPII_D_EVT, "%s: mpii_eventnotify_done\n", DEVNAME(sc));

	/* the rcb outlives the ccb; mpii_event_process owns it from here */
	scsi_io_put(&sc->sc_iopool, ccb);
	mpii_event_process(sc, rcb);
}
1679 
/*
 * Handle an IR (integrated RAID) configuration change list event:
 * walk the element list and create, hide or remove device entries to
 * track volumes, volume member disks and hot spares.
 */
void
mpii_event_raid(struct mpii_softc *sc, struct mpii_msg_event_reply *enp)
{
	struct mpii_evt_ir_cfg_change_list	*ccl;
	struct mpii_evt_ir_cfg_element		*ce;
	struct mpii_device			*dev;
	u_int16_t				type;
	int					i;

	/* the change list immediately follows the event reply header */
	ccl = (struct mpii_evt_ir_cfg_change_list *)(enp + 1);
	if (ccl->num_elements == 0)
		return;

	if (ISSET(lemtoh32(&ccl->flags), MPII_EVT_IR_CFG_CHANGE_LIST_FOREIGN)) {
		/* bail on foreign configurations */
		return;
	}

	/* elements follow the change list header */
	ce = (struct mpii_evt_ir_cfg_element *)(ccl + 1);

	for (i = 0; i < ccl->num_elements; i++, ce++) {
		type = (lemtoh16(&ce->element_flags) &
		    MPII_EVT_IR_CFG_ELEMENT_TYPE_MASK);

		switch (type) {
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME:
			switch (ce->reason_code) {
			case MPII_EVT_IR_CFG_ELEMENT_RC_ADDED:
			case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_CREATED:
				/* new volume: track it as a device */
				if (mpii_find_dev(sc,
				    lemtoh16(&ce->vol_dev_handle))) {
					printf("%s: device %#x is already "
					    "configured\n", DEVNAME(sc),
					    lemtoh16(&ce->vol_dev_handle));
					break;
				}
				dev = malloc(sizeof(*dev), M_DEVBUF,
				    M_NOWAIT | M_ZERO);
				if (!dev) {
					printf("%s: failed to allocate a "
					    "device structure\n", DEVNAME(sc));
					break;
				}
				SET(dev->flags, MPII_DF_VOLUME);
				dev->slot = sc->sc_vd_id_low;
				dev->dev_handle = lemtoh16(&ce->vol_dev_handle);
				if (mpii_insert_dev(sc, dev)) {
					free(dev, M_DEVBUF, 0);
					break;
				}
				sc->sc_vd_count++;
				break;
			case MPII_EVT_IR_CFG_ELEMENT_RC_REMOVED:
			case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_DELETED:
				/* volume gone: drop its device entry */
				if (!(dev = mpii_find_dev(sc,
				    lemtoh16(&ce->vol_dev_handle))))
					break;
				mpii_remove_dev(sc, dev);
				sc->sc_vd_count--;
				break;
			}
			break;
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME_DISK:
			if (ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_PD_CREATED ||
			    ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) {
				/* there should be an underlying sas drive */
				if (!(dev = mpii_find_dev(sc,
				    lemtoh16(&ce->phys_disk_dev_handle))))
					break;
				/* promoted from a hot spare? */
				CLR(dev->flags, MPII_DF_HOT_SPARE);
				SET(dev->flags, MPII_DF_VOLUME_DISK |
				    MPII_DF_HIDDEN);
			}
			break;
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_HOT_SPARE:
			if (ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) {
				/* there should be an underlying sas drive */
				if (!(dev = mpii_find_dev(sc,
				    lemtoh16(&ce->phys_disk_dev_handle))))
					break;
				SET(dev->flags, MPII_DF_HOT_SPARE |
				    MPII_DF_HIDDEN);
			}
			break;
		}
	}
}
1771 
/*
 * Task handler for SAS topology change list events: dequeue one event
 * and attach/detach scsibus targets for added/missing phys.  Re-queues
 * itself if more events are pending.
 */
void
mpii_event_sas(void *xsc)
{
	struct mpii_softc *sc = xsc;
	struct mpii_rcb *rcb, *next;
	struct mpii_msg_event_reply *enp;
	struct mpii_evt_sas_tcl		*tcl;
	struct mpii_evt_phy_entry	*pe;
	struct mpii_device		*dev;
	int				i;
	u_int16_t			handle;

	/*
	 * dequeue one event; "next" is only read below when rcb is
	 * non-NULL, in which case it has been set here.
	 */
	mtx_enter(&sc->sc_evt_sas_mtx);
	rcb = SIMPLEQ_FIRST(&sc->sc_evt_sas_queue);
	if (rcb != NULL) {
		next = SIMPLEQ_NEXT(rcb, rcb_link);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_sas_queue, rcb_link);
	}
	mtx_leave(&sc->sc_evt_sas_mtx);

	if (rcb == NULL)
		return;
	if (next != NULL)
		task_add(systq, &sc->sc_evt_sas_task);

	/* the tcl follows the reply header, the phy entries follow the tcl */
	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;
	tcl = (struct mpii_evt_sas_tcl *)(enp + 1);
	pe = (struct mpii_evt_phy_entry *)(tcl + 1);

	for (i = 0; i < tcl->num_entries; i++, pe++) {
		switch (pe->phy_status & MPII_EVENT_SAS_TOPO_PS_RC_MASK) {
		case MPII_EVENT_SAS_TOPO_PS_RC_ADDED:
			handle = lemtoh16(&pe->dev_handle);
			if (mpii_find_dev(sc, handle)) {
				printf("%s: device %#x is already "
				    "configured\n", DEVNAME(sc), handle);
				break;
			}

			dev = malloc(sizeof(*dev), M_DEVBUF, M_WAITOK | M_ZERO);
			dev->slot = sc->sc_pd_id_start + tcl->start_phy_num + i;
			dev->dev_handle = handle;
			dev->phy_num = tcl->start_phy_num + i;
			if (tcl->enclosure_handle)
				dev->physical_port = tcl->physical_port;
			dev->enclosure = lemtoh16(&tcl->enclosure_handle);
			dev->expander = lemtoh16(&tcl->expander_handle);

			if (mpii_insert_dev(sc, dev)) {
				free(dev, M_DEVBUF, 0);
				break;
			}

			/* probe the new target once the bus is attached */
			if (sc->sc_scsibus != NULL)
				scsi_probe_target(sc->sc_scsibus, dev->slot);
			break;

		case MPII_EVENT_SAS_TOPO_PS_RC_MISSING:
			dev = mpii_find_dev(sc, lemtoh16(&pe->dev_handle));
			if (dev == NULL)
				break;

			mpii_remove_dev(sc, dev);
			mpii_sas_remove_device(sc, dev->dev_handle);
			/* hidden devices are not exposed on the scsibus */
			if (sc->sc_scsibus != NULL &&
			    !ISSET(dev->flags, MPII_DF_HIDDEN)) {
				scsi_activate(sc->sc_scsibus, dev->slot, -1,
				    DVACT_DEACTIVATE);
				scsi_detach_target(sc->sc_scsibus, dev->slot,
				    DETACH_FORCE);
			}

			free(dev, M_DEVBUF, 0);
			break;
		}
	}

	mpii_event_done(sc, rcb);
}
1851 
/*
 * Dispatch an asynchronous event notification from the IOC.  SAS
 * topology changes are deferred to the systq task (which then owns
 * the rcb); everything else is handled inline and the rcb is released
 * via mpii_event_done().
 */
void
mpii_event_process(struct mpii_softc *sc, struct mpii_rcb *rcb)
{
	struct mpii_msg_event_reply		*enp;

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;

	DNPRINTF(MPII_D_EVT, "%s: mpii_event_process: %#x\n", DEVNAME(sc),
	    letoh16(enp->event));

	switch (lemtoh16(&enp->event)) {
	case MPII_EVENT_EVENT_CHANGE:
		/* should be properly ignored */
		break;
	case MPII_EVENT_SAS_DISCOVERY: {
		struct mpii_evt_sas_discovery	*esd =
		    (struct mpii_evt_sas_discovery *)(enp + 1);

		if (esd->reason_code ==
		    MPII_EVENT_SAS_DISC_REASON_CODE_COMPLETED &&
		    esd->discovery_status != 0)
			printf("%s: sas discovery completed with status %#x\n",
			    DEVNAME(sc), esd->discovery_status);
		}
		break;
	case MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		/* defer to the task; it calls mpii_event_done() itself */
		mtx_enter(&sc->sc_evt_sas_mtx);
		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_sas_queue, rcb, rcb_link);
		mtx_leave(&sc->sc_evt_sas_mtx);
		task_add(systq, &sc->sc_evt_sas_task);
		return;
	case MPII_EVENT_SAS_DEVICE_STATUS_CHANGE:
		break;
	case MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		break;
	case MPII_EVENT_IR_VOLUME: {
		struct mpii_evt_ir_volume	*evd =
		    (struct mpii_evt_ir_volume *)(enp + 1);
		struct mpii_device		*dev;
#if NBIO > 0
		const char *vol_states[] = {
			BIOC_SVINVALID_S,
			BIOC_SVOFFLINE_S,
			BIOC_SVBUILDING_S,
			BIOC_SVONLINE_S,
			BIOC_SVDEGRADED_S,
			BIOC_SVONLINE_S,
		};
#endif

		/* no state-change reporting during autoconf */
		if (cold)
			break;
		KERNEL_LOCK();
		dev = mpii_find_dev(sc, lemtoh16(&evd->vol_dev_handle));
		KERNEL_UNLOCK();
		if (dev == NULL)
			break;
#if NBIO > 0
		if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATE_CHANGED)
			printf("%s: volume %d state changed from %s to %s\n",
			    DEVNAME(sc), dev->slot - sc->sc_vd_id_low,
			    vol_states[evd->prev_value],
			    vol_states[evd->new_value]);
#endif
		/* report a resync only when the RESYNC bit newly appears */
		if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATUS_CHANGED &&
		    ISSET(evd->new_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC) &&
		    !ISSET(evd->prev_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
			printf("%s: started resync on a volume %d\n",
			    DEVNAME(sc), dev->slot - sc->sc_vd_id_low);
		}
		break;
	case MPII_EVENT_IR_PHYSICAL_DISK:
		break;
	case MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		mpii_event_raid(sc, enp);
		break;
	case MPII_EVENT_IR_OPERATION_STATUS: {
		struct mpii_evt_ir_status	*evs =
		    (struct mpii_evt_ir_status *)(enp + 1);
		struct mpii_device		*dev;

		KERNEL_LOCK();
		dev = mpii_find_dev(sc, lemtoh16(&evs->vol_dev_handle));
		KERNEL_UNLOCK();
		/* track resync progress for bio(4) reporting */
		if (dev != NULL &&
		    evs->operation == MPII_EVENT_IR_RAIDOP_RESYNC)
			dev->percent = evs->percent;
		break;
		}
	default:
		DNPRINTF(MPII_D_EVT, "%s:  unhandled event 0x%02x\n",
		    DEVNAME(sc), lemtoh16(&enp->event));
	}

	mpii_event_done(sc, rcb);
}
1948 
1949 void
1950 mpii_event_done(struct mpii_softc *sc, struct mpii_rcb *rcb)
1951 {
1952 	struct mpii_msg_event_reply *enp = rcb->rcb_reply;
1953 
1954 	if (enp->ack_required) {
1955 		mtx_enter(&sc->sc_evt_ack_mtx);
1956 		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_ack_queue, rcb, rcb_link);
1957 		mtx_leave(&sc->sc_evt_ack_mtx);
1958 		scsi_ioh_add(&sc->sc_evt_ack_handler);
1959 	} else
1960 		mpii_push_reply(sc, rcb);
1961 }
1962 
/*
 * Tear down a SAS device: first issue a target reset to abort any
 * outstanding commands, then ask the SAS IO unit to remove the device
 * handle.  Both steps reuse the same ccb.
 */
void
mpii_sas_remove_device(struct mpii_softc *sc, u_int16_t handle)
{
	struct mpii_msg_scsi_task_request	*stq;
	struct mpii_msg_sas_oper_request	*soq;
	struct mpii_ccb				*ccb;

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL)
		return;

	/* step 1: target reset */
	stq = ccb->ccb_cmd;
	stq->function = MPII_FUNCTION_SCSI_TASK_MGMT;
	stq->task_type = MPII_SCSI_TASK_TARGET_RESET;
	htolem16(&stq->dev_handle, handle);

	ccb->ccb_done = mpii_empty_done;
	mpii_wait(sc, ccb);

	if (ccb->ccb_rcb != NULL)
		mpii_push_reply(sc, ccb->ccb_rcb);

	/* reuse a ccb */
	ccb->ccb_state = MPII_CCB_READY;
	ccb->ccb_rcb = NULL;

	/* step 2: remove the device handle */
	soq = ccb->ccb_cmd;
	memset(soq, 0, sizeof(*soq));
	soq->function = MPII_FUNCTION_SAS_IO_UNIT_CONTROL;
	soq->operation = MPII_SAS_OP_REMOVE_DEVICE;
	htolem16(&soq->dev_handle, handle);

	ccb->ccb_done = mpii_empty_done;
	mpii_wait(sc, ccb);
	if (ccb->ccb_rcb != NULL)
		mpii_push_reply(sc, ccb->ccb_rcb);

	scsi_io_put(&sc->sc_iopool, ccb);
}
2002 
/*
 * Fetch IOC facts and manufacturing page 0 and print the board name,
 * firmware and MPI versions.  Returns 0 on success, non-zero on
 * failure.
 *
 * NOTE(review): error returns mix 1 and EINVAL; callers appear to
 * treat any non-zero value as failure — confirm before relying on the
 * specific value.
 */
int
mpii_board_info(struct mpii_softc *sc)
{
	struct mpii_msg_iocfacts_request	ifq;
	struct mpii_msg_iocfacts_reply		ifp;
	struct mpii_cfg_manufacturing_pg0	mpg;
	struct mpii_cfg_hdr			hdr;

	memset(&ifq, 0, sizeof(ifq));
	memset(&ifp, 0, sizeof(ifp));

	ifq.function = MPII_FUNCTION_IOC_FACTS;

	if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: failed to request ioc facts\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: failed to receive ioc facts\n",
		    DEVNAME(sc));
		return (1);
	}

	hdr.page_version = 0;
	hdr.page_length = sizeof(mpg) / 4;
	hdr.page_number = 0;
	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_MANUFACTURING;
	memset(&mpg, 0, sizeof(mpg));
	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &mpg,
	    sizeof(mpg)) != 0) {
		printf("%s: unable to fetch manufacturing page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	printf("%s: %s, firmware %u.%u.%u.%u%s, MPI %u.%u\n", DEVNAME(sc),
	    mpg.board_name, ifp.fw_version_maj, ifp.fw_version_min,
	    ifp.fw_version_unit, ifp.fw_version_dev,
	    ISSET(sc->sc_flags, MPII_F_RAID) ? " IR" : "",
	    ifp.msg_version_maj, ifp.msg_version_min);

	return (0);
}
2048 
/*
 * Read IOC page 8 and compute the SCSI target id layout: where RAID
 * volume ids start (sc_vd_id_low) and where physical disk ids start
 * (sc_pd_id_start).  Returns 0 on success, non-zero on failure.
 */
int
mpii_target_map(struct mpii_softc *sc)
{
	struct mpii_cfg_hdr			hdr;
	struct mpii_cfg_ioc_pg8			ipg;
	int					flags, pad = 0;

	hdr.page_version = 0;
	hdr.page_length = sizeof(ipg) / 4;
	hdr.page_number = 8;
	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC;
	memset(&ipg, 0, sizeof(ipg));
	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &ipg,
	    sizeof(ipg)) != 0) {
		printf("%s: unable to fetch ioc page 8\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* target id 0 may be reserved; shift everything up by one */
	if (lemtoh16(&ipg.flags) & MPII_IOC_PG8_FLAGS_RESERVED_TARGETID_0)
		pad = 1;

	flags = lemtoh16(&ipg.ir_volume_mapping_flags) &
	    MPII_IOC_PG8_IRFLAGS_VOLUME_MAPPING_MODE_MASK;
	if (ISSET(sc->sc_flags, MPII_F_RAID)) {
		if (flags == MPII_IOC_PG8_IRFLAGS_LOW_VOLUME_MAPPING) {
			/* volumes first, then physical disks */
			sc->sc_vd_id_low += pad;
			pad = sc->sc_max_volumes; /* for sc_pd_id_start */
		} else
			/* volumes mapped at the top of the id space */
			sc->sc_vd_id_low = sc->sc_max_devices -
			    sc->sc_max_volumes;
	}

	sc->sc_pd_id_start += pad;

	return (0);
}
2086 
/*
 * Fetch the header of a configuration page via a CONFIG request.
 * "p" points at a struct mpii_cfg_hdr, or at a struct mpii_ecfg_hdr
 * when MPII_PG_EXTENDED is set in "flags"; on success the header is
 * copied out through it.  With MPII_PG_POLL the command is polled to
 * completion instead of sleeping.  Returns 0 on success, 1 on failure.
 */
int
mpii_req_cfg_header(struct mpii_softc *sc, u_int8_t type, u_int8_t number,
    u_int32_t address, int flags, void *p)
{
	struct mpii_msg_config_request		*cq;
	struct mpii_msg_config_reply		*cp;
	struct mpii_ccb				*ccb;
	struct mpii_cfg_hdr			*hdr = p;
	struct mpii_ecfg_hdr			*ehdr = p;
	int					etype = 0;
	int					rv = 0;

	DNPRINTF(MPII_D_MISC, "%s: mpii_req_cfg_header type: %#x number: %x "
	    "address: 0x%08x flags: 0x%b\n", DEVNAME(sc), type, number,
	    address, flags, MPII_PG_FMT);

	ccb = scsi_io_get(&sc->sc_iopool,
	    ISSET(flags, MPII_PG_POLL) ? SCSI_NOSLEEP : 0);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header ccb_get\n",
		    DEVNAME(sc));
		return (1);
	}

	/* extended pages are requested via the EXTENDED page type */
	if (ISSET(flags, MPII_PG_EXTENDED)) {
		etype = type;
		type = MPII_CONFIG_REQ_PAGE_TYPE_EXTENDED;
	}

	cq = ccb->ccb_cmd;

	cq->function = MPII_FUNCTION_CONFIG;

	cq->action = MPII_CONFIG_REQ_ACTION_PAGE_HEADER;

	cq->config_header.page_number = number;
	cq->config_header.page_type = type;
	cq->ext_page_type = etype;
	htolem32(&cq->page_address, address);
	/* header-only request: the SGL carries no data */
	htolem32(&cq->page_buffer.sg_hdr, MPII_SGE_FL_TYPE_SIMPLE |
	    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL);

	ccb->ccb_done = mpii_empty_done;
	if (ISSET(flags, MPII_PG_POLL)) {
		if (mpii_poll(sc, ccb) != 0) {
			/*
			 * NOTE(review): the ccb is not returned to the
			 * iopool on this path.  mpii_poll() currently
			 * always returns 0, but this would leak the ccb
			 * if that ever changes -- confirm.
			 */
			DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
			    DEVNAME(sc));
			return (1);
		}
	} else
		mpii_wait(sc, ccb);

	/* no reply frame means the command failed outright */
	if (ccb->ccb_rcb == NULL) {
		scsi_io_put(&sc->sc_iopool, ccb);
		return (1);
	}
	cp = ccb->ccb_rcb->rcb_reply;

	DNPRINTF(MPII_D_MISC, "%s:  action: 0x%02x sgl_flags: 0x%02x "
	    "msg_length: %d function: 0x%02x\n", DEVNAME(sc), cp->action,
	    cp->sgl_flags, cp->msg_length, cp->function);
	DNPRINTF(MPII_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
	    "msg_flags: 0x%02x\n", DEVNAME(sc),
	    letoh16(cp->ext_page_length), cp->ext_page_type,
	    cp->msg_flags);
	DNPRINTF(MPII_D_MISC, "%s:  vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
	    cp->vp_id, cp->vf_id);
	DNPRINTF(MPII_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
	    letoh16(cp->ioc_status));
	DNPRINTF(MPII_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(cp->ioc_loginfo));
	DNPRINTF(MPII_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
	    cp->config_header.page_version,
	    cp->config_header.page_length,
	    cp->config_header.page_number,
	    cp->config_header.page_type);

	/* copy the header out in whichever form the caller asked for */
	if (lemtoh16(&cp->ioc_status) != MPII_IOCSTATUS_SUCCESS)
		rv = 1;
	else if (ISSET(flags, MPII_PG_EXTENDED)) {
		memset(ehdr, 0, sizeof(*ehdr));
		ehdr->page_version = cp->config_header.page_version;
		ehdr->page_number = cp->config_header.page_number;
		ehdr->page_type = cp->config_header.page_type;
		ehdr->ext_page_length = cp->ext_page_length;
		ehdr->ext_page_type = cp->ext_page_type;
	} else
		*hdr = cp->config_header;

	mpii_push_reply(sc, ccb->ccb_rcb);
	scsi_io_put(&sc->sc_iopool, ccb);

	return (rv);
}
2182 
/*
 * Read or write a configuration page whose header was previously
 * fetched with mpii_req_cfg_header().  The page data is bounced
 * through the spare space in the ccb's request frame, so "len" must
 * fit in the request frame after the config request itself.  Returns
 * 0 on success, 1 on failure.
 */
int
mpii_req_cfg_page(struct mpii_softc *sc, u_int32_t address, int flags,
    void *p, int read, void *page, size_t len)
{
	struct mpii_msg_config_request		*cq;
	struct mpii_msg_config_reply		*cp;
	struct mpii_ccb				*ccb;
	struct mpii_cfg_hdr			*hdr = p;
	struct mpii_ecfg_hdr			*ehdr = p;
	caddr_t					kva;
	int					page_length;
	int					rv = 0;

	DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page address: %d read: %d "
	    "type: %x\n", DEVNAME(sc), address, read, hdr->page_type);

	/* page length is in 4-byte units */
	page_length = ISSET(flags, MPII_PG_EXTENDED) ?
	    lemtoh16(&ehdr->ext_page_length) : hdr->page_length;

	if (len > sc->sc_request_size - sizeof(*cq) || len < page_length * 4)
		return (1);

	ccb = scsi_io_get(&sc->sc_iopool,
	    ISSET(flags, MPII_PG_POLL) ? SCSI_NOSLEEP : 0);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page ccb_get\n",
		    DEVNAME(sc));
		return (1);
	}

	cq = ccb->ccb_cmd;

	cq->function = MPII_FUNCTION_CONFIG;

	cq->action = (read ? MPII_CONFIG_REQ_ACTION_PAGE_READ_CURRENT :
	    MPII_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT);

	/* replay the header we got from mpii_req_cfg_header() */
	if (ISSET(flags, MPII_PG_EXTENDED)) {
		cq->config_header.page_version = ehdr->page_version;
		cq->config_header.page_number = ehdr->page_number;
		cq->config_header.page_type = ehdr->page_type;
		cq->ext_page_len = ehdr->ext_page_length;
		cq->ext_page_type = ehdr->ext_page_type;
	} else
		cq->config_header = *hdr;
	cq->config_header.page_type &= MPII_CONFIG_REQ_PAGE_TYPE_MASK;
	htolem32(&cq->page_address, address);
	htolem32(&cq->page_buffer.sg_hdr, MPII_SGE_FL_TYPE_SIMPLE |
	    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL |
	    MPII_SGE_FL_SIZE_64 | (page_length * 4) |
	    (read ? MPII_SGE_FL_DIR_IN : MPII_SGE_FL_DIR_OUT));

	/* bounce the page via the request space to avoid more bus_dma games */
	mpii_dvatosge(&cq->page_buffer, ccb->ccb_cmd_dva +
	    sizeof(struct mpii_msg_config_request));

	kva = ccb->ccb_cmd;
	kva += sizeof(struct mpii_msg_config_request);

	/* for writes, stage the caller's data in the bounce area */
	if (!read)
		memcpy(kva, page, len);

	ccb->ccb_done = mpii_empty_done;
	if (ISSET(flags, MPII_PG_POLL)) {
		if (mpii_poll(sc, ccb) != 0) {
			/*
			 * NOTE(review): the ccb is not returned to the
			 * iopool on this path.  mpii_poll() currently
			 * always returns 0, but this would leak the ccb
			 * if that ever changes -- confirm.
			 */
			DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
			    DEVNAME(sc));
			return (1);
		}
	} else
		mpii_wait(sc, ccb);

	/* no reply frame means the command failed outright */
	if (ccb->ccb_rcb == NULL) {
		scsi_io_put(&sc->sc_iopool, ccb);
		return (1);
	}
	cp = ccb->ccb_rcb->rcb_reply;

	DNPRINTF(MPII_D_MISC, "%s:  action: 0x%02x msg_length: %d "
	    "function: 0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length,
	    cp->function);
	DNPRINTF(MPII_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
	    "msg_flags: 0x%02x\n", DEVNAME(sc),
	    letoh16(cp->ext_page_length), cp->ext_page_type,
	    cp->msg_flags);
	DNPRINTF(MPII_D_MISC, "%s:  vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
	    cp->vp_id, cp->vf_id);
	DNPRINTF(MPII_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
	    letoh16(cp->ioc_status));
	DNPRINTF(MPII_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(cp->ioc_loginfo));
	DNPRINTF(MPII_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
	    cp->config_header.page_version,
	    cp->config_header.page_length,
	    cp->config_header.page_number,
	    cp->config_header.page_type);

	/* for reads, copy the page out of the bounce area on success */
	if (lemtoh16(&cp->ioc_status) != MPII_IOCSTATUS_SUCCESS)
		rv = 1;
	else if (read)
		memcpy(page, kva, len);

	mpii_push_reply(sc, ccb->ccb_rcb);
	scsi_io_put(&sc->sc_iopool, ccb);

	return (rv);
}
2291 
/*
 * Translate a posted reply descriptor into the rcb for its reply
 * frame.  Returns NULL for descriptor types other than address
 * replies.  The descriptor is reset to all-ones so the slot reads as
 * unused when the post queue wraps around to it again.
 */
struct mpii_rcb *
mpii_reply(struct mpii_softc *sc, struct mpii_reply_descr *rdp)
{
	struct mpii_rcb		*rcb = NULL;
	u_int32_t		rfid;

	DNPRINTF(MPII_D_INTR, "%s: mpii_reply\n", DEVNAME(sc));

	if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
	    MPII_REPLY_DESCR_ADDRESS_REPLY) {
		/* convert the frame dva back into an index into sc_rcbs */
		rfid = (lemtoh32(&rdp->frame_addr) -
		    (u_int32_t)MPII_DMA_DVA(sc->sc_replies)) /
		    sc->sc_reply_size;

		bus_dmamap_sync(sc->sc_dmat,
		    MPII_DMA_MAP(sc->sc_replies), sc->sc_reply_size * rfid,
		    sc->sc_reply_size, BUS_DMASYNC_POSTREAD);

		rcb = &sc->sc_rcbs[rfid];
	}

	memset(rdp, 0xff, sizeof(*rdp));

	/* sync the (8 byte) descriptor slot we just consumed and reset */
	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
	    8 * sc->sc_reply_post_host_index, 8,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	return (rcb);
}
2321 
2322 struct mpii_dmamem *
2323 mpii_dmamem_alloc(struct mpii_softc *sc, size_t size)
2324 {
2325 	struct mpii_dmamem	*mdm;
2326 	int			nsegs;
2327 
2328 	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_NOWAIT | M_ZERO);
2329 	if (mdm == NULL)
2330 		return (NULL);
2331 
2332 	mdm->mdm_size = size;
2333 
2334 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
2335 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
2336 		goto mdmfree;
2337 
2338 	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg,
2339 	    1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
2340 		goto destroy;
2341 
2342 	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
2343 	    &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0)
2344 		goto free;
2345 
2346 	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
2347 	    NULL, BUS_DMA_NOWAIT) != 0)
2348 		goto unmap;
2349 
2350 	return (mdm);
2351 
2352 unmap:
2353 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
2354 free:
2355 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
2356 destroy:
2357 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
2358 mdmfree:
2359 	free(mdm, M_DEVBUF, 0);
2360 
2361 	return (NULL);
2362 }
2363 
2364 void
2365 mpii_dmamem_free(struct mpii_softc *sc, struct mpii_dmamem *mdm)
2366 {
2367 	DNPRINTF(MPII_D_MEM, "%s: mpii_dmamem_free %#x\n", DEVNAME(sc), mdm);
2368 
2369 	bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map);
2370 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
2371 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
2372 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
2373 	free(mdm, M_DEVBUF, 0);
2374 }
2375 
2376 int
2377 mpii_insert_dev(struct mpii_softc *sc, struct mpii_device *dev)
2378 {
2379 	int		slot;	/* initial hint */
2380 
2381 	if (dev == NULL || dev->slot < 0)
2382 		return (1);
2383 	slot = dev->slot;
2384 
2385 	while (slot < sc->sc_max_devices && sc->sc_devs[slot] != NULL)
2386 		slot++;
2387 
2388 	if (slot >= sc->sc_max_devices)
2389 		return (1);
2390 
2391 	dev->slot = slot;
2392 	sc->sc_devs[slot] = dev;
2393 
2394 	return (0);
2395 }
2396 
2397 int
2398 mpii_remove_dev(struct mpii_softc *sc, struct mpii_device *dev)
2399 {
2400 	int			i;
2401 
2402 	if (dev == NULL)
2403 		return (1);
2404 
2405 	for (i = 0; i < sc->sc_max_devices; i++) {
2406 		if (sc->sc_devs[i] == NULL)
2407 			continue;
2408 
2409 		if (sc->sc_devs[i]->dev_handle == dev->dev_handle) {
2410 			sc->sc_devs[i] = NULL;
2411 			return (0);
2412 		}
2413 	}
2414 
2415 	return (1);
2416 }
2417 
2418 struct mpii_device *
2419 mpii_find_dev(struct mpii_softc *sc, u_int16_t handle)
2420 {
2421 	int			i;
2422 
2423 	for (i = 0; i < sc->sc_max_devices; i++) {
2424 		if (sc->sc_devs[i] == NULL)
2425 			continue;
2426 
2427 		if (sc->sc_devs[i]->dev_handle == handle)
2428 			return (sc->sc_devs[i]);
2429 	}
2430 
2431 	return (NULL);
2432 }
2433 
2434 int
2435 mpii_alloc_ccbs(struct mpii_softc *sc)
2436 {
2437 	struct mpii_ccb		*ccb;
2438 	u_int8_t		*cmd;
2439 	int			i;
2440 
2441 	SIMPLEQ_INIT(&sc->sc_ccb_free);
2442 	SIMPLEQ_INIT(&sc->sc_ccb_tmos);
2443 	mtx_init(&sc->sc_ccb_free_mtx, IPL_BIO);
2444 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
2445 	scsi_ioh_set(&sc->sc_ccb_tmo_handler, &sc->sc_iopool,
2446 	    mpii_scsi_cmd_tmo_handler, sc);
2447 
2448 	sc->sc_ccbs = mallocarray((sc->sc_max_cmds-1), sizeof(*ccb),
2449 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2450 	if (sc->sc_ccbs == NULL) {
2451 		printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
2452 		return (1);
2453 	}
2454 
2455 	sc->sc_requests = mpii_dmamem_alloc(sc,
2456 	    sc->sc_request_size * sc->sc_max_cmds);
2457 	if (sc->sc_requests == NULL) {
2458 		printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
2459 		goto free_ccbs;
2460 	}
2461 	cmd = MPII_DMA_KVA(sc->sc_requests);
2462 
2463 	/*
2464 	 * we have sc->sc_max_cmds system request message
2465 	 * frames, but smid zero cannot be used. so we then
2466 	 * have (sc->sc_max_cmds - 1) number of ccbs
2467 	 */
2468 	for (i = 1; i < sc->sc_max_cmds; i++) {
2469 		ccb = &sc->sc_ccbs[i - 1];
2470 
2471 		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, sc->sc_max_sgl,
2472 		    MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
2473 		    &ccb->ccb_dmamap) != 0) {
2474 			printf("%s: unable to create dma map\n", DEVNAME(sc));
2475 			goto free_maps;
2476 		}
2477 
2478 		ccb->ccb_sc = sc;
2479 		htolem16(&ccb->ccb_smid, i);
2480 		ccb->ccb_offset = sc->sc_request_size * i;
2481 
2482 		ccb->ccb_cmd = &cmd[ccb->ccb_offset];
2483 		ccb->ccb_cmd_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_requests) +
2484 		    ccb->ccb_offset;
2485 
2486 		DNPRINTF(MPII_D_CCB, "%s: mpii_alloc_ccbs(%d) ccb: %#x map: %#x "
2487 		    "sc: %#x smid: %#x offs: %#x cmd: %#x dva: %#x\n",
2488 		    DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc,
2489 		    ccb->ccb_smid, ccb->ccb_offset, ccb->ccb_cmd,
2490 		    ccb->ccb_cmd_dva);
2491 
2492 		mpii_put_ccb(sc, ccb);
2493 	}
2494 
2495 	scsi_iopool_init(&sc->sc_iopool, sc, mpii_get_ccb, mpii_put_ccb);
2496 
2497 	return (0);
2498 
2499 free_maps:
2500 	while ((ccb = mpii_get_ccb(sc)) != NULL)
2501 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
2502 
2503 	mpii_dmamem_free(sc, sc->sc_requests);
2504 free_ccbs:
2505 	free(sc->sc_ccbs, M_DEVBUF, 0);
2506 
2507 	return (1);
2508 }
2509 
/*
 * iopool "put" backend: reset a ccb and return it to the free list.
 * The request frame is zeroed here so every command starts clean.
 */
void
mpii_put_ccb(void *cookie, void *io)
{
	struct mpii_softc	*sc = cookie;
	struct mpii_ccb		*ccb = io;

	DNPRINTF(MPII_D_CCB, "%s: mpii_put_ccb %#x\n", DEVNAME(sc), ccb);

	ccb->ccb_state = MPII_CCB_FREE;
	ccb->ccb_cookie = NULL;
	ccb->ccb_done = NULL;
	ccb->ccb_rcb = NULL;
	memset(ccb->ccb_cmd, 0, sc->sc_request_size);

	/*
	 * drop the kernel lock around the free-list mutex; presumably
	 * to avoid holding it while contending here -- confirm.
	 */
	KERNEL_UNLOCK();
	mtx_enter(&sc->sc_ccb_free_mtx);
	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link);
	mtx_leave(&sc->sc_ccb_free_mtx);
	KERNEL_LOCK();
}
2530 
/*
 * iopool "get" backend: take a ccb off the free list, or return NULL
 * when none are available (the iopool layer handles waiting).
 */
void *
mpii_get_ccb(void *cookie)
{
	struct mpii_softc	*sc = cookie;
	struct mpii_ccb		*ccb;

	/* drop the kernel lock around the free-list mutex (see put) */
	KERNEL_UNLOCK();

	mtx_enter(&sc->sc_ccb_free_mtx);
	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free);
	if (ccb != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link);
		ccb->ccb_state = MPII_CCB_READY;
	}
	mtx_leave(&sc->sc_ccb_free_mtx);

	KERNEL_LOCK();

	DNPRINTF(MPII_D_CCB, "%s: mpii_get_ccb %#x\n", DEVNAME(sc), ccb);

	return (ccb);
}
2553 
2554 int
2555 mpii_alloc_replies(struct mpii_softc *sc)
2556 {
2557 	DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_replies\n", DEVNAME(sc));
2558 
2559 	sc->sc_rcbs = mallocarray(sc->sc_num_reply_frames,
2560 	    sizeof(struct mpii_rcb), M_DEVBUF, M_NOWAIT);
2561 	if (sc->sc_rcbs == NULL)
2562 		return (1);
2563 
2564 	sc->sc_replies = mpii_dmamem_alloc(sc, sc->sc_reply_size *
2565 	    sc->sc_num_reply_frames);
2566 	if (sc->sc_replies == NULL) {
2567 		free(sc->sc_rcbs, M_DEVBUF, 0);
2568 		return (1);
2569 	}
2570 
2571 	return (0);
2572 }
2573 
2574 void
2575 mpii_push_replies(struct mpii_softc *sc)
2576 {
2577 	struct mpii_rcb		*rcb;
2578 	caddr_t			kva = MPII_DMA_KVA(sc->sc_replies);
2579 	int			i;
2580 
2581 	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
2582 	    0, sc->sc_reply_size * sc->sc_num_reply_frames,
2583 	    BUS_DMASYNC_PREREAD);
2584 
2585 	for (i = 0; i < sc->sc_num_reply_frames; i++) {
2586 		rcb = &sc->sc_rcbs[i];
2587 
2588 		rcb->rcb_reply = kva + sc->sc_reply_size * i;
2589 		rcb->rcb_reply_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) +
2590 		    sc->sc_reply_size * i;
2591 		mpii_push_reply(sc, rcb);
2592 	}
2593 }
2594 
/*
 * Post a prepared ccb to the IOC by writing its request descriptor to
 * the request descriptor post registers.  On LP64 the 64-bit
 * descriptor is written with a single raw access; otherwise it is
 * written as two 32-bit halves under sc_req_mtx so concurrent posts
 * cannot interleave.
 */
void
mpii_start(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	struct mpii_request_header	*rhp;
	struct mpii_request_descr	descr;
	/* view the descriptor as raw words for the register writes */
	u_long				 *rdp = (u_long *)&descr;

	DNPRINTF(MPII_D_RW, "%s: mpii_start %#x\n", DEVNAME(sc),
	    ccb->ccb_cmd_dva);

	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, sc->sc_request_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ccb->ccb_state = MPII_CCB_QUEUED;

	rhp = ccb->ccb_cmd;

	memset(&descr, 0, sizeof(descr));

	/* pick the descriptor type matching the request's function */
	switch (rhp->function) {
	case MPII_FUNCTION_SCSI_IO_REQUEST:
		descr.request_flags = MPII_REQ_DESCR_SCSI_IO;
		descr.dev_handle = htole16(ccb->ccb_dev_handle);
		break;
	case MPII_FUNCTION_SCSI_TASK_MGMT:
		descr.request_flags = MPII_REQ_DESCR_HIGH_PRIORITY;
		break;
	default:
		descr.request_flags = MPII_REQ_DESCR_DEFAULT;
	}

	descr.vf_id = sc->sc_vf_id;
	descr.smid = ccb->ccb_smid;

	DNPRINTF(MPII_D_RW, "%s:   MPII_REQ_DESCR_POST_LOW (0x%08x) write "
	    "0x%08x\n", DEVNAME(sc), MPII_REQ_DESCR_POST_LOW, *rdp);

	DNPRINTF(MPII_D_RW, "%s:   MPII_REQ_DESCR_POST_HIGH (0x%08x) write "
	    "0x%08x\n", DEVNAME(sc), MPII_REQ_DESCR_POST_HIGH, *(rdp+1));

#if defined(__LP64__)
	/* one atomic 64-bit write covers both halves */
	bus_space_write_raw_8(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, *rdp);
#else
	/* two 32-bit writes, serialized so posts cannot interleave */
	mtx_enter(&sc->sc_req_mtx);
	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, rdp[0]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, 8, BUS_SPACE_BARRIER_WRITE);

	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_HIGH, rdp[1]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, 8, BUS_SPACE_BARRIER_WRITE);
	mtx_leave(&sc->sc_req_mtx);
#endif
}
2653 
/*
 * Submit a ccb and spin until it completes.  The ccb's done handler
 * and cookie are temporarily replaced so completion flips the local
 * "rv" flag; the caller's handler is then restored and invoked once.
 * Always returns 0 in the current implementation.
 */
int
mpii_poll(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	void				(*done)(struct mpii_ccb *);
	void				*cookie;
	int				rv = 1;

	DNPRINTF(MPII_D_INTR, "%s: mpii_poll\n", DEVNAME(sc));

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mpii_poll_done;
	ccb->ccb_cookie = &rv;

	mpii_start(sc, ccb);

	/* mpii_poll_done() clears rv once the ccb completes */
	while (rv == 1) {
		/* avoid excessive polling */
		if (mpii_reply_waiting(sc))
			mpii_intr(sc);
		else
			delay(10);
	}

	ccb->ccb_cookie = cookie;
	done(ccb);

	return (0);
}
2684 
2685 void
2686 mpii_poll_done(struct mpii_ccb *ccb)
2687 {
2688 	int				*rv = ccb->ccb_cookie;
2689 
2690 	*rv = 0;
2691 }
2692 
2693 int
2694 mpii_alloc_queues(struct mpii_softc *sc)
2695 {
2696 	u_int32_t		*rfp;
2697 	int			i;
2698 
2699 	DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_queues\n", DEVNAME(sc));
2700 
2701 	sc->sc_reply_freeq = mpii_dmamem_alloc(sc,
2702 	    sc->sc_reply_free_qdepth * sizeof(*rfp));
2703 	if (sc->sc_reply_freeq == NULL)
2704 		return (1);
2705 	rfp = MPII_DMA_KVA(sc->sc_reply_freeq);
2706 	for (i = 0; i < sc->sc_num_reply_frames; i++) {
2707 		rfp[i] = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) +
2708 		    sc->sc_reply_size * i;
2709 	}
2710 
2711 	sc->sc_reply_postq = mpii_dmamem_alloc(sc,
2712 	    sc->sc_reply_post_qdepth * sizeof(struct mpii_reply_descr));
2713 	if (sc->sc_reply_postq == NULL)
2714 		goto free_reply_freeq;
2715 	sc->sc_reply_postq_kva = MPII_DMA_KVA(sc->sc_reply_postq);
2716 	memset(sc->sc_reply_postq_kva, 0xff, sc->sc_reply_post_qdepth *
2717 	    sizeof(struct mpii_reply_descr));
2718 
2719 	return (0);
2720 
2721 free_reply_freeq:
2722 	mpii_dmamem_free(sc, sc->sc_reply_freeq);
2723 	return (1);
2724 }
2725 
2726 void
2727 mpii_init_queues(struct mpii_softc *sc)
2728 {
2729 	DNPRINTF(MPII_D_MISC, "%s:  mpii_init_queues\n", DEVNAME(sc));
2730 
2731 	sc->sc_reply_free_host_index = sc->sc_reply_free_qdepth - 1;
2732 	sc->sc_reply_post_host_index = 0;
2733 	mpii_write_reply_free(sc, sc->sc_reply_free_host_index);
2734 	mpii_write_reply_post(sc, sc->sc_reply_post_host_index);
2735 }
2736 
/*
 * Submit a ccb and sleep until it completes.  The ccb's cookie is
 * pointed at a stack mutex and doubles as the completion flag:
 * mpii_wait_done() clears it under that mutex and wakes us.  The
 * caller's done handler and cookie are restored before the handler
 * is invoked.
 */
void
mpii_wait(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	struct mutex		mtx = MUTEX_INITIALIZER(IPL_BIO);
	void			(*done)(struct mpii_ccb *);
	void			*cookie;

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mpii_wait_done;
	ccb->ccb_cookie = &mtx;

	/* XXX this will wait forever for the ccb to complete */

	mpii_start(sc, ccb);

	mtx_enter(&mtx);
	/* ccb_cookie is cleared by mpii_wait_done() on completion */
	while (ccb->ccb_cookie != NULL)
		msleep(ccb, &mtx, PRIBIO, "mpiiwait", 0);
	mtx_leave(&mtx);

	ccb->ccb_cookie = cookie;
	done(ccb);
}
2762 
/*
 * Completion side of mpii_wait(): clear the cookie under the mutex it
 * points at, then wake the sleeping thread.
 */
void
mpii_wait_done(struct mpii_ccb *ccb)
{
	struct mutex		*mtx = ccb->ccb_cookie;

	mtx_enter(mtx);
	ccb->ccb_cookie = NULL;
	mtx_leave(mtx);

	wakeup_one(ccb);
}
2774 
/*
 * scsi midlayer entry point: build a SCSI IO request for the xfer in
 * the ccb's request frame, load its data buffer into the SGL and
 * submit it (polled when SCSI_POLL is set, otherwise with a timeout
 * armed).  Completion goes through mpii_scsi_cmd_done().
 */
void
mpii_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct mpii_softc	*sc = link->adapter_softc;
	struct mpii_ccb		*ccb = xs->io;
	struct mpii_msg_scsi_io	*io;
	struct mpii_device	*dev;
	int			 ret;

	DNPRINTF(MPII_D_CMD, "%s: mpii_scsi_cmd\n", DEVNAME(sc));

	/* oversized CDB: fail with synthesized ILLEGAL REQUEST sense */
	if (xs->cmdlen > MPII_CDB_LEN) {
		DNPRINTF(MPII_D_CMD, "%s: CDB too big %d\n",
		    DEVNAME(sc), xs->cmdlen);
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.error_code = SSD_ERRCODE_VALID | 0x70;
		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.add_sense_code = 0x20;
		xs->error = XS_SENSE;
		scsi_done(xs);
		return;
	}

	if ((dev = sc->sc_devs[link->target]) == NULL) {
		/* device no longer exists */
		xs->error = XS_SELTIMEOUT;
		scsi_done(xs);
		return;
	}

	/* the rest of the submission path runs without the kernel lock */
	KERNEL_UNLOCK();

	DNPRINTF(MPII_D_CMD, "%s: ccb_smid: %d xs->flags: 0x%x\n",
	    DEVNAME(sc), ccb->ccb_smid, xs->flags);

	ccb->ccb_cookie = xs;
	ccb->ccb_done = mpii_scsi_cmd_done;
	ccb->ccb_dev_handle = dev->dev_handle;

	io = ccb->ccb_cmd;
	memset(io, 0, sizeof(*io));
	io->function = MPII_FUNCTION_SCSI_IO_REQUEST;
	io->sense_buffer_length = sizeof(xs->sense);
	io->sgl_offset0 = sizeof(struct mpii_msg_scsi_io) / 4;
	htolem16(&io->io_flags, xs->cmdlen);
	htolem16(&io->dev_handle, ccb->ccb_dev_handle);
	htobem16(&io->lun[0], link->lun);

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}

	io->tagging = MPII_SCSIIO_ATTR_SIMPLE_Q;

	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	htolem32(&io->data_length, xs->datalen);

	/* sense data is at the end of a request */
	htolem32(&io->sense_buffer_low_address, ccb->ccb_cmd_dva +
	    sc->sc_request_size - sizeof(struct scsi_sense_data));

	/* load the data buffer into the SGL (format depends on chip gen) */
	if (ISSET(sc->sc_flags, MPII_F_SAS3))
		ret = mpii_load_xs_sas3(ccb);
	else
		ret = mpii_load_xs(ccb);

	if (ret != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		goto done;
	}

	timeout_set(&xs->stimeout, mpii_scsi_cmd_tmo, ccb);
	if (xs->flags & SCSI_POLL) {
		if (mpii_poll(sc, ccb) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			goto done;
		}
	} else {
		timeout_add_msec(&xs->stimeout, xs->timeout);
		mpii_start(sc, ccb);
	}

	KERNEL_LOCK();
	return;

done:
	KERNEL_LOCK();
	scsi_done(xs);
}
2874 
/*
 * Command timeout: if the ccb is still queued with the hardware, move
 * it to the timeout list and schedule the iopool handler that will
 * issue a target reset for it.
 */
void
mpii_scsi_cmd_tmo(void *xccb)
{
	struct mpii_ccb		*ccb = xccb;
	struct mpii_softc	*sc = ccb->ccb_sc;

	printf("%s: mpii_scsi_cmd_tmo\n", DEVNAME(sc));

	mtx_enter(&sc->sc_ccb_mtx);
	if (ccb->ccb_state == MPII_CCB_QUEUED) {
		ccb->ccb_state = MPII_CCB_TIMEOUT;
		SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_tmos, ccb, ccb_link);
	}
	mtx_leave(&sc->sc_ccb_mtx);

	scsi_ioh_add(&sc->sc_ccb_tmo_handler);
}
2892 
/*
 * Runs with a spare ccb ("tccb") handed out by the iopool.  Takes the
 * first timed-out command off the list and uses tccb to send a SCSI
 * task management target reset for its device.  If the list is empty
 * the spare ccb is returned to the pool.
 */
void
mpii_scsi_cmd_tmo_handler(void *cookie, void *io)
{
	struct mpii_softc			*sc = cookie;
	struct mpii_ccb				*tccb = io;
	struct mpii_ccb				*ccb;
	struct mpii_msg_scsi_task_request	*stq;

	mtx_enter(&sc->sc_ccb_mtx);
	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_tmos);
	if (ccb != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_tmos, ccb_link);
		ccb->ccb_state = MPII_CCB_QUEUED;
	}
	/* should remove any other ccbs for the same dev handle */
	mtx_leave(&sc->sc_ccb_mtx);

	if (ccb == NULL) {
		scsi_io_put(&sc->sc_iopool, tccb);
		return;
	}

	stq = tccb->ccb_cmd;
	stq->function = MPII_FUNCTION_SCSI_TASK_MGMT;
	stq->task_type = MPII_SCSI_TASK_TARGET_RESET;
	htolem16(&stq->dev_handle, ccb->ccb_dev_handle);

	tccb->ccb_done = mpii_scsi_cmd_tmo_done;
	mpii_start(sc, tccb);
}
2923 
2924 void
2925 mpii_scsi_cmd_tmo_done(struct mpii_ccb *tccb)
2926 {
2927 	mpii_scsi_cmd_tmo_handler(tccb->ccb_sc, tccb);
2928 }
2929 
/*
 * Completion handler for SCSI IO ccbs.  Unlinks the ccb from the
 * timeout list if it was queued there, unloads the data dmamap, and
 * translates the IOC's reply (if any) into scsi_xfer status, residual
 * and sense data before calling scsi_done().
 */
void
mpii_scsi_cmd_done(struct mpii_ccb *ccb)
{
	struct mpii_ccb		*tccb;
	struct mpii_msg_scsi_io_error	*sie;
	struct mpii_softc	*sc = ccb->ccb_sc;
	struct scsi_xfer	*xs = ccb->ccb_cookie;
	struct scsi_sense_data	*sense;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;

	timeout_del(&xs->stimeout);
	mtx_enter(&sc->sc_ccb_mtx);
	/* if the timeout fired first, take the ccb back off its list */
	if (ccb->ccb_state == MPII_CCB_TIMEOUT) {
		/* ENOSIMPLEQ_REMOVE :( */
		if (ccb == SIMPLEQ_FIRST(&sc->sc_ccb_tmos))
			SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_tmos, ccb_link);
		else {
			SIMPLEQ_FOREACH(tccb, &sc->sc_ccb_tmos, ccb_link) {
				if (SIMPLEQ_NEXT(tccb, ccb_link) == ccb) {
					SIMPLEQ_REMOVE_AFTER(&sc->sc_ccb_tmos,
					    tccb, ccb_link);
					break;
				}
			}
		}
	}

	ccb->ccb_state = MPII_CCB_READY;
	mtx_leave(&sc->sc_ccb_mtx);

	if (xs->datalen != 0) {
		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, dmap);
	}

	xs->error = XS_NOERROR;
	xs->resid = 0;

	if (ccb->ccb_rcb == NULL) {
		/* no scsi error, we're ok so drop out early */
		xs->status = SCSI_OK;
		goto done;
	}

	sie = ccb->ccb_rcb->rcb_reply;

	DNPRINTF(MPII_D_CMD, "%s: mpii_scsi_cmd_done xs cmd: 0x%02x len: %d "
	    "flags 0x%x\n", DEVNAME(sc), xs->cmd->opcode, xs->datalen,
	    xs->flags);
	DNPRINTF(MPII_D_CMD, "%s:  dev_handle: %d msg_length: %d "
	    "function: 0x%02x\n", DEVNAME(sc), letoh16(sie->dev_handle),
	    sie->msg_length, sie->function);
	DNPRINTF(MPII_D_CMD, "%s:  vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
	    sie->vp_id, sie->vf_id);
	DNPRINTF(MPII_D_CMD, "%s:  scsi_status: 0x%02x scsi_state: 0x%02x "
	    "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status,
	    sie->scsi_state, letoh16(sie->ioc_status));
	DNPRINTF(MPII_D_CMD, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(sie->ioc_loginfo));
	DNPRINTF(MPII_D_CMD, "%s:  transfer_count: %d\n", DEVNAME(sc),
	    letoh32(sie->transfer_count));
	DNPRINTF(MPII_D_CMD, "%s:  sense_count: %d\n", DEVNAME(sc),
	    letoh32(sie->sense_count));
	DNPRINTF(MPII_D_CMD, "%s:  response_info: 0x%08x\n", DEVNAME(sc),
	    letoh32(sie->response_info));
	DNPRINTF(MPII_D_CMD, "%s:  task_tag: 0x%04x\n", DEVNAME(sc),
	    letoh16(sie->task_tag));
	DNPRINTF(MPII_D_CMD, "%s:  bidirectional_transfer_count: 0x%08x\n",
	    DEVNAME(sc), letoh32(sie->bidirectional_transfer_count));

	if (sie->scsi_state & MPII_SCSIIO_ERR_STATE_NO_SCSI_STATUS)
		xs->status = SCSI_TERMINATED;
	else
		xs->status = sie->scsi_status;
	xs->resid = 0;

	/* map the IOC completion status onto midlayer error codes */
	switch (lemtoh16(&sie->ioc_status) & MPII_IOCSTATUS_MASK) {
	case MPII_IOCSTATUS_SCSI_DATA_UNDERRUN:
		xs->resid = xs->datalen - lemtoh32(&sie->transfer_count);
		/* FALLTHROUGH */

	case MPII_IOCSTATUS_SUCCESS:
	case MPII_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			xs->error = XS_NOERROR;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			xs->error = XS_DRIVER_STUFFUP;
		}
		break;

	case MPII_IOCSTATUS_BUSY:
	case MPII_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_BUSY;
		break;

	case MPII_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPII_IOCSTATUS_SCSI_TASK_TERMINATED:
		xs->error = XS_RESET;
		break;

	case MPII_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPII_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	/* sense data was dma'd to the tail end of the request frame */
	sense = (struct scsi_sense_data *)((caddr_t)ccb->ccb_cmd +
	    sc->sc_request_size - sizeof(*sense));
	if (sie->scsi_state & MPII_SCSIIO_ERR_STATE_AUTOSENSE_VALID)
		memcpy(&xs->sense, sense, sizeof(xs->sense));

	DNPRINTF(MPII_D_CMD, "%s:  xs err: %d status: %#x\n", DEVNAME(sc),
	    xs->error, xs->status);

	mpii_push_reply(sc, ccb->ccb_rcb);
done:
	KERNEL_LOCK();
	scsi_done(xs);
	KERNEL_UNLOCK();
}
3069 
3070 int
3071 mpii_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
3072 {
3073 	struct mpii_softc	*sc = (struct mpii_softc *)link->adapter_softc;
3074 	struct mpii_device	*dev = sc->sc_devs[link->target];
3075 
3076 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_scsi_ioctl\n", DEVNAME(sc));
3077 
3078 	switch (cmd) {
3079 	case DIOCGCACHE:
3080 	case DIOCSCACHE:
3081 		if (dev != NULL && ISSET(dev->flags, MPII_DF_VOLUME)) {
3082 			return (mpii_ioctl_cache(link, cmd,
3083 			    (struct dk_cache *)addr));
3084 		}
3085 		break;
3086 
3087 	default:
3088 		if (sc->sc_ioctl)
3089 			return (sc->sc_ioctl(link->adapter_softc, cmd, addr));
3090 
3091 		break;
3092 	}
3093 
3094 	return (ENOTTY);
3095 }
3096 
/*
 * Get (DIOCGCACHE) or set (DIOCSCACHE) the write-cache state of a RAID
 * volume.  The current state is read from RAID volume page 0; a change
 * is requested through a polled RAID action message.  Read-cache
 * control is not available through this interface, so rdcache always
 * reads back as 0 and attempting to set it returns EOPNOTSUPP.
 */
int
mpii_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc)
{
	struct mpii_softc *sc = (struct mpii_softc *)link->adapter_softc;
	struct mpii_device *dev = sc->sc_devs[link->target];
	struct mpii_cfg_raid_vol_pg0 *vpg;
	struct mpii_msg_raid_action_request *req;
	struct mpii_msg_raid_action_reply *rep;
	struct mpii_cfg_hdr hdr;
	struct mpii_ccb	*ccb;
	u_int32_t addr = MPII_CFG_RAID_VOL_ADDR_HANDLE | dev->dev_handle;
	size_t pagelen;
	int rv = 0;
	int enabled;

	/* Fetch the volume page header (polled, may run without process ctx). */
	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    addr, MPII_PG_POLL, &hdr) != 0)
		return (EINVAL);

	/* page_length is in 32-bit dwords. */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (vpg == NULL)
		return (ENOMEM);

	if (mpii_req_cfg_page(sc, addr, MPII_PG_POLL, &hdr, 1,
	    vpg, pagelen) != 0) {
		rv = EINVAL;
		goto done;
	}

	/* Current write-cache state as reported by the firmware. */
	enabled = ((lemtoh16(&vpg->volume_settings) &
	    MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_MASK) ==
	    MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_ENABLED) ? 1 : 0;

	if (cmd == DIOCGCACHE) {
		dc->wrcache = enabled;
		dc->rdcache = 0;
		goto done;
	} /* else DIOCSCACHE */

	/* Read cache cannot be toggled through the RAID action interface. */
	if (dc->rdcache) {
		rv = EOPNOTSUPP;
		goto done;
	}

	/* Requested state already in effect: nothing to do. */
	if (((dc->wrcache) ? 1 : 0) == enabled)
		goto done;

	ccb = scsi_io_get(&sc->sc_iopool, SCSI_POLL);
	if (ccb == NULL) {
		rv = ENOMEM;
		goto done;
	}

	ccb->ccb_done = mpii_empty_done;

	/* Build the RAID action request to flip the volume write cache. */
	req = ccb->ccb_cmd;
	memset(req, 0, sizeof(*req));
	req->function = MPII_FUNCTION_RAID_ACTION;
	req->action = MPII_RAID_ACTION_CHANGE_VOL_WRITE_CACHE;
	htolem16(&req->vol_dev_handle, dev->dev_handle);
	htolem32(&req->action_data, dc->wrcache ?
	    MPII_RAID_VOL_WRITE_CACHE_ENABLE :
	    MPII_RAID_VOL_WRITE_CACHE_DISABLE);

	/*
	 * NOTE(review): on this failure path the ccb is not returned to
	 * the iopool.  Before changing that, confirm whether mpii_poll()
	 * can return nonzero with the ccb still owned by the hardware —
	 * releasing an in-flight ccb would be worse than the leak.
	 */
	if (mpii_poll(sc, ccb) != 0) {
		rv = EIO;
		goto done;
	}

	if (ccb->ccb_rcb != NULL) {
		/* Verify the firmware actually applied the requested state. */
		rep = ccb->ccb_rcb->rcb_reply;
		if ((rep->ioc_status != MPII_IOCSTATUS_SUCCESS) ||
		    ((rep->action_data[0] &
		     MPII_RAID_VOL_WRITE_CACHE_MASK) !=
		    (dc->wrcache ? MPII_RAID_VOL_WRITE_CACHE_ENABLE :
		     MPII_RAID_VOL_WRITE_CACHE_DISABLE)))
			rv = EINVAL;
		mpii_push_reply(sc, ccb->ccb_rcb);
	}

	scsi_io_put(&sc->sc_iopool, ccb);

done:
	free(vpg, M_TEMP, 0);
	return (rv);
}
3184 
3185 #if NBIO > 0
3186 int
3187 mpii_ioctl(struct device *dev, u_long cmd, caddr_t addr)
3188 {
3189 	struct mpii_softc	*sc = (struct mpii_softc *)dev;
3190 	int			error = 0;
3191 
3192 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl ", DEVNAME(sc));
3193 
3194 	switch (cmd) {
3195 	case BIOCINQ:
3196 		DNPRINTF(MPII_D_IOCTL, "inq\n");
3197 		error = mpii_ioctl_inq(sc, (struct bioc_inq *)addr);
3198 		break;
3199 	case BIOCVOL:
3200 		DNPRINTF(MPII_D_IOCTL, "vol\n");
3201 		error = mpii_ioctl_vol(sc, (struct bioc_vol *)addr);
3202 		break;
3203 	case BIOCDISK:
3204 		DNPRINTF(MPII_D_IOCTL, "disk\n");
3205 		error = mpii_ioctl_disk(sc, (struct bioc_disk *)addr);
3206 		break;
3207 	default:
3208 		DNPRINTF(MPII_D_IOCTL, " invalid ioctl\n");
3209 		error = EINVAL;
3210 	}
3211 
3212 	return (error);
3213 }
3214 
3215 int
3216 mpii_ioctl_inq(struct mpii_softc *sc, struct bioc_inq *bi)
3217 {
3218 	int			i;
3219 
3220 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_inq\n", DEVNAME(sc));
3221 
3222 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
3223 	for (i = 0; i < sc->sc_max_devices; i++)
3224 		if (sc->sc_devs[i] &&
3225 		    ISSET(sc->sc_devs[i]->flags, MPII_DF_VOLUME))
3226 			bi->bi_novol++;
3227 	return (0);
3228 }
3229 
/*
 * BIOCVOL: fill in a bioc_vol for the given volume id from RAID volume
 * page 0 — status, RAID level, disk count (including hot spares), size
 * and the attached sd(4) device name.
 */
int
mpii_ioctl_vol(struct mpii_softc *sc, struct bioc_vol *bv)
{
	struct mpii_cfg_raid_vol_pg0	*vpg;
	struct mpii_cfg_hdr		hdr;
	struct mpii_device		*dev;
	struct scsi_link		*lnk;
	struct device			*scdev;
	size_t				pagelen;
	u_int16_t			volh;
	int				rv, hcnt = 0;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_vol %d\n",
	    DEVNAME(sc), bv->bv_volid);

	if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL)
		return (ENODEV);
	volh = dev->dev_handle;

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
		printf("%s: unable to fetch header for raid volume page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is in 32-bit dwords. */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (vpg == NULL) {
		printf("%s: unable to allocate space for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
	    &hdr, 1, vpg, pagelen) != 0) {
		printf("%s: unable to fetch raid volume page 0\n",
		    DEVNAME(sc));
		free(vpg, M_TEMP, 0);
		return (EINVAL);
	}

	/* Map the firmware volume state onto bio(4) status values. */
	switch (vpg->volume_state) {
	case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
	case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
		/* A degraded volume that is resyncing counts as rebuilding. */
		if (ISSET(lemtoh32(&vpg->volume_status),
		    MPII_CFG_RAID_VOL_0_STATUS_RESYNC)) {
			bv->bv_status = BIOC_SVREBUILD;
			bv->bv_percent = dev->percent;
		} else
			bv->bv_status = BIOC_SVDEGRADED;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_FAILED:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
		bv->bv_status = BIOC_SVBUILDING;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_MISSING:
	default:
		bv->bv_status = BIOC_SVINVALID;
		break;
	}

	/* Map the firmware volume type onto a numeric RAID level. */
	switch (vpg->volume_type) {
	case MPII_CFG_RAID_VOL_0_TYPE_RAID0:
		bv->bv_level = 0;
		break;
	case MPII_CFG_RAID_VOL_0_TYPE_RAID1:
		bv->bv_level = 1;
		break;
	case MPII_CFG_RAID_VOL_0_TYPE_RAID1E:
	case MPII_CFG_RAID_VOL_0_TYPE_RAID10:
		bv->bv_level = 10;
		break;
	default:
		bv->bv_level = -1;
	}

	/* Count hot spares in this volume's pool; they add to bv_nodisk. */
	if ((rv = mpii_bio_hs(sc, NULL, 0, vpg->hot_spare_pool, &hcnt)) != 0) {
		free(vpg, M_TEMP, 0);
		return (rv);
	}

	bv->bv_nodisk = vpg->num_phys_disks + hcnt;

	bv->bv_size = letoh64(vpg->max_lba) * lemtoh16(&vpg->block_size);

	/* Report the name of the sd(4) device attached at this target. */
	lnk = scsi_get_link(sc->sc_scsibus, dev->slot, 0);
	if (lnk != NULL) {
		scdev = lnk->device_softc;
		strlcpy(bv->bv_dev, scdev->dv_xname, sizeof(bv->bv_dev));
	}

	free(vpg, M_TEMP, 0);
	return (0);
}
3330 
3331 int
3332 mpii_ioctl_disk(struct mpii_softc *sc, struct bioc_disk *bd)
3333 {
3334 	struct mpii_cfg_raid_vol_pg0		*vpg;
3335 	struct mpii_cfg_raid_vol_pg0_physdisk	*pd;
3336 	struct mpii_cfg_hdr			hdr;
3337 	struct mpii_device			*dev;
3338 	size_t					pagelen;
3339 	u_int16_t				volh;
3340 	u_int8_t				dn;
3341 
3342 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_disk %d/%d\n",
3343 	    DEVNAME(sc), bd->bd_volid, bd->bd_diskid);
3344 
3345 	if ((dev = mpii_find_vol(sc, bd->bd_volid)) == NULL)
3346 		return (ENODEV);
3347 	volh = dev->dev_handle;
3348 
3349 	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3350 	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
3351 		printf("%s: unable to fetch header for raid volume page 0\n",
3352 		    DEVNAME(sc));
3353 		return (EINVAL);
3354 	}
3355 
3356 	pagelen = hdr.page_length * 4;
3357 	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
3358 	if (vpg == NULL) {
3359 		printf("%s: unable to allocate space for raid "
3360 		    "volume page 0\n", DEVNAME(sc));
3361 		return (ENOMEM);
3362 	}
3363 
3364 	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
3365 	    &hdr, 1, vpg, pagelen) != 0) {
3366 		printf("%s: unable to fetch raid volume page 0\n",
3367 		    DEVNAME(sc));
3368 		free(vpg, M_TEMP, 0);
3369 		return (EINVAL);
3370 	}
3371 
3372 	if (bd->bd_diskid >= vpg->num_phys_disks) {
3373 		int		nvdsk = vpg->num_phys_disks;
3374 		int		hsmap = vpg->hot_spare_pool;
3375 
3376 		free(vpg, M_TEMP, 0);
3377 		return (mpii_bio_hs(sc, bd, nvdsk, hsmap, NULL));
3378 	}
3379 
3380 	pd = (struct mpii_cfg_raid_vol_pg0_physdisk *)(vpg + 1) +
3381 	    bd->bd_diskid;
3382 	dn = pd->phys_disk_num;
3383 
3384 	free(vpg, M_TEMP, 0);
3385 	return (mpii_bio_disk(sc, bd, dn));
3386 }
3387 
/*
 * Walk the active RAID configuration looking for hot spares in pool
 * hsmap.  Two modes: with bd == NULL, just count the matching spares
 * into *hscnt (if hscnt is non-NULL); with bd set, resolve the spare
 * whose relative index matches bd->bd_diskid - nvdsk and fill bd in
 * via mpii_bio_disk().
 */
int
mpii_bio_hs(struct mpii_softc *sc, struct bioc_disk *bd, int nvdsk,
     int hsmap, int *hscnt)
{
	struct mpii_cfg_raid_config_pg0	*cpg;
	struct mpii_raid_config_element	*el;
	struct mpii_ecfg_hdr		ehdr;
	size_t				pagelen;
	int				i, nhs = 0;

	if (bd)
		DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs %d\n", DEVNAME(sc),
		    bd->bd_diskid - nvdsk);
	else
		DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs\n", DEVNAME(sc));

	/* RAID config page 0 is an extended page; fetch its header first. */
	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_CONFIG,
	    0, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG, MPII_PG_EXTENDED,
	    &ehdr) != 0) {
		printf("%s: unable to fetch header for raid config page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* ext_page_length is in 32-bit dwords. */
	pagelen = lemtoh16(&ehdr.ext_page_length) * 4;
	cpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (cpg == NULL) {
		printf("%s: unable to allocate space for raid config page 0\n",
		    DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG,
	    MPII_PG_EXTENDED, &ehdr, 1, cpg, pagelen) != 0) {
		printf("%s: unable to fetch raid config page 0\n",
		    DEVNAME(sc));
		free(cpg, M_TEMP, 0);
		return (EINVAL);
	}

	/* Config elements follow the fixed part of the page. */
	el = (struct mpii_raid_config_element *)(cpg + 1);
	for (i = 0; i < cpg->num_elements; i++, el++) {
		if (ISSET(lemtoh16(&el->element_flags),
		    MPII_RAID_CONFIG_ELEMENT_FLAG_HSP_PHYS_DISK) &&
		    el->hot_spare_pool == hsmap) {
			/*
			 * diskid comparison is based on the idea that all
			 * disks are counted by the bio(4) in sequence, thus
			 * subtracting the number of disks in the volume
			 * from the diskid yields us a "relative" hotspare
			 * number, which is good enough for us.
			 */
			if (bd != NULL && bd->bd_diskid == nhs + nvdsk) {
				u_int8_t dn = el->phys_disk_num;

				free(cpg, M_TEMP, 0);
				return (mpii_bio_disk(sc, bd, dn));
			}
			nhs++;
		}
	}

	if (hscnt)
		*hscnt = nhs;

	free(cpg, M_TEMP, 0);
	return (0);
}
3456 
/*
 * Fill in a bioc_disk from RAID physical disk page 0 for physical disk
 * number dn: status, size, vendor/product and serial strings.
 */
int
mpii_bio_disk(struct mpii_softc *sc, struct bioc_disk *bd, u_int8_t dn)
{
	struct mpii_cfg_raid_physdisk_pg0	*ppg;
	struct mpii_cfg_hdr			hdr;
	struct mpii_device			*dev;
	int					len;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_disk %d\n", DEVNAME(sc),
	    bd->bd_diskid);

	ppg = malloc(sizeof(*ppg), M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (ppg == NULL) {
		printf("%s: unable to allocate space for raid physical disk "
		    "page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	/*
	 * The physical disk page has a fixed layout, so the header is
	 * built by hand instead of being fetched from the firmware.
	 * page_length is in 32-bit dwords.
	 */
	hdr.page_version = 0;
	hdr.page_length = sizeof(*ppg) / 4;
	hdr.page_number = 0;
	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_RAID_PD;

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_PHYS_DISK_ADDR_NUMBER | dn, 0,
	    &hdr, 1, ppg, sizeof(*ppg)) != 0) {
		printf("%s: unable to fetch raid drive page 0\n",
		    DEVNAME(sc));
		free(ppg, M_TEMP, 0);
		return (EINVAL);
	}

	bd->bd_target = ppg->phys_disk_num;

	/* No device behind the handle: report the slot as invalid. */
	if ((dev = mpii_find_dev(sc, lemtoh16(&ppg->dev_handle))) == NULL) {
		bd->bd_status = BIOC_SDINVALID;
		free(ppg, M_TEMP, 0);
		return (0);
	}

	/* Map the firmware disk state onto bio(4) status values. */
	switch (ppg->phys_disk_state) {
	case MPII_CFG_RAID_PHYDISK_0_STATE_ONLINE:
	case MPII_CFG_RAID_PHYDISK_0_STATE_OPTIMAL:
		bd->bd_status = BIOC_SDONLINE;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_OFFLINE:
		/* Distinguish a failed disk from one taken offline. */
		if (ppg->offline_reason ==
		    MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILED ||
		    ppg->offline_reason ==
		    MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILEDREQ)
			bd->bd_status = BIOC_SDFAILED;
		else
			bd->bd_status = BIOC_SDOFFLINE;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_DEGRADED:
		bd->bd_status = BIOC_SDFAILED;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_REBUILDING:
		bd->bd_status = BIOC_SDREBUILD;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_HOTSPARE:
		bd->bd_status = BIOC_SDHOTSPARE;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCONFIGURED:
		bd->bd_status = BIOC_SDUNUSED;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCOMPATIBLE:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = letoh64(ppg->dev_max_lba) * lemtoh16(&ppg->block_size);

	/* Report "vendor product" in bd_vendor, separated by one space. */
	scsi_strvis(bd->bd_vendor, ppg->vendor_id, sizeof(ppg->vendor_id));
	len = strlen(bd->bd_vendor);
	bd->bd_vendor[len] = ' ';
	scsi_strvis(&bd->bd_vendor[len + 1], ppg->product_id,
	    sizeof(ppg->product_id));
	scsi_strvis(bd->bd_serial, ppg->serial, sizeof(ppg->serial));

	free(ppg, M_TEMP, 0);
	return (0);
}
3540 
3541 struct mpii_device *
3542 mpii_find_vol(struct mpii_softc *sc, int volid)
3543 {
3544 	struct mpii_device	*dev = NULL;
3545 
3546 	if (sc->sc_vd_id_low + volid >= sc->sc_max_devices)
3547 		return (NULL);
3548 	dev = sc->sc_devs[sc->sc_vd_id_low + volid];
3549 	if (dev && ISSET(dev->flags, MPII_DF_VOLUME))
3550 		return (dev);
3551 	return (NULL);
3552 }
3553 
3554 #ifndef SMALL_KERNEL
3555 /*
3556  * Non-sleeping lightweight version of the mpii_ioctl_vol
3557  */
int
mpii_bio_volstate(struct mpii_softc *sc, struct bioc_vol *bv)
{
	struct mpii_cfg_raid_vol_pg0	*vpg;
	struct mpii_cfg_hdr		hdr;
	struct mpii_device		*dev = NULL;
	size_t				pagelen;
	u_int16_t			volh;

	if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL)
		return (ENODEV);
	volh = dev->dev_handle;

	/* Polled requests and M_NOWAIT: safe from the sensor task context. */
	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, MPII_PG_POLL, &hdr) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: unable to fetch header for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is in 32-bit dwords. */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_NOWAIT | M_ZERO);
	if (vpg == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: unable to allocate space for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh,
	    MPII_PG_POLL, &hdr, 1, vpg, pagelen) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: unable to fetch raid volume "
		    "page 0\n", DEVNAME(sc));
		free(vpg, M_TEMP, 0);
		return (EINVAL);
	}

	/* Same state mapping as mpii_ioctl_vol(), minus rebuild percentage. */
	switch (vpg->volume_state) {
	case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
	case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
		if (ISSET(lemtoh32(&vpg->volume_status),
		    MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
			bv->bv_status = BIOC_SVREBUILD;
		else
			bv->bv_status = BIOC_SVDEGRADED;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_FAILED:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
		bv->bv_status = BIOC_SVBUILDING;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_MISSING:
	default:
		bv->bv_status = BIOC_SVINVALID;
		break;
	}

	free(vpg, M_TEMP, 0);
	return (0);
}
3621 
3622 int
3623 mpii_create_sensors(struct mpii_softc *sc)
3624 {
3625 	struct scsibus_softc	*ssc = sc->sc_scsibus;
3626 	struct device		*dev;
3627 	struct scsi_link	*link;
3628 	int			i;
3629 
3630 	sc->sc_sensors = mallocarray(sc->sc_vd_count, sizeof(struct ksensor),
3631 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3632 	if (sc->sc_sensors == NULL)
3633 		return (1);
3634 	sc->sc_nsensors = sc->sc_vd_count;
3635 
3636 	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
3637 	    sizeof(sc->sc_sensordev.xname));
3638 
3639 	for (i = 0; i < sc->sc_vd_count; i++) {
3640 		link = scsi_get_link(ssc, i + sc->sc_vd_id_low, 0);
3641 		if (link == NULL)
3642 			goto bad;
3643 
3644 		dev = link->device_softc;
3645 
3646 		sc->sc_sensors[i].type = SENSOR_DRIVE;
3647 		sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
3648 
3649 		strlcpy(sc->sc_sensors[i].desc, dev->dv_xname,
3650 		    sizeof(sc->sc_sensors[i].desc));
3651 
3652 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
3653 	}
3654 
3655 	if (sensor_task_register(sc, mpii_refresh_sensors, 10) == NULL)
3656 		goto bad;
3657 
3658 	sensordev_install(&sc->sc_sensordev);
3659 
3660 	return (0);
3661 
3662 bad:
3663 	free(sc->sc_sensors, M_DEVBUF, 0);
3664 
3665 	return (1);
3666 }
3667 
3668 void
3669 mpii_refresh_sensors(void *arg)
3670 {
3671 	struct mpii_softc	*sc = arg;
3672 	struct bioc_vol		bv;
3673 	int			i;
3674 
3675 	for (i = 0; i < sc->sc_nsensors; i++) {
3676 		memset(&bv, 0, sizeof(bv));
3677 		bv.bv_volid = i;
3678 		if (mpii_bio_volstate(sc, &bv))
3679 			return;
3680 		switch(bv.bv_status) {
3681 		case BIOC_SVOFFLINE:
3682 			sc->sc_sensors[i].value = SENSOR_DRIVE_FAIL;
3683 			sc->sc_sensors[i].status = SENSOR_S_CRIT;
3684 			break;
3685 		case BIOC_SVDEGRADED:
3686 			sc->sc_sensors[i].value = SENSOR_DRIVE_PFAIL;
3687 			sc->sc_sensors[i].status = SENSOR_S_WARN;
3688 			break;
3689 		case BIOC_SVREBUILD:
3690 			sc->sc_sensors[i].value = SENSOR_DRIVE_REBUILD;
3691 			sc->sc_sensors[i].status = SENSOR_S_WARN;
3692 			break;
3693 		case BIOC_SVONLINE:
3694 			sc->sc_sensors[i].value = SENSOR_DRIVE_ONLINE;
3695 			sc->sc_sensors[i].status = SENSOR_S_OK;
3696 			break;
3697 		case BIOC_SVINVALID:
3698 			/* FALLTHROUGH */
3699 		default:
3700 			sc->sc_sensors[i].value = 0; /* unknown */
3701 			sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
3702 		}
3703 	}
3704 }
3705 #endif /* SMALL_KERNEL */
3706 #endif /* NBIO > 0 */
3707