xref: /openbsd-src/sys/dev/pci/mpii.c (revision 7350f337b9e3eb4461d99580e625c7ef148d107c)
1 /*	$OpenBSD: mpii.c,v 1.117 2019/06/05 00:36:20 dlg Exp $	*/
2 /*
3  * Copyright (c) 2010, 2012 Mike Belopuhov
4  * Copyright (c) 2009 James Giannoules
5  * Copyright (c) 2005 - 2010 David Gwynne <dlg@openbsd.org>
6  * Copyright (c) 2005 - 2010 Marco Peereboom <marco@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include "bio.h"
22 
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/device.h>
26 #include <sys/ioctl.h>
27 #include <sys/malloc.h>
28 #include <sys/kernel.h>
29 #include <sys/rwlock.h>
30 #include <sys/sensors.h>
31 #include <sys/dkio.h>
32 #include <sys/tree.h>
33 #include <sys/task.h>
34 
35 #include <machine/bus.h>
36 
37 #include <dev/pci/pcireg.h>
38 #include <dev/pci/pcivar.h>
39 #include <dev/pci/pcidevs.h>
40 
41 #include <scsi/scsi_all.h>
42 #include <scsi/scsiconf.h>
43 
44 #include <dev/biovar.h>
45 
46 #include <dev/pci/mpiireg.h>
47 
/* #define MPII_DEBUG */
#ifdef MPII_DEBUG
/* printf-style debug macros, gated on the mpii_debug bitmask below */
#define DPRINTF(x...)		do { if (mpii_debug) printf(x); } while(0)
#define DNPRINTF(n,x...)	do { if (mpii_debug & (n)) printf(x); } while(0)
/* debug categories; OR these into mpii_debug to select output */
#define	MPII_D_CMD		(0x0001)
#define	MPII_D_INTR		(0x0002)
#define	MPII_D_MISC		(0x0004)
#define	MPII_D_DMA		(0x0008)
#define	MPII_D_IOCTL		(0x0010)
#define	MPII_D_RW		(0x0020)
#define	MPII_D_MEM		(0x0040)
#define	MPII_D_CCB		(0x0080)
#define	MPII_D_PPR		(0x0100)
#define	MPII_D_RAID		(0x0200)
#define	MPII_D_EVT		(0x0400)
#define MPII_D_CFG		(0x0800)
#define MPII_D_MAP		(0x1000)

/* run-time debug mask; as written here every category is enabled */
u_int32_t  mpii_debug = 0
		| MPII_D_CMD
		| MPII_D_INTR
		| MPII_D_MISC
		| MPII_D_DMA
		| MPII_D_IOCTL
		| MPII_D_RW
		| MPII_D_MEM
		| MPII_D_CCB
		| MPII_D_PPR
		| MPII_D_RAID
		| MPII_D_EVT
		| MPII_D_CFG
		| MPII_D_MAP
	;
#else
#define DPRINTF(x...)
#define DNPRINTF(n,x...)
#endif

/* size of a single request frame and the number of request slots (ccbs) */
#define MPII_REQUEST_SIZE		(512)
#define MPII_REQUEST_CREDIT		(128)

/* one bus_dma allocation: map, backing segment, size and kva mapping */
struct mpii_dmamem {
	bus_dmamap_t		mdm_map;
	bus_dma_segment_t	mdm_seg;
	size_t			mdm_size;
	caddr_t			mdm_kva;
};
/* accessors for the dmamap, device (bus) address and kernel address */
#define MPII_DMA_MAP(_mdm) ((_mdm)->mdm_map)
#define MPII_DMA_DVA(_mdm) ((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MPII_DMA_KVA(_mdm) ((void *)(_mdm)->mdm_kva)
98 
struct mpii_softc;

/*
 * reply control block: pairs one reply frame's kernel pointer with its
 * 32-bit device (dma) address so it can be handed back to the IOC.
 */
struct mpii_rcb {
	SIMPLEQ_ENTRY(mpii_rcb)	rcb_link;
	void			*rcb_reply;
	u_int32_t		rcb_reply_dva;
};

SIMPLEQ_HEAD(mpii_rcb_list, mpii_rcb);

/* one target known to the controller (physical disk, volume, spare) */
struct mpii_device {
	int			flags;
#define MPII_DF_ATTACH		(0x0001)
#define MPII_DF_DETACH		(0x0002)
#define MPII_DF_HIDDEN		(0x0004)
#define MPII_DF_UNUSED		(0x0008)
#define MPII_DF_VOLUME		(0x0010)
#define MPII_DF_VOLUME_DISK	(0x0020)
#define MPII_DF_HOT_SPARE	(0x0040)
	short			slot;
	short			percent;
	u_int16_t		dev_handle;	/* ioc-assigned device handle */
	u_int16_t		enclosure;
	u_int16_t		expander;
	u_int8_t		phy_num;
	u_int8_t		physical_port;
};

/*
 * command control block: ties a request frame (ccb_cmd, located at
 * ccb_offset in the request dma memory) to its data dmamap, system
 * message id and completion callback.
 */
struct mpii_ccb {
	struct mpii_softc	*ccb_sc;

	void *			ccb_cookie;	/* caller context, e.g. scsi_xfer */
	bus_dmamap_t		ccb_dmamap;

	bus_addr_t		ccb_offset;
	void			*ccb_cmd;
	bus_addr_t		ccb_cmd_dva;
	u_int16_t		ccb_dev_handle;
	u_int16_t		ccb_smid;	/* 1-based; smid 0 means "no ccb" */

	volatile enum {
		MPII_CCB_FREE,
		MPII_CCB_READY,
		MPII_CCB_QUEUED,
		MPII_CCB_TIMEOUT
	}			ccb_state;

	void			(*ccb_done)(struct mpii_ccb *);
	struct mpii_rcb		*ccb_rcb;	/* reply frame, if one arrived */

	SIMPLEQ_ENTRY(mpii_ccb)	ccb_link;
};

SIMPLEQ_HEAD(mpii_ccb_list, mpii_ccb);
153 
/* per-controller state */
struct mpii_softc {
	struct device		sc_dev;

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	void			*sc_ih;		/* interrupt handler cookie */

	struct scsi_link	sc_link;

	int			sc_flags;
#define MPII_F_RAID		(1<<1)
#define MPII_F_SAS3		(1<<2)
#define MPII_F_CONFIG_PENDING	(1<<3)

	struct scsibus_softc	*sc_scsibus;

	/* targets indexed by scsi target id; sc_max_devices entries */
	struct mpii_device	**sc_devs;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	struct mutex		sc_req_mtx;
	struct mutex		sc_rep_mtx;	/* serializes reply post queue draining */

	ushort			sc_reply_size;
	ushort			sc_request_size;

	ushort			sc_max_cmds;
	ushort			sc_num_reply_frames;
	u_int			sc_reply_free_qdepth;
	u_int			sc_reply_post_qdepth;

	/* number of simple SGEs in a request frame before the chain SGE */
	ushort			sc_chain_sge;
	ushort			sc_max_sgl;

	u_int8_t		sc_ioc_event_replay;

	u_int8_t		sc_porttype;
	u_int8_t		sc_max_volumes;
	u_int16_t		sc_max_devices;
	u_int16_t		sc_vd_count;
	u_int16_t		sc_vd_id_low;
	u_int16_t		sc_pd_id_start;
	int			sc_ioc_number;
	u_int8_t		sc_vf_id;

	struct mpii_ccb		*sc_ccbs;
	struct mpii_ccb_list	sc_ccb_free;
	struct mutex		sc_ccb_free_mtx;

	struct mutex		sc_ccb_mtx;
				/*
				 * this protects the ccb state and list entry
				 * between mpii_scsi_cmd and scsidone.
				 */

	struct mpii_ccb_list	sc_ccb_tmos;
	struct scsi_iohandler	sc_ccb_tmo_handler;

	struct scsi_iopool	sc_iopool;

	/* request frames, one MPII_REQUEST_SIZE slot per ccb */
	struct mpii_dmamem	*sc_requests;

	struct mpii_dmamem	*sc_replies;
	struct mpii_rcb		*sc_rcbs;

	/* reply descriptor post queue: IOC writes, host consumes */
	struct mpii_dmamem	*sc_reply_postq;
	struct mpii_reply_descr	*sc_reply_postq_kva;
	u_int			sc_reply_post_host_index;

	/* reply free queue: host returns consumed reply frames here */
	struct mpii_dmamem	*sc_reply_freeq;
	u_int			sc_reply_free_host_index;

	struct mpii_rcb_list	sc_evt_sas_queue;
	struct mutex		sc_evt_sas_mtx;
	struct task		sc_evt_sas_task;

	struct mpii_rcb_list	sc_evt_ack_queue;
	struct mutex		sc_evt_ack_mtx;
	struct scsi_iohandler	sc_evt_ack_handler;

	/* scsi ioctl from sd device */
	int			(*sc_ioctl)(struct device *, u_long, caddr_t);

	int			sc_nsensors;
	struct ksensor		*sc_sensors;
	struct ksensordev	sc_sensordev;
};
245 
/* autoconf glue */
int	mpii_match(struct device *, void *, void *);
void	mpii_attach(struct device *, struct device *, void *);
int	mpii_detach(struct device *, int);

int	mpii_intr(void *);

struct cfattach mpii_ca = {
	sizeof(struct mpii_softc),
	mpii_match,
	mpii_attach,
	mpii_detach
};

struct cfdriver mpii_cd = {
	NULL,
	"mpii",
	DV_DULL
};

/* scsi midlayer entry points */
void		mpii_scsi_cmd(struct scsi_xfer *);
void		mpii_scsi_cmd_done(struct mpii_ccb *);
int		mpii_scsi_probe(struct scsi_link *);
int		mpii_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);

struct scsi_adapter mpii_switch = {
	mpii_scsi_cmd,
	scsi_minphys,
	mpii_scsi_probe,
	NULL,
	mpii_scsi_ioctl
};
277 
/* dma memory and ccb/reply resource management */
struct mpii_dmamem *
		mpii_dmamem_alloc(struct mpii_softc *, size_t);
void		mpii_dmamem_free(struct mpii_softc *,
		    struct mpii_dmamem *);
int		mpii_alloc_ccbs(struct mpii_softc *);
void *		mpii_get_ccb(void *);
void		mpii_put_ccb(void *, void *);
int		mpii_alloc_replies(struct mpii_softc *);
int		mpii_alloc_queues(struct mpii_softc *);
void		mpii_push_reply(struct mpii_softc *, struct mpii_rcb *);
void		mpii_push_replies(struct mpii_softc *);

/* scsi command timeout recovery */
void		mpii_scsi_cmd_tmo(void *);
void		mpii_scsi_cmd_tmo_handler(void *, void *);
void		mpii_scsi_cmd_tmo_done(struct mpii_ccb *);

/* target device table management */
int		mpii_insert_dev(struct mpii_softc *, struct mpii_device *);
int		mpii_remove_dev(struct mpii_softc *, struct mpii_device *);
struct mpii_device *
		mpii_find_dev(struct mpii_softc *, u_int16_t);

/* command submission and completion */
void		mpii_start(struct mpii_softc *, struct mpii_ccb *);
int		mpii_poll(struct mpii_softc *, struct mpii_ccb *);
void		mpii_poll_done(struct mpii_ccb *);
struct mpii_rcb *
		mpii_reply(struct mpii_softc *, struct mpii_reply_descr *);

void		mpii_wait(struct mpii_softc *, struct mpii_ccb *);
void		mpii_wait_done(struct mpii_ccb *);

void		mpii_init_queues(struct mpii_softc *);

/* scatter/gather list construction (MPI2 SGE vs SAS3 IEEE SGE) */
int		mpii_load_xs(struct mpii_ccb *);
int		mpii_load_xs_sas3(struct mpii_ccb *);

/* low level register access */
u_int32_t	mpii_read(struct mpii_softc *, bus_size_t);
void		mpii_write(struct mpii_softc *, bus_size_t, u_int32_t);
int		mpii_wait_eq(struct mpii_softc *, bus_size_t, u_int32_t,
		    u_int32_t);
int		mpii_wait_ne(struct mpii_softc *, bus_size_t, u_int32_t,
		    u_int32_t);

/* IOC initialization and reset */
int		mpii_init(struct mpii_softc *);
int		mpii_reset_soft(struct mpii_softc *);
int		mpii_reset_hard(struct mpii_softc *);

/* doorbell handshake protocol */
int		mpii_handshake_send(struct mpii_softc *, void *, size_t);
int		mpii_handshake_recv_dword(struct mpii_softc *,
		    u_int32_t *);
int		mpii_handshake_recv(struct mpii_softc *, void *, size_t);

void		mpii_empty_done(struct mpii_ccb *);

/* IOC setup messages */
int		mpii_iocinit(struct mpii_softc *);
int		mpii_iocfacts(struct mpii_softc *);
int		mpii_portfacts(struct mpii_softc *);
int		mpii_portenable(struct mpii_softc *);
int		mpii_cfg_coalescing(struct mpii_softc *);
int		mpii_board_info(struct mpii_softc *);
int		mpii_target_map(struct mpii_softc *);

/* asynchronous event notification and handling */
int		mpii_eventnotify(struct mpii_softc *);
void		mpii_eventnotify_done(struct mpii_ccb *);
void		mpii_eventack(void *, void *);
void		mpii_eventack_done(struct mpii_ccb *);
void		mpii_event_process(struct mpii_softc *, struct mpii_rcb *);
void		mpii_event_done(struct mpii_softc *, struct mpii_rcb *);
void		mpii_event_sas(void *);
void		mpii_event_raid(struct mpii_softc *,
		    struct mpii_msg_event_reply *);
void		mpii_event_discovery(struct mpii_softc *,
		    struct mpii_msg_event_reply *);

void		mpii_sas_remove_device(struct mpii_softc *, u_int16_t);

/* configuration page access */
int		mpii_req_cfg_header(struct mpii_softc *, u_int8_t,
		    u_int8_t, u_int32_t, int, void *);
int		mpii_req_cfg_page(struct mpii_softc *, u_int32_t, int,
		    void *, int, void *, size_t);

int		mpii_ioctl_cache(struct scsi_link *, u_long, struct dk_cache *);

#if NBIO > 0
/* bio(4) RAID management and sensors */
int		mpii_ioctl(struct device *, u_long, caddr_t);
int		mpii_ioctl_inq(struct mpii_softc *, struct bioc_inq *);
int		mpii_ioctl_vol(struct mpii_softc *, struct bioc_vol *);
int		mpii_ioctl_disk(struct mpii_softc *, struct bioc_disk *);
int		mpii_bio_hs(struct mpii_softc *, struct bioc_disk *, int,
		    int, int *);
int		mpii_bio_disk(struct mpii_softc *, struct bioc_disk *,
		    u_int8_t);
struct mpii_device *
		mpii_find_vol(struct mpii_softc *, int);
#ifndef SMALL_KERNEL
 int		mpii_bio_volstate(struct mpii_softc *, struct bioc_vol *);
int		mpii_create_sensors(struct mpii_softc *);
void		mpii_refresh_sensors(void *);
#endif /* SMALL_KERNEL */
#endif /* NBIO > 0 */

#define DEVNAME(s)		((s)->sc_dev.dv_xname)

#define dwordsof(s)		(sizeof(s) / sizeof(u_int32_t))

/* doorbell and interrupt status register shorthands */
#define mpii_read_db(s)		mpii_read((s), MPII_DOORBELL)
#define mpii_write_db(s, v)	mpii_write((s), MPII_DOORBELL, (v))
#define mpii_read_intr(s)	mpii_read((s), MPII_INTR_STATUS)
#define mpii_write_intr(s, v)	mpii_write((s), MPII_INTR_STATUS, (v))
#define mpii_reply_waiting(s)	((mpii_read_intr((s)) & MPII_INTR_STATUS_REPLY)\
				    == MPII_INTR_STATUS_REPLY)

/* host index updates for the reply free/post queues */
#define mpii_write_reply_free(s, v) \
    bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
    MPII_REPLY_FREE_HOST_INDEX, (v))
#define mpii_write_reply_post(s, v) \
    bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
    MPII_REPLY_POST_HOST_INDEX, (v))

/* poll for the IOC-to-host / host-to-IOC doorbell interrupt bits */
#define mpii_wait_db_int(s)	mpii_wait_ne((s), MPII_INTR_STATUS, \
				    MPII_INTR_STATUS_IOC2SYSDB, 0)
#define mpii_wait_db_ack(s)	mpii_wait_eq((s), MPII_INTR_STATUS, \
				    MPII_INTR_STATUS_SYS2IOCDB, 0)
400 
401 static inline void
402 mpii_dvatosge(struct mpii_sge *sge, u_int64_t dva)
403 {
404 	htolem32(&sge->sg_addr_lo, dva);
405 	htolem32(&sge->sg_addr_hi, dva >> 32);
406 }
407 
/* flags for the mpii_req_cfg_header()/mpii_req_cfg_page() interface */
#define MPII_PG_EXTENDED	(1<<0)
#define MPII_PG_POLL		(1<<1)
#define MPII_PG_FMT		"\020" "\002POLL" "\001EXTENDED"

/* pci ids of the supported SAS2/SAS3 controllers */
static const struct pci_matchid mpii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2004 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2008 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_5 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2116_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2116_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_5 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_6 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3004 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3008 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3408 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3416 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3508 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3508_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3516 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3516_1 }
};
442 
443 int
444 mpii_match(struct device *parent, void *match, void *aux)
445 {
446 	return (pci_matchbyid(aux, mpii_devices, nitems(mpii_devices)));
447 }
448 
/*
 * attach glue: map the system interface registers, bring the IOC to
 * the operational state (iocfacts, init, iocinit), allocate the
 * request/reply machinery, then attach the scsibus.  any mid-attach
 * failure unwinds the resources acquired so far via the labels at the
 * bottom of the function.
 */
void
mpii_attach(struct device *parent, struct device *self, void *aux)
{
	struct mpii_softc		*sc = (struct mpii_softc *)self;
	struct pci_attach_args		*pa = aux;
	pcireg_t			memtype;
	int				r;
	pci_intr_handle_t		ih;
	struct scsibus_attach_args	saa;
	struct mpii_ccb			*ccb;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	mtx_init(&sc->sc_req_mtx, IPL_BIO);
	mtx_init(&sc->sc_rep_mtx, IPL_BIO);

	/* find the appropriate memory base */
	for (r = PCI_MAPREG_START; r < PCI_MAPREG_END; r += sizeof(memtype)) {
		memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, r);
		if ((memtype & PCI_MAPREG_TYPE_MASK) == PCI_MAPREG_TYPE_MEM)
			break;
	}
	if (r >= PCI_MAPREG_END) {
		printf(": unable to locate system interface registers\n");
		return;
	}

	if (pci_mapreg_map(pa, r, memtype, 0, &sc->sc_iot, &sc->sc_ioh,
	    NULL, &sc->sc_ios, 0xFF) != 0) {
		printf(": unable to map system interface registers\n");
		return;
	}

	/* disable the expansion rom */
	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_ROM_REG,
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_ROM_REG) &
	    ~PCI_ROM_ENABLE);

	/* disable interrupts until the queues are set up */
	mpii_write(sc, MPII_INTR_MASK,
	    MPII_INTR_MASK_RESET | MPII_INTR_MASK_REPLY |
	    MPII_INTR_MASK_DOORBELL);

	/* hook up the interrupt; prefer MSI, fall back to INTx */
	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
		printf(": unable to map interrupt\n");
		goto unmap;
	}
	printf(": %s\n", pci_intr_string(sc->sc_pc, ih));

	if (mpii_iocfacts(sc) != 0) {
		printf("%s: unable to get iocfacts\n", DEVNAME(sc));
		goto unmap;
	}

	if (mpii_init(sc) != 0) {
		printf("%s: unable to initialize ioc\n", DEVNAME(sc));
		goto unmap;
	}

	if (mpii_alloc_ccbs(sc) != 0) {
		/* error already printed */
		goto unmap;
	}

	if (mpii_alloc_replies(sc) != 0) {
		printf("%s: unable to allocated reply space\n", DEVNAME(sc));
		goto free_ccbs;
	}

	if (mpii_alloc_queues(sc) != 0) {
		printf("%s: unable to allocate reply queues\n", DEVNAME(sc));
		goto free_replies;
	}

	if (mpii_iocinit(sc) != 0) {
		printf("%s: unable to send iocinit\n", DEVNAME(sc));
		goto free_queues;
	}

	/* the IOC must reach the operational state before use */
	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_OPER) != 0) {
		printf("%s: state: 0x%08x\n", DEVNAME(sc),
			mpii_read_db(sc) & MPII_DOORBELL_STATE);
		printf("%s: operational state timeout\n", DEVNAME(sc));
		goto free_queues;
	}

	mpii_push_replies(sc);
	mpii_init_queues(sc);

	if (mpii_board_info(sc) != 0) {
		printf("%s: unable to get manufacturing page 0\n",
		    DEVNAME(sc));
		goto free_queues;
	}

	if (mpii_portfacts(sc) != 0) {
		printf("%s: unable to get portfacts\n", DEVNAME(sc));
		goto free_queues;
	}

	if (mpii_target_map(sc) != 0) {
		printf("%s: unable to setup target mappings\n", DEVNAME(sc));
		goto free_queues;
	}

	if (mpii_cfg_coalescing(sc) != 0) {
		printf("%s: unable to configure coalescing\n", DEVNAME(sc));
		goto free_queues;
	}

	/* XXX bail on unsupported porttype? */
	if ((sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_PHYSICAL) ||
	    (sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_VIRTUAL) ||
	    (sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_TRI_MODE)) {
		if (mpii_eventnotify(sc) != 0) {
			printf("%s: unable to enable events\n", DEVNAME(sc));
			goto free_queues;
		}
	}

	/* target table, indexed by scsi target id */
	sc->sc_devs = mallocarray(sc->sc_max_devices,
	    sizeof(struct mpii_device *), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->sc_devs == NULL) {
		printf("%s: unable to allocate memory for mpii_device\n",
		    DEVNAME(sc));
		goto free_queues;
	}

	if (mpii_portenable(sc) != 0) {
		printf("%s: unable to enable port\n", DEVNAME(sc));
		goto free_devs;
	}

	/* we should be good to go now, attach scsibus */
	sc->sc_link.adapter = &mpii_switch;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.adapter_target = -1;
	sc->sc_link.adapter_buswidth = sc->sc_max_devices;
	sc->sc_link.luns = 1;
	sc->sc_link.openings = sc->sc_max_cmds - 1;
	sc->sc_link.pool = &sc->sc_iopool;

	memset(&saa, 0, sizeof(saa));
	saa.saa_sc_link = &sc->sc_link;

	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO,
	    mpii_intr, sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL)
		goto free_devs;

	/* force autoconf to wait for the first sas discovery to complete */
	SET(sc->sc_flags, MPII_F_CONFIG_PENDING);
	config_pending_incr();

	/* config_found() returns the scsibus attached to us */
	sc->sc_scsibus = (struct scsibus_softc *) config_found(&sc->sc_dev,
	    &saa, scsiprint);

	/* enable interrupts */
	mpii_write(sc, MPII_INTR_MASK, MPII_INTR_MASK_DOORBELL
	    | MPII_INTR_MASK_RESET);

#if NBIO > 0
	if (ISSET(sc->sc_flags, MPII_F_RAID)) {
		if (bio_register(&sc->sc_dev, mpii_ioctl) != 0)
			panic("%s: controller registration failed",
			    DEVNAME(sc));
		else
			sc->sc_ioctl = mpii_ioctl;

#ifndef SMALL_KERNEL
		if (mpii_create_sensors(sc) != 0)
			printf("%s: unable to create sensors\n", DEVNAME(sc));
#endif
	}
#endif

	return;

/* error unwind: each label releases what was acquired after it */
free_devs:
	free(sc->sc_devs, M_DEVBUF, 0);
	sc->sc_devs = NULL;

free_queues:
	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_freeq),
	    0, sc->sc_reply_free_qdepth * 4, BUS_DMASYNC_POSTREAD);
	mpii_dmamem_free(sc, sc->sc_reply_freeq);

	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
	    0, sc->sc_reply_post_qdepth * 8, BUS_DMASYNC_POSTREAD);
	mpii_dmamem_free(sc, sc->sc_reply_postq);

free_replies:
	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
		0, PAGE_SIZE, BUS_DMASYNC_POSTREAD);
	mpii_dmamem_free(sc, sc->sc_replies);

free_ccbs:
	while ((ccb = mpii_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
	mpii_dmamem_free(sc, sc->sc_requests);
	free(sc->sc_ccbs, M_DEVBUF, 0);

unmap:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;
}
660 
/*
 * detach glue: tear down the interrupt handler first so it cannot
 * fire while the register window is being unmapped.  always succeeds.
 */
int
mpii_detach(struct device *self, int flags)
{
	struct mpii_softc		*sc = (struct mpii_softc *)self;

	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}
	if (sc->sc_ios != 0) {
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
		sc->sc_ios = 0;
	}

	return (0);
}
677 
/*
 * interrupt handler: drain the reply descriptor post queue under
 * sc_rep_mtx, sorting completions into a local ccb list (smid != 0)
 * and events (smid == 0).  the completion callbacks and event
 * processing run after the mutex is dropped.  returns 1 if any
 * descriptor was consumed, 0 otherwise.
 */
int
mpii_intr(void *arg)
{
	struct mpii_rcb_list		evts = SIMPLEQ_HEAD_INITIALIZER(evts);
	struct mpii_ccb_list		ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_softc		*sc = arg;
	struct mpii_reply_descr		*postq = sc->sc_reply_postq_kva, *rdp;
	struct mpii_ccb			*ccb;
	struct mpii_rcb			*rcb;
	int				smid;
	u_int				idx;
	int				rv = 0;

	mtx_enter(&sc->sc_rep_mtx);
	bus_dmamap_sync(sc->sc_dmat,
	    MPII_DMA_MAP(sc->sc_reply_postq),
	    0, sc->sc_reply_post_qdepth * sizeof(*rdp),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	idx = sc->sc_reply_post_host_index;
	for (;;) {
		rdp = &postq[idx];
		/* an UNUSED descriptor marks the end of what the ioc posted */
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		smid = lemtoh16(&rdp->smid);
		rcb = mpii_reply(sc, rdp);

		if (smid) {
			/* smid is 1-based; 0 means no ccb (async event) */
			ccb = &sc->sc_ccbs[smid - 1];
			ccb->ccb_state = MPII_CCB_READY;
			ccb->ccb_rcb = rcb;
			SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		} else
			SIMPLEQ_INSERT_TAIL(&evts, rcb, rcb_link);

		if (++idx >= sc->sc_reply_post_qdepth)
			idx = 0;

		rv = 1;
	}

	bus_dmamap_sync(sc->sc_dmat,
	    MPII_DMA_MAP(sc->sc_reply_postq),
	    0, sc->sc_reply_post_qdepth * sizeof(*rdp),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* tell the ioc how far we got, only if something was consumed */
	if (rv)
		mpii_write_reply_post(sc, sc->sc_reply_post_host_index = idx);

	mtx_leave(&sc->sc_rep_mtx);

	if (rv == 0)
		return (0);

	/* run completions and events outside the reply mutex */
	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		ccb->ccb_done(ccb);
	}
	while ((rcb = SIMPLEQ_FIRST(&evts)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&evts, rcb_link);
		mpii_event_process(sc, rcb);
	}

	return (1);
}
752 
/*
 * build the IEEE scatter/gather list for a SAS3 controller.  the
 * request frame holds sc_chain_sge simple SGEs; when the list is
 * longer, a chain element is inserted at that slot pointing at the
 * rest of the SGEs, which live further inside the same request frame.
 * returns 0 on success, 1 if the dmamap could not be loaded.
 */
int
mpii_load_xs_sas3(struct mpii_ccb *ccb)
{
	struct mpii_softc	*sc = ccb->ccb_sc;
	struct scsi_xfer	*xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io	*io = ccb->ccb_cmd;
	struct mpii_ieee_sge	*csge, *nsge, *sge;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;
	int			i, error;

	/* Request frame structure is described in the mpii_iocfacts */
	nsge = (struct mpii_ieee_sge *)(io + 1);
	csge = nsge + sc->sc_chain_sge;

	/* zero length transfer still requires an SGE */
	if (xs->datalen == 0) {
		nsge->sg_flags = MPII_IEEE_SGE_END_OF_LIST;
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL,
	    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	for (i = 0; i < dmap->dm_nsegs; i++, nsge++) {
		if (nsge == csge) {
			nsge++;
			/* offset to the chain sge from the beginning */
			io->chain_offset = ((caddr_t)csge - (caddr_t)io) / 4;
			csge->sg_flags = MPII_IEEE_SGE_CHAIN_ELEMENT |
			    MPII_IEEE_SGE_ADDR_SYSTEM;
			/* address of the next sge */
			htolem64(&csge->sg_addr, ccb->ccb_cmd_dva +
			    ((caddr_t)nsge - (caddr_t)io));
			htolem32(&csge->sg_len, (dmap->dm_nsegs - i) *
			    sizeof(*sge));
		}

		sge = nsge;
		sge->sg_flags = MPII_IEEE_SGE_ADDR_SYSTEM;
		htolem32(&sge->sg_len, dmap->dm_segs[i].ds_len);
		htolem64(&sge->sg_addr, dmap->dm_segs[i].ds_addr);
	}

	/* terminate list */
	sge->sg_flags |= MPII_IEEE_SGE_END_OF_LIST;

	/* make the data buffer visible to the device before the command */
	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
809 
/*
 * build the MPI2 scatter/gather list for a SAS2 controller.  same
 * layout strategy as mpii_load_xs_sas3(): sc_chain_sge simple SGEs
 * fit in the request frame before a chain SGE redirects to the
 * remainder.  returns 0 on success, 1 if the dmamap could not be
 * loaded.
 */
int
mpii_load_xs(struct mpii_ccb *ccb)
{
	struct mpii_softc	*sc = ccb->ccb_sc;
	struct scsi_xfer	*xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io	*io = ccb->ccb_cmd;
	struct mpii_sge		*csge, *nsge, *sge;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;
	u_int32_t		flags;
	u_int16_t		len;
	int			i, error;

	/* Request frame structure is described in the mpii_iocfacts */
	nsge = (struct mpii_sge *)(io + 1);
	csge = nsge + sc->sc_chain_sge;

	/* zero length transfer still requires an SGE */
	if (xs->datalen == 0) {
		nsge->sg_hdr = htole32(MPII_SGE_FL_TYPE_SIMPLE |
		    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL);
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL,
	    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* safe default starting flags */
	flags = MPII_SGE_FL_TYPE_SIMPLE | MPII_SGE_FL_SIZE_64;
	if (xs->flags & SCSI_DATA_OUT)
		flags |= MPII_SGE_FL_DIR_OUT;

	for (i = 0; i < dmap->dm_nsegs; i++, nsge++) {
		if (nsge == csge) {
			nsge++;
			/* offset to the chain sge from the beginning */
			io->chain_offset = ((caddr_t)csge - (caddr_t)io) / 4;
			/* length of the sgl segment we're pointing to */
			len = (dmap->dm_nsegs - i) * sizeof(*sge);
			htolem32(&csge->sg_hdr, MPII_SGE_FL_TYPE_CHAIN |
			    MPII_SGE_FL_SIZE_64 | len);
			/* address of the next sge */
			mpii_dvatosge(csge, ccb->ccb_cmd_dva +
			    ((caddr_t)nsge - (caddr_t)io));
		}

		sge = nsge;
		htolem32(&sge->sg_hdr, flags | dmap->dm_segs[i].ds_len);
		mpii_dvatosge(sge, dmap->dm_segs[i].ds_addr);
	}

	/* terminate list */
	sge->sg_hdr |= htole32(MPII_SGE_FL_LAST | MPII_SGE_FL_EOB |
	    MPII_SGE_FL_EOL);

	/* make the data buffer visible to the device before the command */
	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
874 
/*
 * midlayer probe hook, called before a target is attached.  rejects
 * unknown, hidden and unused targets; for SAS devices it fetches SAS
 * device page 0 to fill in the link's WWNs and detect ATAPI devices.
 * returns 0 to accept the target, non-zero to skip it.
 */
int
mpii_scsi_probe(struct scsi_link *link)
{
	struct mpii_softc *sc = link->adapter_softc;
	struct mpii_cfg_sas_dev_pg0 pg0;
	struct mpii_ecfg_hdr ehdr;
	struct mpii_device *dev;
	uint32_t address;
	int flags;

	if ((sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_SAS_PHYSICAL) &&
	    (sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_SAS_VIRTUAL) &&
	    (sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_TRI_MODE))
		return (ENXIO);

	dev = sc->sc_devs[link->target];
	if (dev == NULL)
		return (1);

	flags = dev->flags;
	if (ISSET(flags, MPII_DF_HIDDEN) || ISSET(flags, MPII_DF_UNUSED))
		return (1);

	/* volumes need no per-device page lookup */
	if (ISSET(flags, MPII_DF_VOLUME))
		return (0);

	memset(&ehdr, 0, sizeof(ehdr));
	ehdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_EXTENDED;
	ehdr.page_number = 0;
	ehdr.page_version = 0;
	ehdr.ext_page_type = MPII_CONFIG_REQ_EXTPAGE_TYPE_SAS_DEVICE;
	ehdr.ext_page_length = htole16(sizeof(pg0) / 4); /* dwords */

	address = MPII_PGAD_SAS_DEVICE_FORM_HANDLE | (uint32_t)dev->dev_handle;
	if (mpii_req_cfg_page(sc, address, MPII_PG_EXTENDED,
	    &ehdr, 1, &pg0, sizeof(pg0)) != 0) {
		printf("%s: unable to fetch SAS device page 0 for target %u\n",
		    DEVNAME(sc), link->target);

		return (0); /* the handle should still work */
	}

	link->port_wwn = letoh64(pg0.sas_addr);
	link->node_wwn = letoh64(pg0.device_name);

	if (ISSET(lemtoh32(&pg0.device_info),
	    MPII_CFG_SAS_DEV_0_DEVINFO_ATAPI_DEVICE)) {
		link->flags |= SDEV_ATAPI;
		link->quirks |= SDEV_ONLYBIG;
	}

	return (0);
}
928 
929 u_int32_t
930 mpii_read(struct mpii_softc *sc, bus_size_t r)
931 {
932 	u_int32_t			rv;
933 
934 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
935 	    BUS_SPACE_BARRIER_READ);
936 	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
937 
938 	DNPRINTF(MPII_D_RW, "%s: mpii_read %#lx %#x\n", DEVNAME(sc), r, rv);
939 
940 	return (rv);
941 }
942 
/*
 * write a 32-bit ioc register; the barrier orders this access against
 * subsequent register accesses.
 */
void
mpii_write(struct mpii_softc *sc, bus_size_t r, u_int32_t v)
{
	DNPRINTF(MPII_D_RW, "%s: mpii_write %#lx %#x\n", DEVNAME(sc), r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
952 
953 
954 int
955 mpii_wait_eq(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
956     u_int32_t target)
957 {
958 	int			i;
959 
960 	DNPRINTF(MPII_D_RW, "%s: mpii_wait_eq %#lx %#x %#x\n", DEVNAME(sc), r,
961 	    mask, target);
962 
963 	for (i = 0; i < 15000; i++) {
964 		if ((mpii_read(sc, r) & mask) == target)
965 			return (0);
966 		delay(1000);
967 	}
968 
969 	return (1);
970 }
971 
972 int
973 mpii_wait_ne(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
974     u_int32_t target)
975 {
976 	int			i;
977 
978 	DNPRINTF(MPII_D_RW, "%s: mpii_wait_ne %#lx %#x %#x\n", DEVNAME(sc), r,
979 	    mask, target);
980 
981 	for (i = 0; i < 15000; i++) {
982 		if ((mpii_read(sc, r) & mask) != target)
983 			return (0);
984 		delay(1000);
985 	}
986 
987 	return (1);
988 }
989 
/*
 * drive the IOC to the ready state via the doorbell state machine:
 * wait for it to leave reset, then, for up to five iterations, reset
 * the chip (soft reset if event replay is supported, hard otherwise)
 * while it is operational or faulted.  returns 0 when the IOC is
 * ready or owned by a pci peer, 1 on failure.
 */
int
mpii_init(struct mpii_softc *sc)
{
	u_int32_t		db;
	int			i;

	/* spin until the ioc leaves the reset state */
	if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_RESET) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_init timeout waiting to leave "
		    "reset state\n", DEVNAME(sc));
		return (1);
	}

	/* check current ownership */
	db = mpii_read_db(sc);
	if ((db & MPII_DOORBELL_WHOINIT) == MPII_DOORBELL_WHOINIT_PCIPEER) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_init initialised by pci peer\n",
		    DEVNAME(sc));
		return (0);
	}

	for (i = 0; i < 5; i++) {
		switch (db & MPII_DOORBELL_STATE) {
		case MPII_DOORBELL_STATE_READY:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is ready\n",
			    DEVNAME(sc));
			return (0);

		case MPII_DOORBELL_STATE_OPER:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is oper\n",
			    DEVNAME(sc));
			if (sc->sc_ioc_event_replay)
				mpii_reset_soft(sc);
			else
				mpii_reset_hard(sc);
			break;

		case MPII_DOORBELL_STATE_FAULT:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is being "
			    "reset hard\n" , DEVNAME(sc));
			mpii_reset_hard(sc);
			break;

		case MPII_DOORBELL_STATE_RESET:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init waiting to come "
			    "out of reset\n", DEVNAME(sc));
			if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
			    MPII_DOORBELL_STATE_RESET) != 0)
				return (1);
			break;
		}
		db = mpii_read_db(sc);
	}

	return (1);
}
1047 
/*
 * Ask the ioc to reset its message unit by writing the reset function
 * to the doorbell.  Fails immediately if the doorbell is already in
 * use.  Returns 0 when the ioc reaches the ready state, 1 on failure.
 */
int
mpii_reset_soft(struct mpii_softc *sc)
{
	DNPRINTF(MPII_D_MISC, "%s: mpii_reset_soft\n", DEVNAME(sc));

	if (mpii_read_db(sc) & MPII_DOORBELL_INUSE) {
		return (1);
	}

	mpii_write_db(sc,
	    MPII_DOORBELL_FUNCTION(MPII_FUNCTION_IOC_MESSAGE_UNIT_RESET));

	/* XXX LSI waits 15 sec */
	if (mpii_wait_db_ack(sc) != 0)
		return (1);

	/* XXX LSI waits 15 sec */
	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_READY) != 0)
		return (1);

	/* XXX wait for Sys2IOCDB bit to clear in HIS?? */

	return (0);
}
1073 
/*
 * Reset the ioc through the host diagnostic register.  A magic write
 * sequence must first be written to the write sequence register to
 * unlock diagnostic writes; the register is locked again afterwards.
 * Returns 0 on completion, 1 if diagnostic write access could not be
 * enabled.
 */
int
mpii_reset_hard(struct mpii_softc *sc)
{
	u_int16_t		i;

	DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard\n", DEVNAME(sc));

	mpii_write_intr(sc, 0);

	/* enable diagnostic register */
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_FLUSH);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_1);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_2);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_3);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_4);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_5);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_6);

	delay(100);

	if ((mpii_read(sc, MPII_HOSTDIAG) & MPII_HOSTDIAG_DWRE) == 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard failure to enable "
		    "diagnostic read/write\n", DEVNAME(sc));
		return(1);
	}

	/* reset ioc */
	mpii_write(sc, MPII_HOSTDIAG, MPII_HOSTDIAG_RESET_ADAPTER);

	/* 240 milliseconds */
	delay(240000);


	/* XXX this whole function should be more robust */

	/* XXX  read the host diagnostic reg until reset adapter bit clears ? */
	/* poll for up to 300 seconds (30000 * 10ms) for the reset to finish */
	for (i = 0; i < 30000; i++) {
		if ((mpii_read(sc, MPII_HOSTDIAG) &
		    MPII_HOSTDIAG_RESET_ADAPTER) == 0)
			break;
		delay(10000);
	}

	/* disable diagnostic register */
	mpii_write(sc, MPII_WRITESEQ, 0xff);

	/* XXX what else? */

	DNPRINTF(MPII_D_MISC, "%s: done with mpii_reset_hard\n", DEVNAME(sc));

	return(0);
}
1126 
/*
 * Write a request of `dwords' 32-bit words to the ioc using the
 * doorbell handshake protocol: claim the doorbell with the handshake
 * function and the dword count, wait for the ioc to interrupt and
 * acknowledge, then feed the request one dword at a time, waiting for
 * an ack after each.  Returns 0 on success, 1 if the doorbell is in
 * use or any step times out.
 */
int
mpii_handshake_send(struct mpii_softc *sc, void *buf, size_t dwords)
{
	u_int32_t		*query = buf;
	int			i;

	/* make sure the doorbell is not in use. */
	if (mpii_read_db(sc) & MPII_DOORBELL_INUSE)
		return (1);

	/* clear pending doorbell interrupts */
	if (mpii_read_intr(sc) & MPII_INTR_STATUS_IOC2SYSDB)
		mpii_write_intr(sc, 0);

	/*
	 * first write the doorbell with the handshake function and the
	 * dword count.
	 */
	mpii_write_db(sc, MPII_DOORBELL_FUNCTION(MPII_FUNCTION_HANDSHAKE) |
	    MPII_DOORBELL_DWORDS(dwords));

	/*
	 * the doorbell used bit will be set because a doorbell function has
	 * started. wait for the interrupt and then ack it.
	 */
	if (mpii_wait_db_int(sc) != 0)
		return (1);
	mpii_write_intr(sc, 0);

	/* poll for the acknowledgement. */
	if (mpii_wait_db_ack(sc) != 0)
		return (1);

	/* write the query through the doorbell. */
	for (i = 0; i < dwords; i++) {
		mpii_write_db(sc, htole32(query[i]));
		if (mpii_wait_db_ack(sc) != 0)
			return (1);
	}

	return (0);
}
1169 
/*
 * Read a single dword of a handshake reply.  The ioc presents the
 * reply 16 bits at a time in the data portion of the doorbell, raising
 * an interrupt for each half that must be acknowledged.  Returns 0 on
 * success, 1 on timeout.
 */
int
mpii_handshake_recv_dword(struct mpii_softc *sc, u_int32_t *dword)
{
	u_int16_t		*words = (u_int16_t *)dword;
	int			i;

	for (i = 0; i < 2; i++) {
		if (mpii_wait_db_int(sc) != 0)
			return (1);
		words[i] = letoh16(mpii_read_db(sc) & MPII_DOORBELL_DATA_MASK);
		mpii_write_intr(sc, 0);
	}

	return (0);
}
1185 
/*
 * Read a handshake reply of up to `dwords' 32-bit words into buf.  The
 * actual reply length is taken from the message length field of the
 * reply header (the first dword read); any excess dwords the ioc
 * offers beyond the caller's buffer are read and discarded.  Waits for
 * the doorbell to be released before returning.  Returns 0 on success,
 * 1 on timeout.
 */
int
mpii_handshake_recv(struct mpii_softc *sc, void *buf, size_t dwords)
{
	struct mpii_msg_reply	*reply = buf;
	u_int32_t		*dbuf = buf, dummy;
	int			i;

	/* get the first dword so we can read the length out of the header. */
	if (mpii_handshake_recv_dword(sc, &dbuf[0]) != 0)
		return (1);

	DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dwords: %lu reply: %d\n",
	    DEVNAME(sc), dwords, reply->msg_length);

	/*
	 * the total length, in dwords, is in the message length field of the
	 * reply header.
	 */
	for (i = 1; i < MIN(dwords, reply->msg_length); i++) {
		if (mpii_handshake_recv_dword(sc, &dbuf[i]) != 0)
			return (1);
	}

	/* if there's extra stuff to come off the ioc, discard it */
	while (i++ < reply->msg_length) {
		if (mpii_handshake_recv_dword(sc, &dummy) != 0)
			return (1);
		DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dummy read: "
		    "0x%08x\n", DEVNAME(sc), dummy);
	}

	/* wait for the doorbell used bit to be reset and clear the intr */
	if (mpii_wait_db_int(sc) != 0)
		return (1);

	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_INUSE, 0) != 0)
		return (1);

	mpii_write_intr(sc, 0);

	return (0);
}
1228 
/*
 * Completion callback for commands whose reply is consumed by the
 * submitter itself (e.g. polled requests); intentionally empty.
 */
void
mpii_empty_done(struct mpii_ccb *ccb)
{
	/* nothing to do */
}
1234 
/*
 * Fetch the IOC FACTS via the doorbell handshake and size the driver's
 * resources from the reply: command credit, number of reply frames,
 * reply post/free queue depths, request frame size and the number of
 * scatter-gather elements per request.  Returns 0 on success, 1 on
 * handshake failure or if the reply descriptor post queue is too
 * shallow to be usable.
 */
int
mpii_iocfacts(struct mpii_softc *sc)
{
	struct mpii_msg_iocfacts_request	ifq;
	struct mpii_msg_iocfacts_reply		ifp;
	int					irs;
	int					sge_size;
	u_int					qdepth;

	DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts\n", DEVNAME(sc));

	memset(&ifq, 0, sizeof(ifq));
	memset(&ifp, 0, sizeof(ifp));

	ifq.function = MPII_FUNCTION_IOC_FACTS;

	if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	sc->sc_ioc_number = ifp.ioc_number;
	sc->sc_vf_id = ifp.vf_id;

	sc->sc_max_volumes = ifp.max_volumes;
	sc->sc_max_devices = ifp.max_volumes + lemtoh16(&ifp.max_targets);

	if (ISSET(lemtoh32(&ifp.ioc_capabilities),
	    MPII_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
		SET(sc->sc_flags, MPII_F_RAID);
	if (ISSET(lemtoh32(&ifp.ioc_capabilities),
	    MPII_IOCFACTS_CAPABILITY_EVENT_REPLAY))
		sc->sc_ioc_event_replay = 1;

	/* never use more commands than the ioc is willing to credit */
	sc->sc_max_cmds = MIN(lemtoh16(&ifp.request_credit),
	    MPII_REQUEST_CREDIT);

	/* SAS3 and 3.5 controllers have different sgl layouts */
	if (ifp.msg_version_maj == 2 && ((ifp.msg_version_min == 5)
	    || (ifp.msg_version_min == 6)))
		SET(sc->sc_flags, MPII_F_SAS3);

	/*
	 * The host driver must ensure that there is at least one
	 * unused entry in the Reply Free Queue. One way to ensure
	 * that this requirement is met is to never allocate a number
	 * of reply frames that is a multiple of 16.
	 */
	sc->sc_num_reply_frames = sc->sc_max_cmds + 32;
	if (!(sc->sc_num_reply_frames % 16))
		sc->sc_num_reply_frames--;

	/* must be multiple of 16 */
	sc->sc_reply_post_qdepth = sc->sc_max_cmds +
	    sc->sc_num_reply_frames;
	sc->sc_reply_post_qdepth += 16 - (sc->sc_reply_post_qdepth % 16);

	/* scale everything down if the ioc's queue cannot hold what we want */
	qdepth = lemtoh16(&ifp.max_reply_descriptor_post_queue_depth);
	if (sc->sc_reply_post_qdepth > qdepth) {
		sc->sc_reply_post_qdepth = qdepth;
		if (sc->sc_reply_post_qdepth < 16) {
			printf("%s: RDPQ is too shallow\n", DEVNAME(sc));
			return (1);
		}
		sc->sc_max_cmds = sc->sc_reply_post_qdepth / 2 - 4;
		sc->sc_num_reply_frames = sc->sc_max_cmds + 4;
	}

	sc->sc_reply_free_qdepth = sc->sc_num_reply_frames +
	    16 - (sc->sc_num_reply_frames % 16);

	/*
	 * Our request frame for an I/O operation looks like this:
	 *
	 * +-------------------+ -.
	 * | mpii_msg_scsi_io  |  |
	 * +-------------------|  |
	 * | mpii_sge          |  |
	 * + - - - - - - - - - +  |
	 * | ...               |  > ioc_request_frame_size
	 * + - - - - - - - - - +  |
	 * | mpii_sge (tail)   |  |
	 * + - - - - - - - - - +  |
	 * | mpii_sge (csge)   |  | --.
	 * + - - - - - - - - - + -'   | chain sge points to the next sge
	 * | mpii_sge          |<-----'
	 * + - - - - - - - - - +
	 * | ...               |
	 * + - - - - - - - - - +
	 * | mpii_sge (tail)   |
	 * +-------------------+
	 * |                   |
	 * ~~~~~~~~~~~~~~~~~~~~~
	 * |                   |
	 * +-------------------+ <- sc_request_size - sizeof(scsi_sense_data)
	 * | scsi_sense_data   |
	 * +-------------------+
	 */

	/* both sizes are in 32-bit words */
	sc->sc_reply_size = ifp.reply_frame_size * 4;
	irs = lemtoh16(&ifp.ioc_request_frame_size) * 4;
	sc->sc_request_size = MPII_REQUEST_SIZE;
	/* make sure we have enough space for scsi sense data */
	if (irs > sc->sc_request_size) {
		sc->sc_request_size = irs + sizeof(struct scsi_sense_data);
		sc->sc_request_size += 16 - (sc->sc_request_size % 16);
	}

	if (ISSET(sc->sc_flags, MPII_F_SAS3)) {
		sge_size = sizeof(struct mpii_ieee_sge);
	} else {
		sge_size = sizeof(struct mpii_sge);
	}

	/* offset to the chain sge */
	sc->sc_chain_sge = (irs - sizeof(struct mpii_msg_scsi_io)) /
	    sge_size - 1;

	/*
	 * A number of simple scatter-gather elements we can fit into the
	 * request buffer after the I/O command minus the chain element.
	 */
	sc->sc_max_sgl = (sc->sc_request_size -
 	    sizeof(struct mpii_msg_scsi_io) - sizeof(struct scsi_sense_data)) /
	    sge_size - 1;

	return (0);
}
1371 
/*
 * Send the IOC INIT request via the doorbell handshake.  This hands
 * the ioc the frame sizes, queue depths and dma addresses of the
 * request, reply, reply post and reply free queues.  Returns 0 on
 * success, 1 if the handshake fails or the ioc reports a non-success
 * status or loginfo.
 */
int
mpii_iocinit(struct mpii_softc *sc)
{
	struct mpii_msg_iocinit_request		iiq;
	struct mpii_msg_iocinit_reply		iip;

	DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit\n", DEVNAME(sc));

	memset(&iiq, 0, sizeof(iiq));
	memset(&iip, 0, sizeof(iip));

	iiq.function = MPII_FUNCTION_IOC_INIT;
	iiq.whoinit = MPII_WHOINIT_HOST_DRIVER;

	/* XXX JPG do something about vf_id */
	iiq.vf_id = 0;

	iiq.msg_version_maj = 0x02;
	iiq.msg_version_min = 0x00;

	/* XXX JPG ensure compliance with some level and hard-code? */
	iiq.hdr_version_unit = 0x00;
	iiq.hdr_version_dev = 0x00;

	/* frame size is given to the ioc in 32-bit words */
	htolem16(&iiq.system_request_frame_size, sc->sc_request_size / 4);

	htolem16(&iiq.reply_descriptor_post_queue_depth,
	    sc->sc_reply_post_qdepth);

	htolem16(&iiq.reply_free_queue_depth, sc->sc_reply_free_qdepth);

	/* sense buffers live inside the request frames, so share their dva */
	htolem32(&iiq.sense_buffer_address_high,
	    MPII_DMA_DVA(sc->sc_requests) >> 32);

	htolem32(&iiq.system_reply_address_high,
	    MPII_DMA_DVA(sc->sc_replies) >> 32);

	htolem32(&iiq.system_request_frame_base_address_lo,
	    MPII_DMA_DVA(sc->sc_requests));
	htolem32(&iiq.system_request_frame_base_address_hi,
	    MPII_DMA_DVA(sc->sc_requests) >> 32);

	htolem32(&iiq.reply_descriptor_post_queue_address_lo,
	    MPII_DMA_DVA(sc->sc_reply_postq));
	htolem32(&iiq.reply_descriptor_post_queue_address_hi,
	    MPII_DMA_DVA(sc->sc_reply_postq) >> 32);

	htolem32(&iiq.reply_free_queue_address_lo,
	    MPII_DMA_DVA(sc->sc_reply_freeq));
	htolem32(&iiq.reply_free_queue_address_hi,
	    MPII_DMA_DVA(sc->sc_reply_freeq) >> 32);

	if (mpii_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &iip, dwordsof(iip)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	DNPRINTF(MPII_D_MISC, "%s:  function: 0x%02x msg_length: %d "
	    "whoinit: 0x%02x\n", DEVNAME(sc), iip.function,
	    iip.msg_length, iip.whoinit);
	DNPRINTF(MPII_D_MISC, "%s:  msg_flags: 0x%02x\n", DEVNAME(sc),
	    iip.msg_flags);
	DNPRINTF(MPII_D_MISC, "%s:  vf_id: 0x%02x vp_id: 0x%02x\n", DEVNAME(sc),
	    iip.vf_id, iip.vp_id);
	DNPRINTF(MPII_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
	    lemtoh16(&iip.ioc_status));
	DNPRINTF(MPII_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    lemtoh32(&iip.ioc_loginfo));

	if (lemtoh16(&iip.ioc_status) != MPII_IOCSTATUS_SUCCESS ||
	    lemtoh32(&iip.ioc_loginfo))
		return (1);

	return (0);
}
1454 
1455 void
1456 mpii_push_reply(struct mpii_softc *sc, struct mpii_rcb *rcb)
1457 {
1458 	u_int32_t		*rfp;
1459 	u_int			idx;
1460 
1461 	if (rcb == NULL)
1462 		return;
1463 
1464 	idx = sc->sc_reply_free_host_index;
1465 
1466 	rfp = MPII_DMA_KVA(sc->sc_reply_freeq);
1467 	htolem32(&rfp[idx], rcb->rcb_reply_dva);
1468 
1469 	if (++idx >= sc->sc_reply_free_qdepth)
1470 		idx = 0;
1471 
1472 	mpii_write_reply_free(sc, sc->sc_reply_free_host_index = idx);
1473 }
1474 
/*
 * Issue a polled PORT FACTS request and record the port type in the
 * softc.  Returns 0 on success, 1 on failure.
 */
int
mpii_portfacts(struct mpii_softc *sc)
{
	struct mpii_msg_portfacts_request	*pfq;
	struct mpii_msg_portfacts_reply		*pfp;
	struct mpii_ccb				*ccb;
	int					rv = 1;

	DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts\n", DEVNAME(sc));

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts mpii_get_ccb fail\n",
		    DEVNAME(sc));
		return (rv);
	}

	ccb->ccb_done = mpii_empty_done;
	pfq = ccb->ccb_cmd;

	memset(pfq, 0, sizeof(*pfq));

	pfq->function = MPII_FUNCTION_PORT_FACTS;
	pfq->chain_offset = 0;
	pfq->msg_flags = 0;
	pfq->port_number = 0;
	pfq->vp_id = 0;
	pfq->vf_id = 0;

	if (mpii_poll(sc, ccb) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts poll\n",
		    DEVNAME(sc));
		goto err;
	}

	if (ccb->ccb_rcb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: empty portfacts reply\n",
		    DEVNAME(sc));
		goto err;
	}

	pfp = ccb->ccb_rcb->rcb_reply;
	sc->sc_porttype = pfp->port_type;

	/* hand the reply frame back to the ioc */
	mpii_push_reply(sc, ccb->ccb_rcb);
	rv = 0;
err:
	scsi_io_put(&sc->sc_iopool, ccb);

	return (rv);
}
1526 
/*
 * iopool handler that acknowledges ioc events.  Pops one reply off the
 * event ack queue and turns it into an EVENT_ACK request using the
 * supplied ccb; if the queue held more entries the handler is re-armed
 * so another ccb will be requested from the pool.  If the queue was
 * empty the ccb is simply returned.
 */
void
mpii_eventack(void *cookie, void *io)
{
	struct mpii_softc			*sc = cookie;
	struct mpii_ccb				*ccb = io;
	struct mpii_rcb				*rcb, *next;
	struct mpii_msg_event_reply		*enp;
	struct mpii_msg_eventack_request	*eaq;

	mtx_enter(&sc->sc_evt_ack_mtx);
	rcb = SIMPLEQ_FIRST(&sc->sc_evt_ack_queue);
	if (rcb != NULL) {
		/* next is only valid when rcb != NULL; see early return below */
		next = SIMPLEQ_NEXT(rcb, rcb_link);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_ack_queue, rcb_link);
	}
	mtx_leave(&sc->sc_evt_ack_mtx);

	if (rcb == NULL) {
		scsi_io_put(&sc->sc_iopool, ccb);
		return;
	}

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;

	ccb->ccb_done = mpii_eventack_done;
	eaq = ccb->ccb_cmd;

	eaq->function = MPII_FUNCTION_EVENT_ACK;

	/* echo the event and its context back to the ioc */
	eaq->event = enp->event;
	eaq->event_context = enp->event_context;

	mpii_push_reply(sc, rcb);

	mpii_start(sc, ccb);

	if (next != NULL)
		scsi_ioh_add(&sc->sc_evt_ack_handler);
}
1566 
/*
 * Completion of an EVENT_ACK request: return the reply frame (if any)
 * to the ioc and the ccb to the pool.
 */
void
mpii_eventack_done(struct mpii_ccb *ccb)
{
	struct mpii_softc			*sc = ccb->ccb_sc;

	DNPRINTF(MPII_D_EVT, "%s: event ack done\n", DEVNAME(sc));

	mpii_push_reply(sc, ccb->ccb_rcb);
	scsi_io_put(&sc->sc_iopool, ccb);
}
1577 
1578 int
1579 mpii_portenable(struct mpii_softc *sc)
1580 {
1581 	struct mpii_msg_portenable_request	*peq;
1582 	struct mpii_ccb				*ccb;
1583 
1584 	DNPRINTF(MPII_D_MISC, "%s: mpii_portenable\n", DEVNAME(sc));
1585 
1586 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1587 	if (ccb == NULL) {
1588 		DNPRINTF(MPII_D_MISC, "%s: mpii_portenable ccb_get\n",
1589 		    DEVNAME(sc));
1590 		return (1);
1591 	}
1592 
1593 	ccb->ccb_done = mpii_empty_done;
1594 	peq = ccb->ccb_cmd;
1595 
1596 	peq->function = MPII_FUNCTION_PORT_ENABLE;
1597 	peq->vf_id = sc->sc_vf_id;
1598 
1599 	if (mpii_poll(sc, ccb) != 0) {
1600 		DNPRINTF(MPII_D_MISC, "%s: mpii_portenable poll\n",
1601 		    DEVNAME(sc));
1602 		return (1);
1603 	}
1604 
1605 	if (ccb->ccb_rcb == NULL) {
1606 		DNPRINTF(MPII_D_MISC, "%s: empty portenable reply\n",
1607 		    DEVNAME(sc));
1608 		return (1);
1609 	}
1610 
1611 	mpii_push_reply(sc, ccb->ccb_rcb);
1612 	scsi_io_put(&sc->sc_iopool, ccb);
1613 
1614 	return (0);
1615 }
1616 
1617 int
1618 mpii_cfg_coalescing(struct mpii_softc *sc)
1619 {
1620 	struct mpii_cfg_hdr			hdr;
1621 	struct mpii_cfg_ioc_pg1			ipg;
1622 
1623 	hdr.page_version = 0;
1624 	hdr.page_length = sizeof(ipg) / 4;
1625 	hdr.page_number = 1;
1626 	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC;
1627 	memset(&ipg, 0, sizeof(ipg));
1628 	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &ipg,
1629 	    sizeof(ipg)) != 0) {
1630 		DNPRINTF(MPII_D_MISC, "%s: unable to fetch IOC page 1\n"
1631 		    "page 1\n", DEVNAME(sc));
1632 		return (1);
1633 	}
1634 
1635 	if (!ISSET(lemtoh32(&ipg.flags), MPII_CFG_IOC_1_REPLY_COALESCING))
1636 		return (0);
1637 
1638 	/* Disable coalescing */
1639 	CLR(ipg.flags, htole32(MPII_CFG_IOC_1_REPLY_COALESCING));
1640 	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 0, &ipg,
1641 	    sizeof(ipg)) != 0) {
1642 		DNPRINTF(MPII_D_MISC, "%s: unable to clear coalescing\n",
1643 		    DEVNAME(sc));
1644 		return (1);
1645 	}
1646 
1647 	return (0);
1648 }
1649 
/* mask off (i.e. disable notification of) all event types */
#define MPII_EVENT_MASKALL(enq)		do {			\
		enq->event_masks[0] = 0xffffffff;		\
		enq->event_masks[1] = 0xffffffff;		\
		enq->event_masks[2] = 0xffffffff;		\
		enq->event_masks[3] = 0xffffffff;		\
	} while (0)

/* clear the mask bit for a single event so the ioc will report it */
#define MPII_EVENT_UNMASK(enq, evt)	do {			\
		enq->event_masks[evt / 32] &=			\
		    htole32(~(1 << (evt % 32)));		\
	} while (0)
1661 
/*
 * Set up the driver's event machinery (sas and ack queues, systq task
 * and iopool handler) and send an EVENT_NOTIFICATION request that
 * unmasks the sas and integrated raid events the driver handles.  The
 * request completes asynchronously via mpii_eventnotify_done().
 * Returns 0 on success, 1 if no ccb could be allocated.
 */
int
mpii_eventnotify(struct mpii_softc *sc)
{
	struct mpii_msg_event_request		*enq;
	struct mpii_ccb				*ccb;

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_eventnotify ccb_get\n",
		    DEVNAME(sc));
		return (1);
	}

	SIMPLEQ_INIT(&sc->sc_evt_sas_queue);
	mtx_init(&sc->sc_evt_sas_mtx, IPL_BIO);
	task_set(&sc->sc_evt_sas_task, mpii_event_sas, sc);

	SIMPLEQ_INIT(&sc->sc_evt_ack_queue);
	mtx_init(&sc->sc_evt_ack_mtx, IPL_BIO);
	scsi_ioh_set(&sc->sc_evt_ack_handler, &sc->sc_iopool,
	    mpii_eventack, sc);

	ccb->ccb_done = mpii_eventnotify_done;
	enq = ccb->ccb_cmd;

	enq->function = MPII_FUNCTION_EVENT_NOTIFICATION;

	/*
	 * Enable reporting of the following events:
	 *
	 * MPII_EVENT_SAS_DISCOVERY
	 * MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST
	 * MPII_EVENT_SAS_DEVICE_STATUS_CHANGE
	 * MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE
	 * MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST
	 * MPII_EVENT_IR_VOLUME
	 * MPII_EVENT_IR_PHYSICAL_DISK
	 * MPII_EVENT_IR_OPERATION_STATUS
	 */

	MPII_EVENT_MASKALL(enq);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DISCOVERY);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DEVICE_STATUS_CHANGE);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_VOLUME);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_PHYSICAL_DISK);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_OPERATION_STATUS);

	mpii_start(sc, ccb);

	return (0);
}
1716 
/*
 * Completion of the EVENT_NOTIFICATION request: recycle the ccb and
 * hand the event reply to mpii_event_process().
 */
void
mpii_eventnotify_done(struct mpii_ccb *ccb)
{
	struct mpii_softc			*sc = ccb->ccb_sc;
	struct mpii_rcb				*rcb = ccb->ccb_rcb;

	DNPRINTF(MPII_D_EVT, "%s: mpii_eventnotify_done\n", DEVNAME(sc));

	scsi_io_put(&sc->sc_iopool, ccb);
	mpii_event_process(sc, rcb);
}
1728 
/*
 * Handle an IR CONFIGURATION CHANGE LIST event: walk the list of
 * changed elements and create, remove or re-flag the corresponding
 * mpii_device entries for volumes, volume disks and hot spares.
 * Foreign configurations are ignored.
 */
void
mpii_event_raid(struct mpii_softc *sc, struct mpii_msg_event_reply *enp)
{
	struct mpii_evt_ir_cfg_change_list	*ccl;
	struct mpii_evt_ir_cfg_element		*ce;
	struct mpii_device			*dev;
	u_int16_t				type;
	int					i;

	/* the change list follows the generic event reply header */
	ccl = (struct mpii_evt_ir_cfg_change_list *)(enp + 1);
	if (ccl->num_elements == 0)
		return;

	if (ISSET(lemtoh32(&ccl->flags), MPII_EVT_IR_CFG_CHANGE_LIST_FOREIGN)) {
		/* bail on foreign configurations */
		return;
	}

	/* the elements follow the change list header */
	ce = (struct mpii_evt_ir_cfg_element *)(ccl + 1);

	for (i = 0; i < ccl->num_elements; i++, ce++) {
		type = (lemtoh16(&ce->element_flags) &
		    MPII_EVT_IR_CFG_ELEMENT_TYPE_MASK);

		switch (type) {
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME:
			switch (ce->reason_code) {
			case MPII_EVT_IR_CFG_ELEMENT_RC_ADDED:
			case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_CREATED:
				if (mpii_find_dev(sc,
				    lemtoh16(&ce->vol_dev_handle))) {
					printf("%s: device %#x is already "
					    "configured\n", DEVNAME(sc),
					    lemtoh16(&ce->vol_dev_handle));
					break;
				}
				dev = malloc(sizeof(*dev), M_DEVBUF,
				    M_NOWAIT | M_ZERO);
				if (!dev) {
					printf("%s: failed to allocate a "
					    "device structure\n", DEVNAME(sc));
					break;
				}
				SET(dev->flags, MPII_DF_VOLUME);
				dev->slot = sc->sc_vd_id_low;
				dev->dev_handle = lemtoh16(&ce->vol_dev_handle);
				if (mpii_insert_dev(sc, dev)) {
					free(dev, M_DEVBUF, sizeof *dev);
					break;
				}
				sc->sc_vd_count++;
				break;
			case MPII_EVT_IR_CFG_ELEMENT_RC_REMOVED:
			case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_DELETED:
				if (!(dev = mpii_find_dev(sc,
				    lemtoh16(&ce->vol_dev_handle))))
					break;
				mpii_remove_dev(sc, dev);
				sc->sc_vd_count--;
				break;
			}
			break;
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME_DISK:
			if (ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_PD_CREATED ||
			    ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) {
				/* there should be an underlying sas drive */
				if (!(dev = mpii_find_dev(sc,
				    lemtoh16(&ce->phys_disk_dev_handle))))
					break;
				/* promoted from a hot spare? */
				CLR(dev->flags, MPII_DF_HOT_SPARE);
				SET(dev->flags, MPII_DF_VOLUME_DISK |
				    MPII_DF_HIDDEN);
			}
			break;
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_HOT_SPARE:
			if (ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) {
				/* there should be an underlying sas drive */
				if (!(dev = mpii_find_dev(sc,
				    lemtoh16(&ce->phys_disk_dev_handle))))
					break;
				SET(dev->flags, MPII_DF_HOT_SPARE |
				    MPII_DF_HIDDEN);
			}
			break;
		}
	}
}
1820 
/*
 * Task (run on systq) that processes one queued sas event at a time.
 * DISCOVERY events are forwarded to mpii_event_discovery(); TOPOLOGY
 * CHANGE LIST events attach or detach devices per phy entry.  The
 * task re-queues itself if more events are pending, and finishes the
 * reply via mpii_event_done().
 */
void
mpii_event_sas(void *xsc)
{
	struct mpii_softc *sc = xsc;
	struct mpii_rcb *rcb, *next;
	struct mpii_msg_event_reply *enp;
	struct mpii_evt_sas_tcl		*tcl;
	struct mpii_evt_phy_entry	*pe;
	struct mpii_device		*dev;
	int				i;
	u_int16_t			handle;

	mtx_enter(&sc->sc_evt_sas_mtx);
	rcb = SIMPLEQ_FIRST(&sc->sc_evt_sas_queue);
	if (rcb != NULL) {
		/* next is only valid when rcb != NULL; see early return below */
		next = SIMPLEQ_NEXT(rcb, rcb_link);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_sas_queue, rcb_link);
	}
	mtx_leave(&sc->sc_evt_sas_mtx);

	if (rcb == NULL)
		return;
	if (next != NULL)
		task_add(systq, &sc->sc_evt_sas_task);

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;
	switch (lemtoh16(&enp->event)) {
	case MPII_EVENT_SAS_DISCOVERY:
		mpii_event_discovery(sc, enp);
		goto done;
	case MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		/* handle below */
		break;
	default:
		/* mpii_event_process() only queues the two events above */
		panic("%s: unexpected event %#x in sas event queue",
		    DEVNAME(sc), lemtoh16(&enp->event));
		/* NOTREACHED */
	}

	/* the change list and its phy entries follow the reply header */
	tcl = (struct mpii_evt_sas_tcl *)(enp + 1);
	pe = (struct mpii_evt_phy_entry *)(tcl + 1);

	for (i = 0; i < tcl->num_entries; i++, pe++) {
		switch (pe->phy_status & MPII_EVENT_SAS_TOPO_PS_RC_MASK) {
		case MPII_EVENT_SAS_TOPO_PS_RC_ADDED:
			handle = lemtoh16(&pe->dev_handle);
			if (mpii_find_dev(sc, handle)) {
				printf("%s: device %#x is already "
				    "configured\n", DEVNAME(sc), handle);
				break;
			}

			dev = malloc(sizeof(*dev), M_DEVBUF, M_WAITOK | M_ZERO);
			dev->slot = sc->sc_pd_id_start + tcl->start_phy_num + i;
			dev->dev_handle = handle;
			dev->phy_num = tcl->start_phy_num + i;
			if (tcl->enclosure_handle)
				dev->physical_port = tcl->physical_port;
			dev->enclosure = lemtoh16(&tcl->enclosure_handle);
			dev->expander = lemtoh16(&tcl->expander_handle);

			if (mpii_insert_dev(sc, dev)) {
				free(dev, M_DEVBUF, sizeof *dev);
				break;
			}

			if (sc->sc_scsibus != NULL)
				scsi_probe_target(sc->sc_scsibus, dev->slot);
			break;

		case MPII_EVENT_SAS_TOPO_PS_RC_MISSING:
			dev = mpii_find_dev(sc, lemtoh16(&pe->dev_handle));
			if (dev == NULL)
				break;

			mpii_remove_dev(sc, dev);
			mpii_sas_remove_device(sc, dev->dev_handle);
			if (sc->sc_scsibus != NULL &&
			    !ISSET(dev->flags, MPII_DF_HIDDEN)) {
				scsi_activate(sc->sc_scsibus, dev->slot, -1,
				    DVACT_DEACTIVATE);
				scsi_detach_target(sc->sc_scsibus, dev->slot,
				    DETACH_FORCE);
			}

			free(dev, M_DEVBUF, sizeof *dev);
			break;
		}
	}

done:
	mpii_event_done(sc, rcb);
}
1914 
1915 void
1916 mpii_event_discovery(struct mpii_softc *sc, struct mpii_msg_event_reply *enp)
1917 {
1918 	struct mpii_evt_sas_discovery *esd =
1919 	    (struct mpii_evt_sas_discovery *)(enp + 1);
1920 
1921 	if (esd->reason_code == MPII_EVENT_SAS_DISC_REASON_CODE_COMPLETED) {
1922 		if (esd->discovery_status != 0) {
1923 			printf("%s: sas discovery completed with status %#x\n",
1924 			    DEVNAME(sc), esd->discovery_status);
1925 		}
1926 
1927 		if (ISSET(sc->sc_flags, MPII_F_CONFIG_PENDING)) {
1928 			CLR(sc->sc_flags, MPII_F_CONFIG_PENDING);
1929 			config_pending_decr();
1930 		}
1931 	}
1932 }
1933 
/*
 * Dispatch an event reply from the ioc.  sas events are queued for the
 * systq task (which calls mpii_event_done() itself, hence the early
 * return); everything else is handled inline and the reply finished
 * here.
 */
void
mpii_event_process(struct mpii_softc *sc, struct mpii_rcb *rcb)
{
	struct mpii_msg_event_reply		*enp;

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;

	DNPRINTF(MPII_D_EVT, "%s: mpii_event_process: %#x\n", DEVNAME(sc),
	    lemtoh16(&enp->event));

	switch (lemtoh16(&enp->event)) {
	case MPII_EVENT_EVENT_CHANGE:
		/* should be properly ignored */
		break;
	case MPII_EVENT_SAS_DISCOVERY:
	case MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		/* defer to the systq task; it finishes the reply itself */
		mtx_enter(&sc->sc_evt_sas_mtx);
		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_sas_queue, rcb, rcb_link);
		mtx_leave(&sc->sc_evt_sas_mtx);
		task_add(systq, &sc->sc_evt_sas_task);
		return;
	case MPII_EVENT_SAS_DEVICE_STATUS_CHANGE:
		break;
	case MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		break;
	case MPII_EVENT_IR_VOLUME: {
		struct mpii_evt_ir_volume	*evd =
		    (struct mpii_evt_ir_volume *)(enp + 1);
		struct mpii_device		*dev;
#if NBIO > 0
		const char *vol_states[] = {
			BIOC_SVINVALID_S,
			BIOC_SVOFFLINE_S,
			BIOC_SVBUILDING_S,
			BIOC_SVONLINE_S,
			BIOC_SVDEGRADED_S,
			BIOC_SVONLINE_S,
		};
#endif

		if (cold)
			break;
		KERNEL_LOCK();
		dev = mpii_find_dev(sc, lemtoh16(&evd->vol_dev_handle));
		KERNEL_UNLOCK();
		if (dev == NULL)
			break;
#if NBIO > 0
		/* NOTE(review): prev_value/new_value index vol_states unchecked */
		if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATE_CHANGED)
			printf("%s: volume %d state changed from %s to %s\n",
			    DEVNAME(sc), dev->slot - sc->sc_vd_id_low,
			    vol_states[evd->prev_value],
			    vol_states[evd->new_value]);
#endif
		if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATUS_CHANGED &&
		    ISSET(evd->new_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC) &&
		    !ISSET(evd->prev_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
			printf("%s: started resync on a volume %d\n",
			    DEVNAME(sc), dev->slot - sc->sc_vd_id_low);
		}
		break;
	case MPII_EVENT_IR_PHYSICAL_DISK:
		break;
	case MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		mpii_event_raid(sc, enp);
		break;
	case MPII_EVENT_IR_OPERATION_STATUS: {
		struct mpii_evt_ir_status	*evs =
		    (struct mpii_evt_ir_status *)(enp + 1);
		struct mpii_device		*dev;

		KERNEL_LOCK();
		dev = mpii_find_dev(sc, lemtoh16(&evs->vol_dev_handle));
		KERNEL_UNLOCK();
		if (dev != NULL &&
		    evs->operation == MPII_EVENT_IR_RAIDOP_RESYNC)
			dev->percent = evs->percent;
		break;
		}
	default:
		DNPRINTF(MPII_D_EVT, "%s:  unhandled event 0x%02x\n",
		    DEVNAME(sc), lemtoh16(&enp->event));
	}

	mpii_event_done(sc, rcb);
}
2020 
/*
 * Finish handling an event reply: replies flagged ack_required are
 * queued for the event ack handler, others go straight back to the
 * reply free queue.
 */
void
mpii_event_done(struct mpii_softc *sc, struct mpii_rcb *rcb)
{
	struct mpii_msg_event_reply *enp = rcb->rcb_reply;

	if (enp->ack_required) {
		mtx_enter(&sc->sc_evt_ack_mtx);
		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_ack_queue, rcb, rcb_link);
		mtx_leave(&sc->sc_evt_ack_mtx);
		scsi_ioh_add(&sc->sc_evt_ack_handler);
	} else
		mpii_push_reply(sc, rcb);
}
2034 
/*
 * Take the sas device at `handle' out of service: issue a target
 * reset task management request followed by a SAS IO UNIT CONTROL
 * "remove device" operation, reusing a single ccb for both commands.
 * Fails silently if no ccb is available.
 */
void
mpii_sas_remove_device(struct mpii_softc *sc, u_int16_t handle)
{
	struct mpii_msg_scsi_task_request	*stq;
	struct mpii_msg_sas_oper_request	*soq;
	struct mpii_ccb				*ccb;

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL)
		return;

	stq = ccb->ccb_cmd;
	stq->function = MPII_FUNCTION_SCSI_TASK_MGMT;
	stq->task_type = MPII_SCSI_TASK_TARGET_RESET;
	htolem16(&stq->dev_handle, handle);

	ccb->ccb_done = mpii_empty_done;
	mpii_wait(sc, ccb);

	if (ccb->ccb_rcb != NULL)
		mpii_push_reply(sc, ccb->ccb_rcb);

	/* reuse a ccb */
	ccb->ccb_state = MPII_CCB_READY;
	ccb->ccb_rcb = NULL;

	soq = ccb->ccb_cmd;
	memset(soq, 0, sizeof(*soq));
	soq->function = MPII_FUNCTION_SAS_IO_UNIT_CONTROL;
	soq->operation = MPII_SAS_OP_REMOVE_DEVICE;
	htolem16(&soq->dev_handle, handle);

	ccb->ccb_done = mpii_empty_done;
	mpii_wait(sc, ccb);
	if (ccb->ccb_rcb != NULL)
		mpii_push_reply(sc, ccb->ccb_rcb);

	scsi_io_put(&sc->sc_iopool, ccb);
}
2074 
2075 int
2076 mpii_board_info(struct mpii_softc *sc)
2077 {
2078 	struct mpii_msg_iocfacts_request	ifq;
2079 	struct mpii_msg_iocfacts_reply		ifp;
2080 	struct mpii_cfg_manufacturing_pg0	mpg;
2081 	struct mpii_cfg_hdr			hdr;
2082 
2083 	memset(&ifq, 0, sizeof(ifq));
2084 	memset(&ifp, 0, sizeof(ifp));
2085 
2086 	ifq.function = MPII_FUNCTION_IOC_FACTS;
2087 
2088 	if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
2089 		DNPRINTF(MPII_D_MISC, "%s: failed to request ioc facts\n",
2090 		    DEVNAME(sc));
2091 		return (1);
2092 	}
2093 
2094 	if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
2095 		DNPRINTF(MPII_D_MISC, "%s: failed to receive ioc facts\n",
2096 		    DEVNAME(sc));
2097 		return (1);
2098 	}
2099 
2100 	hdr.page_version = 0;
2101 	hdr.page_length = sizeof(mpg) / 4;
2102 	hdr.page_number = 0;
2103 	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_MANUFACTURING;
2104 	memset(&mpg, 0, sizeof(mpg));
2105 	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &mpg,
2106 	    sizeof(mpg)) != 0) {
2107 		printf("%s: unable to fetch manufacturing page 0\n",
2108 		    DEVNAME(sc));
2109 		return (EINVAL);
2110 	}
2111 
2112 	printf("%s: %s, firmware %u.%u.%u.%u%s, MPI %u.%u\n", DEVNAME(sc),
2113 	    mpg.board_name, ifp.fw_version_maj, ifp.fw_version_min,
2114 	    ifp.fw_version_unit, ifp.fw_version_dev,
2115 	    ISSET(sc->sc_flags, MPII_F_RAID) ? " IR" : "",
2116 	    ifp.msg_version_maj, ifp.msg_version_min);
2117 
2118 	return (0);
2119 }
2120 
2121 int
2122 mpii_target_map(struct mpii_softc *sc)
2123 {
2124 	struct mpii_cfg_hdr			hdr;
2125 	struct mpii_cfg_ioc_pg8			ipg;
2126 	int					flags, pad = 0;
2127 
2128 	hdr.page_version = 0;
2129 	hdr.page_length = sizeof(ipg) / 4;
2130 	hdr.page_number = 8;
2131 	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC;
2132 	memset(&ipg, 0, sizeof(ipg));
2133 	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &ipg,
2134 	    sizeof(ipg)) != 0) {
2135 		printf("%s: unable to fetch ioc page 8\n",
2136 		    DEVNAME(sc));
2137 		return (EINVAL);
2138 	}
2139 
2140 	if (lemtoh16(&ipg.flags) & MPII_IOC_PG8_FLAGS_RESERVED_TARGETID_0)
2141 		pad = 1;
2142 
2143 	flags = lemtoh16(&ipg.ir_volume_mapping_flags) &
2144 	    MPII_IOC_PG8_IRFLAGS_VOLUME_MAPPING_MODE_MASK;
2145 	if (ISSET(sc->sc_flags, MPII_F_RAID)) {
2146 		if (flags == MPII_IOC_PG8_IRFLAGS_LOW_VOLUME_MAPPING) {
2147 			sc->sc_vd_id_low += pad;
2148 			pad = sc->sc_max_volumes; /* for sc_pd_id_start */
2149 		} else
2150 			sc->sc_vd_id_low = sc->sc_max_devices -
2151 			    sc->sc_max_volumes;
2152 	}
2153 
2154 	sc->sc_pd_id_start += pad;
2155 
2156 	return (0);
2157 }
2158 
2159 int
2160 mpii_req_cfg_header(struct mpii_softc *sc, u_int8_t type, u_int8_t number,
2161     u_int32_t address, int flags, void *p)
2162 {
2163 	struct mpii_msg_config_request		*cq;
2164 	struct mpii_msg_config_reply		*cp;
2165 	struct mpii_ccb				*ccb;
2166 	struct mpii_cfg_hdr			*hdr = p;
2167 	struct mpii_ecfg_hdr			*ehdr = p;
2168 	int					etype = 0;
2169 	int					rv = 0;
2170 
2171 	DNPRINTF(MPII_D_MISC, "%s: mpii_req_cfg_header type: %#x number: %x "
2172 	    "address: 0x%08x flags: 0x%b\n", DEVNAME(sc), type, number,
2173 	    address, flags, MPII_PG_FMT);
2174 
2175 	ccb = scsi_io_get(&sc->sc_iopool,
2176 	    ISSET(flags, MPII_PG_POLL) ? SCSI_NOSLEEP : 0);
2177 	if (ccb == NULL) {
2178 		DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header ccb_get\n",
2179 		    DEVNAME(sc));
2180 		return (1);
2181 	}
2182 
2183 	if (ISSET(flags, MPII_PG_EXTENDED)) {
2184 		etype = type;
2185 		type = MPII_CONFIG_REQ_PAGE_TYPE_EXTENDED;
2186 	}
2187 
2188 	cq = ccb->ccb_cmd;
2189 
2190 	cq->function = MPII_FUNCTION_CONFIG;
2191 
2192 	cq->action = MPII_CONFIG_REQ_ACTION_PAGE_HEADER;
2193 
2194 	cq->config_header.page_number = number;
2195 	cq->config_header.page_type = type;
2196 	cq->ext_page_type = etype;
2197 	htolem32(&cq->page_address, address);
2198 	htolem32(&cq->page_buffer.sg_hdr, MPII_SGE_FL_TYPE_SIMPLE |
2199 	    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL);
2200 
2201 	ccb->ccb_done = mpii_empty_done;
2202 	if (ISSET(flags, MPII_PG_POLL)) {
2203 		if (mpii_poll(sc, ccb) != 0) {
2204 			DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
2205 			    DEVNAME(sc));
2206 			return (1);
2207 		}
2208 	} else
2209 		mpii_wait(sc, ccb);
2210 
2211 	if (ccb->ccb_rcb == NULL) {
2212 		scsi_io_put(&sc->sc_iopool, ccb);
2213 		return (1);
2214 	}
2215 	cp = ccb->ccb_rcb->rcb_reply;
2216 
2217 	DNPRINTF(MPII_D_MISC, "%s:  action: 0x%02x sgl_flags: 0x%02x "
2218 	    "msg_length: %d function: 0x%02x\n", DEVNAME(sc), cp->action,
2219 	    cp->sgl_flags, cp->msg_length, cp->function);
2220 	DNPRINTF(MPII_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
2221 	    "msg_flags: 0x%02x\n", DEVNAME(sc),
2222 	    lemtoh16(&cp->ext_page_length), cp->ext_page_type,
2223 	    cp->msg_flags);
2224 	DNPRINTF(MPII_D_MISC, "%s:  vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
2225 	    cp->vp_id, cp->vf_id);
2226 	DNPRINTF(MPII_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2227 	    lemtoh16(&cp->ioc_status));
2228 	DNPRINTF(MPII_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2229 	    lemtoh32(&cp->ioc_loginfo));
2230 	DNPRINTF(MPII_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
2231 	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2232 	    cp->config_header.page_version,
2233 	    cp->config_header.page_length,
2234 	    cp->config_header.page_number,
2235 	    cp->config_header.page_type);
2236 
2237 	if (lemtoh16(&cp->ioc_status) != MPII_IOCSTATUS_SUCCESS)
2238 		rv = 1;
2239 	else if (ISSET(flags, MPII_PG_EXTENDED)) {
2240 		memset(ehdr, 0, sizeof(*ehdr));
2241 		ehdr->page_version = cp->config_header.page_version;
2242 		ehdr->page_number = cp->config_header.page_number;
2243 		ehdr->page_type = cp->config_header.page_type;
2244 		ehdr->ext_page_length = cp->ext_page_length;
2245 		ehdr->ext_page_type = cp->ext_page_type;
2246 	} else
2247 		*hdr = cp->config_header;
2248 
2249 	mpii_push_reply(sc, ccb->ccb_rcb);
2250 	scsi_io_put(&sc->sc_iopool, ccb);
2251 
2252 	return (rv);
2253 }
2254 
2255 int
2256 mpii_req_cfg_page(struct mpii_softc *sc, u_int32_t address, int flags,
2257     void *p, int read, void *page, size_t len)
2258 {
2259 	struct mpii_msg_config_request		*cq;
2260 	struct mpii_msg_config_reply		*cp;
2261 	struct mpii_ccb				*ccb;
2262 	struct mpii_cfg_hdr			*hdr = p;
2263 	struct mpii_ecfg_hdr			*ehdr = p;
2264 	caddr_t					kva;
2265 	int					page_length;
2266 	int					rv = 0;
2267 
2268 	DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page address: %d read: %d "
2269 	    "type: %x\n", DEVNAME(sc), address, read, hdr->page_type);
2270 
2271 	page_length = ISSET(flags, MPII_PG_EXTENDED) ?
2272 	    lemtoh16(&ehdr->ext_page_length) : hdr->page_length;
2273 
2274 	if (len > sc->sc_request_size - sizeof(*cq) || len < page_length * 4)
2275 		return (1);
2276 
2277 	ccb = scsi_io_get(&sc->sc_iopool,
2278 	    ISSET(flags, MPII_PG_POLL) ? SCSI_NOSLEEP : 0);
2279 	if (ccb == NULL) {
2280 		DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page ccb_get\n",
2281 		    DEVNAME(sc));
2282 		return (1);
2283 	}
2284 
2285 	cq = ccb->ccb_cmd;
2286 
2287 	cq->function = MPII_FUNCTION_CONFIG;
2288 
2289 	cq->action = (read ? MPII_CONFIG_REQ_ACTION_PAGE_READ_CURRENT :
2290 	    MPII_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT);
2291 
2292 	if (ISSET(flags, MPII_PG_EXTENDED)) {
2293 		cq->config_header.page_version = ehdr->page_version;
2294 		cq->config_header.page_number = ehdr->page_number;
2295 		cq->config_header.page_type = ehdr->page_type;
2296 		cq->ext_page_len = ehdr->ext_page_length;
2297 		cq->ext_page_type = ehdr->ext_page_type;
2298 	} else
2299 		cq->config_header = *hdr;
2300 	cq->config_header.page_type &= MPII_CONFIG_REQ_PAGE_TYPE_MASK;
2301 	htolem32(&cq->page_address, address);
2302 	htolem32(&cq->page_buffer.sg_hdr, MPII_SGE_FL_TYPE_SIMPLE |
2303 	    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL |
2304 	    MPII_SGE_FL_SIZE_64 | (page_length * 4) |
2305 	    (read ? MPII_SGE_FL_DIR_IN : MPII_SGE_FL_DIR_OUT));
2306 
2307 	/* bounce the page via the request space to avoid more bus_dma games */
2308 	mpii_dvatosge(&cq->page_buffer, ccb->ccb_cmd_dva +
2309 	    sizeof(struct mpii_msg_config_request));
2310 
2311 	kva = ccb->ccb_cmd;
2312 	kva += sizeof(struct mpii_msg_config_request);
2313 
2314 	if (!read)
2315 		memcpy(kva, page, len);
2316 
2317 	ccb->ccb_done = mpii_empty_done;
2318 	if (ISSET(flags, MPII_PG_POLL)) {
2319 		if (mpii_poll(sc, ccb) != 0) {
2320 			DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
2321 			    DEVNAME(sc));
2322 			return (1);
2323 		}
2324 	} else
2325 		mpii_wait(sc, ccb);
2326 
2327 	if (ccb->ccb_rcb == NULL) {
2328 		scsi_io_put(&sc->sc_iopool, ccb);
2329 		return (1);
2330 	}
2331 	cp = ccb->ccb_rcb->rcb_reply;
2332 
2333 	DNPRINTF(MPII_D_MISC, "%s:  action: 0x%02x msg_length: %d "
2334 	    "function: 0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length,
2335 	    cp->function);
2336 	DNPRINTF(MPII_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
2337 	    "msg_flags: 0x%02x\n", DEVNAME(sc),
2338 	    lemtoh16(&cp->ext_page_length), cp->ext_page_type,
2339 	    cp->msg_flags);
2340 	DNPRINTF(MPII_D_MISC, "%s:  vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
2341 	    cp->vp_id, cp->vf_id);
2342 	DNPRINTF(MPII_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2343 	    lemtoh16(&cp->ioc_status));
2344 	DNPRINTF(MPII_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2345 	    lemtoh32(&cp->ioc_loginfo));
2346 	DNPRINTF(MPII_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
2347 	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2348 	    cp->config_header.page_version,
2349 	    cp->config_header.page_length,
2350 	    cp->config_header.page_number,
2351 	    cp->config_header.page_type);
2352 
2353 	if (lemtoh16(&cp->ioc_status) != MPII_IOCSTATUS_SUCCESS)
2354 		rv = 1;
2355 	else if (read)
2356 		memcpy(page, kva, len);
2357 
2358 	mpii_push_reply(sc, ccb->ccb_rcb);
2359 	scsi_io_put(&sc->sc_iopool, ccb);
2360 
2361 	return (rv);
2362 }
2363 
/*
 * Turn a reply post queue descriptor into the rcb for the reply frame
 * it refers to.  Only address reply descriptors carry the dva of a
 * full reply frame; the frame index is recovered from the offset into
 * the reply frame dmamem and that frame is synced for the cpu.  Other
 * descriptor types carry their payload in the descriptor itself and
 * yield a NULL rcb.  The consumed descriptor is filled with 0xff (the
 * unused pattern) and its post queue slot synced back so it is not
 * processed again.
 */
struct mpii_rcb *
mpii_reply(struct mpii_softc *sc, struct mpii_reply_descr *rdp)
{
	struct mpii_rcb		*rcb = NULL;
	u_int32_t		rfid;

	DNPRINTF(MPII_D_INTR, "%s: mpii_reply\n", DEVNAME(sc));

	if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
	    MPII_REPLY_DESCR_ADDRESS_REPLY) {
		/* frame index = offset of the dva within the reply dmamem */
		rfid = (lemtoh32(&rdp->frame_addr) -
		    (u_int32_t)MPII_DMA_DVA(sc->sc_replies)) /
		    sc->sc_reply_size;

		bus_dmamap_sync(sc->sc_dmat,
		    MPII_DMA_MAP(sc->sc_replies), sc->sc_reply_size * rfid,
		    sc->sc_reply_size, BUS_DMASYNC_POSTREAD);

		rcb = &sc->sc_rcbs[rfid];
	}

	/* mark the descriptor slot as empty again */
	memset(rdp, 0xff, sizeof(*rdp));

	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
	    8 * sc->sc_reply_post_host_index, 8,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	return (rcb);
}
2393 
2394 struct mpii_dmamem *
2395 mpii_dmamem_alloc(struct mpii_softc *sc, size_t size)
2396 {
2397 	struct mpii_dmamem	*mdm;
2398 	int			nsegs;
2399 
2400 	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_NOWAIT | M_ZERO);
2401 	if (mdm == NULL)
2402 		return (NULL);
2403 
2404 	mdm->mdm_size = size;
2405 
2406 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
2407 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
2408 		goto mdmfree;
2409 
2410 	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg,
2411 	    1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
2412 		goto destroy;
2413 
2414 	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
2415 	    &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0)
2416 		goto free;
2417 
2418 	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
2419 	    NULL, BUS_DMA_NOWAIT) != 0)
2420 		goto unmap;
2421 
2422 	return (mdm);
2423 
2424 unmap:
2425 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
2426 free:
2427 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
2428 destroy:
2429 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
2430 mdmfree:
2431 	free(mdm, M_DEVBUF, sizeof *mdm);
2432 
2433 	return (NULL);
2434 }
2435 
/*
 * Tear down a region allocated with mpii_dmamem_alloc.  The steps
 * mirror the allocation in reverse order: unload, unmap, free the
 * segment, destroy the map, then free the wrapper itself.
 */
void
mpii_dmamem_free(struct mpii_softc *sc, struct mpii_dmamem *mdm)
{
	DNPRINTF(MPII_D_MEM, "%s: mpii_dmamem_free %p\n", DEVNAME(sc), mdm);

	bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF, sizeof *mdm);
}
2447 
2448 int
2449 mpii_insert_dev(struct mpii_softc *sc, struct mpii_device *dev)
2450 {
2451 	int		slot;	/* initial hint */
2452 
2453 	if (dev == NULL || dev->slot < 0)
2454 		return (1);
2455 	slot = dev->slot;
2456 
2457 	while (slot < sc->sc_max_devices && sc->sc_devs[slot] != NULL)
2458 		slot++;
2459 
2460 	if (slot >= sc->sc_max_devices)
2461 		return (1);
2462 
2463 	dev->slot = slot;
2464 	sc->sc_devs[slot] = dev;
2465 
2466 	return (0);
2467 }
2468 
2469 int
2470 mpii_remove_dev(struct mpii_softc *sc, struct mpii_device *dev)
2471 {
2472 	int			i;
2473 
2474 	if (dev == NULL)
2475 		return (1);
2476 
2477 	for (i = 0; i < sc->sc_max_devices; i++) {
2478 		if (sc->sc_devs[i] == NULL)
2479 			continue;
2480 
2481 		if (sc->sc_devs[i]->dev_handle == dev->dev_handle) {
2482 			sc->sc_devs[i] = NULL;
2483 			return (0);
2484 		}
2485 	}
2486 
2487 	return (1);
2488 }
2489 
2490 struct mpii_device *
2491 mpii_find_dev(struct mpii_softc *sc, u_int16_t handle)
2492 {
2493 	int			i;
2494 
2495 	for (i = 0; i < sc->sc_max_devices; i++) {
2496 		if (sc->sc_devs[i] == NULL)
2497 			continue;
2498 
2499 		if (sc->sc_devs[i]->dev_handle == handle)
2500 			return (sc->sc_devs[i]);
2501 	}
2502 
2503 	return (NULL);
2504 }
2505 
/*
 * Allocate and initialise the ccbs and the dma memory for their
 * request frames, then seed the iopool with them.  smid 0 cannot be
 * used, so sc_max_cmds frames yield sc_max_cmds - 1 ccbs, with ccb
 * index i - 1 using smid i and request frame i.  Returns non-zero on
 * failure, after unwinding whatever was set up.
 */
int
mpii_alloc_ccbs(struct mpii_softc *sc)
{
	struct mpii_ccb		*ccb;
	u_int8_t		*cmd;
	int			i;

	SIMPLEQ_INIT(&sc->sc_ccb_free);
	SIMPLEQ_INIT(&sc->sc_ccb_tmos);
	mtx_init(&sc->sc_ccb_free_mtx, IPL_BIO);
	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
	scsi_ioh_set(&sc->sc_ccb_tmo_handler, &sc->sc_iopool,
	    mpii_scsi_cmd_tmo_handler, sc);

	sc->sc_ccbs = mallocarray((sc->sc_max_cmds-1), sizeof(*ccb),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->sc_ccbs == NULL) {
		printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
		return (1);
	}

	/* one contiguous dma region holds all request frames */
	sc->sc_requests = mpii_dmamem_alloc(sc,
	    sc->sc_request_size * sc->sc_max_cmds);
	if (sc->sc_requests == NULL) {
		printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
		goto free_ccbs;
	}
	cmd = MPII_DMA_KVA(sc->sc_requests);

	/*
	 * we have sc->sc_max_cmds system request message
	 * frames, but smid zero cannot be used. so we then
	 * have (sc->sc_max_cmds - 1) number of ccbs
	 */
	for (i = 1; i < sc->sc_max_cmds; i++) {
		ccb = &sc->sc_ccbs[i - 1];

		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, sc->sc_max_sgl,
		    MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
		    &ccb->ccb_dmamap) != 0) {
			printf("%s: unable to create dma map\n", DEVNAME(sc));
			goto free_maps;
		}

		ccb->ccb_sc = sc;
		/* the smid is stored little-endian, ready for the hardware */
		htolem16(&ccb->ccb_smid, i);
		ccb->ccb_offset = sc->sc_request_size * i;

		ccb->ccb_cmd = &cmd[ccb->ccb_offset];
		ccb->ccb_cmd_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_requests) +
		    ccb->ccb_offset;

		DNPRINTF(MPII_D_CCB, "%s: mpii_alloc_ccbs(%d) ccb: %p map: %p "
		    "sc: %p smid: %#x offs: %#lx cmd: %p dva: %#lx\n",
		    DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc,
		    ccb->ccb_smid, ccb->ccb_offset, ccb->ccb_cmd,
		    ccb->ccb_cmd_dva);

		mpii_put_ccb(sc, ccb);
	}

	scsi_iopool_init(&sc->sc_iopool, sc, mpii_get_ccb, mpii_put_ccb);

	return (0);

free_maps:
	/* drain the free list, destroying each ccb's dma map */
	while ((ccb = mpii_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);

	mpii_dmamem_free(sc, sc->sc_requests);
free_ccbs:
	free(sc->sc_ccbs, M_DEVBUF, (sc->sc_max_cmds-1) * sizeof(*ccb));

	return (1);
}
2582 
/*
 * iopool put backend: scrub a ccb back to its pristine state (free
 * state, no cookie/done/reply, zeroed request frame) and return it to
 * the free list.  The kernel lock is dropped around the mutex-guarded
 * list insertion and retaken before returning; NOTE(review) this
 * mirrors the dance in mpii_get_ccb — presumably to avoid holding the
 * kernel lock across the ccb free list mutex; confirm against the
 * iopool callers.
 */
void
mpii_put_ccb(void *cookie, void *io)
{
	struct mpii_softc	*sc = cookie;
	struct mpii_ccb		*ccb = io;

	DNPRINTF(MPII_D_CCB, "%s: mpii_put_ccb %p\n", DEVNAME(sc), ccb);

	ccb->ccb_state = MPII_CCB_FREE;
	ccb->ccb_cookie = NULL;
	ccb->ccb_done = NULL;
	ccb->ccb_rcb = NULL;
	memset(ccb->ccb_cmd, 0, sc->sc_request_size);

	KERNEL_UNLOCK();
	mtx_enter(&sc->sc_ccb_free_mtx);
	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link);
	mtx_leave(&sc->sc_ccb_free_mtx);
	KERNEL_LOCK();
}
2603 
/*
 * iopool get backend: take a ccb off the free list, marking it ready.
 * Returns NULL when the list is empty.  As in mpii_put_ccb, the
 * kernel lock is dropped around the mutex-guarded list manipulation
 * and retaken afterwards.
 */
void *
mpii_get_ccb(void *cookie)
{
	struct mpii_softc	*sc = cookie;
	struct mpii_ccb		*ccb;

	KERNEL_UNLOCK();

	mtx_enter(&sc->sc_ccb_free_mtx);
	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free);
	if (ccb != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link);
		ccb->ccb_state = MPII_CCB_READY;
	}
	mtx_leave(&sc->sc_ccb_free_mtx);

	KERNEL_LOCK();

	DNPRINTF(MPII_D_CCB, "%s: mpii_get_ccb %p\n", DEVNAME(sc), ccb);

	return (ccb);
}
2626 
2627 int
2628 mpii_alloc_replies(struct mpii_softc *sc)
2629 {
2630 	DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_replies\n", DEVNAME(sc));
2631 
2632 	sc->sc_rcbs = mallocarray(sc->sc_num_reply_frames,
2633 	    sizeof(struct mpii_rcb), M_DEVBUF, M_NOWAIT);
2634 	if (sc->sc_rcbs == NULL)
2635 		return (1);
2636 
2637 	sc->sc_replies = mpii_dmamem_alloc(sc, sc->sc_reply_size *
2638 	    sc->sc_num_reply_frames);
2639 	if (sc->sc_replies == NULL) {
2640 		free(sc->sc_rcbs, M_DEVBUF,
2641 		    sc->sc_num_reply_frames * sizeof(struct mpii_rcb));
2642 		return (1);
2643 	}
2644 
2645 	return (0);
2646 }
2647 
2648 void
2649 mpii_push_replies(struct mpii_softc *sc)
2650 {
2651 	struct mpii_rcb		*rcb;
2652 	caddr_t			kva = MPII_DMA_KVA(sc->sc_replies);
2653 	int			i;
2654 
2655 	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
2656 	    0, sc->sc_reply_size * sc->sc_num_reply_frames,
2657 	    BUS_DMASYNC_PREREAD);
2658 
2659 	for (i = 0; i < sc->sc_num_reply_frames; i++) {
2660 		rcb = &sc->sc_rcbs[i];
2661 
2662 		rcb->rcb_reply = kva + sc->sc_reply_size * i;
2663 		rcb->rcb_reply_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) +
2664 		    sc->sc_reply_size * i;
2665 		mpii_push_reply(sc, rcb);
2666 	}
2667 }
2668 
/*
 * Hand a prepared ccb to the hardware.  A request descriptor is built
 * from the request's function (SCSI I/O requests carry their device
 * handle, task management goes high priority, everything else is
 * default) and written to the request descriptor post registers.  The
 * descriptor is accessed through rdp as raw machine words: on LP64 it
 * is posted with a single 8-byte write, otherwise as two 4-byte
 * writes which must not be interleaved with another cpu's, hence
 * sc_req_mtx and the write barriers.
 */
void
mpii_start(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	struct mpii_request_header	*rhp;
	struct mpii_request_descr	descr;
	u_long				 *rdp = (u_long *)&descr;

	DNPRINTF(MPII_D_RW, "%s: mpii_start %#lx\n", DEVNAME(sc),
	    ccb->ccb_cmd_dva);

	/* make the request frame visible to the hardware */
	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, sc->sc_request_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ccb->ccb_state = MPII_CCB_QUEUED;

	rhp = ccb->ccb_cmd;

	memset(&descr, 0, sizeof(descr));

	switch (rhp->function) {
	case MPII_FUNCTION_SCSI_IO_REQUEST:
		descr.request_flags = MPII_REQ_DESCR_SCSI_IO;
		descr.dev_handle = htole16(ccb->ccb_dev_handle);
		break;
	case MPII_FUNCTION_SCSI_TASK_MGMT:
		descr.request_flags = MPII_REQ_DESCR_HIGH_PRIORITY;
		break;
	default:
		descr.request_flags = MPII_REQ_DESCR_DEFAULT;
	}

	descr.vf_id = sc->sc_vf_id;
	descr.smid = ccb->ccb_smid;

	DNPRINTF(MPII_D_RW, "%s:   MPII_REQ_DESCR_POST_LOW (0x%08x) write "
	    "0x%08lx\n", DEVNAME(sc), MPII_REQ_DESCR_POST_LOW, *rdp);

	DNPRINTF(MPII_D_RW, "%s:   MPII_REQ_DESCR_POST_HIGH (0x%08x) write "
	    "0x%08lx\n", DEVNAME(sc), MPII_REQ_DESCR_POST_HIGH, *(rdp+1));

#if defined(__LP64__)
	/* the whole descriptor fits in one atomic 64-bit register write */
	bus_space_write_raw_8(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, *rdp);
#else
	/* two 32-bit writes; serialise so descriptors can't interleave */
	mtx_enter(&sc->sc_req_mtx);
	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, rdp[0]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, 8, BUS_SPACE_BARRIER_WRITE);

	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_HIGH, rdp[1]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, 8, BUS_SPACE_BARRIER_WRITE);
	mtx_leave(&sc->sc_req_mtx);
#endif
}
2727 
/*
 * Issue a ccb and busy-wait for its completion, for use when sleeping
 * is not possible.  The ccb's done handler and cookie are temporarily
 * replaced: mpii_poll_done clears rv (which lives on our stack) via
 * the cookie when the command completes, at which point the original
 * cookie is restored and the original done handler invoked.  Always
 * returns 0 once the command has completed.
 */
int
mpii_poll(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	void				(*done)(struct mpii_ccb *);
	void				*cookie;
	int				rv = 1;

	DNPRINTF(MPII_D_INTR, "%s: mpii_poll\n", DEVNAME(sc));

	/* stash the caller's completion state so it can be restored */
	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mpii_poll_done;
	ccb->ccb_cookie = &rv;

	mpii_start(sc, ccb);

	while (rv == 1) {
		/* avoid excessive polling */
		if (mpii_reply_waiting(sc))
			mpii_intr(sc);
		else
			delay(10);
	}

	ccb->ccb_cookie = cookie;
	done(ccb);

	return (0);
}
2758 
/*
 * Completion handler used by mpii_poll: clear the poll flag that the
 * poller is spinning on (passed via the ccb cookie).
 */
void
mpii_poll_done(struct mpii_ccb *ccb)
{
	int				*rv = ccb->ccb_cookie;

	*rv = 0;
}
2766 
2767 int
2768 mpii_alloc_queues(struct mpii_softc *sc)
2769 {
2770 	u_int32_t		*rfp;
2771 	int			i;
2772 
2773 	DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_queues\n", DEVNAME(sc));
2774 
2775 	sc->sc_reply_freeq = mpii_dmamem_alloc(sc,
2776 	    sc->sc_reply_free_qdepth * sizeof(*rfp));
2777 	if (sc->sc_reply_freeq == NULL)
2778 		return (1);
2779 	rfp = MPII_DMA_KVA(sc->sc_reply_freeq);
2780 	for (i = 0; i < sc->sc_num_reply_frames; i++) {
2781 		rfp[i] = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) +
2782 		    sc->sc_reply_size * i;
2783 	}
2784 
2785 	sc->sc_reply_postq = mpii_dmamem_alloc(sc,
2786 	    sc->sc_reply_post_qdepth * sizeof(struct mpii_reply_descr));
2787 	if (sc->sc_reply_postq == NULL)
2788 		goto free_reply_freeq;
2789 	sc->sc_reply_postq_kva = MPII_DMA_KVA(sc->sc_reply_postq);
2790 	memset(sc->sc_reply_postq_kva, 0xff, sc->sc_reply_post_qdepth *
2791 	    sizeof(struct mpii_reply_descr));
2792 
2793 	return (0);
2794 
2795 free_reply_freeq:
2796 	mpii_dmamem_free(sc, sc->sc_reply_freeq);
2797 	return (1);
2798 }
2799 
/*
 * Tell the IOC where the queue indices start: the free queue index
 * begins at the last slot (the whole queue is full of frames) and the
 * post queue index at zero.
 */
void
mpii_init_queues(struct mpii_softc *sc)
{
	DNPRINTF(MPII_D_MISC, "%s:  mpii_init_queues\n", DEVNAME(sc));

	sc->sc_reply_free_host_index = sc->sc_reply_free_qdepth - 1;
	sc->sc_reply_post_host_index = 0;
	mpii_write_reply_free(sc, sc->sc_reply_free_host_index);
	mpii_write_reply_post(sc, sc->sc_reply_post_host_index);
}
2810 
/*
 * Issue a ccb and sleep until it completes.  The ccb's done handler
 * and cookie are temporarily swapped for mpii_wait_done and a mutex
 * on our stack; mpii_wait_done clears the cookie under that mutex and
 * wakes us.  Once woken, the original cookie is restored and the
 * original done handler invoked.
 */
void
mpii_wait(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	struct mutex		mtx = MUTEX_INITIALIZER(IPL_BIO);
	void			(*done)(struct mpii_ccb *);
	void			*cookie;

	/* stash the caller's completion state so it can be restored */
	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mpii_wait_done;
	ccb->ccb_cookie = &mtx;

	/* XXX this will wait forever for the ccb to complete */

	mpii_start(sc, ccb);

	/* ccb_cookie doubles as the "still in flight" flag */
	mtx_enter(&mtx);
	while (ccb->ccb_cookie != NULL)
		msleep(ccb, &mtx, PRIBIO, "mpiiwait", 0);
	mtx_leave(&mtx);

	ccb->ccb_cookie = cookie;
	done(ccb);
}
2836 
/*
 * Completion handler used by mpii_wait: clear the cookie (the sleep
 * condition) under the waiter's mutex and wake the waiter.
 */
void
mpii_wait_done(struct mpii_ccb *ccb)
{
	struct mutex		*mtx = ccb->ccb_cookie;

	mtx_enter(mtx);
	ccb->ccb_cookie = NULL;
	mtx_leave(mtx);

	wakeup_one(ccb);
}
2848 
/*
 * scsi_xfer entry point: translate an xs into an MPI SCSI I/O request
 * and submit it.  Oversized CDBs are failed with ILLEGAL REQUEST
 * sense, vanished targets with selection timeout.  The kernel lock is
 * dropped for the bulk of the submission path and retaken before
 * returning (scsi_done on the error path is called with it held).
 * Completion is via mpii_scsi_cmd_done, with mpii_scsi_cmd_tmo armed
 * as the watchdog for non-polled commands.
 */
void
mpii_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct mpii_softc	*sc = link->adapter_softc;
	struct mpii_ccb		*ccb = xs->io;
	struct mpii_msg_scsi_io	*io;
	struct mpii_device	*dev;
	int			 ret;

	DNPRINTF(MPII_D_CMD, "%s: mpii_scsi_cmd\n", DEVNAME(sc));

	if (xs->cmdlen > MPII_CDB_LEN) {
		/* fail oversized CDBs with hand-built ILLEGAL REQUEST sense */
		DNPRINTF(MPII_D_CMD, "%s: CDB too big %d\n",
		    DEVNAME(sc), xs->cmdlen);
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.error_code = SSD_ERRCODE_VALID | 0x70;
		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.add_sense_code = 0x20;
		xs->error = XS_SENSE;
		scsi_done(xs);
		return;
	}

	if ((dev = sc->sc_devs[link->target]) == NULL) {
		/* device no longer exists */
		xs->error = XS_SELTIMEOUT;
		scsi_done(xs);
		return;
	}

	KERNEL_UNLOCK();

	DNPRINTF(MPII_D_CMD, "%s: ccb_smid: %d xs->flags: 0x%x\n",
	    DEVNAME(sc), ccb->ccb_smid, xs->flags);

	ccb->ccb_cookie = xs;
	ccb->ccb_done = mpii_scsi_cmd_done;
	ccb->ccb_dev_handle = dev->dev_handle;

	io = ccb->ccb_cmd;
	memset(io, 0, sizeof(*io));
	io->function = MPII_FUNCTION_SCSI_IO_REQUEST;
	io->sense_buffer_length = sizeof(xs->sense);
	io->sgl_offset0 = sizeof(struct mpii_msg_scsi_io) / 4;
	htolem16(&io->io_flags, xs->cmdlen);
	htolem16(&io->dev_handle, ccb->ccb_dev_handle);
	/* LUN field is big-endian */
	htobem16(&io->lun[0], link->lun);

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}

	io->tagging = MPII_SCSIIO_ATTR_SIMPLE_Q;

	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	htolem32(&io->data_length, xs->datalen);

	/* sense data is at the end of a request */
	htolem32(&io->sense_buffer_low_address, ccb->ccb_cmd_dva +
	    sc->sc_request_size - sizeof(struct scsi_sense_data));

	/* map the data buffer into the SGL format this IOC expects */
	if (ISSET(sc->sc_flags, MPII_F_SAS3))
		ret = mpii_load_xs_sas3(ccb);
	else
		ret = mpii_load_xs(ccb);

	if (ret != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		goto done;
	}

	timeout_set(&xs->stimeout, mpii_scsi_cmd_tmo, ccb);
	if (xs->flags & SCSI_POLL) {
		if (mpii_poll(sc, ccb) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			goto done;
		}
	} else {
		timeout_add_msec(&xs->stimeout, xs->timeout);
		mpii_start(sc, ccb);
	}

	KERNEL_LOCK();
	return;

done:
	KERNEL_LOCK();
	scsi_done(xs);
}
2948 
/*
 * Watchdog for a SCSI command.  If the ccb is still queued it is
 * moved into the timeout list, and the timeout handler (which will
 * issue a target reset) is scheduled via the iopool.
 */
void
mpii_scsi_cmd_tmo(void *xccb)
{
	struct mpii_ccb		*ccb = xccb;
	struct mpii_softc	*sc = ccb->ccb_sc;

	printf("%s: mpii_scsi_cmd_tmo (0x%08x)\n", DEVNAME(sc),
	    mpii_read_db(sc));

	/* only flag the ccb if completion hasn't raced us */
	mtx_enter(&sc->sc_ccb_mtx);
	if (ccb->ccb_state == MPII_CCB_QUEUED) {
		ccb->ccb_state = MPII_CCB_TIMEOUT;
		SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_tmos, ccb, ccb_link);
	}
	mtx_leave(&sc->sc_ccb_mtx);

	scsi_ioh_add(&sc->sc_ccb_tmo_handler);
}
2967 
/*
 * Deferred timeout processing, run by the iopool with a spare ccb
 * (tccb).  Pop one timed-out ccb off the list and use tccb to send a
 * target reset for its device handle.  tccb's done handler chains
 * back into this function (via mpii_scsi_cmd_tmo_done) so queued
 * timeouts are handled one after another; when the list is empty the
 * spare ccb is finally returned to the pool.
 */
void
mpii_scsi_cmd_tmo_handler(void *cookie, void *io)
{
	struct mpii_softc			*sc = cookie;
	struct mpii_ccb				*tccb = io;
	struct mpii_ccb				*ccb;
	struct mpii_msg_scsi_task_request	*stq;

	mtx_enter(&sc->sc_ccb_mtx);
	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_tmos);
	if (ccb != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_tmos, ccb_link);
		ccb->ccb_state = MPII_CCB_QUEUED;
	}
	/* should remove any other ccbs for the same dev handle */
	mtx_leave(&sc->sc_ccb_mtx);

	if (ccb == NULL) {
		/* nothing left to do; give the spare ccb back */
		scsi_io_put(&sc->sc_iopool, tccb);
		return;
	}

	stq = tccb->ccb_cmd;
	stq->function = MPII_FUNCTION_SCSI_TASK_MGMT;
	stq->task_type = MPII_SCSI_TASK_TARGET_RESET;
	htolem16(&stq->dev_handle, ccb->ccb_dev_handle);

	tccb->ccb_done = mpii_scsi_cmd_tmo_done;
	mpii_start(sc, tccb);
}
2998 
/*
 * Completion of a timeout-driven target reset: re-enter the timeout
 * handler with the same spare ccb to process any remaining entries.
 */
void
mpii_scsi_cmd_tmo_done(struct mpii_ccb *tccb)
{
	mpii_scsi_cmd_tmo_handler(tccb->ccb_sc, tccb);
}
3004 
/*
 * Completion handler for SCSI commands.  Cancels the watchdog and
 * removes the ccb from the timeout list if it had been flagged,
 * unloads the data dmamap, then translates the IOC's scsi_io_error
 * reply (if any) into xs->error/status/resid/sense.  No reply frame
 * means the command completed cleanly.  Called without the kernel
 * lock; it is taken around scsi_done at the end.
 */
void
mpii_scsi_cmd_done(struct mpii_ccb *ccb)
{
	struct mpii_ccb		*tccb;
	struct mpii_msg_scsi_io_error	*sie;
	struct mpii_softc	*sc = ccb->ccb_sc;
	struct scsi_xfer	*xs = ccb->ccb_cookie;
	struct scsi_sense_data	*sense;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;

	timeout_del(&xs->stimeout);
	mtx_enter(&sc->sc_ccb_mtx);
	if (ccb->ccb_state == MPII_CCB_TIMEOUT) {
		/* ENOSIMPLEQ_REMOVE :( */
		if (ccb == SIMPLEQ_FIRST(&sc->sc_ccb_tmos))
			SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_tmos, ccb_link);
		else {
			SIMPLEQ_FOREACH(tccb, &sc->sc_ccb_tmos, ccb_link) {
				if (SIMPLEQ_NEXT(tccb, ccb_link) == ccb) {
					SIMPLEQ_REMOVE_AFTER(&sc->sc_ccb_tmos,
					    tccb, ccb_link);
					break;
				}
			}
		}
	}

	ccb->ccb_state = MPII_CCB_READY;
	mtx_leave(&sc->sc_ccb_mtx);

	if (xs->datalen != 0) {
		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, dmap);
	}

	xs->error = XS_NOERROR;
	xs->resid = 0;

	if (ccb->ccb_rcb == NULL) {
		/* no scsi error, we're ok so drop out early */
		xs->status = SCSI_OK;
		goto done;
	}

	sie = ccb->ccb_rcb->rcb_reply;

	DNPRINTF(MPII_D_CMD, "%s: mpii_scsi_cmd_done xs cmd: 0x%02x len: %d "
	    "flags 0x%x\n", DEVNAME(sc), xs->cmd->opcode, xs->datalen,
	    xs->flags);
	DNPRINTF(MPII_D_CMD, "%s:  dev_handle: %d msg_length: %d "
	    "function: 0x%02x\n", DEVNAME(sc), lemtoh16(&sie->dev_handle),
	    sie->msg_length, sie->function);
	DNPRINTF(MPII_D_CMD, "%s:  vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
	    sie->vp_id, sie->vf_id);
	DNPRINTF(MPII_D_CMD, "%s:  scsi_status: 0x%02x scsi_state: 0x%02x "
	    "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status,
	    sie->scsi_state, lemtoh16(&sie->ioc_status));
	DNPRINTF(MPII_D_CMD, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    lemtoh32(&sie->ioc_loginfo));
	DNPRINTF(MPII_D_CMD, "%s:  transfer_count: %d\n", DEVNAME(sc),
	    lemtoh32(&sie->transfer_count));
	DNPRINTF(MPII_D_CMD, "%s:  sense_count: %d\n", DEVNAME(sc),
	    lemtoh32(&sie->sense_count));
	DNPRINTF(MPII_D_CMD, "%s:  response_info: 0x%08x\n", DEVNAME(sc),
	    lemtoh32(&sie->response_info));
	DNPRINTF(MPII_D_CMD, "%s:  task_tag: 0x%04x\n", DEVNAME(sc),
	    lemtoh16(&sie->task_tag));
	DNPRINTF(MPII_D_CMD, "%s:  bidirectional_transfer_count: 0x%08x\n",
	    DEVNAME(sc), lemtoh32(&sie->bidirectional_transfer_count));

	if (sie->scsi_state & MPII_SCSIIO_STATE_NO_SCSI_STATUS)
		xs->status = SCSI_TERMINATED;
	else
		xs->status = sie->scsi_status;
	xs->resid = 0;

	/* map the IOC status onto the midlayer's error codes */
	switch (lemtoh16(&sie->ioc_status) & MPII_IOCSTATUS_MASK) {
	case MPII_IOCSTATUS_SCSI_DATA_UNDERRUN:
		xs->resid = xs->datalen - lemtoh32(&sie->transfer_count);
		/* FALLTHROUGH */

	case MPII_IOCSTATUS_SUCCESS:
	case MPII_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			xs->error = XS_NOERROR;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			xs->error = XS_DRIVER_STUFFUP;
		}
		break;

	case MPII_IOCSTATUS_BUSY:
	case MPII_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_BUSY;
		break;

	case MPII_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPII_IOCSTATUS_SCSI_TASK_TERMINATED:
		xs->error = XS_RESET;
		break;

	case MPII_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPII_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	/* sense data was dma'd to the tail of the request frame */
	sense = (struct scsi_sense_data *)((caddr_t)ccb->ccb_cmd +
	    sc->sc_request_size - sizeof(*sense));
	if (sie->scsi_state & MPII_SCSIIO_STATE_AUTOSENSE_VALID)
		memcpy(&xs->sense, sense, sizeof(xs->sense));

	DNPRINTF(MPII_D_CMD, "%s:  xs err: %d status: %#x\n", DEVNAME(sc),
	    xs->error, xs->status);

	mpii_push_reply(sc, ccb->ccb_rcb);
done:
	KERNEL_LOCK();
	scsi_done(xs);
	KERNEL_UNLOCK();
}
3144 
3145 int
3146 mpii_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
3147 {
3148 	struct mpii_softc	*sc = (struct mpii_softc *)link->adapter_softc;
3149 	struct mpii_device	*dev = sc->sc_devs[link->target];
3150 
3151 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_scsi_ioctl\n", DEVNAME(sc));
3152 
3153 	switch (cmd) {
3154 	case DIOCGCACHE:
3155 	case DIOCSCACHE:
3156 		if (dev != NULL && ISSET(dev->flags, MPII_DF_VOLUME)) {
3157 			return (mpii_ioctl_cache(link, cmd,
3158 			    (struct dk_cache *)addr));
3159 		}
3160 		break;
3161 
3162 	default:
3163 		if (sc->sc_ioctl)
3164 			return (sc->sc_ioctl(link->adapter_softc, cmd, addr));
3165 
3166 		break;
3167 	}
3168 
3169 	return (ENOTTY);
3170 }
3171 
3172 int
3173 mpii_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc)
3174 {
3175 	struct mpii_softc *sc = (struct mpii_softc *)link->adapter_softc;
3176 	struct mpii_device *dev = sc->sc_devs[link->target];
3177 	struct mpii_cfg_raid_vol_pg0 *vpg;
3178 	struct mpii_msg_raid_action_request *req;
3179 	struct mpii_msg_raid_action_reply *rep;
3180 	struct mpii_cfg_hdr hdr;
3181 	struct mpii_ccb	*ccb;
3182 	u_int32_t addr = MPII_CFG_RAID_VOL_ADDR_HANDLE | dev->dev_handle;
3183 	size_t pagelen;
3184 	int rv = 0;
3185 	int enabled;
3186 
3187 	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3188 	    addr, MPII_PG_POLL, &hdr) != 0)
3189 		return (EINVAL);
3190 
3191 	pagelen = hdr.page_length * 4;
3192 	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
3193 	if (vpg == NULL)
3194 		return (ENOMEM);
3195 
3196 	if (mpii_req_cfg_page(sc, addr, MPII_PG_POLL, &hdr, 1,
3197 	    vpg, pagelen) != 0) {
3198 		rv = EINVAL;
3199 		goto done;
3200 	}
3201 
3202 	enabled = ((lemtoh16(&vpg->volume_settings) &
3203 	    MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_MASK) ==
3204 	    MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_ENABLED) ? 1 : 0;
3205 
3206 	if (cmd == DIOCGCACHE) {
3207 		dc->wrcache = enabled;
3208 		dc->rdcache = 0;
3209 		goto done;
3210 	} /* else DIOCSCACHE */
3211 
3212 	if (dc->rdcache) {
3213 		rv = EOPNOTSUPP;
3214 		goto done;
3215 	}
3216 
3217 	if (((dc->wrcache) ? 1 : 0) == enabled)
3218 		goto done;
3219 
3220 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_POLL);
3221 	if (ccb == NULL) {
3222 		rv = ENOMEM;
3223 		goto done;
3224 	}
3225 
3226 	ccb->ccb_done = mpii_empty_done;
3227 
3228 	req = ccb->ccb_cmd;
3229 	memset(req, 0, sizeof(*req));
3230 	req->function = MPII_FUNCTION_RAID_ACTION;
3231 	req->action = MPII_RAID_ACTION_CHANGE_VOL_WRITE_CACHE;
3232 	htolem16(&req->vol_dev_handle, dev->dev_handle);
3233 	htolem32(&req->action_data, dc->wrcache ?
3234 	    MPII_RAID_VOL_WRITE_CACHE_ENABLE :
3235 	    MPII_RAID_VOL_WRITE_CACHE_DISABLE);
3236 
3237 	if (mpii_poll(sc, ccb) != 0) {
3238 		rv = EIO;
3239 		goto done;
3240 	}
3241 
3242 	if (ccb->ccb_rcb != NULL) {
3243 		rep = ccb->ccb_rcb->rcb_reply;
3244 		if ((rep->ioc_status != MPII_IOCSTATUS_SUCCESS) ||
3245 		    ((rep->action_data[0] &
3246 		     MPII_RAID_VOL_WRITE_CACHE_MASK) !=
3247 		    (dc->wrcache ? MPII_RAID_VOL_WRITE_CACHE_ENABLE :
3248 		     MPII_RAID_VOL_WRITE_CACHE_DISABLE)))
3249 			rv = EINVAL;
3250 		mpii_push_reply(sc, ccb->ccb_rcb);
3251 	}
3252 
3253 	scsi_io_put(&sc->sc_iopool, ccb);
3254 
3255 done:
3256 	free(vpg, M_TEMP, pagelen);
3257 	return (rv);
3258 }
3259 
3260 #if NBIO > 0
3261 int
3262 mpii_ioctl(struct device *dev, u_long cmd, caddr_t addr)
3263 {
3264 	struct mpii_softc	*sc = (struct mpii_softc *)dev;
3265 	int			error = 0;
3266 
3267 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl ", DEVNAME(sc));
3268 
3269 	switch (cmd) {
3270 	case BIOCINQ:
3271 		DNPRINTF(MPII_D_IOCTL, "inq\n");
3272 		error = mpii_ioctl_inq(sc, (struct bioc_inq *)addr);
3273 		break;
3274 	case BIOCVOL:
3275 		DNPRINTF(MPII_D_IOCTL, "vol\n");
3276 		error = mpii_ioctl_vol(sc, (struct bioc_vol *)addr);
3277 		break;
3278 	case BIOCDISK:
3279 		DNPRINTF(MPII_D_IOCTL, "disk\n");
3280 		error = mpii_ioctl_disk(sc, (struct bioc_disk *)addr);
3281 		break;
3282 	default:
3283 		DNPRINTF(MPII_D_IOCTL, " invalid ioctl\n");
3284 		error = ENOTTY;
3285 	}
3286 
3287 	return (error);
3288 }
3289 
3290 int
3291 mpii_ioctl_inq(struct mpii_softc *sc, struct bioc_inq *bi)
3292 {
3293 	int			i;
3294 
3295 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_inq\n", DEVNAME(sc));
3296 
3297 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
3298 	for (i = 0; i < sc->sc_max_devices; i++)
3299 		if (sc->sc_devs[i] &&
3300 		    ISSET(sc->sc_devs[i]->flags, MPII_DF_VOLUME))
3301 			bi->bi_novol++;
3302 	return (0);
3303 }
3304 
3305 int
3306 mpii_ioctl_vol(struct mpii_softc *sc, struct bioc_vol *bv)
3307 {
3308 	struct mpii_cfg_raid_vol_pg0	*vpg;
3309 	struct mpii_cfg_hdr		hdr;
3310 	struct mpii_device		*dev;
3311 	struct scsi_link		*lnk;
3312 	struct device			*scdev;
3313 	size_t				pagelen;
3314 	u_int16_t			volh;
3315 	int				rv, hcnt = 0;
3316 
3317 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_vol %d\n",
3318 	    DEVNAME(sc), bv->bv_volid);
3319 
3320 	if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL)
3321 		return (ENODEV);
3322 	volh = dev->dev_handle;
3323 
3324 	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3325 	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
3326 		printf("%s: unable to fetch header for raid volume page 0\n",
3327 		    DEVNAME(sc));
3328 		return (EINVAL);
3329 	}
3330 
3331 	pagelen = hdr.page_length * 4;
3332 	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
3333 	if (vpg == NULL) {
3334 		printf("%s: unable to allocate space for raid "
3335 		    "volume page 0\n", DEVNAME(sc));
3336 		return (ENOMEM);
3337 	}
3338 
3339 	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
3340 	    &hdr, 1, vpg, pagelen) != 0) {
3341 		printf("%s: unable to fetch raid volume page 0\n",
3342 		    DEVNAME(sc));
3343 		free(vpg, M_TEMP, pagelen);
3344 		return (EINVAL);
3345 	}
3346 
3347 	switch (vpg->volume_state) {
3348 	case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
3349 	case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
3350 		bv->bv_status = BIOC_SVONLINE;
3351 		break;
3352 	case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
3353 		if (ISSET(lemtoh32(&vpg->volume_status),
3354 		    MPII_CFG_RAID_VOL_0_STATUS_RESYNC)) {
3355 			bv->bv_status = BIOC_SVREBUILD;
3356 			bv->bv_percent = dev->percent;
3357 		} else
3358 			bv->bv_status = BIOC_SVDEGRADED;
3359 		break;
3360 	case MPII_CFG_RAID_VOL_0_STATE_FAILED:
3361 		bv->bv_status = BIOC_SVOFFLINE;
3362 		break;
3363 	case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
3364 		bv->bv_status = BIOC_SVBUILDING;
3365 		break;
3366 	case MPII_CFG_RAID_VOL_0_STATE_MISSING:
3367 	default:
3368 		bv->bv_status = BIOC_SVINVALID;
3369 		break;
3370 	}
3371 
3372 	switch (vpg->volume_type) {
3373 	case MPII_CFG_RAID_VOL_0_TYPE_RAID0:
3374 		bv->bv_level = 0;
3375 		break;
3376 	case MPII_CFG_RAID_VOL_0_TYPE_RAID1:
3377 		bv->bv_level = 1;
3378 		break;
3379 	case MPII_CFG_RAID_VOL_0_TYPE_RAID1E:
3380 	case MPII_CFG_RAID_VOL_0_TYPE_RAID10:
3381 		bv->bv_level = 10;
3382 		break;
3383 	default:
3384 		bv->bv_level = -1;
3385 	}
3386 
3387 	if ((rv = mpii_bio_hs(sc, NULL, 0, vpg->hot_spare_pool, &hcnt)) != 0) {
3388 		free(vpg, M_TEMP, pagelen);
3389 		return (rv);
3390 	}
3391 
3392 	bv->bv_nodisk = vpg->num_phys_disks + hcnt;
3393 
3394 	bv->bv_size = letoh64(vpg->max_lba) * lemtoh16(&vpg->block_size);
3395 
3396 	lnk = scsi_get_link(sc->sc_scsibus, dev->slot, 0);
3397 	if (lnk != NULL) {
3398 		scdev = lnk->device_softc;
3399 		strlcpy(bv->bv_dev, scdev->dv_xname, sizeof(bv->bv_dev));
3400 	}
3401 
3402 	free(vpg, M_TEMP, pagelen);
3403 	return (0);
3404 }
3405 
3406 int
3407 mpii_ioctl_disk(struct mpii_softc *sc, struct bioc_disk *bd)
3408 {
3409 	struct mpii_cfg_raid_vol_pg0		*vpg;
3410 	struct mpii_cfg_raid_vol_pg0_physdisk	*pd;
3411 	struct mpii_cfg_hdr			hdr;
3412 	struct mpii_device			*dev;
3413 	size_t					pagelen;
3414 	u_int16_t				volh;
3415 	u_int8_t				dn;
3416 
3417 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_disk %d/%d\n",
3418 	    DEVNAME(sc), bd->bd_volid, bd->bd_diskid);
3419 
3420 	if ((dev = mpii_find_vol(sc, bd->bd_volid)) == NULL)
3421 		return (ENODEV);
3422 	volh = dev->dev_handle;
3423 
3424 	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3425 	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
3426 		printf("%s: unable to fetch header for raid volume page 0\n",
3427 		    DEVNAME(sc));
3428 		return (EINVAL);
3429 	}
3430 
3431 	pagelen = hdr.page_length * 4;
3432 	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
3433 	if (vpg == NULL) {
3434 		printf("%s: unable to allocate space for raid "
3435 		    "volume page 0\n", DEVNAME(sc));
3436 		return (ENOMEM);
3437 	}
3438 
3439 	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
3440 	    &hdr, 1, vpg, pagelen) != 0) {
3441 		printf("%s: unable to fetch raid volume page 0\n",
3442 		    DEVNAME(sc));
3443 		free(vpg, M_TEMP, pagelen);
3444 		return (EINVAL);
3445 	}
3446 
3447 	if (bd->bd_diskid >= vpg->num_phys_disks) {
3448 		int		nvdsk = vpg->num_phys_disks;
3449 		int		hsmap = vpg->hot_spare_pool;
3450 
3451 		free(vpg, M_TEMP, pagelen);
3452 		return (mpii_bio_hs(sc, bd, nvdsk, hsmap, NULL));
3453 	}
3454 
3455 	pd = (struct mpii_cfg_raid_vol_pg0_physdisk *)(vpg + 1) +
3456 	    bd->bd_diskid;
3457 	dn = pd->phys_disk_num;
3458 
3459 	free(vpg, M_TEMP, pagelen);
3460 	return (mpii_bio_disk(sc, bd, dn));
3461 }
3462 
3463 int
3464 mpii_bio_hs(struct mpii_softc *sc, struct bioc_disk *bd, int nvdsk,
3465      int hsmap, int *hscnt)
3466 {
3467 	struct mpii_cfg_raid_config_pg0	*cpg;
3468 	struct mpii_raid_config_element	*el;
3469 	struct mpii_ecfg_hdr		ehdr;
3470 	size_t				pagelen;
3471 	int				i, nhs = 0;
3472 
3473 	if (bd)
3474 		DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs %d\n", DEVNAME(sc),
3475 		    bd->bd_diskid - nvdsk);
3476 	else
3477 		DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs\n", DEVNAME(sc));
3478 
3479 	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_CONFIG,
3480 	    0, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG, MPII_PG_EXTENDED,
3481 	    &ehdr) != 0) {
3482 		printf("%s: unable to fetch header for raid config page 0\n",
3483 		    DEVNAME(sc));
3484 		return (EINVAL);
3485 	}
3486 
3487 	pagelen = lemtoh16(&ehdr.ext_page_length) * 4;
3488 	cpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
3489 	if (cpg == NULL) {
3490 		printf("%s: unable to allocate space for raid config page 0\n",
3491 		    DEVNAME(sc));
3492 		return (ENOMEM);
3493 	}
3494 
3495 	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG,
3496 	    MPII_PG_EXTENDED, &ehdr, 1, cpg, pagelen) != 0) {
3497 		printf("%s: unable to fetch raid config page 0\n",
3498 		    DEVNAME(sc));
3499 		free(cpg, M_TEMP, pagelen);
3500 		return (EINVAL);
3501 	}
3502 
3503 	el = (struct mpii_raid_config_element *)(cpg + 1);
3504 	for (i = 0; i < cpg->num_elements; i++, el++) {
3505 		if (ISSET(lemtoh16(&el->element_flags),
3506 		    MPII_RAID_CONFIG_ELEMENT_FLAG_HSP_PHYS_DISK) &&
3507 		    el->hot_spare_pool == hsmap) {
3508 			/*
3509 			 * diskid comparison is based on the idea that all
3510 			 * disks are counted by the bio(4) in sequence, thus
3511 			 * substracting the number of disks in the volume
3512 			 * from the diskid yields us a "relative" hotspare
3513 			 * number, which is good enough for us.
3514 			 */
3515 			if (bd != NULL && bd->bd_diskid == nhs + nvdsk) {
3516 				u_int8_t dn = el->phys_disk_num;
3517 
3518 				free(cpg, M_TEMP, pagelen);
3519 				return (mpii_bio_disk(sc, bd, dn));
3520 			}
3521 			nhs++;
3522 		}
3523 	}
3524 
3525 	if (hscnt)
3526 		*hscnt = nhs;
3527 
3528 	free(cpg, M_TEMP, pagelen);
3529 	return (0);
3530 }
3531 
3532 int
3533 mpii_bio_disk(struct mpii_softc *sc, struct bioc_disk *bd, u_int8_t dn)
3534 {
3535 	struct mpii_cfg_raid_physdisk_pg0	*ppg;
3536 	struct mpii_cfg_hdr			hdr;
3537 	struct mpii_device			*dev;
3538 	int					len;
3539 
3540 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_disk %d\n", DEVNAME(sc),
3541 	    bd->bd_diskid);
3542 
3543 	ppg = malloc(sizeof(*ppg), M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
3544 	if (ppg == NULL) {
3545 		printf("%s: unable to allocate space for raid physical disk "
3546 		    "page 0\n", DEVNAME(sc));
3547 		return (ENOMEM);
3548 	}
3549 
3550 	hdr.page_version = 0;
3551 	hdr.page_length = sizeof(*ppg) / 4;
3552 	hdr.page_number = 0;
3553 	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_RAID_PD;
3554 
3555 	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_PHYS_DISK_ADDR_NUMBER | dn, 0,
3556 	    &hdr, 1, ppg, sizeof(*ppg)) != 0) {
3557 		printf("%s: unable to fetch raid drive page 0\n",
3558 		    DEVNAME(sc));
3559 		free(ppg, M_TEMP, sizeof(*ppg));
3560 		return (EINVAL);
3561 	}
3562 
3563 	bd->bd_target = ppg->phys_disk_num;
3564 
3565 	if ((dev = mpii_find_dev(sc, lemtoh16(&ppg->dev_handle))) == NULL) {
3566 		bd->bd_status = BIOC_SDINVALID;
3567 		free(ppg, M_TEMP, sizeof(*ppg));
3568 		return (0);
3569 	}
3570 
3571 	switch (ppg->phys_disk_state) {
3572 	case MPII_CFG_RAID_PHYDISK_0_STATE_ONLINE:
3573 	case MPII_CFG_RAID_PHYDISK_0_STATE_OPTIMAL:
3574 		bd->bd_status = BIOC_SDONLINE;
3575 		break;
3576 	case MPII_CFG_RAID_PHYDISK_0_STATE_OFFLINE:
3577 		if (ppg->offline_reason ==
3578 		    MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILED ||
3579 		    ppg->offline_reason ==
3580 		    MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILEDREQ)
3581 			bd->bd_status = BIOC_SDFAILED;
3582 		else
3583 			bd->bd_status = BIOC_SDOFFLINE;
3584 		break;
3585 	case MPII_CFG_RAID_PHYDISK_0_STATE_DEGRADED:
3586 		bd->bd_status = BIOC_SDFAILED;
3587 		break;
3588 	case MPII_CFG_RAID_PHYDISK_0_STATE_REBUILDING:
3589 		bd->bd_status = BIOC_SDREBUILD;
3590 		break;
3591 	case MPII_CFG_RAID_PHYDISK_0_STATE_HOTSPARE:
3592 		bd->bd_status = BIOC_SDHOTSPARE;
3593 		break;
3594 	case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCONFIGURED:
3595 		bd->bd_status = BIOC_SDUNUSED;
3596 		break;
3597 	case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCOMPATIBLE:
3598 	default:
3599 		bd->bd_status = BIOC_SDINVALID;
3600 		break;
3601 	}
3602 
3603 	bd->bd_size = letoh64(ppg->dev_max_lba) * lemtoh16(&ppg->block_size);
3604 
3605 	scsi_strvis(bd->bd_vendor, ppg->vendor_id, sizeof(ppg->vendor_id));
3606 	len = strlen(bd->bd_vendor);
3607 	bd->bd_vendor[len] = ' ';
3608 	scsi_strvis(&bd->bd_vendor[len + 1], ppg->product_id,
3609 	    sizeof(ppg->product_id));
3610 	scsi_strvis(bd->bd_serial, ppg->serial, sizeof(ppg->serial));
3611 
3612 	free(ppg, M_TEMP, sizeof(*ppg));
3613 	return (0);
3614 }
3615 
3616 struct mpii_device *
3617 mpii_find_vol(struct mpii_softc *sc, int volid)
3618 {
3619 	struct mpii_device	*dev = NULL;
3620 
3621 	if (sc->sc_vd_id_low + volid >= sc->sc_max_devices)
3622 		return (NULL);
3623 	dev = sc->sc_devs[sc->sc_vd_id_low + volid];
3624 	if (dev && ISSET(dev->flags, MPII_DF_VOLUME))
3625 		return (dev);
3626 	return (NULL);
3627 }
3628 
3629 #ifndef SMALL_KERNEL
3630 /*
3631  * Non-sleeping lightweight version of the mpii_ioctl_vol
3632  */
3633 int
3634 mpii_bio_volstate(struct mpii_softc *sc, struct bioc_vol *bv)
3635 {
3636 	struct mpii_cfg_raid_vol_pg0	*vpg;
3637 	struct mpii_cfg_hdr		hdr;
3638 	struct mpii_device		*dev = NULL;
3639 	size_t				pagelen;
3640 	u_int16_t			volh;
3641 
3642 	if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL)
3643 		return (ENODEV);
3644 	volh = dev->dev_handle;
3645 
3646 	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3647 	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, MPII_PG_POLL, &hdr) != 0) {
3648 		DNPRINTF(MPII_D_MISC, "%s: unable to fetch header for raid "
3649 		    "volume page 0\n", DEVNAME(sc));
3650 		return (EINVAL);
3651 	}
3652 
3653 	pagelen = hdr.page_length * 4;
3654 	vpg = malloc(pagelen, M_TEMP, M_NOWAIT | M_ZERO);
3655 	if (vpg == NULL) {
3656 		DNPRINTF(MPII_D_MISC, "%s: unable to allocate space for raid "
3657 		    "volume page 0\n", DEVNAME(sc));
3658 		return (ENOMEM);
3659 	}
3660 
3661 	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh,
3662 	    MPII_PG_POLL, &hdr, 1, vpg, pagelen) != 0) {
3663 		DNPRINTF(MPII_D_MISC, "%s: unable to fetch raid volume "
3664 		    "page 0\n", DEVNAME(sc));
3665 		free(vpg, M_TEMP, pagelen);
3666 		return (EINVAL);
3667 	}
3668 
3669 	switch (vpg->volume_state) {
3670 	case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
3671 	case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
3672 		bv->bv_status = BIOC_SVONLINE;
3673 		break;
3674 	case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
3675 		if (ISSET(lemtoh32(&vpg->volume_status),
3676 		    MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
3677 			bv->bv_status = BIOC_SVREBUILD;
3678 		else
3679 			bv->bv_status = BIOC_SVDEGRADED;
3680 		break;
3681 	case MPII_CFG_RAID_VOL_0_STATE_FAILED:
3682 		bv->bv_status = BIOC_SVOFFLINE;
3683 		break;
3684 	case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
3685 		bv->bv_status = BIOC_SVBUILDING;
3686 		break;
3687 	case MPII_CFG_RAID_VOL_0_STATE_MISSING:
3688 	default:
3689 		bv->bv_status = BIOC_SVINVALID;
3690 		break;
3691 	}
3692 
3693 	free(vpg, M_TEMP, pagelen);
3694 	return (0);
3695 }
3696 
3697 int
3698 mpii_create_sensors(struct mpii_softc *sc)
3699 {
3700 	struct scsibus_softc	*ssc = sc->sc_scsibus;
3701 	struct device		*dev;
3702 	struct scsi_link	*link;
3703 	int			i;
3704 
3705 	sc->sc_sensors = mallocarray(sc->sc_vd_count, sizeof(struct ksensor),
3706 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3707 	if (sc->sc_sensors == NULL)
3708 		return (1);
3709 	sc->sc_nsensors = sc->sc_vd_count;
3710 
3711 	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
3712 	    sizeof(sc->sc_sensordev.xname));
3713 
3714 	for (i = 0; i < sc->sc_vd_count; i++) {
3715 		link = scsi_get_link(ssc, i + sc->sc_vd_id_low, 0);
3716 		if (link == NULL)
3717 			goto bad;
3718 
3719 		dev = link->device_softc;
3720 
3721 		sc->sc_sensors[i].type = SENSOR_DRIVE;
3722 		sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
3723 
3724 		strlcpy(sc->sc_sensors[i].desc, dev->dv_xname,
3725 		    sizeof(sc->sc_sensors[i].desc));
3726 
3727 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
3728 	}
3729 
3730 	if (sensor_task_register(sc, mpii_refresh_sensors, 10) == NULL)
3731 		goto bad;
3732 
3733 	sensordev_install(&sc->sc_sensordev);
3734 
3735 	return (0);
3736 
3737 bad:
3738 	free(sc->sc_sensors, M_DEVBUF, 0);
3739 
3740 	return (1);
3741 }
3742 
3743 void
3744 mpii_refresh_sensors(void *arg)
3745 {
3746 	struct mpii_softc	*sc = arg;
3747 	struct bioc_vol		bv;
3748 	int			i;
3749 
3750 	for (i = 0; i < sc->sc_nsensors; i++) {
3751 		memset(&bv, 0, sizeof(bv));
3752 		bv.bv_volid = i;
3753 		if (mpii_bio_volstate(sc, &bv))
3754 			return;
3755 		switch(bv.bv_status) {
3756 		case BIOC_SVOFFLINE:
3757 			sc->sc_sensors[i].value = SENSOR_DRIVE_FAIL;
3758 			sc->sc_sensors[i].status = SENSOR_S_CRIT;
3759 			break;
3760 		case BIOC_SVDEGRADED:
3761 			sc->sc_sensors[i].value = SENSOR_DRIVE_PFAIL;
3762 			sc->sc_sensors[i].status = SENSOR_S_WARN;
3763 			break;
3764 		case BIOC_SVREBUILD:
3765 			sc->sc_sensors[i].value = SENSOR_DRIVE_REBUILD;
3766 			sc->sc_sensors[i].status = SENSOR_S_WARN;
3767 			break;
3768 		case BIOC_SVONLINE:
3769 			sc->sc_sensors[i].value = SENSOR_DRIVE_ONLINE;
3770 			sc->sc_sensors[i].status = SENSOR_S_OK;
3771 			break;
3772 		case BIOC_SVINVALID:
3773 			/* FALLTHROUGH */
3774 		default:
3775 			sc->sc_sensors[i].value = 0; /* unknown */
3776 			sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
3777 		}
3778 	}
3779 }
3780 #endif /* SMALL_KERNEL */
3781 #endif /* NBIO > 0 */
3782