xref: /openbsd-src/sys/dev/pci/mpii.c (revision 99fd087599a8791921855f21bd7e36130f39aadc)
1 /*	$OpenBSD: mpii.c,v 1.128 2020/02/05 16:29:30 krw Exp $	*/
2 /*
3  * Copyright (c) 2010, 2012 Mike Belopuhov
4  * Copyright (c) 2009 James Giannoules
5  * Copyright (c) 2005 - 2010 David Gwynne <dlg@openbsd.org>
6  * Copyright (c) 2005 - 2010 Marco Peereboom <marco@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include "bio.h"
22 
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/device.h>
26 #include <sys/ioctl.h>
27 #include <sys/malloc.h>
28 #include <sys/kernel.h>
29 #include <sys/rwlock.h>
30 #include <sys/sensors.h>
31 #include <sys/dkio.h>
32 #include <sys/tree.h>
33 #include <sys/task.h>
34 
35 #include <machine/bus.h>
36 
37 #include <dev/pci/pcireg.h>
38 #include <dev/pci/pcivar.h>
39 #include <dev/pci/pcidevs.h>
40 
41 #include <scsi/scsi_all.h>
42 #include <scsi/scsiconf.h>
43 
44 #include <dev/biovar.h>
45 
46 #include <dev/pci/mpiireg.h>
47 
48 /* #define MPII_DEBUG */
49 #ifdef MPII_DEBUG
50 #define DPRINTF(x...)		do { if (mpii_debug) printf(x); } while(0)
51 #define DNPRINTF(n,x...)	do { if (mpii_debug & (n)) printf(x); } while(0)
52 #define	MPII_D_CMD		(0x0001)
53 #define	MPII_D_INTR		(0x0002)
54 #define	MPII_D_MISC		(0x0004)
55 #define	MPII_D_DMA		(0x0008)
56 #define	MPII_D_IOCTL		(0x0010)
57 #define	MPII_D_RW		(0x0020)
58 #define	MPII_D_MEM		(0x0040)
59 #define	MPII_D_CCB		(0x0080)
60 #define	MPII_D_PPR		(0x0100)
61 #define	MPII_D_RAID		(0x0200)
62 #define	MPII_D_EVT		(0x0400)
63 #define MPII_D_CFG		(0x0800)
64 #define MPII_D_MAP		(0x1000)
65 
66 u_int32_t  mpii_debug = 0
67 		| MPII_D_CMD
68 		| MPII_D_INTR
69 		| MPII_D_MISC
70 		| MPII_D_DMA
71 		| MPII_D_IOCTL
72 		| MPII_D_RW
73 		| MPII_D_MEM
74 		| MPII_D_CCB
75 		| MPII_D_PPR
76 		| MPII_D_RAID
77 		| MPII_D_EVT
78 		| MPII_D_CFG
79 		| MPII_D_MAP
80 	;
81 #else
82 #define DPRINTF(x...)
83 #define DNPRINTF(n,x...)
84 #endif
85 
86 #define MPII_REQUEST_SIZE		(512)
87 #define MPII_REQUEST_CREDIT		(128)
88 
/*
 * A single bus_dma allocation: the dma map, its backing segment, the
 * size of the allocation and the kernel virtual address it is mapped
 * at.  Used for the request, reply and queue memory.
 */
struct mpii_dmamem {
	bus_dmamap_t		mdm_map;	/* dma map handle */
	bus_dma_segment_t	mdm_seg;	/* backing memory segment */
	size_t			mdm_size;	/* size of the allocation */
	caddr_t			mdm_kva;	/* kva of the mapping */
};
/* accessors: map handle, device address (first segment), kernel address */
#define MPII_DMA_MAP(_mdm) ((_mdm)->mdm_map)
#define MPII_DMA_DVA(_mdm) ((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MPII_DMA_KVA(_mdm) ((void *)(_mdm)->mdm_kva)
98 
99 struct mpii_softc;
100 
/*
 * Reply control block: tracks a single hardware reply frame by both
 * its kernel address and its device (dma) address.
 */
struct mpii_rcb {
	SIMPLEQ_ENTRY(mpii_rcb)	rcb_link;	/* entry on a reply list */
	void			*rcb_reply;	/* kva of the reply frame */
	u_int32_t		rcb_reply_dva;	/* dva of the reply frame */
};

SIMPLEQ_HEAD(mpii_rcb_list, mpii_rcb);
108 
/*
 * Per-target bookkeeping, kept in the sc_devs table indexed by target.
 */
struct mpii_device {
	int			flags;
#define MPII_DF_ATTACH		(0x0001)	/* attach pending — confirm in event code */
#define MPII_DF_DETACH		(0x0002)	/* detach pending — confirm in event code */
#define MPII_DF_HIDDEN		(0x0004)	/* not exposed to scsi (see mpii_scsi_probe) */
#define MPII_DF_UNUSED		(0x0008)	/* skipped by mpii_scsi_probe */
#define MPII_DF_VOLUME		(0x0010)	/* raid volume (probed via raid vol pages) */
#define MPII_DF_VOLUME_DISK	(0x0020)	/* presumably a raid member disk */
#define MPII_DF_HOT_SPARE	(0x0040)	/* presumably a hot spare disk */
	short			slot;		/* target mapping slot */
	short			percent;	/* presumably resync progress */
	u_int16_t		dev_handle;	/* ioc device handle */
	u_int16_t		enclosure;	/* enclosure handle */
	u_int16_t		expander;	/* expander handle */
	u_int8_t		phy_num;	/* phy number */
	u_int8_t		physical_port;	/* physical port number */
};
126 
/*
 * Command control block: per-request state identified by its system
 * message id (smid).  The request frame itself lives in the
 * sc_requests dma memory at ccb_offset.
 */
struct mpii_ccb {
	struct mpii_softc	*ccb_sc;	/* owning controller */

	void *			ccb_cookie;	/* caller context (scsi_xfer) */
	bus_dmamap_t		ccb_dmamap;	/* map for the data transfer */

	bus_addr_t		ccb_offset;	/* offset into sc_requests */
	void			*ccb_cmd;	/* kva of the request frame */
	bus_addr_t		ccb_cmd_dva;	/* dva of the request frame */
	u_int16_t		ccb_dev_handle;	/* target device handle */
	u_int16_t		ccb_smid;	/* system message id (1-based) */

	/* lifecycle of the ccb; protected by sc_ccb_mtx */
	volatile enum {
		MPII_CCB_FREE,
		MPII_CCB_READY,
		MPII_CCB_QUEUED,
		MPII_CCB_TIMEOUT
	}			ccb_state;

	void			(*ccb_done)(struct mpii_ccb *); /* completion */
	struct mpii_rcb		*ccb_rcb;	/* reply frame, if any */

	SIMPLEQ_ENTRY(mpii_ccb)	ccb_link;	/* entry on a ccb list */
};

SIMPLEQ_HEAD(mpii_ccb_list, mpii_ccb);
153 
/*
 * Per-controller state.
 */
struct mpii_softc {
	struct device		sc_dev;

	pci_chipset_tag_t	sc_pc;		/* pci chipset tag */
	pcitag_t		sc_tag;		/* pci device tag */

	void			*sc_ih;		/* interrupt handler cookie */

	struct scsi_link	sc_link;	/* scsi midlayer link */

	int			sc_flags;
#define MPII_F_RAID		(1<<1)		/* ioc has raid support */
#define MPII_F_SAS3		(1<<2)		/* SAS3 part (see mpii_load_xs_sas3) */

	struct scsibus_softc	*sc_scsibus;	/* attached scsibus */
	unsigned int		sc_pending;	/* autoconf waits for discovery */

	struct mpii_device	**sc_devs;	/* per-target device table */

	/* register window and dma tag */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	struct mutex		sc_req_mtx;	/* serializes request posting */
	struct mutex		sc_rep_mtx;	/* serializes reply processing */

	ushort			sc_reply_size;		/* reply frame size */
	ushort			sc_request_size;	/* request frame size */

	ushort			sc_max_cmds;		/* request credit */
	ushort			sc_num_reply_frames;
	u_int			sc_reply_free_qdepth;
	u_int			sc_reply_post_qdepth;

	/* sgl layout within a request frame */
	ushort			sc_chain_sge;	/* index of the chain sge */
	ushort			sc_max_sgl;	/* max sges per request */
	int			sc_max_chain;	/* max sges per chain segment */

	u_int8_t		sc_ioc_event_replay;	/* soft reset ok */

	/* facts reported by the ioc */
	u_int8_t		sc_porttype;
	u_int8_t		sc_max_volumes;
	u_int16_t		sc_max_devices;
	u_int16_t		sc_vd_count;
	u_int16_t		sc_vd_id_low;
	u_int16_t		sc_pd_id_start;
	int			sc_ioc_number;
	u_int8_t		sc_vf_id;

	struct mpii_ccb		*sc_ccbs;	/* array indexed by smid - 1 */
	struct mpii_ccb_list	sc_ccb_free;
	struct mutex		sc_ccb_free_mtx;

	struct mutex		sc_ccb_mtx;
				/*
				 * this protects the ccb state and list entry
				 * between mpii_scsi_cmd and scsidone.
				 */

	struct mpii_ccb_list	sc_ccb_tmos;	/* ccbs that timed out */
	struct scsi_iohandler	sc_ccb_tmo_handler;

	struct scsi_iopool	sc_iopool;	/* ccb allocation pool */

	struct mpii_dmamem	*sc_requests;	/* request frame memory */

	struct mpii_dmamem	*sc_replies;	/* reply frame memory */
	struct mpii_rcb		*sc_rcbs;	/* one rcb per reply frame */

	/* reply post queue: written by the ioc, consumed in mpii_intr */
	struct mpii_dmamem	*sc_reply_postq;
	struct mpii_reply_descr	*sc_reply_postq_kva;
	u_int			sc_reply_post_host_index;

	/* reply free queue: frames returned to the ioc */
	struct mpii_dmamem	*sc_reply_freeq;
	u_int			sc_reply_free_host_index;

	/* deferred sas events, drained by the sc_evt_sas_task */
	struct mpii_rcb_list	sc_evt_sas_queue;
	struct mutex		sc_evt_sas_mtx;
	struct task		sc_evt_sas_task;

	/* events waiting for an ack to be sent back to the ioc */
	struct mpii_rcb_list	sc_evt_ack_queue;
	struct mutex		sc_evt_ack_mtx;
	struct scsi_iohandler	sc_evt_ack_handler;

	/* scsi ioctl from sd device */
	int			(*sc_ioctl)(struct device *, u_long, caddr_t);

	/* hardware sensors (one per raid volume) */
	int			sc_nsensors;
	struct ksensor		*sc_sensors;
	struct ksensordev	sc_sensordev;
};
246 
247 int	mpii_match(struct device *, void *, void *);
248 void	mpii_attach(struct device *, struct device *, void *);
249 int	mpii_detach(struct device *, int);
250 
251 int	mpii_intr(void *);
252 
/* autoconf attachment glue */
struct cfattach mpii_ca = {
	sizeof(struct mpii_softc),
	mpii_match,
	mpii_attach,
	mpii_detach
};
259 
/* driver name and device class */
struct cfdriver mpii_cd = {
	NULL,
	"mpii",
	DV_DULL
};
265 
266 void		mpii_scsi_cmd(struct scsi_xfer *);
267 void		mpii_scsi_cmd_done(struct mpii_ccb *);
268 int		mpii_scsi_probe(struct scsi_link *);
269 int		mpii_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
270 
/* scsi midlayer entry points: cmd, (no minphys), probe, (no free), ioctl */
struct scsi_adapter mpii_switch = {
	mpii_scsi_cmd, NULL, mpii_scsi_probe, NULL, mpii_scsi_ioctl
};
274 
275 struct mpii_dmamem *
276 		mpii_dmamem_alloc(struct mpii_softc *, size_t);
277 void		mpii_dmamem_free(struct mpii_softc *,
278 		    struct mpii_dmamem *);
279 int		mpii_alloc_ccbs(struct mpii_softc *);
280 void *		mpii_get_ccb(void *);
281 void		mpii_put_ccb(void *, void *);
282 int		mpii_alloc_replies(struct mpii_softc *);
283 int		mpii_alloc_queues(struct mpii_softc *);
284 void		mpii_push_reply(struct mpii_softc *, struct mpii_rcb *);
285 void		mpii_push_replies(struct mpii_softc *);
286 
287 void		mpii_scsi_cmd_tmo(void *);
288 void		mpii_scsi_cmd_tmo_handler(void *, void *);
289 void		mpii_scsi_cmd_tmo_done(struct mpii_ccb *);
290 
291 int		mpii_insert_dev(struct mpii_softc *, struct mpii_device *);
292 int		mpii_remove_dev(struct mpii_softc *, struct mpii_device *);
293 struct mpii_device *
294 		mpii_find_dev(struct mpii_softc *, u_int16_t);
295 
296 void		mpii_start(struct mpii_softc *, struct mpii_ccb *);
297 int		mpii_poll(struct mpii_softc *, struct mpii_ccb *);
298 void		mpii_poll_done(struct mpii_ccb *);
299 struct mpii_rcb *
300 		mpii_reply(struct mpii_softc *, struct mpii_reply_descr *);
301 
302 void		mpii_wait(struct mpii_softc *, struct mpii_ccb *);
303 void		mpii_wait_done(struct mpii_ccb *);
304 
305 void		mpii_init_queues(struct mpii_softc *);
306 
307 int		mpii_load_xs(struct mpii_ccb *);
308 int		mpii_load_xs_sas3(struct mpii_ccb *);
309 
310 u_int32_t	mpii_read(struct mpii_softc *, bus_size_t);
311 void		mpii_write(struct mpii_softc *, bus_size_t, u_int32_t);
312 int		mpii_wait_eq(struct mpii_softc *, bus_size_t, u_int32_t,
313 		    u_int32_t);
314 int		mpii_wait_ne(struct mpii_softc *, bus_size_t, u_int32_t,
315 		    u_int32_t);
316 
317 int		mpii_init(struct mpii_softc *);
318 int		mpii_reset_soft(struct mpii_softc *);
319 int		mpii_reset_hard(struct mpii_softc *);
320 
321 int		mpii_handshake_send(struct mpii_softc *, void *, size_t);
322 int		mpii_handshake_recv_dword(struct mpii_softc *,
323 		    u_int32_t *);
324 int		mpii_handshake_recv(struct mpii_softc *, void *, size_t);
325 
326 void		mpii_empty_done(struct mpii_ccb *);
327 
328 int		mpii_iocinit(struct mpii_softc *);
329 int		mpii_iocfacts(struct mpii_softc *);
330 int		mpii_portfacts(struct mpii_softc *);
331 int		mpii_portenable(struct mpii_softc *);
332 int		mpii_cfg_coalescing(struct mpii_softc *);
333 int		mpii_board_info(struct mpii_softc *);
334 int		mpii_target_map(struct mpii_softc *);
335 
336 int		mpii_eventnotify(struct mpii_softc *);
337 void		mpii_eventnotify_done(struct mpii_ccb *);
338 void		mpii_eventack(void *, void *);
339 void		mpii_eventack_done(struct mpii_ccb *);
340 void		mpii_event_process(struct mpii_softc *, struct mpii_rcb *);
341 void		mpii_event_done(struct mpii_softc *, struct mpii_rcb *);
342 void		mpii_event_sas(void *);
343 void		mpii_event_raid(struct mpii_softc *,
344 		    struct mpii_msg_event_reply *);
345 void		mpii_event_discovery(struct mpii_softc *,
346 		    struct mpii_msg_event_reply *);
347 
348 void		mpii_sas_remove_device(struct mpii_softc *, u_int16_t);
349 
350 int		mpii_req_cfg_header(struct mpii_softc *, u_int8_t,
351 		    u_int8_t, u_int32_t, int, void *);
352 int		mpii_req_cfg_page(struct mpii_softc *, u_int32_t, int,
353 		    void *, int, void *, size_t);
354 
355 int		mpii_ioctl_cache(struct scsi_link *, u_long, struct dk_cache *);
356 
357 #if NBIO > 0
358 int		mpii_ioctl(struct device *, u_long, caddr_t);
359 int		mpii_ioctl_inq(struct mpii_softc *, struct bioc_inq *);
360 int		mpii_ioctl_vol(struct mpii_softc *, struct bioc_vol *);
361 int		mpii_ioctl_disk(struct mpii_softc *, struct bioc_disk *);
362 int		mpii_bio_hs(struct mpii_softc *, struct bioc_disk *, int,
363 		    int, int *);
364 int		mpii_bio_disk(struct mpii_softc *, struct bioc_disk *,
365 		    u_int8_t);
366 struct mpii_device *
367 		mpii_find_vol(struct mpii_softc *, int);
368 #ifndef SMALL_KERNEL
369  int		mpii_bio_volstate(struct mpii_softc *, struct bioc_vol *);
370 int		mpii_create_sensors(struct mpii_softc *);
371 void		mpii_refresh_sensors(void *);
372 #endif /* SMALL_KERNEL */
373 #endif /* NBIO > 0 */
374 
375 #define DEVNAME(s)		((s)->sc_dev.dv_xname)
376 
377 #define dwordsof(s)		(sizeof(s) / sizeof(u_int32_t))
378 
379 #define mpii_read_db(s)		mpii_read((s), MPII_DOORBELL)
380 #define mpii_write_db(s, v)	mpii_write((s), MPII_DOORBELL, (v))
381 #define mpii_read_intr(s)	mpii_read((s), MPII_INTR_STATUS)
382 #define mpii_write_intr(s, v)	mpii_write((s), MPII_INTR_STATUS, (v))
383 #define mpii_reply_waiting(s)	((mpii_read_intr((s)) & MPII_INTR_STATUS_REPLY)\
384 				    == MPII_INTR_STATUS_REPLY)
385 
386 #define mpii_write_reply_free(s, v) \
387     bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
388     MPII_REPLY_FREE_HOST_INDEX, (v))
389 #define mpii_write_reply_post(s, v) \
390     bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
391     MPII_REPLY_POST_HOST_INDEX, (v))
392 
393 #define mpii_wait_db_int(s)	mpii_wait_ne((s), MPII_INTR_STATUS, \
394 				    MPII_INTR_STATUS_IOC2SYSDB, 0)
395 #define mpii_wait_db_ack(s)	mpii_wait_eq((s), MPII_INTR_STATUS, \
396 				    MPII_INTR_STATUS_SYS2IOCDB, 0)
397 
398 static inline void
399 mpii_dvatosge(struct mpii_sge *sge, u_int64_t dva)
400 {
401 	htolem32(&sge->sg_addr_lo, dva);
402 	htolem32(&sge->sg_addr_hi, dva >> 32);
403 }
404 
405 #define MPII_PG_EXTENDED	(1<<0)
406 #define MPII_PG_POLL		(1<<1)
407 #define MPII_PG_FMT		"\020" "\002POLL" "\001EXTENDED"
408 
/* pci ids of all supported LSI/Symbios SAS2 and SAS3 controllers */
static const struct pci_matchid mpii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2004 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2008 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SSS6200 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_5 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2116_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2116_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_5 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_6 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3004 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3008 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3408 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3416 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3508 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3508_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3516 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3516_1 }
};
440 
/* match any pci device listed in mpii_devices */
int
mpii_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, mpii_devices, nitems(mpii_devices)));
}
446 
447 void
448 mpii_attach(struct device *parent, struct device *self, void *aux)
449 {
450 	struct mpii_softc		*sc = (struct mpii_softc *)self;
451 	struct pci_attach_args		*pa = aux;
452 	pcireg_t			memtype;
453 	int				r;
454 	pci_intr_handle_t		ih;
455 	struct scsibus_attach_args	saa;
456 	struct mpii_ccb			*ccb;
457 
458 	sc->sc_pc = pa->pa_pc;
459 	sc->sc_tag = pa->pa_tag;
460 	sc->sc_dmat = pa->pa_dmat;
461 
462 	mtx_init(&sc->sc_req_mtx, IPL_BIO);
463 	mtx_init(&sc->sc_rep_mtx, IPL_BIO);
464 
465 	/* find the appropriate memory base */
466 	for (r = PCI_MAPREG_START; r < PCI_MAPREG_END; r += sizeof(memtype)) {
467 		memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, r);
468 		if ((memtype & PCI_MAPREG_TYPE_MASK) == PCI_MAPREG_TYPE_MEM)
469 			break;
470 	}
471 	if (r >= PCI_MAPREG_END) {
472 		printf(": unable to locate system interface registers\n");
473 		return;
474 	}
475 
476 	if (pci_mapreg_map(pa, r, memtype, 0, &sc->sc_iot, &sc->sc_ioh,
477 	    NULL, &sc->sc_ios, 0xFF) != 0) {
478 		printf(": unable to map system interface registers\n");
479 		return;
480 	}
481 
482 	/* disable the expansion rom */
483 	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_ROM_REG,
484 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_ROM_REG) &
485 	    ~PCI_ROM_ENABLE);
486 
487 	/* disable interrupts */
488 	mpii_write(sc, MPII_INTR_MASK,
489 	    MPII_INTR_MASK_RESET | MPII_INTR_MASK_REPLY |
490 	    MPII_INTR_MASK_DOORBELL);
491 
492 	/* hook up the interrupt */
493 	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
494 		printf(": unable to map interrupt\n");
495 		goto unmap;
496 	}
497 	printf(": %s\n", pci_intr_string(sc->sc_pc, ih));
498 
499 	if (mpii_iocfacts(sc) != 0) {
500 		printf("%s: unable to get iocfacts\n", DEVNAME(sc));
501 		goto unmap;
502 	}
503 
504 	if (mpii_init(sc) != 0) {
505 		printf("%s: unable to initialize ioc\n", DEVNAME(sc));
506 		goto unmap;
507 	}
508 
509 	if (mpii_alloc_ccbs(sc) != 0) {
510 		/* error already printed */
511 		goto unmap;
512 	}
513 
514 	if (mpii_alloc_replies(sc) != 0) {
515 		printf("%s: unable to allocated reply space\n", DEVNAME(sc));
516 		goto free_ccbs;
517 	}
518 
519 	if (mpii_alloc_queues(sc) != 0) {
520 		printf("%s: unable to allocate reply queues\n", DEVNAME(sc));
521 		goto free_replies;
522 	}
523 
524 	if (mpii_iocinit(sc) != 0) {
525 		printf("%s: unable to send iocinit\n", DEVNAME(sc));
526 		goto free_queues;
527 	}
528 
529 	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
530 	    MPII_DOORBELL_STATE_OPER) != 0) {
531 		printf("%s: state: 0x%08x\n", DEVNAME(sc),
532 			mpii_read_db(sc) & MPII_DOORBELL_STATE);
533 		printf("%s: operational state timeout\n", DEVNAME(sc));
534 		goto free_queues;
535 	}
536 
537 	mpii_push_replies(sc);
538 	mpii_init_queues(sc);
539 
540 	if (mpii_board_info(sc) != 0) {
541 		printf("%s: unable to get manufacturing page 0\n",
542 		    DEVNAME(sc));
543 		goto free_queues;
544 	}
545 
546 	if (mpii_portfacts(sc) != 0) {
547 		printf("%s: unable to get portfacts\n", DEVNAME(sc));
548 		goto free_queues;
549 	}
550 
551 	if (mpii_target_map(sc) != 0) {
552 		printf("%s: unable to setup target mappings\n", DEVNAME(sc));
553 		goto free_queues;
554 	}
555 
556 	if (mpii_cfg_coalescing(sc) != 0) {
557 		printf("%s: unable to configure coalescing\n", DEVNAME(sc));
558 		goto free_queues;
559 	}
560 
561 	/* XXX bail on unsupported porttype? */
562 	if ((sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_PHYSICAL) ||
563 	    (sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_VIRTUAL) ||
564 	    (sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_TRI_MODE)) {
565 		if (mpii_eventnotify(sc) != 0) {
566 			printf("%s: unable to enable events\n", DEVNAME(sc));
567 			goto free_queues;
568 		}
569 	}
570 
571 	sc->sc_devs = mallocarray(sc->sc_max_devices,
572 	    sizeof(struct mpii_device *), M_DEVBUF, M_NOWAIT | M_ZERO);
573 	if (sc->sc_devs == NULL) {
574 		printf("%s: unable to allocate memory for mpii_device\n",
575 		    DEVNAME(sc));
576 		goto free_queues;
577 	}
578 
579 	if (mpii_portenable(sc) != 0) {
580 		printf("%s: unable to enable port\n", DEVNAME(sc));
581 		goto free_devs;
582 	}
583 
584 	/* we should be good to go now, attach scsibus */
585 	sc->sc_link.adapter = &mpii_switch;
586 	sc->sc_link.adapter_softc = sc;
587 	sc->sc_link.adapter_target = -1;
588 	sc->sc_link.adapter_buswidth = sc->sc_max_devices;
589 	sc->sc_link.luns = 1;
590 	sc->sc_link.openings = sc->sc_max_cmds - 1;
591 	sc->sc_link.pool = &sc->sc_iopool;
592 
593 	memset(&saa, 0, sizeof(saa));
594 	saa.saa_sc_link = &sc->sc_link;
595 
596 	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO,
597 	    mpii_intr, sc, sc->sc_dev.dv_xname);
598 	if (sc->sc_ih == NULL)
599 		goto free_devs;
600 
601 	/* force autoconf to wait for the first sas discovery to complete */
602 	sc->sc_pending = 1;
603 	config_pending_incr();
604 
605 	/* config_found() returns the scsibus attached to us */
606 	sc->sc_scsibus = (struct scsibus_softc *) config_found(&sc->sc_dev,
607 	    &saa, scsiprint);
608 
609 	/* enable interrupts */
610 	mpii_write(sc, MPII_INTR_MASK, MPII_INTR_MASK_DOORBELL
611 	    | MPII_INTR_MASK_RESET);
612 
613 #if NBIO > 0
614 	if (ISSET(sc->sc_flags, MPII_F_RAID)) {
615 		if (bio_register(&sc->sc_dev, mpii_ioctl) != 0)
616 			panic("%s: controller registration failed",
617 			    DEVNAME(sc));
618 		else
619 			sc->sc_ioctl = mpii_ioctl;
620 
621 #ifndef SMALL_KERNEL
622 		if (mpii_create_sensors(sc) != 0)
623 			printf("%s: unable to create sensors\n", DEVNAME(sc));
624 #endif
625 	}
626 #endif
627 
628 	return;
629 
630 free_devs:
631 	free(sc->sc_devs, M_DEVBUF, 0);
632 	sc->sc_devs = NULL;
633 
634 free_queues:
635 	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_freeq),
636 	    0, sc->sc_reply_free_qdepth * 4, BUS_DMASYNC_POSTREAD);
637 	mpii_dmamem_free(sc, sc->sc_reply_freeq);
638 
639 	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
640 	    0, sc->sc_reply_post_qdepth * 8, BUS_DMASYNC_POSTREAD);
641 	mpii_dmamem_free(sc, sc->sc_reply_postq);
642 
643 free_replies:
644 	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
645 		0, PAGE_SIZE, BUS_DMASYNC_POSTREAD);
646 	mpii_dmamem_free(sc, sc->sc_replies);
647 
648 free_ccbs:
649 	while ((ccb = mpii_get_ccb(sc)) != NULL)
650 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
651 	mpii_dmamem_free(sc, sc->sc_requests);
652 	free(sc->sc_ccbs, M_DEVBUF, 0);
653 
654 unmap:
655 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
656 	sc->sc_ios = 0;
657 }
658 
/*
 * Detach: tear down the interrupt handler and unmap the register
 * window if mpii_attach() set them up.  Always succeeds.
 */
int
mpii_detach(struct device *self, int flags)
{
	struct mpii_softc		*sc = (struct mpii_softc *)self;

	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}
	if (sc->sc_ios != 0) {
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
		sc->sc_ios = 0;
	}

	return (0);
}
675 
/*
 * Interrupt handler.  Drains the reply post queue under sc_rep_mtx,
 * sorting entries into a local list of completed ccbs (command
 * replies, smid != 0) and a local list of async event rcbs (smid 0),
 * then runs completions and event processing after dropping the
 * mutex.  Returns 1 if any reply was consumed, 0 otherwise.
 */
int
mpii_intr(void *arg)
{
	struct mpii_rcb_list		evts = SIMPLEQ_HEAD_INITIALIZER(evts);
	struct mpii_ccb_list		ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_softc		*sc = arg;
	struct mpii_reply_descr		*postq = sc->sc_reply_postq_kva, *rdp;
	struct mpii_ccb			*ccb;
	struct mpii_rcb			*rcb;
	int				smid;
	u_int				idx;
	int				rv = 0;

	mtx_enter(&sc->sc_rep_mtx);
	bus_dmamap_sync(sc->sc_dmat,
	    MPII_DMA_MAP(sc->sc_reply_postq),
	    0, sc->sc_reply_post_qdepth * sizeof(*rdp),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	idx = sc->sc_reply_post_host_index;
	for (;;) {
		rdp = &postq[idx];
		/* an unused descriptor means we have caught up with the ioc */
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		smid = lemtoh16(&rdp->smid);
		rcb = mpii_reply(sc, rdp);

		/* smid 0 is an async event, otherwise a command reply */
		if (smid) {
			ccb = &sc->sc_ccbs[smid - 1];
			ccb->ccb_state = MPII_CCB_READY;
			ccb->ccb_rcb = rcb;
			SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		} else
			SIMPLEQ_INSERT_TAIL(&evts, rcb, rcb_link);

		/* the post queue is a ring */
		if (++idx >= sc->sc_reply_post_qdepth)
			idx = 0;

		rv = 1;
	}

	bus_dmamap_sync(sc->sc_dmat,
	    MPII_DMA_MAP(sc->sc_reply_postq),
	    0, sc->sc_reply_post_qdepth * sizeof(*rdp),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* tell the ioc how far we have consumed */
	if (rv)
		mpii_write_reply_post(sc, sc->sc_reply_post_host_index = idx);

	mtx_leave(&sc->sc_rep_mtx);

	if (rv == 0)
		return (0);

	/* run completions and events without holding sc_rep_mtx */
	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		ccb->ccb_done(ccb);
	}
	while ((rcb = SIMPLEQ_FIRST(&evts)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&evts, rcb_link);
		mpii_event_process(sc, rcb);
	}

	return (1);
}
750 
/*
 * Build the IEEE-format (SAS3) scatter/gather list for a scsi io.
 * SGEs immediately follow the io request frame; if the transfer needs
 * more SGEs than fit before sc_chain_sge, a chain element is placed
 * there pointing at the rest of the list further into the same frame.
 * Returns 0 on success, 1 if the dmamap could not be loaded.
 */
int
mpii_load_xs_sas3(struct mpii_ccb *ccb)
{
	struct mpii_softc	*sc = ccb->ccb_sc;
	struct scsi_xfer	*xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io	*io = ccb->ccb_cmd;
	struct mpii_ieee_sge	*csge, *nsge, *sge;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;
	int			i, error;

	/* Request frame structure is described in the mpii_iocfacts */
	nsge = (struct mpii_ieee_sge *)(io + 1);

	/* zero length transfer still requires an SGE */
	if (xs->datalen == 0) {
		nsge->sg_flags = MPII_IEEE_SGE_END_OF_LIST;
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL,
	    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* a chain element is only needed if the sgl overflows the frame */
	csge = NULL;
	if (dmap->dm_nsegs > sc->sc_chain_sge) {
		csge = nsge + sc->sc_chain_sge;

		/* offset to the chain sge from the beginning */
		io->chain_offset = ((caddr_t)csge - (caddr_t)io) / sizeof(*sge);
	}

	for (i = 0; i < dmap->dm_nsegs; i++, nsge++) {
		if (nsge == csge) {
			/* fill in the chain element and continue after it */
			nsge++;

			/* address of the next sge */
			htolem64(&csge->sg_addr, ccb->ccb_cmd_dva +
			    ((caddr_t)nsge - (caddr_t)io));
			htolem32(&csge->sg_len, (dmap->dm_nsegs - i) *
			    sizeof(*sge));
			csge->sg_next_chain_offset = 0;
			csge->sg_flags = MPII_IEEE_SGE_CHAIN_ELEMENT |
			    MPII_IEEE_SGE_ADDR_SYSTEM;

			/*
			 * NOTE(review): when the remainder still exceeds
			 * sc_max_chain, another chain element is expected
			 * sc_max_chain entries further on — confirm against
			 * the sgl sizing in mpii_iocfacts.
			 */
			if ((dmap->dm_nsegs - i) > sc->sc_max_chain) {
				csge->sg_next_chain_offset = sc->sc_max_chain;
				csge += sc->sc_max_chain;
			}
		}

		sge = nsge;
		sge->sg_flags = MPII_IEEE_SGE_ADDR_SYSTEM;
		sge->sg_next_chain_offset = 0;
		htolem32(&sge->sg_len, dmap->dm_segs[i].ds_len);
		htolem64(&sge->sg_addr, dmap->dm_segs[i].ds_addr);
	}

	/* terminate list */
	sge->sg_flags |= MPII_IEEE_SGE_END_OF_LIST;

	/* make the data buffer visible to the device before the io starts */
	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
820 
/*
 * Build the classic MPI (SAS2) scatter/gather list for a scsi io.
 * Simple 64-bit SGEs follow the request frame; a chain SGE at
 * sc_chain_sge links to the remainder of the list when it does not
 * fit.  Returns 0 on success, 1 if the dmamap could not be loaded.
 */
int
mpii_load_xs(struct mpii_ccb *ccb)
{
	struct mpii_softc	*sc = ccb->ccb_sc;
	struct scsi_xfer	*xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io	*io = ccb->ccb_cmd;
	struct mpii_sge		*csge, *nsge, *sge;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;
	u_int32_t		flags;
	u_int16_t		len;
	int			i, error;

	/* Request frame structure is described in the mpii_iocfacts */
	nsge = (struct mpii_sge *)(io + 1);
	csge = nsge + sc->sc_chain_sge;

	/* zero length transfer still requires an SGE */
	if (xs->datalen == 0) {
		nsge->sg_hdr = htole32(MPII_SGE_FL_TYPE_SIMPLE |
		    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL);
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL,
	    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* safe default starting flags */
	flags = MPII_SGE_FL_TYPE_SIMPLE | MPII_SGE_FL_SIZE_64;
	if (xs->flags & SCSI_DATA_OUT)
		flags |= MPII_SGE_FL_DIR_OUT;

	for (i = 0; i < dmap->dm_nsegs; i++, nsge++) {
		if (nsge == csge) {
			/* fill in the chain sge and continue after it */
			nsge++;
			/* offset to the chain sge from the beginning */
			io->chain_offset = ((caddr_t)csge - (caddr_t)io) / 4;
			/* length of the sgl segment we're pointing to */
			len = (dmap->dm_nsegs - i) * sizeof(*sge);
			htolem32(&csge->sg_hdr, MPII_SGE_FL_TYPE_CHAIN |
			    MPII_SGE_FL_SIZE_64 | len);
			/* address of the next sge */
			mpii_dvatosge(csge, ccb->ccb_cmd_dva +
			    ((caddr_t)nsge - (caddr_t)io));
		}

		sge = nsge;
		htolem32(&sge->sg_hdr, flags | dmap->dm_segs[i].ds_len);
		mpii_dvatosge(sge, dmap->dm_segs[i].ds_addr);
	}

	/* terminate list */
	sge->sg_hdr |= htole32(MPII_SGE_FL_LAST | MPII_SGE_FL_EOB |
	    MPII_SGE_FL_EOL);

	/* make the data buffer visible to the device before the io starts */
	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
885 
/*
 * Midlayer probe hook: decide whether a target exists and fill in its
 * wwns.  Raid volumes are looked up via raid volume page 1; bare
 * devices via SAS device page 0.  Returns 0 when the target may
 * attach, non-zero otherwise.
 */
int
mpii_scsi_probe(struct scsi_link *link)
{
	struct mpii_softc *sc = link->adapter_softc;
	struct mpii_cfg_sas_dev_pg0 pg0;
	struct mpii_ecfg_hdr ehdr;
	struct mpii_device *dev;
	uint32_t address;
	int flags;

	/* only sas-capable port types are supported */
	if ((sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_SAS_PHYSICAL) &&
	    (sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_SAS_VIRTUAL) &&
	    (sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_TRI_MODE))
		return (ENXIO);

	dev = sc->sc_devs[link->target];
	if (dev == NULL)
		return (1);

	/* skip targets not exposed to the scsi layer */
	flags = dev->flags;
	if (ISSET(flags, MPII_DF_HIDDEN) || ISSET(flags, MPII_DF_UNUSED))
		return (1);

	if (ISSET(flags, MPII_DF_VOLUME)) {
		struct mpii_cfg_hdr hdr;
		struct mpii_cfg_raid_vol_pg1 vpg;
		size_t pagelen;

		address = MPII_CFG_RAID_VOL_ADDR_HANDLE | dev->dev_handle;

		if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL,
		    1, address, MPII_PG_POLL, &hdr) != 0)
			return (EINVAL);

		memset(&vpg, 0, sizeof(vpg));
		/* avoid stack trash on future page growth */
		pagelen = min(sizeof(vpg), hdr.page_length * 4);

		if (mpii_req_cfg_page(sc, address, MPII_PG_POLL, &hdr, 1,
		    &vpg, pagelen) != 0)
			return (EINVAL);

		link->port_wwn = letoh64(vpg.wwid);
		/*
		 * WWIDs generated by LSI firmware are not IEEE NAA compliant
		 * and historical practise in OBP on sparc64 is to set the top
		 * nibble to 3 to indicate that this is a RAID volume.
		 */
		link->port_wwn &= 0x0fffffffffffffff;
		link->port_wwn |= 0x3000000000000000;

		return (0);
	}

	/* fetch SAS device page 0 to learn the device's addresses */
	memset(&ehdr, 0, sizeof(ehdr));
	ehdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_EXTENDED;
	ehdr.page_number = 0;
	ehdr.page_version = 0;
	ehdr.ext_page_type = MPII_CONFIG_REQ_EXTPAGE_TYPE_SAS_DEVICE;
	ehdr.ext_page_length = htole16(sizeof(pg0) / 4); /* dwords */

	address = MPII_PGAD_SAS_DEVICE_FORM_HANDLE | (uint32_t)dev->dev_handle;
	if (mpii_req_cfg_page(sc, address, MPII_PG_EXTENDED,
	    &ehdr, 1, &pg0, sizeof(pg0)) != 0) {
		printf("%s: unable to fetch SAS device page 0 for target %u\n",
		    DEVNAME(sc), link->target);

		return (0); /* the handle should still work */
	}

	link->port_wwn = letoh64(pg0.sas_addr);
	link->node_wwn = letoh64(pg0.device_name);

	/* flag atapi devices for special handling in the midlayer */
	if (ISSET(lemtoh32(&pg0.device_info),
	    MPII_CFG_SAS_DEV_0_DEVINFO_ATAPI_DEVICE)) {
		link->flags |= SDEV_ATAPI;
		link->quirks |= SDEV_ONLYBIG;
	}

	return (0);
}
967 
/*
 * Read a 32-bit controller register at offset r.  The read barrier
 * comes first so this access is not reordered with earlier register
 * accesses.
 */
u_int32_t
mpii_read(struct mpii_softc *sc, bus_size_t r)
{
	u_int32_t			rv;

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);

	DNPRINTF(MPII_D_RW, "%s: mpii_read %#lx %#x\n", DEVNAME(sc), r, rv);

	return (rv);
}
981 
/*
 * Write a 32-bit controller register at offset r.  The write barrier
 * follows the store so it is pushed out before later accesses.
 */
void
mpii_write(struct mpii_softc *sc, bus_size_t r, u_int32_t v)
{
	DNPRINTF(MPII_D_RW, "%s: mpii_write %#lx %#x\n", DEVNAME(sc), r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
991 
992 
993 int
994 mpii_wait_eq(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
995     u_int32_t target)
996 {
997 	int			i;
998 
999 	DNPRINTF(MPII_D_RW, "%s: mpii_wait_eq %#lx %#x %#x\n", DEVNAME(sc), r,
1000 	    mask, target);
1001 
1002 	for (i = 0; i < 15000; i++) {
1003 		if ((mpii_read(sc, r) & mask) == target)
1004 			return (0);
1005 		delay(1000);
1006 	}
1007 
1008 	return (1);
1009 }
1010 
1011 int
1012 mpii_wait_ne(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
1013     u_int32_t target)
1014 {
1015 	int			i;
1016 
1017 	DNPRINTF(MPII_D_RW, "%s: mpii_wait_ne %#lx %#x %#x\n", DEVNAME(sc), r,
1018 	    mask, target);
1019 
1020 	for (i = 0; i < 15000; i++) {
1021 		if ((mpii_read(sc, r) & mask) != target)
1022 			return (0);
1023 		delay(1000);
1024 	}
1025 
1026 	return (1);
1027 }
1028 
/*
 * Bring the IOC to a usable state.  Waits for the chip to come out of
 * reset, then, unless a PCI peer already owns the IOC, makes up to five
 * passes over the doorbell state machine, resetting the IOC as needed
 * until it reports READY.  Returns 0 on success, 1 on failure.
 */
int
mpii_init(struct mpii_softc *sc)
{
	u_int32_t		db;
	int			i;

	/* spin until the ioc leaves the reset state */
	if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_RESET) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_init timeout waiting to leave "
		    "reset state\n", DEVNAME(sc));
		return (1);
	}

	/* check current ownership */
	db = mpii_read_db(sc);
	if ((db & MPII_DOORBELL_WHOINIT) == MPII_DOORBELL_WHOINIT_PCIPEER) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_init initialised by pci peer\n",
		    DEVNAME(sc));
		return (0);
	}

	/* at most five attempts to drive the IOC to the READY state */
	for (i = 0; i < 5; i++) {
		switch (db & MPII_DOORBELL_STATE) {
		case MPII_DOORBELL_STATE_READY:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is ready\n",
			    DEVNAME(sc));
			return (0);

		case MPII_DOORBELL_STATE_OPER:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is oper\n",
			    DEVNAME(sc));
			/* prefer the gentler reset when the IOC supports
			 * event replay */
			if (sc->sc_ioc_event_replay)
				mpii_reset_soft(sc);
			else
				mpii_reset_hard(sc);
			break;

		case MPII_DOORBELL_STATE_FAULT:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is being "
			    "reset hard\n" , DEVNAME(sc));
			mpii_reset_hard(sc);
			break;

		case MPII_DOORBELL_STATE_RESET:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init waiting to come "
			    "out of reset\n", DEVNAME(sc));
			if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
			    MPII_DOORBELL_STATE_RESET) != 0)
				return (1);
			break;
		}
		db = mpii_read_db(sc);
	}

	return (1);
}
1086 
/*
 * Request a message unit (soft) reset through the doorbell.  Returns 0
 * once the IOC reports the READY state, 1 if the doorbell is busy or
 * the IOC does not acknowledge/complete the reset in time.
 */
int
mpii_reset_soft(struct mpii_softc *sc)
{
	DNPRINTF(MPII_D_MISC, "%s: mpii_reset_soft\n", DEVNAME(sc));

	/* the doorbell must be free before a reset can be posted */
	if (mpii_read_db(sc) & MPII_DOORBELL_INUSE) {
		return (1);
	}

	mpii_write_db(sc,
	    MPII_DOORBELL_FUNCTION(MPII_FUNCTION_IOC_MESSAGE_UNIT_RESET));

	/* XXX LSI waits 15 sec */
	if (mpii_wait_db_ack(sc) != 0)
		return (1);

	/* XXX LSI waits 15 sec */
	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_READY) != 0)
		return (1);

	/* XXX wait for Sys2IOCDB bit to clear in HIS?? */

	return (0);
}
1112 
/*
 * Hard-reset the IOC through the host diagnostic register: unlock the
 * register with the magic write sequence, set the adapter reset bit,
 * poll for that bit to clear, then re-lock the register.  Returns 0 on
 * success, 1 if diagnostic write access could not be enabled.
 */
int
mpii_reset_hard(struct mpii_softc *sc)
{
	u_int16_t		i;

	DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard\n", DEVNAME(sc));

	mpii_write_intr(sc, 0);

	/* enable diagnostic register */
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_FLUSH);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_1);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_2);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_3);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_4);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_5);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_6);

	delay(100);

	if ((mpii_read(sc, MPII_HOSTDIAG) & MPII_HOSTDIAG_DWRE) == 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard failure to enable "
		    "diagnostic read/write\n", DEVNAME(sc));
		return(1);
	}

	/* reset ioc */
	mpii_write(sc, MPII_HOSTDIAG, MPII_HOSTDIAG_RESET_ADAPTER);

	/* 240 milliseconds */
	delay(240000);


	/* XXX this whole function should be more robust */

	/* XXX  read the host diagnostic reg until reset adapter bit clears ? */
	/* poll in 10ms steps; falls through even if the bit never clears */
	for (i = 0; i < 30000; i++) {
		if ((mpii_read(sc, MPII_HOSTDIAG) &
		    MPII_HOSTDIAG_RESET_ADAPTER) == 0)
			break;
		delay(10000);
	}

	/* disable diagnostic register */
	mpii_write(sc, MPII_WRITESEQ, 0xff);

	/* XXX what else? */

	DNPRINTF(MPII_D_MISC, "%s: done with mpii_reset_hard\n", DEVNAME(sc));

	return(0);
}
1165 
/*
 * Send a request to the IOC via the doorbell handshake protocol: post
 * the handshake function with the dword count, wait for and ack the
 * doorbell interrupt, then write the request one dword at a time,
 * waiting for an ack after each write.  Returns 0 on success, 1 if the
 * doorbell is busy or a wait times out.
 */
int
mpii_handshake_send(struct mpii_softc *sc, void *buf, size_t dwords)
{
	u_int32_t		*query = buf;
	int			i;

	/* make sure the doorbell is not in use. */
	if (mpii_read_db(sc) & MPII_DOORBELL_INUSE)
		return (1);

	/* clear pending doorbell interrupts */
	if (mpii_read_intr(sc) & MPII_INTR_STATUS_IOC2SYSDB)
		mpii_write_intr(sc, 0);

	/*
	 * first write the doorbell with the handshake function and the
	 * dword count.
	 */
	mpii_write_db(sc, MPII_DOORBELL_FUNCTION(MPII_FUNCTION_HANDSHAKE) |
	    MPII_DOORBELL_DWORDS(dwords));

	/*
	 * the doorbell used bit will be set because a doorbell function has
	 * started. wait for the interrupt and then ack it.
	 */
	if (mpii_wait_db_int(sc) != 0)
		return (1);
	mpii_write_intr(sc, 0);

	/* poll for the acknowledgement. */
	if (mpii_wait_db_ack(sc) != 0)
		return (1);

	/* write the query through the doorbell. */
	for (i = 0; i < dwords; i++) {
		mpii_write_db(sc, htole32(query[i]));
		if (mpii_wait_db_ack(sc) != 0)
			return (1);
	}

	return (0);
}
1208 
/*
 * Read one 32-bit dword of a handshake reply.  The IOC delivers it as
 * two consecutive 16-bit doorbell reads, each preceded by a doorbell
 * interrupt that is acked after the data is taken.  Returns 0 on
 * success, 1 on interrupt timeout.
 */
int
mpii_handshake_recv_dword(struct mpii_softc *sc, u_int32_t *dword)
{
	u_int16_t		*words = (u_int16_t *)dword;
	int			i;

	for (i = 0; i < 2; i++) {
		if (mpii_wait_db_int(sc) != 0)
			return (1);
		words[i] = letoh16(mpii_read_db(sc) & MPII_DOORBELL_DATA_MASK);
		mpii_write_intr(sc, 0);
	}

	return (0);
}
1224 
/*
 * Read a handshake reply from the doorbell into buf.  The first dword
 * carries the reply header whose msg_length (in dwords) bounds the
 * transfer; dwords beyond the caller's buffer are drained and
 * discarded.  Waits for the doorbell to go idle before returning.
 * Returns 0 on success, 1 on timeout.
 */
int
mpii_handshake_recv(struct mpii_softc *sc, void *buf, size_t dwords)
{
	struct mpii_msg_reply	*reply = buf;
	u_int32_t		*dbuf = buf, dummy;
	int			i;

	/* get the first dword so we can read the length out of the header. */
	if (mpii_handshake_recv_dword(sc, &dbuf[0]) != 0)
		return (1);

	DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dwords: %lu reply: %d\n",
	    DEVNAME(sc), dwords, reply->msg_length);

	/*
	 * the total length, in dwords, is in the message length field of the
	 * reply header.
	 */
	for (i = 1; i < MIN(dwords, reply->msg_length); i++) {
		if (mpii_handshake_recv_dword(sc, &dbuf[i]) != 0)
			return (1);
	}

	/* if there's extra stuff to come off the ioc, discard it */
	while (i++ < reply->msg_length) {
		if (mpii_handshake_recv_dword(sc, &dummy) != 0)
			return (1);
		DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dummy read: "
		    "0x%08x\n", DEVNAME(sc), dummy);
	}

	/* wait for the doorbell used bit to be reset and clear the intr */
	if (mpii_wait_db_int(sc) != 0)
		return (1);

	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_INUSE, 0) != 0)
		return (1);

	mpii_write_intr(sc, 0);

	return (0);
}
1267 
/*
 * Completion callback for commands whose reply needs no
 * post-processing by the submitter.
 */
void
mpii_empty_done(struct mpii_ccb *ccb)
{
	/* intentionally empty */
}
1273 
/*
 * Fetch IOC_FACTS via the doorbell handshake and derive the driver's
 * operating limits from it: command credit, reply queue depths,
 * request frame size, SGE flavour (IEEE SGEs on SAS3/3.5 parts) and
 * the scatter-gather limits.  Returns 0 on success, 1 on handshake
 * failure or if the reply post queue would be too shallow.
 */
int
mpii_iocfacts(struct mpii_softc *sc)
{
	struct mpii_msg_iocfacts_request	ifq;
	struct mpii_msg_iocfacts_reply		ifp;
	int					irs;
	int					sge_size;
	u_int					qdepth;

	DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts\n", DEVNAME(sc));

	memset(&ifq, 0, sizeof(ifq));
	memset(&ifp, 0, sizeof(ifp));

	ifq.function = MPII_FUNCTION_IOC_FACTS;

	if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	sc->sc_ioc_number = ifp.ioc_number;
	sc->sc_vf_id = ifp.vf_id;

	sc->sc_max_volumes = ifp.max_volumes;
	sc->sc_max_devices = ifp.max_volumes + lemtoh16(&ifp.max_targets);

	if (ISSET(lemtoh32(&ifp.ioc_capabilities),
	    MPII_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
		SET(sc->sc_flags, MPII_F_RAID);
	if (ISSET(lemtoh32(&ifp.ioc_capabilities),
	    MPII_IOCFACTS_CAPABILITY_EVENT_REPLAY))
		sc->sc_ioc_event_replay = 1;

	/* never exceed the driver's own ccb budget */
	sc->sc_max_cmds = MIN(lemtoh16(&ifp.request_credit),
	    MPII_REQUEST_CREDIT);

	/* SAS3 and 3.5 controllers have different sgl layouts */
	if (ifp.msg_version_maj == 2 && ((ifp.msg_version_min == 5)
	    || (ifp.msg_version_min == 6)))
		SET(sc->sc_flags, MPII_F_SAS3);

	/*
	 * The host driver must ensure that there is at least one
	 * unused entry in the Reply Free Queue. One way to ensure
	 * that this requirement is met is to never allocate a number
	 * of reply frames that is a multiple of 16.
	 */
	sc->sc_num_reply_frames = sc->sc_max_cmds + 32;
	if (!(sc->sc_num_reply_frames % 16))
		sc->sc_num_reply_frames--;

	/* must be multiple of 16 */
	sc->sc_reply_post_qdepth = sc->sc_max_cmds +
	    sc->sc_num_reply_frames;
	sc->sc_reply_post_qdepth += 16 - (sc->sc_reply_post_qdepth % 16);

	/* clamp to the hardware limit, shrinking the command budget too */
	qdepth = lemtoh16(&ifp.max_reply_descriptor_post_queue_depth);
	if (sc->sc_reply_post_qdepth > qdepth) {
		sc->sc_reply_post_qdepth = qdepth;
		if (sc->sc_reply_post_qdepth < 16) {
			printf("%s: RDPQ is too shallow\n", DEVNAME(sc));
			return (1);
		}
		sc->sc_max_cmds = sc->sc_reply_post_qdepth / 2 - 4;
		sc->sc_num_reply_frames = sc->sc_max_cmds + 4;
	}

	sc->sc_reply_free_qdepth = sc->sc_num_reply_frames +
	    16 - (sc->sc_num_reply_frames % 16);

	/*
	 * Our request frame for an I/O operation looks like this:
	 *
	 * +-------------------+ -.
	 * | mpii_msg_scsi_io  |  |
	 * +-------------------|  |
	 * | mpii_sge          |  |
	 * + - - - - - - - - - +  |
	 * | ...               |  > ioc_request_frame_size
	 * + - - - - - - - - - +  |
	 * | mpii_sge (tail)   |  |
	 * + - - - - - - - - - +  |
	 * | mpii_sge (csge)   |  | --.
	 * + - - - - - - - - - + -'   | chain sge points to the next sge
	 * | mpii_sge          |<-----'
	 * + - - - - - - - - - +
	 * | ...               |
	 * + - - - - - - - - - +
	 * | mpii_sge (tail)   |
	 * +-------------------+
	 * |                   |
	 * ~~~~~~~~~~~~~~~~~~~~~
	 * |                   |
	 * +-------------------+ <- sc_request_size - sizeof(scsi_sense_data)
	 * | scsi_sense_data   |
	 * +-------------------+
	 *
	 * If the controller gives us a maximum chain size, there can be
	 * multiple chain sges, each of which points to the sge following it.
	 * Otherwise, there will only be one chain sge.
	 */

	/* both sizes are in 32-bit words */
	sc->sc_reply_size = ifp.reply_frame_size * 4;
	irs = lemtoh16(&ifp.ioc_request_frame_size) * 4;
	sc->sc_request_size = MPII_REQUEST_SIZE;
	/* make sure we have enough space for scsi sense data */
	if (irs > sc->sc_request_size) {
		sc->sc_request_size = irs + sizeof(struct scsi_sense_data);
		sc->sc_request_size += 16 - (sc->sc_request_size % 16);
	}

	if (ISSET(sc->sc_flags, MPII_F_SAS3)) {
		sge_size = sizeof(struct mpii_ieee_sge);
	} else {
		sge_size = sizeof(struct mpii_sge);
	}

	/* offset to the chain sge */
	sc->sc_chain_sge = (irs - sizeof(struct mpii_msg_scsi_io)) /
	    sge_size - 1;

	sc->sc_max_chain = lemtoh16(&ifp.ioc_max_chain_seg_size);

	/*
	 * A number of simple scatter-gather elements we can fit into the
	 * request buffer after the I/O command minus the chain element(s).
	 */
	sc->sc_max_sgl = (sc->sc_request_size -
 	    sizeof(struct mpii_msg_scsi_io) - sizeof(struct scsi_sense_data)) /
	    sge_size - 1;
	if (sc->sc_max_chain > 0) {
		sc->sc_max_sgl -= (sc->sc_max_sgl - sc->sc_chain_sge) /
		    sc->sc_max_chain;
	}

	return (0);
}
1420 
/*
 * Send the IOC_INIT request through the doorbell handshake, handing the
 * IOC the host's request frame size, reply queue depths and the DMA
 * addresses of the request frames, reply frames and reply queues.
 * Returns 0 on success, 1 on handshake failure or if the IOC reports a
 * non-success status or a non-zero loginfo.
 */
int
mpii_iocinit(struct mpii_softc *sc)
{
	struct mpii_msg_iocinit_request		iiq;
	struct mpii_msg_iocinit_reply		iip;

	DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit\n", DEVNAME(sc));

	memset(&iiq, 0, sizeof(iiq));
	memset(&iip, 0, sizeof(iip));

	iiq.function = MPII_FUNCTION_IOC_INIT;
	iiq.whoinit = MPII_WHOINIT_HOST_DRIVER;

	/* XXX JPG do something about vf_id */
	iiq.vf_id = 0;

	iiq.msg_version_maj = 0x02;
	iiq.msg_version_min = 0x00;

	/* XXX JPG ensure compliance with some level and hard-code? */
	iiq.hdr_version_unit = 0x00;
	iiq.hdr_version_dev = 0x00;

	/* frame size is given to the IOC in 32-bit words */
	htolem16(&iiq.system_request_frame_size, sc->sc_request_size / 4);

	htolem16(&iiq.reply_descriptor_post_queue_depth,
	    sc->sc_reply_post_qdepth);

	htolem16(&iiq.reply_free_queue_depth, sc->sc_reply_free_qdepth);

	/* sense buffers live inside the request frames, so they share
	 * the same upper 32 address bits */
	htolem32(&iiq.sense_buffer_address_high,
	    MPII_DMA_DVA(sc->sc_requests) >> 32);

	htolem32(&iiq.system_reply_address_high,
	    MPII_DMA_DVA(sc->sc_replies) >> 32);

	htolem32(&iiq.system_request_frame_base_address_lo,
	    MPII_DMA_DVA(sc->sc_requests));
	htolem32(&iiq.system_request_frame_base_address_hi,
	    MPII_DMA_DVA(sc->sc_requests) >> 32);

	htolem32(&iiq.reply_descriptor_post_queue_address_lo,
	    MPII_DMA_DVA(sc->sc_reply_postq));
	htolem32(&iiq.reply_descriptor_post_queue_address_hi,
	    MPII_DMA_DVA(sc->sc_reply_postq) >> 32);

	htolem32(&iiq.reply_free_queue_address_lo,
	    MPII_DMA_DVA(sc->sc_reply_freeq));
	htolem32(&iiq.reply_free_queue_address_hi,
	    MPII_DMA_DVA(sc->sc_reply_freeq) >> 32);

	if (mpii_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &iip, dwordsof(iip)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	DNPRINTF(MPII_D_MISC, "%s:  function: 0x%02x msg_length: %d "
	    "whoinit: 0x%02x\n", DEVNAME(sc), iip.function,
	    iip.msg_length, iip.whoinit);
	DNPRINTF(MPII_D_MISC, "%s:  msg_flags: 0x%02x\n", DEVNAME(sc),
	    iip.msg_flags);
	DNPRINTF(MPII_D_MISC, "%s:  vf_id: 0x%02x vp_id: 0x%02x\n", DEVNAME(sc),
	    iip.vf_id, iip.vp_id);
	DNPRINTF(MPII_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
	    lemtoh16(&iip.ioc_status));
	DNPRINTF(MPII_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    lemtoh32(&iip.ioc_loginfo));

	if (lemtoh16(&iip.ioc_status) != MPII_IOCSTATUS_SUCCESS ||
	    lemtoh32(&iip.ioc_loginfo))
		return (1);

	return (0);
}
1503 
1504 void
1505 mpii_push_reply(struct mpii_softc *sc, struct mpii_rcb *rcb)
1506 {
1507 	u_int32_t		*rfp;
1508 	u_int			idx;
1509 
1510 	if (rcb == NULL)
1511 		return;
1512 
1513 	idx = sc->sc_reply_free_host_index;
1514 
1515 	rfp = MPII_DMA_KVA(sc->sc_reply_freeq);
1516 	htolem32(&rfp[idx], rcb->rcb_reply_dva);
1517 
1518 	if (++idx >= sc->sc_reply_free_qdepth)
1519 		idx = 0;
1520 
1521 	mpii_write_reply_free(sc, sc->sc_reply_free_host_index = idx);
1522 }
1523 
/*
 * Issue a PORT_FACTS request for port 0 and record the reported port
 * type in sc_porttype.  Returns 0 on success, 1 if no ccb is available,
 * polling fails or the IOC returns no reply.
 */
int
mpii_portfacts(struct mpii_softc *sc)
{
	struct mpii_msg_portfacts_request	*pfq;
	struct mpii_msg_portfacts_reply		*pfp;
	struct mpii_ccb				*ccb;
	int					rv = 1;

	DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts\n", DEVNAME(sc));

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts mpii_get_ccb fail\n",
		    DEVNAME(sc));
		return (rv);
	}

	ccb->ccb_done = mpii_empty_done;
	pfq = ccb->ccb_cmd;

	memset(pfq, 0, sizeof(*pfq));

	pfq->function = MPII_FUNCTION_PORT_FACTS;
	pfq->chain_offset = 0;
	pfq->msg_flags = 0;
	pfq->port_number = 0;
	pfq->vp_id = 0;
	pfq->vf_id = 0;

	if (mpii_poll(sc, ccb) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts poll\n",
		    DEVNAME(sc));
		goto err;
	}

	if (ccb->ccb_rcb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: empty portfacts reply\n",
		    DEVNAME(sc));
		goto err;
	}

	pfp = ccb->ccb_rcb->rcb_reply;
	sc->sc_porttype = pfp->port_type;

	mpii_push_reply(sc, ccb->ccb_rcb);
	rv = 0;
err:
	scsi_io_put(&sc->sc_iopool, ccb);

	return (rv);
}
1575 
/*
 * scsi_ioh callback: pop one reply off the event ack queue and send an
 * EVENT_ACK request for it using the ccb supplied in io.  If more
 * replies remain queued, reschedule the handler to get another ccb.
 */
void
mpii_eventack(void *cookie, void *io)
{
	struct mpii_softc			*sc = cookie;
	struct mpii_ccb				*ccb = io;
	struct mpii_rcb				*rcb, *next;
	struct mpii_msg_event_reply		*enp;
	struct mpii_msg_eventack_request	*eaq;

	/* next is only read below when rcb != NULL, i.e. after both have
	 * been set under the mutex */
	mtx_enter(&sc->sc_evt_ack_mtx);
	rcb = SIMPLEQ_FIRST(&sc->sc_evt_ack_queue);
	if (rcb != NULL) {
		next = SIMPLEQ_NEXT(rcb, rcb_link);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_ack_queue, rcb_link);
	}
	mtx_leave(&sc->sc_evt_ack_mtx);

	if (rcb == NULL) {
		/* queue drained in the meantime; return the unused ccb */
		scsi_io_put(&sc->sc_iopool, ccb);
		return;
	}

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;

	ccb->ccb_done = mpii_eventack_done;
	eaq = ccb->ccb_cmd;

	eaq->function = MPII_FUNCTION_EVENT_ACK;

	/* echo the event and its context back to the IOC */
	eaq->event = enp->event;
	eaq->event_context = enp->event_context;

	/* the reply frame is no longer needed once copied */
	mpii_push_reply(sc, rcb);

	mpii_start(sc, ccb);

	if (next != NULL)
		scsi_ioh_add(&sc->sc_evt_ack_handler);
}
1615 
1616 void
1617 mpii_eventack_done(struct mpii_ccb *ccb)
1618 {
1619 	struct mpii_softc			*sc = ccb->ccb_sc;
1620 
1621 	DNPRINTF(MPII_D_EVT, "%s: event ack done\n", DEVNAME(sc));
1622 
1623 	mpii_push_reply(sc, ccb->ccb_rcb);
1624 	scsi_io_put(&sc->sc_iopool, ccb);
1625 }
1626 
1627 int
1628 mpii_portenable(struct mpii_softc *sc)
1629 {
1630 	struct mpii_msg_portenable_request	*peq;
1631 	struct mpii_ccb				*ccb;
1632 
1633 	DNPRINTF(MPII_D_MISC, "%s: mpii_portenable\n", DEVNAME(sc));
1634 
1635 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1636 	if (ccb == NULL) {
1637 		DNPRINTF(MPII_D_MISC, "%s: mpii_portenable ccb_get\n",
1638 		    DEVNAME(sc));
1639 		return (1);
1640 	}
1641 
1642 	ccb->ccb_done = mpii_empty_done;
1643 	peq = ccb->ccb_cmd;
1644 
1645 	peq->function = MPII_FUNCTION_PORT_ENABLE;
1646 	peq->vf_id = sc->sc_vf_id;
1647 
1648 	if (mpii_poll(sc, ccb) != 0) {
1649 		DNPRINTF(MPII_D_MISC, "%s: mpii_portenable poll\n",
1650 		    DEVNAME(sc));
1651 		return (1);
1652 	}
1653 
1654 	if (ccb->ccb_rcb == NULL) {
1655 		DNPRINTF(MPII_D_MISC, "%s: empty portenable reply\n",
1656 		    DEVNAME(sc));
1657 		return (1);
1658 	}
1659 
1660 	mpii_push_reply(sc, ccb->ccb_rcb);
1661 	scsi_io_put(&sc->sc_iopool, ccb);
1662 
1663 	return (0);
1664 }
1665 
1666 int
1667 mpii_cfg_coalescing(struct mpii_softc *sc)
1668 {
1669 	struct mpii_cfg_hdr			hdr;
1670 	struct mpii_cfg_ioc_pg1			ipg;
1671 
1672 	hdr.page_version = 0;
1673 	hdr.page_length = sizeof(ipg) / 4;
1674 	hdr.page_number = 1;
1675 	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC;
1676 	memset(&ipg, 0, sizeof(ipg));
1677 	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &ipg,
1678 	    sizeof(ipg)) != 0) {
1679 		DNPRINTF(MPII_D_MISC, "%s: unable to fetch IOC page 1\n"
1680 		    "page 1\n", DEVNAME(sc));
1681 		return (1);
1682 	}
1683 
1684 	if (!ISSET(lemtoh32(&ipg.flags), MPII_CFG_IOC_1_REPLY_COALESCING))
1685 		return (0);
1686 
1687 	/* Disable coalescing */
1688 	CLR(ipg.flags, htole32(MPII_CFG_IOC_1_REPLY_COALESCING));
1689 	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 0, &ipg,
1690 	    sizeof(ipg)) != 0) {
1691 		DNPRINTF(MPII_D_MISC, "%s: unable to clear coalescing\n",
1692 		    DEVNAME(sc));
1693 		return (1);
1694 	}
1695 
1696 	return (0);
1697 }
1698 
/*
 * Event notification requests carry a 128-bit event mask; a set bit
 * masks (suppresses) the corresponding event.  Mask everything first,
 * then unmask the events of interest.
 */
#define MPII_EVENT_MASKALL(enq)		do {			\
		enq->event_masks[0] = 0xffffffff;		\
		enq->event_masks[1] = 0xffffffff;		\
		enq->event_masks[2] = 0xffffffff;		\
		enq->event_masks[3] = 0xffffffff;		\
	} while (0)

/* clear the mask bit for event "evt" so the IOC will report it */
#define MPII_EVENT_UNMASK(enq, evt)	do {			\
		enq->event_masks[evt / 32] &=			\
		    htole32(~(1 << (evt % 32)));		\
	} while (0)
1710 
/*
 * Set up the event machinery (SAS event queue/task, event ack
 * queue/handler) and send the EVENT_NOTIFICATION request that tells
 * the IOC which events to report.  The request stays outstanding; its
 * completion is handled by mpii_eventnotify_done().  Returns 0 on
 * success, 1 if no ccb is available.
 */
int
mpii_eventnotify(struct mpii_softc *sc)
{
	struct mpii_msg_event_request		*enq;
	struct mpii_ccb				*ccb;

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_eventnotify ccb_get\n",
		    DEVNAME(sc));
		return (1);
	}

	SIMPLEQ_INIT(&sc->sc_evt_sas_queue);
	mtx_init(&sc->sc_evt_sas_mtx, IPL_BIO);
	task_set(&sc->sc_evt_sas_task, mpii_event_sas, sc);

	SIMPLEQ_INIT(&sc->sc_evt_ack_queue);
	mtx_init(&sc->sc_evt_ack_mtx, IPL_BIO);
	scsi_ioh_set(&sc->sc_evt_ack_handler, &sc->sc_iopool,
	    mpii_eventack, sc);

	ccb->ccb_done = mpii_eventnotify_done;
	enq = ccb->ccb_cmd;

	enq->function = MPII_FUNCTION_EVENT_NOTIFICATION;

	/*
	 * Enable reporting of the following events:
	 *
	 * MPII_EVENT_SAS_DISCOVERY
	 * MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST
	 * MPII_EVENT_SAS_DEVICE_STATUS_CHANGE
	 * MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE
	 * MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST
	 * MPII_EVENT_IR_VOLUME
	 * MPII_EVENT_IR_PHYSICAL_DISK
	 * MPII_EVENT_IR_OPERATION_STATUS
	 */

	MPII_EVENT_MASKALL(enq);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DISCOVERY);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DEVICE_STATUS_CHANGE);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_VOLUME);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_PHYSICAL_DISK);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_OPERATION_STATUS);

	mpii_start(sc, ccb);

	return (0);
}
1765 
/*
 * Completion callback for the EVENT_NOTIFICATION request.  The reply
 * is an event notification itself, so release the ccb first and then
 * run the reply through the normal event processing path.
 */
void
mpii_eventnotify_done(struct mpii_ccb *ccb)
{
	struct mpii_softc			*sc = ccb->ccb_sc;
	struct mpii_rcb				*rcb = ccb->ccb_rcb;

	DNPRINTF(MPII_D_EVT, "%s: mpii_eventnotify_done\n", DEVNAME(sc));

	scsi_io_put(&sc->sc_iopool, ccb);
	mpii_event_process(sc, rcb);
}
1777 
/*
 * Handle an IR_CONFIGURATION_CHANGE_LIST event: walk the change-list
 * elements and keep the driver's device tree in sync -- create/remove
 * volume entries and mark underlying SAS drives as volume members or
 * hot spares (both hidden from the scsibus).
 */
void
mpii_event_raid(struct mpii_softc *sc, struct mpii_msg_event_reply *enp)
{
	struct mpii_evt_ir_cfg_change_list	*ccl;
	struct mpii_evt_ir_cfg_element		*ce;
	struct mpii_device			*dev;
	u_int16_t				type;
	int					i;

	/* the change list follows the event reply header */
	ccl = (struct mpii_evt_ir_cfg_change_list *)(enp + 1);
	if (ccl->num_elements == 0)
		return;

	if (ISSET(lemtoh32(&ccl->flags), MPII_EVT_IR_CFG_CHANGE_LIST_FOREIGN)) {
		/* bail on foreign configurations */
		return;
	}

	/* elements are packed directly after the change list header */
	ce = (struct mpii_evt_ir_cfg_element *)(ccl + 1);

	for (i = 0; i < ccl->num_elements; i++, ce++) {
		type = (lemtoh16(&ce->element_flags) &
		    MPII_EVT_IR_CFG_ELEMENT_TYPE_MASK);

		switch (type) {
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME:
			switch (ce->reason_code) {
			case MPII_EVT_IR_CFG_ELEMENT_RC_ADDED:
			case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_CREATED:
				if (mpii_find_dev(sc,
				    lemtoh16(&ce->vol_dev_handle))) {
					printf("%s: device %#x is already "
					    "configured\n", DEVNAME(sc),
					    lemtoh16(&ce->vol_dev_handle));
					break;
				}
				dev = malloc(sizeof(*dev), M_DEVBUF,
				    M_NOWAIT | M_ZERO);
				if (!dev) {
					printf("%s: failed to allocate a "
					    "device structure\n", DEVNAME(sc));
					break;
				}
				SET(dev->flags, MPII_DF_VOLUME);
				dev->slot = sc->sc_vd_id_low;
				dev->dev_handle = lemtoh16(&ce->vol_dev_handle);
				if (mpii_insert_dev(sc, dev)) {
					free(dev, M_DEVBUF, sizeof *dev);
					break;
				}
				sc->sc_vd_count++;
				break;
			case MPII_EVT_IR_CFG_ELEMENT_RC_REMOVED:
			case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_DELETED:
				if (!(dev = mpii_find_dev(sc,
				    lemtoh16(&ce->vol_dev_handle))))
					break;
				mpii_remove_dev(sc, dev);
				sc->sc_vd_count--;
				break;
			}
			break;
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME_DISK:
			if (ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_PD_CREATED ||
			    ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) {
				/* there should be an underlying sas drive */
				if (!(dev = mpii_find_dev(sc,
				    lemtoh16(&ce->phys_disk_dev_handle))))
					break;
				/* promoted from a hot spare? */
				CLR(dev->flags, MPII_DF_HOT_SPARE);
				SET(dev->flags, MPII_DF_VOLUME_DISK |
				    MPII_DF_HIDDEN);
			}
			break;
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_HOT_SPARE:
			if (ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) {
				/* there should be an underlying sas drive */
				if (!(dev = mpii_find_dev(sc,
				    lemtoh16(&ce->phys_disk_dev_handle))))
					break;
				SET(dev->flags, MPII_DF_HOT_SPARE |
				    MPII_DF_HIDDEN);
			}
			break;
		}
	}
}
1869 
/*
 * Task (runs on systq) that processes one queued SAS event: either a
 * discovery event or a topology change list.  For topology changes it
 * attaches newly added devices and detaches missing ones.  Reschedules
 * itself while more events remain on the queue.
 */
void
mpii_event_sas(void *xsc)
{
	struct mpii_softc *sc = xsc;
	struct mpii_rcb *rcb, *next;
	struct mpii_msg_event_reply *enp;
	struct mpii_evt_sas_tcl		*tcl;
	struct mpii_evt_phy_entry	*pe;
	struct mpii_device		*dev;
	int				i;
	u_int16_t			handle;

	/* next is only read below when rcb != NULL */
	mtx_enter(&sc->sc_evt_sas_mtx);
	rcb = SIMPLEQ_FIRST(&sc->sc_evt_sas_queue);
	if (rcb != NULL) {
		next = SIMPLEQ_NEXT(rcb, rcb_link);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_sas_queue, rcb_link);
	}
	mtx_leave(&sc->sc_evt_sas_mtx);

	if (rcb == NULL)
		return;
	if (next != NULL)
		task_add(systq, &sc->sc_evt_sas_task);

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;
	switch (lemtoh16(&enp->event)) {
	case MPII_EVENT_SAS_DISCOVERY:
		mpii_event_discovery(sc, enp);
		goto done;
	case MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		/* handle below */
		break;
	default:
		/* mpii_event_process() only queues the two events above */
		panic("%s: unexpected event %#x in sas event queue",
		    DEVNAME(sc), lemtoh16(&enp->event));
		/* NOTREACHED */
	}

	/* the change list and its phy entries follow the reply header */
	tcl = (struct mpii_evt_sas_tcl *)(enp + 1);
	pe = (struct mpii_evt_phy_entry *)(tcl + 1);

	for (i = 0; i < tcl->num_entries; i++, pe++) {
		switch (pe->phy_status & MPII_EVENT_SAS_TOPO_PS_RC_MASK) {
		case MPII_EVENT_SAS_TOPO_PS_RC_ADDED:
			handle = lemtoh16(&pe->dev_handle);
			if (mpii_find_dev(sc, handle)) {
				printf("%s: device %#x is already "
				    "configured\n", DEVNAME(sc), handle);
				break;
			}

			dev = malloc(sizeof(*dev), M_DEVBUF, M_WAITOK | M_ZERO);
			dev->slot = sc->sc_pd_id_start + tcl->start_phy_num + i;
			dev->dev_handle = handle;
			dev->phy_num = tcl->start_phy_num + i;
			if (tcl->enclosure_handle)
				dev->physical_port = tcl->physical_port;
			dev->enclosure = lemtoh16(&tcl->enclosure_handle);
			dev->expander = lemtoh16(&tcl->expander_handle);

			if (mpii_insert_dev(sc, dev)) {
				free(dev, M_DEVBUF, sizeof *dev);
				break;
			}

			if (sc->sc_scsibus != NULL)
				scsi_probe_target(sc->sc_scsibus, dev->slot);
			break;

		case MPII_EVENT_SAS_TOPO_PS_RC_MISSING:
			dev = mpii_find_dev(sc, lemtoh16(&pe->dev_handle));
			if (dev == NULL)
				break;

			mpii_remove_dev(sc, dev);
			mpii_sas_remove_device(sc, dev->dev_handle);
			/* hidden devices (volume members, spares) are not
			 * attached to the scsibus */
			if (sc->sc_scsibus != NULL &&
			    !ISSET(dev->flags, MPII_DF_HIDDEN)) {
				scsi_activate(sc->sc_scsibus, dev->slot, -1,
				    DVACT_DEACTIVATE);
				scsi_detach_target(sc->sc_scsibus, dev->slot,
				    DETACH_FORCE);
			}

			free(dev, M_DEVBUF, sizeof *dev);
			break;
		}
	}

done:
	mpii_event_done(sc, rcb);
}
1963 
1964 void
1965 mpii_event_discovery(struct mpii_softc *sc, struct mpii_msg_event_reply *enp)
1966 {
1967 	struct mpii_evt_sas_discovery *esd =
1968 	    (struct mpii_evt_sas_discovery *)(enp + 1);
1969 
1970 	if (sc->sc_pending == 0)
1971 		return;
1972 
1973 	switch (esd->reason_code) {
1974 	case MPII_EVENT_SAS_DISC_REASON_CODE_STARTED:
1975 		++sc->sc_pending;
1976 		break;
1977 	case MPII_EVENT_SAS_DISC_REASON_CODE_COMPLETED:
1978 		if (--sc->sc_pending == 1) {
1979 			sc->sc_pending = 0;
1980 			config_pending_decr();
1981 		}
1982 		break;
1983 	}
1984 }
1985 
/*
 * Dispatch an incoming event notification reply.  SAS discovery and
 * topology events are deferred to the sc_evt_sas_task (which may
 * sleep); IR events are handled in place; everything else is logged or
 * ignored.  Except for the deferred case, the reply is handed on to
 * mpii_event_done() for acking/recycling.
 */
void
mpii_event_process(struct mpii_softc *sc, struct mpii_rcb *rcb)
{
	struct mpii_msg_event_reply		*enp;

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;

	DNPRINTF(MPII_D_EVT, "%s: mpii_event_process: %#x\n", DEVNAME(sc),
	    lemtoh16(&enp->event));

	switch (lemtoh16(&enp->event)) {
	case MPII_EVENT_EVENT_CHANGE:
		/* should be properly ignored */
		break;
	case MPII_EVENT_SAS_DISCOVERY:
	case MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		/* defer to task context; mpii_event_done() runs there */
		mtx_enter(&sc->sc_evt_sas_mtx);
		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_sas_queue, rcb, rcb_link);
		mtx_leave(&sc->sc_evt_sas_mtx);
		task_add(systq, &sc->sc_evt_sas_task);
		return;
	case MPII_EVENT_SAS_DEVICE_STATUS_CHANGE:
		break;
	case MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		break;
	case MPII_EVENT_IR_VOLUME: {
		struct mpii_evt_ir_volume	*evd =
		    (struct mpii_evt_ir_volume *)(enp + 1);
		struct mpii_device		*dev;
#if NBIO > 0
		const char *vol_states[] = {
			BIOC_SVINVALID_S,
			BIOC_SVOFFLINE_S,
			BIOC_SVBUILDING_S,
			BIOC_SVONLINE_S,
			BIOC_SVDEGRADED_S,
			BIOC_SVONLINE_S,
		};
#endif

		if (cold)
			break;
		KERNEL_LOCK();
		dev = mpii_find_dev(sc, lemtoh16(&evd->vol_dev_handle));
		KERNEL_UNLOCK();
		if (dev == NULL)
			break;
#if NBIO > 0
		/* NOTE(review): prev_value/new_value index vol_states
		 * without a range check -- assumes the IOC only reports
		 * the states listed above; confirm against the MPI spec */
		if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATE_CHANGED)
			printf("%s: volume %d state changed from %s to %s\n",
			    DEVNAME(sc), dev->slot - sc->sc_vd_id_low,
			    vol_states[evd->prev_value],
			    vol_states[evd->new_value]);
#endif
		if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATUS_CHANGED &&
		    ISSET(evd->new_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC) &&
		    !ISSET(evd->prev_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
			printf("%s: started resync on a volume %d\n",
			    DEVNAME(sc), dev->slot - sc->sc_vd_id_low);
		}
		break;
	case MPII_EVENT_IR_PHYSICAL_DISK:
		break;
	case MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		mpii_event_raid(sc, enp);
		break;
	case MPII_EVENT_IR_OPERATION_STATUS: {
		struct mpii_evt_ir_status	*evs =
		    (struct mpii_evt_ir_status *)(enp + 1);
		struct mpii_device		*dev;

		KERNEL_LOCK();
		dev = mpii_find_dev(sc, lemtoh16(&evs->vol_dev_handle));
		KERNEL_UNLOCK();
		/* record resync progress for bio(4) status reporting */
		if (dev != NULL &&
		    evs->operation == MPII_EVENT_IR_RAIDOP_RESYNC)
			dev->percent = evs->percent;
		break;
		}
	default:
		DNPRINTF(MPII_D_EVT, "%s:  unhandled event 0x%02x\n",
		    DEVNAME(sc), lemtoh16(&enp->event));
	}

	mpii_event_done(sc, rcb);
}
2072 
2073 void
2074 mpii_event_done(struct mpii_softc *sc, struct mpii_rcb *rcb)
2075 {
2076 	struct mpii_msg_event_reply *enp = rcb->rcb_reply;
2077 
2078 	if (enp->ack_required) {
2079 		mtx_enter(&sc->sc_evt_ack_mtx);
2080 		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_ack_queue, rcb, rcb_link);
2081 		mtx_leave(&sc->sc_evt_ack_mtx);
2082 		scsi_ioh_add(&sc->sc_evt_ack_handler);
2083 	} else
2084 		mpii_push_reply(sc, rcb);
2085 }
2086 
/*
 * Tell the IOC to forget about the device identified by "handle".
 * Two commands are issued back to back with the same ccb: a SCSI task
 * management target reset for the handle, followed by a SAS IO unit
 * control REMOVE_DEVICE operation.  Returns silently if no ccb can be
 * obtained.
 */
void
mpii_sas_remove_device(struct mpii_softc *sc, u_int16_t handle)
{
	struct mpii_msg_scsi_task_request	*stq;
	struct mpii_msg_sas_oper_request	*soq;
	struct mpii_ccb				*ccb;

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL)
		return;

	/* first: target reset for the device handle */
	stq = ccb->ccb_cmd;
	stq->function = MPII_FUNCTION_SCSI_TASK_MGMT;
	stq->task_type = MPII_SCSI_TASK_TARGET_RESET;
	htolem16(&stq->dev_handle, handle);

	ccb->ccb_done = mpii_empty_done;
	mpii_wait(sc, ccb);

	if (ccb->ccb_rcb != NULL)
		mpii_push_reply(sc, ccb->ccb_rcb);

	/* reuse a ccb */
	ccb->ccb_state = MPII_CCB_READY;
	ccb->ccb_rcb = NULL;

	/* second: ask the SAS IO unit to remove the device */
	soq = ccb->ccb_cmd;
	memset(soq, 0, sizeof(*soq));
	soq->function = MPII_FUNCTION_SAS_IO_UNIT_CONTROL;
	soq->operation = MPII_SAS_OP_REMOVE_DEVICE;
	htolem16(&soq->dev_handle, handle);

	ccb->ccb_done = mpii_empty_done;
	mpii_wait(sc, ccb);
	if (ccb->ccb_rcb != NULL)
		mpii_push_reply(sc, ccb->ccb_rcb);

	scsi_io_put(&sc->sc_iopool, ccb);
}
2126 
/*
 * Gather and print identification for the attach message: IOC facts
 * (firmware and MPI message versions) fetched over the doorbell
 * handshake, plus the board name from manufacturing page 0.
 *
 * Returns 0 on success, non-zero on failure.  NOTE(review): error
 * paths return either 1 or EINVAL; both are non-zero, but the mix is
 * inconsistent.
 */
int
mpii_board_info(struct mpii_softc *sc)
{
	struct mpii_msg_iocfacts_request	ifq;
	struct mpii_msg_iocfacts_reply		ifp;
	struct mpii_cfg_manufacturing_pg0	mpg;
	struct mpii_cfg_hdr			hdr;

	memset(&ifq, 0, sizeof(ifq));
	memset(&ifp, 0, sizeof(ifp));

	ifq.function = MPII_FUNCTION_IOC_FACTS;

	if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: failed to request ioc facts\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: failed to receive ioc facts\n",
		    DEVNAME(sc));
		return (1);
	}

	/* fetch manufacturing page 0 via a hand-built header (polled) */
	hdr.page_version = 0;
	hdr.page_length = sizeof(mpg) / 4;	/* page length is in dwords */
	hdr.page_number = 0;
	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_MANUFACTURING;
	memset(&mpg, 0, sizeof(mpg));
	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &mpg,
	    sizeof(mpg)) != 0) {
		printf("%s: unable to fetch manufacturing page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	printf("%s: %s, firmware %u.%u.%u.%u%s, MPI %u.%u\n", DEVNAME(sc),
	    mpg.board_name, ifp.fw_version_maj, ifp.fw_version_min,
	    ifp.fw_version_unit, ifp.fw_version_dev,
	    ISSET(sc->sc_flags, MPII_F_RAID) ? " IR" : "",
	    ifp.msg_version_maj, ifp.msg_version_min);

	return (0);
}
2172 
/*
 * Work out how the IOC maps volumes and physical disks onto target
 * ids, using IOC page 8, and adjust sc_vd_id_low/sc_pd_id_start
 * accordingly.  Returns 0 on success, EINVAL if the page cannot be
 * fetched.
 */
int
mpii_target_map(struct mpii_softc *sc)
{
	struct mpii_cfg_hdr			hdr;
	struct mpii_cfg_ioc_pg8			ipg;
	int					flags, pad = 0;

	hdr.page_version = 0;
	hdr.page_length = sizeof(ipg) / 4;	/* page length is in dwords */
	hdr.page_number = 8;
	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC;
	memset(&ipg, 0, sizeof(ipg));
	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &ipg,
	    sizeof(ipg)) != 0) {
		printf("%s: unable to fetch ioc page 8\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/*
	 * If the IOC reserves target id 0, shift everything up by one
	 * so id 0 is never handed out.
	 */
	if (lemtoh16(&ipg.flags) & MPII_IOC_PG8_FLAGS_RESERVED_TARGETID_0)
		pad = 1;

	flags = lemtoh16(&ipg.ir_volume_mapping_flags) &
	    MPII_IOC_PG8_IRFLAGS_VOLUME_MAPPING_MODE_MASK;
	if (ISSET(sc->sc_flags, MPII_F_RAID)) {
		if (flags == MPII_IOC_PG8_IRFLAGS_LOW_VOLUME_MAPPING) {
			/*
			 * Volumes occupy the low target ids; physical
			 * disks start after the volume range.
			 */
			sc->sc_vd_id_low += pad;
			pad = sc->sc_max_volumes; /* for sc_pd_id_start */
		} else
			/* volumes are mapped at the top of the id space */
			sc->sc_vd_id_low = sc->sc_max_devices -
			    sc->sc_max_volumes;
	}

	sc->sc_pd_id_start += pad;

	return (0);
}
2210 
2211 int
2212 mpii_req_cfg_header(struct mpii_softc *sc, u_int8_t type, u_int8_t number,
2213     u_int32_t address, int flags, void *p)
2214 {
2215 	struct mpii_msg_config_request		*cq;
2216 	struct mpii_msg_config_reply		*cp;
2217 	struct mpii_ccb				*ccb;
2218 	struct mpii_cfg_hdr			*hdr = p;
2219 	struct mpii_ecfg_hdr			*ehdr = p;
2220 	int					etype = 0;
2221 	int					rv = 0;
2222 
2223 	DNPRINTF(MPII_D_MISC, "%s: mpii_req_cfg_header type: %#x number: %x "
2224 	    "address: 0x%08x flags: 0x%b\n", DEVNAME(sc), type, number,
2225 	    address, flags, MPII_PG_FMT);
2226 
2227 	ccb = scsi_io_get(&sc->sc_iopool,
2228 	    ISSET(flags, MPII_PG_POLL) ? SCSI_NOSLEEP : 0);
2229 	if (ccb == NULL) {
2230 		DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header ccb_get\n",
2231 		    DEVNAME(sc));
2232 		return (1);
2233 	}
2234 
2235 	if (ISSET(flags, MPII_PG_EXTENDED)) {
2236 		etype = type;
2237 		type = MPII_CONFIG_REQ_PAGE_TYPE_EXTENDED;
2238 	}
2239 
2240 	cq = ccb->ccb_cmd;
2241 
2242 	cq->function = MPII_FUNCTION_CONFIG;
2243 
2244 	cq->action = MPII_CONFIG_REQ_ACTION_PAGE_HEADER;
2245 
2246 	cq->config_header.page_number = number;
2247 	cq->config_header.page_type = type;
2248 	cq->ext_page_type = etype;
2249 	htolem32(&cq->page_address, address);
2250 	htolem32(&cq->page_buffer.sg_hdr, MPII_SGE_FL_TYPE_SIMPLE |
2251 	    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL);
2252 
2253 	ccb->ccb_done = mpii_empty_done;
2254 	if (ISSET(flags, MPII_PG_POLL)) {
2255 		if (mpii_poll(sc, ccb) != 0) {
2256 			DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
2257 			    DEVNAME(sc));
2258 			return (1);
2259 		}
2260 	} else
2261 		mpii_wait(sc, ccb);
2262 
2263 	if (ccb->ccb_rcb == NULL) {
2264 		scsi_io_put(&sc->sc_iopool, ccb);
2265 		return (1);
2266 	}
2267 	cp = ccb->ccb_rcb->rcb_reply;
2268 
2269 	DNPRINTF(MPII_D_MISC, "%s:  action: 0x%02x sgl_flags: 0x%02x "
2270 	    "msg_length: %d function: 0x%02x\n", DEVNAME(sc), cp->action,
2271 	    cp->sgl_flags, cp->msg_length, cp->function);
2272 	DNPRINTF(MPII_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
2273 	    "msg_flags: 0x%02x\n", DEVNAME(sc),
2274 	    lemtoh16(&cp->ext_page_length), cp->ext_page_type,
2275 	    cp->msg_flags);
2276 	DNPRINTF(MPII_D_MISC, "%s:  vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
2277 	    cp->vp_id, cp->vf_id);
2278 	DNPRINTF(MPII_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2279 	    lemtoh16(&cp->ioc_status));
2280 	DNPRINTF(MPII_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2281 	    lemtoh32(&cp->ioc_loginfo));
2282 	DNPRINTF(MPII_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
2283 	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2284 	    cp->config_header.page_version,
2285 	    cp->config_header.page_length,
2286 	    cp->config_header.page_number,
2287 	    cp->config_header.page_type);
2288 
2289 	if (lemtoh16(&cp->ioc_status) != MPII_IOCSTATUS_SUCCESS)
2290 		rv = 1;
2291 	else if (ISSET(flags, MPII_PG_EXTENDED)) {
2292 		memset(ehdr, 0, sizeof(*ehdr));
2293 		ehdr->page_version = cp->config_header.page_version;
2294 		ehdr->page_number = cp->config_header.page_number;
2295 		ehdr->page_type = cp->config_header.page_type;
2296 		ehdr->ext_page_length = cp->ext_page_length;
2297 		ehdr->ext_page_type = cp->ext_page_type;
2298 	} else
2299 		*hdr = cp->config_header;
2300 
2301 	mpii_push_reply(sc, ccb->ccb_rcb);
2302 	scsi_io_put(&sc->sc_iopool, ccb);
2303 
2304 	return (rv);
2305 }
2306 
2307 int
2308 mpii_req_cfg_page(struct mpii_softc *sc, u_int32_t address, int flags,
2309     void *p, int read, void *page, size_t len)
2310 {
2311 	struct mpii_msg_config_request		*cq;
2312 	struct mpii_msg_config_reply		*cp;
2313 	struct mpii_ccb				*ccb;
2314 	struct mpii_cfg_hdr			*hdr = p;
2315 	struct mpii_ecfg_hdr			*ehdr = p;
2316 	caddr_t					kva;
2317 	int					page_length;
2318 	int					rv = 0;
2319 
2320 	DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page address: %d read: %d "
2321 	    "type: %x\n", DEVNAME(sc), address, read, hdr->page_type);
2322 
2323 	page_length = ISSET(flags, MPII_PG_EXTENDED) ?
2324 	    lemtoh16(&ehdr->ext_page_length) : hdr->page_length;
2325 
2326 	if (len > sc->sc_request_size - sizeof(*cq) || len < page_length * 4)
2327 		return (1);
2328 
2329 	ccb = scsi_io_get(&sc->sc_iopool,
2330 	    ISSET(flags, MPII_PG_POLL) ? SCSI_NOSLEEP : 0);
2331 	if (ccb == NULL) {
2332 		DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page ccb_get\n",
2333 		    DEVNAME(sc));
2334 		return (1);
2335 	}
2336 
2337 	cq = ccb->ccb_cmd;
2338 
2339 	cq->function = MPII_FUNCTION_CONFIG;
2340 
2341 	cq->action = (read ? MPII_CONFIG_REQ_ACTION_PAGE_READ_CURRENT :
2342 	    MPII_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT);
2343 
2344 	if (ISSET(flags, MPII_PG_EXTENDED)) {
2345 		cq->config_header.page_version = ehdr->page_version;
2346 		cq->config_header.page_number = ehdr->page_number;
2347 		cq->config_header.page_type = ehdr->page_type;
2348 		cq->ext_page_len = ehdr->ext_page_length;
2349 		cq->ext_page_type = ehdr->ext_page_type;
2350 	} else
2351 		cq->config_header = *hdr;
2352 	cq->config_header.page_type &= MPII_CONFIG_REQ_PAGE_TYPE_MASK;
2353 	htolem32(&cq->page_address, address);
2354 	htolem32(&cq->page_buffer.sg_hdr, MPII_SGE_FL_TYPE_SIMPLE |
2355 	    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL |
2356 	    MPII_SGE_FL_SIZE_64 | (page_length * 4) |
2357 	    (read ? MPII_SGE_FL_DIR_IN : MPII_SGE_FL_DIR_OUT));
2358 
2359 	/* bounce the page via the request space to avoid more bus_dma games */
2360 	mpii_dvatosge(&cq->page_buffer, ccb->ccb_cmd_dva +
2361 	    sizeof(struct mpii_msg_config_request));
2362 
2363 	kva = ccb->ccb_cmd;
2364 	kva += sizeof(struct mpii_msg_config_request);
2365 
2366 	if (!read)
2367 		memcpy(kva, page, len);
2368 
2369 	ccb->ccb_done = mpii_empty_done;
2370 	if (ISSET(flags, MPII_PG_POLL)) {
2371 		if (mpii_poll(sc, ccb) != 0) {
2372 			DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
2373 			    DEVNAME(sc));
2374 			return (1);
2375 		}
2376 	} else
2377 		mpii_wait(sc, ccb);
2378 
2379 	if (ccb->ccb_rcb == NULL) {
2380 		scsi_io_put(&sc->sc_iopool, ccb);
2381 		return (1);
2382 	}
2383 	cp = ccb->ccb_rcb->rcb_reply;
2384 
2385 	DNPRINTF(MPII_D_MISC, "%s:  action: 0x%02x msg_length: %d "
2386 	    "function: 0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length,
2387 	    cp->function);
2388 	DNPRINTF(MPII_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
2389 	    "msg_flags: 0x%02x\n", DEVNAME(sc),
2390 	    lemtoh16(&cp->ext_page_length), cp->ext_page_type,
2391 	    cp->msg_flags);
2392 	DNPRINTF(MPII_D_MISC, "%s:  vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
2393 	    cp->vp_id, cp->vf_id);
2394 	DNPRINTF(MPII_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2395 	    lemtoh16(&cp->ioc_status));
2396 	DNPRINTF(MPII_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2397 	    lemtoh32(&cp->ioc_loginfo));
2398 	DNPRINTF(MPII_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
2399 	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2400 	    cp->config_header.page_version,
2401 	    cp->config_header.page_length,
2402 	    cp->config_header.page_number,
2403 	    cp->config_header.page_type);
2404 
2405 	if (lemtoh16(&cp->ioc_status) != MPII_IOCSTATUS_SUCCESS)
2406 		rv = 1;
2407 	else if (read)
2408 		memcpy(page, kva, len);
2409 
2410 	mpii_push_reply(sc, ccb->ccb_rcb);
2411 	scsi_io_put(&sc->sc_iopool, ccb);
2412 
2413 	return (rv);
2414 }
2415 
/*
 * Decode a posted reply descriptor.  For address replies the reply
 * frame index is recovered from the frame's DVA and the matching rcb
 * is returned after syncing its frame for the CPU; other descriptor
 * types yield NULL.  The descriptor is then reset to all-ones (the
 * "unused" pattern the post queue is initialised with) and the post
 * queue slot is synced back.
 */
struct mpii_rcb *
mpii_reply(struct mpii_softc *sc, struct mpii_reply_descr *rdp)
{
	struct mpii_rcb		*rcb = NULL;
	u_int32_t		rfid;

	DNPRINTF(MPII_D_INTR, "%s: mpii_reply\n", DEVNAME(sc));

	if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
	    MPII_REPLY_DESCR_ADDRESS_REPLY) {
		/* frame address -> index into the reply frame array */
		rfid = (lemtoh32(&rdp->frame_addr) -
		    (u_int32_t)MPII_DMA_DVA(sc->sc_replies)) /
		    sc->sc_reply_size;

		bus_dmamap_sync(sc->sc_dmat,
		    MPII_DMA_MAP(sc->sc_replies), sc->sc_reply_size * rfid,
		    sc->sc_reply_size, BUS_DMASYNC_POSTREAD);

		rcb = &sc->sc_rcbs[rfid];
	}

	/* mark the descriptor slot as unused again */
	memset(rdp, 0xff, sizeof(*rdp));

	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
	    8 * sc->sc_reply_post_host_index, 8,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	return (rcb);
}
2445 
/*
 * Allocate "size" bytes of DMA-able memory in a single segment,
 * mapped into kernel VA and loaded into a DMA map.  Returns NULL on
 * failure; partial setup is unwound via the goto chain below.
 * Freed with mpii_dmamem_free().
 */
struct mpii_dmamem *
mpii_dmamem_alloc(struct mpii_softc *sc, size_t size)
{
	struct mpii_dmamem	*mdm;
	int			nsegs;

	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mdm == NULL)
		return (NULL);

	mdm->mdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg,
	    1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
	    &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
	    NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (mdm);

	/* unwind in reverse order of setup */
unmap:
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
mdmfree:
	free(mdm, M_DEVBUF, sizeof *mdm);

	return (NULL);
}
2487 
/*
 * Release DMA memory allocated with mpii_dmamem_alloc(), tearing
 * down the map/mapping/segment in the reverse order of setup.
 */
void
mpii_dmamem_free(struct mpii_softc *sc, struct mpii_dmamem *mdm)
{
	DNPRINTF(MPII_D_MEM, "%s: mpii_dmamem_free %p\n", DEVNAME(sc), mdm);

	bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF, sizeof *mdm);
}
2499 
2500 int
2501 mpii_insert_dev(struct mpii_softc *sc, struct mpii_device *dev)
2502 {
2503 	int		slot;	/* initial hint */
2504 
2505 	if (dev == NULL || dev->slot < 0)
2506 		return (1);
2507 	slot = dev->slot;
2508 
2509 	while (slot < sc->sc_max_devices && sc->sc_devs[slot] != NULL)
2510 		slot++;
2511 
2512 	if (slot >= sc->sc_max_devices)
2513 		return (1);
2514 
2515 	dev->slot = slot;
2516 	sc->sc_devs[slot] = dev;
2517 
2518 	return (0);
2519 }
2520 
2521 int
2522 mpii_remove_dev(struct mpii_softc *sc, struct mpii_device *dev)
2523 {
2524 	int			i;
2525 
2526 	if (dev == NULL)
2527 		return (1);
2528 
2529 	for (i = 0; i < sc->sc_max_devices; i++) {
2530 		if (sc->sc_devs[i] == NULL)
2531 			continue;
2532 
2533 		if (sc->sc_devs[i]->dev_handle == dev->dev_handle) {
2534 			sc->sc_devs[i] = NULL;
2535 			return (0);
2536 		}
2537 	}
2538 
2539 	return (1);
2540 }
2541 
2542 struct mpii_device *
2543 mpii_find_dev(struct mpii_softc *sc, u_int16_t handle)
2544 {
2545 	int			i;
2546 
2547 	for (i = 0; i < sc->sc_max_devices; i++) {
2548 		if (sc->sc_devs[i] == NULL)
2549 			continue;
2550 
2551 		if (sc->sc_devs[i]->dev_handle == handle)
2552 			return (sc->sc_devs[i]);
2553 	}
2554 
2555 	return (NULL);
2556 }
2557 
/*
 * Allocate the ccb array and the DMA memory backing the request
 * frames, wire each ccb to its frame (smid, kva, dva), and seed the
 * free list / iopool.  smid 0 is reserved, so sc_max_cmds frames
 * yield sc_max_cmds - 1 usable ccbs.  Returns 0 on success, 1 on
 * failure (with everything allocated so far released).
 */
int
mpii_alloc_ccbs(struct mpii_softc *sc)
{
	struct mpii_ccb		*ccb;
	u_int8_t		*cmd;
	int			i;

	SIMPLEQ_INIT(&sc->sc_ccb_free);
	SIMPLEQ_INIT(&sc->sc_ccb_tmos);
	mtx_init(&sc->sc_ccb_free_mtx, IPL_BIO);
	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
	scsi_ioh_set(&sc->sc_ccb_tmo_handler, &sc->sc_iopool,
	    mpii_scsi_cmd_tmo_handler, sc);

	sc->sc_ccbs = mallocarray((sc->sc_max_cmds-1), sizeof(*ccb),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->sc_ccbs == NULL) {
		printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
		return (1);
	}

	sc->sc_requests = mpii_dmamem_alloc(sc,
	    sc->sc_request_size * sc->sc_max_cmds);
	if (sc->sc_requests == NULL) {
		printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
		goto free_ccbs;
	}
	cmd = MPII_DMA_KVA(sc->sc_requests);

	/*
	 * we have sc->sc_max_cmds system request message
	 * frames, but smid zero cannot be used. so we then
	 * have (sc->sc_max_cmds - 1) number of ccbs
	 */
	for (i = 1; i < sc->sc_max_cmds; i++) {
		ccb = &sc->sc_ccbs[i - 1];

		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, sc->sc_max_sgl,
		    MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
		    &ccb->ccb_dmamap) != 0) {
			printf("%s: unable to create dma map\n", DEVNAME(sc));
			goto free_maps;
		}

		/* tie the ccb to request frame i */
		ccb->ccb_sc = sc;
		htolem16(&ccb->ccb_smid, i);
		ccb->ccb_offset = sc->sc_request_size * i;

		ccb->ccb_cmd = &cmd[ccb->ccb_offset];
		ccb->ccb_cmd_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_requests) +
		    ccb->ccb_offset;

		DNPRINTF(MPII_D_CCB, "%s: mpii_alloc_ccbs(%d) ccb: %p map: %p "
		    "sc: %p smid: %#x offs: %#lx cmd: %p dva: %#lx\n",
		    DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc,
		    ccb->ccb_smid, ccb->ccb_offset, ccb->ccb_cmd,
		    ccb->ccb_cmd_dva);

		mpii_put_ccb(sc, ccb);
	}

	scsi_iopool_init(&sc->sc_iopool, sc, mpii_get_ccb, mpii_put_ccb);

	return (0);

free_maps:
	/* drain the free list, destroying each ccb's dma map */
	while ((ccb = mpii_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);

	mpii_dmamem_free(sc, sc->sc_requests);
free_ccbs:
	free(sc->sc_ccbs, M_DEVBUF, (sc->sc_max_cmds-1) * sizeof(*ccb));

	return (1);
}
2634 
/*
 * iopool put routine: reset a ccb to its pristine state (cleared
 * request frame included) and return it to the free list.  The
 * kernel lock is dropped around the free-list mutex; iopool callers
 * hold it on entry.
 */
void
mpii_put_ccb(void *cookie, void *io)
{
	struct mpii_softc	*sc = cookie;
	struct mpii_ccb		*ccb = io;

	DNPRINTF(MPII_D_CCB, "%s: mpii_put_ccb %p\n", DEVNAME(sc), ccb);

	ccb->ccb_state = MPII_CCB_FREE;
	ccb->ccb_cookie = NULL;
	ccb->ccb_done = NULL;
	ccb->ccb_rcb = NULL;
	/* zero the request frame so the next user starts clean */
	memset(ccb->ccb_cmd, 0, sc->sc_request_size);

	KERNEL_UNLOCK();
	mtx_enter(&sc->sc_ccb_free_mtx);
	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link);
	mtx_leave(&sc->sc_ccb_free_mtx);
	KERNEL_LOCK();
}
2655 
/*
 * iopool get routine: take a ccb off the free list, or return NULL if
 * none are available.  The kernel lock is dropped around the
 * free-list mutex; iopool callers hold it on entry.
 */
void *
mpii_get_ccb(void *cookie)
{
	struct mpii_softc	*sc = cookie;
	struct mpii_ccb		*ccb;

	KERNEL_UNLOCK();

	mtx_enter(&sc->sc_ccb_free_mtx);
	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free);
	if (ccb != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link);
		ccb->ccb_state = MPII_CCB_READY;
	}
	mtx_leave(&sc->sc_ccb_free_mtx);

	KERNEL_LOCK();

	DNPRINTF(MPII_D_CCB, "%s: mpii_get_ccb %p\n", DEVNAME(sc), ccb);

	return (ccb);
}
2678 
/*
 * Allocate the rcb bookkeeping array and the DMA memory holding the
 * reply frames themselves (one rcb per reply frame).  Returns 0 on
 * success, 1 on failure.
 */
int
mpii_alloc_replies(struct mpii_softc *sc)
{
	DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_replies\n", DEVNAME(sc));

	sc->sc_rcbs = mallocarray(sc->sc_num_reply_frames,
	    sizeof(struct mpii_rcb), M_DEVBUF, M_NOWAIT);
	if (sc->sc_rcbs == NULL)
		return (1);

	sc->sc_replies = mpii_dmamem_alloc(sc, sc->sc_reply_size *
	    sc->sc_num_reply_frames);
	if (sc->sc_replies == NULL) {
		/* undo the rcb allocation on failure */
		free(sc->sc_rcbs, M_DEVBUF,
		    sc->sc_num_reply_frames * sizeof(struct mpii_rcb));
		return (1);
	}

	return (0);
}
2699 
/*
 * Point each rcb at its reply frame (kva and dva) and hand all the
 * frames to the IOC via mpii_push_reply().  The whole reply area is
 * synced for device reads first.
 */
void
mpii_push_replies(struct mpii_softc *sc)
{
	struct mpii_rcb		*rcb;
	caddr_t			kva = MPII_DMA_KVA(sc->sc_replies);
	int			i;

	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
	    0, sc->sc_reply_size * sc->sc_num_reply_frames,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < sc->sc_num_reply_frames; i++) {
		rcb = &sc->sc_rcbs[i];

		rcb->rcb_reply = kva + sc->sc_reply_size * i;
		rcb->rcb_reply_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) +
		    sc->sc_reply_size * i;
		mpii_push_reply(sc, rcb);
	}
}
2720 
/*
 * Post a prepared ccb to the IOC.  The request frame is synced for
 * the device, then an 8-byte request descriptor (type, vf_id, smid,
 * and dev_handle for SCSI IO) is written to the REQ_DESCR_POST
 * registers: a single raw 64-bit write on LP64, otherwise two 32-bit
 * writes serialised with sc_req_mtx and write barriers so the halves
 * cannot interleave with another CPU's post.
 */
void
mpii_start(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	struct mpii_request_header	*rhp;
	struct mpii_request_descr	descr;
	/* view the descriptor as raw words for the register writes */
	u_long				 *rdp = (u_long *)&descr;

	DNPRINTF(MPII_D_RW, "%s: mpii_start %#lx\n", DEVNAME(sc),
	    ccb->ccb_cmd_dva);

	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, sc->sc_request_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ccb->ccb_state = MPII_CCB_QUEUED;

	rhp = ccb->ccb_cmd;

	memset(&descr, 0, sizeof(descr));

	switch (rhp->function) {
	case MPII_FUNCTION_SCSI_IO_REQUEST:
		descr.request_flags = MPII_REQ_DESCR_SCSI_IO;
		descr.dev_handle = htole16(ccb->ccb_dev_handle);
		break;
	case MPII_FUNCTION_SCSI_TASK_MGMT:
		descr.request_flags = MPII_REQ_DESCR_HIGH_PRIORITY;
		break;
	default:
		descr.request_flags = MPII_REQ_DESCR_DEFAULT;
	}

	descr.vf_id = sc->sc_vf_id;
	descr.smid = ccb->ccb_smid;

	DNPRINTF(MPII_D_RW, "%s:   MPII_REQ_DESCR_POST_LOW (0x%08x) write "
	    "0x%08lx\n", DEVNAME(sc), MPII_REQ_DESCR_POST_LOW, *rdp);

	DNPRINTF(MPII_D_RW, "%s:   MPII_REQ_DESCR_POST_HIGH (0x%08x) write "
	    "0x%08lx\n", DEVNAME(sc), MPII_REQ_DESCR_POST_HIGH, *(rdp+1));

#if defined(__LP64__)
	/* u_long is 64 bits here: the whole descriptor in one write */
	bus_space_write_raw_8(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, *rdp);
#else
	/* two 32-bit halves; keep them atomic wrt other posters */
	mtx_enter(&sc->sc_req_mtx);
	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, rdp[0]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, 8, BUS_SPACE_BARRIER_WRITE);

	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_HIGH, rdp[1]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, 8, BUS_SPACE_BARRIER_WRITE);
	mtx_leave(&sc->sc_req_mtx);
#endif
}
2779 
/*
 * Issue a ccb and busy-wait for its completion.  The ccb's done
 * handler and cookie are temporarily replaced so mpii_poll_done() can
 * clear the local "rv" flag; once it does, the originals are restored
 * and the real done handler is called.  Always returns 0.
 */
int
mpii_poll(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	void				(*done)(struct mpii_ccb *);
	void				*cookie;
	int				rv = 1;

	DNPRINTF(MPII_D_INTR, "%s: mpii_poll\n", DEVNAME(sc));

	/* save the caller's completion state and hook in our own */
	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mpii_poll_done;
	ccb->ccb_cookie = &rv;

	mpii_start(sc, ccb);

	/* rv is cleared by mpii_poll_done() when the ccb completes */
	while (rv == 1) {
		/* avoid excessive polling */
		if (mpii_reply_waiting(sc))
			mpii_intr(sc);
		else
			delay(10);
	}

	ccb->ccb_cookie = cookie;
	done(ccb);

	return (0);
}
2810 
/*
 * Completion handler used by mpii_poll(): clear the flag the poll
 * loop is spinning on (the ccb cookie points at it).
 */
void
mpii_poll_done(struct mpii_ccb *ccb)
{
	int				*rv = ccb->ccb_cookie;

	*rv = 0;
}
2818 
/*
 * Allocate the reply free queue (pre-filled with the DVAs of all
 * reply frames) and the reply post queue (initialised to all-ones,
 * the "unused descriptor" pattern checked elsewhere).  Returns 0 on
 * success, 1 on failure.
 */
int
mpii_alloc_queues(struct mpii_softc *sc)
{
	u_int32_t		*rfp;
	int			i;

	DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_queues\n", DEVNAME(sc));

	sc->sc_reply_freeq = mpii_dmamem_alloc(sc,
	    sc->sc_reply_free_qdepth * sizeof(*rfp));
	if (sc->sc_reply_freeq == NULL)
		return (1);
	rfp = MPII_DMA_KVA(sc->sc_reply_freeq);
	/* seed the free queue with every reply frame's address */
	for (i = 0; i < sc->sc_num_reply_frames; i++) {
		rfp[i] = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) +
		    sc->sc_reply_size * i;
	}

	sc->sc_reply_postq = mpii_dmamem_alloc(sc,
	    sc->sc_reply_post_qdepth * sizeof(struct mpii_reply_descr));
	if (sc->sc_reply_postq == NULL)
		goto free_reply_freeq;
	sc->sc_reply_postq_kva = MPII_DMA_KVA(sc->sc_reply_postq);
	/* 0xff marks a descriptor slot as unused */
	memset(sc->sc_reply_postq_kva, 0xff, sc->sc_reply_post_qdepth *
	    sizeof(struct mpii_reply_descr));

	return (0);

free_reply_freeq:
	mpii_dmamem_free(sc, sc->sc_reply_freeq);
	return (1);
}
2851 
/*
 * Reset the host-side queue indices and tell the IOC about them via
 * the reply free / reply post index registers.
 */
void
mpii_init_queues(struct mpii_softc *sc)
{
	DNPRINTF(MPII_D_MISC, "%s:  mpii_init_queues\n", DEVNAME(sc));

	/* free queue starts full, so its index begins at the last slot */
	sc->sc_reply_free_host_index = sc->sc_reply_free_qdepth - 1;
	sc->sc_reply_post_host_index = 0;
	mpii_write_reply_free(sc, sc->sc_reply_free_host_index);
	mpii_write_reply_post(sc, sc->sc_reply_post_host_index);
}
2862 
/*
 * Issue a ccb and sleep until it completes.  An on-stack mutex serves
 * as both the sleep channel guard and (via the ccb cookie) the signal:
 * mpii_wait_done() NULLs the cookie under the mutex and wakes us.  The
 * caller's done handler and cookie are restored before the done
 * handler is invoked.
 */
void
mpii_wait(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	struct mutex		mtx = MUTEX_INITIALIZER(IPL_BIO);
	void			(*done)(struct mpii_ccb *);
	void			*cookie;

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mpii_wait_done;
	ccb->ccb_cookie = &mtx;

	/* XXX this will wait forever for the ccb to complete */

	mpii_start(sc, ccb);

	mtx_enter(&mtx);
	/* cookie is cleared by mpii_wait_done() on completion */
	while (ccb->ccb_cookie != NULL)
		msleep_nsec(ccb, &mtx, PRIBIO, "mpiiwait", INFSLP);
	mtx_leave(&mtx);

	ccb->ccb_cookie = cookie;
	done(ccb);
}
2888 
/*
 * Completion handler used by mpii_wait(): clear the cookie (which
 * points at the waiter's on-stack mutex) under that mutex and wake
 * the sleeping thread.
 */
void
mpii_wait_done(struct mpii_ccb *ccb)
{
	struct mutex		*mtx = ccb->ccb_cookie;

	mtx_enter(mtx);
	ccb->ccb_cookie = NULL;
	mtx_leave(mtx);

	wakeup_one(ccb);
}
2900 
/*
 * scsi_xfer entry point: build an MPI SCSI IO request for the xfer
 * and submit it (polled for SCSI_POLL, otherwise queued with a
 * timeout).  CDBs larger than the request frame's CDB field are
 * rejected with synthesized ILLEGAL REQUEST sense; xfers for targets
 * that have disappeared complete with XS_SELTIMEOUT.  The kernel
 * lock is dropped for the request construction and reacquired before
 * scsi_done()/return.
 */
void
mpii_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct mpii_softc	*sc = link->adapter_softc;
	struct mpii_ccb		*ccb = xs->io;
	struct mpii_msg_scsi_io	*io;
	struct mpii_device	*dev;
	int			 ret;

	DNPRINTF(MPII_D_CMD, "%s: mpii_scsi_cmd\n", DEVNAME(sc));

	if (xs->cmdlen > MPII_CDB_LEN) {
		DNPRINTF(MPII_D_CMD, "%s: CDB too big %d\n",
		    DEVNAME(sc), xs->cmdlen);
		/* fake ILLEGAL REQUEST / invalid command operation code */
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.error_code = SSD_ERRCODE_VALID | 0x70;
		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.add_sense_code = 0x20;
		xs->error = XS_SENSE;
		scsi_done(xs);
		return;
	}

	if ((dev = sc->sc_devs[link->target]) == NULL) {
		/* device no longer exists */
		xs->error = XS_SELTIMEOUT;
		scsi_done(xs);
		return;
	}

	KERNEL_UNLOCK();

	DNPRINTF(MPII_D_CMD, "%s: ccb_smid: %d xs->flags: 0x%x\n",
	    DEVNAME(sc), ccb->ccb_smid, xs->flags);

	ccb->ccb_cookie = xs;
	ccb->ccb_done = mpii_scsi_cmd_done;
	ccb->ccb_dev_handle = dev->dev_handle;

	io = ccb->ccb_cmd;
	memset(io, 0, sizeof(*io));
	io->function = MPII_FUNCTION_SCSI_IO_REQUEST;
	io->sense_buffer_length = sizeof(xs->sense);
	io->sgl_offset0 = sizeof(struct mpii_msg_scsi_io) / 4; /* in dwords */
	htolem16(&io->io_flags, xs->cmdlen);
	htolem16(&io->dev_handle, ccb->ccb_dev_handle);
	htobem16(&io->lun[0], link->lun);

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}

	io->tagging = MPII_SCSIIO_ATTR_SIMPLE_Q;

	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	htolem32(&io->data_length, xs->datalen);

	/* sense data is at the end of a request */
	htolem32(&io->sense_buffer_low_address, ccb->ccb_cmd_dva +
	    sc->sc_request_size - sizeof(struct scsi_sense_data));

	/* map the data buffer into the SGL (IEEE SGLs on SAS3 parts) */
	if (ISSET(sc->sc_flags, MPII_F_SAS3))
		ret = mpii_load_xs_sas3(ccb);
	else
		ret = mpii_load_xs(ccb);

	if (ret != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		goto done;
	}

	timeout_set(&xs->stimeout, mpii_scsi_cmd_tmo, ccb);
	if (xs->flags & SCSI_POLL) {
		if (mpii_poll(sc, ccb) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			goto done;
		}
	} else {
		timeout_add_msec(&xs->stimeout, xs->timeout);
		mpii_start(sc, ccb);
	}

	KERNEL_LOCK();
	return;

done:
	KERNEL_LOCK();
	scsi_done(xs);
}
3000 
/*
 * Per-xfer timeout handler: if the ccb is still queued at the IOC,
 * move it to the timeout list and schedule the timeout handler
 * (which will issue a target reset) via the ioh mechanism.
 */
void
mpii_scsi_cmd_tmo(void *xccb)
{
	struct mpii_ccb		*ccb = xccb;
	struct mpii_softc	*sc = ccb->ccb_sc;

	printf("%s: mpii_scsi_cmd_tmo (0x%08x)\n", DEVNAME(sc),
	    mpii_read_db(sc));

	mtx_enter(&sc->sc_ccb_mtx);
	if (ccb->ccb_state == MPII_CCB_QUEUED) {
		ccb->ccb_state = MPII_CCB_TIMEOUT;
		SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_tmos, ccb, ccb_link);
	}
	mtx_leave(&sc->sc_ccb_mtx);

	scsi_ioh_add(&sc->sc_ccb_tmo_handler);
}
3019 
/*
 * ioh callback for command timeouts: "io" is a ccb handed to us by
 * the iopool.  Pop one timed-out ccb off the list and use the
 * supplied ccb to send a target reset for its device handle; if the
 * list is empty the supplied ccb is returned to the pool.
 */
void
mpii_scsi_cmd_tmo_handler(void *cookie, void *io)
{
	struct mpii_softc			*sc = cookie;
	struct mpii_ccb				*tccb = io;
	struct mpii_ccb				*ccb;
	struct mpii_msg_scsi_task_request	*stq;

	mtx_enter(&sc->sc_ccb_mtx);
	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_tmos);
	if (ccb != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_tmos, ccb_link);
		ccb->ccb_state = MPII_CCB_QUEUED;
	}
	/* should remove any other ccbs for the same dev handle */
	mtx_leave(&sc->sc_ccb_mtx);

	if (ccb == NULL) {
		/* nothing left to do; give the ioh ccb back */
		scsi_io_put(&sc->sc_iopool, tccb);
		return;
	}

	stq = tccb->ccb_cmd;
	stq->function = MPII_FUNCTION_SCSI_TASK_MGMT;
	stq->task_type = MPII_SCSI_TASK_TARGET_RESET;
	htolem16(&stq->dev_handle, ccb->ccb_dev_handle);

	tccb->ccb_done = mpii_scsi_cmd_tmo_done;
	mpii_start(sc, tccb);
}
3050 
/*
 * Target reset completion: feed the ccb straight back into the
 * timeout handler so any further timed-out commands are processed
 * (or the ccb is returned to the pool if the list is empty).
 */
void
mpii_scsi_cmd_tmo_done(struct mpii_ccb *tccb)
{
	mpii_scsi_cmd_tmo_handler(tccb->ccb_sc, tccb);
}
3056 
/*
 * Completion path for SCSI I/O ccbs.  Unlinks the ccb from the timeout
 * queue if a task management request was pending for it, syncs and
 * unloads the data DMA map, translates the IOC reply (if any) into
 * scsi_xfer status/error values and completes the xfer.
 */
void
mpii_scsi_cmd_done(struct mpii_ccb *ccb)
{
	struct mpii_ccb		*tccb;
	struct mpii_msg_scsi_io_error	*sie;
	struct mpii_softc	*sc = ccb->ccb_sc;
	struct scsi_xfer	*xs = ccb->ccb_cookie;
	struct scsi_sense_data	*sense;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;

	/* the command completed, so its watchdog no longer applies */
	timeout_del(&xs->stimeout);

	/*
	 * If the timeout handler already moved this ccb onto the tmos
	 * queue, take it back off under the ccb mutex before completing.
	 */
	mtx_enter(&sc->sc_ccb_mtx);
	if (ccb->ccb_state == MPII_CCB_TIMEOUT) {
		/* ENOSIMPLEQ_REMOVE :( */
		if (ccb == SIMPLEQ_FIRST(&sc->sc_ccb_tmos))
			SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_tmos, ccb_link);
		else {
			/* walk to the predecessor so we can unlink ccb */
			SIMPLEQ_FOREACH(tccb, &sc->sc_ccb_tmos, ccb_link) {
				if (SIMPLEQ_NEXT(tccb, ccb_link) == ccb) {
					SIMPLEQ_REMOVE_AFTER(&sc->sc_ccb_tmos,
					    tccb, ccb_link);
					break;
				}
			}
		}
	}

	ccb->ccb_state = MPII_CCB_READY;
	mtx_leave(&sc->sc_ccb_mtx);

	/* sync and unload the data transfer map, if there was a transfer */
	if (xs->datalen != 0) {
		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, dmap);
	}

	xs->error = XS_NOERROR;
	xs->resid = 0;

	if (ccb->ccb_rcb == NULL) {
		/* no scsi error, we're ok so drop out early */
		xs->status = SCSI_OK;
		goto done;
	}

	/* a reply frame was posted: decode the SCSI IO error reply */
	sie = ccb->ccb_rcb->rcb_reply;

	DNPRINTF(MPII_D_CMD, "%s: mpii_scsi_cmd_done xs cmd: 0x%02x len: %d "
	    "flags 0x%x\n", DEVNAME(sc), xs->cmd->opcode, xs->datalen,
	    xs->flags);
	DNPRINTF(MPII_D_CMD, "%s:  dev_handle: %d msg_length: %d "
	    "function: 0x%02x\n", DEVNAME(sc), lemtoh16(&sie->dev_handle),
	    sie->msg_length, sie->function);
	DNPRINTF(MPII_D_CMD, "%s:  vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
	    sie->vp_id, sie->vf_id);
	DNPRINTF(MPII_D_CMD, "%s:  scsi_status: 0x%02x scsi_state: 0x%02x "
	    "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status,
	    sie->scsi_state, lemtoh16(&sie->ioc_status));
	DNPRINTF(MPII_D_CMD, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    lemtoh32(&sie->ioc_loginfo));
	DNPRINTF(MPII_D_CMD, "%s:  transfer_count: %d\n", DEVNAME(sc),
	    lemtoh32(&sie->transfer_count));
	DNPRINTF(MPII_D_CMD, "%s:  sense_count: %d\n", DEVNAME(sc),
	    lemtoh32(&sie->sense_count));
	DNPRINTF(MPII_D_CMD, "%s:  response_info: 0x%08x\n", DEVNAME(sc),
	    lemtoh32(&sie->response_info));
	DNPRINTF(MPII_D_CMD, "%s:  task_tag: 0x%04x\n", DEVNAME(sc),
	    lemtoh16(&sie->task_tag));
	DNPRINTF(MPII_D_CMD, "%s:  bidirectional_transfer_count: 0x%08x\n",
	    DEVNAME(sc), lemtoh32(&sie->bidirectional_transfer_count));

	/* no SCSI status from the target means the command was terminated */
	if (sie->scsi_state & MPII_SCSIIO_STATE_NO_SCSI_STATUS)
		xs->status = SCSI_TERMINATED;
	else
		xs->status = sie->scsi_status;
	xs->resid = 0;

	/* translate the IOC status into an xs error */
	switch (lemtoh16(&sie->ioc_status) & MPII_IOCSTATUS_MASK) {
	case MPII_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/* short transfer: record the residual, then evaluate status */
		xs->resid = xs->datalen - lemtoh32(&sie->transfer_count);
		/* FALLTHROUGH */

	case MPII_IOCSTATUS_SUCCESS:
	case MPII_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			xs->error = XS_NOERROR;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			xs->error = XS_DRIVER_STUFFUP;
		}
		break;

	case MPII_IOCSTATUS_BUSY:
	case MPII_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_BUSY;
		break;

	case MPII_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPII_IOCSTATUS_SCSI_TASK_TERMINATED:
		xs->error = XS_RESET;
		break;

	case MPII_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPII_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	/* autosense data occupies the tail end of the request frame */
	sense = (struct scsi_sense_data *)((caddr_t)ccb->ccb_cmd +
	    sc->sc_request_size - sizeof(*sense));
	if (sie->scsi_state & MPII_SCSIIO_STATE_AUTOSENSE_VALID)
		memcpy(&xs->sense, sense, sizeof(xs->sense));

	DNPRINTF(MPII_D_CMD, "%s:  xs err: %d status: %#x\n", DEVNAME(sc),
	    xs->error, xs->status);

	/* hand the reply frame back to the controller */
	mpii_push_reply(sc, ccb->ccb_rcb);
done:
	/* scsi_done() must be called with the kernel lock held */
	KERNEL_LOCK();
	scsi_done(xs);
	KERNEL_UNLOCK();
}
3196 
3197 int
3198 mpii_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
3199 {
3200 	struct mpii_softc	*sc = (struct mpii_softc *)link->adapter_softc;
3201 	struct mpii_device	*dev = sc->sc_devs[link->target];
3202 
3203 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_scsi_ioctl\n", DEVNAME(sc));
3204 
3205 	switch (cmd) {
3206 	case DIOCGCACHE:
3207 	case DIOCSCACHE:
3208 		if (dev != NULL && ISSET(dev->flags, MPII_DF_VOLUME)) {
3209 			return (mpii_ioctl_cache(link, cmd,
3210 			    (struct dk_cache *)addr));
3211 		}
3212 		break;
3213 
3214 	default:
3215 		if (sc->sc_ioctl)
3216 			return (sc->sc_ioctl(link->adapter_softc, cmd, addr));
3217 
3218 		break;
3219 	}
3220 
3221 	return (ENOTTY);
3222 }
3223 
/*
 * Get (DIOCGCACHE) or set (DIOCSCACHE) the write cache state of a RAID
 * volume.  Reads the current setting from RAID volume page 0 and, for
 * set requests, issues a RAID action to change the write cache mode.
 * All firmware requests are made in polled mode.
 */
int
mpii_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc)
{
	struct mpii_softc *sc = (struct mpii_softc *)link->adapter_softc;
	struct mpii_device *dev = sc->sc_devs[link->target];
	struct mpii_cfg_raid_vol_pg0 *vpg;
	struct mpii_msg_raid_action_request *req;
	struct mpii_msg_raid_action_reply *rep;
	struct mpii_cfg_hdr hdr;
	struct mpii_ccb	*ccb;
	u_int32_t addr = MPII_CFG_RAID_VOL_ADDR_HANDLE | dev->dev_handle;
	size_t pagelen;
	int rv = 0;
	int enabled;

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    addr, MPII_PG_POLL, &hdr) != 0)
		return (EINVAL);

	/* page_length is in 32-bit dwords */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (vpg == NULL)
		return (ENOMEM);

	if (mpii_req_cfg_page(sc, addr, MPII_PG_POLL, &hdr, 1,
	    vpg, pagelen) != 0) {
		rv = EINVAL;
		goto done;
	}

	/* current write cache state as reported by the firmware */
	enabled = ((lemtoh16(&vpg->volume_settings) &
	    MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_MASK) ==
	    MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_ENABLED) ? 1 : 0;

	if (cmd == DIOCGCACHE) {
		dc->wrcache = enabled;
		dc->rdcache = 0;
		goto done;
	} /* else DIOCSCACHE */

	/* read cache control is not supported by the firmware interface */
	if (dc->rdcache) {
		rv = EOPNOTSUPP;
		goto done;
	}

	/* nothing to do if the requested state is already in effect */
	if (((dc->wrcache) ? 1 : 0) == enabled)
		goto done;

	ccb = scsi_io_get(&sc->sc_iopool, SCSI_POLL);
	if (ccb == NULL) {
		rv = ENOMEM;
		goto done;
	}

	ccb->ccb_done = mpii_empty_done;

	/* build a RAID action request to flip the volume write cache */
	req = ccb->ccb_cmd;
	memset(req, 0, sizeof(*req));
	req->function = MPII_FUNCTION_RAID_ACTION;
	req->action = MPII_RAID_ACTION_CHANGE_VOL_WRITE_CACHE;
	htolem16(&req->vol_dev_handle, dev->dev_handle);
	htolem32(&req->action_data, dc->wrcache ?
	    MPII_RAID_VOL_WRITE_CACHE_ENABLE :
	    MPII_RAID_VOL_WRITE_CACHE_DISABLE);

	/*
	 * NOTE(review): on poll failure the ccb is not returned to the
	 * iopool; presumably intentional since the request may still be
	 * outstanding in the firmware -- confirm.
	 */
	if (mpii_poll(sc, ccb) != 0) {
		rv = EIO;
		goto done;
	}

	if (ccb->ccb_rcb != NULL) {
		/*
		 * Verify the firmware both accepted the action and now
		 * reports the requested cache state.
		 * NOTE(review): ioc_status is little-endian and compared
		 * without lemtoh16; this is only byte-order independent if
		 * MPII_IOCSTATUS_SUCCESS is zero -- verify.
		 */
		rep = ccb->ccb_rcb->rcb_reply;
		if ((rep->ioc_status != MPII_IOCSTATUS_SUCCESS) ||
		    ((rep->action_data[0] &
		     MPII_RAID_VOL_WRITE_CACHE_MASK) !=
		    (dc->wrcache ? MPII_RAID_VOL_WRITE_CACHE_ENABLE :
		     MPII_RAID_VOL_WRITE_CACHE_DISABLE)))
			rv = EINVAL;
		mpii_push_reply(sc, ccb->ccb_rcb);
	}

	scsi_io_put(&sc->sc_iopool, ccb);

done:
	free(vpg, M_TEMP, pagelen);
	return (rv);
}
3311 
3312 #if NBIO > 0
3313 int
3314 mpii_ioctl(struct device *dev, u_long cmd, caddr_t addr)
3315 {
3316 	struct mpii_softc	*sc = (struct mpii_softc *)dev;
3317 	int			error = 0;
3318 
3319 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl ", DEVNAME(sc));
3320 
3321 	switch (cmd) {
3322 	case BIOCINQ:
3323 		DNPRINTF(MPII_D_IOCTL, "inq\n");
3324 		error = mpii_ioctl_inq(sc, (struct bioc_inq *)addr);
3325 		break;
3326 	case BIOCVOL:
3327 		DNPRINTF(MPII_D_IOCTL, "vol\n");
3328 		error = mpii_ioctl_vol(sc, (struct bioc_vol *)addr);
3329 		break;
3330 	case BIOCDISK:
3331 		DNPRINTF(MPII_D_IOCTL, "disk\n");
3332 		error = mpii_ioctl_disk(sc, (struct bioc_disk *)addr);
3333 		break;
3334 	default:
3335 		DNPRINTF(MPII_D_IOCTL, " invalid ioctl\n");
3336 		error = ENOTTY;
3337 	}
3338 
3339 	return (error);
3340 }
3341 
3342 int
3343 mpii_ioctl_inq(struct mpii_softc *sc, struct bioc_inq *bi)
3344 {
3345 	int			i;
3346 
3347 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_inq\n", DEVNAME(sc));
3348 
3349 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
3350 	for (i = 0; i < sc->sc_max_devices; i++)
3351 		if (sc->sc_devs[i] &&
3352 		    ISSET(sc->sc_devs[i]->flags, MPII_DF_VOLUME))
3353 			bi->bi_novol++;
3354 	return (0);
3355 }
3356 
3357 int
3358 mpii_ioctl_vol(struct mpii_softc *sc, struct bioc_vol *bv)
3359 {
3360 	struct mpii_cfg_raid_vol_pg0	*vpg;
3361 	struct mpii_cfg_hdr		hdr;
3362 	struct mpii_device		*dev;
3363 	struct scsi_link		*lnk;
3364 	struct device			*scdev;
3365 	size_t				pagelen;
3366 	u_int16_t			volh;
3367 	int				rv, hcnt = 0;
3368 
3369 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_vol %d\n",
3370 	    DEVNAME(sc), bv->bv_volid);
3371 
3372 	if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL)
3373 		return (ENODEV);
3374 	volh = dev->dev_handle;
3375 
3376 	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3377 	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
3378 		printf("%s: unable to fetch header for raid volume page 0\n",
3379 		    DEVNAME(sc));
3380 		return (EINVAL);
3381 	}
3382 
3383 	pagelen = hdr.page_length * 4;
3384 	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
3385 	if (vpg == NULL) {
3386 		printf("%s: unable to allocate space for raid "
3387 		    "volume page 0\n", DEVNAME(sc));
3388 		return (ENOMEM);
3389 	}
3390 
3391 	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
3392 	    &hdr, 1, vpg, pagelen) != 0) {
3393 		printf("%s: unable to fetch raid volume page 0\n",
3394 		    DEVNAME(sc));
3395 		free(vpg, M_TEMP, pagelen);
3396 		return (EINVAL);
3397 	}
3398 
3399 	switch (vpg->volume_state) {
3400 	case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
3401 	case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
3402 		bv->bv_status = BIOC_SVONLINE;
3403 		break;
3404 	case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
3405 		if (ISSET(lemtoh32(&vpg->volume_status),
3406 		    MPII_CFG_RAID_VOL_0_STATUS_RESYNC)) {
3407 			bv->bv_status = BIOC_SVREBUILD;
3408 			bv->bv_percent = dev->percent;
3409 		} else
3410 			bv->bv_status = BIOC_SVDEGRADED;
3411 		break;
3412 	case MPII_CFG_RAID_VOL_0_STATE_FAILED:
3413 		bv->bv_status = BIOC_SVOFFLINE;
3414 		break;
3415 	case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
3416 		bv->bv_status = BIOC_SVBUILDING;
3417 		break;
3418 	case MPII_CFG_RAID_VOL_0_STATE_MISSING:
3419 	default:
3420 		bv->bv_status = BIOC_SVINVALID;
3421 		break;
3422 	}
3423 
3424 	switch (vpg->volume_type) {
3425 	case MPII_CFG_RAID_VOL_0_TYPE_RAID0:
3426 		bv->bv_level = 0;
3427 		break;
3428 	case MPII_CFG_RAID_VOL_0_TYPE_RAID1:
3429 		bv->bv_level = 1;
3430 		break;
3431 	case MPII_CFG_RAID_VOL_0_TYPE_RAID1E:
3432 	case MPII_CFG_RAID_VOL_0_TYPE_RAID10:
3433 		bv->bv_level = 10;
3434 		break;
3435 	default:
3436 		bv->bv_level = -1;
3437 	}
3438 
3439 	if ((rv = mpii_bio_hs(sc, NULL, 0, vpg->hot_spare_pool, &hcnt)) != 0) {
3440 		free(vpg, M_TEMP, pagelen);
3441 		return (rv);
3442 	}
3443 
3444 	bv->bv_nodisk = vpg->num_phys_disks + hcnt;
3445 
3446 	bv->bv_size = letoh64(vpg->max_lba) * lemtoh16(&vpg->block_size);
3447 
3448 	lnk = scsi_get_link(sc->sc_scsibus, dev->slot, 0);
3449 	if (lnk != NULL) {
3450 		scdev = lnk->device_softc;
3451 		strlcpy(bv->bv_dev, scdev->dv_xname, sizeof(bv->bv_dev));
3452 	}
3453 
3454 	free(vpg, M_TEMP, pagelen);
3455 	return (0);
3456 }
3457 
/*
 * BIOCDISK backend: resolve bd->bd_volid/bd_diskid to a physical disk
 * number and hand off to mpii_bio_disk.  Disk indices beyond the
 * volume's physical disks refer to hot spares and are resolved through
 * mpii_bio_hs instead.
 */
int
mpii_ioctl_disk(struct mpii_softc *sc, struct bioc_disk *bd)
{
	struct mpii_cfg_raid_vol_pg0		*vpg;
	struct mpii_cfg_raid_vol_pg0_physdisk	*pd;
	struct mpii_cfg_hdr			hdr;
	struct mpii_device			*dev;
	size_t					pagelen;
	u_int16_t				volh;
	u_int8_t				dn;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_disk %d/%d\n",
	    DEVNAME(sc), bd->bd_volid, bd->bd_diskid);

	if ((dev = mpii_find_vol(sc, bd->bd_volid)) == NULL)
		return (ENODEV);
	volh = dev->dev_handle;

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
		printf("%s: unable to fetch header for raid volume page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is in 32-bit dwords */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (vpg == NULL) {
		printf("%s: unable to allocate space for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
	    &hdr, 1, vpg, pagelen) != 0) {
		printf("%s: unable to fetch raid volume page 0\n",
		    DEVNAME(sc));
		free(vpg, M_TEMP, pagelen);
		return (EINVAL);
	}

	/* indices past the volume's member disks address hot spares */
	if (bd->bd_diskid >= vpg->num_phys_disks) {
		int		nvdsk = vpg->num_phys_disks;
		int		hsmap = vpg->hot_spare_pool;

		free(vpg, M_TEMP, pagelen);
		return (mpii_bio_hs(sc, bd, nvdsk, hsmap, NULL));
	}

	/* physical disk descriptors follow the fixed part of the page */
	pd = (struct mpii_cfg_raid_vol_pg0_physdisk *)(vpg + 1) +
	    bd->bd_diskid;
	dn = pd->phys_disk_num;

	free(vpg, M_TEMP, pagelen);
	return (mpii_bio_disk(sc, bd, dn));
}
3514 
/*
 * Walk the active RAID configuration (config page 0) looking for hot
 * spare physical disks assigned to pool hsmap.  If bd is non-NULL,
 * resolve the hot spare addressed by bd->bd_diskid (offset by nvdsk,
 * the number of member disks in the volume) via mpii_bio_disk;
 * otherwise just count the spares into *hscnt.
 */
int
mpii_bio_hs(struct mpii_softc *sc, struct bioc_disk *bd, int nvdsk,
     int hsmap, int *hscnt)
{
	struct mpii_cfg_raid_config_pg0	*cpg;
	struct mpii_raid_config_element	*el;
	struct mpii_ecfg_hdr		ehdr;
	size_t				pagelen;
	int				i, nhs = 0;

	if (bd)
		DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs %d\n", DEVNAME(sc),
		    bd->bd_diskid - nvdsk);
	else
		DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs\n", DEVNAME(sc));

	/* RAID config page 0 uses the extended config header format */
	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_CONFIG,
	    0, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG, MPII_PG_EXTENDED,
	    &ehdr) != 0) {
		printf("%s: unable to fetch header for raid config page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* ext_page_length is in 32-bit dwords */
	pagelen = lemtoh16(&ehdr.ext_page_length) * 4;
	cpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (cpg == NULL) {
		printf("%s: unable to allocate space for raid config page 0\n",
		    DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG,
	    MPII_PG_EXTENDED, &ehdr, 1, cpg, pagelen) != 0) {
		printf("%s: unable to fetch raid config page 0\n",
		    DEVNAME(sc));
		free(cpg, M_TEMP, pagelen);
		return (EINVAL);
	}

	/* config elements follow the fixed part of the page */
	el = (struct mpii_raid_config_element *)(cpg + 1);
	for (i = 0; i < cpg->num_elements; i++, el++) {
		if (ISSET(lemtoh16(&el->element_flags),
		    MPII_RAID_CONFIG_ELEMENT_FLAG_HSP_PHYS_DISK) &&
		    el->hot_spare_pool == hsmap) {
			/*
			 * diskid comparison is based on the idea that all
			 * disks are counted by the bio(4) in sequence, thus
			 * subtracting the number of disks in the volume
			 * from the diskid yields us a "relative" hotspare
			 * number, which is good enough for us.
			 */
			if (bd != NULL && bd->bd_diskid == nhs + nvdsk) {
				u_int8_t dn = el->phys_disk_num;

				free(cpg, M_TEMP, pagelen);
				return (mpii_bio_disk(sc, bd, dn));
			}
			nhs++;
		}
	}

	if (hscnt)
		*hscnt = nhs;

	free(cpg, M_TEMP, pagelen);
	return (0);
}
3583 
/*
 * Fill in a bioc_disk from RAID physical disk page 0 for physical disk
 * number dn: status, size and the vendor/product/serial strings.
 */
int
mpii_bio_disk(struct mpii_softc *sc, struct bioc_disk *bd, u_int8_t dn)
{
	struct mpii_cfg_raid_physdisk_pg0	*ppg;
	struct mpii_cfg_hdr			hdr;
	struct mpii_device			*dev;
	int					len;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_disk %d\n", DEVNAME(sc),
	    bd->bd_diskid);

	ppg = malloc(sizeof(*ppg), M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (ppg == NULL) {
		printf("%s: unable to allocate space for raid physical disk "
		    "page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	/* build the config header by hand; the page has a fixed layout */
	hdr.page_version = 0;
	hdr.page_length = sizeof(*ppg) / 4;
	hdr.page_number = 0;
	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_RAID_PD;

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_PHYS_DISK_ADDR_NUMBER | dn, 0,
	    &hdr, 1, ppg, sizeof(*ppg)) != 0) {
		printf("%s: unable to fetch raid drive page 0\n",
		    DEVNAME(sc));
		free(ppg, M_TEMP, sizeof(*ppg));
		return (EINVAL);
	}

	bd->bd_target = ppg->phys_disk_num;

	/* an unknown dev handle means the disk is gone */
	if ((dev = mpii_find_dev(sc, lemtoh16(&ppg->dev_handle))) == NULL) {
		bd->bd_status = BIOC_SDINVALID;
		free(ppg, M_TEMP, sizeof(*ppg));
		return (0);
	}

	/* map the firmware disk state onto bio(4) status values */
	switch (ppg->phys_disk_state) {
	case MPII_CFG_RAID_PHYDISK_0_STATE_ONLINE:
	case MPII_CFG_RAID_PHYDISK_0_STATE_OPTIMAL:
		bd->bd_status = BIOC_SDONLINE;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_OFFLINE:
		/* distinguish failed disks from administratively offline */
		if (ppg->offline_reason ==
		    MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILED ||
		    ppg->offline_reason ==
		    MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILEDREQ)
			bd->bd_status = BIOC_SDFAILED;
		else
			bd->bd_status = BIOC_SDOFFLINE;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_DEGRADED:
		bd->bd_status = BIOC_SDFAILED;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_REBUILDING:
		bd->bd_status = BIOC_SDREBUILD;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_HOTSPARE:
		bd->bd_status = BIOC_SDHOTSPARE;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCONFIGURED:
		bd->bd_status = BIOC_SDUNUSED;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCOMPATIBLE:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = letoh64(ppg->dev_max_lba) * lemtoh16(&ppg->block_size);

	/* present "vendor product" as a single space-separated string */
	scsi_strvis(bd->bd_vendor, ppg->vendor_id, sizeof(ppg->vendor_id));
	len = strlen(bd->bd_vendor);
	bd->bd_vendor[len] = ' ';
	scsi_strvis(&bd->bd_vendor[len + 1], ppg->product_id,
	    sizeof(ppg->product_id));
	scsi_strvis(bd->bd_serial, ppg->serial, sizeof(ppg->serial));

	free(ppg, M_TEMP, sizeof(*ppg));
	return (0);
}
3667 
3668 struct mpii_device *
3669 mpii_find_vol(struct mpii_softc *sc, int volid)
3670 {
3671 	struct mpii_device	*dev = NULL;
3672 
3673 	if (sc->sc_vd_id_low + volid >= sc->sc_max_devices)
3674 		return (NULL);
3675 	dev = sc->sc_devs[sc->sc_vd_id_low + volid];
3676 	if (dev && ISSET(dev->flags, MPII_DF_VOLUME))
3677 		return (dev);
3678 	return (NULL);
3679 }
3680 
3681 #ifndef SMALL_KERNEL
3682 /*
3683  * Non-sleeping lightweight version of the mpii_ioctl_vol
3684  */
int
mpii_bio_volstate(struct mpii_softc *sc, struct bioc_vol *bv)
{
	struct mpii_cfg_raid_vol_pg0	*vpg;
	struct mpii_cfg_hdr		hdr;
	struct mpii_device		*dev = NULL;
	size_t				pagelen;
	u_int16_t			volh;

	if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL)
		return (ENODEV);
	volh = dev->dev_handle;

	/* MPII_PG_POLL: this runs from the sensor task, so never sleep */
	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, MPII_PG_POLL, &hdr) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: unable to fetch header for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is in 32-bit dwords; M_NOWAIT for the same reason */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_NOWAIT | M_ZERO);
	if (vpg == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: unable to allocate space for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh,
	    MPII_PG_POLL, &hdr, 1, vpg, pagelen) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: unable to fetch raid volume "
		    "page 0\n", DEVNAME(sc));
		free(vpg, M_TEMP, pagelen);
		return (EINVAL);
	}

	/* map the firmware volume state onto bio(4) status values */
	switch (vpg->volume_state) {
	case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
	case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
		if (ISSET(lemtoh32(&vpg->volume_status),
		    MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
			bv->bv_status = BIOC_SVREBUILD;
		else
			bv->bv_status = BIOC_SVDEGRADED;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_FAILED:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
		bv->bv_status = BIOC_SVBUILDING;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_MISSING:
	default:
		bv->bv_status = BIOC_SVINVALID;
		break;
	}

	free(vpg, M_TEMP, pagelen);
	return (0);
}
3748 
3749 int
3750 mpii_create_sensors(struct mpii_softc *sc)
3751 {
3752 	struct scsibus_softc	*ssc = sc->sc_scsibus;
3753 	struct device		*dev;
3754 	struct scsi_link	*link;
3755 	int			i;
3756 
3757 	sc->sc_sensors = mallocarray(sc->sc_vd_count, sizeof(struct ksensor),
3758 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3759 	if (sc->sc_sensors == NULL)
3760 		return (1);
3761 	sc->sc_nsensors = sc->sc_vd_count;
3762 
3763 	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
3764 	    sizeof(sc->sc_sensordev.xname));
3765 
3766 	for (i = 0; i < sc->sc_vd_count; i++) {
3767 		link = scsi_get_link(ssc, i + sc->sc_vd_id_low, 0);
3768 		if (link == NULL)
3769 			goto bad;
3770 
3771 		dev = link->device_softc;
3772 
3773 		sc->sc_sensors[i].type = SENSOR_DRIVE;
3774 		sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
3775 
3776 		strlcpy(sc->sc_sensors[i].desc, dev->dv_xname,
3777 		    sizeof(sc->sc_sensors[i].desc));
3778 
3779 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
3780 	}
3781 
3782 	if (sensor_task_register(sc, mpii_refresh_sensors, 10) == NULL)
3783 		goto bad;
3784 
3785 	sensordev_install(&sc->sc_sensordev);
3786 
3787 	return (0);
3788 
3789 bad:
3790 	free(sc->sc_sensors, M_DEVBUF, 0);
3791 
3792 	return (1);
3793 }
3794 
3795 void
3796 mpii_refresh_sensors(void *arg)
3797 {
3798 	struct mpii_softc	*sc = arg;
3799 	struct bioc_vol		bv;
3800 	int			i;
3801 
3802 	for (i = 0; i < sc->sc_nsensors; i++) {
3803 		memset(&bv, 0, sizeof(bv));
3804 		bv.bv_volid = i;
3805 		if (mpii_bio_volstate(sc, &bv))
3806 			return;
3807 		switch(bv.bv_status) {
3808 		case BIOC_SVOFFLINE:
3809 			sc->sc_sensors[i].value = SENSOR_DRIVE_FAIL;
3810 			sc->sc_sensors[i].status = SENSOR_S_CRIT;
3811 			break;
3812 		case BIOC_SVDEGRADED:
3813 			sc->sc_sensors[i].value = SENSOR_DRIVE_PFAIL;
3814 			sc->sc_sensors[i].status = SENSOR_S_WARN;
3815 			break;
3816 		case BIOC_SVREBUILD:
3817 			sc->sc_sensors[i].value = SENSOR_DRIVE_REBUILD;
3818 			sc->sc_sensors[i].status = SENSOR_S_WARN;
3819 			break;
3820 		case BIOC_SVONLINE:
3821 			sc->sc_sensors[i].value = SENSOR_DRIVE_ONLINE;
3822 			sc->sc_sensors[i].status = SENSOR_S_OK;
3823 			break;
3824 		case BIOC_SVINVALID:
3825 			/* FALLTHROUGH */
3826 		default:
3827 			sc->sc_sensors[i].value = 0; /* unknown */
3828 			sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
3829 		}
3830 	}
3831 }
3832 #endif /* SMALL_KERNEL */
3833 #endif /* NBIO > 0 */
3834