xref: /openbsd-src/sys/dev/pci/mpii.c (revision 50b7afb2c2c0993b0894d4e34bf857cb13ed9c80)
1 /*	$OpenBSD: mpii.c,v 1.96 2014/07/13 23:10:23 deraadt Exp $	*/
2 /*
3  * Copyright (c) 2010, 2012 Mike Belopuhov
4  * Copyright (c) 2009 James Giannoules
5  * Copyright (c) 2005 - 2010 David Gwynne <dlg@openbsd.org>
6  * Copyright (c) 2005 - 2010 Marco Peereboom <marco@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include "bio.h"
22 
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/buf.h>
26 #include <sys/device.h>
27 #include <sys/ioctl.h>
28 #include <sys/malloc.h>
29 #include <sys/kernel.h>
30 #include <sys/rwlock.h>
31 #include <sys/sensors.h>
32 #include <sys/dkio.h>
33 #include <sys/tree.h>
34 #include <sys/task.h>
35 
36 #include <machine/bus.h>
37 
38 #include <dev/pci/pcireg.h>
39 #include <dev/pci/pcivar.h>
40 #include <dev/pci/pcidevs.h>
41 
42 #include <scsi/scsi_all.h>
43 #include <scsi/scsiconf.h>
44 
45 #include <dev/biovar.h>
46 
47 #include <dev/pci/mpiireg.h>
48 
49 /* #define MPII_DEBUG */
50 #ifdef MPII_DEBUG
51 #define DPRINTF(x...)		do { if (mpii_debug) printf(x); } while(0)
52 #define DNPRINTF(n,x...)	do { if (mpii_debug & (n)) printf(x); } while(0)
53 #define	MPII_D_CMD		(0x0001)
54 #define	MPII_D_INTR		(0x0002)
55 #define	MPII_D_MISC		(0x0004)
56 #define	MPII_D_DMA		(0x0008)
57 #define	MPII_D_IOCTL		(0x0010)
58 #define	MPII_D_RW		(0x0020)
59 #define	MPII_D_MEM		(0x0040)
60 #define	MPII_D_CCB		(0x0080)
61 #define	MPII_D_PPR		(0x0100)
62 #define	MPII_D_RAID		(0x0200)
63 #define	MPII_D_EVT		(0x0400)
64 #define MPII_D_CFG		(0x0800)
65 #define MPII_D_MAP		(0x1000)
66 
67 u_int32_t  mpii_debug = 0
68 		| MPII_D_CMD
69 		| MPII_D_INTR
70 		| MPII_D_MISC
71 		| MPII_D_DMA
72 		| MPII_D_IOCTL
73 		| MPII_D_RW
74 		| MPII_D_MEM
75 		| MPII_D_CCB
76 		| MPII_D_PPR
77 		| MPII_D_RAID
78 		| MPII_D_EVT
79 		| MPII_D_CFG
80 		| MPII_D_MAP
81 	;
82 #else
83 #define DPRINTF(x...)
84 #define DNPRINTF(n,x...)
85 #endif
86 
87 #define MPII_REQUEST_SIZE		(512)
88 #define MPII_REQUEST_CREDIT		(128)
89 
90 struct mpii_dmamem {
91 	bus_dmamap_t		mdm_map;
92 	bus_dma_segment_t	mdm_seg;
93 	size_t			mdm_size;
94 	caddr_t			mdm_kva;
95 };
96 #define MPII_DMA_MAP(_mdm) ((_mdm)->mdm_map)
97 #define MPII_DMA_DVA(_mdm) ((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
98 #define MPII_DMA_KVA(_mdm) ((void *)(_mdm)->mdm_kva)
99 
100 struct mpii_softc;
101 
102 struct mpii_rcb {
103 	SIMPLEQ_ENTRY(mpii_rcb)	rcb_link;
104 	void			*rcb_reply;
105 	u_int32_t		rcb_reply_dva;
106 };
107 
108 SIMPLEQ_HEAD(mpii_rcb_list, mpii_rcb);
109 
110 struct mpii_device {
111 	int			flags;
112 #define MPII_DF_ATTACH		(0x0001)
113 #define MPII_DF_DETACH		(0x0002)
114 #define MPII_DF_HIDDEN		(0x0004)
115 #define MPII_DF_UNUSED		(0x0008)
116 #define MPII_DF_VOLUME		(0x0010)
117 #define MPII_DF_VOLUME_DISK	(0x0020)
118 #define MPII_DF_HOT_SPARE	(0x0040)
119 	short			slot;
120 	short			percent;
121 	u_int16_t		dev_handle;
122 	u_int16_t		enclosure;
123 	u_int16_t		expander;
124 	u_int8_t		phy_num;
125 	u_int8_t		physical_port;
126 };
127 
128 struct mpii_ccb {
129 	struct mpii_softc	*ccb_sc;
130 
131 	void *			ccb_cookie;
132 	bus_dmamap_t		ccb_dmamap;
133 
134 	bus_addr_t		ccb_offset;
135 	void			*ccb_cmd;
136 	bus_addr_t		ccb_cmd_dva;
137 	u_int16_t		ccb_dev_handle;
138 	u_int16_t		ccb_smid;
139 
140 	volatile enum {
141 		MPII_CCB_FREE,
142 		MPII_CCB_READY,
143 		MPII_CCB_QUEUED,
144 		MPII_CCB_TIMEOUT
145 	}			ccb_state;
146 
147 	void			(*ccb_done)(struct mpii_ccb *);
148 	struct mpii_rcb		*ccb_rcb;
149 
150 	SIMPLEQ_ENTRY(mpii_ccb)	ccb_link;
151 };
152 
153 SIMPLEQ_HEAD(mpii_ccb_list, mpii_ccb);
154 
155 struct mpii_softc {
156 	struct device		sc_dev;
157 
158 	pci_chipset_tag_t	sc_pc;
159 	pcitag_t		sc_tag;
160 
161 	void			*sc_ih;
162 
163 	struct scsi_link	sc_link;
164 
165 	int			sc_flags;
166 #define MPII_F_RAID		(1<<1)
167 
168 	struct scsibus_softc	*sc_scsibus;
169 
170 	struct mpii_device	**sc_devs;
171 
172 	bus_space_tag_t		sc_iot;
173 	bus_space_handle_t	sc_ioh;
174 	bus_size_t		sc_ios;
175 	bus_dma_tag_t		sc_dmat;
176 
177 	struct mutex		sc_req_mtx;
178 	struct mutex		sc_rep_mtx;
179 
180 	ushort			sc_reply_size;
181 	ushort			sc_request_size;
182 
183 	ushort			sc_max_cmds;
184 	ushort			sc_num_reply_frames;
185 	u_int			sc_reply_free_qdepth;
186 	u_int			sc_reply_post_qdepth;
187 
188 	ushort			sc_chain_sge;
189 	ushort			sc_max_sgl;
190 
191 	u_int8_t		sc_ioc_event_replay;
192 
193 	u_int8_t		sc_porttype;
194 	u_int8_t		sc_max_volumes;
195 	u_int16_t		sc_max_devices;
196 	u_int16_t		sc_vd_count;
197 	u_int16_t		sc_vd_id_low;
198 	u_int16_t		sc_pd_id_start;
199 	int			sc_ioc_number;
200 	u_int8_t		sc_vf_id;
201 
202 	struct mpii_ccb		*sc_ccbs;
203 	struct mpii_ccb_list	sc_ccb_free;
204 	struct mutex		sc_ccb_free_mtx;
205 
206 	struct mutex		sc_ccb_mtx;
207 				/*
208 				 * this protects the ccb state and list entry
209 				 * between mpii_scsi_cmd and mpii_scsi_cmd_done.
210 				 */
211 
212 	struct mpii_ccb_list	sc_ccb_tmos;
213 	struct scsi_iohandler	sc_ccb_tmo_handler;
214 
215 	struct scsi_iopool	sc_iopool;
216 
217 	struct mpii_dmamem	*sc_requests;
218 
219 	struct mpii_dmamem	*sc_replies;
220 	struct mpii_rcb		*sc_rcbs;
221 
222 	struct mpii_dmamem	*sc_reply_postq;
223 	struct mpii_reply_descr	*sc_reply_postq_kva;
224 	u_int			sc_reply_post_host_index;
225 
226 	struct mpii_dmamem	*sc_reply_freeq;
227 	u_int			sc_reply_free_host_index;
228 
229 	struct mpii_rcb_list	sc_evt_sas_queue;
230 	struct mutex		sc_evt_sas_mtx;
231 	struct task		sc_evt_sas_task;
232 
233 	struct mpii_rcb_list	sc_evt_ack_queue;
234 	struct mutex		sc_evt_ack_mtx;
235 	struct scsi_iohandler	sc_evt_ack_handler;
236 
237 	/* scsi ioctl from sd device */
238 	int			(*sc_ioctl)(struct device *, u_long, caddr_t);
239 
240 	int			sc_nsensors;
241 	struct ksensor		*sc_sensors;
242 	struct ksensordev	sc_sensordev;
243 };
244 
245 int	mpii_match(struct device *, void *, void *);
246 void	mpii_attach(struct device *, struct device *, void *);
247 int	mpii_detach(struct device *, int);
248 
249 int	mpii_intr(void *);
250 
251 struct cfattach mpii_ca = {
252 	sizeof(struct mpii_softc),
253 	mpii_match,
254 	mpii_attach,
255 	mpii_detach
256 };
257 
258 struct cfdriver mpii_cd = {
259 	NULL,
260 	"mpii",
261 	DV_DULL
262 };
263 
264 void		mpii_scsi_cmd(struct scsi_xfer *);
265 void		mpii_scsi_cmd_done(struct mpii_ccb *);
266 int		mpii_scsi_probe(struct scsi_link *);
267 int		mpii_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
268 
269 struct scsi_adapter mpii_switch = {
270 	mpii_scsi_cmd,
271 	scsi_minphys,
272 	mpii_scsi_probe,
273 	NULL,
274 	mpii_scsi_ioctl
275 };
276 
277 struct mpii_dmamem *
278 		mpii_dmamem_alloc(struct mpii_softc *, size_t);
279 void		mpii_dmamem_free(struct mpii_softc *,
280 		    struct mpii_dmamem *);
281 int		mpii_alloc_ccbs(struct mpii_softc *);
282 void *		mpii_get_ccb(void *);
283 void		mpii_put_ccb(void *, void *);
284 int		mpii_alloc_replies(struct mpii_softc *);
285 int		mpii_alloc_queues(struct mpii_softc *);
286 void		mpii_push_reply(struct mpii_softc *, struct mpii_rcb *);
287 void		mpii_push_replies(struct mpii_softc *);
288 
289 void		mpii_scsi_cmd_tmo(void *);
290 void		mpii_scsi_cmd_tmo_handler(void *, void *);
291 void		mpii_scsi_cmd_tmo_done(struct mpii_ccb *);
292 
293 int		mpii_insert_dev(struct mpii_softc *, struct mpii_device *);
294 int		mpii_remove_dev(struct mpii_softc *, struct mpii_device *);
295 struct mpii_device *
296 		mpii_find_dev(struct mpii_softc *, u_int16_t);
297 
298 void		mpii_start(struct mpii_softc *, struct mpii_ccb *);
299 int		mpii_poll(struct mpii_softc *, struct mpii_ccb *);
300 void		mpii_poll_done(struct mpii_ccb *);
301 struct mpii_rcb *
302 		mpii_reply(struct mpii_softc *, struct mpii_reply_descr *);
303 
304 void		mpii_wait(struct mpii_softc *, struct mpii_ccb *);
305 void		mpii_wait_done(struct mpii_ccb *);
306 
307 void		mpii_init_queues(struct mpii_softc *);
308 
309 int		mpii_load_xs(struct mpii_ccb *);
310 
311 u_int32_t	mpii_read(struct mpii_softc *, bus_size_t);
312 void		mpii_write(struct mpii_softc *, bus_size_t, u_int32_t);
313 int		mpii_wait_eq(struct mpii_softc *, bus_size_t, u_int32_t,
314 		    u_int32_t);
315 int		mpii_wait_ne(struct mpii_softc *, bus_size_t, u_int32_t,
316 		    u_int32_t);
317 
318 int		mpii_init(struct mpii_softc *);
319 int		mpii_reset_soft(struct mpii_softc *);
320 int		mpii_reset_hard(struct mpii_softc *);
321 
322 int		mpii_handshake_send(struct mpii_softc *, void *, size_t);
323 int		mpii_handshake_recv_dword(struct mpii_softc *,
324 		    u_int32_t *);
325 int		mpii_handshake_recv(struct mpii_softc *, void *, size_t);
326 
327 void		mpii_empty_done(struct mpii_ccb *);
328 
329 int		mpii_iocinit(struct mpii_softc *);
330 int		mpii_iocfacts(struct mpii_softc *);
331 int		mpii_portfacts(struct mpii_softc *);
332 int		mpii_portenable(struct mpii_softc *);
333 int		mpii_cfg_coalescing(struct mpii_softc *);
334 int		mpii_board_info(struct mpii_softc *);
335 int		mpii_target_map(struct mpii_softc *);
336 
337 int		mpii_eventnotify(struct mpii_softc *);
338 void		mpii_eventnotify_done(struct mpii_ccb *);
339 void		mpii_eventack(void *, void *);
340 void		mpii_eventack_done(struct mpii_ccb *);
341 void		mpii_event_process(struct mpii_softc *, struct mpii_rcb *);
342 void		mpii_event_done(struct mpii_softc *, struct mpii_rcb *);
343 void		mpii_event_sas(void *, void *);
344 void		mpii_event_raid(struct mpii_softc *,
345 		    struct mpii_msg_event_reply *);
346 
347 void		mpii_sas_remove_device(struct mpii_softc *, u_int16_t);
348 
349 int		mpii_req_cfg_header(struct mpii_softc *, u_int8_t,
350 		    u_int8_t, u_int32_t, int, void *);
351 int		mpii_req_cfg_page(struct mpii_softc *, u_int32_t, int,
352 		    void *, int, void *, size_t);
353 
354 int		mpii_ioctl_cache(struct scsi_link *, u_long, struct dk_cache *);
355 
356 #if NBIO > 0
357 int		mpii_ioctl(struct device *, u_long, caddr_t);
358 int		mpii_ioctl_inq(struct mpii_softc *, struct bioc_inq *);
359 int		mpii_ioctl_vol(struct mpii_softc *, struct bioc_vol *);
360 int		mpii_ioctl_disk(struct mpii_softc *, struct bioc_disk *);
361 int		mpii_bio_hs(struct mpii_softc *, struct bioc_disk *, int,
362 		    int, int *);
363 int		mpii_bio_disk(struct mpii_softc *, struct bioc_disk *,
364 		    u_int8_t);
365 struct mpii_device *
366 		mpii_find_vol(struct mpii_softc *, int);
367 #ifndef SMALL_KERNEL
368 int		mpii_bio_volstate(struct mpii_softc *, struct bioc_vol *);
369 int		mpii_create_sensors(struct mpii_softc *);
370 void		mpii_refresh_sensors(void *);
371 #endif /* SMALL_KERNEL */
372 #endif /* NBIO > 0 */
373 
374 #define DEVNAME(s)		((s)->sc_dev.dv_xname)
375 
376 #define dwordsof(s)		(sizeof(s) / sizeof(u_int32_t))
377 
378 #define mpii_read_db(s)		mpii_read((s), MPII_DOORBELL)
379 #define mpii_write_db(s, v)	mpii_write((s), MPII_DOORBELL, (v))
380 #define mpii_read_intr(s)	mpii_read((s), MPII_INTR_STATUS)
381 #define mpii_write_intr(s, v)	mpii_write((s), MPII_INTR_STATUS, (v))
382 #define mpii_reply_waiting(s)	((mpii_read_intr((s)) & MPII_INTR_STATUS_REPLY)\
383 				    == MPII_INTR_STATUS_REPLY)
384 
385 #define mpii_write_reply_free(s, v) \
386     bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
387     MPII_REPLY_FREE_HOST_INDEX, (v))
388 #define mpii_write_reply_post(s, v) \
389     bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
390     MPII_REPLY_POST_HOST_INDEX, (v))
391 
392 #define mpii_wait_db_int(s)	mpii_wait_ne((s), MPII_INTR_STATUS, \
393 				    MPII_INTR_STATUS_IOC2SYSDB, 0)
394 #define mpii_wait_db_ack(s)	mpii_wait_eq((s), MPII_INTR_STATUS, \
395 				    MPII_INTR_STATUS_SYS2IOCDB, 0)
396 
397 static inline void
398 mpii_dvatosge(struct mpii_sge *sge, u_int64_t dva)
399 {
400 	htolem32(&sge->sg_addr_lo, dva);
401 	htolem32(&sge->sg_addr_hi, dva >> 32);
402 }
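
/*
 * A small illustration of the helper above: the 64-bit DVA is split into
 * the little-endian low/high words of the SGE.  For example, a dva of
 * 0x0000000123456000 stores 0x23456000 in sg_addr_lo and 0x00000001 in
 * sg_addr_hi (both little-endian); htolem32() takes care of the
 * truncation and byte order.
 */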
403 
404 #define MPII_PG_EXTENDED	(1<<0)
405 #define MPII_PG_POLL		(1<<1)
406 #define MPII_PG_FMT		"\020" "\002POLL" "\001EXTENDED"
407 
408 static const struct pci_matchid mpii_devices[] = {
409 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2004 },
410 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2008 },
411 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_3 },
412 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_4 },
413 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_5 },
414 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2116_1 },
415 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2116_2 },
416 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_1 },
417 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_2 },
418 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_3 },
419 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_4 },
420 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_5 },
421 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_6 },
422 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_1 },
423 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_2 },
424 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_3 }
425 };
426 
427 int
428 mpii_match(struct device *parent, void *match, void *aux)
429 {
430 	return (pci_matchbyid(aux, mpii_devices, nitems(mpii_devices)));
431 }
432 
433 void
434 mpii_attach(struct device *parent, struct device *self, void *aux)
435 {
436 	struct mpii_softc		*sc = (struct mpii_softc *)self;
437 	struct pci_attach_args		*pa = aux;
438 	pcireg_t			memtype;
439 	int				r;
440 	pci_intr_handle_t		ih;
441 	struct scsibus_attach_args	saa;
442 	struct mpii_ccb			*ccb;
443 
444 	sc->sc_pc = pa->pa_pc;
445 	sc->sc_tag = pa->pa_tag;
446 	sc->sc_dmat = pa->pa_dmat;
447 
448 	mtx_init(&sc->sc_req_mtx, IPL_BIO);
449 	mtx_init(&sc->sc_rep_mtx, IPL_BIO);
450 
451 	/* find the appropriate memory base */
452 	for (r = PCI_MAPREG_START; r < PCI_MAPREG_END; r += sizeof(memtype)) {
453 		memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, r);
454 		if ((memtype & PCI_MAPREG_TYPE_MASK) == PCI_MAPREG_TYPE_MEM)
455 			break;
456 	}
457 	if (r >= PCI_MAPREG_END) {
458 		printf(": unable to locate system interface registers\n");
459 		return;
460 	}
461 
462 	if (pci_mapreg_map(pa, r, memtype, 0, &sc->sc_iot, &sc->sc_ioh,
463 	    NULL, &sc->sc_ios, 0xFF) != 0) {
464 		printf(": unable to map system interface registers\n");
465 		return;
466 	}
467 
468 	/* disable the expansion rom */
469 	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_ROM_REG,
470 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_ROM_REG) &
471 	    ~PCI_ROM_ENABLE);
472 
473 	/* disable interrupts */
474 	mpii_write(sc, MPII_INTR_MASK,
475 	    MPII_INTR_MASK_RESET | MPII_INTR_MASK_REPLY |
476 	    MPII_INTR_MASK_DOORBELL);
477 
478 	/* hook up the interrupt */
479 	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
480 		printf(": unable to map interrupt\n");
481 		goto unmap;
482 	}
483 	printf(": %s\n", pci_intr_string(sc->sc_pc, ih));
484 
485 	if (mpii_init(sc) != 0) {
486 		printf("%s: unable to initialize ioc\n", DEVNAME(sc));
487 		goto unmap;
488 	}
489 
490 	if (mpii_iocfacts(sc) != 0) {
491 		printf("%s: unable to get iocfacts\n", DEVNAME(sc));
492 		goto unmap;
493 	}
494 
495 	if (mpii_alloc_ccbs(sc) != 0) {
496 		/* error already printed */
497 		goto unmap;
498 	}
499 
500 	if (mpii_alloc_replies(sc) != 0) {
501 		printf("%s: unable to allocate reply space\n", DEVNAME(sc));
502 		goto free_ccbs;
503 	}
504 
505 	if (mpii_alloc_queues(sc) != 0) {
506 		printf("%s: unable to allocate reply queues\n", DEVNAME(sc));
507 		goto free_replies;
508 	}
509 
510 	if (mpii_iocinit(sc) != 0) {
511 		printf("%s: unable to send iocinit\n", DEVNAME(sc));
512 		goto free_queues;
513 	}
514 
515 	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
516 	    MPII_DOORBELL_STATE_OPER) != 0) {
517 		printf("%s: state: 0x%08x\n", DEVNAME(sc),
518 		    mpii_read_db(sc) & MPII_DOORBELL_STATE);
519 		printf("%s: operational state timeout\n", DEVNAME(sc));
520 		goto free_queues;
521 	}
522 
523 	mpii_push_replies(sc);
524 	mpii_init_queues(sc);
525 
526 	if (mpii_board_info(sc) != 0) {
527 		printf("%s: unable to get manufacturing page 0\n",
528 		    DEVNAME(sc));
529 		goto free_queues;
530 	}
531 
532 	if (mpii_portfacts(sc) != 0) {
533 		printf("%s: unable to get portfacts\n", DEVNAME(sc));
534 		goto free_queues;
535 	}
536 
537 	if (mpii_target_map(sc) != 0) {
538 		printf("%s: unable to setup target mappings\n", DEVNAME(sc));
539 		goto free_queues;
540 	}
541 
542 	if (mpii_cfg_coalescing(sc) != 0) {
543 		printf("%s: unable to configure coalescing\n", DEVNAME(sc));
544 		goto free_queues;
545 	}
546 
547 	/* XXX bail on unsupported porttype? */
548 	if ((sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_PHYSICAL) ||
549 	    (sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_VIRTUAL)) {
550 		if (mpii_eventnotify(sc) != 0) {
551 			printf("%s: unable to enable events\n", DEVNAME(sc));
552 			goto free_queues;
553 		}
554 	}
555 
556 	sc->sc_devs = mallocarray(sc->sc_max_devices,
557 	    sizeof(struct mpii_device *), M_DEVBUF, M_NOWAIT | M_ZERO);
558 	if (sc->sc_devs == NULL) {
559 		printf("%s: unable to allocate memory for mpii_device\n",
560 		    DEVNAME(sc));
561 		goto free_queues;
562 	}
563 
564 	if (mpii_portenable(sc) != 0) {
565 		printf("%s: unable to enable port\n", DEVNAME(sc));
566 		goto free_devs;
567 	}
568 
569 	/* we should be good to go now, attach scsibus */
570 	sc->sc_link.adapter = &mpii_switch;
571 	sc->sc_link.adapter_softc = sc;
572 	sc->sc_link.adapter_target = -1;
573 	sc->sc_link.adapter_buswidth = sc->sc_max_devices;
574 	sc->sc_link.luns = 1;
575 	sc->sc_link.openings = sc->sc_max_cmds - 1;
576 	sc->sc_link.pool = &sc->sc_iopool;
577 
578 	memset(&saa, 0, sizeof(saa));
579 	saa.saa_sc_link = &sc->sc_link;
580 
581 	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO,
582 	    mpii_intr, sc, sc->sc_dev.dv_xname);
583 	if (sc->sc_ih == NULL)
584 		goto free_devs;
585 
586 	/* config_found() returns the scsibus attached to us */
587 	sc->sc_scsibus = (struct scsibus_softc *) config_found(&sc->sc_dev,
588 	    &saa, scsiprint);
589 
590 	/* enable interrupts */
591 	mpii_write(sc, MPII_INTR_MASK, MPII_INTR_MASK_DOORBELL
592 	    | MPII_INTR_MASK_RESET);
593 
594 #if NBIO > 0
595 	if (ISSET(sc->sc_flags, MPII_F_RAID)) {
596 		if (bio_register(&sc->sc_dev, mpii_ioctl) != 0)
597 			panic("%s: controller registration failed",
598 			    DEVNAME(sc));
599 		else
600 			sc->sc_ioctl = mpii_ioctl;
601 
602 #ifndef SMALL_KERNEL
603 		if (mpii_create_sensors(sc) != 0)
604 			printf("%s: unable to create sensors\n", DEVNAME(sc));
605 #endif
606 	}
607 #endif
608 
609 	return;
610 
611 free_devs:
612 	free(sc->sc_devs, M_DEVBUF, 0);
613 	sc->sc_devs = NULL;
614 
615 free_queues:
616 	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_freeq),
617 	    0, sc->sc_reply_free_qdepth * 4, BUS_DMASYNC_POSTREAD);
618 	mpii_dmamem_free(sc, sc->sc_reply_freeq);
619 
620 	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
621 	    0, sc->sc_reply_post_qdepth * 8, BUS_DMASYNC_POSTREAD);
622 	mpii_dmamem_free(sc, sc->sc_reply_postq);
623 
624 free_replies:
625 	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
626 	    0, PAGE_SIZE, BUS_DMASYNC_POSTREAD);
627 	mpii_dmamem_free(sc, sc->sc_replies);
628 
629 free_ccbs:
630 	while ((ccb = mpii_get_ccb(sc)) != NULL)
631 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
632 	mpii_dmamem_free(sc, sc->sc_requests);
633 	free(sc->sc_ccbs, M_DEVBUF, 0);
634 
635 unmap:
636 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
637 	sc->sc_ios = 0;
638 }
639 
640 int
641 mpii_detach(struct device *self, int flags)
642 {
643 	struct mpii_softc		*sc = (struct mpii_softc *)self;
644 
645 	if (sc->sc_ih != NULL) {
646 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
647 		sc->sc_ih = NULL;
648 	}
649 	if (sc->sc_ios != 0) {
650 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
651 		sc->sc_ios = 0;
652 	}
653 
654 	return (0);
655 }
656 
657 int
658 mpii_intr(void *arg)
659 {
660 	struct mpii_rcb_list		evts = SIMPLEQ_HEAD_INITIALIZER(evts);
661 	struct mpii_ccb_list		ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
662 	struct mpii_softc		*sc = arg;
663 	struct mpii_reply_descr		*postq = sc->sc_reply_postq_kva, *rdp;
664 	struct mpii_ccb			*ccb;
665 	struct mpii_rcb			*rcb;
666 	int				smid;
667 	u_int				idx;
668 	int				rv = 0;
669 
670 	mtx_enter(&sc->sc_rep_mtx);
671 	bus_dmamap_sync(sc->sc_dmat,
672 	    MPII_DMA_MAP(sc->sc_reply_postq),
673 	    0, sc->sc_reply_post_qdepth * sizeof(*rdp),
674 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
675 
676 	idx = sc->sc_reply_post_host_index;
677 	for (;;) {
678 		rdp = &postq[idx];
679 		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
680 		    MPII_REPLY_DESCR_UNUSED)
681 			break;
682 		if (rdp->data == 0xffffffff) {
683 			/*
684 			 * the ioc is still writing to this descriptor in the
685 			 * reply post queue; bail out to avoid the race.
686 			 */
687 			break;
688 		}
689 
690 		smid = lemtoh16(&rdp->smid);
691 		rcb = mpii_reply(sc, rdp);
692 
693 		if (smid) {
694 			ccb = &sc->sc_ccbs[smid - 1];
695 			ccb->ccb_state = MPII_CCB_READY;
696 			ccb->ccb_rcb = rcb;
697 			SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
698 		} else
699 			SIMPLEQ_INSERT_TAIL(&evts, rcb, rcb_link);
700 
701 		if (++idx >= sc->sc_reply_post_qdepth)
702 			idx = 0;
703 
704 		rv = 1;
705 	}
706 
707 	bus_dmamap_sync(sc->sc_dmat,
708 	    MPII_DMA_MAP(sc->sc_reply_postq),
709 	    0, sc->sc_reply_post_qdepth * sizeof(*rdp),
710 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
711 
712 	if (rv)
713 		mpii_write_reply_post(sc, sc->sc_reply_post_host_index = idx);
714 
715 	mtx_leave(&sc->sc_rep_mtx);
716 
717 	if (rv == 0)
718 		return (0);
719 
720 	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
721 		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
722 		ccb->ccb_done(ccb);
723 	}
724 	while ((rcb = SIMPLEQ_FIRST(&evts)) != NULL) {
725 		SIMPLEQ_REMOVE_HEAD(&evts, rcb_link);
726 		mpii_event_process(sc, rcb);
727 	}
728 
729 	return (1);
730 }
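
/*
 * A note on the interrupt path above: the reply post queue is a ring of
 * 8-byte descriptors written by the IOC.  The handler consumes entries
 * starting at sc_reply_post_host_index until it hits an UNUSED descriptor,
 * then writes the new index back through MPII_REPLY_POST_HOST_INDEX so the
 * IOC can reuse those slots.  Completed ccbs and event replies are
 * collected on local lists under sc_rep_mtx and processed after the mutex
 * is released.
 */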
731 
732 int
733 mpii_load_xs(struct mpii_ccb *ccb)
734 {
735 	struct mpii_softc	*sc = ccb->ccb_sc;
736 	struct scsi_xfer	*xs = ccb->ccb_cookie;
737 	struct mpii_msg_scsi_io	*io = ccb->ccb_cmd;
738 	struct mpii_sge		*csge, *nsge, *sge;
739 	bus_dmamap_t		dmap = ccb->ccb_dmamap;
740 	u_int32_t		flags;
741 	u_int16_t		len;
742 	int			i, error;
743 
744 	/* the request frame layout is described in mpii_iocfacts() */
745 	nsge = (struct mpii_sge *)(io + 1);
746 	csge = nsge + sc->sc_chain_sge;
747 
748 	/* zero length transfer still requires an SGE */
749 	if (xs->datalen == 0) {
750 		nsge->sg_hdr = htole32(MPII_SGE_FL_TYPE_SIMPLE |
751 		    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL);
752 		return (0);
753 	}
754 
755 	error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL,
756 	    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
757 	if (error) {
758 		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
759 		return (1);
760 	}
761 
762 	/* safe default starting flags */
763 	flags = MPII_SGE_FL_TYPE_SIMPLE | MPII_SGE_FL_SIZE_64;
764 	if (xs->flags & SCSI_DATA_OUT)
765 		flags |= MPII_SGE_FL_DIR_OUT;
766 
767 	for (i = 0; i < dmap->dm_nsegs; i++, nsge++) {
768 		if (nsge == csge) {
769 			nsge++;
770 			sge->sg_hdr |= htole32(MPII_SGE_FL_LAST);
771 			/* offset to the chain sge from the beginning */
772 			io->chain_offset = ((caddr_t)csge - (caddr_t)io) / 4;
773 			/* length of the sgl segment we're pointing to */
774 			len = (dmap->dm_nsegs - i) * sizeof(*sge);
775 			csge->sg_hdr = htole32(MPII_SGE_FL_TYPE_CHAIN |
776 			    MPII_SGE_FL_SIZE_64 | len);
777 			/* address of the next sge */
778 			mpii_dvatosge(csge, ccb->ccb_cmd_dva +
779 			    (caddr_t)nsge - (caddr_t)io);
780 		}
781 
782 		sge = nsge;
783 		sge->sg_hdr = htole32(flags | dmap->dm_segs[i].ds_len);
784 		mpii_dvatosge(sge, dmap->dm_segs[i].ds_addr);
785 	}
786 
787 	/* terminate list */
788 	sge->sg_hdr |= htole32(MPII_SGE_FL_LAST | MPII_SGE_FL_EOB |
789 	    MPII_SGE_FL_EOL);
790 
791 	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
792 	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
793 	    BUS_DMASYNC_PREWRITE);
794 
795 	return (0);
796 }
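
/*
 * SGL chaining sketch for mpii_load_xs() above: the first sc_chain_sge
 * simple SGEs are placed inline in the request frame.  When more segments
 * are needed, the slot at csge becomes a chain element whose address points
 * just past itself in the same request frame, and the remaining SGEs
 * continue from there (sc_max_sgl, computed in mpii_iocfacts(), is the
 * overall limit).  A hypothetical 3-segment transfer with sc_chain_sge == 2
 * would lay out as: SGE0, SGE1 (LAST), CHAIN -> SGE2 (LAST|EOB|EOL).
 */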
797 
798 int
799 mpii_scsi_probe(struct scsi_link *link)
800 {
801 	struct mpii_softc	*sc = link->adapter_softc;
802 	int			flags;
803 
804 	if ((sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_SAS_PHYSICAL) &&
805 	    (sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_SAS_VIRTUAL))
806 		return (ENXIO);
807 
808 	if (sc->sc_devs[link->target] == NULL)
809 		return (1);
810 
811 	flags = sc->sc_devs[link->target]->flags;
812 	if (ISSET(flags, MPII_DF_HIDDEN) || ISSET(flags, MPII_DF_UNUSED))
813 		return (1);
814 
815 	return (0);
816 }
817 
818 u_int32_t
819 mpii_read(struct mpii_softc *sc, bus_size_t r)
820 {
821 	u_int32_t			rv;
822 
823 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
824 	    BUS_SPACE_BARRIER_READ);
825 	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
826 
827 	DNPRINTF(MPII_D_RW, "%s: mpii_read %#x %#x\n", DEVNAME(sc), r, rv);
828 
829 	return (rv);
830 }
831 
832 void
833 mpii_write(struct mpii_softc *sc, bus_size_t r, u_int32_t v)
834 {
835 	DNPRINTF(MPII_D_RW, "%s: mpii_write %#x %#x\n", DEVNAME(sc), r, v);
836 
837 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
838 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
839 	    BUS_SPACE_BARRIER_WRITE);
840 }
841 
842 
843 int
844 mpii_wait_eq(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
845     u_int32_t target)
846 {
847 	int			i;
848 
849 	DNPRINTF(MPII_D_RW, "%s: mpii_wait_eq %#x %#x %#x\n", DEVNAME(sc), r,
850 	    mask, target);
851 
852 	for (i = 0; i < 15000; i++) {
853 		if ((mpii_read(sc, r) & mask) == target)
854 			return (0);
855 		delay(1000);
856 	}
857 
858 	return (1);
859 }
860 
861 int
862 mpii_wait_ne(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
863     u_int32_t target)
864 {
865 	int			i;
866 
867 	DNPRINTF(MPII_D_RW, "%s: mpii_wait_ne %#x %#x %#x\n", DEVNAME(sc), r,
868 	    mask, target);
869 
870 	for (i = 0; i < 15000; i++) {
871 		if ((mpii_read(sc, r) & mask) != target)
872 			return (0);
873 		delay(1000);
874 	}
875 
876 	return (1);
877 }
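
/*
 * Both polling helpers above spin for at most 15000 iterations with a
 * delay(1000) (1ms) between register reads, i.e. roughly a 15 second
 * timeout, returning 0 on success and 1 on timeout.
 */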
878 
879 int
880 mpii_init(struct mpii_softc *sc)
881 {
882 	u_int32_t		db;
883 	int			i;
884 
885 	/* spin until the ioc leaves the reset state */
886 	if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
887 	    MPII_DOORBELL_STATE_RESET) != 0) {
888 		DNPRINTF(MPII_D_MISC, "%s: mpii_init timeout waiting to leave "
889 		    "reset state\n", DEVNAME(sc));
890 		return (1);
891 	}
892 
893 	/* check current ownership */
894 	db = mpii_read_db(sc);
895 	if ((db & MPII_DOORBELL_WHOINIT) == MPII_DOORBELL_WHOINIT_PCIPEER) {
896 		DNPRINTF(MPII_D_MISC, "%s: mpii_init initialised by pci peer\n",
897 		    DEVNAME(sc));
898 		return (0);
899 	}
900 
901 	for (i = 0; i < 5; i++) {
902 		switch (db & MPII_DOORBELL_STATE) {
903 		case MPII_DOORBELL_STATE_READY:
904 			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is ready\n",
905 			    DEVNAME(sc));
906 			return (0);
907 
908 		case MPII_DOORBELL_STATE_OPER:
909 			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is oper\n",
910 			    DEVNAME(sc));
911 			if (sc->sc_ioc_event_replay)
912 				mpii_reset_soft(sc);
913 			else
914 				mpii_reset_hard(sc);
915 			break;
916 
917 		case MPII_DOORBELL_STATE_FAULT:
918 			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is being "
919 			    "reset hard\n", DEVNAME(sc));
920 			mpii_reset_hard(sc);
921 			break;
922 
923 		case MPII_DOORBELL_STATE_RESET:
924 			DNPRINTF(MPII_D_MISC, "%s: mpii_init waiting to come "
925 			    "out of reset\n", DEVNAME(sc));
926 			if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
927 			    MPII_DOORBELL_STATE_RESET) != 0)
928 				return (1);
929 			break;
930 		}
931 		db = mpii_read_db(sc);
932 	}
933 
934 	return (1);
935 }
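
/*
 * mpii_init() above walks the doorbell state machine: READY means the IOC
 * is usable as-is, OPER triggers a soft reset (or a hard reset when event
 * replay is not supported) so the queues can be reprogrammed, FAULT forces
 * a hard reset, and RESET is simply waited out.  At most five state
 * transitions are attempted before giving up.
 */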
936 
937 int
938 mpii_reset_soft(struct mpii_softc *sc)
939 {
940 	DNPRINTF(MPII_D_MISC, "%s: mpii_reset_soft\n", DEVNAME(sc));
941 
942 	if (mpii_read_db(sc) & MPII_DOORBELL_INUSE) {
943 		return (1);
944 	}
945 
946 	mpii_write_db(sc,
947 	    MPII_DOORBELL_FUNCTION(MPII_FUNCTION_IOC_MESSAGE_UNIT_RESET));
948 
949 	/* XXX LSI waits 15 sec */
950 	if (mpii_wait_db_ack(sc) != 0)
951 		return (1);
952 
953 	/* XXX LSI waits 15 sec */
954 	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
955 	    MPII_DOORBELL_STATE_READY) != 0)
956 		return (1);
957 
958 	/* XXX wait for Sys2IOCDB bit to clear in HIS?? */
959 
960 	return (0);
961 }
962 
963 int
964 mpii_reset_hard(struct mpii_softc *sc)
965 {
966 	u_int16_t		i;
967 
968 	DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard\n", DEVNAME(sc));
969 
970 	mpii_write_intr(sc, 0);
971 
972 	/* enable diagnostic register */
973 	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_FLUSH);
974 	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_1);
975 	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_2);
976 	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_3);
977 	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_4);
978 	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_5);
979 	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_6);
980 
981 	delay(100);
982 
983 	if ((mpii_read(sc, MPII_HOSTDIAG) & MPII_HOSTDIAG_DWRE) == 0) {
984 		DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard failure to enable "
985 		    "diagnostic read/write\n", DEVNAME(sc));
986 		return(1);
987 	}
988 
989 	/* reset ioc */
990 	mpii_write(sc, MPII_HOSTDIAG, MPII_HOSTDIAG_RESET_ADAPTER);
991 
992 	/* 240 milliseconds */
993 	delay(240000);
994 
995 
996 	/* XXX this whole function should be more robust */
997 
998 	/* XXX  read the host diagnostic reg until reset adapter bit clears ? */
999 	for (i = 0; i < 30000; i++) {
1000 		if ((mpii_read(sc, MPII_HOSTDIAG) &
1001 		    MPII_HOSTDIAG_RESET_ADAPTER) == 0)
1002 			break;
1003 		delay(10000);
1004 	}
1005 
1006 	/* disable diagnostic register */
1007 	mpii_write(sc, MPII_WRITESEQ, 0xff);
1008 
1009 	/* XXX what else? */
1010 
1011 	DNPRINTF(MPII_D_MISC, "%s: done with mpii_reset_hard\n", DEVNAME(sc));
1012 
1013 	return(0);
1014 }
1015 
1016 int
1017 mpii_handshake_send(struct mpii_softc *sc, void *buf, size_t dwords)
1018 {
1019 	u_int32_t		*query = buf;
1020 	int			i;
1021 
1022 	/* make sure the doorbell is not in use. */
1023 	if (mpii_read_db(sc) & MPII_DOORBELL_INUSE)
1024 		return (1);
1025 
1026 	/* clear pending doorbell interrupts */
1027 	if (mpii_read_intr(sc) & MPII_INTR_STATUS_IOC2SYSDB)
1028 		mpii_write_intr(sc, 0);
1029 
1030 	/*
1031 	 * first write the doorbell with the handshake function and the
1032 	 * dword count.
1033 	 */
1034 	mpii_write_db(sc, MPII_DOORBELL_FUNCTION(MPII_FUNCTION_HANDSHAKE) |
1035 	    MPII_DOORBELL_DWORDS(dwords));
1036 
1037 	/*
1038 	 * the doorbell used bit will be set because a doorbell function has
1039 	 * started. wait for the interrupt and then ack it.
1040 	 */
1041 	if (mpii_wait_db_int(sc) != 0)
1042 		return (1);
1043 	mpii_write_intr(sc, 0);
1044 
1045 	/* poll for the acknowledgement. */
1046 	if (mpii_wait_db_ack(sc) != 0)
1047 		return (1);
1048 
1049 	/* write the query through the doorbell. */
1050 	for (i = 0; i < dwords; i++) {
1051 		mpii_write_db(sc, htole32(query[i]));
1052 		if (mpii_wait_db_ack(sc) != 0)
1053 			return (1);
1054 	}
1055 
1056 	return (0);
1057 }
1058 
1059 int
1060 mpii_handshake_recv_dword(struct mpii_softc *sc, u_int32_t *dword)
1061 {
1062 	u_int16_t		*words = (u_int16_t *)dword;
1063 	int			i;
1064 
1065 	for (i = 0; i < 2; i++) {
1066 		if (mpii_wait_db_int(sc) != 0)
1067 			return (1);
1068 		words[i] = letoh16(mpii_read_db(sc) & MPII_DOORBELL_DATA_MASK);
1069 		mpii_write_intr(sc, 0);
1070 	}
1071 
1072 	return (0);
1073 }
1074 
1075 int
1076 mpii_handshake_recv(struct mpii_softc *sc, void *buf, size_t dwords)
1077 {
1078 	struct mpii_msg_reply	*reply = buf;
1079 	u_int32_t		*dbuf = buf, dummy;
1080 	int			i;
1081 
1082 	/* get the first dword so we can read the length out of the header. */
1083 	if (mpii_handshake_recv_dword(sc, &dbuf[0]) != 0)
1084 		return (1);
1085 
1086 	DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dwords: %d reply: %d\n",
1087 	    DEVNAME(sc), dwords, reply->msg_length);
1088 
1089 	/*
1090 	 * the total length, in dwords, is in the message length field of the
1091 	 * reply header.
1092 	 */
1093 	for (i = 1; i < MIN(dwords, reply->msg_length); i++) {
1094 		if (mpii_handshake_recv_dword(sc, &dbuf[i]) != 0)
1095 			return (1);
1096 	}
1097 
1098 	/* if there's extra stuff to come off the ioc, discard it */
1099 	while (i++ < reply->msg_length) {
1100 		if (mpii_handshake_recv_dword(sc, &dummy) != 0)
1101 			return (1);
1102 		DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dummy read: "
1103 		    "0x%08x\n", DEVNAME(sc), dummy);
1104 	}
1105 
1106 	/* wait for the doorbell used bit to be reset and clear the intr */
1107 	if (mpii_wait_db_int(sc) != 0)
1108 		return (1);
1109 
1110 	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_INUSE, 0) != 0)
1111 		return (1);
1112 
1113 	mpii_write_intr(sc, 0);
1114 
1115 	return (0);
1116 }
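
/*
 * Doorbell handshake summary for the two routines above: the host writes
 * the request one 32-bit dword at a time through the doorbell register,
 * waiting for the IOC to acknowledge each write; the reply comes back
 * through the same register 16 bits at a time (two doorbell interrupts per
 * dword), with the total length taken from msg_length in the reply header.
 * Any reply dwords beyond the caller's buffer are drained and discarded.
 */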
1117 
1118 void
1119 mpii_empty_done(struct mpii_ccb *ccb)
1120 {
1121 	/* nothing to do */
1122 }
1123 
1124 int
1125 mpii_iocfacts(struct mpii_softc *sc)
1126 {
1127 	struct mpii_msg_iocfacts_request	ifq;
1128 	struct mpii_msg_iocfacts_reply		ifp;
1129 	int					irs;
1130 	u_int					qdepth;
1131 
1132 	DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts\n", DEVNAME(sc));
1133 
1134 	memset(&ifq, 0, sizeof(ifq));
1135 	memset(&ifp, 0, sizeof(ifp));
1136 
1137 	ifq.function = MPII_FUNCTION_IOC_FACTS;
1138 
1139 	if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
1140 		DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts send failed\n",
1141 		    DEVNAME(sc));
1142 		return (1);
1143 	}
1144 
1145 	if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
1146 		DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts recv failed\n",
1147 		    DEVNAME(sc));
1148 		return (1);
1149 	}
1150 
1151 	sc->sc_ioc_number = ifp.ioc_number;
1152 	sc->sc_vf_id = ifp.vf_id;
1153 
1154 	sc->sc_max_volumes = ifp.max_volumes;
1155 	sc->sc_max_devices = ifp.max_volumes + lemtoh16(&ifp.max_targets);
1156 
1157 	if (ISSET(lemtoh32(&ifp.ioc_capabilities),
1158 	    MPII_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
1159 		SET(sc->sc_flags, MPII_F_RAID);
1160 
1161 	sc->sc_max_cmds = MIN(lemtoh16(&ifp.request_credit),
1162 	    MPII_REQUEST_CREDIT);
1163 
1164 	/*
1165 	 * The host driver must ensure that there is at least one
1166 	 * unused entry in the Reply Free Queue. One way to ensure
1167 	 * that this requirement is met is to never allocate a number
1168 	 * of reply frames that is a multiple of 16.
1169 	 */
1170 	sc->sc_num_reply_frames = sc->sc_max_cmds + 32;
1171 	if (!(sc->sc_num_reply_frames % 16))
1172 		sc->sc_num_reply_frames--;
1173 
1174 	/* must be a multiple of 16 */
1175 	sc->sc_reply_post_qdepth = sc->sc_max_cmds +
1176 	    sc->sc_num_reply_frames;
1177 	sc->sc_reply_post_qdepth += 16 - (sc->sc_reply_post_qdepth % 16);
1178 
1179 	qdepth = lemtoh16(&ifp.max_reply_descriptor_post_queue_depth);
1180 	if (sc->sc_reply_post_qdepth > qdepth) {
1181 		sc->sc_reply_post_qdepth = qdepth;
1182 		if (sc->sc_reply_post_qdepth < 16) {
1183 			printf("%s: RDPQ is too shallow\n", DEVNAME(sc));
1184 			return (1);
1185 		}
1186 		sc->sc_max_cmds = sc->sc_reply_post_qdepth / 2 - 4;
1187 		sc->sc_num_reply_frames = sc->sc_max_cmds + 4;
1188 	}
1189 
1190 	sc->sc_reply_free_qdepth = sc->sc_num_reply_frames +
1191 	    16 - (sc->sc_num_reply_frames % 16);
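
	/*
	 * Worked example (illustrative only): assuming the IOC grants at
	 * least MPII_REQUEST_CREDIT credits and reports a deep enough reply
	 * post queue, sc_max_cmds = 128, sc_num_reply_frames = 128 + 32 =
	 * 160, reduced to 159 so it is not a multiple of 16,
	 * sc_reply_post_qdepth = 128 + 159 = 287, rounded up to 288, and
	 * sc_reply_free_qdepth = 159 + 1 = 160.
	 */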
1192 
1193 	/*
1194 	 * Our request frame for an I/O operation looks like this:
1195 	 *
1196 	 * +-------------------+ -.
1197 	 * | mpii_msg_scsi_io  |  |
1198 	 * +-------------------+  |
1199 	 * | mpii_sge          |  |
1200 	 * + - - - - - - - - - +  |
1201 	 * | ...               |  > ioc_request_frame_size
1202 	 * + - - - - - - - - - +  |
1203 	 * | mpii_sge (tail)   |  |
1204 	 * + - - - - - - - - - +  |
1205 	 * | mpii_sge (csge)   |  | --.
1206 	 * + - - - - - - - - - + -'   | chain sge points to the next sge
1207 	 * | mpii_sge          |<-----'
1208 	 * + - - - - - - - - - +
1209 	 * | ...               |
1210 	 * + - - - - - - - - - +
1211 	 * | mpii_sge (tail)   |
1212 	 * +-------------------+
1213 	 * |                   |
1214 	 * ~~~~~~~~~~~~~~~~~~~~~
1215 	 * |                   |
1216 	 * +-------------------+ <- sc_request_size - sizeof(scsi_sense_data)
1217 	 * | scsi_sense_data   |
1218 	 * +-------------------+
1219 	 */
1220 
1221 	/* both sizes are in 32-bit words */
1222 	sc->sc_reply_size = ifp.reply_frame_size * 4;
1223 	irs = lemtoh16(&ifp.ioc_request_frame_size) * 4;
1224 	sc->sc_request_size = MPII_REQUEST_SIZE;
1225 	/* make sure we have enough space for scsi sense data */
1226 	if (irs > sc->sc_request_size) {
1227 		sc->sc_request_size = irs + sizeof(struct scsi_sense_data);
1228 		sc->sc_request_size += 16 - (sc->sc_request_size % 16);
1229 	}
1230 
1231 	/* offset to the chain sge */
1232 	sc->sc_chain_sge = (irs - sizeof(struct mpii_msg_scsi_io)) /
1233 	    sizeof(struct mpii_sge) - 1;
1234 
1235 	/*
1236 	 * The number of simple scatter-gather elements that we can fit into
1237 	 * the request buffer after the I/O command, minus the chain element.
1238 	 */
1239 	sc->sc_max_sgl = (sc->sc_request_size -
1240 	    sizeof(struct mpii_msg_scsi_io) - sizeof(struct scsi_sense_data)) /
1241 	    sizeof(struct mpii_sge) - 1;
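
	/*
	 * In other words (see the diagram above): sc_chain_sge is the index
	 * of the last SGE slot that still fits in the IOC's native request
	 * frame and is reserved for the chain element, while sc_max_sgl is
	 * the number of simple SGEs that fit in the full sc_request_size
	 * frame once the I/O header, the chain slot and the trailing
	 * scsi_sense_data area are accounted for.
	 */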
1242 
1243 	return (0);
1244 }
1245 
1246 int
1247 mpii_iocinit(struct mpii_softc *sc)
1248 {
1249 	struct mpii_msg_iocinit_request		iiq;
1250 	struct mpii_msg_iocinit_reply		iip;
1251 
1252 	DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit\n", DEVNAME(sc));
1253 
1254 	memset(&iiq, 0, sizeof(iiq));
1255 	memset(&iip, 0, sizeof(iip));
1256 
1257 	iiq.function = MPII_FUNCTION_IOC_INIT;
1258 	iiq.whoinit = MPII_WHOINIT_HOST_DRIVER;
1259 
1260 	/* XXX JPG do something about vf_id */
1261 	iiq.vf_id = 0;
1262 
1263 	iiq.msg_version_maj = 0x02;
1264 	iiq.msg_version_min = 0x00;
1265 
1266 	/* XXX JPG ensure compliance with some level and hard-code? */
1267 	iiq.hdr_version_unit = 0x00;
1268 	iiq.hdr_version_dev = 0x00;
1269 
1270 	htolem16(&iiq.system_request_frame_size, sc->sc_request_size / 4);
1271 
1272 	htolem16(&iiq.reply_descriptor_post_queue_depth,
1273 	    sc->sc_reply_post_qdepth);
1274 
1275 	htolem16(&iiq.reply_free_queue_depth, sc->sc_reply_free_qdepth);
1276 
1277 	htolem32(&iiq.sense_buffer_address_high,
1278 	    MPII_DMA_DVA(sc->sc_requests) >> 32);
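
	/*
	 * The sense buffers are carved out of the tail of each request frame
	 * (see the layout comment in mpii_iocfacts()), so they share the
	 * upper 32 bits of the request frame DMA area programmed here.
	 */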
1279 
1280 	htolem32(&iiq.system_reply_address_high,
1281 	    MPII_DMA_DVA(sc->sc_replies) >> 32);
1282 
1283 	htolem32(&iiq.system_request_frame_base_address_lo,
1284 	    MPII_DMA_DVA(sc->sc_requests));
1285 	htolem32(&iiq.system_request_frame_base_address_hi,
1286 	    MPII_DMA_DVA(sc->sc_requests) >> 32);
1287 
1288 	htolem32(&iiq.reply_descriptor_post_queue_address_lo,
1289 	    MPII_DMA_DVA(sc->sc_reply_postq));
1290 	htolem32(&iiq.reply_descriptor_post_queue_address_hi,
1291 	    MPII_DMA_DVA(sc->sc_reply_postq) >> 32);
1292 
1293 	htolem32(&iiq.reply_free_queue_address_lo,
1294 	    MPII_DMA_DVA(sc->sc_reply_freeq));
1295 	htolem32(&iiq.reply_free_queue_address_hi,
1296 	    MPII_DMA_DVA(sc->sc_reply_freeq) >> 32);
1297 
1298 	if (mpii_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) {
1299 		DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit send failed\n",
1300 		    DEVNAME(sc));
1301 		return (1);
1302 	}
1303 
1304 	if (mpii_handshake_recv(sc, &iip, dwordsof(iip)) != 0) {
1305 		DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit recv failed\n",
1306 		    DEVNAME(sc));
1307 		return (1);
1308 	}
1309 
1310 	DNPRINTF(MPII_D_MISC, "%s:  function: 0x%02x msg_length: %d "
1311 	    "whoinit: 0x%02x\n", DEVNAME(sc), iip.function,
1312 	    iip.msg_length, iip.whoinit);
1313 	DNPRINTF(MPII_D_MISC, "%s:  msg_flags: 0x%02x\n", DEVNAME(sc),
1314 	    iip.msg_flags);
1315 	DNPRINTF(MPII_D_MISC, "%s:  vf_id: 0x%02x vp_id: 0x%02x\n", DEVNAME(sc),
1316 	    iip.vf_id, iip.vp_id);
1317 	DNPRINTF(MPII_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
1318 	    letoh16(iip.ioc_status));
1319 	DNPRINTF(MPII_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
1320 	    letoh32(iip.ioc_loginfo));
1321 
1322 	if (lemtoh16(&iip.ioc_status) != MPII_IOCSTATUS_SUCCESS ||
1323 	    lemtoh32(&iip.ioc_loginfo))
1324 		return (1);
1325 
1326 	return (0);
1327 }
1328 
1329 void
1330 mpii_push_reply(struct mpii_softc *sc, struct mpii_rcb *rcb)
1331 {
1332 	u_int32_t		*rfp;
1333 	u_int			idx;
1334 
1335 	if (rcb == NULL)
1336 		return;
1337 
1338 	idx = sc->sc_reply_free_host_index;
1339 
1340 	rfp = MPII_DMA_KVA(sc->sc_reply_freeq);
1341 	htolem32(&rfp[idx], rcb->rcb_reply_dva);
1342 
1343 	if (++idx >= sc->sc_reply_free_qdepth)
1344 		idx = 0;
1345 
1346 	mpii_write_reply_free(sc, sc->sc_reply_free_host_index = idx);
1347 }
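
/*
 * mpii_push_reply() above hands a reply frame back to the IOC by writing
 * its DMA address into the next slot of the reply free queue (a ring of
 * 32-bit addresses) and advancing MPII_REPLY_FREE_HOST_INDEX.  The frame
 * is only seen again by the driver once the IOC reuses it and posts it
 * through the reply post queue.
 */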
1348 
1349 int
1350 mpii_portfacts(struct mpii_softc *sc)
1351 {
1352 	struct mpii_msg_portfacts_request	*pfq;
1353 	struct mpii_msg_portfacts_reply		*pfp;
1354 	struct mpii_ccb				*ccb;
1355 	int					rv = 1;
1356 
1357 	DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts\n", DEVNAME(sc));
1358 
1359 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1360 	if (ccb == NULL) {
1361 		DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts mpii_get_ccb fail\n",
1362 		    DEVNAME(sc));
1363 		return (rv);
1364 	}
1365 
1366 	ccb->ccb_done = mpii_empty_done;
1367 	pfq = ccb->ccb_cmd;
1368 
1369 	memset(pfq, 0, sizeof(*pfq));
1370 
1371 	pfq->function = MPII_FUNCTION_PORT_FACTS;
1372 	pfq->chain_offset = 0;
1373 	pfq->msg_flags = 0;
1374 	pfq->port_number = 0;
1375 	pfq->vp_id = 0;
1376 	pfq->vf_id = 0;
1377 
1378 	if (mpii_poll(sc, ccb) != 0) {
1379 		DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts poll\n",
1380 		    DEVNAME(sc));
1381 		goto err;
1382 	}
1383 
1384 	if (ccb->ccb_rcb == NULL) {
1385 		DNPRINTF(MPII_D_MISC, "%s: empty portfacts reply\n",
1386 		    DEVNAME(sc));
1387 		goto err;
1388 	}
1389 
1390 	pfp = ccb->ccb_rcb->rcb_reply;
1391 	sc->sc_porttype = pfp->port_type;
1392 
1393 	mpii_push_reply(sc, ccb->ccb_rcb);
1394 	rv = 0;
1395 err:
1396 	scsi_io_put(&sc->sc_iopool, ccb);
1397 
1398 	return (rv);
1399 }
1400 
1401 void
1402 mpii_eventack(void *cookie, void *io)
1403 {
1404 	struct mpii_softc			*sc = cookie;
1405 	struct mpii_ccb				*ccb = io;
1406 	struct mpii_rcb				*rcb, *next;
1407 	struct mpii_msg_event_reply		*enp;
1408 	struct mpii_msg_eventack_request	*eaq;
1409 
1410 	mtx_enter(&sc->sc_evt_ack_mtx);
1411 	rcb = SIMPLEQ_FIRST(&sc->sc_evt_ack_queue);
1412 	if (rcb != NULL) {
1413 		next = SIMPLEQ_NEXT(rcb, rcb_link);
1414 		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_ack_queue, rcb_link);
1415 	}
1416 	mtx_leave(&sc->sc_evt_ack_mtx);
1417 
1418 	if (rcb == NULL) {
1419 		scsi_io_put(&sc->sc_iopool, ccb);
1420 		return;
1421 	}
1422 
1423 	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;
1424 
1425 	ccb->ccb_done = mpii_eventack_done;
1426 	eaq = ccb->ccb_cmd;
1427 
1428 	eaq->function = MPII_FUNCTION_EVENT_ACK;
1429 
1430 	eaq->event = enp->event;
1431 	eaq->event_context = enp->event_context;
1432 
1433 	mpii_push_reply(sc, rcb);
1434 
1435 	mpii_start(sc, ccb);
1436 
1437 	if (next != NULL)
1438 		scsi_ioh_add(&sc->sc_evt_ack_handler);
1439 }
1440 
1441 void
1442 mpii_eventack_done(struct mpii_ccb *ccb)
1443 {
1444 	struct mpii_softc			*sc = ccb->ccb_sc;
1445 
1446 	DNPRINTF(MPII_D_EVT, "%s: event ack done\n", DEVNAME(sc));
1447 
1448 	mpii_push_reply(sc, ccb->ccb_rcb);
1449 	scsi_io_put(&sc->sc_iopool, ccb);
1450 }
1451 
1452 int
1453 mpii_portenable(struct mpii_softc *sc)
1454 {
1455 	struct mpii_msg_portenable_request	*peq;
1456 	struct mpii_msg_portenable_repy		*pep;
1457 	struct mpii_ccb				*ccb;
1458 
1459 	DNPRINTF(MPII_D_MISC, "%s: mpii_portenable\n", DEVNAME(sc));
1460 
1461 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1462 	if (ccb == NULL) {
1463 		DNPRINTF(MPII_D_MISC, "%s: mpii_portenable ccb_get\n",
1464 		    DEVNAME(sc));
1465 		return (1);
1466 	}
1467 
1468 	ccb->ccb_done = mpii_empty_done;
1469 	peq = ccb->ccb_cmd;
1470 
1471 	peq->function = MPII_FUNCTION_PORT_ENABLE;
1472 	peq->vf_id = sc->sc_vf_id;
1473 
1474 	if (mpii_poll(sc, ccb) != 0) {
1475 		DNPRINTF(MPII_D_MISC, "%s: mpii_portenable poll\n",
1476 		    DEVNAME(sc));
1477 		return (1);
1478 	}
1479 
1480 	if (ccb->ccb_rcb == NULL) {
1481 		DNPRINTF(MPII_D_MISC, "%s: empty portenable reply\n",
1482 		    DEVNAME(sc));
1483 		return (1);
1484 	}
1485 	pep = ccb->ccb_rcb->rcb_reply;
1486 
1487 	mpii_push_reply(sc, ccb->ccb_rcb);
1488 	scsi_io_put(&sc->sc_iopool, ccb);
1489 
1490 	return (0);
1491 }
1492 
1493 int
1494 mpii_cfg_coalescing(struct mpii_softc *sc)
1495 {
1496 	struct mpii_cfg_hdr			hdr;
1497 	struct mpii_cfg_ioc_pg1			ipg;
1498 
1499 	hdr.page_version = 0;
1500 	hdr.page_length = sizeof(ipg) / 4;
1501 	hdr.page_number = 1;
1502 	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC;
1503 	memset(&ipg, 0, sizeof(ipg));
1504 	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &ipg,
1505 	    sizeof(ipg)) != 0) {
1506 		DNPRINTF(MPII_D_MISC, "%s: unable to fetch IOC "
1507 		    "page 1\n", DEVNAME(sc));
1508 		return (1);
1509 	}
1510 
1511 	if (!ISSET(lemtoh32(&ipg.flags), MPII_CFG_IOC_1_REPLY_COALESCING))
1512 		return (0);
1513 
1514 	/* Disable coalescing */
1515 	CLR(ipg.flags, htole32(MPII_CFG_IOC_1_REPLY_COALESCING));
1516 	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 0, &ipg,
1517 	    sizeof(ipg)) != 0) {
1518 		DNPRINTF(MPII_D_MISC, "%s: unable to clear coalescing\n",
1519 		    DEVNAME(sc));
1520 		return (1);
1521 	}
1522 
1523 	return (0);
1524 }
1525 
1526 #define MPII_EVENT_MASKALL(enq)		do {			\
1527 		enq->event_masks[0] = 0xffffffff;		\
1528 		enq->event_masks[1] = 0xffffffff;		\
1529 		enq->event_masks[2] = 0xffffffff;		\
1530 		enq->event_masks[3] = 0xffffffff;		\
1531 	} while (0)
1532 
1533 #define MPII_EVENT_UNMASK(enq, evt)	do {			\
1534 		enq->event_masks[evt / 32] &=			\
1535 		    htole32(~(1 << (evt % 32)));		\
1536 	} while (0)
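
/*
 * MPII_EVENT_UNMASK() clears a single bit in the 128-bit (4 x 32-bit)
 * event mask.  For example, a hypothetical event code of 0x16 (22) clears
 * bit 22 of event_masks[0], while 0x21 (33) clears bit 1 of
 * event_masks[1].
 */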
1537 
1538 int
1539 mpii_eventnotify(struct mpii_softc *sc)
1540 {
1541 	struct mpii_msg_event_request		*enq;
1542 	struct mpii_ccb				*ccb;
1543 
1544 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1545 	if (ccb == NULL) {
1546 		DNPRINTF(MPII_D_MISC, "%s: mpii_eventnotify ccb_get\n",
1547 		    DEVNAME(sc));
1548 		return (1);
1549 	}
1550 
1551 	SIMPLEQ_INIT(&sc->sc_evt_sas_queue);
1552 	mtx_init(&sc->sc_evt_sas_mtx, IPL_BIO);
1553 	task_set(&sc->sc_evt_sas_task, mpii_event_sas, sc, NULL);
1554 
1555 	SIMPLEQ_INIT(&sc->sc_evt_ack_queue);
1556 	mtx_init(&sc->sc_evt_ack_mtx, IPL_BIO);
1557 	scsi_ioh_set(&sc->sc_evt_ack_handler, &sc->sc_iopool,
1558 	    mpii_eventack, sc);
1559 
1560 	ccb->ccb_done = mpii_eventnotify_done;
1561 	enq = ccb->ccb_cmd;
1562 
1563 	enq->function = MPII_FUNCTION_EVENT_NOTIFICATION;
1564 
1565 	/*
1566 	 * Enable reporting of the following events:
1567 	 *
1568 	 * MPII_EVENT_SAS_DISCOVERY
1569 	 * MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST
1570 	 * MPII_EVENT_SAS_DEVICE_STATUS_CHANGE
1571 	 * MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE
1572 	 * MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST
1573 	 * MPII_EVENT_IR_VOLUME
1574 	 * MPII_EVENT_IR_PHYSICAL_DISK
1575 	 * MPII_EVENT_IR_OPERATION_STATUS
1576 	 */
1577 
1578 	MPII_EVENT_MASKALL(enq);
1579 	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DISCOVERY);
1580 	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
1581 	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DEVICE_STATUS_CHANGE);
1582 	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
1583 	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST);
1584 	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_VOLUME);
1585 	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_PHYSICAL_DISK);
1586 	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_OPERATION_STATUS);
1587 
1588 	mpii_start(sc, ccb);
1589 
1590 	return (0);
1591 }
1592 
1593 void
1594 mpii_eventnotify_done(struct mpii_ccb *ccb)
1595 {
1596 	struct mpii_softc			*sc = ccb->ccb_sc;
1597 	struct mpii_rcb				*rcb = ccb->ccb_rcb;
1598 
1599 	DNPRINTF(MPII_D_EVT, "%s: mpii_eventnotify_done\n", DEVNAME(sc));
1600 
1601 	scsi_io_put(&sc->sc_iopool, ccb);
1602 	mpii_event_process(sc, rcb);
1603 }
1604 
1605 void
1606 mpii_event_raid(struct mpii_softc *sc, struct mpii_msg_event_reply *enp)
1607 {
1608 	struct mpii_evt_ir_cfg_change_list	*ccl;
1609 	struct mpii_evt_ir_cfg_element		*ce;
1610 	struct mpii_device			*dev;
1611 	u_int16_t				type;
1612 	int					i;
1613 
1614 	ccl = (struct mpii_evt_ir_cfg_change_list *)(enp + 1);
1615 	if (ccl->num_elements == 0)
1616 		return;
1617 
1618 	if (ISSET(lemtoh32(&ccl->flags), MPII_EVT_IR_CFG_CHANGE_LIST_FOREIGN)) {
1619 		/* bail on foreign configurations */
1620 		return;
1621 	}
1622 
1623 	ce = (struct mpii_evt_ir_cfg_element *)(ccl + 1);
1624 
1625 	for (i = 0; i < ccl->num_elements; i++, ce++) {
1626 		type = (lemtoh16(&ce->element_flags) &
1627 		    MPII_EVT_IR_CFG_ELEMENT_TYPE_MASK);
1628 
1629 		switch (type) {
1630 		case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME:
1631 			switch (ce->reason_code) {
1632 			case MPII_EVT_IR_CFG_ELEMENT_RC_ADDED:
1633 			case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_CREATED:
1634 				if (mpii_find_dev(sc,
1635 				    lemtoh16(&ce->vol_dev_handle))) {
1636 					printf("%s: device %#x is already "
1637 					    "configured\n", DEVNAME(sc),
1638 					    lemtoh16(&ce->vol_dev_handle));
1639 					break;
1640 				}
1641 				dev = malloc(sizeof(*dev), M_DEVBUF,
1642 				    M_NOWAIT | M_ZERO);
1643 				if (!dev) {
1644 					printf("%s: failed to allocate a "
1645 					    "device structure\n", DEVNAME(sc));
1646 					break;
1647 				}
1648 				SET(dev->flags, MPII_DF_VOLUME);
1649 				dev->slot = sc->sc_vd_id_low;
1650 				dev->dev_handle = lemtoh16(&ce->vol_dev_handle);
1651 				if (mpii_insert_dev(sc, dev)) {
1652 					free(dev, M_DEVBUF, 0);
1653 					break;
1654 				}
1655 				sc->sc_vd_count++;
1656 				break;
1657 			case MPII_EVT_IR_CFG_ELEMENT_RC_REMOVED:
1658 			case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_DELETED:
1659 				if (!(dev = mpii_find_dev(sc,
1660 				    lemtoh16(&ce->vol_dev_handle))))
1661 					break;
1662 				mpii_remove_dev(sc, dev);
1663 				sc->sc_vd_count--;
1664 				break;
1665 			}
1666 			break;
1667 		case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME_DISK:
1668 			if (ce->reason_code ==
1669 			    MPII_EVT_IR_CFG_ELEMENT_RC_PD_CREATED ||
1670 			    ce->reason_code ==
1671 			    MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) {
1672 				/* there should be an underlying sas drive */
1673 				if (!(dev = mpii_find_dev(sc,
1674 				    lemtoh16(&ce->phys_disk_dev_handle))))
1675 					break;
1676 				/* promoted from a hot spare? */
1677 				CLR(dev->flags, MPII_DF_HOT_SPARE);
1678 				SET(dev->flags, MPII_DF_VOLUME_DISK |
1679 				    MPII_DF_HIDDEN);
1680 			}
1681 			break;
1682 		case MPII_EVT_IR_CFG_ELEMENT_TYPE_HOT_SPARE:
1683 			if (ce->reason_code ==
1684 			    MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) {
1685 				/* there should be an underlying sas drive */
1686 				if (!(dev = mpii_find_dev(sc,
1687 				    lemtoh16(&ce->phys_disk_dev_handle))))
1688 					break;
1689 				SET(dev->flags, MPII_DF_HOT_SPARE |
1690 				    MPII_DF_HIDDEN);
1691 			}
1692 			break;
1693 		}
1694 	}
1695 }
1696 
1697 void
1698 mpii_event_sas(void *xsc, void *x)
1699 {
1700 	struct mpii_softc *sc = xsc;
1701 	struct mpii_rcb *rcb, *next;
1702 	struct mpii_msg_event_reply *enp;
1703 	struct mpii_evt_sas_tcl		*tcl;
1704 	struct mpii_evt_phy_entry	*pe;
1705 	struct mpii_device		*dev;
1706 	int				i;
1707 	u_int16_t			handle;
1708 
1709 	mtx_enter(&sc->sc_evt_sas_mtx);
1710 	rcb = SIMPLEQ_FIRST(&sc->sc_evt_sas_queue);
1711 	if (rcb != NULL) {
1712 		next = SIMPLEQ_NEXT(rcb, rcb_link);
1713 		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_sas_queue, rcb_link);
1714 	}
1715 	mtx_leave(&sc->sc_evt_sas_mtx);
1716 
1717 	if (rcb == NULL)
1718 		return;
1719 	if (next != NULL)
1720 		task_add(systq, &sc->sc_evt_sas_task);
1721 
1722 	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;
1723 	tcl = (struct mpii_evt_sas_tcl *)(enp + 1);
1724 	pe = (struct mpii_evt_phy_entry *)(tcl + 1);
1725 
1726 	for (i = 0; i < tcl->num_entries; i++, pe++) {
1727 		switch (pe->phy_status & MPII_EVENT_SAS_TOPO_PS_RC_MASK) {
1728 		case MPII_EVENT_SAS_TOPO_PS_RC_ADDED:
1729 			handle = lemtoh16(&pe->dev_handle);
1730 			if (mpii_find_dev(sc, handle)) {
1731 				printf("%s: device %#x is already "
1732 				    "configured\n", DEVNAME(sc), handle);
1733 				break;
1734 			}
1735 
1736 			dev = malloc(sizeof(*dev), M_DEVBUF, M_WAITOK | M_ZERO);
1737 			dev->slot = sc->sc_pd_id_start + tcl->start_phy_num + i;
1738 			dev->dev_handle = handle;
1739 			dev->phy_num = tcl->start_phy_num + i;
1740 			if (tcl->enclosure_handle)
1741 				dev->physical_port = tcl->physical_port;
1742 			dev->enclosure = lemtoh16(&tcl->enclosure_handle);
1743 			dev->expander = lemtoh16(&tcl->expander_handle);
1744 
1745 			if (mpii_insert_dev(sc, dev)) {
1746 				free(dev, M_DEVBUF, 0);
1747 				break;
1748 			}
1749 
1750 			if (sc->sc_scsibus != NULL)
1751 				scsi_probe_target(sc->sc_scsibus, dev->slot);
1752 			break;
1753 
1754 		case MPII_EVENT_SAS_TOPO_PS_RC_MISSING:
1755 			dev = mpii_find_dev(sc, lemtoh16(&pe->dev_handle));
1756 			if (dev == NULL)
1757 				break;
1758 
1759 			mpii_remove_dev(sc, dev);
1760 			mpii_sas_remove_device(sc, dev->dev_handle);
1761 			if (sc->sc_scsibus != NULL &&
1762 			    !ISSET(dev->flags, MPII_DF_HIDDEN)) {
1763 				scsi_activate(sc->sc_scsibus, dev->slot, -1,
1764 				    DVACT_DEACTIVATE);
1765 				scsi_detach_target(sc->sc_scsibus, dev->slot,
1766 				    DETACH_FORCE);
1767 			}
1768 
1769 			free(dev, M_DEVBUF, 0);
1770 			break;
1771 		}
1772 	}
1773 
1774 	mpii_event_done(sc, rcb);
1775 }
1776 
1777 void
1778 mpii_event_process(struct mpii_softc *sc, struct mpii_rcb *rcb)
1779 {
1780 	struct mpii_msg_event_reply		*enp;
1781 
1782 	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;
1783 
1784 	DNPRINTF(MPII_D_EVT, "%s: mpii_event_process: %#x\n", DEVNAME(sc),
1785 	    letoh16(enp->event));
1786 
1787 	switch (lemtoh16(&enp->event)) {
1788 	case MPII_EVENT_EVENT_CHANGE:
1789 		/* this event is expected and can be safely ignored */
1790 		break;
1791 	case MPII_EVENT_SAS_DISCOVERY: {
1792 		struct mpii_evt_sas_discovery	*esd =
1793 		    (struct mpii_evt_sas_discovery *)(enp + 1);
1794 
1795 		if (esd->reason_code ==
1796 		    MPII_EVENT_SAS_DISC_REASON_CODE_COMPLETED &&
1797 		    esd->discovery_status != 0)
1798 			printf("%s: sas discovery completed with status %#x\n",
1799 			    DEVNAME(sc), esd->discovery_status);
1800 		}
1801 		break;
1802 	case MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
1803 		mtx_enter(&sc->sc_evt_sas_mtx);
1804 		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_sas_queue, rcb, rcb_link);
1805 		mtx_leave(&sc->sc_evt_sas_mtx);
1806 		task_add(systq, &sc->sc_evt_sas_task);
1807 		return;
1808 	case MPII_EVENT_SAS_DEVICE_STATUS_CHANGE:
1809 		break;
1810 	case MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
1811 		break;
1812 	case MPII_EVENT_IR_VOLUME: {
1813 		struct mpii_evt_ir_volume	*evd =
1814 		    (struct mpii_evt_ir_volume *)(enp + 1);
1815 		struct mpii_device		*dev;
1816 #if NBIO > 0
1817 		const char *vol_states[] = {
1818 			BIOC_SVINVALID_S,
1819 			BIOC_SVOFFLINE_S,
1820 			BIOC_SVBUILDING_S,
1821 			BIOC_SVONLINE_S,
1822 			BIOC_SVDEGRADED_S,
1823 			BIOC_SVONLINE_S,
1824 		};
1825 #endif
1826 
1827 		if (cold)
1828 			break;
1829 		dev = mpii_find_dev(sc, lemtoh16(&evd->vol_dev_handle));
1830 		if (dev == NULL)
1831 			break;
1832 #if NBIO > 0
1833 		if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATE_CHANGED)
1834 			printf("%s: volume %d state changed from %s to %s\n",
1835 			    DEVNAME(sc), dev->slot - sc->sc_vd_id_low,
1836 			    vol_states[evd->prev_value],
1837 			    vol_states[evd->new_value]);
1838 #endif
1839 		if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATUS_CHANGED &&
1840 		    ISSET(evd->new_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC) &&
1841 		    !ISSET(evd->prev_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
1842 			printf("%s: started resync on volume %d\n",
1843 			    DEVNAME(sc), dev->slot - sc->sc_vd_id_low);
1844 		}
1845 		break;
1846 	case MPII_EVENT_IR_PHYSICAL_DISK:
1847 		break;
1848 	case MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST:
1849 		mpii_event_raid(sc, enp);
1850 		break;
1851 	case MPII_EVENT_IR_OPERATION_STATUS: {
1852 		struct mpii_evt_ir_status	*evs =
1853 		    (struct mpii_evt_ir_status *)(enp + 1);
1854 		struct mpii_device		*dev;
1855 
1856 		dev = mpii_find_dev(sc, lemtoh16(&evs->vol_dev_handle));
1857 		if (dev != NULL &&
1858 		    evs->operation == MPII_EVENT_IR_RAIDOP_RESYNC)
1859 			dev->percent = evs->percent;
1860 		break;
1861 		}
1862 	default:
1863 		DNPRINTF(MPII_D_EVT, "%s:  unhandled event 0x%02x\n",
1864 		    DEVNAME(sc), lemtoh16(&enp->event));
1865 	}
1866 
1867 	mpii_event_done(sc, rcb);
1868 }
1869 
1870 void
1871 mpii_event_done(struct mpii_softc *sc, struct mpii_rcb *rcb)
1872 {
1873 	struct mpii_msg_event_reply *enp = rcb->rcb_reply;
1874 
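	/*
	 * events flagged as requiring an acknowledgement are queued up
	 * for the ack handler; everything else can hand its reply frame
	 * straight back to the ioc
	 */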
1875 	if (enp->ack_required) {
1876 		mtx_enter(&sc->sc_evt_ack_mtx);
1877 		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_ack_queue, rcb, rcb_link);
1878 		mtx_leave(&sc->sc_evt_ack_mtx);
1879 		scsi_ioh_add(&sc->sc_evt_ack_handler);
1880 	} else
1881 		mpii_push_reply(sc, rcb);
1882 }
1883 
1884 void
1885 mpii_sas_remove_device(struct mpii_softc *sc, u_int16_t handle)
1886 {
1887 	struct mpii_msg_scsi_task_request	*stq;
1888 	struct mpii_msg_sas_oper_request	*soq;
1889 	struct mpii_ccb				*ccb;
1890 
1891 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1892 	if (ccb == NULL)
1893 		return;
1894 
1895 	stq = ccb->ccb_cmd;
1896 	stq->function = MPII_FUNCTION_SCSI_TASK_MGMT;
1897 	stq->task_type = MPII_SCSI_TASK_TARGET_RESET;
1898 	htolem16(&stq->dev_handle, handle);
1899 
1900 	ccb->ccb_done = mpii_empty_done;
1901 	mpii_wait(sc, ccb);
1902 
1903 	if (ccb->ccb_rcb != NULL)
1904 		mpii_push_reply(sc, ccb->ccb_rcb);
1905 
1906 	/* reuse the ccb to issue the remove device operation */
1907 	ccb->ccb_state = MPII_CCB_READY;
1908 	ccb->ccb_rcb = NULL;
1909 
1910 	soq = ccb->ccb_cmd;
1911 	memset(soq, 0, sizeof(*soq));
1912 	soq->function = MPII_FUNCTION_SAS_IO_UNIT_CONTROL;
1913 	soq->operation = MPII_SAS_OP_REMOVE_DEVICE;
1914 	htolem16(&soq->dev_handle, handle);
1915 
1916 	ccb->ccb_done = mpii_empty_done;
1917 	mpii_wait(sc, ccb);
1918 	if (ccb->ccb_rcb != NULL)
1919 		mpii_push_reply(sc, ccb->ccb_rcb);
1920 
1921 	scsi_io_put(&sc->sc_iopool, ccb);
1922 }
1923 
1924 int
1925 mpii_board_info(struct mpii_softc *sc)
1926 {
1927 	struct mpii_msg_iocfacts_request	ifq;
1928 	struct mpii_msg_iocfacts_reply		ifp;
1929 	struct mpii_cfg_manufacturing_pg0	mpg;
1930 	struct mpii_cfg_hdr			hdr;
1931 
1932 	memset(&ifq, 0, sizeof(ifq));
1933 	memset(&ifp, 0, sizeof(ifp));
1934 
1935 	ifq.function = MPII_FUNCTION_IOC_FACTS;
1936 
1937 	if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
1938 		DNPRINTF(MPII_D_MISC, "%s: failed to request ioc facts\n",
1939 		    DEVNAME(sc));
1940 		return (1);
1941 	}
1942 
1943 	if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
1944 		DNPRINTF(MPII_D_MISC, "%s: failed to receive ioc facts\n",
1945 		    DEVNAME(sc));
1946 		return (1);
1947 	}
1948 
1949 	hdr.page_version = 0;
1950 	hdr.page_length = sizeof(mpg) / 4;
1951 	hdr.page_number = 0;
1952 	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_MANUFACTURING;
1953 	memset(&mpg, 0, sizeof(mpg));
1954 	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &mpg,
1955 	    sizeof(mpg)) != 0) {
1956 		printf("%s: unable to fetch manufacturing page 0\n",
1957 		    DEVNAME(sc));
1958 		return (EINVAL);
1959 	}
1960 
1961 	printf("%s: %s, firmware %u.%u.%u.%u%s, MPI %u.%u\n", DEVNAME(sc),
1962 	    mpg.board_name, ifp.fw_version_maj, ifp.fw_version_min,
1963 	    ifp.fw_version_unit, ifp.fw_version_dev,
1964 	    ISSET(sc->sc_flags, MPII_F_RAID) ? " IR" : "",
1965 	    ifp.msg_version_maj, ifp.msg_version_min);
1966 
1967 	return (0);
1968 }
1969 
1970 int
1971 mpii_target_map(struct mpii_softc *sc)
1972 {
1973 	struct mpii_cfg_hdr			hdr;
1974 	struct mpii_cfg_ioc_pg8			ipg;
1975 	int					flags, pad = 0;
1976 
1977 	hdr.page_version = 0;
1978 	hdr.page_length = sizeof(ipg) / 4;
1979 	hdr.page_number = 8;
1980 	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC;
1981 	memset(&ipg, 0, sizeof(ipg));
1982 	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &ipg,
1983 	    sizeof(ipg)) != 0) {
1984 		printf("%s: unable to fetch ioc page 8\n",
1985 		    DEVNAME(sc));
1986 		return (EINVAL);
1987 	}
1988 
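	/* the ioc may reserve target id 0; if so, skip over it */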
1989 	if (lemtoh16(&ipg.flags) & MPII_IOC_PG8_FLAGS_RESERVED_TARGETID_0)
1990 		pad = 1;
1991 
1992 	flags = lemtoh16(&ipg.ir_volume_mapping_flags) &
1993 	    MPII_IOC_PG8_IRFLAGS_VOLUME_MAPPING_MODE_MASK;
1994 	if (ISSET(sc->sc_flags, MPII_F_RAID)) {
1995 		if (flags == MPII_IOC_PG8_IRFLAGS_LOW_VOLUME_MAPPING) {
1996 			sc->sc_vd_id_low += pad;
1997 			pad = sc->sc_max_volumes; /* for sc_pd_id_start */
1998 		} else
1999 			sc->sc_vd_id_low = sc->sc_max_devices -
2000 			    sc->sc_max_volumes;
2001 	}
2002 
2003 	sc->sc_pd_id_start += pad;
2004 
2005 	return (0);
2006 }
2007 
2008 int
2009 mpii_req_cfg_header(struct mpii_softc *sc, u_int8_t type, u_int8_t number,
2010     u_int32_t address, int flags, void *p)
2011 {
2012 	struct mpii_msg_config_request		*cq;
2013 	struct mpii_msg_config_reply		*cp;
2014 	struct mpii_ccb				*ccb;
2015 	struct mpii_cfg_hdr			*hdr = p;
2016 	struct mpii_ecfg_hdr			*ehdr = p;
2017 	int					etype = 0;
2018 	int					rv = 0;
2019 
2020 	DNPRINTF(MPII_D_MISC, "%s: mpii_req_cfg_header type: %#x number: %x "
2021 	    "address: 0x%08x flags: 0x%b\n", DEVNAME(sc), type, number,
2022 	    address, flags, MPII_PG_FMT);
2023 
2024 	ccb = scsi_io_get(&sc->sc_iopool,
2025 	    ISSET(flags, MPII_PG_POLL) ? SCSI_NOSLEEP : 0);
2026 	if (ccb == NULL) {
2027 		DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header ccb_get\n",
2028 		    DEVNAME(sc));
2029 		return (1);
2030 	}
2031 
2032 	if (ISSET(flags, MPII_PG_EXTENDED)) {
2033 		etype = type;
2034 		type = MPII_CONFIG_REQ_PAGE_TYPE_EXTENDED;
2035 	}
2036 
2037 	cq = ccb->ccb_cmd;
2038 
2039 	cq->function = MPII_FUNCTION_CONFIG;
2040 
2041 	cq->action = MPII_CONFIG_REQ_ACTION_PAGE_HEADER;
2042 
2043 	cq->config_header.page_number = number;
2044 	cq->config_header.page_type = type;
2045 	cq->ext_page_type = etype;
2046 	htolem32(&cq->page_address, address);
2047 	htolem32(&cq->page_buffer.sg_hdr, MPII_SGE_FL_TYPE_SIMPLE |
2048 	    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL);
2049 
2050 	ccb->ccb_done = mpii_empty_done;
2051 	if (ISSET(flags, MPII_PG_POLL)) {
2052 		if (mpii_poll(sc, ccb) != 0) {
2053 			DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
2054 			    DEVNAME(sc));
2055 			return (1);
2056 		}
2057 	} else
2058 		mpii_wait(sc, ccb);
2059 
2060 	if (ccb->ccb_rcb == NULL) {
2061 		scsi_io_put(&sc->sc_iopool, ccb);
2062 		return (1);
2063 	}
2064 	cp = ccb->ccb_rcb->rcb_reply;
2065 
2066 	DNPRINTF(MPII_D_MISC, "%s:  action: 0x%02x sgl_flags: 0x%02x "
2067 	    "msg_length: %d function: 0x%02x\n", DEVNAME(sc), cp->action,
2068 	    cp->sgl_flags, cp->msg_length, cp->function);
2069 	DNPRINTF(MPII_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
2070 	    "msg_flags: 0x%02x\n", DEVNAME(sc),
2071 	    letoh16(cp->ext_page_length), cp->ext_page_type,
2072 	    cp->msg_flags);
2073 	DNPRINTF(MPII_D_MISC, "%s:  vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
2074 	    cp->vp_id, cp->vf_id);
2075 	DNPRINTF(MPII_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2076 	    letoh16(cp->ioc_status));
2077 	DNPRINTF(MPII_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2078 	    letoh32(cp->ioc_loginfo));
2079 	DNPRINTF(MPII_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
2080 	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2081 	    cp->config_header.page_version,
2082 	    cp->config_header.page_length,
2083 	    cp->config_header.page_number,
2084 	    cp->config_header.page_type);
2085 
2086 	if (lemtoh16(&cp->ioc_status) != MPII_IOCSTATUS_SUCCESS)
2087 		rv = 1;
2088 	else if (ISSET(flags, MPII_PG_EXTENDED)) {
2089 		memset(ehdr, 0, sizeof(*ehdr));
2090 		ehdr->page_version = cp->config_header.page_version;
2091 		ehdr->page_number = cp->config_header.page_number;
2092 		ehdr->page_type = cp->config_header.page_type;
2093 		ehdr->ext_page_length = cp->ext_page_length;
2094 		ehdr->ext_page_type = cp->ext_page_type;
2095 	} else
2096 		*hdr = cp->config_header;
2097 
2098 	mpii_push_reply(sc, ccb->ccb_rcb);
2099 	scsi_io_put(&sc->sc_iopool, ccb);
2100 
2101 	return (rv);
2102 }
2103 
2104 int
2105 mpii_req_cfg_page(struct mpii_softc *sc, u_int32_t address, int flags,
2106     void *p, int read, void *page, size_t len)
2107 {
2108 	struct mpii_msg_config_request		*cq;
2109 	struct mpii_msg_config_reply		*cp;
2110 	struct mpii_ccb				*ccb;
2111 	struct mpii_cfg_hdr			*hdr = p;
2112 	struct mpii_ecfg_hdr			*ehdr = p;
2113 	caddr_t					kva;
2114 	int					page_length;
2115 	int					rv = 0;
2116 
2117 	DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page address: %d read: %d "
2118 	    "type: %x\n", DEVNAME(sc), address, read, hdr->page_type);
2119 
2120 	page_length = ISSET(flags, MPII_PG_EXTENDED) ?
2121 	    lemtoh16(&ehdr->ext_page_length) : hdr->page_length;
2122 
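	/*
	 * the page is bounced through the tail of the request frame, so
	 * it has to fit behind the config request and be at least as
	 * large as the page reported by the header
	 */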
2123 	if (len > sc->sc_request_size - sizeof(*cq) || len < page_length * 4)
2124 		return (1);
2125 
2126 	ccb = scsi_io_get(&sc->sc_iopool,
2127 	    ISSET(flags, MPII_PG_POLL) ? SCSI_NOSLEEP : 0);
2128 	if (ccb == NULL) {
2129 		DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page ccb_get\n",
2130 		    DEVNAME(sc));
2131 		return (1);
2132 	}
2133 
2134 	cq = ccb->ccb_cmd;
2135 
2136 	cq->function = MPII_FUNCTION_CONFIG;
2137 
2138 	cq->action = (read ? MPII_CONFIG_REQ_ACTION_PAGE_READ_CURRENT :
2139 	    MPII_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT);
2140 
2141 	if (ISSET(flags, MPII_PG_EXTENDED)) {
2142 		cq->config_header.page_version = ehdr->page_version;
2143 		cq->config_header.page_number = ehdr->page_number;
2144 		cq->config_header.page_type = ehdr->page_type;
2145 		cq->ext_page_len = ehdr->ext_page_length;
2146 		cq->ext_page_type = ehdr->ext_page_type;
2147 	} else
2148 		cq->config_header = *hdr;
2149 	cq->config_header.page_type &= MPII_CONFIG_REQ_PAGE_TYPE_MASK;
2150 	htolem32(&cq->page_address, address);
2151 	htolem32(&cq->page_buffer.sg_hdr, MPII_SGE_FL_TYPE_SIMPLE |
2152 	    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL |
2153 	    MPII_SGE_FL_SIZE_64 | (page_length * 4) |
2154 	    (read ? MPII_SGE_FL_DIR_IN : MPII_SGE_FL_DIR_OUT));
2155 
2156 	/* bounce the page via the request space to avoid more bus_dma games */
2157 	mpii_dvatosge(&cq->page_buffer, ccb->ccb_cmd_dva +
2158 	    sizeof(struct mpii_msg_config_request));
2159 
2160 	kva = ccb->ccb_cmd;
2161 	kva += sizeof(struct mpii_msg_config_request);
2162 
2163 	if (!read)
2164 		memcpy(kva, page, len);
2165 
2166 	ccb->ccb_done = mpii_empty_done;
2167 	if (ISSET(flags, MPII_PG_POLL)) {
2168 		if (mpii_poll(sc, ccb) != 0) {
2169 			DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page poll\n",
2170 			    DEVNAME(sc));
2171 			return (1);
2172 		}
2173 	} else
2174 		mpii_wait(sc, ccb);
2175 
2176 	if (ccb->ccb_rcb == NULL) {
2177 		scsi_io_put(&sc->sc_iopool, ccb);
2178 		return (1);
2179 	}
2180 	cp = ccb->ccb_rcb->rcb_reply;
2181 
2182 	DNPRINTF(MPII_D_MISC, "%s:  action: 0x%02x msg_length: %d "
2183 	    "function: 0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length,
2184 	    cp->function);
2185 	DNPRINTF(MPII_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
2186 	    "msg_flags: 0x%02x\n", DEVNAME(sc),
2187 	    letoh16(cp->ext_page_length), cp->ext_page_type,
2188 	    cp->msg_flags);
2189 	DNPRINTF(MPII_D_MISC, "%s:  vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
2190 	    cp->vp_id, cp->vf_id);
2191 	DNPRINTF(MPII_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2192 	    letoh16(cp->ioc_status));
2193 	DNPRINTF(MPII_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2194 	    letoh32(cp->ioc_loginfo));
2195 	DNPRINTF(MPII_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
2196 	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2197 	    cp->config_header.page_version,
2198 	    cp->config_header.page_length,
2199 	    cp->config_header.page_number,
2200 	    cp->config_header.page_type);
2201 
2202 	if (lemtoh16(&cp->ioc_status) != MPII_IOCSTATUS_SUCCESS)
2203 		rv = 1;
2204 	else if (read)
2205 		memcpy(page, kva, len);
2206 
2207 	mpii_push_reply(sc, ccb->ccb_rcb);
2208 	scsi_io_put(&sc->sc_iopool, ccb);
2209 
2210 	return (rv);
2211 }
2212 
2213 struct mpii_rcb *
2214 mpii_reply(struct mpii_softc *sc, struct mpii_reply_descr *rdp)
2215 {
2216 	struct mpii_rcb		*rcb = NULL;
2217 	u_int32_t		rfid;
2218 
2219 	DNPRINTF(MPII_D_INTR, "%s: mpii_reply\n", DEVNAME(sc));
2220 
2221 	if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
2222 	    MPII_REPLY_DESCR_ADDRESS_REPLY) {
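		/*
		 * address replies point at one of the preallocated reply
		 * frames; turn the dma address back into an index into
		 * the rcb array
		 */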
2223 		rfid = (lemtoh32(&rdp->frame_addr) -
2224 		    (u_int32_t)MPII_DMA_DVA(sc->sc_replies)) /
2225 		    sc->sc_reply_size;
2226 
2227 		bus_dmamap_sync(sc->sc_dmat,
2228 		    MPII_DMA_MAP(sc->sc_replies), sc->sc_reply_size * rfid,
2229 		    sc->sc_reply_size, BUS_DMASYNC_POSTREAD);
2230 
2231 		rcb = &sc->sc_rcbs[rfid];
2232 	}
2233 
2234 	memset(rdp, 0xff, sizeof(*rdp));
2235 
2236 	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
2237 	    8 * sc->sc_reply_post_host_index, 8,
2238 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2239 
2240 	return (rcb);
2241 }
2242 
2243 struct mpii_dmamem *
2244 mpii_dmamem_alloc(struct mpii_softc *sc, size_t size)
2245 {
2246 	struct mpii_dmamem	*mdm;
2247 	int			nsegs;
2248 
2249 	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_NOWAIT | M_ZERO);
2250 	if (mdm == NULL)
2251 		return (NULL);
2252 
2253 	mdm->mdm_size = size;
2254 
2255 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
2256 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
2257 		goto mdmfree;
2258 
2259 	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg,
2260 	    1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
2261 		goto destroy;
2262 
2263 	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
2264 	    &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0)
2265 		goto free;
2266 
2267 	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
2268 	    NULL, BUS_DMA_NOWAIT) != 0)
2269 		goto unmap;
2270 
2271 	return (mdm);
2272 
2273 unmap:
2274 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
2275 free:
2276 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
2277 destroy:
2278 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
2279 mdmfree:
2280 	free(mdm, M_DEVBUF, 0);
2281 
2282 	return (NULL);
2283 }
2284 
2285 void
2286 mpii_dmamem_free(struct mpii_softc *sc, struct mpii_dmamem *mdm)
2287 {
2288 	DNPRINTF(MPII_D_MEM, "%s: mpii_dmamem_free %#x\n", DEVNAME(sc), mdm);
2289 
2290 	bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map);
2291 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
2292 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
2293 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
2294 	free(mdm, M_DEVBUF, 0);
2295 }
2296 
2297 int
2298 mpii_insert_dev(struct mpii_softc *sc, struct mpii_device *dev)
2299 {
2300 	int		slot;	/* initial hint */
2301 
2302 	if (dev == NULL || dev->slot < 0)
2303 		return (1);
2304 	slot = dev->slot;
2305 
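	/* scan forward from the hint for the first free slot */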
2306 	while (slot < sc->sc_max_devices && sc->sc_devs[slot] != NULL)
2307 		slot++;
2308 
2309 	if (slot >= sc->sc_max_devices)
2310 		return (1);
2311 
2312 	dev->slot = slot;
2313 	sc->sc_devs[slot] = dev;
2314 
2315 	return (0);
2316 }
2317 
2318 int
2319 mpii_remove_dev(struct mpii_softc *sc, struct mpii_device *dev)
2320 {
2321 	int			i;
2322 
2323 	if (dev == NULL)
2324 		return (1);
2325 
2326 	for (i = 0; i < sc->sc_max_devices; i++) {
2327 		if (sc->sc_devs[i] == NULL)
2328 			continue;
2329 
2330 		if (sc->sc_devs[i]->dev_handle == dev->dev_handle) {
2331 			sc->sc_devs[i] = NULL;
2332 			return (0);
2333 		}
2334 	}
2335 
2336 	return (1);
2337 }
2338 
2339 struct mpii_device *
2340 mpii_find_dev(struct mpii_softc *sc, u_int16_t handle)
2341 {
2342 	int			i;
2343 
2344 	for (i = 0; i < sc->sc_max_devices; i++) {
2345 		if (sc->sc_devs[i] == NULL)
2346 			continue;
2347 
2348 		if (sc->sc_devs[i]->dev_handle == handle)
2349 			return (sc->sc_devs[i]);
2350 	}
2351 
2352 	return (NULL);
2353 }
2354 
2355 int
2356 mpii_alloc_ccbs(struct mpii_softc *sc)
2357 {
2358 	struct mpii_ccb		*ccb;
2359 	u_int8_t		*cmd;
2360 	int			i;
2361 
2362 	SIMPLEQ_INIT(&sc->sc_ccb_free);
2363 	SIMPLEQ_INIT(&sc->sc_ccb_tmos);
2364 	mtx_init(&sc->sc_ccb_free_mtx, IPL_BIO);
2365 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
2366 	scsi_ioh_set(&sc->sc_ccb_tmo_handler, &sc->sc_iopool,
2367 	    mpii_scsi_cmd_tmo_handler, sc);
2368 
2369 	sc->sc_ccbs = mallocarray(sc->sc_max_cmds - 1, sizeof(*ccb),
2370 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2371 	if (sc->sc_ccbs == NULL) {
2372 		printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
2373 		return (1);
2374 	}
2375 
2376 	sc->sc_requests = mpii_dmamem_alloc(sc,
2377 	    sc->sc_request_size * sc->sc_max_cmds);
2378 	if (sc->sc_requests == NULL) {
2379 		printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
2380 		goto free_ccbs;
2381 	}
2382 	cmd = MPII_DMA_KVA(sc->sc_requests);
2383 
2384 	/*
2385 	 * we have sc->sc_max_cmds system request message
2386 	 * frames, but smid zero cannot be used, so we only
2387 	 * get (sc->sc_max_cmds - 1) usable ccbs
2388 	 */
2389 	for (i = 1; i < sc->sc_max_cmds; i++) {
2390 		ccb = &sc->sc_ccbs[i - 1];
2391 
2392 		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, sc->sc_max_sgl,
2393 		    MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
2394 		    &ccb->ccb_dmamap) != 0) {
2395 			printf("%s: unable to create dma map\n", DEVNAME(sc));
2396 			goto free_maps;
2397 		}
2398 
2399 		ccb->ccb_sc = sc;
2400 		htolem16(&ccb->ccb_smid, i);
2401 		ccb->ccb_offset = sc->sc_request_size * i;
2402 
2403 		ccb->ccb_cmd = &cmd[ccb->ccb_offset];
2404 		ccb->ccb_cmd_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_requests) +
2405 		    ccb->ccb_offset;
2406 
2407 		DNPRINTF(MPII_D_CCB, "%s: mpii_alloc_ccbs(%d) ccb: %#x map: %#x "
2408 		    "sc: %#x smid: %#x offs: %#x cmd: %#x dva: %#x\n",
2409 		    DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc,
2410 		    ccb->ccb_smid, ccb->ccb_offset, ccb->ccb_cmd,
2411 		    ccb->ccb_cmd_dva);
2412 
2413 		mpii_put_ccb(sc, ccb);
2414 	}
2415 
2416 	scsi_iopool_init(&sc->sc_iopool, sc, mpii_get_ccb, mpii_put_ccb);
2417 
2418 	return (0);
2419 
2420 free_maps:
2421 	while ((ccb = mpii_get_ccb(sc)) != NULL)
2422 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
2423 
2424 	mpii_dmamem_free(sc, sc->sc_requests);
2425 free_ccbs:
2426 	free(sc->sc_ccbs, M_DEVBUF, 0);
2427 
2428 	return (1);
2429 }
2430 
2431 void
2432 mpii_put_ccb(void *cookie, void *io)
2433 {
2434 	struct mpii_softc	*sc = cookie;
2435 	struct mpii_ccb		*ccb = io;
2436 
2437 	DNPRINTF(MPII_D_CCB, "%s: mpii_put_ccb %#x\n", DEVNAME(sc), ccb);
2438 
2439 	ccb->ccb_state = MPII_CCB_FREE;
2440 	ccb->ccb_cookie = NULL;
2441 	ccb->ccb_done = NULL;
2442 	ccb->ccb_rcb = NULL;
2443 	memset(ccb->ccb_cmd, 0, sc->sc_request_size);
2444 
2445 	mtx_enter(&sc->sc_ccb_free_mtx);
2446 	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link);
2447 	mtx_leave(&sc->sc_ccb_free_mtx);
2448 }
2449 
2450 void *
2451 mpii_get_ccb(void *cookie)
2452 {
2453 	struct mpii_softc	*sc = cookie;
2454 	struct mpii_ccb		*ccb;
2455 
2456 	mtx_enter(&sc->sc_ccb_free_mtx);
2457 	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free);
2458 	if (ccb != NULL) {
2459 		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link);
2460 		ccb->ccb_state = MPII_CCB_READY;
2461 	}
2462 	mtx_leave(&sc->sc_ccb_free_mtx);
2463 
2464 	DNPRINTF(MPII_D_CCB, "%s: mpii_get_ccb %#x\n", DEVNAME(sc), ccb);
2465 
2466 	return (ccb);
2467 }
2468 
2469 int
2470 mpii_alloc_replies(struct mpii_softc *sc)
2471 {
2472 	DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_replies\n", DEVNAME(sc));
2473 
2474 	sc->sc_rcbs = mallocarray(sc->sc_num_reply_frames,
2475 	    sizeof(struct mpii_rcb), M_DEVBUF, M_NOWAIT);
2476 	if (sc->sc_rcbs == NULL)
2477 		return (1);
2478 
2479 	sc->sc_replies = mpii_dmamem_alloc(sc, sc->sc_reply_size *
2480 	    sc->sc_num_reply_frames);
2481 	if (sc->sc_replies == NULL) {
2482 		free(sc->sc_rcbs, M_DEVBUF, 0);
2483 		return (1);
2484 	}
2485 
2486 	return (0);
2487 }
2488 
2489 void
2490 mpii_push_replies(struct mpii_softc *sc)
2491 {
2492 	struct mpii_rcb		*rcb;
2493 	caddr_t			kva = MPII_DMA_KVA(sc->sc_replies);
2494 	int			i;
2495 
2496 	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
2497 	    0, sc->sc_reply_size * sc->sc_num_reply_frames,
2498 	    BUS_DMASYNC_PREREAD);
2499 
2500 	for (i = 0; i < sc->sc_num_reply_frames; i++) {
2501 		rcb = &sc->sc_rcbs[i];
2502 
2503 		rcb->rcb_reply = kva + sc->sc_reply_size * i;
2504 		rcb->rcb_reply_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) +
2505 		    sc->sc_reply_size * i;
2506 		mpii_push_reply(sc, rcb);
2507 	}
2508 }
2509 
2510 void
2511 mpii_start(struct mpii_softc *sc, struct mpii_ccb *ccb)
2512 {
2513 	struct mpii_request_header	*rhp;
2514 	struct mpii_request_descr	descr;
2515 	u_long				 *rdp = (u_long *)&descr;
2516 
2517 	DNPRINTF(MPII_D_RW, "%s: mpii_start %#x\n", DEVNAME(sc),
2518 	    ccb->ccb_cmd_dva);
2519 
2520 	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_requests),
2521 	    ccb->ccb_offset, sc->sc_request_size,
2522 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2523 
2524 	ccb->ccb_state = MPII_CCB_QUEUED;
2525 
2526 	rhp = ccb->ccb_cmd;
2527 
2528 	memset(&descr, 0, sizeof(descr));
2529 
2530 	switch (rhp->function) {
2531 	case MPII_FUNCTION_SCSI_IO_REQUEST:
2532 		descr.request_flags = MPII_REQ_DESCR_SCSI_IO;
2533 		descr.dev_handle = htole16(ccb->ccb_dev_handle);
2534 		break;
2535 	case MPII_FUNCTION_SCSI_TASK_MGMT:
2536 		descr.request_flags = MPII_REQ_DESCR_HIGH_PRIORITY;
2537 		break;
2538 	default:
2539 		descr.request_flags = MPII_REQ_DESCR_DEFAULT;
2540 	}
2541 
2542 	descr.vf_id = sc->sc_vf_id;
2543 	descr.smid = ccb->ccb_smid;
2544 
2545 	DNPRINTF(MPII_D_RW, "%s:   MPII_REQ_DESCR_POST_LOW (0x%08x) write "
2546 	    "0x%08x\n", DEVNAME(sc), MPII_REQ_DESCR_POST_LOW, *rdp);
2547 
2548 	DNPRINTF(MPII_D_RW, "%s:   MPII_REQ_DESCR_POST_HIGH (0x%08x) write "
2549 	    "0x%08x\n", DEVNAME(sc), MPII_REQ_DESCR_POST_HIGH, *(rdp+1));
2550 
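	/*
	 * the request descriptor is a single 64-bit quantity; post it
	 * with one 8 byte write on LP64 platforms, otherwise serialise
	 * the two 4 byte halves with sc_req_mtx so concurrent posts
	 * can't interleave
	 */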
2551 #if defined(__LP64__)
2552 	bus_space_write_raw_8(sc->sc_iot, sc->sc_ioh,
2553 	    MPII_REQ_DESCR_POST_LOW, *rdp);
2554 #else
2555 	mtx_enter(&sc->sc_req_mtx);
2556 	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh,
2557 	    MPII_REQ_DESCR_POST_LOW, rdp[0]);
2558 	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
2559 	    MPII_REQ_DESCR_POST_LOW, 8, BUS_SPACE_BARRIER_WRITE);
2560 
2561 	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh,
2562 	    MPII_REQ_DESCR_POST_HIGH, rdp[1]);
2563 	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
2564 	    MPII_REQ_DESCR_POST_LOW, 8, BUS_SPACE_BARRIER_WRITE);
2565 	mtx_leave(&sc->sc_req_mtx);
2566 #endif
2567 }
2568 
2569 int
2570 mpii_poll(struct mpii_softc *sc, struct mpii_ccb *ccb)
2571 {
2572 	void				(*done)(struct mpii_ccb *);
2573 	void				*cookie;
2574 	int				rv = 1;
2575 
2576 	DNPRINTF(MPII_D_INTR, "%s: mpii_poll\n", DEVNAME(sc));
2577 
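	/* borrow the ccb completion hooks so we can spin on it below */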
2578 	done = ccb->ccb_done;
2579 	cookie = ccb->ccb_cookie;
2580 
2581 	ccb->ccb_done = mpii_poll_done;
2582 	ccb->ccb_cookie = &rv;
2583 
2584 	mpii_start(sc, ccb);
2585 
2586 	while (rv == 1) {
2587 		/* avoid excessive polling */
2588 		if (mpii_reply_waiting(sc))
2589 			mpii_intr(sc);
2590 		else
2591 			delay(10);
2592 	}
2593 
2594 	ccb->ccb_cookie = cookie;
2595 	done(ccb);
2596 
2597 	return (0);
2598 }
2599 
2600 void
2601 mpii_poll_done(struct mpii_ccb *ccb)
2602 {
2603 	int				*rv = ccb->ccb_cookie;
2604 
2605 	*rv = 0;
2606 }
2607 
2608 int
2609 mpii_alloc_queues(struct mpii_softc *sc)
2610 {
2611 	u_int32_t		*rfp;
2612 	int			i;
2613 
2614 	DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_queues\n", DEVNAME(sc));
2615 
2616 	sc->sc_reply_freeq = mpii_dmamem_alloc(sc,
2617 	    sc->sc_reply_free_qdepth * sizeof(*rfp));
2618 	if (sc->sc_reply_freeq == NULL)
2619 		return (1);
2620 	rfp = MPII_DMA_KVA(sc->sc_reply_freeq);
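	/* seed the free queue with the dma address of every reply frame */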
2621 	for (i = 0; i < sc->sc_num_reply_frames; i++) {
2622 		rfp[i] = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) +
2623 		    sc->sc_reply_size * i;
2624 	}
2625 
2626 	sc->sc_reply_postq = mpii_dmamem_alloc(sc,
2627 	    sc->sc_reply_post_qdepth * sizeof(struct mpii_reply_descr));
2628 	if (sc->sc_reply_postq == NULL)
2629 		goto free_reply_freeq;
2630 	sc->sc_reply_postq_kva = MPII_DMA_KVA(sc->sc_reply_postq);
2631 	memset(sc->sc_reply_postq_kva, 0xff, sc->sc_reply_post_qdepth *
2632 	    sizeof(struct mpii_reply_descr));
2633 
2634 	return (0);
2635 
2636 free_reply_freeq:
2637 	mpii_dmamem_free(sc, sc->sc_reply_freeq);
2638 	return (1);
2639 }
2640 
2641 void
2642 mpii_init_queues(struct mpii_softc *sc)
2643 {
2644 	DNPRINTF(MPII_D_MISC, "%s:  mpii_init_queues\n", DEVNAME(sc));
2645 
2646 	sc->sc_reply_free_host_index = sc->sc_reply_free_qdepth - 1;
2647 	sc->sc_reply_post_host_index = 0;
2648 	mpii_write_reply_free(sc, sc->sc_reply_free_host_index);
2649 	mpii_write_reply_post(sc, sc->sc_reply_post_host_index);
2650 }
2651 
2652 void
2653 mpii_wait(struct mpii_softc *sc, struct mpii_ccb *ccb)
2654 {
2655 	struct mutex		mtx = MUTEX_INITIALIZER(IPL_BIO);
2656 	void			(*done)(struct mpii_ccb *);
2657 	void			*cookie;
2658 
2659 	done = ccb->ccb_done;
2660 	cookie = ccb->ccb_cookie;
2661 
2662 	ccb->ccb_done = mpii_wait_done;
2663 	ccb->ccb_cookie = &mtx;
2664 
2665 	/* XXX this will wait forever for the ccb to complete */
2666 
2667 	mpii_start(sc, ccb);
2668 
2669 	mtx_enter(&mtx);
2670 	while (ccb->ccb_cookie != NULL)
2671 		msleep(ccb, &mtx, PRIBIO, "mpiiwait", 0);
2672 	mtx_leave(&mtx);
2673 
2674 	ccb->ccb_cookie = cookie;
2675 	done(ccb);
2676 }
2677 
2678 void
2679 mpii_wait_done(struct mpii_ccb *ccb)
2680 {
2681 	struct mutex		*mtx = ccb->ccb_cookie;
2682 
2683 	mtx_enter(mtx);
2684 	ccb->ccb_cookie = NULL;
2685 	mtx_leave(mtx);
2686 
2687 	wakeup_one(ccb);
2688 }
2689 
2690 void
2691 mpii_scsi_cmd(struct scsi_xfer *xs)
2692 {
2693 	struct scsi_link	*link = xs->sc_link;
2694 	struct mpii_softc	*sc = link->adapter_softc;
2695 	struct mpii_ccb		*ccb = xs->io;
2696 	struct mpii_msg_scsi_io	*io;
2697 	struct mpii_device	*dev;
2698 
2699 	DNPRINTF(MPII_D_CMD, "%s: mpii_scsi_cmd\n", DEVNAME(sc));
2700 
2701 	if (xs->cmdlen > MPII_CDB_LEN) {
2702 		DNPRINTF(MPII_D_CMD, "%s: CDB too big %d\n",
2703 		    DEVNAME(sc), xs->cmdlen);
2704 		memset(&xs->sense, 0, sizeof(xs->sense));
2705 		xs->sense.error_code = SSD_ERRCODE_VALID | 0x70;
2706 		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
2707 		xs->sense.add_sense_code = 0x20;
2708 		xs->error = XS_SENSE;
2709 		scsi_done(xs);
2710 		return;
2711 	}
2712 
2713 	if ((dev = sc->sc_devs[link->target]) == NULL) {
2714 		/* device no longer exists */
2715 		xs->error = XS_SELTIMEOUT;
2716 		scsi_done(xs);
2717 		return;
2718 	}
2719 
2720 	DNPRINTF(MPII_D_CMD, "%s: ccb_smid: %d xs->flags: 0x%x\n",
2721 	    DEVNAME(sc), ccb->ccb_smid, xs->flags);
2722 
2723 	ccb->ccb_cookie = xs;
2724 	ccb->ccb_done = mpii_scsi_cmd_done;
2725 	ccb->ccb_dev_handle = dev->dev_handle;
2726 
2727 	io = ccb->ccb_cmd;
2728 	memset(io, 0, sizeof(*io));
2729 	io->function = MPII_FUNCTION_SCSI_IO_REQUEST;
2730 	io->sense_buffer_length = sizeof(xs->sense);
2731 	io->sgl_offset0 = sizeof(struct mpii_msg_scsi_io) / 4;
2732 	htolem16(&io->io_flags, xs->cmdlen);
2733 	htolem16(&io->dev_handle, ccb->ccb_dev_handle);
2734 	htobem16(&io->lun[0], link->lun);
2735 
2736 	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
2737 	case SCSI_DATA_IN:
2738 		io->direction = MPII_SCSIIO_DIR_READ;
2739 		break;
2740 	case SCSI_DATA_OUT:
2741 		io->direction = MPII_SCSIIO_DIR_WRITE;
2742 		break;
2743 	default:
2744 		io->direction = MPII_SCSIIO_DIR_NONE;
2745 		break;
2746 	}
2747 
2748 	io->tagging = MPII_SCSIIO_ATTR_SIMPLE_Q;
2749 
2750 	memcpy(io->cdb, xs->cmd, xs->cmdlen);
2751 
2752 	htolem32(&io->data_length, xs->datalen);
2753 
2754 	/* sense data is at the end of a request */
2755 	htolem32(&io->sense_buffer_low_address, ccb->ccb_cmd_dva +
2756 	    sc->sc_request_size - sizeof(struct scsi_sense_data));
2757 
2758 	if (mpii_load_xs(ccb) != 0) {
2759 		xs->error = XS_DRIVER_STUFFUP;
2760 		scsi_done(xs);
2761 		return;
2762 	}
2763 
2764 	timeout_set(&xs->stimeout, mpii_scsi_cmd_tmo, ccb);
2765 	if (xs->flags & SCSI_POLL) {
2766 		if (mpii_poll(sc, ccb) != 0) {
2767 			xs->error = XS_DRIVER_STUFFUP;
2768 			scsi_done(xs);
2769 		}
2770 		return;
2771 	}
2772 
2773 	timeout_add_msec(&xs->stimeout, xs->timeout);
2774 	mpii_start(sc, ccb);
2775 }
2776 
2777 void
2778 mpii_scsi_cmd_tmo(void *xccb)
2779 {
2780 	struct mpii_ccb		*ccb = xccb;
2781 	struct mpii_softc	*sc = ccb->ccb_sc;
2782 
2783 	printf("%s: mpii_scsi_cmd_tmo\n", DEVNAME(sc));
2784 
2785 	mtx_enter(&sc->sc_ccb_mtx);
2786 	if (ccb->ccb_state == MPII_CCB_QUEUED) {
2787 		ccb->ccb_state = MPII_CCB_TIMEOUT;
2788 		SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_tmos, ccb, ccb_link);
2789 	}
2790 	mtx_leave(&sc->sc_ccb_mtx);
2791 
2792 	scsi_ioh_add(&sc->sc_ccb_tmo_handler);
2793 }
2794 
2795 void
2796 mpii_scsi_cmd_tmo_handler(void *cookie, void *io)
2797 {
2798 	struct mpii_softc			*sc = cookie;
2799 	struct mpii_ccb				*tccb = io;
2800 	struct mpii_ccb				*ccb;
2801 	struct mpii_msg_scsi_task_request	*stq;
2802 
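	/*
	 * take the first timed out ccb off the list and use the ccb
	 * handed to us by the iopool to send a target reset to its
	 * device
	 */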
2803 	mtx_enter(&sc->sc_ccb_mtx);
2804 	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_tmos);
2805 	if (ccb != NULL) {
2806 		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_tmos, ccb_link);
2807 		ccb->ccb_state = MPII_CCB_QUEUED;
2808 	}
2809 	/* should remove any other ccbs for the same dev handle */
2810 	mtx_leave(&sc->sc_ccb_mtx);
2811 
2812 	if (ccb == NULL) {
2813 		scsi_io_put(&sc->sc_iopool, tccb);
2814 		return;
2815 	}
2816 
2817 	stq = tccb->ccb_cmd;
2818 	stq->function = MPII_FUNCTION_SCSI_TASK_MGMT;
2819 	stq->task_type = MPII_SCSI_TASK_TARGET_RESET;
2820 	htolem16(&stq->dev_handle, ccb->ccb_dev_handle);
2821 
2822 	tccb->ccb_done = mpii_scsi_cmd_tmo_done;
2823 	mpii_start(sc, tccb);
2824 }
2825 
2826 void
2827 mpii_scsi_cmd_tmo_done(struct mpii_ccb *tccb)
2828 {
2829 	mpii_scsi_cmd_tmo_handler(tccb->ccb_sc, tccb);
2830 }
2831 
2832 void
2833 mpii_scsi_cmd_done(struct mpii_ccb *ccb)
2834 {
2835 	struct mpii_ccb		*tccb;
2836 	struct mpii_msg_scsi_io_error	*sie;
2837 	struct mpii_softc	*sc = ccb->ccb_sc;
2838 	struct scsi_xfer	*xs = ccb->ccb_cookie;
2839 	struct scsi_sense_data	*sense;
2840 	bus_dmamap_t		dmap = ccb->ccb_dmamap;
2841 
2842 	timeout_del(&xs->stimeout);
2843 	mtx_enter(&sc->sc_ccb_mtx);
2844 	if (ccb->ccb_state == MPII_CCB_TIMEOUT) {
2845 		/* there is no SIMPLEQ_REMOVE(), so unlink the ccb by hand */
2846 		if (ccb == SIMPLEQ_FIRST(&sc->sc_ccb_tmos))
2847 			SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_tmos, ccb_link);
2848 		else {
2849 			SIMPLEQ_FOREACH(tccb, &sc->sc_ccb_tmos, ccb_link) {
2850 				if (SIMPLEQ_NEXT(tccb, ccb_link) == ccb) {
2851 					SIMPLEQ_REMOVE_AFTER(&sc->sc_ccb_tmos,
2852 					    tccb, ccb_link);
2853 					break;
2854 				}
2855 			}
2856 		}
2857 	}
2858 
2859 	ccb->ccb_state = MPII_CCB_READY;
2860 	mtx_leave(&sc->sc_ccb_mtx);
2861 
2862 	if (xs->datalen != 0) {
2863 		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
2864 		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
2865 		    BUS_DMASYNC_POSTWRITE);
2866 
2867 		bus_dmamap_unload(sc->sc_dmat, dmap);
2868 	}
2869 
2870 	xs->error = XS_NOERROR;
2871 	xs->resid = 0;
2872 
2873 	if (ccb->ccb_rcb == NULL) {
2874 		/* no scsi error, we're ok so drop out early */
2875 		xs->status = SCSI_OK;
2876 		scsi_done(xs);
2877 		return;
2878 	}
2879 
2880 	sie = ccb->ccb_rcb->rcb_reply;
2881 
2882 	DNPRINTF(MPII_D_CMD, "%s: mpii_scsi_cmd_done xs cmd: 0x%02x len: %d "
2883 	    "flags 0x%x\n", DEVNAME(sc), xs->cmd->opcode, xs->datalen,
2884 	    xs->flags);
2885 	DNPRINTF(MPII_D_CMD, "%s:  dev_handle: %d msg_length: %d "
2886 	    "function: 0x%02x\n", DEVNAME(sc), letoh16(sie->dev_handle),
2887 	    sie->msg_length, sie->function);
2888 	DNPRINTF(MPII_D_CMD, "%s:  vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
2889 	    sie->vp_id, sie->vf_id);
2890 	DNPRINTF(MPII_D_CMD, "%s:  scsi_status: 0x%02x scsi_state: 0x%02x "
2891 	    "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status,
2892 	    sie->scsi_state, letoh16(sie->ioc_status));
2893 	DNPRINTF(MPII_D_CMD, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2894 	    letoh32(sie->ioc_loginfo));
2895 	DNPRINTF(MPII_D_CMD, "%s:  transfer_count: %d\n", DEVNAME(sc),
2896 	    letoh32(sie->transfer_count));
2897 	DNPRINTF(MPII_D_CMD, "%s:  sense_count: %d\n", DEVNAME(sc),
2898 	    letoh32(sie->sense_count));
2899 	DNPRINTF(MPII_D_CMD, "%s:  response_info: 0x%08x\n", DEVNAME(sc),
2900 	    letoh32(sie->response_info));
2901 	DNPRINTF(MPII_D_CMD, "%s:  task_tag: 0x%04x\n", DEVNAME(sc),
2902 	    letoh16(sie->task_tag));
2903 	DNPRINTF(MPII_D_CMD, "%s:  bidirectional_transfer_count: 0x%08x\n",
2904 	    DEVNAME(sc), letoh32(sie->bidirectional_transfer_count));
2905 
2906 	xs->status = sie->scsi_status;
2907 	switch (lemtoh16(&sie->ioc_status) & MPII_IOCSTATUS_MASK) {
2908 	case MPII_IOCSTATUS_SCSI_DATA_UNDERRUN:
2909 		switch (xs->status) {
2910 		case SCSI_OK:
2911 			xs->resid = xs->datalen -
2912 			    lemtoh32(&sie->transfer_count);
2913 			break;
2914 		default:
2915 			xs->error = XS_DRIVER_STUFFUP;
2916 			break;
2917 		}
2918 		break;
2919 
2920 	case MPII_IOCSTATUS_SUCCESS:
2921 	case MPII_IOCSTATUS_SCSI_RECOVERED_ERROR:
2922 		switch (xs->status) {
2923 		case SCSI_OK:
2924 			xs->resid = 0;
2925 			break;
2926 
2927 		case SCSI_CHECK:
2928 			xs->error = XS_SENSE;
2929 			break;
2930 
2931 		case SCSI_BUSY:
2932 		case SCSI_QUEUE_FULL:
2933 			xs->error = XS_BUSY;
2934 			break;
2935 
2936 		default:
2937 			xs->error = XS_DRIVER_STUFFUP;
2938 		}
2939 		break;
2940 
2941 	case MPII_IOCSTATUS_BUSY:
2942 	case MPII_IOCSTATUS_INSUFFICIENT_RESOURCES:
2943 		xs->error = XS_BUSY;
2944 		break;
2945 
2946 	case MPII_IOCSTATUS_SCSI_IOC_TERMINATED:
2947 	case MPII_IOCSTATUS_SCSI_TASK_TERMINATED:
2948 		xs->error = XS_RESET;
2949 		break;
2950 
2951 	case MPII_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2952 	case MPII_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2953 		xs->error = XS_SELTIMEOUT;
2954 		break;
2955 
2956 	default:
2957 		xs->error = XS_DRIVER_STUFFUP;
2958 		break;
2959 	}
2960 
2961 	sense = (struct scsi_sense_data *)((caddr_t)ccb->ccb_cmd +
2962 	    sc->sc_request_size - sizeof(*sense));
2963 	if (sie->scsi_state & MPII_SCSIIO_ERR_STATE_AUTOSENSE_VALID)
2964 		memcpy(&xs->sense, sense, sizeof(xs->sense));
2965 
2966 	DNPRINTF(MPII_D_CMD, "%s:  xs err: %d status: %#x\n", DEVNAME(sc),
2967 	    xs->error, xs->status);
2968 
2969 	mpii_push_reply(sc, ccb->ccb_rcb);
2970 	scsi_done(xs);
2971 }
2972 
2973 int
2974 mpii_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
2975 {
2976 	struct mpii_softc	*sc = (struct mpii_softc *)link->adapter_softc;
2977 	struct mpii_device	*dev = sc->sc_devs[link->target];
2978 
2979 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_scsi_ioctl\n", DEVNAME(sc));
2980 
2981 	switch (cmd) {
2982 	case DIOCGCACHE:
2983 	case DIOCSCACHE:
2984 		if (dev != NULL && ISSET(dev->flags, MPII_DF_VOLUME)) {
2985 			return (mpii_ioctl_cache(link, cmd,
2986 			    (struct dk_cache *)addr));
2987 		}
2988 		break;
2989 
2990 	default:
2991 		if (sc->sc_ioctl)
2992 			return (sc->sc_ioctl(link->adapter_softc, cmd, addr));
2993 
2994 		break;
2995 	}
2996 
2997 	return (ENOTTY);
2998 }
2999 
3000 int
3001 mpii_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc)
3002 {
3003 	struct mpii_softc *sc = (struct mpii_softc *)link->adapter_softc;
3004 	struct mpii_device *dev = sc->sc_devs[link->target];
3005 	struct mpii_cfg_raid_vol_pg0 *vpg;
3006 	struct mpii_msg_raid_action_request *req;
3007 	struct mpii_msg_raid_action_reply *rep;
3008 	struct mpii_cfg_hdr hdr;
3009 	struct mpii_ccb	*ccb;
3010 	u_int32_t addr = MPII_CFG_RAID_VOL_ADDR_HANDLE | dev->dev_handle;
3011 	size_t pagelen;
3012 	int rv = 0;
3013 	int enabled;
3014 
3015 	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3016 	    addr, MPII_PG_POLL, &hdr) != 0)
3017 		return (EINVAL);
3018 
3019 	pagelen = hdr.page_length * 4;
3020 	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
3021 	if (vpg == NULL)
3022 		return (ENOMEM);
3023 
3024 	if (mpii_req_cfg_page(sc, addr, MPII_PG_POLL, &hdr, 1,
3025 	    vpg, pagelen) != 0) {
3026 		rv = EINVAL;
3027 		goto done;
3028 	}
3029 
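	/* work out whether the write cache is currently enabled */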
3030 	enabled = ((lemtoh16(&vpg->volume_settings) &
3031 	    MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_MASK) ==
3032 	    MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_ENABLED) ? 1 : 0;
3033 
3034 	if (cmd == DIOCGCACHE) {
3035 		dc->wrcache = enabled;
3036 		dc->rdcache = 0;
3037 		goto done;
3038 	} /* else DIOCSCACHE */
3039 
3040 	if (dc->rdcache) {
3041 		rv = EOPNOTSUPP;
3042 		goto done;
3043 	}
3044 
3045 	if (((dc->wrcache) ? 1 : 0) == enabled)
3046 		goto done;
3047 
3048 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_POLL);
3049 	if (ccb == NULL) {
3050 		rv = ENOMEM;
3051 		goto done;
3052 	}
3053 
3054 	ccb->ccb_done = mpii_empty_done;
3055 
3056 	req = ccb->ccb_cmd;
3057 	memset(req, 0, sizeof(*req));
3058 	req->function = MPII_FUNCTION_RAID_ACTION;
3059 	req->action = MPII_RAID_ACTION_CHANGE_VOL_WRITE_CACHE;
3060 	htolem16(&req->vol_dev_handle, dev->dev_handle);
3061 	htolem32(&req->action_data, dc->wrcache ?
3062 	    MPII_RAID_VOL_WRITE_CACHE_ENABLE :
3063 	    MPII_RAID_VOL_WRITE_CACHE_DISABLE);
3064 
3065 	if (mpii_poll(sc, ccb) != 0) {
3066 		rv = EIO;
3067 		goto done;
3068 	}
3069 
3070 	if (ccb->ccb_rcb != NULL) {
3071 		rep = ccb->ccb_rcb->rcb_reply;
3072 		if ((rep->ioc_status != MPII_IOCSTATUS_SUCCESS) ||
3073 		    ((rep->action_data[0] &
3074 		     MPII_RAID_VOL_WRITE_CACHE_MASK) !=
3075 		    (dc->wrcache ? MPII_RAID_VOL_WRITE_CACHE_ENABLE :
3076 		     MPII_RAID_VOL_WRITE_CACHE_DISABLE)))
3077 			rv = EINVAL;
3078 		mpii_push_reply(sc, ccb->ccb_rcb);
3079 	}
3080 
3081 	scsi_io_put(&sc->sc_iopool, ccb);
3082 
3083 done:
3084 	free(vpg, M_TEMP, 0);
3085 	return (rv);
3086 }
3087 
3088 #if NBIO > 0
3089 int
3090 mpii_ioctl(struct device *dev, u_long cmd, caddr_t addr)
3091 {
3092 	struct mpii_softc	*sc = (struct mpii_softc *)dev;
3093 	int			error = 0;
3094 
3095 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl ", DEVNAME(sc));
3096 
3097 	switch (cmd) {
3098 	case BIOCINQ:
3099 		DNPRINTF(MPII_D_IOCTL, "inq\n");
3100 		error = mpii_ioctl_inq(sc, (struct bioc_inq *)addr);
3101 		break;
3102 	case BIOCVOL:
3103 		DNPRINTF(MPII_D_IOCTL, "vol\n");
3104 		error = mpii_ioctl_vol(sc, (struct bioc_vol *)addr);
3105 		break;
3106 	case BIOCDISK:
3107 		DNPRINTF(MPII_D_IOCTL, "disk\n");
3108 		error = mpii_ioctl_disk(sc, (struct bioc_disk *)addr);
3109 		break;
3110 	default:
3111 		DNPRINTF(MPII_D_IOCTL, " invalid ioctl\n");
3112 		error = EINVAL;
3113 	}
3114 
3115 	return (error);
3116 }
3117 
3118 int
3119 mpii_ioctl_inq(struct mpii_softc *sc, struct bioc_inq *bi)
3120 {
3121 	int			i;
3122 
3123 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_inq\n", DEVNAME(sc));
3124 
3125 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
3126 	for (i = 0; i < sc->sc_max_devices; i++)
3127 		if (sc->sc_devs[i] &&
3128 		    ISSET(sc->sc_devs[i]->flags, MPII_DF_VOLUME))
3129 			bi->bi_novol++;
3130 	return (0);
3131 }
3132 
3133 int
3134 mpii_ioctl_vol(struct mpii_softc *sc, struct bioc_vol *bv)
3135 {
3136 	struct mpii_cfg_raid_vol_pg0	*vpg;
3137 	struct mpii_cfg_hdr		hdr;
3138 	struct mpii_device		*dev;
3139 	struct scsi_link		*lnk;
3140 	struct device			*scdev;
3141 	size_t				pagelen;
3142 	u_int16_t			volh;
3143 	int				rv, hcnt = 0;
3144 
3145 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_vol %d\n",
3146 	    DEVNAME(sc), bv->bv_volid);
3147 
3148 	if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL)
3149 		return (ENODEV);
3150 	volh = dev->dev_handle;
3151 
3152 	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3153 	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
3154 		printf("%s: unable to fetch header for raid volume page 0\n",
3155 		    DEVNAME(sc));
3156 		return (EINVAL);
3157 	}
3158 
3159 	pagelen = hdr.page_length * 4;
3160 	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
3161 	if (vpg == NULL) {
3162 		printf("%s: unable to allocate space for raid "
3163 		    "volume page 0\n", DEVNAME(sc));
3164 		return (ENOMEM);
3165 	}
3166 
3167 	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
3168 	    &hdr, 1, vpg, pagelen) != 0) {
3169 		printf("%s: unable to fetch raid volume page 0\n",
3170 		    DEVNAME(sc));
3171 		free(vpg, M_TEMP, 0);
3172 		return (EINVAL);
3173 	}
3174 
3175 	switch (vpg->volume_state) {
3176 	case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
3177 	case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
3178 		bv->bv_status = BIOC_SVONLINE;
3179 		break;
3180 	case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
3181 		if (ISSET(lemtoh32(&vpg->volume_status),
3182 		    MPII_CFG_RAID_VOL_0_STATUS_RESYNC)) {
3183 			bv->bv_status = BIOC_SVREBUILD;
3184 			bv->bv_percent = dev->percent;
3185 		} else
3186 			bv->bv_status = BIOC_SVDEGRADED;
3187 		break;
3188 	case MPII_CFG_RAID_VOL_0_STATE_FAILED:
3189 		bv->bv_status = BIOC_SVOFFLINE;
3190 		break;
3191 	case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
3192 		bv->bv_status = BIOC_SVBUILDING;
3193 		break;
3194 	case MPII_CFG_RAID_VOL_0_STATE_MISSING:
3195 	default:
3196 		bv->bv_status = BIOC_SVINVALID;
3197 		break;
3198 	}
3199 
3200 	switch (vpg->volume_type) {
3201 	case MPII_CFG_RAID_VOL_0_TYPE_RAID0:
3202 		bv->bv_level = 0;
3203 		break;
3204 	case MPII_CFG_RAID_VOL_0_TYPE_RAID1:
3205 		bv->bv_level = 1;
3206 		break;
3207 	case MPII_CFG_RAID_VOL_0_TYPE_RAID1E:
3208 	case MPII_CFG_RAID_VOL_0_TYPE_RAID10:
3209 		bv->bv_level = 10;
3210 		break;
3211 	default:
3212 		bv->bv_level = -1;
3213 	}
3214 
3215 	if ((rv = mpii_bio_hs(sc, NULL, 0, vpg->hot_spare_pool, &hcnt)) != 0) {
3216 		free(vpg, M_TEMP, 0);
3217 		return (rv);
3218 	}
3219 
3220 	bv->bv_nodisk = vpg->num_phys_disks + hcnt;
3221 
3222 	bv->bv_size = letoh64(vpg->max_lba) * lemtoh16(&vpg->block_size);
3223 
3224 	lnk = scsi_get_link(sc->sc_scsibus, dev->slot, 0);
3225 	if (lnk != NULL) {
3226 		scdev = lnk->device_softc;
3227 		strlcpy(bv->bv_dev, scdev->dv_xname, sizeof(bv->bv_dev));
3228 	}
3229 
3230 	free(vpg, M_TEMP, 0);
3231 	return (0);
3232 }
3233 
3234 int
3235 mpii_ioctl_disk(struct mpii_softc *sc, struct bioc_disk *bd)
3236 {
3237 	struct mpii_cfg_raid_vol_pg0		*vpg;
3238 	struct mpii_cfg_raid_vol_pg0_physdisk	*pd;
3239 	struct mpii_cfg_hdr			hdr;
3240 	struct mpii_device			*dev;
3241 	size_t					pagelen;
3242 	u_int16_t				volh;
3243 	u_int8_t				dn;
3244 
3245 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_disk %d/%d\n",
3246 	    DEVNAME(sc), bd->bd_volid, bd->bd_diskid);
3247 
3248 	if ((dev = mpii_find_vol(sc, bd->bd_volid)) == NULL)
3249 		return (ENODEV);
3250 	volh = dev->dev_handle;
3251 
3252 	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3253 	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
3254 		printf("%s: unable to fetch header for raid volume page 0\n",
3255 		    DEVNAME(sc));
3256 		return (EINVAL);
3257 	}
3258 
3259 	pagelen = hdr.page_length * 4;
3260 	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
3261 	if (vpg == NULL) {
3262 		printf("%s: unable to allocate space for raid "
3263 		    "volume page 0\n", DEVNAME(sc));
3264 		return (ENOMEM);
3265 	}
3266 
3267 	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
3268 	    &hdr, 1, vpg, pagelen) != 0) {
3269 		printf("%s: unable to fetch raid volume page 0\n",
3270 		    DEVNAME(sc));
3271 		free(vpg, M_TEMP, 0);
3272 		return (EINVAL);
3273 	}
3274 
3275 	if (bd->bd_diskid >= vpg->num_phys_disks) {
3276 		int		nvdsk = vpg->num_phys_disks;
3277 		int		hsmap = vpg->hot_spare_pool;
3278 
3279 		free(vpg, M_TEMP, 0);
3280 		return (mpii_bio_hs(sc, bd, nvdsk, hsmap, NULL));
3281 	}
3282 
3283 	pd = (struct mpii_cfg_raid_vol_pg0_physdisk *)(vpg + 1) +
3284 	    bd->bd_diskid;
3285 	dn = pd->phys_disk_num;
3286 
3287 	free(vpg, M_TEMP, 0);
3288 	return (mpii_bio_disk(sc, bd, dn));
3289 }
3290 
3291 int
3292 mpii_bio_hs(struct mpii_softc *sc, struct bioc_disk *bd, int nvdsk,
3293      int hsmap, int *hscnt)
3294 {
3295 	struct mpii_cfg_raid_config_pg0	*cpg;
3296 	struct mpii_raid_config_element	*el;
3297 	struct mpii_ecfg_hdr		ehdr;
3298 	size_t				pagelen;
3299 	int				i, nhs = 0;
3300 
3301 	if (bd)
3302 		DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs %d\n", DEVNAME(sc),
3303 		    bd->bd_diskid - nvdsk);
3304 	else
3305 		DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs\n", DEVNAME(sc));
3306 
3307 	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_CONFIG,
3308 	    0, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG, MPII_PG_EXTENDED,
3309 	    &ehdr) != 0) {
3310 		printf("%s: unable to fetch header for raid config page 0\n",
3311 		    DEVNAME(sc));
3312 		return (EINVAL);
3313 	}
3314 
3315 	pagelen = lemtoh16(&ehdr.ext_page_length) * 4;
3316 	cpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
3317 	if (cpg == NULL) {
3318 		printf("%s: unable to allocate space for raid config page 0\n",
3319 		    DEVNAME(sc));
3320 		return (ENOMEM);
3321 	}
3322 
3323 	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG,
3324 	    MPII_PG_EXTENDED, &ehdr, 1, cpg, pagelen) != 0) {
3325 		printf("%s: unable to fetch raid config page 0\n",
3326 		    DEVNAME(sc));
3327 		free(cpg, M_TEMP, 0);
3328 		return (EINVAL);
3329 	}
3330 
3331 	el = (struct mpii_raid_config_element *)(cpg + 1);
3332 	for (i = 0; i < cpg->num_elements; i++, el++) {
3333 		if (ISSET(lemtoh16(&el->element_flags),
3334 		    MPII_RAID_CONFIG_ELEMENT_FLAG_HSP_PHYS_DISK) &&
3335 		    el->hot_spare_pool == hsmap) {
3336 			/*
3337 			 * diskid comparison is based on the idea that all
3338 			 * disks are counted by the bio(4) in sequence, thus
3339 			 * subtracting the number of disks in the volume
3340 			 * from the diskid yields us a "relative" hotspare
3341 			 * number, which is good enough for us.
3342 			 */
3343 			if (bd != NULL && bd->bd_diskid == nhs + nvdsk) {
3344 				u_int8_t dn = el->phys_disk_num;
3345 
3346 				free(cpg, M_TEMP, 0);
3347 				return (mpii_bio_disk(sc, bd, dn));
3348 			}
3349 			nhs++;
3350 		}
3351 	}
3352 
3353 	if (hscnt)
3354 		*hscnt = nhs;
3355 
3356 	free(cpg, M_TEMP, 0);
3357 	return (0);
3358 }
3359 
3360 int
3361 mpii_bio_disk(struct mpii_softc *sc, struct bioc_disk *bd, u_int8_t dn)
3362 {
3363 	struct mpii_cfg_raid_physdisk_pg0	*ppg;
3364 	struct mpii_cfg_hdr			hdr;
3365 	struct mpii_device			*dev;
3366 	int					len;
3367 
3368 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_disk %d\n", DEVNAME(sc),
3369 	    bd->bd_diskid);
3370 
3371 	ppg = malloc(sizeof(*ppg), M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
3372 	if (ppg == NULL) {
3373 		printf("%s: unable to allocate space for raid physical disk "
3374 		    "page 0\n", DEVNAME(sc));
3375 		return (ENOMEM);
3376 	}
3377 
3378 	hdr.page_version = 0;
3379 	hdr.page_length = sizeof(*ppg) / 4;
3380 	hdr.page_number = 0;
3381 	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_RAID_PD;
3382 
3383 	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_PHYS_DISK_ADDR_NUMBER | dn, 0,
3384 	    &hdr, 1, ppg, sizeof(*ppg)) != 0) {
3385 		printf("%s: unable to fetch raid drive page 0\n",
3386 		    DEVNAME(sc));
3387 		free(ppg, M_TEMP, 0);
3388 		return (EINVAL);
3389 	}
3390 
3391 	bd->bd_target = ppg->phys_disk_num;
3392 
3393 	if ((dev = mpii_find_dev(sc, lemtoh16(&ppg->dev_handle))) == NULL) {
3394 		bd->bd_status = BIOC_SDINVALID;
3395 		free(ppg, M_TEMP, 0);
3396 		return (0);
3397 	}
3398 
3399 	switch (ppg->phys_disk_state) {
3400 	case MPII_CFG_RAID_PHYDISK_0_STATE_ONLINE:
3401 	case MPII_CFG_RAID_PHYDISK_0_STATE_OPTIMAL:
3402 		bd->bd_status = BIOC_SDONLINE;
3403 		break;
3404 	case MPII_CFG_RAID_PHYDISK_0_STATE_OFFLINE:
3405 		if (ppg->offline_reason ==
3406 		    MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILED ||
3407 		    ppg->offline_reason ==
3408 		    MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILEDREQ)
3409 			bd->bd_status = BIOC_SDFAILED;
3410 		else
3411 			bd->bd_status = BIOC_SDOFFLINE;
3412 		break;
3413 	case MPII_CFG_RAID_PHYDISK_0_STATE_DEGRADED:
3414 		bd->bd_status = BIOC_SDFAILED;
3415 		break;
3416 	case MPII_CFG_RAID_PHYDISK_0_STATE_REBUILDING:
3417 		bd->bd_status = BIOC_SDREBUILD;
3418 		break;
3419 	case MPII_CFG_RAID_PHYDISK_0_STATE_HOTSPARE:
3420 		bd->bd_status = BIOC_SDHOTSPARE;
3421 		break;
3422 	case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCONFIGURED:
3423 		bd->bd_status = BIOC_SDUNUSED;
3424 		break;
3425 	case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCOMPATIBLE:
3426 	default:
3427 		bd->bd_status = BIOC_SDINVALID;
3428 		break;
3429 	}
3430 
3431 	bd->bd_size = letoh64(ppg->dev_max_lba) * lemtoh16(&ppg->block_size);
3432 
3433 	scsi_strvis(bd->bd_vendor, ppg->vendor_id, sizeof(ppg->vendor_id));
3434 	len = strlen(bd->bd_vendor);
3435 	bd->bd_vendor[len] = ' ';
3436 	scsi_strvis(&bd->bd_vendor[len + 1], ppg->product_id,
3437 	    sizeof(ppg->product_id));
3438 	scsi_strvis(bd->bd_serial, ppg->serial, sizeof(ppg->serial));
3439 
3440 	free(ppg, M_TEMP, 0);
3441 	return (0);
3442 }
3443 
3444 struct mpii_device *
3445 mpii_find_vol(struct mpii_softc *sc, int volid)
3446 {
3447 	struct mpii_device	*dev = NULL;
3448 
3449 	if (sc->sc_vd_id_low + volid >= sc->sc_max_devices)
3450 		return (NULL);
3451 	dev = sc->sc_devs[sc->sc_vd_id_low + volid];
3452 	if (dev && ISSET(dev->flags, MPII_DF_VOLUME))
3453 		return (dev);
3454 	return (NULL);
3455 }
3456 
3457 #ifndef SMALL_KERNEL
3458 /*
3459  * Non-sleeping lightweight version of mpii_ioctl_vol
3460  */
3461 int
3462 mpii_bio_volstate(struct mpii_softc *sc, struct bioc_vol *bv)
3463 {
3464 	struct mpii_cfg_raid_vol_pg0	*vpg;
3465 	struct mpii_cfg_hdr		hdr;
3466 	struct mpii_device		*dev = NULL;
3467 	size_t				pagelen;
3468 	u_int16_t			volh;
3469 
3470 	if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL)
3471 		return (ENODEV);
3472 	volh = dev->dev_handle;
3473 
3474 	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3475 	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, MPII_PG_POLL, &hdr) != 0) {
3476 		DNPRINTF(MPII_D_MISC, "%s: unable to fetch header for raid "
3477 		    "volume page 0\n", DEVNAME(sc));
3478 		return (EINVAL);
3479 	}
3480 
3481 	pagelen = hdr.page_length * 4;
3482 	vpg = malloc(pagelen, M_TEMP, M_NOWAIT | M_ZERO);
3483 	if (vpg == NULL) {
3484 		DNPRINTF(MPII_D_MISC, "%s: unable to allocate space for raid "
3485 		    "volume page 0\n", DEVNAME(sc));
3486 		return (ENOMEM);
3487 	}
3488 
3489 	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh,
3490 	    MPII_PG_POLL, &hdr, 1, vpg, pagelen) != 0) {
3491 		DNPRINTF(MPII_D_MISC, "%s: unable to fetch raid volume "
3492 		    "page 0\n", DEVNAME(sc));
3493 		free(vpg, M_TEMP, 0);
3494 		return (EINVAL);
3495 	}
3496 
3497 	switch (vpg->volume_state) {
3498 	case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
3499 	case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
3500 		bv->bv_status = BIOC_SVONLINE;
3501 		break;
3502 	case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
3503 		if (ISSET(lemtoh32(&vpg->volume_status),
3504 		    MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
3505 			bv->bv_status = BIOC_SVREBUILD;
3506 		else
3507 			bv->bv_status = BIOC_SVDEGRADED;
3508 		break;
3509 	case MPII_CFG_RAID_VOL_0_STATE_FAILED:
3510 		bv->bv_status = BIOC_SVOFFLINE;
3511 		break;
3512 	case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
3513 		bv->bv_status = BIOC_SVBUILDING;
3514 		break;
3515 	case MPII_CFG_RAID_VOL_0_STATE_MISSING:
3516 	default:
3517 		bv->bv_status = BIOC_SVINVALID;
3518 		break;
3519 	}
3520 
3521 	free(vpg, M_TEMP, 0);
3522 	return (0);
3523 }
3524 
3525 int
3526 mpii_create_sensors(struct mpii_softc *sc)
3527 {
3528 	struct scsibus_softc	*ssc = sc->sc_scsibus;
3529 	struct device		*dev;
3530 	struct scsi_link	*link;
3531 	int			i;
3532 
3533 	sc->sc_sensors = mallocarray(sc->sc_vd_count, sizeof(struct ksensor),
3534 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3535 	if (sc->sc_sensors == NULL)
3536 		return (1);
3537 	sc->sc_nsensors = sc->sc_vd_count;
3538 
3539 	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
3540 	    sizeof(sc->sc_sensordev.xname));
3541 
3542 	for (i = 0; i < sc->sc_vd_count; i++) {
3543 		link = scsi_get_link(ssc, i + sc->sc_vd_id_low, 0);
3544 		if (link == NULL)
3545 			goto bad;
3546 
3547 		dev = link->device_softc;
3548 
3549 		sc->sc_sensors[i].type = SENSOR_DRIVE;
3550 		sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
3551 
3552 		strlcpy(sc->sc_sensors[i].desc, dev->dv_xname,
3553 		    sizeof(sc->sc_sensors[i].desc));
3554 
3555 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
3556 	}
3557 
3558 	if (sensor_task_register(sc, mpii_refresh_sensors, 10) == NULL)
3559 		goto bad;
3560 
3561 	sensordev_install(&sc->sc_sensordev);
3562 
3563 	return (0);
3564 
3565 bad:
3566 	free(sc->sc_sensors, M_DEVBUF, 0);
3567 
3568 	return (1);
3569 }
3570 
3571 void
3572 mpii_refresh_sensors(void *arg)
3573 {
3574 	struct mpii_softc	*sc = arg;
3575 	struct bioc_vol		bv;
3576 	int			i;
3577 
3578 	for (i = 0; i < sc->sc_nsensors; i++) {
3579 		memset(&bv, 0, sizeof(bv));
3580 		bv.bv_volid = i;
3581 		if (mpii_bio_volstate(sc, &bv))
3582 			return;
3583 		switch (bv.bv_status) {
3584 		case BIOC_SVOFFLINE:
3585 			sc->sc_sensors[i].value = SENSOR_DRIVE_FAIL;
3586 			sc->sc_sensors[i].status = SENSOR_S_CRIT;
3587 			break;
3588 		case BIOC_SVDEGRADED:
3589 			sc->sc_sensors[i].value = SENSOR_DRIVE_PFAIL;
3590 			sc->sc_sensors[i].status = SENSOR_S_WARN;
3591 			break;
3592 		case BIOC_SVREBUILD:
3593 			sc->sc_sensors[i].value = SENSOR_DRIVE_REBUILD;
3594 			sc->sc_sensors[i].status = SENSOR_S_WARN;
3595 			break;
3596 		case BIOC_SVONLINE:
3597 			sc->sc_sensors[i].value = SENSOR_DRIVE_ONLINE;
3598 			sc->sc_sensors[i].status = SENSOR_S_OK;
3599 			break;
3600 		case BIOC_SVINVALID:
3601 			/* FALLTHROUGH */
3602 		default:
3603 			sc->sc_sensors[i].value = 0; /* unknown */
3604 			sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
3605 		}
3606 	}
3607 }
3608 #endif /* SMALL_KERNEL */
3609 #endif /* NBIO > 0 */
3610