xref: /netbsd-src/sys/dev/pci/mpii.c (revision f3cfa6f6ce31685c6c4a758bc430e69eb99f50a4)
1 /* $NetBSD: mpii.c,v 1.22 2019/03/11 14:35:22 kardel Exp $ */
2 /*	$OpenBSD: mpii.c,v 1.115 2018/08/14 05:22:21 jmatthew Exp $	*/
3 /*
4  * Copyright (c) 2010, 2012 Mike Belopuhov
5  * Copyright (c) 2009 James Giannoules
6  * Copyright (c) 2005 - 2010 David Gwynne <dlg@openbsd.org>
7  * Copyright (c) 2005 - 2010 Marco Peereboom <marco@openbsd.org>
8  *
9  * Permission to use, copy, modify, and distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 #include <sys/cdefs.h>
23 __KERNEL_RCSID(0, "$NetBSD: mpii.c,v 1.22 2019/03/11 14:35:22 kardel Exp $");
24 
25 #include "bio.h"
26 
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/buf.h>
30 #include <sys/device.h>
31 #include <sys/ioctl.h>
32 #include <sys/malloc.h>
33 #include <sys/kernel.h>
34 #include <sys/mutex.h>
35 #include <sys/condvar.h>
36 #include <sys/dkio.h>
37 #include <sys/tree.h>
38 
39 #include <dev/pci/pcireg.h>
40 #include <dev/pci/pcivar.h>
41 #include <dev/pci/pcidevs.h>
42 
43 #include <dev/scsipi/scsipi_all.h>
44 #include <dev/scsipi/scsi_all.h>
45 #include <dev/scsipi/scsiconf.h>
46 
47 #if NBIO > 0
48 #include <dev/biovar.h>
49 #include <dev/sysmon/sysmonvar.h>
50 #include <sys/envsys.h>
51 #endif
52 
53 #include <dev/pci/mpiireg.h>
54 
/* Uncomment MPII_DEBUG (and enable bits in mpii_debug below) for debug output. */
// #define MPII_DEBUG
#ifdef MPII_DEBUG
#define DPRINTF(x...)		do { if (mpii_debug) printf(x); } while(0)
#define DNPRINTF(n,x...)	do { if (mpii_debug & (n)) printf(x); } while(0)
/* debug category bits tested by DNPRINTF() against mpii_debug */
#define	MPII_D_CMD		(0x0001)
#define	MPII_D_INTR		(0x0002)
#define	MPII_D_MISC		(0x0004)
#define	MPII_D_DMA		(0x0008)
#define	MPII_D_IOCTL		(0x0010)
#define	MPII_D_RW		(0x0020)
#define	MPII_D_MEM		(0x0040)
#define	MPII_D_CCB		(0x0080)
#define	MPII_D_PPR		(0x0100)
#define	MPII_D_RAID		(0x0200)
#define	MPII_D_EVT		(0x0400)
#define MPII_D_CFG		(0x0800)
#define MPII_D_MAP		(0x1000)

/* runtime-selectable debug mask; uncomment categories to enable them */
u_int32_t  mpii_debug = 0
//		| MPII_D_CMD
//		| MPII_D_INTR
//		| MPII_D_MISC
//		| MPII_D_DMA
//		| MPII_D_IOCTL
//		| MPII_D_RW
//		| MPII_D_MEM
//		| MPII_D_CCB
//		| MPII_D_PPR
//		| MPII_D_RAID
//		| MPII_D_EVT
//		| MPII_D_CFG
//		| MPII_D_MAP
	;
#else
#define DPRINTF(x...)
#define DNPRINTF(n,x...)
#endif
92 
#define MPII_REQUEST_SIZE		(512)
#define MPII_REQUEST_CREDIT		(128)

/*
 * A chunk of bus-dma memory: the map, its (single) segment, the size and
 * the kernel virtual address of the mapping.
 */
struct mpii_dmamem {
	bus_dmamap_t		mdm_map;
	bus_dma_segment_t	mdm_seg;
	size_t			mdm_size;
	void 			*mdm_kva;
};
/* accessors: dma map, device (bus) address of segment 0, kernel va */
#define MPII_DMA_MAP(_mdm) ((_mdm)->mdm_map)
#define MPII_DMA_DVA(_mdm) ((uint64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MPII_DMA_KVA(_mdm) ((_mdm)->mdm_kva)
105 
106 struct mpii_softc;
107 
/*
 * Reply control block: host-side bookkeeping for one reply frame inside
 * the sc_replies DMA area.
 */
struct mpii_rcb {
	SIMPLEQ_ENTRY(mpii_rcb)	rcb_link;
	void			*rcb_reply;	/* kva of the reply frame */
	u_int32_t		rcb_reply_dva;	/* device address of same */
};

SIMPLEQ_HEAD(mpii_rcb_list, mpii_rcb);
115 
/*
 * Host-side record of one device known to the IOC (physical disk,
 * volume, hot spare), looked up by dev_handle via mpii_find_dev().
 */
struct mpii_device {
	int			flags;
#define MPII_DF_ATTACH		(0x0001)
#define MPII_DF_DETACH		(0x0002)
#define MPII_DF_HIDDEN		(0x0004)
#define MPII_DF_UNUSED		(0x0008)
#define MPII_DF_VOLUME		(0x0010)
#define MPII_DF_VOLUME_DISK	(0x0020)
#define MPII_DF_HOT_SPARE	(0x0040)
	/* NOTE(review): slot presumably maps to the scsipi target id -- confirm */
	short			slot;
	short			percent;	/* e.g. resync progress for volumes */
	u_int16_t		dev_handle;	/* IOC-assigned device handle */
	u_int16_t		enclosure;
	u_int16_t		expander;
	u_int8_t		phy_num;
	u_int8_t		physical_port;
};
133 
/*
 * Command control block: tracks one request frame from submission
 * through completion.
 */
struct mpii_ccb {
	struct mpii_softc	*ccb_sc;

	void *			ccb_cookie;	/* caller context, e.g. scsipi_xfer */
	kmutex_t		ccb_mtx;
	kcondvar_t		ccb_cv;

	bus_dmamap_t		ccb_dmamap;	/* data transfer map */

	bus_addr_t		ccb_offset;	/* offset of the frame in sc_requests */
	void			*ccb_cmd;	/* kva of the request frame */
	bus_addr_t		ccb_cmd_dva;	/* device address of the frame */
	u_int16_t		ccb_dev_handle;
	u_int16_t		ccb_smid;	/* system message id; 1-based */

	/* lifecycle of the ccb; TIMEOUT marks commands being recovered */
	volatile enum {
		MPII_CCB_FREE,
		MPII_CCB_READY,
		MPII_CCB_QUEUED,
		MPII_CCB_TIMEOUT
	}			ccb_state;

	void			(*ccb_done)(struct mpii_ccb *);	/* completion callback */
	struct mpii_rcb		*ccb_rcb;	/* reply frame, if the IOC sent one */

	SIMPLEQ_ENTRY(mpii_ccb)	ccb_link;
};

SIMPLEQ_HEAD(mpii_ccb_list, mpii_ccb);
163 
/*
 * Per-controller state; one instance per attached mpii(4) device.
 */
struct mpii_softc {
	device_t		sc_dev;

	/* PCI attachment */
	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	void			*sc_ih;		/* interrupt handle */
	pci_intr_handle_t	*sc_pihp;

	/* scsipi glue */
	struct scsipi_adapter	sc_adapt;
	struct scsipi_channel	sc_chan;
	device_t		sc_child; /* our scsibus */

	int			sc_flags;
#define MPII_F_RAID		(1<<1)
#define MPII_F_SAS3		(1<<2)

	/* target table, sized to sc_max_devices; protected by sc_devs_mtx */
	struct mpii_device	**sc_devs;
	kmutex_t		sc_devs_mtx;

	/* register window and DMA tag */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	kmutex_t		sc_req_mtx;
	kmutex_t		sc_rep_mtx;	/* held while draining the reply post queue */

	/* frame sizes and limits negotiated with the IOC */
	ushort			sc_reply_size;
	ushort			sc_request_size;

	ushort			sc_max_cmds;
	ushort			sc_num_reply_frames;
	u_int			sc_reply_free_qdepth;
	u_int			sc_reply_post_qdepth;

	/* SGE layout of a request frame (see mpii_load_xs*) */
	ushort			sc_chain_sge;
	ushort			sc_max_sgl;

	u_int8_t		sc_ioc_event_replay;

	/* IOC/port facts */
	u_int8_t		sc_porttype;
	u_int8_t		sc_max_volumes;
	u_int16_t		sc_max_devices;
	u_int16_t		sc_vd_count;
	u_int16_t		sc_vd_id_low;
	u_int16_t		sc_pd_id_start;
	int			sc_ioc_number;
	u_int8_t		sc_vf_id;

	/* ccb pool; free list protected by sc_ccb_free_mtx */
	struct mpii_ccb		*sc_ccbs;
	struct mpii_ccb_list	sc_ccb_free;
	kmutex_t		sc_ccb_free_mtx;
	kcondvar_t		sc_ccb_free_cv;

	/* timed-out commands, recovered by the timeout workqueue */
	struct mpii_ccb_list	sc_ccb_tmos;
	kmutex_t		sc_ssb_tmomtx;
	struct workqueue	*sc_ssb_tmowk;
	struct work		sc_ssb_tmowork;

	struct mpii_dmamem	*sc_requests;	/* request frames */

	struct mpii_dmamem	*sc_replies;	/* reply frames */
	struct mpii_rcb		*sc_rcbs;

	/* reply post queue (IOC -> host) */
	struct mpii_dmamem	*sc_reply_postq;
	struct mpii_reply_descr	*sc_reply_postq_kva;
	u_int			sc_reply_post_host_index;

	/* reply free queue (host -> IOC) */
	struct mpii_dmamem	*sc_reply_freeq;
	u_int			sc_reply_free_host_index;
	kmutex_t		sc_reply_free_mtx;

	/* queued SAS events and their processing workqueue */
	struct mpii_rcb_list	sc_evt_sas_queue;
	kmutex_t		sc_evt_sas_mtx;
	struct workqueue	*sc_evt_sas_wq;
	struct work		sc_evt_sas_work;

	/* events awaiting acknowledgement to the IOC */
	struct mpii_rcb_list	sc_evt_ack_queue;
	kmutex_t		sc_evt_ack_mtx;
	struct workqueue	*sc_evt_ack_wq;
	struct work		sc_evt_ack_work;

	/* sysmon/envsys sensors (bio support) */
	struct sysmon_envsys	*sc_sme;
	envsys_data_t		*sc_sensors;
};
250 
251 int	mpii_match(device_t, cfdata_t, void *);
252 void	mpii_attach(device_t, device_t, void *);
253 int	mpii_detach(device_t, int);
254 void	mpii_childdetached(device_t, device_t);
255 int	mpii_rescan(device_t, const char *, const int *);
256 
257 int	mpii_intr(void *);
258 
259 CFATTACH_DECL3_NEW(mpii, sizeof(struct mpii_softc),
260     mpii_match, mpii_attach, mpii_detach, NULL, mpii_rescan,
261     mpii_childdetached, DVF_DETACH_SHUTDOWN);
262 
263 void		mpii_scsipi_request(struct scsipi_channel *,
264 			scsipi_adapter_req_t, void *);
265 void		mpii_scsi_cmd_done(struct mpii_ccb *);
266 
267 struct mpii_dmamem *
268 		mpii_dmamem_alloc(struct mpii_softc *, size_t);
269 void		mpii_dmamem_free(struct mpii_softc *,
270 		    struct mpii_dmamem *);
271 int		mpii_alloc_ccbs(struct mpii_softc *);
272 struct mpii_ccb *mpii_get_ccb(struct mpii_softc *);
273 void		mpii_put_ccb(struct mpii_softc *, struct mpii_ccb *);
274 int		mpii_alloc_replies(struct mpii_softc *);
275 int		mpii_alloc_queues(struct mpii_softc *);
276 void		mpii_push_reply(struct mpii_softc *, struct mpii_rcb *);
277 void		mpii_push_replies(struct mpii_softc *);
278 
279 void		mpii_scsi_cmd_tmo(void *);
280 void		mpii_scsi_cmd_tmo_handler(struct work *, void *);
281 void		mpii_scsi_cmd_tmo_done(struct mpii_ccb *);
282 
283 int		mpii_insert_dev(struct mpii_softc *, struct mpii_device *);
284 int		mpii_remove_dev(struct mpii_softc *, struct mpii_device *);
285 struct mpii_device *
286 		mpii_find_dev(struct mpii_softc *, u_int16_t);
287 
288 void		mpii_start(struct mpii_softc *, struct mpii_ccb *);
289 int		mpii_poll(struct mpii_softc *, struct mpii_ccb *);
290 void		mpii_poll_done(struct mpii_ccb *);
291 struct mpii_rcb *
292 		mpii_reply(struct mpii_softc *, struct mpii_reply_descr *);
293 
294 void		mpii_wait(struct mpii_softc *, struct mpii_ccb *);
295 void		mpii_wait_done(struct mpii_ccb *);
296 
297 void		mpii_init_queues(struct mpii_softc *);
298 
299 int		mpii_load_xs(struct mpii_ccb *);
300 int		mpii_load_xs_sas3(struct mpii_ccb *);
301 
302 u_int32_t	mpii_read(struct mpii_softc *, bus_size_t);
303 void		mpii_write(struct mpii_softc *, bus_size_t, u_int32_t);
304 int		mpii_wait_eq(struct mpii_softc *, bus_size_t, u_int32_t,
305 		    u_int32_t);
306 int		mpii_wait_ne(struct mpii_softc *, bus_size_t, u_int32_t,
307 		    u_int32_t);
308 
309 int		mpii_init(struct mpii_softc *);
310 int		mpii_reset_soft(struct mpii_softc *);
311 int		mpii_reset_hard(struct mpii_softc *);
312 
313 int		mpii_handshake_send(struct mpii_softc *, void *, size_t);
314 int		mpii_handshake_recv_dword(struct mpii_softc *,
315 		    u_int32_t *);
316 int		mpii_handshake_recv(struct mpii_softc *, void *, size_t);
317 
318 void		mpii_empty_done(struct mpii_ccb *);
319 
320 int		mpii_iocinit(struct mpii_softc *);
321 int		mpii_iocfacts(struct mpii_softc *);
322 int		mpii_portfacts(struct mpii_softc *);
323 int		mpii_portenable(struct mpii_softc *);
324 int		mpii_cfg_coalescing(struct mpii_softc *);
325 int		mpii_board_info(struct mpii_softc *);
326 int		mpii_target_map(struct mpii_softc *);
327 
328 int		mpii_eventnotify(struct mpii_softc *);
329 void		mpii_eventnotify_done(struct mpii_ccb *);
330 void		mpii_eventack(struct work *, void *);
331 void		mpii_eventack_done(struct mpii_ccb *);
332 void		mpii_event_process(struct mpii_softc *, struct mpii_rcb *);
333 void		mpii_event_done(struct mpii_softc *, struct mpii_rcb *);
334 void		mpii_event_sas(struct mpii_softc *, struct mpii_rcb *);
335 void		mpii_event_sas_work(struct work *, void *);
336 void		mpii_event_raid(struct mpii_softc *,
337 		    struct mpii_msg_event_reply *);
338 void		mpii_event_discovery(struct mpii_softc *,
339 		    struct mpii_msg_event_reply *);
340 
341 void		mpii_sas_remove_device(struct mpii_softc *, u_int16_t);
342 
343 int		mpii_req_cfg_header(struct mpii_softc *, u_int8_t,
344 		    u_int8_t, u_int32_t, int, void *);
345 int		mpii_req_cfg_page(struct mpii_softc *, u_int32_t, int,
346 		    void *, int, void *, size_t);
347 
348 #if 0
349 int		mpii_ioctl_cache(struct scsi_link *, u_long, struct dk_cache *);
350 #endif
351 
352 #if NBIO > 0
353 int		mpii_ioctl(device_t, u_long, void *);
354 int		mpii_ioctl_inq(struct mpii_softc *, struct bioc_inq *);
355 int		mpii_ioctl_vol(struct mpii_softc *, struct bioc_vol *);
356 int		mpii_ioctl_disk(struct mpii_softc *, struct bioc_disk *);
357 int		mpii_bio_hs(struct mpii_softc *, struct bioc_disk *, int,
358 		    int, int *);
359 int		mpii_bio_disk(struct mpii_softc *, struct bioc_disk *,
360 		    u_int8_t);
361 struct mpii_device *
362 		mpii_find_vol(struct mpii_softc *, int);
363 #ifndef SMALL_KERNEL
364  int		mpii_bio_volstate(struct mpii_softc *, struct bioc_vol *);
365 int		mpii_create_sensors(struct mpii_softc *);
366 void		mpii_refresh_sensors(struct sysmon_envsys *, envsys_data_t *);
367 int		mpii_destroy_sensors(struct mpii_softc *);
368 #endif /* SMALL_KERNEL */
369 #endif /* NBIO > 0 */
370 
#define DEVNAME(s)		(device_xname((s)->sc_dev))

/* number of 32-bit words in an object */
#define dwordsof(s)		(sizeof(s) / sizeof(u_int32_t))

/* doorbell and interrupt-status register shorthands */
#define mpii_read_db(s)		mpii_read((s), MPII_DOORBELL)
#define mpii_write_db(s, v)	mpii_write((s), MPII_DOORBELL, (v))
#define mpii_read_intr(s)	mpii_read((s), MPII_INTR_STATUS)
#define mpii_write_intr(s, v)	mpii_write((s), MPII_INTR_STATUS, (v))
#define mpii_reply_waiting(s)	((mpii_read_intr((s)) & MPII_INTR_STATUS_REPLY)\
				    == MPII_INTR_STATUS_REPLY)

/* publish new reply free/post queue host indices to the IOC */
#define mpii_write_reply_free(s, v) \
    bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
    MPII_REPLY_FREE_HOST_INDEX, (v))
#define mpii_write_reply_post(s, v) \
    bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
    MPII_REPLY_POST_HOST_INDEX, (v))

/* wait for the IOC to raise, or acknowledge, a doorbell interrupt */
#define mpii_wait_db_int(s)	mpii_wait_ne((s), MPII_INTR_STATUS, \
				    MPII_INTR_STATUS_IOC2SYSDB, 0)
#define mpii_wait_db_ack(s)	mpii_wait_eq((s), MPII_INTR_STATUS, \
				    MPII_INTR_STATUS_SYS2IOCDB, 0)
393 
394 static inline void
395 mpii_dvatosge(struct mpii_sge *sge, u_int64_t dva)
396 {
397 	sge->sg_addr_lo = htole32(dva);
398 	sge->sg_addr_hi = htole32(dva >> 32);
399 }
400 
/* configuration page request flags (see mpii_req_cfg_header/mpii_req_cfg_page) */
#define MPII_PG_EXTENDED	(1<<0)
#define MPII_PG_POLL		(1<<1)
#define MPII_PG_FMT		"\020" "\002POLL" "\001EXTENDED"

/*
 * PCI IDs of the supported LSI/Broadcom SAS2/SAS3 controllers; the
 * table is terminated by a zero vendor id.
 */
static const struct mpii_pci_product {
	pci_vendor_id_t         mpii_vendor;
	pci_product_id_t        mpii_product;
} mpii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2004 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2008 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_5 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2116_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2116_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_5 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_6 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3004 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3008 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3408 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3416 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3508 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3508_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3516 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3516_1 },
	{ 0, 0}
};
439 
440 int
441 mpii_match(device_t parent, cfdata_t match, void *aux)
442 {
443 	struct pci_attach_args *pa = aux;
444 	const struct mpii_pci_product *mpii;
445 
446 	for (mpii = mpii_devices; mpii->mpii_vendor != 0; mpii++) {
447 		if (PCI_VENDOR(pa->pa_id) == mpii->mpii_vendor &&
448 		    PCI_PRODUCT(pa->pa_id) == mpii->mpii_product)
449 			return (1);
450 	}
451 	return (0);
452 }
453 
454 void
455 mpii_attach(device_t parent, device_t self, void *aux)
456 {
457 	struct mpii_softc		*sc = device_private(self);
458 	struct pci_attach_args		*pa = aux;
459 	pcireg_t			memtype;
460 	int				r;
461 	struct mpii_ccb			*ccb;
462 	struct scsipi_adapter *adapt = &sc->sc_adapt;
463 	struct scsipi_channel *chan = &sc->sc_chan;
464 	char intrbuf[PCI_INTRSTR_LEN];
465 	const char *intrstr;
466 
467 	pci_aprint_devinfo(pa, NULL);
468 
469 	sc->sc_pc = pa->pa_pc;
470 	sc->sc_tag = pa->pa_tag;
471 	sc->sc_dmat = pa->pa_dmat;
472 	sc->sc_dev = self;
473 
474 	mutex_init(&sc->sc_req_mtx, MUTEX_DEFAULT, IPL_BIO);
475 	mutex_init(&sc->sc_rep_mtx, MUTEX_DEFAULT, IPL_BIO);
476 
477 	/* find the appropriate memory base */
478 	for (r = PCI_MAPREG_START; r < PCI_MAPREG_END; r += sizeof(memtype)) {
479 		memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, r);
480 		if ((memtype & PCI_MAPREG_TYPE_MASK) == PCI_MAPREG_TYPE_MEM)
481 			break;
482 	}
483 	if (r >= PCI_MAPREG_END) {
484 		aprint_error_dev(self,
485 		    "unable to locate system interface registers\n");
486 		return;
487 	}
488 
489 	if (pci_mapreg_map(pa, r, memtype, 0, &sc->sc_iot, &sc->sc_ioh,
490 	    NULL, &sc->sc_ios) != 0) {
491 		aprint_error_dev(self,
492 		    "unable to map system interface registers\n");
493 		return;
494 	}
495 
496 	/* disable the expansion rom */
497 	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_MAPREG_ROM,
498 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_MAPREG_ROM) &
499 	    ~PCI_MAPREG_ROM_ENABLE);
500 
501 	/* disable interrupts */
502 	mpii_write(sc, MPII_INTR_MASK,
503 	    MPII_INTR_MASK_RESET | MPII_INTR_MASK_REPLY |
504 	    MPII_INTR_MASK_DOORBELL);
505 
506 	/* hook up the interrupt */
507 	if (pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0)) {
508 		aprint_error_dev(self, "unable to map interrupt\n");
509 		goto unmap;
510 	}
511 	intrstr = pci_intr_string(pa->pa_pc, sc->sc_pihp[0],
512 	    intrbuf, sizeof(intrbuf));
513 	pci_intr_setattr(pa->pa_pc, &sc->sc_pihp[0], PCI_INTR_MPSAFE, true);
514 	sc->sc_ih = pci_intr_establish_xname(pa->pa_pc, sc->sc_pihp[0], IPL_BIO,
515 	    mpii_intr, sc, device_xname(self));
516 	if (sc->sc_ih == NULL) {
517 		aprint_error_dev(self, "couldn't establish interrupt");
518 		if (intrstr != NULL)
519 			aprint_error(" at %s", intrstr);
520 		aprint_error("\n");
521 		return;
522 	}
523 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
524 	aprint_naive("\n");
525 
526 	if (mpii_iocfacts(sc) != 0) {
527 		aprint_error_dev(self,  "unable to get iocfacts\n");
528 		goto unmap;
529 	}
530 
531 	if (mpii_init(sc) != 0) {
532 		aprint_error_dev(self, "unable to initialize ioc\n");
533 		goto unmap;
534 	}
535 
536 	if (mpii_alloc_ccbs(sc) != 0) {
537 		/* error already printed */
538 		goto unmap;
539 	}
540 
541 	if (mpii_alloc_replies(sc) != 0) {
542 		aprint_error_dev(self, "unable to allocated reply space\n");
543 		goto free_ccbs;
544 	}
545 
546 	if (mpii_alloc_queues(sc) != 0) {
547 		aprint_error_dev(self, "unable to allocate reply queues\n");
548 		goto free_replies;
549 	}
550 
551 	if (mpii_iocinit(sc) != 0) {
552 		aprint_error_dev(self, "unable to send iocinit\n");
553 		goto free_queues;
554 	}
555 
556 	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
557 	    MPII_DOORBELL_STATE_OPER) != 0) {
558 		aprint_error_dev(self, "state: 0x%08x\n",
559 			mpii_read_db(sc) & MPII_DOORBELL_STATE);
560 		aprint_error_dev(self, "operational state timeout\n");
561 		goto free_queues;
562 	}
563 
564 	mpii_push_replies(sc);
565 	mpii_init_queues(sc);
566 
567 	if (mpii_board_info(sc) != 0) {
568 		aprint_error_dev(self, "unable to get manufacturing page 0\n");
569 		goto free_queues;
570 	}
571 
572 	if (mpii_portfacts(sc) != 0) {
573 		aprint_error_dev(self, "unable to get portfacts\n");
574 		goto free_queues;
575 	}
576 
577 	if (mpii_target_map(sc) != 0) {
578 		aprint_error_dev(self, "unable to setup target mappings\n");
579 		goto free_queues;
580 	}
581 
582 	if (mpii_cfg_coalescing(sc) != 0) {
583 		aprint_error_dev(self, "unable to configure coalescing\n");
584 		goto free_queues;
585 	}
586 
587 	/* XXX bail on unsupported porttype? */
588 	if ((sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_PHYSICAL) ||
589 	    (sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_VIRTUAL) ||
590 	    (sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_TRI_MODE)) {
591 		if (mpii_eventnotify(sc) != 0) {
592 			aprint_error_dev(self, "unable to enable events\n");
593 			goto free_queues;
594 		}
595 	}
596 
597 	mutex_init(&sc->sc_devs_mtx, MUTEX_DEFAULT, IPL_BIO);
598 	sc->sc_devs = malloc(sc->sc_max_devices * sizeof(struct mpii_device *),
599 	    M_DEVBUF, M_NOWAIT | M_ZERO);
600 	if (sc->sc_devs == NULL) {
601 		aprint_error_dev(self,
602 		    "unable to allocate memory for mpii_device\n");
603 		goto free_queues;
604 	}
605 
606 	if (mpii_portenable(sc) != 0) {
607 		aprint_error_dev(self, "unable to enable port\n");
608 		goto free_devs;
609 	}
610 
611 	/* we should be good to go now, attach scsibus */
612 	memset(adapt, 0, sizeof(*adapt));
613 	adapt->adapt_dev = sc->sc_dev;
614 	adapt->adapt_nchannels = 1;
615 	adapt->adapt_openings = sc->sc_max_cmds - 4;
616 	adapt->adapt_max_periph = adapt->adapt_openings;
617 	adapt->adapt_request = mpii_scsipi_request;
618 	adapt->adapt_minphys = minphys;
619 	adapt->adapt_flags = SCSIPI_ADAPT_MPSAFE;
620 
621 	memset(chan, 0, sizeof(*chan));
622 	chan->chan_adapter = adapt;
623 	chan->chan_bustype = &scsi_sas_bustype;
624 	chan->chan_channel = 0;
625 	chan->chan_flags = 0;
626 	chan->chan_nluns = 8;
627 	chan->chan_ntargets = sc->sc_max_devices;
628 	chan->chan_id = -1;
629 
630 	mpii_rescan(self, "scsi", NULL);
631 
632 	/* enable interrupts */
633 	mpii_write(sc, MPII_INTR_MASK, MPII_INTR_MASK_DOORBELL
634 	    | MPII_INTR_MASK_RESET);
635 
636 #if NBIO > 0
637 	if (ISSET(sc->sc_flags, MPII_F_RAID)) {
638 		if (bio_register(sc->sc_dev, mpii_ioctl) != 0)
639 			panic("%s: controller registration failed",
640 			    DEVNAME(sc));
641 		if (mpii_create_sensors(sc) != 0)
642 			aprint_error_dev(self, "unable to create sensors\n");
643 	}
644 #endif
645 
646 	return;
647 
648 free_devs:
649 	free(sc->sc_devs, M_DEVBUF);
650 	sc->sc_devs = NULL;
651 
652 free_queues:
653 	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_freeq),
654 	    0, sc->sc_reply_free_qdepth * 4, BUS_DMASYNC_POSTREAD);
655 	mpii_dmamem_free(sc, sc->sc_reply_freeq);
656 
657 	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
658 	    0, sc->sc_reply_post_qdepth * 8, BUS_DMASYNC_POSTREAD);
659 	mpii_dmamem_free(sc, sc->sc_reply_postq);
660 
661 free_replies:
662 	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
663 		0, PAGE_SIZE, BUS_DMASYNC_POSTREAD);
664 	mpii_dmamem_free(sc, sc->sc_replies);
665 
666 free_ccbs:
667 	while ((ccb = mpii_get_ccb(sc)) != NULL)
668 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
669 	mpii_dmamem_free(sc, sc->sc_requests);
670 	free(sc->sc_ccbs, M_DEVBUF);
671 
672 unmap:
673 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
674 	sc->sc_ios = 0;
675 }
676 
/*
 * Detach: tear resources down in the reverse order of attach.
 * Detaching the child scsibus can fail (e.g. busy devices), in which
 * case we return the error and stay attached.  sc_ios != 0 is used as
 * the marker that attach completed far enough to allocate the DMA
 * resources.
 */
int
mpii_detach(device_t self, int flags)
{
	struct mpii_softc	*sc = device_private(self);
	int error;
	struct mpii_ccb *ccb;

	if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
		return error;

#if NBIO > 0
	/*
	 * NOTE(review): attach only registers bio/sensors when MPII_F_RAID
	 * is set, yet these are called unconditionally -- presumably both
	 * tolerate never having been registered; confirm.
	 */
	mpii_destroy_sensors(sc);
	bio_unregister(sc->sc_dev);
#endif /* NBIO > 0 */

	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}
	if (sc->sc_ios != 0) {
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
		free(sc->sc_devs, M_DEVBUF);
		sc->sc_devs = NULL;

		/* release the reply free queue */
		bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_freeq),
		    0, sc->sc_reply_free_qdepth * 4, BUS_DMASYNC_POSTREAD);
		mpii_dmamem_free(sc, sc->sc_reply_freeq);

		/* release the reply post queue */
		bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
		    0, sc->sc_reply_post_qdepth * 8, BUS_DMASYNC_POSTREAD);
		mpii_dmamem_free(sc, sc->sc_reply_postq);

		/* release the reply frames */
		bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
			0, PAGE_SIZE, BUS_DMASYNC_POSTREAD);
		mpii_dmamem_free(sc, sc->sc_replies);

		/* drain the ccb pool and release the request frames */
		while ((ccb = mpii_get_ccb(sc)) != NULL)
			bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
		mpii_dmamem_free(sc, sc->sc_requests);
		free(sc->sc_ccbs, M_DEVBUF);

		sc->sc_ios = 0;
	}

	return (0);
}
723 
724 int
725 mpii_rescan(device_t self, const char *ifattr, const int *locators)
726 {
727 	struct mpii_softc *sc = device_private(self);
728 
729 	if (sc->sc_child != NULL)
730 		return 0;
731 
732 	sc->sc_child = config_found_sm_loc(self, ifattr, locators, &sc->sc_chan,
733 	    scsiprint, NULL);
734 
735 	return 0;
736 }
737 
738 void
739 mpii_childdetached(device_t self, device_t child)
740 {
741 	struct mpii_softc *sc = device_private(self);
742 
743 	KASSERT(self == sc->sc_dev);
744 	KASSERT(child == sc->sc_child);
745 
746 	if (child == sc->sc_child)
747 		sc->sc_child = NULL;
748 }
749 
750 
/*
 * Interrupt handler: drain the reply post queue.
 *
 * Descriptors are consumed under sc_rep_mtx; completed commands
 * (non-zero SMID) and async events (SMID 0) are collected onto local
 * lists and only processed after the lock is dropped, so completion
 * callbacks and event handling run unlocked.  Returns 1 if at least
 * one descriptor was handled, 0 for a spurious/shared interrupt.
 */
int
mpii_intr(void *arg)
{
	struct mpii_rcb_list		evts = SIMPLEQ_HEAD_INITIALIZER(evts);
	struct mpii_ccb_list		ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_softc		*sc = arg;
	struct mpii_reply_descr		*postq = sc->sc_reply_postq_kva, *rdp;
	struct mpii_ccb			*ccb;
	struct mpii_rcb			*rcb;
	int				smid;
	u_int				idx;
	int				rv = 0;

	mutex_enter(&sc->sc_rep_mtx);
	bus_dmamap_sync(sc->sc_dmat,
	    MPII_DMA_MAP(sc->sc_reply_postq),
	    0, sc->sc_reply_post_qdepth * sizeof(*rdp),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	idx = sc->sc_reply_post_host_index;
	for (;;) {
		rdp = &postq[idx];
		/* an unused descriptor means we have caught up with the IOC */
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		smid = le16toh(rdp->smid);
		rcb = mpii_reply(sc, rdp);

		if (smid) {
			/* smids are 1-based */
			ccb = &sc->sc_ccbs[smid - 1];
			ccb->ccb_state = MPII_CCB_READY;
			ccb->ccb_rcb = rcb;
			SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		} else
			SIMPLEQ_INSERT_TAIL(&evts, rcb, rcb_link);

		/* the post queue is a ring; wrap the index */
		if (++idx >= sc->sc_reply_post_qdepth)
			idx = 0;

		rv = 1;
	}

	bus_dmamap_sync(sc->sc_dmat,
	    MPII_DMA_MAP(sc->sc_reply_postq),
	    0, sc->sc_reply_post_qdepth * sizeof(*rdp),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* tell the IOC how far we have consumed the post queue */
	if (rv)
		mpii_write_reply_post(sc, sc->sc_reply_post_host_index = idx);

	mutex_exit(&sc->sc_rep_mtx);

	if (rv == 0)
		return (0);

	/* run completions and event processing without holding sc_rep_mtx */
	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		ccb->ccb_done(ccb);
	}
	while ((rcb = SIMPLEQ_FIRST(&evts)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&evts, rcb_link);
		mpii_event_process(sc, rcb);
	}

	return (1);
}
825 
/*
 * Build the IEEE (SAS3) scatter-gather list for a scsipi transfer in the
 * ccb's request frame.  Up to sc_chain_sge elements are placed directly
 * after the SCSI IO message; if more are needed, a chain element is
 * written into that slot pointing at the remaining elements, which
 * continue contiguously in the same frame.  Returns 0 on success, 1 if
 * the dmamap could not be loaded.
 */
int
mpii_load_xs_sas3(struct mpii_ccb *ccb)
{
	struct mpii_softc	*sc = ccb->ccb_sc;
	struct scsipi_xfer	*xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io	*io = ccb->ccb_cmd;
	struct mpii_ieee_sge	*csge, *nsge, *sge;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;
	int			i, error;

	/* Request frame structure is described in the mpii_iocfacts */
	nsge = (struct mpii_ieee_sge *)(io + 1);
	csge = nsge + sc->sc_chain_sge;

	/* zero length transfer still requires an SGE */
	if (xs->datalen == 0) {
		nsge->sg_flags = MPII_IEEE_SGE_END_OF_LIST;
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL,
	    (xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	sge = nsge;
	for (i = 0; i < dmap->dm_nsegs; i++, nsge++) {
		if (nsge == csge) {
			/* reached the chain slot: emit the chain element */
			nsge++;
			/* offset to the chain sge from the beginning */
			io->chain_offset = ((uintptr_t)csge - (uintptr_t)io) / 4;
			csge->sg_flags = MPII_IEEE_SGE_CHAIN_ELEMENT |
			    MPII_IEEE_SGE_ADDR_SYSTEM;
			/* address of the next sge */
			csge->sg_addr = htole64(ccb->ccb_cmd_dva +
			    ((uintptr_t)nsge - (uintptr_t)io));
			/* chain length covers all elements still to be written */
			csge->sg_len = htole32((dmap->dm_nsegs - i) *
			    sizeof(*sge));
		}

		sge = nsge;
		sge->sg_flags = MPII_IEEE_SGE_ADDR_SYSTEM;
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
	}

	/* terminate list */
	sge->sg_flags |= MPII_IEEE_SGE_END_OF_LIST;

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
883 
/*
 * Build the classic MPI2 64-bit scatter-gather list for a scsipi
 * transfer in the ccb's request frame.  Same layout strategy as
 * mpii_load_xs_sas3(): elements follow the SCSI IO message until the
 * chain slot at sc_chain_sge, where a chain SGE redirects to the
 * remainder in the same frame.  Returns 0 on success, 1 if the dmamap
 * could not be loaded.
 */
int
mpii_load_xs(struct mpii_ccb *ccb)
{
	struct mpii_softc	*sc = ccb->ccb_sc;
	struct scsipi_xfer	*xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io	*io = ccb->ccb_cmd;
	struct mpii_sge		*csge, *nsge, *sge;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;
	u_int32_t		flags;
	u_int16_t		len;
	int			i, error;

	/* Request frame structure is described in the mpii_iocfacts */
	nsge = (struct mpii_sge *)(io + 1);
	csge = nsge + sc->sc_chain_sge;

	/* zero length transfer still requires an SGE */
	if (xs->datalen == 0) {
		nsge->sg_hdr = htole32(MPII_SGE_FL_TYPE_SIMPLE |
		    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL);
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL,
	    (xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* safe default starting flags */
	flags = MPII_SGE_FL_TYPE_SIMPLE | MPII_SGE_FL_SIZE_64;
	/* writes to the device need the direction bit set */
	if (xs->xs_control & XS_CTL_DATA_OUT)
		flags |= MPII_SGE_FL_DIR_OUT;

	sge = nsge;
	for (i = 0; i < dmap->dm_nsegs; i++, nsge++) {
		if (nsge == csge) {
			/* reached the chain slot: emit the chain SGE */
			nsge++;
			/* offset to the chain sge from the beginning */
			io->chain_offset = ((uintptr_t)csge - (uintptr_t)io) / 4;
			/* length of the sgl segment we're pointing to */
			len = (dmap->dm_nsegs - i) * sizeof(*sge);
			csge->sg_hdr = htole32(MPII_SGE_FL_TYPE_CHAIN |
			    MPII_SGE_FL_SIZE_64 | len);
			/* address of the next sge */
			mpii_dvatosge(csge, ccb->ccb_cmd_dva +
			    ((uintptr_t)nsge - (uintptr_t)io));
		}

		sge = nsge;
		sge->sg_hdr = htole32(flags | dmap->dm_segs[i].ds_len);
		mpii_dvatosge(sge, dmap->dm_segs[i].ds_addr);
	}

	/* terminate list */
	sge->sg_hdr |= htole32(MPII_SGE_FL_LAST | MPII_SGE_FL_EOB |
	    MPII_SGE_FL_EOL);

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
949 
950 u_int32_t
951 mpii_read(struct mpii_softc *sc, bus_size_t r)
952 {
953 	u_int32_t			rv;
954 
955 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
956 	    BUS_SPACE_BARRIER_READ);
957 	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
958 
959 	DNPRINTF(MPII_D_RW, "%s: mpii_read %#lx %#x\n", DEVNAME(sc), r, rv);
960 
961 	return (rv);
962 }
963 
964 void
965 mpii_write(struct mpii_softc *sc, bus_size_t r, u_int32_t v)
966 {
967 	DNPRINTF(MPII_D_RW, "%s: mpii_write %#lx %#x\n", DEVNAME(sc), r, v);
968 
969 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
970 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
971 	    BUS_SPACE_BARRIER_WRITE);
972 }
973 
974 
975 int
976 mpii_wait_eq(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
977     u_int32_t target)
978 {
979 	int			i;
980 
981 	DNPRINTF(MPII_D_RW, "%s: mpii_wait_eq %#lx %#x %#x\n", DEVNAME(sc), r,
982 	    mask, target);
983 
984 	for (i = 0; i < 15000; i++) {
985 		if ((mpii_read(sc, r) & mask) == target)
986 			return (0);
987 		delay(1000);
988 	}
989 
990 	return (1);
991 }
992 
993 int
994 mpii_wait_ne(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
995     u_int32_t target)
996 {
997 	int			i;
998 
999 	DNPRINTF(MPII_D_RW, "%s: mpii_wait_ne %#lx %#x %#x\n", DEVNAME(sc), r,
1000 	    mask, target);
1001 
1002 	for (i = 0; i < 15000; i++) {
1003 		if ((mpii_read(sc, r) & mask) != target)
1004 			return (0);
1005 		delay(1000);
1006 	}
1007 
1008 	return (1);
1009 }
1010 
/*
 * Bring the IOC to the READY state, resetting it if necessary.
 * Makes up to five attempts, re-reading the doorbell state between
 * attempts.  Returns 0 when the IOC is ready (or owned by a PCI peer),
 * 1 on failure.
 */
int
mpii_init(struct mpii_softc *sc)
{
	u_int32_t		db;
	int			i;

	/* spin until the ioc leaves the reset state */
	if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_RESET) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_init timeout waiting to leave "
		    "reset state\n", DEVNAME(sc));
		return (1);
	}

	/* check current ownership */
	db = mpii_read_db(sc);
	if ((db & MPII_DOORBELL_WHOINIT) == MPII_DOORBELL_WHOINIT_PCIPEER) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_init initialised by pci peer\n",
		    DEVNAME(sc));
		return (0);
	}

	for (i = 0; i < 5; i++) {
		switch (db & MPII_DOORBELL_STATE) {
		case MPII_DOORBELL_STATE_READY:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is ready\n",
			    DEVNAME(sc));
			return (0);

		case MPII_DOORBELL_STATE_OPER:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is oper\n",
			    DEVNAME(sc));
			/*
			 * A soft reset only suffices if the IOC can replay
			 * events afterwards; otherwise force a hard reset.
			 */
			if (sc->sc_ioc_event_replay)
				mpii_reset_soft(sc);
			else
				mpii_reset_hard(sc);
			break;

		case MPII_DOORBELL_STATE_FAULT:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is being "
			    "reset hard\n" , DEVNAME(sc));
			mpii_reset_hard(sc);
			break;

		case MPII_DOORBELL_STATE_RESET:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init waiting to come "
			    "out of reset\n", DEVNAME(sc));
			if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
			    MPII_DOORBELL_STATE_RESET) != 0)
				return (1);
			break;
		}
		db = mpii_read_db(sc);
	}

	return (1);
}
1068 
/*
 * Request a message-unit (soft) reset through the doorbell and wait
 * for the IOC to come back to the READY state.
 * Returns 0 on success, 1 if the doorbell is busy or a wait times out.
 */
int
mpii_reset_soft(struct mpii_softc *sc)
{
	DNPRINTF(MPII_D_MISC, "%s: mpii_reset_soft\n", DEVNAME(sc));

	/* the doorbell must be idle before we can issue a function */
	if (mpii_read_db(sc) & MPII_DOORBELL_INUSE) {
		return (1);
	}

	mpii_write_db(sc,
	    MPII_DOORBELL_FUNCTION(MPII_FUNCTION_IOC_MESSAGE_UNIT_RESET));

	/* XXX LSI waits 15 sec */
	if (mpii_wait_db_ack(sc) != 0)
		return (1);

	/* XXX LSI waits 15 sec */
	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_READY) != 0)
		return (1);

	/* XXX wait for Sys2IOCDB bit to clear in HIS?? */

	return (0);
}
1094 
/*
 * Perform a hard (diagnostic) reset of the adapter: unlock the
 * diagnostic register with the magic write sequence, pulse the reset
 * bit, then wait for the reset to complete.
 * Returns 0 on success, 1 if diagnostic write access could not be
 * enabled.
 */
int
mpii_reset_hard(struct mpii_softc *sc)
{
	u_int16_t		i;

	DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard\n", DEVNAME(sc));

	mpii_write_intr(sc, 0);

	/* enable diagnostic register */
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_FLUSH);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_1);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_2);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_3);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_4);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_5);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_6);

	delay(100);

	if ((mpii_read(sc, MPII_HOSTDIAG) & MPII_HOSTDIAG_DWRE) == 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard failure to enable "
		    "diagnostic read/write\n", DEVNAME(sc));
		return(1);
	}

	/* reset ioc */
	mpii_write(sc, MPII_HOSTDIAG, MPII_HOSTDIAG_RESET_ADAPTER);

	/* 240 milliseconds */
	delay(240000);


	/* XXX this whole function should be more robust */

	/* XXX  read the host diagnostic reg until reset adapter bit clears ? */
	for (i = 0; i < 30000; i++) {
		if ((mpii_read(sc, MPII_HOSTDIAG) &
		    MPII_HOSTDIAG_RESET_ADAPTER) == 0)
			break;
		delay(10000);
	}

	/* disable diagnostic register */
	mpii_write(sc, MPII_WRITESEQ, 0xff);

	/* XXX what else? */

	DNPRINTF(MPII_D_MISC, "%s: done with mpii_reset_hard\n", DEVNAME(sc));

	return(0);
}
1147 
/*
 * Send a request to the IOC through the doorbell handshake protocol,
 * one dword at a time, waiting for the IOC's acknowledgement after
 * each write.  buf holds the request; dwords is its length in 32-bit
 * words.  Returns 0 on success, 1 on any timeout or busy doorbell.
 */
int
mpii_handshake_send(struct mpii_softc *sc, void *buf, size_t dwords)
{
	u_int32_t		*query = buf;
	int			i;

	/* make sure the doorbell is not in use. */
	if (mpii_read_db(sc) & MPII_DOORBELL_INUSE)
		return (1);

	/* clear pending doorbell interrupts */
	if (mpii_read_intr(sc) & MPII_INTR_STATUS_IOC2SYSDB)
		mpii_write_intr(sc, 0);

	/*
	 * first write the doorbell with the handshake function and the
	 * dword count.
	 */
	mpii_write_db(sc, MPII_DOORBELL_FUNCTION(MPII_FUNCTION_HANDSHAKE) |
	    MPII_DOORBELL_DWORDS(dwords));

	/*
	 * the doorbell used bit will be set because a doorbell function has
	 * started. wait for the interrupt and then ack it.
	 */
	if (mpii_wait_db_int(sc) != 0)
		return (1);
	mpii_write_intr(sc, 0);

	/* poll for the acknowledgement. */
	if (mpii_wait_db_ack(sc) != 0)
		return (1);

	/* write the query through the doorbell. */
	for (i = 0; i < dwords; i++) {
		mpii_write_db(sc, htole32(query[i]));
		if (mpii_wait_db_ack(sc) != 0)
			return (1);
	}

	return (0);
}
1190 
/*
 * Read one dword of a handshake reply.  The IOC delivers it as two
 * 16-bit halves through the doorbell data bits, each announced by a
 * doorbell interrupt that must be acked.  The halves are stored into
 * *dword low half first; callers byte-swap the assembled buffer with
 * le16toh/le32toh when they pick fields out of it.
 * Returns 0 on success, 1 on timeout.
 */
int
mpii_handshake_recv_dword(struct mpii_softc *sc, u_int32_t *dword)
{
	u_int16_t		*words = (u_int16_t *)dword;
	int			i;

	for (i = 0; i < 2; i++) {
		if (mpii_wait_db_int(sc) != 0)
			return (1);
		words[i] = le16toh(mpii_read_db(sc) & MPII_DOORBELL_DATA_MASK);
		mpii_write_intr(sc, 0);
	}

	return (0);
}
1206 
/*
 * Receive a handshake reply from the IOC into buf (at most dwords
 * 32-bit words).  The reply's own msg_length header field gives its
 * true length; anything beyond the caller's buffer is drained and
 * discarded so the doorbell protocol completes cleanly.
 * Returns 0 on success, 1 on any timeout.
 */
int
mpii_handshake_recv(struct mpii_softc *sc, void *buf, size_t dwords)
{
	struct mpii_msg_reply	*reply = buf;
	u_int32_t		*dbuf = buf, dummy;
	int			i;

	/* get the first dword so we can read the length out of the header. */
	if (mpii_handshake_recv_dword(sc, &dbuf[0]) != 0)
		return (1);

	DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dwords: %lu reply: %d\n",
	    DEVNAME(sc), dwords, reply->msg_length);

	/*
	 * the total length, in dwords, is in the message length field of the
	 * reply header.
	 */
	for (i = 1; i < MIN(dwords, reply->msg_length); i++) {
		if (mpii_handshake_recv_dword(sc, &dbuf[i]) != 0)
			return (1);
	}

	/* if there's extra stuff to come off the ioc, discard it */
	while (i++ < reply->msg_length) {
		if (mpii_handshake_recv_dword(sc, &dummy) != 0)
			return (1);
		DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dummy read: "
		    "0x%08x\n", DEVNAME(sc), dummy);
	}

	/* wait for the doorbell used bit to be reset and clear the intr */
	if (mpii_wait_db_int(sc) != 0)
		return (1);

	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_INUSE, 0) != 0)
		return (1);

	mpii_write_intr(sc, 0);

	return (0);
}
1249 
/*
 * No-op completion callback used for polled commands; the poller's
 * caller reaps the ccb itself, so there is nothing to do here.
 */
void
mpii_empty_done(struct mpii_ccb *ccb)
{
	/* nothing to do */
}
1255 
/*
 * Issue an IOC_FACTS handshake request and size the driver's queues
 * and request frames from the reply: command credit, reply free/post
 * queue depths, request frame size and the number of SGEs that fit in
 * a request.  Returns 0 on success, 1 on handshake failure or if the
 * reported queue depth is unusable.
 */
int
mpii_iocfacts(struct mpii_softc *sc)
{
	struct mpii_msg_iocfacts_request	ifq;
	struct mpii_msg_iocfacts_reply		ifp;
	int					irs;
	int					sge_size;
	u_int					qdepth;

	DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts\n", DEVNAME(sc));

	memset(&ifq, 0, sizeof(ifq));
	memset(&ifp, 0, sizeof(ifp));

	ifq.function = MPII_FUNCTION_IOC_FACTS;

	if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	sc->sc_ioc_number = ifp.ioc_number;
	sc->sc_vf_id = ifp.vf_id;

	sc->sc_max_volumes = ifp.max_volumes;
	sc->sc_max_devices = ifp.max_volumes + le16toh(ifp.max_targets);

	if (ISSET(le32toh(ifp.ioc_capabilities),
	    MPII_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
		SET(sc->sc_flags, MPII_F_RAID);
	if (ISSET(le32toh(ifp.ioc_capabilities),
	    MPII_IOCFACTS_CAPABILITY_EVENT_REPLAY))
		sc->sc_ioc_event_replay = 1;

	/* never use more command slots than the IOC grants us credit for */
	sc->sc_max_cmds = MIN(le16toh(ifp.request_credit),
	    MPII_REQUEST_CREDIT);

	/* SAS3 and 3.5 controllers have different sgl layouts */
	if (ifp.msg_version_maj == 2 && ((ifp.msg_version_min == 5)
	    || (ifp.msg_version_min == 6)))
		SET(sc->sc_flags, MPII_F_SAS3);

	/*
	 * The host driver must ensure that there is at least one
	 * unused entry in the Reply Free Queue. One way to ensure
	 * that this requirement is met is to never allocate a number
	 * of reply frames that is a multiple of 16.
	 */
	sc->sc_num_reply_frames = sc->sc_max_cmds + 32;
	if (!(sc->sc_num_reply_frames % 16))
		sc->sc_num_reply_frames--;

	/* must be multiple of 16 */
	sc->sc_reply_post_qdepth = sc->sc_max_cmds +
	    sc->sc_num_reply_frames;
	sc->sc_reply_post_qdepth += 16 - (sc->sc_reply_post_qdepth % 16);

	/* clamp to the maximum post-queue depth the IOC supports */
	qdepth = le16toh(ifp.max_reply_descriptor_post_queue_depth);
	if (sc->sc_reply_post_qdepth > qdepth) {
		sc->sc_reply_post_qdepth = qdepth;
		if (sc->sc_reply_post_qdepth < 16) {
			printf("%s: RDPQ is too shallow\n", DEVNAME(sc));
			return (1);
		}
		sc->sc_max_cmds = sc->sc_reply_post_qdepth / 2 - 4;
		sc->sc_num_reply_frames = sc->sc_max_cmds + 4;
	}

	sc->sc_reply_free_qdepth = sc->sc_num_reply_frames +
	    16 - (sc->sc_num_reply_frames % 16);

	/*
	 * Our request frame for an I/O operation looks like this:
	 *
	 * +-------------------+ -.
	 * | mpii_msg_scsi_io  |  |
	 * +-------------------|  |
	 * | mpii_sge          |  |
	 * + - - - - - - - - - +  |
	 * | ...               |  > ioc_request_frame_size
	 * + - - - - - - - - - +  |
	 * | mpii_sge (tail)   |  |
	 * + - - - - - - - - - +  |
	 * | mpii_sge (csge)   |  | --.
	 * + - - - - - - - - - + -'   | chain sge points to the next sge
	 * | mpii_sge          |<-----'
	 * + - - - - - - - - - +
	 * | ...               |
	 * + - - - - - - - - - +
	 * | mpii_sge (tail)   |
	 * +-------------------+
	 * |                   |
	 * ~~~~~~~~~~~~~~~~~~~~~
	 * |                   |
	 * +-------------------+ <- sc_request_size - sizeof(scsi_sense_data)
	 * | scsi_sense_data   |
	 * +-------------------+
	 */

	/* both sizes are in 32-bit words */
	sc->sc_reply_size = ifp.reply_frame_size * 4;
	irs = le16toh(ifp.ioc_request_frame_size) * 4;
	sc->sc_request_size = MPII_REQUEST_SIZE;
	/* make sure we have enough space for scsi sense data */
	if (irs > sc->sc_request_size) {
		sc->sc_request_size = irs + sizeof(struct scsi_sense_data);
		sc->sc_request_size += 16 - (sc->sc_request_size % 16);
	}

	if (ISSET(sc->sc_flags, MPII_F_SAS3)) {
		sge_size = sizeof(struct mpii_ieee_sge);
	} else {
		sge_size = sizeof(struct mpii_sge);
	}

	/* offset to the chain sge */
	sc->sc_chain_sge = (irs - sizeof(struct mpii_msg_scsi_io)) /
	    sge_size - 1;

	/*
	 * A number of simple scatter-gather elements we can fit into the
	 * request buffer after the I/O command minus the chain element.
	 */
	sc->sc_max_sgl = (sc->sc_request_size -
 	    sizeof(struct mpii_msg_scsi_io) - sizeof(struct scsi_sense_data)) /
	    sge_size - 1;

	return (0);
}
1392 
/*
 * Issue an IOC_INIT handshake request that tells the IOC where the
 * request frames, reply frames and the reply free/post queues live in
 * DMA memory, and the sizes chosen by mpii_iocfacts().
 * Returns 0 on success, 1 on handshake failure or a bad IOC status.
 */
int
mpii_iocinit(struct mpii_softc *sc)
{
	struct mpii_msg_iocinit_request		iiq;
	struct mpii_msg_iocinit_reply		iip;

	DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit\n", DEVNAME(sc));

	memset(&iiq, 0, sizeof(iiq));
	memset(&iip, 0, sizeof(iip));

	iiq.function = MPII_FUNCTION_IOC_INIT;
	iiq.whoinit = MPII_WHOINIT_HOST_DRIVER;

	/* XXX JPG do something about vf_id */
	iiq.vf_id = 0;

	iiq.msg_version_maj = 0x02;
	iiq.msg_version_min = 0x00;

	/* XXX JPG ensure compliance with some level and hard-code? */
	iiq.hdr_version_unit = 0x00;
	iiq.hdr_version_dev = 0x00;

	/* frame size is given to the IOC in 32-bit words */
	iiq.system_request_frame_size = htole16(sc->sc_request_size / 4);

	iiq.reply_descriptor_post_queue_depth =
	    htole16(sc->sc_reply_post_qdepth);

	iiq.reply_free_queue_depth = htole16(sc->sc_reply_free_qdepth);

	/* sense buffers live inside the request frames */
	iiq.sense_buffer_address_high =
	    htole32(MPII_DMA_DVA(sc->sc_requests) >> 32);

	iiq.system_reply_address_high =
	    htole32(MPII_DMA_DVA(sc->sc_replies) >> 32);

	iiq.system_request_frame_base_address_lo =
	    htole32(MPII_DMA_DVA(sc->sc_requests));
	iiq.system_request_frame_base_address_hi =
	    htole32(MPII_DMA_DVA(sc->sc_requests) >> 32);

	iiq.reply_descriptor_post_queue_address_lo =
	    htole32(MPII_DMA_DVA(sc->sc_reply_postq));
	iiq.reply_descriptor_post_queue_address_hi =
	    htole32(MPII_DMA_DVA(sc->sc_reply_postq) >> 32);

	iiq.reply_free_queue_address_lo =
	    htole32(MPII_DMA_DVA(sc->sc_reply_freeq));
	iiq.reply_free_queue_address_hi =
	    htole32(MPII_DMA_DVA(sc->sc_reply_freeq) >> 32);

	if (mpii_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &iip, dwordsof(iip)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	DNPRINTF(MPII_D_MISC, "%s:  function: 0x%02x msg_length: %d "
	    "whoinit: 0x%02x\n", DEVNAME(sc), iip.function,
	    iip.msg_length, iip.whoinit);
	DNPRINTF(MPII_D_MISC, "%s:  msg_flags: 0x%02x\n", DEVNAME(sc),
	    iip.msg_flags);
	DNPRINTF(MPII_D_MISC, "%s:  vf_id: 0x%02x vp_id: 0x%02x\n", DEVNAME(sc),
	    iip.vf_id, iip.vp_id);
	DNPRINTF(MPII_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
	    le16toh(iip.ioc_status));
	DNPRINTF(MPII_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    le32toh(iip.ioc_loginfo));

	if (le16toh(iip.ioc_status) != MPII_IOCSTATUS_SUCCESS ||
	    le32toh(iip.ioc_loginfo))
		return (1);

	return (0);
}
1475 
/*
 * Return a reply frame to the IOC by appending its DMA address to the
 * reply free queue ring and publishing the new producer index through
 * the reply free host index register.  The ring index is protected by
 * sc_reply_free_mtx.  A NULL rcb is silently ignored.
 */
void
mpii_push_reply(struct mpii_softc *sc, struct mpii_rcb *rcb)
{
	u_int32_t		*rfp;
	u_int			idx;

	if (rcb == NULL)
		return;

	mutex_enter(&sc->sc_reply_free_mtx);
	idx = sc->sc_reply_free_host_index;

	rfp = MPII_DMA_KVA(sc->sc_reply_freeq);
	rfp[idx] = htole32(rcb->rcb_reply_dva);

	/* the free queue is a ring; wrap the producer index */
	if (++idx >= sc->sc_reply_free_qdepth)
		idx = 0;

	mpii_write_reply_free(sc, sc->sc_reply_free_host_index = idx);
	mutex_exit(&sc->sc_reply_free_mtx);
}
1497 
/*
 * Send a PORT_FACTS request (polled) and record the reported port type
 * in sc_porttype.  Returns 0 on success, 1 on failure.
 */
int
mpii_portfacts(struct mpii_softc *sc)
{
	struct mpii_msg_portfacts_request	*pfq;
	struct mpii_msg_portfacts_reply		*pfp;
	struct mpii_ccb				*ccb;
	int					rv = 1;

	DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts\n", DEVNAME(sc));

	ccb = mpii_get_ccb(sc);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts mpii_get_ccb fail\n",
		    DEVNAME(sc));
		return (rv);
	}

	ccb->ccb_done = mpii_empty_done;
	pfq = ccb->ccb_cmd;

	memset(pfq, 0, sizeof(*pfq));

	pfq->function = MPII_FUNCTION_PORT_FACTS;
	pfq->chain_offset = 0;
	pfq->msg_flags = 0;
	pfq->port_number = 0;
	pfq->vp_id = 0;
	pfq->vf_id = 0;

	if (mpii_poll(sc, ccb) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts poll\n",
		    DEVNAME(sc));
		goto err;
	}

	if (ccb->ccb_rcb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: empty portfacts reply\n",
		    DEVNAME(sc));
		goto err;
	}

	pfp = ccb->ccb_rcb->rcb_reply;
	sc->sc_porttype = pfp->port_type;

	/* hand the reply frame back to the IOC */
	mpii_push_reply(sc, ccb->ccb_rcb);
	rv = 0;
err:
	mpii_put_ccb(sc, ccb);

	return (rv);
}
1549 
/*
 * Workqueue handler: drain the deferred event-ack queue and send an
 * EVENT_ACK request to the IOC for each queued event reply, returning
 * the reply frame to the IOC once the ack command has been built.
 */
void
mpii_eventack(struct work *wk, void * cookie)
{
	struct mpii_softc			*sc = cookie;
	struct mpii_ccb				*ccb;
	struct mpii_rcb				*rcb, *next;
	struct mpii_msg_event_reply		*enp;
	struct mpii_msg_eventack_request	*eaq;

	/* atomically steal the whole pending list */
	mutex_enter(&sc->sc_evt_ack_mtx);
	next = SIMPLEQ_FIRST(&sc->sc_evt_ack_queue);
	SIMPLEQ_INIT(&sc->sc_evt_ack_queue);
	mutex_exit(&sc->sc_evt_ack_mtx);

	while (next != NULL) {
		rcb = next;
		next = SIMPLEQ_NEXT(rcb, rcb_link);

		enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;

		/*
		 * NOTE(review): mpii_get_ccb() is checked for NULL at
		 * other call sites but not here -- confirm it cannot
		 * fail in this context.
		 */
		ccb = mpii_get_ccb(sc);
		ccb->ccb_done = mpii_eventack_done;
		eaq = ccb->ccb_cmd;

		eaq->function = MPII_FUNCTION_EVENT_ACK;

		/* echo the event and its context back verbatim (LE) */
		eaq->event = enp->event;
		eaq->event_context = enp->event_context;

		mpii_push_reply(sc, rcb);

		mpii_start(sc, ccb);
	}
}
1584 
1585 void
1586 mpii_eventack_done(struct mpii_ccb *ccb)
1587 {
1588 	struct mpii_softc			*sc = ccb->ccb_sc;
1589 
1590 	DNPRINTF(MPII_D_EVT, "%s: event ack done\n", DEVNAME(sc));
1591 
1592 	mpii_push_reply(sc, ccb->ccb_rcb);
1593 	mpii_put_ccb(sc, ccb);
1594 }
1595 
1596 int
1597 mpii_portenable(struct mpii_softc *sc)
1598 {
1599 	struct mpii_msg_portenable_request	*peq;
1600 	struct mpii_ccb				*ccb;
1601 
1602 	DNPRINTF(MPII_D_MISC, "%s: mpii_portenable\n", DEVNAME(sc));
1603 
1604 	ccb = mpii_get_ccb(sc);
1605 	if (ccb == NULL) {
1606 		DNPRINTF(MPII_D_MISC, "%s: mpii_portenable ccb_get\n",
1607 		    DEVNAME(sc));
1608 		return (1);
1609 	}
1610 
1611 	ccb->ccb_done = mpii_empty_done;
1612 	peq = ccb->ccb_cmd;
1613 
1614 	peq->function = MPII_FUNCTION_PORT_ENABLE;
1615 	peq->vf_id = sc->sc_vf_id;
1616 
1617 	if (mpii_poll(sc, ccb) != 0) {
1618 		DNPRINTF(MPII_D_MISC, "%s: mpii_portenable poll\n",
1619 		    DEVNAME(sc));
1620 		return (1);
1621 	}
1622 
1623 	if (ccb->ccb_rcb == NULL) {
1624 		DNPRINTF(MPII_D_MISC, "%s: empty portenable reply\n",
1625 		    DEVNAME(sc));
1626 		return (1);
1627 	}
1628 
1629 	mpii_push_reply(sc, ccb->ccb_rcb);
1630 	mpii_put_ccb(sc, ccb);
1631 
1632 	return (0);
1633 }
1634 
1635 int
1636 mpii_cfg_coalescing(struct mpii_softc *sc)
1637 {
1638 	struct mpii_cfg_hdr			hdr;
1639 	struct mpii_cfg_ioc_pg1			ipg;
1640 
1641 	hdr.page_version = 0;
1642 	hdr.page_length = sizeof(ipg) / 4;
1643 	hdr.page_number = 1;
1644 	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC;
1645 	memset(&ipg, 0, sizeof(ipg));
1646 	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &ipg,
1647 	    sizeof(ipg)) != 0) {
1648 		DNPRINTF(MPII_D_MISC, "%s: unable to fetch IOC page 1\n"
1649 		    "page 1\n", DEVNAME(sc));
1650 		return (1);
1651 	}
1652 
1653 	if (!ISSET(le32toh(ipg.flags), MPII_CFG_IOC_1_REPLY_COALESCING))
1654 		return (0);
1655 
1656 	/* Disable coalescing */
1657 	CLR(ipg.flags, htole32(MPII_CFG_IOC_1_REPLY_COALESCING));
1658 	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 0, &ipg,
1659 	    sizeof(ipg)) != 0) {
1660 		DNPRINTF(MPII_D_MISC, "%s: unable to clear coalescing\n",
1661 		    DEVNAME(sc));
1662 		return (1);
1663 	}
1664 
1665 	return (0);
1666 }
1667 
/*
 * Mask (disable) every event notification bit in an event notification
 * request.  The all-ones pattern is identical in either byte order, so
 * no htole32() is needed here.
 */
#define MPII_EVENT_MASKALL(enq)		do {			\
		enq->event_masks[0] = 0xffffffff;		\
		enq->event_masks[1] = 0xffffffff;		\
		enq->event_masks[2] = 0xffffffff;		\
		enq->event_masks[3] = 0xffffffff;		\
	} while (0)

/*
 * Clear the mask bit for a single event so the IOC will report it;
 * the mask words are little-endian on the wire, hence the htole32().
 */
#define MPII_EVENT_UNMASK(enq, evt)	do {			\
		enq->event_masks[evt / 32] &=			\
		    htole32(~(1 << (evt % 32)));		\
	} while (0)
1679 
1680 int
1681 mpii_eventnotify(struct mpii_softc *sc)
1682 {
1683 	struct mpii_msg_event_request		*enq;
1684 	struct mpii_ccb				*ccb;
1685 	char wkname[15];
1686 
1687 	ccb = mpii_get_ccb(sc);
1688 	if (ccb == NULL) {
1689 		DNPRINTF(MPII_D_MISC, "%s: mpii_eventnotify ccb_get\n",
1690 		    DEVNAME(sc));
1691 		return (1);
1692 	}
1693 
1694 	SIMPLEQ_INIT(&sc->sc_evt_sas_queue);
1695 	mutex_init(&sc->sc_evt_sas_mtx, MUTEX_DEFAULT, IPL_BIO);
1696 	snprintf(wkname, sizeof(wkname), "%ssas", DEVNAME(sc));
1697 	if (workqueue_create(&sc->sc_evt_sas_wq, wkname,
1698 	    mpii_event_sas_work, sc, PRI_NONE, IPL_BIO, WQ_MPSAFE) != 0) {
1699 		mpii_put_ccb(sc, ccb);
1700 		aprint_error_dev(sc->sc_dev,
1701 		    "can't create %s workqueue\n", wkname);
1702 		return 1;
1703 	}
1704 
1705 	SIMPLEQ_INIT(&sc->sc_evt_ack_queue);
1706 	mutex_init(&sc->sc_evt_ack_mtx, MUTEX_DEFAULT, IPL_BIO);
1707 	snprintf(wkname, sizeof(wkname), "%sevt", DEVNAME(sc));
1708 	if (workqueue_create(&sc->sc_evt_ack_wq, wkname,
1709 	    mpii_eventack, sc, PRI_NONE, IPL_BIO, WQ_MPSAFE) != 0) {
1710 		mpii_put_ccb(sc, ccb);
1711 		aprint_error_dev(sc->sc_dev,
1712 		    "can't create %s workqueue\n", wkname);
1713 		return 1;
1714 	}
1715 
1716 	ccb->ccb_done = mpii_eventnotify_done;
1717 	enq = ccb->ccb_cmd;
1718 
1719 	enq->function = MPII_FUNCTION_EVENT_NOTIFICATION;
1720 
1721 	/*
1722 	 * Enable reporting of the following events:
1723 	 *
1724 	 * MPII_EVENT_SAS_DISCOVERY
1725 	 * MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST
1726 	 * MPII_EVENT_SAS_DEVICE_STATUS_CHANGE
1727 	 * MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE
1728 	 * MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST
1729 	 * MPII_EVENT_IR_VOLUME
1730 	 * MPII_EVENT_IR_PHYSICAL_DISK
1731 	 * MPII_EVENT_IR_OPERATION_STATUS
1732 	 */
1733 
1734 	MPII_EVENT_MASKALL(enq);
1735 	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DISCOVERY);
1736 	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
1737 	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DEVICE_STATUS_CHANGE);
1738 	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
1739 	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST);
1740 	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_VOLUME);
1741 	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_PHYSICAL_DISK);
1742 	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_OPERATION_STATUS);
1743 
1744 	mpii_start(sc, ccb);
1745 
1746 	return (0);
1747 }
1748 
/*
 * Completion handler for the EVENT_NOTIFICATION request: the ccb is
 * retired first, then the reply frame (rcb) is handed to the event
 * dispatcher, which is responsible for returning it to the IOC.
 */
void
mpii_eventnotify_done(struct mpii_ccb *ccb)
{
	struct mpii_softc			*sc = ccb->ccb_sc;
	struct mpii_rcb				*rcb = ccb->ccb_rcb;

	DNPRINTF(MPII_D_EVT, "%s: mpii_eventnotify_done\n", DEVNAME(sc));

	mpii_put_ccb(sc, ccb);
	mpii_event_process(sc, rcb);
}
1760 
/*
 * Handle an IR_CONFIGURATION_CHANGE_LIST event: walk the change-list
 * elements appended to the event reply and update the driver's device
 * table -- adding/removing volume entries and flagging physical disks
 * that become volume members or hot spares (hidden from scsipi).
 * The device table is protected by sc_devs_mtx.
 */
void
mpii_event_raid(struct mpii_softc *sc, struct mpii_msg_event_reply *enp)
{
	struct mpii_evt_ir_cfg_change_list	*ccl;
	struct mpii_evt_ir_cfg_element		*ce;
	struct mpii_device			*dev;
	u_int16_t				type;
	int					i;

	/* the change list immediately follows the event reply header */
	ccl = (struct mpii_evt_ir_cfg_change_list *)(enp + 1);
	if (ccl->num_elements == 0)
		return;

	if (ISSET(le32toh(ccl->flags), MPII_EVT_IR_CFG_CHANGE_LIST_FOREIGN)) {
		/* bail on foreign configurations */
		return;
	}

	ce = (struct mpii_evt_ir_cfg_element *)(ccl + 1);

	for (i = 0; i < ccl->num_elements; i++, ce++) {
		type = (le16toh(ce->element_flags) &
		    MPII_EVT_IR_CFG_ELEMENT_TYPE_MASK);

		switch (type) {
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME:
			switch (ce->reason_code) {
			case MPII_EVT_IR_CFG_ELEMENT_RC_ADDED:
			case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_CREATED:
				/* allocate before taking the lock */
				dev = malloc(sizeof(*dev), M_DEVBUF,
				    M_NOWAIT | M_ZERO);
				if (!dev) {
					printf("%s: failed to allocate a "
					    "device structure\n", DEVNAME(sc));
					break;
				}
				mutex_enter(&sc->sc_devs_mtx);
				if (mpii_find_dev(sc,
				    le16toh(ce->vol_dev_handle))) {
					mutex_exit(&sc->sc_devs_mtx);
					free(dev, M_DEVBUF);
					printf("%s: device %#x is already "
					    "configured\n", DEVNAME(sc),
					    le16toh(ce->vol_dev_handle));
					break;
				}
				SET(dev->flags, MPII_DF_VOLUME);
				dev->slot = sc->sc_vd_id_low;
				dev->dev_handle = le16toh(ce->vol_dev_handle);
				if (mpii_insert_dev(sc, dev)) {
					mutex_exit(&sc->sc_devs_mtx);
					free(dev, M_DEVBUF);
					break;
				}
				sc->sc_vd_count++;
				mutex_exit(&sc->sc_devs_mtx);
				break;
			case MPII_EVT_IR_CFG_ELEMENT_RC_REMOVED:
			case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_DELETED:
				mutex_enter(&sc->sc_devs_mtx);
				if (!(dev = mpii_find_dev(sc,
				    le16toh(ce->vol_dev_handle)))) {
					mutex_exit(&sc->sc_devs_mtx);
					break;
				}
				mpii_remove_dev(sc, dev);
				sc->sc_vd_count--;
				mutex_exit(&sc->sc_devs_mtx);
				break;
			}
			break;
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME_DISK:
			if (ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_PD_CREATED ||
			    ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) {
				/* there should be an underlying sas drive */
				mutex_enter(&sc->sc_devs_mtx);
				if (!(dev = mpii_find_dev(sc,
				    le16toh(ce->phys_disk_dev_handle)))) {
					mutex_exit(&sc->sc_devs_mtx);
					break;
				}
				/* promoted from a hot spare? */
				CLR(dev->flags, MPII_DF_HOT_SPARE);
				SET(dev->flags, MPII_DF_VOLUME_DISK |
				    MPII_DF_HIDDEN);
				mutex_exit(&sc->sc_devs_mtx);
			}
			break;
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_HOT_SPARE:
			if (ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) {
				/* there should be an underlying sas drive */
				mutex_enter(&sc->sc_devs_mtx);
				if (!(dev = mpii_find_dev(sc,
				    le16toh(ce->phys_disk_dev_handle)))) {
					mutex_exit(&sc->sc_devs_mtx);
					break;
				}
				SET(dev->flags, MPII_DF_HOT_SPARE |
				    MPII_DF_HIDDEN);
				mutex_exit(&sc->sc_devs_mtx);
			}
			break;
		}
	}
}
1869 
/*
 * Handle a SAS_TOPOLOGY_CHANGE_LIST event.  Device additions are
 * processed inline; removals are deferred to the SAS workqueue (they
 * may detach scsipi targets, which cannot be done here), in which case
 * ownership of the rcb is handed to the queue.  Otherwise the rcb is
 * completed immediately via mpii_event_done().
 */
void
mpii_event_sas(struct mpii_softc *sc, struct mpii_rcb *rcb)
{
	struct mpii_msg_event_reply 	*enp;
	struct mpii_evt_sas_tcl		*tcl;
	struct mpii_evt_phy_entry	*pe;
	struct mpii_device		*dev;
	int				i;
	u_int16_t			handle;
	int				need_queue = 0;

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;
	DNPRINTF(MPII_D_EVT, "%s: mpii_event_sas 0x%x\n",
		    DEVNAME(sc), le16toh(enp->event));
	KASSERT(le16toh(enp->event) == MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST);

	/* the change list and its phy entries follow the reply header */
	tcl = (struct mpii_evt_sas_tcl *)(enp + 1);
	pe = (struct mpii_evt_phy_entry *)(tcl + 1);

	for (i = 0; i < tcl->num_entries; i++, pe++) {
		DNPRINTF(MPII_D_EVT, "%s: sas change %d stat %d h %d slot %d phy %d enc %d expand %d\n",
		    DEVNAME(sc), i, pe->phy_status,
		    le16toh(pe->dev_handle),
		    sc->sc_pd_id_start + tcl->start_phy_num + i,
		    tcl->start_phy_num + i, le16toh(tcl->enclosure_handle), le16toh(tcl->expander_handle));

		switch (pe->phy_status & MPII_EVENT_SAS_TOPO_PS_RC_MASK) {
		case MPII_EVENT_SAS_TOPO_PS_RC_ADDED:
			handle = le16toh(pe->dev_handle);
			DNPRINTF(MPII_D_EVT, "%s: sas add handle %d\n",
			    DEVNAME(sc), handle);
			/* allocate before taking the device-table lock */
			dev = malloc(sizeof(*dev), M_DEVBUF, M_WAITOK | M_ZERO);
			mutex_enter(&sc->sc_devs_mtx);
			if (mpii_find_dev(sc, handle)) {
				mutex_exit(&sc->sc_devs_mtx);
				free(dev, M_DEVBUF);
				printf("%s: device %#x is already "
				    "configured\n", DEVNAME(sc), handle);
				break;
			}

			dev->slot = sc->sc_pd_id_start + tcl->start_phy_num + i;
			dev->dev_handle = handle;
			dev->phy_num = tcl->start_phy_num + i;
			if (tcl->enclosure_handle)
				dev->physical_port = tcl->physical_port;
			dev->enclosure = le16toh(tcl->enclosure_handle);
			dev->expander = le16toh(tcl->expander_handle);

			if (mpii_insert_dev(sc, dev)) {
				mutex_exit(&sc->sc_devs_mtx);
				free(dev, M_DEVBUF);
				break;
			}
			printf("%s: physical device inserted in slot %d\n",
			    DEVNAME(sc), dev->slot);
			mutex_exit(&sc->sc_devs_mtx);
			break;

		case MPII_EVENT_SAS_TOPO_PS_RC_MISSING:
			/* defer to workqueue thread */
			need_queue++;
			break;
		}
	}

	if (need_queue) {
		bool start_wk;
		mutex_enter(&sc->sc_evt_sas_mtx);
		/* only kick the workqueue when the queue was empty */
		start_wk = (SIMPLEQ_FIRST(&sc->sc_evt_sas_queue) == 0);
		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_sas_queue, rcb, rcb_link);
		if (start_wk) {
			workqueue_enqueue(sc->sc_evt_sas_wq,
			    &sc->sc_evt_sas_work, NULL);
		}
		mutex_exit(&sc->sc_evt_sas_mtx);
	} else
		mpii_event_done(sc, rcb);
}
1949 
/*
 * Workqueue handler for deferred SAS topology changes: drain the
 * queued topology-change replies and process device removals --
 * detach the scsipi target (unless hidden), drop the device from the
 * table and free it.  Additions were already handled inline by
 * mpii_event_sas().  Each rcb is completed via mpii_event_done().
 */
void
mpii_event_sas_work(struct work *wq, void *xsc)
{
	struct mpii_softc *sc = xsc;
	struct mpii_rcb *rcb, *next;
	struct mpii_msg_event_reply *enp;
	struct mpii_evt_sas_tcl		*tcl;
	struct mpii_evt_phy_entry	*pe;
	struct mpii_device		*dev;
	int				i;

	/* atomically steal the whole pending list */
	mutex_enter(&sc->sc_evt_sas_mtx);
	next = SIMPLEQ_FIRST(&sc->sc_evt_sas_queue);
	SIMPLEQ_INIT(&sc->sc_evt_sas_queue);
	mutex_exit(&sc->sc_evt_sas_mtx);

	while (next != NULL) {
		rcb = next;
		next = SIMPLEQ_NEXT(rcb, rcb_link);

		enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;
		DNPRINTF(MPII_D_EVT, "%s: mpii_event_sas_work 0x%x\n",
			    DEVNAME(sc), le16toh(enp->event));
		KASSERT(le16toh(enp->event) == MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
		tcl = (struct mpii_evt_sas_tcl *)(enp + 1);
		pe = (struct mpii_evt_phy_entry *)(tcl + 1);

		for (i = 0; i < tcl->num_entries; i++, pe++) {
			DNPRINTF(MPII_D_EVT, "%s: sas change %d stat %d h %d slot %d phy %d enc %d expand %d\n",
			    DEVNAME(sc), i, pe->phy_status,
			    le16toh(pe->dev_handle),
			    sc->sc_pd_id_start + tcl->start_phy_num + i,
			    tcl->start_phy_num + i, le16toh(tcl->enclosure_handle), le16toh(tcl->expander_handle));

			switch (pe->phy_status & MPII_EVENT_SAS_TOPO_PS_RC_MASK) {
			case MPII_EVENT_SAS_TOPO_PS_RC_ADDED:
				/* already handled */
				break;

			case MPII_EVENT_SAS_TOPO_PS_RC_MISSING:
				mutex_enter(&sc->sc_devs_mtx);
				dev = mpii_find_dev(sc, le16toh(pe->dev_handle));
				if (dev == NULL) {
					mutex_exit(&sc->sc_devs_mtx);
					break;
				}

				printf(
				    "%s: physical device removed from slot %d\n",
				    DEVNAME(sc), dev->slot);
				/*
				 * dev is unlinked from the table here, so
				 * this thread holds the only reference;
				 * its fields stay valid until the free.
				 */
				mpii_remove_dev(sc, dev);
				mutex_exit(&sc->sc_devs_mtx);
				mpii_sas_remove_device(sc, dev->dev_handle);
				if (!ISSET(dev->flags, MPII_DF_HIDDEN)) {
					scsipi_target_detach(&sc->sc_chan,
					    dev->slot, 0, DETACH_FORCE);
				}

				free(dev, M_DEVBUF);
				break;
			}
		}
		mpii_event_done(sc, rcb);
	}
}
2015 
2016 void
2017 mpii_event_discovery(struct mpii_softc *sc, struct mpii_msg_event_reply *enp)
2018 {
2019 	struct mpii_evt_sas_discovery *esd =
2020 	    (struct mpii_evt_sas_discovery *)(enp + 1);
2021 
2022 	if (esd->reason_code == MPII_EVENT_SAS_DISC_REASON_CODE_COMPLETED) {
2023 		if (esd->discovery_status != 0) {
2024 			printf("%s: sas discovery completed with status %#x\n",
2025 			    DEVNAME(sc), esd->discovery_status);
2026 		}
2027 
2028 	}
2029 }
2030 
/*
 * Dispatch an asynchronous event notification reply from the IOC.
 * Every path must eventually hand the rcb back via mpii_event_done(),
 * except the SAS topology change case, where mpii_event_sas() takes
 * ownership of the rcb and calls mpii_event_done() itself (hence the
 * "return" rather than "break").
 */
void
mpii_event_process(struct mpii_softc *sc, struct mpii_rcb *rcb)
{
	struct mpii_msg_event_reply		*enp;

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;

	DNPRINTF(MPII_D_EVT, "%s: mpii_event_process: %#x\n", DEVNAME(sc),
	    le16toh(enp->event));

	switch (le16toh(enp->event)) {
	case MPII_EVENT_EVENT_CHANGE:
		/* should be properly ignored */
		break;
	case MPII_EVENT_SAS_DISCOVERY:
		mpii_event_discovery(sc, enp);
		break;
	case MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		/* mpii_event_sas() calls mpii_event_done() when finished. */
		mpii_event_sas(sc, rcb);
		return;
	case MPII_EVENT_SAS_DEVICE_STATUS_CHANGE:
		break;
	case MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		break;
	case MPII_EVENT_IR_VOLUME: {
		/* Event payload immediately follows the reply header. */
		struct mpii_evt_ir_volume	*evd =
		    (struct mpii_evt_ir_volume *)(enp + 1);
		struct mpii_device		*dev;
#if NBIO > 0
		/* Indexed by the volume state values reported by the IOC. */
		const char *vol_states[] = {
			BIOC_SVINVALID_S,
			BIOC_SVOFFLINE_S,
			BIOC_SVBUILDING_S,
			BIOC_SVONLINE_S,
			BIOC_SVDEGRADED_S,
			BIOC_SVONLINE_S,
		};
#endif

		if (cold)
			break;
		mutex_enter(&sc->sc_devs_mtx);
		dev = mpii_find_dev(sc, le16toh(evd->vol_dev_handle));
		if (dev == NULL) {
			mutex_exit(&sc->sc_devs_mtx);
			break;
		}
#if NBIO > 0
		if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATE_CHANGED)
			printf("%s: volume %d state changed from %s to %s\n",
			    DEVNAME(sc), dev->slot - sc->sc_vd_id_low,
			    vol_states[evd->prev_value],
			    vol_states[evd->new_value]);
#endif
		if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATUS_CHANGED &&
		    ISSET(evd->new_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC) &&
		    !ISSET(evd->prev_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
			printf("%s: started resync on a volume %d\n",
			    DEVNAME(sc), dev->slot - sc->sc_vd_id_low);
		}
		/*
		 * NOTE(review): the brace above closes the case-local scope,
		 * so this mutex_exit() runs only on the fall-through path.
		 * That is consistent: both "break"s inside the scope either
		 * released the mutex themselves or never acquired it.
		 */
		mutex_exit(&sc->sc_devs_mtx);
		break;
	case MPII_EVENT_IR_PHYSICAL_DISK:
		break;
	case MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		mpii_event_raid(sc, enp);
		break;
	case MPII_EVENT_IR_OPERATION_STATUS: {
		struct mpii_evt_ir_status	*evs =
		    (struct mpii_evt_ir_status *)(enp + 1);
		struct mpii_device		*dev;

		mutex_enter(&sc->sc_devs_mtx);
		dev = mpii_find_dev(sc, le16toh(evs->vol_dev_handle));
		if (dev != NULL &&
		    evs->operation == MPII_EVENT_IR_RAIDOP_RESYNC)
			/* Record resync progress on the volume. */
			dev->percent = evs->percent;
		mutex_exit(&sc->sc_devs_mtx);
		break;
		}
	default:
		DNPRINTF(MPII_D_EVT, "%s:  unhandled event 0x%02x\n",
		    DEVNAME(sc), le16toh(enp->event));
	}

	mpii_event_done(sc, rcb);
}
2118 
2119 void
2120 mpii_event_done(struct mpii_softc *sc, struct mpii_rcb *rcb)
2121 {
2122 	struct mpii_msg_event_reply *enp = rcb->rcb_reply;
2123 	bool	need_start;
2124 
2125 	if (enp->ack_required) {
2126 		mutex_enter(&sc->sc_evt_ack_mtx);
2127 		need_start = (SIMPLEQ_FIRST(&sc->sc_evt_ack_queue) == 0);
2128 		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_ack_queue, rcb, rcb_link);
2129 		if (need_start)
2130 			workqueue_enqueue(sc->sc_evt_ack_wq,
2131 			    &sc->sc_evt_ack_work, NULL);
2132 		mutex_exit(&sc->sc_evt_ack_mtx);
2133 	} else
2134 		mpii_push_reply(sc, rcb);
2135 }
2136 
/*
 * Purge a removed device from the IOC: first issue a target reset to
 * abort any commands outstanding for the handle, then ask the SAS IO
 * unit to drop the device from its tables.  Both commands share one
 * ccb.  Best effort: errors are ignored and replies merely recycled.
 */
void
mpii_sas_remove_device(struct mpii_softc *sc, u_int16_t handle)
{
	struct mpii_msg_scsi_task_request	*stq;
	struct mpii_msg_sas_oper_request	*soq;
	struct mpii_ccb				*ccb;

	ccb = mpii_get_ccb(sc);
	if (ccb == NULL)
		return;

	/* Step 1: target reset.  The frame was zeroed by mpii_put_ccb(). */
	stq = ccb->ccb_cmd;
	stq->function = MPII_FUNCTION_SCSI_TASK_MGMT;
	stq->task_type = MPII_SCSI_TASK_TARGET_RESET;
	stq->dev_handle = htole16(handle);

	ccb->ccb_done = mpii_empty_done;
	mpii_wait(sc, ccb);

	if (ccb->ccb_rcb != NULL)
		mpii_push_reply(sc, ccb->ccb_rcb);

	/* reuse a ccb */
	ccb->ccb_state = MPII_CCB_READY;
	ccb->ccb_rcb = NULL;

	/* Step 2: remove the device from the SAS IO unit's tables. */
	soq = ccb->ccb_cmd;
	memset(soq, 0, sizeof(*soq));
	soq->function = MPII_FUNCTION_SAS_IO_UNIT_CONTROL;
	soq->operation = MPII_SAS_OP_REMOVE_DEVICE;
	soq->dev_handle = htole16(handle);

	ccb->ccb_done = mpii_empty_done;
	mpii_wait(sc, ccb);
	if (ccb->ccb_rcb != NULL)
		mpii_push_reply(sc, ccb->ccb_rcb);

	mpii_put_ccb(sc, ccb);
}
2176 
/*
 * Report the controller board name and firmware/MPI versions at attach
 * time.  Versions come from an IOCFacts register handshake; the board
 * name comes from manufacturing page 0.  Returns 0 on success,
 * non-zero on failure.  NOTE(review): the failure codes are a mix of
 * 1 and EINVAL — callers should only test for non-zero.
 */
int
mpii_board_info(struct mpii_softc *sc)
{
	struct mpii_msg_iocfacts_request	ifq;
	struct mpii_msg_iocfacts_reply		ifp;
	struct mpii_cfg_manufacturing_pg0	mpg;
	struct mpii_cfg_hdr			hdr;

	memset(&ifq, 0, sizeof(ifq));
	memset(&ifp, 0, sizeof(ifp));

	ifq.function = MPII_FUNCTION_IOC_FACTS;

	if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: failed to request ioc facts\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: failed to receive ioc facts\n",
		    DEVNAME(sc));
		return (1);
	}

	/* Manufacturing page 0 carries the human-readable board name. */
	hdr.page_version = 0;
	hdr.page_length = sizeof(mpg) / 4;	/* page lengths are in dwords */
	hdr.page_number = 0;
	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_MANUFACTURING;
	memset(&mpg, 0, sizeof(mpg));
	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &mpg,
	    sizeof(mpg)) != 0) {
		printf("%s: unable to fetch manufacturing page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* assumes mpg.board_name is NUL-terminated — TODO confirm */
	printf("%s: %s, firmware %u.%u.%u.%u%s, MPI %u.%u\n", DEVNAME(sc),
	    mpg.board_name, ifp.fw_version_maj, ifp.fw_version_min,
	    ifp.fw_version_unit, ifp.fw_version_dev,
	    ISSET(sc->sc_flags, MPII_F_RAID) ? " IR" : "",
	    ifp.msg_version_maj, ifp.msg_version_min);

	return (0);
}
2222 
/*
 * Work out how target ids map onto RAID volumes and physical devices
 * from the mapping mode in IOC page 8.  Adjusts sc_vd_id_low (first
 * volume target id) and sc_pd_id_start (first physical device target
 * id).  Returns 0 on success, EINVAL if the page cannot be read.
 */
int
mpii_target_map(struct mpii_softc *sc)
{
	struct mpii_cfg_hdr			hdr;
	struct mpii_cfg_ioc_pg8			ipg;
	int					flags, pad = 0;

	hdr.page_version = 0;
	hdr.page_length = sizeof(ipg) / 4;	/* page lengths are in dwords */
	hdr.page_number = 8;
	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC;
	memset(&ipg, 0, sizeof(ipg));
	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &ipg,
	    sizeof(ipg)) != 0) {
		printf("%s: unable to fetch ioc page 8\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* Skip target id 0 when the IOC reserves it. */
	if (le16toh(ipg.flags) & MPII_IOC_PG8_FLAGS_RESERVED_TARGETID_0)
		pad = 1;

	flags = le16toh(ipg.ir_volume_mapping_flags) &
	    MPII_IOC_PG8_IRFLAGS_VOLUME_MAPPING_MODE_MASK;
	if (ISSET(sc->sc_flags, MPII_F_RAID)) {
		if (flags == MPII_IOC_PG8_IRFLAGS_LOW_VOLUME_MAPPING) {
			/* Volumes first, then physical devices after them. */
			sc->sc_vd_id_low += pad;
			pad = sc->sc_max_volumes; /* for sc_pd_id_start */
		} else
			/* Volumes mapped at the top of the id range. */
			sc->sc_vd_id_low = sc->sc_max_devices -
			    sc->sc_max_volumes;
	}

	sc->sc_pd_id_start += pad;

	return (0);
}
2260 
2261 int
2262 mpii_req_cfg_header(struct mpii_softc *sc, u_int8_t type, u_int8_t number,
2263     u_int32_t address, int flags, void *p)
2264 {
2265 	struct mpii_msg_config_request		*cq;
2266 	struct mpii_msg_config_reply		*cp;
2267 	struct mpii_ccb				*ccb;
2268 	struct mpii_cfg_hdr			*hdr = p;
2269 	struct mpii_ecfg_hdr			*ehdr = p;
2270 	int					etype = 0;
2271 	int					rv = 0;
2272 
2273 	DNPRINTF(MPII_D_MISC, "%s: mpii_req_cfg_header type: %#x number: %x "
2274 	    "address: 0x%08x flags: 0x%x\n", DEVNAME(sc), type, number,
2275 	    address, flags);
2276 
2277 	ccb = mpii_get_ccb(sc);
2278 	if (ccb == NULL) {
2279 		DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header ccb_get\n",
2280 		    DEVNAME(sc));
2281 		return (1);
2282 	}
2283 
2284 	if (ISSET(flags, MPII_PG_EXTENDED)) {
2285 		etype = type;
2286 		type = MPII_CONFIG_REQ_PAGE_TYPE_EXTENDED;
2287 	}
2288 
2289 	cq = ccb->ccb_cmd;
2290 
2291 	cq->function = MPII_FUNCTION_CONFIG;
2292 
2293 	cq->action = MPII_CONFIG_REQ_ACTION_PAGE_HEADER;
2294 
2295 	cq->config_header.page_number = number;
2296 	cq->config_header.page_type = type;
2297 	cq->ext_page_type = etype;
2298 	cq->page_address = htole32(address);
2299 	cq->page_buffer.sg_hdr = htole32(MPII_SGE_FL_TYPE_SIMPLE |
2300 	    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL);
2301 
2302 	ccb->ccb_done = mpii_empty_done;
2303 	if (ISSET(flags, MPII_PG_POLL)) {
2304 		if (mpii_poll(sc, ccb) != 0) {
2305 			DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
2306 			    DEVNAME(sc));
2307 			return (1);
2308 		}
2309 	} else
2310 		mpii_wait(sc, ccb);
2311 
2312 	if (ccb->ccb_rcb == NULL) {
2313 		mpii_put_ccb(sc, ccb);
2314 		return (1);
2315 	}
2316 	cp = ccb->ccb_rcb->rcb_reply;
2317 
2318 	DNPRINTF(MPII_D_MISC, "%s:  action: 0x%02x sgl_flags: 0x%02x "
2319 	    "msg_length: %d function: 0x%02x\n", DEVNAME(sc), cp->action,
2320 	    cp->sgl_flags, cp->msg_length, cp->function);
2321 	DNPRINTF(MPII_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
2322 	    "msg_flags: 0x%02x\n", DEVNAME(sc),
2323 	    le16toh(cp->ext_page_length), cp->ext_page_type,
2324 	    cp->msg_flags);
2325 	DNPRINTF(MPII_D_MISC, "%s:  vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
2326 	    cp->vp_id, cp->vf_id);
2327 	DNPRINTF(MPII_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2328 	    le16toh(cp->ioc_status));
2329 	DNPRINTF(MPII_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2330 	    le32toh(cp->ioc_loginfo));
2331 	DNPRINTF(MPII_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
2332 	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2333 	    cp->config_header.page_version,
2334 	    cp->config_header.page_length,
2335 	    cp->config_header.page_number,
2336 	    cp->config_header.page_type);
2337 
2338 	if (le16toh(cp->ioc_status) != MPII_IOCSTATUS_SUCCESS)
2339 		rv = 1;
2340 	else if (ISSET(flags, MPII_PG_EXTENDED)) {
2341 		memset(ehdr, 0, sizeof(*ehdr));
2342 		ehdr->page_version = cp->config_header.page_version;
2343 		ehdr->page_number = cp->config_header.page_number;
2344 		ehdr->page_type = cp->config_header.page_type;
2345 		ehdr->ext_page_length = cp->ext_page_length;
2346 		ehdr->ext_page_type = cp->ext_page_type;
2347 	} else
2348 		*hdr = cp->config_header;
2349 
2350 	mpii_push_reply(sc, ccb->ccb_rcb);
2351 	mpii_put_ccb(sc, ccb);
2352 
2353 	return (rv);
2354 }
2355 
2356 int
2357 mpii_req_cfg_page(struct mpii_softc *sc, u_int32_t address, int flags,
2358     void *p, int read, void *page, size_t len)
2359 {
2360 	struct mpii_msg_config_request		*cq;
2361 	struct mpii_msg_config_reply		*cp;
2362 	struct mpii_ccb				*ccb;
2363 	struct mpii_cfg_hdr			*hdr = p;
2364 	struct mpii_ecfg_hdr			*ehdr = p;
2365 	uintptr_t				kva;
2366 	int					page_length;
2367 	int					rv = 0;
2368 
2369 	DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page address: %d read: %d "
2370 	    "type: %x\n", DEVNAME(sc), address, read, hdr->page_type);
2371 
2372 	page_length = ISSET(flags, MPII_PG_EXTENDED) ?
2373 	    le16toh(ehdr->ext_page_length) : hdr->page_length;
2374 
2375 	if (len > sc->sc_request_size - sizeof(*cq) || len < page_length * 4)
2376 		return (1);
2377 
2378 	ccb = mpii_get_ccb(sc);
2379 	if (ccb == NULL) {
2380 		DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page ccb_get\n",
2381 		    DEVNAME(sc));
2382 		return (1);
2383 	}
2384 
2385 	cq = ccb->ccb_cmd;
2386 
2387 	cq->function = MPII_FUNCTION_CONFIG;
2388 
2389 	cq->action = (read ? MPII_CONFIG_REQ_ACTION_PAGE_READ_CURRENT :
2390 	    MPII_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT);
2391 
2392 	if (ISSET(flags, MPII_PG_EXTENDED)) {
2393 		cq->config_header.page_version = ehdr->page_version;
2394 		cq->config_header.page_number = ehdr->page_number;
2395 		cq->config_header.page_type = ehdr->page_type;
2396 		cq->ext_page_len = ehdr->ext_page_length;
2397 		cq->ext_page_type = ehdr->ext_page_type;
2398 	} else
2399 		cq->config_header = *hdr;
2400 	cq->config_header.page_type &= MPII_CONFIG_REQ_PAGE_TYPE_MASK;
2401 	cq->page_address = htole32(address);
2402 	cq->page_buffer.sg_hdr = htole32(MPII_SGE_FL_TYPE_SIMPLE |
2403 	    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL |
2404 	    MPII_SGE_FL_SIZE_64 | (page_length * 4) |
2405 	    (read ? MPII_SGE_FL_DIR_IN : MPII_SGE_FL_DIR_OUT));
2406 
2407 	/* bounce the page via the request space to avoid more bus_dma games */
2408 	mpii_dvatosge(&cq->page_buffer, ccb->ccb_cmd_dva +
2409 	    sizeof(struct mpii_msg_config_request));
2410 
2411 	kva = (uintptr_t)ccb->ccb_cmd;
2412 	kva += sizeof(struct mpii_msg_config_request);
2413 
2414 	if (!read)
2415 		memcpy((void *)kva, page, len);
2416 
2417 	ccb->ccb_done = mpii_empty_done;
2418 	if (ISSET(flags, MPII_PG_POLL)) {
2419 		if (mpii_poll(sc, ccb) != 0) {
2420 			DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
2421 			    DEVNAME(sc));
2422 			return (1);
2423 		}
2424 	} else
2425 		mpii_wait(sc, ccb);
2426 
2427 	if (ccb->ccb_rcb == NULL) {
2428 		mpii_put_ccb(sc, ccb);
2429 		return (1);
2430 	}
2431 	cp = ccb->ccb_rcb->rcb_reply;
2432 
2433 	DNPRINTF(MPII_D_MISC, "%s:  action: 0x%02x msg_length: %d "
2434 	    "function: 0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length,
2435 	    cp->function);
2436 	DNPRINTF(MPII_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
2437 	    "msg_flags: 0x%02x\n", DEVNAME(sc),
2438 	    le16toh(cp->ext_page_length), cp->ext_page_type,
2439 	    cp->msg_flags);
2440 	DNPRINTF(MPII_D_MISC, "%s:  vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
2441 	    cp->vp_id, cp->vf_id);
2442 	DNPRINTF(MPII_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2443 	    le16toh(cp->ioc_status));
2444 	DNPRINTF(MPII_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2445 	    le32toh(cp->ioc_loginfo));
2446 	DNPRINTF(MPII_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
2447 	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2448 	    cp->config_header.page_version,
2449 	    cp->config_header.page_length,
2450 	    cp->config_header.page_number,
2451 	    cp->config_header.page_type);
2452 
2453 	if (le16toh(cp->ioc_status) != MPII_IOCSTATUS_SUCCESS)
2454 		rv = 1;
2455 	else if (read)
2456 		memcpy(page, (void *)kva, len);
2457 
2458 	mpii_push_reply(sc, ccb->ccb_rcb);
2459 	mpii_put_ccb(sc, ccb);
2460 
2461 	return (rv);
2462 }
2463 
/*
 * Translate one consumed reply descriptor from the post queue into its
 * rcb.  Returns NULL for descriptor types that carry no reply frame
 * (anything other than an address reply).  The descriptor slot is
 * reset to the all-ones "unused" pattern.  Called with sc_rep_mtx held.
 */
struct mpii_rcb *
mpii_reply(struct mpii_softc *sc, struct mpii_reply_descr *rdp)
{
	struct mpii_rcb		*rcb = NULL;
	u_int32_t		rfid;

	KASSERT(mutex_owned(&sc->sc_rep_mtx));
	DNPRINTF(MPII_D_INTR, "%s: mpii_reply\n", DEVNAME(sc));

	if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
	    MPII_REPLY_DESCR_ADDRESS_REPLY) {
		/* Convert the frame's bus address back to a frame index. */
		rfid = (le32toh(rdp->frame_addr) -
		    (u_int32_t)MPII_DMA_DVA(sc->sc_replies)) /
		    sc->sc_reply_size;

		/* Make the device-written reply frame visible to the CPU. */
		bus_dmamap_sync(sc->sc_dmat,
		    MPII_DMA_MAP(sc->sc_replies), sc->sc_reply_size * rfid,
		    sc->sc_reply_size, BUS_DMASYNC_POSTREAD);

		rcb = &sc->sc_rcbs[rfid];
	}

	/* Mark the descriptor slot unused for the next sweep. */
	memset(rdp, 0xff, sizeof(*rdp));

	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
	    8 * sc->sc_reply_post_host_index, 8,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	return (rcb);
}
2494 
/*
 * Allocate, map and DMA-load a zeroed, single-segment buffer of "size"
 * bytes for structures shared between host and IOC.  Returns NULL on
 * failure with all partial state undone.  Release the result with
 * mpii_dmamem_free().
 */
struct mpii_dmamem *
mpii_dmamem_alloc(struct mpii_softc *sc, size_t size)
{
	struct mpii_dmamem	*mdm;
	int			nsegs;

	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mdm == NULL)
		return (NULL);

	mdm->mdm_size = size;

	/* One segment only, so the device sees a contiguous DVA range. */
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg,
	    1, &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
	    &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
	    NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	memset(mdm->mdm_kva, 0, size);

	return (mdm);

	/* Error unwind, in reverse order of acquisition. */
unmap:
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
mdmfree:
	free(mdm, M_DEVBUF);

	return (NULL);
}
2538 
/*
 * Release a buffer obtained from mpii_dmamem_alloc(): unload, unmap,
 * free the DMA memory and destroy the map, in that order.
 */
void
mpii_dmamem_free(struct mpii_softc *sc, struct mpii_dmamem *mdm)
{
	DNPRINTF(MPII_D_MEM, "%s: mpii_dmamem_free %p\n", DEVNAME(sc), mdm);

	bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF);
}
2550 
2551 int
2552 mpii_insert_dev(struct mpii_softc *sc, struct mpii_device *dev)
2553 {
2554 	int		slot;	/* initial hint */
2555 
2556 	KASSERT(mutex_owned(&sc->sc_devs_mtx));
2557 	DNPRINTF(MPII_D_EVT, "%s: mpii_insert_dev wants slot %d\n",
2558 	    DEVNAME(sc), dev->slot);
2559 	if (dev == NULL || dev->slot < 0)
2560 		return (1);
2561 	slot = dev->slot;
2562 
2563 	while (slot < sc->sc_max_devices && sc->sc_devs[slot] != NULL)
2564 		slot++;
2565 
2566 	if (slot >= sc->sc_max_devices)
2567 		return (1);
2568 
2569 	DNPRINTF(MPII_D_EVT, "%s: mpii_insert_dev alloc slot %d\n",
2570 	    DEVNAME(sc), slot);
2571 
2572 	dev->slot = slot;
2573 	sc->sc_devs[slot] = dev;
2574 
2575 	return (0);
2576 }
2577 
2578 int
2579 mpii_remove_dev(struct mpii_softc *sc, struct mpii_device *dev)
2580 {
2581 	int			i;
2582 
2583 	KASSERT(mutex_owned(&sc->sc_devs_mtx));
2584 	if (dev == NULL)
2585 		return (1);
2586 
2587 	for (i = 0; i < sc->sc_max_devices; i++) {
2588 		if (sc->sc_devs[i] == NULL)
2589 			continue;
2590 
2591 		if (sc->sc_devs[i]->dev_handle == dev->dev_handle) {
2592 			sc->sc_devs[i] = NULL;
2593 			return (0);
2594 		}
2595 	}
2596 
2597 	return (1);
2598 }
2599 
2600 struct mpii_device *
2601 mpii_find_dev(struct mpii_softc *sc, u_int16_t handle)
2602 {
2603 	int			i;
2604 	KASSERT(mutex_owned(&sc->sc_devs_mtx));
2605 
2606 	for (i = 0; i < sc->sc_max_devices; i++) {
2607 		if (sc->sc_devs[i] == NULL)
2608 			continue;
2609 
2610 		if (sc->sc_devs[i]->dev_handle == handle)
2611 			return (sc->sc_devs[i]);
2612 	}
2613 
2614 	return (NULL);
2615 }
2616 
2617 int
2618 mpii_alloc_ccbs(struct mpii_softc *sc)
2619 {
2620 	struct mpii_ccb		*ccb;
2621 	u_int8_t		*cmd;
2622 	int			i;
2623 	char wqname[16];
2624 
2625 	SIMPLEQ_INIT(&sc->sc_ccb_free);
2626 	SIMPLEQ_INIT(&sc->sc_ccb_tmos);
2627 	mutex_init(&sc->sc_ccb_free_mtx, MUTEX_DEFAULT, IPL_BIO);
2628 	cv_init(&sc->sc_ccb_free_cv, "mpii_ccbs");
2629 	mutex_init(&sc->sc_ssb_tmomtx, MUTEX_DEFAULT, IPL_BIO);
2630 	snprintf(wqname, sizeof(wqname) - 1, "%sabrt", DEVNAME(sc));
2631 	workqueue_create(&sc->sc_ssb_tmowk, wqname, mpii_scsi_cmd_tmo_handler,
2632 	    sc, PRI_BIO, IPL_BIO, WQ_MPSAFE);
2633 	if (sc->sc_ssb_tmowk == NULL)
2634 		return 1;
2635 
2636 	sc->sc_ccbs = malloc((sc->sc_max_cmds-1) * sizeof(*ccb),
2637 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2638 	if (sc->sc_ccbs == NULL) {
2639 		printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
2640 		return (1);
2641 	}
2642 
2643 	sc->sc_requests = mpii_dmamem_alloc(sc,
2644 	    sc->sc_request_size * sc->sc_max_cmds);
2645 	if (sc->sc_requests == NULL) {
2646 		printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
2647 		goto free_ccbs;
2648 	}
2649 	cmd = MPII_DMA_KVA(sc->sc_requests);
2650 
2651 	/*
2652 	 * we have sc->sc_max_cmds system request message
2653 	 * frames, but smid zero cannot be used. so we then
2654 	 * have (sc->sc_max_cmds - 1) number of ccbs
2655 	 */
2656 	for (i = 1; i < sc->sc_max_cmds; i++) {
2657 		ccb = &sc->sc_ccbs[i - 1];
2658 
2659 		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, sc->sc_max_sgl,
2660 		    MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
2661 		    &ccb->ccb_dmamap) != 0) {
2662 			printf("%s: unable to create dma map\n", DEVNAME(sc));
2663 			goto free_maps;
2664 		}
2665 
2666 		ccb->ccb_sc = sc;
2667 		mutex_init(&ccb->ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
2668 		cv_init(&ccb->ccb_cv, "mpiiexec");
2669 
2670 		ccb->ccb_smid = htole16(i);
2671 		ccb->ccb_offset = sc->sc_request_size * i;
2672 
2673 		ccb->ccb_cmd = &cmd[ccb->ccb_offset];
2674 		ccb->ccb_cmd_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_requests) +
2675 		    ccb->ccb_offset;
2676 
2677 		DNPRINTF(MPII_D_CCB, "%s: mpii_alloc_ccbs(%d) ccb: %p map: %p "
2678 		    "sc: %p smid: %#x offs: %#lx cmd: %p dva: %#lx\n",
2679 		    DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc,
2680 		    ccb->ccb_smid, ccb->ccb_offset, ccb->ccb_cmd,
2681 		    ccb->ccb_cmd_dva);
2682 
2683 		mpii_put_ccb(sc, ccb);
2684 	}
2685 
2686 	return (0);
2687 
2688 free_maps:
2689 	while ((ccb = mpii_get_ccb(sc)) != NULL)
2690 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
2691 
2692 	mpii_dmamem_free(sc, sc->sc_requests);
2693 free_ccbs:
2694 	free(sc->sc_ccbs, M_DEVBUF);
2695 
2696 	return (1);
2697 }
2698 
2699 void
2700 mpii_put_ccb(struct mpii_softc *sc, struct mpii_ccb *ccb)
2701 {
2702 	DNPRINTF(MPII_D_CCB, "%s: mpii_put_ccb %p\n", DEVNAME(sc), ccb);
2703 
2704 	ccb->ccb_state = MPII_CCB_FREE;
2705 	ccb->ccb_cookie = NULL;
2706 	ccb->ccb_done = NULL;
2707 	ccb->ccb_rcb = NULL;
2708 	memset(ccb->ccb_cmd, 0, sc->sc_request_size);
2709 
2710 	mutex_enter(&sc->sc_ccb_free_mtx);
2711 	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link);
2712 	mutex_exit(&sc->sc_ccb_free_mtx);
2713 }
2714 
2715 struct mpii_ccb *
2716 mpii_get_ccb(struct mpii_softc *sc)
2717 {
2718 	struct mpii_ccb		*ccb;
2719 
2720 	mutex_enter(&sc->sc_ccb_free_mtx);
2721 	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free);
2722 	if (ccb != NULL) {
2723 		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link);
2724 		ccb->ccb_state = MPII_CCB_READY;
2725 		KASSERT(ccb->ccb_sc == sc);
2726 	}
2727 	mutex_exit(&sc->sc_ccb_free_mtx);
2728 
2729 	DNPRINTF(MPII_D_CCB, "%s: mpii_get_ccb %p\n", DEVNAME(sc), ccb);
2730 
2731 	return (ccb);
2732 }
2733 
2734 int
2735 mpii_alloc_replies(struct mpii_softc *sc)
2736 {
2737 	DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_replies\n", DEVNAME(sc));
2738 
2739 	sc->sc_rcbs = malloc(sc->sc_num_reply_frames * sizeof(struct mpii_rcb),
2740 	    M_DEVBUF, M_NOWAIT);
2741 	if (sc->sc_rcbs == NULL)
2742 		return (1);
2743 
2744 	sc->sc_replies = mpii_dmamem_alloc(sc, sc->sc_reply_size *
2745 	    sc->sc_num_reply_frames);
2746 	if (sc->sc_replies == NULL) {
2747 		free(sc->sc_rcbs, M_DEVBUF);
2748 		return (1);
2749 	}
2750 
2751 	return (0);
2752 }
2753 
2754 void
2755 mpii_push_replies(struct mpii_softc *sc)
2756 {
2757 	struct mpii_rcb		*rcb;
2758 	uintptr_t		kva = (uintptr_t)MPII_DMA_KVA(sc->sc_replies);
2759 	int			i;
2760 
2761 	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
2762 	    0, sc->sc_reply_size * sc->sc_num_reply_frames,
2763 	    BUS_DMASYNC_PREREAD);
2764 
2765 	for (i = 0; i < sc->sc_num_reply_frames; i++) {
2766 		rcb = &sc->sc_rcbs[i];
2767 
2768 		rcb->rcb_reply = (void *)(kva + sc->sc_reply_size * i);
2769 		rcb->rcb_reply_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) +
2770 		    sc->sc_reply_size * i;
2771 		mpii_push_reply(sc, rcb);
2772 	}
2773 }
2774 
/*
 * Post a prepared ccb's request descriptor to the IOC.  The 64-bit
 * descriptor is written as two 32-bit register writes (the single
 * 64-bit write path is compiled out via "&& 0"), serialized with
 * sc_req_mtx so the low/high halves of concurrent submissions cannot
 * interleave.
 */
void
mpii_start(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	struct mpii_request_header	*rhp;
	struct mpii_request_descr	descr;
#if defined(__LP64__) && 0
	u_long				 *rdp = (u_long *)&descr;
#else
	u_int32_t			 *rdp = (u_int32_t *)&descr;
#endif

	DNPRINTF(MPII_D_RW, "%s: mpii_start %#lx\n", DEVNAME(sc),
	    ccb->ccb_cmd_dva);

	/* Flush the request frame to memory before the IOC reads it. */
	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, sc->sc_request_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ccb->ccb_state = MPII_CCB_QUEUED;

	rhp = ccb->ccb_cmd;

	memset(&descr, 0, sizeof(descr));

	/* Choose the descriptor type required by the request function. */
	switch (rhp->function) {
	case MPII_FUNCTION_SCSI_IO_REQUEST:
		descr.request_flags = MPII_REQ_DESCR_SCSI_IO;
		descr.dev_handle = htole16(ccb->ccb_dev_handle);
		break;
	case MPII_FUNCTION_SCSI_TASK_MGMT:
		descr.request_flags = MPII_REQ_DESCR_HIGH_PRIORITY;
		break;
	default:
		descr.request_flags = MPII_REQ_DESCR_DEFAULT;
	}

	descr.vf_id = sc->sc_vf_id;
	descr.smid = ccb->ccb_smid;

#if defined(__LP64__) && 0
	DNPRINTF(MPII_D_RW, "%s:   MPII_REQ_DESCR_POST_LOW (0x%08x) write "
	    "0x%08lx\n", DEVNAME(sc), MPII_REQ_DESCR_POST_LOW, *rdp);
	bus_space_write_raw_8(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, *rdp);
#else
	DNPRINTF(MPII_D_RW, "%s:   MPII_REQ_DESCR_POST_LOW (0x%08x) write "
	    "0x%04x\n", DEVNAME(sc), MPII_REQ_DESCR_POST_LOW, *rdp);

	DNPRINTF(MPII_D_RW, "%s:   MPII_REQ_DESCR_POST_HIGH (0x%08x) write "
	    "0x%04x\n", DEVNAME(sc), MPII_REQ_DESCR_POST_HIGH, *(rdp+1));

	mutex_enter(&sc->sc_req_mtx);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, rdp[0]);
	/* Barrier spans both descriptor registers (8 bytes at POST_LOW). */
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, 8, BUS_SPACE_BARRIER_WRITE);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_HIGH, rdp[1]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, 8, BUS_SPACE_BARRIER_WRITE);
	mutex_exit(&sc->sc_req_mtx);
#endif
}
2839 
2840 int
2841 mpii_poll(struct mpii_softc *sc, struct mpii_ccb *ccb)
2842 {
2843 	void				(*done)(struct mpii_ccb *);
2844 	void				*cookie;
2845 	int				rv = 1;
2846 
2847 	DNPRINTF(MPII_D_INTR, "%s: mpii_poll\n", DEVNAME(sc));
2848 
2849 	done = ccb->ccb_done;
2850 	cookie = ccb->ccb_cookie;
2851 
2852 	ccb->ccb_done = mpii_poll_done;
2853 	ccb->ccb_cookie = &rv;
2854 
2855 	mpii_start(sc, ccb);
2856 
2857 	while (rv == 1) {
2858 		/* avoid excessive polling */
2859 		if (mpii_reply_waiting(sc))
2860 			mpii_intr(sc);
2861 		else
2862 			delay(10);
2863 	}
2864 
2865 	ccb->ccb_cookie = cookie;
2866 	done(ccb);
2867 
2868 	return (0);
2869 }
2870 
2871 void
2872 mpii_poll_done(struct mpii_ccb *ccb)
2873 {
2874 	int				*rv = ccb->ccb_cookie;
2875 
2876 	*rv = 0;
2877 }
2878 
2879 int
2880 mpii_alloc_queues(struct mpii_softc *sc)
2881 {
2882 	u_int32_t		*rfp;
2883 	int			i;
2884 
2885 	DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_queues\n", DEVNAME(sc));
2886 
2887 	mutex_init(&sc->sc_reply_free_mtx, MUTEX_DEFAULT, IPL_BIO);
2888 	sc->sc_reply_freeq = mpii_dmamem_alloc(sc,
2889 	    sc->sc_reply_free_qdepth * sizeof(*rfp));
2890 	if (sc->sc_reply_freeq == NULL)
2891 		return (1);
2892 	rfp = MPII_DMA_KVA(sc->sc_reply_freeq);
2893 	for (i = 0; i < sc->sc_num_reply_frames; i++) {
2894 		rfp[i] = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) +
2895 		    sc->sc_reply_size * i;
2896 	}
2897 
2898 	sc->sc_reply_postq = mpii_dmamem_alloc(sc,
2899 	    sc->sc_reply_post_qdepth * sizeof(struct mpii_reply_descr));
2900 	if (sc->sc_reply_postq == NULL)
2901 		goto free_reply_freeq;
2902 	sc->sc_reply_postq_kva = MPII_DMA_KVA(sc->sc_reply_postq);
2903 	memset(sc->sc_reply_postq_kva, 0xff, sc->sc_reply_post_qdepth *
2904 	    sizeof(struct mpii_reply_descr));
2905 
2906 	return (0);
2907 
2908 free_reply_freeq:
2909 	mpii_dmamem_free(sc, sc->sc_reply_freeq);
2910 	return (1);
2911 }
2912 
/*
 * Publish the initial queue indices to the IOC: the reply free queue
 * starts completely full (index at the last entry) and the reply post
 * queue starts empty (index 0).
 */
void
mpii_init_queues(struct mpii_softc *sc)
{
	DNPRINTF(MPII_D_MISC, "%s:  mpii_init_queues\n", DEVNAME(sc));

	sc->sc_reply_free_host_index = sc->sc_reply_free_qdepth - 1;
	sc->sc_reply_post_host_index = 0;
	mpii_write_reply_free(sc, sc->sc_reply_free_host_index);
	mpii_write_reply_post(sc, sc->sc_reply_post_host_index);
}
2923 
/*
 * Submit a ccb and sleep until it completes.  The ccb's done hook and
 * cookie are borrowed for the handshake: a non-NULL cookie marks the
 * ccb as in flight, and mpii_wait_done() clears it and signals
 * ccb_cv.  The caller's done hook is then invoked with the original
 * cookie restored.
 */
void
mpii_wait(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	void			(*done)(struct mpii_ccb *);
	void			*cookie;

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mpii_wait_done;
	ccb->ccb_cookie = ccb;

	/* XXX this will wait forever for the ccb to complete */

	mpii_start(sc, ccb);

	mutex_enter(&ccb->ccb_mtx);
	while (ccb->ccb_cookie != NULL)
		cv_wait(&ccb->ccb_cv, &ccb->ccb_mtx);
	mutex_exit(&ccb->ccb_mtx);

	ccb->ccb_cookie = cookie;
	done(ccb);
}
2948 
/*
 * Completion hook for mpii_wait(): clear the in-flight marker and wake
 * the sleeping thread.
 */
void
mpii_wait_done(struct mpii_ccb *ccb)
{
	mutex_enter(&ccb->ccb_mtx);
	ccb->ccb_cookie = NULL;
	cv_signal(&ccb->ccb_cv);
	mutex_exit(&ccb->ccb_mtx);
}
2957 
/*
 * scsipi adapter entry point: translate a scsipi request into an MPII
 * SCSI IO request and post it to the IOC.  Commands run either polled
 * (XS_CTL_POLL) or asynchronously under a timeout callout; completion
 * is handled by mpii_scsi_cmd_done().
 */
void
mpii_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_periph	*periph;
	struct scsipi_xfer	*xs;
	struct scsipi_adapter	*adapt = chan->chan_adapter;
	struct mpii_softc	*sc = device_private(adapt->adapt_dev);
	struct mpii_ccb		*ccb;
	struct mpii_msg_scsi_io	*io;
	struct mpii_device	*dev;
	int			target, timeout, ret;
	u_int16_t		dev_handle;

	DNPRINTF(MPII_D_CMD, "%s: mpii_scsipi_request\n", DEVNAME(sc));

	switch (req) {
	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
	{
		/* Tagged queueing only; no sync/wide negotiation here. */
		struct scsipi_xfer_mode *xm = arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	}
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	xs = arg;
	periph = xs->xs_periph;
	target = periph->periph_target;

	/* Oversized CDBs get an ILLEGAL REQUEST sense without hitting HW. */
	if (xs->cmdlen > MPII_CDB_LEN) {
		DNPRINTF(MPII_D_CMD, "%s: CDB too big %d\n",
		    DEVNAME(sc), xs->cmdlen);
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.scsi_sense.response_code =
		    SSD_RCODE_VALID | SSD_RCODE_CURRENT;
		xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.scsi_sense.asc = 0x20;
		xs->error = XS_SENSE;
		scsipi_done(xs);
		return;
	}

	/*
	 * Snapshot the device handle under the lock; the dev pointer is
	 * not used after the mutex is dropped.
	 * NOTE(review): assumes target < sc_max_devices — presumably
	 * guaranteed by the channel configuration; verify at attach.
	 */
	mutex_enter(&sc->sc_devs_mtx);
	if ((dev = sc->sc_devs[target]) == NULL) {
		mutex_exit(&sc->sc_devs_mtx);
		/* device no longer exists */
		xs->error = XS_SELTIMEOUT;
		scsipi_done(xs);
		return;
	}
	dev_handle = dev->dev_handle;
	mutex_exit(&sc->sc_devs_mtx);

	ccb = mpii_get_ccb(sc);
	if (ccb == NULL) {
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}
	DNPRINTF(MPII_D_CMD, "%s: ccb_smid: %d xs->cmd->opcode: 0x%02x xs->xs_control: 0x%x\n",
	    DEVNAME(sc), ccb->ccb_smid, xs->cmd->opcode, xs->xs_control);

	ccb->ccb_cookie = xs;
	ccb->ccb_done = mpii_scsi_cmd_done;
	ccb->ccb_dev_handle = dev_handle;

	/* Build the SCSI IO request frame. */
	io = ccb->ccb_cmd;
	memset(io, 0, sizeof(*io));
	io->function = MPII_FUNCTION_SCSI_IO_REQUEST;
	io->sense_buffer_length = sizeof(xs->sense);
	io->sgl_offset0 = sizeof(struct mpii_msg_scsi_io) / 4;
	io->io_flags = htole16(xs->cmdlen);
	io->dev_handle = htole16(ccb->ccb_dev_handle);
	io->lun[0] = htobe16(periph->periph_lun);

	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}

	io->tagging = MPII_SCSIIO_ATTR_SIMPLE_Q;

	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	io->data_length = htole32(xs->datalen);

	/* sense data is at the end of a request */
	io->sense_buffer_low_address = htole32(ccb->ccb_cmd_dva +
	    sc->sc_request_size - sizeof(struct scsi_sense_data));

	/* Build the SGL; SAS3 controllers use the IEEE SGL format. */
	if (ISSET(sc->sc_flags, MPII_F_SAS3))
		ret = mpii_load_xs_sas3(ccb);
	else
		ret = mpii_load_xs(ccb);

	if (ret != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		goto done;
	}

	if (xs->xs_control & XS_CTL_POLL) {
		if (mpii_poll(sc, ccb) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			goto done;
		}
		return;
	}
        timeout = mstohz(xs->timeout);
	if (timeout == 0)
		timeout = 1;	/* never arm a zero-tick callout */
	callout_reset(&xs->xs_callout, timeout, mpii_scsi_cmd_tmo, ccb);
	mpii_start(sc, ccb);
	return;
done:
	mpii_put_ccb(sc, ccb);
	scsipi_done(xs);
}
3090 
/*
 * Callout handler fired when a queued SCSI command exceeds its timeout.
 * Moves the ccb onto the softc's timeout list and kicks the timeout
 * workqueue; the actual recovery (target reset) happens in
 * mpii_scsi_cmd_tmo_handler() in thread context.
 */
void
mpii_scsi_cmd_tmo(void *xccb)
{
	struct mpii_ccb		*ccb = xccb;
	struct mpii_softc	*sc = ccb->ccb_sc;
	bool	start_work;

	printf("%s: mpii_scsi_cmd_tmo\n", DEVNAME(sc));

	/*
	 * NOTE(review): ccb_state is tested before taking sc_ssb_tmomtx;
	 * presumably completion and timeout are serialized elsewhere —
	 * confirm there is no window where the command completes between
	 * this check and the state change below.
	 */
	if (ccb->ccb_state == MPII_CCB_QUEUED) {
		mutex_enter(&sc->sc_ssb_tmomtx);
		/* only the first entry on an empty list schedules work */
		start_work = (SIMPLEQ_FIRST(&sc->sc_ccb_tmos) == 0);
		ccb->ccb_state = MPII_CCB_TIMEOUT;
		SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_tmos, ccb, ccb_link);
		if (start_work) {
			workqueue_enqueue(sc->sc_ssb_tmowk,
			    &sc->sc_ssb_tmowork, NULL);
		}
		mutex_exit(&sc->sc_ssb_tmomtx);
	}
}
3112 
3113 void
3114 mpii_scsi_cmd_tmo_handler(struct work *wk, void *cookie)
3115 {
3116 	struct mpii_softc			*sc = cookie;
3117 	struct mpii_ccb				*next;
3118 	struct mpii_ccb				*ccb;
3119 	struct mpii_ccb				*tccb;
3120 	struct mpii_msg_scsi_task_request	*stq;
3121 
3122 	mutex_enter(&sc->sc_ssb_tmomtx);
3123 	next = SIMPLEQ_FIRST(&sc->sc_ccb_tmos);
3124 	SIMPLEQ_INIT(&sc->sc_ccb_tmos);
3125 	mutex_exit(&sc->sc_ssb_tmomtx);
3126 
3127 	while (next != NULL) {
3128 		ccb = next;
3129 		next = SIMPLEQ_NEXT(ccb, ccb_link);
3130 		if (ccb->ccb_state != MPII_CCB_TIMEOUT)
3131 			continue;
3132 		tccb = mpii_get_ccb(sc);
3133 		stq = tccb->ccb_cmd;
3134 		stq->function = MPII_FUNCTION_SCSI_TASK_MGMT;
3135 		stq->task_type = MPII_SCSI_TASK_TARGET_RESET;
3136 		stq->dev_handle = htole16(ccb->ccb_dev_handle);
3137 
3138 		tccb->ccb_done = mpii_scsi_cmd_tmo_done;
3139 		mpii_wait(sc, tccb);
3140 	}
3141 }
3142 
3143 void
3144 mpii_scsi_cmd_tmo_done(struct mpii_ccb *tccb)
3145 {
3146 	mpii_put_ccb(tccb->ccb_sc, tccb);
3147 }
3148 
3149 static u_int8_t
3150 map_scsi_status(u_int8_t mpii_scsi_status)
3151 {
3152 	u_int8_t scsi_status;
3153 
3154 	switch (mpii_scsi_status)
3155 	{
3156 	case MPII_SCSIIO_STATUS_GOOD:
3157 		scsi_status = SCSI_OK;
3158 		break;
3159 
3160 	case MPII_SCSIIO_STATUS_CHECK_COND:
3161 		scsi_status = SCSI_CHECK;
3162 		break;
3163 
3164 	case MPII_SCSIIO_STATUS_BUSY:
3165 		scsi_status = SCSI_BUSY;
3166 		break;
3167 
3168 	case MPII_SCSIIO_STATUS_INTERMEDIATE:
3169 		scsi_status = SCSI_INTERM;
3170 		break;
3171 
3172 	case MPII_SCSIIO_STATUS_INTERMEDIATE_CONDMET:
3173 		scsi_status = SCSI_INTERM;
3174 		break;
3175 
3176 	case MPII_SCSIIO_STATUS_RESERVATION_CONFLICT:
3177 		scsi_status = SCSI_RESV_CONFLICT;
3178 		break;
3179 
3180 	case MPII_SCSIIO_STATUS_CMD_TERM:
3181 	case MPII_SCSIIO_STATUS_TASK_ABORTED:
3182 		scsi_status = SCSI_TERMINATED;
3183 		break;
3184 
3185 	case MPII_SCSIIO_STATUS_TASK_SET_FULL:
3186 		scsi_status = SCSI_QUEUE_FULL;
3187 		break;
3188 
3189 	case MPII_SCSIIO_STATUS_ACA_ACTIVE:
3190 		scsi_status = SCSI_ACA_ACTIVE;
3191 		break;
3192 
3193 	default:
3194 		/* XXX: for the lack of anything better and other than OK */
3195 		scsi_status = 0xFF;
3196 		break;
3197 	}
3198 
3199 	return scsi_status;
3200 }
3201 
3202 void
3203 mpii_scsi_cmd_done(struct mpii_ccb *ccb)
3204 {
3205 	struct mpii_msg_scsi_io_error	*sie;
3206 	struct mpii_softc	*sc = ccb->ccb_sc;
3207 	struct scsipi_xfer	*xs = ccb->ccb_cookie;
3208 	struct scsi_sense_data	*sense;
3209 	bus_dmamap_t		dmap = ccb->ccb_dmamap;
3210 	bool timeout = 1;
3211 
3212 	callout_stop(&xs->xs_callout);
3213 	if (ccb->ccb_state == MPII_CCB_TIMEOUT)
3214 		timeout = 1;
3215 	ccb->ccb_state = MPII_CCB_READY;
3216 
3217 	if (xs->datalen != 0) {
3218 		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
3219 		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD :
3220 		    BUS_DMASYNC_POSTWRITE);
3221 
3222 		bus_dmamap_unload(sc->sc_dmat, dmap);
3223 	}
3224 
3225 	KASSERT(xs->error == XS_NOERROR);
3226 	KASSERT(xs->resid == xs->datalen);
3227 	KASSERT(xs->status == SCSI_OK);
3228 
3229 	if (ccb->ccb_rcb == NULL) {
3230 		/* no scsi error, we're ok so drop out early */
3231 		xs->resid = 0;
3232 		goto done;
3233 	}
3234 
3235 	sie = ccb->ccb_rcb->rcb_reply;
3236 
3237 	DNPRINTF(MPII_D_CMD, "%s: mpii_scsi_cmd_done xs cmd: 0x%02x len: %d "
3238 	    "flags 0x%x\n", DEVNAME(sc), xs->cmd->opcode, xs->datalen,
3239 	    xs->xs_control);
3240 	DNPRINTF(MPII_D_CMD, "%s:  dev_handle: %d msg_length: %d "
3241 	    "function: 0x%02x\n", DEVNAME(sc), le16toh(sie->dev_handle),
3242 	    sie->msg_length, sie->function);
3243 	DNPRINTF(MPII_D_CMD, "%s:  vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
3244 	    sie->vp_id, sie->vf_id);
3245 	DNPRINTF(MPII_D_CMD, "%s:  scsi_status: 0x%02x scsi_state: 0x%02x "
3246 	    "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status,
3247 	    sie->scsi_state, le16toh(sie->ioc_status));
3248 	DNPRINTF(MPII_D_CMD, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
3249 	    le32toh(sie->ioc_loginfo));
3250 	DNPRINTF(MPII_D_CMD, "%s:  transfer_count: %d\n", DEVNAME(sc),
3251 	    le32toh(sie->transfer_count));
3252 	DNPRINTF(MPII_D_CMD, "%s:  sense_count: %d\n", DEVNAME(sc),
3253 	    le32toh(sie->sense_count));
3254 	DNPRINTF(MPII_D_CMD, "%s:  response_info: 0x%08x\n", DEVNAME(sc),
3255 	    le32toh(sie->response_info));
3256 	DNPRINTF(MPII_D_CMD, "%s:  task_tag: 0x%04x\n", DEVNAME(sc),
3257 	    le16toh(sie->task_tag));
3258 	DNPRINTF(MPII_D_CMD, "%s:  bidirectional_transfer_count: 0x%08x\n",
3259 	    DEVNAME(sc), le32toh(sie->bidirectional_transfer_count));
3260 
3261 	xs->status = map_scsi_status(sie->scsi_status);
3262 
3263 	switch (le16toh(sie->ioc_status) & MPII_IOCSTATUS_MASK) {
3264 	case MPII_IOCSTATUS_SCSI_DATA_UNDERRUN:
3265 		switch(sie->scsi_status) {
3266 		case MPII_SCSIIO_STATUS_CHECK_COND:
3267 			xs->error = XS_SENSE;
3268 			/* FALLTHROUGH */
3269 		case MPII_SCSIIO_STATUS_GOOD:
3270 			xs->resid = xs->datalen - le32toh(sie->transfer_count);
3271 			break;
3272 		default:
3273 			xs->error = XS_DRIVER_STUFFUP;
3274 			break;
3275 		}
3276 		break;
3277 
3278 	case MPII_IOCSTATUS_SUCCESS:
3279 	case MPII_IOCSTATUS_SCSI_RECOVERED_ERROR:
3280 		switch (sie->scsi_status) {
3281 		case MPII_SCSIIO_STATUS_GOOD:
3282 			xs->resid = 0;
3283 			break;
3284 
3285 		case MPII_SCSIIO_STATUS_CHECK_COND:
3286 			xs->resid = 0;
3287 			xs->error = XS_SENSE;
3288 			break;
3289 
3290 		case MPII_SCSIIO_STATUS_BUSY:
3291 		case MPII_SCSIIO_STATUS_TASK_SET_FULL:
3292 			xs->error = XS_BUSY;
3293 			break;
3294 
3295 		default:
3296 			xs->error = XS_DRIVER_STUFFUP;
3297 		}
3298 		break;
3299 
3300 	case MPII_IOCSTATUS_BUSY:
3301 	case MPII_IOCSTATUS_INSUFFICIENT_RESOURCES:
3302 		xs->error = XS_BUSY;
3303 		break;
3304 
3305 	case MPII_IOCSTATUS_SCSI_IOC_TERMINATED:
3306 	case MPII_IOCSTATUS_SCSI_TASK_TERMINATED:
3307 		xs->error = timeout ? XS_TIMEOUT : XS_RESET;
3308 		break;
3309 
3310 	case MPII_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
3311 	case MPII_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
3312 		xs->error = XS_SELTIMEOUT;
3313 		break;
3314 
3315 	default:
3316 		xs->error = XS_DRIVER_STUFFUP;
3317 		break;
3318 	}
3319 
3320 	sense = (struct scsi_sense_data *)((uintptr_t)ccb->ccb_cmd +
3321 	    sc->sc_request_size - sizeof(*sense));
3322 	if (sie->scsi_state & MPII_SCSIIO_STATE_AUTOSENSE_VALID)
3323 		memcpy(&xs->sense, sense, sizeof(xs->sense));
3324 
3325 	mpii_push_reply(sc, ccb->ccb_rcb);
3326 
3327  done:
3328 	mpii_put_ccb(sc, ccb);
3329 
3330 	DNPRINTF(MPII_D_CMD, "%s: xs err: %d status: %#x len: %d resid: %d\n",
3331 		 DEVNAME(sc), xs->error, xs->status, xs->datalen, xs->resid);
3332 
3333 	scsipi_done(xs);
3334 }
3335 
3336 #if 0
/*
 * mpii_scsi_ioctl: per-target ioctl entry point carried over from
 * OpenBSD (this whole region is compiled out with #if 0 in the NetBSD
 * port).  Routes the disk-cache ioctls for RAID volumes to
 * mpii_ioctl_cache() and everything else to the controller ioctl hook.
 */
int
mpii_scsi_ioctl(struct scsi_link *link, u_long cmd, void *addr, int flag)
{
	struct mpii_softc	*sc = (struct mpii_softc *)link->adapter_softc;
	struct mpii_device	*dev = sc->sc_devs[link->target];

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_scsi_ioctl\n", DEVNAME(sc));

	switch (cmd) {
	case DIOCGCACHE:
	case DIOCSCACHE:
		/* cache control is only meaningful for RAID volumes */
		if (dev != NULL && ISSET(dev->flags, MPII_DF_VOLUME)) {
			return (mpii_ioctl_cache(link, cmd,
			    (struct dk_cache *)addr));
		}
		break;

	default:
		/* fall back to the controller-wide ioctl hook, if any */
		if (sc->sc_ioctl)
			return (sc->sc_ioctl(link->adapter_softc, cmd, addr));

		break;
	}

	return (ENOTTY);
}
3363 
/*
 * mpii_ioctl_cache: get (DIOCGCACHE) or set (DIOCSCACHE) the write-cache
 * state of a RAID volume via a RAID action request.  Compiled out in the
 * NetBSD port (#if 0 region).
 */
int
mpii_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc)
{
	struct mpii_softc *sc = (struct mpii_softc *)link->adapter_softc;
	struct mpii_device *dev = sc->sc_devs[link->target];
	struct mpii_cfg_raid_vol_pg0 *vpg;
	struct mpii_msg_raid_action_request *req;
	struct mpii_msg_raid_action_reply *rep;
	struct mpii_cfg_hdr hdr;
	struct mpii_ccb	*ccb;
	u_int32_t addr = MPII_CFG_RAID_VOL_ADDR_HANDLE | dev->dev_handle;
	size_t pagelen;
	int rv = 0;
	int enabled;

	/* fetch RAID volume page 0 to learn the current cache setting */
	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    addr, MPII_PG_POLL, &hdr) != 0)
		return (EINVAL);

	/* page_length is in 32-bit words */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_ZERO);
	/* NOTE(review): M_WAITOK malloc does not return NULL; check is dead */
	if (vpg == NULL)
		return (ENOMEM);

	if (mpii_req_cfg_page(sc, addr, MPII_PG_POLL, &hdr, 1,
	    vpg, pagelen) != 0) {
		rv = EINVAL;
		goto done;
	}

	enabled = ((le16toh(vpg->volume_settings) &
	    MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_MASK) ==
	    MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_ENABLED) ? 1 : 0;

	if (cmd == DIOCGCACHE) {
		dc->wrcache = enabled;
		dc->rdcache = 0;
		goto done;
	} /* else DIOCSCACHE */

	/* read cache control is not supported by the hardware */
	if (dc->rdcache) {
		rv = EOPNOTSUPP;
		goto done;
	}

	/* nothing to do if the requested state is already in effect */
	if (((dc->wrcache) ? 1 : 0) == enabled)
		goto done;

	ccb = scsi_io_get(&sc->sc_iopool, SCSI_POLL);
	if (ccb == NULL) {
		rv = ENOMEM;
		goto done;
	}

	ccb->ccb_done = mpii_empty_done;

	req = ccb->ccb_cmd;
	memset(req, 0, sizeof(*req));
	req->function = MPII_FUNCTION_RAID_ACTION;
	req->action = MPII_RAID_ACTION_CHANGE_VOL_WRITE_CACHE;
	req->vol_dev_handle = htole16(dev->dev_handle);
	req->action_data = htole32(dc->wrcache ?
	    MPII_RAID_VOL_WRITE_CACHE_ENABLE :
	    MPII_RAID_VOL_WRITE_CACHE_DISABLE);

	/* NOTE(review): this error path skips scsi_io_put() and appears to
	 * leak the ccb — confirm before re-enabling this code */
	if (mpii_poll(sc, ccb) != 0) {
		rv = EIO;
		goto done;
	}

	if (ccb->ccb_rcb != NULL) {
		rep = ccb->ccb_rcb->rcb_reply;
		/* NOTE(review): ioc_status compared without le16toh here,
		 * unlike the completion path — verify endianness handling */
		if ((rep->ioc_status != MPII_IOCSTATUS_SUCCESS) ||
		    ((rep->action_data[0] &
		     MPII_RAID_VOL_WRITE_CACHE_MASK) !=
		    (dc->wrcache ? MPII_RAID_VOL_WRITE_CACHE_ENABLE :
		     MPII_RAID_VOL_WRITE_CACHE_DISABLE)))
			rv = EINVAL;
		mpii_push_reply(sc, ccb->ccb_rcb);
	}

	scsi_io_put(&sc->sc_iopool, ccb);

done:
	free(vpg, M_TEMP);
	return (rv);
}
3451 #endif /* 0 */
3452 
3453 #if NBIO > 0
3454 int
3455 mpii_ioctl(device_t dev, u_long cmd, void *addr)
3456 {
3457 	struct mpii_softc	*sc = device_private(dev);
3458 	int			error = 0;
3459 
3460 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl ", DEVNAME(sc));
3461 
3462 	switch (cmd) {
3463 	case BIOCINQ:
3464 		DNPRINTF(MPII_D_IOCTL, "inq\n");
3465 		error = mpii_ioctl_inq(sc, (struct bioc_inq *)addr);
3466 		break;
3467 	case BIOCVOL:
3468 		DNPRINTF(MPII_D_IOCTL, "vol\n");
3469 		error = mpii_ioctl_vol(sc, (struct bioc_vol *)addr);
3470 		break;
3471 	case BIOCDISK:
3472 		DNPRINTF(MPII_D_IOCTL, "disk\n");
3473 		error = mpii_ioctl_disk(sc, (struct bioc_disk *)addr);
3474 		break;
3475 	default:
3476 		DNPRINTF(MPII_D_IOCTL, " invalid ioctl\n");
3477 		error = ENOTTY;
3478 	}
3479 
3480 	return (error);
3481 }
3482 
3483 int
3484 mpii_ioctl_inq(struct mpii_softc *sc, struct bioc_inq *bi)
3485 {
3486 	int			i;
3487 
3488 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_inq\n", DEVNAME(sc));
3489 
3490 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
3491 	mutex_enter(&sc->sc_devs_mtx);
3492 	for (i = 0; i < sc->sc_max_devices; i++)
3493 		if (sc->sc_devs[i] &&
3494 		    ISSET(sc->sc_devs[i]->flags, MPII_DF_VOLUME))
3495 			bi->bi_novol++;
3496 	mutex_exit(&sc->sc_devs_mtx);
3497 	return (0);
3498 }
3499 
/*
 * BIOCVOL: fill in status, RAID level, disk count and size for the
 * volume identified by bv->bv_volid, using RAID volume page 0 fetched
 * from the IOC.
 */
int
mpii_ioctl_vol(struct mpii_softc *sc, struct bioc_vol *bv)
{
	struct mpii_cfg_raid_vol_pg0	*vpg;
	struct mpii_cfg_hdr		hdr;
	struct mpii_device		*dev;
	size_t				pagelen;
	u_int16_t			volh;
	int				rv, hcnt = 0;
	int				percent;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_vol %d\n",
	    DEVNAME(sc), bv->bv_volid);

	/* snapshot dev_handle and rebuild progress under the devs lock */
	mutex_enter(&sc->sc_devs_mtx);
	if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL) {
		mutex_exit(&sc->sc_devs_mtx);
		return (ENODEV);
	}
	volh = dev->dev_handle;
	percent = dev->percent;
	mutex_exit(&sc->sc_devs_mtx);

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
		printf("%s: unable to fetch header for raid volume page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is in 32-bit words */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_ZERO);
	if (vpg == NULL) {
		printf("%s: unable to allocate space for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
	    &hdr, 1, vpg, pagelen) != 0) {
		printf("%s: unable to fetch raid volume page 0\n",
		    DEVNAME(sc));
		free(vpg, M_TEMP);
		return (EINVAL);
	}

	/* translate the IOC volume state to a bio(4) status */
	switch (vpg->volume_state) {
	case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
	case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
		if (ISSET(le32toh(vpg->volume_status),
		    MPII_CFG_RAID_VOL_0_STATUS_RESYNC)) {
			bv->bv_status = BIOC_SVREBUILD;
			bv->bv_percent = percent;
		} else
			bv->bv_status = BIOC_SVDEGRADED;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_FAILED:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
		bv->bv_status = BIOC_SVBUILDING;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_MISSING:
	default:
		bv->bv_status = BIOC_SVINVALID;
		break;
	}

	/* translate the IOC volume type to a numeric RAID level */
	switch (vpg->volume_type) {
	case MPII_CFG_RAID_VOL_0_TYPE_RAID0:
		bv->bv_level = 0;
		break;
	case MPII_CFG_RAID_VOL_0_TYPE_RAID1:
		bv->bv_level = 1;
		break;
	case MPII_CFG_RAID_VOL_0_TYPE_RAID1E:
	case MPII_CFG_RAID_VOL_0_TYPE_RAID10:
		bv->bv_level = 10;
		break;
	default:
		bv->bv_level = -1;
	}

	/* hot spares in this volume's pool count toward bv_nodisk */
	if ((rv = mpii_bio_hs(sc, NULL, 0, vpg->hot_spare_pool, &hcnt)) != 0) {
		free(vpg, M_TEMP);
		return (rv);
	}

	bv->bv_nodisk = vpg->num_phys_disks + hcnt;

	bv->bv_size = le64toh(vpg->max_lba) * le16toh(vpg->block_size);

	free(vpg, M_TEMP);
	return (0);
}
3598 
/*
 * BIOCDISK: look up the physical disk bd->bd_diskid of volume
 * bd->bd_volid.  Disk ids beyond the volume's member count address the
 * hot spares in the volume's spare pool (handled by mpii_bio_hs()).
 */
int
mpii_ioctl_disk(struct mpii_softc *sc, struct bioc_disk *bd)
{
	struct mpii_cfg_raid_vol_pg0		*vpg;
	struct mpii_cfg_raid_vol_pg0_physdisk	*pd;
	struct mpii_cfg_hdr			hdr;
	struct mpii_device			*dev;
	size_t					pagelen;
	u_int16_t				volh;
	u_int8_t				dn;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_disk %d/%d\n",
	    DEVNAME(sc), bd->bd_volid, bd->bd_diskid);

	/* resolve volume id to a dev_handle under the devs lock */
	mutex_enter(&sc->sc_devs_mtx);
	if ((dev = mpii_find_vol(sc, bd->bd_volid)) == NULL) {
		mutex_exit(&sc->sc_devs_mtx);
		return (ENODEV);
	}
	volh = dev->dev_handle;
	mutex_exit(&sc->sc_devs_mtx);

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
		printf("%s: unable to fetch header for raid volume page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is in 32-bit words */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_ZERO);
	if (vpg == NULL) {
		printf("%s: unable to allocate space for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
	    &hdr, 1, vpg, pagelen) != 0) {
		printf("%s: unable to fetch raid volume page 0\n",
		    DEVNAME(sc));
		free(vpg, M_TEMP);
		return (EINVAL);
	}

	/* beyond the member disks: treat the id as a hot-spare index */
	if (bd->bd_diskid >= vpg->num_phys_disks) {
		int		nvdsk = vpg->num_phys_disks;
		int		hsmap = vpg->hot_spare_pool;

		free(vpg, M_TEMP);
		return (mpii_bio_hs(sc, bd, nvdsk, hsmap, NULL));
	}

	/* per-disk records follow the fixed part of the page */
	pd = (struct mpii_cfg_raid_vol_pg0_physdisk *)(vpg + 1) +
	    bd->bd_diskid;
	dn = pd->phys_disk_num;

	free(vpg, M_TEMP);
	return (mpii_bio_disk(sc, bd, dn));
}
3659 
/*
 * Walk the active RAID configuration looking for hot-spare disks in
 * pool 'hsmap'.  Dual purpose: when 'bd' is non-NULL, resolve the
 * bio(4) disk id (bd_diskid - nvdsk) to a physical disk and fill in
 * 'bd' via mpii_bio_disk(); when 'hscnt' is non-NULL, return the total
 * number of matching hot spares through it.
 */
int
mpii_bio_hs(struct mpii_softc *sc, struct bioc_disk *bd, int nvdsk,
     int hsmap, int *hscnt)
{
	struct mpii_cfg_raid_config_pg0	*cpg;
	struct mpii_raid_config_element	*el;
	struct mpii_ecfg_hdr		ehdr;
	size_t				pagelen;
	int				i, nhs = 0;

	if (bd) {
		DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs %d\n", DEVNAME(sc),
		    bd->bd_diskid - nvdsk);
	} else {
		DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs\n", DEVNAME(sc));
	}

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_CONFIG,
	    0, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG, MPII_PG_EXTENDED,
	    &ehdr) != 0) {
		printf("%s: unable to fetch header for raid config page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* ext_page_length is in 32-bit words */
	pagelen = le16toh(ehdr.ext_page_length) * 4;
	cpg = malloc(pagelen, M_TEMP, M_WAITOK | M_ZERO);
	if (cpg == NULL) {
		printf("%s: unable to allocate space for raid config page 0\n",
		    DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG,
	    MPII_PG_EXTENDED, &ehdr, 1, cpg, pagelen) != 0) {
		printf("%s: unable to fetch raid config page 0\n",
		    DEVNAME(sc));
		free(cpg, M_TEMP);
		return (EINVAL);
	}

	/* config elements follow the fixed part of the page */
	el = (struct mpii_raid_config_element *)(cpg + 1);
	for (i = 0; i < cpg->num_elements; i++, el++) {
		if (ISSET(le16toh(el->element_flags),
		    MPII_RAID_CONFIG_ELEMENT_FLAG_HSP_PHYS_DISK) &&
		    el->hot_spare_pool == hsmap) {
			/*
			 * diskid comparison is based on the idea that all
			 * disks are counted by the bio(4) in sequence, thus
			 * subtracting the number of disks in the volume
			 * from the diskid yields us a "relative" hotspare
			 * number, which is good enough for us.
			 */
			if (bd != NULL && bd->bd_diskid == nhs + nvdsk) {
				u_int8_t dn = el->phys_disk_num;

				free(cpg, M_TEMP);
				return (mpii_bio_disk(sc, bd, dn));
			}
			nhs++;
		}
	}

	if (hscnt)
		*hscnt = nhs;

	free(cpg, M_TEMP);
	return (0);
}
3729 
/*
 * Fill in a bioc_disk record for physical disk number 'dn' from RAID
 * physical disk page 0: bio status, size, and a printable
 * vendor/product/serial description.
 */
int
mpii_bio_disk(struct mpii_softc *sc, struct bioc_disk *bd, u_int8_t dn)
{
	struct mpii_cfg_raid_physdisk_pg0	*ppg;
	struct mpii_cfg_hdr			hdr;
	struct mpii_device			*dev;
	int					len;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_disk %d\n", DEVNAME(sc),
	    bd->bd_diskid);

	ppg = malloc(sizeof(*ppg), M_TEMP, M_WAITOK | M_ZERO);
	if (ppg == NULL) {
		printf("%s: unable to allocate space for raid physical disk "
		    "page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	/* build the config header by hand; page_length is in words */
	hdr.page_version = 0;
	hdr.page_length = sizeof(*ppg) / 4;
	hdr.page_number = 0;
	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_RAID_PD;

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_PHYS_DISK_ADDR_NUMBER | dn, 0,
	    &hdr, 1, ppg, sizeof(*ppg)) != 0) {
		printf("%s: unable to fetch raid drive page 0\n",
		    DEVNAME(sc));
		free(ppg, M_TEMP);
		return (EINVAL);
	}

	bd->bd_target = ppg->phys_disk_num;

	/* the disk must still be present in the device table */
	mutex_enter(&sc->sc_devs_mtx);
	if ((dev = mpii_find_dev(sc, le16toh(ppg->dev_handle))) == NULL) {
		mutex_exit(&sc->sc_devs_mtx);
		bd->bd_status = BIOC_SDINVALID;
		free(ppg, M_TEMP);
		return (0);
	}
	mutex_exit(&sc->sc_devs_mtx);

	/* translate the IOC physical disk state to a bio(4) status */
	switch (ppg->phys_disk_state) {
	case MPII_CFG_RAID_PHYDISK_0_STATE_ONLINE:
	case MPII_CFG_RAID_PHYDISK_0_STATE_OPTIMAL:
		bd->bd_status = BIOC_SDONLINE;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_OFFLINE:
		if (ppg->offline_reason ==
		    MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILED ||
		    ppg->offline_reason ==
		    MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILEDREQ)
			bd->bd_status = BIOC_SDFAILED;
		else
			bd->bd_status = BIOC_SDOFFLINE;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_DEGRADED:
		bd->bd_status = BIOC_SDFAILED;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_REBUILDING:
		bd->bd_status = BIOC_SDREBUILD;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_HOTSPARE:
		bd->bd_status = BIOC_SDHOTSPARE;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCONFIGURED:
		bd->bd_status = BIOC_SDUNUSED;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCOMPATIBLE:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = le64toh(ppg->dev_max_lba) * le16toh(ppg->block_size);

	/* render "vendor product" into bd_vendor, serial into bd_serial */
	strnvisx(bd->bd_vendor, sizeof(bd->bd_vendor),
	    ppg->vendor_id, sizeof(ppg->vendor_id),
	    VIS_TRIM|VIS_SAFE|VIS_OCTAL);
	len = strlen(bd->bd_vendor);
	bd->bd_vendor[len] = ' ';
	/*
	 * NOTE(review): the destination size below is derived from the
	 * SOURCE field (sizeof(ppg->vendor_id)) rather than the remaining
	 * room in bd->bd_vendor — looks like it should be
	 * sizeof(bd->bd_vendor) - len - 1; confirm against struct bioc_disk.
	 */
	strnvisx(&bd->bd_vendor[len + 1], sizeof(ppg->vendor_id) - len - 1,
	    ppg->product_id, sizeof(ppg->product_id),
	    VIS_TRIM|VIS_SAFE|VIS_OCTAL);
	strnvisx(bd->bd_serial, sizeof(bd->bd_serial),
	    ppg->serial, sizeof(ppg->serial), VIS_TRIM|VIS_SAFE|VIS_OCTAL);

	free(ppg, M_TEMP);
	return (0);
}
3820 
3821 struct mpii_device *
3822 mpii_find_vol(struct mpii_softc *sc, int volid)
3823 {
3824 	struct mpii_device	*dev = NULL;
3825 
3826 	KASSERT(mutex_owned(&sc->sc_devs_mtx));
3827 	if (sc->sc_vd_id_low + volid >= sc->sc_max_devices)
3828 		return (NULL);
3829 	dev = sc->sc_devs[sc->sc_vd_id_low + volid];
3830 	if (dev && ISSET(dev->flags, MPII_DF_VOLUME))
3831 		return (dev);
3832 	return (NULL);
3833 }
3834 
3835 /*
3836  * Non-sleeping lightweight version of the mpii_ioctl_vol
3837  */
int
mpii_bio_volstate(struct mpii_softc *sc, struct bioc_vol *bv)
{
	struct mpii_cfg_raid_vol_pg0	*vpg;
	struct mpii_cfg_hdr		hdr;
	struct mpii_device		*dev = NULL;
	size_t				pagelen;
	u_int16_t			volh;

	/* resolve volume id to a dev_handle under the devs lock */
	mutex_enter(&sc->sc_devs_mtx);
	if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL) {
		mutex_exit(&sc->sc_devs_mtx);
		return (ENODEV);
	}
	volh = dev->dev_handle;
	mutex_exit(&sc->sc_devs_mtx);

	/* MPII_PG_POLL: polled config request, no sleeping */
	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, MPII_PG_POLL, &hdr) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: unable to fetch header for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is in 32-bit words; M_NOWAIT keeps this non-sleeping */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_NOWAIT | M_ZERO);
	if (vpg == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: unable to allocate space for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh,
	    MPII_PG_POLL, &hdr, 1, vpg, pagelen) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: unable to fetch raid volume "
		    "page 0\n", DEVNAME(sc));
		free(vpg, M_TEMP);
		return (EINVAL);
	}

	/* translate the IOC volume state to a bio(4) status */
	switch (vpg->volume_state) {
	case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
	case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
		if (ISSET(le32toh(vpg->volume_status),
		    MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
			bv->bv_status = BIOC_SVREBUILD;
		else
			bv->bv_status = BIOC_SVDEGRADED;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_FAILED:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
		bv->bv_status = BIOC_SVBUILDING;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_MISSING:
	default:
		bv->bv_status = BIOC_SVINVALID;
		break;
	}

	free(vpg, M_TEMP);
	return (0);
}
3905 
3906 int
3907 mpii_create_sensors(struct mpii_softc *sc)
3908 {
3909 	int			i, rv;
3910 
3911 	DNPRINTF(MPII_D_MISC, "%s: mpii_create_sensors(%d)\n",
3912 	    DEVNAME(sc), sc->sc_max_volumes);
3913 	sc->sc_sme = sysmon_envsys_create();
3914 	sc->sc_sensors = malloc(sizeof(envsys_data_t) * sc->sc_max_volumes,
3915 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3916 	if (sc->sc_sensors == NULL) {
3917 		aprint_error_dev(sc->sc_dev, "can't allocate envsys_data_t\n");
3918 		return (1);
3919 	}
3920 
3921 	for (i = 0; i < sc->sc_max_volumes; i++) {
3922 		sc->sc_sensors[i].units = ENVSYS_DRIVE;
3923 		sc->sc_sensors[i].state = ENVSYS_SINVALID;
3924 		sc->sc_sensors[i].value_cur = ENVSYS_DRIVE_EMPTY;
3925 		sc->sc_sensors[i].flags |= ENVSYS_FMONSTCHANGED;
3926 
3927 		/* logical drives */
3928 		snprintf(sc->sc_sensors[i].desc,
3929 		    sizeof(sc->sc_sensors[i].desc), "%s:%d",
3930 		    DEVNAME(sc), i);
3931 												if ((rv = sysmon_envsys_sensor_attach(sc->sc_sme,
3932 		    &sc->sc_sensors[i])) != 0) {
3933 			aprint_error_dev(sc->sc_dev,
3934 			    "unable to attach sensor (rv = %d)\n", rv);
3935 			goto out;
3936 												}
3937 	}
3938 	sc->sc_sme->sme_name =  DEVNAME(sc);
3939 	sc->sc_sme->sme_cookie = sc;
3940 	sc->sc_sme->sme_refresh = mpii_refresh_sensors;
3941 
3942 	rv = sysmon_envsys_register(sc->sc_sme);
3943 	if (rv != 0) {
3944 		aprint_error_dev(sc->sc_dev,
3945 		    "unable to register with sysmon (rv = %d)\n", rv);
3946 		goto out;
3947 	}
3948 	return 0;
3949 
3950 out:
3951 	free(sc->sc_sensors, M_DEVBUF);
3952 	sysmon_envsys_destroy(sc->sc_sme);
3953 	sc->sc_sme = NULL;
3954 	return 1;
3955 }
3956 
/*
 * Tear down the envsys state created by mpii_create_sensors().
 * Safe to call when no sensors were ever created (sc_sme == NULL).
 */
int
mpii_destroy_sensors(struct mpii_softc *sc)
{
	if (sc->sc_sme == NULL)
		return 0;
	/* unregister before freeing the sensor array it references */
	sysmon_envsys_unregister(sc->sc_sme);
	sc->sc_sme = NULL;
	free(sc->sc_sensors, M_DEVBUF);
	return 0;

}
3968 
3969 void
3970 mpii_refresh_sensors(struct sysmon_envsys *sme, envsys_data_t *edata)
3971 {
3972 	struct mpii_softc	*sc = sme->sme_cookie;
3973 	struct bioc_vol		bv;
3974 
3975 	memset(&bv, 0, sizeof(bv));
3976 	bv.bv_volid = edata->sensor;
3977 	if (mpii_bio_volstate(sc, &bv))
3978 		bv.bv_status = BIOC_SVINVALID;
3979 	bio_vol_to_envsys(edata, &bv);
3980 }
3981 #endif /* NBIO > 0 */
3982