xref: /openbsd-src/sys/dev/pci/mpii.c (revision 68dd5bb1859285b71cb62a10bf107b8ad54064d9)
1 /*	$OpenBSD: mpii.c,v 1.147 2023/11/29 06:54:09 jmatthew Exp $	*/
2 /*
3  * Copyright (c) 2010, 2012 Mike Belopuhov
4  * Copyright (c) 2009 James Giannoules
5  * Copyright (c) 2005 - 2010 David Gwynne <dlg@openbsd.org>
6  * Copyright (c) 2005 - 2010 Marco Peereboom <marco@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include "bio.h"
22 
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/device.h>
26 #include <sys/ioctl.h>
27 #include <sys/malloc.h>
28 #include <sys/kernel.h>
29 #include <sys/rwlock.h>
30 #include <sys/sensors.h>
31 #include <sys/dkio.h>
32 #include <sys/tree.h>
33 #include <sys/task.h>
34 
35 #include <machine/bus.h>
36 
37 #include <dev/pci/pcireg.h>
38 #include <dev/pci/pcivar.h>
39 #include <dev/pci/pcidevs.h>
40 
41 #include <scsi/scsi_all.h>
42 #include <scsi/scsiconf.h>
43 
44 #include <dev/biovar.h>
45 
46 #include <dev/pci/mpiireg.h>
47 
48 /* #define MPII_DEBUG */
49 #ifdef MPII_DEBUG
50 #define DPRINTF(x...)		do { if (mpii_debug) printf(x); } while(0)
51 #define DNPRINTF(n,x...)	do { if (mpii_debug & (n)) printf(x); } while(0)
52 #define	MPII_D_CMD		(0x0001)
53 #define	MPII_D_INTR		(0x0002)
54 #define	MPII_D_MISC		(0x0004)
55 #define	MPII_D_DMA		(0x0008)
56 #define	MPII_D_IOCTL		(0x0010)
57 #define	MPII_D_RW		(0x0020)
58 #define	MPII_D_MEM		(0x0040)
59 #define	MPII_D_CCB		(0x0080)
60 #define	MPII_D_PPR		(0x0100)
61 #define	MPII_D_RAID		(0x0200)
62 #define	MPII_D_EVT		(0x0400)
63 #define MPII_D_CFG		(0x0800)
64 #define MPII_D_MAP		(0x1000)
65 
66 u_int32_t  mpii_debug = 0
67 		| MPII_D_CMD
68 		| MPII_D_INTR
69 		| MPII_D_MISC
70 		| MPII_D_DMA
71 		| MPII_D_IOCTL
72 		| MPII_D_RW
73 		| MPII_D_MEM
74 		| MPII_D_CCB
75 		| MPII_D_PPR
76 		| MPII_D_RAID
77 		| MPII_D_EVT
78 		| MPII_D_CFG
79 		| MPII_D_MAP
80 	;
81 #else
82 #define DPRINTF(x...)
83 #define DNPRINTF(n,x...)
84 #endif
85 
86 #define MPII_REQUEST_SIZE		(512)
87 #define MPII_REQUEST_CREDIT		(128)
88 
/*
 * A single bus_dma allocation: one segment mapped into kernel virtual
 * address space.  Accessed via the MPII_DMA_* macros below.
 */
struct mpii_dmamem {
	bus_dmamap_t		mdm_map;	/* DMA map for the segment */
	bus_dma_segment_t	mdm_seg;	/* the (single) raw segment */
	size_t			mdm_size;	/* size of the allocation */
	caddr_t			mdm_kva;	/* kernel virtual address */
};
95 #define MPII_DMA_MAP(_mdm) ((_mdm)->mdm_map)
96 #define MPII_DMA_DVA(_mdm) ((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
97 #define MPII_DMA_KVA(_mdm) ((void *)(_mdm)->mdm_kva)
98 
99 struct mpii_softc;
100 
/* Reply control block: tracks one hardware reply frame. */
struct mpii_rcb {
	SIMPLEQ_ENTRY(mpii_rcb)	rcb_link;	/* entry on an mpii_rcb_list */
	void			*rcb_reply;	/* kva of the reply frame */
	u_int32_t		rcb_reply_dva;	/* device address of the frame */
};
106 
107 SIMPLEQ_HEAD(mpii_rcb_list, mpii_rcb);
108 
/*
 * Per-target state, indexed by SCSI target id via sc_devs[] (see
 * mpii_scsi_probe).  One entry per device handle reported by the IOC.
 */
struct mpii_device {
	int			flags;
#define MPII_DF_ATTACH		(0x0001)
#define MPII_DF_DETACH		(0x0002)
#define MPII_DF_HIDDEN		(0x0004)	/* not probed as a scsi target */
#define MPII_DF_UNUSED		(0x0008)	/* not probed as a scsi target */
#define MPII_DF_VOLUME		(0x0010)	/* RAID volume (see mpii_scsi_probe) */
#define MPII_DF_VOLUME_DISK	(0x0020)
#define MPII_DF_HOT_SPARE	(0x0040)
	short			slot;
	short			percent;	/* presumably rebuild progress — confirm in bio code */
	u_int16_t		dev_handle;	/* IOC device handle */
	u_int16_t		enclosure;	/* enclosure handle — TODO confirm */
	u_int16_t		expander;	/* expander handle — TODO confirm */
	u_int8_t		phy_num;
	u_int8_t		physical_port;
};
126 
/*
 * Command control block: one per outstanding request frame.  The frame
 * itself lives in the sc_requests dmamem at ccb_offset.
 */
struct mpii_ccb {
	struct mpii_softc	*ccb_sc;	/* owning controller */

	void *			ccb_cookie;	/* caller context (scsi_xfer in the I/O path) */
	bus_dmamap_t		ccb_dmamap;	/* map for the data transfer */

	bus_addr_t		ccb_offset;	/* request frame offset in sc_requests */
	void			*ccb_cmd;	/* kva of the request frame */
	bus_addr_t		ccb_cmd_dva;	/* device address of the frame */
	u_int16_t		ccb_dev_handle;
	u_int16_t		ccb_smid;	/* system message id; smid - 1 indexes sc_ccbs */

	/* lifecycle; transitions protected by sc_ccb_mtx (see softc) */
	volatile enum {
		MPII_CCB_FREE,
		MPII_CCB_READY,
		MPII_CCB_QUEUED,
		MPII_CCB_TIMEOUT
	}			ccb_state;

	void			(*ccb_done)(struct mpii_ccb *); /* completion callback */
	struct mpii_rcb		*ccb_rcb;	/* reply frame, if any, set at completion */

	SIMPLEQ_ENTRY(mpii_ccb)	ccb_link;	/* free list / completion list linkage */
};
151 
152 SIMPLEQ_HEAD(mpii_ccb_list, mpii_ccb);
153 
/* Per-controller state. */
struct mpii_softc {
	struct device		sc_dev;		/* base device; must be first */

	/* pci attachment */
	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	void			*sc_ih;		/* interrupt handler cookie */

	int			sc_flags;
#define MPII_F_RAID		(1<<1)		/* IR firmware; enables bio(4) hooks */
#define MPII_F_SAS3		(1<<2)		/* presumably selects the IEEE SGE path
						   (mpii_load_xs_sas3) — confirm in iocfacts */
#define MPII_F_AERO		(1<<3)		/* Aero/Sea chips; register reads retried
						   (see mpii_read) */

	struct scsibus_softc	*sc_scsibus;
	unsigned int		sc_pending;	/* nonzero while autoconf waits for the
						   first sas discovery to complete */

	struct mpii_device	**sc_devs;	/* sc_max_devices entries, indexed by target */

	/* register window and dma tag */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	struct mutex		sc_req_mtx;	/* serialises request submission */
	struct mutex		sc_rep_mtx;	/* serialises reply post queue processing */

	/* sizes and limits negotiated with the IOC */
	ushort			sc_reply_size;
	ushort			sc_request_size;

	ushort			sc_max_cmds;
	ushort			sc_num_reply_frames;
	u_int			sc_reply_free_qdepth;
	u_int			sc_reply_post_qdepth;

	/* scatter-gather geometry for the request frame */
	ushort			sc_chain_sge;	/* index of the chain SGE slot */
	ushort			sc_max_sgl;
	int			sc_max_chain;

	u_int8_t		sc_ioc_event_replay;

	/* facts reported by the IOC */
	u_int8_t		sc_porttype;
	u_int8_t		sc_max_volumes;
	u_int16_t		sc_max_devices;
	u_int16_t		sc_vd_count;
	u_int16_t		sc_vd_id_low;
	u_int16_t		sc_pd_id_start;
	int			sc_ioc_number;
	u_int8_t		sc_vf_id;

	struct mpii_ccb		*sc_ccbs;	/* sc_max_cmds - 1 ccbs, indexed by smid - 1 */
	struct mpii_ccb_list	sc_ccb_free;
	struct mutex		sc_ccb_free_mtx;

	struct mutex		sc_ccb_mtx;
				/*
				 * this protects the ccb state and list entry
				 * between mpii_scsi_cmd and scsidone.
				 */

	struct mpii_ccb_list	sc_ccb_tmos;	/* timed-out ccbs awaiting abort */
	struct scsi_iohandler	sc_ccb_tmo_handler;

	struct scsi_iopool	sc_iopool;

	struct mpii_dmamem	*sc_requests;	/* request frames */

	struct mpii_dmamem	*sc_replies;	/* reply frames */
	struct mpii_rcb		*sc_rcbs;

	/* reply post queue: 8-byte descriptors written by the IOC */
	struct mpii_dmamem	*sc_reply_postq;
	struct mpii_reply_descr	*sc_reply_postq_kva;
	u_int			sc_reply_post_host_index;

	/* reply free queue: 4-byte frame addresses returned to the IOC */
	struct mpii_dmamem	*sc_reply_freeq;
	u_int			sc_reply_free_host_index;

	/* sas events deferred to task context */
	struct mpii_rcb_list	sc_evt_sas_queue;
	struct mutex		sc_evt_sas_mtx;
	struct task		sc_evt_sas_task;

	/* events that need an ack sent back to the IOC */
	struct mpii_rcb_list	sc_evt_ack_queue;
	struct mutex		sc_evt_ack_mtx;
	struct scsi_iohandler	sc_evt_ack_handler;

	/* scsi ioctl from sd device */
	int			(*sc_ioctl)(struct device *, u_long, caddr_t);

	/* hardware sensors (one per volume; see bio/sensor code) */
	int			sc_nsensors;
	struct ksensor		*sc_sensors;
	struct ksensordev	sc_sensordev;
};
245 
246 int	mpii_match(struct device *, void *, void *);
247 void	mpii_attach(struct device *, struct device *, void *);
248 int	mpii_detach(struct device *, int);
249 
250 int	mpii_intr(void *);
251 
252 const struct cfattach mpii_ca = {
253 	sizeof(struct mpii_softc),
254 	mpii_match,
255 	mpii_attach,
256 	mpii_detach
257 };
258 
259 struct cfdriver mpii_cd = {
260 	NULL,
261 	"mpii",
262 	DV_DULL
263 };
264 
265 void		mpii_scsi_cmd(struct scsi_xfer *);
266 void		mpii_scsi_cmd_done(struct mpii_ccb *);
267 int		mpii_scsi_probe(struct scsi_link *);
268 int		mpii_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
269 
270 const struct scsi_adapter mpii_switch = {
271 	mpii_scsi_cmd, NULL, mpii_scsi_probe, NULL, mpii_scsi_ioctl
272 };
273 
274 struct mpii_dmamem *
275 		mpii_dmamem_alloc(struct mpii_softc *, size_t);
276 void		mpii_dmamem_free(struct mpii_softc *,
277 		    struct mpii_dmamem *);
278 int		mpii_alloc_ccbs(struct mpii_softc *);
279 void *		mpii_get_ccb(void *);
280 void		mpii_put_ccb(void *, void *);
281 int		mpii_alloc_replies(struct mpii_softc *);
282 int		mpii_alloc_queues(struct mpii_softc *);
283 void		mpii_push_reply(struct mpii_softc *, struct mpii_rcb *);
284 void		mpii_push_replies(struct mpii_softc *);
285 
286 void		mpii_scsi_cmd_tmo(void *);
287 void		mpii_scsi_cmd_tmo_handler(void *, void *);
288 void		mpii_scsi_cmd_tmo_done(struct mpii_ccb *);
289 
290 int		mpii_insert_dev(struct mpii_softc *, struct mpii_device *);
291 int		mpii_remove_dev(struct mpii_softc *, struct mpii_device *);
292 struct mpii_device *
293 		mpii_find_dev(struct mpii_softc *, u_int16_t);
294 
295 void		mpii_start(struct mpii_softc *, struct mpii_ccb *);
296 int		mpii_poll(struct mpii_softc *, struct mpii_ccb *);
297 void		mpii_poll_done(struct mpii_ccb *);
298 struct mpii_rcb *
299 		mpii_reply(struct mpii_softc *, struct mpii_reply_descr *);
300 
301 void		mpii_wait(struct mpii_softc *, struct mpii_ccb *);
302 void		mpii_wait_done(struct mpii_ccb *);
303 
304 void		mpii_init_queues(struct mpii_softc *);
305 
306 int		mpii_load_xs(struct mpii_ccb *);
307 int		mpii_load_xs_sas3(struct mpii_ccb *);
308 
309 u_int32_t	mpii_read(struct mpii_softc *, bus_size_t);
310 void		mpii_write(struct mpii_softc *, bus_size_t, u_int32_t);
311 int		mpii_wait_eq(struct mpii_softc *, bus_size_t, u_int32_t,
312 		    u_int32_t);
313 int		mpii_wait_ne(struct mpii_softc *, bus_size_t, u_int32_t,
314 		    u_int32_t);
315 
316 int		mpii_init(struct mpii_softc *);
317 int		mpii_reset_soft(struct mpii_softc *);
318 int		mpii_reset_hard(struct mpii_softc *);
319 
320 int		mpii_handshake_send(struct mpii_softc *, void *, size_t);
321 int		mpii_handshake_recv_dword(struct mpii_softc *,
322 		    u_int32_t *);
323 int		mpii_handshake_recv(struct mpii_softc *, void *, size_t);
324 
325 void		mpii_empty_done(struct mpii_ccb *);
326 
327 int		mpii_iocinit(struct mpii_softc *);
328 int		mpii_iocfacts(struct mpii_softc *);
329 int		mpii_portfacts(struct mpii_softc *);
330 int		mpii_portenable(struct mpii_softc *);
331 int		mpii_cfg_coalescing(struct mpii_softc *);
332 int		mpii_board_info(struct mpii_softc *);
333 int		mpii_target_map(struct mpii_softc *);
334 
335 int		mpii_eventnotify(struct mpii_softc *);
336 void		mpii_eventnotify_done(struct mpii_ccb *);
337 void		mpii_eventack(void *, void *);
338 void		mpii_eventack_done(struct mpii_ccb *);
339 void		mpii_event_process(struct mpii_softc *, struct mpii_rcb *);
340 void		mpii_event_done(struct mpii_softc *, struct mpii_rcb *);
341 void		mpii_event_sas(void *);
342 void		mpii_event_raid(struct mpii_softc *,
343 		    struct mpii_msg_event_reply *);
344 void		mpii_event_discovery(struct mpii_softc *,
345 		    struct mpii_msg_event_reply *);
346 
347 void		mpii_sas_remove_device(struct mpii_softc *, u_int16_t);
348 
349 int		mpii_req_cfg_header(struct mpii_softc *, u_int8_t,
350 		    u_int8_t, u_int32_t, int, void *);
351 int		mpii_req_cfg_page(struct mpii_softc *, u_int32_t, int,
352 		    void *, int, void *, size_t);
353 
354 int		mpii_ioctl_cache(struct scsi_link *, u_long, struct dk_cache *);
355 
356 #if NBIO > 0
357 int		mpii_ioctl(struct device *, u_long, caddr_t);
358 int		mpii_ioctl_inq(struct mpii_softc *, struct bioc_inq *);
359 int		mpii_ioctl_vol(struct mpii_softc *, struct bioc_vol *);
360 int		mpii_ioctl_disk(struct mpii_softc *, struct bioc_disk *);
361 int		mpii_bio_hs(struct mpii_softc *, struct bioc_disk *, int,
362 		    int, int *);
363 int		mpii_bio_disk(struct mpii_softc *, struct bioc_disk *,
364 		    u_int8_t);
365 struct mpii_device *
366 		mpii_find_vol(struct mpii_softc *, int);
367 #ifndef SMALL_KERNEL
368  int		mpii_bio_volstate(struct mpii_softc *, struct bioc_vol *);
369 int		mpii_create_sensors(struct mpii_softc *);
370 void		mpii_refresh_sensors(void *);
371 #endif /* SMALL_KERNEL */
372 #endif /* NBIO > 0 */
373 
374 #define DEVNAME(s)		((s)->sc_dev.dv_xname)
375 
376 #define dwordsof(s)		(sizeof(s) / sizeof(u_int32_t))
377 
378 #define mpii_read_db(s)		mpii_read((s), MPII_DOORBELL)
379 #define mpii_write_db(s, v)	mpii_write((s), MPII_DOORBELL, (v))
380 #define mpii_read_intr(s)	mpii_read((s), MPII_INTR_STATUS)
381 #define mpii_write_intr(s, v)	mpii_write((s), MPII_INTR_STATUS, (v))
382 #define mpii_reply_waiting(s)	((mpii_read_intr((s)) & MPII_INTR_STATUS_REPLY)\
383 				    == MPII_INTR_STATUS_REPLY)
384 
385 #define mpii_write_reply_free(s, v) \
386     bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
387     MPII_REPLY_FREE_HOST_INDEX, (v))
388 #define mpii_write_reply_post(s, v) \
389     bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
390     MPII_REPLY_POST_HOST_INDEX, (v))
391 
392 #define mpii_wait_db_int(s)	mpii_wait_ne((s), MPII_INTR_STATUS, \
393 				    MPII_INTR_STATUS_IOC2SYSDB, 0)
394 #define mpii_wait_db_ack(s)	mpii_wait_eq((s), MPII_INTR_STATUS, \
395 				    MPII_INTR_STATUS_SYS2IOCDB, 0)
396 
397 static inline void
398 mpii_dvatosge(struct mpii_sge *sge, u_int64_t dva)
399 {
400 	htolem32(&sge->sg_addr_lo, dva);
401 	htolem32(&sge->sg_addr_hi, dva >> 32);
402 }
403 
404 #define MPII_PG_EXTENDED	(1<<0)
405 #define MPII_PG_POLL		(1<<1)
406 #define MPII_PG_FMT		"\020" "\002POLL" "\001EXTENDED"
407 
408 static const struct pci_matchid mpii_devices[] = {
409 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2004 },
410 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2008 },
411 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SSS6200 },
412 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_3 },
413 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_4 },
414 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_5 },
415 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2116_1 },
416 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2116_2 },
417 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_1 },
418 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_2 },
419 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_3 },
420 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_4 },
421 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_5 },
422 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_6 },
423 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_1 },
424 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_2 },
425 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_3 },
426 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3004 },
427 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3008 },
428 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_1 },
429 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_2 },
430 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_3 },
431 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_4 },
432 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3408 },
433 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3416 },
434 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3508 },
435 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3508_1 },
436 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3516 },
437 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3516_1 },
438 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS38XX },
439 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS38XX_1 },
440 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS39XX },
441 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS39XX_1 },
442 };
443 
444 int
445 mpii_match(struct device *parent, void *match, void *aux)
446 {
447 	return (pci_matchbyid(aux, mpii_devices, nitems(mpii_devices)));
448 }
449 
450 void
451 mpii_attach(struct device *parent, struct device *self, void *aux)
452 {
453 	struct mpii_softc		*sc = (struct mpii_softc *)self;
454 	struct pci_attach_args		*pa = aux;
455 	pcireg_t			memtype;
456 	int				r;
457 	pci_intr_handle_t		ih;
458 	struct scsibus_attach_args	saa;
459 	struct mpii_ccb			*ccb;
460 
461 	sc->sc_pc = pa->pa_pc;
462 	sc->sc_tag = pa->pa_tag;
463 	sc->sc_dmat = pa->pa_dmat;
464 
465 	mtx_init(&sc->sc_req_mtx, IPL_BIO);
466 	mtx_init(&sc->sc_rep_mtx, IPL_BIO);
467 
468 	/* find the appropriate memory base */
469 	for (r = PCI_MAPREG_START; r < PCI_MAPREG_END; r += sizeof(memtype)) {
470 		memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, r);
471 		if ((memtype & PCI_MAPREG_TYPE_MASK) == PCI_MAPREG_TYPE_MEM)
472 			break;
473 	}
474 	if (r >= PCI_MAPREG_END) {
475 		printf(": unable to locate system interface registers\n");
476 		return;
477 	}
478 
479 	if (pci_mapreg_map(pa, r, memtype, 0, &sc->sc_iot, &sc->sc_ioh,
480 	    NULL, &sc->sc_ios, 0xFF) != 0) {
481 		printf(": unable to map system interface registers\n");
482 		return;
483 	}
484 
485 	/* disable the expansion rom */
486 	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_ROM_REG,
487 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_ROM_REG) &
488 	    ~PCI_ROM_ENABLE);
489 
490 	/* disable interrupts */
491 	mpii_write(sc, MPII_INTR_MASK,
492 	    MPII_INTR_MASK_RESET | MPII_INTR_MASK_REPLY |
493 	    MPII_INTR_MASK_DOORBELL);
494 
495 	/* hook up the interrupt */
496 	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
497 		printf(": unable to map interrupt\n");
498 		goto unmap;
499 	}
500 	printf(": %s\n", pci_intr_string(sc->sc_pc, ih));
501 
502 	switch (PCI_PRODUCT(pa->pa_id)) {
503 	case PCI_PRODUCT_SYMBIOS_SAS38XX:
504 	case PCI_PRODUCT_SYMBIOS_SAS38XX_1:
505 	case PCI_PRODUCT_SYMBIOS_SAS39XX:
506 	case PCI_PRODUCT_SYMBIOS_SAS39XX_1:
507 		SET(sc->sc_flags, MPII_F_AERO);
508 		break;
509 	}
510 
511 	if (mpii_iocfacts(sc) != 0) {
512 		printf("%s: unable to get iocfacts\n", DEVNAME(sc));
513 		goto unmap;
514 	}
515 
516 	if (mpii_init(sc) != 0) {
517 		printf("%s: unable to initialize ioc\n", DEVNAME(sc));
518 		goto unmap;
519 	}
520 
521 	if (mpii_alloc_ccbs(sc) != 0) {
522 		/* error already printed */
523 		goto unmap;
524 	}
525 
526 	if (mpii_alloc_replies(sc) != 0) {
527 		printf("%s: unable to allocated reply space\n", DEVNAME(sc));
528 		goto free_ccbs;
529 	}
530 
531 	if (mpii_alloc_queues(sc) != 0) {
532 		printf("%s: unable to allocate reply queues\n", DEVNAME(sc));
533 		goto free_replies;
534 	}
535 
536 	if (mpii_iocinit(sc) != 0) {
537 		printf("%s: unable to send iocinit\n", DEVNAME(sc));
538 		goto free_queues;
539 	}
540 
541 	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
542 	    MPII_DOORBELL_STATE_OPER) != 0) {
543 		printf("%s: state: 0x%08x\n", DEVNAME(sc),
544 			mpii_read_db(sc) & MPII_DOORBELL_STATE);
545 		printf("%s: operational state timeout\n", DEVNAME(sc));
546 		goto free_queues;
547 	}
548 
549 	mpii_push_replies(sc);
550 	mpii_init_queues(sc);
551 
552 	if (mpii_board_info(sc) != 0) {
553 		printf("%s: unable to get manufacturing page 0\n",
554 		    DEVNAME(sc));
555 		goto free_queues;
556 	}
557 
558 	if (mpii_portfacts(sc) != 0) {
559 		printf("%s: unable to get portfacts\n", DEVNAME(sc));
560 		goto free_queues;
561 	}
562 
563 	if (mpii_target_map(sc) != 0) {
564 		printf("%s: unable to setup target mappings\n", DEVNAME(sc));
565 		goto free_queues;
566 	}
567 
568 	if (mpii_cfg_coalescing(sc) != 0) {
569 		printf("%s: unable to configure coalescing\n", DEVNAME(sc));
570 		goto free_queues;
571 	}
572 
573 	/* XXX bail on unsupported porttype? */
574 	if ((sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_PHYSICAL) ||
575 	    (sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_VIRTUAL) ||
576 	    (sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_TRI_MODE)) {
577 		if (mpii_eventnotify(sc) != 0) {
578 			printf("%s: unable to enable events\n", DEVNAME(sc));
579 			goto free_queues;
580 		}
581 	}
582 
583 	sc->sc_devs = mallocarray(sc->sc_max_devices,
584 	    sizeof(struct mpii_device *), M_DEVBUF, M_NOWAIT | M_ZERO);
585 	if (sc->sc_devs == NULL) {
586 		printf("%s: unable to allocate memory for mpii_device\n",
587 		    DEVNAME(sc));
588 		goto free_queues;
589 	}
590 
591 	if (mpii_portenable(sc) != 0) {
592 		printf("%s: unable to enable port\n", DEVNAME(sc));
593 		goto free_devs;
594 	}
595 
596 	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO,
597 	    mpii_intr, sc, sc->sc_dev.dv_xname);
598 	if (sc->sc_ih == NULL)
599 		goto free_devs;
600 
601 	/* force autoconf to wait for the first sas discovery to complete */
602 	sc->sc_pending = 1;
603 	config_pending_incr();
604 
605 	saa.saa_adapter = &mpii_switch;
606 	saa.saa_adapter_softc = sc;
607 	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
608 	saa.saa_adapter_buswidth = sc->sc_max_devices;
609 	saa.saa_luns = 1;
610 	saa.saa_openings = sc->sc_max_cmds - 1;
611 	saa.saa_pool = &sc->sc_iopool;
612 	saa.saa_quirks = saa.saa_flags = 0;
613 	saa.saa_wwpn = saa.saa_wwnn = 0;
614 
615 	sc->sc_scsibus = (struct scsibus_softc *) config_found(&sc->sc_dev,
616 	    &saa, scsiprint);
617 
618 	/* enable interrupts */
619 	mpii_write(sc, MPII_INTR_MASK, MPII_INTR_MASK_DOORBELL
620 	    | MPII_INTR_MASK_RESET);
621 
622 #if NBIO > 0
623 	if (ISSET(sc->sc_flags, MPII_F_RAID)) {
624 		if (bio_register(&sc->sc_dev, mpii_ioctl) != 0)
625 			panic("%s: controller registration failed",
626 			    DEVNAME(sc));
627 		else
628 			sc->sc_ioctl = mpii_ioctl;
629 
630 #ifndef SMALL_KERNEL
631 		if (mpii_create_sensors(sc) != 0)
632 			printf("%s: unable to create sensors\n", DEVNAME(sc));
633 #endif
634 	}
635 #endif
636 
637 	return;
638 
639 free_devs:
640 	free(sc->sc_devs, M_DEVBUF, 0);
641 	sc->sc_devs = NULL;
642 
643 free_queues:
644 	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_freeq),
645 	    0, sc->sc_reply_free_qdepth * 4, BUS_DMASYNC_POSTREAD);
646 	mpii_dmamem_free(sc, sc->sc_reply_freeq);
647 
648 	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
649 	    0, sc->sc_reply_post_qdepth * 8, BUS_DMASYNC_POSTREAD);
650 	mpii_dmamem_free(sc, sc->sc_reply_postq);
651 
652 free_replies:
653 	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
654 		0, PAGE_SIZE, BUS_DMASYNC_POSTREAD);
655 	mpii_dmamem_free(sc, sc->sc_replies);
656 
657 free_ccbs:
658 	while ((ccb = mpii_get_ccb(sc)) != NULL)
659 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
660 	mpii_dmamem_free(sc, sc->sc_requests);
661 	free(sc->sc_ccbs, M_DEVBUF, 0);
662 
663 unmap:
664 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
665 	sc->sc_ios = 0;
666 }
667 
668 int
669 mpii_detach(struct device *self, int flags)
670 {
671 	struct mpii_softc		*sc = (struct mpii_softc *)self;
672 
673 	if (sc->sc_ih != NULL) {
674 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
675 		sc->sc_ih = NULL;
676 	}
677 	if (sc->sc_ios != 0) {
678 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
679 		sc->sc_ios = 0;
680 	}
681 
682 	return (0);
683 }
684 
/*
 * Interrupt handler.  Drains the reply post queue under sc_rep_mtx,
 * collecting completed ccbs and event replies onto local lists, then
 * runs the completions and event processing after dropping the mutex.
 * Returns 1 if any descriptors were consumed, 0 otherwise.
 */
int
mpii_intr(void *arg)
{
	struct mpii_rcb_list		evts = SIMPLEQ_HEAD_INITIALIZER(evts);
	struct mpii_ccb_list		ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_softc		*sc = arg;
	struct mpii_reply_descr		*postq = sc->sc_reply_postq_kva, *rdp;
	struct mpii_ccb			*ccb;
	struct mpii_rcb			*rcb;
	int				smid;
	u_int				idx;
	int				rv = 0;

	mtx_enter(&sc->sc_rep_mtx);
	/* pick up descriptors the IOC has written since the last sync */
	bus_dmamap_sync(sc->sc_dmat,
	    MPII_DMA_MAP(sc->sc_reply_postq),
	    0, sc->sc_reply_post_qdepth * sizeof(*rdp),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	idx = sc->sc_reply_post_host_index;
	for (;;) {
		rdp = &postq[idx];
		/* an UNUSED descriptor type marks the end of valid entries */
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		smid = lemtoh16(&rdp->smid);
		rcb = mpii_reply(sc, rdp);

		if (smid) {
			/* a nonzero smid identifies the originating ccb */
			ccb = &sc->sc_ccbs[smid - 1];
			ccb->ccb_state = MPII_CCB_READY;
			ccb->ccb_rcb = rcb;
			SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		} else
			/* smid 0: unsolicited reply, i.e. an event */
			SIMPLEQ_INSERT_TAIL(&evts, rcb, rcb_link);

		/* the post queue is a ring */
		if (++idx >= sc->sc_reply_post_qdepth)
			idx = 0;

		rv = 1;
	}

	bus_dmamap_sync(sc->sc_dmat,
	    MPII_DMA_MAP(sc->sc_reply_postq),
	    0, sc->sc_reply_post_qdepth * sizeof(*rdp),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* only tell the IOC about progress if we actually made some */
	if (rv)
		mpii_write_reply_post(sc, sc->sc_reply_post_host_index = idx);

	mtx_leave(&sc->sc_rep_mtx);

	if (rv == 0)
		return (0);

	/* run completions and events outside the reply mutex */
	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		ccb->ccb_done(ccb);
	}
	while ((rcb = SIMPLEQ_FIRST(&evts)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&evts, rcb_link);
		mpii_event_process(sc, rcb);
	}

	return (1);
}
759 
/*
 * Build the IEEE scatter-gather list (SAS3 format) for an I/O request.
 * The SGEs follow the scsi_io message in the request frame; if the
 * transfer needs more SGEs than fit before the chain slot
 * (sc_chain_sge), a chain element redirects the IOC to the remainder
 * of the list later in the same frame.  Returns 0 on success, 1 if
 * the dmamap load failed.
 */
int
mpii_load_xs_sas3(struct mpii_ccb *ccb)
{
	struct mpii_softc	*sc = ccb->ccb_sc;
	struct scsi_xfer	*xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io	*io = ccb->ccb_cmd;
	struct mpii_ieee_sge	*csge, *nsge, *sge;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;
	int			i, error;

	/* Request frame structure is described in the mpii_iocfacts */
	nsge = (struct mpii_ieee_sge *)(io + 1);

	/* zero length transfer still requires an SGE */
	if (xs->datalen == 0) {
		nsge->sg_flags = MPII_IEEE_SGE_END_OF_LIST;
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL,
	    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* only use a chain element if the list overflows the inline slots */
	csge = NULL;
	if (dmap->dm_nsegs > sc->sc_chain_sge) {
		csge = nsge + sc->sc_chain_sge;

		/* offset to the chain sge from the beginning */
		io->chain_offset = ((caddr_t)csge - (caddr_t)io) / sizeof(*sge);
	}

	for (i = 0; i < dmap->dm_nsegs; i++, nsge++) {
		if (nsge == csge) {
			/* fill in the chain element, then skip past it */
			nsge++;

			/* address of the next sge */
			htolem64(&csge->sg_addr, ccb->ccb_cmd_dva +
			    ((caddr_t)nsge - (caddr_t)io));
			htolem32(&csge->sg_len, (dmap->dm_nsegs - i) *
			    sizeof(*sge));
			csge->sg_next_chain_offset = 0;
			csge->sg_flags = MPII_IEEE_SGE_CHAIN_ELEMENT |
			    MPII_IEEE_SGE_ADDR_SYSTEM;

			/* a long list may need a further chain element */
			if ((dmap->dm_nsegs - i) > sc->sc_max_chain) {
				csge->sg_next_chain_offset = sc->sc_max_chain;
				csge += sc->sc_max_chain;
			}
		}

		sge = nsge;
		sge->sg_flags = MPII_IEEE_SGE_ADDR_SYSTEM;
		sge->sg_next_chain_offset = 0;
		htolem32(&sge->sg_len, dmap->dm_segs[i].ds_len);
		htolem64(&sge->sg_addr, dmap->dm_segs[i].ds_addr);
	}

	/* terminate list */
	sge->sg_flags |= MPII_IEEE_SGE_END_OF_LIST;

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
829 
/*
 * Build the classic MPI scatter-gather list (SAS2 format) for an I/O
 * request.  Unlike the SAS3 variant, the chain SGE position is
 * computed unconditionally; it is only written if the loop reaches it.
 * Returns 0 on success, 1 if the dmamap load failed.
 */
int
mpii_load_xs(struct mpii_ccb *ccb)
{
	struct mpii_softc	*sc = ccb->ccb_sc;
	struct scsi_xfer	*xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io	*io = ccb->ccb_cmd;
	struct mpii_sge		*csge, *nsge, *sge;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;
	u_int32_t		flags;
	u_int16_t		len;
	int			i, error;

	/* Request frame structure is described in the mpii_iocfacts */
	nsge = (struct mpii_sge *)(io + 1);
	csge = nsge + sc->sc_chain_sge;

	/* zero length transfer still requires an SGE */
	if (xs->datalen == 0) {
		nsge->sg_hdr = htole32(MPII_SGE_FL_TYPE_SIMPLE |
		    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL);
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL,
	    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* safe default starting flags */
	flags = MPII_SGE_FL_TYPE_SIMPLE | MPII_SGE_FL_SIZE_64;
	if (xs->flags & SCSI_DATA_OUT)
		flags |= MPII_SGE_FL_DIR_OUT;

	for (i = 0; i < dmap->dm_nsegs; i++, nsge++) {
		if (nsge == csge) {
			/* reached the chain slot: link to the rest of the list */
			nsge++;
			/* offset to the chain sge from the beginning */
			io->chain_offset = ((caddr_t)csge - (caddr_t)io) / 4;
			/* length of the sgl segment we're pointing to */
			len = (dmap->dm_nsegs - i) * sizeof(*sge);
			htolem32(&csge->sg_hdr, MPII_SGE_FL_TYPE_CHAIN |
			    MPII_SGE_FL_SIZE_64 | len);
			/* address of the next sge */
			mpii_dvatosge(csge, ccb->ccb_cmd_dva +
			    ((caddr_t)nsge - (caddr_t)io));
		}

		sge = nsge;
		htolem32(&sge->sg_hdr, flags | dmap->dm_segs[i].ds_len);
		mpii_dvatosge(sge, dmap->dm_segs[i].ds_addr);
	}

	/* terminate list */
	sge->sg_hdr |= htole32(MPII_SGE_FL_LAST | MPII_SGE_FL_EOB |
	    MPII_SGE_FL_EOL);

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
894 
/*
 * Probe a scsi target: look up the mpii_device for the target and fill
 * in the link's WWNs from the appropriate config page (RAID volume
 * page 1 for volumes, SAS device page 0 otherwise).  Returns 0 to
 * accept the target, nonzero to reject it.
 */
int
mpii_scsi_probe(struct scsi_link *link)
{
	struct mpii_softc *sc = link->bus->sb_adapter_softc;
	struct mpii_cfg_sas_dev_pg0 pg0;
	struct mpii_ecfg_hdr ehdr;
	struct mpii_device *dev;
	uint32_t address;
	int flags;

	/* only sas-capable port types are supported */
	if ((sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_SAS_PHYSICAL) &&
	    (sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_SAS_VIRTUAL) &&
	    (sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_TRI_MODE))
		return (ENXIO);

	dev = sc->sc_devs[link->target];
	if (dev == NULL)
		return (1);

	/* hidden/unused devices are not exposed as scsi targets */
	flags = dev->flags;
	if (ISSET(flags, MPII_DF_HIDDEN) || ISSET(flags, MPII_DF_UNUSED))
		return (1);

	if (ISSET(flags, MPII_DF_VOLUME)) {
		struct mpii_cfg_hdr hdr;
		struct mpii_cfg_raid_vol_pg1 vpg;
		size_t pagelen;

		address = MPII_CFG_RAID_VOL_ADDR_HANDLE | dev->dev_handle;

		if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL,
		    1, address, MPII_PG_POLL, &hdr) != 0)
			return (EINVAL);

		memset(&vpg, 0, sizeof(vpg));
		/* avoid stack trash on future page growth */
		pagelen = min(sizeof(vpg), hdr.page_length * 4);

		if (mpii_req_cfg_page(sc, address, MPII_PG_POLL, &hdr, 1,
		    &vpg, pagelen) != 0)
			return (EINVAL);

		link->port_wwn = letoh64(vpg.wwid);
		/*
		 * WWIDs generated by LSI firmware are not IEEE NAA compliant
		 * and historical practice in OBP on sparc64 is to set the top
		 * nibble to 3 to indicate that this is a RAID volume.
		 */
		link->port_wwn &= 0x0fffffffffffffff;
		link->port_wwn |= 0x3000000000000000;

		return (0);
	}

	/* physical device: fetch SAS device page 0 for its addresses */
	memset(&ehdr, 0, sizeof(ehdr));
	ehdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_EXTENDED;
	ehdr.page_number = 0;
	ehdr.page_version = 0;
	ehdr.ext_page_type = MPII_CONFIG_REQ_EXTPAGE_TYPE_SAS_DEVICE;
	ehdr.ext_page_length = htole16(sizeof(pg0) / 4); /* dwords */

	address = MPII_PGAD_SAS_DEVICE_FORM_HANDLE | (uint32_t)dev->dev_handle;
	if (mpii_req_cfg_page(sc, address, MPII_PG_EXTENDED,
	    &ehdr, 1, &pg0, sizeof(pg0)) != 0) {
		printf("%s: unable to fetch SAS device page 0 for target %u\n",
		    DEVNAME(sc), link->target);

		return (0); /* the handle should still work */
	}

	link->port_wwn = letoh64(pg0.sas_addr);
	link->node_wwn = letoh64(pg0.device_name);

	/* flag ATAPI devices so the midlayer issues the right commands */
	if (ISSET(lemtoh32(&pg0.device_info),
	    MPII_CFG_SAS_DEV_0_DEVINFO_ATAPI_DEVICE)) {
		link->flags |= SDEV_ATAPI;
	}

	return (0);
}
975 
976 u_int32_t
977 mpii_read(struct mpii_softc *sc, bus_size_t r)
978 {
979 	u_int32_t			rv;
980 	int				i;
981 
982 	if (ISSET(sc->sc_flags, MPII_F_AERO)) {
983 		i = 0;
984 		do {
985 			if (i > 0)
986 				DNPRINTF(MPII_D_RW, "%s: mpii_read retry %d\n",
987 				    DEVNAME(sc), i);
988 			bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
989 			    BUS_SPACE_BARRIER_READ);
990 			rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
991 			i++;
992 		} while (rv == 0 && i < 3);
993 	} else {
994 		bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
995 		    BUS_SPACE_BARRIER_READ);
996 		rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
997 	}
998 
999 	DNPRINTF(MPII_D_RW, "%s: mpii_read %#lx %#x\n", DEVNAME(sc), r, rv);
1000 
1001 	return (rv);
1002 }
1003 
/*
 * Write a 32-bit value v to the controller register at offset r,
 * followed by a write barrier so the store is pushed out before any
 * subsequent register access.
 */
void
mpii_write(struct mpii_softc *sc, bus_size_t r, u_int32_t v)
{
	DNPRINTF(MPII_D_RW, "%s: mpii_write %#lx %#x\n", DEVNAME(sc), r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
1013 
1014 
1015 int
1016 mpii_wait_eq(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
1017     u_int32_t target)
1018 {
1019 	int			i;
1020 
1021 	DNPRINTF(MPII_D_RW, "%s: mpii_wait_eq %#lx %#x %#x\n", DEVNAME(sc), r,
1022 	    mask, target);
1023 
1024 	for (i = 0; i < 15000; i++) {
1025 		if ((mpii_read(sc, r) & mask) == target)
1026 			return (0);
1027 		delay(1000);
1028 	}
1029 
1030 	return (1);
1031 }
1032 
1033 int
1034 mpii_wait_ne(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
1035     u_int32_t target)
1036 {
1037 	int			i;
1038 
1039 	DNPRINTF(MPII_D_RW, "%s: mpii_wait_ne %#lx %#x %#x\n", DEVNAME(sc), r,
1040 	    mask, target);
1041 
1042 	for (i = 0; i < 15000; i++) {
1043 		if ((mpii_read(sc, r) & mask) != target)
1044 			return (0);
1045 		delay(1000);
1046 	}
1047 
1048 	return (1);
1049 }
1050 
/*
 * Bring the IOC to a usable state.  Waits for the chip to leave its
 * reset state, then drives the doorbell state machine: a READY IOC is
 * accepted as-is, an OPERATIONAL or FAULTed IOC is reset first.  Up to
 * five state transitions are attempted before giving up.
 *
 * Returns 0 when the IOC is ready (or owned by a PCI peer), 1 on
 * failure.
 */
int
mpii_init(struct mpii_softc *sc)
{
	u_int32_t		db;
	int			i;

	/* spin until the ioc leaves the reset state */
	if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_RESET) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_init timeout waiting to leave "
		    "reset state\n", DEVNAME(sc));
		return (1);
	}

	/* check current ownership */
	db = mpii_read_db(sc);
	if ((db & MPII_DOORBELL_WHOINIT) == MPII_DOORBELL_WHOINIT_PCIPEER) {
		/* another PCI function owns the IOC; nothing for us to do */
		DNPRINTF(MPII_D_MISC, "%s: mpii_init initialised by pci peer\n",
		    DEVNAME(sc));
		return (0);
	}

	for (i = 0; i < 5; i++) {
		switch (db & MPII_DOORBELL_STATE) {
		case MPII_DOORBELL_STATE_READY:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is ready\n",
			    DEVNAME(sc));
			return (0);

		case MPII_DOORBELL_STATE_OPER:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is oper\n",
			    DEVNAME(sc));
			/*
			 * prefer the gentler message-unit reset when the
			 * firmware supports event replay, otherwise hard
			 * reset the chip.
			 */
			if (sc->sc_ioc_event_replay)
				mpii_reset_soft(sc);
			else
				mpii_reset_hard(sc);
			break;

		case MPII_DOORBELL_STATE_FAULT:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is being "
			    "reset hard\n" , DEVNAME(sc));
			mpii_reset_hard(sc);
			break;

		case MPII_DOORBELL_STATE_RESET:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init waiting to come "
			    "out of reset\n", DEVNAME(sc));
			if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
			    MPII_DOORBELL_STATE_RESET) != 0)
				return (1);
			break;
		}
		/* re-sample the doorbell for the next iteration */
		db = mpii_read_db(sc);
	}

	return (1);
}
1108 
/*
 * Perform a message-unit (soft) reset by writing the reset function
 * code to the doorbell and waiting for the IOC to acknowledge and
 * return to the READY state.  Returns 0 on success, 1 if the doorbell
 * is busy or a wait times out.
 */
int
mpii_reset_soft(struct mpii_softc *sc)
{
	DNPRINTF(MPII_D_MISC, "%s: mpii_reset_soft\n", DEVNAME(sc));

	if (mpii_read_db(sc) & MPII_DOORBELL_INUSE) {
		return (1);
	}

	mpii_write_db(sc,
	    MPII_DOORBELL_FUNCTION(MPII_FUNCTION_IOC_MESSAGE_UNIT_RESET));

	/* XXX LSI waits 15 sec */
	if (mpii_wait_db_ack(sc) != 0)
		return (1);

	/* XXX LSI waits 15 sec */
	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_READY) != 0)
		return (1);

	/* XXX wait for Sys2IOCDB bit to clear in HIS?? */

	return (0);
}
1134 
/*
 * Perform a hard (diagnostic) reset of the adapter: unlock the host
 * diagnostic register with the magic write sequence, assert the reset
 * bit, then poll until the chip deasserts it.  Returns 0 on success,
 * 1 if the diagnostic register could not be unlocked.
 */
int
mpii_reset_hard(struct mpii_softc *sc)
{
	u_int16_t		i;

	DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard\n", DEVNAME(sc));

	mpii_write_intr(sc, 0);

	/* enable diagnostic register */
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_FLUSH);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_1);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_2);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_3);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_4);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_5);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_6);

	delay(100);

	if ((mpii_read(sc, MPII_HOSTDIAG) & MPII_HOSTDIAG_DWRE) == 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard failure to enable "
		    "diagnostic read/write\n", DEVNAME(sc));
		return(1);
	}

	/* reset ioc */
	mpii_write(sc, MPII_HOSTDIAG, MPII_HOSTDIAG_RESET_ADAPTER);

	/* 240 milliseconds */
	delay(240000);


	/* XXX this whole function should be more robust */

	/* XXX  read the host diagnostic reg until reset adapter bit clears ? */
	/* poll for up to 300 seconds in 10ms steps */
	for (i = 0; i < 30000; i++) {
		if ((mpii_read(sc, MPII_HOSTDIAG) &
		    MPII_HOSTDIAG_RESET_ADAPTER) == 0)
			break;
		delay(10000);
	}

	/* disable diagnostic register */
	mpii_write(sc, MPII_WRITESEQ, 0xff);

	/* XXX what else? */

	DNPRINTF(MPII_D_MISC, "%s: done with mpii_reset_hard\n", DEVNAME(sc));

	return(0);
}
1187 
/*
 * Send a request of `dwords' 32-bit words through the doorbell
 * handshake protocol: announce the transfer via the doorbell, wait
 * for the IOC to interrupt and acknowledge, then feed the request one
 * dword at a time, waiting for an ack after each.  Returns 0 on
 * success, 1 if the doorbell is busy or any wait times out.
 */
int
mpii_handshake_send(struct mpii_softc *sc, void *buf, size_t dwords)
{
	u_int32_t		*query = buf;
	int			i;

	/* make sure the doorbell is not in use. */
	if (mpii_read_db(sc) & MPII_DOORBELL_INUSE)
		return (1);

	/* clear pending doorbell interrupts */
	if (mpii_read_intr(sc) & MPII_INTR_STATUS_IOC2SYSDB)
		mpii_write_intr(sc, 0);

	/*
	 * first write the doorbell with the handshake function and the
	 * dword count.
	 */
	mpii_write_db(sc, MPII_DOORBELL_FUNCTION(MPII_FUNCTION_HANDSHAKE) |
	    MPII_DOORBELL_DWORDS(dwords));

	/*
	 * the doorbell used bit will be set because a doorbell function has
	 * started. wait for the interrupt and then ack it.
	 */
	if (mpii_wait_db_int(sc) != 0)
		return (1);
	mpii_write_intr(sc, 0);

	/* poll for the acknowledgement. */
	if (mpii_wait_db_ack(sc) != 0)
		return (1);

	/* write the query through the doorbell. */
	for (i = 0; i < dwords; i++) {
		mpii_write_db(sc, htole32(query[i]));
		if (mpii_wait_db_ack(sc) != 0)
			return (1);
	}

	return (0);
}
1230 
/*
 * Receive one dword of a handshake reply.  The IOC hands over 16 data
 * bits per doorbell read, so each dword is assembled from two reads,
 * acknowledging the doorbell interrupt after each.  Returns 0 on
 * success, 1 on timeout.
 */
int
mpii_handshake_recv_dword(struct mpii_softc *sc, u_int32_t *dword)
{
	u_int16_t		*words = (u_int16_t *)dword;
	int			i;

	for (i = 0; i < 2; i++) {
		if (mpii_wait_db_int(sc) != 0)
			return (1);
		words[i] = letoh16(mpii_read_db(sc) & MPII_DOORBELL_DATA_MASK);
		mpii_write_intr(sc, 0);
	}

	return (0);
}
1246 
/*
 * Receive a handshake reply of at most `dwords' 32-bit words into
 * buf.  The actual reply length comes from the msg_length field in
 * the first dword; anything beyond the caller's buffer is read and
 * discarded so the doorbell protocol completes cleanly.  Returns 0 on
 * success, 1 on timeout.
 */
int
mpii_handshake_recv(struct mpii_softc *sc, void *buf, size_t dwords)
{
	struct mpii_msg_reply	*reply = buf;
	u_int32_t		*dbuf = buf, dummy;
	int			i;

	/* get the first dword so we can read the length out of the header. */
	if (mpii_handshake_recv_dword(sc, &dbuf[0]) != 0)
		return (1);

	DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dwords: %lu reply: %d\n",
	    DEVNAME(sc), dwords, reply->msg_length);

	/*
	 * the total length, in dwords, is in the message length field of the
	 * reply header.
	 */
	for (i = 1; i < MIN(dwords, reply->msg_length); i++) {
		if (mpii_handshake_recv_dword(sc, &dbuf[i]) != 0)
			return (1);
	}

	/* if there's extra stuff to come off the ioc, discard it */
	while (i++ < reply->msg_length) {
		if (mpii_handshake_recv_dword(sc, &dummy) != 0)
			return (1);
		DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dummy read: "
		    "0x%08x\n", DEVNAME(sc), dummy);
	}

	/* wait for the doorbell used bit to be reset and clear the intr */
	if (mpii_wait_db_int(sc) != 0)
		return (1);

	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_INUSE, 0) != 0)
		return (1);

	mpii_write_intr(sc, 0);

	return (0);
}
1289 
/*
 * No-op completion handler for ccbs that are polled for completion
 * rather than completed asynchronously.
 */
void
mpii_empty_done(struct mpii_ccb *ccb)
{
	/* nothing to do */
}
1295 
/*
 * Fetch the IOCFacts message from the controller over the doorbell
 * handshake and derive the driver's operating parameters from it:
 * command depth, reply frame and reply post/free queue sizes, request
 * frame size, SGE layout (SAS3 IEEE vs. MPI2) and the chain SGE
 * bookkeeping.  Returns 0 on success, 1 on handshake failure or if
 * the reported queue depth is unusably small.
 */
int
mpii_iocfacts(struct mpii_softc *sc)
{
	struct mpii_msg_iocfacts_request	ifq;
	struct mpii_msg_iocfacts_reply		ifp;
	int					irs;
	int					sge_size;
	u_int					qdepth;

	DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts\n", DEVNAME(sc));

	memset(&ifq, 0, sizeof(ifq));
	memset(&ifp, 0, sizeof(ifp));

	ifq.function = MPII_FUNCTION_IOC_FACTS;

	if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	sc->sc_ioc_number = ifp.ioc_number;
	sc->sc_vf_id = ifp.vf_id;

	sc->sc_max_volumes = ifp.max_volumes;
	sc->sc_max_devices = ifp.max_volumes + lemtoh16(&ifp.max_targets);

	if (ISSET(lemtoh32(&ifp.ioc_capabilities),
	    MPII_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
		SET(sc->sc_flags, MPII_F_RAID);
	if (ISSET(lemtoh32(&ifp.ioc_capabilities),
	    MPII_IOCFACTS_CAPABILITY_EVENT_REPLAY))
		sc->sc_ioc_event_replay = 1;

	/* never use more commands than the firmware's request credit */
	sc->sc_max_cmds = MIN(lemtoh16(&ifp.request_credit),
	    MPII_REQUEST_CREDIT);

	/* SAS3 and 3.5 controllers have different sgl layouts */
	if (ifp.msg_version_maj == 2 && ((ifp.msg_version_min == 5)
	    || (ifp.msg_version_min == 6)))
		SET(sc->sc_flags, MPII_F_SAS3);

	/*
	 * The host driver must ensure that there is at least one
	 * unused entry in the Reply Free Queue. One way to ensure
	 * that this requirement is met is to never allocate a number
	 * of reply frames that is a multiple of 16.
	 */
	sc->sc_num_reply_frames = sc->sc_max_cmds + 32;
	if (!(sc->sc_num_reply_frames % 16))
		sc->sc_num_reply_frames--;

	/* must be multiple of 16 */
	sc->sc_reply_post_qdepth = sc->sc_max_cmds +
	    sc->sc_num_reply_frames;
	sc->sc_reply_post_qdepth += 16 - (sc->sc_reply_post_qdepth % 16);

	/* clamp to the firmware's maximum post queue depth and rescale */
	qdepth = lemtoh16(&ifp.max_reply_descriptor_post_queue_depth);
	if (sc->sc_reply_post_qdepth > qdepth) {
		sc->sc_reply_post_qdepth = qdepth;
		if (sc->sc_reply_post_qdepth < 16) {
			printf("%s: RDPQ is too shallow\n", DEVNAME(sc));
			return (1);
		}
		sc->sc_max_cmds = sc->sc_reply_post_qdepth / 2 - 4;
		sc->sc_num_reply_frames = sc->sc_max_cmds + 4;
	}

	/* round the free queue depth up to a multiple of 16 */
	sc->sc_reply_free_qdepth = sc->sc_num_reply_frames +
	    16 - (sc->sc_num_reply_frames % 16);

	/*
	 * Our request frame for an I/O operation looks like this:
	 *
	 * +-------------------+ -.
	 * | mpii_msg_scsi_io  |  |
	 * +-------------------|  |
	 * | mpii_sge          |  |
	 * + - - - - - - - - - +  |
	 * | ...               |  > ioc_request_frame_size
	 * + - - - - - - - - - +  |
	 * | mpii_sge (tail)   |  |
	 * + - - - - - - - - - +  |
	 * | mpii_sge (csge)   |  | --.
	 * + - - - - - - - - - + -'   | chain sge points to the next sge
	 * | mpii_sge          |<-----'
	 * + - - - - - - - - - +
	 * | ...               |
	 * + - - - - - - - - - +
	 * | mpii_sge (tail)   |
	 * +-------------------+
	 * |                   |
	 * ~~~~~~~~~~~~~~~~~~~~~
	 * |                   |
	 * +-------------------+ <- sc_request_size - sizeof(scsi_sense_data)
	 * | scsi_sense_data   |
	 * +-------------------+
	 *
	 * If the controller gives us a maximum chain size, there can be
	 * multiple chain sges, each of which points to the sge following it.
	 * Otherwise, there will only be one chain sge.
	 */

	/* both sizes are in 32-bit words */
	sc->sc_reply_size = ifp.reply_frame_size * 4;
	irs = lemtoh16(&ifp.ioc_request_frame_size) * 4;
	sc->sc_request_size = MPII_REQUEST_SIZE;
	/* make sure we have enough space for scsi sense data */
	if (irs > sc->sc_request_size) {
		sc->sc_request_size = irs + sizeof(struct scsi_sense_data);
		sc->sc_request_size += 16 - (sc->sc_request_size % 16);
	}

	if (ISSET(sc->sc_flags, MPII_F_SAS3)) {
		sge_size = sizeof(struct mpii_ieee_sge);
	} else {
		sge_size = sizeof(struct mpii_sge);
	}

	/* offset to the chain sge */
	sc->sc_chain_sge = (irs - sizeof(struct mpii_msg_scsi_io)) /
	    sge_size - 1;

	sc->sc_max_chain = lemtoh16(&ifp.ioc_max_chain_seg_size);

	/*
	 * A number of simple scatter-gather elements we can fit into the
	 * request buffer after the I/O command minus the chain element(s).
	 */
	sc->sc_max_sgl = (sc->sc_request_size -
 	    sizeof(struct mpii_msg_scsi_io) - sizeof(struct scsi_sense_data)) /
	    sge_size - 1;
	if (sc->sc_max_chain > 0) {
		sc->sc_max_sgl -= (sc->sc_max_sgl - sc->sc_chain_sge) /
		    sc->sc_max_chain;
	}

	return (0);
}
1442 
/*
 * Send the IOCInit message over the doorbell handshake, telling the
 * controller the request frame size, queue depths and the DMA
 * addresses of the request, reply, reply post and reply free areas
 * (which must already be allocated).  Returns 0 on success, 1 on
 * handshake failure or a non-success ioc_status/loginfo in the reply.
 */
int
mpii_iocinit(struct mpii_softc *sc)
{
	struct mpii_msg_iocinit_request		iiq;
	struct mpii_msg_iocinit_reply		iip;

	DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit\n", DEVNAME(sc));

	memset(&iiq, 0, sizeof(iiq));
	memset(&iip, 0, sizeof(iip));

	iiq.function = MPII_FUNCTION_IOC_INIT;
	iiq.whoinit = MPII_WHOINIT_HOST_DRIVER;

	/* XXX JPG do something about vf_id */
	iiq.vf_id = 0;

	/* we speak MPI version 2.0 */
	iiq.msg_version_maj = 0x02;
	iiq.msg_version_min = 0x00;

	/* XXX JPG ensure compliance with some level and hard-code? */
	iiq.hdr_version_unit = 0x00;
	iiq.hdr_version_dev = 0x00;

	/* frame size is expressed in dwords */
	htolem16(&iiq.system_request_frame_size, sc->sc_request_size / 4);

	htolem16(&iiq.reply_descriptor_post_queue_depth,
	    sc->sc_reply_post_qdepth);

	htolem16(&iiq.reply_free_queue_depth, sc->sc_reply_free_qdepth);

	/*
	 * sense buffers live inside the request frames, so they share
	 * the high 32 bits of the request area's DMA address.
	 */
	htolem32(&iiq.sense_buffer_address_high,
	    MPII_DMA_DVA(sc->sc_requests) >> 32);

	htolem32(&iiq.system_reply_address_high,
	    MPII_DMA_DVA(sc->sc_replies) >> 32);

	htolem32(&iiq.system_request_frame_base_address_lo,
	    MPII_DMA_DVA(sc->sc_requests));
	htolem32(&iiq.system_request_frame_base_address_hi,
	    MPII_DMA_DVA(sc->sc_requests) >> 32);

	htolem32(&iiq.reply_descriptor_post_queue_address_lo,
	    MPII_DMA_DVA(sc->sc_reply_postq));
	htolem32(&iiq.reply_descriptor_post_queue_address_hi,
	    MPII_DMA_DVA(sc->sc_reply_postq) >> 32);

	htolem32(&iiq.reply_free_queue_address_lo,
	    MPII_DMA_DVA(sc->sc_reply_freeq));
	htolem32(&iiq.reply_free_queue_address_hi,
	    MPII_DMA_DVA(sc->sc_reply_freeq) >> 32);

	if (mpii_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &iip, dwordsof(iip)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	DNPRINTF(MPII_D_MISC, "%s:  function: 0x%02x msg_length: %d "
	    "whoinit: 0x%02x\n", DEVNAME(sc), iip.function,
	    iip.msg_length, iip.whoinit);
	DNPRINTF(MPII_D_MISC, "%s:  msg_flags: 0x%02x\n", DEVNAME(sc),
	    iip.msg_flags);
	DNPRINTF(MPII_D_MISC, "%s:  vf_id: 0x%02x vp_id: 0x%02x\n", DEVNAME(sc),
	    iip.vf_id, iip.vp_id);
	DNPRINTF(MPII_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
	    lemtoh16(&iip.ioc_status));
	DNPRINTF(MPII_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    lemtoh32(&iip.ioc_loginfo));

	if (lemtoh16(&iip.ioc_status) != MPII_IOCSTATUS_SUCCESS ||
	    lemtoh32(&iip.ioc_loginfo))
		return (1);

	return (0);
}
1525 
1526 void
1527 mpii_push_reply(struct mpii_softc *sc, struct mpii_rcb *rcb)
1528 {
1529 	u_int32_t		*rfp;
1530 	u_int			idx;
1531 
1532 	if (rcb == NULL)
1533 		return;
1534 
1535 	idx = sc->sc_reply_free_host_index;
1536 
1537 	rfp = MPII_DMA_KVA(sc->sc_reply_freeq);
1538 	htolem32(&rfp[idx], rcb->rcb_reply_dva);
1539 
1540 	if (++idx >= sc->sc_reply_free_qdepth)
1541 		idx = 0;
1542 
1543 	mpii_write_reply_free(sc, sc->sc_reply_free_host_index = idx);
1544 }
1545 
/*
 * Issue a PortFacts request for port 0 via a polled ccb and record
 * the reported port type in sc_porttype.  Returns 0 on success, 1 if
 * no ccb is available, polling fails or the reply is missing.
 */
int
mpii_portfacts(struct mpii_softc *sc)
{
	struct mpii_msg_portfacts_request	*pfq;
	struct mpii_msg_portfacts_reply		*pfp;
	struct mpii_ccb				*ccb;
	int					rv = 1;

	DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts\n", DEVNAME(sc));

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts mpii_get_ccb fail\n",
		    DEVNAME(sc));
		return (rv);
	}

	ccb->ccb_done = mpii_empty_done;
	pfq = ccb->ccb_cmd;

	memset(pfq, 0, sizeof(*pfq));

	pfq->function = MPII_FUNCTION_PORT_FACTS;
	pfq->chain_offset = 0;
	pfq->msg_flags = 0;
	pfq->port_number = 0;
	pfq->vp_id = 0;
	pfq->vf_id = 0;

	if (mpii_poll(sc, ccb) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts poll\n",
		    DEVNAME(sc));
		goto err;
	}

	if (ccb->ccb_rcb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: empty portfacts reply\n",
		    DEVNAME(sc));
		goto err;
	}

	pfp = ccb->ccb_rcb->rcb_reply;
	sc->sc_porttype = pfp->port_type;

	/* hand the reply frame back to the controller */
	mpii_push_reply(sc, ccb->ccb_rcb);
	rv = 0;
err:
	scsi_io_put(&sc->sc_iopool, ccb);

	return (rv);
}
1597 
/*
 * iopool handler: acknowledge one queued event that the firmware
 * flagged as requiring an EventAck.  Dequeues the oldest reply from
 * sc_evt_ack_queue, sends an EVENT_ACK request built from it, and
 * re-arms the handler if more events are still queued.
 */
void
mpii_eventack(void *cookie, void *io)
{
	struct mpii_softc			*sc = cookie;
	struct mpii_ccb				*ccb = io;
	struct mpii_rcb				*rcb, *next;
	struct mpii_msg_event_reply		*enp;
	struct mpii_msg_eventack_request	*eaq;

	mtx_enter(&sc->sc_evt_ack_mtx);
	rcb = SIMPLEQ_FIRST(&sc->sc_evt_ack_queue);
	if (rcb != NULL) {
		next = SIMPLEQ_NEXT(rcb, rcb_link);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_ack_queue, rcb_link);
	}
	mtx_leave(&sc->sc_evt_ack_mtx);

	/* nothing queued: give the ccb back unused */
	if (rcb == NULL) {
		scsi_io_put(&sc->sc_iopool, ccb);
		return;
	}

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;

	ccb->ccb_done = mpii_eventack_done;
	eaq = ccb->ccb_cmd;

	eaq->function = MPII_FUNCTION_EVENT_ACK;

	/* echo the event and its context back to the firmware */
	eaq->event = enp->event;
	eaq->event_context = enp->event_context;

	/* the reply frame can go back now that we've copied what we need */
	mpii_push_reply(sc, rcb);

	mpii_start(sc, ccb);

	/* more acks pending: get another ccb from the pool */
	if (next != NULL)
		scsi_ioh_add(&sc->sc_evt_ack_handler);
}
1637 
/*
 * Completion handler for EventAck requests: return the reply frame to
 * the controller and the ccb to the pool.
 */
void
mpii_eventack_done(struct mpii_ccb *ccb)
{
	struct mpii_softc			*sc = ccb->ccb_sc;

	DNPRINTF(MPII_D_EVT, "%s: event ack done\n", DEVNAME(sc));

	mpii_push_reply(sc, ccb->ccb_rcb);
	scsi_io_put(&sc->sc_iopool, ccb);
}
1648 
1649 int
1650 mpii_portenable(struct mpii_softc *sc)
1651 {
1652 	struct mpii_msg_portenable_request	*peq;
1653 	struct mpii_ccb				*ccb;
1654 
1655 	DNPRINTF(MPII_D_MISC, "%s: mpii_portenable\n", DEVNAME(sc));
1656 
1657 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1658 	if (ccb == NULL) {
1659 		DNPRINTF(MPII_D_MISC, "%s: mpii_portenable ccb_get\n",
1660 		    DEVNAME(sc));
1661 		return (1);
1662 	}
1663 
1664 	ccb->ccb_done = mpii_empty_done;
1665 	peq = ccb->ccb_cmd;
1666 
1667 	peq->function = MPII_FUNCTION_PORT_ENABLE;
1668 	peq->vf_id = sc->sc_vf_id;
1669 
1670 	if (mpii_poll(sc, ccb) != 0) {
1671 		DNPRINTF(MPII_D_MISC, "%s: mpii_portenable poll\n",
1672 		    DEVNAME(sc));
1673 		return (1);
1674 	}
1675 
1676 	if (ccb->ccb_rcb == NULL) {
1677 		DNPRINTF(MPII_D_MISC, "%s: empty portenable reply\n",
1678 		    DEVNAME(sc));
1679 		return (1);
1680 	}
1681 
1682 	mpii_push_reply(sc, ccb->ccb_rcb);
1683 	scsi_io_put(&sc->sc_iopool, ccb);
1684 
1685 	return (0);
1686 }
1687 
1688 int
1689 mpii_cfg_coalescing(struct mpii_softc *sc)
1690 {
1691 	struct mpii_cfg_hdr			hdr;
1692 	struct mpii_cfg_ioc_pg1			ipg;
1693 
1694 	hdr.page_version = 0;
1695 	hdr.page_length = sizeof(ipg) / 4;
1696 	hdr.page_number = 1;
1697 	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC;
1698 	memset(&ipg, 0, sizeof(ipg));
1699 	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &ipg,
1700 	    sizeof(ipg)) != 0) {
1701 		DNPRINTF(MPII_D_MISC, "%s: unable to fetch IOC page 1\n"
1702 		    "page 1\n", DEVNAME(sc));
1703 		return (1);
1704 	}
1705 
1706 	if (!ISSET(lemtoh32(&ipg.flags), MPII_CFG_IOC_1_REPLY_COALESCING))
1707 		return (0);
1708 
1709 	/* Disable coalescing */
1710 	CLR(ipg.flags, htole32(MPII_CFG_IOC_1_REPLY_COALESCING));
1711 	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 0, &ipg,
1712 	    sizeof(ipg)) != 0) {
1713 		DNPRINTF(MPII_D_MISC, "%s: unable to clear coalescing\n",
1714 		    DEVNAME(sc));
1715 		return (1);
1716 	}
1717 
1718 	return (0);
1719 }
1720 
/*
 * Mask (disable notification of) all events in an event notification
 * request, and selectively unmask a single event.  Arguments are now
 * fully parenthesized, and the shift uses 1U: with a signed 1,
 * evt % 32 == 31 would left-shift into the sign bit, which is
 * undefined behaviour.
 */
#define MPII_EVENT_MASKALL(enq)		do {			\
		(enq)->event_masks[0] = 0xffffffff;		\
		(enq)->event_masks[1] = 0xffffffff;		\
		(enq)->event_masks[2] = 0xffffffff;		\
		(enq)->event_masks[3] = 0xffffffff;		\
	} while (0)

#define MPII_EVENT_UNMASK(enq, evt)	do {			\
		(enq)->event_masks[(evt) / 32] &=		\
		    htole32(~(1U << ((evt) % 32)));		\
	} while (0)
1732 
/*
 * Set up the event infrastructure (SAS and ack queues, their mutexes,
 * the deferred task and the ack iopool handler) and send an
 * EventNotification request unmasking the events the driver handles.
 * The request completes asynchronously via mpii_eventnotify_done().
 * Returns 0 on success, 1 if no ccb is available.
 */
int
mpii_eventnotify(struct mpii_softc *sc)
{
	struct mpii_msg_event_request		*enq;
	struct mpii_ccb				*ccb;

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_eventnotify ccb_get\n",
		    DEVNAME(sc));
		return (1);
	}

	SIMPLEQ_INIT(&sc->sc_evt_sas_queue);
	mtx_init(&sc->sc_evt_sas_mtx, IPL_BIO);
	task_set(&sc->sc_evt_sas_task, mpii_event_sas, sc);

	SIMPLEQ_INIT(&sc->sc_evt_ack_queue);
	mtx_init(&sc->sc_evt_ack_mtx, IPL_BIO);
	scsi_ioh_set(&sc->sc_evt_ack_handler, &sc->sc_iopool,
	    mpii_eventack, sc);

	ccb->ccb_done = mpii_eventnotify_done;
	enq = ccb->ccb_cmd;

	enq->function = MPII_FUNCTION_EVENT_NOTIFICATION;

	/*
	 * Enable reporting of the following events:
	 *
	 * MPII_EVENT_SAS_DISCOVERY
	 * MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST
	 * MPII_EVENT_SAS_DEVICE_STATUS_CHANGE
	 * MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE
	 * MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST
	 * MPII_EVENT_IR_VOLUME
	 * MPII_EVENT_IR_PHYSICAL_DISK
	 * MPII_EVENT_IR_OPERATION_STATUS
	 */

	MPII_EVENT_MASKALL(enq);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DISCOVERY);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DEVICE_STATUS_CHANGE);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_VOLUME);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_PHYSICAL_DISK);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_OPERATION_STATUS);

	mpii_start(sc, ccb);

	return (0);
}
1787 
/*
 * Completion handler for the EventNotification request: release the
 * ccb, then feed the event reply into mpii_event_process(), which
 * takes ownership of the rcb.
 */
void
mpii_eventnotify_done(struct mpii_ccb *ccb)
{
	struct mpii_softc			*sc = ccb->ccb_sc;
	struct mpii_rcb				*rcb = ccb->ccb_rcb;

	DNPRINTF(MPII_D_EVT, "%s: mpii_eventnotify_done\n", DEVNAME(sc));

	scsi_io_put(&sc->sc_iopool, ccb);
	mpii_event_process(sc, rcb);
}
1799 
/*
 * Process an IR configuration change list event: walk the change
 * elements and add/remove volume device entries and adjust the
 * hidden/hot-spare flags on the underlying physical disks to match
 * the new RAID configuration.  Foreign configurations are ignored.
 */
void
mpii_event_raid(struct mpii_softc *sc, struct mpii_msg_event_reply *enp)
{
	struct mpii_evt_ir_cfg_change_list	*ccl;
	struct mpii_evt_ir_cfg_element		*ce;
	struct mpii_device			*dev;
	u_int16_t				type;
	int					i;

	/* the change list follows the event reply header */
	ccl = (struct mpii_evt_ir_cfg_change_list *)(enp + 1);
	if (ccl->num_elements == 0)
		return;

	if (ISSET(lemtoh32(&ccl->flags), MPII_EVT_IR_CFG_CHANGE_LIST_FOREIGN)) {
		/* bail on foreign configurations */
		return;
	}

	/* the elements follow the change list header */
	ce = (struct mpii_evt_ir_cfg_element *)(ccl + 1);

	for (i = 0; i < ccl->num_elements; i++, ce++) {
		type = (lemtoh16(&ce->element_flags) &
		    MPII_EVT_IR_CFG_ELEMENT_TYPE_MASK);

		switch (type) {
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME:
			switch (ce->reason_code) {
			case MPII_EVT_IR_CFG_ELEMENT_RC_ADDED:
			case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_CREATED:
				if (mpii_find_dev(sc,
				    lemtoh16(&ce->vol_dev_handle))) {
					printf("%s: device %#x is already "
					    "configured\n", DEVNAME(sc),
					    lemtoh16(&ce->vol_dev_handle));
					break;
				}
				dev = malloc(sizeof(*dev), M_DEVBUF,
				    M_NOWAIT | M_ZERO);
				if (!dev) {
					printf("%s: failed to allocate a "
					    "device structure\n", DEVNAME(sc));
					break;
				}
				SET(dev->flags, MPII_DF_VOLUME);
				dev->slot = sc->sc_vd_id_low;
				dev->dev_handle = lemtoh16(&ce->vol_dev_handle);
				if (mpii_insert_dev(sc, dev)) {
					free(dev, M_DEVBUF, sizeof *dev);
					break;
				}
				sc->sc_vd_count++;
				break;
			case MPII_EVT_IR_CFG_ELEMENT_RC_REMOVED:
			case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_DELETED:
				if (!(dev = mpii_find_dev(sc,
				    lemtoh16(&ce->vol_dev_handle))))
					break;
				mpii_remove_dev(sc, dev);
				sc->sc_vd_count--;
				break;
			}
			break;
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME_DISK:
			if (ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_PD_CREATED ||
			    ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) {
				/* there should be an underlying sas drive */
				if (!(dev = mpii_find_dev(sc,
				    lemtoh16(&ce->phys_disk_dev_handle))))
					break;
				/* promoted from a hot spare? */
				CLR(dev->flags, MPII_DF_HOT_SPARE);
				SET(dev->flags, MPII_DF_VOLUME_DISK |
				    MPII_DF_HIDDEN);
			}
			break;
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_HOT_SPARE:
			if (ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) {
				/* there should be an underlying sas drive */
				if (!(dev = mpii_find_dev(sc,
				    lemtoh16(&ce->phys_disk_dev_handle))))
					break;
				SET(dev->flags, MPII_DF_HOT_SPARE |
				    MPII_DF_HIDDEN);
			}
			break;
		}
	}
}
1891 
/*
 * Deferred task (runs on systq) that services one queued SAS event at
 * a time.  Discovery events are delegated to mpii_event_discovery();
 * topology change lists are walked to attach newly added devices and
 * detach missing ones.  If more events remain queued, the task is
 * re-added to process the next one.
 */
void
mpii_event_sas(void *xsc)
{
	struct mpii_softc *sc = xsc;
	struct mpii_rcb *rcb, *next;
	struct mpii_msg_event_reply *enp;
	struct mpii_evt_sas_tcl		*tcl;
	struct mpii_evt_phy_entry	*pe;
	struct mpii_device		*dev;
	int				i;
	u_int16_t			handle;

	mtx_enter(&sc->sc_evt_sas_mtx);
	rcb = SIMPLEQ_FIRST(&sc->sc_evt_sas_queue);
	if (rcb != NULL) {
		next = SIMPLEQ_NEXT(rcb, rcb_link);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_sas_queue, rcb_link);
	}
	mtx_leave(&sc->sc_evt_sas_mtx);

	if (rcb == NULL)
		return;
	/* more events queued behind this one: reschedule ourselves */
	if (next != NULL)
		task_add(systq, &sc->sc_evt_sas_task);

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;
	switch (lemtoh16(&enp->event)) {
	case MPII_EVENT_SAS_DISCOVERY:
		mpii_event_discovery(sc, enp);
		goto done;
	case MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		/* handle below */
		break;
	default:
		/* mpii_event_process() only queues the two events above */
		panic("%s: unexpected event %#x in sas event queue",
		    DEVNAME(sc), lemtoh16(&enp->event));
		/* NOTREACHED */
	}

	/* the change list and its phy entries follow the reply header */
	tcl = (struct mpii_evt_sas_tcl *)(enp + 1);
	pe = (struct mpii_evt_phy_entry *)(tcl + 1);

	for (i = 0; i < tcl->num_entries; i++, pe++) {
		switch (pe->phy_status & MPII_EVENT_SAS_TOPO_PS_RC_MASK) {
		case MPII_EVENT_SAS_TOPO_PS_RC_ADDED:
			handle = lemtoh16(&pe->dev_handle);
			if (mpii_find_dev(sc, handle)) {
				printf("%s: device %#x is already "
				    "configured\n", DEVNAME(sc), handle);
				break;
			}

			dev = malloc(sizeof(*dev), M_DEVBUF, M_WAITOK | M_ZERO);
			dev->slot = sc->sc_pd_id_start + tcl->start_phy_num + i;
			dev->dev_handle = handle;
			dev->phy_num = tcl->start_phy_num + i;
			if (tcl->enclosure_handle)
				dev->physical_port = tcl->physical_port;
			dev->enclosure = lemtoh16(&tcl->enclosure_handle);
			dev->expander = lemtoh16(&tcl->expander_handle);

			if (mpii_insert_dev(sc, dev)) {
				free(dev, M_DEVBUF, sizeof *dev);
				break;
			}

			/* sc_scsibus is NULL until autoconf has finished */
			if (sc->sc_scsibus != NULL)
				scsi_probe_target(sc->sc_scsibus, dev->slot);
			break;

		case MPII_EVENT_SAS_TOPO_PS_RC_MISSING:
			dev = mpii_find_dev(sc, lemtoh16(&pe->dev_handle));
			if (dev == NULL)
				break;

			mpii_remove_dev(sc, dev);
			mpii_sas_remove_device(sc, dev->dev_handle);
			if (sc->sc_scsibus != NULL &&
			    !ISSET(dev->flags, MPII_DF_HIDDEN)) {
				scsi_activate(sc->sc_scsibus, dev->slot, -1,
				    DVACT_DEACTIVATE);
				scsi_detach_target(sc->sc_scsibus, dev->slot,
				    DETACH_FORCE);
			}

			free(dev, M_DEVBUF, sizeof *dev);
			break;
		}
	}

done:
	mpii_event_done(sc, rcb);
}
1985 
/*
 * Track SAS discovery start/complete events while autoconf is still
 * pending (sc_pending != 0).  Each STARTED bumps the count, each
 * COMPLETED drops it; when the last outstanding discovery completes
 * the pending config is released via config_pending_decr().
 * NOTE(review): sc_pending appears to be seeded to 1 elsewhere so the
 * count bottoms out at 1 — confirm against the attach path.
 */
void
mpii_event_discovery(struct mpii_softc *sc, struct mpii_msg_event_reply *enp)
{
	/* the discovery payload follows the event reply header */
	struct mpii_evt_sas_discovery *esd =
	    (struct mpii_evt_sas_discovery *)(enp + 1);

	if (sc->sc_pending == 0)
		return;

	switch (esd->reason_code) {
	case MPII_EVENT_SAS_DISC_REASON_CODE_STARTED:
		++sc->sc_pending;
		break;
	case MPII_EVENT_SAS_DISC_REASON_CODE_COMPLETED:
		if (--sc->sc_pending == 1) {
			sc->sc_pending = 0;
			config_pending_decr();
		}
		break;
	}
}
2007 
/*
 * Dispatch a firmware event reply.  SAS discovery and topology events
 * are queued for the sc_evt_sas_task (which then owns the rcb); all
 * other events are handled (or ignored) inline and the rcb is passed
 * to mpii_event_done() for acknowledgement/reuse.
 */
void
mpii_event_process(struct mpii_softc *sc, struct mpii_rcb *rcb)
{
	struct mpii_msg_event_reply		*enp;

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;

	DNPRINTF(MPII_D_EVT, "%s: mpii_event_process: %#x\n", DEVNAME(sc),
	    lemtoh16(&enp->event));

	switch (lemtoh16(&enp->event)) {
	case MPII_EVENT_EVENT_CHANGE:
		/* should be properly ignored */
		break;
	case MPII_EVENT_SAS_DISCOVERY:
	case MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		/* defer to process context; the task now owns this rcb */
		mtx_enter(&sc->sc_evt_sas_mtx);
		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_sas_queue, rcb, rcb_link);
		mtx_leave(&sc->sc_evt_sas_mtx);
		task_add(systq, &sc->sc_evt_sas_task);
		return;
	case MPII_EVENT_SAS_DEVICE_STATUS_CHANGE:
		break;
	case MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		break;
	case MPII_EVENT_IR_VOLUME: {
		/* the volume event payload follows the reply header */
		struct mpii_evt_ir_volume	*evd =
		    (struct mpii_evt_ir_volume *)(enp + 1);
		struct mpii_device		*dev;
#if NBIO > 0
		const char *vol_states[] = {
			BIOC_SVINVALID_S,
			BIOC_SVOFFLINE_S,
			BIOC_SVBUILDING_S,
			BIOC_SVONLINE_S,
			BIOC_SVDEGRADED_S,
			BIOC_SVONLINE_S,
		};
#endif

		/* ignore volume state changes during autoconf */
		if (cold)
			break;
		KERNEL_LOCK();
		dev = mpii_find_dev(sc, lemtoh16(&evd->vol_dev_handle));
		KERNEL_UNLOCK();
		if (dev == NULL)
			break;
#if NBIO > 0
		if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATE_CHANGED)
			printf("%s: volume %d state changed from %s to %s\n",
			    DEVNAME(sc), dev->slot - sc->sc_vd_id_low,
			    vol_states[evd->prev_value],
			    vol_states[evd->new_value]);
#endif
		/* report the transition into resync */
		if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATUS_CHANGED &&
		    ISSET(evd->new_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC) &&
		    !ISSET(evd->prev_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
			printf("%s: started resync on a volume %d\n",
			    DEVNAME(sc), dev->slot - sc->sc_vd_id_low);
		}
		break;
	case MPII_EVENT_IR_PHYSICAL_DISK:
		break;
	case MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		mpii_event_raid(sc, enp);
		break;
	case MPII_EVENT_IR_OPERATION_STATUS: {
		/* the status payload follows the reply header */
		struct mpii_evt_ir_status	*evs =
		    (struct mpii_evt_ir_status *)(enp + 1);
		struct mpii_device		*dev;

		KERNEL_LOCK();
		dev = mpii_find_dev(sc, lemtoh16(&evs->vol_dev_handle));
		KERNEL_UNLOCK();
		/* record resync progress for bio(4) reporting */
		if (dev != NULL &&
		    evs->operation == MPII_EVENT_IR_RAIDOP_RESYNC)
			dev->percent = evs->percent;
		break;
		}
	default:
		DNPRINTF(MPII_D_EVT, "%s:  unhandled event 0x%02x\n",
		    DEVNAME(sc), lemtoh16(&enp->event));
	}

	mpii_event_done(sc, rcb);
}
2094 
2095 void
2096 mpii_event_done(struct mpii_softc *sc, struct mpii_rcb *rcb)
2097 {
2098 	struct mpii_msg_event_reply *enp = rcb->rcb_reply;
2099 
2100 	if (enp->ack_required) {
2101 		mtx_enter(&sc->sc_evt_ack_mtx);
2102 		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_ack_queue, rcb, rcb_link);
2103 		mtx_leave(&sc->sc_evt_ack_mtx);
2104 		scsi_ioh_add(&sc->sc_evt_ack_handler);
2105 	} else
2106 		mpii_push_reply(sc, rcb);
2107 }
2108 
/*
 * Tell the IOC to forget about a device: first issue a target reset
 * to abort any outstanding commands on the handle, then a SAS IO
 * unit control REMOVE_DEVICE operation.  Both requests are issued
 * synchronously via mpii_wait() on a single ccb that is reset and
 * reused between the two steps.
 */
void
mpii_sas_remove_device(struct mpii_softc *sc, u_int16_t handle)
{
	struct mpii_msg_scsi_task_request	*stq;
	struct mpii_msg_sas_oper_request	*soq;
	struct mpii_ccb				*ccb;

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL)
		return;

	/* step 1: target reset to flush commands for this handle */
	stq = ccb->ccb_cmd;
	stq->function = MPII_FUNCTION_SCSI_TASK_MGMT;
	stq->task_type = MPII_SCSI_TASK_TARGET_RESET;
	htolem16(&stq->dev_handle, handle);

	ccb->ccb_done = mpii_empty_done;
	mpii_wait(sc, ccb);

	if (ccb->ccb_rcb != NULL)
		mpii_push_reply(sc, ccb->ccb_rcb);

	/* reuse a ccb */
	ccb->ccb_state = MPII_CCB_READY;
	ccb->ccb_rcb = NULL;

	/* step 2: ask the SAS IO unit to drop the device */
	soq = ccb->ccb_cmd;
	memset(soq, 0, sizeof(*soq));
	soq->function = MPII_FUNCTION_SAS_IO_UNIT_CONTROL;
	soq->operation = MPII_SAS_OP_REMOVE_DEVICE;
	htolem16(&soq->dev_handle, handle);

	ccb->ccb_done = mpii_empty_done;
	mpii_wait(sc, ccb);
	if (ccb->ccb_rcb != NULL)
		mpii_push_reply(sc, ccb->ccb_rcb);

	scsi_io_put(&sc->sc_iopool, ccb);
}
2148 
/*
 * Print the board identification line during attach: fetch IOC facts
 * over the doorbell handshake for the firmware and MPI version
 * numbers, and manufacturing page 0 for the board name.
 * Returns 0 on success, non-zero on failure.
 */
int
mpii_board_info(struct mpii_softc *sc)
{
	struct mpii_msg_iocfacts_request	ifq;
	struct mpii_msg_iocfacts_reply		ifp;
	struct mpii_cfg_manufacturing_pg0	mpg;
	struct mpii_cfg_hdr			hdr;

	memset(&ifq, 0, sizeof(ifq));
	memset(&ifp, 0, sizeof(ifp));

	ifq.function = MPII_FUNCTION_IOC_FACTS;

	if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: failed to request ioc facts\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: failed to receive ioc facts\n",
		    DEVNAME(sc));
		return (1);
	}

	/* build the page header by hand; page_length is in dwords */
	hdr.page_version = 0;
	hdr.page_length = sizeof(mpg) / 4;
	hdr.page_number = 0;
	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_MANUFACTURING;
	memset(&mpg, 0, sizeof(mpg));
	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &mpg,
	    sizeof(mpg)) != 0) {
		printf("%s: unable to fetch manufacturing page 0\n",
		    DEVNAME(sc));
		/*
		 * NOTE(review): earlier failure paths return 1; EINVAL
		 * is also non-zero so callers testing != 0 are fine.
		 */
		return (EINVAL);
	}

	printf("%s: %s, firmware %u.%u.%u.%u%s, MPI %u.%u\n", DEVNAME(sc),
	    mpg.board_name, ifp.fw_version_maj, ifp.fw_version_min,
	    ifp.fw_version_unit, ifp.fw_version_dev,
	    ISSET(sc->sc_flags, MPII_F_RAID) ? " IR" : "",
	    ifp.msg_version_maj, ifp.msg_version_min);

	return (0);
}
2194 
/*
 * Work out how SCSI target ids map onto firmware device handles.
 * Reads IOC page 8 to learn whether target id 0 is reserved and how
 * IR volumes are mapped, then adjusts the first volume id
 * (sc_vd_id_low) and the first physical disk id (sc_pd_id_start)
 * accordingly.  Returns 0 on success, EINVAL if the page cannot be
 * read.
 */
int
mpii_target_map(struct mpii_softc *sc)
{
	struct mpii_cfg_hdr			hdr;
	struct mpii_cfg_ioc_pg8			ipg;
	int					flags, pad = 0;

	hdr.page_version = 0;
	hdr.page_length = sizeof(ipg) / 4;	/* length is in dwords */
	hdr.page_number = 8;
	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC;
	memset(&ipg, 0, sizeof(ipg));
	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &ipg,
	    sizeof(ipg)) != 0) {
		printf("%s: unable to fetch ioc page 8\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* firmware keeps target id 0 to itself; shift everything up */
	if (lemtoh16(&ipg.flags) & MPII_IOC_PG8_FLAGS_RESERVED_TARGETID_0)
		pad = 1;

	flags = lemtoh16(&ipg.ir_volume_mapping_flags) &
	    MPII_IOC_PG8_IRFLAGS_VOLUME_MAPPING_MODE_MASK;
	if (ISSET(sc->sc_flags, MPII_F_RAID)) {
		if (flags == MPII_IOC_PG8_IRFLAGS_LOW_VOLUME_MAPPING) {
			/* volumes occupy the lowest ids, disks follow */
			sc->sc_vd_id_low += pad;
			pad = sc->sc_max_volumes; /* for sc_pd_id_start */
		} else
			/* volumes are mapped at the top of the id range */
			sc->sc_vd_id_low = sc->sc_max_devices -
			    sc->sc_max_volumes;
	}

	sc->sc_pd_id_start += pad;

	return (0);
}
2232 
/*
 * Fetch the header of a configuration page from the IOC.  "type" and
 * "number" select the page, "address" its page address.  With
 * MPII_PG_EXTENDED in flags the page is requested as an extended page
 * and *p is filled in as a struct mpii_ecfg_hdr, otherwise as a
 * struct mpii_cfg_hdr.  MPII_PG_POLL makes the request run polled
 * instead of sleeping.  Returns 0 on success, 1 on failure.
 */
int
mpii_req_cfg_header(struct mpii_softc *sc, u_int8_t type, u_int8_t number,
    u_int32_t address, int flags, void *p)
{
	struct mpii_msg_config_request		*cq;
	struct mpii_msg_config_reply		*cp;
	struct mpii_ccb				*ccb;
	struct mpii_cfg_hdr			*hdr = p;
	struct mpii_ecfg_hdr			*ehdr = p;
	int					etype = 0;
	int					rv = 0;

	DNPRINTF(MPII_D_MISC, "%s: mpii_req_cfg_header type: %#x number: %x "
	    "address: 0x%08x flags: 0x%b\n", DEVNAME(sc), type, number,
	    address, flags, MPII_PG_FMT);

	ccb = scsi_io_get(&sc->sc_iopool,
	    ISSET(flags, MPII_PG_POLL) ? SCSI_NOSLEEP : 0);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header ccb_get\n",
		    DEVNAME(sc));
		return (1);
	}

	/* extended pages carry the real type in ext_page_type */
	if (ISSET(flags, MPII_PG_EXTENDED)) {
		etype = type;
		type = MPII_CONFIG_REQ_PAGE_TYPE_EXTENDED;
	}

	cq = ccb->ccb_cmd;

	cq->function = MPII_FUNCTION_CONFIG;

	cq->action = MPII_CONFIG_REQ_ACTION_PAGE_HEADER;

	cq->config_header.page_number = number;
	cq->config_header.page_type = type;
	cq->ext_page_type = etype;
	htolem32(&cq->page_address, address);
	/* no data transfer for a header request; empty sge */
	htolem32(&cq->page_buffer.sg_hdr, MPII_SGE_FL_TYPE_SIMPLE |
	    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL);

	ccb->ccb_done = mpii_empty_done;
	if (ISSET(flags, MPII_PG_POLL)) {
		if (mpii_poll(sc, ccb) != 0) {
			/*
			 * NOTE(review): the ccb is not returned to the
			 * iopool on this path; as written mpii_poll()
			 * always returns 0, so this is unreachable, but
			 * it would leak if that ever changes.
			 */
			DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
			    DEVNAME(sc));
			return (1);
		}
	} else
		mpii_wait(sc, ccb);

	if (ccb->ccb_rcb == NULL) {
		scsi_io_put(&sc->sc_iopool, ccb);
		return (1);
	}
	cp = ccb->ccb_rcb->rcb_reply;

	DNPRINTF(MPII_D_MISC, "%s:  action: 0x%02x sgl_flags: 0x%02x "
	    "msg_length: %d function: 0x%02x\n", DEVNAME(sc), cp->action,
	    cp->sgl_flags, cp->msg_length, cp->function);
	DNPRINTF(MPII_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
	    "msg_flags: 0x%02x\n", DEVNAME(sc),
	    lemtoh16(&cp->ext_page_length), cp->ext_page_type,
	    cp->msg_flags);
	DNPRINTF(MPII_D_MISC, "%s:  vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
	    cp->vp_id, cp->vf_id);
	DNPRINTF(MPII_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
	    lemtoh16(&cp->ioc_status));
	DNPRINTF(MPII_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    lemtoh32(&cp->ioc_loginfo));
	DNPRINTF(MPII_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
	    cp->config_header.page_version,
	    cp->config_header.page_length,
	    cp->config_header.page_number,
	    cp->config_header.page_type);

	/* copy the header out in the format the caller asked for */
	if (lemtoh16(&cp->ioc_status) != MPII_IOCSTATUS_SUCCESS)
		rv = 1;
	else if (ISSET(flags, MPII_PG_EXTENDED)) {
		memset(ehdr, 0, sizeof(*ehdr));
		ehdr->page_version = cp->config_header.page_version;
		ehdr->page_number = cp->config_header.page_number;
		ehdr->page_type = cp->config_header.page_type;
		ehdr->ext_page_length = cp->ext_page_length;
		ehdr->ext_page_type = cp->ext_page_type;
	} else
		*hdr = cp->config_header;

	mpii_push_reply(sc, ccb->ccb_rcb);
	scsi_io_put(&sc->sc_iopool, ccb);

	return (rv);
}
2328 
/*
 * Read or write the contents of a configuration page.  "p" points at
 * the page header previously obtained via mpii_req_cfg_header() (a
 * struct mpii_cfg_hdr, or mpii_ecfg_hdr with MPII_PG_EXTENDED).
 * "read" non-zero reads the page into "page", zero writes "page" to
 * the IOC.  The page data is bounced through the request frame
 * itself, so "len" must fit in the request after the config request
 * header.  Returns 0 on success, 1 on failure.
 */
int
mpii_req_cfg_page(struct mpii_softc *sc, u_int32_t address, int flags,
    void *p, int read, void *page, size_t len)
{
	struct mpii_msg_config_request		*cq;
	struct mpii_msg_config_reply		*cp;
	struct mpii_ccb				*ccb;
	struct mpii_cfg_hdr			*hdr = p;
	struct mpii_ecfg_hdr			*ehdr = p;
	caddr_t					kva;
	int					page_length;
	int					rv = 0;

	DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page address: %d read: %d "
	    "type: %x\n", DEVNAME(sc), address, read, hdr->page_type);

	/* page lengths in the headers are in dwords */
	page_length = ISSET(flags, MPII_PG_EXTENDED) ?
	    lemtoh16(&ehdr->ext_page_length) : hdr->page_length;

	/* buffer must fit in the request frame and cover the whole page */
	if (len > sc->sc_request_size - sizeof(*cq) || len < page_length * 4)
		return (1);

	ccb = scsi_io_get(&sc->sc_iopool,
	    ISSET(flags, MPII_PG_POLL) ? SCSI_NOSLEEP : 0);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page ccb_get\n",
		    DEVNAME(sc));
		return (1);
	}

	cq = ccb->ccb_cmd;

	cq->function = MPII_FUNCTION_CONFIG;

	cq->action = (read ? MPII_CONFIG_REQ_ACTION_PAGE_READ_CURRENT :
	    MPII_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT);

	/* replay the header the IOC gave us back to it */
	if (ISSET(flags, MPII_PG_EXTENDED)) {
		cq->config_header.page_version = ehdr->page_version;
		cq->config_header.page_number = ehdr->page_number;
		cq->config_header.page_type = ehdr->page_type;
		cq->ext_page_len = ehdr->ext_page_length;
		cq->ext_page_type = ehdr->ext_page_type;
	} else
		cq->config_header = *hdr;
	cq->config_header.page_type &= MPII_CONFIG_REQ_PAGE_TYPE_MASK;
	htolem32(&cq->page_address, address);
	htolem32(&cq->page_buffer.sg_hdr, MPII_SGE_FL_TYPE_SIMPLE |
	    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL |
	    MPII_SGE_FL_SIZE_64 | (page_length * 4) |
	    (read ? MPII_SGE_FL_DIR_IN : MPII_SGE_FL_DIR_OUT));

	/* bounce the page via the request space to avoid more bus_dma games */
	mpii_dvatosge(&cq->page_buffer, ccb->ccb_cmd_dva +
	    sizeof(struct mpii_msg_config_request));

	kva = ccb->ccb_cmd;
	kva += sizeof(struct mpii_msg_config_request);

	if (!read)
		memcpy(kva, page, len);

	ccb->ccb_done = mpii_empty_done;
	if (ISSET(flags, MPII_PG_POLL)) {
		if (mpii_poll(sc, ccb) != 0) {
			/*
			 * NOTE(review): the ccb is not returned to the
			 * iopool on this path; as written mpii_poll()
			 * always returns 0, so this is unreachable, but
			 * it would leak if that ever changes.
			 */
			DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
			    DEVNAME(sc));
			return (1);
		}
	} else
		mpii_wait(sc, ccb);

	if (ccb->ccb_rcb == NULL) {
		scsi_io_put(&sc->sc_iopool, ccb);
		return (1);
	}
	cp = ccb->ccb_rcb->rcb_reply;

	DNPRINTF(MPII_D_MISC, "%s:  action: 0x%02x msg_length: %d "
	    "function: 0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length,
	    cp->function);
	DNPRINTF(MPII_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
	    "msg_flags: 0x%02x\n", DEVNAME(sc),
	    lemtoh16(&cp->ext_page_length), cp->ext_page_type,
	    cp->msg_flags);
	DNPRINTF(MPII_D_MISC, "%s:  vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
	    cp->vp_id, cp->vf_id);
	DNPRINTF(MPII_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
	    lemtoh16(&cp->ioc_status));
	DNPRINTF(MPII_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    lemtoh32(&cp->ioc_loginfo));
	DNPRINTF(MPII_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
	    cp->config_header.page_version,
	    cp->config_header.page_length,
	    cp->config_header.page_number,
	    cp->config_header.page_type);

	/* on a successful read, copy the bounced data out to the caller */
	if (lemtoh16(&cp->ioc_status) != MPII_IOCSTATUS_SUCCESS)
		rv = 1;
	else if (read)
		memcpy(page, kva, len);

	mpii_push_reply(sc, ccb->ccb_rcb);
	scsi_io_put(&sc->sc_iopool, ccb);

	return (rv);
}
2437 
/*
 * Translate a reply post queue descriptor into its rcb.  Only
 * address reply descriptors reference a reply frame; for all other
 * descriptor types NULL is returned.  The descriptor slot is then
 * reset to the all-ones "unused" pattern so the interrupt handler
 * does not process it again.
 */
struct mpii_rcb *
mpii_reply(struct mpii_softc *sc, struct mpii_reply_descr *rdp)
{
	struct mpii_rcb		*rcb = NULL;
	u_int32_t		rfid;

	DNPRINTF(MPII_D_INTR, "%s: mpii_reply\n", DEVNAME(sc));

	if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
	    MPII_REPLY_DESCR_ADDRESS_REPLY) {
		/* frame index = (frame dva - base dva) / frame size */
		rfid = (lemtoh32(&rdp->frame_addr) -
		    (u_int32_t)MPII_DMA_DVA(sc->sc_replies)) /
		    sc->sc_reply_size;

		/* make the device's writes to this frame visible */
		bus_dmamap_sync(sc->sc_dmat,
		    MPII_DMA_MAP(sc->sc_replies), sc->sc_reply_size * rfid,
		    sc->sc_reply_size, BUS_DMASYNC_POSTREAD);

		rcb = &sc->sc_rcbs[rfid];
	}

	/* mark the descriptor slot as empty again */
	memset(rdp, 0xff, sizeof(*rdp));

	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
	    8 * sc->sc_reply_post_host_index, 8,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	return (rcb);
}
2467 
/*
 * Allocate a chunk of DMA-safe memory of the given size, mapped into
 * kernel virtual space and loaded into a single-segment dmamap.
 * Returns NULL on failure; resources acquired so far are unwound in
 * reverse order via the goto chain.
 */
struct mpii_dmamem *
mpii_dmamem_alloc(struct mpii_softc *sc, size_t size)
{
	struct mpii_dmamem	*mdm;
	int			nsegs;

	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mdm == NULL)
		return (NULL);

	mdm->mdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
		goto mdmfree;

	/* single physically contiguous, zeroed, page-aligned segment */
	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg,
	    1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
	    &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
	    NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (mdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
mdmfree:
	free(mdm, M_DEVBUF, sizeof *mdm);

	return (NULL);
}
2509 
/*
 * Release DMA memory obtained from mpii_dmamem_alloc(), tearing the
 * pieces down in the reverse order they were set up.
 */
void
mpii_dmamem_free(struct mpii_softc *sc, struct mpii_dmamem *mdm)
{
	DNPRINTF(MPII_D_MEM, "%s: mpii_dmamem_free %p\n", DEVNAME(sc), mdm);

	bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF, sizeof *mdm);
}
2521 
2522 int
2523 mpii_insert_dev(struct mpii_softc *sc, struct mpii_device *dev)
2524 {
2525 	int		slot;	/* initial hint */
2526 
2527 	if (dev == NULL || dev->slot < 0)
2528 		return (1);
2529 	slot = dev->slot;
2530 
2531 	while (slot < sc->sc_max_devices && sc->sc_devs[slot] != NULL)
2532 		slot++;
2533 
2534 	if (slot >= sc->sc_max_devices)
2535 		return (1);
2536 
2537 	dev->slot = slot;
2538 	sc->sc_devs[slot] = dev;
2539 
2540 	return (0);
2541 }
2542 
2543 int
2544 mpii_remove_dev(struct mpii_softc *sc, struct mpii_device *dev)
2545 {
2546 	int			i;
2547 
2548 	if (dev == NULL)
2549 		return (1);
2550 
2551 	for (i = 0; i < sc->sc_max_devices; i++) {
2552 		if (sc->sc_devs[i] == NULL)
2553 			continue;
2554 
2555 		if (sc->sc_devs[i]->dev_handle == dev->dev_handle) {
2556 			sc->sc_devs[i] = NULL;
2557 			return (0);
2558 		}
2559 	}
2560 
2561 	return (1);
2562 }
2563 
2564 struct mpii_device *
2565 mpii_find_dev(struct mpii_softc *sc, u_int16_t handle)
2566 {
2567 	int			i;
2568 
2569 	for (i = 0; i < sc->sc_max_devices; i++) {
2570 		if (sc->sc_devs[i] == NULL)
2571 			continue;
2572 
2573 		if (sc->sc_devs[i]->dev_handle == handle)
2574 			return (sc->sc_devs[i]);
2575 	}
2576 
2577 	return (NULL);
2578 }
2579 
/*
 * Allocate and initialise the ccbs and their backing request frames,
 * and set up the free list, timeout list and iopool.  smid 0 is
 * reserved by the hardware, so sc_max_cmds frames yield
 * (sc_max_cmds - 1) usable ccbs numbered from smid 1.
 * Returns 0 on success, 1 on failure.
 */
int
mpii_alloc_ccbs(struct mpii_softc *sc)
{
	struct mpii_ccb		*ccb;
	u_int8_t		*cmd;
	int			i;

	SIMPLEQ_INIT(&sc->sc_ccb_free);
	SIMPLEQ_INIT(&sc->sc_ccb_tmos);
	mtx_init(&sc->sc_ccb_free_mtx, IPL_BIO);
	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
	scsi_ioh_set(&sc->sc_ccb_tmo_handler, &sc->sc_iopool,
	    mpii_scsi_cmd_tmo_handler, sc);

	sc->sc_ccbs = mallocarray((sc->sc_max_cmds-1), sizeof(*ccb),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->sc_ccbs == NULL) {
		printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
		return (1);
	}

	/* one contiguous DMA area holding all request frames */
	sc->sc_requests = mpii_dmamem_alloc(sc,
	    sc->sc_request_size * sc->sc_max_cmds);
	if (sc->sc_requests == NULL) {
		printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
		goto free_ccbs;
	}
	cmd = MPII_DMA_KVA(sc->sc_requests);

	/*
	 * we have sc->sc_max_cmds system request message
	 * frames, but smid zero cannot be used. so we then
	 * have (sc->sc_max_cmds - 1) number of ccbs
	 */
	for (i = 1; i < sc->sc_max_cmds; i++) {
		ccb = &sc->sc_ccbs[i - 1];

		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, sc->sc_max_sgl,
		    MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
		    &ccb->ccb_dmamap) != 0) {
			printf("%s: unable to create dma map\n", DEVNAME(sc));
			goto free_maps;
		}

		ccb->ccb_sc = sc;
		/* stored little-endian so it can be copied into
		 * request descriptors directly (see mpii_start()) */
		htolem16(&ccb->ccb_smid, i);
		ccb->ccb_offset = sc->sc_request_size * i;

		/* kva and device address of this ccb's request frame */
		ccb->ccb_cmd = &cmd[ccb->ccb_offset];
		ccb->ccb_cmd_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_requests) +
		    ccb->ccb_offset;

		DNPRINTF(MPII_D_CCB, "%s: mpii_alloc_ccbs(%d) ccb: %p map: %p "
		    "sc: %p smid: %#x offs: %#lx cmd: %p dva: %#lx\n",
		    DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc,
		    ccb->ccb_smid, ccb->ccb_offset, ccb->ccb_cmd,
		    ccb->ccb_cmd_dva);

		mpii_put_ccb(sc, ccb);
	}

	scsi_iopool_init(&sc->sc_iopool, sc, mpii_get_ccb, mpii_put_ccb);

	return (0);

free_maps:
	/* the free list holds exactly the ccbs with created dmamaps */
	while ((ccb = mpii_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);

	mpii_dmamem_free(sc, sc->sc_requests);
free_ccbs:
	free(sc->sc_ccbs, M_DEVBUF, (sc->sc_max_cmds-1) * sizeof(*ccb));

	return (1);
}
2656 
/*
 * iopool put callback: reset a ccb to its idle state and return it
 * to the free list.  The kernel lock is released around the free
 * list mutex — presumably because iopool callbacks run with the
 * kernel lock held and dropping it reduces contention; confirm
 * against the scsi midlayer before changing.
 */
void
mpii_put_ccb(void *cookie, void *io)
{
	struct mpii_softc	*sc = cookie;
	struct mpii_ccb		*ccb = io;

	DNPRINTF(MPII_D_CCB, "%s: mpii_put_ccb %p\n", DEVNAME(sc), ccb);

	ccb->ccb_state = MPII_CCB_FREE;
	ccb->ccb_cookie = NULL;
	ccb->ccb_done = NULL;
	ccb->ccb_rcb = NULL;
	/* scrub the request frame for the next user */
	memset(ccb->ccb_cmd, 0, sc->sc_request_size);

	KERNEL_UNLOCK();
	mtx_enter(&sc->sc_ccb_free_mtx);
	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link);
	mtx_leave(&sc->sc_ccb_free_mtx);
	KERNEL_LOCK();
}
2677 
/*
 * iopool get callback: take a ccb off the free list, or return NULL
 * if none are available.  As in mpii_put_ccb(), the kernel lock is
 * dropped around the free list mutex.
 */
void *
mpii_get_ccb(void *cookie)
{
	struct mpii_softc	*sc = cookie;
	struct mpii_ccb		*ccb;

	KERNEL_UNLOCK();

	mtx_enter(&sc->sc_ccb_free_mtx);
	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free);
	if (ccb != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link);
		ccb->ccb_state = MPII_CCB_READY;
	}
	mtx_leave(&sc->sc_ccb_free_mtx);

	KERNEL_LOCK();

	DNPRINTF(MPII_D_CCB, "%s: mpii_get_ccb %p\n", DEVNAME(sc), ccb);

	return (ccb);
}
2700 
2701 int
2702 mpii_alloc_replies(struct mpii_softc *sc)
2703 {
2704 	DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_replies\n", DEVNAME(sc));
2705 
2706 	sc->sc_rcbs = mallocarray(sc->sc_num_reply_frames,
2707 	    sizeof(struct mpii_rcb), M_DEVBUF, M_NOWAIT);
2708 	if (sc->sc_rcbs == NULL)
2709 		return (1);
2710 
2711 	sc->sc_replies = mpii_dmamem_alloc(sc, sc->sc_reply_size *
2712 	    sc->sc_num_reply_frames);
2713 	if (sc->sc_replies == NULL) {
2714 		free(sc->sc_rcbs, M_DEVBUF,
2715 		    sc->sc_num_reply_frames * sizeof(struct mpii_rcb));
2716 		return (1);
2717 	}
2718 
2719 	return (0);
2720 }
2721 
/*
 * Initialise every rcb with the kva and device address of its reply
 * frame and hand all frames to the IOC via the reply free queue.
 */
void
mpii_push_replies(struct mpii_softc *sc)
{
	struct mpii_rcb		*rcb;
	caddr_t			kva = MPII_DMA_KVA(sc->sc_replies);
	int			i;

	/* prepare the whole reply area for device writes */
	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
	    0, sc->sc_reply_size * sc->sc_num_reply_frames,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < sc->sc_num_reply_frames; i++) {
		rcb = &sc->sc_rcbs[i];

		rcb->rcb_reply = kva + sc->sc_reply_size * i;
		rcb->rcb_reply_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) +
		    sc->sc_reply_size * i;
		mpii_push_reply(sc, rcb);
	}
}
2742 
/*
 * Post a prepared ccb to the IOC: sync its request frame, build a
 * request descriptor for it and write the descriptor to the request
 * descriptor post register.  The 64-bit descriptor must reach the
 * device atomically; on LP64 a single 8-byte write is used,
 * otherwise the low and high dwords are written in order under
 * sc_req_mtx so concurrent posts cannot interleave.
 */
void
mpii_start(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	struct mpii_request_header	*rhp;
	struct mpii_request_descr	descr;
	/* alias the descriptor as raw words for the register writes */
	u_long				 *rdp = (u_long *)&descr;

	DNPRINTF(MPII_D_RW, "%s: mpii_start %#lx\n", DEVNAME(sc),
	    ccb->ccb_cmd_dva);

	/* flush the request frame to memory before posting it */
	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, sc->sc_request_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ccb->ccb_state = MPII_CCB_QUEUED;

	rhp = ccb->ccb_cmd;

	memset(&descr, 0, sizeof(descr));

	/* descriptor type depends on the request function */
	switch (rhp->function) {
	case MPII_FUNCTION_SCSI_IO_REQUEST:
		descr.request_flags = MPII_REQ_DESCR_SCSI_IO;
		descr.dev_handle = htole16(ccb->ccb_dev_handle);
		break;
	case MPII_FUNCTION_SCSI_TASK_MGMT:
		descr.request_flags = MPII_REQ_DESCR_HIGH_PRIORITY;
		break;
	default:
		descr.request_flags = MPII_REQ_DESCR_DEFAULT;
	}

	descr.vf_id = sc->sc_vf_id;
	/* ccb_smid is already little-endian (see mpii_alloc_ccbs()) */
	descr.smid = ccb->ccb_smid;

	DNPRINTF(MPII_D_RW, "%s:   MPII_REQ_DESCR_POST_LOW (0x%08x) write "
	    "0x%08lx\n", DEVNAME(sc), MPII_REQ_DESCR_POST_LOW, *rdp);

	DNPRINTF(MPII_D_RW, "%s:   MPII_REQ_DESCR_POST_HIGH (0x%08x) write "
	    "0x%08lx\n", DEVNAME(sc), MPII_REQ_DESCR_POST_HIGH, *(rdp+1));

#if defined(__LP64__)
	bus_space_write_raw_8(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, *rdp);
#else
	/* low dword must land before the high dword; barrier between */
	mtx_enter(&sc->sc_req_mtx);
	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, rdp[0]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, 8, BUS_SPACE_BARRIER_WRITE);

	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_HIGH, rdp[1]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, 8, BUS_SPACE_BARRIER_WRITE);
	mtx_leave(&sc->sc_req_mtx);
#endif
}
2801 
/*
 * Run a ccb to completion by polling instead of sleeping.  The ccb's
 * done routine and cookie are temporarily hijacked so that
 * mpii_poll_done() can clear the on-stack flag "rv" that the busy
 * loop below spins on; they are restored (and the original done
 * routine called) once the ccb completes.  Always returns 0.
 */
int
mpii_poll(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	void				(*done)(struct mpii_ccb *);
	void				*cookie;
	int				rv = 1;

	DNPRINTF(MPII_D_INTR, "%s: mpii_poll\n", DEVNAME(sc));

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mpii_poll_done;
	ccb->ccb_cookie = &rv;

	mpii_start(sc, ccb);

	/* rv is cleared by mpii_poll_done() when the ccb completes */
	while (rv == 1) {
		/* avoid excessive polling */
		if (mpii_reply_waiting(sc))
			mpii_intr(sc);
		else
			delay(10);
	}

	ccb->ccb_cookie = cookie;
	done(ccb);

	return (0);
}
2832 
2833 void
2834 mpii_poll_done(struct mpii_ccb *ccb)
2835 {
2836 	int				*rv = ccb->ccb_cookie;
2837 
2838 	*rv = 0;
2839 }
2840 
/*
 * Allocate the reply free queue and the reply post queue.  The free
 * queue is pre-populated with the device address of every reply
 * frame; the post queue is filled with the all-ones "unused"
 * descriptor pattern (see mpii_reply()).  Returns 0 on success, 1 on
 * failure.
 */
int
mpii_alloc_queues(struct mpii_softc *sc)
{
	u_int32_t		*rfp;
	int			i;

	DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_queues\n", DEVNAME(sc));

	sc->sc_reply_freeq = mpii_dmamem_alloc(sc,
	    sc->sc_reply_free_qdepth * sizeof(*rfp));
	if (sc->sc_reply_freeq == NULL)
		return (1);
	rfp = MPII_DMA_KVA(sc->sc_reply_freeq);
	for (i = 0; i < sc->sc_num_reply_frames; i++) {
		rfp[i] = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) +
		    sc->sc_reply_size * i;
	}

	sc->sc_reply_postq = mpii_dmamem_alloc(sc,
	    sc->sc_reply_post_qdepth * sizeof(struct mpii_reply_descr));
	if (sc->sc_reply_postq == NULL)
		goto free_reply_freeq;
	sc->sc_reply_postq_kva = MPII_DMA_KVA(sc->sc_reply_postq);
	/* 0xff is the empty descriptor pattern */
	memset(sc->sc_reply_postq_kva, 0xff, sc->sc_reply_post_qdepth *
	    sizeof(struct mpii_reply_descr));

	return (0);

free_reply_freeq:
	mpii_dmamem_free(sc, sc->sc_reply_freeq);
	return (1);
}
2873 
/*
 * Tell the IOC where the queue indices start: the free queue index
 * points at the last entry (all frames handed to the device), the
 * post queue index at the first.
 */
void
mpii_init_queues(struct mpii_softc *sc)
{
	DNPRINTF(MPII_D_MISC, "%s:  mpii_init_queues\n", DEVNAME(sc));

	sc->sc_reply_free_host_index = sc->sc_reply_free_qdepth - 1;
	sc->sc_reply_post_host_index = 0;
	mpii_write_reply_free(sc, sc->sc_reply_free_host_index);
	mpii_write_reply_post(sc, sc->sc_reply_post_host_index);
}
2884 
/*
 * Run a ccb to completion, sleeping until it finishes.  The ccb's
 * done routine and cookie are temporarily replaced so that
 * mpii_wait_done() can signal completion by NULLing ccb_cookie under
 * the on-stack mutex and waking us; the originals are restored (and
 * the real done routine called) afterwards.
 */
void
mpii_wait(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	struct mutex		mtx;
	void			(*done)(struct mpii_ccb *);
	void			*cookie;

	mtx_init(&mtx, IPL_BIO);

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mpii_wait_done;
	ccb->ccb_cookie = &mtx;

	/* XXX this will wait forever for the ccb to complete */

	mpii_start(sc, ccb);

	/* ccb_cookie is NULLed by mpii_wait_done() on completion */
	mtx_enter(&mtx);
	while (ccb->ccb_cookie != NULL)
		msleep_nsec(ccb, &mtx, PRIBIO, "mpiiwait", INFSLP);
	mtx_leave(&mtx);

	ccb->ccb_cookie = cookie;
	done(ccb);
}
2912 
/*
 * Completion callback used by mpii_wait(): clear ccb_cookie under
 * the waiter's mutex (so the waiter cannot miss the transition) and
 * wake it up.
 */
void
mpii_wait_done(struct mpii_ccb *ccb)
{
	struct mutex		*mtx = ccb->ccb_cookie;

	mtx_enter(mtx);
	ccb->ccb_cookie = NULL;
	mtx_leave(mtx);

	wakeup_one(ccb);
}
2924 
/*
 * scsi midlayer entry point: translate a scsi_xfer into a SCSI IO
 * request and post it to the IOC.  Completion is delivered through
 * mpii_scsi_cmd_done().  Oversized CDBs are failed with fabricated
 * ILLEGAL REQUEST sense; commands for targets with no device are
 * failed with XS_SELTIMEOUT.
 */
void
mpii_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct mpii_softc	*sc = link->bus->sb_adapter_softc;
	struct mpii_ccb		*ccb = xs->io;
	struct mpii_msg_scsi_io	*io;
	struct mpii_device	*dev;
	int			 ret;

	DNPRINTF(MPII_D_CMD, "%s: mpii_scsi_cmd\n", DEVNAME(sc));

	if (xs->cmdlen > MPII_CDB_LEN) {
		DNPRINTF(MPII_D_CMD, "%s: CDB too big %d\n",
		    DEVNAME(sc), xs->cmdlen);
		/* fake ILLEGAL REQUEST / invalid command operation code */
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.error_code = SSD_ERRCODE_VALID | 0x70;
		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.add_sense_code = 0x20;
		xs->error = XS_SENSE;
		scsi_done(xs);
		return;
	}

	if ((dev = sc->sc_devs[link->target]) == NULL) {
		/* device no longer exists */
		xs->error = XS_SELTIMEOUT;
		scsi_done(xs);
		return;
	}

	/* run the submission path without the kernel lock */
	KERNEL_UNLOCK();

	DNPRINTF(MPII_D_CMD, "%s: ccb_smid: %d xs->flags: 0x%x\n",
	    DEVNAME(sc), ccb->ccb_smid, xs->flags);

	ccb->ccb_cookie = xs;
	ccb->ccb_done = mpii_scsi_cmd_done;
	ccb->ccb_dev_handle = dev->dev_handle;

	io = ccb->ccb_cmd;
	memset(io, 0, sizeof(*io));
	io->function = MPII_FUNCTION_SCSI_IO_REQUEST;
	io->sense_buffer_length = sizeof(xs->sense);
	io->sgl_offset0 = sizeof(struct mpii_msg_scsi_io) / 4;
	htolem16(&io->io_flags, xs->cmdlen);
	htolem16(&io->dev_handle, ccb->ccb_dev_handle);
	/* the lun field is big-endian per the message format */
	htobem16(&io->lun[0], link->lun);

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}

	io->tagging = MPII_SCSIIO_ATTR_SIMPLE_Q;

	memcpy(io->cdb, &xs->cmd, xs->cmdlen);

	htolem32(&io->data_length, xs->datalen);

	/* sense data is at the end of a request */
	htolem32(&io->sense_buffer_low_address, ccb->ccb_cmd_dva +
	    sc->sc_request_size - sizeof(struct scsi_sense_data));

	/* build the sgl; SAS3 controllers use the ieee sge format */
	if (ISSET(sc->sc_flags, MPII_F_SAS3))
		ret = mpii_load_xs_sas3(ccb);
	else
		ret = mpii_load_xs(ccb);

	if (ret != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		goto done;
	}

	timeout_set(&xs->stimeout, mpii_scsi_cmd_tmo, ccb);
	if (xs->flags & SCSI_POLL) {
		if (mpii_poll(sc, ccb) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			goto done;
		}
	} else {
		/* arm the command timeout before posting */
		timeout_add_msec(&xs->stimeout, xs->timeout);
		mpii_start(sc, ccb);
	}

	KERNEL_LOCK();
	return;

done:
	KERNEL_LOCK();
	scsi_done(xs);
}
3024 
/*
 * Command timeout: if the ccb is still queued to the hardware, move
 * it to the timeout list and schedule the timeout handler (which
 * issues a target reset) via the iopool ioh.
 */
void
mpii_scsi_cmd_tmo(void *xccb)
{
	struct mpii_ccb		*ccb = xccb;
	struct mpii_softc	*sc = ccb->ccb_sc;

	printf("%s: mpii_scsi_cmd_tmo (0x%08x)\n", DEVNAME(sc),
	    mpii_read_db(sc));

	/* only act if the command has not completed in the meantime */
	mtx_enter(&sc->sc_ccb_mtx);
	if (ccb->ccb_state == MPII_CCB_QUEUED) {
		ccb->ccb_state = MPII_CCB_TIMEOUT;
		SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_tmos, ccb, ccb_link);
	}
	mtx_leave(&sc->sc_ccb_mtx);

	scsi_ioh_add(&sc->sc_ccb_tmo_handler);
}
3043 
/*
 * Timeout handler, run with a ccb ("tccb") supplied by the iopool:
 * take one timed-out ccb off the list and use tccb to issue a target
 * reset for its device handle.  When the reset completes,
 * mpii_scsi_cmd_tmo_done() re-enters this function with the same
 * tccb until the list is empty, at which point tccb is released.
 */
void
mpii_scsi_cmd_tmo_handler(void *cookie, void *io)
{
	struct mpii_softc			*sc = cookie;
	struct mpii_ccb				*tccb = io;
	struct mpii_ccb				*ccb;
	struct mpii_msg_scsi_task_request	*stq;

	mtx_enter(&sc->sc_ccb_mtx);
	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_tmos);
	if (ccb != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_tmos, ccb_link);
		ccb->ccb_state = MPII_CCB_QUEUED;
	}
	/* should remove any other ccbs for the same dev handle */
	mtx_leave(&sc->sc_ccb_mtx);

	if (ccb == NULL) {
		/* nothing left to reset; give the tmo ccb back */
		scsi_io_put(&sc->sc_iopool, tccb);
		return;
	}

	stq = tccb->ccb_cmd;
	stq->function = MPII_FUNCTION_SCSI_TASK_MGMT;
	stq->task_type = MPII_SCSI_TASK_TARGET_RESET;
	htolem16(&stq->dev_handle, ccb->ccb_dev_handle);

	tccb->ccb_done = mpii_scsi_cmd_tmo_done;
	mpii_start(sc, tccb);
}
3074 
3075 void
3076 mpii_scsi_cmd_tmo_done(struct mpii_ccb *tccb)
3077 {
3078 	mpii_scsi_cmd_tmo_handler(tccb->ccb_sc, tccb);
3079 }
3080 
void
mpii_scsi_cmd_done(struct mpii_ccb *ccb)
{
	struct mpii_ccb		*tccb;
	struct mpii_msg_scsi_io_error	*sie;
	struct mpii_softc	*sc = ccb->ccb_sc;
	struct scsi_xfer	*xs = ccb->ccb_cookie;
	struct scsi_sense_data	*sense;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;

	/* the command completed, so its timeout no longer applies */
	timeout_del(&xs->stimeout);
	mtx_enter(&sc->sc_ccb_mtx);
	if (ccb->ccb_state == MPII_CCB_TIMEOUT) {
		/* ENOSIMPLEQ_REMOVE :( */
		/*
		 * The ccb was queued for timeout recovery; SIMPLEQ has no
		 * arbitrary-element removal, so unlink it by hand.
		 */
		if (ccb == SIMPLEQ_FIRST(&sc->sc_ccb_tmos))
			SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_tmos, ccb_link);
		else {
			SIMPLEQ_FOREACH(tccb, &sc->sc_ccb_tmos, ccb_link) {
				if (SIMPLEQ_NEXT(tccb, ccb_link) == ccb) {
					SIMPLEQ_REMOVE_AFTER(&sc->sc_ccb_tmos,
					    tccb, ccb_link);
					break;
				}
			}
		}
	}

	ccb->ccb_state = MPII_CCB_READY;
	mtx_leave(&sc->sc_ccb_mtx);

	/* sync and unload the data transfer map before touching the result */
	if (xs->datalen != 0) {
		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, dmap);
	}

	xs->error = XS_NOERROR;
	xs->resid = 0;

	/* no reply frame was posted, which indicates success */
	if (ccb->ccb_rcb == NULL) {
		/* no scsi error, we're ok so drop out early */
		xs->status = SCSI_OK;
		goto done;
	}

	sie = ccb->ccb_rcb->rcb_reply;

	DNPRINTF(MPII_D_CMD, "%s: mpii_scsi_cmd_done xs cmd: 0x%02x len: %d "
	    "flags 0x%x\n", DEVNAME(sc), xs->cmd.opcode, xs->datalen,
	    xs->flags);
	DNPRINTF(MPII_D_CMD, "%s:  dev_handle: %d msg_length: %d "
	    "function: 0x%02x\n", DEVNAME(sc), lemtoh16(&sie->dev_handle),
	    sie->msg_length, sie->function);
	DNPRINTF(MPII_D_CMD, "%s:  vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
	    sie->vp_id, sie->vf_id);
	DNPRINTF(MPII_D_CMD, "%s:  scsi_status: 0x%02x scsi_state: 0x%02x "
	    "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status,
	    sie->scsi_state, lemtoh16(&sie->ioc_status));
	DNPRINTF(MPII_D_CMD, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    lemtoh32(&sie->ioc_loginfo));
	DNPRINTF(MPII_D_CMD, "%s:  transfer_count: %d\n", DEVNAME(sc),
	    lemtoh32(&sie->transfer_count));
	DNPRINTF(MPII_D_CMD, "%s:  sense_count: %d\n", DEVNAME(sc),
	    lemtoh32(&sie->sense_count));
	DNPRINTF(MPII_D_CMD, "%s:  response_info: 0x%08x\n", DEVNAME(sc),
	    lemtoh32(&sie->response_info));
	DNPRINTF(MPII_D_CMD, "%s:  task_tag: 0x%04x\n", DEVNAME(sc),
	    lemtoh16(&sie->task_tag));
	DNPRINTF(MPII_D_CMD, "%s:  bidirectional_transfer_count: 0x%08x\n",
	    DEVNAME(sc), lemtoh32(&sie->bidirectional_transfer_count));

	if (sie->scsi_state & MPII_SCSIIO_STATE_NO_SCSI_STATUS)
		xs->status = SCSI_TERMINATED;
	else
		xs->status = sie->scsi_status;
	xs->resid = 0;

	/* map the IOC completion status onto a scsi_xfer error code */
	switch (lemtoh16(&sie->ioc_status) & MPII_IOCSTATUS_MASK) {
	case MPII_IOCSTATUS_SCSI_DATA_UNDERRUN:
		xs->resid = xs->datalen - lemtoh32(&sie->transfer_count);
		/* FALLTHROUGH */

	case MPII_IOCSTATUS_SUCCESS:
	case MPII_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			xs->error = XS_NOERROR;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			xs->error = XS_DRIVER_STUFFUP;
		}
		break;

	case MPII_IOCSTATUS_BUSY:
	case MPII_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_BUSY;
		break;

	case MPII_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPII_IOCSTATUS_SCSI_TASK_TERMINATED:
		xs->error = XS_RESET;
		break;

	case MPII_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPII_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	/* autosense data is placed at the tail of the request frame */
	sense = (struct scsi_sense_data *)((caddr_t)ccb->ccb_cmd +
	    sc->sc_request_size - sizeof(*sense));
	if (sie->scsi_state & MPII_SCSIIO_STATE_AUTOSENSE_VALID)
		memcpy(&xs->sense, sense, sizeof(xs->sense));

	DNPRINTF(MPII_D_CMD, "%s:  xs err: %d status: %#x\n", DEVNAME(sc),
	    xs->error, xs->status);

	mpii_push_reply(sc, ccb->ccb_rcb);
done:
	/* deliver the completion to the midlayer under the kernel lock */
	KERNEL_LOCK();
	scsi_done(xs);
	KERNEL_UNLOCK();
}
3220 
3221 int
3222 mpii_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
3223 {
3224 	struct mpii_softc	*sc = link->bus->sb_adapter_softc;
3225 	struct mpii_device	*dev = sc->sc_devs[link->target];
3226 
3227 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_scsi_ioctl\n", DEVNAME(sc));
3228 
3229 	switch (cmd) {
3230 	case DIOCGCACHE:
3231 	case DIOCSCACHE:
3232 		if (dev != NULL && ISSET(dev->flags, MPII_DF_VOLUME)) {
3233 			return (mpii_ioctl_cache(link, cmd,
3234 			    (struct dk_cache *)addr));
3235 		}
3236 		break;
3237 
3238 	default:
3239 		if (sc->sc_ioctl)
3240 			return (sc->sc_ioctl(&sc->sc_dev, cmd, addr));
3241 
3242 		break;
3243 	}
3244 
3245 	return (ENOTTY);
3246 }
3247 
3248 int
3249 mpii_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc)
3250 {
3251 	struct mpii_softc *sc = link->bus->sb_adapter_softc;
3252 	struct mpii_device *dev = sc->sc_devs[link->target];
3253 	struct mpii_cfg_raid_vol_pg0 *vpg;
3254 	struct mpii_msg_raid_action_request *req;
3255 	struct mpii_msg_raid_action_reply *rep;
3256 	struct mpii_cfg_hdr hdr;
3257 	struct mpii_ccb	*ccb;
3258 	u_int32_t addr = MPII_CFG_RAID_VOL_ADDR_HANDLE | dev->dev_handle;
3259 	size_t pagelen;
3260 	int rv = 0;
3261 	int enabled;
3262 
3263 	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3264 	    addr, MPII_PG_POLL, &hdr) != 0)
3265 		return (EINVAL);
3266 
3267 	pagelen = hdr.page_length * 4;
3268 	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
3269 	if (vpg == NULL)
3270 		return (ENOMEM);
3271 
3272 	if (mpii_req_cfg_page(sc, addr, MPII_PG_POLL, &hdr, 1,
3273 	    vpg, pagelen) != 0) {
3274 		rv = EINVAL;
3275 		goto done;
3276 	}
3277 
3278 	enabled = ((lemtoh16(&vpg->volume_settings) &
3279 	    MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_MASK) ==
3280 	    MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_ENABLED) ? 1 : 0;
3281 
3282 	if (cmd == DIOCGCACHE) {
3283 		dc->wrcache = enabled;
3284 		dc->rdcache = 0;
3285 		goto done;
3286 	} /* else DIOCSCACHE */
3287 
3288 	if (dc->rdcache) {
3289 		rv = EOPNOTSUPP;
3290 		goto done;
3291 	}
3292 
3293 	if (((dc->wrcache) ? 1 : 0) == enabled)
3294 		goto done;
3295 
3296 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_POLL);
3297 	if (ccb == NULL) {
3298 		rv = ENOMEM;
3299 		goto done;
3300 	}
3301 
3302 	ccb->ccb_done = mpii_empty_done;
3303 
3304 	req = ccb->ccb_cmd;
3305 	memset(req, 0, sizeof(*req));
3306 	req->function = MPII_FUNCTION_RAID_ACTION;
3307 	req->action = MPII_RAID_ACTION_CHANGE_VOL_WRITE_CACHE;
3308 	htolem16(&req->vol_dev_handle, dev->dev_handle);
3309 	htolem32(&req->action_data, dc->wrcache ?
3310 	    MPII_RAID_VOL_WRITE_CACHE_ENABLE :
3311 	    MPII_RAID_VOL_WRITE_CACHE_DISABLE);
3312 
3313 	if (mpii_poll(sc, ccb) != 0) {
3314 		rv = EIO;
3315 		goto done;
3316 	}
3317 
3318 	if (ccb->ccb_rcb != NULL) {
3319 		rep = ccb->ccb_rcb->rcb_reply;
3320 		if ((rep->ioc_status != MPII_IOCSTATUS_SUCCESS) ||
3321 		    ((rep->action_data[0] &
3322 		     MPII_RAID_VOL_WRITE_CACHE_MASK) !=
3323 		    (dc->wrcache ? MPII_RAID_VOL_WRITE_CACHE_ENABLE :
3324 		     MPII_RAID_VOL_WRITE_CACHE_DISABLE)))
3325 			rv = EINVAL;
3326 		mpii_push_reply(sc, ccb->ccb_rcb);
3327 	}
3328 
3329 	scsi_io_put(&sc->sc_iopool, ccb);
3330 
3331 done:
3332 	free(vpg, M_TEMP, pagelen);
3333 	return (rv);
3334 }
3335 
3336 #if NBIO > 0
3337 int
3338 mpii_ioctl(struct device *dev, u_long cmd, caddr_t addr)
3339 {
3340 	struct mpii_softc	*sc = (struct mpii_softc *)dev;
3341 	int			error = 0;
3342 
3343 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl ", DEVNAME(sc));
3344 
3345 	switch (cmd) {
3346 	case BIOCINQ:
3347 		DNPRINTF(MPII_D_IOCTL, "inq\n");
3348 		error = mpii_ioctl_inq(sc, (struct bioc_inq *)addr);
3349 		break;
3350 	case BIOCVOL:
3351 		DNPRINTF(MPII_D_IOCTL, "vol\n");
3352 		error = mpii_ioctl_vol(sc, (struct bioc_vol *)addr);
3353 		break;
3354 	case BIOCDISK:
3355 		DNPRINTF(MPII_D_IOCTL, "disk\n");
3356 		error = mpii_ioctl_disk(sc, (struct bioc_disk *)addr);
3357 		break;
3358 	default:
3359 		DNPRINTF(MPII_D_IOCTL, " invalid ioctl\n");
3360 		error = ENOTTY;
3361 	}
3362 
3363 	return (error);
3364 }
3365 
3366 int
3367 mpii_ioctl_inq(struct mpii_softc *sc, struct bioc_inq *bi)
3368 {
3369 	int			i;
3370 
3371 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_inq\n", DEVNAME(sc));
3372 
3373 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
3374 	for (i = 0; i < sc->sc_max_devices; i++)
3375 		if (sc->sc_devs[i] &&
3376 		    ISSET(sc->sc_devs[i]->flags, MPII_DF_VOLUME))
3377 			bi->bi_novol++;
3378 	return (0);
3379 }
3380 
/*
 * Fill in a bioc_vol for BIOCVOL: fetch RAID volume page 0 for the
 * requested volume and translate its state, type, member count, size and
 * attached device name into bio(4) terms.  May sleep; the polled variant
 * is mpii_bio_volstate().  Returns 0 on success or an errno value.
 */
int
mpii_ioctl_vol(struct mpii_softc *sc, struct bioc_vol *bv)
{
	struct mpii_cfg_raid_vol_pg0	*vpg;
	struct mpii_cfg_hdr		hdr;
	struct mpii_device		*dev;
	struct scsi_link		*lnk;
	struct device			*scdev;
	size_t				pagelen;
	u_int16_t			volh;
	int				rv, hcnt = 0;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_vol %d\n",
	    DEVNAME(sc), bv->bv_volid);

	if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL)
		return (ENODEV);
	volh = dev->dev_handle;

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
		printf("%s: unable to fetch header for raid volume page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is in 32-bit words */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (vpg == NULL) {
		printf("%s: unable to allocate space for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
	    &hdr, 1, vpg, pagelen) != 0) {
		printf("%s: unable to fetch raid volume page 0\n",
		    DEVNAME(sc));
		free(vpg, M_TEMP, pagelen);
		return (EINVAL);
	}

	/* translate the firmware volume state into bio(4) status */
	switch (vpg->volume_state) {
	case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
	case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
		if (ISSET(lemtoh32(&vpg->volume_status),
		    MPII_CFG_RAID_VOL_0_STATUS_RESYNC)) {
			bv->bv_status = BIOC_SVREBUILD;
			bv->bv_percent = dev->percent;
		} else
			bv->bv_status = BIOC_SVDEGRADED;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_FAILED:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
		bv->bv_status = BIOC_SVBUILDING;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_MISSING:
	default:
		bv->bv_status = BIOC_SVINVALID;
		break;
	}

	/* map the firmware volume type onto a bio(4) RAID level */
	switch (vpg->volume_type) {
	case MPII_CFG_RAID_VOL_0_TYPE_RAID0:
		bv->bv_level = 0;
		break;
	case MPII_CFG_RAID_VOL_0_TYPE_RAID1:
		bv->bv_level = 1;
		break;
	case MPII_CFG_RAID_VOL_0_TYPE_RAID1E:
		bv->bv_level = 0x1E;
		break;
	case MPII_CFG_RAID_VOL_0_TYPE_RAID10:
		bv->bv_level = 10;
		break;
	default:
		bv->bv_level = -1;
	}

	/* count the hot spares assigned to this volume's pool */
	if ((rv = mpii_bio_hs(sc, NULL, 0, vpg->hot_spare_pool, &hcnt)) != 0) {
		free(vpg, M_TEMP, pagelen);
		return (rv);
	}

	/* bio(4) counts member disks and hot spares together */
	bv->bv_nodisk = vpg->num_phys_disks + hcnt;

	bv->bv_size = letoh64(vpg->max_lba) * lemtoh16(&vpg->block_size);

	/* report the kernel device name attached to this volume, if any */
	lnk = scsi_get_link(sc->sc_scsibus, dev->slot, 0);
	if (lnk != NULL) {
		scdev = lnk->device_softc;
		strlcpy(bv->bv_dev, scdev->dv_xname, sizeof(bv->bv_dev));
	}

	free(vpg, M_TEMP, pagelen);
	return (0);
}
3483 
3484 int
3485 mpii_ioctl_disk(struct mpii_softc *sc, struct bioc_disk *bd)
3486 {
3487 	struct mpii_cfg_raid_vol_pg0		*vpg;
3488 	struct mpii_cfg_raid_vol_pg0_physdisk	*pd;
3489 	struct mpii_cfg_hdr			hdr;
3490 	struct mpii_device			*dev;
3491 	size_t					pagelen;
3492 	u_int16_t				volh;
3493 	u_int8_t				dn;
3494 
3495 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_disk %d/%d\n",
3496 	    DEVNAME(sc), bd->bd_volid, bd->bd_diskid);
3497 
3498 	if ((dev = mpii_find_vol(sc, bd->bd_volid)) == NULL)
3499 		return (ENODEV);
3500 	volh = dev->dev_handle;
3501 
3502 	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3503 	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
3504 		printf("%s: unable to fetch header for raid volume page 0\n",
3505 		    DEVNAME(sc));
3506 		return (EINVAL);
3507 	}
3508 
3509 	pagelen = hdr.page_length * 4;
3510 	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
3511 	if (vpg == NULL) {
3512 		printf("%s: unable to allocate space for raid "
3513 		    "volume page 0\n", DEVNAME(sc));
3514 		return (ENOMEM);
3515 	}
3516 
3517 	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
3518 	    &hdr, 1, vpg, pagelen) != 0) {
3519 		printf("%s: unable to fetch raid volume page 0\n",
3520 		    DEVNAME(sc));
3521 		free(vpg, M_TEMP, pagelen);
3522 		return (EINVAL);
3523 	}
3524 
3525 	if (bd->bd_diskid >= vpg->num_phys_disks) {
3526 		int		nvdsk = vpg->num_phys_disks;
3527 		int		hsmap = vpg->hot_spare_pool;
3528 
3529 		free(vpg, M_TEMP, pagelen);
3530 		return (mpii_bio_hs(sc, bd, nvdsk, hsmap, NULL));
3531 	}
3532 
3533 	pd = (struct mpii_cfg_raid_vol_pg0_physdisk *)(vpg + 1) +
3534 	    bd->bd_diskid;
3535 	dn = pd->phys_disk_num;
3536 
3537 	free(vpg, M_TEMP, pagelen);
3538 	return (mpii_bio_disk(sc, bd, dn));
3539 }
3540 
/*
 * Walk the active RAID configuration looking for hot spare disks in pool
 * 'hsmap'.  Two modes: when 'bd' is given, fill it in for the hot spare
 * whose relative index is bd_diskid - nvdsk; when 'bd' is NULL, just count
 * the matching spares into '*hscnt'.  Returns 0 on success or an errno.
 */
int
mpii_bio_hs(struct mpii_softc *sc, struct bioc_disk *bd, int nvdsk,
     int hsmap, int *hscnt)
{
	struct mpii_cfg_raid_config_pg0	*cpg;
	struct mpii_raid_config_element	*el;
	struct mpii_ecfg_hdr		ehdr;
	size_t				pagelen;
	int				i, nhs = 0;

	if (bd)
		DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs %d\n", DEVNAME(sc),
		    bd->bd_diskid - nvdsk);
	else
		DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs\n", DEVNAME(sc));

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_CONFIG,
	    0, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG, MPII_PG_EXTENDED,
	    &ehdr) != 0) {
		printf("%s: unable to fetch header for raid config page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* extended page length is in 32-bit words */
	pagelen = lemtoh16(&ehdr.ext_page_length) * 4;
	cpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (cpg == NULL) {
		printf("%s: unable to allocate space for raid config page 0\n",
		    DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG,
	    MPII_PG_EXTENDED, &ehdr, 1, cpg, pagelen) != 0) {
		printf("%s: unable to fetch raid config page 0\n",
		    DEVNAME(sc));
		free(cpg, M_TEMP, pagelen);
		return (EINVAL);
	}

	/* config elements directly follow the fixed page header */
	el = (struct mpii_raid_config_element *)(cpg + 1);
	for (i = 0; i < cpg->num_elements; i++, el++) {
		if (ISSET(lemtoh16(&el->element_flags),
		    MPII_RAID_CONFIG_ELEMENT_FLAG_HSP_PHYS_DISK) &&
		    el->hot_spare_pool == hsmap) {
			/*
			 * diskid comparison is based on the idea that all
			 * disks are counted by the bio(4) in sequence, thus
			 * subtracting the number of disks in the volume
			 * from the diskid yields us a "relative" hotspare
			 * number, which is good enough for us.
			 */
			if (bd != NULL && bd->bd_diskid == nhs + nvdsk) {
				u_int8_t dn = el->phys_disk_num;

				free(cpg, M_TEMP, pagelen);
				return (mpii_bio_disk(sc, bd, dn));
			}
			nhs++;
		}
	}

	if (hscnt)
		*hscnt = nhs;

	free(cpg, M_TEMP, pagelen);
	return (0);
}
3609 
/*
 * Fill in a bioc_disk from RAID physical disk page 0 for physical disk
 * number 'dn': target, state, size and vendor/product/serial strings.
 * Returns 0 on success (including BIOC_SDINVALID for a vanished device)
 * or an errno value.
 */
int
mpii_bio_disk(struct mpii_softc *sc, struct bioc_disk *bd, u_int8_t dn)
{
	struct mpii_cfg_raid_physdisk_pg0	*ppg;
	struct mpii_cfg_hdr			hdr;
	struct mpii_device			*dev;
	int					len;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_disk %d\n", DEVNAME(sc),
	    bd->bd_diskid);

	ppg = malloc(sizeof(*ppg), M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (ppg == NULL) {
		printf("%s: unable to allocate space for raid physical disk "
		    "page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	/* build the config header by hand; the page has a fixed layout */
	hdr.page_version = 0;
	hdr.page_length = sizeof(*ppg) / 4;
	hdr.page_number = 0;
	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_RAID_PD;

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_PHYS_DISK_ADDR_NUMBER | dn, 0,
	    &hdr, 1, ppg, sizeof(*ppg)) != 0) {
		printf("%s: unable to fetch raid drive page 0\n",
		    DEVNAME(sc));
		free(ppg, M_TEMP, sizeof(*ppg));
		return (EINVAL);
	}

	bd->bd_target = ppg->phys_disk_num;

	/* the disk may have gone away; report it as invalid, not an error */
	if ((dev = mpii_find_dev(sc, lemtoh16(&ppg->dev_handle))) == NULL) {
		bd->bd_status = BIOC_SDINVALID;
		free(ppg, M_TEMP, sizeof(*ppg));
		return (0);
	}

	/* translate the firmware disk state into bio(4) status */
	switch (ppg->phys_disk_state) {
	case MPII_CFG_RAID_PHYDISK_0_STATE_ONLINE:
	case MPII_CFG_RAID_PHYDISK_0_STATE_OPTIMAL:
		bd->bd_status = BIOC_SDONLINE;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_OFFLINE:
		if (ppg->offline_reason ==
		    MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILED ||
		    ppg->offline_reason ==
		    MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILEDREQ)
			bd->bd_status = BIOC_SDFAILED;
		else
			bd->bd_status = BIOC_SDOFFLINE;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_DEGRADED:
		bd->bd_status = BIOC_SDFAILED;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_REBUILDING:
		bd->bd_status = BIOC_SDREBUILD;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_HOTSPARE:
		bd->bd_status = BIOC_SDHOTSPARE;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCONFIGURED:
		bd->bd_status = BIOC_SDUNUSED;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCOMPATIBLE:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = letoh64(ppg->dev_max_lba) * lemtoh16(&ppg->block_size);

	/* splice "vendor product" into bd_vendor with a space between */
	scsi_strvis(bd->bd_vendor, ppg->vendor_id, sizeof(ppg->vendor_id));
	len = strlen(bd->bd_vendor);
	bd->bd_vendor[len] = ' ';
	scsi_strvis(&bd->bd_vendor[len + 1], ppg->product_id,
	    sizeof(ppg->product_id));
	scsi_strvis(bd->bd_serial, ppg->serial, sizeof(ppg->serial));

	free(ppg, M_TEMP, sizeof(*ppg));
	return (0);
}
3693 
3694 struct mpii_device *
3695 mpii_find_vol(struct mpii_softc *sc, int volid)
3696 {
3697 	struct mpii_device	*dev = NULL;
3698 
3699 	if (sc->sc_vd_id_low + volid >= sc->sc_max_devices)
3700 		return (NULL);
3701 	dev = sc->sc_devs[sc->sc_vd_id_low + volid];
3702 	if (dev && ISSET(dev->flags, MPII_DF_VOLUME))
3703 		return (dev);
3704 	return (NULL);
3705 }
3706 
3707 #ifndef SMALL_KERNEL
3708 /*
3709  * Non-sleeping lightweight version of the mpii_ioctl_vol
3710  */
int
mpii_bio_volstate(struct mpii_softc *sc, struct bioc_vol *bv)
{
	struct mpii_cfg_raid_vol_pg0	*vpg;
	struct mpii_cfg_hdr		hdr;
	struct mpii_device		*dev = NULL;
	size_t				pagelen;
	u_int16_t			volh;

	/*
	 * Only fills in bv_status; uses MPII_PG_POLL and M_NOWAIT so it is
	 * safe from the sensor refresh context where sleeping is not
	 * allowed.  Diagnostics are DNPRINTF-only for the same reason.
	 */
	if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL)
		return (ENODEV);
	volh = dev->dev_handle;

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, MPII_PG_POLL, &hdr) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: unable to fetch header for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is in 32-bit words */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_NOWAIT | M_ZERO);
	if (vpg == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: unable to allocate space for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh,
	    MPII_PG_POLL, &hdr, 1, vpg, pagelen) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: unable to fetch raid volume "
		    "page 0\n", DEVNAME(sc));
		free(vpg, M_TEMP, pagelen);
		return (EINVAL);
	}

	/* translate the firmware volume state into bio(4) status */
	switch (vpg->volume_state) {
	case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
	case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
		if (ISSET(lemtoh32(&vpg->volume_status),
		    MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
			bv->bv_status = BIOC_SVREBUILD;
		else
			bv->bv_status = BIOC_SVDEGRADED;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_FAILED:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
		bv->bv_status = BIOC_SVBUILDING;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_MISSING:
	default:
		bv->bv_status = BIOC_SVINVALID;
		break;
	}

	free(vpg, M_TEMP, pagelen);
	return (0);
}
3774 
3775 int
3776 mpii_create_sensors(struct mpii_softc *sc)
3777 {
3778 	struct scsibus_softc	*ssc = sc->sc_scsibus;
3779 	struct device		*dev;
3780 	struct scsi_link	*link;
3781 	int			i;
3782 
3783 	sc->sc_sensors = mallocarray(sc->sc_vd_count, sizeof(struct ksensor),
3784 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3785 	if (sc->sc_sensors == NULL)
3786 		return (1);
3787 	sc->sc_nsensors = sc->sc_vd_count;
3788 
3789 	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
3790 	    sizeof(sc->sc_sensordev.xname));
3791 
3792 	for (i = 0; i < sc->sc_vd_count; i++) {
3793 		link = scsi_get_link(ssc, i + sc->sc_vd_id_low, 0);
3794 		if (link == NULL)
3795 			goto bad;
3796 
3797 		dev = link->device_softc;
3798 
3799 		sc->sc_sensors[i].type = SENSOR_DRIVE;
3800 		sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
3801 
3802 		strlcpy(sc->sc_sensors[i].desc, dev->dv_xname,
3803 		    sizeof(sc->sc_sensors[i].desc));
3804 
3805 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
3806 	}
3807 
3808 	if (sensor_task_register(sc, mpii_refresh_sensors, 10) == NULL)
3809 		goto bad;
3810 
3811 	sensordev_install(&sc->sc_sensordev);
3812 
3813 	return (0);
3814 
3815 bad:
3816 	free(sc->sc_sensors, M_DEVBUF, 0);
3817 
3818 	return (1);
3819 }
3820 
3821 void
3822 mpii_refresh_sensors(void *arg)
3823 {
3824 	struct mpii_softc	*sc = arg;
3825 	struct bioc_vol		bv;
3826 	int			i;
3827 
3828 	for (i = 0; i < sc->sc_nsensors; i++) {
3829 		memset(&bv, 0, sizeof(bv));
3830 		bv.bv_volid = i;
3831 		if (mpii_bio_volstate(sc, &bv))
3832 			return;
3833 		switch(bv.bv_status) {
3834 		case BIOC_SVOFFLINE:
3835 			sc->sc_sensors[i].value = SENSOR_DRIVE_FAIL;
3836 			sc->sc_sensors[i].status = SENSOR_S_CRIT;
3837 			break;
3838 		case BIOC_SVDEGRADED:
3839 			sc->sc_sensors[i].value = SENSOR_DRIVE_PFAIL;
3840 			sc->sc_sensors[i].status = SENSOR_S_WARN;
3841 			break;
3842 		case BIOC_SVREBUILD:
3843 			sc->sc_sensors[i].value = SENSOR_DRIVE_REBUILD;
3844 			sc->sc_sensors[i].status = SENSOR_S_WARN;
3845 			break;
3846 		case BIOC_SVONLINE:
3847 			sc->sc_sensors[i].value = SENSOR_DRIVE_ONLINE;
3848 			sc->sc_sensors[i].status = SENSOR_S_OK;
3849 			break;
3850 		case BIOC_SVINVALID:
3851 			/* FALLTHROUGH */
3852 		default:
3853 			sc->sc_sensors[i].value = 0; /* unknown */
3854 			sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
3855 		}
3856 	}
3857 }
3858 #endif /* SMALL_KERNEL */
3859 #endif /* NBIO > 0 */
3860