/* xref: /openbsd-src/sys/dev/pci/mpii.c (revision f763167468dba5339ed4b14b7ecaca2a397ab0f6) */
1 /*	$OpenBSD: mpii.c,v 1.112 2017/08/10 15:01:42 mikeb Exp $	*/
2 /*
3  * Copyright (c) 2010, 2012 Mike Belopuhov
4  * Copyright (c) 2009 James Giannoules
5  * Copyright (c) 2005 - 2010 David Gwynne <dlg@openbsd.org>
6  * Copyright (c) 2005 - 2010 Marco Peereboom <marco@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include "bio.h"
22 
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/device.h>
26 #include <sys/ioctl.h>
27 #include <sys/malloc.h>
28 #include <sys/kernel.h>
29 #include <sys/rwlock.h>
30 #include <sys/sensors.h>
31 #include <sys/dkio.h>
32 #include <sys/tree.h>
33 #include <sys/task.h>
34 
35 #include <machine/bus.h>
36 
37 #include <dev/pci/pcireg.h>
38 #include <dev/pci/pcivar.h>
39 #include <dev/pci/pcidevs.h>
40 
41 #include <scsi/scsi_all.h>
42 #include <scsi/scsiconf.h>
43 
44 #include <dev/biovar.h>
45 
46 #include <dev/pci/mpiireg.h>
47 
48 /* #define MPII_DEBUG */
49 #ifdef MPII_DEBUG
50 #define DPRINTF(x...)		do { if (mpii_debug) printf(x); } while(0)
51 #define DNPRINTF(n,x...)	do { if (mpii_debug & (n)) printf(x); } while(0)
52 #define	MPII_D_CMD		(0x0001)
53 #define	MPII_D_INTR		(0x0002)
54 #define	MPII_D_MISC		(0x0004)
55 #define	MPII_D_DMA		(0x0008)
56 #define	MPII_D_IOCTL		(0x0010)
57 #define	MPII_D_RW		(0x0020)
58 #define	MPII_D_MEM		(0x0040)
59 #define	MPII_D_CCB		(0x0080)
60 #define	MPII_D_PPR		(0x0100)
61 #define	MPII_D_RAID		(0x0200)
62 #define	MPII_D_EVT		(0x0400)
63 #define MPII_D_CFG		(0x0800)
64 #define MPII_D_MAP		(0x1000)
65 
66 u_int32_t  mpii_debug = 0
67 		| MPII_D_CMD
68 		| MPII_D_INTR
69 		| MPII_D_MISC
70 		| MPII_D_DMA
71 		| MPII_D_IOCTL
72 		| MPII_D_RW
73 		| MPII_D_MEM
74 		| MPII_D_CCB
75 		| MPII_D_PPR
76 		| MPII_D_RAID
77 		| MPII_D_EVT
78 		| MPII_D_CFG
79 		| MPII_D_MAP
80 	;
81 #else
82 #define DPRINTF(x...)
83 #define DNPRINTF(n,x...)
84 #endif
85 
86 #define MPII_REQUEST_SIZE		(512)
87 #define MPII_REQUEST_CREDIT		(128)
88 
/*
 * A single contiguous bus_dma allocation: the DMA map, its backing
 * segment, the allocation size and the kernel virtual address it is
 * mapped at.
 */
struct mpii_dmamem {
	bus_dmamap_t		mdm_map;
	bus_dma_segment_t	mdm_seg;
	size_t			mdm_size;
	caddr_t			mdm_kva;
};
/* accessors for the map, the device (DMA) address and the kernel address */
#define MPII_DMA_MAP(_mdm) ((_mdm)->mdm_map)
#define MPII_DMA_DVA(_mdm) ((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MPII_DMA_KVA(_mdm) ((void *)(_mdm)->mdm_kva)
98 
struct mpii_softc;

/*
 * Reply control block: tracks one hardware reply frame by its kernel
 * virtual address and its 32-bit DMA address, and links it onto the
 * event/ack queues.
 */
struct mpii_rcb {
	SIMPLEQ_ENTRY(mpii_rcb)	rcb_link;
	void			*rcb_reply;
	u_int32_t		rcb_reply_dva;
};

SIMPLEQ_HEAD(mpii_rcb_list, mpii_rcb);
108 
/*
 * Per-target bookkeeping for a device (physical disk, RAID volume or
 * hot spare) known to the controller.
 */
struct mpii_device {
	int			flags;
#define MPII_DF_ATTACH		(0x0001)
#define MPII_DF_DETACH		(0x0002)
#define MPII_DF_HIDDEN		(0x0004)
#define MPII_DF_UNUSED		(0x0008)
#define MPII_DF_VOLUME		(0x0010)
#define MPII_DF_VOLUME_DISK	(0x0020)
#define MPII_DF_HOT_SPARE	(0x0040)
	short			slot;
	short			percent;	/* presumably rebuild/resync progress — used by bio code; verify */
	u_int16_t		dev_handle;	/* IOC device handle, used for config page lookups */
	u_int16_t		enclosure;
	u_int16_t		expander;
	u_int8_t		phy_num;
	u_int8_t		physical_port;
};
126 
/*
 * Command control block: one outstanding request frame, the DMA map
 * used for its data transfer and the completion callback run when the
 * matching reply arrives.
 */
struct mpii_ccb {
	struct mpii_softc	*ccb_sc;

	void *			ccb_cookie;	/* caller context; a scsi_xfer for I/O commands */
	bus_dmamap_t		ccb_dmamap;

	bus_addr_t		ccb_offset;	/* offset of the request frame in sc_requests */
	void			*ccb_cmd;	/* kva of the request frame */
	bus_addr_t		ccb_cmd_dva;	/* dva of the request frame */
	u_int16_t		ccb_dev_handle;
	u_int16_t		ccb_smid;	/* system message id; indexes sc_ccbs (1-based) */

	/* lifecycle of the ccb; see sc_ccb_mtx comment in mpii_softc */
	volatile enum {
		MPII_CCB_FREE,
		MPII_CCB_READY,
		MPII_CCB_QUEUED,
		MPII_CCB_TIMEOUT
	}			ccb_state;

	void			(*ccb_done)(struct mpii_ccb *);
	struct mpii_rcb		*ccb_rcb;	/* reply frame, if any, set at completion */

	SIMPLEQ_ENTRY(mpii_ccb)	ccb_link;
};

SIMPLEQ_HEAD(mpii_ccb_list, mpii_ccb);
153 
/*
 * Per-controller state.
 */
struct mpii_softc {
	struct device		sc_dev;

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	void			*sc_ih;		/* interrupt handler cookie */

	struct scsi_link	sc_link;

	int			sc_flags;
#define MPII_F_RAID		(1<<1)
#define MPII_F_SAS3		(1<<2)
#define MPII_F_CONFIG_PENDING	(1<<3)

	struct scsibus_softc	*sc_scsibus;

	/* device table indexed by scsi target id, sc_max_devices entries */
	struct mpii_device	**sc_devs;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	struct mutex		sc_req_mtx;
	struct mutex		sc_rep_mtx;	/* serialises reply post queue processing */

	ushort			sc_reply_size;
	ushort			sc_request_size;

	ushort			sc_max_cmds;
	ushort			sc_num_reply_frames;
	u_int			sc_reply_free_qdepth;
	u_int			sc_reply_post_qdepth;

	ushort			sc_chain_sge;	/* index of the chain sge within a request frame */
	ushort			sc_max_sgl;

	u_int8_t		sc_ioc_event_replay;

	u_int8_t		sc_porttype;
	u_int8_t		sc_max_volumes;
	u_int16_t		sc_max_devices;
	u_int16_t		sc_vd_count;
	u_int16_t		sc_vd_id_low;
	u_int16_t		sc_pd_id_start;
	int			sc_ioc_number;
	u_int8_t		sc_vf_id;

	struct mpii_ccb		*sc_ccbs;
	struct mpii_ccb_list	sc_ccb_free;
	struct mutex		sc_ccb_free_mtx;

	struct mutex		sc_ccb_mtx;
				/*
				 * this protects the ccb state and list entry
				 * between mpii_scsi_cmd and scsidone.
				 */

	struct mpii_ccb_list	sc_ccb_tmos;
	struct scsi_iohandler	sc_ccb_tmo_handler;

	struct scsi_iopool	sc_iopool;

	/* request frames, one per ccb */
	struct mpii_dmamem	*sc_requests;

	/* reply frames and their bookkeeping rcbs */
	struct mpii_dmamem	*sc_replies;
	struct mpii_rcb		*sc_rcbs;

	struct mpii_dmamem	*sc_reply_postq;
	struct mpii_reply_descr	*sc_reply_postq_kva;
	u_int			sc_reply_post_host_index;

	struct mpii_dmamem	*sc_reply_freeq;
	u_int			sc_reply_free_host_index;

	/* async events from the IOC, drained by mpii_event_sas */
	struct mpii_rcb_list	sc_evt_sas_queue;
	struct mutex		sc_evt_sas_mtx;
	struct task		sc_evt_sas_task;

	struct mpii_rcb_list	sc_evt_ack_queue;
	struct mutex		sc_evt_ack_mtx;
	struct scsi_iohandler	sc_evt_ack_handler;

	/* scsi ioctl from sd device */
	int			(*sc_ioctl)(struct device *, u_long, caddr_t);

	int			sc_nsensors;
	struct ksensor		*sc_sensors;
	struct ksensordev	sc_sensordev;
};
245 
int	mpii_match(struct device *, void *, void *);
void	mpii_attach(struct device *, struct device *, void *);
int	mpii_detach(struct device *, int);

int	mpii_intr(void *);

/* autoconf(9) glue */
struct cfattach mpii_ca = {
	sizeof(struct mpii_softc),
	mpii_match,
	mpii_attach,
	mpii_detach
};

struct cfdriver mpii_cd = {
	NULL,
	"mpii",
	DV_DULL
};
264 
void		mpii_scsi_cmd(struct scsi_xfer *);
void		mpii_scsi_cmd_done(struct mpii_ccb *);
int		mpii_scsi_probe(struct scsi_link *);
int		mpii_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);

/* adapter entry points handed to the scsi midlayer */
struct scsi_adapter mpii_switch = {
	mpii_scsi_cmd,
	scsi_minphys,
	mpii_scsi_probe,
	NULL,
	mpii_scsi_ioctl
};
277 
278 struct mpii_dmamem *
279 		mpii_dmamem_alloc(struct mpii_softc *, size_t);
280 void		mpii_dmamem_free(struct mpii_softc *,
281 		    struct mpii_dmamem *);
282 int		mpii_alloc_ccbs(struct mpii_softc *);
283 void *		mpii_get_ccb(void *);
284 void		mpii_put_ccb(void *, void *);
285 int		mpii_alloc_replies(struct mpii_softc *);
286 int		mpii_alloc_queues(struct mpii_softc *);
287 void		mpii_push_reply(struct mpii_softc *, struct mpii_rcb *);
288 void		mpii_push_replies(struct mpii_softc *);
289 
290 void		mpii_scsi_cmd_tmo(void *);
291 void		mpii_scsi_cmd_tmo_handler(void *, void *);
292 void		mpii_scsi_cmd_tmo_done(struct mpii_ccb *);
293 
294 int		mpii_insert_dev(struct mpii_softc *, struct mpii_device *);
295 int		mpii_remove_dev(struct mpii_softc *, struct mpii_device *);
296 struct mpii_device *
297 		mpii_find_dev(struct mpii_softc *, u_int16_t);
298 
299 void		mpii_start(struct mpii_softc *, struct mpii_ccb *);
300 int		mpii_poll(struct mpii_softc *, struct mpii_ccb *);
301 void		mpii_poll_done(struct mpii_ccb *);
302 struct mpii_rcb *
303 		mpii_reply(struct mpii_softc *, struct mpii_reply_descr *);
304 
305 void		mpii_wait(struct mpii_softc *, struct mpii_ccb *);
306 void		mpii_wait_done(struct mpii_ccb *);
307 
308 void		mpii_init_queues(struct mpii_softc *);
309 
310 int		mpii_load_xs(struct mpii_ccb *);
311 int		mpii_load_xs_sas3(struct mpii_ccb *);
312 
313 u_int32_t	mpii_read(struct mpii_softc *, bus_size_t);
314 void		mpii_write(struct mpii_softc *, bus_size_t, u_int32_t);
315 int		mpii_wait_eq(struct mpii_softc *, bus_size_t, u_int32_t,
316 		    u_int32_t);
317 int		mpii_wait_ne(struct mpii_softc *, bus_size_t, u_int32_t,
318 		    u_int32_t);
319 
320 int		mpii_init(struct mpii_softc *);
321 int		mpii_reset_soft(struct mpii_softc *);
322 int		mpii_reset_hard(struct mpii_softc *);
323 
324 int		mpii_handshake_send(struct mpii_softc *, void *, size_t);
325 int		mpii_handshake_recv_dword(struct mpii_softc *,
326 		    u_int32_t *);
327 int		mpii_handshake_recv(struct mpii_softc *, void *, size_t);
328 
329 void		mpii_empty_done(struct mpii_ccb *);
330 
331 int		mpii_iocinit(struct mpii_softc *);
332 int		mpii_iocfacts(struct mpii_softc *);
333 int		mpii_portfacts(struct mpii_softc *);
334 int		mpii_portenable(struct mpii_softc *);
335 int		mpii_cfg_coalescing(struct mpii_softc *);
336 int		mpii_board_info(struct mpii_softc *);
337 int		mpii_target_map(struct mpii_softc *);
338 
339 int		mpii_eventnotify(struct mpii_softc *);
340 void		mpii_eventnotify_done(struct mpii_ccb *);
341 void		mpii_eventack(void *, void *);
342 void		mpii_eventack_done(struct mpii_ccb *);
343 void		mpii_event_process(struct mpii_softc *, struct mpii_rcb *);
344 void		mpii_event_done(struct mpii_softc *, struct mpii_rcb *);
345 void		mpii_event_sas(void *);
346 void		mpii_event_raid(struct mpii_softc *,
347 		    struct mpii_msg_event_reply *);
348 void		mpii_event_discovery(struct mpii_softc *,
349 		    struct mpii_msg_event_reply *);
350 
351 void		mpii_sas_remove_device(struct mpii_softc *, u_int16_t);
352 
353 int		mpii_req_cfg_header(struct mpii_softc *, u_int8_t,
354 		    u_int8_t, u_int32_t, int, void *);
355 int		mpii_req_cfg_page(struct mpii_softc *, u_int32_t, int,
356 		    void *, int, void *, size_t);
357 
358 int		mpii_ioctl_cache(struct scsi_link *, u_long, struct dk_cache *);
359 
360 #if NBIO > 0
361 int		mpii_ioctl(struct device *, u_long, caddr_t);
362 int		mpii_ioctl_inq(struct mpii_softc *, struct bioc_inq *);
363 int		mpii_ioctl_vol(struct mpii_softc *, struct bioc_vol *);
364 int		mpii_ioctl_disk(struct mpii_softc *, struct bioc_disk *);
365 int		mpii_bio_hs(struct mpii_softc *, struct bioc_disk *, int,
366 		    int, int *);
367 int		mpii_bio_disk(struct mpii_softc *, struct bioc_disk *,
368 		    u_int8_t);
369 struct mpii_device *
370 		mpii_find_vol(struct mpii_softc *, int);
371 #ifndef SMALL_KERNEL
372  int		mpii_bio_volstate(struct mpii_softc *, struct bioc_vol *);
373 int		mpii_create_sensors(struct mpii_softc *);
374 void		mpii_refresh_sensors(void *);
375 #endif /* SMALL_KERNEL */
376 #endif /* NBIO > 0 */
377 
378 #define DEVNAME(s)		((s)->sc_dev.dv_xname)
379 
380 #define dwordsof(s)		(sizeof(s) / sizeof(u_int32_t))
381 
382 #define mpii_read_db(s)		mpii_read((s), MPII_DOORBELL)
383 #define mpii_write_db(s, v)	mpii_write((s), MPII_DOORBELL, (v))
384 #define mpii_read_intr(s)	mpii_read((s), MPII_INTR_STATUS)
385 #define mpii_write_intr(s, v)	mpii_write((s), MPII_INTR_STATUS, (v))
386 #define mpii_reply_waiting(s)	((mpii_read_intr((s)) & MPII_INTR_STATUS_REPLY)\
387 				    == MPII_INTR_STATUS_REPLY)
388 
389 #define mpii_write_reply_free(s, v) \
390     bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
391     MPII_REPLY_FREE_HOST_INDEX, (v))
392 #define mpii_write_reply_post(s, v) \
393     bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
394     MPII_REPLY_POST_HOST_INDEX, (v))
395 
396 #define mpii_wait_db_int(s)	mpii_wait_ne((s), MPII_INTR_STATUS, \
397 				    MPII_INTR_STATUS_IOC2SYSDB, 0)
398 #define mpii_wait_db_ack(s)	mpii_wait_eq((s), MPII_INTR_STATUS, \
399 				    MPII_INTR_STATUS_SYS2IOCDB, 0)
400 
401 static inline void
402 mpii_dvatosge(struct mpii_sge *sge, u_int64_t dva)
403 {
404 	htolem32(&sge->sg_addr_lo, dva);
405 	htolem32(&sge->sg_addr_hi, dva >> 32);
406 }
407 
408 #define MPII_PG_EXTENDED	(1<<0)
409 #define MPII_PG_POLL		(1<<1)
410 #define MPII_PG_FMT		"\020" "\002POLL" "\001EXTENDED"
411 
/* PCI ids of the supported SAS2xxx and SAS3xxx controllers */
static const struct pci_matchid mpii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2004 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2008 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_5 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2116_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2116_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_5 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_6 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3004 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3008 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_4 }
};
436 
437 int
438 mpii_match(struct device *parent, void *match, void *aux)
439 {
440 	return (pci_matchbyid(aux, mpii_devices, nitems(mpii_devices)));
441 }
442 
443 void
444 mpii_attach(struct device *parent, struct device *self, void *aux)
445 {
446 	struct mpii_softc		*sc = (struct mpii_softc *)self;
447 	struct pci_attach_args		*pa = aux;
448 	pcireg_t			memtype;
449 	int				r;
450 	pci_intr_handle_t		ih;
451 	struct scsibus_attach_args	saa;
452 	struct mpii_ccb			*ccb;
453 
454 	sc->sc_pc = pa->pa_pc;
455 	sc->sc_tag = pa->pa_tag;
456 	sc->sc_dmat = pa->pa_dmat;
457 
458 	mtx_init(&sc->sc_req_mtx, IPL_BIO);
459 	mtx_init(&sc->sc_rep_mtx, IPL_BIO);
460 
461 	/* find the appropriate memory base */
462 	for (r = PCI_MAPREG_START; r < PCI_MAPREG_END; r += sizeof(memtype)) {
463 		memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, r);
464 		if ((memtype & PCI_MAPREG_TYPE_MASK) == PCI_MAPREG_TYPE_MEM)
465 			break;
466 	}
467 	if (r >= PCI_MAPREG_END) {
468 		printf(": unable to locate system interface registers\n");
469 		return;
470 	}
471 
472 	if (pci_mapreg_map(pa, r, memtype, 0, &sc->sc_iot, &sc->sc_ioh,
473 	    NULL, &sc->sc_ios, 0xFF) != 0) {
474 		printf(": unable to map system interface registers\n");
475 		return;
476 	}
477 
478 	/* disable the expansion rom */
479 	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_ROM_REG,
480 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_ROM_REG) &
481 	    ~PCI_ROM_ENABLE);
482 
483 	/* disable interrupts */
484 	mpii_write(sc, MPII_INTR_MASK,
485 	    MPII_INTR_MASK_RESET | MPII_INTR_MASK_REPLY |
486 	    MPII_INTR_MASK_DOORBELL);
487 
488 	/* hook up the interrupt */
489 	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
490 		printf(": unable to map interrupt\n");
491 		goto unmap;
492 	}
493 	printf(": %s\n", pci_intr_string(sc->sc_pc, ih));
494 
495 	if (mpii_init(sc) != 0) {
496 		printf("%s: unable to initialize ioc\n", DEVNAME(sc));
497 		goto unmap;
498 	}
499 
500 	if (mpii_iocfacts(sc) != 0) {
501 		printf("%s: unable to get iocfacts\n", DEVNAME(sc));
502 		goto unmap;
503 	}
504 
505 	if (mpii_alloc_ccbs(sc) != 0) {
506 		/* error already printed */
507 		goto unmap;
508 	}
509 
510 	if (mpii_alloc_replies(sc) != 0) {
511 		printf("%s: unable to allocated reply space\n", DEVNAME(sc));
512 		goto free_ccbs;
513 	}
514 
515 	if (mpii_alloc_queues(sc) != 0) {
516 		printf("%s: unable to allocate reply queues\n", DEVNAME(sc));
517 		goto free_replies;
518 	}
519 
520 	if (mpii_iocinit(sc) != 0) {
521 		printf("%s: unable to send iocinit\n", DEVNAME(sc));
522 		goto free_queues;
523 	}
524 
525 	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
526 	    MPII_DOORBELL_STATE_OPER) != 0) {
527 		printf("%s: state: 0x%08x\n", DEVNAME(sc),
528 			mpii_read_db(sc) & MPII_DOORBELL_STATE);
529 		printf("%s: operational state timeout\n", DEVNAME(sc));
530 		goto free_queues;
531 	}
532 
533 	mpii_push_replies(sc);
534 	mpii_init_queues(sc);
535 
536 	if (mpii_board_info(sc) != 0) {
537 		printf("%s: unable to get manufacturing page 0\n",
538 		    DEVNAME(sc));
539 		goto free_queues;
540 	}
541 
542 	if (mpii_portfacts(sc) != 0) {
543 		printf("%s: unable to get portfacts\n", DEVNAME(sc));
544 		goto free_queues;
545 	}
546 
547 	if (mpii_target_map(sc) != 0) {
548 		printf("%s: unable to setup target mappings\n", DEVNAME(sc));
549 		goto free_queues;
550 	}
551 
552 	if (mpii_cfg_coalescing(sc) != 0) {
553 		printf("%s: unable to configure coalescing\n", DEVNAME(sc));
554 		goto free_queues;
555 	}
556 
557 	/* XXX bail on unsupported porttype? */
558 	if ((sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_PHYSICAL) ||
559 	    (sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_VIRTUAL)) {
560 		if (mpii_eventnotify(sc) != 0) {
561 			printf("%s: unable to enable events\n", DEVNAME(sc));
562 			goto free_queues;
563 		}
564 	}
565 
566 	sc->sc_devs = mallocarray(sc->sc_max_devices,
567 	    sizeof(struct mpii_device *), M_DEVBUF, M_NOWAIT | M_ZERO);
568 	if (sc->sc_devs == NULL) {
569 		printf("%s: unable to allocate memory for mpii_device\n",
570 		    DEVNAME(sc));
571 		goto free_queues;
572 	}
573 
574 	if (mpii_portenable(sc) != 0) {
575 		printf("%s: unable to enable port\n", DEVNAME(sc));
576 		goto free_devs;
577 	}
578 
579 	/* we should be good to go now, attach scsibus */
580 	sc->sc_link.adapter = &mpii_switch;
581 	sc->sc_link.adapter_softc = sc;
582 	sc->sc_link.adapter_target = -1;
583 	sc->sc_link.adapter_buswidth = sc->sc_max_devices;
584 	sc->sc_link.luns = 1;
585 	sc->sc_link.openings = sc->sc_max_cmds - 1;
586 	sc->sc_link.pool = &sc->sc_iopool;
587 
588 	memset(&saa, 0, sizeof(saa));
589 	saa.saa_sc_link = &sc->sc_link;
590 
591 	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO,
592 	    mpii_intr, sc, sc->sc_dev.dv_xname);
593 	if (sc->sc_ih == NULL)
594 		goto free_devs;
595 
596 	/* force autoconf to wait for the first sas discovery to complete */
597 	SET(sc->sc_flags, MPII_F_CONFIG_PENDING);
598 	config_pending_incr();
599 
600 	/* config_found() returns the scsibus attached to us */
601 	sc->sc_scsibus = (struct scsibus_softc *) config_found(&sc->sc_dev,
602 	    &saa, scsiprint);
603 
604 	/* enable interrupts */
605 	mpii_write(sc, MPII_INTR_MASK, MPII_INTR_MASK_DOORBELL
606 	    | MPII_INTR_MASK_RESET);
607 
608 #if NBIO > 0
609 	if (ISSET(sc->sc_flags, MPII_F_RAID)) {
610 		if (bio_register(&sc->sc_dev, mpii_ioctl) != 0)
611 			panic("%s: controller registration failed",
612 			    DEVNAME(sc));
613 		else
614 			sc->sc_ioctl = mpii_ioctl;
615 
616 #ifndef SMALL_KERNEL
617 		if (mpii_create_sensors(sc) != 0)
618 			printf("%s: unable to create sensors\n", DEVNAME(sc));
619 #endif
620 	}
621 #endif
622 
623 	return;
624 
625 free_devs:
626 	free(sc->sc_devs, M_DEVBUF, 0);
627 	sc->sc_devs = NULL;
628 
629 free_queues:
630 	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_freeq),
631 	    0, sc->sc_reply_free_qdepth * 4, BUS_DMASYNC_POSTREAD);
632 	mpii_dmamem_free(sc, sc->sc_reply_freeq);
633 
634 	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
635 	    0, sc->sc_reply_post_qdepth * 8, BUS_DMASYNC_POSTREAD);
636 	mpii_dmamem_free(sc, sc->sc_reply_postq);
637 
638 free_replies:
639 	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
640 		0, PAGE_SIZE, BUS_DMASYNC_POSTREAD);
641 	mpii_dmamem_free(sc, sc->sc_replies);
642 
643 free_ccbs:
644 	while ((ccb = mpii_get_ccb(sc)) != NULL)
645 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
646 	mpii_dmamem_free(sc, sc->sc_requests);
647 	free(sc->sc_ccbs, M_DEVBUF, 0);
648 
649 unmap:
650 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
651 	sc->sc_ios = 0;
652 }
653 
654 int
655 mpii_detach(struct device *self, int flags)
656 {
657 	struct mpii_softc		*sc = (struct mpii_softc *)self;
658 
659 	if (sc->sc_ih != NULL) {
660 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
661 		sc->sc_ih = NULL;
662 	}
663 	if (sc->sc_ios != 0) {
664 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
665 		sc->sc_ios = 0;
666 	}
667 
668 	return (0);
669 }
670 
/*
 * Interrupt handler.  Drains the reply post queue under sc_rep_mtx,
 * sorting each descriptor into either a completed ccb (smid != 0) or an
 * async event reply (smid == 0), then runs completions and events after
 * dropping the mutex.  Returns 1 if any work was done, 0 otherwise.
 */
int
mpii_intr(void *arg)
{
	struct mpii_rcb_list		evts = SIMPLEQ_HEAD_INITIALIZER(evts);
	struct mpii_ccb_list		ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_softc		*sc = arg;
	struct mpii_reply_descr		*postq = sc->sc_reply_postq_kva, *rdp;
	struct mpii_ccb			*ccb;
	struct mpii_rcb			*rcb;
	int				smid;
	u_int				idx;
	int				rv = 0;

	mtx_enter(&sc->sc_rep_mtx);
	bus_dmamap_sync(sc->sc_dmat,
	    MPII_DMA_MAP(sc->sc_reply_postq),
	    0, sc->sc_reply_post_qdepth * sizeof(*rdp),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* walk the ring from where we left off last time */
	idx = sc->sc_reply_post_host_index;
	for (;;) {
		rdp = &postq[idx];
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		smid = lemtoh16(&rdp->smid);
		rcb = mpii_reply(sc, rdp);

		/* smid is 1-based; 0 means an unsolicited (event) reply */
		if (smid) {
			ccb = &sc->sc_ccbs[smid - 1];
			ccb->ccb_state = MPII_CCB_READY;
			ccb->ccb_rcb = rcb;
			SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		} else
			SIMPLEQ_INSERT_TAIL(&evts, rcb, rcb_link);

		if (++idx >= sc->sc_reply_post_qdepth)
			idx = 0;

		rv = 1;
	}

	bus_dmamap_sync(sc->sc_dmat,
	    MPII_DMA_MAP(sc->sc_reply_postq),
	    0, sc->sc_reply_post_qdepth * sizeof(*rdp),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* tell the ioc how far we got, only if we consumed anything */
	if (rv)
		mpii_write_reply_post(sc, sc->sc_reply_post_host_index = idx);

	mtx_leave(&sc->sc_rep_mtx);

	if (rv == 0)
		return (0);

	/* run completions and events outside the reply mutex */
	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		ccb->ccb_done(ccb);
	}
	while ((rcb = SIMPLEQ_FIRST(&evts)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&evts, rcb_link);
		mpii_event_process(sc, rcb);
	}

	return (1);
}
745 
/*
 * Build the IEEE-format (SAS3) scatter/gather list for an xfer.
 * When the segments overflow the space in the request frame, a chain
 * element at sc_chain_sge points at the continuation immediately after
 * it.  Returns 0 on success, 1 if the dmamap load fails.
 */
int
mpii_load_xs_sas3(struct mpii_ccb *ccb)
{
	struct mpii_softc	*sc = ccb->ccb_sc;
	struct scsi_xfer	*xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io	*io = ccb->ccb_cmd;
	struct mpii_ieee_sge	*csge, *nsge, *sge;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;
	int			i, error;

	/* Request frame structure is described in the mpii_iocfacts */
	nsge = (struct mpii_ieee_sge *)(io + 1);
	csge = nsge + sc->sc_chain_sge;		/* where the chain sge would go */

	/* zero length transfer still requires an SGE */
	if (xs->datalen == 0) {
		nsge->sg_flags = MPII_IEEE_SGE_END_OF_LIST;
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL,
	    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	for (i = 0; i < dmap->dm_nsegs; i++, nsge++) {
		/* hit the chain slot: emit the chain element first */
		if (nsge == csge) {
			nsge++;
			/* offset to the chain sge from the beginning */
			io->chain_offset = ((caddr_t)csge - (caddr_t)io) / 4;
			csge->sg_flags = MPII_IEEE_SGE_CHAIN_ELEMENT |
			    MPII_IEEE_SGE_ADDR_SYSTEM;
			/* address of the next sge */
			csge->sg_addr = htole64(ccb->ccb_cmd_dva +
			    ((caddr_t)nsge - (caddr_t)io));
			csge->sg_len = htole32((dmap->dm_nsegs - i) *
			    sizeof(*sge));
		}

		sge = nsge;
		sge->sg_flags = MPII_IEEE_SGE_ADDR_SYSTEM;
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
	}

	/* terminate list */
	sge->sg_flags |= MPII_IEEE_SGE_END_OF_LIST;

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
802 
/*
 * Build the MPI (SAS2) scatter/gather list for an xfer.  Same layout
 * strategy as mpii_load_xs_sas3 but with 64-bit "simple" SGEs and a
 * chain SGE carrying the remaining length in its header.  Returns 0 on
 * success, 1 if the dmamap load fails.
 */
int
mpii_load_xs(struct mpii_ccb *ccb)
{
	struct mpii_softc	*sc = ccb->ccb_sc;
	struct scsi_xfer	*xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io	*io = ccb->ccb_cmd;
	struct mpii_sge		*csge, *nsge, *sge;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;
	u_int32_t		flags;
	u_int16_t		len;
	int			i, error;

	/* Request frame structure is described in the mpii_iocfacts */
	nsge = (struct mpii_sge *)(io + 1);
	csge = nsge + sc->sc_chain_sge;		/* where the chain sge would go */

	/* zero length transfer still requires an SGE */
	if (xs->datalen == 0) {
		nsge->sg_hdr = htole32(MPII_SGE_FL_TYPE_SIMPLE |
		    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL);
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL,
	    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* safe default starting flags */
	flags = MPII_SGE_FL_TYPE_SIMPLE | MPII_SGE_FL_SIZE_64;
	if (xs->flags & SCSI_DATA_OUT)
		flags |= MPII_SGE_FL_DIR_OUT;

	for (i = 0; i < dmap->dm_nsegs; i++, nsge++) {
		/* hit the chain slot: emit the chain element first */
		if (nsge == csge) {
			nsge++;
			/* offset to the chain sge from the beginning */
			io->chain_offset = ((caddr_t)csge - (caddr_t)io) / 4;
			/* length of the sgl segment we're pointing to */
			len = (dmap->dm_nsegs - i) * sizeof(*sge);
			csge->sg_hdr = htole32(MPII_SGE_FL_TYPE_CHAIN |
			    MPII_SGE_FL_SIZE_64 | len);
			/* address of the next sge */
			mpii_dvatosge(csge, ccb->ccb_cmd_dva +
			    ((caddr_t)nsge - (caddr_t)io));
		}

		sge = nsge;
		sge->sg_hdr = htole32(flags | dmap->dm_segs[i].ds_len);
		mpii_dvatosge(sge, dmap->dm_segs[i].ds_addr);
	}

	/* terminate list */
	sge->sg_hdr |= htole32(MPII_SGE_FL_LAST | MPII_SGE_FL_EOB |
	    MPII_SGE_FL_EOL);

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
867 
/*
 * Midlayer probe hook for a target.  Rejects targets with no known
 * device or hidden/unused ones, accepts volumes directly, and for
 * physical devices fetches SAS device page 0 to fill in the wwns and
 * the ATAPI quirks.  Returns 0 to accept the target, non-zero to skip.
 */
int
mpii_scsi_probe(struct scsi_link *link)
{
	struct mpii_softc *sc = link->adapter_softc;
	struct mpii_cfg_sas_dev_pg0 pg0;
	struct mpii_ecfg_hdr ehdr;
	struct mpii_device *dev;
	uint32_t address;
	int flags;

	/* only sas ports have probeable targets */
	if ((sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_SAS_PHYSICAL) &&
	    (sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_SAS_VIRTUAL))
		return (ENXIO);

	dev = sc->sc_devs[link->target];
	if (dev == NULL)
		return (1);

	flags = dev->flags;
	if (ISSET(flags, MPII_DF_HIDDEN) || ISSET(flags, MPII_DF_UNUSED))
		return (1);

	/* volumes need no config page lookup */
	if (ISSET(flags, MPII_DF_VOLUME))
		return (0);

	memset(&ehdr, 0, sizeof(ehdr));
	ehdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_EXTENDED;
	ehdr.page_number = 0;
	ehdr.page_version = 0;
	ehdr.ext_page_type = MPII_CONFIG_REQ_EXTPAGE_TYPE_SAS_DEVICE;
	ehdr.ext_page_length = htole16(sizeof(pg0) / 4); /* dwords */

	address = MPII_PGAD_SAS_DEVICE_FORM_HANDLE | (uint32_t)dev->dev_handle;
	if (mpii_req_cfg_page(sc, address, MPII_PG_EXTENDED,
	    &ehdr, 1, &pg0, sizeof(pg0)) != 0) {
		printf("%s: unable to fetch SAS device page 0 for target %u\n",
		    DEVNAME(sc), link->target);

		return (0); /* the handle should still work */
	}

	link->port_wwn = letoh64(pg0.sas_addr);
	link->node_wwn = letoh64(pg0.device_name);

	/* atapi devices need the big-transfer quirk */
	if (ISSET(lemtoh32(&pg0.device_info),
	    MPII_CFG_SAS_DEV_0_DEVINFO_ATAPI_DEVICE)) {
		link->flags |= SDEV_ATAPI;
		link->quirks |= SDEV_ONLYBIG;
	}

	return (0);
}
920 
/*
 * Read a 32-bit controller register at offset r, with a read barrier
 * issued first so the access is not reordered with earlier ones.
 */
u_int32_t
mpii_read(struct mpii_softc *sc, bus_size_t r)
{
	u_int32_t			rv;

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);

	DNPRINTF(MPII_D_RW, "%s: mpii_read %#lx %#x\n", DEVNAME(sc), r, rv);

	return (rv);
}
934 
/*
 * Write a 32-bit controller register at offset r, followed by a write
 * barrier so the store is posted before later accesses.
 */
void
mpii_write(struct mpii_softc *sc, bus_size_t r, u_int32_t v)
{
	DNPRINTF(MPII_D_RW, "%s: mpii_write %#lx %#x\n", DEVNAME(sc), r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
944 
945 
946 int
947 mpii_wait_eq(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
948     u_int32_t target)
949 {
950 	int			i;
951 
952 	DNPRINTF(MPII_D_RW, "%s: mpii_wait_eq %#lx %#x %#x\n", DEVNAME(sc), r,
953 	    mask, target);
954 
955 	for (i = 0; i < 15000; i++) {
956 		if ((mpii_read(sc, r) & mask) == target)
957 			return (0);
958 		delay(1000);
959 	}
960 
961 	return (1);
962 }
963 
964 int
965 mpii_wait_ne(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
966     u_int32_t target)
967 {
968 	int			i;
969 
970 	DNPRINTF(MPII_D_RW, "%s: mpii_wait_ne %#lx %#x %#x\n", DEVNAME(sc), r,
971 	    mask, target);
972 
973 	for (i = 0; i < 15000; i++) {
974 		if ((mpii_read(sc, r) & mask) != target)
975 			return (0);
976 		delay(1000);
977 	}
978 
979 	return (1);
980 }
981 
/*
 * Drive the IOC to the ready state.  Waits for it to leave reset,
 * defers to a PCI peer if one owns the chip, then loops over the
 * doorbell state machine (up to 5 iterations), resetting as needed.
 * Returns 0 when the IOC is usable, 1 on failure.
 */
int
mpii_init(struct mpii_softc *sc)
{
	u_int32_t		db;
	int			i;

	/* spin until the ioc leaves the reset state */
	if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_RESET) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_init timeout waiting to leave "
		    "reset state\n", DEVNAME(sc));
		return (1);
	}

	/* check current ownership */
	db = mpii_read_db(sc);
	if ((db & MPII_DOORBELL_WHOINIT) == MPII_DOORBELL_WHOINIT_PCIPEER) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_init initialised by pci peer\n",
		    DEVNAME(sc));
		return (0);
	}

	for (i = 0; i < 5; i++) {
		switch (db & MPII_DOORBELL_STATE) {
		case MPII_DOORBELL_STATE_READY:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is ready\n",
			    DEVNAME(sc));
			return (0);

		case MPII_DOORBELL_STATE_OPER:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is oper\n",
			    DEVNAME(sc));
			/* soft reset preserves event replay if supported */
			if (sc->sc_ioc_event_replay)
				mpii_reset_soft(sc);
			else
				mpii_reset_hard(sc);
			break;

		case MPII_DOORBELL_STATE_FAULT:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is being "
			    "reset hard\n" , DEVNAME(sc));
			mpii_reset_hard(sc);
			break;

		case MPII_DOORBELL_STATE_RESET:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init waiting to come "
			    "out of reset\n", DEVNAME(sc));
			if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
			    MPII_DOORBELL_STATE_RESET) != 0)
				return (1);
			break;
		}
		db = mpii_read_db(sc);
	}

	return (1);
}
1039 
/*
 * Perform a message unit (soft) reset by writing the reset function
 * code to the doorbell, then wait for the acknowledgement and for the
 * IOC to reach the READY state.  Returns 0 on success, 1 if the
 * doorbell is busy or a wait times out.
 */
int
mpii_reset_soft(struct mpii_softc *sc)
{
	DNPRINTF(MPII_D_MISC, "%s: mpii_reset_soft\n", DEVNAME(sc));

	/* the doorbell must be idle before a function can be issued */
	if (mpii_read_db(sc) & MPII_DOORBELL_INUSE) {
		return (1);
	}

	mpii_write_db(sc,
	    MPII_DOORBELL_FUNCTION(MPII_FUNCTION_IOC_MESSAGE_UNIT_RESET));

	/* XXX LSI waits 15 sec */
	if (mpii_wait_db_ack(sc) != 0)
		return (1);

	/* XXX LSI waits 15 sec */
	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_READY) != 0)
		return (1);

	/* XXX wait for Sys2IOCDB bit to clear in HIS?? */

	return (0);
}
1065 
/*
 * Perform a hard (adapter) reset through the host diagnostic register.
 * The diagnostic register is write-protected; it is unlocked by writing
 * a fixed magic sequence to the write-sequence register, in order.
 * Returns 0 on success, 1 if the unlock sequence did not take effect.
 */
int
mpii_reset_hard(struct mpii_softc *sc)
{
	u_int16_t		i;

	DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard\n", DEVNAME(sc));

	mpii_write_intr(sc, 0);

	/* enable diagnostic register */
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_FLUSH);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_1);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_2);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_3);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_4);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_5);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_6);

	delay(100);

	/* verify the unlock sequence took: DWRE = diag write enabled */
	if ((mpii_read(sc, MPII_HOSTDIAG) & MPII_HOSTDIAG_DWRE) == 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard failure to enable "
		    "diagnostic read/write\n", DEVNAME(sc));
		return(1);
	}

	/* reset ioc */
	mpii_write(sc, MPII_HOSTDIAG, MPII_HOSTDIAG_RESET_ADAPTER);

	/* 240 milliseconds */
	delay(240000);


	/* XXX this whole function should be more robust */

	/* XXX  read the host diagnostic reg until reset adapter bit clears ? */
	for (i = 0; i < 30000; i++) {
		if ((mpii_read(sc, MPII_HOSTDIAG) &
		    MPII_HOSTDIAG_RESET_ADAPTER) == 0)
			break;
		delay(10000);
	}

	/* disable diagnostic register */
	mpii_write(sc, MPII_WRITESEQ, 0xff);

	/* XXX what else? */

	DNPRINTF(MPII_D_MISC, "%s: done with mpii_reset_hard\n", DEVNAME(sc));

	return(0);
}
1118 
/*
 * Send a request to the IOC through the doorbell handshake protocol:
 * announce the handshake function and transfer length, wait for the
 * IOC's interrupt/ack, then feed the request one dword at a time,
 * waiting for an ack after each write.  Returns 0 on success, 1 if
 * the doorbell is busy or any wait times out.
 */
int
mpii_handshake_send(struct mpii_softc *sc, void *buf, size_t dwords)
{
	u_int32_t		*query = buf;
	int			i;

	/* make sure the doorbell is not in use. */
	if (mpii_read_db(sc) & MPII_DOORBELL_INUSE)
		return (1);

	/* clear pending doorbell interrupts */
	if (mpii_read_intr(sc) & MPII_INTR_STATUS_IOC2SYSDB)
		mpii_write_intr(sc, 0);

	/*
	 * first write the doorbell with the handshake function and the
	 * dword count.
	 */
	mpii_write_db(sc, MPII_DOORBELL_FUNCTION(MPII_FUNCTION_HANDSHAKE) |
	    MPII_DOORBELL_DWORDS(dwords));

	/*
	 * the doorbell used bit will be set because a doorbell function has
	 * started. wait for the interrupt and then ack it.
	 */
	if (mpii_wait_db_int(sc) != 0)
		return (1);
	mpii_write_intr(sc, 0);

	/* poll for the acknowledgement. */
	if (mpii_wait_db_ack(sc) != 0)
		return (1);

	/* write the query through the doorbell. */
	for (i = 0; i < dwords; i++) {
		mpii_write_db(sc, htole32(query[i]));
		if (mpii_wait_db_ack(sc) != 0)
			return (1);
	}

	return (0);
}
1161 
/*
 * Read one dword of a handshake reply.  The IOC hands back 16 bits per
 * doorbell interrupt, so a dword takes two reads; each half is masked
 * to the data bits, byte-swapped from little-endian and stored into
 * consecutive 16-bit halves of *dword.
 * NOTE(review): assumes the IOC presents the low half first — confirm
 * against the MPI handshake specification.
 * Returns 0 on success, 1 if a doorbell interrupt wait times out.
 */
int
mpii_handshake_recv_dword(struct mpii_softc *sc, u_int32_t *dword)
{
	u_int16_t		*words = (u_int16_t *)dword;
	int			i;

	for (i = 0; i < 2; i++) {
		if (mpii_wait_db_int(sc) != 0)
			return (1);
		words[i] = letoh16(mpii_read_db(sc) & MPII_DOORBELL_DATA_MASK);
		mpii_write_intr(sc, 0);
	}

	return (0);
}
1177 
/*
 * Receive a handshake reply from the IOC.  The first dword carries the
 * reply header whose msg_length field gives the total reply size in
 * dwords; up to `dwords` of them are stored in buf and any excess is
 * drained and discarded.  Finally waits for the doorbell to go idle
 * and clears the interrupt.  Returns 0 on success, 1 on timeout.
 */
int
mpii_handshake_recv(struct mpii_softc *sc, void *buf, size_t dwords)
{
	struct mpii_msg_reply	*reply = buf;
	u_int32_t		*dbuf = buf, dummy;
	int			i;

	/* get the first dword so we can read the length out of the header. */
	if (mpii_handshake_recv_dword(sc, &dbuf[0]) != 0)
		return (1);

	DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dwords: %lu reply: %d\n",
	    DEVNAME(sc), dwords, reply->msg_length);

	/*
	 * the total length, in dwords, is in the message length field of the
	 * reply header.
	 */
	for (i = 1; i < MIN(dwords, reply->msg_length); i++) {
		if (mpii_handshake_recv_dword(sc, &dbuf[i]) != 0)
			return (1);
	}

	/* if there's extra stuff to come off the ioc, discard it */
	while (i++ < reply->msg_length) {
		if (mpii_handshake_recv_dword(sc, &dummy) != 0)
			return (1);
		DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dummy read: "
		    "0x%08x\n", DEVNAME(sc), dummy);
	}

	/* wait for the doorbell used bit to be reset and clear the intr */
	if (mpii_wait_db_int(sc) != 0)
		return (1);

	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_INUSE, 0) != 0)
		return (1);

	mpii_write_intr(sc, 0);

	return (0);
}
1220 
/*
 * No-op completion handler, used for ccbs whose replies are consumed
 * synchronously by the submitter (polled commands).
 */
void
mpii_empty_done(struct mpii_ccb *ccb)
{
	/* nothing to do */
}
1226 
/*
 * Fetch the IOC facts over the doorbell handshake and derive the
 * driver's sizing parameters from them: command depth, reply frame
 * count, reply post/free queue depths, request frame size and the
 * scatter-gather geometry (chain sge offset, max simple sges).
 * Returns 0 on success, 1 on handshake failure or if the reply post
 * queue would be unusably shallow.
 */
int
mpii_iocfacts(struct mpii_softc *sc)
{
	struct mpii_msg_iocfacts_request	ifq;
	struct mpii_msg_iocfacts_reply		ifp;
	int					irs;
	int					sge_size;
	u_int					qdepth;

	DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts\n", DEVNAME(sc));

	memset(&ifq, 0, sizeof(ifq));
	memset(&ifp, 0, sizeof(ifp));

	ifq.function = MPII_FUNCTION_IOC_FACTS;

	if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	sc->sc_ioc_number = ifp.ioc_number;
	sc->sc_vf_id = ifp.vf_id;

	/* device table spans the volumes followed by physical targets */
	sc->sc_max_volumes = ifp.max_volumes;
	sc->sc_max_devices = ifp.max_volumes + lemtoh16(&ifp.max_targets);

	if (ISSET(lemtoh32(&ifp.ioc_capabilities),
	    MPII_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
		SET(sc->sc_flags, MPII_F_RAID);

	/* never use more credits than the ioc grants us */
	sc->sc_max_cmds = MIN(lemtoh16(&ifp.request_credit),
	    MPII_REQUEST_CREDIT);

	/* SAS3 controllers have different sgl layouts */
	if (ifp.msg_version_maj == 2 && ifp.msg_version_min == 5)
		SET(sc->sc_flags, MPII_F_SAS3);

	/*
	 * The host driver must ensure that there is at least one
	 * unused entry in the Reply Free Queue. One way to ensure
	 * that this requirement is met is to never allocate a number
	 * of reply frames that is a multiple of 16.
	 */
	sc->sc_num_reply_frames = sc->sc_max_cmds + 32;
	if (!(sc->sc_num_reply_frames % 16))
		sc->sc_num_reply_frames--;

	/* must be multiple of 16 */
	sc->sc_reply_post_qdepth = sc->sc_max_cmds +
	    sc->sc_num_reply_frames;
	sc->sc_reply_post_qdepth += 16 - (sc->sc_reply_post_qdepth % 16);

	/* clamp to what the ioc supports and re-derive the command depth */
	qdepth = lemtoh16(&ifp.max_reply_descriptor_post_queue_depth);
	if (sc->sc_reply_post_qdepth > qdepth) {
		sc->sc_reply_post_qdepth = qdepth;
		if (sc->sc_reply_post_qdepth < 16) {
			printf("%s: RDPQ is too shallow\n", DEVNAME(sc));
			return (1);
		}
		sc->sc_max_cmds = sc->sc_reply_post_qdepth / 2 - 4;
		sc->sc_num_reply_frames = sc->sc_max_cmds + 4;
	}

	/* free queue depth rounded up to the next multiple of 16 */
	sc->sc_reply_free_qdepth = sc->sc_num_reply_frames +
	    16 - (sc->sc_num_reply_frames % 16);

	/*
	 * Our request frame for an I/O operation looks like this:
	 *
	 * +-------------------+ -.
	 * | mpii_msg_scsi_io  |  |
	 * +-------------------|  |
	 * | mpii_sge          |  |
	 * + - - - - - - - - - +  |
	 * | ...               |  > ioc_request_frame_size
	 * + - - - - - - - - - +  |
	 * | mpii_sge (tail)   |  |
	 * + - - - - - - - - - +  |
	 * | mpii_sge (csge)   |  | --.
	 * + - - - - - - - - - + -'   | chain sge points to the next sge
	 * | mpii_sge          |<-----'
	 * + - - - - - - - - - +
	 * | ...               |
	 * + - - - - - - - - - +
	 * | mpii_sge (tail)   |
	 * +-------------------+
	 * |                   |
	 * ~~~~~~~~~~~~~~~~~~~~~
	 * |                   |
	 * +-------------------+ <- sc_request_size - sizeof(scsi_sense_data)
	 * | scsi_sense_data   |
	 * +-------------------+
	 */

	/* both sizes are in 32-bit words */
	sc->sc_reply_size = ifp.reply_frame_size * 4;
	irs = lemtoh16(&ifp.ioc_request_frame_size) * 4;
	sc->sc_request_size = MPII_REQUEST_SIZE;
	/* make sure we have enough space for scsi sense data */
	if (irs > sc->sc_request_size) {
		sc->sc_request_size = irs + sizeof(struct scsi_sense_data);
		sc->sc_request_size += 16 - (sc->sc_request_size % 16);
	}

	if (ISSET(sc->sc_flags, MPII_F_SAS3)) {
		sge_size = sizeof(struct mpii_ieee_sge);
	} else {
		sge_size = sizeof(struct mpii_sge);
	}

	/* offset to the chain sge */
	sc->sc_chain_sge = (irs - sizeof(struct mpii_msg_scsi_io)) /
	    sge_size - 1;

	/*
	 * A number of simple scatter-gather elements we can fit into the
	 * request buffer after the I/O command minus the chain element.
	 */
	sc->sc_max_sgl = (sc->sc_request_size -
 	    sizeof(struct mpii_msg_scsi_io) - sizeof(struct scsi_sense_data)) /
	    sge_size - 1;

	return (0);
}
1359 
/*
 * Send an IOC_INIT over the doorbell handshake, handing the IOC the
 * sizes and DMA addresses of the request frames, reply frames and the
 * reply post/free queues.  Returns 0 on success, 1 if the handshake
 * fails or the IOC reports an error status/loginfo.
 */
int
mpii_iocinit(struct mpii_softc *sc)
{
	struct mpii_msg_iocinit_request		iiq;
	struct mpii_msg_iocinit_reply		iip;

	DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit\n", DEVNAME(sc));

	memset(&iiq, 0, sizeof(iiq));
	memset(&iip, 0, sizeof(iip));

	iiq.function = MPII_FUNCTION_IOC_INIT;
	iiq.whoinit = MPII_WHOINIT_HOST_DRIVER;

	/* XXX JPG do something about vf_id */
	iiq.vf_id = 0;

	iiq.msg_version_maj = 0x02;
	iiq.msg_version_min = 0x00;

	/* XXX JPG ensure compliance with some level and hard-code? */
	iiq.hdr_version_unit = 0x00;
	iiq.hdr_version_dev = 0x00;

	/* frame size is given to the ioc in 32-bit words */
	htolem16(&iiq.system_request_frame_size, sc->sc_request_size / 4);

	htolem16(&iiq.reply_descriptor_post_queue_depth,
	    sc->sc_reply_post_qdepth);

	htolem16(&iiq.reply_free_queue_depth, sc->sc_reply_free_qdepth);

	/* sense buffers live inside the request frames (see mpii_iocfacts) */
	htolem32(&iiq.sense_buffer_address_high,
	    MPII_DMA_DVA(sc->sc_requests) >> 32);

	htolem32(&iiq.system_reply_address_high,
	    MPII_DMA_DVA(sc->sc_replies) >> 32);

	htolem32(&iiq.system_request_frame_base_address_lo,
	    MPII_DMA_DVA(sc->sc_requests));
	htolem32(&iiq.system_request_frame_base_address_hi,
	    MPII_DMA_DVA(sc->sc_requests) >> 32);

	htolem32(&iiq.reply_descriptor_post_queue_address_lo,
	    MPII_DMA_DVA(sc->sc_reply_postq));
	htolem32(&iiq.reply_descriptor_post_queue_address_hi,
	    MPII_DMA_DVA(sc->sc_reply_postq) >> 32);

	htolem32(&iiq.reply_free_queue_address_lo,
	    MPII_DMA_DVA(sc->sc_reply_freeq));
	htolem32(&iiq.reply_free_queue_address_hi,
	    MPII_DMA_DVA(sc->sc_reply_freeq) >> 32);

	if (mpii_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &iip, dwordsof(iip)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	DNPRINTF(MPII_D_MISC, "%s:  function: 0x%02x msg_length: %d "
	    "whoinit: 0x%02x\n", DEVNAME(sc), iip.function,
	    iip.msg_length, iip.whoinit);
	DNPRINTF(MPII_D_MISC, "%s:  msg_flags: 0x%02x\n", DEVNAME(sc),
	    iip.msg_flags);
	DNPRINTF(MPII_D_MISC, "%s:  vf_id: 0x%02x vp_id: 0x%02x\n", DEVNAME(sc),
	    iip.vf_id, iip.vp_id);
	DNPRINTF(MPII_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
	    letoh16(iip.ioc_status));
	DNPRINTF(MPII_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(iip.ioc_loginfo));

	if (lemtoh16(&iip.ioc_status) != MPII_IOCSTATUS_SUCCESS ||
	    lemtoh32(&iip.ioc_loginfo))
		return (1);

	return (0);
}
1442 
/*
 * Return a reply frame to the IOC: append its DMA address to the reply
 * free queue (a ring of sc_reply_free_qdepth entries) and publish the
 * new host index to the hardware.  A NULL rcb is silently ignored.
 */
void
mpii_push_reply(struct mpii_softc *sc, struct mpii_rcb *rcb)
{
	u_int32_t		*rfp;
	u_int			idx;

	if (rcb == NULL)
		return;

	idx = sc->sc_reply_free_host_index;

	rfp = MPII_DMA_KVA(sc->sc_reply_freeq);
	htolem32(&rfp[idx], rcb->rcb_reply_dva);

	/* the free queue is a ring; wrap the index */
	if (++idx >= sc->sc_reply_free_qdepth)
		idx = 0;

	mpii_write_reply_free(sc, sc->sc_reply_free_host_index = idx);
}
1462 
/*
 * Issue a polled PORT_FACTS request for port 0 and record the port
 * type in the softc.  Returns 0 on success, 1 on failure; the ccb is
 * always returned to the iopool.
 */
int
mpii_portfacts(struct mpii_softc *sc)
{
	struct mpii_msg_portfacts_request	*pfq;
	struct mpii_msg_portfacts_reply		*pfp;
	struct mpii_ccb				*ccb;
	int					rv = 1;

	DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts\n", DEVNAME(sc));

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts mpii_get_ccb fail\n",
		    DEVNAME(sc));
		return (rv);
	}

	ccb->ccb_done = mpii_empty_done;
	pfq = ccb->ccb_cmd;

	memset(pfq, 0, sizeof(*pfq));

	pfq->function = MPII_FUNCTION_PORT_FACTS;
	pfq->chain_offset = 0;
	pfq->msg_flags = 0;
	pfq->port_number = 0;
	pfq->vp_id = 0;
	pfq->vf_id = 0;

	if (mpii_poll(sc, ccb) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts poll\n",
		    DEVNAME(sc));
		goto err;
	}

	if (ccb->ccb_rcb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: empty portfacts reply\n",
		    DEVNAME(sc));
		goto err;
	}

	pfp = ccb->ccb_rcb->rcb_reply;
	sc->sc_porttype = pfp->port_type;

	/* hand the reply frame back to the ioc */
	mpii_push_reply(sc, ccb->ccb_rcb);
	rv = 0;
err:
	scsi_io_put(&sc->sc_iopool, ccb);

	return (rv);
}
1514 
/*
 * iopool handler: pop one reply off the event-ack queue and send an
 * EVENT_ACK for it using the supplied ccb.  If more acks remain
 * queued, the handler is rescheduled to get another ccb.
 */
void
mpii_eventack(void *cookie, void *io)
{
	struct mpii_softc			*sc = cookie;
	struct mpii_ccb				*ccb = io;
	struct mpii_rcb				*rcb, *next;
	struct mpii_msg_event_reply		*enp;
	struct mpii_msg_eventack_request	*eaq;

	mtx_enter(&sc->sc_evt_ack_mtx);
	rcb = SIMPLEQ_FIRST(&sc->sc_evt_ack_queue);
	if (rcb != NULL) {
		/* next is only valid when rcb != NULL; see check below */
		next = SIMPLEQ_NEXT(rcb, rcb_link);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_ack_queue, rcb_link);
	}
	mtx_leave(&sc->sc_evt_ack_mtx);

	if (rcb == NULL) {
		/* nothing to ack; give the ccb back */
		scsi_io_put(&sc->sc_iopool, ccb);
		return;
	}

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;

	ccb->ccb_done = mpii_eventack_done;
	eaq = ccb->ccb_cmd;

	eaq->function = MPII_FUNCTION_EVENT_ACK;

	/* echo the event and its context back to the ioc */
	eaq->event = enp->event;
	eaq->event_context = enp->event_context;

	mpii_push_reply(sc, rcb);

	mpii_start(sc, ccb);

	if (next != NULL)
		scsi_ioh_add(&sc->sc_evt_ack_handler);
}
1554 
/*
 * Completion handler for EVENT_ACK requests: return the reply frame
 * to the IOC and the ccb to the iopool.
 */
void
mpii_eventack_done(struct mpii_ccb *ccb)
{
	struct mpii_softc			*sc = ccb->ccb_sc;

	DNPRINTF(MPII_D_EVT, "%s: event ack done\n", DEVNAME(sc));

	mpii_push_reply(sc, ccb->ccb_rcb);
	scsi_io_put(&sc->sc_iopool, ccb);
}
1565 
1566 int
1567 mpii_portenable(struct mpii_softc *sc)
1568 {
1569 	struct mpii_msg_portenable_request	*peq;
1570 	struct mpii_msg_portenable_repy		*pep;
1571 	struct mpii_ccb				*ccb;
1572 
1573 	DNPRINTF(MPII_D_MISC, "%s: mpii_portenable\n", DEVNAME(sc));
1574 
1575 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1576 	if (ccb == NULL) {
1577 		DNPRINTF(MPII_D_MISC, "%s: mpii_portenable ccb_get\n",
1578 		    DEVNAME(sc));
1579 		return (1);
1580 	}
1581 
1582 	ccb->ccb_done = mpii_empty_done;
1583 	peq = ccb->ccb_cmd;
1584 
1585 	peq->function = MPII_FUNCTION_PORT_ENABLE;
1586 	peq->vf_id = sc->sc_vf_id;
1587 
1588 	if (mpii_poll(sc, ccb) != 0) {
1589 		DNPRINTF(MPII_D_MISC, "%s: mpii_portenable poll\n",
1590 		    DEVNAME(sc));
1591 		return (1);
1592 	}
1593 
1594 	if (ccb->ccb_rcb == NULL) {
1595 		DNPRINTF(MPII_D_MISC, "%s: empty portenable reply\n",
1596 		    DEVNAME(sc));
1597 		return (1);
1598 	}
1599 	pep = ccb->ccb_rcb->rcb_reply;
1600 
1601 	mpii_push_reply(sc, ccb->ccb_rcb);
1602 	scsi_io_put(&sc->sc_iopool, ccb);
1603 
1604 	return (0);
1605 }
1606 
1607 int
1608 mpii_cfg_coalescing(struct mpii_softc *sc)
1609 {
1610 	struct mpii_cfg_hdr			hdr;
1611 	struct mpii_cfg_ioc_pg1			ipg;
1612 
1613 	hdr.page_version = 0;
1614 	hdr.page_length = sizeof(ipg) / 4;
1615 	hdr.page_number = 1;
1616 	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC;
1617 	memset(&ipg, 0, sizeof(ipg));
1618 	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &ipg,
1619 	    sizeof(ipg)) != 0) {
1620 		DNPRINTF(MPII_D_MISC, "%s: unable to fetch IOC page 1\n"
1621 		    "page 1\n", DEVNAME(sc));
1622 		return (1);
1623 	}
1624 
1625 	if (!ISSET(lemtoh32(&ipg.flags), MPII_CFG_IOC_1_REPLY_COALESCING))
1626 		return (0);
1627 
1628 	/* Disable coalescing */
1629 	CLR(ipg.flags, htole32(MPII_CFG_IOC_1_REPLY_COALESCING));
1630 	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 0, &ipg,
1631 	    sizeof(ipg)) != 0) {
1632 		DNPRINTF(MPII_D_MISC, "%s: unable to clear coalescing\n",
1633 		    DEVNAME(sc));
1634 		return (1);
1635 	}
1636 
1637 	return (0);
1638 }
1639 
/*
 * Mask (suppress) delivery of all events: the event mask is four
 * 32-bit words where a set bit disables the corresponding event.
 */
#define MPII_EVENT_MASKALL(enq)		do {			\
		(enq)->event_masks[0] = 0xffffffff;		\
		(enq)->event_masks[1] = 0xffffffff;		\
		(enq)->event_masks[2] = 0xffffffff;		\
		(enq)->event_masks[3] = 0xffffffff;		\
	} while (0)

/*
 * Unmask (enable) delivery of a single event by clearing its bit.
 * Arguments are parenthesized for macro hygiene and the shift uses an
 * unsigned constant so bit 31 does not invoke signed-shift UB.
 */
#define MPII_EVENT_UNMASK(enq, evt)	do {			\
		(enq)->event_masks[(evt) / 32] &=		\
		    htole32(~(1U << ((evt) % 32)));		\
	} while (0)
1651 
/*
 * Initialise the event handling machinery (sas event queue/task and
 * event-ack queue/handler) and send an EVENT_NOTIFICATION request
 * unmasking the events this driver handles.  Returns 0 on success,
 * 1 if no ccb could be allocated.
 */
int
mpii_eventnotify(struct mpii_softc *sc)
{
	struct mpii_msg_event_request		*enq;
	struct mpii_ccb				*ccb;

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_eventnotify ccb_get\n",
		    DEVNAME(sc));
		return (1);
	}

	/* sas topology/discovery events are processed from a task */
	SIMPLEQ_INIT(&sc->sc_evt_sas_queue);
	mtx_init(&sc->sc_evt_sas_mtx, IPL_BIO);
	task_set(&sc->sc_evt_sas_task, mpii_event_sas, sc);

	/* acks for events that require them go through the iopool */
	SIMPLEQ_INIT(&sc->sc_evt_ack_queue);
	mtx_init(&sc->sc_evt_ack_mtx, IPL_BIO);
	scsi_ioh_set(&sc->sc_evt_ack_handler, &sc->sc_iopool,
	    mpii_eventack, sc);

	ccb->ccb_done = mpii_eventnotify_done;
	enq = ccb->ccb_cmd;

	enq->function = MPII_FUNCTION_EVENT_NOTIFICATION;

	/*
	 * Enable reporting of the following events:
	 *
	 * MPII_EVENT_SAS_DISCOVERY
	 * MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST
	 * MPII_EVENT_SAS_DEVICE_STATUS_CHANGE
	 * MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE
	 * MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST
	 * MPII_EVENT_IR_VOLUME
	 * MPII_EVENT_IR_PHYSICAL_DISK
	 * MPII_EVENT_IR_OPERATION_STATUS
	 */

	MPII_EVENT_MASKALL(enq);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DISCOVERY);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DEVICE_STATUS_CHANGE);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_VOLUME);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_PHYSICAL_DISK);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_OPERATION_STATUS);

	mpii_start(sc, ccb);

	return (0);
}
1706 
/*
 * Completion handler for the EVENT_NOTIFICATION request: release the
 * ccb and process the event carried in the reply.
 */
void
mpii_eventnotify_done(struct mpii_ccb *ccb)
{
	struct mpii_softc			*sc = ccb->ccb_sc;
	struct mpii_rcb				*rcb = ccb->ccb_rcb;

	DNPRINTF(MPII_D_EVT, "%s: mpii_eventnotify_done\n", DEVNAME(sc));

	scsi_io_put(&sc->sc_iopool, ccb);
	mpii_event_process(sc, rcb);
}
1718 
/*
 * Handle an IR (integrated RAID) configuration change list event:
 * walk the change elements, creating or removing volume devices and
 * flagging underlying physical disks as hidden volume members or hot
 * spares.  Foreign configurations are ignored.
 */
void
mpii_event_raid(struct mpii_softc *sc, struct mpii_msg_event_reply *enp)
{
	struct mpii_evt_ir_cfg_change_list	*ccl;
	struct mpii_evt_ir_cfg_element		*ce;
	struct mpii_device			*dev;
	u_int16_t				type;
	int					i;

	/* the change list immediately follows the event reply header */
	ccl = (struct mpii_evt_ir_cfg_change_list *)(enp + 1);
	if (ccl->num_elements == 0)
		return;

	if (ISSET(lemtoh32(&ccl->flags), MPII_EVT_IR_CFG_CHANGE_LIST_FOREIGN)) {
		/* bail on foreign configurations */
		return;
	}

	/* the elements follow the change list header */
	ce = (struct mpii_evt_ir_cfg_element *)(ccl + 1);

	for (i = 0; i < ccl->num_elements; i++, ce++) {
		type = (lemtoh16(&ce->element_flags) &
		    MPII_EVT_IR_CFG_ELEMENT_TYPE_MASK);

		switch (type) {
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME:
			switch (ce->reason_code) {
			case MPII_EVT_IR_CFG_ELEMENT_RC_ADDED:
			case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_CREATED:
				if (mpii_find_dev(sc,
				    lemtoh16(&ce->vol_dev_handle))) {
					printf("%s: device %#x is already "
					    "configured\n", DEVNAME(sc),
					    lemtoh16(&ce->vol_dev_handle));
					break;
				}
				dev = malloc(sizeof(*dev), M_DEVBUF,
				    M_NOWAIT | M_ZERO);
				if (!dev) {
					printf("%s: failed to allocate a "
					    "device structure\n", DEVNAME(sc));
					break;
				}
				SET(dev->flags, MPII_DF_VOLUME);
				dev->slot = sc->sc_vd_id_low;
				dev->dev_handle = lemtoh16(&ce->vol_dev_handle);
				if (mpii_insert_dev(sc, dev)) {
					free(dev, M_DEVBUF, sizeof *dev);
					break;
				}
				sc->sc_vd_count++;
				break;
			case MPII_EVT_IR_CFG_ELEMENT_RC_REMOVED:
			case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_DELETED:
				if (!(dev = mpii_find_dev(sc,
				    lemtoh16(&ce->vol_dev_handle))))
					break;
				mpii_remove_dev(sc, dev);
				sc->sc_vd_count--;
				break;
			}
			break;
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME_DISK:
			if (ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_PD_CREATED ||
			    ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) {
				/* there should be an underlying sas drive */
				if (!(dev = mpii_find_dev(sc,
				    lemtoh16(&ce->phys_disk_dev_handle))))
					break;
				/* promoted from a hot spare? */
				CLR(dev->flags, MPII_DF_HOT_SPARE);
				SET(dev->flags, MPII_DF_VOLUME_DISK |
				    MPII_DF_HIDDEN);
			}
			break;
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_HOT_SPARE:
			if (ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) {
				/* there should be an underlying sas drive */
				if (!(dev = mpii_find_dev(sc,
				    lemtoh16(&ce->phys_disk_dev_handle))))
					break;
				SET(dev->flags, MPII_DF_HOT_SPARE |
				    MPII_DF_HIDDEN);
			}
			break;
		}
	}
}
1810 
/*
 * Task handler for queued SAS events.  Pops one reply off the sas
 * event queue, reschedules itself if more are pending, then handles
 * either a discovery event or a topology change list: attaching new
 * devices and detaching missing ones from the scsibus.
 */
void
mpii_event_sas(void *xsc)
{
	struct mpii_softc *sc = xsc;
	struct mpii_rcb *rcb, *next;
	struct mpii_msg_event_reply *enp;
	struct mpii_evt_sas_tcl		*tcl;
	struct mpii_evt_phy_entry	*pe;
	struct mpii_device		*dev;
	int				i;
	u_int16_t			handle;

	mtx_enter(&sc->sc_evt_sas_mtx);
	rcb = SIMPLEQ_FIRST(&sc->sc_evt_sas_queue);
	if (rcb != NULL) {
		/* next is only valid when rcb != NULL; see checks below */
		next = SIMPLEQ_NEXT(rcb, rcb_link);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_sas_queue, rcb_link);
	}
	mtx_leave(&sc->sc_evt_sas_mtx);

	if (rcb == NULL)
		return;
	if (next != NULL)
		task_add(systq, &sc->sc_evt_sas_task);

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;
	switch (lemtoh16(&enp->event)) {
	case MPII_EVENT_SAS_DISCOVERY:
		mpii_event_discovery(sc, enp);
		goto done;
	case MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		/* handle below */
		break;
	default:
		panic("%s: unexpected event %#x in sas event queue",
		    DEVNAME(sc), lemtoh16(&enp->event));
		/* NOTREACHED */
	}

	/* change list follows the reply header, phy entries follow it */
	tcl = (struct mpii_evt_sas_tcl *)(enp + 1);
	pe = (struct mpii_evt_phy_entry *)(tcl + 1);

	for (i = 0; i < tcl->num_entries; i++, pe++) {
		switch (pe->phy_status & MPII_EVENT_SAS_TOPO_PS_RC_MASK) {
		case MPII_EVENT_SAS_TOPO_PS_RC_ADDED:
			handle = lemtoh16(&pe->dev_handle);
			if (mpii_find_dev(sc, handle)) {
				printf("%s: device %#x is already "
				    "configured\n", DEVNAME(sc), handle);
				break;
			}

			dev = malloc(sizeof(*dev), M_DEVBUF, M_WAITOK | M_ZERO);
			dev->slot = sc->sc_pd_id_start + tcl->start_phy_num + i;
			dev->dev_handle = handle;
			dev->phy_num = tcl->start_phy_num + i;
			if (tcl->enclosure_handle)
				dev->physical_port = tcl->physical_port;
			dev->enclosure = lemtoh16(&tcl->enclosure_handle);
			dev->expander = lemtoh16(&tcl->expander_handle);

			if (mpii_insert_dev(sc, dev)) {
				free(dev, M_DEVBUF, sizeof *dev);
				break;
			}

			if (sc->sc_scsibus != NULL)
				scsi_probe_target(sc->sc_scsibus, dev->slot);
			break;

		case MPII_EVENT_SAS_TOPO_PS_RC_MISSING:
			dev = mpii_find_dev(sc, lemtoh16(&pe->dev_handle));
			if (dev == NULL)
				break;

			mpii_remove_dev(sc, dev);
			mpii_sas_remove_device(sc, dev->dev_handle);
			/* hidden devices were never exposed to the scsibus */
			if (sc->sc_scsibus != NULL &&
			    !ISSET(dev->flags, MPII_DF_HIDDEN)) {
				scsi_activate(sc->sc_scsibus, dev->slot, -1,
				    DVACT_DEACTIVATE);
				scsi_detach_target(sc->sc_scsibus, dev->slot,
				    DETACH_FORCE);
			}

			free(dev, M_DEVBUF, sizeof *dev);
			break;
		}
	}

done:
	mpii_event_done(sc, rcb);
}
1904 
/*
 * Handle a SAS discovery event.  On discovery completion, report any
 * non-zero discovery status and release the config_pending hold taken
 * during attach (so boot can proceed once discovery is done).
 */
void
mpii_event_discovery(struct mpii_softc *sc, struct mpii_msg_event_reply *enp)
{
	struct mpii_evt_sas_discovery *esd =
	    (struct mpii_evt_sas_discovery *)(enp + 1);

	if (esd->reason_code == MPII_EVENT_SAS_DISC_REASON_CODE_COMPLETED) {
		if (esd->discovery_status != 0) {
			printf("%s: sas discovery completed with status %#x\n",
			    DEVNAME(sc), esd->discovery_status);
		}

		if (ISSET(sc->sc_flags, MPII_F_CONFIG_PENDING)) {
			CLR(sc->sc_flags, MPII_F_CONFIG_PENDING);
			config_pending_decr();
		}
	}
}
1923 
/*
 * Dispatch an event notification reply.  SAS discovery/topology
 * events are queued for the sas event task (which takes ownership of
 * the rcb and returns without calling mpii_event_done here); all
 * other events are handled inline and the rcb is passed to
 * mpii_event_done for acking/recycling.
 */
void
mpii_event_process(struct mpii_softc *sc, struct mpii_rcb *rcb)
{
	struct mpii_msg_event_reply		*enp;

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;

	DNPRINTF(MPII_D_EVT, "%s: mpii_event_process: %#x\n", DEVNAME(sc),
	    letoh16(enp->event));

	switch (lemtoh16(&enp->event)) {
	case MPII_EVENT_EVENT_CHANGE:
		/* should be properly ignored */
		break;
	case MPII_EVENT_SAS_DISCOVERY:
	case MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		/* defer to the sas event task; it owns the rcb from here */
		mtx_enter(&sc->sc_evt_sas_mtx);
		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_sas_queue, rcb, rcb_link);
		mtx_leave(&sc->sc_evt_sas_mtx);
		task_add(systq, &sc->sc_evt_sas_task);
		return;
	case MPII_EVENT_SAS_DEVICE_STATUS_CHANGE:
		break;
	case MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		break;
	case MPII_EVENT_IR_VOLUME: {
		struct mpii_evt_ir_volume	*evd =
		    (struct mpii_evt_ir_volume *)(enp + 1);
		struct mpii_device		*dev;
#if NBIO > 0
		const char *vol_states[] = {
			BIOC_SVINVALID_S,
			BIOC_SVOFFLINE_S,
			BIOC_SVBUILDING_S,
			BIOC_SVONLINE_S,
			BIOC_SVDEGRADED_S,
			BIOC_SVONLINE_S,
		};
#endif

		/* don't report volume state changes during autoconf */
		if (cold)
			break;
		KERNEL_LOCK();
		dev = mpii_find_dev(sc, lemtoh16(&evd->vol_dev_handle));
		KERNEL_UNLOCK();
		if (dev == NULL)
			break;
#if NBIO > 0
		if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATE_CHANGED)
			printf("%s: volume %d state changed from %s to %s\n",
			    DEVNAME(sc), dev->slot - sc->sc_vd_id_low,
			    vol_states[evd->prev_value],
			    vol_states[evd->new_value]);
#endif
		if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATUS_CHANGED &&
		    ISSET(evd->new_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC) &&
		    !ISSET(evd->prev_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
			printf("%s: started resync on a volume %d\n",
			    DEVNAME(sc), dev->slot - sc->sc_vd_id_low);
		}
		break;
	case MPII_EVENT_IR_PHYSICAL_DISK:
		break;
	case MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		mpii_event_raid(sc, enp);
		break;
	case MPII_EVENT_IR_OPERATION_STATUS: {
		struct mpii_evt_ir_status	*evs =
		    (struct mpii_evt_ir_status *)(enp + 1);
		struct mpii_device		*dev;

		KERNEL_LOCK();
		dev = mpii_find_dev(sc, lemtoh16(&evs->vol_dev_handle));
		KERNEL_UNLOCK();
		/* track resync progress for bio(4) reporting */
		if (dev != NULL &&
		    evs->operation == MPII_EVENT_IR_RAIDOP_RESYNC)
			dev->percent = evs->percent;
		break;
		}
	default:
		DNPRINTF(MPII_D_EVT, "%s:  unhandled event 0x%02x\n",
		    DEVNAME(sc), lemtoh16(&enp->event));
	}

	mpii_event_done(sc, rcb);
}
2010 
/*
 * Finish processing of an event reply: if the IOC asked for an
 * acknowledgement, queue the rcb for the event-ack handler (which
 * also recycles the frame); otherwise return the frame immediately.
 */
void
mpii_event_done(struct mpii_softc *sc, struct mpii_rcb *rcb)
{
	struct mpii_msg_event_reply *enp = rcb->rcb_reply;

	if (enp->ack_required) {
		mtx_enter(&sc->sc_evt_ack_mtx);
		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_ack_queue, rcb, rcb_link);
		mtx_leave(&sc->sc_evt_ack_mtx);
		scsi_ioh_add(&sc->sc_evt_ack_handler);
	} else
		mpii_push_reply(sc, rcb);
}
2024 
/*
 * Remove a SAS device from the IOC: first issue a target reset for the
 * device handle, then a SAS IO unit control REMOVE_DEVICE operation,
 * reusing the same ccb for both commands.  Best-effort: gives up
 * silently if no ccb is available.
 */
void
mpii_sas_remove_device(struct mpii_softc *sc, u_int16_t handle)
{
	struct mpii_msg_scsi_task_request	*stq;
	struct mpii_msg_sas_oper_request	*soq;
	struct mpii_ccb				*ccb;

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL)
		return;

	stq = ccb->ccb_cmd;
	stq->function = MPII_FUNCTION_SCSI_TASK_MGMT;
	stq->task_type = MPII_SCSI_TASK_TARGET_RESET;
	htolem16(&stq->dev_handle, handle);

	ccb->ccb_done = mpii_empty_done;
	mpii_wait(sc, ccb);

	if (ccb->ccb_rcb != NULL)
		mpii_push_reply(sc, ccb->ccb_rcb);

	/* reuse a ccb */
	ccb->ccb_state = MPII_CCB_READY;
	ccb->ccb_rcb = NULL;

	soq = ccb->ccb_cmd;
	memset(soq, 0, sizeof(*soq));
	soq->function = MPII_FUNCTION_SAS_IO_UNIT_CONTROL;
	soq->operation = MPII_SAS_OP_REMOVE_DEVICE;
	htolem16(&soq->dev_handle, handle);

	ccb->ccb_done = mpii_empty_done;
	mpii_wait(sc, ccb);
	if (ccb->ccb_rcb != NULL)
		mpii_push_reply(sc, ccb->ccb_rcb);

	scsi_io_put(&sc->sc_iopool, ccb);
}
2064 
/*
 * Gather and print the board identification line: board name from
 * manufacturing page 0, firmware and MPI versions from IOC facts.
 * NOTE(review): returns 1 on handshake failure but EINVAL on a config
 * page failure — callers appear to only test for non-zero; consider
 * unifying the error codes.
 */
int
mpii_board_info(struct mpii_softc *sc)
{
	struct mpii_msg_iocfacts_request	ifq;
	struct mpii_msg_iocfacts_reply		ifp;
	struct mpii_cfg_manufacturing_pg0	mpg;
	struct mpii_cfg_hdr			hdr;

	memset(&ifq, 0, sizeof(ifq));
	memset(&ifp, 0, sizeof(ifp));

	ifq.function = MPII_FUNCTION_IOC_FACTS;

	if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: failed to request ioc facts\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: failed to receive ioc facts\n",
		    DEVNAME(sc));
		return (1);
	}

	hdr.page_version = 0;
	hdr.page_length = sizeof(mpg) / 4;
	hdr.page_number = 0;
	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_MANUFACTURING;
	memset(&mpg, 0, sizeof(mpg));
	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &mpg,
	    sizeof(mpg)) != 0) {
		printf("%s: unable to fetch manufacturing page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	printf("%s: %s, firmware %u.%u.%u.%u%s, MPI %u.%u\n", DEVNAME(sc),
	    mpg.board_name, ifp.fw_version_maj, ifp.fw_version_min,
	    ifp.fw_version_unit, ifp.fw_version_dev,
	    ISSET(sc->sc_flags, MPII_F_RAID) ? " IR" : "",
	    ifp.msg_version_maj, ifp.msg_version_min);

	return (0);
}
2110 
/*
 * Work out how the IOC maps RAID volumes and physical disks onto SCSI
 * target ids, based on IOC page 8.  Adjusts sc_vd_id_low (first volume
 * target id) and sc_pd_id_start (first physical disk target id).
 * Returns 0 on success, EINVAL if the config page cannot be read.
 */
int
mpii_target_map(struct mpii_softc *sc)
{
	struct mpii_cfg_hdr			hdr;
	struct mpii_cfg_ioc_pg8			ipg;
	int					flags, pad = 0;

	hdr.page_version = 0;
	hdr.page_length = sizeof(ipg) / 4;
	hdr.page_number = 8;
	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC;
	memset(&ipg, 0, sizeof(ipg));
	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &ipg,
	    sizeof(ipg)) != 0) {
		printf("%s: unable to fetch ioc page 8\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* target id 0 may be reserved by the firmware; shift by one */
	if (lemtoh16(&ipg.flags) & MPII_IOC_PG8_FLAGS_RESERVED_TARGETID_0)
		pad = 1;

	flags = lemtoh16(&ipg.ir_volume_mapping_flags) &
	    MPII_IOC_PG8_IRFLAGS_VOLUME_MAPPING_MODE_MASK;
	if (ISSET(sc->sc_flags, MPII_F_RAID)) {
		if (flags == MPII_IOC_PG8_IRFLAGS_LOW_VOLUME_MAPPING) {
			/* volumes occupy the low ids; disks come after */
			sc->sc_vd_id_low += pad;
			pad = sc->sc_max_volumes; /* for sc_pd_id_start */
		} else
			/* high mapping: volumes take the top of the range */
			sc->sc_vd_id_low = sc->sc_max_devices -
			    sc->sc_max_volumes;
	}

	sc->sc_pd_id_start += pad;

	return (0);
}
2148 
/*
 * Fetch a configuration page header from the IOC.
 *
 * type/number/address identify the page.  If MPII_PG_EXTENDED is set
 * in flags, 'p' points to a struct mpii_ecfg_hdr and 'type' is the
 * extended page type; otherwise 'p' points to a struct mpii_cfg_hdr.
 * If MPII_PG_POLL is set the command is polled for (usable before
 * interrupts work), otherwise the caller sleeps in mpii_wait().
 * Returns 0 on success, 1 on failure.
 */
int
mpii_req_cfg_header(struct mpii_softc *sc, u_int8_t type, u_int8_t number,
    u_int32_t address, int flags, void *p)
{
	struct mpii_msg_config_request		*cq;
	struct mpii_msg_config_reply		*cp;
	struct mpii_ccb				*ccb;
	struct mpii_cfg_hdr			*hdr = p;
	struct mpii_ecfg_hdr			*ehdr = p;
	int					etype = 0;
	int					rv = 0;

	DNPRINTF(MPII_D_MISC, "%s: mpii_req_cfg_header type: %#x number: %x "
	    "address: 0x%08x flags: 0x%b\n", DEVNAME(sc), type, number,
	    address, flags, MPII_PG_FMT);

	ccb = scsi_io_get(&sc->sc_iopool,
	    ISSET(flags, MPII_PG_POLL) ? SCSI_NOSLEEP : 0);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header ccb_get\n",
		    DEVNAME(sc));
		return (1);
	}

	/* extended pages use a fixed page type plus an ext type field */
	if (ISSET(flags, MPII_PG_EXTENDED)) {
		etype = type;
		type = MPII_CONFIG_REQ_PAGE_TYPE_EXTENDED;
	}

	cq = ccb->ccb_cmd;

	cq->function = MPII_FUNCTION_CONFIG;

	cq->action = MPII_CONFIG_REQ_ACTION_PAGE_HEADER;

	cq->config_header.page_number = number;
	cq->config_header.page_type = type;
	cq->ext_page_type = etype;
	htolem32(&cq->page_address, address);
	/* no data transfer for a header request, SGL is a null entry */
	htolem32(&cq->page_buffer.sg_hdr, MPII_SGE_FL_TYPE_SIMPLE |
	    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL);

	ccb->ccb_done = mpii_empty_done;
	if (ISSET(flags, MPII_PG_POLL)) {
		if (mpii_poll(sc, ccb) != 0) {
			/*
			 * NOTE(review): this path would leak the ccb, but
			 * mpii_poll() as implemented always returns 0, so
			 * it is currently unreachable.
			 */
			DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
			    DEVNAME(sc));
			return (1);
		}
	} else
		mpii_wait(sc, ccb);

	if (ccb->ccb_rcb == NULL) {
		scsi_io_put(&sc->sc_iopool, ccb);
		return (1);
	}
	cp = ccb->ccb_rcb->rcb_reply;

	DNPRINTF(MPII_D_MISC, "%s:  action: 0x%02x sgl_flags: 0x%02x "
	    "msg_length: %d function: 0x%02x\n", DEVNAME(sc), cp->action,
	    cp->sgl_flags, cp->msg_length, cp->function);
	DNPRINTF(MPII_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
	    "msg_flags: 0x%02x\n", DEVNAME(sc),
	    letoh16(cp->ext_page_length), cp->ext_page_type,
	    cp->msg_flags);
	DNPRINTF(MPII_D_MISC, "%s:  vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
	    cp->vp_id, cp->vf_id);
	DNPRINTF(MPII_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
	    letoh16(cp->ioc_status));
	DNPRINTF(MPII_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(cp->ioc_loginfo));
	DNPRINTF(MPII_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
	    cp->config_header.page_version,
	    cp->config_header.page_length,
	    cp->config_header.page_number,
	    cp->config_header.page_type);

	/* copy the header out to the caller's buffer on success */
	if (lemtoh16(&cp->ioc_status) != MPII_IOCSTATUS_SUCCESS)
		rv = 1;
	else if (ISSET(flags, MPII_PG_EXTENDED)) {
		memset(ehdr, 0, sizeof(*ehdr));
		ehdr->page_version = cp->config_header.page_version;
		ehdr->page_number = cp->config_header.page_number;
		ehdr->page_type = cp->config_header.page_type;
		ehdr->ext_page_length = cp->ext_page_length;
		ehdr->ext_page_type = cp->ext_page_type;
	} else
		*hdr = cp->config_header;

	mpii_push_reply(sc, ccb->ccb_rcb);
	scsi_io_put(&sc->sc_iopool, ccb);

	return (rv);
}
2244 
/*
 * Read or write a configuration page.  'p' is the header previously
 * obtained from mpii_req_cfg_header() (regular or extended depending
 * on MPII_PG_EXTENDED in flags).  'read' selects READ_CURRENT vs
 * WRITE_CURRENT; 'page'/'len' is the caller's buffer.  The page data
 * is bounced through the tail of the request frame so no extra DMA
 * mapping is needed.  Returns 0 on success, 1 on failure.
 */
int
mpii_req_cfg_page(struct mpii_softc *sc, u_int32_t address, int flags,
    void *p, int read, void *page, size_t len)
{
	struct mpii_msg_config_request		*cq;
	struct mpii_msg_config_reply		*cp;
	struct mpii_ccb				*ccb;
	struct mpii_cfg_hdr			*hdr = p;
	struct mpii_ecfg_hdr			*ehdr = p;
	caddr_t					kva;
	int					page_length;
	int					rv = 0;

	DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page address: %d read: %d "
	    "type: %x\n", DEVNAME(sc), address, read, hdr->page_type);

	/* page_length fields count 32-bit dwords */
	page_length = ISSET(flags, MPII_PG_EXTENDED) ?
	    lemtoh16(&ehdr->ext_page_length) : hdr->page_length;

	/* buffer must fit behind the request and cover the whole page */
	if (len > sc->sc_request_size - sizeof(*cq) || len < page_length * 4)
		return (1);

	ccb = scsi_io_get(&sc->sc_iopool,
	    ISSET(flags, MPII_PG_POLL) ? SCSI_NOSLEEP : 0);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page ccb_get\n",
		    DEVNAME(sc));
		return (1);
	}

	cq = ccb->ccb_cmd;

	cq->function = MPII_FUNCTION_CONFIG;

	cq->action = (read ? MPII_CONFIG_REQ_ACTION_PAGE_READ_CURRENT :
	    MPII_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT);

	if (ISSET(flags, MPII_PG_EXTENDED)) {
		cq->config_header.page_version = ehdr->page_version;
		cq->config_header.page_number = ehdr->page_number;
		cq->config_header.page_type = ehdr->page_type;
		cq->ext_page_len = ehdr->ext_page_length;
		cq->ext_page_type = ehdr->ext_page_type;
	} else
		cq->config_header = *hdr;
	cq->config_header.page_type &= MPII_CONFIG_REQ_PAGE_TYPE_MASK;
	htolem32(&cq->page_address, address);
	htolem32(&cq->page_buffer.sg_hdr, MPII_SGE_FL_TYPE_SIMPLE |
	    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL |
	    MPII_SGE_FL_SIZE_64 | (page_length * 4) |
	    (read ? MPII_SGE_FL_DIR_IN : MPII_SGE_FL_DIR_OUT));

	/* bounce the page via the request space to avoid more bus_dma games */
	mpii_dvatosge(&cq->page_buffer, ccb->ccb_cmd_dva +
	    sizeof(struct mpii_msg_config_request));

	kva = ccb->ccb_cmd;
	kva += sizeof(struct mpii_msg_config_request);

	/* for writes, stage the caller's data into the bounce area */
	if (!read)
		memcpy(kva, page, len);

	ccb->ccb_done = mpii_empty_done;
	if (ISSET(flags, MPII_PG_POLL)) {
		if (mpii_poll(sc, ccb) != 0) {
			/*
			 * NOTE(review): this path would leak the ccb, but
			 * mpii_poll() as implemented always returns 0, so
			 * it is currently unreachable.
			 */
			DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
			    DEVNAME(sc));
			return (1);
		}
	} else
		mpii_wait(sc, ccb);

	if (ccb->ccb_rcb == NULL) {
		scsi_io_put(&sc->sc_iopool, ccb);
		return (1);
	}
	cp = ccb->ccb_rcb->rcb_reply;

	DNPRINTF(MPII_D_MISC, "%s:  action: 0x%02x msg_length: %d "
	    "function: 0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length,
	    cp->function);
	DNPRINTF(MPII_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
	    "msg_flags: 0x%02x\n", DEVNAME(sc),
	    letoh16(cp->ext_page_length), cp->ext_page_type,
	    cp->msg_flags);
	DNPRINTF(MPII_D_MISC, "%s:  vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
	    cp->vp_id, cp->vf_id);
	DNPRINTF(MPII_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
	    letoh16(cp->ioc_status));
	DNPRINTF(MPII_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(cp->ioc_loginfo));
	DNPRINTF(MPII_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
	    cp->config_header.page_version,
	    cp->config_header.page_length,
	    cp->config_header.page_number,
	    cp->config_header.page_type);

	if (lemtoh16(&cp->ioc_status) != MPII_IOCSTATUS_SUCCESS)
		rv = 1;
	else if (read)
		/* for reads, copy the bounced data back to the caller */
		memcpy(page, kva, len);

	mpii_push_reply(sc, ccb->ccb_rcb);
	scsi_io_put(&sc->sc_iopool, ccb);

	return (rv);
}
2353 
/*
 * Translate a posted reply descriptor into its reply control block.
 * Address replies carry the DVA of a full reply frame; the frame index
 * is recovered from the offset into the sc_replies DMA region.  Other
 * descriptor types have no frame and return NULL.  The descriptor slot
 * is reset to all-ones (the "unused" pattern) so the post queue scan
 * can detect new entries.
 */
struct mpii_rcb *
mpii_reply(struct mpii_softc *sc, struct mpii_reply_descr *rdp)
{
	struct mpii_rcb		*rcb = NULL;
	u_int32_t		rfid;

	DNPRINTF(MPII_D_INTR, "%s: mpii_reply\n", DEVNAME(sc));

	if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
	    MPII_REPLY_DESCR_ADDRESS_REPLY) {
		/* frame index = offset into the reply region / frame size */
		rfid = (lemtoh32(&rdp->frame_addr) -
		    (u_int32_t)MPII_DMA_DVA(sc->sc_replies)) /
		    sc->sc_reply_size;

		/* make the device's writes to the frame visible to the CPU */
		bus_dmamap_sync(sc->sc_dmat,
		    MPII_DMA_MAP(sc->sc_replies), sc->sc_reply_size * rfid,
		    sc->sc_reply_size, BUS_DMASYNC_POSTREAD);

		rcb = &sc->sc_rcbs[rfid];
	}

	memset(rdp, 0xff, sizeof(*rdp));

	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
	    8 * sc->sc_reply_post_host_index, 8,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	return (rcb);
}
2383 
/*
 * Allocate a chunk of zeroed, DMA-able memory in a single segment and
 * map it into kernel virtual space.  Resources are acquired in order
 * (map, segment, kva, load) and unwound in reverse via the goto chain
 * on failure.  Returns NULL if any step fails.
 */
struct mpii_dmamem *
mpii_dmamem_alloc(struct mpii_softc *sc, size_t size)
{
	struct mpii_dmamem	*mdm;
	int			nsegs;

	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mdm == NULL)
		return (NULL);

	mdm->mdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg,
	    1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
	    &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
	    NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (mdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
mdmfree:
	free(mdm, M_DEVBUF, sizeof *mdm);

	return (NULL);
}
2425 
/*
 * Release DMA memory obtained from mpii_dmamem_alloc().  Teardown is
 * the exact reverse of the allocation order: unload, unmap, free the
 * segment, destroy the map, then free the descriptor itself.
 */
void
mpii_dmamem_free(struct mpii_softc *sc, struct mpii_dmamem *mdm)
{
	DNPRINTF(MPII_D_MEM, "%s: mpii_dmamem_free %p\n", DEVNAME(sc), mdm);

	bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF, sizeof *mdm);
}
2437 
2438 int
2439 mpii_insert_dev(struct mpii_softc *sc, struct mpii_device *dev)
2440 {
2441 	int		slot;	/* initial hint */
2442 
2443 	if (dev == NULL || dev->slot < 0)
2444 		return (1);
2445 	slot = dev->slot;
2446 
2447 	while (slot < sc->sc_max_devices && sc->sc_devs[slot] != NULL)
2448 		slot++;
2449 
2450 	if (slot >= sc->sc_max_devices)
2451 		return (1);
2452 
2453 	dev->slot = slot;
2454 	sc->sc_devs[slot] = dev;
2455 
2456 	return (0);
2457 }
2458 
2459 int
2460 mpii_remove_dev(struct mpii_softc *sc, struct mpii_device *dev)
2461 {
2462 	int			i;
2463 
2464 	if (dev == NULL)
2465 		return (1);
2466 
2467 	for (i = 0; i < sc->sc_max_devices; i++) {
2468 		if (sc->sc_devs[i] == NULL)
2469 			continue;
2470 
2471 		if (sc->sc_devs[i]->dev_handle == dev->dev_handle) {
2472 			sc->sc_devs[i] = NULL;
2473 			return (0);
2474 		}
2475 	}
2476 
2477 	return (1);
2478 }
2479 
2480 struct mpii_device *
2481 mpii_find_dev(struct mpii_softc *sc, u_int16_t handle)
2482 {
2483 	int			i;
2484 
2485 	for (i = 0; i < sc->sc_max_devices; i++) {
2486 		if (sc->sc_devs[i] == NULL)
2487 			continue;
2488 
2489 		if (sc->sc_devs[i]->dev_handle == handle)
2490 			return (sc->sc_devs[i]);
2491 	}
2492 
2493 	return (NULL);
2494 }
2495 
/*
 * Allocate the driver's command control blocks and the DMA region that
 * backs their request frames, then seed the free list and iopool.
 * System request frame 0 (smid 0) is reserved by the MPI spec, so only
 * sc_max_cmds - 1 ccbs are created and smids start at 1.
 * Returns 0 on success, 1 on failure (with everything unwound).
 */
int
mpii_alloc_ccbs(struct mpii_softc *sc)
{
	struct mpii_ccb		*ccb;
	u_int8_t		*cmd;
	int			i;

	SIMPLEQ_INIT(&sc->sc_ccb_free);
	SIMPLEQ_INIT(&sc->sc_ccb_tmos);
	mtx_init(&sc->sc_ccb_free_mtx, IPL_BIO);
	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
	scsi_ioh_set(&sc->sc_ccb_tmo_handler, &sc->sc_iopool,
	    mpii_scsi_cmd_tmo_handler, sc);

	sc->sc_ccbs = mallocarray((sc->sc_max_cmds-1), sizeof(*ccb),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->sc_ccbs == NULL) {
		printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
		return (1);
	}

	sc->sc_requests = mpii_dmamem_alloc(sc,
	    sc->sc_request_size * sc->sc_max_cmds);
	if (sc->sc_requests == NULL) {
		printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
		goto free_ccbs;
	}
	cmd = MPII_DMA_KVA(sc->sc_requests);

	/*
	 * we have sc->sc_max_cmds system request message
	 * frames, but smid zero cannot be used. so we then
	 * have (sc->sc_max_cmds - 1) number of ccbs
	 */
	for (i = 1; i < sc->sc_max_cmds; i++) {
		ccb = &sc->sc_ccbs[i - 1];

		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, sc->sc_max_sgl,
		    MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap) != 0) {
			printf("%s: unable to create dma map\n", DEVNAME(sc));
			goto free_maps;
		}

		/* each ccb owns one request frame within sc_requests */
		ccb->ccb_sc = sc;
		htolem16(&ccb->ccb_smid, i);
		ccb->ccb_offset = sc->sc_request_size * i;

		ccb->ccb_cmd = &cmd[ccb->ccb_offset];
		ccb->ccb_cmd_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_requests) +
		    ccb->ccb_offset;

		DNPRINTF(MPII_D_CCB, "%s: mpii_alloc_ccbs(%d) ccb: %p map: %p "
		    "sc: %p smid: %#x offs: %#lx cmd: %p dva: %#lx\n",
		    DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc,
		    ccb->ccb_smid, ccb->ccb_offset, ccb->ccb_cmd,
		    ccb->ccb_cmd_dva);

		mpii_put_ccb(sc, ccb);
	}

	scsi_iopool_init(&sc->sc_iopool, sc, mpii_get_ccb, mpii_put_ccb);

	return (0);

free_maps:
	/* destroy the dma maps of all ccbs put on the free list so far */
	while ((ccb = mpii_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);

	mpii_dmamem_free(sc, sc->sc_requests);
free_ccbs:
	free(sc->sc_ccbs, M_DEVBUF, (sc->sc_max_cmds-1) * sizeof(*ccb));

	return (1);
}
2571 
/*
 * iopool "put" callback: reset a ccb to its pristine state and return
 * it to the free list.  The request frame is zeroed here so commands
 * can assume a clean frame.
 */
void
mpii_put_ccb(void *cookie, void *io)
{
	struct mpii_softc	*sc = cookie;
	struct mpii_ccb		*ccb = io;

	DNPRINTF(MPII_D_CCB, "%s: mpii_put_ccb %p\n", DEVNAME(sc), ccb);

	ccb->ccb_state = MPII_CCB_FREE;
	ccb->ccb_cookie = NULL;
	ccb->ccb_done = NULL;
	ccb->ccb_rcb = NULL;
	memset(ccb->ccb_cmd, 0, sc->sc_request_size);

	/*
	 * NOTE(review): the kernel lock is dropped around the free-list
	 * mutex, presumably to avoid holding it while contending on the
	 * mutex from the (biglocked) scsi midlayer — confirm.
	 */
	KERNEL_UNLOCK();
	mtx_enter(&sc->sc_ccb_free_mtx);
	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link);
	mtx_leave(&sc->sc_ccb_free_mtx);
	KERNEL_LOCK();
}
2592 
/*
 * iopool "get" callback: pop a ccb off the free list, marking it
 * ready.  Returns NULL when the list is empty.  Mirrors the lock
 * handling in mpii_put_ccb().
 */
void *
mpii_get_ccb(void *cookie)
{
	struct mpii_softc	*sc = cookie;
	struct mpii_ccb		*ccb;

	KERNEL_UNLOCK();

	mtx_enter(&sc->sc_ccb_free_mtx);
	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free);
	if (ccb != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link);
		ccb->ccb_state = MPII_CCB_READY;
	}
	mtx_leave(&sc->sc_ccb_free_mtx);

	KERNEL_LOCK();

	DNPRINTF(MPII_D_CCB, "%s: mpii_get_ccb %p\n", DEVNAME(sc), ccb);

	return (ccb);
}
2615 
2616 int
2617 mpii_alloc_replies(struct mpii_softc *sc)
2618 {
2619 	DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_replies\n", DEVNAME(sc));
2620 
2621 	sc->sc_rcbs = mallocarray(sc->sc_num_reply_frames,
2622 	    sizeof(struct mpii_rcb), M_DEVBUF, M_NOWAIT);
2623 	if (sc->sc_rcbs == NULL)
2624 		return (1);
2625 
2626 	sc->sc_replies = mpii_dmamem_alloc(sc, sc->sc_reply_size *
2627 	    sc->sc_num_reply_frames);
2628 	if (sc->sc_replies == NULL) {
2629 		free(sc->sc_rcbs, M_DEVBUF,
2630 		    sc->sc_num_reply_frames * sizeof(struct mpii_rcb));
2631 		return (1);
2632 	}
2633 
2634 	return (0);
2635 }
2636 
/*
 * Initialise every reply control block with the kva/dva of its reply
 * frame and hand all frames to the IOC via the reply free queue.  The
 * whole region is synced PREREAD first since the device will be
 * writing into it from now on.
 */
void
mpii_push_replies(struct mpii_softc *sc)
{
	struct mpii_rcb		*rcb;
	caddr_t			kva = MPII_DMA_KVA(sc->sc_replies);
	int			i;

	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
	    0, sc->sc_reply_size * sc->sc_num_reply_frames,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < sc->sc_num_reply_frames; i++) {
		rcb = &sc->sc_rcbs[i];

		rcb->rcb_reply = kva + sc->sc_reply_size * i;
		rcb->rcb_reply_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) +
		    sc->sc_reply_size * i;
		mpii_push_reply(sc, rcb);
	}
}
2657 
/*
 * Post a prepared ccb to the IOC.  Builds a request descriptor for the
 * frame (type depends on the MPI function in the frame) and writes it
 * to the request descriptor post register.  On LP64 the 64-bit
 * descriptor is written in one raw access; otherwise the low and high
 * halves are written separately under sc_req_mtx with write barriers,
 * since the IOC latches the descriptor on the high-half write.
 */
void
mpii_start(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	struct mpii_request_header	*rhp;
	struct mpii_request_descr	descr;
	/* view of the 8-byte descriptor as raw words for the reg writes */
	u_long				 *rdp = (u_long *)&descr;

	DNPRINTF(MPII_D_RW, "%s: mpii_start %#lx\n", DEVNAME(sc),
	    ccb->ccb_cmd_dva);

	/* flush the request frame to memory before the device reads it */
	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, sc->sc_request_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ccb->ccb_state = MPII_CCB_QUEUED;

	rhp = ccb->ccb_cmd;

	memset(&descr, 0, sizeof(descr));

	switch (rhp->function) {
	case MPII_FUNCTION_SCSI_IO_REQUEST:
		descr.request_flags = MPII_REQ_DESCR_SCSI_IO;
		descr.dev_handle = htole16(ccb->ccb_dev_handle);
		break;
	case MPII_FUNCTION_SCSI_TASK_MGMT:
		descr.request_flags = MPII_REQ_DESCR_HIGH_PRIORITY;
		break;
	default:
		descr.request_flags = MPII_REQ_DESCR_DEFAULT;
	}

	descr.vf_id = sc->sc_vf_id;
	descr.smid = ccb->ccb_smid;

	/*
	 * NOTE(review): on LP64 u_long is 8 bytes, so *(rdp+1) in the
	 * debug printf below reads past descr — harmless in practice
	 * and only compiled under MPII_DEBUG, but worth confirming.
	 */
	DNPRINTF(MPII_D_RW, "%s:   MPII_REQ_DESCR_POST_LOW (0x%08x) write "
	    "0x%08lx\n", DEVNAME(sc), MPII_REQ_DESCR_POST_LOW, *rdp);

	DNPRINTF(MPII_D_RW, "%s:   MPII_REQ_DESCR_POST_HIGH (0x%08x) write "
	    "0x%08lx\n", DEVNAME(sc), MPII_REQ_DESCR_POST_HIGH, *(rdp+1));

#if defined(__LP64__)
	bus_space_write_raw_8(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, *rdp);
#else
	/* serialize the two-part write so descriptors can't interleave */
	mtx_enter(&sc->sc_req_mtx);
	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, rdp[0]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, 8, BUS_SPACE_BARRIER_WRITE);

	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_HIGH, rdp[1]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, 8, BUS_SPACE_BARRIER_WRITE);
	mtx_leave(&sc->sc_req_mtx);
#endif
}
2716 
/*
 * Run a ccb to completion by polling instead of sleeping (usable
 * before interrupts are established).  The ccb's completion handler is
 * temporarily replaced with mpii_poll_done(), which clears the local
 * flag; the original handler is then invoked.  Always returns 0.
 */
int
mpii_poll(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	void				(*done)(struct mpii_ccb *);
	void				*cookie;
	int				rv = 1;

	DNPRINTF(MPII_D_INTR, "%s: mpii_poll\n", DEVNAME(sc));

	/* stash the caller's completion state and hijack the ccb */
	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mpii_poll_done;
	ccb->ccb_cookie = &rv;

	mpii_start(sc, ccb);

	/* spin until mpii_poll_done() clears rv via the cookie */
	while (rv == 1) {
		/* avoid excessive polling */
		if (mpii_reply_waiting(sc))
			mpii_intr(sc);
		else
			delay(10);
	}

	ccb->ccb_cookie = cookie;
	done(ccb);

	return (0);
}
2747 
2748 void
2749 mpii_poll_done(struct mpii_ccb *ccb)
2750 {
2751 	int				*rv = ccb->ccb_cookie;
2752 
2753 	*rv = 0;
2754 }
2755 
/*
 * Allocate the reply free queue (filled with the DVAs of the reply
 * frames) and the reply post queue (cleared to the all-ones "unused"
 * pattern that mpii_reply() relies on).  Returns 0 on success, 1 on
 * failure with the free queue released again.
 */
int
mpii_alloc_queues(struct mpii_softc *sc)
{
	u_int32_t		*rfp;
	int			i;

	DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_queues\n", DEVNAME(sc));

	sc->sc_reply_freeq = mpii_dmamem_alloc(sc,
	    sc->sc_reply_free_qdepth * sizeof(*rfp));
	if (sc->sc_reply_freeq == NULL)
		return (1);
	rfp = MPII_DMA_KVA(sc->sc_reply_freeq);
	for (i = 0; i < sc->sc_num_reply_frames; i++) {
		rfp[i] = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) +
		    sc->sc_reply_size * i;
	}

	sc->sc_reply_postq = mpii_dmamem_alloc(sc,
	    sc->sc_reply_post_qdepth * sizeof(struct mpii_reply_descr));
	if (sc->sc_reply_postq == NULL)
		goto free_reply_freeq;
	sc->sc_reply_postq_kva = MPII_DMA_KVA(sc->sc_reply_postq);
	/* 0xff marks a descriptor slot as not yet written by the IOC */
	memset(sc->sc_reply_postq_kva, 0xff, sc->sc_reply_post_qdepth *
	    sizeof(struct mpii_reply_descr));

	return (0);

free_reply_freeq:
	mpii_dmamem_free(sc, sc->sc_reply_freeq);
	return (1);
}
2788 
/*
 * Program the initial queue indices into the IOC: the free queue index
 * starts at qdepth - 1 (all frames handed to the IOC), the post queue
 * index at 0.
 */
void
mpii_init_queues(struct mpii_softc *sc)
{
	DNPRINTF(MPII_D_MISC, "%s:  mpii_init_queues\n", DEVNAME(sc));

	sc->sc_reply_free_host_index = sc->sc_reply_free_qdepth - 1;
	sc->sc_reply_post_host_index = 0;
	mpii_write_reply_free(sc, sc->sc_reply_free_host_index);
	mpii_write_reply_post(sc, sc->sc_reply_post_host_index);
}
2799 
/*
 * Run a ccb to completion, sleeping until mpii_wait_done() signals it.
 * The ccb's handler/cookie are hijacked for the duration (the cookie
 * holds the wakeup mutex; NULL means complete) and restored before the
 * caller's original completion handler is invoked.
 */
void
mpii_wait(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	struct mutex		mtx = MUTEX_INITIALIZER(IPL_BIO);
	void			(*done)(struct mpii_ccb *);
	void			*cookie;

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mpii_wait_done;
	ccb->ccb_cookie = &mtx;

	/* XXX this will wait forever for the ccb to complete */

	mpii_start(sc, ccb);

	/* sleep until mpii_wait_done() NULLs the cookie under the mutex */
	mtx_enter(&mtx);
	while (ccb->ccb_cookie != NULL)
		msleep(ccb, &mtx, PRIBIO, "mpiiwait", 0);
	mtx_leave(&mtx);

	ccb->ccb_cookie = cookie;
	done(ccb);
}
2825 
/*
 * Completion handler installed by mpii_wait(): clear the cookie under
 * the sleeper's mutex (so the msleep loop condition is re-checked
 * safely) and wake the waiter.
 */
void
mpii_wait_done(struct mpii_ccb *ccb)
{
	struct mutex		*mtx = ccb->ccb_cookie;

	mtx_enter(mtx);
	ccb->ccb_cookie = NULL;
	mtx_leave(mtx);

	wakeup_one(ccb);
}
2837 
/*
 * scsi midlayer entry point: translate a scsi_xfer into an MPI SCSI IO
 * request and submit it.  Oversized CDBs are failed with synthetic
 * ILLEGAL REQUEST sense; commands for departed targets fail with
 * selection timeout.  The kernel lock is dropped for the submission
 * path and retaken before returning to the (biglocked) midlayer.
 */
void
mpii_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct mpii_softc	*sc = link->adapter_softc;
	struct mpii_ccb		*ccb = xs->io;
	struct mpii_msg_scsi_io	*io;
	struct mpii_device	*dev;
	int			 ret;

	DNPRINTF(MPII_D_CMD, "%s: mpii_scsi_cmd\n", DEVNAME(sc));

	if (xs->cmdlen > MPII_CDB_LEN) {
		/* fail with synthetic sense: ILLEGAL REQUEST, invalid opcode */
		DNPRINTF(MPII_D_CMD, "%s: CDB too big %d\n",
		    DEVNAME(sc), xs->cmdlen);
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.error_code = SSD_ERRCODE_VALID | 0x70;
		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.add_sense_code = 0x20;
		xs->error = XS_SENSE;
		scsi_done(xs);
		return;
	}

	if ((dev = sc->sc_devs[link->target]) == NULL) {
		/* device no longer exists */
		xs->error = XS_SELTIMEOUT;
		scsi_done(xs);
		return;
	}

	KERNEL_UNLOCK();

	DNPRINTF(MPII_D_CMD, "%s: ccb_smid: %d xs->flags: 0x%x\n",
	    DEVNAME(sc), ccb->ccb_smid, xs->flags);

	ccb->ccb_cookie = xs;
	ccb->ccb_done = mpii_scsi_cmd_done;
	ccb->ccb_dev_handle = dev->dev_handle;

	/* build the SCSI IO request frame */
	io = ccb->ccb_cmd;
	memset(io, 0, sizeof(*io));
	io->function = MPII_FUNCTION_SCSI_IO_REQUEST;
	io->sense_buffer_length = sizeof(xs->sense);
	io->sgl_offset0 = sizeof(struct mpii_msg_scsi_io) / 4;
	htolem16(&io->io_flags, xs->cmdlen);
	htolem16(&io->dev_handle, ccb->ccb_dev_handle);
	htobem16(&io->lun[0], link->lun);

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}

	io->tagging = MPII_SCSIIO_ATTR_SIMPLE_Q;

	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	htolem32(&io->data_length, xs->datalen);

	/* sense data is at the end of a request */
	htolem32(&io->sense_buffer_low_address, ccb->ccb_cmd_dva +
	    sc->sc_request_size - sizeof(struct scsi_sense_data));

	/* map the data buffer into the SGL (format depends on chip rev) */
	if (ISSET(sc->sc_flags, MPII_F_SAS3))
		ret = mpii_load_xs_sas3(ccb);
	else
		ret = mpii_load_xs(ccb);

	if (ret != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		goto done;
	}

	timeout_set(&xs->stimeout, mpii_scsi_cmd_tmo, ccb);
	if (xs->flags & SCSI_POLL) {
		if (mpii_poll(sc, ccb) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			goto done;
		}
	} else {
		timeout_add_msec(&xs->stimeout, xs->timeout);
		mpii_start(sc, ccb);
	}

	KERNEL_LOCK();
	return;

done:
	KERNEL_LOCK();
	scsi_done(xs);
}
2937 
/*
 * Timeout fired for a SCSI command.  If the ccb is still queued with
 * the IOC, mark it timed out, put it on the timeout list and schedule
 * the recovery handler (which will issue a target reset).
 */
void
mpii_scsi_cmd_tmo(void *xccb)
{
	struct mpii_ccb		*ccb = xccb;
	struct mpii_softc	*sc = ccb->ccb_sc;

	printf("%s: mpii_scsi_cmd_tmo\n", DEVNAME(sc));

	mtx_enter(&sc->sc_ccb_mtx);
	if (ccb->ccb_state == MPII_CCB_QUEUED) {
		ccb->ccb_state = MPII_CCB_TIMEOUT;
		SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_tmos, ccb, ccb_link);
	}
	mtx_leave(&sc->sc_ccb_mtx);

	scsi_ioh_add(&sc->sc_ccb_tmo_handler);
}
2955 
/*
 * Timeout recovery handler, run with a fresh ccb ('tccb') from the
 * iopool.  Takes one timed-out ccb off the list and issues a target
 * reset for its device handle; when the reset completes,
 * mpii_scsi_cmd_tmo_done() re-enters this function to service the
 * next entry, until the list is empty and tccb is released.
 */
void
mpii_scsi_cmd_tmo_handler(void *cookie, void *io)
{
	struct mpii_softc			*sc = cookie;
	struct mpii_ccb				*tccb = io;
	struct mpii_ccb				*ccb;
	struct mpii_msg_scsi_task_request	*stq;

	mtx_enter(&sc->sc_ccb_mtx);
	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_tmos);
	if (ccb != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_tmos, ccb_link);
		ccb->ccb_state = MPII_CCB_QUEUED;
	}
	/* should remove any other ccbs for the same dev handle */
	mtx_leave(&sc->sc_ccb_mtx);

	if (ccb == NULL) {
		/* nothing left to recover; give the task-mgmt ccb back */
		scsi_io_put(&sc->sc_iopool, tccb);
		return;
	}

	stq = tccb->ccb_cmd;
	stq->function = MPII_FUNCTION_SCSI_TASK_MGMT;
	stq->task_type = MPII_SCSI_TASK_TARGET_RESET;
	htolem16(&stq->dev_handle, ccb->ccb_dev_handle);

	tccb->ccb_done = mpii_scsi_cmd_tmo_done;
	mpii_start(sc, tccb);
}
2986 
/*
 * Target reset completed: loop back into the handler to process any
 * further timed-out ccbs (or release tccb if the list is empty).
 */
void
mpii_scsi_cmd_tmo_done(struct mpii_ccb *tccb)
{
	mpii_scsi_cmd_tmo_handler(tccb->ccb_sc, tccb);
}
2992 
/*
 * Completion handler for SCSI IO ccbs.  Cancels the command timeout
 * (unlinking the ccb from the timeout list if the timeout already
 * fired), unloads the data DMA map, and translates the IOC's reply —
 * if any — into midlayer status, error code, residual and sense data
 * before calling scsi_done().  No reply frame means unqualified
 * success.
 */
void
mpii_scsi_cmd_done(struct mpii_ccb *ccb)
{
	struct mpii_ccb		*tccb;
	struct mpii_msg_scsi_io_error	*sie;
	struct mpii_softc	*sc = ccb->ccb_sc;
	struct scsi_xfer	*xs = ccb->ccb_cookie;
	struct scsi_sense_data	*sense;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;

	timeout_del(&xs->stimeout);
	mtx_enter(&sc->sc_ccb_mtx);
	if (ccb->ccb_state == MPII_CCB_TIMEOUT) {
		/* ENOSIMPLEQ_REMOVE :( */
		if (ccb == SIMPLEQ_FIRST(&sc->sc_ccb_tmos))
			SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_tmos, ccb_link);
		else {
			SIMPLEQ_FOREACH(tccb, &sc->sc_ccb_tmos, ccb_link) {
				if (SIMPLEQ_NEXT(tccb, ccb_link) == ccb) {
					SIMPLEQ_REMOVE_AFTER(&sc->sc_ccb_tmos,
					    tccb, ccb_link);
					break;
				}
			}
		}
	}

	ccb->ccb_state = MPII_CCB_READY;
	mtx_leave(&sc->sc_ccb_mtx);

	if (xs->datalen != 0) {
		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, dmap);
	}

	xs->error = XS_NOERROR;
	xs->resid = 0;

	if (ccb->ccb_rcb == NULL) {
		/* no scsi error, we're ok so drop out early */
		xs->status = SCSI_OK;
		goto done;
	}

	sie = ccb->ccb_rcb->rcb_reply;

	DNPRINTF(MPII_D_CMD, "%s: mpii_scsi_cmd_done xs cmd: 0x%02x len: %d "
	    "flags 0x%x\n", DEVNAME(sc), xs->cmd->opcode, xs->datalen,
	    xs->flags);
	DNPRINTF(MPII_D_CMD, "%s:  dev_handle: %d msg_length: %d "
	    "function: 0x%02x\n", DEVNAME(sc), letoh16(sie->dev_handle),
	    sie->msg_length, sie->function);
	DNPRINTF(MPII_D_CMD, "%s:  vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
	    sie->vp_id, sie->vf_id);
	DNPRINTF(MPII_D_CMD, "%s:  scsi_status: 0x%02x scsi_state: 0x%02x "
	    "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status,
	    sie->scsi_state, letoh16(sie->ioc_status));
	DNPRINTF(MPII_D_CMD, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(sie->ioc_loginfo));
	DNPRINTF(MPII_D_CMD, "%s:  transfer_count: %d\n", DEVNAME(sc),
	    letoh32(sie->transfer_count));
	DNPRINTF(MPII_D_CMD, "%s:  sense_count: %d\n", DEVNAME(sc),
	    letoh32(sie->sense_count));
	DNPRINTF(MPII_D_CMD, "%s:  response_info: 0x%08x\n", DEVNAME(sc),
	    letoh32(sie->response_info));
	DNPRINTF(MPII_D_CMD, "%s:  task_tag: 0x%04x\n", DEVNAME(sc),
	    letoh16(sie->task_tag));
	DNPRINTF(MPII_D_CMD, "%s:  bidirectional_transfer_count: 0x%08x\n",
	    DEVNAME(sc), letoh32(sie->bidirectional_transfer_count));

	if (sie->scsi_state & MPII_SCSIIO_STATE_NO_SCSI_STATUS)
		xs->status = SCSI_TERMINATED;
	else
		xs->status = sie->scsi_status;
	xs->resid = 0;

	/* map the MPI ioc_status (and scsi status) to a midlayer error */
	switch (lemtoh16(&sie->ioc_status) & MPII_IOCSTATUS_MASK) {
	case MPII_IOCSTATUS_SCSI_DATA_UNDERRUN:
		xs->resid = xs->datalen - lemtoh32(&sie->transfer_count);
		/* FALLTHROUGH */

	case MPII_IOCSTATUS_SUCCESS:
	case MPII_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			xs->error = XS_NOERROR;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			xs->error = XS_DRIVER_STUFFUP;
		}
		break;

	case MPII_IOCSTATUS_BUSY:
	case MPII_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_BUSY;
		break;

	case MPII_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPII_IOCSTATUS_SCSI_TASK_TERMINATED:
		xs->error = XS_RESET;
		break;

	case MPII_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPII_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	/* sense data lives at the tail of the request frame (see cmd setup) */
	sense = (struct scsi_sense_data *)((caddr_t)ccb->ccb_cmd +
	    sc->sc_request_size - sizeof(*sense));
	if (sie->scsi_state & MPII_SCSIIO_STATE_AUTOSENSE_VALID)
		memcpy(&xs->sense, sense, sizeof(xs->sense));

	DNPRINTF(MPII_D_CMD, "%s:  xs err: %d status: %#x\n", DEVNAME(sc),
	    xs->error, xs->status);

	mpii_push_reply(sc, ccb->ccb_rcb);
done:
	KERNEL_LOCK();
	scsi_done(xs);
	KERNEL_UNLOCK();
}
3132 
3133 int
3134 mpii_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
3135 {
3136 	struct mpii_softc	*sc = (struct mpii_softc *)link->adapter_softc;
3137 	struct mpii_device	*dev = sc->sc_devs[link->target];
3138 
3139 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_scsi_ioctl\n", DEVNAME(sc));
3140 
3141 	switch (cmd) {
3142 	case DIOCGCACHE:
3143 	case DIOCSCACHE:
3144 		if (dev != NULL && ISSET(dev->flags, MPII_DF_VOLUME)) {
3145 			return (mpii_ioctl_cache(link, cmd,
3146 			    (struct dk_cache *)addr));
3147 		}
3148 		break;
3149 
3150 	default:
3151 		if (sc->sc_ioctl)
3152 			return (sc->sc_ioctl(link->adapter_softc, cmd, addr));
3153 
3154 		break;
3155 	}
3156 
3157 	return (ENOTTY);
3158 }
3159 
/*
 * Get (DIOCGCACHE) or set (DIOCSCACHE) the write-cache state of the
 * RAID volume behind the given scsi_link.  Uses polled configuration
 * page requests and, for a state change, a polled RAID action request.
 * Returns 0 on success or an errno.
 */
int
mpii_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc)
{
	struct mpii_softc *sc = (struct mpii_softc *)link->adapter_softc;
	struct mpii_device *dev = sc->sc_devs[link->target];
	struct mpii_cfg_raid_vol_pg0 *vpg;
	struct mpii_msg_raid_action_request *req;
	struct mpii_msg_raid_action_reply *rep;
	struct mpii_cfg_hdr hdr;
	struct mpii_ccb	*ccb;
	u_int32_t addr = MPII_CFG_RAID_VOL_ADDR_HANDLE | dev->dev_handle;
	size_t pagelen;
	int rv = 0;
	int enabled;

	/* Fetch the header for RAID volume page 0 (polled, no sleeping). */
	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    addr, MPII_PG_POLL, &hdr) != 0)
		return (EINVAL);

	/* page_length is expressed in 32-bit words. */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (vpg == NULL)
		return (ENOMEM);

	if (mpii_req_cfg_page(sc, addr, MPII_PG_POLL, &hdr, 1,
	    vpg, pagelen) != 0) {
		rv = EINVAL;
		goto done;
	}

	/* Current write-cache state as reported by the volume settings. */
	enabled = ((lemtoh16(&vpg->volume_settings) &
	    MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_MASK) ==
	    MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_ENABLED) ? 1 : 0;

	if (cmd == DIOCGCACHE) {
		dc->wrcache = enabled;
		/* There is no controllable read cache on these volumes. */
		dc->rdcache = 0;
		goto done;
	} /* else DIOCSCACHE */

	if (dc->rdcache) {
		rv = EOPNOTSUPP;
		goto done;
	}

	/* Nothing to do if the requested state is already in effect. */
	if (((dc->wrcache) ? 1 : 0) == enabled)
		goto done;

	ccb = scsi_io_get(&sc->sc_iopool, SCSI_POLL);
	if (ccb == NULL) {
		rv = ENOMEM;
		goto done;
	}

	ccb->ccb_done = mpii_empty_done;

	/* Build a RAID action request toggling the volume write cache. */
	req = ccb->ccb_cmd;
	memset(req, 0, sizeof(*req));
	req->function = MPII_FUNCTION_RAID_ACTION;
	req->action = MPII_RAID_ACTION_CHANGE_VOL_WRITE_CACHE;
	htolem16(&req->vol_dev_handle, dev->dev_handle);
	htolem32(&req->action_data, dc->wrcache ?
	    MPII_RAID_VOL_WRITE_CACHE_ENABLE :
	    MPII_RAID_VOL_WRITE_CACHE_DISABLE);

	if (mpii_poll(sc, ccb) != 0) {
		/*
		 * NOTE(review): the ccb is not returned to the iopool on
		 * this path; presumably deliberate because the command may
		 * still be owned by the IOC -- confirm.
		 */
		rv = EIO;
		goto done;
	}

	if (ccb->ccb_rcb != NULL) {
		rep = ccb->ccb_rcb->rcb_reply;
		/*
		 * NOTE(review): ioc_status is compared without byte
		 * swapping; this works because MPII_IOCSTATUS_SUCCESS is
		 * zero -- confirm against mpiireg.h.
		 */
		if ((rep->ioc_status != MPII_IOCSTATUS_SUCCESS) ||
		    ((rep->action_data[0] &
		     MPII_RAID_VOL_WRITE_CACHE_MASK) !=
		    (dc->wrcache ? MPII_RAID_VOL_WRITE_CACHE_ENABLE :
		     MPII_RAID_VOL_WRITE_CACHE_DISABLE)))
			rv = EINVAL;
		mpii_push_reply(sc, ccb->ccb_rcb);
	}

	scsi_io_put(&sc->sc_iopool, ccb);

done:
	free(vpg, M_TEMP, pagelen);
	return (rv);
}
3247 
3248 #if NBIO > 0
3249 int
3250 mpii_ioctl(struct device *dev, u_long cmd, caddr_t addr)
3251 {
3252 	struct mpii_softc	*sc = (struct mpii_softc *)dev;
3253 	int			error = 0;
3254 
3255 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl ", DEVNAME(sc));
3256 
3257 	switch (cmd) {
3258 	case BIOCINQ:
3259 		DNPRINTF(MPII_D_IOCTL, "inq\n");
3260 		error = mpii_ioctl_inq(sc, (struct bioc_inq *)addr);
3261 		break;
3262 	case BIOCVOL:
3263 		DNPRINTF(MPII_D_IOCTL, "vol\n");
3264 		error = mpii_ioctl_vol(sc, (struct bioc_vol *)addr);
3265 		break;
3266 	case BIOCDISK:
3267 		DNPRINTF(MPII_D_IOCTL, "disk\n");
3268 		error = mpii_ioctl_disk(sc, (struct bioc_disk *)addr);
3269 		break;
3270 	default:
3271 		DNPRINTF(MPII_D_IOCTL, " invalid ioctl\n");
3272 		error = EINVAL;
3273 	}
3274 
3275 	return (error);
3276 }
3277 
3278 int
3279 mpii_ioctl_inq(struct mpii_softc *sc, struct bioc_inq *bi)
3280 {
3281 	int			i;
3282 
3283 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_inq\n", DEVNAME(sc));
3284 
3285 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
3286 	for (i = 0; i < sc->sc_max_devices; i++)
3287 		if (sc->sc_devs[i] &&
3288 		    ISSET(sc->sc_devs[i]->flags, MPII_DF_VOLUME))
3289 			bi->bi_novol++;
3290 	return (0);
3291 }
3292 
/*
 * bio(4) BIOCVOL handler: fill in the status, RAID level, disk count,
 * size and attached device name of the volume selected by bv_volid.
 * Returns 0 on success or an errno.
 */
int
mpii_ioctl_vol(struct mpii_softc *sc, struct bioc_vol *bv)
{
	struct mpii_cfg_raid_vol_pg0	*vpg;
	struct mpii_cfg_hdr		hdr;
	struct mpii_device		*dev;
	struct scsi_link		*lnk;
	struct device			*scdev;
	size_t				pagelen;
	u_int16_t			volh;
	int				rv, hcnt = 0;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_vol %d\n",
	    DEVNAME(sc), bv->bv_volid);

	if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL)
		return (ENODEV);
	volh = dev->dev_handle;

	/* Fetch the header, then the contents, of RAID volume page 0. */
	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
		printf("%s: unable to fetch header for raid volume page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is expressed in 32-bit words. */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (vpg == NULL) {
		printf("%s: unable to allocate space for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
	    &hdr, 1, vpg, pagelen) != 0) {
		printf("%s: unable to fetch raid volume page 0\n",
		    DEVNAME(sc));
		free(vpg, M_TEMP, pagelen);
		return (EINVAL);
	}

	/* Translate the firmware volume state into a bio(4) status. */
	switch (vpg->volume_state) {
	case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
	case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
		/* A resyncing degraded volume is reported as rebuilding. */
		if (ISSET(lemtoh32(&vpg->volume_status),
		    MPII_CFG_RAID_VOL_0_STATUS_RESYNC)) {
			bv->bv_status = BIOC_SVREBUILD;
			bv->bv_percent = dev->percent;
		} else
			bv->bv_status = BIOC_SVDEGRADED;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_FAILED:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
		bv->bv_status = BIOC_SVBUILDING;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_MISSING:
	default:
		bv->bv_status = BIOC_SVINVALID;
		break;
	}

	/* Map the firmware volume type onto a numeric RAID level. */
	switch (vpg->volume_type) {
	case MPII_CFG_RAID_VOL_0_TYPE_RAID0:
		bv->bv_level = 0;
		break;
	case MPII_CFG_RAID_VOL_0_TYPE_RAID1:
		bv->bv_level = 1;
		break;
	case MPII_CFG_RAID_VOL_0_TYPE_RAID1E:
	case MPII_CFG_RAID_VOL_0_TYPE_RAID10:
		bv->bv_level = 10;
		break;
	default:
		bv->bv_level = -1;
	}

	/* Hot spares in this volume's pool count towards bv_nodisk. */
	if ((rv = mpii_bio_hs(sc, NULL, 0, vpg->hot_spare_pool, &hcnt)) != 0) {
		free(vpg, M_TEMP, pagelen);
		return (rv);
	}

	bv->bv_nodisk = vpg->num_phys_disks + hcnt;

	bv->bv_size = letoh64(vpg->max_lba) * lemtoh16(&vpg->block_size);

	/* Report the name of the sd(4) device attached to the volume. */
	lnk = scsi_get_link(sc->sc_scsibus, dev->slot, 0);
	if (lnk != NULL) {
		scdev = lnk->device_softc;
		strlcpy(bv->bv_dev, scdev->dv_xname, sizeof(bv->bv_dev));
	}

	free(vpg, M_TEMP, pagelen);
	return (0);
}
3393 
3394 int
3395 mpii_ioctl_disk(struct mpii_softc *sc, struct bioc_disk *bd)
3396 {
3397 	struct mpii_cfg_raid_vol_pg0		*vpg;
3398 	struct mpii_cfg_raid_vol_pg0_physdisk	*pd;
3399 	struct mpii_cfg_hdr			hdr;
3400 	struct mpii_device			*dev;
3401 	size_t					pagelen;
3402 	u_int16_t				volh;
3403 	u_int8_t				dn;
3404 
3405 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_disk %d/%d\n",
3406 	    DEVNAME(sc), bd->bd_volid, bd->bd_diskid);
3407 
3408 	if ((dev = mpii_find_vol(sc, bd->bd_volid)) == NULL)
3409 		return (ENODEV);
3410 	volh = dev->dev_handle;
3411 
3412 	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3413 	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
3414 		printf("%s: unable to fetch header for raid volume page 0\n",
3415 		    DEVNAME(sc));
3416 		return (EINVAL);
3417 	}
3418 
3419 	pagelen = hdr.page_length * 4;
3420 	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
3421 	if (vpg == NULL) {
3422 		printf("%s: unable to allocate space for raid "
3423 		    "volume page 0\n", DEVNAME(sc));
3424 		return (ENOMEM);
3425 	}
3426 
3427 	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
3428 	    &hdr, 1, vpg, pagelen) != 0) {
3429 		printf("%s: unable to fetch raid volume page 0\n",
3430 		    DEVNAME(sc));
3431 		free(vpg, M_TEMP, pagelen);
3432 		return (EINVAL);
3433 	}
3434 
3435 	if (bd->bd_diskid >= vpg->num_phys_disks) {
3436 		int		nvdsk = vpg->num_phys_disks;
3437 		int		hsmap = vpg->hot_spare_pool;
3438 
3439 		free(vpg, M_TEMP, pagelen);
3440 		return (mpii_bio_hs(sc, bd, nvdsk, hsmap, NULL));
3441 	}
3442 
3443 	pd = (struct mpii_cfg_raid_vol_pg0_physdisk *)(vpg + 1) +
3444 	    bd->bd_diskid;
3445 	dn = pd->phys_disk_num;
3446 
3447 	free(vpg, M_TEMP, pagelen);
3448 	return (mpii_bio_disk(sc, bd, dn));
3449 }
3450 
/*
 * Walk the active RAID configuration looking for hot spares in the
 * pool described by the hsmap bitmap.  When bd is non-NULL, fill it
 * in for the hot spare matching bd_diskid (counted after the nvdsk
 * volume members); otherwise just count spares into *hscnt.
 * Returns 0 on success or an errno.
 */
int
mpii_bio_hs(struct mpii_softc *sc, struct bioc_disk *bd, int nvdsk,
     int hsmap, int *hscnt)
{
	struct mpii_cfg_raid_config_pg0	*cpg;
	struct mpii_raid_config_element	*el;
	struct mpii_ecfg_hdr		ehdr;
	size_t				pagelen;
	int				i, nhs = 0;

	if (bd)
		DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs %d\n", DEVNAME(sc),
		    bd->bd_diskid - nvdsk);
	else
		DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs\n", DEVNAME(sc));

	/* RAID config page 0 uses the extended config header format. */
	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_CONFIG,
	    0, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG, MPII_PG_EXTENDED,
	    &ehdr) != 0) {
		printf("%s: unable to fetch header for raid config page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* ext_page_length is expressed in 32-bit words. */
	pagelen = lemtoh16(&ehdr.ext_page_length) * 4;
	cpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (cpg == NULL) {
		printf("%s: unable to allocate space for raid config page 0\n",
		    DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG,
	    MPII_PG_EXTENDED, &ehdr, 1, cpg, pagelen) != 0) {
		printf("%s: unable to fetch raid config page 0\n",
		    DEVNAME(sc));
		free(cpg, M_TEMP, pagelen);
		return (EINVAL);
	}

	/* Config elements follow the page header proper. */
	el = (struct mpii_raid_config_element *)(cpg + 1);
	for (i = 0; i < cpg->num_elements; i++, el++) {
		if (ISSET(lemtoh16(&el->element_flags),
		    MPII_RAID_CONFIG_ELEMENT_FLAG_HSP_PHYS_DISK) &&
		    el->hot_spare_pool == hsmap) {
			/*
			 * diskid comparison is based on the idea that all
			 * disks are counted by the bio(4) in sequence, thus
			 * subtracting the number of disks in the volume
			 * from the diskid yields us a "relative" hotspare
			 * number, which is good enough for us.
			 */
			if (bd != NULL && bd->bd_diskid == nhs + nvdsk) {
				u_int8_t dn = el->phys_disk_num;

				free(cpg, M_TEMP, pagelen);
				return (mpii_bio_disk(sc, bd, dn));
			}
			nhs++;
		}
	}

	if (hscnt)
		*hscnt = nhs;

	free(cpg, M_TEMP, pagelen);
	return (0);
}
3519 
/*
 * Fill in a bioc_disk structure for the physical disk identified by
 * its disk number dn: state, size, vendor/product and serial number,
 * all taken from RAID physical disk page 0.
 * Returns 0 on success or an errno.
 */
int
mpii_bio_disk(struct mpii_softc *sc, struct bioc_disk *bd, u_int8_t dn)
{
	struct mpii_cfg_raid_physdisk_pg0	*ppg;
	struct mpii_cfg_hdr			hdr;
	struct mpii_device			*dev;
	int					len;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_disk %d\n", DEVNAME(sc),
	    bd->bd_diskid);

	ppg = malloc(sizeof(*ppg), M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (ppg == NULL) {
		printf("%s: unable to allocate space for raid physical disk "
		    "page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	/* Build the config header by hand; the page has a fixed layout. */
	hdr.page_version = 0;
	hdr.page_length = sizeof(*ppg) / 4;
	hdr.page_number = 0;
	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_RAID_PD;

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_PHYS_DISK_ADDR_NUMBER | dn, 0,
	    &hdr, 1, ppg, sizeof(*ppg)) != 0) {
		printf("%s: unable to fetch raid drive page 0\n",
		    DEVNAME(sc));
		free(ppg, M_TEMP, sizeof(*ppg));
		return (EINVAL);
	}

	bd->bd_target = ppg->phys_disk_num;

	/* No device for this handle means the disk is gone. */
	if ((dev = mpii_find_dev(sc, lemtoh16(&ppg->dev_handle))) == NULL) {
		bd->bd_status = BIOC_SDINVALID;
		free(ppg, M_TEMP, sizeof(*ppg));
		return (0);
	}

	/* Translate the firmware disk state into a bio(4) status. */
	switch (ppg->phys_disk_state) {
	case MPII_CFG_RAID_PHYDISK_0_STATE_ONLINE:
	case MPII_CFG_RAID_PHYDISK_0_STATE_OPTIMAL:
		bd->bd_status = BIOC_SDONLINE;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_OFFLINE:
		/* Distinguish failed disks from merely offlined ones. */
		if (ppg->offline_reason ==
		    MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILED ||
		    ppg->offline_reason ==
		    MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILEDREQ)
			bd->bd_status = BIOC_SDFAILED;
		else
			bd->bd_status = BIOC_SDOFFLINE;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_DEGRADED:
		bd->bd_status = BIOC_SDFAILED;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_REBUILDING:
		bd->bd_status = BIOC_SDREBUILD;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_HOTSPARE:
		bd->bd_status = BIOC_SDHOTSPARE;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCONFIGURED:
		bd->bd_status = BIOC_SDUNUSED;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCOMPATIBLE:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = letoh64(ppg->dev_max_lba) * lemtoh16(&ppg->block_size);

	/* Report "VENDOR PRODUCT" in bd_vendor, separated by a space. */
	scsi_strvis(bd->bd_vendor, ppg->vendor_id, sizeof(ppg->vendor_id));
	len = strlen(bd->bd_vendor);
	bd->bd_vendor[len] = ' ';
	scsi_strvis(&bd->bd_vendor[len + 1], ppg->product_id,
	    sizeof(ppg->product_id));
	scsi_strvis(bd->bd_serial, ppg->serial, sizeof(ppg->serial));

	free(ppg, M_TEMP, sizeof(*ppg));
	return (0);
}
3603 
3604 struct mpii_device *
3605 mpii_find_vol(struct mpii_softc *sc, int volid)
3606 {
3607 	struct mpii_device	*dev = NULL;
3608 
3609 	if (sc->sc_vd_id_low + volid >= sc->sc_max_devices)
3610 		return (NULL);
3611 	dev = sc->sc_devs[sc->sc_vd_id_low + volid];
3612 	if (dev && ISSET(dev->flags, MPII_DF_VOLUME))
3613 		return (dev);
3614 	return (NULL);
3615 }
3616 
3617 #ifndef SMALL_KERNEL
3618 /*
3619  * Non-sleeping lightweight version of the mpii_ioctl_vol
3620  */
3621 int
3622 mpii_bio_volstate(struct mpii_softc *sc, struct bioc_vol *bv)
3623 {
3624 	struct mpii_cfg_raid_vol_pg0	*vpg;
3625 	struct mpii_cfg_hdr		hdr;
3626 	struct mpii_device		*dev = NULL;
3627 	size_t				pagelen;
3628 	u_int16_t			volh;
3629 
3630 	if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL)
3631 		return (ENODEV);
3632 	volh = dev->dev_handle;
3633 
3634 	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3635 	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, MPII_PG_POLL, &hdr) != 0) {
3636 		DNPRINTF(MPII_D_MISC, "%s: unable to fetch header for raid "
3637 		    "volume page 0\n", DEVNAME(sc));
3638 		return (EINVAL);
3639 	}
3640 
3641 	pagelen = hdr.page_length * 4;
3642 	vpg = malloc(pagelen, M_TEMP, M_NOWAIT | M_ZERO);
3643 	if (vpg == NULL) {
3644 		DNPRINTF(MPII_D_MISC, "%s: unable to allocate space for raid "
3645 		    "volume page 0\n", DEVNAME(sc));
3646 		return (ENOMEM);
3647 	}
3648 
3649 	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh,
3650 	    MPII_PG_POLL, &hdr, 1, vpg, pagelen) != 0) {
3651 		DNPRINTF(MPII_D_MISC, "%s: unable to fetch raid volume "
3652 		    "page 0\n", DEVNAME(sc));
3653 		free(vpg, M_TEMP, pagelen);
3654 		return (EINVAL);
3655 	}
3656 
3657 	switch (vpg->volume_state) {
3658 	case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
3659 	case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
3660 		bv->bv_status = BIOC_SVONLINE;
3661 		break;
3662 	case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
3663 		if (ISSET(lemtoh32(&vpg->volume_status),
3664 		    MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
3665 			bv->bv_status = BIOC_SVREBUILD;
3666 		else
3667 			bv->bv_status = BIOC_SVDEGRADED;
3668 		break;
3669 	case MPII_CFG_RAID_VOL_0_STATE_FAILED:
3670 		bv->bv_status = BIOC_SVOFFLINE;
3671 		break;
3672 	case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
3673 		bv->bv_status = BIOC_SVBUILDING;
3674 		break;
3675 	case MPII_CFG_RAID_VOL_0_STATE_MISSING:
3676 	default:
3677 		bv->bv_status = BIOC_SVINVALID;
3678 		break;
3679 	}
3680 
3681 	free(vpg, M_TEMP, pagelen);
3682 	return (0);
3683 }
3684 
3685 int
3686 mpii_create_sensors(struct mpii_softc *sc)
3687 {
3688 	struct scsibus_softc	*ssc = sc->sc_scsibus;
3689 	struct device		*dev;
3690 	struct scsi_link	*link;
3691 	int			i;
3692 
3693 	sc->sc_sensors = mallocarray(sc->sc_vd_count, sizeof(struct ksensor),
3694 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3695 	if (sc->sc_sensors == NULL)
3696 		return (1);
3697 	sc->sc_nsensors = sc->sc_vd_count;
3698 
3699 	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
3700 	    sizeof(sc->sc_sensordev.xname));
3701 
3702 	for (i = 0; i < sc->sc_vd_count; i++) {
3703 		link = scsi_get_link(ssc, i + sc->sc_vd_id_low, 0);
3704 		if (link == NULL)
3705 			goto bad;
3706 
3707 		dev = link->device_softc;
3708 
3709 		sc->sc_sensors[i].type = SENSOR_DRIVE;
3710 		sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
3711 
3712 		strlcpy(sc->sc_sensors[i].desc, dev->dv_xname,
3713 		    sizeof(sc->sc_sensors[i].desc));
3714 
3715 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
3716 	}
3717 
3718 	if (sensor_task_register(sc, mpii_refresh_sensors, 10) == NULL)
3719 		goto bad;
3720 
3721 	sensordev_install(&sc->sc_sensordev);
3722 
3723 	return (0);
3724 
3725 bad:
3726 	free(sc->sc_sensors, M_DEVBUF, 0);
3727 
3728 	return (1);
3729 }
3730 
3731 void
3732 mpii_refresh_sensors(void *arg)
3733 {
3734 	struct mpii_softc	*sc = arg;
3735 	struct bioc_vol		bv;
3736 	int			i;
3737 
3738 	for (i = 0; i < sc->sc_nsensors; i++) {
3739 		memset(&bv, 0, sizeof(bv));
3740 		bv.bv_volid = i;
3741 		if (mpii_bio_volstate(sc, &bv))
3742 			return;
3743 		switch(bv.bv_status) {
3744 		case BIOC_SVOFFLINE:
3745 			sc->sc_sensors[i].value = SENSOR_DRIVE_FAIL;
3746 			sc->sc_sensors[i].status = SENSOR_S_CRIT;
3747 			break;
3748 		case BIOC_SVDEGRADED:
3749 			sc->sc_sensors[i].value = SENSOR_DRIVE_PFAIL;
3750 			sc->sc_sensors[i].status = SENSOR_S_WARN;
3751 			break;
3752 		case BIOC_SVREBUILD:
3753 			sc->sc_sensors[i].value = SENSOR_DRIVE_REBUILD;
3754 			sc->sc_sensors[i].status = SENSOR_S_WARN;
3755 			break;
3756 		case BIOC_SVONLINE:
3757 			sc->sc_sensors[i].value = SENSOR_DRIVE_ONLINE;
3758 			sc->sc_sensors[i].status = SENSOR_S_OK;
3759 			break;
3760 		case BIOC_SVINVALID:
3761 			/* FALLTHROUGH */
3762 		default:
3763 			sc->sc_sensors[i].value = 0; /* unknown */
3764 			sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
3765 		}
3766 	}
3767 }
3768 #endif /* SMALL_KERNEL */
3769 #endif /* NBIO > 0 */
3770