xref: /netbsd-src/sys/dev/pci/mfii.c (revision 3f351f34c6d827cf017cdcff3543f6ec0c88b420)
1 /* $NetBSD: mfii.c,v 1.31 2023/10/05 21:41:00 christos Exp $ */
2 /* $OpenBSD: mfii.c,v 1.58 2018/08/14 05:22:21 jmatthew Exp $ */
3 
4 /*
5  * Copyright (c) 2018 Manuel Bouyer <Manuel.Bouyer@lip6.fr>
6  * Copyright (c) 2012 David Gwynne <dlg@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include <sys/cdefs.h>
22 __KERNEL_RCSID(0, "$NetBSD: mfii.c,v 1.31 2023/10/05 21:41:00 christos Exp $");
23 
24 #include "bio.h"
25 
26 #include <sys/atomic.h>
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/buf.h>
30 #include <sys/ioctl.h>
31 #include <sys/device.h>
32 #include <sys/kernel.h>
33 #include <sys/proc.h>
34 #include <sys/cpu.h>
35 #include <sys/conf.h>
36 #include <sys/kauth.h>
37 #include <sys/workqueue.h>
38 #include <sys/malloc.h>
39 
40 #include <uvm/uvm_param.h>
41 
42 #include <dev/pci/pcidevs.h>
43 #include <dev/pci/pcivar.h>
44 
45 #include <sys/bus.h>
46 
47 #include <dev/sysmon/sysmonvar.h>
48 #include <sys/envsys.h>
49 
50 #include <dev/scsipi/scsipi_all.h>
51 #include <dev/scsipi/scsi_all.h>
52 #include <dev/scsipi/scsi_spc.h>
53 #include <dev/scsipi/scsipi_disk.h>
54 #include <dev/scsipi/scsi_disk.h>
55 #include <dev/scsipi/scsiconf.h>
56 
57 #if NBIO > 0
58 #include <dev/biovar.h>
59 #endif /* NBIO > 0 */
60 
61 #include <dev/ic/mfireg.h>
62 #include <dev/pci/mpiireg.h>
63 
64 #define	MFII_BAR		0x14
65 #define MFII_BAR_35		0x10
66 #define	MFII_PCI_MEMSIZE	0x2000 /* 8k */
67 
68 #define MFII_OSTS_INTR_VALID	0x00000009
69 #define MFII_RPI		0x6c /* reply post host index */
70 #define MFII_OSP2		0xb4 /* outbound scratch pad 2 */
71 #define MFII_OSP3		0xb8 /* outbound scratch pad 3 */
72 
73 #define MFII_REQ_TYPE_SCSI	MPII_REQ_DESCR_SCSI_IO
74 #define MFII_REQ_TYPE_LDIO	(0x7 << 1)
75 #define MFII_REQ_TYPE_MFA	(0x1 << 1)
76 #define MFII_REQ_TYPE_NO_LOCK	(0x2 << 1)
77 #define MFII_REQ_TYPE_HI_PRI	(0x6 << 1)
78 
79 #define MFII_REQ_MFA(_a)	htole64((_a) | MFII_REQ_TYPE_MFA)
80 
81 #define MFII_FUNCTION_PASSTHRU_IO			(0xf0)
82 #define MFII_FUNCTION_LDIO_REQUEST			(0xf1)
83 
84 #define MFII_MAX_CHAIN_UNIT	0x00400000
85 #define MFII_MAX_CHAIN_MASK	0x000003E0
86 #define MFII_MAX_CHAIN_SHIFT	5
87 
88 #define MFII_256K_IO		128
89 #define MFII_1MB_IO		(MFII_256K_IO * 4)
90 
91 #define MFII_CHAIN_FRAME_MIN	1024
92 
/*
 * Request descriptor posted to the controller to start a command.
 * The MFII_REQ_TYPE_* request type is encoded in the low bits of
 * "flags"; "smid" selects which request frame the descriptor refers to.
 */
struct mfii_request_descr {
	u_int8_t	flags;		/* MFII_REQ_TYPE_* */
	u_int8_t	msix_index;
	u_int16_t	smid;		/* system message id (frame index) */

	u_int16_t	lmid;
	u_int16_t	dev_handle;
} __packed;
101 
102 #define MFII_RAID_CTX_IO_TYPE_SYSPD	(0x1 << 4)
103 #define MFII_RAID_CTX_TYPE_CUDA		(0x2 << 4)
104 
/*
 * RAID context appended to the MPII SCSI IO request for LD I/O
 * (MFII_FUNCTION_LDIO_REQUEST).  Layout is defined by the firmware
 * interface; the per-IOP values for type_nseg and reg_lock_flags come
 * from struct mfii_iop.
 */
struct mfii_raid_context {
	u_int8_t	type_nseg;	/* per-IOP ldio_ctx_type_nseg */
	u_int8_t	_reserved1;
	u_int16_t	timeout_value;

	u_int16_t	reg_lock_flags;
#define MFII_RAID_CTX_RL_FLAGS_SEQNO_EN	(0x08)
#define MFII_RAID_CTX_RL_FLAGS_CPU0	(0x00)
#define MFII_RAID_CTX_RL_FLAGS_CPU1	(0x10)
#define MFII_RAID_CTX_RL_FLAGS_CUDA	(0x80)

#define MFII_RAID_CTX_ROUTING_FLAGS_SQN	(1 << 4)
#define MFII_RAID_CTX_ROUTING_FLAGS_CPU0 0
	u_int16_t	virtual_disk_target_id;

	u_int64_t	reg_lock_row_lba;

	u_int32_t	reg_lock_length;

	u_int16_t	next_lm_id;
	u_int8_t	ex_status;	/* extended status from the FW */
	u_int8_t	status;		/* MFI status of the command */

	u_int8_t	raid_flags;
	u_int8_t	num_sge;
	u_int16_t	config_seq_num;

	u_int8_t	span_arm;
	u_int8_t	_reserved3[3];
} __packed;
135 
/*
 * MPII scatter/gather element: 64-bit bus address plus length.
 * sg_flags carries the MFII_SGE_* bits below (address type,
 * chain element, end of list).
 */
struct mfii_sge {
	u_int64_t	sg_addr;
	u_int32_t	sg_len;
	u_int16_t	_reserved;
	u_int8_t	sg_next_chain_offset;
	u_int8_t	sg_flags;	/* MFII_SGE_* */
} __packed;
143 
144 #define MFII_SGE_ADDR_MASK		(0x03)
145 #define MFII_SGE_ADDR_SYSTEM		(0x00)
146 #define MFII_SGE_ADDR_IOCDDR		(0x01)
147 #define MFII_SGE_ADDR_IOCPLB		(0x02)
148 #define MFII_SGE_ADDR_IOCPLBNTA		(0x03)
149 #define MFII_SGE_END_OF_LIST		(0x40)
150 #define MFII_SGE_CHAIN_ELEMENT		(0x80)
151 
152 #define MFII_REQUEST_SIZE	256
153 
154 #define MR_DCMD_LD_MAP_GET_INFO			0x0300e101
155 
156 #define MFII_MAX_ROW		32
157 #define MFII_MAX_ARRAY		128
158 
/* One array's row -> physical device mapping (MFII_MAX_ROW entries). */
struct mfii_array_map {
	uint16_t		mam_pd[MFII_MAX_ROW];
} __packed;
162 
/* Device-handle record for one physical disk. */
struct mfii_dev_handle {
	uint16_t		mdh_cur_handle;	/* handle currently in use */
	uint8_t			mdh_valid;
	uint8_t			mdh_reserved;
	uint16_t		mdh_handle[2];	/* per-path handles */
} __packed;
169 
/*
 * Logical-disk map as returned by the MR_DCMD_LD_MAP_GET_INFO dcmd:
 * target-id translation table, PD timeout, per-array maps and the
 * device-handle table.
 */
struct mfii_ld_map {
	uint32_t		mlm_total_size;
	uint32_t		mlm_reserved1[5];
	uint32_t		mlm_num_lds;
	uint32_t		mlm_reserved2;
	uint8_t			mlm_tgtid_to_ld[2 * MFI_MAX_LD];
	uint8_t			mlm_pd_timeout;
	uint8_t			mlm_reserved3[7];
	struct mfii_array_map	mlm_am[MFII_MAX_ARRAY];
	struct mfii_dev_handle	mlm_dev_handle[MFI_MAX_PD];
} __packed;
181 
/*
 * SCSI task management command: a 128-byte MPII request followed by a
 * 128-byte reply, each padded and 8-byte aligned via the unions.
 */
struct mfii_task_mgmt {
	union {
		uint8_t			request[128];
		struct mpii_msg_scsi_task_request
					mpii_request;
	} __packed __aligned(8);

	union {
		uint8_t			reply[128];
		uint32_t		flags;	/* target kind of the abort */
#define MFII_TASK_MGMT_FLAGS_LD				(1 << 0)
#define MFII_TASK_MGMT_FLAGS_PD				(1 << 1)
		struct mpii_msg_scsi_task_reply
					mpii_reply;
	} __packed __aligned(8);
} __packed __aligned(8);
198 
/* We currently don't know the full details of the following struct */
struct mfii_foreign_scan_cfg {
	char data[24];		/* opaque 24-byte record */
} __packed;
203 
/* Result of a foreign-configuration scan: count plus up to 8 records. */
struct mfii_foreign_scan_info {
	uint32_t count; /* Number of foreign configs found */
	struct mfii_foreign_scan_cfg cfgs[8];
} __packed;
208 
209 #define MFII_MAX_LD_EXT		256
210 
/*
 * Extended logical-disk list (up to MFII_MAX_LD_EXT == 256 entries),
 * as returned by MR_DCMD_LD_GET_LIST when extended-LD support is
 * requested (see sc_max256vd handling in mfii_attach()).
 */
struct mfii_ld_list_ext {
	uint32_t		mll_no_ld;	/* number of valid entries */
	uint32_t		mll_res;
	struct {
		struct mfi_ld	mll_ld;
		uint8_t		mll_state; /* states are the same as MFI_ */
		uint8_t		mll_res2;
		uint8_t		mll_res3;
		uint8_t		mll_res4;
		uint64_t	mll_size;
	} mll_list[MFII_MAX_LD_EXT];
} __packed;
223 
/*
 * A single contiguous bus_dma allocation: dmamap, backing segment,
 * size and kernel virtual mapping.  Access via the MFII_DMA_* macros.
 */
struct mfii_dmamem {
	bus_dmamap_t		mdm_map;
	bus_dma_segment_t	mdm_seg;
	size_t			mdm_size;
	void *			mdm_kva;
};
#define MFII_DMA_MAP(_mdm)	((_mdm)->mdm_map)
#define MFII_DMA_LEN(_mdm)	((_mdm)->mdm_size)
#define MFII_DMA_DVA(_mdm)	((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MFII_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
234 
235 struct mfii_softc;
236 
237 typedef enum mfii_direction {
238 	MFII_DATA_NONE = 0,
239 	MFII_DATA_IN,
240 	MFII_DATA_OUT
241 } mfii_direction_t;
242 
/*
 * Per-command control block.  Each ccb owns a slice of the shared DMA
 * areas allocated at attach time (MPII request frame, MFI frame, sense
 * buffer and SGL space) plus two dmamaps for the data transfer.
 */
struct mfii_ccb {
	struct mfii_softc	*ccb_sc;
	void			*ccb_request;	/* MPII request frame, kva */
	u_int64_t		ccb_request_dva; /* same, device address */
	bus_addr_t		ccb_request_offset;

	void			*ccb_mfi;	/* MFI frame, kva */
	u_int64_t		ccb_mfi_dva;
	bus_addr_t		ccb_mfi_offset;

	/* also used as dcmd frame storage, see mfii_dcmd_frame() */
	struct mfi_sense	*ccb_sense;
	u_int64_t		ccb_sense_dva;
	bus_addr_t		ccb_sense_offset;

	struct mfii_sge		*ccb_sgl;	/* chain SGL space */
	u_int64_t		ccb_sgl_dva;
	bus_addr_t		ccb_sgl_offset;
	u_int			ccb_sgl_len;

	struct mfii_request_descr ccb_req;	/* descriptor to post */

	bus_dmamap_t		ccb_dmamap64;
	bus_dmamap_t		ccb_dmamap32;
	bool			ccb_dma64;	/* which dmamap is loaded */

	/* data for sgl */
	void			*ccb_data;
	size_t			ccb_len;

	mfii_direction_t	ccb_direction;

	void			*ccb_cookie;	/* caller's context */
	kmutex_t		ccb_mtx;
	kcondvar_t		ccb_cv;
	void			(*ccb_done)(struct mfii_softc *,
				    struct mfii_ccb *); /* completion hook */

	u_int32_t		ccb_flags;
#define MFI_CCB_F_ERR			(1<<0)
	u_int			ccb_smid;	/* SMID used in ccb_req */
	SIMPLEQ_ENTRY(mfii_ccb)	ccb_link;
};
SIMPLEQ_HEAD(mfii_ccb_list, mfii_ccb);
286 
/*
 * Per-chip-generation parameters.  One const instance exists per IOP
 * family (thunderbolt/25/35/aero below) and is selected at match time
 * via mfii_find_iop().
 */
struct mfii_iop {
	int bar;		/* which PCI BAR to map */
	int num_sge_loc;	/* MFII_IOP_NUM_SGE_LOC_* */
#define MFII_IOP_NUM_SGE_LOC_ORIG	0
#define MFII_IOP_NUM_SGE_LOC_35		1
	u_int16_t ldio_ctx_reg_lock_flags;
	u_int8_t ldio_req_type;
	u_int8_t ldio_ctx_type_nseg;
	u_int8_t sge_flag_chain;	/* sg_flags for a chain SGE */
	u_int8_t sge_flag_eol;		/* sg_flags marking end of list */
	u_int8_t iop_flag;		/* quirk/feature bits below */
#define MFII_IOP_QUIRK_REGREAD		0x01
#define MFII_IOP_HAS_32BITDESC_BIT	0x02
};
301 
/* Per-controller state. */
struct mfii_softc {
	device_t		sc_dev;
	struct scsipi_channel   sc_chan;
	struct scsipi_adapter   sc_adapt;

	const struct mfii_iop	*sc_iop;	/* chip-family parameters */
	u_int			sc_iop_flag;
#define MFII_IOP_DESC_32BIT		0x01

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;
	bus_dma_tag_t		sc_dmat64;
	bool			sc_64bit_dma;	/* pci_dma64_available() */

	void			*sc_ih;		/* interrupt handle */

	kmutex_t		sc_ccb_mtx;
	kmutex_t		sc_post_mtx;

	u_int			sc_max_fw_cmds;	/* limit reported by FW */
	u_int			sc_max_cmds;	/* commands we actually use */
	u_int			sc_max_sgl;	/* SGEs per command */

	u_int			sc_reply_postq_depth;
	u_int			sc_reply_postq_index;
	kmutex_t		sc_reply_postq_mtx;
	struct mfii_dmamem	*sc_reply_postq;

	struct mfii_dmamem	*sc_requests;	/* MPII request frames */
	struct mfii_dmamem	*sc_mfi;	/* MFI command frames */
	struct mfii_dmamem	*sc_sense;	/* sense (and dcmd) buffers */
	struct mfii_dmamem	*sc_sgl;	/* chain SGL space */

	struct mfii_ccb		*sc_ccb;
	struct mfii_ccb_list	sc_ccb_freeq;

	/* async event notification */
	struct mfii_ccb		*sc_aen_ccb;
	struct workqueue	*sc_aen_wq;
	struct work		sc_aen_work;

	kmutex_t		sc_abort_mtx;
	struct mfii_ccb_list	sc_abort_list;
	struct workqueue	*sc_abort_wq;
	struct work		sc_abort_work;

	/* save some useful information for logical drives that is missing
	 * in sc_ld_list
	 */
	struct {
		bool		ld_present;
		char		ld_dev[16];	/* device name sd? */
		int		ld_target_id;
	}			sc_ld[MFII_MAX_LD_EXT];
	int			sc_target_lds[MFII_MAX_LD_EXT];
	bool			sc_max256vd;	/* FW supports 256 LDs */

	/* bio */
	struct mfi_conf		*sc_cfg;
	struct mfi_ctrl_info	sc_info;
	struct mfii_ld_list_ext	sc_ld_list;
	struct mfi_ld_details	*sc_ld_details; /* array to all logical disks */
	int			sc_no_pd; /* used physical disks */
	int			sc_ld_sz; /* sizeof sc_ld_details */

	/* mgmt lock */
	kmutex_t		sc_lock;
	bool			sc_running;

	/* sensors */
	struct sysmon_envsys	*sc_sme;
	envsys_data_t		*sc_sensors;
	envsys_data_t		*sc_ld_sensors;
	bool			sc_bbuok;	/* battery backup unit ok */

	device_t		sc_child;
};
383 
384 // #define MFII_DEBUG
385 #ifdef MFII_DEBUG
386 #define DPRINTF(x...)		do { if (mfii_debug) printf(x); } while(0)
387 #define DNPRINTF(n,x...)	do { if (mfii_debug & n) printf(x); } while(0)
388 #define	MFII_D_CMD		0x0001
389 #define	MFII_D_INTR		0x0002
390 #define	MFII_D_MISC		0x0004
391 #define	MFII_D_DMA		0x0008
392 #define	MFII_D_IOCTL		0x0010
393 #define	MFII_D_RW		0x0020
394 #define	MFII_D_MEM		0x0040
395 #define	MFII_D_CCB		0x0080
396 uint32_t	mfii_debug = 0
397 /*		    | MFII_D_CMD */
398 /*		    | MFII_D_INTR */
399 		    | MFII_D_MISC
400 /*		    | MFII_D_DMA */
401 /*		    | MFII_D_IOCTL */
402 /*		    | MFII_D_RW */
403 /*		    | MFII_D_MEM */
404 /*		    | MFII_D_CCB */
405 		;
406 #else
407 #define DPRINTF(x...)
408 #define DNPRINTF(n,x...)
409 #endif
410 
411 static int	mfii_match(device_t, cfdata_t, void *);
412 static void	mfii_attach(device_t, device_t, void *);
413 static int	mfii_detach(device_t, int);
414 static int	mfii_rescan(device_t, const char *, const int *);
415 static void	mfii_childdetached(device_t, device_t);
416 static bool	mfii_suspend(device_t, const pmf_qual_t *);
417 static bool	mfii_resume(device_t, const pmf_qual_t *);
418 static bool	mfii_shutdown(device_t, int);
419 
420 
421 CFATTACH_DECL3_NEW(mfii, sizeof(struct mfii_softc),
422     mfii_match, mfii_attach, mfii_detach, NULL, mfii_rescan,
423 	mfii_childdetached, DVF_DETACH_SHUTDOWN);
424 
425 static void	mfii_scsipi_request(struct scsipi_channel *,
426 			scsipi_adapter_req_t, void *);
427 static void	mfii_scsi_cmd_done(struct mfii_softc *, struct mfii_ccb *);
428 
429 #define DEVNAME(_sc)		(device_xname((_sc)->sc_dev))
430 
431 static u_int32_t	mfii_read(struct mfii_softc *, bus_size_t);
432 static void		mfii_write(struct mfii_softc *, bus_size_t, u_int32_t);
433 
434 static struct mfii_dmamem *	mfii_dmamem_alloc(struct mfii_softc *, size_t);
435 static void		mfii_dmamem_free(struct mfii_softc *,
436 			    struct mfii_dmamem *);
437 
438 static struct mfii_ccb *	mfii_get_ccb(struct mfii_softc *);
439 static void		mfii_put_ccb(struct mfii_softc *, struct mfii_ccb *);
440 static int		mfii_init_ccb(struct mfii_softc *);
441 static void		mfii_scrub_ccb(struct mfii_ccb *);
442 
443 static int		mfii_transition_firmware(struct mfii_softc *);
444 static int		mfii_initialise_firmware(struct mfii_softc *);
445 static int		mfii_get_info(struct mfii_softc *);
446 
447 static void		mfii_start(struct mfii_softc *, struct mfii_ccb *);
448 static void		mfii_start64(struct mfii_softc *, struct mfii_ccb *);
449 static void		mfii_start_common(struct mfii_softc *,
450 			    struct mfii_ccb *, bool);
451 static void		mfii_done(struct mfii_softc *, struct mfii_ccb *);
452 static int		mfii_poll(struct mfii_softc *, struct mfii_ccb *);
453 static void		mfii_poll_done(struct mfii_softc *, struct mfii_ccb *);
454 static int		mfii_exec(struct mfii_softc *, struct mfii_ccb *);
455 static void		mfii_exec_done(struct mfii_softc *, struct mfii_ccb *);
456 static int		mfii_my_intr(struct mfii_softc *);
457 static int		mfii_intr(void *);
458 static void		mfii_postq(struct mfii_softc *);
459 
460 static int		mfii_load_ccb(struct mfii_softc *, struct mfii_ccb *,
461 			    void *, int);
462 static int		mfii_load_mfa(struct mfii_softc *, struct mfii_ccb *,
463 			    void *, int);
464 
465 static int		mfii_mfa_poll(struct mfii_softc *, struct mfii_ccb *);
466 
467 static int		mfii_mgmt(struct mfii_softc *, uint32_t,
468 			    const union mfi_mbox *, void *, size_t,
469 			    mfii_direction_t, bool);
470 static int		mfii_do_mgmt(struct mfii_softc *, struct mfii_ccb *,
471 			    uint32_t, const union mfi_mbox *, void *, size_t,
472 			    mfii_direction_t, bool);
473 static void		mfii_empty_done(struct mfii_softc *, struct mfii_ccb *);
474 
475 static int		mfii_scsi_cmd_io(struct mfii_softc *,
476 			    struct mfii_ccb *, struct scsipi_xfer *);
477 static int		mfii_scsi_cmd_cdb(struct mfii_softc *,
478 			    struct mfii_ccb *, struct scsipi_xfer *);
479 static void		mfii_scsi_cmd_tmo(void *);
480 
481 static void		mfii_abort_task(struct work *, void *);
482 static void		mfii_abort(struct mfii_softc *, struct mfii_ccb *,
483 			    uint16_t, uint16_t, uint8_t, uint32_t);
484 static void		mfii_scsi_cmd_abort_done(struct mfii_softc *,
485 			    struct mfii_ccb *);
486 
487 static int		mfii_aen_register(struct mfii_softc *);
488 static void		mfii_aen_start(struct mfii_softc *, struct mfii_ccb *,
489 			    struct mfii_dmamem *, uint32_t);
490 static void		mfii_aen_done(struct mfii_softc *, struct mfii_ccb *);
491 static void		mfii_aen(struct work *, void *);
492 static void		mfii_aen_unregister(struct mfii_softc *);
493 
494 static void		mfii_aen_pd_insert(struct mfii_softc *,
495 			    const struct mfi_evtarg_pd_address *);
496 static void		mfii_aen_pd_remove(struct mfii_softc *,
497 			    const struct mfi_evtarg_pd_address *);
498 static void		mfii_aen_pd_state_change(struct mfii_softc *,
499 			    const struct mfi_evtarg_pd_state *);
500 static void		mfii_aen_ld_update(struct mfii_softc *);
501 
502 #if NBIO > 0
503 static int	mfii_ioctl(device_t, u_long, void *);
504 static int	mfii_ioctl_inq(struct mfii_softc *, struct bioc_inq *);
505 static int	mfii_ioctl_vol(struct mfii_softc *, struct bioc_vol *);
506 static int	mfii_ioctl_disk(struct mfii_softc *, struct bioc_disk *);
507 static int	mfii_ioctl_alarm(struct mfii_softc *, struct bioc_alarm *);
508 static int	mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *);
509 static int	mfii_ioctl_setstate(struct mfii_softc *,
510 		    struct bioc_setstate *);
511 static int	mfii_bio_hs(struct mfii_softc *, int, int, void *);
512 static int	mfii_bio_getitall(struct mfii_softc *);
513 #endif /* NBIO > 0 */
514 
515 #if 0
516 static const char *mfi_bbu_indicators[] = {
517 	"pack missing",
518 	"voltage low",
519 	"temp high",
520 	"charge active",
521 	"discharge active",
522 	"learn cycle req'd",
523 	"learn cycle active",
524 	"learn cycle failed",
525 	"learn cycle timeout",
526 	"I2C errors",
527 	"replace pack",
528 	"low capacity",
529 	"periodic learn req'd"
530 };
531 #endif
532 
533 #define MFI_BBU_SENSORS 4
534 
535 static void	mfii_init_ld_sensor(struct mfii_softc *, envsys_data_t *, int);
536 static void	mfii_refresh_ld_sensor(struct mfii_softc *, envsys_data_t *);
537 static void	mfii_attach_sensor(struct mfii_softc *, envsys_data_t *);
538 static int	mfii_create_sensors(struct mfii_softc *);
539 static int	mfii_destroy_sensors(struct mfii_softc *);
540 static void	mfii_refresh_sensor(struct sysmon_envsys *, envsys_data_t *);
541 static void	mfii_bbu(struct mfii_softc *, envsys_data_t *);
542 
543 /*
544  * mfii boards support asynchronous (and non-polled) completion of
545  * dcmds by proxying them through a passthru mpii command that points
546  * at a dcmd frame. since the passthru command is submitted like
547  * the scsi commands using an SMID in the request descriptor,
 * ccb_request memory must contain the passthru command because
549  * that is what the SMID refers to. this means ccb_request cannot
550  * contain the dcmd. rather than allocating separate dma memory to
551  * hold the dcmd, we reuse the sense memory buffer for it.
552  */
553 
554 static void	mfii_dcmd_start(struct mfii_softc *, struct mfii_ccb *);
555 
556 static inline void
557 mfii_dcmd_scrub(struct mfii_ccb *ccb)
558 {
559 	memset(ccb->ccb_sense, 0, sizeof(*ccb->ccb_sense));
560 }
561 
562 static inline struct mfi_dcmd_frame *
563 mfii_dcmd_frame(struct mfii_ccb *ccb)
564 {
565 	CTASSERT(sizeof(struct mfi_dcmd_frame) <= sizeof(*ccb->ccb_sense));
566 	return ((struct mfi_dcmd_frame *)ccb->ccb_sense);
567 }
568 
569 static inline void
570 mfii_dcmd_sync(struct mfii_softc *sc, struct mfii_ccb *ccb, int flags)
571 {
572 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sense),
573 	    ccb->ccb_sense_offset, sizeof(*ccb->ccb_sense), flags);
574 }
575 
576 #define mfii_fw_state(_sc) mfii_read((_sc), MFI_OSP)
577 
578 static const struct mfii_iop mfii_iop_thunderbolt = {
579 	MFII_BAR,
580 	MFII_IOP_NUM_SGE_LOC_ORIG,
581 	0,
582 	MFII_REQ_TYPE_LDIO,
583 	0,
584 	MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA,
585 	0,
586 	0
587 };
588 
589 /*
590  * a lot of these values depend on us not implementing fastpath yet.
591  */
592 static const struct mfii_iop mfii_iop_25 = {
593 	MFII_BAR,
594 	MFII_IOP_NUM_SGE_LOC_ORIG,
595 	MFII_RAID_CTX_RL_FLAGS_CPU0, /* | MFII_RAID_CTX_RL_FLAGS_SEQNO_EN */
596 	MFII_REQ_TYPE_NO_LOCK,
597 	MFII_RAID_CTX_TYPE_CUDA | 0x1,
598 	MFII_SGE_CHAIN_ELEMENT,
599 	MFII_SGE_END_OF_LIST,
600 	0
601 };
602 
603 static const struct mfii_iop mfii_iop_35 = {
604 	MFII_BAR_35,
605 	MFII_IOP_NUM_SGE_LOC_35,
606 	MFII_RAID_CTX_ROUTING_FLAGS_CPU0, /* | MFII_RAID_CTX_ROUTING_FLAGS_SQN */
607 	MFII_REQ_TYPE_NO_LOCK,
608 	MFII_RAID_CTX_TYPE_CUDA | 0x1,
609 	MFII_SGE_CHAIN_ELEMENT,
610 	MFII_SGE_END_OF_LIST,
611 	0
612 };
613 
614 static const struct mfii_iop mfii_iop_aero = {
615 	MFII_BAR_35,
616 	MFII_IOP_NUM_SGE_LOC_35,
617 	MFII_RAID_CTX_ROUTING_FLAGS_CPU0, /* | MFII_RAID_CTX_ROUTING_FLAGS_SQN */
618 	MFII_REQ_TYPE_NO_LOCK,
619 	MFII_RAID_CTX_TYPE_CUDA | 0x1,
620 	MFII_SGE_CHAIN_ELEMENT,
621 	MFII_SGE_END_OF_LIST,
622 	MFII_IOP_QUIRK_REGREAD | MFII_IOP_HAS_32BITDESC_BIT
623 };
624 
/* Table entry mapping one PCI vendor/product pair to its IOP params. */
struct mfii_device {
	pcireg_t		mpd_vendor;
	pcireg_t		mpd_product;
	const struct mfii_iop	*mpd_iop;
};
630 
/*
 * Supported controllers.  The comments are the LSI/Avago/Broadcom
 * board code names for each generation.
 */
static const struct mfii_device mfii_devices[] = {
	/* Fusion */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_2208,
	    &mfii_iop_thunderbolt },
	/* Fury */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3008,
	    &mfii_iop_25 },
	/* Invader */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3108,
	    &mfii_iop_25 },
	/* Intruder */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3316,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3324,
	    &mfii_iop_25 },
	/* Cutlass */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_32XX_1,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_32XX_2,
	    &mfii_iop_25 },
	/* Crusader */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3404,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3416,
	    &mfii_iop_35 },
	/* Ventura */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3504,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3516,
	    &mfii_iop_35 },
	/* Tomcat */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3408,
	    &mfii_iop_35 },
	/* Harpoon */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3508,
	    &mfii_iop_35 },
	/* Aero */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_39XX_2,
	    &mfii_iop_aero },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_39XX_3,
	    &mfii_iop_aero },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_38XX_2,
	    &mfii_iop_aero },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_38XX_3,
	    &mfii_iop_aero }
};
677 
678 static const struct mfii_iop *mfii_find_iop(struct pci_attach_args *);
679 
680 static const struct mfii_iop *
681 mfii_find_iop(struct pci_attach_args *pa)
682 {
683 	const struct mfii_device *mpd;
684 	int i;
685 
686 	for (i = 0; i < __arraycount(mfii_devices); i++) {
687 		mpd = &mfii_devices[i];
688 
689 		if (mpd->mpd_vendor == PCI_VENDOR(pa->pa_id) &&
690 		    mpd->mpd_product == PCI_PRODUCT(pa->pa_id))
691 			return (mpd->mpd_iop);
692 	}
693 
694 	return (NULL);
695 }
696 
697 static int
698 mfii_match(device_t parent, cfdata_t match, void *aux)
699 {
700 	return ((mfii_find_iop(aux) != NULL) ? 2 : 0);
701 }
702 
703 static void
704 mfii_attach(device_t parent, device_t self, void *aux)
705 {
706 	struct mfii_softc *sc = device_private(self);
707 	struct pci_attach_args *pa = aux;
708 	pcireg_t memtype;
709 	pci_intr_handle_t *ihp;
710 	char intrbuf[PCI_INTRSTR_LEN];
711 	const char *intrstr;
712 	u_int32_t status, scpad2, scpad3;
713 	int chain_frame_sz, nsge_in_io, nsge_in_chain, i;
714 	struct scsipi_adapter *adapt = &sc->sc_adapt;
715 	struct scsipi_channel *chan = &sc->sc_chan;
716 	union mfi_mbox mbox;
717 
718 	/* init sc */
719 	sc->sc_dev = self;
720 	sc->sc_iop = mfii_find_iop(aux);
721 	sc->sc_dmat = pa->pa_dmat;
722 	if (pci_dma64_available(pa)) {
723 		sc->sc_dmat64 = pa->pa_dmat64;
724 		sc->sc_64bit_dma = 1;
725 	} else {
726 		sc->sc_dmat64 = pa->pa_dmat;
727 		sc->sc_64bit_dma = 0;
728 	}
729 	SIMPLEQ_INIT(&sc->sc_ccb_freeq);
730 	mutex_init(&sc->sc_ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
731 	mutex_init(&sc->sc_post_mtx, MUTEX_DEFAULT, IPL_BIO);
732 	mutex_init(&sc->sc_reply_postq_mtx, MUTEX_DEFAULT, IPL_BIO);
733 
734 	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
735 
736 	sc->sc_aen_ccb = NULL;
737 	snprintf(intrbuf, sizeof(intrbuf) - 1, "%saen", device_xname(self));
738 	workqueue_create(&sc->sc_aen_wq, intrbuf, mfii_aen, sc,
739 	    PRI_BIO, IPL_BIO, WQ_MPSAFE);
740 
741 	snprintf(intrbuf, sizeof(intrbuf) - 1, "%sabrt", device_xname(self));
742 	workqueue_create(&sc->sc_abort_wq, intrbuf, mfii_abort_task,
743 	    sc, PRI_BIO, IPL_BIO, WQ_MPSAFE);
744 
745 	mutex_init(&sc->sc_abort_mtx, MUTEX_DEFAULT, IPL_BIO);
746 	SIMPLEQ_INIT(&sc->sc_abort_list);
747 
748 	/* wire up the bus shizz */
749 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_iop->bar);
750 	memtype |= PCI_MAPREG_MEM_TYPE_32BIT;
751 	if (pci_mapreg_map(pa, sc->sc_iop->bar, memtype, 0,
752 	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)) {
753 		aprint_error(": unable to map registers\n");
754 		return;
755 	}
756 
757 	/* disable interrupts */
758 	mfii_write(sc, MFI_OMSK, 0xffffffff);
759 
760 	if (pci_intr_alloc(pa, &ihp, NULL, 0)) {
761 		aprint_error(": unable to map interrupt\n");
762 		goto pci_unmap;
763 	}
764 	intrstr = pci_intr_string(pa->pa_pc, ihp[0], intrbuf, sizeof(intrbuf));
765 	pci_intr_setattr(pa->pa_pc, &ihp[0], PCI_INTR_MPSAFE, true);
766 
767 	/* lets get started */
768 	if (mfii_transition_firmware(sc))
769 		goto pci_unmap;
770 	sc->sc_running = true;
771 
772 	/* determine max_cmds (refer to the Linux megaraid_sas driver) */
773 	scpad3 = mfii_read(sc, MFII_OSP3);
774 	status = mfii_fw_state(sc);
775 	sc->sc_max_fw_cmds = scpad3 & MFI_STATE_MAXCMD_MASK;
776 	if (sc->sc_max_fw_cmds == 0)
777 		sc->sc_max_fw_cmds = status & MFI_STATE_MAXCMD_MASK;
778 	/*
779 	 * reduce max_cmds by 1 to ensure that the reply queue depth does not
780 	 * exceed FW supplied max_fw_cmds.
781 	 */
782 	sc->sc_max_cmds = uimin(sc->sc_max_fw_cmds, 1024) - 1;
783 
784 	/* determine max_sgl (refer to the Linux megaraid_sas driver) */
785 	scpad2 = mfii_read(sc, MFII_OSP2);
786 	chain_frame_sz =
787 		((scpad2 & MFII_MAX_CHAIN_MASK) >> MFII_MAX_CHAIN_SHIFT) *
788 		((scpad2 & MFII_MAX_CHAIN_UNIT) ? MFII_1MB_IO : MFII_256K_IO);
789 	if (chain_frame_sz < MFII_CHAIN_FRAME_MIN)
790 		chain_frame_sz = MFII_CHAIN_FRAME_MIN;
791 
792 	nsge_in_io = (MFII_REQUEST_SIZE -
793 		sizeof(struct mpii_msg_scsi_io) -
794 		sizeof(struct mfii_raid_context)) / sizeof(struct mfii_sge);
795 	nsge_in_chain = chain_frame_sz / sizeof(struct mfii_sge);
796 
797 	/* round down to nearest power of two */
798 	sc->sc_max_sgl = 1;
799 	while ((sc->sc_max_sgl << 1) <= (nsge_in_io + nsge_in_chain))
800 		sc->sc_max_sgl <<= 1;
801 
802 	/* Check for atomic(32bit) descriptor */
803 	if (((sc->sc_iop->iop_flag & MFII_IOP_HAS_32BITDESC_BIT) != 0) &&
804 	    ((scpad2 & MFI_STATE_ATOMIC_DESCRIPTOR) != 0))
805 		sc->sc_iop_flag |= MFII_IOP_DESC_32BIT;
806 
807 	DNPRINTF(MFII_D_MISC, "%s: OSP 0x%08x, OSP2 0x%08x, OSP3 0x%08x\n",
808 	    DEVNAME(sc), status, scpad2, scpad3);
809 	DNPRINTF(MFII_D_MISC, "%s: max_fw_cmds %d, max_cmds %d\n",
810 	    DEVNAME(sc), sc->sc_max_fw_cmds, sc->sc_max_cmds);
811 	DNPRINTF(MFII_D_MISC, "%s: nsge_in_io %d, nsge_in_chain %d, "
812 	    "max_sgl %d\n", DEVNAME(sc), nsge_in_io, nsge_in_chain,
813 	    sc->sc_max_sgl);
814 
815 	/* sense memory */
816 	CTASSERT(sizeof(struct mfi_sense) == MFI_SENSE_SIZE);
817 	sc->sc_sense = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
818 	if (sc->sc_sense == NULL) {
819 		aprint_error(": unable to allocate sense memory\n");
820 		goto pci_unmap;
821 	}
822 
823 	/* reply post queue */
824 	sc->sc_reply_postq_depth = roundup(sc->sc_max_fw_cmds, 16);
825 
826 	sc->sc_reply_postq = mfii_dmamem_alloc(sc,
827 	    sc->sc_reply_postq_depth * sizeof(struct mpii_reply_descr));
828 	if (sc->sc_reply_postq == NULL)
829 		goto free_sense;
830 
831 	memset(MFII_DMA_KVA(sc->sc_reply_postq), 0xff,
832 	    MFII_DMA_LEN(sc->sc_reply_postq));
833 
834 	/* MPII request frame array */
835 	sc->sc_requests = mfii_dmamem_alloc(sc,
836 	    MFII_REQUEST_SIZE * (sc->sc_max_cmds + 1));
837 	if (sc->sc_requests == NULL)
838 		goto free_reply_postq;
839 
840 	/* MFI command frame array */
841 	sc->sc_mfi = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_FRAME_SIZE);
842 	if (sc->sc_mfi == NULL)
843 		goto free_requests;
844 
845 	/* MPII SGL array */
846 	sc->sc_sgl = mfii_dmamem_alloc(sc, sc->sc_max_cmds *
847 	    sizeof(struct mfii_sge) * sc->sc_max_sgl);
848 	if (sc->sc_sgl == NULL)
849 		goto free_mfi;
850 
851 	if (mfii_init_ccb(sc) != 0) {
852 		aprint_error(": could not init ccb list\n");
853 		goto free_sgl;
854 	}
855 
856 	/* kickstart firmware with all addresses and pointers */
857 	if (mfii_initialise_firmware(sc) != 0) {
858 		aprint_error(": could not initialize firmware\n");
859 		goto free_sgl;
860 	}
861 
862 	mutex_enter(&sc->sc_lock);
863 	if (mfii_get_info(sc) != 0) {
864 		mutex_exit(&sc->sc_lock);
865 		aprint_error(": could not retrieve controller information\n");
866 		goto free_sgl;
867 	}
868 	mutex_exit(&sc->sc_lock);
869 
870 	aprint_normal(": \"%s\", firmware %s",
871 	    sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
872 	if (le16toh(sc->sc_info.mci_memory_size) > 0) {
873 		aprint_normal(", %uMB cache",
874 		    le16toh(sc->sc_info.mci_memory_size));
875 	}
876 	aprint_normal("\n");
877 	aprint_naive("\n");
878 
879 	sc->sc_ih = pci_intr_establish_xname(sc->sc_pc, ihp[0], IPL_BIO,
880 	    mfii_intr, sc, DEVNAME(sc));
881 	if (sc->sc_ih == NULL) {
882 		aprint_error_dev(self, "can't establish interrupt");
883 		if (intrstr)
884 			aprint_error(" at %s", intrstr);
885 		aprint_error("\n");
886 		goto free_sgl;
887 	}
888 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
889 
890 	for (i = 0; i < sc->sc_info.mci_lds_present; i++)
891 		sc->sc_ld[i].ld_present = 1;
892 
893 	sc->sc_max256vd =
894 	    (sc->sc_info.mci_adapter_ops3 & MFI_INFO_AOPS3_SUPP_MAX_EXT_LDS) ?
895 	    true : false;
896 
897 	if (sc->sc_max256vd)
898 		aprint_verbose_dev(self, "Max 256 VD support\n");
899 
900 	memset(adapt, 0, sizeof(*adapt));
901 	adapt->adapt_dev = sc->sc_dev;
902 	adapt->adapt_nchannels = 1;
903 	/* keep a few commands for management */
904 	if (sc->sc_max_cmds > 4)
905 		adapt->adapt_openings = sc->sc_max_cmds - 4;
906 	else
907 		adapt->adapt_openings = sc->sc_max_cmds;
908 	adapt->adapt_max_periph = adapt->adapt_openings;
909 	adapt->adapt_request = mfii_scsipi_request;
910 	adapt->adapt_minphys = minphys;
911 	adapt->adapt_flags = SCSIPI_ADAPT_MPSAFE;
912 
913 	memset(chan, 0, sizeof(*chan));
914 	chan->chan_adapter = adapt;
915 	chan->chan_bustype = &scsi_sas_bustype;
916 	chan->chan_channel = 0;
917 	chan->chan_flags = 0;
918 	chan->chan_nluns = 8;
919 	chan->chan_ntargets = sc->sc_info.mci_max_lds;
920 	chan->chan_id = sc->sc_info.mci_max_lds;
921 
922 	mfii_rescan(sc->sc_dev, NULL, NULL);
923 
924 	if (mfii_aen_register(sc) != 0) {
925 		/* error printed by mfii_aen_register */
926 		goto intr_disestablish;
927 	}
928 
929 	memset(&mbox, 0, sizeof(mbox));
930 	if (sc->sc_max256vd)
931 		mbox.b[0] = 1;
932 	mutex_enter(&sc->sc_lock);
933 	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, &mbox, &sc->sc_ld_list,
934 	    sizeof(sc->sc_ld_list), MFII_DATA_IN, true) != 0) {
935 		mutex_exit(&sc->sc_lock);
936 		aprint_error_dev(self,
937 		    "getting list of logical disks failed\n");
938 		goto intr_disestablish;
939 	}
940 	mutex_exit(&sc->sc_lock);
941 	memset(sc->sc_target_lds, -1, sizeof(sc->sc_target_lds));
942 	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
943 		int target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
944 		sc->sc_target_lds[target] = i;
945 		sc->sc_ld[i].ld_target_id = target;
946 	}
947 
948 	/* enable interrupts */
949 	mfii_write(sc, MFI_OSTS, 0xffffffff);
950 	mfii_write(sc, MFI_OMSK, ~MFII_OSTS_INTR_VALID);
951 
952 #if NBIO > 0
953 	if (bio_register(sc->sc_dev, mfii_ioctl) != 0)
954 		panic("%s: controller registration failed", DEVNAME(sc));
955 #endif /* NBIO > 0 */
956 
957 	if (mfii_create_sensors(sc) != 0)
958 		aprint_error_dev(self, "unable to create sensors\n");
959 
960 	if (!pmf_device_register1(sc->sc_dev, mfii_suspend, mfii_resume,
961 	    mfii_shutdown))
962 		aprint_error_dev(self, "couldn't establish power handler\n");
963 	return;
964 intr_disestablish:
965 	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
966 free_sgl:
967 	mfii_dmamem_free(sc, sc->sc_sgl);
968 free_mfi:
969 	mfii_dmamem_free(sc, sc->sc_mfi);
970 free_requests:
971 	mfii_dmamem_free(sc, sc->sc_requests);
972 free_reply_postq:
973 	mfii_dmamem_free(sc, sc->sc_reply_postq);
974 free_sense:
975 	mfii_dmamem_free(sc, sc->sc_sense);
976 pci_unmap:
977 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
978 }
979 
#if 0
/*
 * Unported OpenBSD code: a per-target physical-device handle map
 * maintained with SRP (shared reference pointers).  NetBSD has no SRP
 * primitive, so this is compiled out; kept for reference in case
 * physical-disk passthrough support is ported.
 */
struct srp_gc mfii_dev_handles_gc =
    SRP_GC_INITIALIZER(mfii_dev_handles_dtor, NULL);

/* Look up the current firmware device handle for "target". */
static inline uint16_t
mfii_dev_handle(struct mfii_softc *sc, uint16_t target)
{
	struct srp_ref sr;
	uint16_t *map, handle;

	map = srp_enter(&sr, &sc->sc_pd->pd_dev_handles);
	handle = map[target];
	srp_leave(&sr);

	return (handle);
}

/*
 * Fetch the LD map from the firmware and publish a fresh device-handle
 * array via SRP.  Returns 0 on success, EIO on firmware error.
 */
static int
mfii_dev_handles_update(struct mfii_softc *sc)
{
	struct mfii_ld_map *lm;
	uint16_t *dev_handles = NULL;
	int i;
	int rv = 0;

	lm = malloc(sizeof(*lm), M_TEMP, M_WAITOK|M_ZERO);

	rv = mfii_mgmt(sc, MR_DCMD_LD_MAP_GET_INFO, NULL, lm, sizeof(*lm),
	    MFII_DATA_IN, false);

	if (rv != 0) {
		rv = EIO;
		goto free_lm;
	}

	dev_handles = mallocarray(MFI_MAX_PD, sizeof(*dev_handles),
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < MFI_MAX_PD; i++)
		dev_handles[i] = lm->mlm_dev_handle[i].mdh_cur_handle;

	/* commit the updated info */
	sc->sc_pd->pd_timeout = lm->mlm_pd_timeout;
	srp_update_locked(&mfii_dev_handles_gc,
	    &sc->sc_pd->pd_dev_handles, dev_handles);

free_lm:
	free(lm, M_TEMP, sizeof(*lm));

	return (rv);
}

/* SRP garbage-collector destructor: free a retired handle array. */
static void
mfii_dev_handles_dtor(void *null, void *v)
{
	uint16_t *dev_handles = v;

	free(dev_handles, M_DEVBUF, sizeof(*dev_handles) * MFI_MAX_PD);
}
#endif /* 0 */
1040 
/*
 * Autoconf detach entry point: undo everything mfii_attach() set up,
 * in reverse order of attachment.
 */
static int
mfii_detach(device_t self, int flags)
{
	struct mfii_softc *sc = device_private(self);
	int error;

	/* If attach never hooked the interrupt there is nothing to undo. */
	if (sc->sc_ih == NULL)
		return (0);

	/* Detach child busses first; fail if any child refuses. */
	if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
		return error;

	mfii_destroy_sensors(sc);
#if NBIO > 0
	bio_unregister(sc->sc_dev);
#endif
	/* Stop the firmware, then mask all interrupts. */
	mfii_shutdown(sc->sc_dev, 0);
	mfii_write(sc, MFI_OMSK, 0xffffffff);

	mfii_aen_unregister(sc);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	/* Release DMA memory in the reverse order of allocation. */
	mfii_dmamem_free(sc, sc->sc_sgl);
	mfii_dmamem_free(sc, sc->sc_mfi);
	mfii_dmamem_free(sc, sc->sc_requests);
	mfii_dmamem_free(sc, sc->sc_reply_postq);
	mfii_dmamem_free(sc, sc->sc_sense);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);

	return (0);
}
1071 
1072 static int
1073 mfii_rescan(device_t self, const char *ifattr, const int *locators)
1074 {
1075 	struct mfii_softc *sc = device_private(self);
1076 
1077 	if (sc->sc_child != NULL)
1078 		return 0;
1079 
1080 	sc->sc_child = config_found(self, &sc->sc_chan, scsiprint,
1081 	    CFARGS_NONE);
1082 	return 0;
1083 }
1084 
1085 static void
1086 mfii_childdetached(device_t self, device_t child)
1087 {
1088 	struct mfii_softc *sc = device_private(self);
1089 
1090 	KASSERT(self == sc->sc_dev);
1091 	KASSERT(child == sc->sc_child);
1092 
1093 	if (child == sc->sc_child)
1094 		sc->sc_child = NULL;
1095 }
1096 
/*
 * pmf suspend handler.  Not implemented yet, so refuse the suspend
 * request by returning false.
 */
static bool
mfii_suspend(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}
1103 
/*
 * pmf resume handler.  Not implemented yet, so report failure.
 */
static bool
mfii_resume(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}
1110 
1111 static bool
1112 mfii_shutdown(device_t dev, int how)
1113 {
1114 	struct mfii_softc	*sc = device_private(dev);
1115 	struct mfii_ccb *ccb;
1116 	union mfi_mbox		mbox;
1117 	bool rv = true;
1118 
1119 	memset(&mbox, 0, sizeof(mbox));
1120 
1121 	mutex_enter(&sc->sc_lock);
1122 	DNPRINTF(MFII_D_MISC, "%s: mfii_shutdown\n", DEVNAME(sc));
1123 	ccb = mfii_get_ccb(sc);
1124 	if (ccb == NULL)
1125 		return false;
1126 	mutex_enter(&sc->sc_ccb_mtx);
1127 	if (sc->sc_running) {
1128 		sc->sc_running = 0; /* prevent new commands */
1129 		mutex_exit(&sc->sc_ccb_mtx);
1130 #if 0 /* XXX why does this hang ? */
1131 		mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
1132 		mfii_scrub_ccb(ccb);
1133 		if (mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH, &mbox,
1134 		    NULL, 0, MFII_DATA_NONE, true)) {
1135 			aprint_error_dev(dev,
1136 			    "shutdown: cache flush failed\n");
1137 			rv = false;
1138 			goto fail;
1139 		}
1140 		printf("ok1\n");
1141 #endif
1142 		mbox.b[0] = 0;
1143 		mfii_scrub_ccb(ccb);
1144 		if (mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_SHUTDOWN, &mbox,
1145 		    NULL, 0, MFII_DATA_NONE, true)) {
1146 			aprint_error_dev(dev, "shutdown: "
1147 			    "firmware shutdown failed\n");
1148 			rv = false;
1149 			goto fail;
1150 		}
1151 	} else {
1152 		mutex_exit(&sc->sc_ccb_mtx);
1153 	}
1154 fail:
1155 	mfii_put_ccb(sc, ccb);
1156 	mutex_exit(&sc->sc_lock);
1157 	return rv;
1158 }
1159 
1160 /* Register read function without retry */
1161 static inline u_int32_t
1162 mfii_read_wor(struct mfii_softc *sc, bus_size_t r)
1163 {
1164 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1165 	    BUS_SPACE_BARRIER_READ);
1166 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
1167 }
1168 
1169 static u_int32_t
1170 mfii_read(struct mfii_softc *sc, bus_size_t r)
1171 {
1172 	uint32_t rv;
1173 	int i = 0;
1174 
1175 	if ((sc->sc_iop->iop_flag & MFII_IOP_QUIRK_REGREAD) != 0) {
1176 		do {
1177 			rv = mfii_read_wor(sc, r);
1178 			i++;
1179 		} while ((rv == 0) && (i < 3));
1180 	} else
1181 		rv = mfii_read_wor(sc, r);
1182 
1183 	return rv;
1184 }
1185 
/*
 * Write a 32bit register, followed by a write barrier so the store is
 * ordered before any subsequent register access.
 */
static void
mfii_write(struct mfii_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
1193 
/*
 * Allocate "size" bytes of zeroed DMA-safe memory in a single segment,
 * together with a loaded DMA map, wrapped in a struct mfii_dmamem.
 * Returns NULL on failure; partial setup is unwound via the goto chain
 * in reverse order of construction.
 */
static struct mfii_dmamem *
mfii_dmamem_alloc(struct mfii_softc *sc, size_t size)
{
	struct mfii_dmamem *m;
	int nsegs;

	m = malloc(sizeof(*m), M_DEVBUF, M_WAITOK | M_ZERO);
	m->mdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->mdm_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &m->mdm_seg, nsegs, size, &m->mdm_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, m->mdm_map, m->mdm_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	/* Hand the caller zeroed memory. */
	memset(m->mdm_kva, 0, size);
	return (m);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
mdmfree:
	free(m, M_DEVBUF);

	return (NULL);
}
1233 
/*
 * Release a struct mfii_dmamem: unload and destroy its map, unmap and
 * free the memory, then free the wrapper itself.
 */
static void
mfii_dmamem_free(struct mfii_softc *sc, struct mfii_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
	free(m, M_DEVBUF);
}
1243 
/*
 * Post a pass-through (legacy MFI) command whose frame has already
 * been prepared in the ccb.  A single chain SGE is pointed at the
 * ccb's sense buffer area, which carries the MFI frame for this
 * command.
 */
static void
mfii_dcmd_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);

	io->function = MFII_FUNCTION_PASSTHRU_IO;
	/* SGE sits directly after the raid context; offset in 32bit words. */
	io->sgl_offset0 = (uint32_t *)sge - (uint32_t *)io;
	/* chain offset is expressed in 16-byte units. */
	io->chain_offset = io->sgl_offset0 / 4;

	sge->sg_addr = htole64(ccb->ccb_sense_dva);
	sge->sg_len = htole32(sizeof(*ccb->ccb_sense));
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	/* NOTE(review): le16toh of a host value; same bytes as htole16. */
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	mfii_start(sc, ccb);
}
1264 
/*
 * Set up asynchronous event notification: fetch the firmware event-log
 * info to learn the boot sequence number, allocate a DMA buffer for
 * event details, then post the first EVENT_WAIT command replaying all
 * events since boot.  On success the ccb and DMA buffer are handed
 * over to the AEN machinery for the lifetime of the driver.
 * Returns 0 on success, ENOMEM/EIO on failure.
 */
static int
mfii_aen_register(struct mfii_softc *sc)
{
	struct mfi_evt_log_info mel;
	struct mfii_ccb *ccb;
	struct mfii_dmamem *mdm;
	int rv;

	ccb = mfii_get_ccb(sc);
	if (ccb == NULL) {
		printf("%s: unable to allocate ccb for aen\n", DEVNAME(sc));
		return (ENOMEM);
	}

	memset(&mel, 0, sizeof(mel));
	mfii_scrub_ccb(ccb);

	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_EVENT_GET_INFO, NULL,
	    &mel, sizeof(mel), MFII_DATA_IN, true);
	if (rv != 0) {
		mfii_put_ccb(sc, ccb);
		aprint_error_dev(sc->sc_dev, "unable to get event info\n");
		return (EIO);
	}

	mdm = mfii_dmamem_alloc(sc, sizeof(struct mfi_evt_detail));
	if (mdm == NULL) {
		mfii_put_ccb(sc, ccb);
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate event data\n");
		return (ENOMEM);
	}

	/* replay all the events from boot */
	mfii_aen_start(sc, ccb, mdm, le32toh(mel.mel_boot_seq_num));

	return (0);
}
1303 
/*
 * (Re)arm the asynchronous event wait: build a legacy MFI DCMD frame
 * (MR_DCMD_CTRL_EVENT_WAIT) requesting the event with sequence number
 * "seq", pointed at the event-detail DMA buffer "mdm", and post it.
 * Completion is delivered to mfii_aen_done().
 */
static void
mfii_aen_start(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct mfii_dmamem *mdm, uint32_t seq)
{
	struct mfi_dcmd_frame *dcmd = mfii_dcmd_frame(ccb);
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	union mfi_sgl *sgl = &dcmd->mdf_sgl;
	union mfi_evt_class_locale mec;

	mfii_scrub_ccb(ccb);
	mfii_dcmd_scrub(ccb);
	memset(MFII_DMA_KVA(mdm), 0, MFII_DMA_LEN(mdm));

	ccb->ccb_cookie = mdm;
	ccb->ccb_done = mfii_aen_done;
	sc->sc_aen_ccb = ccb;

	/* Ask for everything: all locales down to debug class. */
	mec.mec_members.class = MFI_EVT_CLASS_DEBUG;
	mec.mec_members.reserved = 0;
	mec.mec_members.locale = htole16(MFI_EVT_LOCALE_ALL);

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_sg_count = 1;
	hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ | MFI_FRAME_SGL64);
	hdr->mfh_data_len = htole32(MFII_DMA_LEN(mdm));
	dcmd->mdf_opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	dcmd->mdf_mbox.w[0] = htole32(seq);
	dcmd->mdf_mbox.w[1] = htole32(mec.mec_word);
	sgl->sg64[0].addr = htole64(MFII_DMA_DVA(mdm));
	sgl->sg64[0].len = htole32(MFII_DMA_LEN(mdm));

	/* Make the (zeroed) event buffer visible to the device. */
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_PREREAD);

	mfii_dcmd_sync(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	mfii_dcmd_start(sc, ccb);
}
1341 
1342 static void
1343 mfii_aen_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1344 {
1345 	KASSERT(sc->sc_aen_ccb == ccb);
1346 
1347 	/*
1348 	 * defer to a thread with KERNEL_LOCK so we can run autoconf
1349 	 * We shouldn't have more than one AEN command pending at a time,
1350 	 * so no need to lock
1351 	 */
1352 	if (sc->sc_running)
1353 		workqueue_enqueue(sc->sc_aen_wq, &sc->sc_aen_work, NULL);
1354 }
1355 
/*
 * Workqueue handler for asynchronous events.  Runs in thread context
 * (so autoconf work done by the handlers is safe), dispatches the
 * completed event by code, then re-arms the AEN command for the next
 * sequence number.
 */
static void
mfii_aen(struct work *wk, void *arg)
{
	struct mfii_softc *sc = arg;
	struct mfii_ccb *ccb = sc->sc_aen_ccb;
	struct mfii_dmamem *mdm = ccb->ccb_cookie;
	const struct mfi_evt_detail *med = MFII_DMA_KVA(mdm);

	/* Pull the completed frame and event detail out of DMA memory. */
	mfii_dcmd_sync(sc, ccb,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_POSTREAD);

	DNPRINTF(MFII_D_MISC, "%s: %u %08x %02x %s\n", DEVNAME(sc),
	    le32toh(med->med_seq_num), le32toh(med->med_code),
	    med->med_arg_type, med->med_description);

	switch (le32toh(med->med_code)) {
	case MR_EVT_PD_INSERTED_EXT:
		/* Only PD-address-style arguments carry the data we use. */
		if (med->med_arg_type != MR_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_insert(sc, &med->args.pd_address);
		break;
	case MR_EVT_PD_REMOVED_EXT:
		if (med->med_arg_type != MR_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_remove(sc, &med->args.pd_address);
		break;

	case MR_EVT_PD_STATE_CHANGE:
		if (med->med_arg_type != MR_EVT_ARGS_PD_STATE)
			break;

		mfii_aen_pd_state_change(sc, &med->args.pd_state);
		break;

	case MR_EVT_LD_CREATED:
	case MR_EVT_LD_DELETED:
		mfii_aen_ld_update(sc);
		break;

	default:
		break;
	}

	/* Re-arm: wait for the event following this one. */
	mfii_aen_start(sc, ccb, mdm, le32toh(med->med_seq_num) + 1);
}
1405 
/*
 * AEN handler: a physical disk was inserted.  Currently just logs it.
 */
static void
mfii_aen_pd_insert(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_address *pd)
{
	printf("%s: physical disk inserted id %d enclosure %d\n", DEVNAME(sc),
	    le16toh(pd->device_id), le16toh(pd->encl_id));
}
1413 
/*
 * AEN handler: a physical disk was removed.  Currently just logs it.
 */
static void
mfii_aen_pd_remove(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_address *pd)
{
	printf("%s: physical disk removed id %d enclosure %d\n", DEVNAME(sc),
	    le16toh(pd->device_id), le16toh(pd->encl_id));
}
1421 
/*
 * AEN handler: physical-disk state change.  Intentionally ignored for
 * now.
 */
static void
mfii_aen_pd_state_change(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_state *state)
{
}
1428 
/*
 * AEN handler for logical-drive creation/deletion: re-fetch the LD
 * list from the firmware, diff it against sc_target_lds, and
 * attach/detach sensors (and detach SCSI targets) for drives that
 * appeared or disappeared.
 */
static void
mfii_aen_ld_update(struct mfii_softc *sc)
{
	union mfi_mbox mbox;
	int i, target, old, nld;
	int newlds[MFII_MAX_LD_EXT];

	memset(&mbox, 0, sizeof(mbox));
	/* mbox.b[0] = 1 requests the extended (256 VD) list format. */
	if (sc->sc_max256vd)
		mbox.b[0] = 1;
	mutex_enter(&sc->sc_lock);
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, &mbox, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), MFII_DATA_IN, false) != 0) {
		mutex_exit(&sc->sc_lock);
		DNPRINTF(MFII_D_MISC,
		    "%s: getting list of logical disks failed\n", DEVNAME(sc));
		return;
	}
	mutex_exit(&sc->sc_lock);

	/* Build the new target-slot -> LD-index map (-1 == no LD). */
	memset(newlds, -1, sizeof(newlds));

	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
		target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		DNPRINTF(MFII_D_MISC, "%s: target %d: state %d\n",
		    DEVNAME(sc), target, sc->sc_ld_list.mll_list[i].mll_state);
		newlds[target] = i;
		sc->sc_ld[i].ld_target_id = target;
	}

	/* Diff old map against new map, slot by slot. */
	for (i = 0; i < MFII_MAX_LD_EXT; i++) {
		old = sc->sc_target_lds[i];
		nld = newlds[i];

		if (old == -1 && nld != -1) {
			/*
			 * NOTE(review): "i" is the target slot and "nld"
			 * the LD-list index; the message labels look
			 * swapped -- confirm intent.  Also, sc_ld is
			 * indexed by LD index in the loop above but by
			 * target slot here.
			 */
			printf("%s: logical drive %d added (target %d)\n",
			    DEVNAME(sc), i, nld);
			sc->sc_ld[i].ld_present = 1;

			// XXX scsi_probe_target(sc->sc_scsibus, i);

			mfii_init_ld_sensor(sc, &sc->sc_ld_sensors[i], i);
			mfii_attach_sensor(sc, &sc->sc_ld_sensors[i]);
		} else if (nld == -1 && old != -1) {
			printf("%s: logical drive %d removed (target %d)\n",
			    DEVNAME(sc), i, old);
			sc->sc_ld[i].ld_present = 0;

			scsipi_target_detach(&sc->sc_chan, i, 0, DETACH_FORCE);
			sysmon_envsys_sensor_detach(sc->sc_sme,
			    &sc->sc_ld_sensors[i]);
		}
	}

	memcpy(sc->sc_target_lds, newlds, sizeof(sc->sc_target_lds));
}
1485 
/*
 * Tear down asynchronous event notification.  Not implemented yet.
 */
static void
mfii_aen_unregister(struct mfii_softc *sc)
{
	/* XXX */
}
1491 
/*
 * Drive the firmware state machine to MFI_STATE_READY: give each
 * intermediate state its required kick (clear handshake / init ready)
 * and poll in 100ms steps up to a per-state timeout (max_wait is in
 * seconds).  Returns 0 once READY, 1 on fault, unknown state, or
 * timeout.
 */
static int
mfii_transition_firmware(struct mfii_softc *sc)
{
	int32_t			fw_state, cur_state;
	int			max_wait, i;

	fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;

	while (fw_state != MFI_STATE_READY) {
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			printf("%s: firmware fault\n", DEVNAME(sc));
			return (1);
		case MFI_STATE_WAIT_HANDSHAKE:
			mfii_write(sc, MFI_SKINNY_IDB,
			    MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		default:
			printf("%s: unknown firmware state %d\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
		/* Poll every 100ms until the state changes or we time out. */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
	}

	return (0);
}
1545 
1546 static int
1547 mfii_get_info(struct mfii_softc *sc)
1548 {
1549 	int i, rv;
1550 
1551 	rv = mfii_mgmt(sc, MR_DCMD_CTRL_GET_INFO, NULL, &sc->sc_info,
1552 	    sizeof(sc->sc_info), MFII_DATA_IN, true);
1553 
1554 	if (rv != 0)
1555 		return (rv);
1556 
1557 	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
1558 		DPRINTF("%s: active FW %s Version %s date %s time %s\n",
1559 		    DEVNAME(sc),
1560 		    sc->sc_info.mci_image_component[i].mic_name,
1561 		    sc->sc_info.mci_image_component[i].mic_version,
1562 		    sc->sc_info.mci_image_component[i].mic_build_date,
1563 		    sc->sc_info.mci_image_component[i].mic_build_time);
1564 	}
1565 
1566 	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
1567 		DPRINTF("%s: pending FW %s Version %s date %s time %s\n",
1568 		    DEVNAME(sc),
1569 		    sc->sc_info.mci_pending_image_component[i].mic_name,
1570 		    sc->sc_info.mci_pending_image_component[i].mic_version,
1571 		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
1572 		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
1573 	}
1574 
1575 	DPRINTF("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
1576 	    DEVNAME(sc),
1577 	    sc->sc_info.mci_max_arms,
1578 	    sc->sc_info.mci_max_spans,
1579 	    sc->sc_info.mci_max_arrays,
1580 	    sc->sc_info.mci_max_lds,
1581 	    sc->sc_info.mci_product_name);
1582 
1583 	DPRINTF("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
1584 	    DEVNAME(sc),
1585 	    sc->sc_info.mci_serial_number,
1586 	    sc->sc_info.mci_hw_present,
1587 	    sc->sc_info.mci_current_fw_time,
1588 	    sc->sc_info.mci_max_cmds,
1589 	    sc->sc_info.mci_max_sg_elements);
1590 
1591 	DPRINTF("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
1592 	    DEVNAME(sc),
1593 	    sc->sc_info.mci_max_request_size,
1594 	    sc->sc_info.mci_lds_present,
1595 	    sc->sc_info.mci_lds_degraded,
1596 	    sc->sc_info.mci_lds_offline,
1597 	    sc->sc_info.mci_pd_present);
1598 
1599 	DPRINTF("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
1600 	    DEVNAME(sc),
1601 	    sc->sc_info.mci_pd_disks_present,
1602 	    sc->sc_info.mci_pd_disks_pred_failure,
1603 	    sc->sc_info.mci_pd_disks_failed);
1604 
1605 	DPRINTF("%s: nvram %d mem %d flash %d\n",
1606 	    DEVNAME(sc),
1607 	    sc->sc_info.mci_nvram_size,
1608 	    sc->sc_info.mci_memory_size,
1609 	    sc->sc_info.mci_flash_size);
1610 
1611 	DPRINTF("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
1612 	    DEVNAME(sc),
1613 	    sc->sc_info.mci_ram_correctable_errors,
1614 	    sc->sc_info.mci_ram_uncorrectable_errors,
1615 	    sc->sc_info.mci_cluster_allowed,
1616 	    sc->sc_info.mci_cluster_active);
1617 
1618 	DPRINTF("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
1619 	    DEVNAME(sc),
1620 	    sc->sc_info.mci_max_strips_per_io,
1621 	    sc->sc_info.mci_raid_levels,
1622 	    sc->sc_info.mci_adapter_ops,
1623 	    sc->sc_info.mci_ld_ops);
1624 
1625 	DPRINTF("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
1626 	    DEVNAME(sc),
1627 	    sc->sc_info.mci_stripe_sz_ops.min,
1628 	    sc->sc_info.mci_stripe_sz_ops.max,
1629 	    sc->sc_info.mci_pd_ops,
1630 	    sc->sc_info.mci_pd_mix_support);
1631 
1632 	DPRINTF("%s: ecc_bucket %d pckg_prop %s\n",
1633 	    DEVNAME(sc),
1634 	    sc->sc_info.mci_ecc_bucket_count,
1635 	    sc->sc_info.mci_package_version);
1636 
1637 	DPRINTF("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
1638 	    DEVNAME(sc),
1639 	    sc->sc_info.mci_properties.mcp_seq_num,
1640 	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
1641 	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
1642 	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);
1643 
1644 	DPRINTF("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
1645 	    DEVNAME(sc),
1646 	    sc->sc_info.mci_properties.mcp_rebuild_rate,
1647 	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
1648 	    sc->sc_info.mci_properties.mcp_bgi_rate,
1649 	    sc->sc_info.mci_properties.mcp_cc_rate);
1650 
1651 	DPRINTF("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
1652 	    DEVNAME(sc),
1653 	    sc->sc_info.mci_properties.mcp_recon_rate,
1654 	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
1655 	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
1656 	    sc->sc_info.mci_properties.mcp_spinup_delay,
1657 	    sc->sc_info.mci_properties.mcp_cluster_enable);
1658 
1659 	DPRINTF("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
1660 	    DEVNAME(sc),
1661 	    sc->sc_info.mci_properties.mcp_coercion_mode,
1662 	    sc->sc_info.mci_properties.mcp_alarm_enable,
1663 	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
1664 	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
1665 	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);
1666 
1667 	DPRINTF("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
1668 	    DEVNAME(sc),
1669 	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
1670 	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
1671 	    sc->sc_info.mci_properties.mcp_expose_encl_devices);
1672 
1673 	DPRINTF("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
1674 	    DEVNAME(sc),
1675 	    sc->sc_info.mci_pci.mip_vendor,
1676 	    sc->sc_info.mci_pci.mip_device,
1677 	    sc->sc_info.mci_pci.mip_subvendor,
1678 	    sc->sc_info.mci_pci.mip_subdevice);
1679 
1680 	DPRINTF("%s: type %#x port_count %d port_addr ",
1681 	    DEVNAME(sc),
1682 	    sc->sc_info.mci_host.mih_type,
1683 	    sc->sc_info.mci_host.mih_port_count);
1684 
1685 	for (i = 0; i < 8; i++)
1686 		DPRINTF("%.0" PRIx64 " ",
1687 		    sc->sc_info.mci_host.mih_port_addr[i]);
1688 	DPRINTF("\n");
1689 
1690 	DPRINTF("%s: type %.x port_count %d port_addr ",
1691 	    DEVNAME(sc),
1692 	    sc->sc_info.mci_device.mid_type,
1693 	    sc->sc_info.mci_device.mid_port_count);
1694 
1695 	for (i = 0; i < 8; i++)
1696 		DPRINTF("%.0" PRIx64 " ",
1697 		    sc->sc_info.mci_device.mid_port_addr[i]);
1698 	DPRINTF("\n");
1699 
1700 	return (0);
1701 }
1702 
/*
 * Issue a ccb carrying a legacy MFI frame and busy-wait (up to ~5s)
 * for the firmware to update the frame's command status in place.
 * Used only for MFI_CMD_INIT during attach.  Returns 0 on success,
 * 1 on timeout (MFI_CCB_F_ERR is set on the ccb).
 */
static int
mfii_mfa_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mfi_frame_header	*hdr = ccb->ccb_request;
	u_int64_t r;
	int to = 0, rv = 0;

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_mfa_poll called with cookie or done set");
#endif

	hdr->mfh_context = ccb->ccb_smid;
	/* Sentinel: firmware overwrites this when the command completes. */
	hdr->mfh_cmd_status = MFI_STAT_INVALID_STATUS;
	hdr->mfh_flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	r = MFII_REQ_MFA(ccb->ccb_request_dva);
	memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));

	/*
	 * Even if the Aero card supports 32bit descriptor, 64bit descriptor
	 * access is required for MFI_CMD_INIT.
	 * Currently, mfii_mfa_poll() is called for MFI_CMD_INIT only.
	 */
	mfii_start64(sc, ccb);

	for (;;) {
		/* Pull the firmware's view of the frame into the CPU. */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (hdr->mfh_cmd_status != MFI_STAT_INVALID_STATUS)
			break;

		if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
			    ccb->ccb_smid);
			ccb->ccb_flags |= MFI_CCB_F_ERR;
			rv = 1;
			break;
		}

		/* Give the frame back to the device before the next poll. */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		delay(1000);
	}

	/* Complete any associated data transfer (32bit map only here). */
	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap32,
		    0, ccb->ccb_dmamap32->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap32);
	}

	return (rv);
}
1763 
/*
 * Submit a ccb and poll the reply post queue until it completes.  The
 * ccb's done/cookie pair is temporarily borrowed to flag completion
 * (mfii_poll_done() clears "rv"); the caller's original done callback
 * is invoked afterwards with its cookie restored.  Always returns 0.
 */
static int
mfii_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	void (*done)(struct mfii_softc *, struct mfii_ccb *);
	void *cookie;
	int rv = 1;

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mfii_poll_done;
	ccb->ccb_cookie = &rv;

	mfii_start(sc, ccb);

	do {
		delay(10);
		mfii_postq(sc);
	} while (rv == 1);

	ccb->ccb_cookie = cookie;
	done(sc, ccb);

	return (0);
}
1789 
1790 static void
1791 mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1792 {
1793 	int *rv = ccb->ccb_cookie;
1794 
1795 	*rv = 0;
1796 }
1797 
/*
 * Submit a ccb and sleep until it completes.  mfii_exec_done() clears
 * ccb_cookie under ccb_mtx and signals ccb_cv; we wait for that here.
 * Always returns 0.
 */
static int
mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_exec called with cookie or done set");
#endif

	/* Cookie doubles as the "still in flight" marker. */
	ccb->ccb_cookie = ccb;
	ccb->ccb_done = mfii_exec_done;

	mfii_start(sc, ccb);

	mutex_enter(&ccb->ccb_mtx);
	while (ccb->ccb_cookie != NULL)
		cv_wait(&ccb->ccb_cv, &ccb->ccb_mtx);
	mutex_exit(&ccb->ccb_mtx);

	return (0);
}
1818 
/*
 * Completion callback for mfii_exec(): clear the in-flight marker and
 * wake the waiter.  ccb_mtx orders the store against the cv_wait loop.
 */
static void
mfii_exec_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	mutex_enter(&ccb->ccb_mtx);
	ccb->ccb_cookie = NULL;
	cv_signal(&ccb->ccb_cv);
	mutex_exit(&ccb->ccb_mtx);
}
1827 
1828 static int
1829 mfii_mgmt(struct mfii_softc *sc, uint32_t opc, const union mfi_mbox *mbox,
1830     void *buf, size_t len, mfii_direction_t dir, bool poll)
1831 {
1832 	struct mfii_ccb *ccb;
1833 	int rv;
1834 
1835 	KASSERT(mutex_owned(&sc->sc_lock));
1836 	if (!sc->sc_running)
1837 		return EAGAIN;
1838 
1839 	ccb = mfii_get_ccb(sc);
1840 	if (ccb == NULL)
1841 		return (ENOMEM);
1842 
1843 	mfii_scrub_ccb(ccb);
1844 	rv = mfii_do_mgmt(sc, ccb, opc, mbox, buf, len, dir, poll);
1845 	mfii_put_ccb(sc, ccb);
1846 
1847 	return (rv);
1848 }
1849 
/*
 * Build and issue a legacy MFI DCMD through the MPII pass-through
 * interface using the supplied ccb.  "buf"/"len"/"dir" describe an
 * optional data transfer, "mbox" optional DCMD arguments.  Sleeps for
 * completion unless "poll" is set ("cold" forces polling since we
 * cannot sleep then).  Returns 0 iff the firmware reports MFI_STAT_OK,
 * ENOMEM if the data buffer cannot be mapped, EIO otherwise.
 */
static int
mfii_do_mgmt(struct mfii_softc *sc, struct mfii_ccb *ccb, uint32_t opc,
    const union mfi_mbox *mbox, void *buf, size_t len, mfii_direction_t dir,
    bool poll)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);
	struct mfi_dcmd_frame *dcmd = ccb->ccb_mfi;
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	int rv = EIO;

	/* Can't sleep during early boot. */
	if (cold)
		poll = true;

	ccb->ccb_data = buf;
	ccb->ccb_len = len;
	ccb->ccb_direction = dir;
	switch (dir) {
	case MFII_DATA_IN:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ);
		break;
	case MFII_DATA_OUT:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_WRITE);
		break;
	case MFII_DATA_NONE:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_NONE);
		break;
	}

	/* Map the data buffer and fill the MFI frame's 32bit SGL. */
	if (mfii_load_mfa(sc, ccb, &dcmd->mdf_sgl, poll) != 0) {
		rv = ENOMEM;
		goto done;
	}

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_data_len = htole32(len);
	hdr->mfh_sg_count = ccb->ccb_dmamap32->dm_nsegs;
	KASSERT(!ccb->ccb_dma64);

	dcmd->mdf_opcode = opc;
	/* handle special opcodes */
	if (mbox != NULL)
		memcpy(&dcmd->mdf_mbox, mbox, sizeof(dcmd->mdf_mbox));

	/* Wrap the MFI frame in an MPII pass-through request; the single
	 * chain SGE points at the frame itself. */
	io->function = MFII_FUNCTION_PASSTHRU_IO;
	io->sgl_offset0 = ((u_int8_t *)sge - (u_int8_t *)io) / 4;
	io->chain_offset = ((u_int8_t *)sge - (u_int8_t *)io) / 16;

	sge->sg_addr = htole64(ccb->ccb_mfi_dva);
	sge->sg_len = htole32(MFI_FRAME_SIZE);
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	if (poll) {
		ccb->ccb_done = mfii_empty_done;
		mfii_poll(sc, ccb);
	} else
		mfii_exec(sc, ccb);

	if (hdr->mfh_cmd_status == MFI_STAT_OK) {
		rv = 0;
	}

done:
	return (rv);
}
1920 
/*
 * No-op completion callback, used when the polled path needs no
 * per-completion processing.
 */
static void
mfii_empty_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
}
1926 
/*
 * Load the ccb's data buffer into its 32bit DMA map and fill the
 * legacy MFI 32bit SGL at "sglp".  "nosleep" selects BUS_DMA_NOWAIT.
 * Returns 0 on success (or when there is no data), 1 on map failure.
 */
static int
mfii_load_mfa(struct mfii_softc *sc, struct mfii_ccb *ccb,
    void *sglp, int nosleep)
{
	union mfi_sgl *sgl = sglp;
	bus_dmamap_t dmap = ccb->ccb_dmamap32;
	int error;
	int i;

	KASSERT(!ccb->ccb_dma64);
	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* Translate the DMA segments into little-endian 32bit SG entries. */
	for (i = 0; i < dmap->dm_nsegs; i++) {
		sgl->sg32[i].addr = htole32(dmap->dm_segs[i].ds_addr);
		sgl->sg32[i].len = htole32(dmap->dm_segs[i].ds_len);
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	return (0);
}
1959 
1960 static void
1961 mfii_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
1962 {
1963 
1964 	mfii_start_common(sc, ccb,
1965 	    ((sc->sc_iop_flag & MFII_IOP_DESC_32BIT) != 0) ? true : false);
1966 }
1967 
/*
 * Post a request using the 64bit descriptor path unconditionally
 * (required for MFI_CMD_INIT even on 32bit-descriptor IOPs).
 */
static void
mfii_start64(struct mfii_softc *sc, struct mfii_ccb *ccb)
{

	mfii_start_common(sc, ccb, false);
}
1974 
/*
 * Post the ccb's request descriptor to the firmware.  With "do32" the
 * descriptor's low word goes to the 32bit inbound queue port;
 * otherwise the full 64bit descriptor is written -- as a single 8-byte
 * store on LP64, or as two 4-byte stores serialized by sc_post_mtx on
 * 32bit platforms.
 */
static void
mfii_start_common(struct mfii_softc *sc, struct mfii_ccb *ccb, bool do32)
{
	uint32_t *r = (uint32_t *)&ccb->ccb_req;

	/* Make the request frame visible to the device before posting. */
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (do32)
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_ISQP, r[0]);
	else {
#if defined(__LP64__)
		uint64_t buf;

		buf = ((uint64_t)r[1] << 32) | r[0];
		bus_space_write_8(sc->sc_iot, sc->sc_ioh, MFI_IQPL, buf);
#else
		mutex_enter(&sc->sc_post_mtx);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_IQPL, r[0]);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_IQPH, r[1]);
		bus_space_barrier(sc->sc_iot, sc->sc_ioh,
		    MFI_IQPL, 8, BUS_SPACE_BARRIER_WRITE);
		mutex_exit(&sc->sc_post_mtx);
#endif
	}
}
2002 
/*
 * Common completion path for a finished ccb: sync the request frame,
 * any external SGL, and the data transfer (64bit or 32bit map), unload
 * the data map, then invoke the ccb's done callback.
 */
static void
mfii_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_POSTWRITE);
	}

	if (ccb->ccb_dma64) {
		KASSERT(ccb->ccb_len > 0);
		bus_dmamap_sync(sc->sc_dmat64, ccb->ccb_dmamap64,
		    0, ccb->ccb_dmamap64->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat64, ccb->ccb_dmamap64);
	} else if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap32,
		    0, ccb->ccb_dmamap32->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap32);
	}

	ccb->ccb_done(sc, ccb);
}
2035 
2036 static int
2037 mfii_initialise_firmware(struct mfii_softc *sc)
2038 {
2039 	struct mpii_msg_iocinit_request *iiq;
2040 	struct mfii_dmamem *m;
2041 	struct mfii_ccb *ccb;
2042 	struct mfi_init_frame *init;
2043 	int rv;
2044 
2045 	m = mfii_dmamem_alloc(sc, sizeof(*iiq));
2046 	if (m == NULL)
2047 		return (1);
2048 
2049 	iiq = MFII_DMA_KVA(m);
2050 	memset(iiq, 0, sizeof(*iiq));
2051 
2052 	iiq->function = MPII_FUNCTION_IOC_INIT;
2053 	iiq->whoinit = MPII_WHOINIT_HOST_DRIVER;
2054 
2055 	iiq->msg_version_maj = 0x02;
2056 	iiq->msg_version_min = 0x00;
2057 	iiq->hdr_version_unit = 0x10;
2058 	iiq->hdr_version_dev = 0x0;
2059 
2060 	iiq->system_request_frame_size = htole16(MFII_REQUEST_SIZE / 4);
2061 
2062 	iiq->reply_descriptor_post_queue_depth =
2063 	    htole16(sc->sc_reply_postq_depth);
2064 	iiq->reply_free_queue_depth = htole16(0);
2065 
2066 	iiq->sense_buffer_address_high = htole32(
2067 	    MFII_DMA_DVA(sc->sc_sense) >> 32);
2068 
2069 	iiq->reply_descriptor_post_queue_address_lo =
2070 	    htole32(MFII_DMA_DVA(sc->sc_reply_postq));
2071 	iiq->reply_descriptor_post_queue_address_hi =
2072 	    htole32(MFII_DMA_DVA(sc->sc_reply_postq) >> 32);
2073 
2074 	iiq->system_request_frame_base_address_lo =
2075 	    htole32(MFII_DMA_DVA(sc->sc_requests));
2076 	iiq->system_request_frame_base_address_hi =
2077 	    htole32(MFII_DMA_DVA(sc->sc_requests) >> 32);
2078 
2079 	iiq->timestamp = htole64(time_uptime);
2080 
2081 	ccb = mfii_get_ccb(sc);
2082 	if (ccb == NULL) {
2083 		/* shouldn't ever run out of ccbs during attach */
2084 		return (1);
2085 	}
2086 	mfii_scrub_ccb(ccb);
2087 	init = ccb->ccb_request;
2088 
2089 	init->mif_header.mfh_cmd = MFI_CMD_INIT;
2090 	init->mif_header.mfh_data_len = htole32(sizeof(*iiq));
2091 	init->mif_qinfo_new_addr_lo = htole32(MFII_DMA_DVA(m));
2092 	init->mif_qinfo_new_addr_hi = htole32((uint64_t)MFII_DMA_DVA(m) >> 32);
2093 
2094 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
2095 	    0, MFII_DMA_LEN(sc->sc_reply_postq),
2096 	    BUS_DMASYNC_PREREAD);
2097 
2098 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
2099 	    0, sizeof(*iiq), BUS_DMASYNC_PREREAD);
2100 
2101 	rv = mfii_mfa_poll(sc, ccb);
2102 
2103 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
2104 	    0, sizeof(*iiq), BUS_DMASYNC_POSTREAD);
2105 
2106 	mfii_put_ccb(sc, ccb);
2107 	mfii_dmamem_free(sc, m);
2108 
2109 	return (rv);
2110 }
2111 
2112 static int
2113 mfii_my_intr(struct mfii_softc *sc)
2114 {
2115 	u_int32_t status;
2116 
2117 	status = mfii_read(sc, MFI_OSTS);
2118 
2119 	DNPRINTF(MFII_D_INTR, "%s: intr status 0x%x\n", DEVNAME(sc), status);
2120 	if (ISSET(status, 0x1)) {
2121 		mfii_write(sc, MFI_OSTS, status);
2122 		return (1);
2123 	}
2124 
2125 	return (ISSET(status, MFII_OSTS_INTR_VALID) ? 1 : 0);
2126 }
2127 
/*
 * Interrupt handler: bail out early for shared interrupts that are not
 * ours, otherwise drain the reply post queue.
 */
static int
mfii_intr(void *arg)
{
	struct mfii_softc *sc = arg;

	if (mfii_my_intr(sc) == 0)
		return (0);

	mfii_postq(sc);
	return (1);
}
2140 
2141 static void
2142 mfii_postq(struct mfii_softc *sc)
2143 {
2144 	struct mfii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
2145 	struct mpii_reply_descr *postq = MFII_DMA_KVA(sc->sc_reply_postq);
2146 	struct mpii_reply_descr *rdp;
2147 	struct mfii_ccb *ccb;
2148 	int rpi = 0;
2149 
2150 	mutex_enter(&sc->sc_reply_postq_mtx);
2151 
2152 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
2153 	    0, MFII_DMA_LEN(sc->sc_reply_postq),
2154 	    BUS_DMASYNC_POSTREAD);
2155 
2156 	for (;;) {
2157 		rdp = &postq[sc->sc_reply_postq_index];
2158 		DNPRINTF(MFII_D_INTR,
2159 		    "%s: mfii_postq index %d flags 0x%x data 0x%x\n",
2160 		    DEVNAME(sc), sc->sc_reply_postq_index, rdp->reply_flags,
2161 			rdp->data == 0xffffffff);
2162 		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
2163 		    MPII_REPLY_DESCR_UNUSED)
2164 			break;
2165 		if (rdp->data == 0xffffffff) {
2166 			/*
2167 			 * ioc is still writing to the reply post queue
2168 			 * race condition - bail!
2169 			 */
2170 			break;
2171 		}
2172 
2173 		ccb = &sc->sc_ccb[le16toh(rdp->smid) - 1];
2174 		SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
2175 		memset(rdp, 0xff, sizeof(*rdp));
2176 
2177 		sc->sc_reply_postq_index++;
2178 		sc->sc_reply_postq_index %= sc->sc_reply_postq_depth;
2179 		rpi = 1;
2180 	}
2181 
2182 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
2183 	    0, MFII_DMA_LEN(sc->sc_reply_postq),
2184 	    BUS_DMASYNC_PREREAD);
2185 
2186 	if (rpi)
2187 		mfii_write(sc, MFII_RPI, sc->sc_reply_postq_index);
2188 
2189 	mutex_exit(&sc->sc_reply_postq_mtx);
2190 
2191 	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
2192 		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
2193 		mfii_done(sc, ccb);
2194 	}
2195 }
2196 
/*
 * scsipi entry point: handle adapter requests and build/submit I/O
 * commands for logical drives.  READ/WRITE commands take the fast LDIO
 * path (mfii_scsi_cmd_io); everything else goes via the generic CDB
 * path (mfii_scsi_cmd_cdb).
 */
static void
mfii_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_periph    *periph;
	struct scsipi_xfer	*xs;
	struct scsipi_adapter   *adapt = chan->chan_adapter;
	struct mfii_softc	*sc = device_private(adapt->adapt_dev);
	struct mfii_ccb *ccb;
	int timeout;
	int target;

	switch (req) {
		case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
	{
		/* Report tagged queueing; no sync/offset negotiation. */
		struct scsipi_xfer_mode *xm = arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	}
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	xs = arg;
	periph = xs->xs_periph;
	target = periph->periph_target;

	/* Only LUN 0 of a present logical drive is addressable. */
	if (target >= MFII_MAX_LD_EXT || !sc->sc_ld[target].ld_present ||
	    periph->periph_lun != 0) {
		xs->error = XS_SELTIMEOUT;
		scsipi_done(xs);
		return;
	}

	if ((xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_10 ||
	    xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_16) && sc->sc_bbuok) {
		/* the cache is stable storage, don't flush */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		scsipi_done(xs);
		return;
	}

	ccb = mfii_get_ccb(sc);
	if (ccb == NULL) {
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}
	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	/* Arm the per-xfer timeout; at least one tick. */
	timeout = mstohz(xs->timeout);
	if (timeout == 0)
		timeout = 1;
	callout_reset(&xs->xs_callout, timeout, mfii_scsi_cmd_tmo, ccb);

	switch (xs->cmd->opcode) {
	case SCSI_READ_6_COMMAND:
	case READ_10:
	case READ_12:
	case READ_16:
	case SCSI_WRITE_6_COMMAND:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
		/* Fast-path LDIO request for reads/writes. */
		if (mfii_scsi_cmd_io(sc, ccb, xs) != 0)
			goto stuffup;
		break;

	default:
		if (mfii_scsi_cmd_cdb(sc, ccb, xs) != 0)
			goto stuffup;
		break;
	}

	xs->error = XS_NOERROR;
	xs->resid = 0;

	DNPRINTF(MFII_D_CMD, "%s: start io %d cmd %d\n", DEVNAME(sc), target,
	    xs->cmd->opcode);

	if (xs->xs_control & XS_CTL_POLL) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
	scsipi_done(xs);
	mfii_put_ccb(sc, ccb);
}
2304 
/*
 * Completion handler for scsipi commands: translate the RAID context
 * status into a scsipi result and finish the transfer.
 */
static void
mfii_scsi_cmd_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct scsipi_xfer *xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* The RAID context immediately follows the SCSI IO message. */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);

	/*
	 * If the timeout callout already fired, the abort path owns this
	 * command now; let it complete the xfer.
	 */
	if (callout_stop(&xs->xs_callout) != 0)
		return;

	switch (ctx->status) {
	case MFI_STAT_OK:
		break;

	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		/* Device returned a check condition; copy sense data out. */
		xs->error = XS_SENSE;
		memset(&xs->sense, 0, sizeof(xs->sense));
		memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
		break;

	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	scsipi_done(xs);
	mfii_put_ccb(sc, ccb);
}
2338 
/*
 * Build a fast-path LDIO request for a READ/WRITE command.
 * Returns 0 on success, 1 if the data buffer could not be mapped.
 */
static int
mfii_scsi_cmd_io(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* RAID context sits directly after the MPI SCSI IO message. */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	int segs, target;

	target = sc->sc_ld[periph->periph_target].ld_target_id;
	io->dev_handle = htole16(target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* SGL offset is counted in 32-bit words. */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	/* NOTE(review): io_flags carries the CDB length here — confirm
	 * against the MPI2 IoFlags definition. */
	io->io_flags = htole16(xs->cmdlen);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	/* Per-IOP RAID context parameters. */
	ctx->type_nseg = sc->sc_iop->ldio_ctx_type_nseg;
	ctx->timeout_value = htole16(0x14); /* XXX */
	ctx->reg_lock_flags = htole16(sc->sc_iop->ldio_ctx_reg_lock_flags);
	ctx->virtual_disk_target_id = htole16(target);

	/* Map the data buffer and build the SGL after the context. */
	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (1);

	KASSERT(ccb->ccb_len == 0 || ccb->ccb_dma64);
	segs = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	/* Where the segment count lives differs between IOP generations. */
	switch (sc->sc_iop->num_sge_loc) {
	case MFII_IOP_NUM_SGE_LOC_ORIG:
		ctx->num_sge = segs;
		break;
	case MFII_IOP_NUM_SGE_LOC_35:
		/* 12 bit field, but we're only using the lower 8 */
		ctx->span_arm = segs;
		break;
	}

	ccb->ccb_req.flags = sc->sc_iop->ldio_req_type;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	return (0);
}
2399 
/*
 * Build a generic (non-READ/WRITE) CDB request for a logical drive.
 * Returns 0 on success, 1 if the data buffer could not be mapped.
 */
static int
mfii_scsi_cmd_cdb(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* RAID context sits directly after the MPI SCSI IO message. */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	int target;

	target = sc->sc_ld[periph->periph_target].ld_target_id;
	io->dev_handle = htole16(target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* SGL offset is counted in 32-bit words. */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(periph->periph_lun);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(target);

	/* Map the data buffer and build the SGL after the context. */
	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (1);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	KASSERT(ccb->ccb_len == 0 || ccb->ccb_dma64);

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	return (0);
}
2449 
#if 0
/*
 * Physical-disk (system PD / JBOD) command path inherited from the
 * OpenBSD driver.  Not yet ported to NetBSD's scsipi framework
 * (still uses struct scsi_link), hence compiled out.
 */
void
mfii_pd_scsi_cmd(struct scsipi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	// XXX timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	xs->error = mfii_pd_scsi_cmd_cdb(sc, xs);
	if (xs->error != XS_NOERROR)
		goto done;

	xs->resid = 0;

	if (ISSET(xs->xs_control, XS_CTL_POLL)) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	// XXX timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
done:
	scsi_done(xs);
}

int
mfii_pd_scsi_probe(struct scsi_link *link)
{
	struct mfii_softc *sc = link->adapter_softc;
	struct mfi_pd_details mpd;
	union mfi_mbox mbox;
	int rv;

	if (link->lun > 0)
		return (0);

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = htole16(link->target);

	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, &mpd, sizeof(mpd),
	    MFII_DATA_IN, true);
	if (rv != 0)
		return (EIO);

	if (mpd.mpd_fw_state != htole16(MFI_PD_SYSTEM))
		return (ENXIO);

	return (0);
}

int
mfii_pd_scsi_cmd_cdb(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	uint16_t dev_handle;

	dev_handle = mfii_dev_handle(sc, link->target);
	if (dev_handle == htole16(0xffff))
		return (XS_SELTIMEOUT);

	io->dev_handle = dev_handle;
	io->function = 0;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(link->target);
	ctx->raid_flags = MFII_RAID_CTX_IO_TYPE_SYSPD;
	ctx->timeout_value = sc->sc_pd->pd_timeout;

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (XS_DRIVER_STUFFUP);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	KASSERT(ccb->ccb_dma64);

	ccb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);
	ccb->ccb_req.dev_handle = dev_handle;

	return (XS_NOERROR);
}
#endif
2570 
/*
 * Map the ccb's data buffer for DMA and build the MPI SGL at sglp
 * (inside the request frame).  If the buffer needs more SGEs than fit
 * in the frame, the last in-frame slot becomes a chain element pointing
 * at the ccb's slice of the shared sc_sgl area, and the remaining SGEs
 * are written there.
 *
 * Returns 0 on success, 1 if the dmamap load failed.
 */
static int
mfii_load_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb, void *sglp,
    int nosleep)
{
	struct mpii_msg_request *req = ccb->ccb_request;
	struct mfii_sge *sge = NULL, *nsge = sglp;
	struct mfii_sge *ce = NULL;
	bus_dmamap_t dmap = ccb->ccb_dmamap64;
	u_int space;
	int i;

	int error;

	/* Nothing to map for commands without a data phase. */
	if (ccb->ccb_len == 0)
		return (0);

	ccb->ccb_dma64 = true;
	error = bus_dmamap_load(sc->sc_dmat64, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* How many SGEs fit between sglp and the end of the frame. */
	space = (MFII_REQUEST_SIZE - ((u_int8_t *)nsge - (u_int8_t *)req)) /
	    sizeof(*nsge);
	if (dmap->dm_nsegs > space) {
		/* Reserve the last in-frame slot for the chain element. */
		space--;

		ccb->ccb_sgl_len = (dmap->dm_nsegs - space) * sizeof(*nsge);
		memset(ccb->ccb_sgl, 0, ccb->ccb_sgl_len);

		ce = nsge + space;
		ce->sg_addr = htole64(ccb->ccb_sgl_dva);
		ce->sg_len = htole32(ccb->ccb_sgl_len);
		ce->sg_flags = sc->sc_iop->sge_flag_chain;

		/* Chain offset is expressed in 16-byte units. */
		req->chain_offset = ((u_int8_t *)ce - (u_int8_t *)req) / 16;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		/* On reaching the chain element, continue in sc_sgl. */
		if (nsge == ce)
			nsge = ccb->ccb_sgl;

		sge = nsge;

		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_flags = MFII_SGE_ADDR_SYSTEM;

		nsge = sge + 1;
	}
	/* Mark the final SGE as end-of-list. */
	sge->sg_flags |= sc->sc_iop->sge_flag_eol;

	bus_dmamap_sync(sc->sc_dmat64, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_PREWRITE);
	}

	return (0);
}
2638 
2639 static void
2640 mfii_scsi_cmd_tmo(void *p)
2641 {
2642 	struct mfii_ccb *ccb = p;
2643 	struct mfii_softc *sc = ccb->ccb_sc;
2644 	bool start_abort;
2645 
2646 	printf("%s: cmd timeout ccb %p\n", DEVNAME(sc), p);
2647 
2648 	mutex_enter(&sc->sc_abort_mtx);
2649 	start_abort = (SIMPLEQ_FIRST(&sc->sc_abort_list) == 0);
2650 	SIMPLEQ_INSERT_TAIL(&sc->sc_abort_list, ccb, ccb_link);
2651 	if (start_abort)
2652 		workqueue_enqueue(sc->sc_abort_wq, &sc->sc_abort_work, NULL);
2653 	mutex_exit(&sc->sc_abort_mtx);
2654 }
2655 
/*
 * Abort worker (workqueue): grab the whole abort list in one go, then
 * issue a SCSI task-management ABORT_TASK for each timed-out command.
 */
static void
mfii_abort_task(struct work *wk, void *scp)
{
	struct mfii_softc *sc = scp;
	struct mfii_ccb *list;

	/* Detach the list under the mutex; process it unlocked. */
	mutex_enter(&sc->sc_abort_mtx);
	list = SIMPLEQ_FIRST(&sc->sc_abort_list);
	SIMPLEQ_INIT(&sc->sc_abort_list);
	mutex_exit(&sc->sc_abort_mtx);

	while (list != NULL) {
		struct mfii_ccb *ccb = list;
		struct scsipi_xfer *xs = ccb->ccb_cookie;
		struct scsipi_periph *periph = xs->xs_periph;
		struct mfii_ccb *accb;

		list = SIMPLEQ_NEXT(ccb, ccb_link);

		if (!sc->sc_ld[periph->periph_target].ld_present) {
			/* device is gone */
			xs->error = XS_SELTIMEOUT;
			scsipi_done(xs);
			mfii_put_ccb(sc, ccb);
			continue;
		}

		/*
		 * NOTE(review): mfii_get_ccb() can return NULL (e.g. when
		 * the adapter is shutting down or the free list is empty);
		 * that would crash in mfii_scrub_ccb() — confirm/handle.
		 */
		accb = mfii_get_ccb(sc);
		mfii_scrub_ccb(accb);
		mfii_abort(sc, accb, periph->periph_target, ccb->ccb_smid,
		    MPII_SCSI_TASK_ABORT_TASK,
		    htole32(MFII_TASK_MGMT_FLAGS_PD));

		/* Completion of the abort finishes the original xfer. */
		accb->ccb_cookie = ccb;
		accb->ccb_done = mfii_scsi_cmd_abort_done;

		mfii_start(sc, accb);
	}
}
2695 
2696 static void
2697 mfii_abort(struct mfii_softc *sc, struct mfii_ccb *accb, uint16_t dev_handle,
2698     uint16_t smid, uint8_t type, uint32_t flags)
2699 {
2700 	struct mfii_task_mgmt *msg;
2701 	struct mpii_msg_scsi_task_request *req;
2702 
2703 	msg = accb->ccb_request;
2704 	req = &msg->mpii_request;
2705 	req->dev_handle = dev_handle;
2706 	req->function = MPII_FUNCTION_SCSI_TASK_MGMT;
2707 	req->task_type = type;
2708 	req->task_mid = htole16( smid);
2709 	msg->flags = flags;
2710 
2711 	accb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
2712 	accb->ccb_req.smid = le16toh(accb->ccb_smid);
2713 }
2714 
/*
 * Completion of an ABORT_TASK request: finish the original (timed-out)
 * command with XS_TIMEOUT and release both ccbs.
 */
static void
mfii_scsi_cmd_abort_done(struct mfii_softc *sc, struct mfii_ccb *accb)
{
	/* accb's cookie is the aborted ccb; its cookie is the xfer. */
	struct mfii_ccb *ccb = accb->ccb_cookie;
	struct scsipi_xfer *xs = ccb->ccb_cookie;

	/* XXX check accb completion? */

	mfii_put_ccb(sc, accb);
	printf("%s: cmd aborted ccb %p\n", DEVNAME(sc), ccb);

	xs->error = XS_TIMEOUT;
	scsipi_done(xs);
	mfii_put_ccb(sc, ccb);
}
2730 
2731 static struct mfii_ccb *
2732 mfii_get_ccb(struct mfii_softc *sc)
2733 {
2734 	struct mfii_ccb *ccb;
2735 
2736 	mutex_enter(&sc->sc_ccb_mtx);
2737 	if (!sc->sc_running) {
2738 		ccb = NULL;
2739 	} else {
2740 		ccb = SIMPLEQ_FIRST(&sc->sc_ccb_freeq);
2741 		if (ccb != NULL)
2742 			SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
2743 	}
2744 	mutex_exit(&sc->sc_ccb_mtx);
2745 	return (ccb);
2746 }
2747 
2748 static void
2749 mfii_scrub_ccb(struct mfii_ccb *ccb)
2750 {
2751 	ccb->ccb_cookie = NULL;
2752 	ccb->ccb_done = NULL;
2753 	ccb->ccb_flags = 0;
2754 	ccb->ccb_data = NULL;
2755 	ccb->ccb_direction = MFII_DATA_NONE;
2756 	ccb->ccb_dma64 = false;
2757 	ccb->ccb_len = 0;
2758 	ccb->ccb_sgl_len = 0;
2759 	memset(&ccb->ccb_req, 0, sizeof(ccb->ccb_req));
2760 	memset(ccb->ccb_request, 0, MFII_REQUEST_SIZE);
2761 	memset(ccb->ccb_mfi, 0, MFI_FRAME_SIZE);
2762 }
2763 
/* Return a ccb to the head of the free list. */
static void
mfii_put_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	mutex_enter(&sc->sc_ccb_mtx);
	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
	mutex_exit(&sc->sc_ccb_mtx);
}
2771 
2772 static int
2773 mfii_init_ccb(struct mfii_softc *sc)
2774 {
2775 	struct mfii_ccb *ccb;
2776 	u_int8_t *request = MFII_DMA_KVA(sc->sc_requests);
2777 	u_int8_t *mfi = MFII_DMA_KVA(sc->sc_mfi);
2778 	u_int8_t *sense = MFII_DMA_KVA(sc->sc_sense);
2779 	u_int8_t *sgl = MFII_DMA_KVA(sc->sc_sgl);
2780 	u_int i;
2781 	int error;
2782 
2783 	sc->sc_ccb = malloc(sc->sc_max_cmds * sizeof(struct mfii_ccb),
2784 	    M_DEVBUF, M_WAITOK|M_ZERO);
2785 
2786 	for (i = 0; i < sc->sc_max_cmds; i++) {
2787 		ccb = &sc->sc_ccb[i];
2788 		ccb->ccb_sc = sc;
2789 
2790 		/* create a dma map for transfer */
2791 		error = bus_dmamap_create(sc->sc_dmat,
2792 		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2793 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap32);
2794 		if (error) {
2795 			printf("%s: cannot create ccb dmamap32 (%d)\n",
2796 			    DEVNAME(sc), error);
2797 			goto destroy;
2798 		}
2799 		error = bus_dmamap_create(sc->sc_dmat64,
2800 		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2801 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap64);
2802 		if (error) {
2803 			printf("%s: cannot create ccb dmamap64 (%d)\n",
2804 			    DEVNAME(sc), error);
2805 			goto destroy32;
2806 		}
2807 
2808 		/* select i + 1'th request. 0 is reserved for events */
2809 		ccb->ccb_smid = i + 1;
2810 		ccb->ccb_request_offset = MFII_REQUEST_SIZE * (i + 1);
2811 		ccb->ccb_request = request + ccb->ccb_request_offset;
2812 		ccb->ccb_request_dva = MFII_DMA_DVA(sc->sc_requests) +
2813 		    ccb->ccb_request_offset;
2814 
2815 		/* select i'th MFI command frame */
2816 		ccb->ccb_mfi_offset = MFI_FRAME_SIZE * i;
2817 		ccb->ccb_mfi = mfi + ccb->ccb_mfi_offset;
2818 		ccb->ccb_mfi_dva = MFII_DMA_DVA(sc->sc_mfi) +
2819 		    ccb->ccb_mfi_offset;
2820 
2821 		/* select i'th sense */
2822 		ccb->ccb_sense_offset = MFI_SENSE_SIZE * i;
2823 		ccb->ccb_sense = (struct mfi_sense *)(sense +
2824 		    ccb->ccb_sense_offset);
2825 		ccb->ccb_sense_dva = MFII_DMA_DVA(sc->sc_sense) +
2826 		    ccb->ccb_sense_offset;
2827 
2828 		/* select i'th sgl */
2829 		ccb->ccb_sgl_offset = sizeof(struct mfii_sge) *
2830 		    sc->sc_max_sgl * i;
2831 		ccb->ccb_sgl = (struct mfii_sge *)(sgl + ccb->ccb_sgl_offset);
2832 		ccb->ccb_sgl_dva = MFII_DMA_DVA(sc->sc_sgl) +
2833 		    ccb->ccb_sgl_offset;
2834 
2835 		mutex_init(&ccb->ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
2836 		cv_init(&ccb->ccb_cv, "mfiiexec");
2837 
2838 		/* add ccb to queue */
2839 		mfii_put_ccb(sc, ccb);
2840 	}
2841 
2842 	return (0);
2843 
2844 destroy32:
2845 	bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap32);
2846 destroy:
2847 	/* free dma maps and ccb memory */
2848 	while ((ccb = mfii_get_ccb(sc)) != NULL) {
2849 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap32);
2850 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap64);
2851 	}
2852 
2853 	free(sc->sc_ccb, M_DEVBUF);
2854 
2855 	return (1);
2856 }
2857 
2858 #if NBIO > 0
/*
 * bio(4) ioctl entry point: dispatch to the per-command handlers under
 * the softc lock.
 */
static int
mfii_ioctl(device_t dev, u_long cmd, void *addr)
{
	struct mfii_softc	*sc = device_private(dev);
	int error = 0;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl ", DEVNAME(sc));

	mutex_enter(&sc->sc_lock);

	switch (cmd) {
	case BIOCINQ:
		DNPRINTF(MFII_D_IOCTL, "inq\n");
		error = mfii_ioctl_inq(sc, (struct bioc_inq *)addr);
		break;

	case BIOCVOL:
		DNPRINTF(MFII_D_IOCTL, "vol\n");
		error = mfii_ioctl_vol(sc, (struct bioc_vol *)addr);
		break;

	case BIOCDISK:
		DNPRINTF(MFII_D_IOCTL, "disk\n");
		error = mfii_ioctl_disk(sc, (struct bioc_disk *)addr);
		break;

	case BIOCALARM:
		DNPRINTF(MFII_D_IOCTL, "alarm\n");
		error = mfii_ioctl_alarm(sc, (struct bioc_alarm *)addr);
		break;

	case BIOCBLINK:
		DNPRINTF(MFII_D_IOCTL, "blink\n");
		error = mfii_ioctl_blink(sc, (struct bioc_blink *)addr);
		break;

	case BIOCSETSTATE:
		DNPRINTF(MFII_D_IOCTL, "setstate\n");
		error = mfii_ioctl_setstate(sc, (struct bioc_setstate *)addr);
		break;

#if 0
	case BIOCPATROL:
		DNPRINTF(MFII_D_IOCTL, "patrol\n");
		error = mfii_ioctl_patrol(sc, (struct bioc_patrol *)addr);
		break;
#endif

	default:
		DNPRINTF(MFII_D_IOCTL, " invalid ioctl\n");
		error = ENOTTY;
	}

	mutex_exit(&sc->sc_lock);

	return (error);
}
2916 
/*
 * Refresh the cached controller state used by the bio(4) handlers:
 * controller info, full RAID configuration (sc_cfg), logical-drive
 * list (sc_ld_list), per-LD details (sc_ld_details) and the count of
 * physical disks in use (sc_no_pd).
 *
 * Returns 0 on success, EINVAL on any failure.  Called with sc_lock
 * held (from mfii_ioctl).
 */
static int
mfii_bio_getitall(struct mfii_softc *sc)
{
	int			i, d, rv = EINVAL;
	size_t			size;
	union mfi_mbox		mbox;
	struct mfi_conf		*cfg = NULL;
	struct mfi_ld_details	*ld_det = NULL;

	/* get info */
	if (mfii_get_info(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_get_info failed\n",
		    DEVNAME(sc));
		goto done;
	}

	/* send single element command to retrieve size for full structure */
	cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
	    MFII_DATA_IN, false)) {
		free(cfg, M_DEVBUF);
		goto done;
	}

	/* The header tells us how big the real config is. */
	size = cfg->mfc_size;
	free(cfg, M_DEVBUF);

	/* memory for read config */
	cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size,
	    MFII_DATA_IN, false)) {
		free(cfg, M_DEVBUF);
		goto done;
	}

	/* replace current pointer with new one */
	if (sc->sc_cfg)
		free(sc->sc_cfg, M_DEVBUF);
	sc->sc_cfg = cfg;

	/* get all ld info */
	memset(&mbox, 0, sizeof(mbox));
	/* mbox.b[0] = 1 requests the extended (256 VD) list. */
	if (sc->sc_max256vd)
		mbox.b[0] = 1;
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, &mbox, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), MFII_DATA_IN, false))
		goto done;

	/* get memory for all ld structures */
	size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
	if (sc->sc_ld_sz != size) {
		if (sc->sc_ld_details)
			free(sc->sc_ld_details, M_DEVBUF);

		ld_det = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (ld_det == NULL)
			goto done;
		sc->sc_ld_sz = size;
		sc->sc_ld_details = ld_det;
	}

	/* find used physical disks */
	size = sizeof(struct mfi_ld_details);
	for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
		memset(&mbox, 0, sizeof(mbox));
		mbox.b[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		if (mfii_mgmt(sc, MR_DCMD_LD_GET_INFO, &mbox,
		    &sc->sc_ld_details[i], size, MFII_DATA_IN, false))
			goto done;

		/* drives per span * spans = disks used by this LD */
		d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
		    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
	}
	sc->sc_no_pd = d;

	rv = 0;
done:
	return (rv);
}
3000 
/*
 * BIOCINQ handler: report the adapter name and disk/volume counts.
 * Returns 0 on success, EINVAL on failure.
 */
static int
mfii_ioctl_inq(struct mfii_softc *sc, struct bioc_inq *bi)
{
	int			rv = EINVAL;
	struct mfi_conf		*cfg = NULL;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_inq\n", DEVNAME(sc));

	/* Refresh the cached controller/config state first. */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	/* count unused disks as volumes */
	if (sc->sc_cfg == NULL)
		goto done;
	cfg = sc->sc_cfg;

	bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
	/* Volumes = logical drives + hot spares. */
	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
#if notyet
	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
	    (bi->bi_nodisk - sc->sc_no_pd);
#endif
	/* tell bio who we are */
	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));

	rv = 0;
done:
	return (rv);
}
3033 
/*
 * BIOCVOL handler: report status, geometry and progress for one volume.
 * Volume IDs beyond the logical-drive list refer to hot spares and
 * unused disks, handled by mfii_bio_hs().
 * Returns 0 on success, EINVAL on failure.
 */
static int
mfii_ioctl_vol(struct mfii_softc *sc, struct bioc_vol *bv)
{
	int			i, per, rv = EINVAL;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_vol %#x\n",
	    DEVNAME(sc), bv->bv_volid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
		/* go do hotspares & unused disks */
		rv = mfii_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
		goto done;
	}

	i = bv->bv_volid;
	strlcpy(bv->bv_dev, sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_name,
	    sizeof(bv->bv_dev));

	/* Map firmware LD state to a bio volume status. */
	switch (sc->sc_ld_list.mll_list[i].mll_state) {
	case MFI_LD_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;

	case MFI_LD_PART_DEGRADED:
	case MFI_LD_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;

	case MFI_LD_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;

	default:
		bv->bv_status = BIOC_SVINVALID;
		DNPRINTF(MFII_D_IOCTL, "%s: invalid logical disk state %#x\n",
		    DEVNAME(sc),
		    sc->sc_ld_list.mll_list[i].mll_state);
	}

	/* additional status can modify MFI status */
	switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
	case MFI_LD_PROG_CC:
		/* Consistency check in progress; 0xffff = 100%. */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_BGI:
		/* Background initialisation in progress. */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_FGI:
	case MFI_LD_PROG_RECONSTRUCT:
		/* nothing yet */
		break;
	}

#if 0
	if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01)
		bv->bv_cache = BIOC_CVWRITEBACK;
	else
		bv->bv_cache = BIOC_CVWRITETHROUGH;
#endif

	/*
	 * The RAID levels are determined per the SNIA DDF spec, this is only
	 * a subset that is valid for the MFI controller.
	 */
	bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
	/* Multi-span arrays report as nested levels (e.g. 1 -> 10). */
	if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth > 1)
		bv->bv_level *= 10;

	bv->bv_nodisk =
	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;

	bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */
	bv->bv_stripe_size =
	    (512 << sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_stripe_size)
	    / 1024; /* in KB */

	rv = 0;
done:
	return (rv);
}
3132 
/*
 * Fill in a bioc_disk structure for the physical disk addressed by
 * bd->bd_volid (logical volume) and bd->bd_diskid (disk within that
 * volume).  Volume ids past the last logical disk are handed off to
 * mfii_bio_hs() for hotspare reporting.  Returns 0 on success, EINVAL
 * on failure.
 */
static int
mfii_ioctl_disk(struct mfii_softc *sc, struct bioc_disk *bd)
{
	struct mfi_conf		*cfg;
	struct mfi_array	*ar;
	struct mfi_ld_cfg	*ld;
	struct mfi_pd_details	*pd;
	struct mfi_pd_list	*pl;
	struct scsipi_inquiry_data *inqbuf;
	char			vend[8+16+4+1], *vendp;
	int			i, rv = EINVAL;
	int			arr, vol, disk, span;
	union mfi_mbox		mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_disk %#x\n",
	    DEVNAME(sc), bd->bd_diskid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	cfg = sc->sc_cfg;

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	ar = cfg->mfc_array;
	vol = bd->bd_volid;
	if (vol >= cfg->mfc_no_ld) {
		/* do hotspares */
		rv = mfii_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
		goto freeme;
	}

	/* calculate offset to ld structure */
	ld = (struct mfi_ld_cfg *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array);

	/* use span 0 only when raid group is not spanned */
	if (ld[vol].mlc_parm.mpa_span_depth > 1)
		span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
	else
		span = 0;
	arr = ld[vol].mlc_span[span].mls_index;

	/* offset disk into pd list */
	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;

	/* 0xffff in the array slot marks an absent/removed disk */
	if (ar[arr].pd[disk].mar_pd.mfp_id == 0xffffU) {
		/* disk is missing but succeed command */
		bd->bd_status = BIOC_SDFAILED;
		rv = 0;

		/*
		 * Try to find an unused disk for the target to rebuild.
		 * If one is found, the GET_INFO reply left in *pd is used
		 * below to fill in the remaining fields; if none is found
		 * the command still succeeds with bd_status = BIOC_SDFAILED.
		 */
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
		    MFII_DATA_IN, false))
			goto freeme;

		for (i = 0; i < pl->mpl_no_pd; i++) {
			if (pl->mpl_address[i].mpa_scsi_type != 0)
				continue;

			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox,
			    pd, sizeof(*pd), MFII_DATA_IN, false))
				continue;

			if (pd->mpd_fw_state == MFI_PD_UNCONFIG_GOOD ||
			    pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD)
				break;
		}

		/* no rebuild candidate; return with BIOC_SDFAILED, rv = 0 */
		if (i == pl->mpl_no_pd)
			goto freeme;
	} else {
		/* disk present: query its details directly */
		memset(&mbox, 0, sizeof(mbox));
		mbox.s[0] = ar[arr].pd[disk].mar_pd.mfp_id;
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
		    MFII_DATA_IN, false)) {
			bd->bd_status = BIOC_SDINVALID;
			goto freeme;
		}
	}

	/* get the remaining fields */
	bd->bd_channel = pd->mpd_enc_idx;
	bd->bd_target = pd->mpd_enc_slot;

	/* get status */
	switch (pd->mpd_fw_state){
	case MFI_PD_UNCONFIG_GOOD:
	case MFI_PD_UNCONFIG_BAD:
		bd->bd_status = BIOC_SDUNUSED;
		break;

	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
		bd->bd_status = BIOC_SDHOTSPARE;
		break;

	case MFI_PD_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;

	case MFI_PD_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;

	case MFI_PD_REBUILD:
		bd->bd_status = BIOC_SDREBUILD;
		break;

	case MFI_PD_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;

	case MFI_PD_COPYBACK:
	case MFI_PD_SYSTEM:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = pd->mpd_size * 512; /* bytes per block */

	/* copy the (not NUL-terminated) inquiry vendor field out safely */
	inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
	vendp = inqbuf->vendor;
	memcpy(vend, vendp, sizeof vend - 1);
	vend[sizeof vend - 1] = '\0';
	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));

	/* XXX find a way to retrieve serial nr from drive */
	/* XXX find a way to get bd_procdev */

#if 0
	mfp = &pd->mpd_progress;
	if (mfp->mfp_in_prog & MFI_PD_PROG_PR) {
		mp = &mfp->mfp_patrol_read;
		bd->bd_patrol.bdp_percent = (mp->mp_progress * 100) / 0xffff;
		bd->bd_patrol.bdp_seconds = mp->mp_elapsed_seconds;
	}
#endif

	rv = 0;
freeme:
	free(pd, M_DEVBUF);
	free(pl, M_DEVBUF);

	return (rv);
}
3286 
3287 static int
3288 mfii_ioctl_alarm(struct mfii_softc *sc, struct bioc_alarm *ba)
3289 {
3290 	uint32_t		opc;
3291 	int			rv = 0;
3292 	int8_t			ret;
3293 	mfii_direction_t dir = MFII_DATA_NONE;
3294 
3295 	switch (ba->ba_opcode) {
3296 	case BIOC_SADISABLE:
3297 		opc = MR_DCMD_SPEAKER_DISABLE;
3298 		break;
3299 
3300 	case BIOC_SAENABLE:
3301 		opc = MR_DCMD_SPEAKER_ENABLE;
3302 		break;
3303 
3304 	case BIOC_SASILENCE:
3305 		opc = MR_DCMD_SPEAKER_SILENCE;
3306 		break;
3307 
3308 	case BIOC_GASTATUS:
3309 		opc = MR_DCMD_SPEAKER_GET;
3310 		dir = MFII_DATA_IN;
3311 		break;
3312 
3313 	case BIOC_SATEST:
3314 		opc = MR_DCMD_SPEAKER_TEST;
3315 		break;
3316 
3317 	default:
3318 		DNPRINTF(MFII_D_IOCTL,
3319 		    "%s: mfii_ioctl_alarm biocalarm invalid opcode %x\n",
3320 		    DEVNAME(sc), ba->ba_opcode);
3321 		return (EINVAL);
3322 	}
3323 
3324 	if (mfii_mgmt(sc, opc, NULL, &ret, sizeof(ret), dir, false))
3325 		rv = EINVAL;
3326 	else
3327 		if (ba->ba_opcode == BIOC_GASTATUS)
3328 			ba->ba_status = ret;
3329 		else
3330 			ba->ba_status = 0;
3331 
3332 	return (rv);
3333 }
3334 
3335 static int
3336 mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *bb)
3337 {
3338 	int			i, found, rv = EINVAL;
3339 	union mfi_mbox		mbox;
3340 	uint32_t		cmd;
3341 	struct mfi_pd_list	*pd;
3342 
3343 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink %x\n", DEVNAME(sc),
3344 	    bb->bb_status);
3345 
3346 	/* channel 0 means not in an enclosure so can't be blinked */
3347 	if (bb->bb_channel == 0)
3348 		return (EINVAL);
3349 
3350 	pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
3351 
3352 	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pd, sizeof(*pd),
3353 	    MFII_DATA_IN, false))
3354 		goto done;
3355 
3356 	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
3357 		if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
3358 		    bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
3359 			found = 1;
3360 			break;
3361 		}
3362 
3363 	if (!found)
3364 		goto done;
3365 
3366 	memset(&mbox, 0, sizeof(mbox));
3367 	mbox.s[0] = pd->mpl_address[i].mpa_pd_id;
3368 
3369 	switch (bb->bb_status) {
3370 	case BIOC_SBUNBLINK:
3371 		cmd = MR_DCMD_PD_UNBLINK;
3372 		break;
3373 
3374 	case BIOC_SBBLINK:
3375 		cmd = MR_DCMD_PD_BLINK;
3376 		break;
3377 
3378 	case BIOC_SBALARM:
3379 	default:
3380 		DNPRINTF(MFII_D_IOCTL,
3381 		    "%s: mfii_ioctl_blink biocblink invalid opcode %x\n",
3382 		    DEVNAME(sc), bb->bb_status);
3383 		goto done;
3384 	}
3385 
3386 
3387 	if (mfii_mgmt(sc, cmd, &mbox, NULL, 0, MFII_DATA_NONE, false))
3388 		goto done;
3389 
3390 	rv = 0;
3391 done:
3392 	free(pd, M_DEVBUF);
3393 	return (rv);
3394 }
3395 
/*
 * Prepare physical disk pd_id so it can be used as a rebuild target:
 * clear an UNCONFIG_BAD state, then clear any foreign (imported from
 * another controller) configuration, re-reading the disk state after
 * each step.  Returns 0 if the disk ends up UNCONFIG_GOOD and
 * non-foreign, an error code otherwise.
 */
static int
mfii_makegood(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfii_foreign_scan_info *fsi;
	struct mfi_pd_details	*pd;
	union mfi_mbox		mbox;
	int			rv;

	fsi = malloc(sizeof *fsi, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD) {
		/* mbox still holds pd_id in s[0]; add seq and target state */
		mbox.s[0] = pd_id;
		mbox.s[1] = pd->mpd_pd.mfp_seq;
		mbox.b[4] = MFI_PD_UNCONFIG_GOOD;
		rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0,
		    MFII_DATA_NONE, false);
		if (rv != 0)
			goto done;
	}

	/* re-read the disk state after the possible state change */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	if (pd->mpd_ddf_state & MFI_DDF_FOREIGN) {
		/* scan for foreign configs; clear them only if any exist */
		rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_SCAN, NULL,
		    fsi, sizeof(*fsi), MFII_DATA_IN, false);
		if (rv != 0)
			goto done;

		if (fsi->count > 0) {
			rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_CLEAR, NULL,
			    NULL, 0, MFII_DATA_NONE, false);
			if (rv != 0)
				goto done;
		}
	}

	/* final check: the disk must now be usable for a rebuild */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state != MFI_PD_UNCONFIG_GOOD ||
	    pd->mpd_ddf_state & MFI_DDF_FOREIGN)
		rv = ENXIO;

done:
	free(fsi, M_DEVBUF);
	free(pd, M_DEVBUF);

	return (rv);
}
3462 
3463 static int
3464 mfii_makespare(struct mfii_softc *sc, uint16_t pd_id)
3465 {
3466 	struct mfi_hotspare	*hs;
3467 	struct mfi_pd_details	*pd;
3468 	union mfi_mbox		mbox;
3469 	size_t			size;
3470 	int			rv = EINVAL;
3471 
3472 	/* we really could skip and expect that inq took care of it */
3473 	if (mfii_bio_getitall(sc)) {
3474 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
3475 		    DEVNAME(sc));
3476 		return (rv);
3477 	}
3478 	size = sizeof *hs + sizeof(uint16_t) * sc->sc_cfg->mfc_no_array;
3479 
3480 	hs = malloc(size, M_DEVBUF, M_WAITOK);
3481 	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3482 
3483 	memset(&mbox, 0, sizeof mbox);
3484 	mbox.s[0] = pd_id;
3485 	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3486 	    MFII_DATA_IN, false);
3487 	if (rv != 0)
3488 		goto done;
3489 
3490 	memset(hs, 0, size);
3491 	hs->mhs_pd.mfp_id = pd->mpd_pd.mfp_id;
3492 	hs->mhs_pd.mfp_seq = pd->mpd_pd.mfp_seq;
3493 	rv = mfii_mgmt(sc, MR_DCMD_CFG_MAKE_SPARE, NULL, hs, size,
3494 	    MFII_DATA_OUT, false);
3495 
3496 done:
3497 	free(hs, M_DEVBUF);
3498 	free(pd, M_DEVBUF);
3499 
3500 	return (rv);
3501 }
3502 
/*
 * Handle BIOCSETSTATE: change the state of the physical disk addressed
 * by enclosure (bs_channel) and slot (bs_target) — online, offline,
 * hotspare, or start a rebuild.  Returns 0 on success, an error code
 * otherwise.
 */
static int
mfii_ioctl_setstate(struct mfii_softc *sc, struct bioc_setstate *bs)
{
	struct mfi_pd_details	*pd;
	struct mfi_pd_list	*pl;
	int			i, found, rv = EINVAL;
	union mfi_mbox		mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate %x\n", DEVNAME(sc),
	    bs->bs_status);

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
	    MFII_DATA_IN, false))
		goto done;

	/* locate the disk by its enclosure/slot address */
	for (i = 0, found = 0; i < pl->mpl_no_pd; i++)
		if (bs->bs_channel == pl->mpl_address[i].mpa_enc_index &&
		    bs->bs_target == pl->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;

	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false))
		goto done;

	/* SET_STATE takes the disk id, its sequence number and (below)
	 * the requested state in b[4] */
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
	mbox.s[1] = pd->mpd_pd.mfp_seq;

	switch (bs->bs_status) {
	case BIOC_SSONLINE:
		mbox.b[4] = MFI_PD_ONLINE;
		break;

	case BIOC_SSOFFLINE:
		mbox.b[4] = MFI_PD_OFFLINE;
		break;

	case BIOC_SSHOTSPARE:
		mbox.b[4] = MFI_PD_HOTSPARE;
		break;

	case BIOC_SSREBUILD:
		/* a disk that is not OFFLINE must first be made usable
		 * (makegood) and turned into a spare (makespare) */
		if (pd->mpd_fw_state != MFI_PD_OFFLINE) {
			if ((rv = mfii_makegood(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			if ((rv = mfii_makespare(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			/* re-read state (and a fresh sequence number) */
			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox,
			    pd, sizeof(*pd), MFII_DATA_IN, false);
			if (rv != 0)
				goto done;

			/* rebuilding might be started by mfii_makespare() */
			if (pd->mpd_fw_state == MFI_PD_REBUILD) {
				rv = 0;
				goto done;
			}

			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			mbox.s[1] = pd->mpd_pd.mfp_seq;
		}
		mbox.b[4] = MFI_PD_REBUILD;
		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate invalid "
		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
		goto done;
	}


	rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0,
	    MFII_DATA_NONE, false);
done:
	free(pd, M_DEVBUF);
	free(pl, M_DEVBUF);
	return (rv);
}
3597 
#if 0
/*
 * Handle BIOCPATROL: control and query the controller's patrol-read
 * feature.  Currently compiled out (never built).
 *
 * NOTE(review): the BIOC_SPSTART/BIOC_SPSTOP path passes a NULL buffer
 * with length 0 but direction MFII_DATA_IN; MFII_DATA_NONE looks
 * intended — verify before enabling this code.
 */
int
mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *bp)
{
	uint32_t		opc;
	int			rv = 0;
	struct mfi_pr_properties prop;
	struct mfi_pr_status	status;
	uint32_t		time, exec_freq;

	switch (bp->bp_opcode) {
	case BIOC_SPSTOP:
	case BIOC_SPSTART:
		if (bp->bp_opcode == BIOC_SPSTART)
			opc = MR_DCMD_PR_START;
		else
			opc = MR_DCMD_PR_STOP;
		if (mfii_mgmt(sc, opc, NULL, NULL, 0, MFII_DATA_IN, false))
			return (EINVAL);
		break;

	case BIOC_SPMANUAL:
	case BIOC_SPDISABLE:
	case BIOC_SPAUTO:
		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time),
		    MFII_DATA_IN, false))
			return (EINVAL);

		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_IN, false))
			return (EINVAL);

		/* update only the fields relevant to the requested mode */
		switch (bp->bp_opcode) {
		case BIOC_SPMANUAL:
			prop.op_mode = MFI_PR_OPMODE_MANUAL;
			break;
		case BIOC_SPDISABLE:
			prop.op_mode = MFI_PR_OPMODE_DISABLED;
			break;
		case BIOC_SPAUTO:
			if (bp->bp_autoival != 0) {
				if (bp->bp_autoival == -1)
					/* continuously */
					exec_freq = 0xffffffffU;
				else if (bp->bp_autoival > 0)
					exec_freq = bp->bp_autoival;
				else
					return (EINVAL);
				prop.exec_freq = exec_freq;
			}
			if (bp->bp_autonext != 0) {
				if (bp->bp_autonext < 0)
					return (EINVAL);
				else
					prop.next_exec =
					    time + bp->bp_autonext;
			}
			prop.op_mode = MFI_PR_OPMODE_AUTO;
			break;
		}

		/* write the modified properties back to the firmware */
		opc = MR_DCMD_PR_SET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_OUT, false))
			return (EINVAL);

		break;

	case BIOC_GPSTATUS:
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_IN, false))
			return (EINVAL);

		opc = MR_DCMD_PR_GET_STATUS;
		if (mfii_mgmt(sc, opc, NULL, &status, sizeof(status),
		    MFII_DATA_IN, false))
			return (EINVAL);

		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time),
		    MFII_DATA_IN, false))
			return (EINVAL);

		switch (prop.op_mode) {
		case MFI_PR_OPMODE_AUTO:
			bp->bp_mode = BIOC_SPMAUTO;
			bp->bp_autoival = prop.exec_freq;
			bp->bp_autonext = prop.next_exec;
			bp->bp_autonow = time;
			break;
		case MFI_PR_OPMODE_MANUAL:
			bp->bp_mode = BIOC_SPMMANUAL;
			break;
		case MFI_PR_OPMODE_DISABLED:
			bp->bp_mode = BIOC_SPMDISABLED;
			break;
		default:
			printf("%s: unknown patrol mode %d\n",
			    DEVNAME(sc), prop.op_mode);
			break;
		}

		switch (status.state) {
		case MFI_PR_STATE_STOPPED:
			bp->bp_status = BIOC_SPSSTOPPED;
			break;
		case MFI_PR_STATE_READY:
			bp->bp_status = BIOC_SPSREADY;
			break;
		case MFI_PR_STATE_ACTIVE:
			bp->bp_status = BIOC_SPSACTIVE;
			break;
		case MFI_PR_STATE_ABORTED:
			bp->bp_status = BIOC_SPSABORTED;
			break;
		default:
			printf("%s: unknown patrol state %d\n",
			    DEVNAME(sc), status.state);
			break;
		}

		break;

	default:
		DNPRINTF(MFII_D_IOCTL,
		    "%s: mfii_ioctl_patrol biocpatrol invalid opcode %x\n",
		    DEVNAME(sc), bp->bp_opcode);
		return (EINVAL);
	}

	return (rv);
}
#endif
3736 
3737 static int
3738 mfii_bio_hs(struct mfii_softc *sc, int volid, int type, void *bio_hs)
3739 {
3740 	struct mfi_conf		*cfg;
3741 	struct mfi_hotspare	*hs;
3742 	struct mfi_pd_details	*pd;
3743 	struct bioc_disk	*sdhs;
3744 	struct bioc_vol		*vdhs;
3745 	struct scsipi_inquiry_data *inqbuf;
3746 	char			vend[8+16+4+1], *vendp;
3747 	int			i, rv = EINVAL;
3748 	uint32_t		size;
3749 	union mfi_mbox		mbox;
3750 
3751 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs %d\n", DEVNAME(sc), volid);
3752 
3753 	if (!bio_hs)
3754 		return (EINVAL);
3755 
3756 	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3757 
3758 	/* send single element command to retrieve size for full structure */
3759 	cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
3760 	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
3761 	    MFII_DATA_IN, false))
3762 		goto freeme;
3763 
3764 	size = cfg->mfc_size;
3765 	free(cfg, M_DEVBUF);
3766 
3767 	/* memory for read config */
3768 	cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
3769 	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size,
3770 	    MFII_DATA_IN, false))
3771 		goto freeme;
3772 
3773 	/* calculate offset to hs structure */
3774 	hs = (struct mfi_hotspare *)(
3775 	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
3776 	    cfg->mfc_array_size * cfg->mfc_no_array +
3777 	    cfg->mfc_ld_size * cfg->mfc_no_ld);
3778 
3779 	if (volid < cfg->mfc_no_ld)
3780 		goto freeme; /* not a hotspare */
3781 
3782 	if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
3783 		goto freeme; /* not a hotspare */
3784 
3785 	/* offset into hotspare structure */
3786 	i = volid - cfg->mfc_no_ld;
3787 
3788 	DNPRINTF(MFII_D_IOCTL,
3789 	    "%s: mfii_vol_hs i %d volid %d no_ld %d no_hs %d "
3790 	    "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
3791 	    cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
3792 
3793 	/* get pd fields */
3794 	memset(&mbox, 0, sizeof(mbox));
3795 	mbox.s[0] = hs[i].mhs_pd.mfp_id;
3796 	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3797 	    MFII_DATA_IN, false)) {
3798 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs illegal PD\n",
3799 		    DEVNAME(sc));
3800 		goto freeme;
3801 	}
3802 
3803 	switch (type) {
3804 	case MFI_MGMT_VD:
3805 		vdhs = bio_hs;
3806 		vdhs->bv_status = BIOC_SVONLINE;
3807 		vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3808 		vdhs->bv_level = -1; /* hotspare */
3809 		vdhs->bv_nodisk = 1;
3810 		break;
3811 
3812 	case MFI_MGMT_SD:
3813 		sdhs = bio_hs;
3814 		sdhs->bd_status = BIOC_SDHOTSPARE;
3815 		sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3816 		sdhs->bd_channel = pd->mpd_enc_idx;
3817 		sdhs->bd_target = pd->mpd_enc_slot;
3818 		inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
3819 		vendp = inqbuf->vendor;
3820 		memcpy(vend, vendp, sizeof vend - 1);
3821 		vend[sizeof vend - 1] = '\0';
3822 		strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
3823 		break;
3824 
3825 	default:
3826 		goto freeme;
3827 	}
3828 
3829 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs 6\n", DEVNAME(sc));
3830 	rv = 0;
3831 freeme:
3832 	free(pd, M_DEVBUF);
3833 	free(cfg, M_DEVBUF);
3834 
3835 	return (rv);
3836 }
3837 
3838 #endif /* NBIO > 0 */
3839 
3840 static void
3841 mfii_bbu(struct mfii_softc *sc, envsys_data_t *edata)
3842 {
3843 	struct mfi_bbu_status bbu;
3844 	u_int32_t status;
3845 	u_int32_t mask;
3846 	u_int32_t soh_bad;
3847 	int rv;
3848 
3849 	mutex_enter(&sc->sc_lock);
3850 	rv = mfii_mgmt(sc, MR_DCMD_BBU_GET_STATUS, NULL, &bbu,
3851 	    sizeof(bbu), MFII_DATA_IN, false);
3852 	mutex_exit(&sc->sc_lock);
3853 	if (rv != 0) {
3854 		edata->state = ENVSYS_SINVALID;
3855 		edata->value_cur = 0;
3856 		return;
3857 	}
3858 
3859 	switch (bbu.battery_type) {
3860 	case MFI_BBU_TYPE_IBBU:
3861 	case MFI_BBU_TYPE_IBBU09:
3862 	case MFI_BBU_TYPE_CVPM02:
3863 		mask = MFI_BBU_STATE_BAD_IBBU;
3864 		soh_bad = 0;
3865 		break;
3866 	case MFI_BBU_TYPE_BBU:
3867 		mask = MFI_BBU_STATE_BAD_BBU;
3868 		soh_bad = (bbu.detail.bbu.is_SOH_good == 0);
3869 		break;
3870 
3871 	case MFI_BBU_TYPE_NONE:
3872 	default:
3873 		edata->state = ENVSYS_SCRITICAL;
3874 		edata->value_cur = 0;
3875 		return;
3876 	}
3877 
3878 	status = le32toh(bbu.fw_status) & mask;
3879 	switch (edata->sensor) {
3880 	case 0:
3881 		edata->value_cur = (status || soh_bad) ? 0 : 1;
3882 		edata->state =
3883 		    edata->value_cur ? ENVSYS_SVALID : ENVSYS_SCRITICAL;
3884 		return;
3885 	case 1:
3886 		edata->value_cur = le16toh(bbu.voltage) * 1000;
3887 		edata->state = ENVSYS_SVALID;
3888 		return;
3889 	case 2:
3890 		edata->value_cur = (int16_t)le16toh(bbu.current) * 1000;
3891 		edata->state = ENVSYS_SVALID;
3892 		return;
3893 	case 3:
3894 		edata->value_cur =
3895 		    le16toh(bbu.temperature) * 1000000 + 273150000;
3896 		edata->state = ENVSYS_SVALID;
3897 		return;
3898 	}
3899 }
3900 
3901 static void
3902 mfii_refresh_ld_sensor(struct mfii_softc *sc, envsys_data_t *edata)
3903 {
3904 	struct bioc_vol bv;
3905 	int error;
3906 
3907 	memset(&bv, 0, sizeof(bv));
3908 	bv.bv_volid = edata->sensor - MFI_BBU_SENSORS;
3909 	mutex_enter(&sc->sc_lock);
3910 	error = mfii_ioctl_vol(sc, &bv);
3911 	mutex_exit(&sc->sc_lock);
3912 	if (error)
3913 		bv.bv_status = BIOC_SVINVALID;
3914 	bio_vol_to_envsys(edata, &bv);
3915 }
3916 
3917 static void
3918 mfii_init_ld_sensor(struct mfii_softc *sc, envsys_data_t *sensor, int i)
3919 {
3920 	sensor->units = ENVSYS_DRIVE;
3921 	sensor->state = ENVSYS_SINVALID;
3922 	sensor->value_cur = ENVSYS_DRIVE_EMPTY;
3923 	/* Enable monitoring for drive state changes */
3924 	sensor->flags |= ENVSYS_FMONSTCHANGED;
3925 	snprintf(sensor->desc, sizeof(sensor->desc), "%s:%d", DEVNAME(sc), i);
3926 }
3927 
3928 static void
3929 mfii_attach_sensor(struct mfii_softc *sc, envsys_data_t *s)
3930 {
3931 	if (sysmon_envsys_sensor_attach(sc->sc_sme, s))
3932 		aprint_error_dev(sc->sc_dev,
3933 		    "failed to attach sensor %s\n", s->desc);
3934 }
3935 
3936 static int
3937 mfii_create_sensors(struct mfii_softc *sc)
3938 {
3939 	int i, rv;
3940 	const int nsensors = MFI_BBU_SENSORS + MFII_MAX_LD_EXT;
3941 
3942 	sc->sc_sme = sysmon_envsys_create();
3943 	sc->sc_sensors = malloc(sizeof(envsys_data_t) * nsensors,
3944 	    M_DEVBUF, M_WAITOK | M_ZERO);
3945 
3946 	/* BBU */
3947 	sc->sc_sensors[0].units = ENVSYS_INDICATOR;
3948 	sc->sc_sensors[0].state = ENVSYS_SINVALID;
3949 	sc->sc_sensors[0].value_cur = 0;
3950 	sc->sc_sensors[1].units = ENVSYS_SVOLTS_DC;
3951 	sc->sc_sensors[1].state = ENVSYS_SINVALID;
3952 	sc->sc_sensors[1].value_cur = 0;
3953 	sc->sc_sensors[2].units = ENVSYS_SAMPS;
3954 	sc->sc_sensors[2].state = ENVSYS_SINVALID;
3955 	sc->sc_sensors[2].value_cur = 0;
3956 	sc->sc_sensors[3].units = ENVSYS_STEMP;
3957 	sc->sc_sensors[3].state = ENVSYS_SINVALID;
3958 	sc->sc_sensors[3].value_cur = 0;
3959 	sc->sc_ld_sensors = sc->sc_sensors + MFI_BBU_SENSORS;
3960 
3961 	if (ISSET(le32toh(sc->sc_info.mci_hw_present), MFI_INFO_HW_BBU)) {
3962 		sc->sc_bbuok = true;
3963 		sc->sc_sensors[0].flags |= ENVSYS_FMONCRITICAL;
3964 		snprintf(sc->sc_sensors[0].desc, sizeof(sc->sc_sensors[0].desc),
3965 		    "%s BBU state", DEVNAME(sc));
3966 		snprintf(sc->sc_sensors[1].desc, sizeof(sc->sc_sensors[1].desc),
3967 		    "%s BBU voltage", DEVNAME(sc));
3968 		snprintf(sc->sc_sensors[2].desc, sizeof(sc->sc_sensors[2].desc),
3969 		    "%s BBU current", DEVNAME(sc));
3970 		snprintf(sc->sc_sensors[3].desc, sizeof(sc->sc_sensors[3].desc),
3971 		    "%s BBU temperature", DEVNAME(sc));
3972 		for (i = 0; i < MFI_BBU_SENSORS; i++) {
3973 			mfii_attach_sensor(sc, &sc->sc_sensors[i]);
3974 		}
3975 	}
3976 
3977 	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
3978 		mfii_init_ld_sensor(sc, &sc->sc_ld_sensors[i], i);
3979 		mfii_attach_sensor(sc, &sc->sc_ld_sensors[i]);
3980 	}
3981 
3982 	sc->sc_sme->sme_name = DEVNAME(sc);
3983 	sc->sc_sme->sme_cookie = sc;
3984 	sc->sc_sme->sme_refresh = mfii_refresh_sensor;
3985 	rv = sysmon_envsys_register(sc->sc_sme);
3986 	if (rv) {
3987 		aprint_error_dev(sc->sc_dev,
3988 		    "unable to register with sysmon (rv = %d)\n", rv);
3989 		sysmon_envsys_destroy(sc->sc_sme);
3990 		sc->sc_sme = NULL;
3991 	}
3992 	return rv;
3993 
3994 }
3995 
3996 static int
3997 mfii_destroy_sensors(struct mfii_softc *sc)
3998 {
3999 	if (sc->sc_sme == NULL)
4000 		return 0;
4001 	sysmon_envsys_unregister(sc->sc_sme);
4002 	sc->sc_sme = NULL;
4003 	free(sc->sc_sensors, M_DEVBUF);
4004 	return 0;
4005 }
4006 
4007 static void
4008 mfii_refresh_sensor(struct sysmon_envsys *sme, envsys_data_t *edata)
4009 {
4010 	struct mfii_softc	*sc = sme->sme_cookie;
4011 
4012 	if (edata->sensor >= MFI_BBU_SENSORS + MFII_MAX_LD_EXT)
4013 		return;
4014 
4015 	if (edata->sensor < MFI_BBU_SENSORS) {
4016 		if (sc->sc_bbuok)
4017 			mfii_bbu(sc, edata);
4018 	} else {
4019 		mfii_refresh_ld_sensor(sc, edata);
4020 	}
4021 }
4022