xref: /netbsd-src/sys/dev/pci/mfii.c (revision f3cfa6f6ce31685c6c4a758bc430e69eb99f50a4)
1 /* $NetBSD: mfii.c,v 1.4 2019/04/24 09:21:01 bouyer Exp $ */
2 /* $OpenBSD: mfii.c,v 1.58 2018/08/14 05:22:21 jmatthew Exp $ */
3 
4 /*
5  * Copyright (c) 2018 Manuel Bouyer <Manuel.Bouyer@lip6.fr>
6  * Copyright (c) 2012 David Gwynne <dlg@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include <sys/cdefs.h>
22 __KERNEL_RCSID(0, "$NetBSD: mfii.c,v 1.4 2019/04/24 09:21:01 bouyer Exp $");
23 
24 #include "bio.h"
25 
26 #include <sys/atomic.h>
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/buf.h>
30 #include <sys/ioctl.h>
31 #include <sys/device.h>
32 #include <sys/kernel.h>
33 #include <sys/proc.h>
34 #include <sys/cpu.h>
35 #include <sys/conf.h>
36 #include <sys/kauth.h>
37 #include <sys/workqueue.h>
38 #include <sys/malloc.h>
39 
40 #include <uvm/uvm_param.h>
41 
42 #include <dev/pci/pcidevs.h>
43 #include <dev/pci/pcivar.h>
44 
45 #include <sys/bus.h>
46 
47 #include <dev/sysmon/sysmonvar.h>
48 #include <sys/envsys.h>
49 
50 #include <dev/scsipi/scsipi_all.h>
51 #include <dev/scsipi/scsi_all.h>
52 #include <dev/scsipi/scsi_spc.h>
53 #include <dev/scsipi/scsipi_disk.h>
54 #include <dev/scsipi/scsi_disk.h>
55 #include <dev/scsipi/scsiconf.h>
56 
57 #if NBIO > 0
58 #include <dev/biovar.h>
59 #endif /* NBIO > 0 */
60 
61 #include <dev/ic/mfireg.h>
62 #include <dev/pci/mpiireg.h>
63 
64 #define	MFII_BAR		0x14
65 #define MFII_BAR_35		0x10
66 #define	MFII_PCI_MEMSIZE	0x2000 /* 8k */
67 
68 #define MFII_OSTS_INTR_VALID	0x00000009
69 #define MFII_RPI		0x6c /* reply post host index */
70 #define MFII_OSP2		0xb4 /* outbound scratch pad 2 */
71 #define MFII_OSP3		0xb8 /* outbound scratch pad 3 */
72 
73 #define MFII_REQ_TYPE_SCSI	MPII_REQ_DESCR_SCSI_IO
74 #define MFII_REQ_TYPE_LDIO	(0x7 << 1)
75 #define MFII_REQ_TYPE_MFA	(0x1 << 1)
76 #define MFII_REQ_TYPE_NO_LOCK	(0x2 << 1)
77 #define MFII_REQ_TYPE_HI_PRI	(0x6 << 1)
78 
79 #define MFII_REQ_MFA(_a)	htole64((_a) | MFII_REQ_TYPE_MFA)
80 
81 #define MFII_FUNCTION_PASSTHRU_IO			(0xf0)
82 #define MFII_FUNCTION_LDIO_REQUEST			(0xf1)
83 
84 #define MFII_MAX_CHAIN_UNIT	0x00400000
85 #define MFII_MAX_CHAIN_MASK	0x000003E0
86 #define MFII_MAX_CHAIN_SHIFT	5
87 
88 #define MFII_256K_IO		128
89 #define MFII_1MB_IO		(MFII_256K_IO * 4)
90 
91 #define MFII_CHAIN_FRAME_MIN	1024
92 
/*
 * 64-bit request descriptor written to the controller to post a command.
 * Layout is dictated by the hardware interface, hence __packed.
 */
struct mfii_request_descr {
	u_int8_t	flags;		/* MFII_REQ_TYPE_* request type */
	u_int8_t	msix_index;	/* target MSI-X vector */
	u_int16_t	smid;		/* system message id naming the request frame */

	u_int16_t	lmid;
	u_int16_t	dev_handle;	/* NOTE(review): presumably fastpath target handle -- confirm */
} __packed;
101 
102 #define MFII_RAID_CTX_IO_TYPE_SYSPD	(0x1 << 4)
103 #define MFII_RAID_CTX_TYPE_CUDA		(0x2 << 4)
104 
/*
 * RAID context appended to the MPII SCSI IO request frame and consumed
 * by the IOP firmware.  Fixed hardware layout, hence __packed.
 */
struct mfii_raid_context {
	u_int8_t	type_nseg;
	u_int8_t	_reserved1;
	u_int16_t	timeout_value;

	/* flag values for reg_lock_flags; RL_* on 2.5, ROUTING_* on 3.5 iops */
	u_int16_t	reg_lock_flags;
#define MFII_RAID_CTX_RL_FLAGS_SEQNO_EN	(0x08)
#define MFII_RAID_CTX_RL_FLAGS_CPU0	(0x00)
#define MFII_RAID_CTX_RL_FLAGS_CPU1	(0x10)
#define MFII_RAID_CTX_RL_FLAGS_CUDA	(0x80)

#define MFII_RAID_CTX_ROUTING_FLAGS_SQN	(1 << 4)
#define MFII_RAID_CTX_ROUTING_FLAGS_CPU0 0
	u_int16_t	virtual_disk_target_id;

	u_int64_t	reg_lock_row_lba;

	u_int32_t	reg_lock_length;

	u_int16_t	next_lm_id;
	u_int8_t	ex_status;
	u_int8_t	status;		/* completion status from firmware */

	u_int8_t	raid_flags;
	u_int8_t	num_sge;
	u_int16_t	config_seq_num;

	u_int8_t	span_arm;
	u_int8_t	_reserved3[3];
} __packed;
135 
/*
 * Hardware scatter/gather list element; sg_flags takes the MFII_SGE_*
 * values defined below.
 */
struct mfii_sge {
	u_int64_t	sg_addr;	/* device address of the segment */
	u_int32_t	sg_len;		/* segment length in bytes */
	u_int16_t	_reserved;
	u_int8_t	sg_next_chain_offset;
	u_int8_t	sg_flags;
} __packed;
143 
144 #define MFII_SGE_ADDR_MASK		(0x03)
145 #define MFII_SGE_ADDR_SYSTEM		(0x00)
146 #define MFII_SGE_ADDR_IOCDDR		(0x01)
147 #define MFII_SGE_ADDR_IOCPLB		(0x02)
148 #define MFII_SGE_ADDR_IOCPLBNTA		(0x03)
149 #define MFII_SGE_END_OF_LIST		(0x40)
150 #define MFII_SGE_CHAIN_ELEMENT		(0x80)
151 
152 #define MFII_REQUEST_SIZE	256
153 
154 #define MR_DCMD_LD_MAP_GET_INFO			0x0300e101
155 
156 #define MFII_MAX_ROW		32
157 #define MFII_MAX_ARRAY		128
158 
/* one row of physical-disk ids of an array, as found in the fw LD map */
struct mfii_array_map {
	uint16_t		mam_pd[MFII_MAX_ROW];
} __packed;
162 
/* per-physical-disk device handle entry of the firmware LD map */
struct mfii_dev_handle {
	uint16_t		mdh_cur_handle;	/* handle currently in use */
	uint8_t			mdh_valid;
	uint8_t			mdh_reserved;
	uint16_t		mdh_handle[2];
} __packed;
169 
/* reply layout of the MR_DCMD_LD_MAP_GET_INFO management command */
struct mfii_ld_map {
	uint32_t		mlm_total_size;
	uint32_t		mlm_reserved1[5];
	uint32_t		mlm_num_lds;
	uint32_t		mlm_reserved2;
	uint8_t			mlm_tgtid_to_ld[2 * MFI_MAX_LD];
	uint8_t			mlm_pd_timeout;
	uint8_t			mlm_reserved3[7];
	struct mfii_array_map	mlm_am[MFII_MAX_ARRAY];
	struct mfii_dev_handle	mlm_dev_handle[MFI_MAX_PD];
} __packed;
181 
/*
 * Buffer exchanged with the firmware for task management (abort)
 * operations: an MPII scsi task request followed by its reply, each
 * padded to 128 bytes and 8-byte aligned.
 */
struct mfii_task_mgmt {
	union {
		uint8_t			request[128];
		struct mpii_msg_scsi_task_request
					mpii_request;
	} __packed __aligned(8);

	union {
		uint8_t			reply[128];
		uint32_t		flags;
#define MFII_TASK_MGMT_FLAGS_LD				(1 << 0)
#define MFII_TASK_MGMT_FLAGS_PD				(1 << 1)
		struct mpii_msg_scsi_task_reply
					mpii_reply;
	} __packed __aligned(8);
} __packed __aligned(8);
198 
199 /* We currently don't know the full details of the following struct */
/* opaque per-config blob; internal layout is unknown (see comment above) */
struct mfii_foreign_scan_cfg {
        char data[24];
} __packed;
203 
/* firmware reply describing foreign (imported) configurations */
struct mfii_foreign_scan_info {
	uint32_t count; /* Number of foreign configs found */
	struct mfii_foreign_scan_cfg cfgs[8];
} __packed;
208 
/*
 * Wrapper for one contiguous bus_dma allocation; accessed through the
 * MFII_DMA_* macros below (single segment: dm_segs[0]).
 */
struct mfii_dmamem {
	bus_dmamap_t		mdm_map;	/* loaded DMA map */
	bus_dma_segment_t	mdm_seg;	/* backing segment */
	size_t			mdm_size;	/* allocation size in bytes */
	void *			mdm_kva;	/* kernel virtual mapping */
};
215 #define MFII_DMA_MAP(_mdm)	((_mdm)->mdm_map)
216 #define MFII_DMA_LEN(_mdm)	((_mdm)->mdm_size)
217 #define MFII_DMA_DVA(_mdm)	((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
218 #define MFII_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
219 
220 struct mfii_softc;
221 
222 typedef enum mfii_direction {
223 	MFII_DATA_NONE = 0,
224 	MFII_DATA_IN,
225 	MFII_DATA_OUT
226 } mfii_direction_t;
227 
/*
 * Per-command control block.  Each ccb owns a slice of the shared
 * sc_requests, sc_mfi, sc_sense and sc_sgl dmamem areas; the *_dva
 * members are the device addresses of those slices and the *_offset
 * members their byte offsets within the parent map (used for partial
 * bus_dmamap_sync).
 */
struct mfii_ccb {
	struct mfii_softc	*ccb_sc;
	/* MPII request frame slice */
	void			*ccb_request;
	u_int64_t		ccb_request_dva;
	bus_addr_t		ccb_request_offset;

	/* MFI frame slice, for proxied MFI commands */
	void			*ccb_mfi;
	u_int64_t		ccb_mfi_dva;
	bus_addr_t		ccb_mfi_offset;

	/* sense buffer slice; also reused to hold dcmd frames */
	struct mfi_sense	*ccb_sense;
	u_int64_t		ccb_sense_dva;
	bus_addr_t		ccb_sense_offset;

	/* scatter/gather list slice */
	struct mfii_sge		*ccb_sgl;
	u_int64_t		ccb_sgl_dva;
	bus_addr_t		ccb_sgl_offset;
	u_int			ccb_sgl_len;

	/* request descriptor posted to the hardware for this command */
	struct mfii_request_descr ccb_req;

	/* 64-bit and 32-bit DMA maps for the data transfer */
	bus_dmamap_t		ccb_dmamap64;
	bus_dmamap_t		ccb_dmamap32;
	bool			ccb_dma64;

	/* data for sgl */
	void			*ccb_data;
	size_t			ccb_len;

	mfii_direction_t	ccb_direction;

	/* completion callback, its argument, and sleep/wakeup state */
	void			*ccb_cookie;
	kmutex_t		ccb_mtx;
	kcondvar_t		ccb_cv;
	void			(*ccb_done)(struct mfii_softc *,
				    struct mfii_ccb *);

	u_int32_t		ccb_flags;
#define MFI_CCB_F_ERR			(1<<0)
	u_int			ccb_smid;
	SIMPLEQ_ENTRY(mfii_ccb)	ccb_link;
};
SIMPLEQ_HEAD(mfii_ccb_list, mfii_ccb);
271 
/*
 * Constants that differ between controller generations; the matching
 * instance is selected by PCI id through mfii_devices[].
 */
struct mfii_iop {
	int bar;		/* which PCI BAR maps the registers */
	int num_sge_loc;	/* where the fw reports max SGE count */
#define MFII_IOP_NUM_SGE_LOC_ORIG	0
#define MFII_IOP_NUM_SGE_LOC_35		1
	u_int16_t ldio_ctx_reg_lock_flags;
	u_int8_t ldio_req_type;
	u_int8_t ldio_ctx_type_nseg;
	u_int8_t sge_flag_chain;	/* flags marking a chain SGE */
	u_int8_t sge_flag_eol;		/* flags marking the last SGE */
};
283 
/* per-controller software state */
struct mfii_softc {
	device_t		sc_dev;
	struct scsipi_channel   sc_chan;
	struct scsipi_adapter   sc_adapt;

	/* generation-specific constants (see mfii_devices[]) */
	const struct mfii_iop	*sc_iop;

	/* PCI glue */
	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	/* register window and DMA tags */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;
	bus_dma_tag_t		sc_dmat64;
	bool			sc_64bit_dma;

	void			*sc_ih;		/* interrupt handle */

	kmutex_t		sc_ccb_mtx;	/* protects the free ccb list */
	kmutex_t		sc_post_mtx;

	/* command limits derived from firmware scratchpad registers */
	u_int			sc_max_fw_cmds;
	u_int			sc_max_cmds;
	u_int			sc_max_sgl;

	/* reply post queue state */
	u_int			sc_reply_postq_depth;
	u_int			sc_reply_postq_index;
	kmutex_t		sc_reply_postq_mtx;
	struct mfii_dmamem	*sc_reply_postq;

	/* shared DMA areas sliced up between the ccbs */
	struct mfii_dmamem	*sc_requests;
	struct mfii_dmamem	*sc_mfi;
	struct mfii_dmamem	*sc_sense;
	struct mfii_dmamem	*sc_sgl;

	struct mfii_ccb		*sc_ccb;
	struct mfii_ccb_list	sc_ccb_freeq;

	/* asynchronous event notification handling */
	struct mfii_ccb		*sc_aen_ccb;
	struct workqueue	*sc_aen_wq;
	struct work		sc_aen_work;

	/* command abort handling */
	kmutex_t		sc_abort_mtx;
	struct mfii_ccb_list	sc_abort_list;
	struct workqueue	*sc_abort_wq;
	struct work		sc_abort_work;

	/* save some useful information for logical drives that is missing
	 * in sc_ld_list
	 */
	struct {
		bool		ld_present;
		char		ld_dev[16];	/* device name sd? */
	}			sc_ld[MFI_MAX_LD];
	int			sc_target_lds[MFI_MAX_LD];

	/* bio */
	struct mfi_conf	 *sc_cfg;
	struct mfi_ctrl_info    sc_info;
	struct mfi_ld_list	sc_ld_list;
	struct mfi_ld_details	*sc_ld_details; /* array to all logical disks */
	int			sc_no_pd; /* used physical disks */
	int			sc_ld_sz; /* sizeof sc_ld_details */

	/* mgmt lock */
	kmutex_t		sc_lock;
	bool			sc_running;

	/* sensors */
	struct sysmon_envsys	*sc_sme;
	envsys_data_t		*sc_sensors;
	bool			sc_bbuok;

	device_t		sc_child;	/* attached scsibus, if any */
};
360 
361 // #define MFII_DEBUG
362 #ifdef MFII_DEBUG
363 #define DPRINTF(x...)		do { if (mfii_debug) printf(x); } while(0)
364 #define DNPRINTF(n,x...)	do { if (mfii_debug & n) printf(x); } while(0)
365 #define	MFII_D_CMD		0x0001
366 #define	MFII_D_INTR		0x0002
367 #define	MFII_D_MISC		0x0004
368 #define	MFII_D_DMA		0x0008
369 #define	MFII_D_IOCTL		0x0010
370 #define	MFII_D_RW		0x0020
371 #define	MFII_D_MEM		0x0040
372 #define	MFII_D_CCB		0x0080
373 uint32_t	mfii_debug = 0
374 /*		    | MFII_D_CMD */
375 /*		    | MFII_D_INTR */
376 	    	    | MFII_D_MISC
377 /*		    | MFII_D_DMA */
378 /*		    | MFII_D_IOCTL */
379 /*		    | MFII_D_RW */
380 /*		    | MFII_D_MEM */
381 /*		    | MFII_D_CCB */
382 		;
383 #else
384 #define DPRINTF(x...)
385 #define DNPRINTF(n,x...)
386 #endif
387 
388 int		mfii_match(device_t, cfdata_t, void *);
389 void		mfii_attach(device_t, device_t, void *);
390 int		mfii_detach(device_t, int);
391 int		mfii_rescan(device_t, const char *, const int *);
392 void		mfii_childdetached(device_t, device_t);
393 static bool	mfii_suspend(device_t, const pmf_qual_t *);
394 static bool	mfii_resume(device_t, const pmf_qual_t *);
395 static bool	mfii_shutdown(device_t, int);
396 
397 
398 CFATTACH_DECL3_NEW(mfii, sizeof(struct mfii_softc),
399     mfii_match, mfii_attach, mfii_detach, NULL, mfii_rescan,
400 	mfii_childdetached, DVF_DETACH_SHUTDOWN);
401 
402 void		mfii_scsipi_request(struct scsipi_channel *,
403 			scsipi_adapter_req_t, void *);
404 void		mfii_scsi_cmd_done(struct mfii_softc *, struct mfii_ccb *);
405 
406 #define DEVNAME(_sc)		(device_xname((_sc)->sc_dev))
407 
408 static u_int32_t	mfii_read(struct mfii_softc *, bus_size_t);
409 static void		mfii_write(struct mfii_softc *, bus_size_t, u_int32_t);
410 
411 struct mfii_dmamem *	mfii_dmamem_alloc(struct mfii_softc *, size_t);
412 void			mfii_dmamem_free(struct mfii_softc *,
413 			    struct mfii_dmamem *);
414 
415 struct mfii_ccb *	mfii_get_ccb(struct mfii_softc *);
416 void			mfii_put_ccb(struct mfii_softc *, struct mfii_ccb *);
417 int			mfii_init_ccb(struct mfii_softc *);
418 void			mfii_scrub_ccb(struct mfii_ccb *);
419 
420 int			mfii_transition_firmware(struct mfii_softc *);
421 int			mfii_initialise_firmware(struct mfii_softc *);
422 int			mfii_get_info(struct mfii_softc *);
423 
424 void			mfii_start(struct mfii_softc *, struct mfii_ccb *);
425 void			mfii_done(struct mfii_softc *, struct mfii_ccb *);
426 int			mfii_poll(struct mfii_softc *, struct mfii_ccb *);
427 void			mfii_poll_done(struct mfii_softc *, struct mfii_ccb *);
428 int			mfii_exec(struct mfii_softc *, struct mfii_ccb *);
429 void			mfii_exec_done(struct mfii_softc *, struct mfii_ccb *);
430 int			mfii_my_intr(struct mfii_softc *);
431 int			mfii_intr(void *);
432 void			mfii_postq(struct mfii_softc *);
433 
434 int			mfii_load_ccb(struct mfii_softc *, struct mfii_ccb *,
435 			    void *, int);
436 int			mfii_load_mfa(struct mfii_softc *, struct mfii_ccb *,
437 			    void *, int);
438 
439 int			mfii_mfa_poll(struct mfii_softc *, struct mfii_ccb *);
440 
441 int			mfii_mgmt(struct mfii_softc *, uint32_t,
442 			    const union mfi_mbox *, void *, size_t,
443 			    mfii_direction_t, bool);
444 int			mfii_do_mgmt(struct mfii_softc *, struct mfii_ccb *,
445 			    uint32_t, const union mfi_mbox *, void *, size_t,
446 			    mfii_direction_t, bool);
447 void			mfii_empty_done(struct mfii_softc *, struct mfii_ccb *);
448 
449 int			mfii_scsi_cmd_io(struct mfii_softc *,
450 			    struct mfii_ccb *, struct scsipi_xfer *);
451 int			mfii_scsi_cmd_cdb(struct mfii_softc *,
452 			    struct mfii_ccb *, struct scsipi_xfer *);
453 void			mfii_scsi_cmd_tmo(void *);
454 
455 int			mfii_dev_handles_update(struct mfii_softc *sc);
456 void			mfii_dev_handles_dtor(void *, void *);
457 
458 void			mfii_abort_task(struct work *, void *);
459 void			mfii_abort(struct mfii_softc *, struct mfii_ccb *,
460 			    uint16_t, uint16_t, uint8_t, uint32_t);
461 void			mfii_scsi_cmd_abort_done(struct mfii_softc *,
462 			    struct mfii_ccb *);
463 
464 int			mfii_aen_register(struct mfii_softc *);
465 void			mfii_aen_start(struct mfii_softc *, struct mfii_ccb *,
466 			    struct mfii_dmamem *, uint32_t);
467 void			mfii_aen_done(struct mfii_softc *, struct mfii_ccb *);
468 void			mfii_aen(struct work *, void *);
469 void			mfii_aen_unregister(struct mfii_softc *);
470 
471 void			mfii_aen_pd_insert(struct mfii_softc *,
472 			    const struct mfi_evtarg_pd_address *);
473 void			mfii_aen_pd_remove(struct mfii_softc *,
474 			    const struct mfi_evtarg_pd_address *);
475 void			mfii_aen_pd_state_change(struct mfii_softc *,
476 			    const struct mfi_evtarg_pd_state *);
477 void			mfii_aen_ld_update(struct mfii_softc *);
478 
479 #if NBIO > 0
480 int		mfii_ioctl(device_t, u_long, void *);
481 int		mfii_ioctl_inq(struct mfii_softc *, struct bioc_inq *);
482 int		mfii_ioctl_vol(struct mfii_softc *, struct bioc_vol *);
483 int		mfii_ioctl_disk(struct mfii_softc *, struct bioc_disk *);
484 int		mfii_ioctl_alarm(struct mfii_softc *, struct bioc_alarm *);
485 int		mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *);
486 int		mfii_ioctl_setstate(struct mfii_softc *,
487 		    struct bioc_setstate *);
488 int		mfii_bio_hs(struct mfii_softc *, int, int, void *);
489 int		mfii_bio_getitall(struct mfii_softc *);
490 #endif /* NBIO > 0 */
491 
492 #if 0
493 static const char *mfi_bbu_indicators[] = {
494 	"pack missing",
495 	"voltage low",
496 	"temp high",
497 	"charge active",
498 	"discharge active",
499 	"learn cycle req'd",
500 	"learn cycle active",
501 	"learn cycle failed",
502 	"learn cycle timeout",
503 	"I2C errors",
504 	"replace pack",
505 	"low capacity",
506 	"periodic learn req'd"
507 };
508 #endif
509 
510 void		mfii_init_ld_sensor(struct mfii_softc *, envsys_data_t *, int);
511 void		mfii_refresh_ld_sensor(struct mfii_softc *, envsys_data_t *);
512 static void	mfii_attach_sensor(struct mfii_softc *, envsys_data_t *);
513 int		mfii_create_sensors(struct mfii_softc *);
514 static int	mfii_destroy_sensors(struct mfii_softc *);
515 void		mfii_refresh_sensor(struct sysmon_envsys *, envsys_data_t *);
516 void		mfii_bbu(struct mfii_softc *, envsys_data_t *);
517 
518 /*
519  * mfii boards support asynchronous (and non-polled) completion of
520  * dcmds by proxying them through a passthru mpii command that points
521  * at a dcmd frame. since the passthru command is submitted like
522  * the scsi commands using an SMID in the request descriptor,
523  * ccb_request memory * must contain the passthru command because
524  * that is what the SMID refers to. this means ccb_request cannot
525  * contain the dcmd. rather than allocating separate dma memory to
526  * hold the dcmd, we reuse the sense memory buffer for it.
527  */
528 
529 void			mfii_dcmd_start(struct mfii_softc *,
530 			    struct mfii_ccb *);
531 
/*
 * Zero the ccb's sense buffer, which doubles as the dcmd frame for
 * proxied management commands (see the comment above).
 */
static inline void
mfii_dcmd_scrub(struct mfii_ccb *ccb)
{
	memset(ccb->ccb_sense, 0, sizeof(*ccb->ccb_sense));
}
537 
/*
 * Return the ccb's sense buffer reinterpreted as an MFI dcmd frame;
 * the compile-time assertion guarantees the frame fits in the buffer.
 */
static inline struct mfi_dcmd_frame *
mfii_dcmd_frame(struct mfii_ccb *ccb)
{
	CTASSERT(sizeof(struct mfi_dcmd_frame) <= sizeof(*ccb->ccb_sense));
	return ((struct mfi_dcmd_frame *)ccb->ccb_sense);
}
544 
/*
 * bus_dmamap_sync the dcmd frame stored in the ccb's slice of the
 * sense dmamem; flags is a BUS_DMASYNC_* combination.
 */
static inline void
mfii_dcmd_sync(struct mfii_softc *sc, struct mfii_ccb *ccb, int flags)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sense),
	    ccb->ccb_sense_offset, sizeof(*ccb->ccb_sense), flags);
}
551 
552 #define mfii_fw_state(_sc) mfii_read((_sc), MFI_OSP)
553 
554 const struct mfii_iop mfii_iop_thunderbolt = {
555 	MFII_BAR,
556 	MFII_IOP_NUM_SGE_LOC_ORIG,
557 	0,
558 	MFII_REQ_TYPE_LDIO,
559 	0,
560 	MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA,
561 	0
562 };
563 
564 /*
565  * a lot of these values depend on us not implementing fastpath yet.
566  */
567 const struct mfii_iop mfii_iop_25 = {
568 	MFII_BAR,
569 	MFII_IOP_NUM_SGE_LOC_ORIG,
570 	MFII_RAID_CTX_RL_FLAGS_CPU0, /* | MFII_RAID_CTX_RL_FLAGS_SEQNO_EN */
571 	MFII_REQ_TYPE_NO_LOCK,
572 	MFII_RAID_CTX_TYPE_CUDA | 0x1,
573 	MFII_SGE_CHAIN_ELEMENT,
574 	MFII_SGE_END_OF_LIST
575 };
576 
577 const struct mfii_iop mfii_iop_35 = {
578 	MFII_BAR_35,
579 	MFII_IOP_NUM_SGE_LOC_35,
580 	MFII_RAID_CTX_ROUTING_FLAGS_CPU0, /* | MFII_RAID_CTX_ROUTING_FLAGS_SQN */
581 	MFII_REQ_TYPE_NO_LOCK,
582 	MFII_RAID_CTX_TYPE_CUDA | 0x1,
583 	MFII_SGE_CHAIN_ELEMENT,
584 	MFII_SGE_END_OF_LIST
585 };
586 
/* entry mapping one supported PCI id to its iop parameters */
struct mfii_device {
	pcireg_t		mpd_vendor;
	pcireg_t		mpd_product;
	const struct mfii_iop	*mpd_iop;
};
592 
/* table of supported controllers, searched linearly by mfii_find_iop() */
const struct mfii_device mfii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_2208,
	    &mfii_iop_thunderbolt },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3008,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3108,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3404,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3504,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3408,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3508,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3416,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3516,
	    &mfii_iop_35 }
};
613 
614 const struct mfii_iop *mfii_find_iop(struct pci_attach_args *);
615 
616 const struct mfii_iop *
617 mfii_find_iop(struct pci_attach_args *pa)
618 {
619 	const struct mfii_device *mpd;
620 	int i;
621 
622 	for (i = 0; i < __arraycount(mfii_devices); i++) {
623 		mpd = &mfii_devices[i];
624 
625 		if (mpd->mpd_vendor == PCI_VENDOR(pa->pa_id) &&
626 		    mpd->mpd_product == PCI_PRODUCT(pa->pa_id))
627 			return (mpd->mpd_iop);
628 	}
629 
630 	return (NULL);
631 }
632 
633 int
634 mfii_match(device_t parent, cfdata_t match, void *aux)
635 {
636 	return ((mfii_find_iop(aux) != NULL) ? 2 : 0);
637 }
638 
639 void
640 mfii_attach(device_t parent, device_t self, void *aux)
641 {
642 	struct mfii_softc *sc = device_private(self);
643 	struct pci_attach_args *pa = aux;
644 	pcireg_t memtype;
645 	pci_intr_handle_t ih;
646 	char intrbuf[PCI_INTRSTR_LEN];
647 	const char *intrstr;
648 	u_int32_t status, scpad2, scpad3;
649 	int chain_frame_sz, nsge_in_io, nsge_in_chain, i;
650 	struct scsipi_adapter *adapt = &sc->sc_adapt;
651 	struct scsipi_channel *chan = &sc->sc_chan;
652 
653 	/* init sc */
654 	sc->sc_dev = self;
655 	sc->sc_iop = mfii_find_iop(aux);
656 	sc->sc_dmat = pa->pa_dmat;
657 	if (pci_dma64_available(pa)) {
658 		sc->sc_dmat64 = pa->pa_dmat64;
659 		sc->sc_64bit_dma = 1;
660 	} else {
661 		sc->sc_dmat64 = pa->pa_dmat;
662 		sc->sc_64bit_dma = 0;
663 	}
664 	SIMPLEQ_INIT(&sc->sc_ccb_freeq);
665 	mutex_init(&sc->sc_ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
666 	mutex_init(&sc->sc_post_mtx, MUTEX_DEFAULT, IPL_BIO);
667 	mutex_init(&sc->sc_reply_postq_mtx, MUTEX_DEFAULT, IPL_BIO);
668 
669 	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
670 
671 	sc->sc_aen_ccb = NULL;
672 	snprintf(intrbuf, sizeof(intrbuf) - 1, "%saen", device_xname(self));
673 	workqueue_create(&sc->sc_aen_wq, intrbuf, mfii_aen, sc,
674 	    PRI_BIO, IPL_BIO, WQ_MPSAFE);
675 
676 	snprintf(intrbuf, sizeof(intrbuf) - 1, "%sabrt", device_xname(self));
677 	workqueue_create(&sc->sc_abort_wq, intrbuf, mfii_abort_task,
678 	    sc, PRI_BIO, IPL_BIO, WQ_MPSAFE);
679 
680 	mutex_init(&sc->sc_abort_mtx, MUTEX_DEFAULT, IPL_BIO);
681 	SIMPLEQ_INIT(&sc->sc_abort_list);
682 
683 	/* wire up the bus shizz */
684 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_iop->bar);
685 	memtype |= PCI_MAPREG_MEM_TYPE_32BIT;
686 	if (pci_mapreg_map(pa, sc->sc_iop->bar, memtype, 0,
687 	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)) {
688 		aprint_error(": unable to map registers\n");
689 		return;
690 	}
691 
692 	/* disable interrupts */
693 	mfii_write(sc, MFI_OMSK, 0xffffffff);
694 
695 	if (pci_intr_map(pa, &ih) != 0) {
696 		aprint_error(": unable to map interrupt\n");
697 		goto pci_unmap;
698 	}
699 	intrstr = pci_intr_string(pa->pa_pc, ih, intrbuf, sizeof(intrbuf));
700 	pci_intr_setattr(pa->pa_pc, &ih, PCI_INTR_MPSAFE, true);
701 
702 	/* lets get started */
703 	if (mfii_transition_firmware(sc))
704 		goto pci_unmap;
705 	sc->sc_running = true;
706 
707 	/* determine max_cmds (refer to the Linux megaraid_sas driver) */
708 	scpad3 = mfii_read(sc, MFII_OSP3);
709 	status = mfii_fw_state(sc);
710 	sc->sc_max_fw_cmds = scpad3 & MFI_STATE_MAXCMD_MASK;
711 	if (sc->sc_max_fw_cmds == 0)
712 		sc->sc_max_fw_cmds = status & MFI_STATE_MAXCMD_MASK;
713 	/*
714 	 * reduce max_cmds by 1 to ensure that the reply queue depth does not
715 	 * exceed FW supplied max_fw_cmds.
716 	 */
717 	sc->sc_max_cmds = uimin(sc->sc_max_fw_cmds, 1024) - 1;
718 
719 	/* determine max_sgl (refer to the Linux megaraid_sas driver) */
720 	scpad2 = mfii_read(sc, MFII_OSP2);
721 	chain_frame_sz =
722 		((scpad2 & MFII_MAX_CHAIN_MASK) >> MFII_MAX_CHAIN_SHIFT) *
723 		((scpad2 & MFII_MAX_CHAIN_UNIT) ? MFII_1MB_IO : MFII_256K_IO);
724 	if (chain_frame_sz < MFII_CHAIN_FRAME_MIN)
725 		chain_frame_sz = MFII_CHAIN_FRAME_MIN;
726 
727 	nsge_in_io = (MFII_REQUEST_SIZE -
728 		sizeof(struct mpii_msg_scsi_io) -
729 		sizeof(struct mfii_raid_context)) / sizeof(struct mfii_sge);
730 	nsge_in_chain = chain_frame_sz / sizeof(struct mfii_sge);
731 
732 	/* round down to nearest power of two */
733 	sc->sc_max_sgl = 1;
734 	while ((sc->sc_max_sgl << 1) <= (nsge_in_io + nsge_in_chain))
735 		sc->sc_max_sgl <<= 1;
736 
737 	DNPRINTF(MFII_D_MISC, "%s: OSP 0x%08x, OSP2 0x%08x, OSP3 0x%08x\n",
738 	    DEVNAME(sc), status, scpad2, scpad3);
739 	DNPRINTF(MFII_D_MISC, "%s: max_fw_cmds %d, max_cmds %d\n",
740 	    DEVNAME(sc), sc->sc_max_fw_cmds, sc->sc_max_cmds);
741 	DNPRINTF(MFII_D_MISC, "%s: nsge_in_io %d, nsge_in_chain %d, "
742 	    "max_sgl %d\n", DEVNAME(sc), nsge_in_io, nsge_in_chain,
743 	    sc->sc_max_sgl);
744 
745 	/* sense memory */
746 	CTASSERT(sizeof(struct mfi_sense) == MFI_SENSE_SIZE);
747 	sc->sc_sense = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
748 	if (sc->sc_sense == NULL) {
749 		aprint_error(": unable to allocate sense memory\n");
750 		goto pci_unmap;
751 	}
752 
753 	/* reply post queue */
754 	sc->sc_reply_postq_depth = roundup(sc->sc_max_fw_cmds, 16);
755 
756 	sc->sc_reply_postq = mfii_dmamem_alloc(sc,
757 	    sc->sc_reply_postq_depth * sizeof(struct mpii_reply_descr));
758 	if (sc->sc_reply_postq == NULL)
759 		goto free_sense;
760 
761 	memset(MFII_DMA_KVA(sc->sc_reply_postq), 0xff,
762 	    MFII_DMA_LEN(sc->sc_reply_postq));
763 
764 	/* MPII request frame array */
765 	sc->sc_requests = mfii_dmamem_alloc(sc,
766 	    MFII_REQUEST_SIZE * (sc->sc_max_cmds + 1));
767 	if (sc->sc_requests == NULL)
768 		goto free_reply_postq;
769 
770 	/* MFI command frame array */
771 	sc->sc_mfi = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_FRAME_SIZE);
772 	if (sc->sc_mfi == NULL)
773 		goto free_requests;
774 
775 	/* MPII SGL array */
776 	sc->sc_sgl = mfii_dmamem_alloc(sc, sc->sc_max_cmds *
777 	    sizeof(struct mfii_sge) * sc->sc_max_sgl);
778 	if (sc->sc_sgl == NULL)
779 		goto free_mfi;
780 
781 	if (mfii_init_ccb(sc) != 0) {
782 		aprint_error(": could not init ccb list\n");
783 		goto free_sgl;
784 	}
785 
786 	/* kickstart firmware with all addresses and pointers */
787 	if (mfii_initialise_firmware(sc) != 0) {
788 		aprint_error(": could not initialize firmware\n");
789 		goto free_sgl;
790 	}
791 
792 	mutex_enter(&sc->sc_lock);
793 	if (mfii_get_info(sc) != 0) {
794 		mutex_exit(&sc->sc_lock);
795 		aprint_error(": could not retrieve controller information\n");
796 		goto free_sgl;
797 	}
798 	mutex_exit(&sc->sc_lock);
799 
800 	aprint_normal(": \"%s\", firmware %s",
801 	    sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
802 	if (le16toh(sc->sc_info.mci_memory_size) > 0) {
803 		aprint_normal(", %uMB cache",
804 		    le16toh(sc->sc_info.mci_memory_size));
805 	}
806 	aprint_normal("\n");
807 	aprint_naive("\n");
808 
809 	sc->sc_ih = pci_intr_establish_xname(sc->sc_pc, ih, IPL_BIO,
810 	    mfii_intr, sc, DEVNAME(sc));
811 	if (sc->sc_ih == NULL) {
812 		aprint_error_dev(self, "can't establish interrupt");
813 		if (intrstr)
814 			aprint_error(" at %s", intrstr);
815 		aprint_error("\n");
816 		goto free_sgl;
817 	}
818 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
819 
820 	for (i = 0; i < sc->sc_info.mci_lds_present; i++)
821 		sc->sc_ld[i].ld_present = 1;
822 
823 	memset(adapt, 0, sizeof(*adapt));
824 	adapt->adapt_dev = sc->sc_dev;
825 	adapt->adapt_nchannels = 1;
826 	/* keep a few commands for management */
827 	if (sc->sc_max_cmds > 4)
828 		adapt->adapt_openings = sc->sc_max_cmds - 4;
829 	else
830 		adapt->adapt_openings = sc->sc_max_cmds;
831 	adapt->adapt_max_periph = adapt->adapt_openings;
832 	adapt->adapt_request = mfii_scsipi_request;
833 	adapt->adapt_minphys = minphys;
834 	adapt->adapt_flags = SCSIPI_ADAPT_MPSAFE;
835 
836 	memset(chan, 0, sizeof(*chan));
837 	chan->chan_adapter = adapt;
838 	chan->chan_bustype = &scsi_sas_bustype;
839 	chan->chan_channel = 0;
840 	chan->chan_flags = 0;
841 	chan->chan_nluns = 8;
842 	chan->chan_ntargets = sc->sc_info.mci_max_lds;
843 	chan->chan_id = sc->sc_info.mci_max_lds;
844 
845 	mfii_rescan(sc->sc_dev, "scsi", NULL);
846 
847 	if (mfii_aen_register(sc) != 0) {
848 		/* error printed by mfii_aen_register */
849 		goto intr_disestablish;
850 	}
851 
852 	mutex_enter(&sc->sc_lock);
853 	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
854 	    sizeof(sc->sc_ld_list), MFII_DATA_IN, true) != 0) {
855 		mutex_exit(&sc->sc_lock);
856 		aprint_error_dev(self,
857 		    "getting list of logical disks failed\n");
858 		goto intr_disestablish;
859 	}
860 	mutex_exit(&sc->sc_lock);
861 	memset(sc->sc_target_lds, -1, sizeof(sc->sc_target_lds));
862 	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
863 		int target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
864 		sc->sc_target_lds[target] = i;
865 	}
866 
867 	/* enable interrupts */
868 	mfii_write(sc, MFI_OSTS, 0xffffffff);
869 	mfii_write(sc, MFI_OMSK, ~MFII_OSTS_INTR_VALID);
870 
871 #if NBIO > 0
872 	if (bio_register(sc->sc_dev, mfii_ioctl) != 0)
873 		panic("%s: controller registration failed", DEVNAME(sc));
874 #endif /* NBIO > 0 */
875 
876 	if (mfii_create_sensors(sc) != 0)
877 		aprint_error_dev(self, "unable to create sensors\n");
878 
879 	if (!pmf_device_register1(sc->sc_dev, mfii_suspend, mfii_resume,
880 	    mfii_shutdown))
881 		aprint_error_dev(self, "couldn't establish power handler\n");
882 	return;
883 intr_disestablish:
884 	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
885 free_sgl:
886 	mfii_dmamem_free(sc, sc->sc_sgl);
887 free_mfi:
888 	mfii_dmamem_free(sc, sc->sc_mfi);
889 free_requests:
890 	mfii_dmamem_free(sc, sc->sc_requests);
891 free_reply_postq:
892 	mfii_dmamem_free(sc, sc->sc_reply_postq);
893 free_sense:
894 	mfii_dmamem_free(sc, sc->sc_sense);
895 pci_unmap:
896 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
897 }
898 
/*
 * NOTE(review): unported OpenBSD code for tracking physical-disk device
 * handles via srp(9), which NetBSD does not provide; it also references
 * sc_pd, which this softc lacks.  Kept disabled for reference only.
 */
#if 0
struct srp_gc mfii_dev_handles_gc =
    SRP_GC_INITIALIZER(mfii_dev_handles_dtor, NULL);

static inline uint16_t
mfii_dev_handle(struct mfii_softc *sc, uint16_t target)
{
	struct srp_ref sr;
	uint16_t *map, handle;

	map = srp_enter(&sr, &sc->sc_pd->pd_dev_handles);
	handle = map[target];
	srp_leave(&sr);

	return (handle);
}

int
mfii_dev_handles_update(struct mfii_softc *sc)
{
	struct mfii_ld_map *lm;
	uint16_t *dev_handles = NULL;
	int i;
	int rv = 0;

	lm = malloc(sizeof(*lm), M_TEMP, M_WAITOK|M_ZERO);

	rv = mfii_mgmt(sc, MR_DCMD_LD_MAP_GET_INFO, NULL, lm, sizeof(*lm),
	    MFII_DATA_IN, false);

	if (rv != 0) {
		rv = EIO;
		goto free_lm;
	}

	dev_handles = mallocarray(MFI_MAX_PD, sizeof(*dev_handles),
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < MFI_MAX_PD; i++)
		dev_handles[i] = lm->mlm_dev_handle[i].mdh_cur_handle;

	/* commit the updated info */
	sc->sc_pd->pd_timeout = lm->mlm_pd_timeout;
	srp_update_locked(&mfii_dev_handles_gc,
	    &sc->sc_pd->pd_dev_handles, dev_handles);

free_lm:
	free(lm, M_TEMP, sizeof(*lm));

	return (rv);
}

void
mfii_dev_handles_dtor(void *null, void *v)
{
	uint16_t *dev_handles = v;

	free(dev_handles, M_DEVBUF, sizeof(*dev_handles) * MFI_MAX_PD);
}
#endif /* 0 */
959 
/*
 * Detach the controller: detach children, tear down sensors and bio
 * hooks, ask the firmware to shut down, then release the interrupt and
 * the DMA areas in reverse order of allocation.
 */
int
mfii_detach(device_t self, int flags)
{
	struct mfii_softc *sc = device_private(self);
	int error;

	/* attach never completed if no interrupt handler was established */
	if (sc->sc_ih == NULL)
		return (0);

	if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
		return error;

	mfii_destroy_sensors(sc);
#if NBIO > 0
	bio_unregister(sc->sc_dev);
#endif
	mfii_shutdown(sc->sc_dev, 0);
	/* mask all interrupts before pulling the handler */
	mfii_write(sc, MFI_OMSK, 0xffffffff);

	mfii_aen_unregister(sc);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	mfii_dmamem_free(sc, sc->sc_sgl);
	mfii_dmamem_free(sc, sc->sc_mfi);
	mfii_dmamem_free(sc, sc->sc_requests);
	mfii_dmamem_free(sc, sc->sc_reply_postq);
	mfii_dmamem_free(sc, sc->sc_sense);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);

	return (0);
}
990 
991 int
992 mfii_rescan(device_t self, const char *ifattr, const int *locators)
993 {
994 	struct mfii_softc *sc = device_private(self);
995 	if (sc->sc_child != NULL)
996 		return 0;
997 
998 	sc->sc_child = config_found_sm_loc(self, ifattr, locators, &sc->sc_chan,
999 	    scsiprint, NULL);
1000 	return 0;
1001 }
1002 
1003 void
1004 mfii_childdetached(device_t self, device_t child)
1005 {
1006 	struct mfii_softc *sc = device_private(self);
1007 
1008 	KASSERT(self == sc->sc_dev);
1009 	KASSERT(child == sc->sc_child);
1010 
1011 	if (child == sc->sc_child)
1012 		sc->sc_child = NULL;
1013 }
1014 
/*
 * pmf suspend hook.  Not implemented; returning false vetoes the
 * suspend rather than suspending with the controller still active.
 */
static bool
mfii_suspend(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}
1021 
/*
 * pmf resume hook.  Not implemented; reports failure until resume
 * support is written.
 */
static bool
mfii_resume(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}
1028 
1029 static bool
1030 mfii_shutdown(device_t dev, int how)
1031 {
1032 	struct mfii_softc	*sc = device_private(dev);
1033 	struct mfii_ccb *ccb;
1034 	union mfi_mbox		mbox;
1035 	bool rv = true;;
1036 
1037 	memset(&mbox, 0, sizeof(mbox));
1038 
1039 	mutex_enter(&sc->sc_lock);
1040 	DNPRINTF(MFI_D_MISC, "%s: mfii_shutdown\n", DEVNAME(sc));
1041 	ccb = mfii_get_ccb(sc);
1042 	if (ccb == NULL)
1043 		return false;
1044 	mutex_enter(&sc->sc_ccb_mtx);
1045 	if (sc->sc_running) {
1046 		sc->sc_running = 0; /* prevent new commands */
1047 		mutex_exit(&sc->sc_ccb_mtx);
1048 #if 0 /* XXX why does this hang ? */
1049 		mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
1050 		mfii_scrub_ccb(ccb);
1051 		if (mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH, &mbox,
1052 		    NULL, 0, MFII_DATA_NONE, true)) {
1053 			aprint_error_dev(dev, "shutdown: cache flush failed\n");
1054 			rv = false;
1055 			goto fail;
1056 		}
1057 		printf("ok1\n");
1058 #endif
1059 		mbox.b[0] = 0;
1060 		mfii_scrub_ccb(ccb);
1061 		if (mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_SHUTDOWN, &mbox,
1062 		    NULL, 0, MFII_DATA_NONE, true)) {
1063 			aprint_error_dev(dev, "shutdown: "
1064 			    "firmware shutdown failed\n");
1065 		    	rv = false;
1066 			goto fail;
1067 		}
1068 	} else {
1069 		mutex_exit(&sc->sc_ccb_mtx);
1070 	}
1071 fail:
1072 	mfii_put_ccb(sc, ccb);
1073 	mutex_exit(&sc->sc_lock);
1074 	return rv;
1075 }
1076 
/*
 * Read a 32-bit controller register, with a read barrier so the access
 * is ordered with respect to surrounding register accesses.
 */
static u_int32_t
mfii_read(struct mfii_softc *sc, bus_size_t r)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
}
1084 
/*
 * Write a 32-bit controller register, followed by a write barrier so
 * the store is pushed out before subsequent register accesses.
 */
static void
mfii_write(struct mfii_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
1092 
/*
 * Allocate a DMA-safe memory chunk of the given size in one physical
 * segment: create a map, allocate and kva-map the segment, then load
 * it.  The bus_dma steps must happen in exactly this order; the labels
 * below unwind them in reverse on failure.  Returns the zero-filled
 * chunk, or NULL on failure.  Freed with mfii_dmamem_free().
 */
struct mfii_dmamem *
mfii_dmamem_alloc(struct mfii_softc *sc, size_t size)
{
	struct mfii_dmamem *m;
	int nsegs;

	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (m == NULL)
		return (NULL);

	m->mdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->mdm_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &m->mdm_seg, nsegs, size, &m->mdm_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, m->mdm_map, m->mdm_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	memset(m->mdm_kva, 0, size);
	return (m);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
mdmfree:
	free(m, M_DEVBUF);

	return (NULL);
}
1135 
/*
 * Release a chunk obtained from mfii_dmamem_alloc(), undoing each of
 * its steps in reverse order (unload, unmap, free, destroy).
 */
void
mfii_dmamem_free(struct mfii_softc *sc, struct mfii_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
	free(m, M_DEVBUF);
}
1145 
/*
 * Post a ccb carrying an MFI DCMD frame via the MPII pass-through
 * function.  The ccb request area holds the MPII SCSI IO header
 * followed by a RAID context; a single chain SGE after the context
 * points at the ccb's dcmd/sense area (ccb_sense_dva).
 */
void
mfii_dcmd_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);

	io->function = MFII_FUNCTION_PASSTHRU_IO;
	/* sgl_offset0 is in 32-bit words, chain_offset in 16-byte units. */
	io->sgl_offset0 = (uint32_t *)sge - (uint32_t *)io;
	io->chain_offset = io->sgl_offset0 / 4;

	sge->sg_addr = htole64(ccb->ccb_sense_dva);
	sge->sg_len = htole32(sizeof(*ccb->ccb_sense));
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	/*
	 * NOTE(review): smid is swapped with le16toh() here and in
	 * mfii_do_mgmt(); htole16() looks intended, though both are the
	 * same operation on any fixed-endianness host.
	 */
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	mfii_start(sc, ccb);
}
1166 
/*
 * Set up asynchronous event notification (AEN): fetch the current
 * event-log sequence numbers from the firmware, allocate a DMA buffer
 * for event details, and post the first EVENT_WAIT command.  On
 * success the ccb and DMA buffer stay owned by the AEN machinery
 * (mfii_aen_start/mfii_aen re-post them indefinitely).
 */
int
mfii_aen_register(struct mfii_softc *sc)
{
	struct mfi_evt_log_info mel;
	struct mfii_ccb *ccb;
	struct mfii_dmamem *mdm;
	int rv;

	ccb = mfii_get_ccb(sc);
	if (ccb == NULL) {
		printf("%s: unable to allocate ccb for aen\n", DEVNAME(sc));
		return (ENOMEM);
	}

	memset(&mel, 0, sizeof(mel));
	mfii_scrub_ccb(ccb);

	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_EVENT_GET_INFO, NULL,
	    &mel, sizeof(mel), MFII_DATA_IN, true);
	if (rv != 0) {
		mfii_put_ccb(sc, ccb);
		aprint_error_dev(sc->sc_dev, "unable to get event info\n");
		return (EIO);
	}

	mdm = mfii_dmamem_alloc(sc, sizeof(struct mfi_evt_detail));
	if (mdm == NULL) {
		mfii_put_ccb(sc, ccb);
		aprint_error_dev(sc->sc_dev, "unable to allocate event data\n");
		return (ENOMEM);
	}

	/* replay all the events from boot */
	mfii_aen_start(sc, ccb, mdm, le32toh(mel.mel_boot_seq_num));

	return (0);
}
1204 
/*
 * Build and post a MR_DCMD_CTRL_EVENT_WAIT frame asking the firmware
 * for the next event with sequence number >= seq.  The event detail is
 * DMAed into mdm; completion goes through mfii_aen_done().  Called
 * once from mfii_aen_register() and then re-armed from mfii_aen().
 */
void
mfii_aen_start(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct mfii_dmamem *mdm, uint32_t seq)
{
	struct mfi_dcmd_frame *dcmd = mfii_dcmd_frame(ccb);
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	union mfi_sgl *sgl = &dcmd->mdf_sgl;
	union mfi_evt_class_locale mec;

	mfii_scrub_ccb(ccb);
	mfii_dcmd_scrub(ccb);
	memset(MFII_DMA_KVA(mdm), 0, MFII_DMA_LEN(mdm));

	ccb->ccb_cookie = mdm;
	ccb->ccb_done = mfii_aen_done;
	sc->sc_aen_ccb = ccb;

	/* DEBUG class + all locales; presumably selects every event. */
	mec.mec_members.class = MFI_EVT_CLASS_DEBUG;
	mec.mec_members.reserved = 0;
	mec.mec_members.locale = htole16(MFI_EVT_LOCALE_ALL);

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_sg_count = 1;
	hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ | MFI_FRAME_SGL64);
	hdr->mfh_data_len = htole32(MFII_DMA_LEN(mdm));
	dcmd->mdf_opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	dcmd->mdf_mbox.w[0] = htole32(seq);
	dcmd->mdf_mbox.w[1] = htole32(mec.mec_word);
	sgl->sg64[0].addr = htole64(MFII_DMA_DVA(mdm));
	sgl->sg64[0].len = htole32(MFII_DMA_LEN(mdm));

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_PREREAD);

	mfii_dcmd_sync(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	mfii_dcmd_start(sc, ccb);
}
1242 
/*
 * Completion callback for the EVENT_WAIT ccb: hand processing off to
 * the AEN workqueue so it can run with KERNEL_LOCK (needed for
 * autoconf).  Skipped once sc_running is cleared during shutdown.
 */
void
mfii_aen_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	KASSERT(sc->sc_aen_ccb == ccb);

	/*
	 * defer to a thread with KERNEL_LOCK so we can run autoconf
	 * We shouldn't have more than one AEN command pending at a time,
	 * so no need to lock
	 */
	if (sc->sc_running)
		workqueue_enqueue(sc->sc_aen_wq, &sc->sc_aen_work, NULL);
}
1256 
/*
 * AEN workqueue handler: sync the completed event detail out of DMA,
 * dispatch on the event code, then re-arm the EVENT_WAIT command with
 * the next sequence number.
 */
void
mfii_aen(struct work *wk, void *arg)
{
	struct mfii_softc *sc = arg;
	struct mfii_ccb *ccb = sc->sc_aen_ccb;
	struct mfii_dmamem *mdm = ccb->ccb_cookie;
	const struct mfi_evt_detail *med = MFII_DMA_KVA(mdm);

	mfii_dcmd_sync(sc, ccb,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_POSTREAD);

	DNPRINTF(MFII_D_MISC, "%s: %u %08x %02x %s\n", DEVNAME(sc),
	    le32toh(med->med_seq_num), le32toh(med->med_code),
	    med->med_arg_type, med->med_description);

	switch (le32toh(med->med_code)) {
	case MR_EVT_PD_INSERTED_EXT:
		if (med->med_arg_type != MR_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_insert(sc, &med->args.pd_address);
		break;
 	case MR_EVT_PD_REMOVED_EXT:
		if (med->med_arg_type != MR_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_remove(sc, &med->args.pd_address);
		break;

	case MR_EVT_PD_STATE_CHANGE:
		if (med->med_arg_type != MR_EVT_ARGS_PD_STATE)
			break;

		mfii_aen_pd_state_change(sc, &med->args.pd_state);
		break;

	case MR_EVT_LD_CREATED:
	case MR_EVT_LD_DELETED:
		mfii_aen_ld_update(sc);
		break;

	default:
		break;
	}

	/* Wait for the next event after the one just handled. */
	mfii_aen_start(sc, ccb, mdm, le32toh(med->med_seq_num) + 1);
}
1306 
/*
 * AEN handler: log the hot-insertion of a physical disk.
 */
void
mfii_aen_pd_insert(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_address *pd)
{
	printf("%s: physical disk inserted id %d enclosure %d\n", DEVNAME(sc),
	    le16toh(pd->device_id), le16toh(pd->encl_id));
}
1314 
/*
 * AEN handler: log the removal of a physical disk.
 */
void
mfii_aen_pd_remove(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_address *pd)
{
	printf("%s: physical disk removed id %d enclosure %d\n", DEVNAME(sc),
	    le16toh(pd->device_id), le16toh(pd->encl_id));
}
1322 
/*
 * AEN handler for physical-disk state changes.  Currently a no-op;
 * the event is consumed and ignored.
 */
void
mfii_aen_pd_state_change(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_state *state)
{
}
1329 
/*
 * AEN handler for logical-drive creation/deletion: re-fetch the LD
 * list from the firmware, diff it against the cached target map
 * (sc_target_lds), attach sensors for new drives and detach the
 * scsipi target and sensor for removed ones, then commit the new map.
 */
void
mfii_aen_ld_update(struct mfii_softc *sc)
{
	int i, target, old, nld;
	int newlds[MFI_MAX_LD];

	mutex_enter(&sc->sc_lock);
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), MFII_DATA_IN, false) != 0) {
		mutex_exit(&sc->sc_lock);
		DNPRINTF(MFII_D_MISC, "%s: getting list of logical disks failed\n",
		    DEVNAME(sc));
		return;
	}
	mutex_exit(&sc->sc_lock);

	/* -1 marks "no LD at this target". */
	memset(newlds, -1, sizeof(newlds));

	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
		target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		DNPRINTF(MFII_D_MISC, "%s: target %d: state %d\n",
		    DEVNAME(sc), target, sc->sc_ld_list.mll_list[i].mll_state);
		newlds[target] = i;
	}

	for (i = 0; i < MFI_MAX_LD; i++) {
		old = sc->sc_target_lds[i];
		nld = newlds[i];

		if (old == -1 && nld != -1) {
			printf("%s: logical drive %d added (target %d)\n",
			    DEVNAME(sc), i, nld);

			// XXX scsi_probe_target(sc->sc_scsibus, i);

			mfii_init_ld_sensor(sc, &sc->sc_sensors[i], i);
			mfii_attach_sensor(sc, &sc->sc_sensors[i]);
		} else if (nld == -1 && old != -1) {
			printf("%s: logical drive %d removed (target %d)\n",
			    DEVNAME(sc), i, old);

			scsipi_target_detach(&sc->sc_chan, i, 0, DETACH_FORCE);
			sysmon_envsys_sensor_detach(sc->sc_sme,
			    &sc->sc_sensors[i]);
		}
	}

	memcpy(sc->sc_target_lds, newlds, sizeof(sc->sc_target_lds));
}
1379 
/*
 * Tear down the AEN machinery.  Not implemented: the pending
 * EVENT_WAIT ccb and its DMA buffer are not reclaimed here.
 */
void
mfii_aen_unregister(struct mfii_softc *sc)
{
	/* XXX */
}
1385 
/*
 * Drive the firmware state machine until it reaches MFI_STATE_READY.
 * For each intermediate state, optionally poke the inbound doorbell
 * and poll (in 100ms steps, up to max_wait seconds) for the state to
 * change.  Returns 0 when the firmware is ready, 1 on fault, unknown
 * state, or timeout.
 */
int
mfii_transition_firmware(struct mfii_softc *sc)
{
	int32_t			fw_state, cur_state;
	int			max_wait, i;

	fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;

	while (fw_state != MFI_STATE_READY) {
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			printf("%s: firmware fault\n", DEVNAME(sc));
			return (1);
		case MFI_STATE_WAIT_HANDSHAKE:
			mfii_write(sc, MFI_SKINNY_IDB,
			    MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		default:
			printf("%s: unknown firmware state %d\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
		/* Poll every 100ms for up to max_wait seconds. */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
	}

	return (0);
}
1439 
/*
 * Fetch the controller information page (MR_DCMD_CTRL_GET_INFO) into
 * sc_info, then dump it via debug printfs.  Returns 0 on success or
 * the error from mfii_mgmt().  Caller must hold sc_lock (mfii_mgmt
 * asserts it).
 */
int
mfii_get_info(struct mfii_softc *sc)
{
	int i, rv;

	rv = mfii_mgmt(sc, MR_DCMD_CTRL_GET_INFO, NULL, &sc->sc_info,
	    sizeof(sc->sc_info), MFII_DATA_IN, true);

	if (rv != 0)
		return (rv);

	/* Everything below is debug-only output of sc_info fields. */
	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
		DPRINTF("%s: active FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_image_component[i].mic_name,
		    sc->sc_info.mci_image_component[i].mic_version,
		    sc->sc_info.mci_image_component[i].mic_build_date,
		    sc->sc_info.mci_image_component[i].mic_build_time);
	}

	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
		DPRINTF("%s: pending FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_pending_image_component[i].mic_name,
		    sc->sc_info.mci_pending_image_component[i].mic_version,
		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
	}

	DPRINTF("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_arms,
	    sc->sc_info.mci_max_spans,
	    sc->sc_info.mci_max_arrays,
	    sc->sc_info.mci_max_lds,
	    sc->sc_info.mci_product_name);

	DPRINTF("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_serial_number,
	    sc->sc_info.mci_hw_present,
	    sc->sc_info.mci_current_fw_time,
	    sc->sc_info.mci_max_cmds,
	    sc->sc_info.mci_max_sg_elements);

	DPRINTF("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_request_size,
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_lds_degraded,
	    sc->sc_info.mci_lds_offline,
	    sc->sc_info.mci_pd_present);

	DPRINTF("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pd_disks_present,
	    sc->sc_info.mci_pd_disks_pred_failure,
	    sc->sc_info.mci_pd_disks_failed);

	DPRINTF("%s: nvram %d mem %d flash %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_nvram_size,
	    sc->sc_info.mci_memory_size,
	    sc->sc_info.mci_flash_size);

	DPRINTF("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ram_correctable_errors,
	    sc->sc_info.mci_ram_uncorrectable_errors,
	    sc->sc_info.mci_cluster_allowed,
	    sc->sc_info.mci_cluster_active);

	DPRINTF("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_strips_per_io,
	    sc->sc_info.mci_raid_levels,
	    sc->sc_info.mci_adapter_ops,
	    sc->sc_info.mci_ld_ops);

	DPRINTF("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_stripe_sz_ops.min,
	    sc->sc_info.mci_stripe_sz_ops.max,
	    sc->sc_info.mci_pd_ops,
	    sc->sc_info.mci_pd_mix_support);

	DPRINTF("%s: ecc_bucket %d pckg_prop %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ecc_bucket_count,
	    sc->sc_info.mci_package_version);

	DPRINTF("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_seq_num,
	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);

	DPRINTF("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_rebuild_rate,
	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
	    sc->sc_info.mci_properties.mcp_bgi_rate,
	    sc->sc_info.mci_properties.mcp_cc_rate);

	DPRINTF("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_recon_rate,
	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
	    sc->sc_info.mci_properties.mcp_spinup_delay,
	    sc->sc_info.mci_properties.mcp_cluster_enable);

	DPRINTF("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_coercion_mode,
	    sc->sc_info.mci_properties.mcp_alarm_enable,
	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);

	DPRINTF("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
	    sc->sc_info.mci_properties.mcp_expose_encl_devices);

	DPRINTF("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pci.mip_vendor,
	    sc->sc_info.mci_pci.mip_device,
	    sc->sc_info.mci_pci.mip_subvendor,
	    sc->sc_info.mci_pci.mip_subdevice);

	DPRINTF("%s: type %#x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_host.mih_type,
	    sc->sc_info.mci_host.mih_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0" PRIx64 " ", sc->sc_info.mci_host.mih_port_addr[i]);
	DPRINTF("\n");

	DPRINTF("%s: type %.x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_device.mid_type,
	    sc->sc_info.mci_device.mid_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0" PRIx64 " ", sc->sc_info.mci_device.mid_port_addr[i]);
	DPRINTF("\n");

	return (0);
}
1594 
/*
 * Submit a legacy MFI frame (used only before the queues are up, e.g.
 * IOC INIT) and busy-wait for its completion status, polling every
 * 1ms for up to ~5 seconds.  Returns 0 on completion, 1 on timeout
 * (the ccb is then flagged MFI_CCB_F_ERR).
 */
int
mfii_mfa_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mfi_frame_header	*hdr = ccb->ccb_request;
	u_int64_t r;
	int to = 0, rv = 0;

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_mfa_poll called with cookie or done set");
#endif

	hdr->mfh_context = ccb->ccb_smid;
	/* Firmware overwrites this when the command completes. */
	hdr->mfh_cmd_status = MFI_STAT_INVALID_STATUS;
	hdr->mfh_flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	/* Hand the frame to the chip as an MFA-style request descriptor. */
	r = MFII_REQ_MFA(ccb->ccb_request_dva);
	memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));

	mfii_start(sc, ccb);

	for (;;) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (hdr->mfh_cmd_status != MFI_STAT_INVALID_STATUS)
			break;

		if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
			    ccb->ccb_smid);
			ccb->ccb_flags |= MFI_CCB_F_ERR;
			rv = 1;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		delay(1000);
	}

	/* Sync and unload any data buffer attached to this command. */
	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap32,
		    0, ccb->ccb_dmamap32->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap32);
	}

	return (rv);
}
1650 
1651 int
1652 mfii_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
1653 {
1654 	void (*done)(struct mfii_softc *, struct mfii_ccb *);
1655 	void *cookie;
1656 	int rv = 1;
1657 
1658 	done = ccb->ccb_done;
1659 	cookie = ccb->ccb_cookie;
1660 
1661 	ccb->ccb_done = mfii_poll_done;
1662 	ccb->ccb_cookie = &rv;
1663 
1664 	mfii_start(sc, ccb);
1665 
1666 	do {
1667 		delay(10);
1668 		mfii_postq(sc);
1669 	} while (rv == 1);
1670 
1671 	ccb->ccb_cookie = cookie;
1672 	done(sc, ccb);
1673 
1674 	return (0);
1675 }
1676 
1677 void
1678 mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1679 {
1680 	int *rv = ccb->ccb_cookie;
1681 
1682 	*rv = 0;
1683 }
1684 
/*
 * Submit a ccb and sleep on its condvar until the interrupt path
 * completes it (mfii_exec_done clears ccb_cookie and signals).
 * Always returns 0; the caller inspects the frame status itself.
 */
int
mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_exec called with cookie or done set");
#endif

	/* Non-NULL cookie doubles as the "still in flight" marker. */
	ccb->ccb_cookie = ccb;
	ccb->ccb_done = mfii_exec_done;

	mfii_start(sc, ccb);

	mutex_enter(&ccb->ccb_mtx);
	while (ccb->ccb_cookie != NULL)
		cv_wait(&ccb->ccb_cv, &ccb->ccb_mtx);
	mutex_exit(&ccb->ccb_mtx);

	return (0);
}
1705 
/*
 * Completion callback for mfii_exec(): clear the in-flight marker
 * under the ccb mutex and wake the sleeping submitter.
 */
void
mfii_exec_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	mutex_enter(&ccb->ccb_mtx);
	ccb->ccb_cookie = NULL;
	cv_signal(&ccb->ccb_cv);
	mutex_exit(&ccb->ccb_mtx);
}
1714 
1715 int
1716 mfii_mgmt(struct mfii_softc *sc, uint32_t opc, const union mfi_mbox *mbox,
1717     void *buf, size_t len, mfii_direction_t dir, bool poll)
1718 {
1719 	struct mfii_ccb *ccb;
1720 	int rv;
1721 
1722 	KASSERT(mutex_owned(&sc->sc_lock));
1723 	if (!sc->sc_running)
1724 		return EAGAIN;
1725 
1726 	ccb = mfii_get_ccb(sc);
1727 	if (ccb == NULL)
1728 		return (ENOMEM);
1729 
1730 	mfii_scrub_ccb(ccb);
1731 	rv = mfii_do_mgmt(sc, ccb, opc, mbox, buf, len, dir, poll);
1732 	mfii_put_ccb(sc, ccb);
1733 
1734 	return (rv);
1735 }
1736 
/*
 * Run an MFI DCMD management command through the MPII pass-through
 * path using the caller's ccb.  The MFI frame lives in the ccb's
 * dedicated mfi area and is referenced by a single chain SGE; any
 * data buffer is loaded into the 32-bit dmamap and described by a
 * 32-bit SGL inside the MFI frame.  Polls when asked to (and always
 * while cold).  Returns 0 if the firmware reports MFI_STAT_OK,
 * ENOMEM if the buffer could not be mapped, EIO otherwise.
 */
int
mfii_do_mgmt(struct mfii_softc *sc, struct mfii_ccb *ccb, uint32_t opc,
    const union mfi_mbox *mbox, void *buf, size_t len, mfii_direction_t dir,
    bool poll)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);
	struct mfi_dcmd_frame *dcmd = ccb->ccb_mfi;
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	int rv = EIO;

	/* Interrupts aren't running yet during early boot. */
	if (cold)
		poll = true;

	ccb->ccb_data = buf;
	ccb->ccb_len = len;
	ccb->ccb_direction = dir;
	switch (dir) {
	case MFII_DATA_IN:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ);
		break;
	case MFII_DATA_OUT:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_WRITE);
		break;
	case MFII_DATA_NONE:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_NONE);
		break;
	}

	if (mfii_load_mfa(sc, ccb, &dcmd->mdf_sgl, poll) != 0) {
		rv = ENOMEM;
		goto done;
	}

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_data_len = htole32(len);
	hdr->mfh_sg_count = ccb->ccb_dmamap32->dm_nsegs;
	KASSERT(!ccb->ccb_dma64);

	/*
	 * NOTE(review): mdf_opcode is stored host-endian here while
	 * mfii_aen_start() wraps the opcode in htole32() — confirm
	 * which is right for big-endian hosts.
	 */
	dcmd->mdf_opcode = opc;
	/* handle special opcodes */
	if (mbox != NULL)
		memcpy(&dcmd->mdf_mbox, mbox, sizeof(dcmd->mdf_mbox));

	io->function = MFII_FUNCTION_PASSTHRU_IO;
	/* sgl_offset0 is in 32-bit words, chain_offset in 16-byte units. */
	io->sgl_offset0 = ((u_int8_t *)sge - (u_int8_t *)io) / 4;
	io->chain_offset = ((u_int8_t *)sge - (u_int8_t *)io) / 16;

	/* Chain SGE pointing at the MFI frame itself. */
	sge->sg_addr = htole64(ccb->ccb_mfi_dva);
	sge->sg_len = htole32(MFI_FRAME_SIZE);
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	if (poll) {
		ccb->ccb_done = mfii_empty_done;
		mfii_poll(sc, ccb);
	} else
		mfii_exec(sc, ccb);

	if (hdr->mfh_cmd_status == MFI_STAT_OK) {
		rv = 0;
	}

done:
	return (rv);
}
1807 
/*
 * Completion callback that intentionally does nothing; used when the
 * submitter polls for completion itself.
 */
void
mfii_empty_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
}
1813 
/*
 * Load the ccb's data buffer into its 32-bit dmamap and fill in a
 * 32-bit MFI scatter/gather list at sglp, then pre-sync the buffer
 * for the transfer direction.  No-op if there is no data.  Returns 0
 * on success, 1 if the dmamap load fails.
 */
int
mfii_load_mfa(struct mfii_softc *sc, struct mfii_ccb *ccb,
    void *sglp, int nosleep)
{
	union mfi_sgl *sgl = sglp;
	bus_dmamap_t dmap = ccb->ccb_dmamap32;
	int error;
	int i;

	KASSERT(!ccb->ccb_dma64);
	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		sgl->sg32[i].addr = htole32(dmap->dm_segs[i].ds_addr);
		sgl->sg32[i].len = htole32(dmap->dm_segs[i].ds_len);
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	return (0);
}
1846 
1847 void
1848 mfii_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
1849 {
1850 	u_long *r = (u_long *)&ccb->ccb_req;
1851 
1852 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
1853 	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
1854 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1855 
1856 #if defined(__LP64__) && 0
1857 	bus_space_write_8(sc->sc_iot, sc->sc_ioh, MFI_IQPL, *r);
1858 #else
1859 	mutex_enter(&sc->sc_post_mtx);
1860 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_IQPL, r[0]);
1861 	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
1862 	    MFI_IQPL, 8, BUS_SPACE_BARRIER_WRITE);
1863 
1864 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_IQPH, r[1]);
1865 	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
1866 	    MFI_IQPH, 8, BUS_SPACE_BARRIER_WRITE);
1867 	mutex_exit(&sc->sc_post_mtx);
1868 #endif
1869 }
1870 
/*
 * Common completion path: sync the request frame and SGL out of DMA,
 * post-sync and unload whichever dmamap (64- or 32-bit) carried the
 * data, then invoke the ccb's completion callback.
 */
void
mfii_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_POSTWRITE);
	}

	if (ccb->ccb_dma64) {
		KASSERT(ccb->ccb_len > 0);
		bus_dmamap_sync(sc->sc_dmat64, ccb->ccb_dmamap64,
		    0, ccb->ccb_dmamap64->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat64, ccb->ccb_dmamap64);
	} else if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap32,
		    0, ccb->ccb_dmamap32->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap32);
	}

	ccb->ccb_done(sc, ccb);
}
1903 
1904 int
1905 mfii_initialise_firmware(struct mfii_softc *sc)
1906 {
1907 	struct mpii_msg_iocinit_request *iiq;
1908 	struct mfii_dmamem *m;
1909 	struct mfii_ccb *ccb;
1910 	struct mfi_init_frame *init;
1911 	int rv;
1912 
1913 	m = mfii_dmamem_alloc(sc, sizeof(*iiq));
1914 	if (m == NULL)
1915 		return (1);
1916 
1917 	iiq = MFII_DMA_KVA(m);
1918 	memset(iiq, 0, sizeof(*iiq));
1919 
1920 	iiq->function = MPII_FUNCTION_IOC_INIT;
1921 	iiq->whoinit = MPII_WHOINIT_HOST_DRIVER;
1922 
1923 	iiq->msg_version_maj = 0x02;
1924 	iiq->msg_version_min = 0x00;
1925 	iiq->hdr_version_unit = 0x10;
1926 	iiq->hdr_version_dev = 0x0;
1927 
1928 	iiq->system_request_frame_size = htole16(MFII_REQUEST_SIZE / 4);
1929 
1930 	iiq->reply_descriptor_post_queue_depth =
1931 	    htole16(sc->sc_reply_postq_depth);
1932 	iiq->reply_free_queue_depth = htole16(0);
1933 
1934 	iiq->sense_buffer_address_high = htole32(
1935 	    MFII_DMA_DVA(sc->sc_sense) >> 32);
1936 
1937 	iiq->reply_descriptor_post_queue_address_lo =
1938 	    htole32(MFII_DMA_DVA(sc->sc_reply_postq));
1939 	iiq->reply_descriptor_post_queue_address_hi =
1940 	    htole32(MFII_DMA_DVA(sc->sc_reply_postq) >> 32);
1941 
1942 	iiq->system_request_frame_base_address_lo =
1943 	    htole32(MFII_DMA_DVA(sc->sc_requests));
1944 	iiq->system_request_frame_base_address_hi =
1945 	    htole32(MFII_DMA_DVA(sc->sc_requests) >> 32);
1946 
1947 	iiq->timestamp = htole64(time_uptime);
1948 
1949 	ccb = mfii_get_ccb(sc);
1950 	if (ccb == NULL) {
1951 		/* shouldn't ever run out of ccbs during attach */
1952 		return (1);
1953 	}
1954 	mfii_scrub_ccb(ccb);
1955 	init = ccb->ccb_request;
1956 
1957 	init->mif_header.mfh_cmd = MFI_CMD_INIT;
1958 	init->mif_header.mfh_data_len = htole32(sizeof(*iiq));
1959 	init->mif_qinfo_new_addr_lo = htole32(MFII_DMA_DVA(m));
1960 	init->mif_qinfo_new_addr_hi = htole32((uint64_t)MFII_DMA_DVA(m) >> 32);
1961 
1962 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
1963 	    0, MFII_DMA_LEN(sc->sc_reply_postq),
1964 	    BUS_DMASYNC_PREREAD);
1965 
1966 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
1967 	    0, sizeof(*iiq), BUS_DMASYNC_PREREAD);
1968 
1969 	rv = mfii_mfa_poll(sc, ccb);
1970 
1971 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
1972 	    0, sizeof(*iiq), BUS_DMASYNC_POSTREAD);
1973 
1974 	mfii_put_ccb(sc, ccb);
1975 	mfii_dmamem_free(sc, m);
1976 
1977 	return (rv);
1978 }
1979 
1980 int
1981 mfii_my_intr(struct mfii_softc *sc)
1982 {
1983 	u_int32_t status;
1984 
1985 	status = mfii_read(sc, MFI_OSTS);
1986 
1987 	DNPRINTF(MFII_D_INTR, "%s: intr status 0x%x\n", DEVNAME(sc), status);
1988 	if (ISSET(status, 0x1)) {
1989 		mfii_write(sc, MFI_OSTS, status);
1990 		return (1);
1991 	}
1992 
1993 	return (ISSET(status, MFII_OSTS_INTR_VALID) ? 1 : 0);
1994 }
1995 
/*
 * PCI interrupt handler: bail out if the interrupt isn't ours,
 * otherwise drain completed commands from the reply post queue.
 */
int
mfii_intr(void *arg)
{
	struct mfii_softc *sc = arg;

	/* Not ours: let other handlers on a shared line have a go. */
	if (mfii_my_intr(sc) == 0)
		return (0);

	mfii_postq(sc);
	return (1);
}
2008 
/*
 * Drain the reply post queue: collect every completed descriptor onto
 * a local list under the queue mutex, re-arm each slot (0xff marks it
 * unused), update the reply post index register, then run the
 * completions outside the lock.
 */
void
mfii_postq(struct mfii_softc *sc)
{
	struct mfii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_reply_descr *postq = MFII_DMA_KVA(sc->sc_reply_postq);
	struct mpii_reply_descr *rdp;
	struct mfii_ccb *ccb;
	int rpi = 0;

	mutex_enter(&sc->sc_reply_postq_mtx);

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_POSTREAD);

	for (;;) {
		rdp = &postq[sc->sc_reply_postq_index];
		DNPRINTF(MFII_D_INTR, "%s: mfii_postq index %d flags 0x%x data 0x%x\n",
		    DEVNAME(sc), sc->sc_reply_postq_index, rdp->reply_flags,
			rdp->data == 0xffffffff);
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		/* smid is 1-based; 0 means unused. */
		ccb = &sc->sc_ccb[le16toh(rdp->smid) - 1];
		SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		memset(rdp, 0xff, sizeof(*rdp));

		sc->sc_reply_postq_index++;
		sc->sc_reply_postq_index %= sc->sc_reply_postq_depth;
		rpi = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_PREREAD);

	/* Tell the firmware how far we have consumed the queue. */
	if (rpi)
		mfii_write(sc, MFII_RPI, sc->sc_reply_postq_index);

	mutex_exit(&sc->sc_reply_postq_mtx);

	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		mfii_done(sc, ccb);
	}
}
2063 
2064 void
2065 mfii_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
2066     void *arg)
2067 {
2068 	struct scsipi_periph    *periph;
2069 	struct scsipi_xfer	*xs;
2070 	struct scsipi_adapter   *adapt = chan->chan_adapter;
2071 	struct mfii_softc	*sc = device_private(adapt->adapt_dev);
2072 	struct mfii_ccb *ccb;
2073 	int timeout;
2074 	int target;
2075 
2076 	switch(req) {
2077 		case ADAPTER_REQ_GROW_RESOURCES:
2078 		/* Not supported. */
2079 		return;
2080 	case ADAPTER_REQ_SET_XFER_MODE:
2081 	{
2082 		struct scsipi_xfer_mode *xm = arg;
2083 		xm->xm_mode = PERIPH_CAP_TQING;
2084 		xm->xm_period = 0;
2085 		xm->xm_offset = 0;
2086 		scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, xm);
2087 		return;
2088 	}
2089 	case ADAPTER_REQ_RUN_XFER:
2090 		break;
2091 	}
2092 
2093 	xs = arg;
2094 	periph = xs->xs_periph;
2095 	target = periph->periph_target;
2096 
2097 	if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
2098 	    periph->periph_lun != 0) {
2099 		xs->error = XS_SELTIMEOUT;
2100 		scsipi_done(xs);
2101 		return;
2102 	}
2103 
2104 	if ((xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_10 ||
2105 	    xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_16) && sc->sc_bbuok) {
2106 		/* the cache is stable storage, don't flush */
2107 		xs->error = XS_NOERROR;
2108 		xs->status = SCSI_OK;
2109 		xs->resid = 0;
2110 		scsipi_done(xs);
2111 		return;
2112 	}
2113 
2114 	ccb = mfii_get_ccb(sc);
2115 	if (ccb == NULL) {
2116 		xs->error = XS_RESOURCE_SHORTAGE;
2117 		scsipi_done(xs);
2118 		return;
2119 	}
2120 	mfii_scrub_ccb(ccb);
2121 	ccb->ccb_cookie = xs;
2122 	ccb->ccb_done = mfii_scsi_cmd_done;
2123 	ccb->ccb_data = xs->data;
2124 	ccb->ccb_len = xs->datalen;
2125 
2126 	timeout = mstohz(xs->timeout);
2127 	if (timeout == 0)
2128 		timeout = 1;
2129 	callout_reset(&xs->xs_callout, timeout, mfii_scsi_cmd_tmo, ccb);
2130 
2131 	switch (xs->cmd->opcode) {
2132 	case SCSI_READ_6_COMMAND:
2133 	case READ_10:
2134 	case READ_12:
2135 	case READ_16:
2136 	case SCSI_WRITE_6_COMMAND:
2137 	case WRITE_10:
2138 	case WRITE_12:
2139 	case WRITE_16:
2140 		if (mfii_scsi_cmd_io(sc, ccb, xs) != 0)
2141 			goto stuffup;
2142 		break;
2143 
2144 	default:
2145 		if (mfii_scsi_cmd_cdb(sc, ccb, xs) != 0)
2146 			goto stuffup;
2147 		break;
2148 	}
2149 
2150 	xs->error = XS_NOERROR;
2151 	xs->resid = 0;
2152 
2153 	DNPRINTF(MFII_D_CMD, "%s: start io %d cmd %d\n", DEVNAME(sc), target,
2154 	    xs->cmd->opcode);
2155 
2156 	if (xs->xs_control & XS_CTL_POLL) {
2157 		if (mfii_poll(sc, ccb) != 0)
2158 			goto stuffup;
2159 		return;
2160 	}
2161 
2162 	mfii_start(sc, ccb);
2163 
2164 	return;
2165 
2166 stuffup:
2167 	xs->error = XS_DRIVER_STUFFUP;
2168 	scsipi_done(xs);
2169 	mfii_put_ccb(sc, ccb);
2170 }
2171 
/*
 * Completion handler for SCSI command ccbs submitted by
 * mfii_scsipi_request().  Translates the MFI status in the RAID
 * context into a scsipi error code and completes the xfer.
 */
void
mfii_scsi_cmd_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct scsipi_xfer *xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context sits immediately after the MPII IO request */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);

	/*
	 * callout_stop(9) returns non-zero when the callout had already
	 * fired; in that case the timeout/abort path owns this xfer and
	 * will complete it, so bail out to avoid a double completion.
	 */
	if (callout_stop(&xs->xs_callout) != 0)
		return;

	switch (ctx->status) {
	case MFI_STAT_OK:
		break;

	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		/* command completed with a check condition; copy sense data */
		xs->error = XS_SENSE;
		memset(&xs->sense, 0, sizeof(xs->sense));
		memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
		break;

	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	scsipi_done(xs);
	mfii_put_ccb(sc, ccb);
}
2205 
/*
 * Build a fast-path logical-disk I/O (read/write) request in the ccb.
 * Returns 0 on success, 1 if the data buffer could not be DMA-mapped.
 */
int
mfii_scsi_cmd_io(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context sits immediately after the MPII IO request */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	int segs;

	io->dev_handle = htole16(periph->periph_target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* SGL offset is expressed in 32-bit words */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->type_nseg = sc->sc_iop->ldio_ctx_type_nseg;
	ctx->timeout_value = htole16(0x14); /* XXX */
	ctx->reg_lock_flags = htole16(sc->sc_iop->ldio_ctx_reg_lock_flags);
	ctx->virtual_disk_target_id = htole16(periph->periph_target);

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (1);

	KASSERT(ccb->ccb_len == 0 || ccb->ccb_dma64);
	segs = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	/* where the segment count lives differs between iop generations */
	switch (sc->sc_iop->num_sge_loc) {
	case MFII_IOP_NUM_SGE_LOC_ORIG:
		ctx->num_sge = segs;
		break;
	case MFII_IOP_NUM_SGE_LOC_35:
		/* 12 bit field, but we're only using the lower 8 */
		ctx->span_arm = segs;
		break;
	}

	ccb->ccb_req.flags = sc->sc_iop->ldio_req_type;
	/*
	 * NOTE(review): ccb_smid is kept in host order; le16toh() here
	 * only behaves like htole16() on little-endian hosts -- confirm
	 * the intended byte order for big-endian machines.
	 */
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	return (0);
}
2265 
/*
 * Build a generic CDB passthrough request for a logical disk.
 * Returns 0 on success, 1 if the data buffer could not be DMA-mapped.
 */
int
mfii_scsi_cmd_cdb(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context sits immediately after the MPII IO request */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);

	io->dev_handle = htole16(periph->periph_target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* SGL offset is expressed in 32-bit words */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(periph->periph_lun);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(periph->periph_target);

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (1);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	KASSERT(ccb->ccb_len == 0 || ccb->ccb_dma64);

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	/*
	 * NOTE(review): ccb_smid is kept in host order; le16toh() here
	 * only behaves like htole16() on little-endian hosts -- confirm
	 * the intended byte order for big-endian machines.
	 */
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	return (0);
}
2313 
#if 0
/*
 * Physical-disk (syspd) passthrough support, disabled since the port
 * from OpenBSD: these still use OpenBSD's scsi_link interface and have
 * not been converted to scsipi.  Kept for reference.
 */
void
mfii_pd_scsi_cmd(struct scsipi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	// XXX timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	xs->error = mfii_pd_scsi_cmd_cdb(sc, xs);
	if (xs->error != XS_NOERROR)
		goto done;

	xs->resid = 0;

	if (ISSET(xs->xs_control, XS_CTL_POLL)) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	// XXX timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
done:
	scsi_done(xs);
}

/*
 * Probe a physical disk target: only disks in SYSTEM (JBOD) state are
 * exposed.
 */
int
mfii_pd_scsi_probe(struct scsi_link *link)
{
	struct mfii_softc *sc = link->adapter_softc;
	struct mfi_pd_details mpd;
	union mfi_mbox mbox;
	int rv;

	if (link->lun > 0)
		return (0);

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = htole16(link->target);

	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, &mpd, sizeof(mpd),
	    MFII_DATA_IN, true);
	if (rv != 0)
		return (EIO);

	if (mpd.mpd_fw_state != htole16(MFI_PD_SYSTEM))
		return (ENXIO);

	return (0);
}

/*
 * Build a CDB passthrough request addressed at a physical disk.
 */
int
mfii_pd_scsi_cmd_cdb(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	uint16_t dev_handle;

	dev_handle = mfii_dev_handle(sc, link->target);
	if (dev_handle == htole16(0xffff))
		return (XS_SELTIMEOUT);

	io->dev_handle = dev_handle;
	io->function = 0;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(link->target);
	ctx->raid_flags = MFII_RAID_CTX_IO_TYPE_SYSPD;
	ctx->timeout_value = sc->sc_pd->pd_timeout;

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (XS_DRIVER_STUFFUP);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	KASSERT(ccb->ccb_dma64);

	ccb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);
	ccb->ccb_req.dev_handle = dev_handle;

	return (XS_NOERROR);
}
#endif
2434 
/*
 * DMA-map the ccb's data buffer and build the scatter/gather list at
 * sglp (inside the request frame).  If the segment list does not fit
 * in the request frame, a chain element points at the ccb's external
 * SGL area and the remaining entries are written there.
 * Returns 0 on success, 1 on mapping failure.
 */
int
mfii_load_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb, void *sglp,
    int nosleep)
{
	struct mpii_msg_request *req = ccb->ccb_request;
	struct mfii_sge *sge = NULL, *nsge = sglp;
	struct mfii_sge *ce = NULL;	/* chain element, if needed */
	bus_dmamap_t dmap = ccb->ccb_dmamap64;
	u_int space;
	int i;

	int error;

	/* no data phase, nothing to map */
	if (ccb->ccb_len == 0)
		return (0);

	ccb->ccb_dma64 = true;
	error = bus_dmamap_load(sc->sc_dmat64, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* how many SGEs fit between sglp and the end of the request frame */
	space = (MFII_REQUEST_SIZE - ((u_int8_t *)nsge - (u_int8_t *)req)) /
	    sizeof(*nsge);
	if (dmap->dm_nsegs > space) {
		/* reserve the last in-frame slot for the chain element */
		space--;

		ccb->ccb_sgl_len = (dmap->dm_nsegs - space) * sizeof(*nsge);
		memset(ccb->ccb_sgl, 0, ccb->ccb_sgl_len);

		ce = nsge + space;
		ce->sg_addr = htole64(ccb->ccb_sgl_dva);
		ce->sg_len = htole32(ccb->ccb_sgl_len);
		ce->sg_flags = sc->sc_iop->sge_flag_chain;

		/* chain offset is in units of 16 bytes */
		req->chain_offset = ((u_int8_t *)ce - (u_int8_t *)req) / 16;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		/* when we hit the chain element, continue in the external SGL */
		if (nsge == ce)
			nsge = ccb->ccb_sgl;

		sge = nsge;

		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_flags = MFII_SGE_ADDR_SYSTEM;

		nsge = sge + 1;
	}
	/* mark the last entry written as end-of-list */
	sge->sg_flags |= sc->sc_iop->sge_flag_eol;

	bus_dmamap_sync(sc->sc_dmat64, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/* push the external SGL area out to the device if it was used */
	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_PREWRITE);
	}

	return (0);
}
2502 
2503 void
2504 mfii_scsi_cmd_tmo(void *p)
2505 {
2506 	struct mfii_ccb *ccb = p;
2507 	struct mfii_softc *sc = ccb->ccb_sc;
2508 	bool start_abort;
2509 
2510 	printf("%s: cmd timeout ccb %p\n", DEVNAME(sc), p);
2511 
2512 	mutex_enter(&sc->sc_abort_mtx);
2513 	start_abort = (SIMPLEQ_FIRST(&sc->sc_abort_list) == 0);
2514 	SIMPLEQ_INSERT_TAIL(&sc->sc_abort_list, ccb, ccb_link);
2515 	if (start_abort)
2516 		workqueue_enqueue(sc->sc_abort_wq, &sc->sc_abort_work, NULL);
2517 	mutex_exit(&sc->sc_abort_mtx);
2518 }
2519 
/*
 * Workqueue handler: take a snapshot of the abort list and issue a
 * task-management abort for every timed-out command on it.
 */
void
mfii_abort_task(struct work *wk, void *scp)
{
	struct mfii_softc *sc = scp;
	struct mfii_ccb *list;

	/* detach the whole list under the lock, then work on it unlocked */
	mutex_enter(&sc->sc_abort_mtx);
	list = SIMPLEQ_FIRST(&sc->sc_abort_list);
	SIMPLEQ_INIT(&sc->sc_abort_list);
	mutex_exit(&sc->sc_abort_mtx);

	while (list != NULL) {
		struct mfii_ccb *ccb = list;
		struct scsipi_xfer *xs = ccb->ccb_cookie;
		struct scsipi_periph *periph = xs->xs_periph;
		struct mfii_ccb *accb;

		list = SIMPLEQ_NEXT(ccb, ccb_link);

		if (!sc->sc_ld[periph->periph_target].ld_present) {
			/* device is gone */
			xs->error = XS_SELTIMEOUT;
			scsipi_done(xs);
			mfii_put_ccb(sc, ccb);
			continue;
		}

		/*
		 * NOTE(review): mfii_get_ccb() returns NULL when the free
		 * queue is empty or sc_running is clear; accb is then
		 * dereferenced by mfii_scrub_ccb() -- confirm this cannot
		 * happen here or add a NULL check.
		 */
		accb = mfii_get_ccb(sc);
		mfii_scrub_ccb(accb);
		mfii_abort(sc, accb, periph->periph_target, ccb->ccb_smid,
		    MPII_SCSI_TASK_ABORT_TASK,
		    htole32(MFII_TASK_MGMT_FLAGS_PD));

		accb->ccb_cookie = ccb;
		accb->ccb_done = mfii_scsi_cmd_abort_done;

		mfii_start(sc, accb);
	}
}
2559 
2560 void
2561 mfii_abort(struct mfii_softc *sc, struct mfii_ccb *accb, uint16_t dev_handle,
2562     uint16_t smid, uint8_t type, uint32_t flags)
2563 {
2564 	struct mfii_task_mgmt *msg;
2565 	struct mpii_msg_scsi_task_request *req;
2566 
2567 	msg = accb->ccb_request;
2568 	req = &msg->mpii_request;
2569 	req->dev_handle = dev_handle;
2570 	req->function = MPII_FUNCTION_SCSI_TASK_MGMT;
2571 	req->task_type = type;
2572 	req->task_mid = htole16( smid);
2573 	msg->flags = flags;
2574 
2575 	accb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
2576 	accb->ccb_req.smid = le16toh(accb->ccb_smid);
2577 }
2578 
2579 void
2580 mfii_scsi_cmd_abort_done(struct mfii_softc *sc, struct mfii_ccb *accb)
2581 {
2582 	struct mfii_ccb *ccb = accb->ccb_cookie;
2583 	struct scsipi_xfer *xs = ccb->ccb_cookie;
2584 
2585 	/* XXX check accb completion? */
2586 
2587 	mfii_put_ccb(sc, accb);
2588 	printf("%s: cmd aborted ccb %p\n", DEVNAME(sc), ccb);
2589 
2590 	xs->error = XS_TIMEOUT;
2591 	scsipi_done(xs);
2592 	mfii_put_ccb(sc, ccb);
2593 }
2594 
2595 struct mfii_ccb *
2596 mfii_get_ccb(struct mfii_softc *sc)
2597 {
2598 	struct mfii_ccb *ccb;
2599 
2600 	mutex_enter(&sc->sc_ccb_mtx);
2601 	if (!sc->sc_running) {
2602 		ccb = NULL;
2603 	} else {
2604 		ccb = SIMPLEQ_FIRST(&sc->sc_ccb_freeq);
2605 		if (ccb != NULL)
2606 			SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
2607 	}
2608 	mutex_exit(&sc->sc_ccb_mtx);
2609 	return (ccb);
2610 }
2611 
2612 void
2613 mfii_scrub_ccb(struct mfii_ccb *ccb)
2614 {
2615 	ccb->ccb_cookie = NULL;
2616 	ccb->ccb_done = NULL;
2617 	ccb->ccb_flags = 0;
2618 	ccb->ccb_data = NULL;
2619 	ccb->ccb_direction = MFII_DATA_NONE;
2620 	ccb->ccb_dma64 = false;
2621 	ccb->ccb_len = 0;
2622 	ccb->ccb_sgl_len = 0;
2623 	memset(&ccb->ccb_req, 0, sizeof(ccb->ccb_req));
2624 	memset(ccb->ccb_request, 0, MFII_REQUEST_SIZE);
2625 	memset(ccb->ccb_mfi, 0, MFI_FRAME_SIZE);
2626 }
2627 
/*
 * Return a ccb to the head of the free queue.
 */
void
mfii_put_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	mutex_enter(&sc->sc_ccb_mtx);
	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
	mutex_exit(&sc->sc_ccb_mtx);
}
2635 
2636 int
2637 mfii_init_ccb(struct mfii_softc *sc)
2638 {
2639 	struct mfii_ccb *ccb;
2640 	u_int8_t *request = MFII_DMA_KVA(sc->sc_requests);
2641 	u_int8_t *mfi = MFII_DMA_KVA(sc->sc_mfi);
2642 	u_int8_t *sense = MFII_DMA_KVA(sc->sc_sense);
2643 	u_int8_t *sgl = MFII_DMA_KVA(sc->sc_sgl);
2644 	u_int i;
2645 	int error;
2646 
2647 	sc->sc_ccb = malloc(sc->sc_max_cmds * sizeof(struct mfii_ccb),
2648 	    M_DEVBUF, M_WAITOK|M_ZERO);
2649 
2650 	for (i = 0; i < sc->sc_max_cmds; i++) {
2651 		ccb = &sc->sc_ccb[i];
2652 		ccb->ccb_sc = sc;
2653 
2654 		/* create a dma map for transfer */
2655 		error = bus_dmamap_create(sc->sc_dmat,
2656 		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2657 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap32);
2658 		if (error) {
2659 			printf("%s: cannot create ccb dmamap32 (%d)\n",
2660 			    DEVNAME(sc), error);
2661 			goto destroy;
2662 		}
2663 		error = bus_dmamap_create(sc->sc_dmat64,
2664 		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2665 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap64);
2666 		if (error) {
2667 			printf("%s: cannot create ccb dmamap64 (%d)\n",
2668 			    DEVNAME(sc), error);
2669 			goto destroy32;
2670 		}
2671 
2672 		/* select i + 1'th request. 0 is reserved for events */
2673 		ccb->ccb_smid = i + 1;
2674 		ccb->ccb_request_offset = MFII_REQUEST_SIZE * (i + 1);
2675 		ccb->ccb_request = request + ccb->ccb_request_offset;
2676 		ccb->ccb_request_dva = MFII_DMA_DVA(sc->sc_requests) +
2677 		    ccb->ccb_request_offset;
2678 
2679 		/* select i'th MFI command frame */
2680 		ccb->ccb_mfi_offset = MFI_FRAME_SIZE * i;
2681 		ccb->ccb_mfi = mfi + ccb->ccb_mfi_offset;
2682 		ccb->ccb_mfi_dva = MFII_DMA_DVA(sc->sc_mfi) +
2683 		    ccb->ccb_mfi_offset;
2684 
2685 		/* select i'th sense */
2686 		ccb->ccb_sense_offset = MFI_SENSE_SIZE * i;
2687 		ccb->ccb_sense = (struct mfi_sense *)(sense +
2688 		    ccb->ccb_sense_offset);
2689 		ccb->ccb_sense_dva = MFII_DMA_DVA(sc->sc_sense) +
2690 		    ccb->ccb_sense_offset;
2691 
2692 		/* select i'th sgl */
2693 		ccb->ccb_sgl_offset = sizeof(struct mfii_sge) *
2694 		    sc->sc_max_sgl * i;
2695 		ccb->ccb_sgl = (struct mfii_sge *)(sgl + ccb->ccb_sgl_offset);
2696 		ccb->ccb_sgl_dva = MFII_DMA_DVA(sc->sc_sgl) +
2697 		    ccb->ccb_sgl_offset;
2698 
2699 		mutex_init(&ccb->ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
2700 		cv_init(&ccb->ccb_cv, "mfiiexec");
2701 
2702 		/* add ccb to queue */
2703 		mfii_put_ccb(sc, ccb);
2704 	}
2705 
2706 	return (0);
2707 
2708 destroy32:
2709 	bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap32);
2710 destroy:
2711 	/* free dma maps and ccb memory */
2712 	while ((ccb = mfii_get_ccb(sc)) != NULL) {
2713 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap32);
2714 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap64);
2715 	}
2716 
2717 	free(sc->sc_ccb, M_DEVBUF);
2718 
2719 	return (1);
2720 }
2721 
2722 #if NBIO > 0
/*
 * bio(4) ioctl entry point: dispatch to the per-command handlers under
 * sc_lock.
 */
int
mfii_ioctl(device_t dev, u_long cmd, void *addr)
{
	struct mfii_softc	*sc = device_private(dev);
	int error = 0;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl ", DEVNAME(sc));

	mutex_enter(&sc->sc_lock);

	switch (cmd) {
	case BIOCINQ:
		DNPRINTF(MFII_D_IOCTL, "inq\n");
		error = mfii_ioctl_inq(sc, (struct bioc_inq *)addr);
		break;

	case BIOCVOL:
		DNPRINTF(MFII_D_IOCTL, "vol\n");
		error = mfii_ioctl_vol(sc, (struct bioc_vol *)addr);
		break;

	case BIOCDISK:
		DNPRINTF(MFII_D_IOCTL, "disk\n");
		error = mfii_ioctl_disk(sc, (struct bioc_disk *)addr);
		break;

	case BIOCALARM:
		DNPRINTF(MFII_D_IOCTL, "alarm\n");
		error = mfii_ioctl_alarm(sc, (struct bioc_alarm *)addr);
		break;

	case BIOCBLINK:
		DNPRINTF(MFII_D_IOCTL, "blink\n");
		error = mfii_ioctl_blink(sc, (struct bioc_blink *)addr);
		break;

	case BIOCSETSTATE:
		DNPRINTF(MFII_D_IOCTL, "setstate\n");
		error = mfii_ioctl_setstate(sc, (struct bioc_setstate *)addr);
		break;

#if 0
	case BIOCPATROL:
		DNPRINTF(MFII_D_IOCTL, "patrol\n");
		error = mfii_ioctl_patrol(sc, (struct bioc_patrol *)addr);
		break;
#endif

	default:
		DNPRINTF(MFII_D_IOCTL, " invalid ioctl\n");
		error = ENOTTY;
	}

	mutex_exit(&sc->sc_lock);

	return (error);
}
2780 
/*
 * Refresh the cached controller configuration used by the bio(4)
 * handlers: controller info, full config (sc_cfg), logical disk list
 * (sc_ld_list) and per-LD details (sc_ld_details), plus the count of
 * configured physical disks (sc_no_pd).
 * Returns 0 on success, EINVAL on any failure.
 */
int
mfii_bio_getitall(struct mfii_softc *sc)
{
	int			i, d, rv = EINVAL;
	size_t			size;
	union mfi_mbox		mbox;
	struct mfi_conf		*cfg = NULL;
	struct mfi_ld_details	*ld_det = NULL;

	/* get info */
	if (mfii_get_info(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_get_info failed\n",
		    DEVNAME(sc));
		goto done;
	}

	/* send single element command to retrieve size for full structure */
	cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
	    MFII_DATA_IN, false)) {
		free(cfg, M_DEVBUF);
		goto done;
	}

	/* mfc_size tells us how big the real config is */
	size = cfg->mfc_size;
	free(cfg, M_DEVBUF);

	/* memory for read config */
	cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size,
	    MFII_DATA_IN, false)) {
		free(cfg, M_DEVBUF);
		goto done;
	}

	/* replace current pointer with new one */
	if (sc->sc_cfg)
		free(sc->sc_cfg, M_DEVBUF);
	sc->sc_cfg = cfg;

	/* get all ld info */
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), MFII_DATA_IN, false))
		goto done;

	/* get memory for all ld structures */
	size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
	if (sc->sc_ld_sz != size) {
		/* LD count changed; reallocate the details array */
		if (sc->sc_ld_details)
			free(sc->sc_ld_details, M_DEVBUF);

		ld_det = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (ld_det == NULL)
			goto done;
		sc->sc_ld_sz = size;
		sc->sc_ld_details = ld_det;
	}

	/* find used physical disks */
	size = sizeof(struct mfi_ld_details);
	for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
		memset(&mbox, 0, sizeof(mbox));
		mbox.b[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		if (mfii_mgmt(sc, MR_DCMD_LD_GET_INFO, &mbox,
		    &sc->sc_ld_details[i], size, MFII_DATA_IN, false))
			goto done;

		/* disks used by this LD = drives per span * spans */
		d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
		    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
	}
	sc->sc_no_pd = d;

	rv = 0;
done:
	return (rv);
}
2861 
2862 int
2863 mfii_ioctl_inq(struct mfii_softc *sc, struct bioc_inq *bi)
2864 {
2865 	int			rv = EINVAL;
2866 	struct mfi_conf		*cfg = NULL;
2867 
2868 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_inq\n", DEVNAME(sc));
2869 
2870 	if (mfii_bio_getitall(sc)) {
2871 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
2872 		    DEVNAME(sc));
2873 		goto done;
2874 	}
2875 
2876 	/* count unused disks as volumes */
2877 	if (sc->sc_cfg == NULL)
2878 		goto done;
2879 	cfg = sc->sc_cfg;
2880 
2881 	bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
2882 	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
2883 #if notyet
2884 	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
2885 	    (bi->bi_nodisk - sc->sc_no_pd);
2886 #endif
2887 	/* tell bio who we are */
2888 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
2889 
2890 	rv = 0;
2891 done:
2892 	return (rv);
2893 }
2894 
/*
 * BIOCVOL handler: fill in status, RAID level, disk count and size
 * for a logical volume (or hotspare/unused disk for out-of-range ids).
 */
int
mfii_ioctl_vol(struct mfii_softc *sc, struct bioc_vol *bv)
{
	int			i, per, rv = EINVAL;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_vol %#x\n",
	    DEVNAME(sc), bv->bv_volid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
		/* go do hotspares & unused disks */
		rv = mfii_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
		goto done;
	}

	i = bv->bv_volid;
	strlcpy(bv->bv_dev, sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_name,
	    sizeof(bv->bv_dev));

	/* map firmware LD state to bio status */
	switch(sc->sc_ld_list.mll_list[i].mll_state) {
	case MFI_LD_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;

	case MFI_LD_PART_DEGRADED:
	case MFI_LD_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;

	case MFI_LD_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;

	default:
		bv->bv_status = BIOC_SVINVALID;
		DNPRINTF(MFII_D_IOCTL, "%s: invalid logical disk state %#x\n",
		    DEVNAME(sc),
		    sc->sc_ld_list.mll_list[i].mll_state);
	}

	/* additional status can modify MFI status */
	switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
	case MFI_LD_PROG_CC:
	case MFI_LD_PROG_BGI:
		/* consistency check / background init in progress */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_FGI:
	case MFI_LD_PROG_RECONSTRUCT:
		/* nothing yet */
		break;
	}

#if 0
	if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01)
		bv->bv_cache = BIOC_CVWRITEBACK;
	else
		bv->bv_cache = BIOC_CVWRITETHROUGH;
#endif

	/*
	 * The RAID levels are determined per the SNIA DDF spec, this is only
	 * a subset that is valid for the MFI controller.
	 */
	bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
	/* spanned groups report as e.g. RAID 10/50/60 */
	if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth > 1)
		bv->bv_level *= 10;

	bv->bv_nodisk = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;

	bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */

	rv = 0;
done:
	return (rv);
}
2982 
/*
 * BIOCDISK handler: locate the physical disk backing a given position
 * of a logical volume and report its location, state, size and vendor.
 */
int
mfii_ioctl_disk(struct mfii_softc *sc, struct bioc_disk *bd)
{
	struct mfi_conf		*cfg;
	struct mfi_array	*ar;
	struct mfi_ld_cfg	*ld;
	struct mfi_pd_details	*pd;
	struct mfi_pd_list	*pl;
	struct scsipi_inquiry_data *inqbuf;
	char			vend[8+16+4+1], *vendp;
	int			i, rv = EINVAL;
	int			arr, vol, disk, span;
	union mfi_mbox		mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_disk %#x\n",
	    DEVNAME(sc), bd->bd_diskid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	cfg = sc->sc_cfg;

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	ar = cfg->mfc_array;
	vol = bd->bd_volid;
	if (vol >= cfg->mfc_no_ld) {
		/* do hotspares */
		rv = mfii_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
		goto freeme;
	}

	/* calculate offset to ld structure */
	ld = (struct mfi_ld_cfg *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array);

	/* use span 0 only when raid group is not spanned */
	if (ld[vol].mlc_parm.mpa_span_depth > 1)
		span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
	else
		span = 0;
	arr = ld[vol].mlc_span[span].mls_index;

	/* offset disk into pd list */
	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;

	if (ar[arr].pd[disk].mar_pd.mfp_id == 0xffffU) {
		/* disk is missing but succeed command */
		bd->bd_status = BIOC_SDFAILED;
		rv = 0;

		/* try to find an unused disk for the target to rebuild */
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
		    MFII_DATA_IN, false))
			goto freeme;

		for (i = 0; i < pl->mpl_no_pd; i++) {
			if (pl->mpl_address[i].mpa_scsi_type != 0)
				continue;

			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox,
			    pd, sizeof(*pd), MFII_DATA_IN, false))
				continue;

			if (pd->mpd_fw_state == MFI_PD_UNCONFIG_GOOD ||
			    pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD)
				break;
		}

		/* no candidate found; report the missing disk as-is */
		if (i == pl->mpl_no_pd)
			goto freeme;
	} else {
		memset(&mbox, 0, sizeof(mbox));
		mbox.s[0] = ar[arr].pd[disk].mar_pd.mfp_id;
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
		    MFII_DATA_IN, false)) {
			bd->bd_status = BIOC_SDINVALID;
			goto freeme;
		}
	}

	/* get the remaining fields */
	bd->bd_channel = pd->mpd_enc_idx;
	bd->bd_target = pd->mpd_enc_slot;

	/* get status */
	switch (pd->mpd_fw_state){
	case MFI_PD_UNCONFIG_GOOD:
	case MFI_PD_UNCONFIG_BAD:
		bd->bd_status = BIOC_SDUNUSED;
		break;

	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
		bd->bd_status = BIOC_SDHOTSPARE;
		break;

	case MFI_PD_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;

	case MFI_PD_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;

	case MFI_PD_REBUILD:
		bd->bd_status = BIOC_SDREBUILD;
		break;

	case MFI_PD_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;

	case MFI_PD_COPYBACK:
	case MFI_PD_SYSTEM:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = pd->mpd_size * 512; /* bytes per block */

	/*
	 * vend[] is sized 8+16+4+1 to hold the vendor, product and
	 * revision fields of the inquiry data as one string.
	 */
	inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
	vendp = inqbuf->vendor;
	memcpy(vend, vendp, sizeof vend - 1);
	vend[sizeof vend - 1] = '\0';
	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));

	/* XXX find a way to retrieve serial nr from drive */
	/* XXX find a way to get bd_procdev */

#if 0
	mfp = &pd->mpd_progress;
	if (mfp->mfp_in_prog & MFI_PD_PROG_PR) {
		mp = &mfp->mfp_patrol_read;
		bd->bd_patrol.bdp_percent = (mp->mp_progress * 100) / 0xffff;
		bd->bd_patrol.bdp_seconds = mp->mp_elapsed_seconds;
	}
#endif

	rv = 0;
freeme:
	free(pd, M_DEVBUF);
	free(pl, M_DEVBUF);

	return (rv);
}
3136 
3137 int
3138 mfii_ioctl_alarm(struct mfii_softc *sc, struct bioc_alarm *ba)
3139 {
3140 	uint32_t		opc;
3141 	int			rv = 0;
3142 	int8_t			ret;
3143 	mfii_direction_t dir = MFII_DATA_NONE;
3144 
3145 	switch(ba->ba_opcode) {
3146 	case BIOC_SADISABLE:
3147 		opc = MR_DCMD_SPEAKER_DISABLE;
3148 		break;
3149 
3150 	case BIOC_SAENABLE:
3151 		opc = MR_DCMD_SPEAKER_ENABLE;
3152 		break;
3153 
3154 	case BIOC_SASILENCE:
3155 		opc = MR_DCMD_SPEAKER_SILENCE;
3156 		break;
3157 
3158 	case BIOC_GASTATUS:
3159 		opc = MR_DCMD_SPEAKER_GET;
3160 		dir = MFII_DATA_IN;
3161 		break;
3162 
3163 	case BIOC_SATEST:
3164 		opc = MR_DCMD_SPEAKER_TEST;
3165 		break;
3166 
3167 	default:
3168 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_alarm biocalarm invalid "
3169 		    "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
3170 		return (EINVAL);
3171 	}
3172 
3173 	if (mfii_mgmt(sc, opc, NULL, &ret, sizeof(ret), dir, false))
3174 		rv = EINVAL;
3175 	else
3176 		if (ba->ba_opcode == BIOC_GASTATUS)
3177 			ba->ba_status = ret;
3178 		else
3179 			ba->ba_status = 0;
3180 
3181 	return (rv);
3182 }
3183 
3184 int
3185 mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *bb)
3186 {
3187 	int			i, found, rv = EINVAL;
3188 	union mfi_mbox		mbox;
3189 	uint32_t		cmd;
3190 	struct mfi_pd_list	*pd;
3191 
3192 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink %x\n", DEVNAME(sc),
3193 	    bb->bb_status);
3194 
3195 	/* channel 0 means not in an enclosure so can't be blinked */
3196 	if (bb->bb_channel == 0)
3197 		return (EINVAL);
3198 
3199 	pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
3200 
3201 	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pd, sizeof(*pd),
3202 	    MFII_DATA_IN, false))
3203 		goto done;
3204 
3205 	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
3206 		if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
3207 		    bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
3208 			found = 1;
3209 			break;
3210 		}
3211 
3212 	if (!found)
3213 		goto done;
3214 
3215 	memset(&mbox, 0, sizeof(mbox));
3216 	mbox.s[0] = pd->mpl_address[i].mpa_pd_id;
3217 
3218 	switch (bb->bb_status) {
3219 	case BIOC_SBUNBLINK:
3220 		cmd = MR_DCMD_PD_UNBLINK;
3221 		break;
3222 
3223 	case BIOC_SBBLINK:
3224 		cmd = MR_DCMD_PD_BLINK;
3225 		break;
3226 
3227 	case BIOC_SBALARM:
3228 	default:
3229 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink biocblink invalid "
3230 		    "opcode %x\n", DEVNAME(sc), bb->bb_status);
3231 		goto done;
3232 	}
3233 
3234 
3235 	if (mfii_mgmt(sc, cmd, &mbox, NULL, 0, MFII_DATA_NONE, false))
3236 		goto done;
3237 
3238 	rv = 0;
3239 done:
3240 	free(pd, M_DEVBUF);
3241 	return (rv);
3242 }
3243 
/*
 * Try to bring physical disk pd_id into the UNCONFIG_GOOD state so it can
 * later be turned into a spare: clear an UNCONFIG_BAD state if set, then
 * clear any foreign configuration the disk carries.  Returns 0 on success,
 * an mfii_mgmt() error or ENXIO otherwise.
 */
static int
mfii_makegood(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfii_foreign_scan_info *fsi;
	struct mfi_pd_details	*pd;
	union mfi_mbox		mbox;
	int			rv;

	fsi = malloc(sizeof *fsi, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	/* fetch the disk's current firmware state */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	/* a disk marked bad must be set back to unconfigured-good first */
	if (pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD) {
		mbox.s[0] = pd_id;
		/* the current sequence number must accompany a state change */
		mbox.s[1] = pd->mpd_pd.mfp_seq;
		mbox.b[4] = MFI_PD_UNCONFIG_GOOD;
		rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0,
		    MFII_DATA_NONE, false);
		if (rv != 0)
			goto done;
	}

	/* re-read the state to pick up the result of the state change */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	/* if the disk carries a foreign config, scan for it and clear it */
	if (pd->mpd_ddf_state & MFI_DDF_FOREIGN) {
		rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_SCAN, NULL,
		    fsi, sizeof(*fsi), MFII_DATA_IN, false);
		if (rv != 0)
			goto done;

		if (fsi->count > 0) {
			rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_CLEAR, NULL,
			    NULL, 0, MFII_DATA_NONE, false);
			if (rv != 0)
				goto done;
		}
	}

	/* final check: the disk must now be usable */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state != MFI_PD_UNCONFIG_GOOD ||
	    pd->mpd_ddf_state & MFI_DDF_FOREIGN)
		rv = ENXIO;

done:
	free(fsi, M_DEVBUF);
	free(pd, M_DEVBUF);

	return (rv);
}
3310 
/*
 * Turn physical disk pd_id into a hot spare.  The controller config is
 * needed to size the hotspare structure (one uint16_t per array follows
 * the fixed part).  Returns 0 on success, non-zero on failure.
 */
static int
mfii_makespare(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfi_hotspare	*hs;
	struct mfi_pd_details	*pd;
	union mfi_mbox		mbox;
	size_t			size;
	int			rv = EINVAL;

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	/* fixed part plus one array index slot per configured array */
	size = sizeof *hs + sizeof(uint16_t) * sc->sc_cfg->mfc_no_array;

	hs = malloc(size, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	/* MAKE_SPARE needs the disk's current sequence number */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	/* only id/seq are filled in; the remaining fields stay zeroed */
	memset(hs, 0, size);
	hs->mhs_pd.mfp_id = pd->mpd_pd.mfp_id;
	hs->mhs_pd.mfp_seq = pd->mpd_pd.mfp_seq;
	rv = mfii_mgmt(sc, MR_DCMD_CFG_MAKE_SPARE, NULL, hs, size,
	    MFII_DATA_OUT, false);

done:
	free(hs, M_DEVBUF);
	free(pd, M_DEVBUF);

	return (rv);
}
3350 
/*
 * BIOCSSETSTATE ioctl: change the firmware state of the physical disk
 * identified by bs_channel (enclosure index) / bs_target (slot).  For a
 * rebuild request the disk is first made unconfigured-good and added as
 * a spare.  Returns 0 on success, EINVAL or a helper error otherwise.
 */
int
mfii_ioctl_setstate(struct mfii_softc *sc, struct bioc_setstate *bs)
{
	struct mfi_pd_details	*pd;
	struct mfi_pd_list	*pl;
	int			i, found, rv = EINVAL;
	union mfi_mbox		mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate %x\n", DEVNAME(sc),
	    bs->bs_status);

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	/* map the caller's enclosure/slot to a physical disk id */
	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
	    MFII_DATA_IN, false))
		goto done;

	for (i = 0, found = 0; i < pl->mpl_no_pd; i++)
		if (bs->bs_channel == pl->mpl_address[i].mpa_enc_index &&
		    bs->bs_target == pl->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	/* SET_STATE needs the disk's current sequence number */
	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;

	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false))
		goto done;

	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
	mbox.s[1] = pd->mpd_pd.mfp_seq;

	switch (bs->bs_status) {
	case BIOC_SSONLINE:
		mbox.b[4] = MFI_PD_ONLINE;
		break;

	case BIOC_SSOFFLINE:
		mbox.b[4] = MFI_PD_OFFLINE;
		break;

	case BIOC_SSHOTSPARE:
		mbox.b[4] = MFI_PD_HOTSPARE;
		break;

	case BIOC_SSREBUILD:
		/*
		 * A disk that is not offline must first be made
		 * unconfigured-good and turned into a spare before
		 * the firmware accepts the REBUILD state.
		 */
		if (pd->mpd_fw_state != MFI_PD_OFFLINE) {
			if ((rv = mfii_makegood(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			if ((rv = mfii_makespare(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			/* re-read: state and seq both changed above */
			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox,
			    pd, sizeof(*pd), MFII_DATA_IN, false);
			if (rv != 0)
				goto done;

			/* rebuilding might be started by mfii_makespare() */
			if (pd->mpd_fw_state == MFI_PD_REBUILD) {
				rv = 0;
				goto done;
			}

			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			mbox.s[1] = pd->mpd_pd.mfp_seq;
		}
		mbox.b[4] = MFI_PD_REBUILD;
		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate invalid "
		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
		goto done;
	}


	rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0,
	    MFII_DATA_NONE, false);
done:
	free(pd, M_DEVBUF);
	free(pl, M_DEVBUF);
	return (rv);
}
3445 
/*
 * Patrol-read ioctl support, currently compiled out.  Kept for reference
 * for a future port.
 */
#if 0
int
mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *bp)
{
	uint32_t		opc;
	int			rv = 0;
	struct mfi_pr_properties prop;
	struct mfi_pr_status	status;
	uint32_t		time, exec_freq;

	switch (bp->bp_opcode) {
	case BIOC_SPSTOP:
	case BIOC_SPSTART:
		if (bp->bp_opcode == BIOC_SPSTART)
			opc = MR_DCMD_PR_START;
		else
			opc = MR_DCMD_PR_STOP;
		/*
		 * NOTE(review): a NULL buffer of length 0 combined with
		 * MFII_DATA_IN looks inconsistent; MFII_DATA_NONE is
		 * presumably intended — confirm before enabling this code.
		 */
		if (mfii_mgmt(sc, opc, NULL, NULL, 0, MFII_DATA_IN, false))
			return (EINVAL);
		break;

	case BIOC_SPMANUAL:
	case BIOC_SPDISABLE:
	case BIOC_SPAUTO:
		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time),
		    MFII_DATA_IN, false))
			return (EINVAL);

		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_IN, false))
			return (EINVAL);

		/* modify only the fields relevant to the requested mode */
		switch (bp->bp_opcode) {
		case BIOC_SPMANUAL:
			prop.op_mode = MFI_PR_OPMODE_MANUAL;
			break;
		case BIOC_SPDISABLE:
			prop.op_mode = MFI_PR_OPMODE_DISABLED;
			break;
		case BIOC_SPAUTO:
			if (bp->bp_autoival != 0) {
				if (bp->bp_autoival == -1)
					/* continuously */
					exec_freq = 0xffffffffU;
				else if (bp->bp_autoival > 0)
					exec_freq = bp->bp_autoival;
				else
					return (EINVAL);
				prop.exec_freq = exec_freq;
			}
			if (bp->bp_autonext != 0) {
				if (bp->bp_autonext < 0)
					return (EINVAL);
				else
					prop.next_exec = time + bp->bp_autonext;
			}
			prop.op_mode = MFI_PR_OPMODE_AUTO;
			break;
		}

		/* write the updated properties back to the firmware */
		opc = MR_DCMD_PR_SET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_OUT, false))
			return (EINVAL);

		break;

	case BIOC_GPSTATUS:
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_IN, false))
			return (EINVAL);

		opc = MR_DCMD_PR_GET_STATUS;
		if (mfii_mgmt(sc, opc, NULL, &status, sizeof(status),
		    MFII_DATA_IN, false))
			return (EINVAL);

		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time),
		    MFII_DATA_IN, false))
			return (EINVAL);

		/* translate firmware op mode to bio(4) patrol mode */
		switch (prop.op_mode) {
		case MFI_PR_OPMODE_AUTO:
			bp->bp_mode = BIOC_SPMAUTO;
			bp->bp_autoival = prop.exec_freq;
			bp->bp_autonext = prop.next_exec;
			bp->bp_autonow = time;
			break;
		case MFI_PR_OPMODE_MANUAL:
			bp->bp_mode = BIOC_SPMMANUAL;
			break;
		case MFI_PR_OPMODE_DISABLED:
			bp->bp_mode = BIOC_SPMDISABLED;
			break;
		default:
			printf("%s: unknown patrol mode %d\n",
			    DEVNAME(sc), prop.op_mode);
			break;
		}

		/* translate firmware patrol state to bio(4) status */
		switch (status.state) {
		case MFI_PR_STATE_STOPPED:
			bp->bp_status = BIOC_SPSSTOPPED;
			break;
		case MFI_PR_STATE_READY:
			bp->bp_status = BIOC_SPSREADY;
			break;
		case MFI_PR_STATE_ACTIVE:
			bp->bp_status = BIOC_SPSACTIVE;
			break;
		case MFI_PR_STATE_ABORTED:
			bp->bp_status = BIOC_SPSABORTED;
			break;
		default:
			printf("%s: unknown patrol state %d\n",
			    DEVNAME(sc), status.state);
			break;
		}

		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_patrol biocpatrol invalid "
		    "opcode %x\n", DEVNAME(sc), bp->bp_opcode);
		return (EINVAL);
	}

	return (rv);
}
#endif
3582 
3583 int
3584 mfii_bio_hs(struct mfii_softc *sc, int volid, int type, void *bio_hs)
3585 {
3586 	struct mfi_conf		*cfg;
3587 	struct mfi_hotspare	*hs;
3588 	struct mfi_pd_details	*pd;
3589 	struct bioc_disk	*sdhs;
3590 	struct bioc_vol		*vdhs;
3591 	struct scsipi_inquiry_data *inqbuf;
3592 	char			vend[8+16+4+1], *vendp;
3593 	int			i, rv = EINVAL;
3594 	uint32_t		size;
3595 	union mfi_mbox		mbox;
3596 
3597 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs %d\n", DEVNAME(sc), volid);
3598 
3599 	if (!bio_hs)
3600 		return (EINVAL);
3601 
3602 	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3603 
3604 	/* send single element command to retrieve size for full structure */
3605 	cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
3606 	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
3607 	    MFII_DATA_IN, false))
3608 		goto freeme;
3609 
3610 	size = cfg->mfc_size;
3611 	free(cfg, M_DEVBUF);
3612 
3613 	/* memory for read config */
3614 	cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
3615 	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size,
3616 	    MFII_DATA_IN, false))
3617 		goto freeme;
3618 
3619 	/* calculate offset to hs structure */
3620 	hs = (struct mfi_hotspare *)(
3621 	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
3622 	    cfg->mfc_array_size * cfg->mfc_no_array +
3623 	    cfg->mfc_ld_size * cfg->mfc_no_ld);
3624 
3625 	if (volid < cfg->mfc_no_ld)
3626 		goto freeme; /* not a hotspare */
3627 
3628 	if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
3629 		goto freeme; /* not a hotspare */
3630 
3631 	/* offset into hotspare structure */
3632 	i = volid - cfg->mfc_no_ld;
3633 
3634 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs i %d volid %d no_ld %d no_hs %d "
3635 	    "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
3636 	    cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
3637 
3638 	/* get pd fields */
3639 	memset(&mbox, 0, sizeof(mbox));
3640 	mbox.s[0] = hs[i].mhs_pd.mfp_id;
3641 	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3642 	    MFII_DATA_IN, false)) {
3643 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs illegal PD\n",
3644 		    DEVNAME(sc));
3645 		goto freeme;
3646 	}
3647 
3648 	switch (type) {
3649 	case MFI_MGMT_VD:
3650 		vdhs = bio_hs;
3651 		vdhs->bv_status = BIOC_SVONLINE;
3652 		vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3653 		vdhs->bv_level = -1; /* hotspare */
3654 		vdhs->bv_nodisk = 1;
3655 		break;
3656 
3657 	case MFI_MGMT_SD:
3658 		sdhs = bio_hs;
3659 		sdhs->bd_status = BIOC_SDHOTSPARE;
3660 		sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3661 		sdhs->bd_channel = pd->mpd_enc_idx;
3662 		sdhs->bd_target = pd->mpd_enc_slot;
3663 		inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
3664 		vendp = inqbuf->vendor;
3665 		memcpy(vend, vendp, sizeof vend - 1);
3666 		vend[sizeof vend - 1] = '\0';
3667 		strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
3668 		break;
3669 
3670 	default:
3671 		goto freeme;
3672 	}
3673 
3674 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs 6\n", DEVNAME(sc));
3675 	rv = 0;
3676 freeme:
3677 	free(pd, M_DEVBUF);
3678 	free(cfg, M_DEVBUF);
3679 
3680 	return (rv);
3681 }
3682 
3683 #endif /* NBIO > 0 */
3684 
3685 #define MFI_BBU_SENSORS 4
3686 
3687 void
3688 mfii_bbu(struct mfii_softc *sc, envsys_data_t *edata)
3689 {
3690 	struct mfi_bbu_status bbu;
3691 	u_int32_t status;
3692 	u_int32_t mask;
3693 	u_int32_t soh_bad;
3694 	int rv;
3695 
3696 	mutex_enter(&sc->sc_lock);
3697 	rv = mfii_mgmt(sc, MR_DCMD_BBU_GET_STATUS, NULL, &bbu,
3698 	    sizeof(bbu), MFII_DATA_IN, false);
3699 	mutex_exit(&sc->sc_lock);
3700 	if (rv != 0) {
3701 		edata->state = ENVSYS_SINVALID;
3702 		edata->value_cur = 0;
3703 		return;
3704 	}
3705 
3706 	switch (bbu.battery_type) {
3707 	case MFI_BBU_TYPE_IBBU:
3708 		mask = MFI_BBU_STATE_BAD_IBBU;
3709 		soh_bad = 0;
3710 		break;
3711 	case MFI_BBU_TYPE_BBU:
3712 		mask = MFI_BBU_STATE_BAD_BBU;
3713 		soh_bad = (bbu.detail.bbu.is_SOH_good == 0);
3714 		break;
3715 
3716 	case MFI_BBU_TYPE_NONE:
3717 	default:
3718 		edata->state = ENVSYS_SCRITICAL;
3719 		edata->value_cur = 0;
3720 		return;
3721 	}
3722 
3723 	status = le32toh(bbu.fw_status) & mask;
3724 	switch(edata->sensor) {
3725 	case 0:
3726 		edata->value_cur = (status || soh_bad) ? 0 : 1;
3727 		edata->state =
3728 		    edata->value_cur ? ENVSYS_SVALID : ENVSYS_SCRITICAL;
3729 		return;
3730 	case 1:
3731 		edata->value_cur = le16toh(bbu.voltage) * 1000;
3732 		edata->state = ENVSYS_SVALID;
3733 		return;
3734 	case 2:
3735 		edata->value_cur = (int16_t)le16toh(bbu.current) * 1000;
3736 		edata->state = ENVSYS_SVALID;
3737 		return;
3738 	case 3:
3739 		edata->value_cur = le16toh(bbu.temperature) * 1000000 + 273150000;
3740 		edata->state = ENVSYS_SVALID;
3741 		return;
3742 	}
3743 }
3744 
3745 void
3746 mfii_refresh_ld_sensor(struct mfii_softc *sc, envsys_data_t *edata)
3747 {
3748 	struct bioc_vol bv;
3749 	int error;
3750 
3751 	memset(&bv, 0, sizeof(bv));
3752 	bv.bv_volid = edata->sensor - MFI_BBU_SENSORS;
3753 	mutex_enter(&sc->sc_lock);
3754 	error = mfii_ioctl_vol(sc, &bv);
3755 	mutex_exit(&sc->sc_lock);
3756 	if (error)
3757 		bv.bv_status = BIOC_SVINVALID;
3758 	bio_vol_to_envsys(edata, &bv);
3759 }
3760 
3761 void
3762 mfii_init_ld_sensor(struct mfii_softc *sc, envsys_data_t *sensor, int i)
3763 {
3764 	sensor->units = ENVSYS_DRIVE;
3765 	sensor->state = ENVSYS_SINVALID;
3766 	sensor->value_cur = ENVSYS_DRIVE_EMPTY;
3767 	/* Enable monitoring for drive state changes */
3768 	sensor->flags |= ENVSYS_FMONSTCHANGED;
3769 	snprintf(sensor->desc, sizeof(sensor->desc), "%s:%d", DEVNAME(sc), i);
3770 }
3771 
3772 static void
3773 mfii_attach_sensor(struct mfii_softc *sc, envsys_data_t *s)
3774 {
3775 	if (sysmon_envsys_sensor_attach(sc->sc_sme, s))
3776 		aprint_error_dev(sc->sc_dev,
3777 		    "failed to attach sensor %s\n", s->desc);
3778 }
3779 
3780 int
3781 mfii_create_sensors(struct mfii_softc *sc)
3782 {
3783 	int i, rv;
3784 	const int nsensors = MFI_BBU_SENSORS + MFI_MAX_LD;
3785 
3786 	sc->sc_sme = sysmon_envsys_create();
3787 	sc->sc_sensors = malloc(sizeof(envsys_data_t) * nsensors,
3788 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3789 
3790 	if (sc->sc_sensors == NULL) {
3791 		aprint_error_dev(sc->sc_dev, "can't allocate envsys_data_t\n");
3792 		return ENOMEM;
3793 	}
3794 	/* BBU */
3795 	sc->sc_sensors[0].units = ENVSYS_INDICATOR;
3796 	sc->sc_sensors[0].state = ENVSYS_SINVALID;
3797 	sc->sc_sensors[0].value_cur = 0;
3798 	sc->sc_sensors[1].units = ENVSYS_SVOLTS_DC;
3799 	sc->sc_sensors[1].state = ENVSYS_SINVALID;
3800 	sc->sc_sensors[1].value_cur = 0;
3801 	sc->sc_sensors[2].units = ENVSYS_SAMPS;
3802 	sc->sc_sensors[2].state = ENVSYS_SINVALID;
3803 	sc->sc_sensors[2].value_cur = 0;
3804 	sc->sc_sensors[3].units = ENVSYS_STEMP;
3805 	sc->sc_sensors[3].state = ENVSYS_SINVALID;
3806 	sc->sc_sensors[3].value_cur = 0;
3807 
3808 	if (ISSET(le32toh(sc->sc_info.mci_hw_present), MFI_INFO_HW_BBU)) {
3809 		sc->sc_bbuok = true;
3810 		sc->sc_sensors[0].flags |= ENVSYS_FMONCRITICAL;
3811 		snprintf(sc->sc_sensors[0].desc, sizeof(sc->sc_sensors[0].desc),
3812 		    "%s BBU state", DEVNAME(sc));
3813 		snprintf(sc->sc_sensors[1].desc, sizeof(sc->sc_sensors[1].desc),
3814 		    "%s BBU voltage", DEVNAME(sc));
3815 		snprintf(sc->sc_sensors[2].desc, sizeof(sc->sc_sensors[2].desc),
3816 		    "%s BBU current", DEVNAME(sc));
3817 		snprintf(sc->sc_sensors[3].desc, sizeof(sc->sc_sensors[3].desc),
3818 		    "%s BBU temperature", DEVNAME(sc));
3819 		for (i = 0; i < MFI_BBU_SENSORS; i++) {
3820 			mfii_attach_sensor(sc, &sc->sc_sensors[i]);
3821 		}
3822 	}
3823 
3824 	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
3825 		mfii_init_ld_sensor(sc, &sc->sc_sensors[i + MFI_BBU_SENSORS], i);
3826 		mfii_attach_sensor(sc, &sc->sc_sensors[i + MFI_BBU_SENSORS]);
3827 	}
3828 
3829 	sc->sc_sme->sme_name = DEVNAME(sc);
3830 	sc->sc_sme->sme_cookie = sc;
3831 	sc->sc_sme->sme_refresh = mfii_refresh_sensor;
3832 	rv = sysmon_envsys_register(sc->sc_sme);
3833 	if (rv) {
3834 		aprint_error_dev(sc->sc_dev,
3835 		    "unable to register with sysmon (rv = %d)\n", rv);
3836 	}
3837 	return rv;
3838 
3839 }
3840 
3841 static int
3842 mfii_destroy_sensors(struct mfii_softc *sc)
3843 {
3844 	if (sc->sc_sme == NULL)
3845 		return 0;
3846 	sysmon_envsys_unregister(sc->sc_sme);
3847 	sc->sc_sme = NULL;
3848 	free(sc->sc_sensors, M_DEVBUF);
3849 	return 0;
3850 }
3851 
3852 void
3853 mfii_refresh_sensor(struct sysmon_envsys *sme, envsys_data_t *edata)
3854 {
3855 	struct mfii_softc	*sc = sme->sme_cookie;
3856 
3857 	if (edata->sensor >= MFI_BBU_SENSORS + MFI_MAX_LD)
3858 		return;
3859 
3860 	if (edata->sensor < MFI_BBU_SENSORS) {
3861 		if (sc->sc_bbuok)
3862 			mfii_bbu(sc, edata);
3863 	} else {
3864 		mfii_refresh_ld_sensor(sc, edata);
3865 	}
3866 }
3867