xref: /openbsd-src/sys/dev/pci/mfii.c (revision 4b70baf6e17fc8b27fc1f7fa7929335753fa94c3)
1 /* $OpenBSD: mfii.c,v 1.60 2019/03/05 01:43:07 jmatthew Exp $ */
2 
3 /*
4  * Copyright (c) 2012 David Gwynne <dlg@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "bio.h"
20 
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/malloc.h>
24 #include <sys/device.h>
25 #include <sys/dkio.h>
26 #include <sys/pool.h>
27 #include <sys/task.h>
28 #include <sys/atomic.h>
29 #include <sys/sensors.h>
30 #include <sys/rwlock.h>
31 
32 #include <dev/biovar.h>
33 #include <dev/pci/pcidevs.h>
34 #include <dev/pci/pcivar.h>
35 
36 #include <machine/bus.h>
37 
38 #include <scsi/scsi_all.h>
39 #include <scsi/scsi_disk.h>
40 #include <scsi/scsiconf.h>
41 
42 #include <dev/ic/mfireg.h>
43 #include <dev/pci/mpiireg.h>
44 
45 #define	MFII_BAR		0x14
46 #define MFII_BAR_35		0x10
47 #define	MFII_PCI_MEMSIZE	0x2000 /* 8k */
48 
49 #define MFII_OSTS_INTR_VALID	0x00000009
50 #define MFII_RPI		0x6c /* reply post host index */
51 #define MFII_OSP2		0xb4 /* outbound scratch pad 2 */
52 #define MFII_OSP3		0xb8 /* outbound scratch pad 3 */
53 
54 #define MFII_REQ_TYPE_SCSI	MPII_REQ_DESCR_SCSI_IO
55 #define MFII_REQ_TYPE_LDIO	(0x7 << 1)
56 #define MFII_REQ_TYPE_MFA	(0x1 << 1)
57 #define MFII_REQ_TYPE_NO_LOCK	(0x2 << 1)
58 #define MFII_REQ_TYPE_HI_PRI	(0x6 << 1)
59 
60 #define MFII_REQ_MFA(_a)	htole64((_a) | MFII_REQ_TYPE_MFA)
61 
62 #define MFII_FUNCTION_PASSTHRU_IO			(0xf0)
63 #define MFII_FUNCTION_LDIO_REQUEST			(0xf1)
64 
65 #define MFII_MAX_CHAIN_UNIT	0x00400000
66 #define MFII_MAX_CHAIN_MASK	0x000003E0
67 #define MFII_MAX_CHAIN_SHIFT	5
68 
69 #define MFII_256K_IO		128
70 #define MFII_1MB_IO		(MFII_256K_IO * 4)
71 
72 #define MFII_CHAIN_FRAME_MIN	1024
73 
/*
 * Request descriptor posted to the controller to hand a command to
 * the firmware: smid identifies the request frame, flags carries one
 * of the MFII_REQ_TYPE_* values.  Layout is fixed by the hardware.
 */
struct mfii_request_descr {
	u_int8_t	flags;
	u_int8_t	msix_index;
	u_int16_t	smid;

	u_int16_t	lmid;
	u_int16_t	dev_handle;
} __packed;
82 
83 #define MFII_RAID_CTX_IO_TYPE_SYSPD	(0x1 << 4)
84 #define MFII_RAID_CTX_TYPE_CUDA		(0x2 << 4)
85 
/*
 * RAID context embedded after the MPII SCSI IO header in each request
 * frame; carries per-command RAID state (target, region lock, status)
 * between driver and firmware.  Layout is fixed by the firmware
 * interface -- do not reorder or resize fields.
 */
struct mfii_raid_context {
	u_int8_t	type_nseg;
	u_int8_t	_reserved1;
	u_int16_t	timeout_value;

	u_int16_t	reg_lock_flags;
#define MFII_RAID_CTX_RL_FLAGS_SEQNO_EN	(0x08)
#define MFII_RAID_CTX_RL_FLAGS_CPU0	(0x00)
#define MFII_RAID_CTX_RL_FLAGS_CPU1	(0x10)
#define MFII_RAID_CTX_RL_FLAGS_CUDA	(0x80)

/* gen 3.5 controllers reuse this field for routing flags */
#define MFII_RAID_CTX_ROUTING_FLAGS_SQN	(1 << 4)
#define MFII_RAID_CTX_ROUTING_FLAGS_CPU0 0
	u_int16_t	virtual_disk_target_id;

	u_int64_t	reg_lock_row_lba;

	u_int32_t	reg_lock_length;

	u_int16_t	next_lm_id;
	u_int8_t	ex_status;	/* extended status from firmware */
	u_int8_t	status;		/* completion status from firmware */

	u_int8_t	raid_flags;
	u_int8_t	num_sge;
	u_int16_t	config_seq_num;

	u_int8_t	span_arm;
	u_int8_t	_reserved3[3];
} __packed;
116 
/*
 * IEEE-style scatter/gather element; sg_flags takes a combination of
 * the MFII_SGE_* values defined below.
 */
struct mfii_sge {
	u_int64_t	sg_addr;
	u_int32_t	sg_len;
	u_int16_t	_reserved;
	u_int8_t	sg_next_chain_offset;
	u_int8_t	sg_flags;
} __packed;
124 
125 #define MFII_SGE_ADDR_MASK		(0x03)
126 #define MFII_SGE_ADDR_SYSTEM		(0x00)
127 #define MFII_SGE_ADDR_IOCDDR		(0x01)
128 #define MFII_SGE_ADDR_IOCPLB		(0x02)
129 #define MFII_SGE_ADDR_IOCPLBNTA		(0x03)
130 #define MFII_SGE_END_OF_LIST		(0x40)
131 #define MFII_SGE_CHAIN_ELEMENT		(0x80)
132 
133 #define MFII_REQUEST_SIZE	256
134 
135 #define MR_DCMD_LD_MAP_GET_INFO			0x0300e101
136 
137 #define MFII_MAX_ROW		32
138 #define MFII_MAX_ARRAY		128
139 
/* one array row: the physical disks making up a stripe of an ld */
struct mfii_array_map {
	uint16_t		mam_pd[MFII_MAX_ROW];
} __packed;

/* current and alternate MPII device handles for one physical disk */
struct mfii_dev_handle {
	uint16_t		mdh_cur_handle;
	uint8_t			mdh_valid;
	uint8_t			mdh_reserved;
	uint16_t		mdh_handle[2];
} __packed;

/*
 * Logical drive map returned by MR_DCMD_LD_MAP_GET_INFO; used by
 * mfii_dev_handles_update() to fetch per-pd device handles and the
 * pd command timeout.
 */
struct mfii_ld_map {
	uint32_t		mlm_total_size;
	uint32_t		mlm_reserved1[5];
	uint32_t		mlm_num_lds;
	uint32_t		mlm_reserved2;
	uint8_t			mlm_tgtid_to_ld[2 * MFI_MAX_LD];
	uint8_t			mlm_pd_timeout;
	uint8_t			mlm_reserved3[7];
	struct mfii_array_map	mlm_am[MFII_MAX_ARRAY];
	struct mfii_dev_handle	mlm_dev_handle[MFI_MAX_PD];
} __packed;
162 
/*
 * Task management frame: an MPII SCSI task request followed by its
 * reply, each padded out to 128 bytes.
 */
struct mfii_task_mgmt {
	union {
		uint8_t			request[128];
		struct mpii_msg_scsi_task_request
					mpii_request;
	} __packed __aligned(8);

	union {
		uint8_t			reply[128];
		uint32_t		flags;
#define MFII_TASK_MGMT_FLAGS_LD				(1 << 0)
#define MFII_TASK_MGMT_FLAGS_PD				(1 << 1)
		struct mpii_msg_scsi_task_reply
					mpii_reply;
	} __packed __aligned(8);
} __packed __aligned(8);
179 
/*
 * A chunk of bus dma memory: the map, its single segment, the mapped
 * kernel va and the allocation size.  Managed by mfii_dmamem_alloc()
 * and mfii_dmamem_free(); use the accessor macros below.
 */
struct mfii_dmamem {
	bus_dmamap_t		mdm_map;
	bus_dma_segment_t	mdm_seg;
	size_t			mdm_size;
	caddr_t			mdm_kva;
};
#define MFII_DMA_MAP(_mdm)	((_mdm)->mdm_map)
#define MFII_DMA_LEN(_mdm)	((_mdm)->mdm_size)
#define MFII_DMA_DVA(_mdm)	((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MFII_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
190 
191 struct mfii_softc;
192 
/*
 * Per-command control block.  Each ccb owns a slice of the shared
 * request, mfi, sense and sgl dma areas; for each slice it keeps the
 * kva pointer, the device address, and the offset into the parent
 * map (for bus_dmamap_sync of just that slice).
 */
struct mfii_ccb {
	void			*ccb_request;
	u_int64_t		ccb_request_dva;
	bus_addr_t		ccb_request_offset;

	void			*ccb_mfi;
	u_int64_t		ccb_mfi_dva;
	bus_addr_t		ccb_mfi_offset;

	struct mfi_sense	*ccb_sense;
	u_int64_t		ccb_sense_dva;
	bus_addr_t		ccb_sense_offset;

	struct mfii_sge		*ccb_sgl;
	u_int64_t		ccb_sgl_dva;
	bus_addr_t		ccb_sgl_offset;
	u_int			ccb_sgl_len;

	/* descriptor posted to the hardware for this command */
	struct mfii_request_descr ccb_req;

	bus_dmamap_t		ccb_dmamap;

	/* data for sgl */
	void			*ccb_data;
	size_t			ccb_len;

	int			ccb_direction;
#define MFII_DATA_NONE			0
#define MFII_DATA_IN			1
#define MFII_DATA_OUT			2

	void			*ccb_cookie;	/* owner's context for ccb_done */
	void			(*ccb_done)(struct mfii_softc *,
				    struct mfii_ccb *);

	u_int32_t		ccb_flags;
#define MFI_CCB_F_ERR			(1<<0)
	u_int			ccb_smid;
	u_int			ccb_refcnt;
	SIMPLEQ_ENTRY(mfii_ccb)	ccb_link;
};
SIMPLEQ_HEAD(mfii_ccb_list, mfii_ccb);
235 
/* state for the passthrough (physical disk) scsibus, see mfii_syspd() */
struct mfii_pd_softc {
	struct scsi_link	pd_link;
	struct scsibus_softc	*pd_scsibus;
	/* srp-protected uint16_t[MFI_MAX_PD] map of target to dev handle */
	struct srp		pd_dev_handles;
	uint8_t			pd_timeout;
};
242 
/*
 * Per-controller-generation constants, selected by pci id in
 * mfii_find_iop().  See mfii_iop_thunderbolt/_25/_35 below.
 */
struct mfii_iop {
	int bar;		/* pci bar holding the register window */
	int num_sge_loc;
#define MFII_IOP_NUM_SGE_LOC_ORIG	0
#define MFII_IOP_NUM_SGE_LOC_35		1
	u_int16_t ldio_ctx_reg_lock_flags;
	u_int8_t ldio_req_type;
	u_int8_t ldio_ctx_type_nseg;
	u_int8_t sge_flag_chain;
	u_int8_t sge_flag_eol;
};
254 
/* per-controller state */
struct mfii_softc {
	struct device		sc_dev;
	const struct mfii_iop	*sc_iop;	/* controller personality */

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	void			*sc_ih;		/* interrupt handle */

	struct mutex		sc_ccb_mtx;
	struct mutex		sc_post_mtx;

	u_int			sc_max_fw_cmds;	/* limit reported by firmware */
	u_int			sc_max_cmds;	/* commands we actually use */
	u_int			sc_max_sgl;	/* max sges per command */

	u_int			sc_reply_postq_depth;
	u_int			sc_reply_postq_index;
	struct mutex		sc_reply_postq_mtx;
	struct mfii_dmamem	*sc_reply_postq;

	/* shared dma areas, sliced into per-ccb chunks */
	struct mfii_dmamem	*sc_requests;
	struct mfii_dmamem	*sc_mfi;
	struct mfii_dmamem	*sc_sense;
	struct mfii_dmamem	*sc_sgl;

	struct mfii_ccb		*sc_ccb;
	struct mfii_ccb_list	sc_ccb_freeq;

	struct mfii_ccb		*sc_aen_ccb;
	struct task		sc_aen_task;

	struct mutex		sc_abort_mtx;
	struct mfii_ccb_list	sc_abort_list;
	struct task		sc_abort_task;

	struct scsi_link	sc_link;
	struct scsibus_softc	*sc_scsibus;
	struct mfii_pd_softc	*sc_pd;		/* passthrough bus, or NULL */
	struct scsi_iopool	sc_iopool;

	/* save some useful information for logical drives that is missing
	 * in sc_ld_list
	 */
	struct {
		char		ld_dev[16];	/* device name sd? */
	}			sc_ld[MFI_MAX_LD];
	int			sc_target_lds[MFI_MAX_LD];

	/* scsi ioctl from sd device */
	int			(*sc_ioctl)(struct device *, u_long, caddr_t);

	/* bio */
	struct mfi_conf		*sc_cfg;
	struct mfi_ctrl_info	sc_info;
	struct mfi_ld_list	sc_ld_list;
	struct mfi_ld_details	*sc_ld_details; /* array to all logical disks */
	int			sc_no_pd; /* used physical disks */
	int			sc_ld_sz; /* sizeof sc_ld_details */

	/* mgmt lock */
	struct rwlock		sc_lock;

	/* sensors */
	struct ksensordev	sc_sensordev;
	struct ksensor		*sc_bbu;
	struct ksensor		*sc_bbu_status;
	struct ksensor		*sc_sensors;
};
329 
330 #ifdef MFII_DEBUG
331 #define DPRINTF(x...)		do { if (mfii_debug) printf(x); } while(0)
332 #define DNPRINTF(n,x...)	do { if (mfii_debug & n) printf(x); } while(0)
333 #define	MFII_D_CMD		0x0001
334 #define	MFII_D_INTR		0x0002
335 #define	MFII_D_MISC		0x0004
336 #define	MFII_D_DMA		0x0008
337 #define	MFII_D_IOCTL		0x0010
338 #define	MFII_D_RW		0x0020
339 #define	MFII_D_MEM		0x0040
340 #define	MFII_D_CCB		0x0080
341 uint32_t	mfii_debug = 0
342 /*		    | MFII_D_CMD */
343 /*		    | MFII_D_INTR */
344 		    | MFII_D_MISC
345 /*		    | MFII_D_DMA */
346 /*		    | MFII_D_IOCTL */
347 /*		    | MFII_D_RW */
348 /*		    | MFII_D_MEM */
349 /*		    | MFII_D_CCB */
350 		;
351 #else
352 #define DPRINTF(x...)
353 #define DNPRINTF(n,x...)
354 #endif
355 
356 int		mfii_match(struct device *, void *, void *);
357 void		mfii_attach(struct device *, struct device *, void *);
358 int		mfii_detach(struct device *, int);
359 
/* autoconf(9) glue */
struct cfattach mfii_ca = {
	sizeof(struct mfii_softc),
	mfii_match,
	mfii_attach,
	mfii_detach
};

struct cfdriver mfii_cd = {
	NULL,
	"mfii",
	DV_DULL
};
372 
373 void		mfii_scsi_cmd(struct scsi_xfer *);
374 void		mfii_scsi_cmd_done(struct mfii_softc *, struct mfii_ccb *);
375 int		mfii_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
376 int		mfii_ioctl_cache(struct scsi_link *, u_long, struct dk_cache *);
377 
/* adapter entry points for the logical drive bus */
struct scsi_adapter mfii_switch = {
	mfii_scsi_cmd,
	scsi_minphys,
	NULL, /* probe */
	NULL, /* unprobe */
	mfii_scsi_ioctl
};
385 
386 void		mfii_pd_scsi_cmd(struct scsi_xfer *);
387 int		mfii_pd_scsi_probe(struct scsi_link *);
388 
/* adapter entry points for the passthrough (physical disk) bus */
struct scsi_adapter mfii_pd_switch = {
	mfii_pd_scsi_cmd,
	scsi_minphys,
	mfii_pd_scsi_probe
};
394 
395 #define DEVNAME(_sc)		((_sc)->sc_dev.dv_xname)
396 
397 u_int32_t		mfii_read(struct mfii_softc *, bus_size_t);
398 void			mfii_write(struct mfii_softc *, bus_size_t, u_int32_t);
399 
400 struct mfii_dmamem *	mfii_dmamem_alloc(struct mfii_softc *, size_t);
401 void			mfii_dmamem_free(struct mfii_softc *,
402 			    struct mfii_dmamem *);
403 
404 void *			mfii_get_ccb(void *);
405 void			mfii_put_ccb(void *, void *);
406 int			mfii_init_ccb(struct mfii_softc *);
407 void			mfii_scrub_ccb(struct mfii_ccb *);
408 
409 int			mfii_transition_firmware(struct mfii_softc *);
410 int			mfii_initialise_firmware(struct mfii_softc *);
411 int			mfii_get_info(struct mfii_softc *);
412 int			mfii_syspd(struct mfii_softc *);
413 
414 void			mfii_start(struct mfii_softc *, struct mfii_ccb *);
415 void			mfii_done(struct mfii_softc *, struct mfii_ccb *);
416 int			mfii_poll(struct mfii_softc *, struct mfii_ccb *);
417 void			mfii_poll_done(struct mfii_softc *, struct mfii_ccb *);
418 int			mfii_exec(struct mfii_softc *, struct mfii_ccb *);
419 void			mfii_exec_done(struct mfii_softc *, struct mfii_ccb *);
420 int			mfii_my_intr(struct mfii_softc *);
421 int			mfii_intr(void *);
422 void			mfii_postq(struct mfii_softc *);
423 
424 int			mfii_load_ccb(struct mfii_softc *, struct mfii_ccb *,
425 			    void *, int);
426 int			mfii_load_mfa(struct mfii_softc *, struct mfii_ccb *,
427 			    void *, int);
428 
429 int			mfii_mfa_poll(struct mfii_softc *, struct mfii_ccb *);
430 
431 int			mfii_mgmt(struct mfii_softc *, uint32_t,
432 			    const union mfi_mbox *, void *, size_t, int);
433 int			mfii_do_mgmt(struct mfii_softc *, struct mfii_ccb *,
434 			    uint32_t, const union mfi_mbox *, void *, size_t,
435 			    int);
436 void			mfii_empty_done(struct mfii_softc *, struct mfii_ccb *);
437 
438 int			mfii_scsi_cmd_io(struct mfii_softc *,
439 			    struct scsi_xfer *);
440 int			mfii_scsi_cmd_cdb(struct mfii_softc *,
441 			    struct scsi_xfer *);
442 int			mfii_pd_scsi_cmd_cdb(struct mfii_softc *,
443 			    struct scsi_xfer *);
444 void			mfii_scsi_cmd_tmo(void *);
445 
446 int			mfii_dev_handles_update(struct mfii_softc *sc);
447 void			mfii_dev_handles_dtor(void *, void *);
448 
449 void			mfii_abort_task(void *);
450 void			mfii_abort(struct mfii_softc *, struct mfii_ccb *,
451 			    uint16_t, uint16_t, uint8_t, uint32_t);
452 void			mfii_scsi_cmd_abort_done(struct mfii_softc *,
453 			    struct mfii_ccb *);
454 
455 int			mfii_aen_register(struct mfii_softc *);
456 void			mfii_aen_start(struct mfii_softc *, struct mfii_ccb *,
457 			    struct mfii_dmamem *, uint32_t);
458 void			mfii_aen_done(struct mfii_softc *, struct mfii_ccb *);
459 void			mfii_aen(void *);
460 void			mfii_aen_unregister(struct mfii_softc *);
461 
462 void			mfii_aen_pd_insert(struct mfii_softc *,
463 			    const struct mfi_evtarg_pd_address *);
464 void			mfii_aen_pd_remove(struct mfii_softc *,
465 			    const struct mfi_evtarg_pd_address *);
466 void			mfii_aen_pd_state_change(struct mfii_softc *,
467 			    const struct mfi_evtarg_pd_state *);
468 void			mfii_aen_ld_update(struct mfii_softc *);
469 
470 #if NBIO > 0
471 int		mfii_ioctl(struct device *, u_long, caddr_t);
472 int		mfii_bio_getitall(struct mfii_softc *);
473 int		mfii_ioctl_inq(struct mfii_softc *, struct bioc_inq *);
474 int		mfii_ioctl_vol(struct mfii_softc *, struct bioc_vol *);
475 int		mfii_ioctl_disk(struct mfii_softc *, struct bioc_disk *);
476 int		mfii_ioctl_alarm(struct mfii_softc *, struct bioc_alarm *);
477 int		mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *);
478 int		mfii_ioctl_setstate(struct mfii_softc *,
479 		    struct bioc_setstate *);
480 int		mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *);
481 int		mfii_bio_hs(struct mfii_softc *, int, int, void *);
482 
483 #ifndef SMALL_KERNEL
/*
 * Human readable labels for battery backup unit conditions;
 * presumably indexed by bit position of the bbu status word --
 * verify against mfii_bbu().
 */
static const char *mfi_bbu_indicators[] = {
	"pack missing",
	"voltage low",
	"temp high",
	"charge active",
	"discharge active",
	"learn cycle req'd",
	"learn cycle active",
	"learn cycle failed",
	"learn cycle timeout",
	"I2C errors",
	"replace pack",
	"low capacity",
	"periodic learn req'd"
};
499 
500 void		mfii_init_ld_sensor(struct mfii_softc *, int);
501 void		mfii_refresh_ld_sensor(struct mfii_softc *, int);
502 int		mfii_create_sensors(struct mfii_softc *);
503 void		mfii_refresh_sensors(void *);
504 void		mfii_bbu(struct mfii_softc *);
505 #endif /* SMALL_KERNEL */
506 #endif /* NBIO > 0 */
507 
508 /*
509  * mfii boards support asynchronous (and non-polled) completion of
510  * dcmds by proxying them through a passthru mpii command that points
511  * at a dcmd frame. since the passthru command is submitted like
512  * the scsi commands using an SMID in the request descriptor,
513  * ccb_request memory * must contain the passthru command because
514  * that is what the SMID refers to. this means ccb_request cannot
515  * contain the dcmd. rather than allocating separate dma memory to
516  * hold the dcmd, we reuse the sense memory buffer for it.
517  */
518 
519 void			mfii_dcmd_start(struct mfii_softc *,
520 			    struct mfii_ccb *);
521 
522 static inline void
523 mfii_dcmd_scrub(struct mfii_ccb *ccb)
524 {
525 	memset(ccb->ccb_sense, 0, sizeof(*ccb->ccb_sense));
526 }
527 
528 static inline struct mfi_dcmd_frame *
529 mfii_dcmd_frame(struct mfii_ccb *ccb)
530 {
531 	CTASSERT(sizeof(struct mfi_dcmd_frame) <= sizeof(*ccb->ccb_sense));
532 	return ((struct mfi_dcmd_frame *)ccb->ccb_sense);
533 }
534 
/*
 * Sync just this ccb's slice of the shared sense dma area, i.e. the
 * memory backing its dcmd frame.
 */
static inline void
mfii_dcmd_sync(struct mfii_softc *sc, struct mfii_ccb *ccb, int flags)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sense),
	    ccb->ccb_sense_offset, sizeof(*ccb->ccb_sense), flags);
}
541 
542 #define mfii_fw_state(_sc) mfii_read((_sc), MFI_OSP)
543 
/* initializers below follow struct mfii_iop field order */

const struct mfii_iop mfii_iop_thunderbolt = {
	MFII_BAR,
	MFII_IOP_NUM_SGE_LOC_ORIG,
	0,						/* ldio_ctx_reg_lock_flags */
	MFII_REQ_TYPE_LDIO,
	0,						/* ldio_ctx_type_nseg */
	MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA,
	0						/* sge_flag_eol */
};

/*
 * a lot of these values depend on us not implementing fastpath yet.
 */
const struct mfii_iop mfii_iop_25 = {
	MFII_BAR,
	MFII_IOP_NUM_SGE_LOC_ORIG,
	MFII_RAID_CTX_RL_FLAGS_CPU0, /* | MFII_RAID_CTX_RL_FLAGS_SEQNO_EN */
	MFII_REQ_TYPE_NO_LOCK,
	MFII_RAID_CTX_TYPE_CUDA | 0x1,
	MFII_SGE_CHAIN_ELEMENT,
	MFII_SGE_END_OF_LIST
};

const struct mfii_iop mfii_iop_35 = {
	MFII_BAR_35,
	MFII_IOP_NUM_SGE_LOC_35,
	MFII_RAID_CTX_ROUTING_FLAGS_CPU0, /* | MFII_RAID_CTX_ROUTING_FLAGS_SQN */
	MFII_REQ_TYPE_NO_LOCK,
	MFII_RAID_CTX_TYPE_CUDA | 0x1,
	MFII_SGE_CHAIN_ELEMENT,
	MFII_SGE_END_OF_LIST
};

/* maps a supported pci id to its iop personality */
struct mfii_device {
	pcireg_t		mpd_vendor;
	pcireg_t		mpd_product;
	const struct mfii_iop	*mpd_iop;
};

const struct mfii_device mfii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_2208,
	    &mfii_iop_thunderbolt },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3008,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3108,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3404,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3504,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3408,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3508,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3416,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3516,
	    &mfii_iop_35 }
};
603 
604 const struct mfii_iop *mfii_find_iop(struct pci_attach_args *);
605 
606 const struct mfii_iop *
607 mfii_find_iop(struct pci_attach_args *pa)
608 {
609 	const struct mfii_device *mpd;
610 	int i;
611 
612 	for (i = 0; i < nitems(mfii_devices); i++) {
613 		mpd = &mfii_devices[i];
614 
615 		if (mpd->mpd_vendor == PCI_VENDOR(pa->pa_id) &&
616 		    mpd->mpd_product == PCI_PRODUCT(pa->pa_id))
617 			return (mpd->mpd_iop);
618 	}
619 
620 	return (NULL);
621 }
622 
623 int
624 mfii_match(struct device *parent, void *match, void *aux)
625 {
626 	return ((mfii_find_iop(aux) != NULL) ? 1 : 0);
627 }
628 
629 void
630 mfii_attach(struct device *parent, struct device *self, void *aux)
631 {
632 	struct mfii_softc *sc = (struct mfii_softc *)self;
633 	struct pci_attach_args *pa = aux;
634 	pcireg_t memtype;
635 	pci_intr_handle_t ih;
636 	struct scsibus_attach_args saa;
637 	u_int32_t status, scpad2, scpad3;
638 	int chain_frame_sz, nsge_in_io, nsge_in_chain, i;
639 
640 	/* init sc */
641 	sc->sc_iop = mfii_find_iop(aux);
642 	sc->sc_dmat = pa->pa_dmat;
643 	SIMPLEQ_INIT(&sc->sc_ccb_freeq);
644 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
645 	mtx_init(&sc->sc_post_mtx, IPL_BIO);
646 	mtx_init(&sc->sc_reply_postq_mtx, IPL_BIO);
647 	scsi_iopool_init(&sc->sc_iopool, sc, mfii_get_ccb, mfii_put_ccb);
648 
649 	rw_init(&sc->sc_lock, "mfii_lock");
650 
651 	sc->sc_aen_ccb = NULL;
652 	task_set(&sc->sc_aen_task, mfii_aen, sc);
653 
654 	mtx_init(&sc->sc_abort_mtx, IPL_BIO);
655 	SIMPLEQ_INIT(&sc->sc_abort_list);
656 	task_set(&sc->sc_abort_task, mfii_abort_task, sc);
657 
658 	/* wire up the bus shizz */
659 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_iop->bar);
660 	if (pci_mapreg_map(pa, sc->sc_iop->bar, memtype, 0,
661 	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios, MFII_PCI_MEMSIZE)) {
662 		printf(": unable to map registers\n");
663 		return;
664 	}
665 
666 	/* disable interrupts */
667 	mfii_write(sc, MFI_OMSK, 0xffffffff);
668 
669 	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
670 		printf(": unable to map interrupt\n");
671 		goto pci_unmap;
672 	}
673 	printf(": %s\n", pci_intr_string(pa->pa_pc, ih));
674 
675 	/* lets get started */
676 	if (mfii_transition_firmware(sc))
677 		goto pci_unmap;
678 
679 	/* determine max_cmds (refer to the Linux megaraid_sas driver) */
680 	scpad3 = mfii_read(sc, MFII_OSP3);
681 	status = mfii_fw_state(sc);
682 	sc->sc_max_fw_cmds = scpad3 & MFI_STATE_MAXCMD_MASK;
683 	if (sc->sc_max_fw_cmds == 0)
684 		sc->sc_max_fw_cmds = status & MFI_STATE_MAXCMD_MASK;
685 	/*
686 	 * reduce max_cmds by 1 to ensure that the reply queue depth does not
687 	 * exceed FW supplied max_fw_cmds.
688 	 */
689 	sc->sc_max_cmds = min(sc->sc_max_fw_cmds, 1024) - 1;
690 
691 	/* determine max_sgl (refer to the Linux megaraid_sas driver) */
692 	scpad2 = mfii_read(sc, MFII_OSP2);
693 	chain_frame_sz =
694 		((scpad2 & MFII_MAX_CHAIN_MASK) >> MFII_MAX_CHAIN_SHIFT) *
695 		((scpad2 & MFII_MAX_CHAIN_UNIT) ? MFII_1MB_IO : MFII_256K_IO);
696 	if (chain_frame_sz < MFII_CHAIN_FRAME_MIN)
697 		chain_frame_sz = MFII_CHAIN_FRAME_MIN;
698 
699 	nsge_in_io = (MFII_REQUEST_SIZE -
700 		sizeof(struct mpii_msg_scsi_io) -
701 		sizeof(struct mfii_raid_context)) / sizeof(struct mfii_sge);
702 	nsge_in_chain = chain_frame_sz / sizeof(struct mfii_sge);
703 
704 	/* round down to nearest power of two */
705 	sc->sc_max_sgl = 1;
706 	while ((sc->sc_max_sgl << 1) <= (nsge_in_io + nsge_in_chain))
707 		sc->sc_max_sgl <<= 1;
708 
709 	DNPRINTF(MFII_D_MISC, "%s: OSP 0x%08x, OSP2 0x%08x, OSP3 0x%08x\n",
710 	    DEVNAME(sc), status, scpad2, scpad3);
711 	DNPRINTF(MFII_D_MISC, "%s: max_fw_cmds %d, max_cmds %d\n",
712 	    DEVNAME(sc), sc->sc_max_fw_cmds, sc->sc_max_cmds);
713 	DNPRINTF(MFII_D_MISC, "%s: nsge_in_io %d, nsge_in_chain %d, "
714 	    "max_sgl %d\n", DEVNAME(sc), nsge_in_io, nsge_in_chain,
715 	    sc->sc_max_sgl);
716 
717 	/* sense memory */
718 	CTASSERT(sizeof(struct mfi_sense) == MFI_SENSE_SIZE);
719 	sc->sc_sense = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
720 	if (sc->sc_sense == NULL) {
721 		printf("%s: unable to allocate sense memory\n", DEVNAME(sc));
722 		goto pci_unmap;
723 	}
724 
725 	/* reply post queue */
726 	sc->sc_reply_postq_depth = roundup(sc->sc_max_fw_cmds, 16);
727 
728 	sc->sc_reply_postq = mfii_dmamem_alloc(sc,
729 	    sc->sc_reply_postq_depth * sizeof(struct mpii_reply_descr));
730 	if (sc->sc_reply_postq == NULL)
731 		goto free_sense;
732 
733 	memset(MFII_DMA_KVA(sc->sc_reply_postq), 0xff,
734 	    MFII_DMA_LEN(sc->sc_reply_postq));
735 
736 	/* MPII request frame array */
737 	sc->sc_requests = mfii_dmamem_alloc(sc,
738 	    MFII_REQUEST_SIZE * (sc->sc_max_cmds + 1));
739 	if (sc->sc_requests == NULL)
740 		goto free_reply_postq;
741 
742 	/* MFI command frame array */
743 	sc->sc_mfi = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_FRAME_SIZE);
744 	if (sc->sc_mfi == NULL)
745 		goto free_requests;
746 
747 	/* MPII SGL array */
748 	sc->sc_sgl = mfii_dmamem_alloc(sc, sc->sc_max_cmds *
749 	    sizeof(struct mfii_sge) * sc->sc_max_sgl);
750 	if (sc->sc_sgl == NULL)
751 		goto free_mfi;
752 
753 	if (mfii_init_ccb(sc) != 0) {
754 		printf("%s: could not init ccb list\n", DEVNAME(sc));
755 		goto free_sgl;
756 	}
757 
758 	/* kickstart firmware with all addresses and pointers */
759 	if (mfii_initialise_firmware(sc) != 0) {
760 		printf("%s: could not initialize firmware\n", DEVNAME(sc));
761 		goto free_sgl;
762 	}
763 
764 	if (mfii_get_info(sc) != 0) {
765 		printf("%s: could not retrieve controller information\n",
766 		    DEVNAME(sc));
767 		goto free_sgl;
768 	}
769 
770 	printf("%s: \"%s\", firmware %s", DEVNAME(sc),
771 	    sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
772 	if (letoh16(sc->sc_info.mci_memory_size) > 0)
773 		printf(", %uMB cache", letoh16(sc->sc_info.mci_memory_size));
774 	printf("\n");
775 
776 	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO,
777 	    mfii_intr, sc, DEVNAME(sc));
778 	if (sc->sc_ih == NULL)
779 		goto free_sgl;
780 
781 	sc->sc_link.openings = sc->sc_max_cmds;
782 	sc->sc_link.adapter_softc = sc;
783 	sc->sc_link.adapter = &mfii_switch;
784 	sc->sc_link.adapter_target = sc->sc_info.mci_max_lds;
785 	sc->sc_link.adapter_buswidth = sc->sc_info.mci_max_lds;
786 	sc->sc_link.pool = &sc->sc_iopool;
787 
788 	memset(&saa, 0, sizeof(saa));
789 	saa.saa_sc_link = &sc->sc_link;
790 
791 	sc->sc_scsibus = (struct scsibus_softc *)
792 	    config_found(&sc->sc_dev, &saa, scsiprint);
793 
794 	mfii_syspd(sc);
795 
796 	if (mfii_aen_register(sc) != 0) {
797 		/* error printed by mfii_aen_register */
798 		goto intr_disestablish;
799 	}
800 
801 	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
802 	    sizeof(sc->sc_ld_list), SCSI_DATA_IN) != 0) {
803 		printf("%s: getting list of logical disks failed\n", DEVNAME(sc));
804 		goto intr_disestablish;
805 	}
806 	memset(sc->sc_target_lds, -1, sizeof(sc->sc_target_lds));
807 	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
808 		int target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
809 		sc->sc_target_lds[target] = i;
810 	}
811 
812 	/* enable interrupts */
813 	mfii_write(sc, MFI_OSTS, 0xffffffff);
814 	mfii_write(sc, MFI_OMSK, ~MFII_OSTS_INTR_VALID);
815 
816 #if NBIO > 0
817 	if (bio_register(&sc->sc_dev, mfii_ioctl) != 0)
818 		panic("%s: controller registration failed", DEVNAME(sc));
819 	else
820 		sc->sc_ioctl = mfii_ioctl;
821 
822 #ifndef SMALL_KERNEL
823 	if (mfii_create_sensors(sc) != 0)
824 		printf("%s: unable to create sensors\n", DEVNAME(sc));
825 #endif
826 #endif /* NBIO > 0 */
827 
828 	return;
829 intr_disestablish:
830 	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
831 free_sgl:
832 	mfii_dmamem_free(sc, sc->sc_sgl);
833 free_mfi:
834 	mfii_dmamem_free(sc, sc->sc_mfi);
835 free_requests:
836 	mfii_dmamem_free(sc, sc->sc_requests);
837 free_reply_postq:
838 	mfii_dmamem_free(sc, sc->sc_reply_postq);
839 free_sense:
840 	mfii_dmamem_free(sc, sc->sc_sense);
841 pci_unmap:
842 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
843 }
844 
/* garbage collector for retired dev handle maps */
struct srp_gc mfii_dev_handles_gc =
    SRP_GC_INITIALIZER(mfii_dev_handles_dtor, NULL);

/*
 * Fetch the current MPII dev handle for a pd target.  Readers go
 * through srp(9) so mfii_dev_handles_update() can swap in a new map
 * concurrently.
 */
static inline uint16_t
mfii_dev_handle(struct mfii_softc *sc, uint16_t target)
{
	struct srp_ref sr;
	uint16_t *map, handle;

	map = srp_enter(&sr, &sc->sc_pd->pd_dev_handles);
	handle = map[target];
	srp_leave(&sr);

	return (handle);
}
860 
861 int
862 mfii_dev_handles_update(struct mfii_softc *sc)
863 {
864 	struct mfii_ld_map *lm;
865 	uint16_t *dev_handles = NULL;
866 	int i;
867 	int rv = 0;
868 
869 	lm = malloc(sizeof(*lm), M_TEMP, M_WAITOK|M_ZERO);
870 
871 	rv = mfii_mgmt(sc, MR_DCMD_LD_MAP_GET_INFO, NULL, lm, sizeof(*lm),
872 	    SCSI_DATA_IN|SCSI_NOSLEEP);
873 
874 	if (rv != 0) {
875 		rv = EIO;
876 		goto free_lm;
877 	}
878 
879 	dev_handles = mallocarray(MFI_MAX_PD, sizeof(*dev_handles),
880 	    M_DEVBUF, M_WAITOK);
881 
882 	for (i = 0; i < MFI_MAX_PD; i++)
883 		dev_handles[i] = lm->mlm_dev_handle[i].mdh_cur_handle;
884 
885 	/* commit the updated info */
886 	sc->sc_pd->pd_timeout = lm->mlm_pd_timeout;
887 	srp_update_locked(&mfii_dev_handles_gc,
888 	    &sc->sc_pd->pd_dev_handles, dev_handles);
889 
890 free_lm:
891 	free(lm, M_TEMP, sizeof(*lm));
892 
893 	return (rv);
894 }
895 
896 void
897 mfii_dev_handles_dtor(void *null, void *v)
898 {
899 	uint16_t *dev_handles = v;
900 
901 	free(dev_handles, M_DEVBUF, sizeof(*dev_handles) * MFI_MAX_PD);
902 }
903 
904 int
905 mfii_syspd(struct mfii_softc *sc)
906 {
907 	struct scsibus_attach_args saa;
908 	struct scsi_link *link;
909 
910 	sc->sc_pd = malloc(sizeof(*sc->sc_pd), M_DEVBUF, M_WAITOK|M_ZERO);
911 	if (sc->sc_pd == NULL)
912 		return (1);
913 
914 	srp_init(&sc->sc_pd->pd_dev_handles);
915 	if (mfii_dev_handles_update(sc) != 0)
916 		goto free_pdsc;
917 
918 	link = &sc->sc_pd->pd_link;
919 	link->adapter = &mfii_pd_switch;
920 	link->adapter_softc = sc;
921 	link->adapter_buswidth = MFI_MAX_PD;
922 	link->adapter_target = -1;
923 	link->openings = sc->sc_max_cmds - 1;
924 	link->pool = &sc->sc_iopool;
925 
926 	memset(&saa, 0, sizeof(saa));
927 	saa.saa_sc_link = link;
928 
929 	sc->sc_pd->pd_scsibus = (struct scsibus_softc *)
930 	    config_found(&sc->sc_dev, &saa, scsiprint);
931 
932 	return (0);
933 
934 free_pdsc:
935 	free(sc->sc_pd, M_DEVBUF, sizeof(*sc->sc_pd));
936 	return (1);
937 }
938 
/*
 * Detach: tear everything down in roughly the reverse order of
 * mfii_attach().  If attach never established the interrupt there is
 * nothing to undo.
 */
int
mfii_detach(struct device *self, int flags)
{
	struct mfii_softc *sc = (struct mfii_softc *)self;

	if (sc->sc_ih == NULL)
		return (0);

#ifndef SMALL_KERNEL
	if (sc->sc_sensors) {
		sensordev_deinstall(&sc->sc_sensordev);
		free(sc->sc_sensors, M_DEVBUF,
		    MFI_MAX_LD * sizeof(struct ksensor));
	}

	if (sc->sc_bbu) {
		free(sc->sc_bbu, M_DEVBUF, 4 * sizeof(*sc->sc_bbu));
	}

	if (sc->sc_bbu_status) {
		/*
		 * NOTE(review): sizeof(mfi_bbu_indicators) is the byte size
		 * of the pointer array, not nitems(); this size must match
		 * the allocation in mfii_create_sensors() -- verify there
		 * before changing either side.
		 */
		free(sc->sc_bbu_status, M_DEVBUF,
		    sizeof(*sc->sc_bbu_status) * sizeof(mfi_bbu_indicators));
	}
#endif /* SMALL_KERNEL */

	mfii_aen_unregister(sc);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	mfii_dmamem_free(sc, sc->sc_sgl);
	mfii_dmamem_free(sc, sc->sc_mfi);
	mfii_dmamem_free(sc, sc->sc_requests);
	mfii_dmamem_free(sc, sc->sc_reply_postq);
	mfii_dmamem_free(sc, sc->sc_sense);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);

	return (0);
}
975 
/*
 * Read a 32bit controller register, issuing a read barrier first so
 * the access is ordered against earlier register accesses.
 */
u_int32_t
mfii_read(struct mfii_softc *sc, bus_size_t r)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
}
983 
/*
 * Write a 32bit controller register, followed by a write barrier so
 * the store is pushed out before later register accesses.
 */
void
mfii_write(struct mfii_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
991 
992 struct mfii_dmamem *
993 mfii_dmamem_alloc(struct mfii_softc *sc, size_t size)
994 {
995 	struct mfii_dmamem *m;
996 	int nsegs;
997 
998 	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
999 	if (m == NULL)
1000 		return (NULL);
1001 
1002 	m->mdm_size = size;
1003 
1004 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1005 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->mdm_map) != 0)
1006 		goto mdmfree;
1007 
1008 	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->mdm_seg, 1,
1009 	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
1010 		goto destroy;
1011 
1012 	if (bus_dmamem_map(sc->sc_dmat, &m->mdm_seg, nsegs, size, &m->mdm_kva,
1013 	    BUS_DMA_NOWAIT) != 0)
1014 		goto free;
1015 
1016 	if (bus_dmamap_load(sc->sc_dmat, m->mdm_map, m->mdm_kva, size, NULL,
1017 	    BUS_DMA_NOWAIT) != 0)
1018 		goto unmap;
1019 
1020 	return (m);
1021 
1022 unmap:
1023 	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
1024 free:
1025 	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
1026 destroy:
1027 	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
1028 mdmfree:
1029 	free(m, M_DEVBUF, sizeof *m);
1030 
1031 	return (NULL);
1032 }
1033 
/* release memory obtained from mfii_dmamem_alloc(), in reverse order */
void
mfii_dmamem_free(struct mfii_softc *sc, struct mfii_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
	free(m, M_DEVBUF, sizeof *m);
}
1043 
/*
 * Submit a DCMD wrapped in an MPII passthru request.  The request frame
 * is laid out as scsi_io header, raid context, then a single chain SGE
 * pointing at the ccb's sense buffer, which holds the MFI DCMD frame
 * (presumably set up via mfii_dcmd_frame() by the caller — see
 * mfii_aen_start()).
 */
void
mfii_dcmd_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);

	io->function = MFII_FUNCTION_PASSTHRU_IO;
	/* sgl_offset0 is in 32-bit words; chain_offset in 16-byte units */
	io->sgl_offset0 = (uint32_t *)sge - (uint32_t *)io;
	io->chain_offset = io->sgl_offset0 / 4;

	htolem64(&sge->sg_addr, ccb->ccb_sense_dva);
	htolem32(&sge->sg_len, sizeof(*ccb->ccb_sense));
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	mfii_start(sc, ccb);
}
1064 
/*
 * Set up asynchronous event notification (AEN) at attach time: fetch
 * the controller's event log info, allocate a buffer for event details,
 * and arm the first EVENT_WAIT command starting from the boot sequence
 * number so events since boot are replayed.  The ccb and buffer are
 * permanently dedicated to the AEN machinery (never returned/freed on
 * success).  Returns 0 on success or an errno-style value on failure.
 */
int
mfii_aen_register(struct mfii_softc *sc)
{
	struct mfi_evt_log_info mel;
	struct mfii_ccb *ccb;
	struct mfii_dmamem *mdm;
	int rv;

	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
	if (ccb == NULL) {
		printf("%s: unable to allocate ccb for aen\n", DEVNAME(sc));
		return (ENOMEM);
	}

	memset(&mel, 0, sizeof(mel));
	mfii_scrub_ccb(ccb);

	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_EVENT_GET_INFO, NULL,
	    &mel, sizeof(mel), SCSI_DATA_IN|SCSI_NOSLEEP);
	if (rv != 0) {
		scsi_io_put(&sc->sc_iopool, ccb);
		printf("%s: unable to get event info\n", DEVNAME(sc));
		return (EIO);
	}

	mdm = mfii_dmamem_alloc(sc, sizeof(struct mfi_evt_detail));
	if (mdm == NULL) {
		scsi_io_put(&sc->sc_iopool, ccb);
		printf("%s: unable to allocate event data\n", DEVNAME(sc));
		return (ENOMEM);
	}

	/* replay all the events from boot */
	mfii_aen_start(sc, ccb, mdm, lemtoh32(&mel.mel_boot_seq_num));

	return (0);
}
1102 
/*
 * (Re)arm the MR_DCMD_CTRL_EVENT_WAIT command: build an MFI DCMD frame
 * asking the firmware to complete the command when an event with
 * sequence number >= seq occurs, with the event detail DMAed into mdm.
 * Completion is routed to mfii_aen_done().
 */
void
mfii_aen_start(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct mfii_dmamem *mdm, uint32_t seq)
{
	struct mfi_dcmd_frame *dcmd = mfii_dcmd_frame(ccb);
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	union mfi_sgl *sgl = &dcmd->mdf_sgl;
	union mfi_evt_class_locale mec;

	mfii_scrub_ccb(ccb);
	mfii_dcmd_scrub(ccb);
	memset(MFII_DMA_KVA(mdm), 0, MFII_DMA_LEN(mdm));

	ccb->ccb_cookie = mdm;
	ccb->ccb_done = mfii_aen_done;
	sc->sc_aen_ccb = ccb;

	/* listen for everything: lowest class, all locales */
	mec.mec_members.class = MFI_EVT_CLASS_DEBUG;
	mec.mec_members.reserved = 0;
	mec.mec_members.locale = htole16(MFI_EVT_LOCALE_ALL);

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_sg_count = 1;
	hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ | MFI_FRAME_SGL64);
	htolem32(&hdr->mfh_data_len, MFII_DMA_LEN(mdm));
	dcmd->mdf_opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	/* mbox word 0: starting sequence number, word 1: class/locale */
	htolem32(&dcmd->mdf_mbox.w[0], seq);
	htolem32(&dcmd->mdf_mbox.w[1], mec.mec_word);
	htolem64(&sgl->sg64[0].addr, MFII_DMA_DVA(mdm));
	htolem32(&sgl->sg64[0].len, MFII_DMA_LEN(mdm));

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_PREREAD);

	mfii_dcmd_sync(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	mfii_dcmd_start(sc, ccb);
}
1140 
/*
 * Completion handler for the armed EVENT_WAIT ccb.  Runs from interrupt
 * context via mfii_done(), so defer the actual event processing to the
 * system taskq where the KERNEL_LOCK is available for autoconf.
 */
void
mfii_aen_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	KASSERT(sc->sc_aen_ccb == ccb);

	/* defer to a thread with KERNEL_LOCK so we can run autoconf */
	task_add(systq, &sc->sc_aen_task);
}
1149 
1150 void
1151 mfii_aen(void *arg)
1152 {
1153 	struct mfii_softc *sc = arg;
1154 	struct mfii_ccb *ccb = sc->sc_aen_ccb;
1155 	struct mfii_dmamem *mdm = ccb->ccb_cookie;
1156 	const struct mfi_evt_detail *med = MFII_DMA_KVA(mdm);
1157 
1158 	mfii_dcmd_sync(sc, ccb,
1159 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1160 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
1161 	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_POSTREAD);
1162 
1163 #if 0
1164 	printf("%s: %u %08x %02x %s\n", DEVNAME(sc),
1165 	    lemtoh32(&med->med_seq_num), lemtoh32(&med->med_code),
1166 	    med->med_arg_type, med->med_description);
1167 #endif
1168 
1169 	switch (lemtoh32(&med->med_code)) {
1170 	case MFI_EVT_PD_INSERTED_EXT:
1171 		if (med->med_arg_type != MFI_EVT_ARGS_PD_ADDRESS)
1172 			break;
1173 
1174 		mfii_aen_pd_insert(sc, &med->args.pd_address);
1175 		break;
1176  	case MFI_EVT_PD_REMOVED_EXT:
1177 		if (med->med_arg_type != MFI_EVT_ARGS_PD_ADDRESS)
1178 			break;
1179 
1180 		mfii_aen_pd_remove(sc, &med->args.pd_address);
1181 		break;
1182 
1183 	case MFI_EVT_PD_STATE_CHANGE:
1184 		if (med->med_arg_type != MFI_EVT_ARGS_PD_STATE)
1185 			break;
1186 
1187 		mfii_aen_pd_state_change(sc, &med->args.pd_state);
1188 		break;
1189 
1190 	case MFI_EVT_LD_CREATED:
1191 	case MFI_EVT_LD_DELETED:
1192 		mfii_aen_ld_update(sc);
1193 		break;
1194 
1195 	default:
1196 		break;
1197 	}
1198 
1199 	mfii_aen_start(sc, ccb, mdm, lemtoh32(&med->med_seq_num) + 1);
1200 }
1201 
/*
 * A physical disk appeared: refresh the firmware's device handle map
 * and then have the midlayer probe the new target on the passthru bus.
 */
void
mfii_aen_pd_insert(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_address *pd)
{
#if 0
	printf("%s: pd inserted ext\n", DEVNAME(sc));
	printf("%s:  device_id %04x encl_id: %04x type %x\n", DEVNAME(sc),
	    lemtoh16(&pd->device_id), lemtoh16(&pd->encl_id),
	    pd->scsi_dev_type);
	printf("%s:  connected %02x addrs %016llx %016llx\n", DEVNAME(sc),
	    pd->connected.port_bitmap, lemtoh64(&pd->sas_addr[0]),
	    lemtoh64(&pd->sas_addr[1]));
#endif

	if (mfii_dev_handles_update(sc) != 0) /* refresh map */
		return;

	scsi_probe_target(sc->sc_pd->pd_scsibus, lemtoh16(&pd->device_id));
}
1221 
/*
 * A physical disk went away: deactivate and forcibly detach the target
 * from the passthru bus.  No need to drain I/O first — see below.
 */
void
mfii_aen_pd_remove(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_address *pd)
{
#if 0
	printf("%s: pd removed ext\n", DEVNAME(sc));
	printf("%s:  device_id %04x encl_id: %04x type %u\n", DEVNAME(sc),
	    lemtoh16(&pd->device_id), lemtoh16(&pd->encl_id),
	    pd->scsi_dev_type);
	printf("%s:  connected %02x addrs %016llx %016llx\n", DEVNAME(sc),
	    pd->connected.port_bitmap, lemtoh64(&pd->sas_addr[0]),
	    lemtoh64(&pd->sas_addr[1]));
#endif
	uint16_t target = lemtoh16(&pd->device_id);

	scsi_activate(sc->sc_pd->pd_scsibus, target, -1, DVACT_DEACTIVATE);

	/* the firmware will abort outstanding commands for us */

	scsi_detach_target(sc->sc_pd->pd_scsibus, target, DETACH_FORCE);
}
1243 
/*
 * A physical disk changed state.  Disks in the MFI_PD_SYSTEM state are
 * exposed directly on the passthru bus, so leaving that state means the
 * target must be detached, and entering it from UNCONFIG_GOOD means the
 * firmware is handing the disk to us and it should be probed.
 */
void
mfii_aen_pd_state_change(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_state *state)
{
	uint16_t target = lemtoh16(&state->pd.mep_device_id);

	if (state->prev_state == htole32(MFI_PD_SYSTEM) &&
	    state->new_state != htole32(MFI_PD_SYSTEM)) {
		/* it's been pulled or configured for raid */

		scsi_activate(sc->sc_pd->pd_scsibus, target, -1,
		    DVACT_DEACTIVATE);
		/* outstanding commands will simply complete or get aborted */
		scsi_detach_target(sc->sc_pd->pd_scsibus, target,
		    DETACH_FORCE);

	} else if (state->prev_state == htole32(MFI_PD_UNCONFIG_GOOD) &&
	    state->new_state == htole32(MFI_PD_SYSTEM)) {
		/* the firmware is handing the disk over */

		scsi_probe_target(sc->sc_pd->pd_scsibus, target);
	}
}
1267 
/*
 * A logical disk was created or deleted: fetch the current LD list from
 * the firmware, diff it against the cached target->LD mapping, and
 * attach/detach scsibus targets (and their sensors) accordingly.
 * NOTE(review): assumes mld_target < MFI_MAX_LD for every entry the
 * firmware returns — TODO confirm; an out-of-range target would index
 * past newlds[].
 */
void
mfii_aen_ld_update(struct mfii_softc *sc)
{
	int i, state, target, old, nld;
	int newlds[MFI_MAX_LD];

	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), SCSI_DATA_IN) != 0) {
		DNPRINTF(MFII_D_MISC, "%s: getting list of logical disks failed\n",
		    DEVNAME(sc));
		return;
	}

	/* -1 means "no LD at this target" */
	memset(newlds, -1, sizeof(newlds));

	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
		state = sc->sc_ld_list.mll_list[i].mll_state;
		target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		DNPRINTF(MFII_D_MISC, "%s: target %d: state %d\n",
		    DEVNAME(sc), target, state);
		newlds[target] = i;
	}

	for (i = 0; i < MFI_MAX_LD; i++) {
		old = sc->sc_target_lds[i];
		nld = newlds[i];

		if (old == -1 && nld != -1) {
			DNPRINTF(MFII_D_MISC, "%s: attaching target %d\n",
			    DEVNAME(sc), i);

			scsi_probe_target(sc->sc_scsibus, i);

#ifndef SMALL_KERNEL
			mfii_init_ld_sensor(sc, nld);
			sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
#endif
		} else if (nld == -1 && old != -1) {
			DNPRINTF(MFII_D_MISC, "%s: detaching target %d\n",
			    DEVNAME(sc), i);

			scsi_activate(sc->sc_scsibus, i, -1,
			    DVACT_DEACTIVATE);
			scsi_detach_target(sc->sc_scsibus, i,
			    DETACH_FORCE);
#ifndef SMALL_KERNEL
			sensor_detach(&sc->sc_sensordev, &sc->sc_sensors[i]);
#endif
		}
	}

	memcpy(sc->sc_target_lds, newlds, sizeof(sc->sc_target_lds));
}
1321 
/*
 * Tear down AEN state at detach time.  Not implemented: the armed
 * EVENT_WAIT ccb and its DMA buffer are currently left outstanding.
 */
void
mfii_aen_unregister(struct mfii_softc *sc)
{
	/* XXX */
}
1327 
/*
 * Drive the firmware to MFI_STATE_READY, nudging it out of the
 * intermediate states where required.  max_wait is in seconds: the
 * inner loop polls up to max_wait * 10 times with a 100ms delay per
 * poll while the state is unchanged.  Returns 0 when READY, 1 on
 * fault, unknown state, or timeout.
 */
int
mfii_transition_firmware(struct mfii_softc *sc)
{
	int32_t			fw_state, cur_state;
	int			max_wait, i;

	fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;

	while (fw_state != MFI_STATE_READY) {
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			printf("%s: firmware fault\n", DEVNAME(sc));
			return (1);
		case MFI_STATE_WAIT_HANDSHAKE:
			mfii_write(sc, MFI_SKINNY_IDB,
			    MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			/* ask a running firmware to go back to ready */
			mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		default:
			printf("%s: unknown firmware state %d\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
		/* poll until the state moves on or max_wait seconds pass */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
	}

	return (0);
}
1381 
/*
 * Fetch the controller info structure (MR_DCMD_CTRL_GET_INFO) into
 * sc->sc_info and, in debug kernels, dump the interesting fields.
 * Returns 0 on success or the mfii_mgmt() error.
 */
int
mfii_get_info(struct mfii_softc *sc)
{
	int i, rv;

	rv = mfii_mgmt(sc, MR_DCMD_CTRL_GET_INFO, NULL, &sc->sc_info,
	    sizeof(sc->sc_info), SCSI_DATA_IN|SCSI_NOSLEEP);

	if (rv != 0)
		return (rv);

	/* everything below is debug output only */
	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
		DPRINTF("%s: active FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_image_component[i].mic_name,
		    sc->sc_info.mci_image_component[i].mic_version,
		    sc->sc_info.mci_image_component[i].mic_build_date,
		    sc->sc_info.mci_image_component[i].mic_build_time);
	}

	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
		DPRINTF("%s: pending FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_pending_image_component[i].mic_name,
		    sc->sc_info.mci_pending_image_component[i].mic_version,
		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
	}

	DPRINTF("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_arms,
	    sc->sc_info.mci_max_spans,
	    sc->sc_info.mci_max_arrays,
	    sc->sc_info.mci_max_lds,
	    sc->sc_info.mci_product_name);

	DPRINTF("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_serial_number,
	    sc->sc_info.mci_hw_present,
	    sc->sc_info.mci_current_fw_time,
	    sc->sc_info.mci_max_cmds,
	    sc->sc_info.mci_max_sg_elements);

	DPRINTF("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_request_size,
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_lds_degraded,
	    sc->sc_info.mci_lds_offline,
	    sc->sc_info.mci_pd_present);

	DPRINTF("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pd_disks_present,
	    sc->sc_info.mci_pd_disks_pred_failure,
	    sc->sc_info.mci_pd_disks_failed);

	DPRINTF("%s: nvram %d mem %d flash %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_nvram_size,
	    sc->sc_info.mci_memory_size,
	    sc->sc_info.mci_flash_size);

	DPRINTF("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ram_correctable_errors,
	    sc->sc_info.mci_ram_uncorrectable_errors,
	    sc->sc_info.mci_cluster_allowed,
	    sc->sc_info.mci_cluster_active);

	DPRINTF("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_strips_per_io,
	    sc->sc_info.mci_raid_levels,
	    sc->sc_info.mci_adapter_ops,
	    sc->sc_info.mci_ld_ops);

	DPRINTF("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_stripe_sz_ops.min,
	    sc->sc_info.mci_stripe_sz_ops.max,
	    sc->sc_info.mci_pd_ops,
	    sc->sc_info.mci_pd_mix_support);

	DPRINTF("%s: ecc_bucket %d pckg_prop %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ecc_bucket_count,
	    sc->sc_info.mci_package_version);

	DPRINTF("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_seq_num,
	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);

	DPRINTF("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_rebuild_rate,
	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
	    sc->sc_info.mci_properties.mcp_bgi_rate,
	    sc->sc_info.mci_properties.mcp_cc_rate);

	DPRINTF("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_recon_rate,
	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
	    sc->sc_info.mci_properties.mcp_spinup_delay,
	    sc->sc_info.mci_properties.mcp_cluster_enable);

	DPRINTF("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_coercion_mode,
	    sc->sc_info.mci_properties.mcp_alarm_enable,
	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);

	DPRINTF("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
	    sc->sc_info.mci_properties.mcp_expose_encl_devices);

	DPRINTF("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pci.mip_vendor,
	    sc->sc_info.mci_pci.mip_device,
	    sc->sc_info.mci_pci.mip_subvendor,
	    sc->sc_info.mci_pci.mip_subdevice);

	DPRINTF("%s: type %#x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_host.mih_type,
	    sc->sc_info.mci_host.mih_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0llx ", sc->sc_info.mci_host.mih_port_addr[i]);
	DPRINTF("\n");

	DPRINTF("%s: type %.x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_device.mid_type,
	    sc->sc_info.mci_device.mid_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0llx ", sc->sc_info.mci_device.mid_port_addr[i]);
	DPRINTF("\n");

	return (0);
}
1536 
/*
 * Submit a ccb as a legacy MFA-format request and busy-wait for the
 * firmware to update the frame's status byte.  Used before interrupts
 * are usable (e.g. IOC init).  Returns 0 on completion, 1 on timeout
 * (about 5 seconds), in which case MFI_CCB_F_ERR is set on the ccb.
 */
int
mfii_mfa_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mfi_frame_header	*hdr = ccb->ccb_request;
	u_int64_t r;
	int to = 0, rv = 0;

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_mfa_poll called with cookie or done set");
#endif

	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_cmd_status = MFI_STAT_INVALID_STATUS;
	hdr->mfh_flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	/* post the request in MFA format so no reply descriptor is used */
	r = MFII_REQ_MFA(ccb->ccb_request_dva);
	memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));

	mfii_start(sc, ccb);

	for (;;) {
		/* resync so we observe the firmware's status update */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (hdr->mfh_cmd_status != MFI_STAT_INVALID_STATUS)
			break;

		if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
			    ccb->ccb_smid);
			ccb->ccb_flags |= MFI_CCB_F_ERR;
			rv = 1;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		delay(1000);
	}

	/* complete any data transfer associated with the command */
	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
		    0, ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	return (rv);
}
1592 
/*
 * Run a ccb to completion by polling the reply post queue instead of
 * taking interrupts.  The ccb's done handler and cookie are saved,
 * temporarily replaced to flag completion, and restored before the
 * original done handler is invoked.  Always returns 0.
 */
int
mfii_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	void (*done)(struct mfii_softc *, struct mfii_ccb *);
	void *cookie;
	int rv = 1;

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mfii_poll_done;
	ccb->ccb_cookie = &rv;

	mfii_start(sc, ccb);

	/* mfii_poll_done() clears rv via the cookie when the ccb completes */
	do {
		delay(10);
		mfii_postq(sc);
	} while (rv == 1);

	ccb->ccb_cookie = cookie;
	done(sc, ccb);

	return (0);
}
1618 
1619 void
1620 mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1621 {
1622 	int *rv = ccb->ccb_cookie;
1623 
1624 	*rv = 0;
1625 }
1626 
/*
 * Run a ccb to completion, sleeping until mfii_exec_done() signals it.
 * A stack-local mutex serves as the sleep channel lock; the cookie being
 * reset to NULL is the completion condition.  Must not be called with
 * cookie or done already set.  Always returns 0.
 */
int
mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mutex m = MUTEX_INITIALIZER(IPL_BIO);

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_exec called with cookie or done set");
#endif

	ccb->ccb_cookie = &m;
	ccb->ccb_done = mfii_exec_done;

	mfii_start(sc, ccb);

	mtx_enter(&m);
	while (ccb->ccb_cookie != NULL)
		msleep(ccb, &m, PRIBIO, "mfiiexec", 0);
	mtx_leave(&m);

	return (0);
}
1649 
/*
 * Completion handler for mfii_exec(): under the submitter's mutex,
 * clear the cookie (the "done" condition) and wake the sleeping thread.
 */
void
mfii_exec_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mutex *m = ccb->ccb_cookie;

	mtx_enter(m);
	ccb->ccb_cookie = NULL;
	wakeup_one(ccb);
	mtx_leave(m);
}
1660 
1661 int
1662 mfii_mgmt(struct mfii_softc *sc, uint32_t opc, const union mfi_mbox *mbox,
1663     void *buf, size_t len, int flags)
1664 {
1665 	struct mfii_ccb *ccb;
1666 	int rv;
1667 
1668 	ccb = scsi_io_get(&sc->sc_iopool, flags);
1669 	if (ccb == NULL)
1670 		return (ENOMEM);
1671 
1672 	mfii_scrub_ccb(ccb);
1673 	rv = mfii_do_mgmt(sc, ccb, opc, mbox, buf, len, flags);
1674 	scsi_io_put(&sc->sc_iopool, ccb);
1675 
1676 	return (rv);
1677 }
1678 
1679 int
1680 mfii_do_mgmt(struct mfii_softc *sc, struct mfii_ccb *ccb, uint32_t opc,
1681     const union mfi_mbox *mbox, void *buf, size_t len, int flags)
1682 {
1683 	struct mpii_msg_scsi_io *io = ccb->ccb_request;
1684 	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
1685 	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);
1686 	struct mfi_dcmd_frame *dcmd = ccb->ccb_mfi;
1687 	struct mfi_frame_header *hdr = &dcmd->mdf_header;
1688 	u_int8_t *dma_buf;
1689 	int rv = EIO;
1690 
1691 	if (cold)
1692 		flags |= SCSI_NOSLEEP;
1693 
1694 	dma_buf = dma_alloc(len, PR_WAITOK);
1695 	if (dma_buf == NULL)
1696 		return (ENOMEM);
1697 
1698 	ccb->ccb_data = dma_buf;
1699 	ccb->ccb_len = len;
1700 	switch (flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
1701 	case SCSI_DATA_IN:
1702 		ccb->ccb_direction = MFII_DATA_IN;
1703 		hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ);
1704 		break;
1705 	case SCSI_DATA_OUT:
1706 		ccb->ccb_direction = MFII_DATA_OUT;
1707 		hdr->mfh_flags = htole16(MFI_FRAME_DIR_WRITE);
1708 		memcpy(dma_buf, buf, len);
1709 		break;
1710 	case 0:
1711 		ccb->ccb_direction = MFII_DATA_NONE;
1712 		hdr->mfh_flags = htole16(MFI_FRAME_DIR_NONE);
1713 		break;
1714 	}
1715 
1716 	if (mfii_load_mfa(sc, ccb, &dcmd->mdf_sgl,
1717 	    ISSET(flags, SCSI_NOSLEEP)) != 0) {
1718 		rv = ENOMEM;
1719 		goto done;
1720 	}
1721 
1722 	hdr->mfh_cmd = MFI_CMD_DCMD;
1723 	hdr->mfh_context = ccb->ccb_smid;
1724 	hdr->mfh_data_len = htole32(len);
1725 	hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;
1726 
1727 	dcmd->mdf_opcode = opc;
1728 	/* handle special opcodes */
1729 	if (mbox != NULL)
1730 		memcpy(&dcmd->mdf_mbox, mbox, sizeof(dcmd->mdf_mbox));
1731 
1732 	io->function = MFII_FUNCTION_PASSTHRU_IO;
1733 	io->sgl_offset0 = ((u_int8_t *)sge - (u_int8_t *)io) / 4;
1734 	io->chain_offset = ((u_int8_t *)sge - (u_int8_t *)io) / 16;
1735 
1736 	htolem64(&sge->sg_addr, ccb->ccb_mfi_dva);
1737 	htolem32(&sge->sg_len, MFI_FRAME_SIZE);
1738 	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;
1739 
1740 	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
1741 	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);
1742 
1743 	if (ISSET(flags, SCSI_NOSLEEP)) {
1744 		ccb->ccb_done = mfii_empty_done;
1745 		mfii_poll(sc, ccb);
1746 	} else
1747 		mfii_exec(sc, ccb);
1748 
1749 	if (hdr->mfh_cmd_status == MFI_STAT_OK) {
1750 		rv = 0;
1751 
1752 		if (ccb->ccb_direction == MFII_DATA_IN)
1753 			memcpy(buf, dma_buf, len);
1754 	}
1755 
1756 done:
1757 	dma_free(dma_buf, len);
1758 
1759 	return (rv);
1760 }
1761 
/*
 * No-op completion handler, used when the submitter polls for
 * completion itself (see mfii_do_mgmt()).
 */
void
mfii_empty_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	/* nothing to do */
}
1767 
/*
 * Load the ccb's data buffer into its dmamap and fill the legacy MFI
 * scatter/gather list at sglp with 32-bit entries (the frame header's
 * flags select 32-bit SGL format — SGL64 is not set on this path).
 * Returns 0 on success (including zero-length transfers), 1 on dmamap
 * load failure.
 */
int
mfii_load_mfa(struct mfii_softc *sc, struct mfii_ccb *ccb,
    void *sglp, int nosleep)
{
	union mfi_sgl *sgl = sglp;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	int error;
	int i;

	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		sgl->sg32[i].addr = htole32(dmap->dm_segs[i].ds_addr);
		sgl->sg32[i].len = htole32(dmap->dm_segs[i].ds_len);
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	return (0);
}
1799 
/*
 * Post a request descriptor to the controller.  The 64-bit descriptor
 * (ccb->ccb_req) must reach the inbound queue port atomically: on LP64
 * a single raw 8-byte write suffices; on 32-bit platforms the low and
 * high halves are written separately under sc_post_mtx so descriptors
 * from different CPUs cannot interleave.
 */
void
mfii_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	u_long *r = (u_long *)&ccb->ccb_req;

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#if defined(__LP64__)
	bus_space_write_raw_8(sc->sc_iot, sc->sc_ioh, MFI_IQPL, *r);
#else
	mtx_enter(&sc->sc_post_mtx);
	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh, MFI_IQPL, r[0]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MFI_IQPL, 8, BUS_SPACE_BARRIER_WRITE);

	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh, MFI_IQPH, r[1]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MFI_IQPH, 8, BUS_SPACE_BARRIER_WRITE);
	mtx_leave(&sc->sc_post_mtx);
#endif
}
1823 
/*
 * Post-process a completed ccb: sync the request frame and any SGL and
 * data DMA back to the CPU, unload the data map, then invoke the ccb's
 * done callback.
 */
void
mfii_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_POSTWRITE);
	}

	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
		    0, ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	ccb->ccb_done(sc, ccb);
}
1848 
1849 int
1850 mfii_initialise_firmware(struct mfii_softc *sc)
1851 {
1852 	struct mpii_msg_iocinit_request *iiq;
1853 	struct mfii_dmamem *m;
1854 	struct mfii_ccb *ccb;
1855 	struct mfi_init_frame *init;
1856 	int rv;
1857 
1858 	m = mfii_dmamem_alloc(sc, sizeof(*iiq));
1859 	if (m == NULL)
1860 		return (1);
1861 
1862 	iiq = MFII_DMA_KVA(m);
1863 	memset(iiq, 0, sizeof(*iiq));
1864 
1865 	iiq->function = MPII_FUNCTION_IOC_INIT;
1866 	iiq->whoinit = MPII_WHOINIT_HOST_DRIVER;
1867 
1868 	iiq->msg_version_maj = 0x02;
1869 	iiq->msg_version_min = 0x00;
1870 	iiq->hdr_version_unit = 0x10;
1871 	iiq->hdr_version_dev = 0x0;
1872 
1873 	iiq->system_request_frame_size = htole16(MFII_REQUEST_SIZE / 4);
1874 
1875 	iiq->reply_descriptor_post_queue_depth =
1876 	    htole16(sc->sc_reply_postq_depth);
1877 	iiq->reply_free_queue_depth = htole16(0);
1878 
1879 	htolem32(&iiq->sense_buffer_address_high,
1880 	    MFII_DMA_DVA(sc->sc_sense) >> 32);
1881 
1882 	htolem32(&iiq->reply_descriptor_post_queue_address_lo,
1883 	    MFII_DMA_DVA(sc->sc_reply_postq));
1884 	htolem32(&iiq->reply_descriptor_post_queue_address_hi,
1885 	    MFII_DMA_DVA(sc->sc_reply_postq) >> 32);
1886 
1887 	htolem32(&iiq->system_request_frame_base_address_lo,
1888 	    MFII_DMA_DVA(sc->sc_requests));
1889 	htolem32(&iiq->system_request_frame_base_address_hi,
1890 	    MFII_DMA_DVA(sc->sc_requests) >> 32);
1891 
1892 	iiq->timestamp = htole64(time_uptime);
1893 
1894 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
1895 	if (ccb == NULL) {
1896 		/* shouldn't ever run out of ccbs during attach */
1897 		return (1);
1898 	}
1899 	mfii_scrub_ccb(ccb);
1900 	init = ccb->ccb_request;
1901 
1902 	init->mif_header.mfh_cmd = MFI_CMD_INIT;
1903 	init->mif_header.mfh_data_len = htole32(sizeof(*iiq));
1904 	init->mif_qinfo_new_addr = htole64(MFII_DMA_DVA(m));
1905 
1906 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
1907 	    0, MFII_DMA_LEN(sc->sc_reply_postq),
1908 	    BUS_DMASYNC_PREREAD);
1909 
1910 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
1911 	    0, sizeof(*iiq), BUS_DMASYNC_PREREAD);
1912 
1913 	rv = mfii_mfa_poll(sc, ccb);
1914 
1915 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
1916 	    0, sizeof(*iiq), BUS_DMASYNC_POSTREAD);
1917 
1918 	scsi_io_put(&sc->sc_iopool, ccb);
1919 	mfii_dmamem_free(sc, m);
1920 
1921 	return (rv);
1922 }
1923 
/*
 * Check whether the controller is interrupting us.  If bit 0 of the
 * outbound status register is set, write the status back to acknowledge
 * it and claim the interrupt; otherwise claim it only if any of the
 * MFII_OSTS_INTR_VALID bits are set.
 */
int
mfii_my_intr(struct mfii_softc *sc)
{
	u_int32_t status;

	status = mfii_read(sc, MFI_OSTS);
	if (ISSET(status, 0x1)) {
		mfii_write(sc, MFI_OSTS, status);
		return (1);
	}

	return (ISSET(status, MFII_OSTS_INTR_VALID) ? 1 : 0);
}
1937 
/*
 * Interrupt handler: if the controller raised this interrupt, drain the
 * reply post queue.  Returns 1 if the interrupt was ours, 0 otherwise.
 */
int
mfii_intr(void *arg)
{
	struct mfii_softc *sc = arg;

	if (mfii_my_intr(sc)) {
		mfii_postq(sc);
		return (1);
	}

	return (0);
}
1950 
/*
 * Drain the reply post queue: collect all completed ccbs onto a local
 * list under sc_reply_postq_mtx, update the reply post host index
 * register if anything was consumed, then run each ccb's completion
 * outside the mutex.
 */
void
mfii_postq(struct mfii_softc *sc)
{
	struct mfii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_reply_descr *postq = MFII_DMA_KVA(sc->sc_reply_postq);
	struct mpii_reply_descr *rdp;
	struct mfii_ccb *ccb;
	int rpi = 0;

	mtx_enter(&sc->sc_reply_postq_mtx);

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_POSTREAD);

	for (;;) {
		rdp = &postq[sc->sc_reply_postq_index];
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		/* smid is 1-based; 0 is reserved */
		ccb = &sc->sc_ccb[letoh16(rdp->smid) - 1];
		SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		/* mark the descriptor unused for the next pass around */
		memset(rdp, 0xff, sizeof(*rdp));

		sc->sc_reply_postq_index++;
		sc->sc_reply_postq_index %= sc->sc_reply_postq_depth;
		rpi = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_PREREAD);

	if (rpi)
		mfii_write(sc, MFII_RPI, sc->sc_reply_postq_index);

	mtx_leave(&sc->sc_reply_postq_mtx);

	/* run completions without holding the postq mutex */
	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		mfii_done(sc, ccb);
	}
}
2002 
/*
 * scsi_cmd entry point for the logical disk bus.  Reads and writes go
 * through the fast LDIO path (mfii_scsi_cmd_io); everything else is
 * sent as a framed CDB (mfii_scsi_cmd_cdb).  Polled commands complete
 * before returning; otherwise a timeout is armed and the command is
 * posted to the controller.
 */
void
mfii_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	switch (xs->cmd->opcode) {
	case READ_COMMAND:
	case READ_BIG:
	case READ_12:
	case READ_16:
	case WRITE_COMMAND:
	case WRITE_BIG:
	case WRITE_12:
	case WRITE_16:
		if (mfii_scsi_cmd_io(sc, xs) != 0)
			goto stuffup;

		break;

	default:
		if (mfii_scsi_cmd_cdb(sc, xs) != 0)
			goto stuffup;
		break;
	}

	xs->error = XS_NOERROR;
	xs->resid = 0;

	if (ISSET(xs->flags, SCSI_POLL)) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	ccb->ccb_refcnt = 2; /* one for the chip, one for the timeout */
	timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
	scsi_done(xs);
}
2057 
/*
 * Completion for logical disk commands: translate the raid context
 * status into a scsi_xfer error, copy sense data if present, and call
 * scsi_done() once both the chip completion and the timeout have
 * released their references on the ccb.
 */
void
mfii_scsi_cmd_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct scsi_xfer *xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	u_int refs = 1;

	/* if the timeout hadn't fired yet, drop its reference too */
	if (timeout_del(&xs->stimeout))
		refs = 2;

	switch (ctx->status) {
	case MFI_STAT_OK:
		break;

	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		xs->error = XS_SENSE;
		memset(&xs->sense, 0, sizeof(xs->sense));
		memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
		break;

	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	if (atomic_sub_int_nv(&ccb->ccb_refcnt, refs) == 0)
		scsi_done(xs);
}
2092 
2093 int
2094 mfii_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
2095 {
2096 	struct mfii_softc	*sc = (struct mfii_softc *)link->adapter_softc;
2097 
2098 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_scsi_ioctl\n", DEVNAME(sc));
2099 
2100 	switch (cmd) {
2101 	case DIOCGCACHE:
2102 	case DIOCSCACHE:
2103 		return (mfii_ioctl_cache(link, cmd, (struct dk_cache *)addr));
2104 		break;
2105 
2106 	default:
2107 		if (sc->sc_ioctl)
2108 			return (sc->sc_ioctl(link->adapter_softc, cmd, addr));
2109 		break;
2110 	}
2111 
2112 	return (ENOTTY);
2113 }
2114 
/*
 * Handle DIOCGCACHE/DIOCSCACHE for the logical disk behind link->target:
 * report or change the cache settings via LD property DCMDs.
 */
int
mfii_ioctl_cache(struct scsi_link *link, u_long cmd,  struct dk_cache *dc)
{
	struct mfii_softc	*sc = (struct mfii_softc *)link->adapter_softc;
	int			 rv, wrenable, rdenable;
	struct mfi_ld_prop	 ldp;
	union mfi_mbox		 mbox;

	/* refresh controller info so sc_target_lds is current */
	if (mfii_get_info(sc)) {
		rv = EIO;
		goto done;
	}

	/* no logical disk behind this target */
	if (sc->sc_target_lds[link->target] == -1) {
		rv = EIO;
		goto done;
	}

	/* fetch the current properties of this LD */
	memset(&mbox, 0, sizeof(mbox));
	mbox.b[0] = link->target;
	rv = mfii_mgmt(sc, MR_DCMD_LD_GET_PROPERTIES, &mbox, &ldp, sizeof(ldp),
	    SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	/*
	 * With controller cache memory present the LD cache policy is
	 * what matters; otherwise only the physical disk write cache
	 * can be toggled.
	 */
	if (sc->sc_info.mci_memory_size > 0) {
		wrenable = ISSET(ldp.mlp_cur_cache_policy,
		    MR_LD_CACHE_ALLOW_WRITE_CACHE)? 1 : 0;
		rdenable = ISSET(ldp.mlp_cur_cache_policy,
		    MR_LD_CACHE_ALLOW_READ_CACHE)? 1 : 0;
	} else {
		wrenable = ISSET(ldp.mlp_diskcache_policy,
		    MR_LD_DISK_CACHE_ENABLE)? 1 : 0;
		rdenable = 0;
	}

	if (cmd == DIOCGCACHE) {
		/* read-only query: report and stop here (rv is 0) */
		dc->wrcache = wrenable;
		dc->rdcache = rdenable;
		goto done;
	} /* else DIOCSCACHE */

	/* nothing to do if the requested state matches the current one */
	if (((dc->wrcache) ? 1 : 0) == wrenable &&
	    ((dc->rdcache) ? 1 : 0) == rdenable)
		goto done;

	/* identify the LD (target, reserved byte, sequence) for the update */
	memset(&mbox, 0, sizeof(mbox));
	mbox.b[0] = ldp.mlp_ld.mld_target;
	mbox.b[1] = ldp.mlp_ld.mld_res;
	mbox.s[1] = ldp.mlp_ld.mld_seq;

	if (sc->sc_info.mci_memory_size > 0) {
		if (dc->rdcache)
			SET(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_READ_CACHE);
		else
			CLR(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_READ_CACHE);
		if (dc->wrcache)
			SET(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_WRITE_CACHE);
		else
			CLR(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_WRITE_CACHE);
	} else {
		/* without controller cache a read cache cannot be enabled */
		if (dc->rdcache) {
			rv = EOPNOTSUPP;
			goto done;
		}
		if (dc->wrcache)
			ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_ENABLE;
		else
			ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_DISABLE;
	}

	/* push the modified properties back to the firmware */
	rv = mfii_mgmt(sc, MR_DCMD_LD_SET_PROPERTIES, &mbox, &ldp, sizeof(ldp),
	    SCSI_DATA_OUT);
done:
	return (rv);
}
2195 
/*
 * Build an LD I/O request for a read/write command.  Returns non-zero
 * if the dma map for the data transfer could not be loaded.
 */
int
mfii_scsi_cmd_io(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the raid context sits directly after the scsi io message */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	int segs;

	io->dev_handle = htole16(link->target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* sgl offset is counted in 32-bit dwords, hence the / 4 */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	/* io_flags carries the cdb length for LD I/O requests */
	io->io_flags = htole16(xs->cmdlen);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	/* chip-generation specific context setup */
	ctx->type_nseg = sc->sc_iop->ldio_ctx_type_nseg;
	ctx->timeout_value = htole16(0x14); /* XXX */
	ctx->reg_lock_flags = htole16(sc->sc_iop->ldio_ctx_reg_lock_flags);
	ctx->virtual_disk_target_id = htole16(link->target);

	/* load the dma map and build the sgl just after the context */
	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (1);

	/* where the segment count lives differs between iop generations */
	segs = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;
	switch (sc->sc_iop->num_sge_loc) {
	case MFII_IOP_NUM_SGE_LOC_ORIG:
		ctx->num_sge = segs;
		break;
	case MFII_IOP_NUM_SGE_LOC_35:
		/* 12 bit field, but we're only using the lower 8 */
		ctx->span_arm = segs;
		break;
	}

	ccb->ccb_req.flags = sc->sc_iop->ldio_req_type;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	return (0);
}
2254 
2255 int
2256 mfii_scsi_cmd_cdb(struct mfii_softc *sc, struct scsi_xfer *xs)
2257 {
2258 	struct scsi_link *link = xs->sc_link;
2259 	struct mfii_ccb *ccb = xs->io;
2260 	struct mpii_msg_scsi_io *io = ccb->ccb_request;
2261 	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
2262 
2263 	io->dev_handle = htole16(link->target);
2264 	io->function = MFII_FUNCTION_LDIO_REQUEST;
2265 	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
2266 	io->sgl_flags = htole16(0x02); /* XXX */
2267 	io->sense_buffer_length = sizeof(xs->sense);
2268 	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
2269 	io->data_length = htole32(xs->datalen);
2270 	io->io_flags = htole16(xs->cmdlen);
2271 	io->lun[0] = htobe16(link->lun);
2272 	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
2273 	case SCSI_DATA_IN:
2274 		ccb->ccb_direction = MFII_DATA_IN;
2275 		io->direction = MPII_SCSIIO_DIR_READ;
2276 		break;
2277 	case SCSI_DATA_OUT:
2278 		ccb->ccb_direction = MFII_DATA_OUT;
2279 		io->direction = MPII_SCSIIO_DIR_WRITE;
2280 		break;
2281 	default:
2282 		ccb->ccb_direction = MFII_DATA_NONE;
2283 		io->direction = MPII_SCSIIO_DIR_NONE;
2284 		break;
2285 	}
2286 	memcpy(io->cdb, xs->cmd, xs->cmdlen);
2287 
2288 	ctx->virtual_disk_target_id = htole16(link->target);
2289 
2290 	if (mfii_load_ccb(sc, ccb, ctx + 1,
2291 	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
2292 		return (1);
2293 
2294 	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;
2295 
2296 	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
2297 	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);
2298 
2299 	return (0);
2300 }
2301 
2302 void
2303 mfii_pd_scsi_cmd(struct scsi_xfer *xs)
2304 {
2305 	struct scsi_link *link = xs->sc_link;
2306 	struct mfii_softc *sc = link->adapter_softc;
2307 	struct mfii_ccb *ccb = xs->io;
2308 
2309 	mfii_scrub_ccb(ccb);
2310 	ccb->ccb_cookie = xs;
2311 	ccb->ccb_done = mfii_scsi_cmd_done;
2312 	ccb->ccb_data = xs->data;
2313 	ccb->ccb_len = xs->datalen;
2314 
2315 	timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);
2316 
2317 	xs->error = mfii_pd_scsi_cmd_cdb(sc, xs);
2318 	if (xs->error != XS_NOERROR)
2319 		goto done;
2320 
2321 	xs->resid = 0;
2322 
2323 	if (ISSET(xs->flags, SCSI_POLL)) {
2324 		if (mfii_poll(sc, ccb) != 0)
2325 			goto stuffup;
2326 		return;
2327 	}
2328 
2329 	ccb->ccb_refcnt = 2; /* one for the chip, one for the timeout */
2330 	timeout_add_msec(&xs->stimeout, xs->timeout);
2331 	mfii_start(sc, ccb);
2332 
2333 	return;
2334 
2335 stuffup:
2336 	xs->error = XS_DRIVER_STUFFUP;
2337 done:
2338 	scsi_done(xs);
2339 }
2340 
2341 int
2342 mfii_pd_scsi_probe(struct scsi_link *link)
2343 {
2344 	struct mfii_softc *sc = link->adapter_softc;
2345 	struct mfi_pd_details mpd;
2346 	union mfi_mbox mbox;
2347 	int rv;
2348 
2349 	if (link->lun > 0)
2350 		return (0);
2351 
2352 	memset(&mbox, 0, sizeof(mbox));
2353 	mbox.s[0] = htole16(link->target);
2354 
2355 	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, &mpd, sizeof(mpd),
2356 	    SCSI_DATA_IN|SCSI_NOSLEEP);
2357 	if (rv != 0)
2358 		return (EIO);
2359 
2360 	if (mpd.mpd_fw_state != htole16(MFI_PD_SYSTEM))
2361 		return (ENXIO);
2362 
2363 	return (0);
2364 }
2365 
/*
 * Build a system physical disk (passthrough) request for xs.  Returns
 * an XS_* code: XS_SELTIMEOUT if the target has no device handle,
 * XS_DRIVER_STUFFUP if the dma map could not be loaded, XS_NOERROR on
 * success.
 */
int
mfii_pd_scsi_cmd_cdb(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the raid context sits directly after the scsi io message */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	uint16_t dev_handle;

	/* 0xffff means the firmware has no handle for this target */
	dev_handle = mfii_dev_handle(sc, link->target);
	if (dev_handle == htole16(0xffff))
		return (XS_SELTIMEOUT);

	io->dev_handle = dev_handle;
	io->function = 0;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* sgl offset is counted in 32-bit dwords, hence the / 4 */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	/* mark the request as a system pd command for the raid context */
	ctx->virtual_disk_target_id = htole16(link->target);
	ctx->raid_flags = MFII_RAID_CTX_IO_TYPE_SYSPD;
	ctx->timeout_value = sc->sc_pd->pd_timeout;

	/* load the dma map and build the sgl just after the context */
	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (XS_DRIVER_STUFFUP);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;

	ccb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);
	ccb->ccb_req.dev_handle = dev_handle;

	return (XS_NOERROR);
}
2420 
/*
 * Load the ccb's dma map for its data buffer and build the hardware
 * sgl at sglp (inside the request frame).  If the segments do not all
 * fit in the request frame, the last in-frame slot becomes a chain
 * element pointing at the ccb's off-frame sgl area, and the remaining
 * segments are written there.  Returns non-zero if the dma load fails.
 */
int
mfii_load_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb, void *sglp,
    int nosleep)
{
	struct mpii_msg_request *req = ccb->ccb_request;
	struct mfii_sge *sge = NULL, *nsge = sglp;
	struct mfii_sge *ce = NULL;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	u_int space;
	int i;

	int error;

	/* nothing to map for zero-length transfers */
	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* how many sge slots remain in the request frame after sglp */
	space = (MFII_REQUEST_SIZE - ((u_int8_t *)nsge - (u_int8_t *)req)) /
	    sizeof(*nsge);
	if (dmap->dm_nsegs > space) {
		/* reserve the last in-frame slot for the chain element */
		space--;

		ccb->ccb_sgl_len = (dmap->dm_nsegs - space) * sizeof(*nsge);
		memset(ccb->ccb_sgl, 0, ccb->ccb_sgl_len);

		/* chain element points at the ccb's off-frame sgl */
		ce = nsge + space;
		ce->sg_addr = htole64(ccb->ccb_sgl_dva);
		ce->sg_len = htole32(ccb->ccb_sgl_len);
		ce->sg_flags = sc->sc_iop->sge_flag_chain;

		/* chain offset is in 16-byte units */
		req->chain_offset = ((u_int8_t *)ce - (u_int8_t *)req) / 16;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		/* when we reach the chain slot, continue off-frame */
		if (nsge == ce)
			nsge = ccb->ccb_sgl;

		sge = nsge;

		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_flags = MFII_SGE_ADDR_SYSTEM;

		nsge = sge + 1;
	}
	/* mark the last segment written (sge) as end-of-list */
	sge->sg_flags |= sc->sc_iop->sge_flag_eol;

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/* the off-frame sgl is dma memory too, flush it for the chip */
	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_PREWRITE);
	}

	return (0);
}
2487 
2488 void
2489 mfii_scsi_cmd_tmo(void *xsp)
2490 {
2491 	struct scsi_xfer *xs = xsp;
2492 	struct scsi_link *link = xs->sc_link;
2493 	struct mfii_softc *sc = link->adapter_softc;
2494 	struct mfii_ccb *ccb = xs->io;
2495 
2496 	mtx_enter(&sc->sc_abort_mtx);
2497 	SIMPLEQ_INSERT_TAIL(&sc->sc_abort_list, ccb, ccb_link);
2498 	mtx_leave(&sc->sc_abort_mtx);
2499 
2500 	task_add(systqmp, &sc->sc_abort_task);
2501 }
2502 
/*
 * Task that issues task-management aborts for every ccb queued by
 * mfii_scsi_cmd_tmo().  Runs from systqmp.
 */
void
mfii_abort_task(void *scp)
{
	struct mfii_softc *sc = scp;
	struct mfii_ccb *list;

	/* take the whole abort list in one go under the mutex */
	mtx_enter(&sc->sc_abort_mtx);
	list = SIMPLEQ_FIRST(&sc->sc_abort_list);
	SIMPLEQ_INIT(&sc->sc_abort_list);
	mtx_leave(&sc->sc_abort_mtx);

	while (list != NULL) {
		struct mfii_ccb *ccb = list;
		struct scsi_xfer *xs = ccb->ccb_cookie;
		struct scsi_link *link = xs->sc_link;

		uint16_t dev_handle;
		struct mfii_ccb *accb;

		list = SIMPLEQ_NEXT(ccb, ccb_link);

		/* 0xffff means the firmware has no handle for this target */
		dev_handle = mfii_dev_handle(sc, link->target);
		if (dev_handle == htole16(0xffff)) {
			/* device is gone */
			if (atomic_dec_int_nv(&ccb->ccb_refcnt) == 0)
				scsi_done(xs);
			continue;
		}

		/* get a ccb for the abort request itself (sleeps if needed) */
		accb = scsi_io_get(&sc->sc_iopool, 0);
		mfii_scrub_ccb(accb);
		mfii_abort(sc, accb, dev_handle, ccb->ccb_smid,
		    MPII_SCSI_TASK_ABORT_TASK,
		    htole32(MFII_TASK_MGMT_FLAGS_PD));

		/* abort completion drops the timed-out ccb's reference */
		accb->ccb_cookie = ccb;
		accb->ccb_done = mfii_scsi_cmd_abort_done;

		mfii_start(sc, accb);
	}
}
2544 
2545 void
2546 mfii_abort(struct mfii_softc *sc, struct mfii_ccb *accb, uint16_t dev_handle,
2547     uint16_t smid, uint8_t type, uint32_t flags)
2548 {
2549 	struct mfii_task_mgmt *msg;
2550 	struct mpii_msg_scsi_task_request *req;
2551 
2552 	msg = accb->ccb_request;
2553 	req = &msg->mpii_request;
2554 	req->dev_handle = dev_handle;
2555 	req->function = MPII_FUNCTION_SCSI_TASK_MGMT;
2556 	req->task_type = type;
2557 	htolem16(&req->task_mid, smid);
2558 	msg->flags = flags;
2559 
2560 	accb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
2561 	accb->ccb_req.smid = letoh16(accb->ccb_smid);
2562 }
2563 
2564 void
2565 mfii_scsi_cmd_abort_done(struct mfii_softc *sc, struct mfii_ccb *accb)
2566 {
2567 	struct mfii_ccb *ccb = accb->ccb_cookie;
2568 	struct scsi_xfer *xs = ccb->ccb_cookie;
2569 
2570 	/* XXX check accb completion? */
2571 
2572 	scsi_io_put(&sc->sc_iopool, accb);
2573 
2574 	if (atomic_dec_int_nv(&ccb->ccb_refcnt) == 0)
2575 		scsi_done(xs);
2576 }
2577 
2578 void *
2579 mfii_get_ccb(void *cookie)
2580 {
2581 	struct mfii_softc *sc = cookie;
2582 	struct mfii_ccb *ccb;
2583 
2584 	mtx_enter(&sc->sc_ccb_mtx);
2585 	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_freeq);
2586 	if (ccb != NULL)
2587 		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
2588 	mtx_leave(&sc->sc_ccb_mtx);
2589 
2590 	return (ccb);
2591 }
2592 
2593 void
2594 mfii_scrub_ccb(struct mfii_ccb *ccb)
2595 {
2596 	ccb->ccb_cookie = NULL;
2597 	ccb->ccb_done = NULL;
2598 	ccb->ccb_flags = 0;
2599 	ccb->ccb_data = NULL;
2600 	ccb->ccb_direction = 0;
2601 	ccb->ccb_len = 0;
2602 	ccb->ccb_sgl_len = 0;
2603 	ccb->ccb_refcnt = 1;
2604 
2605 	memset(&ccb->ccb_req, 0, sizeof(ccb->ccb_req));
2606 	memset(ccb->ccb_request, 0, MFII_REQUEST_SIZE);
2607 	memset(ccb->ccb_mfi, 0, MFI_FRAME_SIZE);
2608 }
2609 
2610 void
2611 mfii_put_ccb(void *cookie, void *io)
2612 {
2613 	struct mfii_softc *sc = cookie;
2614 	struct mfii_ccb *ccb = io;
2615 
2616 	mtx_enter(&sc->sc_ccb_mtx);
2617 	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
2618 	mtx_leave(&sc->sc_ccb_mtx);
2619 }
2620 
/*
 * Allocate the ccb array and carve the preallocated dma slabs
 * (requests, mfi frames, sense buffers, off-frame sgls) into
 * per-ccb slices.  Returns 0 on success, 1 on failure after
 * tearing down whatever was set up.
 */
int
mfii_init_ccb(struct mfii_softc *sc)
{
	struct mfii_ccb *ccb;
	u_int8_t *request = MFII_DMA_KVA(sc->sc_requests);
	u_int8_t *mfi = MFII_DMA_KVA(sc->sc_mfi);
	u_int8_t *sense = MFII_DMA_KVA(sc->sc_sense);
	u_int8_t *sgl = MFII_DMA_KVA(sc->sc_sgl);
	u_int i;
	int error;

	sc->sc_ccb = mallocarray(sc->sc_max_cmds, sizeof(struct mfii_ccb),
	    M_DEVBUF, M_WAITOK|M_ZERO);

	for (i = 0; i < sc->sc_max_cmds; i++) {
		ccb = &sc->sc_ccb[i];

		/* create a dma map for transfer */
		error = bus_dmamap_create(sc->sc_dmat,
		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
		if (error) {
			printf("%s: cannot create ccb dmamap (%d)\n",
			    DEVNAME(sc), error);
			goto destroy;
		}

		/* select i + 1'th request. 0 is reserved for events */
		ccb->ccb_smid = i + 1;
		ccb->ccb_request_offset = MFII_REQUEST_SIZE * (i + 1);
		ccb->ccb_request = request + ccb->ccb_request_offset;
		ccb->ccb_request_dva = MFII_DMA_DVA(sc->sc_requests) +
		    ccb->ccb_request_offset;

		/* select i'th MFI command frame */
		ccb->ccb_mfi_offset = MFI_FRAME_SIZE * i;
		ccb->ccb_mfi = mfi + ccb->ccb_mfi_offset;
		ccb->ccb_mfi_dva = MFII_DMA_DVA(sc->sc_mfi) +
		    ccb->ccb_mfi_offset;

		/* select i'th sense */
		ccb->ccb_sense_offset = MFI_SENSE_SIZE * i;
		ccb->ccb_sense = (struct mfi_sense *)(sense +
		    ccb->ccb_sense_offset);
		ccb->ccb_sense_dva = MFII_DMA_DVA(sc->sc_sense) +
		    ccb->ccb_sense_offset;

		/* select i'th sgl */
		ccb->ccb_sgl_offset = sizeof(struct mfii_sge) *
		    sc->sc_max_sgl * i;
		ccb->ccb_sgl = (struct mfii_sge *)(sgl + ccb->ccb_sgl_offset);
		ccb->ccb_sgl_dva = MFII_DMA_DVA(sc->sc_sgl) +
		    ccb->ccb_sgl_offset;

		/* add ccb to queue */
		mfii_put_ccb(sc, ccb);
	}

	return (0);

destroy:
	/* free dma maps and ccb memory */
	while ((ccb = mfii_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);

	free(sc->sc_ccb, M_DEVBUF, 0);

	return (1);
}
2690 
2691 #if NBIO > 0
2692 int
2693 mfii_ioctl(struct device *dev, u_long cmd, caddr_t addr)
2694 {
2695 	struct mfii_softc	*sc = (struct mfii_softc *)dev;
2696 	int error = 0;
2697 
2698 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl ", DEVNAME(sc));
2699 
2700 	rw_enter_write(&sc->sc_lock);
2701 
2702 	switch (cmd) {
2703 	case BIOCINQ:
2704 		DNPRINTF(MFII_D_IOCTL, "inq\n");
2705 		error = mfii_ioctl_inq(sc, (struct bioc_inq *)addr);
2706 		break;
2707 
2708 	case BIOCVOL:
2709 		DNPRINTF(MFII_D_IOCTL, "vol\n");
2710 		error = mfii_ioctl_vol(sc, (struct bioc_vol *)addr);
2711 		break;
2712 
2713 	case BIOCDISK:
2714 		DNPRINTF(MFII_D_IOCTL, "disk\n");
2715 		error = mfii_ioctl_disk(sc, (struct bioc_disk *)addr);
2716 		break;
2717 
2718 	case BIOCALARM:
2719 		DNPRINTF(MFII_D_IOCTL, "alarm\n");
2720 		error = mfii_ioctl_alarm(sc, (struct bioc_alarm *)addr);
2721 		break;
2722 
2723 	case BIOCBLINK:
2724 		DNPRINTF(MFII_D_IOCTL, "blink\n");
2725 		error = mfii_ioctl_blink(sc, (struct bioc_blink *)addr);
2726 		break;
2727 
2728 	case BIOCSETSTATE:
2729 		DNPRINTF(MFII_D_IOCTL, "setstate\n");
2730 		error = mfii_ioctl_setstate(sc, (struct bioc_setstate *)addr);
2731 		break;
2732 
2733 	case BIOCPATROL:
2734 		DNPRINTF(MFII_D_IOCTL, "patrol\n");
2735 		error = mfii_ioctl_patrol(sc, (struct bioc_patrol *)addr);
2736 		break;
2737 
2738 	default:
2739 		DNPRINTF(MFII_D_IOCTL, " invalid ioctl\n");
2740 		error = ENOTTY;
2741 	}
2742 
2743 	rw_exit_write(&sc->sc_lock);
2744 
2745 	return (error);
2746 }
2747 
/*
 * Refresh the cached controller configuration: controller info, the
 * firmware config (sc_cfg), the LD list (sc_ld_list), per-LD details
 * (sc_ld_details) and the count of configured physical disks
 * (sc_no_pd).  Returns 0 on success, non-zero on any failure.
 */
int
mfii_bio_getitall(struct mfii_softc *sc)
{
	int			i, d, rv = EINVAL;
	size_t			size;
	union mfi_mbox		mbox;
	struct mfi_conf		*cfg = NULL;
	struct mfi_ld_details	*ld_det = NULL;

	/* get info */
	if (mfii_get_info(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_get_info failed\n",
		    DEVNAME(sc));
		goto done;
	}

	/* send single element command to retrieve size for full structure */
	cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
	    SCSI_DATA_IN)) {
		free(cfg, M_DEVBUF, sizeof *cfg);
		goto done;
	}

	/* mfc_size is the total size the firmware wants for the config */
	size = cfg->mfc_size;
	free(cfg, M_DEVBUF, sizeof *cfg);

	/* memory for read config */
	cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size, SCSI_DATA_IN)) {
		free(cfg, M_DEVBUF, size);
		goto done;
	}

	/* replace current pointer with new one */
	if (sc->sc_cfg)
		free(sc->sc_cfg, M_DEVBUF, 0);
	sc->sc_cfg = cfg;

	/* get all ld info */
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), SCSI_DATA_IN))
		goto done;

	/* get memory for all ld structures */
	size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
	if (sc->sc_ld_sz != size) {
		/* LD count changed since last refresh: reallocate */
		if (sc->sc_ld_details)
			free(sc->sc_ld_details, M_DEVBUF, 0);

		ld_det = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (ld_det == NULL)
			goto done;
		sc->sc_ld_sz = size;
		sc->sc_ld_details = ld_det;
	}

	/* find used physical disks */
	size = sizeof(struct mfi_ld_details);
	for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
		memset(&mbox, 0, sizeof(mbox));
		mbox.b[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		if (mfii_mgmt(sc, MR_DCMD_LD_GET_INFO, &mbox, &sc->sc_ld_details[i], size,
		    SCSI_DATA_IN))
			goto done;

		/* disks per span times spans gives disks in this LD */
		d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
		    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
	}
	sc->sc_no_pd = d;

	rv = 0;
done:
	return (rv);
}
2827 
2828 int
2829 mfii_ioctl_inq(struct mfii_softc *sc, struct bioc_inq *bi)
2830 {
2831 	int			rv = EINVAL;
2832 	struct mfi_conf		*cfg = NULL;
2833 
2834 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_inq\n", DEVNAME(sc));
2835 
2836 	if (mfii_bio_getitall(sc)) {
2837 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
2838 		    DEVNAME(sc));
2839 		goto done;
2840 	}
2841 
2842 	/* count unused disks as volumes */
2843 	if (sc->sc_cfg == NULL)
2844 		goto done;
2845 	cfg = sc->sc_cfg;
2846 
2847 	bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
2848 	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
2849 #if notyet
2850 	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
2851 	    (bi->bi_nodisk - sc->sc_no_pd);
2852 #endif
2853 	/* tell bio who we are */
2854 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
2855 
2856 	rv = 0;
2857 done:
2858 	return (rv);
2859 }
2860 
/*
 * BIOCVOL: report the state, progress, cache mode, raid level, disk
 * count and size of the logical volume bv->bv_volid.  Volume ids past
 * the LD list are handed to mfii_bio_hs() for hotspare/unused disks.
 */
int
mfii_ioctl_vol(struct mfii_softc *sc, struct bioc_vol *bv)
{
	int			i, per, target, rv = EINVAL;
	struct scsi_link	*link;
	struct device		*dev;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_vol %#x\n",
	    DEVNAME(sc), bv->bv_volid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
		/* go do hotspares & unused disks */
		rv = mfii_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
		goto done;
	}

	/* resolve the attached sd(4) device name, if any */
	i = bv->bv_volid;
	target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
	link = scsi_get_link(sc->sc_scsibus, target, 0);
	if (link == NULL) {
		strlcpy(bv->bv_dev, "cache", sizeof(bv->bv_dev));
	} else {
		dev = link->device_softc;
		if (dev == NULL)
			goto done;

		strlcpy(bv->bv_dev, dev->dv_xname, sizeof(bv->bv_dev));
	}

	/* map the firmware LD state to bio volume status */
	switch(sc->sc_ld_list.mll_list[i].mll_state) {
	case MFI_LD_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;

	case MFI_LD_PART_DEGRADED:
	case MFI_LD_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;

	case MFI_LD_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;

	default:
		bv->bv_status = BIOC_SVINVALID;
		DNPRINTF(MFII_D_IOCTL, "%s: invalid logical disk state %#x\n",
		    DEVNAME(sc),
		    sc->sc_ld_list.mll_list[i].mll_state);
	}

	/* additional status can modify MFI status */
	switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
	case MFI_LD_PROG_CC:
		/* consistency check in progress */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_BGI:
		/* background init in progress */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_FGI:
	case MFI_LD_PROG_RECONSTRUCT:
		/* nothing yet */
		break;
	}

	if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01)
		bv->bv_cache = BIOC_CVWRITEBACK;
	else
		bv->bv_cache = BIOC_CVWRITETHROUGH;

	/*
	 * The RAID levels are determined per the SNIA DDF spec, this is only
	 * a subset that is valid for the MFI controller.
	 */
	bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
	if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth > 1)
		bv->bv_level *= 10;

	bv->bv_nodisk = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;

	bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */

	rv = 0;
done:
	return (rv);
}
2964 
/*
 * BIOCDISK: report location, state, size and vendor of the physical
 * disk bd->bd_diskid within volume bd->bd_volid.  Volume ids past the
 * LD count are handed to mfii_bio_hs() for hotspares.
 */
int
mfii_ioctl_disk(struct mfii_softc *sc, struct bioc_disk *bd)
{
	struct mfi_conf		*cfg;
	struct mfi_array	*ar;
	struct mfi_ld_cfg	*ld;
	struct mfi_pd_details	*pd;
	struct mfi_pd_list	*pl;
	struct mfi_pd_progress	*mfp;
	struct mfi_progress	*mp;
	struct scsi_inquiry_data *inqbuf;
	char			vend[8+16+4+1], *vendp;
	int			i, rv = EINVAL;
	int			arr, vol, disk, span;
	union mfi_mbox		mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_disk %#x\n",
	    DEVNAME(sc), bd->bd_diskid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	cfg = sc->sc_cfg;

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	ar = cfg->mfc_array;
	vol = bd->bd_volid;
	if (vol >= cfg->mfc_no_ld) {
		/* do hotspares */
		rv = mfii_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
		goto freeme;
	}

	/* calculate offset to ld structure */
	ld = (struct mfi_ld_cfg *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array);

	/* use span 0 only when raid group is not spanned */
	if (ld[vol].mlc_parm.mpa_span_depth > 1)
		span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
	else
		span = 0;
	arr = ld[vol].mlc_span[span].mls_index;

	/* offset disk into pd list */
	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;

	if (ar[arr].pd[disk].mar_pd.mfp_id == 0xffffU) {
		/* disk is missing but succeed command */
		bd->bd_status = BIOC_SDFAILED;
		rv = 0;

		/* try to find an unused disk for the target to rebuild */
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
		    SCSI_DATA_IN))
			goto freeme;

		for (i = 0; i < pl->mpl_no_pd; i++) {
			if (pl->mpl_address[i].mpa_scsi_type != 0)
				continue;

			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
			    SCSI_DATA_IN))
				continue;

			if (pd->mpd_fw_state == MFI_PD_UNCONFIG_GOOD ||
			    pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD)
				break;
		}

		/* no candidate found */
		if (i == pl->mpl_no_pd)
			goto freeme;
	} else {
		memset(&mbox, 0, sizeof(mbox));
		mbox.s[0] = ar[arr].pd[disk].mar_pd.mfp_id;
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
		    SCSI_DATA_IN)) {
			bd->bd_status = BIOC_SDINVALID;
			goto freeme;
		}
	}

	/* get the remaining fields */
	bd->bd_channel = pd->mpd_enc_idx;
	bd->bd_target = pd->mpd_enc_slot;

	/* get status */
	switch (pd->mpd_fw_state){
	case MFI_PD_UNCONFIG_GOOD:
	case MFI_PD_UNCONFIG_BAD:
		bd->bd_status = BIOC_SDUNUSED;
		break;

	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
		bd->bd_status = BIOC_SDHOTSPARE;
		break;

	case MFI_PD_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;

	case MFI_PD_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;

	case MFI_PD_REBUILD:
		bd->bd_status = BIOC_SDREBUILD;
		break;

	case MFI_PD_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;

	case MFI_PD_COPYBACK:
	case MFI_PD_SYSTEM:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = pd->mpd_size * 512; /* bytes per block */

	/*
	 * vend is sized to hold the inquiry vendor, product and revision
	 * fields; the copy deliberately runs past vendor[] into them.
	 */
	inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
	vendp = inqbuf->vendor;
	memcpy(vend, vendp, sizeof vend - 1);
	vend[sizeof vend - 1] = '\0';
	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));

	/* XXX find a way to retrieve serial nr from drive */
	/* XXX find a way to get bd_procdev */

	mfp = &pd->mpd_progress;
	if (mfp->mfp_in_prog & MFI_PD_PROG_PR) {
		/* patrol read progress, scaled from 0xffff to percent */
		mp = &mfp->mfp_patrol_read;
		bd->bd_patrol.bdp_percent = (mp->mp_progress * 100) / 0xffff;
		bd->bd_patrol.bdp_seconds = mp->mp_elapsed_seconds;
	}

	rv = 0;
freeme:
	free(pd, M_DEVBUF, sizeof *pd);
	free(pl, M_DEVBUF, sizeof *pl);

	return (rv);
}
3118 
3119 int
3120 mfii_ioctl_alarm(struct mfii_softc *sc, struct bioc_alarm *ba)
3121 {
3122 	uint32_t		opc, flags = 0;
3123 	int			rv = 0;
3124 	int8_t			ret;
3125 
3126 	switch(ba->ba_opcode) {
3127 	case BIOC_SADISABLE:
3128 		opc = MR_DCMD_SPEAKER_DISABLE;
3129 		break;
3130 
3131 	case BIOC_SAENABLE:
3132 		opc = MR_DCMD_SPEAKER_ENABLE;
3133 		break;
3134 
3135 	case BIOC_SASILENCE:
3136 		opc = MR_DCMD_SPEAKER_SILENCE;
3137 		break;
3138 
3139 	case BIOC_GASTATUS:
3140 		opc = MR_DCMD_SPEAKER_GET;
3141 		flags = SCSI_DATA_IN;
3142 		break;
3143 
3144 	case BIOC_SATEST:
3145 		opc = MR_DCMD_SPEAKER_TEST;
3146 		break;
3147 
3148 	default:
3149 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_alarm biocalarm invalid "
3150 		    "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
3151 		return (EINVAL);
3152 	}
3153 
3154 	if (mfii_mgmt(sc, opc, NULL, &ret, sizeof(ret), flags))
3155 		rv = EINVAL;
3156 	else
3157 		if (ba->ba_opcode == BIOC_GASTATUS)
3158 			ba->ba_status = ret;
3159 		else
3160 			ba->ba_status = 0;
3161 
3162 	return (rv);
3163 }
3164 
3165 int
3166 mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *bb)
3167 {
3168 	int			i, found, rv = EINVAL;
3169 	union mfi_mbox		mbox;
3170 	uint32_t		cmd;
3171 	struct mfi_pd_list	*pd;
3172 
3173 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink %x\n", DEVNAME(sc),
3174 	    bb->bb_status);
3175 
3176 	/* channel 0 means not in an enclosure so can't be blinked */
3177 	if (bb->bb_channel == 0)
3178 		return (EINVAL);
3179 
3180 	pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
3181 
3182 	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pd, sizeof(*pd), SCSI_DATA_IN))
3183 		goto done;
3184 
3185 	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
3186 		if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
3187 		    bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
3188 			found = 1;
3189 			break;
3190 		}
3191 
3192 	if (!found)
3193 		goto done;
3194 
3195 	memset(&mbox, 0, sizeof(mbox));
3196 	mbox.s[0] = pd->mpl_address[i].mpa_pd_id;
3197 
3198 	switch (bb->bb_status) {
3199 	case BIOC_SBUNBLINK:
3200 		cmd = MR_DCMD_PD_UNBLINK;
3201 		break;
3202 
3203 	case BIOC_SBBLINK:
3204 		cmd = MR_DCMD_PD_BLINK;
3205 		break;
3206 
3207 	case BIOC_SBALARM:
3208 	default:
3209 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink biocblink invalid "
3210 		    "opcode %x\n", DEVNAME(sc), bb->bb_status);
3211 		goto done;
3212 	}
3213 
3214 
3215 	if (mfii_mgmt(sc, cmd, &mbox, NULL, 0, 0))
3216 		goto done;
3217 
3218 	rv = 0;
3219 done:
3220 	free(pd, M_DEVBUF, sizeof *pd);
3221 	return (rv);
3222 }
3223 
/*
 * Prepare physical disk pd_id for use as a spare: if the firmware has
 * it in the UNCONFIG_BAD state, move it to UNCONFIG_GOOD, and if it
 * carries a foreign (imported) configuration, clear it.  The details
 * are re-read after each step because the state transitions are made
 * by the firmware.  Returns 0 on success, ENXIO if the disk did not
 * end up UNCONFIG_GOOD and foreign-free, or the error of the failing
 * firmware command.
 */
static int
mfii_makegood(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfii_foreign_scan_info *fsi;
	struct mfi_pd_details	*pd;
	union mfi_mbox		mbox;
	int			rv;

	fsi = malloc(sizeof *fsi, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	/* fetch the disk's current firmware state */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD) {
		/* SET_STATE takes id, sequence number and the new state */
		mbox.s[0] = pd_id;
		mbox.s[1] = pd->mpd_pd.mfp_seq;
		mbox.b[4] = MFI_PD_UNCONFIG_GOOD;
		rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0, 0);
		if (rv != 0)
			goto done;
	}

	/* re-read the details to observe the effect of the state change */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	if (pd->mpd_ddf_state & MFI_DDF_FOREIGN) {
		/* scan for foreign configs, then clear them if any exist */
		rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_SCAN, NULL, fsi, sizeof(*fsi),
		    SCSI_DATA_IN);
		if (rv != 0)
			goto done;

		if (fsi->count > 0) {
			rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_CLEAR, NULL, NULL, 0, 0);
			if (rv != 0)
				goto done;
		}
	}

	/* final check: the disk must now be clean and unconfigured-good */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state != MFI_PD_UNCONFIG_GOOD ||
	    pd->mpd_ddf_state & MFI_DDF_FOREIGN)
		rv = ENXIO;

done:
	free(fsi, M_DEVBUF, sizeof *fsi);
	free(pd, M_DEVBUF, sizeof *pd);

	return (rv);
}
3285 
/*
 * Turn physical disk pd_id into a global hot spare.  Builds a
 * mfi_hotspare structure (with one trailing uint16_t array slot per
 * configured array) and hands it to the firmware.  Returns 0 on
 * success or an errno-style value on failure.
 */
static int
mfii_makespare(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfi_hotspare	*hs;
	struct mfi_pd_details	*pd;
	union mfi_mbox		mbox;
	size_t			size;
	int			rv = EINVAL;

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	/* variable-sized: trailing array has one entry per RAID array */
	size = sizeof *hs + sizeof(uint16_t) * sc->sc_cfg->mfc_no_array;

	hs = malloc(size, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	/* fetch current details for the disk's id/sequence pair */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	/* zeroed hs with only id/seq set makes a global (any-array) spare */
	memset(hs, 0, size);
	hs->mhs_pd.mfp_id = pd->mpd_pd.mfp_id;
	hs->mhs_pd.mfp_seq = pd->mpd_pd.mfp_seq;
	rv = mfii_mgmt(sc, MR_DCMD_CFG_MAKE_SPARE, NULL, hs, size, SCSI_DATA_OUT);

done:
	free(hs, M_DEVBUF, size);
	free(pd, M_DEVBUF, sizeof *pd);

	return (rv);
}
3324 
/*
 * Handle a bioctl(8) setstate request: locate the physical disk
 * addressed by (channel, target) and ask the firmware to move it to
 * the requested state (online, offline, hotspare or rebuild).  The
 * rebuild case may first need to recover the disk from a bad/foreign
 * state and register it as a spare, which can itself kick off the
 * rebuild.  Returns 0 on success or an errno-style value.
 */
int
mfii_ioctl_setstate(struct mfii_softc *sc, struct bioc_setstate *bs)
{
	struct mfi_pd_details	*pd;
	struct mfi_pd_list	*pl;
	int			i, found, rv = EINVAL;
	union mfi_mbox		mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate %x\n", DEVNAME(sc),
	    bs->bs_status);

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl), SCSI_DATA_IN))
		goto done;

	/* translate the enclosure/slot address into a firmware device id */
	for (i = 0, found = 0; i < pl->mpl_no_pd; i++)
		if (bs->bs_channel == pl->mpl_address[i].mpa_enc_index &&
		    bs->bs_target == pl->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;

	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN))
		goto done;

	/* SET_STATE takes id, current sequence number and the new state */
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
	mbox.s[1] = pd->mpd_pd.mfp_seq;

	switch (bs->bs_status) {
	case BIOC_SSONLINE:
		mbox.b[4] = MFI_PD_ONLINE;
		break;

	case BIOC_SSOFFLINE:
		mbox.b[4] = MFI_PD_OFFLINE;
		break;

	case BIOC_SSHOTSPARE:
		mbox.b[4] = MFI_PD_HOTSPARE;
		break;

	case BIOC_SSREBUILD:
		if (pd->mpd_fw_state != MFI_PD_OFFLINE) {
			/* recover the disk from bad/foreign state first */
			if ((rv = mfii_makegood(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			if ((rv = mfii_makespare(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			/* re-read: the above steps changed the disk's state */
			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
			    SCSI_DATA_IN);
			if (rv != 0)
				goto done;

			/* rebuilding might be started by mfii_makespare() */
			if (pd->mpd_fw_state == MFI_PD_REBUILD) {
				rv = 0;
				goto done;
			}

			/* rebuild the mailbox with the fresh sequence number */
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			mbox.s[1] = pd->mpd_pd.mfp_seq;
		}
		mbox.b[4] = MFI_PD_REBUILD;
		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate invalid "
		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
		goto done;
	}


	rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0, 0);
done:
	free(pd, M_DEVBUF, sizeof *pd);
	free(pl, M_DEVBUF, sizeof *pl);
	return (rv);
}
3416 
/*
 * Handle a bioctl(8) patrol-read request: start/stop a patrol read,
 * switch the patrol mode (manual, disabled, or automatic with an
 * optional interval and next start time), or report the current
 * patrol properties and status.  Returns 0 on success or EINVAL on
 * any firmware command failure or bad argument.
 */
int
mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *bp)
{
	uint32_t		opc;
	int			rv = 0;
	struct mfi_pr_properties prop;
	struct mfi_pr_status	status;
	uint32_t		time, exec_freq;

	switch (bp->bp_opcode) {
	case BIOC_SPSTOP:
	case BIOC_SPSTART:
		if (bp->bp_opcode == BIOC_SPSTART)
			opc = MR_DCMD_PR_START;
		else
			opc = MR_DCMD_PR_STOP;
		/* NOTE(review): SCSI_DATA_IN with a NULL/0 buffer looks odd
		 * for a pure command; presumably harmless — confirm against
		 * mfii_mgmt()'s handling of zero-length transfers. */
		if (mfii_mgmt(sc, opc, NULL, NULL, 0, SCSI_DATA_IN))
			return (EINVAL);
		break;

	case BIOC_SPMANUAL:
	case BIOC_SPDISABLE:
	case BIOC_SPAUTO:
		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time), SCSI_DATA_IN))
			return (EINVAL);

		/* read-modify-write of the patrol properties */
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop), SCSI_DATA_IN))
			return (EINVAL);

		switch (bp->bp_opcode) {
		case BIOC_SPMANUAL:
			prop.op_mode = MFI_PR_OPMODE_MANUAL;
			break;
		case BIOC_SPDISABLE:
			prop.op_mode = MFI_PR_OPMODE_DISABLED;
			break;
		case BIOC_SPAUTO:
			if (bp->bp_autoival != 0) {
				if (bp->bp_autoival == -1)
					/* continuously */
					exec_freq = 0xffffffffU;
				else if (bp->bp_autoival > 0)
					exec_freq = bp->bp_autoival;
				else
					return (EINVAL);
				prop.exec_freq = exec_freq;
			}
			if (bp->bp_autonext != 0) {
				if (bp->bp_autonext < 0)
					return (EINVAL);
				else
					/* next run is relative to device time */
					prop.next_exec = time + bp->bp_autonext;
			}
			prop.op_mode = MFI_PR_OPMODE_AUTO;
			break;
		}

		opc = MR_DCMD_PR_SET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop), SCSI_DATA_OUT))
			return (EINVAL);

		break;

	case BIOC_GPSTATUS:
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop), SCSI_DATA_IN))
			return (EINVAL);

		opc = MR_DCMD_PR_GET_STATUS;
		if (mfii_mgmt(sc, opc, NULL, &status, sizeof(status), SCSI_DATA_IN))
			return (EINVAL);

		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time), SCSI_DATA_IN))
			return (EINVAL);

		/* translate firmware op mode to bioc mode */
		switch (prop.op_mode) {
		case MFI_PR_OPMODE_AUTO:
			bp->bp_mode = BIOC_SPMAUTO;
			bp->bp_autoival = prop.exec_freq;
			bp->bp_autonext = prop.next_exec;
			bp->bp_autonow = time;
			break;
		case MFI_PR_OPMODE_MANUAL:
			bp->bp_mode = BIOC_SPMMANUAL;
			break;
		case MFI_PR_OPMODE_DISABLED:
			bp->bp_mode = BIOC_SPMDISABLED;
			break;
		default:
			printf("%s: unknown patrol mode %d\n",
			    DEVNAME(sc), prop.op_mode);
			break;
		}

		/* translate firmware patrol state to bioc status */
		switch (status.state) {
		case MFI_PR_STATE_STOPPED:
			bp->bp_status = BIOC_SPSSTOPPED;
			break;
		case MFI_PR_STATE_READY:
			bp->bp_status = BIOC_SPSREADY;
			break;
		case MFI_PR_STATE_ACTIVE:
			bp->bp_status = BIOC_SPSACTIVE;
			break;
		case MFI_PR_STATE_ABORTED:
			bp->bp_status = BIOC_SPSABORTED;
			break;
		default:
			printf("%s: unknown patrol state %d\n",
			    DEVNAME(sc), status.state);
			break;
		}

		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_patrol biocpatrol invalid "
		    "opcode %x\n", DEVNAME(sc), bp->bp_opcode);
		return (EINVAL);
	}

	return (rv);
}
3545 
3546 int
3547 mfii_bio_hs(struct mfii_softc *sc, int volid, int type, void *bio_hs)
3548 {
3549 	struct mfi_conf		*cfg;
3550 	struct mfi_hotspare	*hs;
3551 	struct mfi_pd_details	*pd;
3552 	struct bioc_disk	*sdhs;
3553 	struct bioc_vol		*vdhs;
3554 	struct scsi_inquiry_data *inqbuf;
3555 	char			vend[8+16+4+1], *vendp;
3556 	int			i, rv = EINVAL;
3557 	uint32_t		size;
3558 	union mfi_mbox		mbox;
3559 
3560 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs %d\n", DEVNAME(sc), volid);
3561 
3562 	if (!bio_hs)
3563 		return (EINVAL);
3564 
3565 	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3566 
3567 	/* send single element command to retrieve size for full structure */
3568 	cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
3569 	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg), SCSI_DATA_IN))
3570 		goto freeme;
3571 
3572 	size = cfg->mfc_size;
3573 	free(cfg, M_DEVBUF, sizeof *cfg);
3574 
3575 	/* memory for read config */
3576 	cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
3577 	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size, SCSI_DATA_IN))
3578 		goto freeme;
3579 
3580 	/* calculate offset to hs structure */
3581 	hs = (struct mfi_hotspare *)(
3582 	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
3583 	    cfg->mfc_array_size * cfg->mfc_no_array +
3584 	    cfg->mfc_ld_size * cfg->mfc_no_ld);
3585 
3586 	if (volid < cfg->mfc_no_ld)
3587 		goto freeme; /* not a hotspare */
3588 
3589 	if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
3590 		goto freeme; /* not a hotspare */
3591 
3592 	/* offset into hotspare structure */
3593 	i = volid - cfg->mfc_no_ld;
3594 
3595 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs i %d volid %d no_ld %d no_hs %d "
3596 	    "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
3597 	    cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
3598 
3599 	/* get pd fields */
3600 	memset(&mbox, 0, sizeof(mbox));
3601 	mbox.s[0] = hs[i].mhs_pd.mfp_id;
3602 	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3603 	    SCSI_DATA_IN)) {
3604 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs illegal PD\n",
3605 		    DEVNAME(sc));
3606 		goto freeme;
3607 	}
3608 
3609 	switch (type) {
3610 	case MFI_MGMT_VD:
3611 		vdhs = bio_hs;
3612 		vdhs->bv_status = BIOC_SVONLINE;
3613 		vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3614 		vdhs->bv_level = -1; /* hotspare */
3615 		vdhs->bv_nodisk = 1;
3616 		break;
3617 
3618 	case MFI_MGMT_SD:
3619 		sdhs = bio_hs;
3620 		sdhs->bd_status = BIOC_SDHOTSPARE;
3621 		sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3622 		sdhs->bd_channel = pd->mpd_enc_idx;
3623 		sdhs->bd_target = pd->mpd_enc_slot;
3624 		inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
3625 		vendp = inqbuf->vendor;
3626 		memcpy(vend, vendp, sizeof vend - 1);
3627 		vend[sizeof vend - 1] = '\0';
3628 		strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
3629 		break;
3630 
3631 	default:
3632 		goto freeme;
3633 	}
3634 
3635 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs 6\n", DEVNAME(sc));
3636 	rv = 0;
3637 freeme:
3638 	free(pd, M_DEVBUF, sizeof *pd);
3639 	free(cfg, M_DEVBUF, 0);
3640 
3641 	return (rv);
3642 }
3643 
3644 #ifndef SMALL_KERNEL
3645 
3646 #define MFI_BBU_SENSORS 4
3647 
/*
 * Refresh the battery backup unit sensors from the firmware's BBU
 * status.  sc_bbu[0] is an overall ok/not-ok indicator, sc_bbu[1..3]
 * are voltage, current and temperature, and sc_bbu_status[] mirrors
 * the individual fw_status bits.  If the status can't be read or no
 * battery is present, all sensors are marked unknown (and the ok
 * indicator critical for the no-battery case).
 */
void
mfii_bbu(struct mfii_softc *sc)
{
	struct mfi_bbu_status bbu;
	u_int32_t status;
	u_int32_t mask;
	u_int32_t soh_bad;
	int i;

	if (mfii_mgmt(sc, MR_DCMD_BBU_GET_STATUS, NULL, &bbu,
	    sizeof(bbu), SCSI_DATA_IN) != 0) {
		/* firmware won't answer: everything is unknown */
		for (i = 0; i < MFI_BBU_SENSORS; i++) {
			sc->sc_bbu[i].value = 0;
			sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
		}
		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
			sc->sc_bbu_status[i].value = 0;
			sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
		}
		return;
	}

	/* which fw_status bits mean "bad" depends on the battery type */
	switch (bbu.battery_type) {
	case MFI_BBU_TYPE_IBBU:
		mask = MFI_BBU_STATE_BAD_IBBU;
		soh_bad = 0;
		break;
	case MFI_BBU_TYPE_BBU:
		mask = MFI_BBU_STATE_BAD_BBU;
		soh_bad = (bbu.detail.bbu.is_SOH_good == 0);
		break;

	case MFI_BBU_TYPE_NONE:
	default:
		/* no battery: flag the ok indicator critical */
		sc->sc_bbu[0].value = 0;
		sc->sc_bbu[0].status = SENSOR_S_CRIT;
		for (i = 1; i < MFI_BBU_SENSORS; i++) {
			sc->sc_bbu[i].value = 0;
			sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
		}
		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
			sc->sc_bbu_status[i].value = 0;
			sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
		}
		return;
	}

	status = letoh32(bbu.fw_status);

	sc->sc_bbu[0].value = ((status & mask) || soh_bad) ? 0 : 1;
	sc->sc_bbu[0].status = ((status & mask) || soh_bad) ? SENSOR_S_CRIT :
	    SENSOR_S_OK;

	/* sensor framework units: uV, uA, and uK (micro-degrees Kelvin) */
	sc->sc_bbu[1].value = letoh16(bbu.voltage) * 1000;
	sc->sc_bbu[2].value = (int16_t)letoh16(bbu.current) * 1000;
	sc->sc_bbu[3].value = letoh16(bbu.temperature) * 1000000 + 273150000;
	for (i = 1; i < MFI_BBU_SENSORS; i++)
		sc->sc_bbu[i].status = SENSOR_S_UNSPEC;

	/* one indicator per fw_status bit */
	for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
		sc->sc_bbu_status[i].value = (status & (1 << i)) ? 1 : 0;
		sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
	}
}
3712 
3713 void
3714 mfii_refresh_ld_sensor(struct mfii_softc *sc, int ld)
3715 {
3716 	struct ksensor *sensor;
3717 	int target;
3718 
3719 	target = sc->sc_ld_list.mll_list[ld].mll_ld.mld_target;
3720 	sensor = &sc->sc_sensors[target];
3721 
3722 	switch(sc->sc_ld_list.mll_list[ld].mll_state) {
3723 	case MFI_LD_OFFLINE:
3724 		sensor->value = SENSOR_DRIVE_FAIL;
3725 		sensor->status = SENSOR_S_CRIT;
3726 		break;
3727 
3728 	case MFI_LD_PART_DEGRADED:
3729 	case MFI_LD_DEGRADED:
3730 		sensor->value = SENSOR_DRIVE_PFAIL;
3731 		sensor->status = SENSOR_S_WARN;
3732 		break;
3733 
3734 	case MFI_LD_ONLINE:
3735 		sensor->value = SENSOR_DRIVE_ONLINE;
3736 		sensor->status = SENSOR_S_OK;
3737 		break;
3738 
3739 	default:
3740 		sensor->value = 0; /* unknown */
3741 		sensor->status = SENSOR_S_UNKNOWN;
3742 		break;
3743 	}
3744 }
3745 
3746 void
3747 mfii_init_ld_sensor(struct mfii_softc *sc, int ld)
3748 {
3749 	struct device		*dev;
3750 	struct scsi_link	*link;
3751 	struct ksensor		*sensor;
3752 	int			target;
3753 
3754 	target = sc->sc_ld_list.mll_list[ld].mll_ld.mld_target;
3755 	sensor = &sc->sc_sensors[target];
3756 
3757 	link = scsi_get_link(sc->sc_scsibus, target, 0);
3758 	if (link == NULL) {
3759 		strlcpy(sensor->desc, "cache", sizeof(sensor->desc));
3760 	} else {
3761 		dev = link->device_softc;
3762 		if (dev != NULL)
3763 			strlcpy(sensor->desc, dev->dv_xname,
3764 			    sizeof(sensor->desc));
3765 	}
3766 	sensor->type = SENSOR_DRIVE;
3767 	mfii_refresh_ld_sensor(sc, ld);
3768 }
3769 
3770 int
3771 mfii_create_sensors(struct mfii_softc *sc)
3772 {
3773 	int			i, target;
3774 
3775 	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
3776 	    sizeof(sc->sc_sensordev.xname));
3777 
3778 	if (ISSET(letoh32(sc->sc_info.mci_hw_present), MFI_INFO_HW_BBU)) {
3779 		sc->sc_bbu = mallocarray(4, sizeof(*sc->sc_bbu),
3780 		    M_DEVBUF, M_WAITOK | M_ZERO);
3781 
3782 		sc->sc_bbu[0].type = SENSOR_INDICATOR;
3783 		sc->sc_bbu[0].status = SENSOR_S_UNKNOWN;
3784 		strlcpy(sc->sc_bbu[0].desc, "bbu ok",
3785 		    sizeof(sc->sc_bbu[0].desc));
3786 		sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[0]);
3787 
3788 		sc->sc_bbu[1].type = SENSOR_VOLTS_DC;
3789 		sc->sc_bbu[1].status = SENSOR_S_UNSPEC;
3790 		sc->sc_bbu[2].type = SENSOR_AMPS;
3791 		sc->sc_bbu[2].status = SENSOR_S_UNSPEC;
3792 		sc->sc_bbu[3].type = SENSOR_TEMP;
3793 		sc->sc_bbu[3].status = SENSOR_S_UNSPEC;
3794 		for (i = 1; i < MFI_BBU_SENSORS; i++) {
3795 			strlcpy(sc->sc_bbu[i].desc, "bbu",
3796 			    sizeof(sc->sc_bbu[i].desc));
3797 			sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[i]);
3798 		}
3799 
3800 		sc->sc_bbu_status = malloc(sizeof(*sc->sc_bbu_status) *
3801 		    sizeof(mfi_bbu_indicators), M_DEVBUF, M_WAITOK | M_ZERO);
3802 
3803 		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
3804 			sc->sc_bbu_status[i].type = SENSOR_INDICATOR;
3805 			sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
3806 			strlcpy(sc->sc_bbu_status[i].desc,
3807 			    mfi_bbu_indicators[i],
3808 			    sizeof(sc->sc_bbu_status[i].desc));
3809 
3810 			sensor_attach(&sc->sc_sensordev, &sc->sc_bbu_status[i]);
3811 		}
3812 	}
3813 
3814 	sc->sc_sensors = mallocarray(MFI_MAX_LD, sizeof(struct ksensor),
3815 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3816 	if (sc->sc_sensors == NULL)
3817 		return (1);
3818 
3819 	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
3820 		mfii_init_ld_sensor(sc, i);
3821 		target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
3822 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[target]);
3823 	}
3824 
3825 	if (sensor_task_register(sc, mfii_refresh_sensors, 10) == NULL)
3826 		goto bad;
3827 
3828 	sensordev_install(&sc->sc_sensordev);
3829 
3830 	return (0);
3831 
3832 bad:
3833 	free(sc->sc_sensors, M_DEVBUF,
3834 	    MFI_MAX_LD * sizeof(struct ksensor));
3835 
3836 	return (1);
3837 }
3838 
/*
 * Periodic sensor refresh task (registered by mfii_create_sensors()):
 * update the BBU sensors when present, re-read the logical disk list
 * from the firmware, then push each logical disk's state into its
 * drive sensor.
 */
void
mfii_refresh_sensors(void *arg)
{
	struct mfii_softc	*sc = arg;
	int			i;

	/* firmware management commands are serialised by sc_lock */
	rw_enter_write(&sc->sc_lock);
	if (sc->sc_bbu != NULL)
		mfii_bbu(sc);

	mfii_bio_getitall(sc);
	rw_exit_write(&sc->sc_lock);

	/* sensor updates read only the cached ld list filled above */
	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++)
		mfii_refresh_ld_sensor(sc, i);
}
3855 #endif /* SMALL_KERNEL */
3856 #endif /* NBIO > 0 */
3857