xref: /openbsd-src/sys/dev/pci/mfii.c (revision 3374c67d44f9b75b98444cbf63020f777792342e)
1 /* $OpenBSD: mfii.c,v 1.87 2022/09/25 08:15:43 stsp Exp $ */
2 
3 /*
4  * Copyright (c) 2012 David Gwynne <dlg@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "bio.h"
20 
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/malloc.h>
24 #include <sys/device.h>
25 #include <sys/dkio.h>
26 #include <sys/pool.h>
27 #include <sys/task.h>
28 #include <sys/atomic.h>
29 #include <sys/sensors.h>
30 #include <sys/rwlock.h>
31 #include <sys/syslog.h>
32 #include <sys/smr.h>
33 
34 #include <dev/biovar.h>
35 #include <dev/pci/pcidevs.h>
36 #include <dev/pci/pcivar.h>
37 
38 #include <machine/bus.h>
39 
40 #include <scsi/scsi_all.h>
41 #include <scsi/scsi_disk.h>
42 #include <scsi/scsiconf.h>
43 
44 #include <dev/ic/mfireg.h>
45 #include <dev/pci/mpiireg.h>
46 
47 #define	MFII_BAR		0x14
48 #define MFII_BAR_35		0x10
49 #define	MFII_PCI_MEMSIZE	0x2000 /* 8k */
50 
51 #define MFII_OSTS_INTR_VALID	0x00000009
52 #define MFII_RPI		0x6c /* reply post host index */
53 #define MFII_OSP2		0xb4 /* outbound scratch pad 2 */
54 #define MFII_OSP3		0xb8 /* outbound scratch pad 3 */
55 
56 #define MFII_REQ_TYPE_SCSI	MPII_REQ_DESCR_SCSI_IO
57 #define MFII_REQ_TYPE_LDIO	(0x7 << 1)
58 #define MFII_REQ_TYPE_MFA	(0x1 << 1)
59 #define MFII_REQ_TYPE_NO_LOCK	(0x2 << 1)
60 #define MFII_REQ_TYPE_HI_PRI	(0x6 << 1)
61 
62 #define MFII_REQ_MFA(_a)	htole64((_a) | MFII_REQ_TYPE_MFA)
63 
64 #define MFII_FUNCTION_PASSTHRU_IO			(0xf0)
65 #define MFII_FUNCTION_LDIO_REQUEST			(0xf1)
66 
67 #define MFII_MAX_CHAIN_UNIT	0x00400000
68 #define MFII_MAX_CHAIN_MASK	0x000003E0
69 #define MFII_MAX_CHAIN_SHIFT	5
70 
71 #define MFII_256K_IO		128
72 #define MFII_1MB_IO		(MFII_256K_IO * 4)
73 
74 #define MFII_CHAIN_FRAME_MIN	1024
75 
/*
 * Request descriptor posted to the controller to kick off a command.
 * flags carries one of the MFII_REQ_TYPE_* values; smid selects the
 * request frame slot the descriptor refers to.
 */
struct mfii_request_descr {
	u_int8_t	flags;		/* MFII_REQ_TYPE_* */
	u_int8_t	msix_index;
	u_int16_t	smid;		/* system message id (frame slot) */

	u_int16_t	lmid;
	u_int16_t	dev_handle;
} __packed;
84 
85 #define MFII_RAID_CTX_IO_TYPE_SYSPD	(0x1 << 4)
86 #define MFII_RAID_CTX_TYPE_CUDA		(0x2 << 4)
87 
/*
 * Per-command RAID context; it sits in the request frame after the
 * mpii scsi io message (see the nsge_in_io calculation in
 * mfii_attach()).  Layout mirrors the firmware's definition.
 */
struct mfii_raid_context {
	u_int8_t	type_nseg;
	u_int8_t	_reserved1;
	u_int16_t	timeout_value;

	u_int16_t	reg_lock_flags;
#define MFII_RAID_CTX_RL_FLAGS_SEQNO_EN	(0x08)
#define MFII_RAID_CTX_RL_FLAGS_CPU0	(0x00)
#define MFII_RAID_CTX_RL_FLAGS_CPU1	(0x10)
#define MFII_RAID_CTX_RL_FLAGS_CUDA	(0x80)

/* the 3.5 iops reinterpret reg_lock_flags as routing flags */
#define MFII_RAID_CTX_ROUTING_FLAGS_SQN	(1 << 4)
#define MFII_RAID_CTX_ROUTING_FLAGS_CPU0 0
	u_int16_t	virtual_disk_target_id;

	u_int64_t	reg_lock_row_lba;

	u_int32_t	reg_lock_length;

	u_int16_t	next_lm_id;
	u_int8_t	ex_status;
	u_int8_t	status;

	u_int8_t	raid_flags;
	u_int8_t	num_sge;
	u_int16_t	config_seq_num;

	u_int8_t	span_arm;
	u_int8_t	_reserved3[3];
} __packed;
118 
/*
 * Scatter-gather element: 64-bit dma address plus length.  sg_flags
 * takes MFII_SGE_* values; sg_next_chain_offset is used when this
 * element points at a chain frame.
 */
struct mfii_sge {
	u_int64_t	sg_addr;
	u_int32_t	sg_len;
	u_int16_t	_reserved;
	u_int8_t	sg_next_chain_offset;
	u_int8_t	sg_flags;	/* MFII_SGE_* */
} __packed;
126 
127 #define MFII_SGE_ADDR_MASK		(0x03)
128 #define MFII_SGE_ADDR_SYSTEM		(0x00)
129 #define MFII_SGE_ADDR_IOCDDR		(0x01)
130 #define MFII_SGE_ADDR_IOCPLB		(0x02)
131 #define MFII_SGE_ADDR_IOCPLBNTA		(0x03)
132 #define MFII_SGE_END_OF_LIST		(0x40)
133 #define MFII_SGE_CHAIN_ELEMENT		(0x80)
134 
135 #define MFII_REQUEST_SIZE	256
136 
137 #define MR_DCMD_LD_MAP_GET_INFO			0x0300e101
138 
139 #define MFII_MAX_ROW		32
140 #define MFII_MAX_ARRAY		128
141 
/* per-array map of rows to physical disk ids */
struct mfii_array_map {
	uint16_t		mam_pd[MFII_MAX_ROW];
} __packed;
145 
/* firmware device-handle slot for one physical disk */
struct mfii_dev_handle {
	uint16_t		mdh_cur_handle;	/* handle currently in use */
	uint8_t			mdh_valid;
	uint8_t			mdh_reserved;
	uint16_t		mdh_handle[2];
} __packed;
152 
/*
 * Response layout for MR_DCMD_LD_MAP_GET_INFO; mfii_dev_handles_update()
 * pulls the per-PD device handles and the PD timeout out of this.
 */
struct mfii_ld_map {
	uint32_t		mlm_total_size;
	uint32_t		mlm_reserved1[5];
	uint32_t		mlm_num_lds;
	uint32_t		mlm_reserved2;
	uint8_t			mlm_tgtid_to_ld[2 * MFI_MAX_LD];
	uint8_t			mlm_pd_timeout;
	uint8_t			mlm_reserved3[7];
	struct mfii_array_map	mlm_am[MFII_MAX_ARRAY];
	struct mfii_dev_handle	mlm_dev_handle[MFI_MAX_PD];
} __packed;
164 
/*
 * Task management (abort) frame: a fixed 128-byte request region
 * followed by a 128-byte reply region, each overlaid with the mpii
 * message structures.
 */
struct mfii_task_mgmt {
	union {
		uint8_t			request[128];
		struct mpii_msg_scsi_task_request
					mpii_request;
	} __packed __aligned(8);

	union {
		uint8_t			reply[128];
		uint32_t		flags;
#define MFII_TASK_MGMT_FLAGS_LD				(1 << 0)
#define MFII_TASK_MGMT_FLAGS_PD				(1 << 1)
		struct mpii_msg_scsi_task_reply
					mpii_reply;
	} __packed __aligned(8);
} __packed __aligned(8);
181 
/*
 * Bookkeeping for one contiguous dma-safe allocation; created by
 * mfii_dmamem_alloc() and accessed via the MFII_DMA_* macros below.
 */
struct mfii_dmamem {
	bus_dmamap_t		mdm_map;
	bus_dma_segment_t	mdm_seg;
	size_t			mdm_size;
	caddr_t			mdm_kva;	/* kernel mapping of the segment */
};
188 #define MFII_DMA_MAP(_mdm)	((_mdm)->mdm_map)
189 #define MFII_DMA_LEN(_mdm)	((_mdm)->mdm_size)
190 #define MFII_DMA_DVA(_mdm)	((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
191 #define MFII_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
192 
193 struct mfii_softc;
194 
/*
 * Per-command control block.  Each ccb owns fixed slices of the
 * sc_requests, sc_mfi, sc_sense and sc_sgl dma regions; the *_dva
 * fields cache the device addresses and the *_offset fields are used
 * for partial bus_dmamap_sync calls.
 */
struct mfii_ccb {
	/* slice of sc_requests holding the mpii request frame */
	void			*ccb_request;
	u_int64_t		ccb_request_dva;
	bus_addr_t		ccb_request_offset;

	/* slice of sc_mfi holding the mfi frame for proxied dcmds */
	void			*ccb_mfi;
	u_int64_t		ccb_mfi_dva;
	bus_addr_t		ccb_mfi_offset;

	/* slice of sc_sense (also reused for dcmd frames, see below) */
	struct mfi_sense	*ccb_sense;
	u_int64_t		ccb_sense_dva;
	bus_addr_t		ccb_sense_offset;

	/* slice of sc_sgl for chained scatter-gather lists */
	struct mfii_sge		*ccb_sgl;
	u_int64_t		ccb_sgl_dva;
	bus_addr_t		ccb_sgl_offset;
	u_int			ccb_sgl_len;

	struct mfii_request_descr ccb_req;

	bus_dmamap_t		ccb_dmamap;

	/* data for sgl */
	void			*ccb_data;
	size_t			ccb_len;

	int			ccb_direction;
#define MFII_DATA_NONE			0
#define MFII_DATA_IN			1
#define MFII_DATA_OUT			2

	/* completion callback and its argument */
	void			*ccb_cookie;
	void			(*ccb_done)(struct mfii_softc *,
				    struct mfii_ccb *);

	u_int32_t		ccb_flags;
#define MFI_CCB_F_ERR			(1<<0)
	u_int			ccb_smid;	/* slot id posted to hardware */
	u_int			ccb_refcnt;
	SIMPLEQ_ENTRY(mfii_ccb)	ccb_link;
};
236 SIMPLEQ_HEAD(mfii_ccb_list, mfii_ccb);
237 
/*
 * SMR-published table mapping physical disk targets to firmware
 * device handles; superseded tables are reclaimed via smr_call().
 */
struct mfii_pd_dev_handles {
	struct smr_entry	pd_smr;
	uint16_t		pd_handles[MFI_MAX_PD];
};
242 
/* state for the pass-through physical disk scsibus (see mfii_syspd()) */
struct mfii_pd_softc {
	struct scsibus_softc	*pd_scsibus;
	struct mfii_pd_dev_handles *pd_dev_handles;	/* SMR-protected */
	uint8_t			pd_timeout;
};
248 
/*
 * Per-iop-generation parameters; one static instance exists for each
 * controller family (thunderbolt/25/35) and is selected by PCI id in
 * mfii_find_iop().
 */
struct mfii_iop {
	int bar;		/* which PCI BAR holds the registers */
	int num_sge_loc;	/* where num_sge lives in the raid context */
#define MFII_IOP_NUM_SGE_LOC_ORIG	0
#define MFII_IOP_NUM_SGE_LOC_35		1
	u_int16_t ldio_ctx_reg_lock_flags;
	u_int8_t ldio_req_type;
	u_int8_t ldio_ctx_type_nseg;
	u_int8_t sge_flag_chain;
	u_int8_t sge_flag_eol;
};
260 
/* per-controller state */
struct mfii_softc {
	struct device		sc_dev;
	const struct mfii_iop	*sc_iop;	/* family parameters */

	/* pci glue */
	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	/* register window */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	void			*sc_ih;		/* interrupt handle */

	struct mutex		sc_ccb_mtx;
	struct mutex		sc_post_mtx;

	/* command limits derived from the scratchpad registers at attach */
	u_int			sc_max_fw_cmds;
	u_int			sc_max_cmds;
	u_int			sc_max_sgl;

	/* reply post queue shared with the firmware */
	u_int			sc_reply_postq_depth;
	u_int			sc_reply_postq_index;
	struct mutex		sc_reply_postq_mtx;
	struct mfii_dmamem	*sc_reply_postq;

	/* dma regions sliced up between the ccbs */
	struct mfii_dmamem	*sc_requests;
	struct mfii_dmamem	*sc_mfi;
	struct mfii_dmamem	*sc_sense;
	struct mfii_dmamem	*sc_sgl;

	struct mfii_ccb		*sc_ccb;
	struct mfii_ccb_list	sc_ccb_freeq;

	/* async event notification */
	struct mfii_ccb		*sc_aen_ccb;
	struct task		sc_aen_task;

	/* timed-out commands queued for abort processing */
	struct mutex		sc_abort_mtx;
	struct mfii_ccb_list	sc_abort_list;
	struct task		sc_abort_task;

	struct scsibus_softc	*sc_scsibus;
	struct mfii_pd_softc	*sc_pd;		/* pass-through PD bus */
	struct scsi_iopool	sc_iopool;

	/* save some useful information for logical drives that is missing
	 * in sc_ld_list
	 */
	struct {
		char		ld_dev[16];	/* device name sd? */
	}			sc_ld[MFI_MAX_LD];
	int			sc_target_lds[MFI_MAX_LD];

	/* scsi ioctl from sd device */
	int			(*sc_ioctl)(struct device *, u_long, caddr_t);

	/* bio */
	struct mfi_conf		*sc_cfg;
	struct mfi_ctrl_info	sc_info;
	struct mfi_ld_list	sc_ld_list;
	struct mfi_ld_details	*sc_ld_details; /* array to all logical disks */
	int			sc_no_pd; /* used physical disks */
	int			sc_ld_sz; /* sizeof sc_ld_details */

	/* mgmt lock */
	struct rwlock		sc_lock;

	/* sensors */
	struct ksensordev	sc_sensordev;
	struct ksensor		*sc_bbu;
	struct ksensor		*sc_bbu_status;
	struct ksensor		*sc_sensors;
};
334 
335 #ifdef MFII_DEBUG
336 #define DPRINTF(x...)		do { if (mfii_debug) printf(x); } while(0)
337 #define DNPRINTF(n,x...)	do { if (mfii_debug & n) printf(x); } while(0)
338 #define	MFII_D_CMD		0x0001
339 #define	MFII_D_INTR		0x0002
340 #define	MFII_D_MISC		0x0004
341 #define	MFII_D_DMA		0x0008
342 #define	MFII_D_IOCTL		0x0010
343 #define	MFII_D_RW		0x0020
344 #define	MFII_D_MEM		0x0040
345 #define	MFII_D_CCB		0x0080
346 uint32_t	mfii_debug = 0
347 /*		    | MFII_D_CMD */
348 /*		    | MFII_D_INTR */
349 		    | MFII_D_MISC
350 /*		    | MFII_D_DMA */
351 /*		    | MFII_D_IOCTL */
352 /*		    | MFII_D_RW */
353 /*		    | MFII_D_MEM */
354 /*		    | MFII_D_CCB */
355 		;
356 #else
357 #define DPRINTF(x...)
358 #define DNPRINTF(n,x...)
359 #endif
360 
361 int		mfii_match(struct device *, void *, void *);
362 void		mfii_attach(struct device *, struct device *, void *);
363 int		mfii_detach(struct device *, int);
364 int		mfii_activate(struct device *, int);
365 
/* autoconf attachment glue */
const struct cfattach mfii_ca = {
	sizeof(struct mfii_softc),
	mfii_match,
	mfii_attach,
	mfii_detach,
	mfii_activate,
};
373 
/* driver definition: device list, name, class */
struct cfdriver mfii_cd = {
	NULL,
	"mfii",
	DV_DULL
};
379 
380 void		mfii_scsi_cmd(struct scsi_xfer *);
381 void		mfii_scsi_cmd_done(struct mfii_softc *, struct mfii_ccb *);
382 int		mfii_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
383 int		mfii_ioctl_cache(struct scsi_link *, u_long, struct dk_cache *);
384 
/* scsi adapter entry points for the logical disk bus */
const struct scsi_adapter mfii_switch = {
	mfii_scsi_cmd, NULL, NULL, NULL, mfii_scsi_ioctl
};
388 
389 void		mfii_pd_scsi_cmd(struct scsi_xfer *);
390 int		mfii_pd_scsi_probe(struct scsi_link *);
391 
/* scsi adapter entry points for the pass-through physical disk bus */
const struct scsi_adapter mfii_pd_switch = {
	mfii_pd_scsi_cmd, NULL, mfii_pd_scsi_probe, NULL, NULL,
};
395 
396 #define DEVNAME(_sc)		((_sc)->sc_dev.dv_xname)
397 
398 u_int32_t		mfii_read(struct mfii_softc *, bus_size_t);
399 void			mfii_write(struct mfii_softc *, bus_size_t, u_int32_t);
400 
401 struct mfii_dmamem *	mfii_dmamem_alloc(struct mfii_softc *, size_t);
402 void			mfii_dmamem_free(struct mfii_softc *,
403 			    struct mfii_dmamem *);
404 
405 void *			mfii_get_ccb(void *);
406 void			mfii_put_ccb(void *, void *);
407 int			mfii_init_ccb(struct mfii_softc *);
408 void			mfii_scrub_ccb(struct mfii_ccb *);
409 
410 int			mfii_reset_hard(struct mfii_softc *);
411 int			mfii_transition_firmware(struct mfii_softc *);
412 int			mfii_initialise_firmware(struct mfii_softc *);
413 int			mfii_get_info(struct mfii_softc *);
414 int			mfii_syspd(struct mfii_softc *);
415 
416 void			mfii_start(struct mfii_softc *, struct mfii_ccb *);
417 void			mfii_done(struct mfii_softc *, struct mfii_ccb *);
418 int			mfii_poll(struct mfii_softc *, struct mfii_ccb *);
419 void			mfii_poll_done(struct mfii_softc *, struct mfii_ccb *);
420 int			mfii_exec(struct mfii_softc *, struct mfii_ccb *);
421 void			mfii_exec_done(struct mfii_softc *, struct mfii_ccb *);
422 int			mfii_my_intr(struct mfii_softc *);
423 int			mfii_intr(void *);
424 void			mfii_postq(struct mfii_softc *);
425 
426 int			mfii_load_ccb(struct mfii_softc *, struct mfii_ccb *,
427 			    void *, int);
428 int			mfii_load_mfa(struct mfii_softc *, struct mfii_ccb *,
429 			    void *, int);
430 
431 int			mfii_mfa_poll(struct mfii_softc *, struct mfii_ccb *);
432 
433 int			mfii_mgmt(struct mfii_softc *, uint32_t,
434 			    const union mfi_mbox *, void *, size_t, int);
435 int			mfii_do_mgmt(struct mfii_softc *, struct mfii_ccb *,
436 			    uint32_t, const union mfi_mbox *, void *, size_t,
437 			    int);
438 void			mfii_empty_done(struct mfii_softc *, struct mfii_ccb *);
439 
440 int			mfii_scsi_cmd_io(struct mfii_softc *,
441 			    struct scsi_xfer *);
442 int			mfii_scsi_cmd_cdb(struct mfii_softc *,
443 			    struct scsi_xfer *);
444 int			mfii_pd_scsi_cmd_cdb(struct mfii_softc *,
445 			    struct scsi_xfer *);
446 void			mfii_scsi_cmd_tmo(void *);
447 
448 int			mfii_dev_handles_update(struct mfii_softc *sc);
449 void			mfii_dev_handles_smr(void *pd_arg);
450 
451 void			mfii_abort_task(void *);
452 void			mfii_abort(struct mfii_softc *, struct mfii_ccb *,
453 			    uint16_t, uint16_t, uint8_t, uint32_t);
454 void			mfii_scsi_cmd_abort_done(struct mfii_softc *,
455 			    struct mfii_ccb *);
456 
457 int			mfii_aen_register(struct mfii_softc *);
458 void			mfii_aen_start(struct mfii_softc *, struct mfii_ccb *,
459 			    struct mfii_dmamem *, uint32_t);
460 void			mfii_aen_done(struct mfii_softc *, struct mfii_ccb *);
461 void			mfii_aen(void *);
462 void			mfii_aen_unregister(struct mfii_softc *);
463 
464 void			mfii_aen_pd_insert(struct mfii_softc *,
465 			    const struct mfi_evtarg_pd_address *);
466 void			mfii_aen_pd_remove(struct mfii_softc *,
467 			    const struct mfi_evtarg_pd_address *);
468 void			mfii_aen_pd_state_change(struct mfii_softc *,
469 			    const struct mfi_evtarg_pd_state *);
470 void			mfii_aen_ld_update(struct mfii_softc *);
471 
472 #if NBIO > 0
473 int		mfii_ioctl(struct device *, u_long, caddr_t);
474 int		mfii_bio_getitall(struct mfii_softc *);
475 int		mfii_ioctl_inq(struct mfii_softc *, struct bioc_inq *);
476 int		mfii_ioctl_vol(struct mfii_softc *, struct bioc_vol *);
477 int		mfii_ioctl_disk(struct mfii_softc *, struct bioc_disk *);
478 int		mfii_ioctl_alarm(struct mfii_softc *, struct bioc_alarm *);
479 int		mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *);
480 int		mfii_ioctl_setstate(struct mfii_softc *,
481 		    struct bioc_setstate *);
482 int		mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *);
483 int		mfii_bio_hs(struct mfii_softc *, int, int, void *);
484 
485 #ifndef SMALL_KERNEL
/* human readable strings for the BBU status bits, in bit order */
static const char *mfi_bbu_indicators[] = {
	"pack missing",
	"voltage low",
	"temp high",
	"charge active",
	"discharge active",
	"learn cycle req'd",
	"learn cycle active",
	"learn cycle failed",
	"learn cycle timeout",
	"I2C errors",
	"replace pack",
	"low capacity",
	"periodic learn req'd"
};
501 
502 void		mfii_init_ld_sensor(struct mfii_softc *, int);
503 void		mfii_refresh_ld_sensor(struct mfii_softc *, int);
504 int		mfii_create_sensors(struct mfii_softc *);
505 void		mfii_refresh_sensors(void *);
506 void		mfii_bbu(struct mfii_softc *);
507 #endif /* SMALL_KERNEL */
508 #endif /* NBIO > 0 */
509 
510 /*
511  * mfii boards support asynchronous (and non-polled) completion of
512  * dcmds by proxying them through a passthru mpii command that points
513  * at a dcmd frame. since the passthru command is submitted like
514  * the scsi commands using an SMID in the request descriptor,
515  * ccb_request memory must contain the passthru command because
516  * that is what the SMID refers to. this means ccb_request cannot
517  * contain the dcmd. rather than allocating separate dma memory to
518  * hold the dcmd, we reuse the sense memory buffer for it.
519  */
520 
521 void			mfii_dcmd_start(struct mfii_softc *,
522 			    struct mfii_ccb *);
523 
/* zero the dcmd frame (which lives in the ccb's sense buffer). */
static inline void
mfii_dcmd_scrub(struct mfii_ccb *ccb)
{
	memset(ccb->ccb_sense, 0, sizeof(*ccb->ccb_sense));
}
529 
/*
 * Return the dcmd frame overlaid on the ccb's sense buffer; the
 * CTASSERT proves the frame fits in the space being reused.
 */
static inline struct mfi_dcmd_frame *
mfii_dcmd_frame(struct mfii_ccb *ccb)
{
	CTASSERT(sizeof(struct mfi_dcmd_frame) <= sizeof(*ccb->ccb_sense));
	return ((struct mfi_dcmd_frame *)ccb->ccb_sense);
}
536 
/* sync just this ccb's slice of the sense dma region for dcmd use. */
static inline void
mfii_dcmd_sync(struct mfii_softc *sc, struct mfii_ccb *ccb, int flags)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sense),
	    ccb->ccb_sense_offset, sizeof(*ccb->ccb_sense), flags);
}
543 
544 #define mfii_fw_state(_sc) mfii_read((_sc), MFI_OSP)
545 
546 const struct mfii_iop mfii_iop_thunderbolt = {
547 	MFII_BAR,
548 	MFII_IOP_NUM_SGE_LOC_ORIG,
549 	0,
550 	MFII_REQ_TYPE_LDIO,
551 	0,
552 	MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA,
553 	0
554 };
555 
556 /*
557  * a lot of these values depend on us not implementing fastpath yet.
558  */
559 const struct mfii_iop mfii_iop_25 = {
560 	MFII_BAR,
561 	MFII_IOP_NUM_SGE_LOC_ORIG,
562 	MFII_RAID_CTX_RL_FLAGS_CPU0, /* | MFII_RAID_CTX_RL_FLAGS_SEQNO_EN */
563 	MFII_REQ_TYPE_NO_LOCK,
564 	MFII_RAID_CTX_TYPE_CUDA | 0x1,
565 	MFII_SGE_CHAIN_ELEMENT,
566 	MFII_SGE_END_OF_LIST
567 };
568 
569 const struct mfii_iop mfii_iop_35 = {
570 	MFII_BAR_35,
571 	MFII_IOP_NUM_SGE_LOC_35,
572 	MFII_RAID_CTX_ROUTING_FLAGS_CPU0, /* | MFII_RAID_CTX_ROUTING_FLAGS_SQN */
573 	MFII_REQ_TYPE_NO_LOCK,
574 	MFII_RAID_CTX_TYPE_CUDA | 0x1,
575 	MFII_SGE_CHAIN_ELEMENT,
576 	MFII_SGE_END_OF_LIST
577 };
578 
579 struct mfii_device {
580 	pcireg_t		mpd_vendor;
581 	pcireg_t		mpd_product;
582 	const struct mfii_iop	*mpd_iop;
583 };
584 
/* supported PCI ids and the iop parameters each family uses */
const struct mfii_device mfii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_2208,
	    &mfii_iop_thunderbolt },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3008,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3108,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3404,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3504,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3408,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3508,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3416,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3516,
	    &mfii_iop_35 }
};
605 
606 const struct mfii_iop *mfii_find_iop(struct pci_attach_args *);
607 
608 const struct mfii_iop *
609 mfii_find_iop(struct pci_attach_args *pa)
610 {
611 	const struct mfii_device *mpd;
612 	int i;
613 
614 	for (i = 0; i < nitems(mfii_devices); i++) {
615 		mpd = &mfii_devices[i];
616 
617 		if (mpd->mpd_vendor == PCI_VENDOR(pa->pa_id) &&
618 		    mpd->mpd_product == PCI_PRODUCT(pa->pa_id))
619 			return (mpd->mpd_iop);
620 	}
621 
622 	return (NULL);
623 }
624 
625 int
626 mfii_match(struct device *parent, void *match, void *aux)
627 {
628 	return ((mfii_find_iop(aux) != NULL) ? 1 : 0);
629 }
630 
631 void
632 mfii_attach(struct device *parent, struct device *self, void *aux)
633 {
634 	struct mfii_softc *sc = (struct mfii_softc *)self;
635 	struct pci_attach_args *pa = aux;
636 	pcireg_t memtype;
637 	pci_intr_handle_t ih;
638 	struct scsibus_attach_args saa;
639 	u_int32_t status, scpad2, scpad3;
640 	int chain_frame_sz, nsge_in_io, nsge_in_chain, i;
641 
642 	/* init sc */
643 	sc->sc_iop = mfii_find_iop(aux);
644 	sc->sc_dmat = pa->pa_dmat;
645 	SIMPLEQ_INIT(&sc->sc_ccb_freeq);
646 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
647 	mtx_init(&sc->sc_post_mtx, IPL_BIO);
648 	mtx_init(&sc->sc_reply_postq_mtx, IPL_BIO);
649 	scsi_iopool_init(&sc->sc_iopool, sc, mfii_get_ccb, mfii_put_ccb);
650 
651 	rw_init(&sc->sc_lock, "mfii_lock");
652 
653 	sc->sc_aen_ccb = NULL;
654 	task_set(&sc->sc_aen_task, mfii_aen, sc);
655 
656 	mtx_init(&sc->sc_abort_mtx, IPL_BIO);
657 	SIMPLEQ_INIT(&sc->sc_abort_list);
658 	task_set(&sc->sc_abort_task, mfii_abort_task, sc);
659 
660 	/* wire up the bus shizz */
661 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_iop->bar);
662 	if (pci_mapreg_map(pa, sc->sc_iop->bar, memtype, 0,
663 	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios, MFII_PCI_MEMSIZE)) {
664 		printf(": unable to map registers\n");
665 		return;
666 	}
667 
668 	/* disable interrupts */
669 	mfii_write(sc, MFI_OMSK, 0xffffffff);
670 
671 	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
672 		printf(": unable to map interrupt\n");
673 		goto pci_unmap;
674 	}
675 	printf(": %s\n", pci_intr_string(pa->pa_pc, ih));
676 
677 	/* lets get started */
678 	if (mfii_transition_firmware(sc))
679 		goto pci_unmap;
680 
681 	/* determine max_cmds (refer to the Linux megaraid_sas driver) */
682 	scpad3 = mfii_read(sc, MFII_OSP3);
683 	status = mfii_fw_state(sc);
684 	sc->sc_max_fw_cmds = scpad3 & MFI_STATE_MAXCMD_MASK;
685 	if (sc->sc_max_fw_cmds == 0)
686 		sc->sc_max_fw_cmds = status & MFI_STATE_MAXCMD_MASK;
687 	/*
688 	 * reduce max_cmds by 1 to ensure that the reply queue depth does not
689 	 * exceed FW supplied max_fw_cmds.
690 	 */
691 	sc->sc_max_cmds = min(sc->sc_max_fw_cmds, 1024) - 1;
692 
693 	/* determine max_sgl (refer to the Linux megaraid_sas driver) */
694 	scpad2 = mfii_read(sc, MFII_OSP2);
695 	chain_frame_sz =
696 		((scpad2 & MFII_MAX_CHAIN_MASK) >> MFII_MAX_CHAIN_SHIFT) *
697 		((scpad2 & MFII_MAX_CHAIN_UNIT) ? MFII_1MB_IO : MFII_256K_IO);
698 	if (chain_frame_sz < MFII_CHAIN_FRAME_MIN)
699 		chain_frame_sz = MFII_CHAIN_FRAME_MIN;
700 
701 	nsge_in_io = (MFII_REQUEST_SIZE -
702 		sizeof(struct mpii_msg_scsi_io) -
703 		sizeof(struct mfii_raid_context)) / sizeof(struct mfii_sge);
704 	nsge_in_chain = chain_frame_sz / sizeof(struct mfii_sge);
705 
706 	/* round down to nearest power of two */
707 	sc->sc_max_sgl = 1;
708 	while ((sc->sc_max_sgl << 1) <= (nsge_in_io + nsge_in_chain))
709 		sc->sc_max_sgl <<= 1;
710 
711 	DNPRINTF(MFII_D_MISC, "%s: OSP 0x%08x, OSP2 0x%08x, OSP3 0x%08x\n",
712 	    DEVNAME(sc), status, scpad2, scpad3);
713 	DNPRINTF(MFII_D_MISC, "%s: max_fw_cmds %d, max_cmds %d\n",
714 	    DEVNAME(sc), sc->sc_max_fw_cmds, sc->sc_max_cmds);
715 	DNPRINTF(MFII_D_MISC, "%s: nsge_in_io %d, nsge_in_chain %d, "
716 	    "max_sgl %d\n", DEVNAME(sc), nsge_in_io, nsge_in_chain,
717 	    sc->sc_max_sgl);
718 
719 	/* sense memory */
720 	CTASSERT(sizeof(struct mfi_sense) == MFI_SENSE_SIZE);
721 	sc->sc_sense = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
722 	if (sc->sc_sense == NULL) {
723 		printf("%s: unable to allocate sense memory\n", DEVNAME(sc));
724 		goto pci_unmap;
725 	}
726 
727 	/* reply post queue */
728 	sc->sc_reply_postq_depth = roundup(sc->sc_max_fw_cmds, 16);
729 
730 	sc->sc_reply_postq = mfii_dmamem_alloc(sc,
731 	    sc->sc_reply_postq_depth * sizeof(struct mpii_reply_descr));
732 	if (sc->sc_reply_postq == NULL)
733 		goto free_sense;
734 
735 	memset(MFII_DMA_KVA(sc->sc_reply_postq), 0xff,
736 	    MFII_DMA_LEN(sc->sc_reply_postq));
737 
738 	/* MPII request frame array */
739 	sc->sc_requests = mfii_dmamem_alloc(sc,
740 	    MFII_REQUEST_SIZE * (sc->sc_max_cmds + 1));
741 	if (sc->sc_requests == NULL)
742 		goto free_reply_postq;
743 
744 	/* MFI command frame array */
745 	sc->sc_mfi = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_FRAME_SIZE);
746 	if (sc->sc_mfi == NULL)
747 		goto free_requests;
748 
749 	/* MPII SGL array */
750 	sc->sc_sgl = mfii_dmamem_alloc(sc, sc->sc_max_cmds *
751 	    sizeof(struct mfii_sge) * sc->sc_max_sgl);
752 	if (sc->sc_sgl == NULL)
753 		goto free_mfi;
754 
755 	if (mfii_init_ccb(sc) != 0) {
756 		printf("%s: could not init ccb list\n", DEVNAME(sc));
757 		goto free_sgl;
758 	}
759 
760 	/* kickstart firmware with all addresses and pointers */
761 	if (mfii_initialise_firmware(sc) != 0) {
762 		printf("%s: could not initialize firmware\n", DEVNAME(sc));
763 		goto free_sgl;
764 	}
765 
766 	if (mfii_get_info(sc) != 0) {
767 		printf("%s: could not retrieve controller information\n",
768 		    DEVNAME(sc));
769 		goto free_sgl;
770 	}
771 
772 	printf("%s: \"%s\", firmware %s", DEVNAME(sc),
773 	    sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
774 	if (letoh16(sc->sc_info.mci_memory_size) > 0)
775 		printf(", %uMB cache", letoh16(sc->sc_info.mci_memory_size));
776 	printf("\n");
777 
778 	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO,
779 	    mfii_intr, sc, DEVNAME(sc));
780 	if (sc->sc_ih == NULL)
781 		goto free_sgl;
782 
783 	saa.saa_adapter_softc = sc;
784 	saa.saa_adapter = &mfii_switch;
785 	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
786 	saa.saa_adapter_buswidth = sc->sc_info.mci_max_lds;
787 	saa.saa_luns = 8;
788 	saa.saa_openings = sc->sc_max_cmds;
789 	saa.saa_pool = &sc->sc_iopool;
790 	saa.saa_quirks = saa.saa_flags = 0;
791 	saa.saa_wwpn = saa.saa_wwnn = 0;
792 
793 	sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev, &saa,
794 	    scsiprint);
795 
796 	mfii_syspd(sc);
797 
798 	if (mfii_aen_register(sc) != 0) {
799 		/* error printed by mfii_aen_register */
800 		goto intr_disestablish;
801 	}
802 
803 	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
804 	    sizeof(sc->sc_ld_list), SCSI_DATA_IN) != 0) {
805 		printf("%s: getting list of logical disks failed\n", DEVNAME(sc));
806 		goto intr_disestablish;
807 	}
808 	memset(sc->sc_target_lds, -1, sizeof(sc->sc_target_lds));
809 	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
810 		int target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
811 		sc->sc_target_lds[target] = i;
812 	}
813 
814 	/* enable interrupts */
815 	mfii_write(sc, MFI_OSTS, 0xffffffff);
816 	mfii_write(sc, MFI_OMSK, ~MFII_OSTS_INTR_VALID);
817 
818 #if NBIO > 0
819 	if (bio_register(&sc->sc_dev, mfii_ioctl) != 0)
820 		panic("%s: controller registration failed", DEVNAME(sc));
821 	else
822 		sc->sc_ioctl = mfii_ioctl;
823 
824 #ifndef SMALL_KERNEL
825 	if (mfii_create_sensors(sc) != 0)
826 		printf("%s: unable to create sensors\n", DEVNAME(sc));
827 #endif
828 #endif /* NBIO > 0 */
829 
830 	return;
831 intr_disestablish:
832 	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
833 free_sgl:
834 	mfii_dmamem_free(sc, sc->sc_sgl);
835 free_mfi:
836 	mfii_dmamem_free(sc, sc->sc_mfi);
837 free_requests:
838 	mfii_dmamem_free(sc, sc->sc_requests);
839 free_reply_postq:
840 	mfii_dmamem_free(sc, sc->sc_reply_postq);
841 free_sense:
842 	mfii_dmamem_free(sc, sc->sc_sense);
843 pci_unmap:
844 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
845 }
846 
847 static inline uint16_t
848 mfii_dev_handle(struct mfii_softc *sc, uint16_t target)
849 {
850 	struct mfii_pd_dev_handles *handles;
851 	uint16_t handle;
852 
853 	smr_read_enter();
854 	handles = SMR_PTR_GET(&sc->sc_pd->pd_dev_handles);
855 	handle = handles->pd_handles[target];
856 	smr_read_leave();
857 
858 	return (handle);
859 }
860 
861 void
862 mfii_dev_handles_smr(void *pd_arg)
863 {
864 	struct mfii_pd_dev_handles *handles = pd_arg;
865 
866 	free(handles, M_DEVBUF, sizeof(*handles));
867 }
868 
869 int
870 mfii_dev_handles_update(struct mfii_softc *sc)
871 {
872 	struct mfii_ld_map *lm;
873 	struct mfii_pd_dev_handles *handles, *old_handles;
874 	int i;
875 	int rv = 0;
876 
877 	lm = malloc(sizeof(*lm), M_TEMP, M_WAITOK|M_ZERO);
878 
879 	rv = mfii_mgmt(sc, MR_DCMD_LD_MAP_GET_INFO, NULL, lm, sizeof(*lm),
880 	    SCSI_DATA_IN|SCSI_NOSLEEP);
881 
882 	if (rv != 0) {
883 		rv = EIO;
884 		goto free_lm;
885 	}
886 
887 	handles = malloc(sizeof(*handles), M_DEVBUF, M_WAITOK);
888 	smr_init(&handles->pd_smr);
889 	for (i = 0; i < MFI_MAX_PD; i++)
890 		handles->pd_handles[i] = lm->mlm_dev_handle[i].mdh_cur_handle;
891 
892 	/* commit the updated info */
893 	sc->sc_pd->pd_timeout = lm->mlm_pd_timeout;
894 	old_handles = SMR_PTR_GET_LOCKED(&sc->sc_pd->pd_dev_handles);
895 	SMR_PTR_SET_LOCKED(&sc->sc_pd->pd_dev_handles, handles);
896 
897 	if (old_handles != NULL)
898 		smr_call(&old_handles->pd_smr, mfii_dev_handles_smr, old_handles);
899 
900 free_lm:
901 	free(lm, M_TEMP, sizeof(*lm));
902 
903 	return (rv);
904 }
905 
906 int
907 mfii_syspd(struct mfii_softc *sc)
908 {
909 	struct scsibus_attach_args saa;
910 
911 	sc->sc_pd = malloc(sizeof(*sc->sc_pd), M_DEVBUF, M_WAITOK|M_ZERO);
912 	if (sc->sc_pd == NULL)
913 		return (1);
914 
915 	if (mfii_dev_handles_update(sc) != 0)
916 		goto free_pdsc;
917 
918 	saa.saa_adapter =  &mfii_pd_switch;
919 	saa.saa_adapter_softc = sc;
920 	saa.saa_adapter_buswidth = MFI_MAX_PD;
921 	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
922 	saa.saa_luns = 8;
923 	saa.saa_openings = sc->sc_max_cmds - 1;
924 	saa.saa_pool = &sc->sc_iopool;
925 	saa.saa_quirks = saa.saa_flags = 0;
926 	saa.saa_wwpn = saa.saa_wwnn = 0;
927 
928 	sc->sc_pd->pd_scsibus = (struct scsibus_softc *)
929 	    config_found(&sc->sc_dev, &saa, scsiprint);
930 
931 	return (0);
932 
933 free_pdsc:
934 	free(sc->sc_pd, M_DEVBUF, sizeof(*sc->sc_pd));
935 	return (1);
936 }
937 
/*
 * Detach: tear down sensors, AEN, the interrupt and all dma regions.
 * The teardown mirrors mfii_attach() in reverse order.
 */
int
mfii_detach(struct device *self, int flags)
{
	struct mfii_softc *sc = (struct mfii_softc *)self;

	/* attach never got as far as the interrupt: nothing to undo */
	if (sc->sc_ih == NULL)
		return (0);

#ifndef SMALL_KERNEL
	if (sc->sc_sensors) {
		sensordev_deinstall(&sc->sc_sensordev);
		free(sc->sc_sensors, M_DEVBUF,
		    MFI_MAX_LD * sizeof(struct ksensor));
	}

	if (sc->sc_bbu) {
		free(sc->sc_bbu, M_DEVBUF, 4 * sizeof(*sc->sc_bbu));
	}

	if (sc->sc_bbu_status) {
		/*
		 * NOTE(review): sizeof(mfi_bbu_indicators) is the byte size
		 * of the pointer array, not its element count — this looks
		 * like it was meant to be nitems(mfi_bbu_indicators).
		 * Confirm against the matching allocation in
		 * mfii_create_sensors() before changing, since free(9)
		 * sizes must match the allocation.
		 */
		free(sc->sc_bbu_status, M_DEVBUF,
		    sizeof(*sc->sc_bbu_status) * sizeof(mfi_bbu_indicators));
	}
#endif /* SMALL_KERNEL */

	mfii_aen_unregister(sc);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	/* release dma regions in reverse order of allocation */
	mfii_dmamem_free(sc, sc->sc_sgl);
	mfii_dmamem_free(sc, sc->sc_mfi);
	mfii_dmamem_free(sc, sc->sc_requests);
	mfii_dmamem_free(sc, sc->sc_reply_postq);
	mfii_dmamem_free(sc, sc->sc_sense);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);

	return (0);
}
974 
/*
 * Ask the controller to flush its cache (and the disks') at powerdown.
 * The body is compiled out with #if 0, making this a no-op; presumably
 * the polled dcmd path is not trusted at shutdown time — TODO confirm
 * before re-enabling.
 */
static void
mfii_flush_cache(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
#if 0
	union mfi_mbox mbox = {
		.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE,
	};
	int rv;

	mfii_scrub_ccb(ccb);
	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH, &mbox,
	    NULL, 0, SCSI_NOSLEEP);
	if (rv != 0) {
		printf("%s: unable to flush cache\n", DEVNAME(sc));
		return;
	}
#endif
}
993 
/*
 * Tell the controller firmware to shut down.  Like mfii_flush_cache()
 * the body is compiled out with #if 0, so this is currently a no-op —
 * TODO confirm why before re-enabling.
 */
static void
mfii_shutdown(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
#if 0
	int rv;

	mfii_scrub_ccb(ccb);
	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_SHUTDOWN, NULL,
	    NULL, 0, SCSI_POLL);
	if (rv != 0) {
		printf("%s: unable to shutdown controller\n", DEVNAME(sc));
		return;
	}
#endif
}
1009 
1010 static void
1011 mfii_powerdown(struct mfii_softc *sc)
1012 {
1013 	struct mfii_ccb *ccb;
1014 
1015 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
1016 	if (ccb == NULL) {
1017 		printf("%s: unable to allocate ccb for shutdown\n",
1018 		    DEVNAME(sc));
1019 		return;
1020 	}
1021 
1022 	mfii_flush_cache(sc, ccb);
1023 	mfii_shutdown(sc, ccb);
1024 	scsi_io_put(&sc->sc_iopool, ccb);
1025 }
1026 
1027 int
1028 mfii_activate(struct device *self, int act)
1029 {
1030 	struct mfii_softc *sc = (struct mfii_softc *)self;
1031 	int rv;
1032 
1033 	switch (act) {
1034 	case DVACT_POWERDOWN:
1035 		rv = config_activate_children(&sc->sc_dev, act);
1036 		mfii_powerdown(sc);
1037 		break;
1038 	default:
1039 		rv = config_activate_children(&sc->sc_dev, act);
1040 		break;
1041 	}
1042 
1043 	return (rv);
1044 }
1045 
1046 u_int32_t
1047 mfii_read(struct mfii_softc *sc, bus_size_t r)
1048 {
1049 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1050 	    BUS_SPACE_BARRIER_READ);
1051 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
1052 }
1053 
/*
 * Write a 32bit controller register, followed by a write barrier so
 * the store is posted before any subsequent bus access.
 */
void
mfii_write(struct mfii_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
1061 
/*
 * Allocate a chunk of DMA-able memory of the given size, mapped into
 * kernel virtual space and loaded into a single-segment dmamap.
 * Returns NULL on failure; on success the caller owns the returned
 * descriptor and releases it with mfii_dmamem_free().
 */
struct mfii_dmamem *
mfii_dmamem_alloc(struct mfii_softc *sc, size_t size)
{
	struct mfii_dmamem *m;
	int nsegs;

	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (m == NULL)
		return (NULL);

	m->mdm_size = size;

	/* one contiguous segment so the device gets a single address */
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->mdm_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &m->mdm_seg, nsegs, size, &m->mdm_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, m->mdm_map, m->mdm_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (m);

	/* unwind in reverse order of construction */
unmap:
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
mdmfree:
	free(m, M_DEVBUF, sizeof *m);

	return (NULL);
}
1103 
/*
 * Release DMA memory obtained from mfii_dmamem_alloc().  The teardown
 * mirrors the allocation order exactly: unload, unmap, free the pages,
 * destroy the map, then free the descriptor itself.
 */
void
mfii_dmamem_free(struct mfii_softc *sc, struct mfii_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
	free(m, M_DEVBUF, sizeof *m);
}
1113 
/*
 * Submit a pre-built MFI DCMD frame (held in the ccb's mfi area) to the
 * controller via an MPII passthru request.  The request layout is the
 * SCSI IO message, followed by the raid context, followed by a single
 * chain SGE pointing at the sense buffer area carrying the MFI frame.
 */
void
mfii_dcmd_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);

	io->function = MFII_FUNCTION_PASSTHRU_IO;
	/* sgl_offset0 is in 32bit words; chain_offset in 16 byte units */
	io->sgl_offset0 = (uint32_t *)sge - (uint32_t *)io;
	io->chain_offset = io->sgl_offset0 / 4;

	htolem64(&sge->sg_addr, ccb->ccb_sense_dva);
	htolem32(&sge->sg_len, sizeof(*ccb->ccb_sense));
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	mfii_start(sc, ccb);
}
1134 
/*
 * Set up asynchronous event notification (AEN).  Fetch the firmware's
 * event log info to learn the boot sequence number, allocate a DMA
 * buffer for event details, and kick off the first EVENT_WAIT command.
 * On success the ccb and the DMA buffer are handed over to the AEN
 * machinery (mfii_aen_start/mfii_aen) for the lifetime of the driver
 * and are intentionally not released here.
 */
int
mfii_aen_register(struct mfii_softc *sc)
{
	struct mfi_evt_log_info mel;
	struct mfii_ccb *ccb;
	struct mfii_dmamem *mdm;
	int rv;

	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
	if (ccb == NULL) {
		printf("%s: unable to allocate ccb for aen\n", DEVNAME(sc));
		return (ENOMEM);
	}

	memset(&mel, 0, sizeof(mel));
	mfii_scrub_ccb(ccb);

	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_EVENT_GET_INFO, NULL,
	    &mel, sizeof(mel), SCSI_DATA_IN|SCSI_NOSLEEP);
	if (rv != 0) {
		scsi_io_put(&sc->sc_iopool, ccb);
		printf("%s: unable to get event info\n", DEVNAME(sc));
		return (EIO);
	}

	mdm = mfii_dmamem_alloc(sc, sizeof(struct mfi_evt_detail));
	if (mdm == NULL) {
		scsi_io_put(&sc->sc_iopool, ccb);
		printf("%s: unable to allocate event data\n", DEVNAME(sc));
		return (ENOMEM);
	}

	/* replay all the events from boot */
	mfii_aen_start(sc, ccb, mdm, lemtoh32(&mel.mel_boot_seq_num));

	return (0);
}
1172 
/*
 * Arm (or re-arm) the AEN machinery: build an MR_DCMD_CTRL_EVENT_WAIT
 * MFI frame asking for the first event at or after `seq', point its
 * 64bit SGL at the event-detail DMA buffer, and submit it.  Completion
 * fires mfii_aen_done().
 */
void
mfii_aen_start(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct mfii_dmamem *mdm, uint32_t seq)
{
	struct mfi_dcmd_frame *dcmd = mfii_dcmd_frame(ccb);
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	union mfi_sgl *sgl = &dcmd->mdf_sgl;
	union mfi_evt_class_locale mec;

	mfii_scrub_ccb(ccb);
	mfii_dcmd_scrub(ccb);
	memset(MFII_DMA_KVA(mdm), 0, MFII_DMA_LEN(mdm));

	ccb->ccb_cookie = mdm;
	ccb->ccb_done = mfii_aen_done;
	sc->sc_aen_ccb = ccb;

	/* subscribe to everything: all locales, debug class and above */
	mec.mec_members.class = MFI_EVT_CLASS_DEBUG;
	mec.mec_members.reserved = 0;
	mec.mec_members.locale = htole16(MFI_EVT_LOCALE_ALL);

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_sg_count = 1;
	hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ | MFI_FRAME_SGL64);
	htolem32(&hdr->mfh_data_len, MFII_DMA_LEN(mdm));
	dcmd->mdf_opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	htolem32(&dcmd->mdf_mbox.w[0], seq);
	htolem32(&dcmd->mdf_mbox.w[1], mec.mec_word);
	htolem64(&sgl->sg64[0].addr, MFII_DMA_DVA(mdm));
	htolem32(&sgl->sg64[0].len, MFII_DMA_LEN(mdm));

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_PREREAD);

	mfii_dcmd_sync(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	mfii_dcmd_start(sc, ccb);
}
1210 
/*
 * Completion handler for the AEN ccb.  Runs from interrupt context, so
 * the actual event processing is deferred to mfii_aen() on the system
 * task queue.
 */
void
mfii_aen_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	KASSERT(sc->sc_aen_ccb == ccb);

	/* defer to a thread with KERNEL_LOCK so we can run autoconf */
	task_add(systq, &sc->sc_aen_task);
}
1219 
/*
 * AEN task: runs in thread context after an EVENT_WAIT command
 * completed.  Syncs the event-detail buffer, dispatches on the event
 * code (physical disk insert/remove/state-change, logical disk
 * create/delete), then re-arms the AEN with the next sequence number.
 */
void
mfii_aen(void *arg)
{
	struct mfii_softc *sc = arg;
	struct mfii_ccb *ccb = sc->sc_aen_ccb;
	struct mfii_dmamem *mdm = ccb->ccb_cookie;
	const struct mfi_evt_detail *med = MFII_DMA_KVA(mdm);
	uint32_t code;

	mfii_dcmd_sync(sc, ccb,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_POSTREAD);

	code = lemtoh32(&med->med_code);

#if 0
	log(LOG_DEBUG, "%s (seq %u, code %08x) %s\n", DEVNAME(sc),
	    lemtoh32(&med->med_seq_num), code, med->med_description);
#endif

	switch (code) {
	case MFI_EVT_PD_INSERTED_EXT:
		/* only act when the argument carries a pd address */
		if (med->med_arg_type != MFI_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_insert(sc, &med->args.pd_address);
		break;
 	case MFI_EVT_PD_REMOVED_EXT:
		if (med->med_arg_type != MFI_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_remove(sc, &med->args.pd_address);
		break;

	case MFI_EVT_PD_STATE_CHANGE:
		if (med->med_arg_type != MFI_EVT_ARGS_PD_STATE)
			break;

		mfii_aen_pd_state_change(sc, &med->args.pd_state);
		break;

	case MFI_EVT_LD_CREATED:
	case MFI_EVT_LD_DELETED:
		mfii_aen_ld_update(sc);
		break;

	default:
		break;
	}

	/* wait for the event after the one we just processed */
	mfii_aen_start(sc, ccb, mdm, lemtoh32(&med->med_seq_num) + 1);
}
1273 
/*
 * A physical disk appeared: refresh the device handle map and probe
 * the new target on the passthru scsibus so it attaches.
 */
void
mfii_aen_pd_insert(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_address *pd)
{
#if 0
	printf("%s: pd inserted ext\n", DEVNAME(sc));
	printf("%s:  device_id %04x encl_id: %04x type %x\n", DEVNAME(sc),
	    lemtoh16(&pd->device_id), lemtoh16(&pd->encl_id),
	    pd->scsi_dev_type);
	printf("%s:  connected %02x addrs %016llx %016llx\n", DEVNAME(sc),
	    pd->connected.port_bitmap, lemtoh64(&pd->sas_addr[0]),
	    lemtoh64(&pd->sas_addr[1]));
#endif

	if (mfii_dev_handles_update(sc) != 0) /* refresh map */
		return;

	scsi_probe_target(sc->sc_pd->pd_scsibus, lemtoh16(&pd->device_id));
}
1293 
/*
 * A physical disk went away: deactivate and force-detach the target
 * from the passthru scsibus.
 */
void
mfii_aen_pd_remove(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_address *pd)
{
#if 0
	printf("%s: pd removed ext\n", DEVNAME(sc));
	printf("%s:  device_id %04x encl_id: %04x type %u\n", DEVNAME(sc),
	    lemtoh16(&pd->device_id), lemtoh16(&pd->encl_id),
	    pd->scsi_dev_type);
	printf("%s:  connected %02x addrs %016llx %016llx\n", DEVNAME(sc),
	    pd->connected.port_bitmap, lemtoh64(&pd->sas_addr[0]),
	    lemtoh64(&pd->sas_addr[1]));
#endif
	uint16_t target = lemtoh16(&pd->device_id);

	scsi_activate(sc->sc_pd->pd_scsibus, target, -1, DVACT_DEACTIVATE);

	/* the firmware will abort outstanding commands for us */

	scsi_detach_target(sc->sc_pd->pd_scsibus, target, DETACH_FORCE);
}
1315 
/*
 * A physical disk changed state.  If it left MFI_PD_SYSTEM (pulled or
 * reconfigured into a raid set) detach it from the passthru bus; if it
 * went from unconfigured-good to system, the firmware is exposing it
 * to us, so probe it.
 */
void
mfii_aen_pd_state_change(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_state *state)
{
	uint16_t target = lemtoh16(&state->pd.mep_device_id);

	if (state->prev_state == htole32(MFI_PD_SYSTEM) &&
	    state->new_state != htole32(MFI_PD_SYSTEM)) {
		/* it's been pulled or configured for raid */

		scsi_activate(sc->sc_pd->pd_scsibus, target, -1,
		    DVACT_DEACTIVATE);
		/* outstanding commands will simply complete or get aborted */
		scsi_detach_target(sc->sc_pd->pd_scsibus, target,
		    DETACH_FORCE);

	} else if (state->prev_state == htole32(MFI_PD_UNCONFIG_GOOD) &&
	    state->new_state == htole32(MFI_PD_SYSTEM)) {
		/* the firmware is handing the disk over */

		scsi_probe_target(sc->sc_pd->pd_scsibus, target);
	}
}
1339 
/*
 * A logical disk was created or deleted.  Re-fetch the LD list from
 * the firmware, diff it against the cached target map, and attach or
 * detach scsibus targets (and their sensors) to match.
 */
void
mfii_aen_ld_update(struct mfii_softc *sc)
{
	int i, state, target, old, nld;
	int newlds[MFI_MAX_LD];

	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), SCSI_DATA_IN) != 0) {
		DNPRINTF(MFII_D_MISC, "%s: getting list of logical disks failed\n",
		    DEVNAME(sc));
		return;
	}

	/* -1 marks targets with no logical disk behind them */
	memset(newlds, -1, sizeof(newlds));

	/* build target -> LD index map from the fresh firmware list */
	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
		state = sc->sc_ld_list.mll_list[i].mll_state;
		target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		DNPRINTF(MFII_D_MISC, "%s: target %d: state %d\n",
		    DEVNAME(sc), target, state);
		newlds[target] = i;
	}

	/* attach newly appeared targets, detach vanished ones */
	for (i = 0; i < MFI_MAX_LD; i++) {
		old = sc->sc_target_lds[i];
		nld = newlds[i];

		if (old == -1 && nld != -1) {
			DNPRINTF(MFII_D_MISC, "%s: attaching target %d\n",
			    DEVNAME(sc), i);

			scsi_probe_target(sc->sc_scsibus, i);

#ifndef SMALL_KERNEL
			mfii_init_ld_sensor(sc, nld);
			sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
#endif
		} else if (nld == -1 && old != -1) {
			DNPRINTF(MFII_D_MISC, "%s: detaching target %d\n",
			    DEVNAME(sc), i);

			scsi_activate(sc->sc_scsibus, i, -1,
			    DVACT_DEACTIVATE);
			scsi_detach_target(sc->sc_scsibus, i,
			    DETACH_FORCE);
#ifndef SMALL_KERNEL
			sensor_detach(&sc->sc_sensordev, &sc->sc_sensors[i]);
#endif
		}
	}

	memcpy(sc->sc_target_lds, newlds, sizeof(sc->sc_target_lds));
}
1393 
/*
 * Tear down AEN on detach.  Not implemented yet: the outstanding
 * EVENT_WAIT ccb and its DMA buffer are not reclaimed (XXX below).
 */
void
mfii_aen_unregister(struct mfii_softc *sc)
{
	/* XXX */
}
1399 
/*
 * Perform a full adapter reset through the MPII diagnostic register:
 * unlock it with the magic write sequence, pulse RESET_ADAPTER, then
 * poll (up to ~300s) for the reset bit to clear.  Returns 0 on
 * success, 1 on failure.
 */
int
mfii_reset_hard(struct mfii_softc *sc)
{
	u_int16_t		i;

	mfii_write(sc, MFI_OSTS, 0);

	/* enable diagnostic register */
	mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_FLUSH);
	mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_1);
	mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_2);
	mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_3);
	mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_4);
	mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_5);
	mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_6);

	delay(100);

	if ((mfii_read(sc, MPII_HOSTDIAG) & MPII_HOSTDIAG_DWRE) == 0) {
		printf("%s: failed to enable diagnostic read/write\n",
		    DEVNAME(sc));
		return(1);
	}

	/* reset ioc */
	mfii_write(sc, MPII_HOSTDIAG, MPII_HOSTDIAG_RESET_ADAPTER);

	/* 240 milliseconds */
	delay(240000);

	/* poll at 10ms intervals until the adapter comes out of reset */
	for (i = 0; i < 30000; i++) {
		if ((mfii_read(sc, MPII_HOSTDIAG) &
		    MPII_HOSTDIAG_RESET_ADAPTER) == 0)
			break;
		delay(10000);
	}
	if (i >= 30000) {
		printf("%s: failed to reset device\n", DEVNAME(sc));
		return (1);
	}

	/* disable diagnostic register */
	mfii_write(sc, MPII_WRITESEQ, 0xff);

	return(0);
}
1446 
/*
 * Drive the firmware towards MFI_STATE_READY, nudging it out of each
 * intermediate state (handshake, operational, boot message, ...) and
 * waiting up to a per-state timeout for the transition.  A FAULT state
 * triggers one hard reset attempt.  Returns 0 when the firmware is
 * ready, 1 on timeout/fault.
 */
int
mfii_transition_firmware(struct mfii_softc *sc)
{
	int32_t			fw_state, cur_state;
	int			max_wait, i, reset_on_fault = 1;

	fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;

	while (fw_state != MFI_STATE_READY) {
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			/* only one hard reset attempt per call */
			if (!reset_on_fault) {
				printf("%s: firmware fault\n", DEVNAME(sc));
				return (1);
			}
			printf("%s: firmware fault; attempting full device "
			    "reset, this can take some time\n", DEVNAME(sc));
			if (mfii_reset_hard(sc))
				return (1);
			max_wait = 20;
			reset_on_fault = 0;
			break;
		case MFI_STATE_WAIT_HANDSHAKE:
			mfii_write(sc, MFI_SKINNY_IDB,
			    MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_BB_INIT:
			max_wait = 20;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_FW_INIT:
		case MFI_STATE_FW_INIT_2:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 40;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_HOTPLUG);
			max_wait = 10;
			break;
		default:
			printf("%s: unknown firmware state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
		/* max_wait is in seconds; poll every 100ms */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		} else {
			DPRINTF("%s: firmware state change %#x -> %#x after "
			    "%d iterations\n",
			    DEVNAME(sc), cur_state, fw_state, i);
		}
	}

	return (0);
}
1518 
/*
 * Fetch the controller information structure (MR_DCMD_CTRL_GET_INFO)
 * into sc->sc_info, then dump the interesting fields via DPRINTF for
 * debug builds.  Returns 0 on success or the mfii_mgmt() error.
 */
int
mfii_get_info(struct mfii_softc *sc)
{
	int i, rv;

	rv = mfii_mgmt(sc, MR_DCMD_CTRL_GET_INFO, NULL, &sc->sc_info,
	    sizeof(sc->sc_info), SCSI_DATA_IN|SCSI_NOSLEEP);

	if (rv != 0)
		return (rv);

	/* everything below is debug output only */
	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
		DPRINTF("%s: active FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_image_component[i].mic_name,
		    sc->sc_info.mci_image_component[i].mic_version,
		    sc->sc_info.mci_image_component[i].mic_build_date,
		    sc->sc_info.mci_image_component[i].mic_build_time);
	}

	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
		DPRINTF("%s: pending FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_pending_image_component[i].mic_name,
		    sc->sc_info.mci_pending_image_component[i].mic_version,
		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
	}

	DPRINTF("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_arms,
	    sc->sc_info.mci_max_spans,
	    sc->sc_info.mci_max_arrays,
	    sc->sc_info.mci_max_lds,
	    sc->sc_info.mci_product_name);

	DPRINTF("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_serial_number,
	    sc->sc_info.mci_hw_present,
	    sc->sc_info.mci_current_fw_time,
	    sc->sc_info.mci_max_cmds,
	    sc->sc_info.mci_max_sg_elements);

	DPRINTF("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_request_size,
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_lds_degraded,
	    sc->sc_info.mci_lds_offline,
	    sc->sc_info.mci_pd_present);

	DPRINTF("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pd_disks_present,
	    sc->sc_info.mci_pd_disks_pred_failure,
	    sc->sc_info.mci_pd_disks_failed);

	DPRINTF("%s: nvram %d mem %d flash %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_nvram_size,
	    sc->sc_info.mci_memory_size,
	    sc->sc_info.mci_flash_size);

	DPRINTF("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ram_correctable_errors,
	    sc->sc_info.mci_ram_uncorrectable_errors,
	    sc->sc_info.mci_cluster_allowed,
	    sc->sc_info.mci_cluster_active);

	DPRINTF("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_strips_per_io,
	    sc->sc_info.mci_raid_levels,
	    sc->sc_info.mci_adapter_ops,
	    sc->sc_info.mci_ld_ops);

	DPRINTF("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_stripe_sz_ops.min,
	    sc->sc_info.mci_stripe_sz_ops.max,
	    sc->sc_info.mci_pd_ops,
	    sc->sc_info.mci_pd_mix_support);

	DPRINTF("%s: ecc_bucket %d pckg_prop %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ecc_bucket_count,
	    sc->sc_info.mci_package_version);

	DPRINTF("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_seq_num,
	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);

	DPRINTF("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_rebuild_rate,
	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
	    sc->sc_info.mci_properties.mcp_bgi_rate,
	    sc->sc_info.mci_properties.mcp_cc_rate);

	DPRINTF("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_recon_rate,
	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
	    sc->sc_info.mci_properties.mcp_spinup_delay,
	    sc->sc_info.mci_properties.mcp_cluster_enable);

	DPRINTF("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_coercion_mode,
	    sc->sc_info.mci_properties.mcp_alarm_enable,
	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);

	DPRINTF("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
	    sc->sc_info.mci_properties.mcp_expose_encl_devices);

	DPRINTF("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pci.mip_vendor,
	    sc->sc_info.mci_pci.mip_device,
	    sc->sc_info.mci_pci.mip_subvendor,
	    sc->sc_info.mci_pci.mip_subdevice);

	DPRINTF("%s: type %#x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_host.mih_type,
	    sc->sc_info.mci_host.mih_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0llx ", sc->sc_info.mci_host.mih_port_addr[i]);
	DPRINTF("\n");

	DPRINTF("%s: type %.x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_device.mid_type,
	    sc->sc_info.mci_device.mid_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0llx ", sc->sc_info.mci_device.mid_port_addr[i]);
	DPRINTF("\n");

	return (0);
}
1673 
/*
 * Issue an MFI frame directly (MFA descriptor, bypassing the normal
 * reply queue) and busy-wait for its completion by polling the command
 * status byte in the request frame.  Used during attach before
 * interrupts are usable.  Returns 0 on completion, 1 on timeout.
 */
int
mfii_mfa_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mfi_frame_header	*hdr = ccb->ccb_request;
	u_int64_t r;
	int to = 0, rv = 0;

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_mfa_poll called with cookie or done set");
#endif

	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_cmd_status = MFI_STAT_INVALID_STATUS;
	hdr->mfh_flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	/* build an MFA request descriptor from the frame's dva */
	r = MFII_REQ_MFA(ccb->ccb_request_dva);
	memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));

	mfii_start(sc, ccb);

	for (;;) {
		/* sync so we see the firmware's status byte update */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (hdr->mfh_cmd_status != MFI_STAT_INVALID_STATUS)
			break;

		if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
			    ccb->ccb_smid);
			ccb->ccb_flags |= MFI_CCB_F_ERR;
			rv = 1;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		delay(1000);
	}

	/* post-sync and unload any data buffer attached to the ccb */
	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
		    0, ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	return (rv);
}
1729 
/*
 * Run a ccb to completion by polling the reply post queue instead of
 * waiting for an interrupt.  The ccb's done/cookie are temporarily
 * hijacked so mfii_poll_done() can flag completion, then restored and
 * the original completion handler is invoked.
 */
int
mfii_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	void (*done)(struct mfii_softc *, struct mfii_ccb *);
	void *cookie;
	int rv = 1;

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mfii_poll_done;
	ccb->ccb_cookie = &rv;

	mfii_start(sc, ccb);

	/* spin on the reply queue until mfii_poll_done() clears rv */
	do {
		delay(10);
		mfii_postq(sc);
	} while (rv == 1);

	ccb->ccb_cookie = cookie;
	done(sc, ccb);

	return (0);
}
1755 
1756 void
1757 mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1758 {
1759 	int *rv = ccb->ccb_cookie;
1760 
1761 	*rv = 0;
1762 }
1763 
/*
 * Submit a ccb and sleep until it completes.  A stack-local mutex is
 * used as the sleep channel lock; mfii_exec_done() clears ccb_cookie
 * under it and wakes us.  Must not be called in nosleep contexts.
 */
int
mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mutex m = MUTEX_INITIALIZER(IPL_BIO);

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_exec called with cookie or done set");
#endif

	ccb->ccb_cookie = &m;
	ccb->ccb_done = mfii_exec_done;

	mfii_start(sc, ccb);

	mtx_enter(&m);
	/* NULL cookie signals completion; guards against spurious wakeup */
	while (ccb->ccb_cookie != NULL)
		msleep_nsec(ccb, &m, PRIBIO, "mfiiexec", INFSLP);
	mtx_leave(&m);

	return (0);
}
1786 
/*
 * Completion handler for mfii_exec(): clear the cookie under the
 * caller's stack mutex and wake the sleeping thread.
 */
void
mfii_exec_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mutex *m = ccb->ccb_cookie;

	mtx_enter(m);
	ccb->ccb_cookie = NULL;
	wakeup_one(ccb);
	mtx_leave(m);
}
1797 
/*
 * Convenience wrapper around mfii_do_mgmt(): grab a ccb from the
 * iopool, run the DCMD, and return the ccb.  `flags' carries the SCSI
 * data direction and sleep bits.  Returns 0 on success or an errno.
 */
int
mfii_mgmt(struct mfii_softc *sc, uint32_t opc, const union mfi_mbox *mbox,
    void *buf, size_t len, int flags)
{
	struct mfii_ccb *ccb;
	int rv;

	ccb = scsi_io_get(&sc->sc_iopool, flags);
	if (ccb == NULL)
		return (ENOMEM);

	mfii_scrub_ccb(ccb);
	rv = mfii_do_mgmt(sc, ccb, opc, mbox, buf, len, flags);
	scsi_io_put(&sc->sc_iopool, ccb);

	return (rv);
}
1815 
1816 int
1817 mfii_do_mgmt(struct mfii_softc *sc, struct mfii_ccb *ccb, uint32_t opc,
1818     const union mfi_mbox *mbox, void *buf, size_t len, int flags)
1819 {
1820 	struct mpii_msg_scsi_io *io = ccb->ccb_request;
1821 	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
1822 	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);
1823 	struct mfi_dcmd_frame *dcmd = ccb->ccb_mfi;
1824 	struct mfi_frame_header *hdr = &dcmd->mdf_header;
1825 	u_int8_t *dma_buf = NULL;
1826 	int rv = EIO;
1827 
1828 	if (cold)
1829 		flags |= SCSI_NOSLEEP;
1830 
1831 	if (buf != NULL) {
1832 		dma_buf = dma_alloc(len, PR_WAITOK);
1833 		if (dma_buf == NULL)
1834 			return (ENOMEM);
1835 	}
1836 
1837 	ccb->ccb_data = dma_buf;
1838 	ccb->ccb_len = len;
1839 	switch (flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
1840 	case SCSI_DATA_IN:
1841 		ccb->ccb_direction = MFII_DATA_IN;
1842 		hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ);
1843 		break;
1844 	case SCSI_DATA_OUT:
1845 		ccb->ccb_direction = MFII_DATA_OUT;
1846 		hdr->mfh_flags = htole16(MFI_FRAME_DIR_WRITE);
1847 		memcpy(dma_buf, buf, len);
1848 		break;
1849 	case 0:
1850 		ccb->ccb_direction = MFII_DATA_NONE;
1851 		hdr->mfh_flags = htole16(MFI_FRAME_DIR_NONE);
1852 		break;
1853 	}
1854 
1855 	if (mfii_load_mfa(sc, ccb, &dcmd->mdf_sgl,
1856 	    ISSET(flags, SCSI_NOSLEEP)) != 0) {
1857 		rv = ENOMEM;
1858 		goto done;
1859 	}
1860 
1861 	hdr->mfh_cmd = MFI_CMD_DCMD;
1862 	hdr->mfh_context = ccb->ccb_smid;
1863 	hdr->mfh_data_len = htole32(len);
1864 	hdr->mfh_sg_count = len ? ccb->ccb_dmamap->dm_nsegs : 0;
1865 
1866 	dcmd->mdf_opcode = opc;
1867 	/* handle special opcodes */
1868 	if (mbox != NULL)
1869 		memcpy(&dcmd->mdf_mbox, mbox, sizeof(dcmd->mdf_mbox));
1870 
1871 	io->function = MFII_FUNCTION_PASSTHRU_IO;
1872 
1873 	if (len) {
1874 		io->sgl_offset0 = ((u_int8_t *)sge - (u_int8_t *)io) / 4;
1875 		io->chain_offset = ((u_int8_t *)sge - (u_int8_t *)io) / 16;
1876 		htolem64(&sge->sg_addr, ccb->ccb_mfi_dva);
1877 		htolem32(&sge->sg_len, MFI_FRAME_SIZE);
1878 		sge->sg_flags =
1879 		    MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;
1880 	}
1881 
1882 	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
1883 	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);
1884 
1885 	if (ISSET(flags, SCSI_NOSLEEP)) {
1886 		ccb->ccb_done = mfii_empty_done;
1887 		mfii_poll(sc, ccb);
1888 	} else
1889 		mfii_exec(sc, ccb);
1890 
1891 	if (hdr->mfh_cmd_status == MFI_STAT_OK) {
1892 		rv = 0;
1893 
1894 		if (ccb->ccb_direction == MFII_DATA_IN)
1895 			memcpy(buf, dma_buf, len);
1896 	}
1897 
1898 done:
1899 	if (buf != NULL)
1900 		dma_free(dma_buf, len);
1901 
1902 	return (rv);
1903 }
1904 
/* no-op completion handler used for polled commands */
void
mfii_empty_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	return;
}
1910 
/*
 * Load the ccb's data buffer into its dmamap and fill the MFI frame's
 * 32bit SGL with the resulting segments, then pre-sync for the
 * transfer direction.  No-op when the ccb carries no data.  Returns 0
 * on success, 1 on dmamap load failure.
 */
int
mfii_load_mfa(struct mfii_softc *sc, struct mfii_ccb *ccb,
    void *sglp, int nosleep)
{
	union mfi_sgl *sgl = sglp;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	int error;
	int i;

	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* MFI frames use 32bit SGEs here */
	for (i = 0; i < dmap->dm_nsegs; i++) {
		sgl->sg32[i].addr = htole32(dmap->dm_segs[i].ds_addr);
		sgl->sg32[i].len = htole32(dmap->dm_segs[i].ds_len);
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	return (0);
}
1942 
/*
 * Hand a request descriptor to the controller.  The 64bit descriptor
 * must be presented atomically: on LP64 a single 8 byte write is
 * used; on 32bit platforms the low/high halves are written under
 * sc_post_mtx so descriptors from different CPUs cannot interleave.
 */
void
mfii_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	u_long *r = (u_long *)&ccb->ccb_req;

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#if defined(__LP64__)
	bus_space_write_raw_8(sc->sc_iot, sc->sc_ioh, MFI_IQPL, *r);
#else
	mtx_enter(&sc->sc_post_mtx);
	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh, MFI_IQPL, r[0]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MFI_IQPL, 8, BUS_SPACE_BARRIER_WRITE);

	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh, MFI_IQPH, r[1]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MFI_IQPH, 8, BUS_SPACE_BARRIER_WRITE);
	mtx_leave(&sc->sc_post_mtx);
#endif
}
1966 
/*
 * Common completion path: post-sync the request frame, the ccb's SGL
 * slice (if any) and its data buffer (if any), unload the data map,
 * then invoke the ccb's completion callback.
 */
void
mfii_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_POSTWRITE);
	}

	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
		    0, ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	ccb->ccb_done(sc, ccb);
}
1991 
1992 int
1993 mfii_initialise_firmware(struct mfii_softc *sc)
1994 {
1995 	struct mpii_msg_iocinit_request *iiq;
1996 	struct mfii_dmamem *m;
1997 	struct mfii_ccb *ccb;
1998 	struct mfi_init_frame *init;
1999 	int rv;
2000 
2001 	m = mfii_dmamem_alloc(sc, sizeof(*iiq));
2002 	if (m == NULL)
2003 		return (1);
2004 
2005 	iiq = MFII_DMA_KVA(m);
2006 	memset(iiq, 0, sizeof(*iiq));
2007 
2008 	iiq->function = MPII_FUNCTION_IOC_INIT;
2009 	iiq->whoinit = MPII_WHOINIT_HOST_DRIVER;
2010 
2011 	iiq->msg_version_maj = 0x02;
2012 	iiq->msg_version_min = 0x00;
2013 	iiq->hdr_version_unit = 0x10;
2014 	iiq->hdr_version_dev = 0x0;
2015 
2016 	iiq->system_request_frame_size = htole16(MFII_REQUEST_SIZE / 4);
2017 
2018 	iiq->reply_descriptor_post_queue_depth =
2019 	    htole16(sc->sc_reply_postq_depth);
2020 	iiq->reply_free_queue_depth = htole16(0);
2021 
2022 	htolem32(&iiq->sense_buffer_address_high,
2023 	    MFII_DMA_DVA(sc->sc_sense) >> 32);
2024 
2025 	htolem32(&iiq->reply_descriptor_post_queue_address_lo,
2026 	    MFII_DMA_DVA(sc->sc_reply_postq));
2027 	htolem32(&iiq->reply_descriptor_post_queue_address_hi,
2028 	    MFII_DMA_DVA(sc->sc_reply_postq) >> 32);
2029 
2030 	htolem32(&iiq->system_request_frame_base_address_lo,
2031 	    MFII_DMA_DVA(sc->sc_requests));
2032 	htolem32(&iiq->system_request_frame_base_address_hi,
2033 	    MFII_DMA_DVA(sc->sc_requests) >> 32);
2034 
2035 	iiq->timestamp = htole64(getuptime());
2036 
2037 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
2038 	if (ccb == NULL) {
2039 		/* shouldn't ever run out of ccbs during attach */
2040 		return (1);
2041 	}
2042 	mfii_scrub_ccb(ccb);
2043 	init = ccb->ccb_request;
2044 
2045 	init->mif_header.mfh_cmd = MFI_CMD_INIT;
2046 	init->mif_header.mfh_data_len = htole32(sizeof(*iiq));
2047 	init->mif_qinfo_new_addr = htole64(MFII_DMA_DVA(m));
2048 
2049 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
2050 	    0, MFII_DMA_LEN(sc->sc_reply_postq),
2051 	    BUS_DMASYNC_PREREAD);
2052 
2053 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
2054 	    0, sizeof(*iiq), BUS_DMASYNC_PREREAD);
2055 
2056 	rv = mfii_mfa_poll(sc, ccb);
2057 
2058 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
2059 	    0, sizeof(*iiq), BUS_DMASYNC_POSTREAD);
2060 
2061 	scsi_io_put(&sc->sc_iopool, ccb);
2062 	mfii_dmamem_free(sc, m);
2063 
2064 	return (rv);
2065 }
2066 
2067 int
2068 mfii_my_intr(struct mfii_softc *sc)
2069 {
2070 	u_int32_t status;
2071 
2072 	status = mfii_read(sc, MFI_OSTS);
2073 	if (ISSET(status, 0x1)) {
2074 		mfii_write(sc, MFI_OSTS, status);
2075 		return (1);
2076 	}
2077 
2078 	return (ISSET(status, MFII_OSTS_INTR_VALID) ? 1 : 0);
2079 }
2080 
int
mfii_intr(void *arg)
{
	struct mfii_softc *sc = arg;

	/* not our interrupt; let other handlers on the line look at it */
	if (mfii_my_intr(sc) == 0)
		return (0);

	/* drain completions from the reply post queue */
	mfii_postq(sc);
	return (1);
}
2093 
/*
 * Drain the reply post queue: collect every completed command the
 * controller has posted, acknowledge the new queue index to the chip,
 * and run each command's completion outside the queue mutex.
 */
void
mfii_postq(struct mfii_softc *sc)
{
	struct mfii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_reply_descr *postq = MFII_DMA_KVA(sc->sc_reply_postq);
	struct mpii_reply_descr *rdp;
	struct mfii_ccb *ccb;
	int rpi = 0;

	mtx_enter(&sc->sc_reply_postq_mtx);

	/* pick up descriptors the controller has DMAed into the queue */
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_POSTREAD);

	for (;;) {
		rdp = &postq[sc->sc_reply_postq_index];
		/* unused type means we have caught up with the hardware */
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		/* smid is 1-based; smid 0 is reserved (see mfii_init_ccb) */
		ccb = &sc->sc_ccb[letoh16(rdp->smid) - 1];
		SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		/* mark the slot unused again for the next pass */
		memset(rdp, 0xff, sizeof(*rdp));

		sc->sc_reply_postq_index++;
		sc->sc_reply_postq_index %= sc->sc_reply_postq_depth;
		rpi = 1;
	}

	/* hand the (now cleared) descriptors back to the controller */
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_PREREAD);

	/* only poke the reply post host index register if we consumed any */
	if (rpi)
		mfii_write(sc, MFII_RPI, sc->sc_reply_postq_index);

	mtx_leave(&sc->sc_reply_postq_mtx);

	/* complete the collected commands without holding the mutex */
	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		mfii_done(sc, ccb);
	}
}
2145 
/*
 * scsi midlayer entry point for commands to logical volumes.
 * READs/WRITEs go through the fast LDIO path; everything else is
 * wrapped as a raw CDB request.
 */
void
mfii_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->bus->sb_adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	switch (xs->cmd.opcode) {
	case READ_COMMAND:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_COMMAND:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
		/* I/O path: build an LDIO request frame */
		if (mfii_scsi_cmd_io(sc, xs) != 0)
			goto stuffup;

		break;

	default:
		/* non-I/O commands are passed through as raw CDBs */
		if (mfii_scsi_cmd_cdb(sc, xs) != 0)
			goto stuffup;
		break;
	}

	xs->error = XS_NOERROR;
	xs->resid = 0;

	if (ISSET(xs->flags, SCSI_POLL)) {
		/* polled commands complete before we return; no timeout */
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	/* refcnt must be set before the command can complete or time out */
	ccb->ccb_refcnt = 2; /* one for the chip, one for the timeout */
	timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
	scsi_done(xs);
}
2200 
2201 void
2202 mfii_scsi_cmd_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
2203 {
2204 	struct scsi_xfer *xs = ccb->ccb_cookie;
2205 	struct mpii_msg_scsi_io *io = ccb->ccb_request;
2206 	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
2207 	u_int refs = 1;
2208 
2209 	if (timeout_del(&xs->stimeout))
2210 		refs = 2;
2211 
2212 	switch (ctx->status) {
2213 	case MFI_STAT_OK:
2214 		break;
2215 
2216 	case MFI_STAT_SCSI_DONE_WITH_ERROR:
2217 		xs->error = XS_SENSE;
2218 		memset(&xs->sense, 0, sizeof(xs->sense));
2219 		memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
2220 		break;
2221 
2222 	case MFI_STAT_LD_OFFLINE:
2223 	case MFI_STAT_DEVICE_NOT_FOUND:
2224 		xs->error = XS_SELTIMEOUT;
2225 		break;
2226 
2227 	default:
2228 		xs->error = XS_DRIVER_STUFFUP;
2229 		break;
2230 	}
2231 
2232 	if (atomic_sub_int_nv(&ccb->ccb_refcnt, refs) == 0)
2233 		scsi_done(xs);
2234 }
2235 
2236 int
2237 mfii_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
2238 {
2239 	struct mfii_softc	*sc = link->bus->sb_adapter_softc;
2240 
2241 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_scsi_ioctl\n", DEVNAME(sc));
2242 
2243 	switch (cmd) {
2244 	case DIOCGCACHE:
2245 	case DIOCSCACHE:
2246 		return (mfii_ioctl_cache(link, cmd, (struct dk_cache *)addr));
2247 		break;
2248 
2249 	default:
2250 		if (sc->sc_ioctl)
2251 			return (sc->sc_ioctl(&sc->sc_dev, cmd, addr));
2252 		break;
2253 	}
2254 
2255 	return (ENOTTY);
2256 }
2257 
/*
 * Handle DIOCGCACHE/DIOCSCACHE for a logical volume: query (and for
 * DIOCSCACHE, update) the controller's cache policy for the LD behind
 * this scsi_link.  Returns 0 on success or an errno.
 */
int
mfii_ioctl_cache(struct scsi_link *link, u_long cmd,  struct dk_cache *dc)
{
	struct mfii_softc	*sc = link->bus->sb_adapter_softc;
	int			 rv, wrenable, rdenable;
	struct mfi_ld_prop	 ldp;
	union mfi_mbox		 mbox;

	/* refresh controller info; sc_info is used below */
	if (mfii_get_info(sc)) {
		rv = EIO;
		goto done;
	}

	/* no logical disk mapped at this target */
	if (sc->sc_target_lds[link->target] == -1) {
		rv = EIO;
		goto done;
	}

	memset(&mbox, 0, sizeof(mbox));
	mbox.b[0] = link->target;
	rv = mfii_mgmt(sc, MR_DCMD_LD_GET_PROPERTIES, &mbox, &ldp, sizeof(ldp),
	    SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	/*
	 * With controller cache memory present the controller cache
	 * policy applies; otherwise fall back to the disks' own write
	 * cache and report read cache as disabled.
	 */
	if (sc->sc_info.mci_memory_size > 0) {
		wrenable = ISSET(ldp.mlp_cur_cache_policy,
		    MR_LD_CACHE_ALLOW_WRITE_CACHE)? 1 : 0;
		rdenable = ISSET(ldp.mlp_cur_cache_policy,
		    MR_LD_CACHE_ALLOW_READ_CACHE)? 1 : 0;
	} else {
		wrenable = ISSET(ldp.mlp_diskcache_policy,
		    MR_LD_DISK_CACHE_ENABLE)? 1 : 0;
		rdenable = 0;
	}

	if (cmd == DIOCGCACHE) {
		/* query only: report current state (rv is 0 here) */
		dc->wrcache = wrenable;
		dc->rdcache = rdenable;
		goto done;
	} /* else DIOCSCACHE */

	/* requested state already in effect; nothing to do */
	if (((dc->wrcache) ? 1 : 0) == wrenable &&
	    ((dc->rdcache) ? 1 : 0) == rdenable)
		goto done;

	/* the SET_PROPERTIES mailbox identifies the LD by target/res/seq */
	memset(&mbox, 0, sizeof(mbox));
	mbox.b[0] = ldp.mlp_ld.mld_target;
	mbox.b[1] = ldp.mlp_ld.mld_res;
	mbox.s[1] = ldp.mlp_ld.mld_seq;

	if (sc->sc_info.mci_memory_size > 0) {
		if (dc->rdcache)
			SET(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_READ_CACHE);
		else
			CLR(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_READ_CACHE);
		if (dc->wrcache)
			SET(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_WRITE_CACHE);
		else
			CLR(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_WRITE_CACHE);
	} else {
		/* no controller cache: read caching cannot be enabled */
		if (dc->rdcache) {
			rv = EOPNOTSUPP;
			goto done;
		}
		if (dc->wrcache)
			ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_ENABLE;
		else
			ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_DISABLE;
	}

	rv = mfii_mgmt(sc, MR_DCMD_LD_SET_PROPERTIES, &mbox, &ldp, sizeof(ldp),
	    SCSI_DATA_OUT);
done:
	return (rv);
}
2338 
/*
 * Build a fast-path LDIO request frame for a READ/WRITE to a logical
 * volume.  Returns 0 on success, 1 if the data buffer could not be
 * mapped for DMA.
 */
int
mfii_scsi_cmd_io(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context immediately follows the MPII SCSI IO header */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	int segs;

	io->dev_handle = htole16(link->target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* sgl_offset0 is in 32-bit words; the SGL starts after io + ctx */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, &xs->cmd, xs->cmdlen);

	/* per-IOP LDIO context settings (type/nseg and region lock flags) */
	ctx->type_nseg = sc->sc_iop->ldio_ctx_type_nseg;
	ctx->timeout_value = htole16(0x14); /* XXX */
	ctx->reg_lock_flags = htole16(sc->sc_iop->ldio_ctx_reg_lock_flags);
	ctx->virtual_disk_target_id = htole16(link->target);

	/* build the scatter/gather list directly after the context */
	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (1);

	/* where the SGE count lives differs between IOP generations */
	segs = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;
	switch (sc->sc_iop->num_sge_loc) {
	case MFII_IOP_NUM_SGE_LOC_ORIG:
		ctx->num_sge = segs;
		break;
	case MFII_IOP_NUM_SGE_LOC_35:
		/* 12 bit field, but we're only using the lower 8 */
		ctx->span_arm = segs;
		break;
	}

	ccb->ccb_req.flags = sc->sc_iop->ldio_req_type;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	return (0);
}
2397 
/*
 * Build a raw-CDB LDIO request frame for a non-READ/WRITE command to a
 * logical volume.  Returns 0 on success, 1 if the data buffer could
 * not be mapped for DMA.
 */
int
mfii_scsi_cmd_cdb(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context immediately follows the MPII SCSI IO header */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);

	io->dev_handle = htole16(link->target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* sgl_offset0 is in 32-bit words; the SGL starts after io + ctx */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, &xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(link->target);

	/* build the scatter/gather list directly after the context */
	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (1);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	return (0);
}
2444 
/*
 * scsi midlayer entry point for commands to passthrough (JBOD/system)
 * physical disks; all commands go through the raw CDB path.
 */
void
mfii_pd_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->bus->sb_adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	/* mfii_pd_scsi_cmd_cdb returns an XS_* error code directly */
	xs->error = mfii_pd_scsi_cmd_cdb(sc, xs);
	if (xs->error != XS_NOERROR)
		goto done;

	xs->resid = 0;

	if (ISSET(xs->flags, SCSI_POLL)) {
		/* polled commands complete before we return; no timeout */
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	/* refcnt must be set before the command can complete or time out */
	ccb->ccb_refcnt = 2; /* one for the chip, one for the timeout */
	timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
done:
	scsi_done(xs);
}
2483 
2484 int
2485 mfii_pd_scsi_probe(struct scsi_link *link)
2486 {
2487 	struct mfii_softc *sc = link->bus->sb_adapter_softc;
2488 	struct mfi_pd_details mpd;
2489 	union mfi_mbox mbox;
2490 	int rv;
2491 
2492 	if (link->lun > 0)
2493 		return (0);
2494 
2495 	memset(&mbox, 0, sizeof(mbox));
2496 	mbox.s[0] = htole16(link->target);
2497 
2498 	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, &mpd, sizeof(mpd),
2499 	    SCSI_DATA_IN|SCSI_NOSLEEP);
2500 	if (rv != 0)
2501 		return (EIO);
2502 
2503 	if (mpd.mpd_fw_state != htole16(MFI_PD_SYSTEM))
2504 		return (ENXIO);
2505 
2506 	return (0);
2507 }
2508 
/*
 * Build a SYSPD (passthrough physical disk) request frame.  Unlike the
 * LD paths this returns an XS_* result code for the caller to put in
 * xs->error.
 */
int
mfii_pd_scsi_cmd_cdb(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context immediately follows the MPII SCSI IO header */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	uint16_t dev_handle;

	/* 0xffff means the firmware no longer has this target mapped */
	dev_handle = mfii_dev_handle(sc, link->target);
	if (dev_handle == htole16(0xffff))
		return (XS_SELTIMEOUT);

	io->dev_handle = dev_handle;
	io->function = 0;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* sgl_offset0 is in 32-bit words; the SGL starts after io + ctx */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, &xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(link->target);
	/* mark this as a system physical disk request */
	ctx->raid_flags = MFII_RAID_CTX_IO_TYPE_SYSPD;
	ctx->timeout_value = sc->sc_pd->pd_timeout;

	/* build the scatter/gather list directly after the context */
	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (XS_DRIVER_STUFFUP);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;

	ccb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);
	ccb->ccb_req.dev_handle = dev_handle;

	return (XS_NOERROR);
}
2563 
/*
 * DMA-map the ccb's data buffer and write the scatter/gather list at
 * sglp (inside the request frame).  If the list does not fit in the
 * frame, the tail of it is placed in the ccb's external SGL area and
 * linked via a chain element.  Returns 0 on success, 1 on map failure.
 */
int
mfii_load_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb, void *sglp,
    int nosleep)
{
	struct mpii_msg_request *req = ccb->ccb_request;
	struct mfii_sge *sge = NULL, *nsge = sglp;
	struct mfii_sge *ce = NULL;	/* chain element, if needed */
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	u_int space;
	int i;

	int error;

	/* nothing to transfer, nothing to map */
	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* number of SGEs that fit between sglp and the end of the frame */
	space = (MFII_REQUEST_SIZE - ((u_int8_t *)nsge - (u_int8_t *)req)) /
	    sizeof(*nsge);
	if (dmap->dm_nsegs > space) {
		/* reserve the last in-frame slot for the chain element */
		space--;

		/* the overflow SGEs live in the ccb's external SGL area */
		ccb->ccb_sgl_len = (dmap->dm_nsegs - space) * sizeof(*nsge);
		memset(ccb->ccb_sgl, 0, ccb->ccb_sgl_len);

		ce = nsge + space;
		ce->sg_addr = htole64(ccb->ccb_sgl_dva);
		ce->sg_len = htole32(ccb->ccb_sgl_len);
		ce->sg_flags = sc->sc_iop->sge_flag_chain;

		/* chain_offset is in 16-byte units from the frame start */
		req->chain_offset = ((u_int8_t *)ce - (u_int8_t *)req) / 16;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		/* reached the chain element: continue in the external SGL */
		if (nsge == ce)
			nsge = ccb->ccb_sgl;

		sge = nsge;

		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_flags = MFII_SGE_ADDR_SYSTEM;

		nsge = sge + 1;
	}
	/* mark the last SGE written as end-of-list */
	sge->sg_flags |= sc->sc_iop->sge_flag_eol;

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/* flush the external SGL area if we used it */
	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_PREWRITE);
	}

	return (0);
}
2630 
2631 void
2632 mfii_scsi_cmd_tmo(void *xsp)
2633 {
2634 	struct scsi_xfer *xs = xsp;
2635 	struct scsi_link *link = xs->sc_link;
2636 	struct mfii_softc *sc = link->bus->sb_adapter_softc;
2637 	struct mfii_ccb *ccb = xs->io;
2638 
2639 	mtx_enter(&sc->sc_abort_mtx);
2640 	SIMPLEQ_INSERT_TAIL(&sc->sc_abort_list, ccb, ccb_link);
2641 	mtx_leave(&sc->sc_abort_mtx);
2642 
2643 	task_add(systqmp, &sc->sc_abort_task);
2644 }
2645 
/*
 * Process-context task that issues a SCSI task management abort for
 * every command that timed out (queued by mfii_scsi_cmd_tmo).
 */
void
mfii_abort_task(void *scp)
{
	struct mfii_softc *sc = scp;
	struct mfii_ccb *list;

	/* take the whole abort list in one go under the mutex */
	mtx_enter(&sc->sc_abort_mtx);
	list = SIMPLEQ_FIRST(&sc->sc_abort_list);
	SIMPLEQ_INIT(&sc->sc_abort_list);
	mtx_leave(&sc->sc_abort_mtx);

	while (list != NULL) {
		struct mfii_ccb *ccb = list;
		struct scsi_xfer *xs = ccb->ccb_cookie;
		struct scsi_link *link = xs->sc_link;

		uint16_t dev_handle;
		struct mfii_ccb *accb;

		list = SIMPLEQ_NEXT(ccb, ccb_link);

		dev_handle = mfii_dev_handle(sc, link->target);
		if (dev_handle == htole16(0xffff)) {
			/* device is gone */
			if (atomic_dec_int_nv(&ccb->ccb_refcnt) == 0)
				scsi_done(xs);
			continue;
		}

		/* flags 0: may sleep until a ccb is available */
		accb = scsi_io_get(&sc->sc_iopool, 0);
		mfii_scrub_ccb(accb);
		mfii_abort(sc, accb, dev_handle, ccb->ccb_smid,
		    MPII_SCSI_TASK_ABORT_TASK,
		    htole32(MFII_TASK_MGMT_FLAGS_PD));

		/* the abort completion drops the timed-out ccb's ref */
		accb->ccb_cookie = ccb;
		accb->ccb_done = mfii_scsi_cmd_abort_done;

		mfii_start(sc, accb);
	}
}
2687 
2688 void
2689 mfii_abort(struct mfii_softc *sc, struct mfii_ccb *accb, uint16_t dev_handle,
2690     uint16_t smid, uint8_t type, uint32_t flags)
2691 {
2692 	struct mfii_task_mgmt *msg;
2693 	struct mpii_msg_scsi_task_request *req;
2694 
2695 	msg = accb->ccb_request;
2696 	req = &msg->mpii_request;
2697 	req->dev_handle = dev_handle;
2698 	req->function = MPII_FUNCTION_SCSI_TASK_MGMT;
2699 	req->task_type = type;
2700 	htolem16(&req->task_mid, smid);
2701 	msg->flags = flags;
2702 
2703 	accb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
2704 	accb->ccb_req.smid = letoh16(accb->ccb_smid);
2705 }
2706 
2707 void
2708 mfii_scsi_cmd_abort_done(struct mfii_softc *sc, struct mfii_ccb *accb)
2709 {
2710 	struct mfii_ccb *ccb = accb->ccb_cookie;
2711 	struct scsi_xfer *xs = ccb->ccb_cookie;
2712 
2713 	/* XXX check accb completion? */
2714 
2715 	scsi_io_put(&sc->sc_iopool, accb);
2716 
2717 	if (atomic_dec_int_nv(&ccb->ccb_refcnt) == 0)
2718 		scsi_done(xs);
2719 }
2720 
2721 void *
2722 mfii_get_ccb(void *cookie)
2723 {
2724 	struct mfii_softc *sc = cookie;
2725 	struct mfii_ccb *ccb;
2726 
2727 	mtx_enter(&sc->sc_ccb_mtx);
2728 	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_freeq);
2729 	if (ccb != NULL)
2730 		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
2731 	mtx_leave(&sc->sc_ccb_mtx);
2732 
2733 	return (ccb);
2734 }
2735 
2736 void
2737 mfii_scrub_ccb(struct mfii_ccb *ccb)
2738 {
2739 	ccb->ccb_cookie = NULL;
2740 	ccb->ccb_done = NULL;
2741 	ccb->ccb_flags = 0;
2742 	ccb->ccb_data = NULL;
2743 	ccb->ccb_direction = 0;
2744 	ccb->ccb_len = 0;
2745 	ccb->ccb_sgl_len = 0;
2746 	ccb->ccb_refcnt = 1;
2747 
2748 	memset(&ccb->ccb_req, 0, sizeof(ccb->ccb_req));
2749 	memset(ccb->ccb_request, 0, MFII_REQUEST_SIZE);
2750 	memset(ccb->ccb_mfi, 0, MFI_FRAME_SIZE);
2751 }
2752 
/* iopool backend: return a ccb to the free queue. */
void
mfii_put_ccb(void *cookie, void *io)
{
	struct mfii_softc *sc = cookie;
	struct mfii_ccb *ccb = io;

	mtx_enter(&sc->sc_ccb_mtx);
	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
	mtx_leave(&sc->sc_ccb_mtx);
}
2763 
2764 int
2765 mfii_init_ccb(struct mfii_softc *sc)
2766 {
2767 	struct mfii_ccb *ccb;
2768 	u_int8_t *request = MFII_DMA_KVA(sc->sc_requests);
2769 	u_int8_t *mfi = MFII_DMA_KVA(sc->sc_mfi);
2770 	u_int8_t *sense = MFII_DMA_KVA(sc->sc_sense);
2771 	u_int8_t *sgl = MFII_DMA_KVA(sc->sc_sgl);
2772 	u_int i;
2773 	int error;
2774 
2775 	sc->sc_ccb = mallocarray(sc->sc_max_cmds, sizeof(struct mfii_ccb),
2776 	    M_DEVBUF, M_WAITOK|M_ZERO);
2777 
2778 	for (i = 0; i < sc->sc_max_cmds; i++) {
2779 		ccb = &sc->sc_ccb[i];
2780 
2781 		/* create a dma map for transfer */
2782 		error = bus_dmamap_create(sc->sc_dmat,
2783 		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2784 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
2785 		if (error) {
2786 			printf("%s: cannot create ccb dmamap (%d)\n",
2787 			    DEVNAME(sc), error);
2788 			goto destroy;
2789 		}
2790 
2791 		/* select i + 1'th request. 0 is reserved for events */
2792 		ccb->ccb_smid = i + 1;
2793 		ccb->ccb_request_offset = MFII_REQUEST_SIZE * (i + 1);
2794 		ccb->ccb_request = request + ccb->ccb_request_offset;
2795 		ccb->ccb_request_dva = MFII_DMA_DVA(sc->sc_requests) +
2796 		    ccb->ccb_request_offset;
2797 
2798 		/* select i'th MFI command frame */
2799 		ccb->ccb_mfi_offset = MFI_FRAME_SIZE * i;
2800 		ccb->ccb_mfi = mfi + ccb->ccb_mfi_offset;
2801 		ccb->ccb_mfi_dva = MFII_DMA_DVA(sc->sc_mfi) +
2802 		    ccb->ccb_mfi_offset;
2803 
2804 		/* select i'th sense */
2805 		ccb->ccb_sense_offset = MFI_SENSE_SIZE * i;
2806 		ccb->ccb_sense = (struct mfi_sense *)(sense +
2807 		    ccb->ccb_sense_offset);
2808 		ccb->ccb_sense_dva = MFII_DMA_DVA(sc->sc_sense) +
2809 		    ccb->ccb_sense_offset;
2810 
2811 		/* select i'th sgl */
2812 		ccb->ccb_sgl_offset = sizeof(struct mfii_sge) *
2813 		    sc->sc_max_sgl * i;
2814 		ccb->ccb_sgl = (struct mfii_sge *)(sgl + ccb->ccb_sgl_offset);
2815 		ccb->ccb_sgl_dva = MFII_DMA_DVA(sc->sc_sgl) +
2816 		    ccb->ccb_sgl_offset;
2817 
2818 		/* add ccb to queue */
2819 		mfii_put_ccb(sc, ccb);
2820 	}
2821 
2822 	return (0);
2823 
2824 destroy:
2825 	/* free dma maps and ccb memory */
2826 	while ((ccb = mfii_get_ccb(sc)) != NULL)
2827 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
2828 
2829 	free(sc->sc_ccb, M_DEVBUF, 0);
2830 
2831 	return (1);
2832 }
2833 
2834 #if NBIO > 0
/*
 * bio(4) ioctl entry point: dispatch to the per-command handlers,
 * serialized by the softc's bio lock.
 */
int
mfii_ioctl(struct device *dev, u_long cmd, caddr_t addr)
{
	struct mfii_softc	*sc = (struct mfii_softc *)dev;
	int error = 0;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl ", DEVNAME(sc));

	/* serialize bio ioctls; the handlers share cached state in sc */
	rw_enter_write(&sc->sc_lock);

	switch (cmd) {
	case BIOCINQ:
		DNPRINTF(MFII_D_IOCTL, "inq\n");
		error = mfii_ioctl_inq(sc, (struct bioc_inq *)addr);
		break;

	case BIOCVOL:
		DNPRINTF(MFII_D_IOCTL, "vol\n");
		error = mfii_ioctl_vol(sc, (struct bioc_vol *)addr);
		break;

	case BIOCDISK:
		DNPRINTF(MFII_D_IOCTL, "disk\n");
		error = mfii_ioctl_disk(sc, (struct bioc_disk *)addr);
		break;

	case BIOCALARM:
		DNPRINTF(MFII_D_IOCTL, "alarm\n");
		error = mfii_ioctl_alarm(sc, (struct bioc_alarm *)addr);
		break;

	case BIOCBLINK:
		DNPRINTF(MFII_D_IOCTL, "blink\n");
		error = mfii_ioctl_blink(sc, (struct bioc_blink *)addr);
		break;

	case BIOCSETSTATE:
		DNPRINTF(MFII_D_IOCTL, "setstate\n");
		error = mfii_ioctl_setstate(sc, (struct bioc_setstate *)addr);
		break;

	case BIOCPATROL:
		DNPRINTF(MFII_D_IOCTL, "patrol\n");
		error = mfii_ioctl_patrol(sc, (struct bioc_patrol *)addr);
		break;

	default:
		DNPRINTF(MFII_D_IOCTL, " invalid ioctl\n");
		error = ENOTTY;
	}

	rw_exit_write(&sc->sc_lock);

	return (error);
}
2890 
/*
 * Refresh the cached firmware view used by the bio(4) handlers:
 * controller info, the RAID configuration (sc_cfg), the LD list and
 * per-LD details, and the count of physical disks in use (sc_no_pd).
 * Caller holds sc_lock.  Returns 0 on success or EINVAL on failure.
 */
int
mfii_bio_getitall(struct mfii_softc *sc)
{
	int			i, d, rv = EINVAL;
	size_t			size;
	union mfi_mbox		mbox;
	struct mfi_conf		*cfg = NULL;
	struct mfi_ld_details	*ld_det = NULL;

	/* get info */
	if (mfii_get_info(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_get_info failed\n",
		    DEVNAME(sc));
		goto done;
	}

	/* send single element command to retrieve size for full structure */
	cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
	    SCSI_DATA_IN)) {
		free(cfg, M_DEVBUF, sizeof *cfg);
		goto done;
	}

	/* mfc_size is the total size of the full configuration */
	size = cfg->mfc_size;
	free(cfg, M_DEVBUF, sizeof *cfg);

	/* memory for read config */
	cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size, SCSI_DATA_IN)) {
		free(cfg, M_DEVBUF, size);
		goto done;
	}

	/* replace current pointer with new one */
	if (sc->sc_cfg)
		free(sc->sc_cfg, M_DEVBUF, 0);
	sc->sc_cfg = cfg;

	/* get all ld info */
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), SCSI_DATA_IN))
		goto done;

	/* get memory for all ld structures */
	size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
	if (sc->sc_ld_sz != size) {
		/* LD count changed since last refresh; reallocate */
		if (sc->sc_ld_details)
			free(sc->sc_ld_details, M_DEVBUF, 0);

		ld_det = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (ld_det == NULL)
			goto done;
		sc->sc_ld_sz = size;
		sc->sc_ld_details = ld_det;
	}

	/* find used physical disks */
	size = sizeof(struct mfi_ld_details);
	for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
		memset(&mbox, 0, sizeof(mbox));
		mbox.b[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		if (mfii_mgmt(sc, MR_DCMD_LD_GET_INFO, &mbox, &sc->sc_ld_details[i], size,
		    SCSI_DATA_IN))
			goto done;

		/* disks per span times span count gives disks in this LD */
		d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
		    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
	}
	sc->sc_no_pd = d;

	rv = 0;
done:
	return (rv);
}
2970 
/*
 * BIOCINQ: report disk and volume counts and the device name to
 * bioctl(8).  Returns 0 on success or EINVAL.
 */
int
mfii_ioctl_inq(struct mfii_softc *sc, struct bioc_inq *bi)
{
	int			rv = EINVAL;
	struct mfi_conf		*cfg = NULL;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_inq\n", DEVNAME(sc));

	/* refresh the cached firmware state (sc_info, sc_cfg, ...) */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	/* count unused disks as volumes */
	if (sc->sc_cfg == NULL)
		goto done;
	cfg = sc->sc_cfg;

	bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
	/* hotspares are reported as extra volumes */
	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
#if notyet
	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
	    (bi->bi_nodisk - sc->sc_no_pd);
#endif
	/* tell bio who we are */
	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));

	rv = 0;
done:
	return (rv);
}
3003 
/*
 * BIOCVOL: describe one volume (state, progress, cache mode, RAID
 * level, size) to bioctl(8).  Volume ids beyond the LD list are
 * treated as hotspares/unused disks.  Returns 0 on success or EINVAL.
 */
int
mfii_ioctl_vol(struct mfii_softc *sc, struct bioc_vol *bv)
{
	int			i, per, target, rv = EINVAL;
	struct scsi_link	*link;
	struct device		*dev;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_vol %#x\n",
	    DEVNAME(sc), bv->bv_volid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
		/* go do hotspares & unused disks */
		rv = mfii_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
		goto done;
	}

	i = bv->bv_volid;
	target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
	link = scsi_get_link(sc->sc_scsibus, target, 0);
	if (link == NULL) {
		/* no sd(4) attached for this LD */
		strlcpy(bv->bv_dev, "cache", sizeof(bv->bv_dev));
	} else {
		dev = link->device_softc;
		if (dev == NULL)
			goto done;

		strlcpy(bv->bv_dev, dev->dv_xname, sizeof(bv->bv_dev));
	}

	/* map the firmware LD state onto a bio volume status */
	switch(sc->sc_ld_list.mll_list[i].mll_state) {
	case MFI_LD_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;

	case MFI_LD_PART_DEGRADED:
	case MFI_LD_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;

	case MFI_LD_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;

	default:
		bv->bv_status = BIOC_SVINVALID;
		DNPRINTF(MFII_D_IOCTL, "%s: invalid logical disk state %#x\n",
		    DEVNAME(sc),
		    sc->sc_ld_list.mll_list[i].mll_state);
	}

	/* additional status can modify MFI status */
	switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
	case MFI_LD_PROG_CC:
		/* consistency check in progress */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_BGI:
		/* background initialization in progress */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_FGI:
	case MFI_LD_PROG_RECONSTRUCT:
		/* nothing yet */
		break;
	}

	/* bit 0 of the cache policy selects write-back */
	if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01)
		bv->bv_cache = BIOC_CVWRITEBACK;
	else
		bv->bv_cache = BIOC_CVWRITETHROUGH;

	/*
	 * The RAID levels are determined per the SNIA DDF spec, this is only
	 * a subset that is valid for the MFI controller.
	 */
	bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
	/* spanned sets are reported as the x10 variant (eg 1 -> 10) */
	if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth > 1)
		bv->bv_level *= 10;

	bv->bv_nodisk = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;

	bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */

	rv = 0;
done:
	return (rv);
}
3107 
/*
 * BIOC_DISK ioctl backend: fill in a bioc_disk for the physical drive
 * addressed by bd->bd_volid (logical volume) and bd->bd_diskid (disk
 * position within that volume).  Volume ids at or beyond the number of
 * configured logical drives are hotspare queries and are handed off to
 * mfii_bio_hs().  Returns 0 on success, EINVAL otherwise.
 */
int
mfii_ioctl_disk(struct mfii_softc *sc, struct bioc_disk *bd)
{
	struct mfi_conf		*cfg;
	struct mfi_array	*ar;
	struct mfi_ld_cfg	*ld;
	struct mfi_pd_details	*pd;
	struct mfi_pd_list	*pl;
	struct mfi_pd_progress	*mfp;
	struct mfi_progress	*mp;
	struct scsi_inquiry_data *inqbuf;
	char			vend[8+16+4+1], *vendp;
	int			i, rv = EINVAL;
	int			arr, vol, disk, span;
	union mfi_mbox		mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_disk %#x\n",
	    DEVNAME(sc), bd->bd_diskid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	cfg = sc->sc_cfg;

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	ar = cfg->mfc_array;
	vol = bd->bd_volid;
	if (vol >= cfg->mfc_no_ld) {
		/* do hotspares */
		rv = mfii_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
		goto freeme;
	}

	/* calculate offset to ld structure */
	ld = (struct mfi_ld_cfg *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array);

	/* use span 0 only when raid group is not spanned */
	if (ld[vol].mlc_parm.mpa_span_depth > 1)
		span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
	else
		span = 0;
	arr = ld[vol].mlc_span[span].mls_index;

	/* offset disk into pd list */
	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;

	/* 0xffff in the array slot marks a missing/absent drive */
	if (ar[arr].pd[disk].mar_pd.mfp_id == 0xffffU) {
		/* disk is missing but succeed command */
		bd->bd_status = BIOC_SDFAILED;
		rv = 0;

		/* try to find an unused disk for the target to rebuild */
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
		    SCSI_DATA_IN))
			goto freeme;

		for (i = 0; i < pl->mpl_no_pd; i++) {
			if (pl->mpl_address[i].mpa_scsi_type != 0)
				continue;

			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
			    SCSI_DATA_IN))
				continue;

			if (pd->mpd_fw_state == MFI_PD_UNCONFIG_GOOD ||
			    pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD)
				break;
		}

		/* no rebuild candidate found; report the missing disk as-is */
		if (i == pl->mpl_no_pd)
			goto freeme;
	} else {
		memset(&mbox, 0, sizeof(mbox));
		mbox.s[0] = ar[arr].pd[disk].mar_pd.mfp_id;
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
		    SCSI_DATA_IN)) {
			bd->bd_status = BIOC_SDINVALID;
			goto freeme;
		}
	}

	/* get the remaining fields */
	bd->bd_channel = pd->mpd_enc_idx;
	bd->bd_target = pd->mpd_enc_slot;

	/* translate firmware drive state into a bioc status */
	switch (pd->mpd_fw_state){
	case MFI_PD_UNCONFIG_GOOD:
	case MFI_PD_UNCONFIG_BAD:
		bd->bd_status = BIOC_SDUNUSED;
		break;

	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
		bd->bd_status = BIOC_SDHOTSPARE;
		break;

	case MFI_PD_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;

	case MFI_PD_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;

	case MFI_PD_REBUILD:
		bd->bd_status = BIOC_SDREBUILD;
		break;

	case MFI_PD_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;

	case MFI_PD_COPYBACK:
	case MFI_PD_SYSTEM:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = pd->mpd_size * 512; /* bytes per block */

	/*
	 * vend collects the raw INQUIRY vendor(8)+product(16)+revision(4)
	 * bytes (hence the 8+16+4+1 sizing) and NUL-terminates them.
	 */
	inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
	vendp = inqbuf->vendor;
	memcpy(vend, vendp, sizeof vend - 1);
	vend[sizeof vend - 1] = '\0';
	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));

	/* XXX find a way to retrieve serial nr from drive */
	/* XXX find a way to get bd_procdev */

	/* report patrol-read progress if one is running on this drive */
	mfp = &pd->mpd_progress;
	if (mfp->mfp_in_prog & MFI_PD_PROG_PR) {
		mp = &mfp->mfp_patrol_read;
		bd->bd_patrol.bdp_percent = (mp->mp_progress * 100) / 0xffff;
		bd->bd_patrol.bdp_seconds = mp->mp_elapsed_seconds;
	}

	rv = 0;
freeme:
	free(pd, M_DEVBUF, sizeof *pd);
	free(pl, M_DEVBUF, sizeof *pl);

	return (rv);
}
3261 
3262 int
3263 mfii_ioctl_alarm(struct mfii_softc *sc, struct bioc_alarm *ba)
3264 {
3265 	uint32_t		opc, flags = 0;
3266 	int			rv = 0;
3267 	int8_t			ret;
3268 
3269 	switch(ba->ba_opcode) {
3270 	case BIOC_SADISABLE:
3271 		opc = MR_DCMD_SPEAKER_DISABLE;
3272 		break;
3273 
3274 	case BIOC_SAENABLE:
3275 		opc = MR_DCMD_SPEAKER_ENABLE;
3276 		break;
3277 
3278 	case BIOC_SASILENCE:
3279 		opc = MR_DCMD_SPEAKER_SILENCE;
3280 		break;
3281 
3282 	case BIOC_GASTATUS:
3283 		opc = MR_DCMD_SPEAKER_GET;
3284 		flags = SCSI_DATA_IN;
3285 		break;
3286 
3287 	case BIOC_SATEST:
3288 		opc = MR_DCMD_SPEAKER_TEST;
3289 		break;
3290 
3291 	default:
3292 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_alarm biocalarm invalid "
3293 		    "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
3294 		return (EINVAL);
3295 	}
3296 
3297 	if (mfii_mgmt(sc, opc, NULL, &ret, sizeof(ret), flags))
3298 		rv = EINVAL;
3299 	else
3300 		if (ba->ba_opcode == BIOC_GASTATUS)
3301 			ba->ba_status = ret;
3302 		else
3303 			ba->ba_status = 0;
3304 
3305 	return (rv);
3306 }
3307 
3308 int
3309 mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *bb)
3310 {
3311 	int			i, found, rv = EINVAL;
3312 	union mfi_mbox		mbox;
3313 	uint32_t		cmd;
3314 	struct mfi_pd_list	*pd;
3315 
3316 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink %x\n", DEVNAME(sc),
3317 	    bb->bb_status);
3318 
3319 	/* channel 0 means not in an enclosure so can't be blinked */
3320 	if (bb->bb_channel == 0)
3321 		return (EINVAL);
3322 
3323 	pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
3324 
3325 	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pd, sizeof(*pd), SCSI_DATA_IN))
3326 		goto done;
3327 
3328 	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
3329 		if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
3330 		    bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
3331 			found = 1;
3332 			break;
3333 		}
3334 
3335 	if (!found)
3336 		goto done;
3337 
3338 	memset(&mbox, 0, sizeof(mbox));
3339 	mbox.s[0] = pd->mpl_address[i].mpa_pd_id;
3340 
3341 	switch (bb->bb_status) {
3342 	case BIOC_SBUNBLINK:
3343 		cmd = MR_DCMD_PD_UNBLINK;
3344 		break;
3345 
3346 	case BIOC_SBBLINK:
3347 		cmd = MR_DCMD_PD_BLINK;
3348 		break;
3349 
3350 	case BIOC_SBALARM:
3351 	default:
3352 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink biocblink invalid "
3353 		    "opcode %x\n", DEVNAME(sc), bb->bb_status);
3354 		goto done;
3355 	}
3356 
3357 
3358 	if (mfii_mgmt(sc, cmd, &mbox, NULL, 0, 0) == 0)
3359 		rv = 0;
3360 
3361 done:
3362 	free(pd, M_DEVBUF, sizeof *pd);
3363 	return (rv);
3364 }
3365 
/*
 * Bring physical disk pd_id into an UNCONFIG_GOOD, non-foreign state so
 * it can be used as a rebuild target (called from the BIOC_SSREBUILD
 * path in mfii_ioctl_setstate()).  The drive details are re-read after
 * every state-changing command so each step acts on current firmware
 * state.  Returns 0 on success, an mfii_mgmt() error, or ENXIO if the
 * drive still is not usable at the end.
 */
static int
mfii_makegood(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfii_foreign_scan_info *fsi;
	struct mfi_pd_details	*pd;
	union mfi_mbox		mbox;
	int			rv;

	fsi = malloc(sizeof *fsi, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD) {
		/* PD_SET_STATE mbox: s[0] = pd id, s[1] = seq, b[4] = state */
		mbox.s[0] = pd_id;
		mbox.s[1] = pd->mpd_pd.mfp_seq;
		mbox.b[4] = MFI_PD_UNCONFIG_GOOD;
		rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0, 0);
		if (rv != 0)
			goto done;
	}

	/* re-read: the state change above may have altered the details */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	/* clear any foreign DDF configuration present on the drive */
	if (pd->mpd_ddf_state & MFI_DDF_FOREIGN) {
		rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_SCAN, NULL, fsi, sizeof(*fsi),
		    SCSI_DATA_IN);
		if (rv != 0)
			goto done;

		if (fsi->count > 0) {
			rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_CLEAR, NULL, NULL, 0, 0);
			if (rv != 0)
				goto done;
		}
	}

	/* verify the drive ended up usable: UNCONFIG_GOOD and not foreign */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state != MFI_PD_UNCONFIG_GOOD ||
	    pd->mpd_ddf_state & MFI_DDF_FOREIGN)
		rv = ENXIO;

done:
	free(fsi, M_DEVBUF, sizeof *fsi);
	free(pd, M_DEVBUF, sizeof *pd);

	return (rv);
}
3427 
3428 static int
3429 mfii_makespare(struct mfii_softc *sc, uint16_t pd_id)
3430 {
3431 	struct mfi_hotspare	*hs;
3432 	struct mfi_pd_details	*pd;
3433 	union mfi_mbox		mbox;
3434 	size_t			size;
3435 	int			rv = EINVAL;
3436 
3437 	/* we really could skip and expect that inq took care of it */
3438 	if (mfii_bio_getitall(sc)) {
3439 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
3440 		    DEVNAME(sc));
3441 		return (rv);
3442 	}
3443 	size = sizeof *hs + sizeof(uint16_t) * sc->sc_cfg->mfc_no_array;
3444 
3445 	hs = malloc(size, M_DEVBUF, M_WAITOK);
3446 	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3447 
3448 	memset(&mbox, 0, sizeof mbox);
3449 	mbox.s[0] = pd_id;
3450 	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3451 	    SCSI_DATA_IN);
3452 	if (rv != 0)
3453 		goto done;
3454 
3455 	memset(hs, 0, size);
3456 	hs->mhs_pd.mfp_id = pd->mpd_pd.mfp_id;
3457 	hs->mhs_pd.mfp_seq = pd->mpd_pd.mfp_seq;
3458 	rv = mfii_mgmt(sc, MR_DCMD_CFG_MAKE_SPARE, NULL, hs, size, SCSI_DATA_OUT);
3459 
3460 done:
3461 	free(hs, M_DEVBUF, size);
3462 	free(pd, M_DEVBUF, sizeof *pd);
3463 
3464 	return (rv);
3465 }
3466 
/*
 * BIOC_SETSTATE ioctl backend: change the firmware state of the
 * physical disk at bs->bs_channel/bs->bs_target to online, offline,
 * hotspare or rebuild.  Returns 0 on success, EINVAL or an
 * mfii_mgmt()/helper error otherwise.
 */
int
mfii_ioctl_setstate(struct mfii_softc *sc, struct bioc_setstate *bs)
{
	struct mfi_pd_details	*pd;
	struct mfi_pd_list	*pl;
	int			i, found, rv = EINVAL;
	union mfi_mbox		mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate %x\n", DEVNAME(sc),
	    bs->bs_status);

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl), SCSI_DATA_IN))
		goto done;

	/* locate the drive at the requested enclosure/slot position */
	for (i = 0, found = 0; i < pl->mpl_no_pd; i++)
		if (bs->bs_channel == pl->mpl_address[i].mpa_enc_index &&
		    bs->bs_target == pl->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;

	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN))
		goto done;

	/* PD_SET_STATE mbox: s[0] = pd id, s[1] = seq, b[4] = new state */
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
	mbox.s[1] = pd->mpd_pd.mfp_seq;

	switch (bs->bs_status) {
	case BIOC_SSONLINE:
		mbox.b[4] = MFI_PD_ONLINE;
		break;

	case BIOC_SSOFFLINE:
		mbox.b[4] = MFI_PD_OFFLINE;
		break;

	case BIOC_SSHOTSPARE:
		mbox.b[4] = MFI_PD_HOTSPARE;
		break;

	case BIOC_SSREBUILD:
		/* a drive not yet OFFLINE is first run through
		 * makegood/makespare to turn it into a rebuild target */
		if (pd->mpd_fw_state != MFI_PD_OFFLINE) {
			if ((rv = mfii_makegood(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			if ((rv = mfii_makespare(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			/* re-read the details after the state changes above */
			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
			    SCSI_DATA_IN);
			if (rv != 0)
				goto done;

			/* rebuilding might be started by mfii_makespare() */
			if (pd->mpd_fw_state == MFI_PD_REBUILD) {
				rv = 0;
				goto done;
			}

			/* rebuild the mbox with the refreshed sequence nr */
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			mbox.s[1] = pd->mpd_pd.mfp_seq;
		}
		mbox.b[4] = MFI_PD_REBUILD;
		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate invalid "
		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
		goto done;
	}


	rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0, 0);
done:
	free(pd, M_DEVBUF, sizeof *pd);
	free(pl, M_DEVBUF, sizeof *pl);
	return (rv);
}
3558 
/*
 * BIOC_PATROL ioctl backend: start or stop patrol reads, switch the
 * patrol-read operating mode (manual/disabled/auto with optional
 * interval and start time), or report the current properties and
 * state.  Returns 0 on success, EINVAL on a bad argument or a failed
 * firmware command.
 */
int
mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *bp)
{
	uint32_t		opc;
	int			rv = 0;
	struct mfi_pr_properties prop;
	struct mfi_pr_status	status;
	uint32_t		time, exec_freq;

	switch (bp->bp_opcode) {
	case BIOC_SPSTOP:
	case BIOC_SPSTART:
		if (bp->bp_opcode == BIOC_SPSTART)
			opc = MR_DCMD_PR_START;
		else
			opc = MR_DCMD_PR_STOP;
		/* no payload; direction flag passed through as-is */
		if (mfii_mgmt(sc, opc, NULL, NULL, 0, SCSI_DATA_IN))
			return (EINVAL);
		break;

	case BIOC_SPMANUAL:
	case BIOC_SPDISABLE:
	case BIOC_SPAUTO:
		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time), SCSI_DATA_IN))
			return (EINVAL);

		/* read-modify-write the patrol properties */
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop), SCSI_DATA_IN))
			return (EINVAL);

		switch (bp->bp_opcode) {
		case BIOC_SPMANUAL:
			prop.op_mode = MFI_PR_OPMODE_MANUAL;
			break;
		case BIOC_SPDISABLE:
			prop.op_mode = MFI_PR_OPMODE_DISABLED;
			break;
		case BIOC_SPAUTO:
			if (bp->bp_autoival != 0) {
				if (bp->bp_autoival == -1)
					/* continuously */
					exec_freq = 0xffffffffU;
				else if (bp->bp_autoival > 0)
					exec_freq = bp->bp_autoival;
				else
					return (EINVAL);
				prop.exec_freq = exec_freq;
			}
			if (bp->bp_autonext != 0) {
				/* next start is relative to the device time */
				if (bp->bp_autonext < 0)
					return (EINVAL);
				else
					prop.next_exec = time + bp->bp_autonext;
			}
			prop.op_mode = MFI_PR_OPMODE_AUTO;
			break;
		}

		opc = MR_DCMD_PR_SET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop), SCSI_DATA_OUT))
			return (EINVAL);

		break;

	case BIOC_GPSTATUS:
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop), SCSI_DATA_IN))
			return (EINVAL);

		opc = MR_DCMD_PR_GET_STATUS;
		if (mfii_mgmt(sc, opc, NULL, &status, sizeof(status), SCSI_DATA_IN))
			return (EINVAL);

		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time), SCSI_DATA_IN))
			return (EINVAL);

		/* translate the firmware op mode into bioc terms */
		switch (prop.op_mode) {
		case MFI_PR_OPMODE_AUTO:
			bp->bp_mode = BIOC_SPMAUTO;
			bp->bp_autoival = prop.exec_freq;
			bp->bp_autonext = prop.next_exec;
			bp->bp_autonow = time;
			break;
		case MFI_PR_OPMODE_MANUAL:
			bp->bp_mode = BIOC_SPMMANUAL;
			break;
		case MFI_PR_OPMODE_DISABLED:
			bp->bp_mode = BIOC_SPMDISABLED;
			break;
		default:
			printf("%s: unknown patrol mode %d\n",
			    DEVNAME(sc), prop.op_mode);
			break;
		}

		/* translate the firmware patrol state into bioc terms */
		switch (status.state) {
		case MFI_PR_STATE_STOPPED:
			bp->bp_status = BIOC_SPSSTOPPED;
			break;
		case MFI_PR_STATE_READY:
			bp->bp_status = BIOC_SPSREADY;
			break;
		case MFI_PR_STATE_ACTIVE:
			bp->bp_status = BIOC_SPSACTIVE;
			break;
		case MFI_PR_STATE_ABORTED:
			bp->bp_status = BIOC_SPSABORTED;
			break;
		default:
			printf("%s: unknown patrol state %d\n",
			    DEVNAME(sc), status.state);
			break;
		}

		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_patrol biocpatrol invalid "
		    "opcode %x\n", DEVNAME(sc), bp->bp_opcode);
		return (EINVAL);
	}

	return (rv);
}
3687 
3688 int
3689 mfii_bio_hs(struct mfii_softc *sc, int volid, int type, void *bio_hs)
3690 {
3691 	struct mfi_conf		*cfg;
3692 	struct mfi_hotspare	*hs;
3693 	struct mfi_pd_details	*pd;
3694 	struct bioc_disk	*sdhs;
3695 	struct bioc_vol		*vdhs;
3696 	struct scsi_inquiry_data *inqbuf;
3697 	char			vend[8+16+4+1], *vendp;
3698 	int			i, rv = EINVAL;
3699 	uint32_t		size;
3700 	union mfi_mbox		mbox;
3701 
3702 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs %d\n", DEVNAME(sc), volid);
3703 
3704 	if (!bio_hs)
3705 		return (EINVAL);
3706 
3707 	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3708 
3709 	/* send single element command to retrieve size for full structure */
3710 	cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
3711 	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg), SCSI_DATA_IN))
3712 		goto freeme;
3713 
3714 	size = cfg->mfc_size;
3715 	free(cfg, M_DEVBUF, sizeof *cfg);
3716 
3717 	/* memory for read config */
3718 	cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
3719 	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size, SCSI_DATA_IN))
3720 		goto freeme;
3721 
3722 	/* calculate offset to hs structure */
3723 	hs = (struct mfi_hotspare *)(
3724 	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
3725 	    cfg->mfc_array_size * cfg->mfc_no_array +
3726 	    cfg->mfc_ld_size * cfg->mfc_no_ld);
3727 
3728 	if (volid < cfg->mfc_no_ld)
3729 		goto freeme; /* not a hotspare */
3730 
3731 	if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
3732 		goto freeme; /* not a hotspare */
3733 
3734 	/* offset into hotspare structure */
3735 	i = volid - cfg->mfc_no_ld;
3736 
3737 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs i %d volid %d no_ld %d no_hs %d "
3738 	    "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
3739 	    cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
3740 
3741 	/* get pd fields */
3742 	memset(&mbox, 0, sizeof(mbox));
3743 	mbox.s[0] = hs[i].mhs_pd.mfp_id;
3744 	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3745 	    SCSI_DATA_IN)) {
3746 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs illegal PD\n",
3747 		    DEVNAME(sc));
3748 		goto freeme;
3749 	}
3750 
3751 	switch (type) {
3752 	case MFI_MGMT_VD:
3753 		vdhs = bio_hs;
3754 		vdhs->bv_status = BIOC_SVONLINE;
3755 		vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3756 		vdhs->bv_level = -1; /* hotspare */
3757 		vdhs->bv_nodisk = 1;
3758 		break;
3759 
3760 	case MFI_MGMT_SD:
3761 		sdhs = bio_hs;
3762 		sdhs->bd_status = BIOC_SDHOTSPARE;
3763 		sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3764 		sdhs->bd_channel = pd->mpd_enc_idx;
3765 		sdhs->bd_target = pd->mpd_enc_slot;
3766 		inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
3767 		vendp = inqbuf->vendor;
3768 		memcpy(vend, vendp, sizeof vend - 1);
3769 		vend[sizeof vend - 1] = '\0';
3770 		strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
3771 		break;
3772 
3773 	default:
3774 		goto freeme;
3775 	}
3776 
3777 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs 6\n", DEVNAME(sc));
3778 	rv = 0;
3779 freeme:
3780 	free(pd, M_DEVBUF, sizeof *pd);
3781 	free(cfg, M_DEVBUF, 0);
3782 
3783 	return (rv);
3784 }
3785 
3786 #ifndef SMALL_KERNEL
3787 
3788 #define MFI_BBU_SENSORS 4
3789 
3790 void
3791 mfii_bbu(struct mfii_softc *sc)
3792 {
3793 	struct mfi_bbu_status bbu;
3794 	u_int32_t status;
3795 	u_int32_t mask;
3796 	u_int32_t soh_bad;
3797 	int i;
3798 
3799 	if (mfii_mgmt(sc, MR_DCMD_BBU_GET_STATUS, NULL, &bbu,
3800 	    sizeof(bbu), SCSI_DATA_IN) != 0) {
3801 		for (i = 0; i < MFI_BBU_SENSORS; i++) {
3802 			sc->sc_bbu[i].value = 0;
3803 			sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
3804 		}
3805 		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
3806 			sc->sc_bbu_status[i].value = 0;
3807 			sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
3808 		}
3809 		return;
3810 	}
3811 
3812 	switch (bbu.battery_type) {
3813 	case MFI_BBU_TYPE_IBBU:
3814 		mask = MFI_BBU_STATE_BAD_IBBU;
3815 		soh_bad = 0;
3816 		break;
3817 	case MFI_BBU_TYPE_BBU:
3818 		mask = MFI_BBU_STATE_BAD_BBU;
3819 		soh_bad = (bbu.detail.bbu.is_SOH_good == 0);
3820 		break;
3821 
3822 	case MFI_BBU_TYPE_NONE:
3823 	default:
3824 		sc->sc_bbu[0].value = 0;
3825 		sc->sc_bbu[0].status = SENSOR_S_CRIT;
3826 		for (i = 1; i < MFI_BBU_SENSORS; i++) {
3827 			sc->sc_bbu[i].value = 0;
3828 			sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
3829 		}
3830 		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
3831 			sc->sc_bbu_status[i].value = 0;
3832 			sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
3833 		}
3834 		return;
3835 	}
3836 
3837 	status = letoh32(bbu.fw_status);
3838 
3839 	sc->sc_bbu[0].value = ((status & mask) || soh_bad) ? 0 : 1;
3840 	sc->sc_bbu[0].status = ((status & mask) || soh_bad) ? SENSOR_S_CRIT :
3841 	    SENSOR_S_OK;
3842 
3843 	sc->sc_bbu[1].value = letoh16(bbu.voltage) * 1000;
3844 	sc->sc_bbu[2].value = (int16_t)letoh16(bbu.current) * 1000;
3845 	sc->sc_bbu[3].value = letoh16(bbu.temperature) * 1000000 + 273150000;
3846 	for (i = 1; i < MFI_BBU_SENSORS; i++)
3847 		sc->sc_bbu[i].status = SENSOR_S_UNSPEC;
3848 
3849 	for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
3850 		sc->sc_bbu_status[i].value = (status & (1 << i)) ? 1 : 0;
3851 		sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
3852 	}
3853 }
3854 
3855 void
3856 mfii_refresh_ld_sensor(struct mfii_softc *sc, int ld)
3857 {
3858 	struct ksensor *sensor;
3859 	int target;
3860 
3861 	target = sc->sc_ld_list.mll_list[ld].mll_ld.mld_target;
3862 	sensor = &sc->sc_sensors[target];
3863 
3864 	switch(sc->sc_ld_list.mll_list[ld].mll_state) {
3865 	case MFI_LD_OFFLINE:
3866 		sensor->value = SENSOR_DRIVE_FAIL;
3867 		sensor->status = SENSOR_S_CRIT;
3868 		break;
3869 
3870 	case MFI_LD_PART_DEGRADED:
3871 	case MFI_LD_DEGRADED:
3872 		sensor->value = SENSOR_DRIVE_PFAIL;
3873 		sensor->status = SENSOR_S_WARN;
3874 		break;
3875 
3876 	case MFI_LD_ONLINE:
3877 		sensor->value = SENSOR_DRIVE_ONLINE;
3878 		sensor->status = SENSOR_S_OK;
3879 		break;
3880 
3881 	default:
3882 		sensor->value = 0; /* unknown */
3883 		sensor->status = SENSOR_S_UNKNOWN;
3884 		break;
3885 	}
3886 }
3887 
3888 void
3889 mfii_init_ld_sensor(struct mfii_softc *sc, int ld)
3890 {
3891 	struct device		*dev;
3892 	struct scsi_link	*link;
3893 	struct ksensor		*sensor;
3894 	int			target;
3895 
3896 	target = sc->sc_ld_list.mll_list[ld].mll_ld.mld_target;
3897 	sensor = &sc->sc_sensors[target];
3898 
3899 	link = scsi_get_link(sc->sc_scsibus, target, 0);
3900 	if (link == NULL) {
3901 		strlcpy(sensor->desc, "cache", sizeof(sensor->desc));
3902 	} else {
3903 		dev = link->device_softc;
3904 		if (dev != NULL)
3905 			strlcpy(sensor->desc, dev->dv_xname,
3906 			    sizeof(sensor->desc));
3907 	}
3908 	sensor->type = SENSOR_DRIVE;
3909 	mfii_refresh_ld_sensor(sc, ld);
3910 }
3911 
3912 int
3913 mfii_create_sensors(struct mfii_softc *sc)
3914 {
3915 	int			i, target;
3916 
3917 	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
3918 	    sizeof(sc->sc_sensordev.xname));
3919 
3920 	if (ISSET(letoh32(sc->sc_info.mci_hw_present), MFI_INFO_HW_BBU)) {
3921 		sc->sc_bbu = mallocarray(4, sizeof(*sc->sc_bbu),
3922 		    M_DEVBUF, M_WAITOK | M_ZERO);
3923 
3924 		sc->sc_bbu[0].type = SENSOR_INDICATOR;
3925 		sc->sc_bbu[0].status = SENSOR_S_UNKNOWN;
3926 		strlcpy(sc->sc_bbu[0].desc, "bbu ok",
3927 		    sizeof(sc->sc_bbu[0].desc));
3928 		sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[0]);
3929 
3930 		sc->sc_bbu[1].type = SENSOR_VOLTS_DC;
3931 		sc->sc_bbu[1].status = SENSOR_S_UNSPEC;
3932 		sc->sc_bbu[2].type = SENSOR_AMPS;
3933 		sc->sc_bbu[2].status = SENSOR_S_UNSPEC;
3934 		sc->sc_bbu[3].type = SENSOR_TEMP;
3935 		sc->sc_bbu[3].status = SENSOR_S_UNSPEC;
3936 		for (i = 1; i < MFI_BBU_SENSORS; i++) {
3937 			strlcpy(sc->sc_bbu[i].desc, "bbu",
3938 			    sizeof(sc->sc_bbu[i].desc));
3939 			sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[i]);
3940 		}
3941 
3942 		sc->sc_bbu_status = malloc(sizeof(*sc->sc_bbu_status) *
3943 		    sizeof(mfi_bbu_indicators), M_DEVBUF, M_WAITOK | M_ZERO);
3944 
3945 		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
3946 			sc->sc_bbu_status[i].type = SENSOR_INDICATOR;
3947 			sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
3948 			strlcpy(sc->sc_bbu_status[i].desc,
3949 			    mfi_bbu_indicators[i],
3950 			    sizeof(sc->sc_bbu_status[i].desc));
3951 
3952 			sensor_attach(&sc->sc_sensordev, &sc->sc_bbu_status[i]);
3953 		}
3954 	}
3955 
3956 	sc->sc_sensors = mallocarray(MFI_MAX_LD, sizeof(struct ksensor),
3957 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3958 	if (sc->sc_sensors == NULL)
3959 		return (1);
3960 
3961 	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
3962 		mfii_init_ld_sensor(sc, i);
3963 		target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
3964 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[target]);
3965 	}
3966 
3967 	if (sensor_task_register(sc, mfii_refresh_sensors, 10) == NULL)
3968 		goto bad;
3969 
3970 	sensordev_install(&sc->sc_sensordev);
3971 
3972 	return (0);
3973 
3974 bad:
3975 	free(sc->sc_sensors, M_DEVBUF,
3976 	    MFI_MAX_LD * sizeof(struct ksensor));
3977 
3978 	return (1);
3979 }
3980 
3981 void
3982 mfii_refresh_sensors(void *arg)
3983 {
3984 	struct mfii_softc	*sc = arg;
3985 	int			i;
3986 
3987 	rw_enter_write(&sc->sc_lock);
3988 	if (sc->sc_bbu != NULL)
3989 		mfii_bbu(sc);
3990 
3991 	mfii_bio_getitall(sc);
3992 	rw_exit_write(&sc->sc_lock);
3993 
3994 	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++)
3995 		mfii_refresh_ld_sensor(sc, i);
3996 }
3997 #endif /* SMALL_KERNEL */
3998 #endif /* NBIO > 0 */
3999