/* xref: /openbsd-src/sys/dev/pci/mfii.c (revision d5abdd01d7a5f24fb6f9b0aab446ef59a9e9067a) */
1 /* $OpenBSD: mfii.c,v 1.88 2023/05/25 19:35:58 kurt Exp $ */
2 
3 /*
4  * Copyright (c) 2012 David Gwynne <dlg@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "bio.h"
20 
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/malloc.h>
24 #include <sys/device.h>
25 #include <sys/dkio.h>
26 #include <sys/pool.h>
27 #include <sys/task.h>
28 #include <sys/atomic.h>
29 #include <sys/sensors.h>
30 #include <sys/rwlock.h>
31 #include <sys/syslog.h>
32 #include <sys/smr.h>
33 
34 #include <dev/biovar.h>
35 #include <dev/pci/pcidevs.h>
36 #include <dev/pci/pcivar.h>
37 
38 #include <machine/bus.h>
39 
40 #include <scsi/scsi_all.h>
41 #include <scsi/scsi_disk.h>
42 #include <scsi/scsiconf.h>
43 
44 #include <dev/ic/mfireg.h>
45 #include <dev/pci/mpiireg.h>
46 
47 #define	MFII_BAR		0x14
48 #define MFII_BAR_35		0x10
49 #define	MFII_PCI_MEMSIZE	0x2000 /* 8k */
50 
51 #define MFII_OSTS_INTR_VALID	0x00000009
52 #define MFII_RPI		0x6c /* reply post host index */
53 #define MFII_OSP2		0xb4 /* outbound scratch pad 2 */
54 #define MFII_OSP3		0xb8 /* outbound scratch pad 3 */
55 
56 #define MFII_REQ_TYPE_SCSI	MPII_REQ_DESCR_SCSI_IO
57 #define MFII_REQ_TYPE_LDIO	(0x7 << 1)
58 #define MFII_REQ_TYPE_MFA	(0x1 << 1)
59 #define MFII_REQ_TYPE_NO_LOCK	(0x2 << 1)
60 #define MFII_REQ_TYPE_HI_PRI	(0x6 << 1)
61 
62 #define MFII_REQ_MFA(_a)	htole64((_a) | MFII_REQ_TYPE_MFA)
63 
64 #define MFII_FUNCTION_PASSTHRU_IO			(0xf0)
65 #define MFII_FUNCTION_LDIO_REQUEST			(0xf1)
66 
67 #define MFII_MAX_CHAIN_UNIT	0x00400000
68 #define MFII_MAX_CHAIN_MASK	0x000003E0
69 #define MFII_MAX_CHAIN_SHIFT	5
70 
71 #define MFII_256K_IO		128
72 #define MFII_1MB_IO		(MFII_256K_IO * 4)
73 
74 #define MFII_CHAIN_FRAME_MIN	1024
75 
76 struct mfii_request_descr {
77 	u_int8_t	flags;
78 	u_int8_t	msix_index;
79 	u_int16_t	smid;
80 
81 	u_int16_t	lmid;
82 	u_int16_t	dev_handle;
83 } __packed;
84 
85 #define MFII_RAID_CTX_IO_TYPE_SYSPD	(0x1 << 4)
86 #define MFII_RAID_CTX_TYPE_CUDA		(0x2 << 4)
87 
88 struct mfii_raid_context {
89 	u_int8_t	type_nseg;
90 	u_int8_t	_reserved1;
91 	u_int16_t	timeout_value;
92 
93 	u_int16_t	reg_lock_flags;
94 #define MFII_RAID_CTX_RL_FLAGS_SEQNO_EN	(0x08)
95 #define MFII_RAID_CTX_RL_FLAGS_CPU0	(0x00)
96 #define MFII_RAID_CTX_RL_FLAGS_CPU1	(0x10)
97 #define MFII_RAID_CTX_RL_FLAGS_CUDA	(0x80)
98 
99 #define MFII_RAID_CTX_ROUTING_FLAGS_SQN	(1 << 4)
100 #define MFII_RAID_CTX_ROUTING_FLAGS_CPU0 0
101 	u_int16_t	virtual_disk_target_id;
102 
103 	u_int64_t	reg_lock_row_lba;
104 
105 	u_int32_t	reg_lock_length;
106 
107 	u_int16_t	next_lm_id;
108 	u_int8_t	ex_status;
109 	u_int8_t	status;
110 
111 	u_int8_t	raid_flags;
112 	u_int8_t	num_sge;
113 	u_int16_t	config_seq_num;
114 
115 	u_int8_t	span_arm;
116 	u_int8_t	_reserved3[3];
117 } __packed;
118 
119 struct mfii_sge {
120 	u_int64_t	sg_addr;
121 	u_int32_t	sg_len;
122 	u_int16_t	_reserved;
123 	u_int8_t	sg_next_chain_offset;
124 	u_int8_t	sg_flags;
125 } __packed;
126 
127 #define MFII_SGE_ADDR_MASK		(0x03)
128 #define MFII_SGE_ADDR_SYSTEM		(0x00)
129 #define MFII_SGE_ADDR_IOCDDR		(0x01)
130 #define MFII_SGE_ADDR_IOCPLB		(0x02)
131 #define MFII_SGE_ADDR_IOCPLBNTA		(0x03)
132 #define MFII_SGE_END_OF_LIST		(0x40)
133 #define MFII_SGE_CHAIN_ELEMENT		(0x80)
134 
135 #define MFII_REQUEST_SIZE	256
136 
137 #define MR_DCMD_LD_MAP_GET_INFO			0x0300e101
138 
139 #define MFII_MAX_ROW		32
140 #define MFII_MAX_ARRAY		128
141 
142 struct mfii_array_map {
143 	uint16_t		mam_pd[MFII_MAX_ROW];
144 } __packed;
145 
146 struct mfii_dev_handle {
147 	uint16_t		mdh_cur_handle;
148 	uint8_t			mdh_valid;
149 	uint8_t			mdh_reserved;
150 	uint16_t		mdh_handle[2];
151 } __packed;
152 
153 struct mfii_ld_map {
154 	uint32_t		mlm_total_size;
155 	uint32_t		mlm_reserved1[5];
156 	uint32_t		mlm_num_lds;
157 	uint32_t		mlm_reserved2;
158 	uint8_t			mlm_tgtid_to_ld[2 * MFI_MAX_LD];
159 	uint8_t			mlm_pd_timeout;
160 	uint8_t			mlm_reserved3[7];
161 	struct mfii_array_map	mlm_am[MFII_MAX_ARRAY];
162 	struct mfii_dev_handle	mlm_dev_handle[MFI_MAX_PD];
163 } __packed;
164 
165 struct mfii_task_mgmt {
166 	union {
167 		uint8_t			request[128];
168 		struct mpii_msg_scsi_task_request
169 					mpii_request;
170 	} __packed __aligned(8);
171 
172 	union {
173 		uint8_t			reply[128];
174 		uint32_t		flags;
175 #define MFII_TASK_MGMT_FLAGS_LD				(1 << 0)
176 #define MFII_TASK_MGMT_FLAGS_PD				(1 << 1)
177 		struct mpii_msg_scsi_task_reply
178 					mpii_reply;
179 	} __packed __aligned(8);
180 } __packed __aligned(8);
181 
182 struct mfii_dmamem {
183 	bus_dmamap_t		mdm_map;
184 	bus_dma_segment_t	mdm_seg;
185 	size_t			mdm_size;
186 	caddr_t			mdm_kva;
187 };
188 #define MFII_DMA_MAP(_mdm)	((_mdm)->mdm_map)
189 #define MFII_DMA_LEN(_mdm)	((_mdm)->mdm_size)
190 #define MFII_DMA_DVA(_mdm)	((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
191 #define MFII_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
192 
193 struct mfii_softc;
194 
/*
 * Per-command control block.  Each ccb owns a fixed slice of the
 * sc_requests, sc_mfi, sc_sense and sc_sgl DMA regions; the *_dva
 * fields hold the 64-bit device addresses of those slices and the
 * *_offset fields their byte offsets (used for bus_dmamap_sync).
 */
struct mfii_ccb {
	void			*ccb_request;	/* kva of MPII request frame */
	u_int64_t		ccb_request_dva;
	bus_addr_t		ccb_request_offset;

	void			*ccb_mfi;	/* kva of MFI command frame */
	u_int64_t		ccb_mfi_dva;
	bus_addr_t		ccb_mfi_offset;

	struct mfi_sense	*ccb_sense;	/* also reused for dcmd frames */
	u_int64_t		ccb_sense_dva;
	bus_addr_t		ccb_sense_offset;

	struct mfii_sge		*ccb_sgl;	/* slice of the MPII SGL array */
	u_int64_t		ccb_sgl_dva;
	bus_addr_t		ccb_sgl_offset;
	u_int			ccb_sgl_len;

	struct mfii_request_descr ccb_req;	/* descriptor posted to the hw */

	bus_dmamap_t		ccb_dmamap;	/* map for ccb_data */

	/* data for sgl */
	void			*ccb_data;
	size_t			ccb_len;

	int			ccb_direction;
#define MFII_DATA_NONE			0
#define MFII_DATA_IN			1
#define MFII_DATA_OUT			2

	void			*ccb_cookie;	/* caller context for ccb_done */
	void			(*ccb_done)(struct mfii_softc *,
				    struct mfii_ccb *);

	u_int32_t		ccb_flags;
#define MFI_CCB_F_ERR			(1<<0)
	u_int			ccb_smid;	/* hw command id (SMID) */
	u_int			ccb_refcnt;
	SIMPLEQ_ENTRY(mfii_ccb)	ccb_link;	/* freeq / abort list linkage */
};
SIMPLEQ_HEAD(mfii_ccb_list, mfii_ccb);
237 
238 struct mfii_pd_dev_handles {
239 	struct smr_entry	pd_smr;
240 	uint16_t		pd_handles[MFI_MAX_PD];
241 };
242 
243 struct mfii_pd_softc {
244 	struct scsibus_softc	*pd_scsibus;
245 	struct mfii_pd_dev_handles *pd_dev_handles;
246 	uint8_t			pd_timeout;
247 };
248 
249 struct mfii_iop {
250 	int bar;
251 	int num_sge_loc;
252 #define MFII_IOP_NUM_SGE_LOC_ORIG	0
253 #define MFII_IOP_NUM_SGE_LOC_35		1
254 	u_int16_t ldio_ctx_reg_lock_flags;
255 	u_int8_t ldio_req_type;
256 	u_int8_t ldio_ctx_type_nseg;
257 	u_int8_t sge_flag_chain;
258 	u_int8_t sge_flag_eol;
259 };
260 
261 struct mfii_softc {
262 	struct device		sc_dev;
263 	const struct mfii_iop	*sc_iop;
264 
265 	pci_chipset_tag_t	sc_pc;
266 	pcitag_t		sc_tag;
267 
268 	bus_space_tag_t		sc_iot;
269 	bus_space_handle_t	sc_ioh;
270 	bus_size_t		sc_ios;
271 	bus_dma_tag_t		sc_dmat;
272 
273 	void			*sc_ih;
274 
275 	struct mutex		sc_ccb_mtx;
276 	struct mutex		sc_post_mtx;
277 
278 	u_int			sc_max_fw_cmds;
279 	u_int			sc_max_cmds;
280 	u_int			sc_max_sgl;
281 
282 	u_int			sc_reply_postq_depth;
283 	u_int			sc_reply_postq_index;
284 	struct mutex		sc_reply_postq_mtx;
285 	struct mfii_dmamem	*sc_reply_postq;
286 
287 	struct mfii_dmamem	*sc_requests;
288 	struct mfii_dmamem	*sc_mfi;
289 	struct mfii_dmamem	*sc_sense;
290 	struct mfii_dmamem	*sc_sgl;
291 
292 	struct mfii_ccb		*sc_ccb;
293 	struct mfii_ccb_list	sc_ccb_freeq;
294 
295 	struct mfii_ccb		*sc_aen_ccb;
296 	struct task		sc_aen_task;
297 
298 	struct mutex		sc_abort_mtx;
299 	struct mfii_ccb_list	sc_abort_list;
300 	struct task		sc_abort_task;
301 
302 	struct scsibus_softc	*sc_scsibus;
303 	struct mfii_pd_softc	*sc_pd;
304 	struct scsi_iopool	sc_iopool;
305 
306 	/* save some useful information for logical drives that is missing
307 	 * in sc_ld_list
308 	 */
309 	struct {
310 		char		ld_dev[16];	/* device name sd? */
311 	}			sc_ld[MFI_MAX_LD];
312 	int			sc_target_lds[MFI_MAX_LD];
313 
314 	/* scsi ioctl from sd device */
315 	int			(*sc_ioctl)(struct device *, u_long, caddr_t);
316 
317 	/* bio */
318 	struct mfi_conf		*sc_cfg;
319 	struct mfi_ctrl_info	sc_info;
320 	struct mfi_ld_list	sc_ld_list;
321 	struct mfi_ld_details	*sc_ld_details; /* array to all logical disks */
322 	int			sc_no_pd; /* used physical disks */
323 	int			sc_ld_sz; /* sizeof sc_ld_details */
324 
325 	/* mgmt lock */
326 	struct rwlock		sc_lock;
327 
328 	/* sensors */
329 	struct ksensordev	sc_sensordev;
330 	struct ksensor		*sc_bbu;
331 	struct ksensor		*sc_bbu_status;
332 	struct ksensor		*sc_sensors;
333 };
334 
335 #ifdef MFII_DEBUG
336 #define DPRINTF(x...)		do { if (mfii_debug) printf(x); } while(0)
337 #define DNPRINTF(n,x...)	do { if (mfii_debug & n) printf(x); } while(0)
338 #define	MFII_D_CMD		0x0001
339 #define	MFII_D_INTR		0x0002
340 #define	MFII_D_MISC		0x0004
341 #define	MFII_D_DMA		0x0008
342 #define	MFII_D_IOCTL		0x0010
343 #define	MFII_D_RW		0x0020
344 #define	MFII_D_MEM		0x0040
345 #define	MFII_D_CCB		0x0080
346 uint32_t	mfii_debug = 0
347 /*		    | MFII_D_CMD */
348 /*		    | MFII_D_INTR */
349 		    | MFII_D_MISC
350 /*		    | MFII_D_DMA */
351 /*		    | MFII_D_IOCTL */
352 /*		    | MFII_D_RW */
353 /*		    | MFII_D_MEM */
354 /*		    | MFII_D_CCB */
355 		;
356 #else
357 #define DPRINTF(x...)
358 #define DNPRINTF(n,x...)
359 #endif
360 
361 int		mfii_match(struct device *, void *, void *);
362 void		mfii_attach(struct device *, struct device *, void *);
363 int		mfii_detach(struct device *, int);
364 int		mfii_activate(struct device *, int);
365 
366 const struct cfattach mfii_ca = {
367 	sizeof(struct mfii_softc),
368 	mfii_match,
369 	mfii_attach,
370 	mfii_detach,
371 	mfii_activate,
372 };
373 
374 struct cfdriver mfii_cd = {
375 	NULL,
376 	"mfii",
377 	DV_DULL
378 };
379 
380 void		mfii_scsi_cmd(struct scsi_xfer *);
381 void		mfii_scsi_cmd_done(struct mfii_softc *, struct mfii_ccb *);
382 int		mfii_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
383 int		mfii_ioctl_cache(struct scsi_link *, u_long, struct dk_cache *);
384 
385 const struct scsi_adapter mfii_switch = {
386 	mfii_scsi_cmd, NULL, NULL, NULL, mfii_scsi_ioctl
387 };
388 
389 void		mfii_pd_scsi_cmd(struct scsi_xfer *);
390 int		mfii_pd_scsi_probe(struct scsi_link *);
391 
392 const struct scsi_adapter mfii_pd_switch = {
393 	mfii_pd_scsi_cmd, NULL, mfii_pd_scsi_probe, NULL, NULL,
394 };
395 
396 #define DEVNAME(_sc)		((_sc)->sc_dev.dv_xname)
397 
398 u_int32_t		mfii_read(struct mfii_softc *, bus_size_t);
399 void			mfii_write(struct mfii_softc *, bus_size_t, u_int32_t);
400 
401 struct mfii_dmamem *	mfii_dmamem_alloc(struct mfii_softc *, size_t);
402 void			mfii_dmamem_free(struct mfii_softc *,
403 			    struct mfii_dmamem *);
404 
405 void *			mfii_get_ccb(void *);
406 void			mfii_put_ccb(void *, void *);
407 int			mfii_init_ccb(struct mfii_softc *);
408 void			mfii_scrub_ccb(struct mfii_ccb *);
409 
410 int			mfii_reset_hard(struct mfii_softc *);
411 int			mfii_transition_firmware(struct mfii_softc *);
412 int			mfii_initialise_firmware(struct mfii_softc *);
413 int			mfii_get_info(struct mfii_softc *);
414 int			mfii_syspd(struct mfii_softc *);
415 
416 void			mfii_start(struct mfii_softc *, struct mfii_ccb *);
417 void			mfii_done(struct mfii_softc *, struct mfii_ccb *);
418 int			mfii_poll(struct mfii_softc *, struct mfii_ccb *);
419 void			mfii_poll_done(struct mfii_softc *, struct mfii_ccb *);
420 int			mfii_exec(struct mfii_softc *, struct mfii_ccb *);
421 void			mfii_exec_done(struct mfii_softc *, struct mfii_ccb *);
422 int			mfii_my_intr(struct mfii_softc *);
423 int			mfii_intr(void *);
424 void			mfii_postq(struct mfii_softc *);
425 
426 int			mfii_load_ccb(struct mfii_softc *, struct mfii_ccb *,
427 			    void *, int);
428 int			mfii_load_mfa(struct mfii_softc *, struct mfii_ccb *,
429 			    void *, int);
430 
431 int			mfii_mfa_poll(struct mfii_softc *, struct mfii_ccb *);
432 
433 int			mfii_mgmt(struct mfii_softc *, uint32_t,
434 			    const union mfi_mbox *, void *, size_t, int);
435 int			mfii_do_mgmt(struct mfii_softc *, struct mfii_ccb *,
436 			    uint32_t, const union mfi_mbox *, void *, size_t,
437 			    int);
438 void			mfii_empty_done(struct mfii_softc *, struct mfii_ccb *);
439 
440 int			mfii_scsi_cmd_io(struct mfii_softc *,
441 			    struct scsi_xfer *);
442 int			mfii_scsi_cmd_cdb(struct mfii_softc *,
443 			    struct scsi_xfer *);
444 int			mfii_pd_scsi_cmd_cdb(struct mfii_softc *,
445 			    struct scsi_xfer *);
446 void			mfii_scsi_cmd_tmo(void *);
447 
448 int			mfii_dev_handles_update(struct mfii_softc *sc);
449 void			mfii_dev_handles_smr(void *pd_arg);
450 
451 void			mfii_abort_task(void *);
452 void			mfii_abort(struct mfii_softc *, struct mfii_ccb *,
453 			    uint16_t, uint16_t, uint8_t, uint32_t);
454 void			mfii_scsi_cmd_abort_done(struct mfii_softc *,
455 			    struct mfii_ccb *);
456 
457 int			mfii_aen_register(struct mfii_softc *);
458 void			mfii_aen_start(struct mfii_softc *, struct mfii_ccb *,
459 			    struct mfii_dmamem *, uint32_t);
460 void			mfii_aen_done(struct mfii_softc *, struct mfii_ccb *);
461 void			mfii_aen(void *);
462 void			mfii_aen_unregister(struct mfii_softc *);
463 
464 void			mfii_aen_pd_insert(struct mfii_softc *,
465 			    const struct mfi_evtarg_pd_address *);
466 void			mfii_aen_pd_remove(struct mfii_softc *,
467 			    const struct mfi_evtarg_pd_address *);
468 void			mfii_aen_pd_state_change(struct mfii_softc *,
469 			    const struct mfi_evtarg_pd_state *);
470 void			mfii_aen_ld_update(struct mfii_softc *);
471 
472 #if NBIO > 0
473 int		mfii_ioctl(struct device *, u_long, caddr_t);
474 int		mfii_bio_getitall(struct mfii_softc *);
475 int		mfii_ioctl_inq(struct mfii_softc *, struct bioc_inq *);
476 int		mfii_ioctl_vol(struct mfii_softc *, struct bioc_vol *);
477 int		mfii_ioctl_disk(struct mfii_softc *, struct bioc_disk *);
478 int		mfii_ioctl_alarm(struct mfii_softc *, struct bioc_alarm *);
479 int		mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *);
480 int		mfii_ioctl_setstate(struct mfii_softc *,
481 		    struct bioc_setstate *);
482 int		mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *);
483 int		mfii_bio_hs(struct mfii_softc *, int, int, void *);
484 
485 #ifndef SMALL_KERNEL
486 static const char *mfi_bbu_indicators[] = {
487 	"pack missing",
488 	"voltage low",
489 	"temp high",
490 	"charge active",
491 	"discharge active",
492 	"learn cycle req'd",
493 	"learn cycle active",
494 	"learn cycle failed",
495 	"learn cycle timeout",
496 	"I2C errors",
497 	"replace pack",
498 	"low capacity",
499 	"periodic learn req'd"
500 };
501 
502 void		mfii_init_ld_sensor(struct mfii_softc *, int);
503 void		mfii_refresh_ld_sensor(struct mfii_softc *, int);
504 int		mfii_create_sensors(struct mfii_softc *);
505 void		mfii_refresh_sensors(void *);
506 void		mfii_bbu(struct mfii_softc *);
507 #endif /* SMALL_KERNEL */
508 #endif /* NBIO > 0 */
509 
510 /*
511  * mfii boards support asynchronous (and non-polled) completion of
512  * dcmds by proxying them through a passthru mpii command that points
513  * at a dcmd frame. since the passthru command is submitted like
514  * the scsi commands using an SMID in the request descriptor,
515  * ccb_request memory * must contain the passthru command because
516  * that is what the SMID refers to. this means ccb_request cannot
517  * contain the dcmd. rather than allocating separate dma memory to
518  * hold the dcmd, we reuse the sense memory buffer for it.
519  */
520 
521 void			mfii_dcmd_start(struct mfii_softc *,
522 			    struct mfii_ccb *);
523 
/* Zero the sense buffer, which doubles as the dcmd frame (see above). */
static inline void
mfii_dcmd_scrub(struct mfii_ccb *ccb)
{
	memset(ccb->ccb_sense, 0, sizeof(*ccb->ccb_sense));
}
529 
/*
 * Return the ccb's sense buffer reinterpreted as a dcmd frame.  The
 * CTASSERT guarantees the frame fits inside the sense allocation.
 */
static inline struct mfi_dcmd_frame *
mfii_dcmd_frame(struct mfii_ccb *ccb)
{
	CTASSERT(sizeof(struct mfi_dcmd_frame) <= sizeof(*ccb->ccb_sense));
	return ((struct mfi_dcmd_frame *)ccb->ccb_sense);
}
536 
/* Sync the ccb's slice of the sense DMA region (the dcmd frame). */
static inline void
mfii_dcmd_sync(struct mfii_softc *sc, struct mfii_ccb *ccb, int flags)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sense),
	    ccb->ccb_sense_offset, sizeof(*ccb->ccb_sense), flags);
}
543 
544 #define mfii_fw_state(_sc) mfii_read((_sc), MFI_OSP)
545 
546 const struct mfii_iop mfii_iop_thunderbolt = {
547 	MFII_BAR,
548 	MFII_IOP_NUM_SGE_LOC_ORIG,
549 	0,
550 	MFII_REQ_TYPE_LDIO,
551 	0,
552 	MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA,
553 	0
554 };
555 
556 /*
557  * a lot of these values depend on us not implementing fastpath yet.
558  */
559 const struct mfii_iop mfii_iop_25 = {
560 	MFII_BAR,
561 	MFII_IOP_NUM_SGE_LOC_ORIG,
562 	MFII_RAID_CTX_RL_FLAGS_CPU0, /* | MFII_RAID_CTX_RL_FLAGS_SEQNO_EN */
563 	MFII_REQ_TYPE_NO_LOCK,
564 	MFII_RAID_CTX_TYPE_CUDA | 0x1,
565 	MFII_SGE_CHAIN_ELEMENT,
566 	MFII_SGE_END_OF_LIST
567 };
568 
569 const struct mfii_iop mfii_iop_35 = {
570 	MFII_BAR_35,
571 	MFII_IOP_NUM_SGE_LOC_35,
572 	MFII_RAID_CTX_ROUTING_FLAGS_CPU0, /* | MFII_RAID_CTX_ROUTING_FLAGS_SQN */
573 	MFII_REQ_TYPE_NO_LOCK,
574 	MFII_RAID_CTX_TYPE_CUDA | 0x1,
575 	MFII_SGE_CHAIN_ELEMENT,
576 	MFII_SGE_END_OF_LIST
577 };
578 
579 struct mfii_device {
580 	pcireg_t		mpd_vendor;
581 	pcireg_t		mpd_product;
582 	const struct mfii_iop	*mpd_iop;
583 };
584 
585 const struct mfii_device mfii_devices[] = {
586 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_2208,
587 	    &mfii_iop_thunderbolt },
588 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3008,
589 	    &mfii_iop_25 },
590 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3108,
591 	    &mfii_iop_25 },
592 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3404,
593 	    &mfii_iop_35 },
594 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3504,
595 	    &mfii_iop_35 },
596 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3408,
597 	    &mfii_iop_35 },
598 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3508,
599 	    &mfii_iop_35 },
600 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3416,
601 	    &mfii_iop_35 },
602 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3516,
603 	    &mfii_iop_35 }
604 };
605 
606 const struct mfii_iop *mfii_find_iop(struct pci_attach_args *);
607 
608 const struct mfii_iop *
609 mfii_find_iop(struct pci_attach_args *pa)
610 {
611 	const struct mfii_device *mpd;
612 	int i;
613 
614 	for (i = 0; i < nitems(mfii_devices); i++) {
615 		mpd = &mfii_devices[i];
616 
617 		if (mpd->mpd_vendor == PCI_VENDOR(pa->pa_id) &&
618 		    mpd->mpd_product == PCI_PRODUCT(pa->pa_id))
619 			return (mpd->mpd_iop);
620 	}
621 
622 	return (NULL);
623 }
624 
625 int
626 mfii_match(struct device *parent, void *match, void *aux)
627 {
628 	return ((mfii_find_iop(aux) != NULL) ? 1 : 0);
629 }
630 
631 void
632 mfii_attach(struct device *parent, struct device *self, void *aux)
633 {
634 	struct mfii_softc *sc = (struct mfii_softc *)self;
635 	struct pci_attach_args *pa = aux;
636 	pcireg_t memtype;
637 	pci_intr_handle_t ih;
638 	struct scsibus_attach_args saa;
639 	u_int32_t status, scpad2, scpad3;
640 	int chain_frame_sz, nsge_in_io, nsge_in_chain, i;
641 
642 	/* init sc */
643 	sc->sc_iop = mfii_find_iop(aux);
644 	sc->sc_dmat = pa->pa_dmat;
645 	SIMPLEQ_INIT(&sc->sc_ccb_freeq);
646 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
647 	mtx_init(&sc->sc_post_mtx, IPL_BIO);
648 	mtx_init(&sc->sc_reply_postq_mtx, IPL_BIO);
649 	scsi_iopool_init(&sc->sc_iopool, sc, mfii_get_ccb, mfii_put_ccb);
650 
651 	rw_init(&sc->sc_lock, "mfii_lock");
652 
653 	sc->sc_aen_ccb = NULL;
654 	task_set(&sc->sc_aen_task, mfii_aen, sc);
655 
656 	mtx_init(&sc->sc_abort_mtx, IPL_BIO);
657 	SIMPLEQ_INIT(&sc->sc_abort_list);
658 	task_set(&sc->sc_abort_task, mfii_abort_task, sc);
659 
660 	/* wire up the bus shizz */
661 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_iop->bar);
662 	if (pci_mapreg_map(pa, sc->sc_iop->bar, memtype, 0,
663 	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios, MFII_PCI_MEMSIZE)) {
664 		printf(": unable to map registers\n");
665 		return;
666 	}
667 
668 	/* disable interrupts */
669 	mfii_write(sc, MFI_OMSK, 0xffffffff);
670 
671 	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
672 		printf(": unable to map interrupt\n");
673 		goto pci_unmap;
674 	}
675 	printf(": %s\n", pci_intr_string(pa->pa_pc, ih));
676 
677 	/* lets get started */
678 	if (mfii_transition_firmware(sc))
679 		goto pci_unmap;
680 
681 	/* determine max_cmds (refer to the Linux megaraid_sas driver) */
682 	scpad3 = mfii_read(sc, MFII_OSP3);
683 	status = mfii_fw_state(sc);
684 	sc->sc_max_fw_cmds = scpad3 & MFI_STATE_MAXCMD_MASK;
685 	if (sc->sc_max_fw_cmds == 0)
686 		sc->sc_max_fw_cmds = status & MFI_STATE_MAXCMD_MASK;
687 	/*
688 	 * reduce max_cmds by 1 to ensure that the reply queue depth does not
689 	 * exceed FW supplied max_fw_cmds.
690 	 */
691 	sc->sc_max_cmds = min(sc->sc_max_fw_cmds, 1024) - 1;
692 
693 	/* determine max_sgl (refer to the Linux megaraid_sas driver) */
694 	scpad2 = mfii_read(sc, MFII_OSP2);
695 	chain_frame_sz =
696 		((scpad2 & MFII_MAX_CHAIN_MASK) >> MFII_MAX_CHAIN_SHIFT) *
697 		((scpad2 & MFII_MAX_CHAIN_UNIT) ? MFII_1MB_IO : MFII_256K_IO);
698 	if (chain_frame_sz < MFII_CHAIN_FRAME_MIN)
699 		chain_frame_sz = MFII_CHAIN_FRAME_MIN;
700 
701 	nsge_in_io = (MFII_REQUEST_SIZE -
702 		sizeof(struct mpii_msg_scsi_io) -
703 		sizeof(struct mfii_raid_context)) / sizeof(struct mfii_sge);
704 	nsge_in_chain = chain_frame_sz / sizeof(struct mfii_sge);
705 
706 	/* round down to nearest power of two */
707 	sc->sc_max_sgl = 1;
708 	while ((sc->sc_max_sgl << 1) <= (nsge_in_io + nsge_in_chain))
709 		sc->sc_max_sgl <<= 1;
710 
711 	DNPRINTF(MFII_D_MISC, "%s: OSP 0x%08x, OSP2 0x%08x, OSP3 0x%08x\n",
712 	    DEVNAME(sc), status, scpad2, scpad3);
713 	DNPRINTF(MFII_D_MISC, "%s: max_fw_cmds %d, max_cmds %d\n",
714 	    DEVNAME(sc), sc->sc_max_fw_cmds, sc->sc_max_cmds);
715 	DNPRINTF(MFII_D_MISC, "%s: nsge_in_io %d, nsge_in_chain %d, "
716 	    "max_sgl %d\n", DEVNAME(sc), nsge_in_io, nsge_in_chain,
717 	    sc->sc_max_sgl);
718 
719 	/* sense memory */
720 	CTASSERT(sizeof(struct mfi_sense) == MFI_SENSE_SIZE);
721 	sc->sc_sense = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
722 	if (sc->sc_sense == NULL) {
723 		printf("%s: unable to allocate sense memory\n", DEVNAME(sc));
724 		goto pci_unmap;
725 	}
726 
727 	/* reply post queue */
728 	sc->sc_reply_postq_depth = roundup(sc->sc_max_fw_cmds, 16);
729 
730 	sc->sc_reply_postq = mfii_dmamem_alloc(sc,
731 	    sc->sc_reply_postq_depth * sizeof(struct mpii_reply_descr));
732 	if (sc->sc_reply_postq == NULL)
733 		goto free_sense;
734 
735 	memset(MFII_DMA_KVA(sc->sc_reply_postq), 0xff,
736 	    MFII_DMA_LEN(sc->sc_reply_postq));
737 
738 	/* MPII request frame array */
739 	sc->sc_requests = mfii_dmamem_alloc(sc,
740 	    MFII_REQUEST_SIZE * (sc->sc_max_cmds + 1));
741 	if (sc->sc_requests == NULL)
742 		goto free_reply_postq;
743 
744 	/* MFI command frame array */
745 	sc->sc_mfi = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_FRAME_SIZE);
746 	if (sc->sc_mfi == NULL)
747 		goto free_requests;
748 
749 	/* MPII SGL array */
750 	sc->sc_sgl = mfii_dmamem_alloc(sc, sc->sc_max_cmds *
751 	    sizeof(struct mfii_sge) * sc->sc_max_sgl);
752 	if (sc->sc_sgl == NULL)
753 		goto free_mfi;
754 
755 	if (mfii_init_ccb(sc) != 0) {
756 		printf("%s: could not init ccb list\n", DEVNAME(sc));
757 		goto free_sgl;
758 	}
759 
760 	/* kickstart firmware with all addresses and pointers */
761 	if (mfii_initialise_firmware(sc) != 0) {
762 		printf("%s: could not initialize firmware\n", DEVNAME(sc));
763 		goto free_sgl;
764 	}
765 
766 	if (mfii_get_info(sc) != 0) {
767 		printf("%s: could not retrieve controller information\n",
768 		    DEVNAME(sc));
769 		goto free_sgl;
770 	}
771 
772 	printf("%s: \"%s\", firmware %s", DEVNAME(sc),
773 	    sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
774 	if (letoh16(sc->sc_info.mci_memory_size) > 0)
775 		printf(", %uMB cache", letoh16(sc->sc_info.mci_memory_size));
776 	printf("\n");
777 
778 	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO,
779 	    mfii_intr, sc, DEVNAME(sc));
780 	if (sc->sc_ih == NULL)
781 		goto free_sgl;
782 
783 	saa.saa_adapter_softc = sc;
784 	saa.saa_adapter = &mfii_switch;
785 	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
786 	saa.saa_adapter_buswidth = sc->sc_info.mci_max_lds;
787 	saa.saa_luns = 8;
788 	saa.saa_openings = sc->sc_max_cmds;
789 	saa.saa_pool = &sc->sc_iopool;
790 	saa.saa_quirks = saa.saa_flags = 0;
791 	saa.saa_wwpn = saa.saa_wwnn = 0;
792 
793 	sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev, &saa,
794 	    scsiprint);
795 
796 	mfii_syspd(sc);
797 
798 	if (mfii_aen_register(sc) != 0) {
799 		/* error printed by mfii_aen_register */
800 		goto intr_disestablish;
801 	}
802 
803 	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
804 	    sizeof(sc->sc_ld_list), SCSI_DATA_IN) != 0) {
805 		printf("%s: getting list of logical disks failed\n", DEVNAME(sc));
806 		goto intr_disestablish;
807 	}
808 	memset(sc->sc_target_lds, -1, sizeof(sc->sc_target_lds));
809 	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
810 		int target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
811 		sc->sc_target_lds[target] = i;
812 	}
813 
814 	/* enable interrupts */
815 	mfii_write(sc, MFI_OSTS, 0xffffffff);
816 	mfii_write(sc, MFI_OMSK, ~MFII_OSTS_INTR_VALID);
817 
818 #if NBIO > 0
819 	if (bio_register(&sc->sc_dev, mfii_ioctl) != 0)
820 		panic("%s: controller registration failed", DEVNAME(sc));
821 	else
822 		sc->sc_ioctl = mfii_ioctl;
823 
824 #ifndef SMALL_KERNEL
825 	if (mfii_create_sensors(sc) != 0)
826 		printf("%s: unable to create sensors\n", DEVNAME(sc));
827 #endif
828 #endif /* NBIO > 0 */
829 
830 	return;
831 intr_disestablish:
832 	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
833 free_sgl:
834 	mfii_dmamem_free(sc, sc->sc_sgl);
835 free_mfi:
836 	mfii_dmamem_free(sc, sc->sc_mfi);
837 free_requests:
838 	mfii_dmamem_free(sc, sc->sc_requests);
839 free_reply_postq:
840 	mfii_dmamem_free(sc, sc->sc_reply_postq);
841 free_sense:
842 	mfii_dmamem_free(sc, sc->sc_sense);
843 pci_unmap:
844 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
845 }
846 
/*
 * Look up the current firmware device handle for a physical disk
 * target.  The handle table is published with SMR pointer operations
 * (see mfii_dev_handles_update()), so this lockless read is safe in
 * the I/O path as long as it stays inside the smr read section.
 */
static inline uint16_t
mfii_dev_handle(struct mfii_softc *sc, uint16_t target)
{
	struct mfii_pd_dev_handles *handles;
	uint16_t handle;

	smr_read_enter();
	handles = SMR_PTR_GET(&sc->sc_pd->pd_dev_handles);
	handle = handles->pd_handles[target];
	smr_read_leave();

	return (handle);
}
860 
861 void
862 mfii_dev_handles_smr(void *pd_arg)
863 {
864 	struct mfii_pd_dev_handles *handles = pd_arg;
865 
866 	free(handles, M_DEVBUF, sizeof(*handles));
867 }
868 
/*
 * Fetch the logical-disk map from firmware and publish a fresh
 * physical-disk handle table for the lockless readers in
 * mfii_dev_handle().  The old table is retired via smr_call() so
 * in-flight readers finish before it is freed.
 *
 * Returns 0 on success, EIO if the firmware query fails.
 */
int
mfii_dev_handles_update(struct mfii_softc *sc)
{
	struct mfii_ld_map *lm;
	struct mfii_pd_dev_handles *handles, *old_handles;
	int i;
	int rv = 0;

	lm = malloc(sizeof(*lm), M_TEMP, M_WAITOK|M_ZERO);

	rv = mfii_mgmt(sc, MR_DCMD_LD_MAP_GET_INFO, NULL, lm, sizeof(*lm),
	    SCSI_DATA_IN|SCSI_NOSLEEP);

	if (rv != 0) {
		rv = EIO;
		goto free_lm;
	}

	/* M_WAITOK: this allocation cannot fail */
	handles = malloc(sizeof(*handles), M_DEVBUF, M_WAITOK);
	smr_init(&handles->pd_smr);
	for (i = 0; i < MFI_MAX_PD; i++)
		handles->pd_handles[i] = lm->mlm_dev_handle[i].mdh_cur_handle;

	/* commit the updated info */
	sc->sc_pd->pd_timeout = lm->mlm_pd_timeout;
	old_handles = SMR_PTR_GET_LOCKED(&sc->sc_pd->pd_dev_handles);
	SMR_PTR_SET_LOCKED(&sc->sc_pd->pd_dev_handles, handles);

	/* old_handles is NULL on the very first update from mfii_syspd() */
	if (old_handles != NULL)
		smr_call(&old_handles->pd_smr, mfii_dev_handles_smr, old_handles);

free_lm:
	free(lm, M_TEMP, sizeof(*lm));

	return (rv);
}
905 
906 int
907 mfii_syspd(struct mfii_softc *sc)
908 {
909 	struct scsibus_attach_args saa;
910 
911 	sc->sc_pd = malloc(sizeof(*sc->sc_pd), M_DEVBUF, M_WAITOK|M_ZERO);
912 	if (sc->sc_pd == NULL)
913 		return (1);
914 
915 	if (mfii_dev_handles_update(sc) != 0)
916 		goto free_pdsc;
917 
918 	saa.saa_adapter =  &mfii_pd_switch;
919 	saa.saa_adapter_softc = sc;
920 	saa.saa_adapter_buswidth = MFI_MAX_PD;
921 	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
922 	saa.saa_luns = 8;
923 	saa.saa_openings = sc->sc_max_cmds - 1;
924 	saa.saa_pool = &sc->sc_iopool;
925 	saa.saa_quirks = saa.saa_flags = 0;
926 	saa.saa_wwpn = saa.saa_wwnn = 0;
927 
928 	sc->sc_pd->pd_scsibus = (struct scsibus_softc *)
929 	    config_found(&sc->sc_dev, &saa, scsiprint);
930 
931 	return (0);
932 
933 free_pdsc:
934 	free(sc->sc_pd, M_DEVBUF, sizeof(*sc->sc_pd));
935 	return (1);
936 }
937 
/*
 * Detach: tear down sensors, the AEN machinery, the interrupt handler
 * and all DMA regions.  A NULL sc_ih means attach never completed, so
 * there is nothing to undo.
 */
int
mfii_detach(struct device *self, int flags)
{
	struct mfii_softc *sc = (struct mfii_softc *)self;

	if (sc->sc_ih == NULL)
		return (0);

#ifndef SMALL_KERNEL
	if (sc->sc_sensors) {
		sensordev_deinstall(&sc->sc_sensordev);
		free(sc->sc_sensors, M_DEVBUF,
		    MFI_MAX_LD * sizeof(struct ksensor));
	}

	if (sc->sc_bbu) {
		free(sc->sc_bbu, M_DEVBUF, 4 * sizeof(*sc->sc_bbu));
	}

	if (sc->sc_bbu_status) {
		/*
		 * NOTE(review): sizeof(mfi_bbu_indicators) is the byte size
		 * of the pointer array, not its element count; confirm this
		 * matches the allocation size in mfii_create_sensors().
		 */
		free(sc->sc_bbu_status, M_DEVBUF,
		    sizeof(*sc->sc_bbu_status) * sizeof(mfi_bbu_indicators));
	}
#endif /* SMALL_KERNEL */

	mfii_aen_unregister(sc);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	/* free in reverse order of allocation in mfii_attach() */
	mfii_dmamem_free(sc, sc->sc_sgl);
	mfii_dmamem_free(sc, sc->sc_mfi);
	mfii_dmamem_free(sc, sc->sc_requests);
	mfii_dmamem_free(sc, sc->sc_reply_postq);
	mfii_dmamem_free(sc, sc->sc_sense);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);

	return (0);
}
974 
/*
 * Ask the controller to flush its caches before power-down.  The body
 * is currently compiled out (#if 0), so this is a deliberate no-op;
 * the code is kept for reference.
 */
static void
mfii_flush_cache(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
#if 0
	union mfi_mbox mbox = {
		.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE,
	};
	int rv;

	mfii_scrub_ccb(ccb);
	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH, &mbox,
	    NULL, 0, SCSI_NOSLEEP);
	if (rv != 0) {
		printf("%s: unable to flush cache\n", DEVNAME(sc));
		return;
	}
#endif
}
993 
/*
 * Issue the controller shutdown dcmd.  Like mfii_flush_cache() the
 * body is currently compiled out (#if 0) and this is a no-op.
 */
static void
mfii_shutdown(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
#if 0
	int rv;

	mfii_scrub_ccb(ccb);
	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_SHUTDOWN, NULL,
	    NULL, 0, SCSI_POLL);
	if (rv != 0) {
		printf("%s: unable to shutdown controller\n", DEVNAME(sc));
		return;
	}
#endif
}
1009 
1010 static void
1011 mfii_powerdown(struct mfii_softc *sc)
1012 {
1013 	struct mfii_ccb *ccb;
1014 
1015 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
1016 	if (ccb == NULL) {
1017 		printf("%s: unable to allocate ccb for shutdown\n",
1018 		    DEVNAME(sc));
1019 		return;
1020 	}
1021 
1022 	mfii_flush_cache(sc, ccb);
1023 	mfii_shutdown(sc, ccb);
1024 	scsi_io_put(&sc->sc_iopool, ccb);
1025 }
1026 
1027 int
1028 mfii_activate(struct device *self, int act)
1029 {
1030 	struct mfii_softc *sc = (struct mfii_softc *)self;
1031 	int rv;
1032 
1033 	switch (act) {
1034 	case DVACT_POWERDOWN:
1035 		rv = config_activate_children(&sc->sc_dev, act);
1036 		mfii_powerdown(sc);
1037 		break;
1038 	default:
1039 		rv = config_activate_children(&sc->sc_dev, act);
1040 		break;
1041 	}
1042 
1043 	return (rv);
1044 }
1045 
/*
 * Read a 32bit controller register at offset r, with a read barrier
 * beforehand so the access is not reordered with earlier accesses.
 */
u_int32_t
mfii_read(struct mfii_softc *sc, bus_size_t r)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
}
1053 
/*
 * Write a 32bit controller register at offset r, followed by a write
 * barrier so the store is pushed out before later accesses.
 */
void
mfii_write(struct mfii_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
1061 
1062 struct mfii_dmamem *
1063 mfii_dmamem_alloc(struct mfii_softc *sc, size_t size)
1064 {
1065 	struct mfii_dmamem *m;
1066 	int nsegs;
1067 
1068 	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
1069 	if (m == NULL)
1070 		return (NULL);
1071 
1072 	m->mdm_size = size;
1073 
1074 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1075 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->mdm_map) != 0)
1076 		goto mdmfree;
1077 
1078 	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->mdm_seg, 1,
1079 	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
1080 		goto destroy;
1081 
1082 	if (bus_dmamem_map(sc->sc_dmat, &m->mdm_seg, nsegs, size, &m->mdm_kva,
1083 	    BUS_DMA_NOWAIT) != 0)
1084 		goto free;
1085 
1086 	if (bus_dmamap_load(sc->sc_dmat, m->mdm_map, m->mdm_kva, size, NULL,
1087 	    BUS_DMA_NOWAIT) != 0)
1088 		goto unmap;
1089 
1090 	return (m);
1091 
1092 unmap:
1093 	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
1094 free:
1095 	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
1096 destroy:
1097 	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
1098 mdmfree:
1099 	free(m, M_DEVBUF, sizeof *m);
1100 
1101 	return (NULL);
1102 }
1103 
/*
 * Tear down memory allocated by mfii_dmamem_alloc() in the reverse
 * order it was set up: unload, unmap, free the segment, destroy the
 * map, then release the bookkeeping structure.
 */
void
mfii_dmamem_free(struct mfii_softc *sc, struct mfii_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
	free(m, M_DEVBUF, sizeof *m);
}
1113 
/*
 * Submit an MFI DCMD wrapped in an MPII passthru request.  The request
 * frame is laid out as the scsi_io header, immediately followed by the
 * raid context, immediately followed by a single chain SGE that points
 * at the ccb sense area (which mfii_dcmd_frame() presumably uses to
 * hold the MFI frame -- TODO confirm against mfii_dcmd_frame()).
 */
void
mfii_dcmd_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);

	io->function = MFII_FUNCTION_PASSTHRU_IO;
	/* sgl_offset0 counts in 32bit words, chain_offset in 16 bytes */
	io->sgl_offset0 = (uint32_t *)sge - (uint32_t *)io;
	io->chain_offset = io->sgl_offset0 / 4;

	htolem64(&sge->sg_addr, ccb->ccb_sense_dva);
	htolem32(&sge->sg_len, sizeof(*ccb->ccb_sense));
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	mfii_start(sc, ccb);
}
1134 
/*
 * Set up asynchronous event notification (AEN).  Fetches the firmware
 * event log info to learn the boot sequence number, allocates a DMA
 * buffer for event details, and arms the first EVENT_WAIT command.
 *
 * On success the ccb and the DMA buffer are handed over to the AEN
 * machinery (kept in sc->sc_aen_ccb / ccb->ccb_cookie by
 * mfii_aen_start()) and are never returned to the pool.
 */
int
mfii_aen_register(struct mfii_softc *sc)
{
	struct mfi_evt_log_info mel;
	struct mfii_ccb *ccb;
	struct mfii_dmamem *mdm;
	int rv;

	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
	if (ccb == NULL) {
		printf("%s: unable to allocate ccb for aen\n", DEVNAME(sc));
		return (ENOMEM);
	}

	memset(&mel, 0, sizeof(mel));
	mfii_scrub_ccb(ccb);

	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_EVENT_GET_INFO, NULL,
	    &mel, sizeof(mel), SCSI_DATA_IN|SCSI_NOSLEEP);
	if (rv != 0) {
		scsi_io_put(&sc->sc_iopool, ccb);
		printf("%s: unable to get event info\n", DEVNAME(sc));
		return (EIO);
	}

	mdm = mfii_dmamem_alloc(sc, sizeof(struct mfi_evt_detail));
	if (mdm == NULL) {
		scsi_io_put(&sc->sc_iopool, ccb);
		printf("%s: unable to allocate event data\n", DEVNAME(sc));
		return (ENOMEM);
	}

	/* replay all the events from boot */
	mfii_aen_start(sc, ccb, mdm, lemtoh32(&mel.mel_boot_seq_num));

	return (0);
}
1172 
/*
 * (Re)arm the firmware EVENT_WAIT command.  Builds an MFI DCMD frame
 * asking for the first event at or after sequence number "seq", with
 * a single 64bit SGE pointing at the event detail buffer, and submits
 * it via the passthru path.  Completion lands in mfii_aen_done().
 */
void
mfii_aen_start(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct mfii_dmamem *mdm, uint32_t seq)
{
	struct mfi_dcmd_frame *dcmd = mfii_dcmd_frame(ccb);
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	union mfi_sgl *sgl = &dcmd->mdf_sgl;
	union mfi_evt_class_locale mec;

	/* reset the ccb, the frame, and the event buffer for reuse */
	mfii_scrub_ccb(ccb);
	mfii_dcmd_scrub(ccb);
	memset(MFII_DMA_KVA(mdm), 0, MFII_DMA_LEN(mdm));

	ccb->ccb_cookie = mdm;
	ccb->ccb_done = mfii_aen_done;
	sc->sc_aen_ccb = ccb;

	/* subscribe to everything: all locales, debug class and up */
	mec.mec_members.class = MFI_EVT_CLASS_DEBUG;
	mec.mec_members.reserved = 0;
	mec.mec_members.locale = htole16(MFI_EVT_LOCALE_ALL);

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_sg_count = 1;
	hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ | MFI_FRAME_SGL64);
	htolem32(&hdr->mfh_data_len, MFII_DMA_LEN(mdm));
	dcmd->mdf_opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	htolem32(&dcmd->mdf_mbox.w[0], seq);
	htolem32(&dcmd->mdf_mbox.w[1], mec.mec_word);
	htolem64(&sgl->sg64[0].addr, MFII_DMA_DVA(mdm));
	htolem32(&sgl->sg64[0].len, MFII_DMA_LEN(mdm));

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_PREREAD);

	mfii_dcmd_sync(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	mfii_dcmd_start(sc, ccb);
}
1210 
/*
 * Completion handler for the EVENT_WAIT ccb.  Runs from interrupt
 * context, so only schedule the real work.
 */
void
mfii_aen_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	KASSERT(sc->sc_aen_ccb == ccb);

	/* defer to a thread with KERNEL_LOCK so we can run autoconf */
	task_add(systq, &sc->sc_aen_task);
}
1219 
1220 void
1221 mfii_aen(void *arg)
1222 {
1223 	struct mfii_softc *sc = arg;
1224 	struct mfii_ccb *ccb = sc->sc_aen_ccb;
1225 	struct mfii_dmamem *mdm = ccb->ccb_cookie;
1226 	const struct mfi_evt_detail *med = MFII_DMA_KVA(mdm);
1227 	uint32_t code;
1228 
1229 	mfii_dcmd_sync(sc, ccb,
1230 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1231 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
1232 	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_POSTREAD);
1233 
1234 	code = lemtoh32(&med->med_code);
1235 
1236 #if 0
1237 	log(LOG_DEBUG, "%s (seq %u, code %08x) %s\n", DEVNAME(sc),
1238 	    lemtoh32(&med->med_seq_num), code, med->med_description);
1239 #endif
1240 
1241 	switch (code) {
1242 	case MFI_EVT_PD_INSERTED_EXT:
1243 		if (med->med_arg_type != MFI_EVT_ARGS_PD_ADDRESS)
1244 			break;
1245 
1246 		mfii_aen_pd_insert(sc, &med->args.pd_address);
1247 		break;
1248  	case MFI_EVT_PD_REMOVED_EXT:
1249 		if (med->med_arg_type != MFI_EVT_ARGS_PD_ADDRESS)
1250 			break;
1251 
1252 		mfii_aen_pd_remove(sc, &med->args.pd_address);
1253 		break;
1254 
1255 	case MFI_EVT_PD_STATE_CHANGE:
1256 		if (med->med_arg_type != MFI_EVT_ARGS_PD_STATE)
1257 			break;
1258 
1259 		mfii_aen_pd_state_change(sc, &med->args.pd_state);
1260 		break;
1261 
1262 	case MFI_EVT_LD_CREATED:
1263 	case MFI_EVT_LD_DELETED:
1264 		mfii_aen_ld_update(sc);
1265 		break;
1266 
1267 	default:
1268 		break;
1269 	}
1270 
1271 	mfii_aen_start(sc, ccb, mdm, lemtoh32(&med->med_seq_num) + 1);
1272 }
1273 
1274 void
1275 mfii_aen_pd_insert(struct mfii_softc *sc,
1276     const struct mfi_evtarg_pd_address *pd)
1277 {
1278 #if 0
1279 	printf("%s: pd inserted ext\n", DEVNAME(sc));
1280 	printf("%s:  device_id %04x encl_id: %04x type %x\n", DEVNAME(sc),
1281 	    lemtoh16(&pd->device_id), lemtoh16(&pd->encl_id),
1282 	    pd->scsi_dev_type);
1283 	printf("%s:  connected %02x addrs %016llx %016llx\n", DEVNAME(sc),
1284 	    pd->connected.port_bitmap, lemtoh64(&pd->sas_addr[0]),
1285 	    lemtoh64(&pd->sas_addr[1]));
1286 #endif
1287 
1288 	if (mfii_dev_handles_update(sc) != 0) /* refresh map */
1289 		return;
1290 
1291 	scsi_probe_target(sc->sc_pd->pd_scsibus, lemtoh16(&pd->device_id));
1292 }
1293 
1294 void
1295 mfii_aen_pd_remove(struct mfii_softc *sc,
1296     const struct mfi_evtarg_pd_address *pd)
1297 {
1298 #if 0
1299 	printf("%s: pd removed ext\n", DEVNAME(sc));
1300 	printf("%s:  device_id %04x encl_id: %04x type %u\n", DEVNAME(sc),
1301 	    lemtoh16(&pd->device_id), lemtoh16(&pd->encl_id),
1302 	    pd->scsi_dev_type);
1303 	printf("%s:  connected %02x addrs %016llx %016llx\n", DEVNAME(sc),
1304 	    pd->connected.port_bitmap, lemtoh64(&pd->sas_addr[0]),
1305 	    lemtoh64(&pd->sas_addr[1]));
1306 #endif
1307 	uint16_t target = lemtoh16(&pd->device_id);
1308 
1309 	scsi_activate(sc->sc_pd->pd_scsibus, target, -1, DVACT_DEACTIVATE);
1310 
1311 	/* the firmware will abort outstanding commands for us */
1312 
1313 	scsi_detach_target(sc->sc_pd->pd_scsibus, target, DETACH_FORCE);
1314 }
1315 
1316 void
1317 mfii_aen_pd_state_change(struct mfii_softc *sc,
1318     const struct mfi_evtarg_pd_state *state)
1319 {
1320 	uint16_t target = lemtoh16(&state->pd.mep_device_id);
1321 
1322 	if (state->prev_state == htole32(MFI_PD_SYSTEM) &&
1323 	    state->new_state != htole32(MFI_PD_SYSTEM)) {
1324 		/* it's been pulled or configured for raid */
1325 
1326 		scsi_activate(sc->sc_pd->pd_scsibus, target, -1,
1327 		    DVACT_DEACTIVATE);
1328 		/* outstanding commands will simply complete or get aborted */
1329 		scsi_detach_target(sc->sc_pd->pd_scsibus, target,
1330 		    DETACH_FORCE);
1331 
1332 	} else if (state->prev_state == htole32(MFI_PD_UNCONFIG_GOOD) &&
1333 	    state->new_state == htole32(MFI_PD_SYSTEM)) {
1334 		/* the firmware is handing the disk over */
1335 
1336 		scsi_probe_target(sc->sc_pd->pd_scsibus, target);
1337 	}
1338 }
1339 
/*
 * A logical disk was created or deleted.  Fetch the current LD list
 * from the firmware, diff it against the cached target->LD mapping,
 * and attach/detach scsibus targets (and their sensors) accordingly.
 */
void
mfii_aen_ld_update(struct mfii_softc *sc)
{
	int i, state, target, old, nld;
	int newlds[MFI_MAX_LD];

	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), SCSI_DATA_IN) != 0) {
		DNPRINTF(MFII_D_MISC, "%s: getting list of logical disks failed\n",
		    DEVNAME(sc));
		return;
	}

	/* -1 means "no LD at this target" */
	memset(newlds, -1, sizeof(newlds));

	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
		state = sc->sc_ld_list.mll_list[i].mll_state;
		target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		DNPRINTF(MFII_D_MISC, "%s: target %d: state %d\n",
		    DEVNAME(sc), target, state);
		newlds[target] = i;
	}

	for (i = 0; i < MFI_MAX_LD; i++) {
		old = sc->sc_target_lds[i];
		nld = newlds[i];

		if (old == -1 && nld != -1) {
			DNPRINTF(MFII_D_MISC, "%s: attaching target %d\n",
			    DEVNAME(sc), i);

			scsi_probe_target(sc->sc_scsibus, i);

#ifndef SMALL_KERNEL
			/*
			 * NOTE(review): sensor is initialised via the LD
			 * index (nld) but attached via the target index (i);
			 * presumably mfii_init_ld_sensor() fills in
			 * sc_sensors[i] -- TODO confirm.
			 */
			mfii_init_ld_sensor(sc, nld);
			sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
#endif
		} else if (nld == -1 && old != -1) {
			DNPRINTF(MFII_D_MISC, "%s: detaching target %d\n",
			    DEVNAME(sc), i);

			scsi_activate(sc->sc_scsibus, i, -1,
			    DVACT_DEACTIVATE);
			scsi_detach_target(sc->sc_scsibus, i,
			    DETACH_FORCE);
#ifndef SMALL_KERNEL
			sensor_detach(&sc->sc_sensordev, &sc->sc_sensors[i]);
#endif
		}
	}

	/* remember the new mapping for the next diff */
	memcpy(sc->sc_target_lds, newlds, sizeof(sc->sc_target_lds));
}
1393 
/*
 * Tear down AEN state on detach.  Currently a stub: the in-flight
 * EVENT_WAIT ccb and its DMA buffer are not reclaimed (XXX upstream).
 */
void
mfii_aen_unregister(struct mfii_softc *sc)
{
	/* XXX */
}
1399 
/*
 * Hard-reset the IOC via the MPI diagnostic register.  Writes the
 * magic WRITESEQ key sequence to unlock diagnostic writes, triggers
 * the adapter reset, then polls for the reset bit to clear (up to
 * ~300 seconds).  Returns 0 on success, 1 on failure.
 */
int
mfii_reset_hard(struct mfii_softc *sc)
{
	u_int16_t		i;

	/* clear any pending outbound status */
	mfii_write(sc, MFI_OSTS, 0);

	/* enable diagnostic register */
	mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_FLUSH);
	mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_1);
	mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_2);
	mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_3);
	mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_4);
	mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_5);
	mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_6);

	delay(100);

	/* DWRE = diagnostic write enabled; unlock must have worked */
	if ((mfii_read(sc, MPII_HOSTDIAG) & MPII_HOSTDIAG_DWRE) == 0) {
		printf("%s: failed to enable diagnostic read/write\n",
		    DEVNAME(sc));
		return(1);
	}

	/* reset ioc */
	mfii_write(sc, MPII_HOSTDIAG, MPII_HOSTDIAG_RESET_ADAPTER);

	/* 240 milliseconds */
	delay(240000);

	/* wait for the adapter to drop the reset bit */
	for (i = 0; i < 30000; i++) {
		if ((mfii_read(sc, MPII_HOSTDIAG) &
		    MPII_HOSTDIAG_RESET_ADAPTER) == 0)
			break;
		delay(10000);
	}
	if (i >= 30000) {
		printf("%s: failed to reset device\n", DEVNAME(sc));
		return (1);
	}

	/* disable diagnostic register */
	mfii_write(sc, MPII_WRITESEQ, 0xff);

	return(0);
}
1446 
/*
 * Drive the firmware state machine to MFI_STATE_READY.  For each
 * intermediate state, poke the inbound doorbell where required and
 * poll (max_wait seconds, in 100ms steps) for a state change.  A
 * FAULT state triggers one attempt at a hard reset.  Returns 0 when
 * the firmware reaches READY, 1 on any failure.
 */
int
mfii_transition_firmware(struct mfii_softc *sc)
{
	int32_t			fw_state, cur_state;
	int			max_wait, i, reset_on_fault = 1;

	fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;

	while (fw_state != MFI_STATE_READY) {
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			/* only try the hard reset once */
			if (!reset_on_fault) {
				printf("%s: firmware fault\n", DEVNAME(sc));
				return (1);
			}
			printf("%s: firmware fault; attempting full device "
			    "reset, this can take some time\n", DEVNAME(sc));
			if (mfii_reset_hard(sc))
				return (1);
			max_wait = 20;
			reset_on_fault = 0;
			break;
		case MFI_STATE_WAIT_HANDSHAKE:
			mfii_write(sc, MFI_SKINNY_IDB,
			    MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_BB_INIT:
			max_wait = 20;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_FW_INIT:
		case MFI_STATE_FW_INIT_2:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 40;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_HOTPLUG);
			max_wait = 10;
			break;
		default:
			printf("%s: unknown firmware state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
		/* poll for a state transition, 100ms at a time */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		} else {
			DPRINTF("%s: firmware state change %#x -> %#x after "
			    "%d iterations\n",
			    DEVNAME(sc), cur_state, fw_state, i);
		}
	}

	return (0);
}
1518 
/*
 * Fetch the controller info structure (MR_DCMD_CTRL_GET_INFO) into
 * sc->sc_info and, on debug kernels, dump its interesting fields.
 * Returns 0 on success or the mfii_mgmt() error.
 */
int
mfii_get_info(struct mfii_softc *sc)
{
	int i, rv;

	rv = mfii_mgmt(sc, MR_DCMD_CTRL_GET_INFO, NULL, &sc->sc_info,
	    sizeof(sc->sc_info), SCSI_DATA_IN|SCSI_NOSLEEP);

	if (rv != 0)
		return (rv);

	/* everything below is debug output only */
	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
		DPRINTF("%s: active FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_image_component[i].mic_name,
		    sc->sc_info.mci_image_component[i].mic_version,
		    sc->sc_info.mci_image_component[i].mic_build_date,
		    sc->sc_info.mci_image_component[i].mic_build_time);
	}

	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
		DPRINTF("%s: pending FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_pending_image_component[i].mic_name,
		    sc->sc_info.mci_pending_image_component[i].mic_version,
		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
	}

	DPRINTF("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_arms,
	    sc->sc_info.mci_max_spans,
	    sc->sc_info.mci_max_arrays,
	    sc->sc_info.mci_max_lds,
	    sc->sc_info.mci_product_name);

	DPRINTF("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_serial_number,
	    sc->sc_info.mci_hw_present,
	    sc->sc_info.mci_current_fw_time,
	    sc->sc_info.mci_max_cmds,
	    sc->sc_info.mci_max_sg_elements);

	DPRINTF("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_request_size,
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_lds_degraded,
	    sc->sc_info.mci_lds_offline,
	    sc->sc_info.mci_pd_present);

	DPRINTF("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pd_disks_present,
	    sc->sc_info.mci_pd_disks_pred_failure,
	    sc->sc_info.mci_pd_disks_failed);

	DPRINTF("%s: nvram %d mem %d flash %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_nvram_size,
	    sc->sc_info.mci_memory_size,
	    sc->sc_info.mci_flash_size);

	DPRINTF("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ram_correctable_errors,
	    sc->sc_info.mci_ram_uncorrectable_errors,
	    sc->sc_info.mci_cluster_allowed,
	    sc->sc_info.mci_cluster_active);

	DPRINTF("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_strips_per_io,
	    sc->sc_info.mci_raid_levels,
	    sc->sc_info.mci_adapter_ops,
	    sc->sc_info.mci_ld_ops);

	DPRINTF("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_stripe_sz_ops.min,
	    sc->sc_info.mci_stripe_sz_ops.max,
	    sc->sc_info.mci_pd_ops,
	    sc->sc_info.mci_pd_mix_support);

	DPRINTF("%s: ecc_bucket %d pckg_prop %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ecc_bucket_count,
	    sc->sc_info.mci_package_version);

	DPRINTF("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_seq_num,
	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);

	DPRINTF("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_rebuild_rate,
	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
	    sc->sc_info.mci_properties.mcp_bgi_rate,
	    sc->sc_info.mci_properties.mcp_cc_rate);

	DPRINTF("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_recon_rate,
	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
	    sc->sc_info.mci_properties.mcp_spinup_delay,
	    sc->sc_info.mci_properties.mcp_cluster_enable);

	DPRINTF("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_coercion_mode,
	    sc->sc_info.mci_properties.mcp_alarm_enable,
	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);

	DPRINTF("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
	    sc->sc_info.mci_properties.mcp_expose_encl_devices);

	DPRINTF("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pci.mip_vendor,
	    sc->sc_info.mci_pci.mip_device,
	    sc->sc_info.mci_pci.mip_subvendor,
	    sc->sc_info.mci_pci.mip_subdevice);

	DPRINTF("%s: type %#x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_host.mih_type,
	    sc->sc_info.mci_host.mih_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0llx ", sc->sc_info.mci_host.mih_port_addr[i]);
	DPRINTF("\n");

	DPRINTF("%s: type %.x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_device.mid_type,
	    sc->sc_info.mci_device.mid_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0llx ", sc->sc_info.mci_device.mid_port_addr[i]);
	DPRINTF("\n");

	return (0);
}
1673 
/*
 * Submit a legacy MFI frame (MFA path) and busy-wait for completion
 * by polling the command status byte in the request frame.  Used
 * before the reply queue is operational (e.g. IOC init).  Returns 0
 * on completion, 1 on timeout (~5 seconds).
 */
int
mfii_mfa_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mfi_frame_header	*hdr = ccb->ccb_request;
	u_int64_t r;
	int to = 0, rv = 0;

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_mfa_poll called with cookie or done set");
#endif

	hdr->mfh_context = ccb->ccb_smid;
	/* sentinel value: the firmware overwrites it on completion */
	hdr->mfh_cmd_status = MFI_STAT_INVALID_STATUS;
	hdr->mfh_flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	/* post the frame's physical address in MFA format */
	r = MFII_REQ_MFA(ccb->ccb_request_dva);
	memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));

	mfii_start(sc, ccb);

	for (;;) {
		/* pull the firmware's status byte into view */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (hdr->mfh_cmd_status != MFI_STAT_INVALID_STATUS)
			break;

		if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
			    ccb->ccb_smid);
			ccb->ccb_flags |= MFI_CCB_F_ERR;
			rv = 1;
			break;
		}

		/* give the frame back to the device and wait some more */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		delay(1000);
	}

	/* complete any data transfer associated with the command */
	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
		    0, ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	return (rv);
}
1729 
1730 int
1731 mfii_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
1732 {
1733 	void (*done)(struct mfii_softc *, struct mfii_ccb *);
1734 	void *cookie;
1735 	int rv = 1;
1736 
1737 	done = ccb->ccb_done;
1738 	cookie = ccb->ccb_cookie;
1739 
1740 	ccb->ccb_done = mfii_poll_done;
1741 	ccb->ccb_cookie = &rv;
1742 
1743 	mfii_start(sc, ccb);
1744 
1745 	do {
1746 		delay(10);
1747 		mfii_postq(sc);
1748 	} while (rv == 1);
1749 
1750 	ccb->ccb_cookie = cookie;
1751 	done(sc, ccb);
1752 
1753 	return (0);
1754 }
1755 
/*
 * Completion hook used by mfii_poll(): clear the flag the poll loop
 * is spinning on.  ccb_cookie points at that flag.
 */
void
mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	int *rv = ccb->ccb_cookie;

	*rv = 0;
}
1763 
/*
 * Submit a ccb and sleep until it completes.  A stack-local mutex is
 * used as the sleep channel lock; mfii_exec_done() clears ccb_cookie
 * under it and wakes us.  Must be called from a context that may
 * sleep.  Always returns 0.
 */
int
mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mutex m = MUTEX_INITIALIZER_FLAGS(IPL_BIO, __MTX_NAME,
	    MTX_NOWITNESS);

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_exec called with cookie or done set");
#endif

	ccb->ccb_cookie = &m;
	ccb->ccb_done = mfii_exec_done;

	mfii_start(sc, ccb);

	mtx_enter(&m);
	/* ccb_cookie is reset to NULL by mfii_exec_done() on completion */
	while (ccb->ccb_cookie != NULL)
		msleep_nsec(ccb, &m, PRIBIO, "mfiiexec", INFSLP);
	mtx_leave(&m);

	return (0);
}
1787 
/*
 * Completion hook used by mfii_exec(): under the sleeper's mutex,
 * clear the cookie (the condition being waited on) and wake it.
 */
void
mfii_exec_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mutex *m = ccb->ccb_cookie;

	mtx_enter(m);
	ccb->ccb_cookie = NULL;
	wakeup_one(ccb);
	mtx_leave(m);
}
1798 
1799 int
1800 mfii_mgmt(struct mfii_softc *sc, uint32_t opc, const union mfi_mbox *mbox,
1801     void *buf, size_t len, int flags)
1802 {
1803 	struct mfii_ccb *ccb;
1804 	int rv;
1805 
1806 	ccb = scsi_io_get(&sc->sc_iopool, flags);
1807 	if (ccb == NULL)
1808 		return (ENOMEM);
1809 
1810 	mfii_scrub_ccb(ccb);
1811 	rv = mfii_do_mgmt(sc, ccb, opc, mbox, buf, len, flags);
1812 	scsi_io_put(&sc->sc_iopool, ccb);
1813 
1814 	return (rv);
1815 }
1816 
/*
 * Run an MFI DCMD management command on the given ccb.  Data (if any)
 * is bounced through a freshly allocated DMA buffer; the MFI frame in
 * the ccb is linked into an MPII passthru request via a chain SGE.
 * With SCSI_NOSLEEP the command is polled, otherwise we sleep for
 * completion.  Returns 0 on MFI_STAT_OK, ENOMEM or EIO otherwise.
 */
int
mfii_do_mgmt(struct mfii_softc *sc, struct mfii_ccb *ccb, uint32_t opc,
    const union mfi_mbox *mbox, void *buf, size_t len, int flags)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);
	struct mfi_dcmd_frame *dcmd = ccb->ccb_mfi;
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	u_int8_t *dma_buf = NULL;
	int rv = EIO;

	/* no sleeping before autoconf has finished */
	if (cold)
		flags |= SCSI_NOSLEEP;

	if (buf != NULL) {
		/*
		 * NOTE(review): PR_WAITOK even when SCSI_NOSLEEP is set;
		 * presumably safe in practice for the callers involved --
		 * TODO confirm.
		 */
		dma_buf = dma_alloc(len, PR_WAITOK);
		if (dma_buf == NULL)
			return (ENOMEM);
	}

	ccb->ccb_data = dma_buf;
	ccb->ccb_len = len;
	switch (flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ);
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_WRITE);
		memcpy(dma_buf, buf, len);
		break;
	case 0:
		ccb->ccb_direction = MFII_DATA_NONE;
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_NONE);
		break;
	}

	/* build the 32bit SGL inside the MFI frame for the bounce buffer */
	if (mfii_load_mfa(sc, ccb, &dcmd->mdf_sgl,
	    ISSET(flags, SCSI_NOSLEEP)) != 0) {
		rv = ENOMEM;
		goto done;
	}

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_data_len = htole32(len);
	hdr->mfh_sg_count = len ? ccb->ccb_dmamap->dm_nsegs : 0;

	dcmd->mdf_opcode = opc;
	/* handle special opcodes */
	if (mbox != NULL)
		memcpy(&dcmd->mdf_mbox, mbox, sizeof(dcmd->mdf_mbox));

	io->function = MFII_FUNCTION_PASSTHRU_IO;

	if (len) {
		/* sgl_offset0 counts in 32bit words, chain_offset in 16 bytes */
		io->sgl_offset0 = ((u_int8_t *)sge - (u_int8_t *)io) / 4;
		io->chain_offset = ((u_int8_t *)sge - (u_int8_t *)io) / 16;
		/* chain to the MFI frame, which carries the real SGL */
		htolem64(&sge->sg_addr, ccb->ccb_mfi_dva);
		htolem32(&sge->sg_len, MFI_FRAME_SIZE);
		sge->sg_flags =
		    MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;
	}

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	if (ISSET(flags, SCSI_NOSLEEP)) {
		ccb->ccb_done = mfii_empty_done;
		mfii_poll(sc, ccb);
	} else
		mfii_exec(sc, ccb);

	if (hdr->mfh_cmd_status == MFI_STAT_OK) {
		rv = 0;

		/* copy the result out of the bounce buffer */
		if (ccb->ccb_direction == MFII_DATA_IN)
			memcpy(buf, dma_buf, len);
	}

done:
	if (buf != NULL)
		dma_free(dma_buf, len);

	return (rv);
}
1905 
/*
 * Deliberate no-op completion handler, used when the caller polls
 * for completion itself (see mfii_do_mgmt()).
 */
void
mfii_empty_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
}
1911 
/*
 * Load the ccb's data buffer into its dmamap and fill in the legacy
 * MFI scatter/gather list at sglp.  Returns 0 on success (or when
 * there is no data), 1 on load failure.
 *
 * NOTE(review): the SGL is built with 32bit addresses (sg32), which
 * assumes segment addresses fit in 32 bits -- presumably guaranteed
 * by the dma tag; confirm before reusing elsewhere.
 */
int
mfii_load_mfa(struct mfii_softc *sc, struct mfii_ccb *ccb,
    void *sglp, int nosleep)
{
	union mfi_sgl *sgl = sglp;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	int error;
	int i;

	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		sgl->sg32[i].addr = htole32(dmap->dm_segs[i].ds_addr);
		sgl->sg32[i].len = htole32(dmap->dm_segs[i].ds_len);
	}

	/* hand the buffer to the device */
	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	return (0);
}
1943 
/*
 * Post a request descriptor to the inbound queue port.  On LP64 the
 * 64bit descriptor is written in one raw store; otherwise it is split
 * into two 32bit stores (low then high) under sc_post_mtx so the two
 * halves of concurrent descriptors cannot interleave.
 */
void
mfii_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	u_long *r = (u_long *)&ccb->ccb_req;

	/* make the request frame visible to the device before posting */
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#if defined(__LP64__)
	bus_space_write_raw_8(sc->sc_iot, sc->sc_ioh, MFI_IQPL, *r);
#else
	mtx_enter(&sc->sc_post_mtx);
	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh, MFI_IQPL, r[0]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MFI_IQPL, 8, BUS_SPACE_BARRIER_WRITE);

	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh, MFI_IQPH, r[1]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MFI_IQPH, 8, BUS_SPACE_BARRIER_WRITE);
	mtx_leave(&sc->sc_post_mtx);
#endif
}
1967 
/*
 * Common completion path: pull the request frame, chained SGL and
 * data buffer back from the device, unload the data map, then invoke
 * the ccb's completion handler.
 */
void
mfii_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_POSTWRITE);
	}

	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
		    0, ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	ccb->ccb_done(sc, ccb);
}
1992 
1993 int
1994 mfii_initialise_firmware(struct mfii_softc *sc)
1995 {
1996 	struct mpii_msg_iocinit_request *iiq;
1997 	struct mfii_dmamem *m;
1998 	struct mfii_ccb *ccb;
1999 	struct mfi_init_frame *init;
2000 	int rv;
2001 
2002 	m = mfii_dmamem_alloc(sc, sizeof(*iiq));
2003 	if (m == NULL)
2004 		return (1);
2005 
2006 	iiq = MFII_DMA_KVA(m);
2007 	memset(iiq, 0, sizeof(*iiq));
2008 
2009 	iiq->function = MPII_FUNCTION_IOC_INIT;
2010 	iiq->whoinit = MPII_WHOINIT_HOST_DRIVER;
2011 
2012 	iiq->msg_version_maj = 0x02;
2013 	iiq->msg_version_min = 0x00;
2014 	iiq->hdr_version_unit = 0x10;
2015 	iiq->hdr_version_dev = 0x0;
2016 
2017 	iiq->system_request_frame_size = htole16(MFII_REQUEST_SIZE / 4);
2018 
2019 	iiq->reply_descriptor_post_queue_depth =
2020 	    htole16(sc->sc_reply_postq_depth);
2021 	iiq->reply_free_queue_depth = htole16(0);
2022 
2023 	htolem32(&iiq->sense_buffer_address_high,
2024 	    MFII_DMA_DVA(sc->sc_sense) >> 32);
2025 
2026 	htolem32(&iiq->reply_descriptor_post_queue_address_lo,
2027 	    MFII_DMA_DVA(sc->sc_reply_postq));
2028 	htolem32(&iiq->reply_descriptor_post_queue_address_hi,
2029 	    MFII_DMA_DVA(sc->sc_reply_postq) >> 32);
2030 
2031 	htolem32(&iiq->system_request_frame_base_address_lo,
2032 	    MFII_DMA_DVA(sc->sc_requests));
2033 	htolem32(&iiq->system_request_frame_base_address_hi,
2034 	    MFII_DMA_DVA(sc->sc_requests) >> 32);
2035 
2036 	iiq->timestamp = htole64(getuptime());
2037 
2038 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
2039 	if (ccb == NULL) {
2040 		/* shouldn't ever run out of ccbs during attach */
2041 		return (1);
2042 	}
2043 	mfii_scrub_ccb(ccb);
2044 	init = ccb->ccb_request;
2045 
2046 	init->mif_header.mfh_cmd = MFI_CMD_INIT;
2047 	init->mif_header.mfh_data_len = htole32(sizeof(*iiq));
2048 	init->mif_qinfo_new_addr = htole64(MFII_DMA_DVA(m));
2049 
2050 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
2051 	    0, MFII_DMA_LEN(sc->sc_reply_postq),
2052 	    BUS_DMASYNC_PREREAD);
2053 
2054 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
2055 	    0, sizeof(*iiq), BUS_DMASYNC_PREREAD);
2056 
2057 	rv = mfii_mfa_poll(sc, ccb);
2058 
2059 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
2060 	    0, sizeof(*iiq), BUS_DMASYNC_POSTREAD);
2061 
2062 	scsi_io_put(&sc->sc_iopool, ccb);
2063 	mfii_dmamem_free(sc, m);
2064 
2065 	return (rv);
2066 }
2067 
2068 int
2069 mfii_my_intr(struct mfii_softc *sc)
2070 {
2071 	u_int32_t status;
2072 
2073 	status = mfii_read(sc, MFI_OSTS);
2074 	if (ISSET(status, 0x1)) {
2075 		mfii_write(sc, MFI_OSTS, status);
2076 		return (1);
2077 	}
2078 
2079 	return (ISSET(status, MFII_OSTS_INTR_VALID) ? 1 : 0);
2080 }
2081 
/*
 * Interrupt handler.  Claims the interrupt if it is ours and then
 * drains the reply post queue to complete finished commands.
 */
int
mfii_intr(void *arg)
{
	struct mfii_softc *sc = arg;

	if (mfii_my_intr(sc) == 0)
		return (0);

	mfii_postq(sc);

	return (1);
}
2094 
/*
 * Drain the reply post queue.  Completed commands are collected on a
 * local list under the reply queue mutex and then completed outside
 * of it, so mfii_done() can run without holding the lock.
 */
void
mfii_postq(struct mfii_softc *sc)
{
	struct mfii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_reply_descr *postq = MFII_DMA_KVA(sc->sc_reply_postq);
	struct mpii_reply_descr *rdp;
	struct mfii_ccb *ccb;
	int rpi = 0;

	mtx_enter(&sc->sc_reply_postq_mtx);

	/* make the controller's writes to the queue visible to the cpu */
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_POSTREAD);

	for (;;) {
		rdp = &postq[sc->sc_reply_postq_index];
		/* an unused descriptor means we have caught up */
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		/* smid 0 is reserved, so smid N maps to ccb N-1 */
		ccb = &sc->sc_ccb[letoh16(rdp->smid) - 1];
		SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		/* mark the descriptor unused again for the next pass */
		memset(rdp, 0xff, sizeof(*rdp));

		sc->sc_reply_postq_index++;
		sc->sc_reply_postq_index %= sc->sc_reply_postq_depth;
		rpi = 1;
	}

	/* hand the (now cleared) descriptors back to the controller */
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_PREREAD);

	/* only tell the controller where we are if we consumed anything */
	if (rpi)
		mfii_write(sc, MFII_RPI, sc->sc_reply_postq_index);

	mtx_leave(&sc->sc_reply_postq_mtx);

	/* complete the collected commands outside the mutex */
	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		mfii_done(sc, ccb);
	}
}
2146 
/*
 * Entry point for SCSI commands to logical discs (RAID volumes).
 * READ/WRITE opcodes take the optimised LD I/O path; everything
 * else is sent as a pass-through CDB.
 */
void
mfii_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->bus->sb_adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	switch (xs->cmd.opcode) {
	case READ_COMMAND:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_COMMAND:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
		if (mfii_scsi_cmd_io(sc, xs) != 0)
			goto stuffup;

		break;

	default:
		if (mfii_scsi_cmd_cdb(sc, xs) != 0)
			goto stuffup;
		break;
	}

	xs->error = XS_NOERROR;
	xs->resid = 0;

	/* polled commands complete synchronously and need no timeout */
	if (ISSET(xs->flags, SCSI_POLL)) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	ccb->ccb_refcnt = 2; /* one for the chip, one for the timeout */
	timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
	scsi_done(xs);
}
2201 
/*
 * Completion handler for logical disc and pass-through commands.
 * Maps the MFI status in the RAID context to a scsi_xfer error and
 * completes the xfer once the last reference on the ccb is dropped.
 */
void
mfii_scsi_cmd_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct scsi_xfer *xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	u_int refs = 1;

	/*
	 * if the timeout had not fired yet we also own its reference,
	 * so drop both in one go below.
	 */
	if (timeout_del(&xs->stimeout))
		refs = 2;

	switch (ctx->status) {
	case MFI_STAT_OK:
		break;

	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		/* firmware supplied sense data; pass it to the midlayer */
		xs->error = XS_SENSE;
		memset(&xs->sense, 0, sizeof(xs->sense));
		memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
		break;

	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	/* last one out completes the xfer */
	if (atomic_sub_int_nv(&ccb->ccb_refcnt, refs) == 0)
		scsi_done(xs);
}
2236 
2237 int
2238 mfii_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
2239 {
2240 	struct mfii_softc	*sc = link->bus->sb_adapter_softc;
2241 
2242 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_scsi_ioctl\n", DEVNAME(sc));
2243 
2244 	switch (cmd) {
2245 	case DIOCGCACHE:
2246 	case DIOCSCACHE:
2247 		return (mfii_ioctl_cache(link, cmd, (struct dk_cache *)addr));
2248 		break;
2249 
2250 	default:
2251 		if (sc->sc_ioctl)
2252 			return (sc->sc_ioctl(&sc->sc_dev, cmd, addr));
2253 		break;
2254 	}
2255 
2256 	return (ENOTTY);
2257 }
2258 
/*
 * Get (DIOCGCACHE) or set (DIOCSCACHE) the cache policy for the
 * logical disc behind link->target.  Controllers with onboard cache
 * memory use the LD cache policy bits; cacheless controllers fall
 * back to the physical disk write cache and cannot do read caching.
 */
int
mfii_ioctl_cache(struct scsi_link *link, u_long cmd,  struct dk_cache *dc)
{
	struct mfii_softc	*sc = link->bus->sb_adapter_softc;
	int			 rv, wrenable, rdenable;
	struct mfi_ld_prop	 ldp;
	union mfi_mbox		 mbox;

	if (mfii_get_info(sc)) {
		rv = EIO;
		goto done;
	}

	/* the target must map to a known logical disc */
	if (sc->sc_target_lds[link->target] == -1) {
		rv = EIO;
		goto done;
	}

	memset(&mbox, 0, sizeof(mbox));
	mbox.b[0] = link->target;
	rv = mfii_mgmt(sc, MR_DCMD_LD_GET_PROPERTIES, &mbox, &ldp, sizeof(ldp),
	    SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	/* derive the current cache state from the relevant policy bits */
	if (sc->sc_info.mci_memory_size > 0) {
		wrenable = ISSET(ldp.mlp_cur_cache_policy,
		    MR_LD_CACHE_ALLOW_WRITE_CACHE)? 1 : 0;
		rdenable = ISSET(ldp.mlp_cur_cache_policy,
		    MR_LD_CACHE_ALLOW_READ_CACHE)? 1 : 0;
	} else {
		wrenable = ISSET(ldp.mlp_diskcache_policy,
		    MR_LD_DISK_CACHE_ENABLE)? 1 : 0;
		rdenable = 0;
	}

	if (cmd == DIOCGCACHE) {
		dc->wrcache = wrenable;
		dc->rdcache = rdenable;
		goto done;
	} /* else DIOCSCACHE */

	/* nothing to do if the requested state matches the current one */
	if (((dc->wrcache) ? 1 : 0) == wrenable &&
	    ((dc->rdcache) ? 1 : 0) == rdenable)
		goto done;

	/* the set command needs the ld target/reserved/sequence in the mbox */
	memset(&mbox, 0, sizeof(mbox));
	mbox.b[0] = ldp.mlp_ld.mld_target;
	mbox.b[1] = ldp.mlp_ld.mld_res;
	mbox.s[1] = ldp.mlp_ld.mld_seq;

	if (sc->sc_info.mci_memory_size > 0) {
		if (dc->rdcache)
			SET(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_READ_CACHE);
		else
			CLR(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_READ_CACHE);
		if (dc->wrcache)
			SET(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_WRITE_CACHE);
		else
			CLR(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_WRITE_CACHE);
	} else {
		/* no controller cache: read caching cannot be enabled */
		if (dc->rdcache) {
			rv = EOPNOTSUPP;
			goto done;
		}
		if (dc->wrcache)
			ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_ENABLE;
		else
			ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_DISABLE;
	}

	rv = mfii_mgmt(sc, MR_DCMD_LD_SET_PROPERTIES, &mbox, &ldp, sizeof(ldp),
	    SCSI_DATA_OUT);
done:
	return (rv);
}
2339 
/*
 * Build an LD I/O request frame for a READ/WRITE command to a
 * logical disc.  Returns 0 on success, 1 if the dma map could not
 * be loaded.
 */
int
mfii_scsi_cmd_io(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the mfii raid context sits directly after the mpii io frame */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	int segs;

	io->dev_handle = htole16(link->target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* sgl offset is counted in 32-bit words */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, &xs->cmd, xs->cmdlen);

	ctx->type_nseg = sc->sc_iop->ldio_ctx_type_nseg;
	ctx->timeout_value = htole16(0x14); /* XXX */
	ctx->reg_lock_flags = htole16(sc->sc_iop->ldio_ctx_reg_lock_flags);
	ctx->virtual_disk_target_id = htole16(link->target);

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (1);

	segs = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;
	/* where the sge count lives in the context differs per iop gen */
	switch (sc->sc_iop->num_sge_loc) {
	case MFII_IOP_NUM_SGE_LOC_ORIG:
		ctx->num_sge = segs;
		break;
	case MFII_IOP_NUM_SGE_LOC_35:
		/* 12 bit field, but we're only using the lower 8 */
		ctx->span_arm = segs;
		break;
	}

	ccb->ccb_req.flags = sc->sc_iop->ldio_req_type;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	return (0);
}
2398 
/*
 * Build a pass-through CDB request frame for a non-READ/WRITE
 * command to a logical disc.  Returns 0 on success, 1 if the dma
 * map could not be loaded.
 */
int
mfii_scsi_cmd_cdb(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the mfii raid context sits directly after the mpii io frame */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);

	io->dev_handle = htole16(link->target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* sgl offset is counted in 32-bit words */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, &xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(link->target);

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (1);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	return (0);
}
2445 
/*
 * Entry point for SCSI commands to pass-through physical discs
 * (the syspd scsibus).  All commands go out as raw CDBs.
 */
void
mfii_pd_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->bus->sb_adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	xs->error = mfii_pd_scsi_cmd_cdb(sc, xs);
	if (xs->error != XS_NOERROR)
		goto done;

	xs->resid = 0;

	/* polled commands complete synchronously and need no timeout */
	if (ISSET(xs->flags, SCSI_POLL)) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	ccb->ccb_refcnt = 2; /* one for the chip, one for the timeout */
	timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
done:
	scsi_done(xs);
}
2484 
2485 int
2486 mfii_pd_scsi_probe(struct scsi_link *link)
2487 {
2488 	struct mfii_softc *sc = link->bus->sb_adapter_softc;
2489 	struct mfi_pd_details mpd;
2490 	union mfi_mbox mbox;
2491 	int rv;
2492 
2493 	if (link->lun > 0)
2494 		return (0);
2495 
2496 	memset(&mbox, 0, sizeof(mbox));
2497 	mbox.s[0] = htole16(link->target);
2498 
2499 	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, &mpd, sizeof(mpd),
2500 	    SCSI_DATA_IN|SCSI_NOSLEEP);
2501 	if (rv != 0)
2502 		return (EIO);
2503 
2504 	if (mpd.mpd_fw_state != htole16(MFI_PD_SYSTEM))
2505 		return (ENXIO);
2506 
2507 	return (0);
2508 }
2509 
/*
 * Build a raw CDB request frame for a pass-through physical disc.
 * Returns a scsi_xfer error code: XS_NOERROR on success,
 * XS_SELTIMEOUT when the device handle lookup fails, or
 * XS_DRIVER_STUFFUP when the dma map could not be loaded.
 */
int
mfii_pd_scsi_cmd_cdb(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the mfii raid context sits directly after the mpii io frame */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	uint16_t dev_handle;

	/* 0xffff means the firmware has no handle for this target */
	dev_handle = mfii_dev_handle(sc, link->target);
	if (dev_handle == htole16(0xffff))
		return (XS_SELTIMEOUT);

	io->dev_handle = dev_handle;
	io->function = 0;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* sgl offset is counted in 32-bit words */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, &xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(link->target);
	ctx->raid_flags = MFII_RAID_CTX_IO_TYPE_SYSPD;
	ctx->timeout_value = sc->sc_pd->pd_timeout;

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (XS_DRIVER_STUFFUP);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;

	ccb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);
	ccb->ccb_req.dev_handle = dev_handle;

	return (XS_NOERROR);
}
2564 
/*
 * Load the ccb's data buffer into its dma map and write the
 * scatter-gather list starting at sglp (inside the request frame).
 * If the segments do not all fit in the frame, the tail of the list
 * is spilled into the ccb's external sgl area via a chain entry.
 * Returns 0 on success, 1 if the dma map load failed.
 */
int
mfii_load_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb, void *sglp,
    int nosleep)
{
	struct mpii_msg_request *req = ccb->ccb_request;
	struct mfii_sge *sge = NULL, *nsge = sglp;
	struct mfii_sge *ce = NULL;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	u_int space;
	int i;

	int error;

	/* nothing to map for dataless commands */
	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* how many sges fit between sglp and the end of the frame */
	space = (MFII_REQUEST_SIZE - ((u_int8_t *)nsge - (u_int8_t *)req)) /
	    sizeof(*nsge);
	if (dmap->dm_nsegs > space) {
		/* reserve one in-frame slot for the chain entry itself */
		space--;

		ccb->ccb_sgl_len = (dmap->dm_nsegs - space) * sizeof(*nsge);
		memset(ccb->ccb_sgl, 0, ccb->ccb_sgl_len);

		/* chain entry points at the external sgl area */
		ce = nsge + space;
		ce->sg_addr = htole64(ccb->ccb_sgl_dva);
		ce->sg_len = htole32(ccb->ccb_sgl_len);
		ce->sg_flags = sc->sc_iop->sge_flag_chain;

		/* chain offset is counted in 16 byte units */
		req->chain_offset = ((u_int8_t *)ce - (u_int8_t *)req) / 16;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		/* jump to the external sgl when we reach the chain entry */
		if (nsge == ce)
			nsge = ccb->ccb_sgl;

		sge = nsge;

		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_flags = MFII_SGE_ADDR_SYSTEM;

		nsge = sge + 1;
	}
	/* mark the last sge written as end-of-list */
	sge->sg_flags |= sc->sc_iop->sge_flag_eol;

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/* flush the spilled portion of the sgl if we used a chain */
	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_PREWRITE);
	}

	return (0);
}
2631 
2632 void
2633 mfii_scsi_cmd_tmo(void *xsp)
2634 {
2635 	struct scsi_xfer *xs = xsp;
2636 	struct scsi_link *link = xs->sc_link;
2637 	struct mfii_softc *sc = link->bus->sb_adapter_softc;
2638 	struct mfii_ccb *ccb = xs->io;
2639 
2640 	mtx_enter(&sc->sc_abort_mtx);
2641 	SIMPLEQ_INSERT_TAIL(&sc->sc_abort_list, ccb, ccb_link);
2642 	mtx_leave(&sc->sc_abort_mtx);
2643 
2644 	task_add(systqmp, &sc->sc_abort_task);
2645 }
2646 
/*
 * Task context worker that issues task management aborts for all
 * ccbs queued on the abort list by the timeout handler.
 */
void
mfii_abort_task(void *scp)
{
	struct mfii_softc *sc = scp;
	struct mfii_ccb *list;

	/* splice the whole abort list out under the mutex */
	mtx_enter(&sc->sc_abort_mtx);
	list = SIMPLEQ_FIRST(&sc->sc_abort_list);
	SIMPLEQ_INIT(&sc->sc_abort_list);
	mtx_leave(&sc->sc_abort_mtx);

	while (list != NULL) {
		struct mfii_ccb *ccb = list;
		struct scsi_xfer *xs = ccb->ccb_cookie;
		struct scsi_link *link = xs->sc_link;

		uint16_t dev_handle;
		struct mfii_ccb *accb;

		/* advance before ccb_link gets reused by the abort path */
		list = SIMPLEQ_NEXT(ccb, ccb_link);

		dev_handle = mfii_dev_handle(sc, link->target);
		if (dev_handle == htole16(0xffff)) {
			/* device is gone */
			if (atomic_dec_int_nv(&ccb->ccb_refcnt) == 0)
				scsi_done(xs);
			continue;
		}

		/* flags 0: sleep until a ccb is available for the abort */
		accb = scsi_io_get(&sc->sc_iopool, 0);
		mfii_scrub_ccb(accb);
		mfii_abort(sc, accb, dev_handle, ccb->ccb_smid,
		    MPII_SCSI_TASK_ABORT_TASK,
		    htole32(MFII_TASK_MGMT_FLAGS_PD));

		accb->ccb_cookie = ccb;
		accb->ccb_done = mfii_scsi_cmd_abort_done;

		mfii_start(sc, accb);
	}
}
2688 
2689 void
2690 mfii_abort(struct mfii_softc *sc, struct mfii_ccb *accb, uint16_t dev_handle,
2691     uint16_t smid, uint8_t type, uint32_t flags)
2692 {
2693 	struct mfii_task_mgmt *msg;
2694 	struct mpii_msg_scsi_task_request *req;
2695 
2696 	msg = accb->ccb_request;
2697 	req = &msg->mpii_request;
2698 	req->dev_handle = dev_handle;
2699 	req->function = MPII_FUNCTION_SCSI_TASK_MGMT;
2700 	req->task_type = type;
2701 	htolem16(&req->task_mid, smid);
2702 	msg->flags = flags;
2703 
2704 	accb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
2705 	accb->ccb_req.smid = letoh16(accb->ccb_smid);
2706 }
2707 
2708 void
2709 mfii_scsi_cmd_abort_done(struct mfii_softc *sc, struct mfii_ccb *accb)
2710 {
2711 	struct mfii_ccb *ccb = accb->ccb_cookie;
2712 	struct scsi_xfer *xs = ccb->ccb_cookie;
2713 
2714 	/* XXX check accb completion? */
2715 
2716 	scsi_io_put(&sc->sc_iopool, accb);
2717 
2718 	if (atomic_dec_int_nv(&ccb->ccb_refcnt) == 0)
2719 		scsi_done(xs);
2720 }
2721 
2722 void *
2723 mfii_get_ccb(void *cookie)
2724 {
2725 	struct mfii_softc *sc = cookie;
2726 	struct mfii_ccb *ccb;
2727 
2728 	mtx_enter(&sc->sc_ccb_mtx);
2729 	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_freeq);
2730 	if (ccb != NULL)
2731 		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
2732 	mtx_leave(&sc->sc_ccb_mtx);
2733 
2734 	return (ccb);
2735 }
2736 
2737 void
2738 mfii_scrub_ccb(struct mfii_ccb *ccb)
2739 {
2740 	ccb->ccb_cookie = NULL;
2741 	ccb->ccb_done = NULL;
2742 	ccb->ccb_flags = 0;
2743 	ccb->ccb_data = NULL;
2744 	ccb->ccb_direction = 0;
2745 	ccb->ccb_len = 0;
2746 	ccb->ccb_sgl_len = 0;
2747 	ccb->ccb_refcnt = 1;
2748 
2749 	memset(&ccb->ccb_req, 0, sizeof(ccb->ccb_req));
2750 	memset(ccb->ccb_request, 0, MFII_REQUEST_SIZE);
2751 	memset(ccb->ccb_mfi, 0, MFI_FRAME_SIZE);
2752 }
2753 
2754 void
2755 mfii_put_ccb(void *cookie, void *io)
2756 {
2757 	struct mfii_softc *sc = cookie;
2758 	struct mfii_ccb *ccb = io;
2759 
2760 	mtx_enter(&sc->sc_ccb_mtx);
2761 	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
2762 	mtx_leave(&sc->sc_ccb_mtx);
2763 }
2764 
/*
 * Allocate the ccb array and carve each ccb's slices out of the
 * pre-allocated request, mfi frame, sense, and sgl dma areas.
 * Returns 0 on success, 1 on failure (with all dma maps destroyed).
 */
int
mfii_init_ccb(struct mfii_softc *sc)
{
	struct mfii_ccb *ccb;
	u_int8_t *request = MFII_DMA_KVA(sc->sc_requests);
	u_int8_t *mfi = MFII_DMA_KVA(sc->sc_mfi);
	u_int8_t *sense = MFII_DMA_KVA(sc->sc_sense);
	u_int8_t *sgl = MFII_DMA_KVA(sc->sc_sgl);
	u_int i;
	int error;

	sc->sc_ccb = mallocarray(sc->sc_max_cmds, sizeof(struct mfii_ccb),
	    M_DEVBUF, M_WAITOK|M_ZERO);

	for (i = 0; i < sc->sc_max_cmds; i++) {
		ccb = &sc->sc_ccb[i];

		/* create a dma map for transfer */
		error = bus_dmamap_create(sc->sc_dmat,
		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
		if (error) {
			printf("%s: cannot create ccb dmamap (%d)\n",
			    DEVNAME(sc), error);
			goto destroy;
		}

		/* select i + 1'th request. 0 is reserved for events */
		ccb->ccb_smid = i + 1;
		ccb->ccb_request_offset = MFII_REQUEST_SIZE * (i + 1);
		ccb->ccb_request = request + ccb->ccb_request_offset;
		ccb->ccb_request_dva = MFII_DMA_DVA(sc->sc_requests) +
		    ccb->ccb_request_offset;

		/* select i'th MFI command frame */
		ccb->ccb_mfi_offset = MFI_FRAME_SIZE * i;
		ccb->ccb_mfi = mfi + ccb->ccb_mfi_offset;
		ccb->ccb_mfi_dva = MFII_DMA_DVA(sc->sc_mfi) +
		    ccb->ccb_mfi_offset;

		/* select i'th sense */
		ccb->ccb_sense_offset = MFI_SENSE_SIZE * i;
		ccb->ccb_sense = (struct mfi_sense *)(sense +
		    ccb->ccb_sense_offset);
		ccb->ccb_sense_dva = MFII_DMA_DVA(sc->sc_sense) +
		    ccb->ccb_sense_offset;

		/* select i'th sgl */
		ccb->ccb_sgl_offset = sizeof(struct mfii_sge) *
		    sc->sc_max_sgl * i;
		ccb->ccb_sgl = (struct mfii_sge *)(sgl + ccb->ccb_sgl_offset);
		ccb->ccb_sgl_dva = MFII_DMA_DVA(sc->sc_sgl) +
		    ccb->ccb_sgl_offset;

		/* add ccb to queue */
		mfii_put_ccb(sc, ccb);
	}

	return (0);

destroy:
	/* free dma maps and ccb memory */
	while ((ccb = mfii_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);

	free(sc->sc_ccb, M_DEVBUF, 0);

	return (1);
}
2834 
2835 #if NBIO > 0
2836 int
2837 mfii_ioctl(struct device *dev, u_long cmd, caddr_t addr)
2838 {
2839 	struct mfii_softc	*sc = (struct mfii_softc *)dev;
2840 	int error = 0;
2841 
2842 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl ", DEVNAME(sc));
2843 
2844 	rw_enter_write(&sc->sc_lock);
2845 
2846 	switch (cmd) {
2847 	case BIOCINQ:
2848 		DNPRINTF(MFII_D_IOCTL, "inq\n");
2849 		error = mfii_ioctl_inq(sc, (struct bioc_inq *)addr);
2850 		break;
2851 
2852 	case BIOCVOL:
2853 		DNPRINTF(MFII_D_IOCTL, "vol\n");
2854 		error = mfii_ioctl_vol(sc, (struct bioc_vol *)addr);
2855 		break;
2856 
2857 	case BIOCDISK:
2858 		DNPRINTF(MFII_D_IOCTL, "disk\n");
2859 		error = mfii_ioctl_disk(sc, (struct bioc_disk *)addr);
2860 		break;
2861 
2862 	case BIOCALARM:
2863 		DNPRINTF(MFII_D_IOCTL, "alarm\n");
2864 		error = mfii_ioctl_alarm(sc, (struct bioc_alarm *)addr);
2865 		break;
2866 
2867 	case BIOCBLINK:
2868 		DNPRINTF(MFII_D_IOCTL, "blink\n");
2869 		error = mfii_ioctl_blink(sc, (struct bioc_blink *)addr);
2870 		break;
2871 
2872 	case BIOCSETSTATE:
2873 		DNPRINTF(MFII_D_IOCTL, "setstate\n");
2874 		error = mfii_ioctl_setstate(sc, (struct bioc_setstate *)addr);
2875 		break;
2876 
2877 	case BIOCPATROL:
2878 		DNPRINTF(MFII_D_IOCTL, "patrol\n");
2879 		error = mfii_ioctl_patrol(sc, (struct bioc_patrol *)addr);
2880 		break;
2881 
2882 	default:
2883 		DNPRINTF(MFII_D_IOCTL, " invalid ioctl\n");
2884 		error = ENOTTY;
2885 	}
2886 
2887 	rw_exit_write(&sc->sc_lock);
2888 
2889 	return (error);
2890 }
2891 
/*
 * Refresh the cached controller view used by the bio ioctls:
 * controller info, the full firmware config (sc_cfg), the logical
 * disc list (sc_ld_list), per-LD details (sc_ld_details), and the
 * count of configured physical discs (sc_no_pd).
 * Returns 0 on success, EINVAL on any failure.
 */
int
mfii_bio_getitall(struct mfii_softc *sc)
{
	int			i, d, rv = EINVAL;
	size_t			size;
	union mfi_mbox		mbox;
	struct mfi_conf		*cfg = NULL;
	struct mfi_ld_details	*ld_det = NULL;

	/* get info */
	if (mfii_get_info(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_get_info failed\n",
		    DEVNAME(sc));
		goto done;
	}

	/* send single element command to retrieve size for full structure */
	cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
	    SCSI_DATA_IN)) {
		free(cfg, M_DEVBUF, sizeof *cfg);
		goto done;
	}

	size = cfg->mfc_size;
	free(cfg, M_DEVBUF, sizeof *cfg);

	/* memory for read config */
	cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size, SCSI_DATA_IN)) {
		free(cfg, M_DEVBUF, size);
		goto done;
	}

	/* replace current pointer with new one */
	if (sc->sc_cfg)
		free(sc->sc_cfg, M_DEVBUF, 0);
	sc->sc_cfg = cfg;

	/* get all ld info */
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), SCSI_DATA_IN))
		goto done;

	/* get memory for all ld structures */
	size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
	if (sc->sc_ld_sz != size) {
		/* ld count changed; reallocate the details array */
		if (sc->sc_ld_details)
			free(sc->sc_ld_details, M_DEVBUF, 0);

		ld_det = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (ld_det == NULL)
			goto done;
		sc->sc_ld_sz = size;
		sc->sc_ld_details = ld_det;
	}

	/* find used physical disks */
	size = sizeof(struct mfi_ld_details);
	for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
		memset(&mbox, 0, sizeof(mbox));
		mbox.b[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		if (mfii_mgmt(sc, MR_DCMD_LD_GET_INFO, &mbox, &sc->sc_ld_details[i], size,
		    SCSI_DATA_IN))
			goto done;

		/* total discs = drives per span * spans, summed over lds */
		d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
		    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
	}
	sc->sc_no_pd = d;

	rv = 0;
done:
	return (rv);
}
2971 
2972 int
2973 mfii_ioctl_inq(struct mfii_softc *sc, struct bioc_inq *bi)
2974 {
2975 	int			rv = EINVAL;
2976 	struct mfi_conf		*cfg = NULL;
2977 
2978 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_inq\n", DEVNAME(sc));
2979 
2980 	if (mfii_bio_getitall(sc)) {
2981 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
2982 		    DEVNAME(sc));
2983 		goto done;
2984 	}
2985 
2986 	/* count unused disks as volumes */
2987 	if (sc->sc_cfg == NULL)
2988 		goto done;
2989 	cfg = sc->sc_cfg;
2990 
2991 	bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
2992 	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
2993 #if notyet
2994 	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
2995 	    (bi->bi_nodisk - sc->sc_no_pd);
2996 #endif
2997 	/* tell bio who we are */
2998 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
2999 
3000 	rv = 0;
3001 done:
3002 	return (rv);
3003 }
3004 
/*
 * BIOCVOL handler: fill in status, geometry, and cache mode for the
 * volume identified by bv->bv_volid.  Volume ids past the logical
 * disc list are treated as hotspares/unused discs.
 * Returns 0 on success, EINVAL on failure.
 */
int
mfii_ioctl_vol(struct mfii_softc *sc, struct bioc_vol *bv)
{
	int			i, per, target, rv = EINVAL;
	struct scsi_link	*link;
	struct device		*dev;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_vol %#x\n",
	    DEVNAME(sc), bv->bv_volid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
		/* go do hotspares & unused disks */
		rv = mfii_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
		goto done;
	}

	i = bv->bv_volid;
	target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
	link = scsi_get_link(sc->sc_scsibus, target, 0);
	if (link == NULL) {
		/* no attached device; report the volume as cache only */
		strlcpy(bv->bv_dev, "cache", sizeof(bv->bv_dev));
	} else {
		dev = link->device_softc;
		if (dev == NULL)
			goto done;

		strlcpy(bv->bv_dev, dev->dv_xname, sizeof(bv->bv_dev));
	}

	/* map the firmware ld state to a bio volume status */
	switch(sc->sc_ld_list.mll_list[i].mll_state) {
	case MFI_LD_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;

	case MFI_LD_PART_DEGRADED:
	case MFI_LD_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;

	case MFI_LD_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;

	default:
		bv->bv_status = BIOC_SVINVALID;
		DNPRINTF(MFII_D_IOCTL, "%s: invalid logical disk state %#x\n",
		    DEVNAME(sc),
		    sc->sc_ld_list.mll_list[i].mll_state);
	}

	/* additional status can modify MFI status */
	switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
	case MFI_LD_PROG_CC:
		/* consistency check in progress; report as scrubbing */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_BGI:
		/* background init in progress; report as scrubbing */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_FGI:
	case MFI_LD_PROG_RECONSTRUCT:
		/* nothing yet */
		break;
	}

	if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01)
		bv->bv_cache = BIOC_CVWRITEBACK;
	else
		bv->bv_cache = BIOC_CVWRITETHROUGH;

	/*
	 * The RAID levels are determined per the SNIA DDF spec, this is only
	 * a subset that is valid for the MFI controller.
	 */
	bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
	if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth > 1)
		bv->bv_level *= 10;

	bv->bv_nodisk = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;

	bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */

	rv = 0;
done:
	return (rv);
}
3108 
/*
 * BIOCDISK handler: describe one physical disk of volume bd->bd_volid
 * in *bd.  Volume ids past the configured logical drives address
 * hotspares and are handed to mfii_bio_hs() instead.  Returns 0 on
 * success, EINVAL otherwise.
 */
int
mfii_ioctl_disk(struct mfii_softc *sc, struct bioc_disk *bd)
{
	struct mfi_conf		*cfg;
	struct mfi_array	*ar;
	struct mfi_ld_cfg	*ld;
	struct mfi_pd_details	*pd;
	struct mfi_pd_list	*pl;
	struct mfi_pd_progress	*mfp;
	struct mfi_progress	*mp;
	struct scsi_inquiry_data *inqbuf;
	char			vend[8+16+4+1], *vendp;
	int			i, rv = EINVAL;
	int			arr, vol, disk, span;
	union mfi_mbox		mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_disk %#x\n",
	    DEVNAME(sc), bd->bd_diskid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	cfg = sc->sc_cfg;

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	ar = cfg->mfc_array;
	vol = bd->bd_volid;
	if (vol >= cfg->mfc_no_ld) {
		/* do hotspares */
		rv = mfii_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
		goto freeme;
	}

	/* calculate offset to ld structure */
	ld = (struct mfi_ld_cfg *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array);

	/* use span 0 only when raid group is not spanned */
	if (ld[vol].mlc_parm.mpa_span_depth > 1)
		span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
	else
		span = 0;
	arr = ld[vol].mlc_span[span].mls_index;

	/* offset disk into pd list */
	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;

	/* 0xffff is the firmware's "no drive in this slot" marker */
	if (ar[arr].pd[disk].mar_pd.mfp_id == 0xffffU) {
		/* disk is missing but succeed command */
		bd->bd_status = BIOC_SDFAILED;
		rv = 0;

		/* try to find an unused disk for the target to rebuild */
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
		    SCSI_DATA_IN))
			goto freeme;

		for (i = 0; i < pl->mpl_no_pd; i++) {
			/* skip non-disk addresses */
			if (pl->mpl_address[i].mpa_scsi_type != 0)
				continue;

			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
			    SCSI_DATA_IN))
				continue;

			/* an unconfigured drive can take over the slot */
			if (pd->mpd_fw_state == MFI_PD_UNCONFIG_GOOD ||
			    pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD)
				break;
		}

		/* no candidate found: report only the missing disk */
		if (i == pl->mpl_no_pd)
			goto freeme;
	} else {
		memset(&mbox, 0, sizeof(mbox));
		mbox.s[0] = ar[arr].pd[disk].mar_pd.mfp_id;
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
		    SCSI_DATA_IN)) {
			bd->bd_status = BIOC_SDINVALID;
			goto freeme;
		}
	}

	/* get the remaining fields */
	bd->bd_channel = pd->mpd_enc_idx;
	bd->bd_target = pd->mpd_enc_slot;

	/* get status */
	switch (pd->mpd_fw_state){
	case MFI_PD_UNCONFIG_GOOD:
	case MFI_PD_UNCONFIG_BAD:
		bd->bd_status = BIOC_SDUNUSED;
		break;

	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
		bd->bd_status = BIOC_SDHOTSPARE;
		break;

	case MFI_PD_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;

	case MFI_PD_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;

	case MFI_PD_REBUILD:
		bd->bd_status = BIOC_SDREBUILD;
		break;

	case MFI_PD_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;

	case MFI_PD_COPYBACK:
	case MFI_PD_SYSTEM:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = pd->mpd_size * 512; /* bytes per block */

	/*
	 * Build the vendor string from the raw inquiry data: vendor (8),
	 * product (16) and revision (4) are contiguous, hence the
	 * 8+16+4+1 sizing of vend[].
	 */
	inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
	vendp = inqbuf->vendor;
	memcpy(vend, vendp, sizeof vend - 1);
	vend[sizeof vend - 1] = '\0';
	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));

	/* XXX find a way to retrieve serial nr from drive */
	/* XXX find a way to get bd_procdev */

	/* export patrol read progress if one is running on this disk */
	mfp = &pd->mpd_progress;
	if (mfp->mfp_in_prog & MFI_PD_PROG_PR) {
		mp = &mfp->mfp_patrol_read;
		bd->bd_patrol.bdp_percent = (mp->mp_progress * 100) / 0xffff;
		bd->bd_patrol.bdp_seconds = mp->mp_elapsed_seconds;
	}

	rv = 0;
freeme:
	free(pd, M_DEVBUF, sizeof *pd);
	free(pl, M_DEVBUF, sizeof *pl);

	return (rv);
}
3262 
3263 int
3264 mfii_ioctl_alarm(struct mfii_softc *sc, struct bioc_alarm *ba)
3265 {
3266 	uint32_t		opc, flags = 0;
3267 	int			rv = 0;
3268 	int8_t			ret;
3269 
3270 	switch(ba->ba_opcode) {
3271 	case BIOC_SADISABLE:
3272 		opc = MR_DCMD_SPEAKER_DISABLE;
3273 		break;
3274 
3275 	case BIOC_SAENABLE:
3276 		opc = MR_DCMD_SPEAKER_ENABLE;
3277 		break;
3278 
3279 	case BIOC_SASILENCE:
3280 		opc = MR_DCMD_SPEAKER_SILENCE;
3281 		break;
3282 
3283 	case BIOC_GASTATUS:
3284 		opc = MR_DCMD_SPEAKER_GET;
3285 		flags = SCSI_DATA_IN;
3286 		break;
3287 
3288 	case BIOC_SATEST:
3289 		opc = MR_DCMD_SPEAKER_TEST;
3290 		break;
3291 
3292 	default:
3293 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_alarm biocalarm invalid "
3294 		    "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
3295 		return (EINVAL);
3296 	}
3297 
3298 	if (mfii_mgmt(sc, opc, NULL, &ret, sizeof(ret), flags))
3299 		rv = EINVAL;
3300 	else
3301 		if (ba->ba_opcode == BIOC_GASTATUS)
3302 			ba->ba_status = ret;
3303 		else
3304 			ba->ba_status = 0;
3305 
3306 	return (rv);
3307 }
3308 
3309 int
3310 mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *bb)
3311 {
3312 	int			i, found, rv = EINVAL;
3313 	union mfi_mbox		mbox;
3314 	uint32_t		cmd;
3315 	struct mfi_pd_list	*pd;
3316 
3317 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink %x\n", DEVNAME(sc),
3318 	    bb->bb_status);
3319 
3320 	/* channel 0 means not in an enclosure so can't be blinked */
3321 	if (bb->bb_channel == 0)
3322 		return (EINVAL);
3323 
3324 	pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
3325 
3326 	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pd, sizeof(*pd), SCSI_DATA_IN))
3327 		goto done;
3328 
3329 	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
3330 		if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
3331 		    bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
3332 			found = 1;
3333 			break;
3334 		}
3335 
3336 	if (!found)
3337 		goto done;
3338 
3339 	memset(&mbox, 0, sizeof(mbox));
3340 	mbox.s[0] = pd->mpl_address[i].mpa_pd_id;
3341 
3342 	switch (bb->bb_status) {
3343 	case BIOC_SBUNBLINK:
3344 		cmd = MR_DCMD_PD_UNBLINK;
3345 		break;
3346 
3347 	case BIOC_SBBLINK:
3348 		cmd = MR_DCMD_PD_BLINK;
3349 		break;
3350 
3351 	case BIOC_SBALARM:
3352 	default:
3353 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink biocblink invalid "
3354 		    "opcode %x\n", DEVNAME(sc), bb->bb_status);
3355 		goto done;
3356 	}
3357 
3358 
3359 	if (mfii_mgmt(sc, cmd, &mbox, NULL, 0, 0) == 0)
3360 		rv = 0;
3361 
3362 done:
3363 	free(pd, M_DEVBUF, sizeof *pd);
3364 	return (rv);
3365 }
3366 
/*
 * Bring physical drive pd_id into a usable state: flip an
 * UNCONFIG_BAD drive to UNCONFIG_GOOD and clear any foreign
 * (previously imported) configuration it carries.  Returns 0 once the
 * drive reads back UNCONFIG_GOOD with no foreign state, ENXIO if it
 * still doesn't, or the error of the failing firmware command.
 */
static int
mfii_makegood(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfii_foreign_scan_info *fsi;
	struct mfi_pd_details	*pd;
	union mfi_mbox		mbox;
	int			rv;

	fsi = malloc(sizeof *fsi, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	/* set-state takes the drive id, its sequence and the new state */
	if (pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD) {
		mbox.s[0] = pd_id;
		mbox.s[1] = pd->mpd_pd.mfp_seq;
		mbox.b[4] = MFI_PD_UNCONFIG_GOOD;
		rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0, 0);
		if (rv != 0)
			goto done;
	}

	/* re-read the details; the state change above may have taken effect */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	/*
	 * Drop foreign configuration if present.  The clear is not
	 * drive-specific: any foreign config found by the scan is wiped.
	 */
	if (pd->mpd_ddf_state & MFI_DDF_FOREIGN) {
		rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_SCAN, NULL, fsi, sizeof(*fsi),
		    SCSI_DATA_IN);
		if (rv != 0)
			goto done;

		if (fsi->count > 0) {
			rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_CLEAR, NULL, NULL, 0, 0);
			if (rv != 0)
				goto done;
		}
	}

	/* verify the drive ended up unconfigured-good and non-foreign */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state != MFI_PD_UNCONFIG_GOOD ||
	    pd->mpd_ddf_state & MFI_DDF_FOREIGN)
		rv = ENXIO;

done:
	free(fsi, M_DEVBUF, sizeof *fsi);
	free(pd, M_DEVBUF, sizeof *pd);

	return (rv);
}
3428 
/*
 * Make physical drive pd_id a hotspare.  The mfi_hotspare structure
 * carries a trailing table of one array index per configured array;
 * it is left zeroed here (presumably yielding a global spare --
 * confirm against firmware documentation).  Returns 0 on success or
 * an errno.
 */
static int
mfii_makespare(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfi_hotspare	*hs;
	struct mfi_pd_details	*pd;
	union mfi_mbox		mbox;
	size_t			size;
	int			rv = EINVAL;

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	/* one trailing uint16_t array index per configured array */
	size = sizeof *hs + sizeof(uint16_t) * sc->sc_cfg->mfc_no_array;

	hs = malloc(size, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	/* fetch the drive's current id/sequence pair for the spare record */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	memset(hs, 0, size);
	hs->mhs_pd.mfp_id = pd->mpd_pd.mfp_id;
	hs->mhs_pd.mfp_seq = pd->mpd_pd.mfp_seq;
	rv = mfii_mgmt(sc, MR_DCMD_CFG_MAKE_SPARE, NULL, hs, size, SCSI_DATA_OUT);

done:
	free(hs, M_DEVBUF, size);
	free(pd, M_DEVBUF, sizeof *pd);

	return (rv);
}
3467 
/*
 * BIOCSETSTATE handler: change the firmware state of the physical
 * drive addressed by bs->bs_channel (enclosure) and bs->bs_target
 * (slot).  Returns 0 on success, EINVAL or a firmware error
 * otherwise.
 */
int
mfii_ioctl_setstate(struct mfii_softc *sc, struct bioc_setstate *bs)
{
	struct mfi_pd_details	*pd;
	struct mfi_pd_list	*pl;
	int			i, found, rv = EINVAL;
	union mfi_mbox		mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate %x\n", DEVNAME(sc),
	    bs->bs_status);

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl), SCSI_DATA_IN))
		goto done;

	/* map the enclosure/slot address onto a physical drive id */
	for (i = 0, found = 0; i < pl->mpl_no_pd; i++)
		if (bs->bs_channel == pl->mpl_address[i].mpa_enc_index &&
		    bs->bs_target == pl->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;

	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN))
		goto done;

	/* set-state wants the drive id, its sequence and the new state */
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
	mbox.s[1] = pd->mpd_pd.mfp_seq;

	switch (bs->bs_status) {
	case BIOC_SSONLINE:
		mbox.b[4] = MFI_PD_ONLINE;
		break;

	case BIOC_SSOFFLINE:
		mbox.b[4] = MFI_PD_OFFLINE;
		break;

	case BIOC_SSHOTSPARE:
		mbox.b[4] = MFI_PD_HOTSPARE;
		break;

	case BIOC_SSREBUILD:
		/*
		 * A drive that isn't offline must first be made
		 * unconfigured-good and turned into a spare before a
		 * rebuild can be requested.
		 */
		if (pd->mpd_fw_state != MFI_PD_OFFLINE) {
			if ((rv = mfii_makegood(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			if ((rv = mfii_makespare(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			/* re-read state/sequence; the drive just changed */
			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
			    SCSI_DATA_IN);
			if (rv != 0)
				goto done;

			/* rebuilding might be started by mfii_makespare() */
			if (pd->mpd_fw_state == MFI_PD_REBUILD) {
				rv = 0;
				goto done;
			}

			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			mbox.s[1] = pd->mpd_pd.mfp_seq;
		}
		mbox.b[4] = MFI_PD_REBUILD;
		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate invalid "
		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
		goto done;
	}


	rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0, 0);
done:
	free(pd, M_DEVBUF, sizeof *pd);
	free(pl, M_DEVBUF, sizeof *pl);
	return (rv);
}
3559 
/*
 * BIOCPATROL handler: start/stop patrol read, switch its operating
 * mode (manual/disabled/auto), or report its current mode and state.
 * Returns 0 on success, EINVAL on a bad argument or firmware failure.
 */
int
mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *bp)
{
	uint32_t		opc;
	int			rv = 0;
	struct mfi_pr_properties prop;
	struct mfi_pr_status	status;
	uint32_t		time, exec_freq;

	switch (bp->bp_opcode) {
	case BIOC_SPSTOP:
	case BIOC_SPSTART:
		if (bp->bp_opcode == BIOC_SPSTART)
			opc = MR_DCMD_PR_START;
		else
			opc = MR_DCMD_PR_STOP;
		/* no payload (NULL/0); the data direction flag is moot here */
		if (mfii_mgmt(sc, opc, NULL, NULL, 0, SCSI_DATA_IN))
			return (EINVAL);
		break;

	case BIOC_SPMANUAL:
	case BIOC_SPDISABLE:
	case BIOC_SPAUTO:
		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time), SCSI_DATA_IN))
			return (EINVAL);

		/* read-modify-write the patrol read properties */
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop), SCSI_DATA_IN))
			return (EINVAL);

		switch (bp->bp_opcode) {
		case BIOC_SPMANUAL:
			prop.op_mode = MFI_PR_OPMODE_MANUAL;
			break;
		case BIOC_SPDISABLE:
			prop.op_mode = MFI_PR_OPMODE_DISABLED;
			break;
		case BIOC_SPAUTO:
			if (bp->bp_autoival != 0) {
				if (bp->bp_autoival == -1)
					/* continuously */
					exec_freq = 0xffffffffU;
				else if (bp->bp_autoival > 0)
					exec_freq = bp->bp_autoival;
				else
					return (EINVAL);
				prop.exec_freq = exec_freq;
			}
			if (bp->bp_autonext != 0) {
				if (bp->bp_autonext < 0)
					return (EINVAL);
				else
					/* next run is relative to device time */
					prop.next_exec = time + bp->bp_autonext;
			}
			prop.op_mode = MFI_PR_OPMODE_AUTO;
			break;
		}

		opc = MR_DCMD_PR_SET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop), SCSI_DATA_OUT))
			return (EINVAL);

		break;

	case BIOC_GPSTATUS:
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop), SCSI_DATA_IN))
			return (EINVAL);

		opc = MR_DCMD_PR_GET_STATUS;
		if (mfii_mgmt(sc, opc, NULL, &status, sizeof(status), SCSI_DATA_IN))
			return (EINVAL);

		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time), SCSI_DATA_IN))
			return (EINVAL);

		/* translate firmware op mode to bio(4) values */
		switch (prop.op_mode) {
		case MFI_PR_OPMODE_AUTO:
			bp->bp_mode = BIOC_SPMAUTO;
			bp->bp_autoival = prop.exec_freq;
			bp->bp_autonext = prop.next_exec;
			bp->bp_autonow = time;
			break;
		case MFI_PR_OPMODE_MANUAL:
			bp->bp_mode = BIOC_SPMMANUAL;
			break;
		case MFI_PR_OPMODE_DISABLED:
			bp->bp_mode = BIOC_SPMDISABLED;
			break;
		default:
			printf("%s: unknown patrol mode %d\n",
			    DEVNAME(sc), prop.op_mode);
			break;
		}

		/* translate firmware run state to bio(4) values */
		switch (status.state) {
		case MFI_PR_STATE_STOPPED:
			bp->bp_status = BIOC_SPSSTOPPED;
			break;
		case MFI_PR_STATE_READY:
			bp->bp_status = BIOC_SPSREADY;
			break;
		case MFI_PR_STATE_ACTIVE:
			bp->bp_status = BIOC_SPSACTIVE;
			break;
		case MFI_PR_STATE_ABORTED:
			bp->bp_status = BIOC_SPSABORTED;
			break;
		default:
			printf("%s: unknown patrol state %d\n",
			    DEVNAME(sc), status.state);
			break;
		}

		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_patrol biocpatrol invalid "
		    "opcode %x\n", DEVNAME(sc), bp->bp_opcode);
		return (EINVAL);
	}

	return (rv);
}
3688 
/*
 * Describe hotspare number (volid - mfc_no_ld).  Depending on "type"
 * the result is written either as a volume (MFI_MGMT_VD, bio_hs
 * points at a struct bioc_vol) or as a disk (MFI_MGMT_SD, bio_hs
 * points at a struct bioc_disk).  Returns 0 on success, EINVAL
 * otherwise.
 */
int
mfii_bio_hs(struct mfii_softc *sc, int volid, int type, void *bio_hs)
{
	struct mfi_conf		*cfg;
	struct mfi_hotspare	*hs;
	struct mfi_pd_details	*pd;
	struct bioc_disk	*sdhs;
	struct bioc_vol		*vdhs;
	struct scsi_inquiry_data *inqbuf;
	char			vend[8+16+4+1], *vendp;
	int			i, rv = EINVAL;
	uint32_t		size;
	union mfi_mbox		mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs %d\n", DEVNAME(sc), volid);

	if (!bio_hs)
		return (EINVAL);

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	/* send single element command to retrieve size for full structure */
	cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg), SCSI_DATA_IN))
		goto freeme;

	size = cfg->mfc_size;
	free(cfg, M_DEVBUF, sizeof *cfg);

	/* memory for read config */
	cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size, SCSI_DATA_IN))
		goto freeme;

	/* calculate offset to hs structure */
	hs = (struct mfi_hotspare *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array +
	    cfg->mfc_ld_size * cfg->mfc_no_ld);

	if (volid < cfg->mfc_no_ld)
		goto freeme; /* not a hotspare */

	/*
	 * NOTE(review): '>' lets volid == mfc_no_ld + mfc_no_hs through,
	 * which is one past the last hotspare (valid ids end at
	 * no_ld + no_hs - 1); '>=' looks intended -- confirm.
	 */
	if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
		goto freeme; /* not a hotspare */

	/* offset into hotspare structure */
	i = volid - cfg->mfc_no_ld;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs i %d volid %d no_ld %d no_hs %d "
	    "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
	    cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);

	/* get pd fields */
	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = hs[i].mhs_pd.mfp_id;
	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    SCSI_DATA_IN)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs illegal PD\n",
		    DEVNAME(sc));
		goto freeme;
	}

	switch (type) {
	case MFI_MGMT_VD:
		vdhs = bio_hs;
		vdhs->bv_status = BIOC_SVONLINE;
		vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
		vdhs->bv_level = -1; /* hotspare */
		vdhs->bv_nodisk = 1;
		break;

	case MFI_MGMT_SD:
		sdhs = bio_hs;
		sdhs->bd_status = BIOC_SDHOTSPARE;
		sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
		sdhs->bd_channel = pd->mpd_enc_idx;
		sdhs->bd_target = pd->mpd_enc_slot;
		/* vendor (8) + product (16) + revision (4) from inquiry data */
		inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
		vendp = inqbuf->vendor;
		memcpy(vend, vendp, sizeof vend - 1);
		vend[sizeof vend - 1] = '\0';
		strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
		break;

	default:
		goto freeme;
	}

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs 6\n", DEVNAME(sc));
	rv = 0;
freeme:
	free(pd, M_DEVBUF, sizeof *pd);
	/* size 0: cfg's allocation is sizeof *cfg or mfc_size here */
	free(cfg, M_DEVBUF, 0);

	return (rv);
}
3786 
3787 #ifndef SMALL_KERNEL
3788 
3789 #define MFI_BBU_SENSORS 4
3790 
/*
 * Refresh the battery backup unit sensors from
 * MR_DCMD_BBU_GET_STATUS: sc_bbu[0] is an overall health indicator,
 * sc_bbu[1..3] carry voltage, current and temperature, and
 * sc_bbu_status[] mirrors the individual fw_status bits.  On command
 * failure or a missing battery the sensors are invalidated.
 */
void
mfii_bbu(struct mfii_softc *sc)
{
	struct mfi_bbu_status bbu;
	u_int32_t status;
	u_int32_t mask;
	u_int32_t soh_bad;
	int i;

	if (mfii_mgmt(sc, MR_DCMD_BBU_GET_STATUS, NULL, &bbu,
	    sizeof(bbu), SCSI_DATA_IN) != 0) {
		/* status query failed: mark everything unknown */
		for (i = 0; i < MFI_BBU_SENSORS; i++) {
			sc->sc_bbu[i].value = 0;
			sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
		}
		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
			sc->sc_bbu_status[i].value = 0;
			sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
		}
		return;
	}

	/* pick the "battery bad" status mask for this battery type */
	switch (bbu.battery_type) {
	case MFI_BBU_TYPE_IBBU:
		mask = MFI_BBU_STATE_BAD_IBBU;
		soh_bad = 0;
		break;
	case MFI_BBU_TYPE_BBU:
		mask = MFI_BBU_STATE_BAD_BBU;
		/* BBUs additionally report a state-of-health flag */
		soh_bad = (bbu.detail.bbu.is_SOH_good == 0);
		break;

	case MFI_BBU_TYPE_NONE:
	default:
		/* no battery: health indicator goes critical */
		sc->sc_bbu[0].value = 0;
		sc->sc_bbu[0].status = SENSOR_S_CRIT;
		for (i = 1; i < MFI_BBU_SENSORS; i++) {
			sc->sc_bbu[i].value = 0;
			sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
		}
		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
			sc->sc_bbu_status[i].value = 0;
			sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
		}
		return;
	}

	status = letoh32(bbu.fw_status);

	/* healthy iff no bad-state bit is set and state-of-health is good */
	sc->sc_bbu[0].value = ((status & mask) || soh_bad) ? 0 : 1;
	sc->sc_bbu[0].status = ((status & mask) || soh_bad) ? SENSOR_S_CRIT :
	    SENSOR_S_OK;

	/* assumes firmware reports mV/mA/degC -- scaled to uV/uA/uK */
	sc->sc_bbu[1].value = letoh16(bbu.voltage) * 1000;
	sc->sc_bbu[2].value = (int16_t)letoh16(bbu.current) * 1000;
	sc->sc_bbu[3].value = letoh16(bbu.temperature) * 1000000 + 273150000;
	for (i = 1; i < MFI_BBU_SENSORS; i++)
		sc->sc_bbu[i].status = SENSOR_S_UNSPEC;

	/* expose every raw fw_status bit as an indicator sensor */
	for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
		sc->sc_bbu_status[i].value = (status & (1 << i)) ? 1 : 0;
		sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
	}
}
3855 
3856 void
3857 mfii_refresh_ld_sensor(struct mfii_softc *sc, int ld)
3858 {
3859 	struct ksensor *sensor;
3860 	int target;
3861 
3862 	target = sc->sc_ld_list.mll_list[ld].mll_ld.mld_target;
3863 	sensor = &sc->sc_sensors[target];
3864 
3865 	switch(sc->sc_ld_list.mll_list[ld].mll_state) {
3866 	case MFI_LD_OFFLINE:
3867 		sensor->value = SENSOR_DRIVE_FAIL;
3868 		sensor->status = SENSOR_S_CRIT;
3869 		break;
3870 
3871 	case MFI_LD_PART_DEGRADED:
3872 	case MFI_LD_DEGRADED:
3873 		sensor->value = SENSOR_DRIVE_PFAIL;
3874 		sensor->status = SENSOR_S_WARN;
3875 		break;
3876 
3877 	case MFI_LD_ONLINE:
3878 		sensor->value = SENSOR_DRIVE_ONLINE;
3879 		sensor->status = SENSOR_S_OK;
3880 		break;
3881 
3882 	default:
3883 		sensor->value = 0; /* unknown */
3884 		sensor->status = SENSOR_S_UNKNOWN;
3885 		break;
3886 	}
3887 }
3888 
3889 void
3890 mfii_init_ld_sensor(struct mfii_softc *sc, int ld)
3891 {
3892 	struct device		*dev;
3893 	struct scsi_link	*link;
3894 	struct ksensor		*sensor;
3895 	int			target;
3896 
3897 	target = sc->sc_ld_list.mll_list[ld].mll_ld.mld_target;
3898 	sensor = &sc->sc_sensors[target];
3899 
3900 	link = scsi_get_link(sc->sc_scsibus, target, 0);
3901 	if (link == NULL) {
3902 		strlcpy(sensor->desc, "cache", sizeof(sensor->desc));
3903 	} else {
3904 		dev = link->device_softc;
3905 		if (dev != NULL)
3906 			strlcpy(sensor->desc, dev->dv_xname,
3907 			    sizeof(sensor->desc));
3908 	}
3909 	sensor->type = SENSOR_DRIVE;
3910 	mfii_refresh_ld_sensor(sc, ld);
3911 }
3912 
/*
 * Create and attach this controller's sensordev: BBU sensors when the
 * firmware reports battery hardware, one SENSOR_DRIVE per logical
 * disk, plus a 10 second refresh task.  Returns 0 on success, 1 on
 * failure.
 */
int
mfii_create_sensors(struct mfii_softc *sc)
{
	int			i, target;

	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
	    sizeof(sc->sc_sensordev.xname));

	if (ISSET(letoh32(sc->sc_info.mci_hw_present), MFI_INFO_HW_BBU)) {
		/* 4 == MFI_BBU_SENSORS */
		sc->sc_bbu = mallocarray(4, sizeof(*sc->sc_bbu),
		    M_DEVBUF, M_WAITOK | M_ZERO);

		/* sc_bbu[0] is the overall health indicator */
		sc->sc_bbu[0].type = SENSOR_INDICATOR;
		sc->sc_bbu[0].status = SENSOR_S_UNKNOWN;
		strlcpy(sc->sc_bbu[0].desc, "bbu ok",
		    sizeof(sc->sc_bbu[0].desc));
		sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[0]);

		/* sc_bbu[1..3]: voltage, current, temperature */
		sc->sc_bbu[1].type = SENSOR_VOLTS_DC;
		sc->sc_bbu[1].status = SENSOR_S_UNSPEC;
		sc->sc_bbu[2].type = SENSOR_AMPS;
		sc->sc_bbu[2].status = SENSOR_S_UNSPEC;
		sc->sc_bbu[3].type = SENSOR_TEMP;
		sc->sc_bbu[3].status = SENSOR_S_UNSPEC;
		for (i = 1; i < MFI_BBU_SENSORS; i++) {
			strlcpy(sc->sc_bbu[i].desc, "bbu",
			    sizeof(sc->sc_bbu[i].desc));
			sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[i]);
		}

		/*
		 * NOTE(review): sizeof(mfi_bbu_indicators) is the array's
		 * size in bytes, not its element count, so this
		 * over-allocates (harmlessly); nitems() looks intended.
		 * Confirm the size used by the matching free before
		 * changing it.
		 */
		sc->sc_bbu_status = malloc(sizeof(*sc->sc_bbu_status) *
		    sizeof(mfi_bbu_indicators), M_DEVBUF, M_WAITOK | M_ZERO);

		/* one indicator sensor per fw_status bit */
		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
			sc->sc_bbu_status[i].type = SENSOR_INDICATOR;
			sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
			strlcpy(sc->sc_bbu_status[i].desc,
			    mfi_bbu_indicators[i],
			    sizeof(sc->sc_bbu_status[i].desc));

			sensor_attach(&sc->sc_sensordev, &sc->sc_bbu_status[i]);
		}
	}

	/* indexed by target id below, hence MFI_MAX_LD and not mll_no_ld */
	sc->sc_sensors = mallocarray(MFI_MAX_LD, sizeof(struct ksensor),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->sc_sensors == NULL)
		return (1);

	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
		mfii_init_ld_sensor(sc, i);
		target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[target]);
	}

	if (sensor_task_register(sc, mfii_refresh_sensors, 10) == NULL)
		goto bad;

	sensordev_install(&sc->sc_sensordev);

	return (0);

bad:
	free(sc->sc_sensors, M_DEVBUF,
	    MFI_MAX_LD * sizeof(struct ksensor));

	return (1);
}
3981 
/*
 * Periodic (10s) sensor task: refresh the BBU sensors and re-read the
 * controller state under sc_lock, then update the per-LD drive
 * sensors.
 */
void
mfii_refresh_sensors(void *arg)
{
	struct mfii_softc	*sc = arg;
	int			i;

	rw_enter_write(&sc->sc_lock);
	if (sc->sc_bbu != NULL)
		mfii_bbu(sc);

	mfii_bio_getitall(sc);
	rw_exit_write(&sc->sc_lock);

	/* NOTE(review): sc_ld_list is read after the lock is dropped */
	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++)
		mfii_refresh_ld_sensor(sc, i);
}
3998 #endif /* SMALL_KERNEL */
3999 #endif /* NBIO > 0 */
4000