xref: /openbsd-src/sys/dev/pci/mfii.c (revision 505ee9ea3b177e2387d907a91ca7da069f3f14d8)
1 /* $OpenBSD: mfii.c,v 1.79 2020/07/20 14:41:13 krw Exp $ */
2 
3 /*
4  * Copyright (c) 2012 David Gwynne <dlg@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "bio.h"
20 
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/malloc.h>
24 #include <sys/device.h>
25 #include <sys/dkio.h>
26 #include <sys/pool.h>
27 #include <sys/task.h>
28 #include <sys/atomic.h>
29 #include <sys/sensors.h>
30 #include <sys/rwlock.h>
31 #include <sys/syslog.h>
32 #include <sys/smr.h>
33 
34 #include <dev/biovar.h>
35 #include <dev/pci/pcidevs.h>
36 #include <dev/pci/pcivar.h>
37 
38 #include <machine/bus.h>
39 
40 #include <scsi/scsi_all.h>
41 #include <scsi/scsi_disk.h>
42 #include <scsi/scsiconf.h>
43 
44 #include <dev/ic/mfireg.h>
45 #include <dev/pci/mpiireg.h>
46 
47 #define	MFII_BAR		0x14
48 #define MFII_BAR_35		0x10
49 #define	MFII_PCI_MEMSIZE	0x2000 /* 8k */
50 
51 #define MFII_OSTS_INTR_VALID	0x00000009
52 #define MFII_RPI		0x6c /* reply post host index */
53 #define MFII_OSP2		0xb4 /* outbound scratch pad 2 */
54 #define MFII_OSP3		0xb8 /* outbound scratch pad 3 */
55 
56 #define MFII_REQ_TYPE_SCSI	MPII_REQ_DESCR_SCSI_IO
57 #define MFII_REQ_TYPE_LDIO	(0x7 << 1)
58 #define MFII_REQ_TYPE_MFA	(0x1 << 1)
59 #define MFII_REQ_TYPE_NO_LOCK	(0x2 << 1)
60 #define MFII_REQ_TYPE_HI_PRI	(0x6 << 1)
61 
62 #define MFII_REQ_MFA(_a)	htole64((_a) | MFII_REQ_TYPE_MFA)
63 
64 #define MFII_FUNCTION_PASSTHRU_IO			(0xf0)
65 #define MFII_FUNCTION_LDIO_REQUEST			(0xf1)
66 
67 #define MFII_MAX_CHAIN_UNIT	0x00400000
68 #define MFII_MAX_CHAIN_MASK	0x000003E0
69 #define MFII_MAX_CHAIN_SHIFT	5
70 
71 #define MFII_256K_IO		128
72 #define MFII_1MB_IO		(MFII_256K_IO * 4)
73 
74 #define MFII_CHAIN_FRAME_MIN	1024
75 
/*
 * Request descriptor written to the controller to post a command.
 * The low bits of flags select one of the MFII_REQ_TYPE_* queue types
 * and smid names the request frame the descriptor refers to.
 */
struct mfii_request_descr {
	u_int8_t	flags;		/* MFII_REQ_TYPE_* */
	u_int8_t	msix_index;
	u_int16_t	smid;		/* request frame index */

	u_int16_t	lmid;
	u_int16_t	dev_handle;
} __packed;
84 
85 #define MFII_RAID_CTX_IO_TYPE_SYSPD	(0x1 << 4)
86 #define MFII_RAID_CTX_TYPE_CUDA		(0x2 << 4)
87 
/*
 * RAID context appended to the MPII SCSI IO request for LD I/O.
 * Layout follows the Linux megaraid_sas driver; field semantics are
 * firmware-defined (NOTE(review): not all fields are exercised here).
 */
struct mfii_raid_context {
	u_int8_t	type_nseg;	/* iop-specific, see mfii_iop */
	u_int8_t	_reserved1;
	u_int16_t	timeout_value;

	u_int16_t	reg_lock_flags;
#define MFII_RAID_CTX_RL_FLAGS_SEQNO_EN	(0x08)
#define MFII_RAID_CTX_RL_FLAGS_CPU0	(0x00)
#define MFII_RAID_CTX_RL_FLAGS_CPU1	(0x10)
#define MFII_RAID_CTX_RL_FLAGS_CUDA	(0x80)

/* gen 3.5 controllers reuse this word for routing flags */
#define MFII_RAID_CTX_ROUTING_FLAGS_SQN	(1 << 4)
#define MFII_RAID_CTX_ROUTING_FLAGS_CPU0 0
	u_int16_t	virtual_disk_target_id;

	u_int64_t	reg_lock_row_lba;

	u_int32_t	reg_lock_length;

	u_int16_t	next_lm_id;
	u_int8_t	ex_status;	/* extended completion status */
	u_int8_t	status;		/* MFI completion status */

	u_int8_t	raid_flags;	/* MFII_RAID_CTX_IO_TYPE_* */
	u_int8_t	num_sge;
	u_int16_t	config_seq_num;

	u_int8_t	span_arm;
	u_int8_t	_reserved3[3];
} __packed;
118 
/*
 * IEEE-style scatter/gather element; sg_flags holds the
 * MFII_SGE_* address-space/chain/end-of-list bits defined below.
 */
struct mfii_sge {
	u_int64_t	sg_addr;
	u_int32_t	sg_len;
	u_int16_t	_reserved;
	u_int8_t	sg_next_chain_offset;
	u_int8_t	sg_flags;
} __packed;
126 
127 #define MFII_SGE_ADDR_MASK		(0x03)
128 #define MFII_SGE_ADDR_SYSTEM		(0x00)
129 #define MFII_SGE_ADDR_IOCDDR		(0x01)
130 #define MFII_SGE_ADDR_IOCPLB		(0x02)
131 #define MFII_SGE_ADDR_IOCPLBNTA		(0x03)
132 #define MFII_SGE_END_OF_LIST		(0x40)
133 #define MFII_SGE_CHAIN_ELEMENT		(0x80)
134 
135 #define MFII_REQUEST_SIZE	256
136 
137 #define MR_DCMD_LD_MAP_GET_INFO			0x0300e101
138 
139 #define MFII_MAX_ROW		32
140 #define MFII_MAX_ARRAY		128
141 
/* per-array row of physical drive handles, part of the firmware LD map */
struct mfii_array_map {
	uint16_t		mam_pd[MFII_MAX_ROW];
} __packed;
145 
/*
 * Firmware device handle state for one physical disk; mdh_cur_handle
 * is what mfii_dev_handles_update() caches for command routing.
 */
struct mfii_dev_handle {
	uint16_t		mdh_cur_handle;
	uint8_t			mdh_valid;
	uint8_t			mdh_reserved;
	uint16_t		mdh_handle[2];
} __packed;
152 
/* reply layout of the MR_DCMD_LD_MAP_GET_INFO dcmd */
struct mfii_ld_map {
	uint32_t		mlm_total_size;
	uint32_t		mlm_reserved1[5];
	uint32_t		mlm_num_lds;
	uint32_t		mlm_reserved2;
	uint8_t			mlm_tgtid_to_ld[2 * MFI_MAX_LD];
	uint8_t			mlm_pd_timeout;	/* cached in mfii_pd_softc */
	uint8_t			mlm_reserved3[7];
	struct mfii_array_map	mlm_am[MFII_MAX_ARRAY];
	struct mfii_dev_handle	mlm_dev_handle[MFI_MAX_PD];
} __packed;
164 
/*
 * Request/reply buffers for a task management (command abort) frame,
 * padded to fixed 128 byte slots.
 */
struct mfii_task_mgmt {
	union {
		uint8_t			request[128];
		struct mpii_msg_scsi_task_request
					mpii_request;
	} __packed __aligned(8);

	union {
		uint8_t			reply[128];
		uint32_t		flags;
#define MFII_TASK_MGMT_FLAGS_LD				(1 << 0)
#define MFII_TASK_MGMT_FLAGS_PD				(1 << 1)
		struct mpii_msg_scsi_task_reply
					mpii_reply;
	} __packed __aligned(8);
} __packed __aligned(8);
181 
/*
 * A single contiguous DMA-able allocation: map, backing segment,
 * size and kernel mapping.  Created by mfii_dmamem_alloc() and
 * torn down by mfii_dmamem_free().
 */
struct mfii_dmamem {
	bus_dmamap_t		mdm_map;
	bus_dma_segment_t	mdm_seg;
	size_t			mdm_size;
	caddr_t			mdm_kva;
};
/* accessors for the map, byte length, device address and kva */
#define MFII_DMA_MAP(_mdm)	((_mdm)->mdm_map)
#define MFII_DMA_LEN(_mdm)	((_mdm)->mdm_size)
#define MFII_DMA_DVA(_mdm)	((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MFII_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
192 
193 struct mfii_softc;
194 
/*
 * Per-command control block.  Each ccb owns a fixed slice of the
 * shared request, MFI frame, sense and sgl DMA areas; for each slice
 * the kva pointer, device address and offset within the area are kept.
 */
struct mfii_ccb {
	void			*ccb_request;	/* MPII request frame */
	u_int64_t		ccb_request_dva;
	bus_addr_t		ccb_request_offset;

	void			*ccb_mfi;	/* MFI frame */
	u_int64_t		ccb_mfi_dva;
	bus_addr_t		ccb_mfi_offset;

	/* also doubles as dcmd frame storage, see mfii_dcmd_frame() */
	struct mfi_sense	*ccb_sense;
	u_int64_t		ccb_sense_dva;
	bus_addr_t		ccb_sense_offset;

	struct mfii_sge		*ccb_sgl;
	u_int64_t		ccb_sgl_dva;
	bus_addr_t		ccb_sgl_offset;
	u_int			ccb_sgl_len;

	struct mfii_request_descr ccb_req;

	bus_dmamap_t		ccb_dmamap;

	/* data for sgl */
	void			*ccb_data;
	size_t			ccb_len;

	int			ccb_direction;
#define MFII_DATA_NONE			0
#define MFII_DATA_IN			1
#define MFII_DATA_OUT			2

	/* completion callback and its argument */
	void			*ccb_cookie;
	void			(*ccb_done)(struct mfii_softc *,
				    struct mfii_ccb *);

	u_int32_t		ccb_flags;
#define MFI_CCB_F_ERR			(1<<0)
	u_int			ccb_smid;	/* SMID for the descriptor */
	u_int			ccb_refcnt;
	SIMPLEQ_ENTRY(mfii_ccb)	ccb_link;	/* free/abort list linkage */
};
SIMPLEQ_HEAD(mfii_ccb_list, mfii_ccb);
237 
/*
 * SMR-protected snapshot of target id -> firmware device handle.
 * Replaced wholesale by mfii_dev_handles_update(); old snapshots are
 * reclaimed through smr_call() once readers have drained.
 */
struct mfii_pd_dev_handles {
	struct smr_entry	pd_smr;
	uint16_t		pd_handles[MFI_MAX_PD];
};
242 
/* state for the pass-through physical disk (syspd) scsibus */
struct mfii_pd_softc {
	struct scsi_link	pd_link;
	struct scsibus_softc	*pd_scsibus;
	struct mfii_pd_dev_handles *pd_dev_handles;	/* SMR pointer */
	uint8_t			pd_timeout;	/* from the firmware LD map */
};
249 
/*
 * Per controller-generation parameters; one instance exists for each
 * of the thunderbolt/25/35 IOP families below.
 */
struct mfii_iop {
	int bar;		/* which PCI BAR maps the registers */
	int num_sge_loc;
#define MFII_IOP_NUM_SGE_LOC_ORIG	0
#define MFII_IOP_NUM_SGE_LOC_35		1
	u_int16_t ldio_ctx_reg_lock_flags;
	u_int8_t ldio_req_type;
	u_int8_t ldio_ctx_type_nseg;
	u_int8_t sge_flag_chain;
	u_int8_t sge_flag_eol;
};
261 
/* per-controller driver state */
struct mfii_softc {
	struct device		sc_dev;
	const struct mfii_iop	*sc_iop;	/* controller generation */

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	void			*sc_ih;		/* interrupt handle */

	struct mutex		sc_ccb_mtx;	/* protects sc_ccb_freeq */
	struct mutex		sc_post_mtx;	/* serialises request posting */

	u_int			sc_max_fw_cmds;	/* firmware limit */
	u_int			sc_max_cmds;	/* what we actually use */
	u_int			sc_max_sgl;	/* sge entries per command */

	u_int			sc_reply_postq_depth;
	u_int			sc_reply_postq_index;
	struct mutex		sc_reply_postq_mtx;
	struct mfii_dmamem	*sc_reply_postq;

	/* shared DMA areas sliced up between the ccbs */
	struct mfii_dmamem	*sc_requests;
	struct mfii_dmamem	*sc_mfi;
	struct mfii_dmamem	*sc_sense;
	struct mfii_dmamem	*sc_sgl;

	struct mfii_ccb		*sc_ccb;
	struct mfii_ccb_list	sc_ccb_freeq;

	struct mfii_ccb		*sc_aen_ccb;	/* async event notification */
	struct task		sc_aen_task;

	struct mutex		sc_abort_mtx;	/* protects sc_abort_list */
	struct mfii_ccb_list	sc_abort_list;
	struct task		sc_abort_task;

	struct scsi_link	sc_link;
	struct scsibus_softc	*sc_scsibus;
	struct mfii_pd_softc	*sc_pd;		/* physical disk bus */
	struct scsi_iopool	sc_iopool;

	/* save some useful information for logical drives that is missing
	 * in sc_ld_list
	 */
	struct {
		char		ld_dev[16];	/* device name sd? */
	}			sc_ld[MFI_MAX_LD];
	int			sc_target_lds[MFI_MAX_LD];

	/* scsi ioctl from sd device */
	int			(*sc_ioctl)(struct device *, u_long, caddr_t);

	/* bio */
	struct mfi_conf		*sc_cfg;
	struct mfi_ctrl_info	sc_info;
	struct mfi_ld_list	sc_ld_list;
	struct mfi_ld_details	*sc_ld_details; /* array to all logical disks */
	int			sc_no_pd; /* used physical disks */
	int			sc_ld_sz; /* sizeof sc_ld_details */

	/* mgmt lock */
	struct rwlock		sc_lock;

	/* sensors */
	struct ksensordev	sc_sensordev;
	struct ksensor		*sc_bbu;
	struct ksensor		*sc_bbu_status;
	struct ksensor		*sc_sensors;
};
336 
337 #ifdef MFII_DEBUG
338 #define DPRINTF(x...)		do { if (mfii_debug) printf(x); } while(0)
339 #define DNPRINTF(n,x...)	do { if (mfii_debug & n) printf(x); } while(0)
340 #define	MFII_D_CMD		0x0001
341 #define	MFII_D_INTR		0x0002
342 #define	MFII_D_MISC		0x0004
343 #define	MFII_D_DMA		0x0008
344 #define	MFII_D_IOCTL		0x0010
345 #define	MFII_D_RW		0x0020
346 #define	MFII_D_MEM		0x0040
347 #define	MFII_D_CCB		0x0080
348 uint32_t	mfii_debug = 0
349 /*		    | MFII_D_CMD */
350 /*		    | MFII_D_INTR */
351 		    | MFII_D_MISC
352 /*		    | MFII_D_DMA */
353 /*		    | MFII_D_IOCTL */
354 /*		    | MFII_D_RW */
355 /*		    | MFII_D_MEM */
356 /*		    | MFII_D_CCB */
357 		;
358 #else
359 #define DPRINTF(x...)
360 #define DNPRINTF(n,x...)
361 #endif
362 
363 int		mfii_match(struct device *, void *, void *);
364 void		mfii_attach(struct device *, struct device *, void *);
365 int		mfii_detach(struct device *, int);
366 int		mfii_activate(struct device *, int);
367 
/* autoconf(9) attachment glue */
struct cfattach mfii_ca = {
	sizeof(struct mfii_softc),
	mfii_match,
	mfii_attach,
	mfii_detach,
	mfii_activate,
};
375 
/* driver definition: "mfii" device instances, dull device class */
struct cfdriver mfii_cd = {
	NULL,
	"mfii",
	DV_DULL
};
381 
382 void		mfii_scsi_cmd(struct scsi_xfer *);
383 void		mfii_scsi_cmd_done(struct mfii_softc *, struct mfii_ccb *);
384 int		mfii_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
385 int		mfii_ioctl_cache(struct scsi_link *, u_long, struct dk_cache *);
386 
/* adapter entry points for the logical disk bus */
struct scsi_adapter mfii_switch = {
	mfii_scsi_cmd, NULL, NULL, NULL, mfii_scsi_ioctl
};
390 
391 void		mfii_pd_scsi_cmd(struct scsi_xfer *);
392 int		mfii_pd_scsi_probe(struct scsi_link *);
393 
/* adapter entry points for the pass-through physical disk bus */
struct scsi_adapter mfii_pd_switch = {
	mfii_pd_scsi_cmd, NULL, mfii_pd_scsi_probe, NULL, NULL,
};
397 
398 #define DEVNAME(_sc)		((_sc)->sc_dev.dv_xname)
399 
400 u_int32_t		mfii_read(struct mfii_softc *, bus_size_t);
401 void			mfii_write(struct mfii_softc *, bus_size_t, u_int32_t);
402 
403 struct mfii_dmamem *	mfii_dmamem_alloc(struct mfii_softc *, size_t);
404 void			mfii_dmamem_free(struct mfii_softc *,
405 			    struct mfii_dmamem *);
406 
407 void *			mfii_get_ccb(void *);
408 void			mfii_put_ccb(void *, void *);
409 int			mfii_init_ccb(struct mfii_softc *);
410 void			mfii_scrub_ccb(struct mfii_ccb *);
411 
412 int			mfii_transition_firmware(struct mfii_softc *);
413 int			mfii_initialise_firmware(struct mfii_softc *);
414 int			mfii_get_info(struct mfii_softc *);
415 int			mfii_syspd(struct mfii_softc *);
416 
417 void			mfii_start(struct mfii_softc *, struct mfii_ccb *);
418 void			mfii_done(struct mfii_softc *, struct mfii_ccb *);
419 int			mfii_poll(struct mfii_softc *, struct mfii_ccb *);
420 void			mfii_poll_done(struct mfii_softc *, struct mfii_ccb *);
421 int			mfii_exec(struct mfii_softc *, struct mfii_ccb *);
422 void			mfii_exec_done(struct mfii_softc *, struct mfii_ccb *);
423 int			mfii_my_intr(struct mfii_softc *);
424 int			mfii_intr(void *);
425 void			mfii_postq(struct mfii_softc *);
426 
427 int			mfii_load_ccb(struct mfii_softc *, struct mfii_ccb *,
428 			    void *, int);
429 int			mfii_load_mfa(struct mfii_softc *, struct mfii_ccb *,
430 			    void *, int);
431 
432 int			mfii_mfa_poll(struct mfii_softc *, struct mfii_ccb *);
433 
434 int			mfii_mgmt(struct mfii_softc *, uint32_t,
435 			    const union mfi_mbox *, void *, size_t, int);
436 int			mfii_do_mgmt(struct mfii_softc *, struct mfii_ccb *,
437 			    uint32_t, const union mfi_mbox *, void *, size_t,
438 			    int);
439 void			mfii_empty_done(struct mfii_softc *, struct mfii_ccb *);
440 
441 int			mfii_scsi_cmd_io(struct mfii_softc *,
442 			    struct scsi_xfer *);
443 int			mfii_scsi_cmd_cdb(struct mfii_softc *,
444 			    struct scsi_xfer *);
445 int			mfii_pd_scsi_cmd_cdb(struct mfii_softc *,
446 			    struct scsi_xfer *);
447 void			mfii_scsi_cmd_tmo(void *);
448 
449 int			mfii_dev_handles_update(struct mfii_softc *sc);
450 void			mfii_dev_handles_smr(void *pd_arg);
451 
452 void			mfii_abort_task(void *);
453 void			mfii_abort(struct mfii_softc *, struct mfii_ccb *,
454 			    uint16_t, uint16_t, uint8_t, uint32_t);
455 void			mfii_scsi_cmd_abort_done(struct mfii_softc *,
456 			    struct mfii_ccb *);
457 
458 int			mfii_aen_register(struct mfii_softc *);
459 void			mfii_aen_start(struct mfii_softc *, struct mfii_ccb *,
460 			    struct mfii_dmamem *, uint32_t);
461 void			mfii_aen_done(struct mfii_softc *, struct mfii_ccb *);
462 void			mfii_aen(void *);
463 void			mfii_aen_unregister(struct mfii_softc *);
464 
465 void			mfii_aen_pd_insert(struct mfii_softc *,
466 			    const struct mfi_evtarg_pd_address *);
467 void			mfii_aen_pd_remove(struct mfii_softc *,
468 			    const struct mfi_evtarg_pd_address *);
469 void			mfii_aen_pd_state_change(struct mfii_softc *,
470 			    const struct mfi_evtarg_pd_state *);
471 void			mfii_aen_ld_update(struct mfii_softc *);
472 
473 #if NBIO > 0
474 int		mfii_ioctl(struct device *, u_long, caddr_t);
475 int		mfii_bio_getitall(struct mfii_softc *);
476 int		mfii_ioctl_inq(struct mfii_softc *, struct bioc_inq *);
477 int		mfii_ioctl_vol(struct mfii_softc *, struct bioc_vol *);
478 int		mfii_ioctl_disk(struct mfii_softc *, struct bioc_disk *);
479 int		mfii_ioctl_alarm(struct mfii_softc *, struct bioc_alarm *);
480 int		mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *);
481 int		mfii_ioctl_setstate(struct mfii_softc *,
482 		    struct bioc_setstate *);
483 int		mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *);
484 int		mfii_bio_hs(struct mfii_softc *, int, int, void *);
485 
486 #ifndef SMALL_KERNEL
/* human readable names for the BBU status bits, indexed by bit number */
static const char *mfi_bbu_indicators[] = {
	"pack missing",
	"voltage low",
	"temp high",
	"charge active",
	"discharge active",
	"learn cycle req'd",
	"learn cycle active",
	"learn cycle failed",
	"learn cycle timeout",
	"I2C errors",
	"replace pack",
	"low capacity",
	"periodic learn req'd"
};
502 
503 void		mfii_init_ld_sensor(struct mfii_softc *, int);
504 void		mfii_refresh_ld_sensor(struct mfii_softc *, int);
505 int		mfii_create_sensors(struct mfii_softc *);
506 void		mfii_refresh_sensors(void *);
507 void		mfii_bbu(struct mfii_softc *);
508 #endif /* SMALL_KERNEL */
509 #endif /* NBIO > 0 */
510 
511 /*
512  * mfii boards support asynchronous (and non-polled) completion of
513  * dcmds by proxying them through a passthru mpii command that points
514  * at a dcmd frame. since the passthru command is submitted like
515  * the scsi commands using an SMID in the request descriptor,
 * ccb_request memory must contain the passthru command because
517  * that is what the SMID refers to. this means ccb_request cannot
518  * contain the dcmd. rather than allocating separate dma memory to
519  * hold the dcmd, we reuse the sense memory buffer for it.
520  */
521 
522 void			mfii_dcmd_start(struct mfii_softc *,
523 			    struct mfii_ccb *);
524 
/*
 * Zero the dcmd frame for this ccb.  The frame lives in the ccb's
 * sense buffer because dcmds are proxied through a passthru command
 * and cannot use the request frame itself.
 */
static inline void
mfii_dcmd_scrub(struct mfii_ccb *ccb)
{
	memset(ccb->ccb_sense, 0, sizeof(*ccb->ccb_sense));
}
530 
/*
 * Return the dcmd frame overlaid on the ccb's sense buffer; the
 * compile-time assertion guarantees the frame actually fits there.
 */
static inline struct mfi_dcmd_frame *
mfii_dcmd_frame(struct mfii_ccb *ccb)
{
	CTASSERT(sizeof(struct mfi_dcmd_frame) <= sizeof(*ccb->ccb_sense));
	return ((struct mfi_dcmd_frame *)ccb->ccb_sense);
}
537 
/* sync this ccb's slice of the sense DMA area (the dcmd frame) */
static inline void
mfii_dcmd_sync(struct mfii_softc *sc, struct mfii_ccb *ccb, int flags)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sense),
	    ccb->ccb_sense_offset, sizeof(*ccb->ccb_sense), flags);
}
544 
545 #define mfii_fw_state(_sc) mfii_read((_sc), MFI_OSP)
546 
/* gen 2 (thunderbolt/2208) parameters; fields per struct mfii_iop */
const struct mfii_iop mfii_iop_thunderbolt = {
	MFII_BAR,
	MFII_IOP_NUM_SGE_LOC_ORIG,
	0,
	MFII_REQ_TYPE_LDIO,
	0,
	MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA,
	0
};
556 
557 /*
558  * a lot of these values depend on us not implementing fastpath yet.
559  */
/*
 * gen 2.5/3 (3008/3108) parameters; fields per struct mfii_iop.
 * a lot of these values depend on us not implementing fastpath yet.
 */
const struct mfii_iop mfii_iop_25 = {
	MFII_BAR,
	MFII_IOP_NUM_SGE_LOC_ORIG,
	MFII_RAID_CTX_RL_FLAGS_CPU0, /* | MFII_RAID_CTX_RL_FLAGS_SEQNO_EN */
	MFII_REQ_TYPE_NO_LOCK,
	MFII_RAID_CTX_TYPE_CUDA | 0x1,
	MFII_SGE_CHAIN_ELEMENT,
	MFII_SGE_END_OF_LIST
};
569 
/* gen 3.5 (34xx/35xx) parameters; fields per struct mfii_iop */
const struct mfii_iop mfii_iop_35 = {
	MFII_BAR_35,
	MFII_IOP_NUM_SGE_LOC_35,
	MFII_RAID_CTX_ROUTING_FLAGS_CPU0, /* | MFII_RAID_CTX_ROUTING_FLAGS_SQN */
	MFII_REQ_TYPE_NO_LOCK,
	MFII_RAID_CTX_TYPE_CUDA | 0x1,
	MFII_SGE_CHAIN_ELEMENT,
	MFII_SGE_END_OF_LIST
};
579 
/* one supported PCI id and the iop parameters to drive it with */
struct mfii_device {
	pcireg_t		mpd_vendor;
	pcireg_t		mpd_product;
	const struct mfii_iop	*mpd_iop;
};
585 
/* table of supported controllers, searched by mfii_find_iop() */
const struct mfii_device mfii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_2208,
	    &mfii_iop_thunderbolt },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3008,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3108,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3404,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3504,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3408,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3508,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3416,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3516,
	    &mfii_iop_35 }
};
606 
607 const struct mfii_iop *mfii_find_iop(struct pci_attach_args *);
608 
609 const struct mfii_iop *
610 mfii_find_iop(struct pci_attach_args *pa)
611 {
612 	const struct mfii_device *mpd;
613 	int i;
614 
615 	for (i = 0; i < nitems(mfii_devices); i++) {
616 		mpd = &mfii_devices[i];
617 
618 		if (mpd->mpd_vendor == PCI_VENDOR(pa->pa_id) &&
619 		    mpd->mpd_product == PCI_PRODUCT(pa->pa_id))
620 			return (mpd->mpd_iop);
621 	}
622 
623 	return (NULL);
624 }
625 
626 int
627 mfii_match(struct device *parent, void *match, void *aux)
628 {
629 	return ((mfii_find_iop(aux) != NULL) ? 1 : 0);
630 }
631 
632 void
633 mfii_attach(struct device *parent, struct device *self, void *aux)
634 {
635 	struct mfii_softc *sc = (struct mfii_softc *)self;
636 	struct pci_attach_args *pa = aux;
637 	pcireg_t memtype;
638 	pci_intr_handle_t ih;
639 	struct scsibus_attach_args saa;
640 	u_int32_t status, scpad2, scpad3;
641 	int chain_frame_sz, nsge_in_io, nsge_in_chain, i;
642 
643 	/* init sc */
644 	sc->sc_iop = mfii_find_iop(aux);
645 	sc->sc_dmat = pa->pa_dmat;
646 	SIMPLEQ_INIT(&sc->sc_ccb_freeq);
647 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
648 	mtx_init(&sc->sc_post_mtx, IPL_BIO);
649 	mtx_init(&sc->sc_reply_postq_mtx, IPL_BIO);
650 	scsi_iopool_init(&sc->sc_iopool, sc, mfii_get_ccb, mfii_put_ccb);
651 
652 	rw_init(&sc->sc_lock, "mfii_lock");
653 
654 	sc->sc_aen_ccb = NULL;
655 	task_set(&sc->sc_aen_task, mfii_aen, sc);
656 
657 	mtx_init(&sc->sc_abort_mtx, IPL_BIO);
658 	SIMPLEQ_INIT(&sc->sc_abort_list);
659 	task_set(&sc->sc_abort_task, mfii_abort_task, sc);
660 
661 	/* wire up the bus shizz */
662 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_iop->bar);
663 	if (pci_mapreg_map(pa, sc->sc_iop->bar, memtype, 0,
664 	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios, MFII_PCI_MEMSIZE)) {
665 		printf(": unable to map registers\n");
666 		return;
667 	}
668 
669 	/* disable interrupts */
670 	mfii_write(sc, MFI_OMSK, 0xffffffff);
671 
672 	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
673 		printf(": unable to map interrupt\n");
674 		goto pci_unmap;
675 	}
676 	printf(": %s\n", pci_intr_string(pa->pa_pc, ih));
677 
678 	/* lets get started */
679 	if (mfii_transition_firmware(sc))
680 		goto pci_unmap;
681 
682 	/* determine max_cmds (refer to the Linux megaraid_sas driver) */
683 	scpad3 = mfii_read(sc, MFII_OSP3);
684 	status = mfii_fw_state(sc);
685 	sc->sc_max_fw_cmds = scpad3 & MFI_STATE_MAXCMD_MASK;
686 	if (sc->sc_max_fw_cmds == 0)
687 		sc->sc_max_fw_cmds = status & MFI_STATE_MAXCMD_MASK;
688 	/*
689 	 * reduce max_cmds by 1 to ensure that the reply queue depth does not
690 	 * exceed FW supplied max_fw_cmds.
691 	 */
692 	sc->sc_max_cmds = min(sc->sc_max_fw_cmds, 1024) - 1;
693 
694 	/* determine max_sgl (refer to the Linux megaraid_sas driver) */
695 	scpad2 = mfii_read(sc, MFII_OSP2);
696 	chain_frame_sz =
697 		((scpad2 & MFII_MAX_CHAIN_MASK) >> MFII_MAX_CHAIN_SHIFT) *
698 		((scpad2 & MFII_MAX_CHAIN_UNIT) ? MFII_1MB_IO : MFII_256K_IO);
699 	if (chain_frame_sz < MFII_CHAIN_FRAME_MIN)
700 		chain_frame_sz = MFII_CHAIN_FRAME_MIN;
701 
702 	nsge_in_io = (MFII_REQUEST_SIZE -
703 		sizeof(struct mpii_msg_scsi_io) -
704 		sizeof(struct mfii_raid_context)) / sizeof(struct mfii_sge);
705 	nsge_in_chain = chain_frame_sz / sizeof(struct mfii_sge);
706 
707 	/* round down to nearest power of two */
708 	sc->sc_max_sgl = 1;
709 	while ((sc->sc_max_sgl << 1) <= (nsge_in_io + nsge_in_chain))
710 		sc->sc_max_sgl <<= 1;
711 
712 	DNPRINTF(MFII_D_MISC, "%s: OSP 0x%08x, OSP2 0x%08x, OSP3 0x%08x\n",
713 	    DEVNAME(sc), status, scpad2, scpad3);
714 	DNPRINTF(MFII_D_MISC, "%s: max_fw_cmds %d, max_cmds %d\n",
715 	    DEVNAME(sc), sc->sc_max_fw_cmds, sc->sc_max_cmds);
716 	DNPRINTF(MFII_D_MISC, "%s: nsge_in_io %d, nsge_in_chain %d, "
717 	    "max_sgl %d\n", DEVNAME(sc), nsge_in_io, nsge_in_chain,
718 	    sc->sc_max_sgl);
719 
720 	/* sense memory */
721 	CTASSERT(sizeof(struct mfi_sense) == MFI_SENSE_SIZE);
722 	sc->sc_sense = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
723 	if (sc->sc_sense == NULL) {
724 		printf("%s: unable to allocate sense memory\n", DEVNAME(sc));
725 		goto pci_unmap;
726 	}
727 
728 	/* reply post queue */
729 	sc->sc_reply_postq_depth = roundup(sc->sc_max_fw_cmds, 16);
730 
731 	sc->sc_reply_postq = mfii_dmamem_alloc(sc,
732 	    sc->sc_reply_postq_depth * sizeof(struct mpii_reply_descr));
733 	if (sc->sc_reply_postq == NULL)
734 		goto free_sense;
735 
736 	memset(MFII_DMA_KVA(sc->sc_reply_postq), 0xff,
737 	    MFII_DMA_LEN(sc->sc_reply_postq));
738 
739 	/* MPII request frame array */
740 	sc->sc_requests = mfii_dmamem_alloc(sc,
741 	    MFII_REQUEST_SIZE * (sc->sc_max_cmds + 1));
742 	if (sc->sc_requests == NULL)
743 		goto free_reply_postq;
744 
745 	/* MFI command frame array */
746 	sc->sc_mfi = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_FRAME_SIZE);
747 	if (sc->sc_mfi == NULL)
748 		goto free_requests;
749 
750 	/* MPII SGL array */
751 	sc->sc_sgl = mfii_dmamem_alloc(sc, sc->sc_max_cmds *
752 	    sizeof(struct mfii_sge) * sc->sc_max_sgl);
753 	if (sc->sc_sgl == NULL)
754 		goto free_mfi;
755 
756 	if (mfii_init_ccb(sc) != 0) {
757 		printf("%s: could not init ccb list\n", DEVNAME(sc));
758 		goto free_sgl;
759 	}
760 
761 	/* kickstart firmware with all addresses and pointers */
762 	if (mfii_initialise_firmware(sc) != 0) {
763 		printf("%s: could not initialize firmware\n", DEVNAME(sc));
764 		goto free_sgl;
765 	}
766 
767 	if (mfii_get_info(sc) != 0) {
768 		printf("%s: could not retrieve controller information\n",
769 		    DEVNAME(sc));
770 		goto free_sgl;
771 	}
772 
773 	printf("%s: \"%s\", firmware %s", DEVNAME(sc),
774 	    sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
775 	if (letoh16(sc->sc_info.mci_memory_size) > 0)
776 		printf(", %uMB cache", letoh16(sc->sc_info.mci_memory_size));
777 	printf("\n");
778 
779 	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO,
780 	    mfii_intr, sc, DEVNAME(sc));
781 	if (sc->sc_ih == NULL)
782 		goto free_sgl;
783 
784 	saa.saa_adapter_softc = sc;
785 	saa.saa_adapter = &mfii_switch;
786 	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
787 	saa.saa_adapter_buswidth = sc->sc_info.mci_max_lds;
788 	saa.saa_luns = 8;
789 	saa.saa_openings = sc->sc_max_cmds;
790 	saa.saa_pool = &sc->sc_iopool;
791 	saa.saa_quirks = saa.saa_flags = 0;
792 	saa.saa_wwpn = saa.saa_wwnn = 0;
793 
794 	sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev, &saa,
795 	    scsiprint);
796 
797 	mfii_syspd(sc);
798 
799 	if (mfii_aen_register(sc) != 0) {
800 		/* error printed by mfii_aen_register */
801 		goto intr_disestablish;
802 	}
803 
804 	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
805 	    sizeof(sc->sc_ld_list), SCSI_DATA_IN) != 0) {
806 		printf("%s: getting list of logical disks failed\n", DEVNAME(sc));
807 		goto intr_disestablish;
808 	}
809 	memset(sc->sc_target_lds, -1, sizeof(sc->sc_target_lds));
810 	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
811 		int target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
812 		sc->sc_target_lds[target] = i;
813 	}
814 
815 	/* enable interrupts */
816 	mfii_write(sc, MFI_OSTS, 0xffffffff);
817 	mfii_write(sc, MFI_OMSK, ~MFII_OSTS_INTR_VALID);
818 
819 #if NBIO > 0
820 	if (bio_register(&sc->sc_dev, mfii_ioctl) != 0)
821 		panic("%s: controller registration failed", DEVNAME(sc));
822 	else
823 		sc->sc_ioctl = mfii_ioctl;
824 
825 #ifndef SMALL_KERNEL
826 	if (mfii_create_sensors(sc) != 0)
827 		printf("%s: unable to create sensors\n", DEVNAME(sc));
828 #endif
829 #endif /* NBIO > 0 */
830 
831 	return;
832 intr_disestablish:
833 	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
834 free_sgl:
835 	mfii_dmamem_free(sc, sc->sc_sgl);
836 free_mfi:
837 	mfii_dmamem_free(sc, sc->sc_mfi);
838 free_requests:
839 	mfii_dmamem_free(sc, sc->sc_requests);
840 free_reply_postq:
841 	mfii_dmamem_free(sc, sc->sc_reply_postq);
842 free_sense:
843 	mfii_dmamem_free(sc, sc->sc_sense);
844 pci_unmap:
845 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
846 }
847 
/*
 * Translate a physical disk target id to its current firmware device
 * handle.  The snapshot pointer must be loaded and dereferenced
 * inside an SMR read section so it cannot be reclaimed under us.
 */
static inline uint16_t
mfii_dev_handle(struct mfii_softc *sc, uint16_t target)
{
	struct mfii_pd_dev_handles *handles;
	uint16_t handle;

	smr_read_enter();
	handles = SMR_PTR_GET(&sc->sc_pd->pd_dev_handles);
	handle = handles->pd_handles[target];
	smr_read_leave();

	return (handle);
}
861 
862 void
863 mfii_dev_handles_smr(void *pd_arg)
864 {
865 	struct mfii_pd_dev_handles *handles = pd_arg;
866 
867 	free(handles, M_DEVBUF, sizeof(*handles));
868 }
869 
/*
 * Fetch the LD map from the firmware and publish a fresh SMR-managed
 * snapshot of the target id -> device handle table for the physical
 * disk bus.  Returns 0 on success or EIO if the dcmd fails.
 */
int
mfii_dev_handles_update(struct mfii_softc *sc)
{
	struct mfii_ld_map *lm;
	struct mfii_pd_dev_handles *handles, *old_handles;
	int i;
	int rv = 0;

	lm = malloc(sizeof(*lm), M_TEMP, M_WAITOK|M_ZERO);

	rv = mfii_mgmt(sc, MR_DCMD_LD_MAP_GET_INFO, NULL, lm, sizeof(*lm),
	    SCSI_DATA_IN|SCSI_NOSLEEP);

	if (rv != 0) {
		rv = EIO;
		goto free_lm;
	}

	/* build the new snapshot from the firmware's map */
	handles = malloc(sizeof(*handles), M_DEVBUF, M_WAITOK);
	smr_init(&handles->pd_smr);
	for (i = 0; i < MFI_MAX_PD; i++)
		handles->pd_handles[i] = lm->mlm_dev_handle[i].mdh_cur_handle;

	/* commit the updated info */
	sc->sc_pd->pd_timeout = lm->mlm_pd_timeout;
	old_handles = SMR_PTR_GET_LOCKED(&sc->sc_pd->pd_dev_handles);
	SMR_PTR_SET_LOCKED(&sc->sc_pd->pd_dev_handles, handles);

	/* reclaim the old snapshot once all SMR readers have drained */
	if (old_handles != NULL)
		smr_call(&old_handles->pd_smr, mfii_dev_handles_smr, old_handles);

free_lm:
	free(lm, M_TEMP, sizeof(*lm));

	return (rv);
}
906 
/*
 * Attach the second scsibus that exposes the physical disks behind
 * the controller.  Returns 0 on success, 1 on failure.
 */
int
mfii_syspd(struct mfii_softc *sc)
{
	struct scsibus_attach_args saa;

	sc->sc_pd = malloc(sizeof(*sc->sc_pd), M_DEVBUF, M_WAITOK|M_ZERO);
	if (sc->sc_pd == NULL)
		return (1);

	/* prime the target -> dev handle table before probing disks */
	if (mfii_dev_handles_update(sc) != 0)
		goto free_pdsc;

	saa.saa_adapter =  &mfii_pd_switch;
	saa.saa_adapter_softc = sc;
	saa.saa_adapter_buswidth = MFI_MAX_PD;
	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
	saa.saa_luns = 8;
	saa.saa_openings = sc->sc_max_cmds - 1;
	saa.saa_pool = &sc->sc_iopool;
	saa.saa_quirks = saa.saa_flags = 0;
	saa.saa_wwpn = saa.saa_wwnn = 0;

	sc->sc_pd->pd_scsibus = (struct scsibus_softc *)
	    config_found(&sc->sc_dev, &saa, scsiprint);

	return (0);

free_pdsc:
	free(sc->sc_pd, M_DEVBUF, sizeof(*sc->sc_pd));
	return (1);
}
938 
/*
 * Detach: tear down sensors, AEN handling, the interrupt handler and
 * all shared DMA areas.  A NULL sc_ih means attach never completed,
 * so there is nothing to undo.
 */
int
mfii_detach(struct device *self, int flags)
{
	struct mfii_softc *sc = (struct mfii_softc *)self;

	if (sc->sc_ih == NULL)
		return (0);

#ifndef SMALL_KERNEL
	if (sc->sc_sensors) {
		sensordev_deinstall(&sc->sc_sensordev);
		free(sc->sc_sensors, M_DEVBUF,
		    MFI_MAX_LD * sizeof(struct ksensor));
	}

	if (sc->sc_bbu) {
		free(sc->sc_bbu, M_DEVBUF, 4 * sizeof(*sc->sc_bbu));
	}

	if (sc->sc_bbu_status) {
		/*
		 * NOTE(review): this size multiplies by
		 * sizeof(mfi_bbu_indicators) -- the byte size of the
		 * pointer array, not its element count.  Verify it
		 * matches the size passed to malloc() at the (unseen)
		 * allocation site; a mismatch trips free(9)'s sized
		 * free check.
		 */
		free(sc->sc_bbu_status, M_DEVBUF,
		    sizeof(*sc->sc_bbu_status) * sizeof(mfi_bbu_indicators));
	}
#endif /* SMALL_KERNEL */

	mfii_aen_unregister(sc);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	mfii_dmamem_free(sc, sc->sc_sgl);
	mfii_dmamem_free(sc, sc->sc_mfi);
	mfii_dmamem_free(sc, sc->sc_requests);
	mfii_dmamem_free(sc, sc->sc_reply_postq);
	mfii_dmamem_free(sc, sc->sc_sense);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);

	return (0);
}
975 
976 static void
977 mfii_flush_cache(struct mfii_softc *sc, struct mfii_ccb *ccb)
978 {
979 	union mfi_mbox mbox = {
980 		.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE,
981 	};
982 	int rv;
983 
984 	mfii_scrub_ccb(ccb);
985 	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH, &mbox,
986 	    NULL, 0, SCSI_NOSLEEP);
987 	if (rv != 0) {
988 		printf("%s: unable to flush cache\n", DEVNAME(sc));
989 		return;
990 	}
991 }
992 
993 static void
994 mfii_shutdown(struct mfii_softc *sc, struct mfii_ccb *ccb)
995 {
996 	int rv;
997 
998 	mfii_scrub_ccb(ccb);
999 	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_SHUTDOWN, NULL,
1000 	    NULL, 0, SCSI_POLL);
1001 	if (rv != 0) {
1002 		printf("%s: unable to shutdown controller\n", DEVNAME(sc));
1003 		return;
1004 	}
1005 }
1006 
1007 static void
1008 mfii_powerdown(struct mfii_softc *sc)
1009 {
1010 	struct mfii_ccb *ccb;
1011 
1012 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
1013 	if (ccb == NULL) {
1014 		printf("%s: unable to allocate ccb for shutdown\n",
1015 		    DEVNAME(sc));
1016 		return;
1017 	}
1018 
1019 	mfii_flush_cache(sc, ccb);
1020 	mfii_shutdown(sc, ccb);
1021 	scsi_io_put(&sc->sc_iopool, ccb);
1022 }
1023 
1024 int
1025 mfii_activate(struct device *self, int act)
1026 {
1027 	struct mfii_softc *sc = (struct mfii_softc *)self;
1028 	int rv;
1029 
1030 	switch (act) {
1031 	case DVACT_POWERDOWN:
1032 		rv = config_activate_children(&sc->sc_dev, act);
1033 		mfii_powerdown(sc);
1034 		break;
1035 	default:
1036 		rv = config_activate_children(&sc->sc_dev, act);
1037 		break;
1038 	}
1039 
1040 	return (rv);
1041 }
1042 
1043 u_int32_t
1044 mfii_read(struct mfii_softc *sc, bus_size_t r)
1045 {
1046 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1047 	    BUS_SPACE_BARRIER_READ);
1048 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
1049 }
1050 
1051 void
1052 mfii_write(struct mfii_softc *sc, bus_size_t r, u_int32_t v)
1053 {
1054 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
1055 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1056 	    BUS_SPACE_BARRIER_WRITE);
1057 }
1058 
1059 struct mfii_dmamem *
1060 mfii_dmamem_alloc(struct mfii_softc *sc, size_t size)
1061 {
1062 	struct mfii_dmamem *m;
1063 	int nsegs;
1064 
1065 	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
1066 	if (m == NULL)
1067 		return (NULL);
1068 
1069 	m->mdm_size = size;
1070 
1071 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1072 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->mdm_map) != 0)
1073 		goto mdmfree;
1074 
1075 	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->mdm_seg, 1,
1076 	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
1077 		goto destroy;
1078 
1079 	if (bus_dmamem_map(sc->sc_dmat, &m->mdm_seg, nsegs, size, &m->mdm_kva,
1080 	    BUS_DMA_NOWAIT) != 0)
1081 		goto free;
1082 
1083 	if (bus_dmamap_load(sc->sc_dmat, m->mdm_map, m->mdm_kva, size, NULL,
1084 	    BUS_DMA_NOWAIT) != 0)
1085 		goto unmap;
1086 
1087 	return (m);
1088 
1089 unmap:
1090 	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
1091 free:
1092 	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
1093 destroy:
1094 	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
1095 mdmfree:
1096 	free(m, M_DEVBUF, sizeof *m);
1097 
1098 	return (NULL);
1099 }
1100 
/*
 * Release a chunk allocated with mfii_dmamem_alloc().  Teardown runs
 * in the exact reverse of the allocation order: unload the map, unmap
 * the kva, free the segment, destroy the map, then free the wrapper.
 */
void
mfii_dmamem_free(struct mfii_softc *sc, struct mfii_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
	free(m, M_DEVBUF, sizeof *m);
}
1110 
/*
 * Post a pre-built MFI DCMD frame (living in the ccb's mfi area) to
 * the controller via an MPII passthru request.  The passthru frame is
 * laid out as: scsi_io header, raid context, then a single chain SGE
 * pointing at the sense buffer.
 */
void
mfii_dcmd_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* raid context sits immediately after the io header in the frame */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	/* and the chain SGE immediately after the raid context */
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);

	io->function = MFII_FUNCTION_PASSTHRU_IO;
	/* sgl_offset0 counts 32-bit words; chain_offset counts 16-byte units */
	io->sgl_offset0 = (uint32_t *)sge - (uint32_t *)io;
	io->chain_offset = io->sgl_offset0 / 4;

	htolem64(&sge->sg_addr, ccb->ccb_sense_dva);
	htolem32(&sge->sg_len, sizeof(*ccb->ccb_sense));
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	mfii_start(sc, ccb);
}
1131 
/*
 * Set up asynchronous event notification (AEN).  Fetches the firmware
 * event-log info to learn the boot sequence number, allocates a DMA
 * buffer for event details, and arms the first EVENT_WAIT command.
 * On success the ccb and DMA buffer are handed over to the AEN
 * machinery (mfii_aen_start) and are never returned to their pools.
 */
int
mfii_aen_register(struct mfii_softc *sc)
{
	struct mfi_evt_log_info mel;
	struct mfii_ccb *ccb;
	struct mfii_dmamem *mdm;
	int rv;

	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
	if (ccb == NULL) {
		printf("%s: unable to allocate ccb for aen\n", DEVNAME(sc));
		return (ENOMEM);
	}

	memset(&mel, 0, sizeof(mel));
	mfii_scrub_ccb(ccb);

	/* ask the firmware where its event log currently stands */
	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_EVENT_GET_INFO, NULL,
	    &mel, sizeof(mel), SCSI_DATA_IN|SCSI_NOSLEEP);
	if (rv != 0) {
		scsi_io_put(&sc->sc_iopool, ccb);
		printf("%s: unable to get event info\n", DEVNAME(sc));
		return (EIO);
	}

	mdm = mfii_dmamem_alloc(sc, sizeof(struct mfi_evt_detail));
	if (mdm == NULL) {
		scsi_io_put(&sc->sc_iopool, ccb);
		printf("%s: unable to allocate event data\n", DEVNAME(sc));
		return (ENOMEM);
	}

	/* replay all the events from boot */
	mfii_aen_start(sc, ccb, mdm, lemtoh32(&mel.mel_boot_seq_num));

	return (0);
}
1169 
/*
 * Arm (or re-arm) the EVENT_WAIT DCMD.  The firmware completes this
 * command when an event with a sequence number >= seq occurs; the
 * event detail is DMAed into mdm.  Completion fires mfii_aen_done().
 */
void
mfii_aen_start(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct mfii_dmamem *mdm, uint32_t seq)
{
	struct mfi_dcmd_frame *dcmd = mfii_dcmd_frame(ccb);
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	union mfi_sgl *sgl = &dcmd->mdf_sgl;
	union mfi_evt_class_locale mec;

	/* start from a clean ccb, dcmd frame and event buffer */
	mfii_scrub_ccb(ccb);
	mfii_dcmd_scrub(ccb);
	memset(MFII_DMA_KVA(mdm), 0, MFII_DMA_LEN(mdm));

	ccb->ccb_cookie = mdm;
	ccb->ccb_done = mfii_aen_done;
	sc->sc_aen_ccb = ccb;

	/* subscribe to everything: all locales, debug class and up */
	mec.mec_members.class = MFI_EVT_CLASS_DEBUG;
	mec.mec_members.reserved = 0;
	mec.mec_members.locale = htole16(MFI_EVT_LOCALE_ALL);

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_sg_count = 1;
	hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ | MFI_FRAME_SGL64);
	htolem32(&hdr->mfh_data_len, MFII_DMA_LEN(mdm));
	dcmd->mdf_opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	htolem32(&dcmd->mdf_mbox.w[0], seq);
	htolem32(&dcmd->mdf_mbox.w[1], mec.mec_word);
	htolem64(&sgl->sg64[0].addr, MFII_DMA_DVA(mdm));
	htolem32(&sgl->sg64[0].len, MFII_DMA_LEN(mdm));

	/* hand the event buffer and the dcmd frame to the device */
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_PREREAD);

	mfii_dcmd_sync(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	mfii_dcmd_start(sc, ccb);
}
1207 
/*
 * Interrupt-side completion of the EVENT_WAIT ccb.  The actual event
 * processing may probe/detach devices, so it is deferred to mfii_aen()
 * via the systq task.
 */
void
mfii_aen_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	KASSERT(sc->sc_aen_ccb == ccb);

	/* defer to a thread with KERNEL_LOCK so we can run autoconf */
	task_add(systq, &sc->sc_aen_task);
}
1216 
1217 void
1218 mfii_aen(void *arg)
1219 {
1220 	struct mfii_softc *sc = arg;
1221 	struct mfii_ccb *ccb = sc->sc_aen_ccb;
1222 	struct mfii_dmamem *mdm = ccb->ccb_cookie;
1223 	const struct mfi_evt_detail *med = MFII_DMA_KVA(mdm);
1224 	uint32_t code;
1225 
1226 	mfii_dcmd_sync(sc, ccb,
1227 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1228 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
1229 	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_POSTREAD);
1230 
1231 	code = lemtoh32(&med->med_code);
1232 
1233 #if 0
1234 	log(LOG_DEBUG, "%s (seq %u, code %08x) %s\n", DEVNAME(sc),
1235 	    lemtoh32(&med->med_seq_num), code, med->med_description);
1236 #endif
1237 
1238 	switch (code) {
1239 	case MFI_EVT_PD_INSERTED_EXT:
1240 		if (med->med_arg_type != MFI_EVT_ARGS_PD_ADDRESS)
1241 			break;
1242 
1243 		mfii_aen_pd_insert(sc, &med->args.pd_address);
1244 		break;
1245  	case MFI_EVT_PD_REMOVED_EXT:
1246 		if (med->med_arg_type != MFI_EVT_ARGS_PD_ADDRESS)
1247 			break;
1248 
1249 		mfii_aen_pd_remove(sc, &med->args.pd_address);
1250 		break;
1251 
1252 	case MFI_EVT_PD_STATE_CHANGE:
1253 		if (med->med_arg_type != MFI_EVT_ARGS_PD_STATE)
1254 			break;
1255 
1256 		mfii_aen_pd_state_change(sc, &med->args.pd_state);
1257 		break;
1258 
1259 	case MFI_EVT_LD_CREATED:
1260 	case MFI_EVT_LD_DELETED:
1261 		mfii_aen_ld_update(sc);
1262 		break;
1263 
1264 	default:
1265 		break;
1266 	}
1267 
1268 	mfii_aen_start(sc, ccb, mdm, lemtoh32(&med->med_seq_num) + 1);
1269 }
1270 
/*
 * A physical disk appeared.  Refresh the device-handle map so the
 * new target id resolves, then ask the scsibus to probe it.
 */
void
mfii_aen_pd_insert(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_address *pd)
{
#if 0
	printf("%s: pd inserted ext\n", DEVNAME(sc));
	printf("%s:  device_id %04x encl_id: %04x type %x\n", DEVNAME(sc),
	    lemtoh16(&pd->device_id), lemtoh16(&pd->encl_id),
	    pd->scsi_dev_type);
	printf("%s:  connected %02x addrs %016llx %016llx\n", DEVNAME(sc),
	    pd->connected.port_bitmap, lemtoh64(&pd->sas_addr[0]),
	    lemtoh64(&pd->sas_addr[1]));
#endif

	/* without an up to date handle map the probe would be useless */
	if (mfii_dev_handles_update(sc) != 0) /* refresh map */
		return;

	scsi_probe_target(sc->sc_pd->pd_scsibus, lemtoh16(&pd->device_id));
}
1290 
/*
 * A physical disk went away.  Deactivate and then forcibly detach the
 * corresponding target on the passthru scsibus.
 */
void
mfii_aen_pd_remove(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_address *pd)
{
#if 0
	printf("%s: pd removed ext\n", DEVNAME(sc));
	printf("%s:  device_id %04x encl_id: %04x type %u\n", DEVNAME(sc),
	    lemtoh16(&pd->device_id), lemtoh16(&pd->encl_id),
	    pd->scsi_dev_type);
	printf("%s:  connected %02x addrs %016llx %016llx\n", DEVNAME(sc),
	    pd->connected.port_bitmap, lemtoh64(&pd->sas_addr[0]),
	    lemtoh64(&pd->sas_addr[1]));
#endif
	uint16_t target = lemtoh16(&pd->device_id);

	scsi_activate(sc->sc_pd->pd_scsibus, target, -1, DVACT_DEACTIVATE);

	/* the firmware will abort outstanding commands for us */

	scsi_detach_target(sc->sc_pd->pd_scsibus, target, DETACH_FORCE);
}
1312 
/*
 * A physical disk changed state.  We only care about transitions in
 * and out of MFI_PD_SYSTEM (i.e. JBOD/passthru ownership): leaving
 * SYSTEM means the disk was pulled or claimed for raid, so detach it;
 * UNCONFIG_GOOD -> SYSTEM means the firmware handed it to us, probe it.
 */
void
mfii_aen_pd_state_change(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_state *state)
{
	uint16_t target = lemtoh16(&state->pd.mep_device_id);

	if (state->prev_state == htole32(MFI_PD_SYSTEM) &&
	    state->new_state != htole32(MFI_PD_SYSTEM)) {
		/* it's been pulled or configured for raid */

		scsi_activate(sc->sc_pd->pd_scsibus, target, -1,
		    DVACT_DEACTIVATE);
		/* outstanding commands will simply complete or get aborted */
		scsi_detach_target(sc->sc_pd->pd_scsibus, target,
		    DETACH_FORCE);

	} else if (state->prev_state == htole32(MFI_PD_UNCONFIG_GOOD) &&
	    state->new_state == htole32(MFI_PD_SYSTEM)) {
		/* the firmware is handing the disk over */

		scsi_probe_target(sc->sc_pd->pd_scsibus, target);
	}
}
1336 
/*
 * A logical disk was created or deleted.  Re-fetch the LD list from
 * the firmware, diff it against the cached target map, and probe or
 * detach scsibus targets (and their sensors) accordingly.
 */
void
mfii_aen_ld_update(struct mfii_softc *sc)
{
	int i, state, target, old, nld;
	int newlds[MFI_MAX_LD];

	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), SCSI_DATA_IN) != 0) {
		DNPRINTF(MFII_D_MISC, "%s: getting list of logical disks failed\n",
		    DEVNAME(sc));
		return;
	}

	/* -1 means "no logical disk at this target" */
	memset(newlds, -1, sizeof(newlds));

	/* build target -> LD-list-index map from the fresh firmware data */
	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
		state = sc->sc_ld_list.mll_list[i].mll_state;
		target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		DNPRINTF(MFII_D_MISC, "%s: target %d: state %d\n",
		    DEVNAME(sc), target, state);
		newlds[target] = i;
	}

	/* diff against the previous map and apply the changes */
	for (i = 0; i < MFI_MAX_LD; i++) {
		old = sc->sc_target_lds[i];
		nld = newlds[i];

		if (old == -1 && nld != -1) {
			DNPRINTF(MFII_D_MISC, "%s: attaching target %d\n",
			    DEVNAME(sc), i);

			scsi_probe_target(sc->sc_scsibus, i);

#ifndef SMALL_KERNEL
			mfii_init_ld_sensor(sc, nld);
			sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
#endif
		} else if (nld == -1 && old != -1) {
			DNPRINTF(MFII_D_MISC, "%s: detaching target %d\n",
			    DEVNAME(sc), i);

			scsi_activate(sc->sc_scsibus, i, -1,
			    DVACT_DEACTIVATE);
			scsi_detach_target(sc->sc_scsibus, i,
			    DETACH_FORCE);
#ifndef SMALL_KERNEL
			sensor_detach(&sc->sc_sensordev, &sc->sc_sensors[i]);
#endif
		}
	}

	memcpy(sc->sc_target_lds, newlds, sizeof(sc->sc_target_lds));
}
1390 
void
mfii_aen_unregister(struct mfii_softc *sc)
{
	/*
	 * XXX the outstanding EVENT_WAIT ccb and its DMA buffer are not
	 * cancelled or reclaimed here; detach currently gets away with it.
	 */
}
1396 
/*
 * Walk the firmware through its state machine until it reaches
 * MFI_STATE_READY.  For each intermediate state we may have to poke
 * the inbound doorbell and then busy-wait (up to max_wait seconds,
 * polled in 100ms steps) for the state to change.  Returns 0 when the
 * firmware is ready, 1 on fault, unknown state, or timeout.
 */
int
mfii_transition_firmware(struct mfii_softc *sc)
{
	int32_t			fw_state, cur_state;
	int			max_wait, i;

	fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;

	while (fw_state != MFI_STATE_READY) {
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			printf("%s: firmware fault\n", DEVNAME(sc));
			return (1);
		case MFI_STATE_WAIT_HANDSHAKE:
			/* acknowledge the handshake to move it along */
			mfii_write(sc, MFI_SKINNY_IDB,
			    MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			/* ask the firmware to transition to ready */
			mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			/* these can legitimately take a while */
			max_wait = 20;
			break;
		default:
			printf("%s: unknown firmware state %d\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
		/* poll for a state change, 100ms at a time */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
	}

	return (0);
}
1450 
1451 int
1452 mfii_get_info(struct mfii_softc *sc)
1453 {
1454 	int i, rv;
1455 
1456 	rv = mfii_mgmt(sc, MR_DCMD_CTRL_GET_INFO, NULL, &sc->sc_info,
1457 	    sizeof(sc->sc_info), SCSI_DATA_IN|SCSI_NOSLEEP);
1458 
1459 	if (rv != 0)
1460 		return (rv);
1461 
1462 	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
1463 		DPRINTF("%s: active FW %s Version %s date %s time %s\n",
1464 		    DEVNAME(sc),
1465 		    sc->sc_info.mci_image_component[i].mic_name,
1466 		    sc->sc_info.mci_image_component[i].mic_version,
1467 		    sc->sc_info.mci_image_component[i].mic_build_date,
1468 		    sc->sc_info.mci_image_component[i].mic_build_time);
1469 	}
1470 
1471 	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
1472 		DPRINTF("%s: pending FW %s Version %s date %s time %s\n",
1473 		    DEVNAME(sc),
1474 		    sc->sc_info.mci_pending_image_component[i].mic_name,
1475 		    sc->sc_info.mci_pending_image_component[i].mic_version,
1476 		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
1477 		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
1478 	}
1479 
1480 	DPRINTF("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
1481 	    DEVNAME(sc),
1482 	    sc->sc_info.mci_max_arms,
1483 	    sc->sc_info.mci_max_spans,
1484 	    sc->sc_info.mci_max_arrays,
1485 	    sc->sc_info.mci_max_lds,
1486 	    sc->sc_info.mci_product_name);
1487 
1488 	DPRINTF("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
1489 	    DEVNAME(sc),
1490 	    sc->sc_info.mci_serial_number,
1491 	    sc->sc_info.mci_hw_present,
1492 	    sc->sc_info.mci_current_fw_time,
1493 	    sc->sc_info.mci_max_cmds,
1494 	    sc->sc_info.mci_max_sg_elements);
1495 
1496 	DPRINTF("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
1497 	    DEVNAME(sc),
1498 	    sc->sc_info.mci_max_request_size,
1499 	    sc->sc_info.mci_lds_present,
1500 	    sc->sc_info.mci_lds_degraded,
1501 	    sc->sc_info.mci_lds_offline,
1502 	    sc->sc_info.mci_pd_present);
1503 
1504 	DPRINTF("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
1505 	    DEVNAME(sc),
1506 	    sc->sc_info.mci_pd_disks_present,
1507 	    sc->sc_info.mci_pd_disks_pred_failure,
1508 	    sc->sc_info.mci_pd_disks_failed);
1509 
1510 	DPRINTF("%s: nvram %d mem %d flash %d\n",
1511 	    DEVNAME(sc),
1512 	    sc->sc_info.mci_nvram_size,
1513 	    sc->sc_info.mci_memory_size,
1514 	    sc->sc_info.mci_flash_size);
1515 
1516 	DPRINTF("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
1517 	    DEVNAME(sc),
1518 	    sc->sc_info.mci_ram_correctable_errors,
1519 	    sc->sc_info.mci_ram_uncorrectable_errors,
1520 	    sc->sc_info.mci_cluster_allowed,
1521 	    sc->sc_info.mci_cluster_active);
1522 
1523 	DPRINTF("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
1524 	    DEVNAME(sc),
1525 	    sc->sc_info.mci_max_strips_per_io,
1526 	    sc->sc_info.mci_raid_levels,
1527 	    sc->sc_info.mci_adapter_ops,
1528 	    sc->sc_info.mci_ld_ops);
1529 
1530 	DPRINTF("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
1531 	    DEVNAME(sc),
1532 	    sc->sc_info.mci_stripe_sz_ops.min,
1533 	    sc->sc_info.mci_stripe_sz_ops.max,
1534 	    sc->sc_info.mci_pd_ops,
1535 	    sc->sc_info.mci_pd_mix_support);
1536 
1537 	DPRINTF("%s: ecc_bucket %d pckg_prop %s\n",
1538 	    DEVNAME(sc),
1539 	    sc->sc_info.mci_ecc_bucket_count,
1540 	    sc->sc_info.mci_package_version);
1541 
1542 	DPRINTF("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
1543 	    DEVNAME(sc),
1544 	    sc->sc_info.mci_properties.mcp_seq_num,
1545 	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
1546 	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
1547 	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);
1548 
1549 	DPRINTF("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
1550 	    DEVNAME(sc),
1551 	    sc->sc_info.mci_properties.mcp_rebuild_rate,
1552 	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
1553 	    sc->sc_info.mci_properties.mcp_bgi_rate,
1554 	    sc->sc_info.mci_properties.mcp_cc_rate);
1555 
1556 	DPRINTF("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
1557 	    DEVNAME(sc),
1558 	    sc->sc_info.mci_properties.mcp_recon_rate,
1559 	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
1560 	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
1561 	    sc->sc_info.mci_properties.mcp_spinup_delay,
1562 	    sc->sc_info.mci_properties.mcp_cluster_enable);
1563 
1564 	DPRINTF("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
1565 	    DEVNAME(sc),
1566 	    sc->sc_info.mci_properties.mcp_coercion_mode,
1567 	    sc->sc_info.mci_properties.mcp_alarm_enable,
1568 	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
1569 	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
1570 	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);
1571 
1572 	DPRINTF("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
1573 	    DEVNAME(sc),
1574 	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
1575 	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
1576 	    sc->sc_info.mci_properties.mcp_expose_encl_devices);
1577 
1578 	DPRINTF("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
1579 	    DEVNAME(sc),
1580 	    sc->sc_info.mci_pci.mip_vendor,
1581 	    sc->sc_info.mci_pci.mip_device,
1582 	    sc->sc_info.mci_pci.mip_subvendor,
1583 	    sc->sc_info.mci_pci.mip_subdevice);
1584 
1585 	DPRINTF("%s: type %#x port_count %d port_addr ",
1586 	    DEVNAME(sc),
1587 	    sc->sc_info.mci_host.mih_type,
1588 	    sc->sc_info.mci_host.mih_port_count);
1589 
1590 	for (i = 0; i < 8; i++)
1591 		DPRINTF("%.0llx ", sc->sc_info.mci_host.mih_port_addr[i]);
1592 	DPRINTF("\n");
1593 
1594 	DPRINTF("%s: type %.x port_count %d port_addr ",
1595 	    DEVNAME(sc),
1596 	    sc->sc_info.mci_device.mid_type,
1597 	    sc->sc_info.mci_device.mid_port_count);
1598 
1599 	for (i = 0; i < 8; i++)
1600 		DPRINTF("%.0llx ", sc->sc_info.mci_device.mid_port_addr[i]);
1601 	DPRINTF("\n");
1602 
1603 	return (0);
1604 }
1605 
/*
 * Issue a raw MFI frame (MFA descriptor) and busy-wait for the
 * firmware to write a completion status into the frame header.
 * Used before the reply queues are running (e.g. IOC init).
 * Returns 0 on completion, 1 on timeout (~5 seconds).
 */
int
mfii_mfa_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mfi_frame_header	*hdr = ccb->ccb_request;
	u_int64_t r;
	int to = 0, rv = 0;

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_mfa_poll called with cookie or done set");
#endif

	hdr->mfh_context = ccb->ccb_smid;
	/* sentinel: the firmware overwrites this when the frame completes */
	hdr->mfh_cmd_status = MFI_STAT_INVALID_STATUS;
	hdr->mfh_flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	/* post the frame as an MFA (legacy) descriptor */
	r = MFII_REQ_MFA(ccb->ccb_request_dva);
	memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));

	mfii_start(sc, ccb);

	for (;;) {
		/* resync so we observe the firmware's status update */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (hdr->mfh_cmd_status != MFI_STAT_INVALID_STATUS)
			break;

		if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
			    ccb->ccb_smid);
			ccb->ccb_flags |= MFI_CCB_F_ERR;
			rv = 1;
			break;
		}

		/* give the frame back to the device for the next poll */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		delay(1000);
	}

	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
		    0, ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	return (rv);
}
1661 
/*
 * Run a ccb to completion by polling the reply post queue instead of
 * waiting for an interrupt.  The ccb's done handler and cookie are
 * temporarily hijacked so mfii_poll_done() can flip the on-stack rv
 * flag; the original done handler is invoked afterwards.
 */
int
mfii_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	void (*done)(struct mfii_softc *, struct mfii_ccb *);
	void *cookie;
	int rv = 1;

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mfii_poll_done;
	ccb->ccb_cookie = &rv;

	mfii_start(sc, ccb);

	/* spin on the reply queue until mfii_poll_done() clears rv */
	do {
		delay(10);
		mfii_postq(sc);
	} while (rv == 1);

	ccb->ccb_cookie = cookie;
	done(sc, ccb);

	return (0);
}
1687 
1688 void
1689 mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1690 {
1691 	int *rv = ccb->ccb_cookie;
1692 
1693 	*rv = 0;
1694 }
1695 
/*
 * Run a ccb and sleep until it completes.  A stack-local mutex is
 * stashed in the ccb cookie; mfii_exec_done() clears the cookie under
 * that mutex and wakes us.  Must not be called from interrupt context.
 */
int
mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mutex m = MUTEX_INITIALIZER(IPL_BIO);

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_exec called with cookie or done set");
#endif

	ccb->ccb_cookie = &m;
	ccb->ccb_done = mfii_exec_done;

	mfii_start(sc, ccb);

	/* cookie is reset to NULL by mfii_exec_done() when the ccb is done */
	mtx_enter(&m);
	while (ccb->ccb_cookie != NULL)
		msleep_nsec(ccb, &m, PRIBIO, "mfiiexec", INFSLP);
	mtx_leave(&m);

	return (0);
}
1718 
1719 void
1720 mfii_exec_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1721 {
1722 	struct mutex *m = ccb->ccb_cookie;
1723 
1724 	mtx_enter(m);
1725 	ccb->ccb_cookie = NULL;
1726 	wakeup_one(ccb);
1727 	mtx_leave(m);
1728 }
1729 
1730 int
1731 mfii_mgmt(struct mfii_softc *sc, uint32_t opc, const union mfi_mbox *mbox,
1732     void *buf, size_t len, int flags)
1733 {
1734 	struct mfii_ccb *ccb;
1735 	int rv;
1736 
1737 	ccb = scsi_io_get(&sc->sc_iopool, flags);
1738 	if (ccb == NULL)
1739 		return (ENOMEM);
1740 
1741 	mfii_scrub_ccb(ccb);
1742 	rv = mfii_do_mgmt(sc, ccb, opc, mbox, buf, len, flags);
1743 	scsi_io_put(&sc->sc_iopool, ccb);
1744 
1745 	return (rv);
1746 }
1747 
1748 int
1749 mfii_do_mgmt(struct mfii_softc *sc, struct mfii_ccb *ccb, uint32_t opc,
1750     const union mfi_mbox *mbox, void *buf, size_t len, int flags)
1751 {
1752 	struct mpii_msg_scsi_io *io = ccb->ccb_request;
1753 	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
1754 	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);
1755 	struct mfi_dcmd_frame *dcmd = ccb->ccb_mfi;
1756 	struct mfi_frame_header *hdr = &dcmd->mdf_header;
1757 	u_int8_t *dma_buf = NULL;
1758 	int rv = EIO;
1759 
1760 	if (cold)
1761 		flags |= SCSI_NOSLEEP;
1762 
1763 	if (buf != NULL) {
1764 		dma_buf = dma_alloc(len, PR_WAITOK);
1765 		if (dma_buf == NULL)
1766 			return (ENOMEM);
1767 	}
1768 
1769 	ccb->ccb_data = dma_buf;
1770 	ccb->ccb_len = len;
1771 	switch (flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
1772 	case SCSI_DATA_IN:
1773 		ccb->ccb_direction = MFII_DATA_IN;
1774 		hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ);
1775 		break;
1776 	case SCSI_DATA_OUT:
1777 		ccb->ccb_direction = MFII_DATA_OUT;
1778 		hdr->mfh_flags = htole16(MFI_FRAME_DIR_WRITE);
1779 		memcpy(dma_buf, buf, len);
1780 		break;
1781 	case 0:
1782 		ccb->ccb_direction = MFII_DATA_NONE;
1783 		hdr->mfh_flags = htole16(MFI_FRAME_DIR_NONE);
1784 		break;
1785 	}
1786 
1787 	if (mfii_load_mfa(sc, ccb, &dcmd->mdf_sgl,
1788 	    ISSET(flags, SCSI_NOSLEEP)) != 0) {
1789 		rv = ENOMEM;
1790 		goto done;
1791 	}
1792 
1793 	hdr->mfh_cmd = MFI_CMD_DCMD;
1794 	hdr->mfh_context = ccb->ccb_smid;
1795 	hdr->mfh_data_len = htole32(len);
1796 	hdr->mfh_sg_count = len ? ccb->ccb_dmamap->dm_nsegs : 0;
1797 
1798 	dcmd->mdf_opcode = opc;
1799 	/* handle special opcodes */
1800 	if (mbox != NULL)
1801 		memcpy(&dcmd->mdf_mbox, mbox, sizeof(dcmd->mdf_mbox));
1802 
1803 	io->function = MFII_FUNCTION_PASSTHRU_IO;
1804 
1805 	if (len) {
1806 		io->sgl_offset0 = ((u_int8_t *)sge - (u_int8_t *)io) / 4;
1807 		io->chain_offset = ((u_int8_t *)sge - (u_int8_t *)io) / 16;
1808 		htolem64(&sge->sg_addr, ccb->ccb_mfi_dva);
1809 		htolem32(&sge->sg_len, MFI_FRAME_SIZE);
1810 		sge->sg_flags =
1811 		    MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;
1812 	}
1813 
1814 	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
1815 	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);
1816 
1817 	if (ISSET(flags, SCSI_NOSLEEP)) {
1818 		ccb->ccb_done = mfii_empty_done;
1819 		mfii_poll(sc, ccb);
1820 	} else
1821 		mfii_exec(sc, ccb);
1822 
1823 	if (hdr->mfh_cmd_status == MFI_STAT_OK) {
1824 		rv = 0;
1825 
1826 		if (ccb->ccb_direction == MFII_DATA_IN)
1827 			memcpy(buf, dma_buf, len);
1828 	}
1829 
1830 done:
1831 	if (buf != NULL)
1832 		dma_free(dma_buf, len);
1833 
1834 	return (rv);
1835 }
1836 
/*
 * No-op completion handler for polled management commands: the caller
 * inspects the MFI frame status itself after mfii_poll() returns.
 */
void
mfii_empty_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	/* nothing to do here */
}
1842 
/*
 * Load the ccb's data buffer into its dmamap and emit a legacy MFI
 * 32-bit SGL at sglp.  Returns 0 on success (including the no-data
 * case), 1 if the dmamap load fails.
 */
int
mfii_load_mfa(struct mfii_softc *sc, struct mfii_ccb *ccb,
    void *sglp, int nosleep)
{
	union mfi_sgl *sgl = sglp;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	int error;
	int i;

	/* commands without data need no SGL at all */
	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* 32-bit SGL entries; presumably the dma tag keeps segs below 4GB */
	for (i = 0; i < dmap->dm_nsegs; i++) {
		sgl->sg32[i].addr = htole32(dmap->dm_segs[i].ds_addr);
		sgl->sg32[i].len = htole32(dmap->dm_segs[i].ds_len);
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	return (0);
}
1874 
/*
 * Hand a request descriptor to the controller.  The 64-bit descriptor
 * must appear atomic to the hardware: on LP64 a single 8-byte store
 * suffices; otherwise the two 4-byte halves (low word first) are
 * serialized under sc_post_mtx.
 */
void
mfii_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	u_long *r = (u_long *)&ccb->ccb_req;

	/* make the request frame visible to the device before posting */
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#if defined(__LP64__)
	bus_space_write_raw_8(sc->sc_iot, sc->sc_ioh, MFI_IQPL, *r);
#else
	mtx_enter(&sc->sc_post_mtx);
	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh, MFI_IQPL, r[0]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MFI_IQPL, 8, BUS_SPACE_BARRIER_WRITE);

	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh, MFI_IQPH, r[1]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MFI_IQPH, 8, BUS_SPACE_BARRIER_WRITE);
	mtx_leave(&sc->sc_post_mtx);
#endif
}
1898 
/*
 * Common completion path for a ccb pulled off the reply post queue:
 * resync the request frame, the chained SGL (if any) and the data
 * buffer, unload the data dmamap, then call the ccb's done handler.
 */
void
mfii_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_POSTWRITE);
	}

	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
		    0, ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	ccb->ccb_done(sc, ccb);
}
1923 
1924 int
1925 mfii_initialise_firmware(struct mfii_softc *sc)
1926 {
1927 	struct mpii_msg_iocinit_request *iiq;
1928 	struct mfii_dmamem *m;
1929 	struct mfii_ccb *ccb;
1930 	struct mfi_init_frame *init;
1931 	int rv;
1932 
1933 	m = mfii_dmamem_alloc(sc, sizeof(*iiq));
1934 	if (m == NULL)
1935 		return (1);
1936 
1937 	iiq = MFII_DMA_KVA(m);
1938 	memset(iiq, 0, sizeof(*iiq));
1939 
1940 	iiq->function = MPII_FUNCTION_IOC_INIT;
1941 	iiq->whoinit = MPII_WHOINIT_HOST_DRIVER;
1942 
1943 	iiq->msg_version_maj = 0x02;
1944 	iiq->msg_version_min = 0x00;
1945 	iiq->hdr_version_unit = 0x10;
1946 	iiq->hdr_version_dev = 0x0;
1947 
1948 	iiq->system_request_frame_size = htole16(MFII_REQUEST_SIZE / 4);
1949 
1950 	iiq->reply_descriptor_post_queue_depth =
1951 	    htole16(sc->sc_reply_postq_depth);
1952 	iiq->reply_free_queue_depth = htole16(0);
1953 
1954 	htolem32(&iiq->sense_buffer_address_high,
1955 	    MFII_DMA_DVA(sc->sc_sense) >> 32);
1956 
1957 	htolem32(&iiq->reply_descriptor_post_queue_address_lo,
1958 	    MFII_DMA_DVA(sc->sc_reply_postq));
1959 	htolem32(&iiq->reply_descriptor_post_queue_address_hi,
1960 	    MFII_DMA_DVA(sc->sc_reply_postq) >> 32);
1961 
1962 	htolem32(&iiq->system_request_frame_base_address_lo,
1963 	    MFII_DMA_DVA(sc->sc_requests));
1964 	htolem32(&iiq->system_request_frame_base_address_hi,
1965 	    MFII_DMA_DVA(sc->sc_requests) >> 32);
1966 
1967 	iiq->timestamp = htole64(getuptime());
1968 
1969 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
1970 	if (ccb == NULL) {
1971 		/* shouldn't ever run out of ccbs during attach */
1972 		return (1);
1973 	}
1974 	mfii_scrub_ccb(ccb);
1975 	init = ccb->ccb_request;
1976 
1977 	init->mif_header.mfh_cmd = MFI_CMD_INIT;
1978 	init->mif_header.mfh_data_len = htole32(sizeof(*iiq));
1979 	init->mif_qinfo_new_addr = htole64(MFII_DMA_DVA(m));
1980 
1981 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
1982 	    0, MFII_DMA_LEN(sc->sc_reply_postq),
1983 	    BUS_DMASYNC_PREREAD);
1984 
1985 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
1986 	    0, sizeof(*iiq), BUS_DMASYNC_PREREAD);
1987 
1988 	rv = mfii_mfa_poll(sc, ccb);
1989 
1990 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
1991 	    0, sizeof(*iiq), BUS_DMASYNC_POSTREAD);
1992 
1993 	scsi_io_put(&sc->sc_iopool, ccb);
1994 	mfii_dmamem_free(sc, m);
1995 
1996 	return (rv);
1997 }
1998 
/*
 * Check whether the pending interrupt is ours.  Bit 0 of the outbound
 * status register is acknowledged by writing the status back; the
 * remaining valid bits do not need an explicit ack here.
 */
int
mfii_my_intr(struct mfii_softc *sc)
{
	u_int32_t status;

	status = mfii_read(sc, MFI_OSTS);
	if (ISSET(status, 0x1)) {
		/* write-back acknowledges this interrupt source */
		mfii_write(sc, MFI_OSTS, status);
		return (1);
	}

	return (ISSET(status, MFII_OSTS_INTR_VALID) ? 1 : 0);
}
2012 
/*
 * Interrupt handler: claim the interrupt if it is ours and drain the
 * reply post queue.
 */
int
mfii_intr(void *arg)
{
	struct mfii_softc *sc = arg;

	if (mfii_my_intr(sc)) {
		mfii_postq(sc);
		return (1);
	}

	return (0);
}
2025 
/*
 * Drain the reply post queue.  Completed ccbs are collected onto a
 * local list under sc_reply_postq_mtx and their done handlers are run
 * after the mutex is dropped, so completion work never holds the
 * queue lock.
 */
void
mfii_postq(struct mfii_softc *sc)
{
	struct mfii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_reply_descr *postq = MFII_DMA_KVA(sc->sc_reply_postq);
	struct mpii_reply_descr *rdp;
	struct mfii_ccb *ccb;
	int rpi = 0;

	mtx_enter(&sc->sc_reply_postq_mtx);

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_POSTREAD);

	for (;;) {
		rdp = &postq[sc->sc_reply_postq_index];
		/* an unused descriptor means we have caught up */
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		/* smid is 1-based; 0 is reserved */
		ccb = &sc->sc_ccb[letoh16(rdp->smid) - 1];
		SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		/* reset the descriptor to the all-ones "unused" pattern */
		memset(rdp, 0xff, sizeof(*rdp));

		sc->sc_reply_postq_index++;
		sc->sc_reply_postq_index %= sc->sc_reply_postq_depth;
		rpi = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_PREREAD);

	/* tell the firmware how far we have consumed */
	if (rpi)
		mfii_write(sc, MFII_RPI, sc->sc_reply_postq_index);

	mtx_leave(&sc->sc_reply_postq_mtx);

	/* run completions outside the queue mutex */
	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		mfii_done(sc, ccb);
	}
}
2077 
/*
 * scsi_adapter entry point for commands to logical disks.  Reads and
 * writes take the fast LD I/O path; everything else is passed through
 * as a raw cdb.
 */
void
mfii_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->bus->sb_adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	switch (xs->cmd->opcode) {
	case READ_COMMAND:
	case READ_BIG:
	case READ_12:
	case READ_16:
	case WRITE_COMMAND:
	case WRITE_BIG:
	case WRITE_12:
	case WRITE_16:
		/* fast path for i/o */
		if (mfii_scsi_cmd_io(sc, xs) != 0)
			goto stuffup;

		break;

	default:
		/* anything else is sent as a plain cdb */
		if (mfii_scsi_cmd_cdb(sc, xs) != 0)
			goto stuffup;
		break;
	}

	xs->error = XS_NOERROR;
	xs->resid = 0;

	/* polled commands complete before we return and take no timeout */
	if (ISSET(xs->flags, SCSI_POLL)) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	ccb->ccb_refcnt = 2; /* one for the chip, one for the timeout */
	timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
	scsi_done(xs);
}
2132 
/*
 * Completion handler for logical disk commands.  Maps the MFI status in
 * the raid context back onto a scsi_xfer error code and completes the
 * xfer once the last reference is dropped.
 */
void
mfii_scsi_cmd_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct scsi_xfer *xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	u_int refs = 1;

	/*
	 * If timeout_del() removed a pending timeout it will never run,
	 * so drop its reference here along with the chip's.
	 */
	if (timeout_del(&xs->stimeout))
		refs = 2;

	switch (ctx->status) {
	case MFI_STAT_OK:
		break;

	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		/* the controller deposited sense data for us */
		xs->error = XS_SENSE;
		memset(&xs->sense, 0, sizeof(xs->sense));
		memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
		break;

	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	/* whoever drops the last reference completes the xfer */
	if (atomic_sub_int_nv(&ccb->ccb_refcnt, refs) == 0)
		scsi_done(xs);
}
2167 
2168 int
2169 mfii_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
2170 {
2171 	struct mfii_softc	*sc = link->bus->sb_adapter_softc;
2172 
2173 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_scsi_ioctl\n", DEVNAME(sc));
2174 
2175 	switch (cmd) {
2176 	case DIOCGCACHE:
2177 	case DIOCSCACHE:
2178 		return (mfii_ioctl_cache(link, cmd, (struct dk_cache *)addr));
2179 		break;
2180 
2181 	default:
2182 		if (sc->sc_ioctl)
2183 			return (sc->sc_ioctl(&sc->sc_dev, cmd, addr));
2184 		break;
2185 	}
2186 
2187 	return (ENOTTY);
2188 }
2189 
/*
 * Handle DIOCGCACHE/DIOCSCACHE for a logical disk.  Controllers with
 * cache memory use the logical disk cache policy; cacheless ones fall
 * back to the physical disk write cache.
 */
int
mfii_ioctl_cache(struct scsi_link *link, u_long cmd,  struct dk_cache *dc)
{
	struct mfii_softc	*sc = link->bus->sb_adapter_softc;
	int			 rv, wrenable, rdenable;
	struct mfi_ld_prop	 ldp;
	union mfi_mbox		 mbox;

	if (mfii_get_info(sc)) {
		rv = EIO;
		goto done;
	}

	/* the target must map to a configured logical disk */
	if (sc->sc_target_lds[link->target] == -1) {
		rv = EIO;
		goto done;
	}

	/* fetch the current properties of the logical disk */
	memset(&mbox, 0, sizeof(mbox));
	mbox.b[0] = link->target;
	rv = mfii_mgmt(sc, MR_DCMD_LD_GET_PROPERTIES, &mbox, &ldp, sizeof(ldp),
	    SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	if (sc->sc_info.mci_memory_size > 0) {
		/* controller has cache ram: consult the ld cache policy */
		wrenable = ISSET(ldp.mlp_cur_cache_policy,
		    MR_LD_CACHE_ALLOW_WRITE_CACHE)? 1 : 0;
		rdenable = ISSET(ldp.mlp_cur_cache_policy,
		    MR_LD_CACHE_ALLOW_READ_CACHE)? 1 : 0;
	} else {
		/* cacheless controller: only the disk write cache applies */
		wrenable = ISSET(ldp.mlp_diskcache_policy,
		    MR_LD_DISK_CACHE_ENABLE)? 1 : 0;
		rdenable = 0;
	}

	if (cmd == DIOCGCACHE) {
		dc->wrcache = wrenable;
		dc->rdcache = rdenable;
		goto done;
	} /* else DIOCSCACHE */

	/* nothing to do if the requested state matches the current one */
	if (((dc->wrcache) ? 1 : 0) == wrenable &&
	    ((dc->rdcache) ? 1 : 0) == rdenable)
		goto done;

	memset(&mbox, 0, sizeof(mbox));
	mbox.b[0] = ldp.mlp_ld.mld_target;
	mbox.b[1] = ldp.mlp_ld.mld_res;
	mbox.s[1] = ldp.mlp_ld.mld_seq;

	if (sc->sc_info.mci_memory_size > 0) {
		if (dc->rdcache)
			SET(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_READ_CACHE);
		else
			CLR(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_READ_CACHE);
		if (dc->wrcache)
			SET(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_WRITE_CACHE);
		else
			CLR(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_WRITE_CACHE);
	} else {
		/* read caching needs controller memory we do not have */
		if (dc->rdcache) {
			rv = EOPNOTSUPP;
			goto done;
		}
		if (dc->wrcache)
			ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_ENABLE;
		else
			ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_DISABLE;
	}

	rv = mfii_mgmt(sc, MR_DCMD_LD_SET_PROPERTIES, &mbox, &ldp, sizeof(ldp),
	    SCSI_DATA_OUT);
done:
	return (rv);
}
2270 
/*
 * Build a fast-path LD I/O request for a read or write command.
 * Returns 0 on success, non-zero if the dma map could not be loaded.
 */
int
mfii_scsi_cmd_io(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	int segs;

	io->dev_handle = htole16(link->target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* the sgl starts right after the raid context, in 4 byte units */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->type_nseg = sc->sc_iop->ldio_ctx_type_nseg;
	ctx->timeout_value = htole16(0x14); /* XXX */
	ctx->reg_lock_flags = htole16(sc->sc_iop->ldio_ctx_reg_lock_flags);
	ctx->virtual_disk_target_id = htole16(link->target);

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (1);

	segs = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;
	/* where the sge count lives depends on the controller generation */
	switch (sc->sc_iop->num_sge_loc) {
	case MFII_IOP_NUM_SGE_LOC_ORIG:
		ctx->num_sge = segs;
		break;
	case MFII_IOP_NUM_SGE_LOC_35:
		/* 12 bit field, but we're only using the lower 8 */
		ctx->span_arm = segs;
		break;
	}

	ccb->ccb_req.flags = sc->sc_iop->ldio_req_type;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	return (0);
}
2329 
/*
 * Build a pass-through LD request for a non-read/write cdb.
 * Returns 0 on success, non-zero if the dma map could not be loaded.
 */
int
mfii_scsi_cmd_cdb(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);

	io->dev_handle = htole16(link->target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* the sgl starts right after the raid context, in 4 byte units */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(link->target);

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (1);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	return (0);
}
2376 
/*
 * scsi_adapter entry point for commands to pass-through physical disks.
 */
void
mfii_pd_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->bus->sb_adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	/* cmd_cdb returns an XS_* code directly */
	xs->error = mfii_pd_scsi_cmd_cdb(sc, xs);
	if (xs->error != XS_NOERROR)
		goto done;

	xs->resid = 0;

	/* polled commands complete before we return and take no timeout */
	if (ISSET(xs->flags, SCSI_POLL)) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	ccb->ccb_refcnt = 2; /* one for the chip, one for the timeout */
	timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
done:
	scsi_done(xs);
}
2415 
2416 int
2417 mfii_pd_scsi_probe(struct scsi_link *link)
2418 {
2419 	struct mfii_softc *sc = link->bus->sb_adapter_softc;
2420 	struct mfi_pd_details mpd;
2421 	union mfi_mbox mbox;
2422 	int rv;
2423 
2424 	if (link->lun > 0)
2425 		return (0);
2426 
2427 	memset(&mbox, 0, sizeof(mbox));
2428 	mbox.s[0] = htole16(link->target);
2429 
2430 	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, &mpd, sizeof(mpd),
2431 	    SCSI_DATA_IN|SCSI_NOSLEEP);
2432 	if (rv != 0)
2433 		return (EIO);
2434 
2435 	if (mpd.mpd_fw_state != htole16(MFI_PD_SYSTEM))
2436 		return (ENXIO);
2437 
2438 	return (0);
2439 }
2440 
/*
 * Build a system physical disk (pass-through) request for a cdb.
 * Returns an XS_* code suitable for assignment to xs->error.
 */
int
mfii_pd_scsi_cmd_cdb(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	uint16_t dev_handle;

	/* translate the target to the firmware's device handle */
	dev_handle = mfii_dev_handle(sc, link->target);
	if (dev_handle == htole16(0xffff))
		return (XS_SELTIMEOUT);

	io->dev_handle = dev_handle;
	io->function = 0;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* the sgl starts right after the raid context, in 4 byte units */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	/* mark the request as targeting a system pd, not a logical disk */
	ctx->virtual_disk_target_id = htole16(link->target);
	ctx->raid_flags = MFII_RAID_CTX_IO_TYPE_SYSPD;
	ctx->timeout_value = sc->sc_pd->pd_timeout;

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (XS_DRIVER_STUFFUP);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;

	ccb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);
	ccb->ccb_req.dev_handle = dev_handle;

	return (XS_NOERROR);
}
2495 
/*
 * Load the ccb's dma map and build its scatter-gather list.  Sges are
 * placed inline in the request frame; if they do not all fit, the last
 * inline slot becomes a chain element pointing at the ccb's external
 * sgl.  Returns 0 on success, 1 if the dma map could not be loaded.
 */
int
mfii_load_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb, void *sglp,
    int nosleep)
{
	struct mpii_msg_request *req = ccb->ccb_request;
	struct mfii_sge *sge = NULL, *nsge = sglp;
	struct mfii_sge *ce = NULL;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	u_int space;
	int i;

	int error;

	/* commands without data need no sgl at all */
	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* how many sges fit in the rest of the request frame? */
	space = (MFII_REQUEST_SIZE - ((u_int8_t *)nsge - (u_int8_t *)req)) /
	    sizeof(*nsge);
	if (dmap->dm_nsegs > space) {
		/* reserve the last inline slot for the chain element */
		space--;

		ccb->ccb_sgl_len = (dmap->dm_nsegs - space) * sizeof(*nsge);
		memset(ccb->ccb_sgl, 0, ccb->ccb_sgl_len);

		ce = nsge + space;
		ce->sg_addr = htole64(ccb->ccb_sgl_dva);
		ce->sg_len = htole32(ccb->ccb_sgl_len);
		ce->sg_flags = sc->sc_iop->sge_flag_chain;

		/* chain offset is in 16 byte units */
		req->chain_offset = ((u_int8_t *)ce - (u_int8_t *)req) / 16;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		/* reached the chain element: continue in the external sgl */
		if (nsge == ce)
			nsge = ccb->ccb_sgl;

		sge = nsge;

		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_flags = MFII_SGE_ADDR_SYSTEM;

		nsge = sge + 1;
	}
	/* mark the last sge as the end of the list */
	sge->sg_flags |= sc->sc_iop->sge_flag_eol;

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/* flush the external sgl out to the chip as well */
	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_PREWRITE);
	}

	return (0);
}
2562 
2563 void
2564 mfii_scsi_cmd_tmo(void *xsp)
2565 {
2566 	struct scsi_xfer *xs = xsp;
2567 	struct scsi_link *link = xs->sc_link;
2568 	struct mfii_softc *sc = link->bus->sb_adapter_softc;
2569 	struct mfii_ccb *ccb = xs->io;
2570 
2571 	mtx_enter(&sc->sc_abort_mtx);
2572 	SIMPLEQ_INSERT_TAIL(&sc->sc_abort_list, ccb, ccb_link);
2573 	mtx_leave(&sc->sc_abort_mtx);
2574 
2575 	task_add(systqmp, &sc->sc_abort_task);
2576 }
2577 
/*
 * Task context: abort every command queued by the timeout handler.
 * Runs from a taskq so it may sleep to allocate the abort ccbs.
 */
void
mfii_abort_task(void *scp)
{
	struct mfii_softc *sc = scp;
	struct mfii_ccb *list;

	/* take the whole abort list in one go */
	mtx_enter(&sc->sc_abort_mtx);
	list = SIMPLEQ_FIRST(&sc->sc_abort_list);
	SIMPLEQ_INIT(&sc->sc_abort_list);
	mtx_leave(&sc->sc_abort_mtx);

	while (list != NULL) {
		struct mfii_ccb *ccb = list;
		struct scsi_xfer *xs = ccb->ccb_cookie;
		struct scsi_link *link = xs->sc_link;

		uint16_t dev_handle;
		struct mfii_ccb *accb;

		list = SIMPLEQ_NEXT(ccb, ccb_link);

		dev_handle = mfii_dev_handle(sc, link->target);
		if (dev_handle == htole16(0xffff)) {
			/* device is gone */
			if (atomic_dec_int_nv(&ccb->ccb_refcnt) == 0)
				scsi_done(xs);
			continue;
		}

		/* build a task management request to abort the command */
		accb = scsi_io_get(&sc->sc_iopool, 0);
		mfii_scrub_ccb(accb);
		mfii_abort(sc, accb, dev_handle, ccb->ccb_smid,
		    MPII_SCSI_TASK_ABORT_TASK,
		    htole32(MFII_TASK_MGMT_FLAGS_PD));

		accb->ccb_cookie = ccb;
		accb->ccb_done = mfii_scsi_cmd_abort_done;

		mfii_start(sc, accb);
	}
}
2619 
2620 void
2621 mfii_abort(struct mfii_softc *sc, struct mfii_ccb *accb, uint16_t dev_handle,
2622     uint16_t smid, uint8_t type, uint32_t flags)
2623 {
2624 	struct mfii_task_mgmt *msg;
2625 	struct mpii_msg_scsi_task_request *req;
2626 
2627 	msg = accb->ccb_request;
2628 	req = &msg->mpii_request;
2629 	req->dev_handle = dev_handle;
2630 	req->function = MPII_FUNCTION_SCSI_TASK_MGMT;
2631 	req->task_type = type;
2632 	htolem16(&req->task_mid, smid);
2633 	msg->flags = flags;
2634 
2635 	accb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
2636 	accb->ccb_req.smid = letoh16(accb->ccb_smid);
2637 }
2638 
/*
 * Completion handler for an abort request.  accb's cookie is the ccb
 * being aborted, whose own cookie is the original scsi_xfer.
 */
void
mfii_scsi_cmd_abort_done(struct mfii_softc *sc, struct mfii_ccb *accb)
{
	struct mfii_ccb *ccb = accb->ccb_cookie;
	struct scsi_xfer *xs = ccb->ccb_cookie;

	/* XXX check accb completion? */

	scsi_io_put(&sc->sc_iopool, accb);

	/* drop the timeout's reference; the chip may still hold one */
	if (atomic_dec_int_nv(&ccb->ccb_refcnt) == 0)
		scsi_done(xs);
}
2652 
2653 void *
2654 mfii_get_ccb(void *cookie)
2655 {
2656 	struct mfii_softc *sc = cookie;
2657 	struct mfii_ccb *ccb;
2658 
2659 	mtx_enter(&sc->sc_ccb_mtx);
2660 	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_freeq);
2661 	if (ccb != NULL)
2662 		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
2663 	mtx_leave(&sc->sc_ccb_mtx);
2664 
2665 	return (ccb);
2666 }
2667 
2668 void
2669 mfii_scrub_ccb(struct mfii_ccb *ccb)
2670 {
2671 	ccb->ccb_cookie = NULL;
2672 	ccb->ccb_done = NULL;
2673 	ccb->ccb_flags = 0;
2674 	ccb->ccb_data = NULL;
2675 	ccb->ccb_direction = 0;
2676 	ccb->ccb_len = 0;
2677 	ccb->ccb_sgl_len = 0;
2678 	ccb->ccb_refcnt = 1;
2679 
2680 	memset(&ccb->ccb_req, 0, sizeof(ccb->ccb_req));
2681 	memset(ccb->ccb_request, 0, MFII_REQUEST_SIZE);
2682 	memset(ccb->ccb_mfi, 0, MFI_FRAME_SIZE);
2683 }
2684 
2685 void
2686 mfii_put_ccb(void *cookie, void *io)
2687 {
2688 	struct mfii_softc *sc = cookie;
2689 	struct mfii_ccb *ccb = io;
2690 
2691 	mtx_enter(&sc->sc_ccb_mtx);
2692 	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
2693 	mtx_leave(&sc->sc_ccb_mtx);
2694 }
2695 
2696 int
2697 mfii_init_ccb(struct mfii_softc *sc)
2698 {
2699 	struct mfii_ccb *ccb;
2700 	u_int8_t *request = MFII_DMA_KVA(sc->sc_requests);
2701 	u_int8_t *mfi = MFII_DMA_KVA(sc->sc_mfi);
2702 	u_int8_t *sense = MFII_DMA_KVA(sc->sc_sense);
2703 	u_int8_t *sgl = MFII_DMA_KVA(sc->sc_sgl);
2704 	u_int i;
2705 	int error;
2706 
2707 	sc->sc_ccb = mallocarray(sc->sc_max_cmds, sizeof(struct mfii_ccb),
2708 	    M_DEVBUF, M_WAITOK|M_ZERO);
2709 
2710 	for (i = 0; i < sc->sc_max_cmds; i++) {
2711 		ccb = &sc->sc_ccb[i];
2712 
2713 		/* create a dma map for transfer */
2714 		error = bus_dmamap_create(sc->sc_dmat,
2715 		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2716 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
2717 		if (error) {
2718 			printf("%s: cannot create ccb dmamap (%d)\n",
2719 			    DEVNAME(sc), error);
2720 			goto destroy;
2721 		}
2722 
2723 		/* select i + 1'th request. 0 is reserved for events */
2724 		ccb->ccb_smid = i + 1;
2725 		ccb->ccb_request_offset = MFII_REQUEST_SIZE * (i + 1);
2726 		ccb->ccb_request = request + ccb->ccb_request_offset;
2727 		ccb->ccb_request_dva = MFII_DMA_DVA(sc->sc_requests) +
2728 		    ccb->ccb_request_offset;
2729 
2730 		/* select i'th MFI command frame */
2731 		ccb->ccb_mfi_offset = MFI_FRAME_SIZE * i;
2732 		ccb->ccb_mfi = mfi + ccb->ccb_mfi_offset;
2733 		ccb->ccb_mfi_dva = MFII_DMA_DVA(sc->sc_mfi) +
2734 		    ccb->ccb_mfi_offset;
2735 
2736 		/* select i'th sense */
2737 		ccb->ccb_sense_offset = MFI_SENSE_SIZE * i;
2738 		ccb->ccb_sense = (struct mfi_sense *)(sense +
2739 		    ccb->ccb_sense_offset);
2740 		ccb->ccb_sense_dva = MFII_DMA_DVA(sc->sc_sense) +
2741 		    ccb->ccb_sense_offset;
2742 
2743 		/* select i'th sgl */
2744 		ccb->ccb_sgl_offset = sizeof(struct mfii_sge) *
2745 		    sc->sc_max_sgl * i;
2746 		ccb->ccb_sgl = (struct mfii_sge *)(sgl + ccb->ccb_sgl_offset);
2747 		ccb->ccb_sgl_dva = MFII_DMA_DVA(sc->sc_sgl) +
2748 		    ccb->ccb_sgl_offset;
2749 
2750 		/* add ccb to queue */
2751 		mfii_put_ccb(sc, ccb);
2752 	}
2753 
2754 	return (0);
2755 
2756 destroy:
2757 	/* free dma maps and ccb memory */
2758 	while ((ccb = mfii_get_ccb(sc)) != NULL)
2759 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
2760 
2761 	free(sc->sc_ccb, M_DEVBUF, 0);
2762 
2763 	return (1);
2764 }
2765 
2766 #if NBIO > 0
/*
 * bio(4) ioctl entry point.  Dispatches to the per-command handlers,
 * serialised by sc_lock.
 */
int
mfii_ioctl(struct device *dev, u_long cmd, caddr_t addr)
{
	struct mfii_softc	*sc = (struct mfii_softc *)dev;
	int error = 0;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl ", DEVNAME(sc));

	rw_enter_write(&sc->sc_lock);

	switch (cmd) {
	case BIOCINQ:
		DNPRINTF(MFII_D_IOCTL, "inq\n");
		error = mfii_ioctl_inq(sc, (struct bioc_inq *)addr);
		break;

	case BIOCVOL:
		DNPRINTF(MFII_D_IOCTL, "vol\n");
		error = mfii_ioctl_vol(sc, (struct bioc_vol *)addr);
		break;

	case BIOCDISK:
		DNPRINTF(MFII_D_IOCTL, "disk\n");
		error = mfii_ioctl_disk(sc, (struct bioc_disk *)addr);
		break;

	case BIOCALARM:
		DNPRINTF(MFII_D_IOCTL, "alarm\n");
		error = mfii_ioctl_alarm(sc, (struct bioc_alarm *)addr);
		break;

	case BIOCBLINK:
		DNPRINTF(MFII_D_IOCTL, "blink\n");
		error = mfii_ioctl_blink(sc, (struct bioc_blink *)addr);
		break;

	case BIOCSETSTATE:
		DNPRINTF(MFII_D_IOCTL, "setstate\n");
		error = mfii_ioctl_setstate(sc, (struct bioc_setstate *)addr);
		break;

	case BIOCPATROL:
		DNPRINTF(MFII_D_IOCTL, "patrol\n");
		error = mfii_ioctl_patrol(sc, (struct bioc_patrol *)addr);
		break;

	default:
		DNPRINTF(MFII_D_IOCTL, " invalid ioctl\n");
		error = ENOTTY;
	}

	rw_exit_write(&sc->sc_lock);

	return (error);
}
2822 
/*
 * Refresh the cached controller state used by the bio(4) handlers:
 * controller info, the raid config (sc_cfg), the logical disk list and
 * per-ld details, and the count of configured physical disks.
 */
int
mfii_bio_getitall(struct mfii_softc *sc)
{
	int			i, d, rv = EINVAL;
	size_t			size;
	union mfi_mbox		mbox;
	struct mfi_conf		*cfg = NULL;
	struct mfi_ld_details	*ld_det = NULL;

	/* get info */
	if (mfii_get_info(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_get_info failed\n",
		    DEVNAME(sc));
		goto done;
	}

	/* send single element command to retrieve size for full structure */
	cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
	    SCSI_DATA_IN)) {
		free(cfg, M_DEVBUF, sizeof *cfg);
		goto done;
	}

	/* the firmware reports the size of the full config structure */
	size = cfg->mfc_size;
	free(cfg, M_DEVBUF, sizeof *cfg);

	/* memory for read config */
	cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size, SCSI_DATA_IN)) {
		free(cfg, M_DEVBUF, size);
		goto done;
	}

	/* replace current pointer with new one */
	if (sc->sc_cfg)
		free(sc->sc_cfg, M_DEVBUF, 0);
	sc->sc_cfg = cfg;

	/* get all ld info */
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), SCSI_DATA_IN))
		goto done;

	/* get memory for all ld structures */
	size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
	if (sc->sc_ld_sz != size) {
		/* the number of lds changed; reallocate the details array */
		if (sc->sc_ld_details)
			free(sc->sc_ld_details, M_DEVBUF, 0);

		ld_det = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (ld_det == NULL)
			goto done;
		sc->sc_ld_sz = size;
		sc->sc_ld_details = ld_det;
	}

	/* find used physical disks */
	size = sizeof(struct mfi_ld_details);
	for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
		memset(&mbox, 0, sizeof(mbox));
		mbox.b[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		if (mfii_mgmt(sc, MR_DCMD_LD_GET_INFO, &mbox, &sc->sc_ld_details[i], size,
		    SCSI_DATA_IN))
			goto done;

		/* disks per ld = drives per span times number of spans */
		d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
		    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
	}
	sc->sc_no_pd = d;

	rv = 0;
done:
	return (rv);
}
2902 
2903 int
2904 mfii_ioctl_inq(struct mfii_softc *sc, struct bioc_inq *bi)
2905 {
2906 	int			rv = EINVAL;
2907 	struct mfi_conf		*cfg = NULL;
2908 
2909 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_inq\n", DEVNAME(sc));
2910 
2911 	if (mfii_bio_getitall(sc)) {
2912 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
2913 		    DEVNAME(sc));
2914 		goto done;
2915 	}
2916 
2917 	/* count unused disks as volumes */
2918 	if (sc->sc_cfg == NULL)
2919 		goto done;
2920 	cfg = sc->sc_cfg;
2921 
2922 	bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
2923 	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
2924 #if notyet
2925 	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
2926 	    (bi->bi_nodisk - sc->sc_no_pd);
2927 #endif
2928 	/* tell bio who we are */
2929 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
2930 
2931 	rv = 0;
2932 done:
2933 	return (rv);
2934 }
2935 
/*
 * BIOCVOL handler: report status, progress, raid level and size of a
 * volume.  Volume ids past the logical disk list refer to hotspares
 * and unused disks, which are handled by mfii_bio_hs().
 */
int
mfii_ioctl_vol(struct mfii_softc *sc, struct bioc_vol *bv)
{
	int			i, per, target, rv = EINVAL;
	struct scsi_link	*link;
	struct device		*dev;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_vol %#x\n",
	    DEVNAME(sc), bv->bv_volid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
		/* go do hotspares & unused disks */
		rv = mfii_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
		goto done;
	}

	/* report the device name the volume attached as, if any */
	i = bv->bv_volid;
	target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
	link = scsi_get_link(sc->sc_scsibus, target, 0);
	if (link == NULL) {
		strlcpy(bv->bv_dev, "cache", sizeof(bv->bv_dev));
	} else {
		dev = link->device_softc;
		if (dev == NULL)
			goto done;

		strlcpy(bv->bv_dev, dev->dv_xname, sizeof(bv->bv_dev));
	}

	switch(sc->sc_ld_list.mll_list[i].mll_state) {
	case MFI_LD_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;

	case MFI_LD_PART_DEGRADED:
	case MFI_LD_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;

	case MFI_LD_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;

	default:
		bv->bv_status = BIOC_SVINVALID;
		DNPRINTF(MFII_D_IOCTL, "%s: invalid logical disk state %#x\n",
		    DEVNAME(sc),
		    sc->sc_ld_list.mll_list[i].mll_state);
	}

	/* additional status can modify MFI status */
	switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
	case MFI_LD_PROG_CC:
		/* consistency check in progress */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_BGI:
		/* background initialisation in progress */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_FGI:
	case MFI_LD_PROG_RECONSTRUCT:
		/* nothing yet */
		break;
	}

	if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01)
		bv->bv_cache = BIOC_CVWRITEBACK;
	else
		bv->bv_cache = BIOC_CVWRITETHROUGH;

	/*
	 * The RAID levels are determined per the SNIA DDF spec, this is only
	 * a subset that is valid for the MFI controller.
	 */
	bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
	if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth > 1)
		bv->bv_level *= 10;

	bv->bv_nodisk = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;

	bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */

	rv = 0;
done:
	return (rv);
}
3039 
/*
 * BIOCDISK handler: locate the physical disk backing a slot of a
 * volume in the raid config and report its status, size and identity.
 */
int
mfii_ioctl_disk(struct mfii_softc *sc, struct bioc_disk *bd)
{
	struct mfi_conf		*cfg;
	struct mfi_array	*ar;
	struct mfi_ld_cfg	*ld;
	struct mfi_pd_details	*pd;
	struct mfi_pd_list	*pl;
	struct mfi_pd_progress	*mfp;
	struct mfi_progress	*mp;
	struct scsi_inquiry_data *inqbuf;
	char			vend[8+16+4+1], *vendp;
	int			i, rv = EINVAL;
	int			arr, vol, disk, span;
	union mfi_mbox		mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_disk %#x\n",
	    DEVNAME(sc), bd->bd_diskid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	cfg = sc->sc_cfg;

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	ar = cfg->mfc_array;
	vol = bd->bd_volid;
	if (vol >= cfg->mfc_no_ld) {
		/* do hotspares */
		rv = mfii_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
		goto freeme;
	}

	/* calculate offset to ld structure */
	ld = (struct mfi_ld_cfg *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array);

	/* use span 0 only when raid group is not spanned */
	if (ld[vol].mlc_parm.mpa_span_depth > 1)
		span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
	else
		span = 0;
	arr = ld[vol].mlc_span[span].mls_index;

	/* offset disk into pd list */
	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;

	if (ar[arr].pd[disk].mar_pd.mfp_id == 0xffffU) {
		/* disk is missing but succeed command */
		bd->bd_status = BIOC_SDFAILED;
		rv = 0;

		/* try to find an unused disk for the target to rebuild */
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
		    SCSI_DATA_IN))
			goto freeme;

		for (i = 0; i < pl->mpl_no_pd; i++) {
			if (pl->mpl_address[i].mpa_scsi_type != 0)
				continue;

			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
			    SCSI_DATA_IN))
				continue;

			/* an unconfigured disk is a rebuild candidate */
			if (pd->mpd_fw_state == MFI_PD_UNCONFIG_GOOD ||
			    pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD)
				break;
		}

		/* no candidate found: report the missing disk as-is */
		if (i == pl->mpl_no_pd)
			goto freeme;
	} else {
		memset(&mbox, 0, sizeof(mbox));
		mbox.s[0] = ar[arr].pd[disk].mar_pd.mfp_id;
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
		    SCSI_DATA_IN)) {
			bd->bd_status = BIOC_SDINVALID;
			goto freeme;
		}
	}

	/* get the remaining fields */
	bd->bd_channel = pd->mpd_enc_idx;
	bd->bd_target = pd->mpd_enc_slot;

	/* get status */
	switch (pd->mpd_fw_state){
	case MFI_PD_UNCONFIG_GOOD:
	case MFI_PD_UNCONFIG_BAD:
		bd->bd_status = BIOC_SDUNUSED;
		break;

	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
		bd->bd_status = BIOC_SDHOTSPARE;
		break;

	case MFI_PD_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;

	case MFI_PD_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;

	case MFI_PD_REBUILD:
		bd->bd_status = BIOC_SDREBUILD;
		break;

	case MFI_PD_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;

	case MFI_PD_COPYBACK:
	case MFI_PD_SYSTEM:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = pd->mpd_size * 512; /* bytes per block */

	/*
	 * vend is sized 8+16+4+1 to take the inquiry vendor, product and
	 * revision fields in one copy; they are adjacent in the struct.
	 */
	inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
	vendp = inqbuf->vendor;
	memcpy(vend, vendp, sizeof vend - 1);
	vend[sizeof vend - 1] = '\0';
	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));

	/* XXX find a way to retrieve serial nr from drive */
	/* XXX find a way to get bd_procdev */

	mfp = &pd->mpd_progress;
	if (mfp->mfp_in_prog & MFI_PD_PROG_PR) {
		/* patrol read in progress on this disk */
		mp = &mfp->mfp_patrol_read;
		bd->bd_patrol.bdp_percent = (mp->mp_progress * 100) / 0xffff;
		bd->bd_patrol.bdp_seconds = mp->mp_elapsed_seconds;
	}

	rv = 0;
freeme:
	free(pd, M_DEVBUF, sizeof *pd);
	free(pl, M_DEVBUF, sizeof *pl);

	return (rv);
}
3193 
3194 int
3195 mfii_ioctl_alarm(struct mfii_softc *sc, struct bioc_alarm *ba)
3196 {
3197 	uint32_t		opc, flags = 0;
3198 	int			rv = 0;
3199 	int8_t			ret;
3200 
3201 	switch(ba->ba_opcode) {
3202 	case BIOC_SADISABLE:
3203 		opc = MR_DCMD_SPEAKER_DISABLE;
3204 		break;
3205 
3206 	case BIOC_SAENABLE:
3207 		opc = MR_DCMD_SPEAKER_ENABLE;
3208 		break;
3209 
3210 	case BIOC_SASILENCE:
3211 		opc = MR_DCMD_SPEAKER_SILENCE;
3212 		break;
3213 
3214 	case BIOC_GASTATUS:
3215 		opc = MR_DCMD_SPEAKER_GET;
3216 		flags = SCSI_DATA_IN;
3217 		break;
3218 
3219 	case BIOC_SATEST:
3220 		opc = MR_DCMD_SPEAKER_TEST;
3221 		break;
3222 
3223 	default:
3224 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_alarm biocalarm invalid "
3225 		    "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
3226 		return (EINVAL);
3227 	}
3228 
3229 	if (mfii_mgmt(sc, opc, NULL, &ret, sizeof(ret), flags))
3230 		rv = EINVAL;
3231 	else
3232 		if (ba->ba_opcode == BIOC_GASTATUS)
3233 			ba->ba_status = ret;
3234 		else
3235 			ba->ba_status = 0;
3236 
3237 	return (rv);
3238 }
3239 
3240 int
3241 mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *bb)
3242 {
3243 	int			i, found, rv = EINVAL;
3244 	union mfi_mbox		mbox;
3245 	uint32_t		cmd;
3246 	struct mfi_pd_list	*pd;
3247 
3248 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink %x\n", DEVNAME(sc),
3249 	    bb->bb_status);
3250 
3251 	/* channel 0 means not in an enclosure so can't be blinked */
3252 	if (bb->bb_channel == 0)
3253 		return (EINVAL);
3254 
3255 	pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
3256 
3257 	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pd, sizeof(*pd), SCSI_DATA_IN))
3258 		goto done;
3259 
3260 	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
3261 		if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
3262 		    bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
3263 			found = 1;
3264 			break;
3265 		}
3266 
3267 	if (!found)
3268 		goto done;
3269 
3270 	memset(&mbox, 0, sizeof(mbox));
3271 	mbox.s[0] = pd->mpl_address[i].mpa_pd_id;
3272 
3273 	switch (bb->bb_status) {
3274 	case BIOC_SBUNBLINK:
3275 		cmd = MR_DCMD_PD_UNBLINK;
3276 		break;
3277 
3278 	case BIOC_SBBLINK:
3279 		cmd = MR_DCMD_PD_BLINK;
3280 		break;
3281 
3282 	case BIOC_SBALARM:
3283 	default:
3284 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink biocblink invalid "
3285 		    "opcode %x\n", DEVNAME(sc), bb->bb_status);
3286 		goto done;
3287 	}
3288 
3289 
3290 	if (mfii_mgmt(sc, cmd, &mbox, NULL, 0, 0) == 0)
3291 		rv = 0;
3292 
3293 done:
3294 	free(pd, M_DEVBUF, sizeof *pd);
3295 	return (rv);
3296 }
3297 
/*
 * Coax the physical disk identified by pd_id into the UNCONFIG_GOOD
 * state so it can be used as a rebuild target: clear an UNCONFIG_BAD
 * state and wipe any foreign (previously-configured) metadata.
 *
 * Returns 0 when the disk ends up UNCONFIG_GOOD and non-foreign,
 * a mfii_mgmt() error if a firmware command fails, or ENXIO if the
 * disk still refuses to become usable.
 */
static int
mfii_makegood(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfii_foreign_scan_info *fsi;
	struct mfi_pd_details	*pd;
	union mfi_mbox		mbox;
	int			rv;

	fsi = malloc(sizeof *fsi, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	/* fetch the disk's current firmware state */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	/* a BAD disk must first be forced to UNCONFIG_GOOD */
	if (pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD) {
		/* mbox.s[0] already pd_id; rest of mbox still zero from above */
		mbox.s[0] = pd_id;
		mbox.s[1] = pd->mpd_pd.mfp_seq;
		mbox.b[4] = MFI_PD_UNCONFIG_GOOD;
		rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0, 0);
		if (rv != 0)
			goto done;
	}

	/* re-read state; the transition may have changed the DDF flags */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	/* disks carrying foreign config must have it scanned and cleared */
	if (pd->mpd_ddf_state & MFI_DDF_FOREIGN) {
		rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_SCAN, NULL, fsi, sizeof(*fsi),
		    SCSI_DATA_IN);
		if (rv != 0)
			goto done;

		if (fsi->count > 0) {
			rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_CLEAR, NULL, NULL, 0, 0);
			if (rv != 0)
				goto done;
		}
	}

	/* final verification that the disk is now usable */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state != MFI_PD_UNCONFIG_GOOD ||
	    pd->mpd_ddf_state & MFI_DDF_FOREIGN)
		rv = ENXIO;

done:
	free(fsi, M_DEVBUF, sizeof *fsi);
	free(pd, M_DEVBUF, sizeof *pd);

	return (rv);
}
3359 
/*
 * Turn the physical disk identified by pd_id into a global hotspare,
 * which the firmware will then use to start a rebuild of a degraded
 * volume.  Returns 0 on success, EINVAL or a mfii_mgmt() error code
 * on failure.
 */
static int
mfii_makespare(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfi_hotspare	*hs;
	struct mfi_pd_details	*pd;
	union mfi_mbox		mbox;
	size_t			size;
	int			rv = EINVAL;

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	/* hotspare struct carries one trailing uint16_t per array */
	size = sizeof *hs + sizeof(uint16_t) * sc->sc_cfg->mfc_no_array;

	hs = malloc(size, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	/* fetch the disk's current id/sequence pair for the request */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	/* zeroed mhs_type etc. makes this a global (non-dedicated) spare */
	memset(hs, 0, size);
	hs->mhs_pd.mfp_id = pd->mpd_pd.mfp_id;
	hs->mhs_pd.mfp_seq = pd->mpd_pd.mfp_seq;
	rv = mfii_mgmt(sc, MR_DCMD_CFG_MAKE_SPARE, NULL, hs, size, SCSI_DATA_OUT);

done:
	free(hs, M_DEVBUF, size);
	free(pd, M_DEVBUF, sizeof *pd);

	return (rv);
}
3398 
/*
 * BIOCSETSTATE handler: change the firmware state of the physical disk
 * addressed by bs_channel (enclosure index) / bs_target (enclosure slot).
 * For BIOC_SSREBUILD a disk that is not yet OFFLINE is first repaired
 * (mfii_makegood()) and turned into a hotspare (mfii_makespare()),
 * which may itself kick off the rebuild.
 *
 * Returns 0 on success, EINVAL or a firmware error code otherwise.
 */
int
mfii_ioctl_setstate(struct mfii_softc *sc, struct bioc_setstate *bs)
{
	struct mfi_pd_details	*pd;
	struct mfi_pd_list	*pl;
	int			i, found, rv = EINVAL;
	union mfi_mbox		mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate %x\n", DEVNAME(sc),
	    bs->bs_status);

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl), SCSI_DATA_IN))
		goto done;

	/* locate the disk by enclosure index and slot */
	for (i = 0, found = 0; i < pl->mpl_no_pd; i++)
		if (bs->bs_channel == pl->mpl_address[i].mpa_enc_index &&
		    bs->bs_target == pl->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;

	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN))
		goto done;

	/* SET_STATE wants the id/sequence pair plus the new state in b[4] */
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
	mbox.s[1] = pd->mpd_pd.mfp_seq;

	switch (bs->bs_status) {
	case BIOC_SSONLINE:
		mbox.b[4] = MFI_PD_ONLINE;
		break;

	case BIOC_SSOFFLINE:
		mbox.b[4] = MFI_PD_OFFLINE;
		break;

	case BIOC_SSHOTSPARE:
		mbox.b[4] = MFI_PD_HOTSPARE;
		break;

	case BIOC_SSREBUILD:
		if (pd->mpd_fw_state != MFI_PD_OFFLINE) {
			/* repair the disk's state and clear foreign config */
			if ((rv = mfii_makegood(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			/* make it a spare so the firmware can rebuild onto it */
			if ((rv = mfii_makespare(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			/* re-read: the sequence number changes on every transition */
			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
			    SCSI_DATA_IN);
			if (rv != 0)
				goto done;

			/* rebuilding might be started by mfii_makespare() */
			if (pd->mpd_fw_state == MFI_PD_REBUILD) {
				rv = 0;
				goto done;
			}

			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			mbox.s[1] = pd->mpd_pd.mfp_seq;
		}
		mbox.b[4] = MFI_PD_REBUILD;
		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate invalid "
		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
		goto done;
	}


	rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0, 0);
done:
	free(pd, M_DEVBUF, sizeof *pd);
	free(pl, M_DEVBUF, sizeof *pl);
	return (rv);
}
3490 
/*
 * BIOCPATROL handler: start/stop patrol read, switch its operating
 * mode (manual/disabled/auto with optional interval and next-run time),
 * or report the current patrol properties and run state back to bioctl.
 *
 * Returns 0 on success, EINVAL if any firmware command fails or the
 * opcode is unknown.
 */
int
mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *bp)
{
	uint32_t		opc;
	int			rv = 0;
	struct mfi_pr_properties prop;
	struct mfi_pr_status	status;
	uint32_t		time, exec_freq;

	switch (bp->bp_opcode) {
	case BIOC_SPSTOP:
	case BIOC_SPSTART:
		if (bp->bp_opcode == BIOC_SPSTART)
			opc = MR_DCMD_PR_START;
		else
			opc = MR_DCMD_PR_STOP;
		/*
		 * NOTE(review): no data is transferred here, so the
		 * SCSI_DATA_IN flag with a NULL buffer looks spurious --
		 * confirm against mfii_mgmt() before changing.
		 */
		if (mfii_mgmt(sc, opc, NULL, NULL, 0, SCSI_DATA_IN))
			return (EINVAL);
		break;

	case BIOC_SPMANUAL:
	case BIOC_SPDISABLE:
	case BIOC_SPAUTO:
		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time), SCSI_DATA_IN))
			return (EINVAL);

		/* read-modify-write the patrol properties */
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop), SCSI_DATA_IN))
			return (EINVAL);

		switch (bp->bp_opcode) {
		case BIOC_SPMANUAL:
			prop.op_mode = MFI_PR_OPMODE_MANUAL;
			break;
		case BIOC_SPDISABLE:
			prop.op_mode = MFI_PR_OPMODE_DISABLED;
			break;
		case BIOC_SPAUTO:
			/* bp_autoival: -1 = continuous, >0 = seconds between runs */
			if (bp->bp_autoival != 0) {
				if (bp->bp_autoival == -1)
					/* continuously */
					exec_freq = 0xffffffffU;
				else if (bp->bp_autoival > 0)
					exec_freq = bp->bp_autoival;
				else
					return (EINVAL);
				prop.exec_freq = exec_freq;
			}
			/* bp_autonext: seconds from now until the next run */
			if (bp->bp_autonext != 0) {
				if (bp->bp_autonext < 0)
					return (EINVAL);
				else
					prop.next_exec = time + bp->bp_autonext;
			}
			prop.op_mode = MFI_PR_OPMODE_AUTO;
			break;
		}

		opc = MR_DCMD_PR_SET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop), SCSI_DATA_OUT))
			return (EINVAL);

		break;

	case BIOC_GPSTATUS:
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop), SCSI_DATA_IN))
			return (EINVAL);

		opc = MR_DCMD_PR_GET_STATUS;
		if (mfii_mgmt(sc, opc, NULL, &status, sizeof(status), SCSI_DATA_IN))
			return (EINVAL);

		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time), SCSI_DATA_IN))
			return (EINVAL);

		/* translate the firmware opmode into bio(4) terms */
		switch (prop.op_mode) {
		case MFI_PR_OPMODE_AUTO:
			bp->bp_mode = BIOC_SPMAUTO;
			bp->bp_autoival = prop.exec_freq;
			bp->bp_autonext = prop.next_exec;
			bp->bp_autonow = time;
			break;
		case MFI_PR_OPMODE_MANUAL:
			bp->bp_mode = BIOC_SPMMANUAL;
			break;
		case MFI_PR_OPMODE_DISABLED:
			bp->bp_mode = BIOC_SPMDISABLED;
			break;
		default:
			printf("%s: unknown patrol mode %d\n",
			    DEVNAME(sc), prop.op_mode);
			break;
		}

		/* translate the firmware run state into bio(4) terms */
		switch (status.state) {
		case MFI_PR_STATE_STOPPED:
			bp->bp_status = BIOC_SPSSTOPPED;
			break;
		case MFI_PR_STATE_READY:
			bp->bp_status = BIOC_SPSREADY;
			break;
		case MFI_PR_STATE_ACTIVE:
			bp->bp_status = BIOC_SPSACTIVE;
			break;
		case MFI_PR_STATE_ABORTED:
			bp->bp_status = BIOC_SPSABORTED;
			break;
		default:
			printf("%s: unknown patrol state %d\n",
			    DEVNAME(sc), status.state);
			break;
		}

		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_patrol biocpatrol invalid "
		    "opcode %x\n", DEVNAME(sc), bp->bp_opcode);
		return (EINVAL);
	}

	return (rv);
}
3619 
3620 int
3621 mfii_bio_hs(struct mfii_softc *sc, int volid, int type, void *bio_hs)
3622 {
3623 	struct mfi_conf		*cfg;
3624 	struct mfi_hotspare	*hs;
3625 	struct mfi_pd_details	*pd;
3626 	struct bioc_disk	*sdhs;
3627 	struct bioc_vol		*vdhs;
3628 	struct scsi_inquiry_data *inqbuf;
3629 	char			vend[8+16+4+1], *vendp;
3630 	int			i, rv = EINVAL;
3631 	uint32_t		size;
3632 	union mfi_mbox		mbox;
3633 
3634 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs %d\n", DEVNAME(sc), volid);
3635 
3636 	if (!bio_hs)
3637 		return (EINVAL);
3638 
3639 	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3640 
3641 	/* send single element command to retrieve size for full structure */
3642 	cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
3643 	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg), SCSI_DATA_IN))
3644 		goto freeme;
3645 
3646 	size = cfg->mfc_size;
3647 	free(cfg, M_DEVBUF, sizeof *cfg);
3648 
3649 	/* memory for read config */
3650 	cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
3651 	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size, SCSI_DATA_IN))
3652 		goto freeme;
3653 
3654 	/* calculate offset to hs structure */
3655 	hs = (struct mfi_hotspare *)(
3656 	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
3657 	    cfg->mfc_array_size * cfg->mfc_no_array +
3658 	    cfg->mfc_ld_size * cfg->mfc_no_ld);
3659 
3660 	if (volid < cfg->mfc_no_ld)
3661 		goto freeme; /* not a hotspare */
3662 
3663 	if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
3664 		goto freeme; /* not a hotspare */
3665 
3666 	/* offset into hotspare structure */
3667 	i = volid - cfg->mfc_no_ld;
3668 
3669 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs i %d volid %d no_ld %d no_hs %d "
3670 	    "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
3671 	    cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
3672 
3673 	/* get pd fields */
3674 	memset(&mbox, 0, sizeof(mbox));
3675 	mbox.s[0] = hs[i].mhs_pd.mfp_id;
3676 	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3677 	    SCSI_DATA_IN)) {
3678 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs illegal PD\n",
3679 		    DEVNAME(sc));
3680 		goto freeme;
3681 	}
3682 
3683 	switch (type) {
3684 	case MFI_MGMT_VD:
3685 		vdhs = bio_hs;
3686 		vdhs->bv_status = BIOC_SVONLINE;
3687 		vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3688 		vdhs->bv_level = -1; /* hotspare */
3689 		vdhs->bv_nodisk = 1;
3690 		break;
3691 
3692 	case MFI_MGMT_SD:
3693 		sdhs = bio_hs;
3694 		sdhs->bd_status = BIOC_SDHOTSPARE;
3695 		sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3696 		sdhs->bd_channel = pd->mpd_enc_idx;
3697 		sdhs->bd_target = pd->mpd_enc_slot;
3698 		inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
3699 		vendp = inqbuf->vendor;
3700 		memcpy(vend, vendp, sizeof vend - 1);
3701 		vend[sizeof vend - 1] = '\0';
3702 		strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
3703 		break;
3704 
3705 	default:
3706 		goto freeme;
3707 	}
3708 
3709 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs 6\n", DEVNAME(sc));
3710 	rv = 0;
3711 freeme:
3712 	free(pd, M_DEVBUF, sizeof *pd);
3713 	free(cfg, M_DEVBUF, 0);
3714 
3715 	return (rv);
3716 }
3717 
3718 #ifndef SMALL_KERNEL
3719 
3720 #define MFI_BBU_SENSORS 4
3721 
/*
 * Refresh the battery backup unit sensors from the firmware.  Fills
 * sc_bbu[0..3] (health indicator, voltage, current, temperature) and
 * the per-bit sc_bbu_status[] indicator sensors.  On any failure the
 * sensors are marked unknown rather than left stale.
 */
void
mfii_bbu(struct mfii_softc *sc)
{
	struct mfi_bbu_status bbu;
	u_int32_t status;
	u_int32_t mask;
	u_int32_t soh_bad;
	int i;

	if (mfii_mgmt(sc, MR_DCMD_BBU_GET_STATUS, NULL, &bbu,
	    sizeof(bbu), SCSI_DATA_IN) != 0) {
		/* firmware query failed: invalidate every BBU sensor */
		for (i = 0; i < MFI_BBU_SENSORS; i++) {
			sc->sc_bbu[i].value = 0;
			sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
		}
		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
			sc->sc_bbu_status[i].value = 0;
			sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
		}
		return;
	}

	/* pick the "battery bad" mask appropriate for the battery type */
	switch (bbu.battery_type) {
	case MFI_BBU_TYPE_IBBU:
		mask = MFI_BBU_STATE_BAD_IBBU;
		soh_bad = 0;
		break;
	case MFI_BBU_TYPE_BBU:
		mask = MFI_BBU_STATE_BAD_BBU;
		soh_bad = (bbu.detail.bbu.is_SOH_good == 0);
		break;

	case MFI_BBU_TYPE_NONE:
	default:
		/* no (known) battery: flag the health sensor critical */
		sc->sc_bbu[0].value = 0;
		sc->sc_bbu[0].status = SENSOR_S_CRIT;
		for (i = 1; i < MFI_BBU_SENSORS; i++) {
			sc->sc_bbu[i].value = 0;
			sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
		}
		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
			sc->sc_bbu_status[i].value = 0;
			sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
		}
		return;
	}

	status = letoh32(bbu.fw_status);

	/* sensor 0: overall health (bad state bits or bad state-of-health) */
	sc->sc_bbu[0].value = ((status & mask) || soh_bad) ? 0 : 1;
	sc->sc_bbu[0].status = ((status & mask) || soh_bad) ? SENSOR_S_CRIT :
	    SENSOR_S_OK;

	/* mV -> uV, mA -> uA, degC -> uK (+273.15 C offset) */
	sc->sc_bbu[1].value = letoh16(bbu.voltage) * 1000;
	sc->sc_bbu[2].value = (int16_t)letoh16(bbu.current) * 1000;
	sc->sc_bbu[3].value = letoh16(bbu.temperature) * 1000000 + 273150000;
	for (i = 1; i < MFI_BBU_SENSORS; i++)
		sc->sc_bbu[i].status = SENSOR_S_UNSPEC;

	/* one indicator sensor per fw_status bit */
	for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
		sc->sc_bbu_status[i].value = (status & (1 << i)) ? 1 : 0;
		sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
	}
}
3786 
3787 void
3788 mfii_refresh_ld_sensor(struct mfii_softc *sc, int ld)
3789 {
3790 	struct ksensor *sensor;
3791 	int target;
3792 
3793 	target = sc->sc_ld_list.mll_list[ld].mll_ld.mld_target;
3794 	sensor = &sc->sc_sensors[target];
3795 
3796 	switch(sc->sc_ld_list.mll_list[ld].mll_state) {
3797 	case MFI_LD_OFFLINE:
3798 		sensor->value = SENSOR_DRIVE_FAIL;
3799 		sensor->status = SENSOR_S_CRIT;
3800 		break;
3801 
3802 	case MFI_LD_PART_DEGRADED:
3803 	case MFI_LD_DEGRADED:
3804 		sensor->value = SENSOR_DRIVE_PFAIL;
3805 		sensor->status = SENSOR_S_WARN;
3806 		break;
3807 
3808 	case MFI_LD_ONLINE:
3809 		sensor->value = SENSOR_DRIVE_ONLINE;
3810 		sensor->status = SENSOR_S_OK;
3811 		break;
3812 
3813 	default:
3814 		sensor->value = 0; /* unknown */
3815 		sensor->status = SENSOR_S_UNKNOWN;
3816 		break;
3817 	}
3818 }
3819 
3820 void
3821 mfii_init_ld_sensor(struct mfii_softc *sc, int ld)
3822 {
3823 	struct device		*dev;
3824 	struct scsi_link	*link;
3825 	struct ksensor		*sensor;
3826 	int			target;
3827 
3828 	target = sc->sc_ld_list.mll_list[ld].mll_ld.mld_target;
3829 	sensor = &sc->sc_sensors[target];
3830 
3831 	link = scsi_get_link(sc->sc_scsibus, target, 0);
3832 	if (link == NULL) {
3833 		strlcpy(sensor->desc, "cache", sizeof(sensor->desc));
3834 	} else {
3835 		dev = link->device_softc;
3836 		if (dev != NULL)
3837 			strlcpy(sensor->desc, dev->dv_xname,
3838 			    sizeof(sensor->desc));
3839 	}
3840 	sensor->type = SENSOR_DRIVE;
3841 	mfii_refresh_ld_sensor(sc, ld);
3842 }
3843 
3844 int
3845 mfii_create_sensors(struct mfii_softc *sc)
3846 {
3847 	int			i, target;
3848 
3849 	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
3850 	    sizeof(sc->sc_sensordev.xname));
3851 
3852 	if (ISSET(letoh32(sc->sc_info.mci_hw_present), MFI_INFO_HW_BBU)) {
3853 		sc->sc_bbu = mallocarray(4, sizeof(*sc->sc_bbu),
3854 		    M_DEVBUF, M_WAITOK | M_ZERO);
3855 
3856 		sc->sc_bbu[0].type = SENSOR_INDICATOR;
3857 		sc->sc_bbu[0].status = SENSOR_S_UNKNOWN;
3858 		strlcpy(sc->sc_bbu[0].desc, "bbu ok",
3859 		    sizeof(sc->sc_bbu[0].desc));
3860 		sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[0]);
3861 
3862 		sc->sc_bbu[1].type = SENSOR_VOLTS_DC;
3863 		sc->sc_bbu[1].status = SENSOR_S_UNSPEC;
3864 		sc->sc_bbu[2].type = SENSOR_AMPS;
3865 		sc->sc_bbu[2].status = SENSOR_S_UNSPEC;
3866 		sc->sc_bbu[3].type = SENSOR_TEMP;
3867 		sc->sc_bbu[3].status = SENSOR_S_UNSPEC;
3868 		for (i = 1; i < MFI_BBU_SENSORS; i++) {
3869 			strlcpy(sc->sc_bbu[i].desc, "bbu",
3870 			    sizeof(sc->sc_bbu[i].desc));
3871 			sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[i]);
3872 		}
3873 
3874 		sc->sc_bbu_status = malloc(sizeof(*sc->sc_bbu_status) *
3875 		    sizeof(mfi_bbu_indicators), M_DEVBUF, M_WAITOK | M_ZERO);
3876 
3877 		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
3878 			sc->sc_bbu_status[i].type = SENSOR_INDICATOR;
3879 			sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
3880 			strlcpy(sc->sc_bbu_status[i].desc,
3881 			    mfi_bbu_indicators[i],
3882 			    sizeof(sc->sc_bbu_status[i].desc));
3883 
3884 			sensor_attach(&sc->sc_sensordev, &sc->sc_bbu_status[i]);
3885 		}
3886 	}
3887 
3888 	sc->sc_sensors = mallocarray(MFI_MAX_LD, sizeof(struct ksensor),
3889 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3890 	if (sc->sc_sensors == NULL)
3891 		return (1);
3892 
3893 	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
3894 		mfii_init_ld_sensor(sc, i);
3895 		target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
3896 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[target]);
3897 	}
3898 
3899 	if (sensor_task_register(sc, mfii_refresh_sensors, 10) == NULL)
3900 		goto bad;
3901 
3902 	sensordev_install(&sc->sc_sensordev);
3903 
3904 	return (0);
3905 
3906 bad:
3907 	free(sc->sc_sensors, M_DEVBUF,
3908 	    MFI_MAX_LD * sizeof(struct ksensor));
3909 
3910 	return (1);
3911 }
3912 
3913 void
3914 mfii_refresh_sensors(void *arg)
3915 {
3916 	struct mfii_softc	*sc = arg;
3917 	int			i;
3918 
3919 	rw_enter_write(&sc->sc_lock);
3920 	if (sc->sc_bbu != NULL)
3921 		mfii_bbu(sc);
3922 
3923 	mfii_bio_getitall(sc);
3924 	rw_exit_write(&sc->sc_lock);
3925 
3926 	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++)
3927 		mfii_refresh_ld_sensor(sc, i);
3928 }
3929 #endif /* SMALL_KERNEL */
3930 #endif /* NBIO > 0 */
3931