xref: /openbsd-src/sys/dev/pci/mfii.c (revision 99fd087599a8791921855f21bd7e36130f39aadc)
1 /* $OpenBSD: mfii.c,v 1.67 2020/02/05 16:29:30 krw Exp $ */
2 
3 /*
4  * Copyright (c) 2012 David Gwynne <dlg@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "bio.h"
20 
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/malloc.h>
24 #include <sys/device.h>
25 #include <sys/dkio.h>
26 #include <sys/pool.h>
27 #include <sys/task.h>
28 #include <sys/atomic.h>
29 #include <sys/sensors.h>
30 #include <sys/rwlock.h>
31 #include <sys/syslog.h>
32 #include <sys/smr.h>
33 
34 #include <dev/biovar.h>
35 #include <dev/pci/pcidevs.h>
36 #include <dev/pci/pcivar.h>
37 
38 #include <machine/bus.h>
39 
40 #include <scsi/scsi_all.h>
41 #include <scsi/scsi_disk.h>
42 #include <scsi/scsiconf.h>
43 
44 #include <dev/ic/mfireg.h>
45 #include <dev/pci/mpiireg.h>
46 
47 #define	MFII_BAR		0x14
48 #define MFII_BAR_35		0x10
49 #define	MFII_PCI_MEMSIZE	0x2000 /* 8k */
50 
51 #define MFII_OSTS_INTR_VALID	0x00000009
52 #define MFII_RPI		0x6c /* reply post host index */
53 #define MFII_OSP2		0xb4 /* outbound scratch pad 2 */
54 #define MFII_OSP3		0xb8 /* outbound scratch pad 3 */
55 
56 #define MFII_REQ_TYPE_SCSI	MPII_REQ_DESCR_SCSI_IO
57 #define MFII_REQ_TYPE_LDIO	(0x7 << 1)
58 #define MFII_REQ_TYPE_MFA	(0x1 << 1)
59 #define MFII_REQ_TYPE_NO_LOCK	(0x2 << 1)
60 #define MFII_REQ_TYPE_HI_PRI	(0x6 << 1)
61 
62 #define MFII_REQ_MFA(_a)	htole64((_a) | MFII_REQ_TYPE_MFA)
63 
64 #define MFII_FUNCTION_PASSTHRU_IO			(0xf0)
65 #define MFII_FUNCTION_LDIO_REQUEST			(0xf1)
66 
67 #define MFII_MAX_CHAIN_UNIT	0x00400000
68 #define MFII_MAX_CHAIN_MASK	0x000003E0
69 #define MFII_MAX_CHAIN_SHIFT	5
70 
71 #define MFII_256K_IO		128
72 #define MFII_1MB_IO		(MFII_256K_IO * 4)
73 
74 #define MFII_CHAIN_FRAME_MIN	1024
75 
/*
 * Request descriptor written to the hardware request queue to post a
 * command.  The low bits of "flags" carry one of the MFII_REQ_TYPE_*
 * values; "smid" indexes the request frame the descriptor refers to.
 */
struct mfii_request_descr {
	u_int8_t	flags;		/* MFII_REQ_TYPE_* request type */
	u_int8_t	msix_index;
	u_int16_t	smid;		/* system message id (1-based) */

	u_int16_t	lmid;
	u_int16_t	dev_handle;
} __packed;
84 
85 #define MFII_RAID_CTX_IO_TYPE_SYSPD	(0x1 << 4)
86 #define MFII_RAID_CTX_TYPE_CUDA		(0x2 << 4)
87 
/*
 * Per-command RAID context embedded in the request frame after the
 * MPII SCSI IO message.  Field usage differs between controller
 * generations; the iop tables below select which flag set is written.
 */
struct mfii_raid_context {
	u_int8_t	type_nseg;	/* MFII_RAID_CTX_TYPE_* | nseg */
	u_int8_t	_reserved1;
	u_int16_t	timeout_value;

	u_int16_t	reg_lock_flags;
/* reg_lock_flags values used by gen-2.5 (CUDA) controllers */
#define MFII_RAID_CTX_RL_FLAGS_SEQNO_EN	(0x08)
#define MFII_RAID_CTX_RL_FLAGS_CPU0	(0x00)
#define MFII_RAID_CTX_RL_FLAGS_CPU1	(0x10)
#define MFII_RAID_CTX_RL_FLAGS_CUDA	(0x80)

/* same field reinterpreted as routing flags on gen-3.5 controllers */
#define MFII_RAID_CTX_ROUTING_FLAGS_SQN	(1 << 4)
#define MFII_RAID_CTX_ROUTING_FLAGS_CPU0 0
	u_int16_t	virtual_disk_target_id;

	u_int64_t	reg_lock_row_lba;

	u_int32_t	reg_lock_length;

	u_int16_t	next_lm_id;
	u_int8_t	ex_status;	/* extended status from firmware */
	u_int8_t	status;		/* MFI status from firmware */

	u_int8_t	raid_flags;	/* MFII_RAID_CTX_IO_TYPE_* */
	u_int8_t	num_sge;
	u_int16_t	config_seq_num;

	u_int8_t	span_arm;
	u_int8_t	_reserved3[3];
} __packed;
118 
/*
 * MPII IEEE scatter-gather element; sg_flags is a combination of the
 * MFII_SGE_* values below (address type, chain, end-of-list).
 */
struct mfii_sge {
	u_int64_t	sg_addr;	/* DMA address of the segment */
	u_int32_t	sg_len;		/* length of the segment in bytes */
	u_int16_t	_reserved;
	u_int8_t	sg_next_chain_offset;
	u_int8_t	sg_flags;	/* MFII_SGE_* */
} __packed;
126 
127 #define MFII_SGE_ADDR_MASK		(0x03)
128 #define MFII_SGE_ADDR_SYSTEM		(0x00)
129 #define MFII_SGE_ADDR_IOCDDR		(0x01)
130 #define MFII_SGE_ADDR_IOCPLB		(0x02)
131 #define MFII_SGE_ADDR_IOCPLBNTA		(0x03)
132 #define MFII_SGE_END_OF_LIST		(0x40)
133 #define MFII_SGE_CHAIN_ELEMENT		(0x80)
134 
135 #define MFII_REQUEST_SIZE	256
136 
137 #define MR_DCMD_LD_MAP_GET_INFO			0x0300e101
138 
139 #define MFII_MAX_ROW		32
140 #define MFII_MAX_ARRAY		128
141 
/* per-array row -> physical disk id map, part of the LD map below */
struct mfii_array_map {
	uint16_t		mam_pd[MFII_MAX_ROW];
} __packed;
145 
/* firmware-reported device handle for one physical disk */
struct mfii_dev_handle {
	uint16_t		mdh_cur_handle;	/* handle currently in use */
	uint8_t			mdh_valid;
	uint8_t			mdh_reserved;
	uint16_t		mdh_handle[2];	/* handle per path */
} __packed;
152 
/*
 * Layout of the MR_DCMD_LD_MAP_GET_INFO response; used to learn the
 * dev handles and timeout for syspd (physical disk passthru) I/O.
 */
struct mfii_ld_map {
	uint32_t		mlm_total_size;
	uint32_t		mlm_reserved1[5];
	uint32_t		mlm_num_lds;
	uint32_t		mlm_reserved2;
	uint8_t			mlm_tgtid_to_ld[2 * MFI_MAX_LD];
	uint8_t			mlm_pd_timeout;	/* I/O timeout for PDs */
	uint8_t			mlm_reserved3[7];
	struct mfii_array_map	mlm_am[MFII_MAX_ARRAY];
	struct mfii_dev_handle	mlm_dev_handle[MFI_MAX_PD];
} __packed;
164 
/*
 * Frame used for task management (abort) requests: a fixed-size MPII
 * task request followed by the reply the firmware writes back.
 */
struct mfii_task_mgmt {
	union {
		uint8_t			request[128];
		struct mpii_msg_scsi_task_request
					mpii_request;
	} __packed __aligned(8);

	union {
		uint8_t			reply[128];
		uint32_t		flags;	/* driver-private, see below */
#define MFII_TASK_MGMT_FLAGS_LD				(1 << 0)
#define MFII_TASK_MGMT_FLAGS_PD				(1 << 1)
		struct mpii_msg_scsi_task_reply
					mpii_reply;
	} __packed __aligned(8);
} __packed __aligned(8);
181 
/* a single-segment DMA-able memory allocation; see mfii_dmamem_alloc() */
struct mfii_dmamem {
	bus_dmamap_t		mdm_map;
	bus_dma_segment_t	mdm_seg;
	size_t			mdm_size;	/* size of the allocation */
	caddr_t			mdm_kva;	/* kernel mapping */
};
/* accessors for the map, length, device and kernel addresses */
#define MFII_DMA_MAP(_mdm)	((_mdm)->mdm_map)
#define MFII_DMA_LEN(_mdm)	((_mdm)->mdm_size)
#define MFII_DMA_DVA(_mdm)	((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MFII_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
192 
193 struct mfii_softc;
194 
/*
 * Per-command control block.  Each ccb owns fixed slices of the shared
 * request, mfi frame, sense and sgl DMA regions (kva pointer, device
 * address and offset for each), plus the dmamap used for data transfer.
 */
struct mfii_ccb {
	void			*ccb_request;		/* MPII request frame */
	u_int64_t		ccb_request_dva;
	bus_addr_t		ccb_request_offset;

	void			*ccb_mfi;		/* MFI frame */
	u_int64_t		ccb_mfi_dva;
	bus_addr_t		ccb_mfi_offset;

	struct mfi_sense	*ccb_sense;		/* sense buffer */
	u_int64_t		ccb_sense_dva;
	bus_addr_t		ccb_sense_offset;

	struct mfii_sge		*ccb_sgl;		/* chain sgl slice */
	u_int64_t		ccb_sgl_dva;
	bus_addr_t		ccb_sgl_offset;
	u_int			ccb_sgl_len;

	struct mfii_request_descr ccb_req;	/* descriptor posted to hw */

	bus_dmamap_t		ccb_dmamap;

	/* data for sgl */
	void			*ccb_data;
	size_t			ccb_len;

	int			ccb_direction;
#define MFII_DATA_NONE			0
#define MFII_DATA_IN			1
#define MFII_DATA_OUT			2

	/* completion callback and its argument */
	void			*ccb_cookie;
	void			(*ccb_done)(struct mfii_softc *,
				    struct mfii_ccb *);

	u_int32_t		ccb_flags;
#define MFI_CCB_F_ERR			(1<<0)
	u_int			ccb_smid;	/* 1-based hw message id */
	u_int			ccb_refcnt;
	SIMPLEQ_ENTRY(mfii_ccb)	ccb_link;
};
SIMPLEQ_HEAD(mfii_ccb_list, mfii_ccb);
237 
/*
 * Snapshot of physical disk dev handles, published via SMR pointers so
 * readers (mfii_dev_handle()) need no lock; freed through smr_call().
 */
struct mfii_pd_dev_handles {
	struct smr_entry	pd_smr;
	uint16_t		pd_handles[MFI_MAX_PD];
};
242 
/* state for the passthru (syspd) scsibus exposing physical disks */
struct mfii_pd_softc {
	struct scsi_link	pd_link;
	struct scsibus_softc	*pd_scsibus;
	struct mfii_pd_dev_handles *pd_dev_handles;	/* SMR-protected */
	uint8_t			pd_timeout;
};
249 
/*
 * Per-controller-generation parameters; one static instance per IOP
 * family below, selected at attach time via the PCI id table.
 */
struct mfii_iop {
	int bar;		/* which PCI BAR maps the registers */
	int num_sge_loc;	/* where the sge count lives in the frame */
#define MFII_IOP_NUM_SGE_LOC_ORIG	0
#define MFII_IOP_NUM_SGE_LOC_35		1
	u_int16_t ldio_ctx_reg_lock_flags; /* raid context lock/routing flags */
	u_int8_t ldio_req_type;		/* descriptor type for LD I/O */
	u_int8_t ldio_ctx_type_nseg;	/* raid context type_nseg value */
	u_int8_t sge_flag_chain;	/* sg_flags for a chain element */
	u_int8_t sge_flag_eol;		/* sg_flags marking end of list */
};
261 
/* per-controller softc */
struct mfii_softc {
	struct device		sc_dev;
	const struct mfii_iop	*sc_iop;	/* generation parameters */

	/* NOTE(review): sc_pc/sc_tag are read by pci_intr_(dis)establish
	 * but no assignment is visible in this file — verify they are set
	 * during attach. */
	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	void			*sc_ih;		/* interrupt handler cookie */

	struct mutex		sc_ccb_mtx;	/* protects sc_ccb_freeq */
	struct mutex		sc_post_mtx;	/* serialises request posting */

	u_int			sc_max_fw_cmds;	/* firmware command limit */
	u_int			sc_max_cmds;	/* commands we actually use */
	u_int			sc_max_sgl;	/* max sges per command */

	/* reply post queue written by the firmware, polled on interrupt */
	u_int			sc_reply_postq_depth;
	u_int			sc_reply_postq_index;
	struct mutex		sc_reply_postq_mtx;
	struct mfii_dmamem	*sc_reply_postq;

	/* shared DMA regions sliced up per-ccb */
	struct mfii_dmamem	*sc_requests;
	struct mfii_dmamem	*sc_mfi;
	struct mfii_dmamem	*sc_sense;
	struct mfii_dmamem	*sc_sgl;

	struct mfii_ccb		*sc_ccb;
	struct mfii_ccb_list	sc_ccb_freeq;

	struct mfii_ccb		*sc_aen_ccb;	/* in-flight AEN command */
	struct task		sc_aen_task;

	struct mutex		sc_abort_mtx;	/* protects sc_abort_list */
	struct mfii_ccb_list	sc_abort_list;
	struct task		sc_abort_task;

	struct scsi_link	sc_link;	/* logical disk bus */
	struct scsibus_softc	*sc_scsibus;
	struct mfii_pd_softc	*sc_pd;		/* physical disk bus */
	struct scsi_iopool	sc_iopool;

	/* save some useful information for logical drives that is missing
	 * in sc_ld_list
	 */
	struct {
		char		ld_dev[16];	/* device name sd? */
	}			sc_ld[MFI_MAX_LD];
	int			sc_target_lds[MFI_MAX_LD];

	/* scsi ioctl from sd device */
	int			(*sc_ioctl)(struct device *, u_long, caddr_t);

	/* bio */
	struct mfi_conf		*sc_cfg;
	struct mfi_ctrl_info	sc_info;
	struct mfi_ld_list	sc_ld_list;
	struct mfi_ld_details	*sc_ld_details; /* array to all logical disks */
	int			sc_no_pd; /* used physical disks */
	int			sc_ld_sz; /* sizeof sc_ld_details */

	/* mgmt lock */
	struct rwlock		sc_lock;

	/* sensors */
	struct ksensordev	sc_sensordev;
	struct ksensor		*sc_bbu;
	struct ksensor		*sc_bbu_status;
	struct ksensor		*sc_sensors;
};
336 
337 #ifdef MFII_DEBUG
338 #define DPRINTF(x...)		do { if (mfii_debug) printf(x); } while(0)
339 #define DNPRINTF(n,x...)	do { if (mfii_debug & n) printf(x); } while(0)
340 #define	MFII_D_CMD		0x0001
341 #define	MFII_D_INTR		0x0002
342 #define	MFII_D_MISC		0x0004
343 #define	MFII_D_DMA		0x0008
344 #define	MFII_D_IOCTL		0x0010
345 #define	MFII_D_RW		0x0020
346 #define	MFII_D_MEM		0x0040
347 #define	MFII_D_CCB		0x0080
348 uint32_t	mfii_debug = 0
349 /*		    | MFII_D_CMD */
350 /*		    | MFII_D_INTR */
351 		    | MFII_D_MISC
352 /*		    | MFII_D_DMA */
353 /*		    | MFII_D_IOCTL */
354 /*		    | MFII_D_RW */
355 /*		    | MFII_D_MEM */
356 /*		    | MFII_D_CCB */
357 		;
358 #else
359 #define DPRINTF(x...)
360 #define DNPRINTF(n,x...)
361 #endif
362 
363 int		mfii_match(struct device *, void *, void *);
364 void		mfii_attach(struct device *, struct device *, void *);
365 int		mfii_detach(struct device *, int);
366 int		mfii_activate(struct device *, int);
367 
/* autoconf attachment glue */
struct cfattach mfii_ca = {
	sizeof(struct mfii_softc),
	mfii_match,
	mfii_attach,
	mfii_detach,
	mfii_activate,
};
375 
/* autoconf driver definition */
struct cfdriver mfii_cd = {
	NULL,
	"mfii",
	DV_DULL
};
381 
382 void		mfii_scsi_cmd(struct scsi_xfer *);
383 void		mfii_scsi_cmd_done(struct mfii_softc *, struct mfii_ccb *);
384 int		mfii_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
385 int		mfii_ioctl_cache(struct scsi_link *, u_long, struct dk_cache *);
386 
/* adapter entry points for the logical disk bus */
struct scsi_adapter mfii_switch = {
	mfii_scsi_cmd, NULL, NULL, NULL, mfii_scsi_ioctl
};
390 
391 void		mfii_pd_scsi_cmd(struct scsi_xfer *);
392 int		mfii_pd_scsi_probe(struct scsi_link *);
393 
/* adapter entry points for the physical disk (passthru) bus */
struct scsi_adapter mfii_pd_switch = {
	mfii_pd_scsi_cmd, NULL, mfii_pd_scsi_probe, NULL, NULL,
};
397 
398 #define DEVNAME(_sc)		((_sc)->sc_dev.dv_xname)
399 
400 u_int32_t		mfii_read(struct mfii_softc *, bus_size_t);
401 void			mfii_write(struct mfii_softc *, bus_size_t, u_int32_t);
402 
403 struct mfii_dmamem *	mfii_dmamem_alloc(struct mfii_softc *, size_t);
404 void			mfii_dmamem_free(struct mfii_softc *,
405 			    struct mfii_dmamem *);
406 
407 void *			mfii_get_ccb(void *);
408 void			mfii_put_ccb(void *, void *);
409 int			mfii_init_ccb(struct mfii_softc *);
410 void			mfii_scrub_ccb(struct mfii_ccb *);
411 
412 int			mfii_transition_firmware(struct mfii_softc *);
413 int			mfii_initialise_firmware(struct mfii_softc *);
414 int			mfii_get_info(struct mfii_softc *);
415 int			mfii_syspd(struct mfii_softc *);
416 
417 void			mfii_start(struct mfii_softc *, struct mfii_ccb *);
418 void			mfii_done(struct mfii_softc *, struct mfii_ccb *);
419 int			mfii_poll(struct mfii_softc *, struct mfii_ccb *);
420 void			mfii_poll_done(struct mfii_softc *, struct mfii_ccb *);
421 int			mfii_exec(struct mfii_softc *, struct mfii_ccb *);
422 void			mfii_exec_done(struct mfii_softc *, struct mfii_ccb *);
423 int			mfii_my_intr(struct mfii_softc *);
424 int			mfii_intr(void *);
425 void			mfii_postq(struct mfii_softc *);
426 
427 int			mfii_load_ccb(struct mfii_softc *, struct mfii_ccb *,
428 			    void *, int);
429 int			mfii_load_mfa(struct mfii_softc *, struct mfii_ccb *,
430 			    void *, int);
431 
432 int			mfii_mfa_poll(struct mfii_softc *, struct mfii_ccb *);
433 
434 int			mfii_mgmt(struct mfii_softc *, uint32_t,
435 			    const union mfi_mbox *, void *, size_t, int);
436 int			mfii_do_mgmt(struct mfii_softc *, struct mfii_ccb *,
437 			    uint32_t, const union mfi_mbox *, void *, size_t,
438 			    int);
439 void			mfii_empty_done(struct mfii_softc *, struct mfii_ccb *);
440 
441 int			mfii_scsi_cmd_io(struct mfii_softc *,
442 			    struct scsi_xfer *);
443 int			mfii_scsi_cmd_cdb(struct mfii_softc *,
444 			    struct scsi_xfer *);
445 int			mfii_pd_scsi_cmd_cdb(struct mfii_softc *,
446 			    struct scsi_xfer *);
447 void			mfii_scsi_cmd_tmo(void *);
448 
449 int			mfii_dev_handles_update(struct mfii_softc *sc);
450 void			mfii_dev_handles_smr(void *pd_arg);
451 
452 void			mfii_abort_task(void *);
453 void			mfii_abort(struct mfii_softc *, struct mfii_ccb *,
454 			    uint16_t, uint16_t, uint8_t, uint32_t);
455 void			mfii_scsi_cmd_abort_done(struct mfii_softc *,
456 			    struct mfii_ccb *);
457 
458 int			mfii_aen_register(struct mfii_softc *);
459 void			mfii_aen_start(struct mfii_softc *, struct mfii_ccb *,
460 			    struct mfii_dmamem *, uint32_t);
461 void			mfii_aen_done(struct mfii_softc *, struct mfii_ccb *);
462 void			mfii_aen(void *);
463 void			mfii_aen_unregister(struct mfii_softc *);
464 
465 void			mfii_aen_pd_insert(struct mfii_softc *,
466 			    const struct mfi_evtarg_pd_address *);
467 void			mfii_aen_pd_remove(struct mfii_softc *,
468 			    const struct mfi_evtarg_pd_address *);
469 void			mfii_aen_pd_state_change(struct mfii_softc *,
470 			    const struct mfi_evtarg_pd_state *);
471 void			mfii_aen_ld_update(struct mfii_softc *);
472 
473 #if NBIO > 0
474 int		mfii_ioctl(struct device *, u_long, caddr_t);
475 int		mfii_bio_getitall(struct mfii_softc *);
476 int		mfii_ioctl_inq(struct mfii_softc *, struct bioc_inq *);
477 int		mfii_ioctl_vol(struct mfii_softc *, struct bioc_vol *);
478 int		mfii_ioctl_disk(struct mfii_softc *, struct bioc_disk *);
479 int		mfii_ioctl_alarm(struct mfii_softc *, struct bioc_alarm *);
480 int		mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *);
481 int		mfii_ioctl_setstate(struct mfii_softc *,
482 		    struct bioc_setstate *);
483 int		mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *);
484 int		mfii_bio_hs(struct mfii_softc *, int, int, void *);
485 
486 #ifndef SMALL_KERNEL
/* human-readable names for the BBU status bits, in bit order */
static const char *mfi_bbu_indicators[] = {
	"pack missing",
	"voltage low",
	"temp high",
	"charge active",
	"discharge active",
	"learn cycle req'd",
	"learn cycle active",
	"learn cycle failed",
	"learn cycle timeout",
	"I2C errors",
	"replace pack",
	"low capacity",
	"periodic learn req'd"
};
502 
503 void		mfii_init_ld_sensor(struct mfii_softc *, int);
504 void		mfii_refresh_ld_sensor(struct mfii_softc *, int);
505 int		mfii_create_sensors(struct mfii_softc *);
506 void		mfii_refresh_sensors(void *);
507 void		mfii_bbu(struct mfii_softc *);
508 #endif /* SMALL_KERNEL */
509 #endif /* NBIO > 0 */
510 
511 /*
512  * mfii boards support asynchronous (and non-polled) completion of
513  * dcmds by proxying them through a passthru mpii command that points
514  * at a dcmd frame. since the passthru command is submitted like
515  * the scsi commands using an SMID in the request descriptor,
516  * ccb_request memory * must contain the passthru command because
517  * that is what the SMID refers to. this means ccb_request cannot
518  * contain the dcmd. rather than allocating separate dma memory to
519  * hold the dcmd, we reuse the sense memory buffer for it.
520  */
521 
522 void			mfii_dcmd_start(struct mfii_softc *,
523 			    struct mfii_ccb *);
524 
525 static inline void
526 mfii_dcmd_scrub(struct mfii_ccb *ccb)
527 {
528 	memset(ccb->ccb_sense, 0, sizeof(*ccb->ccb_sense));
529 }
530 
531 static inline struct mfi_dcmd_frame *
532 mfii_dcmd_frame(struct mfii_ccb *ccb)
533 {
534 	CTASSERT(sizeof(struct mfi_dcmd_frame) <= sizeof(*ccb->ccb_sense));
535 	return ((struct mfi_dcmd_frame *)ccb->ccb_sense);
536 }
537 
538 static inline void
539 mfii_dcmd_sync(struct mfii_softc *sc, struct mfii_ccb *ccb, int flags)
540 {
541 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sense),
542 	    ccb->ccb_sense_offset, sizeof(*ccb->ccb_sense), flags);
543 }
544 
545 #define mfii_fw_state(_sc) mfii_read((_sc), MFI_OSP)
546 
547 const struct mfii_iop mfii_iop_thunderbolt = {
548 	MFII_BAR,
549 	MFII_IOP_NUM_SGE_LOC_ORIG,
550 	0,
551 	MFII_REQ_TYPE_LDIO,
552 	0,
553 	MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA,
554 	0
555 };
556 
557 /*
558  * a lot of these values depend on us not implementing fastpath yet.
559  */
560 const struct mfii_iop mfii_iop_25 = {
561 	MFII_BAR,
562 	MFII_IOP_NUM_SGE_LOC_ORIG,
563 	MFII_RAID_CTX_RL_FLAGS_CPU0, /* | MFII_RAID_CTX_RL_FLAGS_SEQNO_EN */
564 	MFII_REQ_TYPE_NO_LOCK,
565 	MFII_RAID_CTX_TYPE_CUDA | 0x1,
566 	MFII_SGE_CHAIN_ELEMENT,
567 	MFII_SGE_END_OF_LIST
568 };
569 
570 const struct mfii_iop mfii_iop_35 = {
571 	MFII_BAR_35,
572 	MFII_IOP_NUM_SGE_LOC_35,
573 	MFII_RAID_CTX_ROUTING_FLAGS_CPU0, /* | MFII_RAID_CTX_ROUTING_FLAGS_SQN */
574 	MFII_REQ_TYPE_NO_LOCK,
575 	MFII_RAID_CTX_TYPE_CUDA | 0x1,
576 	MFII_SGE_CHAIN_ELEMENT,
577 	MFII_SGE_END_OF_LIST
578 };
579 
/* one entry in the PCI id match table below */
struct mfii_device {
	pcireg_t		mpd_vendor;
	pcireg_t		mpd_product;
	const struct mfii_iop	*mpd_iop;	/* generation parameters */
};
585 
/* supported controllers, mapping PCI ids to their iop parameter sets */
const struct mfii_device mfii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_2208,
	    &mfii_iop_thunderbolt },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3008,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3108,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3404,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3504,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3408,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3508,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3416,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3516,
	    &mfii_iop_35 }
};
606 
607 const struct mfii_iop *mfii_find_iop(struct pci_attach_args *);
608 
609 const struct mfii_iop *
610 mfii_find_iop(struct pci_attach_args *pa)
611 {
612 	const struct mfii_device *mpd;
613 	int i;
614 
615 	for (i = 0; i < nitems(mfii_devices); i++) {
616 		mpd = &mfii_devices[i];
617 
618 		if (mpd->mpd_vendor == PCI_VENDOR(pa->pa_id) &&
619 		    mpd->mpd_product == PCI_PRODUCT(pa->pa_id))
620 			return (mpd->mpd_iop);
621 	}
622 
623 	return (NULL);
624 }
625 
626 int
627 mfii_match(struct device *parent, void *match, void *aux)
628 {
629 	return ((mfii_find_iop(aux) != NULL) ? 1 : 0);
630 }
631 
632 void
633 mfii_attach(struct device *parent, struct device *self, void *aux)
634 {
635 	struct mfii_softc *sc = (struct mfii_softc *)self;
636 	struct pci_attach_args *pa = aux;
637 	pcireg_t memtype;
638 	pci_intr_handle_t ih;
639 	struct scsibus_attach_args saa;
640 	u_int32_t status, scpad2, scpad3;
641 	int chain_frame_sz, nsge_in_io, nsge_in_chain, i;
642 
643 	/* init sc */
644 	sc->sc_iop = mfii_find_iop(aux);
645 	sc->sc_dmat = pa->pa_dmat;
646 	SIMPLEQ_INIT(&sc->sc_ccb_freeq);
647 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
648 	mtx_init(&sc->sc_post_mtx, IPL_BIO);
649 	mtx_init(&sc->sc_reply_postq_mtx, IPL_BIO);
650 	scsi_iopool_init(&sc->sc_iopool, sc, mfii_get_ccb, mfii_put_ccb);
651 
652 	rw_init(&sc->sc_lock, "mfii_lock");
653 
654 	sc->sc_aen_ccb = NULL;
655 	task_set(&sc->sc_aen_task, mfii_aen, sc);
656 
657 	mtx_init(&sc->sc_abort_mtx, IPL_BIO);
658 	SIMPLEQ_INIT(&sc->sc_abort_list);
659 	task_set(&sc->sc_abort_task, mfii_abort_task, sc);
660 
661 	/* wire up the bus shizz */
662 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_iop->bar);
663 	if (pci_mapreg_map(pa, sc->sc_iop->bar, memtype, 0,
664 	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios, MFII_PCI_MEMSIZE)) {
665 		printf(": unable to map registers\n");
666 		return;
667 	}
668 
669 	/* disable interrupts */
670 	mfii_write(sc, MFI_OMSK, 0xffffffff);
671 
672 	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
673 		printf(": unable to map interrupt\n");
674 		goto pci_unmap;
675 	}
676 	printf(": %s\n", pci_intr_string(pa->pa_pc, ih));
677 
678 	/* lets get started */
679 	if (mfii_transition_firmware(sc))
680 		goto pci_unmap;
681 
682 	/* determine max_cmds (refer to the Linux megaraid_sas driver) */
683 	scpad3 = mfii_read(sc, MFII_OSP3);
684 	status = mfii_fw_state(sc);
685 	sc->sc_max_fw_cmds = scpad3 & MFI_STATE_MAXCMD_MASK;
686 	if (sc->sc_max_fw_cmds == 0)
687 		sc->sc_max_fw_cmds = status & MFI_STATE_MAXCMD_MASK;
688 	/*
689 	 * reduce max_cmds by 1 to ensure that the reply queue depth does not
690 	 * exceed FW supplied max_fw_cmds.
691 	 */
692 	sc->sc_max_cmds = min(sc->sc_max_fw_cmds, 1024) - 1;
693 
694 	/* determine max_sgl (refer to the Linux megaraid_sas driver) */
695 	scpad2 = mfii_read(sc, MFII_OSP2);
696 	chain_frame_sz =
697 		((scpad2 & MFII_MAX_CHAIN_MASK) >> MFII_MAX_CHAIN_SHIFT) *
698 		((scpad2 & MFII_MAX_CHAIN_UNIT) ? MFII_1MB_IO : MFII_256K_IO);
699 	if (chain_frame_sz < MFII_CHAIN_FRAME_MIN)
700 		chain_frame_sz = MFII_CHAIN_FRAME_MIN;
701 
702 	nsge_in_io = (MFII_REQUEST_SIZE -
703 		sizeof(struct mpii_msg_scsi_io) -
704 		sizeof(struct mfii_raid_context)) / sizeof(struct mfii_sge);
705 	nsge_in_chain = chain_frame_sz / sizeof(struct mfii_sge);
706 
707 	/* round down to nearest power of two */
708 	sc->sc_max_sgl = 1;
709 	while ((sc->sc_max_sgl << 1) <= (nsge_in_io + nsge_in_chain))
710 		sc->sc_max_sgl <<= 1;
711 
712 	DNPRINTF(MFII_D_MISC, "%s: OSP 0x%08x, OSP2 0x%08x, OSP3 0x%08x\n",
713 	    DEVNAME(sc), status, scpad2, scpad3);
714 	DNPRINTF(MFII_D_MISC, "%s: max_fw_cmds %d, max_cmds %d\n",
715 	    DEVNAME(sc), sc->sc_max_fw_cmds, sc->sc_max_cmds);
716 	DNPRINTF(MFII_D_MISC, "%s: nsge_in_io %d, nsge_in_chain %d, "
717 	    "max_sgl %d\n", DEVNAME(sc), nsge_in_io, nsge_in_chain,
718 	    sc->sc_max_sgl);
719 
720 	/* sense memory */
721 	CTASSERT(sizeof(struct mfi_sense) == MFI_SENSE_SIZE);
722 	sc->sc_sense = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
723 	if (sc->sc_sense == NULL) {
724 		printf("%s: unable to allocate sense memory\n", DEVNAME(sc));
725 		goto pci_unmap;
726 	}
727 
728 	/* reply post queue */
729 	sc->sc_reply_postq_depth = roundup(sc->sc_max_fw_cmds, 16);
730 
731 	sc->sc_reply_postq = mfii_dmamem_alloc(sc,
732 	    sc->sc_reply_postq_depth * sizeof(struct mpii_reply_descr));
733 	if (sc->sc_reply_postq == NULL)
734 		goto free_sense;
735 
736 	memset(MFII_DMA_KVA(sc->sc_reply_postq), 0xff,
737 	    MFII_DMA_LEN(sc->sc_reply_postq));
738 
739 	/* MPII request frame array */
740 	sc->sc_requests = mfii_dmamem_alloc(sc,
741 	    MFII_REQUEST_SIZE * (sc->sc_max_cmds + 1));
742 	if (sc->sc_requests == NULL)
743 		goto free_reply_postq;
744 
745 	/* MFI command frame array */
746 	sc->sc_mfi = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_FRAME_SIZE);
747 	if (sc->sc_mfi == NULL)
748 		goto free_requests;
749 
750 	/* MPII SGL array */
751 	sc->sc_sgl = mfii_dmamem_alloc(sc, sc->sc_max_cmds *
752 	    sizeof(struct mfii_sge) * sc->sc_max_sgl);
753 	if (sc->sc_sgl == NULL)
754 		goto free_mfi;
755 
756 	if (mfii_init_ccb(sc) != 0) {
757 		printf("%s: could not init ccb list\n", DEVNAME(sc));
758 		goto free_sgl;
759 	}
760 
761 	/* kickstart firmware with all addresses and pointers */
762 	if (mfii_initialise_firmware(sc) != 0) {
763 		printf("%s: could not initialize firmware\n", DEVNAME(sc));
764 		goto free_sgl;
765 	}
766 
767 	if (mfii_get_info(sc) != 0) {
768 		printf("%s: could not retrieve controller information\n",
769 		    DEVNAME(sc));
770 		goto free_sgl;
771 	}
772 
773 	printf("%s: \"%s\", firmware %s", DEVNAME(sc),
774 	    sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
775 	if (letoh16(sc->sc_info.mci_memory_size) > 0)
776 		printf(", %uMB cache", letoh16(sc->sc_info.mci_memory_size));
777 	printf("\n");
778 
779 	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO,
780 	    mfii_intr, sc, DEVNAME(sc));
781 	if (sc->sc_ih == NULL)
782 		goto free_sgl;
783 
784 	sc->sc_link.openings = sc->sc_max_cmds;
785 	sc->sc_link.adapter_softc = sc;
786 	sc->sc_link.adapter = &mfii_switch;
787 	sc->sc_link.adapter_target = sc->sc_info.mci_max_lds;
788 	sc->sc_link.adapter_buswidth = sc->sc_info.mci_max_lds;
789 	sc->sc_link.pool = &sc->sc_iopool;
790 
791 	memset(&saa, 0, sizeof(saa));
792 	saa.saa_sc_link = &sc->sc_link;
793 
794 	sc->sc_scsibus = (struct scsibus_softc *)
795 	    config_found(&sc->sc_dev, &saa, scsiprint);
796 
797 	mfii_syspd(sc);
798 
799 	if (mfii_aen_register(sc) != 0) {
800 		/* error printed by mfii_aen_register */
801 		goto intr_disestablish;
802 	}
803 
804 	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
805 	    sizeof(sc->sc_ld_list), SCSI_DATA_IN) != 0) {
806 		printf("%s: getting list of logical disks failed\n", DEVNAME(sc));
807 		goto intr_disestablish;
808 	}
809 	memset(sc->sc_target_lds, -1, sizeof(sc->sc_target_lds));
810 	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
811 		int target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
812 		sc->sc_target_lds[target] = i;
813 	}
814 
815 	/* enable interrupts */
816 	mfii_write(sc, MFI_OSTS, 0xffffffff);
817 	mfii_write(sc, MFI_OMSK, ~MFII_OSTS_INTR_VALID);
818 
819 #if NBIO > 0
820 	if (bio_register(&sc->sc_dev, mfii_ioctl) != 0)
821 		panic("%s: controller registration failed", DEVNAME(sc));
822 	else
823 		sc->sc_ioctl = mfii_ioctl;
824 
825 #ifndef SMALL_KERNEL
826 	if (mfii_create_sensors(sc) != 0)
827 		printf("%s: unable to create sensors\n", DEVNAME(sc));
828 #endif
829 #endif /* NBIO > 0 */
830 
831 	return;
832 intr_disestablish:
833 	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
834 free_sgl:
835 	mfii_dmamem_free(sc, sc->sc_sgl);
836 free_mfi:
837 	mfii_dmamem_free(sc, sc->sc_mfi);
838 free_requests:
839 	mfii_dmamem_free(sc, sc->sc_requests);
840 free_reply_postq:
841 	mfii_dmamem_free(sc, sc->sc_reply_postq);
842 free_sense:
843 	mfii_dmamem_free(sc, sc->sc_sense);
844 pci_unmap:
845 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
846 }
847 
848 static inline uint16_t
849 mfii_dev_handle(struct mfii_softc *sc, uint16_t target)
850 {
851 	struct mfii_pd_dev_handles *handles;
852 	uint16_t handle;
853 
854 	smr_read_enter();
855 	handles = SMR_PTR_GET(&sc->sc_pd->pd_dev_handles);
856 	handle = handles->pd_handles[target];
857 	smr_read_leave();
858 
859 	return (handle);
860 }
861 
862 void
863 mfii_dev_handles_smr(void *pd_arg)
864 {
865 	struct mfii_pd_dev_handles *handles = pd_arg;
866 
867 	free(handles, M_DEVBUF, sizeof(*handles));
868 }
869 
/*
 * Refresh the physical disk dev handle table from the firmware's LD
 * map.  A new table is built, published with SMR_PTR_SET_LOCKED(), and
 * the old one is retired via smr_call() so that lockless readers in
 * mfii_dev_handle() never see freed memory.  Returns 0 or EIO.
 */
int
mfii_dev_handles_update(struct mfii_softc *sc)
{
	struct mfii_ld_map *lm;
	struct mfii_pd_dev_handles *handles, *old_handles;
	int i;
	int rv = 0;

	lm = malloc(sizeof(*lm), M_TEMP, M_WAITOK|M_ZERO);

	rv = mfii_mgmt(sc, MR_DCMD_LD_MAP_GET_INFO, NULL, lm, sizeof(*lm),
	    SCSI_DATA_IN|SCSI_NOSLEEP);

	if (rv != 0) {
		rv = EIO;
		goto free_lm;
	}

	/* build the replacement table before publishing anything */
	handles = malloc(sizeof(*handles), M_DEVBUF, M_WAITOK);
	smr_init(&handles->pd_smr);
	for (i = 0; i < MFI_MAX_PD; i++)
		handles->pd_handles[i] = lm->mlm_dev_handle[i].mdh_cur_handle;

	/* commit the updated info */
	sc->sc_pd->pd_timeout = lm->mlm_pd_timeout;
	old_handles = SMR_PTR_GET_LOCKED(&sc->sc_pd->pd_dev_handles);
	SMR_PTR_SET_LOCKED(&sc->sc_pd->pd_dev_handles, handles);

	/* old table may still have readers; defer the free to SMR */
	if (old_handles != NULL)
		smr_call(&old_handles->pd_smr, mfii_dev_handles_smr, old_handles);

free_lm:
	free(lm, M_TEMP, sizeof(*lm));

	return (rv);
}
906 
/*
 * Create the passthru scsibus that exposes the controller's physical
 * disks directly.  Returns 0 on success, 1 on failure (the controller
 * still works without the syspd bus).
 */
int
mfii_syspd(struct mfii_softc *sc)
{
	struct scsibus_attach_args saa;
	struct scsi_link *link;

	/* M_WAITOK cannot fail; NULL check kept for safety */
	sc->sc_pd = malloc(sizeof(*sc->sc_pd), M_DEVBUF, M_WAITOK|M_ZERO);
	if (sc->sc_pd == NULL)
		return (1);

	/* populate pd_dev_handles/pd_timeout before the bus scans */
	if (mfii_dev_handles_update(sc) != 0)
		goto free_pdsc;

	link = &sc->sc_pd->pd_link;
	link->adapter = &mfii_pd_switch;
	link->adapter_softc = sc;
	link->adapter_buswidth = MFI_MAX_PD;
	link->adapter_target = -1;
	link->openings = sc->sc_max_cmds - 1;
	link->pool = &sc->sc_iopool;

	memset(&saa, 0, sizeof(saa));
	saa.saa_sc_link = link;

	sc->sc_pd->pd_scsibus = (struct scsibus_softc *)
	    config_found(&sc->sc_dev, &saa, scsiprint);

	return (0);

free_pdsc:
	free(sc->sc_pd, M_DEVBUF, sizeof(*sc->sc_pd));
	return (1);
}
940 
/*
 * Detach: tear down sensors, the AEN machinery, the interrupt handler
 * and the shared DMA regions.  sc_ih doubling as the "attach got far
 * enough to need teardown" marker.
 */
int
mfii_detach(struct device *self, int flags)
{
	struct mfii_softc *sc = (struct mfii_softc *)self;

	if (sc->sc_ih == NULL)
		return (0);

#ifndef SMALL_KERNEL
	if (sc->sc_sensors) {
		sensordev_deinstall(&sc->sc_sensordev);
		free(sc->sc_sensors, M_DEVBUF,
		    MFI_MAX_LD * sizeof(struct ksensor));
	}

	if (sc->sc_bbu) {
		free(sc->sc_bbu, M_DEVBUF, 4 * sizeof(*sc->sc_bbu));
	}

	if (sc->sc_bbu_status) {
		/* NOTE(review): multiplies by the byte size of the
		 * mfi_bbu_indicators array rather than nitems(); assumed
		 * to mirror the allocation site (not visible here) —
		 * verify both sides match. */
		free(sc->sc_bbu_status, M_DEVBUF,
		    sizeof(*sc->sc_bbu_status) * sizeof(mfi_bbu_indicators));
	}
#endif /* SMALL_KERNEL */

	mfii_aen_unregister(sc);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	mfii_dmamem_free(sc, sc->sc_sgl);
	mfii_dmamem_free(sc, sc->sc_mfi);
	mfii_dmamem_free(sc, sc->sc_requests);
	mfii_dmamem_free(sc, sc->sc_reply_postq);
	mfii_dmamem_free(sc, sc->sc_sense);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);

	return (0);
}
977 
978 static void
979 mfii_flush_cache(struct mfii_softc *sc, struct mfii_ccb *ccb)
980 {
981 	union mfi_mbox mbox = {
982 		.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE,
983 	};
984 	int rv;
985 
986 	mfii_scrub_ccb(ccb);
987 	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH, &mbox,
988 	    NULL, 0, SCSI_NOSLEEP);
989 	if (rv != 0) {
990 		printf("%s: unable to flush cache\n", DEVNAME(sc));
991 		return;
992 	}
993 }
994 
995 static void
996 mfii_shutdown(struct mfii_softc *sc, struct mfii_ccb *ccb)
997 {
998 	int rv;
999 
1000 	mfii_scrub_ccb(ccb);
1001 	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_SHUTDOWN, NULL,
1002 	    NULL, 0, SCSI_POLL);
1003 	if (rv != 0) {
1004 		printf("%s: unable to shutdown controller\n", DEVNAME(sc));
1005 		return;
1006 	}
1007 }
1008 
1009 static void
1010 mfii_powerdown(struct mfii_softc *sc)
1011 {
1012 	struct mfii_ccb *ccb;
1013 
1014 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
1015 	if (ccb == NULL) {
1016 		printf("%s: unable to allocate ccb for shutdown\n",
1017 		    DEVNAME(sc));
1018 		return;
1019 	}
1020 
1021 	mfii_flush_cache(sc, ccb);
1022 	mfii_shutdown(sc, ccb);
1023 	scsi_io_put(&sc->sc_iopool, ccb);
1024 }
1025 
1026 int
1027 mfii_activate(struct device *self, int act)
1028 {
1029 	struct mfii_softc *sc = (struct mfii_softc *)self;
1030 	int rv;
1031 
1032 	switch (act) {
1033 	case DVACT_POWERDOWN:
1034 		rv = config_activate_children(&sc->sc_dev, act);
1035 		mfii_powerdown(sc);
1036 		break;
1037 	default:
1038 		rv = config_activate_children(&sc->sc_dev, act);
1039 		break;
1040 	}
1041 
1042 	return (rv);
1043 }
1044 
/*
 * Read a 32-bit controller register at offset r.  The read barrier
 * orders this access against earlier bus_space accesses to the
 * same region.
 */
u_int32_t
mfii_read(struct mfii_softc *sc, bus_size_t r)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
}
1052 
/*
 * Write a 32-bit controller register at offset r, followed by a
 * write barrier so the store is pushed out before any later
 * accesses.
 */
void
mfii_write(struct mfii_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
1060 
/*
 * Allocate a single-segment, page-aligned, zeroed DMA buffer of
 * the given size and map+load it so both the kernel va (mdm_kva)
 * and device address are ready for use.  Returns NULL on failure;
 * partially constructed state is unwound through the goto chain.
 * The result is released with mfii_dmamem_free().
 */
struct mfii_dmamem *
mfii_dmamem_alloc(struct mfii_softc *sc, size_t size)
{
	struct mfii_dmamem *m;
	int nsegs;

	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (m == NULL)
		return (NULL);

	m->mdm_size = size;

	/* one segment only: hardware structures must be contiguous */
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->mdm_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &m->mdm_seg, nsegs, size, &m->mdm_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, m->mdm_map, m->mdm_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (m);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
mdmfree:
	free(m, M_DEVBUF, sizeof *m);

	return (NULL);
}
1102 
/*
 * Release a buffer obtained from mfii_dmamem_alloc().  Teardown is
 * the exact reverse of the construction order: unload, unmap, free
 * the memory, destroy the map, then free the bookkeeping struct.
 */
void
mfii_dmamem_free(struct mfii_softc *sc, struct mfii_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
	free(m, M_DEVBUF, sizeof *m);
}
1112 
/*
 * Post an MFI DCMD frame (built by the caller via mfii_dcmd_frame(),
 * which appears to live in the ccb's sense area — the chain element
 * below points at ccb_sense_dva) to the controller as a passthru
 * request.  The MPI request carries a single chain SGE referring to
 * the MFI frame.
 */
void
mfii_dcmd_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);

	io->function = MFII_FUNCTION_PASSTHRU_IO;
	/* SGL offsets are expressed in 32-bit words / 16-byte units */
	io->sgl_offset0 = (uint32_t *)sge - (uint32_t *)io;
	io->chain_offset = io->sgl_offset0 / 4;

	htolem64(&sge->sg_addr, ccb->ccb_sense_dva);
	htolem32(&sge->sg_len, sizeof(*ccb->ccb_sense));
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	mfii_start(sc, ccb);
}
1133 
/*
 * Set up asynchronous event notification (AEN).  Fetches the event
 * log info to learn the boot sequence number, allocates a DMA
 * buffer for event details, and arms the first MR_DCMD_CTRL_EVENT_WAIT.
 * The ccb and the dmamem are intentionally not released on success:
 * they are owned by the AEN machinery (sc_aen_ccb) from here on.
 */
int
mfii_aen_register(struct mfii_softc *sc)
{
	struct mfi_evt_log_info mel;
	struct mfii_ccb *ccb;
	struct mfii_dmamem *mdm;
	int rv;

	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
	if (ccb == NULL) {
		printf("%s: unable to allocate ccb for aen\n", DEVNAME(sc));
		return (ENOMEM);
	}

	memset(&mel, 0, sizeof(mel));
	mfii_scrub_ccb(ccb);

	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_EVENT_GET_INFO, NULL,
	    &mel, sizeof(mel), SCSI_DATA_IN|SCSI_NOSLEEP);
	if (rv != 0) {
		scsi_io_put(&sc->sc_iopool, ccb);
		printf("%s: unable to get event info\n", DEVNAME(sc));
		return (EIO);
	}

	mdm = mfii_dmamem_alloc(sc, sizeof(struct mfi_evt_detail));
	if (mdm == NULL) {
		scsi_io_put(&sc->sc_iopool, ccb);
		printf("%s: unable to allocate event data\n", DEVNAME(sc));
		return (ENOMEM);
	}

	/* replay all the events from boot */
	mfii_aen_start(sc, ccb, mdm, lemtoh32(&mel.mel_boot_seq_num));

	return (0);
}
1171 
/*
 * Arm (or re-arm) the asynchronous event wait: build an
 * MR_DCMD_CTRL_EVENT_WAIT frame asking for any event with sequence
 * number >= seq, pointing at mdm for the event detail, and post it.
 * Completion is delivered through mfii_aen_done().
 */
void
mfii_aen_start(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct mfii_dmamem *mdm, uint32_t seq)
{
	struct mfi_dcmd_frame *dcmd = mfii_dcmd_frame(ccb);
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	union mfi_sgl *sgl = &dcmd->mdf_sgl;
	union mfi_evt_class_locale mec;

	/* reset the ccb and the frame before reuse */
	mfii_scrub_ccb(ccb);
	mfii_dcmd_scrub(ccb);
	memset(MFII_DMA_KVA(mdm), 0, MFII_DMA_LEN(mdm));

	ccb->ccb_cookie = mdm;
	ccb->ccb_done = mfii_aen_done;
	sc->sc_aen_ccb = ccb;

	/* listen for everything: lowest class, all locales */
	mec.mec_members.class = MFI_EVT_CLASS_DEBUG;
	mec.mec_members.reserved = 0;
	mec.mec_members.locale = htole16(MFI_EVT_LOCALE_ALL);

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_sg_count = 1;
	hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ | MFI_FRAME_SGL64);
	htolem32(&hdr->mfh_data_len, MFII_DMA_LEN(mdm));
	dcmd->mdf_opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	htolem32(&dcmd->mdf_mbox.w[0], seq);
	htolem32(&dcmd->mdf_mbox.w[1], mec.mec_word);
	htolem64(&sgl->sg64[0].addr, MFII_DMA_DVA(mdm));
	htolem32(&sgl->sg64[0].len, MFII_DMA_LEN(mdm));

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_PREREAD);

	mfii_dcmd_sync(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	mfii_dcmd_start(sc, ccb);
}
1209 
/*
 * Interrupt-side completion of the AEN wait.  Just punts the real
 * processing to mfii_aen() on the system taskq.
 */
void
mfii_aen_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	KASSERT(sc->sc_aen_ccb == ccb);

	/* defer to a thread with KERNEL_LOCK so we can run autoconf */
	task_add(systq, &sc->sc_aen_task);
}
1218 
/*
 * Taskq handler for an asynchronous event.  Syncs the event detail
 * out of DMA memory, dispatches on the event code (physical disk
 * hotplug/state changes and logical disk create/delete), then
 * re-arms the event wait at the next sequence number.
 */
void
mfii_aen(void *arg)
{
	struct mfii_softc *sc = arg;
	struct mfii_ccb *ccb = sc->sc_aen_ccb;
	struct mfii_dmamem *mdm = ccb->ccb_cookie;
	const struct mfi_evt_detail *med = MFII_DMA_KVA(mdm);
	uint32_t code;

	mfii_dcmd_sync(sc, ccb,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_POSTREAD);

	code = lemtoh32(&med->med_code);

#if 0
	log(LOG_DEBUG, "%s (seq %u, code %08x) %s\n", DEVNAME(sc),
	    lemtoh32(&med->med_seq_num), code, med->med_description);
#endif

	switch (code) {
	case MFI_EVT_PD_INSERTED_EXT:
		/* only act when the argument actually carries a pd address */
		if (med->med_arg_type != MFI_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_insert(sc, &med->args.pd_address);
		break;
 	case MFI_EVT_PD_REMOVED_EXT:
		if (med->med_arg_type != MFI_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_remove(sc, &med->args.pd_address);
		break;

	case MFI_EVT_PD_STATE_CHANGE:
		if (med->med_arg_type != MFI_EVT_ARGS_PD_STATE)
			break;

		mfii_aen_pd_state_change(sc, &med->args.pd_state);
		break;

	case MFI_EVT_LD_CREATED:
	case MFI_EVT_LD_DELETED:
		mfii_aen_ld_update(sc);
		break;

	default:
		break;
	}

	/* re-arm for the next event after this one */
	mfii_aen_start(sc, ccb, mdm, lemtoh32(&med->med_seq_num) + 1);
}
1272 
/*
 * A physical disk was hot-inserted: refresh the device handle map
 * and probe the new target on the passthru scsibus.
 */
void
mfii_aen_pd_insert(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_address *pd)
{
#if 0
	printf("%s: pd inserted ext\n", DEVNAME(sc));
	printf("%s:  device_id %04x encl_id: %04x type %x\n", DEVNAME(sc),
	    lemtoh16(&pd->device_id), lemtoh16(&pd->encl_id),
	    pd->scsi_dev_type);
	printf("%s:  connected %02x addrs %016llx %016llx\n", DEVNAME(sc),
	    pd->connected.port_bitmap, lemtoh64(&pd->sas_addr[0]),
	    lemtoh64(&pd->sas_addr[1]));
#endif

	if (mfii_dev_handles_update(sc) != 0) /* refresh map */
		return;

	scsi_probe_target(sc->sc_pd->pd_scsibus, lemtoh16(&pd->device_id));
}
1292 
/*
 * A physical disk was removed: deactivate and force-detach the
 * corresponding target on the passthru scsibus.
 */
void
mfii_aen_pd_remove(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_address *pd)
{
#if 0
	printf("%s: pd removed ext\n", DEVNAME(sc));
	printf("%s:  device_id %04x encl_id: %04x type %u\n", DEVNAME(sc),
	    lemtoh16(&pd->device_id), lemtoh16(&pd->encl_id),
	    pd->scsi_dev_type);
	printf("%s:  connected %02x addrs %016llx %016llx\n", DEVNAME(sc),
	    pd->connected.port_bitmap, lemtoh64(&pd->sas_addr[0]),
	    lemtoh64(&pd->sas_addr[1]));
#endif
	uint16_t target = lemtoh16(&pd->device_id);

	scsi_activate(sc->sc_pd->pd_scsibus, target, -1, DVACT_DEACTIVATE);

	/* the firmware will abort outstanding commands for us */

	scsi_detach_target(sc->sc_pd->pd_scsibus, target, DETACH_FORCE);
}
1314 
/*
 * A physical disk changed state.  Leaving MFI_PD_SYSTEM means the
 * disk is no longer ours (pulled or claimed for raid), so detach
 * it; entering MFI_PD_SYSTEM from unconfigured-good means the
 * firmware is handing it to us, so probe it.
 */
void
mfii_aen_pd_state_change(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_state *state)
{
	uint16_t target = lemtoh16(&state->pd.mep_device_id);

	if (state->prev_state == htole32(MFI_PD_SYSTEM) &&
	    state->new_state != htole32(MFI_PD_SYSTEM)) {
		/* it's been pulled or configured for raid */

		scsi_activate(sc->sc_pd->pd_scsibus, target, -1,
		    DVACT_DEACTIVATE);
		/* outstanding commands will simply complete or get aborted */
		scsi_detach_target(sc->sc_pd->pd_scsibus, target,
		    DETACH_FORCE);

	} else if (state->prev_state == htole32(MFI_PD_UNCONFIG_GOOD) &&
	    state->new_state == htole32(MFI_PD_SYSTEM)) {
		/* the firmware is handing the disk over */

		scsi_probe_target(sc->sc_pd->pd_scsibus, target);
	}
}
1338 
/*
 * A logical disk was created or deleted: re-fetch the LD list and
 * diff it against sc_target_lds, probing targets that appeared and
 * detaching targets that disappeared.  Sensors are attached and
 * detached alongside (unless SMALL_KERNEL).
 */
void
mfii_aen_ld_update(struct mfii_softc *sc)
{
	int i, state, target, old, nld;
	int newlds[MFI_MAX_LD];

	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), SCSI_DATA_IN) != 0) {
		DNPRINTF(MFII_D_MISC, "%s: getting list of logical disks failed\n",
		    DEVNAME(sc));
		return;
	}

	/* -1 means "no LD at this target" */
	memset(newlds, -1, sizeof(newlds));

	/* build target -> LD-list-index map from the fresh list */
	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
		state = sc->sc_ld_list.mll_list[i].mll_state;
		target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		DNPRINTF(MFII_D_MISC, "%s: target %d: state %d\n",
		    DEVNAME(sc), target, state);
		newlds[target] = i;
	}

	for (i = 0; i < MFI_MAX_LD; i++) {
		old = sc->sc_target_lds[i];
		nld = newlds[i];

		if (old == -1 && nld != -1) {
			DNPRINTF(MFII_D_MISC, "%s: attaching target %d\n",
			    DEVNAME(sc), i);

			scsi_probe_target(sc->sc_scsibus, i);

#ifndef SMALL_KERNEL
			mfii_init_ld_sensor(sc, nld);
			sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
#endif
		} else if (nld == -1 && old != -1) {
			DNPRINTF(MFII_D_MISC, "%s: detaching target %d\n",
			    DEVNAME(sc), i);

			scsi_activate(sc->sc_scsibus, i, -1,
			    DVACT_DEACTIVATE);
			scsi_detach_target(sc->sc_scsibus, i,
			    DETACH_FORCE);
#ifndef SMALL_KERNEL
			sensor_detach(&sc->sc_sensordev, &sc->sc_sensors[i]);
#endif
		}
	}

	memcpy(sc->sc_target_lds, newlds, sizeof(sc->sc_target_lds));
}
1392 
/*
 * Tear down AEN on detach.  Currently a stub: the pending
 * EVENT_WAIT DCMD, the aen ccb and its dmamem are not reclaimed.
 */
void
mfii_aen_unregister(struct mfii_softc *sc)
{
	/* XXX */
}
1398 
/*
 * Drive the firmware state machine to MFI_STATE_READY, nudging it
 * out of handshake/operational states via the inbound doorbell and
 * polling for progress.  max_wait is in seconds; each state is
 * polled every 100ms.  Returns 0 when ready, 1 on fault, unknown
 * state, or timeout with no state change.
 */
int
mfii_transition_firmware(struct mfii_softc *sc)
{
	int32_t			fw_state, cur_state;
	int			max_wait, i;

	fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;

	while (fw_state != MFI_STATE_READY) {
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			printf("%s: firmware fault\n", DEVNAME(sc));
			return (1);
		case MFI_STATE_WAIT_HANDSHAKE:
			mfii_write(sc, MFI_SKINNY_IDB,
			    MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		default:
			printf("%s: unknown firmware state %d\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
		/* poll every 100ms for up to max_wait seconds */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
	}

	return (0);
}
1452 
/*
 * Fetch the controller info structure (MR_DCMD_CTRL_GET_INFO) into
 * sc_info.  Everything after the fetch is debug-only output via
 * DPRINTF.  Returns the mfii_mgmt() error, or 0 on success.
 */
int
mfii_get_info(struct mfii_softc *sc)
{
	int i, rv;

	rv = mfii_mgmt(sc, MR_DCMD_CTRL_GET_INFO, NULL, &sc->sc_info,
	    sizeof(sc->sc_info), SCSI_DATA_IN|SCSI_NOSLEEP);

	if (rv != 0)
		return (rv);

	/* the remainder just dumps sc_info for debugging */
	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
		DPRINTF("%s: active FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_image_component[i].mic_name,
		    sc->sc_info.mci_image_component[i].mic_version,
		    sc->sc_info.mci_image_component[i].mic_build_date,
		    sc->sc_info.mci_image_component[i].mic_build_time);
	}

	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
		DPRINTF("%s: pending FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_pending_image_component[i].mic_name,
		    sc->sc_info.mci_pending_image_component[i].mic_version,
		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
	}

	DPRINTF("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_arms,
	    sc->sc_info.mci_max_spans,
	    sc->sc_info.mci_max_arrays,
	    sc->sc_info.mci_max_lds,
	    sc->sc_info.mci_product_name);

	DPRINTF("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_serial_number,
	    sc->sc_info.mci_hw_present,
	    sc->sc_info.mci_current_fw_time,
	    sc->sc_info.mci_max_cmds,
	    sc->sc_info.mci_max_sg_elements);

	DPRINTF("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_request_size,
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_lds_degraded,
	    sc->sc_info.mci_lds_offline,
	    sc->sc_info.mci_pd_present);

	DPRINTF("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pd_disks_present,
	    sc->sc_info.mci_pd_disks_pred_failure,
	    sc->sc_info.mci_pd_disks_failed);

	DPRINTF("%s: nvram %d mem %d flash %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_nvram_size,
	    sc->sc_info.mci_memory_size,
	    sc->sc_info.mci_flash_size);

	DPRINTF("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ram_correctable_errors,
	    sc->sc_info.mci_ram_uncorrectable_errors,
	    sc->sc_info.mci_cluster_allowed,
	    sc->sc_info.mci_cluster_active);

	DPRINTF("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_strips_per_io,
	    sc->sc_info.mci_raid_levels,
	    sc->sc_info.mci_adapter_ops,
	    sc->sc_info.mci_ld_ops);

	DPRINTF("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_stripe_sz_ops.min,
	    sc->sc_info.mci_stripe_sz_ops.max,
	    sc->sc_info.mci_pd_ops,
	    sc->sc_info.mci_pd_mix_support);

	DPRINTF("%s: ecc_bucket %d pckg_prop %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ecc_bucket_count,
	    sc->sc_info.mci_package_version);

	DPRINTF("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_seq_num,
	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);

	DPRINTF("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_rebuild_rate,
	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
	    sc->sc_info.mci_properties.mcp_bgi_rate,
	    sc->sc_info.mci_properties.mcp_cc_rate);

	DPRINTF("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_recon_rate,
	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
	    sc->sc_info.mci_properties.mcp_spinup_delay,
	    sc->sc_info.mci_properties.mcp_cluster_enable);

	DPRINTF("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_coercion_mode,
	    sc->sc_info.mci_properties.mcp_alarm_enable,
	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);

	DPRINTF("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
	    sc->sc_info.mci_properties.mcp_expose_encl_devices);

	DPRINTF("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pci.mip_vendor,
	    sc->sc_info.mci_pci.mip_device,
	    sc->sc_info.mci_pci.mip_subvendor,
	    sc->sc_info.mci_pci.mip_subdevice);

	DPRINTF("%s: type %#x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_host.mih_type,
	    sc->sc_info.mci_host.mih_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0llx ", sc->sc_info.mci_host.mih_port_addr[i]);
	DPRINTF("\n");

	DPRINTF("%s: type %.x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_device.mid_type,
	    sc->sc_info.mci_device.mid_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0llx ", sc->sc_info.mci_device.mid_port_addr[i]);
	DPRINTF("\n");

	return (0);
}
1607 
/*
 * Issue a legacy MFI frame directly (MFA request descriptor) and
 * busy-wait for its completion by watching the frame's status byte.
 * Only used where interrupts are unavailable (e.g. IOC init).
 * Returns 0 on success, 1 on timeout (~5 seconds).
 */
int
mfii_mfa_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mfi_frame_header	*hdr = ccb->ccb_request;
	u_int64_t r;
	int to = 0, rv = 0;

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_mfa_poll called with cookie or done set");
#endif

	hdr->mfh_context = ccb->ccb_smid;
	/* firmware overwrites this; poll until it changes */
	hdr->mfh_cmd_status = MFI_STAT_INVALID_STATUS;
	hdr->mfh_flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	r = MFII_REQ_MFA(ccb->ccb_request_dva);
	memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));

	mfii_start(sc, ccb);

	for (;;) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (hdr->mfh_cmd_status != MFI_STAT_INVALID_STATUS)
			break;

		if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
			    ccb->ccb_smid);
			ccb->ccb_flags |= MFI_CCB_F_ERR;
			rv = 1;
			break;
		}

		/* hand the frame back to the device before re-polling */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		delay(1000);
	}

	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
		    0, ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	return (rv);
}
1663 
/*
 * Run a ccb to completion by polling the reply post queue instead
 * of taking interrupts.  The ccb's done/cookie are temporarily
 * hijacked: mfii_poll_done() clears rv through the cookie when the
 * completion is harvested, after which the original done callback
 * is invoked.
 */
int
mfii_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	void (*done)(struct mfii_softc *, struct mfii_ccb *);
	void *cookie;
	int rv = 1;

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mfii_poll_done;
	ccb->ccb_cookie = &rv;

	mfii_start(sc, ccb);

	do {
		delay(10);
		mfii_postq(sc);
	} while (rv == 1);

	ccb->ccb_cookie = cookie;
	done(sc, ccb);

	return (0);
}
1689 
1690 void
1691 mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1692 {
1693 	int *rv = ccb->ccb_cookie;
1694 
1695 	*rv = 0;
1696 }
1697 
/*
 * Run a ccb and sleep until it completes.  A stack-local mutex is
 * stashed in the cookie; mfii_exec_done() clears the cookie under
 * that mutex and wakes us.  Must only be called from process
 * context (msleep).
 */
int
mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mutex m = MUTEX_INITIALIZER(IPL_BIO);

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_exec called with cookie or done set");
#endif

	ccb->ccb_cookie = &m;
	ccb->ccb_done = mfii_exec_done;

	mfii_start(sc, ccb);

	mtx_enter(&m);
	/* cookie is reset to NULL by mfii_exec_done() on completion */
	while (ccb->ccb_cookie != NULL)
		msleep_nsec(ccb, &m, PRIBIO, "mfiiexec", INFSLP);
	mtx_leave(&m);

	return (0);
}
1720 
/*
 * Completion hook for mfii_exec(): clear the cookie under the
 * caller's mutex (so the msleep loop sees it) and wake the waiter.
 */
void
mfii_exec_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mutex *m = ccb->ccb_cookie;

	mtx_enter(m);
	ccb->ccb_cookie = NULL;
	wakeup_one(ccb);
	mtx_leave(m);
}
1731 
1732 int
1733 mfii_mgmt(struct mfii_softc *sc, uint32_t opc, const union mfi_mbox *mbox,
1734     void *buf, size_t len, int flags)
1735 {
1736 	struct mfii_ccb *ccb;
1737 	int rv;
1738 
1739 	ccb = scsi_io_get(&sc->sc_iopool, flags);
1740 	if (ccb == NULL)
1741 		return (ENOMEM);
1742 
1743 	mfii_scrub_ccb(ccb);
1744 	rv = mfii_do_mgmt(sc, ccb, opc, mbox, buf, len, flags);
1745 	scsi_io_put(&sc->sc_iopool, ccb);
1746 
1747 	return (rv);
1748 }
1749 
1750 int
1751 mfii_do_mgmt(struct mfii_softc *sc, struct mfii_ccb *ccb, uint32_t opc,
1752     const union mfi_mbox *mbox, void *buf, size_t len, int flags)
1753 {
1754 	struct mpii_msg_scsi_io *io = ccb->ccb_request;
1755 	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
1756 	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);
1757 	struct mfi_dcmd_frame *dcmd = ccb->ccb_mfi;
1758 	struct mfi_frame_header *hdr = &dcmd->mdf_header;
1759 	u_int8_t *dma_buf = NULL;
1760 	int rv = EIO;
1761 
1762 	if (cold)
1763 		flags |= SCSI_NOSLEEP;
1764 
1765 	if (buf != NULL) {
1766 		dma_buf = dma_alloc(len, PR_WAITOK);
1767 		if (dma_buf == NULL)
1768 			return (ENOMEM);
1769 	}
1770 
1771 	ccb->ccb_data = dma_buf;
1772 	ccb->ccb_len = len;
1773 	switch (flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
1774 	case SCSI_DATA_IN:
1775 		ccb->ccb_direction = MFII_DATA_IN;
1776 		hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ);
1777 		break;
1778 	case SCSI_DATA_OUT:
1779 		ccb->ccb_direction = MFII_DATA_OUT;
1780 		hdr->mfh_flags = htole16(MFI_FRAME_DIR_WRITE);
1781 		memcpy(dma_buf, buf, len);
1782 		break;
1783 	case 0:
1784 		ccb->ccb_direction = MFII_DATA_NONE;
1785 		hdr->mfh_flags = htole16(MFI_FRAME_DIR_NONE);
1786 		break;
1787 	}
1788 
1789 	if (mfii_load_mfa(sc, ccb, &dcmd->mdf_sgl,
1790 	    ISSET(flags, SCSI_NOSLEEP)) != 0) {
1791 		rv = ENOMEM;
1792 		goto done;
1793 	}
1794 
1795 	hdr->mfh_cmd = MFI_CMD_DCMD;
1796 	hdr->mfh_context = ccb->ccb_smid;
1797 	hdr->mfh_data_len = htole32(len);
1798 	hdr->mfh_sg_count = len ? ccb->ccb_dmamap->dm_nsegs : 0;
1799 
1800 	dcmd->mdf_opcode = opc;
1801 	/* handle special opcodes */
1802 	if (mbox != NULL)
1803 		memcpy(&dcmd->mdf_mbox, mbox, sizeof(dcmd->mdf_mbox));
1804 
1805 	io->function = MFII_FUNCTION_PASSTHRU_IO;
1806 
1807 	if (len) {
1808 		io->sgl_offset0 = ((u_int8_t *)sge - (u_int8_t *)io) / 4;
1809 		io->chain_offset = ((u_int8_t *)sge - (u_int8_t *)io) / 16;
1810 		htolem64(&sge->sg_addr, ccb->ccb_mfi_dva);
1811 		htolem32(&sge->sg_len, MFI_FRAME_SIZE);
1812 		sge->sg_flags =
1813 		    MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;
1814 	}
1815 
1816 	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
1817 	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);
1818 
1819 	if (ISSET(flags, SCSI_NOSLEEP)) {
1820 		ccb->ccb_done = mfii_empty_done;
1821 		mfii_poll(sc, ccb);
1822 	} else
1823 		mfii_exec(sc, ccb);
1824 
1825 	if (hdr->mfh_cmd_status == MFI_STAT_OK) {
1826 		rv = 0;
1827 
1828 		if (ccb->ccb_direction == MFII_DATA_IN)
1829 			memcpy(buf, dma_buf, len);
1830 	}
1831 
1832 done:
1833 	if (buf != NULL)
1834 		dma_free(dma_buf, len);
1835 
1836 	return (rv);
1837 }
1838 
/*
 * No-op completion callback used when the caller inspects the
 * frame status itself after polling.
 */
void
mfii_empty_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
}
1844 
/*
 * Load the ccb's data buffer for DMA and fill the legacy MFI SGL
 * at sglp with 32-bit entries, then sync the buffer for the
 * transfer direction.  Returns 0 on success (including the no-data
 * case), 1 if the dmamap could not be loaded.
 */
int
mfii_load_mfa(struct mfii_softc *sc, struct mfii_ccb *ccb,
    void *sglp, int nosleep)
{
	union mfi_sgl *sgl = sglp;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	int error;
	int i;

	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* 32-bit SGL entries; one per DMA segment */
	for (i = 0; i < dmap->dm_nsegs; i++) {
		sgl->sg32[i].addr = htole32(dmap->dm_segs[i].ds_addr);
		sgl->sg32[i].len = htole32(dmap->dm_segs[i].ds_len);
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	return (0);
}
1876 
/*
 * Post a ccb to the controller by writing its 8-byte request
 * descriptor to the inbound queue port.  On LP64 this is a single
 * raw 8-byte store; on 32-bit platforms the low and high words are
 * written separately under sc_post_mtx so concurrent posts cannot
 * interleave, with barriers between the halves.
 */
void
mfii_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	u_long *r = (u_long *)&ccb->ccb_req;

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#if defined(__LP64__)
	bus_space_write_raw_8(sc->sc_iot, sc->sc_ioh, MFI_IQPL, *r);
#else
	mtx_enter(&sc->sc_post_mtx);
	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh, MFI_IQPL, r[0]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MFI_IQPL, 8, BUS_SPACE_BARRIER_WRITE);

	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh, MFI_IQPH, r[1]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MFI_IQPH, 8, BUS_SPACE_BARRIER_WRITE);
	mtx_leave(&sc->sc_post_mtx);
#endif
}
1900 
/*
 * Common completion path: sync the request frame, the hardware SGL
 * (if one was used) and the data buffer back from the device,
 * unload the data dmamap, then invoke the ccb's done callback.
 */
void
mfii_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_POSTWRITE);
	}

	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
		    0, ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	ccb->ccb_done(sc, ccb);
}
1925 
1926 int
1927 mfii_initialise_firmware(struct mfii_softc *sc)
1928 {
1929 	struct mpii_msg_iocinit_request *iiq;
1930 	struct mfii_dmamem *m;
1931 	struct mfii_ccb *ccb;
1932 	struct mfi_init_frame *init;
1933 	int rv;
1934 
1935 	m = mfii_dmamem_alloc(sc, sizeof(*iiq));
1936 	if (m == NULL)
1937 		return (1);
1938 
1939 	iiq = MFII_DMA_KVA(m);
1940 	memset(iiq, 0, sizeof(*iiq));
1941 
1942 	iiq->function = MPII_FUNCTION_IOC_INIT;
1943 	iiq->whoinit = MPII_WHOINIT_HOST_DRIVER;
1944 
1945 	iiq->msg_version_maj = 0x02;
1946 	iiq->msg_version_min = 0x00;
1947 	iiq->hdr_version_unit = 0x10;
1948 	iiq->hdr_version_dev = 0x0;
1949 
1950 	iiq->system_request_frame_size = htole16(MFII_REQUEST_SIZE / 4);
1951 
1952 	iiq->reply_descriptor_post_queue_depth =
1953 	    htole16(sc->sc_reply_postq_depth);
1954 	iiq->reply_free_queue_depth = htole16(0);
1955 
1956 	htolem32(&iiq->sense_buffer_address_high,
1957 	    MFII_DMA_DVA(sc->sc_sense) >> 32);
1958 
1959 	htolem32(&iiq->reply_descriptor_post_queue_address_lo,
1960 	    MFII_DMA_DVA(sc->sc_reply_postq));
1961 	htolem32(&iiq->reply_descriptor_post_queue_address_hi,
1962 	    MFII_DMA_DVA(sc->sc_reply_postq) >> 32);
1963 
1964 	htolem32(&iiq->system_request_frame_base_address_lo,
1965 	    MFII_DMA_DVA(sc->sc_requests));
1966 	htolem32(&iiq->system_request_frame_base_address_hi,
1967 	    MFII_DMA_DVA(sc->sc_requests) >> 32);
1968 
1969 	iiq->timestamp = htole64(time_uptime);
1970 
1971 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
1972 	if (ccb == NULL) {
1973 		/* shouldn't ever run out of ccbs during attach */
1974 		return (1);
1975 	}
1976 	mfii_scrub_ccb(ccb);
1977 	init = ccb->ccb_request;
1978 
1979 	init->mif_header.mfh_cmd = MFI_CMD_INIT;
1980 	init->mif_header.mfh_data_len = htole32(sizeof(*iiq));
1981 	init->mif_qinfo_new_addr = htole64(MFII_DMA_DVA(m));
1982 
1983 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
1984 	    0, MFII_DMA_LEN(sc->sc_reply_postq),
1985 	    BUS_DMASYNC_PREREAD);
1986 
1987 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
1988 	    0, sizeof(*iiq), BUS_DMASYNC_PREREAD);
1989 
1990 	rv = mfii_mfa_poll(sc, ccb);
1991 
1992 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
1993 	    0, sizeof(*iiq), BUS_DMASYNC_POSTREAD);
1994 
1995 	scsi_io_put(&sc->sc_iopool, ccb);
1996 	mfii_dmamem_free(sc, m);
1997 
1998 	return (rv);
1999 }
2000 
/*
 * Check whether the controller is interrupting us.  If bit 0 of
 * the outbound status register is set it is acknowledged by
 * writing the status back; otherwise the MFII valid-interrupt bits
 * decide.  Returns non-zero when the interrupt is ours.
 */
int
mfii_my_intr(struct mfii_softc *sc)
{
	u_int32_t status;

	status = mfii_read(sc, MFI_OSTS);
	if (ISSET(status, 0x1)) {
		/* ack by writing the status value back */
		mfii_write(sc, MFI_OSTS, status);
		return (1);
	}

	return (ISSET(status, MFII_OSTS_INTR_VALID) ? 1 : 0);
}
2014 
/*
 * Interrupt handler: if the interrupt is ours, harvest the reply
 * post queue.  Returns 1 when handled, 0 otherwise.
 */
int
mfii_intr(void *arg)
{
	struct mfii_softc *sc = arg;

	if (mfii_my_intr(sc)) {
		mfii_postq(sc);
		return (1);
	}

	return (0);
}
2027 
/*
 * Harvest completed commands from the reply post queue.  Under
 * sc_reply_postq_mtx, walk descriptors from the current index
 * until an unused (or still-being-written) one is found, collect
 * the corresponding ccbs on a local list, reset each descriptor to
 * all-ones and advance the index.  Completion callbacks run after
 * the mutex is dropped.
 */
void
mfii_postq(struct mfii_softc *sc)
{
	struct mfii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_reply_descr *postq = MFII_DMA_KVA(sc->sc_reply_postq);
	struct mpii_reply_descr *rdp;
	struct mfii_ccb *ccb;
	int rpi = 0;

	mtx_enter(&sc->sc_reply_postq_mtx);

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_POSTREAD);

	for (;;) {
		rdp = &postq[sc->sc_reply_postq_index];
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		/* smid is 1-based; 0 marks an unused descriptor */
		ccb = &sc->sc_ccb[letoh16(rdp->smid) - 1];
		SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		memset(rdp, 0xff, sizeof(*rdp));

		sc->sc_reply_postq_index++;
		sc->sc_reply_postq_index %= sc->sc_reply_postq_depth;
		rpi = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_PREREAD);

	/* tell the controller how far we've consumed */
	if (rpi)
		mfii_write(sc, MFII_RPI, sc->sc_reply_postq_index);

	mtx_leave(&sc->sc_reply_postq_mtx);

	/* run completions outside the queue mutex */
	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		mfii_done(sc, ccb);
	}
}
2079 
/*
 * scsi_adapter entry point for logical disk (RAID volume) commands.
 * Plain reads/writes use the LDIO fast path; everything else is passed
 * through as a raw CDB.
 */
void
mfii_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	switch (xs->cmd->opcode) {
	case READ_COMMAND:
	case READ_BIG:
	case READ_12:
	case READ_16:
	case WRITE_COMMAND:
	case WRITE_BIG:
	case WRITE_12:
	case WRITE_16:
		/* i/o commands can use the LDIO fast path */
		if (mfii_scsi_cmd_io(sc, xs) != 0)
			goto stuffup;

		break;

	default:
		if (mfii_scsi_cmd_cdb(sc, xs) != 0)
			goto stuffup;
		break;
	}

	xs->error = XS_NOERROR;
	xs->resid = 0;

	if (ISSET(xs->flags, SCSI_POLL)) {
		/* polled commands complete inside mfii_poll(); no timeout */
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	ccb->ccb_refcnt = 2; /* one for the chip, one for the timeout */
	timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
	scsi_done(xs);
}
2134 
2135 void
2136 mfii_scsi_cmd_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
2137 {
2138 	struct scsi_xfer *xs = ccb->ccb_cookie;
2139 	struct mpii_msg_scsi_io *io = ccb->ccb_request;
2140 	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
2141 	u_int refs = 1;
2142 
2143 	if (timeout_del(&xs->stimeout))
2144 		refs = 2;
2145 
2146 	switch (ctx->status) {
2147 	case MFI_STAT_OK:
2148 		break;
2149 
2150 	case MFI_STAT_SCSI_DONE_WITH_ERROR:
2151 		xs->error = XS_SENSE;
2152 		memset(&xs->sense, 0, sizeof(xs->sense));
2153 		memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
2154 		break;
2155 
2156 	case MFI_STAT_LD_OFFLINE:
2157 	case MFI_STAT_DEVICE_NOT_FOUND:
2158 		xs->error = XS_SELTIMEOUT;
2159 		break;
2160 
2161 	default:
2162 		xs->error = XS_DRIVER_STUFFUP;
2163 		break;
2164 	}
2165 
2166 	if (atomic_sub_int_nv(&ccb->ccb_refcnt, refs) == 0)
2167 		scsi_done(xs);
2168 }
2169 
2170 int
2171 mfii_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
2172 {
2173 	struct mfii_softc	*sc = (struct mfii_softc *)link->adapter_softc;
2174 
2175 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_scsi_ioctl\n", DEVNAME(sc));
2176 
2177 	switch (cmd) {
2178 	case DIOCGCACHE:
2179 	case DIOCSCACHE:
2180 		return (mfii_ioctl_cache(link, cmd, (struct dk_cache *)addr));
2181 		break;
2182 
2183 	default:
2184 		if (sc->sc_ioctl)
2185 			return (sc->sc_ioctl(link->adapter_softc, cmd, addr));
2186 		break;
2187 	}
2188 
2189 	return (ENOTTY);
2190 }
2191 
/*
 * Handle DIOCGCACHE/DIOCSCACHE for a logical disk: report or update the
 * cache policy of the volume behind the given scsi_link.  With controller
 * cache memory present the controller cache policy is used, otherwise
 * only the physical disk write cache can be controlled.
 */
int
mfii_ioctl_cache(struct scsi_link *link, u_long cmd,  struct dk_cache *dc)
{
	struct mfii_softc	*sc = (struct mfii_softc *)link->adapter_softc;
	int			 rv, wrenable, rdenable;
	struct mfi_ld_prop	 ldp;
	union mfi_mbox		 mbox;

	/* refresh controller info for an up to date target -> ld mapping */
	if (mfii_get_info(sc)) {
		rv = EIO;
		goto done;
	}

	/* no logical disk behind this target */
	if (sc->sc_target_lds[link->target] == -1) {
		rv = EIO;
		goto done;
	}

	memset(&mbox, 0, sizeof(mbox));
	mbox.b[0] = link->target;
	rv = mfii_mgmt(sc, MR_DCMD_LD_GET_PROPERTIES, &mbox, &ldp, sizeof(ldp),
	    SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	if (sc->sc_info.mci_memory_size > 0) {
		wrenable = ISSET(ldp.mlp_cur_cache_policy,
		    MR_LD_CACHE_ALLOW_WRITE_CACHE)? 1 : 0;
		rdenable = ISSET(ldp.mlp_cur_cache_policy,
		    MR_LD_CACHE_ALLOW_READ_CACHE)? 1 : 0;
	} else {
		wrenable = ISSET(ldp.mlp_diskcache_policy,
		    MR_LD_DISK_CACHE_ENABLE)? 1 : 0;
		rdenable = 0;
	}

	if (cmd == DIOCGCACHE) {
		dc->wrcache = wrenable;
		dc->rdcache = rdenable;
		goto done;
	} /* else DIOCSCACHE */

	/* nothing to do if the requested state matches the current one */
	if (((dc->wrcache) ? 1 : 0) == wrenable &&
	    ((dc->rdcache) ? 1 : 0) == rdenable)
		goto done;

	memset(&mbox, 0, sizeof(mbox));
	mbox.b[0] = ldp.mlp_ld.mld_target;
	mbox.b[1] = ldp.mlp_ld.mld_res;
	mbox.s[1] = ldp.mlp_ld.mld_seq;

	if (sc->sc_info.mci_memory_size > 0) {
		if (dc->rdcache)
			SET(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_READ_CACHE);
		else
			CLR(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_READ_CACHE);
		if (dc->wrcache)
			SET(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_WRITE_CACHE);
		else
			CLR(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_WRITE_CACHE);
	} else {
		/* no controller cache: read caching cannot be enabled */
		if (dc->rdcache) {
			rv = EOPNOTSUPP;
			goto done;
		}
		if (dc->wrcache)
			ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_ENABLE;
		else
			ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_DISABLE;
	}

	rv = mfii_mgmt(sc, MR_DCMD_LD_SET_PROPERTIES, &mbox, &ldp, sizeof(ldp),
	    SCSI_DATA_OUT);
done:
	return (rv);
}
2272 
/*
 * Build a fast-path LDIO request for a read/write command on a logical
 * disk.  Returns 0 on success, nonzero if the data buffer could not be
 * mapped.
 */
int
mfii_scsi_cmd_io(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the raid context immediately follows the MPII io frame */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	int segs;

	io->dev_handle = htole16(link->target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* sgl starts after the io frame plus raid context, in dwords */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->type_nseg = sc->sc_iop->ldio_ctx_type_nseg;
	ctx->timeout_value = htole16(0x14); /* XXX */
	ctx->reg_lock_flags = htole16(sc->sc_iop->ldio_ctx_reg_lock_flags);
	ctx->virtual_disk_target_id = htole16(link->target);

	/* map the data buffer and build the sgl after the raid context */
	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (1);

	segs = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;
	/* where the sge count lives differs between controller generations */
	switch (sc->sc_iop->num_sge_loc) {
	case MFII_IOP_NUM_SGE_LOC_ORIG:
		ctx->num_sge = segs;
		break;
	case MFII_IOP_NUM_SGE_LOC_35:
		/* 12 bit field, but we're only using the lower 8 */
		ctx->span_arm = segs;
		break;
	}

	ccb->ccb_req.flags = sc->sc_iop->ldio_req_type;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	return (0);
}
2331 
/*
 * Build a generic CDB pass-through request for a logical disk command
 * that is not a plain read/write.  Returns 0 on success, nonzero if the
 * data buffer could not be mapped.
 */
int
mfii_scsi_cmd_cdb(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the raid context immediately follows the MPII io frame */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);

	io->dev_handle = htole16(link->target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(link->target);

	/* map the data buffer and build the sgl after the raid context */
	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (1);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	return (0);
}
2378 
/*
 * scsi_adapter entry point for pass-through (JBOD) physical disks.
 */
void
mfii_pd_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	/* mfii_pd_scsi_cmd_cdb() returns an XS_* code directly */
	xs->error = mfii_pd_scsi_cmd_cdb(sc, xs);
	if (xs->error != XS_NOERROR)
		goto done;

	xs->resid = 0;

	if (ISSET(xs->flags, SCSI_POLL)) {
		/* polled commands complete inside mfii_poll(); no timeout */
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	ccb->ccb_refcnt = 2; /* one for the chip, one for the timeout */
	timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
done:
	scsi_done(xs);
}
2417 
2418 int
2419 mfii_pd_scsi_probe(struct scsi_link *link)
2420 {
2421 	struct mfii_softc *sc = link->adapter_softc;
2422 	struct mfi_pd_details mpd;
2423 	union mfi_mbox mbox;
2424 	int rv;
2425 
2426 	if (link->lun > 0)
2427 		return (0);
2428 
2429 	memset(&mbox, 0, sizeof(mbox));
2430 	mbox.s[0] = htole16(link->target);
2431 
2432 	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, &mpd, sizeof(mpd),
2433 	    SCSI_DATA_IN|SCSI_NOSLEEP);
2434 	if (rv != 0)
2435 		return (EIO);
2436 
2437 	if (mpd.mpd_fw_state != htole16(MFI_PD_SYSTEM))
2438 		return (ENXIO);
2439 
2440 	return (0);
2441 }
2442 
/*
 * Build a pass-through request for a physical (JBOD) disk.  Returns an
 * XS_* code: XS_SELTIMEOUT if the device handle is gone, XS_DRIVER_STUFFUP
 * if the data buffer could not be mapped, XS_NOERROR on success.
 */
int
mfii_pd_scsi_cmd_cdb(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the raid context immediately follows the MPII io frame */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	uint16_t dev_handle;

	/* look up the firmware device handle for this target */
	dev_handle = mfii_dev_handle(sc, link->target);
	if (dev_handle == htole16(0xffff))
		return (XS_SELTIMEOUT);

	io->dev_handle = dev_handle;
	io->function = 0;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(link->target);
	ctx->raid_flags = MFII_RAID_CTX_IO_TYPE_SYSPD;
	ctx->timeout_value = sc->sc_pd->pd_timeout;

	/* map the data buffer and build the sgl after the raid context */
	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (XS_DRIVER_STUFFUP);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;

	ccb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);
	ccb->ccb_req.dev_handle = dev_handle;

	return (XS_NOERROR);
}
2497 
/*
 * Map the ccb's data buffer for DMA and build its scatter/gather list at
 * sglp (inside the request frame).  If the segments do not all fit in the
 * frame, the tail of the list is placed in the ccb's external sgl memory
 * and linked via a chain element.  Returns 0 on success, 1 on map failure.
 */
int
mfii_load_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb, void *sglp,
    int nosleep)
{
	struct mpii_msg_request *req = ccb->ccb_request;
	struct mfii_sge *sge = NULL, *nsge = sglp;
	struct mfii_sge *ce = NULL;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	u_int space;
	int i;

	int error;

	/* nothing to map for dataless commands */
	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* how many sges fit between sglp and the end of the request frame */
	space = (MFII_REQUEST_SIZE - ((u_int8_t *)nsge - (u_int8_t *)req)) /
	    sizeof(*nsge);
	if (dmap->dm_nsegs > space) {
		/* reserve the last in-frame slot for the chain element */
		space--;

		ccb->ccb_sgl_len = (dmap->dm_nsegs - space) * sizeof(*nsge);
		memset(ccb->ccb_sgl, 0, ccb->ccb_sgl_len);

		/* chain element points at the external sgl memory */
		ce = nsge + space;
		ce->sg_addr = htole64(ccb->ccb_sgl_dva);
		ce->sg_len = htole32(ccb->ccb_sgl_len);
		ce->sg_flags = sc->sc_iop->sge_flag_chain;

		/* chain offset is in 16 byte units */
		req->chain_offset = ((u_int8_t *)ce - (u_int8_t *)req) / 16;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		/* jump to the external sgl when we reach the chain element */
		if (nsge == ce)
			nsge = ccb->ccb_sgl;

		sge = nsge;

		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_flags = MFII_SGE_ADDR_SYSTEM;

		nsge = sge + 1;
	}
	/* mark the last sge as the end of the list */
	sge->sg_flags |= sc->sc_iop->sge_flag_eol;

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/* flush the external sgl chunk if we used one */
	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_PREWRITE);
	}

	return (0);
}
2564 
2565 void
2566 mfii_scsi_cmd_tmo(void *xsp)
2567 {
2568 	struct scsi_xfer *xs = xsp;
2569 	struct scsi_link *link = xs->sc_link;
2570 	struct mfii_softc *sc = link->adapter_softc;
2571 	struct mfii_ccb *ccb = xs->io;
2572 
2573 	mtx_enter(&sc->sc_abort_mtx);
2574 	SIMPLEQ_INSERT_TAIL(&sc->sc_abort_list, ccb, ccb_link);
2575 	mtx_leave(&sc->sc_abort_mtx);
2576 
2577 	task_add(systqmp, &sc->sc_abort_task);
2578 }
2579 
/*
 * Process-context handler for command timeouts.  For every ccb queued by
 * mfii_scsi_cmd_tmo(), issue a SCSI task abort for its smid; if the
 * device is already gone, drop the timeout's reference directly.
 */
void
mfii_abort_task(void *scp)
{
	struct mfii_softc *sc = scp;
	struct mfii_ccb *list;

	/* steal the whole abort list under the mutex */
	mtx_enter(&sc->sc_abort_mtx);
	list = SIMPLEQ_FIRST(&sc->sc_abort_list);
	SIMPLEQ_INIT(&sc->sc_abort_list);
	mtx_leave(&sc->sc_abort_mtx);

	while (list != NULL) {
		struct mfii_ccb *ccb = list;
		struct scsi_xfer *xs = ccb->ccb_cookie;
		struct scsi_link *link = xs->sc_link;

		uint16_t dev_handle;
		struct mfii_ccb *accb;

		list = SIMPLEQ_NEXT(ccb, ccb_link);

		dev_handle = mfii_dev_handle(sc, link->target);
		if (dev_handle == htole16(0xffff)) {
			/* device is gone */
			if (atomic_dec_int_nv(&ccb->ccb_refcnt) == 0)
				scsi_done(xs);
			continue;
		}

		/* flags 0: sleep until a ccb is available */
		accb = scsi_io_get(&sc->sc_iopool, 0);
		mfii_scrub_ccb(accb);
		mfii_abort(sc, accb, dev_handle, ccb->ccb_smid,
		    MPII_SCSI_TASK_ABORT_TASK,
		    htole32(MFII_TASK_MGMT_FLAGS_PD));

		accb->ccb_cookie = ccb;
		accb->ccb_done = mfii_scsi_cmd_abort_done;

		mfii_start(sc, accb);
	}
}
2621 
2622 void
2623 mfii_abort(struct mfii_softc *sc, struct mfii_ccb *accb, uint16_t dev_handle,
2624     uint16_t smid, uint8_t type, uint32_t flags)
2625 {
2626 	struct mfii_task_mgmt *msg;
2627 	struct mpii_msg_scsi_task_request *req;
2628 
2629 	msg = accb->ccb_request;
2630 	req = &msg->mpii_request;
2631 	req->dev_handle = dev_handle;
2632 	req->function = MPII_FUNCTION_SCSI_TASK_MGMT;
2633 	req->task_type = type;
2634 	htolem16(&req->task_mid, smid);
2635 	msg->flags = flags;
2636 
2637 	accb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
2638 	accb->ccb_req.smid = letoh16(accb->ccb_smid);
2639 }
2640 
2641 void
2642 mfii_scsi_cmd_abort_done(struct mfii_softc *sc, struct mfii_ccb *accb)
2643 {
2644 	struct mfii_ccb *ccb = accb->ccb_cookie;
2645 	struct scsi_xfer *xs = ccb->ccb_cookie;
2646 
2647 	/* XXX check accb completion? */
2648 
2649 	scsi_io_put(&sc->sc_iopool, accb);
2650 
2651 	if (atomic_dec_int_nv(&ccb->ccb_refcnt) == 0)
2652 		scsi_done(xs);
2653 }
2654 
2655 void *
2656 mfii_get_ccb(void *cookie)
2657 {
2658 	struct mfii_softc *sc = cookie;
2659 	struct mfii_ccb *ccb;
2660 
2661 	mtx_enter(&sc->sc_ccb_mtx);
2662 	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_freeq);
2663 	if (ccb != NULL)
2664 		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
2665 	mtx_leave(&sc->sc_ccb_mtx);
2666 
2667 	return (ccb);
2668 }
2669 
2670 void
2671 mfii_scrub_ccb(struct mfii_ccb *ccb)
2672 {
2673 	ccb->ccb_cookie = NULL;
2674 	ccb->ccb_done = NULL;
2675 	ccb->ccb_flags = 0;
2676 	ccb->ccb_data = NULL;
2677 	ccb->ccb_direction = 0;
2678 	ccb->ccb_len = 0;
2679 	ccb->ccb_sgl_len = 0;
2680 	ccb->ccb_refcnt = 1;
2681 
2682 	memset(&ccb->ccb_req, 0, sizeof(ccb->ccb_req));
2683 	memset(ccb->ccb_request, 0, MFII_REQUEST_SIZE);
2684 	memset(ccb->ccb_mfi, 0, MFI_FRAME_SIZE);
2685 }
2686 
2687 void
2688 mfii_put_ccb(void *cookie, void *io)
2689 {
2690 	struct mfii_softc *sc = cookie;
2691 	struct mfii_ccb *ccb = io;
2692 
2693 	mtx_enter(&sc->sc_ccb_mtx);
2694 	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
2695 	mtx_leave(&sc->sc_ccb_mtx);
2696 }
2697 
2698 int
2699 mfii_init_ccb(struct mfii_softc *sc)
2700 {
2701 	struct mfii_ccb *ccb;
2702 	u_int8_t *request = MFII_DMA_KVA(sc->sc_requests);
2703 	u_int8_t *mfi = MFII_DMA_KVA(sc->sc_mfi);
2704 	u_int8_t *sense = MFII_DMA_KVA(sc->sc_sense);
2705 	u_int8_t *sgl = MFII_DMA_KVA(sc->sc_sgl);
2706 	u_int i;
2707 	int error;
2708 
2709 	sc->sc_ccb = mallocarray(sc->sc_max_cmds, sizeof(struct mfii_ccb),
2710 	    M_DEVBUF, M_WAITOK|M_ZERO);
2711 
2712 	for (i = 0; i < sc->sc_max_cmds; i++) {
2713 		ccb = &sc->sc_ccb[i];
2714 
2715 		/* create a dma map for transfer */
2716 		error = bus_dmamap_create(sc->sc_dmat,
2717 		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2718 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
2719 		if (error) {
2720 			printf("%s: cannot create ccb dmamap (%d)\n",
2721 			    DEVNAME(sc), error);
2722 			goto destroy;
2723 		}
2724 
2725 		/* select i + 1'th request. 0 is reserved for events */
2726 		ccb->ccb_smid = i + 1;
2727 		ccb->ccb_request_offset = MFII_REQUEST_SIZE * (i + 1);
2728 		ccb->ccb_request = request + ccb->ccb_request_offset;
2729 		ccb->ccb_request_dva = MFII_DMA_DVA(sc->sc_requests) +
2730 		    ccb->ccb_request_offset;
2731 
2732 		/* select i'th MFI command frame */
2733 		ccb->ccb_mfi_offset = MFI_FRAME_SIZE * i;
2734 		ccb->ccb_mfi = mfi + ccb->ccb_mfi_offset;
2735 		ccb->ccb_mfi_dva = MFII_DMA_DVA(sc->sc_mfi) +
2736 		    ccb->ccb_mfi_offset;
2737 
2738 		/* select i'th sense */
2739 		ccb->ccb_sense_offset = MFI_SENSE_SIZE * i;
2740 		ccb->ccb_sense = (struct mfi_sense *)(sense +
2741 		    ccb->ccb_sense_offset);
2742 		ccb->ccb_sense_dva = MFII_DMA_DVA(sc->sc_sense) +
2743 		    ccb->ccb_sense_offset;
2744 
2745 		/* select i'th sgl */
2746 		ccb->ccb_sgl_offset = sizeof(struct mfii_sge) *
2747 		    sc->sc_max_sgl * i;
2748 		ccb->ccb_sgl = (struct mfii_sge *)(sgl + ccb->ccb_sgl_offset);
2749 		ccb->ccb_sgl_dva = MFII_DMA_DVA(sc->sc_sgl) +
2750 		    ccb->ccb_sgl_offset;
2751 
2752 		/* add ccb to queue */
2753 		mfii_put_ccb(sc, ccb);
2754 	}
2755 
2756 	return (0);
2757 
2758 destroy:
2759 	/* free dma maps and ccb memory */
2760 	while ((ccb = mfii_get_ccb(sc)) != NULL)
2761 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
2762 
2763 	free(sc->sc_ccb, M_DEVBUF, 0);
2764 
2765 	return (1);
2766 }
2767 
2768 #if NBIO > 0
/*
 * bio(4) ioctl entry point.  Dispatches to the per-request handlers,
 * serialized by sc_lock.
 */
int
mfii_ioctl(struct device *dev, u_long cmd, caddr_t addr)
{
	struct mfii_softc	*sc = (struct mfii_softc *)dev;
	int error = 0;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl ", DEVNAME(sc));

	rw_enter_write(&sc->sc_lock);

	switch (cmd) {
	case BIOCINQ:
		DNPRINTF(MFII_D_IOCTL, "inq\n");
		error = mfii_ioctl_inq(sc, (struct bioc_inq *)addr);
		break;

	case BIOCVOL:
		DNPRINTF(MFII_D_IOCTL, "vol\n");
		error = mfii_ioctl_vol(sc, (struct bioc_vol *)addr);
		break;

	case BIOCDISK:
		DNPRINTF(MFII_D_IOCTL, "disk\n");
		error = mfii_ioctl_disk(sc, (struct bioc_disk *)addr);
		break;

	case BIOCALARM:
		DNPRINTF(MFII_D_IOCTL, "alarm\n");
		error = mfii_ioctl_alarm(sc, (struct bioc_alarm *)addr);
		break;

	case BIOCBLINK:
		DNPRINTF(MFII_D_IOCTL, "blink\n");
		error = mfii_ioctl_blink(sc, (struct bioc_blink *)addr);
		break;

	case BIOCSETSTATE:
		DNPRINTF(MFII_D_IOCTL, "setstate\n");
		error = mfii_ioctl_setstate(sc, (struct bioc_setstate *)addr);
		break;

	case BIOCPATROL:
		DNPRINTF(MFII_D_IOCTL, "patrol\n");
		error = mfii_ioctl_patrol(sc, (struct bioc_patrol *)addr);
		break;

	default:
		DNPRINTF(MFII_D_IOCTL, " invalid ioctl\n");
		error = ENOTTY;
	}

	rw_exit_write(&sc->sc_lock);

	return (error);
}
2824 
/*
 * Refresh the cached controller configuration used by the bio handlers:
 * controller info, the full config (sc_cfg), the logical disk list
 * (sc_ld_list), per-ld details (sc_ld_details) and the number of used
 * physical disks (sc_no_pd).  Called with sc_lock held.
 */
int
mfii_bio_getitall(struct mfii_softc *sc)
{
	int			i, d, rv = EINVAL;
	size_t			size;
	union mfi_mbox		mbox;
	struct mfi_conf		*cfg = NULL;
	struct mfi_ld_details	*ld_det = NULL;

	/* get info */
	if (mfii_get_info(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_get_info failed\n",
		    DEVNAME(sc));
		goto done;
	}

	/* send single element command to retrieve size for full structure */
	cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
	    SCSI_DATA_IN)) {
		free(cfg, M_DEVBUF, sizeof *cfg);
		goto done;
	}

	size = cfg->mfc_size;
	free(cfg, M_DEVBUF, sizeof *cfg);

	/* memory for read config */
	cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size, SCSI_DATA_IN)) {
		free(cfg, M_DEVBUF, size);
		goto done;
	}

	/* replace current pointer with new one */
	if (sc->sc_cfg)
		free(sc->sc_cfg, M_DEVBUF, 0);
	sc->sc_cfg = cfg;

	/* get all ld info */
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), SCSI_DATA_IN))
		goto done;

	/* get memory for all ld structures */
	size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
	if (sc->sc_ld_sz != size) {
		if (sc->sc_ld_details)
			free(sc->sc_ld_details, M_DEVBUF, 0);

		ld_det = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (ld_det == NULL)
			goto done;
		sc->sc_ld_sz = size;
		sc->sc_ld_details = ld_det;
	}

	/* find used physical disks */
	size = sizeof(struct mfi_ld_details);
	for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
		memset(&mbox, 0, sizeof(mbox));
		mbox.b[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		if (mfii_mgmt(sc, MR_DCMD_LD_GET_INFO, &mbox, &sc->sc_ld_details[i], size,
		    SCSI_DATA_IN))
			goto done;

		/* sum drives over all spans of this logical disk */
		d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
		    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
	}
	sc->sc_no_pd = d;

	rv = 0;
done:
	return (rv);
}
2904 
/*
 * BIOCINQ handler: report the controller name and the number of volumes
 * and disks attached to it.
 */
int
mfii_ioctl_inq(struct mfii_softc *sc, struct bioc_inq *bi)
{
	int			rv = EINVAL;
	struct mfi_conf		*cfg = NULL;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_inq\n", DEVNAME(sc));

	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	/* count unused disks as volumes */
	if (sc->sc_cfg == NULL)
		goto done;
	cfg = sc->sc_cfg;

	bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
	/* hotspares are reported as extra volumes */
	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
#if notyet
	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
	    (bi->bi_nodisk - sc->sc_no_pd);
#endif
	/* tell bio who we are */
	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));

	rv = 0;
done:
	return (rv);
}
2937 
/*
 * BIOCVOL handler: report the status, progress, cache mode, RAID level
 * and size of one volume.  Volume ids beyond the logical disk list are
 * treated as hotspares/unused disks and handled by mfii_bio_hs().
 */
int
mfii_ioctl_vol(struct mfii_softc *sc, struct bioc_vol *bv)
{
	int			i, per, target, rv = EINVAL;
	struct scsi_link	*link;
	struct device		*dev;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_vol %#x\n",
	    DEVNAME(sc), bv->bv_volid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
		/* go do hotspares & unused disks */
		rv = mfii_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
		goto done;
	}

	i = bv->bv_volid;
	target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
	link = scsi_get_link(sc->sc_scsibus, target, 0);
	if (link == NULL) {
		/* volume exists but has no attached sd(4) device */
		strlcpy(bv->bv_dev, "cache", sizeof(bv->bv_dev));
	} else {
		dev = link->device_softc;
		if (dev == NULL)
			goto done;

		strlcpy(bv->bv_dev, dev->dv_xname, sizeof(bv->bv_dev));
	}

	switch(sc->sc_ld_list.mll_list[i].mll_state) {
	case MFI_LD_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;

	case MFI_LD_PART_DEGRADED:
	case MFI_LD_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;

	case MFI_LD_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;

	default:
		bv->bv_status = BIOC_SVINVALID;
		DNPRINTF(MFII_D_IOCTL, "%s: invalid logical disk state %#x\n",
		    DEVNAME(sc),
		    sc->sc_ld_list.mll_list[i].mll_state);
	}

	/* additional status can modify MFI status */
	switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
	case MFI_LD_PROG_CC:
		/* consistency check in progress */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_BGI:
		/* background initialisation in progress */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_FGI:
	case MFI_LD_PROG_RECONSTRUCT:
		/* nothing yet */
		break;
	}

	if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01)
		bv->bv_cache = BIOC_CVWRITEBACK;
	else
		bv->bv_cache = BIOC_CVWRITETHROUGH;

	/*
	 * The RAID levels are determined per the SNIA DDF spec, this is only
	 * a subset that is valid for the MFI controller.
	 */
	bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
	if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth > 1)
		bv->bv_level *= 10;

	bv->bv_nodisk = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;

	bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */

	rv = 0;
done:
	return (rv);
}
3041 
/*
 * BIOCDISK handler: report the enclosure/slot, status, size, vendor and
 * patrol-read progress of one physical disk of a volume.  If the array
 * slot is empty, an unconfigured disk is searched for so a rebuild
 * candidate can still be reported.
 */
int
mfii_ioctl_disk(struct mfii_softc *sc, struct bioc_disk *bd)
{
	struct mfi_conf		*cfg;
	struct mfi_array	*ar;
	struct mfi_ld_cfg	*ld;
	struct mfi_pd_details	*pd;
	struct mfi_pd_list	*pl;
	struct mfi_pd_progress	*mfp;
	struct mfi_progress	*mp;
	struct scsi_inquiry_data *inqbuf;
	char			vend[8+16+4+1], *vendp;
	int			i, rv = EINVAL;
	int			arr, vol, disk, span;
	union mfi_mbox		mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_disk %#x\n",
	    DEVNAME(sc), bd->bd_diskid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	cfg = sc->sc_cfg;

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	ar = cfg->mfc_array;
	vol = bd->bd_volid;
	if (vol >= cfg->mfc_no_ld) {
		/* do hotspares */
		rv = mfii_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
		goto freeme;
	}

	/* calculate offset to ld structure */
	ld = (struct mfi_ld_cfg *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array);

	/* use span 0 only when raid group is not spanned */
	if (ld[vol].mlc_parm.mpa_span_depth > 1)
		span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
	else
		span = 0;
	arr = ld[vol].mlc_span[span].mls_index;

	/* offset disk into pd list */
	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;

	if (ar[arr].pd[disk].mar_pd.mfp_id == 0xffffU) {
		/* disk is missing but succeed command */
		bd->bd_status = BIOC_SDFAILED;
		rv = 0;

		/* try to find an unused disk for the target to rebuild */
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
		    SCSI_DATA_IN))
			goto freeme;

		for (i = 0; i < pl->mpl_no_pd; i++) {
			if (pl->mpl_address[i].mpa_scsi_type != 0)
				continue;

			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
			    SCSI_DATA_IN))
				continue;

			/* an unconfigured disk can serve as the rebuild target */
			if (pd->mpd_fw_state == MFI_PD_UNCONFIG_GOOD ||
			    pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD)
				break;
		}

		if (i == pl->mpl_no_pd)
			goto freeme;
	} else {
		memset(&mbox, 0, sizeof(mbox));
		mbox.s[0] = ar[arr].pd[disk].mar_pd.mfp_id;
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
		    SCSI_DATA_IN)) {
			bd->bd_status = BIOC_SDINVALID;
			goto freeme;
		}
	}

	/* get the remaining fields */
	bd->bd_channel = pd->mpd_enc_idx;
	bd->bd_target = pd->mpd_enc_slot;

	/* get status */
	switch (pd->mpd_fw_state){
	case MFI_PD_UNCONFIG_GOOD:
	case MFI_PD_UNCONFIG_BAD:
		bd->bd_status = BIOC_SDUNUSED;
		break;

	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
		bd->bd_status = BIOC_SDHOTSPARE;
		break;

	case MFI_PD_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;

	case MFI_PD_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;

	case MFI_PD_REBUILD:
		bd->bd_status = BIOC_SDREBUILD;
		break;

	case MFI_PD_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;

	case MFI_PD_COPYBACK:
	case MFI_PD_SYSTEM:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = pd->mpd_size * 512; /* bytes per block */

	inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
	vendp = inqbuf->vendor;
	/* vendor, product and revision fields are adjacent in inquiry data */
	memcpy(vend, vendp, sizeof vend - 1);
	vend[sizeof vend - 1] = '\0';
	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));

	/* XXX find a way to retrieve serial nr from drive */
	/* XXX find a way to get bd_procdev */

	mfp = &pd->mpd_progress;
	if (mfp->mfp_in_prog & MFI_PD_PROG_PR) {
		mp = &mfp->mfp_patrol_read;
		bd->bd_patrol.bdp_percent = (mp->mp_progress * 100) / 0xffff;
		bd->bd_patrol.bdp_seconds = mp->mp_elapsed_seconds;
	}

	rv = 0;
freeme:
	free(pd, M_DEVBUF, sizeof *pd);
	free(pl, M_DEVBUF, sizeof *pl);

	return (rv);
}
3195 
3196 int
3197 mfii_ioctl_alarm(struct mfii_softc *sc, struct bioc_alarm *ba)
3198 {
3199 	uint32_t		opc, flags = 0;
3200 	int			rv = 0;
3201 	int8_t			ret;
3202 
3203 	switch(ba->ba_opcode) {
3204 	case BIOC_SADISABLE:
3205 		opc = MR_DCMD_SPEAKER_DISABLE;
3206 		break;
3207 
3208 	case BIOC_SAENABLE:
3209 		opc = MR_DCMD_SPEAKER_ENABLE;
3210 		break;
3211 
3212 	case BIOC_SASILENCE:
3213 		opc = MR_DCMD_SPEAKER_SILENCE;
3214 		break;
3215 
3216 	case BIOC_GASTATUS:
3217 		opc = MR_DCMD_SPEAKER_GET;
3218 		flags = SCSI_DATA_IN;
3219 		break;
3220 
3221 	case BIOC_SATEST:
3222 		opc = MR_DCMD_SPEAKER_TEST;
3223 		break;
3224 
3225 	default:
3226 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_alarm biocalarm invalid "
3227 		    "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
3228 		return (EINVAL);
3229 	}
3230 
3231 	if (mfii_mgmt(sc, opc, NULL, &ret, sizeof(ret), flags))
3232 		rv = EINVAL;
3233 	else
3234 		if (ba->ba_opcode == BIOC_GASTATUS)
3235 			ba->ba_status = ret;
3236 		else
3237 			ba->ba_status = 0;
3238 
3239 	return (rv);
3240 }
3241 
3242 int
3243 mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *bb)
3244 {
3245 	int			i, found, rv = EINVAL;
3246 	union mfi_mbox		mbox;
3247 	uint32_t		cmd;
3248 	struct mfi_pd_list	*pd;
3249 
3250 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink %x\n", DEVNAME(sc),
3251 	    bb->bb_status);
3252 
3253 	/* channel 0 means not in an enclosure so can't be blinked */
3254 	if (bb->bb_channel == 0)
3255 		return (EINVAL);
3256 
3257 	pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
3258 
3259 	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pd, sizeof(*pd), SCSI_DATA_IN))
3260 		goto done;
3261 
3262 	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
3263 		if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
3264 		    bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
3265 			found = 1;
3266 			break;
3267 		}
3268 
3269 	if (!found)
3270 		goto done;
3271 
3272 	memset(&mbox, 0, sizeof(mbox));
3273 	mbox.s[0] = pd->mpl_address[i].mpa_pd_id;
3274 
3275 	switch (bb->bb_status) {
3276 	case BIOC_SBUNBLINK:
3277 		cmd = MR_DCMD_PD_UNBLINK;
3278 		break;
3279 
3280 	case BIOC_SBBLINK:
3281 		cmd = MR_DCMD_PD_BLINK;
3282 		break;
3283 
3284 	case BIOC_SBALARM:
3285 	default:
3286 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink biocblink invalid "
3287 		    "opcode %x\n", DEVNAME(sc), bb->bb_status);
3288 		goto done;
3289 	}
3290 
3291 
3292 	if (mfii_mgmt(sc, cmd, &mbox, NULL, 0, 0))
3293 		goto done;
3294 
3295 	rv = 0;
3296 done:
3297 	free(pd, M_DEVBUF, sizeof *pd);
3298 	return (rv);
3299 }
3300 
/*
 * Bring the physical disk identified by pd_id into a usable state:
 * flip UNCONFIG_BAD to UNCONFIG_GOOD and clear any foreign metadata,
 * re-reading the PD details after each step.  Returns 0 on success,
 * a DCMD error or ENXIO (disk did not reach the expected state)
 * otherwise.
 */
static int
mfii_makegood(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfii_foreign_scan_info *fsi;
	struct mfi_pd_details	*pd;
	union mfi_mbox		mbox;
	int			rv;

	fsi = malloc(sizeof *fsi, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	/* fetch the disk's current firmware state */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD) {
		/* request UNCONFIG_GOOD; s[1] carries the PD's current seq */
		mbox.s[0] = pd_id;
		mbox.s[1] = pd->mpd_pd.mfp_seq;
		mbox.b[4] = MFI_PD_UNCONFIG_GOOD;
		rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0, 0);
		if (rv != 0)
			goto done;
	}

	/* re-read the details to pick up the (possibly changed) DDF state */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	if (pd->mpd_ddf_state & MFI_DDF_FOREIGN) {
		/* foreign config present: scan, and clear it if any found */
		rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_SCAN, NULL, fsi, sizeof(*fsi),
		    SCSI_DATA_IN);
		if (rv != 0)
			goto done;

		if (fsi->count > 0) {
			rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_CLEAR, NULL, NULL, 0, 0);
			if (rv != 0)
				goto done;
		}
	}

	/* verify the disk ended up unconfigured-good and non-foreign */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state != MFI_PD_UNCONFIG_GOOD ||
	    pd->mpd_ddf_state & MFI_DDF_FOREIGN)
		rv = ENXIO;

done:
	free(fsi, M_DEVBUF, sizeof *fsi);
	free(pd, M_DEVBUF, sizeof *pd);

	return (rv);
}
3362 
/*
 * Turn the physical disk identified by pd_id into a hot spare via
 * MR_DCMD_CFG_MAKE_SPARE.  Returns 0 on success, non-zero otherwise.
 */
static int
mfii_makespare(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfi_hotspare	*hs;
	struct mfi_pd_details	*pd;
	union mfi_mbox		mbox;
	size_t			size;
	int			rv = EINVAL;

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	/* fixed struct plus one uint16_t of trailing data per array */
	size = sizeof *hs + sizeof(uint16_t) * sc->sc_cfg->mfc_no_array;

	hs = malloc(size, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	/* look up the id/seq pair MAKE_SPARE needs for this disk */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	/*
	 * NOTE(review): everything except id/seq stays zeroed -- this
	 * presumably selects a global (not dedicated) spare; confirm
	 * against the MAKE_SPARE DCMD documentation.
	 */
	memset(hs, 0, size);
	hs->mhs_pd.mfp_id = pd->mpd_pd.mfp_id;
	hs->mhs_pd.mfp_seq = pd->mpd_pd.mfp_seq;
	rv = mfii_mgmt(sc, MR_DCMD_CFG_MAKE_SPARE, NULL, hs, size, SCSI_DATA_OUT);

done:
	free(hs, M_DEVBUF, size);
	free(pd, M_DEVBUF, sizeof *pd);

	return (rv);
}
3401 
/*
 * BIOCSETSTATE ioctl backend: change the firmware state (online,
 * offline, hotspare, rebuild) of the physical disk addressed by
 * bs->bs_channel/bs->bs_target.  Returns 0 on success, EINVAL or a
 * DCMD error otherwise.
 */
int
mfii_ioctl_setstate(struct mfii_softc *sc, struct bioc_setstate *bs)
{
	struct mfi_pd_details	*pd;
	struct mfi_pd_list	*pl;
	int			i, found, rv = EINVAL;
	union mfi_mbox		mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate %x\n", DEVNAME(sc),
	    bs->bs_status);

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	/* map the enclosure/slot pair onto a firmware pd id */
	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl), SCSI_DATA_IN))
		goto done;

	for (i = 0, found = 0; i < pl->mpl_no_pd; i++)
		if (bs->bs_channel == pl->mpl_address[i].mpa_enc_index &&
		    bs->bs_target == pl->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;

	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN))
		goto done;

	/* SET_STATE takes the pd id, its current seq and the new state */
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
	mbox.s[1] = pd->mpd_pd.mfp_seq;

	switch (bs->bs_status) {
	case BIOC_SSONLINE:
		mbox.b[4] = MFI_PD_ONLINE;
		break;

	case BIOC_SSOFFLINE:
		mbox.b[4] = MFI_PD_OFFLINE;
		break;

	case BIOC_SSHOTSPARE:
		mbox.b[4] = MFI_PD_HOTSPARE;
		break;

	case BIOC_SSREBUILD:
		if (pd->mpd_fw_state != MFI_PD_OFFLINE) {
			/* disk not offline: make it good, then a spare */
			if ((rv = mfii_makegood(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			if ((rv = mfii_makespare(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			/* re-read state/seq; both may have changed above */
			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
			    SCSI_DATA_IN);
			if (rv != 0)
				goto done;

			/* rebuilding might be started by mfii_makespare() */
			if (pd->mpd_fw_state == MFI_PD_REBUILD) {
				rv = 0;
				goto done;
			}

			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			mbox.s[1] = pd->mpd_pd.mfp_seq;
		}
		mbox.b[4] = MFI_PD_REBUILD;
		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate invalid "
		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
		goto done;
	}


	rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0, 0);
done:
	free(pd, M_DEVBUF, sizeof *pd);
	free(pl, M_DEVBUF, sizeof *pl);
	return (rv);
}
3493 
/*
 * BIOCPATROL ioctl backend: start/stop patrol read, configure its
 * schedule (manual/disabled/auto), or report the current properties
 * and run state.  Returns 0 on success, EINVAL on any DCMD failure or
 * bad argument.
 */
int
mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *bp)
{
	uint32_t		opc;
	int			rv = 0;
	struct mfi_pr_properties prop;
	struct mfi_pr_status	status;
	uint32_t		time, exec_freq;

	switch (bp->bp_opcode) {
	case BIOC_SPSTOP:
	case BIOC_SPSTART:
		if (bp->bp_opcode == BIOC_SPSTART)
			opc = MR_DCMD_PR_START;
		else
			opc = MR_DCMD_PR_STOP;
		/*
		 * NOTE(review): SCSI_DATA_IN with a NULL buffer and zero
		 * length looks odd here; flags of 0 would seem more
		 * natural -- confirm against mfii_mgmt() semantics.
		 */
		if (mfii_mgmt(sc, opc, NULL, NULL, 0, SCSI_DATA_IN))
			return (EINVAL);
		break;

	case BIOC_SPMANUAL:
	case BIOC_SPDISABLE:
	case BIOC_SPAUTO:
		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time), SCSI_DATA_IN))
			return (EINVAL);

		/* read-modify-write the patrol read properties */
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop), SCSI_DATA_IN))
			return (EINVAL);

		switch (bp->bp_opcode) {
		case BIOC_SPMANUAL:
			prop.op_mode = MFI_PR_OPMODE_MANUAL;
			break;
		case BIOC_SPDISABLE:
			prop.op_mode = MFI_PR_OPMODE_DISABLED;
			break;
		case BIOC_SPAUTO:
			if (bp->bp_autoival != 0) {
				if (bp->bp_autoival == -1)
					/* continuously */
					exec_freq = 0xffffffffU;
				else if (bp->bp_autoival > 0)
					exec_freq = bp->bp_autoival;
				else
					return (EINVAL);
				prop.exec_freq = exec_freq;
			}
			if (bp->bp_autonext != 0) {
				if (bp->bp_autonext < 0)
					return (EINVAL);
				else
					/* next run is relative to device time */
					prop.next_exec = time + bp->bp_autonext;
			}
			prop.op_mode = MFI_PR_OPMODE_AUTO;
			break;
		}

		opc = MR_DCMD_PR_SET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop), SCSI_DATA_OUT))
			return (EINVAL);

		break;

	case BIOC_GPSTATUS:
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop), SCSI_DATA_IN))
			return (EINVAL);

		opc = MR_DCMD_PR_GET_STATUS;
		if (mfii_mgmt(sc, opc, NULL, &status, sizeof(status), SCSI_DATA_IN))
			return (EINVAL);

		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time), SCSI_DATA_IN))
			return (EINVAL);

		/* translate firmware op mode to the bio(4) vocabulary */
		switch (prop.op_mode) {
		case MFI_PR_OPMODE_AUTO:
			bp->bp_mode = BIOC_SPMAUTO;
			bp->bp_autoival = prop.exec_freq;
			bp->bp_autonext = prop.next_exec;
			bp->bp_autonow = time;
			break;
		case MFI_PR_OPMODE_MANUAL:
			bp->bp_mode = BIOC_SPMMANUAL;
			break;
		case MFI_PR_OPMODE_DISABLED:
			bp->bp_mode = BIOC_SPMDISABLED;
			break;
		default:
			printf("%s: unknown patrol mode %d\n",
			    DEVNAME(sc), prop.op_mode);
			break;
		}

		/* translate firmware run state to the bio(4) vocabulary */
		switch (status.state) {
		case MFI_PR_STATE_STOPPED:
			bp->bp_status = BIOC_SPSSTOPPED;
			break;
		case MFI_PR_STATE_READY:
			bp->bp_status = BIOC_SPSREADY;
			break;
		case MFI_PR_STATE_ACTIVE:
			bp->bp_status = BIOC_SPSACTIVE;
			break;
		case MFI_PR_STATE_ABORTED:
			bp->bp_status = BIOC_SPSABORTED;
			break;
		default:
			printf("%s: unknown patrol state %d\n",
			    DEVNAME(sc), status.state);
			break;
		}

		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_patrol biocpatrol invalid "
		    "opcode %x\n", DEVNAME(sc), bp->bp_opcode);
		return (EINVAL);
	}

	return (rv);
}
3622 
3623 int
3624 mfii_bio_hs(struct mfii_softc *sc, int volid, int type, void *bio_hs)
3625 {
3626 	struct mfi_conf		*cfg;
3627 	struct mfi_hotspare	*hs;
3628 	struct mfi_pd_details	*pd;
3629 	struct bioc_disk	*sdhs;
3630 	struct bioc_vol		*vdhs;
3631 	struct scsi_inquiry_data *inqbuf;
3632 	char			vend[8+16+4+1], *vendp;
3633 	int			i, rv = EINVAL;
3634 	uint32_t		size;
3635 	union mfi_mbox		mbox;
3636 
3637 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs %d\n", DEVNAME(sc), volid);
3638 
3639 	if (!bio_hs)
3640 		return (EINVAL);
3641 
3642 	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3643 
3644 	/* send single element command to retrieve size for full structure */
3645 	cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
3646 	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg), SCSI_DATA_IN))
3647 		goto freeme;
3648 
3649 	size = cfg->mfc_size;
3650 	free(cfg, M_DEVBUF, sizeof *cfg);
3651 
3652 	/* memory for read config */
3653 	cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
3654 	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size, SCSI_DATA_IN))
3655 		goto freeme;
3656 
3657 	/* calculate offset to hs structure */
3658 	hs = (struct mfi_hotspare *)(
3659 	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
3660 	    cfg->mfc_array_size * cfg->mfc_no_array +
3661 	    cfg->mfc_ld_size * cfg->mfc_no_ld);
3662 
3663 	if (volid < cfg->mfc_no_ld)
3664 		goto freeme; /* not a hotspare */
3665 
3666 	if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
3667 		goto freeme; /* not a hotspare */
3668 
3669 	/* offset into hotspare structure */
3670 	i = volid - cfg->mfc_no_ld;
3671 
3672 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs i %d volid %d no_ld %d no_hs %d "
3673 	    "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
3674 	    cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
3675 
3676 	/* get pd fields */
3677 	memset(&mbox, 0, sizeof(mbox));
3678 	mbox.s[0] = hs[i].mhs_pd.mfp_id;
3679 	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3680 	    SCSI_DATA_IN)) {
3681 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs illegal PD\n",
3682 		    DEVNAME(sc));
3683 		goto freeme;
3684 	}
3685 
3686 	switch (type) {
3687 	case MFI_MGMT_VD:
3688 		vdhs = bio_hs;
3689 		vdhs->bv_status = BIOC_SVONLINE;
3690 		vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3691 		vdhs->bv_level = -1; /* hotspare */
3692 		vdhs->bv_nodisk = 1;
3693 		break;
3694 
3695 	case MFI_MGMT_SD:
3696 		sdhs = bio_hs;
3697 		sdhs->bd_status = BIOC_SDHOTSPARE;
3698 		sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3699 		sdhs->bd_channel = pd->mpd_enc_idx;
3700 		sdhs->bd_target = pd->mpd_enc_slot;
3701 		inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
3702 		vendp = inqbuf->vendor;
3703 		memcpy(vend, vendp, sizeof vend - 1);
3704 		vend[sizeof vend - 1] = '\0';
3705 		strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
3706 		break;
3707 
3708 	default:
3709 		goto freeme;
3710 	}
3711 
3712 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs 6\n", DEVNAME(sc));
3713 	rv = 0;
3714 freeme:
3715 	free(pd, M_DEVBUF, sizeof *pd);
3716 	free(cfg, M_DEVBUF, 0);
3717 
3718 	return (rv);
3719 }
3720 
3721 #ifndef SMALL_KERNEL
3722 
3723 #define MFI_BBU_SENSORS 4
3724 
3725 void
3726 mfii_bbu(struct mfii_softc *sc)
3727 {
3728 	struct mfi_bbu_status bbu;
3729 	u_int32_t status;
3730 	u_int32_t mask;
3731 	u_int32_t soh_bad;
3732 	int i;
3733 
3734 	if (mfii_mgmt(sc, MR_DCMD_BBU_GET_STATUS, NULL, &bbu,
3735 	    sizeof(bbu), SCSI_DATA_IN) != 0) {
3736 		for (i = 0; i < MFI_BBU_SENSORS; i++) {
3737 			sc->sc_bbu[i].value = 0;
3738 			sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
3739 		}
3740 		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
3741 			sc->sc_bbu_status[i].value = 0;
3742 			sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
3743 		}
3744 		return;
3745 	}
3746 
3747 	switch (bbu.battery_type) {
3748 	case MFI_BBU_TYPE_IBBU:
3749 		mask = MFI_BBU_STATE_BAD_IBBU;
3750 		soh_bad = 0;
3751 		break;
3752 	case MFI_BBU_TYPE_BBU:
3753 		mask = MFI_BBU_STATE_BAD_BBU;
3754 		soh_bad = (bbu.detail.bbu.is_SOH_good == 0);
3755 		break;
3756 
3757 	case MFI_BBU_TYPE_NONE:
3758 	default:
3759 		sc->sc_bbu[0].value = 0;
3760 		sc->sc_bbu[0].status = SENSOR_S_CRIT;
3761 		for (i = 1; i < MFI_BBU_SENSORS; i++) {
3762 			sc->sc_bbu[i].value = 0;
3763 			sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
3764 		}
3765 		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
3766 			sc->sc_bbu_status[i].value = 0;
3767 			sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
3768 		}
3769 		return;
3770 	}
3771 
3772 	status = letoh32(bbu.fw_status);
3773 
3774 	sc->sc_bbu[0].value = ((status & mask) || soh_bad) ? 0 : 1;
3775 	sc->sc_bbu[0].status = ((status & mask) || soh_bad) ? SENSOR_S_CRIT :
3776 	    SENSOR_S_OK;
3777 
3778 	sc->sc_bbu[1].value = letoh16(bbu.voltage) * 1000;
3779 	sc->sc_bbu[2].value = (int16_t)letoh16(bbu.current) * 1000;
3780 	sc->sc_bbu[3].value = letoh16(bbu.temperature) * 1000000 + 273150000;
3781 	for (i = 1; i < MFI_BBU_SENSORS; i++)
3782 		sc->sc_bbu[i].status = SENSOR_S_UNSPEC;
3783 
3784 	for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
3785 		sc->sc_bbu_status[i].value = (status & (1 << i)) ? 1 : 0;
3786 		sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
3787 	}
3788 }
3789 
3790 void
3791 mfii_refresh_ld_sensor(struct mfii_softc *sc, int ld)
3792 {
3793 	struct ksensor *sensor;
3794 	int target;
3795 
3796 	target = sc->sc_ld_list.mll_list[ld].mll_ld.mld_target;
3797 	sensor = &sc->sc_sensors[target];
3798 
3799 	switch(sc->sc_ld_list.mll_list[ld].mll_state) {
3800 	case MFI_LD_OFFLINE:
3801 		sensor->value = SENSOR_DRIVE_FAIL;
3802 		sensor->status = SENSOR_S_CRIT;
3803 		break;
3804 
3805 	case MFI_LD_PART_DEGRADED:
3806 	case MFI_LD_DEGRADED:
3807 		sensor->value = SENSOR_DRIVE_PFAIL;
3808 		sensor->status = SENSOR_S_WARN;
3809 		break;
3810 
3811 	case MFI_LD_ONLINE:
3812 		sensor->value = SENSOR_DRIVE_ONLINE;
3813 		sensor->status = SENSOR_S_OK;
3814 		break;
3815 
3816 	default:
3817 		sensor->value = 0; /* unknown */
3818 		sensor->status = SENSOR_S_UNKNOWN;
3819 		break;
3820 	}
3821 }
3822 
3823 void
3824 mfii_init_ld_sensor(struct mfii_softc *sc, int ld)
3825 {
3826 	struct device		*dev;
3827 	struct scsi_link	*link;
3828 	struct ksensor		*sensor;
3829 	int			target;
3830 
3831 	target = sc->sc_ld_list.mll_list[ld].mll_ld.mld_target;
3832 	sensor = &sc->sc_sensors[target];
3833 
3834 	link = scsi_get_link(sc->sc_scsibus, target, 0);
3835 	if (link == NULL) {
3836 		strlcpy(sensor->desc, "cache", sizeof(sensor->desc));
3837 	} else {
3838 		dev = link->device_softc;
3839 		if (dev != NULL)
3840 			strlcpy(sensor->desc, dev->dv_xname,
3841 			    sizeof(sensor->desc));
3842 	}
3843 	sensor->type = SENSOR_DRIVE;
3844 	mfii_refresh_ld_sensor(sc, ld);
3845 }
3846 
3847 int
3848 mfii_create_sensors(struct mfii_softc *sc)
3849 {
3850 	int			i, target;
3851 
3852 	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
3853 	    sizeof(sc->sc_sensordev.xname));
3854 
3855 	if (ISSET(letoh32(sc->sc_info.mci_hw_present), MFI_INFO_HW_BBU)) {
3856 		sc->sc_bbu = mallocarray(4, sizeof(*sc->sc_bbu),
3857 		    M_DEVBUF, M_WAITOK | M_ZERO);
3858 
3859 		sc->sc_bbu[0].type = SENSOR_INDICATOR;
3860 		sc->sc_bbu[0].status = SENSOR_S_UNKNOWN;
3861 		strlcpy(sc->sc_bbu[0].desc, "bbu ok",
3862 		    sizeof(sc->sc_bbu[0].desc));
3863 		sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[0]);
3864 
3865 		sc->sc_bbu[1].type = SENSOR_VOLTS_DC;
3866 		sc->sc_bbu[1].status = SENSOR_S_UNSPEC;
3867 		sc->sc_bbu[2].type = SENSOR_AMPS;
3868 		sc->sc_bbu[2].status = SENSOR_S_UNSPEC;
3869 		sc->sc_bbu[3].type = SENSOR_TEMP;
3870 		sc->sc_bbu[3].status = SENSOR_S_UNSPEC;
3871 		for (i = 1; i < MFI_BBU_SENSORS; i++) {
3872 			strlcpy(sc->sc_bbu[i].desc, "bbu",
3873 			    sizeof(sc->sc_bbu[i].desc));
3874 			sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[i]);
3875 		}
3876 
3877 		sc->sc_bbu_status = malloc(sizeof(*sc->sc_bbu_status) *
3878 		    sizeof(mfi_bbu_indicators), M_DEVBUF, M_WAITOK | M_ZERO);
3879 
3880 		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
3881 			sc->sc_bbu_status[i].type = SENSOR_INDICATOR;
3882 			sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
3883 			strlcpy(sc->sc_bbu_status[i].desc,
3884 			    mfi_bbu_indicators[i],
3885 			    sizeof(sc->sc_bbu_status[i].desc));
3886 
3887 			sensor_attach(&sc->sc_sensordev, &sc->sc_bbu_status[i]);
3888 		}
3889 	}
3890 
3891 	sc->sc_sensors = mallocarray(MFI_MAX_LD, sizeof(struct ksensor),
3892 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3893 	if (sc->sc_sensors == NULL)
3894 		return (1);
3895 
3896 	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
3897 		mfii_init_ld_sensor(sc, i);
3898 		target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
3899 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[target]);
3900 	}
3901 
3902 	if (sensor_task_register(sc, mfii_refresh_sensors, 10) == NULL)
3903 		goto bad;
3904 
3905 	sensordev_install(&sc->sc_sensordev);
3906 
3907 	return (0);
3908 
3909 bad:
3910 	free(sc->sc_sensors, M_DEVBUF,
3911 	    MFI_MAX_LD * sizeof(struct ksensor));
3912 
3913 	return (1);
3914 }
3915 
/*
 * Periodic sensor task callback (registered in mfii_create_sensors,
 * runs every 10 seconds): refresh the BBU sensors and the per-logical-
 * disk drive sensors from fresh controller data.
 */
void
mfii_refresh_sensors(void *arg)
{
	struct mfii_softc	*sc = arg;
	int			i;

	rw_enter_write(&sc->sc_lock);
	if (sc->sc_bbu != NULL)
		mfii_bbu(sc);

	/* refresh the cached controller state (sc_cfg etc.) */
	mfii_bio_getitall(sc);
	rw_exit_write(&sc->sc_lock);

	/*
	 * NOTE(review): sc_ld_list is read outside sc_lock here;
	 * confirm no concurrent updater can race with this loop.
	 */
	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++)
		mfii_refresh_ld_sensor(sc, i);
}
3932 #endif /* SMALL_KERNEL */
3933 #endif /* NBIO > 0 */
3934