xref: /netbsd-src/sys/dev/pci/mfii.c (revision 7a48c000b22c08844ee60caa0662579c5f9a6b1e)
1 /* $NetBSD: mfii.c,v 1.32 2024/02/13 14:56:52 msaitoh Exp $ */
2 /* $OpenBSD: mfii.c,v 1.58 2018/08/14 05:22:21 jmatthew Exp $ */
3 
4 /*
5  * Copyright (c) 2018 Manuel Bouyer <Manuel.Bouyer@lip6.fr>
6  * Copyright (c) 2012 David Gwynne <dlg@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include <sys/cdefs.h>
22 __KERNEL_RCSID(0, "$NetBSD: mfii.c,v 1.32 2024/02/13 14:56:52 msaitoh Exp $");
23 
24 #include "bio.h"
25 
26 #include <sys/atomic.h>
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/buf.h>
30 #include <sys/ioctl.h>
31 #include <sys/device.h>
32 #include <sys/kernel.h>
33 #include <sys/proc.h>
34 #include <sys/cpu.h>
35 #include <sys/conf.h>
36 #include <sys/kauth.h>
37 #include <sys/workqueue.h>
38 #include <sys/malloc.h>
39 
40 #include <uvm/uvm_param.h>
41 
42 #include <dev/pci/pcidevs.h>
43 #include <dev/pci/pcivar.h>
44 
45 #include <sys/bus.h>
46 
47 #include <dev/sysmon/sysmonvar.h>
48 #include <sys/envsys.h>
49 
50 #include <dev/scsipi/scsipi_all.h>
51 #include <dev/scsipi/scsi_all.h>
52 #include <dev/scsipi/scsi_spc.h>
53 #include <dev/scsipi/scsipi_disk.h>
54 #include <dev/scsipi/scsi_disk.h>
55 #include <dev/scsipi/scsiconf.h>
56 
57 #if NBIO > 0
58 #include <dev/biovar.h>
59 #endif /* NBIO > 0 */
60 
61 #include <dev/ic/mfireg.h>
62 #include <dev/pci/mpiireg.h>
63 
64 #define	MFII_BAR		0x14
65 #define MFII_BAR_35		0x10
66 #define	MFII_PCI_MEMSIZE	0x2000 /* 8k */
67 
68 #define MFII_OSTS_INTR_VALID	0x00000009
69 #define MFII_RPI		0x6c /* reply post host index */
70 #define MFII_OSP2		0xb4 /* outbound scratch pad 2 */
71 #define MFII_OSP3		0xb8 /* outbound scratch pad 3 */
72 
73 #define MFII_REQ_TYPE_SCSI	MPII_REQ_DESCR_SCSI_IO
74 #define MFII_REQ_TYPE_LDIO	(0x7 << 1)
75 #define MFII_REQ_TYPE_MFA	(0x1 << 1)
76 #define MFII_REQ_TYPE_NO_LOCK	(0x2 << 1)
77 #define MFII_REQ_TYPE_HI_PRI	(0x6 << 1)
78 
79 #define MFII_REQ_MFA(_a)	htole64((_a) | MFII_REQ_TYPE_MFA)
80 
81 #define MFII_FUNCTION_PASSTHRU_IO			(0xf0)
82 #define MFII_FUNCTION_LDIO_REQUEST			(0xf1)
83 
84 #define MFII_MAX_CHAIN_UNIT	0x00400000
85 #define MFII_MAX_CHAIN_MASK	0x000003E0
86 #define MFII_MAX_CHAIN_SHIFT	5
87 
88 #define MFII_256K_IO		128
89 #define MFII_1MB_IO		(MFII_256K_IO * 4)
90 
91 #define MFII_CHAIN_FRAME_MIN	1024
92 
/*
 * Request descriptor posted to the controller to kick off a command;
 * smid selects which MPII request frame the command occupies.
 */
93 struct mfii_request_descr {
94 	u_int8_t	flags;		/* request type, MFII_REQ_TYPE_* */
95 	u_int8_t	msix_index;
96 	u_int16_t	smid;		/* system message id (frame index) */
97 
98 	u_int16_t	lmid;
99 	u_int16_t	dev_handle;
100 } __packed;
101 
102 #define MFII_RAID_CTX_IO_TYPE_SYSPD	(0x1 << 4)
103 #define MFII_RAID_CTX_TYPE_CUDA		(0x2 << 4)
104 
/*
 * Per-command RAID context appended to the MPII SCSI IO request frame;
 * layout mirrors the firmware's expectation (see the flag defines below
 * and the per-generation values in the mfii_iop tables).
 */
105 struct mfii_raid_context {
106 	u_int8_t	type_nseg;	/* filled from iop->ldio_ctx_type_nseg */
107 	u_int8_t	_reserved1;
108 	u_int16_t	timeout_value;
109 
110 	u_int16_t	reg_lock_flags;
111 #define MFII_RAID_CTX_RL_FLAGS_SEQNO_EN	(0x08)
112 #define MFII_RAID_CTX_RL_FLAGS_CPU0	(0x00)
113 #define MFII_RAID_CTX_RL_FLAGS_CPU1	(0x10)
114 #define MFII_RAID_CTX_RL_FLAGS_CUDA	(0x80)
115 
116 #define MFII_RAID_CTX_ROUTING_FLAGS_SQN	(1 << 4)
117 #define MFII_RAID_CTX_ROUTING_FLAGS_CPU0 0
118 	u_int16_t	virtual_disk_target_id;
119 
120 	u_int64_t	reg_lock_row_lba;
121 
122 	u_int32_t	reg_lock_length;
123 
124 	u_int16_t	next_lm_id;
125 	u_int8_t	ex_status;	/* extended completion status */
126 	u_int8_t	status;		/* MFI completion status */
127 
128 	u_int8_t	raid_flags;
129 	u_int8_t	num_sge;
130 	u_int16_t	config_seq_num;
131 
132 	u_int8_t	span_arm;
133 	u_int8_t	_reserved3[3];
134 } __packed;
135 
/*
 * IEEE-style scatter/gather element; sg_flags takes the MFII_SGE_*
 * values below (chain/end-of-list markers differ per iop generation).
 */
136 struct mfii_sge {
137 	u_int64_t	sg_addr;	/* DMA address of the segment */
138 	u_int32_t	sg_len;		/* segment length in bytes */
139 	u_int16_t	_reserved;
140 	u_int8_t	sg_next_chain_offset;
141 	u_int8_t	sg_flags;	/* MFII_SGE_* */
142 } __packed;
143 
144 #define MFII_SGE_ADDR_MASK		(0x03)
145 #define MFII_SGE_ADDR_SYSTEM		(0x00)
146 #define MFII_SGE_ADDR_IOCDDR		(0x01)
147 #define MFII_SGE_ADDR_IOCPLB		(0x02)
148 #define MFII_SGE_ADDR_IOCPLBNTA		(0x03)
149 #define MFII_SGE_END_OF_LIST		(0x40)
150 #define MFII_SGE_CHAIN_ELEMENT		(0x80)
151 
152 #define MFII_REQUEST_SIZE	256
153 
154 #define MR_DCMD_LD_MAP_GET_INFO			0x0300e101
155 
156 #define MFII_MAX_ROW		32
157 #define MFII_MAX_ARRAY		128
158 
/* One row of the firmware LD map: physical disk ids for an array. */
159 struct mfii_array_map {
160 	uint16_t		mam_pd[MFII_MAX_ROW];
161 } __packed;
162 
/* Firmware-assigned device handle entry for one physical disk. */
163 struct mfii_dev_handle {
164 	uint16_t		mdh_cur_handle;	/* currently active handle */
165 	uint8_t			mdh_valid;
166 	uint8_t			mdh_reserved;
167 	uint16_t		mdh_handle[2];	/* per-path handles */
168 } __packed;
169 
/*
 * Buffer layout returned by the MR_DCMD_LD_MAP_GET_INFO dcmd: logical
 * disk map plus per-PD device handles and timeout.
 */
170 struct mfii_ld_map {
171 	uint32_t		mlm_total_size;
172 	uint32_t		mlm_reserved1[5];
173 	uint32_t		mlm_num_lds;
174 	uint32_t		mlm_reserved2;
175 	uint8_t			mlm_tgtid_to_ld[2 * MFI_MAX_LD];
176 	uint8_t			mlm_pd_timeout;
177 	uint8_t			mlm_reserved3[7];
178 	struct mfii_array_map	mlm_am[MFII_MAX_ARRAY];
179 	struct mfii_dev_handle	mlm_dev_handle[MFI_MAX_PD];
180 } __packed;
181 
/*
 * Combined request/reply buffer for MPII task management (abort)
 * commands; each half is padded to 128 bytes over the MPII message.
 */
182 struct mfii_task_mgmt {
183 	union {
184 		uint8_t			request[128];
185 		struct mpii_msg_scsi_task_request
186 					mpii_request;
187 	} __packed __aligned(8);
188 
189 	union {
190 		uint8_t			reply[128];
191 		uint32_t		flags;
192 #define MFII_TASK_MGMT_FLAGS_LD				(1 << 0)
193 #define MFII_TASK_MGMT_FLAGS_PD				(1 << 1)
194 		struct mpii_msg_scsi_task_reply
195 					mpii_reply;
196 	} __packed __aligned(8);
197 } __packed __aligned(8);
198 
199 /* We currently don't know the full details of the following struct */
200 struct mfii_foreign_scan_cfg {
201 	char data[24];	/* opaque; layout not reverse-engineered yet */
202 } __packed;
203 
/* Result of a foreign-configuration scan on the controller. */
204 struct mfii_foreign_scan_info {
205 	uint32_t count; /* Number of foreign configs found */
206 	struct mfii_foreign_scan_cfg cfgs[8];
207 } __packed;
208 
209 #define MFII_MAX_LD_EXT		256
210 
/*
 * Extended logical disk list (up to MFII_MAX_LD_EXT entries), used
 * instead of the classic mfi list when the adapter supports 256 VDs.
 */
211 struct mfii_ld_list_ext {
212 	uint32_t		mll_no_ld;	/* number of valid entries */
213 	uint32_t		mll_res;
214 	struct {
215 		struct mfi_ld	mll_ld;
216 		uint8_t		mll_state; /* states are the same as MFI_ */
217 		uint8_t		mll_res2;
218 		uint8_t		mll_res3;
219 		uint8_t		mll_res4;
220 		uint64_t	mll_size;
221 	} mll_list[MFII_MAX_LD_EXT];
222 } __packed;
223 
/*
 * Handle for a single-segment DMA allocation; use the MFII_DMA_*
 * accessors below for the map, length, device and kernel addresses.
 */
224 struct mfii_dmamem {
225 	bus_dmamap_t		mdm_map;
226 	bus_dma_segment_t	mdm_seg;
227 	size_t			mdm_size;
228 	void *			mdm_kva;
229 };
230 #define MFII_DMA_MAP(_mdm)	((_mdm)->mdm_map)
231 #define MFII_DMA_LEN(_mdm)	((_mdm)->mdm_size)
232 #define MFII_DMA_DVA(_mdm)	((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
233 #define MFII_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
234 
235 struct mfii_softc;
236 
/* Data transfer direction from the host's point of view. */
237 typedef enum mfii_direction {
238 	MFII_DATA_NONE = 0,
239 	MFII_DATA_IN,		/* device -> host */
240 	MFII_DATA_OUT		/* host -> device */
241 } mfii_direction_t;
242 
/*
 * Per-command control block.  Each ccb owns fixed slices of the shared
 * request/mfi/sense/sgl DMA areas (kva pointer, device address and
 * offset for each), plus DMA maps for the data transfer itself.
 */
243 struct mfii_ccb {
244 	struct mfii_softc	*ccb_sc;
245 	void			*ccb_request;	/* MPII request frame slot */
246 	u_int64_t		ccb_request_dva;
247 	bus_addr_t		ccb_request_offset;
248 
249 	void			*ccb_mfi;	/* MFI frame slot */
250 	u_int64_t		ccb_mfi_dva;
251 	bus_addr_t		ccb_mfi_offset;
252 
253 	struct mfi_sense	*ccb_sense;	/* also reused for dcmds */
254 	u_int64_t		ccb_sense_dva;
255 	bus_addr_t		ccb_sense_offset;
256 
257 	struct mfii_sge		*ccb_sgl;	/* chained SGL slot */
258 	u_int64_t		ccb_sgl_dva;
259 	bus_addr_t		ccb_sgl_offset;
260 	u_int			ccb_sgl_len;
261 
262 	struct mfii_request_descr ccb_req;
263 
264 	bus_dmamap_t		ccb_dmamap64;
265 	bus_dmamap_t		ccb_dmamap32;
266 	bool			ccb_dma64;	/* which map is loaded */
267 
268 	/* data for sgl */
269 	void			*ccb_data;
270 	size_t			ccb_len;
271 
272 	mfii_direction_t	ccb_direction;
273 
274 	void			*ccb_cookie;	/* completion argument */
275 	kmutex_t		ccb_mtx;
276 	kcondvar_t		ccb_cv;
277 	void			(*ccb_done)(struct mfii_softc *,
278 				    struct mfii_ccb *);
279 
280 	u_int32_t		ccb_flags;
281 #define MFI_CCB_F_ERR			(1<<0)
282 	u_int			ccb_smid;	/* 1-based frame index */
283 	SIMPLEQ_ENTRY(mfii_ccb)	ccb_link;
284 };
285 SIMPLEQ_HEAD(mfii_ccb_list, mfii_ccb);
286 
/*
 * Per-controller-generation parameters: which BAR holds the registers
 * and the request/SGE flag values the firmware generation expects.
 */
287 struct mfii_iop {
288 	int bar;		/* register BAR: MFII_BAR or MFII_BAR_35 */
289 	int num_sge_loc;
290 #define MFII_IOP_NUM_SGE_LOC_ORIG	0
291 #define MFII_IOP_NUM_SGE_LOC_35		1
292 	u_int16_t ldio_ctx_reg_lock_flags;
293 	u_int8_t ldio_req_type;
294 	u_int8_t ldio_ctx_type_nseg;
295 	u_int8_t sge_flag_chain;
296 	u_int8_t sge_flag_eol;
297 	u_int8_t iop_flag;
298 #define MFII_IOP_QUIRK_REGREAD		0x01
299 #define MFII_IOP_HAS_32BITDESC_BIT	0x02
300 };
301 
/*
 * Per-controller software state.
 * NOTE(review): sc_pc/sc_tag are consumed (e.g. by
 * pci_intr_establish_xname() in mfii_attach()) but no assignment to
 * them is visible in this file's attach path — verify they are
 * initialized, otherwise interrupt establishment uses a NULL tag.
 */
302 struct mfii_softc {
303 	device_t		sc_dev;
304 	struct scsipi_channel   sc_chan;
305 	struct scsipi_adapter   sc_adapt;
306 
307 	const struct mfii_iop	*sc_iop;	/* generation parameters */
308 	u_int			sc_iop_flag;
309 #define MFII_IOP_DESC_32BIT		0x01
310 
311 	pci_chipset_tag_t	sc_pc;
312 	pcitag_t		sc_tag;
313 
314 	bus_space_tag_t		sc_iot;
315 	bus_space_handle_t	sc_ioh;
316 	bus_size_t		sc_ios;
317 	bus_dma_tag_t		sc_dmat;
318 	bus_dma_tag_t		sc_dmat64;
319 	bool			sc_64bit_dma;	/* pci_dma64_available() */
320 
321 	void			*sc_ih;		/* interrupt handle */
322 
323 	kmutex_t		sc_ccb_mtx;	/* protects sc_ccb_freeq */
324 	kmutex_t		sc_post_mtx;
325 
326 	u_int			sc_max_fw_cmds;	/* firmware limit */
327 	u_int			sc_max_cmds;	/* what we actually use */
328 	u_int			sc_max_sgl;
329 
330 	u_int			sc_reply_postq_depth;
331 	u_int			sc_reply_postq_index;
332 	kmutex_t		sc_reply_postq_mtx;
333 	struct mfii_dmamem	*sc_reply_postq;
334 
335 	struct mfii_dmamem	*sc_requests;
336 	struct mfii_dmamem	*sc_mfi;
337 	struct mfii_dmamem	*sc_sense;
338 	struct mfii_dmamem	*sc_sgl;
339 
340 	struct mfii_ccb		*sc_ccb;
341 	struct mfii_ccb_list	sc_ccb_freeq;
342 
343 	struct mfii_ccb		*sc_aen_ccb;
344 	struct workqueue	*sc_aen_wq;
345 	struct work		sc_aen_work;
346 
347 	kmutex_t		sc_abort_mtx;	/* protects sc_abort_list */
348 	struct mfii_ccb_list	sc_abort_list;
349 	struct workqueue	*sc_abort_wq;
350 	struct work		sc_abort_work;
351 
352 	/* save some useful information for logical drives that is missing
353 	 * in sc_ld_list
354 	 */
355 	struct {
356 		bool		ld_present;
357 		char		ld_dev[16];	/* device name sd? */
358 		int		ld_target_id;
359 	}			sc_ld[MFII_MAX_LD_EXT];
360 	int			sc_target_lds[MFII_MAX_LD_EXT];
361 	bool			sc_max256vd;	/* 256 VD support */
362 
363 	/* bio */
364 	struct mfi_conf		*sc_cfg;
365 	struct mfi_ctrl_info	sc_info;
366 	struct mfii_ld_list_ext	sc_ld_list;
367 	struct mfi_ld_details	*sc_ld_details; /* array to all logical disks */
368 	int			sc_no_pd; /* used physical disks */
369 	int			sc_ld_sz; /* sizeof sc_ld_details */
370 
371 	/* mgmt lock */
372 	kmutex_t		sc_lock;
373 	bool			sc_running;
374 
375 	/* sensors */
376 	struct sysmon_envsys	*sc_sme;
377 	envsys_data_t		*sc_sensors;
378 	envsys_data_t		*sc_ld_sensors;
379 	bool			sc_bbuok;	/* battery backup present/ok */
380 
381 	device_t		sc_child;
382 };
383 
384 // #define MFII_DEBUG
385 #ifdef MFII_DEBUG
386 #define DPRINTF(x...)		do { if (mfii_debug) printf(x); } while(0)
387 #define DNPRINTF(n,x...)	do { if (mfii_debug & n) printf(x); } while(0)
388 #define	MFII_D_CMD		0x0001
389 #define	MFII_D_INTR		0x0002
390 #define	MFII_D_MISC		0x0004
391 #define	MFII_D_DMA		0x0008
392 #define	MFII_D_IOCTL		0x0010
393 #define	MFII_D_RW		0x0020
394 #define	MFII_D_MEM		0x0040
395 #define	MFII_D_CCB		0x0080
396 uint32_t	mfii_debug = 0
397 /*		    | MFII_D_CMD */
398 /*		    | MFII_D_INTR */
399 		    | MFII_D_MISC
400 /*		    | MFII_D_DMA */
401 /*		    | MFII_D_IOCTL */
402 /*		    | MFII_D_RW */
403 /*		    | MFII_D_MEM */
404 /*		    | MFII_D_CCB */
405 		;
406 #else
407 #define DPRINTF(x...)
408 #define DNPRINTF(n,x...)
409 #endif
410 
411 static int	mfii_match(device_t, cfdata_t, void *);
412 static void	mfii_attach(device_t, device_t, void *);
413 static int	mfii_detach(device_t, int);
414 static int	mfii_rescan(device_t, const char *, const int *);
415 static void	mfii_childdetached(device_t, device_t);
416 static bool	mfii_suspend(device_t, const pmf_qual_t *);
417 static bool	mfii_resume(device_t, const pmf_qual_t *);
418 static bool	mfii_shutdown(device_t, int);
419 
420 
421 CFATTACH_DECL3_NEW(mfii, sizeof(struct mfii_softc),
422     mfii_match, mfii_attach, mfii_detach, NULL, mfii_rescan,
423 	mfii_childdetached, DVF_DETACH_SHUTDOWN);
424 
425 static void	mfii_scsipi_request(struct scsipi_channel *,
426 			scsipi_adapter_req_t, void *);
427 static void	mfii_scsi_cmd_done(struct mfii_softc *, struct mfii_ccb *);
428 
429 #define DEVNAME(_sc)		(device_xname((_sc)->sc_dev))
430 
431 static u_int32_t	mfii_read(struct mfii_softc *, bus_size_t);
432 static void		mfii_write(struct mfii_softc *, bus_size_t, u_int32_t);
433 
434 static struct mfii_dmamem *	mfii_dmamem_alloc(struct mfii_softc *, size_t);
435 static void		mfii_dmamem_free(struct mfii_softc *,
436 			    struct mfii_dmamem *);
437 
438 static struct mfii_ccb *	mfii_get_ccb(struct mfii_softc *);
439 static void		mfii_put_ccb(struct mfii_softc *, struct mfii_ccb *);
440 static int		mfii_init_ccb(struct mfii_softc *);
441 static void		mfii_scrub_ccb(struct mfii_ccb *);
442 
443 static int		mfii_reset_hard(struct mfii_softc *);
444 static int		mfii_transition_firmware(struct mfii_softc *);
445 static int		mfii_initialise_firmware(struct mfii_softc *);
446 static int		mfii_get_info(struct mfii_softc *);
447 
448 static void		mfii_start(struct mfii_softc *, struct mfii_ccb *);
449 static void		mfii_start64(struct mfii_softc *, struct mfii_ccb *);
450 static void		mfii_start_common(struct mfii_softc *,
451 			    struct mfii_ccb *, bool);
452 static void		mfii_done(struct mfii_softc *, struct mfii_ccb *);
453 static int		mfii_poll(struct mfii_softc *, struct mfii_ccb *);
454 static void		mfii_poll_done(struct mfii_softc *, struct mfii_ccb *);
455 static int		mfii_exec(struct mfii_softc *, struct mfii_ccb *);
456 static void		mfii_exec_done(struct mfii_softc *, struct mfii_ccb *);
457 static int		mfii_my_intr(struct mfii_softc *);
458 static int		mfii_intr(void *);
459 static void		mfii_postq(struct mfii_softc *);
460 
461 static int		mfii_load_ccb(struct mfii_softc *, struct mfii_ccb *,
462 			    void *, int);
463 static int		mfii_load_mfa(struct mfii_softc *, struct mfii_ccb *,
464 			    void *, int);
465 
466 static int		mfii_mfa_poll(struct mfii_softc *, struct mfii_ccb *);
467 
468 static int		mfii_mgmt(struct mfii_softc *, uint32_t,
469 			    const union mfi_mbox *, void *, size_t,
470 			    mfii_direction_t, bool);
471 static int		mfii_do_mgmt(struct mfii_softc *, struct mfii_ccb *,
472 			    uint32_t, const union mfi_mbox *, void *, size_t,
473 			    mfii_direction_t, bool);
474 static void		mfii_empty_done(struct mfii_softc *, struct mfii_ccb *);
475 
476 static int		mfii_scsi_cmd_io(struct mfii_softc *,
477 			    struct mfii_ccb *, struct scsipi_xfer *);
478 static int		mfii_scsi_cmd_cdb(struct mfii_softc *,
479 			    struct mfii_ccb *, struct scsipi_xfer *);
480 static void		mfii_scsi_cmd_tmo(void *);
481 
482 static void		mfii_abort_task(struct work *, void *);
483 static void		mfii_abort(struct mfii_softc *, struct mfii_ccb *,
484 			    uint16_t, uint16_t, uint8_t, uint32_t);
485 static void		mfii_scsi_cmd_abort_done(struct mfii_softc *,
486 			    struct mfii_ccb *);
487 
488 static int		mfii_aen_register(struct mfii_softc *);
489 static void		mfii_aen_start(struct mfii_softc *, struct mfii_ccb *,
490 			    struct mfii_dmamem *, uint32_t);
491 static void		mfii_aen_done(struct mfii_softc *, struct mfii_ccb *);
492 static void		mfii_aen(struct work *, void *);
493 static void		mfii_aen_unregister(struct mfii_softc *);
494 
495 static void		mfii_aen_pd_insert(struct mfii_softc *,
496 			    const struct mfi_evtarg_pd_address *);
497 static void		mfii_aen_pd_remove(struct mfii_softc *,
498 			    const struct mfi_evtarg_pd_address *);
499 static void		mfii_aen_pd_state_change(struct mfii_softc *,
500 			    const struct mfi_evtarg_pd_state *);
501 static void		mfii_aen_ld_update(struct mfii_softc *);
502 
503 #if NBIO > 0
504 static int	mfii_ioctl(device_t, u_long, void *);
505 static int	mfii_ioctl_inq(struct mfii_softc *, struct bioc_inq *);
506 static int	mfii_ioctl_vol(struct mfii_softc *, struct bioc_vol *);
507 static int	mfii_ioctl_disk(struct mfii_softc *, struct bioc_disk *);
508 static int	mfii_ioctl_alarm(struct mfii_softc *, struct bioc_alarm *);
509 static int	mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *);
510 static int	mfii_ioctl_setstate(struct mfii_softc *,
511 		    struct bioc_setstate *);
512 static int	mfii_bio_hs(struct mfii_softc *, int, int, void *);
513 static int	mfii_bio_getitall(struct mfii_softc *);
514 #endif /* NBIO > 0 */
515 
516 #if 0
517 static const char *mfi_bbu_indicators[] = {
518 	"pack missing",
519 	"voltage low",
520 	"temp high",
521 	"charge active",
522 	"discharge active",
523 	"learn cycle req'd",
524 	"learn cycle active",
525 	"learn cycle failed",
526 	"learn cycle timeout",
527 	"I2C errors",
528 	"replace pack",
529 	"low capacity",
530 	"periodic learn req'd"
531 };
532 #endif
533 
534 #define MFI_BBU_SENSORS 4
535 
536 static void	mfii_init_ld_sensor(struct mfii_softc *, envsys_data_t *, int);
537 static void	mfii_refresh_ld_sensor(struct mfii_softc *, envsys_data_t *);
538 static void	mfii_attach_sensor(struct mfii_softc *, envsys_data_t *);
539 static int	mfii_create_sensors(struct mfii_softc *);
540 static int	mfii_destroy_sensors(struct mfii_softc *);
541 static void	mfii_refresh_sensor(struct sysmon_envsys *, envsys_data_t *);
542 static void	mfii_bbu(struct mfii_softc *, envsys_data_t *);
543 
544 /*
545  * mfii boards support asynchronous (and non-polled) completion of
546  * dcmds by proxying them through a passthru mpii command that points
547  * at a dcmd frame. since the passthru command is submitted like
548  * the scsi commands using an SMID in the request descriptor,
549  * ccb_request memory * must contain the passthru command because
550  * that is what the SMID refers to. this means ccb_request cannot
551  * contain the dcmd. rather than allocating separate dma memory to
552  * hold the dcmd, we reuse the sense memory buffer for it.
553  */
554 
555 static void	mfii_dcmd_start(struct mfii_softc *, struct mfii_ccb *);
556 
557 static inline void
mfii_dcmd_scrub(struct mfii_ccb * ccb)558 mfii_dcmd_scrub(struct mfii_ccb *ccb)
559 {
560 	memset(ccb->ccb_sense, 0, sizeof(*ccb->ccb_sense));
561 }
562 
563 static inline struct mfi_dcmd_frame *
mfii_dcmd_frame(struct mfii_ccb * ccb)564 mfii_dcmd_frame(struct mfii_ccb *ccb)
565 {
566 	CTASSERT(sizeof(struct mfi_dcmd_frame) <= sizeof(*ccb->ccb_sense));
567 	return ((struct mfi_dcmd_frame *)ccb->ccb_sense);
568 }
569 
570 static inline void
mfii_dcmd_sync(struct mfii_softc * sc,struct mfii_ccb * ccb,int flags)571 mfii_dcmd_sync(struct mfii_softc *sc, struct mfii_ccb *ccb, int flags)
572 {
573 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sense),
574 	    ccb->ccb_sense_offset, sizeof(*ccb->ccb_sense), flags);
575 }
576 
577 #define mfii_fw_state(_sc) mfii_read((_sc), MFI_OSP)
578 
579 static const struct mfii_iop mfii_iop_thunderbolt = {
580 	MFII_BAR,
581 	MFII_IOP_NUM_SGE_LOC_ORIG,
582 	0,
583 	MFII_REQ_TYPE_LDIO,
584 	0,
585 	MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA,
586 	0,
587 	0
588 };
589 
590 /*
591  * a lot of these values depend on us not implementing fastpath yet.
592  */
593 static const struct mfii_iop mfii_iop_25 = {
594 	MFII_BAR,
595 	MFII_IOP_NUM_SGE_LOC_ORIG,
596 	MFII_RAID_CTX_RL_FLAGS_CPU0, /* | MFII_RAID_CTX_RL_FLAGS_SEQNO_EN */
597 	MFII_REQ_TYPE_NO_LOCK,
598 	MFII_RAID_CTX_TYPE_CUDA | 0x1,
599 	MFII_SGE_CHAIN_ELEMENT,
600 	MFII_SGE_END_OF_LIST,
601 	0
602 };
603 
604 static const struct mfii_iop mfii_iop_35 = {
605 	MFII_BAR_35,
606 	MFII_IOP_NUM_SGE_LOC_35,
607 	MFII_RAID_CTX_ROUTING_FLAGS_CPU0, /* | MFII_RAID_CTX_ROUTING_FLAGS_SQN */
608 	MFII_REQ_TYPE_NO_LOCK,
609 	MFII_RAID_CTX_TYPE_CUDA | 0x1,
610 	MFII_SGE_CHAIN_ELEMENT,
611 	MFII_SGE_END_OF_LIST,
612 	0
613 };
614 
615 static const struct mfii_iop mfii_iop_aero = {
616 	MFII_BAR_35,
617 	MFII_IOP_NUM_SGE_LOC_35,
618 	MFII_RAID_CTX_ROUTING_FLAGS_CPU0, /* | MFII_RAID_CTX_ROUTING_FLAGS_SQN */
619 	MFII_REQ_TYPE_NO_LOCK,
620 	MFII_RAID_CTX_TYPE_CUDA | 0x1,
621 	MFII_SGE_CHAIN_ELEMENT,
622 	MFII_SGE_END_OF_LIST,
623 	MFII_IOP_QUIRK_REGREAD | MFII_IOP_HAS_32BITDESC_BIT
624 };
625 
/* PCI id -> iop-generation mapping entry for the match table below. */
626 struct mfii_device {
627 	pcireg_t		mpd_vendor;
628 	pcireg_t		mpd_product;
629 	const struct mfii_iop	*mpd_iop;
630 };
631 
/* Supported controllers, grouped by LSI/Broadcom code name. */
632 static const struct mfii_device mfii_devices[] = {
633 	/* Fusion */
634 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_2208,
635 	    &mfii_iop_thunderbolt },
636 	/* Fury */
637 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3008,
638 	    &mfii_iop_25 },
639 	/* Invader */
640 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3108,
641 	    &mfii_iop_25 },
642 	/* Intruder */
643 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3316,
644 	    &mfii_iop_25 },
645 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3324,
646 	    &mfii_iop_25 },
647 	/* Cutlass */
648 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_32XX_1,
649 	    &mfii_iop_25 },
650 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_32XX_2,
651 	    &mfii_iop_25 },
652 	/* Crusader */
653 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3404,
654 	    &mfii_iop_35 },
655 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3416,
656 	    &mfii_iop_35 },
657 	/* Ventura */
658 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3504,
659 	    &mfii_iop_35 },
660 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3516,
661 	    &mfii_iop_35 },
662 	/* Tomcat */
663 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3408,
664 	    &mfii_iop_35 },
665 	/* Harpoon */
666 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3508,
667 	    &mfii_iop_35 },
668 	/* Aero */
669 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_39XX_2,
670 	    &mfii_iop_aero },
671 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_39XX_3,
672 	    &mfii_iop_aero },
673 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_38XX_2,
674 	    &mfii_iop_aero },
675 	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_38XX_3,
676 	    &mfii_iop_aero }
677 };
678 
679 static const struct mfii_iop *mfii_find_iop(struct pci_attach_args *);
680 
681 static const struct mfii_iop *
mfii_find_iop(struct pci_attach_args * pa)682 mfii_find_iop(struct pci_attach_args *pa)
683 {
684 	const struct mfii_device *mpd;
685 	int i;
686 
687 	for (i = 0; i < __arraycount(mfii_devices); i++) {
688 		mpd = &mfii_devices[i];
689 
690 		if (mpd->mpd_vendor == PCI_VENDOR(pa->pa_id) &&
691 		    mpd->mpd_product == PCI_PRODUCT(pa->pa_id))
692 			return (mpd->mpd_iop);
693 	}
694 
695 	return (NULL);
696 }
697 
698 static int
mfii_match(device_t parent,cfdata_t match,void * aux)699 mfii_match(device_t parent, cfdata_t match, void *aux)
700 {
701 	return ((mfii_find_iop(aux) != NULL) ? 2 : 0);
702 }
703 
/*
 * Autoconf attach: map registers, bring the firmware to a ready state,
 * size the command/SGL pools from the firmware scratchpad registers,
 * allocate the shared DMA areas, establish the interrupt, hook up
 * scsipi, and register AEN handling, bio(4) ioctls, sensors and the
 * power handler.  On failure, resources are unwound via the labels at
 * the bottom in reverse order of acquisition.
 */
704 static void
mfii_attach(device_t parent,device_t self,void * aux)705 mfii_attach(device_t parent, device_t self, void *aux)
706 {
707 	struct mfii_softc *sc = device_private(self);
708 	struct pci_attach_args *pa = aux;
709 	pcireg_t memtype;
710 	pci_intr_handle_t *ihp;
711 	char intrbuf[PCI_INTRSTR_LEN];
712 	const char *intrstr;
713 	u_int32_t status, scpad2, scpad3;
714 	int chain_frame_sz, nsge_in_io, nsge_in_chain, i;
715 	struct scsipi_adapter *adapt = &sc->sc_adapt;
716 	struct scsipi_channel *chan = &sc->sc_chan;
717 	union mfi_mbox mbox;
718 
719 	/* init sc */
720 	sc->sc_dev = self;
721 	sc->sc_iop = mfii_find_iop(aux);
722 	sc->sc_dmat = pa->pa_dmat;
723 	if (pci_dma64_available(pa)) {
724 		sc->sc_dmat64 = pa->pa_dmat64;
725 		sc->sc_64bit_dma = 1;
726 	} else {
727 		sc->sc_dmat64 = pa->pa_dmat;
728 		sc->sc_64bit_dma = 0;
729 	}
730 	SIMPLEQ_INIT(&sc->sc_ccb_freeq);
731 	mutex_init(&sc->sc_ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
732 	mutex_init(&sc->sc_post_mtx, MUTEX_DEFAULT, IPL_BIO);
733 	mutex_init(&sc->sc_reply_postq_mtx, MUTEX_DEFAULT, IPL_BIO);
734 
735 	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
736 
	/* intrbuf is reused as a scratch buffer for workqueue names here */
737 	sc->sc_aen_ccb = NULL;
738 	snprintf(intrbuf, sizeof(intrbuf) - 1, "%saen", device_xname(self));
739 	workqueue_create(&sc->sc_aen_wq, intrbuf, mfii_aen, sc,
740 	    PRI_BIO, IPL_BIO, WQ_MPSAFE);
741 
742 	snprintf(intrbuf, sizeof(intrbuf) - 1, "%sabrt", device_xname(self));
743 	workqueue_create(&sc->sc_abort_wq, intrbuf, mfii_abort_task,
744 	    sc, PRI_BIO, IPL_BIO, WQ_MPSAFE);
745 
746 	mutex_init(&sc->sc_abort_mtx, MUTEX_DEFAULT, IPL_BIO);
747 	SIMPLEQ_INIT(&sc->sc_abort_list);
748 
749 	/* wire up the bus shizz */
750 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_iop->bar);
751 	memtype |= PCI_MAPREG_MEM_TYPE_32BIT;
752 	if (pci_mapreg_map(pa, sc->sc_iop->bar, memtype, 0,
753 	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)) {
754 		aprint_error(": unable to map registers\n");
755 		return;
756 	}
757 
758 	/* disable interrupts */
759 	mfii_write(sc, MFI_OMSK, 0xffffffff);
760 
761 	if (pci_intr_alloc(pa, &ihp, NULL, 0)) {
762 		aprint_error(": unable to map interrupt\n");
763 		goto pci_unmap;
764 	}
765 	intrstr = pci_intr_string(pa->pa_pc, ihp[0], intrbuf, sizeof(intrbuf));
766 	pci_intr_setattr(pa->pa_pc, &ihp[0], PCI_INTR_MPSAFE, true);
767 
768 	/* lets get started */
769 	if (mfii_transition_firmware(sc))
770 		goto pci_unmap;
771 	sc->sc_running = true;
772 
773 	/* determine max_cmds (refer to the Linux megaraid_sas driver) */
774 	scpad3 = mfii_read(sc, MFII_OSP3);
775 	status = mfii_fw_state(sc);
776 	sc->sc_max_fw_cmds = scpad3 & MFI_STATE_MAXCMD_MASK;
777 	if (sc->sc_max_fw_cmds == 0)
778 		sc->sc_max_fw_cmds = status & MFI_STATE_MAXCMD_MASK;
779 	/*
780 	 * reduce max_cmds by 1 to ensure that the reply queue depth does not
781 	 * exceed FW supplied max_fw_cmds.
782 	 */
783 	sc->sc_max_cmds = uimin(sc->sc_max_fw_cmds, 1024) - 1;
784 
785 	/* determine max_sgl (refer to the Linux megaraid_sas driver) */
786 	scpad2 = mfii_read(sc, MFII_OSP2);
787 	chain_frame_sz =
788 		((scpad2 & MFII_MAX_CHAIN_MASK) >> MFII_MAX_CHAIN_SHIFT) *
789 		((scpad2 & MFII_MAX_CHAIN_UNIT) ? MFII_1MB_IO : MFII_256K_IO);
790 	if (chain_frame_sz < MFII_CHAIN_FRAME_MIN)
791 		chain_frame_sz = MFII_CHAIN_FRAME_MIN;
792 
793 	nsge_in_io = (MFII_REQUEST_SIZE -
794 		sizeof(struct mpii_msg_scsi_io) -
795 		sizeof(struct mfii_raid_context)) / sizeof(struct mfii_sge);
796 	nsge_in_chain = chain_frame_sz / sizeof(struct mfii_sge);
797 
798 	/* round down to nearest power of two */
799 	sc->sc_max_sgl = 1;
800 	while ((sc->sc_max_sgl << 1) <= (nsge_in_io + nsge_in_chain))
801 		sc->sc_max_sgl <<= 1;
802 
803 	/* Check for atomic(32bit) descriptor */
804 	if (((sc->sc_iop->iop_flag & MFII_IOP_HAS_32BITDESC_BIT) != 0) &&
805 	    ((scpad2 & MFI_STATE_ATOMIC_DESCRIPTOR) != 0))
806 		sc->sc_iop_flag |= MFII_IOP_DESC_32BIT;
807 
808 	DNPRINTF(MFII_D_MISC, "%s: OSP 0x%08x, OSP2 0x%08x, OSP3 0x%08x\n",
809 	    DEVNAME(sc), status, scpad2, scpad3);
810 	DNPRINTF(MFII_D_MISC, "%s: max_fw_cmds %d, max_cmds %d\n",
811 	    DEVNAME(sc), sc->sc_max_fw_cmds, sc->sc_max_cmds);
812 	DNPRINTF(MFII_D_MISC, "%s: nsge_in_io %d, nsge_in_chain %d, "
813 	    "max_sgl %d\n", DEVNAME(sc), nsge_in_io, nsge_in_chain,
814 	    sc->sc_max_sgl);
815 
816 	/* sense memory */
817 	CTASSERT(sizeof(struct mfi_sense) == MFI_SENSE_SIZE);
818 	sc->sc_sense = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
819 	if (sc->sc_sense == NULL) {
820 		aprint_error(": unable to allocate sense memory\n");
821 		goto pci_unmap;
822 	}
823 
824 	/* reply post queue */
825 	sc->sc_reply_postq_depth = roundup(sc->sc_max_fw_cmds, 16);
826 
827 	sc->sc_reply_postq = mfii_dmamem_alloc(sc,
828 	    sc->sc_reply_postq_depth * sizeof(struct mpii_reply_descr));
829 	if (sc->sc_reply_postq == NULL)
830 		goto free_sense;
831 
832 	memset(MFII_DMA_KVA(sc->sc_reply_postq), 0xff,
833 	    MFII_DMA_LEN(sc->sc_reply_postq));
834 
835 	/* MPII request frame array */
836 	sc->sc_requests = mfii_dmamem_alloc(sc,
837 	    MFII_REQUEST_SIZE * (sc->sc_max_cmds + 1));
838 	if (sc->sc_requests == NULL)
839 		goto free_reply_postq;
840 
841 	/* MFI command frame array */
842 	sc->sc_mfi = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_FRAME_SIZE);
843 	if (sc->sc_mfi == NULL)
844 		goto free_requests;
845 
846 	/* MPII SGL array */
847 	sc->sc_sgl = mfii_dmamem_alloc(sc, sc->sc_max_cmds *
848 	    sizeof(struct mfii_sge) * sc->sc_max_sgl);
849 	if (sc->sc_sgl == NULL)
850 		goto free_mfi;
851 
852 	if (mfii_init_ccb(sc) != 0) {
853 		aprint_error(": could not init ccb list\n");
854 		goto free_sgl;
855 	}
856 
857 	/* kickstart firmware with all addresses and pointers */
858 	if (mfii_initialise_firmware(sc) != 0) {
859 		aprint_error(": could not initialize firmware\n");
860 		goto free_sgl;
861 	}
862 
863 	mutex_enter(&sc->sc_lock);
864 	if (mfii_get_info(sc) != 0) {
865 		mutex_exit(&sc->sc_lock);
866 		aprint_error(": could not retrieve controller information\n");
867 		goto free_sgl;
868 	}
869 	mutex_exit(&sc->sc_lock);
870 
871 	aprint_normal(": \"%s\", firmware %s",
872 	    sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
873 	if (le16toh(sc->sc_info.mci_memory_size) > 0) {
874 		aprint_normal(", %uMB cache",
875 		    le16toh(sc->sc_info.mci_memory_size));
876 	}
877 	aprint_normal("\n");
878 	aprint_naive("\n");
879 
	/*
	 * NOTE(review): sc->sc_pc is not assigned anywhere in this
	 * function; verify it is initialized before this call (the
	 * interrupt tag would otherwise be NULL).
	 */
880 	sc->sc_ih = pci_intr_establish_xname(sc->sc_pc, ihp[0], IPL_BIO,
881 	    mfii_intr, sc, DEVNAME(sc));
882 	if (sc->sc_ih == NULL) {
883 		aprint_error_dev(self, "can't establish interrupt");
884 		if (intrstr)
885 			aprint_error(" at %s", intrstr);
886 		aprint_error("\n");
887 		goto free_sgl;
888 	}
889 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
890 
891 	for (i = 0; i < sc->sc_info.mci_lds_present; i++)
892 		sc->sc_ld[i].ld_present = 1;
893 
894 	sc->sc_max256vd =
895 	    (sc->sc_info.mci_adapter_ops3 & MFI_INFO_AOPS3_SUPP_MAX_EXT_LDS) ?
896 	    true : false;
897 
898 	if (sc->sc_max256vd)
899 		aprint_verbose_dev(self, "Max 256 VD support\n");
900 
901 	memset(adapt, 0, sizeof(*adapt));
902 	adapt->adapt_dev = sc->sc_dev;
903 	adapt->adapt_nchannels = 1;
904 	/* keep a few commands for management */
905 	if (sc->sc_max_cmds > 4)
906 		adapt->adapt_openings = sc->sc_max_cmds - 4;
907 	else
908 		adapt->adapt_openings = sc->sc_max_cmds;
909 	adapt->adapt_max_periph = adapt->adapt_openings;
910 	adapt->adapt_request = mfii_scsipi_request;
911 	adapt->adapt_minphys = minphys;
912 	adapt->adapt_flags = SCSIPI_ADAPT_MPSAFE;
913 
914 	memset(chan, 0, sizeof(*chan));
915 	chan->chan_adapter = adapt;
916 	chan->chan_bustype = &scsi_sas_bustype;
917 	chan->chan_channel = 0;
918 	chan->chan_flags = 0;
919 	chan->chan_nluns = 8;
920 	chan->chan_ntargets = sc->sc_info.mci_max_lds;
921 	chan->chan_id = sc->sc_info.mci_max_lds;
922 
923 	mfii_rescan(sc->sc_dev, NULL, NULL);
924 
925 	if (mfii_aen_register(sc) != 0) {
926 		/* error printed by mfii_aen_register */
927 		goto intr_disestablish;
928 	}
929 
930 	memset(&mbox, 0, sizeof(mbox));
931 	if (sc->sc_max256vd)
932 		mbox.b[0] = 1;
933 	mutex_enter(&sc->sc_lock);
934 	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, &mbox, &sc->sc_ld_list,
935 	    sizeof(sc->sc_ld_list), MFII_DATA_IN, true) != 0) {
936 		mutex_exit(&sc->sc_lock);
937 		aprint_error_dev(self,
938 		    "getting list of logical disks failed\n");
939 		goto intr_disestablish;
940 	}
941 	mutex_exit(&sc->sc_lock);
942 	memset(sc->sc_target_lds, -1, sizeof(sc->sc_target_lds));
943 	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
944 		int target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
945 		sc->sc_target_lds[target] = i;
946 		sc->sc_ld[i].ld_target_id = target;
947 	}
948 
949 	/* enable interrupts */
950 	mfii_write(sc, MFI_OSTS, 0xffffffff);
951 	mfii_write(sc, MFI_OMSK, ~MFII_OSTS_INTR_VALID);
952 
953 #if NBIO > 0
954 	if (bio_register(sc->sc_dev, mfii_ioctl) != 0)
955 		panic("%s: controller registration failed", DEVNAME(sc));
956 #endif /* NBIO > 0 */
957 
958 	if (mfii_create_sensors(sc) != 0)
959 		aprint_error_dev(self, "unable to create sensors\n");
960 
961 	if (!pmf_device_register1(sc->sc_dev, mfii_suspend, mfii_resume,
962 	    mfii_shutdown))
963 		aprint_error_dev(self, "couldn't establish power handler\n");
964 	return;
965 intr_disestablish:
966 	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
967 free_sgl:
968 	mfii_dmamem_free(sc, sc->sc_sgl);
969 free_mfi:
970 	mfii_dmamem_free(sc, sc->sc_mfi);
971 free_requests:
972 	mfii_dmamem_free(sc, sc->sc_requests);
973 free_reply_postq:
974 	mfii_dmamem_free(sc, sc->sc_reply_postq);
975 free_sense:
976 	mfii_dmamem_free(sc, sc->sc_sense);
977 pci_unmap:
978 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
979 }
980 
#if 0
/*
 * Disabled OpenBSD code: per-target device-handle map maintained with
 * OpenBSD's SRP (shared reference pointer) API, which has no NetBSD
 * equivalent here.  Kept for reference for a future physical-disk
 * passthru implementation; note the 3-argument free() is OpenBSD's.
 */
struct srp_gc mfii_dev_handles_gc =
    SRP_GC_INITIALIZER(mfii_dev_handles_dtor, NULL);

static inline uint16_t
mfii_dev_handle(struct mfii_softc *sc, uint16_t target)
{
	struct srp_ref sr;
	uint16_t *map, handle;

	map = srp_enter(&sr, &sc->sc_pd->pd_dev_handles);
	handle = map[target];
	srp_leave(&sr);

	return (handle);
}

static int
mfii_dev_handles_update(struct mfii_softc *sc)
{
	struct mfii_ld_map *lm;
	uint16_t *dev_handles = NULL;
	int i;
	int rv = 0;

	lm = malloc(sizeof(*lm), M_TEMP, M_WAITOK|M_ZERO);

	rv = mfii_mgmt(sc, MR_DCMD_LD_MAP_GET_INFO, NULL, lm, sizeof(*lm),
	    MFII_DATA_IN, false);

	if (rv != 0) {
		rv = EIO;
		goto free_lm;
	}

	dev_handles = mallocarray(MFI_MAX_PD, sizeof(*dev_handles),
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < MFI_MAX_PD; i++)
		dev_handles[i] = lm->mlm_dev_handle[i].mdh_cur_handle;

	/* commit the updated info */
	sc->sc_pd->pd_timeout = lm->mlm_pd_timeout;
	srp_update_locked(&mfii_dev_handles_gc,
	    &sc->sc_pd->pd_dev_handles, dev_handles);

free_lm:
	free(lm, M_TEMP, sizeof(*lm));

	return (rv);
}

static void
mfii_dev_handles_dtor(void *null, void *v)
{
	uint16_t *dev_handles = v;

	free(dev_handles, M_DEVBUF, sizeof(*dev_handles) * MFI_MAX_PD);
}
#endif /* 0 */
1041 
/*
 * Detach the controller.  Children (scsibus) are detached first; then
 * sensors, bio(4) registration, firmware shutdown, interrupts and DMA
 * resources are released in roughly the reverse order of attachment.
 */
static int
mfii_detach(device_t self, int flags)
{
	struct mfii_softc *sc = device_private(self);
	int error;

	/* attach never completed past interrupt establishment */
	if (sc->sc_ih == NULL)
		return (0);

	if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
		return error;

	mfii_destroy_sensors(sc);
#if NBIO > 0
	bio_unregister(sc->sc_dev);
#endif
	/* tell the firmware to shut down, then mask all interrupts */
	mfii_shutdown(sc->sc_dev, 0);
	mfii_write(sc, MFI_OMSK, 0xffffffff);

	mfii_aen_unregister(sc);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	mfii_dmamem_free(sc, sc->sc_sgl);
	mfii_dmamem_free(sc, sc->sc_mfi);
	mfii_dmamem_free(sc, sc->sc_requests);
	mfii_dmamem_free(sc, sc->sc_reply_postq);
	mfii_dmamem_free(sc, sc->sc_sense);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);

	return (0);
}
1072 
1073 static int
mfii_rescan(device_t self,const char * ifattr,const int * locators)1074 mfii_rescan(device_t self, const char *ifattr, const int *locators)
1075 {
1076 	struct mfii_softc *sc = device_private(self);
1077 
1078 	if (sc->sc_child != NULL)
1079 		return 0;
1080 
1081 	sc->sc_child = config_found(self, &sc->sc_chan, scsiprint,
1082 	    CFARGS_NONE);
1083 	return 0;
1084 }
1085 
1086 static void
mfii_childdetached(device_t self,device_t child)1087 mfii_childdetached(device_t self, device_t child)
1088 {
1089 	struct mfii_softc *sc = device_private(self);
1090 
1091 	KASSERT(self == sc->sc_dev);
1092 	KASSERT(child == sc->sc_child);
1093 
1094 	if (child == sc->sc_child)
1095 		sc->sc_child = NULL;
1096 }
1097 
/*
 * pmf suspend handler.  Not implemented yet, so refuse the suspend
 * request rather than risk losing controller state.
 */
static bool
mfii_suspend(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}
1104 
/*
 * pmf resume handler.  Not implemented yet; report failure.
 */
static bool
mfii_resume(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}
1111 
1112 static bool
mfii_shutdown(device_t dev,int how)1113 mfii_shutdown(device_t dev, int how)
1114 {
1115 	struct mfii_softc	*sc = device_private(dev);
1116 	struct mfii_ccb *ccb;
1117 	union mfi_mbox		mbox;
1118 	bool rv = true;
1119 
1120 	memset(&mbox, 0, sizeof(mbox));
1121 
1122 	mutex_enter(&sc->sc_lock);
1123 	DNPRINTF(MFII_D_MISC, "%s: mfii_shutdown\n", DEVNAME(sc));
1124 	ccb = mfii_get_ccb(sc);
1125 	if (ccb == NULL)
1126 		return false;
1127 	mutex_enter(&sc->sc_ccb_mtx);
1128 	if (sc->sc_running) {
1129 		sc->sc_running = 0; /* prevent new commands */
1130 		mutex_exit(&sc->sc_ccb_mtx);
1131 #if 0 /* XXX why does this hang ? */
1132 		mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
1133 		mfii_scrub_ccb(ccb);
1134 		if (mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH, &mbox,
1135 		    NULL, 0, MFII_DATA_NONE, true)) {
1136 			aprint_error_dev(dev,
1137 			    "shutdown: cache flush failed\n");
1138 			rv = false;
1139 			goto fail;
1140 		}
1141 		printf("ok1\n");
1142 #endif
1143 		mbox.b[0] = 0;
1144 		mfii_scrub_ccb(ccb);
1145 		if (mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_SHUTDOWN, &mbox,
1146 		    NULL, 0, MFII_DATA_NONE, true)) {
1147 			aprint_error_dev(dev, "shutdown: "
1148 			    "firmware shutdown failed\n");
1149 			rv = false;
1150 			goto fail;
1151 		}
1152 	} else {
1153 		mutex_exit(&sc->sc_ccb_mtx);
1154 	}
1155 fail:
1156 	mfii_put_ccb(sc, ccb);
1157 	mutex_exit(&sc->sc_lock);
1158 	return rv;
1159 }
1160 
/* Register read function without retry */
static inline u_int32_t
mfii_read_wor(struct mfii_softc *sc, bus_size_t r)
{
	/* order this read after any preceding accesses to the window */
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
}
1169 
1170 static u_int32_t
mfii_read(struct mfii_softc * sc,bus_size_t r)1171 mfii_read(struct mfii_softc *sc, bus_size_t r)
1172 {
1173 	uint32_t rv;
1174 	int i = 0;
1175 
1176 	if ((sc->sc_iop->iop_flag & MFII_IOP_QUIRK_REGREAD) != 0) {
1177 		do {
1178 			rv = mfii_read_wor(sc, r);
1179 			i++;
1180 		} while ((rv == 0) && (i < 3));
1181 	} else
1182 		rv = mfii_read_wor(sc, r);
1183 
1184 	return rv;
1185 }
1186 
/*
 * Write a 32-bit register and make sure the write is flushed to the
 * device before subsequent accesses.
 */
static void
mfii_write(struct mfii_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
1194 
/*
 * Allocate, map and load a physically contiguous DMA buffer of the
 * given size, zeroed.  Returns NULL on failure; the goto chain unwinds
 * exactly the steps that succeeded, in reverse order.
 */
static struct mfii_dmamem *
mfii_dmamem_alloc(struct mfii_softc *sc, size_t size)
{
	struct mfii_dmamem *m;
	int nsegs;

	m = malloc(sizeof(*m), M_DEVBUF, M_WAITOK | M_ZERO);
	m->mdm_size = size;

	/* single segment: the buffer must be physically contiguous */
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->mdm_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &m->mdm_seg, nsegs, size, &m->mdm_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, m->mdm_map, m->mdm_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	memset(m->mdm_kva, 0, size);
	return (m);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
mdmfree:
	free(m, M_DEVBUF);

	return (NULL);
}
1234 
/*
 * Release a buffer obtained from mfii_dmamem_alloc(), undoing each
 * setup step in reverse order.
 */
static void
mfii_dmamem_free(struct mfii_softc *sc, struct mfii_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
	free(m, M_DEVBUF);
}
1244 
/*
 * Submit an MFI DCMD wrapped in an MPI passthru request.  The request
 * carries a single chain SGE pointing at the ccb's sense-buffer DVA,
 * where the MFI frame for this ccb lives (see mfii_dcmd_frame usage).
 */
static void
mfii_dcmd_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* raid context and SGE are laid out directly after the io frame */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);

	io->function = MFII_FUNCTION_PASSTHRU_IO;
	/* sgl_offset0 is in 32-bit words, chain_offset in 16-byte units */
	io->sgl_offset0 = (uint32_t *)sge - (uint32_t *)io;
	io->chain_offset = io->sgl_offset0 / 4;

	sge->sg_addr = htole64(ccb->ccb_sense_dva);
	sge->sg_len = htole32(sizeof(*ccb->ccb_sense));
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	mfii_start(sc, ccb);
}
1265 
/*
 * Set up asynchronous event notification (AEN): fetch the firmware
 * event-log info, allocate a buffer for event details and start the
 * first MR_DCMD_CTRL_EVENT_WAIT from the boot sequence number.  The
 * ccb and buffer are kept for the lifetime of the driver (handed to
 * mfii_aen_start()), so they are not released on success.
 */
static int
mfii_aen_register(struct mfii_softc *sc)
{
	struct mfi_evt_log_info mel;
	struct mfii_ccb *ccb;
	struct mfii_dmamem *mdm;
	int rv;

	ccb = mfii_get_ccb(sc);
	if (ccb == NULL) {
		printf("%s: unable to allocate ccb for aen\n", DEVNAME(sc));
		return (ENOMEM);
	}

	memset(&mel, 0, sizeof(mel));
	mfii_scrub_ccb(ccb);

	/* ask the firmware where its event log currently stands */
	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_EVENT_GET_INFO, NULL,
	    &mel, sizeof(mel), MFII_DATA_IN, true);
	if (rv != 0) {
		mfii_put_ccb(sc, ccb);
		aprint_error_dev(sc->sc_dev, "unable to get event info\n");
		return (EIO);
	}

	mdm = mfii_dmamem_alloc(sc, sizeof(struct mfi_evt_detail));
	if (mdm == NULL) {
		mfii_put_ccb(sc, ccb);
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate event data\n");
		return (ENOMEM);
	}

	/* replay all the events from boot */
	mfii_aen_start(sc, ccb, mdm, le32toh(mel.mel_boot_seq_num));

	return (0);
}
1304 
/*
 * (Re)arm the AEN command: build a MR_DCMD_CTRL_EVENT_WAIT MFI frame
 * that asks the firmware to complete when an event with sequence
 * number >= seq occurs, with the event detail DMAed into mdm.
 * Completion is routed to mfii_aen_done().
 */
static void
mfii_aen_start(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct mfii_dmamem *mdm, uint32_t seq)
{
	struct mfi_dcmd_frame *dcmd = mfii_dcmd_frame(ccb);
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	union mfi_sgl *sgl = &dcmd->mdf_sgl;
	union mfi_evt_class_locale mec;

	mfii_scrub_ccb(ccb);
	mfii_dcmd_scrub(ccb);
	memset(MFII_DMA_KVA(mdm), 0, MFII_DMA_LEN(mdm));

	ccb->ccb_cookie = mdm;
	ccb->ccb_done = mfii_aen_done;
	sc->sc_aen_ccb = ccb;

	/* subscribe to every locale at debug class and above */
	mec.mec_members.class = MFI_EVT_CLASS_DEBUG;
	mec.mec_members.reserved = 0;
	mec.mec_members.locale = htole16(MFI_EVT_LOCALE_ALL);

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_sg_count = 1;
	hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ | MFI_FRAME_SGL64);
	hdr->mfh_data_len = htole32(MFII_DMA_LEN(mdm));
	dcmd->mdf_opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	/* mbox: word 0 = starting sequence number, word 1 = class/locale */
	dcmd->mdf_mbox.w[0] = htole32(seq);
	dcmd->mdf_mbox.w[1] = htole32(mec.mec_word);
	sgl->sg64[0].addr = htole64(MFII_DMA_DVA(mdm));
	sgl->sg64[0].len = htole32(MFII_DMA_LEN(mdm));

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_PREREAD);

	mfii_dcmd_sync(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	mfii_dcmd_start(sc, ccb);
}
1342 
1343 static void
mfii_aen_done(struct mfii_softc * sc,struct mfii_ccb * ccb)1344 mfii_aen_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1345 {
1346 	KASSERT(sc->sc_aen_ccb == ccb);
1347 
1348 	/*
1349 	 * defer to a thread with KERNEL_LOCK so we can run autoconf
1350 	 * We shouldn't have more than one AEN command pending at a time,
1351 	 * so no need to lock
1352 	 */
1353 	if (sc->sc_running)
1354 		workqueue_enqueue(sc->sc_aen_wq, &sc->sc_aen_work, NULL);
1355 }
1356 
/*
 * Workqueue handler for AEN completions: sync the event buffer,
 * dispatch on the event code, then re-arm the AEN wait starting at the
 * next sequence number.
 */
static void
mfii_aen(struct work *wk, void *arg)
{
	struct mfii_softc *sc = arg;
	struct mfii_ccb *ccb = sc->sc_aen_ccb;
	struct mfii_dmamem *mdm = ccb->ccb_cookie;
	const struct mfi_evt_detail *med = MFII_DMA_KVA(mdm);

	mfii_dcmd_sync(sc, ccb,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_POSTREAD);

	DNPRINTF(MFII_D_MISC, "%s: %u %08x %02x %s\n", DEVNAME(sc),
	    le32toh(med->med_seq_num), le32toh(med->med_code),
	    med->med_arg_type, med->med_description);

	switch (le32toh(med->med_code)) {
	case MR_EVT_PD_INSERTED_EXT:
		/* only handle events carrying a PD address argument */
		if (med->med_arg_type != MR_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_insert(sc, &med->args.pd_address);
		break;
	case MR_EVT_PD_REMOVED_EXT:
		if (med->med_arg_type != MR_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_remove(sc, &med->args.pd_address);
		break;

	case MR_EVT_PD_STATE_CHANGE:
		if (med->med_arg_type != MR_EVT_ARGS_PD_STATE)
			break;

		mfii_aen_pd_state_change(sc, &med->args.pd_state);
		break;

	case MR_EVT_LD_CREATED:
	case MR_EVT_LD_DELETED:
		mfii_aen_ld_update(sc);
		break;

	default:
		break;
	}

	/* re-arm for the event following this one */
	mfii_aen_start(sc, ccb, mdm, le32toh(med->med_seq_num) + 1);
}
1406 
1407 static void
mfii_aen_pd_insert(struct mfii_softc * sc,const struct mfi_evtarg_pd_address * pd)1408 mfii_aen_pd_insert(struct mfii_softc *sc,
1409     const struct mfi_evtarg_pd_address *pd)
1410 {
1411 	printf("%s: physical disk inserted id %d enclosure %d\n", DEVNAME(sc),
1412 	    le16toh(pd->device_id), le16toh(pd->encl_id));
1413 }
1414 
1415 static void
mfii_aen_pd_remove(struct mfii_softc * sc,const struct mfi_evtarg_pd_address * pd)1416 mfii_aen_pd_remove(struct mfii_softc *sc,
1417     const struct mfi_evtarg_pd_address *pd)
1418 {
1419 	printf("%s: physical disk removed id %d enclosure %d\n", DEVNAME(sc),
1420 	    le16toh(pd->device_id), le16toh(pd->encl_id));
1421 }
1422 
/*
 * Physical-disk state-change event.  Currently ignored; this driver
 * does not track physical disk state.
 */
static void
mfii_aen_pd_state_change(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_state *state)
{
	return;
}
1429 
/*
 * Handle logical-drive creation/deletion events: re-read the LD list
 * from the firmware, diff it against sc_target_lds and attach/detach
 * sensors and scsipi targets accordingly.
 */
static void
mfii_aen_ld_update(struct mfii_softc *sc)
{
	union mfi_mbox mbox;
	int i, target, old, nld;
	int newlds[MFII_MAX_LD_EXT];

	memset(&mbox, 0, sizeof(mbox));
	/* mbox byte 0 selects the extended (256 VD) list format */
	if (sc->sc_max256vd)
		mbox.b[0] = 1;
	mutex_enter(&sc->sc_lock);
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, &mbox, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), MFII_DATA_IN, false) != 0) {
		mutex_exit(&sc->sc_lock);
		DNPRINTF(MFII_D_MISC,
		    "%s: getting list of logical disks failed\n", DEVNAME(sc));
		return;
	}
	mutex_exit(&sc->sc_lock);

	/* newlds[] maps target id -> index in the fresh LD list, -1 = absent */
	memset(newlds, -1, sizeof(newlds));

	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
		target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		DNPRINTF(MFII_D_MISC, "%s: target %d: state %d\n",
		    DEVNAME(sc), target, sc->sc_ld_list.mll_list[i].mll_state);
		newlds[target] = i;
		sc->sc_ld[i].ld_target_id = target;
	}

	for (i = 0; i < MFII_MAX_LD_EXT; i++) {
		old = sc->sc_target_lds[i];
		nld = newlds[i];

		/*
		 * NOTE(review): in this loop i is a target id while nld/old
		 * are LD-list indices, yet sc_ld[] is indexed by i here and
		 * by list index in the loop above, and the printf labels
		 * appear swapped ("logical drive %d ... (target %d)") —
		 * confirm the intended indexing against sc_ld[] users.
		 */
		if (old == -1 && nld != -1) {
			printf("%s: logical drive %d added (target %d)\n",
			    DEVNAME(sc), i, nld);
			sc->sc_ld[i].ld_present = 1;

			// XXX scsi_probe_target(sc->sc_scsibus, i);

			mfii_init_ld_sensor(sc, &sc->sc_ld_sensors[i], i);
			mfii_attach_sensor(sc, &sc->sc_ld_sensors[i]);
		} else if (nld == -1 && old != -1) {
			printf("%s: logical drive %d removed (target %d)\n",
			    DEVNAME(sc), i, old);
			sc->sc_ld[i].ld_present = 0;

			scsipi_target_detach(&sc->sc_chan, i, 0, DETACH_FORCE);
			sysmon_envsys_sensor_detach(sc->sc_sme,
			    &sc->sc_ld_sensors[i]);
		}
	}

	memcpy(sc->sc_target_lds, newlds, sizeof(sc->sc_target_lds));
}
1486 
/*
 * Tear down AEN state.  Not implemented: the outstanding EVENT_WAIT
 * command, its ccb and DMA buffer are currently leaked on detach.
 */
static void
mfii_aen_unregister(struct mfii_softc *sc)
{
	/* XXX */
}
1492 
/*
 * Perform a full adapter hard reset through the MPI diagnostic
 * register.  Returns 0 on success, 1 on failure.  The WRITESEQ key
 * sequence unlocks the diagnostic register; the polling loop allows
 * up to ~300 seconds for the reset bit to clear.
 */
int
mfii_reset_hard(struct mfii_softc *sc)
{
	uint16_t		i;

	mfii_write(sc, MFI_OSTS, 0);

	/* enable diagnostic register */
	mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_FLUSH);
	mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_1);
	mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_2);
	mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_3);
	mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_4);
	mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_5);
	mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_6);

	delay(100);

	if ((mfii_read(sc, MPII_HOSTDIAG) & MPII_HOSTDIAG_DWRE) == 0) {
		aprint_error_dev(sc->sc_dev,
		    "failed to enable diagnostic read/write\n");
		return(1);
	}

	/* reset ioc */
	mfii_write(sc, MPII_HOSTDIAG, MPII_HOSTDIAG_RESET_ADAPTER);

	/* 240 milliseconds */
	delay(240000);

	/* wait for the adapter to clear the reset bit */
	for (i = 0; i < 30000; i++) {
		if ((mfii_read(sc, MPII_HOSTDIAG) &
		    MPII_HOSTDIAG_RESET_ADAPTER) == 0)
			break;
		delay(10000);
	}
	if (i >= 30000) {
		aprint_error_dev(sc->sc_dev, "failed to reset device\n");
		return (1);
	}

	/* disable diagnostic register */
	mfii_write(sc, MPII_WRITESEQ, 0xff);

	return(0);
}
1539 
/*
 * Drive the firmware state machine until it reaches MFI_STATE_READY.
 * For each intermediate state a nudge is written to the inbound
 * doorbell where needed, then the state is polled for up to max_wait
 * seconds (in 100ms steps).  A FAULT state triggers one hard reset
 * attempt; a second fault is terminal.  Returns 0 on success, 1 on
 * failure.
 */
static int
mfii_transition_firmware(struct mfii_softc *sc)
{
	int32_t			fw_state, cur_state;
	int			max_wait, i, reset_on_fault = 1;

	fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;

	while (fw_state != MFI_STATE_READY) {
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			if (!reset_on_fault) {
				aprint_error_dev(sc->sc_dev,
				    "firmware fault\n");
				return (1);
			}
			aprint_verbose_dev(sc->sc_dev,
			    "firmware fault; attempting full device reset, "
			    "this can take some time\n");
			if (mfii_reset_hard(sc))
				return (1);
			max_wait = 20;
			/* only try the hard reset once */
			reset_on_fault = 0;
			break;
		case MFI_STATE_WAIT_HANDSHAKE:
			mfii_write(sc, MFI_SKINNY_IDB,
			    MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_BB_INIT:
			max_wait = 20;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_FW_INIT:
		case MFI_STATE_FW_INIT_2:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 40;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_HOTPLUG);
			max_wait = 10;
			break;
		default:
			printf("%s: unknown firmware state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
		/* poll for a state change, 100ms per iteration */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		} else {
			DPRINTF("%s: firmware state change %#x -> %#x after "
			    "%d iterations\n",
			    DEVNAME(sc), cur_state, fw_state, i);
		}
	}

	return (0);
}
1613 
/*
 * Fetch the controller information page (MR_DCMD_CTRL_GET_INFO) into
 * sc->sc_info and, when MFII_DEBUG is enabled, dump most of its fields.
 * Returns 0 on success or the mfii_mgmt() error.  Caller must hold
 * sc->sc_lock (required by mfii_mgmt).
 */
static int
mfii_get_info(struct mfii_softc *sc)
{
	int i, rv;

	rv = mfii_mgmt(sc, MR_DCMD_CTRL_GET_INFO, NULL, &sc->sc_info,
	    sizeof(sc->sc_info), MFII_DATA_IN, true);

	if (rv != 0)
		return (rv);

	/* everything below is debug output only */
	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
		DPRINTF("%s: active FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_image_component[i].mic_name,
		    sc->sc_info.mci_image_component[i].mic_version,
		    sc->sc_info.mci_image_component[i].mic_build_date,
		    sc->sc_info.mci_image_component[i].mic_build_time);
	}

	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
		DPRINTF("%s: pending FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_pending_image_component[i].mic_name,
		    sc->sc_info.mci_pending_image_component[i].mic_version,
		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
	}

	DPRINTF("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_arms,
	    sc->sc_info.mci_max_spans,
	    sc->sc_info.mci_max_arrays,
	    sc->sc_info.mci_max_lds,
	    sc->sc_info.mci_product_name);

	DPRINTF("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_serial_number,
	    sc->sc_info.mci_hw_present,
	    sc->sc_info.mci_current_fw_time,
	    sc->sc_info.mci_max_cmds,
	    sc->sc_info.mci_max_sg_elements);

	DPRINTF("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_request_size,
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_lds_degraded,
	    sc->sc_info.mci_lds_offline,
	    sc->sc_info.mci_pd_present);

	DPRINTF("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pd_disks_present,
	    sc->sc_info.mci_pd_disks_pred_failure,
	    sc->sc_info.mci_pd_disks_failed);

	DPRINTF("%s: nvram %d mem %d flash %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_nvram_size,
	    sc->sc_info.mci_memory_size,
	    sc->sc_info.mci_flash_size);

	DPRINTF("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ram_correctable_errors,
	    sc->sc_info.mci_ram_uncorrectable_errors,
	    sc->sc_info.mci_cluster_allowed,
	    sc->sc_info.mci_cluster_active);

	DPRINTF("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_strips_per_io,
	    sc->sc_info.mci_raid_levels,
	    sc->sc_info.mci_adapter_ops,
	    sc->sc_info.mci_ld_ops);

	DPRINTF("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_stripe_sz_ops.min,
	    sc->sc_info.mci_stripe_sz_ops.max,
	    sc->sc_info.mci_pd_ops,
	    sc->sc_info.mci_pd_mix_support);

	DPRINTF("%s: ecc_bucket %d pckg_prop %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ecc_bucket_count,
	    sc->sc_info.mci_package_version);

	DPRINTF("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_seq_num,
	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);

	DPRINTF("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_rebuild_rate,
	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
	    sc->sc_info.mci_properties.mcp_bgi_rate,
	    sc->sc_info.mci_properties.mcp_cc_rate);

	DPRINTF("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_recon_rate,
	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
	    sc->sc_info.mci_properties.mcp_spinup_delay,
	    sc->sc_info.mci_properties.mcp_cluster_enable);

	DPRINTF("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_coercion_mode,
	    sc->sc_info.mci_properties.mcp_alarm_enable,
	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);

	DPRINTF("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
	    sc->sc_info.mci_properties.mcp_expose_encl_devices);

	DPRINTF("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pci.mip_vendor,
	    sc->sc_info.mci_pci.mip_device,
	    sc->sc_info.mci_pci.mip_subvendor,
	    sc->sc_info.mci_pci.mip_subdevice);

	DPRINTF("%s: type %#x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_host.mih_type,
	    sc->sc_info.mci_host.mih_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0" PRIx64 " ",
		    sc->sc_info.mci_host.mih_port_addr[i]);
	DPRINTF("\n");

	DPRINTF("%s: type %.x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_device.mid_type,
	    sc->sc_info.mci_device.mid_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0" PRIx64 " ",
		    sc->sc_info.mci_device.mid_port_addr[i]);
	DPRINTF("\n");

	return (0);
}
1770 
/*
 * Submit a legacy MFI frame and busy-wait for its completion status.
 * Used during early init (MFI_CMD_INIT) when interrupts are not yet
 * usable.  Returns 0 on success, 1 on timeout (~5s).
 */
static int
mfii_mfa_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mfi_frame_header	*hdr = ccb->ccb_request;
	u_int64_t r;
	int to = 0, rv = 0;

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_mfa_poll called with cookie or done set");
#endif

	hdr->mfh_context = ccb->ccb_smid;
	/* firmware overwrites this when the command completes */
	hdr->mfh_cmd_status = MFI_STAT_INVALID_STATUS;
	hdr->mfh_flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	r = MFII_REQ_MFA(ccb->ccb_request_dva);
	memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));

	/*
	 * Even if the Aero card supports 32bit descriptor, 64bit descriptor
	 * access is required for MFI_CMD_INIT.
	 * Currently, mfii_mfa_poll() is called for MFI_CMD_INIT only.
	 */
	mfii_start64(sc, ccb);

	for (;;) {
		/* pull the status byte back from device-visible memory */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (hdr->mfh_cmd_status != MFI_STAT_INVALID_STATUS)
			break;

		if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
			    ccb->ccb_smid);
			ccb->ccb_flags |= MFI_CCB_F_ERR;
			rv = 1;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		delay(1000);
	}

	/* unload any data buffer that was mapped for this command */
	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap32,
		    0, ccb->ccb_dmamap32->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap32);
	}

	return (rv);
}
1831 
1832 static int
mfii_poll(struct mfii_softc * sc,struct mfii_ccb * ccb)1833 mfii_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
1834 {
1835 	void (*done)(struct mfii_softc *, struct mfii_ccb *);
1836 	void *cookie;
1837 	int rv = 1;
1838 
1839 	done = ccb->ccb_done;
1840 	cookie = ccb->ccb_cookie;
1841 
1842 	ccb->ccb_done = mfii_poll_done;
1843 	ccb->ccb_cookie = &rv;
1844 
1845 	mfii_start(sc, ccb);
1846 
1847 	do {
1848 		delay(10);
1849 		mfii_postq(sc);
1850 	} while (rv == 1);
1851 
1852 	ccb->ccb_cookie = cookie;
1853 	done(sc, ccb);
1854 
1855 	return (0);
1856 }
1857 
1858 static void
mfii_poll_done(struct mfii_softc * sc,struct mfii_ccb * ccb)1859 mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1860 {
1861 	int *rv = ccb->ccb_cookie;
1862 
1863 	*rv = 0;
1864 }
1865 
/*
 * Submit a command and sleep until it completes.  The ccb's cookie is
 * pointed at itself as an "in flight" marker; mfii_exec_done() clears
 * it and signals the condvar.
 */
static int
mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_exec called with cookie or done set");
#endif

	ccb->ccb_cookie = ccb;
	ccb->ccb_done = mfii_exec_done;

	mfii_start(sc, ccb);

	mutex_enter(&ccb->ccb_mtx);
	while (ccb->ccb_cookie != NULL)
		cv_wait(&ccb->ccb_cv, &ccb->ccb_mtx);
	mutex_exit(&ccb->ccb_mtx);

	return (0);
}
1886 
/*
 * Completion callback for mfii_exec(): clear the in-flight marker and
 * wake the sleeping submitter.
 */
static void
mfii_exec_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	mutex_enter(&ccb->ccb_mtx);
	ccb->ccb_cookie = NULL;
	cv_signal(&ccb->ccb_cv);
	mutex_exit(&ccb->ccb_mtx);
}
1895 
1896 static int
mfii_mgmt(struct mfii_softc * sc,uint32_t opc,const union mfi_mbox * mbox,void * buf,size_t len,mfii_direction_t dir,bool poll)1897 mfii_mgmt(struct mfii_softc *sc, uint32_t opc, const union mfi_mbox *mbox,
1898     void *buf, size_t len, mfii_direction_t dir, bool poll)
1899 {
1900 	struct mfii_ccb *ccb;
1901 	int rv;
1902 
1903 	KASSERT(mutex_owned(&sc->sc_lock));
1904 	if (!sc->sc_running)
1905 		return EAGAIN;
1906 
1907 	ccb = mfii_get_ccb(sc);
1908 	if (ccb == NULL)
1909 		return (ENOMEM);
1910 
1911 	mfii_scrub_ccb(ccb);
1912 	rv = mfii_do_mgmt(sc, ccb, opc, mbox, buf, len, dir, poll);
1913 	mfii_put_ccb(sc, ccb);
1914 
1915 	return (rv);
1916 }
1917 
/*
 * Build and execute an MFI DCMD management command on a pre-acquired
 * ccb.  The MFI frame lives in the ccb's MFI area and is referenced
 * from an MPI passthru request via a single chain SGE; data (if any)
 * is described by the MFI frame's own SGL through the 32-bit dmamap.
 * Returns 0 if the firmware reports MFI_STAT_OK, otherwise EIO/ENOMEM.
 */
static int
mfii_do_mgmt(struct mfii_softc *sc, struct mfii_ccb *ccb, uint32_t opc,
    const union mfi_mbox *mbox, void *buf, size_t len, mfii_direction_t dir,
    bool poll)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);
	struct mfi_dcmd_frame *dcmd = ccb->ccb_mfi;
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	int rv = EIO;

	/* interrupts are unavailable before autoconf completes */
	if (cold)
		poll = true;

	ccb->ccb_data = buf;
	ccb->ccb_len = len;
	ccb->ccb_direction = dir;
	switch (dir) {
	case MFII_DATA_IN:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ);
		break;
	case MFII_DATA_OUT:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_WRITE);
		break;
	case MFII_DATA_NONE:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_NONE);
		break;
	}

	/* map the data buffer into the MFI frame's SGL */
	if (mfii_load_mfa(sc, ccb, &dcmd->mdf_sgl, poll) != 0) {
		rv = ENOMEM;
		goto done;
	}

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_data_len = htole32(len);
	hdr->mfh_sg_count = ccb->ccb_dmamap32->dm_nsegs;
	KASSERT(!ccb->ccb_dma64);

	dcmd->mdf_opcode = opc;
	/* handle special opcodes */
	if (mbox != NULL)
		memcpy(&dcmd->mdf_mbox, mbox, sizeof(dcmd->mdf_mbox));

	io->function = MFII_FUNCTION_PASSTHRU_IO;
	/* sgl_offset0 in 32-bit words, chain_offset in 16-byte units */
	io->sgl_offset0 = ((u_int8_t *)sge - (u_int8_t *)io) / 4;
	io->chain_offset = ((u_int8_t *)sge - (u_int8_t *)io) / 16;

	/* chain SGE points at the MFI frame itself */
	sge->sg_addr = htole64(ccb->ccb_mfi_dva);
	sge->sg_len = htole32(MFI_FRAME_SIZE);
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	if (poll) {
		ccb->ccb_done = mfii_empty_done;
		mfii_poll(sc, ccb);
	} else
		mfii_exec(sc, ccb);

	if (hdr->mfh_cmd_status == MFI_STAT_OK) {
		rv = 0;
	}

done:
	return (rv);
}
1988 
/*
 * No-op completion handler used when a polled command needs no
 * post-processing; completion is observed by the poller itself.
 */
static void
mfii_empty_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
}
1994 
/*
 * Map a management command's data buffer with the 32-bit DMA tag and
 * fill the MFI frame's 32-bit SG list at sglp.  Returns 0 on success,
 * 1 if the dmamap could not be loaded.
 */
static int
mfii_load_mfa(struct mfii_softc *sc, struct mfii_ccb *ccb,
    void *sglp, int nosleep)
{
	union mfi_sgl *sgl = sglp;
	bus_dmamap_t dmap = ccb->ccb_dmamap32;
	int error;
	int i;

	/* MFI frames carry 32-bit addresses; never use the 64-bit map here */
	KASSERT(!ccb->ccb_dma64);
	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* one 32-bit SGE per DMA segment */
	for (i = 0; i < dmap->dm_nsegs; i++) {
		sgl->sg32[i].addr = htole32(dmap->dm_segs[i].ds_addr);
		sgl->sg32[i].len = htole32(dmap->dm_segs[i].ds_len);
	}

	/* make the buffer visible to the device before posting the command */
	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	return (0);
}
2027 
2028 static void
mfii_start(struct mfii_softc * sc,struct mfii_ccb * ccb)2029 mfii_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
2030 {
2031 
2032 	mfii_start_common(sc, ccb,
2033 	    ((sc->sc_iop_flag & MFII_IOP_DESC_32BIT) != 0) ? true : false);
2034 }
2035 
2036 static void
mfii_start64(struct mfii_softc * sc,struct mfii_ccb * ccb)2037 mfii_start64(struct mfii_softc *sc, struct mfii_ccb *ccb)
2038 {
2039 
2040 	mfii_start_common(sc, ccb, false);
2041 }
2042 
/*
 * Hand a prepared request descriptor to the firmware.  The descriptor is
 * written to the inbound queue port either as a single 32-bit write, a
 * single atomic 64-bit write (LP64), or as a locked pair of 32-bit writes
 * (low then high) so the two halves are never interleaved with another
 * CPU's post.
 */
static void
mfii_start_common(struct mfii_softc *sc, struct mfii_ccb *ccb, bool do32)
{
	uint32_t *r = (uint32_t *)&ccb->ccb_req;

	/* flush the request frame to memory before ringing the doorbell */
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (do32)
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_ISQP, r[0]);
	else {
#if defined(__LP64__)
		uint64_t buf;

		buf = ((uint64_t)r[1] << 32) | r[0];
		bus_space_write_8(sc->sc_iot, sc->sc_ioh, MFI_IQPL, buf);
#else
		/* two 32-bit writes must be posted as one unit */
		mutex_enter(&sc->sc_post_mtx);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_IQPL, r[0]);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_IQPH, r[1]);
		bus_space_barrier(sc->sc_iot, sc->sc_ioh,
		    MFI_IQPL, 8, BUS_SPACE_BARRIER_WRITE);
		mutex_exit(&sc->sc_post_mtx);
#endif
	}
}
2070 
/*
 * Common completion path: resync all DMA resources touched by the
 * command (request frame, chained SGL, data buffer), unload the data
 * map, and invoke the ccb's completion callback.
 */
static void
mfii_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* only commands that overflowed the inline SGEs used the SGL area */
	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_POSTWRITE);
	}

	/* the data buffer was mapped with either the 64- or 32-bit tag */
	if (ccb->ccb_dma64) {
		KASSERT(ccb->ccb_len > 0);
		bus_dmamap_sync(sc->sc_dmat64, ccb->ccb_dmamap64,
		    0, ccb->ccb_dmamap64->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat64, ccb->ccb_dmamap64);
	} else if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap32,
		    0, ccb->ccb_dmamap32->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap32);
	}

	ccb->ccb_done(sc, ccb);
}
2103 
2104 static int
mfii_initialise_firmware(struct mfii_softc * sc)2105 mfii_initialise_firmware(struct mfii_softc *sc)
2106 {
2107 	struct mpii_msg_iocinit_request *iiq;
2108 	struct mfii_dmamem *m;
2109 	struct mfii_ccb *ccb;
2110 	struct mfi_init_frame *init;
2111 	int rv;
2112 
2113 	m = mfii_dmamem_alloc(sc, sizeof(*iiq));
2114 	if (m == NULL)
2115 		return (1);
2116 
2117 	iiq = MFII_DMA_KVA(m);
2118 	memset(iiq, 0, sizeof(*iiq));
2119 
2120 	iiq->function = MPII_FUNCTION_IOC_INIT;
2121 	iiq->whoinit = MPII_WHOINIT_HOST_DRIVER;
2122 
2123 	iiq->msg_version_maj = 0x02;
2124 	iiq->msg_version_min = 0x00;
2125 	iiq->hdr_version_unit = 0x10;
2126 	iiq->hdr_version_dev = 0x0;
2127 
2128 	iiq->system_request_frame_size = htole16(MFII_REQUEST_SIZE / 4);
2129 
2130 	iiq->reply_descriptor_post_queue_depth =
2131 	    htole16(sc->sc_reply_postq_depth);
2132 	iiq->reply_free_queue_depth = htole16(0);
2133 
2134 	iiq->sense_buffer_address_high = htole32(
2135 	    MFII_DMA_DVA(sc->sc_sense) >> 32);
2136 
2137 	iiq->reply_descriptor_post_queue_address_lo =
2138 	    htole32(MFII_DMA_DVA(sc->sc_reply_postq));
2139 	iiq->reply_descriptor_post_queue_address_hi =
2140 	    htole32(MFII_DMA_DVA(sc->sc_reply_postq) >> 32);
2141 
2142 	iiq->system_request_frame_base_address_lo =
2143 	    htole32(MFII_DMA_DVA(sc->sc_requests));
2144 	iiq->system_request_frame_base_address_hi =
2145 	    htole32(MFII_DMA_DVA(sc->sc_requests) >> 32);
2146 
2147 	iiq->timestamp = htole64(time_uptime);
2148 
2149 	ccb = mfii_get_ccb(sc);
2150 	if (ccb == NULL) {
2151 		/* shouldn't ever run out of ccbs during attach */
2152 		return (1);
2153 	}
2154 	mfii_scrub_ccb(ccb);
2155 	init = ccb->ccb_request;
2156 
2157 	init->mif_header.mfh_cmd = MFI_CMD_INIT;
2158 	init->mif_header.mfh_data_len = htole32(sizeof(*iiq));
2159 	init->mif_qinfo_new_addr_lo = htole32(MFII_DMA_DVA(m));
2160 	init->mif_qinfo_new_addr_hi = htole32((uint64_t)MFII_DMA_DVA(m) >> 32);
2161 
2162 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
2163 	    0, MFII_DMA_LEN(sc->sc_reply_postq),
2164 	    BUS_DMASYNC_PREREAD);
2165 
2166 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
2167 	    0, sizeof(*iiq), BUS_DMASYNC_PREREAD);
2168 
2169 	rv = mfii_mfa_poll(sc, ccb);
2170 
2171 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
2172 	    0, sizeof(*iiq), BUS_DMASYNC_POSTREAD);
2173 
2174 	mfii_put_ccb(sc, ccb);
2175 	mfii_dmamem_free(sc, m);
2176 
2177 	return (rv);
2178 }
2179 
2180 static int
mfii_my_intr(struct mfii_softc * sc)2181 mfii_my_intr(struct mfii_softc *sc)
2182 {
2183 	u_int32_t status;
2184 
2185 	status = mfii_read(sc, MFI_OSTS);
2186 
2187 	DNPRINTF(MFII_D_INTR, "%s: intr status 0x%x\n", DEVNAME(sc), status);
2188 	if (ISSET(status, 0x1)) {
2189 		mfii_write(sc, MFI_OSTS, status);
2190 		return (1);
2191 	}
2192 
2193 	return (ISSET(status, MFII_OSTS_INTR_VALID) ? 1 : 0);
2194 }
2195 
/* Interrupt handler: drain the reply post queue if the intr is ours. */
static int
mfii_intr(void *arg)
{
	struct mfii_softc *sc = arg;

	if (mfii_my_intr(sc) == 0)
		return (0);

	mfii_postq(sc);
	return (1);
}
2208 
2209 static void
mfii_postq(struct mfii_softc * sc)2210 mfii_postq(struct mfii_softc *sc)
2211 {
2212 	struct mfii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
2213 	struct mpii_reply_descr *postq = MFII_DMA_KVA(sc->sc_reply_postq);
2214 	struct mpii_reply_descr *rdp;
2215 	struct mfii_ccb *ccb;
2216 	int rpi = 0;
2217 
2218 	mutex_enter(&sc->sc_reply_postq_mtx);
2219 
2220 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
2221 	    0, MFII_DMA_LEN(sc->sc_reply_postq),
2222 	    BUS_DMASYNC_POSTREAD);
2223 
2224 	for (;;) {
2225 		rdp = &postq[sc->sc_reply_postq_index];
2226 		DNPRINTF(MFII_D_INTR,
2227 		    "%s: mfii_postq index %d flags 0x%x data 0x%x\n",
2228 		    DEVNAME(sc), sc->sc_reply_postq_index, rdp->reply_flags,
2229 			rdp->data == 0xffffffff);
2230 		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
2231 		    MPII_REPLY_DESCR_UNUSED)
2232 			break;
2233 		if (rdp->data == 0xffffffff) {
2234 			/*
2235 			 * ioc is still writing to the reply post queue
2236 			 * race condition - bail!
2237 			 */
2238 			break;
2239 		}
2240 
2241 		ccb = &sc->sc_ccb[le16toh(rdp->smid) - 1];
2242 		SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
2243 		memset(rdp, 0xff, sizeof(*rdp));
2244 
2245 		sc->sc_reply_postq_index++;
2246 		sc->sc_reply_postq_index %= sc->sc_reply_postq_depth;
2247 		rpi = 1;
2248 	}
2249 
2250 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
2251 	    0, MFII_DMA_LEN(sc->sc_reply_postq),
2252 	    BUS_DMASYNC_PREREAD);
2253 
2254 	if (rpi)
2255 		mfii_write(sc, MFII_RPI, sc->sc_reply_postq_index);
2256 
2257 	mutex_exit(&sc->sc_reply_postq_mtx);
2258 
2259 	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
2260 		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
2261 		mfii_done(sc, ccb);
2262 	}
2263 }
2264 
2265 static void
mfii_scsipi_request(struct scsipi_channel * chan,scsipi_adapter_req_t req,void * arg)2266 mfii_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
2267     void *arg)
2268 {
2269 	struct scsipi_periph    *periph;
2270 	struct scsipi_xfer	*xs;
2271 	struct scsipi_adapter   *adapt = chan->chan_adapter;
2272 	struct mfii_softc	*sc = device_private(adapt->adapt_dev);
2273 	struct mfii_ccb *ccb;
2274 	int timeout;
2275 	int target;
2276 
2277 	switch (req) {
2278 		case ADAPTER_REQ_GROW_RESOURCES:
2279 		/* Not supported. */
2280 		return;
2281 	case ADAPTER_REQ_SET_XFER_MODE:
2282 	{
2283 		struct scsipi_xfer_mode *xm = arg;
2284 		xm->xm_mode = PERIPH_CAP_TQING;
2285 		xm->xm_period = 0;
2286 		xm->xm_offset = 0;
2287 		scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, xm);
2288 		return;
2289 	}
2290 	case ADAPTER_REQ_RUN_XFER:
2291 		break;
2292 	}
2293 
2294 	xs = arg;
2295 	periph = xs->xs_periph;
2296 	target = periph->periph_target;
2297 
2298 	if (target >= MFII_MAX_LD_EXT || !sc->sc_ld[target].ld_present ||
2299 	    periph->periph_lun != 0) {
2300 		xs->error = XS_SELTIMEOUT;
2301 		scsipi_done(xs);
2302 		return;
2303 	}
2304 
2305 	if ((xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_10 ||
2306 	    xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_16) && sc->sc_bbuok) {
2307 		/* the cache is stable storage, don't flush */
2308 		xs->error = XS_NOERROR;
2309 		xs->status = SCSI_OK;
2310 		xs->resid = 0;
2311 		scsipi_done(xs);
2312 		return;
2313 	}
2314 
2315 	ccb = mfii_get_ccb(sc);
2316 	if (ccb == NULL) {
2317 		xs->error = XS_RESOURCE_SHORTAGE;
2318 		scsipi_done(xs);
2319 		return;
2320 	}
2321 	mfii_scrub_ccb(ccb);
2322 	ccb->ccb_cookie = xs;
2323 	ccb->ccb_done = mfii_scsi_cmd_done;
2324 	ccb->ccb_data = xs->data;
2325 	ccb->ccb_len = xs->datalen;
2326 
2327 	timeout = mstohz(xs->timeout);
2328 	if (timeout == 0)
2329 		timeout = 1;
2330 	callout_reset(&xs->xs_callout, timeout, mfii_scsi_cmd_tmo, ccb);
2331 
2332 	switch (xs->cmd->opcode) {
2333 	case SCSI_READ_6_COMMAND:
2334 	case READ_10:
2335 	case READ_12:
2336 	case READ_16:
2337 	case SCSI_WRITE_6_COMMAND:
2338 	case WRITE_10:
2339 	case WRITE_12:
2340 	case WRITE_16:
2341 		if (mfii_scsi_cmd_io(sc, ccb, xs) != 0)
2342 			goto stuffup;
2343 		break;
2344 
2345 	default:
2346 		if (mfii_scsi_cmd_cdb(sc, ccb, xs) != 0)
2347 			goto stuffup;
2348 		break;
2349 	}
2350 
2351 	xs->error = XS_NOERROR;
2352 	xs->resid = 0;
2353 
2354 	DNPRINTF(MFII_D_CMD, "%s: start io %d cmd %d\n", DEVNAME(sc), target,
2355 	    xs->cmd->opcode);
2356 
2357 	if (xs->xs_control & XS_CTL_POLL) {
2358 		if (mfii_poll(sc, ccb) != 0)
2359 			goto stuffup;
2360 		return;
2361 	}
2362 
2363 	mfii_start(sc, ccb);
2364 
2365 	return;
2366 
2367 stuffup:
2368 	xs->error = XS_DRIVER_STUFFUP;
2369 	scsipi_done(xs);
2370 	mfii_put_ccb(sc, ccb);
2371 }
2372 
/*
 * Completion handler for SCSI commands: translate the RAID context
 * status into a scsipi error code and finish the xfer.
 */
static void
mfii_scsi_cmd_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct scsipi_xfer *xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);

	/*
	 * If the timeout already fired, mfii_scsi_cmd_tmo() owns this
	 * ccb and the abort path will complete the xfer; bail out to
	 * avoid completing it twice.
	 */
	if (callout_stop(&xs->xs_callout) != 0)
		return;

	switch (ctx->status) {
	case MFI_STAT_OK:
		break;

	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		/* firmware left sense data in the ccb's sense buffer */
		xs->error = XS_SENSE;
		memset(&xs->sense, 0, sizeof(xs->sense));
		memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
		break;

	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	scsipi_done(xs);
	mfii_put_ccb(sc, ccb);
}
2406 
/*
 * Build a fast-path LD I/O request (reads/writes) for the firmware.
 * Returns 0 on success, 1 if the data buffer could not be mapped.
 */
static int
mfii_scsi_cmd_io(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	int segs, target;

	/* map the scsipi target number to the firmware's LD target id */
	target = sc->sc_ld[periph->periph_target].ld_target_id;
	io->dev_handle = htole16(target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* SGEs start right after the RAID context, in 32-bit words */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->type_nseg = sc->sc_iop->ldio_ctx_type_nseg;
	ctx->timeout_value = htole16(0x14); /* XXX */
	ctx->reg_lock_flags = htole16(sc->sc_iop->ldio_ctx_reg_lock_flags);
	ctx->virtual_disk_target_id = htole16(target);

	/* build the 64-bit SGE list starting right after the context */
	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (1);

	KASSERT(ccb->ccb_len == 0 || ccb->ccb_dma64);
	segs = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	/* the SGE count lives in a different context field per IOP gen */
	switch (sc->sc_iop->num_sge_loc) {
	case MFII_IOP_NUM_SGE_LOC_ORIG:
		ctx->num_sge = segs;
		break;
	case MFII_IOP_NUM_SGE_LOC_35:
		/* 12 bit field, but we're only using the lower 8 */
		ctx->span_arm = segs;
		break;
	}

	ccb->ccb_req.flags = sc->sc_iop->ldio_req_type;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	return (0);
}
2467 
/*
 * Build a generic CDB pass-through request for a logical drive.
 * Returns 0 on success, 1 if the data buffer could not be mapped.
 */
static int
mfii_scsi_cmd_cdb(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	int target;

	/* map the scsipi target number to the firmware's LD target id */
	target = sc->sc_ld[periph->periph_target].ld_target_id;
	io->dev_handle = htole16(target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* SGEs start right after the RAID context, in 32-bit words */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(periph->periph_lun);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(target);

	/* build the 64-bit SGE list starting right after the context */
	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (1);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	KASSERT(ccb->ccb_len == 0 || ccb->ccb_dma64);

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	return (0);
}
2517 
#if 0
/*
 * NOTE(review): disabled physical-disk (JBOD/syspd) pass-through support
 * carried over from the OpenBSD driver.  This still uses OpenBSD's
 * scsi_link interface and would need porting to scsipi before it could
 * be enabled on NetBSD.
 */
void
mfii_pd_scsi_cmd(struct scsipi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	// XXX timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	xs->error = mfii_pd_scsi_cmd_cdb(sc, xs);
	if (xs->error != XS_NOERROR)
		goto done;

	xs->resid = 0;

	if (ISSET(xs->xs_control, XS_CTL_POLL)) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	// XXX timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
done:
	scsi_done(xs);
}

int
mfii_pd_scsi_probe(struct scsi_link *link)
{
	struct mfii_softc *sc = link->adapter_softc;
	struct mfi_pd_details mpd;
	union mfi_mbox mbox;
	int rv;

	if (link->lun > 0)
		return (0);

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = htole16(link->target);

	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, &mpd, sizeof(mpd),
	    MFII_DATA_IN, true);
	if (rv != 0)
		return (EIO);

	if (mpd.mpd_fw_state != htole16(MFI_PD_SYSTEM))
		return (ENXIO);

	return (0);
}

int
mfii_pd_scsi_cmd_cdb(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	uint16_t dev_handle;

	dev_handle = mfii_dev_handle(sc, link->target);
	if (dev_handle == htole16(0xffff))
		return (XS_SELTIMEOUT);

	io->dev_handle = dev_handle;
	io->function = 0;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(link->target);
	ctx->raid_flags = MFII_RAID_CTX_IO_TYPE_SYSPD;
	ctx->timeout_value = sc->sc_pd->pd_timeout;

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (XS_DRIVER_STUFFUP);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	KASSERT(ccb->ccb_dma64);

	ccb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);
	ccb->ccb_req.dev_handle = dev_handle;

	return (XS_NOERROR);
}
#endif
2638 
/*
 * Map a command's data buffer with the 64-bit DMA tag and build the
 * MPI2 SGE list at sglp.  SGEs that fit inside the request frame are
 * placed inline; if there are more segments than fit, the last inline
 * slot becomes a chain element pointing at the ccb's external SGL area.
 * Returns 0 on success, 1 if the dmamap could not be loaded.
 */
static int
mfii_load_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb, void *sglp,
    int nosleep)
{
	struct mpii_msg_request *req = ccb->ccb_request;
	struct mfii_sge *sge = NULL, *nsge = sglp;
	struct mfii_sge *ce = NULL;
	bus_dmamap_t dmap = ccb->ccb_dmamap64;
	u_int space;
	int i;

	int error;

	if (ccb->ccb_len == 0)
		return (0);

	/* remember which map to sync/unload in mfii_done() */
	ccb->ccb_dma64 = true;
	error = bus_dmamap_load(sc->sc_dmat64, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* number of SGE slots left inside the request frame */
	space = (MFII_REQUEST_SIZE - ((u_int8_t *)nsge - (u_int8_t *)req)) /
	    sizeof(*nsge);
	if (dmap->dm_nsegs > space) {
		/* reserve the last inline slot for the chain element */
		space--;

		ccb->ccb_sgl_len = (dmap->dm_nsegs - space) * sizeof(*nsge);
		memset(ccb->ccb_sgl, 0, ccb->ccb_sgl_len);

		ce = nsge + space;
		ce->sg_addr = htole64(ccb->ccb_sgl_dva);
		ce->sg_len = htole32(ccb->ccb_sgl_len);
		ce->sg_flags = sc->sc_iop->sge_flag_chain;

		req->chain_offset = ((u_int8_t *)ce - (u_int8_t *)req) / 16;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		/* jump from the inline list to the external SGL area */
		if (nsge == ce)
			nsge = ccb->ccb_sgl;

		sge = nsge;

		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_flags = MFII_SGE_ADDR_SYSTEM;

		nsge = sge + 1;
	}
	/* mark the last SGE as end-of-list */
	sge->sg_flags |= sc->sc_iop->sge_flag_eol;

	bus_dmamap_sync(sc->sc_dmat64, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/* flush the external SGL area so the device sees the chain */
	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_PREWRITE);
	}

	return (0);
}
2706 
2707 static void
mfii_scsi_cmd_tmo(void * p)2708 mfii_scsi_cmd_tmo(void *p)
2709 {
2710 	struct mfii_ccb *ccb = p;
2711 	struct mfii_softc *sc = ccb->ccb_sc;
2712 	bool start_abort;
2713 
2714 	printf("%s: cmd timeout ccb %p\n", DEVNAME(sc), p);
2715 
2716 	mutex_enter(&sc->sc_abort_mtx);
2717 	start_abort = (SIMPLEQ_FIRST(&sc->sc_abort_list) == 0);
2718 	SIMPLEQ_INSERT_TAIL(&sc->sc_abort_list, ccb, ccb_link);
2719 	if (start_abort)
2720 		workqueue_enqueue(sc->sc_abort_wq, &sc->sc_abort_work, NULL);
2721 	mutex_exit(&sc->sc_abort_mtx);
2722 }
2723 
2724 static void
mfii_abort_task(struct work * wk,void * scp)2725 mfii_abort_task(struct work *wk, void *scp)
2726 {
2727 	struct mfii_softc *sc = scp;
2728 	struct mfii_ccb *list;
2729 
2730 	mutex_enter(&sc->sc_abort_mtx);
2731 	list = SIMPLEQ_FIRST(&sc->sc_abort_list);
2732 	SIMPLEQ_INIT(&sc->sc_abort_list);
2733 	mutex_exit(&sc->sc_abort_mtx);
2734 
2735 	while (list != NULL) {
2736 		struct mfii_ccb *ccb = list;
2737 		struct scsipi_xfer *xs = ccb->ccb_cookie;
2738 		struct scsipi_periph *periph = xs->xs_periph;
2739 		struct mfii_ccb *accb;
2740 
2741 		list = SIMPLEQ_NEXT(ccb, ccb_link);
2742 
2743 		if (!sc->sc_ld[periph->periph_target].ld_present) {
2744 			/* device is gone */
2745 			xs->error = XS_SELTIMEOUT;
2746 			scsipi_done(xs);
2747 			mfii_put_ccb(sc, ccb);
2748 			continue;
2749 		}
2750 
2751 		accb = mfii_get_ccb(sc);
2752 		mfii_scrub_ccb(accb);
2753 		mfii_abort(sc, accb, periph->periph_target, ccb->ccb_smid,
2754 		    MPII_SCSI_TASK_ABORT_TASK,
2755 		    htole32(MFII_TASK_MGMT_FLAGS_PD));
2756 
2757 		accb->ccb_cookie = ccb;
2758 		accb->ccb_done = mfii_scsi_cmd_abort_done;
2759 
2760 		mfii_start(sc, accb);
2761 	}
2762 }
2763 
2764 static void
mfii_abort(struct mfii_softc * sc,struct mfii_ccb * accb,uint16_t dev_handle,uint16_t smid,uint8_t type,uint32_t flags)2765 mfii_abort(struct mfii_softc *sc, struct mfii_ccb *accb, uint16_t dev_handle,
2766     uint16_t smid, uint8_t type, uint32_t flags)
2767 {
2768 	struct mfii_task_mgmt *msg;
2769 	struct mpii_msg_scsi_task_request *req;
2770 
2771 	msg = accb->ccb_request;
2772 	req = &msg->mpii_request;
2773 	req->dev_handle = dev_handle;
2774 	req->function = MPII_FUNCTION_SCSI_TASK_MGMT;
2775 	req->task_type = type;
2776 	req->task_mid = htole16( smid);
2777 	msg->flags = flags;
2778 
2779 	accb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
2780 	accb->ccb_req.smid = le16toh(accb->ccb_smid);
2781 }
2782 
2783 static void
mfii_scsi_cmd_abort_done(struct mfii_softc * sc,struct mfii_ccb * accb)2784 mfii_scsi_cmd_abort_done(struct mfii_softc *sc, struct mfii_ccb *accb)
2785 {
2786 	struct mfii_ccb *ccb = accb->ccb_cookie;
2787 	struct scsipi_xfer *xs = ccb->ccb_cookie;
2788 
2789 	/* XXX check accb completion? */
2790 
2791 	mfii_put_ccb(sc, accb);
2792 	printf("%s: cmd aborted ccb %p\n", DEVNAME(sc), ccb);
2793 
2794 	xs->error = XS_TIMEOUT;
2795 	scsipi_done(xs);
2796 	mfii_put_ccb(sc, ccb);
2797 }
2798 
2799 static struct mfii_ccb *
mfii_get_ccb(struct mfii_softc * sc)2800 mfii_get_ccb(struct mfii_softc *sc)
2801 {
2802 	struct mfii_ccb *ccb;
2803 
2804 	mutex_enter(&sc->sc_ccb_mtx);
2805 	if (!sc->sc_running) {
2806 		ccb = NULL;
2807 	} else {
2808 		ccb = SIMPLEQ_FIRST(&sc->sc_ccb_freeq);
2809 		if (ccb != NULL)
2810 			SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
2811 	}
2812 	mutex_exit(&sc->sc_ccb_mtx);
2813 	return (ccb);
2814 }
2815 
2816 static void
mfii_scrub_ccb(struct mfii_ccb * ccb)2817 mfii_scrub_ccb(struct mfii_ccb *ccb)
2818 {
2819 	ccb->ccb_cookie = NULL;
2820 	ccb->ccb_done = NULL;
2821 	ccb->ccb_flags = 0;
2822 	ccb->ccb_data = NULL;
2823 	ccb->ccb_direction = MFII_DATA_NONE;
2824 	ccb->ccb_dma64 = false;
2825 	ccb->ccb_len = 0;
2826 	ccb->ccb_sgl_len = 0;
2827 	memset(&ccb->ccb_req, 0, sizeof(ccb->ccb_req));
2828 	memset(ccb->ccb_request, 0, MFII_REQUEST_SIZE);
2829 	memset(ccb->ccb_mfi, 0, MFI_FRAME_SIZE);
2830 }
2831 
2832 static void
mfii_put_ccb(struct mfii_softc * sc,struct mfii_ccb * ccb)2833 mfii_put_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb)
2834 {
2835 	mutex_enter(&sc->sc_ccb_mtx);
2836 	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
2837 	mutex_exit(&sc->sc_ccb_mtx);
2838 }
2839 
2840 static int
mfii_init_ccb(struct mfii_softc * sc)2841 mfii_init_ccb(struct mfii_softc *sc)
2842 {
2843 	struct mfii_ccb *ccb;
2844 	u_int8_t *request = MFII_DMA_KVA(sc->sc_requests);
2845 	u_int8_t *mfi = MFII_DMA_KVA(sc->sc_mfi);
2846 	u_int8_t *sense = MFII_DMA_KVA(sc->sc_sense);
2847 	u_int8_t *sgl = MFII_DMA_KVA(sc->sc_sgl);
2848 	u_int i;
2849 	int error;
2850 
2851 	sc->sc_ccb = malloc(sc->sc_max_cmds * sizeof(struct mfii_ccb),
2852 	    M_DEVBUF, M_WAITOK|M_ZERO);
2853 
2854 	for (i = 0; i < sc->sc_max_cmds; i++) {
2855 		ccb = &sc->sc_ccb[i];
2856 		ccb->ccb_sc = sc;
2857 
2858 		/* create a dma map for transfer */
2859 		error = bus_dmamap_create(sc->sc_dmat,
2860 		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2861 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap32);
2862 		if (error) {
2863 			printf("%s: cannot create ccb dmamap32 (%d)\n",
2864 			    DEVNAME(sc), error);
2865 			goto destroy;
2866 		}
2867 		error = bus_dmamap_create(sc->sc_dmat64,
2868 		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2869 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap64);
2870 		if (error) {
2871 			printf("%s: cannot create ccb dmamap64 (%d)\n",
2872 			    DEVNAME(sc), error);
2873 			goto destroy32;
2874 		}
2875 
2876 		/* select i + 1'th request. 0 is reserved for events */
2877 		ccb->ccb_smid = i + 1;
2878 		ccb->ccb_request_offset = MFII_REQUEST_SIZE * (i + 1);
2879 		ccb->ccb_request = request + ccb->ccb_request_offset;
2880 		ccb->ccb_request_dva = MFII_DMA_DVA(sc->sc_requests) +
2881 		    ccb->ccb_request_offset;
2882 
2883 		/* select i'th MFI command frame */
2884 		ccb->ccb_mfi_offset = MFI_FRAME_SIZE * i;
2885 		ccb->ccb_mfi = mfi + ccb->ccb_mfi_offset;
2886 		ccb->ccb_mfi_dva = MFII_DMA_DVA(sc->sc_mfi) +
2887 		    ccb->ccb_mfi_offset;
2888 
2889 		/* select i'th sense */
2890 		ccb->ccb_sense_offset = MFI_SENSE_SIZE * i;
2891 		ccb->ccb_sense = (struct mfi_sense *)(sense +
2892 		    ccb->ccb_sense_offset);
2893 		ccb->ccb_sense_dva = MFII_DMA_DVA(sc->sc_sense) +
2894 		    ccb->ccb_sense_offset;
2895 
2896 		/* select i'th sgl */
2897 		ccb->ccb_sgl_offset = sizeof(struct mfii_sge) *
2898 		    sc->sc_max_sgl * i;
2899 		ccb->ccb_sgl = (struct mfii_sge *)(sgl + ccb->ccb_sgl_offset);
2900 		ccb->ccb_sgl_dva = MFII_DMA_DVA(sc->sc_sgl) +
2901 		    ccb->ccb_sgl_offset;
2902 
2903 		mutex_init(&ccb->ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
2904 		cv_init(&ccb->ccb_cv, "mfiiexec");
2905 
2906 		/* add ccb to queue */
2907 		mfii_put_ccb(sc, ccb);
2908 	}
2909 
2910 	return (0);
2911 
2912 destroy32:
2913 	bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap32);
2914 destroy:
2915 	/* free dma maps and ccb memory */
2916 	while ((ccb = mfii_get_ccb(sc)) != NULL) {
2917 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap32);
2918 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap64);
2919 	}
2920 
2921 	free(sc->sc_ccb, M_DEVBUF);
2922 
2923 	return (1);
2924 }
2925 
2926 #if NBIO > 0
2927 static int
mfii_ioctl(device_t dev,u_long cmd,void * addr)2928 mfii_ioctl(device_t dev, u_long cmd, void *addr)
2929 {
2930 	struct mfii_softc	*sc = device_private(dev);
2931 	int error = 0;
2932 
2933 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl ", DEVNAME(sc));
2934 
2935 	mutex_enter(&sc->sc_lock);
2936 
2937 	switch (cmd) {
2938 	case BIOCINQ:
2939 		DNPRINTF(MFII_D_IOCTL, "inq\n");
2940 		error = mfii_ioctl_inq(sc, (struct bioc_inq *)addr);
2941 		break;
2942 
2943 	case BIOCVOL:
2944 		DNPRINTF(MFII_D_IOCTL, "vol\n");
2945 		error = mfii_ioctl_vol(sc, (struct bioc_vol *)addr);
2946 		break;
2947 
2948 	case BIOCDISK:
2949 		DNPRINTF(MFII_D_IOCTL, "disk\n");
2950 		error = mfii_ioctl_disk(sc, (struct bioc_disk *)addr);
2951 		break;
2952 
2953 	case BIOCALARM:
2954 		DNPRINTF(MFII_D_IOCTL, "alarm\n");
2955 		error = mfii_ioctl_alarm(sc, (struct bioc_alarm *)addr);
2956 		break;
2957 
2958 	case BIOCBLINK:
2959 		DNPRINTF(MFII_D_IOCTL, "blink\n");
2960 		error = mfii_ioctl_blink(sc, (struct bioc_blink *)addr);
2961 		break;
2962 
2963 	case BIOCSETSTATE:
2964 		DNPRINTF(MFII_D_IOCTL, "setstate\n");
2965 		error = mfii_ioctl_setstate(sc, (struct bioc_setstate *)addr);
2966 		break;
2967 
2968 #if 0
2969 	case BIOCPATROL:
2970 		DNPRINTF(MFII_D_IOCTL, "patrol\n");
2971 		error = mfii_ioctl_patrol(sc, (struct bioc_patrol *)addr);
2972 		break;
2973 #endif
2974 
2975 	default:
2976 		DNPRINTF(MFII_D_IOCTL, " invalid ioctl\n");
2977 		error = ENOTTY;
2978 	}
2979 
2980 	mutex_exit(&sc->sc_lock);
2981 
2982 	return (error);
2983 }
2984 
2985 static int
mfii_bio_getitall(struct mfii_softc * sc)2986 mfii_bio_getitall(struct mfii_softc *sc)
2987 {
2988 	int			i, d, rv = EINVAL;
2989 	size_t			size;
2990 	union mfi_mbox		mbox;
2991 	struct mfi_conf		*cfg = NULL;
2992 	struct mfi_ld_details	*ld_det = NULL;
2993 
2994 	/* get info */
2995 	if (mfii_get_info(sc)) {
2996 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_get_info failed\n",
2997 		    DEVNAME(sc));
2998 		goto done;
2999 	}
3000 
3001 	/* send single element command to retrieve size for full structure */
3002 	cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
3003 	if (cfg == NULL)
3004 		goto done;
3005 	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
3006 	    MFII_DATA_IN, false)) {
3007 		free(cfg, M_DEVBUF);
3008 		goto done;
3009 	}
3010 
3011 	size = cfg->mfc_size;
3012 	free(cfg, M_DEVBUF);
3013 
3014 	/* memory for read config */
3015 	cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
3016 	if (cfg == NULL)
3017 		goto done;
3018 	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size,
3019 	    MFII_DATA_IN, false)) {
3020 		free(cfg, M_DEVBUF);
3021 		goto done;
3022 	}
3023 
3024 	/* replace current pointer with new one */
3025 	if (sc->sc_cfg)
3026 		free(sc->sc_cfg, M_DEVBUF);
3027 	sc->sc_cfg = cfg;
3028 
3029 	/* get all ld info */
3030 	memset(&mbox, 0, sizeof(mbox));
3031 	if (sc->sc_max256vd)
3032 		mbox.b[0] = 1;
3033 	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, &mbox, &sc->sc_ld_list,
3034 	    sizeof(sc->sc_ld_list), MFII_DATA_IN, false))
3035 		goto done;
3036 
3037 	/* get memory for all ld structures */
3038 	size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
3039 	if (sc->sc_ld_sz != size) {
3040 		if (sc->sc_ld_details)
3041 			free(sc->sc_ld_details, M_DEVBUF);
3042 
3043 		ld_det = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
3044 		if (ld_det == NULL)
3045 			goto done;
3046 		sc->sc_ld_sz = size;
3047 		sc->sc_ld_details = ld_det;
3048 	}
3049 
3050 	/* find used physical disks */
3051 	size = sizeof(struct mfi_ld_details);
3052 	for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
3053 		memset(&mbox, 0, sizeof(mbox));
3054 		mbox.b[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
3055 		if (mfii_mgmt(sc, MR_DCMD_LD_GET_INFO, &mbox,
3056 		    &sc->sc_ld_details[i], size, MFII_DATA_IN, false))
3057 			goto done;
3058 
3059 		d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
3060 		    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
3061 	}
3062 	sc->sc_no_pd = d;
3063 
3064 	rv = 0;
3065 done:
3066 	return (rv);
3067 }
3068 
3069 static int
mfii_ioctl_inq(struct mfii_softc * sc,struct bioc_inq * bi)3070 mfii_ioctl_inq(struct mfii_softc *sc, struct bioc_inq *bi)
3071 {
3072 	int			rv = EINVAL;
3073 	struct mfi_conf		*cfg = NULL;
3074 
3075 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_inq\n", DEVNAME(sc));
3076 
3077 	if (mfii_bio_getitall(sc)) {
3078 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
3079 		    DEVNAME(sc));
3080 		goto done;
3081 	}
3082 
3083 	/* count unused disks as volumes */
3084 	if (sc->sc_cfg == NULL)
3085 		goto done;
3086 	cfg = sc->sc_cfg;
3087 
3088 	bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
3089 	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
3090 #if notyet
3091 	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
3092 	    (bi->bi_nodisk - sc->sc_no_pd);
3093 #endif
3094 	/* tell bio who we are */
3095 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
3096 
3097 	rv = 0;
3098 done:
3099 	return (rv);
3100 }
3101 
/*
 * BIOCVOL: translate the cached firmware state of one logical drive
 * into a struct bioc_vol (status, RAID level, size, stripe size).
 * Volume ids beyond the logical-drive list are handed to
 * mfii_bio_hs() to be reported as hotspares/unused disks.
 */
static int
mfii_ioctl_vol(struct mfii_softc *sc, struct bioc_vol *bv)
{
	int			i, per, rv = EINVAL;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_vol %#x\n",
	    DEVNAME(sc), bv->bv_volid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
		/* go do hotspares & unused disks */
		rv = mfii_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
		goto done;
	}

	i = bv->bv_volid;
	strlcpy(bv->bv_dev, sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_name,
	    sizeof(bv->bv_dev));

	/* map the firmware LD state onto the bio(4) volume status */
	switch (sc->sc_ld_list.mll_list[i].mll_state) {
	case MFI_LD_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;

	case MFI_LD_PART_DEGRADED:
	case MFI_LD_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;

	case MFI_LD_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;

	default:
		bv->bv_status = BIOC_SVINVALID;
		DNPRINTF(MFII_D_IOCTL, "%s: invalid logical disk state %#x\n",
		    DEVNAME(sc),
		    sc->sc_ld_list.mll_list[i].mll_state);
	}

	/* additional status can modify MFI status */
	switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
	case MFI_LD_PROG_CC:
		/* consistency check in progress; mp_progress is 0..0xffff */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_BGI:
		/* background initialization, reported the same way */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_FGI:
	case MFI_LD_PROG_RECONSTRUCT:
		/* nothing yet */
		break;
	}

#if 0
	if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01)
		bv->bv_cache = BIOC_CVWRITEBACK;
	else
		bv->bv_cache = BIOC_CVWRITETHROUGH;
#endif

	/*
	 * The RAID levels are determined per the SNIA DDF spec, this is only
	 * a subset that is valid for the MFI controller.
	 */
	bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
	/* multi-span volumes are reported as RAID x0 (10, 50, 60) */
	if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth > 1)
		bv->bv_level *= 10;

	bv->bv_nodisk =
	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;

	bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */
	bv->bv_stripe_size =
	    (512 << sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_stripe_size)
	    / 1024; /* in KB */

	rv = 0;
done:
	return (rv);
}
3200 
/*
 * BIOCDISK: report one physical disk of a logical volume.  The disk is
 * located through the firmware config (array/span/slot); if the slot is
 * empty, an unconfigured disk is searched for so that a rebuild target
 * can still be shown.
 */
static int
mfii_ioctl_disk(struct mfii_softc *sc, struct bioc_disk *bd)
{
	struct mfi_conf		*cfg;
	struct mfi_array	*ar;
	struct mfi_ld_cfg	*ld;
	struct mfi_pd_details	*pd;
	struct mfi_pd_list	*pl;
	struct scsipi_inquiry_data *inqbuf;
	/* 8+16+4 covers the INQUIRY vendor+product+revision fields */
	char			vend[8+16+4+1], *vendp;
	int			i, rv = EINVAL;
	int			arr, vol, disk, span;
	union mfi_mbox		mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_disk %#x\n",
	    DEVNAME(sc), bd->bd_diskid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	cfg = sc->sc_cfg;

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	ar = cfg->mfc_array;
	vol = bd->bd_volid;
	if (vol >= cfg->mfc_no_ld) {
		/* do hotspares */
		rv = mfii_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
		goto freeme;
	}

	/* calculate offset to ld structure */
	ld = (struct mfi_ld_cfg *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array);

	/* use span 0 only when raid group is not spanned */
	if (ld[vol].mlc_parm.mpa_span_depth > 1)
		span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
	else
		span = 0;
	arr = ld[vol].mlc_span[span].mls_index;

	/* offset disk into pd list */
	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;

	if (ar[arr].pd[disk].mar_pd.mfp_id == 0xffffU) {
		/* disk is missing but succeed command */
		bd->bd_status = BIOC_SDFAILED;
		rv = 0;

		/* try to find an unused disk for the target to rebuild */
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
		    MFII_DATA_IN, false))
			goto freeme;

		for (i = 0; i < pl->mpl_no_pd; i++) {
			/* skip non-disk (e.g. enclosure) addresses */
			if (pl->mpl_address[i].mpa_scsi_type != 0)
				continue;

			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox,
			    pd, sizeof(*pd), MFII_DATA_IN, false))
				continue;

			if (pd->mpd_fw_state == MFI_PD_UNCONFIG_GOOD ||
			    pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD)
				break;
		}

		/* no rebuild candidate found; report the failed slot only */
		if (i == pl->mpl_no_pd)
			goto freeme;
	} else {
		memset(&mbox, 0, sizeof(mbox));
		mbox.s[0] = ar[arr].pd[disk].mar_pd.mfp_id;
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
		    MFII_DATA_IN, false)) {
			bd->bd_status = BIOC_SDINVALID;
			goto freeme;
		}
	}

	/* get the remaining fields */
	bd->bd_channel = pd->mpd_enc_idx;
	bd->bd_target = pd->mpd_enc_slot;

	/* get status */
	switch (pd->mpd_fw_state){
	case MFI_PD_UNCONFIG_GOOD:
	case MFI_PD_UNCONFIG_BAD:
		bd->bd_status = BIOC_SDUNUSED;
		break;

	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
		bd->bd_status = BIOC_SDHOTSPARE;
		break;

	case MFI_PD_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;

	case MFI_PD_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;

	case MFI_PD_REBUILD:
		bd->bd_status = BIOC_SDREBUILD;
		break;

	case MFI_PD_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;

	case MFI_PD_COPYBACK:
	case MFI_PD_SYSTEM:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = pd->mpd_size * 512; /* bytes per block */

	/* copy INQUIRY vendor/product/revision as one NUL-terminated string */
	inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
	vendp = inqbuf->vendor;
	memcpy(vend, vendp, sizeof vend - 1);
	vend[sizeof vend - 1] = '\0';
	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));

	/* XXX find a way to retrieve serial nr from drive */
	/* XXX find a way to get bd_procdev */

#if 0
	mfp = &pd->mpd_progress;
	if (mfp->mfp_in_prog & MFI_PD_PROG_PR) {
		mp = &mfp->mfp_patrol_read;
		bd->bd_patrol.bdp_percent = (mp->mp_progress * 100) / 0xffff;
		bd->bd_patrol.bdp_seconds = mp->mp_elapsed_seconds;
	}
#endif

	rv = 0;
freeme:
	free(pd, M_DEVBUF);
	free(pl, M_DEVBUF);

	return (rv);
}
3354 
3355 static int
mfii_ioctl_alarm(struct mfii_softc * sc,struct bioc_alarm * ba)3356 mfii_ioctl_alarm(struct mfii_softc *sc, struct bioc_alarm *ba)
3357 {
3358 	uint32_t		opc;
3359 	int			rv = 0;
3360 	int8_t			ret;
3361 	mfii_direction_t dir = MFII_DATA_NONE;
3362 
3363 	switch (ba->ba_opcode) {
3364 	case BIOC_SADISABLE:
3365 		opc = MR_DCMD_SPEAKER_DISABLE;
3366 		break;
3367 
3368 	case BIOC_SAENABLE:
3369 		opc = MR_DCMD_SPEAKER_ENABLE;
3370 		break;
3371 
3372 	case BIOC_SASILENCE:
3373 		opc = MR_DCMD_SPEAKER_SILENCE;
3374 		break;
3375 
3376 	case BIOC_GASTATUS:
3377 		opc = MR_DCMD_SPEAKER_GET;
3378 		dir = MFII_DATA_IN;
3379 		break;
3380 
3381 	case BIOC_SATEST:
3382 		opc = MR_DCMD_SPEAKER_TEST;
3383 		break;
3384 
3385 	default:
3386 		DNPRINTF(MFII_D_IOCTL,
3387 		    "%s: mfii_ioctl_alarm biocalarm invalid opcode %x\n",
3388 		    DEVNAME(sc), ba->ba_opcode);
3389 		return (EINVAL);
3390 	}
3391 
3392 	if (mfii_mgmt(sc, opc, NULL, &ret, sizeof(ret), dir, false))
3393 		rv = EINVAL;
3394 	else
3395 		if (ba->ba_opcode == BIOC_GASTATUS)
3396 			ba->ba_status = ret;
3397 		else
3398 			ba->ba_status = 0;
3399 
3400 	return (rv);
3401 }
3402 
3403 static int
mfii_ioctl_blink(struct mfii_softc * sc,struct bioc_blink * bb)3404 mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *bb)
3405 {
3406 	int			i, found, rv = EINVAL;
3407 	union mfi_mbox		mbox;
3408 	uint32_t		cmd;
3409 	struct mfi_pd_list	*pd;
3410 
3411 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink %x\n", DEVNAME(sc),
3412 	    bb->bb_status);
3413 
3414 	/* channel 0 means not in an enclosure so can't be blinked */
3415 	if (bb->bb_channel == 0)
3416 		return (EINVAL);
3417 
3418 	pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
3419 
3420 	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pd, sizeof(*pd),
3421 	    MFII_DATA_IN, false))
3422 		goto done;
3423 
3424 	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
3425 		if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
3426 		    bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
3427 			found = 1;
3428 			break;
3429 		}
3430 
3431 	if (!found)
3432 		goto done;
3433 
3434 	memset(&mbox, 0, sizeof(mbox));
3435 	mbox.s[0] = pd->mpl_address[i].mpa_pd_id;
3436 
3437 	switch (bb->bb_status) {
3438 	case BIOC_SBUNBLINK:
3439 		cmd = MR_DCMD_PD_UNBLINK;
3440 		break;
3441 
3442 	case BIOC_SBBLINK:
3443 		cmd = MR_DCMD_PD_BLINK;
3444 		break;
3445 
3446 	case BIOC_SBALARM:
3447 	default:
3448 		DNPRINTF(MFII_D_IOCTL,
3449 		    "%s: mfii_ioctl_blink biocblink invalid opcode %x\n",
3450 		    DEVNAME(sc), bb->bb_status);
3451 		goto done;
3452 	}
3453 
3454 
3455 	if (mfii_mgmt(sc, cmd, &mbox, NULL, 0, MFII_DATA_NONE, false))
3456 		goto done;
3457 
3458 	rv = 0;
3459 done:
3460 	free(pd, M_DEVBUF);
3461 	return (rv);
3462 }
3463 
/*
 * Bring a physical disk into the UNCONFIG_GOOD state so it can be used
 * as a rebuild target: clear an UNCONFIG_BAD state, then clear any
 * foreign (imported) DDF configuration, re-reading the disk state
 * between each step.  Returns 0 on success, an errno otherwise.
 */
static int
mfii_makegood(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfii_foreign_scan_info *fsi;
	struct mfi_pd_details	*pd;
	union mfi_mbox		mbox;
	int			rv;

	fsi = malloc(sizeof *fsi, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	/* step 1: force UNCONFIG_BAD -> UNCONFIG_GOOD */
	if (pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD) {
		mbox.s[0] = pd_id;
		mbox.s[1] = pd->mpd_pd.mfp_seq;	/* sequence from GET_INFO */
		mbox.b[4] = MFI_PD_UNCONFIG_GOOD;
		rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0,
		    MFII_DATA_NONE, false);
		if (rv != 0)
			goto done;
	}

	/* re-read the state after the (possible) state change */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	/* step 2: clear foreign DDF metadata if the disk carries any */
	if (pd->mpd_ddf_state & MFI_DDF_FOREIGN) {
		rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_SCAN, NULL,
		    fsi, sizeof(*fsi), MFII_DATA_IN, false);
		if (rv != 0)
			goto done;

		if (fsi->count > 0) {
			rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_CLEAR, NULL,
			    NULL, 0, MFII_DATA_NONE, false);
			if (rv != 0)
				goto done;
		}
	}

	/* verify the disk actually ended up clean and unconfigured */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state != MFI_PD_UNCONFIG_GOOD ||
	    pd->mpd_ddf_state & MFI_DDF_FOREIGN)
		rv = ENXIO;

done:
	free(fsi, M_DEVBUF);
	free(pd, M_DEVBUF);

	return (rv);
}
3530 
/*
 * Turn a physical disk into a global hotspare.  The MAKE_SPARE payload
 * is a struct mfi_hotspare followed by one uint16_t per array, hence
 * the variable-sized allocation.  Returns 0 on success, errno or
 * EINVAL otherwise.
 */
static int
mfii_makespare(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfi_hotspare	*hs;
	struct mfi_pd_details	*pd;
	union mfi_mbox		mbox;
	size_t			size;
	int			rv = EINVAL;

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	/* trailing array: one target id slot per configured array */
	size = sizeof *hs + sizeof(uint16_t) * sc->sc_cfg->mfc_no_array;

	hs = malloc(size, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	/* fetch the disk's current id/sequence for the MAKE_SPARE command */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	memset(hs, 0, size);
	hs->mhs_pd.mfp_id = pd->mpd_pd.mfp_id;
	hs->mhs_pd.mfp_seq = pd->mpd_pd.mfp_seq;
	rv = mfii_mgmt(sc, MR_DCMD_CFG_MAKE_SPARE, NULL, hs, size,
	    MFII_DATA_OUT, false);

done:
	free(hs, M_DEVBUF);
	free(pd, M_DEVBUF);

	return (rv);
}
3570 
/*
 * BIOCSETSTATE: change the firmware state of a physical disk (online,
 * offline, hotspare or rebuild).  The disk is located by enclosure
 * index and slot; SET_STATE needs the disk's current sequence number,
 * so the details are fetched first.
 */
static int
mfii_ioctl_setstate(struct mfii_softc *sc, struct bioc_setstate *bs)
{
	struct mfi_pd_details	*pd;
	struct mfi_pd_list	*pl;
	int			i, found, rv = EINVAL;
	union mfi_mbox		mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate %x\n", DEVNAME(sc),
	    bs->bs_status);

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
	    MFII_DATA_IN, false))
		goto done;

	/* locate the drive by enclosure index and slot */
	for (i = 0, found = 0; i < pl->mpl_no_pd; i++)
		if (bs->bs_channel == pl->mpl_address[i].mpa_enc_index &&
		    bs->bs_target == pl->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;

	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false))
		goto done;

	/* SET_STATE mailbox: s[0] = pd id, s[1] = sequence, b[4] = state */
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
	mbox.s[1] = pd->mpd_pd.mfp_seq;

	switch (bs->bs_status) {
	case BIOC_SSONLINE:
		mbox.b[4] = MFI_PD_ONLINE;
		break;

	case BIOC_SSOFFLINE:
		mbox.b[4] = MFI_PD_OFFLINE;
		break;

	case BIOC_SSHOTSPARE:
		mbox.b[4] = MFI_PD_HOTSPARE;
		break;

	case BIOC_SSREBUILD:
		/*
		 * A disk that is not OFFLINE must first be made
		 * unconfigured-good and turned into a spare before a
		 * rebuild can be requested on it.
		 */
		if (pd->mpd_fw_state != MFI_PD_OFFLINE) {
			if ((rv = mfii_makegood(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			if ((rv = mfii_makespare(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			/* re-read: the state and sequence have changed */
			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox,
			    pd, sizeof(*pd), MFII_DATA_IN, false);
			if (rv != 0)
				goto done;

			/* rebuilding might be started by mfii_makespare() */
			if (pd->mpd_fw_state == MFI_PD_REBUILD) {
				rv = 0;
				goto done;
			}

			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			mbox.s[1] = pd->mpd_pd.mfp_seq;
		}
		mbox.b[4] = MFI_PD_REBUILD;
		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate invalid "
		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
		goto done;
	}


	rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0,
	    MFII_DATA_NONE, false);
done:
	free(pd, M_DEVBUF);
	free(pl, M_DEVBUF);
	return (rv);
}
3665 
#if 0
/*
 * BIOCPATROL: control and query the firmware patrol-read feature.
 * Currently disabled (the BIOCPATROL case in mfii_ioctl() is also
 * under #if 0).
 *
 * NOTE(review): MR_DCMD_PR_START/STOP are issued with MFII_DATA_IN but
 * a NULL buffer and zero length; MFII_DATA_NONE looks more appropriate
 * — verify against mfii_mgmt() before enabling this code.
 */
int
mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *bp)
{
	uint32_t		opc;
	int			rv = 0;
	struct mfi_pr_properties prop;
	struct mfi_pr_status	status;
	uint32_t		time, exec_freq;

	switch (bp->bp_opcode) {
	case BIOC_SPSTOP:
	case BIOC_SPSTART:
		if (bp->bp_opcode == BIOC_SPSTART)
			opc = MR_DCMD_PR_START;
		else
			opc = MR_DCMD_PR_STOP;
		if (mfii_mgmt(sc, opc, NULL, NULL, 0, MFII_DATA_IN, false))
			return (EINVAL);
		break;

	case BIOC_SPMANUAL:
	case BIOC_SPDISABLE:
	case BIOC_SPAUTO:
		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time),
		    MFII_DATA_IN, false))
			return (EINVAL);

		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_IN, false))
			return (EINVAL);

		/* modify only the mode/schedule fields, then write back */
		switch (bp->bp_opcode) {
		case BIOC_SPMANUAL:
			prop.op_mode = MFI_PR_OPMODE_MANUAL;
			break;
		case BIOC_SPDISABLE:
			prop.op_mode = MFI_PR_OPMODE_DISABLED;
			break;
		case BIOC_SPAUTO:
			if (bp->bp_autoival != 0) {
				if (bp->bp_autoival == -1)
					/* continuously */
					exec_freq = 0xffffffffU;
				else if (bp->bp_autoival > 0)
					exec_freq = bp->bp_autoival;
				else
					return (EINVAL);
				prop.exec_freq = exec_freq;
			}
			if (bp->bp_autonext != 0) {
				if (bp->bp_autonext < 0)
					return (EINVAL);
				else
					prop.next_exec =
					    time + bp->bp_autonext;
			}
			prop.op_mode = MFI_PR_OPMODE_AUTO;
			break;
		}

		opc = MR_DCMD_PR_SET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_OUT, false))
			return (EINVAL);

		break;

	case BIOC_GPSTATUS:
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_IN, false))
			return (EINVAL);

		opc = MR_DCMD_PR_GET_STATUS;
		if (mfii_mgmt(sc, opc, NULL, &status, sizeof(status),
		    MFII_DATA_IN, false))
			return (EINVAL);

		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time),
		    MFII_DATA_IN, false))
			return (EINVAL);

		switch (prop.op_mode) {
		case MFI_PR_OPMODE_AUTO:
			bp->bp_mode = BIOC_SPMAUTO;
			bp->bp_autoival = prop.exec_freq;
			bp->bp_autonext = prop.next_exec;
			bp->bp_autonow = time;
			break;
		case MFI_PR_OPMODE_MANUAL:
			bp->bp_mode = BIOC_SPMMANUAL;
			break;
		case MFI_PR_OPMODE_DISABLED:
			bp->bp_mode = BIOC_SPMDISABLED;
			break;
		default:
			printf("%s: unknown patrol mode %d\n",
			    DEVNAME(sc), prop.op_mode);
			break;
		}

		switch (status.state) {
		case MFI_PR_STATE_STOPPED:
			bp->bp_status = BIOC_SPSSTOPPED;
			break;
		case MFI_PR_STATE_READY:
			bp->bp_status = BIOC_SPSREADY;
			break;
		case MFI_PR_STATE_ACTIVE:
			bp->bp_status = BIOC_SPSACTIVE;
			break;
		case MFI_PR_STATE_ABORTED:
			bp->bp_status = BIOC_SPSABORTED;
			break;
		default:
			printf("%s: unknown patrol state %d\n",
			    DEVNAME(sc), status.state);
			break;
		}

		break;

	default:
		DNPRINTF(MFII_D_IOCTL,
		    "%s: mfii_ioctl_patrol biocpatrol invalid opcode %x\n",
		    DEVNAME(sc), bp->bp_opcode);
		return (EINVAL);
	}

	return (rv);
}
#endif
3804 
3805 static int
mfii_bio_hs(struct mfii_softc * sc,int volid,int type,void * bio_hs)3806 mfii_bio_hs(struct mfii_softc *sc, int volid, int type, void *bio_hs)
3807 {
3808 	struct mfi_conf		*cfg;
3809 	struct mfi_hotspare	*hs;
3810 	struct mfi_pd_details	*pd;
3811 	struct bioc_disk	*sdhs;
3812 	struct bioc_vol		*vdhs;
3813 	struct scsipi_inquiry_data *inqbuf;
3814 	char			vend[8+16+4+1], *vendp;
3815 	int			i, rv = EINVAL;
3816 	uint32_t		size;
3817 	union mfi_mbox		mbox;
3818 
3819 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs %d\n", DEVNAME(sc), volid);
3820 
3821 	if (!bio_hs)
3822 		return (EINVAL);
3823 
3824 	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3825 
3826 	/* send single element command to retrieve size for full structure */
3827 	cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
3828 	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
3829 	    MFII_DATA_IN, false))
3830 		goto freeme;
3831 
3832 	size = cfg->mfc_size;
3833 	free(cfg, M_DEVBUF);
3834 
3835 	/* memory for read config */
3836 	cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
3837 	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size,
3838 	    MFII_DATA_IN, false))
3839 		goto freeme;
3840 
3841 	/* calculate offset to hs structure */
3842 	hs = (struct mfi_hotspare *)(
3843 	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
3844 	    cfg->mfc_array_size * cfg->mfc_no_array +
3845 	    cfg->mfc_ld_size * cfg->mfc_no_ld);
3846 
3847 	if (volid < cfg->mfc_no_ld)
3848 		goto freeme; /* not a hotspare */
3849 
3850 	if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
3851 		goto freeme; /* not a hotspare */
3852 
3853 	/* offset into hotspare structure */
3854 	i = volid - cfg->mfc_no_ld;
3855 
3856 	DNPRINTF(MFII_D_IOCTL,
3857 	    "%s: mfii_vol_hs i %d volid %d no_ld %d no_hs %d "
3858 	    "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
3859 	    cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
3860 
3861 	/* get pd fields */
3862 	memset(&mbox, 0, sizeof(mbox));
3863 	mbox.s[0] = hs[i].mhs_pd.mfp_id;
3864 	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3865 	    MFII_DATA_IN, false)) {
3866 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs illegal PD\n",
3867 		    DEVNAME(sc));
3868 		goto freeme;
3869 	}
3870 
3871 	switch (type) {
3872 	case MFI_MGMT_VD:
3873 		vdhs = bio_hs;
3874 		vdhs->bv_status = BIOC_SVONLINE;
3875 		vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3876 		vdhs->bv_level = -1; /* hotspare */
3877 		vdhs->bv_nodisk = 1;
3878 		break;
3879 
3880 	case MFI_MGMT_SD:
3881 		sdhs = bio_hs;
3882 		sdhs->bd_status = BIOC_SDHOTSPARE;
3883 		sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3884 		sdhs->bd_channel = pd->mpd_enc_idx;
3885 		sdhs->bd_target = pd->mpd_enc_slot;
3886 		inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
3887 		vendp = inqbuf->vendor;
3888 		memcpy(vend, vendp, sizeof vend - 1);
3889 		vend[sizeof vend - 1] = '\0';
3890 		strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
3891 		break;
3892 
3893 	default:
3894 		goto freeme;
3895 	}
3896 
3897 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs 6\n", DEVNAME(sc));
3898 	rv = 0;
3899 freeme:
3900 	free(pd, M_DEVBUF);
3901 	free(cfg, M_DEVBUF);
3902 
3903 	return (rv);
3904 }
3905 
3906 #endif /* NBIO > 0 */
3907 
/*
 * Refresh one of the four BBU envsys sensors (0 = health indicator,
 * 1 = voltage, 2 = current, 3 = temperature) from a fresh
 * MR_DCMD_BBU_GET_STATUS query.
 */
static void
mfii_bbu(struct mfii_softc *sc, envsys_data_t *edata)
{
	struct mfi_bbu_status bbu;
	u_int32_t status;
	u_int32_t mask;
	u_int32_t soh_bad;
	int rv;

	mutex_enter(&sc->sc_lock);
	rv = mfii_mgmt(sc, MR_DCMD_BBU_GET_STATUS, NULL, &bbu,
	    sizeof(bbu), MFII_DATA_IN, false);
	mutex_exit(&sc->sc_lock);
	if (rv != 0) {
		/* query failed: mark the sensor unusable */
		edata->state = ENVSYS_SINVALID;
		edata->value_cur = 0;
		return;
	}

	/* each battery type has its own set of "bad" status bits */
	switch (bbu.battery_type) {
	case MFI_BBU_TYPE_IBBU:
	case MFI_BBU_TYPE_IBBU09:
	case MFI_BBU_TYPE_CVPM02:
		mask = MFI_BBU_STATE_BAD_IBBU;
		soh_bad = 0;
		break;
	case MFI_BBU_TYPE_BBU:
		mask = MFI_BBU_STATE_BAD_BBU;
		/* BBU additionally reports a state-of-health flag */
		soh_bad = (bbu.detail.bbu.is_SOH_good == 0);
		break;

	case MFI_BBU_TYPE_NONE:
	default:
		/* no (known) battery present */
		edata->state = ENVSYS_SCRITICAL;
		edata->value_cur = 0;
		return;
	}

	status = le32toh(bbu.fw_status) & mask;
	switch (edata->sensor) {
	case 0:
		/* health indicator: 1 = good, 0 = bad */
		edata->value_cur = (status || soh_bad) ? 0 : 1;
		edata->state =
		    edata->value_cur ? ENVSYS_SVALID : ENVSYS_SCRITICAL;
		return;
	case 1:
		/* firmware reports mV; envsys wants microvolts */
		edata->value_cur = le16toh(bbu.voltage) * 1000;
		edata->state = ENVSYS_SVALID;
		return;
	case 2:
		/* signed mA -> microamps */
		edata->value_cur = (int16_t)le16toh(bbu.current) * 1000;
		edata->state = ENVSYS_SVALID;
		return;
	case 3:
		/* degrees C -> microkelvin (273150000 == 273.15 K) */
		edata->value_cur =
		    le16toh(bbu.temperature) * 1000000 + 273150000;
		edata->state = ENVSYS_SVALID;
		return;
	}
}
3968 
3969 static void
mfii_refresh_ld_sensor(struct mfii_softc * sc,envsys_data_t * edata)3970 mfii_refresh_ld_sensor(struct mfii_softc *sc, envsys_data_t *edata)
3971 {
3972 	struct bioc_vol bv;
3973 	int error;
3974 
3975 	memset(&bv, 0, sizeof(bv));
3976 	bv.bv_volid = edata->sensor - MFI_BBU_SENSORS;
3977 	mutex_enter(&sc->sc_lock);
3978 	error = mfii_ioctl_vol(sc, &bv);
3979 	mutex_exit(&sc->sc_lock);
3980 	if (error)
3981 		bv.bv_status = BIOC_SVINVALID;
3982 	bio_vol_to_envsys(edata, &bv);
3983 }
3984 
3985 static void
mfii_init_ld_sensor(struct mfii_softc * sc,envsys_data_t * sensor,int i)3986 mfii_init_ld_sensor(struct mfii_softc *sc, envsys_data_t *sensor, int i)
3987 {
3988 	sensor->units = ENVSYS_DRIVE;
3989 	sensor->state = ENVSYS_SINVALID;
3990 	sensor->value_cur = ENVSYS_DRIVE_EMPTY;
3991 	/* Enable monitoring for drive state changes */
3992 	sensor->flags |= ENVSYS_FMONSTCHANGED;
3993 	snprintf(sensor->desc, sizeof(sensor->desc), "%s:%d", DEVNAME(sc), i);
3994 }
3995 
3996 static void
mfii_attach_sensor(struct mfii_softc * sc,envsys_data_t * s)3997 mfii_attach_sensor(struct mfii_softc *sc, envsys_data_t *s)
3998 {
3999 	if (sysmon_envsys_sensor_attach(sc->sc_sme, s))
4000 		aprint_error_dev(sc->sc_dev,
4001 		    "failed to attach sensor %s\n", s->desc);
4002 }
4003 
4004 static int
mfii_create_sensors(struct mfii_softc * sc)4005 mfii_create_sensors(struct mfii_softc *sc)
4006 {
4007 	int i, rv;
4008 	const int nsensors = MFI_BBU_SENSORS + MFII_MAX_LD_EXT;
4009 
4010 	sc->sc_sme = sysmon_envsys_create();
4011 	sc->sc_sensors = malloc(sizeof(envsys_data_t) * nsensors,
4012 	    M_DEVBUF, M_WAITOK | M_ZERO);
4013 
4014 	/* BBU */
4015 	sc->sc_sensors[0].units = ENVSYS_INDICATOR;
4016 	sc->sc_sensors[0].state = ENVSYS_SINVALID;
4017 	sc->sc_sensors[0].value_cur = 0;
4018 	sc->sc_sensors[1].units = ENVSYS_SVOLTS_DC;
4019 	sc->sc_sensors[1].state = ENVSYS_SINVALID;
4020 	sc->sc_sensors[1].value_cur = 0;
4021 	sc->sc_sensors[2].units = ENVSYS_SAMPS;
4022 	sc->sc_sensors[2].state = ENVSYS_SINVALID;
4023 	sc->sc_sensors[2].value_cur = 0;
4024 	sc->sc_sensors[3].units = ENVSYS_STEMP;
4025 	sc->sc_sensors[3].state = ENVSYS_SINVALID;
4026 	sc->sc_sensors[3].value_cur = 0;
4027 	sc->sc_ld_sensors = sc->sc_sensors + MFI_BBU_SENSORS;
4028 
4029 	if (ISSET(le32toh(sc->sc_info.mci_hw_present), MFI_INFO_HW_BBU)) {
4030 		sc->sc_bbuok = true;
4031 		sc->sc_sensors[0].flags |= ENVSYS_FMONCRITICAL;
4032 		snprintf(sc->sc_sensors[0].desc, sizeof(sc->sc_sensors[0].desc),
4033 		    "%s BBU state", DEVNAME(sc));
4034 		snprintf(sc->sc_sensors[1].desc, sizeof(sc->sc_sensors[1].desc),
4035 		    "%s BBU voltage", DEVNAME(sc));
4036 		snprintf(sc->sc_sensors[2].desc, sizeof(sc->sc_sensors[2].desc),
4037 		    "%s BBU current", DEVNAME(sc));
4038 		snprintf(sc->sc_sensors[3].desc, sizeof(sc->sc_sensors[3].desc),
4039 		    "%s BBU temperature", DEVNAME(sc));
4040 		for (i = 0; i < MFI_BBU_SENSORS; i++) {
4041 			mfii_attach_sensor(sc, &sc->sc_sensors[i]);
4042 		}
4043 	}
4044 
4045 	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
4046 		mfii_init_ld_sensor(sc, &sc->sc_ld_sensors[i], i);
4047 		mfii_attach_sensor(sc, &sc->sc_ld_sensors[i]);
4048 	}
4049 
4050 	sc->sc_sme->sme_name = DEVNAME(sc);
4051 	sc->sc_sme->sme_cookie = sc;
4052 	sc->sc_sme->sme_refresh = mfii_refresh_sensor;
4053 	rv = sysmon_envsys_register(sc->sc_sme);
4054 	if (rv) {
4055 		aprint_error_dev(sc->sc_dev,
4056 		    "unable to register with sysmon (rv = %d)\n", rv);
4057 		sysmon_envsys_destroy(sc->sc_sme);
4058 		sc->sc_sme = NULL;
4059 	}
4060 	return rv;
4061 
4062 }
4063 
4064 static int
mfii_destroy_sensors(struct mfii_softc * sc)4065 mfii_destroy_sensors(struct mfii_softc *sc)
4066 {
4067 	if (sc->sc_sme == NULL)
4068 		return 0;
4069 	sysmon_envsys_unregister(sc->sc_sme);
4070 	sc->sc_sme = NULL;
4071 	free(sc->sc_sensors, M_DEVBUF);
4072 	return 0;
4073 }
4074 
4075 static void
mfii_refresh_sensor(struct sysmon_envsys * sme,envsys_data_t * edata)4076 mfii_refresh_sensor(struct sysmon_envsys *sme, envsys_data_t *edata)
4077 {
4078 	struct mfii_softc	*sc = sme->sme_cookie;
4079 
4080 	if (edata->sensor >= MFI_BBU_SENSORS + MFII_MAX_LD_EXT)
4081 		return;
4082 
4083 	if (edata->sensor < MFI_BBU_SENSORS) {
4084 		if (sc->sc_bbuok)
4085 			mfii_bbu(sc, edata);
4086 	} else {
4087 		mfii_refresh_ld_sensor(sc, edata);
4088 	}
4089 }
4090