xref: /openbsd-src/sys/dev/ic/mpi.c (revision 6d832bc26091e607ccff6c00a03838065999bad0)
1 /*	$OpenBSD: mpi.c,v 1.210 2020/01/25 21:48:42 krw Exp $ */
2 
3 /*
4  * Copyright (c) 2005, 2006, 2009 David Gwynne <dlg@openbsd.org>
5  * Copyright (c) 2005, 2008, 2009 Marco Peereboom <marco@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "bio.h"
21 
22 #include <sys/param.h>
23 #include <sys/systm.h>
24 #include <sys/buf.h>
25 #include <sys/device.h>
26 #include <sys/malloc.h>
27 #include <sys/kernel.h>
28 #include <sys/mutex.h>
29 #include <sys/rwlock.h>
30 #include <sys/sensors.h>
31 #include <sys/dkio.h>
32 #include <sys/task.h>
33 
34 #include <machine/bus.h>
35 
36 #include <scsi/scsi_all.h>
37 #include <scsi/scsiconf.h>
38 
39 #include <dev/biovar.h>
40 #include <dev/ic/mpireg.h>
41 #include <dev/ic/mpivar.h>
42 
43 #ifdef MPI_DEBUG
44 uint32_t	mpi_debug = 0
45 /*		    | MPI_D_CMD */
46 /*		    | MPI_D_INTR */
47 /*		    | MPI_D_MISC */
48 /*		    | MPI_D_DMA */
49 /*		    | MPI_D_IOCTL */
50 /*		    | MPI_D_RW */
51 /*		    | MPI_D_MEM */
52 /*		    | MPI_D_CCB */
53 /*		    | MPI_D_PPR */
54 /*		    | MPI_D_RAID */
55 /*		    | MPI_D_EVT */
56 		;
57 #endif
58 
59 struct cfdriver mpi_cd = {
60 	NULL,
61 	"mpi",
62 	DV_DULL
63 };
64 
65 void			mpi_scsi_cmd(struct scsi_xfer *);
66 void			mpi_scsi_cmd_done(struct mpi_ccb *);
67 void			mpi_minphys(struct buf *bp, struct scsi_link *sl);
68 int			mpi_scsi_probe(struct scsi_link *);
69 int			mpi_scsi_ioctl(struct scsi_link *, u_long, caddr_t,
70 			    int);
71 
72 struct scsi_adapter mpi_switch = {
73 	mpi_scsi_cmd, mpi_minphys, mpi_scsi_probe, NULL, mpi_scsi_ioctl
74 };
75 
76 struct mpi_dmamem	*mpi_dmamem_alloc(struct mpi_softc *, size_t);
77 void			mpi_dmamem_free(struct mpi_softc *,
78 			    struct mpi_dmamem *);
79 int			mpi_alloc_ccbs(struct mpi_softc *);
80 void			*mpi_get_ccb(void *);
81 void			mpi_put_ccb(void *, void *);
82 int			mpi_alloc_replies(struct mpi_softc *);
83 void			mpi_push_replies(struct mpi_softc *);
84 void			mpi_push_reply(struct mpi_softc *, struct mpi_rcb *);
85 
86 void			mpi_start(struct mpi_softc *, struct mpi_ccb *);
87 int			mpi_poll(struct mpi_softc *, struct mpi_ccb *, int);
88 void			mpi_poll_done(struct mpi_ccb *);
89 void			mpi_reply(struct mpi_softc *, u_int32_t);
90 
91 void			mpi_wait(struct mpi_softc *sc, struct mpi_ccb *);
92 void			mpi_wait_done(struct mpi_ccb *);
93 
94 int			mpi_cfg_spi_port(struct mpi_softc *);
95 void			mpi_squash_ppr(struct mpi_softc *);
96 void			mpi_run_ppr(struct mpi_softc *);
97 int			mpi_ppr(struct mpi_softc *, struct scsi_link *,
98 			    struct mpi_cfg_raid_physdisk *, int, int, int);
99 int			mpi_inq(struct mpi_softc *, u_int16_t, int);
100 
101 int			mpi_cfg_sas(struct mpi_softc *);
102 int			mpi_cfg_fc(struct mpi_softc *);
103 
104 void			mpi_timeout_xs(void *);
105 int			mpi_load_xs(struct mpi_ccb *);
106 
107 u_int32_t		mpi_read(struct mpi_softc *, bus_size_t);
108 void			mpi_write(struct mpi_softc *, bus_size_t, u_int32_t);
109 int			mpi_wait_eq(struct mpi_softc *, bus_size_t, u_int32_t,
110 			    u_int32_t);
111 int			mpi_wait_ne(struct mpi_softc *, bus_size_t, u_int32_t,
112 			    u_int32_t);
113 
114 int			mpi_init(struct mpi_softc *);
115 int			mpi_reset_soft(struct mpi_softc *);
116 int			mpi_reset_hard(struct mpi_softc *);
117 
118 int			mpi_handshake_send(struct mpi_softc *, void *, size_t);
119 int			mpi_handshake_recv_dword(struct mpi_softc *,
120 			    u_int32_t *);
121 int			mpi_handshake_recv(struct mpi_softc *, void *, size_t);
122 
123 void			mpi_empty_done(struct mpi_ccb *);
124 
125 int			mpi_iocinit(struct mpi_softc *);
126 int			mpi_iocfacts(struct mpi_softc *);
127 int			mpi_portfacts(struct mpi_softc *);
128 int			mpi_portenable(struct mpi_softc *);
129 int			mpi_cfg_coalescing(struct mpi_softc *);
130 void			mpi_get_raid(struct mpi_softc *);
131 int			mpi_fwupload(struct mpi_softc *);
132 int			mpi_manufacturing(struct mpi_softc *);
133 int			mpi_scsi_probe_virtual(struct scsi_link *);
134 
135 int			mpi_eventnotify(struct mpi_softc *);
136 void			mpi_eventnotify_done(struct mpi_ccb *);
137 void			mpi_eventnotify_free(struct mpi_softc *,
138 			    struct mpi_rcb *);
139 void			mpi_eventack(void *, void *);
140 void			mpi_eventack_done(struct mpi_ccb *);
141 int			mpi_evt_sas(struct mpi_softc *, struct mpi_rcb *);
142 void			mpi_evt_sas_detach(void *, void *);
143 void			mpi_evt_sas_detach_done(struct mpi_ccb *);
144 void			mpi_fc_rescan(void *);
145 
146 int			mpi_req_cfg_header(struct mpi_softc *, u_int8_t,
147 			    u_int8_t, u_int32_t, int, void *);
148 int			mpi_req_cfg_page(struct mpi_softc *, u_int32_t, int,
149 			    void *, int, void *, size_t);
150 
151 int			mpi_ioctl_cache(struct scsi_link *, u_long,
152 			    struct dk_cache *);
153 
154 #if NBIO > 0
155 int		mpi_bio_get_pg0_raid(struct mpi_softc *, int);
156 int		mpi_ioctl(struct device *, u_long, caddr_t);
157 int		mpi_ioctl_inq(struct mpi_softc *, struct bioc_inq *);
158 int		mpi_ioctl_vol(struct mpi_softc *, struct bioc_vol *);
159 int		mpi_ioctl_disk(struct mpi_softc *, struct bioc_disk *);
160 int		mpi_ioctl_setstate(struct mpi_softc *, struct bioc_setstate *);
161 #ifndef SMALL_KERNEL
162 int		mpi_create_sensors(struct mpi_softc *);
163 void		mpi_refresh_sensors(void *);
164 #endif /* SMALL_KERNEL */
165 #endif /* NBIO > 0 */
166 
167 #define DEVNAME(s)		((s)->sc_dev.dv_xname)
168 
169 #define	dwordsof(s)		(sizeof(s) / sizeof(u_int32_t))
170 
171 #define mpi_read_db(s)		mpi_read((s), MPI_DOORBELL)
172 #define mpi_write_db(s, v)	mpi_write((s), MPI_DOORBELL, (v))
173 #define mpi_read_intr(s)	bus_space_read_4((s)->sc_iot, (s)->sc_ioh, \
174 				    MPI_INTR_STATUS)
175 #define mpi_write_intr(s, v)	mpi_write((s), MPI_INTR_STATUS, (v))
176 #define mpi_pop_reply(s)	bus_space_read_4((s)->sc_iot, (s)->sc_ioh, \
177 				    MPI_REPLY_QUEUE)
178 #define mpi_push_reply_db(s, v) bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
179 				    MPI_REPLY_QUEUE, (v))
180 
181 #define mpi_wait_db_int(s)	mpi_wait_ne((s), MPI_INTR_STATUS, \
182 				    MPI_INTR_STATUS_DOORBELL, 0)
183 #define mpi_wait_db_ack(s)	mpi_wait_eq((s), MPI_INTR_STATUS, \
184 				    MPI_INTR_STATUS_IOCDOORBELL, 0)
185 
186 #define MPI_PG_EXTENDED		(1<<0)
187 #define MPI_PG_POLL		(1<<1)
188 #define MPI_PG_FMT		"\020" "\002POLL" "\001EXTENDED"
189 
190 #define mpi_cfg_header(_s, _t, _n, _a, _h) \
191 	mpi_req_cfg_header((_s), (_t), (_n), (_a), \
192 	    MPI_PG_POLL, (_h))
193 #define mpi_ecfg_header(_s, _t, _n, _a, _h) \
194 	mpi_req_cfg_header((_s), (_t), (_n), (_a), \
195 	    MPI_PG_POLL|MPI_PG_EXTENDED, (_h))
196 
197 #define mpi_cfg_page(_s, _a, _h, _r, _p, _l) \
198 	mpi_req_cfg_page((_s), (_a), MPI_PG_POLL, \
199 	    (_h), (_r), (_p), (_l))
200 #define mpi_ecfg_page(_s, _a, _h, _r, _p, _l) \
201 	mpi_req_cfg_page((_s), (_a), MPI_PG_POLL|MPI_PG_EXTENDED, \
202 	    (_h), (_r), (_p), (_l))
203 
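/* split a 64-bit dva into the low and high address words of an SGE */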
204 static inline void
205 mpi_dvatosge(struct mpi_sge *sge, u_int64_t dva)
206 {
207 	htolem32(&sge->sg_addr_lo, dva);
208 	htolem32(&sge->sg_addr_hi, dva >> 32);
209 }
210 
211 int
212 mpi_attach(struct mpi_softc *sc)
213 {
214 	struct scsibus_attach_args	saa;
215 	struct mpi_ccb			*ccb;
216 
217 	printf("\n");
218 
219 	rw_init(&sc->sc_lock, "mpi_lock");
220 	task_set(&sc->sc_evt_rescan, mpi_fc_rescan, sc);
221 
222 	/* disable interrupts */
223 	mpi_write(sc, MPI_INTR_MASK,
224 	    MPI_INTR_MASK_REPLY | MPI_INTR_MASK_DOORBELL);
225 
226 	if (mpi_init(sc) != 0) {
227 		printf("%s: unable to initialise\n", DEVNAME(sc));
228 		return (1);
229 	}
230 
231 	if (mpi_iocfacts(sc) != 0) {
232 		printf("%s: unable to get iocfacts\n", DEVNAME(sc));
233 		return (1);
234 	}
235 
236 	if (mpi_alloc_ccbs(sc) != 0) {
237 		/* error already printed */
238 		return (1);
239 	}
240 
241 	if (mpi_alloc_replies(sc) != 0) {
242 		printf("%s: unable to allocate reply space\n", DEVNAME(sc));
243 		goto free_ccbs;
244 	}
245 
246 	if (mpi_iocinit(sc) != 0) {
247 		printf("%s: unable to send iocinit\n", DEVNAME(sc));
248 		goto free_ccbs;
249 	}
250 
251 	/* spin until we're operational */
252 	if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
253 	    MPI_DOORBELL_STATE_OPER) != 0) {
254 		printf("%s: state: 0x%08x\n", DEVNAME(sc),
255 		    mpi_read_db(sc) & MPI_DOORBELL_STATE);
256 		printf("%s: operational state timeout\n", DEVNAME(sc));
257 		goto free_ccbs;
258 	}
259 
260 	mpi_push_replies(sc);
261 
262 	if (mpi_portfacts(sc) != 0) {
263 		printf("%s: unable to get portfacts\n", DEVNAME(sc));
264 		goto free_replies;
265 	}
266 
267 	if (mpi_cfg_coalescing(sc) != 0) {
268 		printf("%s: unable to configure coalescing\n", DEVNAME(sc));
269 		goto free_replies;
270 	}
271 
272 	switch (sc->sc_porttype) {
273 	case MPI_PORTFACTS_PORTTYPE_SAS:
274 		SIMPLEQ_INIT(&sc->sc_evt_scan_queue);
275 		mtx_init(&sc->sc_evt_scan_mtx, IPL_BIO);
276 		scsi_ioh_set(&sc->sc_evt_scan_handler, &sc->sc_iopool,
277 		    mpi_evt_sas_detach, sc);
278 		/* FALLTHROUGH */
279 	case MPI_PORTFACTS_PORTTYPE_FC:
280 		if (mpi_eventnotify(sc) != 0) {
281 			printf("%s: unable to enable events\n", DEVNAME(sc));
282 			goto free_replies;
283 		}
284 		break;
285 	}
286 
287 	if (mpi_portenable(sc) != 0) {
288 		printf("%s: unable to enable port\n", DEVNAME(sc));
289 		goto free_replies;
290 	}
291 
292 	if (mpi_fwupload(sc) != 0) {
293 		printf("%s: unable to upload firmware\n", DEVNAME(sc));
294 		goto free_replies;
295 	}
296 
297 	if (mpi_manufacturing(sc) != 0) {
298 		printf("%s: unable to fetch manufacturing info\n", DEVNAME(sc));
		goto free_replies;
299 	}
300 
301 	switch (sc->sc_porttype) {
302 	case MPI_PORTFACTS_PORTTYPE_SCSI:
303 		if (mpi_cfg_spi_port(sc) != 0) {
304 			printf("%s: unable to configure spi\n", DEVNAME(sc));
305 			goto free_replies;
306 		}
307 		mpi_squash_ppr(sc);
308 		break;
309 	case MPI_PORTFACTS_PORTTYPE_SAS:
310 		if (mpi_cfg_sas(sc) != 0) {
311 			printf("%s: unable to configure sas\n", DEVNAME(sc));
312 			goto free_replies;
313 		}
314 		break;
315 	case MPI_PORTFACTS_PORTTYPE_FC:
316 		if (mpi_cfg_fc(sc) != 0) {
317 			printf("%s: unable to configure fc\n", DEVNAME(sc));
318 			goto free_replies;
319 		}
320 		break;
321 	}
322 
323 	/* get raid pages */
324 	mpi_get_raid(sc);
325 #if NBIO > 0
326 	if (sc->sc_flags & MPI_F_RAID) {
327 		if (bio_register(&sc->sc_dev, mpi_ioctl) != 0)
328 			panic("%s: controller registration failed",
329 			    DEVNAME(sc));
330 		else {
331 			if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC,
332 			    2, 0, &sc->sc_cfg_hdr) != 0) {
333 				panic("%s: can't get IOC page 2 hdr",
334 				    DEVNAME(sc));
335 			}
336 
337 			sc->sc_vol_page = mallocarray(sc->sc_cfg_hdr.page_length,
338 			    4, M_TEMP, M_WAITOK | M_CANFAIL);
339 			if (sc->sc_vol_page == NULL) {
340 				panic("%s: can't get memory for IOC page 2, "
341 				    "bio disabled", DEVNAME(sc));
342 			}
343 
344 			if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1,
345 			    sc->sc_vol_page,
346 			    sc->sc_cfg_hdr.page_length * 4) != 0) {
347 				panic("%s: can't get IOC page 2", DEVNAME(sc));
348 			}
349 
350 			sc->sc_vol_list = (struct mpi_cfg_raid_vol *)
351 			    (sc->sc_vol_page + 1);
352 
353 			sc->sc_ioctl = mpi_ioctl;
354 		}
355 	}
356 #endif /* NBIO > 0 */
357 
358 	/* we should be good to go now, attach scsibus */
359 	sc->sc_link.adapter = &mpi_switch;
360 	sc->sc_link.adapter_softc = sc;
361 	sc->sc_link.adapter_target = sc->sc_target;
362 	sc->sc_link.adapter_buswidth = sc->sc_buswidth;
363 	sc->sc_link.openings = sc->sc_maxcmds - 1;
364 	sc->sc_link.pool = &sc->sc_iopool;
365 
366 	memset(&saa, 0, sizeof(saa));
367 	saa.saa_sc_link = &sc->sc_link;
368 
369 	/* config_found() returns the scsibus attached to us */
370 	sc->sc_scsibus = (struct scsibus_softc *) config_found(&sc->sc_dev,
371 	    &saa, scsiprint);
372 
373 	/* do domain validation */
374 	if (sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_SCSI)
375 		mpi_run_ppr(sc);
376 
377 	/* enable interrupts */
378 	mpi_write(sc, MPI_INTR_MASK, MPI_INTR_MASK_DOORBELL);
379 
380 #if NBIO > 0
381 #ifndef SMALL_KERNEL
382 	mpi_create_sensors(sc);
383 #endif /* SMALL_KERNEL */
384 #endif /* NBIO > 0 */
385 
386 	return (0);
387 
388 free_replies:
389 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 0,
390 	    sc->sc_repq * MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD);
391 	mpi_dmamem_free(sc, sc->sc_replies);
392 free_ccbs:
393 	while ((ccb = mpi_get_ccb(sc)) != NULL)
394 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
395 	mpi_dmamem_free(sc, sc->sc_requests);
396 	free(sc->sc_ccbs, M_DEVBUF, 0);
397 
398 	return(1);
399 }
400 
401 int
402 mpi_cfg_spi_port(struct mpi_softc *sc)
403 {
404 	struct mpi_cfg_hdr		hdr;
405 	struct mpi_cfg_spi_port_pg1	port;
406 
407 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 1, 0x0,
408 	    &hdr) != 0)
409 		return (1);
410 
411 	if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port, sizeof(port)) != 0)
412 		return (1);
413 
414 	DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_spi_port_pg1\n", DEVNAME(sc));
415 	DNPRINTF(MPI_D_MISC, "%s:  port_scsi_id: %d port_resp_ids 0x%04x\n",
416 	    DEVNAME(sc), port.port_scsi_id, letoh16(port.port_resp_ids));
417 	DNPRINTF(MPI_D_MISC, "%s:  on_bus_timer_value: 0x%08x\n", DEVNAME(sc),
418 	    letoh32(port.on_bus_timer_value));
419 	DNPRINTF(MPI_D_MISC, "%s:  target_config: 0x%02x id_config: 0x%04x\n",
420 	    DEVNAME(sc), port.target_config, letoh16(port.id_config));
421 
422 	if (port.port_scsi_id == sc->sc_target &&
423 	    port.port_resp_ids == htole16(1 << sc->sc_target) &&
424 	    port.on_bus_timer_value != htole32(0x0))
425 		return (0);
426 
427 	DNPRINTF(MPI_D_MISC, "%s: setting port scsi id to %d\n", DEVNAME(sc),
428 	    sc->sc_target);
429 	port.port_scsi_id = sc->sc_target;
430 	port.port_resp_ids = htole16(1 << sc->sc_target);
431 	port.on_bus_timer_value = htole32(0x07000000); /* XXX magic */
432 
433 	if (mpi_cfg_page(sc, 0x0, &hdr, 0, &port, sizeof(port)) != 0) {
434 		printf("%s: unable to configure port scsi id\n", DEVNAME(sc));
435 		return (1);
436 	}
437 
438 	return (0);
439 }
440 
441 void
442 mpi_squash_ppr(struct mpi_softc *sc)
443 {
444 	struct mpi_cfg_hdr		hdr;
445 	struct mpi_cfg_spi_dev_pg1	page;
446 	int				i;
447 
448 	DNPRINTF(MPI_D_PPR, "%s: mpi_squash_ppr\n", DEVNAME(sc));
449 
450 	for (i = 0; i < sc->sc_buswidth; i++) {
451 		if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV,
452 		    1, i, &hdr) != 0)
453 			return;
454 
455 		if (mpi_cfg_page(sc, i, &hdr, 1, &page, sizeof(page)) != 0)
456 			return;
457 
458 		DNPRINTF(MPI_D_PPR, "%s:  target: %d req_params1: 0x%02x "
459 		    "req_offset: 0x%02x req_period: 0x%02x "
460 		    "req_params2: 0x%02x conf: 0x%08x\n", DEVNAME(sc), i,
461 		    page.req_params1, page.req_offset, page.req_period,
462 		    page.req_params2, letoh32(page.configuration));
463 
464 		page.req_params1 = 0x0;
465 		page.req_offset = 0x0;
466 		page.req_period = 0x0;
467 		page.req_params2 = 0x0;
468 		page.configuration = htole32(0x0);
469 
470 		if (mpi_cfg_page(sc, i, &hdr, 0, &page, sizeof(page)) != 0)
471 			return;
472 	}
473 }
474 
475 void
476 mpi_run_ppr(struct mpi_softc *sc)
477 {
478 	struct mpi_cfg_hdr		hdr;
479 	struct mpi_cfg_spi_port_pg0	port_pg;
480 	struct mpi_cfg_ioc_pg3		*physdisk_pg;
481 	struct mpi_cfg_raid_physdisk	*physdisk_list, *physdisk;
482 	size_t				pagelen;
483 	struct scsi_link		*link;
484 	int				i, tries;
485 
486 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 0, 0x0,
487 	    &hdr) != 0) {
488 		DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch header\n",
489 		    DEVNAME(sc));
490 		return;
491 	}
492 
493 	if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port_pg, sizeof(port_pg)) != 0) {
494 		DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch page\n",
495 		    DEVNAME(sc));
496 		return;
497 	}
498 
499 	for (i = 0; i < sc->sc_buswidth; i++) {
500 		link = scsi_get_link(sc->sc_scsibus, i, 0);
501 		if (link == NULL)
502 			continue;
503 
504 		/* do not ppr volumes */
505 		if (link->flags & SDEV_VIRTUAL)
506 			continue;
507 
508 		tries = 0;
509 		while (mpi_ppr(sc, link, NULL, port_pg.min_period,
510 		    port_pg.max_offset, tries) == EAGAIN)
511 			tries++;
512 	}
513 
514 	if ((sc->sc_flags & MPI_F_RAID) == 0)
515 		return;
516 
517 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 3, 0x0,
518 	    &hdr) != 0) {
519 		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
520 		    "fetch ioc pg 3 header\n", DEVNAME(sc));
521 		return;
522 	}
523 
524 	pagelen = hdr.page_length * 4; /* dwords to bytes */
525 	physdisk_pg = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL);
526 	if (physdisk_pg == NULL) {
527 		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
528 		    "allocate ioc pg 3\n", DEVNAME(sc));
529 		return;
530 	}
531 	physdisk_list = (struct mpi_cfg_raid_physdisk *)(physdisk_pg + 1);
532 
533 	if (mpi_cfg_page(sc, 0, &hdr, 1, physdisk_pg, pagelen) != 0) {
534 		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
535 		    "fetch ioc page 3\n", DEVNAME(sc));
536 		goto out;
537 	}
538 
539 	DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s:  no_phys_disks: %d\n", DEVNAME(sc),
540 	    physdisk_pg->no_phys_disks);
541 
542 	for (i = 0; i < physdisk_pg->no_phys_disks; i++) {
543 		physdisk = &physdisk_list[i];
544 
545 		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s:  id: %d bus: %d ioc: %d "
546 		    "num: %d\n", DEVNAME(sc), physdisk->phys_disk_id,
547 		    physdisk->phys_disk_bus, physdisk->phys_disk_ioc,
548 		    physdisk->phys_disk_num);
549 
550 		if (physdisk->phys_disk_ioc != sc->sc_ioc_number)
551 			continue;
552 
553 		tries = 0;
554 		while (mpi_ppr(sc, NULL, physdisk, port_pg.min_period,
555 		    port_pg.max_offset, tries) == EAGAIN)
556 			tries++;
557 	}
558 
559 out:
560 	free(physdisk_pg, M_TEMP, pagelen);
561 }
562 
563 int
564 mpi_ppr(struct mpi_softc *sc, struct scsi_link *link,
565     struct mpi_cfg_raid_physdisk *physdisk, int period, int offset, int try)
566 {
567 	struct mpi_cfg_hdr		hdr0, hdr1;
568 	struct mpi_cfg_spi_dev_pg0	pg0;
569 	struct mpi_cfg_spi_dev_pg1	pg1;
570 	u_int32_t			address;
571 	int				id;
572 	int				raid = 0;
573 
574 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr period: %d offset: %d try: %d "
575 	    "link quirks: 0x%x\n", DEVNAME(sc), period, offset, try,
576 	    link->quirks);
577 
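	/* retry negotiation at successively slower speeds, then give up */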
578 	if (try >= 3)
579 		return (EIO);
580 
581 	if (physdisk == NULL) {
582 		if ((link->inqdata.device & SID_TYPE) == T_PROCESSOR)
583 			return (EIO);
584 
585 		address = link->target;
586 		id = link->target;
587 	} else {
588 		raid = 1;
589 		address = (physdisk->phys_disk_bus << 8) |
590 		    (physdisk->phys_disk_id);
591 		id = physdisk->phys_disk_num;
592 	}
593 
594 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 0,
595 	    address, &hdr0) != 0) {
596 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 0\n",
597 		    DEVNAME(sc));
598 		return (EIO);
599 	}
600 
601 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 1,
602 	    address, &hdr1) != 0) {
603 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 1\n",
604 		    DEVNAME(sc));
605 		return (EIO);
606 	}
607 
608 #ifdef MPI_DEBUG
609 	if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0)) != 0) {
610 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 0\n",
611 		    DEVNAME(sc));
612 		return (EIO);
613 	}
614 
615 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x "
616 	    "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x "
617 	    "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset,
618 	    pg0.neg_period, pg0.neg_params2, letoh32(pg0.information));
619 #endif
620 
621 	if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1)) != 0) {
622 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 1\n",
623 		    DEVNAME(sc));
624 		return (EIO);
625 	}
626 
627 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
628 	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
629 	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
630 	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
631 
632 	pg1.req_params1 = 0;
633 	pg1.req_offset = offset;
634 	pg1.req_period = period;
635 	pg1.req_params2 &= ~MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH;
636 
637 	if (raid || !(link->quirks & SDEV_NOSYNC)) {
638 		pg1.req_params2 |= MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH_WIDE;
639 
640 		switch (try) {
641 		case 0: /* U320 */
642 			break;
643 		case 1: /* U160 */
644 			pg1.req_period = 0x09;
645 			break;
646 		case 2: /* U80 */
647 			pg1.req_period = 0x0a;
648 			break;
649 		}
650 
651 		if (pg1.req_period < 0x09) {
652 			/* Ultra320: enable QAS & PACKETIZED */
653 			pg1.req_params1 |= MPI_CFG_SPI_DEV_1_REQPARAMS_QAS |
654 			    MPI_CFG_SPI_DEV_1_REQPARAMS_PACKETIZED;
655 		}
656 		if (pg1.req_period < 0xa) {
657 			/* >= Ultra160: enable dual xfers */
658 			pg1.req_params1 |=
659 			    MPI_CFG_SPI_DEV_1_REQPARAMS_DUALXFERS;
660 		}
661 	}
662 
663 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
664 	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
665 	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
666 	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
667 
668 	if (mpi_cfg_page(sc, address, &hdr1, 0, &pg1, sizeof(pg1)) != 0) {
669 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to write page 1\n",
670 		    DEVNAME(sc));
671 		return (EIO);
672 	}
673 
674 	if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1)) != 0) {
675 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 1\n",
676 		    DEVNAME(sc));
677 		return (EIO);
678 	}
679 
680 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
681 	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
682 	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
683 	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
684 
685 	if (mpi_inq(sc, id, raid) != 0) {
686 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to do inquiry against "
687 		    "target %d\n", DEVNAME(sc), link->target);
688 		return (EIO);
689 	}
690 
691 	if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0)) != 0) {
692 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 0 after "
693 		    "inquiry\n", DEVNAME(sc));
694 		return (EIO);
695 	}
696 
697 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x "
698 	    "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x "
699 	    "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset,
700 	    pg0.neg_period, pg0.neg_params2, letoh32(pg0.information));
701 
702 	if (!(lemtoh32(&pg0.information) & 0x07) && (try == 0)) {
703 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U320 ppr rejected\n",
704 		    DEVNAME(sc));
705 		return (EAGAIN);
706 	}
707 
708 	if ((((lemtoh32(&pg0.information) >> 8) & 0xff) > 0x09) && (try == 1)) {
709 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U160 ppr rejected\n",
710 		    DEVNAME(sc));
711 		return (EAGAIN);
712 	}
713 
714 	if (lemtoh32(&pg0.information) & 0x0e) {
715 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr ppr rejected: %0x\n",
716 		    DEVNAME(sc), lemtoh32(&pg0.information));
717 		return (EAGAIN);
718 	}
719 
720 	switch(pg0.neg_period) {
721 	case 0x08:
722 		period = 160;
723 		break;
724 	case 0x09:
725 		period = 80;
726 		break;
727 	case 0x0a:
728 		period = 40;
729 		break;
730 	case 0x0b:
731 		period = 20;
732 		break;
733 	case 0x0c:
734 		period = 10;
735 		break;
736 	default:
737 		period = 0;
738 		break;
739 	}
740 
741 	printf("%s: %s %d %s at %dMHz width %dbit offset %d "
742 	    "QAS %d DT %d IU %d\n", DEVNAME(sc), raid ? "phys disk" : "target",
743 	    id, period ? "Sync" : "Async", period,
744 	    (pg0.neg_params2 & MPI_CFG_SPI_DEV_0_NEGPARAMS_WIDTH_WIDE) ? 16 : 8,
745 	    pg0.neg_offset,
746 	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_QAS) ? 1 : 0,
747 	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_DUALXFERS) ? 1 : 0,
748 	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_PACKETIZED) ? 1 : 0);
749 
750 	return (0);
751 }
752 
753 int
754 mpi_inq(struct mpi_softc *sc, u_int16_t target, int physdisk)
755 {
756 	struct mpi_ccb			*ccb;
757 	struct scsi_inquiry		inq;
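	/* the request, SGE, inquiry data and sense buffer share one frame */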
758 	struct inq_bundle {
759 		struct mpi_msg_scsi_io		io;
760 		struct mpi_sge			sge;
761 		struct scsi_inquiry_data	inqbuf;
762 		struct scsi_sense_data		sense;
763 	} __packed			*bundle;
764 	struct mpi_msg_scsi_io		*io;
765 	struct mpi_sge			*sge;
766 
767 	DNPRINTF(MPI_D_PPR, "%s: mpi_inq\n", DEVNAME(sc));
768 
769 	memset(&inq, 0, sizeof(inq));
770 	inq.opcode = INQUIRY;
771 	_lto2b(sizeof(struct scsi_inquiry_data), inq.length);
772 
773 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
774 	if (ccb == NULL)
775 		return (1);
776 
777 	ccb->ccb_done = mpi_empty_done;
778 
779 	bundle = ccb->ccb_cmd;
780 	io = &bundle->io;
781 	sge = &bundle->sge;
782 
783 	io->function = physdisk ? MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH :
784 	    MPI_FUNCTION_SCSI_IO_REQUEST;
785 	/*
786 	 * bus is always 0
787 	 * io->bus = htole16(sc->sc_bus);
788 	 */
789 	io->target_id = target;
790 
791 	io->cdb_length = sizeof(inq);
792 	io->sense_buf_len = sizeof(struct scsi_sense_data);
793 	io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64;
794 
795 	/*
796 	 * always lun 0
797 	 * io->lun[0] = htobe16(link->lun);
798 	 */
799 
800 	io->direction = MPI_SCSIIO_DIR_READ;
801 	io->tagging = MPI_SCSIIO_ATTR_NO_DISCONNECT;
802 
803 	memcpy(io->cdb, &inq, sizeof(inq));
804 
805 	htolem32(&io->data_length, sizeof(struct scsi_inquiry_data));
806 
807 	htolem32(&io->sense_buf_low_addr, ccb->ccb_cmd_dva +
808 	    offsetof(struct inq_bundle, sense));
809 
810 	htolem32(&sge->sg_hdr, MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64 |
811 	    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL |
812 	    (u_int32_t)sizeof(inq));
813 
814 	mpi_dvatosge(sge, ccb->ccb_cmd_dva +
815 	    offsetof(struct inq_bundle, inqbuf));
816 
817 	if (mpi_poll(sc, ccb, 5000) != 0)
818 		return (1);
819 
820 	if (ccb->ccb_rcb != NULL)
821 		mpi_push_reply(sc, ccb->ccb_rcb);
822 
823 	scsi_io_put(&sc->sc_iopool, ccb);
824 
825 	return (0);
826 }
827 
828 int
829 mpi_cfg_sas(struct mpi_softc *sc)
830 {
831 	struct mpi_ecfg_hdr		ehdr;
832 	struct mpi_cfg_sas_iou_pg1	*pg;
833 	size_t				pagelen;
834 	int				rv = 0;
835 
836 	if (mpi_ecfg_header(sc, MPI_CONFIG_REQ_EXTPAGE_TYPE_SAS_IO_UNIT, 1, 0,
837 	    &ehdr) != 0)
838 		return (0);
839 
840 	pagelen = lemtoh16(&ehdr.ext_page_length) * 4;
841 	pg = malloc(pagelen, M_TEMP, M_NOWAIT | M_ZERO);
842 	if (pg == NULL)
843 		return (ENOMEM);
844 
845 	if (mpi_ecfg_page(sc, 0, &ehdr, 1, pg, pagelen) != 0)
846 		goto out;
847 
848 	if (pg->max_sata_q_depth != 32) {
849 		pg->max_sata_q_depth = 32;
850 
851 		if (mpi_ecfg_page(sc, 0, &ehdr, 0, pg, pagelen) != 0)
852 			goto out;
853 	}
854 
855 out:
856 	free(pg, M_TEMP, pagelen);
857 	return (rv);
858 }
859 
860 int
861 mpi_cfg_fc(struct mpi_softc *sc)
862 {
863 	struct mpi_cfg_hdr		hdr;
864 	struct mpi_cfg_fc_port_pg0	pg0;
865 	struct mpi_cfg_fc_port_pg1	pg1;
866 
867 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 0, 0,
868 	    &hdr) != 0) {
869 		printf("%s: unable to fetch FC port header 0\n", DEVNAME(sc));
870 		return (1);
871 	}
872 
873 	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg0, sizeof(pg0)) != 0) {
874 		printf("%s: unable to fetch FC port page 0\n", DEVNAME(sc));
875 		return (1);
876 	}
877 
878 	sc->sc_link.port_wwn = letoh64(pg0.wwpn);
879 	sc->sc_link.node_wwn = letoh64(pg0.wwnn);
880 
881 	/* configure the port more to our liking */
882 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 1, 0,
883 	    &hdr) != 0) {
884 		printf("%s: unable to fetch FC port header 1\n", DEVNAME(sc));
885 		return (1);
886 	}
887 
888 	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg1, sizeof(pg1)) != 0) {
889 		printf("%s: unable to fetch FC port page 1\n", DEVNAME(sc));
890 		return (1);
891 	}
892 
893 	SET(pg1.flags, htole32(MPI_CFG_FC_PORT_0_FLAGS_IMMEDIATE_ERROR |
894 	    MPI_CFG_FC_PORT_0_FLAGS_VERBOSE_RESCAN));
895 
896 	if (mpi_cfg_page(sc, 0, &hdr, 0, &pg1, sizeof(pg1)) != 0) {
897 		printf("%s: unable to set FC port page 1\n", DEVNAME(sc));
898 		return (1);
899 	}
900 
901 	return (0);
902 }
903 
904 void
905 mpi_detach(struct mpi_softc *sc)
906 {
907 
908 }
909 
910 int
911 mpi_intr(void *arg)
912 {
913 	struct mpi_softc		*sc = arg;
914 	u_int32_t			reg;
915 	int				rv = 0;
916 
917 	if ((mpi_read_intr(sc) & MPI_INTR_STATUS_REPLY) == 0)
918 		return (rv);
919 
920 	while ((reg = mpi_pop_reply(sc)) != 0xffffffff) {
921 		mpi_reply(sc, reg);
922 		rv = 1;
923 	}
924 
925 	return (rv);
926 }
927 
928 void
929 mpi_reply(struct mpi_softc *sc, u_int32_t reg)
930 {
931 	struct mpi_ccb			*ccb;
932 	struct mpi_rcb			*rcb = NULL;
933 	struct mpi_msg_reply		*reply = NULL;
934 	u_int32_t			reply_dva;
935 	int				id;
936 	int				i;
937 
938 	DNPRINTF(MPI_D_INTR, "%s: mpi_reply reg: 0x%08x\n", DEVNAME(sc), reg);
939 
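	/*
	 * address replies carry the reply frame dva shifted right by one;
	 * shift it back and use its offset to find the matching rcb.
	 */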
940 	if (reg & MPI_REPLY_QUEUE_ADDRESS) {
941 		reply_dva = (reg & MPI_REPLY_QUEUE_ADDRESS_MASK) << 1;
942 		i = (reply_dva - (u_int32_t)MPI_DMA_DVA(sc->sc_replies)) /
943 		    MPI_REPLY_SIZE;
944 		rcb = &sc->sc_rcbs[i];
945 
946 		bus_dmamap_sync(sc->sc_dmat,
947 		    MPI_DMA_MAP(sc->sc_replies), rcb->rcb_offset,
948 		    MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD);
949 
950 		reply = rcb->rcb_reply;
951 
952 		id = lemtoh32(&reply->msg_context);
953 	} else {
954 		switch (reg & MPI_REPLY_QUEUE_TYPE_MASK) {
955 		case MPI_REPLY_QUEUE_TYPE_INIT:
956 			id = reg & MPI_REPLY_QUEUE_CONTEXT;
957 			break;
958 
959 		default:
960 			panic("%s: unsupported context reply",
961 			    DEVNAME(sc));
962 		}
963 	}
964 
965 	DNPRINTF(MPI_D_INTR, "%s: mpi_reply id: %d reply: %p\n",
966 	    DEVNAME(sc), id, reply);
967 
968 	ccb = &sc->sc_ccbs[id];
969 
970 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests),
971 	    ccb->ccb_offset, MPI_REQUEST_SIZE,
972 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
973 	ccb->ccb_state = MPI_CCB_READY;
974 	ccb->ccb_rcb = rcb;
975 
976 	ccb->ccb_done(ccb);
977 }
978 
979 struct mpi_dmamem *
980 mpi_dmamem_alloc(struct mpi_softc *sc, size_t size)
981 {
982 	struct mpi_dmamem		*mdm;
983 	int				nsegs;
984 
985 	mdm = malloc(sizeof(struct mpi_dmamem), M_DEVBUF, M_NOWAIT | M_ZERO);
986 	if (mdm == NULL)
987 		return (NULL);
988 
989 	mdm->mdm_size = size;
990 
991 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
992 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
993 		goto mdmfree;
994 
995 	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg,
996 	    1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
997 		goto destroy;
998 
999 	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
1000 	    &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0)
1001 		goto free;
1002 
1003 	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
1004 	    NULL, BUS_DMA_NOWAIT) != 0)
1005 		goto unmap;
1006 
1007 	DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_alloc size: %d mdm: %#x "
1008 	    "map: %#x nsegs: %d segs: %#x kva: %x\n",
1009 	    DEVNAME(sc), size, mdm, mdm->mdm_map, nsegs, mdm->mdm_seg, mdm->mdm_kva);
1010 
1011 	return (mdm);
1012 
1013 unmap:
1014 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
1015 free:
1016 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
1017 destroy:
1018 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
1019 mdmfree:
1020 	free(mdm, M_DEVBUF, sizeof *mdm);
1021 
1022 	return (NULL);
1023 }
1024 
1025 void
1026 mpi_dmamem_free(struct mpi_softc *sc, struct mpi_dmamem *mdm)
1027 {
1028 	DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_free %#x\n", DEVNAME(sc), mdm);
1029 
1030 	bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map);
1031 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
1032 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
1033 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
1034 	free(mdm, M_DEVBUF, sizeof *mdm);
1035 }
1036 
1037 int
1038 mpi_alloc_ccbs(struct mpi_softc *sc)
1039 {
1040 	struct mpi_ccb			*ccb;
1041 	u_int8_t			*cmd;
1042 	int				i;
1043 
1044 	SLIST_INIT(&sc->sc_ccb_free);
1045 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
1046 
1047 	sc->sc_ccbs = mallocarray(sc->sc_maxcmds, sizeof(struct mpi_ccb),
1048 	    M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
1049 	if (sc->sc_ccbs == NULL) {
1050 		printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
1051 		return (1);
1052 	}
1053 
1054 	sc->sc_requests = mpi_dmamem_alloc(sc,
1055 	    MPI_REQUEST_SIZE * sc->sc_maxcmds);
1056 	if (sc->sc_requests == NULL) {
1057 		printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
1058 		goto free_ccbs;
1059 	}
1060 	cmd = MPI_DMA_KVA(sc->sc_requests);
1061 	memset(cmd, 0, MPI_REQUEST_SIZE * sc->sc_maxcmds);
1062 
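	/* each ccb owns a fixed slot in the request dmamem */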
1063 	for (i = 0; i < sc->sc_maxcmds; i++) {
1064 		ccb = &sc->sc_ccbs[i];
1065 
1066 		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS,
1067 		    sc->sc_max_sgl_len, MAXPHYS, 0,
1068 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1069 		    &ccb->ccb_dmamap) != 0) {
1070 			printf("%s: unable to create dma map\n", DEVNAME(sc));
1071 			goto free_maps;
1072 		}
1073 
1074 		ccb->ccb_sc = sc;
1075 		ccb->ccb_id = i;
1076 		ccb->ccb_offset = MPI_REQUEST_SIZE * i;
1077 		ccb->ccb_state = MPI_CCB_READY;
1078 
1079 		ccb->ccb_cmd = &cmd[ccb->ccb_offset];
1080 		ccb->ccb_cmd_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_requests) +
1081 		    ccb->ccb_offset;
1082 
1083 		DNPRINTF(MPI_D_CCB, "%s: mpi_alloc_ccbs(%d) ccb: %#x map: %#x "
1084 		    "sc: %#x id: %#x offs: %#x cmd: %#x dva: %#x\n",
1085 		    DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc,
1086 		    ccb->ccb_id, ccb->ccb_offset, ccb->ccb_cmd,
1087 		    ccb->ccb_cmd_dva);
1088 
1089 		mpi_put_ccb(sc, ccb);
1090 	}
1091 
1092 	scsi_iopool_init(&sc->sc_iopool, sc, mpi_get_ccb, mpi_put_ccb);
1093 
1094 	return (0);
1095 
1096 free_maps:
1097 	while ((ccb = mpi_get_ccb(sc)) != NULL)
1098 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
1099 
1100 	mpi_dmamem_free(sc, sc->sc_requests);
1101 free_ccbs:
1102 	free(sc->sc_ccbs, M_DEVBUF, 0);
1103 
1104 	return (1);
1105 }
1106 
1107 void *
1108 mpi_get_ccb(void *xsc)
1109 {
1110 	struct mpi_softc		*sc = xsc;
1111 	struct mpi_ccb			*ccb;
1112 
1113 	mtx_enter(&sc->sc_ccb_mtx);
1114 	ccb = SLIST_FIRST(&sc->sc_ccb_free);
1115 	if (ccb != NULL) {
1116 		SLIST_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link);
1117 		ccb->ccb_state = MPI_CCB_READY;
1118 	}
1119 	mtx_leave(&sc->sc_ccb_mtx);
1120 
1121 	DNPRINTF(MPI_D_CCB, "%s: mpi_get_ccb %p\n", DEVNAME(sc), ccb);
1122 
1123 	return (ccb);
1124 }
1125 
1126 void
1127 mpi_put_ccb(void *xsc, void *io)
1128 {
1129 	struct mpi_softc		*sc = xsc;
1130 	struct mpi_ccb			*ccb = io;
1131 
1132 	DNPRINTF(MPI_D_CCB, "%s: mpi_put_ccb %p\n", DEVNAME(sc), ccb);
1133 
1134 #ifdef DIAGNOSTIC
1135 	if (ccb->ccb_state == MPI_CCB_FREE)
1136 		panic("mpi_put_ccb: double free");
1137 #endif
1138 
1139 	ccb->ccb_state = MPI_CCB_FREE;
1140 	ccb->ccb_cookie = NULL;
1141 	ccb->ccb_done = NULL;
1142 	memset(ccb->ccb_cmd, 0, MPI_REQUEST_SIZE);
1143 	mtx_enter(&sc->sc_ccb_mtx);
1144 	SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link);
1145 	mtx_leave(&sc->sc_ccb_mtx);
1146 }
1147 
1148 int
1149 mpi_alloc_replies(struct mpi_softc *sc)
1150 {
1151 	DNPRINTF(MPI_D_MISC, "%s: mpi_alloc_replies\n", DEVNAME(sc));
1152 
1153 	sc->sc_rcbs = mallocarray(sc->sc_repq, sizeof(struct mpi_rcb), M_DEVBUF,
1154 	    M_WAITOK|M_CANFAIL);
1155 	if (sc->sc_rcbs == NULL)
1156 		return (1);
1157 
1158 	sc->sc_replies = mpi_dmamem_alloc(sc, sc->sc_repq * MPI_REPLY_SIZE);
1159 	if (sc->sc_replies == NULL) {
1160 		free(sc->sc_rcbs, M_DEVBUF, 0);
1161 		return (1);
1162 	}
1163 
1164 	return (0);
1165 }
1166 
1167 void
1168 mpi_push_reply(struct mpi_softc *sc, struct mpi_rcb *rcb)
1169 {
1170 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies),
1171 	    rcb->rcb_offset, MPI_REPLY_SIZE, BUS_DMASYNC_PREREAD);
1172 	mpi_push_reply_db(sc, rcb->rcb_reply_dva);
1173 }
1174 
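/* hand all reply frames to the IOC by posting their dvas on the reply queue */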
1175 void
1176 mpi_push_replies(struct mpi_softc *sc)
1177 {
1178 	struct mpi_rcb			*rcb;
1179 	char				*kva = MPI_DMA_KVA(sc->sc_replies);
1180 	int				i;
1181 
1182 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 0,
1183 	    sc->sc_repq * MPI_REPLY_SIZE, BUS_DMASYNC_PREREAD);
1184 
1185 	for (i = 0; i < sc->sc_repq; i++) {
1186 		rcb = &sc->sc_rcbs[i];
1187 
1188 		rcb->rcb_reply = kva + MPI_REPLY_SIZE * i;
1189 		rcb->rcb_offset = MPI_REPLY_SIZE * i;
1190 		rcb->rcb_reply_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_replies) +
1191 		    MPI_REPLY_SIZE * i;
1192 		mpi_push_reply_db(sc, rcb->rcb_reply_dva);
1193 	}
1194 }
1195 
1196 void
1197 mpi_start(struct mpi_softc *sc, struct mpi_ccb *ccb)
1198 {
1199 	struct mpi_msg_request *msg;
1200 
1201 	DNPRINTF(MPI_D_RW, "%s: mpi_start %#x\n", DEVNAME(sc),
1202 	    ccb->ccb_cmd_dva);
1203 
1204 	msg = ccb->ccb_cmd;
1205 	htolem32(&msg->msg_context, ccb->ccb_id);
1206 
1207 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests),
1208 	    ccb->ccb_offset, MPI_REQUEST_SIZE,
1209 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1210 
1211 	ccb->ccb_state = MPI_CCB_QUEUED;
1212 	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1213 	    MPI_REQ_QUEUE, ccb->ccb_cmd_dva);
1214 }
1215 
1216 int
1217 mpi_poll(struct mpi_softc *sc, struct mpi_ccb *ccb, int timeout)
1218 {
1219 	void				(*done)(struct mpi_ccb *);
1220 	void				*cookie;
1221 	int				rv = 1;
1222 	u_int32_t			reg;
1223 
1224 	DNPRINTF(MPI_D_INTR, "%s: mpi_poll timeout %d\n", DEVNAME(sc),
1225 	    timeout);
1226 
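	/*
	 * swap in a done handler that clears rv, then spin on the reply
	 * queue ourselves rather than waiting for the interrupt handler.
	 */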
1227 	done = ccb->ccb_done;
1228 	cookie = ccb->ccb_cookie;
1229 
1230 	ccb->ccb_done = mpi_poll_done;
1231 	ccb->ccb_cookie = &rv;
1232 
1233 	mpi_start(sc, ccb);
1234 	while (rv == 1) {
1235 		reg = mpi_pop_reply(sc);
1236 		if (reg == 0xffffffff) {
1237 			if (timeout-- == 0) {
1238 				printf("%s: timeout\n", DEVNAME(sc));
1239 				goto timeout;
1240 			}
1241 
1242 			delay(1000);
1243 			continue;
1244 		}
1245 
1246 		mpi_reply(sc, reg);
1247 	}
1248 
1249 	ccb->ccb_cookie = cookie;
1250 	done(ccb);
1251 
1252 timeout:
1253 	return (rv);
1254 }
1255 
1256 void
1257 mpi_poll_done(struct mpi_ccb *ccb)
1258 {
1259 	int				*rv = ccb->ccb_cookie;
1260 
1261 	*rv = 0;
1262 }
1263 
1264 void
1265 mpi_wait(struct mpi_softc *sc, struct mpi_ccb *ccb)
1266 {
1267 	struct mutex			cookie = MUTEX_INITIALIZER(IPL_BIO);
1268 	void				(*done)(struct mpi_ccb *);
1269 
1270 	done = ccb->ccb_done;
1271 	ccb->ccb_done = mpi_wait_done;
1272 	ccb->ccb_cookie = &cookie;
1273 
1274 	/* XXX this will wait forever for the ccb to complete */
1275 
1276 	mpi_start(sc, ccb);
1277 
1278 	mtx_enter(&cookie);
1279 	while (ccb->ccb_cookie != NULL)
1280 		msleep_nsec(ccb, &cookie, PRIBIO, "mpiwait", INFSLP);
1281 	mtx_leave(&cookie);
1282 
1283 	done(ccb);
1284 }
1285 
1286 void
1287 mpi_wait_done(struct mpi_ccb *ccb)
1288 {
1289 	struct mutex			*cookie = ccb->ccb_cookie;
1290 
1291 	mtx_enter(cookie);
1292 	ccb->ccb_cookie = NULL;
1293 	wakeup_one(ccb);
1294 	mtx_leave(cookie);
1295 }
1296 
1297 void
1298 mpi_scsi_cmd(struct scsi_xfer *xs)
1299 {
1300 	struct scsi_link		*link = xs->sc_link;
1301 	struct mpi_softc		*sc = link->adapter_softc;
1302 	struct mpi_ccb			*ccb;
1303 	struct mpi_ccb_bundle		*mcb;
1304 	struct mpi_msg_scsi_io		*io;
1305 
1306 	DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd\n", DEVNAME(sc));
1307 
1308 	KERNEL_UNLOCK();
1309 
1310 	if (xs->cmdlen > MPI_CDB_LEN) {
1311 		DNPRINTF(MPI_D_CMD, "%s: CDB too big %d\n",
1312 		    DEVNAME(sc), xs->cmdlen);
1313 		memset(&xs->sense, 0, sizeof(xs->sense));
1314 		xs->sense.error_code = SSD_ERRCODE_VALID | SSD_ERRCODE_CURRENT;
1315 		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
1316 		xs->sense.add_sense_code = 0x20;
1317 		xs->error = XS_SENSE;
1318 		goto done;
1319 	}
1320 
1321 	ccb = xs->io;
1322 
1323 	DNPRINTF(MPI_D_CMD, "%s: ccb_id: %d xs->flags: 0x%x\n",
1324 	    DEVNAME(sc), ccb->ccb_id, xs->flags);
1325 
1326 	ccb->ccb_cookie = xs;
1327 	ccb->ccb_done = mpi_scsi_cmd_done;
1328 
1329 	mcb = ccb->ccb_cmd;
1330 	io = &mcb->mcb_io;
1331 
1332 	io->function = MPI_FUNCTION_SCSI_IO_REQUEST;
1333 	/*
1334 	 * bus is always 0
1335 	 * io->bus = htole16(sc->sc_bus);
1336 	 */
1337 	io->target_id = link->target;
1338 
1339 	io->cdb_length = xs->cmdlen;
1340 	io->sense_buf_len = sizeof(xs->sense);
1341 	io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64;
1342 
1343 	htobem16(&io->lun[0], link->lun);
1344 
1345 	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
1346 	case SCSI_DATA_IN:
1347 		io->direction = MPI_SCSIIO_DIR_READ;
1348 		break;
1349 	case SCSI_DATA_OUT:
1350 		io->direction = MPI_SCSIIO_DIR_WRITE;
1351 		break;
1352 	default:
1353 		io->direction = MPI_SCSIIO_DIR_NONE;
1354 		break;
1355 	}
1356 
1357 	if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SCSI &&
1358 	    (link->quirks & SDEV_NOTAGS))
1359 		io->tagging = MPI_SCSIIO_ATTR_UNTAGGED;
1360 	else
1361 		io->tagging = MPI_SCSIIO_ATTR_SIMPLE_Q;
1362 
1363 	memcpy(io->cdb, xs->cmd, xs->cmdlen);
1364 
1365 	htolem32(&io->data_length, xs->datalen);
1366 
1367 	htolem32(&io->sense_buf_low_addr, ccb->ccb_cmd_dva +
1368 	    offsetof(struct mpi_ccb_bundle, mcb_sense));
1369 
1370 	if (mpi_load_xs(ccb) != 0)
1371 		goto stuffup;
1372 
1373 	timeout_set(&xs->stimeout, mpi_timeout_xs, ccb);
1374 
1375 	if (xs->flags & SCSI_POLL) {
1376 		if (mpi_poll(sc, ccb, xs->timeout) != 0)
1377 			goto stuffup;
1378 	} else
1379 		mpi_start(sc, ccb);
1380 
1381 	KERNEL_LOCK();
1382 	return;
1383 
1384 stuffup:
1385 	xs->error = XS_DRIVER_STUFFUP;
1386 done:
1387 	KERNEL_LOCK();
1388 	scsi_done(xs);
1389 }
1390 
1391 void
1392 mpi_scsi_cmd_done(struct mpi_ccb *ccb)
1393 {
1394 	struct mpi_softc		*sc = ccb->ccb_sc;
1395 	struct scsi_xfer		*xs = ccb->ccb_cookie;
1396 	struct mpi_ccb_bundle		*mcb = ccb->ccb_cmd;
1397 	bus_dmamap_t			dmap = ccb->ccb_dmamap;
1398 	struct mpi_msg_scsi_io_error	*sie;
1399 
1400 	if (xs->datalen != 0) {
1401 		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
1402 		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
1403 		    BUS_DMASYNC_POSTWRITE);
1404 
1405 		bus_dmamap_unload(sc->sc_dmat, dmap);
1406 	}
1407 
1408 	/* timeout_del */
1409 	xs->error = XS_NOERROR;
1410 	xs->resid = 0;
1411 
1412 	if (ccb->ccb_rcb == NULL) {
1413 		/* no scsi error, we're ok so drop out early */
1414 		xs->status = SCSI_OK;
1415 		KERNEL_LOCK();
1416 		scsi_done(xs);
1417 		KERNEL_UNLOCK();
1418 		return;
1419 	}
1420 
1421 	sie = ccb->ccb_rcb->rcb_reply;
1422 
1423 	DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd_done xs cmd: 0x%02x len: %d "
1424 	    "flags 0x%x\n", DEVNAME(sc), xs->cmd->opcode, xs->datalen,
1425 	    xs->flags);
1426 	DNPRINTF(MPI_D_CMD, "%s:  target_id: %d bus: %d msg_length: %d "
1427 	    "function: 0x%02x\n", DEVNAME(sc), sie->target_id, sie->bus,
1428 	    sie->msg_length, sie->function);
1429 	DNPRINTF(MPI_D_CMD, "%s:  cdb_length: %d sense_buf_length: %d "
1430 	    "msg_flags: 0x%02x\n", DEVNAME(sc), sie->cdb_length,
1431 	    sie->sense_buf_len, sie->msg_flags);
1432 	DNPRINTF(MPI_D_CMD, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
1433 	    letoh32(sie->msg_context));
1434 	DNPRINTF(MPI_D_CMD, "%s:  scsi_status: 0x%02x scsi_state: 0x%02x "
1435 	    "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status,
1436 	    sie->scsi_state, letoh16(sie->ioc_status));
1437 	DNPRINTF(MPI_D_CMD, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
1438 	    letoh32(sie->ioc_loginfo));
1439 	DNPRINTF(MPI_D_CMD, "%s:  transfer_count: %d\n", DEVNAME(sc),
1440 	    letoh32(sie->transfer_count));
1441 	DNPRINTF(MPI_D_CMD, "%s:  sense_count: %d\n", DEVNAME(sc),
1442 	    letoh32(sie->sense_count));
1443 	DNPRINTF(MPI_D_CMD, "%s:  response_info: 0x%08x\n", DEVNAME(sc),
1444 	    letoh32(sie->response_info));
1445 	DNPRINTF(MPI_D_CMD, "%s:  tag: 0x%04x\n", DEVNAME(sc),
1446 	    letoh16(sie->tag));
1447 
1448 	if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_NO_SCSI_STATUS)
1449 		xs->status = SCSI_TERMINATED;
1450 	else
1451 		xs->status = sie->scsi_status;
1452 	xs->resid = 0;
1453 
1454 	switch (lemtoh16(&sie->ioc_status)) {
1455 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
1456 		xs->resid = xs->datalen - lemtoh32(&sie->transfer_count);
1457 		/* FALLTHROUGH */
1458 	case MPI_IOCSTATUS_SUCCESS:
1459 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
1460 		switch (xs->status) {
1461 		case SCSI_OK:
1462 			xs->error = XS_NOERROR;
1463 			break;
1464 
1465 		case SCSI_CHECK:
1466 			xs->error = XS_SENSE;
1467 			break;
1468 
1469 		case SCSI_BUSY:
1470 		case SCSI_QUEUE_FULL:
1471 			xs->error = XS_BUSY;
1472 			break;
1473 
1474 		default:
1475 			xs->error = XS_DRIVER_STUFFUP;
1476 			break;
1477 		}
1478 		break;
1479 
1480 	case MPI_IOCSTATUS_BUSY:
1481 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
1482 		xs->error = XS_BUSY;
1483 		break;
1484 
1485 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
1486 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
1487 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1488 		xs->error = XS_SELTIMEOUT;
1489 		break;
1490 
1491 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
1492 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
1493 		xs->error = XS_RESET;
1494 		break;
1495 
1496 	default:
1497 		xs->error = XS_DRIVER_STUFFUP;
1498 		break;
1499 	}
1500 
1501 	if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_AUTOSENSE_VALID)
1502 		memcpy(&xs->sense, &mcb->mcb_sense, sizeof(xs->sense));
1503 
1504 	DNPRINTF(MPI_D_CMD, "%s:  xs err: 0x%02x status: %d\n", DEVNAME(sc),
1505 	    xs->error, xs->status);
1506 
1507 	mpi_push_reply(sc, ccb->ccb_rcb);
1508 	KERNEL_LOCK();
1509 	scsi_done(xs);
1510 	KERNEL_UNLOCK();
1511 }
1512 
1513 void
1514 mpi_timeout_xs(void *arg)
1515 {
1516 	/* XXX */
1517 }
1518 
1519 int
1520 mpi_load_xs(struct mpi_ccb *ccb)
1521 {
1522 	struct mpi_softc		*sc = ccb->ccb_sc;
1523 	struct scsi_xfer		*xs = ccb->ccb_cookie;
1524 	struct mpi_ccb_bundle		*mcb = ccb->ccb_cmd;
1525 	struct mpi_msg_scsi_io		*io = &mcb->mcb_io;
1526 	struct mpi_sge			*sge = NULL;
1527 	struct mpi_sge			*nsge = &mcb->mcb_sgl[0];
1528 	struct mpi_sge			*ce = NULL, *nce;
1529 	bus_dmamap_t			dmap = ccb->ccb_dmamap;
1530 	u_int32_t			addr, flags;
1531 	int				i, error;
1532 
1533 	if (xs->datalen == 0) {
1534 		htolem32(&nsge->sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |
1535 		    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL);
1536 		return (0);
1537 	}
1538 
1539 	error = bus_dmamap_load(sc->sc_dmat, dmap,
1540 	    xs->data, xs->datalen, NULL, BUS_DMA_STREAMING |
1541 	    ((xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK));
1542 	if (error) {
1543 		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
1544 		return (1);
1545 	}
1546 
1547 	flags = MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64;
1548 	if (xs->flags & SCSI_DATA_OUT)
1549 		flags |= MPI_SGE_FL_DIR_OUT;
1550 
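	/*
	 * if the sgl will not fit in the request frame, reserve its last
	 * slot for a chain element pointing at the remaining entries.
	 */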
1551 	if (dmap->dm_nsegs > sc->sc_first_sgl_len) {
1552 		ce = &mcb->mcb_sgl[sc->sc_first_sgl_len - 1];
1553 		io->chain_offset = (u_int32_t *)ce - (u_int32_t *)io;
1554 	}
1555 
1556 	for (i = 0; i < dmap->dm_nsegs; i++) {
1557 
1558 		if (nsge == ce) {
1559 			nsge++;
1560 			sge->sg_hdr |= htole32(MPI_SGE_FL_LAST);
1561 
1562 			if ((dmap->dm_nsegs - i) > sc->sc_chain_len) {
1563 				nce = &nsge[sc->sc_chain_len - 1];
1564 				addr = (u_int32_t *)nce - (u_int32_t *)nsge;
1565 				addr = addr << 16 |
1566 				    sizeof(struct mpi_sge) * sc->sc_chain_len;
1567 			} else {
1568 				nce = NULL;
1569 				addr = sizeof(struct mpi_sge) *
1570 				    (dmap->dm_nsegs - i);
1571 			}
1572 
1573 			ce->sg_hdr = htole32(MPI_SGE_FL_TYPE_CHAIN |
1574 			    MPI_SGE_FL_SIZE_64 | addr);
1575 
1576 			mpi_dvatosge(ce, ccb->ccb_cmd_dva +
1577 			    ((u_int8_t *)nsge - (u_int8_t *)mcb));
1578 
1579 			ce = nce;
1580 		}
1581 
1582 		DNPRINTF(MPI_D_DMA, "%s:  %d: %d 0x%016llx\n", DEVNAME(sc),
1583 		    i, dmap->dm_segs[i].ds_len,
1584 		    (u_int64_t)dmap->dm_segs[i].ds_addr);
1585 
1586 		sge = nsge++;
1587 
1588 		sge->sg_hdr = htole32(flags | dmap->dm_segs[i].ds_len);
1589 		mpi_dvatosge(sge, dmap->dm_segs[i].ds_addr);
1590 	}
1591 
1592 	/* terminate list */
1593 	sge->sg_hdr |= htole32(MPI_SGE_FL_LAST | MPI_SGE_FL_EOB |
1594 	    MPI_SGE_FL_EOL);
1595 
1596 	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
1597 	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
1598 	    BUS_DMASYNC_PREWRITE);
1599 
1600 	return (0);
1601 }
1602 
1603 void
1604 mpi_minphys(struct buf *bp, struct scsi_link *sl)
1605 {
1606 	/* XXX */
1607 	if (bp->b_bcount > MAXPHYS)
1608 		bp->b_bcount = MAXPHYS;
1609 }
1610 
1611 int
1612 mpi_scsi_probe_virtual(struct scsi_link *link)
1613 {
1614 	struct mpi_softc		*sc = link->adapter_softc;
1615 	struct mpi_cfg_hdr		hdr;
1616 	struct mpi_cfg_raid_vol_pg0	*rp0;
1617 	int				len;
1618 	int				rv;
1619 
1620 	if (!ISSET(sc->sc_flags, MPI_F_RAID))
1621 		return (0);
1622 
1623 	if (link->lun > 0)
1624 		return (0);
1625 
1626 	rv = mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL,
1627 	    0, link->target, MPI_PG_POLL, &hdr);
1628 	if (rv != 0)
1629 		return (0);
1630 
1631 	len = hdr.page_length * 4;
1632 	rp0 = malloc(len, M_TEMP, M_NOWAIT);
1633 	if (rp0 == NULL)
1634 		return (ENOMEM);
1635 
1636 	rv = mpi_req_cfg_page(sc, link->target, MPI_PG_POLL, &hdr, 1, rp0, len);
1637 	if (rv == 0)
1638 		SET(link->flags, SDEV_VIRTUAL);
1639 
1640 	free(rp0, M_TEMP, len);
1641 	return (0);
1642 }
1643 
1644 int
1645 mpi_scsi_probe(struct scsi_link *link)
1646 {
1647 	struct mpi_softc		*sc = link->adapter_softc;
1648 	struct mpi_ecfg_hdr		ehdr;
1649 	struct mpi_cfg_sas_dev_pg0	pg0;
1650 	u_int32_t			address;
1651 	int				rv;
1652 
1653 	rv = mpi_scsi_probe_virtual(link);
1654 	if (rv != 0)
1655 		return (rv);
1656 
1657 	if (ISSET(link->flags, SDEV_VIRTUAL))
1658 		return (0);
1659 
1660 	if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SAS)
1661 		return (0);
1662 
1663 	address = MPI_CFG_SAS_DEV_ADDR_BUS | link->target;
1664 
1665 	if (mpi_ecfg_header(sc, MPI_CONFIG_REQ_EXTPAGE_TYPE_SAS_DEVICE, 0,
1666 	    address, &ehdr) != 0)
1667 		return (EIO);
1668 
1669 	if (mpi_ecfg_page(sc, address, &ehdr, 1, &pg0, sizeof(pg0)) != 0)
1670 		return (0);
1671 
1672 	DNPRINTF(MPI_D_MISC, "%s: mpi_scsi_probe sas dev pg 0 for target %d:\n",
1673 	    DEVNAME(sc), link->target);
1674 	DNPRINTF(MPI_D_MISC, "%s:  slot: 0x%04x enc_handle: 0x%04x\n",
1675 	    DEVNAME(sc), letoh16(pg0.slot), letoh16(pg0.enc_handle));
1676 	DNPRINTF(MPI_D_MISC, "%s:  sas_addr: 0x%016llx\n", DEVNAME(sc),
1677 	    letoh64(pg0.sas_addr));
1678 	DNPRINTF(MPI_D_MISC, "%s:  parent_dev_handle: 0x%04x phy_num: 0x%02x "
1679 	    "access_status: 0x%02x\n", DEVNAME(sc),
1680 	    letoh16(pg0.parent_dev_handle), pg0.phy_num, pg0.access_status);
1681 	DNPRINTF(MPI_D_MISC, "%s:  dev_handle: 0x%04x "
1682 	    "bus: 0x%02x target: 0x%02x\n", DEVNAME(sc),
1683 	    letoh16(pg0.dev_handle), pg0.bus, pg0.target);
1684 	DNPRINTF(MPI_D_MISC, "%s:  device_info: 0x%08x\n", DEVNAME(sc),
1685 	    letoh32(pg0.device_info));
1686 	DNPRINTF(MPI_D_MISC, "%s:  flags: 0x%04x physical_port: 0x%02x\n",
1687 	    DEVNAME(sc), letoh16(pg0.flags), pg0.physical_port);
1688 
1689 	if (ISSET(lemtoh32(&pg0.device_info),
1690 	    MPI_CFG_SAS_DEV_0_DEVINFO_ATAPI_DEVICE)) {
1691 		DNPRINTF(MPI_D_MISC, "%s: target %d is an ATAPI device\n",
1692 		    DEVNAME(sc), link->target);
1693 		link->flags |= SDEV_ATAPI;
1694 		link->quirks |= SDEV_ONLYBIG;
1695 	}
1696 
1697 	return (0);
1698 }
1699 
1700 u_int32_t
1701 mpi_read(struct mpi_softc *sc, bus_size_t r)
1702 {
1703 	u_int32_t			rv;
1704 
1705 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1706 	    BUS_SPACE_BARRIER_READ);
1707 	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
1708 
1709 	DNPRINTF(MPI_D_RW, "%s: mpi_read %#x %#x\n", DEVNAME(sc), r, rv);
1710 
1711 	return (rv);
1712 }
1713 
1714 void
1715 mpi_write(struct mpi_softc *sc, bus_size_t r, u_int32_t v)
1716 {
1717 	DNPRINTF(MPI_D_RW, "%s: mpi_write %#x %#x\n", DEVNAME(sc), r, v);
1718 
1719 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
1720 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1721 	    BUS_SPACE_BARRIER_WRITE);
1722 }
1723 
1724 int
1725 mpi_wait_eq(struct mpi_softc *sc, bus_size_t r, u_int32_t mask,
1726     u_int32_t target)
1727 {
1728 	int				i;
1729 
1730 	DNPRINTF(MPI_D_RW, "%s: mpi_wait_eq %#x %#x %#x\n", DEVNAME(sc), r,
1731 	    mask, target);
1732 
1733 	for (i = 0; i < 10000; i++) {
1734 		if ((mpi_read(sc, r) & mask) == target)
1735 			return (0);
1736 		delay(1000);
1737 	}
1738 
1739 	return (1);
1740 }
1741 
1742 int
1743 mpi_wait_ne(struct mpi_softc *sc, bus_size_t r, u_int32_t mask,
1744     u_int32_t target)
1745 {
1746 	int				i;
1747 
1748 	DNPRINTF(MPI_D_RW, "%s: mpi_wait_ne %#x %#x %#x\n", DEVNAME(sc), r,
1749 	    mask, target);
1750 
1751 	for (i = 0; i < 10000; i++) {
1752 		if ((mpi_read(sc, r) & mask) != target)
1753 			return (0);
1754 		delay(1000);
1755 	}
1756 
1757 	return (1);
1758 }
1759 
1760 int
1761 mpi_init(struct mpi_softc *sc)
1762 {
1763 	u_int32_t			db;
1764 	int				i;
1765 
1766 	/* spin until the IOC leaves the RESET state */
1767 	if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
1768 	    MPI_DOORBELL_STATE_RESET) != 0) {
1769 		DNPRINTF(MPI_D_MISC, "%s: mpi_init timeout waiting to leave "
1770 		    "reset state\n", DEVNAME(sc));
1771 		return (1);
1772 	}
1773 
1774 	/* check current ownership */
1775 	db = mpi_read_db(sc);
1776 	if ((db & MPI_DOORBELL_WHOINIT) == MPI_DOORBELL_WHOINIT_PCIPEER) {
1777 		DNPRINTF(MPI_D_MISC, "%s: mpi_init initialised by pci peer\n",
1778 		    DEVNAME(sc));
1779 		return (0);
1780 	}
1781 
1782 	for (i = 0; i < 5; i++) {
1783 		switch (db & MPI_DOORBELL_STATE) {
1784 		case MPI_DOORBELL_STATE_READY:
1785 			DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is ready\n",
1786 			    DEVNAME(sc));
1787 			return (0);
1788 
1789 		case MPI_DOORBELL_STATE_OPER:
1790 		case MPI_DOORBELL_STATE_FAULT:
1791 			DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is being "
1792 			    "reset\n" , DEVNAME(sc));
1793 			if (mpi_reset_soft(sc) != 0)
1794 				mpi_reset_hard(sc);
1795 			break;
1796 
1797 		case MPI_DOORBELL_STATE_RESET:
1798 			DNPRINTF(MPI_D_MISC, "%s: mpi_init waiting to come "
1799 			    "out of reset\n", DEVNAME(sc));
1800 			if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
1801 			    MPI_DOORBELL_STATE_RESET) != 0)
1802 				return (1);
1803 			break;
1804 		}
1805 		db = mpi_read_db(sc);
1806 	}
1807 
1808 	return (1);
1809 }
1810 
1811 int
1812 mpi_reset_soft(struct mpi_softc *sc)
1813 {
1814 	DNPRINTF(MPI_D_MISC, "%s: mpi_reset_soft\n", DEVNAME(sc));
1815 
1816 	if (mpi_read_db(sc) & MPI_DOORBELL_INUSE)
1817 		return (1);
1818 
1819 	mpi_write_db(sc,
1820 	    MPI_DOORBELL_FUNCTION(MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET));
1821 	if (mpi_wait_eq(sc, MPI_INTR_STATUS,
1822 	    MPI_INTR_STATUS_IOCDOORBELL, 0) != 0)
1823 		return (1);
1824 
1825 	if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
1826 	    MPI_DOORBELL_STATE_READY) != 0)
1827 		return (1);
1828 
1829 	return (0);
1830 }
1831 
1832 int
1833 mpi_reset_hard(struct mpi_softc *sc)
1834 {
1835 	DNPRINTF(MPI_D_MISC, "%s: mpi_reset_hard\n", DEVNAME(sc));
1836 
1837 	/* enable diagnostic register */
1838 	mpi_write(sc, MPI_WRITESEQ, 0xff);
1839 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_1);
1840 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_2);
1841 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_3);
1842 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_4);
1843 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_5);
1844 
1845 	/* reset ioc */
1846 	mpi_write(sc, MPI_HOSTDIAG, MPI_HOSTDIAG_RESET_ADAPTER);
1847 
1848 	delay(10000);
1849 
1850 	/* disable diagnostic register */
1851 	mpi_write(sc, MPI_WRITESEQ, 0xff);
1852 
1853 	/* restore pci bits? */
1854 
1855 	/* firmware bits? */
1856 	return (0);
1857 }
1858 
1859 int
1860 mpi_handshake_send(struct mpi_softc *sc, void *buf, size_t dwords)
1861 {
1862 	u_int32_t				*query = buf;
1863 	int					i;
1864 
1865 	/* make sure the doorbell is not in use. */
1866 	if (mpi_read_db(sc) & MPI_DOORBELL_INUSE)
1867 		return (1);
1868 
1869 	/* clear pending doorbell interrupts */
1870 	if (mpi_read_intr(sc) & MPI_INTR_STATUS_DOORBELL)
1871 		mpi_write_intr(sc, 0);
1872 
1873 	/*
1874 	 * first write the doorbell with the handshake function and the
1875 	 * dword count.
1876 	 */
1877 	mpi_write_db(sc, MPI_DOORBELL_FUNCTION(MPI_FUNCTION_HANDSHAKE) |
1878 	    MPI_DOORBELL_DWORDS(dwords));
1879 
1880 	/*
1881 	 * the doorbell used bit will be set because a doorbell function has
1882 	 * started. Wait for the interrupt and then ack it.
1883 	 */
1884 	if (mpi_wait_db_int(sc) != 0)
1885 		return (1);
1886 	mpi_write_intr(sc, 0);
1887 
1888 	/* poll for the acknowledgement. */
1889 	if (mpi_wait_db_ack(sc) != 0)
1890 		return (1);
1891 
1892 	/* write the query through the doorbell. */
1893 	for (i = 0; i < dwords; i++) {
1894 		mpi_write_db(sc, htole32(query[i]));
1895 		if (mpi_wait_db_ack(sc) != 0)
1896 			return (1);
1897 	}
1898 
1899 	return (0);
1900 }
1901 
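/*
 * Doorbell handshake, receive side: reply data comes back 16 bits at a
 * time in the low half of the doorbell register, so each dword is
 * assembled from two reads.
 */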
1902 int
1903 mpi_handshake_recv_dword(struct mpi_softc *sc, u_int32_t *dword)
1904 {
1905 	u_int16_t				*words = (u_int16_t *)dword;
1906 	int					i;
1907 
1908 	for (i = 0; i < 2; i++) {
1909 		if (mpi_wait_db_int(sc) != 0)
1910 			return (1);
1911 		words[i] = letoh16(mpi_read_db(sc) & MPI_DOORBELL_DATA_MASK);
1912 		mpi_write_intr(sc, 0);
1913 	}
1914 
1915 	return (0);
1916 }
1917 
1918 int
1919 mpi_handshake_recv(struct mpi_softc *sc, void *buf, size_t dwords)
1920 {
1921 	struct mpi_msg_reply			*reply = buf;
1922 	u_int32_t				*dbuf = buf, dummy;
1923 	int					i;
1924 
1925 	/* get the first dword so we can read the length out of the header. */
1926 	if (mpi_handshake_recv_dword(sc, &dbuf[0]) != 0)
1927 		return (1);
1928 
1929 	DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dwords: %d reply: %d\n",
1930 	    DEVNAME(sc), dwords, reply->msg_length);
1931 
1932 	/*
1933 	 * the total length, in dwords, is in the message length field of the
1934 	 * reply header.
1935 	 */
1936 	for (i = 1; i < MIN(dwords, reply->msg_length); i++) {
1937 		if (mpi_handshake_recv_dword(sc, &dbuf[i]) != 0)
1938 			return (1);
1939 	}
1940 
1941 	/* if there's extra stuff to come off the ioc, discard it */
1942 	while (i++ < reply->msg_length) {
1943 		if (mpi_handshake_recv_dword(sc, &dummy) != 0)
1944 			return (1);
1945 		DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dummy read: "
1946 		    "0x%08x\n", DEVNAME(sc), dummy);
1947 	}
1948 
1949 	/* wait for the doorbell used bit to be reset and clear the intr */
1950 	if (mpi_wait_db_int(sc) != 0)
1951 		return (1);
1952 	mpi_write_intr(sc, 0);
1953 
1954 	return (0);
1955 }
1956 
1957 void
1958 mpi_empty_done(struct mpi_ccb *ccb)
1959 {
1960 	/* nothing to do */
1961 }
1962 
1963 int
1964 mpi_iocfacts(struct mpi_softc *sc)
1965 {
1966 	struct mpi_msg_iocfacts_request		ifq;
1967 	struct mpi_msg_iocfacts_reply		ifp;
1968 
1969 	DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts\n", DEVNAME(sc));
1970 
1971 	memset(&ifq, 0, sizeof(ifq));
1972 	memset(&ifp, 0, sizeof(ifp));
1973 
1974 	ifq.function = MPI_FUNCTION_IOC_FACTS;
1975 	ifq.chain_offset = 0;
1976 	ifq.msg_flags = 0;
1977 	ifq.msg_context = htole32(0xdeadbeef);
1978 
1979 	if (mpi_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
1980 		DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts send failed\n",
1981 		    DEVNAME(sc));
1982 		return (1);
1983 	}
1984 
1985 	if (mpi_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
1986 		DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts recv failed\n",
1987 		    DEVNAME(sc));
1988 		return (1);
1989 	}
1990 
1991 	DNPRINTF(MPI_D_MISC, "%s:  func: 0x%02x len: %d msgver: %d.%d\n",
1992 	    DEVNAME(sc), ifp.function, ifp.msg_length,
1993 	    ifp.msg_version_maj, ifp.msg_version_min);
1994 	DNPRINTF(MPI_D_MISC, "%s:  msgflags: 0x%02x iocnumber: 0x%02x "
1995 	    "hdrver: %d.%d\n", DEVNAME(sc), ifp.msg_flags,
1996 	    ifp.ioc_number, ifp.header_version_maj,
1997 	    ifp.header_version_min);
1998 	DNPRINTF(MPI_D_MISC, "%s:  message context: 0x%08x\n", DEVNAME(sc),
1999 	    letoh32(ifp.msg_context));
2000 	DNPRINTF(MPI_D_MISC, "%s:  iocstatus: 0x%04x ioexcept: 0x%04x\n",
2001 	    DEVNAME(sc), letoh16(ifp.ioc_status),
2002 	    letoh16(ifp.ioc_exceptions));
2003 	DNPRINTF(MPI_D_MISC, "%s:  iocloginfo: 0x%08x\n", DEVNAME(sc),
2004 	    letoh32(ifp.ioc_loginfo));
2005 	DNPRINTF(MPI_D_MISC, "%s:  flags: 0x%02x blocksize: %d whoinit: 0x%02x "
2006 	    "maxchdepth: %d\n", DEVNAME(sc), ifp.flags,
2007 	    ifp.block_size, ifp.whoinit, ifp.max_chain_depth);
2008 	DNPRINTF(MPI_D_MISC, "%s:  reqfrsize: %d replyqdepth: %d\n",
2009 	    DEVNAME(sc), letoh16(ifp.request_frame_size),
2010 	    letoh16(ifp.reply_queue_depth));
2011 	DNPRINTF(MPI_D_MISC, "%s:  productid: 0x%04x\n", DEVNAME(sc),
2012 	    letoh16(ifp.product_id));
2013 	DNPRINTF(MPI_D_MISC, "%s:  hostmfahiaddr: 0x%08x\n", DEVNAME(sc),
2014 	    letoh32(ifp.current_host_mfa_hi_addr));
2015 	DNPRINTF(MPI_D_MISC, "%s:  event_state: 0x%02x number_of_ports: %d "
2016 	    "global_credits: %d\n",
2017 	    DEVNAME(sc), ifp.event_state, ifp.number_of_ports,
2018 	    letoh16(ifp.global_credits));
2019 	DNPRINTF(MPI_D_MISC, "%s:  sensebufhiaddr: 0x%08x\n", DEVNAME(sc),
2020 	    letoh32(ifp.current_sense_buffer_hi_addr));
2021 	DNPRINTF(MPI_D_MISC, "%s:  maxbus: %d maxdev: %d replyfrsize: %d\n",
2022 	    DEVNAME(sc), ifp.max_buses, ifp.max_devices,
2023 	    letoh16(ifp.current_reply_frame_size));
2024 	DNPRINTF(MPI_D_MISC, "%s:  fw_image_size: %d\n", DEVNAME(sc),
2025 	    letoh32(ifp.fw_image_size));
2026 	DNPRINTF(MPI_D_MISC, "%s:  ioc_capabilities: 0x%08x\n", DEVNAME(sc),
2027 	    letoh32(ifp.ioc_capabilities));
2028 	DNPRINTF(MPI_D_MISC, "%s:  fw_version: %d.%d fw_version_unit: 0x%02x "
2029 	    "fw_version_dev: 0x%02x\n", DEVNAME(sc),
2030 	    ifp.fw_version_maj, ifp.fw_version_min,
2031 	    ifp.fw_version_unit, ifp.fw_version_dev);
2032 	DNPRINTF(MPI_D_MISC, "%s:  hi_priority_queue_depth: 0x%04x\n",
2033 	    DEVNAME(sc), letoh16(ifp.hi_priority_queue_depth));
2034 	DNPRINTF(MPI_D_MISC, "%s:  host_page_buffer_sge: hdr: 0x%08x "
2035 	    "addr 0x%08lx%08lx\n", DEVNAME(sc),
2036 	    letoh32(ifp.host_page_buffer_sge.sg_hdr),
2037 	    letoh32(ifp.host_page_buffer_sge.sg_addr_hi),
2038 	    letoh32(ifp.host_page_buffer_sge.sg_addr_lo));
2039 
2040 	sc->sc_fw_maj = ifp.fw_version_maj;
2041 	sc->sc_fw_min = ifp.fw_version_min;
2042 	sc->sc_fw_unit = ifp.fw_version_unit;
2043 	sc->sc_fw_dev = ifp.fw_version_dev;
2044 
2045 	sc->sc_maxcmds = lemtoh16(&ifp.global_credits);
2046 	sc->sc_maxchdepth = ifp.max_chain_depth;
2047 	sc->sc_ioc_number = ifp.ioc_number;
2048 	if (sc->sc_flags & MPI_F_SPI)
2049 		sc->sc_buswidth = 16;
2050 	else
2051 		sc->sc_buswidth =
2052 		    (ifp.max_devices == 0) ? 256 : ifp.max_devices;
2053 	if (ifp.flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)
2054 		sc->sc_fw_len = lemtoh32(&ifp.fw_image_size);
2055 
2056 	sc->sc_repq = MIN(MPI_REPLYQ_DEPTH, lemtoh16(&ifp.reply_queue_depth));
2057 
2058 	/*
2059 	 * you can fit sg elements on the end of the io cmd if they fit in the
2060 	 * request frame size.
2061 	 */
2062 	sc->sc_first_sgl_len = ((lemtoh16(&ifp.request_frame_size) * 4) -
2063 	    sizeof(struct mpi_msg_scsi_io)) / sizeof(struct mpi_sge);
2064 	DNPRINTF(MPI_D_MISC, "%s:   first sgl len: %d\n", DEVNAME(sc),
2065 	    sc->sc_first_sgl_len);
2066 
2067 	sc->sc_chain_len = (lemtoh16(&ifp.request_frame_size) * 4) /
2068 	    sizeof(struct mpi_sge);
2069 	DNPRINTF(MPI_D_MISC, "%s:   chain len: %d\n", DEVNAME(sc),
2070 	    sc->sc_chain_len);
2071 
2072 	/* the sgl trailing the io cmd loses an entry to the chain element. */
2073 	sc->sc_max_sgl_len = MPI_MAX_SGL - 1;
2074 	/* the sgl chains lose an entry for each chain element */
2075 	sc->sc_max_sgl_len -= (MPI_MAX_SGL - sc->sc_first_sgl_len) /
2076 	    sc->sc_chain_len;
2077 	DNPRINTF(MPI_D_MISC, "%s:   max sgl len: %d\n", DEVNAME(sc),
2078 	    sc->sc_max_sgl_len);
2079 
2080 	/* XXX we're ignoring the max chain depth */
2081 
2082 	return (0);
2083 }
2084 
2085 int
2086 mpi_iocinit(struct mpi_softc *sc)
2087 {
2088 	struct mpi_msg_iocinit_request		iiq;
2089 	struct mpi_msg_iocinit_reply		iip;
2090 	u_int32_t				hi_addr;
2091 
2092 	DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit\n", DEVNAME(sc));
2093 
2094 	memset(&iiq, 0, sizeof(iiq));
2095 	memset(&iip, 0, sizeof(iip));
2096 
2097 	iiq.function = MPI_FUNCTION_IOC_INIT;
2098 	iiq.whoinit = MPI_WHOINIT_HOST_DRIVER;
2099 
2100 	iiq.max_devices = (sc->sc_buswidth == 256) ? 0 : sc->sc_buswidth;
2101 	iiq.max_buses = 1;
2102 
2103 	iiq.msg_context = htole32(0xd00fd00f);
2104 
2105 	iiq.reply_frame_size = htole16(MPI_REPLY_SIZE);
2106 
2107 	hi_addr = (u_int32_t)(MPI_DMA_DVA(sc->sc_requests) >> 32);
2108 	htolem32(&iiq.host_mfa_hi_addr, hi_addr);
2109 	htolem32(&iiq.sense_buffer_hi_addr, hi_addr);
2110 
2111 	iiq.msg_version_maj = 0x01;
2112 	iiq.msg_version_min = 0x02;
2113 
2114 	iiq.hdr_version_unit = 0x0d;
2115 	iiq.hdr_version_dev = 0x00;
2116 
2117 	if (mpi_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) {
2118 		DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit send failed\n",
2119 		    DEVNAME(sc));
2120 		return (1);
2121 	}
2122 
2123 	if (mpi_handshake_recv(sc, &iip, dwordsof(iip)) != 0) {
2124 		DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit recv failed\n",
2125 		    DEVNAME(sc));
2126 		return (1);
2127 	}
2128 
2129 	DNPRINTF(MPI_D_MISC, "%s:  function: 0x%02x msg_length: %d "
2130 	    "whoinit: 0x%02x\n", DEVNAME(sc), iip.function,
2131 	    iip.msg_length, iip.whoinit);
2132 	DNPRINTF(MPI_D_MISC, "%s:  msg_flags: 0x%02x max_buses: %d "
2133 	    "max_devices: %d flags: 0x%02x\n", DEVNAME(sc), iip.msg_flags,
2134 	    iip.max_buses, iip.max_devices, iip.flags);
2135 	DNPRINTF(MPI_D_MISC, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2136 	    letoh32(iip.msg_context));
2137 	DNPRINTF(MPI_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2138 	    letoh16(iip.ioc_status));
2139 	DNPRINTF(MPI_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2140 	    letoh32(iip.ioc_loginfo));
2141 
2142 	return (0);
2143 }
2144 
2145 int
2146 mpi_portfacts(struct mpi_softc *sc)
2147 {
2148 	struct mpi_ccb				*ccb;
2149 	struct mpi_msg_portfacts_request	*pfq;
2150 	volatile struct mpi_msg_portfacts_reply	*pfp;
2151 	int					rv = 1;
2152 
2153 	DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts\n", DEVNAME(sc));
2154 
2155 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
2156 	if (ccb == NULL) {
2157 		DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts ccb_get\n",
2158 		    DEVNAME(sc));
2159 		return (rv);
2160 	}
2161 
2162 	ccb->ccb_done = mpi_empty_done;
2163 	pfq = ccb->ccb_cmd;
2164 
2165 	pfq->function = MPI_FUNCTION_PORT_FACTS;
2166 	pfq->chain_offset = 0;
2167 	pfq->msg_flags = 0;
2168 	pfq->port_number = 0;
2169 
2170 	if (mpi_poll(sc, ccb, 50000) != 0) {
2171 		DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts poll\n", DEVNAME(sc));
2172 		goto err;
2173 	}
2174 
2175 	if (ccb->ccb_rcb == NULL) {
2176 		DNPRINTF(MPI_D_MISC, "%s: empty portfacts reply\n",
2177 		    DEVNAME(sc));
2178 		goto err;
2179 	}
2180 	pfp = ccb->ccb_rcb->rcb_reply;
2181 
2182 	DNPRINTF(MPI_D_MISC, "%s:  function: 0x%02x msg_length: %d\n",
2183 	    DEVNAME(sc), pfp->function, pfp->msg_length);
2184 	DNPRINTF(MPI_D_MISC, "%s:  msg_flags: 0x%02x port_number: %d\n",
2185 	    DEVNAME(sc), pfp->msg_flags, pfp->port_number);
2186 	DNPRINTF(MPI_D_MISC, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2187 	    letoh32(pfp->msg_context));
2188 	DNPRINTF(MPI_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2189 	    letoh16(pfp->ioc_status));
2190 	DNPRINTF(MPI_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2191 	    letoh32(pfp->ioc_loginfo));
2192 	DNPRINTF(MPI_D_MISC, "%s:  max_devices: %d port_type: 0x%02x\n",
2193 	    DEVNAME(sc), letoh16(pfp->max_devices), pfp->port_type);
2194 	DNPRINTF(MPI_D_MISC, "%s:  protocol_flags: 0x%04x port_scsi_id: %d\n",
2195 	    DEVNAME(sc), letoh16(pfp->protocol_flags),
2196 	    letoh16(pfp->port_scsi_id));
2197 	DNPRINTF(MPI_D_MISC, "%s:  max_persistent_ids: %d "
2198 	    "max_posted_cmd_buffers: %d\n", DEVNAME(sc),
2199 	    letoh16(pfp->max_persistent_ids),
2200 	    letoh16(pfp->max_posted_cmd_buffers));
2201 	DNPRINTF(MPI_D_MISC, "%s:  max_lan_buckets: %d\n", DEVNAME(sc),
2202 	    letoh16(pfp->max_lan_buckets));
2203 
2204 	sc->sc_porttype = pfp->port_type;
2205 	if (sc->sc_target == -1)
2206 		sc->sc_target = lemtoh16(&pfp->port_scsi_id);
2207 
2208 	mpi_push_reply(sc, ccb->ccb_rcb);
2209 	rv = 0;
2210 err:
2211 	scsi_io_put(&sc->sc_iopool, ccb);
2212 
2213 	return (rv);
2214 }
2215 
2216 int
2217 mpi_cfg_coalescing(struct mpi_softc *sc)
2218 {
2219 	struct mpi_cfg_hdr		hdr;
2220 	struct mpi_cfg_ioc_pg1		pg;
2221 	u_int32_t			flags;
2222 
2223 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 1, 0, &hdr) != 0) {
2224 		DNPRINTF(MPI_D_MISC, "%s: unable to fetch IOC page 1 header\n",
2225 		    DEVNAME(sc));
2226 		return (1);
2227 	}
2228 
2229 	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg, sizeof(pg)) != 0) {
2230 		DNPRINTF(MPI_D_MISC, "%s: unable to fetch IOC page 1\n",
2231 		    DEVNAME(sc));
2232 		return (1);
2233 	}
2234 
2235 	DNPRINTF(MPI_D_MISC, "%s: IOC page 1\n", DEVNAME(sc));
2236 	DNPRINTF(MPI_D_MISC, "%s:  flags: 0x%08x\n", DEVNAME(sc),
2237 	    letoh32(pg.flags));
2238 	DNPRINTF(MPI_D_MISC, "%s:  coalescing_timeout: %d\n", DEVNAME(sc),
2239 	    letoh32(pg.coalescing_timeout));
2240 	DNPRINTF(MPI_D_MISC, "%s:  coalescing_depth: %d pci_slot_num: %d\n",
2241 	    DEVNAME(sc), pg.coalescing_depth, pg.pci_slot_num);
2242 
2243 	flags = lemtoh32(&pg.flags);
2244 	if (!ISSET(flags, MPI_CFG_IOC_1_REPLY_COALESCING))
2245 		return (0);
2246 
2247 	CLR(pg.flags, htole32(MPI_CFG_IOC_1_REPLY_COALESCING));
2248 	if (mpi_cfg_page(sc, 0, &hdr, 0, &pg, sizeof(pg)) != 0) {
2249 		DNPRINTF(MPI_D_MISC, "%s: unable to clear coalescing\n",
2250 		    DEVNAME(sc));
2251 		return (1);
2252 	}
2253 
2254 	return (0);
2255 }
2256 
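/*
 * Ask the IOC for asynchronous event notifications.  The ccb used here is
 * kept for the lifetime of the controller; its done routine dispatches
 * each event and, when the IOC asks for it, schedules an event ack.
 */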
2257 int
2258 mpi_eventnotify(struct mpi_softc *sc)
2259 {
2260 	struct mpi_ccb				*ccb;
2261 	struct mpi_msg_event_request		*enq;
2262 
2263 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
2264 	if (ccb == NULL) {
2265 		DNPRINTF(MPI_D_MISC, "%s: mpi_eventnotify ccb_get\n",
2266 		    DEVNAME(sc));
2267 		return (1);
2268 	}
2269 
2270 	sc->sc_evt_ccb = ccb;
2271 	SIMPLEQ_INIT(&sc->sc_evt_ack_queue);
2272 	mtx_init(&sc->sc_evt_ack_mtx, IPL_BIO);
2273 	scsi_ioh_set(&sc->sc_evt_ack_handler, &sc->sc_iopool,
2274 	    mpi_eventack, sc);
2275 
2276 	ccb->ccb_done = mpi_eventnotify_done;
2277 	enq = ccb->ccb_cmd;
2278 
2279 	enq->function = MPI_FUNCTION_EVENT_NOTIFICATION;
2280 	enq->chain_offset = 0;
2281 	enq->event_switch = MPI_EVENT_SWITCH_ON;
2282 
2283 	mpi_start(sc, ccb);
2284 	return (0);
2285 }
2286 
2287 void
2288 mpi_eventnotify_done(struct mpi_ccb *ccb)
2289 {
2290 	struct mpi_softc			*sc = ccb->ccb_sc;
2291 	struct mpi_rcb				*rcb = ccb->ccb_rcb;
2292 	struct mpi_msg_event_reply		*enp = rcb->rcb_reply;
2293 
2294 	DNPRINTF(MPI_D_EVT, "%s: mpi_eventnotify_done\n", DEVNAME(sc));
2295 
2296 	DNPRINTF(MPI_D_EVT, "%s:  function: 0x%02x msg_length: %d "
2297 	    "data_length: %d\n", DEVNAME(sc), enp->function, enp->msg_length,
2298 	    letoh16(enp->data_length));
2299 	DNPRINTF(MPI_D_EVT, "%s:  ack_required: %d msg_flags 0x%02x\n",
2300 	    DEVNAME(sc), enp->ack_required, enp->msg_flags);
2301 	DNPRINTF(MPI_D_EVT, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2302 	    letoh32(enp->msg_context));
2303 	DNPRINTF(MPI_D_EVT, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2304 	    letoh16(enp->ioc_status));
2305 	DNPRINTF(MPI_D_EVT, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2306 	    letoh32(enp->ioc_loginfo));
2307 	DNPRINTF(MPI_D_EVT, "%s:  event: 0x%08x\n", DEVNAME(sc),
2308 	    letoh32(enp->event));
2309 	DNPRINTF(MPI_D_EVT, "%s:  event_context: 0x%08x\n", DEVNAME(sc),
2310 	    letoh32(enp->event_context));
2311 
2312 	switch (lemtoh32(&enp->event)) {
2313 	/* ignore these */
2314 	case MPI_EVENT_EVENT_CHANGE:
2315 	case MPI_EVENT_SAS_PHY_LINK_STATUS:
2316 		break;
2317 
2318 	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2319 		if (sc->sc_scsibus == NULL)
2320 			break;
2321 
2322 		if (mpi_evt_sas(sc, rcb) != 0) {
2323 			/* reply is freed later on */
2324 			return;
2325 		}
2326 		break;
2327 
2328 	case MPI_EVENT_RESCAN:
2329 		if (sc->sc_scsibus != NULL &&
2330 		    sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_FC)
2331 			task_add(systq, &sc->sc_evt_rescan);
2332 		break;
2333 
2334 	default:
2335 		DNPRINTF(MPI_D_EVT, "%s:  unhandled event 0x%08x\n",
2336 		    DEVNAME(sc), lemtoh32(&enp->event));
2337 		break;
2338 	}
2339 
2340 	mpi_eventnotify_free(sc, rcb);
2341 }
2342 
2343 void
2344 mpi_eventnotify_free(struct mpi_softc *sc, struct mpi_rcb *rcb)
2345 {
2346 	struct mpi_msg_event_reply		*enp = rcb->rcb_reply;
2347 
2348 	if (enp->ack_required) {
2349 		mtx_enter(&sc->sc_evt_ack_mtx);
2350 		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_ack_queue, rcb, rcb_link);
2351 		mtx_leave(&sc->sc_evt_ack_mtx);
2352 		scsi_ioh_add(&sc->sc_evt_ack_handler);
2353 	} else
2354 		mpi_push_reply(sc, rcb);
2355 }
2356 
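/*
 * Handle a SAS device status change event.  Newly added targets are
 * probed straight away; targets that stop responding are deactivated and
 * queued so mpi_evt_sas_detach can reset and detach them later, in which
 * case the event ack is deferred until that work runs.
 */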
2357 int
2358 mpi_evt_sas(struct mpi_softc *sc, struct mpi_rcb *rcb)
2359 {
2360 	struct mpi_evt_sas_change		*ch;
2361 	u_int8_t				*data;
2362 
2363 	data = rcb->rcb_reply;
2364 	data += sizeof(struct mpi_msg_event_reply);
2365 	ch = (struct mpi_evt_sas_change *)data;
2366 
2367 	if (ch->bus != 0)
2368 		return (0);
2369 
2370 	switch (ch->reason) {
2371 	case MPI_EVT_SASCH_REASON_ADDED:
2372 	case MPI_EVT_SASCH_REASON_NO_PERSIST_ADDED:
2373 		KERNEL_LOCK();
2374 		if (scsi_req_probe(sc->sc_scsibus, ch->target, -1) != 0) {
2375 			printf("%s: unable to request attach of %d\n",
2376 			    DEVNAME(sc), ch->target);
2377 		}
2378 		KERNEL_UNLOCK();
2379 		break;
2380 
2381 	case MPI_EVT_SASCH_REASON_NOT_RESPONDING:
2382 		KERNEL_LOCK();
2383 		scsi_activate(sc->sc_scsibus, ch->target, -1, DVACT_DEACTIVATE);
2384 		KERNEL_UNLOCK();
2385 
2386 		mtx_enter(&sc->sc_evt_scan_mtx);
2387 		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_scan_queue, rcb, rcb_link);
2388 		mtx_leave(&sc->sc_evt_scan_mtx);
2389 		scsi_ioh_add(&sc->sc_evt_scan_handler);
2390 
2391 		/* we'll handle event ack later on */
2392 		return (1);
2393 
2394 	case MPI_EVT_SASCH_REASON_SMART_DATA:
2395 	case MPI_EVT_SASCH_REASON_UNSUPPORTED:
2396 	case MPI_EVT_SASCH_REASON_INTERNAL_RESET:
2397 		break;
2398 	default:
2399 		printf("%s: unknown reason for SAS device status change: "
2400 		    "0x%02x\n", DEVNAME(sc), ch->reason);
2401 		break;
2402 	}
2403 
2404 	return (0);
2405 }
2406 
2407 void
2408 mpi_evt_sas_detach(void *cookie, void *io)
2409 {
2410 	struct mpi_softc			*sc = cookie;
2411 	struct mpi_ccb				*ccb = io;
2412 	struct mpi_rcb				*rcb, *next;
2413 	struct mpi_msg_event_reply		*enp;
2414 	struct mpi_evt_sas_change		*ch;
2415 	struct mpi_msg_scsi_task_request	*str;
2416 
2417 	DNPRINTF(MPI_D_EVT, "%s: event sas detach handler\n", DEVNAME(sc));
2418 
2419 	mtx_enter(&sc->sc_evt_scan_mtx);
2420 	rcb = SIMPLEQ_FIRST(&sc->sc_evt_scan_queue);
2421 	if (rcb != NULL) {
2422 		next = SIMPLEQ_NEXT(rcb, rcb_link);
2423 		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_scan_queue, rcb_link);
2424 	}
2425 	mtx_leave(&sc->sc_evt_scan_mtx);
2426 
2427 	if (rcb == NULL) {
2428 		scsi_io_put(&sc->sc_iopool, ccb);
2429 		return;
2430 	}
2431 
2432 	enp = rcb->rcb_reply;
2433 	ch = (struct mpi_evt_sas_change *)(enp + 1);
2434 
2435 	ccb->ccb_done = mpi_evt_sas_detach_done;
2436 	str = ccb->ccb_cmd;
2437 
2438 	str->target_id = ch->target;
2439 	str->bus = 0;
2440 	str->function = MPI_FUNCTION_SCSI_TASK_MGMT;
2441 
2442 	str->task_type = MPI_MSG_SCSI_TASK_TYPE_TARGET_RESET;
2443 
2444 	mpi_eventnotify_free(sc, rcb);
2445 
2446 	mpi_start(sc, ccb);
2447 
2448 	if (next != NULL)
2449 		scsi_ioh_add(&sc->sc_evt_scan_handler);
2450 }
2451 
2452 void
2453 mpi_evt_sas_detach_done(struct mpi_ccb *ccb)
2454 {
2455 	struct mpi_softc			*sc = ccb->ccb_sc;
2456 	struct mpi_msg_scsi_task_reply		*r = ccb->ccb_rcb->rcb_reply;
2457 
2458 	KERNEL_LOCK();
2459 	if (scsi_req_detach(sc->sc_scsibus, r->target_id, -1,
2460 	    DETACH_FORCE) != 0) {
2461 		printf("%s: unable to request detach of %d\n",
2462 		    DEVNAME(sc), r->target_id);
2463 	}
2464 	KERNEL_UNLOCK();
2465 
2466 	mpi_push_reply(sc, ccb->ccb_rcb);
2467 	scsi_io_put(&sc->sc_iopool, ccb);
2468 }
2469 
2470 void
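/*
 * Rescan a fibre channel port: walk the FC device config pages to build a
 * bitmap of the targets the IOC can currently see, then probe the new
 * ones and detach the ones that have gone away.
 */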
2471 mpi_fc_rescan(void *xsc)
2472 {
2473 	struct mpi_softc			*sc = xsc;
2474 	struct mpi_cfg_hdr			hdr;
2475 	struct mpi_cfg_fc_device_pg0		pg;
2476 	struct scsi_link			*link;
2477 	u_int8_t				devmap[256 / NBBY];
2478 	u_int32_t				id = 0xffffff;
2479 	int					i;
2480 
2481 	memset(devmap, 0, sizeof(devmap));
2482 
2483 	do {
2484 		if (mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_DEV, 0,
2485 		    id, 0, &hdr) != 0) {
2486 			printf("%s: header get for rescan of 0x%08x failed\n",
2487 			    DEVNAME(sc), id);
2488 			return;
2489 		}
2490 
2491 		memset(&pg, 0, sizeof(pg));
2492 		if (mpi_req_cfg_page(sc, id, 0, &hdr, 1, &pg, sizeof(pg)) != 0)
2493 			break;
2494 
2495 		if (ISSET(pg.flags, MPI_CFG_FC_DEV_0_FLAGS_BUSADDR_VALID) &&
2496 		    pg.current_bus == 0)
2497 			setbit(devmap, pg.current_target_id);
2498 
2499 		id = lemtoh32(&pg.port_id);
2500 	} while (id <= 0xff0000);
2501 
2502 	for (i = 0; i < sc->sc_buswidth; i++) {
2503 		link = scsi_get_link(sc->sc_scsibus, i, 0);
2504 
2505 		if (isset(devmap, i)) {
2506 			if (link == NULL)
2507 				scsi_probe_target(sc->sc_scsibus, i);
2508 		} else {
2509 			if (link != NULL) {
2510 				scsi_activate(sc->sc_scsibus, i, -1,
2511 				    DVACT_DEACTIVATE);
2512 				scsi_detach_target(sc->sc_scsibus, i,
2513 				    DETACH_FORCE);
2514 			}
2515 		}
2516 	}
2517 }
2518 
2519 void
2520 mpi_eventack(void *cookie, void *io)
2521 {
2522 	struct mpi_softc			*sc = cookie;
2523 	struct mpi_ccb				*ccb = io;
2524 	struct mpi_rcb				*rcb, *next;
2525 	struct mpi_msg_event_reply		*enp;
2526 	struct mpi_msg_eventack_request		*eaq;
2527 
2528 	DNPRINTF(MPI_D_EVT, "%s: event ack\n", DEVNAME(sc));
2529 
2530 	mtx_enter(&sc->sc_evt_ack_mtx);
2531 	rcb = SIMPLEQ_FIRST(&sc->sc_evt_ack_queue);
2532 	if (rcb != NULL) {
2533 		next = SIMPLEQ_NEXT(rcb, rcb_link);
2534 		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_ack_queue, rcb_link);
2535 	}
2536 	mtx_leave(&sc->sc_evt_ack_mtx);
2537 
2538 	if (rcb == NULL) {
2539 		scsi_io_put(&sc->sc_iopool, ccb);
2540 		return;
2541 	}
2542 
2543 	enp = rcb->rcb_reply;
2544 
2545 	ccb->ccb_done = mpi_eventack_done;
2546 	eaq = ccb->ccb_cmd;
2547 
2548 	eaq->function = MPI_FUNCTION_EVENT_ACK;
2549 
2550 	eaq->event = enp->event;
2551 	eaq->event_context = enp->event_context;
2552 
2553 	mpi_push_reply(sc, rcb);
2554 	mpi_start(sc, ccb);
2555 
2556 	if (next != NULL)
2557 		scsi_ioh_add(&sc->sc_evt_ack_handler);
2558 }
2559 
2560 void
2561 mpi_eventack_done(struct mpi_ccb *ccb)
2562 {
2563 	struct mpi_softc			*sc = ccb->ccb_sc;
2564 
2565 	DNPRINTF(MPI_D_EVT, "%s: event ack done\n", DEVNAME(sc));
2566 
2567 	mpi_push_reply(sc, ccb->ccb_rcb);
2568 	scsi_io_put(&sc->sc_iopool, ccb);
2569 }
2570 
2571 int
2572 mpi_portenable(struct mpi_softc *sc)
2573 {
2574 	struct mpi_ccb				*ccb;
2575 	struct mpi_msg_portenable_request	*peq;
2576 	int					rv = 0;
2577 
2578 	DNPRINTF(MPI_D_MISC, "%s: mpi_portenable\n", DEVNAME(sc));
2579 
2580 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
2581 	if (ccb == NULL) {
2582 		DNPRINTF(MPI_D_MISC, "%s: mpi_portenable ccb_get\n",
2583 		    DEVNAME(sc));
2584 		return (1);
2585 	}
2586 
2587 	ccb->ccb_done = mpi_empty_done;
2588 	peq = ccb->ccb_cmd;
2589 
2590 	peq->function = MPI_FUNCTION_PORT_ENABLE;
2591 	peq->port_number = 0;
2592 
2593 	if (mpi_poll(sc, ccb, 50000) != 0) {
2594 		DNPRINTF(MPI_D_MISC, "%s: mpi_portenable poll\n", DEVNAME(sc));
2595 		return (1);
2596 	}
2597 
2598 	if (ccb->ccb_rcb == NULL) {
2599 		DNPRINTF(MPI_D_MISC, "%s: empty portenable reply\n",
2600 		    DEVNAME(sc));
2601 		rv = 1;
2602 	} else
2603 		mpi_push_reply(sc, ccb->ccb_rcb);
2604 
2605 	scsi_io_put(&sc->sc_iopool, ccb);
2606 
2607 	return (rv);
2608 }
2609 
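/*
 * If IOC FACTS flagged MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT, read a copy
 * of the firmware image into a DMA buffer with a single FWUpload request.
 */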
2610 int
2611 mpi_fwupload(struct mpi_softc *sc)
2612 {
2613 	struct mpi_ccb				*ccb;
2614 	struct {
2615 		struct mpi_msg_fwupload_request		req;
2616 		struct mpi_sge				sge;
2617 	} __packed				*bundle;
2618 	struct mpi_msg_fwupload_reply		*upp;
2619 	int					rv = 0;
2620 
2621 	if (sc->sc_fw_len == 0)
2622 		return (0);
2623 
2624 	DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload\n", DEVNAME(sc));
2625 
2626 	sc->sc_fw = mpi_dmamem_alloc(sc, sc->sc_fw_len);
2627 	if (sc->sc_fw == NULL) {
2628 		DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload unable to allocate %d\n",
2629 		    DEVNAME(sc), sc->sc_fw_len);
2630 		return (1);
2631 	}
2632 
2633 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
2634 	if (ccb == NULL) {
2635 		DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload ccb_get\n",
2636 		    DEVNAME(sc));
2637 		goto err;
2638 	}
2639 
2640 	ccb->ccb_done = mpi_empty_done;
2641 	bundle = ccb->ccb_cmd;
2642 
2643 	bundle->req.function = MPI_FUNCTION_FW_UPLOAD;
2644 
2645 	bundle->req.image_type = MPI_FWUPLOAD_IMAGETYPE_IOC_FW;
2646 
2647 	bundle->req.tce.details_length = 12;
2648 	htolem32(&bundle->req.tce.image_size, sc->sc_fw_len);
2649 
2650 	htolem32(&bundle->sge.sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |
2651 	    MPI_SGE_FL_SIZE_64 | MPI_SGE_FL_LAST | MPI_SGE_FL_EOB |
2652 	    MPI_SGE_FL_EOL | (u_int32_t)sc->sc_fw_len);
2653 	mpi_dvatosge(&bundle->sge, MPI_DMA_DVA(sc->sc_fw));
2654 
2655 	if (mpi_poll(sc, ccb, 50000) != 0) {
2656 		DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n", DEVNAME(sc));
2657 		goto err;
2658 	}
2659 
2660 	if (ccb->ccb_rcb == NULL)
2661 		panic("%s: unable to do fw upload", DEVNAME(sc));
2662 	upp = ccb->ccb_rcb->rcb_reply;
2663 
2664 	if (lemtoh16(&upp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
2665 		rv = 1;
2666 
2667 	mpi_push_reply(sc, ccb->ccb_rcb);
2668 	scsi_io_put(&sc->sc_iopool, ccb);
2669 
2670 	return (rv);
2671 
2672 err:
2673 	mpi_dmamem_free(sc, sc->sc_fw);
2674 	return (1);
2675 }
2676 
2677 int
2678 mpi_manufacturing(struct mpi_softc *sc)
2679 {
2680 	char board_name[33];
2681 	struct mpi_cfg_hdr hdr;
2682 	struct mpi_cfg_manufacturing_pg0 *pg;
2683 	size_t pagelen;
2684 	int rv = 1;
2685 
2686 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_MANUFACTURING,
2687 	    0, 0, &hdr) != 0)
2688 		return (1);
2689 
2690 	pagelen = hdr.page_length * 4; /* dwords to bytes */
2691 	if (pagelen < sizeof(*pg))
2692 		return (1);
2693 
2694 	pg = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL);
2695 	if (pg == NULL)
2696 		return (1);
2697 
2698 	if (mpi_cfg_page(sc, 0, &hdr, 1, pg, pagelen) != 0)
2699 		goto out;
2700 
2701 	scsi_strvis(board_name, pg->board_name, sizeof(pg->board_name));
2702 
2703 	printf("%s: %s, firmware %d.%d.%d.%d\n", DEVNAME(sc), board_name,
2704 	    sc->sc_fw_maj, sc->sc_fw_min, sc->sc_fw_unit, sc->sc_fw_dev);
2705 
2706 	rv = 0;
2707 
2708 out:
2709 	free(pg, M_TEMP, pagelen);
2710 	return (rv);
2711 }
2712 
2713 void
2714 mpi_get_raid(struct mpi_softc *sc)
2715 {
2716 	struct mpi_cfg_hdr		hdr;
2717 	struct mpi_cfg_ioc_pg2		*vol_page;
2718 	size_t				pagelen;
2719 	u_int32_t			capabilities;
2720 
2721 	DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid\n", DEVNAME(sc));
2722 
2723 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 2, 0, &hdr) != 0) {
2724 		DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch header "
2725 		    "for IOC page 2\n", DEVNAME(sc));
2726 		return;
2727 	}
2728 
2729 	pagelen = hdr.page_length * 4; /* dwords to bytes */
2730 	vol_page = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL);
2731 	if (vol_page == NULL) {
2732 		DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to allocate "
2733 		    "space for ioc config page 2\n", DEVNAME(sc));
2734 		return;
2735 	}
2736 
2737 	if (mpi_cfg_page(sc, 0, &hdr, 1, vol_page, pagelen) != 0) {
2738 		DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch IOC "
2739 		    "page 2\n", DEVNAME(sc));
2740 		goto out;
2741 	}
2742 
2743 	capabilities = lemtoh32(&vol_page->capabilities);
2744 
2745 	    DNPRINTF(MPI_D_RAID, "%s:  capabilities: 0x%08x\n", DEVNAME(sc),
2746 	    letoh32(vol_page->capabilities));
2747 	DNPRINTF(MPI_D_RAID, "%s:  active_vols: %d max_vols: %d "
2748 	    "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc),
2749 	    vol_page->active_vols, vol_page->max_vols,
2750 	    vol_page->active_physdisks, vol_page->max_physdisks);
2751 
2752 	/* don't walk the list if there is no RAID capability */
2753 	if (capabilities == 0xdeadbeef) {
2754 		printf("%s: deadbeef in raid configuration\n", DEVNAME(sc));
2755 		goto out;
2756 	}
2757 
2758 	if (ISSET(capabilities, MPI_CFG_IOC_2_CAPABILITIES_RAID))
2759 		sc->sc_flags |= MPI_F_RAID;
2760 
2761 out:
2762 	free(vol_page, M_TEMP, pagelen);
2763 }
2764 
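/*
 * Fetch a config page header.  Extended page types go through the same
 * request with the page type set to EXTENDED; depending on the
 * MPI_PG_EXTENDED flag the result is returned via a mpi_cfg_hdr or a
 * mpi_ecfg_hdr.
 */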
2765 int
2766 mpi_req_cfg_header(struct mpi_softc *sc, u_int8_t type, u_int8_t number,
2767     u_int32_t address, int flags, void *p)
2768 {
2769 	struct mpi_ccb				*ccb;
2770 	struct mpi_msg_config_request		*cq;
2771 	struct mpi_msg_config_reply		*cp;
2772 	struct mpi_cfg_hdr			*hdr = p;
2773 	struct mpi_ecfg_hdr			*ehdr = p;
2774 	int					etype = 0;
2775 	int					rv = 0;
2776 
2777 	DNPRINTF(MPI_D_MISC, "%s: mpi_req_cfg_header type: %#x number: %x "
2778 	    "address: 0x%08x flags: 0x%b\n", DEVNAME(sc), type, number,
2779 	    address, flags, MPI_PG_FMT);
2780 
2781 	ccb = scsi_io_get(&sc->sc_iopool,
2782 	    ISSET(flags, MPI_PG_POLL) ? SCSI_NOSLEEP : 0);
2783 	if (ccb == NULL) {
2784 		DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header ccb_get\n",
2785 		    DEVNAME(sc));
2786 		return (1);
2787 	}
2788 
2789 	if (ISSET(flags, MPI_PG_EXTENDED)) {
2790 		etype = type;
2791 		type = MPI_CONFIG_REQ_PAGE_TYPE_EXTENDED;
2792 	}
2793 
2794 	cq = ccb->ccb_cmd;
2795 
2796 	cq->function = MPI_FUNCTION_CONFIG;
2797 
2798 	cq->action = MPI_CONFIG_REQ_ACTION_PAGE_HEADER;
2799 
2800 	cq->config_header.page_number = number;
2801 	cq->config_header.page_type = type;
2802 	cq->ext_page_type = etype;
2803 	htolem32(&cq->page_address, address);
2804 	htolem32(&cq->page_buffer.sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |
2805 	    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL);
2806 
2807 	ccb->ccb_done = mpi_empty_done;
2808 	if (ISSET(flags, MPI_PG_POLL)) {
2809 		if (mpi_poll(sc, ccb, 50000) != 0) {
2810 			DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n",
2811 			    DEVNAME(sc));
2812 			return (1);
2813 		}
2814 	} else
2815 		mpi_wait(sc, ccb);
2816 
2817 	if (ccb->ccb_rcb == NULL)
2818 		panic("%s: unable to fetch config header", DEVNAME(sc));
2819 	cp = ccb->ccb_rcb->rcb_reply;
2820 
2821 	DNPRINTF(MPI_D_MISC, "%s:  action: 0x%02x msg_length: %d function: "
2822 	    "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function);
2823 	DNPRINTF(MPI_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
2824 	    "msg_flags: 0x%02x\n", DEVNAME(sc),
2825 	    letoh16(cp->ext_page_length), cp->ext_page_type,
2826 	    cp->msg_flags);
2827 	DNPRINTF(MPI_D_MISC, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2828 	    letoh32(cp->msg_context));
2829 	DNPRINTF(MPI_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2830 	    letoh16(cp->ioc_status));
2831 	DNPRINTF(MPI_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2832 	    letoh32(cp->ioc_loginfo));
2833 	DNPRINTF(MPI_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
2834 	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2835 	    cp->config_header.page_version,
2836 	    cp->config_header.page_length,
2837 	    cp->config_header.page_number,
2838 	    cp->config_header.page_type);
2839 
2840 	if (lemtoh16(&cp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
2841 		rv = 1;
2842 	else if (ISSET(flags, MPI_PG_EXTENDED)) {
2843 		memset(ehdr, 0, sizeof(*ehdr));
2844 		ehdr->page_version = cp->config_header.page_version;
2845 		ehdr->page_number = cp->config_header.page_number;
2846 		ehdr->page_type = cp->config_header.page_type;
2847 		ehdr->ext_page_length = cp->ext_page_length;
2848 		ehdr->ext_page_type = cp->ext_page_type;
2849 	} else
2850 		*hdr = cp->config_header;
2851 
2852 	mpi_push_reply(sc, ccb->ccb_rcb);
2853 	scsi_io_put(&sc->sc_iopool, ccb);
2854 
2855 	return (rv);
2856 }
2857 
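/*
 * Read or write a config page.  The page is bounced through the tail of
 * the request frame instead of being mapped separately, which limits the
 * transfer to what fits behind the config request header.
 */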
2858 int
2859 mpi_req_cfg_page(struct mpi_softc *sc, u_int32_t address, int flags,
2860     void *p, int read, void *page, size_t len)
2861 {
2862 	struct mpi_ccb				*ccb;
2863 	struct mpi_msg_config_request		*cq;
2864 	struct mpi_msg_config_reply		*cp;
2865 	struct mpi_cfg_hdr			*hdr = p;
2866 	struct mpi_ecfg_hdr			*ehdr = p;
2867 	char					*kva;
2868 	int					page_length;
2869 	int					rv = 0;
2870 
2871 	DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page address: %d read: %d type: %x\n",
2872 	    DEVNAME(sc), address, read, hdr->page_type);
2873 
2874 	page_length = ISSET(flags, MPI_PG_EXTENDED) ?
2875 	    lemtoh16(&ehdr->ext_page_length) : hdr->page_length;
2876 
2877 	if (len > MPI_REQUEST_SIZE - sizeof(struct mpi_msg_config_request) ||
2878 	    len < page_length * 4)
2879 		return (1);
2880 
2881 	ccb = scsi_io_get(&sc->sc_iopool,
2882 	    ISSET(flags, MPI_PG_POLL) ? SCSI_NOSLEEP : 0);
2883 	if (ccb == NULL) {
2884 		DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page ccb_get\n", DEVNAME(sc));
2885 		return (1);
2886 	}
2887 
2888 	cq = ccb->ccb_cmd;
2889 
2890 	cq->function = MPI_FUNCTION_CONFIG;
2891 
2892 	cq->action = (read ? MPI_CONFIG_REQ_ACTION_PAGE_READ_CURRENT :
2893 	    MPI_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT);
2894 
2895 	if (ISSET(flags, MPI_PG_EXTENDED)) {
2896 		cq->config_header.page_version = ehdr->page_version;
2897 		cq->config_header.page_number = ehdr->page_number;
2898 		cq->config_header.page_type = ehdr->page_type;
2899 		cq->ext_page_len = ehdr->ext_page_length;
2900 		cq->ext_page_type = ehdr->ext_page_type;
2901 	} else
2902 		cq->config_header = *hdr;
2903 	cq->config_header.page_type &= MPI_CONFIG_REQ_PAGE_TYPE_MASK;
2904 	htolem32(&cq->page_address, address);
2905 	htolem32(&cq->page_buffer.sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |
2906 	    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL |
2907 	    (page_length * 4) |
2908 	    (read ? MPI_SGE_FL_DIR_IN : MPI_SGE_FL_DIR_OUT));
2909 
2910 	/* bounce the page via the request space to avoid more bus_dma games */
2911 	mpi_dvatosge(&cq->page_buffer, ccb->ccb_cmd_dva +
2912 	    sizeof(struct mpi_msg_config_request));
2913 
2914 	kva = ccb->ccb_cmd;
2915 	kva += sizeof(struct mpi_msg_config_request);
2916 	if (!read)
2917 		memcpy(kva, page, len);
2918 
2919 	ccb->ccb_done = mpi_empty_done;
2920 	if (ISSET(flags, MPI_PG_POLL)) {
2921 		if (mpi_poll(sc, ccb, 50000) != 0) {
2922 			DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n",
2923 			    DEVNAME(sc));
2924 			return (1);
2925 		}
2926 	} else
2927 		mpi_wait(sc, ccb);
2928 
2929 	if (ccb->ccb_rcb == NULL) {
2930 		scsi_io_put(&sc->sc_iopool, ccb);
2931 		return (1);
2932 	}
2933 	cp = ccb->ccb_rcb->rcb_reply;
2934 
2935 	DNPRINTF(MPI_D_MISC, "%s:  action: 0x%02x msg_length: %d function: "
2936 	    "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function);
2937 	DNPRINTF(MPI_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
2938 	    "msg_flags: 0x%02x\n", DEVNAME(sc),
2939 	    letoh16(cp->ext_page_length), cp->ext_page_type,
2940 	    cp->msg_flags);
2941 	DNPRINTF(MPI_D_MISC, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2942 	    letoh32(cp->msg_context));
2943 	DNPRINTF(MPI_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2944 	    letoh16(cp->ioc_status));
2945 	DNPRINTF(MPI_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2946 	    letoh32(cp->ioc_loginfo));
2947 	DNPRINTF(MPI_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
2948 	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2949 	    cp->config_header.page_version,
2950 	    cp->config_header.page_length,
2951 	    cp->config_header.page_number,
2952 	    cp->config_header.page_type);
2953 
2954 	if (lemtoh16(&cp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
2955 		rv = 1;
2956 	else if (read)
2957 		memcpy(page, kva, len);
2958 
2959 	mpi_push_reply(sc, ccb->ccb_rcb);
2960 	scsi_io_put(&sc->sc_iopool, ccb);
2961 
2962 	return (rv);
2963 }
2964 
2965 int
2966 mpi_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
2967 {
2968 	struct mpi_softc	*sc = (struct mpi_softc *)link->adapter_softc;
2969 
2970 	DNPRINTF(MPI_D_IOCTL, "%s: mpi_scsi_ioctl\n", DEVNAME(sc));
2971 
2972 	switch (cmd) {
2973 	case DIOCGCACHE:
2974 	case DIOCSCACHE:
2975 		if (ISSET(link->flags, SDEV_VIRTUAL)) {
2976 			return (mpi_ioctl_cache(link, cmd,
2977 			    (struct dk_cache *)addr));
2978 		}
2979 		break;
2980 
2981 	default:
2982 		if (sc->sc_ioctl)
2983 			return (sc->sc_ioctl(link->adapter_softc, cmd, addr));
2984 
2985 		break;
2986 	}
2987 
2988 	return (ENOTTY);
2989 }
2990 
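/*
 * DIOCGCACHE/DIOCSCACHE on a RAID volume: the write cache enable bit
 * lives in the volume settings of RAID volume page 0, and changing it is
 * done with a RAID action request rather than a config page write.
 */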
2991 int
2992 mpi_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc)
2993 {
2994 	struct mpi_softc	*sc = (struct mpi_softc *)link->adapter_softc;
2995 	struct mpi_ccb		*ccb;
2996 	int			len, rv;
2997 	struct mpi_cfg_hdr	hdr;
2998 	struct mpi_cfg_raid_vol_pg0 *rpg0;
2999 	int			enabled;
3000 	struct mpi_msg_raid_action_request *req;
3001 	struct mpi_msg_raid_action_reply *rep;
3002 	struct mpi_raid_settings settings;
3003 
3004 	rv = mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3005 	    link->target, MPI_PG_POLL, &hdr);
3006 	if (rv != 0)
3007 		return (EIO);
3008 
3009 	len = sizeof(*rpg0) + sc->sc_vol_page->max_physdisks *
3010 	    sizeof(struct mpi_cfg_raid_vol_pg0_physdisk);
3011 	rpg0 = malloc(len, M_TEMP, M_NOWAIT);
3012 	if (rpg0 == NULL)
3013 		return (ENOMEM);
3014 
3015 	if (mpi_req_cfg_page(sc, link->target, MPI_PG_POLL, &hdr, 1,
3016 	    rpg0, len) != 0) {
3017 		DNPRINTF(MPI_D_RAID, "%s: can't get RAID vol cfg page 0\n",
3018 		    DEVNAME(sc));
3019 		rv = EIO;
3020 		goto done;
3021 	}
3022 
3023 	enabled = ISSET(lemtoh16(&rpg0->settings.volume_settings),
3024 	    MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN) ? 1 : 0;
3025 
3026 	if (cmd == DIOCGCACHE) {
3027 		dc->wrcache = enabled;
3028 		dc->rdcache = 0;
3029 		goto done;
3030 	} /* else DIOCSCACHE */
3031 
3032 	if (dc->rdcache) {
3033 		rv = EOPNOTSUPP;
3034 		goto done;
3035 	}
3036 
3037 	if (((dc->wrcache) ? 1 : 0) == enabled)
3038 		goto done;
3039 
3040 	settings = rpg0->settings;
3041 	if (dc->wrcache) {
3042 		SET(settings.volume_settings,
3043 		    htole16(MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN));
3044 	} else {
3045 		CLR(settings.volume_settings,
3046 		    htole16(MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN));
3047 	}
3048 
3049 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
3050 	if (ccb == NULL) {
3051 		rv = ENOMEM;
3052 		goto done;
3053 	}
3054 
3055 	req = ccb->ccb_cmd;
3056 	req->function = MPI_FUNCTION_RAID_ACTION;
3057 	req->action = MPI_MSG_RAID_ACTION_CH_VOL_SETTINGS;
3058 	req->vol_id = rpg0->volume_id;
3059 	req->vol_bus = rpg0->volume_bus;
3060 
3061 	memcpy(&req->data_word, &settings, sizeof(req->data_word));
3062 	ccb->ccb_done = mpi_empty_done;
3063 	if (mpi_poll(sc, ccb, 50000) != 0) {
3064 		rv = EIO;
3065 		goto done;
3066 	}
3067 
3068 	rep = (struct mpi_msg_raid_action_reply *)ccb->ccb_rcb;
3069 	if (rep == NULL)
3070 		panic("%s: raid volume settings change failed", DEVNAME(sc));
3071 
3072 	switch (lemtoh16(&rep->action_status)) {
3073 	case MPI_RAID_ACTION_STATUS_OK:
3074 		rv = 0;
3075 		break;
3076 	default:
3077 		rv = EIO;
3078 		break;
3079 	}
3080 
3081 	mpi_push_reply(sc, ccb->ccb_rcb);
3082 	scsi_io_put(&sc->sc_iopool, ccb);
3083 
3084 done:
3085 	free(rpg0, M_TEMP, len);
3086 	return (rv);
3087 }
3088 
3089 #if NBIO > 0
3090 int
3091 mpi_bio_get_pg0_raid(struct mpi_softc *sc, int id)
3092 {
3093 	int			len, rv = EINVAL;
3094 	u_int32_t		address;
3095 	struct mpi_cfg_hdr	hdr;
3096 	struct mpi_cfg_raid_vol_pg0 *rpg0;
3097 
3098 	/* get IOC page 2 */
3099 	if (mpi_req_cfg_page(sc, 0, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page,
3100 	    sc->sc_cfg_hdr.page_length * 4) != 0) {
3101 		DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid unable to "
3102 		    "fetch IOC page 2\n", DEVNAME(sc));
3103 		goto done;
3104 	}
3105 
3106 	/* XXX return something other than EINVAL to indicate the id is within the hot spare range */
3107 	if (id > sc->sc_vol_page->active_vols) {
3108 		DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid invalid vol "
3109 		    "id: %d\n", DEVNAME(sc), id);
3110 		goto done;
3111 	}
3112 
3113 	/* replace current buffer with new one */
3114 	len = sizeof *rpg0 + sc->sc_vol_page->max_physdisks *
3115 	    sizeof(struct mpi_cfg_raid_vol_pg0_physdisk);
3116 	rpg0 = malloc(len, M_DEVBUF, M_WAITOK | M_CANFAIL);
3117 	if (rpg0 == NULL) {
3118 		printf("%s: can't get memory for RAID page 0, "
3119 		    "bio disabled\n", DEVNAME(sc));
3120 		goto done;
3121 	}
3122 	if (sc->sc_rpg0)
3123 		free(sc->sc_rpg0, M_DEVBUF, 0);
3124 	sc->sc_rpg0 = rpg0;
3125 
3126 	/* get raid vol page 0 */
3127 	address = sc->sc_vol_list[id].vol_id |
3128 	    (sc->sc_vol_list[id].vol_bus << 8);
3129 	if (mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3130 	    address, 0, &hdr) != 0)
3131 		goto done;
3132 	if (mpi_req_cfg_page(sc, address, 0, &hdr, 1, rpg0, len)) {
3133 		DNPRINTF(MPI_D_RAID, "%s: can't get RAID vol cfg page 0\n",
3134 		    DEVNAME(sc));
3135 		goto done;
3136 	}
3137 
3138 	rv = 0;
3139 done:
3140 	return (rv);
3141 }
3142 
3143 int
3144 mpi_ioctl(struct device *dev, u_long cmd, caddr_t addr)
3145 {
3146 	struct mpi_softc	*sc = (struct mpi_softc *)dev;
3147 	int error = 0;
3148 
3149 	DNPRINTF(MPI_D_IOCTL, "%s: mpi_ioctl ", DEVNAME(sc));
3150 
3151 	/* make sure we have bio enabled */
3152 	if (sc->sc_ioctl != mpi_ioctl)
3153 		return (EINVAL);
3154 
3155 	rw_enter_write(&sc->sc_lock);
3156 
3157 	switch (cmd) {
3158 	case BIOCINQ:
3159 		DNPRINTF(MPI_D_IOCTL, "inq\n");
3160 		error = mpi_ioctl_inq(sc, (struct bioc_inq *)addr);
3161 		break;
3162 
3163 	case BIOCVOL:
3164 		DNPRINTF(MPI_D_IOCTL, "vol\n");
3165 		error = mpi_ioctl_vol(sc, (struct bioc_vol *)addr);
3166 		break;
3167 
3168 	case BIOCDISK:
3169 		DNPRINTF(MPI_D_IOCTL, "disk\n");
3170 		error = mpi_ioctl_disk(sc, (struct bioc_disk *)addr);
3171 		break;
3172 
3173 	case BIOCALARM:
3174 		DNPRINTF(MPI_D_IOCTL, "alarm\n");
3175 		break;
3176 
3177 	case BIOCBLINK:
3178 		DNPRINTF(MPI_D_IOCTL, "blink\n");
3179 		break;
3180 
3181 	case BIOCSETSTATE:
3182 		DNPRINTF(MPI_D_IOCTL, "setstate\n");
3183 		error = mpi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
3184 		break;
3185 
3186 	default:
3187 		DNPRINTF(MPI_D_IOCTL, " invalid ioctl\n");
3188 		error = ENOTTY;
3189 	}
3190 
3191 	rw_exit_write(&sc->sc_lock);
3192 
3193 	return (error);
3194 }
3195 
3196 int
3197 mpi_ioctl_inq(struct mpi_softc *sc, struct bioc_inq *bi)
3198 {
3199 	if (!(sc->sc_flags & MPI_F_RAID)) {
3200 		bi->bi_novol = 0;
3201 		bi->bi_nodisk = 0;
3202 	}
3203 
3204 	if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page,
3205 	    sc->sc_cfg_hdr.page_length * 4) != 0) {
3206 		DNPRINTF(MPI_D_IOCTL, "%s: mpi_get_raid unable to fetch IOC "
3207 		    "page 2\n", DEVNAME(sc));
3208 		return (EINVAL);
3209 	}
3210 
3211 	DNPRINTF(MPI_D_IOCTL, "%s:  active_vols: %d max_vols: %d "
3212 	    "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc),
3213 	    sc->sc_vol_page->active_vols, sc->sc_vol_page->max_vols,
3214 	    sc->sc_vol_page->active_physdisks, sc->sc_vol_page->max_physdisks);
3215 
3216 	bi->bi_novol = sc->sc_vol_page->active_vols;
3217 	bi->bi_nodisk = sc->sc_vol_page->active_physdisks;
3218 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
3219 
3220 	return (0);
3221 }
3222 
3223 int
3224 mpi_ioctl_vol(struct mpi_softc *sc, struct bioc_vol *bv)
3225 {
3226 	int			i, vol, id, rv = EINVAL;
3227 	struct device		*dev;
3228 	struct scsi_link	*link;
3229 	struct mpi_cfg_raid_vol_pg0 *rpg0;
3230 	char			*vendp;
3231 
3232 	id = bv->bv_volid;
3233 	if (mpi_bio_get_pg0_raid(sc, id))
3234 		goto done;
3235 
3236 	if (id > sc->sc_vol_page->active_vols)
3237 		return (EINVAL); /* XXX deal with hot spares */
3238 
3239 	rpg0 = sc->sc_rpg0;
3240 	if (rpg0 == NULL)
3241 		goto done;
3242 
3243 	/* determine status */
3244 	switch (rpg0->volume_state) {
3245 	case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL:
3246 		bv->bv_status = BIOC_SVONLINE;
3247 		break;
3248 	case MPI_CFG_RAID_VOL_0_STATE_DEGRADED:
3249 		bv->bv_status = BIOC_SVDEGRADED;
3250 		break;
3251 	case MPI_CFG_RAID_VOL_0_STATE_FAILED:
3252 	case MPI_CFG_RAID_VOL_0_STATE_MISSING:
3253 		bv->bv_status = BIOC_SVOFFLINE;
3254 		break;
3255 	default:
3256 		bv->bv_status = BIOC_SVINVALID;
3257 	}
3258 
3259 	/* override status if scrubbing or something */
3260 	if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING)
3261 		bv->bv_status = BIOC_SVREBUILD;
3262 
3263 	bv->bv_size = (uint64_t)lemtoh32(&rpg0->max_lba) * 512;
3264 
3265 	switch (sc->sc_vol_list[id].vol_type) {
3266 	case MPI_CFG_RAID_TYPE_RAID_IS:
3267 		bv->bv_level = 0;
3268 		break;
3269 	case MPI_CFG_RAID_TYPE_RAID_IME:
3270 	case MPI_CFG_RAID_TYPE_RAID_IM:
3271 		bv->bv_level = 1;
3272 		break;
3273 	case MPI_CFG_RAID_TYPE_RAID_5:
3274 		bv->bv_level = 5;
3275 		break;
3276 	case MPI_CFG_RAID_TYPE_RAID_6:
3277 		bv->bv_level = 6;
3278 		break;
3279 	case MPI_CFG_RAID_TYPE_RAID_10:
3280 		bv->bv_level = 10;
3281 		break;
3282 	case MPI_CFG_RAID_TYPE_RAID_50:
3283 		bv->bv_level = 50;
3284 		break;
3285 	default:
3286 		bv->bv_level = -1;
3287 	}
3288 
3289 	bv->bv_nodisk = rpg0->num_phys_disks;
3290 
3291 	for (i = 0, vol = -1; i < sc->sc_buswidth; i++) {
3292 		link = scsi_get_link(sc->sc_scsibus, i, 0);
3293 		if (link == NULL)
3294 			continue;
3295 
3296 		/* skip if not a virtual disk */
3297 		if (!(link->flags & SDEV_VIRTUAL))
3298 			continue;
3299 
3300 		vol++;
3301 		/* are we it? */
3302 		if (vol == bv->bv_volid) {
3303 			dev = link->device_softc;
3304 			vendp = link->inqdata.vendor;
3305 			memcpy(bv->bv_vendor, vendp, sizeof bv->bv_vendor);
3306 			bv->bv_vendor[sizeof(bv->bv_vendor) - 1] = '\0';
3307 			strlcpy(bv->bv_dev, dev->dv_xname, sizeof bv->bv_dev);
3308 			break;
3309 		}
3310 	}
3311 	rv = 0;
3312 done:
3313 	return (rv);
3314 }
3315 
3316 int
3317 mpi_ioctl_disk(struct mpi_softc *sc, struct bioc_disk *bd)
3318 {
3319 	int			pdid, id, rv = EINVAL;
3320 	u_int32_t		address;
3321 	struct mpi_cfg_hdr	hdr;
3322 	struct mpi_cfg_raid_vol_pg0 *rpg0;
3323 	struct mpi_cfg_raid_vol_pg0_physdisk *physdisk;
3324 	struct mpi_cfg_raid_physdisk_pg0 pdpg0;
3325 
3326 	id = bd->bd_volid;
3327 	if (mpi_bio_get_pg0_raid(sc, id))
3328 		goto done;
3329 
3330 	if (id > sc->sc_vol_page->active_vols)
3331 		return (EINVAL); /* XXX deal with hot spares */
3332 
3333 	rpg0 = sc->sc_rpg0;
3334 	if (rpg0 == NULL)
3335 		goto done;
3336 
3337 	pdid = bd->bd_diskid;
3338 	if (pdid > rpg0->num_phys_disks)
3339 		goto done;
3340 	physdisk = (struct mpi_cfg_raid_vol_pg0_physdisk *)(rpg0 + 1);
3341 	physdisk += pdid;
3342 
3343 	/* get raid phys disk page 0 */
3344 	address = physdisk->phys_disk_num;
3345 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_PD, 0, address,
3346 	    &hdr) != 0)
3347 		goto done;
3348 	if (mpi_cfg_page(sc, address, &hdr, 1, &pdpg0, sizeof pdpg0)) {
3349 		bd->bd_status = BIOC_SDFAILED;
3350 		return (0);
3351 	}
3352 	bd->bd_channel = pdpg0.phys_disk_bus;
3353 	bd->bd_target = pdpg0.phys_disk_id;
3354 	bd->bd_lun = 0;
3355 	bd->bd_size = (uint64_t)lemtoh32(&pdpg0.max_lba) * 512;
3356 	strlcpy(bd->bd_vendor, (char *)pdpg0.vendor_id, sizeof(bd->bd_vendor));
3357 
3358 	switch (pdpg0.phys_disk_state) {
3359 	case MPI_CFG_RAID_PHYDISK_0_STATE_ONLINE:
3360 		bd->bd_status = BIOC_SDONLINE;
3361 		break;
3362 	case MPI_CFG_RAID_PHYDISK_0_STATE_MISSING:
3363 	case MPI_CFG_RAID_PHYDISK_0_STATE_FAILED:
3364 		bd->bd_status = BIOC_SDFAILED;
3365 		break;
3366 	case MPI_CFG_RAID_PHYDISK_0_STATE_HOSTFAIL:
3367 	case MPI_CFG_RAID_PHYDISK_0_STATE_OTHER:
3368 	case MPI_CFG_RAID_PHYDISK_0_STATE_OFFLINE:
3369 		bd->bd_status = BIOC_SDOFFLINE;
3370 		break;
3371 	case MPI_CFG_RAID_PHYDISK_0_STATE_INIT:
3372 		bd->bd_status = BIOC_SDSCRUB;
3373 		break;
3374 	case MPI_CFG_RAID_PHYDISK_0_STATE_INCOMPAT:
3375 	default:
3376 		bd->bd_status = BIOC_SDINVALID;
3377 		break;
3378 	}
3379 
3380 	/* XXX figure this out */
3381 	/* bd_serial[32]; */
3382 	/* bd_procdev[16]; */
3383 
3384 	rv = 0;
3385 done:
3386 	return (rv);
3387 }
3388 
3389 int
3390 mpi_ioctl_setstate(struct mpi_softc *sc, struct bioc_setstate *bs)
3391 {
3392 	return (ENOTTY);
3393 }
3394 
3395 #ifndef SMALL_KERNEL
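/*
 * Attach one drive sensor per RAID volume (i.e. per virtual disk
 * scsi_link) and refresh them from RAID volume page 0 every 10 seconds.
 */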
3396 int
3397 mpi_create_sensors(struct mpi_softc *sc)
3398 {
3399 	struct device		*dev;
3400 	struct scsi_link	*link;
3401 	int			i, vol, nsensors;
3402 
3403 	/* count volumes */
3404 	for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
3405 		link = scsi_get_link(sc->sc_scsibus, i, 0);
3406 		if (link == NULL)
3407 			continue;
3408 		/* skip if not a virtual disk */
3409 		if (!(link->flags & SDEV_VIRTUAL))
3410 			continue;
3411 
3412 		vol++;
3413 	}
3414 	if (vol == 0)
3415 		return (0);
3416 
3417 	sc->sc_sensors = mallocarray(vol, sizeof(struct ksensor),
3418 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3419 	if (sc->sc_sensors == NULL)
3420 		return (1);
3421 	nsensors = vol;
3422 
3423 	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
3424 	    sizeof(sc->sc_sensordev.xname));
3425 
3426 	for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
3427 		link = scsi_get_link(sc->sc_scsibus, i, 0);
3428 		if (link == NULL)
3429 			continue;
3430 		/* skip if not a virtual disk */
3431 		if (!(link->flags & SDEV_VIRTUAL))
3432 			continue;
3433 
3434 		dev = link->device_softc;
3435 		strlcpy(sc->sc_sensors[vol].desc, dev->dv_xname,
3436 		    sizeof(sc->sc_sensors[vol].desc));
3437 		sc->sc_sensors[vol].type = SENSOR_DRIVE;
3438 		sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN;
3439 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[vol]);
3440 
3441 		vol++;
3442 	}
3443 
3444 	if (sensor_task_register(sc, mpi_refresh_sensors, 10) == NULL)
3445 		goto bad;
3446 
3447 	sensordev_install(&sc->sc_sensordev);
3448 
3449 	return (0);
3450 
3451 bad:
3452 	free(sc->sc_sensors, M_DEVBUF, nsensors * sizeof(struct ksensor));
3453 	return (1);
3454 }
3455 
3456 void
3457 mpi_refresh_sensors(void *arg)
3458 {
3459 	int			i, vol;
3460 	struct scsi_link	*link;
3461 	struct mpi_softc	*sc = arg;
3462 	struct mpi_cfg_raid_vol_pg0 *rpg0;
3463 
3464 	rw_enter_write(&sc->sc_lock);
3465 
3466 	for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
3467 		link = scsi_get_link(sc->sc_scsibus, i, 0);
3468 		if (link == NULL)
3469 			continue;
3470 		/* skip if not a virtual disk */
3471 		if (!(link->flags & SDEV_VIRTUAL))
3472 			continue;
3473 
3474 		if (mpi_bio_get_pg0_raid(sc, vol))
3475 			continue;
3476 
3477 		rpg0 = sc->sc_rpg0;
3478 		if (rpg0 == NULL)
3479 			goto done;
3480 
3481 		/* determine status */
3482 		switch (rpg0->volume_state) {
3483 		case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL:
3484 			sc->sc_sensors[vol].value = SENSOR_DRIVE_ONLINE;
3485 			sc->sc_sensors[vol].status = SENSOR_S_OK;
3486 			break;
3487 		case MPI_CFG_RAID_VOL_0_STATE_DEGRADED:
3488 			sc->sc_sensors[vol].value = SENSOR_DRIVE_PFAIL;
3489 			sc->sc_sensors[vol].status = SENSOR_S_WARN;
3490 			break;
3491 		case MPI_CFG_RAID_VOL_0_STATE_FAILED:
3492 		case MPI_CFG_RAID_VOL_0_STATE_MISSING:
3493 			sc->sc_sensors[vol].value = SENSOR_DRIVE_FAIL;
3494 			sc->sc_sensors[vol].status = SENSOR_S_CRIT;
3495 			break;
3496 		default:
3497 			sc->sc_sensors[vol].value = 0; /* unknown */
3498 			sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN;
3499 		}
3500 
3501 		/* override status if scrubbing or something */
3502 		if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING) {
3503 			sc->sc_sensors[vol].value = SENSOR_DRIVE_REBUILD;
3504 			sc->sc_sensors[vol].status = SENSOR_S_WARN;
3505 		}
3506 
3507 		vol++;
3508 	}
3509 done:
3510 	rw_exit_write(&sc->sc_lock);
3511 }
3512 #endif /* SMALL_KERNEL */
3513 #endif /* NBIO > 0 */
3514