xref: /openbsd-src/sys/dev/ic/mpi.c (revision e419548092f59c20a140404818050eb2ab331a19)
1 /*	$OpenBSD: mpi.c,v 1.199 2015/01/27 03:17:36 dlg Exp $ */
2 
3 /*
4  * Copyright (c) 2005, 2006, 2009 David Gwynne <dlg@openbsd.org>
5  * Copyright (c) 2005, 2008, 2009 Marco Peereboom <marco@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "bio.h"
21 
22 #include <sys/param.h>
23 #include <sys/systm.h>
24 #include <sys/buf.h>
25 #include <sys/device.h>
26 #include <sys/ioctl.h>
27 #include <sys/malloc.h>
28 #include <sys/kernel.h>
29 #include <sys/mutex.h>
30 #include <sys/rwlock.h>
31 #include <sys/sensors.h>
32 #include <sys/dkio.h>
33 #include <sys/task.h>
34 
35 #include <machine/bus.h>
36 
37 #include <scsi/scsi_all.h>
38 #include <scsi/scsiconf.h>
39 
40 #include <dev/biovar.h>
41 #include <dev/ic/mpireg.h>
42 #include <dev/ic/mpivar.h>
43 
44 #ifdef MPI_DEBUG
45 uint32_t	mpi_debug = 0
46 /*		    | MPI_D_CMD */
47 /*		    | MPI_D_INTR */
48 /*		    | MPI_D_MISC */
49 /*		    | MPI_D_DMA */
50 /*		    | MPI_D_IOCTL */
51 /*		    | MPI_D_RW */
52 /*		    | MPI_D_MEM */
53 /*		    | MPI_D_CCB */
54 /*		    | MPI_D_PPR */
55 /*		    | MPI_D_RAID */
56 /*		    | MPI_D_EVT */
57 		;
58 #endif
59 
60 struct cfdriver mpi_cd = {
61 	NULL,
62 	"mpi",
63 	DV_DULL
64 };
65 
66 void			mpi_scsi_cmd(struct scsi_xfer *);
67 void			mpi_scsi_cmd_done(struct mpi_ccb *);
68 void			mpi_minphys(struct buf *bp, struct scsi_link *sl);
69 int			mpi_scsi_probe(struct scsi_link *);
70 int			mpi_scsi_ioctl(struct scsi_link *, u_long, caddr_t,
71 			    int);
72 
73 struct scsi_adapter mpi_switch = {
74 	mpi_scsi_cmd,
75 	mpi_minphys,
76 	mpi_scsi_probe,
77 	NULL,
78 	mpi_scsi_ioctl
79 };
80 
81 struct mpi_dmamem	*mpi_dmamem_alloc(struct mpi_softc *, size_t);
82 void			mpi_dmamem_free(struct mpi_softc *,
83 			    struct mpi_dmamem *);
84 int			mpi_alloc_ccbs(struct mpi_softc *);
85 void			*mpi_get_ccb(void *);
86 void			mpi_put_ccb(void *, void *);
87 int			mpi_alloc_replies(struct mpi_softc *);
88 void			mpi_push_replies(struct mpi_softc *);
89 void			mpi_push_reply(struct mpi_softc *, struct mpi_rcb *);
90 
91 void			mpi_start(struct mpi_softc *, struct mpi_ccb *);
92 int			mpi_poll(struct mpi_softc *, struct mpi_ccb *, int);
93 void			mpi_poll_done(struct mpi_ccb *);
94 void			mpi_reply(struct mpi_softc *, u_int32_t);
95 
96 void			mpi_wait(struct mpi_softc *sc, struct mpi_ccb *);
97 void			mpi_wait_done(struct mpi_ccb *);
98 
99 int			mpi_cfg_spi_port(struct mpi_softc *);
100 void			mpi_squash_ppr(struct mpi_softc *);
101 void			mpi_run_ppr(struct mpi_softc *);
102 int			mpi_ppr(struct mpi_softc *, struct scsi_link *,
103 			    struct mpi_cfg_raid_physdisk *, int, int, int);
104 int			mpi_inq(struct mpi_softc *, u_int16_t, int);
105 
106 int			mpi_cfg_sas(struct mpi_softc *);
107 int			mpi_cfg_fc(struct mpi_softc *);
108 
109 void			mpi_timeout_xs(void *);
110 int			mpi_load_xs(struct mpi_ccb *);
111 
112 u_int32_t		mpi_read(struct mpi_softc *, bus_size_t);
113 void			mpi_write(struct mpi_softc *, bus_size_t, u_int32_t);
114 int			mpi_wait_eq(struct mpi_softc *, bus_size_t, u_int32_t,
115 			    u_int32_t);
116 int			mpi_wait_ne(struct mpi_softc *, bus_size_t, u_int32_t,
117 			    u_int32_t);
118 
119 int			mpi_init(struct mpi_softc *);
120 int			mpi_reset_soft(struct mpi_softc *);
121 int			mpi_reset_hard(struct mpi_softc *);
122 
123 int			mpi_handshake_send(struct mpi_softc *, void *, size_t);
124 int			mpi_handshake_recv_dword(struct mpi_softc *,
125 			    u_int32_t *);
126 int			mpi_handshake_recv(struct mpi_softc *, void *, size_t);
127 
128 void			mpi_empty_done(struct mpi_ccb *);
129 
130 int			mpi_iocinit(struct mpi_softc *);
131 int			mpi_iocfacts(struct mpi_softc *);
132 int			mpi_portfacts(struct mpi_softc *);
133 int			mpi_portenable(struct mpi_softc *);
134 int			mpi_cfg_coalescing(struct mpi_softc *);
135 void			mpi_get_raid(struct mpi_softc *);
136 int			mpi_fwupload(struct mpi_softc *);
137 int			mpi_manufacturing(struct mpi_softc *);
138 int			mpi_scsi_probe_virtual(struct scsi_link *);
139 
140 int			mpi_eventnotify(struct mpi_softc *);
141 void			mpi_eventnotify_done(struct mpi_ccb *);
142 void			mpi_eventnotify_free(struct mpi_softc *,
143 			    struct mpi_rcb *);
144 void			mpi_eventack(void *, void *);
145 void			mpi_eventack_done(struct mpi_ccb *);
146 int			mpi_evt_sas(struct mpi_softc *, struct mpi_rcb *);
147 void			mpi_evt_sas_detach(void *, void *);
148 void			mpi_evt_sas_detach_done(struct mpi_ccb *);
149 void			mpi_fc_rescan(void *);
150 
151 int			mpi_req_cfg_header(struct mpi_softc *, u_int8_t,
152 			    u_int8_t, u_int32_t, int, void *);
153 int			mpi_req_cfg_page(struct mpi_softc *, u_int32_t, int,
154 			    void *, int, void *, size_t);
155 
156 int			mpi_ioctl_cache(struct scsi_link *, u_long,
157 			    struct dk_cache *);
158 
159 #if NBIO > 0
160 int		mpi_bio_get_pg0_raid(struct mpi_softc *, int);
161 int		mpi_ioctl(struct device *, u_long, caddr_t);
162 int		mpi_ioctl_inq(struct mpi_softc *, struct bioc_inq *);
163 int		mpi_ioctl_vol(struct mpi_softc *, struct bioc_vol *);
164 int		mpi_ioctl_disk(struct mpi_softc *, struct bioc_disk *);
165 int		mpi_ioctl_setstate(struct mpi_softc *, struct bioc_setstate *);
166 #ifndef SMALL_KERNEL
167 int		mpi_create_sensors(struct mpi_softc *);
168 void		mpi_refresh_sensors(void *);
169 #endif /* SMALL_KERNEL */
170 #endif /* NBIO > 0 */
171 
172 #define DEVNAME(s)		((s)->sc_dev.dv_xname)
173 
174 #define	dwordsof(s)		(sizeof(s) / sizeof(u_int32_t))
175 
176 #define mpi_read_db(s)		mpi_read((s), MPI_DOORBELL)
177 #define mpi_write_db(s, v)	mpi_write((s), MPI_DOORBELL, (v))
178 #define mpi_read_intr(s)	bus_space_read_4((s)->sc_iot, (s)->sc_ioh, \
179 				    MPI_INTR_STATUS)
180 #define mpi_write_intr(s, v)	mpi_write((s), MPI_INTR_STATUS, (v))
181 #define mpi_pop_reply(s)	bus_space_read_4((s)->sc_iot, (s)->sc_ioh, \
182 				    MPI_REPLY_QUEUE)
183 #define mpi_push_reply_db(s, v) bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
184 				    MPI_REPLY_QUEUE, (v))
185 
186 #define mpi_wait_db_int(s)	mpi_wait_ne((s), MPI_INTR_STATUS, \
187 				    MPI_INTR_STATUS_DOORBELL, 0)
188 #define mpi_wait_db_ack(s)	mpi_wait_eq((s), MPI_INTR_STATUS, \
189 				    MPI_INTR_STATUS_IOCDOORBELL, 0)
190 
191 #define MPI_PG_EXTENDED		(1<<0)
192 #define MPI_PG_POLL		(1<<1)
193 #define MPI_PG_FMT		"\020" "\002POLL" "\001EXTENDED"
194 
195 #define mpi_cfg_header(_s, _t, _n, _a, _h) \
196 	mpi_req_cfg_header((_s), (_t), (_n), (_a), \
197 	    MPI_PG_POLL, (_h))
198 #define mpi_ecfg_header(_s, _t, _n, _a, _h) \
199 	mpi_req_cfg_header((_s), (_t), (_n), (_a), \
200 	    MPI_PG_POLL|MPI_PG_EXTENDED, (_h))
201 
202 #define mpi_cfg_page(_s, _a, _h, _r, _p, _l) \
203 	mpi_req_cfg_page((_s), (_a), MPI_PG_POLL, \
204 	    (_h), (_r), (_p), (_l))
205 #define mpi_ecfg_page(_s, _a, _h, _r, _p, _l) \
206 	mpi_req_cfg_page((_s), (_a), MPI_PG_POLL|MPI_PG_EXTENDED, \
207 	    (_h), (_r), (_p), (_l))
208 
209 static inline void
210 mpi_dvatosge(struct mpi_sge *sge, u_int64_t dva)
211 {
212 	htolem32(&sge->sg_addr_lo, dva);
213 	htolem32(&sge->sg_addr_hi, dva >> 32);
214 }
215 
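/*
 * mpi_attach: bring the IOC from reset to operational and hook it up to
 * the midlayer.  The sequence below follows the hardware requirements:
 * reset/ready the IOC, fetch IOCFACTS to size our resources, allocate
 * request (ccb) and reply DMA memory, send IOCINIT, wait for the
 * operational doorbell state, post the reply frames, then do the
 * port-type specific configuration (SPI/SAS/FC) before attaching
 * scsibus.
 */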
216 int
217 mpi_attach(struct mpi_softc *sc)
218 {
219 	struct scsibus_attach_args	saa;
220 	struct mpi_ccb			*ccb;
221 
222 	printf("\n");
223 
224 	rw_init(&sc->sc_lock, "mpi_lock");
225 	task_set(&sc->sc_evt_rescan, mpi_fc_rescan, sc);
226 
227 	/* disable interrupts */
228 	mpi_write(sc, MPI_INTR_MASK,
229 	    MPI_INTR_MASK_REPLY | MPI_INTR_MASK_DOORBELL);
230 
231 	if (mpi_init(sc) != 0) {
232 		printf("%s: unable to initialise\n", DEVNAME(sc));
233 		return (1);
234 	}
235 
236 	if (mpi_iocfacts(sc) != 0) {
237 		printf("%s: unable to get iocfacts\n", DEVNAME(sc));
238 		return (1);
239 	}
240 
241 	if (mpi_alloc_ccbs(sc) != 0) {
242 		/* error already printed */
243 		return (1);
244 	}
245 
246 	if (mpi_alloc_replies(sc) != 0) {
247 		printf("%s: unable to allocate reply space\n", DEVNAME(sc));
248 		goto free_ccbs;
249 	}
250 
251 	if (mpi_iocinit(sc) != 0) {
252 		printf("%s: unable to send iocinit\n", DEVNAME(sc));
253 		goto free_ccbs;
254 	}
255 
256 	/* spin until we're operational */
257 	if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
258 	    MPI_DOORBELL_STATE_OPER) != 0) {
259 		printf("%s: state: 0x%08x\n", DEVNAME(sc),
260 		    mpi_read_db(sc) & MPI_DOORBELL_STATE);
261 		printf("%s: operational state timeout\n", DEVNAME(sc));
262 		goto free_ccbs;
263 	}
264 
265 	mpi_push_replies(sc);
266 
267 	if (mpi_portfacts(sc) != 0) {
268 		printf("%s: unable to get portfacts\n", DEVNAME(sc));
269 		goto free_replies;
270 	}
271 
272 	if (mpi_cfg_coalescing(sc) != 0) {
273 		printf("%s: unable to configure coalescing\n", DEVNAME(sc));
274 		goto free_replies;
275 	}
276 
277 	switch (sc->sc_porttype) {
278 	case MPI_PORTFACTS_PORTTYPE_SAS:
279 		SIMPLEQ_INIT(&sc->sc_evt_scan_queue);
280 		mtx_init(&sc->sc_evt_scan_mtx, IPL_BIO);
281 		scsi_ioh_set(&sc->sc_evt_scan_handler, &sc->sc_iopool,
282 		    mpi_evt_sas_detach, sc);
283 		/* FALLTHROUGH */
284 	case MPI_PORTFACTS_PORTTYPE_FC:
285 		if (mpi_eventnotify(sc) != 0) {
286 			printf("%s: unable to enable events\n", DEVNAME(sc));
287 			goto free_replies;
288 		}
289 		break;
290 	}
291 
292 	if (mpi_portenable(sc) != 0) {
293 		printf("%s: unable to enable port\n", DEVNAME(sc));
294 		goto free_replies;
295 	}
296 
297 	if (mpi_fwupload(sc) != 0) {
298 		printf("%s: unable to upload firmware\n", DEVNAME(sc));
299 		goto free_replies;
300 	}
301 
302 	if (mpi_manufacturing(sc) != 0) {
303 		printf("%s: unable to fetch manufacturing info\n", DEVNAME(sc));
		goto free_replies;
304 	}
305 
306 	switch (sc->sc_porttype) {
307 	case MPI_PORTFACTS_PORTTYPE_SCSI:
308 		if (mpi_cfg_spi_port(sc) != 0) {
309 			printf("%s: unable to configure spi\n", DEVNAME(sc));
310 			goto free_replies;
311 		}
312 		mpi_squash_ppr(sc);
313 		break;
314 	case MPI_PORTFACTS_PORTTYPE_SAS:
315 		if (mpi_cfg_sas(sc) != 0) {
316 			printf("%s: unable to configure sas\n", DEVNAME(sc));
317 			goto free_replies;
318 		}
319 		break;
320 	case MPI_PORTFACTS_PORTTYPE_FC:
321 		if (mpi_cfg_fc(sc) != 0) {
322 			printf("%s: unable to configure fc\n", DEVNAME(sc));
323 			goto free_replies;
324 		}
325 		break;
326 	}
327 
328 	/* get raid pages */
329 	mpi_get_raid(sc);
330 #if NBIO > 0
331 	if (sc->sc_flags & MPI_F_RAID) {
332 		if (bio_register(&sc->sc_dev, mpi_ioctl) != 0)
333 			panic("%s: controller registration failed",
334 			    DEVNAME(sc));
335 		else {
336 			if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC,
337 			    2, 0, &sc->sc_cfg_hdr) != 0) {
338 				panic("%s: can't get IOC page 2 hdr",
339 				    DEVNAME(sc));
340 			}
341 
342 			sc->sc_vol_page = mallocarray(sc->sc_cfg_hdr.page_length,
343 			    4, M_TEMP, M_WAITOK | M_CANFAIL);
344 			if (sc->sc_vol_page == NULL) {
345 				panic("%s: can't get memory for IOC page 2, "
346 				    "bio disabled", DEVNAME(sc));
347 			}
348 
349 			if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1,
350 			    sc->sc_vol_page,
351 			    sc->sc_cfg_hdr.page_length * 4) != 0) {
352 				panic("%s: can't get IOC page 2", DEVNAME(sc));
353 			}
354 
355 			sc->sc_vol_list = (struct mpi_cfg_raid_vol *)
356 			    (sc->sc_vol_page + 1);
357 
358 			sc->sc_ioctl = mpi_ioctl;
359 		}
360 	}
361 #endif /* NBIO > 0 */
362 
363 	/* we should be good to go now, attach scsibus */
364 	sc->sc_link.adapter = &mpi_switch;
365 	sc->sc_link.adapter_softc = sc;
366 	sc->sc_link.adapter_target = sc->sc_target;
367 	sc->sc_link.adapter_buswidth = sc->sc_buswidth;
368 	sc->sc_link.openings = MAX(sc->sc_maxcmds / sc->sc_buswidth, 16);
369 	sc->sc_link.pool = &sc->sc_iopool;
370 
371 	memset(&saa, 0, sizeof(saa));
372 	saa.saa_sc_link = &sc->sc_link;
373 
374 	/* config_found() returns the scsibus attached to us */
375 	sc->sc_scsibus = (struct scsibus_softc *) config_found(&sc->sc_dev,
376 	    &saa, scsiprint);
377 
378 	/* do domain validation */
379 	if (sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_SCSI)
380 		mpi_run_ppr(sc);
381 
382 	/* enable interrupts */
383 	mpi_write(sc, MPI_INTR_MASK, MPI_INTR_MASK_DOORBELL);
384 
385 #if NBIO > 0
386 #ifndef SMALL_KERNEL
387 	mpi_create_sensors(sc);
388 #endif /* SMALL_KERNEL */
389 #endif /* NBIO > 0 */
390 
391 	return (0);
392 
393 free_replies:
394 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 0,
395 	    sc->sc_repq * MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD);
396 	mpi_dmamem_free(sc, sc->sc_replies);
397 free_ccbs:
398 	while ((ccb = mpi_get_ccb(sc)) != NULL)
399 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
400 	mpi_dmamem_free(sc, sc->sc_requests);
401 	free(sc->sc_ccbs, M_DEVBUF, 0);
402 
403 	return(1);
404 }
405 
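/*
 * Make sure SPI port page 1 carries our initiator id, the matching
 * response id bitmask and a non-zero on-bus timer.  If the page already
 * looks right it is left alone, otherwise it is rewritten.
 */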
406 int
407 mpi_cfg_spi_port(struct mpi_softc *sc)
408 {
409 	struct mpi_cfg_hdr		hdr;
410 	struct mpi_cfg_spi_port_pg1	port;
411 
412 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 1, 0x0,
413 	    &hdr) != 0)
414 		return (1);
415 
416 	if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port, sizeof(port)) != 0)
417 		return (1);
418 
419 	DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_spi_port_pg1\n", DEVNAME(sc));
420 	DNPRINTF(MPI_D_MISC, "%s:  port_scsi_id: %d port_resp_ids 0x%04x\n",
421 	    DEVNAME(sc), port.port_scsi_id, letoh16(port.port_resp_ids));
422 	DNPRINTF(MPI_D_MISC, "%s:  on_bus_timer_value: 0x%08x\n", DEVNAME(sc),
423 	    letoh32(port.on_bus_timer_value));
424 	DNPRINTF(MPI_D_MISC, "%s:  target_config: 0x%02x id_config: 0x%04x\n",
425 	    DEVNAME(sc), port.target_config, letoh16(port.id_config));
426 
427 	if (port.port_scsi_id == sc->sc_target &&
428 	    port.port_resp_ids == htole16(1 << sc->sc_target) &&
429 	    port.on_bus_timer_value != htole32(0x0))
430 		return (0);
431 
432 	DNPRINTF(MPI_D_MISC, "%s: setting port scsi id to %d\n", DEVNAME(sc),
433 	    sc->sc_target);
434 	port.port_scsi_id = sc->sc_target;
435 	port.port_resp_ids = htole16(1 << sc->sc_target);
436 	port.on_bus_timer_value = htole32(0x07000000); /* XXX magic */
437 
438 	if (mpi_cfg_page(sc, 0x0, &hdr, 0, &port, sizeof(port)) != 0) {
439 		printf("%s: unable to configure port scsi id\n", DEVNAME(sc));
440 		return (1);
441 	}
442 
443 	return (0);
444 }
445 
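/*
 * Clear the requested transfer parameters (SPI device page 1) for every
 * possible target id so later negotiation in mpi_run_ppr() starts from
 * a clean async/narrow state.
 */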
446 void
447 mpi_squash_ppr(struct mpi_softc *sc)
448 {
449 	struct mpi_cfg_hdr		hdr;
450 	struct mpi_cfg_spi_dev_pg1	page;
451 	int				i;
452 
453 	DNPRINTF(MPI_D_PPR, "%s: mpi_squash_ppr\n", DEVNAME(sc));
454 
455 	for (i = 0; i < sc->sc_buswidth; i++) {
456 		if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV,
457 		    1, i, &hdr) != 0)
458 			return;
459 
460 		if (mpi_cfg_page(sc, i, &hdr, 1, &page, sizeof(page)) != 0)
461 			return;
462 
463 		DNPRINTF(MPI_D_PPR, "%s:  target: %d req_params1: 0x%02x "
464 		    "req_offset: 0x%02x req_period: 0x%02x "
465 		    "req_params2: 0x%02x conf: 0x%08x\n", DEVNAME(sc), i,
466 		    page.req_params1, page.req_offset, page.req_period,
467 		    page.req_params2, letoh32(page.configuration));
468 
469 		page.req_params1 = 0x0;
470 		page.req_offset = 0x0;
471 		page.req_period = 0x0;
472 		page.req_params2 = 0x0;
473 		page.configuration = htole32(0x0);
474 
475 		if (mpi_cfg_page(sc, i, &hdr, 0, &page, sizeof(page)) != 0)
476 			return;
477 	}
478 }
479 
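/*
 * Walk all attached targets (and, on RAID capable controllers, the
 * hidden physical disks listed in IOC page 3) and negotiate transfer
 * parameters via mpi_ppr(), stepping down from U320 whenever a try
 * returns EAGAIN.
 */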
480 void
481 mpi_run_ppr(struct mpi_softc *sc)
482 {
483 	struct mpi_cfg_hdr		hdr;
484 	struct mpi_cfg_spi_port_pg0	port_pg;
485 	struct mpi_cfg_ioc_pg3		*physdisk_pg;
486 	struct mpi_cfg_raid_physdisk	*physdisk_list, *physdisk;
487 	size_t				pagelen;
488 	struct scsi_link		*link;
489 	int				i, tries;
490 
491 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 0, 0x0,
492 	    &hdr) != 0) {
493 		DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch header\n",
494 		    DEVNAME(sc));
495 		return;
496 	}
497 
498 	if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port_pg, sizeof(port_pg)) != 0) {
499 		DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch page\n",
500 		    DEVNAME(sc));
501 		return;
502 	}
503 
504 	for (i = 0; i < sc->sc_buswidth; i++) {
505 		link = scsi_get_link(sc->sc_scsibus, i, 0);
506 		if (link == NULL)
507 			continue;
508 
509 		/* do not ppr volumes */
510 		if (link->flags & SDEV_VIRTUAL)
511 			continue;
512 
513 		tries = 0;
514 		while (mpi_ppr(sc, link, NULL, port_pg.min_period,
515 		    port_pg.max_offset, tries) == EAGAIN)
516 			tries++;
517 	}
518 
519 	if ((sc->sc_flags & MPI_F_RAID) == 0)
520 		return;
521 
522 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 3, 0x0,
523 	    &hdr) != 0) {
524 		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
525 		    "fetch ioc pg 3 header\n", DEVNAME(sc));
526 		return;
527 	}
528 
529 	pagelen = hdr.page_length * 4; /* dwords to bytes */
530 	physdisk_pg = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL);
531 	if (physdisk_pg == NULL) {
532 		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
533 		    "allocate ioc pg 3\n", DEVNAME(sc));
534 		return;
535 	}
536 	physdisk_list = (struct mpi_cfg_raid_physdisk *)(physdisk_pg + 1);
537 
538 	if (mpi_cfg_page(sc, 0, &hdr, 1, physdisk_pg, pagelen) != 0) {
539 		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
540 		    "fetch ioc page 3\n", DEVNAME(sc));
541 		goto out;
542 	}
543 
544 	DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s:  no_phys_disks: %d\n", DEVNAME(sc),
545 	    physdisk_pg->no_phys_disks);
546 
547 	for (i = 0; i < physdisk_pg->no_phys_disks; i++) {
548 		physdisk = &physdisk_list[i];
549 
550 		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s:  id: %d bus: %d ioc: %d "
551 		    "num: %d\n", DEVNAME(sc), physdisk->phys_disk_id,
552 		    physdisk->phys_disk_bus, physdisk->phys_disk_ioc,
553 		    physdisk->phys_disk_num);
554 
555 		if (physdisk->phys_disk_ioc != sc->sc_ioc_number)
556 			continue;
557 
558 		tries = 0;
559 		while (mpi_ppr(sc, NULL, physdisk, port_pg.min_period,
560 		    port_pg.max_offset, tries) == EAGAIN)
561 			tries++;
562 	}
563 
564 out:
565 	free(physdisk_pg, M_TEMP, 0);
566 }
567 
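/*
 * Negotiate parallel SCSI transfer parameters for one target or RAID
 * physical disk.  SPI device page 1 is written with the requested
 * period, offset and options for this attempt (try 0 = U320, 1 = U160,
 * 2 = U80), an INQUIRY is issued to force the negotiation, and device
 * page 0 is read back to see what was actually agreed on.  EAGAIN tells
 * the caller to retry at the next slower speed.
 */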
568 int
569 mpi_ppr(struct mpi_softc *sc, struct scsi_link *link,
570     struct mpi_cfg_raid_physdisk *physdisk, int period, int offset, int try)
571 {
572 	struct mpi_cfg_hdr		hdr0, hdr1;
573 	struct mpi_cfg_spi_dev_pg0	pg0;
574 	struct mpi_cfg_spi_dev_pg1	pg1;
575 	u_int32_t			address;
576 	int				id;
577 	int				raid = 0;
578 
579 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr period: %d offset: %d try: %d "
580 	    "link quirks: 0x%x\n", DEVNAME(sc), period, offset, try,
581 	    link->quirks);
582 
583 	if (try >= 3)
584 		return (EIO);
585 
586 	if (physdisk == NULL) {
587 		if ((link->inqdata.device & SID_TYPE) == T_PROCESSOR)
588 			return (EIO);
589 
590 		address = link->target;
591 		id = link->target;
592 	} else {
593 		raid = 1;
594 		address = (physdisk->phys_disk_bus << 8) |
595 		    (physdisk->phys_disk_id);
596 		id = physdisk->phys_disk_num;
597 	}
598 
599 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 0,
600 	    address, &hdr0) != 0) {
601 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 0\n",
602 		    DEVNAME(sc));
603 		return (EIO);
604 	}
605 
606 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 1,
607 	    address, &hdr1) != 0) {
608 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 1\n",
609 		    DEVNAME(sc));
610 		return (EIO);
611 	}
612 
613 #ifdef MPI_DEBUG
614 	if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0)) != 0) {
615 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 0\n",
616 		    DEVNAME(sc));
617 		return (EIO);
618 	}
619 
620 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x "
621 	    "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x "
622 	    "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset,
623 	    pg0.neg_period, pg0.neg_params2, letoh32(pg0.information));
624 #endif
625 
626 	if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1)) != 0) {
627 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 1\n",
628 		    DEVNAME(sc));
629 		return (EIO);
630 	}
631 
632 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
633 	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
634 	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
635 	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
636 
637 	pg1.req_params1 = 0;
638 	pg1.req_offset = offset;
639 	pg1.req_period = period;
640 	pg1.req_params2 &= ~MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH;
641 
642 	if (raid || !(link->quirks & SDEV_NOSYNC)) {
643 		pg1.req_params2 |= MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH_WIDE;
644 
645 		switch (try) {
646 		case 0: /* U320 */
647 			break;
648 		case 1: /* U160 */
649 			pg1.req_period = 0x09;
650 			break;
651 		case 2: /* U80 */
652 			pg1.req_period = 0x0a;
653 			break;
654 		}
655 
656 		if (pg1.req_period < 0x09) {
657 			/* Ultra320: enable QAS & PACKETIZED */
658 			pg1.req_params1 |= MPI_CFG_SPI_DEV_1_REQPARAMS_QAS |
659 			    MPI_CFG_SPI_DEV_1_REQPARAMS_PACKETIZED;
660 		}
661 		if (pg1.req_period < 0xa) {
662 			/* >= Ultra160: enable dual xfers */
663 			pg1.req_params1 |=
664 			    MPI_CFG_SPI_DEV_1_REQPARAMS_DUALXFERS;
665 		}
666 	}
667 
668 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
669 	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
670 	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
671 	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
672 
673 	if (mpi_cfg_page(sc, address, &hdr1, 0, &pg1, sizeof(pg1)) != 0) {
674 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to write page 1\n",
675 		    DEVNAME(sc));
676 		return (EIO);
677 	}
678 
679 	if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1)) != 0) {
680 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 1\n",
681 		    DEVNAME(sc));
682 		return (EIO);
683 	}
684 
685 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
686 	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
687 	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
688 	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
689 
690 	if (mpi_inq(sc, id, raid) != 0) {
691 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to do inquiry against "
692 		    "target %d\n", DEVNAME(sc), link->target);
693 		return (EIO);
694 	}
695 
696 	if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0)) != 0) {
697 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 0 after "
698 		    "inquiry\n", DEVNAME(sc));
699 		return (EIO);
700 	}
701 
702 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x "
703 	    "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x "
704 	    "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset,
705 	    pg0.neg_period, pg0.neg_params2, letoh32(pg0.information));
706 
707 	if (!(lemtoh32(&pg0.information) & 0x07) && (try == 0)) {
708 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U320 ppr rejected\n",
709 		    DEVNAME(sc));
710 		return (EAGAIN);
711 	}
712 
713 	if ((((lemtoh32(&pg0.information) >> 8) & 0xff) > 0x09) && (try == 1)) {
714 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U160 ppr rejected\n",
715 		    DEVNAME(sc));
716 		return (EAGAIN);
717 	}
718 
719 	if (lemtoh32(&pg0.information) & 0x0e) {
720 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr ppr rejected: %0x\n",
721 		    DEVNAME(sc), lemtoh32(&pg0.information));
722 		return (EAGAIN);
723 	}
724 
725 	switch(pg0.neg_period) {
726 	case 0x08:
727 		period = 160;
728 		break;
729 	case 0x09:
730 		period = 80;
731 		break;
732 	case 0x0a:
733 		period = 40;
734 		break;
735 	case 0x0b:
736 		period = 20;
737 		break;
738 	case 0x0c:
739 		period = 10;
740 		break;
741 	default:
742 		period = 0;
743 		break;
744 	}
745 
746 	printf("%s: %s %d %s at %dMHz width %dbit offset %d "
747 	    "QAS %d DT %d IU %d\n", DEVNAME(sc), raid ? "phys disk" : "target",
748 	    id, period ? "Sync" : "Async", period,
749 	    (pg0.neg_params2 & MPI_CFG_SPI_DEV_0_NEGPARAMS_WIDTH_WIDE) ? 16 : 8,
750 	    pg0.neg_offset,
751 	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_QAS) ? 1 : 0,
752 	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_DUALXFERS) ? 1 : 0,
753 	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_PACKETIZED) ? 1 : 0);
754 
755 	return (0);
756 }
757 
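/*
 * Issue a plain INQUIRY at a target (or, for RAID passthrough, a
 * physical disk) using a SCSI IO request built by hand in the ccb's
 * request frame.  The inquiry data and sense buffers live in the same
 * DMA bundle as the request, so no extra mapping is needed; the command
 * is polled for completion.
 */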
758 int
759 mpi_inq(struct mpi_softc *sc, u_int16_t target, int physdisk)
760 {
761 	struct mpi_ccb			*ccb;
762 	struct scsi_inquiry		inq;
763 	struct {
764 		struct mpi_msg_scsi_io		io;
765 		struct mpi_sge			sge;
766 		struct scsi_inquiry_data	inqbuf;
767 		struct scsi_sense_data		sense;
768 	} __packed			*bundle;
769 	struct mpi_msg_scsi_io		*io;
770 	struct mpi_sge			*sge;
771 
772 	DNPRINTF(MPI_D_PPR, "%s: mpi_inq\n", DEVNAME(sc));
773 
774 	memset(&inq, 0, sizeof(inq));
775 	inq.opcode = INQUIRY;
776 	_lto2b(sizeof(struct scsi_inquiry_data), inq.length);
777 
778 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
779 	if (ccb == NULL)
780 		return (1);
781 
782 	ccb->ccb_done = mpi_empty_done;
783 
784 	bundle = ccb->ccb_cmd;
785 	io = &bundle->io;
786 	sge = &bundle->sge;
787 
788 	io->function = physdisk ? MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH :
789 	    MPI_FUNCTION_SCSI_IO_REQUEST;
790 	/*
791 	 * bus is always 0
792 	 * io->bus = htole16(sc->sc_bus);
793 	 */
794 	io->target_id = target;
795 
796 	io->cdb_length = sizeof(inq);
797 	io->sense_buf_len = sizeof(struct scsi_sense_data);
798 	io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64;
799 
800 	/*
801 	 * always lun 0
802 	 * io->lun[0] = htobe16(link->lun);
803 	 */
804 
805 	io->direction = MPI_SCSIIO_DIR_READ;
806 	io->tagging = MPI_SCSIIO_ATTR_NO_DISCONNECT;
807 
808 	memcpy(io->cdb, &inq, sizeof(inq));
809 
810 	htolem32(&io->data_length, sizeof(struct scsi_inquiry_data));
811 
812 	htolem32(&io->sense_buf_low_addr, ccb->ccb_cmd_dva +
813 	    ((u_int8_t *)&bundle->sense - (u_int8_t *)bundle));
814 
815 	htolem32(&sge->sg_hdr, MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64 |
816 	    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL |
817 	    (u_int32_t)sizeof(inq));
818 
819 	mpi_dvatosge(sge, ccb->ccb_cmd_dva +
820 	    ((u_int8_t *)&bundle->inqbuf - (u_int8_t *)bundle));
821 
822 	if (mpi_poll(sc, ccb, 5000) != 0)
823 		return (1);
824 
825 	if (ccb->ccb_rcb != NULL)
826 		mpi_push_reply(sc, ccb->ccb_rcb);
827 
828 	scsi_io_put(&sc->sc_iopool, ccb);
829 
830 	return (0);
831 }
832 
833 int
834 mpi_cfg_sas(struct mpi_softc *sc)
835 {
836 	struct mpi_ecfg_hdr		ehdr;
837 	struct mpi_cfg_sas_iou_pg1	*pg;
838 	size_t				pagelen;
839 	int				rv = 0;
840 
841 	if (mpi_ecfg_header(sc, MPI_CONFIG_REQ_EXTPAGE_TYPE_SAS_IO_UNIT, 1, 0,
842 	    &ehdr) != 0)
843 		return (0);
844 
845 	pagelen = lemtoh16(&ehdr.ext_page_length) * 4;
846 	pg = malloc(pagelen, M_TEMP, M_NOWAIT | M_ZERO);
847 	if (pg == NULL)
848 		return (ENOMEM);
849 
850 	if (mpi_ecfg_page(sc, 0, &ehdr, 1, pg, pagelen) != 0)
851 		goto out;
852 
853 	if (pg->max_sata_q_depth != 32) {
854 		pg->max_sata_q_depth = 32;
855 
856 		if (mpi_ecfg_page(sc, 0, &ehdr, 0, pg, pagelen) != 0)
857 			goto out;
858 	}
859 
860 out:
861 	free(pg, M_TEMP, 0);
862 	return (rv);
863 }
864 
865 int
866 mpi_cfg_fc(struct mpi_softc *sc)
867 {
868 	struct mpi_cfg_hdr		hdr;
869 	struct mpi_cfg_fc_port_pg0	pg0;
870 	struct mpi_cfg_fc_port_pg1	pg1;
871 
872 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 0, 0,
873 	    &hdr) != 0) {
874 		printf("%s: unable to fetch FC port header 0\n", DEVNAME(sc));
875 		return (1);
876 	}
877 
878 	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg0, sizeof(pg0)) != 0) {
879 		printf("%s: unable to fetch FC port page 0\n", DEVNAME(sc));
880 		return (1);
881 	}
882 
883 	sc->sc_link.port_wwn = letoh64(pg0.wwpn);
884 	sc->sc_link.node_wwn = letoh64(pg0.wwnn);
885 
886 	/* configure port config more to our liking */
887 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 1, 0,
888 	    &hdr) != 0) {
889 		printf("%s: unable to fetch FC port header 1\n", DEVNAME(sc));
890 		return (1);
891 	}
892 
893 	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg1, sizeof(pg1)) != 0) {
894 		printf("%s: unable to fetch FC port page 1\n", DEVNAME(sc));
895 		return (1);
896 	}
897 
898 	SET(pg1.flags, htole32(MPI_CFG_FC_PORT_0_FLAGS_IMMEDIATE_ERROR |
899 	    MPI_CFG_FC_PORT_0_FLAGS_VERBOSE_RESCAN));
900 
901 	if (mpi_cfg_page(sc, 0, &hdr, 0, &pg1, sizeof(pg1)) != 0) {
902 		printf("%s: unable to set FC port page 1\n", DEVNAME(sc));
903 		return (1);
904 	}
905 
906 	return (0);
907 }
908 
909 void
910 mpi_detach(struct mpi_softc *sc)
911 {
912 
913 }
914 
915 int
916 mpi_intr(void *arg)
917 {
918 	struct mpi_softc		*sc = arg;
919 	u_int32_t			reg;
920 	int				rv = 0;
921 
922 	if ((mpi_read_intr(sc) & MPI_INTR_STATUS_REPLY) == 0)
923 		return (rv);
924 
925 	while ((reg = mpi_pop_reply(sc)) != 0xffffffff) {
926 		mpi_reply(sc, reg);
927 		rv = 1;
928 	}
929 
930 	return (rv);
931 }
932 
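/*
 * Decode one word popped off the reply queue.  Address replies point at
 * a reply frame in sc_replies, which carries the message context;
 * context replies encode the context directly in the queue word.  The
 * context is the ccb id, so the request frame can be synced and the ccb
 * handed to its completion routine.
 */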
933 void
934 mpi_reply(struct mpi_softc *sc, u_int32_t reg)
935 {
936 	struct mpi_ccb			*ccb;
937 	struct mpi_rcb			*rcb = NULL;
938 	struct mpi_msg_reply		*reply = NULL;
939 	u_int32_t			reply_dva;
940 	int				id;
941 	int				i;
942 
943 	DNPRINTF(MPI_D_INTR, "%s: mpi_reply reg: 0x%08x\n", DEVNAME(sc), reg);
944 
945 	if (reg & MPI_REPLY_QUEUE_ADDRESS) {
946 		reply_dva = (reg & MPI_REPLY_QUEUE_ADDRESS_MASK) << 1;
947 		i = (reply_dva - (u_int32_t)MPI_DMA_DVA(sc->sc_replies)) /
948 		    MPI_REPLY_SIZE;
949 		rcb = &sc->sc_rcbs[i];
950 
951 		bus_dmamap_sync(sc->sc_dmat,
952 		    MPI_DMA_MAP(sc->sc_replies), rcb->rcb_offset,
953 		    MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD);
954 
955 		reply = rcb->rcb_reply;
956 
957 		id = lemtoh32(&reply->msg_context);
958 	} else {
959 		switch (reg & MPI_REPLY_QUEUE_TYPE_MASK) {
960 		case MPI_REPLY_QUEUE_TYPE_INIT:
961 			id = reg & MPI_REPLY_QUEUE_CONTEXT;
962 			break;
963 
964 		default:
965 			panic("%s: unsupported context reply",
966 			    DEVNAME(sc));
967 		}
968 	}
969 
970 	DNPRINTF(MPI_D_INTR, "%s: mpi_reply id: %d reply: %p\n",
971 	    DEVNAME(sc), id, reply);
972 
973 	ccb = &sc->sc_ccbs[id];
974 
975 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests),
976 	    ccb->ccb_offset, MPI_REQUEST_SIZE,
977 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
978 	ccb->ccb_state = MPI_CCB_READY;
979 	ccb->ccb_rcb = rcb;
980 
981 	ccb->ccb_done(ccb);
982 }
983 
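/*
 * Allocate a single-segment, bus-mapped chunk of zeroed DMA memory and
 * keep the map, segment and kva together in an mpi_dmamem wrapper.
 */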
984 struct mpi_dmamem *
985 mpi_dmamem_alloc(struct mpi_softc *sc, size_t size)
986 {
987 	struct mpi_dmamem		*mdm;
988 	int				nsegs;
989 
990 	mdm = malloc(sizeof(struct mpi_dmamem), M_DEVBUF, M_NOWAIT | M_ZERO);
991 	if (mdm == NULL)
992 		return (NULL);
993 
994 	mdm->mdm_size = size;
995 
996 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
997 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
998 		goto mdmfree;
999 
1000 	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg,
1001 	    1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
1002 		goto destroy;
1003 
1004 	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
1005 	    &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0)
1006 		goto free;
1007 
1008 	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
1009 	    NULL, BUS_DMA_NOWAIT) != 0)
1010 		goto unmap;
1011 
1012 	DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_alloc size: %d mdm: %#x "
1013 	    "map: %#x nsegs: %d segs: %#x kva: %x\n",
1014 	    DEVNAME(sc), size, mdm->mdm_map, nsegs, mdm->mdm_seg, mdm->mdm_kva);
1015 
1016 	return (mdm);
1017 
1018 unmap:
1019 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
1020 free:
1021 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
1022 destroy:
1023 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
1024 mdmfree:
1025 	free(mdm, M_DEVBUF, 0);
1026 
1027 	return (NULL);
1028 }
1029 
1030 void
1031 mpi_dmamem_free(struct mpi_softc *sc, struct mpi_dmamem *mdm)
1032 {
1033 	DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_free %#x\n", DEVNAME(sc), mdm);
1034 
1035 	bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map);
1036 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
1037 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
1038 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
1039 	free(mdm, M_DEVBUF, 0);
1040 }
1041 
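/*
 * Allocate the ccb array and the DMA memory backing the request frames.
 * Each ccb gets a data dmamap, a pointer into the request frame area
 * and the device-visible address of its frame, and ends up on the free
 * list that feeds the iopool.
 */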
1042 int
1043 mpi_alloc_ccbs(struct mpi_softc *sc)
1044 {
1045 	struct mpi_ccb			*ccb;
1046 	u_int8_t			*cmd;
1047 	int				i;
1048 
1049 	SLIST_INIT(&sc->sc_ccb_free);
1050 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
1051 
1052 	sc->sc_ccbs = mallocarray(sc->sc_maxcmds, sizeof(struct mpi_ccb),
1053 	    M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
1054 	if (sc->sc_ccbs == NULL) {
1055 		printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
1056 		return (1);
1057 	}
1058 
1059 	sc->sc_requests = mpi_dmamem_alloc(sc,
1060 	    MPI_REQUEST_SIZE * sc->sc_maxcmds);
1061 	if (sc->sc_requests == NULL) {
1062 		printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
1063 		goto free_ccbs;
1064 	}
1065 	cmd = MPI_DMA_KVA(sc->sc_requests);
1066 	memset(cmd, 0, MPI_REQUEST_SIZE * sc->sc_maxcmds);
1067 
1068 	for (i = 0; i < sc->sc_maxcmds; i++) {
1069 		ccb = &sc->sc_ccbs[i];
1070 
1071 		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS,
1072 		    sc->sc_max_sgl_len, MAXPHYS, 0,
1073 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1074 		    &ccb->ccb_dmamap) != 0) {
1075 			printf("%s: unable to create dma map\n", DEVNAME(sc));
1076 			goto free_maps;
1077 		}
1078 
1079 		ccb->ccb_sc = sc;
1080 		ccb->ccb_id = i;
1081 		ccb->ccb_offset = MPI_REQUEST_SIZE * i;
1082 		ccb->ccb_state = MPI_CCB_READY;
1083 
1084 		ccb->ccb_cmd = &cmd[ccb->ccb_offset];
1085 		ccb->ccb_cmd_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_requests) +
1086 		    ccb->ccb_offset;
1087 
1088 		DNPRINTF(MPI_D_CCB, "%s: mpi_alloc_ccbs(%d) ccb: %#x map: %#x "
1089 		    "sc: %#x id: %#x offs: %#x cmd: %#x dva: %#x\n",
1090 		    DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc,
1091 		    ccb->ccb_id, ccb->ccb_offset, ccb->ccb_cmd,
1092 		    ccb->ccb_cmd_dva);
1093 
1094 		mpi_put_ccb(sc, ccb);
1095 	}
1096 
1097 	scsi_iopool_init(&sc->sc_iopool, sc, mpi_get_ccb, mpi_put_ccb);
1098 
1099 	return (0);
1100 
1101 free_maps:
1102 	while ((ccb = mpi_get_ccb(sc)) != NULL)
1103 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
1104 
1105 	mpi_dmamem_free(sc, sc->sc_requests);
1106 free_ccbs:
1107 	free(sc->sc_ccbs, M_DEVBUF, 0);
1108 
1109 	return (1);
1110 }
1111 
1112 void *
1113 mpi_get_ccb(void *xsc)
1114 {
1115 	struct mpi_softc		*sc = xsc;
1116 	struct mpi_ccb			*ccb;
1117 
1118 	mtx_enter(&sc->sc_ccb_mtx);
1119 	ccb = SLIST_FIRST(&sc->sc_ccb_free);
1120 	if (ccb != NULL) {
1121 		SLIST_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link);
1122 		ccb->ccb_state = MPI_CCB_READY;
1123 	}
1124 	mtx_leave(&sc->sc_ccb_mtx);
1125 
1126 	DNPRINTF(MPI_D_CCB, "%s: mpi_get_ccb %p\n", DEVNAME(sc), ccb);
1127 
1128 	return (ccb);
1129 }
1130 
1131 void
1132 mpi_put_ccb(void *xsc, void *io)
1133 {
1134 	struct mpi_softc		*sc = xsc;
1135 	struct mpi_ccb			*ccb = io;
1136 
1137 	DNPRINTF(MPI_D_CCB, "%s: mpi_put_ccb %p\n", DEVNAME(sc), ccb);
1138 
1139 #ifdef DIAGNOSTIC
1140 	if (ccb->ccb_state == MPI_CCB_FREE)
1141 		panic("mpi_put_ccb: double free");
1142 #endif
1143 
1144 	ccb->ccb_state = MPI_CCB_FREE;
1145 	ccb->ccb_cookie = NULL;
1146 	ccb->ccb_done = NULL;
1147 	memset(ccb->ccb_cmd, 0, MPI_REQUEST_SIZE);
1148 	mtx_enter(&sc->sc_ccb_mtx);
1149 	SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link);
1150 	mtx_leave(&sc->sc_ccb_mtx);
1151 }
1152 
1153 int
1154 mpi_alloc_replies(struct mpi_softc *sc)
1155 {
1156 	DNPRINTF(MPI_D_MISC, "%s: mpi_alloc_replies\n", DEVNAME(sc));
1157 
1158 	sc->sc_rcbs = mallocarray(sc->sc_repq, sizeof(struct mpi_rcb), M_DEVBUF,
1159 	    M_WAITOK|M_CANFAIL);
1160 	if (sc->sc_rcbs == NULL)
1161 		return (1);
1162 
1163 	sc->sc_replies = mpi_dmamem_alloc(sc, sc->sc_repq * MPI_REPLY_SIZE);
1164 	if (sc->sc_replies == NULL) {
1165 		free(sc->sc_rcbs, M_DEVBUF, 0);
1166 		return (1);
1167 	}
1168 
1169 	return (0);
1170 }
1171 
1172 void
1173 mpi_push_reply(struct mpi_softc *sc, struct mpi_rcb *rcb)
1174 {
1175 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies),
1176 	    rcb->rcb_offset, MPI_REPLY_SIZE, BUS_DMASYNC_PREREAD);
1177 	mpi_push_reply_db(sc, rcb->rcb_reply_dva);
1178 }
1179 
1180 void
1181 mpi_push_replies(struct mpi_softc *sc)
1182 {
1183 	struct mpi_rcb			*rcb;
1184 	char				*kva = MPI_DMA_KVA(sc->sc_replies);
1185 	int				i;
1186 
1187 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 0,
1188 	    sc->sc_repq * MPI_REPLY_SIZE, BUS_DMASYNC_PREREAD);
1189 
1190 	for (i = 0; i < sc->sc_repq; i++) {
1191 		rcb = &sc->sc_rcbs[i];
1192 
1193 		rcb->rcb_reply = kva + MPI_REPLY_SIZE * i;
1194 		rcb->rcb_offset = MPI_REPLY_SIZE * i;
1195 		rcb->rcb_reply_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_replies) +
1196 		    MPI_REPLY_SIZE * i;
1197 		mpi_push_reply_db(sc, rcb->rcb_reply_dva);
1198 	}
1199 }
1200 
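/*
 * Stamp the ccb id into the request's message context, sync the request
 * frame and post its device address to the request queue register.
 */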
1201 void
1202 mpi_start(struct mpi_softc *sc, struct mpi_ccb *ccb)
1203 {
1204 	struct mpi_msg_request *msg;
1205 
1206 	DNPRINTF(MPI_D_RW, "%s: mpi_start %#x\n", DEVNAME(sc),
1207 	    ccb->ccb_cmd_dva);
1208 
1209 	msg = ccb->ccb_cmd;
1210 	htolem32(&msg->msg_context, ccb->ccb_id);
1211 
1212 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests),
1213 	    ccb->ccb_offset, MPI_REQUEST_SIZE,
1214 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1215 
1216 	ccb->ccb_state = MPI_CCB_QUEUED;
1217 	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1218 	    MPI_REQ_QUEUE, ccb->ccb_cmd_dva);
1219 }
1220 
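/*
 * Run a ccb to completion without interrupts: temporarily steal its
 * done handler, post it, and spin on the reply queue (1ms per
 * iteration, up to "timeout" iterations) until our reply shows up.
 * Returns non-zero on timeout.
 */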
1221 int
1222 mpi_poll(struct mpi_softc *sc, struct mpi_ccb *ccb, int timeout)
1223 {
1224 	void				(*done)(struct mpi_ccb *);
1225 	void				*cookie;
1226 	int				rv = 1;
1227 	u_int32_t			reg;
1228 
1229 	DNPRINTF(MPI_D_INTR, "%s: mpi_poll timeout %d\n", DEVNAME(sc),
1230 	    timeout);
1231 
1232 	done = ccb->ccb_done;
1233 	cookie = ccb->ccb_cookie;
1234 
1235 	ccb->ccb_done = mpi_poll_done;
1236 	ccb->ccb_cookie = &rv;
1237 
1238 	mpi_start(sc, ccb);
1239 	while (rv == 1) {
1240 		reg = mpi_pop_reply(sc);
1241 		if (reg == 0xffffffff) {
1242 			if (timeout-- == 0) {
1243 				printf("%s: timeout\n", DEVNAME(sc));
1244 				goto timeout;
1245 			}
1246 
1247 			delay(1000);
1248 			continue;
1249 		}
1250 
1251 		mpi_reply(sc, reg);
1252 	}
1253 
1254 	ccb->ccb_cookie = cookie;
1255 	done(ccb);
1256 
1257 timeout:
1258 	return (rv);
1259 }
1260 
1261 void
1262 mpi_poll_done(struct mpi_ccb *ccb)
1263 {
1264 	int				*rv = ccb->ccb_cookie;
1265 
1266 	*rv = 0;
1267 }
1268 
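/*
 * Sleeping counterpart to mpi_poll(): the ccb's done handler is
 * replaced with one that wakes us up, and we msleep on the ccb until
 * the interrupt path has completed it.
 */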
1269 void
1270 mpi_wait(struct mpi_softc *sc, struct mpi_ccb *ccb)
1271 {
1272 	struct mutex			cookie = MUTEX_INITIALIZER(IPL_BIO);
1273 	void				(*done)(struct mpi_ccb *);
1274 
1275 	done = ccb->ccb_done;
1276 	ccb->ccb_done = mpi_wait_done;
1277 	ccb->ccb_cookie = &cookie;
1278 
1279 	/* XXX this will wait forever for the ccb to complete */
1280 
1281 	mpi_start(sc, ccb);
1282 
1283 	mtx_enter(&cookie);
1284 	while (ccb->ccb_cookie != NULL)
1285 		msleep(ccb, &cookie, PRIBIO, "mpiwait", 0);
1286 	mtx_leave(&cookie);
1287 
1288 	done(ccb);
1289 }
1290 
1291 void
1292 mpi_wait_done(struct mpi_ccb *ccb)
1293 {
1294 	struct mutex			*cookie = ccb->ccb_cookie;
1295 
1296 	mtx_enter(cookie);
1297 	ccb->ccb_cookie = NULL;
1298 	wakeup_one(ccb);
1299 	mtx_leave(cookie);
1300 }
1301 
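/*
 * Midlayer entry point for SCSI commands.  The scsi_xfer is translated
 * into an MPI SCSI IO request in the ccb's request frame: cdb, lun,
 * direction, tagging and sense buffer address are filled in, the data
 * buffer is mapped into an SGL by mpi_load_xs(), and the request is
 * either polled (SCSI_POLL) or queued to complete via interrupt.
 * Oversized CDBs are failed immediately with ILLEGAL REQUEST sense.
 */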
1302 void
1303 mpi_scsi_cmd(struct scsi_xfer *xs)
1304 {
1305 	struct scsi_link		*link = xs->sc_link;
1306 	struct mpi_softc		*sc = link->adapter_softc;
1307 	struct mpi_ccb			*ccb;
1308 	struct mpi_ccb_bundle		*mcb;
1309 	struct mpi_msg_scsi_io		*io;
1310 
1311 	DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd\n", DEVNAME(sc));
1312 
1313 	KERNEL_UNLOCK();
1314 
1315 	if (xs->cmdlen > MPI_CDB_LEN) {
1316 		DNPRINTF(MPI_D_CMD, "%s: CDB too big %d\n",
1317 		    DEVNAME(sc), xs->cmdlen);
1318 		memset(&xs->sense, 0, sizeof(xs->sense));
1319 		xs->sense.error_code = SSD_ERRCODE_VALID | SSD_ERRCODE_CURRENT;
1320 		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
1321 		xs->sense.add_sense_code = 0x20;
1322 		xs->error = XS_SENSE;
1323 		goto done;
1324 	}
1325 
1326 	ccb = xs->io;
1327 
1328 	DNPRINTF(MPI_D_CMD, "%s: ccb_id: %d xs->flags: 0x%x\n",
1329 	    DEVNAME(sc), ccb->ccb_id, xs->flags);
1330 
1331 	ccb->ccb_cookie = xs;
1332 	ccb->ccb_done = mpi_scsi_cmd_done;
1333 
1334 	mcb = ccb->ccb_cmd;
1335 	io = &mcb->mcb_io;
1336 
1337 	io->function = MPI_FUNCTION_SCSI_IO_REQUEST;
1338 	/*
1339 	 * bus is always 0
1340 	 * io->bus = htole16(sc->sc_bus);
1341 	 */
1342 	io->target_id = link->target;
1343 
1344 	io->cdb_length = xs->cmdlen;
1345 	io->sense_buf_len = sizeof(xs->sense);
1346 	io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64;
1347 
1348 	htobem16(&io->lun[0], link->lun);
1349 
1350 	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
1351 	case SCSI_DATA_IN:
1352 		io->direction = MPI_SCSIIO_DIR_READ;
1353 		break;
1354 	case SCSI_DATA_OUT:
1355 		io->direction = MPI_SCSIIO_DIR_WRITE;
1356 		break;
1357 	default:
1358 		io->direction = MPI_SCSIIO_DIR_NONE;
1359 		break;
1360 	}
1361 
1362 	if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SCSI &&
1363 	    (link->quirks & SDEV_NOTAGS))
1364 		io->tagging = MPI_SCSIIO_ATTR_UNTAGGED;
1365 	else
1366 		io->tagging = MPI_SCSIIO_ATTR_SIMPLE_Q;
1367 
1368 	memcpy(io->cdb, xs->cmd, xs->cmdlen);
1369 
1370 	htolem32(&io->data_length, xs->datalen);
1371 
1372 	htolem32(&io->sense_buf_low_addr, ccb->ccb_cmd_dva +
1373 	    ((u_int8_t *)&mcb->mcb_sense - (u_int8_t *)mcb));
1374 
1375 	if (mpi_load_xs(ccb) != 0)
1376 		goto stuffup;
1377 
1378 	timeout_set(&xs->stimeout, mpi_timeout_xs, ccb);
1379 
1380 	if (xs->flags & SCSI_POLL) {
1381 		if (mpi_poll(sc, ccb, xs->timeout) != 0)
1382 			goto stuffup;
1383 	} else
1384 		mpi_start(sc, ccb);
1385 
1386 	KERNEL_LOCK();
1387 	return;
1388 
1389 stuffup:
1390 	xs->error = XS_DRIVER_STUFFUP;
1391 done:
1392 	KERNEL_LOCK();
1393 	scsi_done(xs);
1394 }
1395 
1396 void
1397 mpi_scsi_cmd_done(struct mpi_ccb *ccb)
1398 {
1399 	struct mpi_softc		*sc = ccb->ccb_sc;
1400 	struct scsi_xfer		*xs = ccb->ccb_cookie;
1401 	struct mpi_ccb_bundle		*mcb = ccb->ccb_cmd;
1402 	bus_dmamap_t			dmap = ccb->ccb_dmamap;
1403 	struct mpi_msg_scsi_io_error	*sie;
1404 
1405 	if (xs->datalen != 0) {
1406 		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
1407 		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
1408 		    BUS_DMASYNC_POSTWRITE);
1409 
1410 		bus_dmamap_unload(sc->sc_dmat, dmap);
1411 	}
1412 
1413 	/* timeout_del */
1414 	xs->error = XS_NOERROR;
1415 	xs->resid = 0;
1416 
1417 	if (ccb->ccb_rcb == NULL) {
1418 		/* no scsi error, we're ok so drop out early */
1419 		xs->status = SCSI_OK;
1420 		KERNEL_LOCK();
1421 		scsi_done(xs);
1422 		KERNEL_UNLOCK();
1423 		return;
1424 	}
1425 
1426 	sie = ccb->ccb_rcb->rcb_reply;
1427 
1428 	DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd_done xs cmd: 0x%02x len: %d "
1429 	    "flags 0x%x\n", DEVNAME(sc), xs->cmd->opcode, xs->datalen,
1430 	    xs->flags);
1431 	DNPRINTF(MPI_D_CMD, "%s:  target_id: %d bus: %d msg_length: %d "
1432 	    "function: 0x%02x\n", DEVNAME(sc), sie->target_id, sie->bus,
1433 	    sie->msg_length, sie->function);
1434 	DNPRINTF(MPI_D_CMD, "%s:  cdb_length: %d sense_buf_length: %d "
1435 	    "msg_flags: 0x%02x\n", DEVNAME(sc), sie->cdb_length,
1436 	    sie->sense_buf_len, sie->msg_flags);
1437 	DNPRINTF(MPI_D_CMD, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
1438 	    letoh32(sie->msg_context));
1439 	DNPRINTF(MPI_D_CMD, "%s:  scsi_status: 0x%02x scsi_state: 0x%02x "
1440 	    "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status,
1441 	    sie->scsi_state, letoh16(sie->ioc_status));
1442 	DNPRINTF(MPI_D_CMD, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
1443 	    letoh32(sie->ioc_loginfo));
1444 	DNPRINTF(MPI_D_CMD, "%s:  transfer_count: %d\n", DEVNAME(sc),
1445 	    letoh32(sie->transfer_count));
1446 	DNPRINTF(MPI_D_CMD, "%s:  sense_count: %d\n", DEVNAME(sc),
1447 	    letoh32(sie->sense_count));
1448 	DNPRINTF(MPI_D_CMD, "%s:  response_info: 0x%08x\n", DEVNAME(sc),
1449 	    letoh32(sie->response_info));
1450 	DNPRINTF(MPI_D_CMD, "%s:  tag: 0x%04x\n", DEVNAME(sc),
1451 	    letoh16(sie->tag));
1452 
1453 	xs->status = sie->scsi_status;
1454 	switch (lemtoh16(&sie->ioc_status)) {
1455 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
1456 		xs->resid = xs->datalen - lemtoh32(&sie->transfer_count);
1457 		if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_NO_SCSI_STATUS) {
1458 			xs->error = XS_DRIVER_STUFFUP;
1459 			break;
1460 		}
1461 		/* FALLTHROUGH */
1462 	case MPI_IOCSTATUS_SUCCESS:
1463 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
1464 		switch (xs->status) {
1465 		case SCSI_OK:
1466 			xs->resid = 0;
1467 			break;
1468 
1469 		case SCSI_CHECK:
1470 			xs->error = XS_SENSE;
1471 			break;
1472 
1473 		case SCSI_BUSY:
1474 		case SCSI_QUEUE_FULL:
1475 			xs->error = XS_BUSY;
1476 			break;
1477 
1478 		default:
1479 			xs->error = XS_DRIVER_STUFFUP;
1480 			break;
1481 		}
1482 		break;
1483 
1484 	case MPI_IOCSTATUS_BUSY:
1485 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
1486 		xs->error = XS_BUSY;
1487 		break;
1488 
1489 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
1490 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
1491 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1492 		xs->error = XS_SELTIMEOUT;
1493 		break;
1494 
1495 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
1496 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
1497 		xs->error = XS_RESET;
1498 		break;
1499 
1500 	default:
1501 		xs->error = XS_DRIVER_STUFFUP;
1502 		break;
1503 	}
1504 
1505 	if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_AUTOSENSE_VALID)
1506 		memcpy(&xs->sense, &mcb->mcb_sense, sizeof(xs->sense));
1507 
1508 	DNPRINTF(MPI_D_CMD, "%s:  xs err: 0x%02x status: %d\n", DEVNAME(sc),
1509 	    xs->error, xs->status);
1510 
1511 	mpi_push_reply(sc, ccb->ccb_rcb);
1512 	KERNEL_LOCK();
1513 	scsi_done(xs);
1514 	KERNEL_UNLOCK();
1515 }
1516 
1517 void
1518 mpi_timeout_xs(void *arg)
1519 {
1520 	/* XXX */
1521 }
1522 
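/*
 * Build the scatter/gather list for a data transfer.  The first
 * sc_first_sgl_len slots in the request frame hold simple 64-bit SGEs;
 * if more segments are needed, the last slot is turned into a chain
 * element pointing at the next run of SGEs (sc_chain_len at a time)
 * further along in the same request frame.  The final SGE is marked
 * LAST|EOB|EOL to terminate the list.
 */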
1523 int
1524 mpi_load_xs(struct mpi_ccb *ccb)
1525 {
1526 	struct mpi_softc		*sc = ccb->ccb_sc;
1527 	struct scsi_xfer		*xs = ccb->ccb_cookie;
1528 	struct mpi_ccb_bundle		*mcb = ccb->ccb_cmd;
1529 	struct mpi_msg_scsi_io		*io = &mcb->mcb_io;
1530 	struct mpi_sge			*sge = NULL;
1531 	struct mpi_sge			*nsge = &mcb->mcb_sgl[0];
1532 	struct mpi_sge			*ce = NULL, *nce;
1533 	bus_dmamap_t			dmap = ccb->ccb_dmamap;
1534 	u_int32_t			addr, flags;
1535 	int				i, error;
1536 
1537 	if (xs->datalen == 0) {
1538 		htolem32(&nsge->sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |
1539 		    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL);
1540 		return (0);
1541 	}
1542 
1543 	error = bus_dmamap_load(sc->sc_dmat, dmap,
1544 	    xs->data, xs->datalen, NULL, BUS_DMA_STREAMING |
1545 	    ((xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK));
1546 	if (error) {
1547 		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
1548 		return (1);
1549 	}
1550 
1551 	flags = MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64;
1552 	if (xs->flags & SCSI_DATA_OUT)
1553 		flags |= MPI_SGE_FL_DIR_OUT;
1554 
1555 	if (dmap->dm_nsegs > sc->sc_first_sgl_len) {
1556 		ce = &mcb->mcb_sgl[sc->sc_first_sgl_len - 1];
1557 		io->chain_offset = (u_int32_t *)ce - (u_int32_t *)io;
1558 	}
1559 
1560 	for (i = 0; i < dmap->dm_nsegs; i++) {
1561 
1562 		if (nsge == ce) {
1563 			nsge++;
1564 			sge->sg_hdr |= htole32(MPI_SGE_FL_LAST);
1565 
1566 			if ((dmap->dm_nsegs - i) > sc->sc_chain_len) {
1567 				nce = &nsge[sc->sc_chain_len - 1];
1568 				addr = (u_int32_t *)nce - (u_int32_t *)nsge;
1569 				addr = addr << 16 |
1570 				    sizeof(struct mpi_sge) * sc->sc_chain_len;
1571 			} else {
1572 				nce = NULL;
1573 				addr = sizeof(struct mpi_sge) *
1574 				    (dmap->dm_nsegs - i);
1575 			}
1576 
1577 			ce->sg_hdr = htole32(MPI_SGE_FL_TYPE_CHAIN |
1578 			    MPI_SGE_FL_SIZE_64 | addr);
1579 
1580 			mpi_dvatosge(ce, ccb->ccb_cmd_dva +
1581 			    ((u_int8_t *)nsge - (u_int8_t *)mcb));
1582 
1583 			ce = nce;
1584 		}
1585 
1586 		DNPRINTF(MPI_D_DMA, "%s:  %d: %d 0x%016llx\n", DEVNAME(sc),
1587 		    i, dmap->dm_segs[i].ds_len,
1588 		    (u_int64_t)dmap->dm_segs[i].ds_addr);
1589 
1590 		sge = nsge++;
1591 
1592 		sge->sg_hdr = htole32(flags | dmap->dm_segs[i].ds_len);
1593 		mpi_dvatosge(sge, dmap->dm_segs[i].ds_addr);
1594 	}
1595 
1596 	/* terminate list */
1597 	sge->sg_hdr |= htole32(MPI_SGE_FL_LAST | MPI_SGE_FL_EOB |
1598 	    MPI_SGE_FL_EOL);
1599 
1600 	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
1601 	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
1602 	    BUS_DMASYNC_PREWRITE);
1603 
1604 	return (0);
1605 }
1606 
1607 void
1608 mpi_minphys(struct buf *bp, struct scsi_link *sl)
1609 {
1610 	/* XXX */
1611 	if (bp->b_bcount > MAXPHYS)
1612 		bp->b_bcount = MAXPHYS;
1613 	minphys(bp);
1614 }
1615 
1616 int
1617 mpi_scsi_probe_virtual(struct scsi_link *link)
1618 {
1619 	struct mpi_softc		*sc = link->adapter_softc;
1620 	struct mpi_cfg_hdr		hdr;
1621 	struct mpi_cfg_raid_vol_pg0	*rp0;
1622 	int				len;
1623 	int				rv;
1624 
1625 	if (!ISSET(sc->sc_flags, MPI_F_RAID))
1626 		return (0);
1627 
1628 	if (link->lun > 0)
1629 		return (0);
1630 
1631 	rv = mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL,
1632 	    0, link->target, MPI_PG_POLL, &hdr);
1633 	if (rv != 0)
1634 		return (0);
1635 
1636 	len = hdr.page_length * 4;
1637 	rp0 = malloc(len, M_TEMP, M_NOWAIT);
1638 	if (rp0 == NULL)
1639 		return (ENOMEM);
1640 
1641 	rv = mpi_req_cfg_page(sc, link->target, MPI_PG_POLL, &hdr, 1, rp0, len);
1642 	if (rv == 0)
1643 		SET(link->flags, SDEV_VIRTUAL);
1644 
1645 	free(rp0, M_TEMP, 0);
1646 	return (0);
1647 }
1648 
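/*
 * Per-target probe hook.  RAID volumes are flagged SDEV_VIRTUAL via
 * mpi_scsi_probe_virtual(); on SAS controllers, SAS device page 0 is
 * fetched so ATAPI devices behind the controller can be marked with the
 * appropriate flags and quirks.
 */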
1649 int
1650 mpi_scsi_probe(struct scsi_link *link)
1651 {
1652 	struct mpi_softc		*sc = link->adapter_softc;
1653 	struct mpi_ecfg_hdr		ehdr;
1654 	struct mpi_cfg_sas_dev_pg0	pg0;
1655 	u_int32_t			address;
1656 	int				rv;
1657 
1658 	rv = mpi_scsi_probe_virtual(link);
1659 	if (rv != 0)
1660 		return (rv);
1661 
1662 	if (ISSET(link->flags, SDEV_VIRTUAL))
1663 		return (0);
1664 
1665 	if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SAS)
1666 		return (0);
1667 
1668 	address = MPI_CFG_SAS_DEV_ADDR_BUS | link->target;
1669 
1670 	if (mpi_ecfg_header(sc, MPI_CONFIG_REQ_EXTPAGE_TYPE_SAS_DEVICE, 0,
1671 	    address, &ehdr) != 0)
1672 		return (EIO);
1673 
1674 	if (mpi_ecfg_page(sc, address, &ehdr, 1, &pg0, sizeof(pg0)) != 0)
1675 		return (0);
1676 
1677 	DNPRINTF(MPI_D_MISC, "%s: mpi_scsi_probe sas dev pg 0 for target %d:\n",
1678 	    DEVNAME(sc), link->target);
1679 	DNPRINTF(MPI_D_MISC, "%s:  slot: 0x%04x enc_handle: 0x%04x\n",
1680 	    DEVNAME(sc), letoh16(pg0.slot), letoh16(pg0.enc_handle));
1681 	DNPRINTF(MPI_D_MISC, "%s:  sas_addr: 0x%016llx\n", DEVNAME(sc),
1682 	    letoh64(pg0.sas_addr));
1683 	DNPRINTF(MPI_D_MISC, "%s:  parent_dev_handle: 0x%04x phy_num: 0x%02x "
1684 	    "access_status: 0x%02x\n", DEVNAME(sc),
1685 	    letoh16(pg0.parent_dev_handle), pg0.phy_num, pg0.access_status);
1686 	DNPRINTF(MPI_D_MISC, "%s:  dev_handle: 0x%04x "
1687 	    "bus: 0x%02x target: 0x%02x\n", DEVNAME(sc),
1688 	    letoh16(pg0.dev_handle), pg0.bus, pg0.target);
1689 	DNPRINTF(MPI_D_MISC, "%s:  device_info: 0x%08x\n", DEVNAME(sc),
1690 	    letoh32(pg0.device_info));
1691 	DNPRINTF(MPI_D_MISC, "%s:  flags: 0x%04x physical_port: 0x%02x\n",
1692 	    DEVNAME(sc), letoh16(pg0.flags), pg0.physical_port);
1693 
1694 	if (ISSET(lemtoh32(&pg0.device_info),
1695 	    MPI_CFG_SAS_DEV_0_DEVINFO_ATAPI_DEVICE)) {
1696 		DNPRINTF(MPI_D_MISC, "%s: target %d is an ATAPI device\n",
1697 		    DEVNAME(sc), link->target);
1698 		link->flags |= SDEV_ATAPI;
1699 		link->quirks |= SDEV_ONLYBIG;
1700 	}
1701 
1702 	return (0);
1703 }
1704 
1705 u_int32_t
1706 mpi_read(struct mpi_softc *sc, bus_size_t r)
1707 {
1708 	u_int32_t			rv;
1709 
1710 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1711 	    BUS_SPACE_BARRIER_READ);
1712 	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
1713 
1714 	DNPRINTF(MPI_D_RW, "%s: mpi_read %#x %#x\n", DEVNAME(sc), r, rv);
1715 
1716 	return (rv);
1717 }
1718 
1719 void
1720 mpi_write(struct mpi_softc *sc, bus_size_t r, u_int32_t v)
1721 {
1722 	DNPRINTF(MPI_D_RW, "%s: mpi_write %#x %#x\n", DEVNAME(sc), r, v);
1723 
1724 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
1725 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1726 	    BUS_SPACE_BARRIER_WRITE);
1727 }
1728 
1729 int
1730 mpi_wait_eq(struct mpi_softc *sc, bus_size_t r, u_int32_t mask,
1731     u_int32_t target)
1732 {
1733 	int				i;
1734 
1735 	DNPRINTF(MPI_D_RW, "%s: mpi_wait_eq %#x %#x %#x\n", DEVNAME(sc), r,
1736 	    mask, target);
1737 
1738 	for (i = 0; i < 10000; i++) {
1739 		if ((mpi_read(sc, r) & mask) == target)
1740 			return (0);
1741 		delay(1000);
1742 	}
1743 
1744 	return (1);
1745 }
1746 
1747 int
1748 mpi_wait_ne(struct mpi_softc *sc, bus_size_t r, u_int32_t mask,
1749     u_int32_t target)
1750 {
1751 	int				i;
1752 
1753 	DNPRINTF(MPI_D_RW, "%s: mpi_wait_ne %#x %#x %#x\n", DEVNAME(sc), r,
1754 	    mask, target);
1755 
1756 	for (i = 0; i < 10000; i++) {
1757 		if ((mpi_read(sc, r) & mask) != target)
1758 			return (0);
1759 		delay(1000);
1760 	}
1761 
1762 	return (1);
1763 }
1764 
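/*
 * Get the IOC into the READY state.  If a PCI peer has already
 * initialised it, it is left alone; otherwise OPER/FAULT states are
 * cleared with a soft (or, failing that, hard) reset, retrying a
 * handful of times.
 */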
1765 int
1766 mpi_init(struct mpi_softc *sc)
1767 {
1768 	u_int32_t			db;
1769 	int				i;
1770 
1771 	/* spin until the IOC leaves the RESET state */
1772 	if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
1773 	    MPI_DOORBELL_STATE_RESET) != 0) {
1774 		DNPRINTF(MPI_D_MISC, "%s: mpi_init timeout waiting to leave "
1775 		    "reset state\n", DEVNAME(sc));
1776 		return (1);
1777 	}
1778 
1779 	/* check current ownership */
1780 	db = mpi_read_db(sc);
1781 	if ((db & MPI_DOORBELL_WHOINIT) == MPI_DOORBELL_WHOINIT_PCIPEER) {
1782 		DNPRINTF(MPI_D_MISC, "%s: mpi_init initialised by pci peer\n",
1783 		    DEVNAME(sc));
1784 		return (0);
1785 	}
1786 
1787 	for (i = 0; i < 5; i++) {
1788 		switch (db & MPI_DOORBELL_STATE) {
1789 		case MPI_DOORBELL_STATE_READY:
1790 			DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is ready\n",
1791 			    DEVNAME(sc));
1792 			return (0);
1793 
1794 		case MPI_DOORBELL_STATE_OPER:
1795 		case MPI_DOORBELL_STATE_FAULT:
1796 			DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is being "
1797 			    "reset\n" , DEVNAME(sc));
1798 			if (mpi_reset_soft(sc) != 0)
1799 				mpi_reset_hard(sc);
1800 			break;
1801 
1802 		case MPI_DOORBELL_STATE_RESET:
1803 			DNPRINTF(MPI_D_MISC, "%s: mpi_init waiting to come "
1804 			    "out of reset\n", DEVNAME(sc));
1805 			if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
1806 			    MPI_DOORBELL_STATE_RESET) != 0)
1807 				return (1);
1808 			break;
1809 		}
1810 		db = mpi_read_db(sc);
1811 	}
1812 
1813 	return (1);
1814 }
1815 
1816 int
1817 mpi_reset_soft(struct mpi_softc *sc)
1818 {
1819 	DNPRINTF(MPI_D_MISC, "%s: mpi_reset_soft\n", DEVNAME(sc));
1820 
1821 	if (mpi_read_db(sc) & MPI_DOORBELL_INUSE)
1822 		return (1);
1823 
1824 	mpi_write_db(sc,
1825 	    MPI_DOORBELL_FUNCTION(MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET));
1826 	if (mpi_wait_eq(sc, MPI_INTR_STATUS,
1827 	    MPI_INTR_STATUS_IOCDOORBELL, 0) != 0)
1828 		return (1);
1829 
1830 	if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
1831 	    MPI_DOORBELL_STATE_READY) != 0)
1832 		return (1);
1833 
1834 	return (0);
1835 }
1836 
1837 int
1838 mpi_reset_hard(struct mpi_softc *sc)
1839 {
1840 	DNPRINTF(MPI_D_MISC, "%s: mpi_reset_hard\n", DEVNAME(sc));
1841 
1842 	/* enable diagnostic register */
1843 	mpi_write(sc, MPI_WRITESEQ, 0xff);
1844 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_1);
1845 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_2);
1846 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_3);
1847 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_4);
1848 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_5);
1849 
1850 	/* reset ioc */
1851 	mpi_write(sc, MPI_HOSTDIAG, MPI_HOSTDIAG_RESET_ADAPTER);
1852 
1853 	delay(10000);
1854 
1855 	/* disable diagnostic register */
1856 	mpi_write(sc, MPI_WRITESEQ, 0xff);
1857 
1858 	/* restore pci bits? */
1859 
1860 	/* firmware bits? */
1861 	return (0);
1862 }
1863 
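/*
 * Doorbell handshake, send side: announce the handshake function and
 * the dword count through the doorbell, then feed the request one dword
 * at a time, waiting for the IOC to ack each write via the interrupt
 * status register.
 */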
1864 int
1865 mpi_handshake_send(struct mpi_softc *sc, void *buf, size_t dwords)
1866 {
1867 	u_int32_t				*query = buf;
1868 	int					i;
1869 
1870 	/* make sure the doorbell is not in use. */
1871 	if (mpi_read_db(sc) & MPI_DOORBELL_INUSE)
1872 		return (1);
1873 
1874 	/* clear pending doorbell interrupts */
1875 	if (mpi_read_intr(sc) & MPI_INTR_STATUS_DOORBELL)
1876 		mpi_write_intr(sc, 0);
1877 
1878 	/*
1879 	 * first write the doorbell with the handshake function and the
1880 	 * dword count.
1881 	 */
1882 	mpi_write_db(sc, MPI_DOORBELL_FUNCTION(MPI_FUNCTION_HANDSHAKE) |
1883 	    MPI_DOORBELL_DWORDS(dwords));
1884 
1885 	/*
1886 	 * the doorbell used bit will be set because a doorbell function has
1887 	 * started. Wait for the interrupt and then ack it.
1888 	 */
1889 	if (mpi_wait_db_int(sc) != 0)
1890 		return (1);
1891 	mpi_write_intr(sc, 0);
1892 
1893 	/* poll for the acknowledgement. */
1894 	if (mpi_wait_db_ack(sc) != 0)
1895 		return (1);
1896 
1897 	/* write the query through the doorbell. */
1898 	for (i = 0; i < dwords; i++) {
1899 		mpi_write_db(sc, htole32(query[i]));
1900 		if (mpi_wait_db_ack(sc) != 0)
1901 			return (1);
1902 	}
1903 
1904 	return (0);
1905 }
1906 
1907 int
1908 mpi_handshake_recv_dword(struct mpi_softc *sc, u_int32_t *dword)
1909 {
1910 	u_int16_t				*words = (u_int16_t *)dword;
1911 	int					i;
1912 
1913 	for (i = 0; i < 2; i++) {
1914 		if (mpi_wait_db_int(sc) != 0)
1915 			return (1);
1916 		words[i] = letoh16(mpi_read_db(sc) & MPI_DOORBELL_DATA_MASK);
1917 		mpi_write_intr(sc, 0);
1918 	}
1919 
1920 	return (0);
1921 }
1922 
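/*
 * Pull a handshake reply out of the doorbell.  The message length in the
 * reply header says how much the IOC wants to send; anything beyond what
 * the caller asked for is read and discarded to complete the handshake.
 */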
1923 int
1924 mpi_handshake_recv(struct mpi_softc *sc, void *buf, size_t dwords)
1925 {
1926 	struct mpi_msg_reply			*reply = buf;
1927 	u_int32_t				*dbuf = buf, dummy;
1928 	int					i;
1929 
1930 	/* get the first dword so we can read the length out of the header. */
1931 	if (mpi_handshake_recv_dword(sc, &dbuf[0]) != 0)
1932 		return (1);
1933 
1934 	DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dwords: %zu reply: %d\n",
1935 	    DEVNAME(sc), dwords, reply->msg_length);
1936 
1937 	/*
1938 	 * the total length, in dwords, is in the message length field of the
1939 	 * reply header.
1940 	 */
1941 	for (i = 1; i < MIN(dwords, reply->msg_length); i++) {
1942 		if (mpi_handshake_recv_dword(sc, &dbuf[i]) != 0)
1943 			return (1);
1944 	}
1945 
1946 	/* if there's extra stuff to come off the ioc, discard it */
1947 	while (i++ < reply->msg_length) {
1948 		if (mpi_handshake_recv_dword(sc, &dummy) != 0)
1949 			return (1);
1950 		DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dummy read: "
1951 		    "0x%08x\n", DEVNAME(sc), dummy);
1952 	}
1953 
1954 	/* wait for the doorbell used bit to be reset and clear the intr */
1955 	if (mpi_wait_db_int(sc) != 0)
1956 		return (1);
1957 	mpi_write_intr(sc, 0);
1958 
1959 	return (0);
1960 }
1961 
1962 void
1963 mpi_empty_done(struct mpi_ccb *ccb)
1964 {
1965 	/* nothing to do */
1966 }
1967 
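/*
 * Fetch IOC FACTS via the doorbell handshake and derive the command and
 * reply queue depths, the bus width and the scatter-gather list sizes
 * from them.
 */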
1968 int
1969 mpi_iocfacts(struct mpi_softc *sc)
1970 {
1971 	struct mpi_msg_iocfacts_request		ifq;
1972 	struct mpi_msg_iocfacts_reply		ifp;
1973 
1974 	DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts\n", DEVNAME(sc));
1975 
1976 	memset(&ifq, 0, sizeof(ifq));
1977 	memset(&ifp, 0, sizeof(ifp));
1978 
1979 	ifq.function = MPI_FUNCTION_IOC_FACTS;
1980 	ifq.chain_offset = 0;
1981 	ifq.msg_flags = 0;
1982 	ifq.msg_context = htole32(0xdeadbeef);
1983 
1984 	if (mpi_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
1985 		DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts send failed\n",
1986 		    DEVNAME(sc));
1987 		return (1);
1988 	}
1989 
1990 	if (mpi_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
1991 		DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts recv failed\n",
1992 		    DEVNAME(sc));
1993 		return (1);
1994 	}
1995 
1996 	DNPRINTF(MPI_D_MISC, "%s:  func: 0x%02x len: %d msgver: %d.%d\n",
1997 	    DEVNAME(sc), ifp.function, ifp.msg_length,
1998 	    ifp.msg_version_maj, ifp.msg_version_min);
1999 	DNPRINTF(MPI_D_MISC, "%s:  msgflags: 0x%02x iocnumber: 0x%02x "
2000 	    "hdrver: %d.%d\n", DEVNAME(sc), ifp.msg_flags,
2001 	    ifp.ioc_number, ifp.header_version_maj,
2002 	    ifp.header_version_min);
2003 	DNPRINTF(MPI_D_MISC, "%s:  message context: 0x%08x\n", DEVNAME(sc),
2004 	    letoh32(ifp.msg_context));
2005 	DNPRINTF(MPI_D_MISC, "%s:  iocstatus: 0x%04x ioexcept: 0x%04x\n",
2006 	    DEVNAME(sc), letoh16(ifp.ioc_status),
2007 	    letoh16(ifp.ioc_exceptions));
2008 	DNPRINTF(MPI_D_MISC, "%s:  iocloginfo: 0x%08x\n", DEVNAME(sc),
2009 	    letoh32(ifp.ioc_loginfo));
2010 	DNPRINTF(MPI_D_MISC, "%s:  flags: 0x%02x blocksize: %d whoinit: 0x%02x "
2011 	    "maxchdepth: %d\n", DEVNAME(sc), ifp.flags,
2012 	    ifp.block_size, ifp.whoinit, ifp.max_chain_depth);
2013 	DNPRINTF(MPI_D_MISC, "%s:  reqfrsize: %d replyqdepth: %d\n",
2014 	    DEVNAME(sc), letoh16(ifp.request_frame_size),
2015 	    letoh16(ifp.reply_queue_depth));
2016 	DNPRINTF(MPI_D_MISC, "%s:  productid: 0x%04x\n", DEVNAME(sc),
2017 	    letoh16(ifp.product_id));
2018 	DNPRINTF(MPI_D_MISC, "%s:  hostmfahiaddr: 0x%08x\n", DEVNAME(sc),
2019 	    letoh32(ifp.current_host_mfa_hi_addr));
2020 	DNPRINTF(MPI_D_MISC, "%s:  event_state: 0x%02x number_of_ports: %d "
2021 	    "global_credits: %d\n",
2022 	    DEVNAME(sc), ifp.event_state, ifp.number_of_ports,
2023 	    letoh16(ifp.global_credits));
2024 	DNPRINTF(MPI_D_MISC, "%s:  sensebufhiaddr: 0x%08x\n", DEVNAME(sc),
2025 	    letoh32(ifp.current_sense_buffer_hi_addr));
2026 	DNPRINTF(MPI_D_MISC, "%s:  maxbus: %d maxdev: %d replyfrsize: %d\n",
2027 	    DEVNAME(sc), ifp.max_buses, ifp.max_devices,
2028 	    letoh16(ifp.current_reply_frame_size));
2029 	DNPRINTF(MPI_D_MISC, "%s:  fw_image_size: %d\n", DEVNAME(sc),
2030 	    letoh32(ifp.fw_image_size));
2031 	DNPRINTF(MPI_D_MISC, "%s:  ioc_capabilities: 0x%08x\n", DEVNAME(sc),
2032 	    letoh32(ifp.ioc_capabilities));
2033 	DNPRINTF(MPI_D_MISC, "%s:  fw_version: %d.%d fw_version_unit: 0x%02x "
2034 	    "fw_version_dev: 0x%02x\n", DEVNAME(sc),
2035 	    ifp.fw_version_maj, ifp.fw_version_min,
2036 	    ifp.fw_version_unit, ifp.fw_version_dev);
2037 	DNPRINTF(MPI_D_MISC, "%s:  hi_priority_queue_depth: 0x%04x\n",
2038 	    DEVNAME(sc), letoh16(ifp.hi_priority_queue_depth));
2039 	DNPRINTF(MPI_D_MISC, "%s:  host_page_buffer_sge: hdr: 0x%08x "
2040 	    "addr 0x%08x%08x\n", DEVNAME(sc),
2041 	    letoh32(ifp.host_page_buffer_sge.sg_hdr),
2042 	    letoh32(ifp.host_page_buffer_sge.sg_addr_hi),
2043 	    letoh32(ifp.host_page_buffer_sge.sg_addr_lo));
2044 
2045 	sc->sc_fw_maj = ifp.fw_version_maj;
2046 	sc->sc_fw_min = ifp.fw_version_min;
2047 	sc->sc_fw_unit = ifp.fw_version_unit;
2048 	sc->sc_fw_dev = ifp.fw_version_dev;
2049 
2050 	sc->sc_maxcmds = lemtoh16(&ifp.global_credits);
2051 	sc->sc_maxchdepth = ifp.max_chain_depth;
2052 	sc->sc_ioc_number = ifp.ioc_number;
2053 	if (sc->sc_flags & MPI_F_SPI)
2054 		sc->sc_buswidth = 16;
2055 	else
2056 		sc->sc_buswidth =
2057 		    (ifp.max_devices == 0) ? 256 : ifp.max_devices;
2058 	if (ifp.flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)
2059 		sc->sc_fw_len = lemtoh32(&ifp.fw_image_size);
2060 
2061 	sc->sc_repq = MIN(MPI_REPLYQ_DEPTH, lemtoh16(&ifp.reply_queue_depth));
2062 
2063 	/*
2064 	 * you can fit sg elements on the end of the io cmd if they fit in the
2065 	 * request frame size.
2066 	 */
2067 	sc->sc_first_sgl_len = ((lemtoh16(&ifp.request_frame_size) * 4) -
2068 	    sizeof(struct mpi_msg_scsi_io)) / sizeof(struct mpi_sge);
2069 	DNPRINTF(MPI_D_MISC, "%s:   first sgl len: %d\n", DEVNAME(sc),
2070 	    sc->sc_first_sgl_len);
2071 
2072 	sc->sc_chain_len = (lemtoh16(&ifp.request_frame_size) * 4) /
2073 	    sizeof(struct mpi_sge);
2074 	DNPRINTF(MPI_D_MISC, "%s:   chain len: %d\n", DEVNAME(sc),
2075 	    sc->sc_chain_len);
2076 
2077 	/* the sgl trailing the io cmd loses an entry to the chain element. */
2078 	sc->sc_max_sgl_len = MPI_MAX_SGL - 1;
2079 	/* the sgl chains lose an entry for each chain element */
2080 	sc->sc_max_sgl_len -= (MPI_MAX_SGL - sc->sc_first_sgl_len) /
2081 	    sc->sc_chain_len;
2082 	DNPRINTF(MPI_D_MISC, "%s:   max sgl len: %d\n", DEVNAME(sc),
2083 	    sc->sc_max_sgl_len);
2084 
2085 	/* XXX we're ignoring the max chain depth */
2086 
2087 	return (0);
2088 }
2089 
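/*
 * Send IOC INIT via the doorbell handshake to tell the IOC how it will be
 * driven: bus width, reply frame size and the upper 32 bits of the
 * request and sense buffer addresses.
 */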
2090 int
2091 mpi_iocinit(struct mpi_softc *sc)
2092 {
2093 	struct mpi_msg_iocinit_request		iiq;
2094 	struct mpi_msg_iocinit_reply		iip;
2095 	u_int32_t				hi_addr;
2096 
2097 	DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit\n", DEVNAME(sc));
2098 
2099 	memset(&iiq, 0, sizeof(iiq));
2100 	memset(&iip, 0, sizeof(iip));
2101 
2102 	iiq.function = MPI_FUNCTION_IOC_INIT;
2103 	iiq.whoinit = MPI_WHOINIT_HOST_DRIVER;
2104 
2105 	iiq.max_devices = (sc->sc_buswidth == 256) ? 0 : sc->sc_buswidth;
2106 	iiq.max_buses = 1;
2107 
2108 	iiq.msg_context = htole32(0xd00fd00f);
2109 
2110 	iiq.reply_frame_size = htole16(MPI_REPLY_SIZE);
2111 
2112 	hi_addr = (u_int32_t)(MPI_DMA_DVA(sc->sc_requests) >> 32);
2113 	htolem32(&iiq.host_mfa_hi_addr, hi_addr);
2114 	htolem32(&iiq.sense_buffer_hi_addr, hi_addr);
2115 
2116 	iiq.msg_version_maj = 0x01;
2117 	iiq.msg_version_min = 0x02;
2118 
2119 	iiq.hdr_version_unit = 0x0d;
2120 	iiq.hdr_version_dev = 0x00;
2121 
2122 	if (mpi_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) {
2123 		DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit send failed\n",
2124 		    DEVNAME(sc));
2125 		return (1);
2126 	}
2127 
2128 	if (mpi_handshake_recv(sc, &iip, dwordsof(iip)) != 0) {
2129 		DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit recv failed\n",
2130 		    DEVNAME(sc));
2131 		return (1);
2132 	}
2133 
2134 	DNPRINTF(MPI_D_MISC, "%s:  function: 0x%02x msg_length: %d "
2135 	    "whoinit: 0x%02x\n", DEVNAME(sc), iip.function,
2136 	    iip.msg_length, iip.whoinit);
2137 	DNPRINTF(MPI_D_MISC, "%s:  msg_flags: 0x%02x max_buses: %d "
2138 	    "max_devices: %d flags: 0x%02x\n", DEVNAME(sc), iip.msg_flags,
2139 	    iip.max_buses, iip.max_devices, iip.flags);
2140 	DNPRINTF(MPI_D_MISC, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2141 	    letoh32(iip.msg_context));
2142 	DNPRINTF(MPI_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2143 	    letoh16(iip.ioc_status));
2144 	DNPRINTF(MPI_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2145 	    letoh32(iip.ioc_loginfo));
2146 
2147 	return (0);
2148 }
2149 
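/*
 * Issue a PORT FACTS request for port 0 to learn the port type and the
 * IOC's own SCSI id.
 */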
2150 int
2151 mpi_portfacts(struct mpi_softc *sc)
2152 {
2153 	struct mpi_ccb				*ccb;
2154 	struct mpi_msg_portfacts_request	*pfq;
2155 	volatile struct mpi_msg_portfacts_reply	*pfp;
2156 	int					rv = 1;
2157 
2158 	DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts\n", DEVNAME(sc));
2159 
2160 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
2161 	if (ccb == NULL) {
2162 		DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts ccb_get\n",
2163 		    DEVNAME(sc));
2164 		return (rv);
2165 	}
2166 
2167 	ccb->ccb_done = mpi_empty_done;
2168 	pfq = ccb->ccb_cmd;
2169 
2170 	pfq->function = MPI_FUNCTION_PORT_FACTS;
2171 	pfq->chain_offset = 0;
2172 	pfq->msg_flags = 0;
2173 	pfq->port_number = 0;
2174 
2175 	if (mpi_poll(sc, ccb, 50000) != 0) {
2176 		DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts poll\n", DEVNAME(sc));
2177 		goto err;
2178 	}
2179 
2180 	if (ccb->ccb_rcb == NULL) {
2181 		DNPRINTF(MPI_D_MISC, "%s: empty portfacts reply\n",
2182 		    DEVNAME(sc));
2183 		goto err;
2184 	}
2185 	pfp = ccb->ccb_rcb->rcb_reply;
2186 
2187 	DNPRINTF(MPI_D_MISC, "%s:  function: 0x%02x msg_length: %d\n",
2188 	    DEVNAME(sc), pfp->function, pfp->msg_length);
2189 	DNPRINTF(MPI_D_MISC, "%s:  msg_flags: 0x%02x port_number: %d\n",
2190 	    DEVNAME(sc), pfp->msg_flags, pfp->port_number);
2191 	DNPRINTF(MPI_D_MISC, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2192 	    letoh32(pfp->msg_context));
2193 	DNPRINTF(MPI_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2194 	    letoh16(pfp->ioc_status));
2195 	DNPRINTF(MPI_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2196 	    letoh32(pfp->ioc_loginfo));
2197 	DNPRINTF(MPI_D_MISC, "%s:  max_devices: %d port_type: 0x%02x\n",
2198 	    DEVNAME(sc), letoh16(pfp->max_devices), pfp->port_type);
2199 	DNPRINTF(MPI_D_MISC, "%s:  protocol_flags: 0x%04x port_scsi_id: %d\n",
2200 	    DEVNAME(sc), letoh16(pfp->protocol_flags),
2201 	    letoh16(pfp->port_scsi_id));
2202 	DNPRINTF(MPI_D_MISC, "%s:  max_persistent_ids: %d "
2203 	    "max_posted_cmd_buffers: %d\n", DEVNAME(sc),
2204 	    letoh16(pfp->max_persistent_ids),
2205 	    letoh16(pfp->max_posted_cmd_buffers));
2206 	DNPRINTF(MPI_D_MISC, "%s:  max_lan_buckets: %d\n", DEVNAME(sc),
2207 	    letoh16(pfp->max_lan_buckets));
2208 
2209 	sc->sc_porttype = pfp->port_type;
2210 	if (sc->sc_target == -1)
2211 		sc->sc_target = lemtoh16(&pfp->port_scsi_id);
2212 
2213 	mpi_push_reply(sc, ccb->ccb_rcb);
2214 	rv = 0;
2215 err:
2216 	scsi_io_put(&sc->sc_iopool, ccb);
2217 
2218 	return (rv);
2219 }
2220 
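/*
 * Read IOC page 1 and clear the reply coalescing flag if the firmware has
 * it enabled.
 */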
2221 int
2222 mpi_cfg_coalescing(struct mpi_softc *sc)
2223 {
2224 	struct mpi_cfg_hdr		hdr;
2225 	struct mpi_cfg_ioc_pg1		pg;
2226 	u_int32_t			flags;
2227 
2228 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 1, 0, &hdr) != 0) {
2229 		DNPRINTF(MPI_D_MISC, "%s: unable to fetch IOC page 1 header\n",
2230 		    DEVNAME(sc));
2231 		return (1);
2232 	}
2233 
2234 	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg, sizeof(pg)) != 0) {
2235 		DNPRINTF(MPI_D_MISC, "%s: unable to fetch IOC page 1\n",
2236 		    DEVNAME(sc));
2237 		return (1);
2238 	}
2239 
2240 	DNPRINTF(MPI_D_MISC, "%s: IOC page 1\n", DEVNAME(sc));
2241 	DNPRINTF(MPI_D_MISC, "%s:  flags: 0x%08x\n", DEVNAME(sc),
2242 	    letoh32(pg.flags));
2243 	DNPRINTF(MPI_D_MISC, "%s:  coalescing_timeout: %d\n", DEVNAME(sc),
2244 	    letoh32(pg.coalescing_timeout));
2245 	DNPRINTF(MPI_D_MISC, "%s:  coalescing_depth: %d pci_slot_num: %d\n",
2246 	    DEVNAME(sc), pg.coalescing_depth, pg.pci_slot_num);
2247 
2248 	flags = lemtoh32(&pg.flags);
2249 	if (!ISSET(flags, MPI_CFG_IOC_1_REPLY_COALESCING))
2250 		return (0);
2251 
2252 	CLR(pg.flags, htole32(MPI_CFG_IOC_1_REPLY_COALESCING));
2253 	if (mpi_cfg_page(sc, 0, &hdr, 0, &pg, sizeof(pg)) != 0) {
2254 		DNPRINTF(MPI_D_MISC, "%s: unable to clear coalescing\n",
2255 		    DEVNAME(sc));
2256 		return (1);
2257 	}
2258 
2259 	return (0);
2260 }
2261 
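/*
 * Ask the IOC to start posting asynchronous event notifications.  The ccb
 * is stashed in sc_evt_ccb and never returned to the pool; each event
 * reply is handed to mpi_eventnotify_done().
 */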
2262 int
2263 mpi_eventnotify(struct mpi_softc *sc)
2264 {
2265 	struct mpi_ccb				*ccb;
2266 	struct mpi_msg_event_request		*enq;
2267 
2268 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
2269 	if (ccb == NULL) {
2270 		DNPRINTF(MPI_D_MISC, "%s: mpi_eventnotify ccb_get\n",
2271 		    DEVNAME(sc));
2272 		return (1);
2273 	}
2274 
2275 	sc->sc_evt_ccb = ccb;
2276 	SIMPLEQ_INIT(&sc->sc_evt_ack_queue);
2277 	mtx_init(&sc->sc_evt_ack_mtx, IPL_BIO);
2278 	scsi_ioh_set(&sc->sc_evt_ack_handler, &sc->sc_iopool,
2279 	    mpi_eventack, sc);
2280 
2281 	ccb->ccb_done = mpi_eventnotify_done;
2282 	enq = ccb->ccb_cmd;
2283 
2284 	enq->function = MPI_FUNCTION_EVENT_NOTIFICATION;
2285 	enq->chain_offset = 0;
2286 	enq->event_switch = MPI_EVENT_SWITCH_ON;
2287 
2288 	mpi_start(sc, ccb);
2289 	return (0);
2290 }
2291 
2292 void
2293 mpi_eventnotify_done(struct mpi_ccb *ccb)
2294 {
2295 	struct mpi_softc			*sc = ccb->ccb_sc;
2296 	struct mpi_rcb				*rcb = ccb->ccb_rcb;
2297 	struct mpi_msg_event_reply		*enp = rcb->rcb_reply;
2298 
2299 	DNPRINTF(MPI_D_EVT, "%s: mpi_eventnotify_done\n", DEVNAME(sc));
2300 
2301 	DNPRINTF(MPI_D_EVT, "%s:  function: 0x%02x msg_length: %d "
2302 	    "data_length: %d\n", DEVNAME(sc), enp->function, enp->msg_length,
2303 	    letoh16(enp->data_length));
2304 	DNPRINTF(MPI_D_EVT, "%s:  ack_required: %d msg_flags 0x%02x\n",
2305 	    DEVNAME(sc), enp->ack_required, enp->msg_flags);
2306 	DNPRINTF(MPI_D_EVT, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2307 	    letoh32(enp->msg_context));
2308 	DNPRINTF(MPI_D_EVT, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2309 	    letoh16(enp->ioc_status));
2310 	DNPRINTF(MPI_D_EVT, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2311 	    letoh32(enp->ioc_loginfo));
2312 	DNPRINTF(MPI_D_EVT, "%s:  event: 0x%08x\n", DEVNAME(sc),
2313 	    letoh32(enp->event));
2314 	DNPRINTF(MPI_D_EVT, "%s:  event_context: 0x%08x\n", DEVNAME(sc),
2315 	    letoh32(enp->event_context));
2316 
2317 	switch (lemtoh32(&enp->event)) {
2318 	/* ignore these */
2319 	case MPI_EVENT_EVENT_CHANGE:
2320 	case MPI_EVENT_SAS_PHY_LINK_STATUS:
2321 		break;
2322 
2323 	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2324 		if (sc->sc_scsibus == NULL)
2325 			break;
2326 
2327 		if (mpi_evt_sas(sc, rcb) != 0) {
2328 			/* reply is freed later on */
2329 			return;
2330 		}
2331 		break;
2332 
2333 	case MPI_EVENT_RESCAN:
2334 		if (sc->sc_scsibus != NULL &&
2335 		    sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_FC)
2336 			task_add(systq, &sc->sc_evt_rescan);
2337 		break;
2338 
2339 	default:
2340 		DNPRINTF(MPI_D_EVT, "%s:  unhandled event 0x%08x\n",
2341 		    DEVNAME(sc), lemtoh32(&enp->event));
2342 		break;
2343 	}
2344 
2345 	mpi_eventnotify_free(sc, rcb);
2346 }
2347 
2348 void
2349 mpi_eventnotify_free(struct mpi_softc *sc, struct mpi_rcb *rcb)
2350 {
2351 	struct mpi_msg_event_reply		*enp = rcb->rcb_reply;
2352 
2353 	if (enp->ack_required) {
2354 		mtx_enter(&sc->sc_evt_ack_mtx);
2355 		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_ack_queue, rcb, rcb_link);
2356 		mtx_leave(&sc->sc_evt_ack_mtx);
2357 		scsi_ioh_add(&sc->sc_evt_ack_handler);
2358 	} else
2359 		mpi_push_reply(sc, rcb);
2360 }
2361 
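/*
 * Handle a SAS device status change: request attachment of added targets
 * and queue a target reset/detach for targets that stopped responding.
 * Returns non-zero if the reply was queued and will be freed later.
 */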
2362 int
2363 mpi_evt_sas(struct mpi_softc *sc, struct mpi_rcb *rcb)
2364 {
2365 	struct mpi_evt_sas_change		*ch;
2366 	u_int8_t				*data;
2367 
2368 	data = rcb->rcb_reply;
2369 	data += sizeof(struct mpi_msg_event_reply);
2370 	ch = (struct mpi_evt_sas_change *)data;
2371 
2372 	if (ch->bus != 0)
2373 		return (0);
2374 
2375 	switch (ch->reason) {
2376 	case MPI_EVT_SASCH_REASON_ADDED:
2377 	case MPI_EVT_SASCH_REASON_NO_PERSIST_ADDED:
2378 		KERNEL_LOCK();
2379 		if (scsi_req_probe(sc->sc_scsibus, ch->target, -1) != 0) {
2380 			printf("%s: unable to request attach of %d\n",
2381 			    DEVNAME(sc), ch->target);
2382 		}
2383 		KERNEL_UNLOCK();
2384 		break;
2385 
2386 	case MPI_EVT_SASCH_REASON_NOT_RESPONDING:
2387 		KERNEL_LOCK();
2388 		scsi_activate(sc->sc_scsibus, ch->target, -1, DVACT_DEACTIVATE);
2389 		KERNEL_UNLOCK();
2390 
2391 		mtx_enter(&sc->sc_evt_scan_mtx);
2392 		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_scan_queue, rcb, rcb_link);
2393 		mtx_leave(&sc->sc_evt_scan_mtx);
2394 		scsi_ioh_add(&sc->sc_evt_scan_handler);
2395 
2396 		/* we'll handle event ack later on */
2397 		return (1);
2398 
2399 	case MPI_EVT_SASCH_REASON_SMART_DATA:
2400 	case MPI_EVT_SASCH_REASON_UNSUPPORTED:
2401 	case MPI_EVT_SASCH_REASON_INTERNAL_RESET:
2402 		break;
2403 	default:
2404 		printf("%s: unknown reason for SAS device status change: "
2405 		    "0x%02x\n", DEVNAME(sc), ch->reason);
2406 		break;
2407 	}
2408 
2409 	return (0);
2410 }
2411 
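/*
 * iopool handler: issue a target reset for the next queued "device not
 * responding" event; the target is detached from
 * mpi_evt_sas_detach_done() once the reset completes.
 */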
2412 void
2413 mpi_evt_sas_detach(void *cookie, void *io)
2414 {
2415 	struct mpi_softc			*sc = cookie;
2416 	struct mpi_ccb				*ccb = io;
2417 	struct mpi_rcb				*rcb, *next;
2418 	struct mpi_msg_event_reply		*enp;
2419 	struct mpi_evt_sas_change		*ch;
2420 	struct mpi_msg_scsi_task_request	*str;
2421 
2422 	DNPRINTF(MPI_D_EVT, "%s: event sas detach handler\n", DEVNAME(sc));
2423 
2424 	mtx_enter(&sc->sc_evt_scan_mtx);
2425 	rcb = SIMPLEQ_FIRST(&sc->sc_evt_scan_queue);
2426 	if (rcb != NULL) {
2427 		next = SIMPLEQ_NEXT(rcb, rcb_link);
2428 		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_scan_queue, rcb_link);
2429 	}
2430 	mtx_leave(&sc->sc_evt_scan_mtx);
2431 
2432 	if (rcb == NULL) {
2433 		scsi_io_put(&sc->sc_iopool, ccb);
2434 		return;
2435 	}
2436 
2437 	enp = rcb->rcb_reply;
2438 	ch = (struct mpi_evt_sas_change *)(enp + 1);
2439 
2440 	ccb->ccb_done = mpi_evt_sas_detach_done;
2441 	str = ccb->ccb_cmd;
2442 
2443 	str->target_id = ch->target;
2444 	str->bus = 0;
2445 	str->function = MPI_FUNCTION_SCSI_TASK_MGMT;
2446 
2447 	str->task_type = MPI_MSG_SCSI_TASK_TYPE_TARGET_RESET;
2448 
2449 	mpi_eventnotify_free(sc, rcb);
2450 
2451 	mpi_start(sc, ccb);
2452 
2453 	if (next != NULL)
2454 		scsi_ioh_add(&sc->sc_evt_scan_handler);
2455 }
2456 
2457 void
2458 mpi_evt_sas_detach_done(struct mpi_ccb *ccb)
2459 {
2460 	struct mpi_softc			*sc = ccb->ccb_sc;
2461 	struct mpi_msg_scsi_task_reply		*r = ccb->ccb_rcb->rcb_reply;
2462 
2463 	KERNEL_LOCK();
2464 	if (scsi_req_detach(sc->sc_scsibus, r->target_id, -1,
2465 	    DETACH_FORCE) != 0) {
2466 		printf("%s: unable to request detach of %d\n",
2467 		    DEVNAME(sc), r->target_id);
2468 	}
2469 	KERNEL_UNLOCK();
2470 
2471 	mpi_push_reply(sc, ccb->ccb_rcb);
2472 	scsi_io_put(&sc->sc_iopool, ccb);
2473 }
2474 
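/*
 * Walk the fibre channel device pages to build a map of the targets the
 * IOC can currently see, then probe targets that appeared and detach the
 * ones that went away.
 */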
2475 void
2476 mpi_fc_rescan(void *xsc)
2477 {
2478 	struct mpi_softc			*sc = xsc;
2479 	struct mpi_cfg_hdr			hdr;
2480 	struct mpi_cfg_fc_device_pg0		pg;
2481 	struct scsi_link			*link;
2482 	u_int8_t				devmap[256 / NBBY];
2483 	u_int32_t				id = 0xffffff;
2484 	int					i;
2485 
2486 	memset(devmap, 0, sizeof(devmap));
2487 
2488 	do {
2489 		if (mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_DEV, 0,
2490 		    id, 0, &hdr) != 0) {
2491 			printf("%s: header get for rescan of 0x%08x failed\n",
2492 			    DEVNAME(sc), id);
2493 			return;
2494 		}
2495 
2496 		memset(&pg, 0, sizeof(pg));
2497 		if (mpi_req_cfg_page(sc, id, 0, &hdr, 1, &pg, sizeof(pg)) != 0)
2498 			break;
2499 
2500 		if (ISSET(pg.flags, MPI_CFG_FC_DEV_0_FLAGS_BUSADDR_VALID) &&
2501 		    pg.current_bus == 0)
2502 			setbit(devmap, pg.current_target_id);
2503 
2504 		id = lemtoh32(&pg.port_id);
2505 	} while (id <= 0xff0000);
2506 
2507 	for (i = 0; i < sc->sc_buswidth; i++) {
2508 		link = scsi_get_link(sc->sc_scsibus, i, 0);
2509 
2510 		if (isset(devmap, i)) {
2511 			if (link == NULL)
2512 				scsi_probe_target(sc->sc_scsibus, i);
2513 		} else {
2514 			if (link != NULL) {
2515 				scsi_activate(sc->sc_scsibus, i, -1,
2516 				    DVACT_DEACTIVATE);
2517 				scsi_detach_target(sc->sc_scsibus, i,
2518 				    DETACH_FORCE);
2519 			}
2520 		}
2521 	}
2522 }
2523 
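/*
 * iopool handler: acknowledge the next queued event reply that the IOC
 * flagged as requiring an ack, and return its reply frame.
 */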
2524 void
2525 mpi_eventack(void *cookie, void *io)
2526 {
2527 	struct mpi_softc			*sc = cookie;
2528 	struct mpi_ccb				*ccb = io;
2529 	struct mpi_rcb				*rcb, *next;
2530 	struct mpi_msg_event_reply		*enp;
2531 	struct mpi_msg_eventack_request		*eaq;
2532 
2533 	DNPRINTF(MPI_D_EVT, "%s: event ack\n", DEVNAME(sc));
2534 
2535 	mtx_enter(&sc->sc_evt_ack_mtx);
2536 	rcb = SIMPLEQ_FIRST(&sc->sc_evt_ack_queue);
2537 	if (rcb != NULL) {
2538 		next = SIMPLEQ_NEXT(rcb, rcb_link);
2539 		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_ack_queue, rcb_link);
2540 	}
2541 	mtx_leave(&sc->sc_evt_ack_mtx);
2542 
2543 	if (rcb == NULL) {
2544 		scsi_io_put(&sc->sc_iopool, ccb);
2545 		return;
2546 	}
2547 
2548 	enp = rcb->rcb_reply;
2549 
2550 	ccb->ccb_done = mpi_eventack_done;
2551 	eaq = ccb->ccb_cmd;
2552 
2553 	eaq->function = MPI_FUNCTION_EVENT_ACK;
2554 
2555 	eaq->event = enp->event;
2556 	eaq->event_context = enp->event_context;
2557 
2558 	mpi_push_reply(sc, rcb);
2559 	mpi_start(sc, ccb);
2560 
2561 	if (next != NULL)
2562 		scsi_ioh_add(&sc->sc_evt_ack_handler);
2563 }
2564 
2565 void
2566 mpi_eventack_done(struct mpi_ccb *ccb)
2567 {
2568 	struct mpi_softc			*sc = ccb->ccb_sc;
2569 
2570 	DNPRINTF(MPI_D_EVT, "%s: event ack done\n", DEVNAME(sc));
2571 
2572 	mpi_push_reply(sc, ccb->ccb_rcb);
2573 	scsi_io_put(&sc->sc_iopool, ccb);
2574 }
2575 
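/* ask the IOC to bring port 0 online */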
2576 int
2577 mpi_portenable(struct mpi_softc *sc)
2578 {
2579 	struct mpi_ccb				*ccb;
2580 	struct mpi_msg_portenable_request	*peq;
2581 	int					rv = 0;
2582 
2583 	DNPRINTF(MPI_D_MISC, "%s: mpi_portenable\n", DEVNAME(sc));
2584 
2585 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
2586 	if (ccb == NULL) {
2587 		DNPRINTF(MPI_D_MISC, "%s: mpi_portenable ccb_get\n",
2588 		    DEVNAME(sc));
2589 		return (1);
2590 	}
2591 
2592 	ccb->ccb_done = mpi_empty_done;
2593 	peq = ccb->ccb_cmd;
2594 
2595 	peq->function = MPI_FUNCTION_PORT_ENABLE;
2596 	peq->port_number = 0;
2597 
2598 	if (mpi_poll(sc, ccb, 50000) != 0) {
2599 		DNPRINTF(MPI_D_MISC, "%s: mpi_portenable poll\n", DEVNAME(sc));
2600 		return (1);
2601 	}
2602 
2603 	if (ccb->ccb_rcb == NULL) {
2604 		DNPRINTF(MPI_D_MISC, "%s: empty portenable reply\n",
2605 		    DEVNAME(sc));
2606 		rv = 1;
2607 	} else
2608 		mpi_push_reply(sc, ccb->ccb_rcb);
2609 
2610 	scsi_io_put(&sc->sc_iopool, ccb);
2611 
2612 	return (rv);
2613 }
2614 
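/*
 * If IOC FACTS indicated the firmware was downloaded at boot, upload a
 * copy of the running IOC firmware image into host DMA memory.
 */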
2615 int
2616 mpi_fwupload(struct mpi_softc *sc)
2617 {
2618 	struct mpi_ccb				*ccb;
2619 	struct {
2620 		struct mpi_msg_fwupload_request		req;
2621 		struct mpi_sge				sge;
2622 	} __packed				*bundle;
2623 	struct mpi_msg_fwupload_reply		*upp;
2624 	int					rv = 0;
2625 
2626 	if (sc->sc_fw_len == 0)
2627 		return (0);
2628 
2629 	DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload\n", DEVNAME(sc));
2630 
2631 	sc->sc_fw = mpi_dmamem_alloc(sc, sc->sc_fw_len);
2632 	if (sc->sc_fw == NULL) {
2633 		DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload unable to allocate %d\n",
2634 		    DEVNAME(sc), sc->sc_fw_len);
2635 		return (1);
2636 	}
2637 
2638 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
2639 	if (ccb == NULL) {
2640 		DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload ccb_get\n",
2641 		    DEVNAME(sc));
2642 		goto err;
2643 	}
2644 
2645 	ccb->ccb_done = mpi_empty_done;
2646 	bundle = ccb->ccb_cmd;
2647 
2648 	bundle->req.function = MPI_FUNCTION_FW_UPLOAD;
2649 
2650 	bundle->req.image_type = MPI_FWUPLOAD_IMAGETYPE_IOC_FW;
2651 
2652 	bundle->req.tce.details_length = 12;
2653 	htolem32(&bundle->req.tce.image_size, sc->sc_fw_len);
2654 
2655 	htolem32(&bundle->sge.sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |
2656 	    MPI_SGE_FL_SIZE_64 | MPI_SGE_FL_LAST | MPI_SGE_FL_EOB |
2657 	    MPI_SGE_FL_EOL | (u_int32_t)sc->sc_fw_len);
2658 	mpi_dvatosge(&bundle->sge, MPI_DMA_DVA(sc->sc_fw));
2659 
2660 	if (mpi_poll(sc, ccb, 50000) != 0) {
2661 		DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n", DEVNAME(sc));
2662 		goto err;
2663 	}
2664 
2665 	if (ccb->ccb_rcb == NULL)
2666 		panic("%s: unable to do fw upload", DEVNAME(sc));
2667 	upp = ccb->ccb_rcb->rcb_reply;
2668 
2669 	if (lemtoh16(&upp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
2670 		rv = 1;
2671 
2672 	mpi_push_reply(sc, ccb->ccb_rcb);
2673 	scsi_io_put(&sc->sc_iopool, ccb);
2674 
2675 	return (rv);
2676 
2677 err:
2678 	mpi_dmamem_free(sc, sc->sc_fw);
2679 	return (1);
2680 }
2681 
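/* read manufacturing page 0 and print the board name and firmware version */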
2682 int
2683 mpi_manufacturing(struct mpi_softc *sc)
2684 {
2685 	char board_name[33];
2686 	struct mpi_cfg_hdr hdr;
2687 	struct mpi_cfg_manufacturing_pg0 *pg;
2688 	size_t pagelen;
2689 	int rv = 1;
2690 
2691 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_MANUFACTURING,
2692 	    0, 0, &hdr) != 0)
2693 		return (1);
2694 
2695 	pagelen = hdr.page_length * 4; /* dwords to bytes */
2696 	if (pagelen < sizeof(*pg))
2697 		return (1);
2698 
2699 	pg = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL);
2700 	if (pg == NULL)
2701 		return (1);
2702 
2703 	if (mpi_cfg_page(sc, 0, &hdr, 1, pg, pagelen) != 0)
2704 		goto out;
2705 
2706 	scsi_strvis(board_name, pg->board_name, sizeof(pg->board_name));
2707 
2708 	printf("%s: %s, firmware %d.%d.%d.%d\n", DEVNAME(sc), board_name,
2709 	    sc->sc_fw_maj, sc->sc_fw_min, sc->sc_fw_unit, sc->sc_fw_dev);
2710 
2711 	rv = 0;
2712 
2713 out:
2714 	free(pg, M_TEMP, 0);
2715 	return (rv);
2716 }
2717 
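/*
 * Read IOC page 2 and set MPI_F_RAID if the controller advertises RAID
 * capability.
 */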
2718 void
2719 mpi_get_raid(struct mpi_softc *sc)
2720 {
2721 	struct mpi_cfg_hdr		hdr;
2722 	struct mpi_cfg_ioc_pg2		*vol_page;
2723 	size_t				pagelen;
2724 	u_int32_t			capabilities;
2725 
2726 	DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid\n", DEVNAME(sc));
2727 
2728 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 2, 0, &hdr) != 0) {
2729 		DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch header "
2730 		    "for IOC page 2\n", DEVNAME(sc));
2731 		return;
2732 	}
2733 
2734 	pagelen = hdr.page_length * 4; /* dwords to bytes */
2735 	vol_page = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL);
2736 	if (vol_page == NULL) {
2737 		DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to allocate "
2738 		    "space for ioc config page 2\n", DEVNAME(sc));
2739 		return;
2740 	}
2741 
2742 	if (mpi_cfg_page(sc, 0, &hdr, 1, vol_page, pagelen) != 0) {
2743 		DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch IOC "
2744 		    "page 2\n", DEVNAME(sc));
2745 		goto out;
2746 	}
2747 
2748 	capabilities = lemtoh32(&vol_page->capabilities);
2749 
2750 	DNPRINTF(MPI_D_RAID, "%s:  capabilities: 0x%08x\n", DEVNAME(sc),
2751 	    letoh32(vol_page->capabilities));
2752 	DNPRINTF(MPI_D_RAID, "%s:  active_vols: %d max_vols: %d "
2753 	    "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc),
2754 	    vol_page->active_vols, vol_page->max_vols,
2755 	    vol_page->active_physdisks, vol_page->max_physdisks);
2756 
2757 	/* don't walk the list if there is no RAID capability */
2758 	if (capabilities == 0xdeadbeef) {
2759 		printf("%s: deadbeef in raid configuration\n", DEVNAME(sc));
2760 		goto out;
2761 	}
2762 
2763 	if (ISSET(capabilities, MPI_CFG_IOC_2_CAPABILITIES_RAID))
2764 		sc->sc_flags |= MPI_F_RAID;
2765 
2766 out:
2767 	free(vol_page, M_TEMP, 0);
2768 }
2769 
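/*
 * Fetch a configuration (or extended configuration) page header so the
 * caller knows how large the page is before moving it with
 * mpi_req_cfg_page().  MPI_PG_POLL polls for completion instead of
 * sleeping.
 */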
2770 int
2771 mpi_req_cfg_header(struct mpi_softc *sc, u_int8_t type, u_int8_t number,
2772     u_int32_t address, int flags, void *p)
2773 {
2774 	struct mpi_ccb				*ccb;
2775 	struct mpi_msg_config_request		*cq;
2776 	struct mpi_msg_config_reply		*cp;
2777 	struct mpi_cfg_hdr			*hdr = p;
2778 	struct mpi_ecfg_hdr			*ehdr = p;
2779 	int					etype = 0;
2780 	int					rv = 0;
2781 
2782 	DNPRINTF(MPI_D_MISC, "%s: mpi_req_cfg_header type: %#x number: %x "
2783 	    "address: 0x%08x flags: 0x%b\n", DEVNAME(sc), type, number,
2784 	    address, flags, MPI_PG_FMT);
2785 
2786 	ccb = scsi_io_get(&sc->sc_iopool,
2787 	    ISSET(flags, MPI_PG_POLL) ? SCSI_NOSLEEP : 0);
2788 	if (ccb == NULL) {
2789 		DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header ccb_get\n",
2790 		    DEVNAME(sc));
2791 		return (1);
2792 	}
2793 
2794 	if (ISSET(flags, MPI_PG_EXTENDED)) {
2795 		etype = type;
2796 		type = MPI_CONFIG_REQ_PAGE_TYPE_EXTENDED;
2797 	}
2798 
2799 	cq = ccb->ccb_cmd;
2800 
2801 	cq->function = MPI_FUNCTION_CONFIG;
2802 
2803 	cq->action = MPI_CONFIG_REQ_ACTION_PAGE_HEADER;
2804 
2805 	cq->config_header.page_number = number;
2806 	cq->config_header.page_type = type;
2807 	cq->ext_page_type = etype;
2808 	htolem32(&cq->page_address, address);
2809 	htolem32(&cq->page_buffer.sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |
2810 	    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL);
2811 
2812 	ccb->ccb_done = mpi_empty_done;
2813 	if (ISSET(flags, MPI_PG_POLL)) {
2814 		if (mpi_poll(sc, ccb, 50000) != 0) {
2815 			DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n",
2816 			    DEVNAME(sc));
2817 			return (1);
2818 		}
2819 	} else
2820 		mpi_wait(sc, ccb);
2821 
2822 	if (ccb->ccb_rcb == NULL)
2823 		panic("%s: unable to fetch config header", DEVNAME(sc));
2824 	cp = ccb->ccb_rcb->rcb_reply;
2825 
2826 	DNPRINTF(MPI_D_MISC, "%s:  action: 0x%02x msg_length: %d function: "
2827 	    "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function);
2828 	DNPRINTF(MPI_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
2829 	    "msg_flags: 0x%02x\n", DEVNAME(sc),
2830 	    letoh16(cp->ext_page_length), cp->ext_page_type,
2831 	    cp->msg_flags);
2832 	DNPRINTF(MPI_D_MISC, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2833 	    letoh32(cp->msg_context));
2834 	DNPRINTF(MPI_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2835 	    letoh16(cp->ioc_status));
2836 	DNPRINTF(MPI_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2837 	    letoh32(cp->ioc_loginfo));
2838 	DNPRINTF(MPI_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
2839 	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2840 	    cp->config_header.page_version,
2841 	    cp->config_header.page_length,
2842 	    cp->config_header.page_number,
2843 	    cp->config_header.page_type);
2844 
2845 	if (lemtoh16(&cp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
2846 		rv = 1;
2847 	else if (ISSET(flags, MPI_PG_EXTENDED)) {
2848 		memset(ehdr, 0, sizeof(*ehdr));
2849 		ehdr->page_version = cp->config_header.page_version;
2850 		ehdr->page_number = cp->config_header.page_number;
2851 		ehdr->page_type = cp->config_header.page_type;
2852 		ehdr->ext_page_length = cp->ext_page_length;
2853 		ehdr->ext_page_type = cp->ext_page_type;
2854 	} else
2855 		*hdr = cp->config_header;
2856 
2857 	mpi_push_reply(sc, ccb->ccb_rcb);
2858 	scsi_io_put(&sc->sc_iopool, ccb);
2859 
2860 	return (rv);
2861 }
2862 
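/*
 * Read or write a configuration page described by a header fetched with
 * mpi_req_cfg_header().  The page data is bounced through the request
 * frame, so no extra DMA mapping is needed.
 */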
2863 int
2864 mpi_req_cfg_page(struct mpi_softc *sc, u_int32_t address, int flags,
2865     void *p, int read, void *page, size_t len)
2866 {
2867 	struct mpi_ccb				*ccb;
2868 	struct mpi_msg_config_request		*cq;
2869 	struct mpi_msg_config_reply		*cp;
2870 	struct mpi_cfg_hdr			*hdr = p;
2871 	struct mpi_ecfg_hdr			*ehdr = p;
2872 	char					*kva;
2873 	int					page_length;
2874 	int					rv = 0;
2875 
2876 	DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page address: %d read: %d type: %x\n",
2877 	    DEVNAME(sc), address, read, hdr->page_type);
2878 
2879 	page_length = ISSET(flags, MPI_PG_EXTENDED) ?
2880 	    lemtoh16(&ehdr->ext_page_length) : hdr->page_length;
2881 
2882 	if (len > MPI_REQUEST_SIZE - sizeof(struct mpi_msg_config_request) ||
2883 	    len < page_length * 4)
2884 		return (1);
2885 
2886 	ccb = scsi_io_get(&sc->sc_iopool,
2887 	    ISSET(flags, MPI_PG_POLL) ? SCSI_NOSLEEP : 0);
2888 	if (ccb == NULL) {
2889 		DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page ccb_get\n", DEVNAME(sc));
2890 		return (1);
2891 	}
2892 
2893 	cq = ccb->ccb_cmd;
2894 
2895 	cq->function = MPI_FUNCTION_CONFIG;
2896 
2897 	cq->action = (read ? MPI_CONFIG_REQ_ACTION_PAGE_READ_CURRENT :
2898 	    MPI_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT);
2899 
2900 	if (ISSET(flags, MPI_PG_EXTENDED)) {
2901 		cq->config_header.page_version = ehdr->page_version;
2902 		cq->config_header.page_number = ehdr->page_number;
2903 		cq->config_header.page_type = ehdr->page_type;
2904 		cq->ext_page_len = ehdr->ext_page_length;
2905 		cq->ext_page_type = ehdr->ext_page_type;
2906 	} else
2907 		cq->config_header = *hdr;
2908 	cq->config_header.page_type &= MPI_CONFIG_REQ_PAGE_TYPE_MASK;
2909 	htolem32(&cq->page_address, address);
2910 	htolem32(&cq->page_buffer.sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |
2911 	    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL |
2912 	    (page_length * 4) |
2913 	    (read ? MPI_SGE_FL_DIR_IN : MPI_SGE_FL_DIR_OUT));
2914 
2915 	/* bounce the page via the request space to avoid more bus_dma games */
2916 	mpi_dvatosge(&cq->page_buffer, ccb->ccb_cmd_dva +
2917 	    sizeof(struct mpi_msg_config_request));
2918 
2919 	kva = ccb->ccb_cmd;
2920 	kva += sizeof(struct mpi_msg_config_request);
2921 	if (!read)
2922 		memcpy(kva, page, len);
2923 
2924 	ccb->ccb_done = mpi_empty_done;
2925 	if (ISSET(flags, MPI_PG_POLL)) {
2926 		if (mpi_poll(sc, ccb, 50000) != 0) {
2927 			DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n",
2928 			    DEVNAME(sc));
2929 			return (1);
2930 		}
2931 	} else
2932 		mpi_wait(sc, ccb);
2933 
2934 	if (ccb->ccb_rcb == NULL) {
2935 		scsi_io_put(&sc->sc_iopool, ccb);
2936 		return (1);
2937 	}
2938 	cp = ccb->ccb_rcb->rcb_reply;
2939 
2940 	DNPRINTF(MPI_D_MISC, "%s:  action: 0x%02x msg_length: %d function: "
2941 	    "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function);
2942 	DNPRINTF(MPI_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
2943 	    "msg_flags: 0x%02x\n", DEVNAME(sc),
2944 	    letoh16(cp->ext_page_length), cp->ext_page_type,
2945 	    cp->msg_flags);
2946 	DNPRINTF(MPI_D_MISC, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2947 	    letoh32(cp->msg_context));
2948 	DNPRINTF(MPI_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2949 	    letoh16(cp->ioc_status));
2950 	DNPRINTF(MPI_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2951 	    letoh32(cp->ioc_loginfo));
2952 	DNPRINTF(MPI_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
2953 	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2954 	    cp->config_header.page_version,
2955 	    cp->config_header.page_length,
2956 	    cp->config_header.page_number,
2957 	    cp->config_header.page_type);
2958 
2959 	if (lemtoh16(&cp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
2960 		rv = 1;
2961 	else if (read)
2962 		memcpy(page, kva, len);
2963 
2964 	mpi_push_reply(sc, ccb->ccb_rcb);
2965 	scsi_io_put(&sc->sc_iopool, ccb);
2966 
2967 	return (rv);
2968 }
2969 
2970 int
2971 mpi_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
2972 {
2973 	struct mpi_softc	*sc = (struct mpi_softc *)link->adapter_softc;
2974 
2975 	DNPRINTF(MPI_D_IOCTL, "%s: mpi_scsi_ioctl\n", DEVNAME(sc));
2976 
2977 	switch (cmd) {
2978 	case DIOCGCACHE:
2979 	case DIOCSCACHE:
2980 		if (ISSET(link->flags, SDEV_VIRTUAL)) {
2981 			return (mpi_ioctl_cache(link, cmd,
2982 			    (struct dk_cache *)addr));
2983 		}
2984 		break;
2985 
2986 	default:
2987 		if (sc->sc_ioctl)
2988 			return (sc->sc_ioctl(link->adapter_softc, cmd, addr));
2989 
2990 		break;
2991 	}
2992 
2993 	return (ENOTTY);
2994 }
2995 
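/*
 * Get or set the write cache setting of a RAID volume (DIOCGCACHE and
 * DIOCSCACHE) using a RAID ACTION request.
 */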
2996 int
2997 mpi_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc)
2998 {
2999 	struct mpi_softc	*sc = (struct mpi_softc *)link->adapter_softc;
3000 	struct mpi_ccb		*ccb;
3001 	int			len, rv;
3002 	struct mpi_cfg_hdr	hdr;
3003 	struct mpi_cfg_raid_vol_pg0 *rpg0;
3004 	int			enabled;
3005 	struct mpi_msg_raid_action_request *req;
3006 	struct mpi_msg_raid_action_reply *rep;
3007 	struct mpi_raid_settings settings;
3008 
3009 	rv = mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3010 	    link->target, MPI_PG_POLL, &hdr);
3011 	if (rv != 0)
3012 		return (EIO);
3013 
3014 	len = sizeof(*rpg0) + sc->sc_vol_page->max_physdisks *
3015 	    sizeof(struct mpi_cfg_raid_vol_pg0_physdisk);
3016 	rpg0 = malloc(len, M_TEMP, M_NOWAIT);
3017 	if (rpg0 == NULL)
3018 		return (ENOMEM);
3019 
3020 	if (mpi_req_cfg_page(sc, link->target, MPI_PG_POLL, &hdr, 1,
3021 	    rpg0, len) != 0) {
3022 		DNPRINTF(MPI_D_RAID, "%s: can't get RAID vol cfg page 0\n",
3023 		    DEVNAME(sc));
3024 		rv = EIO;
3025 		goto done;
3026 	}
3027 
3028 	enabled = ISSET(lemtoh16(&rpg0->settings.volume_settings),
3029 	    MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN) ? 1 : 0;
3030 
3031 	if (cmd == DIOCGCACHE) {
3032 		dc->wrcache = enabled;
3033 		dc->rdcache = 0;
3034 		goto done;
3035 	} /* else DIOCSCACHE */
3036 
3037 	if (dc->rdcache) {
3038 		rv = EOPNOTSUPP;
3039 		goto done;
3040 	}
3041 
3042 	if (((dc->wrcache) ? 1 : 0) == enabled)
3043 		goto done;
3044 
3045 	settings = rpg0->settings;
3046 	if (dc->wrcache) {
3047 		SET(settings.volume_settings,
3048 		    htole16(MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN));
3049 	} else {
3050 		CLR(settings.volume_settings,
3051 		    htole16(MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN));
3052 	}
3053 
3054 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
3055 	if (ccb == NULL) {
3056 		rv = ENOMEM;
3057 		goto done;
3058 	}
3059 
3060 	req = ccb->ccb_cmd;
3061 	req->function = MPI_FUNCTION_RAID_ACTION;
3062 	req->action = MPI_MSG_RAID_ACTION_CH_VOL_SETTINGS;
3063 	req->vol_id = rpg0->volume_id;
3064 	req->vol_bus = rpg0->volume_bus;
3065 
3066 	memcpy(&req->data_word, &settings, sizeof(req->data_word));
3067 	ccb->ccb_done = mpi_empty_done;
3068 	if (mpi_poll(sc, ccb, 50000) != 0) {
3069 		rv = EIO;
3070 		goto done;
3071 	}
3072 
3073 	if (ccb->ccb_rcb == NULL)
3074 		panic("%s: raid volume settings change failed", DEVNAME(sc));
3075 	rep = ccb->ccb_rcb->rcb_reply;
3076 
3077 	switch (lemtoh16(&rep->action_status)) {
3078 	case MPI_RAID_ACTION_STATUS_OK:
3079 		rv = 0;
3080 		break;
3081 	default:
3082 		rv = EIO;
3083 		break;
3084 	}
3085 
3086 	mpi_push_reply(sc, ccb->ccb_rcb);
3087 	scsi_io_put(&sc->sc_iopool, ccb);
3088 
3089 done:
3090 	free(rpg0, M_TEMP, 0);
3091 	return (rv);
3092 }
3093 
3094 #if NBIO > 0
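/*
 * Refresh IOC page 2 and cache RAID volume page 0 for the given volume id
 * in sc_rpg0 for use by the bio(4) ioctls and the sensors.
 */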
3095 int
3096 mpi_bio_get_pg0_raid(struct mpi_softc *sc, int id)
3097 {
3098 	int			len, rv = EINVAL;
3099 	u_int32_t		address;
3100 	struct mpi_cfg_hdr	hdr;
3101 	struct mpi_cfg_raid_vol_pg0 *rpg0;
3102 
3103 	/* get IOC page 2 */
3104 	if (mpi_req_cfg_page(sc, 0, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page,
3105 	    sc->sc_cfg_hdr.page_length * 4) != 0) {
3106 		DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid unable to "
3107 		    "fetch IOC page 2\n", DEVNAME(sc));
3108 		goto done;
3109 	}
3110 
3111 	/* XXX return something other than EINVAL to indicate within hs range */
3112 	if (id > sc->sc_vol_page->active_vols) {
3113 		DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid invalid vol "
3114 		    "id: %d\n", DEVNAME(sc), id);
3115 		goto done;
3116 	}
3117 
3118 	/* replace current buffer with new one */
3119 	len = sizeof *rpg0 + sc->sc_vol_page->max_physdisks *
3120 	    sizeof(struct mpi_cfg_raid_vol_pg0_physdisk);
3121 	rpg0 = malloc(len, M_DEVBUF, M_WAITOK | M_CANFAIL);
3122 	if (rpg0 == NULL) {
3123 		printf("%s: can't get memory for RAID page 0, "
3124 		    "bio disabled\n", DEVNAME(sc));
3125 		goto done;
3126 	}
3127 	if (sc->sc_rpg0)
3128 		free(sc->sc_rpg0, M_DEVBUF, 0);
3129 	sc->sc_rpg0 = rpg0;
3130 
3131 	/* get raid vol page 0 */
3132 	address = sc->sc_vol_list[id].vol_id |
3133 	    (sc->sc_vol_list[id].vol_bus << 8);
3134 	if (mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3135 	    address, 0, &hdr) != 0)
3136 		goto done;
3137 	if (mpi_req_cfg_page(sc, address, 0, &hdr, 1, rpg0, len)) {
3138 		DNPRINTF(MPI_D_RAID, "%s: can't get RAID vol cfg page 0\n",
3139 		    DEVNAME(sc));
3140 		goto done;
3141 	}
3142 
3143 	rv = 0;
3144 done:
3145 	return (rv);
3146 }
3147 
3148 int
3149 mpi_ioctl(struct device *dev, u_long cmd, caddr_t addr)
3150 {
3151 	struct mpi_softc	*sc = (struct mpi_softc *)dev;
3152 	int error = 0;
3153 
3154 	DNPRINTF(MPI_D_IOCTL, "%s: mpi_ioctl ", DEVNAME(sc));
3155 
3156 	/* make sure we have bio enabled */
3157 	if (sc->sc_ioctl != mpi_ioctl)
3158 		return (EINVAL);
3159 
3160 	rw_enter_write(&sc->sc_lock);
3161 
3162 	switch (cmd) {
3163 	case BIOCINQ:
3164 		DNPRINTF(MPI_D_IOCTL, "inq\n");
3165 		error = mpi_ioctl_inq(sc, (struct bioc_inq *)addr);
3166 		break;
3167 
3168 	case BIOCVOL:
3169 		DNPRINTF(MPI_D_IOCTL, "vol\n");
3170 		error = mpi_ioctl_vol(sc, (struct bioc_vol *)addr);
3171 		break;
3172 
3173 	case BIOCDISK:
3174 		DNPRINTF(MPI_D_IOCTL, "disk\n");
3175 		error = mpi_ioctl_disk(sc, (struct bioc_disk *)addr);
3176 		break;
3177 
3178 	case BIOCALARM:
3179 		DNPRINTF(MPI_D_IOCTL, "alarm\n");
3180 		break;
3181 
3182 	case BIOCBLINK:
3183 		DNPRINTF(MPI_D_IOCTL, "blink\n");
3184 		break;
3185 
3186 	case BIOCSETSTATE:
3187 		DNPRINTF(MPI_D_IOCTL, "setstate\n");
3188 		error = mpi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
3189 		break;
3190 
3191 	default:
3192 		DNPRINTF(MPI_D_IOCTL, " invalid ioctl\n");
3193 		error = EINVAL;
3194 	}
3195 
3196 	rw_exit_write(&sc->sc_lock);
3197 
3198 	return (error);
3199 }
3200 
3201 int
3202 mpi_ioctl_inq(struct mpi_softc *sc, struct bioc_inq *bi)
3203 {
3204 	if (!(sc->sc_flags & MPI_F_RAID)) {
3205 		bi->bi_novol = 0;
3206 		bi->bi_nodisk = 0;
3207 	}
3208 
3209 	if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page,
3210 	    sc->sc_cfg_hdr.page_length * 4) != 0) {
3211 		DNPRINTF(MPI_D_IOCTL, "%s: mpi_get_raid unable to fetch IOC "
3212 		    "page 2\n", DEVNAME(sc));
3213 		return (EINVAL);
3214 	}
3215 
3216 	DNPRINTF(MPI_D_IOCTL, "%s:  active_vols: %d max_vols: %d "
3217 	    "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc),
3218 	    sc->sc_vol_page->active_vols, sc->sc_vol_page->max_vols,
3219 	    sc->sc_vol_page->active_physdisks, sc->sc_vol_page->max_physdisks);
3220 
3221 	bi->bi_novol = sc->sc_vol_page->active_vols;
3222 	bi->bi_nodisk = sc->sc_vol_page->active_physdisks;
3223 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
3224 
3225 	return (0);
3226 }
3227 
3228 int
3229 mpi_ioctl_vol(struct mpi_softc *sc, struct bioc_vol *bv)
3230 {
3231 	int			i, vol, id, rv = EINVAL;
3232 	struct device		*dev;
3233 	struct scsi_link	*link;
3234 	struct mpi_cfg_raid_vol_pg0 *rpg0;
3235 	char			*vendp;
3236 
3237 	id = bv->bv_volid;
3238 	if (mpi_bio_get_pg0_raid(sc, id))
3239 		goto done;
3240 
3241 	if (id > sc->sc_vol_page->active_vols)
3242 		return (EINVAL); /* XXX deal with hot spares */
3243 
3244 	rpg0 = sc->sc_rpg0;
3245 	if (rpg0 == NULL)
3246 		goto done;
3247 
3248 	/* determine status */
3249 	switch (rpg0->volume_state) {
3250 	case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL:
3251 		bv->bv_status = BIOC_SVONLINE;
3252 		break;
3253 	case MPI_CFG_RAID_VOL_0_STATE_DEGRADED:
3254 		bv->bv_status = BIOC_SVDEGRADED;
3255 		break;
3256 	case MPI_CFG_RAID_VOL_0_STATE_FAILED:
3257 	case MPI_CFG_RAID_VOL_0_STATE_MISSING:
3258 		bv->bv_status = BIOC_SVOFFLINE;
3259 		break;
3260 	default:
3261 		bv->bv_status = BIOC_SVINVALID;
3262 	}
3263 
3264 	/* override status if scrubbing or something */
3265 	if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING)
3266 		bv->bv_status = BIOC_SVREBUILD;
3267 
3268 	bv->bv_size = (u_quad_t)lemtoh32(&rpg0->max_lba) * 512;
3269 
3270 	switch (sc->sc_vol_list[id].vol_type) {
3271 	case MPI_CFG_RAID_TYPE_RAID_IS:
3272 		bv->bv_level = 0;
3273 		break;
3274 	case MPI_CFG_RAID_TYPE_RAID_IME:
3275 	case MPI_CFG_RAID_TYPE_RAID_IM:
3276 		bv->bv_level = 1;
3277 		break;
3278 	case MPI_CFG_RAID_TYPE_RAID_5:
3279 		bv->bv_level = 5;
3280 		break;
3281 	case MPI_CFG_RAID_TYPE_RAID_6:
3282 		bv->bv_level = 6;
3283 		break;
3284 	case MPI_CFG_RAID_TYPE_RAID_10:
3285 		bv->bv_level = 10;
3286 		break;
3287 	case MPI_CFG_RAID_TYPE_RAID_50:
3288 		bv->bv_level = 50;
3289 		break;
3290 	default:
3291 		bv->bv_level = -1;
3292 	}
3293 
3294 	bv->bv_nodisk = rpg0->num_phys_disks;
3295 
3296 	for (i = 0, vol = -1; i < sc->sc_buswidth; i++) {
3297 		link = scsi_get_link(sc->sc_scsibus, i, 0);
3298 		if (link == NULL)
3299 			continue;
3300 
3301 		/* skip if not a virtual disk */
3302 		if (!(link->flags & SDEV_VIRTUAL))
3303 			continue;
3304 
3305 		vol++;
3306 		/* are we it? */
3307 		if (vol == bv->bv_volid) {
3308 			dev = link->device_softc;
3309 			vendp = link->inqdata.vendor;
3310 			memcpy(bv->bv_vendor, vendp, sizeof bv->bv_vendor);
3311 			bv->bv_vendor[sizeof(bv->bv_vendor) - 1] = '\0';
3312 			strlcpy(bv->bv_dev, dev->dv_xname, sizeof bv->bv_dev);
3313 			break;
3314 		}
3315 	}
3316 	rv = 0;
3317 done:
3318 	return (rv);
3319 }
3320 
3321 int
3322 mpi_ioctl_disk(struct mpi_softc *sc, struct bioc_disk *bd)
3323 {
3324 	int			pdid, id, rv = EINVAL;
3325 	u_int32_t		address;
3326 	struct mpi_cfg_hdr	hdr;
3327 	struct mpi_cfg_raid_vol_pg0 *rpg0;
3328 	struct mpi_cfg_raid_vol_pg0_physdisk *physdisk;
3329 	struct mpi_cfg_raid_physdisk_pg0 pdpg0;
3330 
3331 	id = bd->bd_volid;
3332 	if (mpi_bio_get_pg0_raid(sc, id))
3333 		goto done;
3334 
3335 	if (id > sc->sc_vol_page->active_vols)
3336 		return (EINVAL); /* XXX deal with hot spares */
3337 
3338 	rpg0 = sc->sc_rpg0;
3339 	if (rpg0 == NULL)
3340 		goto done;
3341 
3342 	pdid = bd->bd_diskid;
3343 	if (pdid > rpg0->num_phys_disks)
3344 		goto done;
3345 	physdisk = (struct mpi_cfg_raid_vol_pg0_physdisk *)(rpg0 + 1);
3346 	physdisk += pdid;
3347 
3348 	/* get raid phys disk page 0 */
3349 	address = physdisk->phys_disk_num;
3350 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_PD, 0, address,
3351 	    &hdr) != 0)
3352 		goto done;
3353 	if (mpi_cfg_page(sc, address, &hdr, 1, &pdpg0, sizeof pdpg0)) {
3354 		bd->bd_status = BIOC_SDFAILED;
3355 		return (0);
3356 	}
3357 	bd->bd_channel = pdpg0.phys_disk_bus;
3358 	bd->bd_target = pdpg0.phys_disk_id;
3359 	bd->bd_lun = 0;
3360 	bd->bd_size = (u_quad_t)lemtoh32(&pdpg0.max_lba) * 512;
3361 	strlcpy(bd->bd_vendor, (char *)pdpg0.vendor_id, sizeof(bd->bd_vendor));
3362 
3363 	switch (pdpg0.phys_disk_state) {
3364 	case MPI_CFG_RAID_PHYDISK_0_STATE_ONLINE:
3365 		bd->bd_status = BIOC_SDONLINE;
3366 		break;
3367 	case MPI_CFG_RAID_PHYDISK_0_STATE_MISSING:
3368 	case MPI_CFG_RAID_PHYDISK_0_STATE_FAILED:
3369 		bd->bd_status = BIOC_SDFAILED;
3370 		break;
3371 	case MPI_CFG_RAID_PHYDISK_0_STATE_HOSTFAIL:
3372 	case MPI_CFG_RAID_PHYDISK_0_STATE_OTHER:
3373 	case MPI_CFG_RAID_PHYDISK_0_STATE_OFFLINE:
3374 		bd->bd_status = BIOC_SDOFFLINE;
3375 		break;
3376 	case MPI_CFG_RAID_PHYDISK_0_STATE_INIT:
3377 		bd->bd_status = BIOC_SDSCRUB;
3378 		break;
3379 	case MPI_CFG_RAID_PHYDISK_0_STATE_INCOMPAT:
3380 	default:
3381 		bd->bd_status = BIOC_SDINVALID;
3382 		break;
3383 	}
3384 
3385 	/* XXX figure this out */
3386 	/* bd_serial[32]; */
3387 	/* bd_procdev[16]; */
3388 
3389 	rv = 0;
3390 done:
3391 	return (rv);
3392 }
3393 
3394 int
3395 mpi_ioctl_setstate(struct mpi_softc *sc, struct bioc_setstate *bs)
3396 {
3397 	return (ENOTTY);
3398 }
3399 
3400 #ifndef SMALL_KERNEL
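/* attach one drive sensor per RAID volume found on the scsibus */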
3401 int
3402 mpi_create_sensors(struct mpi_softc *sc)
3403 {
3404 	struct device		*dev;
3405 	struct scsi_link	*link;
3406 	int			i, vol;
3407 
3408 	/* count volumes */
3409 	for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
3410 		link = scsi_get_link(sc->sc_scsibus, i, 0);
3411 		if (link == NULL)
3412 			continue;
3413 		/* skip if not a virtual disk */
3414 		if (!(link->flags & SDEV_VIRTUAL))
3415 			continue;
3416 
3417 		vol++;
3418 	}
3419 	if (vol == 0)
3420 		return (0);
3421 
3422 	sc->sc_sensors = mallocarray(vol, sizeof(struct ksensor),
3423 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3424 	if (sc->sc_sensors == NULL)
3425 		return (1);
3426 
3427 	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
3428 	    sizeof(sc->sc_sensordev.xname));
3429 
3430 	for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
3431 		link = scsi_get_link(sc->sc_scsibus, i, 0);
3432 		if (link == NULL)
3433 			continue;
3434 		/* skip if not a virtual disk */
3435 		if (!(link->flags & SDEV_VIRTUAL))
3436 			continue;
3437 
3438 		dev = link->device_softc;
3439 		strlcpy(sc->sc_sensors[vol].desc, dev->dv_xname,
3440 		    sizeof(sc->sc_sensors[vol].desc));
3441 		sc->sc_sensors[vol].type = SENSOR_DRIVE;
3442 		sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN;
3443 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[vol]);
3444 
3445 		vol++;
3446 	}
3447 
3448 	if (sensor_task_register(sc, mpi_refresh_sensors, 10) == NULL)
3449 		goto bad;
3450 
3451 	sensordev_install(&sc->sc_sensordev);
3452 
3453 	return (0);
3454 
3455 bad:
3456 	free(sc->sc_sensors, M_DEVBUF, 0);
3457 	return (1);
3458 }
3459 
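/*
 * Periodic sensor task: map the state of each RAID volume onto its drive
 * sensor.
 */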
3460 void
3461 mpi_refresh_sensors(void *arg)
3462 {
3463 	int			i, vol;
3464 	struct scsi_link	*link;
3465 	struct mpi_softc	*sc = arg;
3466 	struct mpi_cfg_raid_vol_pg0 *rpg0;
3467 
3468 	rw_enter_write(&sc->sc_lock);
3469 
3470 	for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
3471 		link = scsi_get_link(sc->sc_scsibus, i, 0);
3472 		if (link == NULL)
3473 			continue;
3474 		/* skip if not a virtual disk */
3475 		if (!(link->flags & SDEV_VIRTUAL))
3476 			continue;
3477 
3478 		if (mpi_bio_get_pg0_raid(sc, vol))
3479 			continue;
3480 
3481 		rpg0 = sc->sc_rpg0;
3482 		if (rpg0 == NULL)
3483 			goto done;
3484 
3485 		/* determine status */
3486 		switch (rpg0->volume_state) {
3487 		case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL:
3488 			sc->sc_sensors[vol].value = SENSOR_DRIVE_ONLINE;
3489 			sc->sc_sensors[vol].status = SENSOR_S_OK;
3490 			break;
3491 		case MPI_CFG_RAID_VOL_0_STATE_DEGRADED:
3492 			sc->sc_sensors[vol].value = SENSOR_DRIVE_PFAIL;
3493 			sc->sc_sensors[vol].status = SENSOR_S_WARN;
3494 			break;
3495 		case MPI_CFG_RAID_VOL_0_STATE_FAILED:
3496 		case MPI_CFG_RAID_VOL_0_STATE_MISSING:
3497 			sc->sc_sensors[vol].value = SENSOR_DRIVE_FAIL;
3498 			sc->sc_sensors[vol].status = SENSOR_S_CRIT;
3499 			break;
3500 		default:
3501 			sc->sc_sensors[vol].value = 0; /* unknown */
3502 			sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN;
3503 		}
3504 
3505 		/* override status if scrubbing or something */
3506 		if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING) {
3507 			sc->sc_sensors[vol].value = SENSOR_DRIVE_REBUILD;
3508 			sc->sc_sensors[vol].status = SENSOR_S_WARN;
3509 		}
3510 
3511 		vol++;
3512 	}
3513 done:
3514 	rw_exit_write(&sc->sc_lock);
3515 }
3516 #endif /* SMALL_KERNEL */
3517 #endif /* NBIO > 0 */
3518