1 /*	$OpenBSD: mpi.c,v 1.201 2015/05/04 03:59:42 jsg Exp $ */
2 
3 /*
4  * Copyright (c) 2005, 2006, 2009 David Gwynne <dlg@openbsd.org>
5  * Copyright (c) 2005, 2008, 2009 Marco Peereboom <marco@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "bio.h"
21 
22 #include <sys/param.h>
23 #include <sys/systm.h>
24 #include <sys/buf.h>
25 #include <sys/device.h>
26 #include <sys/malloc.h>
27 #include <sys/kernel.h>
28 #include <sys/mutex.h>
29 #include <sys/rwlock.h>
30 #include <sys/sensors.h>
31 #include <sys/dkio.h>
32 #include <sys/task.h>
33 
34 #include <machine/bus.h>
35 
36 #include <scsi/scsi_all.h>
37 #include <scsi/scsiconf.h>
38 
39 #include <dev/biovar.h>
40 #include <dev/ic/mpireg.h>
41 #include <dev/ic/mpivar.h>
42 
43 #ifdef MPI_DEBUG
44 uint32_t	mpi_debug = 0
45 /*		    | MPI_D_CMD */
46 /*		    | MPI_D_INTR */
47 /*		    | MPI_D_MISC */
48 /*		    | MPI_D_DMA */
49 /*		    | MPI_D_IOCTL */
50 /*		    | MPI_D_RW */
51 /*		    | MPI_D_MEM */
52 /*		    | MPI_D_CCB */
53 /*		    | MPI_D_PPR */
54 /*		    | MPI_D_RAID */
55 /*		    | MPI_D_EVT */
56 		;
57 #endif
58 
59 struct cfdriver mpi_cd = {
60 	NULL,
61 	"mpi",
62 	DV_DULL
63 };
64 
65 void			mpi_scsi_cmd(struct scsi_xfer *);
66 void			mpi_scsi_cmd_done(struct mpi_ccb *);
67 void			mpi_minphys(struct buf *bp, struct scsi_link *sl);
68 int			mpi_scsi_probe(struct scsi_link *);
69 int			mpi_scsi_ioctl(struct scsi_link *, u_long, caddr_t,
70 			    int);
71 
72 struct scsi_adapter mpi_switch = {
73 	mpi_scsi_cmd,
74 	mpi_minphys,
75 	mpi_scsi_probe,
76 	NULL,
77 	mpi_scsi_ioctl
78 };
79 
80 struct mpi_dmamem	*mpi_dmamem_alloc(struct mpi_softc *, size_t);
81 void			mpi_dmamem_free(struct mpi_softc *,
82 			    struct mpi_dmamem *);
83 int			mpi_alloc_ccbs(struct mpi_softc *);
84 void			*mpi_get_ccb(void *);
85 void			mpi_put_ccb(void *, void *);
86 int			mpi_alloc_replies(struct mpi_softc *);
87 void			mpi_push_replies(struct mpi_softc *);
88 void			mpi_push_reply(struct mpi_softc *, struct mpi_rcb *);
89 
90 void			mpi_start(struct mpi_softc *, struct mpi_ccb *);
91 int			mpi_poll(struct mpi_softc *, struct mpi_ccb *, int);
92 void			mpi_poll_done(struct mpi_ccb *);
93 void			mpi_reply(struct mpi_softc *, u_int32_t);
94 
95 void			mpi_wait(struct mpi_softc *sc, struct mpi_ccb *);
96 void			mpi_wait_done(struct mpi_ccb *);
97 
98 int			mpi_cfg_spi_port(struct mpi_softc *);
99 void			mpi_squash_ppr(struct mpi_softc *);
100 void			mpi_run_ppr(struct mpi_softc *);
101 int			mpi_ppr(struct mpi_softc *, struct scsi_link *,
102 			    struct mpi_cfg_raid_physdisk *, int, int, int);
103 int			mpi_inq(struct mpi_softc *, u_int16_t, int);
104 
105 int			mpi_cfg_sas(struct mpi_softc *);
106 int			mpi_cfg_fc(struct mpi_softc *);
107 
108 void			mpi_timeout_xs(void *);
109 int			mpi_load_xs(struct mpi_ccb *);
110 
111 u_int32_t		mpi_read(struct mpi_softc *, bus_size_t);
112 void			mpi_write(struct mpi_softc *, bus_size_t, u_int32_t);
113 int			mpi_wait_eq(struct mpi_softc *, bus_size_t, u_int32_t,
114 			    u_int32_t);
115 int			mpi_wait_ne(struct mpi_softc *, bus_size_t, u_int32_t,
116 			    u_int32_t);
117 
118 int			mpi_init(struct mpi_softc *);
119 int			mpi_reset_soft(struct mpi_softc *);
120 int			mpi_reset_hard(struct mpi_softc *);
121 
122 int			mpi_handshake_send(struct mpi_softc *, void *, size_t);
123 int			mpi_handshake_recv_dword(struct mpi_softc *,
124 			    u_int32_t *);
125 int			mpi_handshake_recv(struct mpi_softc *, void *, size_t);
126 
127 void			mpi_empty_done(struct mpi_ccb *);
128 
129 int			mpi_iocinit(struct mpi_softc *);
130 int			mpi_iocfacts(struct mpi_softc *);
131 int			mpi_portfacts(struct mpi_softc *);
132 int			mpi_portenable(struct mpi_softc *);
133 int			mpi_cfg_coalescing(struct mpi_softc *);
134 void			mpi_get_raid(struct mpi_softc *);
135 int			mpi_fwupload(struct mpi_softc *);
136 int			mpi_manufacturing(struct mpi_softc *);
137 int			mpi_scsi_probe_virtual(struct scsi_link *);
138 
139 int			mpi_eventnotify(struct mpi_softc *);
140 void			mpi_eventnotify_done(struct mpi_ccb *);
141 void			mpi_eventnotify_free(struct mpi_softc *,
142 			    struct mpi_rcb *);
143 void			mpi_eventack(void *, void *);
144 void			mpi_eventack_done(struct mpi_ccb *);
145 int			mpi_evt_sas(struct mpi_softc *, struct mpi_rcb *);
146 void			mpi_evt_sas_detach(void *, void *);
147 void			mpi_evt_sas_detach_done(struct mpi_ccb *);
148 void			mpi_fc_rescan(void *);
149 
150 int			mpi_req_cfg_header(struct mpi_softc *, u_int8_t,
151 			    u_int8_t, u_int32_t, int, void *);
152 int			mpi_req_cfg_page(struct mpi_softc *, u_int32_t, int,
153 			    void *, int, void *, size_t);
154 
155 int			mpi_ioctl_cache(struct scsi_link *, u_long,
156 			    struct dk_cache *);
157 
158 #if NBIO > 0
159 int		mpi_bio_get_pg0_raid(struct mpi_softc *, int);
160 int		mpi_ioctl(struct device *, u_long, caddr_t);
161 int		mpi_ioctl_inq(struct mpi_softc *, struct bioc_inq *);
162 int		mpi_ioctl_vol(struct mpi_softc *, struct bioc_vol *);
163 int		mpi_ioctl_disk(struct mpi_softc *, struct bioc_disk *);
164 int		mpi_ioctl_setstate(struct mpi_softc *, struct bioc_setstate *);
165 #ifndef SMALL_KERNEL
166 int		mpi_create_sensors(struct mpi_softc *);
167 void		mpi_refresh_sensors(void *);
168 #endif /* SMALL_KERNEL */
169 #endif /* NBIO > 0 */
170 
171 #define DEVNAME(s)		((s)->sc_dev.dv_xname)
172 
173 #define	dwordsof(s)		(sizeof(s) / sizeof(u_int32_t))
174 
175 #define mpi_read_db(s)		mpi_read((s), MPI_DOORBELL)
176 #define mpi_write_db(s, v)	mpi_write((s), MPI_DOORBELL, (v))
177 #define mpi_read_intr(s)	bus_space_read_4((s)->sc_iot, (s)->sc_ioh, \
178 				    MPI_INTR_STATUS)
179 #define mpi_write_intr(s, v)	mpi_write((s), MPI_INTR_STATUS, (v))
180 #define mpi_pop_reply(s)	bus_space_read_4((s)->sc_iot, (s)->sc_ioh, \
181 				    MPI_REPLY_QUEUE)
182 #define mpi_push_reply_db(s, v) bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
183 				    MPI_REPLY_QUEUE, (v))
184 
185 #define mpi_wait_db_int(s)	mpi_wait_ne((s), MPI_INTR_STATUS, \
186 				    MPI_INTR_STATUS_DOORBELL, 0)
187 #define mpi_wait_db_ack(s)	mpi_wait_eq((s), MPI_INTR_STATUS, \
188 				    MPI_INTR_STATUS_IOCDOORBELL, 0)
189 
190 #define MPI_PG_EXTENDED		(1<<0)
191 #define MPI_PG_POLL		(1<<1)
192 #define MPI_PG_FMT		"\020" "\002POLL" "\001EXTENDED"
193 
194 #define mpi_cfg_header(_s, _t, _n, _a, _h) \
195 	mpi_req_cfg_header((_s), (_t), (_n), (_a), \
196 	    MPI_PG_POLL, (_h))
197 #define mpi_ecfg_header(_s, _t, _n, _a, _h) \
198 	mpi_req_cfg_header((_s), (_t), (_n), (_a), \
199 	    MPI_PG_POLL|MPI_PG_EXTENDED, (_h))
200 
201 #define mpi_cfg_page(_s, _a, _h, _r, _p, _l) \
202 	mpi_req_cfg_page((_s), (_a), MPI_PG_POLL, \
203 	    (_h), (_r), (_p), (_l))
204 #define mpi_ecfg_page(_s, _a, _h, _r, _p, _l) \
205 	mpi_req_cfg_page((_s), (_a), MPI_PG_POLL|MPI_PG_EXTENDED, \
206 	    (_h), (_r), (_p), (_l))
207 
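/*
 * Split a 64-bit device-visible address across the low and high words of
 * an SGE, converting to the IOC's little-endian byte order.
 */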
208 static inline void
209 mpi_dvatosge(struct mpi_sge *sge, u_int64_t dva)
210 {
211 	htolem32(&sge->sg_addr_lo, dva);
212 	htolem32(&sge->sg_addr_hi, dva >> 32);
213 }
214 
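/*
 * Bring the IOC up: reset/handshake it to the READY state, read the IOC
 * and port facts, allocate request and reply DMA resources, send IOC INIT
 * and wait for the OPERATIONAL state, then enable the port, do per-port
 * (SPI/SAS/FC) configuration and attach scsibus.
 */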
215 int
216 mpi_attach(struct mpi_softc *sc)
217 {
218 	struct scsibus_attach_args	saa;
219 	struct mpi_ccb			*ccb;
220 
221 	printf("\n");
222 
223 	rw_init(&sc->sc_lock, "mpi_lock");
224 	task_set(&sc->sc_evt_rescan, mpi_fc_rescan, sc);
225 
226 	/* disable interrupts */
227 	mpi_write(sc, MPI_INTR_MASK,
228 	    MPI_INTR_MASK_REPLY | MPI_INTR_MASK_DOORBELL);
229 
230 	if (mpi_init(sc) != 0) {
231 		printf("%s: unable to initialise\n", DEVNAME(sc));
232 		return (1);
233 	}
234 
235 	if (mpi_iocfacts(sc) != 0) {
236 		printf("%s: unable to get iocfacts\n", DEVNAME(sc));
237 		return (1);
238 	}
239 
240 	if (mpi_alloc_ccbs(sc) != 0) {
241 		/* error already printed */
242 		return (1);
243 	}
244 
245 	if (mpi_alloc_replies(sc) != 0) {
246 		printf("%s: unable to allocate reply space\n", DEVNAME(sc));
247 		goto free_ccbs;
248 	}
249 
250 	if (mpi_iocinit(sc) != 0) {
251 		printf("%s: unable to send iocinit\n", DEVNAME(sc));
252 		goto free_ccbs;
253 	}
254 
255 	/* spin until we're operational */
256 	if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
257 	    MPI_DOORBELL_STATE_OPER) != 0) {
258 		printf("%s: state: 0x%08x\n", DEVNAME(sc),
259 		    mpi_read_db(sc) & MPI_DOORBELL_STATE);
260 		printf("%s: operational state timeout\n", DEVNAME(sc));
261 		goto free_ccbs;
262 	}
263 
264 	mpi_push_replies(sc);
265 
266 	if (mpi_portfacts(sc) != 0) {
267 		printf("%s: unable to get portfacts\n", DEVNAME(sc));
268 		goto free_replies;
269 	}
270 
271 	if (mpi_cfg_coalescing(sc) != 0) {
272 		printf("%s: unable to configure coalescing\n", DEVNAME(sc));
273 		goto free_replies;
274 	}
275 
276 	switch (sc->sc_porttype) {
277 	case MPI_PORTFACTS_PORTTYPE_SAS:
278 		SIMPLEQ_INIT(&sc->sc_evt_scan_queue);
279 		mtx_init(&sc->sc_evt_scan_mtx, IPL_BIO);
280 		scsi_ioh_set(&sc->sc_evt_scan_handler, &sc->sc_iopool,
281 		    mpi_evt_sas_detach, sc);
282 		/* FALLTHROUGH */
283 	case MPI_PORTFACTS_PORTTYPE_FC:
284 		if (mpi_eventnotify(sc) != 0) {
285 			printf("%s: unable to enable events\n", DEVNAME(sc));
286 			goto free_replies;
287 		}
288 		break;
289 	}
290 
291 	if (mpi_portenable(sc) != 0) {
292 		printf("%s: unable to enable port\n", DEVNAME(sc));
293 		goto free_replies;
294 	}
295 
296 	if (mpi_fwupload(sc) != 0) {
297 		printf("%s: unable to upload firmware\n", DEVNAME(sc));
298 		goto free_replies;
299 	}
300 
301 	if (mpi_manufacturing(sc) != 0) {
302 		printf("%s: unable to fetch manufacturing info\n",
		    DEVNAME(sc));
		goto free_replies;
303 	}
304 
305 	switch (sc->sc_porttype) {
306 	case MPI_PORTFACTS_PORTTYPE_SCSI:
307 		if (mpi_cfg_spi_port(sc) != 0) {
308 			printf("%s: unable to configure spi\n", DEVNAME(sc));
309 			goto free_replies;
310 		}
311 		mpi_squash_ppr(sc);
312 		break;
313 	case MPI_PORTFACTS_PORTTYPE_SAS:
314 		if (mpi_cfg_sas(sc) != 0) {
315 			printf("%s: unable to configure sas\n", DEVNAME(sc));
316 			goto free_replies;
317 		}
318 		break;
319 	case MPI_PORTFACTS_PORTTYPE_FC:
320 		if (mpi_cfg_fc(sc) != 0) {
321 			printf("%s: unable to configure fc\n", DEVNAME(sc));
322 			goto free_replies;
323 		}
324 		break;
325 	}
326 
327 	/* get raid pages */
328 	mpi_get_raid(sc);
329 #if NBIO > 0
330 	if (sc->sc_flags & MPI_F_RAID) {
331 		if (bio_register(&sc->sc_dev, mpi_ioctl) != 0)
332 			panic("%s: controller registration failed",
333 			    DEVNAME(sc));
334 		else {
335 			if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC,
336 			    2, 0, &sc->sc_cfg_hdr) != 0) {
337 				panic("%s: can't get IOC page 2 hdr",
338 				    DEVNAME(sc));
339 			}
340 
341 			sc->sc_vol_page = mallocarray(sc->sc_cfg_hdr.page_length,
342 			    4, M_TEMP, M_WAITOK | M_CANFAIL);
343 			if (sc->sc_vol_page == NULL) {
344 				panic("%s: can't get memory for IOC page 2, "
345 				    "bio disabled", DEVNAME(sc));
346 			}
347 
348 			if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1,
349 			    sc->sc_vol_page,
350 			    sc->sc_cfg_hdr.page_length * 4) != 0) {
351 				panic("%s: can't get IOC page 2", DEVNAME(sc));
352 			}
353 
354 			sc->sc_vol_list = (struct mpi_cfg_raid_vol *)
355 			    (sc->sc_vol_page + 1);
356 
357 			sc->sc_ioctl = mpi_ioctl;
358 		}
359 	}
360 #endif /* NBIO > 0 */
361 
362 	/* we should be good to go now, attach scsibus */
363 	sc->sc_link.adapter = &mpi_switch;
364 	sc->sc_link.adapter_softc = sc;
365 	sc->sc_link.adapter_target = sc->sc_target;
366 	sc->sc_link.adapter_buswidth = sc->sc_buswidth;
367 	sc->sc_link.openings = MAX(sc->sc_maxcmds / sc->sc_buswidth, 16);
368 	sc->sc_link.pool = &sc->sc_iopool;
369 
370 	memset(&saa, 0, sizeof(saa));
371 	saa.saa_sc_link = &sc->sc_link;
372 
373 	/* config_found() returns the scsibus attached to us */
374 	sc->sc_scsibus = (struct scsibus_softc *) config_found(&sc->sc_dev,
375 	    &saa, scsiprint);
376 
377 	/* do domain validation */
378 	if (sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_SCSI)
379 		mpi_run_ppr(sc);
380 
381 	/* enable interrupts */
382 	mpi_write(sc, MPI_INTR_MASK, MPI_INTR_MASK_DOORBELL);
383 
384 #if NBIO > 0
385 #ifndef SMALL_KERNEL
386 	mpi_create_sensors(sc);
387 #endif /* SMALL_KERNEL */
388 #endif /* NBIO > 0 */
389 
390 	return (0);
391 
392 free_replies:
393 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 0,
394 	    sc->sc_repq * MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD);
395 	mpi_dmamem_free(sc, sc->sc_replies);
396 free_ccbs:
397 	while ((ccb = mpi_get_ccb(sc)) != NULL)
398 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
399 	mpi_dmamem_free(sc, sc->sc_requests);
400 	free(sc->sc_ccbs, M_DEVBUF, 0);
401 
402 	return (1);
403 }
404 
405 int
406 mpi_cfg_spi_port(struct mpi_softc *sc)
407 {
408 	struct mpi_cfg_hdr		hdr;
409 	struct mpi_cfg_spi_port_pg1	port;
410 
411 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 1, 0x0,
412 	    &hdr) != 0)
413 		return (1);
414 
415 	if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port, sizeof(port)) != 0)
416 		return (1);
417 
418 	DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_spi_port_pg1\n", DEVNAME(sc));
419 	DNPRINTF(MPI_D_MISC, "%s:  port_scsi_id: %d port_resp_ids 0x%04x\n",
420 	    DEVNAME(sc), port.port_scsi_id, letoh16(port.port_resp_ids));
421 	DNPRINTF(MPI_D_MISC, "%s:  on_bus_timer_value: 0x%08x\n", DEVNAME(sc),
422 	    letoh32(port.on_bus_timer_value));
423 	DNPRINTF(MPI_D_MISC, "%s:  target_config: 0x%02x id_config: 0x%04x\n",
424 	    DEVNAME(sc), port.target_config, letoh16(port.id_config));
425 
426 	if (port.port_scsi_id == sc->sc_target &&
427 	    port.port_resp_ids == htole16(1 << sc->sc_target) &&
428 	    port.on_bus_timer_value != htole32(0x0))
429 		return (0);
430 
431 	DNPRINTF(MPI_D_MISC, "%s: setting port scsi id to %d\n", DEVNAME(sc),
432 	    sc->sc_target);
433 	port.port_scsi_id = sc->sc_target;
434 	port.port_resp_ids = htole16(1 << sc->sc_target);
435 	port.on_bus_timer_value = htole32(0x07000000); /* XXX magic */
436 
437 	if (mpi_cfg_page(sc, 0x0, &hdr, 0, &port, sizeof(port)) != 0) {
438 		printf("%s: unable to configure port scsi id\n", DEVNAME(sc));
439 		return (1);
440 	}
441 
442 	return (0);
443 }
444 
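/*
 * Reset the requested transfer parameters (async, narrow) in SPI device
 * page 1 for every target so later negotiation starts from a clean slate.
 */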
445 void
446 mpi_squash_ppr(struct mpi_softc *sc)
447 {
448 	struct mpi_cfg_hdr		hdr;
449 	struct mpi_cfg_spi_dev_pg1	page;
450 	int				i;
451 
452 	DNPRINTF(MPI_D_PPR, "%s: mpi_squash_ppr\n", DEVNAME(sc));
453 
454 	for (i = 0; i < sc->sc_buswidth; i++) {
455 		if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV,
456 		    1, i, &hdr) != 0)
457 			return;
458 
459 		if (mpi_cfg_page(sc, i, &hdr, 1, &page, sizeof(page)) != 0)
460 			return;
461 
462 		DNPRINTF(MPI_D_PPR, "%s:  target: %d req_params1: 0x%02x "
463 		    "req_offset: 0x%02x req_period: 0x%02x "
464 		    "req_params2: 0x%02x conf: 0x%08x\n", DEVNAME(sc), i,
465 		    page.req_params1, page.req_offset, page.req_period,
466 		    page.req_params2, letoh32(page.configuration));
467 
468 		page.req_params1 = 0x0;
469 		page.req_offset = 0x0;
470 		page.req_period = 0x0;
471 		page.req_params2 = 0x0;
472 		page.configuration = htole32(0x0);
473 
474 		if (mpi_cfg_page(sc, i, &hdr, 0, &page, sizeof(page)) != 0)
475 			return;
476 	}
477 }
478 
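/*
 * Walk every attached target (and, on RAID setups, every physical disk
 * hidden behind a volume) and negotiate transfer parameters with
 * mpi_ppr(), retrying at a slower speed whenever the negotiation is
 * rejected.
 */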
479 void
480 mpi_run_ppr(struct mpi_softc *sc)
481 {
482 	struct mpi_cfg_hdr		hdr;
483 	struct mpi_cfg_spi_port_pg0	port_pg;
484 	struct mpi_cfg_ioc_pg3		*physdisk_pg;
485 	struct mpi_cfg_raid_physdisk	*physdisk_list, *physdisk;
486 	size_t				pagelen;
487 	struct scsi_link		*link;
488 	int				i, tries;
489 
490 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 0, 0x0,
491 	    &hdr) != 0) {
492 		DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch header\n",
493 		    DEVNAME(sc));
494 		return;
495 	}
496 
497 	if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port_pg, sizeof(port_pg)) != 0) {
498 		DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch page\n",
499 		    DEVNAME(sc));
500 		return;
501 	}
502 
503 	for (i = 0; i < sc->sc_buswidth; i++) {
504 		link = scsi_get_link(sc->sc_scsibus, i, 0);
505 		if (link == NULL)
506 			continue;
507 
508 		/* do not ppr volumes */
509 		if (link->flags & SDEV_VIRTUAL)
510 			continue;
511 
512 		tries = 0;
513 		while (mpi_ppr(sc, link, NULL, port_pg.min_period,
514 		    port_pg.max_offset, tries) == EAGAIN)
515 			tries++;
516 	}
517 
518 	if ((sc->sc_flags & MPI_F_RAID) == 0)
519 		return;
520 
521 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 3, 0x0,
522 	    &hdr) != 0) {
523 		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
524 		    "fetch ioc pg 3 header\n", DEVNAME(sc));
525 		return;
526 	}
527 
528 	pagelen = hdr.page_length * 4; /* dwords to bytes */
529 	physdisk_pg = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL);
530 	if (physdisk_pg == NULL) {
531 		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
532 		    "allocate ioc pg 3\n", DEVNAME(sc));
533 		return;
534 	}
535 	physdisk_list = (struct mpi_cfg_raid_physdisk *)(physdisk_pg + 1);
536 
537 	if (mpi_cfg_page(sc, 0, &hdr, 1, physdisk_pg, pagelen) != 0) {
538 		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
539 		    "fetch ioc page 3\n", DEVNAME(sc));
540 		goto out;
541 	}
542 
543 	DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s:  no_phys_disks: %d\n", DEVNAME(sc),
544 	    physdisk_pg->no_phys_disks);
545 
546 	for (i = 0; i < physdisk_pg->no_phys_disks; i++) {
547 		physdisk = &physdisk_list[i];
548 
549 		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s:  id: %d bus: %d ioc: %d "
550 		    "num: %d\n", DEVNAME(sc), physdisk->phys_disk_id,
551 		    physdisk->phys_disk_bus, physdisk->phys_disk_ioc,
552 		    physdisk->phys_disk_num);
553 
554 		if (physdisk->phys_disk_ioc != sc->sc_ioc_number)
555 			continue;
556 
557 		tries = 0;
558 		while (mpi_ppr(sc, NULL, physdisk, port_pg.min_period,
559 		    port_pg.max_offset, tries) == EAGAIN)
560 			tries++;
561 	}
562 
563 out:
564 	free(physdisk_pg, M_TEMP, 0);
565 }
566 
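/*
 * One negotiation attempt: write the requested period/offset into SPI
 * device page 1, issue an INQUIRY to force the IOC to renegotiate, then
 * read page 0 back to see what was actually agreed on.  EAGAIN asks the
 * caller to retry at the next slower speed (U320 -> U160 -> U80).
 */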
567 int
568 mpi_ppr(struct mpi_softc *sc, struct scsi_link *link,
569     struct mpi_cfg_raid_physdisk *physdisk, int period, int offset, int try)
570 {
571 	struct mpi_cfg_hdr		hdr0, hdr1;
572 	struct mpi_cfg_spi_dev_pg0	pg0;
573 	struct mpi_cfg_spi_dev_pg1	pg1;
574 	u_int32_t			address;
575 	int				id;
576 	int				raid = 0;
577 
578 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr period: %d offset: %d try: %d "
579 	    "link quirks: 0x%x\n", DEVNAME(sc), period, offset, try,
580 	    link->quirks);
581 
582 	if (try >= 3)
583 		return (EIO);
584 
585 	if (physdisk == NULL) {
586 		if ((link->inqdata.device & SID_TYPE) == T_PROCESSOR)
587 			return (EIO);
588 
589 		address = link->target;
590 		id = link->target;
591 	} else {
592 		raid = 1;
593 		address = (physdisk->phys_disk_bus << 8) |
594 		    (physdisk->phys_disk_id);
595 		id = physdisk->phys_disk_num;
596 	}
597 
598 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 0,
599 	    address, &hdr0) != 0) {
600 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 0\n",
601 		    DEVNAME(sc));
602 		return (EIO);
603 	}
604 
605 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 1,
606 	    address, &hdr1) != 0) {
607 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 1\n",
608 		    DEVNAME(sc));
609 		return (EIO);
610 	}
611 
612 #ifdef MPI_DEBUG
613 	if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0)) != 0) {
614 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 0\n",
615 		    DEVNAME(sc));
616 		return (EIO);
617 	}
618 
619 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x "
620 	    "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x "
621 	    "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset,
622 	    pg0.neg_period, pg0.neg_params2, letoh32(pg0.information));
623 #endif
624 
625 	if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1)) != 0) {
626 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 1\n",
627 		    DEVNAME(sc));
628 		return (EIO);
629 	}
630 
631 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
632 	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
633 	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
634 	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
635 
636 	pg1.req_params1 = 0;
637 	pg1.req_offset = offset;
638 	pg1.req_period = period;
639 	pg1.req_params2 &= ~MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH;
640 
641 	if (raid || !(link->quirks & SDEV_NOSYNC)) {
642 		pg1.req_params2 |= MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH_WIDE;
643 
644 		switch (try) {
645 		case 0: /* U320 */
646 			break;
647 		case 1: /* U160 */
648 			pg1.req_period = 0x09;
649 			break;
650 		case 2: /* U80 */
651 			pg1.req_period = 0x0a;
652 			break;
653 		}
654 
655 		if (pg1.req_period < 0x09) {
656 			/* Ultra320: enable QAS & PACKETIZED */
657 			pg1.req_params1 |= MPI_CFG_SPI_DEV_1_REQPARAMS_QAS |
658 			    MPI_CFG_SPI_DEV_1_REQPARAMS_PACKETIZED;
659 		}
660 		if (pg1.req_period < 0xa) {
661 			/* >= Ultra160: enable dual xfers */
662 			pg1.req_params1 |=
663 			    MPI_CFG_SPI_DEV_1_REQPARAMS_DUALXFERS;
664 		}
665 	}
666 
667 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
668 	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
669 	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
670 	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
671 
672 	if (mpi_cfg_page(sc, address, &hdr1, 0, &pg1, sizeof(pg1)) != 0) {
673 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to write page 1\n",
674 		    DEVNAME(sc));
675 		return (EIO);
676 	}
677 
678 	if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1)) != 0) {
679 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 1\n",
680 		    DEVNAME(sc));
681 		return (EIO);
682 	}
683 
684 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
685 	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
686 	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
687 	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
688 
689 	if (mpi_inq(sc, id, raid) != 0) {
690 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to do inquiry against "
691 		    "target %d\n", DEVNAME(sc), link->target);
692 		return (EIO);
693 	}
694 
695 	if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0)) != 0) {
696 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 0 after "
697 		    "inquiry\n", DEVNAME(sc));
698 		return (EIO);
699 	}
700 
701 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x "
702 	    "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x "
703 	    "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset,
704 	    pg0.neg_period, pg0.neg_params2, letoh32(pg0.information));
705 
706 	if (!(lemtoh32(&pg0.information) & 0x07) && (try == 0)) {
707 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U320 ppr rejected\n",
708 		    DEVNAME(sc));
709 		return (EAGAIN);
710 	}
711 
712 	if ((((lemtoh32(&pg0.information) >> 8) & 0xff) > 0x09) && (try == 1)) {
713 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U160 ppr rejected\n",
714 		    DEVNAME(sc));
715 		return (EAGAIN);
716 	}
717 
718 	if (lemtoh32(&pg0.information) & 0x0e) {
719 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr ppr rejected: %0x\n",
720 		    DEVNAME(sc), lemtoh32(&pg0.information));
721 		return (EAGAIN);
722 	}
723 
724 	switch(pg0.neg_period) {
725 	case 0x08:
726 		period = 160;
727 		break;
728 	case 0x09:
729 		period = 80;
730 		break;
731 	case 0x0a:
732 		period = 40;
733 		break;
734 	case 0x0b:
735 		period = 20;
736 		break;
737 	case 0x0c:
738 		period = 10;
739 		break;
740 	default:
741 		period = 0;
742 		break;
743 	}
744 
745 	printf("%s: %s %d %s at %dMHz width %dbit offset %d "
746 	    "QAS %d DT %d IU %d\n", DEVNAME(sc), raid ? "phys disk" : "target",
747 	    id, period ? "Sync" : "Async", period,
748 	    (pg0.neg_params2 & MPI_CFG_SPI_DEV_0_NEGPARAMS_WIDTH_WIDE) ? 16 : 8,
749 	    pg0.neg_offset,
750 	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_QAS) ? 1 : 0,
751 	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_DUALXFERS) ? 1 : 0,
752 	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_PACKETIZED) ? 1 : 0);
753 
754 	return (0);
755 }
756 
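/*
 * Hand-rolled INQUIRY sent straight to the IOC (bypassing the scsi
 * midlayer) so a PPR negotiation actually happens on the wire.  The CDB,
 * inquiry data and sense buffer all live inside the ccb's request frame
 * bundle.
 */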
757 int
758 mpi_inq(struct mpi_softc *sc, u_int16_t target, int physdisk)
759 {
760 	struct mpi_ccb			*ccb;
761 	struct scsi_inquiry		inq;
762 	struct {
763 		struct mpi_msg_scsi_io		io;
764 		struct mpi_sge			sge;
765 		struct scsi_inquiry_data	inqbuf;
766 		struct scsi_sense_data		sense;
767 	} __packed			*bundle;
768 	struct mpi_msg_scsi_io		*io;
769 	struct mpi_sge			*sge;
770 
771 	DNPRINTF(MPI_D_PPR, "%s: mpi_inq\n", DEVNAME(sc));
772 
773 	memset(&inq, 0, sizeof(inq));
774 	inq.opcode = INQUIRY;
775 	_lto2b(sizeof(struct scsi_inquiry_data), inq.length);
776 
777 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
778 	if (ccb == NULL)
779 		return (1);
780 
781 	ccb->ccb_done = mpi_empty_done;
782 
783 	bundle = ccb->ccb_cmd;
784 	io = &bundle->io;
785 	sge = &bundle->sge;
786 
787 	io->function = physdisk ? MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH :
788 	    MPI_FUNCTION_SCSI_IO_REQUEST;
789 	/*
790 	 * bus is always 0
791 	 * io->bus = htole16(sc->sc_bus);
792 	 */
793 	io->target_id = target;
794 
795 	io->cdb_length = sizeof(inq);
796 	io->sense_buf_len = sizeof(struct scsi_sense_data);
797 	io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64;
798 
799 	/*
800 	 * always lun 0
801 	 * io->lun[0] = htobe16(link->lun);
802 	 */
803 
804 	io->direction = MPI_SCSIIO_DIR_READ;
805 	io->tagging = MPI_SCSIIO_ATTR_NO_DISCONNECT;
806 
807 	memcpy(io->cdb, &inq, sizeof(inq));
808 
809 	htolem32(&io->data_length, sizeof(struct scsi_inquiry_data));
810 
811 	htolem32(&io->sense_buf_low_addr, ccb->ccb_cmd_dva +
812 	    ((u_int8_t *)&bundle->sense - (u_int8_t *)bundle));
813 
814 	htolem32(&sge->sg_hdr, MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64 |
815 	    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL |
816 	    (u_int32_t)sizeof(inq));
817 
818 	mpi_dvatosge(sge, ccb->ccb_cmd_dva +
819 	    ((u_int8_t *)&bundle->inqbuf - (u_int8_t *)bundle));
820 
821 	if (mpi_poll(sc, ccb, 5000) != 0)
822 		return (1);
823 
824 	if (ccb->ccb_rcb != NULL)
825 		mpi_push_reply(sc, ccb->ccb_rcb);
826 
827 	scsi_io_put(&sc->sc_iopool, ccb);
828 
829 	return (0);
830 }
831 
832 int
833 mpi_cfg_sas(struct mpi_softc *sc)
834 {
835 	struct mpi_ecfg_hdr		ehdr;
836 	struct mpi_cfg_sas_iou_pg1	*pg;
837 	size_t				pagelen;
838 	int				rv = 0;
839 
840 	if (mpi_ecfg_header(sc, MPI_CONFIG_REQ_EXTPAGE_TYPE_SAS_IO_UNIT, 1, 0,
841 	    &ehdr) != 0)
842 		return (0);
843 
844 	pagelen = lemtoh16(&ehdr.ext_page_length) * 4;
845 	pg = malloc(pagelen, M_TEMP, M_NOWAIT | M_ZERO);
846 	if (pg == NULL)
847 		return (ENOMEM);
848 
849 	if (mpi_ecfg_page(sc, 0, &ehdr, 1, pg, pagelen) != 0)
850 		goto out;
851 
852 	if (pg->max_sata_q_depth != 32) {
853 		pg->max_sata_q_depth = 32;
854 
855 		if (mpi_ecfg_page(sc, 0, &ehdr, 0, pg, pagelen) != 0)
856 			goto out;
857 	}
858 
859 out:
860 	free(pg, M_TEMP, 0);
861 	return (rv);
862 }
863 
864 int
865 mpi_cfg_fc(struct mpi_softc *sc)
866 {
867 	struct mpi_cfg_hdr		hdr;
868 	struct mpi_cfg_fc_port_pg0	pg0;
869 	struct mpi_cfg_fc_port_pg1	pg1;
870 
871 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 0, 0,
872 	    &hdr) != 0) {
873 		printf("%s: unable to fetch FC port header 0\n", DEVNAME(sc));
874 		return (1);
875 	}
876 
877 	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg0, sizeof(pg0)) != 0) {
878 		printf("%s: unable to fetch FC port page 0\n", DEVNAME(sc));
879 		return (1);
880 	}
881 
882 	sc->sc_link.port_wwn = letoh64(pg0.wwpn);
883 	sc->sc_link.node_wwn = letoh64(pg0.wwnn);
884 
885 	/* configure port config more to our liking */
886 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 1, 0,
887 	    &hdr) != 0) {
888 		printf("%s: unable to fetch FC port header 1\n", DEVNAME(sc));
889 		return (1);
890 	}
891 
892 	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg1, sizeof(pg1)) != 0) {
893 		printf("%s: unable to fetch FC port page 1\n", DEVNAME(sc));
894 		return (1);
895 	}
896 
897 	SET(pg1.flags, htole32(MPI_CFG_FC_PORT_0_FLAGS_IMMEDIATE_ERROR |
898 	    MPI_CFG_FC_PORT_0_FLAGS_VERBOSE_RESCAN));
899 
900 	if (mpi_cfg_page(sc, 0, &hdr, 0, &pg1, sizeof(pg1)) != 0) {
901 		printf("%s: unable to set FC port page 1\n", DEVNAME(sc));
902 		return (1);
903 	}
904 
905 	return (0);
906 }
907 
908 void
909 mpi_detach(struct mpi_softc *sc)
910 {
911 
912 }
913 
914 int
915 mpi_intr(void *arg)
916 {
917 	struct mpi_softc		*sc = arg;
918 	u_int32_t			reg;
919 	int				rv = 0;
920 
921 	if ((mpi_read_intr(sc) & MPI_INTR_STATUS_REPLY) == 0)
922 		return (rv);
923 
924 	while ((reg = mpi_pop_reply(sc)) != 0xffffffff) {
925 		mpi_reply(sc, reg);
926 		rv = 1;
927 	}
928 
929 	return (rv);
930 }
931 
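/*
 * Decode a value popped off the reply queue.  Address replies carry the
 * DVA of a reply frame (shifted right by one) which is mapped back to its
 * rcb and the ccb id is taken from the reply's message context; context
 * replies carry the ccb id directly.  Either way the matching ccb is
 * completed.
 */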
932 void
933 mpi_reply(struct mpi_softc *sc, u_int32_t reg)
934 {
935 	struct mpi_ccb			*ccb;
936 	struct mpi_rcb			*rcb = NULL;
937 	struct mpi_msg_reply		*reply = NULL;
938 	u_int32_t			reply_dva;
939 	int				id;
940 	int				i;
941 
942 	DNPRINTF(MPI_D_INTR, "%s: mpi_reply reg: 0x%08x\n", DEVNAME(sc), reg);
943 
944 	if (reg & MPI_REPLY_QUEUE_ADDRESS) {
945 		reply_dva = (reg & MPI_REPLY_QUEUE_ADDRESS_MASK) << 1;
946 		i = (reply_dva - (u_int32_t)MPI_DMA_DVA(sc->sc_replies)) /
947 		    MPI_REPLY_SIZE;
948 		rcb = &sc->sc_rcbs[i];
949 
950 		bus_dmamap_sync(sc->sc_dmat,
951 		    MPI_DMA_MAP(sc->sc_replies), rcb->rcb_offset,
952 		    MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD);
953 
954 		reply = rcb->rcb_reply;
955 
956 		id = lemtoh32(&reply->msg_context);
957 	} else {
958 		switch (reg & MPI_REPLY_QUEUE_TYPE_MASK) {
959 		case MPI_REPLY_QUEUE_TYPE_INIT:
960 			id = reg & MPI_REPLY_QUEUE_CONTEXT;
961 			break;
962 
963 		default:
964 			panic("%s: unsupported context reply",
965 			    DEVNAME(sc));
966 		}
967 	}
968 
969 	DNPRINTF(MPI_D_INTR, "%s: mpi_reply id: %d reply: %p\n",
970 	    DEVNAME(sc), id, reply);
971 
972 	ccb = &sc->sc_ccbs[id];
973 
974 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests),
975 	    ccb->ccb_offset, MPI_REQUEST_SIZE,
976 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
977 	ccb->ccb_state = MPI_CCB_READY;
978 	ccb->ccb_rcb = rcb;
979 
980 	ccb->ccb_done(ccb);
981 }
982 
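/*
 * Allocate a single physically contiguous, kernel-mapped, bus-loaded DMA
 * buffer: the usual bus_dma create/alloc/map/load sequence, unwound in
 * reverse on failure.
 */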
983 struct mpi_dmamem *
984 mpi_dmamem_alloc(struct mpi_softc *sc, size_t size)
985 {
986 	struct mpi_dmamem		*mdm;
987 	int				nsegs;
988 
989 	mdm = malloc(sizeof(struct mpi_dmamem), M_DEVBUF, M_NOWAIT | M_ZERO);
990 	if (mdm == NULL)
991 		return (NULL);
992 
993 	mdm->mdm_size = size;
994 
995 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
996 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
997 		goto mdmfree;
998 
999 	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg,
1000 	    1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
1001 		goto destroy;
1002 
1003 	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
1004 	    &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0)
1005 		goto free;
1006 
1007 	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
1008 	    NULL, BUS_DMA_NOWAIT) != 0)
1009 		goto unmap;
1010 
1011 	DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_alloc size: %d mdm: %#x "
1012 	    "map: %#x nsegs: %d segs: %#x kva: %x\n",
1013 	    DEVNAME(sc), size, mdm, mdm->mdm_map, nsegs, mdm->mdm_seg,
	    mdm->mdm_kva);
1014 
1015 	return (mdm);
1016 
1017 unmap:
1018 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
1019 free:
1020 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
1021 destroy:
1022 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
1023 mdmfree:
1024 	free(mdm, M_DEVBUF, 0);
1025 
1026 	return (NULL);
1027 }
1028 
1029 void
1030 mpi_dmamem_free(struct mpi_softc *sc, struct mpi_dmamem *mdm)
1031 {
1032 	DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_free %#x\n", DEVNAME(sc), mdm);
1033 
1034 	bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map);
1035 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
1036 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
1037 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
1038 	free(mdm, M_DEVBUF, 0);
1039 }
1040 
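/*
 * Carve the request DMA memory into MPI_REQUEST_SIZE slots, one per ccb,
 * create a data dmamap for each, and seed the free list backing the
 * iopool.
 */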
1041 int
1042 mpi_alloc_ccbs(struct mpi_softc *sc)
1043 {
1044 	struct mpi_ccb			*ccb;
1045 	u_int8_t			*cmd;
1046 	int				i;
1047 
1048 	SLIST_INIT(&sc->sc_ccb_free);
1049 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
1050 
1051 	sc->sc_ccbs = mallocarray(sc->sc_maxcmds, sizeof(struct mpi_ccb),
1052 	    M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
1053 	if (sc->sc_ccbs == NULL) {
1054 		printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
1055 		return (1);
1056 	}
1057 
1058 	sc->sc_requests = mpi_dmamem_alloc(sc,
1059 	    MPI_REQUEST_SIZE * sc->sc_maxcmds);
1060 	if (sc->sc_requests == NULL) {
1061 		printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
1062 		goto free_ccbs;
1063 	}
1064 	cmd = MPI_DMA_KVA(sc->sc_requests);
1065 	memset(cmd, 0, MPI_REQUEST_SIZE * sc->sc_maxcmds);
1066 
1067 	for (i = 0; i < sc->sc_maxcmds; i++) {
1068 		ccb = &sc->sc_ccbs[i];
1069 
1070 		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS,
1071 		    sc->sc_max_sgl_len, MAXPHYS, 0,
1072 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1073 		    &ccb->ccb_dmamap) != 0) {
1074 			printf("%s: unable to create dma map\n", DEVNAME(sc));
1075 			goto free_maps;
1076 		}
1077 
1078 		ccb->ccb_sc = sc;
1079 		ccb->ccb_id = i;
1080 		ccb->ccb_offset = MPI_REQUEST_SIZE * i;
1081 		ccb->ccb_state = MPI_CCB_READY;
1082 
1083 		ccb->ccb_cmd = &cmd[ccb->ccb_offset];
1084 		ccb->ccb_cmd_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_requests) +
1085 		    ccb->ccb_offset;
1086 
1087 		DNPRINTF(MPI_D_CCB, "%s: mpi_alloc_ccbs(%d) ccb: %#x map: %#x "
1088 		    "sc: %#x id: %#x offs: %#x cmd: %#x dva: %#x\n",
1089 		    DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc,
1090 		    ccb->ccb_id, ccb->ccb_offset, ccb->ccb_cmd,
1091 		    ccb->ccb_cmd_dva);
1092 
1093 		mpi_put_ccb(sc, ccb);
1094 	}
1095 
1096 	scsi_iopool_init(&sc->sc_iopool, sc, mpi_get_ccb, mpi_put_ccb);
1097 
1098 	return (0);
1099 
1100 free_maps:
1101 	while ((ccb = mpi_get_ccb(sc)) != NULL)
1102 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
1103 
1104 	mpi_dmamem_free(sc, sc->sc_requests);
1105 free_ccbs:
1106 	free(sc->sc_ccbs, M_DEVBUF, 0);
1107 
1108 	return (1);
1109 }
1110 
1111 void *
1112 mpi_get_ccb(void *xsc)
1113 {
1114 	struct mpi_softc		*sc = xsc;
1115 	struct mpi_ccb			*ccb;
1116 
1117 	mtx_enter(&sc->sc_ccb_mtx);
1118 	ccb = SLIST_FIRST(&sc->sc_ccb_free);
1119 	if (ccb != NULL) {
1120 		SLIST_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link);
1121 		ccb->ccb_state = MPI_CCB_READY;
1122 	}
1123 	mtx_leave(&sc->sc_ccb_mtx);
1124 
1125 	DNPRINTF(MPI_D_CCB, "%s: mpi_get_ccb %p\n", DEVNAME(sc), ccb);
1126 
1127 	return (ccb);
1128 }
1129 
1130 void
1131 mpi_put_ccb(void *xsc, void *io)
1132 {
1133 	struct mpi_softc		*sc = xsc;
1134 	struct mpi_ccb			*ccb = io;
1135 
1136 	DNPRINTF(MPI_D_CCB, "%s: mpi_put_ccb %p\n", DEVNAME(sc), ccb);
1137 
1138 #ifdef DIAGNOSTIC
1139 	if (ccb->ccb_state == MPI_CCB_FREE)
1140 		panic("mpi_put_ccb: double free");
1141 #endif
1142 
1143 	ccb->ccb_state = MPI_CCB_FREE;
1144 	ccb->ccb_cookie = NULL;
1145 	ccb->ccb_done = NULL;
1146 	memset(ccb->ccb_cmd, 0, MPI_REQUEST_SIZE);
1147 	mtx_enter(&sc->sc_ccb_mtx);
1148 	SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link);
1149 	mtx_leave(&sc->sc_ccb_mtx);
1150 }
1151 
1152 int
1153 mpi_alloc_replies(struct mpi_softc *sc)
1154 {
1155 	DNPRINTF(MPI_D_MISC, "%s: mpi_alloc_replies\n", DEVNAME(sc));
1156 
1157 	sc->sc_rcbs = mallocarray(sc->sc_repq, sizeof(struct mpi_rcb), M_DEVBUF,
1158 	    M_WAITOK|M_CANFAIL);
1159 	if (sc->sc_rcbs == NULL)
1160 		return (1);
1161 
1162 	sc->sc_replies = mpi_dmamem_alloc(sc, sc->sc_repq * MPI_REPLY_SIZE);
1163 	if (sc->sc_replies == NULL) {
1164 		free(sc->sc_rcbs, M_DEVBUF, 0);
1165 		return (1);
1166 	}
1167 
1168 	return (0);
1169 }
1170 
1171 void
1172 mpi_push_reply(struct mpi_softc *sc, struct mpi_rcb *rcb)
1173 {
1174 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies),
1175 	    rcb->rcb_offset, MPI_REPLY_SIZE, BUS_DMASYNC_PREREAD);
1176 	mpi_push_reply_db(sc, rcb->rcb_reply_dva);
1177 }
1178 
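/*
 * Point each rcb at its slice of the reply DMA memory and post every
 * reply frame address to the IOC's reply free queue.
 */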
1179 void
1180 mpi_push_replies(struct mpi_softc *sc)
1181 {
1182 	struct mpi_rcb			*rcb;
1183 	char				*kva = MPI_DMA_KVA(sc->sc_replies);
1184 	int				i;
1185 
1186 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 0,
1187 	    sc->sc_repq * MPI_REPLY_SIZE, BUS_DMASYNC_PREREAD);
1188 
1189 	for (i = 0; i < sc->sc_repq; i++) {
1190 		rcb = &sc->sc_rcbs[i];
1191 
1192 		rcb->rcb_reply = kva + MPI_REPLY_SIZE * i;
1193 		rcb->rcb_offset = MPI_REPLY_SIZE * i;
1194 		rcb->rcb_reply_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_replies) +
1195 		    MPI_REPLY_SIZE * i;
1196 		mpi_push_reply_db(sc, rcb->rcb_reply_dva);
1197 	}
1198 }
1199 
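/*
 * Stamp the ccb id into the request's message context (so the completion
 * can be matched back up in mpi_reply()) and post the request frame's DVA
 * to the request queue register.
 */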
1200 void
1201 mpi_start(struct mpi_softc *sc, struct mpi_ccb *ccb)
1202 {
1203 	struct mpi_msg_request *msg;
1204 
1205 	DNPRINTF(MPI_D_RW, "%s: mpi_start %#x\n", DEVNAME(sc),
1206 	    ccb->ccb_cmd_dva);
1207 
1208 	msg = ccb->ccb_cmd;
1209 	htolem32(&msg->msg_context, ccb->ccb_id);
1210 
1211 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests),
1212 	    ccb->ccb_offset, MPI_REQUEST_SIZE,
1213 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1214 
1215 	ccb->ccb_state = MPI_CCB_QUEUED;
1216 	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1217 	    MPI_REQ_QUEUE, ccb->ccb_cmd_dva);
1218 }
1219 
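/*
 * Polled completion: temporarily substitute mpi_poll_done() as the ccb
 * callback and spin on the reply register, delaying 1ms per iteration,
 * until the ccb completes or roughly "timeout" milliseconds have passed.
 */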
1220 int
1221 mpi_poll(struct mpi_softc *sc, struct mpi_ccb *ccb, int timeout)
1222 {
1223 	void				(*done)(struct mpi_ccb *);
1224 	void				*cookie;
1225 	int				rv = 1;
1226 	u_int32_t			reg;
1227 
1228 	DNPRINTF(MPI_D_INTR, "%s: mpi_poll timeout %d\n", DEVNAME(sc),
1229 	    timeout);
1230 
1231 	done = ccb->ccb_done;
1232 	cookie = ccb->ccb_cookie;
1233 
1234 	ccb->ccb_done = mpi_poll_done;
1235 	ccb->ccb_cookie = &rv;
1236 
1237 	mpi_start(sc, ccb);
1238 	while (rv == 1) {
1239 		reg = mpi_pop_reply(sc);
1240 		if (reg == 0xffffffff) {
1241 			if (timeout-- == 0) {
1242 				printf("%s: timeout\n", DEVNAME(sc));
1243 				goto timeout;
1244 			}
1245 
1246 			delay(1000);
1247 			continue;
1248 		}
1249 
1250 		mpi_reply(sc, reg);
1251 	}
1252 
1253 	ccb->ccb_cookie = cookie;
1254 	done(ccb);
1255 
1256 timeout:
1257 	return (rv);
1258 }
1259 
1260 void
1261 mpi_poll_done(struct mpi_ccb *ccb)
1262 {
1263 	int				*rv = ccb->ccb_cookie;
1264 
1265 	*rv = 0;
1266 }
1267 
1268 void
1269 mpi_wait(struct mpi_softc *sc, struct mpi_ccb *ccb)
1270 {
1271 	struct mutex			cookie = MUTEX_INITIALIZER(IPL_BIO);
1272 	void				(*done)(struct mpi_ccb *);
1273 
1274 	done = ccb->ccb_done;
1275 	ccb->ccb_done = mpi_wait_done;
1276 	ccb->ccb_cookie = &cookie;
1277 
1278 	/* XXX this will wait forever for the ccb to complete */
1279 
1280 	mpi_start(sc, ccb);
1281 
1282 	mtx_enter(&cookie);
1283 	while (ccb->ccb_cookie != NULL)
1284 		msleep(ccb, &cookie, PRIBIO, "mpiwait", 0);
1285 	mtx_leave(&cookie);
1286 
1287 	done(ccb);
1288 }
1289 
1290 void
1291 mpi_wait_done(struct mpi_ccb *ccb)
1292 {
1293 	struct mutex			*cookie = ccb->ccb_cookie;
1294 
1295 	mtx_enter(cookie);
1296 	ccb->ccb_cookie = NULL;
1297 	wakeup_one(ccb);
1298 	mtx_leave(cookie);
1299 }
1300 
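/*
 * Queue a SCSI command from the midlayer: translate the scsi_xfer into an
 * MPI SCSI IO request, load its data buffer into the SGL, then either poll
 * for completion (SCSI_POLL) or post it and return.
 */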
1301 void
1302 mpi_scsi_cmd(struct scsi_xfer *xs)
1303 {
1304 	struct scsi_link		*link = xs->sc_link;
1305 	struct mpi_softc		*sc = link->adapter_softc;
1306 	struct mpi_ccb			*ccb;
1307 	struct mpi_ccb_bundle		*mcb;
1308 	struct mpi_msg_scsi_io		*io;
1309 
1310 	DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd\n", DEVNAME(sc));
1311 
1312 	KERNEL_UNLOCK();
1313 
1314 	if (xs->cmdlen > MPI_CDB_LEN) {
1315 		DNPRINTF(MPI_D_CMD, "%s: CDB too big %d\n",
1316 		    DEVNAME(sc), xs->cmdlen);
1317 		memset(&xs->sense, 0, sizeof(xs->sense));
1318 		xs->sense.error_code = SSD_ERRCODE_VALID | SSD_ERRCODE_CURRENT;
1319 		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
1320 		xs->sense.add_sense_code = 0x20;
1321 		xs->error = XS_SENSE;
1322 		goto done;
1323 	}
1324 
1325 	ccb = xs->io;
1326 
1327 	DNPRINTF(MPI_D_CMD, "%s: ccb_id: %d xs->flags: 0x%x\n",
1328 	    DEVNAME(sc), ccb->ccb_id, xs->flags);
1329 
1330 	ccb->ccb_cookie = xs;
1331 	ccb->ccb_done = mpi_scsi_cmd_done;
1332 
1333 	mcb = ccb->ccb_cmd;
1334 	io = &mcb->mcb_io;
1335 
1336 	io->function = MPI_FUNCTION_SCSI_IO_REQUEST;
1337 	/*
1338 	 * bus is always 0
1339 	 * io->bus = htole16(sc->sc_bus);
1340 	 */
1341 	io->target_id = link->target;
1342 
1343 	io->cdb_length = xs->cmdlen;
1344 	io->sense_buf_len = sizeof(xs->sense);
1345 	io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64;
1346 
1347 	htobem16(&io->lun[0], link->lun);
1348 
1349 	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
1350 	case SCSI_DATA_IN:
1351 		io->direction = MPI_SCSIIO_DIR_READ;
1352 		break;
1353 	case SCSI_DATA_OUT:
1354 		io->direction = MPI_SCSIIO_DIR_WRITE;
1355 		break;
1356 	default:
1357 		io->direction = MPI_SCSIIO_DIR_NONE;
1358 		break;
1359 	}
1360 
1361 	if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SCSI &&
1362 	    (link->quirks & SDEV_NOTAGS))
1363 		io->tagging = MPI_SCSIIO_ATTR_UNTAGGED;
1364 	else
1365 		io->tagging = MPI_SCSIIO_ATTR_SIMPLE_Q;
1366 
1367 	memcpy(io->cdb, xs->cmd, xs->cmdlen);
1368 
1369 	htolem32(&io->data_length, xs->datalen);
1370 
1371 	htolem32(&io->sense_buf_low_addr, ccb->ccb_cmd_dva +
1372 	    ((u_int8_t *)&mcb->mcb_sense - (u_int8_t *)mcb));
1373 
1374 	if (mpi_load_xs(ccb) != 0)
1375 		goto stuffup;
1376 
1377 	timeout_set(&xs->stimeout, mpi_timeout_xs, ccb);
1378 
1379 	if (xs->flags & SCSI_POLL) {
1380 		if (mpi_poll(sc, ccb, xs->timeout) != 0)
1381 			goto stuffup;
1382 	} else
1383 		mpi_start(sc, ccb);
1384 
1385 	KERNEL_LOCK();
1386 	return;
1387 
1388 stuffup:
1389 	xs->error = XS_DRIVER_STUFFUP;
1390 done:
1391 	KERNEL_LOCK();
1392 	scsi_done(xs);
1393 }
1394 
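/*
 * Completion path: no reply frame means the IOC finished the command
 * without error.  Otherwise translate the SCSI IO error reply's ioc_status
 * and scsi_status into midlayer xs->error values and copy out any
 * autosense data.
 */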
1395 void
1396 mpi_scsi_cmd_done(struct mpi_ccb *ccb)
1397 {
1398 	struct mpi_softc		*sc = ccb->ccb_sc;
1399 	struct scsi_xfer		*xs = ccb->ccb_cookie;
1400 	struct mpi_ccb_bundle		*mcb = ccb->ccb_cmd;
1401 	bus_dmamap_t			dmap = ccb->ccb_dmamap;
1402 	struct mpi_msg_scsi_io_error	*sie;
1403 
1404 	if (xs->datalen != 0) {
1405 		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
1406 		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
1407 		    BUS_DMASYNC_POSTWRITE);
1408 
1409 		bus_dmamap_unload(sc->sc_dmat, dmap);
1410 	}
1411 
1412 	/* timeout_del */
1413 	xs->error = XS_NOERROR;
1414 	xs->resid = 0;
1415 
1416 	if (ccb->ccb_rcb == NULL) {
1417 		/* no scsi error, we're ok so drop out early */
1418 		xs->status = SCSI_OK;
1419 		KERNEL_LOCK();
1420 		scsi_done(xs);
1421 		KERNEL_UNLOCK();
1422 		return;
1423 	}
1424 
1425 	sie = ccb->ccb_rcb->rcb_reply;
1426 
1427 	DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd_done xs cmd: 0x%02x len: %d "
1428 	    "flags 0x%x\n", DEVNAME(sc), xs->cmd->opcode, xs->datalen,
1429 	    xs->flags);
1430 	DNPRINTF(MPI_D_CMD, "%s:  target_id: %d bus: %d msg_length: %d "
1431 	    "function: 0x%02x\n", DEVNAME(sc), sie->target_id, sie->bus,
1432 	    sie->msg_length, sie->function);
1433 	DNPRINTF(MPI_D_CMD, "%s:  cdb_length: %d sense_buf_length: %d "
1434 	    "msg_flags: 0x%02x\n", DEVNAME(sc), sie->cdb_length,
1435 	    sie->sense_buf_len, sie->msg_flags);
1436 	DNPRINTF(MPI_D_CMD, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
1437 	    letoh32(sie->msg_context));
1438 	DNPRINTF(MPI_D_CMD, "%s:  scsi_status: 0x%02x scsi_state: 0x%02x "
1439 	    "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status,
1440 	    sie->scsi_state, letoh16(sie->ioc_status));
1441 	DNPRINTF(MPI_D_CMD, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
1442 	    letoh32(sie->ioc_loginfo));
1443 	DNPRINTF(MPI_D_CMD, "%s:  transfer_count: %d\n", DEVNAME(sc),
1444 	    letoh32(sie->transfer_count));
1445 	DNPRINTF(MPI_D_CMD, "%s:  sense_count: %d\n", DEVNAME(sc),
1446 	    letoh32(sie->sense_count));
1447 	DNPRINTF(MPI_D_CMD, "%s:  response_info: 0x%08x\n", DEVNAME(sc),
1448 	    letoh32(sie->response_info));
1449 	DNPRINTF(MPI_D_CMD, "%s:  tag: 0x%04x\n", DEVNAME(sc),
1450 	    letoh16(sie->tag));
1451 
1452 	xs->status = sie->scsi_status;
1453 	switch (lemtoh16(&sie->ioc_status)) {
1454 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
1455 		xs->resid = xs->datalen - lemtoh32(&sie->transfer_count);
1456 		if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_NO_SCSI_STATUS) {
1457 			xs->error = XS_DRIVER_STUFFUP;
1458 			break;
1459 		}
1460 		/* FALLTHROUGH */
1461 	case MPI_IOCSTATUS_SUCCESS:
1462 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
1463 		switch (xs->status) {
1464 		case SCSI_OK:
1465 			xs->resid = 0;
1466 			break;
1467 
1468 		case SCSI_CHECK:
1469 			xs->error = XS_SENSE;
1470 			break;
1471 
1472 		case SCSI_BUSY:
1473 		case SCSI_QUEUE_FULL:
1474 			xs->error = XS_BUSY;
1475 			break;
1476 
1477 		default:
1478 			xs->error = XS_DRIVER_STUFFUP;
1479 			break;
1480 		}
1481 		break;
1482 
1483 	case MPI_IOCSTATUS_BUSY:
1484 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
1485 		xs->error = XS_BUSY;
1486 		break;
1487 
1488 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
1489 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
1490 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1491 		xs->error = XS_SELTIMEOUT;
1492 		break;
1493 
1494 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
1495 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
1496 		xs->error = XS_RESET;
1497 		break;
1498 
1499 	default:
1500 		xs->error = XS_DRIVER_STUFFUP;
1501 		break;
1502 	}
1503 
1504 	if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_AUTOSENSE_VALID)
1505 		memcpy(&xs->sense, &mcb->mcb_sense, sizeof(xs->sense));
1506 
1507 	DNPRINTF(MPI_D_CMD, "%s:  xs err: 0x%02x status: %d\n", DEVNAME(sc),
1508 	    xs->error, xs->status);
1509 
1510 	mpi_push_reply(sc, ccb->ccb_rcb);
1511 	KERNEL_LOCK();
1512 	scsi_done(xs);
1513 	KERNEL_UNLOCK();
1514 }
1515 
1516 void
1517 mpi_timeout_xs(void *arg)
1518 {
1519 	/* XXX */
1520 }
1521 
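/*
 * Build the scatter-gather list for a transfer.  SGEs are written into
 * the request frame until the space reserved there runs out; after that a
 * chain element points at further SGEs later in the same frame bundle,
 * repeating until every DMA segment is described.  The final SGE is
 * flagged as the end of the buffer and of the list.
 */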
1522 int
1523 mpi_load_xs(struct mpi_ccb *ccb)
1524 {
1525 	struct mpi_softc		*sc = ccb->ccb_sc;
1526 	struct scsi_xfer		*xs = ccb->ccb_cookie;
1527 	struct mpi_ccb_bundle		*mcb = ccb->ccb_cmd;
1528 	struct mpi_msg_scsi_io		*io = &mcb->mcb_io;
1529 	struct mpi_sge			*sge = NULL;
1530 	struct mpi_sge			*nsge = &mcb->mcb_sgl[0];
1531 	struct mpi_sge			*ce = NULL, *nce;
1532 	bus_dmamap_t			dmap = ccb->ccb_dmamap;
1533 	u_int32_t			addr, flags;
1534 	int				i, error;
1535 
1536 	if (xs->datalen == 0) {
1537 		htolem32(&nsge->sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |
1538 		    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL);
1539 		return (0);
1540 	}
1541 
1542 	error = bus_dmamap_load(sc->sc_dmat, dmap,
1543 	    xs->data, xs->datalen, NULL, BUS_DMA_STREAMING |
1544 	    ((xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK));
1545 	if (error) {
1546 		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
1547 		return (1);
1548 	}
1549 
1550 	flags = MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64;
1551 	if (xs->flags & SCSI_DATA_OUT)
1552 		flags |= MPI_SGE_FL_DIR_OUT;
1553 
1554 	if (dmap->dm_nsegs > sc->sc_first_sgl_len) {
1555 		ce = &mcb->mcb_sgl[sc->sc_first_sgl_len - 1];
1556 		io->chain_offset = (u_int32_t *)ce - (u_int32_t *)io;
1557 	}
1558 
1559 	for (i = 0; i < dmap->dm_nsegs; i++) {
1560 
1561 		if (nsge == ce) {
1562 			nsge++;
1563 			sge->sg_hdr |= htole32(MPI_SGE_FL_LAST);
1564 
1565 			if ((dmap->dm_nsegs - i) > sc->sc_chain_len) {
1566 				nce = &nsge[sc->sc_chain_len - 1];
1567 				addr = (u_int32_t *)nce - (u_int32_t *)nsge;
1568 				addr = addr << 16 |
1569 				    sizeof(struct mpi_sge) * sc->sc_chain_len;
1570 			} else {
1571 				nce = NULL;
1572 				addr = sizeof(struct mpi_sge) *
1573 				    (dmap->dm_nsegs - i);
1574 			}
1575 
1576 			ce->sg_hdr = htole32(MPI_SGE_FL_TYPE_CHAIN |
1577 			    MPI_SGE_FL_SIZE_64 | addr);
1578 
1579 			mpi_dvatosge(ce, ccb->ccb_cmd_dva +
1580 			    ((u_int8_t *)nsge - (u_int8_t *)mcb));
1581 
1582 			ce = nce;
1583 		}
1584 
1585 		DNPRINTF(MPI_D_DMA, "%s:  %d: %d 0x%016llx\n", DEVNAME(sc),
1586 		    i, dmap->dm_segs[i].ds_len,
1587 		    (u_int64_t)dmap->dm_segs[i].ds_addr);
1588 
1589 		sge = nsge++;
1590 
1591 		sge->sg_hdr = htole32(flags | dmap->dm_segs[i].ds_len);
1592 		mpi_dvatosge(sge, dmap->dm_segs[i].ds_addr);
1593 	}
1594 
1595 	/* terminate list */
1596 	sge->sg_hdr |= htole32(MPI_SGE_FL_LAST | MPI_SGE_FL_EOB |
1597 	    MPI_SGE_FL_EOL);
1598 
1599 	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
1600 	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
1601 	    BUS_DMASYNC_PREWRITE);
1602 
1603 	return (0);
1604 }
1605 
1606 void
1607 mpi_minphys(struct buf *bp, struct scsi_link *sl)
1608 {
1609 	/* XXX */
1610 	if (bp->b_bcount > MAXPHYS)
1611 		bp->b_bcount = MAXPHYS;
1612 	minphys(bp);
1613 }
1614 
1615 int
1616 mpi_scsi_probe_virtual(struct scsi_link *link)
1617 {
1618 	struct mpi_softc		*sc = link->adapter_softc;
1619 	struct mpi_cfg_hdr		hdr;
1620 	struct mpi_cfg_raid_vol_pg0	*rp0;
1621 	int				len;
1622 	int				rv;
1623 
1624 	if (!ISSET(sc->sc_flags, MPI_F_RAID))
1625 		return (0);
1626 
1627 	if (link->lun > 0)
1628 		return (0);
1629 
1630 	rv = mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL,
1631 	    0, link->target, MPI_PG_POLL, &hdr);
1632 	if (rv != 0)
1633 		return (0);
1634 
1635 	len = hdr.page_length * 4;
1636 	rp0 = malloc(len, M_TEMP, M_NOWAIT);
1637 	if (rp0 == NULL)
1638 		return (ENOMEM);
1639 
1640 	rv = mpi_req_cfg_page(sc, link->target, MPI_PG_POLL, &hdr, 1, rp0, len);
1641 	if (rv == 0)
1642 		SET(link->flags, SDEV_VIRTUAL);
1643 
1644 	free(rp0, M_TEMP, 0);
1645 	return (0);
1646 }
1647 
1648 int
1649 mpi_scsi_probe(struct scsi_link *link)
1650 {
1651 	struct mpi_softc		*sc = link->adapter_softc;
1652 	struct mpi_ecfg_hdr		ehdr;
1653 	struct mpi_cfg_sas_dev_pg0	pg0;
1654 	u_int32_t			address;
1655 	int				rv;
1656 
1657 	rv = mpi_scsi_probe_virtual(link);
1658 	if (rv != 0)
1659 		return (rv);
1660 
1661 	if (ISSET(link->flags, SDEV_VIRTUAL))
1662 		return (0);
1663 
1664 	if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SAS)
1665 		return (0);
1666 
1667 	address = MPI_CFG_SAS_DEV_ADDR_BUS | link->target;
1668 
1669 	if (mpi_ecfg_header(sc, MPI_CONFIG_REQ_EXTPAGE_TYPE_SAS_DEVICE, 0,
1670 	    address, &ehdr) != 0)
1671 		return (EIO);
1672 
1673 	if (mpi_ecfg_page(sc, address, &ehdr, 1, &pg0, sizeof(pg0)) != 0)
1674 		return (0);
1675 
1676 	DNPRINTF(MPI_D_MISC, "%s: mpi_scsi_probe sas dev pg 0 for target %d:\n",
1677 	    DEVNAME(sc), link->target);
1678 	DNPRINTF(MPI_D_MISC, "%s:  slot: 0x%04x enc_handle: 0x%04x\n",
1679 	    DEVNAME(sc), letoh16(pg0.slot), letoh16(pg0.enc_handle));
1680 	DNPRINTF(MPI_D_MISC, "%s:  sas_addr: 0x%016llx\n", DEVNAME(sc),
1681 	    letoh64(pg0.sas_addr));
1682 	DNPRINTF(MPI_D_MISC, "%s:  parent_dev_handle: 0x%04x phy_num: 0x%02x "
1683 	    "access_status: 0x%02x\n", DEVNAME(sc),
1684 	    letoh16(pg0.parent_dev_handle), pg0.phy_num, pg0.access_status);
1685 	DNPRINTF(MPI_D_MISC, "%s:  dev_handle: 0x%04x "
1686 	    "bus: 0x%02x target: 0x%02x\n", DEVNAME(sc),
1687 	    letoh16(pg0.dev_handle), pg0.bus, pg0.target);
1688 	DNPRINTF(MPI_D_MISC, "%s:  device_info: 0x%08x\n", DEVNAME(sc),
1689 	    letoh32(pg0.device_info));
1690 	DNPRINTF(MPI_D_MISC, "%s:  flags: 0x%04x physical_port: 0x%02x\n",
1691 	    DEVNAME(sc), letoh16(pg0.flags), pg0.physical_port);
1692 
1693 	if (ISSET(lemtoh32(&pg0.device_info),
1694 	    MPI_CFG_SAS_DEV_0_DEVINFO_ATAPI_DEVICE)) {
1695 		DNPRINTF(MPI_D_MISC, "%s: target %d is an ATAPI device\n",
1696 		    DEVNAME(sc), link->target);
1697 		link->flags |= SDEV_ATAPI;
1698 		link->quirks |= SDEV_ONLYBIG;
1699 	}
1700 
1701 	return (0);
1702 }
1703 
1704 u_int32_t
1705 mpi_read(struct mpi_softc *sc, bus_size_t r)
1706 {
1707 	u_int32_t			rv;
1708 
1709 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1710 	    BUS_SPACE_BARRIER_READ);
1711 	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
1712 
1713 	DNPRINTF(MPI_D_RW, "%s: mpi_read %#x %#x\n", DEVNAME(sc), r, rv);
1714 
1715 	return (rv);
1716 }
1717 
1718 void
1719 mpi_write(struct mpi_softc *sc, bus_size_t r, u_int32_t v)
1720 {
1721 	DNPRINTF(MPI_D_RW, "%s: mpi_write %#x %#x\n", DEVNAME(sc), r, v);
1722 
1723 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
1724 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1725 	    BUS_SPACE_BARRIER_WRITE);
1726 }
1727 
1728 int
1729 mpi_wait_eq(struct mpi_softc *sc, bus_size_t r, u_int32_t mask,
1730     u_int32_t target)
1731 {
1732 	int				i;
1733 
1734 	DNPRINTF(MPI_D_RW, "%s: mpi_wait_eq %#x %#x %#x\n", DEVNAME(sc), r,
1735 	    mask, target);
1736 
1737 	for (i = 0; i < 10000; i++) {
1738 		if ((mpi_read(sc, r) & mask) == target)
1739 			return (0);
1740 		delay(1000);
1741 	}
1742 
1743 	return (1);
1744 }
1745 
1746 int
1747 mpi_wait_ne(struct mpi_softc *sc, bus_size_t r, u_int32_t mask,
1748     u_int32_t target)
1749 {
1750 	int				i;
1751 
1752 	DNPRINTF(MPI_D_RW, "%s: mpi_wait_ne %#x %#x %#x\n", DEVNAME(sc), r,
1753 	    mask, target);
1754 
1755 	for (i = 0; i < 10000; i++) {
1756 		if ((mpi_read(sc, r) & mask) != target)
1757 			return (0);
1758 		delay(1000);
1759 	}
1760 
1761 	return (1);
1762 }
1763 
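/*
 * Get the IOC into the READY state: wait for it to leave RESET, defer to
 * a PCI peer that already owns it, and soft- (or, failing that, hard-)
 * reset it if it is currently OPERATIONAL or FAULTed.
 */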
1764 int
1765 mpi_init(struct mpi_softc *sc)
1766 {
1767 	u_int32_t			db;
1768 	int				i;
1769 
1770 	/* spin until the IOC leaves the RESET state */
1771 	if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
1772 	    MPI_DOORBELL_STATE_RESET) != 0) {
1773 		DNPRINTF(MPI_D_MISC, "%s: mpi_init timeout waiting to leave "
1774 		    "reset state\n", DEVNAME(sc));
1775 		return (1);
1776 	}
1777 
1778 	/* check current ownership */
1779 	db = mpi_read_db(sc);
1780 	if ((db & MPI_DOORBELL_WHOINIT) == MPI_DOORBELL_WHOINIT_PCIPEER) {
1781 		DNPRINTF(MPI_D_MISC, "%s: mpi_init initialised by pci peer\n",
1782 		    DEVNAME(sc));
1783 		return (0);
1784 	}
1785 
1786 	for (i = 0; i < 5; i++) {
1787 		switch (db & MPI_DOORBELL_STATE) {
1788 		case MPI_DOORBELL_STATE_READY:
1789 			DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is ready\n",
1790 			    DEVNAME(sc));
1791 			return (0);
1792 
1793 		case MPI_DOORBELL_STATE_OPER:
1794 		case MPI_DOORBELL_STATE_FAULT:
1795 			DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is being "
1796 			    "reset\n" , DEVNAME(sc));
1797 			if (mpi_reset_soft(sc) != 0)
1798 				mpi_reset_hard(sc);
1799 			break;
1800 
1801 		case MPI_DOORBELL_STATE_RESET:
1802 			DNPRINTF(MPI_D_MISC, "%s: mpi_init waiting to come "
1803 			    "out of reset\n", DEVNAME(sc));
1804 			if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
1805 			    MPI_DOORBELL_STATE_RESET) != 0)
1806 				return (1);
1807 			break;
1808 		}
1809 		db = mpi_read_db(sc);
1810 	}
1811 
1812 	return (1);
1813 }
1814 
1815 int
1816 mpi_reset_soft(struct mpi_softc *sc)
1817 {
1818 	DNPRINTF(MPI_D_MISC, "%s: mpi_reset_soft\n", DEVNAME(sc));
1819 
1820 	if (mpi_read_db(sc) & MPI_DOORBELL_INUSE)
1821 		return (1);
1822 
1823 	mpi_write_db(sc,
1824 	    MPI_DOORBELL_FUNCTION(MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET));
1825 	if (mpi_wait_eq(sc, MPI_INTR_STATUS,
1826 	    MPI_INTR_STATUS_IOCDOORBELL, 0) != 0)
1827 		return (1);
1828 
1829 	if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
1830 	    MPI_DOORBELL_STATE_READY) != 0)
1831 		return (1);
1832 
1833 	return (0);
1834 }
1835 
1836 int
1837 mpi_reset_hard(struct mpi_softc *sc)
1838 {
1839 	DNPRINTF(MPI_D_MISC, "%s: mpi_reset_hard\n", DEVNAME(sc));
1840 
1841 	/* enable diagnostic register */
1842 	mpi_write(sc, MPI_WRITESEQ, 0xff);
1843 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_1);
1844 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_2);
1845 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_3);
1846 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_4);
1847 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_5);
1848 
1849 	/* reset ioc */
1850 	mpi_write(sc, MPI_HOSTDIAG, MPI_HOSTDIAG_RESET_ADAPTER);
1851 
1852 	delay(10000);
1853 
1854 	/* disable diagnostic register */
1855 	mpi_write(sc, MPI_WRITESEQ, 0xff);
1856 
1857 	/* restore pci bits? */
1858 
1859 	/* firmware bits? */
1860 	return (0);
1861 }
1862 
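/*
 * Doorbell handshake, send side: write the handshake function and dword
 * count to the doorbell, wait for and ack the resulting interrupt, then
 * feed the message through the doorbell one dword at a time, waiting for
 * the IOC to ack each write.
 */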
1863 int
1864 mpi_handshake_send(struct mpi_softc *sc, void *buf, size_t dwords)
1865 {
1866 	u_int32_t				*query = buf;
1867 	int					i;
1868 
1869 	/* make sure the doorbell is not in use. */
1870 	if (mpi_read_db(sc) & MPI_DOORBELL_INUSE)
1871 		return (1);
1872 
1873 	/* clear pending doorbell interrupts */
1874 	if (mpi_read_intr(sc) & MPI_INTR_STATUS_DOORBELL)
1875 		mpi_write_intr(sc, 0);
1876 
1877 	/*
1878 	 * first write the doorbell with the handshake function and the
1879 	 * dword count.
1880 	 */
1881 	mpi_write_db(sc, MPI_DOORBELL_FUNCTION(MPI_FUNCTION_HANDSHAKE) |
1882 	    MPI_DOORBELL_DWORDS(dwords));
1883 
1884 	/*
1885 	 * the doorbell used bit will be set because a doorbell function has
1886 	 * started. Wait for the interrupt and then ack it.
1887 	 */
1888 	if (mpi_wait_db_int(sc) != 0)
1889 		return (1);
1890 	mpi_write_intr(sc, 0);
1891 
1892 	/* poll for the acknowledgement. */
1893 	if (mpi_wait_db_ack(sc) != 0)
1894 		return (1);
1895 
1896 	/* write the query through the doorbell. */
1897 	for (i = 0; i < dwords; i++) {
1898 		mpi_write_db(sc, htole32(query[i]));
1899 		if (mpi_wait_db_ack(sc) != 0)
1900 			return (1);
1901 	}
1902 
1903 	return (0);
1904 }
1905 
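/*
 * Handshake replies come back through the doorbell 16 bits at a time:
 * each doorbell interrupt carries half a dword in its data field, so
 * every dword costs two interrupt/ack cycles.
 */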
1906 int
1907 mpi_handshake_recv_dword(struct mpi_softc *sc, u_int32_t *dword)
1908 {
1909 	u_int16_t				*words = (u_int16_t *)dword;
1910 	int					i;
1911 
1912 	for (i = 0; i < 2; i++) {
1913 		if (mpi_wait_db_int(sc) != 0)
1914 			return (1);
1915 		words[i] = letoh16(mpi_read_db(sc) & MPI_DOORBELL_DATA_MASK);
1916 		mpi_write_intr(sc, 0);
1917 	}
1918 
1919 	return (0);
1920 }
1921 
1922 int
1923 mpi_handshake_recv(struct mpi_softc *sc, void *buf, size_t dwords)
1924 {
1925 	struct mpi_msg_reply			*reply = buf;
1926 	u_int32_t				*dbuf = buf, dummy;
1927 	int					i;
1928 
1929 	/* get the first dword so we can read the length out of the header. */
1930 	if (mpi_handshake_recv_dword(sc, &dbuf[0]) != 0)
1931 		return (1);
1932 
1933 	DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dwords: %d reply: %d\n",
1934 	    DEVNAME(sc), dwords, reply->msg_length);
1935 
1936 	/*
1937 	 * the total length, in dwords, is in the message length field of the
1938 	 * reply header.
1939 	 */
1940 	for (i = 1; i < MIN(dwords, reply->msg_length); i++) {
1941 		if (mpi_handshake_recv_dword(sc, &dbuf[i]) != 0)
1942 			return (1);
1943 	}
1944 
1945 	/* if there's extra stuff to come off the ioc, discard it */
1946 	while (i++ < reply->msg_length) {
1947 		if (mpi_handshake_recv_dword(sc, &dummy) != 0)
1948 			return (1);
1949 		DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dummy read: "
1950 		    "0x%08x\n", DEVNAME(sc), dummy);
1951 	}
1952 
1953 	/* wait for the doorbell used bit to be reset and clear the intr */
1954 	if (mpi_wait_db_int(sc) != 0)
1955 		return (1);
1956 	mpi_write_intr(sc, 0);
1957 
1958 	return (0);
1959 }
1960 
1961 void
1962 mpi_empty_done(struct mpi_ccb *ccb)
1963 {
1964 	/* nothing to do */
1965 }
1966 
1967 int
1968 mpi_iocfacts(struct mpi_softc *sc)
1969 {
1970 	struct mpi_msg_iocfacts_request		ifq;
1971 	struct mpi_msg_iocfacts_reply		ifp;
1972 
1973 	DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts\n", DEVNAME(sc));
1974 
1975 	memset(&ifq, 0, sizeof(ifq));
1976 	memset(&ifp, 0, sizeof(ifp));
1977 
1978 	ifq.function = MPI_FUNCTION_IOC_FACTS;
1979 	ifq.chain_offset = 0;
1980 	ifq.msg_flags = 0;
1981 	ifq.msg_context = htole32(0xdeadbeef);
1982 
1983 	if (mpi_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
1984 		DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts send failed\n",
1985 		    DEVNAME(sc));
1986 		return (1);
1987 	}
1988 
1989 	if (mpi_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
1990 		DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts recv failed\n",
1991 		    DEVNAME(sc));
1992 		return (1);
1993 	}
1994 
1995 	DNPRINTF(MPI_D_MISC, "%s:  func: 0x%02x len: %d msgver: %d.%d\n",
1996 	    DEVNAME(sc), ifp.function, ifp.msg_length,
1997 	    ifp.msg_version_maj, ifp.msg_version_min);
1998 	DNPRINTF(MPI_D_MISC, "%s:  msgflags: 0x%02x iocnumber: 0x%02x "
1999 	    "hdrver: %d.%d\n", DEVNAME(sc), ifp.msg_flags,
2000 	    ifp.ioc_number, ifp.header_version_maj,
2001 	    ifp.header_version_min);
2002 	DNPRINTF(MPI_D_MISC, "%s:  message context: 0x%08x\n", DEVNAME(sc),
2003 	    letoh32(ifp.msg_context));
2004 	DNPRINTF(MPI_D_MISC, "%s:  iocstatus: 0x%04x ioexcept: 0x%04x\n",
2005 	    DEVNAME(sc), letoh16(ifp.ioc_status),
2006 	    letoh16(ifp.ioc_exceptions));
2007 	DNPRINTF(MPI_D_MISC, "%s:  iocloginfo: 0x%08x\n", DEVNAME(sc),
2008 	    letoh32(ifp.ioc_loginfo));
2009 	DNPRINTF(MPI_D_MISC, "%s:  flags: 0x%02x blocksize: %d whoinit: 0x%02x "
2010 	    "maxchdepth: %d\n", DEVNAME(sc), ifp.flags,
2011 	    ifp.block_size, ifp.whoinit, ifp.max_chain_depth);
2012 	DNPRINTF(MPI_D_MISC, "%s:  reqfrsize: %d replyqdepth: %d\n",
2013 	    DEVNAME(sc), letoh16(ifp.request_frame_size),
2014 	    letoh16(ifp.reply_queue_depth));
2015 	DNPRINTF(MPI_D_MISC, "%s:  productid: 0x%04x\n", DEVNAME(sc),
2016 	    letoh16(ifp.product_id));
2017 	DNPRINTF(MPI_D_MISC, "%s:  hostmfahiaddr: 0x%08x\n", DEVNAME(sc),
2018 	    letoh32(ifp.current_host_mfa_hi_addr));
2019 	DNPRINTF(MPI_D_MISC, "%s:  event_state: 0x%02x number_of_ports: %d "
2020 	    "global_credits: %d\n",
2021 	    DEVNAME(sc), ifp.event_state, ifp.number_of_ports,
2022 	    letoh16(ifp.global_credits));
2023 	DNPRINTF(MPI_D_MISC, "%s:  sensebufhiaddr: 0x%08x\n", DEVNAME(sc),
2024 	    letoh32(ifp.current_sense_buffer_hi_addr));
2025 	DNPRINTF(MPI_D_MISC, "%s:  maxbus: %d maxdev: %d replyfrsize: %d\n",
2026 	    DEVNAME(sc), ifp.max_buses, ifp.max_devices,
2027 	    letoh16(ifp.current_reply_frame_size));
2028 	DNPRINTF(MPI_D_MISC, "%s:  fw_image_size: %d\n", DEVNAME(sc),
2029 	    letoh32(ifp.fw_image_size));
2030 	DNPRINTF(MPI_D_MISC, "%s:  ioc_capabilities: 0x%08x\n", DEVNAME(sc),
2031 	    letoh32(ifp.ioc_capabilities));
2032 	DNPRINTF(MPI_D_MISC, "%s:  fw_version: %d.%d fw_version_unit: 0x%02x "
2033 	    "fw_version_dev: 0x%02x\n", DEVNAME(sc),
2034 	    ifp.fw_version_maj, ifp.fw_version_min,
2035 	    ifp.fw_version_unit, ifp.fw_version_dev);
2036 	DNPRINTF(MPI_D_MISC, "%s:  hi_priority_queue_depth: 0x%04x\n",
2037 	    DEVNAME(sc), letoh16(ifp.hi_priority_queue_depth));
2038 	DNPRINTF(MPI_D_MISC, "%s:  host_page_buffer_sge: hdr: 0x%08x "
2039 	    "addr 0x%08lx%08lx\n", DEVNAME(sc),
2040 	    letoh32(ifp.host_page_buffer_sge.sg_hdr),
2041 	    letoh32(ifp.host_page_buffer_sge.sg_addr_hi),
2042 	    letoh32(ifp.host_page_buffer_sge.sg_addr_lo));
2043 
2044 	sc->sc_fw_maj = ifp.fw_version_maj;
2045 	sc->sc_fw_min = ifp.fw_version_min;
2046 	sc->sc_fw_unit = ifp.fw_version_unit;
2047 	sc->sc_fw_dev = ifp.fw_version_dev;
2048 
2049 	sc->sc_maxcmds = lemtoh16(&ifp.global_credits);
2050 	sc->sc_maxchdepth = ifp.max_chain_depth;
2051 	sc->sc_ioc_number = ifp.ioc_number;
2052 	if (sc->sc_flags & MPI_F_SPI)
2053 		sc->sc_buswidth = 16;
2054 	else
2055 		sc->sc_buswidth =
2056 		    (ifp.max_devices == 0) ? 256 : ifp.max_devices;
2057 	if (ifp.flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)
2058 		sc->sc_fw_len = lemtoh32(&ifp.fw_image_size);
2059 
2060 	sc->sc_repq = MIN(MPI_REPLYQ_DEPTH, lemtoh16(&ifp.reply_queue_depth));
2061 
2062 	/*
2063 	 * you can fit sg elements on the end of the io cmd if they fit in the
2064 	 * request frame size.
2065 	 */
2066 	sc->sc_first_sgl_len = ((lemtoh16(&ifp.request_frame_size) * 4) -
2067 	    sizeof(struct mpi_msg_scsi_io)) / sizeof(struct mpi_sge);
2068 	DNPRINTF(MPI_D_MISC, "%s:   first sgl len: %d\n", DEVNAME(sc),
2069 	    sc->sc_first_sgl_len);
2070 
2071 	sc->sc_chain_len = (lemtoh16(&ifp.request_frame_size) * 4) /
2072 	    sizeof(struct mpi_sge);
2073 	DNPRINTF(MPI_D_MISC, "%s:   chain len: %d\n", DEVNAME(sc),
2074 	    sc->sc_chain_len);
2075 
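	/*
	 * For example, assuming a 32 dword (128 byte) request frame, a
	 * 48 byte struct mpi_msg_scsi_io and 12 byte struct mpi_sge,
	 * the sums above work out to (128 - 48) / 12 = 6 SGEs after the
	 * io cmd and 128 / 12 = 10 SGEs per chain frame.  The real
	 * numbers depend on what IOC_FACTS reported above.
	 */
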
2076 	/* the sgl trailing the io cmd loses an entry to the chain element. */
2077 	sc->sc_max_sgl_len = MPI_MAX_SGL - 1;
2078 	/* the sgl chains lose an entry for each chain element */
2079 	sc->sc_max_sgl_len -= (MPI_MAX_SGL - sc->sc_first_sgl_len) /
2080 	    sc->sc_chain_len;
2081 	DNPRINTF(MPI_D_MISC, "%s:   max sgl len: %d\n", DEVNAME(sc),
2082 	    sc->sc_max_sgl_len);
2083 
2084 	/* XXX we're ignoring the max chain depth */
2085 
2086 	return (0);
2087 }
2088 
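/*
 * IOC_INIT tells the controller how the host intends to drive it:
 * who owns the IOC, the bus and device limits, the reply frame size,
 * and the upper 32 bits of the request and sense buffer DMA areas.
 */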
2089 int
2090 mpi_iocinit(struct mpi_softc *sc)
2091 {
2092 	struct mpi_msg_iocinit_request		iiq;
2093 	struct mpi_msg_iocinit_reply		iip;
2094 	u_int32_t				hi_addr;
2095 
2096 	DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit\n", DEVNAME(sc));
2097 
2098 	memset(&iiq, 0, sizeof(iiq));
2099 	memset(&iip, 0, sizeof(iip));
2100 
2101 	iiq.function = MPI_FUNCTION_IOC_INIT;
2102 	iiq.whoinit = MPI_WHOINIT_HOST_DRIVER;
2103 
2104 	iiq.max_devices = (sc->sc_buswidth == 256) ? 0 : sc->sc_buswidth;
2105 	iiq.max_buses = 1;
2106 
2107 	iiq.msg_context = htole32(0xd00fd00f);
2108 
2109 	iiq.reply_frame_size = htole16(MPI_REPLY_SIZE);
2110 
2111 	hi_addr = (u_int32_t)(MPI_DMA_DVA(sc->sc_requests) >> 32);
2112 	htolem32(&iiq.host_mfa_hi_addr, hi_addr);
2113 	htolem32(&iiq.sense_buffer_hi_addr, hi_addr);
2114 
2115 	iiq.msg_version_maj = 0x01;
2116 	iiq.msg_version_min = 0x02;
2117 
2118 	iiq.hdr_version_unit = 0x0d;
2119 	iiq.hdr_version_dev = 0x00;
2120 
2121 	if (mpi_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) {
2122 		DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit send failed\n",
2123 		    DEVNAME(sc));
2124 		return (1);
2125 	}
2126 
2127 	if (mpi_handshake_recv(sc, &iip, dwordsof(iip)) != 0) {
2128 		DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit recv failed\n",
2129 		    DEVNAME(sc));
2130 		return (1);
2131 	}
2132 
2133 	DNPRINTF(MPI_D_MISC, "%s:  function: 0x%02x msg_length: %d "
2134 	    "whoinit: 0x%02x\n", DEVNAME(sc), iip.function,
2135 	    iip.msg_length, iip.whoinit);
2136 	DNPRINTF(MPI_D_MISC, "%s:  msg_flags: 0x%02x max_buses: %d "
2137 	    "max_devices: %d flags: 0x%02x\n", DEVNAME(sc), iip.msg_flags,
2138 	    iip.max_buses, iip.max_devices, iip.flags);
2139 	DNPRINTF(MPI_D_MISC, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2140 	    letoh32(iip.msg_context));
2141 	DNPRINTF(MPI_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2142 	    letoh16(iip.ioc_status));
2143 	DNPRINTF(MPI_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2144 	    letoh32(iip.ioc_loginfo));
2145 
2146 	return (0);
2147 }
2148 
2149 int
2150 mpi_portfacts(struct mpi_softc *sc)
2151 {
2152 	struct mpi_ccb				*ccb;
2153 	struct mpi_msg_portfacts_request	*pfq;
2154 	volatile struct mpi_msg_portfacts_reply	*pfp;
2155 	int					rv = 1;
2156 
2157 	DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts\n", DEVNAME(sc));
2158 
2159 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
2160 	if (ccb == NULL) {
2161 		DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts ccb_get\n",
2162 		    DEVNAME(sc));
2163 		return (rv);
2164 	}
2165 
2166 	ccb->ccb_done = mpi_empty_done;
2167 	pfq = ccb->ccb_cmd;
2168 
2169 	pfq->function = MPI_FUNCTION_PORT_FACTS;
2170 	pfq->chain_offset = 0;
2171 	pfq->msg_flags = 0;
2172 	pfq->port_number = 0;
2173 
2174 	if (mpi_poll(sc, ccb, 50000) != 0) {
2175 		DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts poll\n", DEVNAME(sc));
2176 		goto err;
2177 	}
2178 
2179 	if (ccb->ccb_rcb == NULL) {
2180 		DNPRINTF(MPI_D_MISC, "%s: empty portfacts reply\n",
2181 		    DEVNAME(sc));
2182 		goto err;
2183 	}
2184 	pfp = ccb->ccb_rcb->rcb_reply;
2185 
2186 	DNPRINTF(MPI_D_MISC, "%s:  function: 0x%02x msg_length: %d\n",
2187 	    DEVNAME(sc), pfp->function, pfp->msg_length);
2188 	DNPRINTF(MPI_D_MISC, "%s:  msg_flags: 0x%02x port_number: %d\n",
2189 	    DEVNAME(sc), pfp->msg_flags, pfp->port_number);
2190 	DNPRINTF(MPI_D_MISC, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2191 	    letoh32(pfp->msg_context));
2192 	DNPRINTF(MPI_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2193 	    letoh16(pfp->ioc_status));
2194 	DNPRINTF(MPI_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2195 	    letoh32(pfp->ioc_loginfo));
2196 	DNPRINTF(MPI_D_MISC, "%s:  max_devices: %d port_type: 0x%02x\n",
2197 	    DEVNAME(sc), letoh16(pfp->max_devices), pfp->port_type);
2198 	DNPRINTF(MPI_D_MISC, "%s:  protocol_flags: 0x%04x port_scsi_id: %d\n",
2199 	    DEVNAME(sc), letoh16(pfp->protocol_flags),
2200 	    letoh16(pfp->port_scsi_id));
2201 	DNPRINTF(MPI_D_MISC, "%s:  max_persistent_ids: %d "
2202 	    "max_posted_cmd_buffers: %d\n", DEVNAME(sc),
2203 	    letoh16(pfp->max_persistent_ids),
2204 	    letoh16(pfp->max_posted_cmd_buffers));
2205 	DNPRINTF(MPI_D_MISC, "%s:  max_lan_buckets: %d\n", DEVNAME(sc),
2206 	    letoh16(pfp->max_lan_buckets));
2207 
2208 	sc->sc_porttype = pfp->port_type;
2209 	if (sc->sc_target == -1)
2210 		sc->sc_target = lemtoh16(&pfp->port_scsi_id);
2211 
2212 	mpi_push_reply(sc, ccb->ccb_rcb);
2213 	rv = 0;
2214 err:
2215 	scsi_io_put(&sc->sc_iopool, ccb);
2216 
2217 	return (rv);
2218 }
2219 
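/*
 * Read IOC page 1 and, if the firmware has reply coalescing enabled,
 * write the page back with that flag cleared so completions are
 * posted individually instead of being batched.
 */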
2220 int
2221 mpi_cfg_coalescing(struct mpi_softc *sc)
2222 {
2223 	struct mpi_cfg_hdr		hdr;
2224 	struct mpi_cfg_ioc_pg1		pg;
2225 	u_int32_t			flags;
2226 
2227 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 1, 0, &hdr) != 0) {
2228 		DNPRINTF(MPI_D_MISC, "%s: unable to fetch IOC page 1 header\n",
2229 		    DEVNAME(sc));
2230 		return (1);
2231 	}
2232 
2233 	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg, sizeof(pg)) != 0) {
2234 		DNPRINTF(MPI_D_MISC, "%s: unable to fetch IOC page 1\n",
2235 		    DEVNAME(sc));
2236 		return (1);
2237 	}
2238 
2239 	DNPRINTF(MPI_D_MISC, "%s: IOC page 1\n", DEVNAME(sc));
2240 	DNPRINTF(MPI_D_MISC, "%s:  flags: 0x%08x\n", DEVNAME(sc),
2241 	    letoh32(pg.flags));
2242 	DNPRINTF(MPI_D_MISC, "%s:  coalescing_timeout: %d\n", DEVNAME(sc),
2243 	    letoh32(pg.coalescing_timeout));
2244 	DNPRINTF(MPI_D_MISC, "%s:  coalescing_depth: %d pci_slot_num: %d\n",
2245 	    DEVNAME(sc), pg.coalescing_depth, pg.pci_slot_num);
2246 
2247 	flags = lemtoh32(&pg.flags);
2248 	if (!ISSET(flags, MPI_CFG_IOC_1_REPLY_COALESCING))
2249 		return (0);
2250 
2251 	CLR(pg.flags, htole32(MPI_CFG_IOC_1_REPLY_COALESCING));
2252 	if (mpi_cfg_page(sc, 0, &hdr, 0, &pg, sizeof(pg)) != 0) {
2253 		DNPRINTF(MPI_D_MISC, "%s: unable to clear coalescing\n",
2254 		    DEVNAME(sc));
2255 		return (1);
2256 	}
2257 
2258 	return (0);
2259 }
2260 
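/*
 * Async event notification: one long-lived ccb is posted with the
 * event switch turned on.  Replies that the firmware marks
 * ack_required are queued and acknowledged from an iopool handler
 * (mpi_eventack) rather than inline, since sending the ack itself
 * needs a free ccb.
 */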
2261 int
2262 mpi_eventnotify(struct mpi_softc *sc)
2263 {
2264 	struct mpi_ccb				*ccb;
2265 	struct mpi_msg_event_request		*enq;
2266 
2267 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
2268 	if (ccb == NULL) {
2269 		DNPRINTF(MPI_D_MISC, "%s: mpi_eventnotify ccb_get\n",
2270 		    DEVNAME(sc));
2271 		return (1);
2272 	}
2273 
2274 	sc->sc_evt_ccb = ccb;
2275 	SIMPLEQ_INIT(&sc->sc_evt_ack_queue);
2276 	mtx_init(&sc->sc_evt_ack_mtx, IPL_BIO);
2277 	scsi_ioh_set(&sc->sc_evt_ack_handler, &sc->sc_iopool,
2278 	    mpi_eventack, sc);
2279 
2280 	ccb->ccb_done = mpi_eventnotify_done;
2281 	enq = ccb->ccb_cmd;
2282 
2283 	enq->function = MPI_FUNCTION_EVENT_NOTIFICATION;
2284 	enq->chain_offset = 0;
2285 	enq->event_switch = MPI_EVENT_SWITCH_ON;
2286 
2287 	mpi_start(sc, ccb);
2288 	return (0);
2289 }
2290 
2291 void
2292 mpi_eventnotify_done(struct mpi_ccb *ccb)
2293 {
2294 	struct mpi_softc			*sc = ccb->ccb_sc;
2295 	struct mpi_rcb				*rcb = ccb->ccb_rcb;
2296 	struct mpi_msg_event_reply		*enp = rcb->rcb_reply;
2297 
2298 	DNPRINTF(MPI_D_EVT, "%s: mpi_eventnotify_done\n", DEVNAME(sc));
2299 
2300 	DNPRINTF(MPI_D_EVT, "%s:  function: 0x%02x msg_length: %d "
2301 	    "data_length: %d\n", DEVNAME(sc), enp->function, enp->msg_length,
2302 	    letoh16(enp->data_length));
2303 	DNPRINTF(MPI_D_EVT, "%s:  ack_required: %d msg_flags 0x%02x\n",
2304 	    DEVNAME(sc), enp->ack_required, enp->msg_flags);
2305 	DNPRINTF(MPI_D_EVT, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2306 	    letoh32(enp->msg_context));
2307 	DNPRINTF(MPI_D_EVT, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2308 	    letoh16(enp->ioc_status));
2309 	DNPRINTF(MPI_D_EVT, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2310 	    letoh32(enp->ioc_loginfo));
2311 	DNPRINTF(MPI_D_EVT, "%s:  event: 0x%08x\n", DEVNAME(sc),
2312 	    letoh32(enp->event));
2313 	DNPRINTF(MPI_D_EVT, "%s:  event_context: 0x%08x\n", DEVNAME(sc),
2314 	    letoh32(enp->event_context));
2315 
2316 	switch (lemtoh32(&enp->event)) {
2317 	/* ignore these */
2318 	case MPI_EVENT_EVENT_CHANGE:
2319 	case MPI_EVENT_SAS_PHY_LINK_STATUS:
2320 		break;
2321 
2322 	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2323 		if (sc->sc_scsibus == NULL)
2324 			break;
2325 
2326 		if (mpi_evt_sas(sc, rcb) != 0) {
2327 			/* reply is freed later on */
2328 			return;
2329 		}
2330 		break;
2331 
2332 	case MPI_EVENT_RESCAN:
2333 		if (sc->sc_scsibus != NULL &&
2334 		    sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_FC)
2335 			task_add(systq, &sc->sc_evt_rescan);
2336 		break;
2337 
2338 	default:
2339 		DNPRINTF(MPI_D_EVT, "%s:  unhandled event 0x%08x\n",
2340 		    DEVNAME(sc), lemtoh32(&enp->event));
2341 		break;
2342 	}
2343 
2344 	mpi_eventnotify_free(sc, rcb);
2345 }
2346 
2347 void
2348 mpi_eventnotify_free(struct mpi_softc *sc, struct mpi_rcb *rcb)
2349 {
2350 	struct mpi_msg_event_reply		*enp = rcb->rcb_reply;
2351 
2352 	if (enp->ack_required) {
2353 		mtx_enter(&sc->sc_evt_ack_mtx);
2354 		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_ack_queue, rcb, rcb_link);
2355 		mtx_leave(&sc->sc_evt_ack_mtx);
2356 		scsi_ioh_add(&sc->sc_evt_ack_handler);
2357 	} else
2358 		mpi_push_reply(sc, rcb);
2359 }
2360 
2361 int
2362 mpi_evt_sas(struct mpi_softc *sc, struct mpi_rcb *rcb)
2363 {
2364 	struct mpi_evt_sas_change		*ch;
2365 	u_int8_t				*data;
2366 
2367 	data = rcb->rcb_reply;
2368 	data += sizeof(struct mpi_msg_event_reply);
2369 	ch = (struct mpi_evt_sas_change *)data;
2370 
2371 	if (ch->bus != 0)
2372 		return (0);
2373 
2374 	switch (ch->reason) {
2375 	case MPI_EVT_SASCH_REASON_ADDED:
2376 	case MPI_EVT_SASCH_REASON_NO_PERSIST_ADDED:
2377 		KERNEL_LOCK();
2378 		if (scsi_req_probe(sc->sc_scsibus, ch->target, -1) != 0) {
2379 			printf("%s: unable to request attach of %d\n",
2380 			    DEVNAME(sc), ch->target);
2381 		}
2382 		KERNEL_UNLOCK();
2383 		break;
2384 
2385 	case MPI_EVT_SASCH_REASON_NOT_RESPONDING:
2386 		KERNEL_LOCK();
2387 		scsi_activate(sc->sc_scsibus, ch->target, -1, DVACT_DEACTIVATE);
2388 		KERNEL_UNLOCK();
2389 
2390 		mtx_enter(&sc->sc_evt_scan_mtx);
2391 		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_scan_queue, rcb, rcb_link);
2392 		mtx_leave(&sc->sc_evt_scan_mtx);
2393 		scsi_ioh_add(&sc->sc_evt_scan_handler);
2394 
2395 		/* we'll handle event ack later on */
2396 		return (1);
2397 
2398 	case MPI_EVT_SASCH_REASON_SMART_DATA:
2399 	case MPI_EVT_SASCH_REASON_UNSUPPORTED:
2400 	case MPI_EVT_SASCH_REASON_INTERNAL_RESET:
2401 		break;
2402 	default:
2403 		printf("%s: unknown reason for SAS device status change: "
2404 		    "0x%02x\n", DEVNAME(sc), ch->reason);
2405 		break;
2406 	}
2407 
2408 	return (0);
2409 }
2410 
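/*
 * Tearing down a SAS device that stopped responding is a two step
 * affair driven off the scan queue: send the IOC a task management
 * target reset first, then have the completion handler ask the
 * midlayer to detach the target.
 */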
2411 void
2412 mpi_evt_sas_detach(void *cookie, void *io)
2413 {
2414 	struct mpi_softc			*sc = cookie;
2415 	struct mpi_ccb				*ccb = io;
2416 	struct mpi_rcb				*rcb, *next;
2417 	struct mpi_msg_event_reply		*enp;
2418 	struct mpi_evt_sas_change		*ch;
2419 	struct mpi_msg_scsi_task_request	*str;
2420 
2421 	DNPRINTF(MPI_D_EVT, "%s: event sas detach handler\n", DEVNAME(sc));
2422 
2423 	mtx_enter(&sc->sc_evt_scan_mtx);
2424 	rcb = SIMPLEQ_FIRST(&sc->sc_evt_scan_queue);
2425 	if (rcb != NULL) {
2426 		next = SIMPLEQ_NEXT(rcb, rcb_link);
2427 		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_scan_queue, rcb_link);
2428 	}
2429 	mtx_leave(&sc->sc_evt_scan_mtx);
2430 
2431 	if (rcb == NULL) {
2432 		scsi_io_put(&sc->sc_iopool, ccb);
2433 		return;
2434 	}
2435 
2436 	enp = rcb->rcb_reply;
2437 	ch = (struct mpi_evt_sas_change *)(enp + 1);
2438 
2439 	ccb->ccb_done = mpi_evt_sas_detach_done;
2440 	str = ccb->ccb_cmd;
2441 
2442 	str->target_id = ch->target;
2443 	str->bus = 0;
2444 	str->function = MPI_FUNCTION_SCSI_TASK_MGMT;
2445 
2446 	str->task_type = MPI_MSG_SCSI_TASK_TYPE_TARGET_RESET;
2447 
2448 	mpi_eventnotify_free(sc, rcb);
2449 
2450 	mpi_start(sc, ccb);
2451 
2452 	if (next != NULL)
2453 		scsi_ioh_add(&sc->sc_evt_scan_handler);
2454 }
2455 
2456 void
2457 mpi_evt_sas_detach_done(struct mpi_ccb *ccb)
2458 {
2459 	struct mpi_softc			*sc = ccb->ccb_sc;
2460 	struct mpi_msg_scsi_task_reply		*r = ccb->ccb_rcb->rcb_reply;
2461 
2462 	KERNEL_LOCK();
2463 	if (scsi_req_detach(sc->sc_scsibus, r->target_id, -1,
2464 	    DETACH_FORCE) != 0) {
2465 		printf("%s: unable to request detach of %d\n",
2466 		    DEVNAME(sc), r->target_id);
2467 	}
2468 	KERNEL_UNLOCK();
2469 
2470 	mpi_push_reply(sc, ccb->ccb_rcb);
2471 	scsi_io_put(&sc->sc_iopool, ccb);
2472 }
2473 
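/*
 * Fibre channel rescan: walk the FC device pages (each page read
 * yields the port id used to address the next one), build a bitmap of
 * the targets currently present on bus 0, and then probe or detach
 * scsi links so the midlayer matches that bitmap.
 */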
2474 void
2475 mpi_fc_rescan(void *xsc)
2476 {
2477 	struct mpi_softc			*sc = xsc;
2478 	struct mpi_cfg_hdr			hdr;
2479 	struct mpi_cfg_fc_device_pg0		pg;
2480 	struct scsi_link			*link;
2481 	u_int8_t				devmap[256 / NBBY];
2482 	u_int32_t				id = 0xffffff;
2483 	int					i;
2484 
2485 	memset(devmap, 0, sizeof(devmap));
2486 
2487 	do {
2488 		if (mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_DEV, 0,
2489 		    id, 0, &hdr) != 0) {
2490 			printf("%s: header get for rescan of 0x%08x failed\n",
2491 			    DEVNAME(sc), id);
2492 			return;
2493 		}
2494 
2495 		memset(&pg, 0, sizeof(pg));
2496 		if (mpi_req_cfg_page(sc, id, 0, &hdr, 1, &pg, sizeof(pg)) != 0)
2497 			break;
2498 
2499 		if (ISSET(pg.flags, MPI_CFG_FC_DEV_0_FLAGS_BUSADDR_VALID) &&
2500 		    pg.current_bus == 0)
2501 			setbit(devmap, pg.current_target_id);
2502 
2503 		id = lemtoh32(&pg.port_id);
2504 	} while (id <= 0xff0000);
2505 
2506 	for (i = 0; i < sc->sc_buswidth; i++) {
2507 		link = scsi_get_link(sc->sc_scsibus, i, 0);
2508 
2509 		if (isset(devmap, i)) {
2510 			if (link == NULL)
2511 				scsi_probe_target(sc->sc_scsibus, i);
2512 		} else {
2513 			if (link != NULL) {
2514 				scsi_activate(sc->sc_scsibus, i, -1,
2515 				    DVACT_DEACTIVATE);
2516 				scsi_detach_target(sc->sc_scsibus, i,
2517 				    DETACH_FORCE);
2518 			}
2519 		}
2520 	}
2521 }
2522 
2523 void
2524 mpi_eventack(void *cookie, void *io)
2525 {
2526 	struct mpi_softc			*sc = cookie;
2527 	struct mpi_ccb				*ccb = io;
2528 	struct mpi_rcb				*rcb, *next;
2529 	struct mpi_msg_event_reply		*enp;
2530 	struct mpi_msg_eventack_request		*eaq;
2531 
2532 	DNPRINTF(MPI_D_EVT, "%s: event ack\n", DEVNAME(sc));
2533 
2534 	mtx_enter(&sc->sc_evt_ack_mtx);
2535 	rcb = SIMPLEQ_FIRST(&sc->sc_evt_ack_queue);
2536 	if (rcb != NULL) {
2537 		next = SIMPLEQ_NEXT(rcb, rcb_link);
2538 		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_ack_queue, rcb_link);
2539 	}
2540 	mtx_leave(&sc->sc_evt_ack_mtx);
2541 
2542 	if (rcb == NULL) {
2543 		scsi_io_put(&sc->sc_iopool, ccb);
2544 		return;
2545 	}
2546 
2547 	enp = rcb->rcb_reply;
2548 
2549 	ccb->ccb_done = mpi_eventack_done;
2550 	eaq = ccb->ccb_cmd;
2551 
2552 	eaq->function = MPI_FUNCTION_EVENT_ACK;
2553 
2554 	eaq->event = enp->event;
2555 	eaq->event_context = enp->event_context;
2556 
2557 	mpi_push_reply(sc, rcb);
2558 	mpi_start(sc, ccb);
2559 
2560 	if (next != NULL)
2561 		scsi_ioh_add(&sc->sc_evt_ack_handler);
2562 }
2563 
2564 void
2565 mpi_eventack_done(struct mpi_ccb *ccb)
2566 {
2567 	struct mpi_softc			*sc = ccb->ccb_sc;
2568 
2569 	DNPRINTF(MPI_D_EVT, "%s: event ack done\n", DEVNAME(sc));
2570 
2571 	mpi_push_reply(sc, ccb->ccb_rcb);
2572 	scsi_io_put(&sc->sc_iopool, ccb);
2573 }
2574 
2575 int
2576 mpi_portenable(struct mpi_softc *sc)
2577 {
2578 	struct mpi_ccb				*ccb;
2579 	struct mpi_msg_portenable_request	*peq;
2580 	int					rv = 0;
2581 
2582 	DNPRINTF(MPI_D_MISC, "%s: mpi_portenable\n", DEVNAME(sc));
2583 
2584 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
2585 	if (ccb == NULL) {
2586 		DNPRINTF(MPI_D_MISC, "%s: mpi_portenable ccb_get\n",
2587 		    DEVNAME(sc));
2588 		return (1);
2589 	}
2590 
2591 	ccb->ccb_done = mpi_empty_done;
2592 	peq = ccb->ccb_cmd;
2593 
2594 	peq->function = MPI_FUNCTION_PORT_ENABLE;
2595 	peq->port_number = 0;
2596 
2597 	if (mpi_poll(sc, ccb, 50000) != 0) {
2598 		DNPRINTF(MPI_D_MISC, "%s: mpi_portenable poll\n", DEVNAME(sc));
2599 		return (1);
2600 	}
2601 
2602 	if (ccb->ccb_rcb == NULL) {
2603 		DNPRINTF(MPI_D_MISC, "%s: empty portenable reply\n",
2604 		    DEVNAME(sc));
2605 		rv = 1;
2606 	} else
2607 		mpi_push_reply(sc, ccb->ccb_rcb);
2608 
2609 	scsi_io_put(&sc->sc_iopool, ccb);
2610 
2611 	return (rv);
2612 }
2613 
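/*
 * If IOC_FACTS indicated the firmware image was downloaded at boot
 * (sc_fw_len != 0), pull a copy of it into host DMA memory now.  The
 * buffer is kept for the life of the driver, presumably so the image
 * can be handed back to the IOC after a hard reset.
 */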
2614 int
2615 mpi_fwupload(struct mpi_softc *sc)
2616 {
2617 	struct mpi_ccb				*ccb;
2618 	struct {
2619 		struct mpi_msg_fwupload_request		req;
2620 		struct mpi_sge				sge;
2621 	} __packed				*bundle;
2622 	struct mpi_msg_fwupload_reply		*upp;
2623 	int					rv = 0;
2624 
2625 	if (sc->sc_fw_len == 0)
2626 		return (0);
2627 
2628 	DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload\n", DEVNAME(sc));
2629 
2630 	sc->sc_fw = mpi_dmamem_alloc(sc, sc->sc_fw_len);
2631 	if (sc->sc_fw == NULL) {
2632 		DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload unable to allocate %d\n",
2633 		    DEVNAME(sc), sc->sc_fw_len);
2634 		return (1);
2635 	}
2636 
2637 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
2638 	if (ccb == NULL) {
2639 		DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload ccb_get\n",
2640 		    DEVNAME(sc));
2641 		goto err;
2642 	}
2643 
2644 	ccb->ccb_done = mpi_empty_done;
2645 	bundle = ccb->ccb_cmd;
2646 
2647 	bundle->req.function = MPI_FUNCTION_FW_UPLOAD;
2648 
2649 	bundle->req.image_type = MPI_FWUPLOAD_IMAGETYPE_IOC_FW;
2650 
2651 	bundle->req.tce.details_length = 12;
2652 	htolem32(&bundle->req.tce.image_size, sc->sc_fw_len);
2653 
2654 	htolem32(&bundle->sge.sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |
2655 	    MPI_SGE_FL_SIZE_64 | MPI_SGE_FL_LAST | MPI_SGE_FL_EOB |
2656 	    MPI_SGE_FL_EOL | (u_int32_t)sc->sc_fw_len);
2657 	mpi_dvatosge(&bundle->sge, MPI_DMA_DVA(sc->sc_fw));
2658 
2659 	if (mpi_poll(sc, ccb, 50000) != 0) {
2660 		DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload poll\n", DEVNAME(sc));
2661 		goto err;
2662 	}
2663 
2664 	if (ccb->ccb_rcb == NULL)
2665 		panic("%s: unable to do fw upload", DEVNAME(sc));
2666 	upp = ccb->ccb_rcb->rcb_reply;
2667 
2668 	if (lemtoh16(&upp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
2669 		rv = 1;
2670 
2671 	mpi_push_reply(sc, ccb->ccb_rcb);
2672 	scsi_io_put(&sc->sc_iopool, ccb);
2673 
2674 	return (rv);
2675 
2676 err:
2677 	mpi_dmamem_free(sc, sc->sc_fw);
2678 	return (1);
2679 }
2680 
2681 int
2682 mpi_manufacturing(struct mpi_softc *sc)
2683 {
2684 	char board_name[33];
2685 	struct mpi_cfg_hdr hdr;
2686 	struct mpi_cfg_manufacturing_pg0 *pg;
2687 	size_t pagelen;
2688 	int rv = 1;
2689 
2690 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_MANUFACTURING,
2691 	    0, 0, &hdr) != 0)
2692 		return (1);
2693 
2694 	pagelen = hdr.page_length * 4; /* dwords to bytes */
2695 	if (pagelen < sizeof(*pg))
2696 		return (1);
2697 
2698 	pg = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL);
2699 	if (pg == NULL)
2700 		return (1);
2701 
2702 	if (mpi_cfg_page(sc, 0, &hdr, 1, pg, pagelen) != 0)
2703 		goto out;
2704 
2705 	scsi_strvis(board_name, pg->board_name, sizeof(pg->board_name));
2706 
2707 	printf("%s: %s, firmware %d.%d.%d.%d\n", DEVNAME(sc), board_name,
2708 	    sc->sc_fw_maj, sc->sc_fw_min, sc->sc_fw_unit, sc->sc_fw_dev);
2709 
2710 	rv = 0;
2711 
2712 out:
2713 	free(pg, M_TEMP, 0);
2714 	return (rv);
2715 }
2716 
2717 void
2718 mpi_get_raid(struct mpi_softc *sc)
2719 {
2720 	struct mpi_cfg_hdr		hdr;
2721 	struct mpi_cfg_ioc_pg2		*vol_page;
2722 	size_t				pagelen;
2723 	u_int32_t			capabilities;
2724 
2725 	DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid\n", DEVNAME(sc));
2726 
2727 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 2, 0, &hdr) != 0) {
2728 		DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch header"
2729 		    " for IOC page 2\n", DEVNAME(sc));
2730 		return;
2731 	}
2732 
2733 	pagelen = hdr.page_length * 4; /* dwords to bytes */
2734 	vol_page = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL);
2735 	if (vol_page == NULL) {
2736 		DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to allocate "
2737 		    "space for ioc config page 2\n", DEVNAME(sc));
2738 		return;
2739 	}
2740 
2741 	if (mpi_cfg_page(sc, 0, &hdr, 1, vol_page, pagelen) != 0) {
2742 		DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch IOC "
2743 		    "page 2\n", DEVNAME(sc));
2744 		goto out;
2745 	}
2746 
2747 	capabilities = lemtoh32(&vol_page->capabilities);
2748 
2749 	DNPRINTF(MPI_D_RAID, "%s:  capabilities: 0x%08x\n", DEVNAME(sc),
2750 	    letoh32(vol_page->capabilities));
2751 	DNPRINTF(MPI_D_RAID, "%s:  active_vols: %d max_vols: %d "
2752 	    "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc),
2753 	    vol_page->active_vols, vol_page->max_vols,
2754 	    vol_page->active_physdisks, vol_page->max_physdisks);
2755 
2756 	/* don't walk the list if there is no RAID capability */
2757 	if (capabilities == 0xdeadbeef) {
2758 		printf("%s: deadbeef in raid configuration\n", DEVNAME(sc));
2759 		goto out;
2760 	}
2761 
2762 	if (ISSET(capabilities, MPI_CFG_IOC_2_CAPABILITIES_RAID))
2763 		sc->sc_flags |= MPI_F_RAID;
2764 
2765 out:
2766 	free(vol_page, M_TEMP, 0);
2767 }
2768 
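/*
 * Config pages are fetched in two steps: a PAGE_HEADER action returns
 * the page's version and length, and that header is then echoed back
 * in a READ/WRITE_CURRENT request (mpi_req_cfg_page) together with an
 * SGE describing the page buffer.  Extended pages carry their real
 * type in ext_page_type, with the header page type set to EXTENDED.
 */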
2769 int
2770 mpi_req_cfg_header(struct mpi_softc *sc, u_int8_t type, u_int8_t number,
2771     u_int32_t address, int flags, void *p)
2772 {
2773 	struct mpi_ccb				*ccb;
2774 	struct mpi_msg_config_request		*cq;
2775 	struct mpi_msg_config_reply		*cp;
2776 	struct mpi_cfg_hdr			*hdr = p;
2777 	struct mpi_ecfg_hdr			*ehdr = p;
2778 	int					etype = 0;
2779 	int					rv = 0;
2780 
2781 	DNPRINTF(MPI_D_MISC, "%s: mpi_req_cfg_header type: %#x number: %x "
2782 	    "address: 0x%08x flags: 0x%b\n", DEVNAME(sc), type, number,
2783 	    address, flags, MPI_PG_FMT);
2784 
2785 	ccb = scsi_io_get(&sc->sc_iopool,
2786 	    ISSET(flags, MPI_PG_POLL) ? SCSI_NOSLEEP : 0);
2787 	if (ccb == NULL) {
2788 		DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header ccb_get\n",
2789 		    DEVNAME(sc));
2790 		return (1);
2791 	}
2792 
2793 	if (ISSET(flags, MPI_PG_EXTENDED)) {
2794 		etype = type;
2795 		type = MPI_CONFIG_REQ_PAGE_TYPE_EXTENDED;
2796 	}
2797 
2798 	cq = ccb->ccb_cmd;
2799 
2800 	cq->function = MPI_FUNCTION_CONFIG;
2801 
2802 	cq->action = MPI_CONFIG_REQ_ACTION_PAGE_HEADER;
2803 
2804 	cq->config_header.page_number = number;
2805 	cq->config_header.page_type = type;
2806 	cq->ext_page_type = etype;
2807 	htolem32(&cq->page_address, address);
2808 	htolem32(&cq->page_buffer.sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |
2809 	    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL);
2810 
2811 	ccb->ccb_done = mpi_empty_done;
2812 	if (ISSET(flags, MPI_PG_POLL)) {
2813 		if (mpi_poll(sc, ccb, 50000) != 0) {
2814 			DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n",
2815 			    DEVNAME(sc));
2816 			return (1);
2817 		}
2818 	} else
2819 		mpi_wait(sc, ccb);
2820 
2821 	if (ccb->ccb_rcb == NULL)
2822 		panic("%s: unable to fetch config header", DEVNAME(sc));
2823 	cp = ccb->ccb_rcb->rcb_reply;
2824 
2825 	DNPRINTF(MPI_D_MISC, "%s:  action: 0x%02x msg_length: %d function: "
2826 	    "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function);
2827 	DNPRINTF(MPI_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
2828 	    "msg_flags: 0x%02x\n", DEVNAME(sc),
2829 	    letoh16(cp->ext_page_length), cp->ext_page_type,
2830 	    cp->msg_flags);
2831 	DNPRINTF(MPI_D_MISC, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2832 	    letoh32(cp->msg_context));
2833 	DNPRINTF(MPI_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2834 	    letoh16(cp->ioc_status));
2835 	DNPRINTF(MPI_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2836 	    letoh32(cp->ioc_loginfo));
2837 	DNPRINTF(MPI_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
2838 	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2839 	    cp->config_header.page_version,
2840 	    cp->config_header.page_length,
2841 	    cp->config_header.page_number,
2842 	    cp->config_header.page_type);
2843 
2844 	if (lemtoh16(&cp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
2845 		rv = 1;
2846 	else if (ISSET(flags, MPI_PG_EXTENDED)) {
2847 		memset(ehdr, 0, sizeof(*ehdr));
2848 		ehdr->page_version = cp->config_header.page_version;
2849 		ehdr->page_number = cp->config_header.page_number;
2850 		ehdr->page_type = cp->config_header.page_type;
2851 		ehdr->ext_page_length = cp->ext_page_length;
2852 		ehdr->ext_page_type = cp->ext_page_type;
2853 	} else
2854 		*hdr = cp->config_header;
2855 
2856 	mpi_push_reply(sc, ccb->ccb_rcb);
2857 	scsi_io_put(&sc->sc_iopool, ccb);
2858 
2859 	return (rv);
2860 }
2861 
2862 int
2863 mpi_req_cfg_page(struct mpi_softc *sc, u_int32_t address, int flags,
2864     void *p, int read, void *page, size_t len)
2865 {
2866 	struct mpi_ccb				*ccb;
2867 	struct mpi_msg_config_request		*cq;
2868 	struct mpi_msg_config_reply		*cp;
2869 	struct mpi_cfg_hdr			*hdr = p;
2870 	struct mpi_ecfg_hdr			*ehdr = p;
2871 	char					*kva;
2872 	int					page_length;
2873 	int					rv = 0;
2874 
2875 	DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page address: %d read: %d type: %x\n",
2876 	    DEVNAME(sc), address, read, hdr->page_type);
2877 
2878 	page_length = ISSET(flags, MPI_PG_EXTENDED) ?
2879 	    lemtoh16(&ehdr->ext_page_length) : hdr->page_length;
2880 
2881 	if (len > MPI_REQUEST_SIZE - sizeof(struct mpi_msg_config_request) ||
2882 	    len < page_length * 4)
2883 		return (1);
2884 
2885 	ccb = scsi_io_get(&sc->sc_iopool,
2886 	    ISSET(flags, MPI_PG_POLL) ? SCSI_NOSLEEP : 0);
2887 	if (ccb == NULL) {
2888 		DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page ccb_get\n", DEVNAME(sc));
2889 		return (1);
2890 	}
2891 
2892 	cq = ccb->ccb_cmd;
2893 
2894 	cq->function = MPI_FUNCTION_CONFIG;
2895 
2896 	cq->action = (read ? MPI_CONFIG_REQ_ACTION_PAGE_READ_CURRENT :
2897 	    MPI_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT);
2898 
2899 	if (ISSET(flags, MPI_PG_EXTENDED)) {
2900 		cq->config_header.page_version = ehdr->page_version;
2901 		cq->config_header.page_number = ehdr->page_number;
2902 		cq->config_header.page_type = ehdr->page_type;
2903 		cq->ext_page_len = ehdr->ext_page_length;
2904 		cq->ext_page_type = ehdr->ext_page_type;
2905 	} else
2906 		cq->config_header = *hdr;
2907 	cq->config_header.page_type &= MPI_CONFIG_REQ_PAGE_TYPE_MASK;
2908 	htolem32(&cq->page_address, address);
2909 	htolem32(&cq->page_buffer.sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |
2910 	    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL |
2911 	    (page_length * 4) |
2912 	    (read ? MPI_SGE_FL_DIR_IN : MPI_SGE_FL_DIR_OUT));
2913 
2914 	/* bounce the page via the request space to avoid more bus_dma games */
2915 	mpi_dvatosge(&cq->page_buffer, ccb->ccb_cmd_dva +
2916 	    sizeof(struct mpi_msg_config_request));
2917 
2918 	kva = ccb->ccb_cmd;
2919 	kva += sizeof(struct mpi_msg_config_request);
2920 	if (!read)
2921 		memcpy(kva, page, len);
2922 
2923 	ccb->ccb_done = mpi_empty_done;
2924 	if (ISSET(flags, MPI_PG_POLL)) {
2925 		if (mpi_poll(sc, ccb, 50000) != 0) {
2926 			DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page poll\n",
2927 			    DEVNAME(sc));
2928 			return (1);
2929 		}
2930 	} else
2931 		mpi_wait(sc, ccb);
2932 
2933 	if (ccb->ccb_rcb == NULL) {
2934 		scsi_io_put(&sc->sc_iopool, ccb);
2935 		return (1);
2936 	}
2937 	cp = ccb->ccb_rcb->rcb_reply;
2938 
2939 	DNPRINTF(MPI_D_MISC, "%s:  action: 0x%02x msg_length: %d function: "
2940 	    "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function);
2941 	DNPRINTF(MPI_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
2942 	    "msg_flags: 0x%02x\n", DEVNAME(sc),
2943 	    letoh16(cp->ext_page_length), cp->ext_page_type,
2944 	    cp->msg_flags);
2945 	DNPRINTF(MPI_D_MISC, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2946 	    letoh32(cp->msg_context));
2947 	DNPRINTF(MPI_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2948 	    letoh16(cp->ioc_status));
2949 	DNPRINTF(MPI_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2950 	    letoh32(cp->ioc_loginfo));
2951 	DNPRINTF(MPI_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
2952 	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2953 	    cp->config_header.page_version,
2954 	    cp->config_header.page_length,
2955 	    cp->config_header.page_number,
2956 	    cp->config_header.page_type);
2957 
2958 	if (lemtoh16(&cp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
2959 		rv = 1;
2960 	else if (read)
2961 		memcpy(page, kva, len);
2962 
2963 	mpi_push_reply(sc, ccb->ccb_rcb);
2964 	scsi_io_put(&sc->sc_iopool, ccb);
2965 
2966 	return (rv);
2967 }
2968 
2969 int
2970 mpi_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
2971 {
2972 	struct mpi_softc	*sc = (struct mpi_softc *)link->adapter_softc;
2973 
2974 	DNPRINTF(MPI_D_IOCTL, "%s: mpi_scsi_ioctl\n", DEVNAME(sc));
2975 
2976 	switch (cmd) {
2977 	case DIOCGCACHE:
2978 	case DIOCSCACHE:
2979 		if (ISSET(link->flags, SDEV_VIRTUAL)) {
2980 			return (mpi_ioctl_cache(link, cmd,
2981 			    (struct dk_cache *)addr));
2982 		}
2983 		break;
2984 
2985 	default:
2986 		if (sc->sc_ioctl)
2987 			return (sc->sc_ioctl(link->adapter_softc, cmd, addr));
2988 
2989 		break;
2990 	}
2991 
2992 	return (ENOTTY);
2993 }
2994 
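/*
 * DIOCGCACHE/DIOCSCACHE on a RAID volume: the write cache enable bit
 * lives in the settings of RAID volume page 0, and changing it is
 * done with a change-volume-settings RAID action rather than by
 * writing the config page back.
 */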
2995 int
2996 mpi_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc)
2997 {
2998 	struct mpi_softc	*sc = (struct mpi_softc *)link->adapter_softc;
2999 	struct mpi_ccb		*ccb;
3000 	int			len, rv;
3001 	struct mpi_cfg_hdr	hdr;
3002 	struct mpi_cfg_raid_vol_pg0 *rpg0;
3003 	int			enabled;
3004 	struct mpi_msg_raid_action_request *req;
3005 	struct mpi_msg_raid_action_reply *rep;
3006 	struct mpi_raid_settings settings;
3007 
3008 	rv = mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3009 	    link->target, MPI_PG_POLL, &hdr);
3010 	if (rv != 0)
3011 		return (EIO);
3012 
3013 	len = sizeof(*rpg0) + sc->sc_vol_page->max_physdisks *
3014 	    sizeof(struct mpi_cfg_raid_vol_pg0_physdisk);
3015 	rpg0 = malloc(len, M_TEMP, M_NOWAIT);
3016 	if (rpg0 == NULL)
3017 		return (ENOMEM);
3018 
3019 	if (mpi_req_cfg_page(sc, link->target, MPI_PG_POLL, &hdr, 1,
3020 	    rpg0, len) != 0) {
3021 		DNPRINTF(MPI_D_RAID, "%s: can't get RAID vol cfg page 0\n",
3022 		    DEVNAME(sc));
3023 		rv = EIO;
3024 		goto done;
3025 	}
3026 
3027 	enabled = ISSET(lemtoh16(&rpg0->settings.volume_settings),
3028 	    MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN) ? 1 : 0;
3029 
3030 	if (cmd == DIOCGCACHE) {
3031 		dc->wrcache = enabled;
3032 		dc->rdcache = 0;
3033 		goto done;
3034 	} /* else DIOCSCACHE */
3035 
3036 	if (dc->rdcache) {
3037 		rv = EOPNOTSUPP;
3038 		goto done;
3039 	}
3040 
3041 	if (((dc->wrcache) ? 1 : 0) == enabled)
3042 		goto done;
3043 
3044 	settings = rpg0->settings;
3045 	if (dc->wrcache) {
3046 		SET(settings.volume_settings,
3047 		    htole16(MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN));
3048 	} else {
3049 		CLR(settings.volume_settings,
3050 		    htole16(MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN));
3051 	}
3052 
3053 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
3054 	if (ccb == NULL) {
3055 		rv = ENOMEM;
3056 		goto done;
3057 	}
3058 
3059 	req = ccb->ccb_cmd;
3060 	req->function = MPI_FUNCTION_RAID_ACTION;
3061 	req->action = MPI_MSG_RAID_ACTION_CH_VOL_SETTINGS;
3062 	req->vol_id = rpg0->volume_id;
3063 	req->vol_bus = rpg0->volume_bus;
3064 
3065 	memcpy(&req->data_word, &settings, sizeof(req->data_word));
3066 	ccb->ccb_done = mpi_empty_done;
3067 	if (mpi_poll(sc, ccb, 50000) != 0) {
3068 		rv = EIO;
3069 		goto done;
3070 	}
3071 
3072 	if (ccb->ccb_rcb == NULL)
3073 		panic("%s: raid volume settings change failed", DEVNAME(sc));
3074 	rep = ccb->ccb_rcb->rcb_reply;
3075 
3076 	switch (lemtoh16(&rep->action_status)) {
3077 	case MPI_RAID_ACTION_STATUS_OK:
3078 		rv = 0;
3079 		break;
3080 	default:
3081 		rv = EIO;
3082 		break;
3083 	}
3084 
3085 	mpi_push_reply(sc, ccb->ccb_rcb);
3086 	scsi_io_put(&sc->sc_iopool, ccb);
3087 
3088 done:
3089 	free(rpg0, M_TEMP, 0);
3090 	return (rv);
3091 }
3092 
3093 #if NBIO > 0
3094 int
3095 mpi_bio_get_pg0_raid(struct mpi_softc *sc, int id)
3096 {
3097 	int			len, rv = EINVAL;
3098 	u_int32_t		address;
3099 	struct mpi_cfg_hdr	hdr;
3100 	struct mpi_cfg_raid_vol_pg0 *rpg0;
3101 
3102 	/* get IOC page 2 */
3103 	if (mpi_req_cfg_page(sc, 0, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page,
3104 	    sc->sc_cfg_hdr.page_length * 4) != 0) {
3105 		DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid unable to "
3106 		    "fetch IOC page 2\n", DEVNAME(sc));
3107 		goto done;
3108 	}
3109 
3110 	/* XXX return something other than EINVAL to indicate id is in the hot spare range */
3111 	if (id > sc->sc_vol_page->active_vols) {
3112 		DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid invalid vol "
3113 		    "id: %d\n", DEVNAME(sc), id);
3114 		goto done;
3115 	}
3116 
3117 	/* replace current buffer with new one */
3118 	len = sizeof *rpg0 + sc->sc_vol_page->max_physdisks *
3119 	    sizeof(struct mpi_cfg_raid_vol_pg0_physdisk);
3120 	rpg0 = malloc(len, M_DEVBUF, M_WAITOK | M_CANFAIL);
3121 	if (rpg0 == NULL) {
3122 		printf("%s: can't get memory for RAID page 0, "
3123 		    "bio disabled\n", DEVNAME(sc));
3124 		goto done;
3125 	}
3126 	if (sc->sc_rpg0)
3127 		free(sc->sc_rpg0, M_DEVBUF, 0);
3128 	sc->sc_rpg0 = rpg0;
3129 
3130 	/* get raid vol page 0 */
3131 	address = sc->sc_vol_list[id].vol_id |
3132 	    (sc->sc_vol_list[id].vol_bus << 8);
3133 	if (mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3134 	    address, 0, &hdr) != 0)
3135 		goto done;
3136 	if (mpi_req_cfg_page(sc, address, 0, &hdr, 1, rpg0, len)) {
3137 		DNPRINTF(MPI_D_RAID, "%s: can't get RAID vol cfg page 0\n",
3138 		    DEVNAME(sc));
3139 		goto done;
3140 	}
3141 
3142 	rv = 0;
3143 done:
3144 	return (rv);
3145 }
3146 
3147 int
3148 mpi_ioctl(struct device *dev, u_long cmd, caddr_t addr)
3149 {
3150 	struct mpi_softc	*sc = (struct mpi_softc *)dev;
3151 	int error = 0;
3152 
3153 	DNPRINTF(MPI_D_IOCTL, "%s: mpi_ioctl ", DEVNAME(sc));
3154 
3155 	/* make sure we have bio enabled */
3156 	if (sc->sc_ioctl != mpi_ioctl)
3157 		return (EINVAL);
3158 
3159 	rw_enter_write(&sc->sc_lock);
3160 
3161 	switch (cmd) {
3162 	case BIOCINQ:
3163 		DNPRINTF(MPI_D_IOCTL, "inq\n");
3164 		error = mpi_ioctl_inq(sc, (struct bioc_inq *)addr);
3165 		break;
3166 
3167 	case BIOCVOL:
3168 		DNPRINTF(MPI_D_IOCTL, "vol\n");
3169 		error = mpi_ioctl_vol(sc, (struct bioc_vol *)addr);
3170 		break;
3171 
3172 	case BIOCDISK:
3173 		DNPRINTF(MPI_D_IOCTL, "disk\n");
3174 		error = mpi_ioctl_disk(sc, (struct bioc_disk *)addr);
3175 		break;
3176 
3177 	case BIOCALARM:
3178 		DNPRINTF(MPI_D_IOCTL, "alarm\n");
3179 		break;
3180 
3181 	case BIOCBLINK:
3182 		DNPRINTF(MPI_D_IOCTL, "blink\n");
3183 		break;
3184 
3185 	case BIOCSETSTATE:
3186 		DNPRINTF(MPI_D_IOCTL, "setstate\n");
3187 		error = mpi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
3188 		break;
3189 
3190 	default:
3191 		DNPRINTF(MPI_D_IOCTL, " invalid ioctl\n");
3192 		error = EINVAL;
3193 	}
3194 
3195 	rw_exit_write(&sc->sc_lock);
3196 
3197 	return (error);
3198 }
3199 
3200 int
3201 mpi_ioctl_inq(struct mpi_softc *sc, struct bioc_inq *bi)
3202 {
3203 	if (!(sc->sc_flags & MPI_F_RAID)) {
3204 		bi->bi_novol = 0;
3205 		bi->bi_nodisk = 0;
3206 	}
3207 
3208 	if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page,
3209 	    sc->sc_cfg_hdr.page_length * 4) != 0) {
3210 		DNPRINTF(MPI_D_IOCTL, "%s: mpi_get_raid unable to fetch IOC "
3211 		    "page 2\n", DEVNAME(sc));
3212 		return (EINVAL);
3213 	}
3214 
3215 	DNPRINTF(MPI_D_IOCTL, "%s:  active_vols: %d max_vols: %d "
3216 	    "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc),
3217 	    sc->sc_vol_page->active_vols, sc->sc_vol_page->max_vols,
3218 	    sc->sc_vol_page->active_physdisks, sc->sc_vol_page->max_physdisks);
3219 
3220 	bi->bi_novol = sc->sc_vol_page->active_vols;
3221 	bi->bi_nodisk = sc->sc_vol_page->active_physdisks;
3222 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
3223 
3224 	return (0);
3225 }
3226 
3227 int
3228 mpi_ioctl_vol(struct mpi_softc *sc, struct bioc_vol *bv)
3229 {
3230 	int			i, vol, id, rv = EINVAL;
3231 	struct device		*dev;
3232 	struct scsi_link	*link;
3233 	struct mpi_cfg_raid_vol_pg0 *rpg0;
3234 	char			*vendp;
3235 
3236 	id = bv->bv_volid;
3237 	if (mpi_bio_get_pg0_raid(sc, id))
3238 		goto done;
3239 
3240 	if (id > sc->sc_vol_page->active_vols)
3241 		return (EINVAL); /* XXX deal with hot spares */
3242 
3243 	rpg0 = sc->sc_rpg0;
3244 	if (rpg0 == NULL)
3245 		goto done;
3246 
3247 	/* determine status */
3248 	switch (rpg0->volume_state) {
3249 	case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL:
3250 		bv->bv_status = BIOC_SVONLINE;
3251 		break;
3252 	case MPI_CFG_RAID_VOL_0_STATE_DEGRADED:
3253 		bv->bv_status = BIOC_SVDEGRADED;
3254 		break;
3255 	case MPI_CFG_RAID_VOL_0_STATE_FAILED:
3256 	case MPI_CFG_RAID_VOL_0_STATE_MISSING:
3257 		bv->bv_status = BIOC_SVOFFLINE;
3258 		break;
3259 	default:
3260 		bv->bv_status = BIOC_SVINVALID;
3261 	}
3262 
3263 	/* override status if the volume is resyncing */
3264 	if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING)
3265 		bv->bv_status = BIOC_SVREBUILD;
3266 
3267 	bv->bv_size = (u_quad_t)lemtoh32(&rpg0->max_lba) * 512;
3268 
3269 	switch (sc->sc_vol_list[id].vol_type) {
3270 	case MPI_CFG_RAID_TYPE_RAID_IS:
3271 		bv->bv_level = 0;
3272 		break;
3273 	case MPI_CFG_RAID_TYPE_RAID_IME:
3274 	case MPI_CFG_RAID_TYPE_RAID_IM:
3275 		bv->bv_level = 1;
3276 		break;
3277 	case MPI_CFG_RAID_TYPE_RAID_5:
3278 		bv->bv_level = 5;
3279 		break;
3280 	case MPI_CFG_RAID_TYPE_RAID_6:
3281 		bv->bv_level = 6;
3282 		break;
3283 	case MPI_CFG_RAID_TYPE_RAID_10:
3284 		bv->bv_level = 10;
3285 		break;
3286 	case MPI_CFG_RAID_TYPE_RAID_50:
3287 		bv->bv_level = 50;
3288 		break;
3289 	default:
3290 		bv->bv_level = -1;
3291 	}
3292 
3293 	bv->bv_nodisk = rpg0->num_phys_disks;
3294 
3295 	for (i = 0, vol = -1; i < sc->sc_buswidth; i++) {
3296 		link = scsi_get_link(sc->sc_scsibus, i, 0);
3297 		if (link == NULL)
3298 			continue;
3299 
3300 		/* skip if not a virtual disk */
3301 		if (!(link->flags & SDEV_VIRTUAL))
3302 			continue;
3303 
3304 		vol++;
3305 		/* are we it? */
3306 		if (vol == bv->bv_volid) {
3307 			dev = link->device_softc;
3308 			vendp = link->inqdata.vendor;
3309 			memcpy(bv->bv_vendor, vendp, sizeof bv->bv_vendor);
3310 			bv->bv_vendor[sizeof(bv->bv_vendor) - 1] = '\0';
3311 			strlcpy(bv->bv_dev, dev->dv_xname, sizeof bv->bv_dev);
3312 			break;
3313 		}
3314 	}
3315 	rv = 0;
3316 done:
3317 	return (rv);
3318 }
3319 
3320 int
3321 mpi_ioctl_disk(struct mpi_softc *sc, struct bioc_disk *bd)
3322 {
3323 	int			pdid, id, rv = EINVAL;
3324 	u_int32_t		address;
3325 	struct mpi_cfg_hdr	hdr;
3326 	struct mpi_cfg_raid_vol_pg0 *rpg0;
3327 	struct mpi_cfg_raid_vol_pg0_physdisk *physdisk;
3328 	struct mpi_cfg_raid_physdisk_pg0 pdpg0;
3329 
3330 	id = bd->bd_volid;
3331 	if (mpi_bio_get_pg0_raid(sc, id))
3332 		goto done;
3333 
3334 	if (id > sc->sc_vol_page->active_vols)
3335 		return (EINVAL); /* XXX deal with hot spares */
3336 
3337 	rpg0 = sc->sc_rpg0;
3338 	if (rpg0 == NULL)
3339 		goto done;
3340 
3341 	pdid = bd->bd_diskid;
3342 	if (pdid > rpg0->num_phys_disks)
3343 		goto done;
3344 	physdisk = (struct mpi_cfg_raid_vol_pg0_physdisk *)(rpg0 + 1);
3345 	physdisk += pdid;
3346 
3347 	/* get raid phys disk page 0 */
3348 	address = physdisk->phys_disk_num;
3349 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_PD, 0, address,
3350 	    &hdr) != 0)
3351 		goto done;
3352 	if (mpi_cfg_page(sc, address, &hdr, 1, &pdpg0, sizeof pdpg0)) {
3353 		bd->bd_status = BIOC_SDFAILED;
3354 		return (0);
3355 	}
3356 	bd->bd_channel = pdpg0.phys_disk_bus;
3357 	bd->bd_target = pdpg0.phys_disk_id;
3358 	bd->bd_lun = 0;
3359 	bd->bd_size = (u_quad_t)lemtoh32(&pdpg0.max_lba) * 512;
3360 	strlcpy(bd->bd_vendor, (char *)pdpg0.vendor_id, sizeof(bd->bd_vendor));
3361 
3362 	switch (pdpg0.phys_disk_state) {
3363 	case MPI_CFG_RAID_PHYDISK_0_STATE_ONLINE:
3364 		bd->bd_status = BIOC_SDONLINE;
3365 		break;
3366 	case MPI_CFG_RAID_PHYDISK_0_STATE_MISSING:
3367 	case MPI_CFG_RAID_PHYDISK_0_STATE_FAILED:
3368 		bd->bd_status = BIOC_SDFAILED;
3369 		break;
3370 	case MPI_CFG_RAID_PHYDISK_0_STATE_HOSTFAIL:
3371 	case MPI_CFG_RAID_PHYDISK_0_STATE_OTHER:
3372 	case MPI_CFG_RAID_PHYDISK_0_STATE_OFFLINE:
3373 		bd->bd_status = BIOC_SDOFFLINE;
3374 		break;
3375 	case MPI_CFG_RAID_PHYDISK_0_STATE_INIT:
3376 		bd->bd_status = BIOC_SDSCRUB;
3377 		break;
3378 	case MPI_CFG_RAID_PHYDISK_0_STATE_INCOMPAT:
3379 	default:
3380 		bd->bd_status = BIOC_SDINVALID;
3381 		break;
3382 	}
3383 
3384 	/* XXX figure this out */
3385 	/* bd_serial[32]; */
3386 	/* bd_procdev[16]; */
3387 
3388 	rv = 0;
3389 done:
3390 	return (rv);
3391 }
3392 
3393 int
3394 mpi_ioctl_setstate(struct mpi_softc *sc, struct bioc_setstate *bs)
3395 {
3396 	return (ENOTTY);
3397 }
3398 
3399 #ifndef SMALL_KERNEL
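/*
 * One drive sensor is created per virtual disk (RAID volume) on the
 * bus; mpi_refresh_sensors() maps the volume state from RAID volume
 * page 0 onto the sensor every 10 seconds.
 */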
3400 int
3401 mpi_create_sensors(struct mpi_softc *sc)
3402 {
3403 	struct device		*dev;
3404 	struct scsi_link	*link;
3405 	int			i, vol;
3406 
3407 	/* count volumes */
3408 	for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
3409 		link = scsi_get_link(sc->sc_scsibus, i, 0);
3410 		if (link == NULL)
3411 			continue;
3412 		/* skip if not a virtual disk */
3413 		if (!(link->flags & SDEV_VIRTUAL))
3414 			continue;
3415 
3416 		vol++;
3417 	}
3418 	if (vol == 0)
3419 		return (0);
3420 
3421 	sc->sc_sensors = mallocarray(vol, sizeof(struct ksensor),
3422 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3423 	if (sc->sc_sensors == NULL)
3424 		return (1);
3425 
3426 	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
3427 	    sizeof(sc->sc_sensordev.xname));
3428 
3429 	for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
3430 		link = scsi_get_link(sc->sc_scsibus, i, 0);
3431 		if (link == NULL)
3432 			continue;
3433 		/* skip if not a virtual disk */
3434 		if (!(link->flags & SDEV_VIRTUAL))
3435 			continue;
3436 
3437 		dev = link->device_softc;
3438 		strlcpy(sc->sc_sensors[vol].desc, dev->dv_xname,
3439 		    sizeof(sc->sc_sensors[vol].desc));
3440 		sc->sc_sensors[vol].type = SENSOR_DRIVE;
3441 		sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN;
3442 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[vol]);
3443 
3444 		vol++;
3445 	}
3446 
3447 	if (sensor_task_register(sc, mpi_refresh_sensors, 10) == NULL)
3448 		goto bad;
3449 
3450 	sensordev_install(&sc->sc_sensordev);
3451 
3452 	return (0);
3453 
3454 bad:
3455 	free(sc->sc_sensors, M_DEVBUF, 0);
3456 	return (1);
3457 }
3458 
3459 void
3460 mpi_refresh_sensors(void *arg)
3461 {
3462 	int			i, vol;
3463 	struct scsi_link	*link;
3464 	struct mpi_softc	*sc = arg;
3465 	struct mpi_cfg_raid_vol_pg0 *rpg0;
3466 
3467 	rw_enter_write(&sc->sc_lock);
3468 
3469 	for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
3470 		link = scsi_get_link(sc->sc_scsibus, i, 0);
3471 		if (link == NULL)
3472 			continue;
3473 		/* skip if not a virtual disk */
3474 		if (!(link->flags & SDEV_VIRTUAL))
3475 			continue;
3476 
3477 		if (mpi_bio_get_pg0_raid(sc, vol))
3478 			continue;
3479 
3480 		rpg0 = sc->sc_rpg0;
3481 		if (rpg0 == NULL)
3482 			goto done;
3483 
3484 		/* determine status */
3485 		switch (rpg0->volume_state) {
3486 		case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL:
3487 			sc->sc_sensors[vol].value = SENSOR_DRIVE_ONLINE;
3488 			sc->sc_sensors[vol].status = SENSOR_S_OK;
3489 			break;
3490 		case MPI_CFG_RAID_VOL_0_STATE_DEGRADED:
3491 			sc->sc_sensors[vol].value = SENSOR_DRIVE_PFAIL;
3492 			sc->sc_sensors[vol].status = SENSOR_S_WARN;
3493 			break;
3494 		case MPI_CFG_RAID_VOL_0_STATE_FAILED:
3495 		case MPI_CFG_RAID_VOL_0_STATE_MISSING:
3496 			sc->sc_sensors[vol].value = SENSOR_DRIVE_FAIL;
3497 			sc->sc_sensors[vol].status = SENSOR_S_CRIT;
3498 			break;
3499 		default:
3500 			sc->sc_sensors[vol].value = 0; /* unknown */
3501 			sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN;
3502 		}
3503 
3504 		/* override status if the volume is resyncing */
3505 		if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING) {
3506 			sc->sc_sensors[vol].value = SENSOR_DRIVE_REBUILD;
3507 			sc->sc_sensors[vol].status = SENSOR_S_WARN;
3508 		}
3509 
3510 		vol++;
3511 	}
3512 done:
3513 	rw_exit_write(&sc->sc_lock);
3514 }
3515 #endif /* SMALL_KERNEL */
3516 #endif /* NBIO > 0 */
3517