1 /*	$OpenBSD: mpi.c,v 1.189 2014/03/25 07:26:50 dlg Exp $ */
2 
3 /*
4  * Copyright (c) 2005, 2006, 2009 David Gwynne <dlg@openbsd.org>
5  * Copyright (c) 2005, 2008, 2009 Marco Peereboom <marco@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "bio.h"
21 
22 #include <sys/param.h>
23 #include <sys/systm.h>
24 #include <sys/buf.h>
25 #include <sys/device.h>
26 #include <sys/ioctl.h>
27 #include <sys/proc.h>
28 #include <sys/malloc.h>
29 #include <sys/kernel.h>
30 #include <sys/mutex.h>
31 #include <sys/rwlock.h>
32 #include <sys/sensors.h>
33 #include <sys/dkio.h>
34 
35 #include <machine/bus.h>
36 
37 #include <scsi/scsi_all.h>
38 #include <scsi/scsiconf.h>
39 
40 #include <dev/biovar.h>
41 #include <dev/ic/mpireg.h>
42 #include <dev/ic/mpivar.h>
43 
44 #ifdef MPI_DEBUG
45 uint32_t	mpi_debug = 0
46 /*		    | MPI_D_CMD */
47 /*		    | MPI_D_INTR */
48 /*		    | MPI_D_MISC */
49 /*		    | MPI_D_DMA */
50 /*		    | MPI_D_IOCTL */
51 /*		    | MPI_D_RW */
52 /*		    | MPI_D_MEM */
53 /*		    | MPI_D_CCB */
54 /*		    | MPI_D_PPR */
55 /*		    | MPI_D_RAID */
56 /*		    | MPI_D_EVT */
57 		;
58 #endif
59 
60 struct cfdriver mpi_cd = {
61 	NULL,
62 	"mpi",
63 	DV_DULL
64 };
65 
66 void			mpi_scsi_cmd(struct scsi_xfer *);
67 void			mpi_scsi_cmd_done(struct mpi_ccb *);
68 void			mpi_minphys(struct buf *bp, struct scsi_link *sl);
69 int			mpi_scsi_probe(struct scsi_link *);
70 int			mpi_scsi_ioctl(struct scsi_link *, u_long, caddr_t,
71 			    int);
72 
73 struct scsi_adapter mpi_switch = {
74 	mpi_scsi_cmd,
75 	mpi_minphys,
76 	mpi_scsi_probe,
77 	NULL,
78 	mpi_scsi_ioctl
79 };
80 
81 struct mpi_dmamem	*mpi_dmamem_alloc(struct mpi_softc *, size_t);
82 void			mpi_dmamem_free(struct mpi_softc *,
83 			    struct mpi_dmamem *);
84 int			mpi_alloc_ccbs(struct mpi_softc *);
85 void			*mpi_get_ccb(void *);
86 void			mpi_put_ccb(void *, void *);
87 int			mpi_alloc_replies(struct mpi_softc *);
88 void			mpi_push_replies(struct mpi_softc *);
89 void			mpi_push_reply(struct mpi_softc *, struct mpi_rcb *);
90 
91 void			mpi_start(struct mpi_softc *, struct mpi_ccb *);
92 int			mpi_poll(struct mpi_softc *, struct mpi_ccb *, int);
93 void			mpi_poll_done(struct mpi_ccb *);
94 void			mpi_reply(struct mpi_softc *, u_int32_t);
95 
96 void			mpi_wait(struct mpi_softc *sc, struct mpi_ccb *);
97 void			mpi_wait_done(struct mpi_ccb *);
98 
99 int			mpi_cfg_spi_port(struct mpi_softc *);
100 void			mpi_squash_ppr(struct mpi_softc *);
101 void			mpi_run_ppr(struct mpi_softc *);
102 int			mpi_ppr(struct mpi_softc *, struct scsi_link *,
103 			    struct mpi_cfg_raid_physdisk *, int, int, int);
104 int			mpi_inq(struct mpi_softc *, u_int16_t, int);
105 
106 int			mpi_cfg_sas(struct mpi_softc *);
107 int			mpi_cfg_fc(struct mpi_softc *);
108 
109 void			mpi_timeout_xs(void *);
110 int			mpi_load_xs(struct mpi_ccb *);
111 
112 u_int32_t		mpi_read(struct mpi_softc *, bus_size_t);
113 void			mpi_write(struct mpi_softc *, bus_size_t, u_int32_t);
114 int			mpi_wait_eq(struct mpi_softc *, bus_size_t, u_int32_t,
115 			    u_int32_t);
116 int			mpi_wait_ne(struct mpi_softc *, bus_size_t, u_int32_t,
117 			    u_int32_t);
118 
119 int			mpi_init(struct mpi_softc *);
120 int			mpi_reset_soft(struct mpi_softc *);
121 int			mpi_reset_hard(struct mpi_softc *);
122 
123 int			mpi_handshake_send(struct mpi_softc *, void *, size_t);
124 int			mpi_handshake_recv_dword(struct mpi_softc *,
125 			    u_int32_t *);
126 int			mpi_handshake_recv(struct mpi_softc *, void *, size_t);
127 
128 void			mpi_empty_done(struct mpi_ccb *);
129 
130 int			mpi_iocinit(struct mpi_softc *);
131 int			mpi_iocfacts(struct mpi_softc *);
132 int			mpi_portfacts(struct mpi_softc *);
133 int			mpi_portenable(struct mpi_softc *);
134 int			mpi_cfg_coalescing(struct mpi_softc *);
135 void			mpi_get_raid(struct mpi_softc *);
136 int			mpi_fwupload(struct mpi_softc *);
137 int			mpi_scsi_probe_virtual(struct scsi_link *);
138 
139 int			mpi_eventnotify(struct mpi_softc *);
140 void			mpi_eventnotify_done(struct mpi_ccb *);
141 void			mpi_eventnotify_free(struct mpi_softc *,
142 			    struct mpi_rcb *);
143 void			mpi_eventack(void *, void *);
144 void			mpi_eventack_done(struct mpi_ccb *);
145 int			mpi_evt_sas(struct mpi_softc *, struct mpi_rcb *);
146 void			mpi_evt_sas_detach(void *, void *);
147 void			mpi_evt_sas_detach_done(struct mpi_ccb *);
148 void			mpi_evt_fc_rescan(struct mpi_softc *);
149 void			mpi_fc_rescan(void *, void *);
150 
151 int			mpi_req_cfg_header(struct mpi_softc *, u_int8_t,
152 			    u_int8_t, u_int32_t, int, void *);
153 int			mpi_req_cfg_page(struct mpi_softc *, u_int32_t, int,
154 			    void *, int, void *, size_t);
155 
156 int			mpi_ioctl_cache(struct scsi_link *, u_long,
157 			    struct dk_cache *);
158 
159 #if NBIO > 0
160 int		mpi_bio_get_pg0_raid(struct mpi_softc *, int);
161 int		mpi_ioctl(struct device *, u_long, caddr_t);
162 int		mpi_ioctl_inq(struct mpi_softc *, struct bioc_inq *);
163 int		mpi_ioctl_vol(struct mpi_softc *, struct bioc_vol *);
164 int		mpi_ioctl_disk(struct mpi_softc *, struct bioc_disk *);
165 int		mpi_ioctl_setstate(struct mpi_softc *, struct bioc_setstate *);
166 #ifndef SMALL_KERNEL
167 int		mpi_create_sensors(struct mpi_softc *);
168 void		mpi_refresh_sensors(void *);
169 #endif /* SMALL_KERNEL */
170 #endif /* NBIO > 0 */
171 
172 #define DEVNAME(s)		((s)->sc_dev.dv_xname)
173 
174 #define	dwordsof(s)		(sizeof(s) / sizeof(u_int32_t))
175 
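/*
 * Register access shorthands. Reading MPI_REPLY_QUEUE pops the next
 * posted reply from the IOC reply FIFO (0xffffffff when the FIFO is
 * empty); writing it hands a free reply frame back to the IOC.
 */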
176 #define mpi_read_db(s)		mpi_read((s), MPI_DOORBELL)
177 #define mpi_write_db(s, v)	mpi_write((s), MPI_DOORBELL, (v))
178 #define mpi_read_intr(s)	bus_space_read_4((s)->sc_iot, (s)->sc_ioh, \
179 				    MPI_INTR_STATUS)
180 #define mpi_write_intr(s, v)	mpi_write((s), MPI_INTR_STATUS, (v))
181 #define mpi_pop_reply(s)	bus_space_read_4((s)->sc_iot, (s)->sc_ioh, \
182 				    MPI_REPLY_QUEUE)
183 #define mpi_push_reply_db(s, v) bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
184 				    MPI_REPLY_QUEUE, (v))
185 
186 #define mpi_wait_db_int(s)	mpi_wait_ne((s), MPI_INTR_STATUS, \
187 				    MPI_INTR_STATUS_DOORBELL, 0)
188 #define mpi_wait_db_ack(s)	mpi_wait_eq((s), MPI_INTR_STATUS, \
189 				    MPI_INTR_STATUS_IOCDOORBELL, 0)
190 
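/*
 * Flags passed to mpi_req_cfg_header()/mpi_req_cfg_page():
 * MPI_PG_EXTENDED selects an extended config page (SAS pages), and
 * MPI_PG_POLL asks for the config request to be polled rather than
 * interrupt driven, so pages can be fetched during attach before
 * interrupts are enabled.
 */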
191 #define MPI_PG_EXTENDED		(1<<0)
192 #define MPI_PG_POLL		(1<<1)
193 #define MPI_PG_FMT		"\020" "\002POLL" "\001EXTENDED"
194 
195 #define mpi_cfg_header(_s, _t, _n, _a, _h) \
196 	mpi_req_cfg_header((_s), (_t), (_n), (_a), \
197 	    MPI_PG_POLL, (_h))
198 #define mpi_ecfg_header(_s, _t, _n, _a, _h) \
199 	mpi_req_cfg_header((_s), (_t), (_n), (_a), \
200 	    MPI_PG_POLL|MPI_PG_EXTENDED, (_h))
201 
202 #define mpi_cfg_page(_s, _a, _h, _r, _p, _l) \
203 	mpi_req_cfg_page((_s), (_a), MPI_PG_POLL, \
204 	    (_h), (_r), (_p), (_l))
205 #define mpi_ecfg_page(_s, _a, _h, _r, _p, _l) \
206 	mpi_req_cfg_page((_s), (_a), MPI_PG_POLL|MPI_PG_EXTENDED, \
207 	    (_h), (_r), (_p), (_l))
208 
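/*
 * Split a 64-bit device address into the two little-endian 32-bit
 * words of an SGE.
 */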
209 static inline void
210 mpi_dvatosge(struct mpi_sge *sge, u_int64_t dva)
211 {
212 	htolem32(&sge->sg_addr_lo, dva);
213 	htolem32(&sge->sg_addr_hi, dva >> 32);
214 }
215 
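/*
 * Bring the IOC up and attach scsibus: reset/ready the chip, fetch
 * IOC facts, allocate the request and reply frames, send IOCInit,
 * wait for the OPERATIONAL state, then configure the port (SPI, SAS
 * or FC) before attaching the bus.
 */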
216 int
217 mpi_attach(struct mpi_softc *sc)
218 {
219 	struct scsibus_attach_args	saa;
220 	struct mpi_ccb			*ccb;
221 
222 	printf("\n");
223 
224 	rw_init(&sc->sc_lock, "mpi_lock");
225 	mtx_init(&sc->sc_evt_rescan_mtx, IPL_BIO);
226 
227 	/* disable interrupts */
228 	mpi_write(sc, MPI_INTR_MASK,
229 	    MPI_INTR_MASK_REPLY | MPI_INTR_MASK_DOORBELL);
230 
231 	if (mpi_init(sc) != 0) {
232 		printf("%s: unable to initialise\n", DEVNAME(sc));
233 		return (1);
234 	}
235 
236 	if (mpi_iocfacts(sc) != 0) {
237 		printf("%s: unable to get iocfacts\n", DEVNAME(sc));
238 		return (1);
239 	}
240 
241 	if (mpi_alloc_ccbs(sc) != 0) {
242 		/* error already printed */
243 		return (1);
244 	}
245 
246 	if (mpi_alloc_replies(sc) != 0) {
247 		printf("%s: unable to allocate reply space\n", DEVNAME(sc));
248 		goto free_ccbs;
249 	}
250 
251 	if (mpi_iocinit(sc) != 0) {
252 		printf("%s: unable to send iocinit\n", DEVNAME(sc));
253 		goto free_ccbs;
254 	}
255 
256 	/* spin until we're operational */
257 	if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
258 	    MPI_DOORBELL_STATE_OPER) != 0) {
259 		printf("%s: state: 0x%08x\n", DEVNAME(sc),
260 		    mpi_read_db(sc) & MPI_DOORBELL_STATE);
261 		printf("%s: operational state timeout\n", DEVNAME(sc));
262 		goto free_ccbs;
263 	}
264 
265 	mpi_push_replies(sc);
266 
267 	if (mpi_portfacts(sc) != 0) {
268 		printf("%s: unable to get portfacts\n", DEVNAME(sc));
269 		goto free_replies;
270 	}
271 
272 	if (mpi_cfg_coalescing(sc) != 0) {
273 		printf("%s: unable to configure coalescing\n", DEVNAME(sc));
274 		goto free_replies;
275 	}
276 
277 	switch (sc->sc_porttype) {
278 	case MPI_PORTFACTS_PORTTYPE_SAS:
279 		SIMPLEQ_INIT(&sc->sc_evt_scan_queue);
280 		mtx_init(&sc->sc_evt_scan_mtx, IPL_BIO);
281 		scsi_ioh_set(&sc->sc_evt_scan_handler, &sc->sc_iopool,
282 		    mpi_evt_sas_detach, sc);
283 		/* FALLTHROUGH */
284 	case MPI_PORTFACTS_PORTTYPE_FC:
285 		if (mpi_eventnotify(sc) != 0) {
286 			printf("%s: unable to enable events\n", DEVNAME(sc));
287 			goto free_replies;
288 		}
289 		break;
290 	}
291 
292 	if (mpi_portenable(sc) != 0) {
293 		printf("%s: unable to enable port\n", DEVNAME(sc));
294 		goto free_replies;
295 	}
296 
297 	if (mpi_fwupload(sc) != 0) {
298 		printf("%s: unable to upload firmware\n", DEVNAME(sc));
299 		goto free_replies;
300 	}
301 
302 	switch (sc->sc_porttype) {
303 	case MPI_PORTFACTS_PORTTYPE_SCSI:
304 		if (mpi_cfg_spi_port(sc) != 0) {
305 			printf("%s: unable to configure spi\n", DEVNAME(sc));
306 			goto free_replies;
307 		}
308 		mpi_squash_ppr(sc);
309 		break;
310 	case MPI_PORTFACTS_PORTTYPE_SAS:
311 		if (mpi_cfg_sas(sc) != 0) {
312 			printf("%s: unable to configure sas\n", DEVNAME(sc));
313 			goto free_replies;
314 		}
315 		break;
316 	case MPI_PORTFACTS_PORTTYPE_FC:
317 		if (mpi_cfg_fc(sc) != 0) {
318 			printf("%s: unable to configure fc\n", DEVNAME(sc));
319 			goto free_replies;
320 		}
321 		break;
322 	}
323 
324 	/* get raid pages */
325 	mpi_get_raid(sc);
326 #if NBIO > 0
327 	if (sc->sc_flags & MPI_F_RAID) {
328 		if (bio_register(&sc->sc_dev, mpi_ioctl) != 0)
329 			panic("%s: controller registration failed",
330 			    DEVNAME(sc));
331 		else {
332 			if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC,
333 			    2, 0, &sc->sc_cfg_hdr) != 0) {
334 				panic("%s: can't get IOC page 2 hdr",
335 				    DEVNAME(sc));
336 			}
337 
338 			sc->sc_vol_page = malloc(sc->sc_cfg_hdr.page_length * 4,
339 			    M_TEMP, M_WAITOK | M_CANFAIL);
340 			if (sc->sc_vol_page == NULL) {
341 				panic("%s: can't get memory for IOC page 2, "
342 				    "bio disabled", DEVNAME(sc));
343 			}
344 
345 			if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1,
346 			    sc->sc_vol_page,
347 			    sc->sc_cfg_hdr.page_length * 4) != 0) {
348 				panic("%s: can't get IOC page 2", DEVNAME(sc));
349 			}
350 
351 			sc->sc_vol_list = (struct mpi_cfg_raid_vol *)
352 			    (sc->sc_vol_page + 1);
353 
354 			sc->sc_ioctl = mpi_ioctl;
355 		}
356 	}
357 #endif /* NBIO > 0 */
358 
359 	/* we should be good to go now, attach scsibus */
360 	sc->sc_link.adapter = &mpi_switch;
361 	sc->sc_link.adapter_softc = sc;
362 	sc->sc_link.adapter_target = sc->sc_target;
363 	sc->sc_link.adapter_buswidth = sc->sc_buswidth;
364 	sc->sc_link.openings = MAX(sc->sc_maxcmds / sc->sc_buswidth, 16);
365 	sc->sc_link.pool = &sc->sc_iopool;
366 
367 	memset(&saa, 0, sizeof(saa));
368 	saa.saa_sc_link = &sc->sc_link;
369 
370 	/* config_found() returns the scsibus attached to us */
371 	sc->sc_scsibus = (struct scsibus_softc *) config_found(&sc->sc_dev,
372 	    &saa, scsiprint);
373 
374 	/* do domain validation */
375 	if (sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_SCSI)
376 		mpi_run_ppr(sc);
377 
378 	/* enable interrupts */
379 	mpi_write(sc, MPI_INTR_MASK, MPI_INTR_MASK_DOORBELL);
380 
381 #if NBIO > 0
382 #ifndef SMALL_KERNEL
383 	mpi_create_sensors(sc);
384 #endif /* SMALL_KERNEL */
385 #endif /* NBIO > 0 */
386 
387 	return (0);
388 
389 free_replies:
390 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 0,
391 	    sc->sc_repq * MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD);
392 	mpi_dmamem_free(sc, sc->sc_replies);
393 free_ccbs:
394 	while ((ccb = mpi_get_ccb(sc)) != NULL)
395 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
396 	mpi_dmamem_free(sc, sc->sc_requests);
397 	free(sc->sc_ccbs, M_DEVBUF);
398 
399 	return(1);
400 }
401 
402 int
403 mpi_cfg_spi_port(struct mpi_softc *sc)
404 {
405 	struct mpi_cfg_hdr		hdr;
406 	struct mpi_cfg_spi_port_pg1	port;
407 
408 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 1, 0x0,
409 	    &hdr) != 0)
410 		return (1);
411 
412 	if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port, sizeof(port)) != 0)
413 		return (1);
414 
415 	DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_spi_port_pg1\n", DEVNAME(sc));
416 	DNPRINTF(MPI_D_MISC, "%s:  port_scsi_id: %d port_resp_ids 0x%04x\n",
417 	    DEVNAME(sc), port.port_scsi_id, letoh16(port.port_resp_ids));
418 	DNPRINTF(MPI_D_MISC, "%s:  on_bus_timer_value: 0x%08x\n", DEVNAME(sc),
419 	    letoh32(port.on_bus_timer_value));
420 	DNPRINTF(MPI_D_MISC, "%s:  target_config: 0x%02x id_config: 0x%04x\n",
421 	    DEVNAME(sc), port.target_config, letoh16(port.id_config));
422 
423 	if (port.port_scsi_id == sc->sc_target &&
424 	    port.port_resp_ids == htole16(1 << sc->sc_target) &&
425 	    port.on_bus_timer_value != htole32(0x0))
426 		return (0);
427 
428 	DNPRINTF(MPI_D_MISC, "%s: setting port scsi id to %d\n", DEVNAME(sc),
429 	    sc->sc_target);
430 	port.port_scsi_id = sc->sc_target;
431 	port.port_resp_ids = htole16(1 << sc->sc_target);
432 	port.on_bus_timer_value = htole32(0x07000000); /* XXX magic */
433 
434 	if (mpi_cfg_page(sc, 0x0, &hdr, 0, &port, sizeof(port)) != 0) {
435 		printf("%s: unable to configure port scsi id\n", DEVNAME(sc));
436 		return (1);
437 	}
438 
439 	return (0);
440 }
441 
442 void
443 mpi_squash_ppr(struct mpi_softc *sc)
444 {
445 	struct mpi_cfg_hdr		hdr;
446 	struct mpi_cfg_spi_dev_pg1	page;
447 	int				i;
448 
449 	DNPRINTF(MPI_D_PPR, "%s: mpi_squash_ppr\n", DEVNAME(sc));
450 
451 	for (i = 0; i < sc->sc_buswidth; i++) {
452 		if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV,
453 		    1, i, &hdr) != 0)
454 			return;
455 
456 		if (mpi_cfg_page(sc, i, &hdr, 1, &page, sizeof(page)) != 0)
457 			return;
458 
459 		DNPRINTF(MPI_D_PPR, "%s:  target: %d req_params1: 0x%02x "
460 		    "req_offset: 0x%02x req_period: 0x%02x "
461 		    "req_params2: 0x%02x conf: 0x%08x\n", DEVNAME(sc), i,
462 		    page.req_params1, page.req_offset, page.req_period,
463 		    page.req_params2, letoh32(page.configuration));
464 
465 		page.req_params1 = 0x0;
466 		page.req_offset = 0x0;
467 		page.req_period = 0x0;
468 		page.req_params2 = 0x0;
469 		page.configuration = htole32(0x0);
470 
471 		if (mpi_cfg_page(sc, i, &hdr, 0, &page, sizeof(page)) != 0)
472 			return;
473 	}
474 }
475 
476 void
477 mpi_run_ppr(struct mpi_softc *sc)
478 {
479 	struct mpi_cfg_hdr		hdr;
480 	struct mpi_cfg_spi_port_pg0	port_pg;
481 	struct mpi_cfg_ioc_pg3		*physdisk_pg;
482 	struct mpi_cfg_raid_physdisk	*physdisk_list, *physdisk;
483 	size_t				pagelen;
484 	struct scsi_link		*link;
485 	int				i, tries;
486 
487 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 0, 0x0,
488 	    &hdr) != 0) {
489 		DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch header\n",
490 		    DEVNAME(sc));
491 		return;
492 	}
493 
494 	if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port_pg, sizeof(port_pg)) != 0) {
495 		DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch page\n",
496 		    DEVNAME(sc));
497 		return;
498 	}
499 
500 	for (i = 0; i < sc->sc_buswidth; i++) {
501 		link = scsi_get_link(sc->sc_scsibus, i, 0);
502 		if (link == NULL)
503 			continue;
504 
505 		/* do not ppr volumes */
506 		if (link->flags & SDEV_VIRTUAL)
507 			continue;
508 
509 		tries = 0;
510 		while (mpi_ppr(sc, link, NULL, port_pg.min_period,
511 		    port_pg.max_offset, tries) == EAGAIN)
512 			tries++;
513 	}
514 
515 	if ((sc->sc_flags & MPI_F_RAID) == 0)
516 		return;
517 
518 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 3, 0x0,
519 	    &hdr) != 0) {
520 		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
521 		    "fetch ioc pg 3 header\n", DEVNAME(sc));
522 		return;
523 	}
524 
525 	pagelen = hdr.page_length * 4; /* dwords to bytes */
526 	physdisk_pg = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL);
527 	if (physdisk_pg == NULL) {
528 		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
529 		    "allocate ioc pg 3\n", DEVNAME(sc));
530 		return;
531 	}
532 	physdisk_list = (struct mpi_cfg_raid_physdisk *)(physdisk_pg + 1);
533 
534 	if (mpi_cfg_page(sc, 0, &hdr, 1, physdisk_pg, pagelen) != 0) {
535 		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
536 		    "fetch ioc page 3\n", DEVNAME(sc));
537 		goto out;
538 	}
539 
540 	DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s:  no_phys_disks: %d\n", DEVNAME(sc),
541 	    physdisk_pg->no_phys_disks);
542 
543 	for (i = 0; i < physdisk_pg->no_phys_disks; i++) {
544 		physdisk = &physdisk_list[i];
545 
546 		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s:  id: %d bus: %d ioc: %d "
547 		    "num: %d\n", DEVNAME(sc), physdisk->phys_disk_id,
548 		    physdisk->phys_disk_bus, physdisk->phys_disk_ioc,
549 		    physdisk->phys_disk_num);
550 
551 		if (physdisk->phys_disk_ioc != sc->sc_ioc_number)
552 			continue;
553 
554 		tries = 0;
555 		while (mpi_ppr(sc, NULL, physdisk, port_pg.min_period,
556 		    port_pg.max_offset, tries) == EAGAIN)
557 			tries++;
558 	}
559 
560 out:
561 	free(physdisk_pg, M_TEMP);
562 }
563 
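/*
 * Negotiate parallel SCSI transfer parameters for one target (or one
 * RAID physical disk). Each call requests a single speed based on
 * "try" (0 = U320, 1 = U160, 2 = U80), writes SPI device page 1,
 * forces a renegotiation with an INQUIRY and then checks the
 * negotiated values in page 0. EAGAIN tells the caller to retry at
 * the next lower speed.
 */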
564 int
565 mpi_ppr(struct mpi_softc *sc, struct scsi_link *link,
566     struct mpi_cfg_raid_physdisk *physdisk, int period, int offset, int try)
567 {
568 	struct mpi_cfg_hdr		hdr0, hdr1;
569 	struct mpi_cfg_spi_dev_pg0	pg0;
570 	struct mpi_cfg_spi_dev_pg1	pg1;
571 	u_int32_t			address;
572 	int				id;
573 	int				raid = 0;
574 
575 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr period: %d offset: %d try: %d "
576 	    "link quirks: 0x%x\n", DEVNAME(sc), period, offset, try,
577 	    link->quirks);
578 
579 	if (try >= 3)
580 		return (EIO);
581 
582 	if (physdisk == NULL) {
583 		if ((link->inqdata.device & SID_TYPE) == T_PROCESSOR)
584 			return (EIO);
585 
586 		address = link->target;
587 		id = link->target;
588 	} else {
589 		raid = 1;
590 		address = (physdisk->phys_disk_bus << 8) |
591 		    (physdisk->phys_disk_id);
592 		id = physdisk->phys_disk_num;
593 	}
594 
595 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 0,
596 	    address, &hdr0) != 0) {
597 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 0\n",
598 		    DEVNAME(sc));
599 		return (EIO);
600 	}
601 
602 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 1,
603 	    address, &hdr1) != 0) {
604 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 1\n",
605 		    DEVNAME(sc));
606 		return (EIO);
607 	}
608 
609 #ifdef MPI_DEBUG
610 	if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0)) != 0) {
611 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 0\n",
612 		    DEVNAME(sc));
613 		return (EIO);
614 	}
615 
616 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x "
617 	    "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x "
618 	    "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset,
619 	    pg0.neg_period, pg0.neg_params2, letoh32(pg0.information));
620 #endif
621 
622 	if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1)) != 0) {
623 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 1\n",
624 		    DEVNAME(sc));
625 		return (EIO);
626 	}
627 
628 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
629 	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
630 	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
631 	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
632 
633 	pg1.req_params1 = 0;
634 	pg1.req_offset = offset;
635 	pg1.req_period = period;
636 	pg1.req_params2 &= ~MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH;
637 
638 	if (raid || !(link->quirks & SDEV_NOSYNC)) {
639 		pg1.req_params2 |= MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH_WIDE;
640 
641 		switch (try) {
642 		case 0: /* U320 */
643 			break;
644 		case 1: /* U160 */
645 			pg1.req_period = 0x09;
646 			break;
647 		case 2: /* U80 */
648 			pg1.req_period = 0x0a;
649 			break;
650 		}
651 
652 		if (pg1.req_period < 0x09) {
653 			/* Ultra320: enable QAS & PACKETIZED */
654 			pg1.req_params1 |= MPI_CFG_SPI_DEV_1_REQPARAMS_QAS |
655 			    MPI_CFG_SPI_DEV_1_REQPARAMS_PACKETIZED;
656 		}
657 		if (pg1.req_period < 0xa) {
658 			/* >= Ultra160: enable dual xfers */
659 			pg1.req_params1 |=
660 			    MPI_CFG_SPI_DEV_1_REQPARAMS_DUALXFERS;
661 		}
662 	}
663 
664 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
665 	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
666 	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
667 	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
668 
669 	if (mpi_cfg_page(sc, address, &hdr1, 0, &pg1, sizeof(pg1)) != 0) {
670 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to write page 1\n",
671 		    DEVNAME(sc));
672 		return (EIO);
673 	}
674 
675 	if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1)) != 0) {
676 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 1\n",
677 		    DEVNAME(sc));
678 		return (EIO);
679 	}
680 
681 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
682 	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
683 	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
684 	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
685 
686 	if (mpi_inq(sc, id, raid) != 0) {
687 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to do inquiry against "
688 		    "target %d\n", DEVNAME(sc), link->target);
689 		return (EIO);
690 	}
691 
692 	if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0)) != 0) {
693 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 0 after "
694 		    "inquiry\n", DEVNAME(sc));
695 		return (EIO);
696 	}
697 
698 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x "
699 	    "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x "
700 	    "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset,
701 	    pg0.neg_period, pg0.neg_params2, letoh32(pg0.information));
702 
703 	if (!(letoh32(pg0.information) & 0x07) && (try == 0)) {
704 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U320 ppr rejected\n",
705 		    DEVNAME(sc));
706 		return (EAGAIN);
707 	}
708 
709 	if ((((letoh32(pg0.information) >> 8) & 0xff) > 0x09) && (try == 1)) {
710 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U160 ppr rejected\n",
711 		    DEVNAME(sc));
712 		return (EAGAIN);
713 	}
714 
715 	if (letoh32(pg0.information) & 0x0e) {
716 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr ppr rejected: %0x\n",
717 		    DEVNAME(sc), letoh32(pg0.information));
718 		return (EAGAIN);
719 	}
720 
721 	switch(pg0.neg_period) {
722 	case 0x08:
723 		period = 160;
724 		break;
725 	case 0x09:
726 		period = 80;
727 		break;
728 	case 0x0a:
729 		period = 40;
730 		break;
731 	case 0x0b:
732 		period = 20;
733 		break;
734 	case 0x0c:
735 		period = 10;
736 		break;
737 	default:
738 		period = 0;
739 		break;
740 	}
741 
742 	printf("%s: %s %d %s at %dMHz width %dbit offset %d "
743 	    "QAS %d DT %d IU %d\n", DEVNAME(sc), raid ? "phys disk" : "target",
744 	    id, period ? "Sync" : "Async", period,
745 	    (pg0.neg_params2 & MPI_CFG_SPI_DEV_0_NEGPARAMS_WIDTH_WIDE) ? 16 : 8,
746 	    pg0.neg_offset,
747 	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_QAS) ? 1 : 0,
748 	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_DUALXFERS) ? 1 : 0,
749 	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_PACKETIZED) ? 1 : 0);
750 
751 	return (0);
752 }
753 
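/*
 * Issue a bare INQUIRY to lun 0 of a target using a hand-rolled SCSI
 * IO request; the message, SGE, inquiry data and sense buffer all
 * live in the ccb request frame. Used by mpi_ppr() to make the
 * negotiation actually happen on the bus.
 */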
754 int
755 mpi_inq(struct mpi_softc *sc, u_int16_t target, int physdisk)
756 {
757 	struct mpi_ccb			*ccb;
758 	struct scsi_inquiry		inq;
759 	struct {
760 		struct mpi_msg_scsi_io		io;
761 		struct mpi_sge			sge;
762 		struct scsi_inquiry_data	inqbuf;
763 		struct scsi_sense_data		sense;
764 	} __packed			*bundle;
765 	struct mpi_msg_scsi_io		*io;
766 	struct mpi_sge			*sge;
767 
768 	DNPRINTF(MPI_D_PPR, "%s: mpi_inq\n", DEVNAME(sc));
769 
770 	memset(&inq, 0, sizeof(inq));
771 	inq.opcode = INQUIRY;
772 	_lto2b(sizeof(struct scsi_inquiry_data), inq.length);
773 
774 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
775 	if (ccb == NULL)
776 		return (1);
777 
778 	ccb->ccb_done = mpi_empty_done;
779 
780 	bundle = ccb->ccb_cmd;
781 	io = &bundle->io;
782 	sge = &bundle->sge;
783 
784 	io->function = physdisk ? MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH :
785 	    MPI_FUNCTION_SCSI_IO_REQUEST;
786 	/*
787 	 * bus is always 0
788 	 * io->bus = htole16(sc->sc_bus);
789 	 */
790 	io->target_id = target;
791 
792 	io->cdb_length = sizeof(inq);
793 	io->sense_buf_len = sizeof(struct scsi_sense_data);
794 	io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64;
795 
796 	/*
797 	 * always lun 0
798 	 * io->lun[0] = htobe16(link->lun);
799 	 */
800 
801 	io->direction = MPI_SCSIIO_DIR_READ;
802 	io->tagging = MPI_SCSIIO_ATTR_NO_DISCONNECT;
803 
804 	memcpy(io->cdb, &inq, sizeof(inq));
805 
806 	io->data_length = htole32(sizeof(struct scsi_inquiry_data));
807 
808 	io->sense_buf_low_addr = htole32(ccb->ccb_cmd_dva +
809 	    ((u_int8_t *)&bundle->sense - (u_int8_t *)bundle));
810 
811 	sge->sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64 |
812 	    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL |
813 	    (u_int32_t)sizeof(inq));
814 
815 	mpi_dvatosge(sge, ccb->ccb_cmd_dva +
816 	    ((u_int8_t *)&bundle->inqbuf - (u_int8_t *)bundle));
817 
818 	if (mpi_poll(sc, ccb, 5000) != 0)
819 		return (1);
820 
821 	if (ccb->ccb_rcb != NULL)
822 		mpi_push_reply(sc, ccb->ccb_rcb);
823 
824 	scsi_io_put(&sc->sc_iopool, ccb);
825 
826 	return (0);
827 }
828 
829 int
830 mpi_cfg_sas(struct mpi_softc *sc)
831 {
832 	struct mpi_ecfg_hdr		ehdr;
833 	struct mpi_cfg_sas_iou_pg1	*pg;
834 	size_t				pagelen;
835 	int				rv = 0;
836 
837 	if (mpi_ecfg_header(sc, MPI_CONFIG_REQ_EXTPAGE_TYPE_SAS_IO_UNIT, 1, 0,
838 	    &ehdr) != 0)
839 		return (0);
840 
841 	pagelen = letoh16(ehdr.ext_page_length) * 4;
842 	pg = malloc(pagelen, M_TEMP, M_NOWAIT | M_ZERO);
843 	if (pg == NULL)
844 		return (ENOMEM);
845 
846 	if (mpi_ecfg_page(sc, 0, &ehdr, 1, pg, pagelen) != 0)
847 		goto out;
848 
849 	if (pg->max_sata_q_depth != 32) {
850 		pg->max_sata_q_depth = 32;
851 
852 		if (mpi_ecfg_page(sc, 0, &ehdr, 0, pg, pagelen) != 0)
853 			goto out;
854 	}
855 
856 out:
857 	free(pg, M_TEMP);
858 	return (rv);
859 }
860 
861 int
862 mpi_cfg_fc(struct mpi_softc *sc)
863 {
864 	struct mpi_cfg_hdr		hdr;
865 	struct mpi_cfg_fc_port_pg0	pg0;
866 	struct mpi_cfg_fc_port_pg1	pg1;
867 
868 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 0, 0,
869 	    &hdr) != 0) {
870 		printf("%s: unable to fetch FC port header 0\n", DEVNAME(sc));
871 		return (1);
872 	}
873 
874 	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg0, sizeof(pg0)) != 0) {
875 		printf("%s: unable to fetch FC port page 0\n", DEVNAME(sc));
876 		return (1);
877 	}
878 
879 	sc->sc_link.port_wwn = letoh64(pg0.wwpn);
880 	sc->sc_link.node_wwn = letoh64(pg0.wwnn);
881 
882 	/* configure port config more to our liking */
883 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 1, 0,
884 	    &hdr) != 0) {
885 		printf("%s: unable to fetch FC port header 1\n", DEVNAME(sc));
886 		return (1);
887 	}
888 
889 	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg1, sizeof(pg1)) != 0) {
890 		printf("%s: unable to fetch FC port page 1\n", DEVNAME(sc));
891 		return (1);
892 	}
893 
894 	SET(pg1.flags, htole32(MPI_CFG_FC_PORT_0_FLAGS_IMMEDIATE_ERROR |
895 	    MPI_CFG_FC_PORT_0_FLAGS_VERBOSE_RESCAN));
896 
897 	if (mpi_cfg_page(sc, 0, &hdr, 0, &pg1, sizeof(pg1)) != 0) {
898 		printf("%s: unable to set FC port page 1\n", DEVNAME(sc));
899 		return (1);
900 	}
901 
902 	return (0);
903 }
904 
905 void
906 mpi_detach(struct mpi_softc *sc)
907 {
908 
909 }
910 
911 int
912 mpi_intr(void *arg)
913 {
914 	struct mpi_softc		*sc = arg;
915 	u_int32_t			reg;
916 	int				rv = 0;
917 
918 	if ((mpi_read_intr(sc) & MPI_INTR_STATUS_REPLY) == 0)
919 		return (rv);
920 
921 	while ((reg = mpi_pop_reply(sc)) != 0xffffffff) {
922 		mpi_reply(sc, reg);
923 		rv = 1;
924 	}
925 
926 	return (rv);
927 }
928 
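/*
 * Handle one value popped from the reply FIFO. If the address bit is
 * set, the value is the reply frame DVA shifted right by one, which
 * locates the rcb and the message context stored in the reply;
 * otherwise it is a context reply and carries the ccb id directly.
 */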
929 void
930 mpi_reply(struct mpi_softc *sc, u_int32_t reg)
931 {
932 	struct mpi_ccb			*ccb;
933 	struct mpi_rcb			*rcb = NULL;
934 	struct mpi_msg_reply		*reply = NULL;
935 	u_int32_t			reply_dva;
936 	int				id;
937 	int				i;
938 
939 	DNPRINTF(MPI_D_INTR, "%s: mpi_reply reg: 0x%08x\n", DEVNAME(sc), reg);
940 
941 	if (reg & MPI_REPLY_QUEUE_ADDRESS) {
942 		reply_dva = (reg & MPI_REPLY_QUEUE_ADDRESS_MASK) << 1;
943 		i = (reply_dva - (u_int32_t)MPI_DMA_DVA(sc->sc_replies)) /
944 		    MPI_REPLY_SIZE;
945 		rcb = &sc->sc_rcbs[i];
946 
947 		bus_dmamap_sync(sc->sc_dmat,
948 		    MPI_DMA_MAP(sc->sc_replies), rcb->rcb_offset,
949 		    MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD);
950 
951 		reply = rcb->rcb_reply;
952 
953 		id = lemtoh32(&reply->msg_context);
954 	} else {
955 		switch (reg & MPI_REPLY_QUEUE_TYPE_MASK) {
956 		case MPI_REPLY_QUEUE_TYPE_INIT:
957 			id = reg & MPI_REPLY_QUEUE_CONTEXT;
958 			break;
959 
960 		default:
961 			panic("%s: unsupported context reply",
962 			    DEVNAME(sc));
963 		}
964 	}
965 
966 	DNPRINTF(MPI_D_INTR, "%s: mpi_reply id: %d reply: %p\n",
967 	    DEVNAME(sc), id, reply);
968 
969 	ccb = &sc->sc_ccbs[id];
970 
971 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests),
972 	    ccb->ccb_offset, MPI_REQUEST_SIZE,
973 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
974 	ccb->ccb_state = MPI_CCB_READY;
975 	ccb->ccb_rcb = rcb;
976 
977 	ccb->ccb_done(ccb);
978 }
979 
980 struct mpi_dmamem *
981 mpi_dmamem_alloc(struct mpi_softc *sc, size_t size)
982 {
983 	struct mpi_dmamem		*mdm;
984 	int				nsegs;
985 
986 	mdm = malloc(sizeof(struct mpi_dmamem), M_DEVBUF, M_NOWAIT | M_ZERO);
987 	if (mdm == NULL)
988 		return (NULL);
989 
990 	mdm->mdm_size = size;
991 
992 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
993 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
994 		goto mdmfree;
995 
996 	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg,
997 	    1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
998 		goto destroy;
999 
1000 	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
1001 	    &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0)
1002 		goto free;
1003 
1004 	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
1005 	    NULL, BUS_DMA_NOWAIT) != 0)
1006 		goto unmap;
1007 
1008 	DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_alloc size: %d mdm: %#x "
1009 	    "map: %#x nsegs: %d segs: %#x kva: %#x\n", DEVNAME(sc),
1010 	    size, mdm, mdm->mdm_map, nsegs, &mdm->mdm_seg, mdm->mdm_kva);
1011 
1012 	return (mdm);
1013 
1014 unmap:
1015 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
1016 free:
1017 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
1018 destroy:
1019 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
1020 mdmfree:
1021 	free(mdm, M_DEVBUF);
1022 
1023 	return (NULL);
1024 }
1025 
1026 void
1027 mpi_dmamem_free(struct mpi_softc *sc, struct mpi_dmamem *mdm)
1028 {
1029 	DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_free %#x\n", DEVNAME(sc), mdm);
1030 
1031 	bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map);
1032 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
1033 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
1034 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
1035 	free(mdm, M_DEVBUF);
1036 }
1037 
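/*
 * Carve the request DMA allocation into one MPI_REQUEST_SIZE frame
 * per ccb; ccb_cmd/ccb_cmd_dva are the kva and device address of that
 * frame, and ccb_cmd_dva is what gets written to the request FIFO.
 */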
1038 int
1039 mpi_alloc_ccbs(struct mpi_softc *sc)
1040 {
1041 	struct mpi_ccb			*ccb;
1042 	u_int8_t			*cmd;
1043 	int				i;
1044 
1045 	SLIST_INIT(&sc->sc_ccb_free);
1046 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
1047 
1048 	sc->sc_ccbs = malloc(sizeof(struct mpi_ccb) * sc->sc_maxcmds,
1049 	    M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
1050 	if (sc->sc_ccbs == NULL) {
1051 		printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
1052 		return (1);
1053 	}
1054 
1055 	sc->sc_requests = mpi_dmamem_alloc(sc,
1056 	    MPI_REQUEST_SIZE * sc->sc_maxcmds);
1057 	if (sc->sc_requests == NULL) {
1058 		printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
1059 		goto free_ccbs;
1060 	}
1061 	cmd = MPI_DMA_KVA(sc->sc_requests);
1062 	memset(cmd, 0, MPI_REQUEST_SIZE * sc->sc_maxcmds);
1063 
1064 	for (i = 0; i < sc->sc_maxcmds; i++) {
1065 		ccb = &sc->sc_ccbs[i];
1066 
1067 		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS,
1068 		    sc->sc_max_sgl_len, MAXPHYS, 0,
1069 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1070 		    &ccb->ccb_dmamap) != 0) {
1071 			printf("%s: unable to create dma map\n", DEVNAME(sc));
1072 			goto free_maps;
1073 		}
1074 
1075 		ccb->ccb_sc = sc;
1076 		ccb->ccb_id = i;
1077 		ccb->ccb_offset = MPI_REQUEST_SIZE * i;
1078 		ccb->ccb_state = MPI_CCB_READY;
1079 
1080 		ccb->ccb_cmd = &cmd[ccb->ccb_offset];
1081 		ccb->ccb_cmd_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_requests) +
1082 		    ccb->ccb_offset;
1083 
1084 		DNPRINTF(MPI_D_CCB, "%s: mpi_alloc_ccbs(%d) ccb: %#x map: %#x "
1085 		    "sc: %#x id: %#x offs: %#x cmd: %#x dva: %#x\n",
1086 		    DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc,
1087 		    ccb->ccb_id, ccb->ccb_offset, ccb->ccb_cmd,
1088 		    ccb->ccb_cmd_dva);
1089 
1090 		mpi_put_ccb(sc, ccb);
1091 	}
1092 
1093 	scsi_iopool_init(&sc->sc_iopool, sc, mpi_get_ccb, mpi_put_ccb);
1094 
1095 	return (0);
1096 
1097 free_maps:
1098 	while ((ccb = mpi_get_ccb(sc)) != NULL)
1099 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
1100 
1101 	mpi_dmamem_free(sc, sc->sc_requests);
1102 free_ccbs:
1103 	free(sc->sc_ccbs, M_DEVBUF);
1104 
1105 	return (1);
1106 }
1107 
1108 void *
1109 mpi_get_ccb(void *xsc)
1110 {
1111 	struct mpi_softc		*sc = xsc;
1112 	struct mpi_ccb			*ccb;
1113 
1114 	mtx_enter(&sc->sc_ccb_mtx);
1115 	ccb = SLIST_FIRST(&sc->sc_ccb_free);
1116 	if (ccb != NULL) {
1117 		SLIST_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link);
1118 		ccb->ccb_state = MPI_CCB_READY;
1119 	}
1120 	mtx_leave(&sc->sc_ccb_mtx);
1121 
1122 	DNPRINTF(MPI_D_CCB, "%s: mpi_get_ccb %p\n", DEVNAME(sc), ccb);
1123 
1124 	return (ccb);
1125 }
1126 
1127 void
1128 mpi_put_ccb(void *xsc, void *io)
1129 {
1130 	struct mpi_softc		*sc = xsc;
1131 	struct mpi_ccb			*ccb = io;
1132 
1133 	DNPRINTF(MPI_D_CCB, "%s: mpi_put_ccb %p\n", DEVNAME(sc), ccb);
1134 
1135 #ifdef DIAGNOSTIC
1136 	if (ccb->ccb_state == MPI_CCB_FREE)
1137 		panic("mpi_put_ccb: double free");
1138 #endif
1139 
1140 	ccb->ccb_state = MPI_CCB_FREE;
1141 	ccb->ccb_cookie = NULL;
1142 	ccb->ccb_done = NULL;
1143 	memset(ccb->ccb_cmd, 0, MPI_REQUEST_SIZE);
1144 	mtx_enter(&sc->sc_ccb_mtx);
1145 	SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link);
1146 	mtx_leave(&sc->sc_ccb_mtx);
1147 }
1148 
1149 int
1150 mpi_alloc_replies(struct mpi_softc *sc)
1151 {
1152 	DNPRINTF(MPI_D_MISC, "%s: mpi_alloc_replies\n", DEVNAME(sc));
1153 
1154 	sc->sc_rcbs = malloc(sc->sc_repq * sizeof(struct mpi_rcb), M_DEVBUF,
1155 	    M_WAITOK|M_CANFAIL);
1156 	if (sc->sc_rcbs == NULL)
1157 		return (1);
1158 
1159 	sc->sc_replies = mpi_dmamem_alloc(sc, sc->sc_repq * MPI_REPLY_SIZE);
1160 	if (sc->sc_replies == NULL) {
1161 		free(sc->sc_rcbs, M_DEVBUF);
1162 		return (1);
1163 	}
1164 
1165 	return (0);
1166 }
1167 
1168 void
1169 mpi_push_reply(struct mpi_softc *sc, struct mpi_rcb *rcb)
1170 {
1171 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies),
1172 	    rcb->rcb_offset, MPI_REPLY_SIZE, BUS_DMASYNC_PREREAD);
1173 	mpi_push_reply_db(sc, rcb->rcb_reply_dva);
1174 }
1175 
1176 void
1177 mpi_push_replies(struct mpi_softc *sc)
1178 {
1179 	struct mpi_rcb			*rcb;
1180 	char				*kva = MPI_DMA_KVA(sc->sc_replies);
1181 	int				i;
1182 
1183 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 0,
1184 	    sc->sc_repq * MPI_REPLY_SIZE, BUS_DMASYNC_PREREAD);
1185 
1186 	for (i = 0; i < sc->sc_repq; i++) {
1187 		rcb = &sc->sc_rcbs[i];
1188 
1189 		rcb->rcb_reply = kva + MPI_REPLY_SIZE * i;
1190 		rcb->rcb_offset = MPI_REPLY_SIZE * i;
1191 		rcb->rcb_reply_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_replies) +
1192 		    MPI_REPLY_SIZE * i;
1193 		mpi_push_reply_db(sc, rcb->rcb_reply_dva);
1194 	}
1195 }
1196 
1197 void
1198 mpi_start(struct mpi_softc *sc, struct mpi_ccb *ccb)
1199 {
1200 	struct mpi_msg_request *msg;
1201 
1202 	DNPRINTF(MPI_D_RW, "%s: mpi_start %#x\n", DEVNAME(sc),
1203 	    ccb->ccb_cmd_dva);
1204 
1205 	msg = ccb->ccb_cmd;
1206 	htolem32(&msg->msg_context, ccb->ccb_id);
1207 
1208 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests),
1209 	    ccb->ccb_offset, MPI_REQUEST_SIZE,
1210 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1211 
1212 	ccb->ccb_state = MPI_CCB_QUEUED;
1213 	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1214 	    MPI_REQ_QUEUE, ccb->ccb_cmd_dva);
1215 }
1216 
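/*
 * Run a ccb to completion by polling the reply FIFO directly instead
 * of relying on interrupts; timeout is in milliseconds. Any replies
 * popped while waiting are dispatched normally via mpi_reply().
 */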
1217 int
1218 mpi_poll(struct mpi_softc *sc, struct mpi_ccb *ccb, int timeout)
1219 {
1220 	void				(*done)(struct mpi_ccb *);
1221 	void				*cookie;
1222 	int				rv = 1;
1223 	u_int32_t			reg;
1224 
1225 	DNPRINTF(MPI_D_INTR, "%s: mpi_poll timeout %d\n", DEVNAME(sc),
1226 	    timeout);
1227 
1228 	done = ccb->ccb_done;
1229 	cookie = ccb->ccb_cookie;
1230 
1231 	ccb->ccb_done = mpi_poll_done;
1232 	ccb->ccb_cookie = &rv;
1233 
1234 	mpi_start(sc, ccb);
1235 	while (rv == 1) {
1236 		reg = mpi_pop_reply(sc);
1237 		if (reg == 0xffffffff) {
1238 			if (timeout-- == 0) {
1239 				printf("%s: timeout\n", DEVNAME(sc));
1240 				goto timeout;
1241 			}
1242 
1243 			delay(1000);
1244 			continue;
1245 		}
1246 
1247 		mpi_reply(sc, reg);
1248 	}
1249 
1250 	ccb->ccb_cookie = cookie;
1251 	done(ccb);
1252 
1253 timeout:
1254 	return (rv);
1255 }
1256 
1257 void
1258 mpi_poll_done(struct mpi_ccb *ccb)
1259 {
1260 	int				*rv = ccb->ccb_cookie;
1261 
1262 	*rv = 0;
1263 }
1264 
1265 void
1266 mpi_wait(struct mpi_softc *sc, struct mpi_ccb *ccb)
1267 {
1268 	struct mutex			cookie = MUTEX_INITIALIZER(IPL_BIO);
1269 	void				(*done)(struct mpi_ccb *);
1270 
1271 	done = ccb->ccb_done;
1272 	ccb->ccb_done = mpi_wait_done;
1273 	ccb->ccb_cookie = &cookie;
1274 
1275 	/* XXX this will wait forever for the ccb to complete */
1276 
1277 	mpi_start(sc, ccb);
1278 
1279 	mtx_enter(&cookie);
1280 	while (ccb->ccb_cookie != NULL)
1281 		msleep(ccb, &cookie, PRIBIO, "mpiwait", 0);
1282 	mtx_leave(&cookie);
1283 
1284 	done(ccb);
1285 }
1286 
1287 void
1288 mpi_wait_done(struct mpi_ccb *ccb)
1289 {
1290 	struct mutex			*cookie = ccb->ccb_cookie;
1291 
1292 	mtx_enter(cookie);
1293 	ccb->ccb_cookie = NULL;
1294 	wakeup_one(ccb);
1295 	mtx_leave(cookie);
1296 }
1297 
1298 void
1299 mpi_scsi_cmd(struct scsi_xfer *xs)
1300 {
1301 	struct scsi_link		*link = xs->sc_link;
1302 	struct mpi_softc		*sc = link->adapter_softc;
1303 	struct mpi_ccb			*ccb;
1304 	struct mpi_ccb_bundle		*mcb;
1305 	struct mpi_msg_scsi_io		*io;
1306 
1307 	DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd\n", DEVNAME(sc));
1308 
1309 	if (xs->cmdlen > MPI_CDB_LEN) {
1310 		DNPRINTF(MPI_D_CMD, "%s: CBD too big %d\n",
1311 		    DEVNAME(sc), xs->cmdlen);
1312 		memset(&xs->sense, 0, sizeof(xs->sense));
1313 		xs->sense.error_code = SSD_ERRCODE_VALID | SSD_ERRCODE_CURRENT;
1314 		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
1315 		xs->sense.add_sense_code = 0x20;
1316 		xs->error = XS_SENSE;
1317 		scsi_done(xs);
1318 		return;
1319 	}
1320 
1321 	ccb = xs->io;
1322 
1323 	DNPRINTF(MPI_D_CMD, "%s: ccb_id: %d xs->flags: 0x%x\n",
1324 	    DEVNAME(sc), ccb->ccb_id, xs->flags);
1325 
1326 	ccb->ccb_cookie = xs;
1327 	ccb->ccb_done = mpi_scsi_cmd_done;
1328 
1329 	mcb = ccb->ccb_cmd;
1330 	io = &mcb->mcb_io;
1331 
1332 	io->function = MPI_FUNCTION_SCSI_IO_REQUEST;
1333 	/*
1334 	 * bus is always 0
1335 	 * io->bus = htole16(sc->sc_bus);
1336 	 */
1337 	io->target_id = link->target;
1338 
1339 	io->cdb_length = xs->cmdlen;
1340 	io->sense_buf_len = sizeof(xs->sense);
1341 	io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64;
1342 
1343 	htobem16(&io->lun[0], link->lun);
1344 
1345 	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
1346 	case SCSI_DATA_IN:
1347 		io->direction = MPI_SCSIIO_DIR_READ;
1348 		break;
1349 	case SCSI_DATA_OUT:
1350 		io->direction = MPI_SCSIIO_DIR_WRITE;
1351 		break;
1352 	default:
1353 		io->direction = MPI_SCSIIO_DIR_NONE;
1354 		break;
1355 	}
1356 
1357 	if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SCSI &&
1358 	    (link->quirks & SDEV_NOTAGS))
1359 		io->tagging = MPI_SCSIIO_ATTR_UNTAGGED;
1360 	else
1361 		io->tagging = MPI_SCSIIO_ATTR_SIMPLE_Q;
1362 
1363 	memcpy(io->cdb, xs->cmd, xs->cmdlen);
1364 
1365 	htolem32(&io->data_length, xs->datalen);
1366 
1367 	htolem32(&io->sense_buf_low_addr, ccb->ccb_cmd_dva +
1368 	    ((u_int8_t *)&mcb->mcb_sense - (u_int8_t *)mcb));
1369 
1370 	if (mpi_load_xs(ccb) != 0) {
1371 		xs->error = XS_DRIVER_STUFFUP;
1372 		scsi_done(xs);
1373 		return;
1374 	}
1375 
1376 	timeout_set(&xs->stimeout, mpi_timeout_xs, ccb);
1377 
1378 	if (xs->flags & SCSI_POLL) {
1379 		if (mpi_poll(sc, ccb, xs->timeout) != 0) {
1380 			xs->error = XS_DRIVER_STUFFUP;
1381 			scsi_done(xs);
1382 		}
1383 		return;
1384 	}
1385 
1386 	mpi_start(sc, ccb);
1387 }
1388 
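/*
 * Completion for regular SCSI IO. A NULL ccb_rcb means the IOC sent a
 * context-only reply, i.e. the command completed without error and
 * there is no error frame to decode or push back.
 */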
1389 void
1390 mpi_scsi_cmd_done(struct mpi_ccb *ccb)
1391 {
1392 	struct mpi_softc		*sc = ccb->ccb_sc;
1393 	struct scsi_xfer		*xs = ccb->ccb_cookie;
1394 	struct mpi_ccb_bundle		*mcb = ccb->ccb_cmd;
1395 	bus_dmamap_t			dmap = ccb->ccb_dmamap;
1396 	struct mpi_msg_scsi_io_error	*sie;
1397 
1398 	if (xs->datalen != 0) {
1399 		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
1400 		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
1401 		    BUS_DMASYNC_POSTWRITE);
1402 
1403 		bus_dmamap_unload(sc->sc_dmat, dmap);
1404 	}
1405 
1406 	/* timeout_del */
1407 	xs->error = XS_NOERROR;
1408 	xs->resid = 0;
1409 
1410 	if (ccb->ccb_rcb == NULL) {
1411 		/* no scsi error, we're ok so drop out early */
1412 		xs->status = SCSI_OK;
1413 		scsi_done(xs);
1414 		return;
1415 	}
1416 
1417 	sie = ccb->ccb_rcb->rcb_reply;
1418 
1419 	DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd_done xs cmd: 0x%02x len: %d "
1420 	    "flags 0x%x\n", DEVNAME(sc), xs->cmd->opcode, xs->datalen,
1421 	    xs->flags);
1422 	DNPRINTF(MPI_D_CMD, "%s:  target_id: %d bus: %d msg_length: %d "
1423 	    "function: 0x%02x\n", DEVNAME(sc), sie->target_id, sie->bus,
1424 	    sie->msg_length, sie->function);
1425 	DNPRINTF(MPI_D_CMD, "%s:  cdb_length: %d sense_buf_length: %d "
1426 	    "msg_flags: 0x%02x\n", DEVNAME(sc), sie->cdb_length,
1427 	    sie->sense_buf_len, sie->msg_flags);
1428 	DNPRINTF(MPI_D_CMD, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
1429 	    letoh32(sie->msg_context));
1430 	DNPRINTF(MPI_D_CMD, "%s:  scsi_status: 0x%02x scsi_state: 0x%02x "
1431 	    "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status,
1432 	    sie->scsi_state, letoh16(sie->ioc_status));
1433 	DNPRINTF(MPI_D_CMD, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
1434 	    letoh32(sie->ioc_loginfo));
1435 	DNPRINTF(MPI_D_CMD, "%s:  transfer_count: %d\n", DEVNAME(sc),
1436 	    letoh32(sie->transfer_count));
1437 	DNPRINTF(MPI_D_CMD, "%s:  sense_count: %d\n", DEVNAME(sc),
1438 	    letoh32(sie->sense_count));
1439 	DNPRINTF(MPI_D_CMD, "%s:  response_info: 0x%08x\n", DEVNAME(sc),
1440 	    letoh32(sie->response_info));
1441 	DNPRINTF(MPI_D_CMD, "%s:  tag: 0x%04x\n", DEVNAME(sc),
1442 	    letoh16(sie->tag));
1443 
1444 	xs->status = sie->scsi_status;
1445 	switch (lemtoh16(&sie->ioc_status)) {
1446 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
1447 		xs->resid = xs->datalen - lemtoh32(&sie->transfer_count);
1448 		if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_NO_SCSI_STATUS) {
1449 			xs->error = XS_DRIVER_STUFFUP;
1450 			break;
1451 		}
1452 		/* FALLTHROUGH */
1453 	case MPI_IOCSTATUS_SUCCESS:
1454 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
1455 		switch (xs->status) {
1456 		case SCSI_OK:
1457 			xs->resid = 0;
1458 			break;
1459 
1460 		case SCSI_CHECK:
1461 			xs->error = XS_SENSE;
1462 			break;
1463 
1464 		case SCSI_BUSY:
1465 		case SCSI_QUEUE_FULL:
1466 			xs->error = XS_BUSY;
1467 			break;
1468 
1469 		default:
1470 			xs->error = XS_DRIVER_STUFFUP;
1471 			break;
1472 		}
1473 		break;
1474 
1475 	case MPI_IOCSTATUS_BUSY:
1476 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
1477 		xs->error = XS_BUSY;
1478 		break;
1479 
1480 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
1481 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
1482 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1483 		xs->error = XS_SELTIMEOUT;
1484 		break;
1485 
1486 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
1487 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
1488 		xs->error = XS_RESET;
1489 		break;
1490 
1491 	default:
1492 		xs->error = XS_DRIVER_STUFFUP;
1493 		break;
1494 	}
1495 
1496 	if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_AUTOSENSE_VALID)
1497 		memcpy(&xs->sense, &mcb->mcb_sense, sizeof(xs->sense));
1498 
1499 	DNPRINTF(MPI_D_CMD, "%s:  xs err: 0x%02x status: %d\n", DEVNAME(sc),
1500 	    xs->error, xs->status);
1501 
1502 	mpi_push_reply(sc, ccb->ccb_rcb);
1503 	KERNEL_LOCK();
1504 	scsi_done(xs);
1505 	KERNEL_UNLOCK();
1506 }
1507 
1508 void
1509 mpi_timeout_xs(void *arg)
1510 {
1511 	/* XXX */
1512 }
1513 
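/*
 * Build the scatter/gather list for a transfer. The request frame
 * holds up to sc_first_sgl_len simple 64-bit SGEs; if more are needed
 * the last slot becomes a chain element pointing at the next run of
 * SGEs (sc_chain_len at a time) further along in the same frame.
 */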
1514 int
1515 mpi_load_xs(struct mpi_ccb *ccb)
1516 {
1517 	struct mpi_softc		*sc = ccb->ccb_sc;
1518 	struct scsi_xfer		*xs = ccb->ccb_cookie;
1519 	struct mpi_ccb_bundle		*mcb = ccb->ccb_cmd;
1520 	struct mpi_msg_scsi_io		*io = &mcb->mcb_io;
1521 	struct mpi_sge			*sge, *nsge = &mcb->mcb_sgl[0];
1522 	struct mpi_sge			*ce = NULL, *nce;
1523 	bus_dmamap_t			dmap = ccb->ccb_dmamap;
1524 	u_int32_t			addr, flags;
1525 	int				i, error;
1526 
1527 	if (xs->datalen == 0) {
1528 		nsge->sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE |
1529 		    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL);
1530 		return (0);
1531 	}
1532 
1533 	error = bus_dmamap_load(sc->sc_dmat, dmap,
1534 	    xs->data, xs->datalen, NULL, BUS_DMA_STREAMING |
1535 	    ((xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK));
1536 	if (error) {
1537 		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
1538 		return (1);
1539 	}
1540 
1541 	flags = MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64;
1542 	if (xs->flags & SCSI_DATA_OUT)
1543 		flags |= MPI_SGE_FL_DIR_OUT;
1544 
1545 	if (dmap->dm_nsegs > sc->sc_first_sgl_len) {
1546 		ce = &mcb->mcb_sgl[sc->sc_first_sgl_len - 1];
1547 		io->chain_offset = (u_int32_t *)ce - (u_int32_t *)io;
1548 	}
1549 
1550 	for (i = 0; i < dmap->dm_nsegs; i++) {
1551 
1552 		if (nsge == ce) {
1553 			nsge++;
1554 			sge->sg_hdr |= htole32(MPI_SGE_FL_LAST);
1555 
1556 			if ((dmap->dm_nsegs - i) > sc->sc_chain_len) {
1557 				nce = &nsge[sc->sc_chain_len - 1];
1558 				addr = (u_int32_t *)nce - (u_int32_t *)nsge;
1559 				addr = addr << 16 |
1560 				    sizeof(struct mpi_sge) * sc->sc_chain_len;
1561 			} else {
1562 				nce = NULL;
1563 				addr = sizeof(struct mpi_sge) *
1564 				    (dmap->dm_nsegs - i);
1565 			}
1566 
1567 			ce->sg_hdr = htole32(MPI_SGE_FL_TYPE_CHAIN |
1568 			    MPI_SGE_FL_SIZE_64 | addr);
1569 
1570 			mpi_dvatosge(ce, ccb->ccb_cmd_dva +
1571 			    ((u_int8_t *)nsge - (u_int8_t *)mcb));
1572 
1573 			ce = nce;
1574 		}
1575 
1576 		DNPRINTF(MPI_D_DMA, "%s:  %d: %d 0x%016llx\n", DEVNAME(sc),
1577 		    i, dmap->dm_segs[i].ds_len,
1578 		    (u_int64_t)dmap->dm_segs[i].ds_addr);
1579 
1580 		sge = nsge;
1581 
1582 		sge->sg_hdr = htole32(flags | dmap->dm_segs[i].ds_len);
1583 		mpi_dvatosge(sge, dmap->dm_segs[i].ds_addr);
1584 
1585 		nsge = sge + 1;
1586 	}
1587 
1588 	/* terminate list */
1589 	sge->sg_hdr |= htole32(MPI_SGE_FL_LAST | MPI_SGE_FL_EOB |
1590 	    MPI_SGE_FL_EOL);
1591 
1592 	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
1593 	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
1594 	    BUS_DMASYNC_PREWRITE);
1595 
1596 	return (0);
1597 }
1598 
1599 void
1600 mpi_minphys(struct buf *bp, struct scsi_link *sl)
1601 {
1602 	/* XXX */
1603 	if (bp->b_bcount > MAXPHYS)
1604 		bp->b_bcount = MAXPHYS;
1605 	minphys(bp);
1606 }
1607 
1608 int
1609 mpi_scsi_probe_virtual(struct scsi_link *link)
1610 {
1611 	struct mpi_softc		*sc = link->adapter_softc;
1612 	struct mpi_cfg_hdr		hdr;
1613 	struct mpi_cfg_raid_vol_pg0	*rp0;
1614 	int				len;
1615 	int				rv;
1616 
1617 	if (!ISSET(sc->sc_flags, MPI_F_RAID))
1618 		return (0);
1619 
1620 	if (link->lun > 0)
1621 		return (0);
1622 
1623 	rv = mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL,
1624 	    0, link->target, MPI_PG_POLL, &hdr);
1625 	if (rv != 0)
1626 		return (0);
1627 
1628 	len = hdr.page_length * 4;
1629 	rp0 = malloc(len, M_TEMP, M_NOWAIT);
1630 	if (rp0 == NULL)
1631 		return (ENOMEM);
1632 
1633 	rv = mpi_req_cfg_page(sc, link->target, MPI_PG_POLL, &hdr, 1, rp0, len);
1634 	if (rv == 0)
1635 		SET(link->flags, SDEV_VIRTUAL);
1636 
1637 	free(rp0, M_TEMP);
1638 	return (0);
1639 }
1640 
1641 int
1642 mpi_scsi_probe(struct scsi_link *link)
1643 {
1644 	struct mpi_softc		*sc = link->adapter_softc;
1645 	struct mpi_ecfg_hdr		ehdr;
1646 	struct mpi_cfg_sas_dev_pg0	pg0;
1647 	u_int32_t			address;
1648 	int				rv;
1649 
1650 	rv = mpi_scsi_probe_virtual(link);
1651 	if (rv != 0)
1652 		return (rv);
1653 
1654 	if (ISSET(link->flags, SDEV_VIRTUAL))
1655 		return (0);
1656 
1657 	if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SAS)
1658 		return (0);
1659 
1660 	address = MPI_CFG_SAS_DEV_ADDR_BUS | link->target;
1661 
1662 	if (mpi_ecfg_header(sc, MPI_CONFIG_REQ_EXTPAGE_TYPE_SAS_DEVICE, 0,
1663 	    address, &ehdr) != 0)
1664 		return (EIO);
1665 
1666 	if (mpi_ecfg_page(sc, address, &ehdr, 1, &pg0, sizeof(pg0)) != 0)
1667 		return (0);
1668 
1669 	DNPRINTF(MPI_D_MISC, "%s: mpi_scsi_probe sas dev pg 0 for target %d:\n",
1670 	    DEVNAME(sc), link->target);
1671 	DNPRINTF(MPI_D_MISC, "%s:  slot: 0x%04x enc_handle: 0x%04x\n",
1672 	    DEVNAME(sc), letoh16(pg0.slot), letoh16(pg0.enc_handle));
1673 	DNPRINTF(MPI_D_MISC, "%s:  sas_addr: 0x%016llx\n", DEVNAME(sc),
1674 	    letoh64(pg0.sas_addr));
1675 	DNPRINTF(MPI_D_MISC, "%s:  parent_dev_handle: 0x%04x phy_num: 0x%02x "
1676 	    "access_status: 0x%02x\n", DEVNAME(sc),
1677 	    letoh16(pg0.parent_dev_handle), pg0.phy_num, pg0.access_status);
1678 	DNPRINTF(MPI_D_MISC, "%s:  dev_handle: 0x%04x "
1679 	    "bus: 0x%02x target: 0x%02x\n", DEVNAME(sc),
1680 	    letoh16(pg0.dev_handle), pg0.bus, pg0.target);
1681 	DNPRINTF(MPI_D_MISC, "%s:  device_info: 0x%08x\n", DEVNAME(sc),
1682 	    letoh32(pg0.device_info));
1683 	DNPRINTF(MPI_D_MISC, "%s:  flags: 0x%04x physical_port: 0x%02x\n",
1684 	    DEVNAME(sc), letoh16(pg0.flags), pg0.physical_port);
1685 
1686 	if (ISSET(letoh32(pg0.device_info),
1687 	    MPI_CFG_SAS_DEV_0_DEVINFO_ATAPI_DEVICE)) {
1688 		DNPRINTF(MPI_D_MISC, "%s: target %d is an ATAPI device\n",
1689 		    DEVNAME(sc), link->target);
1690 		link->flags |= SDEV_ATAPI;
1691 		link->quirks |= SDEV_ONLYBIG;
1692 	}
1693 
1694 	return (0);
1695 }
1696 
1697 u_int32_t
1698 mpi_read(struct mpi_softc *sc, bus_size_t r)
1699 {
1700 	u_int32_t			rv;
1701 
1702 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1703 	    BUS_SPACE_BARRIER_READ);
1704 	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
1705 
1706 	DNPRINTF(MPI_D_RW, "%s: mpi_read %#x %#x\n", DEVNAME(sc), r, rv);
1707 
1708 	return (rv);
1709 }
1710 
1711 void
1712 mpi_write(struct mpi_softc *sc, bus_size_t r, u_int32_t v)
1713 {
1714 	DNPRINTF(MPI_D_RW, "%s: mpi_write %#x %#x\n", DEVNAME(sc), r, v);
1715 
1716 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
1717 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1718 	    BUS_SPACE_BARRIER_WRITE);
1719 }
1720 
1721 int
1722 mpi_wait_eq(struct mpi_softc *sc, bus_size_t r, u_int32_t mask,
1723     u_int32_t target)
1724 {
1725 	int				i;
1726 
1727 	DNPRINTF(MPI_D_RW, "%s: mpi_wait_eq %#x %#x %#x\n", DEVNAME(sc), r,
1728 	    mask, target);
1729 
1730 	for (i = 0; i < 10000; i++) {
1731 		if ((mpi_read(sc, r) & mask) == target)
1732 			return (0);
1733 		delay(1000);
1734 	}
1735 
1736 	return (1);
1737 }
1738 
1739 int
1740 mpi_wait_ne(struct mpi_softc *sc, bus_size_t r, u_int32_t mask,
1741     u_int32_t target)
1742 {
1743 	int				i;
1744 
1745 	DNPRINTF(MPI_D_RW, "%s: mpi_wait_ne %#x %#x %#x\n", DEVNAME(sc), r,
1746 	    mask, target);
1747 
1748 	for (i = 0; i < 10000; i++) {
1749 		if ((mpi_read(sc, r) & mask) != target)
1750 			return (0);
1751 		delay(1000);
1752 	}
1753 
1754 	return (1);
1755 }
1756 
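/*
 * Get the IOC into the READY state: wait for it to leave RESET, leave
 * it alone if a PCI peer already owns it, and soft (or failing that,
 * hard) reset it if it is found OPERATIONAL or FAULTed.
 */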
1757 int
1758 mpi_init(struct mpi_softc *sc)
1759 {
1760 	u_int32_t			db;
1761 	int				i;
1762 
1763 	/* spin until the IOC leaves the RESET state */
1764 	if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
1765 	    MPI_DOORBELL_STATE_RESET) != 0) {
1766 		DNPRINTF(MPI_D_MISC, "%s: mpi_init timeout waiting to leave "
1767 		    "reset state\n", DEVNAME(sc));
1768 		return (1);
1769 	}
1770 
1771 	/* check current ownership */
1772 	db = mpi_read_db(sc);
1773 	if ((db & MPI_DOORBELL_WHOINIT) == MPI_DOORBELL_WHOINIT_PCIPEER) {
1774 		DNPRINTF(MPI_D_MISC, "%s: mpi_init initialised by pci peer\n",
1775 		    DEVNAME(sc));
1776 		return (0);
1777 	}
1778 
1779 	for (i = 0; i < 5; i++) {
1780 		switch (db & MPI_DOORBELL_STATE) {
1781 		case MPI_DOORBELL_STATE_READY:
1782 			DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is ready\n",
1783 			    DEVNAME(sc));
1784 			return (0);
1785 
1786 		case MPI_DOORBELL_STATE_OPER:
1787 		case MPI_DOORBELL_STATE_FAULT:
1788 			DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is being "
1789 			    "reset\n" , DEVNAME(sc));
1790 			if (mpi_reset_soft(sc) != 0)
1791 				mpi_reset_hard(sc);
1792 			break;
1793 
1794 		case MPI_DOORBELL_STATE_RESET:
1795 			DNPRINTF(MPI_D_MISC, "%s: mpi_init waiting to come "
1796 			    "out of reset\n", DEVNAME(sc));
1797 			if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
1798 			    MPI_DOORBELL_STATE_RESET) != 0)
1799 				return (1);
1800 			break;
1801 		}
1802 		db = mpi_read_db(sc);
1803 	}
1804 
1805 	return (1);
1806 }
1807 
1808 int
1809 mpi_reset_soft(struct mpi_softc *sc)
1810 {
1811 	DNPRINTF(MPI_D_MISC, "%s: mpi_reset_soft\n", DEVNAME(sc));
1812 
1813 	if (mpi_read_db(sc) & MPI_DOORBELL_INUSE)
1814 		return (1);
1815 
1816 	mpi_write_db(sc,
1817 	    MPI_DOORBELL_FUNCTION(MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET));
1818 	if (mpi_wait_eq(sc, MPI_INTR_STATUS,
1819 	    MPI_INTR_STATUS_IOCDOORBELL, 0) != 0)
1820 		return (1);
1821 
1822 	if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
1823 	    MPI_DOORBELL_STATE_READY) != 0)
1824 		return (1);
1825 
1826 	return (0);
1827 }
1828 
1829 int
1830 mpi_reset_hard(struct mpi_softc *sc)
1831 {
1832 	DNPRINTF(MPI_D_MISC, "%s: mpi_reset_hard\n", DEVNAME(sc));
1833 
1834 	/* enable diagnostic register */
1835 	mpi_write(sc, MPI_WRITESEQ, 0xff);
1836 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_1);
1837 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_2);
1838 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_3);
1839 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_4);
1840 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_5);
1841 
1842 	/* reset ioc */
1843 	mpi_write(sc, MPI_HOSTDIAG, MPI_HOSTDIAG_RESET_ADAPTER);
1844 
1845 	delay(10000);
1846 
1847 	/* disable diagnostic register */
1848 	mpi_write(sc, MPI_WRITESEQ, 0xff);
1849 
1850 	/* restore pci bits? */
1851 
1852 	/* firmware bits? */
1853 	return (0);
1854 }
1855 
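/*
 * Doorbell handshake: used for IOCFacts/IOCInit style requests before
 * the request and reply queues exist. The request is clocked into the
 * IOC one dword at a time through the doorbell register, waiting for
 * the IOC to acknowledge each write via the IOC doorbell bit in the
 * interrupt status register.
 */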
1856 int
1857 mpi_handshake_send(struct mpi_softc *sc, void *buf, size_t dwords)
1858 {
1859 	u_int32_t				*query = buf;
1860 	int					i;
1861 
1862 	/* make sure the doorbell is not in use. */
1863 	if (mpi_read_db(sc) & MPI_DOORBELL_INUSE)
1864 		return (1);
1865 
1866 	/* clear pending doorbell interrupts */
1867 	if (mpi_read_intr(sc) & MPI_INTR_STATUS_DOORBELL)
1868 		mpi_write_intr(sc, 0);
1869 
1870 	/*
1871 	 * first write the doorbell with the handshake function and the
1872 	 * dword count.
1873 	 */
1874 	mpi_write_db(sc, MPI_DOORBELL_FUNCTION(MPI_FUNCTION_HANDSHAKE) |
1875 	    MPI_DOORBELL_DWORDS(dwords));
1876 
1877 	/*
1878 	 * the doorbell used bit will be set because a doorbell function has
1879 	 * started. Wait for the interrupt and then ack it.
1880 	 */
1881 	if (mpi_wait_db_int(sc) != 0)
1882 		return (1);
1883 	mpi_write_intr(sc, 0);
1884 
1885 	/* poll for the acknowledgement. */
1886 	if (mpi_wait_db_ack(sc) != 0)
1887 		return (1);
1888 
1889 	/* write the query through the doorbell. */
1890 	for (i = 0; i < dwords; i++) {
1891 		mpi_write_db(sc, htole32(query[i]));
1892 		if (mpi_wait_db_ack(sc) != 0)
1893 			return (1);
1894 	}
1895 
1896 	return (0);
1897 }
1898 
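/*
 * handshake replies come back through the doorbell 16 bits at a time;
 * two reads, each acked by clearing the interrupt, make up one dword.
 */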
1899 int
1900 mpi_handshake_recv_dword(struct mpi_softc *sc, u_int32_t *dword)
1901 {
1902 	u_int16_t				*words = (u_int16_t *)dword;
1903 	int					i;
1904 
1905 	for (i = 0; i < 2; i++) {
1906 		if (mpi_wait_db_int(sc) != 0)
1907 			return (1);
1908 		words[i] = letoh16(mpi_read_db(sc) & MPI_DOORBELL_DATA_MASK);
1909 		mpi_write_intr(sc, 0);
1910 	}
1911 
1912 	return (0);
1913 }
1914 
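/*
 * the first dword of a handshake reply carries the message header, so
 * its msg_length field says how many dwords the ioc will send. read up
 * to the caller's buffer size and drain anything beyond that so the
 * doorbell is left clean.
 */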
1915 int
1916 mpi_handshake_recv(struct mpi_softc *sc, void *buf, size_t dwords)
1917 {
1918 	struct mpi_msg_reply			*reply = buf;
1919 	u_int32_t				*dbuf = buf, dummy;
1920 	int					i;
1921 
1922 	/* get the first dword so we can read the length out of the header. */
1923 	if (mpi_handshake_recv_dword(sc, &dbuf[0]) != 0)
1924 		return (1);
1925 
1926 	DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dwords: %d reply: %d\n",
1927 	    DEVNAME(sc), dwords, reply->msg_length);
1928 
1929 	/*
1930 	 * the total length, in dwords, is in the message length field of the
1931 	 * reply header.
1932 	 */
1933 	for (i = 1; i < MIN(dwords, reply->msg_length); i++) {
1934 		if (mpi_handshake_recv_dword(sc, &dbuf[i]) != 0)
1935 			return (1);
1936 	}
1937 
1938 	/* if there's extra stuff to come off the ioc, discard it */
1939 	while (i++ < reply->msg_length) {
1940 		if (mpi_handshake_recv_dword(sc, &dummy) != 0)
1941 			return (1);
1942 		DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dummy read: "
1943 		    "0x%08x\n", DEVNAME(sc), dummy);
1944 	}
1945 
1946 	/* wait for the doorbell used bit to be reset and clear the intr */
1947 	if (mpi_wait_db_int(sc) != 0)
1948 		return (1);
1949 	mpi_write_intr(sc, 0);
1950 
1951 	return (0);
1952 }
1953 
1954 void
1955 mpi_empty_done(struct mpi_ccb *ccb)
1956 {
1957 	/* nothing to do */
1958 }
1959 
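/*
 * IOC FACTS is fetched over the doorbell handshake before the reply
 * queues exist. the reply carries the limits everything else is sized
 * from: global credits, bus width, reply queue depth, firmware image
 * size and the request frame size the sgl/chain geometry is derived
 * from.
 */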
1960 int
1961 mpi_iocfacts(struct mpi_softc *sc)
1962 {
1963 	struct mpi_msg_iocfacts_request		ifq;
1964 	struct mpi_msg_iocfacts_reply		ifp;
1965 
1966 	DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts\n", DEVNAME(sc));
1967 
1968 	memset(&ifq, 0, sizeof(ifq));
1969 	memset(&ifp, 0, sizeof(ifp));
1970 
1971 	ifq.function = MPI_FUNCTION_IOC_FACTS;
1972 	ifq.chain_offset = 0;
1973 	ifq.msg_flags = 0;
1974 	ifq.msg_context = htole32(0xdeadbeef);
1975 
1976 	if (mpi_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
1977 		DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts send failed\n",
1978 		    DEVNAME(sc));
1979 		return (1);
1980 	}
1981 
1982 	if (mpi_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
1983 		DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts recv failed\n",
1984 		    DEVNAME(sc));
1985 		return (1);
1986 	}
1987 
1988 	DNPRINTF(MPI_D_MISC, "%s:  func: 0x%02x len: %d msgver: %d.%d\n",
1989 	    DEVNAME(sc), ifp.function, ifp.msg_length,
1990 	    ifp.msg_version_maj, ifp.msg_version_min);
1991 	DNPRINTF(MPI_D_MISC, "%s:  msgflags: 0x%02x iocnumber: 0x%02x "
1992 	    "hdrver: %d.%d\n", DEVNAME(sc), ifp.msg_flags,
1993 	    ifp.ioc_number, ifp.header_version_maj,
1994 	    ifp.header_version_min);
1995 	DNPRINTF(MPI_D_MISC, "%s:  message context: 0x%08x\n", DEVNAME(sc),
1996 	    letoh32(ifp.msg_context));
1997 	DNPRINTF(MPI_D_MISC, "%s:  iocstatus: 0x%04x ioexcept: 0x%04x\n",
1998 	    DEVNAME(sc), letoh16(ifp.ioc_status),
1999 	    letoh16(ifp.ioc_exceptions));
2000 	DNPRINTF(MPI_D_MISC, "%s:  iocloginfo: 0x%08x\n", DEVNAME(sc),
2001 	    letoh32(ifp.ioc_loginfo));
2002 	DNPRINTF(MPI_D_MISC, "%s:  flags: 0x%02x blocksize: %d whoinit: 0x%02x "
2003 	    "maxchdepth: %d\n", DEVNAME(sc), ifp.flags,
2004 	    ifp.block_size, ifp.whoinit, ifp.max_chain_depth);
2005 	DNPRINTF(MPI_D_MISC, "%s:  reqfrsize: %d replyqdepth: %d\n",
2006 	    DEVNAME(sc), letoh16(ifp.request_frame_size),
2007 	    letoh16(ifp.reply_queue_depth));
2008 	DNPRINTF(MPI_D_MISC, "%s:  productid: 0x%04x\n", DEVNAME(sc),
2009 	    letoh16(ifp.product_id));
2010 	DNPRINTF(MPI_D_MISC, "%s:  hostmfahiaddr: 0x%08x\n", DEVNAME(sc),
2011 	    letoh32(ifp.current_host_mfa_hi_addr));
2012 	DNPRINTF(MPI_D_MISC, "%s:  event_state: 0x%02x number_of_ports: %d "
2013 	    "global_credits: %d\n",
2014 	    DEVNAME(sc), ifp.event_state, ifp.number_of_ports,
2015 	    letoh16(ifp.global_credits));
2016 	DNPRINTF(MPI_D_MISC, "%s:  sensebufhiaddr: 0x%08x\n", DEVNAME(sc),
2017 	    letoh32(ifp.current_sense_buffer_hi_addr));
2018 	DNPRINTF(MPI_D_MISC, "%s:  maxbus: %d maxdev: %d replyfrsize: %d\n",
2019 	    DEVNAME(sc), ifp.max_buses, ifp.max_devices,
2020 	    letoh16(ifp.current_reply_frame_size));
2021 	DNPRINTF(MPI_D_MISC, "%s:  fw_image_size: %d\n", DEVNAME(sc),
2022 	    letoh32(ifp.fw_image_size));
2023 	DNPRINTF(MPI_D_MISC, "%s:  ioc_capabilities: 0x%08x\n", DEVNAME(sc),
2024 	    letoh32(ifp.ioc_capabilities));
2025 	DNPRINTF(MPI_D_MISC, "%s:  fw_version: %d.%d fw_version_unit: 0x%02x "
2026 	    "fw_version_dev: 0x%02x\n", DEVNAME(sc),
2027 	    ifp.fw_version_maj, ifp.fw_version_min,
2028 	    ifp.fw_version_unit, ifp.fw_version_dev);
2029 	DNPRINTF(MPI_D_MISC, "%s:  hi_priority_queue_depth: 0x%04x\n",
2030 	    DEVNAME(sc), letoh16(ifp.hi_priority_queue_depth));
2031 	DNPRINTF(MPI_D_MISC, "%s:  host_page_buffer_sge: hdr: 0x%08x "
2032 	    "addr 0x%08x%08x\n", DEVNAME(sc),
2033 	    letoh32(ifp.host_page_buffer_sge.sg_hdr),
2034 	    letoh32(ifp.host_page_buffer_sge.sg_addr_hi),
2035 	    letoh32(ifp.host_page_buffer_sge.sg_addr_lo));
2036 	sc->sc_maxcmds = letoh16(ifp.global_credits);
2037 	sc->sc_maxchdepth = ifp.max_chain_depth;
2038 	sc->sc_ioc_number = ifp.ioc_number;
2039 	if (sc->sc_flags & MPI_F_SPI)
2040 		sc->sc_buswidth = 16;
2041 	else
2042 		sc->sc_buswidth =
2043 		    (ifp.max_devices == 0) ? 256 : ifp.max_devices;
2044 	if (ifp.flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)
2045 		sc->sc_fw_len = letoh32(ifp.fw_image_size);
2046 
2047 	sc->sc_repq = MIN(MPI_REPLYQ_DEPTH, letoh16(ifp.reply_queue_depth));
2048 
2049 	/*
2050 	 * you can fit sg elements on the end of the io cmd if they fit in the
2051 	 * request frame size.
2052 	 */
2053 	sc->sc_first_sgl_len = ((letoh16(ifp.request_frame_size) * 4) -
2054 	    sizeof(struct mpi_msg_scsi_io)) / sizeof(struct mpi_sge);
2055 	DNPRINTF(MPI_D_MISC, "%s:   first sgl len: %d\n", DEVNAME(sc),
2056 	    sc->sc_first_sgl_len);
2057 
2058 	sc->sc_chain_len = (letoh16(ifp.request_frame_size) * 4) /
2059 	    sizeof(struct mpi_sge);
2060 	DNPRINTF(MPI_D_MISC, "%s:   chain len: %d\n", DEVNAME(sc),
2061 	    sc->sc_chain_len);
2062 
2063 	/* the sgl tailing the io cmd loses an entry to the chain element. */
2064 	sc->sc_max_sgl_len = MPI_MAX_SGL - 1;
2065 	/* the sgl chains lose an entry for each chain element */
2066 	sc->sc_max_sgl_len -= (MPI_MAX_SGL - sc->sc_first_sgl_len) /
2067 	    sc->sc_chain_len;
2068 	DNPRINTF(MPI_D_MISC, "%s:   max sgl len: %d\n", DEVNAME(sc),
2069 	    sc->sc_max_sgl_len);
2070 
2071 	/* XXX we're ignoring the max chain depth */
2072 
2073 	return (0);
2074 }
2075 
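/*
 * IOC INIT hands the ioc what it needs to start taking requests: the
 * reply frame size, the high 32 bits of the request and sense buffer
 * dma addresses, and the bus/device limits worked out above.
 */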
2076 int
2077 mpi_iocinit(struct mpi_softc *sc)
2078 {
2079 	struct mpi_msg_iocinit_request		iiq;
2080 	struct mpi_msg_iocinit_reply		iip;
2081 	u_int32_t				hi_addr;
2082 
2083 	DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit\n", DEVNAME(sc));
2084 
2085 	memset(&iiq, 0, sizeof(iiq));
2086 	memset(&iip, 0, sizeof(iip));
2087 
2088 	iiq.function = MPI_FUNCTION_IOC_INIT;
2089 	iiq.whoinit = MPI_WHOINIT_HOST_DRIVER;
2090 
2091 	iiq.max_devices = (sc->sc_buswidth == 256) ? 0 : sc->sc_buswidth;
2092 	iiq.max_buses = 1;
2093 
2094 	iiq.msg_context = htole32(0xd00fd00f);
2095 
2096 	iiq.reply_frame_size = htole16(MPI_REPLY_SIZE);
2097 
2098 	hi_addr = (u_int32_t)(MPI_DMA_DVA(sc->sc_requests) >> 32);
2099 	iiq.host_mfa_hi_addr = htole32(hi_addr);
2100 	iiq.sense_buffer_hi_addr = htole32(hi_addr);
2101 
2102 	iiq.msg_version_maj = 0x01;
2103 	iiq.msg_version_min = 0x02;
2104 
2105 	iiq.hdr_version_unit = 0x0d;
2106 	iiq.hdr_version_dev = 0x00;
2107 
2108 	if (mpi_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) {
2109 		DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit send failed\n",
2110 		    DEVNAME(sc));
2111 		return (1);
2112 	}
2113 
2114 	if (mpi_handshake_recv(sc, &iip, dwordsof(iip)) != 0) {
2115 		DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit recv failed\n",
2116 		    DEVNAME(sc));
2117 		return (1);
2118 	}
2119 
2120 	DNPRINTF(MPI_D_MISC, "%s:  function: 0x%02x msg_length: %d "
2121 	    "whoinit: 0x%02x\n", DEVNAME(sc), iip.function,
2122 	    iip.msg_length, iip.whoinit);
2123 	DNPRINTF(MPI_D_MISC, "%s:  msg_flags: 0x%02x max_buses: %d "
2124 	    "max_devices: %d flags: 0x%02x\n", DEVNAME(sc), iip.msg_flags,
2125 	    iip.max_buses, iip.max_devices, iip.flags);
2126 	DNPRINTF(MPI_D_MISC, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2127 	    letoh32(iip.msg_context));
2128 	DNPRINTF(MPI_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2129 	    letoh16(iip.ioc_status));
2130 	DNPRINTF(MPI_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2131 	    letoh32(iip.ioc_loginfo));
2132 
2133 	return (0);
2134 }
2135 
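/*
 * PORT FACTS goes through the normal request queue but is polled for
 * completion. the reply tells us the port type and the ioc's own scsi
 * id on the bus.
 */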
2136 int
2137 mpi_portfacts(struct mpi_softc *sc)
2138 {
2139 	struct mpi_ccb				*ccb;
2140 	struct mpi_msg_portfacts_request	*pfq;
2141 	volatile struct mpi_msg_portfacts_reply	*pfp;
2142 	int					rv = 1;
2143 
2144 	DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts\n", DEVNAME(sc));
2145 
2146 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
2147 	if (ccb == NULL) {
2148 		DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts ccb_get\n",
2149 		    DEVNAME(sc));
2150 		return (rv);
2151 	}
2152 
2153 	ccb->ccb_done = mpi_empty_done;
2154 	pfq = ccb->ccb_cmd;
2155 
2156 	pfq->function = MPI_FUNCTION_PORT_FACTS;
2157 	pfq->chain_offset = 0;
2158 	pfq->msg_flags = 0;
2159 	pfq->port_number = 0;
2160 
2161 	if (mpi_poll(sc, ccb, 50000) != 0) {
2162 		DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts poll\n", DEVNAME(sc));
2163 		goto err;
2164 	}
2165 
2166 	if (ccb->ccb_rcb == NULL) {
2167 		DNPRINTF(MPI_D_MISC, "%s: empty portfacts reply\n",
2168 		    DEVNAME(sc));
2169 		goto err;
2170 	}
2171 	pfp = ccb->ccb_rcb->rcb_reply;
2172 
2173 	DNPRINTF(MPI_D_MISC, "%s:  function: 0x%02x msg_length: %d\n",
2174 	    DEVNAME(sc), pfp->function, pfp->msg_length);
2175 	DNPRINTF(MPI_D_MISC, "%s:  msg_flags: 0x%02x port_number: %d\n",
2176 	    DEVNAME(sc), pfp->msg_flags, pfp->port_number);
2177 	DNPRINTF(MPI_D_MISC, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2178 	    letoh32(pfp->msg_context));
2179 	DNPRINTF(MPI_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2180 	    letoh16(pfp->ioc_status));
2181 	DNPRINTF(MPI_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2182 	    letoh32(pfp->ioc_loginfo));
2183 	DNPRINTF(MPI_D_MISC, "%s:  max_devices: %d port_type: 0x%02x\n",
2184 	    DEVNAME(sc), letoh16(pfp->max_devices), pfp->port_type);
2185 	DNPRINTF(MPI_D_MISC, "%s:  protocol_flags: 0x%04x port_scsi_id: %d\n",
2186 	    DEVNAME(sc), letoh16(pfp->protocol_flags),
2187 	    letoh16(pfp->port_scsi_id));
2188 	DNPRINTF(MPI_D_MISC, "%s:  max_persistent_ids: %d "
2189 	    "max_posted_cmd_buffers: %d\n", DEVNAME(sc),
2190 	    letoh16(pfp->max_persistent_ids),
2191 	    letoh16(pfp->max_posted_cmd_buffers));
2192 	DNPRINTF(MPI_D_MISC, "%s:  max_lan_buckets: %d\n", DEVNAME(sc),
2193 	    letoh16(pfp->max_lan_buckets));
2194 
2195 	sc->sc_porttype = pfp->port_type;
2196 	if (sc->sc_target == -1)
2197 		sc->sc_target = letoh16(pfp->port_scsi_id);
2198 
2199 	mpi_push_reply(sc, ccb->ccb_rcb);
2200 	rv = 0;
2201 err:
2202 	scsi_io_put(&sc->sc_iopool, ccb);
2203 
2204 	return (rv);
2205 }
2206 
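/*
 * fetch IOC page 1 and, if the reply coalescing flag is set, clear it
 * and write the page back so replies are posted without delay.
 */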
2207 int
2208 mpi_cfg_coalescing(struct mpi_softc *sc)
2209 {
2210 	struct mpi_cfg_hdr		hdr;
2211 	struct mpi_cfg_ioc_pg1		pg;
2212 	u_int32_t			flags;
2213 
2214 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 1, 0, &hdr) != 0) {
2215 		DNPRINTF(MPI_D_MISC, "%s: unable to fetch IOC page 1 header\n",
2216 		    DEVNAME(sc));
2217 		return (1);
2218 	}
2219 
2220 	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg, sizeof(pg)) != 0) {
2221 		DNPRINTF(MPI_D_MISC, "%s: unable to fetch IOC page 1\n",
2222 		    DEVNAME(sc));
2223 		return (1);
2224 	}
2225 
2226 	DNPRINTF(MPI_D_MISC, "%s: IOC page 1\n", DEVNAME(sc));
2227 	DNPRINTF(MPI_D_MISC, "%s:  flags: 0x%08x\n", DEVNAME(sc),
2228 	    letoh32(pg.flags));
2229 	DNPRINTF(MPI_D_MISC, "%s:  coalescing_timeout: %d\n", DEVNAME(sc),
2230 	    letoh32(pg.coalescing_timeout));
2231 	DNPRINTF(MPI_D_MISC, "%s:  coalescing_depth: %d pci_slot_num: %d\n",
2232 	    DEVNAME(sc), pg.coalescing_depth, pg.pci_slot_num);
2233 
2234 	flags = letoh32(pg.flags);
2235 	if (!ISSET(flags, MPI_CFG_IOC_1_REPLY_COALESCING))
2236 		return (0);
2237 
2238 	CLR(pg.flags, htole32(MPI_CFG_IOC_1_REPLY_COALESCING));
2239 	if (mpi_cfg_page(sc, 0, &hdr, 0, &pg, sizeof(pg)) != 0) {
2240 		DNPRINTF(MPI_D_MISC, "%s: unable to clear coalescing\n",
2241 		    DEVNAME(sc));
2242 		return (1);
2243 	}
2244 
2245 	return (0);
2246 }
2247 
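/*
 * post a persistent EVENT NOTIFICATION request. the ccb is kept around
 * in sc_evt_ccb and every asynchronous event reply from the ioc is
 * delivered via mpi_eventnotify_done.
 */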
2248 int
2249 mpi_eventnotify(struct mpi_softc *sc)
2250 {
2251 	struct mpi_ccb				*ccb;
2252 	struct mpi_msg_event_request		*enq;
2253 
2254 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
2255 	if (ccb == NULL) {
2256 		DNPRINTF(MPI_D_MISC, "%s: mpi_eventnotify ccb_get\n",
2257 		    DEVNAME(sc));
2258 		return (1);
2259 	}
2260 
2261 	sc->sc_evt_ccb = ccb;
2262 	SIMPLEQ_INIT(&sc->sc_evt_ack_queue);
2263 	mtx_init(&sc->sc_evt_ack_mtx, IPL_BIO);
2264 	scsi_ioh_set(&sc->sc_evt_ack_handler, &sc->sc_iopool,
2265 	    mpi_eventack, sc);
2266 
2267 	ccb->ccb_done = mpi_eventnotify_done;
2268 	enq = ccb->ccb_cmd;
2269 
2270 	enq->function = MPI_FUNCTION_EVENT_NOTIFICATION;
2271 	enq->chain_offset = 0;
2272 	enq->event_switch = MPI_EVENT_SWITCH_ON;
2273 
2274 	mpi_start(sc, ccb);
2275 	return (0);
2276 }
2277 
2278 void
2279 mpi_eventnotify_done(struct mpi_ccb *ccb)
2280 {
2281 	struct mpi_softc			*sc = ccb->ccb_sc;
2282 	struct mpi_rcb				*rcb = ccb->ccb_rcb;
2283 	struct mpi_msg_event_reply		*enp = rcb->rcb_reply;
2284 
2285 	DNPRINTF(MPI_D_EVT, "%s: mpi_eventnotify_done\n", DEVNAME(sc));
2286 
2287 	DNPRINTF(MPI_D_EVT, "%s:  function: 0x%02x msg_length: %d "
2288 	    "data_length: %d\n", DEVNAME(sc), enp->function, enp->msg_length,
2289 	    letoh16(enp->data_length));
2290 	DNPRINTF(MPI_D_EVT, "%s:  ack_required: %d msg_flags 0x%02x\n",
2291 	    DEVNAME(sc), enp->ack_required, enp->msg_flags);
2292 	DNPRINTF(MPI_D_EVT, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2293 	    letoh32(enp->msg_context));
2294 	DNPRINTF(MPI_D_EVT, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2295 	    letoh16(enp->ioc_status));
2296 	DNPRINTF(MPI_D_EVT, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2297 	    letoh32(enp->ioc_loginfo));
2298 	DNPRINTF(MPI_D_EVT, "%s:  event: 0x%08x\n", DEVNAME(sc),
2299 	    letoh32(enp->event));
2300 	DNPRINTF(MPI_D_EVT, "%s:  event_context: 0x%08x\n", DEVNAME(sc),
2301 	    letoh32(enp->event_context));
2302 
2303 	switch (letoh32(enp->event)) {
2304 	/* ignore these */
2305 	case MPI_EVENT_EVENT_CHANGE:
2306 	case MPI_EVENT_SAS_PHY_LINK_STATUS:
2307 		break;
2308 
2309 	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2310 		if (sc->sc_scsibus == NULL)
2311 			break;
2312 
2313 		if (mpi_evt_sas(sc, rcb) != 0) {
2314 			/* reply is freed later on */
2315 			return;
2316 		}
2317 		break;
2318 
2319 	case MPI_EVENT_RESCAN:
2320 		if (sc->sc_scsibus != NULL &&
2321 		    sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_FC)
2322 			mpi_evt_fc_rescan(sc);
2323 		break;
2324 
2325 	default:
2326 		DNPRINTF(MPI_D_EVT, "%s:  unhandled event 0x%02x\n",
2327 		    DEVNAME(sc), letoh32(enp->event));
2328 		break;
2329 	}
2330 
2331 	mpi_eventnotify_free(sc, rcb);
2332 }
2333 
2334 void
2335 mpi_eventnotify_free(struct mpi_softc *sc, struct mpi_rcb *rcb)
2336 {
2337 	struct mpi_msg_event_reply		*enp = rcb->rcb_reply;
2338 
2339 	if (enp->ack_required) {
2340 		mtx_enter(&sc->sc_evt_ack_mtx);
2341 		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_ack_queue, rcb, rcb_link);
2342 		mtx_leave(&sc->sc_evt_ack_mtx);
2343 		scsi_ioh_add(&sc->sc_evt_ack_handler);
2344 	} else
2345 		mpi_push_reply(sc, rcb);
2346 }
2347 
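/*
 * handle a SAS device status change on bus 0. added devices are probed
 * straight away; a device that stopped responding is deactivated and
 * its reply queued for mpi_evt_sas_detach, which resets the target
 * before requesting the detach. returning 1 means the reply frame is
 * still in use and must not be pushed back yet.
 */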
2348 int
2349 mpi_evt_sas(struct mpi_softc *sc, struct mpi_rcb *rcb)
2350 {
2351 	struct mpi_evt_sas_change		*ch;
2352 	u_int8_t				*data;
2353 
2354 	data = rcb->rcb_reply;
2355 	data += sizeof(struct mpi_msg_event_reply);
2356 	ch = (struct mpi_evt_sas_change *)data;
2357 
2358 	if (ch->bus != 0)
2359 		return (0);
2360 
2361 	switch (ch->reason) {
2362 	case MPI_EVT_SASCH_REASON_ADDED:
2363 	case MPI_EVT_SASCH_REASON_NO_PERSIST_ADDED:
2364 		if (scsi_req_probe(sc->sc_scsibus, ch->target, -1) != 0) {
2365 			printf("%s: unable to request attach of %d\n",
2366 			    DEVNAME(sc), ch->target);
2367 		}
2368 		break;
2369 
2370 	case MPI_EVT_SASCH_REASON_NOT_RESPONDING:
2371 		scsi_activate(sc->sc_scsibus, ch->target, -1, DVACT_DEACTIVATE);
2372 
2373 		mtx_enter(&sc->sc_evt_scan_mtx);
2374 		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_scan_queue, rcb, rcb_link);
2375 		mtx_leave(&sc->sc_evt_scan_mtx);
2376 		scsi_ioh_add(&sc->sc_evt_scan_handler);
2377 
2378 		/* we'll handle event ack later on */
2379 		return (1);
2380 
2381 	case MPI_EVT_SASCH_REASON_SMART_DATA:
2382 	case MPI_EVT_SASCH_REASON_UNSUPPORTED:
2383 	case MPI_EVT_SASCH_REASON_INTERNAL_RESET:
2384 		break;
2385 	default:
2386 		printf("%s: unknown reason for SAS device status change: "
2387 		    "0x%02x\n", DEVNAME(sc), ch->reason);
2388 		break;
2389 	}
2390 
2391 	return (0);
2392 }
2393 
2394 void
2395 mpi_evt_sas_detach(void *cookie, void *io)
2396 {
2397 	struct mpi_softc			*sc = cookie;
2398 	struct mpi_ccb				*ccb = io;
2399 	struct mpi_rcb				*rcb, *next;
2400 	struct mpi_msg_event_reply		*enp;
2401 	struct mpi_evt_sas_change		*ch;
2402 	struct mpi_msg_scsi_task_request	*str;
2403 
2404 	DNPRINTF(MPI_D_EVT, "%s: event sas detach handler\n", DEVNAME(sc));
2405 
2406 	mtx_enter(&sc->sc_evt_scan_mtx);
2407 	rcb = SIMPLEQ_FIRST(&sc->sc_evt_scan_queue);
2408 	if (rcb != NULL) {
2409 		next = SIMPLEQ_NEXT(rcb, rcb_link);
2410 		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_scan_queue, rcb_link);
2411 	}
2412 	mtx_leave(&sc->sc_evt_scan_mtx);
2413 
2414 	if (rcb == NULL) {
2415 		scsi_io_put(&sc->sc_iopool, ccb);
2416 		return;
2417 	}
2418 
2419 	enp = rcb->rcb_reply;
2420 	ch = (struct mpi_evt_sas_change *)(enp + 1);
2421 
2422 	ccb->ccb_done = mpi_evt_sas_detach_done;
2423 	str = ccb->ccb_cmd;
2424 
2425 	str->target_id = ch->target;
2426 	str->bus = 0;
2427 	str->function = MPI_FUNCTION_SCSI_TASK_MGMT;
2428 
2429 	str->task_type = MPI_MSG_SCSI_TASK_TYPE_TARGET_RESET;
2430 
2431 	mpi_eventnotify_free(sc, rcb);
2432 
2433 	mpi_start(sc, ccb);
2434 
2435 	if (next != NULL)
2436 		scsi_ioh_add(&sc->sc_evt_scan_handler);
2437 }
2438 
2439 void
2440 mpi_evt_sas_detach_done(struct mpi_ccb *ccb)
2441 {
2442 	struct mpi_softc			*sc = ccb->ccb_sc;
2443 	struct mpi_msg_scsi_task_reply		*r = ccb->ccb_rcb->rcb_reply;
2444 
2445 	if (scsi_req_detach(sc->sc_scsibus, r->target_id, -1,
2446 	    DETACH_FORCE) != 0) {
2447 		printf("%s: unable to request detach of %d\n",
2448 		    DEVNAME(sc), r->target_id);
2449 	}
2450 
2451 	mpi_push_reply(sc, ccb->ccb_rcb);
2452 	scsi_io_put(&sc->sc_iopool, ccb);
2453 }
2454 
2455 void
2456 mpi_evt_fc_rescan(struct mpi_softc *sc)
2457 {
2458 	int					queue = 1;
2459 
2460 	mtx_enter(&sc->sc_evt_rescan_mtx);
2461 	if (sc->sc_evt_rescan_sem)
2462 		queue = 0;
2463 	else
2464 		sc->sc_evt_rescan_sem = 1;
2465 	mtx_leave(&sc->sc_evt_rescan_mtx);
2466 
2467 	if (queue) {
2468 		workq_queue_task(NULL, &sc->sc_evt_rescan, 0,
2469 		    mpi_fc_rescan, sc, NULL);
2470 	}
2471 }
2472 
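/*
 * walk the fc device pages (chained by port id) to build a bitmap of
 * the targets currently present on bus 0, then probe targets that
 * appeared and detach the ones that went away.
 */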
2473 void
2474 mpi_fc_rescan(void *xsc, void *xarg)
2475 {
2476 	struct mpi_softc			*sc = xsc;
2477 	struct mpi_cfg_hdr			hdr;
2478 	struct mpi_cfg_fc_device_pg0		pg;
2479 	struct scsi_link			*link;
2480 	u_int8_t				devmap[256 / NBBY];
2481 	u_int32_t				id = 0xffffff;
2482 	int					i;
2483 
2484 	mtx_enter(&sc->sc_evt_rescan_mtx);
2485 	sc->sc_evt_rescan_sem = 0;
2486 	mtx_leave(&sc->sc_evt_rescan_mtx);
2487 
2488 	memset(devmap, 0, sizeof(devmap));
2489 
2490 	do {
2491 		if (mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_DEV, 0,
2492 		    id, 0, &hdr) != 0) {
2493 			printf("%s: header get for rescan of 0x%08x failed\n",
2494 			    DEVNAME(sc), id);
2495 			return;
2496 		}
2497 
2498 		memset(&pg, 0, sizeof(pg));
2499 		if (mpi_req_cfg_page(sc, id, 0, &hdr, 1, &pg, sizeof(pg)) != 0)
2500 			break;
2501 
2502 		if (ISSET(pg.flags, MPI_CFG_FC_DEV_0_FLAGS_BUSADDR_VALID) &&
2503 		    pg.current_bus == 0)
2504 			setbit(devmap, pg.current_target_id);
2505 
2506 		id = letoh32(pg.port_id);
2507 	} while (id <= 0xff0000);
2508 
2509 	for (i = 0; i < sc->sc_buswidth; i++) {
2510 		link = scsi_get_link(sc->sc_scsibus, i, 0);
2511 
2512 		if (isset(devmap, i)) {
2513 			if (link == NULL)
2514 				scsi_probe_target(sc->sc_scsibus, i);
2515 		} else {
2516 			if (link != NULL) {
2517 				scsi_activate(sc->sc_scsibus, i, -1,
2518 				    DVACT_DEACTIVATE);
2519 				scsi_detach_target(sc->sc_scsibus, i,
2520 				    DETACH_FORCE);
2521 			}
2522 		}
2523 	}
2524 }
2525 
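/*
 * iopool handler that drains sc_evt_ack_queue: each queued event reply
 * gets an EVENT ACK sent with its event and context, and the reply
 * frame is pushed back to the ioc.
 */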
2526 void
2527 mpi_eventack(void *cookie, void *io)
2528 {
2529 	struct mpi_softc			*sc = cookie;
2530 	struct mpi_ccb				*ccb = io;
2531 	struct mpi_rcb				*rcb, *next;
2532 	struct mpi_msg_event_reply		*enp;
2533 	struct mpi_msg_eventack_request		*eaq;
2534 
2535 	DNPRINTF(MPI_D_EVT, "%s: event ack\n", DEVNAME(sc));
2536 
2537 	mtx_enter(&sc->sc_evt_ack_mtx);
2538 	rcb = SIMPLEQ_FIRST(&sc->sc_evt_ack_queue);
2539 	if (rcb != NULL) {
2540 		next = SIMPLEQ_NEXT(rcb, rcb_link);
2541 		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_ack_queue, rcb_link);
2542 	}
2543 	mtx_leave(&sc->sc_evt_ack_mtx);
2544 
2545 	if (rcb == NULL) {
2546 		scsi_io_put(&sc->sc_iopool, ccb);
2547 		return;
2548 	}
2549 
2550 	enp = rcb->rcb_reply;
2551 
2552 	ccb->ccb_done = mpi_eventack_done;
2553 	eaq = ccb->ccb_cmd;
2554 
2555 	eaq->function = MPI_FUNCTION_EVENT_ACK;
2556 
2557 	eaq->event = enp->event;
2558 	eaq->event_context = enp->event_context;
2559 
2560 	mpi_push_reply(sc, rcb);
2561 	mpi_start(sc, ccb);
2562 
2563 	if (next != NULL)
2564 		scsi_ioh_add(&sc->sc_evt_ack_handler);
2565 }
2566 
2567 void
2568 mpi_eventack_done(struct mpi_ccb *ccb)
2569 {
2570 	struct mpi_softc			*sc = ccb->ccb_sc;
2571 
2572 	DNPRINTF(MPI_D_EVT, "%s: event ack done\n", DEVNAME(sc));
2573 
2574 	mpi_push_reply(sc, ccb->ccb_rcb);
2575 	scsi_io_put(&sc->sc_iopool, ccb);
2576 }
2577 
2578 int
2579 mpi_portenable(struct mpi_softc *sc)
2580 {
2581 	struct mpi_ccb				*ccb;
2582 	struct mpi_msg_portenable_request	*peq;
2583 	int					rv = 0;
2584 
2585 	DNPRINTF(MPI_D_MISC, "%s: mpi_portenable\n", DEVNAME(sc));
2586 
2587 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
2588 	if (ccb == NULL) {
2589 		DNPRINTF(MPI_D_MISC, "%s: mpi_portenable ccb_get\n",
2590 		    DEVNAME(sc));
2591 		return (1);
2592 	}
2593 
2594 	ccb->ccb_done = mpi_empty_done;
2595 	peq = ccb->ccb_cmd;
2596 
2597 	peq->function = MPI_FUNCTION_PORT_ENABLE;
2598 	peq->port_number = 0;
2599 
2600 	if (mpi_poll(sc, ccb, 50000) != 0) {
2601 		DNPRINTF(MPI_D_MISC, "%s: mpi_portenable poll\n", DEVNAME(sc));
2602 		return (1);
2603 	}
2604 
2605 	if (ccb->ccb_rcb == NULL) {
2606 		DNPRINTF(MPI_D_MISC, "%s: empty portenable reply\n",
2607 		    DEVNAME(sc));
2608 		rv = 1;
2609 	} else
2610 		mpi_push_reply(sc, ccb->ccb_rcb);
2611 
2612 	scsi_io_put(&sc->sc_iopool, ccb);
2613 
2614 	return (rv);
2615 }
2616 
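/*
 * if IOC FACTS flagged a firmware image that has to be uploaded by the
 * host (FW_DOWNLOAD_BOOT), pull a copy of it into sc_fw with an FW
 * UPLOAD request.
 */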
2617 int
2618 mpi_fwupload(struct mpi_softc *sc)
2619 {
2620 	struct mpi_ccb				*ccb;
2621 	struct {
2622 		struct mpi_msg_fwupload_request		req;
2623 		struct mpi_sge				sge;
2624 	} __packed				*bundle;
2625 	struct mpi_msg_fwupload_reply		*upp;
2626 	int					rv = 0;
2627 
2628 	if (sc->sc_fw_len == 0)
2629 		return (0);
2630 
2631 	DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload\n", DEVNAME(sc));
2632 
2633 	sc->sc_fw = mpi_dmamem_alloc(sc, sc->sc_fw_len);
2634 	if (sc->sc_fw == NULL) {
2635 		DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload unable to allocate %d\n",
2636 		    DEVNAME(sc), sc->sc_fw_len);
2637 		return (1);
2638 	}
2639 
2640 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
2641 	if (ccb == NULL) {
2642 		DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload ccb_get\n",
2643 		    DEVNAME(sc));
2644 		goto err;
2645 	}
2646 
2647 	ccb->ccb_done = mpi_empty_done;
2648 	bundle = ccb->ccb_cmd;
2649 
2650 	bundle->req.function = MPI_FUNCTION_FW_UPLOAD;
2651 
2652 	bundle->req.image_type = MPI_FWUPLOAD_IMAGETYPE_IOC_FW;
2653 
2654 	bundle->req.tce.details_length = 12;
2655 	bundle->req.tce.image_size = htole32(sc->sc_fw_len);
2656 
2657 	bundle->sge.sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE |
2658 	    MPI_SGE_FL_SIZE_64 | MPI_SGE_FL_LAST | MPI_SGE_FL_EOB |
2659 	    MPI_SGE_FL_EOL | (u_int32_t)sc->sc_fw_len);
2660 	mpi_dvatosge(&bundle->sge, MPI_DMA_DVA(sc->sc_fw));
2661 
2662 	if (mpi_poll(sc, ccb, 50000) != 0) {
2663 		DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload poll\n", DEVNAME(sc));
2664 		goto err;
2665 	}
2666 
2667 	if (ccb->ccb_rcb == NULL)
2668 		panic("%s: unable to do fw upload", DEVNAME(sc));
2669 	upp = ccb->ccb_rcb->rcb_reply;
2670 
2671 	if (letoh16(upp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
2672 		rv = 1;
2673 
2674 	mpi_push_reply(sc, ccb->ccb_rcb);
2675 	scsi_io_put(&sc->sc_iopool, ccb);
2676 
2677 	return (rv);
2678 
2679 err:
2680 	mpi_dmamem_free(sc, sc->sc_fw);
2681 	return (1);
2682 }
2683 
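/*
 * read IOC page 2 and set MPI_F_RAID if the ioc advertises RAID
 * capability.
 */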
2684 void
2685 mpi_get_raid(struct mpi_softc *sc)
2686 {
2687 	struct mpi_cfg_hdr		hdr;
2688 	struct mpi_cfg_ioc_pg2		*vol_page;
2689 	size_t				pagelen;
2690 	u_int32_t			capabilities;
2691 
2692 	DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid\n", DEVNAME(sc));
2693 
2694 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 2, 0, &hdr) != 0) {
2695 		DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch header "
2696 		    "for IOC page 2\n", DEVNAME(sc));
2697 		return;
2698 	}
2699 
2700 	pagelen = hdr.page_length * 4; /* dwords to bytes */
2701 	vol_page = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL);
2702 	if (vol_page == NULL) {
2703 		DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to allocate "
2704 		    "space for ioc config page 2\n", DEVNAME(sc));
2705 		return;
2706 	}
2707 
2708 	if (mpi_cfg_page(sc, 0, &hdr, 1, vol_page, pagelen) != 0) {
2709 		DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch IOC "
2710 		    "page 2\n", DEVNAME(sc));
2711 		goto out;
2712 	}
2713 
2714 	capabilities = letoh32(vol_page->capabilities);
2715 
2716 	DNPRINTF(MPI_D_RAID, "%s:  capabilities: 0x%08x\n", DEVNAME(sc),
2717 	    letoh32(vol_page->capabilities));
2718 	DNPRINTF(MPI_D_RAID, "%s:  active_vols: %d max_vols: %d "
2719 	    "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc),
2720 	    vol_page->active_vols, vol_page->max_vols,
2721 	    vol_page->active_physdisks, vol_page->max_physdisks);
2722 
2723 	/* don't walk list if there are no RAID capability */
2724 	if (capabilities == 0xdeadbeef) {
2725 		printf("%s: deadbeef in raid configuration\n", DEVNAME(sc));
2726 		goto out;
2727 	}
2728 
2729 	if (ISSET(capabilities, MPI_CFG_IOC_2_CAPABILITIES_RAID))
2730 		sc->sc_flags |= MPI_F_RAID;
2731 
2732 out:
2733 	free(vol_page, M_TEMP);
2734 }
2735 
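/*
 * config access is a two step exchange: fetch the page header first
 * (this function), then use it to read or write the page itself with
 * mpi_req_cfg_page(). MPI_PG_EXTENDED selects the extended page types
 * and MPI_PG_POLL polls for completion instead of sleeping.
 */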
2736 int
2737 mpi_req_cfg_header(struct mpi_softc *sc, u_int8_t type, u_int8_t number,
2738     u_int32_t address, int flags, void *p)
2739 {
2740 	struct mpi_ccb				*ccb;
2741 	struct mpi_msg_config_request		*cq;
2742 	struct mpi_msg_config_reply		*cp;
2743 	struct mpi_cfg_hdr			*hdr = p;
2744 	struct mpi_ecfg_hdr			*ehdr = p;
2745 	int					etype = 0;
2746 	int					rv = 0;
2747 
2748 	DNPRINTF(MPI_D_MISC, "%s: mpi_req_cfg_header type: %#x number: %x "
2749 	    "address: 0x%08x flags: 0x%b\n", DEVNAME(sc), type, number,
2750 	    address, flags, MPI_PG_FMT);
2751 
2752 	ccb = scsi_io_get(&sc->sc_iopool,
2753 	    ISSET(flags, MPI_PG_POLL) ? SCSI_NOSLEEP : 0);
2754 	if (ccb == NULL) {
2755 		DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header ccb_get\n",
2756 		    DEVNAME(sc));
2757 		return (1);
2758 	}
2759 
2760 	if (ISSET(flags, MPI_PG_EXTENDED)) {
2761 		etype = type;
2762 		type = MPI_CONFIG_REQ_PAGE_TYPE_EXTENDED;
2763 	}
2764 
2765 	cq = ccb->ccb_cmd;
2766 
2767 	cq->function = MPI_FUNCTION_CONFIG;
2768 
2769 	cq->action = MPI_CONFIG_REQ_ACTION_PAGE_HEADER;
2770 
2771 	cq->config_header.page_number = number;
2772 	cq->config_header.page_type = type;
2773 	cq->ext_page_type = etype;
2774 	cq->page_address = htole32(address);
2775 	cq->page_buffer.sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE |
2776 	    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL);
2777 
2778 	ccb->ccb_done = mpi_empty_done;
2779 	if (ISSET(flags, MPI_PG_POLL)) {
2780 		if (mpi_poll(sc, ccb, 50000) != 0) {
2781 			DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n",
2782 			    DEVNAME(sc));
2783 			return (1);
2784 		}
2785 	} else
2786 		mpi_wait(sc, ccb);
2787 
2788 	if (ccb->ccb_rcb == NULL)
2789 		panic("%s: unable to fetch config header", DEVNAME(sc));
2790 	cp = ccb->ccb_rcb->rcb_reply;
2791 
2792 	DNPRINTF(MPI_D_MISC, "%s:  action: 0x%02x msg_length: %d function: "
2793 	    "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function);
2794 	DNPRINTF(MPI_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
2795 	    "msg_flags: 0x%02x\n", DEVNAME(sc),
2796 	    letoh16(cp->ext_page_length), cp->ext_page_type,
2797 	    cp->msg_flags);
2798 	DNPRINTF(MPI_D_MISC, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2799 	    letoh32(cp->msg_context));
2800 	DNPRINTF(MPI_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2801 	    letoh16(cp->ioc_status));
2802 	DNPRINTF(MPI_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2803 	    letoh32(cp->ioc_loginfo));
2804 	DNPRINTF(MPI_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
2805 	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2806 	    cp->config_header.page_version,
2807 	    cp->config_header.page_length,
2808 	    cp->config_header.page_number,
2809 	    cp->config_header.page_type);
2810 
2811 	if (letoh16(cp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
2812 		rv = 1;
2813 	else if (ISSET(flags, MPI_PG_EXTENDED)) {
2814 		memset(ehdr, 0, sizeof(*ehdr));
2815 		ehdr->page_version = cp->config_header.page_version;
2816 		ehdr->page_number = cp->config_header.page_number;
2817 		ehdr->page_type = cp->config_header.page_type;
2818 		ehdr->ext_page_length = cp->ext_page_length;
2819 		ehdr->ext_page_type = cp->ext_page_type;
2820 	} else
2821 		*hdr = cp->config_header;
2822 
2823 	mpi_push_reply(sc, ccb->ccb_rcb);
2824 	scsi_io_put(&sc->sc_iopool, ccb);
2825 
2826 	return (rv);
2827 }
2828 
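/*
 * read or write a config page described by the header fetched above.
 * the page data is bounced through the spare space in the request
 * frame, so len is limited to MPI_REQUEST_SIZE less the config request
 * itself.
 */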
2829 int
2830 mpi_req_cfg_page(struct mpi_softc *sc, u_int32_t address, int flags,
2831     void *p, int read, void *page, size_t len)
2832 {
2833 	struct mpi_ccb				*ccb;
2834 	struct mpi_msg_config_request		*cq;
2835 	struct mpi_msg_config_reply		*cp;
2836 	struct mpi_cfg_hdr			*hdr = p;
2837 	struct mpi_ecfg_hdr			*ehdr = p;
2838 	char					*kva;
2839 	int					page_length;
2840 	int					rv = 0;
2841 
2842 	DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page address: %d read: %d type: %x\n",
2843 	    DEVNAME(sc), address, read, hdr->page_type);
2844 
2845 	page_length = ISSET(flags, MPI_PG_EXTENDED) ?
2846 	    letoh16(ehdr->ext_page_length) : hdr->page_length;
2847 
2848 	if (len > MPI_REQUEST_SIZE - sizeof(struct mpi_msg_config_request) ||
2849 	    len < page_length * 4)
2850 		return (1);
2851 
2852 	ccb = scsi_io_get(&sc->sc_iopool,
2853 	    ISSET(flags, MPI_PG_POLL) ? SCSI_NOSLEEP : 0);
2854 	if (ccb == NULL) {
2855 		DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page ccb_get\n", DEVNAME(sc));
2856 		return (1);
2857 	}
2858 
2859 	cq = ccb->ccb_cmd;
2860 
2861 	cq->function = MPI_FUNCTION_CONFIG;
2862 
2863 	cq->action = (read ? MPI_CONFIG_REQ_ACTION_PAGE_READ_CURRENT :
2864 	    MPI_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT);
2865 
2866 	if (ISSET(flags, MPI_PG_EXTENDED)) {
2867 		cq->config_header.page_version = ehdr->page_version;
2868 		cq->config_header.page_number = ehdr->page_number;
2869 		cq->config_header.page_type = ehdr->page_type;
2870 		cq->ext_page_len = ehdr->ext_page_length;
2871 		cq->ext_page_type = ehdr->ext_page_type;
2872 	} else
2873 		cq->config_header = *hdr;
2874 	cq->config_header.page_type &= MPI_CONFIG_REQ_PAGE_TYPE_MASK;
2875 	cq->page_address = htole32(address);
2876 	cq->page_buffer.sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE |
2877 	    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL |
2878 	    (page_length * 4) |
2879 	    (read ? MPI_SGE_FL_DIR_IN : MPI_SGE_FL_DIR_OUT));
2880 
2881 	/* bounce the page via the request space to avoid more bus_dma games */
2882 	mpi_dvatosge(&cq->page_buffer, ccb->ccb_cmd_dva +
2883 	    sizeof(struct mpi_msg_config_request));
2884 
2885 	kva = ccb->ccb_cmd;
2886 	kva += sizeof(struct mpi_msg_config_request);
2887 	if (!read)
2888 		memcpy(kva, page, len);
2889 
2890 	ccb->ccb_done = mpi_empty_done;
2891 	if (ISSET(flags, MPI_PG_POLL)) {
2892 		if (mpi_poll(sc, ccb, 50000) != 0) {
2893 			DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page poll\n",
2894 			    DEVNAME(sc));
2895 			return (1);
2896 		}
2897 	} else
2898 		mpi_wait(sc, ccb);
2899 
2900 	if (ccb->ccb_rcb == NULL) {
2901 		scsi_io_put(&sc->sc_iopool, ccb);
2902 		return (1);
2903 	}
2904 	cp = ccb->ccb_rcb->rcb_reply;
2905 
2906 	DNPRINTF(MPI_D_MISC, "%s:  action: 0x%02x msg_length: %d function: "
2907 	    "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function);
2908 	DNPRINTF(MPI_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
2909 	    "msg_flags: 0x%02x\n", DEVNAME(sc),
2910 	    letoh16(cp->ext_page_length), cp->ext_page_type,
2911 	    cp->msg_flags);
2912 	DNPRINTF(MPI_D_MISC, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2913 	    letoh32(cp->msg_context));
2914 	DNPRINTF(MPI_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2915 	    letoh16(cp->ioc_status));
2916 	DNPRINTF(MPI_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2917 	    letoh32(cp->ioc_loginfo));
2918 	DNPRINTF(MPI_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
2919 	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2920 	    cp->config_header.page_version,
2921 	    cp->config_header.page_length,
2922 	    cp->config_header.page_number,
2923 	    cp->config_header.page_type);
2924 
2925 	if (letoh16(cp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
2926 		rv = 1;
2927 	else if (read)
2928 		memcpy(page, kva, len);
2929 
2930 	mpi_push_reply(sc, ccb->ccb_rcb);
2931 	scsi_io_put(&sc->sc_iopool, ccb);
2932 
2933 	return (rv);
2934 }
2935 
2936 int
2937 mpi_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
2938 {
2939 	struct mpi_softc	*sc = (struct mpi_softc *)link->adapter_softc;
2940 
2941 	DNPRINTF(MPI_D_IOCTL, "%s: mpi_scsi_ioctl\n", DEVNAME(sc));
2942 
2943 	switch (cmd) {
2944 	case DIOCGCACHE:
2945 	case DIOCSCACHE:
2946 		if (ISSET(link->flags, SDEV_VIRTUAL)) {
2947 			return (mpi_ioctl_cache(link, cmd,
2948 			    (struct dk_cache *)addr));
2949 		}
2950 		break;
2951 
2952 	default:
2953 		if (sc->sc_ioctl)
2954 			return (sc->sc_ioctl(link->adapter_softc, cmd, addr));
2955 
2956 		break;
2957 	}
2958 
2959 	return (ENOTTY);
2960 }
2961 
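/*
 * DIOCGCACHE/DIOCSCACHE on a RAID volume: the write cache bit lives in
 * the volume settings of RAID volume page 0, and flipping it is done
 * with a CHANGE VOLUME SETTINGS raid action.
 */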
2962 int
2963 mpi_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc)
2964 {
2965 	struct mpi_softc	*sc = (struct mpi_softc *)link->adapter_softc;
2966 	struct mpi_ccb		*ccb;
2967 	int			len, rv;
2968 	struct mpi_cfg_hdr	hdr;
2969 	struct mpi_cfg_raid_vol_pg0 *rpg0;
2970 	int			enabled;
2971 	struct mpi_msg_raid_action_request *req;
2972 	struct mpi_msg_raid_action_reply *rep;
2973 	struct mpi_raid_settings settings;
2974 
2975 	rv = mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
2976 	    link->target, MPI_PG_POLL, &hdr);
2977 	if (rv != 0)
2978 		return (EIO);
2979 
2980 	len = sizeof(*rpg0) + sc->sc_vol_page->max_physdisks *
2981 	    sizeof(struct mpi_cfg_raid_vol_pg0_physdisk);
2982 	rpg0 = malloc(len, M_TEMP, M_NOWAIT);
2983 	if (rpg0 == NULL)
2984 		return (ENOMEM);
2985 
2986 	if (mpi_req_cfg_page(sc, link->target, MPI_PG_POLL, &hdr, 1,
2987 	    rpg0, len) != 0) {
2988 		DNPRINTF(MPI_D_RAID, "%s: can't get RAID vol cfg page 0\n",
2989 		    DEVNAME(sc));
2990 		rv = EIO;
2991 		goto done;
2992 	}
2993 
2994 	enabled = ISSET(letoh16(rpg0->settings.volume_settings),
2995 	    MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN) ? 1 : 0;
2996 
2997 	if (cmd == DIOCGCACHE) {
2998 		dc->wrcache = enabled;
2999 		dc->rdcache = 0;
3000 		goto done;
3001 	} /* else DIOCSCACHE */
3002 
3003 	if (dc->rdcache) {
3004 		rv = EOPNOTSUPP;
3005 		goto done;
3006 	}
3007 
3008 	if (((dc->wrcache) ? 1 : 0) == enabled)
3009 		goto done;
3010 
3011 	settings = rpg0->settings;
3012 	if (dc->wrcache) {
3013 		SET(settings.volume_settings,
3014 		    htole16(MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN));
3015 	} else {
3016 		CLR(settings.volume_settings,
3017 		    htole16(MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN));
3018 	}
3019 
3020 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
3021 	if (ccb == NULL) {
3022 		rv = ENOMEM;
3023 		goto done;
3024 	}
3025 
3026 	req = ccb->ccb_cmd;
3027 	req->function = MPI_FUNCTION_RAID_ACTION;
3028 	req->action = MPI_MSG_RAID_ACTION_CH_VOL_SETTINGS;
3029 	req->vol_id = rpg0->volume_id;
3030 	req->vol_bus = rpg0->volume_bus;
3031 
3032 	memcpy(&req->data_word, &settings, sizeof(req->data_word));
3033 	ccb->ccb_done = mpi_empty_done;
3034 	if (mpi_poll(sc, ccb, 50000) != 0) {
3035 		rv = EIO;
3036 		goto done;
3037 	}
3038 
3039 	rep = (struct mpi_msg_raid_action_reply *)ccb->ccb_rcb;
3040 	if (rep == NULL)
3041 		panic("%s: raid volume settings change failed", DEVNAME(sc));
3042 
3043 	switch (letoh16(rep->action_status)) {
3044 	case MPI_RAID_ACTION_STATUS_OK:
3045 		rv = 0;
3046 		break;
3047 	default:
3048 		rv = EIO;
3049 		break;
3050 	}
3051 
3052 	mpi_push_reply(sc, ccb->ccb_rcb);
3053 	scsi_io_put(&sc->sc_iopool, ccb);
3054 
3055 done:
3056 	free(rpg0, M_TEMP);
3057 	return (rv);
3058 }
3059 
3060 #if NBIO > 0
3061 int
3062 mpi_bio_get_pg0_raid(struct mpi_softc *sc, int id)
3063 {
3064 	int			len, rv = EINVAL;
3065 	u_int32_t		address;
3066 	struct mpi_cfg_hdr	hdr;
3067 	struct mpi_cfg_raid_vol_pg0 *rpg0;
3068 
3069 	/* get IOC page 2 */
3070 	if (mpi_req_cfg_page(sc, 0, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page,
3071 	    sc->sc_cfg_hdr.page_length * 4) != 0) {
3072 		DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid unable to "
3073 		    "fetch IOC page 2\n", DEVNAME(sc));
3074 		goto done;
3075 	}
3076 
3077 	/* XXX return something other than EINVAL to indicate within hs range */
3078 	if (id > sc->sc_vol_page->active_vols) {
3079 		DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid invalid vol "
3080 		    "id: %d\n", DEVNAME(sc), id);
3081 		goto done;
3082 	}
3083 
3084 	/* replace current buffer with new one */
3085 	len = sizeof *rpg0 + sc->sc_vol_page->max_physdisks *
3086 	    sizeof(struct mpi_cfg_raid_vol_pg0_physdisk);
3087 	rpg0 = malloc(len, M_DEVBUF, M_WAITOK | M_CANFAIL);
3088 	if (rpg0 == NULL) {
3089 		printf("%s: can't get memory for RAID page 0, "
3090 		    "bio disabled\n", DEVNAME(sc));
3091 		goto done;
3092 	}
3093 	if (sc->sc_rpg0)
3094 		free(sc->sc_rpg0, M_DEVBUF);
3095 	sc->sc_rpg0 = rpg0;
3096 
3097 	/* get raid vol page 0 */
3098 	address = sc->sc_vol_list[id].vol_id |
3099 	    (sc->sc_vol_list[id].vol_bus << 8);
3100 	if (mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3101 	    address, 0, &hdr) != 0)
3102 		goto done;
3103 	if (mpi_req_cfg_page(sc, address, 0, &hdr, 1, rpg0, len)) {
3104 		DNPRINTF(MPI_D_RAID, "%s: can't get RAID vol cfg page 0\n",
3105 		    DEVNAME(sc));
3106 		goto done;
3107 	}
3108 
3109 	rv = 0;
3110 done:
3111 	return (rv);
3112 }
3113 
3114 int
3115 mpi_ioctl(struct device *dev, u_long cmd, caddr_t addr)
3116 {
3117 	struct mpi_softc	*sc = (struct mpi_softc *)dev;
3118 	int error = 0;
3119 
3120 	DNPRINTF(MPI_D_IOCTL, "%s: mpi_ioctl ", DEVNAME(sc));
3121 
3122 	/* make sure we have bio enabled */
3123 	if (sc->sc_ioctl != mpi_ioctl)
3124 		return (EINVAL);
3125 
3126 	rw_enter_write(&sc->sc_lock);
3127 
3128 	switch (cmd) {
3129 	case BIOCINQ:
3130 		DNPRINTF(MPI_D_IOCTL, "inq\n");
3131 		error = mpi_ioctl_inq(sc, (struct bioc_inq *)addr);
3132 		break;
3133 
3134 	case BIOCVOL:
3135 		DNPRINTF(MPI_D_IOCTL, "vol\n");
3136 		error = mpi_ioctl_vol(sc, (struct bioc_vol *)addr);
3137 		break;
3138 
3139 	case BIOCDISK:
3140 		DNPRINTF(MPI_D_IOCTL, "disk\n");
3141 		error = mpi_ioctl_disk(sc, (struct bioc_disk *)addr);
3142 		break;
3143 
3144 	case BIOCALARM:
3145 		DNPRINTF(MPI_D_IOCTL, "alarm\n");
3146 		break;
3147 
3148 	case BIOCBLINK:
3149 		DNPRINTF(MPI_D_IOCTL, "blink\n");
3150 		break;
3151 
3152 	case BIOCSETSTATE:
3153 		DNPRINTF(MPI_D_IOCTL, "setstate\n");
3154 		error = mpi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
3155 		break;
3156 
3157 	default:
3158 		DNPRINTF(MPI_D_IOCTL, " invalid ioctl\n");
3159 		error = EINVAL;
3160 	}
3161 
3162 	rw_exit_write(&sc->sc_lock);
3163 
3164 	return (error);
3165 }
3166 
3167 int
3168 mpi_ioctl_inq(struct mpi_softc *sc, struct bioc_inq *bi)
3169 {
3170 	if (!(sc->sc_flags & MPI_F_RAID)) {
3171 		bi->bi_novol = 0;
3172 		bi->bi_nodisk = 0;
3173 	}
3174 
3175 	if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page,
3176 	    sc->sc_cfg_hdr.page_length * 4) != 0) {
3177 		DNPRINTF(MPI_D_IOCTL, "%s: mpi_get_raid unable to fetch IOC "
3178 		    "page 2\n", DEVNAME(sc));
3179 		return (EINVAL);
3180 	}
3181 
3182 	DNPRINTF(MPI_D_IOCTL, "%s:  active_vols: %d max_vols: %d "
3183 	    "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc),
3184 	    sc->sc_vol_page->active_vols, sc->sc_vol_page->max_vols,
3185 	    sc->sc_vol_page->active_physdisks, sc->sc_vol_page->max_physdisks);
3186 
3187 	bi->bi_novol = sc->sc_vol_page->active_vols;
3188 	bi->bi_nodisk = sc->sc_vol_page->active_physdisks;
3189 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
3190 
3191 	return (0);
3192 }
3193 
3194 int
3195 mpi_ioctl_vol(struct mpi_softc *sc, struct bioc_vol *bv)
3196 {
3197 	int			i, vol, id, rv = EINVAL;
3198 	struct device		*dev;
3199 	struct scsi_link	*link;
3200 	struct mpi_cfg_raid_vol_pg0 *rpg0;
3201 	char			*vendp;
3202 
3203 	id = bv->bv_volid;
3204 	if (mpi_bio_get_pg0_raid(sc, id))
3205 		goto done;
3206 
3207 	if (id > sc->sc_vol_page->active_vols)
3208 		return (EINVAL); /* XXX deal with hot spares */
3209 
3210 	rpg0 = sc->sc_rpg0;
3211 	if (rpg0 == NULL)
3212 		goto done;
3213 
3214 	/* determine status */
3215 	switch (rpg0->volume_state) {
3216 	case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL:
3217 		bv->bv_status = BIOC_SVONLINE;
3218 		break;
3219 	case MPI_CFG_RAID_VOL_0_STATE_DEGRADED:
3220 		bv->bv_status = BIOC_SVDEGRADED;
3221 		break;
3222 	case MPI_CFG_RAID_VOL_0_STATE_FAILED:
3223 	case MPI_CFG_RAID_VOL_0_STATE_MISSING:
3224 		bv->bv_status = BIOC_SVOFFLINE;
3225 		break;
3226 	default:
3227 		bv->bv_status = BIOC_SVINVALID;
3228 	}
3229 
3230 	/* override the status if the volume is resyncing */
3231 	if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING)
3232 		bv->bv_status = BIOC_SVREBUILD;
3233 
3234 	bv->bv_size = (u_quad_t)letoh32(rpg0->max_lba) * 512;
3235 
3236 	switch (sc->sc_vol_list[id].vol_type) {
3237 	case MPI_CFG_RAID_TYPE_RAID_IS:
3238 		bv->bv_level = 0;
3239 		break;
3240 	case MPI_CFG_RAID_TYPE_RAID_IME:
3241 	case MPI_CFG_RAID_TYPE_RAID_IM:
3242 		bv->bv_level = 1;
3243 		break;
3244 	case MPI_CFG_RAID_TYPE_RAID_5:
3245 		bv->bv_level = 5;
3246 		break;
3247 	case MPI_CFG_RAID_TYPE_RAID_6:
3248 		bv->bv_level = 6;
3249 		break;
3250 	case MPI_CFG_RAID_TYPE_RAID_10:
3251 		bv->bv_level = 10;
3252 		break;
3253 	case MPI_CFG_RAID_TYPE_RAID_50:
3254 		bv->bv_level = 50;
3255 		break;
3256 	default:
3257 		bv->bv_level = -1;
3258 	}
3259 
3260 	bv->bv_nodisk = rpg0->num_phys_disks;
3261 
3262 	for (i = 0, vol = -1; i < sc->sc_buswidth; i++) {
3263 		link = scsi_get_link(sc->sc_scsibus, i, 0);
3264 		if (link == NULL)
3265 			continue;
3266 
3267 		/* skip if not a virtual disk */
3268 		if (!(link->flags & SDEV_VIRTUAL))
3269 			continue;
3270 
3271 		vol++;
3272 		/* are we it? */
3273 		if (vol == bv->bv_volid) {
3274 			dev = link->device_softc;
3275 			vendp = link->inqdata.vendor;
3276 			memcpy(bv->bv_vendor, vendp, sizeof bv->bv_vendor);
3277 			bv->bv_vendor[sizeof(bv->bv_vendor) - 1] = '\0';
3278 			strlcpy(bv->bv_dev, dev->dv_xname, sizeof bv->bv_dev);
3279 			break;
3280 		}
3281 	}
3282 	rv = 0;
3283 done:
3284 	return (rv);
3285 }
3286 
3287 int
3288 mpi_ioctl_disk(struct mpi_softc *sc, struct bioc_disk *bd)
3289 {
3290 	int			pdid, id, rv = EINVAL;
3291 	u_int32_t		address;
3292 	struct mpi_cfg_hdr	hdr;
3293 	struct mpi_cfg_raid_vol_pg0 *rpg0;
3294 	struct mpi_cfg_raid_vol_pg0_physdisk *physdisk;
3295 	struct mpi_cfg_raid_physdisk_pg0 pdpg0;
3296 
3297 	id = bd->bd_volid;
3298 	if (mpi_bio_get_pg0_raid(sc, id))
3299 		goto done;
3300 
3301 	if (id > sc->sc_vol_page->active_vols)
3302 		return (EINVAL); /* XXX deal with hot spares */
3303 
3304 	rpg0 = sc->sc_rpg0;
3305 	if (rpg0 == NULL)
3306 		goto done;
3307 
3308 	pdid = bd->bd_diskid;
3309 	if (pdid > rpg0->num_phys_disks)
3310 		goto done;
3311 	physdisk = (struct mpi_cfg_raid_vol_pg0_physdisk *)(rpg0 + 1);
3312 	physdisk += pdid;
3313 
3314 	/* get raid phys disk page 0 */
3315 	address = physdisk->phys_disk_num;
3316 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_PD, 0, address,
3317 	    &hdr) != 0)
3318 		goto done;
3319 	if (mpi_cfg_page(sc, address, &hdr, 1, &pdpg0, sizeof pdpg0)) {
3320 		bd->bd_status = BIOC_SDFAILED;
3321 		return (0);
3322 	}
3323 	bd->bd_channel = pdpg0.phys_disk_bus;
3324 	bd->bd_target = pdpg0.phys_disk_id;
3325 	bd->bd_lun = 0;
3326 	bd->bd_size = (u_quad_t)letoh32(pdpg0.max_lba) * 512;
3327 	strlcpy(bd->bd_vendor, (char *)pdpg0.vendor_id, sizeof(bd->bd_vendor));
3328 
3329 	switch (pdpg0.phys_disk_state) {
3330 	case MPI_CFG_RAID_PHYDISK_0_STATE_ONLINE:
3331 		bd->bd_status = BIOC_SDONLINE;
3332 		break;
3333 	case MPI_CFG_RAID_PHYDISK_0_STATE_MISSING:
3334 	case MPI_CFG_RAID_PHYDISK_0_STATE_FAILED:
3335 		bd->bd_status = BIOC_SDFAILED;
3336 		break;
3337 	case MPI_CFG_RAID_PHYDISK_0_STATE_HOSTFAIL:
3338 	case MPI_CFG_RAID_PHYDISK_0_STATE_OTHER:
3339 	case MPI_CFG_RAID_PHYDISK_0_STATE_OFFLINE:
3340 		bd->bd_status = BIOC_SDOFFLINE;
3341 		break;
3342 	case MPI_CFG_RAID_PHYDISK_0_STATE_INIT:
3343 		bd->bd_status = BIOC_SDSCRUB;
3344 		break;
3345 	case MPI_CFG_RAID_PHYDISK_0_STATE_INCOMPAT:
3346 	default:
3347 		bd->bd_status = BIOC_SDINVALID;
3348 		break;
3349 	}
3350 
3351 	/* XXX figure this out */
3352 	/* bd_serial[32]; */
3353 	/* bd_procdev[16]; */
3354 
3355 	rv = 0;
3356 done:
3357 	return (rv);
3358 }
3359 
3360 int
3361 mpi_ioctl_setstate(struct mpi_softc *sc, struct bioc_setstate *bs)
3362 {
3363 	return (ENOTTY);
3364 }
3365 
3366 #ifndef SMALL_KERNEL
3367 int
3368 mpi_create_sensors(struct mpi_softc *sc)
3369 {
3370 	struct device		*dev;
3371 	struct scsi_link	*link;
3372 	int			i, vol;
3373 
3374 	/* count volumes */
3375 	for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
3376 		link = scsi_get_link(sc->sc_scsibus, i, 0);
3377 		if (link == NULL)
3378 			continue;
3379 		/* skip if not a virtual disk */
3380 		if (!(link->flags & SDEV_VIRTUAL))
3381 			continue;
3382 
3383 		vol++;
3384 	}
3385 	if (vol == 0)
3386 		return (0);
3387 
3388 	sc->sc_sensors = malloc(sizeof(struct ksensor) * vol,
3389 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3390 	if (sc->sc_sensors == NULL)
3391 		return (1);
3392 
3393 	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
3394 	    sizeof(sc->sc_sensordev.xname));
3395 
3396 	for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
3397 		link = scsi_get_link(sc->sc_scsibus, i, 0);
3398 		if (link == NULL)
3399 			continue;
3400 		/* skip if not a virtual disk */
3401 		if (!(link->flags & SDEV_VIRTUAL))
3402 			continue;
3403 
3404 		dev = link->device_softc;
3405 		strlcpy(sc->sc_sensors[vol].desc, dev->dv_xname,
3406 		    sizeof(sc->sc_sensors[vol].desc));
3407 		sc->sc_sensors[vol].type = SENSOR_DRIVE;
3408 		sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN;
3409 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[vol]);
3410 
3411 		vol++;
3412 	}
3413 
3414 	if (sensor_task_register(sc, mpi_refresh_sensors, 10) == NULL)
3415 		goto bad;
3416 
3417 	sensordev_install(&sc->sc_sensordev);
3418 
3419 	return (0);
3420 
3421 bad:
3422 	free(sc->sc_sensors, M_DEVBUF);
3423 	return (1);
3424 }
3425 
3426 void
3427 mpi_refresh_sensors(void *arg)
3428 {
3429 	int			i, vol;
3430 	struct scsi_link	*link;
3431 	struct mpi_softc	*sc = arg;
3432 	struct mpi_cfg_raid_vol_pg0 *rpg0;
3433 
3434 	rw_enter_write(&sc->sc_lock);
3435 
3436 	for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
3437 		link = scsi_get_link(sc->sc_scsibus, i, 0);
3438 		if (link == NULL)
3439 			continue;
3440 		/* skip if not a virtual disk */
3441 		if (!(link->flags & SDEV_VIRTUAL))
3442 			continue;
3443 
3444 		if (mpi_bio_get_pg0_raid(sc, vol))
3445 			continue;
3446 
3447 		rpg0 = sc->sc_rpg0;
3448 		if (rpg0 == NULL)
3449 			goto done;
3450 
3451 		/* determine status */
3452 		switch (rpg0->volume_state) {
3453 		case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL:
3454 			sc->sc_sensors[vol].value = SENSOR_DRIVE_ONLINE;
3455 			sc->sc_sensors[vol].status = SENSOR_S_OK;
3456 			break;
3457 		case MPI_CFG_RAID_VOL_0_STATE_DEGRADED:
3458 			sc->sc_sensors[vol].value = SENSOR_DRIVE_PFAIL;
3459 			sc->sc_sensors[vol].status = SENSOR_S_WARN;
3460 			break;
3461 		case MPI_CFG_RAID_VOL_0_STATE_FAILED:
3462 		case MPI_CFG_RAID_VOL_0_STATE_MISSING:
3463 			sc->sc_sensors[vol].value = SENSOR_DRIVE_FAIL;
3464 			sc->sc_sensors[vol].status = SENSOR_S_CRIT;
3465 			break;
3466 		default:
3467 			sc->sc_sensors[vol].value = 0; /* unknown */
3468 			sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN;
3469 		}
3470 
3471 		/* override the status if the volume is resyncing */
3472 		if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING) {
3473 			sc->sc_sensors[vol].value = SENSOR_DRIVE_REBUILD;
3474 			sc->sc_sensors[vol].status = SENSOR_S_WARN;
3475 		}
3476 
3477 		vol++;
3478 	}
3479 done:
3480 	rw_exit_write(&sc->sc_lock);
3481 }
3482 #endif /* SMALL_KERNEL */
3483 #endif /* NBIO > 0 */
3484