1 /*	$OpenBSD: mpi.c,v 1.176 2012/08/26 11:33:44 dlg Exp $ */
2 
3 /*
4  * Copyright (c) 2005, 2006, 2009 David Gwynne <dlg@openbsd.org>
5  * Copyright (c) 2005, 2008, 2009 Marco Peereboom <marco@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "bio.h"
21 
22 #include <sys/param.h>
23 #include <sys/systm.h>
24 #include <sys/buf.h>
25 #include <sys/device.h>
26 #include <sys/ioctl.h>
27 #include <sys/proc.h>
28 #include <sys/malloc.h>
29 #include <sys/kernel.h>
30 #include <sys/mutex.h>
31 #include <sys/rwlock.h>
32 #include <sys/sensors.h>
33 #include <sys/dkio.h>
34 
35 #include <machine/bus.h>
36 
37 #include <scsi/scsi_all.h>
38 #include <scsi/scsiconf.h>
39 
40 #include <dev/biovar.h>
41 #include <dev/ic/mpireg.h>
42 #include <dev/ic/mpivar.h>
43 
44 #ifdef MPI_DEBUG
45 uint32_t	mpi_debug = 0
46 /*		    | MPI_D_CMD */
47 /*		    | MPI_D_INTR */
48 /*		    | MPI_D_MISC */
49 /*		    | MPI_D_DMA */
50 /*		    | MPI_D_IOCTL */
51 /*		    | MPI_D_RW */
52 /*		    | MPI_D_MEM */
53 /*		    | MPI_D_CCB */
54 /*		    | MPI_D_PPR */
55 /*		    | MPI_D_RAID */
56 /*		    | MPI_D_EVT */
57 		;
58 #endif
59 
60 struct cfdriver mpi_cd = {
61 	NULL,
62 	"mpi",
63 	DV_DULL
64 };
65 
66 void			mpi_scsi_cmd(struct scsi_xfer *);
67 void			mpi_scsi_cmd_done(struct mpi_ccb *);
68 void			mpi_minphys(struct buf *bp, struct scsi_link *sl);
69 int			mpi_scsi_probe(struct scsi_link *);
70 int			mpi_scsi_ioctl(struct scsi_link *, u_long, caddr_t,
71 			    int);
72 
73 struct scsi_adapter mpi_switch = {
74 	mpi_scsi_cmd,
75 	mpi_minphys,
76 	mpi_scsi_probe,
77 	NULL,
78 	mpi_scsi_ioctl
79 };
80 
81 struct mpi_dmamem	*mpi_dmamem_alloc(struct mpi_softc *, size_t);
82 void			mpi_dmamem_free(struct mpi_softc *,
83 			    struct mpi_dmamem *);
84 int			mpi_alloc_ccbs(struct mpi_softc *);
85 void			*mpi_get_ccb(void *);
86 void			mpi_put_ccb(void *, void *);
87 int			mpi_alloc_replies(struct mpi_softc *);
88 void			mpi_push_replies(struct mpi_softc *);
89 void			mpi_push_reply(struct mpi_softc *, struct mpi_rcb *);
90 
91 void			mpi_start(struct mpi_softc *, struct mpi_ccb *);
92 int			mpi_poll(struct mpi_softc *, struct mpi_ccb *, int);
93 void			mpi_poll_done(struct mpi_ccb *);
94 void			mpi_reply(struct mpi_softc *, u_int32_t);
95 
96 void			mpi_wait(struct mpi_softc *sc, struct mpi_ccb *);
97 void			mpi_wait_done(struct mpi_ccb *);
98 
99 int			mpi_cfg_spi_port(struct mpi_softc *);
100 void			mpi_squash_ppr(struct mpi_softc *);
101 void			mpi_run_ppr(struct mpi_softc *);
102 int			mpi_ppr(struct mpi_softc *, struct scsi_link *,
103 			    struct mpi_cfg_raid_physdisk *, int, int, int);
104 int			mpi_inq(struct mpi_softc *, u_int16_t, int);
105 
106 int			mpi_cfg_sas(struct mpi_softc *);
107 int			mpi_cfg_fc(struct mpi_softc *);
108 
109 void			mpi_timeout_xs(void *);
110 int			mpi_load_xs(struct mpi_ccb *);
111 
112 u_int32_t		mpi_read(struct mpi_softc *, bus_size_t);
113 void			mpi_write(struct mpi_softc *, bus_size_t, u_int32_t);
114 int			mpi_wait_eq(struct mpi_softc *, bus_size_t, u_int32_t,
115 			    u_int32_t);
116 int			mpi_wait_ne(struct mpi_softc *, bus_size_t, u_int32_t,
117 			    u_int32_t);
118 
119 int			mpi_init(struct mpi_softc *);
120 int			mpi_reset_soft(struct mpi_softc *);
121 int			mpi_reset_hard(struct mpi_softc *);
122 
123 int			mpi_handshake_send(struct mpi_softc *, void *, size_t);
124 int			mpi_handshake_recv_dword(struct mpi_softc *,
125 			    u_int32_t *);
126 int			mpi_handshake_recv(struct mpi_softc *, void *, size_t);
127 
128 void			mpi_empty_done(struct mpi_ccb *);
129 
130 int			mpi_iocinit(struct mpi_softc *);
131 int			mpi_iocfacts(struct mpi_softc *);
132 int			mpi_portfacts(struct mpi_softc *);
133 int			mpi_portenable(struct mpi_softc *);
134 int			mpi_cfg_coalescing(struct mpi_softc *);
135 void			mpi_get_raid(struct mpi_softc *);
136 int			mpi_fwupload(struct mpi_softc *);
137 int			mpi_scsi_probe_virtual(struct scsi_link *);
138 
139 int			mpi_eventnotify(struct mpi_softc *);
140 void			mpi_eventnotify_done(struct mpi_ccb *);
141 void			mpi_eventnotify_free(struct mpi_softc *,
142 			    struct mpi_rcb *);
143 void			mpi_eventack(void *, void *);
144 void			mpi_eventack_done(struct mpi_ccb *);
145 int			mpi_evt_sas(struct mpi_softc *, struct mpi_rcb *);
146 void			mpi_evt_sas_detach(void *, void *);
147 void			mpi_evt_sas_detach_done(struct mpi_ccb *);
148 void			mpi_evt_fc_rescan(struct mpi_softc *);
149 void			mpi_fc_rescan(void *, void *);
150 
151 int			mpi_req_cfg_header(struct mpi_softc *, u_int8_t,
152 			    u_int8_t, u_int32_t, int, void *);
153 int			mpi_req_cfg_page(struct mpi_softc *, u_int32_t, int,
154 			    void *, int, void *, size_t);
155 
156 int			mpi_ioctl_cache(struct scsi_link *, u_long,
157 			    struct dk_cache *);
158 
159 #if NBIO > 0
160 int		mpi_bio_get_pg0_raid(struct mpi_softc *, int);
161 int		mpi_ioctl(struct device *, u_long, caddr_t);
162 int		mpi_ioctl_inq(struct mpi_softc *, struct bioc_inq *);
163 int		mpi_ioctl_vol(struct mpi_softc *, struct bioc_vol *);
164 int		mpi_ioctl_disk(struct mpi_softc *, struct bioc_disk *);
165 int		mpi_ioctl_setstate(struct mpi_softc *, struct bioc_setstate *);
166 #ifndef SMALL_KERNEL
167 int		mpi_create_sensors(struct mpi_softc *);
168 void		mpi_refresh_sensors(void *);
169 #endif /* SMALL_KERNEL */
170 #endif /* NBIO > 0 */
171 
172 #define DEVNAME(s)		((s)->sc_dev.dv_xname)
173 
174 #define	dwordsof(s)		(sizeof(s) / sizeof(u_int32_t))
175 
176 #define mpi_read_db(s)		mpi_read((s), MPI_DOORBELL)
177 #define mpi_write_db(s, v)	mpi_write((s), MPI_DOORBELL, (v))
178 #define mpi_read_intr(s)	mpi_read((s), MPI_INTR_STATUS)
179 #define mpi_write_intr(s, v)	mpi_write((s), MPI_INTR_STATUS, (v))
180 #define mpi_pop_reply(s)	mpi_read((s), MPI_REPLY_QUEUE)
181 #define mpi_push_reply_db(s, v)	mpi_write((s), MPI_REPLY_QUEUE, (v))
182 
183 #define mpi_wait_db_int(s)	mpi_wait_ne((s), MPI_INTR_STATUS, \
184 				    MPI_INTR_STATUS_DOORBELL, 0)
185 #define mpi_wait_db_ack(s)	mpi_wait_eq((s), MPI_INTR_STATUS, \
186 				    MPI_INTR_STATUS_IOCDOORBELL, 0)
187 
188 #define MPI_PG_EXTENDED		(1<<0)
189 #define MPI_PG_POLL		(1<<1)
190 #define MPI_PG_FMT		"\020" "\002POLL" "\001EXTENDED"
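/* MPI_PG_FMT is a printf %b bit-name string for the MPI_PG_* flags above */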
191 
192 #define mpi_cfg_header(_s, _t, _n, _a, _h) \
193 	mpi_req_cfg_header((_s), (_t), (_n), (_a), \
194 	    MPI_PG_POLL, (_h))
195 #define mpi_ecfg_header(_s, _t, _n, _a, _h) \
196 	mpi_req_cfg_header((_s), (_t), (_n), (_a), \
197 	    MPI_PG_POLL|MPI_PG_EXTENDED, (_h))
198 
199 #define mpi_cfg_page(_s, _a, _h, _r, _p, _l) \
200 	mpi_req_cfg_page((_s), (_a), MPI_PG_POLL, \
201 	    (_h), (_r), (_p), (_l))
202 #define mpi_ecfg_page(_s, _a, _h, _r, _p, _l) \
203 	mpi_req_cfg_page((_s), (_a), MPI_PG_POLL|MPI_PG_EXTENDED, \
204 	    (_h), (_r), (_p), (_l))
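/*
 * The wrappers above always issue polled config transactions; the ecfg
 * variants additionally request extended page types.
 */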
205 
206 int
207 mpi_attach(struct mpi_softc *sc)
208 {
209 	struct scsibus_attach_args	saa;
210 	struct mpi_ccb			*ccb;
211 
212 	printf("\n");
213 
214 	rw_init(&sc->sc_lock, "mpi_lock");
215 	mtx_init(&sc->sc_evt_rescan_mtx, IPL_BIO);
216 
217 	/* disable interrupts */
218 	mpi_write(sc, MPI_INTR_MASK,
219 	    MPI_INTR_MASK_REPLY | MPI_INTR_MASK_DOORBELL);
220 
221 	if (mpi_init(sc) != 0) {
222 		printf("%s: unable to initialise\n", DEVNAME(sc));
223 		return (1);
224 	}
225 
226 	if (mpi_iocfacts(sc) != 0) {
227 		printf("%s: unable to get iocfacts\n", DEVNAME(sc));
228 		return (1);
229 	}
230 
231 	if (mpi_alloc_ccbs(sc) != 0) {
232 		/* error already printed */
233 		return (1);
234 	}
235 
236 	if (mpi_alloc_replies(sc) != 0) {
237 		printf("%s: unable to allocate reply space\n", DEVNAME(sc));
238 		goto free_ccbs;
239 	}
240 
241 	if (mpi_iocinit(sc) != 0) {
242 		printf("%s: unable to send iocinit\n", DEVNAME(sc));
243 		goto free_ccbs;
244 	}
245 
246 	/* spin until we're operational */
247 	if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
248 	    MPI_DOORBELL_STATE_OPER) != 0) {
249 		printf("%s: state: 0x%08x\n", DEVNAME(sc),
250 		    mpi_read_db(sc) & MPI_DOORBELL_STATE);
251 		printf("%s: operational state timeout\n", DEVNAME(sc));
252 		goto free_ccbs;
253 	}
254 
255 	mpi_push_replies(sc);
256 
257 	if (mpi_portfacts(sc) != 0) {
258 		printf("%s: unable to get portfacts\n", DEVNAME(sc));
259 		goto free_replies;
260 	}
261 
262 	if (mpi_cfg_coalescing(sc) != 0) {
263 		printf("%s: unable to configure coalescing\n", DEVNAME(sc));
264 		goto free_replies;
265 	}
266 
267 	switch (sc->sc_porttype) {
268 	case MPI_PORTFACTS_PORTTYPE_SAS:
269 		SIMPLEQ_INIT(&sc->sc_evt_scan_queue);
270 		mtx_init(&sc->sc_evt_scan_mtx, IPL_BIO);
271 		scsi_ioh_set(&sc->sc_evt_scan_handler, &sc->sc_iopool,
272 		    mpi_evt_sas_detach, sc);
273 		/* FALLTHROUGH */
274 	case MPI_PORTFACTS_PORTTYPE_FC:
275 		if (mpi_eventnotify(sc) != 0) {
276 			printf("%s: unable to enable events\n", DEVNAME(sc));
277 			goto free_replies;
278 		}
279 		break;
280 	}
281 
282 	if (mpi_portenable(sc) != 0) {
283 		printf("%s: unable to enable port\n", DEVNAME(sc));
284 		goto free_replies;
285 	}
286 
287 	if (mpi_fwupload(sc) != 0) {
288 		printf("%s: unable to upload firmware\n", DEVNAME(sc));
289 		goto free_replies;
290 	}
291 
292 	switch (sc->sc_porttype) {
293 	case MPI_PORTFACTS_PORTTYPE_SCSI:
294 		if (mpi_cfg_spi_port(sc) != 0)
295 			goto free_replies;
296 		mpi_squash_ppr(sc);
297 		break;
298 	case MPI_PORTFACTS_PORTTYPE_SAS:
299 		if (mpi_cfg_sas(sc) != 0)
300 			goto free_replies;
301 		break;
302 	case MPI_PORTFACTS_PORTTYPE_FC:
303 		if (mpi_cfg_fc(sc) != 0)
304 			goto free_replies;
305 		break;
306 	}
307 
308 	/* get raid pages */
309 	mpi_get_raid(sc);
310 #if NBIO > 0
311 	if (sc->sc_flags & MPI_F_RAID) {
312 		if (bio_register(&sc->sc_dev, mpi_ioctl) != 0)
313 			panic("%s: controller registration failed",
314 			    DEVNAME(sc));
315 		else {
316 			if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC,
317 			    2, 0, &sc->sc_cfg_hdr) != 0) {
318 				panic("%s: can't get IOC page 2 hdr",
319 				    DEVNAME(sc));
320 			}
321 
322 			sc->sc_vol_page = malloc(sc->sc_cfg_hdr.page_length * 4,
323 			    M_TEMP, M_WAITOK | M_CANFAIL);
324 			if (sc->sc_vol_page == NULL) {
325 				panic("%s: can't get memory for IOC page 2",
326 				    DEVNAME(sc));
327 			}
328 
329 			if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1,
330 			    sc->sc_vol_page,
331 			    sc->sc_cfg_hdr.page_length * 4) != 0) {
332 				panic("%s: can't get IOC page 2", DEVNAME(sc));
333 			}
334 
335 			sc->sc_vol_list = (struct mpi_cfg_raid_vol *)
336 			    (sc->sc_vol_page + 1);
337 
338 			sc->sc_ioctl = mpi_ioctl;
339 		}
340 	}
341 #endif /* NBIO > 0 */
342 
343 	/* we should be good to go now, attach scsibus */
344 	sc->sc_link.adapter = &mpi_switch;
345 	sc->sc_link.adapter_softc = sc;
346 	sc->sc_link.adapter_target = sc->sc_target;
347 	sc->sc_link.adapter_buswidth = sc->sc_buswidth;
348 	sc->sc_link.openings = sc->sc_maxcmds / sc->sc_buswidth;
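	/* share the command slots evenly across the possible targets */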
349 	sc->sc_link.pool = &sc->sc_iopool;
350 
351 	bzero(&saa, sizeof(saa));
352 	saa.saa_sc_link = &sc->sc_link;
353 
354 	/* config_found() returns the scsibus attached to us */
355 	sc->sc_scsibus = (struct scsibus_softc *) config_found(&sc->sc_dev,
356 	    &saa, scsiprint);
357 
358 	/* do domain validation */
359 	if (sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_SCSI)
360 		mpi_run_ppr(sc);
361 
362 	/* enable interrupts */
363 	mpi_write(sc, MPI_INTR_MASK, MPI_INTR_MASK_DOORBELL);
364 
365 #if NBIO > 0
366 #ifndef SMALL_KERNEL
367 	mpi_create_sensors(sc);
368 #endif /* SMALL_KERNEL */
369 #endif /* NBIO > 0 */
370 
371 	return (0);
372 
373 free_replies:
374 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 0,
375 	    sc->sc_repq * MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD);
376 	mpi_dmamem_free(sc, sc->sc_replies);
377 free_ccbs:
378 	while ((ccb = mpi_get_ccb(sc)) != NULL)
379 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
380 	mpi_dmamem_free(sc, sc->sc_requests);
381 	free(sc->sc_ccbs, M_DEVBUF);
382 
383 	return (1);
384 }
385 
386 int
387 mpi_cfg_spi_port(struct mpi_softc *sc)
388 {
389 	struct mpi_cfg_hdr		hdr;
390 	struct mpi_cfg_spi_port_pg1	port;
391 
392 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 1, 0x0,
393 	    &hdr) != 0)
394 		return (1);
395 
396 	if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port, sizeof(port)) != 0)
397 		return (1);
398 
399 	DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_spi_port_pg1\n", DEVNAME(sc));
400 	DNPRINTF(MPI_D_MISC, "%s:  port_scsi_id: %d port_resp_ids 0x%04x\n",
401 	    DEVNAME(sc), port.port_scsi_id, letoh16(port.port_resp_ids));
402 	DNPRINTF(MPI_D_MISC, "%s:  on_bus_timer_value: 0x%08x\n", DEVNAME(sc),
403 	    letoh32(port.on_bus_timer_value));
404 	DNPRINTF(MPI_D_MISC, "%s:  target_config: 0x%02x id_config: 0x%04x\n",
405 	    DEVNAME(sc), port.target_config, letoh16(port.id_config));
406 
407 	if (port.port_scsi_id == sc->sc_target &&
408 	    port.port_resp_ids == htole16(1 << sc->sc_target) &&
409 	    port.on_bus_timer_value != htole32(0x0))
410 		return (0);
411 
412 	DNPRINTF(MPI_D_MISC, "%s: setting port scsi id to %d\n", DEVNAME(sc),
413 	    sc->sc_target);
414 	port.port_scsi_id = sc->sc_target;
415 	port.port_resp_ids = htole16(1 << sc->sc_target);
416 	port.on_bus_timer_value = htole32(0x07000000); /* XXX magic */
417 
418 	if (mpi_cfg_page(sc, 0x0, &hdr, 0, &port, sizeof(port)) != 0) {
419 		printf("%s: unable to configure port scsi id\n", DEVNAME(sc));
420 		return (1);
421 	}
422 
423 	return (0);
424 }
425 
426 void
427 mpi_squash_ppr(struct mpi_softc *sc)
428 {
429 	struct mpi_cfg_hdr		hdr;
430 	struct mpi_cfg_spi_dev_pg1	page;
431 	int				i;
432 
433 	DNPRINTF(MPI_D_PPR, "%s: mpi_squash_ppr\n", DEVNAME(sc));
434 
435 	for (i = 0; i < sc->sc_buswidth; i++) {
436 		if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV,
437 		    1, i, &hdr) != 0)
438 			return;
439 
440 		if (mpi_cfg_page(sc, i, &hdr, 1, &page, sizeof(page)) != 0)
441 			return;
442 
443 		DNPRINTF(MPI_D_PPR, "%s:  target: %d req_params1: 0x%02x "
444 		    "req_offset: 0x%02x req_period: 0x%02x "
445 		    "req_params2: 0x%02x conf: 0x%08x\n", DEVNAME(sc), i,
446 		    page.req_params1, page.req_offset, page.req_period,
447 		    page.req_params2, letoh32(page.configuration));
448 
449 		page.req_params1 = 0x0;
450 		page.req_offset = 0x0;
451 		page.req_period = 0x0;
452 		page.req_params2 = 0x0;
453 		page.configuration = htole32(0x0);
454 
455 		if (mpi_cfg_page(sc, i, &hdr, 0, &page, sizeof(page)) != 0)
456 			return;
457 	}
458 }
459 
460 void
461 mpi_run_ppr(struct mpi_softc *sc)
462 {
463 	struct mpi_cfg_hdr		hdr;
464 	struct mpi_cfg_spi_port_pg0	port_pg;
465 	struct mpi_cfg_ioc_pg3		*physdisk_pg;
466 	struct mpi_cfg_raid_physdisk	*physdisk_list, *physdisk;
467 	size_t				pagelen;
468 	struct scsi_link		*link;
469 	int				i, tries;
470 
471 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 0, 0x0,
472 	    &hdr) != 0) {
473 		DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch header\n",
474 		    DEVNAME(sc));
475 		return;
476 	}
477 
478 	if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port_pg, sizeof(port_pg)) != 0) {
479 		DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch page\n",
480 		    DEVNAME(sc));
481 		return;
482 	}
483 
484 	for (i = 0; i < sc->sc_buswidth; i++) {
485 		link = scsi_get_link(sc->sc_scsibus, i, 0);
486 		if (link == NULL)
487 			continue;
488 
489 		/* do not ppr volumes */
490 		if (link->flags & SDEV_VIRTUAL)
491 			continue;
492 
493 		tries = 0;
494 		while (mpi_ppr(sc, link, NULL, port_pg.min_period,
495 		    port_pg.max_offset, tries) == EAGAIN)
496 			tries++;
497 	}
498 
499 	if ((sc->sc_flags & MPI_F_RAID) == 0)
500 		return;
501 
502 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 3, 0x0,
503 	    &hdr) != 0) {
504 		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
505 		    "fetch ioc pg 3 header\n", DEVNAME(sc));
506 		return;
507 	}
508 
509 	pagelen = hdr.page_length * 4; /* dwords to bytes */
510 	physdisk_pg = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL);
511 	if (physdisk_pg == NULL) {
512 		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
513 		    "allocate ioc pg 3\n", DEVNAME(sc));
514 		return;
515 	}
516 	physdisk_list = (struct mpi_cfg_raid_physdisk *)(physdisk_pg + 1);
517 
518 	if (mpi_cfg_page(sc, 0, &hdr, 1, physdisk_pg, pagelen) != 0) {
519 		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
520 		    "fetch ioc page 3\n", DEVNAME(sc));
521 		goto out;
522 	}
523 
524 	DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s:  no_phys_disks: %d\n", DEVNAME(sc),
525 	    physdisk_pg->no_phys_disks);
526 
527 	for (i = 0; i < physdisk_pg->no_phys_disks; i++) {
528 		physdisk = &physdisk_list[i];
529 
530 		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s:  id: %d bus: %d ioc: %d "
531 		    "num: %d\n", DEVNAME(sc), physdisk->phys_disk_id,
532 		    physdisk->phys_disk_bus, physdisk->phys_disk_ioc,
533 		    physdisk->phys_disk_num);
534 
535 		if (physdisk->phys_disk_ioc != sc->sc_ioc_number)
536 			continue;
537 
538 		tries = 0;
539 		while (mpi_ppr(sc, NULL, physdisk, port_pg.min_period,
540 		    port_pg.max_offset, tries) == EAGAIN)
541 			tries++;
542 	}
543 
544 out:
545 	free(physdisk_pg, M_TEMP);
546 }
547 
548 int
549 mpi_ppr(struct mpi_softc *sc, struct scsi_link *link,
550     struct mpi_cfg_raid_physdisk *physdisk, int period, int offset, int try)
551 {
552 	struct mpi_cfg_hdr		hdr0, hdr1;
553 	struct mpi_cfg_spi_dev_pg0	pg0;
554 	struct mpi_cfg_spi_dev_pg1	pg1;
555 	u_int32_t			address;
556 	int				id;
557 	int				raid = 0;
558 
559 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr period: %d offset: %d try: %d "
560 	    "link quirks: 0x%x\n", DEVNAME(sc), period, offset, try,
561 	    link->quirks);
562 
563 	if (try >= 3)
564 		return (EIO);
565 
566 	if (physdisk == NULL) {
567 		if ((link->inqdata.device & SID_TYPE) == T_PROCESSOR)
568 			return (EIO);
569 
570 		address = link->target;
571 		id = link->target;
572 	} else {
573 		raid = 1;
574 		address = (physdisk->phys_disk_bus << 8) |
575 		    (physdisk->phys_disk_id);
576 		id = physdisk->phys_disk_num;
577 	}
578 
579 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 0,
580 	    address, &hdr0) != 0) {
581 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 0\n",
582 		    DEVNAME(sc));
583 		return (EIO);
584 	}
585 
586 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 1,
587 	    address, &hdr1) != 0) {
588 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 1\n",
589 		    DEVNAME(sc));
590 		return (EIO);
591 	}
592 
593 #ifdef MPI_DEBUG
594 	if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0)) != 0) {
595 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 0\n",
596 		    DEVNAME(sc));
597 		return (EIO);
598 	}
599 
600 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x "
601 	    "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x "
602 	    "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset,
603 	    pg0.neg_period, pg0.neg_params2, letoh32(pg0.information));
604 #endif
605 
606 	if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1)) != 0) {
607 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 1\n",
608 		    DEVNAME(sc));
609 		return (EIO);
610 	}
611 
612 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
613 	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
614 	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
615 	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
616 
617 	pg1.req_params1 = 0;
618 	pg1.req_offset = offset;
619 	pg1.req_period = period;
620 	pg1.req_params2 &= ~MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH;
621 
622 	if (raid || !(link->quirks & SDEV_NOSYNC)) {
623 		pg1.req_params2 |= MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH_WIDE;
624 
625 		switch (try) {
626 		case 0: /* U320 */
627 			break;
628 		case 1: /* U160 */
629 			pg1.req_period = 0x09;
630 			break;
631 		case 2: /* U80 */
632 			pg1.req_period = 0x0a;
633 			break;
634 		}
635 
636 		if (pg1.req_period < 0x09) {
637 			/* Ultra320: enable QAS & PACKETIZED */
638 			pg1.req_params1 |= MPI_CFG_SPI_DEV_1_REQPARAMS_QAS |
639 			    MPI_CFG_SPI_DEV_1_REQPARAMS_PACKETIZED;
640 		}
641 		if (pg1.req_period < 0xa) {
642 			/* >= Ultra160: enable dual xfers */
643 			pg1.req_params1 |=
644 			    MPI_CFG_SPI_DEV_1_REQPARAMS_DUALXFERS;
645 		}
646 	}
647 
648 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
649 	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
650 	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
651 	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
652 
653 	if (mpi_cfg_page(sc, address, &hdr1, 0, &pg1, sizeof(pg1)) != 0) {
654 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to write page 1\n",
655 		    DEVNAME(sc));
656 		return (EIO);
657 	}
658 
659 	if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1)) != 0) {
660 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 1\n",
661 		    DEVNAME(sc));
662 		return (EIO);
663 	}
664 
665 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
666 	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
667 	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
668 	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
669 
670 	if (mpi_inq(sc, id, raid) != 0) {
671 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to do inquiry against "
672 		    "target %d\n", DEVNAME(sc), link->target);
673 		return (EIO);
674 	}
675 
676 	if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0)) != 0) {
677 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 0 after "
678 		    "inquiry\n", DEVNAME(sc));
679 		return (EIO);
680 	}
681 
682 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x "
683 	    "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x "
684 	    "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset,
685 	    pg0.neg_period, pg0.neg_params2, letoh32(pg0.information));
686 
687 	if (!(letoh32(pg0.information) & 0x07) && (try == 0)) {
688 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U320 ppr rejected\n",
689 		    DEVNAME(sc));
690 		return (EAGAIN);
691 	}
692 
693 	if ((((letoh32(pg0.information) >> 8) & 0xff) > 0x09) && (try == 1)) {
694 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U160 ppr rejected\n",
695 		    DEVNAME(sc));
696 		return (EAGAIN);
697 	}
698 
699 	if (letoh32(pg0.information) & 0x0e) {
700 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr ppr rejected: %0x\n",
701 		    DEVNAME(sc), letoh32(pg0.information));
702 		return (EAGAIN);
703 	}
704 
705 	switch(pg0.neg_period) {
706 	case 0x08:
707 		period = 160;
708 		break;
709 	case 0x09:
710 		period = 80;
711 		break;
712 	case 0x0a:
713 		period = 40;
714 		break;
715 	case 0x0b:
716 		period = 20;
717 		break;
718 	case 0x0c:
719 		period = 10;
720 		break;
721 	default:
722 		period = 0;
723 		break;
724 	}
725 
726 	printf("%s: %s %d %s at %dMHz width %dbit offset %d "
727 	    "QAS %d DT %d IU %d\n", DEVNAME(sc), raid ? "phys disk" : "target",
728 	    id, period ? "Sync" : "Async", period,
729 	    (pg0.neg_params2 & MPI_CFG_SPI_DEV_0_NEGPARAMS_WIDTH_WIDE) ? 16 : 8,
730 	    pg0.neg_offset,
731 	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_QAS) ? 1 : 0,
732 	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_DUALXFERS) ? 1 : 0,
733 	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_PACKETIZED) ? 1 : 0);
734 
735 	return (0);
736 }
737 
738 int
739 mpi_inq(struct mpi_softc *sc, u_int16_t target, int physdisk)
740 {
741 	struct mpi_ccb			*ccb;
742 	struct scsi_inquiry		inq;
743 	struct {
744 		struct mpi_msg_scsi_io		io;
745 		struct mpi_sge			sge;
746 		struct scsi_inquiry_data	inqbuf;
747 		struct scsi_sense_data		sense;
748 	} __packed			*bundle;
749 	struct mpi_msg_scsi_io		*io;
750 	struct mpi_sge			*sge;
751 	u_int64_t			addr;
752 
753 	DNPRINTF(MPI_D_PPR, "%s: mpi_inq\n", DEVNAME(sc));
754 
755 	bzero(&inq, sizeof(inq));
756 	inq.opcode = INQUIRY;
757 	_lto2b(sizeof(struct scsi_inquiry_data), inq.length);
758 
759 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
760 	if (ccb == NULL)
761 		return (1);
762 
763 	ccb->ccb_done = mpi_empty_done;
764 
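	/*
	 * The request frame carries the io request, its single SGE and
	 * room for the inquiry data and sense, so this polled inquiry
	 * needs no separate DMA buffer.
	 */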
765 	bundle = ccb->ccb_cmd;
766 	io = &bundle->io;
767 	sge = &bundle->sge;
768 
769 	io->function = physdisk ? MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH :
770 	    MPI_FUNCTION_SCSI_IO_REQUEST;
771 	/*
772 	 * bus is always 0
773 	 * io->bus = htole16(sc->sc_bus);
774 	 */
775 	io->target_id = target;
776 
777 	io->cdb_length = sizeof(inq);
778 	io->sense_buf_len = sizeof(struct scsi_sense_data);
779 	io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64;
780 
781 	io->msg_context = htole32(ccb->ccb_id);
782 
783 	/*
784 	 * always lun 0
785 	 * io->lun[0] = htobe16(link->lun);
786 	 */
787 
788 	io->direction = MPI_SCSIIO_DIR_READ;
789 	io->tagging = MPI_SCSIIO_ATTR_NO_DISCONNECT;
790 
791 	bcopy(&inq, io->cdb, sizeof(inq));
792 
793 	io->data_length = htole32(sizeof(struct scsi_inquiry_data));
794 
795 	io->sense_buf_low_addr = htole32(ccb->ccb_cmd_dva +
796 	    ((u_int8_t *)&bundle->sense - (u_int8_t *)bundle));
797 
798 	sge->sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64 |
799 	    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL |
800 	    (u_int32_t)sizeof(inq));
801 
802 	addr = ccb->ccb_cmd_dva +
803 	    ((u_int8_t *)&bundle->inqbuf - (u_int8_t *)bundle);
804 	sge->sg_addr = htole64(addr);
805 
806 	if (mpi_poll(sc, ccb, 5000) != 0)
807 		return (1);
808 
809 	if (ccb->ccb_rcb != NULL)
810 		mpi_push_reply(sc, ccb->ccb_rcb);
811 
812 	scsi_io_put(&sc->sc_iopool, ccb);
813 
814 	return (0);
815 }
816 
817 int
818 mpi_cfg_sas(struct mpi_softc *sc)
819 {
820 	struct mpi_ecfg_hdr		ehdr;
821 	struct mpi_cfg_sas_iou_pg1	*pg;
822 	size_t				pagelen;
823 	int				rv = 0;
824 
825 	if (mpi_ecfg_header(sc, MPI_CONFIG_REQ_EXTPAGE_TYPE_SAS_IO_UNIT, 1, 0,
826 	    &ehdr) != 0)
827 		return (EIO);
828 
829 	pagelen = letoh16(ehdr.ext_page_length) * 4;
830 	pg = malloc(pagelen, M_TEMP, M_NOWAIT | M_ZERO);
831 	if (pg == NULL)
832 		return (ENOMEM);
833 
834 	if (mpi_ecfg_page(sc, 0, &ehdr, 1, pg, pagelen) != 0) {
835 		rv = EIO;
836 		goto out;
837 	}
838 
839 	if (pg->max_sata_q_depth != 32) {
840 		pg->max_sata_q_depth = 32;
841 
842 		if (mpi_ecfg_page(sc, 0, &ehdr, 0, pg, pagelen) != 0) {
843 			rv = EIO;
844 			goto out;
845 		}
846 	}
847 
848 out:
849 	free(pg, M_TEMP);
850 	return (rv);
851 }
852 
853 int
854 mpi_cfg_fc(struct mpi_softc *sc)
855 {
856 	struct mpi_cfg_hdr		hdr;
857 	struct mpi_cfg_fc_port_pg0	pg0;
858 	struct mpi_cfg_fc_port_pg1	pg1;
859 
860 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 0, 0,
861 	    &hdr) != 0) {
862 		printf("%s: unable to fetch FC port header 0\n", DEVNAME(sc));
863 		return (1);
864 	}
865 
866 	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg0, sizeof(pg0)) != 0) {
867 		printf("%s: unable to fetch FC port page 0\n", DEVNAME(sc));
868 		return (1);
869 	}
870 
871 	sc->sc_link.port_wwn = letoh64(pg0.wwpn);
872 	sc->sc_link.node_wwn = letoh64(pg0.wwnn);
873 
874 	/* tweak the port configuration more to our liking */
875 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 1, 0,
876 	    &hdr) != 0) {
877 		printf("%s: unable to fetch FC port header 1\n", DEVNAME(sc));
878 		return (1);
879 	}
880 
881 	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg1, sizeof(pg1)) != 0) {
882 		printf("%s: unable to fetch FC port page 1\n", DEVNAME(sc));
883 		return (1);
884 	}
885 
886 	SET(pg1.flags, htole32(MPI_CFG_FC_PORT_0_FLAGS_IMMEDIATE_ERROR |
887 	    MPI_CFG_FC_PORT_0_FLAGS_VERBOSE_RESCAN));
888 
889 	if (mpi_cfg_page(sc, 0, &hdr, 0, &pg1, sizeof(pg1)) != 0) {
890 		printf("%s: unable to set FC port page 1\n", DEVNAME(sc));
891 		return (1);
892 	}
893 
894 	return (0);
895 }
896 
897 void
898 mpi_detach(struct mpi_softc *sc)
899 {
900 
901 }
902 
903 int
904 mpi_intr(void *arg)
905 {
906 	struct mpi_softc		*sc = arg;
907 	u_int32_t			reg;
908 	int				rv = 0;
909 
910 	if ((mpi_read_intr(sc) & MPI_INTR_STATUS_REPLY) == 0)
911 		return (rv);
912 
913 	while ((reg = mpi_pop_reply(sc)) != 0xffffffff) {
914 		mpi_reply(sc, reg);
915 		rv = 1;
916 	}
917 
918 	return (rv);
919 }
920 
921 void
922 mpi_reply(struct mpi_softc *sc, u_int32_t reg)
923 {
924 	struct mpi_ccb			*ccb;
925 	struct mpi_rcb			*rcb = NULL;
926 	struct mpi_msg_reply		*reply = NULL;
927 	u_int32_t			reply_dva;
928 	int				id;
929 	int				i;
930 
931 	DNPRINTF(MPI_D_INTR, "%s: mpi_reply reg: 0x%08x\n", DEVNAME(sc), reg);
932 
933 	if (reg & MPI_REPLY_QUEUE_ADDRESS) {
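		/*
		 * address replies carry the reply frame's bus address
		 * shifted right by one bit; recover it and turn it back
		 * into an index into the rcb array.
		 */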
934 		reply_dva = (reg & MPI_REPLY_QUEUE_ADDRESS_MASK) << 1;
935 		i = (reply_dva - (u_int32_t)MPI_DMA_DVA(sc->sc_replies)) /
936 		    MPI_REPLY_SIZE;
937 		rcb = &sc->sc_rcbs[i];
938 
939 		bus_dmamap_sync(sc->sc_dmat,
940 		    MPI_DMA_MAP(sc->sc_replies), rcb->rcb_offset,
941 		    MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD);
942 
943 		reply = rcb->rcb_reply;
944 
945 		id = letoh32(reply->msg_context);
946 	} else {
947 		switch (reg & MPI_REPLY_QUEUE_TYPE_MASK) {
948 		case MPI_REPLY_QUEUE_TYPE_INIT:
949 			id = reg & MPI_REPLY_QUEUE_CONTEXT;
950 			break;
951 
952 		default:
953 			panic("%s: unsupported context reply",
954 			    DEVNAME(sc));
955 		}
956 	}
957 
958 	DNPRINTF(MPI_D_INTR, "%s: mpi_reply id: %d reply: %p\n",
959 	    DEVNAME(sc), id, reply);
960 
961 	ccb = &sc->sc_ccbs[id];
962 
963 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests),
964 	    ccb->ccb_offset, MPI_REQUEST_SIZE,
965 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
966 	ccb->ccb_state = MPI_CCB_READY;
967 	ccb->ccb_rcb = rcb;
968 
969 	ccb->ccb_done(ccb);
970 }
971 
972 struct mpi_dmamem *
973 mpi_dmamem_alloc(struct mpi_softc *sc, size_t size)
974 {
975 	struct mpi_dmamem		*mdm;
976 	int				nsegs;
977 
978 	mdm = malloc(sizeof(struct mpi_dmamem), M_DEVBUF, M_NOWAIT | M_ZERO);
979 	if (mdm == NULL)
980 		return (NULL);
981 
982 	mdm->mdm_size = size;
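	/*
	 * all driver dma memory is allocated as a single contiguous
	 * segment so one bus address covers the whole region.
	 */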
983 
984 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
985 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
986 		goto mdmfree;
987 
988 	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg,
989 	    1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
990 		goto destroy;
991 
992 	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
993 	    &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0)
994 		goto free;
995 
996 	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
997 	    NULL, BUS_DMA_NOWAIT) != 0)
998 		goto unmap;
999 
1000 	DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_alloc size: %d mdm: %#x "
1001 	    "map: %#x nsegs: %d segs: %#x kva: %x\n", DEVNAME(sc), size,
1002 	    mdm, mdm->mdm_map, nsegs, mdm->mdm_seg, mdm->mdm_kva);
1003 
1004 	return (mdm);
1005 
1006 unmap:
1007 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
1008 free:
1009 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
1010 destroy:
1011 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
1012 mdmfree:
1013 	free(mdm, M_DEVBUF);
1014 
1015 	return (NULL);
1016 }
1017 
1018 void
1019 mpi_dmamem_free(struct mpi_softc *sc, struct mpi_dmamem *mdm)
1020 {
1021 	DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_free %#x\n", DEVNAME(sc), mdm);
1022 
1023 	bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map);
1024 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
1025 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
1026 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
1027 	free(mdm, M_DEVBUF);
1028 }
1029 
1030 int
1031 mpi_alloc_ccbs(struct mpi_softc *sc)
1032 {
1033 	struct mpi_ccb			*ccb;
1034 	u_int8_t			*cmd;
1035 	int				i;
1036 
1037 	SLIST_INIT(&sc->sc_ccb_free);
1038 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
1039 
1040 	sc->sc_ccbs = malloc(sizeof(struct mpi_ccb) * sc->sc_maxcmds,
1041 	    M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
1042 	if (sc->sc_ccbs == NULL) {
1043 		printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
1044 		return (1);
1045 	}
1046 
1047 	sc->sc_requests = mpi_dmamem_alloc(sc,
1048 	    MPI_REQUEST_SIZE * sc->sc_maxcmds);
1049 	if (sc->sc_requests == NULL) {
1050 		printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
1051 		goto free_ccbs;
1052 	}
1053 	cmd = MPI_DMA_KVA(sc->sc_requests);
1054 	bzero(cmd, MPI_REQUEST_SIZE * sc->sc_maxcmds);
1055 
1056 	for (i = 0; i < sc->sc_maxcmds; i++) {
1057 		ccb = &sc->sc_ccbs[i];
1058 
1059 		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS,
1060 		    sc->sc_max_sgl_len, MAXPHYS, 0,
1061 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1062 		    &ccb->ccb_dmamap) != 0) {
1063 			printf("%s: unable to create dma map\n", DEVNAME(sc));
1064 			goto free_maps;
1065 		}
1066 
1067 		ccb->ccb_sc = sc;
1068 		ccb->ccb_id = i;
1069 		ccb->ccb_offset = MPI_REQUEST_SIZE * i;
1070 		ccb->ccb_state = MPI_CCB_READY;
1071 
1072 		ccb->ccb_cmd = &cmd[ccb->ccb_offset];
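		/*
		 * the request post queue register only takes a 32-bit
		 * address, so the request frame DVAs are kept as 32-bit
		 * values here.
		 */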
1073 		ccb->ccb_cmd_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_requests) +
1074 		    ccb->ccb_offset;
1075 
1076 		DNPRINTF(MPI_D_CCB, "%s: mpi_alloc_ccbs(%d) ccb: %#x map: %#x "
1077 		    "sc: %#x id: %#x offs: %#x cmd: %#x dva: %#x\n",
1078 		    DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc,
1079 		    ccb->ccb_id, ccb->ccb_offset, ccb->ccb_cmd,
1080 		    ccb->ccb_cmd_dva);
1081 
1082 		mpi_put_ccb(sc, ccb);
1083 	}
1084 
1085 	scsi_iopool_init(&sc->sc_iopool, sc, mpi_get_ccb, mpi_put_ccb);
1086 
1087 	return (0);
1088 
1089 free_maps:
1090 	while ((ccb = mpi_get_ccb(sc)) != NULL)
1091 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
1092 
1093 	mpi_dmamem_free(sc, sc->sc_requests);
1094 free_ccbs:
1095 	free(sc->sc_ccbs, M_DEVBUF);
1096 
1097 	return (1);
1098 }
1099 
1100 void *
1101 mpi_get_ccb(void *xsc)
1102 {
1103 	struct mpi_softc		*sc = xsc;
1104 	struct mpi_ccb			*ccb;
1105 
1106 	mtx_enter(&sc->sc_ccb_mtx);
1107 	ccb = SLIST_FIRST(&sc->sc_ccb_free);
1108 	if (ccb != NULL) {
1109 		SLIST_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link);
1110 		ccb->ccb_state = MPI_CCB_READY;
1111 	}
1112 	mtx_leave(&sc->sc_ccb_mtx);
1113 
1114 	DNPRINTF(MPI_D_CCB, "%s: mpi_get_ccb %p\n", DEVNAME(sc), ccb);
1115 
1116 	return (ccb);
1117 }
1118 
1119 void
1120 mpi_put_ccb(void *xsc, void *io)
1121 {
1122 	struct mpi_softc		*sc = xsc;
1123 	struct mpi_ccb			*ccb = io;
1124 
1125 	DNPRINTF(MPI_D_CCB, "%s: mpi_put_ccb %p\n", DEVNAME(sc), ccb);
1126 
1127 #ifdef DIAGNOSTIC
1128 	if (ccb->ccb_state == MPI_CCB_FREE)
1129 		panic("mpi_put_ccb: double free");
1130 #endif
1131 
1132 	ccb->ccb_state = MPI_CCB_FREE;
1133 	ccb->ccb_cookie = NULL;
1134 	ccb->ccb_done = NULL;
1135 	bzero(ccb->ccb_cmd, MPI_REQUEST_SIZE);
1136 	mtx_enter(&sc->sc_ccb_mtx);
1137 	SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link);
1138 	mtx_leave(&sc->sc_ccb_mtx);
1139 }
1140 
1141 int
1142 mpi_alloc_replies(struct mpi_softc *sc)
1143 {
1144 	DNPRINTF(MPI_D_MISC, "%s: mpi_alloc_replies\n", DEVNAME(sc));
1145 
1146 	sc->sc_rcbs = malloc(sc->sc_repq * sizeof(struct mpi_rcb), M_DEVBUF,
1147 	    M_WAITOK|M_CANFAIL);
1148 	if (sc->sc_rcbs == NULL)
1149 		return (1);
1150 
1151 	sc->sc_replies = mpi_dmamem_alloc(sc, sc->sc_repq * MPI_REPLY_SIZE);
1152 	if (sc->sc_replies == NULL) {
1153 		free(sc->sc_rcbs, M_DEVBUF);
1154 		return (1);
1155 	}
1156 
1157 	return (0);
1158 }
1159 
1160 void
1161 mpi_push_reply(struct mpi_softc *sc, struct mpi_rcb *rcb)
1162 {
1163 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies),
1164 	    rcb->rcb_offset, MPI_REPLY_SIZE, BUS_DMASYNC_PREREAD);
1165 	mpi_push_reply_db(sc, rcb->rcb_reply_dva);
1166 }
1167 
1168 void
1169 mpi_push_replies(struct mpi_softc *sc)
1170 {
1171 	struct mpi_rcb			*rcb;
1172 	char				*kva = MPI_DMA_KVA(sc->sc_replies);
1173 	int				i;
1174 
1175 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 0,
1176 	    sc->sc_repq * MPI_REPLY_SIZE, BUS_DMASYNC_PREREAD);
1177 
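	/*
	 * carve the reply dmamem into fixed-size frames and hand each
	 * frame's bus address to the IOC through the reply free queue.
	 */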
1178 	for (i = 0; i < sc->sc_repq; i++) {
1179 		rcb = &sc->sc_rcbs[i];
1180 
1181 		rcb->rcb_reply = kva + MPI_REPLY_SIZE * i;
1182 		rcb->rcb_offset = MPI_REPLY_SIZE * i;
1183 		rcb->rcb_reply_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_replies) +
1184 		    MPI_REPLY_SIZE * i;
1185 		mpi_push_reply_db(sc, rcb->rcb_reply_dva);
1186 	}
1187 }
1188 
1189 void
1190 mpi_start(struct mpi_softc *sc, struct mpi_ccb *ccb)
1191 {
1192 	DNPRINTF(MPI_D_RW, "%s: mpi_start %#x\n", DEVNAME(sc),
1193 	    ccb->ccb_cmd_dva);
1194 
1195 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests),
1196 	    ccb->ccb_offset, MPI_REQUEST_SIZE,
1197 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1198 
1199 	ccb->ccb_state = MPI_CCB_QUEUED;
1200 	mpi_write(sc, MPI_REQ_QUEUE, ccb->ccb_cmd_dva);
1201 }
1202 
1203 int
1204 mpi_poll(struct mpi_softc *sc, struct mpi_ccb *ccb, int timeout)
1205 {
1206 	void				(*done)(struct mpi_ccb *);
1207 	void				*cookie;
1208 	int				rv = 1;
1209 	u_int32_t			reg;
1210 
1211 	DNPRINTF(MPI_D_INTR, "%s: mpi_poll timeout %d\n", DEVNAME(sc),
1212 	    timeout);
1213 
1214 	done = ccb->ccb_done;
1215 	cookie = ccb->ccb_cookie;
1216 
1217 	ccb->ccb_done = mpi_poll_done;
1218 	ccb->ccb_cookie = &rv;
1219 
1220 	mpi_start(sc, ccb);
1221 	while (rv == 1) {
1222 		reg = mpi_pop_reply(sc);
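		/* the reply post queue reads as all ones while it is empty */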
1223 		if (reg == 0xffffffff) {
1224 			if (timeout-- == 0) {
1225 				printf("%s: timeout\n", DEVNAME(sc));
1226 				goto timeout;
1227 			}
1228 
1229 			delay(1000);
1230 			continue;
1231 		}
1232 
1233 		mpi_reply(sc, reg);
1234 	}
1235 
1236 	ccb->ccb_cookie = cookie;
1237 	done(ccb);
1238 
1239 timeout:
1240 	return (rv);
1241 }
1242 
1243 void
1244 mpi_poll_done(struct mpi_ccb *ccb)
1245 {
1246 	int				*rv = ccb->ccb_cookie;
1247 
1248 	*rv = 0;
1249 }
1250 
1251 void
1252 mpi_wait(struct mpi_softc *sc, struct mpi_ccb *ccb)
1253 {
1254 	struct mutex			cookie = MUTEX_INITIALIZER(IPL_BIO);
1255 	void				(*done)(struct mpi_ccb *);
1256 
1257 	done = ccb->ccb_done;
1258 	ccb->ccb_done = mpi_wait_done;
1259 	ccb->ccb_cookie = &cookie;
1260 
1261 	/* XXX this will wait forever for the ccb to complete */
1262 
1263 	mpi_start(sc, ccb);
1264 
1265 	mtx_enter(&cookie);
1266 	while (ccb->ccb_cookie != NULL)
1267 		msleep(ccb, &cookie, PRIBIO, "mpiwait", 0);
1268 	mtx_leave(&cookie);
1269 
1270 	done(ccb);
1271 }
1272 
1273 void
1274 mpi_wait_done(struct mpi_ccb *ccb)
1275 {
1276 	struct mutex			*cookie = ccb->ccb_cookie;
1277 
1278 	mtx_enter(cookie);
1279 	ccb->ccb_cookie = NULL;
1280 	wakeup_one(ccb);
1281 	mtx_leave(cookie);
1282 }
1283 
1284 void
1285 mpi_scsi_cmd(struct scsi_xfer *xs)
1286 {
1287 	struct scsi_link		*link = xs->sc_link;
1288 	struct mpi_softc		*sc = link->adapter_softc;
1289 	struct mpi_ccb			*ccb;
1290 	struct mpi_ccb_bundle		*mcb;
1291 	struct mpi_msg_scsi_io		*io;
1292 
1293 	DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd\n", DEVNAME(sc));
1294 
1295 	if (xs->cmdlen > MPI_CDB_LEN) {
1296 		DNPRINTF(MPI_D_CMD, "%s: CDB too big %d\n",
1297 		    DEVNAME(sc), xs->cmdlen);
1298 		bzero(&xs->sense, sizeof(xs->sense));
1299 		xs->sense.error_code = SSD_ERRCODE_VALID | SSD_ERRCODE_CURRENT;
1300 		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
1301 		xs->sense.add_sense_code = 0x20;
1302 		xs->error = XS_SENSE;
1303 		scsi_done(xs);
1304 		return;
1305 	}
1306 
1307 	ccb = xs->io;
1308 
1309 	DNPRINTF(MPI_D_CMD, "%s: ccb_id: %d xs->flags: 0x%x\n",
1310 	    DEVNAME(sc), ccb->ccb_id, xs->flags);
1311 
1312 	ccb->ccb_cookie = xs;
1313 	ccb->ccb_done = mpi_scsi_cmd_done;
1314 
1315 	mcb = ccb->ccb_cmd;
1316 	io = &mcb->mcb_io;
1317 
1318 	io->function = MPI_FUNCTION_SCSI_IO_REQUEST;
1319 	/*
1320 	 * bus is always 0
1321 	 * io->bus = htole16(sc->sc_bus);
1322 	 */
1323 	io->target_id = link->target;
1324 
1325 	io->cdb_length = xs->cmdlen;
1326 	io->sense_buf_len = sizeof(xs->sense);
1327 	io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64;
1328 
1329 	io->msg_context = htole32(ccb->ccb_id);
1330 
1331 	io->lun[0] = htobe16(link->lun);
1332 
1333 	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
1334 	case SCSI_DATA_IN:
1335 		io->direction = MPI_SCSIIO_DIR_READ;
1336 		break;
1337 	case SCSI_DATA_OUT:
1338 		io->direction = MPI_SCSIIO_DIR_WRITE;
1339 		break;
1340 	default:
1341 		io->direction = MPI_SCSIIO_DIR_NONE;
1342 		break;
1343 	}
1344 
1345 	if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SCSI &&
1346 	    (link->quirks & SDEV_NOTAGS))
1347 		io->tagging = MPI_SCSIIO_ATTR_UNTAGGED;
1348 	else
1349 		io->tagging = MPI_SCSIIO_ATTR_SIMPLE_Q;
1350 
1351 	bcopy(xs->cmd, io->cdb, xs->cmdlen);
1352 
1353 	io->data_length = htole32(xs->datalen);
1354 
1355 	io->sense_buf_low_addr = htole32(ccb->ccb_cmd_dva +
1356 	    ((u_int8_t *)&mcb->mcb_sense - (u_int8_t *)mcb));
1357 
1358 	if (mpi_load_xs(ccb) != 0) {
1359 		xs->error = XS_DRIVER_STUFFUP;
1360 		scsi_done(xs);
1361 		return;
1362 	}
1363 
1364 	timeout_set(&xs->stimeout, mpi_timeout_xs, ccb);
1365 
1366 	if (xs->flags & SCSI_POLL) {
1367 		if (mpi_poll(sc, ccb, xs->timeout) != 0) {
1368 			xs->error = XS_DRIVER_STUFFUP;
1369 			scsi_done(xs);
1370 		}
1371 		return;
1372 	}
1373 
1374 	mpi_start(sc, ccb);
1375 }
1376 
1377 void
1378 mpi_scsi_cmd_done(struct mpi_ccb *ccb)
1379 {
1380 	struct mpi_softc		*sc = ccb->ccb_sc;
1381 	struct scsi_xfer		*xs = ccb->ccb_cookie;
1382 	struct mpi_ccb_bundle		*mcb = ccb->ccb_cmd;
1383 	bus_dmamap_t			dmap = ccb->ccb_dmamap;
1384 	struct mpi_msg_scsi_io_error	*sie;
1385 
1386 	if (xs->datalen != 0) {
1387 		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
1388 		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
1389 		    BUS_DMASYNC_POSTWRITE);
1390 
1391 		bus_dmamap_unload(sc->sc_dmat, dmap);
1392 	}
1393 
1394 	/* timeout_del */
1395 	xs->error = XS_NOERROR;
1396 	xs->resid = 0;
1397 
1398 	if (ccb->ccb_rcb == NULL) {
1399 		/* no scsi error, we're ok so drop out early */
1400 		xs->status = SCSI_OK;
1401 		scsi_done(xs);
1402 		return;
1403 	}
1404 
1405 	sie = ccb->ccb_rcb->rcb_reply;
1406 
1407 	DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd_done xs cmd: 0x%02x len: %d "
1408 	    "flags 0x%x\n", DEVNAME(sc), xs->cmd->opcode, xs->datalen,
1409 	    xs->flags);
1410 	DNPRINTF(MPI_D_CMD, "%s:  target_id: %d bus: %d msg_length: %d "
1411 	    "function: 0x%02x\n", DEVNAME(sc), sie->target_id, sie->bus,
1412 	    sie->msg_length, sie->function);
1413 	DNPRINTF(MPI_D_CMD, "%s:  cdb_length: %d sense_buf_length: %d "
1414 	    "msg_flags: 0x%02x\n", DEVNAME(sc), sie->cdb_length,
1415 	    sie->sense_buf_len, sie->msg_flags);
1416 	DNPRINTF(MPI_D_CMD, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
1417 	    letoh32(sie->msg_context));
1418 	DNPRINTF(MPI_D_CMD, "%s:  scsi_status: 0x%02x scsi_state: 0x%02x "
1419 	    "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status,
1420 	    sie->scsi_state, letoh16(sie->ioc_status));
1421 	DNPRINTF(MPI_D_CMD, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
1422 	    letoh32(sie->ioc_loginfo));
1423 	DNPRINTF(MPI_D_CMD, "%s:  transfer_count: %d\n", DEVNAME(sc),
1424 	    letoh32(sie->transfer_count));
1425 	DNPRINTF(MPI_D_CMD, "%s:  sense_count: %d\n", DEVNAME(sc),
1426 	    letoh32(sie->sense_count));
1427 	DNPRINTF(MPI_D_CMD, "%s:  response_info: 0x%08x\n", DEVNAME(sc),
1428 	    letoh32(sie->response_info));
1429 	DNPRINTF(MPI_D_CMD, "%s:  tag: 0x%04x\n", DEVNAME(sc),
1430 	    letoh16(sie->tag));
1431 
1432 	xs->status = sie->scsi_status;
1433 	switch (letoh16(sie->ioc_status)) {
1434 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
1435 		xs->resid = xs->datalen - letoh32(sie->transfer_count);
1436 		if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_NO_SCSI_STATUS) {
1437 			xs->error = XS_DRIVER_STUFFUP;
1438 			break;
1439 		}
1440 		/* FALLTHROUGH */
1441 	case MPI_IOCSTATUS_SUCCESS:
1442 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
1443 		switch (xs->status) {
1444 		case SCSI_OK:
1445 			xs->resid = 0;
1446 			break;
1447 
1448 		case SCSI_CHECK:
1449 			xs->error = XS_SENSE;
1450 			break;
1451 
1452 		case SCSI_BUSY:
1453 		case SCSI_QUEUE_FULL:
1454 			xs->error = XS_BUSY;
1455 			break;
1456 
1457 		default:
1458 			xs->error = XS_DRIVER_STUFFUP;
1459 			break;
1460 		}
1461 		break;
1462 
1463 	case MPI_IOCSTATUS_BUSY:
1464 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
1465 		xs->error = XS_BUSY;
1466 		break;
1467 
1468 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
1469 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
1470 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1471 		xs->error = XS_SELTIMEOUT;
1472 		break;
1473 
1474 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
1475 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
1476 		xs->error = XS_RESET;
1477 		break;
1478 
1479 	default:
1480 		xs->error = XS_DRIVER_STUFFUP;
1481 		break;
1482 	}
1483 
1484 	if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_AUTOSENSE_VALID)
1485 		bcopy(&mcb->mcb_sense, &xs->sense, sizeof(xs->sense));
1486 
1487 	DNPRINTF(MPI_D_CMD, "%s:  xs err: 0x%02x status: %d\n", DEVNAME(sc),
1488 	    xs->error, xs->status);
1489 
1490 	mpi_push_reply(sc, ccb->ccb_rcb);
1491 	scsi_done(xs);
1492 }
1493 
1494 void
1495 mpi_timeout_xs(void *arg)
1496 {
1497 	/* XXX */
1498 }
1499 
1500 int
1501 mpi_load_xs(struct mpi_ccb *ccb)
1502 {
1503 	struct mpi_softc		*sc = ccb->ccb_sc;
1504 	struct scsi_xfer		*xs = ccb->ccb_cookie;
1505 	struct mpi_ccb_bundle		*mcb = ccb->ccb_cmd;
1506 	struct mpi_msg_scsi_io		*io = &mcb->mcb_io;
1507 	struct mpi_sge			*sge, *nsge = &mcb->mcb_sgl[0];
1508 	struct mpi_sge			*ce = NULL, *nce;
1509 	u_int64_t			ce_dva;
1510 	bus_dmamap_t			dmap = ccb->ccb_dmamap;
1511 	u_int32_t			addr, flags;
1512 	int				i, error;
1513 
1514 	if (xs->datalen == 0) {
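		/*
		 * no data to map, but the request still needs a single
		 * SGE terminating the list.
		 */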
1515 		nsge->sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE |
1516 		    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL);
1517 		return (0);
1518 	}
1519 
1520 	error = bus_dmamap_load(sc->sc_dmat, dmap,
1521 	    xs->data, xs->datalen, NULL,
1522 	    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
1523 	if (error) {
1524 		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
1525 		return (1);
1526 	}
1527 
1528 	flags = MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64;
1529 	if (xs->flags & SCSI_DATA_OUT)
1530 		flags |= MPI_SGE_FL_DIR_OUT;
1531 
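	/*
	 * if the SGL will not fit in the request frame, the last slot
	 * is reserved for a chain element pointing at the SGEs that
	 * follow in the bundle.
	 */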
1532 	if (dmap->dm_nsegs > sc->sc_first_sgl_len) {
1533 		ce = &mcb->mcb_sgl[sc->sc_first_sgl_len - 1];
1534 		io->chain_offset = ((u_int8_t *)ce - (u_int8_t *)io) / 4;
1535 	}
1536 
1537 	for (i = 0; i < dmap->dm_nsegs; i++) {
1538 
1539 		if (nsge == ce) {
1540 			nsge++;
1541 			sge->sg_hdr |= htole32(MPI_SGE_FL_LAST);
1542 
1543 			DNPRINTF(MPI_D_DMA, "%s:   - 0x%08x 0x%08x 0x%08x\n",
1544 			    DEVNAME(sc), sge->sg_hdr,
1545 			    sge->sg_hi_addr, sge->sg_lo_addr);
1546 
1547 			if ((dmap->dm_nsegs - i) > sc->sc_chain_len) {
1548 				nce = &nsge[sc->sc_chain_len - 1];
1549 				addr = ((u_int8_t *)nce - (u_int8_t *)nsge) / 4;
1550 				addr = addr << 16 |
1551 				    sizeof(struct mpi_sge) * sc->sc_chain_len;
1552 			} else {
1553 				nce = NULL;
1554 				addr = sizeof(struct mpi_sge) *
1555 				    (dmap->dm_nsegs - i);
1556 			}
1557 
1558 			ce->sg_hdr = htole32(MPI_SGE_FL_TYPE_CHAIN |
1559 			    MPI_SGE_FL_SIZE_64 | addr);
1560 
1561 			ce_dva = ccb->ccb_cmd_dva +
1562 			    ((u_int8_t *)nsge - (u_int8_t *)mcb);
1563 
1564 			ce->sg_addr = htole64(ce_dva);
1565 
1566 			DNPRINTF(MPI_D_DMA, "%s:  ce: 0x%08x 0x%08x 0x%08x\n",
1567 			    DEVNAME(sc), ce->sg_hdr, ce->sg_hi_addr,
1568 			    ce->sg_lo_addr);
1569 
1570 			ce = nce;
1571 		}
1572 
1573 		DNPRINTF(MPI_D_DMA, "%s:  %d: %d 0x%016llx\n", DEVNAME(sc),
1574 		    i, dmap->dm_segs[i].ds_len,
1575 		    (u_int64_t)dmap->dm_segs[i].ds_addr);
1576 
1577 		sge = nsge;
1578 
1579 		sge->sg_hdr = htole32(flags | dmap->dm_segs[i].ds_len);
1580 		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
1581 
1582 		DNPRINTF(MPI_D_DMA, "%s:  %d: 0x%08x 0x%08x 0x%08x\n",
1583 		    DEVNAME(sc), i, sge->sg_hdr, sge->sg_hi_addr,
1584 		    sge->sg_lo_addr);
1585 
1586 		nsge = sge + 1;
1587 	}
1588 
1589 	/* terminate list */
1590 	sge->sg_hdr |= htole32(MPI_SGE_FL_LAST | MPI_SGE_FL_EOB |
1591 	    MPI_SGE_FL_EOL);
1592 
1593 	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
1594 	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
1595 	    BUS_DMASYNC_PREWRITE);
1596 
1597 	return (0);
1598 }
1599 
1600 void
1601 mpi_minphys(struct buf *bp, struct scsi_link *sl)
1602 {
1603 	/* XXX */
1604 	if (bp->b_bcount > MAXPHYS)
1605 		bp->b_bcount = MAXPHYS;
1606 	minphys(bp);
1607 }
1608 
1609 int
1610 mpi_scsi_probe_virtual(struct scsi_link *link)
1611 {
1612 	struct mpi_softc		*sc = link->adapter_softc;
1613 	struct mpi_cfg_hdr		hdr;
1614 	struct mpi_cfg_raid_vol_pg0	*rp0;
1615 	int				len;
1616 	int				rv;
1617 
1618 	if (!ISSET(sc->sc_flags, MPI_F_RAID))
1619 		return (0);
1620 
1621 	if (link->lun > 0)
1622 		return (0);
1623 
1624 	rv = mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL,
1625 	    0, link->target, MPI_PG_POLL, &hdr);
1626 	if (rv != 0)
1627 		return (0);
1628 
1629 	len = hdr.page_length * 4;
1630 	rp0 = malloc(len, M_TEMP, M_NOWAIT);
1631 	if (rp0 == NULL)
1632 		return (ENOMEM);
1633 
1634 	rv = mpi_req_cfg_page(sc, link->target, MPI_PG_POLL, &hdr, 1, rp0, len);
1635 	if (rv == 0)
1636 		SET(link->flags, SDEV_VIRTUAL);
1637 
1638 	free(rp0, M_TEMP);
1639 	return (0);
1640 }
1641 
1642 int
1643 mpi_scsi_probe(struct scsi_link *link)
1644 {
1645 	struct mpi_softc		*sc = link->adapter_softc;
1646 	struct mpi_ecfg_hdr		ehdr;
1647 	struct mpi_cfg_sas_dev_pg0	pg0;
1648 	u_int32_t			address;
1649 	int				rv;
1650 
1651 	rv = mpi_scsi_probe_virtual(link);
1652 	if (rv != 0)
1653 		return (rv);
1654 
1655 	if (ISSET(link->flags, SDEV_VIRTUAL))
1656 		return (0);
1657 
1658 	if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SAS)
1659 		return (0);
1660 
1661 	address = MPI_CFG_SAS_DEV_ADDR_BUS | link->target;
1662 
1663 	if (mpi_ecfg_header(sc, MPI_CONFIG_REQ_EXTPAGE_TYPE_SAS_DEVICE, 0,
1664 	    address, &ehdr) != 0)
1665 		return (EIO);
1666 
1667 	if (mpi_ecfg_page(sc, address, &ehdr, 1, &pg0, sizeof(pg0)) != 0)
1668 		return (0);
1669 
1670 	DNPRINTF(MPI_D_MISC, "%s: mpi_scsi_probe sas dev pg 0 for target %d:\n",
1671 	    DEVNAME(sc), link->target);
1672 	DNPRINTF(MPI_D_MISC, "%s:  slot: 0x%04x enc_handle: 0x%04x\n",
1673 	    DEVNAME(sc), letoh16(pg0.slot), letoh16(pg0.enc_handle));
1674 	DNPRINTF(MPI_D_MISC, "%s:  sas_addr: 0x%016llx\n", DEVNAME(sc),
1675 	    letoh64(pg0.sas_addr));
1676 	DNPRINTF(MPI_D_MISC, "%s:  parent_dev_handle: 0x%04x phy_num: 0x%02x "
1677 	    "access_status: 0x%02x\n", DEVNAME(sc),
1678 	    letoh16(pg0.parent_dev_handle), pg0.phy_num, pg0.access_status);
1679 	DNPRINTF(MPI_D_MISC, "%s:  dev_handle: 0x%04x "
1680 	    "bus: 0x%02x target: 0x%02x\n", DEVNAME(sc),
1681 	    letoh16(pg0.dev_handle), pg0.bus, pg0.target);
1682 	DNPRINTF(MPI_D_MISC, "%s:  device_info: 0x%08x\n", DEVNAME(sc),
1683 	    letoh32(pg0.device_info));
1684 	DNPRINTF(MPI_D_MISC, "%s:  flags: 0x%04x physical_port: 0x%02x\n",
1685 	    DEVNAME(sc), letoh16(pg0.flags), pg0.physical_port);
1686 
1687 	if (ISSET(letoh32(pg0.device_info),
1688 	    MPI_CFG_SAS_DEV_0_DEVINFO_ATAPI_DEVICE)) {
1689 		DNPRINTF(MPI_D_MISC, "%s: target %d is an ATAPI device\n",
1690 		    DEVNAME(sc), link->target);
1691 		link->flags |= SDEV_ATAPI;
1692 		link->quirks |= SDEV_ONLYBIG;
1693 	}
1694 
1695 	return (0);
1696 }
1697 
1698 u_int32_t
1699 mpi_read(struct mpi_softc *sc, bus_size_t r)
1700 {
1701 	u_int32_t			rv;
1702 
1703 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1704 	    BUS_SPACE_BARRIER_READ);
1705 	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
1706 
1707 	DNPRINTF(MPI_D_RW, "%s: mpi_read %#x %#x\n", DEVNAME(sc), r, rv);
1708 
1709 	return (rv);
1710 }
1711 
1712 void
1713 mpi_write(struct mpi_softc *sc, bus_size_t r, u_int32_t v)
1714 {
1715 	DNPRINTF(MPI_D_RW, "%s: mpi_write %#x %#x\n", DEVNAME(sc), r, v);
1716 
1717 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
1718 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1719 	    BUS_SPACE_BARRIER_WRITE);
1720 }
1721 
1722 int
1723 mpi_wait_eq(struct mpi_softc *sc, bus_size_t r, u_int32_t mask,
1724     u_int32_t target)
1725 {
1726 	int				i;
1727 
1728 	DNPRINTF(MPI_D_RW, "%s: mpi_wait_eq %#x %#x %#x\n", DEVNAME(sc), r,
1729 	    mask, target);
1730 
1731 	for (i = 0; i < 10000; i++) {
1732 		if ((mpi_read(sc, r) & mask) == target)
1733 			return (0);
1734 		delay(1000);
1735 	}
1736 
1737 	return (1);
1738 }
1739 
1740 int
1741 mpi_wait_ne(struct mpi_softc *sc, bus_size_t r, u_int32_t mask,
1742     u_int32_t target)
1743 {
1744 	int				i;
1745 
1746 	DNPRINTF(MPI_D_RW, "%s: mpi_wait_ne %#x %#x %#x\n", DEVNAME(sc), r,
1747 	    mask, target);
1748 
1749 	for (i = 0; i < 10000; i++) {
1750 		if ((mpi_read(sc, r) & mask) != target)
1751 			return (0);
1752 		delay(1000);
1753 	}
1754 
1755 	return (1);
1756 }
1757 
1758 int
1759 mpi_init(struct mpi_softc *sc)
1760 {
1761 	u_int32_t			db;
1762 	int				i;
1763 
1764 	/* spin until the IOC leaves the RESET state */
1765 	if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
1766 	    MPI_DOORBELL_STATE_RESET) != 0) {
1767 		DNPRINTF(MPI_D_MISC, "%s: mpi_init timeout waiting to leave "
1768 		    "reset state\n", DEVNAME(sc));
1769 		return (1);
1770 	}
1771 
1772 	/* check current ownership */
1773 	db = mpi_read_db(sc);
1774 	if ((db & MPI_DOORBELL_WHOINIT) == MPI_DOORBELL_WHOINIT_PCIPEER) {
1775 		DNPRINTF(MPI_D_MISC, "%s: mpi_init initialised by pci peer\n",
1776 		    DEVNAME(sc));
1777 		return (0);
1778 	}
1779 
1780 	for (i = 0; i < 5; i++) {
1781 		switch (db & MPI_DOORBELL_STATE) {
1782 		case MPI_DOORBELL_STATE_READY:
1783 			DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is ready\n",
1784 			    DEVNAME(sc));
1785 			return (0);
1786 
1787 		case MPI_DOORBELL_STATE_OPER:
1788 		case MPI_DOORBELL_STATE_FAULT:
1789 			DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is being "
1790 			    "reset\n" , DEVNAME(sc));
1791 			if (mpi_reset_soft(sc) != 0)
1792 				mpi_reset_hard(sc);
1793 			break;
1794 
1795 		case MPI_DOORBELL_STATE_RESET:
1796 			DNPRINTF(MPI_D_MISC, "%s: mpi_init waiting to come "
1797 			    "out of reset\n", DEVNAME(sc));
1798 			if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
1799 			    MPI_DOORBELL_STATE_RESET) != 0)
1800 				return (1);
1801 			break;
1802 		}
1803 		db = mpi_read_db(sc);
1804 	}
1805 
1806 	return (1);
1807 }
1808 
1809 int
1810 mpi_reset_soft(struct mpi_softc *sc)
1811 {
1812 	DNPRINTF(MPI_D_MISC, "%s: mpi_reset_soft\n", DEVNAME(sc));
1813 
1814 	if (mpi_read_db(sc) & MPI_DOORBELL_INUSE)
1815 		return (1);
1816 
1817 	mpi_write_db(sc,
1818 	    MPI_DOORBELL_FUNCTION(MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET));
1819 	if (mpi_wait_eq(sc, MPI_INTR_STATUS,
1820 	    MPI_INTR_STATUS_IOCDOORBELL, 0) != 0)
1821 		return (1);
1822 
1823 	if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
1824 	    MPI_DOORBELL_STATE_READY) != 0)
1825 		return (1);
1826 
1827 	return (0);
1828 }
1829 
1830 int
1831 mpi_reset_hard(struct mpi_softc *sc)
1832 {
1833 	DNPRINTF(MPI_D_MISC, "%s: mpi_reset_hard\n", DEVNAME(sc));
1834 
1835 	/* enable diagnostic register */
1836 	mpi_write(sc, MPI_WRITESEQ, 0xff);
1837 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_1);
1838 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_2);
1839 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_3);
1840 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_4);
1841 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_5);
1842 
1843 	/* reset ioc */
1844 	mpi_write(sc, MPI_HOSTDIAG, MPI_HOSTDIAG_RESET_ADAPTER);
1845 
1846 	delay(10000);
1847 
1848 	/* disable diagnostic register */
1849 	mpi_write(sc, MPI_WRITESEQ, 0xff);
1850 
1851 	/* restore pci bits? */
1852 
1853 	/* firmware bits? */
1854 	return (0);
1855 }
1856 
1857 int
1858 mpi_handshake_send(struct mpi_softc *sc, void *buf, size_t dwords)
1859 {
1860 	u_int32_t				*query = buf;
1861 	int					i;
1862 
1863 	/* make sure the doorbell is not in use. */
1864 	if (mpi_read_db(sc) & MPI_DOORBELL_INUSE)
1865 		return (1);
1866 
1867 	/* clear pending doorbell interrupts */
1868 	if (mpi_read_intr(sc) & MPI_INTR_STATUS_DOORBELL)
1869 		mpi_write_intr(sc, 0);
1870 
1871 	/*
1872 	 * first write the doorbell with the handshake function and the
1873 	 * dword count.
1874 	 */
1875 	mpi_write_db(sc, MPI_DOORBELL_FUNCTION(MPI_FUNCTION_HANDSHAKE) |
1876 	    MPI_DOORBELL_DWORDS(dwords));
1877 
1878 	/*
1879 	 * the doorbell used bit will be set because a doorbell function has
1880 	 * started. Wait for the interrupt and then ack it.
1881 	 */
1882 	if (mpi_wait_db_int(sc) != 0)
1883 		return (1);
1884 	mpi_write_intr(sc, 0);
1885 
1886 	/* poll for the acknowledgement. */
1887 	if (mpi_wait_db_ack(sc) != 0)
1888 		return (1);
1889 
1890 	/* write the query through the doorbell. */
1891 	for (i = 0; i < dwords; i++) {
1892 		mpi_write_db(sc, htole32(query[i]));
1893 		if (mpi_wait_db_ack(sc) != 0)
1894 			return (1);
1895 	}
1896 
1897 	return (0);
1898 }
1899 
1900 int
1901 mpi_handshake_recv_dword(struct mpi_softc *sc, u_int32_t *dword)
1902 {
1903 	u_int16_t				*words = (u_int16_t *)dword;
1904 	int					i;
1905 
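	/* the ioc returns each dword as two 16 bit reads of the doorbell */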
1906 	for (i = 0; i < 2; i++) {
1907 		if (mpi_wait_db_int(sc) != 0)
1908 			return (1);
1909 		words[i] = letoh16(mpi_read_db(sc) & MPI_DOORBELL_DATA_MASK);
1910 		mpi_write_intr(sc, 0);
1911 	}
1912 
1913 	return (0);
1914 }
1915 
1916 int
1917 mpi_handshake_recv(struct mpi_softc *sc, void *buf, size_t dwords)
1918 {
1919 	struct mpi_msg_reply			*reply = buf;
1920 	u_int32_t				*dbuf = buf, dummy;
1921 	int					i;
1922 
1923 	/* get the first dword so we can read the length out of the header. */
1924 	if (mpi_handshake_recv_dword(sc, &dbuf[0]) != 0)
1925 		return (1);
1926 
1927 	DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dwords: %d reply: %d\n",
1928 	    DEVNAME(sc), dwords, reply->msg_length);
1929 
1930 	/*
1931 	 * the total length, in dwords, is in the message length field of the
1932 	 * reply header.
1933 	 */
1934 	for (i = 1; i < MIN(dwords, reply->msg_length); i++) {
1935 		if (mpi_handshake_recv_dword(sc, &dbuf[i]) != 0)
1936 			return (1);
1937 	}
1938 
1939 	/* if there's extra stuff to come off the ioc, discard it */
1940 	while (i++ < reply->msg_length) {
1941 		if (mpi_handshake_recv_dword(sc, &dummy) != 0)
1942 			return (1);
1943 		DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dummy read: "
1944 		    "0x%08x\n", DEVNAME(sc), dummy);
1945 	}
1946 
1947 	/* wait for the doorbell used bit to be reset and clear the intr */
1948 	if (mpi_wait_db_int(sc) != 0)
1949 		return (1);
1950 	mpi_write_intr(sc, 0);
1951 
1952 	return (0);
1953 }
1954 
1955 void
1956 mpi_empty_done(struct mpi_ccb *ccb)
1957 {
1958 	/* nothing to do */
1959 }
1960 
1961 int
1962 mpi_iocfacts(struct mpi_softc *sc)
1963 {
1964 	struct mpi_msg_iocfacts_request		ifq;
1965 	struct mpi_msg_iocfacts_reply		ifp;
1966 
1967 	DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts\n", DEVNAME(sc));
1968 
1969 	bzero(&ifq, sizeof(ifq));
1970 	bzero(&ifp, sizeof(ifp));
1971 
1972 	ifq.function = MPI_FUNCTION_IOC_FACTS;
1973 	ifq.chain_offset = 0;
1974 	ifq.msg_flags = 0;
1975 	ifq.msg_context = htole32(0xdeadbeef);
1976 
1977 	if (mpi_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
1978 		DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts send failed\n",
1979 		    DEVNAME(sc));
1980 		return (1);
1981 	}
1982 
1983 	if (mpi_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
1984 		DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts recv failed\n",
1985 		    DEVNAME(sc));
1986 		return (1);
1987 	}
1988 
1989 	DNPRINTF(MPI_D_MISC, "%s:  func: 0x%02x len: %d msgver: %d.%d\n",
1990 	    DEVNAME(sc), ifp.function, ifp.msg_length,
1991 	    ifp.msg_version_maj, ifp.msg_version_min);
1992 	DNPRINTF(MPI_D_MISC, "%s:  msgflags: 0x%02x iocnumber: 0x%02x "
1993 	    "hdrver: %d.%d\n", DEVNAME(sc), ifp.msg_flags,
1994 	    ifp.ioc_number, ifp.header_version_maj,
1995 	    ifp.header_version_min);
1996 	DNPRINTF(MPI_D_MISC, "%s:  message context: 0x%08x\n", DEVNAME(sc),
1997 	    letoh32(ifp.msg_context));
1998 	DNPRINTF(MPI_D_MISC, "%s:  iocstatus: 0x%04x ioexcept: 0x%04x\n",
1999 	    DEVNAME(sc), letoh16(ifp.ioc_status),
2000 	    letoh16(ifp.ioc_exceptions));
2001 	DNPRINTF(MPI_D_MISC, "%s:  iocloginfo: 0x%08x\n", DEVNAME(sc),
2002 	    letoh32(ifp.ioc_loginfo));
2003 	DNPRINTF(MPI_D_MISC, "%s:  flags: 0x%02x blocksize: %d whoinit: 0x%02x "
2004 	    "maxchdepth: %d\n", DEVNAME(sc), ifp.flags,
2005 	    ifp.block_size, ifp.whoinit, ifp.max_chain_depth);
2006 	DNPRINTF(MPI_D_MISC, "%s:  reqfrsize: %d replyqdepth: %d\n",
2007 	    DEVNAME(sc), letoh16(ifp.request_frame_size),
2008 	    letoh16(ifp.reply_queue_depth));
2009 	DNPRINTF(MPI_D_MISC, "%s:  productid: 0x%04x\n", DEVNAME(sc),
2010 	    letoh16(ifp.product_id));
2011 	DNPRINTF(MPI_D_MISC, "%s:  hostmfahiaddr: 0x%08x\n", DEVNAME(sc),
2012 	    letoh32(ifp.current_host_mfa_hi_addr));
2013 	DNPRINTF(MPI_D_MISC, "%s:  event_state: 0x%02x number_of_ports: %d "
2014 	    "global_credits: %d\n",
2015 	    DEVNAME(sc), ifp.event_state, ifp.number_of_ports,
2016 	    letoh16(ifp.global_credits));
2017 	DNPRINTF(MPI_D_MISC, "%s:  sensebufhiaddr: 0x%08x\n", DEVNAME(sc),
2018 	    letoh32(ifp.current_sense_buffer_hi_addr));
2019 	DNPRINTF(MPI_D_MISC, "%s:  maxbus: %d maxdev: %d replyfrsize: %d\n",
2020 	    DEVNAME(sc), ifp.max_buses, ifp.max_devices,
2021 	    letoh16(ifp.current_reply_frame_size));
2022 	DNPRINTF(MPI_D_MISC, "%s:  fw_image_size: %d\n", DEVNAME(sc),
2023 	    letoh32(ifp.fw_image_size));
2024 	DNPRINTF(MPI_D_MISC, "%s:  ioc_capabilities: 0x%08x\n", DEVNAME(sc),
2025 	    letoh32(ifp.ioc_capabilities));
2026 	DNPRINTF(MPI_D_MISC, "%s:  fw_version: %d.%d fw_version_unit: 0x%02x "
2027 	    "fw_version_dev: 0x%02x\n", DEVNAME(sc),
2028 	    ifp.fw_version_maj, ifp.fw_version_min,
2029 	    ifp.fw_version_unit, ifp.fw_version_dev);
2030 	DNPRINTF(MPI_D_MISC, "%s:  hi_priority_queue_depth: 0x%04x\n",
2031 	    DEVNAME(sc), letoh16(ifp.hi_priority_queue_depth));
2032 	DNPRINTF(MPI_D_MISC, "%s:  host_page_buffer_sge: hdr: 0x%08x "
2033 	    "addr 0x%08x %08x\n", DEVNAME(sc),
2034 	    letoh32(ifp.host_page_buffer_sge.sg_hdr),
2035 	    letoh32(ifp.host_page_buffer_sge.sg_hi_addr),
2036 	    letoh32(ifp.host_page_buffer_sge.sg_lo_addr));
2037 
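	/* stash the parts of the facts reply the rest of the driver uses */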
2038 	sc->sc_maxcmds = letoh16(ifp.global_credits);
2039 	sc->sc_maxchdepth = ifp.max_chain_depth;
2040 	sc->sc_ioc_number = ifp.ioc_number;
2041 	if (sc->sc_flags & MPI_F_SPI)
2042 		sc->sc_buswidth = 16;
2043 	else
2044 		sc->sc_buswidth =
2045 		    (ifp.max_devices == 0) ? 256 : ifp.max_devices;
2046 	if (ifp.flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)
2047 		sc->sc_fw_len = letoh32(ifp.fw_image_size);
2048 
2049 	sc->sc_repq = MIN(MPI_REPLYQ_DEPTH, letoh16(ifp.reply_queue_depth));
2050 
2051 	/*
2052 	 * you can fit sg elements on the end of the io cmd if they fit in the
2053 	 * request frame size.
2054 	 */
2055 	sc->sc_first_sgl_len = ((letoh16(ifp.request_frame_size) * 4) -
2056 	    sizeof(struct mpi_msg_scsi_io)) / sizeof(struct mpi_sge);
2057 	DNPRINTF(MPI_D_MISC, "%s:   first sgl len: %d\n", DEVNAME(sc),
2058 	    sc->sc_first_sgl_len);
2059 
2060 	sc->sc_chain_len = (letoh16(ifp.request_frame_size) * 4) /
2061 	    sizeof(struct mpi_sge);
2062 	DNPRINTF(MPI_D_MISC, "%s:   chain len: %d\n", DEVNAME(sc),
2063 	    sc->sc_chain_len);
2064 
2065 	/* the sgl trailing the io cmd loses an entry to the chain element. */
2066 	sc->sc_max_sgl_len = MPI_MAX_SGL - 1;
2067 	/* the sgl chains lose an entry for each chain element */
2068 	sc->sc_max_sgl_len -= (MPI_MAX_SGL - sc->sc_first_sgl_len) /
2069 	    sc->sc_chain_len;
2070 	DNPRINTF(MPI_D_MISC, "%s:   max sgl len: %d\n", DEVNAME(sc),
2071 	    sc->sc_max_sgl_len);
2072 
2073 	/* XXX we're ignoring the max chain depth */
2074 
2075 	return (0);
2076 }
2077 
2078 int
2079 mpi_iocinit(struct mpi_softc *sc)
2080 {
2081 	struct mpi_msg_iocinit_request		iiq;
2082 	struct mpi_msg_iocinit_reply		iip;
2083 	u_int32_t				hi_addr;
2084 
2085 	DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit\n", DEVNAME(sc));
2086 
2087 	bzero(&iiq, sizeof(iiq));
2088 	bzero(&iip, sizeof(iip));
2089 
2090 	iiq.function = MPI_FUNCTION_IOC_INIT;
2091 	iiq.whoinit = MPI_WHOINIT_HOST_DRIVER;
2092 
2093 	iiq.max_devices = (sc->sc_buswidth == 256) ? 0 : sc->sc_buswidth;
2094 	iiq.max_buses = 1;
2095 
2096 	iiq.msg_context = htole32(0xd00fd00f);
2097 
2098 	iiq.reply_frame_size = htole16(MPI_REPLY_SIZE);
2099 
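	/*
	 * the request frames and sense buffers share the upper 32 bits of
	 * the request dma area's address.
	 */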
2100 	hi_addr = (u_int32_t)((u_int64_t)MPI_DMA_DVA(sc->sc_requests) >> 32);
2101 	iiq.host_mfa_hi_addr = htole32(hi_addr);
2102 	iiq.sense_buffer_hi_addr = htole32(hi_addr);
2103 
2104 	iiq.msg_version_maj = 0x01;
2105 	iiq.msg_version_min = 0x02;
2106 
2107 	iiq.hdr_version_unit = 0x0d;
2108 	iiq.hdr_version_dev = 0x00;
2109 
2110 	if (mpi_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) {
2111 		DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit send failed\n",
2112 		    DEVNAME(sc));
2113 		return (1);
2114 	}
2115 
2116 	if (mpi_handshake_recv(sc, &iip, dwordsof(iip)) != 0) {
2117 		DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit recv failed\n",
2118 		    DEVNAME(sc));
2119 		return (1);
2120 	}
2121 
2122 	DNPRINTF(MPI_D_MISC, "%s:  function: 0x%02x msg_length: %d "
2123 	    "whoinit: 0x%02x\n", DEVNAME(sc), iip.function,
2124 	    iip.msg_length, iip.whoinit);
2125 	DNPRINTF(MPI_D_MISC, "%s:  msg_flags: 0x%02x max_buses: %d "
2126 	    "max_devices: %d flags: 0x%02x\n", DEVNAME(sc), iip.msg_flags,
2127 	    iip.max_buses, iip.max_devices, iip.flags);
2128 	DNPRINTF(MPI_D_MISC, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2129 	    letoh32(iip.msg_context));
2130 	DNPRINTF(MPI_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2131 	    letoh16(iip.ioc_status));
2132 	DNPRINTF(MPI_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2133 	    letoh32(iip.ioc_loginfo));
2134 
2135 	return (0);
2136 }
2137 
2138 int
2139 mpi_portfacts(struct mpi_softc *sc)
2140 {
2141 	struct mpi_ccb				*ccb;
2142 	struct mpi_msg_portfacts_request	*pfq;
2143 	volatile struct mpi_msg_portfacts_reply	*pfp;
2144 	int					rv = 1;
2145 
2146 	DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts\n", DEVNAME(sc));
2147 
2148 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
2149 	if (ccb == NULL) {
2150 		DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts ccb_get\n",
2151 		    DEVNAME(sc));
2152 		return (rv);
2153 	}
2154 
2155 	ccb->ccb_done = mpi_empty_done;
2156 	pfq = ccb->ccb_cmd;
2157 
2158 	pfq->function = MPI_FUNCTION_PORT_FACTS;
2159 	pfq->chain_offset = 0;
2160 	pfq->msg_flags = 0;
2161 	pfq->port_number = 0;
2162 	pfq->msg_context = htole32(ccb->ccb_id);
2163 
2164 	if (mpi_poll(sc, ccb, 50000) != 0) {
2165 		DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts poll\n", DEVNAME(sc));
2166 		goto err;
2167 	}
2168 
2169 	if (ccb->ccb_rcb == NULL) {
2170 		DNPRINTF(MPI_D_MISC, "%s: empty portfacts reply\n",
2171 		    DEVNAME(sc));
2172 		goto err;
2173 	}
2174 	pfp = ccb->ccb_rcb->rcb_reply;
2175 
2176 	DNPRINTF(MPI_D_MISC, "%s:  function: 0x%02x msg_length: %d\n",
2177 	    DEVNAME(sc), pfp->function, pfp->msg_length);
2178 	DNPRINTF(MPI_D_MISC, "%s:  msg_flags: 0x%02x port_number: %d\n",
2179 	    DEVNAME(sc), pfp->msg_flags, pfp->port_number);
2180 	DNPRINTF(MPI_D_MISC, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2181 	    letoh32(pfp->msg_context));
2182 	DNPRINTF(MPI_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2183 	    letoh16(pfp->ioc_status));
2184 	DNPRINTF(MPI_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2185 	    letoh32(pfp->ioc_loginfo));
2186 	DNPRINTF(MPI_D_MISC, "%s:  max_devices: %d port_type: 0x%02x\n",
2187 	    DEVNAME(sc), letoh16(pfp->max_devices), pfp->port_type);
2188 	DNPRINTF(MPI_D_MISC, "%s:  protocol_flags: 0x%04x port_scsi_id: %d\n",
2189 	    DEVNAME(sc), letoh16(pfp->protocol_flags),
2190 	    letoh16(pfp->port_scsi_id));
2191 	DNPRINTF(MPI_D_MISC, "%s:  max_persistent_ids: %d "
2192 	    "max_posted_cmd_buffers: %d\n", DEVNAME(sc),
2193 	    letoh16(pfp->max_persistent_ids),
2194 	    letoh16(pfp->max_posted_cmd_buffers));
2195 	DNPRINTF(MPI_D_MISC, "%s:  max_lan_buckets: %d\n", DEVNAME(sc),
2196 	    letoh16(pfp->max_lan_buckets));
2197 
2198 	sc->sc_porttype = pfp->port_type;
2199 	if (sc->sc_target == -1)
2200 		sc->sc_target = letoh16(pfp->port_scsi_id);
2201 
2202 	mpi_push_reply(sc, ccb->ccb_rcb);
2203 	rv = 0;
2204 err:
2205 	scsi_io_put(&sc->sc_iopool, ccb);
2206 
2207 	return (rv);
2208 }
2209 
2210 int
2211 mpi_cfg_coalescing(struct mpi_softc *sc)
2212 {
2213 	struct mpi_cfg_hdr		hdr;
2214 	struct mpi_cfg_ioc_pg1		pg;
2215 	u_int32_t			flags;
2216 
2217 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 1, 0, &hdr) != 0) {
2218 		DNPRINTF(MPI_D_MISC, "%s: unable to fetch IOC page 1 header\n",
2219 		    DEVNAME(sc));
2220 		return (1);
2221 	}
2222 
2223 	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg, sizeof(pg)) != 0) {
2224 		DNPRINTF(MPI_D_MISC, "%s: unable to fetch IOC page 1\n",
2225 		    DEVNAME(sc));
2226 		return (1);
2227 	}
2228 
2229 	DNPRINTF(MPI_D_MISC, "%s: IOC page 1\n", DEVNAME(sc));
2230 	DNPRINTF(MPI_D_MISC, "%s:  flags: 0x%08x\n", DEVNAME(sc),
2231 	    letoh32(pg.flags));
2232 	DNPRINTF(MPI_D_MISC, "%s:  coalescing_timeout: %d\n", DEVNAME(sc),
2233 	    letoh32(pg.coalescing_timeout));
2234 	DNPRINTF(MPI_D_MISC, "%s:  coalescing_depth: %d pci_slot_num: %d\n",
2235 	    DEVNAME(sc), pg.coalescing_depth, pg.pci_slot_num);
2236 
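	/*
	 * if the ioc has reply coalescing enabled, clear it and write the
	 * page back.
	 */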
2237 	flags = letoh32(pg.flags);
2238 	if (!ISSET(flags, MPI_CFG_IOC_1_REPLY_COALESCING))
2239 		return (0);
2240 
2241 	CLR(pg.flags, htole32(MPI_CFG_IOC_1_REPLY_COALESCING));
2242 	if (mpi_cfg_page(sc, 0, &hdr, 0, &pg, sizeof(pg)) != 0) {
2243 		DNPRINTF(MPI_D_MISC, "%s: unable to clear coalescing\n",
2244 		    DEVNAME(sc));
2245 		return (1);
2246 	}
2247 
2248 	return (0);
2249 }
2250 
2251 int
2252 mpi_eventnotify(struct mpi_softc *sc)
2253 {
2254 	struct mpi_ccb				*ccb;
2255 	struct mpi_msg_event_request		*enq;
2256 
2257 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
2258 	if (ccb == NULL) {
2259 		DNPRINTF(MPI_D_MISC, "%s: mpi_eventnotify ccb_get\n",
2260 		    DEVNAME(sc));
2261 		return (1);
2262 	}
2263 
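	/* set up the event ack machinery before events can start arriving */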
2264 	sc->sc_evt_ccb = ccb;
2265 	SIMPLEQ_INIT(&sc->sc_evt_ack_queue);
2266 	mtx_init(&sc->sc_evt_ack_mtx, IPL_BIO);
2267 	scsi_ioh_set(&sc->sc_evt_ack_handler, &sc->sc_iopool,
2268 	    mpi_eventack, sc);
2269 
2270 	ccb->ccb_done = mpi_eventnotify_done;
2271 	enq = ccb->ccb_cmd;
2272 
2273 	enq->function = MPI_FUNCTION_EVENT_NOTIFICATION;
2274 	enq->chain_offset = 0;
2275 	enq->event_switch = MPI_EVENT_SWITCH_ON;
2276 	enq->msg_context = htole32(ccb->ccb_id);
2277 
2278 	mpi_start(sc, ccb);
2279 	return (0);
2280 }
2281 
2282 void
2283 mpi_eventnotify_done(struct mpi_ccb *ccb)
2284 {
2285 	struct mpi_softc			*sc = ccb->ccb_sc;
2286 	struct mpi_rcb				*rcb = ccb->ccb_rcb;
2287 	struct mpi_msg_event_reply		*enp = rcb->rcb_reply;
2288 
2289 	DNPRINTF(MPI_D_EVT, "%s: mpi_eventnotify_done\n", DEVNAME(sc));
2290 
2291 	DNPRINTF(MPI_D_EVT, "%s:  function: 0x%02x msg_length: %d "
2292 	    "data_length: %d\n", DEVNAME(sc), enp->function, enp->msg_length,
2293 	    letoh16(enp->data_length));
2294 	DNPRINTF(MPI_D_EVT, "%s:  ack_required: %d msg_flags 0x%02x\n",
2295 	    DEVNAME(sc), enp->ack_required, enp->msg_flags);
2296 	DNPRINTF(MPI_D_EVT, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2297 	    letoh32(enp->msg_context));
2298 	DNPRINTF(MPI_D_EVT, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2299 	    letoh16(enp->ioc_status));
2300 	DNPRINTF(MPI_D_EVT, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2301 	    letoh32(enp->ioc_loginfo));
2302 	DNPRINTF(MPI_D_EVT, "%s:  event: 0x%08x\n", DEVNAME(sc),
2303 	    letoh32(enp->event));
2304 	DNPRINTF(MPI_D_EVT, "%s:  event_context: 0x%08x\n", DEVNAME(sc),
2305 	    letoh32(enp->event_context));
2306 
2307 	switch (letoh32(enp->event)) {
2308 	/* ignore these */
2309 	case MPI_EVENT_EVENT_CHANGE:
2310 	case MPI_EVENT_SAS_PHY_LINK_STATUS:
2311 		break;
2312 
2313 	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2314 		if (sc->sc_scsibus == NULL)
2315 			break;
2316 
2317 		if (mpi_evt_sas(sc, rcb) != 0) {
2318 			/* reply is freed later on */
2319 			return;
2320 		}
2321 		break;
2322 
2323 	case MPI_EVENT_RESCAN:
2324 		if (sc->sc_scsibus != NULL &&
2325 		    sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_FC)
2326 			mpi_evt_fc_rescan(sc);
2327 		break;
2328 
2329 	default:
2330 		DNPRINTF(MPI_D_EVT, "%s:  unhandled event 0x%02x\n",
2331 		    DEVNAME(sc), letoh32(enp->event));
2332 		break;
2333 	}
2334 
2335 	mpi_eventnotify_free(sc, rcb);
2336 }
2337 
2338 void
2339 mpi_eventnotify_free(struct mpi_softc *sc, struct mpi_rcb *rcb)
2340 {
2341 	struct mpi_msg_event_reply		*enp = rcb->rcb_reply;
2342 
2343 	if (enp->ack_required) {
2344 		mtx_enter(&sc->sc_evt_ack_mtx);
2345 		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_ack_queue, rcb, rcb_link);
2346 		mtx_leave(&sc->sc_evt_ack_mtx);
2347 		scsi_ioh_add(&sc->sc_evt_ack_handler);
2348 	} else
2349 		mpi_push_reply(sc, rcb);
2350 }
2351 
2352 int
2353 mpi_evt_sas(struct mpi_softc *sc, struct mpi_rcb *rcb)
2354 {
2355 	struct mpi_evt_sas_change		*ch;
2356 	u_int8_t				*data;
2357 
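	/* the sas change data follows the event reply header */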
2358 	data = rcb->rcb_reply;
2359 	data += sizeof(struct mpi_msg_event_reply);
2360 	ch = (struct mpi_evt_sas_change *)data;
2361 
2362 	if (ch->bus != 0)
2363 		return (0);
2364 
2365 	switch (ch->reason) {
2366 	case MPI_EVT_SASCH_REASON_ADDED:
2367 	case MPI_EVT_SASCH_REASON_NO_PERSIST_ADDED:
2368 		if (scsi_req_probe(sc->sc_scsibus, ch->target, -1) != 0) {
2369 			printf("%s: unable to request attach of %d\n",
2370 			    DEVNAME(sc), ch->target);
2371 		}
2372 		break;
2373 
2374 	case MPI_EVT_SASCH_REASON_NOT_RESPONDING:
2375 		scsi_activate(sc->sc_scsibus, ch->target, -1, DVACT_DEACTIVATE);
2376 
2377 		mtx_enter(&sc->sc_evt_scan_mtx);
2378 		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_scan_queue, rcb, rcb_link);
2379 		mtx_leave(&sc->sc_evt_scan_mtx);
2380 		scsi_ioh_add(&sc->sc_evt_scan_handler);
2381 
2382 		/* we'll handle event ack later on */
2383 		return (1);
2384 
2385 	case MPI_EVT_SASCH_REASON_SMART_DATA:
2386 	case MPI_EVT_SASCH_REASON_UNSUPPORTED:
2387 	case MPI_EVT_SASCH_REASON_INTERNAL_RESET:
2388 		break;
2389 	default:
2390 		printf("%s: unknown reason for SAS device status change: "
2391 		    "0x%02x\n", DEVNAME(sc), ch->reason);
2392 		break;
2393 	}
2394 
2395 	return (0);
2396 }
2397 
2398 void
2399 mpi_evt_sas_detach(void *cookie, void *io)
2400 {
2401 	struct mpi_softc			*sc = cookie;
2402 	struct mpi_ccb				*ccb = io;
2403 	struct mpi_rcb				*rcb, *next;
2404 	struct mpi_msg_event_reply		*enp;
2405 	struct mpi_evt_sas_change		*ch;
2406 	struct mpi_msg_scsi_task_request	*str;
2407 
2408 	DNPRINTF(MPI_D_EVT, "%s: event sas detach handler\n", DEVNAME(sc));
2409 
2410 	mtx_enter(&sc->sc_evt_scan_mtx);
2411 	rcb = SIMPLEQ_FIRST(&sc->sc_evt_scan_queue);
2412 	if (rcb != NULL) {
2413 		next = SIMPLEQ_NEXT(rcb, rcb_link);
2414 		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_scan_queue, rcb_link);
2415 	}
2416 	mtx_leave(&sc->sc_evt_scan_mtx);
2417 
2418 	if (rcb == NULL) {
2419 		scsi_io_put(&sc->sc_iopool, ccb);
2420 		return;
2421 	}
2422 
2423 	enp = rcb->rcb_reply;
2424 	ch = (struct mpi_evt_sas_change *)(enp + 1);
2425 
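	/* reset the target; the detach is requested from the done handler */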
2426 	ccb->ccb_done = mpi_evt_sas_detach_done;
2427 	str = ccb->ccb_cmd;
2428 
2429 	str->target_id = ch->target;
2430 	str->bus = 0;
2431 	str->function = MPI_FUNCTION_SCSI_TASK_MGMT;
2432 
2433 	str->task_type = MPI_MSG_SCSI_TASK_TYPE_TARGET_RESET;
2434 
2435 	str->msg_context = htole32(ccb->ccb_id);
2436 
2437 	mpi_eventnotify_free(sc, rcb);
2438 
2439 	mpi_start(sc, ccb);
2440 
2441 	if (next != NULL)
2442 		scsi_ioh_add(&sc->sc_evt_scan_handler);
2443 }
2444 
2445 void
2446 mpi_evt_sas_detach_done(struct mpi_ccb *ccb)
2447 {
2448 	struct mpi_softc			*sc = ccb->ccb_sc;
2449 	struct mpi_msg_scsi_task_reply		*r = ccb->ccb_rcb->rcb_reply;
2450 
2451 	if (scsi_req_detach(sc->sc_scsibus, r->target_id, -1,
2452 	    DETACH_FORCE) != 0) {
2453 		printf("%s: unable to request detach of %d\n",
2454 		    DEVNAME(sc), r->target_id);
2455 	}
2456 
2457 	mpi_push_reply(sc, ccb->ccb_rcb);
2458 	scsi_io_put(&sc->sc_iopool, ccb);
2459 }
2460 
2461 void
2462 mpi_evt_fc_rescan(struct mpi_softc *sc)
2463 {
2464 	int					queue = 1;
2465 
2466 	mtx_enter(&sc->sc_evt_rescan_mtx);
2467 	if (sc->sc_evt_rescan_sem)
2468 		queue = 0;
2469 	else
2470 		sc->sc_evt_rescan_sem = 1;
2471 	mtx_leave(&sc->sc_evt_rescan_mtx);
2472 
2473 	if (queue) {
2474 		workq_queue_task(NULL, &sc->sc_evt_rescan, 0,
2475 		    mpi_fc_rescan, sc, NULL);
2476 	}
2477 }
2478 
2479 void
2480 mpi_fc_rescan(void *xsc, void *xarg)
2481 {
2482 	struct mpi_softc			*sc = xsc;
2483 	struct mpi_cfg_hdr			hdr;
2484 	struct mpi_cfg_fc_device_pg0		pg;
2485 	struct scsi_link			*link;
2486 	u_int8_t				devmap[256 / NBBY];
2487 	u_int32_t				id = 0xffffff;
2488 	int					i;
2489 
2490 	mtx_enter(&sc->sc_evt_rescan_mtx);
2491 	sc->sc_evt_rescan_sem = 0;
2492 	mtx_leave(&sc->sc_evt_rescan_mtx);
2493 
2494 	bzero(devmap, sizeof(devmap));
2495 
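	/* walk the fc device pages to mark which targets are present */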
2496 	do {
2497 		if (mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_DEV, 0,
2498 		    id, 0, &hdr) != 0) {
2499 			printf("%s: header get for rescan of 0x%08x failed\n",
2500 			    DEVNAME(sc), id);
2501 			return;
2502 		}
2503 
2504 		bzero(&pg, sizeof(pg));
2505 		if (mpi_req_cfg_page(sc, id, 0, &hdr, 1, &pg, sizeof(pg)) != 0)
2506 			break;
2507 
2508 		if (ISSET(pg.flags, MPI_CFG_FC_DEV_0_FLAGS_BUSADDR_VALID) &&
2509 		    pg.current_bus == 0)
2510 			setbit(devmap, pg.current_target_id);
2511 
2512 		id = htole32(pg.port_id);
2513 	} while (id <= 0xff0000);
2514 
2515 	for (i = 0; i < sc->sc_buswidth; i++) {
2516 		link = scsi_get_link(sc->sc_scsibus, i, 0);
2517 
2518 		if (isset(devmap, i)) {
2519 			if (link == NULL)
2520 				scsi_probe_target(sc->sc_scsibus, i);
2521 		} else {
2522 			if (link != NULL) {
2523 				scsi_activate(sc->sc_scsibus, i, -1,
2524 				    DVACT_DEACTIVATE);
2525 				scsi_detach_target(sc->sc_scsibus, i,
2526 				    DETACH_FORCE);
2527 			}
2528 		}
2529 	}
2530 }
2531 
2532 void
2533 mpi_eventack(void *cookie, void *io)
2534 {
2535 	struct mpi_softc			*sc = cookie;
2536 	struct mpi_ccb				*ccb = io;
2537 	struct mpi_rcb				*rcb, *next;
2538 	struct mpi_msg_event_reply		*enp;
2539 	struct mpi_msg_eventack_request		*eaq;
2540 
2541 	DNPRINTF(MPI_D_EVT, "%s: event ack\n", DEVNAME(sc));
2542 
2543 	mtx_enter(&sc->sc_evt_ack_mtx);
2544 	rcb = SIMPLEQ_FIRST(&sc->sc_evt_ack_queue);
2545 	if (rcb != NULL) {
2546 		next = SIMPLEQ_NEXT(rcb, rcb_link);
2547 		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_ack_queue, rcb_link);
2548 	}
2549 	mtx_leave(&sc->sc_evt_ack_mtx);
2550 
2551 	if (rcb == NULL) {
2552 		scsi_io_put(&sc->sc_iopool, ccb);
2553 		return;
2554 	}
2555 
2556 	enp = rcb->rcb_reply;
2557 
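	/* ack the event using the event and context values from the reply */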
2558 	ccb->ccb_done = mpi_eventack_done;
2559 	eaq = ccb->ccb_cmd;
2560 
2561 	eaq->function = MPI_FUNCTION_EVENT_ACK;
2562 	eaq->msg_context = htole32(ccb->ccb_id);
2563 
2564 	eaq->event = enp->event;
2565 	eaq->event_context = enp->event_context;
2566 
2567 	mpi_push_reply(sc, rcb);
2568 	mpi_start(sc, ccb);
2569 
2570 	if (next != NULL)
2571 		scsi_ioh_add(&sc->sc_evt_ack_handler);
2572 }
2573 
2574 void
2575 mpi_eventack_done(struct mpi_ccb *ccb)
2576 {
2577 	struct mpi_softc			*sc = ccb->ccb_sc;
2578 
2579 	DNPRINTF(MPI_D_EVT, "%s: event ack done\n", DEVNAME(sc));
2580 
2581 	mpi_push_reply(sc, ccb->ccb_rcb);
2582 	scsi_io_put(&sc->sc_iopool, ccb);
2583 }
2584 
2585 int
2586 mpi_portenable(struct mpi_softc *sc)
2587 {
2588 	struct mpi_ccb				*ccb;
2589 	struct mpi_msg_portenable_request	*peq;
2590 	int					rv = 0;
2591 
2592 	DNPRINTF(MPI_D_MISC, "%s: mpi_portenable\n", DEVNAME(sc));
2593 
2594 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
2595 	if (ccb == NULL) {
2596 		DNPRINTF(MPI_D_MISC, "%s: mpi_portenable ccb_get\n",
2597 		    DEVNAME(sc));
2598 		return (1);
2599 	}
2600 
2601 	ccb->ccb_done = mpi_empty_done;
2602 	peq = ccb->ccb_cmd;
2603 
2604 	peq->function = MPI_FUNCTION_PORT_ENABLE;
2605 	peq->port_number = 0;
2606 	peq->msg_context = htole32(ccb->ccb_id);
2607 
2608 	if (mpi_poll(sc, ccb, 50000) != 0) {
2609 		DNPRINTF(MPI_D_MISC, "%s: mpi_portenable poll\n", DEVNAME(sc));
2610 		return (1);
2611 	}
2612 
2613 	if (ccb->ccb_rcb == NULL) {
2614 		DNPRINTF(MPI_D_MISC, "%s: empty portenable reply\n",
2615 		    DEVNAME(sc));
2616 		rv = 1;
2617 	} else
2618 		mpi_push_reply(sc, ccb->ccb_rcb);
2619 
2620 	scsi_io_put(&sc->sc_iopool, ccb);
2621 
2622 	return (rv);
2623 }
2624 
2625 int
2626 mpi_fwupload(struct mpi_softc *sc)
2627 {
2628 	struct mpi_ccb				*ccb;
2629 	struct {
2630 		struct mpi_msg_fwupload_request		req;
2631 		struct mpi_sge				sge;
2632 	} __packed				*bundle;
2633 	struct mpi_msg_fwupload_reply		*upp;
2634 	int					rv = 0;
2635 
2636 	if (sc->sc_fw_len == 0)
2637 		return (0);
2638 
2639 	DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload\n", DEVNAME(sc));
2640 
2641 	sc->sc_fw = mpi_dmamem_alloc(sc, sc->sc_fw_len);
2642 	if (sc->sc_fw == NULL) {
2643 		DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload unable to allocate %d\n",
2644 		    DEVNAME(sc), sc->sc_fw_len);
2645 		return (1);
2646 	}
2647 
2648 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
2649 	if (ccb == NULL) {
2650 		DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload ccb_get\n",
2651 		    DEVNAME(sc));
2652 		goto err;
2653 	}
2654 
2655 	ccb->ccb_done = mpi_empty_done;
2656 	bundle = ccb->ccb_cmd;
2657 
2658 	bundle->req.function = MPI_FUNCTION_FW_UPLOAD;
2659 	bundle->req.msg_context = htole32(ccb->ccb_id);
2660 
2661 	bundle->req.image_type = MPI_FWUPLOAD_IMAGETYPE_IOC_FW;
2662 
2663 	bundle->req.tce.details_length = 12;
2664 	bundle->req.tce.image_size = htole32(sc->sc_fw_len);
2665 
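	/* a single simple 64 bit sge points at the firmware buffer */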
2666 	bundle->sge.sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE |
2667 	    MPI_SGE_FL_SIZE_64 | MPI_SGE_FL_LAST | MPI_SGE_FL_EOB |
2668 	    MPI_SGE_FL_EOL | (u_int32_t)sc->sc_fw_len);
2669 	bundle->sge.sg_addr = htole64(MPI_DMA_DVA(sc->sc_fw));
2670 
2671 	if (mpi_poll(sc, ccb, 50000) != 0) {
2672 		DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload poll\n", DEVNAME(sc));
2673 		goto err;
2674 	}
2675 
2676 	if (ccb->ccb_rcb == NULL)
2677 		panic("%s: unable to do fw upload", DEVNAME(sc));
2678 	upp = ccb->ccb_rcb->rcb_reply;
2679 
2680 	if (letoh16(upp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
2681 		rv = 1;
2682 
2683 	mpi_push_reply(sc, ccb->ccb_rcb);
2684 	scsi_io_put(&sc->sc_iopool, ccb);
2685 
2686 	return (rv);
2687 
2688 err:
2689 	mpi_dmamem_free(sc, sc->sc_fw);
2690 	return (1);
2691 }
2692 
2693 void
2694 mpi_get_raid(struct mpi_softc *sc)
2695 {
2696 	struct mpi_cfg_hdr		hdr;
2697 	struct mpi_cfg_ioc_pg2		*vol_page;
2698 	size_t				pagelen;
2699 	u_int32_t			capabilities;
2700 
2701 	DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid\n", DEVNAME(sc));
2702 
2703 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 2, 0, &hdr) != 0) {
2704 		DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch header "
2705 		    "for IOC page 2\n", DEVNAME(sc));
2706 		return;
2707 	}
2708 
2709 	pagelen = hdr.page_length * 4; /* dwords to bytes */
2710 	vol_page = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL);
2711 	if (vol_page == NULL) {
2712 		DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to allocate "
2713 		    "space for ioc config page 2\n", DEVNAME(sc));
2714 		return;
2715 	}
2716 
2717 	if (mpi_cfg_page(sc, 0, &hdr, 1, vol_page, pagelen) != 0) {
2718 		DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch IOC "
2719 		    "page 2\n", DEVNAME(sc));
2720 		goto out;
2721 	}
2722 
2723 	capabilities = letoh32(vol_page->capabilities);
2724 
2725 	DNPRINTF(MPI_D_RAID, "%s:  capabilities: 0x%08x\n", DEVNAME(sc),
2726 	    letoh32(vol_page->capabilities));
2727 	DNPRINTF(MPI_D_RAID, "%s:  active_vols: %d max_vols: %d "
2728 	    "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc),
2729 	    vol_page->active_vols, vol_page->max_vols,
2730 	    vol_page->active_physdisks, vol_page->max_physdisks);
2731 
2732 	/* don't walk the list if there is no RAID capability */
2733 	if (capabilities == 0xdeadbeef) {
2734 		printf("%s: deadbeef in raid configuration\n", DEVNAME(sc));
2735 		goto out;
2736 	}
2737 
2738 	if (ISSET(capabilities, MPI_CFG_IOC_2_CAPABILITIES_RAID))
2739 		sc->sc_flags |= MPI_F_RAID;
2740 
2741 out:
2742 	free(vol_page, M_TEMP);
2743 }
2744 
2745 int
2746 mpi_req_cfg_header(struct mpi_softc *sc, u_int8_t type, u_int8_t number,
2747     u_int32_t address, int flags, void *p)
2748 {
2749 	struct mpi_ccb				*ccb;
2750 	struct mpi_msg_config_request		*cq;
2751 	struct mpi_msg_config_reply		*cp;
2752 	struct mpi_cfg_hdr			*hdr = p;
2753 	struct mpi_ecfg_hdr			*ehdr = p;
2754 	int					etype = 0;
2755 	int					rv = 0;
2756 
2757 	DNPRINTF(MPI_D_MISC, "%s: mpi_req_cfg_header type: %#x number: %x "
2758 	    "address: 0x%08x flags: 0x%b\n", DEVNAME(sc), type, number,
2759 	    address, flags, MPI_PG_FMT);
2760 
2761 	ccb = scsi_io_get(&sc->sc_iopool,
2762 	    ISSET(flags, MPI_PG_POLL) ? SCSI_NOSLEEP : 0);
2763 	if (ccb == NULL) {
2764 		DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header ccb_get\n",
2765 		    DEVNAME(sc));
2766 		return (1);
2767 	}
2768 
2769 	if (ISSET(flags, MPI_PG_EXTENDED)) {
2770 		etype = type;
2771 		type = MPI_CONFIG_REQ_PAGE_TYPE_EXTENDED;
2772 	}
2773 
2774 	cq = ccb->ccb_cmd;
2775 
2776 	cq->function = MPI_FUNCTION_CONFIG;
2777 	cq->msg_context = htole32(ccb->ccb_id);
2778 
2779 	cq->action = MPI_CONFIG_REQ_ACTION_PAGE_HEADER;
2780 
2781 	cq->config_header.page_number = number;
2782 	cq->config_header.page_type = type;
2783 	cq->ext_page_type = etype;
2784 	cq->page_address = htole32(address);
2785 	cq->page_buffer.sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE |
2786 	    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL);
2787 
2788 	ccb->ccb_done = mpi_empty_done;
2789 	if (ISSET(flags, MPI_PG_POLL)) {
2790 		if (mpi_poll(sc, ccb, 50000) != 0) {
2791 			DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n",
2792 			    DEVNAME(sc));
2793 			return (1);
2794 		}
2795 	} else
2796 		mpi_wait(sc, ccb);
2797 
2798 	if (ccb->ccb_rcb == NULL)
2799 		panic("%s: unable to fetch config header", DEVNAME(sc));
2800 	cp = ccb->ccb_rcb->rcb_reply;
2801 
2802 	DNPRINTF(MPI_D_MISC, "%s:  action: 0x%02x msg_length: %d function: "
2803 	    "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function);
2804 	DNPRINTF(MPI_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
2805 	    "msg_flags: 0x%02x\n", DEVNAME(sc),
2806 	    letoh16(cp->ext_page_length), cp->ext_page_type,
2807 	    cp->msg_flags);
2808 	DNPRINTF(MPI_D_MISC, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2809 	    letoh32(cp->msg_context));
2810 	DNPRINTF(MPI_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2811 	    letoh16(cp->ioc_status));
2812 	DNPRINTF(MPI_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2813 	    letoh32(cp->ioc_loginfo));
2814 	DNPRINTF(MPI_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
2815 	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2816 	    cp->config_header.page_version,
2817 	    cp->config_header.page_length,
2818 	    cp->config_header.page_number,
2819 	    cp->config_header.page_type);
2820 
2821 	if (letoh16(cp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
2822 		rv = 1;
2823 	else if (ISSET(flags, MPI_PG_EXTENDED)) {
2824 		bzero(ehdr, sizeof(*ehdr));
2825 		ehdr->page_version = cp->config_header.page_version;
2826 		ehdr->page_number = cp->config_header.page_number;
2827 		ehdr->page_type = cp->config_header.page_type;
2828 		ehdr->ext_page_length = cp->ext_page_length;
2829 		ehdr->ext_page_type = cp->ext_page_type;
2830 	} else
2831 		*hdr = cp->config_header;
2832 
2833 	mpi_push_reply(sc, ccb->ccb_rcb);
2834 	scsi_io_put(&sc->sc_iopool, ccb);
2835 
2836 	return (rv);
2837 }
2838 
2839 int
2840 mpi_req_cfg_page(struct mpi_softc *sc, u_int32_t address, int flags,
2841     void *p, int read, void *page, size_t len)
2842 {
2843 	struct mpi_ccb				*ccb;
2844 	struct mpi_msg_config_request		*cq;
2845 	struct mpi_msg_config_reply		*cp;
2846 	struct mpi_cfg_hdr			*hdr = p;
2847 	struct mpi_ecfg_hdr			*ehdr = p;
2848 	char					*kva;
2849 	int					page_length;
2850 	int					rv = 0;
2851 
2852 	DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page address: %d read: %d type: %x\n",
2853 	    DEVNAME(sc), address, read, hdr->page_type);
2854 
2855 	page_length = ISSET(flags, MPI_PG_EXTENDED) ?
2856 	    letoh16(ehdr->ext_page_length) : hdr->page_length;
2857 
2858 	if (len > MPI_REQUEST_SIZE - sizeof(struct mpi_msg_config_request) ||
2859 	    len < page_length * 4)
2860 		return (1);
2861 
2862 	ccb = scsi_io_get(&sc->sc_iopool,
2863 	    ISSET(flags, MPI_PG_POLL) ? SCSI_NOSLEEP : 0);
2864 	if (ccb == NULL) {
2865 		DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page ccb_get\n", DEVNAME(sc));
2866 		return (1);
2867 	}
2868 
2869 	cq = ccb->ccb_cmd;
2870 
2871 	cq->function = MPI_FUNCTION_CONFIG;
2872 	cq->msg_context = htole32(ccb->ccb_id);
2873 
2874 	cq->action = (read ? MPI_CONFIG_REQ_ACTION_PAGE_READ_CURRENT :
2875 	    MPI_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT);
2876 
2877 	if (ISSET(flags, MPI_PG_EXTENDED)) {
2878 		cq->config_header.page_version = ehdr->page_version;
2879 		cq->config_header.page_number = ehdr->page_number;
2880 		cq->config_header.page_type = ehdr->page_type;
2881 		cq->ext_page_len = ehdr->ext_page_length;
2882 		cq->ext_page_type = ehdr->ext_page_type;
2883 	} else
2884 		cq->config_header = *hdr;
2885 	cq->config_header.page_type &= MPI_CONFIG_REQ_PAGE_TYPE_MASK;
2886 	cq->page_address = htole32(address);
2887 	cq->page_buffer.sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE |
2888 	    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL |
2889 	    (page_length * 4) |
2890 	    (read ? MPI_SGE_FL_DIR_IN : MPI_SGE_FL_DIR_OUT));
2891 
2892 	/* bounce the page via the request space to avoid more bus_dma games */
2893 	cq->page_buffer.sg_addr = htole64(ccb->ccb_cmd_dva +
2894 	    sizeof(struct mpi_msg_config_request));
2895 
2896 	kva = ccb->ccb_cmd;
2897 	kva += sizeof(struct mpi_msg_config_request);
2898 	if (!read)
2899 		bcopy(page, kva, len);
2900 
2901 	ccb->ccb_done = mpi_empty_done;
2902 	if (ISSET(flags, MPI_PG_POLL)) {
2903 		if (mpi_poll(sc, ccb, 50000) != 0) {
2904 			DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page poll\n",
2905 			    DEVNAME(sc));
2906 			return (1);
2907 		}
2908 	} else
2909 		mpi_wait(sc, ccb);
2910 
2911 	if (ccb->ccb_rcb == NULL) {
2912 		scsi_io_put(&sc->sc_iopool, ccb);
2913 		return (1);
2914 	}
2915 	cp = ccb->ccb_rcb->rcb_reply;
2916 
2917 	DNPRINTF(MPI_D_MISC, "%s:  action: 0x%02x msg_length: %d function: "
2918 	    "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function);
2919 	DNPRINTF(MPI_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
2920 	    "msg_flags: 0x%02x\n", DEVNAME(sc),
2921 	    letoh16(cp->ext_page_length), cp->ext_page_type,
2922 	    cp->msg_flags);
2923 	DNPRINTF(MPI_D_MISC, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2924 	    letoh32(cp->msg_context));
2925 	DNPRINTF(MPI_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2926 	    letoh16(cp->ioc_status));
2927 	DNPRINTF(MPI_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2928 	    letoh32(cp->ioc_loginfo));
2929 	DNPRINTF(MPI_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
2930 	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2931 	    cp->config_header.page_version,
2932 	    cp->config_header.page_length,
2933 	    cp->config_header.page_number,
2934 	    cp->config_header.page_type);
2935 
2936 	if (letoh16(cp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
2937 		rv = 1;
2938 	else if (read)
2939 		bcopy(kva, page, len);
2940 
2941 	mpi_push_reply(sc, ccb->ccb_rcb);
2942 	scsi_io_put(&sc->sc_iopool, ccb);
2943 
2944 	return (rv);
2945 }
2946 
2947 int
2948 mpi_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
2949 {
2950 	struct mpi_softc	*sc = (struct mpi_softc *)link->adapter_softc;
2951 
2952 	DNPRINTF(MPI_D_IOCTL, "%s: mpi_scsi_ioctl\n", DEVNAME(sc));
2953 
2954 	switch (cmd) {
2955 	case DIOCGCACHE:
2956 	case DIOCSCACHE:
2957 		if (ISSET(link->flags, SDEV_VIRTUAL)) {
2958 			return (mpi_ioctl_cache(link, cmd,
2959 			    (struct dk_cache *)addr));
2960 		}
2961 		break;
2962 
2963 	default:
2964 		if (sc->sc_ioctl)
2965 			return (sc->sc_ioctl(link->adapter_softc, cmd, addr));
2966 
2967 		break;
2968 	}
2969 
2970 	return (ENOTTY);
2971 }
2972 
2973 int
2974 mpi_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc)
2975 {
2976 	struct mpi_softc	*sc = (struct mpi_softc *)link->adapter_softc;
2977 	struct mpi_ccb		*ccb;
2978 	int			len, rv;
2979 	struct mpi_cfg_hdr	hdr;
2980 	struct mpi_cfg_raid_vol_pg0 *rpg0;
2981 	int			enabled;
2982 	struct mpi_msg_raid_action_request *req;
2983 	struct mpi_msg_raid_action_reply *rep;
2984 	struct mpi_raid_settings settings;
2985 
2986 	rv = mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
2987 	    link->target, MPI_PG_POLL, &hdr);
2988 	if (rv != 0)
2989 		return (EIO);
2990 
2991 	len = sizeof(*rpg0) + sc->sc_vol_page->max_physdisks *
2992 	    sizeof(struct mpi_cfg_raid_vol_pg0_physdisk);
2993 	rpg0 = malloc(len, M_TEMP, M_NOWAIT);
2994 	if (rpg0 == NULL)
2995 		return (ENOMEM);
2996 
2997 	if (mpi_req_cfg_page(sc, link->target, MPI_PG_POLL, &hdr, 1,
2998 	    rpg0, len) != 0) {
2999 		DNPRINTF(MPI_D_RAID, "%s: can't get RAID vol cfg page 0\n",
3000 		    DEVNAME(sc));
3001 		rv = EIO;
3002 		goto done;
3003 	}
3004 
3005 	enabled = ISSET(letoh16(rpg0->settings.volume_settings),
3006 	    MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN) ? 1 : 0;
3007 
3008 	if (cmd == DIOCGCACHE) {
3009 		dc->wrcache = enabled;
3010 		dc->rdcache = 0;
3011 		goto done;
3012 	} /* else DIOCSCACHE */
3013 
3014 	if (dc->rdcache) {
3015 		rv = EOPNOTSUPP;
3016 		goto done;
3017 	}
3018 
3019 	if (((dc->wrcache) ? 1 : 0) == enabled)
3020 		goto done;
3021 
3022 	settings = rpg0->settings;
3023 	if (dc->wrcache) {
3024 		SET(settings.volume_settings,
3025 		    htole16(MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN));
3026 	} else {
3027 		CLR(settings.volume_settings,
3028 		    htole16(MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN));
3029 	}
3030 
3031 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
3032 	if (ccb == NULL) {
3033 		rv = ENOMEM;
3034 		goto done;
3035 	}
3036 
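	/* push the new volume settings to the ioc via a raid action */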
3037 	req = ccb->ccb_cmd;
3038 	req->function = MPI_FUNCTION_RAID_ACTION;
3039 	req->action = MPI_MSG_RAID_ACTION_CH_VOL_SETTINGS;
3040 	req->vol_id = rpg0->volume_id;
3041 	req->vol_bus = rpg0->volume_bus;
3042 	req->msg_context = htole32(ccb->ccb_id);
3043 
3044 	memcpy(&req->data_word, &settings, sizeof(req->data_word));
3045 	ccb->ccb_done = mpi_empty_done;
3046 	if (mpi_poll(sc, ccb, 50000) != 0) {
3047 		rv = EIO;
3048 		goto done;
3049 	}
3050 
3051 	rep = (struct mpi_msg_raid_action_reply *)ccb->ccb_rcb;
3052 	if (rep == NULL)
3053 		panic("%s: raid volume settings change failed", DEVNAME(sc));
3054 
3055 	switch (letoh16(rep->action_status)) {
3056 	case MPI_RAID_ACTION_STATUS_OK:
3057 		rv = 0;
3058 		break;
3059 	default:
3060 		rv = EIO;
3061 		break;
3062 	}
3063 
3064 	mpi_push_reply(sc, ccb->ccb_rcb);
3065 	scsi_io_put(&sc->sc_iopool, ccb);
3066 
3067 done:
3068 	free(rpg0, M_TEMP);
3069 	return (rv);
3070 }
3071 
3072 #if NBIO > 0
3073 int
3074 mpi_bio_get_pg0_raid(struct mpi_softc *sc, int id)
3075 {
3076 	int			len, rv = EINVAL;
3077 	u_int32_t		address;
3078 	struct mpi_cfg_hdr	hdr;
3079 	struct mpi_cfg_raid_vol_pg0 *rpg0;
3080 
3081 	/* get IOC page 2 */
3082 	if (mpi_req_cfg_page(sc, 0, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page,
3083 	    sc->sc_cfg_hdr.page_length * 4) != 0) {
3084 		DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid unable to "
3085 		    "fetch IOC page 2\n", DEVNAME(sc));
3086 		goto done;
3087 	}
3088 	/* XXX return something other than EINVAL to indicate a hot spare id */
3089 	/* XXX return something else than EINVAL to indicate within hs range */
3090 	if (id > sc->sc_vol_page->active_vols) {
3091 		DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid invalid vol "
3092 		    "id: %d\n", DEVNAME(sc), id);
3093 		goto done;
3094 	}
3095 
3096 	/* replace current buffer with new one */
3097 	len = sizeof *rpg0 + sc->sc_vol_page->max_physdisks *
3098 	    sizeof(struct mpi_cfg_raid_vol_pg0_physdisk);
3099 	rpg0 = malloc(len, M_DEVBUF, M_WAITOK | M_CANFAIL);
3100 	if (rpg0 == NULL) {
3101 		printf("%s: can't get memory for RAID page 0, "
3102 		    "bio disabled\n", DEVNAME(sc));
3103 		goto done;
3104 	}
3105 	if (sc->sc_rpg0)
3106 		free(sc->sc_rpg0, M_DEVBUF);
3107 	sc->sc_rpg0 = rpg0;
3108 
3109 	/* get raid vol page 0 */
3110 	address = sc->sc_vol_list[id].vol_id |
3111 	    (sc->sc_vol_list[id].vol_bus << 8);
3112 	if (mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3113 	    address, 0, &hdr) != 0)
3114 		goto done;
3115 	if (mpi_req_cfg_page(sc, address, 0, &hdr, 1, rpg0, len)) {
3116 		DNPRINTF(MPI_D_RAID, "%s: can't get RAID vol cfg page 0\n",
3117 		    DEVNAME(sc));
3118 		goto done;
3119 	}
3120 
3121 	rv = 0;
3122 done:
3123 	return (rv);
3124 }
3125 
3126 int
3127 mpi_ioctl(struct device *dev, u_long cmd, caddr_t addr)
3128 {
3129 	struct mpi_softc	*sc = (struct mpi_softc *)dev;
3130 	int error = 0;
3131 
3132 	DNPRINTF(MPI_D_IOCTL, "%s: mpi_ioctl ", DEVNAME(sc));
3133 
3134 	/* make sure we have bio enabled */
3135 	if (sc->sc_ioctl != mpi_ioctl)
3136 		return (EINVAL);
3137 
3138 	rw_enter_write(&sc->sc_lock);
3139 
3140 	switch (cmd) {
3141 	case BIOCINQ:
3142 		DNPRINTF(MPI_D_IOCTL, "inq\n");
3143 		error = mpi_ioctl_inq(sc, (struct bioc_inq *)addr);
3144 		break;
3145 
3146 	case BIOCVOL:
3147 		DNPRINTF(MPI_D_IOCTL, "vol\n");
3148 		error = mpi_ioctl_vol(sc, (struct bioc_vol *)addr);
3149 		break;
3150 
3151 	case BIOCDISK:
3152 		DNPRINTF(MPI_D_IOCTL, "disk\n");
3153 		error = mpi_ioctl_disk(sc, (struct bioc_disk *)addr);
3154 		break;
3155 
3156 	case BIOCALARM:
3157 		DNPRINTF(MPI_D_IOCTL, "alarm\n");
3158 		break;
3159 
3160 	case BIOCBLINK:
3161 		DNPRINTF(MPI_D_IOCTL, "blink\n");
3162 		break;
3163 
3164 	case BIOCSETSTATE:
3165 		DNPRINTF(MPI_D_IOCTL, "setstate\n");
3166 		error = mpi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
3167 		break;
3168 
3169 	default:
3170 		DNPRINTF(MPI_D_IOCTL, " invalid ioctl\n");
3171 		error = EINVAL;
3172 	}
3173 
3174 	rw_exit_write(&sc->sc_lock);
3175 
3176 	return (error);
3177 }
3178 
3179 int
3180 mpi_ioctl_inq(struct mpi_softc *sc, struct bioc_inq *bi)
3181 {
3182 	if (!(sc->sc_flags & MPI_F_RAID)) {
3183 		bi->bi_novol = 0;
3184 		bi->bi_nodisk = 0;
3185 	}
3186 
3187 	if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page,
3188 	    sc->sc_cfg_hdr.page_length * 4) != 0) {
3189 		DNPRINTF(MPI_D_IOCTL, "%s: mpi_get_raid unable to fetch IOC "
3190 		    "page 2\n", DEVNAME(sc));
3191 		return (EINVAL);
3192 	}
3193 
3194 	DNPRINTF(MPI_D_IOCTL, "%s:  active_vols: %d max_vols: %d "
3195 	    "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc),
3196 	    sc->sc_vol_page->active_vols, sc->sc_vol_page->max_vols,
3197 	    sc->sc_vol_page->active_physdisks, sc->sc_vol_page->max_physdisks);
3198 
3199 	bi->bi_novol = sc->sc_vol_page->active_vols;
3200 	bi->bi_nodisk = sc->sc_vol_page->active_physdisks;
3201 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
3202 
3203 	return (0);
3204 }
3205 
3206 int
3207 mpi_ioctl_vol(struct mpi_softc *sc, struct bioc_vol *bv)
3208 {
3209 	int			i, vol, id, rv = EINVAL;
3210 	struct device		*dev;
3211 	struct scsi_link	*link;
3212 	struct mpi_cfg_raid_vol_pg0 *rpg0;
3213 	char			*vendp;
3214 
3215 	id = bv->bv_volid;
3216 	if (mpi_bio_get_pg0_raid(sc, id))
3217 		goto done;
3218 
3219 	if (id > sc->sc_vol_page->active_vols)
3220 		return (EINVAL); /* XXX deal with hot spares */
3221 
3222 	rpg0 = sc->sc_rpg0;
3223 	if (rpg0 == NULL)
3224 		goto done;
3225 
3226 	/* determine status */
3227 	switch (rpg0->volume_state) {
3228 	case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL:
3229 		bv->bv_status = BIOC_SVONLINE;
3230 		break;
3231 	case MPI_CFG_RAID_VOL_0_STATE_DEGRADED:
3232 		bv->bv_status = BIOC_SVDEGRADED;
3233 		break;
3234 	case MPI_CFG_RAID_VOL_0_STATE_FAILED:
3235 	case MPI_CFG_RAID_VOL_0_STATE_MISSING:
3236 		bv->bv_status = BIOC_SVOFFLINE;
3237 		break;
3238 	default:
3239 		bv->bv_status = BIOC_SVINVALID;
3240 	}
3241 
3242 	/* override status if the volume is resyncing */
3243 	if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING)
3244 		bv->bv_status = BIOC_SVREBUILD;
3245 
3246 	bv->bv_size = (u_quad_t)letoh32(rpg0->max_lba) * 512;
3247 
3248 	switch (sc->sc_vol_list[id].vol_type) {
3249 	case MPI_CFG_RAID_TYPE_RAID_IS:
3250 		bv->bv_level = 0;
3251 		break;
3252 	case MPI_CFG_RAID_TYPE_RAID_IME:
3253 	case MPI_CFG_RAID_TYPE_RAID_IM:
3254 		bv->bv_level = 1;
3255 		break;
3256 	case MPI_CFG_RAID_TYPE_RAID_5:
3257 		bv->bv_level = 5;
3258 		break;
3259 	case MPI_CFG_RAID_TYPE_RAID_6:
3260 		bv->bv_level = 6;
3261 		break;
3262 	case MPI_CFG_RAID_TYPE_RAID_10:
3263 		bv->bv_level = 10;
3264 		break;
3265 	case MPI_CFG_RAID_TYPE_RAID_50:
3266 		bv->bv_level = 50;
3267 		break;
3268 	default:
3269 		bv->bv_level = -1;
3270 	}
3271 
3272 	bv->bv_nodisk = rpg0->num_phys_disks;
3273 
3274 	for (i = 0, vol = -1; i < sc->sc_buswidth; i++) {
3275 		link = scsi_get_link(sc->sc_scsibus, i, 0);
3276 		if (link == NULL)
3277 			continue;
3278 
3279 		/* skip if not a virtual disk */
3280 		if (!(link->flags & SDEV_VIRTUAL))
3281 			continue;
3282 
3283 		vol++;
3284 		/* are we it? */
3285 		if (vol == bv->bv_volid) {
3286 			dev = link->device_softc;
3287 			vendp = link->inqdata.vendor;
3288 			memcpy(bv->bv_vendor, vendp, sizeof bv->bv_vendor);
3289 			bv->bv_vendor[sizeof(bv->bv_vendor) - 1] = '\0';
3290 			strlcpy(bv->bv_dev, dev->dv_xname, sizeof bv->bv_dev);
3291 			break;
3292 		}
3293 	}
3294 	rv = 0;
3295 done:
3296 	return (rv);
3297 }
3298 
3299 int
3300 mpi_ioctl_disk(struct mpi_softc *sc, struct bioc_disk *bd)
3301 {
3302 	int			pdid, id, rv = EINVAL;
3303 	u_int32_t		address;
3304 	struct mpi_cfg_hdr	hdr;
3305 	struct mpi_cfg_raid_vol_pg0 *rpg0;
3306 	struct mpi_cfg_raid_vol_pg0_physdisk *physdisk;
3307 	struct mpi_cfg_raid_physdisk_pg0 pdpg0;
3308 
3309 	id = bd->bd_volid;
3310 	if (mpi_bio_get_pg0_raid(sc, id))
3311 		goto done;
3312 
3313 	if (id > sc->sc_vol_page->active_vols)
3314 		return (EINVAL); /* XXX deal with hot spares */
3315 
3316 	rpg0 = sc->sc_rpg0;
3317 	if (rpg0 == NULL)
3318 		goto done;
3319 
3320 	pdid = bd->bd_diskid;
3321 	if (pdid > rpg0->num_phys_disks)
3322 		goto done;
3323 	physdisk = (struct mpi_cfg_raid_vol_pg0_physdisk *)(rpg0 + 1);
3324 	physdisk += pdid;
3325 
3326 	/* get raid phys disk page 0 */
3327 	address = physdisk->phys_disk_num;
3328 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_PD, 0, address,
3329 	    &hdr) != 0)
3330 		goto done;
3331 	if (mpi_cfg_page(sc, address, &hdr, 1, &pdpg0, sizeof pdpg0)) {
3332 		bd->bd_status = BIOC_SDFAILED;
3333 		return (0);
3334 	}
3335 	bd->bd_channel = pdpg0.phys_disk_bus;
3336 	bd->bd_target = pdpg0.phys_disk_id;
3337 	bd->bd_lun = 0;
3338 	bd->bd_size = (u_quad_t)letoh32(pdpg0.max_lba) * 512;
3339 	strlcpy(bd->bd_vendor, (char *)pdpg0.vendor_id, sizeof(bd->bd_vendor));
3340 
3341 	switch (pdpg0.phys_disk_state) {
3342 	case MPI_CFG_RAID_PHYDISK_0_STATE_ONLINE:
3343 		bd->bd_status = BIOC_SDONLINE;
3344 		break;
3345 	case MPI_CFG_RAID_PHYDISK_0_STATE_MISSING:
3346 	case MPI_CFG_RAID_PHYDISK_0_STATE_FAILED:
3347 		bd->bd_status = BIOC_SDFAILED;
3348 		break;
3349 	case MPI_CFG_RAID_PHYDISK_0_STATE_HOSTFAIL:
3350 	case MPI_CFG_RAID_PHYDISK_0_STATE_OTHER:
3351 	case MPI_CFG_RAID_PHYDISK_0_STATE_OFFLINE:
3352 		bd->bd_status = BIOC_SDOFFLINE;
3353 		break;
3354 	case MPI_CFG_RAID_PHYDISK_0_STATE_INIT:
3355 		bd->bd_status = BIOC_SDSCRUB;
3356 		break;
3357 	case MPI_CFG_RAID_PHYDISK_0_STATE_INCOMPAT:
3358 	default:
3359 		bd->bd_status = BIOC_SDINVALID;
3360 		break;
3361 	}
3362 
3363 	/* XXX figure this out */
3364 	/* bd_serial[32]; */
3365 	/* bd_procdev[16]; */
3366 
3367 	rv = 0;
3368 done:
3369 	return (rv);
3370 }
3371 
3372 int
3373 mpi_ioctl_setstate(struct mpi_softc *sc, struct bioc_setstate *bs)
3374 {
3375 	return (ENOTTY);
3376 }
3377 
3378 #ifndef SMALL_KERNEL
3379 int
3380 mpi_create_sensors(struct mpi_softc *sc)
3381 {
3382 	struct device		*dev;
3383 	struct scsi_link	*link;
3384 	int			i, vol;
3385 
3386 	/* count volumes */
3387 	for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
3388 		link = scsi_get_link(sc->sc_scsibus, i, 0);
3389 		if (link == NULL)
3390 			continue;
3391 		/* skip if not a virtual disk */
3392 		if (!(link->flags & SDEV_VIRTUAL))
3393 			continue;
3394 
3395 		vol++;
3396 	}
3397 	if (vol == 0)
3398 		return (0);
3399 
3400 	sc->sc_sensors = malloc(sizeof(struct ksensor) * vol,
3401 	    M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
3402 	if (sc->sc_sensors == NULL)
3403 		return (1);
3404 
3405 	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
3406 	    sizeof(sc->sc_sensordev.xname));
3407 
3408 	for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
3409 		link = scsi_get_link(sc->sc_scsibus, i, 0);
3410 		if (link == NULL)
3411 			continue;
3412 		/* skip if not a virtual disk */
3413 		if (!(link->flags & SDEV_VIRTUAL))
3414 			continue;
3415 
3416 		dev = link->device_softc;
3417 		strlcpy(sc->sc_sensors[vol].desc, dev->dv_xname,
3418 		    sizeof(sc->sc_sensors[vol].desc));
3419 		sc->sc_sensors[vol].type = SENSOR_DRIVE;
3420 		sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN;
3421 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[vol]);
3422 
3423 		vol++;
3424 	}
3425 
3426 	if (sensor_task_register(sc, mpi_refresh_sensors, 10) == NULL)
3427 		goto bad;
3428 
3429 	sensordev_install(&sc->sc_sensordev);
3430 
3431 	return (0);
3432 
3433 bad:
3434 	free(sc->sc_sensors, M_DEVBUF);
3435 	return (1);
3436 }
3437 
3438 void
3439 mpi_refresh_sensors(void *arg)
3440 {
3441 	int			i, vol;
3442 	struct scsi_link	*link;
3443 	struct mpi_softc	*sc = arg;
3444 	struct mpi_cfg_raid_vol_pg0 *rpg0;
3445 
3446 	rw_enter_write(&sc->sc_lock);
3447 
3448 	for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
3449 		link = scsi_get_link(sc->sc_scsibus, i, 0);
3450 		if (link == NULL)
3451 			continue;
3452 		/* skip if not a virtual disk */
3453 		if (!(link->flags & SDEV_VIRTUAL))
3454 			continue;
3455 
3456 		if (mpi_bio_get_pg0_raid(sc, vol))
3457 			continue;
3458 
3459 		rpg0 = sc->sc_rpg0;
3460 		if (rpg0 == NULL)
3461 			goto done;
3462 
3463 		/* determine status */
3464 		switch (rpg0->volume_state) {
3465 		case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL:
3466 			sc->sc_sensors[vol].value = SENSOR_DRIVE_ONLINE;
3467 			sc->sc_sensors[vol].status = SENSOR_S_OK;
3468 			break;
3469 		case MPI_CFG_RAID_VOL_0_STATE_DEGRADED:
3470 			sc->sc_sensors[vol].value = SENSOR_DRIVE_PFAIL;
3471 			sc->sc_sensors[vol].status = SENSOR_S_WARN;
3472 			break;
3473 		case MPI_CFG_RAID_VOL_0_STATE_FAILED:
3474 		case MPI_CFG_RAID_VOL_0_STATE_MISSING:
3475 			sc->sc_sensors[vol].value = SENSOR_DRIVE_FAIL;
3476 			sc->sc_sensors[vol].status = SENSOR_S_CRIT;
3477 			break;
3478 		default:
3479 			sc->sc_sensors[vol].value = 0; /* unknown */
3480 			sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN;
3481 		}
3482 
3483 		/* override status if the volume is resyncing */
3484 		if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING) {
3485 			sc->sc_sensors[vol].value = SENSOR_DRIVE_REBUILD;
3486 			sc->sc_sensors[vol].status = SENSOR_S_WARN;
3487 		}
3488 
3489 		vol++;
3490 	}
3491 done:
3492 	rw_exit_write(&sc->sc_lock);
3493 }
3494 #endif /* SMALL_KERNEL */
3495 #endif /* NBIO > 0 */
3496