1 /*	$OpenBSD: mpi.c,v 1.113 2009/10/11 02:11:34 dlg Exp $ */
2 
3 /*
4  * Copyright (c) 2005, 2006 David Gwynne <dlg@openbsd.org>
5  * Copyright (c) 2005 Marco Peereboom <marco@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "bio.h"
21 
22 #include <sys/param.h>
23 #include <sys/systm.h>
24 #include <sys/buf.h>
25 #include <sys/device.h>
26 #include <sys/ioctl.h>
27 #include <sys/proc.h>
28 #include <sys/malloc.h>
29 #include <sys/kernel.h>
30 #include <sys/rwlock.h>
31 #include <sys/sensors.h>
32 
33 #include <machine/bus.h>
34 
35 #include <scsi/scsi_all.h>
36 #include <scsi/scsiconf.h>
37 
38 #include <dev/biovar.h>
39 #include <dev/ic/mpireg.h>
40 #include <dev/ic/mpivar.h>
41 
42 #ifdef MPI_DEBUG
43 uint32_t	mpi_debug = 0
44 /*		    | MPI_D_CMD */
45 /*		    | MPI_D_INTR */
46 /*		    | MPI_D_MISC */
47 /*		    | MPI_D_DMA */
48 /*		    | MPI_D_IOCTL */
49 /*		    | MPI_D_RW */
50 /*		    | MPI_D_MEM */
51 /*		    | MPI_D_CCB */
52 /*		    | MPI_D_PPR */
53 /*		    | MPI_D_RAID */
54 /*		    | MPI_D_EVT */
55 		;
56 #endif
57 
58 struct cfdriver mpi_cd = {
59 	NULL,
60 	"mpi",
61 	DV_DULL
62 };
63 
64 int			mpi_scsi_cmd(struct scsi_xfer *);
65 void			mpi_scsi_cmd_done(struct mpi_ccb *);
66 void			mpi_minphys(struct buf *bp, struct scsi_link *sl);
67 int			mpi_scsi_probe(struct scsi_link *);
68 int			mpi_scsi_ioctl(struct scsi_link *, u_long, caddr_t,
69 			    int, struct proc *);
70 
71 struct scsi_adapter mpi_switch = {
72 	mpi_scsi_cmd,
73 	mpi_minphys,
74 	mpi_scsi_probe,
75 	NULL,
76 	mpi_scsi_ioctl
77 };
78 
79 struct scsi_device mpi_dev = {
80 	NULL,
81 	NULL,
82 	NULL,
83 	NULL
84 };
85 
86 struct mpi_dmamem	*mpi_dmamem_alloc(struct mpi_softc *, size_t);
87 void			mpi_dmamem_free(struct mpi_softc *,
88 			    struct mpi_dmamem *);
89 int			mpi_alloc_ccbs(struct mpi_softc *);
90 struct mpi_ccb		*mpi_get_ccb(struct mpi_softc *);
91 void			mpi_put_ccb(struct mpi_softc *, struct mpi_ccb *);
92 int			mpi_alloc_replies(struct mpi_softc *);
93 void			mpi_push_replies(struct mpi_softc *);
94 
95 void			mpi_start(struct mpi_softc *, struct mpi_ccb *);
96 int			mpi_complete(struct mpi_softc *, struct mpi_ccb *, int);
97 int			mpi_poll(struct mpi_softc *, struct mpi_ccb *, int);
98 int			mpi_reply(struct mpi_softc *, u_int32_t);
99 
100 int			mpi_cfg_spi_port(struct mpi_softc *);
101 void			mpi_squash_ppr(struct mpi_softc *);
102 void			mpi_run_ppr(struct mpi_softc *);
103 int			mpi_ppr(struct mpi_softc *, struct scsi_link *,
104 			    struct mpi_cfg_raid_physdisk *, int, int, int);
105 int			mpi_inq(struct mpi_softc *, u_int16_t, int);
106 
107 void			mpi_fc_info(struct mpi_softc *);
108 
109 void			mpi_timeout_xs(void *);
110 int			mpi_load_xs(struct mpi_ccb *);
111 
112 u_int32_t		mpi_read(struct mpi_softc *, bus_size_t);
113 void			mpi_write(struct mpi_softc *, bus_size_t, u_int32_t);
114 int			mpi_wait_eq(struct mpi_softc *, bus_size_t, u_int32_t,
115 			    u_int32_t);
116 int			mpi_wait_ne(struct mpi_softc *, bus_size_t, u_int32_t,
117 			    u_int32_t);
118 
119 int			mpi_init(struct mpi_softc *);
120 int			mpi_reset_soft(struct mpi_softc *);
121 int			mpi_reset_hard(struct mpi_softc *);
122 
123 int			mpi_handshake_send(struct mpi_softc *, void *, size_t);
124 int			mpi_handshake_recv_dword(struct mpi_softc *,
125 			    u_int32_t *);
126 int			mpi_handshake_recv(struct mpi_softc *, void *, size_t);
127 
128 void			mpi_empty_done(struct mpi_ccb *);
129 
130 int			mpi_iocinit(struct mpi_softc *);
131 int			mpi_iocfacts(struct mpi_softc *);
132 int			mpi_portfacts(struct mpi_softc *);
133 int			mpi_portenable(struct mpi_softc *);
134 void			mpi_get_raid(struct mpi_softc *);
135 int			mpi_fwupload(struct mpi_softc *);
136 
137 int			mpi_eventnotify(struct mpi_softc *);
138 void			mpi_eventnotify_done(struct mpi_ccb *);
139 void			mpi_eventack(struct mpi_softc *,
140 			    struct mpi_msg_event_reply *);
141 void			mpi_eventack_done(struct mpi_ccb *);
142 void			mpi_evt_sas(void *, void *);
143 
144 int			mpi_req_cfg_header(struct mpi_softc *, u_int8_t,
145 			    u_int8_t, u_int32_t, int, void *);
146 int			mpi_req_cfg_page(struct mpi_softc *, u_int32_t, int,
147 			    void *, int, void *, size_t);
148 
149 #if NBIO > 0
150 int		mpi_bio_get_pg0_raid(struct mpi_softc *, int);
151 int		mpi_ioctl(struct device *, u_long, caddr_t);
152 int		mpi_ioctl_inq(struct mpi_softc *, struct bioc_inq *);
153 int		mpi_ioctl_vol(struct mpi_softc *, struct bioc_vol *);
154 int		mpi_ioctl_disk(struct mpi_softc *, struct bioc_disk *);
155 int		mpi_ioctl_setstate(struct mpi_softc *, struct bioc_setstate *);
156 #ifndef SMALL_KERNEL
157 int		mpi_create_sensors(struct mpi_softc *);
158 void		mpi_refresh_sensors(void *);
159 #endif /* SMALL_KERNEL */
160 #endif /* NBIO > 0 */
161 
162 #define DEVNAME(s)		((s)->sc_dev.dv_xname)
163 
164 #define	dwordsof(s)		(sizeof(s) / sizeof(u_int32_t))
165 
166 #define mpi_read_db(s)		mpi_read((s), MPI_DOORBELL)
167 #define mpi_write_db(s, v)	mpi_write((s), MPI_DOORBELL, (v))
168 #define mpi_read_intr(s)	mpi_read((s), MPI_INTR_STATUS)
169 #define mpi_write_intr(s, v)	mpi_write((s), MPI_INTR_STATUS, (v))
170 #define mpi_pop_reply(s)	mpi_read((s), MPI_REPLY_QUEUE)
171 #define mpi_push_reply(s, v)	mpi_write((s), MPI_REPLY_QUEUE, (v))
172 
173 #define mpi_wait_db_int(s)	mpi_wait_ne((s), MPI_INTR_STATUS, \
174 				    MPI_INTR_STATUS_DOORBELL, 0)
175 #define mpi_wait_db_ack(s)	mpi_wait_eq((s), MPI_INTR_STATUS, \
176 				    MPI_INTR_STATUS_IOCDOORBELL, 0)
177 
178 #define MPI_PG_EXTENDED		(1<<0)
179 #define MPI_PG_POLL		(1<<1)
180 #define MPI_PG_FMT		"\020" "\002POLL" "\001EXTENDED"
181 
182 #define mpi_cfg_header(_s, _t, _n, _a, _h) \
183 	mpi_req_cfg_header((_s), (_t), (_n), (_a), \
184 	    MPI_PG_POLL, (_h))
185 #define mpi_ecfg_header(_s, _t, _n, _a, _h) \
186 	mpi_req_cfg_header((_s), (_t), (_n), (_a), \
187 	    MPI_PG_POLL|MPI_PG_EXTENDED, (_h))
188 
189 #define mpi_cfg_page(_s, _a, _h, _r, _p, _l) \
190 	mpi_req_cfg_page((_s), (_a), MPI_PG_POLL, \
191 	    (_h), (_r), (_p), (_l))
192 #define mpi_ecfg_page(_s, _a, _h, _r, _p, _l) \
193 	mpi_req_cfg_page((_s), (_a), MPI_PG_POLL|MPI_PG_EXTENDED, \
194 	    (_h), (_r), (_p), (_l))
195 
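/*
 * Bring-up order matters: interrupts stay masked while the IOC is
 * taken through init, iocfacts, iocinit and portenable, and are only
 * unmasked once scsibus has attached and (on SPI ports) domain
 * validation has run.
 */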
196 int
197 mpi_attach(struct mpi_softc *sc)
198 {
199 	struct scsibus_attach_args	saa;
200 	struct mpi_ccb			*ccb;
201 
202 	printf("\n");
203 
204 	/* disable interrupts */
205 	mpi_write(sc, MPI_INTR_MASK,
206 	    MPI_INTR_MASK_REPLY | MPI_INTR_MASK_DOORBELL);
207 
208 	if (mpi_init(sc) != 0) {
209 		printf("%s: unable to initialise\n", DEVNAME(sc));
210 		return (1);
211 	}
212 
213 	if (mpi_iocfacts(sc) != 0) {
214 		printf("%s: unable to get iocfacts\n", DEVNAME(sc));
215 		return (1);
216 	}
217 
218 	if (mpi_alloc_ccbs(sc) != 0) {
219 		/* error already printed */
220 		return (1);
221 	}
222 
223 	if (mpi_alloc_replies(sc) != 0) {
224 		printf("%s: unable to allocate reply space\n", DEVNAME(sc));
225 		goto free_ccbs;
226 	}
227 
228 	if (mpi_iocinit(sc) != 0) {
229 		printf("%s: unable to send iocinit\n", DEVNAME(sc));
230 		goto free_ccbs;
231 	}
232 
233 	/* spin until we're operational */
234 	if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
235 	    MPI_DOORBELL_STATE_OPER) != 0) {
236 		printf("%s: state: 0x%08x\n", DEVNAME(sc),
237 		    mpi_read_db(sc) & MPI_DOORBELL_STATE);
238 		printf("%s: operational state timeout\n", DEVNAME(sc));
239 		goto free_ccbs;
240 	}
241 
242 	mpi_push_replies(sc);
243 
244 	if (mpi_portfacts(sc) != 0) {
245 		printf("%s: unable to get portfacts\n", DEVNAME(sc));
246 		goto free_replies;
247 	}
248 
249 #ifdef notyet
250 	if (mpi_eventnotify(sc) != 0) {
251 		printf("%s: unable to enable events\n", DEVNAME(sc));
252 		goto free_replies;
253 	}
254 #endif
255 
256 	if (mpi_portenable(sc) != 0) {
257 		printf("%s: unable to enable port\n", DEVNAME(sc));
258 		goto free_replies;
259 	}
260 
261 	if (mpi_fwupload(sc) != 0) {
262 		printf("%s: unable to upload firmware\n", DEVNAME(sc));
263 		goto free_replies;
264 	}
265 
266 	switch (sc->sc_porttype) {
267 	case MPI_PORTFACTS_PORTTYPE_SCSI:
268 		if (mpi_cfg_spi_port(sc) != 0)
269 			goto free_replies;
270 		mpi_squash_ppr(sc);
271 		break;
272 	case MPI_PORTFACTS_PORTTYPE_FC:
273 		mpi_fc_info(sc);
274 		break;
275 	}
276 
277 	rw_init(&sc->sc_lock, "mpi_lock");
278 
279 	/* we should be good to go now, attach scsibus */
280 	sc->sc_link.device = &mpi_dev;
281 	sc->sc_link.adapter = &mpi_switch;
282 	sc->sc_link.adapter_softc = sc;
283 	sc->sc_link.adapter_target = sc->sc_target;
284 	sc->sc_link.adapter_buswidth = sc->sc_buswidth;
285 	sc->sc_link.openings = sc->sc_maxcmds / sc->sc_buswidth;
286 
287 	bzero(&saa, sizeof(saa));
288 	saa.saa_sc_link = &sc->sc_link;
289 
290 	/* config_found() returns the scsibus attached to us */
291 	sc->sc_scsibus = (struct scsibus_softc *) config_found(&sc->sc_dev,
292 	    &saa, scsiprint);
293 
294 	/* get raid pages */
295 	mpi_get_raid(sc);
296 
297 	/* do domain validation */
298 	if (sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_SCSI)
299 		mpi_run_ppr(sc);
300 
301 	/* enable interrupts */
302 	mpi_write(sc, MPI_INTR_MASK, MPI_INTR_MASK_DOORBELL);
303 
304 #if NBIO > 0
305 	if (sc->sc_flags & MPI_F_RAID) {
306 		if (bio_register(&sc->sc_dev, mpi_ioctl) != 0)
307 			panic("%s: controller registration failed",
308 			    DEVNAME(sc));
309 		else {
310 			if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC,
311 			    2, 0, &sc->sc_cfg_hdr) != 0) {
312 				printf("%s: can't get IOC page 2 hdr, bio "
313 				    "disabled\n", DEVNAME(sc));
314 				goto done;
315 			}
316 			sc->sc_vol_page = malloc(sc->sc_cfg_hdr.page_length * 4,
317 			    M_TEMP, M_WAITOK | M_CANFAIL);
318 			if (sc->sc_vol_page == NULL) {
319 				printf("%s: can't get memory for IOC page 2, "
320 				    "bio disabled\n", DEVNAME(sc));
321 				goto done;
322 			}
323 			sc->sc_vol_list = (struct mpi_cfg_raid_vol *)
324 			    (sc->sc_vol_page + 1);
325 
326 			sc->sc_ioctl = mpi_ioctl;
327 		}
328 	}
329 #ifndef SMALL_KERNEL
330 	mpi_create_sensors(sc);
331 #endif /* SMALL_KERNEL */
332 done:
333 #endif /* NBIO > 0 */
334 
335 	return (0);
336 
337 free_replies:
338 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies),
339 	    0, PAGE_SIZE, BUS_DMASYNC_POSTREAD);
340 	mpi_dmamem_free(sc, sc->sc_replies);
341 free_ccbs:
342 	while ((ccb = mpi_get_ccb(sc)) != NULL)
343 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
344 	mpi_dmamem_free(sc, sc->sc_requests);
345 	free(sc->sc_ccbs, M_DEVBUF);
346 
347 	return(1);
348 }
349 
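/*
 * Make sure SPI port page 1 carries our initiator settings: the port
 * must use sc_target as its SCSI id, respond only to that id, and have
 * a non-zero on-bus timer.  If any of that is off, rewrite the page.
 */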
350 int
351 mpi_cfg_spi_port(struct mpi_softc *sc)
352 {
353 	struct mpi_cfg_hdr		hdr;
354 	struct mpi_cfg_spi_port_pg1	port;
355 
356 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 1, 0x0,
357 	    &hdr) != 0)
358 		return (1);
359 
360 	if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port, sizeof(port)) != 0)
361 		return (1);
362 
363 	DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_spi_port_pg1\n", DEVNAME(sc));
364 	DNPRINTF(MPI_D_MISC, "%s:  port_scsi_id: %d port_resp_ids 0x%04x\n",
365 	    DEVNAME(sc), port.port_scsi_id, letoh16(port.port_resp_ids));
366 	DNPRINTF(MPI_D_MISC, "%s:  on_bus_timer_value: 0x%08x\n", DEVNAME(sc),
367 	    letoh32(port.on_bus_timer_value));
368 	DNPRINTF(MPI_D_MISC, "%s:  target_config: 0x%02x id_config: 0x%04x\n",
369 	    DEVNAME(sc), port.target_config, letoh16(port.id_config));
370 
371 	if (port.port_scsi_id == sc->sc_target &&
372 	    port.port_resp_ids == htole16(1 << sc->sc_target) &&
373 	    port.on_bus_timer_value != htole32(0x0))
374 		return (0);
375 
376 	DNPRINTF(MPI_D_MISC, "%s: setting port scsi id to %d\n", DEVNAME(sc),
377 	    sc->sc_target);
378 	port.port_scsi_id = sc->sc_target;
379 	port.port_resp_ids = htole16(1 << sc->sc_target);
380 	port.on_bus_timer_value = htole32(0x07000000); /* XXX magic */
381 
382 	if (mpi_cfg_page(sc, 0x0, &hdr, 0, &port, sizeof(port)) != 0) {
383 		printf("%s: unable to configure port scsi id\n", DEVNAME(sc));
384 		return (1);
385 	}
386 
387 	return (0);
388 }
389 
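/*
 * Zero the requested transfer parameters in SPI device page 1 for
 * every target on the bus, so the PPR pass below starts negotiation
 * from a clean slate.
 */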
390 void
391 mpi_squash_ppr(struct mpi_softc *sc)
392 {
393 	struct mpi_cfg_hdr		hdr;
394 	struct mpi_cfg_spi_dev_pg1	page;
395 	int				i;
396 
397 	DNPRINTF(MPI_D_PPR, "%s: mpi_squash_ppr\n", DEVNAME(sc));
398 
399 	for (i = 0; i < sc->sc_buswidth; i++) {
400 		if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV,
401 		    1, i, &hdr) != 0)
402 			return;
403 
404 		if (mpi_cfg_page(sc, i, &hdr, 1, &page, sizeof(page)) != 0)
405 			return;
406 
407 		DNPRINTF(MPI_D_PPR, "%s:  target: %d req_params1: 0x%02x "
408 		    "req_offset: 0x%02x req_period: 0x%02x "
409 		    "req_params2: 0x%02x conf: 0x%08x\n", DEVNAME(sc), i,
410 		    page.req_params1, page.req_offset, page.req_period,
411 		    page.req_params2, letoh32(page.configuration));
412 
413 		page.req_params1 = 0x0;
414 		page.req_offset = 0x0;
415 		page.req_period = 0x0;
416 		page.req_params2 = 0x0;
417 		page.configuration = htole32(0x0);
418 
419 		if (mpi_cfg_page(sc, i, &hdr, 0, &page, sizeof(page)) != 0)
420 			return;
421 	}
422 }
423 
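/*
 * Negotiate transfer parameters for every attached scsi_link (RAID
 * volumes are skipped) using the port's advertised minimum period and
 * maximum offset, then do the same for the physical disks listed
 * behind RAID volumes in IOC page 3.
 */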
424 void
425 mpi_run_ppr(struct mpi_softc *sc)
426 {
427 	struct mpi_cfg_hdr		hdr;
428 	struct mpi_cfg_spi_port_pg0	port_pg;
429 	struct mpi_cfg_ioc_pg3		*physdisk_pg;
430 	struct mpi_cfg_raid_physdisk	*physdisk_list, *physdisk;
431 	size_t				pagelen;
432 	struct scsi_link		*link;
433 	int				i, tries;
434 
435 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 0, 0x0,
436 	    &hdr) != 0) {
437 		DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch header\n",
438 		    DEVNAME(sc));
439 		return;
440 	}
441 
442 	if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port_pg, sizeof(port_pg)) != 0) {
443 		DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch page\n",
444 		    DEVNAME(sc));
445 		return;
446 	}
447 
448 	for (i = 0; i < sc->sc_buswidth; i++) {
449 		link = sc->sc_scsibus->sc_link[i][0];
450 		if (link == NULL)
451 			continue;
452 
453 		/* do not ppr volumes */
454 		if (link->flags & SDEV_VIRTUAL)
455 			continue;
456 
457 		tries = 0;
458 		while (mpi_ppr(sc, link, NULL, port_pg.min_period,
459 		    port_pg.max_offset, tries) == EAGAIN)
460 			tries++;
461 	}
462 
463 	if ((sc->sc_flags & MPI_F_RAID) == 0)
464 		return;
465 
466 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 3, 0x0,
467 	    &hdr) != 0) {
468 		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
469 		    "fetch ioc pg 3 header\n", DEVNAME(sc));
470 		return;
471 	}
472 
473 	pagelen = hdr.page_length * 4; /* dwords to bytes */
474 	physdisk_pg = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL);
475 	if (physdisk_pg == NULL) {
476 		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
477 		    "allocate ioc pg 3\n", DEVNAME(sc));
478 		return;
479 	}
480 	physdisk_list = (struct mpi_cfg_raid_physdisk *)(physdisk_pg + 1);
481 
482 	if (mpi_cfg_page(sc, 0, &hdr, 1, physdisk_pg, pagelen) != 0) {
483 		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
484 		    "fetch ioc page 3\n", DEVNAME(sc));
485 		goto out;
486 	}
487 
488 	DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s:  no_phys_disks: %d\n", DEVNAME(sc),
489 	    physdisk_pg->no_phys_disks);
490 
491 	for (i = 0; i < physdisk_pg->no_phys_disks; i++) {
492 		physdisk = &physdisk_list[i];
493 
494 		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s:  id: %d bus: %d ioc: %d "
495 		    "num: %d\n", DEVNAME(sc), physdisk->phys_disk_id,
496 		    physdisk->phys_disk_bus, physdisk->phys_disk_ioc,
497 		    physdisk->phys_disk_num);
498 
499 		if (physdisk->phys_disk_ioc != sc->sc_ioc_number)
500 			continue;
501 
502 		tries = 0;
503 		while (mpi_ppr(sc, NULL, physdisk, port_pg.min_period,
504 		    port_pg.max_offset, tries) == EAGAIN)
505 			tries++;
506 	}
507 
508 out:
509 	free(physdisk_pg, M_TEMP);
510 }
511 
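/*
 * Negotiate with a single target or RAID physical disk.  The requested
 * parameters are written to SPI device page 1, an INQUIRY is issued to
 * kick off the negotiation, and SPI device page 0 is read back to see
 * what was agreed.  EAGAIN tells the caller to retry at the next lower
 * speed: try 0 is U320, try 1 is U160, try 2 is U80.
 */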
512 int
513 mpi_ppr(struct mpi_softc *sc, struct scsi_link *link,
514     struct mpi_cfg_raid_physdisk *physdisk, int period, int offset, int try)
515 {
516 	struct mpi_cfg_hdr		hdr0, hdr1;
517 	struct mpi_cfg_spi_dev_pg0	pg0;
518 	struct mpi_cfg_spi_dev_pg1	pg1;
519 	u_int32_t			address;
520 	int				id;
521 	int				raid = 0;
522 
523 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr period: %d offset: %d try: %d "
524 	    "link quirks: 0x%x\n", DEVNAME(sc), period, offset, try,
525 	    (link != NULL) ? link->quirks : 0);
526 
527 	if (try >= 3)
528 		return (EIO);
529 
530 	if (physdisk == NULL) {
531 		if ((link->inqdata.device & SID_TYPE) == T_PROCESSOR)
532 			return (EIO);
533 
534 		address = link->target;
535 		id = link->target;
536 	} else {
537 		raid = 1;
538 		address = (physdisk->phys_disk_bus << 8) |
539 		    (physdisk->phys_disk_id);
540 		id = physdisk->phys_disk_num;
541 	}
542 
543 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 0,
544 	    address, &hdr0) != 0) {
545 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 0\n",
546 		    DEVNAME(sc));
547 		return (EIO);
548 	}
549 
550 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 1,
551 	    address, &hdr1) != 0) {
552 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 1\n",
553 		    DEVNAME(sc));
554 		return (EIO);
555 	}
556 
557 #ifdef MPI_DEBUG
558 	if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0)) != 0) {
559 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 0\n",
560 		    DEVNAME(sc));
561 		return (EIO);
562 	}
563 
564 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x "
565 	    "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x "
566 	    "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset,
567 	    pg0.neg_period, pg0.neg_params2, letoh32(pg0.information));
568 #endif
569 
570 	if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1)) != 0) {
571 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 1\n",
572 		    DEVNAME(sc));
573 		return (EIO);
574 	}
575 
576 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
577 	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
578 	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
579 	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
580 
581 	pg1.req_params1 = 0;
582 	pg1.req_offset = offset;
583 	pg1.req_period = period;
584 	pg1.req_params2 &= ~MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH;
585 
586 	if (raid || !(link->quirks & SDEV_NOSYNC)) {
587 		pg1.req_params2 |= MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH_WIDE;
588 
589 		switch (try) {
590 		case 0: /* U320 */
591 			break;
592 		case 1: /* U160 */
593 			pg1.req_period = 0x09;
594 			break;
595 		case 2: /* U80 */
596 			pg1.req_period = 0x0a;
597 			break;
598 		}
599 
600 		if (pg1.req_period < 0x09) {
601 			/* Ultra320: enable QAS & PACKETIZED */
602 			pg1.req_params1 |= MPI_CFG_SPI_DEV_1_REQPARAMS_QAS |
603 			    MPI_CFG_SPI_DEV_1_REQPARAMS_PACKETIZED;
604 		}
605 		if (pg1.req_period < 0xa) {
606 			/* >= Ultra160: enable dual xfers */
607 			pg1.req_params1 |=
608 			    MPI_CFG_SPI_DEV_1_REQPARAMS_DUALXFERS;
609 		}
610 	}
611 
612 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
613 	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
614 	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
615 	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
616 
617 	if (mpi_cfg_page(sc, address, &hdr1, 0, &pg1, sizeof(pg1)) != 0) {
618 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to write page 1\n",
619 		    DEVNAME(sc));
620 		return (EIO);
621 	}
622 
623 	if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1)) != 0) {
624 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 1\n",
625 		    DEVNAME(sc));
626 		return (EIO);
627 	}
628 
629 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
630 	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
631 	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
632 	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
633 
634 	if (mpi_inq(sc, id, raid) != 0) {
635 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to do inquiry against "
636 		    "target %d\n", DEVNAME(sc), id);
637 		return (EIO);
638 	}
639 
640 	if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0)) != 0) {
641 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 0 after "
642 		    "inquiry\n", DEVNAME(sc));
643 		return (EIO);
644 	}
645 
646 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x "
647 	    "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x "
648 	    "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset,
649 	    pg0.neg_period, pg0.neg_params2, letoh32(pg0.information));
650 
651 	if (!(letoh32(pg0.information) & 0x07) && (try == 0)) {
652 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U320 ppr rejected\n",
653 		    DEVNAME(sc));
654 		return (EAGAIN);
655 	}
656 
657 	if ((((letoh32(pg0.information) >> 8) & 0xff) > 0x09) && (try == 1)) {
658 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U160 ppr rejected\n",
659 		    DEVNAME(sc));
660 		return (EAGAIN);
661 	}
662 
663 	if (letoh32(pg0.information) & 0x0e) {
664 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr ppr rejected: %0x\n",
665 		    DEVNAME(sc), letoh32(pg0.information));
666 		return (EAGAIN);
667 	}
668 
669 	switch(pg0.neg_period) {
670 	case 0x08:
671 		period = 160;
672 		break;
673 	case 0x09:
674 		period = 80;
675 		break;
676 	case 0x0a:
677 		period = 40;
678 		break;
679 	case 0x0b:
680 		period = 20;
681 		break;
682 	case 0x0c:
683 		period = 10;
684 		break;
685 	default:
686 		period = 0;
687 		break;
688 	}
689 
690 	printf("%s: %s %d %s at %dMHz width %dbit offset %d "
691 	    "QAS %d DT %d IU %d\n", DEVNAME(sc), raid ? "phys disk" : "target",
692 	    id, period ? "Sync" : "Async", period,
693 	    (pg0.neg_params2 & MPI_CFG_SPI_DEV_0_NEGPARAMS_WIDTH_WIDE) ? 16 : 8,
694 	    pg0.neg_offset,
695 	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_QAS) ? 1 : 0,
696 	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_DUALXFERS) ? 1 : 0,
697 	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_PACKETIZED) ? 1 : 0);
698 
699 	return (0);
700 }
701 
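/*
 * Issue a bare INQUIRY against a target to force the IOC to
 * renegotiate using the parameters we just wrote.  The request, SGE,
 * inquiry data and sense buffer all live inside the ccb's request
 * frame, so no separate DMA buffer is needed.
 */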
702 int
703 mpi_inq(struct mpi_softc *sc, u_int16_t target, int physdisk)
704 {
705 	struct mpi_ccb			*ccb;
706 	struct scsi_inquiry		inq;
707 	struct {
708 		struct mpi_msg_scsi_io		io;
709 		struct mpi_sge			sge;
710 		struct scsi_inquiry_data	inqbuf;
711 		struct scsi_sense_data		sense;
712 	} __packed			*bundle;
713 	struct mpi_msg_scsi_io		*io;
714 	struct mpi_sge			*sge;
715 	u_int64_t			addr;
716 
717 	DNPRINTF(MPI_D_PPR, "%s: mpi_inq\n", DEVNAME(sc));
718 
719 	bzero(&inq, sizeof(inq));
720 	inq.opcode = INQUIRY;
721 	_lto2b(sizeof(struct scsi_inquiry_data), inq.length);
722 
723 	ccb = mpi_get_ccb(sc);
724 	if (ccb == NULL)
725 		return (1);
726 
727 	ccb->ccb_done = mpi_empty_done;
728 
729 	bundle = ccb->ccb_cmd;
730 	io = &bundle->io;
731 	sge = &bundle->sge;
732 
733 	io->function = physdisk ? MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH :
734 	    MPI_FUNCTION_SCSI_IO_REQUEST;
735 	/*
736 	 * bus is always 0
737 	 * io->bus = htole16(sc->sc_bus);
738 	 */
739 	io->target_id = target;
740 
741 	io->cdb_length = sizeof(inq);
742 	io->sense_buf_len = sizeof(struct scsi_sense_data);
743 	io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64;
744 
745 	io->msg_context = htole32(ccb->ccb_id);
746 
747 	/*
748 	 * always lun 0
749 	 * io->lun[0] = htobe16(link->lun);
750 	 */
751 
752 	io->direction = MPI_SCSIIO_DIR_READ;
753 	io->tagging = MPI_SCSIIO_ATTR_NO_DISCONNECT;
754 
755 	bcopy(&inq, io->cdb, sizeof(inq));
756 
757 	io->data_length = htole32(sizeof(struct scsi_inquiry_data));
758 
759 	io->sense_buf_low_addr = htole32(ccb->ccb_cmd_dva +
760 	    ((u_int8_t *)&bundle->sense - (u_int8_t *)bundle));
761 
762 	sge->sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64 |
763 	    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL |
764 	    (u_int32_t)sizeof(inq));
765 
766 	addr = ccb->ccb_cmd_dva +
767 	    ((u_int8_t *)&bundle->inqbuf - (u_int8_t *)bundle);
768 	sge->sg_hi_addr = htole32((u_int32_t)(addr >> 32));
769 	sge->sg_lo_addr = htole32((u_int32_t)addr);
770 
771 	if (mpi_poll(sc, ccb, 5000) != 0)
772 		return (1);
773 
774 	if (ccb->ccb_rcb != NULL)
775 		mpi_push_reply(sc, ccb->ccb_rcb->rcb_reply_dva);
776 
777 	mpi_put_ccb(sc, ccb);
778 
779 	return (0);
780 }
781 
782 void
783 mpi_fc_info(struct mpi_softc *sc)
784 {
785 	struct mpi_cfg_hdr		hdr;
786 	struct mpi_cfg_fc_port_pg0	pg;
787 
788 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 0, 0,
789 	    &hdr) != 0) {
790 		DNPRINTF(MPI_D_MISC, "%s: mpi_fc_info unable to fetch "
791 		    "FC port header 0\n", DEVNAME(sc));
792 		return;
793 	}
794 
795 	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg, sizeof(pg)) != 0) {
796 		DNPRINTF(MPI_D_MISC, "%s: mpi_fc_info unable to fetch "
797 		    "FC port page 0\n",
798 		    DEVNAME(sc));
799 		return;
800 	}
801 
802 	sc->sc_link.port_wwn = letoh64(pg.wwpn);
803 	sc->sc_link.node_wwn = letoh64(pg.wwnn);
804 }
805 
806 void
807 mpi_detach(struct mpi_softc *sc)
808 {
809 
810 }
811 
812 int
813 mpi_intr(void *arg)
814 {
815 	struct mpi_softc		*sc = arg;
816 	u_int32_t			reg;
817 	int				rv = 0;
818 
819 	while ((reg = mpi_pop_reply(sc)) != 0xffffffff) {
820 		mpi_reply(sc, reg);
821 		rv = 1;
822 	}
823 
824 	return (rv);
825 }
826 
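/*
 * Replies come off the reply queue in two forms.  Address replies
 * carry the reply frame's DMA address shifted right by one bit and
 * point into the sc_replies page, where the full reply (including the
 * message context used to find the ccb) is read.  Context replies
 * carry the ccb id directly and have no reply frame, so ccb_rcb is
 * left NULL.
 */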
827 int
828 mpi_reply(struct mpi_softc *sc, u_int32_t reg)
829 {
830 	struct mpi_ccb			*ccb;
831 	struct mpi_rcb			*rcb = NULL;
832 	struct mpi_msg_reply		*reply = NULL;
833 	u_int32_t			reply_dva;
834 	int				id;
835 	int				i;
836 
837 	DNPRINTF(MPI_D_INTR, "%s: mpi_reply reg: 0x%08x\n", DEVNAME(sc), reg);
838 
839 	if (reg & MPI_REPLY_QUEUE_ADDRESS) {
840 		bus_dmamap_sync(sc->sc_dmat,
841 		    MPI_DMA_MAP(sc->sc_replies), 0, PAGE_SIZE,
842 		    BUS_DMASYNC_POSTREAD);
843 
844 		reply_dva = (reg & MPI_REPLY_QUEUE_ADDRESS_MASK) << 1;
845 
846 		i = (reply_dva - (u_int32_t)MPI_DMA_DVA(sc->sc_replies)) /
847 		    MPI_REPLY_SIZE;
848 		rcb = &sc->sc_rcbs[i];
849 		reply = rcb->rcb_reply;
850 
851 		id = letoh32(reply->msg_context);
852 
853 		bus_dmamap_sync(sc->sc_dmat,
854 		    MPI_DMA_MAP(sc->sc_replies), 0, PAGE_SIZE,
855 		    BUS_DMASYNC_PREREAD);
856 	} else {
857 		switch (reg & MPI_REPLY_QUEUE_TYPE_MASK) {
858 		case MPI_REPLY_QUEUE_TYPE_INIT:
859 			id = reg & MPI_REPLY_QUEUE_CONTEXT;
860 			break;
861 
862 		default:
863 			panic("%s: unsupported context reply",
864 			    DEVNAME(sc));
865 		}
866 	}
867 
868 	DNPRINTF(MPI_D_INTR, "%s: mpi_reply id: %d reply: %p\n",
869 	    DEVNAME(sc), id, reply);
870 
871 	ccb = &sc->sc_ccbs[id];
872 
873 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests),
874 	    ccb->ccb_offset, MPI_REQUEST_SIZE,
875 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
876 	ccb->ccb_state = MPI_CCB_READY;
877 	ccb->ccb_rcb = rcb;
878 
879 	ccb->ccb_done(ccb);
880 
881 	return (id);
882 }
883 
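/*
 * Single segment DMA allocation helper: create the map, allocate and
 * map the memory, then load it, unwinding in reverse order if any step
 * fails.  The request and reply queues are carved out of memory
 * obtained here.
 */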
884 struct mpi_dmamem *
885 mpi_dmamem_alloc(struct mpi_softc *sc, size_t size)
886 {
887 	struct mpi_dmamem		*mdm;
888 	int				nsegs;
889 
890 	mdm = malloc(sizeof(struct mpi_dmamem), M_DEVBUF, M_NOWAIT | M_ZERO);
891 	if (mdm == NULL)
892 		return (NULL);
893 
894 	mdm->mdm_size = size;
895 
896 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
897 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
898 		goto mdmfree;
899 
900 	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg,
901 	    1, &nsegs, BUS_DMA_NOWAIT) != 0)
902 		goto destroy;
903 
904 	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
905 	    &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0)
906 		goto free;
907 
908 	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
909 	    NULL, BUS_DMA_NOWAIT) != 0)
910 		goto unmap;
911 
912 	bzero(mdm->mdm_kva, size);
913 
914 	DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_alloc size: %d mdm: %#x "
915 	    "map: %#x nsegs: %d segs: %#x kva: %x\n",
916 	    DEVNAME(sc), size, mdm, mdm->mdm_map, nsegs, mdm->mdm_seg,
	    mdm->mdm_kva);
917 
918 	return (mdm);
919 
920 unmap:
921 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
922 free:
923 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
924 destroy:
925 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
926 mdmfree:
927 	free(mdm, M_DEVBUF);
928 
929 	return (NULL);
930 }
931 
932 void
933 mpi_dmamem_free(struct mpi_softc *sc, struct mpi_dmamem *mdm)
934 {
935 	DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_free %#x\n", DEVNAME(sc), mdm);
936 
937 	bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map);
938 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
939 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
940 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
941 	free(mdm, M_DEVBUF);
942 }
943 
944 int
945 mpi_alloc_ccbs(struct mpi_softc *sc)
946 {
947 	struct mpi_ccb			*ccb;
948 	u_int8_t			*cmd;
949 	int				i;
950 
951 	TAILQ_INIT(&sc->sc_ccb_free);
952 
953 	sc->sc_ccbs = malloc(sizeof(struct mpi_ccb) * sc->sc_maxcmds,
954 	    M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
955 	if (sc->sc_ccbs == NULL) {
956 		printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
957 		return (1);
958 	}
959 
960 	sc->sc_requests = mpi_dmamem_alloc(sc,
961 	    MPI_REQUEST_SIZE * sc->sc_maxcmds);
962 	if (sc->sc_requests == NULL) {
963 		printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
964 		goto free_ccbs;
965 	}
966 	cmd = MPI_DMA_KVA(sc->sc_requests);
967 	bzero(cmd, MPI_REQUEST_SIZE * sc->sc_maxcmds);
968 
969 	for (i = 0; i < sc->sc_maxcmds; i++) {
970 		ccb = &sc->sc_ccbs[i];
971 
972 		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS,
973 		    sc->sc_max_sgl_len, MAXPHYS, 0,
974 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
975 		    &ccb->ccb_dmamap) != 0) {
976 			printf("%s: unable to create dma map\n", DEVNAME(sc));
977 			goto free_maps;
978 		}
979 
980 		ccb->ccb_sc = sc;
981 		ccb->ccb_id = i;
982 		ccb->ccb_offset = MPI_REQUEST_SIZE * i;
983 
984 		ccb->ccb_cmd = &cmd[ccb->ccb_offset];
985 		ccb->ccb_cmd_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_requests) +
986 		    ccb->ccb_offset;
987 
988 		DNPRINTF(MPI_D_CCB, "%s: mpi_alloc_ccbs(%d) ccb: %#x map: %#x "
989 		    "sc: %#x id: %#x offs: %#x cmd: %#x dva: %#x\n",
990 		    DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc,
991 		    ccb->ccb_id, ccb->ccb_offset, ccb->ccb_cmd,
992 		    ccb->ccb_cmd_dva);
993 
994 		mpi_put_ccb(sc, ccb);
995 	}
996 
997 	return (0);
998 
999 free_maps:
1000 	while ((ccb = mpi_get_ccb(sc)) != NULL)
1001 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
1002 
1003 	mpi_dmamem_free(sc, sc->sc_requests);
1004 free_ccbs:
1005 	free(sc->sc_ccbs, M_DEVBUF);
1006 
1007 	return (1);
1008 }
1009 
1010 struct mpi_ccb *
1011 mpi_get_ccb(struct mpi_softc *sc)
1012 {
1013 	struct mpi_ccb			*ccb;
1014 
1015 	ccb = TAILQ_FIRST(&sc->sc_ccb_free);
1016 	if (ccb == NULL) {
1017 		DNPRINTF(MPI_D_CCB, "%s: mpi_get_ccb == NULL\n", DEVNAME(sc));
1018 		return (NULL);
1019 	}
1020 
1021 	TAILQ_REMOVE(&sc->sc_ccb_free, ccb, ccb_link);
1022 
1023 	ccb->ccb_state = MPI_CCB_READY;
1024 
1025 	DNPRINTF(MPI_D_CCB, "%s: mpi_get_ccb %#x\n", DEVNAME(sc), ccb);
1026 
1027 	return (ccb);
1028 }
1029 
1030 void
1031 mpi_put_ccb(struct mpi_softc *sc, struct mpi_ccb *ccb)
1032 {
1033 	DNPRINTF(MPI_D_CCB, "%s: mpi_put_ccb %#x\n", DEVNAME(sc), ccb);
1034 
1035 	ccb->ccb_state = MPI_CCB_FREE;
1036 	ccb->ccb_xs = NULL;
1037 	ccb->ccb_done = NULL;
1038 	bzero(ccb->ccb_cmd, MPI_REQUEST_SIZE);
1039 	TAILQ_INSERT_TAIL(&sc->sc_ccb_free, ccb, ccb_link);
1040 }
1041 
1042 int
1043 mpi_alloc_replies(struct mpi_softc *sc)
1044 {
1045 	DNPRINTF(MPI_D_MISC, "%s: mpi_alloc_replies\n", DEVNAME(sc));
1046 
1047 	sc->sc_rcbs = malloc(MPI_REPLY_COUNT * sizeof(struct mpi_rcb),
1048 	    M_DEVBUF, M_WAITOK|M_CANFAIL);
1049 	if (sc->sc_rcbs == NULL)
1050 		return (1);
1051 
1052 	sc->sc_replies = mpi_dmamem_alloc(sc, PAGE_SIZE);
1053 	if (sc->sc_replies == NULL) {
1054 		free(sc->sc_rcbs, M_DEVBUF);
1055 		return (1);
1056 	}
1057 
1058 	return (0);
1059 }
1060 
1061 void
1062 mpi_push_replies(struct mpi_softc *sc)
1063 {
1064 	struct mpi_rcb			*rcb;
1065 	char				*kva = MPI_DMA_KVA(sc->sc_replies);
1066 	int				i;
1067 
1068 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies),
1069 	    0, PAGE_SIZE, BUS_DMASYNC_PREREAD);
1070 
1071 	for (i = 0; i < MPI_REPLY_COUNT; i++) {
1072 		rcb = &sc->sc_rcbs[i];
1073 
1074 		rcb->rcb_reply = kva + MPI_REPLY_SIZE * i;
1075 		rcb->rcb_reply_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_replies) +
1076 		    MPI_REPLY_SIZE * i;
1077 		mpi_push_reply(sc, rcb->rcb_reply_dva);
1078 	}
1079 }
1080 
1081 void
1082 mpi_start(struct mpi_softc *sc, struct mpi_ccb *ccb)
1083 {
1084 	DNPRINTF(MPI_D_RW, "%s: mpi_start %#x\n", DEVNAME(sc),
1085 	    ccb->ccb_cmd_dva);
1086 
1087 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests),
1088 	    ccb->ccb_offset, MPI_REQUEST_SIZE,
1089 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1090 
1091 	ccb->ccb_state = MPI_CCB_QUEUED;
1092 	mpi_write(sc, MPI_REQ_QUEUE, ccb->ccb_cmd_dva);
1093 }
1094 
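/*
 * Polled completion: spin on the reply queue, dispatching every reply
 * that turns up (it may belong to some other ccb), until the reply for
 * the ccb we care about arrives or the timeout (roughly milliseconds)
 * expires.
 */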
1095 int
1096 mpi_complete(struct mpi_softc *sc, struct mpi_ccb *ccb, int timeout)
1097 {
1098 	u_int32_t			reg;
1099 	int				id = -1;
1100 
1101 	DNPRINTF(MPI_D_INTR, "%s: mpi_complete timeout %d\n", DEVNAME(sc),
1102 	    timeout);
1103 
1104 	do {
1105 		reg = mpi_pop_reply(sc);
1106 		if (reg == 0xffffffff) {
1107 			if (timeout-- == 0)
1108 				return (1);
1109 
1110 			delay(1000);
1111 			continue;
1112 		}
1113 
1114 		id = mpi_reply(sc, reg);
1115 
1116 	} while (ccb->ccb_id != id);
1117 
1118 	return (0);
1119 }
1120 
1121 int
1122 mpi_poll(struct mpi_softc *sc, struct mpi_ccb *ccb, int timeout)
1123 {
1124 	int				error;
1125 	int				s;
1126 
1127 	DNPRINTF(MPI_D_CMD, "%s: mpi_poll\n", DEVNAME(sc));
1128 
1129 	s = splbio();
1130 	mpi_start(sc, ccb);
1131 	error = mpi_complete(sc, ccb, timeout);
1132 	splx(s);
1133 
1134 	return (error);
1135 }
1136 
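/*
 * scsi_adapter entry point.  Oversized CDBs are failed straight away
 * with ILLEGAL REQUEST sense; otherwise the xfer is bound to a ccb and
 * either polled to completion (SCSI_POLL) or written to the request
 * queue.
 */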
1137 int
1138 mpi_scsi_cmd(struct scsi_xfer *xs)
1139 {
1140 	struct scsi_link		*link = xs->sc_link;
1141 	struct mpi_softc		*sc = link->adapter_softc;
1142 	struct mpi_ccb			*ccb;
1143 	struct mpi_ccb_bundle		*mcb;
1144 	struct mpi_msg_scsi_io		*io;
1145 	int				s;
1146 
1147 	DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd\n", DEVNAME(sc));
1148 
1149 	if (xs->cmdlen > MPI_CDB_LEN) {
1150 		DNPRINTF(MPI_D_CMD, "%s: CDB too big %d\n",
1151 		    DEVNAME(sc), xs->cmdlen);
1152 		bzero(&xs->sense, sizeof(xs->sense));
1153 		xs->sense.error_code = SSD_ERRCODE_VALID | 0x70;
1154 		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
1155 		xs->sense.add_sense_code = 0x20;
1156 		xs->error = XS_SENSE;
1157 		xs->flags |= ITSDONE;
1158 		s = splbio();
1159 		scsi_done(xs);
1160 		splx(s);
1161 		return (COMPLETE);
1162 	}
1163 
1164 	s = splbio();
1165 	ccb = mpi_get_ccb(sc);
1166 	splx(s);
1167 	if (ccb == NULL)
1168 		return (NO_CCB);
1169 
1170 	DNPRINTF(MPI_D_CMD, "%s: ccb_id: %d xs->flags: 0x%x\n",
1171 	    DEVNAME(sc), ccb->ccb_id, xs->flags);
1172 
1173 	ccb->ccb_xs = xs;
1174 	ccb->ccb_done = mpi_scsi_cmd_done;
1175 
1176 	mcb = ccb->ccb_cmd;
1177 	io = &mcb->mcb_io;
1178 
1179 	io->function = MPI_FUNCTION_SCSI_IO_REQUEST;
1180 	/*
1181 	 * bus is always 0
1182 	 * io->bus = htole16(sc->sc_bus);
1183 	 */
1184 	io->target_id = link->target;
1185 
1186 	io->cdb_length = xs->cmdlen;
1187 	io->sense_buf_len = sizeof(xs->sense);
1188 	io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64;
1189 
1190 	io->msg_context = htole32(ccb->ccb_id);
1191 
1192 	io->lun[0] = htobe16(link->lun);
1193 
1194 	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
1195 	case SCSI_DATA_IN:
1196 		io->direction = MPI_SCSIIO_DIR_READ;
1197 		break;
1198 	case SCSI_DATA_OUT:
1199 		io->direction = MPI_SCSIIO_DIR_WRITE;
1200 		break;
1201 	default:
1202 		io->direction = MPI_SCSIIO_DIR_NONE;
1203 		break;
1204 	}
1205 
1206 	if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SCSI &&
1207 	    (link->quirks & SDEV_NOTAGS))
1208 		io->tagging = MPI_SCSIIO_ATTR_UNTAGGED;
1209 	else
1210 		io->tagging = MPI_SCSIIO_ATTR_SIMPLE_Q;
1211 
1212 	bcopy(xs->cmd, io->cdb, xs->cmdlen);
1213 
1214 	io->data_length = htole32(xs->datalen);
1215 
1216 	io->sense_buf_low_addr = htole32(ccb->ccb_cmd_dva +
1217 	    ((u_int8_t *)&mcb->mcb_sense - (u_int8_t *)mcb));
1218 
1219 	if (mpi_load_xs(ccb) != 0) {
1220 		xs->error = XS_DRIVER_STUFFUP;
1221 		xs->flags |= ITSDONE;
1222 		s = splbio();
1223 		mpi_put_ccb(sc, ccb);
1224 		scsi_done(xs);
1225 		splx(s);
1226 		return (COMPLETE);
1227 	}
1228 
1229 	timeout_set(&xs->stimeout, mpi_timeout_xs, ccb);
1230 
1231 	if (xs->flags & SCSI_POLL) {
1232 		if (mpi_poll(sc, ccb, xs->timeout) != 0) {
1233 			xs->error = XS_DRIVER_STUFFUP;
1234 			xs->flags |= ITSDONE;
1235 			s = splbio();
1236 			scsi_done(xs);
1237 			splx(s);
1238 		}
1239 		return (COMPLETE);
1240 	}
1241 
1242 	s = splbio();
1243 	mpi_start(sc, ccb);
1244 	splx(s);
1245 	return (SUCCESSFULLY_QUEUED);
1246 }
1247 
1248 void
1249 mpi_scsi_cmd_done(struct mpi_ccb *ccb)
1250 {
1251 	struct mpi_softc		*sc = ccb->ccb_sc;
1252 	struct scsi_xfer		*xs = ccb->ccb_xs;
1253 	struct mpi_ccb_bundle		*mcb = ccb->ccb_cmd;
1254 	bus_dmamap_t			dmap = ccb->ccb_dmamap;
1255 	struct mpi_msg_scsi_io_error	*sie;
1256 
1257 	if (xs->datalen != 0) {
1258 		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
1259 		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
1260 		    BUS_DMASYNC_POSTWRITE);
1261 
1262 		bus_dmamap_unload(sc->sc_dmat, dmap);
1263 	}
1264 
1265 	/* timeout_del */
1266 	xs->error = XS_NOERROR;
1267 	xs->resid = 0;
1268 	xs->flags |= ITSDONE;
1269 
1270 	if (ccb->ccb_rcb == NULL) {
1271 		/* no scsi error, we're ok so drop out early */
1272 		xs->status = SCSI_OK;
1273 		mpi_put_ccb(sc, ccb);
1274 		scsi_done(xs);
1275 		return;
1276 	}
1277 
1278 	sie = ccb->ccb_rcb->rcb_reply;
1279 
1280 	DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd_done xs cmd: 0x%02x len: %d "
1281 	    "flags 0x%x\n", DEVNAME(sc), xs->cmd->opcode, xs->datalen,
1282 	    xs->flags);
1283 	DNPRINTF(MPI_D_CMD, "%s:  target_id: %d bus: %d msg_length: %d "
1284 	    "function: 0x%02x\n", DEVNAME(sc), sie->target_id, sie->bus,
1285 	    sie->msg_length, sie->function);
1286 	DNPRINTF(MPI_D_CMD, "%s:  cdb_length: %d sense_buf_length: %d "
1287 	    "msg_flags: 0x%02x\n", DEVNAME(sc), sie->cdb_length,
1288 	    sie->sense_buf_len, sie->msg_flags);
1289 	DNPRINTF(MPI_D_CMD, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
1290 	    letoh32(sie->msg_context));
1291 	DNPRINTF(MPI_D_CMD, "%s:  scsi_status: 0x%02x scsi_state: 0x%02x "
1292 	    "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status,
1293 	    sie->scsi_state, letoh16(sie->ioc_status));
1294 	DNPRINTF(MPI_D_CMD, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
1295 	    letoh32(sie->ioc_loginfo));
1296 	DNPRINTF(MPI_D_CMD, "%s:  transfer_count: %d\n", DEVNAME(sc),
1297 	    letoh32(sie->transfer_count));
1298 	DNPRINTF(MPI_D_CMD, "%s:  sense_count: %d\n", DEVNAME(sc),
1299 	    letoh32(sie->sense_count));
1300 	DNPRINTF(MPI_D_CMD, "%s:  response_info: 0x%08x\n", DEVNAME(sc),
1301 	    letoh32(sie->response_info));
1302 	DNPRINTF(MPI_D_CMD, "%s:  tag: 0x%04x\n", DEVNAME(sc),
1303 	    letoh16(sie->tag));
1304 
1305 	xs->status = sie->scsi_status;
1306 	switch (letoh16(sie->ioc_status)) {
1307 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
1308 		xs->resid = xs->datalen - letoh32(sie->transfer_count);
1309 		if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_NO_SCSI_STATUS) {
1310 			xs->error = XS_DRIVER_STUFFUP;
1311 			break;
1312 		}
1313 		/* FALLTHROUGH */
1314 	case MPI_IOCSTATUS_SUCCESS:
1315 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
1316 		switch (xs->status) {
1317 		case SCSI_OK:
1318 			xs->resid = 0;
1319 			break;
1320 
1321 		case SCSI_CHECK:
1322 			xs->error = XS_SENSE;
1323 			break;
1324 
1325 		case SCSI_BUSY:
1326 		case SCSI_QUEUE_FULL:
1327 			xs->error = XS_BUSY;
1328 			break;
1329 
1330 		default:
1331 			xs->error = XS_DRIVER_STUFFUP;
1332 			break;
1333 		}
1334 		break;
1335 
1336 	case MPI_IOCSTATUS_BUSY:
1337 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
1338 		xs->error = XS_BUSY;
1339 		break;
1340 
1341 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
1342 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
1343 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1344 		xs->error = XS_SELTIMEOUT;
1345 		break;
1346 
1347 	default:
1348 		xs->error = XS_DRIVER_STUFFUP;
1349 		break;
1350 	}
1351 
1352 	if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_AUTOSENSE_VALID)
1353 		bcopy(&mcb->mcb_sense, &xs->sense, sizeof(xs->sense));
1354 
1355 	DNPRINTF(MPI_D_CMD, "%s:  xs err: 0x%02x status: %d\n", DEVNAME(sc),
1356 	    xs->error, xs->status);
1357 
1358 	mpi_push_reply(sc, ccb->ccb_rcb->rcb_reply_dva);
1359 	mpi_put_ccb(sc, ccb);
1360 	scsi_done(xs);
1361 }
1362 
1363 void
1364 mpi_timeout_xs(void *arg)
1365 {
1366 	/* XXX */
1367 }
1368 
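/*
 * Build the scatter/gather list for an xfer.  The first
 * sc_first_sgl_len simple SGEs fit in the request frame after the io
 * command; if more segments are needed, the last slot of each run is
 * turned into a chain element pointing at the next run of up to
 * sc_chain_len SGEs, which also live inside the ccb's request frame.
 */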
1369 int
1370 mpi_load_xs(struct mpi_ccb *ccb)
1371 {
1372 	struct mpi_softc		*sc = ccb->ccb_sc;
1373 	struct scsi_xfer		*xs = ccb->ccb_xs;
1374 	struct mpi_ccb_bundle		*mcb = ccb->ccb_cmd;
1375 	struct mpi_msg_scsi_io		*io = &mcb->mcb_io;
1376 	struct mpi_sge			*sge, *nsge = &mcb->mcb_sgl[0];
1377 	struct mpi_sge			*ce = NULL, *nce;
1378 	u_int64_t			ce_dva;
1379 	bus_dmamap_t			dmap = ccb->ccb_dmamap;
1380 	u_int32_t			addr, flags;
1381 	int				i, error;
1382 
1383 	if (xs->datalen == 0) {
1384 		nsge->sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE |
1385 		    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL);
1386 		return (0);
1387 	}
1388 
1389 	error = bus_dmamap_load(sc->sc_dmat, dmap,
1390 	    xs->data, xs->datalen, NULL,
1391 	    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
1392 	if (error) {
1393 		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
1394 		return (1);
1395 	}
1396 
1397 	flags = MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64;
1398 	if (xs->flags & SCSI_DATA_OUT)
1399 		flags |= MPI_SGE_FL_DIR_OUT;
1400 
1401 	if (dmap->dm_nsegs > sc->sc_first_sgl_len) {
1402 		ce = &mcb->mcb_sgl[sc->sc_first_sgl_len - 1];
1403 		io->chain_offset = ((u_int8_t *)ce - (u_int8_t *)io) / 4;
1404 	}
1405 
1406 	for (i = 0; i < dmap->dm_nsegs; i++) {
1407 
1408 		if (nsge == ce) {
1409 			nsge++;
1410 			sge->sg_hdr |= htole32(MPI_SGE_FL_LAST);
1411 
1412 			DNPRINTF(MPI_D_DMA, "%s:   - 0x%08x 0x%08x 0x%08x\n",
1413 			    DEVNAME(sc), sge->sg_hdr,
1414 			    sge->sg_hi_addr, sge->sg_lo_addr);
1415 
1416 			if ((dmap->dm_nsegs - i) > sc->sc_chain_len) {
1417 				nce = &nsge[sc->sc_chain_len - 1];
1418 				addr = ((u_int8_t *)nce - (u_int8_t *)nsge) / 4;
1419 				addr = addr << 16 |
1420 				    sizeof(struct mpi_sge) * sc->sc_chain_len;
1421 			} else {
1422 				nce = NULL;
1423 				addr = sizeof(struct mpi_sge) *
1424 				    (dmap->dm_nsegs - i);
1425 			}
1426 
1427 			ce->sg_hdr = htole32(MPI_SGE_FL_TYPE_CHAIN |
1428 			    MPI_SGE_FL_SIZE_64 | addr);
1429 
1430 			ce_dva = ccb->ccb_cmd_dva +
1431 			    ((u_int8_t *)nsge - (u_int8_t *)mcb);
1432 
1433 			addr = (u_int32_t)(ce_dva >> 32);
1434 			ce->sg_hi_addr = htole32(addr);
1435 			addr = (u_int32_t)ce_dva;
1436 			ce->sg_lo_addr = htole32(addr);
1437 
1438 			DNPRINTF(MPI_D_DMA, "%s:  ce: 0x%08x 0x%08x 0x%08x\n",
1439 			    DEVNAME(sc), ce->sg_hdr, ce->sg_hi_addr,
1440 			    ce->sg_lo_addr);
1441 
1442 			ce = nce;
1443 		}
1444 
1445 		DNPRINTF(MPI_D_DMA, "%s:  %d: %d 0x%016llx\n", DEVNAME(sc),
1446 		    i, dmap->dm_segs[i].ds_len,
1447 		    (u_int64_t)dmap->dm_segs[i].ds_addr);
1448 
1449 		sge = nsge;
1450 
1451 		sge->sg_hdr = htole32(flags | dmap->dm_segs[i].ds_len);
1452 		addr = (u_int32_t)((u_int64_t)dmap->dm_segs[i].ds_addr >> 32);
1453 		sge->sg_hi_addr = htole32(addr);
1454 		addr = (u_int32_t)dmap->dm_segs[i].ds_addr;
1455 		sge->sg_lo_addr = htole32(addr);
1456 
1457 		DNPRINTF(MPI_D_DMA, "%s:  %d: 0x%08x 0x%08x 0x%08x\n",
1458 		    DEVNAME(sc), i, sge->sg_hdr, sge->sg_hi_addr,
1459 		    sge->sg_lo_addr);
1460 
1461 		nsge = sge + 1;
1462 	}
1463 
1464 	/* terminate list */
1465 	sge->sg_hdr |= htole32(MPI_SGE_FL_LAST | MPI_SGE_FL_EOB |
1466 	    MPI_SGE_FL_EOL);
1467 
1468 	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
1469 	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
1470 	    BUS_DMASYNC_PREWRITE);
1471 
1472 	return (0);
1473 }
1474 
1475 void
1476 mpi_minphys(struct buf *bp, struct scsi_link *sl)
1477 {
1478 	/* XXX */
1479 	if (bp->b_bcount > MAXPHYS)
1480 		bp->b_bcount = MAXPHYS;
1481 	minphys(bp);
1482 }
1483 
1484 int
1485 mpi_scsi_probe(struct scsi_link *link)
1486 {
1487 	struct mpi_softc		*sc = link->adapter_softc;
1488 	struct mpi_ecfg_hdr		ehdr;
1489 	struct mpi_cfg_sas_dev_pg0	pg0;
1490 	u_int32_t			address;
1491 
1492 	if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SAS)
1493 		return (0);
1494 
1495 	address = MPI_CFG_SAS_DEV_ADDR_BUS | link->target;
1496 
1497 	if (mpi_ecfg_header(sc, MPI_CONFIG_REQ_EXTPAGE_TYPE_SAS_DEVICE, 0,
1498 	    address, &ehdr) != 0)
1499 		return (EIO);
1500 
1501 	if (mpi_ecfg_page(sc, address, &ehdr, 1, &pg0, sizeof(pg0)) != 0)
1502 		return (0);
1503 
1504 	DNPRINTF(MPI_D_MISC, "%s: mpi_scsi_probe sas dev pg 0 for target %d:\n",
1505 	    DEVNAME(sc), link->target);
1506 	DNPRINTF(MPI_D_MISC, "%s:  slot: 0x%04x enc_handle: 0x%04x\n",
1507 	    DEVNAME(sc), letoh16(pg0.slot), letoh16(pg0.enc_handle));
1508 	DNPRINTF(MPI_D_MISC, "%s:  sas_addr: 0x%016llx\n", DEVNAME(sc),
1509 	    letoh64(pg0.sas_addr));
1510 	DNPRINTF(MPI_D_MISC, "%s:  parent_dev_handle: 0x%04x phy_num: 0x%02x "
1511 	    "access_status: 0x%02x\n", DEVNAME(sc),
1512 	    letoh16(pg0.parent_dev_handle), pg0.phy_num, pg0.access_status);
1513 	DNPRINTF(MPI_D_MISC, "%s:  dev_handle: 0x%04x "
1514 	    "bus: 0x%02x target: 0x%02x\n", DEVNAME(sc),
1515 	    letoh16(pg0.dev_handle), pg0.bus, pg0.target);
1516 	DNPRINTF(MPI_D_MISC, "%s:  device_info: 0x%08x\n", DEVNAME(sc),
1517 	    letoh32(pg0.device_info));
1518 	DNPRINTF(MPI_D_MISC, "%s:  flags: 0x%04x physical_port: 0x%02x\n",
1519 	    DEVNAME(sc), letoh16(pg0.flags), pg0.physical_port);
1520 
1521 	if (ISSET(letoh32(pg0.device_info),
1522 	    MPI_CFG_SAS_DEV_0_DEVINFO_ATAPI_DEVICE)) {
1523 		DNPRINTF(MPI_D_MISC, "%s: target %d is an ATAPI device\n",
1524 		    DEVNAME(sc), link->target);
1525 		link->flags |= SDEV_ATAPI;
1526 		link->quirks |= SDEV_ONLYBIG;
1527 	}
1528 
1529 	return (0);
1530 }
1531 
1532 u_int32_t
1533 mpi_read(struct mpi_softc *sc, bus_size_t r)
1534 {
1535 	u_int32_t			rv;
1536 
1537 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1538 	    BUS_SPACE_BARRIER_READ);
1539 	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
1540 
1541 	DNPRINTF(MPI_D_RW, "%s: mpi_read %#x %#x\n", DEVNAME(sc), r, rv);
1542 
1543 	return (rv);
1544 }
1545 
1546 void
1547 mpi_write(struct mpi_softc *sc, bus_size_t r, u_int32_t v)
1548 {
1549 	DNPRINTF(MPI_D_RW, "%s: mpi_write %#x %#x\n", DEVNAME(sc), r, v);
1550 
1551 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
1552 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1553 	    BUS_SPACE_BARRIER_WRITE);
1554 }
1555 
1556 int
1557 mpi_wait_eq(struct mpi_softc *sc, bus_size_t r, u_int32_t mask,
1558     u_int32_t target)
1559 {
1560 	int				i;
1561 
1562 	DNPRINTF(MPI_D_RW, "%s: mpi_wait_eq %#x %#x %#x\n", DEVNAME(sc), r,
1563 	    mask, target);
1564 
1565 	for (i = 0; i < 10000; i++) {
1566 		if ((mpi_read(sc, r) & mask) == target)
1567 			return (0);
1568 		delay(1000);
1569 	}
1570 
1571 	return (1);
1572 }
1573 
1574 int
1575 mpi_wait_ne(struct mpi_softc *sc, bus_size_t r, u_int32_t mask,
1576     u_int32_t target)
1577 {
1578 	int				i;
1579 
1580 	DNPRINTF(MPI_D_RW, "%s: mpi_wait_ne %#x %#x %#x\n", DEVNAME(sc), r,
1581 	    mask, target);
1582 
1583 	for (i = 0; i < 10000; i++) {
1584 		if ((mpi_read(sc, r) & mask) != target)
1585 			return (0);
1586 		delay(1000);
1587 	}
1588 
1589 	return (1);
1590 }
1591 
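/*
 * Get the IOC into a usable state.  If a PCI peer has already
 * initialised it, leave it alone; READY is good as is; OPERATIONAL or
 * FAULT gets a soft reset (or a hard reset if that fails); RESET is
 * simply waited out.  Give up after a handful of attempts.
 */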
1592 int
1593 mpi_init(struct mpi_softc *sc)
1594 {
1595 	u_int32_t			db;
1596 	int				i;
1597 
1598 	/* spin until the IOC leaves the RESET state */
1599 	if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
1600 	    MPI_DOORBELL_STATE_RESET) != 0) {
1601 		DNPRINTF(MPI_D_MISC, "%s: mpi_init timeout waiting to leave "
1602 		    "reset state\n", DEVNAME(sc));
1603 		return (1);
1604 	}
1605 
1606 	/* check current ownership */
1607 	db = mpi_read_db(sc);
1608 	if ((db & MPI_DOORBELL_WHOINIT) == MPI_DOORBELL_WHOINIT_PCIPEER) {
1609 		DNPRINTF(MPI_D_MISC, "%s: mpi_init initialised by pci peer\n",
1610 		    DEVNAME(sc));
1611 		return (0);
1612 	}
1613 
1614 	for (i = 0; i < 5; i++) {
1615 		switch (db & MPI_DOORBELL_STATE) {
1616 		case MPI_DOORBELL_STATE_READY:
1617 			DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is ready\n",
1618 			    DEVNAME(sc));
1619 			return (0);
1620 
1621 		case MPI_DOORBELL_STATE_OPER:
1622 		case MPI_DOORBELL_STATE_FAULT:
1623 			DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is being "
1624 			    "reset\n" , DEVNAME(sc));
1625 			if (mpi_reset_soft(sc) != 0)
1626 				mpi_reset_hard(sc);
1627 			break;
1628 
1629 		case MPI_DOORBELL_STATE_RESET:
1630 			DNPRINTF(MPI_D_MISC, "%s: mpi_init waiting to come "
1631 			    "out of reset\n", DEVNAME(sc));
1632 			if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
1633 			    MPI_DOORBELL_STATE_RESET) != 0)
1634 				return (1);
1635 			break;
1636 		}
1637 		db = mpi_read_db(sc);
1638 	}
1639 
1640 	return (1);
1641 }
1642 
1643 int
1644 mpi_reset_soft(struct mpi_softc *sc)
1645 {
1646 	DNPRINTF(MPI_D_MISC, "%s: mpi_reset_soft\n", DEVNAME(sc));
1647 
1648 	if (mpi_read_db(sc) & MPI_DOORBELL_INUSE)
1649 		return (1);
1650 
1651 	mpi_write_db(sc,
1652 	    MPI_DOORBELL_FUNCTION(MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET));
1653 	if (mpi_wait_eq(sc, MPI_INTR_STATUS,
1654 	    MPI_INTR_STATUS_IOCDOORBELL, 0) != 0)
1655 		return (1);
1656 
1657 	if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
1658 	    MPI_DOORBELL_STATE_READY) != 0)
1659 		return (1);
1660 
1661 	return (0);
1662 }
1663 
1664 int
1665 mpi_reset_hard(struct mpi_softc *sc)
1666 {
1667 	DNPRINTF(MPI_D_MISC, "%s: mpi_reset_hard\n", DEVNAME(sc));
1668 
1669 	/* enable diagnostic register */
1670 	mpi_write(sc, MPI_WRITESEQ, 0xff);
1671 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_1);
1672 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_2);
1673 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_3);
1674 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_4);
1675 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_5);
1676 
1677 	/* reset ioc */
1678 	mpi_write(sc, MPI_HOSTDIAG, MPI_HOSTDIAG_RESET_ADAPTER);
1679 
1680 	delay(10000);
1681 
1682 	/* disable diagnostic register */
1683 	mpi_write(sc, MPI_WRITESEQ, 0xff);
1684 
1685 	/* restore pci bits? */
1686 
1687 	/* firmware bits? */
1688 	return (0);
1689 }
1690 
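/*
 * Doorbell handshake: the request is clocked into the IOC one dword at
 * a time through the doorbell register, waiting for the IOC to
 * acknowledge the function/dword-count header and then each dword in
 * turn.  Replies come back through the doorbell 16 bits at a time.
 */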
1691 int
1692 mpi_handshake_send(struct mpi_softc *sc, void *buf, size_t dwords)
1693 {
1694 	u_int32_t				*query = buf;
1695 	int					i;
1696 
1697 	/* make sure the doorbell is not in use. */
1698 	if (mpi_read_db(sc) & MPI_DOORBELL_INUSE)
1699 		return (1);
1700 
1701 	/* clear pending doorbell interrupts */
1702 	if (mpi_read_intr(sc) & MPI_INTR_STATUS_DOORBELL)
1703 		mpi_write_intr(sc, 0);
1704 
1705 	/*
1706 	 * first write the doorbell with the handshake function and the
1707 	 * dword count.
1708 	 */
1709 	mpi_write_db(sc, MPI_DOORBELL_FUNCTION(MPI_FUNCTION_HANDSHAKE) |
1710 	    MPI_DOORBELL_DWORDS(dwords));
1711 
1712 	/*
1713 	 * the doorbell used bit will be set because a doorbell function has
1714 	 * started. Wait for the interrupt and then ack it.
1715 	 */
1716 	if (mpi_wait_db_int(sc) != 0)
1717 		return (1);
1718 	mpi_write_intr(sc, 0);
1719 
1720 	/* poll for the acknowledgement. */
1721 	if (mpi_wait_db_ack(sc) != 0)
1722 		return (1);
1723 
1724 	/* write the query through the doorbell. */
1725 	for (i = 0; i < dwords; i++) {
1726 		mpi_write_db(sc, htole32(query[i]));
1727 		if (mpi_wait_db_ack(sc) != 0)
1728 			return (1);
1729 	}
1730 
1731 	return (0);
1732 }
1733 
1734 int
1735 mpi_handshake_recv_dword(struct mpi_softc *sc, u_int32_t *dword)
1736 {
1737 	u_int16_t				*words = (u_int16_t *)dword;
1738 	int					i;
1739 
1740 	for (i = 0; i < 2; i++) {
1741 		if (mpi_wait_db_int(sc) != 0)
1742 			return (1);
1743 		words[i] = letoh16(mpi_read_db(sc) & MPI_DOORBELL_DATA_MASK);
1744 		mpi_write_intr(sc, 0);
1745 	}
1746 
1747 	return (0);
1748 }
1749 
1750 int
1751 mpi_handshake_recv(struct mpi_softc *sc, void *buf, size_t dwords)
1752 {
1753 	struct mpi_msg_reply			*reply = buf;
1754 	u_int32_t				*dbuf = buf, dummy;
1755 	int					i;
1756 
1757 	/* get the first dword so we can read the length out of the header. */
1758 	if (mpi_handshake_recv_dword(sc, &dbuf[0]) != 0)
1759 		return (1);
1760 
1761 	DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dwords: %d reply: %d\n",
1762 	    DEVNAME(sc), dwords, reply->msg_length);
1763 
1764 	/*
1765 	 * the total length, in dwords, is in the message length field of the
1766 	 * reply header.
1767 	 */
1768 	for (i = 1; i < MIN(dwords, reply->msg_length); i++) {
1769 		if (mpi_handshake_recv_dword(sc, &dbuf[i]) != 0)
1770 			return (1);
1771 	}
1772 
1773 	/* if there's extra stuff to come off the ioc, discard it */
1774 	while (i++ < reply->msg_length) {
1775 		if (mpi_handshake_recv_dword(sc, &dummy) != 0)
1776 			return (1);
1777 		DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dummy read: "
1778 		    "0x%08x\n", DEVNAME(sc), dummy);
1779 	}
1780 
1781 	/* wait for the doorbell used bit to be reset and clear the intr */
1782 	if (mpi_wait_db_int(sc) != 0)
1783 		return (1);
1784 	mpi_write_intr(sc, 0);
1785 
1786 	return (0);
1787 }
1788 
1789 void
1790 mpi_empty_done(struct mpi_ccb *ccb)
1791 {
1792 	/* nothing to do */
1793 }
1794 
1795 int
1796 mpi_iocfacts(struct mpi_softc *sc)
1797 {
1798 	struct mpi_msg_iocfacts_request		ifq;
1799 	struct mpi_msg_iocfacts_reply		ifp;
1800 
1801 	DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts\n", DEVNAME(sc));
1802 
1803 	bzero(&ifq, sizeof(ifq));
1804 	bzero(&ifp, sizeof(ifp));
1805 
1806 	ifq.function = MPI_FUNCTION_IOC_FACTS;
1807 	ifq.chain_offset = 0;
1808 	ifq.msg_flags = 0;
1809 	ifq.msg_context = htole32(0xdeadbeef);
1810 
1811 	if (mpi_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
1812 		DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts send failed\n",
1813 		    DEVNAME(sc));
1814 		return (1);
1815 	}
1816 
1817 	if (mpi_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
1818 		DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts recv failed\n",
1819 		    DEVNAME(sc));
1820 		return (1);
1821 	}
1822 
1823 	DNPRINTF(MPI_D_MISC, "%s:  func: 0x%02x len: %d msgver: %d.%d\n",
1824 	    DEVNAME(sc), ifp.function, ifp.msg_length,
1825 	    ifp.msg_version_maj, ifp.msg_version_min);
1826 	DNPRINTF(MPI_D_MISC, "%s:  msgflags: 0x%02x iocnumber: 0x%02x "
1827 	    "hdrver: %d.%d\n", DEVNAME(sc), ifp.msg_flags,
1828 	    ifp.ioc_number, ifp.header_version_maj,
1829 	    ifp.header_version_min);
1830 	DNPRINTF(MPI_D_MISC, "%s:  message context: 0x%08x\n", DEVNAME(sc),
1831 	    letoh32(ifp.msg_context));
1832 	DNPRINTF(MPI_D_MISC, "%s:  iocstatus: 0x%04x ioexcept: 0x%04x\n",
1833 	    DEVNAME(sc), letoh16(ifp.ioc_status),
1834 	    letoh16(ifp.ioc_exceptions));
1835 	DNPRINTF(MPI_D_MISC, "%s:  iocloginfo: 0x%08x\n", DEVNAME(sc),
1836 	    letoh32(ifp.ioc_loginfo));
1837 	DNPRINTF(MPI_D_MISC, "%s:  flags: 0x%02x blocksize: %d whoinit: 0x%02x "
1838 	    "maxchdepth: %d\n", DEVNAME(sc), ifp.flags,
1839 	    ifp.block_size, ifp.whoinit, ifp.max_chain_depth);
1840 	DNPRINTF(MPI_D_MISC, "%s:  reqfrsize: %d replyqdepth: %d\n",
1841 	    DEVNAME(sc), letoh16(ifp.request_frame_size),
1842 	    letoh16(ifp.reply_queue_depth));
1843 	DNPRINTF(MPI_D_MISC, "%s:  productid: 0x%04x\n", DEVNAME(sc),
1844 	    letoh16(ifp.product_id));
1845 	DNPRINTF(MPI_D_MISC, "%s:  hostmfahiaddr: 0x%08x\n", DEVNAME(sc),
1846 	    letoh32(ifp.current_host_mfa_hi_addr));
1847 	DNPRINTF(MPI_D_MISC, "%s:  event_state: 0x%02x number_of_ports: %d "
1848 	    "global_credits: %d\n",
1849 	    DEVNAME(sc), ifp.event_state, ifp.number_of_ports,
1850 	    letoh16(ifp.global_credits));
1851 	DNPRINTF(MPI_D_MISC, "%s:  sensebufhiaddr: 0x%08x\n", DEVNAME(sc),
1852 	    letoh32(ifp.current_sense_buffer_hi_addr));
1853 	DNPRINTF(MPI_D_MISC, "%s:  maxbus: %d maxdev: %d replyfrsize: %d\n",
1854 	    DEVNAME(sc), ifp.max_buses, ifp.max_devices,
1855 	    letoh16(ifp.current_reply_frame_size));
1856 	DNPRINTF(MPI_D_MISC, "%s:  fw_image_size: %d\n", DEVNAME(sc),
1857 	    letoh32(ifp.fw_image_size));
1858 	DNPRINTF(MPI_D_MISC, "%s:  ioc_capabilities: 0x%08x\n", DEVNAME(sc),
1859 	    letoh32(ifp.ioc_capabilities));
1860 	DNPRINTF(MPI_D_MISC, "%s:  fw_version: %d.%d fw_version_unit: 0x%02x "
1861 	    "fw_version_dev: 0x%02x\n", DEVNAME(sc),
1862 	    ifp.fw_version_maj, ifp.fw_version_min,
1863 	    ifp.fw_version_unit, ifp.fw_version_dev);
1864 	DNPRINTF(MPI_D_MISC, "%s:  hi_priority_queue_depth: 0x%04x\n",
1865 	    DEVNAME(sc), letoh16(ifp.hi_priority_queue_depth));
1866 	DNPRINTF(MPI_D_MISC, "%s:  host_page_buffer_sge: hdr: 0x%08x "
1867 	    "addr 0x%08x %08x\n", DEVNAME(sc),
1868 	    letoh32(ifp.host_page_buffer_sge.sg_hdr),
1869 	    letoh32(ifp.host_page_buffer_sge.sg_hi_addr),
1870 	    letoh32(ifp.host_page_buffer_sge.sg_lo_addr));
1871 
1872 	sc->sc_maxcmds = letoh16(ifp.global_credits);
1873 	sc->sc_maxchdepth = ifp.max_chain_depth;
1874 	sc->sc_ioc_number = ifp.ioc_number;
1875 	if (sc->sc_flags & MPI_F_SPI)
1876 		sc->sc_buswidth = 16;
1877 	else
1878 		sc->sc_buswidth =
1879 		    (ifp.max_devices == 0) ? 256 : ifp.max_devices;
1880 	if (ifp.flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)
1881 		sc->sc_fw_len = letoh32(ifp.fw_image_size);
1882 
1883 	/*
1884 	 * scatter-gather elements can be appended to the io cmd as long as
1885 	 * they still fit within the request frame.
1886 	 */
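	/*
	 * Illustrative arithmetic (hypothetical figures, not taken from any
	 * particular IOC): with a 128 byte request frame, a 48 byte SCSI IO
	 * message and 12 byte 64-bit SGEs, (128 - 48) / 12 = 6 SGEs ride in
	 * the request frame, a chain frame holds 128 / 12 = 10 SGEs, and
	 * with MPI_MAX_SGL at 32 the usable total computed below would be
	 * 32 - 1 - (32 - 6) / 10 = 29 SGEs.
	 */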
1887 	sc->sc_first_sgl_len = ((letoh16(ifp.request_frame_size) * 4) -
1888 	    sizeof(struct mpi_msg_scsi_io)) / sizeof(struct mpi_sge);
1889 	DNPRINTF(MPI_D_MISC, "%s:   first sgl len: %d\n", DEVNAME(sc),
1890 	    sc->sc_first_sgl_len);
1891 
1892 	sc->sc_chain_len = (letoh16(ifp.request_frame_size) * 4) /
1893 	    sizeof(struct mpi_sge);
1894 	DNPRINTF(MPI_D_MISC, "%s:   chain len: %d\n", DEVNAME(sc),
1895 	    sc->sc_chain_len);
1896 
1897 	/* the sgl trailing the io cmd loses an entry to the chain element. */
1898 	sc->sc_max_sgl_len = MPI_MAX_SGL - 1;
1899 	/* the sgl chains lose an entry for each chain element */
1900 	sc->sc_max_sgl_len -= (MPI_MAX_SGL - sc->sc_first_sgl_len) /
1901 	    sc->sc_chain_len;
1902 	DNPRINTF(MPI_D_MISC, "%s:   max sgl len: %d\n", DEVNAME(sc),
1903 	    sc->sc_max_sgl_len);
1904 
1905 	/* XXX we're ignoring the max chain depth */
1906 
1907 	return (0);
1908 }
1909 
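/*
 * mpi_iocinit: hand the host's parameters back to the IOC over the doorbell
 * handshake: who is initialising it, the reply frame size, the bus width,
 * the upper 32 bits of the request/sense and reply DMA regions, and the MPI
 * message version (1.2) this driver speaks.
 */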
1910 int
1911 mpi_iocinit(struct mpi_softc *sc)
1912 {
1913 	struct mpi_msg_iocinit_request		iiq;
1914 	struct mpi_msg_iocinit_reply		iip;
1915 	u_int32_t				hi_addr;
1916 
1917 	DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit\n", DEVNAME(sc));
1918 
1919 	bzero(&iiq, sizeof(iiq));
1920 	bzero(&iip, sizeof(iip));
1921 
1922 	iiq.function = MPI_FUNCTION_IOC_INIT;
1923 	iiq.whoinit = MPI_WHOINIT_HOST_DRIVER;
1924 
1925 	iiq.max_devices = (sc->sc_buswidth == 256) ? 0 : sc->sc_buswidth;
1926 	iiq.max_buses = 1;
1927 
1928 	iiq.msg_context = htole32(0xd00fd00f);
1929 
1930 	iiq.reply_frame_size = htole16(MPI_REPLY_SIZE);
1931 
1932 	hi_addr = (u_int32_t)((u_int64_t)MPI_DMA_DVA(sc->sc_requests) >> 32);
1933 	iiq.host_mfa_hi_addr = htole32(hi_addr);
1934 	iiq.sense_buffer_hi_addr = htole32(hi_addr);
1935 
1936 	hi_addr = (u_int32_t)((u_int64_t)MPI_DMA_DVA(sc->sc_replies) >> 32);
1937 	iiq.reply_fifo_host_signalling_addr = htole32(hi_addr);
1938 
1939 	iiq.msg_version_maj = 0x01;
1940 	iiq.msg_version_min = 0x02;
1941 
1942 	iiq.hdr_version_unit = 0x0d;
1943 	iiq.hdr_version_dev = 0x00;
1944 
1945 	if (mpi_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) {
1946 		DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit send failed\n",
1947 		    DEVNAME(sc));
1948 		return (1);
1949 	}
1950 
1951 	if (mpi_handshake_recv(sc, &iip, dwordsof(iip)) != 0) {
1952 		DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit recv failed\n",
1953 		    DEVNAME(sc));
1954 		return (1);
1955 	}
1956 
1957 	DNPRINTF(MPI_D_MISC, "%s:  function: 0x%02x msg_length: %d "
1958 	    "whoinit: 0x%02x\n", DEVNAME(sc), iip.function,
1959 	    iip.msg_length, iip.whoinit);
1960 	DNPRINTF(MPI_D_MISC, "%s:  msg_flags: 0x%02x max_buses: %d "
1961 	    "max_devices: %d flags: 0x%02x\n", DEVNAME(sc), iip.msg_flags,
1962 	    iip.max_buses, iip.max_devices, iip.flags);
1963 	DNPRINTF(MPI_D_MISC, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
1964 	    letoh32(iip.msg_context));
1965 	DNPRINTF(MPI_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
1966 	    letoh16(iip.ioc_status));
1967 	DNPRINTF(MPI_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
1968 	    letoh32(iip.ioc_loginfo));
1969 
1970 	return (0);
1971 }
1972 
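/*
 * mpi_portfacts: query the port over the regular request queue (polled
 * rather than interrupt driven).  The reply identifies the port type
 * (SPI/FC/SAS) and the port's own SCSI ID, which is used as the initiator
 * target ID if none was configured earlier.
 */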
1973 int
1974 mpi_portfacts(struct mpi_softc *sc)
1975 {
1976 	struct mpi_ccb				*ccb;
1977 	struct mpi_msg_portfacts_request	*pfq;
1978 	volatile struct mpi_msg_portfacts_reply	*pfp;
1979 	int					s, rv = 1;
1980 
1981 	DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts\n", DEVNAME(sc));
1982 
1983 	s = splbio();
1984 	ccb = mpi_get_ccb(sc);
1985 	splx(s);
1986 	if (ccb == NULL) {
1987 		DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts ccb_get\n",
1988 		    DEVNAME(sc));
1989 		return (rv);
1990 	}
1991 
1992 	ccb->ccb_done = mpi_empty_done;
1993 	pfq = ccb->ccb_cmd;
1994 
1995 	pfq->function = MPI_FUNCTION_PORT_FACTS;
1996 	pfq->chain_offset = 0;
1997 	pfq->msg_flags = 0;
1998 	pfq->port_number = 0;
1999 	pfq->msg_context = htole32(ccb->ccb_id);
2000 
2001 	if (mpi_poll(sc, ccb, 50000) != 0) {
2002 		DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts poll\n", DEVNAME(sc));
2003 		goto err;
2004 	}
2005 
2006 	if (ccb->ccb_rcb == NULL) {
2007 		DNPRINTF(MPI_D_MISC, "%s: empty portfacts reply\n",
2008 		    DEVNAME(sc));
2009 		goto err;
2010 	}
2011 	pfp = ccb->ccb_rcb->rcb_reply;
2012 
2013 	DNPRINTF(MPI_D_MISC, "%s:  function: 0x%02x msg_length: %d\n",
2014 	    DEVNAME(sc), pfp->function, pfp->msg_length);
2015 	DNPRINTF(MPI_D_MISC, "%s:  msg_flags: 0x%02x port_number: %d\n",
2016 	    DEVNAME(sc), pfp->msg_flags, pfp->port_number);
2017 	DNPRINTF(MPI_D_MISC, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2018 	    letoh32(pfp->msg_context));
2019 	DNPRINTF(MPI_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2020 	    letoh16(pfp->ioc_status));
2021 	DNPRINTF(MPI_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2022 	    letoh32(pfp->ioc_loginfo));
2023 	DNPRINTF(MPI_D_MISC, "%s:  max_devices: %d port_type: 0x%02x\n",
2024 	    DEVNAME(sc), letoh16(pfp->max_devices), pfp->port_type);
2025 	DNPRINTF(MPI_D_MISC, "%s:  protocol_flags: 0x%04x port_scsi_id: %d\n",
2026 	    DEVNAME(sc), letoh16(pfp->protocol_flags),
2027 	    letoh16(pfp->port_scsi_id));
2028 	DNPRINTF(MPI_D_MISC, "%s:  max_persistent_ids: %d "
2029 	    "max_posted_cmd_buffers: %d\n", DEVNAME(sc),
2030 	    letoh16(pfp->max_persistent_ids),
2031 	    letoh16(pfp->max_posted_cmd_buffers));
2032 	DNPRINTF(MPI_D_MISC, "%s:  max_lan_buckets: %d\n", DEVNAME(sc),
2033 	    letoh16(pfp->max_lan_buckets));
2034 
2035 	sc->sc_porttype = pfp->port_type;
2036 	if (sc->sc_target == -1)
2037 		sc->sc_target = letoh16(pfp->port_scsi_id);
2038 
2039 	mpi_push_reply(sc, ccb->ccb_rcb->rcb_reply_dva);
2040 	rv = 0;
2041 err:
2042 	mpi_put_ccb(sc, ccb);
2043 
2044 	return (rv);
2045 }
2046 
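/*
 * mpi_eventnotify: ask the IOC to post asynchronous event notifications.
 * The ccb is deliberately left outstanding; mpi_eventnotify_done() runs for
 * each event reply the IOC posts.  SAS device status changes are deferred
 * to a scsi_task (mpi_evt_sas) so that probe/detach can sleep; everything
 * else is acknowledged (if required) and the reply frame recycled
 * immediately.
 */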
2047 int
2048 mpi_eventnotify(struct mpi_softc *sc)
2049 {
2050 	struct mpi_ccb				*ccb;
2051 	struct mpi_msg_event_request		*enq;
2052 	int					s;
2053 
2054 	s = splbio();
2055 	ccb = mpi_get_ccb(sc);
2056 	splx(s);
2057 	if (ccb == NULL) {
2058 		DNPRINTF(MPI_D_MISC, "%s: mpi_eventnotify ccb_get\n",
2059 		    DEVNAME(sc));
2060 		return (1);
2061 	}
2062 
2063 	ccb->ccb_done = mpi_eventnotify_done;
2064 	enq = ccb->ccb_cmd;
2065 
2066 	enq->function = MPI_FUNCTION_EVENT_NOTIFICATION;
2067 	enq->chain_offset = 0;
2068 	enq->event_switch = MPI_EVENT_SWITCH_ON;
2069 	enq->msg_context = htole32(ccb->ccb_id);
2070 
2071 	mpi_start(sc, ccb);
2072 	return (0);
2073 }
2074 
2075 void
2076 mpi_eventnotify_done(struct mpi_ccb *ccb)
2077 {
2078 	struct mpi_softc			*sc = ccb->ccb_sc;
2079 	struct mpi_msg_event_reply		*enp = ccb->ccb_rcb->rcb_reply;
2080 	int					deferred = 0;
2081 
2082 	DNPRINTF(MPI_D_EVT, "%s: mpi_eventnotify_done\n", DEVNAME(sc));
2083 
2084 	DNPRINTF(MPI_D_EVT, "%s:  function: 0x%02x msg_length: %d "
2085 	    "data_length: %d\n", DEVNAME(sc), enp->function, enp->msg_length,
2086 	    letoh16(enp->data_length));
2087 	DNPRINTF(MPI_D_EVT, "%s:  ack_required: %d msg_flags 0x%02x\n",
2088 	    DEVNAME(sc), enp->ack_required, enp->msg_flags);
2089 	DNPRINTF(MPI_D_EVT, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2090 	    letoh32(enp->msg_context));
2091 	DNPRINTF(MPI_D_EVT, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2092 	    letoh16(enp->ioc_status));
2093 	DNPRINTF(MPI_D_EVT, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2094 	    letoh32(enp->ioc_loginfo));
2095 	DNPRINTF(MPI_D_EVT, "%s:  event: 0x%08x\n", DEVNAME(sc),
2096 	    letoh32(enp->event));
2097 	DNPRINTF(MPI_D_EVT, "%s:  event_context: 0x%08x\n", DEVNAME(sc),
2098 	    letoh32(enp->event_context));
2099 
2100 	switch (letoh32(enp->event)) {
2101 	/* ignore these */
2102 	case MPI_EVENT_EVENT_CHANGE:
2103 	case MPI_EVENT_SAS_PHY_LINK_STATUS:
2104 		break;
2105 
2106 	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2107 		if (sc->sc_scsibus == NULL)
2108 			break;
2109 
2110 		if (scsi_task(mpi_evt_sas, sc, ccb->ccb_rcb, 0) != 0) {
2111 			printf("%s: unable to run SAS device status change\n",
2112 			    DEVNAME(sc));
2113 			break;
2114 		}
2115 		deferred = 1;
2116 		break;
2117 
2118 	default:
2119 		printf("%s: unhandled event 0x%02x\n", DEVNAME(sc),
2120 		    letoh32(enp->event));
2121 		break;
2122 	}
2123 
2124 	if (!deferred) {
2125 		if (enp->ack_required)
2126 			mpi_eventack(sc, enp);
2127 		mpi_push_reply(sc, ccb->ccb_rcb->rcb_reply_dva);
2128 	}
2129 
2130 	if ((enp->msg_flags & MPI_EVENT_FLAGS_REPLY_KEPT) == 0) {
2131 		/* XXX this shouldn't happen until shutdown */
2132 		mpi_put_ccb(sc, ccb);
2133 	}
2134 }
2135 
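/*
 * mpi_evt_sas: runs in scsi_task (process) context on behalf of
 * mpi_eventnotify_done().  Attaches or detaches the SCSI target named in
 * the SAS device status change event, then returns the reply frame to the
 * IOC and acknowledges the event if the IOC asked for it.
 */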
2136 void
2137 mpi_evt_sas(void *xsc, void *arg)
2138 {
2139 	struct mpi_softc			*sc = xsc;
2140 	struct mpi_rcb				*rcb = arg;
2141 	struct mpi_msg_event_reply		*enp = rcb->rcb_reply;
2142 	struct mpi_evt_sas_change		*ch;
2143 	u_int8_t				*data;
2144 	int					s;
2145 
2146 	data = rcb->rcb_reply;
2147 	data += sizeof(struct mpi_msg_event_reply);
2148 	ch = (struct mpi_evt_sas_change *)data;
2149 
2150 	if (ch->bus != 0)
2151 		return;
2152 
2153 	switch (ch->reason) {
2154 	case MPI_EVT_SASCH_REASON_ADDED:
2155 	case MPI_EVT_SASCH_REASON_NO_PERSIST_ADDED:
2156 		scsi_probe_target(sc->sc_scsibus, ch->target);
2157 		break;
2158 
2159 	case MPI_EVT_SASCH_REASON_NOT_RESPONDING:
2160 		scsi_detach_target(sc->sc_scsibus, ch->target, DETACH_FORCE);
2161 		break;
2162 
2163 	case MPI_EVT_SASCH_REASON_SMART_DATA:
2164 	case MPI_EVT_SASCH_REASON_UNSUPPORTED:
2165 	case MPI_EVT_SASCH_REASON_INTERNAL_RESET:
2166 		break;
2167 	default:
2168 		printf("%s: unknown reason for SAS device status change: "
2169 		    "0x%02x\n", DEVNAME(sc), ch->reason);
2170 		break;
2171 	}
2172 
2173 	s = splbio();
2174 	mpi_push_reply(sc, rcb->rcb_reply_dva);
2175 	if (enp->ack_required)
2176 		mpi_eventack(sc, enp);
2177 	splx(s);
2178 }
2179 
2180 void
2181 mpi_eventack(struct mpi_softc *sc, struct mpi_msg_event_reply *enp)
2182 {
2183 	struct mpi_ccb				*ccb;
2184 	struct mpi_msg_eventack_request		*eaq;
2185 
2186 	ccb = mpi_get_ccb(sc);
2187 	if (ccb == NULL) {
2188 		DNPRINTF(MPI_D_EVT, "%s: mpi_eventack ccb_get\n", DEVNAME(sc));
2189 		return;
2190 	}
2191 
2192 	ccb->ccb_done = mpi_eventack_done;
2193 	eaq = ccb->ccb_cmd;
2194 
2195 	eaq->function = MPI_FUNCTION_EVENT_ACK;
2196 	eaq->msg_context = htole32(ccb->ccb_id);
2197 
2198 	eaq->event = enp->event;
2199 	eaq->event_context = enp->event_context;
2200 
2201 	mpi_start(sc, ccb);
2202 	return;
2203 }
2204 
2205 void
2206 mpi_eventack_done(struct mpi_ccb *ccb)
2207 {
2208 	struct mpi_softc			*sc = ccb->ccb_sc;
2209 
2210 	DNPRINTF(MPI_D_EVT, "%s: event ack done\n", DEVNAME(sc));
2211 
2212 	mpi_push_reply(sc, ccb->ccb_rcb->rcb_reply_dva);
2213 	mpi_put_ccb(sc, ccb);
2214 }
2215 
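/*
 * mpi_portenable: tell the IOC to bring the port online so the devices
 * behind it become visible for discovery.  Polled, like the rest of the
 * attach-time commands.
 */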
2216 int
2217 mpi_portenable(struct mpi_softc *sc)
2218 {
2219 	struct mpi_ccb				*ccb;
2220 	struct mpi_msg_portenable_request	*peq;
2221 	struct mpi_msg_portenable_repy		*pep;
2222 	int					s;
2223 
2224 	DNPRINTF(MPI_D_MISC, "%s: mpi_portenable\n", DEVNAME(sc));
2225 
2226 	s = splbio();
2227 	ccb = mpi_get_ccb(sc);
2228 	splx(s);
2229 	if (ccb == NULL) {
2230 		DNPRINTF(MPI_D_MISC, "%s: mpi_portenable ccb_get\n",
2231 		    DEVNAME(sc));
2232 		return (1);
2233 	}
2234 
2235 	ccb->ccb_done = mpi_empty_done;
2236 	peq = ccb->ccb_cmd;
2237 
2238 	peq->function = MPI_FUNCTION_PORT_ENABLE;
2239 	peq->port_number = 0;
2240 	peq->msg_context = htole32(ccb->ccb_id);
2241 
2242 	if (mpi_poll(sc, ccb, 50000) != 0) {
2243 		DNPRINTF(MPI_D_MISC, "%s: mpi_portenable poll\n", DEVNAME(sc));
2244 		return (1);
2245 	}
2246 
2247 	if (ccb->ccb_rcb == NULL) {
2248 		DNPRINTF(MPI_D_MISC, "%s: empty portenable reply\n",
2249 		    DEVNAME(sc));
2250 		return (1);
2251 	}
2252 	pep = ccb->ccb_rcb->rcb_reply;
2253 
2254 	mpi_push_reply(sc, ccb->ccb_rcb->rcb_reply_dva);
2255 	mpi_put_ccb(sc, ccb);
2256 
2257 	return (0);
2258 }
2259 
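/*
 * mpi_fwupload: pull the firmware image out of the IOC into host DMA
 * memory.  Only done when IOC facts reported the FW_DOWNLOAD_BOOT flag;
 * the cached image is presumably what gets written back to the IOC after
 * a hard reset.
 */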
2260 int
2261 mpi_fwupload(struct mpi_softc *sc)
2262 {
2263 	struct mpi_ccb				*ccb;
2264 	struct {
2265 		struct mpi_msg_fwupload_request		req;
2266 		struct mpi_sge				sge;
2267 	} __packed				*bundle;
2268 	struct mpi_msg_fwupload_reply		*upp;
2269 	u_int64_t				addr;
2270 	int					s;
2271 	int					rv = 0;
2272 
2273 	if (sc->sc_fw_len == 0)
2274 		return (0);
2275 
2276 	DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload\n", DEVNAME(sc));
2277 
2278 	sc->sc_fw = mpi_dmamem_alloc(sc, sc->sc_fw_len);
2279 	if (sc->sc_fw == NULL) {
2280 		DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload unable to allocate %d\n",
2281 		    DEVNAME(sc), sc->sc_fw_len);
2282 		return (1);
2283 	}
2284 
2285 	s = splbio();
2286 	ccb = mpi_get_ccb(sc);
2287 	splx(s);
2288 	if (ccb == NULL) {
2289 		DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload ccb_get\n",
2290 		    DEVNAME(sc));
2291 		goto err;
2292 	}
2293 
2294 	ccb->ccb_done = mpi_empty_done;
2295 	bundle = ccb->ccb_cmd;
2296 
2297 	bundle->req.function = MPI_FUNCTION_FW_UPLOAD;
2298 	bundle->req.msg_context = htole32(ccb->ccb_id);
2299 
2300 	bundle->req.image_type = MPI_FWUPLOAD_IMAGETYPE_IOC_FW;
2301 
2302 	bundle->req.tce.details_length = 12;
2303 	bundle->req.tce.image_size = htole32(sc->sc_fw_len);
2304 
2305 	bundle->sge.sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE |
2306 	    MPI_SGE_FL_SIZE_64 | MPI_SGE_FL_LAST | MPI_SGE_FL_EOB |
2307 	    MPI_SGE_FL_EOL | (u_int32_t)sc->sc_fw_len);
2308 	addr = MPI_DMA_DVA(sc->sc_fw);
2309 	bundle->sge.sg_hi_addr = htole32((u_int32_t)(addr >> 32));
2310 	bundle->sge.sg_lo_addr = htole32((u_int32_t)addr);
2311 
2312 	if (mpi_poll(sc, ccb, 50000) != 0) {
2313 		DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload poll\n", DEVNAME(sc));
2314 		goto err;
2315 	}
2316 
2317 	if (ccb->ccb_rcb == NULL)
2318 		panic("%s: unable to do fw upload\n", DEVNAME(sc));
2319 	upp = ccb->ccb_rcb->rcb_reply;
2320 
2321 	if (letoh16(upp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
2322 		rv = 1;
2323 
2324 	mpi_push_reply(sc, ccb->ccb_rcb->rcb_reply_dva);
2325 	mpi_put_ccb(sc, ccb);
2326 
2327 	return (rv);
2328 
2329 err:
2330 	mpi_dmamem_free(sc, sc->sc_fw);
2331 	return (1);
2332 }
2333 
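/*
 * mpi_get_raid: read IOC configuration page 2 to discover active RAID
 * volumes.  Volumes that belong to this IOC and sit on bus 0 get their
 * scsi_link marked SDEV_VIRTUAL, which is what the bio ioctls and the
 * sensor code below key off.
 */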
2334 void
2335 mpi_get_raid(struct mpi_softc *sc)
2336 {
2337 	struct mpi_cfg_hdr		hdr;
2338 	struct mpi_cfg_ioc_pg2		*vol_page;
2339 	struct mpi_cfg_raid_vol		*vol_list, *vol;
2340 	size_t				pagelen;
2341 	u_int32_t			capabilities;
2342 	struct scsi_link		*link;
2343 	int				i;
2344 
2345 	DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid\n", DEVNAME(sc));
2346 
2347 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 2, 0, &hdr) != 0) {
2348 		DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch header "
2349 		    "for IOC page 2\n", DEVNAME(sc));
2350 		return;
2351 	}
2352 
2353 	pagelen = hdr.page_length * 4; /* dwords to bytes */
2354 	vol_page = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL);
2355 	if (vol_page == NULL) {
2356 		DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to allocate "
2357 		    "space for ioc config page 2\n", DEVNAME(sc));
2358 		return;
2359 	}
2360 	vol_list = (struct mpi_cfg_raid_vol *)(vol_page + 1);
2361 
2362 	if (mpi_cfg_page(sc, 0, &hdr, 1, vol_page, pagelen) != 0) {
2363 		DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch IOC "
2364 		    "page 2\n", DEVNAME(sc));
2365 		goto out;
2366 	}
2367 
2368 	capabilities = letoh32(vol_page->capabilities);
2369 
2370 	DNPRINTF(MPI_D_RAID, "%s:  capabilities: 0x%08x\n", DEVNAME(sc),
2371 	    letoh32(vol_page->capabilities));
2372 	DNPRINTF(MPI_D_RAID, "%s:  active_vols: %d max_vols: %d "
2373 	    "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc),
2374 	    vol_page->active_vols, vol_page->max_vols,
2375 	    vol_page->active_physdisks, vol_page->max_physdisks);
2376 
2377 	/* don't walk the list if there is no RAID capability */
2378 	if (capabilities == 0xdeadbeef) {
2379 		printf("%s: deadbeef in raid configuration\n", DEVNAME(sc));
2380 		goto out;
2381 	}
2382 
2383 	if ((capabilities & MPI_CFG_IOC_2_CAPABILITIES_RAID) == 0 ||
2384 	    (vol_page->active_vols == 0))
2385 		goto out;
2386 
2387 	sc->sc_flags |= MPI_F_RAID;
2388 
2389 	for (i = 0; i < vol_page->active_vols; i++) {
2390 		vol = &vol_list[i];
2391 
2392 		DNPRINTF(MPI_D_RAID, "%s:   id: %d bus: %d ioc: %d pg: %d\n",
2393 		    DEVNAME(sc), vol->vol_id, vol->vol_bus, vol->vol_ioc,
2394 		    vol->vol_page);
2395 		DNPRINTF(MPI_D_RAID, "%s:   type: 0x%02x flags: 0x%02x\n",
2396 		    DEVNAME(sc), vol->vol_type, vol->flags);
2397 
2398 		if (vol->vol_ioc != sc->sc_ioc_number || vol->vol_bus != 0)
2399 			continue;
2400 
2401 		link = sc->sc_scsibus->sc_link[vol->vol_id][0];
2402 		if (link == NULL)
2403 			continue;
2404 
2405 		link->flags |= SDEV_VIRTUAL;
2406 	}
2407 
2408 out:
2409 	free(vol_page, M_TEMP);
2410 }
2411 
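/*
 * mpi_req_cfg_header: first half of a configuration page transaction.
 * Issues a PAGE_HEADER action to learn the page's version and length,
 * which the caller must pass back to mpi_req_cfg_page().  Handles both
 * normal and extended pages and either polls or sleeps for the reply,
 * depending on MPI_PG_POLL.
 */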
2412 int
2413 mpi_req_cfg_header(struct mpi_softc *sc, u_int8_t type, u_int8_t number,
2414     u_int32_t address, int flags, void *p)
2415 {
2416 	struct mpi_ccb				*ccb;
2417 	struct mpi_msg_config_request		*cq;
2418 	struct mpi_msg_config_reply		*cp;
2419 	struct mpi_cfg_hdr			*hdr = p;
2420 	struct mpi_ecfg_hdr			*ehdr = p;
2421 	int					etype = 0;
2422 	int					rv = 0;
2423 	int					s;
2424 
2425 	DNPRINTF(MPI_D_MISC, "%s: mpi_req_cfg_header type: %#x number: %x "
2426 	    "address: 0x%08x flags: 0x%b\n", DEVNAME(sc), type, number,
2427 	    address, flags, MPI_PG_FMT);
2428 
2429 	s = splbio();
2430 	ccb = mpi_get_ccb(sc);
2431 	splx(s);
2432 	if (ccb == NULL) {
2433 		DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header ccb_get\n",
2434 		    DEVNAME(sc));
2435 		return (1);
2436 	}
2437 
2438 	if (ISSET(flags, MPI_PG_EXTENDED)) {
2439 		etype = type;
2440 		type = MPI_CONFIG_REQ_PAGE_TYPE_EXTENDED;
2441 	}
2442 
2443 	cq = ccb->ccb_cmd;
2444 
2445 	cq->function = MPI_FUNCTION_CONFIG;
2446 	cq->msg_context = htole32(ccb->ccb_id);
2447 
2448 	cq->action = MPI_CONFIG_REQ_ACTION_PAGE_HEADER;
2449 
2450 	cq->config_header.page_number = number;
2451 	cq->config_header.page_type = type;
2452 	cq->ext_page_type = etype;
2453 	cq->page_address = htole32(address);
2454 	cq->page_buffer.sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE |
2455 	    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL);
2456 
2457 	if (ISSET(flags, MPI_PG_POLL)) {
2458 		ccb->ccb_done = mpi_empty_done;
2459 		if (mpi_poll(sc, ccb, 50000) != 0) {
2460 			DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n",
2461 			    DEVNAME(sc));
2462 			return (1);
2463 		}
2464 	} else {
2465 		ccb->ccb_done = (void (*)(struct mpi_ccb *))wakeup;
2466 		s = splbio();
2467 		mpi_start(sc, ccb);
2468 		while (ccb->ccb_state != MPI_CCB_READY)
2469 			tsleep(ccb, PRIBIO, "mpipghdr", 0);
2470 		splx(s);
2471 	}
2472 
2473 	if (ccb->ccb_rcb == NULL)
2474 		panic("%s: unable to fetch config header\n", DEVNAME(sc));
2475 	cp = ccb->ccb_rcb->rcb_reply;
2476 
2477 	DNPRINTF(MPI_D_MISC, "%s:  action: 0x%02x msg_length: %d function: "
2478 	    "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function);
2479 	DNPRINTF(MPI_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
2480 	    "msg_flags: 0x%02x\n", DEVNAME(sc),
2481 	    letoh16(cp->ext_page_length), cp->ext_page_type,
2482 	    cp->msg_flags);
2483 	DNPRINTF(MPI_D_MISC, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2484 	    letoh32(cp->msg_context));
2485 	DNPRINTF(MPI_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2486 	    letoh16(cp->ioc_status));
2487 	DNPRINTF(MPI_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2488 	    letoh32(cp->ioc_loginfo));
2489 	DNPRINTF(MPI_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
2490 	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2491 	    cp->config_header.page_version,
2492 	    cp->config_header.page_length,
2493 	    cp->config_header.page_number,
2494 	    cp->config_header.page_type);
2495 
2496 	if (letoh16(cp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
2497 		rv = 1;
2498 	else if (ISSET(flags, MPI_PG_EXTENDED)) {
2499 		bzero(ehdr, sizeof(*ehdr));
2500 		ehdr->page_version = cp->config_header.page_version;
2501 		ehdr->page_number = cp->config_header.page_number;
2502 		ehdr->page_type = cp->config_header.page_type;
2503 		ehdr->ext_page_length = cp->ext_page_length;
2504 		ehdr->ext_page_type = cp->ext_page_type;
2505 	} else
2506 		*hdr = cp->config_header;
2507 
2508 	mpi_push_reply(sc, ccb->ccb_rcb->rcb_reply_dva);
2509 	mpi_put_ccb(sc, ccb);
2510 
2511 	return (rv);
2512 }
2513 
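/*
 * mpi_req_cfg_page: second half of a configuration page transaction.
 * Reads or writes the current copy of the page described by the header
 * obtained above.  The page data is bounced through the unused tail of
 * the request frame, so no separate DMA mapping is needed.
 */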
2514 int
2515 mpi_req_cfg_page(struct mpi_softc *sc, u_int32_t address, int flags,
2516     void *p, int read, void *page, size_t len)
2517 {
2518 	struct mpi_ccb				*ccb;
2519 	struct mpi_msg_config_request		*cq;
2520 	struct mpi_msg_config_reply		*cp;
2521 	struct mpi_cfg_hdr			*hdr = p;
2522 	struct mpi_ecfg_hdr			*ehdr = p;
2523 	u_int64_t				dva;
2524 	char					*kva;
2525 	int					page_length;
2526 	int					rv = 0;
2527 	int					s;
2528 
2529 	DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page address: %d read: %d type: %x\n",
2530 	    DEVNAME(sc), address, read, hdr->page_type);
2531 
2532 	page_length = ISSET(flags, MPI_PG_EXTENDED) ?
2533 	    letoh16(ehdr->ext_page_length) : hdr->page_length;
2534 
2535 	if (len > MPI_REQUEST_SIZE - sizeof(struct mpi_msg_config_request) ||
2536 	    len < page_length * 4)
2537 		return (1);
2538 
2539 	s = splbio();
2540 	ccb = mpi_get_ccb(sc);
2541 	splx(s);
2542 	if (ccb == NULL) {
2543 		DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page ccb_get\n", DEVNAME(sc));
2544 		return (1);
2545 	}
2546 
2547 	cq = ccb->ccb_cmd;
2548 
2549 	cq->function = MPI_FUNCTION_CONFIG;
2550 	cq->msg_context = htole32(ccb->ccb_id);
2551 
2552 	cq->action = (read ? MPI_CONFIG_REQ_ACTION_PAGE_READ_CURRENT :
2553 	    MPI_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT);
2554 
2555 	if (ISSET(flags, MPI_PG_EXTENDED)) {
2556 		cq->config_header.page_version = ehdr->page_version;
2557 		cq->config_header.page_number = ehdr->page_number;
2558 		cq->config_header.page_type = ehdr->page_type;
2559 		cq->ext_page_len = ehdr->ext_page_length;
2560 		cq->ext_page_type = ehdr->ext_page_type;
2561 	} else
2562 		cq->config_header = *hdr;
2563 	cq->config_header.page_type &= MPI_CONFIG_REQ_PAGE_TYPE_MASK;
2564 	cq->page_address = htole32(address);
2565 	cq->page_buffer.sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE |
2566 	    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL |
2567 	    (page_length * 4) |
2568 	    (read ? MPI_SGE_FL_DIR_IN : MPI_SGE_FL_DIR_OUT));
2569 
2570 	/* bounce the page via the request space to avoid more bus_dma games */
2571 	dva = ccb->ccb_cmd_dva + sizeof(struct mpi_msg_config_request);
2572 
2573 	cq->page_buffer.sg_hi_addr = htole32((u_int32_t)(dva >> 32));
2574 	cq->page_buffer.sg_lo_addr = htole32((u_int32_t)dva);
2575 
2576 	kva = ccb->ccb_cmd;
2577 	kva += sizeof(struct mpi_msg_config_request);
2578 	if (!read)
2579 		bcopy(page, kva, len);
2580 
2581 	if (ISSET(flags, MPI_PG_POLL)) {
2582 		ccb->ccb_done = mpi_empty_done;
2583 		if (mpi_poll(sc, ccb, 50000) != 0) {
2584 			DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page poll\n",
2585 			    DEVNAME(sc));
2586 			return (1);
2587 		}
2588 	} else {
2589 		ccb->ccb_done = (void (*)(struct mpi_ccb *))wakeup;
2590 		s = splbio();
2591 		mpi_start(sc, ccb);
2592 		while (ccb->ccb_state != MPI_CCB_READY)
2593 			tsleep(ccb, PRIBIO, "mpipghdr", 0);
2594 		splx(s);
2595 	}
2596 
2597 	if (ccb->ccb_rcb == NULL) {
2598 		mpi_put_ccb(sc, ccb);
2599 		return (1);
2600 	}
2601 	cp = ccb->ccb_rcb->rcb_reply;
2602 
2603 	DNPRINTF(MPI_D_MISC, "%s:  action: 0x%02x msg_length: %d function: "
2604 	    "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function);
2605 	DNPRINTF(MPI_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
2606 	    "msg_flags: 0x%02x\n", DEVNAME(sc),
2607 	    letoh16(cp->ext_page_length), cp->ext_page_type,
2608 	    cp->msg_flags);
2609 	DNPRINTF(MPI_D_MISC, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2610 	    letoh32(cp->msg_context));
2611 	DNPRINTF(MPI_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2612 	    letoh16(cp->ioc_status));
2613 	DNPRINTF(MPI_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2614 	    letoh32(cp->ioc_loginfo));
2615 	DNPRINTF(MPI_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
2616 	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2617 	    cp->config_header.page_version,
2618 	    cp->config_header.page_length,
2619 	    cp->config_header.page_number,
2620 	    cp->config_header.page_type);
2621 
2622 	if (letoh16(cp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
2623 		rv = 1;
2624 	else if (read)
2625 		bcopy(kva, page, len);
2626 
2627 	mpi_push_reply(sc, ccb->ccb_rcb->rcb_reply_dva);
2628 	mpi_put_ccb(sc, ccb);
2629 
2630 	return (rv);
2631 }
2632 
2633 int
2634 mpi_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag,
2635     struct proc *p)
2636 {
2637 	struct mpi_softc	*sc = (struct mpi_softc *)link->adapter_softc;
2638 
2639 	DNPRINTF(MPI_D_IOCTL, "%s: mpi_scsi_ioctl\n", DEVNAME(sc));
2640 
2641 	if (sc->sc_ioctl)
2642 		return (sc->sc_ioctl(link->adapter_softc, cmd, addr));
2643 	else
2644 		return (ENOTTY);
2645 }
2646 
2647 #if NBIO > 0
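/*
 * mpi_bio_get_pg0_raid: helper for the bio ioctls and the sensors.
 * Refreshes the cached IOC page 2 and replaces sc_rpg0 with RAID volume
 * page 0 for the requested volume, sized for the maximum number of
 * physical disks.
 */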
2648 int
2649 mpi_bio_get_pg0_raid(struct mpi_softc *sc, int id)
2650 {
2651 	int			len, rv = EINVAL;
2652 	u_int32_t		address;
2653 	struct mpi_cfg_hdr	hdr;
2654 	struct mpi_cfg_raid_vol_pg0 *rpg0;
2655 
2656 	/* get IOC page 2 */
2657 	if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page,
2658 	    sc->sc_cfg_hdr.page_length * 4) != 0) {
2659 		DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid unable to "
2660 		    "fetch IOC page 2\n", DEVNAME(sc));
2661 		goto done;
2662 	}
2663 
2664 	/* XXX return something other than EINVAL to indicate within hot spare range */
2665 	if (id > sc->sc_vol_page->active_vols) {
2666 		DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid invalid vol "
2667 		    "id: %d\n", DEVNAME(sc), id);
2668 		goto done;
2669 	}
2670 
2671 	/* replace current buffer with new one */
2672 	len = sizeof *rpg0 + sc->sc_vol_page->max_physdisks *
2673 	    sizeof(struct mpi_cfg_raid_vol_pg0_physdisk);
2674 	rpg0 = malloc(len, M_DEVBUF, M_WAITOK | M_CANFAIL);
2675 	if (rpg0 == NULL) {
2676 		printf("%s: can't get memory for RAID page 0, "
2677 		    "bio disabled\n", DEVNAME(sc));
2678 		goto done;
2679 	}
2680 	if (sc->sc_rpg0)
2681 		free(sc->sc_rpg0, M_DEVBUF);
2682 	sc->sc_rpg0 = rpg0;
2683 
2684 	/* get raid vol page 0 */
2685 	address = sc->sc_vol_list[id].vol_id |
2686 	    (sc->sc_vol_list[id].vol_bus << 8);
2687 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
2688 	    address, &hdr) != 0)
2689 		goto done;
2690 	if (mpi_cfg_page(sc, address, &hdr, 1, rpg0, len)) {
2691 		printf("%s: can't get RAID vol cfg page 0\n", DEVNAME(sc));
2692 		goto done;
2693 	}
2694 
2695 	rv = 0;
2696 done:
2697 	return (rv);
2698 }
2699 
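/*
 * mpi_ioctl: bio(4) ioctl handler used by bioctl(8).  Serialises the
 * BIOCINQ/BIOCVOL/BIOCDISK/BIOCSETSTATE requests against the sensor
 * refresh below with sc_lock.
 */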
2700 int
2701 mpi_ioctl(struct device *dev, u_long cmd, caddr_t addr)
2702 {
2703 	struct mpi_softc	*sc = (struct mpi_softc *)dev;
2704 	int error = 0;
2705 
2706 	DNPRINTF(MPI_D_IOCTL, "%s: mpi_ioctl ", DEVNAME(sc));
2707 
2708 	/* make sure we have bio enabled */
2709 	if (sc->sc_ioctl != mpi_ioctl)
2710 		return (EINVAL);
2711 
2712 	rw_enter_write(&sc->sc_lock);
2713 
2714 	switch (cmd) {
2715 	case BIOCINQ:
2716 		DNPRINTF(MPI_D_IOCTL, "inq\n");
2717 		error = mpi_ioctl_inq(sc, (struct bioc_inq *)addr);
2718 		break;
2719 
2720 	case BIOCVOL:
2721 		DNPRINTF(MPI_D_IOCTL, "vol\n");
2722 		error = mpi_ioctl_vol(sc, (struct bioc_vol *)addr);
2723 		break;
2724 
2725 	case BIOCDISK:
2726 		DNPRINTF(MPI_D_IOCTL, "disk\n");
2727 		error = mpi_ioctl_disk(sc, (struct bioc_disk *)addr);
2728 		break;
2729 
2730 	case BIOCALARM:
2731 		DNPRINTF(MPI_D_IOCTL, "alarm\n");
2732 		break;
2733 
2734 	case BIOCBLINK:
2735 		DNPRINTF(MPI_D_IOCTL, "blink\n");
2736 		break;
2737 
2738 	case BIOCSETSTATE:
2739 		DNPRINTF(MPI_D_IOCTL, "setstate\n");
2740 		error = mpi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
2741 		break;
2742 
2743 	default:
2744 		DNPRINTF(MPI_D_IOCTL, " invalid ioctl\n");
2745 		error = EINVAL;
2746 	}
2747 
2748 	rw_exit_write(&sc->sc_lock);
2749 
2750 	return (error);
2751 }
2752 
2753 int
2754 mpi_ioctl_inq(struct mpi_softc *sc, struct bioc_inq *bi)
2755 {
2756 	if (!(sc->sc_flags & MPI_F_RAID)) {
2757 		bi->bi_novol = 0;
2758 		bi->bi_nodisk = 0;
2759 	}
2760 
2761 	if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page,
2762 	    sc->sc_cfg_hdr.page_length * 4) != 0) {
2763 		DNPRINTF(MPI_D_IOCTL, "%s: mpi_get_raid unable to fetch IOC "
2764 		    "page 2\n", DEVNAME(sc));
2765 		return (EINVAL);
2766 	}
2767 
2768 	DNPRINTF(MPI_D_IOCTL, "%s:  active_vols: %d max_vols: %d "
2769 	    "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc),
2770 	    sc->sc_vol_page->active_vols, sc->sc_vol_page->max_vols,
2771 	    sc->sc_vol_page->active_physdisks, sc->sc_vol_page->max_physdisks);
2772 
2773 	bi->bi_novol = sc->sc_vol_page->active_vols;
2774 	bi->bi_nodisk = sc->sc_vol_page->active_physdisks;
2775 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
2776 
2777 	return (0);
2778 }
2779 
2780 int
2781 mpi_ioctl_vol(struct mpi_softc *sc, struct bioc_vol *bv)
2782 {
2783 	int			i, vol, id, rv = EINVAL;
2784 	struct device		*dev;
2785 	struct scsi_link	*link;
2786 	struct mpi_cfg_raid_vol_pg0 *rpg0;
2787 
2788 	id = bv->bv_volid;
2789 	if (mpi_bio_get_pg0_raid(sc, id))
2790 		goto done;
2791 
2792 	if (id > sc->sc_vol_page->active_vols)
2793 		return (EINVAL); /* XXX deal with hot spares */
2794 
2795 	rpg0 = sc->sc_rpg0;
2796 	if (rpg0 == NULL)
2797 		goto done;
2798 
2799 	/* determine status */
2800 	switch (rpg0->volume_state) {
2801 	case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL:
2802 		bv->bv_status = BIOC_SVONLINE;
2803 		break;
2804 	case MPI_CFG_RAID_VOL_0_STATE_DEGRADED:
2805 		bv->bv_status = BIOC_SVDEGRADED;
2806 		break;
2807 	case MPI_CFG_RAID_VOL_0_STATE_FAILED:
2808 	case MPI_CFG_RAID_VOL_0_STATE_MISSING:
2809 		bv->bv_status = BIOC_SVOFFLINE;
2810 		break;
2811 	default:
2812 		bv->bv_status = BIOC_SVINVALID;
2813 	}
2814 
2815 	/* override status if the volume is resyncing/rebuilding */
2816 	if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING)
2817 		bv->bv_status = BIOC_SVREBUILD;
2818 
2819 	bv->bv_size = (u_quad_t)letoh32(rpg0->max_lba) * 512;
2820 
2821 	switch (sc->sc_vol_list[id].vol_type) {
2822 	case MPI_CFG_RAID_TYPE_RAID_IS:
2823 		bv->bv_level = 0;
2824 		break;
2825 	case MPI_CFG_RAID_TYPE_RAID_IME:
2826 	case MPI_CFG_RAID_TYPE_RAID_IM:
2827 		bv->bv_level = 1;
2828 		break;
2829 	case MPI_CFG_RAID_TYPE_RAID_5:
2830 		bv->bv_level = 5;
2831 		break;
2832 	case MPI_CFG_RAID_TYPE_RAID_6:
2833 		bv->bv_level = 6;
2834 		break;
2835 	case MPI_CFG_RAID_TYPE_RAID_10:
2836 		bv->bv_level = 10;
2837 		break;
2838 	case MPI_CFG_RAID_TYPE_RAID_50:
2839 		bv->bv_level = 50;
2840 		break;
2841 	default:
2842 		bv->bv_level = -1;
2843 	}
2844 
2845 	bv->bv_nodisk = rpg0->num_phys_disks;
2846 
2847 	for (i = 0, vol = -1; i < sc->sc_buswidth; i++) {
2848 		link = sc->sc_scsibus->sc_link[i][0];
2849 		if (link == NULL)
2850 			continue;
2851 
2852 		/* skip if not a virtual disk */
2853 		if (!(link->flags & SDEV_VIRTUAL))
2854 			continue;
2855 
2856 		vol++;
2857 		/* are we it? */
2858 		if (vol == bv->bv_volid) {
2859 			dev = link->device_softc;
2860 			memcpy(bv->bv_vendor, link->inqdata.vendor,
2861 			    sizeof bv->bv_vendor);
2862 			bv->bv_vendor[sizeof(bv->bv_vendor) - 1] = '\0';
2863 			strlcpy(bv->bv_dev, dev->dv_xname, sizeof bv->bv_dev);
2864 			break;
2865 		}
2866 	}
2867 	rv = 0;
2868 done:
2869 	return (rv);
2870 }
2871 
2872 int
2873 mpi_ioctl_disk(struct mpi_softc *sc, struct bioc_disk *bd)
2874 {
2875 	int			pdid, id, rv = EINVAL;
2876 	u_int32_t		address;
2877 	struct mpi_cfg_hdr	hdr;
2878 	struct mpi_cfg_raid_vol_pg0 *rpg0;
2879 	struct mpi_cfg_raid_vol_pg0_physdisk *physdisk;
2880 	struct mpi_cfg_raid_physdisk_pg0 pdpg0;
2881 
2882 	id = bd->bd_volid;
2883 	if (mpi_bio_get_pg0_raid(sc, id))
2884 		goto done;
2885 
2886 	if (id > sc->sc_vol_page->active_vols)
2887 		return (EINVAL); /* XXX deal with hot spares */
2888 
2889 	rpg0 = sc->sc_rpg0;
2890 	if (rpg0 == NULL)
2891 		goto done;
2892 
2893 	pdid = bd->bd_diskid;
2894 	if (pdid > rpg0->num_phys_disks)
2895 		goto done;
2896 	physdisk = (struct mpi_cfg_raid_vol_pg0_physdisk *)(rpg0 + 1);
2897 	physdisk += pdid;
2898 
2899 	/* get raid phys disk page 0 */
2900 	address = physdisk->phys_disk_num;
2901 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_PD, 0, address,
2902 	    &hdr) != 0)
2903 		goto done;
2904 	if (mpi_cfg_page(sc, address, &hdr, 1, &pdpg0, sizeof pdpg0)) {
2905 		bd->bd_status = BIOC_SDFAILED;
2906 		return (0);
2907 	}
2908 	bd->bd_channel = pdpg0.phys_disk_bus;
2909 	bd->bd_target = pdpg0.phys_disk_id;
2910 	bd->bd_lun = 0;
2911 	bd->bd_size = (u_quad_t)pdpg0.max_lba * 512;
2912 	strlcpy(bd->bd_vendor, pdpg0.vendor_id, sizeof(bd->bd_vendor));
2913 
2914 	switch (pdpg0.phys_disk_state) {
2915 	case MPI_CFG_RAID_PHYDISK_0_STATE_ONLINE:
2916 		bd->bd_status = BIOC_SDONLINE;
2917 		break;
2918 	case MPI_CFG_RAID_PHYDISK_0_STATE_MISSING:
2919 	case MPI_CFG_RAID_PHYDISK_0_STATE_FAILED:
2920 		bd->bd_status = BIOC_SDFAILED;
2921 		break;
2922 	case MPI_CFG_RAID_PHYDISK_0_STATE_HOSTFAIL:
2923 	case MPI_CFG_RAID_PHYDISK_0_STATE_OTHER:
2924 	case MPI_CFG_RAID_PHYDISK_0_STATE_OFFLINE:
2925 		bd->bd_status = BIOC_SDOFFLINE;
2926 		break;
2927 	case MPI_CFG_RAID_PHYDISK_0_STATE_INIT:
2928 		bd->bd_status = BIOC_SDSCRUB;
2929 		break;
2930 	case MPI_CFG_RAID_PHYDISK_0_STATE_INCOMPAT:
2931 	default:
2932 		bd->bd_status = BIOC_SDINVALID;
2933 		break;
2934 	}
2935 
2936 	/* XXX figure this out */
2937 	/* bd_serial[32]; */
2938 	/* bd_procdev[16]; */
2939 
2940 	rv = 0;
2941 done:
2942 	return (rv);
2943 }
2944 
2945 int
2946 mpi_ioctl_setstate(struct mpi_softc *sc, struct bioc_setstate *bs)
2947 {
2948 	return (ENOTTY);
2949 }
2950 
2951 #ifndef SMALL_KERNEL
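/*
 * mpi_create_sensors: attach one SENSOR_DRIVE sensor per RAID volume
 * (every scsi_link flagged SDEV_VIRTUAL) and register mpi_refresh_sensors()
 * to update them every 10 seconds from RAID volume page 0.
 */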
2952 int
2953 mpi_create_sensors(struct mpi_softc *sc)
2954 {
2955 	struct device		*dev;
2956 	struct scsi_link	*link;
2957 	int			i, vol;
2958 
2959 	/* count volumes */
2960 	for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
2961 		link = sc->sc_scsibus->sc_link[i][0];
2962 		if (link == NULL)
2963 			continue;
2964 		/* skip if not a virtual disk */
2965 		if (!(link->flags & SDEV_VIRTUAL))
2966 			continue;
2967 
2968 		vol++;
2969 	}
2970 
2971 	sc->sc_sensors = malloc(sizeof(struct ksensor) * vol,
2972 	    M_DEVBUF, M_WAITOK|M_ZERO);
2973 	if (sc->sc_sensors == NULL)
2974 		return (1);
2975 
2976 	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
2977 	    sizeof(sc->sc_sensordev.xname));
2978 
2979 	for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
2980 		link = sc->sc_scsibus->sc_link[i][0];
2981 		if (link == NULL)
2982 			continue;
2983 		/* skip if not a virtual disk */
2984 		if (!(link->flags & SDEV_VIRTUAL))
2985 			continue;
2986 
2987 		dev = link->device_softc;
2988 		strlcpy(sc->sc_sensors[vol].desc, dev->dv_xname,
2989 		    sizeof(sc->sc_sensors[vol].desc));
2990 		sc->sc_sensors[vol].type = SENSOR_DRIVE;
2991 		sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN;
2992 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[vol]);
2993 
2994 		vol++;
2995 	}
2996 
2997 	if (sensor_task_register(sc, mpi_refresh_sensors, 10) == NULL)
2998 		goto bad;
2999 
3000 	sensordev_install(&sc->sc_sensordev);
3001 
3002 	return (0);
3003 
3004 bad:
3005 	free(sc->sc_sensors, M_DEVBUF);
3006 	return (1);
3007 }
3008 
3009 void
3010 mpi_refresh_sensors(void *arg)
3011 {
3012 	int			i, vol;
3013 	struct scsi_link	*link;
3014 	struct mpi_softc	*sc = arg;
3015 	struct mpi_cfg_raid_vol_pg0 *rpg0;
3016 
3017 	rw_enter_write(&sc->sc_lock);
3018 
3019 	for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
3020 		link = sc->sc_scsibus->sc_link[i][0];
3021 		if (link == NULL)
3022 			continue;
3023 		/* skip if not a virtual disk */
3024 		if (!(link->flags & SDEV_VIRTUAL))
3025 			continue;
3026 
3027 		if (mpi_bio_get_pg0_raid(sc, vol))
3028 			continue;
3029 
3030 		rpg0 = sc->sc_rpg0;
3031 		if (rpg0 == NULL)
3032 			goto done;
3033 
3034 		/* determine status */
3035 		switch (rpg0->volume_state) {
3036 		case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL:
3037 			sc->sc_sensors[vol].value = SENSOR_DRIVE_ONLINE;
3038 			sc->sc_sensors[vol].status = SENSOR_S_OK;
3039 			break;
3040 		case MPI_CFG_RAID_VOL_0_STATE_DEGRADED:
3041 			sc->sc_sensors[vol].value = SENSOR_DRIVE_PFAIL;
3042 			sc->sc_sensors[vol].status = SENSOR_S_WARN;
3043 			break;
3044 		case MPI_CFG_RAID_VOL_0_STATE_FAILED:
3045 		case MPI_CFG_RAID_VOL_0_STATE_MISSING:
3046 			sc->sc_sensors[vol].value = SENSOR_DRIVE_FAIL;
3047 			sc->sc_sensors[vol].status = SENSOR_S_CRIT;
3048 			break;
3049 		default:
3050 			sc->sc_sensors[vol].value = 0; /* unknown */
3051 			sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN;
3052 		}
3053 
3054 		/* override status if the volume is resyncing/rebuilding */
3055 		if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING) {
3056 			sc->sc_sensors[vol].value = SENSOR_DRIVE_REBUILD;
3057 			sc->sc_sensors[vol].status = SENSOR_S_WARN;
3058 		}
3059 
3060 		vol++;
3061 	}
3062 done:
3063 	rw_exit_write(&sc->sc_lock);
3064 }
3065 #endif /* SMALL_KERNEL */
3066 #endif /* NBIO > 0 */
3067