1 /*	$NetBSD: iop.c,v 1.91 2021/04/24 23:36:55 thorpej Exp $	*/
2 
3 /*-
4  * Copyright (c) 2000, 2001, 2002, 2007 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Andrew Doran.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Support for I2O IOPs (intelligent I/O processors).
34  */
35 
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.91 2021/04/24 23:36:55 thorpej Exp $");
38 
39 #include "iop.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/device.h>
45 #include <sys/queue.h>
46 #include <sys/proc.h>
47 #include <sys/malloc.h>
48 #include <sys/ioctl.h>
49 #include <sys/endian.h>
50 #include <sys/conf.h>
51 #include <sys/kthread.h>
52 #include <sys/kauth.h>
53 #include <sys/bus.h>
54 
55 #include <dev/i2o/i2o.h>
56 #include <dev/i2o/iopio.h>
57 #include <dev/i2o/iopreg.h>
58 #include <dev/i2o/iopvar.h>
59 
60 #include "ioconf.h"
61 #include "locators.h"
62 
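/*
 * Spin for up to `ms' milliseconds waiting for `cond' to become true,
 * re-testing every 100 microseconds.
 */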
63 #define POLL(ms, cond)				\
64 do {						\
65 	int xi;					\
66 	for (xi = (ms) * 10; xi; xi--) {	\
67 		if (cond)			\
68 			break;			\
69 		DELAY(100);			\
70 	}					\
71 } while (/* CONSTCOND */0);
72 
73 #ifdef I2ODEBUG
74 #define DPRINTF(x)	printf x
75 #else
76 #define	DPRINTF(x)
77 #endif
78 
79 #define IOP_ICTXHASH_NBUCKETS	16
80 #define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])
81 
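/*
 * Worst-case scatter/gather segment count: one segment per page of the
 * largest transfer, plus one to allow for a non-page-aligned buffer.
 */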
82 #define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)
83 
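/*
 * Transaction context layout: the low IOP_TCTX_SHIFT bits index the
 * message wrapper array; the remaining bits carry a generation number
 * used to detect stale or bogus replies.
 */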
84 #define	IOP_TCTX_SHIFT	12
85 #define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)
86 
87 static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
88 static u_long	iop_ictxhash;
89 static void	*iop_sdh;
90 static struct	i2o_systab *iop_systab;
91 static int	iop_systab_size;
92 
93 dev_type_open(iopopen);
94 dev_type_close(iopclose);
95 dev_type_ioctl(iopioctl);
96 
97 const struct cdevsw iop_cdevsw = {
98 	.d_open = iopopen,
99 	.d_close = iopclose,
100 	.d_read = noread,
101 	.d_write = nowrite,
102 	.d_ioctl = iopioctl,
103 	.d_stop = nostop,
104 	.d_tty = notty,
105 	.d_poll = nopoll,
106 	.d_mmap = nommap,
107 	.d_kqfilter = nokqfilter,
108 	.d_discard = nodiscard,
109 	.d_flag = D_OTHER,
110 };
111 
112 #define	IC_CONFIGURE	0x01
113 #define	IC_PRIORITY	0x02
114 
115 static struct iop_class {
116 	u_short	ic_class;
117 	u_short	ic_flags;
118 	const char *ic_caption;
119 } const iop_class[] = {
120 	{
121 		I2O_CLASS_EXECUTIVE,
122 		0,
123 		"executive"
124 	},
125 	{
126 		I2O_CLASS_DDM,
127 		0,
128 		"device driver module"
129 	},
130 	{
131 		I2O_CLASS_RANDOM_BLOCK_STORAGE,
132 		IC_CONFIGURE | IC_PRIORITY,
133 		"random block storage"
134 	},
135 	{
136 		I2O_CLASS_SEQUENTIAL_STORAGE,
137 		IC_CONFIGURE | IC_PRIORITY,
138 		"sequential storage"
139 	},
140 	{
141 		I2O_CLASS_LAN,
142 		IC_CONFIGURE | IC_PRIORITY,
143 		"LAN port"
144 	},
145 	{
146 		I2O_CLASS_WAN,
147 		IC_CONFIGURE | IC_PRIORITY,
148 		"WAN port"
149 	},
150 	{
151 		I2O_CLASS_FIBRE_CHANNEL_PORT,
152 		IC_CONFIGURE,
153 		"fibrechannel port"
154 	},
155 	{
156 		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
157 		0,
158 		"fibrechannel peripheral"
159 	},
160  	{
161  		I2O_CLASS_SCSI_PERIPHERAL,
162  		0,
163  		"SCSI peripheral"
164  	},
165 	{
166 		I2O_CLASS_ATE_PORT,
167 		IC_CONFIGURE,
168 		"ATE port"
169 	},
170 	{
171 		I2O_CLASS_ATE_PERIPHERAL,
172 		0,
173 		"ATE peripheral"
174 	},
175 	{
176 		I2O_CLASS_FLOPPY_CONTROLLER,
177 		IC_CONFIGURE,
178 		"floppy controller"
179 	},
180 	{
181 		I2O_CLASS_FLOPPY_DEVICE,
182 		0,
183 		"floppy device"
184 	},
185 	{
186 		I2O_CLASS_BUS_ADAPTER_PORT,
187 		IC_CONFIGURE,
188 		"bus adapter port"
189 	},
190 };
191 
192 #ifdef I2ODEBUG
193 static const char * const iop_status[] = {
194 	"success",
195 	"abort (dirty)",
196 	"abort (no data transfer)",
197 	"abort (partial transfer)",
198 	"error (dirty)",
199 	"error (no data transfer)",
200 	"error (partial transfer)",
201 	"undefined error code",
202 	"process abort (dirty)",
203 	"process abort (no data transfer)",
204 	"process abort (partial transfer)",
205 	"transaction error",
206 };
207 #endif
208 
209 static inline u_int32_t	iop_inl(struct iop_softc *, int);
210 static inline void	iop_outl(struct iop_softc *, int, u_int32_t);
211 
212 static inline u_int32_t	iop_inl_msg(struct iop_softc *, int);
213 static inline void	iop_outl_msg(struct iop_softc *, int, u_int32_t);
214 
215 static void	iop_config_interrupts(device_t);
216 static void	iop_configure_devices(struct iop_softc *, int, int);
217 static void	iop_devinfo(int, char *, size_t);
218 static int	iop_print(void *, const char *);
219 static void	iop_shutdown(void *);
220 
221 static void	iop_adjqparam(struct iop_softc *, int);
222 static int	iop_handle_reply(struct iop_softc *, u_int32_t);
223 static int	iop_hrt_get(struct iop_softc *);
224 static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
225 static void	iop_intr_event(device_t, struct iop_msg *, void *);
226 static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
227 			     u_int32_t);
228 static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
229 static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
230 static int	iop_ofifo_init(struct iop_softc *);
231 static int	iop_passthrough(struct iop_softc *, struct ioppt *,
232 				struct proc *);
233 static void	iop_reconf_thread(void *);
234 static void	iop_release_mfa(struct iop_softc *, u_int32_t);
235 static int	iop_reset(struct iop_softc *);
236 static int	iop_sys_enable(struct iop_softc *);
237 static int	iop_systab_set(struct iop_softc *);
238 static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);
239 
240 #ifdef I2ODEBUG
241 static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
242 #endif
243 
244 static inline u_int32_t
245 iop_inl(struct iop_softc *sc, int off)
246 {
247 
248 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
249 	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
250 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
251 }
252 
253 static inline void
254 iop_outl(struct iop_softc *sc, int off, u_int32_t val)
255 {
256 
257 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
258 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
259 	    BUS_SPACE_BARRIER_WRITE);
260 }
261 
262 static inline u_int32_t
263 iop_inl_msg(struct iop_softc *sc, int off)
264 {
265 
266 	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
267 	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
268 	return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
269 }
270 
271 static inline void
272 iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
273 {
274 
275 	bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
276 	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
277 	    BUS_SPACE_BARRIER_WRITE);
278 }
279 
280 /*
281  * Initialise the IOP and our interface.
282  */
283 void
284 iop_init(struct iop_softc *sc, const char *intrstr)
285 {
286 	struct iop_msg *im;
287 	int rv, i, j, state, nsegs;
288 	u_int32_t mask;
289 	char ident[64];
290 
291 	state = 0;
292 
293 	printf("I2O adapter");
294 
295 	mutex_init(&sc->sc_intrlock, MUTEX_DEFAULT, IPL_VM);
296 	mutex_init(&sc->sc_conflock, MUTEX_DEFAULT, IPL_NONE);
297 	cv_init(&sc->sc_confcv, "iopconf");
298 
299 	if (iop_ictxhashtbl == NULL) {
300 		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
301 		    true, &iop_ictxhash);
302 	}
303 
304 	/* Disable interrupts at the IOP. */
305 	mask = iop_inl(sc, IOP_REG_INTR_MASK);
306 	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);
307 
308 	/* Allocate a scratch DMA map for small miscellaneous shared data. */
309 	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
310 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
311 		aprint_error_dev(sc->sc_dev, "cannot create scratch dmamap\n");
312 		return;
313 	}
314 
315 	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
316 	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
317 		aprint_error_dev(sc->sc_dev, "cannot alloc scratch dmamem\n");
318 		goto bail_out;
319 	}
320 	state++;
321 
322 	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
323 	    &sc->sc_scr, 0)) {
324 		aprint_error_dev(sc->sc_dev, "cannot map scratch dmamem\n");
325 		goto bail_out;
326 	}
327 	state++;
328 
329 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
330 	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
331 		aprint_error_dev(sc->sc_dev, "cannot load scratch dmamap\n");
332 		goto bail_out;
333 	}
334 	state++;
335 
336 #ifdef I2ODEBUG
337 	/* So that our debug checks don't choke. */
338 	sc->sc_framesize = 128;
339 #endif
340 
341 	/* Avoid syncing the reply map until it's set up. */
342 	sc->sc_curib = 0x123;
343 
344 	/* Reset the adapter and request status. */
345  	if ((rv = iop_reset(sc)) != 0) {
346  		aprint_error_dev(sc->sc_dev, "not responding (reset)\n");
347 		goto bail_out;
348  	}
349 
350  	if ((rv = iop_status_get(sc, 1)) != 0) {
351 		aprint_error_dev(sc->sc_dev, "not responding (get status)\n");
352 		goto bail_out;
353  	}
354 
355 	sc->sc_flags |= IOP_HAVESTATUS;
356 	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
357 	    ident, sizeof(ident));
358 	printf(" <%s>\n", ident);
359 
360 #ifdef I2ODEBUG
361 	printf("%s: orgid=0x%04x version=%d\n",
362 	    device_xname(sc->sc_dev),
363 	    le16toh(sc->sc_status.orgid),
364 	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
365 	printf("%s: type want have cbase\n", device_xname(sc->sc_dev));
366 	printf("%s: mem  %04x %04x %08x\n", device_xname(sc->sc_dev),
367 	    le32toh(sc->sc_status.desiredprivmemsize),
368 	    le32toh(sc->sc_status.currentprivmemsize),
369 	    le32toh(sc->sc_status.currentprivmembase));
370 	printf("%s: i/o  %04x %04x %08x\n", device_xname(sc->sc_dev),
371 	    le32toh(sc->sc_status.desiredpriviosize),
372 	    le32toh(sc->sc_status.currentpriviosize),
373 	    le32toh(sc->sc_status.currentpriviobase));
374 #endif
375 
376 	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
377 	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
378 		sc->sc_maxob = IOP_MAX_OUTBOUND;
379 	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
380 	if (sc->sc_maxib > IOP_MAX_INBOUND)
381 		sc->sc_maxib = IOP_MAX_INBOUND;
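	/* The inbound frame size is reported in 32-bit words; convert to bytes. */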
382 	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
383 	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
384 		sc->sc_framesize = IOP_MAX_MSG_SIZE;
385 
386 #if defined(I2ODEBUG) || defined(DIAGNOSTIC)
387 	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
388 		aprint_error_dev(sc->sc_dev, "frame size too small (%d)\n",
389 		    sc->sc_framesize);
390 		goto bail_out;
391 	}
392 #endif
393 
394 	/* Allocate message wrappers. */
395 	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_WAITOK|M_ZERO);
396 	state++;
397 	sc->sc_ims = im;
398 	SLIST_INIT(&sc->sc_im_freelist);
399 
400 	for (i = 0; i < sc->sc_maxib; i++, im++) {
401 		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
402 		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
403 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
404 		    &im->im_xfer[0].ix_map);
405 		if (rv != 0) {
406 			aprint_error_dev(sc->sc_dev, "couldn't create dmamap (%d)\n", rv);
407 			goto bail_out3;
408 		}
409 
410 		im->im_tctx = i;
411 		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
412 		cv_init(&im->im_cv, "iopmsg");
413 	}
414 
415 	/* Initialise the IOP's outbound FIFO. */
416 	if (iop_ofifo_init(sc) != 0) {
417 		aprint_error_dev(sc->sc_dev, "unable to init oubound FIFO\n");
418 		goto bail_out3;
419 	}
420 
421 	/*
422  	 * Defer further configuration until (a) interrupts are working and
423  	 * (b) we have enough information to build the system table.
424  	 */
425 	config_interrupts(sc->sc_dev, iop_config_interrupts);
426 
427 	/* Configure shutdown hook before we start any device activity. */
428 	if (iop_sdh == NULL)
429 		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);
430 
431 	/* Ensure interrupts are enabled at the IOP. */
432 	mask = iop_inl(sc, IOP_REG_INTR_MASK);
433 	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);
434 
435 	if (intrstr != NULL)
436 		printf("%s: interrupting at %s\n", device_xname(sc->sc_dev),
437 		    intrstr);
438 
439 #ifdef I2ODEBUG
440 	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
441 	    device_xname(sc->sc_dev), sc->sc_maxib,
442 	    le32toh(sc->sc_status.maxinboundmframes),
443 	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
444 #endif
445 
446 	return;
447 
448  bail_out3:
449  	if (state > 3) {
450 		for (j = 0; j < i; j++)
451 			bus_dmamap_destroy(sc->sc_dmat,
452 			    sc->sc_ims[j].im_xfer[0].ix_map);
453 		free(sc->sc_ims, M_DEVBUF);
454 	}
455  bail_out:
456 	if (state > 2)
457 		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
458 	if (state > 1)
459 		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
460 	if (state > 0)
461 		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
462 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
463 }
464 
465 /*
466  * Perform autoconfiguration tasks.
467  */
468 static void
469 iop_config_interrupts(device_t self)
470 {
471 	struct iop_attach_args ia;
472 	struct iop_softc *sc, *iop;
473 	struct i2o_systab_entry *ste;
474 	int rv, i, niop;
475 	int locs[IOPCF_NLOCS];
476 
477 	sc = device_private(self);
478 	mutex_enter(&sc->sc_conflock);
479 
480 	LIST_INIT(&sc->sc_iilist);
481 
482 	printf("%s: configuring...\n", device_xname(sc->sc_dev));
483 
484 	if (iop_hrt_get(sc) != 0) {
485 		printf("%s: unable to retrieve HRT\n", device_xname(sc->sc_dev));
486 		mutex_exit(&sc->sc_conflock);
487 		return;
488 	}
489 
490 	/*
491  	 * Build the system table.
492  	 */
493 	if (iop_systab == NULL) {
494 		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
495 			if ((iop = device_lookup_private(&iop_cd, i)) == NULL)
496 				continue;
497 			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
498 				continue;
499 			if (iop_status_get(iop, 1) != 0) {
500 				aprint_error_dev(iop->sc_dev, "unable to retrieve status\n");
501 				iop->sc_flags &= ~IOP_HAVESTATUS;
502 				continue;
503 			}
504 			niop++;
505 		}
506 		if (niop == 0) {
507 			mutex_exit(&sc->sc_conflock);
508 			return;
509 		}
510 
511 		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
512 		    sizeof(struct i2o_systab);
513 		iop_systab_size = i;
514 		iop_systab = malloc(i, M_DEVBUF, M_WAITOK|M_ZERO);
515 		iop_systab->numentries = niop;
516 		iop_systab->version = I2O_VERSION_11;
517 
518 		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
519 			if ((iop = device_lookup_private(&iop_cd, i)) == NULL)
520 				continue;
521 			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
522 				continue;
523 
524 			ste->orgid = iop->sc_status.orgid;
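			/* Bias unit numbers by 2 so IOP IDs 0 and 1 are never used. */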
525 			ste->iopid = device_unit(iop->sc_dev) + 2;
526 			ste->segnumber =
527 			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
528 			ste->iopcaps = iop->sc_status.iopcaps;
529 			ste->inboundmsgframesize =
530 			    iop->sc_status.inboundmframesize;
531 			ste->inboundmsgportaddresslow =
532 			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
533 			ste++;
534 		}
535 	}
536 
537 	/*
538 	 * Post the system table to the IOP and bring it to the OPERATIONAL
539 	 * state.
540 	 */
541 	if (iop_systab_set(sc) != 0) {
542 		aprint_error_dev(sc->sc_dev, "unable to set system table\n");
543 		mutex_exit(&sc->sc_conflock);
544 		return;
545 	}
546 	if (iop_sys_enable(sc) != 0) {
547 		aprint_error_dev(sc->sc_dev, "unable to enable system\n");
548 		mutex_exit(&sc->sc_conflock);
549 		return;
550 	}
551 
552 	/*
553 	 * Set up an event handler for this IOP.
554 	 */
555 	sc->sc_eventii.ii_dv = self;
556 	sc->sc_eventii.ii_intr = iop_intr_event;
557 	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
558 	sc->sc_eventii.ii_tid = I2O_TID_IOP;
559 	iop_initiator_register(sc, &sc->sc_eventii);
560 
561 	rv = iop_util_eventreg(sc, &sc->sc_eventii,
562 	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
563 	    I2O_EVENT_EXEC_CONNECTION_FAIL |
564 	    I2O_EVENT_EXEC_ADAPTER_FAULT |
565 	    I2O_EVENT_EXEC_POWER_FAIL |
566 	    I2O_EVENT_EXEC_RESET_PENDING |
567 	    I2O_EVENT_EXEC_RESET_IMMINENT |
568 	    I2O_EVENT_EXEC_HARDWARE_FAIL |
569 	    I2O_EVENT_EXEC_XCT_CHANGE |
570 	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
571 	    I2O_EVENT_GEN_DEVICE_RESET |
572 	    I2O_EVENT_GEN_STATE_CHANGE |
573 	    I2O_EVENT_GEN_GENERAL_WARNING);
574 	if (rv != 0) {
575 		aprint_error_dev(sc->sc_dev, "unable to register for events\n");
576 		mutex_exit(&sc->sc_conflock);
577 		return;
578 	}
579 
580 	/*
581 	 * Attempt to match and attach a product-specific extension.
582 	 */
583 	ia.ia_class = I2O_CLASS_ANY;
584 	ia.ia_tid = I2O_TID_IOP;
585 	locs[IOPCF_TID] = I2O_TID_IOP;
586 	config_found(self, &ia, iop_print,
587 	    CFARG_SUBMATCH, config_stdsubmatch,
588 	    CFARG_LOCATORS, locs,
589 	    CFARG_EOL);
590 
591 	/*
592 	 * Start device configuration.
593 	 */
594 	if ((rv = iop_reconfigure(sc, 0)) == -1)
595 		aprint_error_dev(sc->sc_dev, "configure failed (%d)\n", rv);
596 
597 
598 	sc->sc_flags |= IOP_ONLINE;
599 	rv = kthread_create(PRI_NONE, 0, NULL, iop_reconf_thread, sc,
600 	    &sc->sc_reconf_thread, "%s", device_xname(sc->sc_dev));
601 	mutex_exit(&sc->sc_conflock);
602  	if (rv != 0) {
603 		aprint_error_dev(sc->sc_dev, "unable to create reconfiguration thread (%d)\n", rv);
604  		return;
605  	}
606 }
607 
608 /*
609  * Reconfiguration thread: listens for LCT change notifications, and
610  * initiates re-configuration when one is received.
611  */
612 static void
613 iop_reconf_thread(void *cookie)
614 {
615 	struct iop_softc *sc;
616 	struct i2o_lct lct;
617 	u_int32_t chgind;
618 	int rv;
619 
620 	sc = cookie;
621 	chgind = sc->sc_chgind + 1;
622 
623 	for (;;) {
624 		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
625 		    device_xname(sc->sc_dev), chgind));
626 
627 		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
628 
629 		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
630 		    device_xname(sc->sc_dev), le32toh(lct.changeindicator), rv));
631 
632 		mutex_enter(&sc->sc_conflock);
633 		if (rv == 0) {
634 			iop_reconfigure(sc, le32toh(lct.changeindicator));
635 			chgind = sc->sc_chgind + 1;
636 		}
637 		(void)cv_timedwait(&sc->sc_confcv, &sc->sc_conflock, hz * 5);
638 		mutex_exit(&sc->sc_conflock);
639 	}
640 }
641 
642 /*
643  * Reconfigure: find new and removed devices.
644  */
645 int
646 iop_reconfigure(struct iop_softc *sc, u_int chgind)
647 {
648 	struct iop_msg *im;
649 	struct i2o_hba_bus_scan mf;
650 	struct i2o_lct_entry *le;
651 	struct iop_initiator *ii, *nextii;
652 	int rv, tid, i;
653 
654 	KASSERT(mutex_owned(&sc->sc_conflock));
655 
656 	/*
657 	 * If the reconfiguration request isn't the result of LCT change
658 	 * notification, then be more thorough: ask all bus ports to scan
659 	 * their busses.  Wait up to 5 minutes for each bus port to complete
660 	 * the request.
661 	 */
662 	if (chgind == 0) {
663 		if ((rv = iop_lct_get(sc)) != 0) {
664 			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
665 			return (rv);
666 		}
667 
668 		le = sc->sc_lct->entry;
669 		for (i = 0; i < sc->sc_nlctent; i++, le++) {
670 			if ((le16toh(le->classid) & 4095) !=
671 			    I2O_CLASS_BUS_ADAPTER_PORT)
672 				continue;
673 			tid = le16toh(le->localtid) & 4095;
674 
675 			im = iop_msg_alloc(sc, IM_WAIT);
676 
677 			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
678 			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
679 			mf.msgictx = IOP_ICTX;
680 			mf.msgtctx = im->im_tctx;
681 
682 			DPRINTF(("%s: scanning bus %d\n", device_xname(sc->sc_dev),
683 			    tid));
684 
685 			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
686 			iop_msg_free(sc, im);
687 #ifdef I2ODEBUG
688 			if (rv != 0)
689 				aprint_error_dev(sc->sc_dev, "bus scan failed\n");
690 #endif
691 		}
692 	} else if (chgind <= sc->sc_chgind) {
693 		DPRINTF(("%s: LCT unchanged (async)\n", device_xname(sc->sc_dev)));
694 		return (0);
695 	}
696 
697 	/* Re-read the LCT and determine if it has changed. */
698 	if ((rv = iop_lct_get(sc)) != 0) {
699 		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
700 		return (rv);
701 	}
702 	DPRINTF(("%s: %d LCT entries\n", device_xname(sc->sc_dev), sc->sc_nlctent));
703 
704 	chgind = le32toh(sc->sc_lct->changeindicator);
705 	if (chgind == sc->sc_chgind) {
706 		DPRINTF(("%s: LCT unchanged\n", device_xname(sc->sc_dev)));
707 		return (0);
708 	}
709 	DPRINTF(("%s: LCT changed\n", device_xname(sc->sc_dev)));
710 	sc->sc_chgind = chgind;
711 
712 	if (sc->sc_tidmap != NULL)
713 		free(sc->sc_tidmap, M_DEVBUF);
714 	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
715 	    M_DEVBUF, M_WAITOK|M_ZERO);
716 
717 	/* Allow 1 queued command per device while we're configuring. */
718 	iop_adjqparam(sc, 1);
719 
720 	/*
721 	 * Match and attach child devices.  We configure high-level devices
722 	 * first so that any claims will propagate throughout the LCT,
723 	 * hopefully masking off aliased devices as a result.
724 	 *
725 	 * Re-reading the LCT at this point is a little dangerous, but we'll
726 	 * trust the IOP (and the operator) to behave itself...
727 	 */
728 	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
729 	    IC_CONFIGURE | IC_PRIORITY);
730 	if ((rv = iop_lct_get(sc)) != 0) {
731 		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
732 	}
733 	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
734 	    IC_CONFIGURE);
735 
736 	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
737 		nextii = LIST_NEXT(ii, ii_list);
738 
739 		/* Detach devices that were configured, but are now gone. */
740 		for (i = 0; i < sc->sc_nlctent; i++)
741 			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
742 				break;
743 		if (i == sc->sc_nlctent ||
744 		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0) {
745 			config_detach(ii->ii_dv, DETACH_FORCE);
746 			continue;
747 		}
748 
749 		/*
750 		 * Tell initiators that existed before the re-configuration
751 		 * to re-configure.
752 		 */
753 		if (ii->ii_reconfig == NULL)
754 			continue;
755 		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
756 			aprint_error_dev(sc->sc_dev, "%s failed reconfigure (%d)\n",
757 			    device_xname(ii->ii_dv), rv);
758 	}
759 
760 	/* Re-adjust queue parameters and return. */
761 	if (sc->sc_nii != 0)
762 		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
763 		    / sc->sc_nii);
764 
765 	return (0);
766 }
767 
768 /*
769  * Configure I2O devices into the system.
770  */
771 static void
772 iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
773 {
774 	struct iop_attach_args ia;
775 	struct iop_initiator *ii;
776 	const struct i2o_lct_entry *le;
777 	device_t dv;
778 	int i, j, nent;
779 	u_int usertid;
780 	int locs[IOPCF_NLOCS];
781 
782 	nent = sc->sc_nlctent;
783 	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
784 		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;
785 
786 		/* Ignore the device if it's in use. */
787 		usertid = le32toh(le->usertid) & 4095;
788 		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
789 			continue;
790 
791 		ia.ia_class = le16toh(le->classid) & 4095;
792 		ia.ia_tid = sc->sc_tidmap[i].it_tid;
793 
794 		/* Ignore uninteresting devices. */
795 		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
796 			if (iop_class[j].ic_class == ia.ia_class)
797 				break;
798 		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
799 		    (iop_class[j].ic_flags & mask) != maskval)
800 			continue;
801 
802 		/*
803 		 * Try to configure the device only if it's not already
804 		 * configured.
805  		 */
806  		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
807  			if (ia.ia_tid == ii->ii_tid) {
808 				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
809 				strcpy(sc->sc_tidmap[i].it_dvname,
810 				    device_xname(ii->ii_dv));
811  				break;
812 			}
813 		}
814 		if (ii != NULL)
815 			continue;
816 
817 		locs[IOPCF_TID] = ia.ia_tid;
818 
819 		dv = config_found(sc->sc_dev, &ia, iop_print,
820 		    CFARG_SUBMATCH, config_stdsubmatch,
821 		    CFARG_LOCATORS, locs,
822 		    CFARG_EOL);
823 		if (dv != NULL) {
824  			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
825 			strcpy(sc->sc_tidmap[i].it_dvname, device_xname(dv));
826 		}
827 	}
828 }
829 
830 /*
831  * Adjust queue parameters for all child devices.
832  */
833 static void
834 iop_adjqparam(struct iop_softc *sc, int mpi)
835 {
836 	struct iop_initiator *ii;
837 
838 	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
839 		if (ii->ii_adjqparam != NULL)
840 			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
841 }
842 
843 static void
844 iop_devinfo(int class, char *devinfo, size_t l)
845 {
846 	int i;
847 
848 	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
849 		if (class == iop_class[i].ic_class)
850 			break;
851 
852 	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
853 		snprintf(devinfo, l, "device (class 0x%x)", class);
854 	else
855 		strlcpy(devinfo, iop_class[i].ic_caption, l);
856 }
857 
858 static int
859 iop_print(void *aux, const char *pnp)
860 {
861 	struct iop_attach_args *ia;
862 	char devinfo[256];
863 
864 	ia = aux;
865 
866 	if (pnp != NULL) {
867 		iop_devinfo(ia->ia_class, devinfo, sizeof(devinfo));
868 		aprint_normal("%s at %s", devinfo, pnp);
869 	}
870 	aprint_normal(" tid %d", ia->ia_tid);
871 	return (UNCONF);
872 }
873 
874 /*
875  * Shut down all configured IOPs.
876  */
877 static void
878 iop_shutdown(void *junk)
879 {
880 	struct iop_softc *sc;
881 	int i;
882 
883 	printf("shutting down iop devices...");
884 
885 	for (i = 0; i < iop_cd.cd_ndevs; i++) {
886 		if ((sc = device_lookup_private(&iop_cd, i)) == NULL)
887 			continue;
888 		if ((sc->sc_flags & IOP_ONLINE) == 0)
889 			continue;
890 
891 		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
892 		    0, 5000);
893 
894 		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
895 			/*
896 			 * Some AMI firmware revisions will go to sleep and
897 			 * never come back after this.
898 			 */
899 			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
900 			    IOP_ICTX, 0, 1000);
901 		}
902 	}
903 
904 	/* Wait.  Some boards could still be flushing, stupidly enough. */
905 	delay(5000*1000);
906 	printf(" done\n");
907 }
908 
909 /*
910  * Retrieve IOP status.
911  */
912 int
913 iop_status_get(struct iop_softc *sc, int nosleep)
914 {
915 	struct i2o_exec_status_get mf;
916 	struct i2o_status *st;
917 	paddr_t pa;
918 	int rv, i;
919 
920 	pa = sc->sc_scr_dmamap->dm_segs[0].ds_addr;
921 	st = (struct i2o_status *)sc->sc_scr;
922 
923 	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
924 	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
925 	mf.reserved[0] = 0;
926 	mf.reserved[1] = 0;
927 	mf.reserved[2] = 0;
928 	mf.reserved[3] = 0;
929 	mf.addrlow = (u_int32_t)pa;
930 	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
931 	mf.length = sizeof(sc->sc_status);
932 
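	/*
	 * Zero the status buffer; the IOP sets `syncbyte' to 0xff once it
	 * has written the reply.
	 */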
933 	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
934 	    BUS_DMASYNC_PREWRITE);
935 	memset(st, 0, sizeof(*st));
936 	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
937 	    BUS_DMASYNC_POSTWRITE);
938 
939 	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
940 		return (rv);
941 
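	/* Poll for up to ~10 seconds (100 iterations of 100ms each). */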
942 	for (i = 100; i != 0; i--) {
943 		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
944 		    sizeof(*st), BUS_DMASYNC_POSTREAD);
945 		if (st->syncbyte == 0xff)
946 			break;
947 		if (nosleep)
948 			DELAY(100*1000);
949 		else
950 			kpause("iopstat", false, hz / 10, NULL);
951 	}
952 
953 	if (st->syncbyte != 0xff) {
954 		aprint_error_dev(sc->sc_dev, "STATUS_GET timed out\n");
955 		rv = EIO;
956 	} else {
957 		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
958 		rv = 0;
959 	}
960 
961 	return (rv);
962 }
963 
964 /*
965  * Initialize and populate the IOP's outbound FIFO.
966  */
967 static int
968 iop_ofifo_init(struct iop_softc *sc)
969 {
970 	bus_addr_t addr;
971 	bus_dma_segment_t seg;
972 	struct i2o_exec_outbound_init *mf;
973 	int i, rseg, rv;
974 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;
975 
976 	sw = (u_int32_t *)sc->sc_scr;
977 
978 	mf = (struct i2o_exec_outbound_init *)mb;
979 	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
980 	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
981 	mf->msgictx = IOP_ICTX;
982 	mf->msgtctx = 0;
983 	mf->pagesize = PAGE_SIZE;
984 	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);
985 
986 	/*
987 	 * The I2O spec says that there are two SGLs: one for the status
988 	 * word, and one for a list of discarded MFAs.  It continues to say
989 	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
990 	 * necessary; this isn't the case (and is in fact a bad thing).
991 	 */
992 	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
993 	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
994 	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
995 	    (u_int32_t)sc->sc_scr_dmamap->dm_segs[0].ds_addr;
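	/* Account for the two SGL words just appended in the frame's word count. */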
996 	mb[0] += 2 << 16;
997 
998 	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
999 	    BUS_DMASYNC_POSTWRITE);
1000 	*sw = 0;
1001 	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1002 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1003 
1004 	if ((rv = iop_post(sc, mb)) != 0)
1005 		return (rv);
1006 
1007 	POLL(5000,
1008 	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1009 	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD),
1010 	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));
1011 
1012 	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
1013 		aprint_error_dev(sc->sc_dev, "outbound FIFO init failed (%d)\n",
1014 		    le32toh(*sw));
1015 		return (EIO);
1016 	}
1017 
1018 	/* Allocate DMA safe memory for the reply frames. */
1019 	if (sc->sc_rep_phys == 0) {
1020 		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;
1021 
1022 		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
1023 		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
1024 		if (rv != 0) {
1025 			aprint_error_dev(sc->sc_dev, "DMA alloc = %d\n",
1026 			   rv);
1027 			return (rv);
1028 		}
1029 
1030 		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
1031 		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1032 		if (rv != 0) {
1033 			aprint_error_dev(sc->sc_dev, "DMA map = %d\n", rv);
1034 			return (rv);
1035 		}
1036 
1037 		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
1038 		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
1039 		if (rv != 0) {
1040 			aprint_error_dev(sc->sc_dev, "DMA create = %d\n", rv);
1041 			return (rv);
1042 		}
1043 
1044 		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
1045 		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
1046 		if (rv != 0) {
1047 			aprint_error_dev(sc->sc_dev, "DMA load = %d\n", rv);
1048 			return (rv);
1049 		}
1050 
1051 		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
1052 
1053 		/* Now safe to sync the reply map. */
1054 		sc->sc_curib = 0;
1055 	}
1056 
1057 	/* Populate the outbound FIFO. */
1058 	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
1059 		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
1060 		addr += sc->sc_framesize;
1061 	}
1062 
1063 	return (0);
1064 }
1065 
1066 /*
1067  * Read the specified number of bytes from the IOP's hardware resource table.
1068  */
1069 static int
1070 iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
1071 {
1072 	struct iop_msg *im;
1073 	int rv;
1074 	struct i2o_exec_hrt_get *mf;
1075 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1076 
1077 	im = iop_msg_alloc(sc, IM_WAIT);
1078 	mf = (struct i2o_exec_hrt_get *)mb;
1079 	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
1080 	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
1081 	mf->msgictx = IOP_ICTX;
1082 	mf->msgtctx = im->im_tctx;
1083 
1084 	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
1085 	rv = iop_msg_post(sc, im, mb, 30000);
1086 	iop_msg_unmap(sc, im);
1087 	iop_msg_free(sc, im);
1088 	return (rv);
1089 }
1090 
1091 /*
1092  * Read the IOP's hardware resource table.
1093  */
1094 static int
1095 iop_hrt_get(struct iop_softc *sc)
1096 {
1097 	struct i2o_hrt hrthdr, *hrt;
1098 	int size, rv;
1099 
1100 	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
1101 	if (rv != 0)
1102 		return (rv);
1103 
1104 	DPRINTF(("%s: %d hrt entries\n", device_xname(sc->sc_dev),
1105 	    le16toh(hrthdr.numentries)));
1106 
1107 	size = sizeof(struct i2o_hrt) +
1108 	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
1109 	hrt = malloc(size, M_DEVBUF, M_WAITOK);
1110 	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
1111 		free(hrt, M_DEVBUF);
1112 		return (rv);
1113 	}
1114 
1115 	if (sc->sc_hrt != NULL)
1116 		free(sc->sc_hrt, M_DEVBUF);
1117 	sc->sc_hrt = hrt;
1118 	return (0);
1119 }
1120 
1121 /*
1122  * Request the specified number of bytes from the IOP's logical
1123  * configuration table.  If a change indicator is specified, this
1124  * is a verbatim notification request, and the caller must be prepared
1125  * to wait indefinitely.
1126  */
1127 static int
1128 iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
1129 	     u_int32_t chgind)
1130 {
1131 	struct iop_msg *im;
1132 	struct i2o_exec_lct_notify *mf;
1133 	int rv;
1134 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1135 
1136 	im = iop_msg_alloc(sc, IM_WAIT);
1137 	memset(lct, 0, size);
1138 
1139 	mf = (struct i2o_exec_lct_notify *)mb;
1140 	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
1141 	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
1142 	mf->msgictx = IOP_ICTX;
1143 	mf->msgtctx = im->im_tctx;
1144 	mf->classid = I2O_CLASS_ANY;
1145 	mf->changeindicator = chgind;
1146 
1147 #ifdef I2ODEBUG
1148 	printf("iop_lct_get0: reading LCT");
1149 	if (chgind != 0)
1150 		printf(" (async)");
1151 	printf("\n");
1152 #endif
1153 
1154 	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
1155 	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
1156 	iop_msg_unmap(sc, im);
1157 	iop_msg_free(sc, im);
1158 	return (rv);
1159 }
1160 
1161 /*
1162  * Read the IOP's logical configuration table.
1163  */
1164 int
1165 iop_lct_get(struct iop_softc *sc)
1166 {
1167 	int esize, size, rv;
1168 	struct i2o_lct *lct;
1169 
1170 	esize = le32toh(sc->sc_status.expectedlctsize);
1171 	lct = malloc(esize, M_DEVBUF, M_WAITOK);
1172 	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
1173 		free(lct, M_DEVBUF);
1174 		return (rv);
1175 	}
1176 
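	/*
	 * The table reports its true size in 32-bit words; if it differs
	 * from the size the status block led us to expect, re-read.
	 */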
1177 	size = le16toh(lct->tablesize) << 2;
1178 	if (esize != size) {
1179 		free(lct, M_DEVBUF);
1180 		lct = malloc(size, M_DEVBUF, M_WAITOK);
1181 		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
1182 			free(lct, M_DEVBUF);
1183 			return (rv);
1184 		}
1185 	}
1186 
1187 	/* Swap in the new LCT. */
1188 	if (sc->sc_lct != NULL)
1189 		free(sc->sc_lct, M_DEVBUF);
1190 	sc->sc_lct = lct;
1191 	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
1192 	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
1193 	    sizeof(struct i2o_lct_entry);
1194 	return (0);
1195 }
1196 
1197 /*
1198  * Post a SYS_ENABLE message to the adapter.
1199  */
1200 int
1201 iop_sys_enable(struct iop_softc *sc)
1202 {
1203 	struct iop_msg *im;
1204 	struct i2o_msg mf;
1205 	int rv;
1206 
1207 	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
1208 
1209 	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
1210 	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_ENABLE);
1211 	mf.msgictx = IOP_ICTX;
1212 	mf.msgtctx = im->im_tctx;
1213 
1214 	rv = iop_msg_post(sc, im, &mf, 30000);
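	/*
	 * Treat ERROR_NO_DATA_XFER with DSC_INVALID_REQUEST as success;
	 * some IOPs appear to reject SYS_ENABLE when already enabled.
	 */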
1215 	if (rv == 0) {
1216 		if ((im->im_flags & IM_FAIL) != 0)
1217 			rv = ENXIO;
1218 		else if (im->im_reqstatus == I2O_STATUS_SUCCESS ||
1219 		    (im->im_reqstatus == I2O_STATUS_ERROR_NO_DATA_XFER &&
1220 		    im->im_detstatus == I2O_DSC_INVALID_REQUEST))
1221 			rv = 0;
1222 		else
1223 			rv = EIO;
1224 	}
1225 
1226 	iop_msg_free(sc, im);
1227 	return (rv);
1228 }
1229 
1230 /*
1231  * Request the specified parameter group from the target.  If an initiator
1232  * is specified (a) don't wait for the operation to complete, but instead
1233  * let the initiator's interrupt handler deal with the reply and (b) place a
1234  * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
1235  */
1236 int
1237 iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
1238 		  int size, struct iop_initiator *ii)
1239 {
1240 	struct iop_msg *im;
1241 	struct i2o_util_params_op *mf;
1242 	int rv;
1243 	struct iop_pgop *pgop;
1244 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1245 
1246 	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
1247 	pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK);
1248 	im->im_dvcontext = pgop;
1249 
1250 	mf = (struct i2o_util_params_op *)mb;
1251 	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1252 	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
1253 	mf->msgictx = IOP_ICTX;
1254 	mf->msgtctx = im->im_tctx;
1255 	mf->flags = 0;
1256 
1257 	pgop->olh.count = htole16(1);
1258 	pgop->olh.reserved = htole16(0);
1259 	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
1260 	pgop->oat.fieldcount = htole16(0xffff);
1261 	pgop->oat.group = htole16(group);
1262 
1263 	memset(buf, 0, size);
1264 	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
1265 	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
1266 	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));
1267 
1268 	/* Detect errors; let partial transfers count as success. */
1269 	if (ii == NULL && rv == 0) {
1270 		if (im->im_reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
1271 		    im->im_detstatus == I2O_DSC_UNKNOWN_ERROR)
1272 			rv = 0;
1273 		else
1274 			rv = (im->im_reqstatus != 0 ? EIO : 0);
1275 
1276 		if (rv != 0)
1277 			printf("%s: FIELD_GET failed for tid %d group %d\n",
1278 			    device_xname(sc->sc_dev), tid, group);
1279 	}
1280 
1281 	if (ii == NULL || rv != 0) {
1282 		iop_msg_unmap(sc, im);
1283 		iop_msg_free(sc, im);
1284 		free(pgop, M_DEVBUF);
1285 	}
1286 
1287 	return (rv);
1288 }
1289 
1290 /*
1291  * Set a single field in a scalar parameter group.
1292  */
1293 int
1294 iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
1295 	      int size, int field)
1296 {
1297 	struct iop_msg *im;
1298 	struct i2o_util_params_op *mf;
1299 	struct iop_pgop *pgop;
1300 	int rv, totsize;
1301 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1302 
1303 	totsize = sizeof(*pgop) + size;
1304 
1305 	im = iop_msg_alloc(sc, IM_WAIT);
1306 	pgop = malloc(totsize, M_DEVBUF, M_WAITOK);
1307 	mf = (struct i2o_util_params_op *)mb;
1308 	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1309 	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1310 	mf->msgictx = IOP_ICTX;
1311 	mf->msgtctx = im->im_tctx;
1312 	mf->flags = 0;
1313 
1314 	pgop->olh.count = htole16(1);
1315 	pgop->olh.reserved = htole16(0);
1316 	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
1317 	pgop->oat.fieldcount = htole16(1);
1318 	pgop->oat.group = htole16(group);
1319 	pgop->oat.fields[0] = htole16(field);
1320 	memcpy(pgop + 1, buf, size);
1321 
1322 	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
1323 	rv = iop_msg_post(sc, im, mb, 30000);
1324 	if (rv != 0)
1325 		aprint_error_dev(sc->sc_dev, "FIELD_SET failed for tid %d group %d\n",
1326 		    tid, group);
1327 
1328 	iop_msg_unmap(sc, im);
1329 	iop_msg_free(sc, im);
1330 	free(pgop, M_DEVBUF);
1331 	return (rv);
1332 }
1333 
1334 /*
1335  * Delete all rows in a tabular parameter group.
1336  */
1337 int
1338 iop_table_clear(struct iop_softc *sc, int tid, int group)
1339 {
1340 	struct iop_msg *im;
1341 	struct i2o_util_params_op *mf;
1342 	struct iop_pgop pgop;
1343 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1344 	int rv;
1345 
1346 	im = iop_msg_alloc(sc, IM_WAIT);
1347 
1348 	mf = (struct i2o_util_params_op *)mb;
1349 	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1350 	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1351 	mf->msgictx = IOP_ICTX;
1352 	mf->msgtctx = im->im_tctx;
1353 	mf->flags = 0;
1354 
1355 	pgop.olh.count = htole16(1);
1356 	pgop.olh.reserved = htole16(0);
1357 	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
1358 	pgop.oat.fieldcount = htole16(0);
1359 	pgop.oat.group = htole16(group);
1360 	pgop.oat.fields[0] = htole16(0);
1361 
1362 	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
1363 	rv = iop_msg_post(sc, im, mb, 30000);
1364 	if (rv != 0)
1365 		aprint_error_dev(sc->sc_dev, "TABLE_CLEAR failed for tid %d group %d\n",
1366 		    tid, group);
1367 
1368 	iop_msg_unmap(sc, im);
1369 	iop_msg_free(sc, im);
1370 	return (rv);
1371 }
1372 
1373 /*
1374  * Add a single row to a tabular parameter group.  The row can have only one
1375  * field.
1376  */
1377 int
1378 iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
1379 		  int size, int row)
1380 {
1381 	struct iop_msg *im;
1382 	struct i2o_util_params_op *mf;
1383 	struct iop_pgop *pgop;
1384 	int rv, totsize;
1385 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1386 
1387 	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;
1388 
1389 	im = iop_msg_alloc(sc, IM_WAIT);
1390 	pgop = malloc(totsize, M_DEVBUF, M_WAITOK);
1391 	mf = (struct i2o_util_params_op *)mb;
1392 	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1393 	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1394 	mf->msgictx = IOP_ICTX;
1395 	mf->msgtctx = im->im_tctx;
1396 	mf->flags = 0;
1397 
1398 	pgop->olh.count = htole16(1);
1399 	pgop->olh.reserved = htole16(0);
1400 	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
1401 	pgop->oat.fieldcount = htole16(1);
1402 	pgop->oat.group = htole16(group);
1403 	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
1404 	pgop->oat.fields[1] = htole16(1);	/* RowCount */
1405 	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
1406 	memcpy(&pgop->oat.fields[3], buf, size);
1407 
1408 	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
1409 	rv = iop_msg_post(sc, im, mb, 30000);
1410 	if (rv != 0)
1411 		aprint_error_dev(sc->sc_dev, "ADD_ROW failed for tid %d group %d row %d\n",
1412 		    tid, group, row);
1413 
1414 	iop_msg_unmap(sc, im);
1415 	iop_msg_free(sc, im);
1416 	free(pgop, M_DEVBUF);
1417 		aprint_error_dev(sc->sc_dev, "unable to init outbound FIFO\n");
1418 }
1419 
1420 /*
1421  * Execute a simple command (no parameters).
1422  */
1423 int
1424 iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
1425 	       int async, int timo)
1426 {
1427 	struct iop_msg *im;
1428 	struct i2o_msg mf;
1429 	int rv, fl;
1430 
1431 	fl = (async != 0 ? IM_WAIT : IM_POLL);
1432 	im = iop_msg_alloc(sc, fl);
1433 
1434 	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
1435 	mf.msgfunc = I2O_MSGFUNC(tid, function);
1436 	mf.msgictx = ictx;
1437 	mf.msgtctx = im->im_tctx;
1438 
1439 	rv = iop_msg_post(sc, im, &mf, timo);
1440 	iop_msg_free(sc, im);
1441 	return (rv);
1442 }
1443 
1444 /*
1445  * Post the system table to the IOP.
1446  */
1447 static int
1448 iop_systab_set(struct iop_softc *sc)
1449 {
1450 	struct i2o_exec_sys_tab_set *mf;
1451 	struct iop_msg *im;
1452 	bus_space_handle_t bsh;
1453 	bus_addr_t boo;
1454 	u_int32_t mema[2], ioa[2];
1455 	int rv;
1456 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1457 
1458 	im = iop_msg_alloc(sc, IM_WAIT);
1459 
1460 	mf = (struct i2o_exec_sys_tab_set *)mb;
1461 	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
1462 	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
1463 	mf->msgictx = IOP_ICTX;
1464 	mf->msgtctx = im->im_tctx;
1465 	mf->iopid = (device_unit(sc->sc_dev) + 2) << 12;
1466 	mf->segnumber = 0;
1467 
1468 	mema[1] = sc->sc_status.desiredprivmemsize;
1469 	ioa[1] = sc->sc_status.desiredpriviosize;
1470 
1471 	if (mema[1] != 0) {
1472 		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
1473 		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
1474 		mema[0] = htole32(boo);
1475 		if (rv != 0) {
1476 			aprint_error_dev(sc->sc_dev, "can't alloc priv mem space, err = %d\n", rv);
1477 			mema[0] = 0;
1478 			mema[1] = 0;
1479 		}
1480 	}
1481 
1482 	if (ioa[1] != 0) {
1483 		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
1484 		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
1485 		ioa[0] = htole32(boo);
1486 		if (rv != 0) {
1487 			aprint_error_dev(sc->sc_dev, "can't alloc priv i/o space, err = %d\n", rv);
1488 			ioa[0] = 0;
1489 			ioa[1] = 0;
1490 		}
1491 	}
1492 
1493 	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
1494 	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
1495 	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
1496 	rv = iop_msg_post(sc, im, mb, 5000);
1497 	iop_msg_unmap(sc, im);
1498 	iop_msg_free(sc, im);
1499 	return (rv);
1500 }
1501 
1502 /*
1503  * Reset the IOP.  Must be called with interrupts disabled.
1504  */
1505 static int
1506 iop_reset(struct iop_softc *sc)
1507 {
1508 	u_int32_t mfa, *sw;
1509 	struct i2o_exec_iop_reset mf;
1510 	int rv;
1511 	paddr_t pa;
1512 
1513 	sw = (u_int32_t *)sc->sc_scr;
1514 	pa = sc->sc_scr_dmamap->dm_segs[0].ds_addr;
1515 
1516 	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
1517 	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
1518 	mf.reserved[0] = 0;
1519 	mf.reserved[1] = 0;
1520 	mf.reserved[2] = 0;
1521 	mf.reserved[3] = 0;
1522 	mf.statuslow = (u_int32_t)pa;
1523 	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);
1524 
1525 	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1526 	    BUS_DMASYNC_POSTWRITE);
1527 	*sw = htole32(0);
1528 	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1529 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1530 
1531 	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
1532 		return (rv);
1533 
1534 	POLL(2500,
1535 	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1536 	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD), *sw != 0));
1537 	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
1538 		aprint_error_dev(sc->sc_dev, "reset rejected, status 0x%x\n",
1539 		    le32toh(*sw));
1540 		return (EIO);
1541 	}
1542 
1543 	/*
1544 	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
1545 	 * the inbound queue to become responsive.
1546 	 */
1547 	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
1548 	if (mfa == IOP_MFA_EMPTY) {
1549 		aprint_error_dev(sc->sc_dev, "reset failed\n");
1550 		return (EIO);
1551 	}
1552 
1553 	iop_release_mfa(sc, mfa);
1554 	return (0);
1555 }
1556 
1557 /*
1558  * Register a new initiator.  Must be called with the configuration lock
1559  * held.
1560  */
1561 void
1562 iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
1563 {
1564 	static int ictxgen;
1565 
1566 	/* 0 is reserved (by us) for system messages. */
1567 	ii->ii_ictx = ++ictxgen;
1568 
1569 	/*
1570 	 * `Utility initiators' don't make it onto the per-IOP initiator list
1571 	 * (which is used only for configuration), but do get one slot on
1572 	 * the inbound queue.
1573 	 */
1574 	if ((ii->ii_flags & II_UTILITY) == 0) {
1575 		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
1576 		sc->sc_nii++;
1577 	} else
1578 		sc->sc_nuii++;
1579 
1580 	cv_init(&ii->ii_cv, "iopevt");
1581 
1582 	mutex_spin_enter(&sc->sc_intrlock);
1583 	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
1584 	mutex_spin_exit(&sc->sc_intrlock);
1585 }
1586 
1587 /*
1588  * Unregister an initiator.  Must be called with the configuration lock
1589  * held.
1590  */
1591 void
1592 iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
1593 {
1594 
1595 	if ((ii->ii_flags & II_UTILITY) == 0) {
1596 		LIST_REMOVE(ii, ii_list);
1597 		sc->sc_nii--;
1598 	} else
1599 		sc->sc_nuii--;
1600 
1601 	mutex_spin_enter(&sc->sc_intrlock);
1602 	LIST_REMOVE(ii, ii_hash);
1603 	mutex_spin_exit(&sc->sc_intrlock);
1604 
1605 	cv_destroy(&ii->ii_cv);
1606 }
1607 
1608 /*
1609  * Handle a reply frame from the IOP.
1610  */
1611 static int
1612 iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
1613 {
1614 	struct iop_msg *im;
1615 	struct i2o_reply *rb;
1616 	struct i2o_fault_notify *fn;
1617 	struct iop_initiator *ii;
1618 	u_int off, ictx, tctx, status, size;
1619 
1620 	KASSERT(mutex_owned(&sc->sc_intrlock));
1621 
1622 	off = (int)(rmfa - sc->sc_rep_phys);
1623 	rb = (struct i2o_reply *)((char *)sc->sc_rep + off);
1624 
1625 	/* Perform reply queue DMA synchronisation. */
1626 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
1627 	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);
1628 
1629 #ifdef I2ODEBUG
1630 	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
1631 		panic("iop_handle_reply: 64-bit reply");
1632 #endif
1633 	/*
1634 	 * Find the initiator.
1635 	 */
1636 	ictx = le32toh(rb->msgictx);
1637 	if (ictx == IOP_ICTX)
1638 		ii = NULL;
1639 	else {
1640 		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
1641 		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
1642 			if (ii->ii_ictx == ictx)
1643 				break;
1644 		if (ii == NULL) {
1645 #ifdef I2ODEBUG
1646 			iop_reply_print(sc, rb);
1647 #endif
1648 			aprint_error_dev(sc->sc_dev, "WARNING: bad ictx returned (%x)\n",
1649 			    ictx);
1650 			return (-1);
1651 		}
1652 	}
1653 
1654 	/*
1655 	 * If we received a transport failure notice, we've got to dig the
1656 	 * transaction context (if any) out of the original message frame,
1657 	 * and then release the original MFA back to the inbound FIFO.
1658 	 */
1659 	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
1660 		status = I2O_STATUS_SUCCESS;
1661 
1662 		fn = (struct i2o_fault_notify *)rb;
1663 		tctx = iop_inl_msg(sc, fn->lowmfa + 12);
1664 		iop_release_mfa(sc, fn->lowmfa);
1665 		iop_tfn_print(sc, fn);
1666 	} else {
1667 		status = rb->reqstatus;
1668 		tctx = le32toh(rb->msgtctx);
1669 	}
1670 
1671 	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
1672 		/*
1673 		 * This initiator tracks state using message wrappers.
1674 		 *
1675 		 * Find the originating message wrapper, and if requested
1676 		 * notify the initiator.
1677 		 */
1678 		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
1679 		if ((tctx & IOP_TCTX_MASK) >= sc->sc_maxib ||
1680 		    (im->im_flags & IM_ALLOCED) == 0 ||
1681 		    tctx != im->im_tctx) {
1682 			aprint_error_dev(sc->sc_dev, "WARNING: bad tctx returned (0x%08x, %p)\n", tctx, im);
1683 			if (im != NULL)
1684 				aprint_error_dev(sc->sc_dev, "flags=0x%08x tctx=0x%08x\n",
1685 				    im->im_flags, im->im_tctx);
1686 #ifdef I2ODEBUG
1687 			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
1688 				iop_reply_print(sc, rb);
1689 #endif
1690 			return (-1);
1691 		}
1692 
1693 		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
1694 			im->im_flags |= IM_FAIL;
1695 
1696 #ifdef I2ODEBUG
1697 		if ((im->im_flags & IM_REPLIED) != 0)
1698 			panic("%s: dup reply", device_xname(sc->sc_dev));
1699 #endif
1700 		im->im_flags |= IM_REPLIED;
1701 
1702 #ifdef I2ODEBUG
1703 		if (status != I2O_STATUS_SUCCESS)
1704 			iop_reply_print(sc, rb);
1705 #endif
1706 		im->im_reqstatus = status;
1707 		im->im_detstatus = le16toh(rb->detail);
1708 
1709 		/* Copy the reply frame, if requested. */
1710 		if (im->im_rb != NULL) {
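			/* The frame length field counts words; >> 14 converts to bytes. */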
1711 			size = (le32toh(rb->msgflags) >> 14) & ~3;
1712 #ifdef I2ODEBUG
1713 			if (size > sc->sc_framesize)
1714 				panic("iop_handle_reply: reply too large");
1715 #endif
1716 			memcpy(im->im_rb, rb, size);
1717 		}
1718 
1719 		/* Notify the initiator. */
1720 		if ((im->im_flags & IM_WAIT) != 0)
1721 			cv_broadcast(&im->im_cv);
1722 		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL) {
1723 			if (ii != NULL) {
1724 				mutex_spin_exit(&sc->sc_intrlock);
1725 				(*ii->ii_intr)(ii->ii_dv, im, rb);
1726 				mutex_spin_enter(&sc->sc_intrlock);
1727 			}
1728 		}
1729 	} else {
1730 		/*
1731 		 * This initiator discards message wrappers.
1732 		 *
1733 		 * Simply pass the reply frame to the initiator.
1734 		 */
1735 		if (ii != NULL) {
1736 			mutex_spin_exit(&sc->sc_intrlock);
1737 			(*ii->ii_intr)(ii->ii_dv, NULL, rb);
1738 			mutex_spin_enter(&sc->sc_intrlock);
1739 		}
1740 	}
1741 
1742 	return (status);
1743 }
1744 
1745 /*
1746  * Handle an interrupt from the IOP.
1747  */
1748 int
1749 iop_intr(void *arg)
1750 {
1751 	struct iop_softc *sc;
1752 	u_int32_t rmfa;
1753 
1754 	sc = arg;
1755 
1756 	mutex_spin_enter(&sc->sc_intrlock);
1757 
1758 	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0) {
1759 		mutex_spin_exit(&sc->sc_intrlock);
1760 		return (0);
1761 	}
1762 
1763 	for (;;) {
1764 		/* Double read to account for IOP bug. */
1765 		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
1766 			rmfa = iop_inl(sc, IOP_REG_OFIFO);
1767 			if (rmfa == IOP_MFA_EMPTY)
1768 				break;
1769 		}
1770 		iop_handle_reply(sc, rmfa);
1771 		iop_outl(sc, IOP_REG_OFIFO, rmfa);
1772 	}
1773 
1774 	mutex_spin_exit(&sc->sc_intrlock);
1775 	return (1);
1776 }
1777 
1778 /*
1779  * Handle an event signalled by the executive.
1780  */
1781 static void
1782 iop_intr_event(device_t dv, struct iop_msg *im, void *reply)
1783 {
1784 	struct i2o_util_event_register_reply *rb;
1785 	u_int event;
1786 
1787 	rb = reply;
1788 
1789 	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
1790 		return;
1791 
1792 	event = le32toh(rb->event);
1793 	printf("%s: event 0x%08x received\n", device_xname(dv), event);
1794 }
1795 
1796 /*
1797  * Allocate a message wrapper.
1798  */
1799 struct iop_msg *
1800 iop_msg_alloc(struct iop_softc *sc, int flags)
1801 {
1802 	struct iop_msg *im;
1803 	static u_int tctxgen;
1804 	int i;
1805 
1806 #ifdef I2ODEBUG
1807 	if ((flags & IM_SYSMASK) != 0)
1808 		panic("iop_msg_alloc: system flags specified");
1809 #endif
1810 
1811 	mutex_spin_enter(&sc->sc_intrlock);
1812 	im = SLIST_FIRST(&sc->sc_im_freelist);
1813 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
1814 	if (im == NULL)
1815 		panic("iop_msg_alloc: no free wrappers");
1816 #endif
1817 	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
1818 	mutex_spin_exit(&sc->sc_intrlock);
1819 
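	/*
	 * Keep the wrapper's array index in the low bits and stamp the
	 * upper bits with a fresh generation number.
	 */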
1820 	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
1821 	tctxgen += (1 << IOP_TCTX_SHIFT);
1822 	im->im_flags = flags | IM_ALLOCED;
1823 	im->im_rb = NULL;
1824 	i = 0;
1825 	do {
1826 		im->im_xfer[i++].ix_size = 0;
1827 	} while (i < IOP_MAX_MSG_XFERS);
1828 
1829 	return (im);
1830 }
1831 
1832 /*
1833  * Free a message wrapper.
1834  */
1835 void
1836 iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
1837 {
1838 
1839 #ifdef I2ODEBUG
1840 	if ((im->im_flags & IM_ALLOCED) == 0)
1841 		panic("iop_msg_free: wrapper not allocated");
1842 #endif
1843 
1844 	im->im_flags = 0;
1845 	mutex_spin_enter(&sc->sc_intrlock);
1846 	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
1847 	mutex_spin_exit(&sc->sc_intrlock);
1848 }
1849 
1850 /*
1851  * Map a data transfer.  Write a scatter-gather list into the message frame.
1852  */
1853 int
1854 iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1855 	    void *xferaddr, int xfersize, int out, struct proc *up)
1856 {
1857 	bus_dmamap_t dm;
1858 	bus_dma_segment_t *ds;
1859 	struct iop_xfer *ix;
1860 	u_int rv, i, nsegs, flg, off, xn;
1861 	u_int32_t *p;
1862 
1863 	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
1864 		if (ix->ix_size == 0)
1865 			break;
1866 
1867 #ifdef I2ODEBUG
1868 	if (xfersize == 0)
1869 		panic("iop_msg_map: null transfer");
1870 	if (xfersize > IOP_MAX_XFER)
1871 		panic("iop_msg_map: transfer too large");
1872 	if (xn == IOP_MAX_MSG_XFERS)
1873 		panic("iop_msg_map: too many xfers");
1874 #endif
1875 
1876 	/*
1877 	 * Only the first DMA map is static.
1878 	 */
1879 	if (xn != 0) {
1880 		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
1881 		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
1882 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
1883 		if (rv != 0)
1884 			return (rv);
1885 	}
1886 
1887 	dm = ix->ix_map;
1888 	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
1889 	    (up == NULL ? BUS_DMA_NOWAIT : 0));
1890 	if (rv != 0)
1891 		goto bad;
1892 
1893 	/*
1894 	 * How many SIMPLE SG elements can we fit in this message?
1895 	 */
1896 	off = mb[0] >> 16;
1897 	p = mb + off;
1898 	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
1899 
1900 	if (dm->dm_nsegs > nsegs) {
1901 		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
1902 		rv = EFBIG;
1903 		DPRINTF(("iop_msg_map: too many segs\n"));
1904 		goto bad;
1905 	}
1906 
1907 	nsegs = dm->dm_nsegs;
1908 	xfersize = 0;
1909 
1910 	/*
1911 	 * Write out the SG list.
1912 	 */
1913 	if (out)
1914 		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
1915 	else
1916 		flg = I2O_SGL_SIMPLE;
1917 
1918 	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
1919 		p[0] = (u_int32_t)ds->ds_len | flg;
1920 		p[1] = (u_int32_t)ds->ds_addr;
1921 		xfersize += ds->ds_len;
1922 	}
1923 
1924 	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
1925 	p[1] = (u_int32_t)ds->ds_addr;
1926 	xfersize += ds->ds_len;
1927 
1928 	/* Fix up the transfer record, and sync the map. */
1929 	ix->ix_flags = (out ? IX_OUT : IX_IN);
1930 	ix->ix_size = xfersize;
1931 	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
1932 	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
1933 
1934 	/*
1935 	 * If this is the first xfer we've mapped for this message, adjust
1936 	 * the SGL offset field in the message header.
1937 	 */
1938 	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
1939 		mb[0] += (mb[0] >> 12) & 0xf0;
1940 		im->im_flags |= IM_SGLOFFADJ;
1941 	}
1942 	mb[0] += (nsegs << 17);
1943 	return (0);
1944 
1945  bad:
	if (xn != 0)
1947 		bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
1948 	return (rv);
1949 }
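
/*
 * Worked example (editorial): each SIMPLE scatter-gather element written
 * above occupies two 32-bit words, so for a two-segment read the tail of
 * the frame would look like:
 *
 *	p[0] = seg0_len | I2O_SGL_SIMPLE;
 *	p[1] = seg0_addr;
 *	p[2] = seg1_len | I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER;
 *	p[3] = seg1_addr;
 *
 * This is also why the size adjustment above is nsegs << 17: nsegs
 * elements of two words each, added to the word count at mb[0] >> 16.
 */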
1950 
/*
 * Map a block I/O data transfer (these differ in that there is at most
 * one per message, and PAGE_LIST addressing may be used).  Write a
 * scatter-gather list into the message frame.
 */
1956 int
1957 iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1958 		void *xferaddr, int xfersize, int out)
1959 {
1960 	bus_dma_segment_t *ds;
1961 	bus_dmamap_t dm;
1962 	struct iop_xfer *ix;
1963 	u_int rv, i, nsegs, off, slen, tlen, flg;
1964 	paddr_t saddr, eaddr;
1965 	u_int32_t *p;
1966 
1967 #ifdef I2ODEBUG
1968 	if (xfersize == 0)
1969 		panic("iop_msg_map_bio: null transfer");
1970 	if (xfersize > IOP_MAX_XFER)
1971 		panic("iop_msg_map_bio: transfer too large");
1972 	if ((im->im_flags & IM_SGLOFFADJ) != 0)
1973 		panic("iop_msg_map_bio: SGLOFFADJ");
1974 #endif
1975 
1976 	ix = im->im_xfer;
1977 	dm = ix->ix_map;
1978 	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
1979 	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
1980 	if (rv != 0)
1981 		return (rv);
1982 
1983 	off = mb[0] >> 16;
1984 	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
1985 
1986 	/*
1987 	 * If the transfer is highly fragmented and won't fit using SIMPLE
1988 	 * elements, use PAGE_LIST elements instead.  SIMPLE elements are
1989 	 * potentially more efficient, both for us and the IOP.
1990 	 */
1991 	if (dm->dm_nsegs > nsegs) {
1992 		nsegs = 1;
1993 		p = mb + off + 1;
1994 
1995 		/* XXX This should be done with a bus_space flag. */
1996 		for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
1997 			slen = ds->ds_len;
1998 			saddr = ds->ds_addr;
1999 
2000 			while (slen > 0) {
2001 				eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
2002 				tlen = uimin(eaddr - saddr, slen);
2003 				slen -= tlen;
				*p++ = htole32(saddr);
2005 				saddr = eaddr;
2006 				nsegs++;
2007 			}
2008 		}
2009 
2010 		mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
2011 		    I2O_SGL_END;
2012 		if (out)
2013 			mb[off] |= I2O_SGL_DATA_OUT;
2014 	} else {
2015 		p = mb + off;
2016 		nsegs = dm->dm_nsegs;
2017 
2018 		if (out)
2019 			flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
2020 		else
2021 			flg = I2O_SGL_SIMPLE;
2022 
2023 		for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
2024 			p[0] = (u_int32_t)ds->ds_len | flg;
2025 			p[1] = (u_int32_t)ds->ds_addr;
2026 		}
2027 
2028 		p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2029 		    I2O_SGL_END;
2030 		p[1] = (u_int32_t)ds->ds_addr;
2031 		nsegs <<= 1;
2032 	}
2033 
2034 	/* Fix up the transfer record, and sync the map. */
2035 	ix->ix_flags = (out ? IX_OUT : IX_IN);
2036 	ix->ix_size = xfersize;
2037 	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
2038 	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
2039 
	/*
	 * Adjust the SGL offset and total message size fields.  We don't set
	 * IM_SGLOFFADJ: iop_msg_post() uses it only to terminate a chain of
	 * SIMPLE elements, and the list built here already carries I2O_SGL_END.
	 */
2044 	mb[0] += ((off << 4) + (nsegs << 16));
2045 	return (0);
2046 }
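
/*
 * Worked example (editorial): a PAGE_LIST element is one header word
 * carrying the byte count and flags, followed by one word per page-sized
 * chunk of physical address space:
 *
 *	mb[off]     = xfersize | I2O_SGL_PAGE_LIST | ...;
 *	mb[off + 1] = physical address of first page;
 *	mb[off + 2] = physical address of second page;
 *	...
 *
 * In this branch nsegs counts words (header plus addresses); in the
 * SIMPLE branch it is doubled (nsegs <<= 1) so that the final
 * nsegs << 16 size adjustment is correct for both.
 */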
2047 
2048 /*
2049  * Unmap all data transfers associated with a message wrapper.
2050  */
2051 void
2052 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2053 {
2054 	struct iop_xfer *ix;
2055 	int i;
2056 
2057 #ifdef I2ODEBUG
2058 	if (im->im_xfer[0].ix_size == 0)
2059 		panic("iop_msg_unmap: no transfers mapped");
2060 #endif
2061 
2062 	for (ix = im->im_xfer, i = 0;;) {
2063 		bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2064 		    ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2065 		    BUS_DMASYNC_POSTREAD);
2066 		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2067 
2068 		/* Only the first DMA map is static. */
2069 		if (i != 0)
2070 			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
2071 		if (++i >= IOP_MAX_MSG_XFERS)
2072 			break;
2073 		if ((++ix)->ix_size == 0)
2074 			break;
2075 	}
2076 }
2077 
2078 /*
2079  * Post a message frame to the IOP's inbound queue.
2080  */
2081 int
2082 iop_post(struct iop_softc *sc, u_int32_t *mb)
2083 {
2084 	u_int32_t mfa;
2085 
2086 #ifdef I2ODEBUG
2087 	if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2088 		panic("iop_post: frame too large");
2089 #endif
2090 
2091 	mutex_spin_enter(&sc->sc_intrlock);
2092 
2093 	/* Allocate a slot with the IOP. */
2094 	if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2095 		if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2096 			mutex_spin_exit(&sc->sc_intrlock);
2097 			aprint_error_dev(sc->sc_dev, "mfa not forthcoming\n");
2098 			return (EAGAIN);
2099 		}
2100 
2101 	/* Perform reply buffer DMA synchronisation. */
2102 	if (sc->sc_rep_size != 0) {
2103 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2104 		    sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2105 	}
2106 
2107 	/* Copy out the message frame. */
2108 	bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
2109 	    mb[0] >> 16);
2110 	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
2111 	    (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);
2112 
2113 	/* Post the MFA back to the IOP. */
2114 	iop_outl(sc, IOP_REG_IFIFO, mfa);
2115 
2116 	mutex_spin_exit(&sc->sc_intrlock);
2117 	return (0);
2118 }
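
/*
 * Editorial note on the header arithmetic above, assuming the standard
 * I2O packing of message word 0:
 *
 *	mb[0] >> 16		message size, in 32-bit words
 *	(mb[0] >> 4) & 0xf	scatter-gather list offset, in words
 *	mb[0] & 0xf		I2O version
 *
 * Hence (mb[0] >> 14) & ~3 in the barrier call is simply the message
 * size converted to bytes: ((mb[0] >> 16) << 2).
 */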
2119 
2120 /*
2121  * Post a message to the IOP and deal with completion.
2122  */
2123 int
2124 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2125 {
2126 	u_int32_t *mb;
2127 	int rv;
2128 
2129 	mb = xmb;
2130 
2131 	/* Terminate the scatter/gather list chain. */
2132 	if ((im->im_flags & IM_SGLOFFADJ) != 0)
2133 		mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2134 
2135 	if ((rv = iop_post(sc, mb)) != 0)
2136 		return (rv);
2137 
2138 	if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2139 		if ((im->im_flags & IM_POLL) != 0)
2140 			iop_msg_poll(sc, im, timo);
2141 		else
2142 			iop_msg_wait(sc, im, timo);
2143 
2144 		mutex_spin_enter(&sc->sc_intrlock);
2145 		if ((im->im_flags & IM_REPLIED) != 0) {
2146 			if ((im->im_flags & IM_NOSTATUS) != 0)
2147 				rv = 0;
2148 			else if ((im->im_flags & IM_FAIL) != 0)
2149 				rv = ENXIO;
2150 			else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2151 				rv = EIO;
2152 			else
2153 				rv = 0;
2154 		} else
2155 			rv = EBUSY;
2156 		mutex_spin_exit(&sc->sc_intrlock);
2157 	} else
2158 		rv = 0;
2159 
2160 	return (rv);
2161 }
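
/*
 * Typical usage (editorial sketch, mirroring iop_util_claim() below):
 *
 *	im = iop_msg_alloc(sc, IM_WAIT);
 *	mf.msgflags = I2O_MSGFLAGS(...);
 *	mf.msgfunc = I2O_MSGFUNC(tid, function);
 *	mf.msgictx = ii->ii_ictx;
 *	mf.msgtctx = im->im_tctx;
 *	rv = iop_msg_post(sc, im, &mf, 5000);	(timeout in ms)
 *	iop_msg_free(sc, im);
 */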
2162 
2163 /*
2164  * Spin until the specified message is replied to.
2165  */
2166 static void
2167 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2168 {
2169 	u_int32_t rmfa;
2170 
2171 	mutex_spin_enter(&sc->sc_intrlock);
2172 
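	/*
	 * timo is in milliseconds: the loop below runs timo * 10 times,
	 * and each iteration costs one DELAY(100) (100us), so the total
	 * poll budget is roughly timo ms.
	 */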
2173 	for (timo *= 10; timo != 0; timo--) {
2174 		if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2175 			/* Double read to account for IOP bug. */
2176 			rmfa = iop_inl(sc, IOP_REG_OFIFO);
2177 			if (rmfa == IOP_MFA_EMPTY)
2178 				rmfa = iop_inl(sc, IOP_REG_OFIFO);
2179 			if (rmfa != IOP_MFA_EMPTY) {
2180 				iop_handle_reply(sc, rmfa);
2181 
2182 				/*
2183 				 * Return the reply frame to the IOP's
2184 				 * outbound FIFO.
2185 				 */
2186 				iop_outl(sc, IOP_REG_OFIFO, rmfa);
2187 			}
2188 		}
2189 		if ((im->im_flags & IM_REPLIED) != 0)
2190 			break;
2191 		mutex_spin_exit(&sc->sc_intrlock);
2192 		DELAY(100);
2193 		mutex_spin_enter(&sc->sc_intrlock);
2194 	}
2195 
2196 	if (timo == 0) {
2197 #ifdef I2ODEBUG
2198 		printf("%s: poll - no reply\n", device_xname(sc->sc_dev));
2199 		if (iop_status_get(sc, 1) != 0)
2200 			printf("iop_msg_poll: unable to retrieve status\n");
2201 		else
2202 			printf("iop_msg_poll: IOP state = %d\n",
2203 			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2204 #endif
2205 	}
2206 
2207 	mutex_spin_exit(&sc->sc_intrlock);
2208 }
2209 
2210 /*
2211  * Sleep until the specified message is replied to.
2212  */
2213 static void
2214 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2215 {
2216 	int rv;
2217 
2218 	mutex_spin_enter(&sc->sc_intrlock);
2219 	if ((im->im_flags & IM_REPLIED) != 0) {
2220 		mutex_spin_exit(&sc->sc_intrlock);
2221 		return;
2222 	}
2223 	rv = cv_timedwait(&im->im_cv, &sc->sc_intrlock, mstohz(timo));
2224 	mutex_spin_exit(&sc->sc_intrlock);
2225 
2226 #ifdef I2ODEBUG
2227 	if (rv != 0) {
		printf("%s: cv_timedwait() == %d\n", __func__, rv);
2229 		if (iop_status_get(sc, 0) != 0)
2230 			printf("%s: unable to retrieve status\n", __func__);
2231 		else
2232 			printf("%s: IOP state = %d\n", __func__,
2233 			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2234 	}
2235 #else
2236 	__USE(rv);
2237 #endif
2238 }
2239 
2240 /*
 * Release an unused message frame back to the IOP's inbound FIFO.
2242  */
2243 static void
2244 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2245 {
2246 
2247 	/* Use the frame to issue a no-op. */
2248 	iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
2249 	iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2250 	iop_outl_msg(sc, mfa + 8, 0);
2251 	iop_outl_msg(sc, mfa + 12, 0);
2252 
2253 	iop_outl(sc, IOP_REG_IFIFO, mfa);
2254 }
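
/*
 * Editorial note: a claimed inbound MFA must be posted with a valid
 * request before the IOP will recycle it, so the function above consumes
 * it with the smallest legal message, a four-word UtilNOP with no
 * initiator or transaction context:
 *
 *	word 0	I2O_VERSION_11 | (4 << 16)	(version; size = 4 words)
 *	word 1	I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP)
 *	word 2	0				(ictx)
 *	word 3	0				(tctx)
 */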
2255 
2256 #ifdef I2ODEBUG
2257 /*
2258  * Dump a reply frame header.
2259  */
2260 static void
2261 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2262 {
2263 	u_int function, detail;
2264 	const char *statusstr;
2265 
2266 	function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2267 	detail = le16toh(rb->detail);
2268 
2269 	printf("%s: reply:\n", device_xname(sc->sc_dev));
2270 
2271 	if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2272 		statusstr = iop_status[rb->reqstatus];
2273 	else
2274 		statusstr = "undefined error code";
2275 
2276 	printf("%s:   function=0x%02x status=0x%02x (%s)\n",
2277 	    device_xname(sc->sc_dev), function, rb->reqstatus, statusstr);
2278 	printf("%s:   detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2279 	    device_xname(sc->sc_dev), detail, le32toh(rb->msgictx),
2280 	    le32toh(rb->msgtctx));
2281 	printf("%s:   tidi=%d tidt=%d flags=0x%02x\n", device_xname(sc->sc_dev),
2282 	    (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2283 	    (le32toh(rb->msgflags) >> 8) & 0xff);
2284 }
2285 #endif
2286 
2287 /*
2288  * Dump a transport failure reply.
2289  */
2290 static void
2291 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2292 {
2293 
2294 	printf("%s: WARNING: transport failure:\n", device_xname(sc->sc_dev));
2295 
2296 	printf("%s:  ictx=0x%08x tctx=0x%08x\n", device_xname(sc->sc_dev),
2297 	    le32toh(fn->msgictx), le32toh(fn->msgtctx));
2298 	printf("%s:  failurecode=0x%02x severity=0x%02x\n",
2299 	    device_xname(sc->sc_dev), fn->failurecode, fn->severity);
2300 	printf("%s:  highestver=0x%02x lowestver=0x%02x\n",
2301 	    device_xname(sc->sc_dev), fn->highestver, fn->lowestver);
2302 }
2303 
2304 /*
2305  * Translate an I2O ASCII field into a C string.
2306  */
2307 void
2308 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2309 {
2310 	int hc, lc, i, nit;
2311 
2312 	dlen--;
2313 	lc = 0;
2314 	hc = 0;
2315 	i = 0;
2316 
	/*
	 * DPT uses NUL as a space, whereas AMI uses it as a terminator.  The
	 * spec has nothing to say about it.  Since AMI fields are usually
	 * filled with junk after the terminator, honour NUL as a terminator
	 * for everyone except DPT.
	 */
2322 	nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2323 
2324 	while (slen-- != 0 && dlen-- != 0) {
2325 		if (nit && *src == '\0')
2326 			break;
2327 		else if (*src <= 0x20 || *src >= 0x7f) {
2328 			if (hc)
2329 				dst[i++] = ' ';
2330 		} else {
2331 			hc = 1;
2332 			dst[i++] = *src;
2333 			lc = i;
2334 		}
2335 		src++;
2336 	}
2337 
2338 	dst[lc] = '\0';
2339 }
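
/*
 * Behaviour example (editorial): given the 8-byte field
 * { 'R','A','I','D',' ',' ','\0','?' } from a non-DPT IOP, the loop
 * copies "RAID", emits one ' ' for each of the two blanks, stops at
 * the NUL, and the final dst[lc] = '\0' trims the trailing blanks,
 * leaving "RAID".
 */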
2340 
2341 /*
2342  * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2343  */
2344 int
2345 iop_print_ident(struct iop_softc *sc, int tid)
2346 {
2347 	struct {
2348 		struct	i2o_param_op_results pr;
2349 		struct	i2o_param_read_results prr;
2350 		struct	i2o_param_device_identity di;
2351 	} __packed p;
2352 	char buf[32];
2353 	int rv;
2354 
2355 	rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2356 	    sizeof(p), NULL);
2357 	if (rv != 0)
2358 		return (rv);
2359 
2360 	iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2361 	    sizeof(buf));
2362 	printf(" <%s, ", buf);
2363 	iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2364 	    sizeof(buf));
2365 	printf("%s, ", buf);
2366 	iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2367 	printf("%s>", buf);
2368 
2369 	return (0);
2370 }
2371 
2372 /*
2373  * Claim or unclaim the specified TID.
2374  */
2375 int
2376 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2377 	       int flags)
2378 {
2379 	struct iop_msg *im;
2380 	struct i2o_util_claim mf;
2381 	int rv, func;
2382 
2383 	func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2384 	im = iop_msg_alloc(sc, IM_WAIT);
2385 
	/* The claim and claim-release request frames share one layout. */
2387 	mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2388 	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2389 	mf.msgictx = ii->ii_ictx;
2390 	mf.msgtctx = im->im_tctx;
2391 	mf.flags = flags;
2392 
2393 	rv = iop_msg_post(sc, im, &mf, 5000);
2394 	iop_msg_free(sc, im);
2395 	return (rv);
2396 }
2397 
2398 /*
2399  * Perform an abort.
2400  */
int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
    int tctxabort, int flags)
2403 {
2404 	struct iop_msg *im;
2405 	struct i2o_util_abort mf;
2406 	int rv;
2407 
2408 	im = iop_msg_alloc(sc, IM_WAIT);
2409 
2410 	mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2411 	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2412 	mf.msgictx = ii->ii_ictx;
2413 	mf.msgtctx = im->im_tctx;
2414 	mf.flags = (func << 24) | flags;
2415 	mf.tctxabort = tctxabort;
2416 
2417 	rv = iop_msg_post(sc, im, &mf, 5000);
2418 	iop_msg_free(sc, im);
2419 	return (rv);
2420 }
2421 
2422 /*
2423  * Enable or disable reception of events for the specified device.
2424  */
int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2426 {
2427 	struct i2o_util_event_register mf;
2428 
2429 	mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2430 	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2431 	mf.msgictx = ii->ii_ictx;
2432 	mf.msgtctx = 0;
2433 	mf.eventmask = mask;
2434 
2435 	/* This message is replied to only when events are signalled. */
2436 	return (iop_post(sc, (u_int32_t *)&mf));
2437 }
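
/*
 * Usage sketch (editorial; the mask value is illustrative): an initiator
 * whose ii_intr handler is prepared for event replies can enable event
 * reporting with something like
 *
 *	(void)iop_util_eventreg(sc, ii, 0xffffffff);
 *
 * and later disable it again by registering a zero mask.  Replies arrive
 * asynchronously through the normal reply path (see iop_intr_event()).
 */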
2438 
2439 int
2440 iopopen(dev_t dev, int flag, int mode, struct lwp *l)
2441 {
2442 	struct iop_softc *sc;
2443 
2444 	if ((sc = device_lookup_private(&iop_cd, minor(dev))) == NULL)
2445 		return (ENXIO);
2446 	if ((sc->sc_flags & IOP_ONLINE) == 0)
2447 		return (ENXIO);
2448 	if ((sc->sc_flags & IOP_OPEN) != 0)
2449 		return (EBUSY);
2450 	sc->sc_flags |= IOP_OPEN;
2451 
2452 	return (0);
2453 }
2454 
2455 int
iopclose(dev_t dev, int flag, int mode, struct lwp *l)
2458 {
2459 	struct iop_softc *sc;
2460 
2461 	sc = device_lookup_private(&iop_cd, minor(dev));
2462 	sc->sc_flags &= ~IOP_OPEN;
2463 
2464 	return (0);
2465 }
2466 
2467 int
2468 iopioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
2469 {
2470 	struct iop_softc *sc;
2471 	struct iovec *iov;
2472 	int rv, i;
2473 
2474 	sc = device_lookup_private(&iop_cd, minor(dev));
2475 	rv = 0;
2476 
2477 	switch (cmd) {
2478 	case IOPIOCPT:
2479 		rv = kauth_authorize_device_passthru(l->l_cred, dev,
2480 		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
2481 		if (rv)
2482 			return (rv);
2483 
2484 		return (iop_passthrough(sc, (struct ioppt *)data, l->l_proc));
2485 
2486 	case IOPIOCGSTATUS:
2487 		iov = (struct iovec *)data;
2488 		i = sizeof(struct i2o_status);
2489 		if (i > iov->iov_len)
2490 			i = iov->iov_len;
2491 		else
2492 			iov->iov_len = i;
2493 		if ((rv = iop_status_get(sc, 0)) == 0)
2494 			rv = copyout(&sc->sc_status, iov->iov_base, i);
2495 		return (rv);
2496 
2497 	case IOPIOCGLCT:
2498 	case IOPIOCGTIDMAP:
2499 	case IOPIOCRECONFIG:
2500 		break;
2501 
2502 	default:
2503 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2504 		printf("%s: unknown ioctl %lx\n", device_xname(sc->sc_dev), cmd);
2505 #endif
2506 		return (ENOTTY);
2507 	}
2508 
2509 	mutex_enter(&sc->sc_conflock);
2510 
2511 	switch (cmd) {
2512 	case IOPIOCGLCT:
2513 		iov = (struct iovec *)data;
2514 		i = le16toh(sc->sc_lct->tablesize) << 2;
2515 		if (i > iov->iov_len)
2516 			i = iov->iov_len;
2517 		else
2518 			iov->iov_len = i;
2519 		rv = copyout(sc->sc_lct, iov->iov_base, i);
2520 		break;
2521 
2522 	case IOPIOCRECONFIG:
2523 		rv = iop_reconfigure(sc, 0);
2524 		break;
2525 
2526 	case IOPIOCGTIDMAP:
2527 		iov = (struct iovec *)data;
2528 		i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2529 		if (i > iov->iov_len)
2530 			i = iov->iov_len;
2531 		else
2532 			iov->iov_len = i;
2533 		rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2534 		break;
2535 	}
2536 
2537 	mutex_exit(&sc->sc_conflock);
2538 	return (rv);
2539 }
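
/*
 * Userland usage sketch (editorial): IOPIOCGSTATUS, IOPIOCGLCT and
 * IOPIOCGTIDMAP each take a struct iovec describing the caller's buffer
 * and clamp the copy to the smaller of the two lengths, e.g.:
 *
 *	struct i2o_status st;
 *	struct iovec iov = { .iov_base = &st, .iov_len = sizeof(st) };
 *
 *	if (ioctl(fd, IOPIOCGSTATUS, &iov) == -1)
 *		err(EXIT_FAILURE, "IOPIOCGSTATUS");
 */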
2540 
2541 static int
2542 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2543 {
2544 	struct iop_msg *im;
2545 	struct i2o_msg *mf;
2546 	struct ioppt_buf *ptb;
2547 	int rv, i, mapped;
2548 
2549 	mf = NULL;
2550 	im = NULL;
	mapped = 0;	/* set once iop_msg_map() has succeeded */
2552 
2553 	if (pt->pt_msglen > sc->sc_framesize ||
2554 	    pt->pt_msglen < sizeof(struct i2o_msg) ||
2555 	    pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2556 	    pt->pt_nbufs < 0 ||
2557 #if 0
2558 	    pt->pt_replylen < 0 ||
2559 #endif
	    pt->pt_timo < 1000 || pt->pt_timo > 5 * 60 * 1000)
2561 		return (EINVAL);
2562 
2563 	for (i = 0; i < pt->pt_nbufs; i++)
2564 		if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2565 			rv = ENOMEM;
2566 			goto bad;
2567 		}
2568 
2569 	mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2570 	if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2571 		goto bad;
2572 
2573 	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2574 	im->im_rb = (struct i2o_reply *)mf;
2575 	mf->msgictx = IOP_ICTX;
2576 	mf->msgtctx = im->im_tctx;
2577 
2578 	for (i = 0; i < pt->pt_nbufs; i++) {
2579 		ptb = &pt->pt_bufs[i];
2580 		rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2581 		    ptb->ptb_datalen, ptb->ptb_out != 0, p);
2582 		if (rv != 0)
2583 			goto bad;
2584 		mapped = 1;
2585 	}
2586 
2587 	if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2588 		goto bad;
2589 
2590 	i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2591 	if (i > sc->sc_framesize)
2592 		i = sc->sc_framesize;
2593 	if (i > pt->pt_replylen)
2594 		i = pt->pt_replylen;
2595 	rv = copyout(im->im_rb, pt->pt_reply, i);
2596 
2597  bad:
2598 	if (mapped != 0)
2599 		iop_msg_unmap(sc, im);
2600 	if (im != NULL)
2601 		iop_msg_free(sc, im);
2602 	if (mf != NULL)
2603 		free(mf, M_DEVBUF);
2604 	return (rv);
2605 }
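
/*
 * Userland usage sketch for IOPIOCPT (editorial; consult iopio.h for the
 * authoritative structure layout).  The caller supplies a raw message
 * frame, a reply buffer and up to IOP_MAX_MSG_XFERS data buffers; the
 * timeout must lie within [1000, 300000] ms per the checks above:
 *
 *	struct ioppt pt;
 *
 *	memset(&pt, 0, sizeof(pt));
 *	pt.pt_msg = frame;
 *	pt.pt_msglen = framelen;
 *	pt.pt_reply = rbuf;
 *	pt.pt_replylen = sizeof(rbuf);
 *	pt.pt_timo = 30000;
 *	pt.pt_nbufs = 1;
 *	pt.pt_bufs[0].ptb_data = databuf;
 *	pt.pt_bufs[0].ptb_datalen = datalen;
 *	pt.pt_bufs[0].ptb_out = 0;	(device-to-host transfer)
 *
 *	rv = ioctl(fd, IOPIOCPT, &pt);
 */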
2606