1 /*	$NetBSD: iop.c,v 1.75 2008/09/14 18:12:16 mhitch Exp $	*/
2 
3 /*-
4  * Copyright (c) 2000, 2001, 2002, 2007 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Andrew Doran.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Support for I2O IOPs (intelligent I/O processors).
34  */
35 
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.75 2008/09/14 18:12:16 mhitch Exp $");
38 
39 #include "iop.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/device.h>
45 #include <sys/queue.h>
46 #include <sys/proc.h>
47 #include <sys/malloc.h>
48 #include <sys/ioctl.h>
49 #include <sys/endian.h>
50 #include <sys/conf.h>
51 #include <sys/kthread.h>
52 #include <sys/kauth.h>
53 #include <sys/bus.h>
54 
55 #include <uvm/uvm_extern.h>
56 
57 #include <dev/i2o/i2o.h>
58 #include <dev/i2o/iopio.h>
59 #include <dev/i2o/iopreg.h>
60 #include <dev/i2o/iopvar.h>
61 
62 #include "locators.h"
63 
64 #define POLL(ms, cond)				\
65 do {						\
66 	int xi;					\
67 	for (xi = (ms) * 10; xi; xi--) {	\
68 		if (cond)			\
69 			break;			\
70 		DELAY(100);			\
71 	}					\
72 } while (/* CONSTCOND */0)
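
/*
 * Illustrative example (names from iop_status_get() below): wait up to
 * 5000ms for a status byte to be set, re-checking roughly every 100us:
 *
 *	POLL(5000, st->syncbyte == 0xff);
 */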
73 
74 #ifdef I2ODEBUG
75 #define DPRINTF(x)	printf x
76 #else
77 #define	DPRINTF(x)
78 #endif
79 
80 #define IOP_ICTXHASH_NBUCKETS	16
81 #define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])
82 
83 #define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)
84 
85 #define	IOP_TCTX_SHIFT	12
86 #define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)
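
/*
 * Transaction contexts are split in two: the low IOP_TCTX_SHIFT bits
 * index a message wrapper in sc_ims, and the remaining high bits hold a
 * generation number (advanced in iop_msg_alloc()) so that stale replies
 * can be detected in iop_handle_reply().
 */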
87 
88 static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
89 static u_long	iop_ictxhash;
90 static void	*iop_sdh;
91 static struct	i2o_systab *iop_systab;
92 static int	iop_systab_size;
93 
94 extern struct cfdriver iop_cd;
95 
96 dev_type_open(iopopen);
97 dev_type_close(iopclose);
98 dev_type_ioctl(iopioctl);
99 
100 const struct cdevsw iop_cdevsw = {
101 	iopopen, iopclose, noread, nowrite, iopioctl,
102 	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
103 };
104 
105 #define	IC_CONFIGURE	0x01
106 #define	IC_PRIORITY	0x02
107 
108 static struct iop_class {
109 	u_short	ic_class;
110 	u_short	ic_flags;
111 	const char *ic_caption;
112 } const iop_class[] = {
113 	{
114 		I2O_CLASS_EXECUTIVE,
115 		0,
116 		"executive"
117 	},
118 	{
119 		I2O_CLASS_DDM,
120 		0,
121 		"device driver module"
122 	},
123 	{
124 		I2O_CLASS_RANDOM_BLOCK_STORAGE,
125 		IC_CONFIGURE | IC_PRIORITY,
126 		"random block storage"
127 	},
128 	{
129 		I2O_CLASS_SEQUENTIAL_STORAGE,
130 		IC_CONFIGURE | IC_PRIORITY,
131 		"sequential storage"
132 	},
133 	{
134 		I2O_CLASS_LAN,
135 		IC_CONFIGURE | IC_PRIORITY,
136 		"LAN port"
137 	},
138 	{
139 		I2O_CLASS_WAN,
140 		IC_CONFIGURE | IC_PRIORITY,
141 		"WAN port"
142 	},
143 	{
144 		I2O_CLASS_FIBRE_CHANNEL_PORT,
145 		IC_CONFIGURE,
146 		"fibrechannel port"
147 	},
148 	{
149 		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
150 		0,
151 		"fibrechannel peripheral"
152 	},
153  	{
154  		I2O_CLASS_SCSI_PERIPHERAL,
155  		0,
156  		"SCSI peripheral"
157  	},
158 	{
159 		I2O_CLASS_ATE_PORT,
160 		IC_CONFIGURE,
161 		"ATE port"
162 	},
163 	{
164 		I2O_CLASS_ATE_PERIPHERAL,
165 		0,
166 		"ATE peripheral"
167 	},
168 	{
169 		I2O_CLASS_FLOPPY_CONTROLLER,
170 		IC_CONFIGURE,
171 		"floppy controller"
172 	},
173 	{
174 		I2O_CLASS_FLOPPY_DEVICE,
175 		0,
176 		"floppy device"
177 	},
178 	{
179 		I2O_CLASS_BUS_ADAPTER_PORT,
180 		IC_CONFIGURE,
181 		"bus adapter port"
182 	},
183 };
184 
185 static const char * const iop_status[] = {
186 	"success",
187 	"abort (dirty)",
188 	"abort (no data transfer)",
189 	"abort (partial transfer)",
190 	"error (dirty)",
191 	"error (no data transfer)",
192 	"error (partial transfer)",
193 	"undefined error code",
194 	"process abort (dirty)",
195 	"process abort (no data transfer)",
196 	"process abort (partial transfer)",
197 	"transaction error",
198 };
199 
200 static inline u_int32_t	iop_inl(struct iop_softc *, int);
201 static inline void	iop_outl(struct iop_softc *, int, u_int32_t);
202 
203 static inline u_int32_t	iop_inl_msg(struct iop_softc *, int);
204 static inline void	iop_outl_msg(struct iop_softc *, int, u_int32_t);
205 
206 static void	iop_config_interrupts(struct device *);
207 static void	iop_configure_devices(struct iop_softc *, int, int);
208 static void	iop_devinfo(int, char *, size_t);
209 static int	iop_print(void *, const char *);
210 static void	iop_shutdown(void *);
211 
212 static void	iop_adjqparam(struct iop_softc *, int);
213 static int	iop_handle_reply(struct iop_softc *, u_int32_t);
214 static int	iop_hrt_get(struct iop_softc *);
215 static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
216 static void	iop_intr_event(struct device *, struct iop_msg *, void *);
217 static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
218 			     u_int32_t);
219 static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
220 static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
221 static int	iop_ofifo_init(struct iop_softc *);
222 static int	iop_passthrough(struct iop_softc *, struct ioppt *,
223 				struct proc *);
224 static void	iop_reconf_thread(void *);
225 static void	iop_release_mfa(struct iop_softc *, u_int32_t);
226 static int	iop_reset(struct iop_softc *);
227 static int	iop_sys_enable(struct iop_softc *);
228 static int	iop_systab_set(struct iop_softc *);
229 static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);
230 
231 #ifdef I2ODEBUG
232 static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
233 #endif
234 
235 static inline u_int32_t
236 iop_inl(struct iop_softc *sc, int off)
237 {
238 
239 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
240 	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
241 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
242 }
243 
244 static inline void
245 iop_outl(struct iop_softc *sc, int off, u_int32_t val)
246 {
247 
248 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
249 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
250 	    BUS_SPACE_BARRIER_WRITE);
251 }
252 
253 static inline u_int32_t
254 iop_inl_msg(struct iop_softc *sc, int off)
255 {
256 
257 	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
258 	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
259 	return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
260 }
261 
262 static inline void
263 iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
264 {
265 
266 	bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
267 	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
268 	    BUS_SPACE_BARRIER_WRITE);
269 }
270 
271 /*
272  * Initialise the IOP and our interface.
273  */
274 void
275 iop_init(struct iop_softc *sc, const char *intrstr)
276 {
277 	struct iop_msg *im;
278 	int rv, i, j, state, nsegs;
279 	u_int32_t mask;
280 	char ident[64];
281 
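	/*
	 * "state" tracks how far resource allocation has progressed, so
	 * that the bail_out paths below can unwind: 1 = scratch dmamem
	 * allocated, 2 = mapped, 3 = dmamap loaded, 4 = message wrappers
	 * allocated.
	 */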
282 	state = 0;
283 
284 	printf("I2O adapter");
285 
286 	mutex_init(&sc->sc_intrlock, MUTEX_DEFAULT, IPL_VM);
287 	mutex_init(&sc->sc_conflock, MUTEX_DEFAULT, IPL_NONE);
288 	cv_init(&sc->sc_confcv, "iopconf");
289 
290 	if (iop_ictxhashtbl == NULL) {
291 		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
292 		    true, &iop_ictxhash);
293 	}
294 
295 	/* Disable interrupts at the IOP. */
296 	mask = iop_inl(sc, IOP_REG_INTR_MASK);
297 	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);
298 
299 	/* Allocate a scratch DMA map for small miscellaneous shared data. */
300 	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
301 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
302 		aprint_error_dev(&sc->sc_dv, "cannot create scratch dmamap\n");
303 		return;
304 	}
305 
306 	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
307 	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
308 		aprint_error_dev(&sc->sc_dv, "cannot alloc scratch dmamem\n");
309 		goto bail_out;
310 	}
311 	state++;
312 
313 	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
314 	    &sc->sc_scr, 0)) {
315 		aprint_error_dev(&sc->sc_dv, "cannot map scratch dmamem\n");
316 		goto bail_out;
317 	}
318 	state++;
319 
320 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
321 	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
322 		aprint_error_dev(&sc->sc_dv, "cannot load scratch dmamap\n");
323 		goto bail_out;
324 	}
325 	state++;
326 
327 #ifdef I2ODEBUG
328 	/* So that our debug checks don't choke. */
329 	sc->sc_framesize = 128;
330 #endif
331 
332 	/* Avoid syncing the reply map until it's set up. */
333 	sc->sc_curib = 0x123;
334 
335 	/* Reset the adapter and request status. */
336  	if ((rv = iop_reset(sc)) != 0) {
337  		aprint_error_dev(&sc->sc_dv, "not responding (reset)\n");
338 		goto bail_out;
339  	}
340 
341  	if ((rv = iop_status_get(sc, 1)) != 0) {
342 		aprint_error_dev(&sc->sc_dv, "not responding (get status)\n");
343 		goto bail_out;
344  	}
345 
346 	sc->sc_flags |= IOP_HAVESTATUS;
347 	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
348 	    ident, sizeof(ident));
349 	printf(" <%s>\n", ident);
350 
351 #ifdef I2ODEBUG
352 	printf("%s: orgid=0x%04x version=%d\n",
353 	    device_xname(&sc->sc_dv),
354 	    le16toh(sc->sc_status.orgid),
355 	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
356 	printf("%s: type want have cbase\n", device_xname(&sc->sc_dv));
357 	printf("%s: mem  %04x %04x %08x\n", device_xname(&sc->sc_dv),
358 	    le32toh(sc->sc_status.desiredprivmemsize),
359 	    le32toh(sc->sc_status.currentprivmemsize),
360 	    le32toh(sc->sc_status.currentprivmembase));
361 	printf("%s: i/o  %04x %04x %08x\n", device_xname(&sc->sc_dv),
362 	    le32toh(sc->sc_status.desiredpriviosize),
363 	    le32toh(sc->sc_status.currentpriviosize),
364 	    le32toh(sc->sc_status.currentpriviobase));
365 #endif
366 
367 	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
368 	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
369 		sc->sc_maxob = IOP_MAX_OUTBOUND;
370 	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
371 	if (sc->sc_maxib > IOP_MAX_INBOUND)
372 		sc->sc_maxib = IOP_MAX_INBOUND;
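	/* inboundmframesize is expressed in 32-bit words; convert to bytes. */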
373 	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
374 	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
375 		sc->sc_framesize = IOP_MAX_MSG_SIZE;
376 
377 #if defined(I2ODEBUG) || defined(DIAGNOSTIC)
378 	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
379 		aprint_error_dev(&sc->sc_dv, "frame size too small (%d)\n",
380 		    sc->sc_framesize);
381 		goto bail_out;
382 	}
383 #endif
384 
385 	/* Allocate message wrappers. */
386 	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
387 	if (im == NULL) {
388 		aprint_error_dev(&sc->sc_dv, "memory allocation failure\n");
389 		goto bail_out;
390 	}
391 	state++;
392 	sc->sc_ims = im;
393 	SLIST_INIT(&sc->sc_im_freelist);
394 
395 	for (i = 0; i < sc->sc_maxib; i++, im++) {
396 		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
397 		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
398 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
399 		    &im->im_xfer[0].ix_map);
400 		if (rv != 0) {
401 			aprint_error_dev(&sc->sc_dv, "couldn't create dmamap (%d)\n", rv);
402 			goto bail_out3;
403 		}
404 
405 		im->im_tctx = i;
406 		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
407 		cv_init(&im->im_cv, "iopmsg");
408 	}
409 
410 	/* Initialise the IOP's outbound FIFO. */
411 	if (iop_ofifo_init(sc) != 0) {
412 		aprint_error_dev(&sc->sc_dv, "unable to init outbound FIFO\n");
413 		goto bail_out3;
414 	}
415 
416 	/*
417  	 * Defer further configuration until (a) interrupts are working and
418  	 * (b) we have enough information to build the system table.
419  	 */
420 	config_interrupts((struct device *)sc, iop_config_interrupts);
421 
422 	/* Configure shutdown hook before we start any device activity. */
423 	if (iop_sdh == NULL)
424 		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);
425 
426 	/* Ensure interrupts are enabled at the IOP. */
427 	mask = iop_inl(sc, IOP_REG_INTR_MASK);
428 	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);
429 
430 	if (intrstr != NULL)
431 		printf("%s: interrupting at %s\n", device_xname(&sc->sc_dv),
432 		    intrstr);
433 
434 #ifdef I2ODEBUG
435 	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
436 	    device_xname(&sc->sc_dv), sc->sc_maxib,
437 	    le32toh(sc->sc_status.maxinboundmframes),
438 	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
439 #endif
440 
441 	return;
442 
443  bail_out3:
444  	if (state > 3) {
445 		for (j = 0; j < i; j++)
446 			bus_dmamap_destroy(sc->sc_dmat,
447 			    sc->sc_ims[j].im_xfer[0].ix_map);
448 		free(sc->sc_ims, M_DEVBUF);
449 	}
450  bail_out:
451 	if (state > 2)
452 		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
453 	if (state > 1)
454 		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
455 	if (state > 0)
456 		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
457 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
458 }
459 
460 /*
461  * Perform autoconfiguration tasks.
462  */
463 static void
464 iop_config_interrupts(struct device *self)
465 {
466 	struct iop_attach_args ia;
467 	struct iop_softc *sc, *iop;
468 	struct i2o_systab_entry *ste;
469 	int rv, i, niop;
470 	int locs[IOPCF_NLOCS];
471 
472 	sc = device_private(self);
473 	mutex_enter(&sc->sc_conflock);
474 
475 	LIST_INIT(&sc->sc_iilist);
476 
477 	printf("%s: configuring...\n", device_xname(&sc->sc_dv));
478 
479 	if (iop_hrt_get(sc) != 0) {
480 		printf("%s: unable to retrieve HRT\n", device_xname(&sc->sc_dv));
481 		mutex_exit(&sc->sc_conflock);
482 		return;
483 	}
484 
485 	/*
486  	 * Build the system table.
487  	 */
488 	if (iop_systab == NULL) {
489 		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
490 			if ((iop = device_lookup_private(&iop_cd, i)) == NULL)
491 				continue;
492 			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
493 				continue;
494 			if (iop_status_get(iop, 1) != 0) {
495 				aprint_error_dev(&sc->sc_dv, "unable to retrieve status\n");
496 				iop->sc_flags &= ~IOP_HAVESTATUS;
497 				continue;
498 			}
499 			niop++;
500 		}
501 		if (niop == 0) {
502 			mutex_exit(&sc->sc_conflock);
503 			return;
504 		}
505 
506 		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
507 		    sizeof(struct i2o_systab);
508 		iop_systab_size = i;
509 		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);
		if (iop_systab == NULL) {
			mutex_exit(&sc->sc_conflock);
			return;
		}
510 
511 		iop_systab->numentries = niop;
512 		iop_systab->version = I2O_VERSION_11;
513 
514 		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
515 			if ((iop = device_lookup_private(&iop_cd, i)) == NULL)
516 				continue;
517 			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
518 				continue;
519 
520 			ste->orgid = iop->sc_status.orgid;
521 			ste->iopid = device_unit(&iop->sc_dv) + 2;
522 			ste->segnumber =
523 			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
524 			ste->iopcaps = iop->sc_status.iopcaps;
525 			ste->inboundmsgframesize =
526 			    iop->sc_status.inboundmframesize;
527 			ste->inboundmsgportaddresslow =
528 			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
529 			ste++;
530 		}
531 	}
532 
533 	/*
534 	 * Post the system table to the IOP and bring it to the OPERATIONAL
535 	 * state.
536 	 */
537 	if (iop_systab_set(sc) != 0) {
538 		aprint_error_dev(&sc->sc_dv, "unable to set system table\n");
539 		mutex_exit(&sc->sc_conflock);
540 		return;
541 	}
542 	if (iop_sys_enable(sc) != 0) {
543 		aprint_error_dev(&sc->sc_dv, "unable to enable system\n");
544 		mutex_exit(&sc->sc_conflock);
545 		return;
546 	}
547 
548 	/*
549 	 * Set up an event handler for this IOP.
550 	 */
551 	sc->sc_eventii.ii_dv = self;
552 	sc->sc_eventii.ii_intr = iop_intr_event;
553 	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
554 	sc->sc_eventii.ii_tid = I2O_TID_IOP;
555 	iop_initiator_register(sc, &sc->sc_eventii);
556 
557 	rv = iop_util_eventreg(sc, &sc->sc_eventii,
558 	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
559 	    I2O_EVENT_EXEC_CONNECTION_FAIL |
560 	    I2O_EVENT_EXEC_ADAPTER_FAULT |
561 	    I2O_EVENT_EXEC_POWER_FAIL |
562 	    I2O_EVENT_EXEC_RESET_PENDING |
563 	    I2O_EVENT_EXEC_RESET_IMMINENT |
564 	    I2O_EVENT_EXEC_HARDWARE_FAIL |
565 	    I2O_EVENT_EXEC_XCT_CHANGE |
566 	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
567 	    I2O_EVENT_GEN_DEVICE_RESET |
568 	    I2O_EVENT_GEN_STATE_CHANGE |
569 	    I2O_EVENT_GEN_GENERAL_WARNING);
570 	if (rv != 0) {
571 		aprint_error_dev(&sc->sc_dv, "unable to register for events\n");
572 		mutex_exit(&sc->sc_conflock);
573 		return;
574 	}
575 
576 	/*
577 	 * Attempt to match and attach a product-specific extension.
578 	 */
579 	ia.ia_class = I2O_CLASS_ANY;
580 	ia.ia_tid = I2O_TID_IOP;
581 	locs[IOPCF_TID] = I2O_TID_IOP;
582 	config_found_sm_loc(self, "iop", locs, &ia, iop_print,
583 		config_stdsubmatch);
584 
585 	/*
586 	 * Start device configuration.
587 	 */
588 	if ((rv = iop_reconfigure(sc, 0)) != 0)
589 		aprint_error_dev(&sc->sc_dv, "configure failed (%d)\n", rv);
590 
591 
592 	sc->sc_flags |= IOP_ONLINE;
593 	rv = kthread_create(PRI_NONE, 0, NULL, iop_reconf_thread, sc,
594 	    &sc->sc_reconf_thread, "%s", device_xname(&sc->sc_dv));
595 	mutex_exit(&sc->sc_conflock);
596  	if (rv != 0) {
597 		aprint_error_dev(&sc->sc_dv, "unable to create reconfiguration thread (%d)\n", rv);
598  		return;
599  	}
600 }
601 
602 /*
603  * Reconfiguration thread; listens for LCT change notification, and
604  * initiates re-configuration if received.
605  */
606 static void
607 iop_reconf_thread(void *cookie)
608 {
609 	struct iop_softc *sc;
610 	struct lwp *l;
611 	struct i2o_lct lct;
612 	u_int32_t chgind;
613 	int rv;
614 
615 	sc = cookie;
616 	chgind = sc->sc_chgind + 1;
617 	l = curlwp;
618 
619 	for (;;) {
620 		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
621 		    device_xname(&sc->sc_dv), chgind));
622 
623 		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
624 
625 		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
626 		    device_xname(&sc->sc_dv), le32toh(lct.changeindicator), rv));
627 
628 		mutex_enter(&sc->sc_conflock);
629 		if (rv == 0) {
630 			iop_reconfigure(sc, le32toh(lct.changeindicator));
631 			chgind = sc->sc_chgind + 1;
632 		}
633 		(void)cv_timedwait(&sc->sc_confcv, &sc->sc_conflock, hz * 5);
634 		mutex_exit(&sc->sc_conflock);
635 	}
636 }
637 
638 /*
639  * Reconfigure: find new and removed devices.
640  */
641 int
642 iop_reconfigure(struct iop_softc *sc, u_int chgind)
643 {
644 	struct iop_msg *im;
645 	struct i2o_hba_bus_scan mf;
646 	struct i2o_lct_entry *le;
647 	struct iop_initiator *ii, *nextii;
648 	int rv, tid, i;
649 
650 	KASSERT(mutex_owned(&sc->sc_conflock));
651 
652 	/*
653 	 * If the reconfiguration request isn't the result of LCT change
654 	 * notification, then be more thorough: ask all bus ports to scan
655 	 * their busses.  Wait up to 5 minutes for each bus port to complete
656 	 * the request.
657 	 */
658 	if (chgind == 0) {
659 		if ((rv = iop_lct_get(sc)) != 0) {
660 			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
661 			return (rv);
662 		}
663 
664 		le = sc->sc_lct->entry;
665 		for (i = 0; i < sc->sc_nlctent; i++, le++) {
666 			if ((le16toh(le->classid) & 4095) !=
667 			    I2O_CLASS_BUS_ADAPTER_PORT)
668 				continue;
669 			tid = le16toh(le->localtid) & 4095;
670 
671 			im = iop_msg_alloc(sc, IM_WAIT);
672 
673 			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
674 			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
675 			mf.msgictx = IOP_ICTX;
676 			mf.msgtctx = im->im_tctx;
677 
678 			DPRINTF(("%s: scanning bus %d\n", device_xname(&sc->sc_dv),
679 			    tid));
680 
681 			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
682 			iop_msg_free(sc, im);
683 #ifdef I2ODEBUG
684 			if (rv != 0)
685 				aprint_error_dev(&sc->sc_dv, "bus scan failed\n");
686 #endif
687 		}
688 	} else if (chgind <= sc->sc_chgind) {
689 		DPRINTF(("%s: LCT unchanged (async)\n", device_xname(&sc->sc_dv)));
690 		return (0);
691 	}
692 
693 	/* Re-read the LCT and determine if it has changed. */
694 	if ((rv = iop_lct_get(sc)) != 0) {
695 		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
696 		return (rv);
697 	}
698 	DPRINTF(("%s: %d LCT entries\n", device_xname(&sc->sc_dv), sc->sc_nlctent));
699 
700 	chgind = le32toh(sc->sc_lct->changeindicator);
701 	if (chgind == sc->sc_chgind) {
702 		DPRINTF(("%s: LCT unchanged\n", device_xname(&sc->sc_dv)));
703 		return (0);
704 	}
705 	DPRINTF(("%s: LCT changed\n", device_xname(&sc->sc_dv)));
706 	sc->sc_chgind = chgind;
707 
708 	if (sc->sc_tidmap != NULL)
709 		free(sc->sc_tidmap, M_DEVBUF);
710 	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
711 	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (sc->sc_tidmap == NULL)
		return (ENOMEM);
712 
713 	/* Allow 1 queued command per device while we're configuring. */
714 	iop_adjqparam(sc, 1);
715 
716 	/*
717 	 * Match and attach child devices.  We configure high-level devices
718 	 * first so that any claims will propagate throughout the LCT,
719 	 * hopefully masking off aliased devices as a result.
720 	 *
721 	 * Re-reading the LCT at this point is a little dangerous, but we'll
722 	 * trust the IOP (and the operator) to behave itself...
723 	 */
724 	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
725 	    IC_CONFIGURE | IC_PRIORITY);
726 	if ((rv = iop_lct_get(sc)) != 0) {
727 		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
728 	}
729 	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
730 	    IC_CONFIGURE);
731 
732 	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
733 		nextii = LIST_NEXT(ii, ii_list);
734 
735 		/* Detach devices that were configured, but are now gone. */
736 		for (i = 0; i < sc->sc_nlctent; i++)
737 			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
738 				break;
739 		if (i == sc->sc_nlctent ||
740 		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0) {
741 			config_detach(ii->ii_dv, DETACH_FORCE);
742 			continue;
743 		}
744 
745 		/*
746 		 * Tell initiators that existed before the re-configuration
747 		 * to re-configure.
748 		 */
749 		if (ii->ii_reconfig == NULL)
750 			continue;
751 		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
752 			aprint_error_dev(&sc->sc_dv, "%s failed reconfigure (%d)\n",
753 			    device_xname(ii->ii_dv), rv);
754 	}
755 
756 	/* Re-adjust queue parameters and return. */
757 	if (sc->sc_nii != 0)
758 		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
759 		    / sc->sc_nii);
760 
761 	return (0);
762 }
763 
764 /*
765  * Configure I2O devices into the system.
766  */
767 static void
768 iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
769 {
770 	struct iop_attach_args ia;
771 	struct iop_initiator *ii;
772 	const struct i2o_lct_entry *le;
773 	struct device *dv;
774 	int i, j, nent;
775 	u_int usertid;
776 	int locs[IOPCF_NLOCS];
777 
778 	nent = sc->sc_nlctent;
779 	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
780 		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;
781 
782 		/* Ignore the device if it's in use. */
783 		usertid = le32toh(le->usertid) & 4095;
784 		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
785 			continue;
786 
787 		ia.ia_class = le16toh(le->classid) & 4095;
788 		ia.ia_tid = sc->sc_tidmap[i].it_tid;
789 
790 		/* Ignore uninteresting devices. */
791 		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
792 			if (iop_class[j].ic_class == ia.ia_class)
793 				break;
794 		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
795 		    (iop_class[j].ic_flags & mask) != maskval)
796 			continue;
797 
798 		/*
799 		 * Try to configure the device only if it's not already
800 		 * configured.
801  		 */
802  		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
803  			if (ia.ia_tid == ii->ii_tid) {
804 				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
805 				strcpy(sc->sc_tidmap[i].it_dvname,
806 				    device_xname(ii->ii_dv));
807  				break;
808 			}
809 		}
810 		if (ii != NULL)
811 			continue;
812 
813 		locs[IOPCF_TID] = ia.ia_tid;
814 
815 		dv = config_found_sm_loc(&sc->sc_dv, "iop", locs, &ia,
816 					 iop_print, config_stdsubmatch);
817 		if (dv != NULL) {
818  			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
819 			strcpy(sc->sc_tidmap[i].it_dvname, device_xname(dv));
820 		}
821 	}
822 }
823 
824 /*
825  * Adjust queue parameters for all child devices.
826  */
827 static void
828 iop_adjqparam(struct iop_softc *sc, int mpi)
829 {
830 	struct iop_initiator *ii;
831 
832 	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
833 		if (ii->ii_adjqparam != NULL)
834 			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
835 }
836 
837 static void
838 iop_devinfo(int class, char *devinfo, size_t l)
839 {
840 	int i;
841 
842 	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
843 		if (class == iop_class[i].ic_class)
844 			break;
845 
846 	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
847 		snprintf(devinfo, l, "device (class 0x%x)", class);
848 	else
849 		strlcpy(devinfo, iop_class[i].ic_caption, l);
850 }
851 
852 static int
853 iop_print(void *aux, const char *pnp)
854 {
855 	struct iop_attach_args *ia;
856 	char devinfo[256];
857 
858 	ia = aux;
859 
860 	if (pnp != NULL) {
861 		iop_devinfo(ia->ia_class, devinfo, sizeof(devinfo));
862 		aprint_normal("%s at %s", devinfo, pnp);
863 	}
864 	aprint_normal(" tid %d", ia->ia_tid);
865 	return (UNCONF);
866 }
867 
868 /*
869  * Shut down all configured IOPs.
870  */
871 static void
872 iop_shutdown(void *junk)
873 {
874 	struct iop_softc *sc;
875 	int i;
876 
877 	printf("shutting down iop devices...");
878 
879 	for (i = 0; i < iop_cd.cd_ndevs; i++) {
880 		if ((sc = device_lookup_private(&iop_cd, i)) == NULL)
881 			continue;
882 		if ((sc->sc_flags & IOP_ONLINE) == 0)
883 			continue;
884 
885 		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
886 		    0, 5000);
887 
888 		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
889 			/*
890 			 * Some AMI firmware revisions will go to sleep and
891 			 * never come back after this.
892 			 */
893 			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
894 			    IOP_ICTX, 0, 1000);
895 		}
896 	}
897 
898 	/* Wait.  Some boards could still be flushing, stupidly enough. */
899 	delay(5000*1000);
900 	printf(" done\n");
901 }
902 
903 /*
904  * Retrieve IOP status.
905  */
906 int
907 iop_status_get(struct iop_softc *sc, int nosleep)
908 {
909 	struct i2o_exec_status_get mf;
910 	struct i2o_status *st;
911 	paddr_t pa;
912 	int rv, i;
913 
914 	pa = sc->sc_scr_dmamap->dm_segs[0].ds_addr;
915 	st = (struct i2o_status *)sc->sc_scr;
916 
917 	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
918 	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
919 	mf.reserved[0] = 0;
920 	mf.reserved[1] = 0;
921 	mf.reserved[2] = 0;
922 	mf.reserved[3] = 0;
923 	mf.addrlow = (u_int32_t)pa;
924 	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
925 	mf.length = sizeof(sc->sc_status);
926 
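	/*
	 * Zero the status buffer and flush it to memory before posting.
	 * The IOP signals completion by setting syncbyte to 0xff, which
	 * is polled for below.
	 */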
927 	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
928 	    BUS_DMASYNC_PREWRITE);
929 	memset(st, 0, sizeof(*st));
930 	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
931 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_POSTWRITE);
932 
933 	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
934 		return (rv);
935 
936 	for (i = 100; i != 0; i--) {
937 		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
938 		    sizeof(*st), BUS_DMASYNC_POSTREAD);
939 		if (st->syncbyte == 0xff)
940 			break;
941 		if (nosleep)
942 			DELAY(100*1000);
943 		else
944 			kpause("iopstat", false, hz / 10, NULL);
945 	}
946 
947 	if (st->syncbyte != 0xff) {
948 		aprint_error_dev(&sc->sc_dv, "STATUS_GET timed out\n");
949 		rv = EIO;
950 	} else {
951 		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
952 		rv = 0;
953 	}
954 
955 	return (rv);
956 }
957 
958 /*
959  * Initialize and populate the IOP's outbound FIFO.
960  */
961 static int
962 iop_ofifo_init(struct iop_softc *sc)
963 {
964 	bus_addr_t addr;
965 	bus_dma_segment_t seg;
966 	struct i2o_exec_outbound_init *mf;
967 	int i, rseg, rv;
968 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;
969 
970 	sw = (u_int32_t *)sc->sc_scr;
971 
972 	mf = (struct i2o_exec_outbound_init *)mb;
973 	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
974 	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
975 	mf->msgictx = IOP_ICTX;
976 	mf->msgtctx = 0;
977 	mf->pagesize = PAGE_SIZE;
978 	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);
979 
980 	/*
981 	 * The I2O spec says that there are two SGLs: one for the status
982 	 * word, and one for a list of discarded MFAs.  It continues to say
983 	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
984 	 * necessary; this isn't the case (and is in fact a bad thing).
985 	 */
986 	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
987 	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
988 	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
989 	    (u_int32_t)sc->sc_scr_dmamap->dm_segs[0].ds_addr;
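	/*
	 * The high 16 bits of mb[0] carry the frame size in 32-bit words;
	 * grow it to cover the two SGL words appended above.
	 */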
990 	mb[0] += 2 << 16;
991 
992 	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
993 	    BUS_DMASYNC_PREWRITE);
994 	*sw = 0;
995 	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
996 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_POSTWRITE);
997 
998 	if ((rv = iop_post(sc, mb)) != 0)
999 		return (rv);
1000 
1001 	POLL(5000,
1002 	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1003 	    BUS_DMASYNC_POSTREAD),
1004 	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));
1005 
1006 	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
1007 		aprint_error_dev(&sc->sc_dv, "outbound FIFO init failed (%d)\n",
1008 		    le32toh(*sw));
1009 		return (EIO);
1010 	}
1011 
1012 	/* Allocate DMA safe memory for the reply frames. */
1013 	if (sc->sc_rep_phys == 0) {
1014 		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;
1015 
1016 		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
1017 		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
1018 		if (rv != 0) {
1019 			aprint_error_dev(&sc->sc_dv, "DMA alloc = %d\n",
1020 			   rv);
1021 			return (rv);
1022 		}
1023 
1024 		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
1025 		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1026 		if (rv != 0) {
1027 			aprint_error_dev(&sc->sc_dv, "DMA map = %d\n", rv);
1028 			return (rv);
1029 		}
1030 
1031 		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
1032 		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
1033 		if (rv != 0) {
1034 			aprint_error_dev(&sc->sc_dv, "DMA create = %d\n", rv);
1035 			return (rv);
1036 		}
1037 
1038 		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
1039 		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
1040 		if (rv != 0) {
1041 			aprint_error_dev(&sc->sc_dv, "DMA load = %d\n", rv);
1042 			return (rv);
1043 		}
1044 
1045 		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
1046 
1047 		/* Now safe to sync the reply map. */
1048 		sc->sc_curib = 0;
1049 	}
1050 
1051 	/* Populate the outbound FIFO. */
1052 	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
1053 		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
1054 		addr += sc->sc_framesize;
1055 	}
1056 
1057 	return (0);
1058 }
1059 
1060 /*
1061  * Read the specified number of bytes from the IOP's hardware resource table.
1062  */
1063 static int
1064 iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
1065 {
1066 	struct iop_msg *im;
1067 	int rv;
1068 	struct i2o_exec_hrt_get *mf;
1069 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1070 
1071 	im = iop_msg_alloc(sc, IM_WAIT);
1072 	mf = (struct i2o_exec_hrt_get *)mb;
1073 	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
1074 	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
1075 	mf->msgictx = IOP_ICTX;
1076 	mf->msgtctx = im->im_tctx;
1077 
1078 	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
1079 	rv = iop_msg_post(sc, im, mb, 30000);
1080 	iop_msg_unmap(sc, im);
1081 	iop_msg_free(sc, im);
1082 	return (rv);
1083 }
1084 
1085 /*
1086  * Read the IOP's hardware resource table.
1087  */
1088 static int
1089 iop_hrt_get(struct iop_softc *sc)
1090 {
1091 	struct i2o_hrt hrthdr, *hrt;
1092 	int size, rv;
1093 
1094 	uvm_lwp_hold(curlwp);
1095 	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
1096 	uvm_lwp_rele(curlwp);
1097 	if (rv != 0)
1098 		return (rv);
1099 
1100 	DPRINTF(("%s: %d hrt entries\n", device_xname(&sc->sc_dv),
1101 	    le16toh(hrthdr.numentries)));
1102 
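	/*
	 * struct i2o_hrt already embeds the first table entry, hence the
	 * "numentries - 1" below.
	 */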
1103 	size = sizeof(struct i2o_hrt) +
1104 	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
1105 	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
	if (hrt == NULL)
		return (ENOMEM);
1106 
1107 	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
1108 		free(hrt, M_DEVBUF);
1109 		return (rv);
1110 	}
1111 
1112 	if (sc->sc_hrt != NULL)
1113 		free(sc->sc_hrt, M_DEVBUF);
1114 	sc->sc_hrt = hrt;
1115 	return (0);
1116 }
1117 
1118 /*
1119  * Request the specified number of bytes from the IOP's logical
1120  * configuration table.  If a change indicator is specified, this
1121  * is a verbatim notification request, so the caller must be
1122  * prepared to wait indefinitely.
1123  */
1124 static int
1125 iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
1126 	     u_int32_t chgind)
1127 {
1128 	struct iop_msg *im;
1129 	struct i2o_exec_lct_notify *mf;
1130 	int rv;
1131 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1132 
1133 	im = iop_msg_alloc(sc, IM_WAIT);
1134 	memset(lct, 0, size);
1135 
1136 	mf = (struct i2o_exec_lct_notify *)mb;
1137 	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
1138 	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
1139 	mf->msgictx = IOP_ICTX;
1140 	mf->msgtctx = im->im_tctx;
1141 	mf->classid = I2O_CLASS_ANY;
1142 	mf->changeindicator = chgind;
1143 
1144 #ifdef I2ODEBUG
1145 	printf("iop_lct_get0: reading LCT");
1146 	if (chgind != 0)
1147 		printf(" (async)");
1148 	printf("\n");
1149 #endif
1150 
1151 	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
1152 	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
1153 	iop_msg_unmap(sc, im);
1154 	iop_msg_free(sc, im);
1155 	return (rv);
1156 }
1157 
1158 /*
1159  * Read the IOP's logical configuration table.
1160  */
1161 int
1162 iop_lct_get(struct iop_softc *sc)
1163 {
1164 	int esize, size, rv;
1165 	struct i2o_lct *lct;
1166 
1167 	esize = le32toh(sc->sc_status.expectedlctsize);
1168 	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
1169 	if (lct == NULL)
1170 		return (ENOMEM);
1171 
1172 	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
1173 		free(lct, M_DEVBUF);
1174 		return (rv);
1175 	}
1176 
1177 	size = le16toh(lct->tablesize) << 2;
1178 	if (esize != size) {
1179 		free(lct, M_DEVBUF);
1180 		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
1181 		if (lct == NULL)
1182 			return (ENOMEM);
1183 
1184 		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
1185 			free(lct, M_DEVBUF);
1186 			return (rv);
1187 		}
1188 	}
1189 
1190 	/* Swap in the new LCT. */
1191 	if (sc->sc_lct != NULL)
1192 		free(sc->sc_lct, M_DEVBUF);
1193 	sc->sc_lct = lct;
1194 	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
1195 	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
1196 	    sizeof(struct i2o_lct_entry);
1197 	return (0);
1198 }
1199 
1200 /*
1201  * Post a SYS_ENABLE message to the adapter.
1202  */
1203 int
1204 iop_sys_enable(struct iop_softc *sc)
1205 {
1206 	struct iop_msg *im;
1207 	struct i2o_msg mf;
1208 	int rv;
1209 
1210 	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
1211 
1212 	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
1213 	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_ENABLE);
1214 	mf.msgictx = IOP_ICTX;
1215 	mf.msgtctx = im->im_tctx;
1216 
1217 	rv = iop_msg_post(sc, im, &mf, 30000);
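	/*
	 * Note that a reply of ERROR_NO_DATA_XFER with detail
	 * INVALID_REQUEST is treated as success below; some IOPs
	 * apparently answer SYS_ENABLE this way.
	 */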
1218 	if (rv == 0) {
1219 		if ((im->im_flags & IM_FAIL) != 0)
1220 			rv = ENXIO;
1221 		else if (im->im_reqstatus == I2O_STATUS_SUCCESS ||
1222 		    (im->im_reqstatus == I2O_STATUS_ERROR_NO_DATA_XFER &&
1223 		    im->im_detstatus == I2O_DSC_INVALID_REQUEST))
1224 			rv = 0;
1225 		else
1226 			rv = EIO;
1227 	}
1228 
1229 	iop_msg_free(sc, im);
1230 	return (rv);
1231 }
1232 
1233 /*
1234  * Request the specified parameter group from the target.  If an initiator
1235  * is specified (a) don't wait for the operation to complete, but instead
1236  * let the initiator's interrupt handler deal with the reply and (b) place a
1237  * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
1238  */
1239 int
1240 iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
1241 		  int size, struct iop_initiator *ii)
1242 {
1243 	struct iop_msg *im;
1244 	struct i2o_util_params_op *mf;
1245 	int rv;
1246 	struct iop_pgop *pgop;
1247 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1248 
1249 	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
1250 	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
1251 		iop_msg_free(sc, im);
1252 		return (ENOMEM);
1253 	}
1254 	im->im_dvcontext = pgop;
1255 
1256 	mf = (struct i2o_util_params_op *)mb;
1257 	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1258 	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
1259 	mf->msgictx = IOP_ICTX;
1260 	mf->msgtctx = im->im_tctx;
1261 	mf->flags = 0;
1262 
1263 	pgop->olh.count = htole16(1);
1264 	pgop->olh.reserved = htole16(0);
1265 	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
1266 	pgop->oat.fieldcount = htole16(0xffff);
1267 	pgop->oat.group = htole16(group);
1268 
1269 	if (ii == NULL)
1270 		uvm_lwp_hold(curlwp);
1271 
1272 	memset(buf, 0, size);
1273 	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
1274 	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
1275 	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));
1276 
1277 	if (ii == NULL)
1278 		uvm_lwp_rele(curlwp);
1279 
1280 	/* Detect errors; let partial transfers count as success. */
1281 	if (ii == NULL && rv == 0) {
1282 		if (im->im_reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
1283 		    im->im_detstatus == I2O_DSC_UNKNOWN_ERROR)
1284 			rv = 0;
1285 		else
1286 			rv = (im->im_reqstatus != 0 ? EIO : 0);
1287 
1288 		if (rv != 0)
1289 			printf("%s: FIELD_GET failed for tid %d group %d\n",
1290 			    device_xname(&sc->sc_dv), tid, group);
1291 	}
1292 
1293 	if (ii == NULL || rv != 0) {
1294 		iop_msg_unmap(sc, im);
1295 		iop_msg_free(sc, im);
1296 		free(pgop, M_DEVBUF);
1297 	}
1298 
1299 	return (rv);
1300 }
1301 
1302 /*
1303  * Set a single field in a scalar parameter group.
1304  */
1305 int
1306 iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
1307 	      int size, int field)
1308 {
1309 	struct iop_msg *im;
1310 	struct i2o_util_params_op *mf;
1311 	struct iop_pgop *pgop;
1312 	int rv, totsize;
1313 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1314 
1315 	totsize = sizeof(*pgop) + size;
1316 
1317 	im = iop_msg_alloc(sc, IM_WAIT);
1318 	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
1319 		iop_msg_free(sc, im);
1320 		return (ENOMEM);
1321 	}
1322 
1323 	mf = (struct i2o_util_params_op *)mb;
1324 	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1325 	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1326 	mf->msgictx = IOP_ICTX;
1327 	mf->msgtctx = im->im_tctx;
1328 	mf->flags = 0;
1329 
1330 	pgop->olh.count = htole16(1);
1331 	pgop->olh.reserved = htole16(0);
1332 	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
1333 	pgop->oat.fieldcount = htole16(1);
1334 	pgop->oat.group = htole16(group);
1335 	pgop->oat.fields[0] = htole16(field);
1336 	memcpy(pgop + 1, buf, size);
1337 
1338 	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
1339 	rv = iop_msg_post(sc, im, mb, 30000);
1340 	if (rv != 0)
1341 		aprint_error_dev(&sc->sc_dv, "FIELD_SET failed for tid %d group %d\n",
1342 		    tid, group);
1343 
1344 	iop_msg_unmap(sc, im);
1345 	iop_msg_free(sc, im);
1346 	free(pgop, M_DEVBUF);
1347 	return (rv);
1348 }
1349 
1350 /*
1351  * Delete all rows in a tabular parameter group.
1352  */
1353 int
1354 iop_table_clear(struct iop_softc *sc, int tid, int group)
1355 {
1356 	struct iop_msg *im;
1357 	struct i2o_util_params_op *mf;
1358 	struct iop_pgop pgop;
1359 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1360 	int rv;
1361 
1362 	im = iop_msg_alloc(sc, IM_WAIT);
1363 
1364 	mf = (struct i2o_util_params_op *)mb;
1365 	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1366 	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1367 	mf->msgictx = IOP_ICTX;
1368 	mf->msgtctx = im->im_tctx;
1369 	mf->flags = 0;
1370 
1371 	pgop.olh.count = htole16(1);
1372 	pgop.olh.reserved = htole16(0);
1373 	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
1374 	pgop.oat.fieldcount = htole16(0);
1375 	pgop.oat.group = htole16(group);
1376 	pgop.oat.fields[0] = htole16(0);
1377 
1378 	uvm_lwp_hold(curlwp);
1379 	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
1380 	rv = iop_msg_post(sc, im, mb, 30000);
1381 	if (rv != 0)
1382 		aprint_error_dev(&sc->sc_dv, "TABLE_CLEAR failed for tid %d group %d\n",
1383 		    tid, group);
1384 
1385 	iop_msg_unmap(sc, im);
1386 	uvm_lwp_rele(curlwp);
1387 	iop_msg_free(sc, im);
1388 	return (rv);
1389 }
1390 
1391 /*
1392  * Add a single row to a tabular parameter group.  The row can have only one
1393  * field.
1394  */
1395 int
1396 iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
1397 		  int size, int row)
1398 {
1399 	struct iop_msg *im;
1400 	struct i2o_util_params_op *mf;
1401 	struct iop_pgop *pgop;
1402 	int rv, totsize;
1403 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1404 
1405 	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;
1406 
1407 	im = iop_msg_alloc(sc, IM_WAIT);
1408 	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
1409 		iop_msg_free(sc, im);
1410 		return (ENOMEM);
1411 	}
1412 
1413 	mf = (struct i2o_util_params_op *)mb;
1414 	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1415 	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1416 	mf->msgictx = IOP_ICTX;
1417 	mf->msgtctx = im->im_tctx;
1418 	mf->flags = 0;
1419 
1420 	pgop->olh.count = htole16(1);
1421 	pgop->olh.reserved = htole16(0);
1422 	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
1423 	pgop->oat.fieldcount = htole16(1);
1424 	pgop->oat.group = htole16(group);
1425 	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
1426 	pgop->oat.fields[1] = htole16(1);	/* RowCount */
1427 	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
1428 	memcpy(&pgop->oat.fields[3], buf, size);
1429 
1430 	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
1431 	rv = iop_msg_post(sc, im, mb, 30000);
1432 	if (rv != 0)
1433 		aprint_error_dev(&sc->sc_dv, "ADD_ROW failed for tid %d group %d row %d\n",
1434 		    tid, group, row);
1435 
1436 	iop_msg_unmap(sc, im);
1437 	iop_msg_free(sc, im);
1438 	free(pgop, M_DEVBUF);
1439 	return (rv);
1440 }
1441 
1442 /*
1443  * Execute a simple command (no parameters).
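 *
 * Illustrative example: quiesce an IOP, polling for up to 5 seconds
 * (this mirrors the call made in iop_shutdown()):
 *
 *	iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
 *	    0, 5000);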
1444  */
1445 int
1446 iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
1447 	       int async, int timo)
1448 {
1449 	struct iop_msg *im;
1450 	struct i2o_msg mf;
1451 	int rv, fl;
1452 
1453 	fl = (async != 0 ? IM_WAIT : IM_POLL);
1454 	im = iop_msg_alloc(sc, fl);
1455 
1456 	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
1457 	mf.msgfunc = I2O_MSGFUNC(tid, function);
1458 	mf.msgictx = ictx;
1459 	mf.msgtctx = im->im_tctx;
1460 
1461 	rv = iop_msg_post(sc, im, &mf, timo);
1462 	iop_msg_free(sc, im);
1463 	return (rv);
1464 }
1465 
1466 /*
1467  * Post the system table to the IOP.
1468  */
1469 static int
1470 iop_systab_set(struct iop_softc *sc)
1471 {
1472 	struct i2o_exec_sys_tab_set *mf;
1473 	struct iop_msg *im;
1474 	bus_space_handle_t bsh;
1475 	bus_addr_t boo;
1476 	u_int32_t mema[2], ioa[2];
1477 	int rv;
1478 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1479 
1480 	im = iop_msg_alloc(sc, IM_WAIT);
1481 
1482 	mf = (struct i2o_exec_sys_tab_set *)mb;
1483 	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
1484 	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
1485 	mf->msgictx = IOP_ICTX;
1486 	mf->msgtctx = im->im_tctx;
1487 	mf->iopid = (device_unit(&sc->sc_dv) + 2) << 12;
1488 	mf->segnumber = 0;
1489 
1490 	mema[1] = sc->sc_status.desiredprivmemsize;
1491 	ioa[1] = sc->sc_status.desiredpriviosize;
1492 
1493 	if (mema[1] != 0) {
1494 		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
1495 		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
1496 		mema[0] = htole32(boo);
1497 		if (rv != 0) {
1498 			aprint_error_dev(&sc->sc_dv, "can't alloc priv mem space, err = %d\n", rv);
1499 			mema[0] = 0;
1500 			mema[1] = 0;
1501 		}
1502 	}
1503 
1504 	if (ioa[1] != 0) {
1505 		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
1506 		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
1507 		ioa[0] = htole32(boo);
1508 		if (rv != 0) {
1509 			aprint_error_dev(&sc->sc_dv, "can't alloc priv i/o space, err = %d\n", rv);
1510 			ioa[0] = 0;
1511 			ioa[1] = 0;
1512 		}
1513 	}
1514 
1515 	uvm_lwp_hold(curlwp);
1516 	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
1517 	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
1518 	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
1519 	rv = iop_msg_post(sc, im, mb, 5000);
1520 	iop_msg_unmap(sc, im);
1521 	iop_msg_free(sc, im);
1522 	uvm_lwp_rele(curlwp);
1523 	return (rv);
1524 }
1525 
1526 /*
1527  * Reset the IOP.  Must be called with interrupts disabled.
1528  */
1529 static int
1530 iop_reset(struct iop_softc *sc)
1531 {
1532 	u_int32_t mfa, *sw;
1533 	struct i2o_exec_iop_reset mf;
1534 	int rv;
1535 	paddr_t pa;
1536 
1537 	sw = (u_int32_t *)sc->sc_scr;
1538 	pa = sc->sc_scr_dmamap->dm_segs[0].ds_addr;
1539 
1540 	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
1541 	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
1542 	mf.reserved[0] = 0;
1543 	mf.reserved[1] = 0;
1544 	mf.reserved[2] = 0;
1545 	mf.reserved[3] = 0;
1546 	mf.statuslow = (u_int32_t)pa;
1547 	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);
1548 
1549 	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1550 	    BUS_DMASYNC_PREWRITE);
1551 	*sw = htole32(0);
1552 	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1553 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1554 
1555 	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
1556 		return (rv);
1557 
1558 	POLL(2500,
1559 	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1560 	    BUS_DMASYNC_POSTREAD), *sw != 0));
1561 	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
1562 		aprint_error_dev(&sc->sc_dv, "reset rejected, status 0x%x\n",
1563 		    le32toh(*sw));
1564 		return (EIO);
1565 	}
1566 
1567 	/*
1568 	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
1569 	 * the inbound queue to become responsive.
1570 	 */
1571 	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
1572 	if (mfa == IOP_MFA_EMPTY) {
1573 		aprint_error_dev(&sc->sc_dv, "reset failed\n");
1574 		return (EIO);
1575 	}
1576 
1577 	iop_release_mfa(sc, mfa);
1578 	return (0);
1579 }
1580 
1581 /*
1582  * Register a new initiator.  Must be called with the configuration lock
1583  * held.
1584  */
1585 void
1586 iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
1587 {
1588 	static int ictxgen;
1589 
1590 	/* 0 is reserved (by us) for system messages. */
1591 	ii->ii_ictx = ++ictxgen;
1592 
1593 	/*
1594 	 * `Utility initiators' don't make it onto the per-IOP initiator list
1595 	 * (which is used only for configuration), but do get one slot on
1596 	 * the inbound queue.
1597 	 */
1598 	if ((ii->ii_flags & II_UTILITY) == 0) {
1599 		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
1600 		sc->sc_nii++;
1601 	} else
1602 		sc->sc_nuii++;
1603 
1604 	cv_init(&ii->ii_cv, "iopevt");
1605 
1606 	mutex_spin_enter(&sc->sc_intrlock);
1607 	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
1608 	mutex_spin_exit(&sc->sc_intrlock);
1609 }
1610 
1611 /*
1612  * Unregister an initiator.  Must be called with the configuration lock
1613  * held.
1614  */
1615 void
1616 iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
1617 {
1618 
1619 	if ((ii->ii_flags & II_UTILITY) == 0) {
1620 		LIST_REMOVE(ii, ii_list);
1621 		sc->sc_nii--;
1622 	} else
1623 		sc->sc_nuii--;
1624 
1625 	mutex_spin_enter(&sc->sc_intrlock);
1626 	LIST_REMOVE(ii, ii_hash);
1627 	mutex_spin_exit(&sc->sc_intrlock);
1628 
1629 	cv_destroy(&ii->ii_cv);
1630 }
1631 
1632 /*
1633  * Handle a reply frame from the IOP.
1634  */
1635 static int
1636 iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
1637 {
1638 	struct iop_msg *im;
1639 	struct i2o_reply *rb;
1640 	struct i2o_fault_notify *fn;
1641 	struct iop_initiator *ii;
1642 	u_int off, ictx, tctx, status, size;
1643 
1644 	KASSERT(mutex_owned(&sc->sc_intrlock));
1645 
1646 	off = (int)(rmfa - sc->sc_rep_phys);
1647 	rb = (struct i2o_reply *)((char *)sc->sc_rep + off);
1648 
1649 	/* Perform reply queue DMA synchronisation. */
1650 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
1651 	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);
1652 
1653 #ifdef I2ODEBUG
1654 	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
1655 		panic("iop_handle_reply: 64-bit reply");
1656 #endif
1657 	/*
1658 	 * Find the initiator.
1659 	 */
1660 	ictx = le32toh(rb->msgictx);
1661 	if (ictx == IOP_ICTX)
1662 		ii = NULL;
1663 	else {
1664 		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
1665 		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
1666 			if (ii->ii_ictx == ictx)
1667 				break;
1668 		if (ii == NULL) {
1669 #ifdef I2ODEBUG
1670 			iop_reply_print(sc, rb);
1671 #endif
1672 			aprint_error_dev(&sc->sc_dv, "WARNING: bad ictx returned (%x)\n",
1673 			    ictx);
1674 			return (-1);
1675 		}
1676 	}
1677 
1678 	/*
1679 	 * If we received a transport failure notice, we've got to dig the
1680 	 * transaction context (if any) out of the original message frame,
1681 	 * and then release the original MFA back to the inbound FIFO.
1682 	 */
1683 	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
1684 		status = I2O_STATUS_SUCCESS;
1685 
1686 		fn = (struct i2o_fault_notify *)rb;
1687 		tctx = iop_inl_msg(sc, fn->lowmfa + 12);
1688 		iop_release_mfa(sc, fn->lowmfa);
1689 		iop_tfn_print(sc, fn);
1690 	} else {
1691 		status = rb->reqstatus;
1692 		tctx = le32toh(rb->msgtctx);
1693 	}
1694 
1695 	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
1696 		/*
1697 		 * This initiator tracks state using message wrappers.
1698 		 *
1699 		 * Find the originating message wrapper, and if requested
1700 		 * notify the initiator.
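		 *
		 * The low bits of the transaction context index sc_ims;
		 * the high generation bits are compared against im_tctx
		 * to reject stale or duplicate replies.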
1701 		 */
1702 		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
1703 		if ((tctx & IOP_TCTX_MASK) >= sc->sc_maxib ||
1704 		    (im->im_flags & IM_ALLOCED) == 0 ||
1705 		    tctx != im->im_tctx) {
1706 			aprint_error_dev(&sc->sc_dv, "WARNING: bad tctx returned (0x%08x, %p)\n", tctx, im);
1707 			if (im != NULL)
1708 				aprint_error_dev(&sc->sc_dv, "flags=0x%08x tctx=0x%08x\n",
1709 				    im->im_flags, im->im_tctx);
1710 #ifdef I2ODEBUG
1711 			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
1712 				iop_reply_print(sc, rb);
1713 #endif
1714 			return (-1);
1715 		}
1716 
1717 		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
1718 			im->im_flags |= IM_FAIL;
1719 
1720 #ifdef I2ODEBUG
1721 		if ((im->im_flags & IM_REPLIED) != 0)
1722 			panic("%s: dup reply", device_xname(&sc->sc_dv));
1723 #endif
1724 		im->im_flags |= IM_REPLIED;
1725 
1726 #ifdef I2ODEBUG
1727 		if (status != I2O_STATUS_SUCCESS)
1728 			iop_reply_print(sc, rb);
1729 #endif
1730 		im->im_reqstatus = status;
1731 		im->im_detstatus = le16toh(rb->detail);
1732 
1733 		/* Copy the reply frame, if requested. */
1734 		if (im->im_rb != NULL) {
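			/*
			 * The high 16 bits of msgflags give the reply
			 * size in 32-bit words; ">> 14 & ~3" converts
			 * that to bytes in one step.
			 */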
1735 			size = (le32toh(rb->msgflags) >> 14) & ~3;
1736 #ifdef I2ODEBUG
1737 			if (size > sc->sc_framesize)
1738 				panic("iop_handle_reply: reply too large");
1739 #endif
1740 			memcpy(im->im_rb, rb, size);
1741 		}
1742 
1743 		/* Notify the initiator. */
1744 		if ((im->im_flags & IM_WAIT) != 0)
1745 			cv_broadcast(&im->im_cv);
1746 		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL) {
1747 			if (ii != NULL) {
1748 				mutex_spin_exit(&sc->sc_intrlock);
1749 				(*ii->ii_intr)(ii->ii_dv, im, rb);
1750 				mutex_spin_enter(&sc->sc_intrlock);
1751 			}
1752 		}
1753 	} else {
1754 		/*
1755 		 * This initiator discards message wrappers.
1756 		 *
1757 		 * Simply pass the reply frame to the initiator.
1758 		 */
1759 		if (ii != NULL) {
1760 			mutex_spin_exit(&sc->sc_intrlock);
1761 			(*ii->ii_intr)(ii->ii_dv, NULL, rb);
1762 			mutex_spin_enter(&sc->sc_intrlock);
1763 		}
1764 	}
1765 
1766 	return (status);
1767 }
1768 
1769 /*
1770  * Handle an interrupt from the IOP.
1771  */
1772 int
1773 iop_intr(void *arg)
1774 {
1775 	struct iop_softc *sc;
1776 	u_int32_t rmfa;
1777 
1778 	sc = arg;
1779 
1780 	mutex_spin_enter(&sc->sc_intrlock);
1781 
1782 	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0) {
1783 		mutex_spin_exit(&sc->sc_intrlock);
1784 		return (0);
1785 	}
1786 
1787 	for (;;) {
1788 		/* Double read to account for IOP bug. */
1789 		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
1790 			rmfa = iop_inl(sc, IOP_REG_OFIFO);
1791 			if (rmfa == IOP_MFA_EMPTY)
1792 				break;
1793 		}
1794 		iop_handle_reply(sc, rmfa);
1795 		iop_outl(sc, IOP_REG_OFIFO, rmfa);
1796 	}
1797 
1798 	mutex_spin_exit(&sc->sc_intrlock);
1799 	return (1);
1800 }
1801 
1802 /*
1803  * Handle an event signalled by the executive.
1804  */
1805 static void
1806 iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
1807 {
1808 	struct i2o_util_event_register_reply *rb;
1809 	u_int event;
1810 
1811 	rb = reply;
1812 
1813 	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
1814 		return;
1815 
1816 	event = le32toh(rb->event);
1817 	printf("%s: event 0x%08x received\n", device_xname(dv), event);
1818 }
1819 
1820 /*
1821  * Allocate a message wrapper.
1822  */
1823 struct iop_msg *
1824 iop_msg_alloc(struct iop_softc *sc, int flags)
1825 {
1826 	struct iop_msg *im;
1827 	static u_int tctxgen;
1828 	int i;
1829 
1830 #ifdef I2ODEBUG
1831 	if ((flags & IM_SYSMASK) != 0)
1832 		panic("iop_msg_alloc: system flags specified");
1833 #endif
1834 
1835 	mutex_spin_enter(&sc->sc_intrlock);
1836 	im = SLIST_FIRST(&sc->sc_im_freelist);
1837 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
1838 	if (im == NULL)
1839 		panic("iop_msg_alloc: no free wrappers");
1840 #endif
1841 	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
1842 	mutex_spin_exit(&sc->sc_intrlock);
1843 
1844 	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
1845 	tctxgen += (1 << IOP_TCTX_SHIFT);
1846 	im->im_flags = flags | IM_ALLOCED;
1847 	im->im_rb = NULL;
1848 	i = 0;
1849 	do {
1850 		im->im_xfer[i++].ix_size = 0;
1851 	} while (i < IOP_MAX_MSG_XFERS);
1852 
1853 	return (im);
1854 }
1855 
1856 /*
1857  * Free a message wrapper.
1858  */
1859 void
1860 iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
1861 {
1862 
1863 #ifdef I2ODEBUG
1864 	if ((im->im_flags & IM_ALLOCED) == 0)
1865 		panic("iop_msg_free: wrapper not allocated");
1866 #endif
1867 
1868 	im->im_flags = 0;
1869 	mutex_spin_enter(&sc->sc_intrlock);
1870 	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
1871 	mutex_spin_exit(&sc->sc_intrlock);
1872 }
1873 
1874 /*
1875  * Map a data transfer.  Write a scatter-gather list into the message frame.
1876  */
1877 int
1878 iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1879 	    void *xferaddr, int xfersize, int out, struct proc *up)
1880 {
1881 	bus_dmamap_t dm;
1882 	bus_dma_segment_t *ds;
1883 	struct iop_xfer *ix;
1884 	u_int rv, i, nsegs, flg, off, xn;
1885 	u_int32_t *p;
1886 
1887 	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
1888 		if (ix->ix_size == 0)
1889 			break;
1890 
1891 #ifdef I2ODEBUG
1892 	if (xfersize == 0)
1893 		panic("iop_msg_map: null transfer");
1894 	if (xfersize > IOP_MAX_XFER)
1895 		panic("iop_msg_map: transfer too large");
1896 	if (xn == IOP_MAX_MSG_XFERS)
1897 		panic("iop_msg_map: too many xfers");
1898 #endif
1899 
1900 	/*
1901 	 * Only the first DMA map is static.
1902 	 */
1903 	if (xn != 0) {
1904 		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
1905 		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
1906 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
1907 		if (rv != 0)
1908 			return (rv);
1909 	}
1910 
1911 	dm = ix->ix_map;
1912 	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
1913 	    (up == NULL ? BUS_DMA_NOWAIT : 0));
1914 	if (rv != 0)
1915 		goto bad;
1916 
1917 	/*
1918 	 * How many SIMPLE SG elements can we fit in this message?
1919 	 */
1920 	off = mb[0] >> 16;
1921 	p = mb + off;
1922 	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
1923 
1924 	if (dm->dm_nsegs > nsegs) {
1925 		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
1926 		rv = EFBIG;
1927 		DPRINTF(("iop_msg_map: too many segs\n"));
1928 		goto bad;
1929 	}
1930 
1931 	nsegs = dm->dm_nsegs;
1932 	xfersize = 0;
1933 
1934 	/*
1935 	 * Write out the SG list.
1936 	 */
1937 	if (out)
1938 		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
1939 	else
1940 		flg = I2O_SGL_SIMPLE;
1941 
1942 	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
1943 		p[0] = (u_int32_t)ds->ds_len | flg;
1944 		p[1] = (u_int32_t)ds->ds_addr;
1945 		xfersize += ds->ds_len;
1946 	}
1947 
1948 	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
1949 	p[1] = (u_int32_t)ds->ds_addr;
1950 	xfersize += ds->ds_len;
1951 
1952 	/* Fix up the transfer record, and sync the map. */
1953 	ix->ix_flags = (out ? IX_OUT : IX_IN);
1954 	ix->ix_size = xfersize;
1955 	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
1956 	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
1957 
1958 	/*
1959 	 * If this is the first xfer we've mapped for this message, adjust
1960 	 * the SGL offset field in the message header.
1961 	 */
1962 	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
1963 		mb[0] += (mb[0] >> 12) & 0xf0;
1964 		im->im_flags |= IM_SGLOFFADJ;
1965 	}
1966 	mb[0] += (nsegs << 17);
1967 	return (0);
1968 
1969  bad:
1970 	if (xn != 0)
1971 		bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
1972 	return (rv);
1973 }
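
#if 0
/*
 * Illustrative sketch only (a hypothetical helper, not part of the
 * driver): the usual life cycle of a message with a mapped transfer.
 * The frame "mf" is assumed to have been filled in by the caller with
 * I2O_MSGFLAGS()/I2O_MSGFUNC(), much as iop_util_claim() below does
 * for a simpler, unmapped message.
 */
static int
iop_example_post_with_xfer(struct iop_softc *sc, u_int32_t *mf, void *buf,
    int len, int out)
{
	struct iop_msg *im;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	/* Route the reply back to this wrapper. */
	((struct i2o_msg *)mf)->msgictx = IOP_ICTX;
	((struct i2o_msg *)mf)->msgtctx = im->im_tctx;

	/* Write an SGL describing "buf" into the frame. */
	rv = iop_msg_map(sc, im, mf, buf, len, out, NULL);
	if (rv == 0) {
		/* Post, and sleep up to five seconds for the reply. */
		rv = iop_msg_post(sc, im, mf, 5000);
		iop_msg_unmap(sc, im);
	}
	iop_msg_free(sc, im);
	return (rv);
}
#endif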
1974 
1975 /*
1976  * Map a block I/O data transfer (different in that there's only one per
1977  * message maximum, and PAGE_LIST addressing may be used).  Write a
1978  * scatter-gather list into the message frame.
1979  */
1980 int
1981 iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1982 		void *xferaddr, int xfersize, int out)
1983 {
1984 	bus_dma_segment_t *ds;
1985 	bus_dmamap_t dm;
1986 	struct iop_xfer *ix;
1987 	u_int rv, i, nsegs, off, slen, tlen, flg;
1988 	paddr_t saddr, eaddr;
1989 	u_int32_t *p;
1990 
1991 #ifdef I2ODEBUG
1992 	if (xfersize == 0)
1993 		panic("iop_msg_map_bio: null transfer");
1994 	if (xfersize > IOP_MAX_XFER)
1995 		panic("iop_msg_map_bio: transfer too large");
1996 	if ((im->im_flags & IM_SGLOFFADJ) != 0)
1997 		panic("iop_msg_map_bio: SGLOFFADJ");
1998 #endif
1999 
2000 	ix = im->im_xfer;
2001 	dm = ix->ix_map;
2002 	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
2003 	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
2004 	if (rv != 0)
2005 		return (rv);
2006 
2007 	off = mb[0] >> 16;
2008 	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
2009 
2010 	/*
2011 	 * If the transfer is highly fragmented and won't fit using SIMPLE
2012 	 * elements, use PAGE_LIST elements instead.  SIMPLE elements are
2013 	 * potentially more efficient, both for us and the IOP.
2014 	 */
2015 	if (dm->dm_nsegs > nsegs) {
2016 		nsegs = 1;
2017 		p = mb + off + 1;
2018 
2019 		/* XXX This should be done with a bus_space flag. */
2020 		for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
2021 			slen = ds->ds_len;
2022 			saddr = ds->ds_addr;
2023 
2024 			while (slen > 0) {
2025 				eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
2026 				tlen = min(eaddr - saddr, slen);
2027 				slen -= tlen;
2028 				*p++ = htole32(saddr);
2029 				saddr = eaddr;
2030 				nsegs++;
2031 			}
2032 		}
2033 
2034 		mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
2035 		    I2O_SGL_END;
2036 		if (out)
2037 			mb[off] |= I2O_SGL_DATA_OUT;
2038 	} else {
2039 		p = mb + off;
2040 		nsegs = dm->dm_nsegs;
2041 
2042 		if (out)
2043 			flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
2044 		else
2045 			flg = I2O_SGL_SIMPLE;
2046 
2047 		for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
2048 			p[0] = (u_int32_t)ds->ds_len | flg;
2049 			p[1] = (u_int32_t)ds->ds_addr;
2050 		}
2051 
2052 		p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2053 		    I2O_SGL_END;
2054 		p[1] = (u_int32_t)ds->ds_addr;
2055 		nsegs <<= 1;
2056 	}
2057 
2058 	/* Fix up the transfer record, and sync the map. */
2059 	ix->ix_flags = (out ? IX_OUT : IX_IN);
2060 	ix->ix_size = xfersize;
2061 	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
2062 	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
2063 
2064 	/*
2065 	 * Adjust the SGL offset and total message size fields.  We don't
2066 	 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
2067 	 */
2068 	mb[0] += ((off << 4) + (nsegs << 16));
2069 	return (0);
2070 }
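
/*
 * Worked example of the capacity check above, assuming a 128-byte
 * message frame (sc_framesize == 128, i.e. 32 words) and an SGL
 * starting at word offset 8: (32 - 8) / 2 = 12 two-word SIMPLE
 * elements fit.  A transfer fragmented into more than 12 segments
 * therefore falls back to a single PAGE_LIST element with one
 * address word per page.
 */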
2071 
2072 /*
2073  * Unmap all data transfers associated with a message wrapper.
2074  */
2075 void
2076 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2077 {
2078 	struct iop_xfer *ix;
2079 	int i;
2080 
2081 #ifdef I2ODEBUG
2082 	if (im->im_xfer[0].ix_size == 0)
2083 		panic("iop_msg_unmap: no transfers mapped");
2084 #endif
2085 
2086 	for (ix = im->im_xfer, i = 0;;) {
2087 		bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2088 		    ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2089 		    BUS_DMASYNC_POSTREAD);
2090 		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2091 
2092 		/* Only the first DMA map is static. */
2093 		if (i != 0)
2094 			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
2095 		if (++i >= IOP_MAX_MSG_XFERS)
2096 			break;
2097 		if ((++ix)->ix_size == 0)
2098 			break;
2099 	}
2100 }
2101 
2102 /*
2103  * Post a message frame to the IOP's inbound queue.
2104  */
2105 int
2106 iop_post(struct iop_softc *sc, u_int32_t *mb)
2107 {
2108 	u_int32_t mfa;
2109 
2110 #ifdef I2ODEBUG
2111 	if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2112 		panic("iop_post: frame too large");
2113 #endif
2114 
2115 	mutex_spin_enter(&sc->sc_intrlock);
2116 
2117 	/* Allocate a slot with the IOP. */
2118 	if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2119 		if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2120 			mutex_spin_exit(&sc->sc_intrlock);
2121 			aprint_error_dev(&sc->sc_dv, "mfa not forthcoming\n");
2122 			return (EAGAIN);
2123 		}
2124 
2125 	/* Perform reply buffer DMA synchronisation. */
2126 	if (sc->sc_rep_size != 0) {
2127 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2128 		    sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2129 	}
2130 
2131 	/* Copy out the message frame. */
2132 	bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
2133 	    mb[0] >> 16);
2134 	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
2135 	    (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);
2136 
2137 	/* Post the MFA back to the IOP. */
2138 	iop_outl(sc, IOP_REG_IFIFO, mfa);
2139 
2140 	mutex_spin_exit(&sc->sc_intrlock);
2141 	return (0);
2142 }
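
#if 0
/*
 * Illustrative sketch only: posting a prebuilt frame directly with
 * iop_post(), here a UTIL NOP that the IOP does not reply to.  The
 * driver itself issues NOPs via iop_release_mfa() below; this
 * hypothetical helper merely shows the interface, and assumes that
 * I2O_MSGFLAGS() may be applied to the bare i2o_msg header.
 */
static int
iop_example_nop(struct iop_softc *sc)
{
	struct i2o_msg mf;

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP);
	mf.msgictx = IOP_ICTX;
	mf.msgtctx = 0;

	return (iop_post(sc, (u_int32_t *)&mf));
}
#endif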
2143 
2144 /*
2145  * Post a message to the IOP and deal with completion.
2146  */
2147 int
2148 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2149 {
2150 	u_int32_t *mb;
2151 	int rv;
2152 
2153 	mb = xmb;
2154 
2155 	/* Terminate the scatter/gather list chain. */
2156 	if ((im->im_flags & IM_SGLOFFADJ) != 0)
2157 		mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2158 
2159 	if ((rv = iop_post(sc, mb)) != 0)
2160 		return (rv);
2161 
2162 	if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2163 		if ((im->im_flags & IM_POLL) != 0)
2164 			iop_msg_poll(sc, im, timo);
2165 		else
2166 			iop_msg_wait(sc, im, timo);
2167 
2168 		mutex_spin_enter(&sc->sc_intrlock);
2169 		if ((im->im_flags & IM_REPLIED) != 0) {
2170 			if ((im->im_flags & IM_NOSTATUS) != 0)
2171 				rv = 0;
2172 			else if ((im->im_flags & IM_FAIL) != 0)
2173 				rv = ENXIO;
2174 			else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2175 				rv = EIO;
2176 			else
2177 				rv = 0;
2178 		} else
2179 			rv = EBUSY;
2180 		mutex_spin_exit(&sc->sc_intrlock);
2181 	} else
2182 		rv = 0;
2183 
2184 	return (rv);
2185 }
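
/*
 * Callers pick the completion strategy when allocating the wrapper:
 * IM_POLL spins in iop_msg_poll(), which drains the outbound FIFO by
 * hand and so works with interrupt delivery disabled; IM_WAIT sleeps
 * in iop_msg_wait(); and with neither flag set the reply is delivered
 * asynchronously through the initiator's ii_intr hook, as in
 * iop_handle_reply() above.
 */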
2186 
2187 /*
2188  * Spin until the specified message is replied to.
2189  */
2190 static void
2191 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2192 {
2193 	u_int32_t rmfa;
2194 
2195 	mutex_spin_enter(&sc->sc_intrlock);
2196 
2197 	for (timo *= 10; timo != 0; timo--) {
2198 		if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2199 			/* Double read to account for IOP bug. */
2200 			rmfa = iop_inl(sc, IOP_REG_OFIFO);
2201 			if (rmfa == IOP_MFA_EMPTY)
2202 				rmfa = iop_inl(sc, IOP_REG_OFIFO);
2203 			if (rmfa != IOP_MFA_EMPTY) {
2204 				iop_handle_reply(sc, rmfa);
2205 
2206 				/*
2207 				 * Return the reply frame to the IOP's
2208 				 * outbound FIFO.
2209 				 */
2210 				iop_outl(sc, IOP_REG_OFIFO, rmfa);
2211 			}
2212 		}
2213 		if ((im->im_flags & IM_REPLIED) != 0)
2214 			break;
2215 		mutex_spin_exit(&sc->sc_intrlock);
2216 		DELAY(100);
2217 		mutex_spin_enter(&sc->sc_intrlock);
2218 	}
2219 
2220 	if (timo == 0) {
2221 #ifdef I2ODEBUG
2222 		printf("%s: poll - no reply\n", device_xname(&sc->sc_dv));
2223 		if (iop_status_get(sc, 1) != 0)
2224 			printf("iop_msg_poll: unable to retrieve status\n");
2225 		else
2226 			printf("iop_msg_poll: IOP state = %d\n",
2227 			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2228 #endif
2229 	}
2230 
2231 	mutex_spin_exit(&sc->sc_intrlock);
2232 }
2233 
2234 /*
2235  * Sleep until the specified message is replied to.
2236  */
2237 static void
2238 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2239 {
2240 	int rv;
2241 
2242 	mutex_spin_enter(&sc->sc_intrlock);
2243 	if ((im->im_flags & IM_REPLIED) != 0) {
2244 		mutex_spin_exit(&sc->sc_intrlock);
2245 		return;
2246 	}
2247 	rv = cv_timedwait(&im->im_cv, &sc->sc_intrlock, mstohz(timo));
2248 	mutex_spin_exit(&sc->sc_intrlock);
2249 
2250 #ifdef I2ODEBUG
2251 	if (rv != 0) {
2252 		printf("iop_msg_wait: cv_timedwait() == %d\n", rv);
2253 		if (iop_status_get(sc, 0) != 0)
2254 			printf("iop_msg_wait: unable to retrieve status\n");
2255 		else
2256 			printf("iop_msg_wait: IOP state = %d\n",
2257 			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2258 	}
2259 #endif
2260 }
2261 
2262 /*
2263  * Release an unused message frame back to the IOP's inbound FIFO.
2264  */
2265 static void
2266 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2267 {
2268 
2269 	/* Use the frame to issue a no-op. */
2270 	iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
2271 	iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2272 	iop_outl_msg(sc, mfa + 8, 0);
2273 	iop_outl_msg(sc, mfa + 12, 0);
2274 
2275 	iop_outl(sc, IOP_REG_IFIFO, mfa);
2276 }
2277 
2278 #ifdef I2ODEBUG
2279 /*
2280  * Dump a reply frame header.
2281  */
2282 static void
2283 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2284 {
2285 	u_int function, detail;
2286 	const char *statusstr;
2287 
2288 	function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2289 	detail = le16toh(rb->detail);
2290 
2291 	printf("%s: reply:\n", device_xname(&sc->sc_dv));
2292 
2293 	if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2294 		statusstr = iop_status[rb->reqstatus];
2295 	else
2296 		statusstr = "undefined error code";
2297 
2298 	printf("%s:   function=0x%02x status=0x%02x (%s)\n",
2299 	    device_xname(&sc->sc_dv), function, rb->reqstatus, statusstr);
2300 	printf("%s:   detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2301 	    device_xname(&sc->sc_dv), detail, le32toh(rb->msgictx),
2302 	    le32toh(rb->msgtctx));
2303 	printf("%s:   tidi=%d tidt=%d flags=0x%02x\n", device_xname(&sc->sc_dv),
2304 	    (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2305 	    (le32toh(rb->msgflags) >> 8) & 0xff);
2306 }
2307 #endif
2308 
2309 /*
2310  * Dump a transport failure reply.
2311  */
2312 static void
2313 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2314 {
2315 
2316 	printf("%s: WARNING: transport failure:\n", device_xname(&sc->sc_dv));
2317 
2318 	printf("%s:  ictx=0x%08x tctx=0x%08x\n", device_xname(&sc->sc_dv),
2319 	    le32toh(fn->msgictx), le32toh(fn->msgtctx));
2320 	printf("%s:  failurecode=0x%02x severity=0x%02x\n",
2321 	    device_xname(&sc->sc_dv), fn->failurecode, fn->severity);
2322 	printf("%s:  highestver=0x%02x lowestver=0x%02x\n",
2323 	    device_xname(&sc->sc_dv), fn->highestver, fn->lowestver);
2324 }
2325 
2326 /*
2327  * Translate an I2O ASCII field into a C string.
2328  */
2329 void
2330 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2331 {
2332 	int hc, lc, i, nit;
2333 
2334 	dlen--;
2335 	lc = 0;
2336 	hc = 0;
2337 	i = 0;
2338 
2339 	/*
2340 	 * DPT use NUL as a space, whereas AMI use it as a terminator.  The
2341 	 * spec has nothing to say about it.  Since AMI fields are usually
2342 	 * filled with junk after the terminator, we stop at the first NUL
	 * unless the IOP is from DPT.
2343 	 */
2344 	nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2345 
2346 	while (slen-- != 0 && dlen-- != 0) {
2347 		if (nit && *src == '\0')
2348 			break;
2349 		else if (*src <= 0x20 || *src >= 0x7f) {
2350 			if (hc)
2351 				dst[i++] = ' ';
2352 		} else {
2353 			hc = 1;
2354 			dst[i++] = *src;
2355 			lc = i;
2356 		}
2357 		src++;
2358 	}
2359 
2360 	dst[lc] = '\0';
2361 }
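
/*
 * For example, an AMI-style field "RAID-5\0####" comes out as
 * "RAID-5", and a space-padded field "RAID-5   " is trimmed to the
 * same string, since the terminating NUL is written just past the
 * last printable character copied.
 */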
2362 
2363 /*
2364  * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2365  */
2366 int
2367 iop_print_ident(struct iop_softc *sc, int tid)
2368 {
2369 	struct {
2370 		struct	i2o_param_op_results pr;
2371 		struct	i2o_param_read_results prr;
2372 		struct	i2o_param_device_identity di;
2373 	} __packed p;
2374 	char buf[32];
2375 	int rv;
2376 
2377 	rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2378 	    sizeof(p), NULL);
2379 	if (rv != 0)
2380 		return (rv);
2381 
2382 	iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2383 	    sizeof(buf));
2384 	printf(" <%s, ", buf);
2385 	iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2386 	    sizeof(buf));
2387 	printf("%s, ", buf);
2388 	iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2389 	printf("%s>", buf);
2390 
2391 	return (0);
2392 }
2393 
2394 /*
2395  * Claim or unclaim the specified TID.
2396  */
2397 int
2398 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2399 	       int flags)
2400 {
2401 	struct iop_msg *im;
2402 	struct i2o_util_claim mf;
2403 	int rv, func;
2404 
2405 	func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2406 	im = iop_msg_alloc(sc, IM_WAIT);
2407 
2408 	/* We can use the same structure, as they're identical. */
2409 	mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2410 	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2411 	mf.msgictx = ii->ii_ictx;
2412 	mf.msgtctx = im->im_tctx;
2413 	mf.flags = flags;
2414 
2415 	rv = iop_msg_post(sc, im, &mf, 5000);
2416 	iop_msg_free(sc, im);
2417 	return (rv);
2418 }
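
/*
 * A typical OSM claims its target while attaching and releases the
 * claim on detach.  As a sketch (the flag name here is an assumption
 * about the claim definitions in i2o.h, not taken from this file):
 *
 *	iop_util_claim(sc, &sc->sc_ii, 0, I2O_UTIL_CLAIM_PRIMARY_USER);
 */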
2419 
2420 /*
2421  * Perform an abort.
2422  */
2423 int
2424 iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
    int tctxabort, int flags)
2425 {
2426 	struct iop_msg *im;
2427 	struct i2o_util_abort mf;
2428 	int rv;
2429 
2430 	im = iop_msg_alloc(sc, IM_WAIT);
2431 
2432 	mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2433 	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2434 	mf.msgictx = ii->ii_ictx;
2435 	mf.msgtctx = im->im_tctx;
2436 	mf.flags = (func << 24) | flags;
2437 	mf.tctxabort = tctxabort;
2438 
2439 	rv = iop_msg_post(sc, im, &mf, 5000);
2440 	iop_msg_free(sc, im);
2441 	return (rv);
2442 }
2443 
2444 /*
2445  * Enable or disable reception of events for the specified device.
2446  */
2447 int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2448 {
2449 	struct i2o_util_event_register mf;
2450 
2451 	mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2452 	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2453 	mf.msgictx = ii->ii_ictx;
2454 	mf.msgtctx = 0;
2455 	mf.eventmask = mask;
2456 
2457 	/* This message is replied to only when events are signalled. */
2458 	return (iop_post(sc, (u_int32_t *)&mf));
2459 }
2460 
2461 int
2462 iopopen(dev_t dev, int flag, int mode, struct lwp *l)
2463 {
2464 	struct iop_softc *sc;
2465 
2466 	if ((sc = device_lookup_private(&iop_cd, minor(dev))) == NULL)
2467 		return (ENXIO);
2468 	if ((sc->sc_flags & IOP_ONLINE) == 0)
2469 		return (ENXIO);
2470 	if ((sc->sc_flags & IOP_OPEN) != 0)
2471 		return (EBUSY);
2472 	sc->sc_flags |= IOP_OPEN;
2473 
2474 	return (0);
2475 }
2476 
2477 int
2478 iopclose(dev_t dev, int flag, int mode, struct lwp *l)
2480 {
2481 	struct iop_softc *sc;
2482 
2483 	sc = device_lookup_private(&iop_cd, minor(dev));
2484 	sc->sc_flags &= ~IOP_OPEN;
2485 
2486 	return (0);
2487 }
2488 
2489 int
2490 iopioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
2491 {
2492 	struct iop_softc *sc;
2493 	struct iovec *iov;
2494 	int rv, i;
2495 
2496 	sc = device_lookup_private(&iop_cd, minor(dev));
2497 	rv = 0;
2498 
2499 	switch (cmd) {
2500 	case IOPIOCPT:
2501 		rv = kauth_authorize_device_passthru(l->l_cred, dev,
2502 		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
2503 		if (rv)
2504 			return (rv);
2505 
2506 		return (iop_passthrough(sc, (struct ioppt *)data, l->l_proc));
2507 
2508 	case IOPIOCGSTATUS:
2509 		iov = (struct iovec *)data;
2510 		i = sizeof(struct i2o_status);
2511 		if (i > iov->iov_len)
2512 			i = iov->iov_len;
2513 		else
2514 			iov->iov_len = i;
2515 		if ((rv = iop_status_get(sc, 0)) == 0)
2516 			rv = copyout(&sc->sc_status, iov->iov_base, i);
2517 		return (rv);
2518 
2519 	case IOPIOCGLCT:
2520 	case IOPIOCGTIDMAP:
2521 	case IOPIOCRECONFIG:
2522 		break;
2523 
2524 	default:
2525 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2526 		printf("%s: unknown ioctl %lx\n", device_xname(&sc->sc_dv), cmd);
2527 #endif
2528 		return (ENOTTY);
2529 	}
2530 
2531 	mutex_enter(&sc->sc_conflock);
2532 
2533 	switch (cmd) {
2534 	case IOPIOCGLCT:
2535 		iov = (struct iovec *)data;
2536 		i = le16toh(sc->sc_lct->tablesize) << 2;
2537 		if (i > iov->iov_len)
2538 			i = iov->iov_len;
2539 		else
2540 			iov->iov_len = i;
2541 		rv = copyout(sc->sc_lct, iov->iov_base, i);
2542 		break;
2543 
2544 	case IOPIOCRECONFIG:
2545 		rv = iop_reconfigure(sc, 0);
2546 		break;
2547 
2548 	case IOPIOCGTIDMAP:
2549 		iov = (struct iovec *)data;
2550 		i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2551 		if (i > iov->iov_len)
2552 			i = iov->iov_len;
2553 		else
2554 			iov->iov_len = i;
2555 		rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2556 		break;
2557 	}
2558 
2559 	mutex_exit(&sc->sc_conflock);
2560 	return (rv);
2561 }
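
/*
 * Userland sketch of the IOPIOCGSTATUS handshake above (illustrative;
 * assumes an iop(4) device node opened read-write):
 *
 *	struct i2o_status st;
 *	struct iovec iov;
 *
 *	iov.iov_base = &st;
 *	iov.iov_len = sizeof(st);
 *	if (ioctl(fd, IOPIOCGSTATUS, &iov) == -1)
 *		err(EXIT_FAILURE, "IOPIOCGSTATUS");
 */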
2562 
2563 static int
2564 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2565 {
2566 	struct iop_msg *im;
2567 	struct i2o_msg *mf;
2568 	struct ioppt_buf *ptb;
2569 	int rv, i, mapped;
2570 
2571 	mf = NULL;
2572 	im = NULL;
2573 	mapped = 0;
2574 
2575 	if (pt->pt_msglen > sc->sc_framesize ||
2576 	    pt->pt_msglen < sizeof(struct i2o_msg) ||
2577 	    pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2578 	    pt->pt_nbufs < 0 ||
2579 #if 0
2580 	    pt->pt_replylen < 0 ||
2581 #endif
2582 	    pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2583 		return (EINVAL);
2584 
2585 	for (i = 0; i < pt->pt_nbufs; i++)
2586 		if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2587 			rv = ENOMEM;
2588 			goto bad;
2589 		}
2590 
2591 	mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2592 	if (mf == NULL)
2593 		return (ENOMEM);
2594 
2595 	if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2596 		goto bad;
2597 
2598 	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2599 	im->im_rb = (struct i2o_reply *)mf;
2600 	mf->msgictx = IOP_ICTX;
2601 	mf->msgtctx = im->im_tctx;
2602 
2603 	for (i = 0; i < pt->pt_nbufs; i++) {
2604 		ptb = &pt->pt_bufs[i];
2605 		rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2606 		    ptb->ptb_datalen, ptb->ptb_out != 0, p);
2607 		if (rv != 0)
2608 			goto bad;
2609 		mapped = 1;
2610 	}
2611 
2612 	if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2613 		goto bad;
2614 
2615 	i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2616 	if (i > sc->sc_framesize)
2617 		i = sc->sc_framesize;
2618 	if (i > pt->pt_replylen)
2619 		i = pt->pt_replylen;
2620 	rv = copyout(im->im_rb, pt->pt_reply, i);
2621 
2622  bad:
2623 	if (mapped != 0)
2624 		iop_msg_unmap(sc, im);
2625 	if (im != NULL)
2626 		iop_msg_free(sc, im);
2627 	if (mf != NULL)
2628 		free(mf, M_DEVBUF);
2629 	return (rv);
2630 }
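
/*
 * Userland sketch of a pass-through request (illustrative; the fields
 * shown are exactly those consumed above, and the message itself is
 * the caller's responsibility to construct):
 *
 *	struct ioppt pt;
 *
 *	pt.pt_msg = msgbuf;
 *	pt.pt_msglen = msglen;		(bytes, header included)
 *	pt.pt_reply = replybuf;
 *	pt.pt_replylen = replylen;
 *	pt.pt_timo = 5000;		(valid range 1000..300000 ms)
 *	pt.pt_nbufs = 0;		(no data transfer)
 *	if (ioctl(fd, IOPIOCPT, &pt) == -1)
 *		err(EXIT_FAILURE, "IOPIOCPT");
 */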
2631