/*	$NetBSD: icp.c,v 1.33 2019/11/10 21:16:35 chs Exp $	*/

/*-
 * Copyright (c) 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, and by Jason R. Thorpe of Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1999, 2000 Niklas Hallqvist.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Niklas Hallqvist.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from OpenBSD: gdt_common.c,v 1.12 2001/07/04 06:43:18 niklas Exp
 */

/*
 * This driver would not have been written were it not for the hardware
 * donations from both ICP-Vortex and Öko.neT.  I want to thank them for
 * their support.
 *
 * Re-worked for NetBSD by Andrew Doran.  Test hardware kindly supplied by
 * Intel.
 *
 * Support for the ICP-Vortex management tools added by
 * Jason R. Thorpe of Wasabi Systems, Inc., based on code
 * provided by Achim Leubner <achim.leubner@intel.com>.
 *
 * Additional support for dynamic rescan of cacheservice drives by
 * Jason R. Thorpe of Wasabi Systems, Inc.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: icp.c,v 1.33 2019/11/10 21:16:35 chs Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/disk.h>

#include <sys/bswap.h>
#include <sys/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/icpreg.h>
#include <dev/ic/icpvar.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include "locators.h"

int	icp_async_event(struct icp_softc *, int);
void	icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic);
void	icp_chain(struct icp_softc *);
int	icp_print(void *, const char *);
void	icp_watchdog(void *);
void	icp_ucmd_intr(struct icp_ccb *);
void	icp_recompute_openings(struct icp_softc *);

int	icp_count;	/* total # of controllers, for ioctl interface */

/*
 * Statistics for the ioctl interface to query.
 *
 * XXX Global.  They should probably be made per-controller
 * XXX at some point.
 */
gdt_statist_t icp_stats;

int
icp_init(struct icp_softc *icp, const char *intrstr)
{
	struct icp_attach_args icpa;
	struct icp_binfo binfo;
	struct icp_ccb *ic;
	u_int16_t cdev_cnt;
	int i, j, state, feat, nsegs, rv;
	int locs[ICPCF_NLOCS];

	state = 0;

	if (intrstr != NULL)
		aprint_normal_dev(icp->icp_dv, "interrupting at %s\n",
		    intrstr);

	SIMPLEQ_INIT(&icp->icp_ccb_queue);
	SIMPLEQ_INIT(&icp->icp_ccb_freelist);
	SIMPLEQ_INIT(&icp->icp_ucmd_queue);
	callout_init(&icp->icp_wdog_callout, 0);

	/*
	 * Allocate a scratch area.
	 */
	if (bus_dmamap_create(icp->icp_dmat, ICP_SCRATCH_SIZE, 1,
	    ICP_SCRATCH_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &icp->icp_scr_dmamap) != 0) {
		aprint_error_dev(icp->icp_dv, "cannot create scratch dmamap\n");
		return (1);
	}
	state++;

	if (bus_dmamem_alloc(icp->icp_dmat, ICP_SCRATCH_SIZE, PAGE_SIZE, 0,
	    icp->icp_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		aprint_error_dev(icp->icp_dv, "cannot alloc scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(icp->icp_dmat, icp->icp_scr_seg, nsegs,
	    ICP_SCRATCH_SIZE, &icp->icp_scr, 0)) {
		aprint_error_dev(icp->icp_dv, "cannot map scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(icp->icp_dmat, icp->icp_scr_dmamap, icp->icp_scr,
	    ICP_SCRATCH_SIZE, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(icp->icp_dv, "cannot load scratch dmamap\n");
		goto bail_out;
	}
	state++;

	/*
	 * Allocate and initialize the command control blocks.
	 */
	icp->icp_ccbs = ic = malloc(sizeof(*ic) * ICP_NCCBS, M_DEVBUF,
	    M_WAITOK | M_ZERO);
	state++;

	for (i = 0; i < ICP_NCCBS; i++, ic++) {
		/*
		 * The first two command indexes have special meanings
		 * (asynchronous events and driver-internal errors; see
		 * icp_intr()), so we can't use them.
		 */
		ic->ic_ident = i + 2;
		rv = bus_dmamap_create(icp->icp_dmat, ICP_MAX_XFER,
		    ICP_MAXSG, ICP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ic->ic_xfer_map);
		if (rv != 0)
			break;
		icp->icp_nccbs++;
		icp_ccb_free(icp, ic);
	}
#ifdef DIAGNOSTIC
	if (icp->icp_nccbs != ICP_NCCBS)
		aprint_error_dev(icp->icp_dv, "%d/%d CCBs usable\n",
		    icp->icp_nccbs, ICP_NCCBS);
#endif

	/*
	 * Initialize the controller.
	 */
	if (!icp_cmd(icp, ICP_SCREENSERVICE, ICP_INIT, 0, 0, 0)) {
		aprint_error_dev(icp->icp_dv, "screen service init error %d\n",
		    icp->icp_status);
		goto bail_out;
	}

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
		aprint_error_dev(icp->icp_dv, "cache service init error %d\n",
		    icp->icp_status);
		goto bail_out;
	}

	icp_cmd(icp, ICP_CACHESERVICE, ICP_UNFREEZE_IO, 0, 0, 0);

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_MOUNT, 0xffff, 1, 0)) {
		aprint_error_dev(icp->icp_dv, "cache service mount error %d\n",
		    icp->icp_status);
		goto bail_out;
	}

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
		aprint_error_dev(icp->icp_dv,
		    "cache service post-mount init error %d\n",
		    icp->icp_status);
		goto bail_out;
	}
	cdev_cnt = (u_int16_t)icp->icp_info;
	icp->icp_fw_vers = icp->icp_service;

	if (!icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_INIT, 0, 0, 0)) {
		aprint_error_dev(icp->icp_dv, "raw service init error %d\n",
		    icp->icp_status);
		goto bail_out;
	}

	/*
	 * Set/get raw service features (scatter/gather).
	 */
	feat = 0;
	if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_SET_FEAT, ICP_SCATTER_GATHER,
	    0, 0))
		if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_GET_FEAT, 0, 0, 0))
			feat = icp->icp_info;

	if ((feat & ICP_SCATTER_GATHER) == 0) {
#ifdef DIAGNOSTIC
		aprint_normal_dev(icp->icp_dv,
		    "scatter/gather not supported (raw service)\n");
#endif
	} else
		icp->icp_features |= ICP_FEAT_RAWSERVICE;

	/*
	 * Set/get cache service features (scatter/gather).
	 */
	feat = 0;
	if (icp_cmd(icp, ICP_CACHESERVICE, ICP_SET_FEAT, 0,
	    ICP_SCATTER_GATHER, 0))
		if (icp_cmd(icp, ICP_CACHESERVICE, ICP_GET_FEAT, 0, 0, 0))
			feat = icp->icp_info;

	if ((feat & ICP_SCATTER_GATHER) == 0) {
#ifdef DIAGNOSTIC
		aprint_normal_dev(icp->icp_dv,
		    "scatter/gather not supported (cache service)\n");
#endif
	} else
		icp->icp_features |= ICP_FEAT_CACHESERVICE;

	/*
	 * Pull some information from the board and dump.
	 */
	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL, ICP_BOARD_INFO,
	    ICP_INVALID_CHANNEL, sizeof(struct icp_binfo))) {
		aprint_error_dev(icp->icp_dv, "unable to retrieve board info\n");
		goto bail_out;
	}
	memcpy(&binfo, icp->icp_scr, sizeof(binfo));

	aprint_normal_dev(icp->icp_dv,
	    "model <%s>, firmware <%s>, %d channel(s), %dMB memory\n",
	    binfo.bi_type_string, binfo.bi_raid_string,
	    binfo.bi_chan_count, le32toh(binfo.bi_memsize) >> 20);

	/*
	 * Determine the number of devices, and number of openings per
	 * device.
	 */
	if (icp->icp_features & ICP_FEAT_CACHESERVICE) {
		for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) {
			if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, j, 0,
			    0))
				continue;

			icp->icp_cdr[j].cd_size = icp->icp_info;
			if (icp->icp_cdr[j].cd_size != 0)
				icp->icp_ndevs++;

			if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, j, 0,
			    0))
				icp->icp_cdr[j].cd_type = icp->icp_info;
		}
	}

	if (icp->icp_features & ICP_FEAT_RAWSERVICE) {
		icp->icp_nchan = binfo.bi_chan_count;
		icp->icp_ndevs += icp->icp_nchan;
	}

	icp_recompute_openings(icp);

	/*
	 * Attach SCSI channels.
	 */
	if (icp->icp_features & ICP_FEAT_RAWSERVICE) {
		struct icp_ioc_version *iv;
		struct icp_rawioc *ri;
		struct icp_getch *gc;

		iv = (struct icp_ioc_version *)icp->icp_scr;
		iv->iv_version = htole32(ICP_IOC_NEWEST);
		iv->iv_listents = ICP_MAXBUS;
		iv->iv_firstchan = 0;
		iv->iv_lastchan = ICP_MAXBUS - 1;
		iv->iv_listoffset = htole32(sizeof(*iv));

		if (icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
		    ICP_IOCHAN_RAW_DESC, ICP_INVALID_CHANNEL,
		    sizeof(*iv) + ICP_MAXBUS * sizeof(*ri))) {
			ri = (struct icp_rawioc *)(iv + 1);
			for (j = 0; j < binfo.bi_chan_count; j++, ri++)
				icp->icp_bus_id[j] = ri->ri_procid;
		} else {
			/*
			 * Fall back to the old method.
			 */
			gc = (struct icp_getch *)icp->icp_scr;

			for (j = 0; j < binfo.bi_chan_count; j++) {
				if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
				    ICP_SCSI_CHAN_CNT | ICP_L_CTRL_PATTERN,
				    ICP_IO_CHANNEL | ICP_INVALID_CHANNEL,
				    sizeof(*gc))) {
					aprint_error_dev(icp->icp_dv,
					    "unable to get chan info\n");
					goto bail_out;
				}
				icp->icp_bus_id[j] = gc->gc_scsiid;
			}
		}

		for (j = 0; j < binfo.bi_chan_count; j++) {
			if (icp->icp_bus_id[j] > ICP_MAXID_FC)
				icp->icp_bus_id[j] = ICP_MAXID_FC;

			icpa.icpa_unit = j + ICPA_UNIT_SCSI;

			locs[ICPCF_UNIT] = j + ICPA_UNIT_SCSI;

			icp->icp_children[icpa.icpa_unit] =
				config_found_sm_loc(icp->icp_dv, "icp", locs,
					&icpa, icp_print, config_stdsubmatch);
		}
	}

	/*
	 * Attach cache devices.
	 */
	if (icp->icp_features & ICP_FEAT_CACHESERVICE) {
		for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) {
			if (icp->icp_cdr[j].cd_size == 0)
				continue;

			icpa.icpa_unit = j;

			locs[ICPCF_UNIT] = j;

			icp->icp_children[icpa.icpa_unit] =
			    config_found_sm_loc(icp->icp_dv, "icp", locs,
				&icpa, icp_print, config_stdsubmatch);
		}
	}

	/*
	 * Start the watchdog.
	 */
	icp_watchdog(icp);

	/*
	 * Count the controller, and we're done!
	 */
	if (icp_count++ == 0)
		mutex_init(&icp_ioctl_mutex, MUTEX_DEFAULT, IPL_NONE);

	return (0);

 bail_out:
	if (state > 4)
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(icp->icp_dmat,
			    icp->icp_ccbs[j].ic_xfer_map);
	if (state > 3)
		free(icp->icp_ccbs, M_DEVBUF);
	if (state > 2)
		bus_dmamap_unload(icp->icp_dmat, icp->icp_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(icp->icp_dmat, icp->icp_scr,
		    ICP_SCRATCH_SIZE);
	if (state > 0)
		bus_dmamem_free(icp->icp_dmat, icp->icp_scr_seg, nsegs);
	bus_dmamap_destroy(icp->icp_dmat, icp->icp_scr_dmamap);

	return (1);
}

void
icp_register_servicecb(struct icp_softc *icp, int unit,
    const struct icp_servicecb *cb)
{

	icp->icp_servicecb[unit] = cb;
}

void
icp_rescan(struct icp_softc *icp, int unit)
{
	struct icp_attach_args icpa;
	u_int newsize, newtype;
	int locs[ICPCF_NLOCS];

	/*
	 * NOTE: It is very important that the queue be frozen and that
	 * no commands be running when this is called.  The ioctl mutex
	 * must also be held.
	 */

	KASSERT(icp->icp_qfreeze != 0);
	KASSERT(icp->icp_running == 0);
	KASSERT(unit < ICP_MAX_HDRIVES);

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, unit, 0, 0)) {
#ifdef ICP_DEBUG
		printf("%s: rescan: unit %d ICP_INFO failed -> 0x%04x\n",
		    device_xname(icp->icp_dv), unit, icp->icp_status);
#endif
		goto gone;
	}
	if ((newsize = icp->icp_info) == 0) {
#ifdef ICP_DEBUG
		printf("%s: rescan: unit %d has zero size\n",
		    device_xname(icp->icp_dv), unit);
#endif
 gone:
		/*
		 * Host drive is no longer present; detach if a child
		 * is currently there.
		 */
		if (icp->icp_cdr[unit].cd_size != 0)
			icp->icp_ndevs--;
		icp->icp_cdr[unit].cd_size = 0;
		if (icp->icp_children[unit] != NULL) {
			(void) config_detach(icp->icp_children[unit],
			    DETACH_FORCE);
			icp->icp_children[unit] = NULL;
		}
		return;
	}

	if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, unit, 0, 0))
		newtype = icp->icp_info;
	else {
#ifdef ICP_DEBUG
		printf("%s: rescan: unit %d ICP_DEVTYPE failed\n",
		    device_xname(icp->icp_dv), unit);
#endif
		newtype = 0;	/* XXX? */
	}

#ifdef ICP_DEBUG
	printf("%s: rescan: unit %d old %u/%u, new %u/%u\n",
	    device_xname(icp->icp_dv), unit, icp->icp_cdr[unit].cd_size,
	    icp->icp_cdr[unit].cd_type, newsize, newtype);
#endif

	/*
	 * If the type or size changed, detach any old child (if it exists)
	 * and attach a new one.
	 */
	if (icp->icp_children[unit] == NULL ||
	    newsize != icp->icp_cdr[unit].cd_size ||
	    newtype != icp->icp_cdr[unit].cd_type) {
		if (icp->icp_cdr[unit].cd_size == 0)
			icp->icp_ndevs++;
		icp->icp_cdr[unit].cd_size = newsize;
		icp->icp_cdr[unit].cd_type = newtype;
		if (icp->icp_children[unit] != NULL)
			(void) config_detach(icp->icp_children[unit],
			    DETACH_FORCE);

		icpa.icpa_unit = unit;

		locs[ICPCF_UNIT] = unit;

		icp->icp_children[unit] = config_found_sm_loc(icp->icp_dv,
			"icp", locs, &icpa, icp_print, config_stdsubmatch);
	}

	icp_recompute_openings(icp);
}

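/*
 * Illustrative sketch (not part of the driver) of how a caller is
 * expected to satisfy the constraints noted in icp_rescan() above:
 *
 *	mutex_enter(&icp_ioctl_mutex);
 *	if (icp_freeze(icp) == 0) {
 *		icp_rescan(icp, unit);
 *		icp_unfreeze(icp);
 *	}
 *	mutex_exit(&icp_ioctl_mutex);
 *
 * (icp_freeze() drops the freeze itself if its tsleep() is
 * interrupted, so the caller unfreezes only on success.)
 */
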
void
icp_rescan_all(struct icp_softc *icp)
{
	int unit;
	u_int16_t cdev_cnt;

	/*
	 * This is the old method of rescanning the host drives.  We
	 * start by reinitializing the cache service.
	 */
	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
		printf("%s: unable to re-initialize cache service for rescan\n",
		    device_xname(icp->icp_dv));
		return;
	}
	cdev_cnt = (u_int16_t) icp->icp_info;

	/* For each host drive, do the new-style rescan. */
	for (unit = 0; unit < cdev_cnt && unit < ICP_MAX_HDRIVES; unit++)
		icp_rescan(icp, unit);

	/* Now detach anything in the slots after cdev_cnt. */
	for (; unit < ICP_MAX_HDRIVES; unit++) {
		if (icp->icp_cdr[unit].cd_size != 0) {
#ifdef ICP_DEBUG
			printf("%s: rescan all: unit %d < new cdev_cnt (%d)\n",
			    device_xname(icp->icp_dv), unit, cdev_cnt);
#endif
			icp->icp_ndevs--;
			icp->icp_cdr[unit].cd_size = 0;
			if (icp->icp_children[unit] != NULL) {
				(void) config_detach(icp->icp_children[unit],
				    DETACH_FORCE);
				icp->icp_children[unit] = NULL;
			}
		}
	}

	icp_recompute_openings(icp);
}

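/*
 * Divide the usable CCBs evenly among the attached units: each unit
 * gets (icp_nccbs - ICP_NCCB_RESERVE) / icp_ndevs openings.  For
 * example (figures illustrative only), 64 CCBs with a reserve of 4
 * and 4 devices would yield 15 openings per unit.
 */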
void
icp_recompute_openings(struct icp_softc *icp)
{
	int unit, openings;

	if (icp->icp_ndevs != 0)
		openings =
		    (icp->icp_nccbs - ICP_NCCB_RESERVE) / icp->icp_ndevs;
	else
		openings = 0;
	if (openings == icp->icp_openings)
		return;
	icp->icp_openings = openings;

#ifdef ICP_DEBUG
	printf("%s: %d device%s, %d openings per device\n",
	    device_xname(icp->icp_dv), icp->icp_ndevs,
	    icp->icp_ndevs == 1 ? "" : "s", icp->icp_openings);
#endif

	for (unit = 0; unit < ICP_MAX_HDRIVES + ICP_MAXBUS; unit++) {
		if (icp->icp_children[unit] != NULL)
			(*icp->icp_servicecb[unit]->iscb_openings)(
			    icp->icp_children[unit], icp->icp_openings);
	}
}

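/*
 * Watchdog: periodically poll for completions and restart the request
 * queue in case an interrupt was lost.  The callout re-arms itself
 * every ICP_WATCHDOG_FREQ seconds.
 */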
void
icp_watchdog(void *cookie)
{
	struct icp_softc *icp;
	int s;

	icp = cookie;

	s = splbio();
	icp_intr(icp);
	if (ICP_HAS_WORK(icp))
		icp_ccb_enqueue(icp, NULL);
	splx(s);

	callout_reset(&icp->icp_wdog_callout, hz * ICP_WATCHDOG_FREQ,
	    icp_watchdog, icp);
}

int
icp_print(void *aux, const char *pnp)
{
	struct icp_attach_args *icpa;
	const char *str;

	icpa = (struct icp_attach_args *)aux;

	if (pnp != NULL) {
		if (icpa->icpa_unit < ICPA_UNIT_SCSI)
			str = "block device";
		else
			str = "SCSI channel";
		aprint_normal("%s at %s", str, pnp);
	}
	aprint_normal(" unit %d", icpa->icpa_unit);

	return (UNCONF);
}

int
icp_async_event(struct icp_softc *icp, int service)
{

	if (service == ICP_SCREENSERVICE) {
		if (icp->icp_status == ICP_S_MSG_REQUEST) {
			/* XXX */
		}
	} else {
		if ((icp->icp_fw_vers & 0xff) >= 0x1a) {
			icp->icp_evt.size = 0;
			icp->icp_evt.eu.async.ionode =
			    device_unit(icp->icp_dv);
			icp->icp_evt.eu.async.status = icp->icp_status;
			/*
			 * Severity and event string are filled in by the
			 * hardware interface interrupt handler.
			 */
			printf("%s: %s\n", device_xname(icp->icp_dv),
			    icp->icp_evt.event_string);
		} else {
			icp->icp_evt.size = sizeof(icp->icp_evt.eu.async);
			icp->icp_evt.eu.async.ionode =
			    device_unit(icp->icp_dv);
			icp->icp_evt.eu.async.service = service;
			icp->icp_evt.eu.async.status = icp->icp_status;
			icp->icp_evt.eu.async.info = icp->icp_info;
			/* XXXJRT FIX THIS */
			*(u_int32_t *) icp->icp_evt.eu.async.scsi_coord =
			    icp->icp_info2;
		}
		icp_store_event(icp, GDT_ES_ASYNC, service, &icp->icp_evt);
	}

	return (0);
}

int
icp_intr(void *cookie)
{
	struct icp_softc *icp;
	struct icp_intr_ctx ctx;
	struct icp_ccb *ic;

	icp = cookie;

	ctx.istatus = (*icp->icp_get_status)(icp);
	if (!ctx.istatus) {
		icp->icp_status = ICP_S_NO_STATUS;
		return (0);
	}

	(*icp->icp_intr)(icp, &ctx);

	icp->icp_status = ctx.cmd_status;
	icp->icp_service = ctx.service;
	icp->icp_info = ctx.info;
	icp->icp_info2 = ctx.info2;

	switch (ctx.istatus) {
	case ICP_ASYNCINDEX:
		icp_async_event(icp, ctx.service);
		return (1);

	case ICP_SPEZINDEX:
		aprint_error_dev(icp->icp_dv,
		    "uninitialized or unknown service (%d/%d)\n",
		    ctx.info, ctx.info2);
		icp->icp_evt.size = sizeof(icp->icp_evt.eu.driver);
		icp->icp_evt.eu.driver.ionode = device_unit(icp->icp_dv);
		icp_store_event(icp, GDT_ES_DRIVER, 4, &icp->icp_evt);
		return (1);
	}

	if ((ctx.istatus - 2) >= icp->icp_nccbs)
		panic("icp_intr: bad command index returned");

	ic = &icp->icp_ccbs[ctx.istatus - 2];
	ic->ic_status = icp->icp_status;

	if ((ic->ic_flags & IC_ALLOCED) == 0) {
		/* XXX ICP's "iir" driver just sends an event here. */
		panic("icp_intr: inactive CCB identified");
	}

	/*
	 * Try to protect ourselves from the running command count already
	 * being 0 (e.g. if a polled command times out).
	 */
	KDASSERT(icp->icp_running != 0);
	if (--icp->icp_running == 0 &&
	    (icp->icp_flags & ICP_F_WAIT_FREEZE) != 0) {
		icp->icp_flags &= ~ICP_F_WAIT_FREEZE;
		wakeup(&icp->icp_qfreeze);
	}

	switch (icp->icp_status) {
	case ICP_S_BSY:
#ifdef ICP_DEBUG
		printf("%s: ICP_S_BSY received\n", device_xname(icp->icp_dv));
#endif
		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
			SIMPLEQ_INSERT_HEAD(&icp->icp_ucmd_queue, ic, ic_chain);
		else
			SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_queue, ic, ic_chain);
		break;

	default:
		ic->ic_flags |= IC_COMPLETE;

		if ((ic->ic_flags & IC_WAITING) != 0)
			wakeup(ic);
		else if (ic->ic_intr != NULL)
			(*ic->ic_intr)(ic);

		if (ICP_HAS_WORK(icp))
			icp_ccb_enqueue(icp, NULL);

		break;
	}

	return (1);
}

struct icp_ucmd_ctx {
	gdt_ucmd_t *iu_ucmd;
	u_int32_t iu_cnt;
};

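/*
 * Completion handler for user (ioctl) commands: copy the status and
 * any returned data from the shared scratch area back into the
 * gdt_ucmd_t, release the scratch area for the next user command, and
 * wake the waiter in icp_ccb_wait_user().
 */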
void
icp_ucmd_intr(struct icp_ccb *ic)
{
	struct icp_softc *icp = device_private(ic->ic_dv);
	struct icp_ucmd_ctx *iu = ic->ic_context;
	gdt_ucmd_t *ucmd = iu->iu_ucmd;

	ucmd->status = icp->icp_status;
	ucmd->info = icp->icp_info;

	if (iu->iu_cnt != 0) {
		bus_dmamap_sync(icp->icp_dmat,
		    icp->icp_scr_dmamap,
		    ICP_SCRATCH_UCMD, iu->iu_cnt,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		memcpy(ucmd->data,
		    (char *)icp->icp_scr + ICP_SCRATCH_UCMD, iu->iu_cnt);
	}

	icp->icp_ucmd_ccb = NULL;

	ic->ic_flags |= IC_COMPLETE;
	wakeup(ic);
}

/*
 * Run an internal command and poll for its completion.
 *
 * NOTE: We assume that it is safe to sleep here!  (Both
 * icp_ccb_alloc_wait() and icp_ccb_poll() may tsleep() waiting for
 * resources.)
 */
int
icp_cmd(struct icp_softc *icp, u_int8_t service, u_int16_t opcode,
	u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
{
	struct icp_ioctlcmd *icmd;
	struct icp_cachecmd *cc;
	struct icp_rawcmd *rc;
	int retries, rv;
	struct icp_ccb *ic;

	retries = ICP_RETRIES;

	do {
		ic = icp_ccb_alloc_wait(icp);
		memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd));
		ic->ic_cmd.cmd_opcode = htole16(opcode);

		switch (service) {
		case ICP_CACHESERVICE:
			if (opcode == ICP_IOCTL) {
				icmd = &ic->ic_cmd.cmd_packet.ic;
				icmd->ic_subfunc = htole16(arg1);
				icmd->ic_channel = htole32(arg2);
				icmd->ic_bufsize = htole32(arg3);
				icmd->ic_addr =
				    htole32(icp->icp_scr_seg[0].ds_addr);

				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap, 0, arg3,
				    BUS_DMASYNC_PREWRITE |
				    BUS_DMASYNC_PREREAD);
			} else {
				cc = &ic->ic_cmd.cmd_packet.cc;
				cc->cc_deviceno = htole16(arg1);
				cc->cc_blockno = htole32(arg2);
			}
			break;

		case ICP_SCSIRAWSERVICE:
			rc = &ic->ic_cmd.cmd_packet.rc;
			rc->rc_direction = htole32(arg1);
			rc->rc_bus = arg2;
			rc->rc_target = arg3;
			rc->rc_lun = arg3 >> 8;
			break;
		}

		ic->ic_service = service;
		ic->ic_cmdlen = sizeof(ic->ic_cmd);
		rv = icp_ccb_poll(icp, ic, 10000);

		switch (service) {
		case ICP_CACHESERVICE:
			if (opcode == ICP_IOCTL) {
				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap, 0, arg3,
				    BUS_DMASYNC_POSTWRITE |
				    BUS_DMASYNC_POSTREAD);
			}
			break;
		}

		icp_ccb_free(icp, ic);
	} while (rv != 0 && --retries > 0);

	return (icp->icp_status == ICP_S_OK);
}

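/*
 * Example (illustrative only, mirroring calls made elsewhere in this
 * file): query the size of host drive 0 via the cache service; on
 * success the result is left in icp->icp_info.
 *
 *	if (icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, 0, 0, 0))
 *		size = icp->icp_info;
 */
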
int
icp_ucmd(struct icp_softc *icp, gdt_ucmd_t *ucmd)
{
	struct icp_ccb *ic;
	struct icp_ucmd_ctx iu;
	u_int32_t cnt;
	int error;

	if (ucmd->service == ICP_CACHESERVICE) {
		if (ucmd->command.cmd_opcode == ICP_IOCTL) {
			cnt = ucmd->command.cmd_packet.ic.ic_bufsize;
			if (cnt > GDT_SCRATCH_SZ) {
				aprint_error_dev(icp->icp_dv,
				    "scratch buffer too small (%d/%d)\n",
				    GDT_SCRATCH_SZ, cnt);
				return (EINVAL);
			}
		} else {
			cnt = ucmd->command.cmd_packet.cc.cc_blockcnt *
			    ICP_SECTOR_SIZE;
			if (cnt > GDT_SCRATCH_SZ) {
				aprint_error_dev(icp->icp_dv,
				    "scratch buffer too small (%d/%d)\n",
				    GDT_SCRATCH_SZ, cnt);
				return (EINVAL);
			}
		}
	} else {
		cnt = ucmd->command.cmd_packet.rc.rc_sdlen +
		    ucmd->command.cmd_packet.rc.rc_sense_len;
		if (cnt > GDT_SCRATCH_SZ) {
			aprint_error_dev(icp->icp_dv,
			    "scratch buffer too small (%d/%d)\n",
			    GDT_SCRATCH_SZ, cnt);
			return (EINVAL);
		}
	}

	iu.iu_ucmd = ucmd;
	iu.iu_cnt = cnt;

	ic = icp_ccb_alloc_wait(icp);
	memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd));
	ic->ic_cmd.cmd_opcode = htole16(ucmd->command.cmd_opcode);

	if (ucmd->service == ICP_CACHESERVICE) {
		if (ucmd->command.cmd_opcode == ICP_IOCTL) {
			struct icp_ioctlcmd *icmd, *uicmd;

			icmd = &ic->ic_cmd.cmd_packet.ic;
			uicmd = &ucmd->command.cmd_packet.ic;

			icmd->ic_subfunc = htole16(uicmd->ic_subfunc);
			icmd->ic_channel = htole32(uicmd->ic_channel);
			icmd->ic_bufsize = htole32(uicmd->ic_bufsize);
			icmd->ic_addr =
			    htole32(icp->icp_scr_seg[0].ds_addr +
				    ICP_SCRATCH_UCMD);
		} else {
			struct icp_cachecmd *cc, *ucc;

			cc = &ic->ic_cmd.cmd_packet.cc;
			ucc = &ucmd->command.cmd_packet.cc;

			cc->cc_deviceno = htole16(ucc->cc_deviceno);
			cc->cc_blockno = htole32(ucc->cc_blockno);
			cc->cc_blockcnt = htole32(ucc->cc_blockcnt);
			cc->cc_addr = htole32(0xffffffffU);
			cc->cc_nsgent = htole32(1);
			cc->cc_sg[0].sg_addr =
			    htole32(icp->icp_scr_seg[0].ds_addr +
				    ICP_SCRATCH_UCMD);
			cc->cc_sg[0].sg_len = htole32(cnt);
		}
	} else {
		struct icp_rawcmd *rc, *urc;

		rc = &ic->ic_cmd.cmd_packet.rc;
		urc = &ucmd->command.cmd_packet.rc;

		rc->rc_direction = htole32(urc->rc_direction);
		rc->rc_sdata = htole32(0xffffffffU);
		rc->rc_sdlen = htole32(urc->rc_sdlen);
		rc->rc_clen = htole32(urc->rc_clen);
		memcpy(rc->rc_cdb, urc->rc_cdb, sizeof(rc->rc_cdb));
		rc->rc_target = urc->rc_target;
		rc->rc_lun = urc->rc_lun;
		rc->rc_bus = urc->rc_bus;
		rc->rc_sense_len = htole32(urc->rc_sense_len);
		rc->rc_sense_addr =
		    htole32(icp->icp_scr_seg[0].ds_addr +
			    ICP_SCRATCH_UCMD + urc->rc_sdlen);
		rc->rc_nsgent = htole32(1);
		rc->rc_sg[0].sg_addr =
		    htole32(icp->icp_scr_seg[0].ds_addr + ICP_SCRATCH_UCMD);
		rc->rc_sg[0].sg_len = htole32(cnt - urc->rc_sense_len);
	}

	ic->ic_service = ucmd->service;
	ic->ic_cmdlen = sizeof(ic->ic_cmd);
	ic->ic_context = &iu;

	/*
	 * XXX What units are ucmd->timeout in?  Until we know, we
	 * XXX just pull a number out of thin air.
	 */
	if (__predict_false((error = icp_ccb_wait_user(icp, ic, 30000)) != 0))
		aprint_error_dev(icp->icp_dv,
		    "error %d waiting for ucmd to complete\n", error);

	/* icp_ucmd_intr() has updated ucmd. */
	icp_ccb_free(icp, ic);

	return (error);
}

struct icp_ccb *
icp_ccb_alloc(struct icp_softc *icp)
{
	struct icp_ccb *ic;
	int s;

	s = splbio();
	if (__predict_false((ic =
			     SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL)) {
		splx(s);
		return (NULL);
	}
	SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
	splx(s);

	ic->ic_flags = IC_ALLOCED;
	return (ic);
}

struct icp_ccb *
icp_ccb_alloc_wait(struct icp_softc *icp)
{
	struct icp_ccb *ic;
	int s;

	s = splbio();
	while ((ic = SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL) {
		icp->icp_flags |= ICP_F_WAIT_CCB;
		(void) tsleep(&icp->icp_ccb_freelist, PRIBIO, "icpccb", 0);
	}
	SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
	splx(s);

	ic->ic_flags = IC_ALLOCED;
	return (ic);
}

void
icp_ccb_free(struct icp_softc *icp, struct icp_ccb *ic)
{
	int s;

	s = splbio();
	ic->ic_flags = 0;
	ic->ic_intr = NULL;
	SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_freelist, ic, ic_chain);
	if (__predict_false((icp->icp_flags & ICP_F_WAIT_CCB) != 0)) {
		icp->icp_flags &= ~ICP_F_WAIT_CCB;
		wakeup(&icp->icp_ccb_freelist);
	}
	splx(s);
}

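/*
 * Typical CCB life cycle (illustrative sketch, condensed from
 * icp_cmd() above): allocate, fill in the command packet, poll for
 * completion, then return the CCB to the freelist.
 *
 *	ic = icp_ccb_alloc_wait(icp);
 *	memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd));
 *	ic->ic_cmd.cmd_opcode = htole16(opcode);
 *	ic->ic_service = service;
 *	ic->ic_cmdlen = sizeof(ic->ic_cmd);
 *	rv = icp_ccb_poll(icp, ic, 10000);
 *	icp_ccb_free(icp, ic);
 */
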
void
icp_ccb_enqueue(struct icp_softc *icp, struct icp_ccb *ic)
{
	int s;

	s = splbio();

	if (ic != NULL) {
		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
			SIMPLEQ_INSERT_TAIL(&icp->icp_ucmd_queue, ic, ic_chain);
		else
			SIMPLEQ_INSERT_TAIL(&icp->icp_ccb_queue, ic, ic_chain);
	}

	while (icp->icp_qfreeze == 0) {
		if (__predict_false((ic =
			    SIMPLEQ_FIRST(&icp->icp_ucmd_queue)) != NULL)) {
			struct icp_ucmd_ctx *iu = ic->ic_context;
			gdt_ucmd_t *ucmd = iu->iu_ucmd;

			/*
			 * All user-generated commands share the same
			 * scratch space, so if one is already running,
			 * we have to stall the command queue.
			 */
			if (icp->icp_ucmd_ccb != NULL)
				break;
			if ((*icp->icp_test_busy)(icp))
				break;
			icp->icp_ucmd_ccb = ic;

			if (iu->iu_cnt != 0) {
				memcpy((char *)icp->icp_scr + ICP_SCRATCH_UCMD,
				    ucmd->data, iu->iu_cnt);
				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap,
				    ICP_SCRATCH_UCMD, iu->iu_cnt,
				    BUS_DMASYNC_PREREAD |
				    BUS_DMASYNC_PREWRITE);
			}
		} else if (__predict_true((ic =
				SIMPLEQ_FIRST(&icp->icp_ccb_queue)) != NULL)) {
			if ((*icp->icp_test_busy)(icp))
				break;
		} else {
			/* no command found */
			break;
		}
		icp_ccb_submit(icp, ic);
		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
			SIMPLEQ_REMOVE_HEAD(&icp->icp_ucmd_queue, ic_chain);
		else
			SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_queue, ic_chain);
	}

	splx(s);
}

int
icp_ccb_map(struct icp_softc *icp, struct icp_ccb *ic, void *data, int size,
	    int dir)
{
	struct icp_sg *sg;
	int nsegs, i, rv;
	bus_dmamap_t xfer;

	xfer = ic->ic_xfer_map;

	rv = bus_dmamap_load(icp->icp_dmat, xfer, data, size, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((dir & IC_XFER_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));
	if (rv != 0)
		return (rv);

	nsegs = xfer->dm_nsegs;
	ic->ic_xfer_size = size;
	ic->ic_nsgent = nsegs;
	ic->ic_flags |= dir;
	sg = ic->ic_sg;

	if (sg != NULL) {
		for (i = 0; i < nsegs; i++, sg++) {
			sg->sg_addr = htole32(xfer->dm_segs[i].ds_addr);
			sg->sg_len = htole32(xfer->dm_segs[i].ds_len);
		}
	} else if (nsegs > 1)
		panic("icp_ccb_map: no SG list specified, but nsegs > 1");

	if ((dir & IC_XFER_OUT) != 0)
		i = BUS_DMASYNC_PREWRITE;
	else /* if ((dir & IC_XFER_IN) != 0) */
		i = BUS_DMASYNC_PREREAD;

	bus_dmamap_sync(icp->icp_dmat, xfer, 0, ic->ic_xfer_size, i);
	return (0);
}

void
icp_ccb_unmap(struct icp_softc *icp, struct icp_ccb *ic)
{
	int i;

	if ((ic->ic_flags & IC_XFER_OUT) != 0)
		i = BUS_DMASYNC_POSTWRITE;
	else /* if ((ic->ic_flags & IC_XFER_IN) != 0) */
		i = BUS_DMASYNC_POSTREAD;

	bus_dmamap_sync(icp->icp_dmat, ic->ic_xfer_map, 0, ic->ic_xfer_size, i);
	bus_dmamap_unload(icp->icp_dmat, ic->ic_xfer_map);
}

int
icp_ccb_poll(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
	int s, rv;

	s = splbio();

	for (timo = ICP_BUSY_WAIT_MS * 100; timo != 0; timo--) {
		if (!(*icp->icp_test_busy)(icp))
			break;
		DELAY(10);
	}
	if (timo == 0) {
		printf("%s: submit: busy\n", device_xname(icp->icp_dv));
		splx(s);
		return (EAGAIN);
	}

	icp_ccb_submit(icp, ic);

	if (cold) {
		for (timo *= 10; timo != 0; timo--) {
			DELAY(100);
			icp_intr(icp);
			if ((ic->ic_flags & IC_COMPLETE) != 0)
				break;
		}
	} else {
		ic->ic_flags |= IC_WAITING;
		while ((ic->ic_flags & IC_COMPLETE) == 0) {
			if ((rv = tsleep(ic, PRIBIO, "icpwccb",
					 mstohz(timo))) != 0) {
				timo = 0;
				break;
			}
		}
	}

	if (timo != 0) {
		if (ic->ic_status != ICP_S_OK) {
#ifdef ICP_DEBUG
			printf("%s: request failed; status=0x%04x\n",
			    device_xname(icp->icp_dv), ic->ic_status);
#endif
			rv = EIO;
		} else
			rv = 0;
	} else {
		aprint_error_dev(icp->icp_dv, "command timed out\n");
		rv = EIO;
	}

	while ((*icp->icp_test_busy)(icp) != 0)
		DELAY(10);

	splx(s);

	return (rv);
}

int
icp_ccb_wait(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
	int s, rv;

	ic->ic_flags |= IC_WAITING;

	s = splbio();
	icp_ccb_enqueue(icp, ic);
	while ((ic->ic_flags & IC_COMPLETE) == 0) {
		if ((rv = tsleep(ic, PRIBIO, "icpwccb", mstohz(timo))) != 0) {
			splx(s);
			return (rv);
		}
	}
	splx(s);

	if (ic->ic_status != ICP_S_OK) {
		aprint_error_dev(icp->icp_dv, "command failed; status=%x\n",
		    ic->ic_status);
		return (EIO);
	}

	return (0);
}

int
icp_ccb_wait_user(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
	int s, rv;

	ic->ic_dv = icp->icp_dv;
	ic->ic_intr = icp_ucmd_intr;
	ic->ic_flags |= IC_UCMD;

	s = splbio();
	icp_ccb_enqueue(icp, ic);
	while ((ic->ic_flags & IC_COMPLETE) == 0) {
		if ((rv = tsleep(ic, PRIBIO, "icpwuccb", mstohz(timo))) != 0) {
			splx(s);
			return (rv);
		}
	}
	splx(s);

	return (0);
}

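/*
 * Hand a command to the controller: round the packet length up to a
 * 32-bit boundary, claim the semaphore, then have the bus front-end
 * copy the command out and release the event.  icp_running is bumped
 * so that icp_freeze() can wait for the command to drain.
 */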
void
icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic)
{

	ic->ic_cmdlen = (ic->ic_cmdlen + 3) & ~3;

	(*icp->icp_set_sema0)(icp);
	DELAY(10);

	ic->ic_cmd.cmd_boardnode = htole32(ICP_LOCALBOARD);
	ic->ic_cmd.cmd_cmdindex = htole32(ic->ic_ident);

	icp->icp_running++;

	(*icp->icp_copy_cmd)(icp, ic);
	(*icp->icp_release_event)(icp, ic);
}

int
icp_freeze(struct icp_softc *icp)
{
	int s, error = 0;

	s = splbio();
	if (icp->icp_qfreeze++ == 0) {
		while (icp->icp_running != 0) {
			icp->icp_flags |= ICP_F_WAIT_FREEZE;
			error = tsleep(&icp->icp_qfreeze, PRIBIO|PCATCH,
			    "icpqfrz", 0);
			if (error != 0 && --icp->icp_qfreeze == 0 &&
			    ICP_HAS_WORK(icp)) {
				icp_ccb_enqueue(icp, NULL);
				break;
			}
		}
	}
	splx(s);

	return (error);
}

void
icp_unfreeze(struct icp_softc *icp)
{
	int s;

	s = splbio();
	KDASSERT(icp->icp_qfreeze != 0);
	if (--icp->icp_qfreeze == 0 && ICP_HAS_WORK(icp))
		icp_ccb_enqueue(icp, NULL);
	splx(s);
}

/* XXX Global - should be per-controller? XXX */
static gdt_evt_str icp_event_buffer[ICP_MAX_EVENTS];
static int icp_event_oldidx;
static int icp_event_lastidx;

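/*
 * Record an event in the circular buffer.  A repeat of the most
 * recent event only bumps its same_count and timestamp; anything else
 * advances lastidx (wrapping around and pushing oldidx forward when
 * the buffer is full) and overwrites the entry there.
 */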
gdt_evt_str *
icp_store_event(struct icp_softc *icp, u_int16_t source, u_int16_t idx,
    gdt_evt_data *evt)
{
	gdt_evt_str *e;

	/* no source == no event */
	if (source == 0)
		return (NULL);

	e = &icp_event_buffer[icp_event_lastidx];
	if (e->event_source == source && e->event_idx == idx &&
	    ((evt->size != 0 && e->event_data.size != 0 &&
	      memcmp(&e->event_data.eu, &evt->eu, evt->size) == 0) ||
	     (evt->size == 0 && e->event_data.size == 0 &&
	      strcmp((char *) e->event_data.event_string,
	          (char *) evt->event_string) == 0))) {
		e->last_stamp = time_second;
		e->same_count++;
	} else {
		if (icp_event_buffer[icp_event_lastidx].event_source != 0) {
			icp_event_lastidx++;
			if (icp_event_lastidx == ICP_MAX_EVENTS)
				icp_event_lastidx = 0;
			if (icp_event_lastidx == icp_event_oldidx) {
				icp_event_oldidx++;
				if (icp_event_oldidx == ICP_MAX_EVENTS)
					icp_event_oldidx = 0;
			}
		}
		e = &icp_event_buffer[icp_event_lastidx];
		e->event_source = source;
		e->event_idx = idx;
		e->first_stamp = e->last_stamp = time_second;
		e->same_count = 1;
		e->event_data = *evt;
		e->application = 0;
	}
	return (e);
}

int
icp_read_event(struct icp_softc *icp, int handle, gdt_evt_str *estr)
{
	gdt_evt_str *e;
	int eindex, s;

	s = splbio();

	if (handle == -1)
		eindex = icp_event_oldidx;
	else
		eindex = handle;

	estr->event_source = 0;

	if (eindex < 0 || eindex >= ICP_MAX_EVENTS) {
		splx(s);
		return (eindex);
	}

	e = &icp_event_buffer[eindex];
	if (e->event_source != 0) {
		if (eindex != icp_event_lastidx) {
			eindex++;
			if (eindex == ICP_MAX_EVENTS)
				eindex = 0;
		} else
			eindex = -1;
		memcpy(estr, e, sizeof(gdt_evt_str));
	}

	splx(s);

	return (eindex);
}

void
icp_readapp_event(struct icp_softc *icp, u_int8_t application,
    gdt_evt_str *estr)
{
	gdt_evt_str *e;
	int found = 0, eindex, s;

	s = splbio();

	eindex = icp_event_oldidx;
	for (;;) {
		e = &icp_event_buffer[eindex];
		if (e->event_source == 0)
			break;
		if ((e->application & application) == 0) {
			e->application |= application;
			found = 1;
			break;
		}
		if (eindex == icp_event_lastidx)
			break;
		eindex++;
		if (eindex == ICP_MAX_EVENTS)
			eindex = 0;
	}
	if (found)
		memcpy(estr, e, sizeof(gdt_evt_str));
	else
		estr->event_source = 0;

	splx(s);
}

void
icp_clear_events(struct icp_softc *icp)
{
	int s;

	s = splbio();
	icp_event_oldidx = icp_event_lastidx = 0;
	memset(icp_event_buffer, 0, sizeof(icp_event_buffer));
	splx(s);
}
1409