/*	$NetBSD: icp.c,v 1.27 2008/04/08 12:07:26 cegger Exp $	*/

/*-
 * Copyright (c) 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, and by Jason R. Thorpe of Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1999, 2000 Niklas Hallqvist.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Niklas Hallqvist.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from OpenBSD: gdt_common.c,v 1.12 2001/07/04 06:43:18 niklas Exp
 */

/*
 * This driver would not have been written if it were not for the hardware
 * donations from both ICP-Vortex and Öko.neT.  I want to thank them for
 * their support.
 *
 * Re-worked for NetBSD by Andrew Doran.  Test hardware kindly supplied by
 * Intel.
 *
 * Support for the ICP-Vortex management tools added by
 * Jason R. Thorpe of Wasabi Systems, Inc., based on code
 * provided by Achim Leubner <achim.leubner@intel.com>.
 *
 * Additional support for dynamic rescan of cacheservice drives by
 * Jason R. Thorpe of Wasabi Systems, Inc.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: icp.c,v 1.27 2008/04/08 12:07:26 cegger Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/disk.h>

#include <uvm/uvm_extern.h>

#include <sys/bswap.h>
#include <sys/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/icpreg.h>
#include <dev/ic/icpvar.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include "locators.h"

int	icp_async_event(struct icp_softc *, int);
void	icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic);
void	icp_chain(struct icp_softc *);
int	icp_print(void *, const char *);
void	icp_watchdog(void *);
void	icp_ucmd_intr(struct icp_ccb *);
void	icp_recompute_openings(struct icp_softc *);

int	icp_count;	/* total # of controllers, for ioctl interface */

/*
 * Statistics for the ioctl interface to query.
 *
 * XXX Global.  They should probably be made per-controller
 * XXX at some point.
 */
gdt_statist_t icp_stats;

int
icp_init(struct icp_softc *icp, const char *intrstr)
{
	struct icp_attach_args icpa;
	struct icp_binfo binfo;
	struct icp_ccb *ic;
	u_int16_t cdev_cnt;
	int i, j, state, feat, nsegs, rv;
	int locs[ICPCF_NLOCS];

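	/*
	 * "state" counts the initialization steps that have completed, so
	 * the bail_out path can unwind only what was actually set up.
	 */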
	state = 0;

	if (intrstr != NULL)
		aprint_normal_dev(&icp->icp_dv, "interrupting at %s\n",
		    intrstr);

	SIMPLEQ_INIT(&icp->icp_ccb_queue);
	SIMPLEQ_INIT(&icp->icp_ccb_freelist);
	SIMPLEQ_INIT(&icp->icp_ucmd_queue);
	callout_init(&icp->icp_wdog_callout, 0);

	/*
	 * Allocate a scratch area.
	 */
	if (bus_dmamap_create(icp->icp_dmat, ICP_SCRATCH_SIZE, 1,
	    ICP_SCRATCH_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &icp->icp_scr_dmamap) != 0) {
		aprint_error_dev(&icp->icp_dv, "cannot create scratch dmamap\n");
		return (1);
	}
	state++;

	if (bus_dmamem_alloc(icp->icp_dmat, ICP_SCRATCH_SIZE, PAGE_SIZE, 0,
	    icp->icp_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		aprint_error_dev(&icp->icp_dv, "cannot alloc scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(icp->icp_dmat, icp->icp_scr_seg, nsegs,
	    ICP_SCRATCH_SIZE, &icp->icp_scr, 0)) {
		aprint_error_dev(&icp->icp_dv, "cannot map scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(icp->icp_dmat, icp->icp_scr_dmamap, icp->icp_scr,
	    ICP_SCRATCH_SIZE, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(&icp->icp_dv, "cannot load scratch dmamap\n");
		goto bail_out;
	}
	state++;

	/*
	 * Allocate and initialize the command control blocks.
	 */
	ic = malloc(sizeof(*ic) * ICP_NCCBS, M_DEVBUF, M_NOWAIT | M_ZERO);
	if ((icp->icp_ccbs = ic) == NULL) {
		aprint_error_dev(&icp->icp_dv, "malloc() failed\n");
		goto bail_out;
	}
	state++;

	for (i = 0; i < ICP_NCCBS; i++, ic++) {
		/*
		 * The first two command indexes have special meanings, so
		 * we can't use them.
		 */
		ic->ic_ident = i + 2;
		rv = bus_dmamap_create(icp->icp_dmat, ICP_MAX_XFER,
		    ICP_MAXSG, ICP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ic->ic_xfer_map);
		if (rv != 0)
			break;
		icp->icp_nccbs++;
		icp_ccb_free(icp, ic);
	}
#ifdef DIAGNOSTIC
	if (icp->icp_nccbs != ICP_NCCBS)
		aprint_error_dev(&icp->icp_dv, "%d/%d CCBs usable\n",
		    icp->icp_nccbs, ICP_NCCBS);
#endif

	/*
	 * Initialize the controller.
	 */
	if (!icp_cmd(icp, ICP_SCREENSERVICE, ICP_INIT, 0, 0, 0)) {
		aprint_error_dev(&icp->icp_dv, "screen service init error %d\n",
		    icp->icp_status);
		goto bail_out;
	}

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
		aprint_error_dev(&icp->icp_dv, "cache service init error %d\n",
		    icp->icp_status);
		goto bail_out;
	}

	icp_cmd(icp, ICP_CACHESERVICE, ICP_UNFREEZE_IO, 0, 0, 0);

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_MOUNT, 0xffff, 1, 0)) {
		aprint_error_dev(&icp->icp_dv, "cache service mount error %d\n",
		    icp->icp_status);
		goto bail_out;
	}

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
		aprint_error_dev(&icp->icp_dv,
		    "cache service post-mount init error %d\n",
		    icp->icp_status);
		goto bail_out;
	}
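	/* The post-mount cache service INIT returns the host drive count. */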
	cdev_cnt = (u_int16_t)icp->icp_info;
	icp->icp_fw_vers = icp->icp_service;

	if (!icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_INIT, 0, 0, 0)) {
		aprint_error_dev(&icp->icp_dv, "raw service init error %d\n",
		    icp->icp_status);
		goto bail_out;
	}

	/*
	 * Set/get raw service features (scatter/gather).
	 */
	feat = 0;
	if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_SET_FEAT, ICP_SCATTER_GATHER,
	    0, 0))
		if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_GET_FEAT, 0, 0, 0))
			feat = icp->icp_info;

	if ((feat & ICP_SCATTER_GATHER) == 0) {
#ifdef DIAGNOSTIC
		aprint_normal_dev(&icp->icp_dv,
		    "scatter/gather not supported (raw service)\n");
#endif
	} else
		icp->icp_features |= ICP_FEAT_RAWSERVICE;

	/*
	 * Set/get cache service features (scatter/gather).
	 */
	feat = 0;
	if (icp_cmd(icp, ICP_CACHESERVICE, ICP_SET_FEAT, 0,
	    ICP_SCATTER_GATHER, 0))
		if (icp_cmd(icp, ICP_CACHESERVICE, ICP_GET_FEAT, 0, 0, 0))
			feat = icp->icp_info;

	if ((feat & ICP_SCATTER_GATHER) == 0) {
#ifdef DIAGNOSTIC
		aprint_normal_dev(&icp->icp_dv,
		    "scatter/gather not supported (cache service)\n");
#endif
	} else
		icp->icp_features |= ICP_FEAT_CACHESERVICE;

	/*
	 * Pull some information from the board and dump.
	 */
	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL, ICP_BOARD_INFO,
	    ICP_INVALID_CHANNEL, sizeof(struct icp_binfo))) {
		aprint_error_dev(&icp->icp_dv, "unable to retrieve board info\n");
		goto bail_out;
	}
	memcpy(&binfo, icp->icp_scr, sizeof(binfo));

	aprint_normal_dev(&icp->icp_dv,
	    "model <%s>, firmware <%s>, %d channel(s), %dMB memory\n",
	    binfo.bi_type_string, binfo.bi_raid_string,
	    binfo.bi_chan_count, le32toh(binfo.bi_memsize) >> 20);

	/*
	 * Determine the number of devices, and number of openings per
	 * device.
	 */
	if (icp->icp_features & ICP_FEAT_CACHESERVICE) {
		for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) {
			if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, j, 0,
			    0))
				continue;

			icp->icp_cdr[j].cd_size = icp->icp_info;
			if (icp->icp_cdr[j].cd_size != 0)
				icp->icp_ndevs++;

			if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, j, 0,
			    0))
				icp->icp_cdr[j].cd_type = icp->icp_info;
		}
	}

	if (icp->icp_features & ICP_FEAT_RAWSERVICE) {
		icp->icp_nchan = binfo.bi_chan_count;
		icp->icp_ndevs += icp->icp_nchan;
	}

	icp_recompute_openings(icp);

	/*
	 * Attach SCSI channels.
	 */
	if (icp->icp_features & ICP_FEAT_RAWSERVICE) {
		struct icp_ioc_version *iv;
		struct icp_rawioc *ri;
		struct icp_getch *gc;

		iv = (struct icp_ioc_version *)icp->icp_scr;
		iv->iv_version = htole32(ICP_IOC_NEWEST);
		iv->iv_listents = ICP_MAXBUS;
		iv->iv_firstchan = 0;
		iv->iv_lastchan = ICP_MAXBUS - 1;
		iv->iv_listoffset = htole32(sizeof(*iv));

		if (icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
		    ICP_IOCHAN_RAW_DESC, ICP_INVALID_CHANNEL,
		    sizeof(*iv) + ICP_MAXBUS * sizeof(*ri))) {
			ri = (struct icp_rawioc *)(iv + 1);
			for (j = 0; j < binfo.bi_chan_count; j++, ri++)
				icp->icp_bus_id[j] = ri->ri_procid;
		} else {
			/*
			 * Fall back to the old method.
			 */
			gc = (struct icp_getch *)icp->icp_scr;

			for (j = 0; j < binfo.bi_chan_count; j++) {
				if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
				    ICP_SCSI_CHAN_CNT | ICP_L_CTRL_PATTERN,
				    ICP_IO_CHANNEL | ICP_INVALID_CHANNEL,
				    sizeof(*gc))) {
					aprint_error_dev(&icp->icp_dv,
					    "unable to get chan info\n");
					goto bail_out;
				}
				icp->icp_bus_id[j] = gc->gc_scsiid;
			}
		}

		for (j = 0; j < binfo.bi_chan_count; j++) {
			if (icp->icp_bus_id[j] > ICP_MAXID_FC)
				icp->icp_bus_id[j] = ICP_MAXID_FC;

			icpa.icpa_unit = j + ICPA_UNIT_SCSI;

			locs[ICPCF_UNIT] = j + ICPA_UNIT_SCSI;

			icp->icp_children[icpa.icpa_unit] =
				config_found_sm_loc(&icp->icp_dv, "icp", locs,
					&icpa, icp_print, config_stdsubmatch);
		}
	}

	/*
	 * Attach cache devices.
	 */
	if (icp->icp_features & ICP_FEAT_CACHESERVICE) {
		for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) {
			if (icp->icp_cdr[j].cd_size == 0)
				continue;

			icpa.icpa_unit = j;

			locs[ICPCF_UNIT] = j;

			icp->icp_children[icpa.icpa_unit] =
			    config_found_sm_loc(&icp->icp_dv, "icp", locs,
				&icpa, icp_print, config_stdsubmatch);
		}
	}

	/*
	 * Start the watchdog.
	 */
	icp_watchdog(icp);

	/*
	 * Count the controller, and we're done!
	 */
	if (icp_count++ == 0)
		mutex_init(&icp_ioctl_mutex, MUTEX_DEFAULT, IPL_NONE);

	return (0);

 bail_out:
	if (state > 4) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(icp->icp_dmat,
			    icp->icp_ccbs[j].ic_xfer_map);
		free(icp->icp_ccbs, M_DEVBUF);
	}
	if (state > 2)
		bus_dmamap_unload(icp->icp_dmat, icp->icp_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(icp->icp_dmat, icp->icp_scr,
		    ICP_SCRATCH_SIZE);
	if (state > 0)
		bus_dmamem_free(icp->icp_dmat, icp->icp_scr_seg, nsegs);
	bus_dmamap_destroy(icp->icp_dmat, icp->icp_scr_dmamap);

	return (1);
}

void
icp_register_servicecb(struct icp_softc *icp, int unit,
    const struct icp_servicecb *cb)
{

	icp->icp_servicecb[unit] = cb;
}

void
icp_rescan(struct icp_softc *icp, int unit)
{
	struct icp_attach_args icpa;
	u_int newsize, newtype;
	int locs[ICPCF_NLOCS];

	/*
	 * NOTE: It is very important that the queue be frozen and no
	 * commands be running when this is called.  The ioctl mutex must
	 * also be held.
	 */

	KASSERT(icp->icp_qfreeze != 0);
	KASSERT(icp->icp_running == 0);
	KASSERT(unit < ICP_MAX_HDRIVES);

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, unit, 0, 0)) {
#ifdef ICP_DEBUG
		printf("%s: rescan: unit %d ICP_INFO failed -> 0x%04x\n",
		    device_xname(&icp->icp_dv), unit, icp->icp_status);
#endif
		goto gone;
	}
	if ((newsize = icp->icp_info) == 0) {
#ifdef ICP_DEBUG
		printf("%s: rescan: unit %d has zero size\n",
		    device_xname(&icp->icp_dv), unit);
#endif
 gone:
		/*
		 * Host drive is no longer present; detach if a child
		 * is currently there.
		 */
		if (icp->icp_cdr[unit].cd_size != 0)
			icp->icp_ndevs--;
		icp->icp_cdr[unit].cd_size = 0;
		if (icp->icp_children[unit] != NULL) {
			(void) config_detach(icp->icp_children[unit],
			    DETACH_FORCE);
			icp->icp_children[unit] = NULL;
		}
		return;
	}

	if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, unit, 0, 0))
		newtype = icp->icp_info;
	else {
#ifdef ICP_DEBUG
		printf("%s: rescan: unit %d ICP_DEVTYPE failed\n",
		    device_xname(&icp->icp_dv), unit);
#endif
		newtype = 0;	/* XXX? */
	}

#ifdef ICP_DEBUG
	printf("%s: rescan: unit %d old %u/%u, new %u/%u\n",
	    device_xname(&icp->icp_dv), unit, icp->icp_cdr[unit].cd_size,
	    icp->icp_cdr[unit].cd_type, newsize, newtype);
#endif

	/*
	 * If the type or size changed, detach any old child (if it exists)
	 * and attach a new one.
	 */
	if (icp->icp_children[unit] == NULL ||
	    newsize != icp->icp_cdr[unit].cd_size ||
	    newtype != icp->icp_cdr[unit].cd_type) {
		if (icp->icp_cdr[unit].cd_size == 0)
			icp->icp_ndevs++;
		icp->icp_cdr[unit].cd_size = newsize;
		icp->icp_cdr[unit].cd_type = newtype;
		if (icp->icp_children[unit] != NULL)
			(void) config_detach(icp->icp_children[unit],
			    DETACH_FORCE);

		icpa.icpa_unit = unit;

		locs[ICPCF_UNIT] = unit;

		icp->icp_children[unit] = config_found_sm_loc(&icp->icp_dv,
			"icp", locs, &icpa, icp_print, config_stdsubmatch);
	}

	icp_recompute_openings(icp);
}

void
icp_rescan_all(struct icp_softc *icp)
{
	int unit;
	u_int16_t cdev_cnt;

	/*
	 * This is the old method of rescanning the host drives.  We
	 * start by reinitializing the cache service.
	 */
	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
		printf("%s: unable to re-initialize cache service for rescan\n",
		    device_xname(&icp->icp_dv));
		return;
	}
	cdev_cnt = (u_int16_t) icp->icp_info;

	/* For each host drive, do the new-style rescan. */
	for (unit = 0; unit < cdev_cnt && unit < ICP_MAX_HDRIVES; unit++)
		icp_rescan(icp, unit);

	/* Now detach anything in the slots after cdev_cnt. */
	for (; unit < ICP_MAX_HDRIVES; unit++) {
		if (icp->icp_cdr[unit].cd_size != 0) {
#ifdef ICP_DEBUG
			printf("%s: rescan all: unit %d >= new cdev_cnt (%d)\n",
			    device_xname(&icp->icp_dv), unit, cdev_cnt);
#endif
			icp->icp_ndevs--;
			icp->icp_cdr[unit].cd_size = 0;
			if (icp->icp_children[unit] != NULL) {
				(void) config_detach(icp->icp_children[unit],
				    DETACH_FORCE);
				icp->icp_children[unit] = NULL;
			}
		}
	}

	icp_recompute_openings(icp);
}

void
icp_recompute_openings(struct icp_softc *icp)
{
	int unit, openings;

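	/* Divide the usable CCBs (less a reserve) evenly among all devices. */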
	if (icp->icp_ndevs != 0)
		openings =
		    (icp->icp_nccbs - ICP_NCCB_RESERVE) / icp->icp_ndevs;
	else
		openings = 0;
	if (openings == icp->icp_openings)
		return;
	icp->icp_openings = openings;

#ifdef ICP_DEBUG
	printf("%s: %d device%s, %d openings per device\n",
	    device_xname(&icp->icp_dv), icp->icp_ndevs,
	    icp->icp_ndevs == 1 ? "" : "s", icp->icp_openings);
#endif

	for (unit = 0; unit < ICP_MAX_HDRIVES + ICP_MAXBUS; unit++) {
		if (icp->icp_children[unit] != NULL)
			(*icp->icp_servicecb[unit]->iscb_openings)(
			    icp->icp_children[unit], icp->icp_openings);
	}
}

void
icp_watchdog(void *cookie)
{
	struct icp_softc *icp;
	int s;

	icp = cookie;

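	/* Poll the controller in case an interrupt was lost, then restart
	   any queued work. */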
	s = splbio();
	icp_intr(icp);
	if (ICP_HAS_WORK(icp))
		icp_ccb_enqueue(icp, NULL);
	splx(s);

	callout_reset(&icp->icp_wdog_callout, hz * ICP_WATCHDOG_FREQ,
	    icp_watchdog, icp);
}

int
icp_print(void *aux, const char *pnp)
{
	struct icp_attach_args *icpa;
	const char *str;

	icpa = (struct icp_attach_args *)aux;

	if (pnp != NULL) {
		if (icpa->icpa_unit < ICPA_UNIT_SCSI)
			str = "block device";
		else
			str = "SCSI channel";
		aprint_normal("%s at %s", str, pnp);
	}
	aprint_normal(" unit %d", icpa->icpa_unit);

	return (UNCONF);
}

int
icp_async_event(struct icp_softc *icp, int service)
{

	if (service == ICP_SCREENSERVICE) {
		if (icp->icp_status == ICP_S_MSG_REQUEST) {
			/* XXX */
		}
	} else {
		if ((icp->icp_fw_vers & 0xff) >= 0x1a) {
			icp->icp_evt.size = 0;
			icp->icp_evt.eu.async.ionode =
			    device_unit(&icp->icp_dv);
			icp->icp_evt.eu.async.status = icp->icp_status;
			/*
			 * Severity and event string are filled in by the
			 * hardware interface interrupt handler.
			 */
			printf("%s: %s\n", device_xname(&icp->icp_dv),
			    icp->icp_evt.event_string);
		} else {
			icp->icp_evt.size = sizeof(icp->icp_evt.eu.async);
			icp->icp_evt.eu.async.ionode =
			    device_unit(&icp->icp_dv);
			icp->icp_evt.eu.async.service = service;
			icp->icp_evt.eu.async.status = icp->icp_status;
			icp->icp_evt.eu.async.info = icp->icp_info;
			/* XXXJRT FIX THIS */
			*(u_int32_t *) icp->icp_evt.eu.async.scsi_coord =
			    icp->icp_info2;
		}
		icp_store_event(icp, GDT_ES_ASYNC, service, &icp->icp_evt);
	}

	return (0);
}

int
icp_intr(void *cookie)
{
	struct icp_softc *icp;
	struct icp_intr_ctx ctx;
	struct icp_ccb *ic;

	icp = cookie;

	ctx.istatus = (*icp->icp_get_status)(icp);
	if (!ctx.istatus) {
		icp->icp_status = ICP_S_NO_STATUS;
		return (0);
	}

	(*icp->icp_intr)(icp, &ctx);

	icp->icp_status = ctx.cmd_status;
	icp->icp_service = ctx.service;
	icp->icp_info = ctx.info;
	icp->icp_info2 = ctx.info2;

	switch (ctx.istatus) {
	case ICP_ASYNCINDEX:
		icp_async_event(icp, ctx.service);
		return (1);

	case ICP_SPEZINDEX:
		aprint_error_dev(&icp->icp_dv,
		    "uninitialized or unknown service (%d/%d)\n",
		    ctx.info, ctx.info2);
		icp->icp_evt.size = sizeof(icp->icp_evt.eu.driver);
		icp->icp_evt.eu.driver.ionode = device_unit(&icp->icp_dv);
		icp_store_event(icp, GDT_ES_DRIVER, 4, &icp->icp_evt);
		return (1);
	}

	if ((ctx.istatus - 2) >= icp->icp_nccbs)
		panic("icp_intr: bad command index returned");

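	/* Command indices 0 and 1 are reserved, so CCB idents start at 2. */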
	ic = &icp->icp_ccbs[ctx.istatus - 2];
	ic->ic_status = icp->icp_status;

	if ((ic->ic_flags & IC_ALLOCED) == 0) {
		/* XXX ICP's "iir" driver just sends an event here. */
		panic("icp_intr: inactive CCB identified");
	}

	/*
	 * Try to protect ourselves from the running command count already
	 * being 0 (e.g. if a polled command times out).
	 */
	KDASSERT(icp->icp_running != 0);
	if (--icp->icp_running == 0 &&
	    (icp->icp_flags & ICP_F_WAIT_FREEZE) != 0) {
		icp->icp_flags &= ~ICP_F_WAIT_FREEZE;
		wakeup(&icp->icp_qfreeze);
	}

	switch (icp->icp_status) {
	case ICP_S_BSY:
#ifdef ICP_DEBUG
		printf("%s: ICP_S_BSY received\n", device_xname(&icp->icp_dv));
#endif
		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
			SIMPLEQ_INSERT_HEAD(&icp->icp_ucmd_queue, ic, ic_chain);
		else
			SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_queue, ic, ic_chain);
		break;

	default:
		ic->ic_flags |= IC_COMPLETE;

		if ((ic->ic_flags & IC_WAITING) != 0)
			wakeup(ic);
		else if (ic->ic_intr != NULL)
			(*ic->ic_intr)(ic);

		if (ICP_HAS_WORK(icp))
			icp_ccb_enqueue(icp, NULL);

		break;
	}

	return (1);
}

struct icp_ucmd_ctx {
	gdt_ucmd_t *iu_ucmd;
	u_int32_t iu_cnt;
};

void
icp_ucmd_intr(struct icp_ccb *ic)
{
	struct icp_softc *icp = (void *) ic->ic_dv;
	struct icp_ucmd_ctx *iu = ic->ic_context;
	gdt_ucmd_t *ucmd = iu->iu_ucmd;

	ucmd->status = icp->icp_status;
	ucmd->info = icp->icp_info;

	if (iu->iu_cnt != 0) {
		bus_dmamap_sync(icp->icp_dmat,
		    icp->icp_scr_dmamap,
		    ICP_SCRATCH_UCMD, iu->iu_cnt,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		memcpy(ucmd->data,
		    (char *)icp->icp_scr + ICP_SCRATCH_UCMD, iu->iu_cnt);
	}

	icp->icp_ucmd_ccb = NULL;

	ic->ic_flags |= IC_COMPLETE;
	wakeup(ic);
}

/*
 * NOTE: We assume that it is safe to sleep here!
 */
int
icp_cmd(struct icp_softc *icp, u_int8_t service, u_int16_t opcode,
	u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
{
	struct icp_ioctlcmd *icmd;
	struct icp_cachecmd *cc;
	struct icp_rawcmd *rc;
	int retries, rv;
	struct icp_ccb *ic;

	retries = ICP_RETRIES;

	do {
		ic = icp_ccb_alloc_wait(icp);
		memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd));
		ic->ic_cmd.cmd_opcode = htole16(opcode);

		switch (service) {
		case ICP_CACHESERVICE:
			if (opcode == ICP_IOCTL) {
				icmd = &ic->ic_cmd.cmd_packet.ic;
				icmd->ic_subfunc = htole16(arg1);
				icmd->ic_channel = htole32(arg2);
				icmd->ic_bufsize = htole32(arg3);
				icmd->ic_addr =
				    htole32(icp->icp_scr_seg[0].ds_addr);

				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap, 0, arg3,
				    BUS_DMASYNC_PREWRITE |
				    BUS_DMASYNC_PREREAD);
			} else {
				cc = &ic->ic_cmd.cmd_packet.cc;
				cc->cc_deviceno = htole16(arg1);
				cc->cc_blockno = htole32(arg2);
			}
			break;

		case ICP_SCSIRAWSERVICE:
			rc = &ic->ic_cmd.cmd_packet.rc;
			rc->rc_direction = htole32(arg1);
			rc->rc_bus = arg2;
			rc->rc_target = arg3;
			rc->rc_lun = arg3 >> 8;
			break;
		}

		ic->ic_service = service;
		ic->ic_cmdlen = sizeof(ic->ic_cmd);
		rv = icp_ccb_poll(icp, ic, 10000);

		switch (service) {
		case ICP_CACHESERVICE:
			if (opcode == ICP_IOCTL) {
				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap, 0, arg3,
				    BUS_DMASYNC_POSTWRITE |
				    BUS_DMASYNC_POSTREAD);
			}
			break;
		}

		icp_ccb_free(icp, ic);
	} while (rv != 0 && --retries > 0);

	return (icp->icp_status == ICP_S_OK);
}

int
icp_ucmd(struct icp_softc *icp, gdt_ucmd_t *ucmd)
{
	struct icp_ccb *ic;
	struct icp_ucmd_ctx iu;
	u_int32_t cnt;
	int error;

	if (ucmd->service == ICP_CACHESERVICE) {
		if (ucmd->command.cmd_opcode == ICP_IOCTL) {
			cnt = ucmd->command.cmd_packet.ic.ic_bufsize;
			if (cnt > GDT_SCRATCH_SZ) {
				aprint_error_dev(&icp->icp_dv,
				    "scratch buffer too small (%d/%d)\n",
				    GDT_SCRATCH_SZ, cnt);
				return (EINVAL);
			}
		} else {
			cnt = ucmd->command.cmd_packet.cc.cc_blockcnt *
			    ICP_SECTOR_SIZE;
			if (cnt > GDT_SCRATCH_SZ) {
				aprint_error_dev(&icp->icp_dv,
				    "scratch buffer too small (%d/%d)\n",
				    GDT_SCRATCH_SZ, cnt);
				return (EINVAL);
			}
		}
	} else {
		cnt = ucmd->command.cmd_packet.rc.rc_sdlen +
		    ucmd->command.cmd_packet.rc.rc_sense_len;
		if (cnt > GDT_SCRATCH_SZ) {
			aprint_error_dev(&icp->icp_dv,
			    "scratch buffer too small (%d/%d)\n",
			    GDT_SCRATCH_SZ, cnt);
			return (EINVAL);
		}
	}

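	/*
	 * User command data is staged through the controller scratch area
	 * at ICP_SCRATCH_UCMD; iu_cnt records how much to copy back on
	 * completion.
	 */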
	iu.iu_ucmd = ucmd;
	iu.iu_cnt = cnt;

	ic = icp_ccb_alloc_wait(icp);
	memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd));
	ic->ic_cmd.cmd_opcode = htole16(ucmd->command.cmd_opcode);

	if (ucmd->service == ICP_CACHESERVICE) {
		if (ucmd->command.cmd_opcode == ICP_IOCTL) {
			struct icp_ioctlcmd *icmd, *uicmd;

			icmd = &ic->ic_cmd.cmd_packet.ic;
			uicmd = &ucmd->command.cmd_packet.ic;

			icmd->ic_subfunc = htole16(uicmd->ic_subfunc);
			icmd->ic_channel = htole32(uicmd->ic_channel);
			icmd->ic_bufsize = htole32(uicmd->ic_bufsize);
			icmd->ic_addr =
			    htole32(icp->icp_scr_seg[0].ds_addr +
				    ICP_SCRATCH_UCMD);
		} else {
			struct icp_cachecmd *cc, *ucc;

			cc = &ic->ic_cmd.cmd_packet.cc;
			ucc = &ucmd->command.cmd_packet.cc;

			cc->cc_deviceno = htole16(ucc->cc_deviceno);
			cc->cc_blockno = htole32(ucc->cc_blockno);
			cc->cc_blockcnt = htole32(ucc->cc_blockcnt);
			cc->cc_addr = htole32(0xffffffffU);
			cc->cc_nsgent = htole32(1);
			cc->cc_sg[0].sg_addr =
			    htole32(icp->icp_scr_seg[0].ds_addr +
				    ICP_SCRATCH_UCMD);
			cc->cc_sg[0].sg_len = htole32(cnt);
		}
	} else {
		struct icp_rawcmd *rc, *urc;

		rc = &ic->ic_cmd.cmd_packet.rc;
		urc = &ucmd->command.cmd_packet.rc;

		rc->rc_direction = htole32(urc->rc_direction);
		rc->rc_sdata = htole32(0xffffffffU);
		rc->rc_sdlen = htole32(urc->rc_sdlen);
		rc->rc_clen = htole32(urc->rc_clen);
		memcpy(rc->rc_cdb, urc->rc_cdb, sizeof(rc->rc_cdb));
		rc->rc_target = urc->rc_target;
		rc->rc_lun = urc->rc_lun;
		rc->rc_bus = urc->rc_bus;
		rc->rc_sense_len = htole32(urc->rc_sense_len);
		rc->rc_sense_addr =
		    htole32(icp->icp_scr_seg[0].ds_addr +
			    ICP_SCRATCH_UCMD + urc->rc_sdlen);
		rc->rc_nsgent = htole32(1);
		rc->rc_sg[0].sg_addr =
		    htole32(icp->icp_scr_seg[0].ds_addr + ICP_SCRATCH_UCMD);
		rc->rc_sg[0].sg_len = htole32(cnt - urc->rc_sense_len);
	}

	ic->ic_service = ucmd->service;
	ic->ic_cmdlen = sizeof(ic->ic_cmd);
	ic->ic_context = &iu;

	/*
	 * XXX What units are ucmd->timeout in?  Until we know, we
	 * XXX just pull a number out of thin air.
	 */
	if (__predict_false((error = icp_ccb_wait_user(icp, ic, 30000)) != 0))
		aprint_error_dev(&icp->icp_dv,
		    "error %d waiting for ucmd to complete\n", error);

	/* icp_ucmd_intr() has updated ucmd. */
	icp_ccb_free(icp, ic);

	return (error);
}

struct icp_ccb *
icp_ccb_alloc(struct icp_softc *icp)
{
	struct icp_ccb *ic;
	int s;

	s = splbio();
	if (__predict_false((ic =
			     SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL)) {
		splx(s);
		return (NULL);
	}
	SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
	splx(s);

	ic->ic_flags = IC_ALLOCED;
	return (ic);
}

struct icp_ccb *
icp_ccb_alloc_wait(struct icp_softc *icp)
{
	struct icp_ccb *ic;
	int s;

	s = splbio();
	while ((ic = SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL) {
		icp->icp_flags |= ICP_F_WAIT_CCB;
		(void) tsleep(&icp->icp_ccb_freelist, PRIBIO, "icpccb", 0);
	}
	SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
	splx(s);

	ic->ic_flags = IC_ALLOCED;
	return (ic);
}

void
icp_ccb_free(struct icp_softc *icp, struct icp_ccb *ic)
{
	int s;

	s = splbio();
	ic->ic_flags = 0;
	ic->ic_intr = NULL;
	SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_freelist, ic, ic_chain);
	if (__predict_false((icp->icp_flags & ICP_F_WAIT_CCB) != 0)) {
		icp->icp_flags &= ~ICP_F_WAIT_CCB;
		wakeup(&icp->icp_ccb_freelist);
	}
	splx(s);
}

void
icp_ccb_enqueue(struct icp_softc *icp, struct icp_ccb *ic)
{
	int s;

	s = splbio();

	if (ic != NULL) {
		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
			SIMPLEQ_INSERT_TAIL(&icp->icp_ucmd_queue, ic, ic_chain);
		else
			SIMPLEQ_INSERT_TAIL(&icp->icp_ccb_queue, ic, ic_chain);
	}

	while (icp->icp_qfreeze == 0) {
		if (__predict_false((ic =
			    SIMPLEQ_FIRST(&icp->icp_ucmd_queue)) != NULL)) {
			struct icp_ucmd_ctx *iu = ic->ic_context;
			gdt_ucmd_t *ucmd = iu->iu_ucmd;

			/*
			 * All user-generated commands share the same
			 * scratch space, so if one is already running,
			 * we have to stall the command queue.
			 */
			if (icp->icp_ucmd_ccb != NULL)
				break;
			if ((*icp->icp_test_busy)(icp))
				break;
			icp->icp_ucmd_ccb = ic;

			if (iu->iu_cnt != 0) {
				memcpy((char *)icp->icp_scr + ICP_SCRATCH_UCMD,
				    ucmd->data, iu->iu_cnt);
				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap,
				    ICP_SCRATCH_UCMD, iu->iu_cnt,
				    BUS_DMASYNC_PREREAD |
				    BUS_DMASYNC_PREWRITE);
			}
		} else if (__predict_true((ic =
				SIMPLEQ_FIRST(&icp->icp_ccb_queue)) != NULL)) {
			if ((*icp->icp_test_busy)(icp))
				break;
		} else {
			/* no command found */
			break;
		}
		icp_ccb_submit(icp, ic);
		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
			SIMPLEQ_REMOVE_HEAD(&icp->icp_ucmd_queue, ic_chain);
		else
			SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_queue, ic_chain);
	}

	splx(s);
}

int
icp_ccb_map(struct icp_softc *icp, struct icp_ccb *ic, void *data, int size,
	    int dir)
{
	struct icp_sg *sg;
	int nsegs, i, rv;
	bus_dmamap_t xfer;

	xfer = ic->ic_xfer_map;

	rv = bus_dmamap_load(icp->icp_dmat, xfer, data, size, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((dir & IC_XFER_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));
	if (rv != 0)
		return (rv);

	nsegs = xfer->dm_nsegs;
	ic->ic_xfer_size = size;
	ic->ic_nsgent = nsegs;
	ic->ic_flags |= dir;
	sg = ic->ic_sg;

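	/* Fill in the controller's scatter/gather list, converting each
	   segment to little-endian. */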
	if (sg != NULL) {
		for (i = 0; i < nsegs; i++, sg++) {
			sg->sg_addr = htole32(xfer->dm_segs[i].ds_addr);
			sg->sg_len = htole32(xfer->dm_segs[i].ds_len);
		}
	} else if (nsegs > 1)
		panic("icp_ccb_map: no SG list specified, but nsegs > 1");

	if ((dir & IC_XFER_OUT) != 0)
		i = BUS_DMASYNC_PREWRITE;
	else /* if ((dir & IC_XFER_IN) != 0) */
		i = BUS_DMASYNC_PREREAD;

	bus_dmamap_sync(icp->icp_dmat, xfer, 0, ic->ic_xfer_size, i);
	return (0);
}

void
icp_ccb_unmap(struct icp_softc *icp, struct icp_ccb *ic)
{
	int i;

	if ((ic->ic_flags & IC_XFER_OUT) != 0)
		i = BUS_DMASYNC_POSTWRITE;
	else /* if ((ic->ic_flags & IC_XFER_IN) != 0) */
		i = BUS_DMASYNC_POSTREAD;

	bus_dmamap_sync(icp->icp_dmat, ic->ic_xfer_map, 0, ic->ic_xfer_size, i);
	bus_dmamap_unload(icp->icp_dmat, ic->ic_xfer_map);
}

int
icp_ccb_poll(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
	int s, rv;

	s = splbio();

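	/* Each iteration delays 10us, so ICP_BUSY_WAIT_MS * 100 iterations
	   bounds the busy-wait to ICP_BUSY_WAIT_MS milliseconds. */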
	for (timo = ICP_BUSY_WAIT_MS * 100; timo != 0; timo--) {
		if (!(*icp->icp_test_busy)(icp))
			break;
		DELAY(10);
	}
	if (timo == 0) {
		printf("%s: submit: busy\n", device_xname(&icp->icp_dv));
		splx(s);
		return (EAGAIN);
	}

	icp_ccb_submit(icp, ic);

	if (cold) {
		for (timo *= 10; timo != 0; timo--) {
			DELAY(100);
			icp_intr(icp);
			if ((ic->ic_flags & IC_COMPLETE) != 0)
				break;
		}
	} else {
		ic->ic_flags |= IC_WAITING;
		while ((ic->ic_flags & IC_COMPLETE) == 0) {
			if ((rv = tsleep(ic, PRIBIO, "icpwccb",
					 mstohz(timo))) != 0) {
				timo = 0;
				break;
			}
		}
	}

	if (timo != 0) {
		if (ic->ic_status != ICP_S_OK) {
#ifdef ICP_DEBUG
			printf("%s: request failed; status=0x%04x\n",
			    device_xname(&icp->icp_dv), ic->ic_status);
#endif
			rv = EIO;
		} else
			rv = 0;
	} else {
		aprint_error_dev(&icp->icp_dv, "command timed out\n");
		rv = EIO;
	}

	while ((*icp->icp_test_busy)(icp) != 0)
		DELAY(10);

	splx(s);

	return (rv);
}

int
icp_ccb_wait(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
	int s, rv;

	ic->ic_flags |= IC_WAITING;

	s = splbio();
	icp_ccb_enqueue(icp, ic);
	while ((ic->ic_flags & IC_COMPLETE) == 0) {
		if ((rv = tsleep(ic, PRIBIO, "icpwccb", mstohz(timo))) != 0) {
			splx(s);
			return (rv);
		}
	}
	splx(s);

	if (ic->ic_status != ICP_S_OK) {
		aprint_error_dev(&icp->icp_dv, "command failed; status=%x\n",
		    ic->ic_status);
		return (EIO);
	}

	return (0);
}

int
icp_ccb_wait_user(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
	int s, rv;

	ic->ic_dv = &icp->icp_dv;
	ic->ic_intr = icp_ucmd_intr;
	ic->ic_flags |= IC_UCMD;

	s = splbio();
	icp_ccb_enqueue(icp, ic);
	while ((ic->ic_flags & IC_COMPLETE) == 0) {
		if ((rv = tsleep(ic, PRIBIO, "icpwuccb", mstohz(timo))) != 0) {
			splx(s);
			return (rv);
		}
	}
	splx(s);

	return (0);
}

void
icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic)
{

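	/* Round the command length up to a multiple of four bytes. */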
	ic->ic_cmdlen = (ic->ic_cmdlen + 3) & ~3;

	(*icp->icp_set_sema0)(icp);
	DELAY(10);

	ic->ic_cmd.cmd_boardnode = htole32(ICP_LOCALBOARD);
	ic->ic_cmd.cmd_cmdindex = htole32(ic->ic_ident);

	icp->icp_running++;

	(*icp->icp_copy_cmd)(icp, ic);
	(*icp->icp_release_event)(icp, ic);
}

int
icp_freeze(struct icp_softc *icp)
{
	int s, error = 0;

	s = splbio();
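	/* The first freezer waits for all running commands to drain. */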
	if (icp->icp_qfreeze++ == 0) {
		while (icp->icp_running != 0) {
			icp->icp_flags |= ICP_F_WAIT_FREEZE;
			error = tsleep(&icp->icp_qfreeze, PRIBIO|PCATCH,
			    "icpqfrz", 0);
			if (error != 0 && --icp->icp_qfreeze == 0 &&
			    ICP_HAS_WORK(icp)) {
				icp_ccb_enqueue(icp, NULL);
				break;
			}
		}
	}
	splx(s);

	return (error);
}

void
icp_unfreeze(struct icp_softc *icp)
{
	int s;

	s = splbio();
	KDASSERT(icp->icp_qfreeze != 0);
	if (--icp->icp_qfreeze == 0 && ICP_HAS_WORK(icp))
		icp_ccb_enqueue(icp, NULL);
	splx(s);
}

/* XXX Global - should be per-controller? XXX */
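/*
 * The event buffer is used as a ring: icp_event_oldidx names the oldest
 * valid entry and icp_event_lastidx the most recently written one.
 */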
static gdt_evt_str icp_event_buffer[ICP_MAX_EVENTS];
static int icp_event_oldidx;
static int icp_event_lastidx;

gdt_evt_str *
icp_store_event(struct icp_softc *icp, u_int16_t source, u_int16_t idx,
    gdt_evt_data *evt)
{
	gdt_evt_str *e;

	/* no source == no event */
	if (source == 0)
		return (NULL);

	e = &icp_event_buffer[icp_event_lastidx];
	if (e->event_source == source && e->event_idx == idx &&
	    ((evt->size != 0 && e->event_data.size != 0 &&
	      memcmp(&e->event_data.eu, &evt->eu, evt->size) == 0) ||
	     (evt->size == 0 && e->event_data.size == 0 &&
	      strcmp((char *) e->event_data.event_string,
	             (char *) evt->event_string) == 0))) {
		e->last_stamp = time_second;
		e->same_count++;
	} else {
		if (icp_event_buffer[icp_event_lastidx].event_source != 0) {
			icp_event_lastidx++;
			if (icp_event_lastidx == ICP_MAX_EVENTS)
				icp_event_lastidx = 0;
			if (icp_event_lastidx == icp_event_oldidx) {
				icp_event_oldidx++;
				if (icp_event_oldidx == ICP_MAX_EVENTS)
					icp_event_oldidx = 0;
			}
		}
		e = &icp_event_buffer[icp_event_lastidx];
		e->event_source = source;
		e->event_idx = idx;
		e->first_stamp = e->last_stamp = time_second;
		e->same_count = 1;
		e->event_data = *evt;
		e->application = 0;
	}
	return (e);
}

int
icp_read_event(struct icp_softc *icp, int handle, gdt_evt_str *estr)
{
	gdt_evt_str *e;
	int eindex, s;

	s = splbio();

	if (handle == -1)
		eindex = icp_event_oldidx;
	else
		eindex = handle;

	estr->event_source = 0;

	if (eindex < 0 || eindex >= ICP_MAX_EVENTS) {
		splx(s);
		return (eindex);
	}

	e = &icp_event_buffer[eindex];
	if (e->event_source != 0) {
		if (eindex != icp_event_lastidx) {
			eindex++;
			if (eindex == ICP_MAX_EVENTS)
				eindex = 0;
		} else
			eindex = -1;
		memcpy(estr, e, sizeof(gdt_evt_str));
	}

	splx(s);

	return (eindex);
}

void
icp_readapp_event(struct icp_softc *icp, u_int8_t application,
    gdt_evt_str *estr)
{
	gdt_evt_str *e;
	int found = 0, eindex, s;

	s = splbio();

	eindex = icp_event_oldidx;
	for (;;) {
		e = &icp_event_buffer[eindex];
		if (e->event_source == 0)
			break;
		if ((e->application & application) == 0) {
			e->application |= application;
			found = 1;
			break;
		}
		if (eindex == icp_event_lastidx)
			break;
		eindex++;
		if (eindex == ICP_MAX_EVENTS)
			eindex = 0;
	}
	if (found)
		memcpy(estr, e, sizeof(gdt_evt_str));
	else
		estr->event_source = 0;

	splx(s);
}

void
icp_clear_events(struct icp_softc *icp)
{
	int s;

	s = splbio();
	icp_event_oldidx = icp_event_lastidx = 0;
	memset(icp_event_buffer, 0, sizeof(icp_event_buffer));
	splx(s);
}