xref: /netbsd-src/sys/dev/ic/icp.c (revision ce2c90c7c172d95d2402a5b3d96d8f8e6d138a21)
1 /*	$NetBSD: icp.c,v 1.21 2006/10/12 01:31:00 christos Exp $	*/
2 
3 /*-
4  * Copyright (c) 2002, 2003 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Andrew Doran, and by Jason R. Thorpe of Wasabi Systems, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *        This product includes software developed by the NetBSD
21  *        Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * Copyright (c) 1999, 2000 Niklas Hallqvist.  All rights reserved.
41  *
42  * Redistribution and use in source and binary forms, with or without
43  * modification, are permitted provided that the following conditions
44  * are met:
45  * 1. Redistributions of source code must retain the above copyright
46  *    notice, this list of conditions and the following disclaimer.
47  * 2. Redistributions in binary form must reproduce the above copyright
48  *    notice, this list of conditions and the following disclaimer in the
49  *    documentation and/or other materials provided with the distribution.
50  * 3. All advertising materials mentioning features or use of this software
51  *    must display the following acknowledgement:
52  *	This product includes software developed by Niklas Hallqvist.
53  * 4. The name of the author may not be used to endorse or promote products
54  *    derived from this software without specific prior written permission.
55  *
56  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
57  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
58  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
59  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
60  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
61   * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
62  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
63  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
64  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
65  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
66  *
67  * from OpenBSD: gdt_common.c,v 1.12 2001/07/04 06:43:18 niklas Exp
68  */
69 
70 /*
71  * This driver would not have been written if it was not for the hardware donations
72  * from both ICP-Vortex and Öko.neT.  I want to thank them for their support.
73  *
74  * Re-worked for NetBSD by Andrew Doran.  Test hardware kindly supplied by
75  * Intel.
76  *
77  * Support for the ICP-Vortex management tools added by
78  * Jason R. Thorpe of Wasabi Systems, Inc., based on code
79  * provided by Achim Leubner <achim.leubner@intel.com>.
80  *
81  * Additional support for dynamic rescan of cacheservice drives by
82  * Jason R. Thorpe of Wasabi Systems, Inc.
83  */
84 
85 #include <sys/cdefs.h>
86 __KERNEL_RCSID(0, "$NetBSD: icp.c,v 1.21 2006/10/12 01:31:00 christos Exp $");
87 
88 #include <sys/param.h>
89 #include <sys/systm.h>
90 #include <sys/kernel.h>
91 #include <sys/device.h>
92 #include <sys/queue.h>
93 #include <sys/proc.h>
94 #include <sys/buf.h>
95 #include <sys/endian.h>
96 #include <sys/malloc.h>
97 #include <sys/disk.h>
98 
99 #include <uvm/uvm_extern.h>
100 
101 #include <sys/bswap.h>
102 #include <machine/bus.h>
103 
104 #include <dev/pci/pcireg.h>
105 #include <dev/pci/pcivar.h>
106 #include <dev/pci/pcidevs.h>
107 
108 #include <dev/ic/icpreg.h>
109 #include <dev/ic/icpvar.h>
110 
111 #include <dev/scsipi/scsipi_all.h>
112 #include <dev/scsipi/scsiconf.h>
113 
114 #include "locators.h"
115 
/* Forward declarations of functions private to this file. */
int	icp_async_event(struct icp_softc *, int);
void	icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic);
void	icp_chain(struct icp_softc *);
int	icp_print(void *, const char *);
void	icp_watchdog(void *);
void	icp_ucmd_intr(struct icp_ccb *);
void	icp_recompute_openings(struct icp_softc *);

int	icp_count;	/* total # of controllers, for ioctl interface */

/*
 * Statistics for the ioctl interface to query.
 *
 * XXX Global.  They should probably be made per-controller
 * XXX at some point.
 */
gdt_statist_t icp_stats;
133 
134 int
135 icp_init(struct icp_softc *icp, const char *intrstr)
136 {
137 	struct icp_attach_args icpa;
138 	struct icp_binfo binfo;
139 	struct icp_ccb *ic;
140 	u_int16_t cdev_cnt;
141 	int i, j, state, feat, nsegs, rv;
142 	int locs[ICPCF_NLOCS];
143 
144 	state = 0;
145 
146 	if (intrstr != NULL)
147 		aprint_normal("%s: interrupting at %s\n", icp->icp_dv.dv_xname,
148 		    intrstr);
149 
150 	SIMPLEQ_INIT(&icp->icp_ccb_queue);
151 	SIMPLEQ_INIT(&icp->icp_ccb_freelist);
152 	SIMPLEQ_INIT(&icp->icp_ucmd_queue);
153 	callout_init(&icp->icp_wdog_callout);
154 
155 	/*
156 	 * Allocate a scratch area.
157 	 */
158 	if (bus_dmamap_create(icp->icp_dmat, ICP_SCRATCH_SIZE, 1,
159 	    ICP_SCRATCH_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
160 	    &icp->icp_scr_dmamap) != 0) {
161 		aprint_error("%s: cannot create scratch dmamap\n",
162 		    icp->icp_dv.dv_xname);
163 		return (1);
164 	}
165 	state++;
166 
167 	if (bus_dmamem_alloc(icp->icp_dmat, ICP_SCRATCH_SIZE, PAGE_SIZE, 0,
168 	    icp->icp_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
169 		aprint_error("%s: cannot alloc scratch dmamem\n",
170 		    icp->icp_dv.dv_xname);
171 		goto bail_out;
172 	}
173 	state++;
174 
175 	if (bus_dmamem_map(icp->icp_dmat, icp->icp_scr_seg, nsegs,
176 	    ICP_SCRATCH_SIZE, &icp->icp_scr, 0)) {
177 		aprint_error("%s: cannot map scratch dmamem\n",
178 		    icp->icp_dv.dv_xname);
179 		goto bail_out;
180 	}
181 	state++;
182 
183 	if (bus_dmamap_load(icp->icp_dmat, icp->icp_scr_dmamap, icp->icp_scr,
184 	    ICP_SCRATCH_SIZE, NULL, BUS_DMA_NOWAIT)) {
185 		aprint_error("%s: cannot load scratch dmamap\n",
186 		    icp->icp_dv.dv_xname);
187 		goto bail_out;
188 	}
189 	state++;
190 
191 	/*
192 	 * Allocate and initialize the command control blocks.
193 	 */
194 	ic = malloc(sizeof(*ic) * ICP_NCCBS, M_DEVBUF, M_NOWAIT | M_ZERO);
195 	if ((icp->icp_ccbs = ic) == NULL) {
196 		aprint_error("%s: malloc() failed\n", icp->icp_dv.dv_xname);
197 		goto bail_out;
198 	}
199 	state++;
200 
201 	for (i = 0; i < ICP_NCCBS; i++, ic++) {
202 		/*
203 		 * The first two command indexes have special meanings, so
204 		 * we can't use them.
205 		 */
206 		ic->ic_ident = i + 2;
207 		rv = bus_dmamap_create(icp->icp_dmat, ICP_MAX_XFER,
208 		    ICP_MAXSG, ICP_MAX_XFER, 0,
209 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
210 		    &ic->ic_xfer_map);
211 		if (rv != 0)
212 			break;
213 		icp->icp_nccbs++;
214 		icp_ccb_free(icp, ic);
215 	}
216 #ifdef DIAGNOSTIC
217 	if (icp->icp_nccbs != ICP_NCCBS)
218 		aprint_error("%s: %d/%d CCBs usable\n", icp->icp_dv.dv_xname,
219 		    icp->icp_nccbs, ICP_NCCBS);
220 #endif
221 
222 	/*
223 	 * Initalize the controller.
224 	 */
225 	if (!icp_cmd(icp, ICP_SCREENSERVICE, ICP_INIT, 0, 0, 0)) {
226 		aprint_error("%s: screen service init error %d\n",
227 		    icp->icp_dv.dv_xname, icp->icp_status);
228 		goto bail_out;
229 	}
230 
231 	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
232 		aprint_error("%s: cache service init error %d\n",
233 		    icp->icp_dv.dv_xname, icp->icp_status);
234 		goto bail_out;
235 	}
236 
237 	icp_cmd(icp, ICP_CACHESERVICE, ICP_UNFREEZE_IO, 0, 0, 0);
238 
239 	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_MOUNT, 0xffff, 1, 0)) {
240 		aprint_error("%s: cache service mount error %d\n",
241 		    icp->icp_dv.dv_xname, icp->icp_status);
242 		goto bail_out;
243 	}
244 
245 	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
246 		aprint_error("%s: cache service post-mount init error %d\n",
247 		    icp->icp_dv.dv_xname, icp->icp_status);
248 		goto bail_out;
249 	}
250 	cdev_cnt = (u_int16_t)icp->icp_info;
251 	icp->icp_fw_vers = icp->icp_service;
252 
253 	if (!icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_INIT, 0, 0, 0)) {
254 		aprint_error("%s: raw service init error %d\n",
255 		    icp->icp_dv.dv_xname, icp->icp_status);
256 		goto bail_out;
257 	}
258 
259 	/*
260 	 * Set/get raw service features (scatter/gather).
261 	 */
262 	feat = 0;
263 	if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_SET_FEAT, ICP_SCATTER_GATHER,
264 	    0, 0))
265 		if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_GET_FEAT, 0, 0, 0))
266 			feat = icp->icp_info;
267 
268 	if ((feat & ICP_SCATTER_GATHER) == 0) {
269 #ifdef DIAGNOSTIC
270 		aprint_normal(
271 		    "%s: scatter/gather not supported (raw service)\n",
272 		    icp->icp_dv.dv_xname);
273 #endif
274 	} else
275 		icp->icp_features |= ICP_FEAT_RAWSERVICE;
276 
277 	/*
278 	 * Set/get cache service features (scatter/gather).
279 	 */
280 	feat = 0;
281 	if (icp_cmd(icp, ICP_CACHESERVICE, ICP_SET_FEAT, 0,
282 	    ICP_SCATTER_GATHER, 0))
283 		if (icp_cmd(icp, ICP_CACHESERVICE, ICP_GET_FEAT, 0, 0, 0))
284 			feat = icp->icp_info;
285 
286 	if ((feat & ICP_SCATTER_GATHER) == 0) {
287 #ifdef DIAGNOSTIC
288 		aprint_normal(
289 		    "%s: scatter/gather not supported (cache service)\n",
290 		    icp->icp_dv.dv_xname);
291 #endif
292 	} else
293 		icp->icp_features |= ICP_FEAT_CACHESERVICE;
294 
295 	/*
296 	 * Pull some information from the board and dump.
297 	 */
298 	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL, ICP_BOARD_INFO,
299 	    ICP_INVALID_CHANNEL, sizeof(struct icp_binfo))) {
300 		aprint_error("%s: unable to retrive board info\n",
301 		    icp->icp_dv.dv_xname);
302 		goto bail_out;
303 	}
304 	memcpy(&binfo, icp->icp_scr, sizeof(binfo));
305 
306 	aprint_normal(
307 	    "%s: model <%s>, firmware <%s>, %d channel(s), %dMB memory\n",
308 	    icp->icp_dv.dv_xname, binfo.bi_type_string, binfo.bi_raid_string,
309 	    binfo.bi_chan_count, le32toh(binfo.bi_memsize) >> 20);
310 
311 	/*
312 	 * Determine the number of devices, and number of openings per
313 	 * device.
314 	 */
315 	if (icp->icp_features & ICP_FEAT_CACHESERVICE) {
316 		for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) {
317 			if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, j, 0,
318 			    0))
319 				continue;
320 
321 			icp->icp_cdr[j].cd_size = icp->icp_info;
322 			if (icp->icp_cdr[j].cd_size != 0)
323 				icp->icp_ndevs++;
324 
325 			if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, j, 0,
326 			    0))
327 				icp->icp_cdr[j].cd_type = icp->icp_info;
328 		}
329 	}
330 
331 	if (icp->icp_features & ICP_FEAT_RAWSERVICE) {
332 		icp->icp_nchan = binfo.bi_chan_count;
333 		icp->icp_ndevs += icp->icp_nchan;
334 	}
335 
336 	icp_recompute_openings(icp);
337 
338 	/*
339 	 * Attach SCSI channels.
340 	 */
341 	if (icp->icp_features & ICP_FEAT_RAWSERVICE) {
342 		struct icp_ioc_version *iv;
343 		struct icp_rawioc *ri;
344 		struct icp_getch *gc;
345 
346 		iv = (struct icp_ioc_version *)icp->icp_scr;
347 		iv->iv_version = htole32(ICP_IOC_NEWEST);
348 		iv->iv_listents = ICP_MAXBUS;
349 		iv->iv_firstchan = 0;
350 		iv->iv_lastchan = ICP_MAXBUS - 1;
351 		iv->iv_listoffset = htole32(sizeof(*iv));
352 
353 		if (icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
354 		    ICP_IOCHAN_RAW_DESC, ICP_INVALID_CHANNEL,
355 		    sizeof(*iv) + ICP_MAXBUS * sizeof(*ri))) {
356 			ri = (struct icp_rawioc *)(iv + 1);
357 			for (j = 0; j < binfo.bi_chan_count; j++, ri++)
358 				icp->icp_bus_id[j] = ri->ri_procid;
359 		} else {
360 			/*
361 			 * Fall back to the old method.
362 			 */
363 			gc = (struct icp_getch *)icp->icp_scr;
364 
365 			for (j = 0; j < binfo.bi_chan_count; j++) {
366 				if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
367 				    ICP_SCSI_CHAN_CNT | ICP_L_CTRL_PATTERN,
368 				    ICP_IO_CHANNEL | ICP_INVALID_CHANNEL,
369 				    sizeof(*gc))) {
370 				    	aprint_error(
371 					    "%s: unable to get chan info",
372 				    	    icp->icp_dv.dv_xname);
373 					goto bail_out;
374 				}
375 				icp->icp_bus_id[j] = gc->gc_scsiid;
376 			}
377 		}
378 
379 		for (j = 0; j < binfo.bi_chan_count; j++) {
380 			if (icp->icp_bus_id[j] > ICP_MAXID_FC)
381 				icp->icp_bus_id[j] = ICP_MAXID_FC;
382 
383 			icpa.icpa_unit = j + ICPA_UNIT_SCSI;
384 
385 			locs[ICPCF_UNIT] = j + ICPA_UNIT_SCSI;
386 
387 			icp->icp_children[icpa.icpa_unit] =
388 				config_found_sm_loc(&icp->icp_dv, "icp", locs,
389 					&icpa, icp_print, config_stdsubmatch);
390 		}
391 	}
392 
393 	/*
394 	 * Attach cache devices.
395 	 */
396 	if (icp->icp_features & ICP_FEAT_CACHESERVICE) {
397 		for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) {
398 			if (icp->icp_cdr[j].cd_size == 0)
399 				continue;
400 
401 			icpa.icpa_unit = j;
402 
403 			locs[ICPCF_UNIT] = j;
404 
405 			icp->icp_children[icpa.icpa_unit] =
406 			    config_found_sm_loc(&icp->icp_dv, "icp", locs,
407 				&icpa, icp_print, config_stdsubmatch);
408 		}
409 	}
410 
411 	/*
412 	 * Start the watchdog.
413 	 */
414 	icp_watchdog(icp);
415 
416 	/*
417 	 * Count the controller, and we're done!
418 	 */
419 	icp_count++;
420 
421 	return (0);
422 
423  bail_out:
424 	if (state > 4)
425 		for (j = 0; j < i; j++)
426 			bus_dmamap_destroy(icp->icp_dmat,
427 			    icp->icp_ccbs[j].ic_xfer_map);
428  	if (state > 3)
429 		free(icp->icp_ccbs, M_DEVBUF);
430 	if (state > 2)
431 		bus_dmamap_unload(icp->icp_dmat, icp->icp_scr_dmamap);
432 	if (state > 1)
433 		bus_dmamem_unmap(icp->icp_dmat, icp->icp_scr,
434 		    ICP_SCRATCH_SIZE);
435 	if (state > 0)
436 		bus_dmamem_free(icp->icp_dmat, icp->icp_scr_seg, nsegs);
437 	bus_dmamap_destroy(icp->icp_dmat, icp->icp_scr_dmamap);
438 
439 	return (1);
440 }
441 
/*
 * Register the service callback vector for a child unit.  The callbacks
 * are used to notify the child (e.g. of a change in the number of
 * command openings computed by icp_recompute_openings()).
 */
void
icp_register_servicecb(struct icp_softc *icp, int unit,
    const struct icp_servicecb *cb)
{

	icp->icp_servicecb[unit] = cb;
}
449 
/*
 * Rescan a single cache service host drive, attaching, detaching or
 * re-attaching the corresponding child device as required.  Used by the
 * management ioctl interface when the drive configuration changes.
 */
void
icp_rescan(struct icp_softc *icp, int unit)
{
	struct icp_attach_args icpa;
	u_int newsize, newtype;
	int locs[ICPCF_NLOCS];

	/*
	 * NOTE: It is very important that the queue be frozen and not
	 * commands running when this is called.  The ioctl mutex must
	 * also be held.
	 */

	KASSERT(icp->icp_qfreeze != 0);
	KASSERT(icp->icp_running == 0);
	KASSERT(unit < ICP_MAX_HDRIVES);

	/* Query the drive's current size; failure means it is gone. */
	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, unit, 0, 0)) {
#ifdef ICP_DEBUG
		printf("%s: rescan: unit %d ICP_INFO failed -> 0x%04x\n",
		    icp->icp_dv.dv_xname, unit, icp->icp_status);
#endif
		goto gone;
	}
	if ((newsize = icp->icp_info) == 0) {
#ifdef ICP_DEBUG
		printf("%s: rescan: unit %d has zero size\n",
		    icp->icp_dv.dv_xname, unit);
#endif
 gone:
		/*
		 * Host drive is no longer present; detach if a child
		 * is currently there.
		 */
		if (icp->icp_cdr[unit].cd_size != 0)
			icp->icp_ndevs--;
		icp->icp_cdr[unit].cd_size = 0;
		if (icp->icp_children[unit] != NULL) {
			(void) config_detach(icp->icp_children[unit],
			    DETACH_FORCE);
			icp->icp_children[unit] = NULL;
		}
		return;
	}

	/* Fetch the drive's (possibly changed) device type. */
	if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, unit, 0, 0))
		newtype = icp->icp_info;
	else {
#ifdef ICP_DEBUG
		printf("%s: rescan: unit %d ICP_DEVTYPE failed\n",
		    icp->icp_dv.dv_xname, unit);
#endif
		newtype = 0;	/* XXX? */
	}

#ifdef ICP_DEBUG
	printf("%s: rescan: unit %d old %u/%u, new %u/%u\n",
	    icp->icp_dv.dv_xname, unit, icp->icp_cdr[unit].cd_size,
	    icp->icp_cdr[unit].cd_type, newsize, newtype);
#endif

	/*
	 * If the type or size changed, detach any old child (if it exists)
	 * and attach a new one.
	 */
	if (icp->icp_children[unit] == NULL ||
	    newsize != icp->icp_cdr[unit].cd_size ||
	    newtype != icp->icp_cdr[unit].cd_type) {
		if (icp->icp_cdr[unit].cd_size == 0)
			icp->icp_ndevs++;
		icp->icp_cdr[unit].cd_size = newsize;
		icp->icp_cdr[unit].cd_type = newtype;
		if (icp->icp_children[unit] != NULL)
			(void) config_detach(icp->icp_children[unit],
			    DETACH_FORCE);

		icpa.icpa_unit = unit;

		locs[ICPCF_UNIT] = unit;

		icp->icp_children[unit] = config_found_sm_loc(&icp->icp_dv,
			"icp", locs, &icpa, icp_print, config_stdsubmatch);
	}

	icp_recompute_openings(icp);
}
536 
537 void
538 icp_rescan_all(struct icp_softc *icp)
539 {
540 	int unit;
541 	u_int16_t cdev_cnt;
542 
543 	/*
544 	 * This is the old method of rescanning the host drives.  We
545 	 * start by reinitializing the cache service.
546 	 */
547 	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
548 		printf("%s: unable to re-initialize cache service for rescan\n",
549 		    icp->icp_dv.dv_xname);
550 		return;
551 	}
552 	cdev_cnt = (u_int16_t) icp->icp_info;
553 
554 	/* For each host drive, do the new-style rescan. */
555 	for (unit = 0; unit < cdev_cnt && unit < ICP_MAX_HDRIVES; unit++)
556 		icp_rescan(icp, unit);
557 
558 	/* Now detach anything in the slots after cdev_cnt. */
559 	for (; unit < ICP_MAX_HDRIVES; unit++) {
560 		if (icp->icp_cdr[unit].cd_size != 0) {
561 #ifdef ICP_DEBUG
562 			printf("%s: rescan all: unit %d < new cdev_cnt (%d)\n",
563 			    icp->icp_dv.dv_xname, unit, cdev_cnt);
564 #endif
565 			icp->icp_ndevs--;
566 			icp->icp_cdr[unit].cd_size = 0;
567 			if (icp->icp_children[unit] != NULL) {
568 				(void) config_detach(icp->icp_children[unit],
569 				    DETACH_FORCE);
570 				icp->icp_children[unit] = NULL;
571 			}
572 		}
573 	}
574 
575 	icp_recompute_openings(icp);
576 }
577 
578 void
579 icp_recompute_openings(struct icp_softc *icp)
580 {
581 	int unit, openings;
582 
583 	if (icp->icp_ndevs != 0)
584 		openings =
585 		    (icp->icp_nccbs - ICP_NCCB_RESERVE) / icp->icp_ndevs;
586 	else
587 		openings = 0;
588 	if (openings == icp->icp_openings)
589 		return;
590 	icp->icp_openings = openings;
591 
592 #ifdef ICP_DEBUG
593 	printf("%s: %d device%s, %d openings per device\n",
594 	    icp->icp_dv.dv_xname, icp->icp_ndevs,
595 	    icp->icp_ndevs == 1 ? "" : "s", icp->icp_openings);
596 #endif
597 
598 	for (unit = 0; unit < ICP_MAX_HDRIVES + ICP_MAXBUS; unit++) {
599 		if (icp->icp_children[unit] != NULL)
600 			(*icp->icp_servicecb[unit]->iscb_openings)(
601 			    icp->icp_children[unit], icp->icp_openings);
602 	}
603 }
604 
605 void
606 icp_watchdog(void *cookie)
607 {
608 	struct icp_softc *icp;
609 	int s;
610 
611 	icp = cookie;
612 
613 	s = splbio();
614 	icp_intr(icp);
615 	if (ICP_HAS_WORK(icp))
616 		icp_ccb_enqueue(icp, NULL);
617 	splx(s);
618 
619 	callout_reset(&icp->icp_wdog_callout, hz * ICP_WATCHDOG_FREQ,
620 	    icp_watchdog, icp);
621 }
622 
623 int
624 icp_print(void *aux, const char *pnp)
625 {
626 	struct icp_attach_args *icpa;
627 	const char *str;
628 
629 	icpa = (struct icp_attach_args *)aux;
630 
631 	if (pnp != NULL) {
632 		if (icpa->icpa_unit < ICPA_UNIT_SCSI)
633 			str = "block device";
634 		else
635 			str = "SCSI channel";
636 		aprint_normal("%s at %s", str, pnp);
637 	}
638 	aprint_normal(" unit %d", icpa->icpa_unit);
639 
640 	return (UNCONF);
641 }
642 
/*
 * Handle an asynchronous event reported by the controller.  Screen
 * service message requests are currently ignored; for all other
 * services the event is recorded with icp_store_event() so the
 * management ioctl interface can retrieve it later.
 */
int
icp_async_event(struct icp_softc *icp, int service)
{

	if (service == ICP_SCREENSERVICE) {
		if (icp->icp_status == ICP_S_MSG_REQUEST) {
			/* XXX */
		}
	} else {
		/* Newer firmware (>= x.1a) supplies a formatted string. */
		if ((icp->icp_fw_vers & 0xff) >= 0x1a) {
			icp->icp_evt.size = 0;
			icp->icp_evt.eu.async.ionode =
			    device_unit(&icp->icp_dv);
			icp->icp_evt.eu.async.status = icp->icp_status;
			/*
			 * Severity and event string are filled in by the
			 * hardware interface interrupt handler.
			 */
			printf("%s: %s\n", icp->icp_dv.dv_xname,
			    icp->icp_evt.event_string);
		} else {
			/* Older firmware: record the raw event fields. */
			icp->icp_evt.size = sizeof(icp->icp_evt.eu.async);
			icp->icp_evt.eu.async.ionode =
			    device_unit(&icp->icp_dv);
			icp->icp_evt.eu.async.service = service;
			icp->icp_evt.eu.async.status = icp->icp_status;
			icp->icp_evt.eu.async.info = icp->icp_info;
			/* XXXJRT FIX THIS */
			*(u_int32_t *) icp->icp_evt.eu.async.scsi_coord =
			    icp->icp_info2;
		}
		icp_store_event(icp, GDT_ES_ASYNC, service, &icp->icp_evt);
	}

	return (0);
}
679 
680 int
681 icp_intr(void *cookie)
682 {
683 	struct icp_softc *icp;
684 	struct icp_intr_ctx ctx;
685 	struct icp_ccb *ic;
686 
687 	icp = cookie;
688 
689 	ctx.istatus = (*icp->icp_get_status)(icp);
690 	if (!ctx.istatus) {
691 		icp->icp_status = ICP_S_NO_STATUS;
692 		return (0);
693 	}
694 
695 	(*icp->icp_intr)(icp, &ctx);
696 
697 	icp->icp_status = ctx.cmd_status;
698 	icp->icp_service = ctx.service;
699 	icp->icp_info = ctx.info;
700 	icp->icp_info2 = ctx.info2;
701 
702 	switch (ctx.istatus) {
703 	case ICP_ASYNCINDEX:
704 		icp_async_event(icp, ctx.service);
705 		return (1);
706 
707 	case ICP_SPEZINDEX:
708 		printf("%s: uninitialized or unknown service (%d/%d)\n",
709 		    icp->icp_dv.dv_xname, ctx.info, ctx.info2);
710 		icp->icp_evt.size = sizeof(icp->icp_evt.eu.driver);
711 		icp->icp_evt.eu.driver.ionode = device_unit(&icp->icp_dv);
712 		icp_store_event(icp, GDT_ES_DRIVER, 4, &icp->icp_evt);
713 		return (1);
714 	}
715 
716 	if ((ctx.istatus - 2) > icp->icp_nccbs)
717 		panic("icp_intr: bad command index returned");
718 
719 	ic = &icp->icp_ccbs[ctx.istatus - 2];
720 	ic->ic_status = icp->icp_status;
721 
722 	if ((ic->ic_flags & IC_ALLOCED) == 0) {
723 		/* XXX ICP's "iir" driver just sends an event here. */
724 		panic("icp_intr: inactive CCB identified");
725 	}
726 
727 	/*
728 	 * Try to protect ourselves from the running command count already
729 	 * being 0 (e.g. if a polled command times out).
730 	 */
731 	KDASSERT(icp->icp_running != 0);
732 	if (--icp->icp_running == 0 &&
733 	    (icp->icp_flags & ICP_F_WAIT_FREEZE) != 0) {
734 		icp->icp_flags &= ~ICP_F_WAIT_FREEZE;
735 		wakeup(&icp->icp_qfreeze);
736 	}
737 
738 	switch (icp->icp_status) {
739 	case ICP_S_BSY:
740 #ifdef ICP_DEBUG
741 		printf("%s: ICP_S_BSY received\n", icp->icp_dv.dv_xname);
742 #endif
743 		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
744 			SIMPLEQ_INSERT_HEAD(&icp->icp_ucmd_queue, ic, ic_chain);
745 		else
746 			SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_queue, ic, ic_chain);
747 		break;
748 
749 	default:
750 		ic->ic_flags |= IC_COMPLETE;
751 
752 		if ((ic->ic_flags & IC_WAITING) != 0)
753 			wakeup(ic);
754 		else if (ic->ic_intr != NULL)
755 			(*ic->ic_intr)(ic);
756 
757 		if (ICP_HAS_WORK(icp))
758 			icp_ccb_enqueue(icp, NULL);
759 
760 		break;
761 	}
762 
763 	return (1);
764 }
765 
/*
 * Context handed from icp_ucmd() to icp_ucmd_intr() via ic_context:
 * the user command being executed and the number of bytes of user data
 * staged in the scratch area (0 if none).
 */
struct icp_ucmd_ctx {
	gdt_ucmd_t *iu_ucmd;	/* the user command descriptor */
	u_int32_t iu_cnt;	/* data transfer size in bytes */
};
770 
/*
 * Completion handler for user (management ioctl) commands.  Copies the
 * final status/info and any returned data back into the user command,
 * releases the shared scratch area, and wakes the icp_ucmd() waiter.
 */
void
icp_ucmd_intr(struct icp_ccb *ic)
{
	struct icp_softc *icp = (void *) ic->ic_dv;
	struct icp_ucmd_ctx *iu = ic->ic_context;
	gdt_ucmd_t *ucmd = iu->iu_ucmd;

	ucmd->status = icp->icp_status;
	ucmd->info = icp->icp_info;

	if (iu->iu_cnt != 0) {
		/* Sync the scratch area before reading the result data. */
		bus_dmamap_sync(icp->icp_dmat,
		    icp->icp_scr_dmamap,
		    ICP_SCRATCH_UCMD, iu->iu_cnt,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		memcpy(ucmd->data,
		    icp->icp_scr + ICP_SCRATCH_UCMD, iu->iu_cnt);
	}

	/* Scratch area is free again; the next user command may run. */
	icp->icp_ucmd_ccb = NULL;

	ic->ic_flags |= IC_COMPLETE;
	wakeup(ic);
}
795 
/*
 * Issue a simple internal service command and poll for its completion,
 * retrying up to ICP_RETRIES times.  ICP_IOCTL commands transfer data
 * through the shared scratch area.  Returns non-zero iff the final
 * controller status is ICP_S_OK.
 *
 * NOTE: We assume that it is safe to sleep here!
 */
int
icp_cmd(struct icp_softc *icp, u_int8_t service, u_int16_t opcode,
	u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
{
	struct icp_ioctlcmd *icmd;
	struct icp_cachecmd *cc;
	struct icp_rawcmd *rc;
	int retries, rv;
	struct icp_ccb *ic;

	retries = ICP_RETRIES;

	do {
		/* May sleep waiting for a free CCB. */
		ic = icp_ccb_alloc_wait(icp);
		memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd));
		ic->ic_cmd.cmd_opcode = htole16(opcode);

		switch (service) {
		case ICP_CACHESERVICE:
			if (opcode == ICP_IOCTL) {
				/* Data goes through the scratch area. */
				icmd = &ic->ic_cmd.cmd_packet.ic;
				icmd->ic_subfunc = htole16(arg1);
				icmd->ic_channel = htole32(arg2);
				icmd->ic_bufsize = htole32(arg3);
				icmd->ic_addr =
				    htole32(icp->icp_scr_seg[0].ds_addr);

				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap, 0, arg3,
				    BUS_DMASYNC_PREWRITE |
				    BUS_DMASYNC_PREREAD);
			} else {
				cc = &ic->ic_cmd.cmd_packet.cc;
				cc->cc_deviceno = htole16(arg1);
				cc->cc_blockno = htole32(arg2);
			}
			break;

		case ICP_SCSIRAWSERVICE:
			rc = &ic->ic_cmd.cmd_packet.rc;
			rc->rc_direction = htole32(arg1);
			rc->rc_bus = arg2;
			rc->rc_target = arg3;
			rc->rc_lun = arg3 >> 8;
			break;
		}

		ic->ic_service = service;
		ic->ic_cmdlen = sizeof(ic->ic_cmd);
		rv = icp_ccb_poll(icp, ic, 10000);

		switch (service) {
		case ICP_CACHESERVICE:
			if (opcode == ICP_IOCTL) {
				/* Make any returned data visible. */
				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap, 0, arg3,
				    BUS_DMASYNC_POSTWRITE |
				    BUS_DMASYNC_POSTREAD);
			}
			break;
		}

		icp_ccb_free(icp, ic);
	} while (rv != 0 && --retries > 0);

	return (icp->icp_status == ICP_S_OK);
}
866 
/*
 * Execute a user (management tool) command.  The user data is staged
 * through the shared scratch area at ICP_SCRATCH_UCMD, so commands
 * whose transfer size exceeds GDT_SCRATCH_SZ are rejected with EINVAL.
 * Sleeps until icp_ucmd_intr() completes the command.
 */
int
icp_ucmd(struct icp_softc *icp, gdt_ucmd_t *ucmd)
{
	struct icp_ccb *ic;
	struct icp_ucmd_ctx iu;
	u_int32_t cnt;
	int error;

	/* Work out the transfer size and validate it against the scratch. */
	if (ucmd->service == ICP_CACHESERVICE) {
		if (ucmd->command.cmd_opcode == ICP_IOCTL) {
			cnt = ucmd->command.cmd_packet.ic.ic_bufsize;
			if (cnt > GDT_SCRATCH_SZ) {
				printf("%s: scratch buffer too small (%d/%d)\n",
				    icp->icp_dv.dv_xname, GDT_SCRATCH_SZ, cnt);
				return (EINVAL);
			}
		} else {
			cnt = ucmd->command.cmd_packet.cc.cc_blockcnt *
			    ICP_SECTOR_SIZE;
			if (cnt > GDT_SCRATCH_SZ) {
				printf("%s: scratch buffer too small (%d/%d)\n",
				    icp->icp_dv.dv_xname, GDT_SCRATCH_SZ, cnt);
				return (EINVAL);
			}
		}
	} else {
		cnt = ucmd->command.cmd_packet.rc.rc_sdlen +
		    ucmd->command.cmd_packet.rc.rc_sense_len;
		if (cnt > GDT_SCRATCH_SZ) {
			printf("%s: scratch buffer too small (%d/%d)\n",
			    icp->icp_dv.dv_xname, GDT_SCRATCH_SZ, cnt);
			return (EINVAL);
		}
	}

	iu.iu_ucmd = ucmd;
	iu.iu_cnt = cnt;

	/* May sleep waiting for a free CCB. */
	ic = icp_ccb_alloc_wait(icp);
	memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd));
	ic->ic_cmd.cmd_opcode = htole16(ucmd->command.cmd_opcode);

	/*
	 * Translate the user command packet into the controller's
	 * byte order, redirecting all data pointers into the scratch
	 * area at ICP_SCRATCH_UCMD.
	 */
	if (ucmd->service == ICP_CACHESERVICE) {
		if (ucmd->command.cmd_opcode == ICP_IOCTL) {
			struct icp_ioctlcmd *icmd, *uicmd;

			icmd = &ic->ic_cmd.cmd_packet.ic;
			uicmd = &ucmd->command.cmd_packet.ic;

			icmd->ic_subfunc = htole16(uicmd->ic_subfunc);
			icmd->ic_channel = htole32(uicmd->ic_channel);
			icmd->ic_bufsize = htole32(uicmd->ic_bufsize);
			icmd->ic_addr =
			    htole32(icp->icp_scr_seg[0].ds_addr +
				    ICP_SCRATCH_UCMD);
		} else {
			struct icp_cachecmd *cc, *ucc;

			cc = &ic->ic_cmd.cmd_packet.cc;
			ucc = &ucmd->command.cmd_packet.cc;

			cc->cc_deviceno = htole16(ucc->cc_deviceno);
			cc->cc_blockno = htole32(ucc->cc_blockno);
			cc->cc_blockcnt = htole32(ucc->cc_blockcnt);
			/* 0xffffffff selects scatter/gather mode. */
			cc->cc_addr = htole32(0xffffffffU);
			cc->cc_nsgent = htole32(1);
			cc->cc_sg[0].sg_addr =
			    htole32(icp->icp_scr_seg[0].ds_addr +
				    ICP_SCRATCH_UCMD);
			cc->cc_sg[0].sg_len = htole32(cnt);
		}
	} else {
		struct icp_rawcmd *rc, *urc;

		rc = &ic->ic_cmd.cmd_packet.rc;
		urc = &ucmd->command.cmd_packet.rc;

		rc->rc_direction = htole32(urc->rc_direction);
		/* 0xffffffff selects scatter/gather mode. */
		rc->rc_sdata = htole32(0xffffffffU);
		rc->rc_sdlen = htole32(urc->rc_sdlen);
		rc->rc_clen = htole32(urc->rc_clen);
		memcpy(rc->rc_cdb, urc->rc_cdb, sizeof(rc->rc_cdb));
		rc->rc_target = urc->rc_target;
		rc->rc_lun = urc->rc_lun;
		rc->rc_bus = urc->rc_bus;
		rc->rc_sense_len = htole32(urc->rc_sense_len);
		/* Sense data follows the SCSI data in the scratch area. */
		rc->rc_sense_addr =
		    htole32(icp->icp_scr_seg[0].ds_addr +
			    ICP_SCRATCH_UCMD + urc->rc_sdlen);
		rc->rc_nsgent = htole32(1);
		rc->rc_sg[0].sg_addr =
		    htole32(icp->icp_scr_seg[0].ds_addr + ICP_SCRATCH_UCMD);
		rc->rc_sg[0].sg_len = htole32(cnt - urc->rc_sense_len);
	}

	ic->ic_service = ucmd->service;
	ic->ic_cmdlen = sizeof(ic->ic_cmd);
	ic->ic_context = &iu;

	/*
	 * XXX What units are ucmd->timeout in?  Until we know, we
	 * XXX just pull a number out of thin air.
	 */
	if (__predict_false((error = icp_ccb_wait_user(icp, ic, 30000)) != 0))
		printf("%s: error %d waiting for ucmd to complete\n",
		    icp->icp_dv.dv_xname, error);

	/* icp_ucmd_intr() has updated ucmd. */
	icp_ccb_free(icp, ic);

	return (error);
}
979 
980 struct icp_ccb *
981 icp_ccb_alloc(struct icp_softc *icp)
982 {
983 	struct icp_ccb *ic;
984 	int s;
985 
986 	s = splbio();
987 	if (__predict_false((ic =
988 			     SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL)) {
989 		splx(s);
990 		return (NULL);
991 	}
992 	SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
993 	splx(s);
994 
995 	ic->ic_flags = IC_ALLOCED;
996 	return (ic);
997 }
998 
999 struct icp_ccb *
1000 icp_ccb_alloc_wait(struct icp_softc *icp)
1001 {
1002 	struct icp_ccb *ic;
1003 	int s;
1004 
1005 	s = splbio();
1006 	while ((ic = SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL) {
1007 		icp->icp_flags |= ICP_F_WAIT_CCB;
1008 		(void) tsleep(&icp->icp_ccb_freelist, PRIBIO, "icpccb", 0);
1009 	}
1010 	SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
1011 	splx(s);
1012 
1013 	ic->ic_flags = IC_ALLOCED;
1014 	return (ic);
1015 }
1016 
1017 void
1018 icp_ccb_free(struct icp_softc *icp, struct icp_ccb *ic)
1019 {
1020 	int s;
1021 
1022 	s = splbio();
1023 	ic->ic_flags = 0;
1024 	ic->ic_intr = NULL;
1025 	SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_freelist, ic, ic_chain);
1026 	if (__predict_false((icp->icp_flags & ICP_F_WAIT_CCB) != 0)) {
1027 		icp->icp_flags &= ~ICP_F_WAIT_CCB;
1028 		wakeup(&icp->icp_ccb_freelist);
1029 	}
1030 	splx(s);
1031 }
1032 
/*
 * Queue a CCB for submission (if `ic' is non-NULL) and then start as
 * many queued commands as the controller will accept.  May be called
 * with ic == NULL simply to drain the queues (see icp_freeze() and
 * icp_unfreeze()).  Protected internally by splbio().
 */
void
icp_ccb_enqueue(struct icp_softc *icp, struct icp_ccb *ic)
{
	int s;

	s = splbio();

	if (ic != NULL) {
		/* User (ioctl-originated) commands go on their own queue. */
		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
			SIMPLEQ_INSERT_TAIL(&icp->icp_ucmd_queue, ic, ic_chain);
		else
			SIMPLEQ_INSERT_TAIL(&icp->icp_ccb_queue, ic, ic_chain);
	}

	/* Submit nothing while the queues are frozen. */
	for (; icp->icp_qfreeze == 0;) {
		/* User commands take priority over the regular queue. */
		if (__predict_false((ic =
			    SIMPLEQ_FIRST(&icp->icp_ucmd_queue)) != NULL)) {
			struct icp_ucmd_ctx *iu = ic->ic_context;
			gdt_ucmd_t *ucmd = iu->iu_ucmd;

			/*
			 * All user-generated commands share the same
			 * scratch space, so if one is already running,
			 * we have to stall the command queue.
			 */
			if (icp->icp_ucmd_ccb != NULL)
				break;
			if ((*icp->icp_test_busy)(icp))
				break;
			icp->icp_ucmd_ccb = ic;

			/* Stage the user's data in the scratch area. */
			if (iu->iu_cnt != 0) {
				memcpy(icp->icp_scr + ICP_SCRATCH_UCMD,
				    ucmd->data, iu->iu_cnt);
				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap,
				    ICP_SCRATCH_UCMD, iu->iu_cnt,
				    BUS_DMASYNC_PREREAD |
				    BUS_DMASYNC_PREWRITE);
			}
		} else if (__predict_true((ic =
				SIMPLEQ_FIRST(&icp->icp_ccb_queue)) != NULL)) {
			if ((*icp->icp_test_busy)(icp))
				break;
		} else {
			/* no command found */
			break;
		}
		/* Hand the command to the controller, then dequeue it. */
		icp_ccb_submit(icp, ic);
		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
			SIMPLEQ_REMOVE_HEAD(&icp->icp_ucmd_queue, ic_chain);
		else
			SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_queue, ic_chain);
	}

	splx(s);
}
1090 
1091 int
1092 icp_ccb_map(struct icp_softc *icp, struct icp_ccb *ic, void *data, int size,
1093 	    int dir)
1094 {
1095 	struct icp_sg *sg;
1096 	int nsegs, i, rv;
1097 	bus_dmamap_t xfer;
1098 
1099 	xfer = ic->ic_xfer_map;
1100 
1101 	rv = bus_dmamap_load(icp->icp_dmat, xfer, data, size, NULL,
1102 	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1103 	    ((dir & IC_XFER_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));
1104 	if (rv != 0)
1105 		return (rv);
1106 
1107 	nsegs = xfer->dm_nsegs;
1108 	ic->ic_xfer_size = size;
1109 	ic->ic_nsgent = nsegs;
1110 	ic->ic_flags |= dir;
1111 	sg = ic->ic_sg;
1112 
1113 	if (sg != NULL) {
1114 		for (i = 0; i < nsegs; i++, sg++) {
1115 			sg->sg_addr = htole32(xfer->dm_segs[i].ds_addr);
1116 			sg->sg_len = htole32(xfer->dm_segs[i].ds_len);
1117 		}
1118 	} else if (nsegs > 1)
1119 		panic("icp_ccb_map: no SG list specified, but nsegs > 1");
1120 
1121 	if ((dir & IC_XFER_OUT) != 0)
1122 		i = BUS_DMASYNC_PREWRITE;
1123 	else /* if ((dir & IC_XFER_IN) != 0) */
1124 		i = BUS_DMASYNC_PREREAD;
1125 
1126 	bus_dmamap_sync(icp->icp_dmat, xfer, 0, ic->ic_xfer_size, i);
1127 	return (0);
1128 }
1129 
1130 void
1131 icp_ccb_unmap(struct icp_softc *icp, struct icp_ccb *ic)
1132 {
1133 	int i;
1134 
1135 	if ((ic->ic_flags & IC_XFER_OUT) != 0)
1136 		i = BUS_DMASYNC_POSTWRITE;
1137 	else /* if ((ic->ic_flags & IC_XFER_IN) != 0) */
1138 		i = BUS_DMASYNC_POSTREAD;
1139 
1140 	bus_dmamap_sync(icp->icp_dmat, ic->ic_xfer_map, 0, ic->ic_xfer_size, i);
1141 	bus_dmamap_unload(icp->icp_dmat, ic->ic_xfer_map);
1142 }
1143 
1144 int
1145 icp_ccb_poll(struct icp_softc *icp, struct icp_ccb *ic, int timo)
1146 {
1147 	int s, rv;
1148 
1149 	s = splbio();
1150 
1151 	for (timo = ICP_BUSY_WAIT_MS * 100; timo != 0; timo--) {
1152 		if (!(*icp->icp_test_busy)(icp))
1153 			break;
1154 		DELAY(10);
1155 	}
1156 	if (timo == 0) {
1157 		printf("%s: submit: busy\n", icp->icp_dv.dv_xname);
1158 		return (EAGAIN);
1159 	}
1160 
1161 	icp_ccb_submit(icp, ic);
1162 
1163 	if (cold) {
1164 		for (timo *= 10; timo != 0; timo--) {
1165 			DELAY(100);
1166 			icp_intr(icp);
1167 			if ((ic->ic_flags & IC_COMPLETE) != 0)
1168 				break;
1169 		}
1170 	} else {
1171 		ic->ic_flags |= IC_WAITING;
1172 		while ((ic->ic_flags & IC_COMPLETE) == 0) {
1173 			if ((rv = tsleep(ic, PRIBIO, "icpwccb",
1174 					 mstohz(timo))) != 0) {
1175 				timo = 0;
1176 				break;
1177 			}
1178 		}
1179 	}
1180 
1181 	if (timo != 0) {
1182 		if (ic->ic_status != ICP_S_OK) {
1183 #ifdef ICP_DEBUG
1184 			printf("%s: request failed; status=0x%04x\n",
1185 			    icp->icp_dv.dv_xname, ic->ic_status);
1186 #endif
1187 			rv = EIO;
1188 		} else
1189 			rv = 0;
1190 	} else {
1191 		printf("%s: command timed out\n", icp->icp_dv.dv_xname);
1192 		rv = EIO;
1193 	}
1194 
1195 	while ((*icp->icp_test_busy)(icp) != 0)
1196 		DELAY(10);
1197 
1198 	splx(s);
1199 
1200 	return (rv);
1201 }
1202 
1203 int
1204 icp_ccb_wait(struct icp_softc *icp, struct icp_ccb *ic, int timo)
1205 {
1206 	int s, rv;
1207 
1208 	ic->ic_flags |= IC_WAITING;
1209 
1210 	s = splbio();
1211 	icp_ccb_enqueue(icp, ic);
1212 	while ((ic->ic_flags & IC_COMPLETE) == 0) {
1213 		if ((rv = tsleep(ic, PRIBIO, "icpwccb", mstohz(timo))) != 0) {
1214 			splx(s);
1215 			return (rv);
1216 		}
1217 	}
1218 	splx(s);
1219 
1220 	if (ic->ic_status != ICP_S_OK) {
1221 		printf("%s: command failed; status=%x\n", icp->icp_dv.dv_xname,
1222 		    ic->ic_status);
1223 		return (EIO);
1224 	}
1225 
1226 	return (0);
1227 }
1228 
1229 int
1230 icp_ccb_wait_user(struct icp_softc *icp, struct icp_ccb *ic, int timo)
1231 {
1232 	int s, rv;
1233 
1234 	ic->ic_dv = &icp->icp_dv;
1235 	ic->ic_intr = icp_ucmd_intr;
1236 	ic->ic_flags |= IC_UCMD;
1237 
1238 	s = splbio();
1239 	icp_ccb_enqueue(icp, ic);
1240 	while ((ic->ic_flags & IC_COMPLETE) == 0) {
1241 		if ((rv = tsleep(ic, PRIBIO, "icpwuccb", mstohz(timo))) != 0) {
1242 			splx(s);
1243 			return (rv);
1244 		}
1245 	}
1246 	splx(s);
1247 
1248 	return (0);
1249 }
1250 
/*
 * Hand a single command to the controller.  Callers in this file have
 * already checked icp_test_busy() and hold splbio().  The sequence
 * (set semaphore 0, settle, copy command, release event) is the
 * controller's submission handshake and must not be reordered.
 */
void
icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic)
{

	/* Round the command length up to a 32-bit boundary. */
	ic->ic_cmdlen = (ic->ic_cmdlen + 3) & ~3;

	(*icp->icp_set_sema0)(icp);
	DELAY(10);

	ic->ic_cmd.cmd_boardnode = htole32(ICP_LOCALBOARD);
	ic->ic_cmd.cmd_cmdindex = htole32(ic->ic_ident);

	/* Count in-flight commands; icp_freeze() waits for this to drain. */
	icp->icp_running++;

	(*icp->icp_copy_cmd)(icp, ic);
	(*icp->icp_release_event)(icp, ic);
}
1268 
/*
 * Freeze the command queues and wait for all in-flight commands to
 * drain.  Freezes nest: only the first freezer (qfreeze 0 -> 1) waits.
 * Returns 0 on success, or the tsleep() error if interrupted by a
 * signal (PCATCH); on a signal the freeze count taken here is dropped
 * again, and if that leaves the queues unfrozen with work pending,
 * they are restarted.
 */
int
icp_freeze(struct icp_softc *icp)
{
	int s, error = 0;

	s = splbio();
	if (icp->icp_qfreeze++ == 0) {
		while (icp->icp_running != 0) {
			icp->icp_flags |= ICP_F_WAIT_FREEZE;
			error = tsleep(&icp->icp_qfreeze, PRIBIO|PCATCH,
			    "icpqfrz", 0);
			/*
			 * NB: the decrement below is evaluated whenever
			 * error != 0 (side effect in the condition).
			 */
			if (error != 0 && --icp->icp_qfreeze == 0 &&
			    ICP_HAS_WORK(icp)) {
				icp_ccb_enqueue(icp, NULL);
				break;
			}
		}
	}
	splx(s);

	return (error);
}
1291 
/*
 * Drop one reference on the queue freeze taken by icp_freeze().  When
 * the last freezer releases and commands are pending, restart the
 * queues.
 */
void
icp_unfreeze(struct icp_softc *icp)
{
	int s;

	s = splbio();
	KDASSERT(icp->icp_qfreeze != 0);
	if (--icp->icp_qfreeze == 0 && ICP_HAS_WORK(icp))
		icp_ccb_enqueue(icp, NULL);
	splx(s);
}
1303 
/*
 * Ring buffer of controller events, shared by all controller
 * instances.  XXX Global - should be per-controller? XXX
 */
static gdt_evt_str icp_event_buffer[ICP_MAX_EVENTS];
static int icp_event_oldidx;	/* index of the oldest valid entry */
static int icp_event_lastidx;	/* index of the most recently stored entry */
1308 
/*
 * Record a controller event in the global event ring.  If the new
 * event matches the most recently stored one, only its timestamp and
 * repeat count are updated; otherwise the ring is advanced (dropping
 * the oldest entry on overflow) and a fresh entry is written.
 * Returns a pointer to the stored entry, or NULL if `source' is 0.
 */
gdt_evt_str *
icp_store_event(struct icp_softc *icp __unused, u_int16_t source, u_int16_t idx,
    gdt_evt_data *evt)
{
	gdt_evt_str *e;

	/* no source == no event */
	if (source == 0)
		return (NULL);

	/*
	 * Compare against the last stored entry: binary payloads are
	 * compared with memcmp(), string events with strcmp().
	 */
	e = &icp_event_buffer[icp_event_lastidx];
	if (e->event_source == source && e->event_idx == idx &&
	    ((evt->size != 0 && e->event_data.size != 0 &&
	      memcmp(&e->event_data.eu, &evt->eu, evt->size) == 0) ||
	     (evt->size == 0 && e->event_data.size == 0 &&
	      strcmp((char *) e->event_data.event_string,
	      	     (char *) evt->event_string) == 0))) {
		/* Duplicate of the last event: just bump the counters. */
		e->last_stamp = time_second;
		e->same_count++;
	} else {
		if (icp_event_buffer[icp_event_lastidx].event_source != 0) {
			/* Advance to the next slot, wrapping around. */
			icp_event_lastidx++;
			if (icp_event_lastidx == ICP_MAX_EVENTS)
				icp_event_lastidx = 0;
			if (icp_event_lastidx == icp_event_oldidx) {
				/* Ring full: discard the oldest entry. */
				icp_event_oldidx++;
				if (icp_event_oldidx == ICP_MAX_EVENTS)
					icp_event_oldidx = 0;
			}
		}
		e = &icp_event_buffer[icp_event_lastidx];
		e->event_source = source;
		e->event_idx = idx;
		e->first_stamp = e->last_stamp = time_second;
		e->same_count = 1;
		e->event_data = *evt;
		e->application = 0;
	}
	return (e);
}
1349 
1350 int
1351 icp_read_event(struct icp_softc *icp __unused, int handle, gdt_evt_str *estr)
1352 {
1353 	gdt_evt_str *e;
1354 	int eindex, s;
1355 
1356 	s = splbio();
1357 
1358 	if (handle == -1)
1359 		eindex = icp_event_oldidx;
1360 	else
1361 		eindex = handle;
1362 
1363 	estr->event_source = 0;
1364 
1365 	if (eindex < 0 || eindex >= ICP_MAX_EVENTS) {
1366 		splx(s);
1367 		return (eindex);
1368 	}
1369 
1370 	e = &icp_event_buffer[eindex];
1371 	if (e->event_source != 0) {
1372 		if (eindex != icp_event_lastidx) {
1373 			eindex++;
1374 			if (eindex == ICP_MAX_EVENTS)
1375 				eindex = 0;
1376 		} else
1377 			eindex = -1;
1378 		memcpy(estr, e, sizeof(gdt_evt_str));
1379 	}
1380 
1381 	splx(s);
1382 
1383 	return (eindex);
1384 }
1385 
1386 void
1387 icp_readapp_event(struct icp_softc *icp __unused, u_int8_t application,
1388     gdt_evt_str *estr)
1389 {
1390 	gdt_evt_str *e;
1391 	int found = 0, eindex, s;
1392 
1393 	s = splbio();
1394 
1395 	eindex = icp_event_oldidx;
1396 	for (;;) {
1397 		e = &icp_event_buffer[eindex];
1398 		if (e->event_source == 0)
1399 			break;
1400 		if ((e->application & application) == 0) {
1401 			e->application |= application;
1402 			found = 1;
1403 			break;
1404 		}
1405 		if (eindex == icp_event_lastidx)
1406 			break;
1407 		eindex++;
1408 		if (eindex == ICP_MAX_EVENTS)
1409 			eindex = 0;
1410 	}
1411 	if (found)
1412 		memcpy(estr, e, sizeof(gdt_evt_str));
1413 	else
1414 		estr->event_source = 0;
1415 
1416 	splx(s);
1417 }
1418 
1419 void
1420 icp_clear_events(struct icp_softc *icp __unused)
1421 {
1422 	int s;
1423 
1424 	s = splbio();
1425 	icp_event_oldidx = icp_event_lastidx = 0;
1426 	memset(icp_event_buffer, 0, sizeof(icp_event_buffer));
1427 	splx(s);
1428 }
1429