/*	$NetBSD: icp.c,v 1.12 2003/10/29 00:48:15 mycroft Exp $	*/

/*-
 * Copyright (c) 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, and by Jason R. Thorpe of Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1999, 2000 Niklas Hallqvist.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Niklas Hallqvist.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from OpenBSD: gdt_common.c,v 1.12 2001/07/04 06:43:18 niklas Exp
 */

/*
 * This driver would not have been written if it was not for the hardware
 * donations from both ICP-Vortex and Öko.neT.  I want to thank them for
 * their support.
 *
 * Re-worked for NetBSD by Andrew Doran.  Test hardware kindly supplied by
 * Intel.
 *
 * Support for the ICP-Vortex management tools added by
 * Jason R. Thorpe of Wasabi Systems, Inc., based on code
 * provided by Achim Leubner <achim.leubner@intel.com>.
 *
 * Additional support for dynamic rescan of cacheservice drives by
 * Jason R. Thorpe of Wasabi Systems, Inc.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: icp.c,v 1.12 2003/10/29 00:48:15 mycroft Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/disk.h>

#include <uvm/uvm_extern.h>

#include <machine/bswap.h>
#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/icpreg.h>
#include <dev/ic/icpvar.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

int	icp_async_event(struct icp_softc *, int);
void	icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic);
void	icp_chain(struct icp_softc *);
int	icp_print(void *, const char *);
int	icp_submatch(struct device *, struct cfdata *, void *);
void	icp_watchdog(void *);
void	icp_ucmd_intr(struct icp_ccb *);
void	icp_recompute_openings(struct icp_softc *);

int	icp_count;	/* total # of controllers, for ioctl interface */

/*
 * Statistics for the ioctl interface to query.
 *
 * XXX Global.  They should probably be made per-controller
 * XXX at some point.
 */
gdt_statist_t icp_stats;

int
icp_init(struct icp_softc *icp, const char *intrstr)
{
	struct icp_attach_args icpa;
	struct icp_binfo binfo;
	struct icp_ccb *ic;
	u_int16_t cdev_cnt;
	int i, j, state, feat, nsegs, rv;

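	/*
	 * `state' counts how many of the resources below have been
	 * acquired, so that bail_out can release them in reverse order
	 * if initialization fails part-way through.
	 */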
	state = 0;

	if (intrstr != NULL)
		aprint_normal("%s: interrupting at %s\n", icp->icp_dv.dv_xname,
		    intrstr);

	SIMPLEQ_INIT(&icp->icp_ccb_queue);
	SIMPLEQ_INIT(&icp->icp_ccb_freelist);
	SIMPLEQ_INIT(&icp->icp_ucmd_queue);
	callout_init(&icp->icp_wdog_callout);

	/*
	 * Allocate a scratch area.
	 */
	if (bus_dmamap_create(icp->icp_dmat, ICP_SCRATCH_SIZE, 1,
	    ICP_SCRATCH_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &icp->icp_scr_dmamap) != 0) {
		aprint_error("%s: cannot create scratch dmamap\n",
		    icp->icp_dv.dv_xname);
		return (1);
	}
	state++;

	if (bus_dmamem_alloc(icp->icp_dmat, ICP_SCRATCH_SIZE, PAGE_SIZE, 0,
	    icp->icp_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		aprint_error("%s: cannot alloc scratch dmamem\n",
		    icp->icp_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(icp->icp_dmat, icp->icp_scr_seg, nsegs,
	    ICP_SCRATCH_SIZE, &icp->icp_scr, 0)) {
		aprint_error("%s: cannot map scratch dmamem\n",
		    icp->icp_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(icp->icp_dmat, icp->icp_scr_dmamap, icp->icp_scr,
	    ICP_SCRATCH_SIZE, NULL, BUS_DMA_NOWAIT)) {
		aprint_error("%s: cannot load scratch dmamap\n",
		    icp->icp_dv.dv_xname);
		goto bail_out;
	}
	state++;

	/*
	 * Allocate and initialize the command control blocks.
	 */
	ic = malloc(sizeof(*ic) * ICP_NCCBS, M_DEVBUF, M_NOWAIT | M_ZERO);
	if ((icp->icp_ccbs = ic) == NULL) {
		aprint_error("%s: malloc() failed\n", icp->icp_dv.dv_xname);
		goto bail_out;
	}
	state++;

	for (i = 0; i < ICP_NCCBS; i++, ic++) {
		/*
		 * The first two command indexes have special meanings, so
		 * we can't use them.
		 */
		ic->ic_ident = i + 2;
		rv = bus_dmamap_create(icp->icp_dmat, ICP_MAX_XFER,
		    ICP_MAXSG, ICP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ic->ic_xfer_map);
		if (rv != 0)
			break;
		icp->icp_nccbs++;
		icp_ccb_free(icp, ic);
	}
#ifdef DIAGNOSTIC
	if (icp->icp_nccbs != ICP_NCCBS)
		aprint_error("%s: %d/%d CCBs usable\n", icp->icp_dv.dv_xname,
		    icp->icp_nccbs, ICP_NCCBS);
#endif

	/*
	 * Initialize the controller.
	 */
	if (!icp_cmd(icp, ICP_SCREENSERVICE, ICP_INIT, 0, 0, 0)) {
		aprint_error("%s: screen service init error %d\n",
		    icp->icp_dv.dv_xname, icp->icp_status);
		goto bail_out;
	}

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
		aprint_error("%s: cache service init error %d\n",
		    icp->icp_dv.dv_xname, icp->icp_status);
		goto bail_out;
	}

	icp_cmd(icp, ICP_CACHESERVICE, ICP_UNFREEZE_IO, 0, 0, 0);

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_MOUNT, 0xffff, 1, 0)) {
		aprint_error("%s: cache service mount error %d\n",
		    icp->icp_dv.dv_xname, icp->icp_status);
		goto bail_out;
	}

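	/*
	 * Re-initialize the cache service after the mount; this also
	 * refreshes icp_info with the current host drive count.
	 */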
	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
		aprint_error("%s: cache service post-mount init error %d\n",
		    icp->icp_dv.dv_xname, icp->icp_status);
		goto bail_out;
	}
	cdev_cnt = (u_int16_t)icp->icp_info;
	icp->icp_fw_vers = icp->icp_service;

	if (!icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_INIT, 0, 0, 0)) {
		aprint_error("%s: raw service init error %d\n",
		    icp->icp_dv.dv_xname, icp->icp_status);
		goto bail_out;
	}

	/*
	 * Set/get raw service features (scatter/gather).
	 */
	feat = 0;
	if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_SET_FEAT, ICP_SCATTER_GATHER,
	    0, 0))
		if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_GET_FEAT, 0, 0, 0))
			feat = icp->icp_info;

	if ((feat & ICP_SCATTER_GATHER) == 0) {
#ifdef DIAGNOSTIC
		aprint_normal(
		    "%s: scatter/gather not supported (raw service)\n",
		    icp->icp_dv.dv_xname);
#endif
	} else
		icp->icp_features |= ICP_FEAT_RAWSERVICE;

	/*
	 * Set/get cache service features (scatter/gather).
	 */
	feat = 0;
	if (icp_cmd(icp, ICP_CACHESERVICE, ICP_SET_FEAT, 0,
	    ICP_SCATTER_GATHER, 0))
		if (icp_cmd(icp, ICP_CACHESERVICE, ICP_GET_FEAT, 0, 0, 0))
			feat = icp->icp_info;

	if ((feat & ICP_SCATTER_GATHER) == 0) {
#ifdef DIAGNOSTIC
		aprint_normal(
		    "%s: scatter/gather not supported (cache service)\n",
		    icp->icp_dv.dv_xname);
#endif
	} else
		icp->icp_features |= ICP_FEAT_CACHESERVICE;

	/*
	 * Pull some information from the board and dump.
	 */
	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL, ICP_BOARD_INFO,
	    ICP_INVALID_CHANNEL, sizeof(struct icp_binfo))) {
		aprint_error("%s: unable to retrieve board info\n",
		    icp->icp_dv.dv_xname);
		goto bail_out;
	}
	memcpy(&binfo, icp->icp_scr, sizeof(binfo));

	aprint_normal(
	    "%s: model <%s>, firmware <%s>, %d channel(s), %dMB memory\n",
	    icp->icp_dv.dv_xname, binfo.bi_type_string, binfo.bi_raid_string,
	    binfo.bi_chan_count, le32toh(binfo.bi_memsize) >> 20);

	/*
	 * Determine the number of devices, and number of openings per
	 * device.
	 */
	if (icp->icp_features & ICP_FEAT_CACHESERVICE) {
		for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) {
			if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, j, 0,
			    0))
				continue;

			icp->icp_cdr[j].cd_size = icp->icp_info;
			if (icp->icp_cdr[j].cd_size != 0)
				icp->icp_ndevs++;

			if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, j, 0,
			    0))
				icp->icp_cdr[j].cd_type = icp->icp_info;
		}
	}

	if (icp->icp_features & ICP_FEAT_RAWSERVICE) {
		icp->icp_nchan = binfo.bi_chan_count;
		icp->icp_ndevs += icp->icp_nchan;
	}

	icp_recompute_openings(icp);

	/*
	 * Attach SCSI channels.
	 */
	if (icp->icp_features & ICP_FEAT_RAWSERVICE) {
		struct icp_ioc_version *iv;
		struct icp_rawioc *ri;
		struct icp_getch *gc;

		iv = (struct icp_ioc_version *)icp->icp_scr;
		iv->iv_version = htole32(ICP_IOC_NEWEST);
		iv->iv_listents = ICP_MAXBUS;
		iv->iv_firstchan = 0;
		iv->iv_lastchan = ICP_MAXBUS - 1;
		iv->iv_listoffset = htole32(sizeof(*iv));

		if (icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
		    ICP_IOCHAN_RAW_DESC, ICP_INVALID_CHANNEL,
		    sizeof(*iv) + ICP_MAXBUS * sizeof(*ri))) {
			ri = (struct icp_rawioc *)(iv + 1);
			for (j = 0; j < binfo.bi_chan_count; j++, ri++)
				icp->icp_bus_id[j] = ri->ri_procid;
		} else {
			/*
			 * Fall back to the old method.
			 */
			gc = (struct icp_getch *)icp->icp_scr;

			for (j = 0; j < binfo.bi_chan_count; j++) {
				if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
				    ICP_SCSI_CHAN_CNT | ICP_L_CTRL_PATTERN,
				    ICP_IO_CHANNEL | ICP_INVALID_CHANNEL,
				    sizeof(*gc))) {
					aprint_error(
					    "%s: unable to get chan info\n",
					    icp->icp_dv.dv_xname);
					goto bail_out;
				}
				icp->icp_bus_id[j] = gc->gc_scsiid;
			}
		}

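		/*
		 * Raw SCSI channels attach at units ICPA_UNIT_SCSI and up;
		 * cache devices (below) attach at units 0..ICP_MAX_HDRIVES-1.
		 */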
		for (j = 0; j < binfo.bi_chan_count; j++) {
			if (icp->icp_bus_id[j] > ICP_MAXID_FC)
				icp->icp_bus_id[j] = ICP_MAXID_FC;

			icpa.icpa_unit = j + ICPA_UNIT_SCSI;
			icp->icp_children[icpa.icpa_unit] =
			    config_found_sm(&icp->icp_dv, &icpa, icp_print,
			    icp_submatch);
		}
	}

	/*
	 * Attach cache devices.
	 */
	if (icp->icp_features & ICP_FEAT_CACHESERVICE) {
		for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) {
			if (icp->icp_cdr[j].cd_size == 0)
				continue;

			icpa.icpa_unit = j;
			icp->icp_children[icpa.icpa_unit] =
			    config_found_sm(&icp->icp_dv, &icpa, icp_print,
			    icp_submatch);
		}
	}

	/*
	 * Start the watchdog.
	 */
	icp_watchdog(icp);

	/*
	 * Count the controller, and we're done!
	 */
	icp_count++;

	return (0);

 bail_out:
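	/*
	 * Unwind in the reverse order of the state++ steps above:
	 * 5: per-CCB transfer maps, 4: CCB array, 3: scratch dmamap load,
	 * 2: scratch memory mapping, 1: scratch memory.  The scratch
	 * dmamap itself is always destroyed, since state is at least 1
	 * when we get here.
	 */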
	if (state > 4)
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(icp->icp_dmat,
			    icp->icp_ccbs[j].ic_xfer_map);
	if (state > 3)
		free(icp->icp_ccbs, M_DEVBUF);
	if (state > 2)
		bus_dmamap_unload(icp->icp_dmat, icp->icp_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(icp->icp_dmat, icp->icp_scr,
		    ICP_SCRATCH_SIZE);
	if (state > 0)
		bus_dmamem_free(icp->icp_dmat, icp->icp_scr_seg, nsegs);
	bus_dmamap_destroy(icp->icp_dmat, icp->icp_scr_dmamap);

	return (1);
}

void
icp_register_servicecb(struct icp_softc *icp, int unit,
    const struct icp_servicecb *cb)
{

	icp->icp_servicecb[unit] = cb;
}

void
icp_rescan(struct icp_softc *icp, int unit)
{
	struct icp_attach_args icpa;
	u_int newsize, newtype;

	/*
	 * NOTE: It is very important that the queue be frozen and no
	 * commands running when this is called.  The ioctl mutex must
	 * also be held.
	 */

	KASSERT(icp->icp_qfreeze != 0);
	KASSERT(icp->icp_running == 0);
	KASSERT(unit < ICP_MAX_HDRIVES);

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, unit, 0, 0)) {
#ifdef ICP_DEBUG
		printf("%s: rescan: unit %d ICP_INFO failed -> 0x%04x\n",
		    icp->icp_dv.dv_xname, unit, icp->icp_status);
#endif
		goto gone;
	}
	if ((newsize = icp->icp_info) == 0) {
#ifdef ICP_DEBUG
		printf("%s: rescan: unit %d has zero size\n",
		    icp->icp_dv.dv_xname, unit);
#endif
 gone:
		/*
		 * Host drive is no longer present; detach if a child
		 * is currently there.
		 */
		if (icp->icp_cdr[unit].cd_size != 0)
			icp->icp_ndevs--;
		icp->icp_cdr[unit].cd_size = 0;
		if (icp->icp_children[unit] != NULL) {
			(void) config_detach(icp->icp_children[unit],
			    DETACH_FORCE);
			icp->icp_children[unit] = NULL;
		}
		return;
	}

	if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, unit, 0, 0))
		newtype = icp->icp_info;
	else {
#ifdef ICP_DEBUG
		printf("%s: rescan: unit %d ICP_DEVTYPE failed\n",
		    icp->icp_dv.dv_xname, unit);
#endif
		newtype = 0;	/* XXX? */
	}

#ifdef ICP_DEBUG
	printf("%s: rescan: unit %d old %u/%u, new %u/%u\n",
	    icp->icp_dv.dv_xname, unit, icp->icp_cdr[unit].cd_size,
	    icp->icp_cdr[unit].cd_type, newsize, newtype);
#endif

	/*
	 * If the type or size changed, detach any old child (if it exists)
	 * and attach a new one.
	 */
	if (icp->icp_children[unit] == NULL ||
	    newsize != icp->icp_cdr[unit].cd_size ||
	    newtype != icp->icp_cdr[unit].cd_type) {
		if (icp->icp_cdr[unit].cd_size == 0)
			icp->icp_ndevs++;
		icp->icp_cdr[unit].cd_size = newsize;
		icp->icp_cdr[unit].cd_type = newtype;
		if (icp->icp_children[unit] != NULL)
			(void) config_detach(icp->icp_children[unit],
			    DETACH_FORCE);

		icpa.icpa_unit = unit;
		icp->icp_children[unit] = config_found_sm(&icp->icp_dv, &icpa,
		    icp_print, icp_submatch);
	}

	icp_recompute_openings(icp);
}

void
icp_rescan_all(struct icp_softc *icp)
{
	int unit;
	u_int16_t cdev_cnt;

	/*
	 * This is the old method of rescanning the host drives.  We
	 * start by reinitializing the cache service.
	 */
	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
		printf("%s: unable to re-initialize cache service for rescan\n",
		    icp->icp_dv.dv_xname);
		return;
	}
	cdev_cnt = (u_int16_t) icp->icp_info;

	/* For each host drive, do the new-style rescan. */
	for (unit = 0; unit < cdev_cnt && unit < ICP_MAX_HDRIVES; unit++)
		icp_rescan(icp, unit);

	/* Now detach anything in the slots after cdev_cnt. */
	for (; unit < ICP_MAX_HDRIVES; unit++) {
		if (icp->icp_cdr[unit].cd_size != 0) {
#ifdef ICP_DEBUG
			printf("%s: rescan all: unit %d >= new cdev_cnt (%d)\n",
			    icp->icp_dv.dv_xname, unit, cdev_cnt);
#endif
			icp->icp_ndevs--;
			icp->icp_cdr[unit].cd_size = 0;
			if (icp->icp_children[unit] != NULL) {
				(void) config_detach(icp->icp_children[unit],
				    DETACH_FORCE);
				icp->icp_children[unit] = NULL;
			}
		}
	}

	icp_recompute_openings(icp);
}

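/*
 * Distribute the available CCBs (less a reserve) evenly across all
 * attached devices, and notify each child driver of its new number
 * of openings.
 */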
void
icp_recompute_openings(struct icp_softc *icp)
{
	int unit, openings;

	if (icp->icp_ndevs != 0)
		openings =
		    (icp->icp_nccbs - ICP_NCCB_RESERVE) / icp->icp_ndevs;
	else
		openings = 0;
	if (openings == icp->icp_openings)
		return;
	icp->icp_openings = openings;

#ifdef ICP_DEBUG
	printf("%s: %d device%s, %d openings per device\n",
	    icp->icp_dv.dv_xname, icp->icp_ndevs,
	    icp->icp_ndevs == 1 ? "" : "s", icp->icp_openings);
#endif

	for (unit = 0; unit < ICP_MAX_HDRIVES + ICP_MAXBUS; unit++) {
		if (icp->icp_children[unit] != NULL)
			(*icp->icp_servicecb[unit]->iscb_openings)(
			    icp->icp_children[unit], icp->icp_openings);
	}
}

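/*
 * Watchdog: periodically run the interrupt handler to pick up any
 * completions that may have been missed, and restart the command
 * queue if there is work pending.
 */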
void
icp_watchdog(void *cookie)
{
	struct icp_softc *icp;
	int s;

	icp = cookie;

	s = splbio();
	icp_intr(icp);
	if (ICP_HAS_WORK(icp))
		icp_ccb_enqueue(icp, NULL);
	splx(s);

	callout_reset(&icp->icp_wdog_callout, hz * ICP_WATCHDOG_FREQ,
	    icp_watchdog, icp);
}

int
icp_print(void *aux, const char *pnp)
{
	struct icp_attach_args *icpa;
	const char *str;

	icpa = (struct icp_attach_args *)aux;

	if (pnp != NULL) {
		if (icpa->icpa_unit < ICPA_UNIT_SCSI)
			str = "block device";
		else
			str = "SCSI channel";
		aprint_normal("%s at %s", str, pnp);
	}
	aprint_normal(" unit %d", icpa->icpa_unit);

	return (UNCONF);
}

int
icp_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct icp_attach_args *icpa;

	icpa = (struct icp_attach_args *)aux;

	if (cf->icpacf_unit != ICPCF_UNIT_DEFAULT &&
	    cf->icpacf_unit != icpa->icpa_unit)
		return (0);

	return (config_match(parent, cf, aux));
}

int
icp_async_event(struct icp_softc *icp, int service)
{

	if (service == ICP_SCREENSERVICE) {
		if (icp->icp_status == ICP_S_MSG_REQUEST) {
			/* XXX */
		}
	} else {
		if ((icp->icp_fw_vers & 0xff) >= 0x1a) {
			icp->icp_evt.size = 0;
			icp->icp_evt.eu.async.ionode = icp->icp_dv.dv_unit;
			icp->icp_evt.eu.async.status = icp->icp_status;
			/*
			 * Severity and event string are filled in by the
			 * hardware interface interrupt handler.
			 */
			printf("%s: %s\n", icp->icp_dv.dv_xname,
			    icp->icp_evt.event_string);
		} else {
			icp->icp_evt.size = sizeof(icp->icp_evt.eu.async);
			icp->icp_evt.eu.async.ionode = icp->icp_dv.dv_unit;
			icp->icp_evt.eu.async.service = service;
			icp->icp_evt.eu.async.status = icp->icp_status;
			icp->icp_evt.eu.async.info = icp->icp_info;
			/* XXXJRT FIX THIS */
			*(u_int32_t *) icp->icp_evt.eu.async.scsi_coord =
			    icp->icp_info2;
		}
		icp_store_event(icp, GDT_ES_ASYNC, service, &icp->icp_evt);
	}

	return (0);
}

int
icp_intr(void *cookie)
{
	struct icp_softc *icp;
	struct icp_intr_ctx ctx;
	struct icp_ccb *ic;

	icp = cookie;

	ctx.istatus = (*icp->icp_get_status)(icp);
	if (!ctx.istatus) {
		icp->icp_status = ICP_S_NO_STATUS;
		return (0);
	}

	(*icp->icp_intr)(icp, &ctx);

	icp->icp_status = ctx.cmd_status;
	icp->icp_service = ctx.service;
	icp->icp_info = ctx.info;
	icp->icp_info2 = ctx.info2;

	switch (ctx.istatus) {
	case ICP_ASYNCINDEX:
		icp_async_event(icp, ctx.service);
		return (1);

	case ICP_SPEZINDEX:
		printf("%s: uninitialized or unknown service (%d/%d)\n",
		    icp->icp_dv.dv_xname, ctx.info, ctx.info2);
		icp->icp_evt.size = sizeof(icp->icp_evt.eu.driver);
		icp->icp_evt.eu.driver.ionode = icp->icp_dv.dv_unit;
		icp_store_event(icp, GDT_ES_DRIVER, 4, &icp->icp_evt);
		return (1);
	}

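	/*
	 * Anything else is a command identifier.  CCB idents start at 2
	 * (indexes 0 and 1 are reserved), hence the bias below.
	 */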
	if ((ctx.istatus - 2) >= icp->icp_nccbs)
		panic("icp_intr: bad command index returned");

	ic = &icp->icp_ccbs[ctx.istatus - 2];
	ic->ic_status = icp->icp_status;

	if ((ic->ic_flags & IC_ALLOCED) == 0) {
		/* XXX ICP's "iir" driver just sends an event here. */
		panic("icp_intr: inactive CCB identified");
	}

	/*
	 * Try to protect ourselves from the running command count already
	 * being 0 (e.g. if a polled command times out).
	 */
	KDASSERT(icp->icp_running != 0);
	if (--icp->icp_running == 0 &&
	    (icp->icp_flags & ICP_F_WAIT_FREEZE) != 0) {
		icp->icp_flags &= ~ICP_F_WAIT_FREEZE;
		wakeup(&icp->icp_qfreeze);
	}

	switch (icp->icp_status) {
	case ICP_S_BSY:
#ifdef ICP_DEBUG
		printf("%s: ICP_S_BSY received\n", icp->icp_dv.dv_xname);
#endif
		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
			SIMPLEQ_INSERT_HEAD(&icp->icp_ucmd_queue, ic, ic_chain);
		else
			SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_queue, ic, ic_chain);
		break;

	default:
		ic->ic_flags |= IC_COMPLETE;

		if ((ic->ic_flags & IC_WAITING) != 0)
			wakeup(ic);
		else if (ic->ic_intr != NULL)
			(*ic->ic_intr)(ic);

		if (ICP_HAS_WORK(icp))
			icp_ccb_enqueue(icp, NULL);

		break;
	}

	return (1);
}

struct icp_ucmd_ctx {
	gdt_ucmd_t *iu_ucmd;
	u_int32_t iu_cnt;
};

void
icp_ucmd_intr(struct icp_ccb *ic)
{
	struct icp_softc *icp = (void *) ic->ic_dv;
	struct icp_ucmd_ctx *iu = ic->ic_context;
	gdt_ucmd_t *ucmd = iu->iu_ucmd;

	ucmd->status = icp->icp_status;
	ucmd->info = icp->icp_info;

	if (iu->iu_cnt != 0) {
		bus_dmamap_sync(icp->icp_dmat,
		    icp->icp_scr_dmamap,
		    ICP_SCRATCH_UCMD, iu->iu_cnt,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		memcpy(ucmd->data,
		    icp->icp_scr + ICP_SCRATCH_UCMD, iu->iu_cnt);
	}

	icp->icp_ucmd_ccb = NULL;

	ic->ic_flags |= IC_COMPLETE;
	wakeup(ic);
}

/*
 * NOTE: We assume that it is safe to sleep here!
 */
int
icp_cmd(struct icp_softc *icp, u_int8_t service, u_int16_t opcode,
	u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
{
	struct icp_ioctlcmd *icmd;
	struct icp_cachecmd *cc;
	struct icp_rawcmd *rc;
	int retries, rv;
	struct icp_ccb *ic;

	retries = ICP_RETRIES;

	do {
		ic = icp_ccb_alloc_wait(icp);
		memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd));
		ic->ic_cmd.cmd_opcode = htole16(opcode);

		switch (service) {
		case ICP_CACHESERVICE:
			if (opcode == ICP_IOCTL) {
				icmd = &ic->ic_cmd.cmd_packet.ic;
				icmd->ic_subfunc = htole16(arg1);
				icmd->ic_channel = htole32(arg2);
				icmd->ic_bufsize = htole32(arg3);
				icmd->ic_addr =
				    htole32(icp->icp_scr_seg[0].ds_addr);

				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap, 0, arg3,
				    BUS_DMASYNC_PREWRITE |
				    BUS_DMASYNC_PREREAD);
			} else {
				cc = &ic->ic_cmd.cmd_packet.cc;
				cc->cc_deviceno = htole16(arg1);
				cc->cc_blockno = htole32(arg2);
			}
			break;

		case ICP_SCSIRAWSERVICE:
			rc = &ic->ic_cmd.cmd_packet.rc;
			rc->rc_direction = htole32(arg1);
			rc->rc_bus = arg2;
			rc->rc_target = arg3;
			rc->rc_lun = arg3 >> 8;
			break;
		}

		ic->ic_service = service;
		ic->ic_cmdlen = sizeof(ic->ic_cmd);
		rv = icp_ccb_poll(icp, ic, 10000);

		switch (service) {
		case ICP_CACHESERVICE:
			if (opcode == ICP_IOCTL) {
				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap, 0, arg3,
				    BUS_DMASYNC_POSTWRITE |
				    BUS_DMASYNC_POSTREAD);
			}
			break;
		}

		icp_ccb_free(icp, ic);
	} while (rv != 0 && --retries > 0);

	return (icp->icp_status == ICP_S_OK);
}

int
icp_ucmd(struct icp_softc *icp, gdt_ucmd_t *ucmd)
{
	struct icp_ccb *ic;
	struct icp_ucmd_ctx iu;
	u_int32_t cnt;
	int error;

	if (ucmd->service == ICP_CACHESERVICE) {
		if (ucmd->command.cmd_opcode == ICP_IOCTL) {
			cnt = ucmd->command.cmd_packet.ic.ic_bufsize;
			if (cnt > GDT_SCRATCH_SZ) {
				printf("%s: scratch buffer too small (%d/%d)\n",
				    icp->icp_dv.dv_xname, GDT_SCRATCH_SZ, cnt);
				return (EINVAL);
			}
		} else {
			cnt = ucmd->command.cmd_packet.cc.cc_blockcnt *
			    ICP_SECTOR_SIZE;
			if (cnt > GDT_SCRATCH_SZ) {
				printf("%s: scratch buffer too small (%d/%d)\n",
				    icp->icp_dv.dv_xname, GDT_SCRATCH_SZ, cnt);
				return (EINVAL);
			}
		}
	} else {
		cnt = ucmd->command.cmd_packet.rc.rc_sdlen +
		    ucmd->command.cmd_packet.rc.rc_sense_len;
		if (cnt > GDT_SCRATCH_SZ) {
			printf("%s: scratch buffer too small (%d/%d)\n",
			    icp->icp_dv.dv_xname, GDT_SCRATCH_SZ, cnt);
			return (EINVAL);
		}
	}

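	/*
	 * User command data is staged through the controller scratch area
	 * at ICP_SCRATCH_UCMD: icp_ccb_enqueue() copies it in before the
	 * command is submitted, and icp_ucmd_intr() copies it back out.
	 */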
	iu.iu_ucmd = ucmd;
	iu.iu_cnt = cnt;

	ic = icp_ccb_alloc_wait(icp);
	memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd));
	ic->ic_cmd.cmd_opcode = htole16(ucmd->command.cmd_opcode);

	if (ucmd->service == ICP_CACHESERVICE) {
		if (ucmd->command.cmd_opcode == ICP_IOCTL) {
			struct icp_ioctlcmd *icmd, *uicmd;

			icmd = &ic->ic_cmd.cmd_packet.ic;
			uicmd = &ucmd->command.cmd_packet.ic;

			icmd->ic_subfunc = htole16(uicmd->ic_subfunc);
			icmd->ic_channel = htole32(uicmd->ic_channel);
			icmd->ic_bufsize = htole32(uicmd->ic_bufsize);
			icmd->ic_addr =
			    htole32(icp->icp_scr_seg[0].ds_addr +
				    ICP_SCRATCH_UCMD);
		} else {
			struct icp_cachecmd *cc, *ucc;

			cc = &ic->ic_cmd.cmd_packet.cc;
			ucc = &ucmd->command.cmd_packet.cc;

			cc->cc_deviceno = htole16(ucc->cc_deviceno);
			cc->cc_blockno = htole32(ucc->cc_blockno);
			cc->cc_blockcnt = htole32(ucc->cc_blockcnt);
			cc->cc_addr = htole32(0xffffffffU);
			cc->cc_nsgent = htole32(1);
			cc->cc_sg[0].sg_addr =
			    htole32(icp->icp_scr_seg[0].ds_addr +
				    ICP_SCRATCH_UCMD);
			cc->cc_sg[0].sg_len = htole32(cnt);
		}
	} else {
		struct icp_rawcmd *rc, *urc;

		rc = &ic->ic_cmd.cmd_packet.rc;
		urc = &ucmd->command.cmd_packet.rc;

		rc->rc_direction = htole32(urc->rc_direction);
		rc->rc_sdata = htole32(0xffffffffU);
		rc->rc_sdlen = htole32(urc->rc_sdlen);
		rc->rc_clen = htole32(urc->rc_clen);
		memcpy(rc->rc_cdb, urc->rc_cdb, sizeof(rc->rc_cdb));
		rc->rc_target = urc->rc_target;
		rc->rc_lun = urc->rc_lun;
		rc->rc_bus = urc->rc_bus;
		rc->rc_sense_len = htole32(urc->rc_sense_len);
		rc->rc_sense_addr =
		    htole32(icp->icp_scr_seg[0].ds_addr +
			    ICP_SCRATCH_UCMD + urc->rc_sdlen);
		rc->rc_nsgent = htole32(1);
		rc->rc_sg[0].sg_addr =
		    htole32(icp->icp_scr_seg[0].ds_addr + ICP_SCRATCH_UCMD);
		rc->rc_sg[0].sg_len = htole32(cnt - urc->rc_sense_len);
	}

	ic->ic_service = ucmd->service;
	ic->ic_cmdlen = sizeof(ic->ic_cmd);
	ic->ic_context = &iu;

	/*
	 * XXX What units are ucmd->timeout in?  Until we know, we
	 * XXX just pull a number out of thin air.
	 */
	if (__predict_false((error = icp_ccb_wait_user(icp, ic, 30000)) != 0))
		printf("%s: error %d waiting for ucmd to complete\n",
		    icp->icp_dv.dv_xname, error);

	/* icp_ucmd_intr() has updated ucmd. */
	icp_ccb_free(icp, ic);

	return (error);
}

struct icp_ccb *
icp_ccb_alloc(struct icp_softc *icp)
{
	struct icp_ccb *ic;
	int s;

	s = splbio();
	if (__predict_false((ic =
			     SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL)) {
		splx(s);
		return (NULL);
	}
	SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
	splx(s);

	ic->ic_flags = IC_ALLOCED;
	return (ic);
}

struct icp_ccb *
icp_ccb_alloc_wait(struct icp_softc *icp)
{
	struct icp_ccb *ic;
	int s;

	s = splbio();
	while ((ic = SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL) {
		icp->icp_flags |= ICP_F_WAIT_CCB;
		(void) tsleep(&icp->icp_ccb_freelist, PRIBIO, "icpccb", 0);
	}
	SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
	splx(s);

	ic->ic_flags = IC_ALLOCED;
	return (ic);
}

void
icp_ccb_free(struct icp_softc *icp, struct icp_ccb *ic)
{
	int s;

	s = splbio();
	ic->ic_flags = 0;
	ic->ic_intr = NULL;
	SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_freelist, ic, ic_chain);
	if (__predict_false((icp->icp_flags & ICP_F_WAIT_CCB) != 0)) {
		icp->icp_flags &= ~ICP_F_WAIT_CCB;
		wakeup(&icp->icp_ccb_freelist);
	}
	splx(s);
}

void
icp_ccb_enqueue(struct icp_softc *icp, struct icp_ccb *ic)
{
	int s;

	s = splbio();

	if (ic != NULL) {
		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
			SIMPLEQ_INSERT_TAIL(&icp->icp_ucmd_queue, ic, ic_chain);
		else
			SIMPLEQ_INSERT_TAIL(&icp->icp_ccb_queue, ic, ic_chain);
	}

	for (; icp->icp_qfreeze == 0;) {
		if (__predict_false((ic =
			    SIMPLEQ_FIRST(&icp->icp_ucmd_queue)) != NULL)) {
			struct icp_ucmd_ctx *iu = ic->ic_context;
			gdt_ucmd_t *ucmd = iu->iu_ucmd;

			/*
			 * All user-generated commands share the same
			 * scratch space, so if one is already running,
			 * we have to stall the command queue.
			 */
			if (icp->icp_ucmd_ccb != NULL)
				break;
			if ((*icp->icp_test_busy)(icp))
				break;
			icp->icp_ucmd_ccb = ic;

			if (iu->iu_cnt != 0) {
				memcpy(icp->icp_scr + ICP_SCRATCH_UCMD,
				    ucmd->data, iu->iu_cnt);
				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap,
				    ICP_SCRATCH_UCMD, iu->iu_cnt,
				    BUS_DMASYNC_PREREAD |
				    BUS_DMASYNC_PREWRITE);
			}
		} else if (__predict_true((ic =
				SIMPLEQ_FIRST(&icp->icp_ccb_queue)) != NULL)) {
			if ((*icp->icp_test_busy)(icp))
				break;
		} else {
			/* no command found */
			break;
		}
		icp_ccb_submit(icp, ic);
		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
			SIMPLEQ_REMOVE_HEAD(&icp->icp_ucmd_queue, ic_chain);
		else
			SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_queue, ic_chain);
	}

	splx(s);
}

int
icp_ccb_map(struct icp_softc *icp, struct icp_ccb *ic, void *data, int size,
	    int dir)
{
	struct icp_sg *sg;
	int nsegs, i, rv;
	bus_dmamap_t xfer;

	xfer = ic->ic_xfer_map;

	rv = bus_dmamap_load(icp->icp_dmat, xfer, data, size, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((dir & IC_XFER_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));
	if (rv != 0)
		return (rv);

	nsegs = xfer->dm_nsegs;
	ic->ic_xfer_size = size;
	ic->ic_nsgent = nsegs;
	ic->ic_flags |= dir;
	sg = ic->ic_sg;

	if (sg != NULL) {
		for (i = 0; i < nsegs; i++, sg++) {
			sg->sg_addr = htole32(xfer->dm_segs[i].ds_addr);
			sg->sg_len = htole32(xfer->dm_segs[i].ds_len);
		}
	} else if (nsegs > 1)
		panic("icp_ccb_map: no SG list specified, but nsegs > 1");

	if ((dir & IC_XFER_OUT) != 0)
		i = BUS_DMASYNC_PREWRITE;
	else /* if ((dir & IC_XFER_IN) != 0) */
		i = BUS_DMASYNC_PREREAD;

	bus_dmamap_sync(icp->icp_dmat, xfer, 0, ic->ic_xfer_size, i);
	return (0);
}

void
icp_ccb_unmap(struct icp_softc *icp, struct icp_ccb *ic)
{
	int i;

	if ((ic->ic_flags & IC_XFER_OUT) != 0)
		i = BUS_DMASYNC_POSTWRITE;
	else /* if ((ic->ic_flags & IC_XFER_IN) != 0) */
		i = BUS_DMASYNC_POSTREAD;

	bus_dmamap_sync(icp->icp_dmat, ic->ic_xfer_map, 0, ic->ic_xfer_size, i);
	bus_dmamap_unload(icp->icp_dmat, ic->ic_xfer_map);
}

int
icp_ccb_poll(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
	int s, rv;

	s = splbio();

	for (timo = ICP_BUSY_WAIT_MS * 100; timo != 0; timo--) {
		if (!(*icp->icp_test_busy)(icp))
			break;
		DELAY(10);
	}
	if (timo == 0) {
		printf("%s: submit: busy\n", icp->icp_dv.dv_xname);
		splx(s);
		return (EAGAIN);
	}

	icp_ccb_submit(icp, ic);

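	/*
	 * While cold (i.e. during autoconfiguration), interrupts are not
	 * yet available, so poll for completion; otherwise sleep until
	 * the interrupt handler marks the CCB complete.
	 */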
	if (cold) {
		for (timo *= 10; timo != 0; timo--) {
			DELAY(100);
			icp_intr(icp);
			if ((ic->ic_flags & IC_COMPLETE) != 0)
				break;
		}
	} else {
		ic->ic_flags |= IC_WAITING;
		while ((ic->ic_flags & IC_COMPLETE) == 0) {
			if ((rv = tsleep(ic, PRIBIO, "icpwccb",
					 mstohz(timo))) != 0) {
				timo = 0;
				break;
			}
		}
	}

	if (timo != 0) {
		if (ic->ic_status != ICP_S_OK) {
#ifdef ICP_DEBUG
			printf("%s: request failed; status=0x%04x\n",
			    icp->icp_dv.dv_xname, ic->ic_status);
#endif
			rv = EIO;
		} else
			rv = 0;
	} else {
		printf("%s: command timed out\n", icp->icp_dv.dv_xname);
		rv = EIO;
	}

	while ((*icp->icp_test_busy)(icp) != 0)
		DELAY(10);

	splx(s);

	return (rv);
}

int
icp_ccb_wait(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
	int s, rv;

	ic->ic_flags |= IC_WAITING;

	s = splbio();
	icp_ccb_enqueue(icp, ic);
	while ((ic->ic_flags & IC_COMPLETE) == 0) {
		if ((rv = tsleep(ic, PRIBIO, "icpwccb", mstohz(timo))) != 0) {
			splx(s);
			return (rv);
		}
	}
	splx(s);

	if (ic->ic_status != ICP_S_OK) {
		printf("%s: command failed; status=%x\n", icp->icp_dv.dv_xname,
		    ic->ic_status);
		return (EIO);
	}

	return (0);
}

int
icp_ccb_wait_user(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
	int s, rv;

	ic->ic_dv = &icp->icp_dv;
	ic->ic_intr = icp_ucmd_intr;
	ic->ic_flags |= IC_UCMD;

	s = splbio();
	icp_ccb_enqueue(icp, ic);
	while ((ic->ic_flags & IC_COMPLETE) == 0) {
		if ((rv = tsleep(ic, PRIBIO, "icpwuccb", mstohz(timo))) != 0) {
			splx(s);
			return (rv);
		}
	}
	splx(s);

	return (0);
}

void
icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic)
{

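	/* Round the command length up to a 32-bit boundary. */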
	ic->ic_cmdlen = (ic->ic_cmdlen + 3) & ~3;

	(*icp->icp_set_sema0)(icp);
	DELAY(10);

	ic->ic_cmd.cmd_boardnode = htole32(ICP_LOCALBOARD);
	ic->ic_cmd.cmd_cmdindex = htole32(ic->ic_ident);

	icp->icp_running++;

	(*icp->icp_copy_cmd)(icp, ic);
	(*icp->icp_release_event)(icp, ic);
}

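/*
 * Freeze the command queue and wait for all running commands to drain;
 * icp_unfreeze() restarts the queue.  Used to quiesce the controller,
 * e.g. around icp_rescan().
 */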
int
icp_freeze(struct icp_softc *icp)
{
	int s, error = 0;

	s = splbio();
	if (icp->icp_qfreeze++ == 0) {
		while (icp->icp_running != 0) {
			icp->icp_flags |= ICP_F_WAIT_FREEZE;
			error = tsleep(&icp->icp_qfreeze, PRIBIO|PCATCH,
			    "icpqfrz", 0);
			if (error != 0 && --icp->icp_qfreeze == 0 &&
			    ICP_HAS_WORK(icp)) {
				icp_ccb_enqueue(icp, NULL);
				break;
			}
		}
	}
	splx(s);

	return (error);
}

void
icp_unfreeze(struct icp_softc *icp)
{
	int s;

	s = splbio();
	KDASSERT(icp->icp_qfreeze != 0);
	if (--icp->icp_qfreeze == 0 && ICP_HAS_WORK(icp))
		icp_ccb_enqueue(icp, NULL);
	splx(s);
}

/* XXX Global - should be per-controller? XXX */
static gdt_evt_str icp_event_buffer[ICP_MAX_EVENTS];
static int icp_event_oldidx;
static int icp_event_lastidx;

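/*
 * The event buffer is a ring: icp_event_oldidx is the oldest entry,
 * icp_event_lastidx the most recent.  A new event identical to the
 * last one recorded is coalesced by bumping same_count.
 */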
gdt_evt_str *
icp_store_event(struct icp_softc *icp, u_int16_t source, u_int16_t idx,
    gdt_evt_data *evt)
{
	gdt_evt_str *e;

	/* no source == no event */
	if (source == 0)
		return (NULL);

	e = &icp_event_buffer[icp_event_lastidx];
	if (e->event_source == source && e->event_idx == idx &&
	    ((evt->size != 0 && e->event_data.size != 0 &&
	      memcmp(&e->event_data.eu, &evt->eu, evt->size) == 0) ||
	     (evt->size == 0 && e->event_data.size == 0 &&
	      strcmp((char *) e->event_data.event_string,
		     (char *) evt->event_string) == 0))) {
		e->last_stamp = time.tv_sec;
		e->same_count++;
	} else {
		if (icp_event_buffer[icp_event_lastidx].event_source != 0) {
			icp_event_lastidx++;
			if (icp_event_lastidx == ICP_MAX_EVENTS)
				icp_event_lastidx = 0;
			if (icp_event_lastidx == icp_event_oldidx) {
				icp_event_oldidx++;
				if (icp_event_oldidx == ICP_MAX_EVENTS)
					icp_event_oldidx = 0;
			}
		}
		e = &icp_event_buffer[icp_event_lastidx];
		e->event_source = source;
		e->event_idx = idx;
		e->first_stamp = e->last_stamp = time.tv_sec;
		e->same_count = 1;
		e->event_data = *evt;
		e->application = 0;
	}
	return (e);
}

int
icp_read_event(struct icp_softc *icp, int handle, gdt_evt_str *estr)
{
	gdt_evt_str *e;
	int eindex, s;

	s = splbio();

	if (handle == -1)
		eindex = icp_event_oldidx;
	else
		eindex = handle;

	estr->event_source = 0;

	if (eindex < 0 || eindex >= ICP_MAX_EVENTS) {
		splx(s);
		return (eindex);
	}

	e = &icp_event_buffer[eindex];
	if (e->event_source != 0) {
		if (eindex != icp_event_lastidx) {
			eindex++;
			if (eindex == ICP_MAX_EVENTS)
				eindex = 0;
		} else
			eindex = -1;
		memcpy(estr, e, sizeof(gdt_evt_str));
	}

	splx(s);

	return (eindex);
}

void
icp_readapp_event(struct icp_softc *icp, u_int8_t application,
    gdt_evt_str *estr)
{
	gdt_evt_str *e;
	int found = 0, eindex, s;

	s = splbio();

	eindex = icp_event_oldidx;
	for (;;) {
		e = &icp_event_buffer[eindex];
		if (e->event_source == 0)
			break;
		if ((e->application & application) == 0) {
			e->application |= application;
			found = 1;
			break;
		}
		if (eindex == icp_event_lastidx)
			break;
		eindex++;
		if (eindex == ICP_MAX_EVENTS)
			eindex = 0;
	}
	if (found)
		memcpy(estr, e, sizeof(gdt_evt_str));
	else
		estr->event_source = 0;

	splx(s);
}

void
icp_clear_events(struct icp_softc *icp)
{
	int s;

	s = splbio();
	icp_event_oldidx = icp_event_lastidx = 0;
	memset(icp_event_buffer, 0, sizeof(icp_event_buffer));
	splx(s);
}
1429