xref: /netbsd-src/sys/dev/usb/ugen.c (revision 972fdaff44babb0781c0f95f2aea248ec621cdd2)
1 /*	$NetBSD: ugen.c,v 1.173 2023/07/31 17:41:18 christos Exp $	*/
2 
3 /*
4  * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Lennart Augustsson (lennart@augustsson.net) at
9  * Carlstedt Research & Technology.
10  *
11  * Copyright (c) 2006 BBN Technologies Corp.  All rights reserved.
12  * Effort sponsored in part by the Defense Advanced Research Projects
13  * Agency (DARPA) and the Department of the Interior National Business
14  * Center under agreement number NBCHC050166.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.173 2023/07/31 17:41:18 christos Exp $");
41 
42 #ifdef _KERNEL_OPT
43 #include "opt_compat_netbsd.h"
44 #include "opt_usb.h"
45 #endif
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/kernel.h>
50 #include <sys/kmem.h>
51 #include <sys/device.h>
52 #include <sys/ioctl.h>
53 #include <sys/conf.h>
54 #include <sys/tty.h>
55 #include <sys/file.h>
56 #include <sys/select.h>
57 #include <sys/proc.h>
58 #include <sys/vnode.h>
59 #include <sys/poll.h>
60 #include <sys/compat_stub.h>
61 #include <sys/module.h>
62 #include <sys/rbtree.h>
63 
64 #include <dev/usb/usb.h>
65 #include <dev/usb/usbdi.h>
66 #include <dev/usb/usbdi_util.h>
67 #include <dev/usb/usbhist.h>
68 
69 #include "ioconf.h"
70 
71 #ifdef USB_DEBUG
72 #ifndef UGEN_DEBUG
73 #define ugendebug 0
74 #else
75 
76 #ifndef UGEN_DEBUG_DEFAULT
77 #define UGEN_DEBUG_DEFAULT 0
78 #endif
79 
80 int	ugendebug = UGEN_DEBUG_DEFAULT;
81 
/*
 * Set up the hw.ugen sysctl subtree.  Currently only exposes
 * hw.ugen.debug, an int that gates the DPRINTF/usbhist output
 * (compiled in only under USB_DEBUG && UGEN_DEBUG).
 */
SYSCTL_SETUP(sysctl_hw_ugen_setup, "sysctl hw.ugen setup")
{
	int err;
	const struct sysctlnode *rnode;
	const struct sysctlnode *cnode;

	/* Create the hw.ugen node to hang the controls off. */
	err = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "ugen",
	    SYSCTL_DESCR("ugen global controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);

	if (err)
		goto fail;

	/* control debugging printfs */
	err = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Enable debugging output"),
	    NULL, 0, &ugendebug, sizeof(ugendebug), CTL_CREATE, CTL_EOL);
	if (err)
		goto fail;

	return;
fail:
	aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
}
108 
109 #endif /* UGEN_DEBUG */
110 #endif /* USB_DEBUG */
111 
112 #define DPRINTF(FMT,A,B,C,D)    USBHIST_LOGN(ugendebug,1,FMT,A,B,C,D)
113 #define DPRINTFN(N,FMT,A,B,C,D) USBHIST_LOGN(ugendebug,N,FMT,A,B,C,D)
114 #define UGENHIST_FUNC()         USBHIST_FUNC()
115 #define UGENHIST_CALLED(name)   USBHIST_CALLED(ugendebug)
116 #define UGENHIST_CALLARGS(FMT,A,B,C,D) \
117 				USBHIST_CALLARGS(ugendebug,FMT,A,B,C,D)
118 #define UGENHIST_CALLARGSN(N,FMT,A,B,C,D) \
119 				USBHIST_CALLARGSN(ugendebug,N,FMT,A,B,C,D)
120 
121 #define	UGEN_CHUNK	128	/* chunk size for read */
122 #define	UGEN_IBSIZE	1020	/* buffer size */
123 #define	UGEN_BBSIZE	1024
124 
125 #define UGEN_NISOREQS	4	/* number of outstanding xfer requests */
126 #define UGEN_NISORFRMS	8	/* number of transactions per req */
127 #define UGEN_NISOFRAMES	(UGEN_NISORFRMS * UGEN_NISOREQS)
128 
129 #define UGEN_BULK_RA_WB_BUFSIZE	16384		/* default buffer size */
130 #define UGEN_BULK_RA_WB_BUFMAX	(1 << 20)	/* maximum allowed buffer */
131 
/*
 * One outstanding isochronous-in request.  UGEN_NISOREQS of these are
 * kept in flight per open isoc endpoint; each carries UGEN_NISORFRMS
 * frames per transfer (see ugenopen()).
 */
struct isoreq {
	struct ugen_endpoint *sce;	/* owning endpoint */
	struct usbd_xfer *xfer;		/* the transfer, NULL when torn down */
	void *dmabuf;			/* xfer's DMA buffer (usbd_get_buffer) */
	uint16_t sizes[UGEN_NISORFRMS];	/* per-frame lengths for this xfer */
};
138 
/*
 * Per-endpoint, per-direction state.  One of these exists for each
 * (endpoint address, IN/OUT) pair in the softc's sc_endpoints array.
 * Everything before UGEN_ENDPOINT_NONZERO_CRUFT is wiped by
 * ugen_clear_endpoints() when the configuration changes.
 */
struct ugen_endpoint {
	struct ugen_softc *sc;		/* backpointer to owning softc */
	usb_endpoint_descriptor_t *edesc; /* NULL if endpoint not in config */
	struct usbd_interface *iface;	/* interface the endpoint belongs to */
	int state;			/* flag bits below */
#define UGEN_SHORT_OK	0x04	/* short xfers are OK */
#define UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
#define UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
#define UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
	struct usbd_pipe *pipeh;	/* open pipe, NULL when closed */
	struct clist q;			/* input queue (interrupt endpoints) */
	u_char *ibuf;		/* start of buffer (circular for isoc) */
	u_char *fill;		/* location for input (isoc) */
	u_char *limit;		/* end of circular buffer (isoc) */
	u_char *cur;		/* current read location (isoc) */
	uint32_t timeout;	/* I/O timeout in ms, USBD_NO_TIMEOUT = none */
	uint32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
	uint32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
	uint32_t ra_wb_used;	 /* how much is in buffer */
	uint32_t ra_wb_xferlen; /* current xfer length for RA/WB */
	struct usbd_xfer *ra_wb_xfer;	/* single xfer used in RA/WB mode */
	struct isoreq isoreqs[UGEN_NISOREQS];	/* in-flight isoc requests */
	/* Keep these last; we don't overwrite them in ugen_set_config() */
#define UGEN_ENDPOINT_NONZERO_CRUFT	offsetof(struct ugen_endpoint, rsel)
	struct selinfo rsel;		/* poll/select/kqueue state */
	kcondvar_t cv;			/* readers/writers sleep here */
};
166 
/*
 * Per-device (or per-interface, for ugenif) driver instance state.
 */
struct ugen_softc {
	device_t sc_dev;		/* base device */
	struct usbd_device *sc_udev;	/* underlying USB device */
	struct rb_node sc_node;		/* linkage in the ugenif unit tree */
	unsigned sc_unit;		/* minor-number unit, key in the tree */

	kmutex_t		sc_lock;	/* protects endpoint I/O state */
	kcondvar_t		sc_detach_cv;	/* detach waits for refs here */

	char sc_is_open[USB_MAX_ENDPOINTS];	/* per-endpoint open flag */
	struct ugen_endpoints sc_endpoints[USB_MAX_ENDPOINTS][2];
#define OUT 0
#define IN  1

	int sc_refcnt;		/* users from ugenif_acquire() */
	char sc_buffer[UGEN_BBSIZE];	/* bounce buffer for bulk/intr I/O */
	u_char sc_dying;	/* set when device is going away */
	u_char sc_attached;	/* attach completed successfully */
};
186 
/*
 * Global registry of attached ugen instances, keyed by unit number, so
 * the cdevsw entry points can map a minor number back to a softc.
 * ugenif.lock protects the tree.
 */
static struct {
	kmutex_t	lock;
	rb_tree_t	tree;
} ugenif __cacheline_aligned;
191 
192 static int
193 compare_ugen(void *cookie, const void *vsca, const void *vscb)
194 {
195 	const struct ugen_softc *sca = vsca;
196 	const struct ugen_softc *scb = vscb;
197 
198 	if (sca->sc_unit < scb->sc_unit)
199 		return -1;
200 	if (sca->sc_unit > scb->sc_unit)
201 		return +1;
202 	return 0;
203 }
204 
205 static int
206 compare_ugen_key(void *cookie, const void *vsc, const void *vk)
207 {
208 	const struct ugen_softc *sc = vsc;
209 	const unsigned *k = vk;
210 
211 	if (sc->sc_unit < *k)
212 		return -1;
213 	if (sc->sc_unit > *k)
214 		return +1;
215 	return 0;
216 }
217 
/* rb-tree ops for the unit-number registry above. */
static const rb_tree_ops_t ugenif_tree_ops = {
	.rbto_compare_nodes = compare_ugen,
	.rbto_compare_key = compare_ugen_key,
	.rbto_node_offset = offsetof(struct ugen_softc, sc_node),
};
223 
/*
 * Assign sc the lowest unused unit number and enter it into the
 * registry.  Walks the tree in unit order; the walk stops at the first
 * gap (i != sc0->sc_unit), leaving i as the first free unit.
 */
static void
ugenif_get_unit(struct ugen_softc *sc)
{
	struct ugen_softc *sc0;
	unsigned i;

	mutex_enter(&ugenif.lock);
	for (i = 0, sc0 = RB_TREE_MIN(&ugenif.tree);
	     sc0 != NULL && i == sc0->sc_unit;
	     i++, sc0 = RB_TREE_NEXT(&ugenif.tree, sc0))
		KASSERT(i < UINT_MAX);
	/* i is now the smallest unit not present in the tree. */
	KASSERT(rb_tree_find_node(&ugenif.tree, &i) == NULL);
	sc->sc_unit = i;
	sc0 = rb_tree_insert_node(&ugenif.tree, sc);
	KASSERT(sc0 == sc);
	KASSERT(rb_tree_find_node(&ugenif.tree, &i) == sc);
	mutex_exit(&ugenif.lock);
}
242 
/*
 * Remove sc from the unit registry and invalidate its unit number
 * (set to (unsigned)-1) so later lookups cannot match it.
 */
static void
ugenif_put_unit(struct ugen_softc *sc)
{

	mutex_enter(&ugenif.lock);
	KASSERT(rb_tree_find_node(&ugenif.tree, &sc->sc_unit) == sc);
	rb_tree_remove_node(&ugenif.tree, sc);
	sc->sc_unit = -1;
	mutex_exit(&ugenif.lock);
}
253 
/*
 * Look up the softc for a unit and take a reference on it, or return
 * NULL if the unit does not exist or the device is dying.  The caller
 * must balance a non-NULL return with ugenif_release().  Both the
 * registry lock and the per-device lock are held across the check so
 * the reference cannot race with detach marking sc_dying.
 */
static struct ugen_softc *
ugenif_acquire(unsigned unit)
{
	struct ugen_softc *sc;

	mutex_enter(&ugenif.lock);
	sc = rb_tree_find_node(&ugenif.tree, &unit);
	if (sc == NULL)
		goto out;
	mutex_enter(&sc->sc_lock);
	if (sc->sc_dying) {
		mutex_exit(&sc->sc_lock);
		sc = NULL;
		goto out;
	}
	KASSERT(sc->sc_refcnt < INT_MAX);
	sc->sc_refcnt++;
	mutex_exit(&sc->sc_lock);
out:	mutex_exit(&ugenif.lock);

	return sc;
}
276 
/*
 * Drop a reference taken by ugenif_acquire().  When the count goes
 * negative, wake anyone sleeping on sc_detach_cv — presumably
 * ugen_detach() biases sc_refcnt downward and waits there for the last
 * user to drain (detach code is not in this view; NOTE(review): confirm
 * against ugen_detach()).
 */
static void
ugenif_release(struct ugen_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	if (--sc->sc_refcnt < 0)
		cv_broadcast(&sc->sc_detach_cv);
	mutex_exit(&sc->sc_lock);
}
286 
287 static dev_type_open(ugenopen);
288 static dev_type_close(ugenclose);
289 static dev_type_read(ugenread);
290 static dev_type_write(ugenwrite);
291 static dev_type_ioctl(ugenioctl);
292 static dev_type_poll(ugenpoll);
293 static dev_type_kqfilter(ugenkqfilter);
294 
/*
 * Character-device switch for /dev/ugenN.EE nodes.  mmap and stop are
 * not supported; D_OTHER marks this as neither tty nor disk.
 */
const struct cdevsw ugen_cdevsw = {
	.d_open = ugenopen,
	.d_close = ugenclose,
	.d_read = ugenread,
	.d_write = ugenwrite,
	.d_ioctl = ugenioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = ugenpoll,
	.d_mmap = nommap,
	.d_kqfilter = ugenkqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER,
};
309 
310 Static void ugenintr(struct usbd_xfer *, void *,
311 		     usbd_status);
312 Static void ugen_isoc_rintr(struct usbd_xfer *, void *,
313 			    usbd_status);
314 Static void ugen_bulkra_intr(struct usbd_xfer *, void *,
315 			     usbd_status);
316 Static void ugen_bulkwb_intr(struct usbd_xfer *, void *,
317 			     usbd_status);
318 Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
319 Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
320 Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
321 			 void *, int, struct lwp *);
322 Static int ugen_set_config(struct ugen_softc *, int, int);
323 Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *,
324 					       int, int *);
325 Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
326 Static int ugen_get_alt_index(struct ugen_softc *, int);
327 Static void ugen_clear_endpoints(struct ugen_softc *);
328 
329 #define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
330 #define UGENENDPOINT(n) (minor(n) & 0xf)
331 #define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))
332 
333 static int	ugenif_match(device_t, cfdata_t, void *);
334 static void	ugenif_attach(device_t, device_t, void *);
335 static int	ugen_match(device_t, cfdata_t, void *);
336 static void	ugen_attach(device_t, device_t, void *);
337 static int	ugen_detach(device_t, int);
338 static int	ugen_activate(device_t, enum devact);
339 
340 CFATTACH_DECL_NEW(ugen, sizeof(struct ugen_softc), ugen_match,
341     ugen_attach, ugen_detach, ugen_activate);
342 CFATTACH_DECL_NEW(ugenif, sizeof(struct ugen_softc), ugenif_match,
343     ugenif_attach, ugen_detach, ugen_activate);
344 
345 /* toggle to control attach priority. -1 means "let autoconf decide" */
346 int ugen_override = -1;
347 
348 static int
349 ugen_match(device_t parent, cfdata_t match, void *aux)
350 {
351 	struct usb_attach_arg *uaa = aux;
352 	int override;
353 
354 	if (ugen_override != -1)
355 		override = ugen_override;
356 	else
357 		override = match->cf_flags & 1;
358 
359 	if (override)
360 		return UMATCH_HIGHEST;
361 	else if (uaa->uaa_usegeneric)
362 		return UMATCH_GENERIC;
363 	else
364 		return UMATCH_NONE;
365 }
366 
/*
 * Autoconf match for the per-interface ugenif attachment: always claim
 * with the highest priority.
 */
static int
ugenif_match(device_t parent, cfdata_t match, void *aux)
{
	/* Assume that they knew what they configured! (see ugenif(4)) */
	return UMATCH_HIGHEST;
}
373 
374 static void
375 ugen_attach(device_t parent, device_t self, void *aux)
376 {
377 	struct usb_attach_arg *uaa = aux;
378 	struct usbif_attach_arg uiaa;
379 
380 	memset(&uiaa, 0, sizeof(uiaa));
381 	uiaa.uiaa_port = uaa->uaa_port;
382 	uiaa.uiaa_vendor = uaa->uaa_vendor;
383 	uiaa.uiaa_product = uaa->uaa_product;
384 	uiaa.uiaa_release = uaa->uaa_release;
385 	uiaa.uiaa_device = uaa->uaa_device;
386 	uiaa.uiaa_configno = -1;
387 	uiaa.uiaa_ifaceno = -1;
388 
389 	ugenif_attach(parent, self, &uiaa);
390 }
391 
/*
 * Common attach for both ugen (whole device, uiaa_ifaceno < 0) and
 * ugenif (single interface).  Initializes locks and per-endpoint
 * selinfo/cv state, selects a configuration when attaching the whole
 * device, builds the endpoint table via ugen_set_config(), and finally
 * publishes the unit in the registry.  On failure it returns without
 * setting sc_attached, leaving the device unusable.
 */
static void
ugenif_attach(device_t parent, device_t self, void *aux)
{
	struct ugen_softc *sc = device_private(self);
	struct usbif_attach_arg *uiaa = aux;
	struct usbd_device *udev;
	char *devinfop;
	usbd_status err;
	int i, dir, conf;

	aprint_naive("\n");
	aprint_normal("\n");

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
	cv_init(&sc->sc_detach_cv, "ugendet");

	devinfop = usbd_devinfo_alloc(uiaa->uiaa_device, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_dev = self;
	sc->sc_udev = udev = uiaa->uiaa_device;

	/*
	 * Initialize the parts of every endpoint that survive
	 * configuration changes (see UGEN_ENDPOINT_NONZERO_CRUFT).
	 */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce;

			sce = &sc->sc_endpoints[i][dir];
			selinit(&sce->rsel);
			cv_init(&sce->cv, "ugensce");
		}
	}

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	if (uiaa->uiaa_ifaceno < 0) {
		/*
		 * If we attach the whole device,
		 * set configuration index 0, the default one.
		 */
		err = usbd_set_config_index(udev, 0, 0);
		if (err) {
			aprint_error_dev(self,
			    "setting configuration index 0 failed\n");
			return;
		}
	}

	/* Get current configuration */
	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;

	/* Set up all the local state for this configuration. */
	err = ugen_set_config(sc, conf, uiaa->uiaa_ifaceno < 0);
	if (err) {
		aprint_error_dev(self, "setting configuration %d failed\n",
		    conf);
		return;
	}

	/* Publish the unit only once the device is fully set up. */
	ugenif_get_unit(sc);
	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev, sc->sc_dev);
	sc->sc_attached = 1;
}
456 
457 Static void
458 ugen_clear_endpoints(struct ugen_softc *sc)
459 {
460 
461 	/* Clear out the old info, but leave the selinfo and cv initialised. */
462 	for (int i = 0; i < USB_MAX_ENDPOINTS; i++) {
463 		for (int dir = OUT; dir <= IN; dir++) {
464 			struct ugen_endpoint *sce = &sc->sc_endpoints[i][dir];
465 			memset(sce, 0, UGEN_ENDPOINT_NONZERO_CRUFT);
466 		}
467 	}
468 }
469 
/*
 * Select USB configuration `configno` (if not already current) and
 * rebuild the per-endpoint table from its descriptors.
 *
 * When `chkopen` is set, fail with USBD_IN_USE if any non-control
 * endpoint is open, and temporarily mark all endpoints open so that
 * ugenopen() cannot race in while we reconfigure.  Requires the kernel
 * lock (sc_is_open is protected by it).  Returns a usbd_status.
 */
Static int
ugen_set_config(struct ugen_softc *sc, int configno, int chkopen)
{
	struct usbd_device *dev = sc->sc_udev;
	usb_config_descriptor_t *cdesc;
	struct usbd_interface *iface;
	usb_endpoint_descriptor_t *ed;
	struct ugen_endpoint *sce;
	uint8_t niface, nendpt;
	int ifaceno, endptno, endpt;
	usbd_status err;
	int dir;

	UGENHIST_FUNC();
	UGENHIST_CALLARGSN(1, "ugen%jd: to configno %jd, sc=%jx",
	    device_unit(sc->sc_dev), configno, (uintptr_t)sc, 0);

	KASSERT(KERNEL_LOCKED_P()); /* sc_is_open */

	if (chkopen) {
		/*
		 * We start at 1, not 0, because we don't care whether the
		 * control endpoint is open or not. It is always present.
		 */
		for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
			if (sc->sc_is_open[endptno]) {
				DPRINTFN(1,
				     "ugen%jd - endpoint %d is open",
				      device_unit(sc->sc_dev), endptno, 0, 0);
				return USBD_IN_USE;
			}

		/* Prevent opening while we're setting the config.  */
		for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++) {
			KASSERT(!sc->sc_is_open[endptno]);
			sc->sc_is_open[endptno] = 1;
		}
	}

	/* Avoid setting the current value. */
	cdesc = usbd_get_config_descriptor(dev);
	if (!cdesc || cdesc->bConfigurationValue != configno) {
		err = usbd_set_config_no(dev, configno, 1);
		if (err)
			goto out;
	}

	/* Drop all stale endpoint state before repopulating it. */
	ugen_clear_endpoints(sc);

	err = usbd_interface_count(dev, &niface);
	if (err)
		goto out;

	/* Record sc/edesc/iface for every endpoint in every interface. */
	for (ifaceno = 0; ifaceno < niface; ifaceno++) {
		DPRINTFN(1, "ifaceno %jd", ifaceno, 0, 0, 0);
		err = usbd_device2interface_handle(dev, ifaceno, &iface);
		if (err)
			goto out;
		err = usbd_endpoint_count(iface, &nendpt);
		if (err)
			goto out;
		for (endptno = 0; endptno < nendpt; endptno++) {
			ed = usbd_interface2endpoint_descriptor(iface,endptno);
			KASSERT(ed != NULL);
			endpt = ed->bEndpointAddress;
			dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
			sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
			DPRINTFN(1, "endptno %jd, endpt=0x%02jx (%jd,%jd)",
				 endptno, endpt, UE_GET_ADDR(endpt),
				 UE_GET_DIR(endpt));
			sce->sc = sc;
			sce->edesc = ed;
			sce->iface = iface;
		}
	}
	err = USBD_NORMAL_COMPLETION;

out:	if (chkopen) {
		/*
		 * Allow open again now that we're done trying to set
		 * the config.
		 */
		for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++) {
			KASSERT(sc->sc_is_open[endptno]);
			sc->sc_is_open[endptno] = 0;
		}
	}
	return err;
}
559 
/*
 * Open /dev/ugenN.EE.  The control endpoint (EE == 0) may be opened
 * any number of times; other endpoints are exclusive-open.  For each
 * requested direction (FREAD -> IN, FWRITE -> OUT) the endpoint's pipe
 * is opened according to its transfer type:
 *   UE_INTERRUPT IN: allocate a packet buffer and a clist queue, and
 *     start a repeating interrupt pipe feeding ugenintr();
 *   UE_BULK: plain pipe open; RA/WB defaults are primed for later
 *     USB_SET_BULK_RA/WB ioctls;
 *   UE_ISOCHRONOUS IN: allocate a circular frame buffer and launch
 *     UGEN_NISOREQS streaming requests into ugen_isoc_rintr();
 *   UE_CONTROL (non-zero endpoint) and isoc/interrupt misuses: EINVAL.
 * Requires the kernel lock (sc_is_open).  Returns 0 or an errno.
 */
static int
ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct ugen_softc *sc;
	int unit = UGENUNIT(dev);
	int endpt = UGENENDPOINT(dev);
	usb_endpoint_descriptor_t *edesc;
	struct ugen_endpoint *sce;
	int dir, isize;
	usbd_status err;
	struct usbd_xfer *xfer;
	int i, j;
	int error;
	int opened = 0;

	UGENHIST_FUNC();
	UGENHIST_CALLARGS("flag=%jd, mode=%jd, unit=%jd endpt=%jd",
	    flag, mode, unit, endpt);

	KASSERT(KERNEL_LOCKED_P()); /* sc_is_open */

	if ((sc = ugenif_acquire(unit)) == NULL)
		return ENXIO;

	/* The control endpoint allows multiple opens. */
	if (endpt == USB_CONTROL_ENDPOINT) {
		opened = sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
		error = 0;
		goto out;
	}

	if (sc->sc_is_open[endpt]) {
		error = EBUSY;
		goto out;
	}
	/* Claim the endpoint now; rolled back below on error. */
	opened = sc->sc_is_open[endpt] = 1;

	/* Make sure there are pipes for all directions. */
	for (dir = OUT; dir <= IN; dir++) {
		if (flag & (dir == OUT ? FWRITE : FREAD)) {
			sce = &sc->sc_endpoints[endpt][dir];
			if (sce->edesc == NULL) {
				error = ENXIO;
				goto out;
			}
		}
	}

	/* Actually open the pipes. */
	/* XXX Should back out properly if it fails. */
	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		sce->state = 0;
		sce->timeout = USBD_NO_TIMEOUT;
		DPRINTFN(5, "sc=%jx, endpt=%jd, dir=%jd, sce=%jp",
			     (uintptr_t)sc, endpt, dir, (uintptr_t)sce);
		edesc = sce->edesc;
		switch (edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (dir == OUT) {
				/* Interrupt-out uses plain synchronous I/O. */
				err = usbd_open_pipe(sce->iface,
				    edesc->bEndpointAddress, 0, &sce->pipeh);
				if (err) {
					error = EIO;
					goto out;
				}
				break;
			}
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0) {	/* shouldn't happen */
				error = EINVAL;
				goto out;
			}
			sce->ibuf = kmem_alloc(isize, KM_SLEEP);
			DPRINTFN(5, "intr endpt=%d, isize=%d",
				     endpt, isize, 0, 0);
			if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1) {
				kmem_free(sce->ibuf, isize);
				sce->ibuf = NULL;
				error = ENOMEM;
				goto out;
			}
			/* Repeating transfer; completions go to ugenintr. */
			err = usbd_open_pipe_intr(sce->iface,
				  edesc->bEndpointAddress,
				  USBD_SHORT_XFER_OK, &sce->pipeh, sce,
				  sce->ibuf, isize, ugenintr,
				  USBD_DEFAULT_INTERVAL);
			if (err) {
				clfree(&sce->q);
				kmem_free(sce->ibuf, isize);
				sce->ibuf = NULL;
				error = EIO;
				goto out;
			}
			DPRINTFN(5, "interrupt open done", 0, 0, 0, 0);
			break;
		case UE_BULK:
			err = usbd_open_pipe(sce->iface,
				  edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err) {
				error = EIO;
				goto out;
			}
			sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
			/*
			 * Use request size for non-RA/WB transfers
			 * as the default.
			 */
			sce->ra_wb_reqsize = UGEN_BBSIZE;
			break;
		case UE_ISOCHRONOUS:
			if (dir == OUT) {
				error = EINVAL;
				goto out;
			}
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0) {	/* shouldn't happen */
				error = EINVAL;
				goto out;
			}
			/* Circular buffer: cur/fill chase each other. */
			sce->ibuf = kmem_alloc(isize * UGEN_NISOFRAMES,
				KM_SLEEP);
			sce->cur = sce->fill = sce->ibuf;
			sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
			DPRINTFN(5, "isoc endpt=%d, isize=%d",
				     endpt, isize, 0, 0);
			err = usbd_open_pipe(sce->iface,
				  edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err) {
				kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
				sce->ibuf = NULL;
				error = EIO;
				goto out;
			}
			/* Keep UGEN_NISOREQS requests in flight at once. */
			for (i = 0; i < UGEN_NISOREQS; ++i) {
				sce->isoreqs[i].sce = sce;
				err = usbd_create_xfer(sce->pipeh,
				    isize * UGEN_NISORFRMS, 0, UGEN_NISORFRMS,
				    &xfer);
				if (err)
					goto bad;
				sce->isoreqs[i].xfer = xfer;
				sce->isoreqs[i].dmabuf = usbd_get_buffer(xfer);
				for (j = 0; j < UGEN_NISORFRMS; ++j)
					sce->isoreqs[i].sizes[j] = isize;
				usbd_setup_isoc_xfer(xfer, &sce->isoreqs[i],
				    sce->isoreqs[i].sizes, UGEN_NISORFRMS, 0,
				    ugen_isoc_rintr);
				(void)usbd_transfer(xfer);
			}
			DPRINTFN(5, "isoc open done", 0, 0, 0, 0);
			break;
		bad:
			/* Unwind the xfers created so far, then the pipe. */
			while (--i >= 0) { /* implicit buffer free */
				usbd_destroy_xfer(sce->isoreqs[i].xfer);
				sce->isoreqs[i].xfer = NULL;
			}
			usbd_close_pipe(sce->pipeh);
			sce->pipeh = NULL;
			kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
			sce->ibuf = NULL;
			error = ENOMEM;
			goto out;
		case UE_CONTROL:
			sce->timeout = USBD_DEFAULT_TIMEOUT;
			error = EINVAL;
			goto out;
		}
	}
	error = 0;
out:	if (error && opened)
		sc->sc_is_open[endpt] = 0;
	ugenif_release(sc);
	return error;
}
737 
/*
 * Tear down the pipes opened by ugenopen() for `endpt` in every
 * direction selected by `flag`.  For each open pipe: abort outstanding
 * I/O, release the per-xfertype resources (clist queue, isoc xfers, or
 * RA/WB xfer), close the pipe, and free the data buffer.  Always
 * clears sc_is_open[endpt] and asserts everything is gone.  Requires
 * the kernel lock (sc_is_open).
 */
static void
ugen_do_close(struct ugen_softc *sc, int flag, int endpt)
{
	struct ugen_endpoint *sce;
	int dir;
	int i;

	UGENHIST_FUNC();
	UGENHIST_CALLARGS("flag=%jd endpt=%jd", flag, endpt, 0, 0);

	KASSERT(KERNEL_LOCKED_P()); /* sc_is_open */

	if (!sc->sc_is_open[endpt])
		goto out;

	/* The control endpoint has no pipes of its own to close. */
	if (endpt == USB_CONTROL_ENDPOINT) {
		DPRINTFN(5, "close control", 0, 0, 0, 0);
		goto out;
	}

	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		if (sce->pipeh == NULL)
			continue;
		DPRINTFN(5, "endpt=%jd dir=%jd sce=%jx",
			     endpt, dir, (uintptr_t)sce, 0);

		/* Stop in-flight I/O before freeing anything. */
		usbd_abort_pipe(sce->pipeh);

		int isize = UGETW(sce->edesc->wMaxPacketSize);
		int msize = 0;	/* size of sce->ibuf to free, if any */

		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			ndflush(&sce->q, sce->q.c_cc);
			clfree(&sce->q);
			msize = isize;
			break;
		case UE_ISOCHRONOUS:
			for (i = 0; i < UGEN_NISOREQS; ++i) {
				usbd_destroy_xfer(sce->isoreqs[i].xfer);
				sce->isoreqs[i].xfer = NULL;
			}
			msize = isize * UGEN_NISOFRAMES;
			break;
		case UE_BULK:
			/* ibuf/xfer exist only if RA or WB was enabled. */
			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB)) {
				usbd_destroy_xfer(sce->ra_wb_xfer);
				sce->ra_wb_xfer = NULL;
				msize = sce->ra_wb_bufsize;
			}
			break;
		default:
			break;
		}
		usbd_close_pipe(sce->pipeh);
		sce->pipeh = NULL;
		if (sce->ibuf != NULL) {
			kmem_free(sce->ibuf, msize);
			sce->ibuf = NULL;
		}
	}

out:	sc->sc_is_open[endpt] = 0;
	/* Sanity: both directions must be fully torn down now. */
	for (dir = OUT; dir <= IN; dir++) {
		sce = &sc->sc_endpoints[endpt][dir];
		KASSERT(sce->pipeh == NULL);
		KASSERT(sce->ibuf == NULL);
		KASSERT(sce->ra_wb_xfer == NULL);
		for (i = 0; i < UGEN_NISOREQS; i++)
			KASSERT(sce->isoreqs[i].xfer == NULL);
	}
}
813 
/*
 * cdevsw close entry point: look up the unit, delegate the real work
 * to ugen_do_close(), and drop the reference.  Requires the kernel
 * lock.  Returns 0, or ENXIO if the unit has vanished.
 */
static int
ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;

	UGENHIST_FUNC();
	UGENHIST_CALLARGS("flag=%jd, mode=%jd, unit=%jd, endpt=%jd",
	    flag, mode, UGENUNIT(dev), endpt);

	KASSERT(KERNEL_LOCKED_P()); /* ugen_do_close */

	if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
		return ENXIO;

	KASSERT(sc->sc_is_open[endpt]);
	ugen_do_close(sc, flag, endpt);
	KASSERT(!sc->sc_is_open[endpt]);

	ugenif_release(sc);

	return 0;
}
837 
/*
 * Read from endpoint `endpt` (IN direction) into `uio`.
 *
 * Behavior by transfer type:
 *   UE_INTERRUPT: sleep (interruptibly, bounded by sce->timeout) until
 *     ugenintr() has queued data in the clist, then drain it through
 *     sc_buffer to the user.
 *   UE_BULK with read-ahead enabled: consume from the circular RA
 *     buffer, sleeping while it is empty, and restart the background
 *     transfer if it was stopped on a full buffer.
 *   UE_BULK without RA: synchronous usbd_bulk_transfer() in
 *     UGEN_BBSIZE chunks through sc_buffer.
 *   UE_ISOCHRONOUS: sleep until cur != fill, then copy out of the
 *     circular frame buffer.
 * Returns 0 on success or an errno (EWOULDBLOCK for IO_NDELAY with no
 * data, EIO if the device is dying, ENODEV for the control endpoint).
 */
Static int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
	uint32_t n, tn;
	struct usbd_xfer *xfer;
	usbd_status err;
	int error = 0;

	UGENHIST_FUNC();
	UGENHIST_CALLARGS("ugen%d: %jd", device_unit(sc->sc_dev), endpt, 0, 0);

	if (endpt == USB_CONTROL_ENDPOINT)
		return ENODEV;

	KASSERT(sce->edesc);
	KASSERT(sce->pipeh);

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_INTERRUPT:
		/* Block until activity occurred. */
		mutex_enter(&sc->sc_lock);
		while (sce->q.c_cc == 0) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			DPRINTFN(5, "sleep on %jx", (uintptr_t)sce, 0, 0, 0);
			/* "ugenri" */
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, "woke, error=%jd",
				    error, 0, 0, 0);
			if (sc->sc_dying)
				error = EIO;
			if (error)
				break;
		}
		mutex_exit(&sc->sc_lock);

		/* Transfer as many chunks as possible. */
		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
			n = uimin(sce->q.c_cc, uio->uio_resid);
			if (n > sizeof(sc->sc_buffer))
				n = sizeof(sc->sc_buffer);

			/* Remove a small chunk from the input queue. */
			q_to_b(&sce->q, sc->sc_buffer, n);
			DPRINTFN(5, "got %jd chars", n, 0, 0, 0);

			/* Copy the data to the user process. */
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
		}
		break;
	case UE_BULK:
		if (sce->state & UGEN_BULK_RA) {
			DPRINTFN(5, "BULK_RA req: %zd used: %d",
				     uio->uio_resid, sce->ra_wb_used, 0, 0);
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for the read-ahead buffer to fill. */
				while (sce->ra_wb_used == 0) {
					DPRINTFN(5, "sleep on %jx",
						    (uintptr_t)sce, 0, 0, 0);
					/* "ugenrb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5, "woke, error=%jd",
						    error, 0, 0, 0);
					if (sc->sc_dying)
						error = EIO;
					if (error)
						break;
				}

				/* Copy data to the process. */
				while (uio->uio_resid > 0
				       && sce->ra_wb_used > 0) {
					n = uimin(uio->uio_resid,
						sce->ra_wb_used);
					/* Don't read past the wrap point. */
					n = uimin(n, sce->limit - sce->cur);
					error = uiomove(sce->cur, n, uio);
					if (error)
						break;
					sce->cur += n;
					sce->ra_wb_used -= n;
					if (sce->cur == sce->limit)
						sce->cur = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was full, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = (sce->limit - sce->ibuf)
					    - sce->ra_wb_used;
					usbd_setup_xfer(xfer, sce, NULL,
					    uimin(n, sce->ra_wb_xferlen),
					    0, USBD_NO_TIMEOUT,
					    ugen_bulkra_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try
						 * again at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain synchronous bulk read via the bounce buffer. */
		error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE,
		    0, 0, &xfer);
		if (error)
			return error;
		while ((n = uimin(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			DPRINTFN(1, "start transfer %jd bytes", n, 0, 0, 0);
			tn = n;
			err = usbd_bulk_transfer(xfer, sce->pipeh,
			    sce->state & UGEN_SHORT_OK ? USBD_SHORT_XFER_OK : 0,
			    sce->timeout, sc->sc_buffer, &tn);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
			DPRINTFN(1, "got %jd bytes", tn, 0, 0, 0);
			error = uiomove(sc->sc_buffer, tn, uio);
			/* A short transfer (tn < n) ends the read. */
			if (error || tn < n)
				break;
		}
		usbd_destroy_xfer(xfer);
		break;
	case UE_ISOCHRONOUS:
		mutex_enter(&sc->sc_lock);
		/* Wait until ugen_isoc_rintr() has advanced fill past cur. */
		while (sce->cur == sce->fill) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			/* "ugenri" */
			DPRINTFN(5, "sleep on %jx", (uintptr_t)sce, 0, 0, 0);
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, "woke, error=%jd", error, 0, 0, 0);
			if (sc->sc_dying)
				error = EIO;
			if (error)
				break;
		}

		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
			/* Copy up to the wrap point in one piece. */
			if(sce->fill > sce->cur)
				n = uimin(sce->fill - sce->cur, uio->uio_resid);
			else
				n = uimin(sce->limit - sce->cur, uio->uio_resid);

			DPRINTFN(5, "isoc got %jd chars", n, 0, 0, 0);

			/* Copy the data to the user process. */
			error = uiomove(sce->cur, n, uio);
			if (error)
				break;
			sce->cur += n;
			if (sce->cur >= sce->limit)
				sce->cur = sce->ibuf;
		}
		mutex_exit(&sc->sc_lock);
		break;


	default:
		return ENXIO;
	}
	return error;
}
1031 
1032 static int
1033 ugenread(dev_t dev, struct uio *uio, int flag)
1034 {
1035 	int endpt = UGENENDPOINT(dev);
1036 	struct ugen_softc *sc;
1037 	int error;
1038 
1039 	if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
1040 		return ENXIO;
1041 	error = ugen_do_read(sc, endpt, uio, flag);
1042 	ugenif_release(sc);
1043 
1044 	return error;
1045 }
1046 
/*
 * Write handler for non-control endpoints.  Dispatches on the
 * endpoint transfer type:
 *
 *  - UE_BULK with write-behind (UGEN_BULK_WB): copy user data into
 *    the endpoint ring buffer and (re)start the background transfer.
 *  - UE_BULK without write-behind: synchronous transfers through the
 *    bounce buffer, UGEN_BBSIZE bytes at a time.
 *  - UE_INTERRUPT: synchronous transfers of at most wMaxPacketSize.
 *
 * Returns 0 or an errno.  The caller (ugenwrite) holds a reference
 * on sc.
 */
Static int
ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
	int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
	uint32_t n;
	int error = 0;
	uint32_t tn;
	char *dbuf;
	struct usbd_xfer *xfer;
	usbd_status err;

	UGENHIST_FUNC();
	UGENHIST_CALLARGSN(5, "ugen%jd: %jd",
	    device_unit(sc->sc_dev), endpt, 0, 0);

	/* Control requests go through ugen_do_ioctl(USB_DO_REQUEST). */
	if (endpt == USB_CONTROL_ENDPOINT)
		return ENODEV;

	KASSERT(sce->edesc);
	KASSERT(sce->pipeh);

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_BULK:
		if (sce->state & UGEN_BULK_WB) {
			DPRINTFN(5, "BULK_WB req: %jd used: %jd",
				     uio->uio_resid, sce->ra_wb_used, 0, 0);
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			/* Non-blocking write against a full buffer fails now. */
			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
			    flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait until the ring buffer has free space. */
				while (sce->ra_wb_used ==
				       sce->limit - sce->ibuf) {
					DPRINTFN(5, "sleep on %#jx",
						     (uintptr_t)sce, 0, 0, 0);
					/* "ugenwb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5, "woke, error=%d",
						    error, 0, 0, 0);
					if (sc->sc_dying)
						error = EIO;
					if (error)
						break;
				}

				/* Copy data from the process. */
				while (uio->uio_resid > 0 &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					/* Clip at free space and at the wrap point. */
					n = uimin(uio->uio_resid,
						(sce->limit - sce->ibuf)
						 - sce->ra_wb_used);
					n = uimin(n, sce->limit - sce->fill);
					error = uiomove(sce->fill, n, uio);
					if (error)
						break;
					sce->fill += n;
					sce->ra_wb_used += n;
					if (sce->fill == sce->limit)
						sce->fill = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was empty, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used > 0) {
					/*
					 * Stage up to one xfer's worth of
					 * data, in two pieces if the ring
					 * buffer wraps.
					 */
					dbuf = (char *)usbd_get_buffer(xfer);
					n = uimin(sce->ra_wb_used,
						sce->ra_wb_xferlen);
					tn = uimin(n, sce->limit - sce->cur);
					memcpy(dbuf, sce->cur, tn);
					dbuf += tn;
					if (n - tn > 0)
						memcpy(dbuf, sce->ibuf,
						       n - tn);
					usbd_setup_xfer(xfer, sce, NULL, n,
					    0, USBD_NO_TIMEOUT,
					    ugen_bulkwb_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try again
						 * at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain bulk: synchronous transfers via the bounce buffer. */
		error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE,
		    0, 0, &xfer);
		if (error)
			return error;
		while ((n = uimin(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, "transfer %jd bytes", n, 0, 0, 0);
			err = usbd_bulk_transfer(xfer, sce->pipeh, 0, sce->timeout,
			    sc->sc_buffer, &n);
			/* Map the usbd status onto an errno for userland. */
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_destroy_xfer(xfer);
		break;
	case UE_INTERRUPT:
		/* One packet at a time, bounded by wMaxPacketSize. */
		error = usbd_create_xfer(sce->pipeh,
		    UGETW(sce->edesc->wMaxPacketSize), 0, 0, &xfer);
		if (error)
			return error;
		while ((n = uimin(UGETW(sce->edesc->wMaxPacketSize),
		    uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, "transfer %jd bytes", n, 0, 0, 0);
			err = usbd_intr_transfer(xfer, sce->pipeh, 0,
			    sce->timeout, sc->sc_buffer, &n);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_destroy_xfer(xfer);
		break;
	default:
		/* Isochronous output is not supported. */
		return ENXIO;
	}
	return error;
}
1200 
1201 static int
1202 ugenwrite(dev_t dev, struct uio *uio, int flag)
1203 {
1204 	int endpt = UGENENDPOINT(dev);
1205 	struct ugen_softc *sc;
1206 	int error;
1207 
1208 	if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
1209 		return ENXIO;
1210 	error = ugen_do_write(sc, endpt, uio, flag);
1211 	ugenif_release(sc);
1212 
1213 	return error;
1214 }
1215 
1216 static int
1217 ugen_activate(device_t self, enum devact act)
1218 {
1219 	struct ugen_softc *sc = device_private(self);
1220 
1221 	switch (act) {
1222 	case DVACT_DEACTIVATE:
1223 		sc->sc_dying = 1;
1224 		return 0;
1225 	default:
1226 		return EOPNOTSUPP;
1227 	}
1228 }
1229 
/*
 * Autoconf detach: refuse (EBUSY) if any endpoint is open and the
 * detach is not forced; otherwise mark the device dying, abort all
 * pipes, wait for in-flight I/O to drain, revoke open vnodes, close
 * lingering pipes, and release per-endpoint resources.
 */
static int
ugen_detach(device_t self, int flags)
{
	struct ugen_softc *sc = device_private(self);
	struct ugen_endpoint *sce;
	int i, dir;
	int maj, mn;

	UGENHIST_FUNC();
	UGENHIST_CALLARGS("sc=%ju flags=%ju", (uintptr_t)sc, flags, 0, 0);

	KASSERT(KERNEL_LOCKED_P()); /* sc_is_open */

	/*
	 * Fail if we're not forced to detach and userland has any
	 * endpoints open.
	 */
	if ((flags & DETACH_FORCE) == 0) {
		for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
			if (sc->sc_is_open[i])
				return EBUSY;
		}
	}

	/* Prevent new users.  Prevent suspend/resume.  */
	sc->sc_dying = 1;
	pmf_device_deregister(self);

	/*
	 * If we never finished attaching, skip nixing endpoints and
	 * users because there aren't any.
	 */
	if (!sc->sc_attached)
		goto out;

	/* Abort all pipes.  */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			if (sce->pipeh)
				usbd_abort_pipe(sce->pipeh);
		}
	}

	/*
	 * Wait for users to drain.  Before this point there can be no
	 * more I/O operations started because we set sc_dying; after
	 * this, there can be no more I/O operations in progress, so it
	 * will be safe to free things.
	 */
	mutex_enter(&sc->sc_lock);
	if (--sc->sc_refcnt >= 0) {
		/* Wake everyone */
		for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
			for (dir = OUT; dir <= IN; dir++)
				cv_broadcast(&sc->sc_endpoints[i][dir].cv);
		}
		/* Wait for processes to go away. */
		do {
			cv_wait(&sc->sc_detach_cv, &sc->sc_lock);
		} while (sc->sc_refcnt >= 0);
	}
	mutex_exit(&sc->sc_lock);

	/* locate the major number */
	maj = cdevsw_lookup_major(&ugen_cdevsw);

	/*
	 * Nuke the vnodes for any open instances (calls ugenclose, but
	 * with no effect because we already set sc_dying).
	 */
	mn = sc->sc_unit * USB_MAX_ENDPOINTS;
	vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);

	/* Actually close any lingering pipes.  */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++)
		ugen_do_close(sc, FREAD|FWRITE, i);

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev, sc->sc_dev);
	ugenif_put_unit(sc);

	/* Tear down per-endpoint select/cv state in all cases. */
out:	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			seldestroy(&sce->rsel);
			cv_destroy(&sce->cv);
		}
	}

	cv_destroy(&sc->sc_detach_cv);
	mutex_destroy(&sc->sc_lock);

	return 0;
}
1324 
/*
 * Interrupt-IN pipe completion handler: append the received bytes to
 * the endpoint's clist queue and wake up sleeping/polling readers.
 */
Static void
ugenintr(struct usbd_xfer *xfer, void *addr, usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count;
	u_char *ibuf;

	UGENHIST_FUNC();
	UGENHIST_CALLARGS("xfer %jx status %d", (uintptr_t)xfer, status, 0, 0);

	/* The pipe is being aborted or closed; nothing to do. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF("status=%jd", status, 0, 0, 0);
		/* Clear a stall asynchronously so the pipe can resume. */
		if (status == USBD_STALLED)
		    usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	ibuf = sce->ibuf;

	DPRINTFN(5, "xfer=%#jx status=%d count=%d",
		     (uintptr_t)xfer, status, count, 0);
	/* NOTE(review): debug dump reads ibuf[0..2] even when count < 3. */
	DPRINTFN(5, "          data = %02x %02x %02x",
		     ibuf[0], ibuf[1], ibuf[2], 0);

	/* Queue the data; b_to_q's count of bytes not queued is ignored. */
	mutex_enter(&sc->sc_lock);
	(void)b_to_q(ibuf, count, &sce->q);
	cv_signal(&sce->cv);
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1360 
/*
 * Isochronous-IN completion handler: copies each frame of the
 * completed xfer into the endpoint's ring buffer (discarding the
 * oldest data when the buffer is full), requeues the xfer so the
 * stream keeps running, and wakes readers.
 */
Static void
ugen_isoc_rintr(struct usbd_xfer *xfer, void *addr,
		usbd_status status)
{
	struct isoreq *req = addr;
	struct ugen_endpoint *sce = req->sce;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	int i, isize;

	UGENHIST_FUNC();
	UGENHIST_CALLARGS("xfer=%jx status=%jd", (uintptr_t)xfer, status, 0, 0);

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	DPRINTFN(5, "xfer %ld, count=%d",
	    (long)(req - sce->isoreqs), count, 0, 0);

	mutex_enter(&sc->sc_lock);

	/* throw away oldest input if the buffer is full */
	if (sce->fill < sce->cur && sce->cur <= sce->fill + count) {
		sce->cur += count;
		if (sce->cur >= sce->limit)
			sce->cur = sce->ibuf + (sce->limit - sce->cur);
		DPRINTFN(5, "throwing away %jd bytes",
			     count, 0, 0, 0);
	}

	/* Frame slots in the xfer buffer are wMaxPacketSize apart. */
	isize = UGETW(sce->edesc->wMaxPacketSize);
	for (i = 0; i < UGEN_NISORFRMS; i++) {
		uint32_t actlen = req->sizes[i];
		char const *tbuf = (char const *)req->dmabuf + isize * i;

		/* copy data to buffer, wrapping at the buffer end */
		while (actlen > 0) {
			n = uimin(actlen, sce->limit - sce->fill);
			memcpy(sce->fill, tbuf, n);

			tbuf += n;
			actlen -= n;
			sce->fill += n;
			if (sce->fill == sce->limit)
				sce->fill = sce->ibuf;
		}

		/* setup size for next transfer */
		req->sizes[i] = isize;
	}

	/* Requeue immediately to keep the isochronous stream running. */
	usbd_setup_isoc_xfer(xfer, req, req->sizes, UGEN_NISORFRMS, 0,
	    ugen_isoc_rintr);
	(void)usbd_transfer(xfer);

	cv_signal(&sce->cv);
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1422 
/*
 * Bulk read-ahead completion handler: accounts the received bytes,
 * copies them into the endpoint ring buffer (wrapping at the end),
 * and queues the next read while buffer space remains; otherwise
 * sets UGEN_RA_WB_STOP so the next read() restarts the stream.
 */
Static void
ugen_bulkra_intr(struct usbd_xfer *xfer, void *addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	char const *tbuf;
	usbd_status err;

	UGENHIST_FUNC();
	UGENHIST_CALLARGS("xfer=%jx status=%jd", (uintptr_t)xfer, status, 0, 0);

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF("status=%jd", status, 0, 0, 0);
		/* Stop read-ahead; the next read() will retry. */
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
		    usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	mutex_enter(&sc->sc_lock);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used += count;

	/* Copy data to buffer. */
	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
	n = uimin(count, sce->limit - sce->fill);
	memcpy(sce->fill, tbuf, n);
	tbuf += n;
	count -= n;
	sce->fill += n;
	if (sce->fill == sce->limit)
		sce->fill = sce->ibuf;
	/* Wrapped: the remainder goes to the start of the buffer. */
	if (count > 0) {
		memcpy(sce->fill, tbuf, count);
		sce->fill += count;
	}

	/* Set up the next request if necessary. */
	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
	if (n > 0) {
		usbd_setup_xfer(xfer, sce, NULL, uimin(n, sce->ra_wb_xferlen), 0,
		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("error=%d", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next read.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		sce->state |= UGEN_RA_WB_STOP;

	cv_signal(&sce->cv);
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1491 
/*
 * Bulk write-behind completion handler: accounts the bytes drained
 * from the ring buffer, advances the read pointer (with wrap), and
 * stages the next chunk while buffered data remains; otherwise sets
 * UGEN_RA_WB_STOP so the next write() restarts the stream.
 */
Static void
ugen_bulkwb_intr(struct usbd_xfer *xfer, void *addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	char *tbuf;
	usbd_status err;

	UGENHIST_FUNC();
	UGENHIST_CALLARGS("xfer=%jx status=%jd", (uintptr_t)xfer, status, 0, 0);

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF("status=%jd", status, 0, 0, 0);
		/* Stop write-behind; the next write() will retry. */
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
		    usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	mutex_enter(&sc->sc_lock);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used -= count;

	/* Update buffer pointers. */
	sce->cur += count;
	if (sce->cur >= sce->limit)
		sce->cur = sce->ibuf + (sce->cur - sce->limit);

	/* Set up next request if necessary. */
	if (sce->ra_wb_used > 0) {
		/* copy data from buffer, in two pieces if it wraps */
		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
		count = uimin(sce->ra_wb_used, sce->ra_wb_xferlen);
		n = uimin(count, sce->limit - sce->cur);
		memcpy(tbuf, sce->cur, n);
		tbuf += n;
		if (count - n > 0)
			memcpy(tbuf, sce->ibuf, count - n);

		usbd_setup_xfer(xfer, sce, NULL, count, 0, USBD_NO_TIMEOUT,
		    ugen_bulkwb_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("error=%d", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next write.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		sce->state |= UGEN_RA_WB_STOP;

	cv_signal(&sce->cv);
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1559 
1560 Static usbd_status
1561 ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
1562 {
1563 	struct usbd_interface *iface;
1564 	usb_endpoint_descriptor_t *ed;
1565 	usbd_status err;
1566 	struct ugen_endpoint *sce;
1567 	uint8_t niface, nendpt, endptno, endpt;
1568 	int dir;
1569 
1570 	UGENHIST_FUNC();
1571 	UGENHIST_CALLARGSN(15, "ifaceidx=%jd altno=%jd", ifaceidx, altno, 0, 0);
1572 
1573 	err = usbd_interface_count(sc->sc_udev, &niface);
1574 	if (err)
1575 		return err;
1576 	if (ifaceidx < 0 || ifaceidx >= niface)
1577 		return USBD_INVAL;
1578 
1579 	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1580 	if (err)
1581 		return err;
1582 	err = usbd_endpoint_count(iface, &nendpt);
1583 	if (err)
1584 		return err;
1585 
1586 	/* change setting */
1587 	err = usbd_set_interface(iface, altno);
1588 	if (err)
1589 		return err;
1590 
1591 	err = usbd_endpoint_count(iface, &nendpt);
1592 	if (err)
1593 		return err;
1594 
1595 	ugen_clear_endpoints(sc);
1596 
1597 	for (endptno = 0; endptno < nendpt; endptno++) {
1598 		ed = usbd_interface2endpoint_descriptor(iface,endptno);
1599 		KASSERT(ed != NULL);
1600 		endpt = ed->bEndpointAddress;
1601 		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1602 		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1603 		sce->sc = sc;
1604 		sce->edesc = ed;
1605 		sce->iface = iface;
1606 	}
1607 	return 0;
1608 }
1609 
1610 /* Retrieve a complete descriptor for a certain device and index. */
1611 Static usb_config_descriptor_t *
1612 ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
1613 {
1614 	usb_config_descriptor_t *cdesc = NULL, *tdesc, cdescr;
1615 	int len = 0;
1616 	usbd_status err;
1617 
1618 	UGENHIST_FUNC(); UGENHIST_CALLARGS("index=%jd", index, 0, 0, 0);
1619 
1620 	switch (index) {
1621 	case USB_CURRENT_CONFIG_INDEX:
1622 		tdesc = usbd_get_config_descriptor(sc->sc_udev);
1623 		if (tdesc == NULL)
1624 			break;
1625 		len = UGETW(tdesc->wTotalLength);
1626 		cdesc = kmem_alloc(len, KM_SLEEP);
1627 		memcpy(cdesc, tdesc, len);
1628 		break;
1629 	default:
1630 		err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
1631 		if (err)
1632 			break;
1633 		len = UGETW(cdescr.wTotalLength);
1634 		cdesc = kmem_alloc(len, KM_SLEEP);
1635 		err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
1636 		if (err) {
1637 			kmem_free(cdesc, len);
1638 			cdesc = NULL;
1639 		}
1640 		break;
1641 	}
1642 	DPRINTFN(5, "req len=%jd cdesc=%jx", len, (uintptr_t)cdesc, 0, 0);
1643 	if (cdesc && lenp)
1644 		*lenp = len;
1645 	return cdesc;
1646 }
1647 
1648 Static int
1649 ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
1650 {
1651 	struct usbd_interface *iface;
1652 	usbd_status err;
1653 
1654 	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1655 	if (err)
1656 		return -1;
1657 	return usbd_get_interface_altindex(iface);
1658 }
1659 
1660 Static int
1661 ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
1662 	      void *addr, int flag, struct lwp *l)
1663 {
1664 	struct ugen_endpoint *sce;
1665 	usbd_status err;
1666 	struct usbd_interface *iface;
1667 	struct usb_config_desc *cd;
1668 	usb_config_descriptor_t *cdesc;
1669 	struct usb_interface_desc *id;
1670 	usb_interface_descriptor_t *idesc;
1671 	struct usb_endpoint_desc *ed;
1672 	usb_endpoint_descriptor_t *edesc;
1673 	struct usb_alt_interface *ai;
1674 	struct usb_string_desc *si;
1675 	uint8_t conf, alt;
1676 	int cdesclen;
1677 	int error;
1678 	int dir;
1679 
1680 	UGENHIST_FUNC();
1681 	UGENHIST_CALLARGS("ugen%d: endpt=%ju cmd=%08jx flag=%jx",
1682 	    device_unit(sc->sc_dev), endpt, cmd, flag);
1683 
1684 	KASSERT(KERNEL_LOCKED_P()); /* ugen_set_config */
1685 
1686 	switch (cmd) {
1687 	case FIONBIO:
1688 		/* All handled in the upper FS layer. */
1689 		return 0;
1690 	case USB_SET_SHORT_XFER:
1691 		if (endpt == USB_CONTROL_ENDPOINT)
1692 			return EINVAL;
1693 		/* This flag only affects read */
1694 		sce = &sc->sc_endpoints[endpt][IN];
1695 		if (sce == NULL || sce->pipeh == NULL)
1696 			return EINVAL;
1697 		if (*(int *)addr)
1698 			sce->state |= UGEN_SHORT_OK;
1699 		else
1700 			sce->state &= ~UGEN_SHORT_OK;
1701 		DPRINTFN(5, "pipe=%jx short xfer=%ju",
1702 		    (uintptr_t)sce->pipeh, sce->state & UGEN_SHORT_OK, 0, 0);
1703 		return 0;
1704 	case USB_SET_TIMEOUT:
1705 		for (dir = OUT; dir <= IN; dir++) {
1706 			sce = &sc->sc_endpoints[endpt][dir];
1707 			if (sce == NULL)
1708 				return EINVAL;
1709 
1710 			sce->timeout = *(int *)addr;
1711 			DPRINTFN(5, "pipe=%jx timeout[dir=%ju] timeout=%ju",
1712 			    (uintptr_t)sce->pipeh, dir, sce->timeout, 0);
1713 		}
1714 		return 0;
1715 	case USB_SET_BULK_RA:
1716 		if (endpt == USB_CONTROL_ENDPOINT)
1717 			return EINVAL;
1718 		sce = &sc->sc_endpoints[endpt][IN];
1719 		if (sce == NULL || sce->pipeh == NULL)
1720 			return EINVAL;
1721 		edesc = sce->edesc;
1722 		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1723 			return EINVAL;
1724 
1725 		if (*(int *)addr) {
1726 			/* Only turn RA on if it's currently off. */
1727 			if (sce->state & UGEN_BULK_RA)
1728 				return 0;
1729 			KASSERT(sce->ra_wb_xfer == NULL);
1730 			KASSERT(sce->ibuf == NULL);
1731 
1732 			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1733 				/* shouldn't happen */
1734 				return EINVAL;
1735 			error = usbd_create_xfer(sce->pipeh,
1736 			    sce->ra_wb_reqsize, 0, 0, &sce->ra_wb_xfer);
1737 			if (error)
1738 				return error;
1739 			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1740 			sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
1741 			sce->fill = sce->cur = sce->ibuf;
1742 			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1743 			sce->ra_wb_used = 0;
1744 			sce->state |= UGEN_BULK_RA;
1745 			sce->state &= ~UGEN_RA_WB_STOP;
1746 			/* Now start reading. */
1747 			usbd_setup_xfer(sce->ra_wb_xfer, sce, NULL,
1748 			    uimin(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
1749 			     0, USBD_NO_TIMEOUT, ugen_bulkra_intr);
1750 			err = usbd_transfer(sce->ra_wb_xfer);
1751 			if (err != USBD_IN_PROGRESS) {
1752 				sce->state &= ~UGEN_BULK_RA;
1753 				kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1754 				sce->ibuf = NULL;
1755 				usbd_destroy_xfer(sce->ra_wb_xfer);
1756 				sce->ra_wb_xfer = NULL;
1757 				return EIO;
1758 			}
1759 		} else {
1760 			/* Only turn RA off if it's currently on. */
1761 			if (!(sce->state & UGEN_BULK_RA))
1762 				return 0;
1763 
1764 			sce->state &= ~UGEN_BULK_RA;
1765 			usbd_abort_pipe(sce->pipeh);
1766 			usbd_destroy_xfer(sce->ra_wb_xfer);
1767 			sce->ra_wb_xfer = NULL;
1768 			/*
1769 			 * XXX Discard whatever's in the buffer, but we
1770 			 * should keep it around and drain the buffer
1771 			 * instead.
1772 			 */
1773 			kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1774 			sce->ibuf = NULL;
1775 		}
1776 		return 0;
1777 	case USB_SET_BULK_WB:
1778 		if (endpt == USB_CONTROL_ENDPOINT)
1779 			return EINVAL;
1780 		sce = &sc->sc_endpoints[endpt][OUT];
1781 		if (sce == NULL || sce->pipeh == NULL)
1782 			return EINVAL;
1783 		edesc = sce->edesc;
1784 		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1785 			return EINVAL;
1786 
1787 		if (*(int *)addr) {
1788 			/* Only turn WB on if it's currently off. */
1789 			if (sce->state & UGEN_BULK_WB)
1790 				return 0;
1791 			KASSERT(sce->ra_wb_xfer == NULL);
1792 			KASSERT(sce->ibuf == NULL);
1793 
1794 			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1795 				/* shouldn't happen */
1796 				return EINVAL;
1797 			error = usbd_create_xfer(sce->pipeh, sce->ra_wb_reqsize,
1798 			    0, 0, &sce->ra_wb_xfer);
1799 			/* XXX check error???  */
1800 			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1801 			sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
1802 			sce->fill = sce->cur = sce->ibuf;
1803 			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1804 			sce->ra_wb_used = 0;
1805 			sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
1806 		} else {
1807 			/* Only turn WB off if it's currently on. */
1808 			if (!(sce->state & UGEN_BULK_WB))
1809 				return 0;
1810 
1811 			sce->state &= ~UGEN_BULK_WB;
1812 			/*
1813 			 * XXX Discard whatever's in the buffer, but we
1814 			 * should keep it around and keep writing to
1815 			 * drain the buffer instead.
1816 			 */
1817 			usbd_abort_pipe(sce->pipeh);
1818 			usbd_destroy_xfer(sce->ra_wb_xfer);
1819 			sce->ra_wb_xfer = NULL;
1820 			kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1821 			sce->ibuf = NULL;
1822 		}
1823 		return 0;
1824 	case USB_SET_BULK_RA_OPT:
1825 	case USB_SET_BULK_WB_OPT:
1826 	{
1827 		struct usb_bulk_ra_wb_opt *opt;
1828 
1829 		if (endpt == USB_CONTROL_ENDPOINT)
1830 			return EINVAL;
1831 		opt = (struct usb_bulk_ra_wb_opt *)addr;
1832 		if (cmd == USB_SET_BULK_RA_OPT)
1833 			sce = &sc->sc_endpoints[endpt][IN];
1834 		else
1835 			sce = &sc->sc_endpoints[endpt][OUT];
1836 		if (sce == NULL || sce->pipeh == NULL)
1837 			return EINVAL;
1838 		if (opt->ra_wb_buffer_size < 1 ||
1839 		    opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
1840 		    opt->ra_wb_request_size < 1 ||
1841 		    opt->ra_wb_request_size > opt->ra_wb_buffer_size)
1842 			return EINVAL;
1843 		/*
1844 		 * XXX These changes do not take effect until the
1845 		 * next time RA/WB mode is enabled but they ought to
1846 		 * take effect immediately.
1847 		 */
1848 		sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
1849 		sce->ra_wb_reqsize = opt->ra_wb_request_size;
1850 		return 0;
1851 	}
1852 	default:
1853 		break;
1854 	}
1855 
1856 	if (endpt != USB_CONTROL_ENDPOINT)
1857 		return EINVAL;
1858 
1859 	switch (cmd) {
1860 #ifdef UGEN_DEBUG
1861 	case USB_SETDEBUG:
1862 		ugendebug = *(int *)addr;
1863 		break;
1864 #endif
1865 	case USB_GET_CONFIG:
1866 		err = usbd_get_config(sc->sc_udev, &conf);
1867 		if (err)
1868 			return EIO;
1869 		*(int *)addr = conf;
1870 		break;
1871 	case USB_SET_CONFIG:
1872 		if (!(flag & FWRITE))
1873 			return EPERM;
1874 		err = ugen_set_config(sc, *(int *)addr, 1);
1875 		switch (err) {
1876 		case USBD_NORMAL_COMPLETION:
1877 			break;
1878 		case USBD_IN_USE:
1879 			return EBUSY;
1880 		default:
1881 			return EIO;
1882 		}
1883 		break;
1884 	case USB_GET_ALTINTERFACE:
1885 		ai = (struct usb_alt_interface *)addr;
1886 		err = usbd_device2interface_handle(sc->sc_udev,
1887 			  ai->uai_interface_index, &iface);
1888 		if (err)
1889 			return EINVAL;
1890 		idesc = usbd_get_interface_descriptor(iface);
1891 		if (idesc == NULL)
1892 			return EIO;
1893 		ai->uai_alt_no = idesc->bAlternateSetting;
1894 		break;
1895 	case USB_SET_ALTINTERFACE:
1896 		if (!(flag & FWRITE))
1897 			return EPERM;
1898 		ai = (struct usb_alt_interface *)addr;
1899 		err = usbd_device2interface_handle(sc->sc_udev,
1900 			  ai->uai_interface_index, &iface);
1901 		if (err)
1902 			return EINVAL;
1903 		err = ugen_set_interface(sc, ai->uai_interface_index,
1904 		    ai->uai_alt_no);
1905 		if (err)
1906 			return EINVAL;
1907 		break;
1908 	case USB_GET_NO_ALT:
1909 		ai = (struct usb_alt_interface *)addr;
1910 		cdesc = ugen_get_cdesc(sc, ai->uai_config_index, &cdesclen);
1911 		if (cdesc == NULL)
1912 			return EINVAL;
1913 		idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
1914 		if (idesc == NULL) {
1915 			kmem_free(cdesc, cdesclen);
1916 			return EINVAL;
1917 		}
1918 		ai->uai_alt_no = usbd_get_no_alts(cdesc,
1919 		    idesc->bInterfaceNumber);
1920 		kmem_free(cdesc, cdesclen);
1921 		break;
1922 	case USB_GET_DEVICE_DESC:
1923 		*(usb_device_descriptor_t *)addr =
1924 			*usbd_get_device_descriptor(sc->sc_udev);
1925 		break;
1926 	case USB_GET_CONFIG_DESC:
1927 		cd = (struct usb_config_desc *)addr;
1928 		cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, &cdesclen);
1929 		if (cdesc == NULL)
1930 			return EINVAL;
1931 		cd->ucd_desc = *cdesc;
1932 		kmem_free(cdesc, cdesclen);
1933 		break;
1934 	case USB_GET_INTERFACE_DESC:
1935 		id = (struct usb_interface_desc *)addr;
1936 		cdesc = ugen_get_cdesc(sc, id->uid_config_index, &cdesclen);
1937 		if (cdesc == NULL)
1938 			return EINVAL;
1939 		if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
1940 		    id->uid_alt_index == USB_CURRENT_ALT_INDEX)
1941 			alt = ugen_get_alt_index(sc, id->uid_interface_index);
1942 		else
1943 			alt = id->uid_alt_index;
1944 		idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
1945 		if (idesc == NULL) {
1946 			kmem_free(cdesc, cdesclen);
1947 			return EINVAL;
1948 		}
1949 		id->uid_desc = *idesc;
1950 		kmem_free(cdesc, cdesclen);
1951 		break;
1952 	case USB_GET_ENDPOINT_DESC:
1953 		ed = (struct usb_endpoint_desc *)addr;
1954 		cdesc = ugen_get_cdesc(sc, ed->ued_config_index, &cdesclen);
1955 		if (cdesc == NULL)
1956 			return EINVAL;
1957 		if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
1958 		    ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
1959 			alt = ugen_get_alt_index(sc, ed->ued_interface_index);
1960 		else
1961 			alt = ed->ued_alt_index;
1962 		edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
1963 					alt, ed->ued_endpoint_index);
1964 		if (edesc == NULL) {
1965 			kmem_free(cdesc, cdesclen);
1966 			return EINVAL;
1967 		}
1968 		ed->ued_desc = *edesc;
1969 		kmem_free(cdesc, cdesclen);
1970 		break;
1971 	case USB_GET_FULL_DESC:
1972 	{
1973 		int len;
1974 		struct iovec iov;
1975 		struct uio uio;
1976 		struct usb_full_desc *fd = (struct usb_full_desc *)addr;
1977 
1978 		cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &cdesclen);
1979 		if (cdesc == NULL)
1980 			return EINVAL;
1981 		len = cdesclen;
1982 		if (len > fd->ufd_size)
1983 			len = fd->ufd_size;
1984 		iov.iov_base = (void *)fd->ufd_data;
1985 		iov.iov_len = len;
1986 		uio.uio_iov = &iov;
1987 		uio.uio_iovcnt = 1;
1988 		uio.uio_resid = len;
1989 		uio.uio_offset = 0;
1990 		uio.uio_rw = UIO_READ;
1991 		uio.uio_vmspace = l->l_proc->p_vmspace;
1992 		error = uiomove((void *)cdesc, len, &uio);
1993 		kmem_free(cdesc, cdesclen);
1994 		return error;
1995 	}
1996 	case USB_GET_STRING_DESC: {
1997 		int len;
1998 		si = (struct usb_string_desc *)addr;
1999 		err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
2000 			  si->usd_language_id, &si->usd_desc, &len);
2001 		if (err)
2002 			return EINVAL;
2003 		break;
2004 	}
2005 	case USB_DO_REQUEST:
2006 	{
2007 		struct usb_ctl_request *ur = (void *)addr;
2008 		int len = UGETW(ur->ucr_request.wLength);
2009 		struct iovec iov;
2010 		struct uio uio;
2011 		void *ptr = 0;
2012 		usbd_status xerr;
2013 
2014 		error = 0;
2015 
2016 		if (!(flag & FWRITE))
2017 			return EPERM;
2018 		/* Avoid requests that would damage the bus integrity. */
2019 		if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
2020 		     ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
2021 		    (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
2022 		     ur->ucr_request.bRequest == UR_SET_CONFIG) ||
2023 		    (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
2024 		     ur->ucr_request.bRequest == UR_SET_INTERFACE))
2025 			return EINVAL;
2026 
2027 		if (len < 0 || len > 32767)
2028 			return EINVAL;
2029 		if (len != 0) {
2030 			iov.iov_base = (void *)ur->ucr_data;
2031 			iov.iov_len = len;
2032 			uio.uio_iov = &iov;
2033 			uio.uio_iovcnt = 1;
2034 			uio.uio_resid = len;
2035 			uio.uio_offset = 0;
2036 			uio.uio_rw =
2037 				ur->ucr_request.bmRequestType & UT_READ ?
2038 				UIO_READ : UIO_WRITE;
2039 			uio.uio_vmspace = l->l_proc->p_vmspace;
2040 			ptr = kmem_alloc(len, KM_SLEEP);
2041 			if (uio.uio_rw == UIO_WRITE) {
2042 				error = uiomove(ptr, len, &uio);
2043 				if (error)
2044 					goto ret;
2045 			}
2046 		}
2047 		sce = &sc->sc_endpoints[endpt][IN];
2048 		xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
2049 			  ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
2050 		if (xerr) {
2051 			error = EIO;
2052 			goto ret;
2053 		}
2054 		if (len != 0) {
2055 			if (uio.uio_rw == UIO_READ) {
2056 				size_t alen = uimin(len, ur->ucr_actlen);
2057 				error = uiomove(ptr, alen, &uio);
2058 				if (error)
2059 					goto ret;
2060 			}
2061 		}
2062 	ret:
2063 		if (ptr)
2064 			kmem_free(ptr, len);
2065 		return error;
2066 	}
2067 	case USB_GET_DEVICEINFO:
2068 		usbd_fill_deviceinfo(sc->sc_udev,
2069 				     (struct usb_device_info *)addr, 0);
2070 		break;
2071 	case USB_GET_DEVICEINFO_30:
2072 	{
2073 		int ret;
2074 		MODULE_HOOK_CALL(usb_subr_fill_30_hook,
2075 		    (sc->sc_udev, (struct usb_device_info30 *)addr, 0,
2076 		      usbd_devinfo_vp, usbd_printBCD),
2077 		    enosys(), ret);
2078 		if (ret == 0)
2079 			return 0;
2080 		return EINVAL;
2081 	}
2082 	default:
2083 		return EINVAL;
2084 	}
2085 	return 0;
2086 }
2087 
2088 static int
2089 ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
2090 {
2091 	int endpt = UGENENDPOINT(dev);
2092 	struct ugen_softc *sc;
2093 	int error;
2094 
2095 	if ((sc = ugenif_acquire(UGENUNIT(dev))) == 0)
2096 		return ENXIO;
2097 	error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
2098 	ugenif_release(sc);
2099 
2100 	return error;
2101 }
2102 
2103 static int
2104 ugenpoll(dev_t dev, int events, struct lwp *l)
2105 {
2106 	struct ugen_softc *sc;
2107 	struct ugen_endpoint *sce_in, *sce_out;
2108 	int revents = 0;
2109 
2110 	if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
2111 		return POLLHUP;
2112 
2113 	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT) {
2114 		revents |= POLLERR;
2115 		goto out;
2116 	}
2117 
2118 	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
2119 	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
2120 	KASSERT(sce_in->edesc || sce_out->edesc);
2121 	KASSERT(sce_in->pipeh || sce_out->pipeh);
2122 
2123 	mutex_enter(&sc->sc_lock);
2124 	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
2125 		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
2126 		case UE_INTERRUPT:
2127 			if (sce_in->q.c_cc > 0)
2128 				revents |= events & (POLLIN | POLLRDNORM);
2129 			else
2130 				selrecord(l, &sce_in->rsel);
2131 			break;
2132 		case UE_ISOCHRONOUS:
2133 			if (sce_in->cur != sce_in->fill)
2134 				revents |= events & (POLLIN | POLLRDNORM);
2135 			else
2136 				selrecord(l, &sce_in->rsel);
2137 			break;
2138 		case UE_BULK:
2139 			if (sce_in->state & UGEN_BULK_RA) {
2140 				if (sce_in->ra_wb_used > 0)
2141 					revents |= events &
2142 					    (POLLIN | POLLRDNORM);
2143 				else
2144 					selrecord(l, &sce_in->rsel);
2145 				break;
2146 			}
2147 			/*
2148 			 * We have no easy way of determining if a read will
2149 			 * yield any data or a write will happen.
2150 			 * Pretend they will.
2151 			 */
2152 			revents |= events & (POLLIN | POLLRDNORM);
2153 			break;
2154 		default:
2155 			break;
2156 		}
2157 	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
2158 		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
2159 		case UE_INTERRUPT:
2160 		case UE_ISOCHRONOUS:
2161 			/* XXX unimplemented */
2162 			break;
2163 		case UE_BULK:
2164 			if (sce_out->state & UGEN_BULK_WB) {
2165 				if (sce_out->ra_wb_used <
2166 				    sce_out->limit - sce_out->ibuf)
2167 					revents |= events &
2168 					    (POLLOUT | POLLWRNORM);
2169 				else
2170 					selrecord(l, &sce_out->rsel);
2171 				break;
2172 			}
2173 			/*
2174 			 * We have no easy way of determining if a read will
2175 			 * yield any data or a write will happen.
2176 			 * Pretend they will.
2177 			 */
2178 			 revents |= events & (POLLOUT | POLLWRNORM);
2179 			 break;
2180 		default:
2181 			break;
2182 		}
2183 
2184 	mutex_exit(&sc->sc_lock);
2185 
2186 out:	ugenif_release(sc);
2187 	return revents;
2188 }
2189 
2190 static void
2191 filt_ugenrdetach(struct knote *kn)
2192 {
2193 	struct ugen_endpoint *sce = kn->kn_hook;
2194 	struct ugen_softc *sc = sce->sc;
2195 
2196 	mutex_enter(&sc->sc_lock);
2197 	selremove_knote(&sce->rsel, kn);
2198 	mutex_exit(&sc->sc_lock);
2199 }
2200 
2201 static int
2202 filt_ugenread_intr(struct knote *kn, long hint)
2203 {
2204 	struct ugen_endpoint *sce = kn->kn_hook;
2205 	struct ugen_softc *sc = sce->sc;
2206 	int ret;
2207 
2208 	mutex_enter(&sc->sc_lock);
2209 	if (sc->sc_dying) {
2210 		ret = 0;
2211 	} else {
2212 		kn->kn_data = sce->q.c_cc;
2213 		ret = kn->kn_data > 0;
2214 	}
2215 	mutex_exit(&sc->sc_lock);
2216 
2217 	return ret;
2218 }
2219 
2220 static int
2221 filt_ugenread_isoc(struct knote *kn, long hint)
2222 {
2223 	struct ugen_endpoint *sce = kn->kn_hook;
2224 	struct ugen_softc *sc = sce->sc;
2225 	int ret;
2226 
2227 	mutex_enter(&sc->sc_lock);
2228 	if (sc->sc_dying) {
2229 		ret = 0;
2230 	} else if (sce->cur == sce->fill) {
2231 		ret = 0;
2232 	} else if (sce->cur < sce->fill) {
2233 		kn->kn_data = sce->fill - sce->cur;
2234 		ret = 1;
2235 	} else {
2236 		kn->kn_data = (sce->limit - sce->cur) +
2237 		    (sce->fill - sce->ibuf);
2238 		ret = 1;
2239 	}
2240 	mutex_exit(&sc->sc_lock);
2241 
2242 	return ret;
2243 }
2244 
2245 static int
2246 filt_ugenread_bulk(struct knote *kn, long hint)
2247 {
2248 	struct ugen_endpoint *sce = kn->kn_hook;
2249 	struct ugen_softc *sc = sce->sc;
2250 	int ret;
2251 
2252 	mutex_enter(&sc->sc_lock);
2253 	if (sc->sc_dying) {
2254 		ret = 0;
2255 	} else if (!(sce->state & UGEN_BULK_RA)) {
2256 		/*
2257 		 * We have no easy way of determining if a read will
2258 		 * yield any data or a write will happen.
2259 		 * So, emulate "seltrue".
2260 		 */
2261 		ret = filt_seltrue(kn, hint);
2262 	} else if (sce->ra_wb_used == 0) {
2263 		ret = 0;
2264 	} else {
2265 		kn->kn_data = sce->ra_wb_used;
2266 		ret = 1;
2267 	}
2268 	mutex_exit(&sc->sc_lock);
2269 
2270 	return ret;
2271 }
2272 
2273 static int
2274 filt_ugenwrite_bulk(struct knote *kn, long hint)
2275 {
2276 	struct ugen_endpoint *sce = kn->kn_hook;
2277 	struct ugen_softc *sc = sce->sc;
2278 	int ret;
2279 
2280 	mutex_enter(&sc->sc_lock);
2281 	if (sc->sc_dying) {
2282 		ret = 0;
2283 	} else if (!(sce->state & UGEN_BULK_WB)) {
2284 		/*
2285 		 * We have no easy way of determining if a read will
2286 		 * yield any data or a write will happen.
2287 		 * So, emulate "seltrue".
2288 		 */
2289 		ret = filt_seltrue(kn, hint);
2290 	} else if (sce->ra_wb_used == sce->limit - sce->ibuf) {
2291 		ret = 0;
2292 	} else {
2293 		kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;
2294 		ret = 1;
2295 	}
2296 	mutex_exit(&sc->sc_lock);
2297 
2298 	return ret;
2299 }
2300 
/* kqueue EVFILT_READ ops for interrupt endpoints. */
static const struct filterops ugenread_intr_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_intr,
};
2307 
/* kqueue EVFILT_READ ops for isochronous endpoints. */
static const struct filterops ugenread_isoc_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_isoc,
};
2314 
/* kqueue EVFILT_READ ops for bulk endpoints. */
static const struct filterops ugenread_bulk_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_bulk,
};
2321 
/* kqueue EVFILT_WRITE ops for bulk endpoints (shares the read detach). */
static const struct filterops ugenwrite_bulk_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenwrite_bulk,
};
2328 
/*
 * ugenkqfilter(dev, kn)
 *
 *	Attach a kqueue knote to a ugen endpoint.  Selects the filter
 *	ops by filter type (read/write) and endpoint transfer type,
 *	then records the knote on the endpoint's selinfo under the
 *	softc lock.  The control endpoint does not support kqueue.
 */
static int
ugenkqfilter(dev_t dev, struct knote *kn)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	struct selinfo *sip;
	int error;

	if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
		return ENXIO;

	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT) {
		error = ENODEV;
		goto out;
	}

	switch (kn->kn_filter) {
	case EVFILT_READ:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
		/*
		 * NOTE(review): sce is the address of an array element
		 * and can never be NULL; this check looks dead.  A check
		 * on sce->edesc may have been intended — confirm.
		 */
		if (sce == NULL) {
			error = EINVAL;
			goto out;
		}

		sip = &sce->rsel;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			kn->kn_fop = &ugenread_intr_filtops;
			break;
		case UE_ISOCHRONOUS:
			kn->kn_fop = &ugenread_isoc_filtops;
			break;
		case UE_BULK:
			kn->kn_fop = &ugenread_bulk_filtops;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		break;

	case EVFILT_WRITE:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
		/* NOTE(review): same always-false check as above. */
		if (sce == NULL) {
			error = EINVAL;
			goto out;
		}

		sip = &sce->rsel;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX poll doesn't support this */
			error = EINVAL;
			goto out;

		case UE_BULK:
			kn->kn_fop = &ugenwrite_bulk_filtops;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		break;

	default:
		error = EINVAL;
		goto out;
	}

	kn->kn_hook = sce;

	/* Publish the knote under the same lock the filters take. */
	mutex_enter(&sc->sc_lock);
	selrecord_knote(sip, kn);
	mutex_exit(&sc->sc_lock);

	error = 0;

out:	ugenif_release(sc);
	return error;
}
2410 
2411 MODULE(MODULE_CLASS_DRIVER, ugen, NULL);
2412 
2413 static int
2414 ugen_modcmd(modcmd_t cmd, void *aux)
2415 {
2416 
2417 	switch (cmd) {
2418 	case MODULE_CMD_INIT:
2419 		mutex_init(&ugenif.lock, MUTEX_DEFAULT, IPL_NONE);
2420 		rb_tree_init(&ugenif.tree, &ugenif_tree_ops);
2421 		return 0;
2422 	default:
2423 		return ENOTTY;
2424 	}
2425 }
2426