xref: /netbsd-src/sys/dev/usb/ugen.c (revision bdc22b2e01993381dcefeff2bc9b56ca75a4235c)
1 /*	$NetBSD: ugen.c,v 1.139 2018/03/05 09:35:01 ws Exp $	*/
2 
3 /*
4  * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Lennart Augustsson (lennart@augustsson.net) at
9  * Carlstedt Research & Technology.
10  *
11  * Copyright (c) 2006 BBN Technologies Corp.  All rights reserved.
12  * Effort sponsored in part by the Defense Advanced Research Projects
13  * Agency (DARPA) and the Department of the Interior National Business
14  * Center under agreement number NBCHC050166.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.139 2018/03/05 09:35:01 ws Exp $");
41 
42 #ifdef _KERNEL_OPT
43 #include "opt_compat_netbsd.h"
44 #include "opt_usb.h"
45 #endif
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/kernel.h>
50 #include <sys/kmem.h>
51 #include <sys/device.h>
52 #include <sys/ioctl.h>
53 #include <sys/conf.h>
54 #include <sys/tty.h>
55 #include <sys/file.h>
56 #include <sys/select.h>
57 #include <sys/proc.h>
58 #include <sys/vnode.h>
59 #include <sys/poll.h>
60 
61 #include <dev/usb/usb.h>
62 #include <dev/usb/usbdi.h>
63 #include <dev/usb/usbdi_util.h>
64 
65 #ifdef UGEN_DEBUG
66 #define DPRINTF(x)	if (ugendebug) printf x
67 #define DPRINTFN(n,x)	if (ugendebug>(n)) printf x
68 int	ugendebug = 0;
69 #else
70 #define DPRINTF(x)
71 #define DPRINTFN(n,x)
72 #endif
73 
74 #define	UGEN_CHUNK	128	/* chunk size for read */
75 #define	UGEN_IBSIZE	1020	/* buffer size */
76 #define	UGEN_BBSIZE	1024
77 
78 #define UGEN_NISOREQS	4	/* number of outstanding xfer requests */
79 #define UGEN_NISORFRMS	8	/* number of transactions per req */
80 #define UGEN_NISOFRAMES	(UGEN_NISORFRMS * UGEN_NISOREQS)
81 
82 #define UGEN_BULK_RA_WB_BUFSIZE	16384		/* default buffer size */
83 #define UGEN_BULK_RA_WB_BUFMAX	(1 << 20)	/* maximum allowed buffer */
84 
/*
 * One outstanding isochronous read request.  Each request carries
 * UGEN_NISORFRMS frames; UGEN_NISOREQS of these are kept in flight per
 * open isochronous endpoint (see ugenopen()).
 */
struct isoreq {
	struct ugen_endpoint *sce;	/* back pointer to owning endpoint */
	struct usbd_xfer *xfer;		/* the transfer for this request */
	void *dmabuf;			/* xfer's buffer (usbd_get_buffer) */
	uint16_t sizes[UGEN_NISORFRMS];	/* per-frame lengths for the xfer */
};
91 
/*
 * Per-endpoint, per-direction state.  One of these exists for each
 * (endpoint address, direction) pair; see sc_endpoints in ugen_softc.
 */
struct ugen_endpoint {
	struct ugen_softc *sc;	/* back pointer to softc */
	usb_endpoint_descriptor_t *edesc; /* descriptor; NULL if not present */
	struct usbd_interface *iface;	/* interface this endpoint belongs to */
	int state;		/* UGEN_* flag bits below */
#define	UGEN_ASLP	0x02	/* waiting for data */
#define UGEN_SHORT_OK	0x04	/* short xfers are OK */
#define UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
#define UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
#define UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
	struct usbd_pipe *pipeh;	/* open pipe; NULL when closed */
	struct clist q;		/* queued input for interrupt-in endpoints */
	u_char *ibuf;		/* start of buffer (circular for isoc) */
	u_char *fill;		/* location for input (isoc) */
	u_char *limit;		/* end of circular buffer (isoc) */
	u_char *cur;		/* current read location (isoc) */
	uint32_t timeout;	/* I/O timeout in ms; USBD_NO_TIMEOUT = none */
	uint32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
	uint32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
	uint32_t ra_wb_used;	 /* how much is in buffer */
	uint32_t ra_wb_xferlen; /* current xfer length for RA/WB */
	struct usbd_xfer *ra_wb_xfer;	/* the single RA/WB transfer */
	struct isoreq isoreqs[UGEN_NISOREQS];	/* in-flight isoc reads */
	/* Keep these last; we don't overwrite them in ugen_set_config() */
#define UGEN_ENDPOINT_NONZERO_CRUFT	offsetof(struct ugen_endpoint, rsel)
	struct selinfo rsel;	/* select/poll/kqueue bookkeeping */
	kcondvar_t cv;		/* sleep point for blocking reads/writes */
};
120 
/* Per-device driver instance state. */
struct ugen_softc {
	device_t sc_dev;		/* base device */
	struct usbd_device *sc_udev;	/* the USB device we drive */

	kmutex_t		sc_lock;	/* protects refcnt/endpoint state */
	kcondvar_t		sc_detach_cv;	/* detach waits here for I/O to drain */

	char sc_is_open[USB_MAX_ENDPOINTS];	/* per-endpoint open flag */
	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
#define OUT 0
#define IN  1

	int sc_refcnt;		/* active I/O operations; detach waits for 0 */
	char sc_buffer[UGEN_BBSIZE];	/* bounce buffer for bulk/intr I/O */
	u_char sc_dying;	/* set when the device is going away */
};
137 
138 dev_type_open(ugenopen);
139 dev_type_close(ugenclose);
140 dev_type_read(ugenread);
141 dev_type_write(ugenwrite);
142 dev_type_ioctl(ugenioctl);
143 dev_type_poll(ugenpoll);
144 dev_type_kqfilter(ugenkqfilter);
145 
/* Character device switch: entry points for /dev/ugenN.EE nodes. */
const struct cdevsw ugen_cdevsw = {
	.d_open = ugenopen,
	.d_close = ugenclose,
	.d_read = ugenread,
	.d_write = ugenwrite,
	.d_ioctl = ugenioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = ugenpoll,
	.d_mmap = nommap,
	.d_kqfilter = ugenkqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER,
};
160 
161 Static void ugenintr(struct usbd_xfer *, void *,
162 		     usbd_status);
163 Static void ugen_isoc_rintr(struct usbd_xfer *, void *,
164 			    usbd_status);
165 Static void ugen_bulkra_intr(struct usbd_xfer *, void *,
166 			     usbd_status);
167 Static void ugen_bulkwb_intr(struct usbd_xfer *, void *,
168 			     usbd_status);
169 Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
170 Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
171 Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
172 			 void *, int, struct lwp *);
173 Static int ugen_set_config(struct ugen_softc *, int, int);
174 Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *,
175 					       int, int *);
176 Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
177 Static int ugen_get_alt_index(struct ugen_softc *, int);
178 Static void ugen_clear_endpoints(struct ugen_softc *);
179 
180 #define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
181 #define UGENENDPOINT(n) (minor(n) & 0xf)
182 #define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))
183 
184 int	ugenif_match(device_t, cfdata_t, void *);
185 void	ugenif_attach(device_t, device_t, void *);
186 int	ugen_match(device_t, cfdata_t, void *);
187 void	ugen_attach(device_t, device_t, void *);
188 int	ugen_detach(device_t, int);
189 int	ugen_activate(device_t, enum devact);
190 extern struct cfdriver ugen_cd;
191 CFATTACH_DECL_NEW(ugen, sizeof(struct ugen_softc), ugen_match,
192     ugen_attach, ugen_detach, ugen_activate);
193 CFATTACH_DECL_NEW(ugenif, sizeof(struct ugen_softc), ugenif_match,
194     ugenif_attach, ugen_detach, ugen_activate);
195 
196 /* toggle to control attach priority. -1 means "let autoconf decide" */
197 int ugen_override = -1;
198 
199 int
200 ugen_match(device_t parent, cfdata_t match, void *aux)
201 {
202 	struct usb_attach_arg *uaa = aux;
203 	int override;
204 
205 	if (ugen_override != -1)
206 		override = ugen_override;
207 	else
208 		override = match->cf_flags & 1;
209 
210 	if (override)
211 		return UMATCH_HIGHEST;
212 	else if (uaa->uaa_usegeneric)
213 		return UMATCH_GENERIC;
214 	else
215 		return UMATCH_NONE;
216 }
217 
218 int
219 ugenif_match(device_t parent, cfdata_t match, void *aux)
220 {
221 	/* Assume that they knew what they configured! (see ugenif(4)) */
222 	return UMATCH_HIGHEST;
223 }
224 
225 void
226 ugen_attach(device_t parent, device_t self, void *aux)
227 {
228 	struct usb_attach_arg *uaa = aux;
229 	struct usbif_attach_arg uiaa;
230 
231 	memset(&uiaa, 0, sizeof uiaa);
232 	uiaa.uiaa_port = uaa->uaa_port;
233 	uiaa.uiaa_vendor = uaa->uaa_vendor;
234 	uiaa.uiaa_product = uaa->uaa_product;
235 	uiaa.uiaa_release = uaa->uaa_release;
236 	uiaa.uiaa_device = uaa->uaa_device;
237 	uiaa.uiaa_configno = -1;
238 	uiaa.uiaa_ifaceno = -1;
239 
240 	ugenif_attach(parent, self, &uiaa);
241 }
242 
/*
 * Common attach routine for both the ugen (whole device) and ugenif
 * (single interface) attachments.  Initialises locks and per-endpoint
 * selinfo/condvars, optionally selects the default configuration, and
 * builds the endpoint table via ugen_set_config().  On failure the
 * device is marked dying rather than detached.
 */
void
ugenif_attach(device_t parent, device_t self, void *aux)
{
	struct ugen_softc *sc = device_private(self);
	struct usbif_attach_arg *uiaa = aux;
	struct usbd_device *udev;
	char *devinfop;
	usbd_status err;
	int i, dir, conf;

	aprint_naive("\n");
	aprint_normal("\n");

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
	cv_init(&sc->sc_detach_cv, "ugendet");

	devinfop = usbd_devinfo_alloc(uiaa->uiaa_device, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_dev = self;
	sc->sc_udev = udev = uiaa->uiaa_device;

	/*
	 * Initialise the select/poll state and condvar of every endpoint
	 * slot once, for the lifetime of the device; ugen_set_config()
	 * deliberately leaves these untouched when it rebuilds the rest.
	 */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce;

			sce = &sc->sc_endpoints[i][dir];
			selinit(&sce->rsel);
			cv_init(&sce->cv, "ugensce");
		}
	}

	if (uiaa->uiaa_ifaceno < 0) {
		/*
		 * If we attach the whole device,
		 * set configuration index 0, the default one.
		 */
		err = usbd_set_config_index(udev, 0, 0);
		if (err) {
			aprint_error_dev(self,
			    "setting configuration index 0 failed\n");
			sc->sc_dying = 1;
			return;
		}
	}

	/* Get current configuration */
	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;

	/* Set up all the local state for this configuration. */
	err = ugen_set_config(sc, conf, uiaa->uiaa_ifaceno < 0);
	if (err) {
		aprint_error_dev(self, "setting configuration %d failed\n",
		    conf);
		sc->sc_dying = 1;
		return;
	}

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev, sc->sc_dev);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

}
308 
309 Static void
310 ugen_clear_endpoints(struct ugen_softc *sc)
311 {
312 
313 	/* Clear out the old info, but leave the selinfo and cv initialised. */
314 	for (int i = 0; i < USB_MAX_ENDPOINTS; i++) {
315 		for (int dir = OUT; dir <= IN; dir++) {
316 			struct ugen_endpoint *sce = &sc->sc_endpoints[i][dir];
317 			memset(sce, 0, UGEN_ENDPOINT_NONZERO_CRUFT);
318 		}
319 	}
320 }
321 
/*
 * Select configuration `configno' on the device (if it is not already
 * current) and rebuild the sc_endpoints table from the configuration's
 * endpoint descriptors.  When `chkopen' is set, refuse with USBD_IN_USE
 * while any non-control endpoint is open, since switching configurations
 * would invalidate the open pipes.  Returns a usbd_status.
 */
Static int
ugen_set_config(struct ugen_softc *sc, int configno, int chkopen)
{
	struct usbd_device *dev = sc->sc_udev;
	usb_config_descriptor_t *cdesc;
	struct usbd_interface *iface;
	usb_endpoint_descriptor_t *ed;
	struct ugen_endpoint *sce;
	uint8_t niface, nendpt;
	int ifaceno, endptno, endpt;
	usbd_status err;
	int dir;

	DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
		    device_xname(sc->sc_dev), configno, sc));

	if (chkopen) {
		/*
		 * We start at 1, not 0, because we don't care whether the
		 * control endpoint is open or not. It is always present.
		 */
		for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
			if (sc->sc_is_open[endptno]) {
				DPRINTFN(1,
				     ("ugen_set_config: %s - endpoint %d is open\n",
				      device_xname(sc->sc_dev), endptno));
				return USBD_IN_USE;
			}
	}

	/* Avoid setting the current value. */
	cdesc = usbd_get_config_descriptor(dev);
	if (!cdesc || cdesc->bConfigurationValue != configno) {
		err = usbd_set_config_no(dev, configno, 1);
		if (err)
			return err;
	}

	/* Drop stale endpoint info; selinfo/cv survive (see the struct). */
	ugen_clear_endpoints(sc);

	err = usbd_interface_count(dev, &niface);
	if (err)
		return err;

	/* Record descriptor/interface pointers for every endpoint found. */
	for (ifaceno = 0; ifaceno < niface; ifaceno++) {
		DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
		err = usbd_device2interface_handle(dev, ifaceno, &iface);
		if (err)
			return err;
		err = usbd_endpoint_count(iface, &nendpt);
		if (err)
			return err;
		for (endptno = 0; endptno < nendpt; endptno++) {
			ed = usbd_interface2endpoint_descriptor(iface,endptno);
			KASSERT(ed != NULL);
			endpt = ed->bEndpointAddress;
			dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
			sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
			DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
				    "(%d,%d), sce=%p\n",
				    endptno, endpt, UE_GET_ADDR(endpt),
				    UE_GET_DIR(endpt), sce));
			sce->sc = sc;
			sce->edesc = ed;
			sce->iface = iface;
		}
	}
	return USBD_NORMAL_COMPLETION;
}
391 
/*
 * Open a ugen endpoint device node.  The minor number encodes unit and
 * endpoint address.  The control endpoint allows multiple concurrent
 * opens; all other endpoints are exclusive.  For each requested
 * direction (FREAD -> IN, FWRITE -> OUT) the corresponding pipe is
 * opened and transfer-type-specific resources are set up:
 *  - interrupt IN: a packet buffer plus a clist queue fed by ugenintr;
 *  - bulk: a plain pipe with default RA/WB sizes recorded;
 *  - isochronous IN: a circular buffer and UGEN_NISOREQS outstanding
 *    transfers feeding it via ugen_isoc_rintr.
 */
int
ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct ugen_softc *sc;
	int unit = UGENUNIT(dev);
	int endpt = UGENENDPOINT(dev);
	usb_endpoint_descriptor_t *edesc;
	struct ugen_endpoint *sce;
	int dir, isize;
	usbd_status err;
	struct usbd_xfer *xfer;
	int i, j;

	sc = device_lookup_private(&ugen_cd, unit);
	if (sc == NULL || sc->sc_dying)
		return ENXIO;

	DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
		     flag, mode, unit, endpt));

	/* The control endpoint allows multiple opens. */
	if (endpt == USB_CONTROL_ENDPOINT) {
		sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
		return 0;
	}

	if (sc->sc_is_open[endpt])
		return EBUSY;

	/* Make sure there are pipes for all directions. */
	for (dir = OUT; dir <= IN; dir++) {
		if (flag & (dir == OUT ? FWRITE : FREAD)) {
			sce = &sc->sc_endpoints[endpt][dir];
			if (sce->edesc == NULL)
				return ENXIO;
		}
	}

	/* Actually open the pipes. */
	/* XXX Should back out properly if it fails. */
	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		sce->state = 0;
		sce->timeout = USBD_NO_TIMEOUT;
		DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
			     sc, endpt, dir, sce));
		edesc = sce->edesc;
		switch (edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (dir == OUT) {
				/* Interrupt OUT needs only a plain pipe. */
				err = usbd_open_pipe(sce->iface,
				    edesc->bEndpointAddress, 0, &sce->pipeh);
				if (err)
					return EIO;
				break;
			}
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return EINVAL;
			/* One packet's worth, filled by ugenintr. */
			sce->ibuf = kmem_alloc(isize, KM_SLEEP);
			DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
				     endpt, isize));
			if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1) {
				kmem_free(sce->ibuf, isize);
				sce->ibuf = NULL;
				return ENOMEM;
			}
			err = usbd_open_pipe_intr(sce->iface,
				  edesc->bEndpointAddress,
				  USBD_SHORT_XFER_OK, &sce->pipeh, sce,
				  sce->ibuf, isize, ugenintr,
				  USBD_DEFAULT_INTERVAL);
			if (err) {
				clfree(&sce->q);
				kmem_free(sce->ibuf, isize);
				sce->ibuf = NULL;
				return EIO;
			}
			DPRINTFN(5, ("ugenopen: interrupt open done\n"));
			break;
		case UE_BULK:
			err = usbd_open_pipe(sce->iface,
				  edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err)
				return EIO;
			sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
			/*
			 * Use request size for non-RA/WB transfers
			 * as the default.
			 */
			sce->ra_wb_reqsize = UGEN_BBSIZE;
			break;
		case UE_ISOCHRONOUS:
			if (dir == OUT)
				return EINVAL;
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return EINVAL;
			/* Circular buffer: one max-size packet per frame. */
			sce->ibuf = kmem_alloc(isize * UGEN_NISOFRAMES,
				KM_SLEEP);
			sce->cur = sce->fill = sce->ibuf;
			sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
			DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
				     endpt, isize));
			err = usbd_open_pipe(sce->iface,
				  edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err) {
				kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
				sce->ibuf = NULL;
				return EIO;
			}
			/* Queue all requests up front to keep data flowing. */
			for (i = 0; i < UGEN_NISOREQS; ++i) {
				sce->isoreqs[i].sce = sce;
				err = usbd_create_xfer(sce->pipeh,
				    isize * UGEN_NISORFRMS, 0, UGEN_NISORFRMS,
				    &xfer);
				if (err)
					goto bad;
				sce->isoreqs[i].xfer = xfer;
				sce->isoreqs[i].dmabuf = usbd_get_buffer(xfer);
				for (j = 0; j < UGEN_NISORFRMS; ++j)
					sce->isoreqs[i].sizes[j] = isize;
				usbd_setup_isoc_xfer(xfer, &sce->isoreqs[i],
				    sce->isoreqs[i].sizes, UGEN_NISORFRMS, 0,
				    ugen_isoc_rintr);
				(void)usbd_transfer(xfer);
			}
			DPRINTFN(5, ("ugenopen: isoc open done\n"));
			break;
		bad:
			/* Partial isoc setup failed: tear down what we made. */
			while (--i >= 0) /* implicit buffer free */
				usbd_destroy_xfer(sce->isoreqs[i].xfer);
			usbd_close_pipe(sce->pipeh);
			sce->pipeh = NULL;
			kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
			sce->ibuf = NULL;
			return ENOMEM;
		case UE_CONTROL:
			/* Non-default control endpoints are not supported. */
			sce->timeout = USBD_DEFAULT_TIMEOUT;
			return EINVAL;
		}
	}
	sc->sc_is_open[endpt] = 1;
	return 0;
}
539 
/*
 * Close a ugen endpoint device node: abort any pending transfers, tear
 * down the transfer-type-specific resources allocated in ugenopen()
 * (clist, isoc xfers, RA/WB xfer), close the pipes, and free the
 * endpoint buffer.  The control endpoint has no pipe state to release.
 */
int
ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	int dir;
	int i;

	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
	if (sc == NULL || sc->sc_dying)
		return ENXIO;

	DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
		     flag, mode, UGENUNIT(dev), endpt));

#ifdef DIAGNOSTIC
	if (!sc->sc_is_open[endpt]) {
		printf("ugenclose: not open\n");
		return EINVAL;
	}
#endif

	if (endpt == USB_CONTROL_ENDPOINT) {
		DPRINTFN(5, ("ugenclose: close control\n"));
		sc->sc_is_open[endpt] = 0;
		return 0;
	}

	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		if (sce->pipeh == NULL)
			continue;
		DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
			     endpt, dir, sce));

		/* Cancel outstanding I/O before destroying its resources. */
		usbd_abort_pipe(sce->pipeh);

		int isize = UGETW(sce->edesc->wMaxPacketSize);
		/* msize ends up as the size of sce->ibuf, if any. */
		int msize = 0;

		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			ndflush(&sce->q, sce->q.c_cc);
			clfree(&sce->q);
			msize = isize;
			break;
		case UE_ISOCHRONOUS:
			for (i = 0; i < UGEN_NISOREQS; ++i)
				usbd_destroy_xfer(sce->isoreqs[i].xfer);
			msize = isize * UGEN_NISOFRAMES;
			break;
		case UE_BULK:
			/* RA/WB resources exist only if the mode was enabled. */
			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB)) {
				usbd_destroy_xfer(sce->ra_wb_xfer);
				msize = sce->ra_wb_bufsize;
			}
			break;
		default:
			break;
		}
		usbd_close_pipe(sce->pipeh);
		sce->pipeh = NULL;
		if (sce->ibuf != NULL) {
			kmem_free(sce->ibuf, msize);
			sce->ibuf = NULL;
		}
	}
	sc->sc_is_open[endpt] = 0;

	return 0;
}
614 
/*
 * Perform a read on an IN endpoint.  Behaviour depends on the endpoint's
 * transfer type:
 *  - interrupt: block (unless IO_NDELAY) until ugenintr has queued data
 *    on sce->q, then drain it through sc->sc_buffer to the user;
 *  - bulk with read-ahead enabled: consume from the circular RA buffer,
 *    restarting the background transfer if it stopped on a full buffer;
 *  - plain bulk: synchronous usbd_bulk_transfer in UGEN_BBSIZE chunks;
 *  - isochronous: consume from the circular buffer filled by
 *    ugen_isoc_rintr.
 * Returns 0 or an errno.  Reading the control endpoint is not supported.
 */
Static int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
	uint32_t n, tn;
	struct usbd_xfer *xfer;
	usbd_status err;
	int error = 0;

	DPRINTFN(5, ("%s: ugenread: %d\n", device_xname(sc->sc_dev), endpt));

	if (endpt == USB_CONTROL_ENDPOINT)
		return ENODEV;

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenread: no edesc\n");
		return EIO;
	}
	if (sce->pipeh == NULL) {
		printf("ugenread: no pipe\n");
		return EIO;
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_INTERRUPT:
		/* Block until activity occurred. */
		mutex_enter(&sc->sc_lock);
		while (sce->q.c_cc == 0) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			/* "ugenri" */
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}
		mutex_exit(&sc->sc_lock);

		/* Transfer as many chunks as possible. */
		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
			n = min(sce->q.c_cc, uio->uio_resid);
			if (n > sizeof(sc->sc_buffer))
				n = sizeof(sc->sc_buffer);

			/* Remove a small chunk from the input queue. */
			q_to_b(&sce->q, sc->sc_buffer, n);
			DPRINTFN(5, ("ugenread: got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
		}
		break;
	case UE_BULK:
		if (sce->state & UGEN_BULK_RA) {
			DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for the read-ahead buffer to fill. */
				while (sce->ra_wb_used == 0) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenread: sleep on %p\n",
						  sce));
					/* "ugenrb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
						 ("ugenread: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data to the process. */
				while (uio->uio_resid > 0
				       && sce->ra_wb_used > 0) {
					n = min(uio->uio_resid,
						sce->ra_wb_used);
					/* Don't read past the wrap point. */
					n = min(n, sce->limit - sce->cur);
					error = uiomove(sce->cur, n, uio);
					if (error)
						break;
					sce->cur += n;
					sce->ra_wb_used -= n;
					if (sce->cur == sce->limit)
						sce->cur = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was full, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = (sce->limit - sce->ibuf)
					    - sce->ra_wb_used;
					usbd_setup_xfer(xfer, sce, NULL,
					    min(n, sce->ra_wb_xferlen),
					    0, USBD_NO_TIMEOUT,
					    ugen_bulkra_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try
						 * again at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain bulk: synchronous transfers through sc_buffer. */
		error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE,
		    0, 0, &xfer);
		if (error)
			return error;
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
			tn = n;
			err = usbd_bulk_transfer(xfer, sce->pipeh,
			    sce->state & UGEN_SHORT_OK ? USBD_SHORT_XFER_OK : 0,
			    sce->timeout, sc->sc_buffer, &tn);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
			DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
			error = uiomove(sc->sc_buffer, tn, uio);
			/* A short transfer ends the read. */
			if (error || tn < n)
				break;
		}
		usbd_destroy_xfer(xfer);
		break;
	case UE_ISOCHRONOUS:
		mutex_enter(&sc->sc_lock);
		/* cur == fill means the circular buffer is empty. */
		while (sce->cur == sce->fill) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			sce->state |= UGEN_ASLP;
			/* "ugenri" */
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}

		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
			/* Copy up to the fill point or the wrap point. */
			if(sce->fill > sce->cur)
				n = min(sce->fill - sce->cur, uio->uio_resid);
			else
				n = min(sce->limit - sce->cur, uio->uio_resid);

			DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sce->cur, n, uio);
			if (error)
				break;
			sce->cur += n;
			if (sce->cur >= sce->limit)
				sce->cur = sce->ibuf;
		}
		mutex_exit(&sc->sc_lock);
		break;


	default:
		return ENXIO;
	}
	return error;
}
825 
826 int
827 ugenread(dev_t dev, struct uio *uio, int flag)
828 {
829 	int endpt = UGENENDPOINT(dev);
830 	struct ugen_softc *sc;
831 	int error;
832 
833 	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
834 	if (sc == NULL || sc->sc_dying)
835 		return ENXIO;
836 
837 	mutex_enter(&sc->sc_lock);
838 	sc->sc_refcnt++;
839 	mutex_exit(&sc->sc_lock);
840 
841 	error = ugen_do_read(sc, endpt, uio, flag);
842 
843 	mutex_enter(&sc->sc_lock);
844 	if (--sc->sc_refcnt < 0)
845 		usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
846 	mutex_exit(&sc->sc_lock);
847 
848 	return error;
849 }
850 
/*
 * Perform a write on an OUT endpoint.  Behaviour depends on the
 * endpoint's transfer type:
 *  - bulk with write-behind enabled: copy into the circular WB buffer,
 *    restarting the background transfer if it stopped on an empty
 *    buffer, and block (unless IO_NDELAY) while the buffer is full;
 *  - plain bulk: synchronous usbd_bulk_transfer in UGEN_BBSIZE chunks;
 *  - interrupt: synchronous usbd_intr_transfer in wMaxPacketSize chunks.
 * Returns 0 or an errno.  Writing the control endpoint is not supported.
 */
Static int
ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
	int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
	uint32_t n;
	int error = 0;
	uint32_t tn;
	char *dbuf;
	struct usbd_xfer *xfer;
	usbd_status err;

	DPRINTFN(5, ("%s: ugenwrite: %d\n", device_xname(sc->sc_dev), endpt));

	if (endpt == USB_CONTROL_ENDPOINT)
		return ENODEV;

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenwrite: no edesc\n");
		return EIO;
	}
	if (sce->pipeh == NULL) {
		printf("ugenwrite: no pipe\n");
		return EIO;
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_BULK:
		if (sce->state & UGEN_BULK_WB) {
			DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			/* ra_wb_used == buffer size means the buffer is full. */
			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
			    flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for room in the write-behind buffer. */
				while (sce->ra_wb_used ==
				       sce->limit - sce->ibuf) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenwrite: sleep on %p\n",
						  sce));
					/* "ugenwb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
						 ("ugenwrite: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data from the process. */
				while (uio->uio_resid > 0 &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = min(uio->uio_resid,
						(sce->limit - sce->ibuf)
						 - sce->ra_wb_used);
					/* Don't write past the wrap point. */
					n = min(n, sce->limit - sce->fill);
					error = uiomove(sce->fill, n, uio);
					if (error)
						break;
					sce->fill += n;
					sce->ra_wb_used += n;
					if (sce->fill == sce->limit)
						sce->fill = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was empty, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used > 0) {
					dbuf = (char *)usbd_get_buffer(xfer);
					n = min(sce->ra_wb_used,
						sce->ra_wb_xferlen);
					/* Copy in two pieces across the wrap. */
					tn = min(n, sce->limit - sce->cur);
					memcpy(dbuf, sce->cur, tn);
					dbuf += tn;
					if (n - tn > 0)
						memcpy(dbuf, sce->ibuf,
						       n - tn);
					usbd_setup_xfer(xfer, sce, NULL, n,
					    0, USBD_NO_TIMEOUT,
					    ugen_bulkwb_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try again
						 * at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain bulk: synchronous transfers through sc_buffer. */
		error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE,
		    0, 0, &xfer);
		if (error)
			return error;
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_bulk_transfer(xfer, sce->pipeh, 0, sce->timeout,
			    sc->sc_buffer, &n);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_destroy_xfer(xfer);
		break;
	case UE_INTERRUPT:
		error = usbd_create_xfer(sce->pipeh,
		    UGETW(sce->edesc->wMaxPacketSize), 0, 0, &xfer);
		if (error)
			return error;
		while ((n = min(UGETW(sce->edesc->wMaxPacketSize),
		    uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_intr_transfer(xfer, sce->pipeh, 0,
			    sce->timeout, sc->sc_buffer, &n);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_destroy_xfer(xfer);
		break;
	default:
		return ENXIO;
	}
	return error;
}
1015 
1016 int
1017 ugenwrite(dev_t dev, struct uio *uio, int flag)
1018 {
1019 	int endpt = UGENENDPOINT(dev);
1020 	struct ugen_softc *sc;
1021 	int error;
1022 
1023 	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
1024 	if (sc == NULL || sc->sc_dying)
1025 		return ENXIO;
1026 
1027 	mutex_enter(&sc->sc_lock);
1028 	sc->sc_refcnt++;
1029 	mutex_exit(&sc->sc_lock);
1030 
1031 	error = ugen_do_write(sc, endpt, uio, flag);
1032 
1033 	mutex_enter(&sc->sc_lock);
1034 	if (--sc->sc_refcnt < 0)
1035 		usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
1036 	mutex_exit(&sc->sc_lock);
1037 
1038 	return error;
1039 }
1040 
1041 int
1042 ugen_activate(device_t self, enum devact act)
1043 {
1044 	struct ugen_softc *sc = device_private(self);
1045 
1046 	switch (act) {
1047 	case DVACT_DEACTIVATE:
1048 		sc->sc_dying = 1;
1049 		return 0;
1050 	default:
1051 		return EOPNOTSUPP;
1052 	}
1053 }
1054 
/*
 * Autoconf detach hook.  Aborts all open pipes to wake blocked
 * transfers, drains the reference count, revokes open vnodes (which
 * triggers close), and finally destroys per-endpoint and per-softc
 * synchronization objects.  The ordering here is significant.
 */
int
ugen_detach(device_t self, int flags)
{
	struct ugen_softc *sc = device_private(self);
	struct ugen_endpoint *sce;
	int i, dir;
	int maj, mn;

	DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));

	/* Fail all new operations from here on. */
	sc->sc_dying = 1;
	pmf_device_deregister(self);
	/* Abort all pipes.  Causes processes waiting for transfer to wake. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			if (sce->pipeh)
				usbd_abort_pipe(sce->pipeh);
		}
	}

	mutex_enter(&sc->sc_lock);
	if (--sc->sc_refcnt >= 0) {
		/* Wake everyone */
		/*
		 * NOTE(review): only the IN-side condvars are signalled;
		 * OUT-side sleepers presumably rely on the pipe aborts
		 * above — confirm.
		 */
		for (i = 0; i < USB_MAX_ENDPOINTS; i++)
			cv_signal(&sc->sc_endpoints[i][IN].cv);
		/* Wait for processes to go away. */
		usb_detach_wait(sc->sc_dev, &sc->sc_detach_cv, &sc->sc_lock);
	}
	mutex_exit(&sc->sc_lock);

	/* locate the major number */
	maj = cdevsw_lookup_major(&ugen_cdevsw);

	/* Nuke the vnodes for any open instances (calls close). */
	mn = device_unit(self) * USB_MAX_ENDPOINTS;
	vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev, sc->sc_dev);

	/* Tear down per-endpoint select/kqueue state and condvars. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			seldestroy(&sce->rsel);
			cv_destroy(&sce->cv);
		}
	}

	cv_destroy(&sc->sc_detach_cv);
	mutex_destroy(&sc->sc_lock);

	return 0;
}
1108 
/*
 * Interrupt-pipe completion callback.  Queues the received bytes on
 * the endpoint's clist and wakes any reader sleeping or selecting on
 * the endpoint.
 */
Static void
ugenintr(struct usbd_xfer *xfer, void *addr, usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count;
	u_char *ibuf;

	/* Pipe is being aborted or closed; nothing to deliver. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugenintr: status=%d\n", status));
		if (status == USBD_STALLED)
		    usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	ibuf = sce->ibuf;

	DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
		     xfer, status, count));
	DPRINTFN(5, ("          data = %02x %02x %02x\n",
		     ibuf[0], ibuf[1], ibuf[2]));

	/* Append to the clist; the "bytes not queued" result is ignored. */
	(void)b_to_q(ibuf, count, &sce->q);

	/* Wake a sleeping reader, then notify select/poll/kqueue. */
	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1146 
1147 Static void
1148 ugen_isoc_rintr(struct usbd_xfer *xfer, void *addr,
1149 		usbd_status status)
1150 {
1151 	struct isoreq *req = addr;
1152 	struct ugen_endpoint *sce = req->sce;
1153 	struct ugen_softc *sc = sce->sc;
1154 	uint32_t count, n;
1155 	int i, isize;
1156 
1157 	/* Return if we are aborting. */
1158 	if (status == USBD_CANCELLED)
1159 		return;
1160 
1161 	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1162 	DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
1163 	    (long)(req - sce->isoreqs), count));
1164 
1165 	/* throw away oldest input if the buffer is full */
1166 	if(sce->fill < sce->cur && sce->cur <= sce->fill + count) {
1167 		sce->cur += count;
1168 		if(sce->cur >= sce->limit)
1169 			sce->cur = sce->ibuf + (sce->limit - sce->cur);
1170 		DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
1171 			     count));
1172 	}
1173 
1174 	isize = UGETW(sce->edesc->wMaxPacketSize);
1175 	for (i = 0; i < UGEN_NISORFRMS; i++) {
1176 		uint32_t actlen = req->sizes[i];
1177 		char const *tbuf = (char const *)req->dmabuf + isize * i;
1178 
1179 		/* copy data to buffer */
1180 		while (actlen > 0) {
1181 			n = min(actlen, sce->limit - sce->fill);
1182 			memcpy(sce->fill, tbuf, n);
1183 
1184 			tbuf += n;
1185 			actlen -= n;
1186 			sce->fill += n;
1187 			if(sce->fill == sce->limit)
1188 				sce->fill = sce->ibuf;
1189 		}
1190 
1191 		/* setup size for next transfer */
1192 		req->sizes[i] = isize;
1193 	}
1194 
1195 	usbd_setup_isoc_xfer(xfer, req, req->sizes, UGEN_NISORFRMS, 0,
1196 	    ugen_isoc_rintr);
1197 	(void)usbd_transfer(xfer);
1198 
1199 	mutex_enter(&sc->sc_lock);
1200 	if (sce->state & UGEN_ASLP) {
1201 		sce->state &= ~UGEN_ASLP;
1202 		DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce));
1203 		cv_signal(&sce->cv);
1204 	}
1205 	mutex_exit(&sc->sc_lock);
1206 	selnotify(&sce->rsel, 0, 0);
1207 }
1208 
/*
 * Bulk read-ahead completion callback.  Copies the received data into
 * the endpoint ring buffer, resubmits the transfer while buffer space
 * remains, and wakes readers.  UGEN_RA_WB_STOP marks the stream as
 * stopped so it can be restarted later (per the comment below, at the
 * next read).
 */
Static void
ugen_bulkra_intr(struct usbd_xfer *xfer, void *addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	char const *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
		    usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used += count;

	/* Copy data to buffer. */
	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
	n = min(count, sce->limit - sce->fill);
	memcpy(sce->fill, tbuf, n);
	tbuf += n;
	count -= n;
	sce->fill += n;
	if (sce->fill == sce->limit)
		sce->fill = sce->ibuf;
	/* Any remainder wraps around to the start of the ring. */
	if (count > 0) {
		memcpy(sce->fill, tbuf, count);
		sce->fill += count;
	}

	/* Set up the next request if necessary. */
	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
	if (n > 0) {
		usbd_setup_xfer(xfer, sce, NULL, min(n, sce->ra_wb_xferlen), 0,
		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkra_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next read.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		sce->state |= UGEN_RA_WB_STOP;

	/* Wake a sleeping reader, then notify select/poll/kqueue. */
	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1277 
/*
 * Bulk write-behind completion callback.  Accounts for the bytes just
 * written, advances the ring-buffer read pointer, refills the xfer
 * from the ring and resubmits while buffered data remains, and wakes
 * writers.  UGEN_RA_WB_STOP marks the stream stopped so it can be
 * restarted (per the comment below, at the next write).
 */
Static void
ugen_bulkwb_intr(struct usbd_xfer *xfer, void *addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	char *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
		    usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used -= count;

	/* Update buffer pointers. */
	sce->cur += count;
	if (sce->cur >= sce->limit)
		sce->cur = sce->ibuf + (sce->cur - sce->limit);

	/* Set up next request if necessary. */
	if (sce->ra_wb_used > 0) {
		/* copy data from buffer */
		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
		count = min(sce->ra_wb_used, sce->ra_wb_xferlen);
		n = min(count, sce->limit - sce->cur);
		memcpy(tbuf, sce->cur, n);
		tbuf += n;
		/* The remainder, if any, wraps to the ring's start. */
		if (count - n > 0)
			memcpy(tbuf, sce->ibuf, count - n);

		usbd_setup_xfer(xfer, sce, NULL, count, 0, USBD_NO_TIMEOUT,
		    ugen_bulkwb_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkwb_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next write.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		sce->state |= UGEN_RA_WB_STOP;

	/* Wake a sleeping writer, then notify select/poll/kqueue. */
	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1345 
1346 Static usbd_status
1347 ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
1348 {
1349 	struct usbd_interface *iface;
1350 	usb_endpoint_descriptor_t *ed;
1351 	usbd_status err;
1352 	struct ugen_endpoint *sce;
1353 	uint8_t niface, nendpt, endptno, endpt;
1354 	int dir;
1355 
1356 	DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));
1357 
1358 	err = usbd_interface_count(sc->sc_udev, &niface);
1359 	if (err)
1360 		return err;
1361 	if (ifaceidx < 0 || ifaceidx >= niface)
1362 		return USBD_INVAL;
1363 
1364 	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1365 	if (err)
1366 		return err;
1367 	err = usbd_endpoint_count(iface, &nendpt);
1368 	if (err)
1369 		return err;
1370 
1371 	/* change setting */
1372 	err = usbd_set_interface(iface, altno);
1373 	if (err)
1374 		return err;
1375 
1376 	err = usbd_endpoint_count(iface, &nendpt);
1377 	if (err)
1378 		return err;
1379 
1380 	ugen_clear_endpoints(sc);
1381 
1382 	for (endptno = 0; endptno < nendpt; endptno++) {
1383 		ed = usbd_interface2endpoint_descriptor(iface,endptno);
1384 		KASSERT(ed != NULL);
1385 		endpt = ed->bEndpointAddress;
1386 		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1387 		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1388 		sce->sc = sc;
1389 		sce->edesc = ed;
1390 		sce->iface = iface;
1391 	}
1392 	return 0;
1393 }
1394 
1395 /* Retrieve a complete descriptor for a certain device and index. */
1396 Static usb_config_descriptor_t *
1397 ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
1398 {
1399 	usb_config_descriptor_t *cdesc, *tdesc, cdescr;
1400 	int len;
1401 	usbd_status err;
1402 
1403 	if (index == USB_CURRENT_CONFIG_INDEX) {
1404 		tdesc = usbd_get_config_descriptor(sc->sc_udev);
1405 		len = UGETW(tdesc->wTotalLength);
1406 		if (lenp)
1407 			*lenp = len;
1408 		cdesc = kmem_alloc(len, KM_SLEEP);
1409 		memcpy(cdesc, tdesc, len);
1410 		DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
1411 	} else {
1412 		err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
1413 		if (err)
1414 			return 0;
1415 		len = UGETW(cdescr.wTotalLength);
1416 		DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
1417 		if (lenp)
1418 			*lenp = len;
1419 		cdesc = kmem_alloc(len, KM_SLEEP);
1420 		err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
1421 		if (err) {
1422 			kmem_free(cdesc, len);
1423 			return 0;
1424 		}
1425 	}
1426 	return cdesc;
1427 }
1428 
1429 Static int
1430 ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
1431 {
1432 	struct usbd_interface *iface;
1433 	usbd_status err;
1434 
1435 	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1436 	if (err)
1437 		return -1;
1438 	return usbd_get_interface_altindex(iface);
1439 }
1440 
1441 Static int
1442 ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
1443 	      void *addr, int flag, struct lwp *l)
1444 {
1445 	struct ugen_endpoint *sce;
1446 	usbd_status err;
1447 	struct usbd_interface *iface;
1448 	struct usb_config_desc *cd;
1449 	usb_config_descriptor_t *cdesc;
1450 	struct usb_interface_desc *id;
1451 	usb_interface_descriptor_t *idesc;
1452 	struct usb_endpoint_desc *ed;
1453 	usb_endpoint_descriptor_t *edesc;
1454 	struct usb_alt_interface *ai;
1455 	struct usb_string_desc *si;
1456 	uint8_t conf, alt;
1457 	int cdesclen;
1458 	int error;
1459 
1460 	DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
1461 	if (sc->sc_dying)
1462 		return EIO;
1463 
1464 	switch (cmd) {
1465 	case FIONBIO:
1466 		/* All handled in the upper FS layer. */
1467 		return 0;
1468 	case USB_SET_SHORT_XFER:
1469 		if (endpt == USB_CONTROL_ENDPOINT)
1470 			return EINVAL;
1471 		/* This flag only affects read */
1472 		sce = &sc->sc_endpoints[endpt][IN];
1473 		if (sce == NULL || sce->pipeh == NULL)
1474 			return EINVAL;
1475 		if (*(int *)addr)
1476 			sce->state |= UGEN_SHORT_OK;
1477 		else
1478 			sce->state &= ~UGEN_SHORT_OK;
1479 		return 0;
1480 	case USB_SET_TIMEOUT:
1481 		sce = &sc->sc_endpoints[endpt][IN];
1482 		if (sce == NULL
1483 		    /* XXX this shouldn't happen, but the distinction between
1484 		       input and output pipes isn't clear enough.
1485 		       || sce->pipeh == NULL */
1486 			)
1487 			return EINVAL;
1488 		sce->timeout = *(int *)addr;
1489 		return 0;
1490 	case USB_SET_BULK_RA:
1491 		if (endpt == USB_CONTROL_ENDPOINT)
1492 			return EINVAL;
1493 		sce = &sc->sc_endpoints[endpt][IN];
1494 		if (sce == NULL || sce->pipeh == NULL)
1495 			return EINVAL;
1496 		edesc = sce->edesc;
1497 		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1498 			return EINVAL;
1499 
1500 		if (*(int *)addr) {
1501 			/* Only turn RA on if it's currently off. */
1502 			if (sce->state & UGEN_BULK_RA)
1503 				return 0;
1504 
1505 			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1506 				/* shouldn't happen */
1507 				return EINVAL;
1508 			error = usbd_create_xfer(sce->pipeh,
1509 			    sce->ra_wb_reqsize, 0, 0, &sce->ra_wb_xfer);
1510 			if (error)
1511 				return error;
1512 			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1513 			sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
1514 			sce->fill = sce->cur = sce->ibuf;
1515 			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1516 			sce->ra_wb_used = 0;
1517 			sce->state |= UGEN_BULK_RA;
1518 			sce->state &= ~UGEN_RA_WB_STOP;
1519 			/* Now start reading. */
1520 			usbd_setup_xfer(sce->ra_wb_xfer, sce, NULL,
1521 			    min(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
1522 			     0, USBD_NO_TIMEOUT, ugen_bulkra_intr);
1523 			err = usbd_transfer(sce->ra_wb_xfer);
1524 			if (err != USBD_IN_PROGRESS) {
1525 				sce->state &= ~UGEN_BULK_RA;
1526 				kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1527 				sce->ibuf = NULL;
1528 				usbd_destroy_xfer(sce->ra_wb_xfer);
1529 				return EIO;
1530 			}
1531 		} else {
1532 			/* Only turn RA off if it's currently on. */
1533 			if (!(sce->state & UGEN_BULK_RA))
1534 				return 0;
1535 
1536 			sce->state &= ~UGEN_BULK_RA;
1537 			usbd_abort_pipe(sce->pipeh);
1538 			usbd_destroy_xfer(sce->ra_wb_xfer);
1539 			/*
1540 			 * XXX Discard whatever's in the buffer, but we
1541 			 * should keep it around and drain the buffer
1542 			 * instead.
1543 			 */
1544 			kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1545 			sce->ibuf = NULL;
1546 		}
1547 		return 0;
1548 	case USB_SET_BULK_WB:
1549 		if (endpt == USB_CONTROL_ENDPOINT)
1550 			return EINVAL;
1551 		sce = &sc->sc_endpoints[endpt][OUT];
1552 		if (sce == NULL || sce->pipeh == NULL)
1553 			return EINVAL;
1554 		edesc = sce->edesc;
1555 		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1556 			return EINVAL;
1557 
1558 		if (*(int *)addr) {
1559 			/* Only turn WB on if it's currently off. */
1560 			if (sce->state & UGEN_BULK_WB)
1561 				return 0;
1562 
1563 			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1564 				/* shouldn't happen */
1565 				return EINVAL;
1566 			error = usbd_create_xfer(sce->pipeh, sce->ra_wb_reqsize,
1567 			    0, 0, &sce->ra_wb_xfer);
1568 			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1569 			sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
1570 			sce->fill = sce->cur = sce->ibuf;
1571 			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1572 			sce->ra_wb_used = 0;
1573 			sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
1574 		} else {
1575 			/* Only turn WB off if it's currently on. */
1576 			if (!(sce->state & UGEN_BULK_WB))
1577 				return 0;
1578 
1579 			sce->state &= ~UGEN_BULK_WB;
1580 			/*
1581 			 * XXX Discard whatever's in the buffer, but we
1582 			 * should keep it around and keep writing to
1583 			 * drain the buffer instead.
1584 			 */
1585 			usbd_abort_pipe(sce->pipeh);
1586 			usbd_destroy_xfer(sce->ra_wb_xfer);
1587 			kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1588 			sce->ibuf = NULL;
1589 		}
1590 		return 0;
1591 	case USB_SET_BULK_RA_OPT:
1592 	case USB_SET_BULK_WB_OPT:
1593 	{
1594 		struct usb_bulk_ra_wb_opt *opt;
1595 
1596 		if (endpt == USB_CONTROL_ENDPOINT)
1597 			return EINVAL;
1598 		opt = (struct usb_bulk_ra_wb_opt *)addr;
1599 		if (cmd == USB_SET_BULK_RA_OPT)
1600 			sce = &sc->sc_endpoints[endpt][IN];
1601 		else
1602 			sce = &sc->sc_endpoints[endpt][OUT];
1603 		if (sce == NULL || sce->pipeh == NULL)
1604 			return EINVAL;
1605 		if (opt->ra_wb_buffer_size < 1 ||
1606 		    opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
1607 		    opt->ra_wb_request_size < 1 ||
1608 		    opt->ra_wb_request_size > opt->ra_wb_buffer_size)
1609 			return EINVAL;
1610 		/*
1611 		 * XXX These changes do not take effect until the
1612 		 * next time RA/WB mode is enabled but they ought to
1613 		 * take effect immediately.
1614 		 */
1615 		sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
1616 		sce->ra_wb_reqsize = opt->ra_wb_request_size;
1617 		return 0;
1618 	}
1619 	default:
1620 		break;
1621 	}
1622 
1623 	if (endpt != USB_CONTROL_ENDPOINT)
1624 		return EINVAL;
1625 
1626 	switch (cmd) {
1627 #ifdef UGEN_DEBUG
1628 	case USB_SETDEBUG:
1629 		ugendebug = *(int *)addr;
1630 		break;
1631 #endif
1632 	case USB_GET_CONFIG:
1633 		err = usbd_get_config(sc->sc_udev, &conf);
1634 		if (err)
1635 			return EIO;
1636 		*(int *)addr = conf;
1637 		break;
1638 	case USB_SET_CONFIG:
1639 		if (!(flag & FWRITE))
1640 			return EPERM;
1641 		err = ugen_set_config(sc, *(int *)addr, 1);
1642 		switch (err) {
1643 		case USBD_NORMAL_COMPLETION:
1644 			break;
1645 		case USBD_IN_USE:
1646 			return EBUSY;
1647 		default:
1648 			return EIO;
1649 		}
1650 		break;
1651 	case USB_GET_ALTINTERFACE:
1652 		ai = (struct usb_alt_interface *)addr;
1653 		err = usbd_device2interface_handle(sc->sc_udev,
1654 			  ai->uai_interface_index, &iface);
1655 		if (err)
1656 			return EINVAL;
1657 		idesc = usbd_get_interface_descriptor(iface);
1658 		if (idesc == NULL)
1659 			return EIO;
1660 		ai->uai_alt_no = idesc->bAlternateSetting;
1661 		break;
1662 	case USB_SET_ALTINTERFACE:
1663 		if (!(flag & FWRITE))
1664 			return EPERM;
1665 		ai = (struct usb_alt_interface *)addr;
1666 		err = usbd_device2interface_handle(sc->sc_udev,
1667 			  ai->uai_interface_index, &iface);
1668 		if (err)
1669 			return EINVAL;
1670 		err = ugen_set_interface(sc, ai->uai_interface_index,
1671 		    ai->uai_alt_no);
1672 		if (err)
1673 			return EINVAL;
1674 		break;
1675 	case USB_GET_NO_ALT:
1676 		ai = (struct usb_alt_interface *)addr;
1677 		cdesc = ugen_get_cdesc(sc, ai->uai_config_index, &cdesclen);
1678 		if (cdesc == NULL)
1679 			return EINVAL;
1680 		idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
1681 		if (idesc == NULL) {
1682 			kmem_free(cdesc, cdesclen);
1683 			return EINVAL;
1684 		}
1685 		ai->uai_alt_no = usbd_get_no_alts(cdesc,
1686 		    idesc->bInterfaceNumber);
1687 		kmem_free(cdesc, cdesclen);
1688 		break;
1689 	case USB_GET_DEVICE_DESC:
1690 		*(usb_device_descriptor_t *)addr =
1691 			*usbd_get_device_descriptor(sc->sc_udev);
1692 		break;
1693 	case USB_GET_CONFIG_DESC:
1694 		cd = (struct usb_config_desc *)addr;
1695 		cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, &cdesclen);
1696 		if (cdesc == NULL)
1697 			return EINVAL;
1698 		cd->ucd_desc = *cdesc;
1699 		kmem_free(cdesc, cdesclen);
1700 		break;
1701 	case USB_GET_INTERFACE_DESC:
1702 		id = (struct usb_interface_desc *)addr;
1703 		cdesc = ugen_get_cdesc(sc, id->uid_config_index, &cdesclen);
1704 		if (cdesc == NULL)
1705 			return EINVAL;
1706 		if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
1707 		    id->uid_alt_index == USB_CURRENT_ALT_INDEX)
1708 			alt = ugen_get_alt_index(sc, id->uid_interface_index);
1709 		else
1710 			alt = id->uid_alt_index;
1711 		idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
1712 		if (idesc == NULL) {
1713 			kmem_free(cdesc, cdesclen);
1714 			return EINVAL;
1715 		}
1716 		id->uid_desc = *idesc;
1717 		kmem_free(cdesc, cdesclen);
1718 		break;
1719 	case USB_GET_ENDPOINT_DESC:
1720 		ed = (struct usb_endpoint_desc *)addr;
1721 		cdesc = ugen_get_cdesc(sc, ed->ued_config_index, &cdesclen);
1722 		if (cdesc == NULL)
1723 			return EINVAL;
1724 		if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
1725 		    ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
1726 			alt = ugen_get_alt_index(sc, ed->ued_interface_index);
1727 		else
1728 			alt = ed->ued_alt_index;
1729 		edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
1730 					alt, ed->ued_endpoint_index);
1731 		if (edesc == NULL) {
1732 			kmem_free(cdesc, cdesclen);
1733 			return EINVAL;
1734 		}
1735 		ed->ued_desc = *edesc;
1736 		kmem_free(cdesc, cdesclen);
1737 		break;
1738 	case USB_GET_FULL_DESC:
1739 	{
1740 		int len;
1741 		struct iovec iov;
1742 		struct uio uio;
1743 		struct usb_full_desc *fd = (struct usb_full_desc *)addr;
1744 
1745 		cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &cdesclen);
1746 		if (cdesc == NULL)
1747 			return EINVAL;
1748 		len = cdesclen;
1749 		if (len > fd->ufd_size)
1750 			len = fd->ufd_size;
1751 		iov.iov_base = (void *)fd->ufd_data;
1752 		iov.iov_len = len;
1753 		uio.uio_iov = &iov;
1754 		uio.uio_iovcnt = 1;
1755 		uio.uio_resid = len;
1756 		uio.uio_offset = 0;
1757 		uio.uio_rw = UIO_READ;
1758 		uio.uio_vmspace = l->l_proc->p_vmspace;
1759 		error = uiomove((void *)cdesc, len, &uio);
1760 		kmem_free(cdesc, cdesclen);
1761 		return error;
1762 	}
1763 	case USB_GET_STRING_DESC: {
1764 		int len;
1765 		si = (struct usb_string_desc *)addr;
1766 		err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
1767 			  si->usd_language_id, &si->usd_desc, &len);
1768 		if (err)
1769 			return EINVAL;
1770 		break;
1771 	}
1772 	case USB_DO_REQUEST:
1773 	{
1774 		struct usb_ctl_request *ur = (void *)addr;
1775 		int len = UGETW(ur->ucr_request.wLength);
1776 		struct iovec iov;
1777 		struct uio uio;
1778 		void *ptr = 0;
1779 		usbd_status xerr;
1780 
1781 		error = 0;
1782 
1783 		if (!(flag & FWRITE))
1784 			return EPERM;
1785 		/* Avoid requests that would damage the bus integrity. */
1786 		if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
1787 		     ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
1788 		    (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
1789 		     ur->ucr_request.bRequest == UR_SET_CONFIG) ||
1790 		    (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
1791 		     ur->ucr_request.bRequest == UR_SET_INTERFACE))
1792 			return EINVAL;
1793 
1794 		if (len < 0 || len > 32767)
1795 			return EINVAL;
1796 		if (len != 0) {
1797 			iov.iov_base = (void *)ur->ucr_data;
1798 			iov.iov_len = len;
1799 			uio.uio_iov = &iov;
1800 			uio.uio_iovcnt = 1;
1801 			uio.uio_resid = len;
1802 			uio.uio_offset = 0;
1803 			uio.uio_rw =
1804 				ur->ucr_request.bmRequestType & UT_READ ?
1805 				UIO_READ : UIO_WRITE;
1806 			uio.uio_vmspace = l->l_proc->p_vmspace;
1807 			ptr = kmem_alloc(len, KM_SLEEP);
1808 			if (uio.uio_rw == UIO_WRITE) {
1809 				error = uiomove(ptr, len, &uio);
1810 				if (error)
1811 					goto ret;
1812 			}
1813 		}
1814 		sce = &sc->sc_endpoints[endpt][IN];
1815 		xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
1816 			  ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
1817 		if (xerr) {
1818 			error = EIO;
1819 			goto ret;
1820 		}
1821 		if (len != 0) {
1822 			if (uio.uio_rw == UIO_READ) {
1823 				size_t alen = min(len, ur->ucr_actlen);
1824 				error = uiomove(ptr, alen, &uio);
1825 				if (error)
1826 					goto ret;
1827 			}
1828 		}
1829 	ret:
1830 		if (ptr)
1831 			kmem_free(ptr, len);
1832 		return error;
1833 	}
1834 	case USB_GET_DEVICEINFO:
1835 		usbd_fill_deviceinfo(sc->sc_udev,
1836 				     (struct usb_device_info *)addr, 0);
1837 		break;
1838 #ifdef COMPAT_30
1839 	case USB_GET_DEVICEINFO_OLD:
1840 		usbd_fill_deviceinfo_old(sc->sc_udev,
1841 					 (struct usb_device_info_old *)addr, 0);
1842 
1843 		break;
1844 #endif
1845 	default:
1846 		return EINVAL;
1847 	}
1848 	return 0;
1849 }
1850 
1851 int
1852 ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1853 {
1854 	int endpt = UGENENDPOINT(dev);
1855 	struct ugen_softc *sc;
1856 	int error;
1857 
1858 	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
1859 	if (sc == NULL || sc->sc_dying)
1860 		return ENXIO;
1861 
1862 	sc->sc_refcnt++;
1863 	error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
1864 	if (--sc->sc_refcnt < 0)
1865 		usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
1866 	return error;
1867 }
1868 
/*
 * Character-device poll entry point.  Reports readability for the IN
 * endpoint and writability for the OUT endpoint of this minor,
 * according to the endpoint transfer type.  Bulk endpoints without
 * RA/WB buffering always report ready, since there is no way to know
 * whether a transfer would block.
 */
int
ugenpoll(dev_t dev, int events, struct lwp *l)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce_in, *sce_out;
	int revents = 0;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (sc->sc_dying)
		return POLLHUP;

	/* Polling the control endpoint is not supported. */
	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
	if (sce_in == NULL && sce_out == NULL)
		return POLLERR;
#ifdef DIAGNOSTIC
	if (!sce_in->edesc && !sce_out->edesc) {
		printf("ugenpoll: no edesc\n");
		return POLLERR;
	}
	/* It's possible to have only one pipe open. */
	if (!sce_in->pipeh && !sce_out->pipeh) {
		printf("ugenpoll: no pipe\n");
		return POLLERR;
	}
#endif

	/* sc_lock protects the buffer state examined below. */
	mutex_enter(&sc->sc_lock);
	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (sce_in->q.c_cc > 0)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_ISOCHRONOUS:
			/* Ring buffer non-empty when cur != fill. */
			if (sce_in->cur != sce_in->fill)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_BULK:
			if (sce_in->state & UGEN_BULK_RA) {
				if (sce_in->ra_wb_used > 0)
					revents |= events &
					    (POLLIN | POLLRDNORM);
				else
					selrecord(l, &sce_in->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			 revents |= events & (POLLIN | POLLRDNORM);
			 break;
		default:
			break;
		}
	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX unimplemented */
			break;
		case UE_BULK:
			if (sce_out->state & UGEN_BULK_WB) {
				/* Writable while ring space remains. */
				if (sce_out->ra_wb_used <
				    sce_out->limit - sce_out->ibuf)
					revents |= events &
					    (POLLOUT | POLLWRNORM);
				else
					selrecord(l, &sce_out->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			 revents |= events & (POLLOUT | POLLWRNORM);
			 break;
		default:
			break;
		}

	mutex_exit(&sc->sc_lock);

	return revents;
}
1967 
1968 static void
1969 filt_ugenrdetach(struct knote *kn)
1970 {
1971 	struct ugen_endpoint *sce = kn->kn_hook;
1972 	struct ugen_softc *sc = sce->sc;
1973 
1974 	mutex_enter(&sc->sc_lock);
1975 	SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
1976 	mutex_exit(&sc->sc_lock);
1977 }
1978 
1979 static int
1980 filt_ugenread_intr(struct knote *kn, long hint)
1981 {
1982 	struct ugen_endpoint *sce = kn->kn_hook;
1983 	struct ugen_softc *sc = sce->sc;
1984 
1985 	if (sc->sc_dying)
1986 		return 0;
1987 
1988 	kn->kn_data = sce->q.c_cc;
1989 	return kn->kn_data > 0;
1990 }
1991 
1992 static int
1993 filt_ugenread_isoc(struct knote *kn, long hint)
1994 {
1995 	struct ugen_endpoint *sce = kn->kn_hook;
1996 	struct ugen_softc *sc = sce->sc;
1997 
1998 	if (sc->sc_dying)
1999 		return 0;
2000 
2001 	if (sce->cur == sce->fill)
2002 		return 0;
2003 
2004 	if (sce->cur < sce->fill)
2005 		kn->kn_data = sce->fill - sce->cur;
2006 	else
2007 		kn->kn_data = (sce->limit - sce->cur) +
2008 		    (sce->fill - sce->ibuf);
2009 
2010 	return 1;
2011 }
2012 
2013 static int
2014 filt_ugenread_bulk(struct knote *kn, long hint)
2015 {
2016 	struct ugen_endpoint *sce = kn->kn_hook;
2017 	struct ugen_softc *sc = sce->sc;
2018 
2019 	if (sc->sc_dying)
2020 		return 0;
2021 
2022 	if (!(sce->state & UGEN_BULK_RA))
2023 		/*
2024 		 * We have no easy way of determining if a read will
2025 		 * yield any data or a write will happen.
2026 		 * So, emulate "seltrue".
2027 		 */
2028 		return filt_seltrue(kn, hint);
2029 
2030 	if (sce->ra_wb_used == 0)
2031 		return 0;
2032 
2033 	kn->kn_data = sce->ra_wb_used;
2034 
2035 	return 1;
2036 }
2037 
2038 static int
2039 filt_ugenwrite_bulk(struct knote *kn, long hint)
2040 {
2041 	struct ugen_endpoint *sce = kn->kn_hook;
2042 	struct ugen_softc *sc = sce->sc;
2043 
2044 	if (sc->sc_dying)
2045 		return 0;
2046 
2047 	if (!(sce->state & UGEN_BULK_WB))
2048 		/*
2049 		 * We have no easy way of determining if a read will
2050 		 * yield any data or a write will happen.
2051 		 * So, emulate "seltrue".
2052 		 */
2053 		return filt_seltrue(kn, hint);
2054 
2055 	if (sce->ra_wb_used == sce->limit - sce->ibuf)
2056 		return 0;
2057 
2058 	kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;
2059 
2060 	return 1;
2061 }
2062 
/* kqueue EVFILT_READ ops for interrupt-in endpoints. */
static const struct filterops ugenread_intr_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_intr,
};

/* kqueue EVFILT_READ ops for isochronous-in endpoints. */
static const struct filterops ugenread_isoc_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_isoc,
};

/* kqueue EVFILT_READ ops for bulk-in endpoints. */
static const struct filterops ugenread_bulk_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_bulk,
};

/* kqueue EVFILT_WRITE ops for bulk-out endpoints (detach is shared). */
static const struct filterops ugenwrite_bulk_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenwrite_bulk,
};
2090 
/*
 * Character-device kqueue attach entry point.  Selects the filterops
 * matching the endpoint's direction and transfer type and links the
 * knote onto the endpoint's selinfo klist under sc_lock.
 */
int
ugenkqfilter(dev_t dev, struct knote *kn)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	struct klist *klist;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL || sc->sc_dying)
		return ENXIO;

	/* kqueue on the control endpoint is not supported. */
	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
		if (sce == NULL)
			return EINVAL;

		klist = &sce->rsel.sel_klist;
		/*
		 * NOTE(review): sce->edesc is dereferenced without a
		 * NULL check; presumably the endpoint must be open
		 * (edesc set) to reach here — confirm against open path.
		 */
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			kn->kn_fop = &ugenread_intr_filtops;
			break;
		case UE_ISOCHRONOUS:
			kn->kn_fop = &ugenread_isoc_filtops;
			break;
		case UE_BULK:
			kn->kn_fop = &ugenread_bulk_filtops;
			break;
		default:
			return EINVAL;
		}
		break;

	case EVFILT_WRITE:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
		if (sce == NULL)
			return EINVAL;

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX poll doesn't support this */
			return EINVAL;

		case UE_BULK:
			kn->kn_fop = &ugenwrite_bulk_filtops;
			break;
		default:
			return EINVAL;
		}
		break;

	default:
		return EINVAL;
	}

	kn->kn_hook = sce;

	/* Publish the knote under the lock that guards the klist. */
	mutex_enter(&sc->sc_lock);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	mutex_exit(&sc->sc_lock);

	return 0;
}
2159