1 /*	$NetBSD: ugen.c,v 1.126 2014/09/20 08:45:23 gson Exp $	*/
2 
3 /*
4  * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Lennart Augustsson (lennart@augustsson.net) at
9  * Carlstedt Research & Technology.
10  *
11  * Copyright (c) 2006 BBN Technologies Corp.  All rights reserved.
12  * Effort sponsored in part by the Defense Advanced Research Projects
13  * Agency (DARPA) and the Department of the Interior National Business
14  * Center under agreement number NBCHC050166.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.126 2014/09/20 08:45:23 gson Exp $");
41 
42 #ifdef _KERNEL_OPT
43 #include "opt_compat_netbsd.h"
44 #include "opt_usb.h"
45 #endif
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/kernel.h>
50 #include <sys/malloc.h>
51 #include <sys/device.h>
52 #include <sys/ioctl.h>
53 #include <sys/conf.h>
54 #include <sys/tty.h>
55 #include <sys/file.h>
56 #include <sys/select.h>
57 #include <sys/proc.h>
58 #include <sys/vnode.h>
59 #include <sys/poll.h>
60 
61 #include <dev/usb/usb.h>
62 #include <dev/usb/usbdi.h>
63 #include <dev/usb/usbdi_util.h>
64 
65 #ifdef UGEN_DEBUG
66 #define DPRINTF(x)	if (ugendebug) printf x
67 #define DPRINTFN(n,x)	if (ugendebug>(n)) printf x
68 int	ugendebug = 0;
69 #else
70 #define DPRINTF(x)
71 #define DPRINTFN(n,x)
72 #endif
73 
74 #define	UGEN_CHUNK	128	/* chunk size for read */
75 #define	UGEN_IBSIZE	1020	/* interrupt input (clist) buffer size */
76 #define	UGEN_BBSIZE	1024	/* bulk transfer bounce buffer size */
77 
78 #define UGEN_NISOREQS	4	/* number of outstanding xfer requests */
79 #define UGEN_NISORFRMS	8	/* number of transactions per req */
80 #define UGEN_NISOFRAMES	(UGEN_NISORFRMS * UGEN_NISOREQS)
81 
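/*
 * Defaults for bulk read-ahead (RA) / write-behind (WB) mode, in which
 * transfers are kept queued against an internal ring buffer independently
 * of individual read(2)/write(2) calls.
 */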
82 #define UGEN_BULK_RA_WB_BUFSIZE	16384		/* default buffer size */
83 #define UGEN_BULK_RA_WB_BUFMAX	(1 << 20)	/* maximum allowed buffer */
84 
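/* One outstanding isochronous read request, covering UGEN_NISORFRMS frames. */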
85 struct isoreq {
86 	struct ugen_endpoint *sce;
87 	usbd_xfer_handle xfer;
88 	void *dmabuf;
89 	u_int16_t sizes[UGEN_NISORFRMS];
90 };
91 
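/* Per-endpoint state; one of these exists per endpoint address and direction. */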
92 struct ugen_endpoint {
93 	struct ugen_softc *sc;
94 	usb_endpoint_descriptor_t *edesc;
95 	usbd_interface_handle iface;
96 	int state;
97 #define	UGEN_ASLP	0x02	/* waiting for data */
98 #define UGEN_SHORT_OK	0x04	/* short xfers are OK */
99 #define UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
100 #define UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
101 #define UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
102 	usbd_pipe_handle pipeh;
103 	struct clist q;
104 	u_char *ibuf;		/* start of buffer (circular for isoc) */
105 	u_char *fill;		/* location for input (isoc) */
106 	u_char *limit;		/* end of circular buffer (isoc) */
107 	u_char *cur;		/* current read location (isoc) */
108 	u_int32_t timeout;
109 	u_int32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
110 	u_int32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
111 	u_int32_t ra_wb_used;	 /* how much is in buffer */
112 	u_int32_t ra_wb_xferlen; /* current xfer length for RA/WB */
113 	usbd_xfer_handle ra_wb_xfer;
114 	struct isoreq isoreqs[UGEN_NISOREQS];
115 	/* Keep these last; we don't overwrite them in ugen_set_config() */
116 #define UGEN_ENDPOINT_NONZERO_CRUFT	offsetof(struct ugen_endpoint, rsel)
117 	struct selinfo rsel;
118 	kcondvar_t cv;
119 };
120 
121 struct ugen_softc {
122 	device_t sc_dev;		/* base device */
123 	usbd_device_handle sc_udev;
124 
125 	kmutex_t		sc_lock;
126 	kcondvar_t		sc_detach_cv;
127 
128 	char sc_is_open[USB_MAX_ENDPOINTS];
129 	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
130 #define OUT 0
131 #define IN  1
132 
133 	int sc_refcnt;
134 	char sc_buffer[UGEN_BBSIZE];
135 	u_char sc_dying;
136 };
137 
138 dev_type_open(ugenopen);
139 dev_type_close(ugenclose);
140 dev_type_read(ugenread);
141 dev_type_write(ugenwrite);
142 dev_type_ioctl(ugenioctl);
143 dev_type_poll(ugenpoll);
144 dev_type_kqfilter(ugenkqfilter);
145 
146 const struct cdevsw ugen_cdevsw = {
147 	.d_open = ugenopen,
148 	.d_close = ugenclose,
149 	.d_read = ugenread,
150 	.d_write = ugenwrite,
151 	.d_ioctl = ugenioctl,
152 	.d_stop = nostop,
153 	.d_tty = notty,
154 	.d_poll = ugenpoll,
155 	.d_mmap = nommap,
156 	.d_kqfilter = ugenkqfilter,
157 	.d_discard = nodiscard,
158 	.d_flag = D_OTHER,
159 };
160 
161 Static void ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr,
162 		     usbd_status status);
163 Static void ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
164 			    usbd_status status);
165 Static void ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
166 			     usbd_status status);
167 Static void ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
168 			     usbd_status status);
169 Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
170 Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
171 Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
172 			 void *, int, struct lwp *);
173 Static int ugen_set_config(struct ugen_softc *sc, int configno);
174 Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *sc,
175 					       int index, int *lenp);
176 Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
177 Static int ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx);
178 
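/* The minor number encodes the unit in bits 4-7 and the endpoint address in bits 0-3. */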
179 #define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
180 #define UGENENDPOINT(n) (minor(n) & 0xf)
181 #define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))
182 
183 int             ugen_match(device_t, cfdata_t, void *);
184 void            ugen_attach(device_t, device_t, void *);
185 int             ugen_detach(device_t, int);
186 int             ugen_activate(device_t, enum devact);
187 extern struct cfdriver ugen_cd;
188 CFATTACH_DECL_NEW(ugen, sizeof(struct ugen_softc), ugen_match, ugen_attach, ugen_detach, ugen_activate);
189 
190 /* Attach override: 1 forces UMATCH_HIGHEST, 0 never forces, -1 defers to the device's cf_flags. */
191 int ugen_override = -1;
192 
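/*
 * Illustrative kernel configuration line for forcing ugen to claim a device
 * via cf_flags (the usual locator syntax is assumed here):
 *
 *	ugen* at uhub? port ? flags 1
 */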
193 int
194 ugen_match(device_t parent, cfdata_t match, void *aux)
195 {
196 	struct usb_attach_arg *uaa = aux;
197 	int override;
198 
199 	if (ugen_override != -1)
200 		override = ugen_override;
201 	else
202 		override = match->cf_flags & 1;
203 
204 	if (override)
205 		return (UMATCH_HIGHEST);
206 	else if (uaa->usegeneric)
207 		return (UMATCH_GENERIC);
208 	else
209 		return (UMATCH_NONE);
210 }
211 
212 void
213 ugen_attach(device_t parent, device_t self, void *aux)
214 {
215 	struct ugen_softc *sc = device_private(self);
216 	struct usb_attach_arg *uaa = aux;
217 	usbd_device_handle udev;
218 	char *devinfop;
219 	usbd_status err;
220 	int i, dir, conf;
221 
222 	aprint_naive("\n");
223 	aprint_normal("\n");
224 
225 	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_USB);
226 	cv_init(&sc->sc_detach_cv, "ugendet");
227 
228 	devinfop = usbd_devinfo_alloc(uaa->device, 0);
229 	aprint_normal_dev(self, "%s\n", devinfop);
230 	usbd_devinfo_free(devinfop);
231 
232 	sc->sc_dev = self;
233 	sc->sc_udev = udev = uaa->device;
234 
235 	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
236 		for (dir = OUT; dir <= IN; dir++) {
237 			struct ugen_endpoint *sce;
238 
239 			sce = &sc->sc_endpoints[i][dir];
240 			selinit(&sce->rsel);
241 			cv_init(&sce->cv, "ugensce");
242 		}
243 	}
244 
245 	/* First set configuration index 0, the default one for ugen. */
246 	err = usbd_set_config_index(udev, 0, 0);
247 	if (err) {
248 		aprint_error_dev(self,
249 		    "setting configuration index 0 failed\n");
250 		sc->sc_dying = 1;
251 		return;
252 	}
253 	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;
254 
255 	/* Set up all the local state for this configuration. */
256 	err = ugen_set_config(sc, conf);
257 	if (err) {
258 		aprint_error_dev(self, "setting configuration %d failed\n",
259 		    conf);
260 		sc->sc_dying = 1;
261 		return;
262 	}
263 
264 	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev,
265 			   sc->sc_dev);
266 
267 	if (!pmf_device_register(self, NULL, NULL))
268 		aprint_error_dev(self, "couldn't establish power handler\n");
269 
270 	return;
271 }
272 
273 Static int
274 ugen_set_config(struct ugen_softc *sc, int configno)
275 {
276 	usbd_device_handle dev = sc->sc_udev;
277 	usb_config_descriptor_t *cdesc;
278 	usbd_interface_handle iface;
279 	usb_endpoint_descriptor_t *ed;
280 	struct ugen_endpoint *sce;
281 	u_int8_t niface, nendpt;
282 	int ifaceno, endptno, endpt;
283 	usbd_status err;
284 	int dir, i;
285 
286 	DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
287 		    device_xname(sc->sc_dev), configno, sc));
288 
289 	/*
290 	 * We start at 1, not 0, because we don't care whether the
291 	 * control endpoint is open or not. It is always present.
292 	 */
293 	for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
294 		if (sc->sc_is_open[endptno]) {
295 			DPRINTFN(1,
296 			     ("ugen_set_config: %s - endpoint %d is open\n",
297 			      device_xname(sc->sc_dev), endptno));
298 			return (USBD_IN_USE);
299 		}
300 
301 	/* Avoid re-setting the configuration if it is already current. */
302 	cdesc = usbd_get_config_descriptor(dev);
303 	if (!cdesc || cdesc->bConfigurationValue != configno) {
304 		err = usbd_set_config_no(dev, configno, 1);
305 		if (err)
306 			return (err);
307 	}
308 
309 	err = usbd_interface_count(dev, &niface);
310 	if (err)
311 		return (err);
312 
313 	/* Clear out the old info, but leave the selinfo and cv initialised. */
314 	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
315 		for (dir = OUT; dir <= IN; dir++) {
316 			sce = &sc->sc_endpoints[i][dir];
317 			memset(sce, 0, UGEN_ENDPOINT_NONZERO_CRUFT);
318 		}
319 	}
320 
321 	for (ifaceno = 0; ifaceno < niface; ifaceno++) {
322 		DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
323 		err = usbd_device2interface_handle(dev, ifaceno, &iface);
324 		if (err)
325 			return (err);
326 		err = usbd_endpoint_count(iface, &nendpt);
327 		if (err)
328 			return (err);
329 		for (endptno = 0; endptno < nendpt; endptno++) {
330 			ed = usbd_interface2endpoint_descriptor(iface,endptno);
331 			KASSERT(ed != NULL);
332 			endpt = ed->bEndpointAddress;
333 			dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
334 			sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
335 			DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
336 				    "(%d,%d), sce=%p\n",
337 				    endptno, endpt, UE_GET_ADDR(endpt),
338 				    UE_GET_DIR(endpt), sce));
339 			sce->sc = sc;
340 			sce->edesc = ed;
341 			sce->iface = iface;
342 		}
343 	}
344 	return (USBD_NORMAL_COMPLETION);
345 }
346 
347 int
348 ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
349 {
350 	struct ugen_softc *sc;
351 	int unit = UGENUNIT(dev);
352 	int endpt = UGENENDPOINT(dev);
353 	usb_endpoint_descriptor_t *edesc;
354 	struct ugen_endpoint *sce;
355 	int dir, isize;
356 	usbd_status err;
357 	usbd_xfer_handle xfer;
358 	void *tbuf;
359 	int i, j;
360 
361 	sc = device_lookup_private(&ugen_cd, unit);
362 	if (sc == NULL)
363 		return ENXIO;
364 
365 	DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
366 		     flag, mode, unit, endpt));
367 
368 	if (sc == NULL || sc->sc_dying)
369 		return (ENXIO);
370 
371 	/* The control endpoint allows multiple opens. */
372 	if (endpt == USB_CONTROL_ENDPOINT) {
373 		sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
374 		return (0);
375 	}
376 
377 	if (sc->sc_is_open[endpt])
378 		return (EBUSY);
379 
380 	/* Make sure the endpoint descriptors exist for each requested direction. */
381 	for (dir = OUT; dir <= IN; dir++) {
382 		if (flag & (dir == OUT ? FWRITE : FREAD)) {
383 			sce = &sc->sc_endpoints[endpt][dir];
384 			if (sce == NULL || sce->edesc == NULL)
385 				return (ENXIO);
386 		}
387 	}
388 
389 	/* Actually open the pipes. */
390 	/* XXX Should back out properly if it fails. */
391 	for (dir = OUT; dir <= IN; dir++) {
392 		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
393 			continue;
394 		sce = &sc->sc_endpoints[endpt][dir];
395 		sce->state = 0;
396 		sce->timeout = USBD_NO_TIMEOUT;
397 		DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
398 			     sc, endpt, dir, sce));
399 		edesc = sce->edesc;
400 		switch (edesc->bmAttributes & UE_XFERTYPE) {
401 		case UE_INTERRUPT:
402 			if (dir == OUT) {
403 				err = usbd_open_pipe(sce->iface,
404 				    edesc->bEndpointAddress, 0, &sce->pipeh);
405 				if (err)
406 					return (EIO);
407 				break;
408 			}
409 			isize = UGETW(edesc->wMaxPacketSize);
410 			if (isize == 0)	/* shouldn't happen */
411 				return (EINVAL);
412 			sce->ibuf = malloc(isize, M_USBDEV, M_WAITOK);
413 			DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
414 				     endpt, isize));
415 			if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1) {
416 				free(sce->ibuf, M_USBDEV);
417 				sce->ibuf = NULL;
418 				return (ENOMEM);
419 			}
420 			err = usbd_open_pipe_intr(sce->iface,
421 				  edesc->bEndpointAddress,
422 				  USBD_SHORT_XFER_OK, &sce->pipeh, sce,
423 				  sce->ibuf, isize, ugenintr,
424 				  USBD_DEFAULT_INTERVAL);
425 			if (err) {
426 				clfree(&sce->q);
427 				free(sce->ibuf, M_USBDEV);
428 				sce->ibuf = NULL;
429 				return (EIO);
430 			}
431 			DPRINTFN(5, ("ugenopen: interrupt open done\n"));
432 			break;
433 		case UE_BULK:
434 			err = usbd_open_pipe(sce->iface,
435 				  edesc->bEndpointAddress, 0, &sce->pipeh);
436 			if (err)
437 				return (EIO);
438 			sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
439 			/*
440 			 * Use request size for non-RA/WB transfers
441 			 * as the default.
442 			 */
443 			sce->ra_wb_reqsize = UGEN_BBSIZE;
444 			break;
445 		case UE_ISOCHRONOUS:
446 			if (dir == OUT)
447 				return (EINVAL);
448 			isize = UGETW(edesc->wMaxPacketSize);
449 			if (isize == 0)	/* shouldn't happen */
450 				return (EINVAL);
451 			sce->ibuf = malloc(isize * UGEN_NISOFRAMES,
452 				M_USBDEV, M_WAITOK);
453 			sce->cur = sce->fill = sce->ibuf;
454 			sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
455 			DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
456 				     endpt, isize));
457 			err = usbd_open_pipe(sce->iface,
458 				  edesc->bEndpointAddress, 0, &sce->pipeh);
459 			if (err) {
460 				free(sce->ibuf, M_USBDEV);
461 				sce->ibuf = NULL;
462 				return (EIO);
463 			}
464 			for (i = 0; i < UGEN_NISOREQS; ++i) {
465 				sce->isoreqs[i].sce = sce;
466 				xfer = usbd_alloc_xfer(sc->sc_udev);
467 				if (xfer == NULL)
468 					goto bad;
469 				sce->isoreqs[i].xfer = xfer;
470 				tbuf = usbd_alloc_buffer
471 					(xfer, isize * UGEN_NISORFRMS);
472 				if (tbuf == NULL) {
473 					i++;
474 					goto bad;
475 				}
476 				sce->isoreqs[i].dmabuf = tbuf;
477 				for (j = 0; j < UGEN_NISORFRMS; ++j)
478 					sce->isoreqs[i].sizes[j] = isize;
479 				usbd_setup_isoc_xfer
480 					(xfer, sce->pipeh, &sce->isoreqs[i],
481 					 sce->isoreqs[i].sizes,
482 					 UGEN_NISORFRMS, USBD_NO_COPY,
483 					 ugen_isoc_rintr);
484 				(void)usbd_transfer(xfer);
485 			}
486 			DPRINTFN(5, ("ugenopen: isoc open done\n"));
487 			break;
488 		bad:
489 			while (--i >= 0) /* usbd_free_xfer() also frees the dmabuf */
490 				usbd_free_xfer(sce->isoreqs[i].xfer);
491 			usbd_close_pipe(sce->pipeh);
492 			sce->pipeh = NULL;
493 			free(sce->ibuf, M_USBDEV);
494 			sce->ibuf = NULL;
495 			return (ENOMEM);
496 		case UE_CONTROL:
497 			sce->timeout = USBD_DEFAULT_TIMEOUT;
498 			return (EINVAL);
499 		}
500 	}
501 	sc->sc_is_open[endpt] = 1;
502 	return (0);
503 }
504 
505 int
506 ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
507 {
508 	int endpt = UGENENDPOINT(dev);
509 	struct ugen_softc *sc;
510 	struct ugen_endpoint *sce;
511 	int dir;
512 	int i;
513 
514 	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
515 	if (sc == NULL)
516 		return ENXIO;
517 
518 	DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
519 		     flag, mode, UGENUNIT(dev), endpt));
520 
521 #ifdef DIAGNOSTIC
522 	if (!sc->sc_is_open[endpt]) {
523 		printf("ugenclose: not open\n");
524 		return (EINVAL);
525 	}
526 #endif
527 
528 	if (endpt == USB_CONTROL_ENDPOINT) {
529 		DPRINTFN(5, ("ugenclose: close control\n"));
530 		sc->sc_is_open[endpt] = 0;
531 		return (0);
532 	}
533 
534 	for (dir = OUT; dir <= IN; dir++) {
535 		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
536 			continue;
537 		sce = &sc->sc_endpoints[endpt][dir];
538 		if (sce == NULL || sce->pipeh == NULL)
539 			continue;
540 		DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
541 			     endpt, dir, sce));
542 
543 		usbd_abort_pipe(sce->pipeh);
544 		usbd_close_pipe(sce->pipeh);
545 		sce->pipeh = NULL;
546 
547 		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
548 		case UE_INTERRUPT:
549 			ndflush(&sce->q, sce->q.c_cc);
550 			clfree(&sce->q);
551 			break;
552 		case UE_ISOCHRONOUS:
553 			for (i = 0; i < UGEN_NISOREQS; ++i)
554 				usbd_free_xfer(sce->isoreqs[i].xfer);
555 			break;
556 		case UE_BULK:
557 			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB))
558 				/* ibuf freed below */
559 				usbd_free_xfer(sce->ra_wb_xfer);
560 			break;
561 		default:
562 			break;
563 		}
564 
565 		if (sce->ibuf != NULL) {
566 			free(sce->ibuf, M_USBDEV);
567 			sce->ibuf = NULL;
568 		}
569 	}
570 	sc->sc_is_open[endpt] = 0;
571 
572 	return (0);
573 }
574 
575 Static int
576 ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
577 {
578 	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
579 	u_int32_t n, tn;
580 	usbd_xfer_handle xfer;
581 	usbd_status err;
582 	int error = 0;
583 
584 	DPRINTFN(5, ("%s: ugenread: %d\n", device_xname(sc->sc_dev), endpt));
585 
586 	if (sc->sc_dying)
587 		return (EIO);
588 
589 	if (endpt == USB_CONTROL_ENDPOINT)
590 		return (ENODEV);
591 
592 #ifdef DIAGNOSTIC
593 	if (sce->edesc == NULL) {
594 		printf("ugenread: no edesc\n");
595 		return (EIO);
596 	}
597 	if (sce->pipeh == NULL) {
598 		printf("ugenread: no pipe\n");
599 		return (EIO);
600 	}
601 #endif
602 
603 	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
604 	case UE_INTERRUPT:
605 		/* Block until activity occurs. */
606 		mutex_enter(&sc->sc_lock);
607 		while (sce->q.c_cc == 0) {
608 			if (flag & IO_NDELAY) {
609 				mutex_exit(&sc->sc_lock);
610 				return (EWOULDBLOCK);
611 			}
612 			sce->state |= UGEN_ASLP;
613 			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
614 			/* "ugenri" */
615 			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
616 			    mstohz(sce->timeout));
617 			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
618 			if (sc->sc_dying)
619 				error = EIO;
620 			if (error) {
621 				sce->state &= ~UGEN_ASLP;
622 				break;
623 			}
624 		}
625 		mutex_exit(&sc->sc_lock);
626 
627 		/* Transfer as many chunks as possible. */
628 		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
629 			n = min(sce->q.c_cc, uio->uio_resid);
630 			if (n > sizeof(sc->sc_buffer))
631 				n = sizeof(sc->sc_buffer);
632 
633 			/* Remove a small chunk from the input queue. */
634 			q_to_b(&sce->q, sc->sc_buffer, n);
635 			DPRINTFN(5, ("ugenread: got %d chars\n", n));
636 
637 			/* Copy the data to the user process. */
638 			error = uiomove(sc->sc_buffer, n, uio);
639 			if (error)
640 				break;
641 		}
642 		break;
643 	case UE_BULK:
644 		if (sce->state & UGEN_BULK_RA) {
645 			DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
646 				     uio->uio_resid, sce->ra_wb_used));
647 			xfer = sce->ra_wb_xfer;
648 
649 			mutex_enter(&sc->sc_lock);
650 			if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
651 				mutex_exit(&sc->sc_lock);
652 				return (EWOULDBLOCK);
653 			}
654 			while (uio->uio_resid > 0 && !error) {
655 				while (sce->ra_wb_used == 0) {
656 					sce->state |= UGEN_ASLP;
657 					DPRINTFN(5,
658 						 ("ugenread: sleep on %p\n",
659 						  sce));
660 					/* "ugenrb" */
661 					error = cv_timedwait_sig(&sce->cv,
662 					    &sc->sc_lock, mstohz(sce->timeout));
663 					DPRINTFN(5,
664 						 ("ugenread: woke, error=%d\n",
665 						  error));
666 					if (sc->sc_dying)
667 						error = EIO;
668 					if (error) {
669 						sce->state &= ~UGEN_ASLP;
670 						break;
671 					}
672 				}
673 
674 				/* Copy data to the process. */
675 				while (uio->uio_resid > 0
676 				       && sce->ra_wb_used > 0) {
677 					n = min(uio->uio_resid,
678 						sce->ra_wb_used);
679 					n = min(n, sce->limit - sce->cur);
680 					error = uiomove(sce->cur, n, uio);
681 					if (error)
682 						break;
683 					sce->cur += n;
684 					sce->ra_wb_used -= n;
685 					if (sce->cur == sce->limit)
686 						sce->cur = sce->ibuf;
687 				}
688 
689 				/*
690 				 * If the transfers stopped because the
691 				 * buffer was full, restart them.
692 				 */
693 				if (sce->state & UGEN_RA_WB_STOP &&
694 				    sce->ra_wb_used < sce->limit - sce->ibuf) {
695 					n = (sce->limit - sce->ibuf)
696 					    - sce->ra_wb_used;
697 					usbd_setup_xfer(xfer,
698 					    sce->pipeh, sce, NULL,
699 					    min(n, sce->ra_wb_xferlen),
700 					    USBD_NO_COPY, USBD_NO_TIMEOUT,
701 					    ugen_bulkra_intr);
702 					sce->state &= ~UGEN_RA_WB_STOP;
703 					err = usbd_transfer(xfer);
704 					if (err != USBD_IN_PROGRESS)
705 						/*
706 						 * The transfer has not been
707 						 * queued.  Setting STOP
708 						 * will make us try
709 						 * again at the next read.
710 						 */
711 						sce->state |= UGEN_RA_WB_STOP;
712 				}
713 			}
714 			mutex_exit(&sc->sc_lock);
715 			break;
716 		}
717 		xfer = usbd_alloc_xfer(sc->sc_udev);
718 		if (xfer == NULL)
719 			return (ENOMEM);
720 		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
721 			DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
722 			tn = n;
723 			err = usbd_bulk_transfer(
724 				  xfer, sce->pipeh,
725 				  sce->state & UGEN_SHORT_OK ?
726 				      USBD_SHORT_XFER_OK : 0,
727 				  sce->timeout, sc->sc_buffer, &tn, "ugenrb");
728 			if (err) {
729 				if (err == USBD_INTERRUPTED)
730 					error = EINTR;
731 				else if (err == USBD_TIMEOUT)
732 					error = ETIMEDOUT;
733 				else
734 					error = EIO;
735 				break;
736 			}
737 			DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
738 			error = uiomove(sc->sc_buffer, tn, uio);
739 			if (error || tn < n)
740 				break;
741 		}
742 		usbd_free_xfer(xfer);
743 		break;
744 	case UE_ISOCHRONOUS:
745 		mutex_enter(&sc->sc_lock);
746 		while (sce->cur == sce->fill) {
747 			if (flag & IO_NDELAY) {
748 				mutex_exit(&sc->sc_lock);
749 				return (EWOULDBLOCK);
750 			}
751 			sce->state |= UGEN_ASLP;
752 			/* "ugenri" */
753 			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
754 			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
755 			    mstohz(sce->timeout));
756 			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
757 			if (sc->sc_dying)
758 				error = EIO;
759 			if (error) {
760 				sce->state &= ~UGEN_ASLP;
761 				break;
762 			}
763 		}
764 
765 		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
766 			if (sce->fill > sce->cur)
767 				n = min(sce->fill - sce->cur, uio->uio_resid);
768 			else
769 				n = min(sce->limit - sce->cur, uio->uio_resid);
770 
771 			DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));
772 
773 			/* Copy the data to the user process. */
774 			error = uiomove(sce->cur, n, uio);
775 			if (error)
776 				break;
777 			sce->cur += n;
778 			if (sce->cur >= sce->limit)
779 				sce->cur = sce->ibuf;
780 		}
781 		mutex_exit(&sc->sc_lock);
782 		break;
783 
784 
785 	default:
786 		return (ENXIO);
787 	}
788 	return (error);
789 }
790 
791 int
792 ugenread(dev_t dev, struct uio *uio, int flag)
793 {
794 	int endpt = UGENENDPOINT(dev);
795 	struct ugen_softc *sc;
796 	int error;
797 
798 	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
799 	if (sc == NULL)
800 		return ENXIO;
801 
802 	mutex_enter(&sc->sc_lock);
803 	sc->sc_refcnt++;
804 	mutex_exit(&sc->sc_lock);
805 
806 	error = ugen_do_read(sc, endpt, uio, flag);
807 
808 	mutex_enter(&sc->sc_lock);
809 	if (--sc->sc_refcnt < 0)
810 		usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
811 	mutex_exit(&sc->sc_lock);
812 
813 	return (error);
814 }
815 
816 Static int
817 ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
818 	int flag)
819 {
820 	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
821 	u_int32_t n;
822 	int error = 0;
823 	u_int32_t tn;
824 	char *dbuf;
825 	usbd_xfer_handle xfer;
826 	usbd_status err;
827 
828 	DPRINTFN(5, ("%s: ugenwrite: %d\n", device_xname(sc->sc_dev), endpt));
829 
830 	if (sc->sc_dying)
831 		return (EIO);
832 
833 	if (endpt == USB_CONTROL_ENDPOINT)
834 		return (ENODEV);
835 
836 #ifdef DIAGNOSTIC
837 	if (sce->edesc == NULL) {
838 		printf("ugenwrite: no edesc\n");
839 		return (EIO);
840 	}
841 	if (sce->pipeh == NULL) {
842 		printf("ugenwrite: no pipe\n");
843 		return (EIO);
844 	}
845 #endif
846 
847 	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
848 	case UE_BULK:
849 		if (sce->state & UGEN_BULK_WB) {
850 			DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
851 				     uio->uio_resid, sce->ra_wb_used));
852 			xfer = sce->ra_wb_xfer;
853 
854 			mutex_enter(&sc->sc_lock);
855 			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
856 			    flag & IO_NDELAY) {
857 				mutex_exit(&sc->sc_lock);
858 				return (EWOULDBLOCK);
859 			}
860 			while (uio->uio_resid > 0 && !error) {
861 				while (sce->ra_wb_used ==
862 				       sce->limit - sce->ibuf) {
863 					sce->state |= UGEN_ASLP;
864 					DPRINTFN(5,
865 						 ("ugenwrite: sleep on %p\n",
866 						  sce));
867 					/* "ugenwb" */
868 					error = cv_timedwait_sig(&sce->cv,
869 					    &sc->sc_lock, mstohz(sce->timeout));
870 					DPRINTFN(5,
871 						 ("ugenwrite: woke, error=%d\n",
872 						  error));
873 					if (sc->sc_dying)
874 						error = EIO;
875 					if (error) {
876 						sce->state &= ~UGEN_ASLP;
877 						break;
878 					}
879 				}
880 
881 				/* Copy data from the process. */
882 				while (uio->uio_resid > 0 &&
883 				    sce->ra_wb_used < sce->limit - sce->ibuf) {
884 					n = min(uio->uio_resid,
885 						(sce->limit - sce->ibuf)
886 						 - sce->ra_wb_used);
887 					n = min(n, sce->limit - sce->fill);
888 					error = uiomove(sce->fill, n, uio);
889 					if (error)
890 						break;
891 					sce->fill += n;
892 					sce->ra_wb_used += n;
893 					if (sce->fill == sce->limit)
894 						sce->fill = sce->ibuf;
895 				}
896 
897 				/*
898 				 * If the transfers stopped because the
899 				 * buffer was empty, restart them.
900 				 */
901 				if (sce->state & UGEN_RA_WB_STOP &&
902 				    sce->ra_wb_used > 0) {
903 					dbuf = (char *)usbd_get_buffer(xfer);
904 					n = min(sce->ra_wb_used,
905 						sce->ra_wb_xferlen);
906 					tn = min(n, sce->limit - sce->cur);
907 					memcpy(dbuf, sce->cur, tn);
908 					dbuf += tn;
909 					if (n - tn > 0)
910 						memcpy(dbuf, sce->ibuf,
911 						       n - tn);
912 					usbd_setup_xfer(xfer,
913 					    sce->pipeh, sce, NULL, n,
914 					    USBD_NO_COPY, USBD_NO_TIMEOUT,
915 					    ugen_bulkwb_intr);
916 					sce->state &= ~UGEN_RA_WB_STOP;
917 					err = usbd_transfer(xfer);
918 					if (err != USBD_IN_PROGRESS)
919 						/*
920 						 * The transfer has not been
921 						 * queued.  Setting STOP
922 						 * will make us try again
923 						 * at the next write.
924 						 */
925 						sce->state |= UGEN_RA_WB_STOP;
926 				}
927 			}
928 			mutex_exit(&sc->sc_lock);
929 			break;
930 		}
931 		xfer = usbd_alloc_xfer(sc->sc_udev);
932 		if (xfer == NULL)
933 			return (EIO);
934 		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
935 			error = uiomove(sc->sc_buffer, n, uio);
936 			if (error)
937 				break;
938 			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
939 			err = usbd_bulk_transfer(xfer, sce->pipeh, 0,
940 				  sce->timeout, sc->sc_buffer, &n,"ugenwb");
941 			if (err) {
942 				if (err == USBD_INTERRUPTED)
943 					error = EINTR;
944 				else if (err == USBD_TIMEOUT)
945 					error = ETIMEDOUT;
946 				else
947 					error = EIO;
948 				break;
949 			}
950 		}
951 		usbd_free_xfer(xfer);
952 		break;
953 	case UE_INTERRUPT:
954 		xfer = usbd_alloc_xfer(sc->sc_udev);
955 		if (xfer == NULL)
956 			return (EIO);
957 		while ((n = min(UGETW(sce->edesc->wMaxPacketSize),
958 		    uio->uio_resid)) != 0) {
959 			error = uiomove(sc->sc_buffer, n, uio);
960 			if (error)
961 				break;
962 			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
963 			err = usbd_intr_transfer(xfer, sce->pipeh, 0,
964 			    sce->timeout, sc->sc_buffer, &n, "ugenwi");
965 			if (err) {
966 				if (err == USBD_INTERRUPTED)
967 					error = EINTR;
968 				else if (err == USBD_TIMEOUT)
969 					error = ETIMEDOUT;
970 				else
971 					error = EIO;
972 				break;
973 			}
974 		}
975 		usbd_free_xfer(xfer);
976 		break;
977 	default:
978 		return (ENXIO);
979 	}
980 	return (error);
981 }
982 
983 int
984 ugenwrite(dev_t dev, struct uio *uio, int flag)
985 {
986 	int endpt = UGENENDPOINT(dev);
987 	struct ugen_softc *sc;
988 	int error;
989 
990 	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
991 	if (sc == NULL)
992 		return ENXIO;
993 
994 	mutex_enter(&sc->sc_lock);
995 	sc->sc_refcnt++;
996 	mutex_exit(&sc->sc_lock);
997 
998 	error = ugen_do_write(sc, endpt, uio, flag);
999 
1000 	mutex_enter(&sc->sc_lock);
1001 	if (--sc->sc_refcnt < 0)
1002 		usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
1003 	mutex_exit(&sc->sc_lock);
1004 
1005 	return (error);
1006 }
1007 
1008 int
1009 ugen_activate(device_t self, enum devact act)
1010 {
1011 	struct ugen_softc *sc = device_private(self);
1012 
1013 	switch (act) {
1014 	case DVACT_DEACTIVATE:
1015 		sc->sc_dying = 1;
1016 		return 0;
1017 	default:
1018 		return EOPNOTSUPP;
1019 	}
1020 }
1021 
1022 int
1023 ugen_detach(device_t self, int flags)
1024 {
1025 	struct ugen_softc *sc = device_private(self);
1026 	struct ugen_endpoint *sce;
1027 	int i, dir;
1028 	int maj, mn;
1029 
1030 	DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));
1031 
1032 	sc->sc_dying = 1;
1033 	pmf_device_deregister(self);
1034 	/* Abort all pipes.  Causes processes waiting for transfer to wake. */
1035 	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
1036 		for (dir = OUT; dir <= IN; dir++) {
1037 			sce = &sc->sc_endpoints[i][dir];
1038 			if (sce && sce->pipeh)
1039 				usbd_abort_pipe(sce->pipeh);
1040 		}
1041 	}
1042 
1043 	mutex_enter(&sc->sc_lock);
1044 	if (--sc->sc_refcnt >= 0) {
1045 		/* Wake everyone */
1046 		for (i = 0; i < USB_MAX_ENDPOINTS; i++)
1047 			cv_signal(&sc->sc_endpoints[i][IN].cv);
1048 		/* Wait for processes to go away. */
1049 		usb_detach_wait(sc->sc_dev, &sc->sc_detach_cv, &sc->sc_lock);
1050 	}
1051 	mutex_exit(&sc->sc_lock);
1052 
1053 	/* locate the major number */
1054 	maj = cdevsw_lookup_major(&ugen_cdevsw);
1055 
1056 	/* Nuke the vnodes for any open instances (calls close). */
1057 	mn = device_unit(self) * USB_MAX_ENDPOINTS;
1058 	vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);
1059 
1060 	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev,
1061 			   sc->sc_dev);
1062 
1063 	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
1064 		for (dir = OUT; dir <= IN; dir++) {
1065 			sce = &sc->sc_endpoints[i][dir];
1066 			seldestroy(&sce->rsel);
1067 			cv_destroy(&sce->cv);
1068 		}
1069 	}
1070 
1071 	cv_destroy(&sc->sc_detach_cv);
1072 	mutex_destroy(&sc->sc_lock);
1073 
1074 	return (0);
1075 }
1076 
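/*
 * Completion handler for interrupt-IN endpoints: append the received data
 * to the endpoint's clist and wake any sleeping reader or poller.
 */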
1077 Static void
1078 ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr, usbd_status status)
1079 {
1080 	struct ugen_endpoint *sce = addr;
1081 	struct ugen_softc *sc = sce->sc;
1082 	u_int32_t count;
1083 	u_char *ibuf;
1084 
1085 	if (status == USBD_CANCELLED)
1086 		return;
1087 
1088 	if (status != USBD_NORMAL_COMPLETION) {
1089 		DPRINTF(("ugenintr: status=%d\n", status));
1090 		if (status == USBD_STALLED)
1091 		    usbd_clear_endpoint_stall_async(sce->pipeh);
1092 		return;
1093 	}
1094 
1095 	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1096 	ibuf = sce->ibuf;
1097 
1098 	DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
1099 		     xfer, status, count));
1100 	DPRINTFN(5, ("          data = %02x %02x %02x\n",
1101 		     ibuf[0], ibuf[1], ibuf[2]));
1102 
1103 	(void)b_to_q(ibuf, count, &sce->q);
1104 
1105 	mutex_enter(&sc->sc_lock);
1106 	if (sce->state & UGEN_ASLP) {
1107 		sce->state &= ~UGEN_ASLP;
1108 		DPRINTFN(5, ("ugen_intr: waking %p\n", sce));
1109 		cv_signal(&sce->cv);
1110 	}
1111 	mutex_exit(&sc->sc_lock);
1112 	selnotify(&sce->rsel, 0, 0);
1113 }
1114 
1115 Static void
1116 ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
1117 		usbd_status status)
1118 {
1119 	struct isoreq *req = addr;
1120 	struct ugen_endpoint *sce = req->sce;
1121 	struct ugen_softc *sc = sce->sc;
1122 	u_int32_t count, n;
1123 	int i, isize;
1124 
1125 	/* Return if we are aborting. */
1126 	if (status == USBD_CANCELLED)
1127 		return;
1128 
1129 	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1130 	DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
1131 	    (long)(req - sce->isoreqs), count));
1132 
1133 	/* throw away oldest input if the buffer is full */
1134 	if (sce->fill < sce->cur && sce->cur <= sce->fill + count) {
1135 		sce->cur += count;
1136 		if (sce->cur >= sce->limit)
1137 			sce->cur = sce->ibuf + (sce->cur - sce->limit);
1138 		DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
1139 			     count));
1140 	}
1141 
1142 	isize = UGETW(sce->edesc->wMaxPacketSize);
1143 	for (i = 0; i < UGEN_NISORFRMS; i++) {
1144 		u_int32_t actlen = req->sizes[i];
1145 		char const *tbuf = (char const *)req->dmabuf + isize * i;
1146 
1147 		/* copy data to buffer */
1148 		while (actlen > 0) {
1149 			n = min(actlen, sce->limit - sce->fill);
1150 			memcpy(sce->fill, tbuf, n);
1151 
1152 			tbuf += n;
1153 			actlen -= n;
1154 			sce->fill += n;
1155 			if (sce->fill == sce->limit)
1156 				sce->fill = sce->ibuf;
1157 		}
1158 
1159 		/* setup size for next transfer */
1160 		req->sizes[i] = isize;
1161 	}
1162 
1163 	usbd_setup_isoc_xfer(xfer, sce->pipeh, req, req->sizes, UGEN_NISORFRMS,
1164 			     USBD_NO_COPY, ugen_isoc_rintr);
1165 	(void)usbd_transfer(xfer);
1166 
1167 	mutex_enter(&sc->sc_lock);
1168 	if (sce->state & UGEN_ASLP) {
1169 		sce->state &= ~UGEN_ASLP;
1170 		DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce));
1171 		cv_signal(&sce->cv);
1172 	}
1173 	mutex_exit(&sc->sc_lock);
1174 	selnotify(&sce->rsel, 0, 0);
1175 }
1176 
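/*
 * Completion handler for bulk read-ahead: copy the received data into the
 * ring buffer, resubmit the xfer while space remains (otherwise set
 * UGEN_RA_WB_STOP), and wake any sleeping reader or poller.
 */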
1177 Static void
1178 ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
1179 		 usbd_status status)
1180 {
1181 	struct ugen_endpoint *sce = addr;
1182 	struct ugen_softc *sc = sce->sc;
1183 	u_int32_t count, n;
1184 	char const *tbuf;
1185 	usbd_status err;
1186 
1187 	/* Return if we are aborting. */
1188 	if (status == USBD_CANCELLED)
1189 		return;
1190 
1191 	if (status != USBD_NORMAL_COMPLETION) {
1192 		DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
1193 		sce->state |= UGEN_RA_WB_STOP;
1194 		if (status == USBD_STALLED)
1195 		    usbd_clear_endpoint_stall_async(sce->pipeh);
1196 		return;
1197 	}
1198 
1199 	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1200 
1201 	/* Keep track of how much is in the buffer. */
1202 	sce->ra_wb_used += count;
1203 
1204 	/* Copy data to buffer. */
1205 	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
1206 	n = min(count, sce->limit - sce->fill);
1207 	memcpy(sce->fill, tbuf, n);
1208 	tbuf += n;
1209 	count -= n;
1210 	sce->fill += n;
1211 	if (sce->fill == sce->limit)
1212 		sce->fill = sce->ibuf;
1213 	if (count > 0) {
1214 		memcpy(sce->fill, tbuf, count);
1215 		sce->fill += count;
1216 	}
1217 
1218 	/* Set up the next request if necessary. */
1219 	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
1220 	if (n > 0) {
1221 		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
1222 		    min(n, sce->ra_wb_xferlen), USBD_NO_COPY,
1223 		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
1224 		err = usbd_transfer(xfer);
1225 		if (err != USBD_IN_PROGRESS) {
1226 			printf("ugen_bulkra_intr: error=%d\n", err);
1227 			/*
1228 			 * The transfer has not been queued.  Setting STOP
1229 			 * will make us try again at the next read.
1230 			 */
1231 			sce->state |= UGEN_RA_WB_STOP;
1232 		}
1233 	}
1234 	else
1235 		sce->state |= UGEN_RA_WB_STOP;
1236 
1237 	mutex_enter(&sc->sc_lock);
1238 	if (sce->state & UGEN_ASLP) {
1239 		sce->state &= ~UGEN_ASLP;
1240 		DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce));
1241 		cv_signal(&sce->cv);
1242 	}
1243 	mutex_exit(&sc->sc_lock);
1244 	selnotify(&sce->rsel, 0, 0);
1245 }
1246 
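/*
 * Completion handler for bulk write-behind: drain the written data from the
 * ring buffer, resubmit the xfer while data remains (otherwise set
 * UGEN_RA_WB_STOP), and wake any sleeping writer or poller.
 */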
1247 Static void
1248 ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
1249 		 usbd_status status)
1250 {
1251 	struct ugen_endpoint *sce = addr;
1252 	struct ugen_softc *sc = sce->sc;
1253 	u_int32_t count, n;
1254 	char *tbuf;
1255 	usbd_status err;
1256 
1257 	/* Return if we are aborting. */
1258 	if (status == USBD_CANCELLED)
1259 		return;
1260 
1261 	if (status != USBD_NORMAL_COMPLETION) {
1262 		DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
1263 		sce->state |= UGEN_RA_WB_STOP;
1264 		if (status == USBD_STALLED)
1265 		    usbd_clear_endpoint_stall_async(sce->pipeh);
1266 		return;
1267 	}
1268 
1269 	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1270 
1271 	/* Keep track of how much is in the buffer. */
1272 	sce->ra_wb_used -= count;
1273 
1274 	/* Update buffer pointers. */
1275 	sce->cur += count;
1276 	if (sce->cur >= sce->limit)
1277 		sce->cur = sce->ibuf + (sce->cur - sce->limit);
1278 
1279 	/* Set up next request if necessary. */
1280 	if (sce->ra_wb_used > 0) {
1281 		/* copy data from buffer */
1282 		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
1283 		count = min(sce->ra_wb_used, sce->ra_wb_xferlen);
1284 		n = min(count, sce->limit - sce->cur);
1285 		memcpy(tbuf, sce->cur, n);
1286 		tbuf += n;
1287 		if (count - n > 0)
1288 			memcpy(tbuf, sce->ibuf, count - n);
1289 
1290 		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
1291 		    count, USBD_NO_COPY, USBD_NO_TIMEOUT, ugen_bulkwb_intr);
1292 		err = usbd_transfer(xfer);
1293 		if (err != USBD_IN_PROGRESS) {
1294 			printf("ugen_bulkwb_intr: error=%d\n", err);
1295 			/*
1296 			 * The transfer has not been queued.  Setting STOP
1297 			 * will make us try again at the next write.
1298 			 */
1299 			sce->state |= UGEN_RA_WB_STOP;
1300 		}
1301 	}
1302 	else
1303 		sce->state |= UGEN_RA_WB_STOP;
1304 
1305 	mutex_enter(&sc->sc_lock);
1306 	if (sce->state & UGEN_ASLP) {
1307 		sce->state &= ~UGEN_ASLP;
1308 		DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce));
1309 		cv_signal(&sce->cv);
1310 	}
1311 	mutex_exit(&sc->sc_lock);
1312 	selnotify(&sce->rsel, 0, 0);
1313 }
1314 
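/*
 * Select alternate setting altno on interface ifaceidx and rebind the
 * per-endpoint state to the new endpoint descriptors.
 */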
1315 Static usbd_status
1316 ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
1317 {
1318 	usbd_interface_handle iface;
1319 	usb_endpoint_descriptor_t *ed;
1320 	usbd_status err;
1321 	struct ugen_endpoint *sce;
1322 	u_int8_t niface, nendpt, endptno, endpt;
1323 	int dir;
1324 
1325 	DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));
1326 
1327 	err = usbd_interface_count(sc->sc_udev, &niface);
1328 	if (err)
1329 		return (err);
1330 	if (ifaceidx < 0 || ifaceidx >= niface)
1331 		return (USBD_INVAL);
1332 
1333 	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1334 	if (err)
1335 		return (err);
1336 	err = usbd_endpoint_count(iface, &nendpt);
1337 	if (err)
1338 		return (err);
1339 	/* XXX should only do this after setting new altno has succeeded */
1340 	for (endptno = 0; endptno < nendpt; endptno++) {
1341 		ed = usbd_interface2endpoint_descriptor(iface,endptno);
1342 		endpt = ed->bEndpointAddress;
1343 		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1344 		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1345 		sce->sc = NULL;
1346 		sce->edesc = NULL;
1347 		sce->iface = NULL;
1348 	}
1349 
1350 	/* change setting */
1351 	err = usbd_set_interface(iface, altno);
1352 	if (err)
1353 		return (err);
1354 
1355 	err = usbd_endpoint_count(iface, &nendpt);
1356 	if (err)
1357 		return (err);
1358 	for (endptno = 0; endptno < nendpt; endptno++) {
1359 		ed = usbd_interface2endpoint_descriptor(iface,endptno);
1360 		KASSERT(ed != NULL);
1361 		endpt = ed->bEndpointAddress;
1362 		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1363 		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1364 		sce->sc = sc;
1365 		sce->edesc = ed;
1366 		sce->iface = iface;
1367 	}
1368 	return (0);
1369 }
1370 
1371 /* Retrieve the complete configuration descriptor for the given index. */
1372 Static usb_config_descriptor_t *
1373 ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
1374 {
1375 	usb_config_descriptor_t *cdesc, *tdesc, cdescr;
1376 	int len;
1377 	usbd_status err;
1378 
1379 	if (index == USB_CURRENT_CONFIG_INDEX) {
1380 		tdesc = usbd_get_config_descriptor(sc->sc_udev);
1381 		len = UGETW(tdesc->wTotalLength);
1382 		if (lenp)
1383 			*lenp = len;
1384 		cdesc = malloc(len, M_TEMP, M_WAITOK);
1385 		memcpy(cdesc, tdesc, len);
1386 		DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
1387 	} else {
1388 		err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
1389 		if (err)
1390 			return (NULL);
1391 		len = UGETW(cdescr.wTotalLength);
1392 		DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
1393 		if (lenp)
1394 			*lenp = len;
1395 		cdesc = malloc(len, M_TEMP, M_WAITOK);
1396 		err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
1397 		if (err) {
1398 			free(cdesc, M_TEMP);
1399 			return (NULL);
1400 		}
1401 	}
1402 	return (cdesc);
1403 }
1404 
1405 Static int
1406 ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
1407 {
1408 	usbd_interface_handle iface;
1409 	usbd_status err;
1410 
1411 	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1412 	if (err)
1413 		return (-1);
1414 	return (usbd_get_interface_altindex(iface));
1415 }
1416 
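/*
 * Illustrative (hypothetical) userland use of the bulk read-ahead ioctls
 * handled below, e.g. on an open bulk-IN endpoint node such as /dev/ugen0.01:
 *
 *	struct usb_bulk_ra_wb_opt opt = {
 *		.ra_wb_buffer_size = 65536,
 *		.ra_wb_request_size = 4096,
 *	};
 *	int on = 1;
 *
 *	if (ioctl(fd, USB_SET_BULK_RA_OPT, &opt) == -1 ||
 *	    ioctl(fd, USB_SET_BULK_RA, &on) == -1)
 *		err(1, "USB_SET_BULK_RA");
 */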
1417 Static int
1418 ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
1419 	      void *addr, int flag, struct lwp *l)
1420 {
1421 	struct ugen_endpoint *sce;
1422 	usbd_status err;
1423 	usbd_interface_handle iface;
1424 	struct usb_config_desc *cd;
1425 	usb_config_descriptor_t *cdesc;
1426 	struct usb_interface_desc *id;
1427 	usb_interface_descriptor_t *idesc;
1428 	struct usb_endpoint_desc *ed;
1429 	usb_endpoint_descriptor_t *edesc;
1430 	struct usb_alt_interface *ai;
1431 	struct usb_string_desc *si;
1432 	u_int8_t conf, alt;
1433 
1434 	DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
1435 	if (sc->sc_dying)
1436 		return (EIO);
1437 
1438 	switch (cmd) {
1439 	case FIONBIO:
1440 		/* All handled in the upper FS layer. */
1441 		return (0);
1442 	case USB_SET_SHORT_XFER:
1443 		if (endpt == USB_CONTROL_ENDPOINT)
1444 			return (EINVAL);
1445 		/* This flag only affects read */
1446 		sce = &sc->sc_endpoints[endpt][IN];
1447 		if (sce == NULL || sce->pipeh == NULL)
1448 			return (EINVAL);
1449 		if (*(int *)addr)
1450 			sce->state |= UGEN_SHORT_OK;
1451 		else
1452 			sce->state &= ~UGEN_SHORT_OK;
1453 		return (0);
1454 	case USB_SET_TIMEOUT:
1455 		sce = &sc->sc_endpoints[endpt][IN];
1456 		if (sce == NULL
1457 		    /* XXX this shouldn't happen, but the distinction between
1458 		       input and output pipes isn't clear enough.
1459 		       || sce->pipeh == NULL */
1460 			)
1461 			return (EINVAL);
1462 		sce->timeout = *(int *)addr;
1463 		return (0);
1464 	case USB_SET_BULK_RA:
1465 		if (endpt == USB_CONTROL_ENDPOINT)
1466 			return (EINVAL);
1467 		sce = &sc->sc_endpoints[endpt][IN];
1468 		if (sce == NULL || sce->pipeh == NULL)
1469 			return (EINVAL);
1470 		edesc = sce->edesc;
1471 		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1472 			return (EINVAL);
1473 
1474 		if (*(int *)addr) {
1475 			/* Only turn RA on if it's currently off. */
1476 			if (sce->state & UGEN_BULK_RA)
1477 				return (0);
1478 
1479 			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1480 				/* shouldn't happen */
1481 				return (EINVAL);
1482 			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
1483 			if (sce->ra_wb_xfer == NULL)
1484 				return (ENOMEM);
1485 			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1486 			/*
1487 			 * Set up a dmabuf because we reuse the xfer with
1488 			 * the same (max) request length like isoc.
1489 			 */
1490 			if (usbd_alloc_buffer(sce->ra_wb_xfer,
1491 					      sce->ra_wb_xferlen) == 0) {
1492 				usbd_free_xfer(sce->ra_wb_xfer);
1493 				return (ENOMEM);
1494 			}
1495 			sce->ibuf = malloc(sce->ra_wb_bufsize,
1496 					   M_USBDEV, M_WAITOK);
1497 			sce->fill = sce->cur = sce->ibuf;
1498 			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1499 			sce->ra_wb_used = 0;
1500 			sce->state |= UGEN_BULK_RA;
1501 			sce->state &= ~UGEN_RA_WB_STOP;
1502 			/* Now start reading. */
1503 			usbd_setup_xfer(sce->ra_wb_xfer, sce->pipeh, sce,
1504 			    NULL,
1505 			    min(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
1506 			    USBD_NO_COPY, USBD_NO_TIMEOUT,
1507 			    ugen_bulkra_intr);
1508 			err = usbd_transfer(sce->ra_wb_xfer);
1509 			if (err != USBD_IN_PROGRESS) {
1510 				sce->state &= ~UGEN_BULK_RA;
1511 				free(sce->ibuf, M_USBDEV);
1512 				sce->ibuf = NULL;
1513 				usbd_free_xfer(sce->ra_wb_xfer);
1514 				return (EIO);
1515 			}
1516 		} else {
1517 			/* Only turn RA off if it's currently on. */
1518 			if (!(sce->state & UGEN_BULK_RA))
1519 				return (0);
1520 
1521 			sce->state &= ~UGEN_BULK_RA;
1522 			usbd_abort_pipe(sce->pipeh);
1523 			usbd_free_xfer(sce->ra_wb_xfer);
1524 			/*
1525 			 * XXX Discard whatever's in the buffer, but we
1526 			 * should keep it around and drain the buffer
1527 			 * instead.
1528 			 */
1529 			free(sce->ibuf, M_USBDEV);
1530 			sce->ibuf = NULL;
1531 		}
1532 		return (0);
1533 	case USB_SET_BULK_WB:
1534 		if (endpt == USB_CONTROL_ENDPOINT)
1535 			return (EINVAL);
1536 		sce = &sc->sc_endpoints[endpt][OUT];
1537 		if (sce == NULL || sce->pipeh == NULL)
1538 			return (EINVAL);
1539 		edesc = sce->edesc;
1540 		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1541 			return (EINVAL);
1542 
1543 		if (*(int *)addr) {
1544 			/* Only turn WB on if it's currently off. */
1545 			if (sce->state & UGEN_BULK_WB)
1546 				return (0);
1547 
1548 			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1549 				/* shouldn't happen */
1550 				return (EINVAL);
1551 			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
1552 			if (sce->ra_wb_xfer == NULL)
1553 				return (ENOMEM);
1554 			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1555 			/*
1556 			 * Set up a dmabuf because we reuse the xfer with
1557 			 * the same (max) request length like isoc.
1558 			 */
1559 			if (usbd_alloc_buffer(sce->ra_wb_xfer,
1560 					      sce->ra_wb_xferlen) == 0) {
1561 				usbd_free_xfer(sce->ra_wb_xfer);
1562 				return (ENOMEM);
1563 			}
1564 			sce->ibuf = malloc(sce->ra_wb_bufsize,
1565 					   M_USBDEV, M_WAITOK);
1566 			sce->fill = sce->cur = sce->ibuf;
1567 			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1568 			sce->ra_wb_used = 0;
1569 			sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
1570 		} else {
1571 			/* Only turn WB off if it's currently on. */
1572 			if (!(sce->state & UGEN_BULK_WB))
1573 				return (0);
1574 
1575 			sce->state &= ~UGEN_BULK_WB;
1576 			/*
1577 			 * XXX Discard whatever's in the buffer, but we
1578 			 * should keep it around and keep writing to
1579 			 * drain the buffer instead.
1580 			 */
1581 			usbd_abort_pipe(sce->pipeh);
1582 			usbd_free_xfer(sce->ra_wb_xfer);
1583 			free(sce->ibuf, M_USBDEV);
1584 			sce->ibuf = NULL;
1585 		}
1586 		return (0);
1587 	case USB_SET_BULK_RA_OPT:
1588 	case USB_SET_BULK_WB_OPT:
1589 	{
1590 		struct usb_bulk_ra_wb_opt *opt;
1591 
1592 		if (endpt == USB_CONTROL_ENDPOINT)
1593 			return (EINVAL);
1594 		opt = (struct usb_bulk_ra_wb_opt *)addr;
1595 		if (cmd == USB_SET_BULK_RA_OPT)
1596 			sce = &sc->sc_endpoints[endpt][IN];
1597 		else
1598 			sce = &sc->sc_endpoints[endpt][OUT];
1599 		if (sce == NULL || sce->pipeh == NULL)
1600 			return (EINVAL);
1601 		if (opt->ra_wb_buffer_size < 1 ||
1602 		    opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
1603 		    opt->ra_wb_request_size < 1 ||
1604 		    opt->ra_wb_request_size > opt->ra_wb_buffer_size)
1605 			return (EINVAL);
1606 		/*
1607 		 * XXX These changes do not take effect until the
1608 		 * next time RA/WB mode is enabled but they ought to
1609 		 * take effect immediately.
1610 		 */
1611 		sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
1612 		sce->ra_wb_reqsize = opt->ra_wb_request_size;
1613 		return (0);
1614 	}
1615 	default:
1616 		break;
1617 	}
1618 
1619 	if (endpt != USB_CONTROL_ENDPOINT)
1620 		return (EINVAL);
1621 
1622 	switch (cmd) {
1623 #ifdef UGEN_DEBUG
1624 	case USB_SETDEBUG:
1625 		ugendebug = *(int *)addr;
1626 		break;
1627 #endif
1628 	case USB_GET_CONFIG:
1629 		err = usbd_get_config(sc->sc_udev, &conf);
1630 		if (err)
1631 			return (EIO);
1632 		*(int *)addr = conf;
1633 		break;
1634 	case USB_SET_CONFIG:
1635 		if (!(flag & FWRITE))
1636 			return (EPERM);
1637 		err = ugen_set_config(sc, *(int *)addr);
1638 		switch (err) {
1639 		case USBD_NORMAL_COMPLETION:
1640 			break;
1641 		case USBD_IN_USE:
1642 			return (EBUSY);
1643 		default:
1644 			return (EIO);
1645 		}
1646 		break;
1647 	case USB_GET_ALTINTERFACE:
1648 		ai = (struct usb_alt_interface *)addr;
1649 		err = usbd_device2interface_handle(sc->sc_udev,
1650 			  ai->uai_interface_index, &iface);
1651 		if (err)
1652 			return (EINVAL);
1653 		idesc = usbd_get_interface_descriptor(iface);
1654 		if (idesc == NULL)
1655 			return (EIO);
1656 		ai->uai_alt_no = idesc->bAlternateSetting;
1657 		break;
1658 	case USB_SET_ALTINTERFACE:
1659 		if (!(flag & FWRITE))
1660 			return (EPERM);
1661 		ai = (struct usb_alt_interface *)addr;
1662 		err = usbd_device2interface_handle(sc->sc_udev,
1663 			  ai->uai_interface_index, &iface);
1664 		if (err)
1665 			return (EINVAL);
1666 		err = ugen_set_interface(sc, ai->uai_interface_index,
1667 		    ai->uai_alt_no);
1668 		if (err)
1669 			return (EINVAL);
1670 		break;
1671 	case USB_GET_NO_ALT:
1672 		ai = (struct usb_alt_interface *)addr;
1673 		cdesc = ugen_get_cdesc(sc, ai->uai_config_index, 0);
1674 		if (cdesc == NULL)
1675 			return (EINVAL);
1676 		idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
1677 		if (idesc == NULL) {
1678 			free(cdesc, M_TEMP);
1679 			return (EINVAL);
1680 		}
1681 		ai->uai_alt_no = usbd_get_no_alts(cdesc,
1682 		    idesc->bInterfaceNumber);
1683 		free(cdesc, M_TEMP);
1684 		break;
1685 	case USB_GET_DEVICE_DESC:
1686 		*(usb_device_descriptor_t *)addr =
1687 			*usbd_get_device_descriptor(sc->sc_udev);
1688 		break;
1689 	case USB_GET_CONFIG_DESC:
1690 		cd = (struct usb_config_desc *)addr;
1691 		cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, 0);
1692 		if (cdesc == NULL)
1693 			return (EINVAL);
1694 		cd->ucd_desc = *cdesc;
1695 		free(cdesc, M_TEMP);
1696 		break;
1697 	case USB_GET_INTERFACE_DESC:
1698 		id = (struct usb_interface_desc *)addr;
1699 		cdesc = ugen_get_cdesc(sc, id->uid_config_index, 0);
1700 		if (cdesc == NULL)
1701 			return (EINVAL);
1702 		if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
1703 		    id->uid_alt_index == USB_CURRENT_ALT_INDEX)
1704 			alt = ugen_get_alt_index(sc, id->uid_interface_index);
1705 		else
1706 			alt = id->uid_alt_index;
1707 		idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
1708 		if (idesc == NULL) {
1709 			free(cdesc, M_TEMP);
1710 			return (EINVAL);
1711 		}
1712 		id->uid_desc = *idesc;
1713 		free(cdesc, M_TEMP);
1714 		break;
1715 	case USB_GET_ENDPOINT_DESC:
1716 		ed = (struct usb_endpoint_desc *)addr;
1717 		cdesc = ugen_get_cdesc(sc, ed->ued_config_index, 0);
1718 		if (cdesc == NULL)
1719 			return (EINVAL);
1720 		if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
1721 		    ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
1722 			alt = ugen_get_alt_index(sc, ed->ued_interface_index);
1723 		else
1724 			alt = ed->ued_alt_index;
1725 		edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
1726 					alt, ed->ued_endpoint_index);
1727 		if (edesc == NULL) {
1728 			free(cdesc, M_TEMP);
1729 			return (EINVAL);
1730 		}
1731 		ed->ued_desc = *edesc;
1732 		free(cdesc, M_TEMP);
1733 		break;
1734 	case USB_GET_FULL_DESC:
1735 	{
1736 		int len;
1737 		struct iovec iov;
1738 		struct uio uio;
1739 		struct usb_full_desc *fd = (struct usb_full_desc *)addr;
1740 		int error;
1741 
1742 		cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &len);
1743 		if (cdesc == NULL)
1744 			return (EINVAL);
1745 		if (len > fd->ufd_size)
1746 			len = fd->ufd_size;
1747 		iov.iov_base = (void *)fd->ufd_data;
1748 		iov.iov_len = len;
1749 		uio.uio_iov = &iov;
1750 		uio.uio_iovcnt = 1;
1751 		uio.uio_resid = len;
1752 		uio.uio_offset = 0;
1753 		uio.uio_rw = UIO_READ;
1754 		uio.uio_vmspace = l->l_proc->p_vmspace;
1755 		error = uiomove((void *)cdesc, len, &uio);
1756 		free(cdesc, M_TEMP);
1757 		return (error);
1758 	}
1759 	case USB_GET_STRING_DESC: {
1760 		int len;
1761 		si = (struct usb_string_desc *)addr;
1762 		err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
1763 			  si->usd_language_id, &si->usd_desc, &len);
1764 		if (err)
1765 			return (EINVAL);
1766 		break;
1767 	}
1768 	case USB_DO_REQUEST:
1769 	{
1770 		struct usb_ctl_request *ur = (void *)addr;
1771 		int len = UGETW(ur->ucr_request.wLength);
1772 		struct iovec iov;
1773 		struct uio uio;
1774 		void *ptr = NULL;
1775 		usbd_status xerr;
1776 		int error = 0;
1777 
1778 		if (!(flag & FWRITE))
1779 			return (EPERM);
1780 		/* Avoid requests that would damage the bus integrity. */
1781 		if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
1782 		     ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
1783 		    (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
1784 		     ur->ucr_request.bRequest == UR_SET_CONFIG) ||
1785 		    (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
1786 		     ur->ucr_request.bRequest == UR_SET_INTERFACE))
1787 			return (EINVAL);
1788 
1789 		if (len < 0 || len > 32767)
1790 			return (EINVAL);
1791 		if (len != 0) {
1792 			iov.iov_base = (void *)ur->ucr_data;
1793 			iov.iov_len = len;
1794 			uio.uio_iov = &iov;
1795 			uio.uio_iovcnt = 1;
1796 			uio.uio_resid = len;
1797 			uio.uio_offset = 0;
1798 			uio.uio_rw =
1799 				ur->ucr_request.bmRequestType & UT_READ ?
1800 				UIO_READ : UIO_WRITE;
1801 			uio.uio_vmspace = l->l_proc->p_vmspace;
1802 			ptr = malloc(len, M_TEMP, M_WAITOK);
1803 			if (uio.uio_rw == UIO_WRITE) {
1804 				error = uiomove(ptr, len, &uio);
1805 				if (error)
1806 					goto ret;
1807 			}
1808 		}
1809 		sce = &sc->sc_endpoints[endpt][IN];
1810 		xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
1811 			  ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
1812 		if (xerr) {
1813 			error = EIO;
1814 			goto ret;
1815 		}
1816 		if (len != 0) {
1817 			if (uio.uio_rw == UIO_READ) {
1818 				error = uiomove(ptr, len, &uio);
1819 				if (error)
1820 					goto ret;
1821 			}
1822 		}
1823 	ret:
1824 		if (ptr)
1825 			free(ptr, M_TEMP);
1826 		return (error);
1827 	}
1828 	case USB_GET_DEVICEINFO:
1829 		usbd_fill_deviceinfo(sc->sc_udev,
1830 				     (struct usb_device_info *)addr, 0);
1831 		break;
1832 #ifdef COMPAT_30
1833 	case USB_GET_DEVICEINFO_OLD:
1834 		usbd_fill_deviceinfo_old(sc->sc_udev,
1835 					 (struct usb_device_info_old *)addr, 0);
1836 
1837 		break;
1838 #endif
1839 	default:
1840 		return (EINVAL);
1841 	}
1842 	return (0);
1843 }
1844 
1845 int
1846 ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1847 {
1848 	int endpt = UGENENDPOINT(dev);
1849 	struct ugen_softc *sc;
1850 	int error;
1851 
1852 	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
1853 	if (sc == NULL)
1854 		return ENXIO;
1855 
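	/* Hold a reference so a concurrent detach waits until we are done. */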
1856 	sc->sc_refcnt++;
1857 	error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
1858 	if (--sc->sc_refcnt < 0)
1859 		usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
1860 	return (error);
1861 }
1862 
1863 int
1864 ugenpoll(dev_t dev, int events, struct lwp *l)
1865 {
1866 	struct ugen_softc *sc;
1867 	struct ugen_endpoint *sce_in, *sce_out;
1868 	int revents = 0;
1869 
1870 	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
1871 	if (sc == NULL)
1872 		return ENXIO;
1873 
1874 	if (sc->sc_dying)
1875 		return (POLLHUP);
1876 
1877 	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
1878 		return ENODEV;
1879 
1880 	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
1881 	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
1882 	if (sce_in == NULL && sce_out == NULL)
1883 		return (POLLERR);
1884 #ifdef DIAGNOSTIC
1885 	if (!sce_in->edesc && !sce_out->edesc) {
1886 		printf("ugenpoll: no edesc\n");
1887 		return (POLLERR);
1888 	}
1889 	/* It's possible to have only one pipe open. */
1890 	if (!sce_in->pipeh && !sce_out->pipeh) {
1891 		printf("ugenpoll: no pipe\n");
1892 		return (POLLERR);
1893 	}
1894 #endif
1895 
1896 	mutex_enter(&sc->sc_lock);
1897 	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
1898 		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
1899 		case UE_INTERRUPT:
1900 			if (sce_in->q.c_cc > 0)
1901 				revents |= events & (POLLIN | POLLRDNORM);
1902 			else
1903 				selrecord(l, &sce_in->rsel);
1904 			break;
1905 		case UE_ISOCHRONOUS:
1906 			if (sce_in->cur != sce_in->fill)
1907 				revents |= events & (POLLIN | POLLRDNORM);
1908 			else
1909 				selrecord(l, &sce_in->rsel);
1910 			break;
1911 		case UE_BULK:
1912 			if (sce_in->state & UGEN_BULK_RA) {
1913 				if (sce_in->ra_wb_used > 0)
1914 					revents |= events &
1915 					    (POLLIN | POLLRDNORM);
1916 				else
1917 					selrecord(l, &sce_in->rsel);
1918 				break;
1919 			}
1920 			/*
1921 			 * Without read-ahead enabled we have no easy way of
1922 			 * determining whether a read would return data
1923 			 * without blocking.  Pretend it would.
1924 			 */
1925 			revents |= events & (POLLIN | POLLRDNORM);
1926 			break;
1927 		default:
1928 			break;
1929 		}
1930 	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
1931 		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
1932 		case UE_INTERRUPT:
1933 		case UE_ISOCHRONOUS:
1934 			/* XXX unimplemented */
1935 			break;
1936 		case UE_BULK:
1937 			if (sce_out->state & UGEN_BULK_WB) {
1938 				if (sce_out->ra_wb_used <
1939 				    sce_out->limit - sce_out->ibuf)
1940 					revents |= events &
1941 					    (POLLOUT | POLLWRNORM);
1942 				else
1943 					selrecord(l, &sce_out->rsel);
1944 				break;
1945 			}
1946 			/*
1947 			 * Without write-behind enabled we have no easy way
1948 			 * of determining whether a write would complete
1949 			 * without blocking.  Pretend it would.
1950 			 */
1951 			revents |= events & (POLLOUT | POLLWRNORM);
1952 			break;
1953 		default:
1954 			break;
1955 		}
1956 
1957 	mutex_exit(&sc->sc_lock);
1958 
1959 	return (revents);
1960 }
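
/*
 * Illustrative userland sketch (not part of the driver).  As implemented
 * above, poll(2) reports readability on interrupt and isochronous endpoints
 * (and on bulk endpoints with read-ahead enabled) and writability on bulk
 * endpoints with write-behind enabled; plain bulk endpoints always poll as
 * ready.  The endpoint node and timeout are assumptions made for the
 * example only.
 *
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <unistd.h>
 *
 *	ssize_t
 *	read_when_ready(void *buf, size_t len)
 *	{
 *		struct pollfd pfd;
 *
 *		pfd.fd = open("/dev/ugen0.01", O_RDONLY);
 *		if (pfd.fd == -1)
 *			err(1, "open");
 *		pfd.events = POLLIN | POLLRDNORM;
 *		if (poll(&pfd, 1, 1000) <= 0)
 *			errx(1, "poll timed out or failed");
 *		return read(pfd.fd, buf, len);
 *	}
 */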
1961 
1962 static void
1963 filt_ugenrdetach(struct knote *kn)
1964 {
1965 	struct ugen_endpoint *sce = kn->kn_hook;
1966 	struct ugen_softc *sc = sce->sc;
1967 
1968 	mutex_enter(&sc->sc_lock);
1969 	SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
1970 	mutex_exit(&sc->sc_lock);
1971 }
1972 
1973 static int
1974 filt_ugenread_intr(struct knote *kn, long hint)
1975 {
1976 	struct ugen_endpoint *sce = kn->kn_hook;
1977 
1978 	kn->kn_data = sce->q.c_cc;
1979 	return (kn->kn_data > 0);
1980 }
1981 
1982 static int
1983 filt_ugenread_isoc(struct knote *kn, long hint)
1984 {
1985 	struct ugen_endpoint *sce = kn->kn_hook;
1986 
1987 	if (sce->cur == sce->fill)
1988 		return (0);
1989 
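	/*
	 * cur and fill are the read and write pointers into the circular
	 * buffer [ibuf, limit), so the count of available bytes has to
	 * allow for the write pointer having wrapped past the end.
	 */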
1990 	if (sce->cur < sce->fill)
1991 		kn->kn_data = sce->fill - sce->cur;
1992 	else
1993 		kn->kn_data = (sce->limit - sce->cur) +
1994 		    (sce->fill - sce->ibuf);
1995 
1996 	return (1);
1997 }
1998 
1999 static int
2000 filt_ugenread_bulk(struct knote *kn, long hint)
2001 {
2002 	struct ugen_endpoint *sce = kn->kn_hook;
2003 
2004 	if (!(sce->state & UGEN_BULK_RA))
2005 		/*
2006 		 * Without read-ahead enabled we have no easy way of
2007 		 * determining whether a read would return data without
2008 		 * blocking.  So, emulate "seltrue".
2009 		 */
2010 		return (filt_seltrue(kn, hint));
2011 
2012 	if (sce->ra_wb_used == 0)
2013 		return (0);
2014 
2015 	kn->kn_data = sce->ra_wb_used;
2016 
2017 	return (1);
2018 }
2019 
2020 static int
2021 filt_ugenwrite_bulk(struct knote *kn, long hint)
2022 {
2023 	struct ugen_endpoint *sce = kn->kn_hook;
2024 
2025 	if (!(sce->state & UGEN_BULK_WB))
2026 		/*
2027 		 * Without write-behind enabled we have no easy way of
2028 		 * determining whether a write would complete without
2029 		 * blocking.  So, emulate "seltrue".
2030 		 */
2031 		return (filt_seltrue(kn, hint));
2032 
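	/*
	 * ra_wb_used is the number of bytes queued in the write-behind
	 * buffer of size limit - ibuf; report the space still available.
	 */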
2033 	if (sce->ra_wb_used == sce->limit - sce->ibuf)
2034 		return (0);
2035 
2036 	kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;
2037 
2038 	return (1);
2039 }
2040 
2041 static const struct filterops ugenread_intr_filtops =
2042 	{ 1, NULL, filt_ugenrdetach, filt_ugenread_intr };
2043 
2044 static const struct filterops ugenread_isoc_filtops =
2045 	{ 1, NULL, filt_ugenrdetach, filt_ugenread_isoc };
2046 
2047 static const struct filterops ugenread_bulk_filtops =
2048 	{ 1, NULL, filt_ugenrdetach, filt_ugenread_bulk };
2049 
2050 static const struct filterops ugenwrite_bulk_filtops =
2051 	{ 1, NULL, filt_ugenrdetach, filt_ugenwrite_bulk };
2052 
2053 int
2054 ugenkqfilter(dev_t dev, struct knote *kn)
2055 {
2056 	struct ugen_softc *sc;
2057 	struct ugen_endpoint *sce;
2058 	struct klist *klist;
2059 
2060 	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
2061 	if (sc == NULL)
2062 		return ENXIO;
2063 
2064 	if (sc->sc_dying)
2065 		return (ENXIO);
2066 
2067 	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
2068 		return ENODEV;
2069 
2070 	switch (kn->kn_filter) {
2071 	case EVFILT_READ:
2072 		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
2073 		if (sce == NULL)
2074 			return (EINVAL);
2075 
2076 		klist = &sce->rsel.sel_klist;
2077 		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
2078 		case UE_INTERRUPT:
2079 			kn->kn_fop = &ugenread_intr_filtops;
2080 			break;
2081 		case UE_ISOCHRONOUS:
2082 			kn->kn_fop = &ugenread_isoc_filtops;
2083 			break;
2084 		case UE_BULK:
2085 			kn->kn_fop = &ugenread_bulk_filtops;
2086 			break;
2087 		default:
2088 			return (EINVAL);
2089 		}
2090 		break;
2091 
2092 	case EVFILT_WRITE:
2093 		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
2094 		if (sce == NULL)
2095 			return (EINVAL);
2096 
2097 		klist = &sce->rsel.sel_klist;
2098 		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
2099 		case UE_INTERRUPT:
2100 		case UE_ISOCHRONOUS:
2101 			/* XXX poll doesn't support this */
2102 			return (EINVAL);
2103 
2104 		case UE_BULK:
2105 			kn->kn_fop = &ugenwrite_bulk_filtops;
2106 			break;
2107 		default:
2108 			return (EINVAL);
2109 		}
2110 		break;
2111 
2112 	default:
2113 		return (EINVAL);
2114 	}
2115 
2116 	kn->kn_hook = sce;
2117 
2118 	mutex_enter(&sc->sc_lock);
2119 	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
2120 	mutex_exit(&sc->sc_lock);
2121 
2122 	return (0);
2123 }
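
/*
 * Illustrative userland sketch (not part of the driver).  It registers the
 * EVFILT_READ filter attached by ugenkqfilter() above; on return, kev.data
 * holds whatever count the matching filt_ugenread_* routine reported.  The
 * endpoint node is an assumption made for the example only.
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	void
 *	wait_for_data(void)
 *	{
 *		struct kevent kev;
 *		int fd, kq;
 *
 *		fd = open("/dev/ugen0.01", O_RDONLY);
 *		kq = kqueue();
 *		if (fd == -1 || kq == -1)
 *			err(1, "open/kqueue");
 *		EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
 *		if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
 *			err(1, "kevent register");
 *		if (kevent(kq, NULL, 0, &kev, 1, NULL) == -1)
 *			err(1, "kevent wait");
 *	}
 */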
2124