xref: /netbsd-src/sys/dev/usb/ugen.c (revision b7b7574d3bf8eeb51a1fa3977b59142ec6434a55)
1 /*	$NetBSD: ugen.c,v 1.123 2014/03/16 05:20:29 dholland Exp $	*/
2 
3 /*
4  * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Lennart Augustsson (lennart@augustsson.net) at
9  * Carlstedt Research & Technology.
10  *
11  * Copyright (c) 2006 BBN Technologies Corp.  All rights reserved.
12  * Effort sponsored in part by the Defense Advanced Research Projects
13  * Agency (DARPA) and the Department of the Interior National Business
14  * Center under agreement number NBCHC050166.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.123 2014/03/16 05:20:29 dholland Exp $");
41 
42 #ifdef _KERNEL_OPT
43 #include "opt_compat_netbsd.h"
44 #endif
45 
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/malloc.h>
50 #include <sys/device.h>
51 #include <sys/ioctl.h>
52 #include <sys/conf.h>
53 #include <sys/tty.h>
54 #include <sys/file.h>
55 #include <sys/select.h>
56 #include <sys/proc.h>
57 #include <sys/vnode.h>
58 #include <sys/poll.h>
59 
60 #include <dev/usb/usb.h>
61 #include <dev/usb/usbdi.h>
62 #include <dev/usb/usbdi_util.h>
63 
64 #ifdef UGEN_DEBUG
65 #define DPRINTF(x)	if (ugendebug) printf x
66 #define DPRINTFN(n,x)	if (ugendebug>(n)) printf x
67 int	ugendebug = 0;
68 #else
69 #define DPRINTF(x)
70 #define DPRINTFN(n,x)
71 #endif
72 
73 #define	UGEN_CHUNK	128	/* chunk size for read */
74 #define	UGEN_IBSIZE	1020	/* buffer size */
75 #define	UGEN_BBSIZE	1024
76 
77 #define UGEN_NISOREQS	4	/* number of outstanding xfer requests */
78 #define UGEN_NISORFRMS	8	/* number of transactions per req */
79 #define UGEN_NISOFRAMES	(UGEN_NISORFRMS * UGEN_NISOREQS)
80 
81 #define UGEN_BULK_RA_WB_BUFSIZE	16384		/* default buffer size */
82 #define UGEN_BULK_RA_WB_BUFMAX	(1 << 20)	/* maximum allowed buffer */
83 
/*
 * Per-endpoint, per-direction state.  One instance exists for each
 * (endpoint address, direction) pair in sc_endpoints.
 */
struct ugen_endpoint {
	struct ugen_softc *sc;		/* back pointer to the softc */
	usb_endpoint_descriptor_t *edesc; /* descriptor; NULL if endpoint absent */
	usbd_interface_handle iface;	/* interface this endpoint belongs to */
	int state;			/* UGEN_* flags below */
#define	UGEN_ASLP	0x02	/* waiting for data */
#define UGEN_SHORT_OK	0x04	/* short xfers are OK */
#define UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
#define UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
#define UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
	usbd_pipe_handle pipeh;	/* open pipe; NULL when closed */
	struct clist q;		/* interrupt-in input queue */
	u_char *ibuf;		/* start of buffer (circular for isoc) */
	u_char *fill;		/* location for input (isoc) */
	u_char *limit;		/* end of circular buffer (isoc) */
	u_char *cur;		/* current read location (isoc) */
	u_int32_t timeout;	/* wait timeout in ms (mstohz'd by users) */
	u_int32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
	u_int32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
	u_int32_t ra_wb_used;	 /* how much is in buffer */
	u_int32_t ra_wb_xferlen; /* current xfer length for RA/WB */
	usbd_xfer_handle ra_wb_xfer;	/* the single xfer used in RA/WB mode */
	/* One slot per outstanding isochronous input request. */
	struct isoreq {
		struct ugen_endpoint *sce;	/* owning endpoint */
		usbd_xfer_handle xfer;		/* xfer for this slot */
		void *dmabuf;			/* xfer's DMA buffer */
		u_int16_t sizes[UGEN_NISORFRMS]; /* per-frame lengths */
	} isoreqs[UGEN_NISOREQS];
	/* Keep these last; we don't overwrite them in ugen_set_config() */
#define UGEN_ENDPOINT_NONZERO_CRUFT	offsetof(struct ugen_endpoint, rsel)
	struct selinfo rsel;	/* select/poll state; survives reconfiguration */
	kcondvar_t cv;		/* readers/writers sleep here, under sc_lock */
};
117 
/* Per-device driver instance state. */
struct ugen_softc {
	device_t sc_dev;		/* base device */
	usbd_device_handle sc_udev;	/* attached USB device */

	kmutex_t		sc_lock;	/* protects sleep/wake state below */
	kcondvar_t		sc_detach_cv;	/* detach waits here for sc_refcnt */

	char sc_is_open[USB_MAX_ENDPOINTS];	/* per-endpoint-address open flag */
	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2]; /* [addr][dir] */
#define OUT 0
#define IN  1

	int sc_refcnt;		/* active read/write callers; see ugen_detach() */
	char sc_buffer[UGEN_BBSIZE];	/* bounce buffer for non-RA/WB transfers */
	u_char sc_dying;	/* set on deactivate; new operations fail */
};
134 
/* Character device entry points, wired into ugen_cdevsw below. */
dev_type_open(ugenopen);
dev_type_close(ugenclose);
dev_type_read(ugenread);
dev_type_write(ugenwrite);
dev_type_ioctl(ugenioctl);
dev_type_poll(ugenpoll);
dev_type_kqfilter(ugenkqfilter);

/* Character device switch; D_OTHER: neither tty nor disk semantics. */
const struct cdevsw ugen_cdevsw = {
	.d_open = ugenopen,
	.d_close = ugenclose,
	.d_read = ugenread,
	.d_write = ugenwrite,
	.d_ioctl = ugenioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = ugenpoll,
	.d_mmap = nommap,
	.d_kqfilter = ugenkqfilter,
	.d_flag = D_OTHER,
};
156 
/* Transfer-completion callbacks. */
Static void ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr,
		     usbd_status status);
Static void ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
			    usbd_status status);
Static void ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
			     usbd_status status);
Static void ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
			     usbd_status status);
/* Workers shared by the cdevsw entry points. */
Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
			 void *, int, struct lwp *);
Static int ugen_set_config(struct ugen_softc *sc, int configno);
Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *sc,
					       int index, int *lenp);
Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
Static int ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx);

/* Minor number layout: unit in bits 4-7, endpoint address in bits 0-3. */
#define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
#define UGENENDPOINT(n) (minor(n) & 0xf)
/*
 * NOTE(review): the major number is hard-coded to 0 here — presumably
 * UGENDEV() is only used where the major is irrelevant; verify callers.
 */
#define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))

/* Autoconfiguration glue. */
int             ugen_match(device_t, cfdata_t, void *);
void            ugen_attach(device_t, device_t, void *);
int             ugen_detach(device_t, int);
int             ugen_activate(device_t, enum devact);
extern struct cfdriver ugen_cd;
CFATTACH_DECL_NEW(ugen, sizeof(struct ugen_softc), ugen_match, ugen_attach, ugen_detach, ugen_activate);

/* toggle to control attach priority. -1 means "let autoconf decide" */
int ugen_override = -1;
188 
189 int
190 ugen_match(device_t parent, cfdata_t match, void *aux)
191 {
192 	struct usb_attach_arg *uaa = aux;
193 	int override;
194 
195 	if (ugen_override != -1)
196 		override = ugen_override;
197 	else
198 		override = match->cf_flags & 1;
199 
200 	if (override)
201 		return (UMATCH_HIGHEST);
202 	else if (uaa->usegeneric)
203 		return (UMATCH_GENERIC);
204 	else
205 		return (UMATCH_NONE);
206 }
207 
/*
 * Attach: announce the device, initialise the per-endpoint selinfo/cv
 * state that survives reconfiguration, select configuration index 0
 * (the ugen default) and build the endpoint table for it.  On any
 * failure sc_dying is set so subsequent opens fail with ENXIO.
 */
void
ugen_attach(device_t parent, device_t self, void *aux)
{
	struct ugen_softc *sc = device_private(self);
	struct usb_attach_arg *uaa = aux;
	usbd_device_handle udev;
	char *devinfop;
	usbd_status err;
	int i, dir, conf;

	aprint_naive("\n");
	aprint_normal("\n");

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_USB);
	cv_init(&sc->sc_detach_cv, "ugendet");

	devinfop = usbd_devinfo_alloc(uaa->device, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_dev = self;
	sc->sc_udev = udev = uaa->device;

	/*
	 * Initialise the wait/poll state for every possible endpoint now;
	 * ugen_set_config() deliberately leaves these fields untouched.
	 */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce;

			sce = &sc->sc_endpoints[i][dir];
			selinit(&sce->rsel);
			cv_init(&sce->cv, "ugensce");
		}
	}

	/* First set configuration index 0, the default one for ugen. */
	err = usbd_set_config_index(udev, 0, 0);
	if (err) {
		aprint_error_dev(self,
		    "setting configuration index 0 failed\n");
		sc->sc_dying = 1;
		return;
	}
	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;

	/* Set up all the local state for this configuration. */
	err = ugen_set_config(sc, conf);
	if (err) {
		aprint_error_dev(self, "setting configuration %d failed\n",
		    conf);
		sc->sc_dying = 1;
		return;
	}

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev,
			   sc->sc_dev);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;
}
268 
/*
 * Switch the device to configuration `configno' (unless it is already
 * current) and rebuild the sc_endpoints descriptor table from the new
 * configuration's interfaces.  Refuses with USBD_IN_USE while any
 * non-control endpoint is open.  Only the first
 * UGEN_ENDPOINT_NONZERO_CRUFT bytes of each endpoint are cleared, so
 * the selinfo/cv fields initialised at attach time are preserved.
 */
Static int
ugen_set_config(struct ugen_softc *sc, int configno)
{
	usbd_device_handle dev = sc->sc_udev;
	usb_config_descriptor_t *cdesc;
	usbd_interface_handle iface;
	usb_endpoint_descriptor_t *ed;
	struct ugen_endpoint *sce;
	u_int8_t niface, nendpt;
	int ifaceno, endptno, endpt;
	usbd_status err;
	int dir, i;

	DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
		    device_xname(sc->sc_dev), configno, sc));

	/*
	 * We start at 1, not 0, because we don't care whether the
	 * control endpoint is open or not. It is always present.
	 */
	for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
		if (sc->sc_is_open[endptno]) {
			DPRINTFN(1,
			     ("ugen_set_config: %s - endpoint %d is open\n",
			      device_xname(sc->sc_dev), endptno));
			return (USBD_IN_USE);
		}

	/* Avoid setting the current value. */
	cdesc = usbd_get_config_descriptor(dev);
	if (!cdesc || cdesc->bConfigurationValue != configno) {
		err = usbd_set_config_no(dev, configno, 1);
		if (err)
			return (err);
	}

	err = usbd_interface_count(dev, &niface);
	if (err)
		return (err);

	/* Clear out the old info, but leave the selinfo and cv initialised. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			memset(sce, 0, UGEN_ENDPOINT_NONZERO_CRUFT);
		}
	}

	/* Record every endpoint of every interface, indexed by (addr, dir). */
	for (ifaceno = 0; ifaceno < niface; ifaceno++) {
		DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
		err = usbd_device2interface_handle(dev, ifaceno, &iface);
		if (err)
			return (err);
		err = usbd_endpoint_count(iface, &nendpt);
		if (err)
			return (err);
		for (endptno = 0; endptno < nendpt; endptno++) {
			ed = usbd_interface2endpoint_descriptor(iface,endptno);
			KASSERT(ed != NULL);
			endpt = ed->bEndpointAddress;
			dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
			sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
			DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
				    "(%d,%d), sce=%p\n",
				    endptno, endpt, UE_GET_ADDR(endpt),
				    UE_GET_DIR(endpt), sce));
			sce->sc = sc;
			sce->edesc = ed;
			sce->iface = iface;
		}
	}
	return (USBD_NORMAL_COMPLETION);
}
342 
/*
 * Open a ugen endpoint.  The minor number encodes the unit and the
 * endpoint address.  The control endpoint (0) allows multiple
 * concurrent opens; all other endpoints are exclusive.  For each
 * direction requested in `flag' the corresponding pipe is opened and
 * per-transfer-type resources are set up: interrupt-in gets a clist
 * and an interrupt pipe, bulk just opens the pipe and seeds RA/WB
 * defaults, isochronous-in allocates a circular buffer and primes
 * UGEN_NISOREQS outstanding transfers.
 */
int
ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct ugen_softc *sc;
	int unit = UGENUNIT(dev);
	int endpt = UGENENDPOINT(dev);
	usb_endpoint_descriptor_t *edesc;
	struct ugen_endpoint *sce;
	int dir, isize;
	usbd_status err;
	usbd_xfer_handle xfer;
	void *tbuf;
	int i, j;

	sc = device_lookup_private(&ugen_cd, unit);
	if (sc == NULL)
		return ENXIO;

	DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
		     flag, mode, unit, endpt));

	if (sc == NULL || sc->sc_dying)
		return (ENXIO);

	/* The control endpoint allows multiple opens. */
	if (endpt == USB_CONTROL_ENDPOINT) {
		sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
		return (0);
	}

	if (sc->sc_is_open[endpt])
		return (EBUSY);

	/* Make sure there are pipes for all directions. */
	for (dir = OUT; dir <= IN; dir++) {
		if (flag & (dir == OUT ? FWRITE : FREAD)) {
			sce = &sc->sc_endpoints[endpt][dir];
			if (sce == 0 || sce->edesc == 0)
				return (ENXIO);
		}
	}

	/* Actually open the pipes. */
	/* XXX Should back out properly if it fails. */
	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		sce->state = 0;
		sce->timeout = USBD_NO_TIMEOUT;
		DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
			     sc, endpt, dir, sce));
		edesc = sce->edesc;
		switch (edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (dir == OUT) {
				/* Interrupt-out: a plain pipe, no buffering. */
				err = usbd_open_pipe(sce->iface,
				    edesc->bEndpointAddress, 0, &sce->pipeh);
				if (err)
					return (EIO);
				break;
			}
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return (EINVAL);
			sce->ibuf = malloc(isize, M_USBDEV, M_WAITOK);
			DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
				     endpt, isize));
			/* Incoming packets are queued into this clist. */
			if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1) {
				free(sce->ibuf, M_USBDEV);
				sce->ibuf = NULL;
				return (ENOMEM);
			}
			err = usbd_open_pipe_intr(sce->iface,
				  edesc->bEndpointAddress,
				  USBD_SHORT_XFER_OK, &sce->pipeh, sce,
				  sce->ibuf, isize, ugenintr,
				  USBD_DEFAULT_INTERVAL);
			if (err) {
				clfree(&sce->q);
				free(sce->ibuf, M_USBDEV);
				sce->ibuf = NULL;
				return (EIO);
			}
			DPRINTFN(5, ("ugenopen: interrupt open done\n"));
			break;
		case UE_BULK:
			err = usbd_open_pipe(sce->iface,
				  edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err)
				return (EIO);
			sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
			/*
			 * Use request size for non-RA/WB transfers
			 * as the default.
			 */
			sce->ra_wb_reqsize = UGEN_BBSIZE;
			break;
		case UE_ISOCHRONOUS:
			if (dir == OUT)
				return (EINVAL);
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return (EINVAL);
			/* Circular buffer holding UGEN_NISOFRAMES frames. */
			sce->ibuf = malloc(isize * UGEN_NISOFRAMES,
				M_USBDEV, M_WAITOK);
			sce->cur = sce->fill = sce->ibuf;
			sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
			DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
				     endpt, isize));
			err = usbd_open_pipe(sce->iface,
				  edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err) {
				free(sce->ibuf, M_USBDEV);
				sce->ibuf = NULL;
				return (EIO);
			}
			/* Prime all outstanding isochronous requests. */
			for(i = 0; i < UGEN_NISOREQS; ++i) {
				sce->isoreqs[i].sce = sce;
				xfer = usbd_alloc_xfer(sc->sc_udev);
				if (xfer == 0)
					goto bad;
				sce->isoreqs[i].xfer = xfer;
				tbuf = usbd_alloc_buffer
					(xfer, isize * UGEN_NISORFRMS);
				if (tbuf == 0) {
					i++;	/* so bad: frees this xfer too */
					goto bad;
				}
				sce->isoreqs[i].dmabuf = tbuf;
				for(j = 0; j < UGEN_NISORFRMS; ++j)
					sce->isoreqs[i].sizes[j] = isize;
				usbd_setup_isoc_xfer
					(xfer, sce->pipeh, &sce->isoreqs[i],
					 sce->isoreqs[i].sizes,
					 UGEN_NISORFRMS, USBD_NO_COPY,
					 ugen_isoc_rintr);
				(void)usbd_transfer(xfer);
			}
			DPRINTFN(5, ("ugenopen: isoc open done\n"));
			break;
		bad:
			/* Unwind: free xfers 0..i-1, close the pipe, free ibuf. */
			while (--i >= 0) /* implicit buffer free */
				usbd_free_xfer(sce->isoreqs[i].xfer);
			usbd_close_pipe(sce->pipeh);
			sce->pipeh = NULL;
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
			return (ENOMEM);
		case UE_CONTROL:
			sce->timeout = USBD_DEFAULT_TIMEOUT;
			return (EINVAL);
		}
	}
	sc->sc_is_open[endpt] = 1;
	return (0);
}
500 
/*
 * Close a ugen endpoint: for each direction opened via `flag', abort
 * and close the pipe and release the per-transfer-type resources
 * acquired in ugenopen().
 */
int
ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	int dir;
	int i;

	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
		     flag, mode, UGENUNIT(dev), endpt));

#ifdef DIAGNOSTIC
	if (!sc->sc_is_open[endpt]) {
		printf("ugenclose: not open\n");
		return (EINVAL);
	}
#endif

	/* The control endpoint has no pipe resources to tear down. */
	if (endpt == USB_CONTROL_ENDPOINT) {
		DPRINTFN(5, ("ugenclose: close control\n"));
		sc->sc_is_open[endpt] = 0;
		return (0);
	}

	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		if (sce == NULL || sce->pipeh == NULL)
			continue;
		DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
			     endpt, dir, sce));

		/* Aborting wakes any thread blocked in a transfer. */
		usbd_abort_pipe(sce->pipeh);
		usbd_close_pipe(sce->pipeh);
		sce->pipeh = NULL;

		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			/* Drain and release the input clist. */
			ndflush(&sce->q, sce->q.c_cc);
			clfree(&sce->q);
			break;
		case UE_ISOCHRONOUS:
			for (i = 0; i < UGEN_NISOREQS; ++i)
				usbd_free_xfer(sce->isoreqs[i].xfer);
			break;
		case UE_BULK:
			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB))
				/* ibuf freed below */
				usbd_free_xfer(sce->ra_wb_xfer);
			break;
		default:
			break;
		}

		if (sce->ibuf != NULL) {
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
	}
	sc->sc_is_open[endpt] = 0;

	return (0);
}
570 
/*
 * Common read path for interrupt-in, bulk-in and isochronous-in
 * endpoints.  Blocks (unless IO_NDELAY is set) until data is
 * available, then copies it to the caller with uiomove().
 * Returns an errno.
 */
Static int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
	u_int32_t n, tn;
	usbd_xfer_handle xfer;
	usbd_status err;
	int error = 0;

	DPRINTFN(5, ("%s: ugenread: %d\n", device_xname(sc->sc_dev), endpt));

	if (sc->sc_dying)
		return (EIO);

	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenread: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenread: no pipe\n");
		return (EIO);
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_INTERRUPT:
		/* Block until activity occurred. */
		mutex_enter(&sc->sc_lock);
		while (sce->q.c_cc == 0) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			/* "ugenri" */
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}
		mutex_exit(&sc->sc_lock);

		/*
		 * Transfer as many chunks as possible.
		 * NOTE(review): the clist is drained after sc_lock is
		 * dropped while ugenintr() may still b_to_q() into it —
		 * presumably tolerable historically; verify serialisation.
		 */
		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
			n = min(sce->q.c_cc, uio->uio_resid);
			if (n > sizeof(sc->sc_buffer))
				n = sizeof(sc->sc_buffer);

			/* Remove a small chunk from the input queue. */
			q_to_b(&sce->q, sc->sc_buffer, n);
			DPRINTFN(5, ("ugenread: got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
		}
		break;
	case UE_BULK:
		if (sce->state & UGEN_BULK_RA) {
			/* Read-ahead mode: serve data from the ring buffer. */
			DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return (EWOULDBLOCK);
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait until the ring buffer has data. */
				while (sce->ra_wb_used == 0) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenread: sleep on %p\n",
						  sce));
					/* "ugenrb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
						 ("ugenread: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data to the process. */
				while (uio->uio_resid > 0
				       && sce->ra_wb_used > 0) {
					n = min(uio->uio_resid,
						sce->ra_wb_used);
					/* Never copy past the wrap point. */
					n = min(n, sce->limit - sce->cur);
					error = uiomove(sce->cur, n, uio);
					if (error)
						break;
					sce->cur += n;
					sce->ra_wb_used -= n;
					if (sce->cur == sce->limit)
						sce->cur = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was full, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = (sce->limit - sce->ibuf)
					    - sce->ra_wb_used;
					usbd_setup_xfer(xfer,
					    sce->pipeh, sce, NULL,
					    min(n, sce->ra_wb_xferlen),
					    USBD_NO_COPY, USBD_NO_TIMEOUT,
					    ugen_bulkra_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try
						 * again at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain bulk read: synchronous chunks bounced via sc_buffer. */
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (ENOMEM);
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
			tn = n;
			err = usbd_bulk_transfer(
				  xfer, sce->pipeh,
				  sce->state & UGEN_SHORT_OK ?
				      USBD_SHORT_XFER_OK : 0,
				  sce->timeout, sc->sc_buffer, &tn, "ugenrb");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
			DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
			error = uiomove(sc->sc_buffer, tn, uio);
			/* A short transfer terminates the read. */
			if (error || tn < n)
				break;
		}
		usbd_free_xfer(xfer);
		break;
	case UE_ISOCHRONOUS:
		mutex_enter(&sc->sc_lock);
		/* Wait until the circular buffer is non-empty (cur != fill). */
		while (sce->cur == sce->fill) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			/* "ugenri" */
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}

		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
			if(sce->fill > sce->cur)
				n = min(sce->fill - sce->cur, uio->uio_resid);
			else
				n = min(sce->limit - sce->cur, uio->uio_resid);

			DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));

			/*
			 * Copy the data to the user process.
			 * NOTE(review): uiomove() is called with sc_lock
			 * held on this branch — confirm this is safe.
			 */
			error = uiomove(sce->cur, n, uio);
			if (error)
				break;
			sce->cur += n;
			if (sce->cur >= sce->limit)
				sce->cur = sce->ibuf;
		}
		mutex_exit(&sc->sc_lock);
		break;


	default:
		return (ENXIO);
	}
	return (error);
}
786 
787 int
788 ugenread(dev_t dev, struct uio *uio, int flag)
789 {
790 	int endpt = UGENENDPOINT(dev);
791 	struct ugen_softc *sc;
792 	int error;
793 
794 	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
795 	if (sc == NULL)
796 		return ENXIO;
797 
798 	mutex_enter(&sc->sc_lock);
799 	sc->sc_refcnt++;
800 	mutex_exit(&sc->sc_lock);
801 
802 	error = ugen_do_read(sc, endpt, uio, flag);
803 
804 	mutex_enter(&sc->sc_lock);
805 	if (--sc->sc_refcnt < 0)
806 		usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
807 	mutex_exit(&sc->sc_lock);
808 
809 	return (error);
810 }
811 
/*
 * Common write path for bulk-out and interrupt-out endpoints.  Data is
 * copied from the caller into sc_buffer (or into the write-behind ring
 * buffer when UGEN_BULK_WB is enabled) and transferred to the device.
 * Returns an errno.
 */
Static int
ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
	int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
	u_int32_t n;
	int error = 0;
	u_int32_t tn;
	char *dbuf;
	usbd_xfer_handle xfer;
	usbd_status err;

	DPRINTFN(5, ("%s: ugenwrite: %d\n", device_xname(sc->sc_dev), endpt));

	if (sc->sc_dying)
		return (EIO);

	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenwrite: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenwrite: no pipe\n");
		return (EIO);
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_BULK:
		if (sce->state & UGEN_BULK_WB) {
			/* Write-behind mode: stage data into the ring buffer. */
			DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			/* Ring buffer full and the caller won't wait? */
			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
			    flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return (EWOULDBLOCK);
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for space in the ring buffer. */
				while (sce->ra_wb_used ==
				       sce->limit - sce->ibuf) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenwrite: sleep on %p\n",
						  sce));
					/* "ugenwb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
						 ("ugenwrite: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data from the process. */
				while (uio->uio_resid > 0 &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = min(uio->uio_resid,
						(sce->limit - sce->ibuf)
						 - sce->ra_wb_used);
					/* Never copy past the wrap point. */
					n = min(n, sce->limit - sce->fill);
					error = uiomove(sce->fill, n, uio);
					if (error)
						break;
					sce->fill += n;
					sce->ra_wb_used += n;
					if (sce->fill == sce->limit)
						sce->fill = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was empty, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used > 0) {
					dbuf = (char *)usbd_get_buffer(xfer);
					n = min(sce->ra_wb_used,
						sce->ra_wb_xferlen);
					tn = min(n, sce->limit - sce->cur);
					/* Linearise possibly-wrapped data. */
					memcpy(dbuf, sce->cur, tn);
					dbuf += tn;
					if (n - tn > 0)
						memcpy(dbuf, sce->ibuf,
						       n - tn);
					usbd_setup_xfer(xfer,
					    sce->pipeh, sce, NULL, n,
					    USBD_NO_COPY, USBD_NO_TIMEOUT,
					    ugen_bulkwb_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try again
						 * at the next write.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain bulk write: synchronous chunks bounced via sc_buffer. */
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (EIO);
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_bulk_transfer(xfer, sce->pipeh, 0,
				  sce->timeout, sc->sc_buffer, &n,"ugenwb");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_free_xfer(xfer);
		break;
	case UE_INTERRUPT:
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (EIO);
		/* One max-packet-sized interrupt transfer per iteration. */
		while ((n = min(UGETW(sce->edesc->wMaxPacketSize),
		    uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_intr_transfer(xfer, sce->pipeh, 0,
			    sce->timeout, sc->sc_buffer, &n, "ugenwi");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_free_xfer(xfer);
		break;
	default:
		return (ENXIO);
	}
	return (error);
}
978 
979 int
980 ugenwrite(dev_t dev, struct uio *uio, int flag)
981 {
982 	int endpt = UGENENDPOINT(dev);
983 	struct ugen_softc *sc;
984 	int error;
985 
986 	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
987 	if (sc == NULL)
988 		return ENXIO;
989 
990 	mutex_enter(&sc->sc_lock);
991 	sc->sc_refcnt++;
992 	mutex_exit(&sc->sc_lock);
993 
994 	error = ugen_do_write(sc, endpt, uio, flag);
995 
996 	mutex_enter(&sc->sc_lock);
997 	if (--sc->sc_refcnt < 0)
998 		usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
999 	mutex_exit(&sc->sc_lock);
1000 
1001 	return (error);
1002 }
1003 
1004 int
1005 ugen_activate(device_t self, enum devact act)
1006 {
1007 	struct ugen_softc *sc = device_private(self);
1008 
1009 	switch (act) {
1010 	case DVACT_DEACTIVATE:
1011 		sc->sc_dying = 1;
1012 		return 0;
1013 	default:
1014 		return EOPNOTSUPP;
1015 	}
1016 }
1017 
/*
 * Autoconf detach hook: mark the device dying, abort all pipes to wake
 * sleeping transfers, wait for in-flight users to drain, revoke any open
 * vnodes (which calls close), and finally tear down per-endpoint and
 * per-softc synchronization resources.  The ordering here matters: pipes
 * must be aborted before waiting for users, and the lock/CVs must only be
 * destroyed after the last user is gone.
 */
int
ugen_detach(device_t self, int flags)
{
	struct ugen_softc *sc = device_private(self);
	struct ugen_endpoint *sce;
	int i, dir;
	int maj, mn;

	DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));

	sc->sc_dying = 1;
	pmf_device_deregister(self);
	/* Abort all pipes.  Causes processes waiting for transfer to wake. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			if (sce && sce->pipeh)
				usbd_abort_pipe(sce->pipeh);
		}
	}

	mutex_enter(&sc->sc_lock);
	if (--sc->sc_refcnt >= 0) {
		/* Wake everyone */
		for (i = 0; i < USB_MAX_ENDPOINTS; i++)
			cv_signal(&sc->sc_endpoints[i][IN].cv);
		/* Wait for processes to go away. */
		usb_detach_wait(sc->sc_dev, &sc->sc_detach_cv, &sc->sc_lock);
	}
	mutex_exit(&sc->sc_lock);

	/* locate the major number */
	maj = cdevsw_lookup_major(&ugen_cdevsw);

	/* Nuke the vnodes for any open instances (calls close). */
	mn = device_unit(self) * USB_MAX_ENDPOINTS;
	vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev,
			   sc->sc_dev);

	/* Free per-endpoint select/kqueue state and condition variables. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			seldestroy(&sce->rsel);
			cv_destroy(&sce->cv);
		}
	}

	cv_destroy(&sc->sc_detach_cv);
	mutex_destroy(&sc->sc_lock);

	return (0);
}
1072 
1073 Static void
1074 ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr, usbd_status status)
1075 {
1076 	struct ugen_endpoint *sce = addr;
1077 	struct ugen_softc *sc = sce->sc;
1078 	u_int32_t count;
1079 	u_char *ibuf;
1080 
1081 	if (status == USBD_CANCELLED)
1082 		return;
1083 
1084 	if (status != USBD_NORMAL_COMPLETION) {
1085 		DPRINTF(("ugenintr: status=%d\n", status));
1086 		if (status == USBD_STALLED)
1087 		    usbd_clear_endpoint_stall_async(sce->pipeh);
1088 		return;
1089 	}
1090 
1091 	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1092 	ibuf = sce->ibuf;
1093 
1094 	DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
1095 		     xfer, status, count));
1096 	DPRINTFN(5, ("          data = %02x %02x %02x\n",
1097 		     ibuf[0], ibuf[1], ibuf[2]));
1098 
1099 	(void)b_to_q(ibuf, count, &sce->q);
1100 
1101 	mutex_enter(&sc->sc_lock);
1102 	if (sce->state & UGEN_ASLP) {
1103 		sce->state &= ~UGEN_ASLP;
1104 		DPRINTFN(5, ("ugen_intr: waking %p\n", sce));
1105 		cv_signal(&sce->cv);
1106 	}
1107 	mutex_exit(&sc->sc_lock);
1108 	selnotify(&sce->rsel, 0, 0);
1109 }
1110 
1111 Static void
1112 ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
1113 		usbd_status status)
1114 {
1115 	struct isoreq *req = addr;
1116 	struct ugen_endpoint *sce = req->sce;
1117 	struct ugen_softc *sc = sce->sc;
1118 	u_int32_t count, n;
1119 	int i, isize;
1120 
1121 	/* Return if we are aborting. */
1122 	if (status == USBD_CANCELLED)
1123 		return;
1124 
1125 	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1126 	DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
1127 	    (long)(req - sce->isoreqs), count));
1128 
1129 	/* throw away oldest input if the buffer is full */
1130 	if(sce->fill < sce->cur && sce->cur <= sce->fill + count) {
1131 		sce->cur += count;
1132 		if(sce->cur >= sce->limit)
1133 			sce->cur = sce->ibuf + (sce->limit - sce->cur);
1134 		DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
1135 			     count));
1136 	}
1137 
1138 	isize = UGETW(sce->edesc->wMaxPacketSize);
1139 	for (i = 0; i < UGEN_NISORFRMS; i++) {
1140 		u_int32_t actlen = req->sizes[i];
1141 		char const *tbuf = (char const *)req->dmabuf + isize * i;
1142 
1143 		/* copy data to buffer */
1144 		while (actlen > 0) {
1145 			n = min(actlen, sce->limit - sce->fill);
1146 			memcpy(sce->fill, tbuf, n);
1147 
1148 			tbuf += n;
1149 			actlen -= n;
1150 			sce->fill += n;
1151 			if(sce->fill == sce->limit)
1152 				sce->fill = sce->ibuf;
1153 		}
1154 
1155 		/* setup size for next transfer */
1156 		req->sizes[i] = isize;
1157 	}
1158 
1159 	usbd_setup_isoc_xfer(xfer, sce->pipeh, req, req->sizes, UGEN_NISORFRMS,
1160 			     USBD_NO_COPY, ugen_isoc_rintr);
1161 	(void)usbd_transfer(xfer);
1162 
1163 	mutex_enter(&sc->sc_lock);
1164 	if (sce->state & UGEN_ASLP) {
1165 		sce->state &= ~UGEN_ASLP;
1166 		DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce));
1167 		cv_signal(&sce->cv);
1168 	}
1169 	mutex_exit(&sc->sc_lock);
1170 	selnotify(&sce->rsel, 0, 0);
1171 }
1172 
/*
 * Completion callback for bulk read-ahead (RA) transfers: append the
 * just-received data to the endpoint's ring buffer, start the next read
 * if there is still free space, and wake readers.  On error the RA
 * machinery is stopped (UGEN_RA_WB_STOP) and restarted lazily by the
 * next read.
 */
Static void
ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	u_int32_t count, n;
	char const *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
		    usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used += count;

	/* Copy data to buffer, in up to two chunks if it wraps the ring. */
	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
	n = min(count, sce->limit - sce->fill);
	memcpy(sce->fill, tbuf, n);
	tbuf += n;
	count -= n;
	sce->fill += n;
	if (sce->fill == sce->limit)
		sce->fill = sce->ibuf;
	if (count > 0) {
		/* second chunk: remaining bytes at the start of the ring */
		memcpy(sce->fill, tbuf, count);
		sce->fill += count;
	}

	/* Set up the next request if necessary (n = free space left). */
	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
	if (n > 0) {
		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
		    min(n, sce->ra_wb_xferlen), USBD_NO_COPY,
		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkra_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next read.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		sce->state |= UGEN_RA_WB_STOP;

	/* Wake a sleeping reader and poll/select/kqueue waiters. */
	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1242 
/*
 * Completion callback for bulk write-behind (WB) transfers: the data
 * just written is removed from the ring buffer, and if more buffered
 * data remains another transfer is started.  On error the WB machinery
 * is stopped (UGEN_RA_WB_STOP) and restarted lazily by the next write.
 */
Static void
ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	u_int32_t count, n;
	char *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
		    usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used -= count;

	/* Update buffer pointers (consume `count' bytes, wrapping). */
	sce->cur += count;
	if (sce->cur >= sce->limit)
		sce->cur = sce->ibuf + (sce->cur - sce->limit);

	/* Set up next request if necessary. */
	if (sce->ra_wb_used > 0) {
		/* copy data from buffer, in up to two chunks if it wraps */
		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
		count = min(sce->ra_wb_used, sce->ra_wb_xferlen);
		n = min(count, sce->limit - sce->cur);
		memcpy(tbuf, sce->cur, n);
		tbuf += n;
		if (count - n > 0)
			memcpy(tbuf, sce->ibuf, count - n);

		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
		    count, USBD_NO_COPY, USBD_NO_TIMEOUT, ugen_bulkwb_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkwb_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next write.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		sce->state |= UGEN_RA_WB_STOP;

	/* Wake a sleeping writer and poll/select/kqueue waiters. */
	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1310 
1311 Static usbd_status
1312 ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
1313 {
1314 	usbd_interface_handle iface;
1315 	usb_endpoint_descriptor_t *ed;
1316 	usbd_status err;
1317 	struct ugen_endpoint *sce;
1318 	u_int8_t niface, nendpt, endptno, endpt;
1319 	int dir;
1320 
1321 	DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));
1322 
1323 	err = usbd_interface_count(sc->sc_udev, &niface);
1324 	if (err)
1325 		return (err);
1326 	if (ifaceidx < 0 || ifaceidx >= niface)
1327 		return (USBD_INVAL);
1328 
1329 	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1330 	if (err)
1331 		return (err);
1332 	err = usbd_endpoint_count(iface, &nendpt);
1333 	if (err)
1334 		return (err);
1335 	/* XXX should only do this after setting new altno has succeeded */
1336 	for (endptno = 0; endptno < nendpt; endptno++) {
1337 		ed = usbd_interface2endpoint_descriptor(iface,endptno);
1338 		endpt = ed->bEndpointAddress;
1339 		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1340 		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1341 		sce->sc = 0;
1342 		sce->edesc = 0;
1343 		sce->iface = 0;
1344 	}
1345 
1346 	/* change setting */
1347 	err = usbd_set_interface(iface, altno);
1348 	if (err)
1349 		return (err);
1350 
1351 	err = usbd_endpoint_count(iface, &nendpt);
1352 	if (err)
1353 		return (err);
1354 	for (endptno = 0; endptno < nendpt; endptno++) {
1355 		ed = usbd_interface2endpoint_descriptor(iface,endptno);
1356 		KASSERT(ed != NULL);
1357 		endpt = ed->bEndpointAddress;
1358 		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1359 		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1360 		sce->sc = sc;
1361 		sce->edesc = ed;
1362 		sce->iface = iface;
1363 	}
1364 	return (0);
1365 }
1366 
1367 /* Retrieve a complete descriptor for a certain device and index. */
1368 Static usb_config_descriptor_t *
1369 ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
1370 {
1371 	usb_config_descriptor_t *cdesc, *tdesc, cdescr;
1372 	int len;
1373 	usbd_status err;
1374 
1375 	if (index == USB_CURRENT_CONFIG_INDEX) {
1376 		tdesc = usbd_get_config_descriptor(sc->sc_udev);
1377 		len = UGETW(tdesc->wTotalLength);
1378 		if (lenp)
1379 			*lenp = len;
1380 		cdesc = malloc(len, M_TEMP, M_WAITOK);
1381 		memcpy(cdesc, tdesc, len);
1382 		DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
1383 	} else {
1384 		err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
1385 		if (err)
1386 			return (0);
1387 		len = UGETW(cdescr.wTotalLength);
1388 		DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
1389 		if (lenp)
1390 			*lenp = len;
1391 		cdesc = malloc(len, M_TEMP, M_WAITOK);
1392 		err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
1393 		if (err) {
1394 			free(cdesc, M_TEMP);
1395 			return (0);
1396 		}
1397 	}
1398 	return (cdesc);
1399 }
1400 
1401 Static int
1402 ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
1403 {
1404 	usbd_interface_handle iface;
1405 	usbd_status err;
1406 
1407 	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1408 	if (err)
1409 		return (-1);
1410 	return (usbd_get_interface_altindex(iface));
1411 }
1412 
/*
 * Handler for all ugen ioctls.  The first switch handles commands that
 * apply to a specific (non-control) endpoint: short-transfer flag,
 * timeout, and bulk read-ahead/write-behind configuration.  Everything
 * after it is only valid on the control endpoint: configuration /
 * interface / descriptor queries and raw control requests.
 */
Static int
ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
	      void *addr, int flag, struct lwp *l)
{
	struct ugen_endpoint *sce;
	usbd_status err;
	usbd_interface_handle iface;
	struct usb_config_desc *cd;
	usb_config_descriptor_t *cdesc;
	struct usb_interface_desc *id;
	usb_interface_descriptor_t *idesc;
	struct usb_endpoint_desc *ed;
	usb_endpoint_descriptor_t *edesc;
	struct usb_alt_interface *ai;
	struct usb_string_desc *si;
	u_int8_t conf, alt;

	DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
	if (sc->sc_dying)
		return (EIO);

	switch (cmd) {
	case FIONBIO:
		/* All handled in the upper FS layer. */
		return (0);
	case USB_SET_SHORT_XFER:
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		/* This flag only affects read */
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		if (*(int *)addr)
			sce->state |= UGEN_SHORT_OK;
		else
			sce->state &= ~UGEN_SHORT_OK;
		return (0);
	case USB_SET_TIMEOUT:
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL
		    /* XXX this shouldn't happen, but the distinction between
		       input and output pipes isn't clear enough.
		       || sce->pipeh == NULL */
			)
			return (EINVAL);
		/* Timeout in ms; also used for control requests below. */
		sce->timeout = *(int *)addr;
		return (0);
	case USB_SET_BULK_RA:
		/* Enable/disable read-ahead on a bulk IN endpoint. */
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return (EINVAL);

		if (*(int *)addr) {
			/* Only turn RA on if it's currently off. */
			if (sce->state & UGEN_BULK_RA)
				return (0);

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return (EINVAL);
			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (sce->ra_wb_xfer == NULL)
				return (ENOMEM);
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			/*
			 * Set up a dmabuf because we reuse the xfer with
			 * the same (max) request length like isoc.
			 */
			if (usbd_alloc_buffer(sce->ra_wb_xfer,
					      sce->ra_wb_xferlen) == 0) {
				usbd_free_xfer(sce->ra_wb_xfer);
				return (ENOMEM);
			}
			/* Ring buffer that ugen_bulkra_intr fills. */
			sce->ibuf = malloc(sce->ra_wb_bufsize,
					   M_USBDEV, M_WAITOK);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			sce->state |= UGEN_BULK_RA;
			sce->state &= ~UGEN_RA_WB_STOP;
			/* Now start reading. */
			usbd_setup_xfer(sce->ra_wb_xfer, sce->pipeh, sce,
			    NULL,
			    min(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
			    USBD_NO_COPY, USBD_NO_TIMEOUT,
			    ugen_bulkra_intr);
			err = usbd_transfer(sce->ra_wb_xfer);
			if (err != USBD_IN_PROGRESS) {
				sce->state &= ~UGEN_BULK_RA;
				free(sce->ibuf, M_USBDEV);
				sce->ibuf = NULL;
				usbd_free_xfer(sce->ra_wb_xfer);
				return (EIO);
			}
		} else {
			/* Only turn RA off if it's currently on. */
			if (!(sce->state & UGEN_BULK_RA))
				return (0);

			sce->state &= ~UGEN_BULK_RA;
			usbd_abort_pipe(sce->pipeh);
			usbd_free_xfer(sce->ra_wb_xfer);
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and drain the buffer
			 * instead.
			 */
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
		return (0);
	case USB_SET_BULK_WB:
		/* Enable/disable write-behind on a bulk OUT endpoint. */
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return (EINVAL);

		if (*(int *)addr) {
			/* Only turn WB on if it's currently off. */
			if (sce->state & UGEN_BULK_WB)
				return (0);

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return (EINVAL);
			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (sce->ra_wb_xfer == NULL)
				return (ENOMEM);
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			/*
			 * Set up a dmabuf because we reuse the xfer with
			 * the same (max) request length like isoc.
			 */
			if (usbd_alloc_buffer(sce->ra_wb_xfer,
					      sce->ra_wb_xferlen) == 0) {
				usbd_free_xfer(sce->ra_wb_xfer);
				return (ENOMEM);
			}
			/* Ring buffer that ugen_bulkwb_intr drains. */
			sce->ibuf = malloc(sce->ra_wb_bufsize,
					   M_USBDEV, M_WAITOK);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			/* STOP is set: the first write kicks off transfers. */
			sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
		} else {
			/* Only turn WB off if it's currently on. */
			if (!(sce->state & UGEN_BULK_WB))
				return (0);

			sce->state &= ~UGEN_BULK_WB;
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and keep writing to
			 * drain the buffer instead.
			 */
			usbd_abort_pipe(sce->pipeh);
			usbd_free_xfer(sce->ra_wb_xfer);
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
		return (0);
	case USB_SET_BULK_RA_OPT:
	case USB_SET_BULK_WB_OPT:
	{
		/* Configure RA/WB buffer and request sizes. */
		struct usb_bulk_ra_wb_opt *opt;

		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		opt = (struct usb_bulk_ra_wb_opt *)addr;
		if (cmd == USB_SET_BULK_RA_OPT)
			sce = &sc->sc_endpoints[endpt][IN];
		else
			sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		if (opt->ra_wb_buffer_size < 1 ||
		    opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
		    opt->ra_wb_request_size < 1 ||
		    opt->ra_wb_request_size > opt->ra_wb_buffer_size)
			return (EINVAL);
		/*
		 * XXX These changes do not take effect until the
		 * next time RA/WB mode is enabled but they ought to
		 * take effect immediately.
		 */
		sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
		sce->ra_wb_reqsize = opt->ra_wb_request_size;
		return (0);
	}
	default:
		break;
	}

	/* Remaining commands are only valid on the control endpoint. */
	if (endpt != USB_CONTROL_ENDPOINT)
		return (EINVAL);

	switch (cmd) {
#ifdef UGEN_DEBUG
	case USB_SETDEBUG:
		ugendebug = *(int *)addr;
		break;
#endif
	case USB_GET_CONFIG:
		err = usbd_get_config(sc->sc_udev, &conf);
		if (err)
			return (EIO);
		*(int *)addr = conf;
		break;
	case USB_SET_CONFIG:
		if (!(flag & FWRITE))
			return (EPERM);
		err = ugen_set_config(sc, *(int *)addr);
		switch (err) {
		case USBD_NORMAL_COMPLETION:
			break;
		case USBD_IN_USE:
			return (EBUSY);
		default:
			return (EIO);
		}
		break;
	case USB_GET_ALTINTERFACE:
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		idesc = usbd_get_interface_descriptor(iface);
		if (idesc == NULL)
			return (EIO);
		ai->uai_alt_no = idesc->bAlternateSetting;
		break;
	case USB_SET_ALTINTERFACE:
		if (!(flag & FWRITE))
			return (EPERM);
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		err = ugen_set_interface(sc, ai->uai_interface_index,
		    ai->uai_alt_no);
		if (err)
			return (EINVAL);
		break;
	case USB_GET_NO_ALT:
		ai = (struct usb_alt_interface *)addr;
		cdesc = ugen_get_cdesc(sc, ai->uai_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
		if (idesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		ai->uai_alt_no = usbd_get_no_alts(cdesc,
		    idesc->bInterfaceNumber);
		free(cdesc, M_TEMP);
		break;
	case USB_GET_DEVICE_DESC:
		*(usb_device_descriptor_t *)addr =
			*usbd_get_device_descriptor(sc->sc_udev);
		break;
	case USB_GET_CONFIG_DESC:
		cd = (struct usb_config_desc *)addr;
		cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		cd->ucd_desc = *cdesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_INTERFACE_DESC:
		id = (struct usb_interface_desc *)addr;
		cdesc = ugen_get_cdesc(sc, id->uid_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
		    id->uid_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, id->uid_interface_index);
		else
			alt = id->uid_alt_index;
		idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
		if (idesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		id->uid_desc = *idesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_ENDPOINT_DESC:
		ed = (struct usb_endpoint_desc *)addr;
		cdesc = ugen_get_cdesc(sc, ed->ued_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
		    ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, ed->ued_interface_index);
		else
			alt = ed->ued_alt_index;
		edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
					alt, ed->ued_endpoint_index);
		if (edesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		ed->ued_desc = *edesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_FULL_DESC:
	{
		/* Copy the full config descriptor out to user space. */
		int len;
		struct iovec iov;
		struct uio uio;
		struct usb_full_desc *fd = (struct usb_full_desc *)addr;
		int error;

		cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &len);
		if (cdesc == NULL)
			return (EINVAL);
		if (len > fd->ufd_size)
			len = fd->ufd_size;
		iov.iov_base = (void *)fd->ufd_data;
		iov.iov_len = len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_resid = len;
		uio.uio_offset = 0;
		uio.uio_rw = UIO_READ;
		uio.uio_vmspace = l->l_proc->p_vmspace;
		error = uiomove((void *)cdesc, len, &uio);
		free(cdesc, M_TEMP);
		return (error);
	}
	case USB_GET_STRING_DESC: {
		int len;
		si = (struct usb_string_desc *)addr;
		err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
			  si->usd_language_id, &si->usd_desc, &len);
		if (err)
			return (EINVAL);
		break;
	}
	case USB_DO_REQUEST:
	{
		/* Issue a raw control request on behalf of the user. */
		struct usb_ctl_request *ur = (void *)addr;
		int len = UGETW(ur->ucr_request.wLength);
		struct iovec iov;
		struct uio uio;
		void *ptr = 0;
		usbd_status xerr;
		int error = 0;

		if (!(flag & FWRITE))
			return (EPERM);
		/* Avoid requests that would damage the bus integrity. */
		if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_CONFIG) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
		     ur->ucr_request.bRequest == UR_SET_INTERFACE))
			return (EINVAL);

		if (len < 0 || len > 32767)
			return (EINVAL);
		if (len != 0) {
			/* Stage the data phase through a kernel bounce buffer. */
			iov.iov_base = (void *)ur->ucr_data;
			iov.iov_len = len;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_resid = len;
			uio.uio_offset = 0;
			uio.uio_rw =
				ur->ucr_request.bmRequestType & UT_READ ?
				UIO_READ : UIO_WRITE;
			uio.uio_vmspace = l->l_proc->p_vmspace;
			ptr = malloc(len, M_TEMP, M_WAITOK);
			if (uio.uio_rw == UIO_WRITE) {
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
		/* Use the control endpoint's configured timeout. */
		sce = &sc->sc_endpoints[endpt][IN];
		xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
			  ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
		if (xerr) {
			error = EIO;
			goto ret;
		}
		if (len != 0) {
			if (uio.uio_rw == UIO_READ) {
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
	ret:
		if (ptr)
			free(ptr, M_TEMP);
		return (error);
	}
	case USB_GET_DEVICEINFO:
		usbd_fill_deviceinfo(sc->sc_udev,
				     (struct usb_device_info *)addr, 0);
		break;
#ifdef COMPAT_30
	case USB_GET_DEVICEINFO_OLD:
		usbd_fill_deviceinfo_old(sc->sc_udev,
					 (struct usb_device_info_old *)addr, 0);

		break;
#endif
	default:
		return (EINVAL);
	}
	return (0);
}
1840 
1841 int
1842 ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1843 {
1844 	int endpt = UGENENDPOINT(dev);
1845 	struct ugen_softc *sc;
1846 	int error;
1847 
1848 	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
1849 	if (sc == NULL)
1850 		return ENXIO;
1851 
1852 	sc->sc_refcnt++;
1853 	error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
1854 	if (--sc->sc_refcnt < 0)
1855 		usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
1856 	return (error);
1857 }
1858 
/*
 * poll(2) entry point.  Readiness depends on the endpoint transfer
 * type: interrupt IN is readable when the clist has data, isoc IN when
 * the ring buffer is non-empty, bulk IN (with read-ahead) when buffered
 * data exists, and bulk OUT (with write-behind) when the ring has free
 * space.  Plain bulk endpoints always report ready since we cannot
 * predict whether a transfer would block.
 */
int
ugenpoll(dev_t dev, int events, struct lwp *l)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce_in, *sce_out;
	int revents = 0;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (sc->sc_dying)
		return (POLLHUP);

	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	/*
	 * NOTE(review): these are addresses of array elements, so the
	 * NULL comparisons below can never be true — likely vestigial.
	 */
	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
	if (sce_in == NULL && sce_out == NULL)
		return (POLLERR);
#ifdef DIAGNOSTIC
	if (!sce_in->edesc && !sce_out->edesc) {
		printf("ugenpoll: no edesc\n");
		return (POLLERR);
	}
	/* It's possible to have only one pipe open. */
	if (!sce_in->pipeh && !sce_out->pipeh) {
		printf("ugenpoll: no pipe\n");
		return (POLLERR);
	}
#endif

	mutex_enter(&sc->sc_lock);
	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (sce_in->q.c_cc > 0)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_ISOCHRONOUS:
			if (sce_in->cur != sce_in->fill)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_BULK:
			if (sce_in->state & UGEN_BULK_RA) {
				if (sce_in->ra_wb_used > 0)
					revents |= events &
					    (POLLIN | POLLRDNORM);
				else
					selrecord(l, &sce_in->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			 revents |= events & (POLLIN | POLLRDNORM);
			 break;
		default:
			break;
		}
	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX unimplemented */
			break;
		case UE_BULK:
			if (sce_out->state & UGEN_BULK_WB) {
				if (sce_out->ra_wb_used <
				    sce_out->limit - sce_out->ibuf)
					revents |= events &
					    (POLLOUT | POLLWRNORM);
				else
					selrecord(l, &sce_out->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			 revents |= events & (POLLOUT | POLLWRNORM);
			 break;
		default:
			break;
		}

	mutex_exit(&sc->sc_lock);

	return (revents);
}
1957 
1958 static void
1959 filt_ugenrdetach(struct knote *kn)
1960 {
1961 	struct ugen_endpoint *sce = kn->kn_hook;
1962 	struct ugen_softc *sc = sce->sc;
1963 
1964 	mutex_enter(&sc->sc_lock);
1965 	SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
1966 	mutex_exit(&sc->sc_lock);
1967 }
1968 
1969 static int
1970 filt_ugenread_intr(struct knote *kn, long hint)
1971 {
1972 	struct ugen_endpoint *sce = kn->kn_hook;
1973 
1974 	kn->kn_data = sce->q.c_cc;
1975 	return (kn->kn_data > 0);
1976 }
1977 
1978 static int
1979 filt_ugenread_isoc(struct knote *kn, long hint)
1980 {
1981 	struct ugen_endpoint *sce = kn->kn_hook;
1982 
1983 	if (sce->cur == sce->fill)
1984 		return (0);
1985 
1986 	if (sce->cur < sce->fill)
1987 		kn->kn_data = sce->fill - sce->cur;
1988 	else
1989 		kn->kn_data = (sce->limit - sce->cur) +
1990 		    (sce->fill - sce->ibuf);
1991 
1992 	return (1);
1993 }
1994 
1995 static int
1996 filt_ugenread_bulk(struct knote *kn, long hint)
1997 {
1998 	struct ugen_endpoint *sce = kn->kn_hook;
1999 
2000 	if (!(sce->state & UGEN_BULK_RA))
2001 		/*
2002 		 * We have no easy way of determining if a read will
2003 		 * yield any data or a write will happen.
2004 		 * So, emulate "seltrue".
2005 		 */
2006 		return (filt_seltrue(kn, hint));
2007 
2008 	if (sce->ra_wb_used == 0)
2009 		return (0);
2010 
2011 	kn->kn_data = sce->ra_wb_used;
2012 
2013 	return (1);
2014 }
2015 
2016 static int
2017 filt_ugenwrite_bulk(struct knote *kn, long hint)
2018 {
2019 	struct ugen_endpoint *sce = kn->kn_hook;
2020 
2021 	if (!(sce->state & UGEN_BULK_WB))
2022 		/*
2023 		 * We have no easy way of determining if a read will
2024 		 * yield any data or a write will happen.
2025 		 * So, emulate "seltrue".
2026 		 */
2027 		return (filt_seltrue(kn, hint));
2028 
2029 	if (sce->ra_wb_used == sce->limit - sce->ibuf)
2030 		return (0);
2031 
2032 	kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;
2033 
2034 	return (1);
2035 }
2036 
/*
 * kqueue filter tables.  All four share filt_ugenrdetach since every
 * knote is hooked onto the endpoint's rsel klist; the leading 1
 * presumably sets f_isfd (file-descriptor-attached filter) — see the
 * struct filterops definition in sys/event.h to confirm.
 */
static const struct filterops ugenread_intr_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_intr };

static const struct filterops ugenread_isoc_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_isoc };

static const struct filterops ugenread_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_bulk };

static const struct filterops ugenwrite_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenwrite_bulk };
2048 
/*
 * kqueue attach entry point: select the filter ops matching the
 * requested filter (read/write) and the endpoint's transfer type, hook
 * the knote onto the endpoint's klist.  The control endpoint does not
 * support kqueue.
 */
int
ugenkqfilter(dev_t dev, struct knote *kn)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	struct klist *klist;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (sc->sc_dying)
		return (ENXIO);

	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
		if (sce == NULL)
			return (EINVAL);

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			kn->kn_fop = &ugenread_intr_filtops;
			break;
		case UE_ISOCHRONOUS:
			kn->kn_fop = &ugenread_isoc_filtops;
			break;
		case UE_BULK:
			kn->kn_fop = &ugenread_bulk_filtops;
			break;
		default:
			return (EINVAL);
		}
		break;

	case EVFILT_WRITE:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
		if (sce == NULL)
			return (EINVAL);

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX poll doesn't support this */
			return (EINVAL);

		case UE_BULK:
			kn->kn_fop = &ugenwrite_bulk_filtops;
			break;
		default:
			return (EINVAL);
		}
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = sce;

	/* Publish the knote on the endpoint's klist under the softc lock. */
	mutex_enter(&sc->sc_lock);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	mutex_exit(&sc->sc_lock);

	return (0);
}
2120