xref: /netbsd-src/sys/dev/usb/ugen.c (revision e61202360d5611414dd6f6115934a96aa1f50b1a)
1 /*	$NetBSD: ugen.c,v 1.120 2012/06/10 06:15:53 mrg Exp $	*/
2 
3 /*
4  * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Lennart Augustsson (lennart@augustsson.net) at
9  * Carlstedt Research & Technology.
10  *
11  * Copyright (c) 2006 BBN Technologies Corp.  All rights reserved.
12  * Effort sponsored in part by the Defense Advanced Research Projects
13  * Agency (DARPA) and the Department of the Interior National Business
14  * Center under agreement number NBCHC050166.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.120 2012/06/10 06:15:53 mrg Exp $");
41 
42 #include "opt_compat_netbsd.h"
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/kernel.h>
47 #include <sys/malloc.h>
48 #include <sys/device.h>
49 #include <sys/ioctl.h>
50 #include <sys/conf.h>
51 #include <sys/tty.h>
52 #include <sys/file.h>
53 #include <sys/select.h>
54 #include <sys/proc.h>
55 #include <sys/vnode.h>
56 #include <sys/poll.h>
57 
58 #include <dev/usb/usb.h>
59 #include <dev/usb/usbdi.h>
60 #include <dev/usb/usbdi_util.h>
61 
62 #ifdef UGEN_DEBUG
63 #define DPRINTF(x)	if (ugendebug) printf x
64 #define DPRINTFN(n,x)	if (ugendebug>(n)) printf x
65 int	ugendebug = 0;
66 #else
67 #define DPRINTF(x)
68 #define DPRINTFN(n,x)
69 #endif
70 
71 #define	UGEN_CHUNK	128	/* chunk size for read */
72 #define	UGEN_IBSIZE	1020	/* buffer size */
73 #define	UGEN_BBSIZE	1024
74 
75 #define UGEN_NISOREQS	4	/* number of outstanding xfer requests */
76 #define UGEN_NISORFRMS	8	/* number of transactions per req */
77 #define UGEN_NISOFRAMES	(UGEN_NISORFRMS * UGEN_NISOREQS)
78 
79 #define UGEN_BULK_RA_WB_BUFSIZE	16384		/* default buffer size */
80 #define UGEN_BULK_RA_WB_BUFMAX	(1 << 20)	/* maximum allowed buffer */
81 
/*
 * Per-endpoint, per-direction state.  One of these exists for every
 * (endpoint address, direction) pair; which members are used depends on
 * the transfer type of the endpoint (interrupt, bulk, isochronous).
 */
struct ugen_endpoint {
	struct ugen_softc *sc;	/* back pointer to the owning softc */
	usb_endpoint_descriptor_t *edesc; /* descriptor; NULL if absent */
	usbd_interface_handle iface;	/* interface this endpoint lives on */
	int state;		/* bitmask of the UGEN_* flags below */
#define	UGEN_ASLP	0x02	/* waiting for data */
#define UGEN_SHORT_OK	0x04	/* short xfers are OK */
#define UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
#define UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
#define UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
	usbd_pipe_handle pipeh;	/* open pipe; NULL while closed */
	struct clist q;		/* interrupt-in: queue filled by ugenintr() */
	struct selinfo rsel;	/* select/poll/kqueue bookkeeping */
	u_char *ibuf;		/* start of buffer (circular for isoc) */
	u_char *fill;		/* location for input (isoc) */
	u_char *limit;		/* end of circular buffer (isoc) */
	u_char *cur;		/* current read location (isoc) */
	u_int32_t timeout;	/* I/O timeout in ms (USBD_NO_TIMEOUT = wait) */
	u_int32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
	u_int32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
	u_int32_t ra_wb_used;	 /* how much is in buffer */
	u_int32_t ra_wb_xferlen; /* current xfer length for RA/WB */
	usbd_xfer_handle ra_wb_xfer; /* the single outstanding RA/WB xfer */
	struct isoreq {		/* one of the outstanding isoc requests */
		struct ugen_endpoint *sce; /* back pointer for the callback */
		usbd_xfer_handle xfer;	/* the transfer itself */
		void *dmabuf;		/* its DMA-able data buffer */
		u_int16_t sizes[UGEN_NISORFRMS]; /* per-frame lengths */
	} isoreqs[UGEN_NISOREQS];
	/* Keep this last; we don't overwrite it in ugen_set_config() */
	kcondvar_t		cv;	/* wakes sleepers in read/write paths */
};
114 
/*
 * Per-device instance state: one softc per attached ugen unit.
 */
struct ugen_softc {
	device_t sc_dev;		/* base device */
	usbd_device_handle sc_udev;	/* the USB device we drive */

	kmutex_t		sc_lock;	/* protects refcnt and sleeps */
	kcondvar_t		sc_detach_cv;	/* detach waits here for users */

	char sc_is_open[USB_MAX_ENDPOINTS];	/* per-endpoint open flag */
	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
#define OUT 0
#define IN  1

	int sc_refcnt;			/* active read/write/ioctl users */
	char sc_buffer[UGEN_BBSIZE];	/* bounce buffer for bulk/intr I/O */
	u_char sc_dying;		/* set on deactivate/detach; fail I/O */
};
131 
132 dev_type_open(ugenopen);
133 dev_type_close(ugenclose);
134 dev_type_read(ugenread);
135 dev_type_write(ugenwrite);
136 dev_type_ioctl(ugenioctl);
137 dev_type_poll(ugenpoll);
138 dev_type_kqfilter(ugenkqfilter);
139 
/* Character device switch: entry points behind the /dev/ugenN.EE nodes. */
const struct cdevsw ugen_cdevsw = {
	ugenopen, ugenclose, ugenread, ugenwrite, ugenioctl,
	nostop, notty, ugenpoll, nommap, ugenkqfilter, D_OTHER,
};
144 
145 Static void ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr,
146 		     usbd_status status);
147 Static void ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
148 			    usbd_status status);
149 Static void ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
150 			     usbd_status status);
151 Static void ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
152 			     usbd_status status);
153 Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
154 Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
155 Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
156 			 void *, int, struct lwp *);
157 Static int ugen_set_config(struct ugen_softc *sc, int configno);
158 Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *sc,
159 					       int index, int *lenp);
160 Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
161 Static int ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx);
162 
/*
 * Minor number layout: bits 4-7 select the unit, bits 0-3 the endpoint.
 * NOTE(review): UGENDEV() hard-codes major number 0 -- presumably it is
 * only used where the major is irrelevant; confirm against its callers.
 */
#define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
#define UGENENDPOINT(n) (minor(n) & 0xf)
#define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))
166 
167 int             ugen_match(device_t, cfdata_t, void *);
168 void            ugen_attach(device_t, device_t, void *);
169 int             ugen_detach(device_t, int);
170 int             ugen_activate(device_t, enum devact);
171 extern struct cfdriver ugen_cd;
172 CFATTACH_DECL_NEW(ugen, sizeof(struct ugen_softc), ugen_match, ugen_attach, ugen_detach, ugen_activate);
173 
174 /* toggle to control attach priority. -1 means "let autoconf decide" */
175 int ugen_override = -1;
176 
177 int
178 ugen_match(device_t parent, cfdata_t match, void *aux)
179 {
180 	struct usb_attach_arg *uaa = aux;
181 	int override;
182 
183 	if (ugen_override != -1)
184 		override = ugen_override;
185 	else
186 		override = match->cf_flags & 1;
187 
188 	if (override)
189 		return (UMATCH_HIGHEST);
190 	else if (uaa->usegeneric)
191 		return (UMATCH_GENERIC);
192 	else
193 		return (UMATCH_NONE);
194 }
195 
/*
 * Attach: select configuration index 0 (ugen's default), build the
 * per-endpoint state for that configuration via ugen_set_config(), and
 * initialise the selinfo/condvar pair on every endpoint slot.  Any
 * failure flags sc_dying so the cdevsw entry points refuse further use.
 */
void
ugen_attach(device_t parent, device_t self, void *aux)
{
	struct ugen_softc *sc = device_private(self);
	struct usb_attach_arg *uaa = aux;
	usbd_device_handle udev;
	char *devinfop;
	usbd_status err;
	int i, dir, conf;

	aprint_naive("\n");
	aprint_normal("\n");

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_USB);
	cv_init(&sc->sc_detach_cv, "ugendet");

	devinfop = usbd_devinfo_alloc(uaa->device, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_dev = self;
	sc->sc_udev = udev = uaa->device;

	/* First set configuration index 0, the default one for ugen. */
	err = usbd_set_config_index(udev, 0, 0);
	if (err) {
		aprint_error_dev(self,
		    "setting configuration index 0 failed\n");
		sc->sc_dying = 1;
		return;
	}
	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;

	/* Set up all the local state for this configuration. */
	err = ugen_set_config(sc, conf);
	if (err) {
		aprint_error_dev(self, "setting configuration %d failed\n",
		    conf);
		sc->sc_dying = 1;
		return;
	}

	/* Poll/select and sleep state for every possible endpoint slot. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce;

			sce = &sc->sc_endpoints[i][dir];
			selinit(&sce->rsel);
			cv_init(&sce->cv, "ugensce");
		}
	}

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev,
			   sc->sc_dev);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;
}
256 
/*
 * Switch the device to configuration `configno' (unless it is already
 * current) and rebuild the sc_endpoints[][] table from the descriptors
 * of every interface in that configuration.
 *
 * Fails with USBD_IN_USE if any non-control endpoint node is open,
 * since the endpoint table would change underneath the open instance.
 * Returns USBD_NORMAL_COMPLETION or a usbd_status error.
 */
Static int
ugen_set_config(struct ugen_softc *sc, int configno)
{
	usbd_device_handle dev = sc->sc_udev;
	usb_config_descriptor_t *cdesc;
	usbd_interface_handle iface;
	usb_endpoint_descriptor_t *ed;
	struct ugen_endpoint *sce;
	u_int8_t niface, nendpt;
	int ifaceno, endptno, endpt;
	usbd_status err;
	int dir, i;

	DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
		    device_xname(sc->sc_dev), configno, sc));

	/*
	 * We start at 1, not 0, because we don't care whether the
	 * control endpoint is open or not. It is always present.
	 */
	for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
		if (sc->sc_is_open[endptno]) {
			DPRINTFN(1,
			     ("ugen_set_config: %s - endpoint %d is open\n",
			      device_xname(sc->sc_dev), endptno));
			return (USBD_IN_USE);
		}

	/* Avoid setting the current value. */
	cdesc = usbd_get_config_descriptor(dev);
	if (!cdesc || cdesc->bConfigurationValue != configno) {
		err = usbd_set_config_no(dev, configno, 1);
		if (err)
			return (err);
	}

	err = usbd_interface_count(dev, &niface);
	if (err)
		return (err);

	/* Clear out the old info, but leave the cv initialised. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			/* cv is the last member; wipe everything before it. */
			memset(sce, 0, offsetof(struct ugen_endpoint, cv));
		}
	}

	/* Record sc/edesc/iface for every endpoint of every interface. */
	for (ifaceno = 0; ifaceno < niface; ifaceno++) {
		DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
		err = usbd_device2interface_handle(dev, ifaceno, &iface);
		if (err)
			return (err);
		err = usbd_endpoint_count(iface, &nendpt);
		if (err)
			return (err);
		for (endptno = 0; endptno < nendpt; endptno++) {
			ed = usbd_interface2endpoint_descriptor(iface,endptno);
			KASSERT(ed != NULL);
			endpt = ed->bEndpointAddress;
			dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
			sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
			DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
				    "(%d,%d), sce=%p\n",
				    endptno, endpt, UE_GET_ADDR(endpt),
				    UE_GET_DIR(endpt), sce));
			sce->sc = sc;
			sce->edesc = ed;
			sce->iface = iface;
		}
	}
	return (USBD_NORMAL_COMPLETION);
}
330 
331 int
332 ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
333 {
334 	struct ugen_softc *sc;
335 	int unit = UGENUNIT(dev);
336 	int endpt = UGENENDPOINT(dev);
337 	usb_endpoint_descriptor_t *edesc;
338 	struct ugen_endpoint *sce;
339 	int dir, isize;
340 	usbd_status err;
341 	usbd_xfer_handle xfer;
342 	void *tbuf;
343 	int i, j;
344 
345 	sc = device_lookup_private(&ugen_cd, unit);
346 	if (sc == NULL)
347 		return ENXIO;
348 
349 	DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
350 		     flag, mode, unit, endpt));
351 
352 	if (sc == NULL || sc->sc_dying)
353 		return (ENXIO);
354 
355 	/* The control endpoint allows multiple opens. */
356 	if (endpt == USB_CONTROL_ENDPOINT) {
357 		sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
358 		return (0);
359 	}
360 
361 	if (sc->sc_is_open[endpt])
362 		return (EBUSY);
363 
364 	/* Make sure there are pipes for all directions. */
365 	for (dir = OUT; dir <= IN; dir++) {
366 		if (flag & (dir == OUT ? FWRITE : FREAD)) {
367 			sce = &sc->sc_endpoints[endpt][dir];
368 			if (sce == 0 || sce->edesc == 0)
369 				return (ENXIO);
370 		}
371 	}
372 
373 	/* Actually open the pipes. */
374 	/* XXX Should back out properly if it fails. */
375 	for (dir = OUT; dir <= IN; dir++) {
376 		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
377 			continue;
378 		sce = &sc->sc_endpoints[endpt][dir];
379 		sce->state = 0;
380 		sce->timeout = USBD_NO_TIMEOUT;
381 		DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
382 			     sc, endpt, dir, sce));
383 		edesc = sce->edesc;
384 		switch (edesc->bmAttributes & UE_XFERTYPE) {
385 		case UE_INTERRUPT:
386 			if (dir == OUT) {
387 				err = usbd_open_pipe(sce->iface,
388 				    edesc->bEndpointAddress, 0, &sce->pipeh);
389 				if (err)
390 					return (EIO);
391 				break;
392 			}
393 			isize = UGETW(edesc->wMaxPacketSize);
394 			if (isize == 0)	/* shouldn't happen */
395 				return (EINVAL);
396 			sce->ibuf = malloc(isize, M_USBDEV, M_WAITOK);
397 			DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
398 				     endpt, isize));
399 			if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1)
400 				return (ENOMEM);
401 			err = usbd_open_pipe_intr(sce->iface,
402 				  edesc->bEndpointAddress,
403 				  USBD_SHORT_XFER_OK, &sce->pipeh, sce,
404 				  sce->ibuf, isize, ugenintr,
405 				  USBD_DEFAULT_INTERVAL);
406 			if (err) {
407 				free(sce->ibuf, M_USBDEV);
408 				clfree(&sce->q);
409 				return (EIO);
410 			}
411 			DPRINTFN(5, ("ugenopen: interrupt open done\n"));
412 			break;
413 		case UE_BULK:
414 			err = usbd_open_pipe(sce->iface,
415 				  edesc->bEndpointAddress, 0, &sce->pipeh);
416 			if (err)
417 				return (EIO);
418 			sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
419 			/*
420 			 * Use request size for non-RA/WB transfers
421 			 * as the default.
422 			 */
423 			sce->ra_wb_reqsize = UGEN_BBSIZE;
424 			break;
425 		case UE_ISOCHRONOUS:
426 			if (dir == OUT)
427 				return (EINVAL);
428 			isize = UGETW(edesc->wMaxPacketSize);
429 			if (isize == 0)	/* shouldn't happen */
430 				return (EINVAL);
431 			sce->ibuf = malloc(isize * UGEN_NISOFRAMES,
432 				M_USBDEV, M_WAITOK);
433 			sce->cur = sce->fill = sce->ibuf;
434 			sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
435 			DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
436 				     endpt, isize));
437 			err = usbd_open_pipe(sce->iface,
438 				  edesc->bEndpointAddress, 0, &sce->pipeh);
439 			if (err) {
440 				free(sce->ibuf, M_USBDEV);
441 				return (EIO);
442 			}
443 			for(i = 0; i < UGEN_NISOREQS; ++i) {
444 				sce->isoreqs[i].sce = sce;
445 				xfer = usbd_alloc_xfer(sc->sc_udev);
446 				if (xfer == 0)
447 					goto bad;
448 				sce->isoreqs[i].xfer = xfer;
449 				tbuf = usbd_alloc_buffer
450 					(xfer, isize * UGEN_NISORFRMS);
451 				if (tbuf == 0) {
452 					i++;
453 					goto bad;
454 				}
455 				sce->isoreqs[i].dmabuf = tbuf;
456 				for(j = 0; j < UGEN_NISORFRMS; ++j)
457 					sce->isoreqs[i].sizes[j] = isize;
458 				usbd_setup_isoc_xfer
459 					(xfer, sce->pipeh, &sce->isoreqs[i],
460 					 sce->isoreqs[i].sizes,
461 					 UGEN_NISORFRMS, USBD_NO_COPY,
462 					 ugen_isoc_rintr);
463 				(void)usbd_transfer(xfer);
464 			}
465 			DPRINTFN(5, ("ugenopen: isoc open done\n"));
466 			break;
467 		bad:
468 			while (--i >= 0) /* implicit buffer free */
469 				usbd_free_xfer(sce->isoreqs[i].xfer);
470 			return (ENOMEM);
471 		case UE_CONTROL:
472 			sce->timeout = USBD_DEFAULT_TIMEOUT;
473 			return (EINVAL);
474 		}
475 	}
476 	sc->sc_is_open[endpt] = 1;
477 	return (0);
478 }
479 
/*
 * Close an endpoint node.  The control endpoint just clears its open
 * flag.  For other endpoints, for each direction that was opened, we
 * abort and close the pipe first (so no completion callback can run
 * against freed state), then release the per-type resources: the
 * interrupt clist, the isochronous xfers, or the RA/WB xfer, and
 * finally the input buffer itself.
 */
int
ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	int dir;
	int i;

	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
		     flag, mode, UGENUNIT(dev), endpt));

#ifdef DIAGNOSTIC
	if (!sc->sc_is_open[endpt]) {
		printf("ugenclose: not open\n");
		return (EINVAL);
	}
#endif

	if (endpt == USB_CONTROL_ENDPOINT) {
		DPRINTFN(5, ("ugenclose: close control\n"));
		sc->sc_is_open[endpt] = 0;
		return (0);
	}

	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		if (sce == NULL || sce->pipeh == NULL)
			continue;
		DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
			     endpt, dir, sce));

		/* Stop all I/O before freeing anything the callbacks use. */
		usbd_abort_pipe(sce->pipeh);
		usbd_close_pipe(sce->pipeh);
		sce->pipeh = NULL;

		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			/* Drain and release the character queue. */
			ndflush(&sce->q, sce->q.c_cc);
			clfree(&sce->q);
			break;
		case UE_ISOCHRONOUS:
			for (i = 0; i < UGEN_NISOREQS; ++i)
				usbd_free_xfer(sce->isoreqs[i].xfer);
			break;
		case UE_BULK:
			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB))
				/* ibuf freed below */
				usbd_free_xfer(sce->ra_wb_xfer);
			break;
		default:
			break;
		}

		if (sce->ibuf != NULL) {
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
	}
	sc->sc_is_open[endpt] = 0;

	return (0);
}
549 
/*
 * Common read path (called from ugenread() with a device reference
 * held).  Behavior depends on the endpoint's transfer type:
 *
 *  - UE_INTERRUPT: sleep (bounded by sce->timeout, in ms) until
 *    ugenintr() has queued bytes on sce->q, then copy them out via
 *    sc->sc_buffer in chunks.
 *  - UE_BULK with UGEN_BULK_RA set: drain the read-ahead ring buffer
 *    filled by ugen_bulkra_intr(), restarting the transfer chain if it
 *    had stopped on a full buffer.
 *  - UE_BULK otherwise: synchronous bulk transfers into sc->sc_buffer.
 *  - UE_ISOCHRONOUS: copy out of the circular ibuf filled by
 *    ugen_isoc_rintr().
 *
 * Returns 0 or an errno (EIO when the device is dying, EWOULDBLOCK for
 * a non-blocking read with nothing buffered, EINTR/ETIMEDOUT from
 * sleeps and transfers).
 */
Static int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
	u_int32_t n, tn;
	usbd_xfer_handle xfer;
	usbd_status err;
	int error = 0;

	DPRINTFN(5, ("%s: ugenread: %d\n", device_xname(sc->sc_dev), endpt));

	if (sc->sc_dying)
		return (EIO);

	/* Reads of the control endpoint are done through ioctl. */
	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenread: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenread: no pipe\n");
		return (EIO);
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_INTERRUPT:
		/* Block until activity occurred. */
		mutex_enter(&sc->sc_lock);
		while (sce->q.c_cc == 0) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			/* "ugenri" */
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}
		mutex_exit(&sc->sc_lock);

		/* Transfer as many chunks as possible. */
		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
			n = min(sce->q.c_cc, uio->uio_resid);
			if (n > sizeof(sc->sc_buffer))
				n = sizeof(sc->sc_buffer);

			/* Remove a small chunk from the input queue. */
			q_to_b(&sce->q, sc->sc_buffer, n);
			DPRINTFN(5, ("ugenread: got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
		}
		break;
	case UE_BULK:
		if (sce->state & UGEN_BULK_RA) {
			DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return (EWOULDBLOCK);
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for the ring to have data. */
				while (sce->ra_wb_used == 0) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenread: sleep on %p\n",
						  sce));
					/* "ugenrb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
						 ("ugenread: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data to the process. */
				while (uio->uio_resid > 0
				       && sce->ra_wb_used > 0) {
					n = min(uio->uio_resid,
						sce->ra_wb_used);
					/* Don't read past the wrap point. */
					n = min(n, sce->limit - sce->cur);
					error = uiomove(sce->cur, n, uio);
					if (error)
						break;
					sce->cur += n;
					sce->ra_wb_used -= n;
					if (sce->cur == sce->limit)
						sce->cur = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was full, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = (sce->limit - sce->ibuf)
					    - sce->ra_wb_used;
					usbd_setup_xfer(xfer,
					    sce->pipeh, sce, NULL,
					    min(n, sce->ra_wb_xferlen),
					    USBD_NO_COPY, USBD_NO_TIMEOUT,
					    ugen_bulkra_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try
						 * again at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain synchronous bulk read, UGEN_BBSIZE at a time. */
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (ENOMEM);
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
			tn = n;
			err = usbd_bulk_transfer(
				  xfer, sce->pipeh,
				  sce->state & UGEN_SHORT_OK ?
				      USBD_SHORT_XFER_OK : 0,
				  sce->timeout, sc->sc_buffer, &tn, "ugenrb");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
			DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
			error = uiomove(sc->sc_buffer, tn, uio);
			/* A short transfer ends the read. */
			if (error || tn < n)
				break;
		}
		usbd_free_xfer(xfer);
		break;
	case UE_ISOCHRONOUS:
		mutex_enter(&sc->sc_lock);
		/* Wait until the completion callback has advanced fill. */
		while (sce->cur == sce->fill) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			/* "ugenri" */
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}

		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
			/* Copy up to the wrap point, then wrap. */
			if(sce->fill > sce->cur)
				n = min(sce->fill - sce->cur, uio->uio_resid);
			else
				n = min(sce->limit - sce->cur, uio->uio_resid);

			DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sce->cur, n, uio);
			if (error)
				break;
			sce->cur += n;
			if (sce->cur >= sce->limit)
				sce->cur = sce->ibuf;
		}
		mutex_exit(&sc->sc_lock);
		break;


	default:
		return (ENXIO);
	}
	return (error);
}
765 
766 int
767 ugenread(dev_t dev, struct uio *uio, int flag)
768 {
769 	int endpt = UGENENDPOINT(dev);
770 	struct ugen_softc *sc;
771 	int error;
772 
773 	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
774 	if (sc == NULL)
775 		return ENXIO;
776 
777 	mutex_enter(&sc->sc_lock);
778 	sc->sc_refcnt++;
779 	mutex_exit(&sc->sc_lock);
780 
781 	error = ugen_do_read(sc, endpt, uio, flag);
782 
783 	mutex_enter(&sc->sc_lock);
784 	if (--sc->sc_refcnt < 0)
785 		usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
786 	mutex_exit(&sc->sc_lock);
787 
788 	return (error);
789 }
790 
/*
 * Common write path (called from ugenwrite() with a device reference
 * held).  Behavior depends on the endpoint's transfer type:
 *
 *  - UE_BULK with UGEN_BULK_WB set: copy user data into the
 *    write-behind ring buffer and, if the transfer chain stopped on an
 *    empty buffer, refill the xfer's DMA buffer from the ring and
 *    restart it (drained by ugen_bulkwb_intr()).
 *  - UE_BULK otherwise: synchronous bulk transfers out of
 *    sc->sc_buffer, UGEN_BBSIZE at a time.
 *  - UE_INTERRUPT: synchronous interrupt transfers, one max-packet at
 *    a time.
 *
 * Returns 0 or an errno (EIO, EWOULDBLOCK, EINTR, ETIMEDOUT, ENXIO for
 * unsupported endpoint types; isochronous writes are not supported).
 */
Static int
ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
	int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
	u_int32_t n;
	int error = 0;
	u_int32_t tn;
	char *dbuf;
	usbd_xfer_handle xfer;
	usbd_status err;

	DPRINTFN(5, ("%s: ugenwrite: %d\n", device_xname(sc->sc_dev), endpt));

	if (sc->sc_dying)
		return (EIO);

	/* Writes to the control endpoint are done through ioctl. */
	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenwrite: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenwrite: no pipe\n");
		return (EIO);
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_BULK:
		if (sce->state & UGEN_BULK_WB) {
			DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
			    flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return (EWOULDBLOCK);
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for the ring to have free space. */
				while (sce->ra_wb_used ==
				       sce->limit - sce->ibuf) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenwrite: sleep on %p\n",
						  sce));
					/* "ugenwb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
						 ("ugenwrite: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data from the process. */
				while (uio->uio_resid > 0 &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = min(uio->uio_resid,
						(sce->limit - sce->ibuf)
						 - sce->ra_wb_used);
					/* Don't write past the wrap point. */
					n = min(n, sce->limit - sce->fill);
					error = uiomove(sce->fill, n, uio);
					if (error)
						break;
					sce->fill += n;
					sce->ra_wb_used += n;
					if (sce->fill == sce->limit)
						sce->fill = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was empty, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used > 0) {
					/*
					 * Stage up to one xferlen of ring
					 * data into the xfer's DMA buffer,
					 * handling the wrap in two copies.
					 */
					dbuf = (char *)usbd_get_buffer(xfer);
					n = min(sce->ra_wb_used,
						sce->ra_wb_xferlen);
					tn = min(n, sce->limit - sce->cur);
					memcpy(dbuf, sce->cur, tn);
					dbuf += tn;
					if (n - tn > 0)
						memcpy(dbuf, sce->ibuf,
						       n - tn);
					usbd_setup_xfer(xfer,
					    sce->pipeh, sce, NULL, n,
					    USBD_NO_COPY, USBD_NO_TIMEOUT,
					    ugen_bulkwb_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try again
						 * at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain synchronous bulk write, UGEN_BBSIZE at a time. */
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (EIO);
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_bulk_transfer(xfer, sce->pipeh, 0,
				  sce->timeout, sc->sc_buffer, &n,"ugenwb");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_free_xfer(xfer);
		break;
	case UE_INTERRUPT:
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (EIO);
		/* One max-packet-sized interrupt transfer per iteration. */
		while ((n = min(UGETW(sce->edesc->wMaxPacketSize),
		    uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_intr_transfer(xfer, sce->pipeh, 0,
			    sce->timeout, sc->sc_buffer, &n, "ugenwi");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_free_xfer(xfer);
		break;
	default:
		return (ENXIO);
	}
	return (error);
}
957 
958 int
959 ugenwrite(dev_t dev, struct uio *uio, int flag)
960 {
961 	int endpt = UGENENDPOINT(dev);
962 	struct ugen_softc *sc;
963 	int error;
964 
965 	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
966 	if (sc == NULL)
967 		return ENXIO;
968 
969 	mutex_enter(&sc->sc_lock);
970 	sc->sc_refcnt++;
971 	mutex_exit(&sc->sc_lock);
972 
973 	error = ugen_do_write(sc, endpt, uio, flag);
974 
975 	mutex_enter(&sc->sc_lock);
976 	if (--sc->sc_refcnt < 0)
977 		usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
978 	mutex_exit(&sc->sc_lock);
979 
980 	return (error);
981 }
982 
983 int
984 ugen_activate(device_t self, enum devact act)
985 {
986 	struct ugen_softc *sc = device_private(self);
987 
988 	switch (act) {
989 	case DVACT_DEACTIVATE:
990 		sc->sc_dying = 1;
991 		return 0;
992 	default:
993 		return EOPNOTSUPP;
994 	}
995 }
996 
/*
 * Detach: mark the device dying, abort every open pipe so sleeping
 * transfers complete, wake sleeping readers, wait for all in-flight
 * read/write/ioctl users to drain (sc_refcnt), revoke the device
 * nodes, and finally destroy the per-endpoint and per-device
 * synchronisation objects.
 */
int
ugen_detach(device_t self, int flags)
{
	struct ugen_softc *sc = device_private(self);
	struct ugen_endpoint *sce;
	int i, dir;
	int maj, mn;

	DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));

	sc->sc_dying = 1;
	pmf_device_deregister(self);
	/* Abort all pipes.  Causes processes waiting for transfer to wake. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			if (sce && sce->pipeh)
				usbd_abort_pipe(sce->pipeh);
		}
	}

	mutex_enter(&sc->sc_lock);
	if (--sc->sc_refcnt >= 0) {
		/*
		 * Wake everyone.
		 * NOTE(review): only the IN-direction condvars are
		 * signalled here; OUT-side sleepers in ugen_do_write()
		 * appear to rely on their timed waits or the pipe aborts
		 * above -- confirm this is sufficient.
		 */
		for (i = 0; i < USB_MAX_ENDPOINTS; i++)
			cv_signal(&sc->sc_endpoints[i][IN].cv);
		/* Wait for processes to go away. */
		usb_detach_wait(sc->sc_dev, &sc->sc_detach_cv, &sc->sc_lock);
	}
	mutex_exit(&sc->sc_lock);

	/* locate the major number */
	maj = cdevsw_lookup_major(&ugen_cdevsw);

	/* Nuke the vnodes for any open instances (calls close). */
	mn = device_unit(self) * USB_MAX_ENDPOINTS;
	vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev,
			   sc->sc_dev);

	/* No users remain; tear down per-endpoint sync objects. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			seldestroy(&sce->rsel);
			cv_destroy(&sce->cv);
		}
	}

	cv_destroy(&sc->sc_detach_cv);
	mutex_destroy(&sc->sc_lock);

	return (0);
}
1051 
/*
 * Completion handler for interrupt-in endpoints: append the received
 * bytes to the endpoint's clist and wake any sleeping reader/poller.
 */
Static void
ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr, usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	u_int32_t count;
	u_char *ibuf;

	/* Pipe is being aborted/closed; drop the completion silently. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugenintr: status=%d\n", status));
		/* Clear the stall asynchronously; we may be in soft interrupt context. */
		if (status == USBD_STALLED)
		    usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	ibuf = sce->ibuf;

	DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
		     xfer, status, count));
	DPRINTFN(5, ("          data = %02x %02x %02x\n",
		     ibuf[0], ibuf[1], ibuf[2]));

	/* Return value deliberately ignored: data is dropped if the queue is full. */
	(void)b_to_q(ibuf, count, &sce->q);

	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1089 
1090 Static void
1091 ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
1092 		usbd_status status)
1093 {
1094 	struct isoreq *req = addr;
1095 	struct ugen_endpoint *sce = req->sce;
1096 	struct ugen_softc *sc = sce->sc;
1097 	u_int32_t count, n;
1098 	int i, isize;
1099 
1100 	/* Return if we are aborting. */
1101 	if (status == USBD_CANCELLED)
1102 		return;
1103 
1104 	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1105 	DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
1106 	    (long)(req - sce->isoreqs), count));
1107 
1108 	/* throw away oldest input if the buffer is full */
1109 	if(sce->fill < sce->cur && sce->cur <= sce->fill + count) {
1110 		sce->cur += count;
1111 		if(sce->cur >= sce->limit)
1112 			sce->cur = sce->ibuf + (sce->limit - sce->cur);
1113 		DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
1114 			     count));
1115 	}
1116 
1117 	isize = UGETW(sce->edesc->wMaxPacketSize);
1118 	for (i = 0; i < UGEN_NISORFRMS; i++) {
1119 		u_int32_t actlen = req->sizes[i];
1120 		char const *tbuf = (char const *)req->dmabuf + isize * i;
1121 
1122 		/* copy data to buffer */
1123 		while (actlen > 0) {
1124 			n = min(actlen, sce->limit - sce->fill);
1125 			memcpy(sce->fill, tbuf, n);
1126 
1127 			tbuf += n;
1128 			actlen -= n;
1129 			sce->fill += n;
1130 			if(sce->fill == sce->limit)
1131 				sce->fill = sce->ibuf;
1132 		}
1133 
1134 		/* setup size for next transfer */
1135 		req->sizes[i] = isize;
1136 	}
1137 
1138 	usbd_setup_isoc_xfer(xfer, sce->pipeh, req, req->sizes, UGEN_NISORFRMS,
1139 			     USBD_NO_COPY, ugen_isoc_rintr);
1140 	(void)usbd_transfer(xfer);
1141 
1142 	mutex_enter(&sc->sc_lock);
1143 	if (sce->state & UGEN_ASLP) {
1144 		sce->state &= ~UGEN_ASLP;
1145 		DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce));
1146 		cv_signal(&sce->cv);
1147 	}
1148 	mutex_exit(&sc->sc_lock);
1149 	selnotify(&sce->rsel, 0, 0);
1150 }
1151 
1152 Static void
1153 ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
1154 		 usbd_status status)
1155 {
1156 	struct ugen_endpoint *sce = addr;
1157 	struct ugen_softc *sc = sce->sc;
1158 	u_int32_t count, n;
1159 	char const *tbuf;
1160 	usbd_status err;
1161 
1162 	/* Return if we are aborting. */
1163 	if (status == USBD_CANCELLED)
1164 		return;
1165 
1166 	if (status != USBD_NORMAL_COMPLETION) {
1167 		DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
1168 		sce->state |= UGEN_RA_WB_STOP;
1169 		if (status == USBD_STALLED)
1170 		    usbd_clear_endpoint_stall_async(sce->pipeh);
1171 		return;
1172 	}
1173 
1174 	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1175 
1176 	/* Keep track of how much is in the buffer. */
1177 	sce->ra_wb_used += count;
1178 
1179 	/* Copy data to buffer. */
1180 	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
1181 	n = min(count, sce->limit - sce->fill);
1182 	memcpy(sce->fill, tbuf, n);
1183 	tbuf += n;
1184 	count -= n;
1185 	sce->fill += n;
1186 	if (sce->fill == sce->limit)
1187 		sce->fill = sce->ibuf;
1188 	if (count > 0) {
1189 		memcpy(sce->fill, tbuf, count);
1190 		sce->fill += count;
1191 	}
1192 
1193 	/* Set up the next request if necessary. */
1194 	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
1195 	if (n > 0) {
1196 		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
1197 		    min(n, sce->ra_wb_xferlen), USBD_NO_COPY,
1198 		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
1199 		err = usbd_transfer(xfer);
1200 		if (err != USBD_IN_PROGRESS) {
1201 			printf("usbd_bulkra_intr: error=%d\n", err);
1202 			/*
1203 			 * The transfer has not been queued.  Setting STOP
1204 			 * will make us try again at the next read.
1205 			 */
1206 			sce->state |= UGEN_RA_WB_STOP;
1207 		}
1208 	}
1209 	else
1210 		sce->state |= UGEN_RA_WB_STOP;
1211 
1212 	mutex_enter(&sc->sc_lock);
1213 	if (sce->state & UGEN_ASLP) {
1214 		sce->state &= ~UGEN_ASLP;
1215 		DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce));
1216 		cv_signal(&sce->cv);
1217 	}
1218 	mutex_exit(&sc->sc_lock);
1219 	selnotify(&sce->rsel, 0, 0);
1220 }
1221 
1222 Static void
1223 ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
1224 		 usbd_status status)
1225 {
1226 	struct ugen_endpoint *sce = addr;
1227 	struct ugen_softc *sc = sce->sc;
1228 	u_int32_t count, n;
1229 	char *tbuf;
1230 	usbd_status err;
1231 
1232 	/* Return if we are aborting. */
1233 	if (status == USBD_CANCELLED)
1234 		return;
1235 
1236 	if (status != USBD_NORMAL_COMPLETION) {
1237 		DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
1238 		sce->state |= UGEN_RA_WB_STOP;
1239 		if (status == USBD_STALLED)
1240 		    usbd_clear_endpoint_stall_async(sce->pipeh);
1241 		return;
1242 	}
1243 
1244 	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1245 
1246 	/* Keep track of how much is in the buffer. */
1247 	sce->ra_wb_used -= count;
1248 
1249 	/* Update buffer pointers. */
1250 	sce->cur += count;
1251 	if (sce->cur >= sce->limit)
1252 		sce->cur = sce->ibuf + (sce->cur - sce->limit);
1253 
1254 	/* Set up next request if necessary. */
1255 	if (sce->ra_wb_used > 0) {
1256 		/* copy data from buffer */
1257 		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
1258 		count = min(sce->ra_wb_used, sce->ra_wb_xferlen);
1259 		n = min(count, sce->limit - sce->cur);
1260 		memcpy(tbuf, sce->cur, n);
1261 		tbuf += n;
1262 		if (count - n > 0)
1263 			memcpy(tbuf, sce->ibuf, count - n);
1264 
1265 		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
1266 		    count, USBD_NO_COPY, USBD_NO_TIMEOUT, ugen_bulkwb_intr);
1267 		err = usbd_transfer(xfer);
1268 		if (err != USBD_IN_PROGRESS) {
1269 			printf("usbd_bulkwb_intr: error=%d\n", err);
1270 			/*
1271 			 * The transfer has not been queued.  Setting STOP
1272 			 * will make us try again at the next write.
1273 			 */
1274 			sce->state |= UGEN_RA_WB_STOP;
1275 		}
1276 	}
1277 	else
1278 		sce->state |= UGEN_RA_WB_STOP;
1279 
1280 	mutex_enter(&sc->sc_lock);
1281 	if (sce->state & UGEN_ASLP) {
1282 		sce->state &= ~UGEN_ASLP;
1283 		DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce));
1284 		cv_signal(&sce->cv);
1285 	}
1286 	mutex_exit(&sc->sc_lock);
1287 	selnotify(&sce->rsel, 0, 0);
1288 }
1289 
1290 Static usbd_status
1291 ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
1292 {
1293 	usbd_interface_handle iface;
1294 	usb_endpoint_descriptor_t *ed;
1295 	usbd_status err;
1296 	struct ugen_endpoint *sce;
1297 	u_int8_t niface, nendpt, endptno, endpt;
1298 	int dir;
1299 
1300 	DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));
1301 
1302 	err = usbd_interface_count(sc->sc_udev, &niface);
1303 	if (err)
1304 		return (err);
1305 	if (ifaceidx < 0 || ifaceidx >= niface)
1306 		return (USBD_INVAL);
1307 
1308 	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1309 	if (err)
1310 		return (err);
1311 	err = usbd_endpoint_count(iface, &nendpt);
1312 	if (err)
1313 		return (err);
1314 	/* XXX should only do this after setting new altno has succeeded */
1315 	for (endptno = 0; endptno < nendpt; endptno++) {
1316 		ed = usbd_interface2endpoint_descriptor(iface,endptno);
1317 		endpt = ed->bEndpointAddress;
1318 		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1319 		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1320 		sce->sc = 0;
1321 		sce->edesc = 0;
1322 		sce->iface = 0;
1323 	}
1324 
1325 	/* change setting */
1326 	err = usbd_set_interface(iface, altno);
1327 	if (err)
1328 		return (err);
1329 
1330 	err = usbd_endpoint_count(iface, &nendpt);
1331 	if (err)
1332 		return (err);
1333 	for (endptno = 0; endptno < nendpt; endptno++) {
1334 		ed = usbd_interface2endpoint_descriptor(iface,endptno);
1335 		KASSERT(ed != NULL);
1336 		endpt = ed->bEndpointAddress;
1337 		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1338 		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1339 		sce->sc = sc;
1340 		sce->edesc = ed;
1341 		sce->iface = iface;
1342 	}
1343 	return (0);
1344 }
1345 
1346 /* Retrieve a complete descriptor for a certain device and index. */
1347 Static usb_config_descriptor_t *
1348 ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
1349 {
1350 	usb_config_descriptor_t *cdesc, *tdesc, cdescr;
1351 	int len;
1352 	usbd_status err;
1353 
1354 	if (index == USB_CURRENT_CONFIG_INDEX) {
1355 		tdesc = usbd_get_config_descriptor(sc->sc_udev);
1356 		len = UGETW(tdesc->wTotalLength);
1357 		if (lenp)
1358 			*lenp = len;
1359 		cdesc = malloc(len, M_TEMP, M_WAITOK);
1360 		memcpy(cdesc, tdesc, len);
1361 		DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
1362 	} else {
1363 		err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
1364 		if (err)
1365 			return (0);
1366 		len = UGETW(cdescr.wTotalLength);
1367 		DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
1368 		if (lenp)
1369 			*lenp = len;
1370 		cdesc = malloc(len, M_TEMP, M_WAITOK);
1371 		err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
1372 		if (err) {
1373 			free(cdesc, M_TEMP);
1374 			return (0);
1375 		}
1376 	}
1377 	return (cdesc);
1378 }
1379 
1380 Static int
1381 ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
1382 {
1383 	usbd_interface_handle iface;
1384 	usbd_status err;
1385 
1386 	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1387 	if (err)
1388 		return (-1);
1389 	return (usbd_get_interface_altindex(iface));
1390 }
1391 
/*
 * ioctl worker.  The first switch handles per-endpoint commands (short
 * transfers, timeouts, bulk read-ahead/write-behind); everything after
 * it is only valid on the control endpoint.  Descriptors returned by
 * ugen_get_cdesc() are owned by this function and freed before return.
 */
Static int
ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
	      void *addr, int flag, struct lwp *l)
{
	struct ugen_endpoint *sce;
	usbd_status err;
	usbd_interface_handle iface;
	struct usb_config_desc *cd;
	usb_config_descriptor_t *cdesc;
	struct usb_interface_desc *id;
	usb_interface_descriptor_t *idesc;
	struct usb_endpoint_desc *ed;
	usb_endpoint_descriptor_t *edesc;
	struct usb_alt_interface *ai;
	struct usb_string_desc *si;
	u_int8_t conf, alt;

	DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
	if (sc->sc_dying)
		return (EIO);

	switch (cmd) {
	case FIONBIO:
		/* All handled in the upper FS layer. */
		return (0);
	case USB_SET_SHORT_XFER:
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		/* This flag only affects read */
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		if (*(int *)addr)
			sce->state |= UGEN_SHORT_OK;
		else
			sce->state &= ~UGEN_SHORT_OK;
		return (0);
	case USB_SET_TIMEOUT:
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL
		    /* XXX this shouldn't happen, but the distinction between
		       input and output pipes isn't clear enough.
		       || sce->pipeh == NULL */
			)
			return (EINVAL);
		/* Timeout in ms; also used for control requests below. */
		sce->timeout = *(int *)addr;
		return (0);
	case USB_SET_BULK_RA:
		/* Enable/disable kernel read-ahead buffering on a bulk-in pipe. */
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return (EINVAL);

		if (*(int *)addr) {
			/* Only turn RA on if it's currently off. */
			if (sce->state & UGEN_BULK_RA)
				return (0);

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return (EINVAL);
			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (sce->ra_wb_xfer == NULL)
				return (ENOMEM);
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			/*
			 * Set up a dmabuf because we reuse the xfer with
			 * the same (max) request length like isoc.
			 */
			if (usbd_alloc_buffer(sce->ra_wb_xfer,
					      sce->ra_wb_xferlen) == 0) {
				usbd_free_xfer(sce->ra_wb_xfer);
				return (ENOMEM);
			}
			/* Ring buffer: fill = producer, cur = consumer. */
			sce->ibuf = malloc(sce->ra_wb_bufsize,
					   M_USBDEV, M_WAITOK);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			sce->state |= UGEN_BULK_RA;
			sce->state &= ~UGEN_RA_WB_STOP;
			/* Now start reading. */
			usbd_setup_xfer(sce->ra_wb_xfer, sce->pipeh, sce,
			    NULL,
			    min(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
			    USBD_NO_COPY, USBD_NO_TIMEOUT,
			    ugen_bulkra_intr);
			err = usbd_transfer(sce->ra_wb_xfer);
			if (err != USBD_IN_PROGRESS) {
				/* Unwind everything allocated above. */
				sce->state &= ~UGEN_BULK_RA;
				free(sce->ibuf, M_USBDEV);
				sce->ibuf = NULL;
				usbd_free_xfer(sce->ra_wb_xfer);
				return (EIO);
			}
		} else {
			/* Only turn RA off if it's currently on. */
			if (!(sce->state & UGEN_BULK_RA))
				return (0);

			sce->state &= ~UGEN_BULK_RA;
			usbd_abort_pipe(sce->pipeh);
			usbd_free_xfer(sce->ra_wb_xfer);
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and drain the buffer
			 * instead.
			 */
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
		return (0);
	case USB_SET_BULK_WB:
		/* Enable/disable kernel write-behind buffering on a bulk-out pipe. */
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return (EINVAL);

		if (*(int *)addr) {
			/* Only turn WB on if it's currently off. */
			if (sce->state & UGEN_BULK_WB)
				return (0);

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return (EINVAL);
			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (sce->ra_wb_xfer == NULL)
				return (ENOMEM);
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			/*
			 * Set up a dmabuf because we reuse the xfer with
			 * the same (max) request length like isoc.
			 */
			if (usbd_alloc_buffer(sce->ra_wb_xfer,
					      sce->ra_wb_xferlen) == 0) {
				usbd_free_xfer(sce->ra_wb_xfer);
				return (ENOMEM);
			}
			sce->ibuf = malloc(sce->ra_wb_bufsize,
					   M_USBDEV, M_WAITOK);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			/* STOP set: the first write() kicks off the stream. */
			sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
		} else {
			/* Only turn WB off if it's currently on. */
			if (!(sce->state & UGEN_BULK_WB))
				return (0);

			sce->state &= ~UGEN_BULK_WB;
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and keep writing to
			 * drain the buffer instead.
			 */
			usbd_abort_pipe(sce->pipeh);
			usbd_free_xfer(sce->ra_wb_xfer);
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
		return (0);
	case USB_SET_BULK_RA_OPT:
	case USB_SET_BULK_WB_OPT:
	{
		/* Configure ring-buffer and request sizes for RA/WB mode. */
		struct usb_bulk_ra_wb_opt *opt;

		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		opt = (struct usb_bulk_ra_wb_opt *)addr;
		if (cmd == USB_SET_BULK_RA_OPT)
			sce = &sc->sc_endpoints[endpt][IN];
		else
			sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		if (opt->ra_wb_buffer_size < 1 ||
		    opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
		    opt->ra_wb_request_size < 1 ||
		    opt->ra_wb_request_size > opt->ra_wb_buffer_size)
			return (EINVAL);
		/*
		 * XXX These changes do not take effect until the
		 * next time RA/WB mode is enabled but they ought to
		 * take effect immediately.
		 */
		sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
		sce->ra_wb_reqsize = opt->ra_wb_request_size;
		return (0);
	}
	default:
		break;
	}

	/* Everything below operates on the device, not on one endpoint. */
	if (endpt != USB_CONTROL_ENDPOINT)
		return (EINVAL);

	switch (cmd) {
#ifdef UGEN_DEBUG
	case USB_SETDEBUG:
		ugendebug = *(int *)addr;
		break;
#endif
	case USB_GET_CONFIG:
		err = usbd_get_config(sc->sc_udev, &conf);
		if (err)
			return (EIO);
		*(int *)addr = conf;
		break;
	case USB_SET_CONFIG:
		if (!(flag & FWRITE))
			return (EPERM);
		err = ugen_set_config(sc, *(int *)addr);
		switch (err) {
		case USBD_NORMAL_COMPLETION:
			break;
		case USBD_IN_USE:
			return (EBUSY);
		default:
			return (EIO);
		}
		break;
	case USB_GET_ALTINTERFACE:
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		idesc = usbd_get_interface_descriptor(iface);
		if (idesc == NULL)
			return (EIO);
		ai->uai_alt_no = idesc->bAlternateSetting;
		break;
	case USB_SET_ALTINTERFACE:
		if (!(flag & FWRITE))
			return (EPERM);
		ai = (struct usb_alt_interface *)addr;
		/* Validate the interface index before changing anything. */
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		err = ugen_set_interface(sc, ai->uai_interface_index,
		    ai->uai_alt_no);
		if (err)
			return (EINVAL);
		break;
	case USB_GET_NO_ALT:
		ai = (struct usb_alt_interface *)addr;
		cdesc = ugen_get_cdesc(sc, ai->uai_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
		if (idesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		ai->uai_alt_no = usbd_get_no_alts(cdesc,
		    idesc->bInterfaceNumber);
		free(cdesc, M_TEMP);
		break;
	case USB_GET_DEVICE_DESC:
		*(usb_device_descriptor_t *)addr =
			*usbd_get_device_descriptor(sc->sc_udev);
		break;
	case USB_GET_CONFIG_DESC:
		cd = (struct usb_config_desc *)addr;
		cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		cd->ucd_desc = *cdesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_INTERFACE_DESC:
		id = (struct usb_interface_desc *)addr;
		cdesc = ugen_get_cdesc(sc, id->uid_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		/* USB_CURRENT_ALT_INDEX means "whatever is selected now". */
		if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
		    id->uid_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, id->uid_interface_index);
		else
			alt = id->uid_alt_index;
		idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
		if (idesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		id->uid_desc = *idesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_ENDPOINT_DESC:
		ed = (struct usb_endpoint_desc *)addr;
		cdesc = ugen_get_cdesc(sc, ed->ued_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
		    ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, ed->ued_interface_index);
		else
			alt = ed->ued_alt_index;
		edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
					alt, ed->ued_endpoint_index);
		if (edesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		ed->ued_desc = *edesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_FULL_DESC:
	{
		/* Copy the complete raw config descriptor to userland. */
		int len;
		struct iovec iov;
		struct uio uio;
		struct usb_full_desc *fd = (struct usb_full_desc *)addr;
		int error;

		cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &len);
		if (cdesc == NULL)
			return (EINVAL);
		if (len > fd->ufd_size)
			len = fd->ufd_size;
		iov.iov_base = (void *)fd->ufd_data;
		iov.iov_len = len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_resid = len;
		uio.uio_offset = 0;
		uio.uio_rw = UIO_READ;
		uio.uio_vmspace = l->l_proc->p_vmspace;
		error = uiomove((void *)cdesc, len, &uio);
		free(cdesc, M_TEMP);
		return (error);
	}
	case USB_GET_STRING_DESC: {
		int len;
		si = (struct usb_string_desc *)addr;
		err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
			  si->usd_language_id, &si->usd_desc, &len);
		if (err)
			return (EINVAL);
		break;
	}
	case USB_DO_REQUEST:
	{
		/* Pass an arbitrary control request through to the device. */
		struct usb_ctl_request *ur = (void *)addr;
		int len = UGETW(ur->ucr_request.wLength);
		struct iovec iov;
		struct uio uio;
		void *ptr = 0;
		usbd_status xerr;
		int error = 0;

		if (!(flag & FWRITE))
			return (EPERM);
		/* Avoid requests that would damage the bus integrity. */
		if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_CONFIG) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
		     ur->ucr_request.bRequest == UR_SET_INTERFACE))
			return (EINVAL);

		if (len < 0 || len > 32767)
			return (EINVAL);
		if (len != 0) {
			iov.iov_base = (void *)ur->ucr_data;
			iov.iov_len = len;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_resid = len;
			uio.uio_offset = 0;
			/* Direction of the data stage decides copyin vs copyout. */
			uio.uio_rw =
				ur->ucr_request.bmRequestType & UT_READ ?
				UIO_READ : UIO_WRITE;
			uio.uio_vmspace = l->l_proc->p_vmspace;
			ptr = malloc(len, M_TEMP, M_WAITOK);
			if (uio.uio_rw == UIO_WRITE) {
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
		/* Use the control endpoint's configured timeout. */
		sce = &sc->sc_endpoints[endpt][IN];
		xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
			  ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
		if (xerr) {
			error = EIO;
			goto ret;
		}
		if (len != 0) {
			if (uio.uio_rw == UIO_READ) {
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
	ret:
		if (ptr)
			free(ptr, M_TEMP);
		return (error);
	}
	case USB_GET_DEVICEINFO:
		usbd_fill_deviceinfo(sc->sc_udev,
				     (struct usb_device_info *)addr, 0);
		break;
#ifdef COMPAT_30
	case USB_GET_DEVICEINFO_OLD:
		usbd_fill_deviceinfo_old(sc->sc_udev,
					 (struct usb_device_info_old *)addr, 0);

		break;
#endif
	default:
		return (EINVAL);
	}
	return (0);
}
1819 
1820 int
1821 ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1822 {
1823 	int endpt = UGENENDPOINT(dev);
1824 	struct ugen_softc *sc;
1825 	int error;
1826 
1827 	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
1828 	if (sc == NULL)
1829 		return ENXIO;
1830 
1831 	sc->sc_refcnt++;
1832 	error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
1833 	if (--sc->sc_refcnt < 0)
1834 		usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
1835 	return (error);
1836 }
1837 
/*
 * poll(2) entry point.  Readiness criteria depend on the endpoint's
 * transfer type: interrupt uses the clist fill level, isoc compares
 * the ring-buffer producer/consumer pointers, and bulk is only
 * meaningful when read-ahead/write-behind buffering is enabled —
 * otherwise we pretend readiness because we cannot know.
 */
int
ugenpoll(dev_t dev, int events, struct lwp *l)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce_in, *sce_out;
	int revents = 0;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (sc->sc_dying)
		return (POLLHUP);

	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
	if (sce_in == NULL && sce_out == NULL)
		return (POLLERR);
#ifdef DIAGNOSTIC
	if (!sce_in->edesc && !sce_out->edesc) {
		printf("ugenpoll: no edesc\n");
		return (POLLERR);
	}
	/* It's possible to have only one pipe open. */
	if (!sce_in->pipeh && !sce_out->pipeh) {
		printf("ugenpoll: no pipe\n");
		return (POLLERR);
	}
#endif

	/* sc_lock serializes against the transfer completion handlers. */
	mutex_enter(&sc->sc_lock);
	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (sce_in->q.c_cc > 0)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_ISOCHRONOUS:
			if (sce_in->cur != sce_in->fill)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_BULK:
			if (sce_in->state & UGEN_BULK_RA) {
				if (sce_in->ra_wb_used > 0)
					revents |= events &
					    (POLLIN | POLLRDNORM);
				else
					selrecord(l, &sce_in->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			 revents |= events & (POLLIN | POLLRDNORM);
			 break;
		default:
			break;
		}
	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX unimplemented */
			break;
		case UE_BULK:
			if (sce_out->state & UGEN_BULK_WB) {
				/* Writable while the ring buffer has free space. */
				if (sce_out->ra_wb_used <
				    sce_out->limit - sce_out->ibuf)
					revents |= events &
					    (POLLOUT | POLLWRNORM);
				else
					selrecord(l, &sce_out->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			 revents |= events & (POLLOUT | POLLWRNORM);
			 break;
		default:
			break;
		}

	mutex_exit(&sc->sc_lock);

	return (revents);
}
1936 
1937 static void
1938 filt_ugenrdetach(struct knote *kn)
1939 {
1940 	struct ugen_endpoint *sce = kn->kn_hook;
1941 	struct ugen_softc *sc = sce->sc;
1942 
1943 	mutex_enter(&sc->sc_lock);
1944 	SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
1945 	mutex_exit(&sc->sc_lock);
1946 }
1947 
1948 static int
1949 filt_ugenread_intr(struct knote *kn, long hint)
1950 {
1951 	struct ugen_endpoint *sce = kn->kn_hook;
1952 
1953 	kn->kn_data = sce->q.c_cc;
1954 	return (kn->kn_data > 0);
1955 }
1956 
1957 static int
1958 filt_ugenread_isoc(struct knote *kn, long hint)
1959 {
1960 	struct ugen_endpoint *sce = kn->kn_hook;
1961 
1962 	if (sce->cur == sce->fill)
1963 		return (0);
1964 
1965 	if (sce->cur < sce->fill)
1966 		kn->kn_data = sce->fill - sce->cur;
1967 	else
1968 		kn->kn_data = (sce->limit - sce->cur) +
1969 		    (sce->fill - sce->ibuf);
1970 
1971 	return (1);
1972 }
1973 
1974 static int
1975 filt_ugenread_bulk(struct knote *kn, long hint)
1976 {
1977 	struct ugen_endpoint *sce = kn->kn_hook;
1978 
1979 	if (!(sce->state & UGEN_BULK_RA))
1980 		/*
1981 		 * We have no easy way of determining if a read will
1982 		 * yield any data or a write will happen.
1983 		 * So, emulate "seltrue".
1984 		 */
1985 		return (filt_seltrue(kn, hint));
1986 
1987 	if (sce->ra_wb_used == 0)
1988 		return (0);
1989 
1990 	kn->kn_data = sce->ra_wb_used;
1991 
1992 	return (1);
1993 }
1994 
1995 static int
1996 filt_ugenwrite_bulk(struct knote *kn, long hint)
1997 {
1998 	struct ugen_endpoint *sce = kn->kn_hook;
1999 
2000 	if (!(sce->state & UGEN_BULK_WB))
2001 		/*
2002 		 * We have no easy way of determining if a read will
2003 		 * yield any data or a write will happen.
2004 		 * So, emulate "seltrue".
2005 		 */
2006 		return (filt_seltrue(kn, hint));
2007 
2008 	if (sce->ra_wb_used == sce->limit - sce->ibuf)
2009 		return (0);
2010 
2011 	kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;
2012 
2013 	return (1);
2014 }
2015 
/* kqueue filter: interrupt-in endpoints (data buffered in the clist). */
static const struct filterops ugenread_intr_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_intr };

/* kqueue filter: isochronous-in endpoints (data in the ring buffer). */
static const struct filterops ugenread_isoc_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_isoc };

/* kqueue filter: bulk-in endpoints (read-ahead buffer, else seltrue). */
static const struct filterops ugenread_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_bulk };

/* kqueue filter: bulk-out endpoints (write-behind buffer, else seltrue). */
static const struct filterops ugenwrite_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenwrite_bulk };
2027 
2028 int
2029 ugenkqfilter(dev_t dev, struct knote *kn)
2030 {
2031 	struct ugen_softc *sc;
2032 	struct ugen_endpoint *sce;
2033 	struct klist *klist;
2034 
2035 	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
2036 	if (sc == NULL)
2037 		return ENXIO;
2038 
2039 	if (sc->sc_dying)
2040 		return (ENXIO);
2041 
2042 	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
2043 		return ENODEV;
2044 
2045 	switch (kn->kn_filter) {
2046 	case EVFILT_READ:
2047 		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
2048 		if (sce == NULL)
2049 			return (EINVAL);
2050 
2051 		klist = &sce->rsel.sel_klist;
2052 		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
2053 		case UE_INTERRUPT:
2054 			kn->kn_fop = &ugenread_intr_filtops;
2055 			break;
2056 		case UE_ISOCHRONOUS:
2057 			kn->kn_fop = &ugenread_isoc_filtops;
2058 			break;
2059 		case UE_BULK:
2060 			kn->kn_fop = &ugenread_bulk_filtops;
2061 			break;
2062 		default:
2063 			return (EINVAL);
2064 		}
2065 		break;
2066 
2067 	case EVFILT_WRITE:
2068 		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
2069 		if (sce == NULL)
2070 			return (EINVAL);
2071 
2072 		klist = &sce->rsel.sel_klist;
2073 		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
2074 		case UE_INTERRUPT:
2075 		case UE_ISOCHRONOUS:
2076 			/* XXX poll doesn't support this */
2077 			return (EINVAL);
2078 
2079 		case UE_BULK:
2080 			kn->kn_fop = &ugenwrite_bulk_filtops;
2081 			break;
2082 		default:
2083 			return (EINVAL);
2084 		}
2085 		break;
2086 
2087 	default:
2088 		return (EINVAL);
2089 	}
2090 
2091 	kn->kn_hook = sce;
2092 
2093 	mutex_enter(&sc->sc_lock);
2094 	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
2095 	mutex_exit(&sc->sc_lock);
2096 
2097 	return (0);
2098 }
2099