xref: /netbsd-src/sys/dev/usb/ugen.c (revision 6a493d6bc668897c91594964a732d38505b70cbb)
1 /*	$NetBSD: ugen.c,v 1.122 2013/01/05 01:30:16 christos Exp $	*/
2 
3 /*
4  * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Lennart Augustsson (lennart@augustsson.net) at
9  * Carlstedt Research & Technology.
10  *
11  * Copyright (c) 2006 BBN Technologies Corp.  All rights reserved.
12  * Effort sponsored in part by the Defense Advanced Research Projects
13  * Agency (DARPA) and the Department of the Interior National Business
14  * Center under agreement number NBCHC050166.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.122 2013/01/05 01:30:16 christos Exp $");
41 
42 #ifdef _KERNEL_OPT
43 #include "opt_compat_netbsd.h"
44 #endif
45 
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/malloc.h>
50 #include <sys/device.h>
51 #include <sys/ioctl.h>
52 #include <sys/conf.h>
53 #include <sys/tty.h>
54 #include <sys/file.h>
55 #include <sys/select.h>
56 #include <sys/proc.h>
57 #include <sys/vnode.h>
58 #include <sys/poll.h>
59 
60 #include <dev/usb/usb.h>
61 #include <dev/usb/usbdi.h>
62 #include <dev/usb/usbdi_util.h>
63 
64 #ifdef UGEN_DEBUG
65 #define DPRINTF(x)	if (ugendebug) printf x
66 #define DPRINTFN(n,x)	if (ugendebug>(n)) printf x
67 int	ugendebug = 0;
68 #else
69 #define DPRINTF(x)
70 #define DPRINTFN(n,x)
71 #endif
72 
73 #define	UGEN_CHUNK	128	/* chunk size for read */
74 #define	UGEN_IBSIZE	1020	/* buffer size */
75 #define	UGEN_BBSIZE	1024
76 
77 #define UGEN_NISOREQS	4	/* number of outstanding xfer requests */
78 #define UGEN_NISORFRMS	8	/* number of transactions per req */
79 #define UGEN_NISOFRAMES	(UGEN_NISORFRMS * UGEN_NISOREQS)
80 
81 #define UGEN_BULK_RA_WB_BUFSIZE	16384		/* default buffer size */
82 #define UGEN_BULK_RA_WB_BUFMAX	(1 << 20)	/* maximum allowed buffer */
83 
/*
 * Per-endpoint, per-direction state.  One of these exists for every
 * (endpoint address, direction) pair; ugen_set_config() wipes the
 * front portion of the structure but preserves rsel/cv (see the
 * UGEN_ENDPOINT_NONZERO_CRUFT offsetof trick below).
 */
struct ugen_endpoint {
	struct ugen_softc *sc;	/* back-pointer to owning softc */
	usb_endpoint_descriptor_t *edesc; /* NULL when not configured */
	usbd_interface_handle iface;	/* interface the endpoint lives on */
	int state;		/* UGEN_* flag bits below */
#define	UGEN_ASLP	0x02	/* waiting for data */
#define UGEN_SHORT_OK	0x04	/* short xfers are OK */
#define UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
#define UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
#define UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
	usbd_pipe_handle pipeh;	/* open pipe; NULL while closed */
	struct clist q;		/* interrupt-in character queue */
	u_char *ibuf;		/* start of buffer (circular for isoc) */
	u_char *fill;		/* location for input (isoc) */
	u_char *limit;		/* end of circular buffer (isoc) */
	u_char *cur;		/* current read location (isoc) */
	u_int32_t timeout;	/* xfer timeout; USBD_NO_TIMEOUT = forever */
	u_int32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
	u_int32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
	u_int32_t ra_wb_used;	 /* how much is in buffer */
	u_int32_t ra_wb_xferlen; /* current xfer length for RA/WB */
	usbd_xfer_handle ra_wb_xfer;
	struct isoreq {
		struct ugen_endpoint *sce;	/* back-pointer for callback */
		usbd_xfer_handle xfer;
		void *dmabuf;
		u_int16_t sizes[UGEN_NISORFRMS]; /* per-frame lengths */
	} isoreqs[UGEN_NISOREQS];
	/* Keep these last; we don't overwrite them in ugen_set_config() */
#define UGEN_ENDPOINT_NONZERO_CRUFT	offsetof(struct ugen_endpoint, rsel)
	struct selinfo rsel;	/* poll/select/kqueue state */
	kcondvar_t cv;		/* sleepers waiting for data/space */
};
117 
/*
 * Per-device softc.  Open/close state is tracked per endpoint address;
 * the endpoint table is indexed [address][direction] using the OUT/IN
 * constants defined below.
 */
struct ugen_softc {
	device_t sc_dev;		/* base device */
	usbd_device_handle sc_udev;

	kmutex_t		sc_lock;	/* protects sleep/wake state */
	kcondvar_t		sc_detach_cv;	/* detach waits here for refs */

	char sc_is_open[USB_MAX_ENDPOINTS];	/* nonzero while endpoint open */
	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
#define OUT 0
#define IN  1

	int sc_refcnt;		/* active read/write/ioctl callers */
	char sc_buffer[UGEN_BBSIZE];	/* bounce buffer for bulk/intr I/O */
	u_char sc_dying;	/* set when device is being detached */
};
134 
135 dev_type_open(ugenopen);
136 dev_type_close(ugenclose);
137 dev_type_read(ugenread);
138 dev_type_write(ugenwrite);
139 dev_type_ioctl(ugenioctl);
140 dev_type_poll(ugenpoll);
141 dev_type_kqfilter(ugenkqfilter);
142 
/*
 * Character device entry points.  ugen has no stop, tty or mmap
 * support; D_OTHER marks it as neither block nor tty device.
 */
const struct cdevsw ugen_cdevsw = {
	ugenopen, ugenclose, ugenread, ugenwrite, ugenioctl,
	nostop, notty, ugenpoll, nommap, ugenkqfilter, D_OTHER,
};
147 
148 Static void ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr,
149 		     usbd_status status);
150 Static void ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
151 			    usbd_status status);
152 Static void ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
153 			     usbd_status status);
154 Static void ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
155 			     usbd_status status);
156 Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
157 Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
158 Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
159 			 void *, int, struct lwp *);
160 Static int ugen_set_config(struct ugen_softc *sc, int configno);
161 Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *sc,
162 					       int index, int *lenp);
163 Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
164 Static int ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx);
165 
166 #define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
167 #define UGENENDPOINT(n) (minor(n) & 0xf)
168 #define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))
169 
170 int             ugen_match(device_t, cfdata_t, void *);
171 void            ugen_attach(device_t, device_t, void *);
172 int             ugen_detach(device_t, int);
173 int             ugen_activate(device_t, enum devact);
174 extern struct cfdriver ugen_cd;
175 CFATTACH_DECL_NEW(ugen, sizeof(struct ugen_softc), ugen_match, ugen_attach, ugen_detach, ugen_activate);
176 
177 /* toggle to control attach priority. -1 means "let autoconf decide" */
178 int ugen_override = -1;
179 
180 int
181 ugen_match(device_t parent, cfdata_t match, void *aux)
182 {
183 	struct usb_attach_arg *uaa = aux;
184 	int override;
185 
186 	if (ugen_override != -1)
187 		override = ugen_override;
188 	else
189 		override = match->cf_flags & 1;
190 
191 	if (override)
192 		return (UMATCH_HIGHEST);
193 	else if (uaa->usegeneric)
194 		return (UMATCH_GENERIC);
195 	else
196 		return (UMATCH_NONE);
197 }
198 
/*
 * Attach: initialise locks and per-endpoint selinfo/condvars, select
 * configuration index 0 (the ugen default) and build the local
 * endpoint table for it.  On any failure sc_dying is set so all
 * subsequent entry points bail out with ENXIO/EIO.
 */
void
ugen_attach(device_t parent, device_t self, void *aux)
{
	struct ugen_softc *sc = device_private(self);
	struct usb_attach_arg *uaa = aux;
	usbd_device_handle udev;
	char *devinfop;
	usbd_status err;
	int i, dir, conf;

	aprint_naive("\n");
	aprint_normal("\n");

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_USB);
	cv_init(&sc->sc_detach_cv, "ugendet");

	devinfop = usbd_devinfo_alloc(uaa->device, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_dev = self;
	sc->sc_udev = udev = uaa->device;

	/*
	 * Initialise the parts of every endpoint that persist across
	 * configuration changes (ugen_set_config() leaves these alone).
	 */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce;

			sce = &sc->sc_endpoints[i][dir];
			selinit(&sce->rsel);
			cv_init(&sce->cv, "ugensce");
		}
	}

	/* First set configuration index 0, the default one for ugen. */
	err = usbd_set_config_index(udev, 0, 0);
	if (err) {
		aprint_error_dev(self,
		    "setting configuration index 0 failed\n");
		sc->sc_dying = 1;
		return;
	}
	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;

	/* Set up all the local state for this configuration. */
	err = ugen_set_config(sc, conf);
	if (err) {
		aprint_error_dev(self, "setting configuration %d failed\n",
		    conf);
		sc->sc_dying = 1;
		return;
	}

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev,
			   sc->sc_dev);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;
}
259 
/*
 * Switch the device to configuration `configno' and rebuild the
 * per-endpoint table (sc/edesc/iface pointers) from the descriptors.
 * Fails with USBD_IN_USE if any non-control endpoint is still open.
 * Returns USBD_NORMAL_COMPLETION or a usbd_status error.
 */
Static int
ugen_set_config(struct ugen_softc *sc, int configno)
{
	usbd_device_handle dev = sc->sc_udev;
	usb_config_descriptor_t *cdesc;
	usbd_interface_handle iface;
	usb_endpoint_descriptor_t *ed;
	struct ugen_endpoint *sce;
	u_int8_t niface, nendpt;
	int ifaceno, endptno, endpt;
	usbd_status err;
	int dir, i;

	DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
		    device_xname(sc->sc_dev), configno, sc));

	/*
	 * We start at 1, not 0, because we don't care whether the
	 * control endpoint is open or not. It is always present.
	 */
	for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
		if (sc->sc_is_open[endptno]) {
			DPRINTFN(1,
			     ("ugen_set_config: %s - endpoint %d is open\n",
			      device_xname(sc->sc_dev), endptno));
			return (USBD_IN_USE);
		}

	/* Avoid setting the current value. */
	cdesc = usbd_get_config_descriptor(dev);
	if (!cdesc || cdesc->bConfigurationValue != configno) {
		err = usbd_set_config_no(dev, configno, 1);
		if (err)
			return (err);
	}

	err = usbd_interface_count(dev, &niface);
	if (err)
		return (err);

	/* Clear out the old info, but leave the selinfo and cv initialised. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			/* Only wipe up to rsel; see NONZERO_CRUFT above. */
			memset(sce, 0, UGEN_ENDPOINT_NONZERO_CRUFT);
		}
	}

	/* Record descriptor/interface pointers for every endpoint. */
	for (ifaceno = 0; ifaceno < niface; ifaceno++) {
		DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
		err = usbd_device2interface_handle(dev, ifaceno, &iface);
		if (err)
			return (err);
		err = usbd_endpoint_count(iface, &nendpt);
		if (err)
			return (err);
		for (endptno = 0; endptno < nendpt; endptno++) {
			ed = usbd_interface2endpoint_descriptor(iface,endptno);
			KASSERT(ed != NULL);
			endpt = ed->bEndpointAddress;
			dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
			sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
			DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
				    "(%d,%d), sce=%p\n",
				    endptno, endpt, UE_GET_ADDR(endpt),
				    UE_GET_DIR(endpt), sce));
			sce->sc = sc;
			sce->edesc = ed;
			sce->iface = iface;
		}
	}
	return (USBD_NORMAL_COMPLETION);
}
333 
/*
 * Open an endpoint device node.  The control endpoint allows multiple
 * concurrent opens; all other endpoints are exclusive.  For each
 * requested direction the pipe is opened according to its transfer
 * type: interrupt-in gets a clist-backed buffer and an interrupt pipe,
 * bulk just opens the pipe (RA/WB buffers are set up later via ioctl),
 * and isochronous-in allocates a circular buffer plus a ring of
 * outstanding transfer requests.
 */
int
ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct ugen_softc *sc;
	int unit = UGENUNIT(dev);
	int endpt = UGENENDPOINT(dev);
	usb_endpoint_descriptor_t *edesc;
	struct ugen_endpoint *sce;
	int dir, isize;
	usbd_status err;
	usbd_xfer_handle xfer;
	void *tbuf;
	int i, j;

	sc = device_lookup_private(&ugen_cd, unit);
	if (sc == NULL)
		return ENXIO;

	DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
		     flag, mode, unit, endpt));

	/* NOTE(review): sc == NULL was already checked above; only the
	 * sc_dying test is live here. */
	if (sc == NULL || sc->sc_dying)
		return (ENXIO);

	/* The control endpoint allows multiple opens. */
	if (endpt == USB_CONTROL_ENDPOINT) {
		sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
		return (0);
	}

	if (sc->sc_is_open[endpt])
		return (EBUSY);

	/* Make sure there are pipes for all directions. */
	for (dir = OUT; dir <= IN; dir++) {
		if (flag & (dir == OUT ? FWRITE : FREAD)) {
			sce = &sc->sc_endpoints[endpt][dir];
			if (sce == 0 || sce->edesc == 0)
				return (ENXIO);
		}
	}

	/* Actually open the pipes. */
	/* XXX Should back out properly if it fails. */
	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		sce->state = 0;
		sce->timeout = USBD_NO_TIMEOUT;
		DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
			     sc, endpt, dir, sce));
		edesc = sce->edesc;
		switch (edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (dir == OUT) {
				/* Interrupt-out: plain pipe, no buffering. */
				err = usbd_open_pipe(sce->iface,
				    edesc->bEndpointAddress, 0, &sce->pipeh);
				if (err)
					return (EIO);
				break;
			}
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return (EINVAL);
			sce->ibuf = malloc(isize, M_USBDEV, M_WAITOK);
			DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
				     endpt, isize));
			/* Incoming packets are queued into this clist. */
			if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1) {
				free(sce->ibuf, M_USBDEV);
				sce->ibuf = NULL;
				return (ENOMEM);
			}
			err = usbd_open_pipe_intr(sce->iface,
				  edesc->bEndpointAddress,
				  USBD_SHORT_XFER_OK, &sce->pipeh, sce,
				  sce->ibuf, isize, ugenintr,
				  USBD_DEFAULT_INTERVAL);
			if (err) {
				clfree(&sce->q);
				free(sce->ibuf, M_USBDEV);
				sce->ibuf = NULL;
				return (EIO);
			}
			DPRINTFN(5, ("ugenopen: interrupt open done\n"));
			break;
		case UE_BULK:
			err = usbd_open_pipe(sce->iface,
				  edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err)
				return (EIO);
			sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
			/*
			 * Use request size for non-RA/WB transfers
			 * as the default.
			 */
			sce->ra_wb_reqsize = UGEN_BBSIZE;
			break;
		case UE_ISOCHRONOUS:
			if (dir == OUT)
				return (EINVAL);
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return (EINVAL);
			/* Circular buffer holding UGEN_NISOFRAMES frames. */
			sce->ibuf = malloc(isize * UGEN_NISOFRAMES,
				M_USBDEV, M_WAITOK);
			sce->cur = sce->fill = sce->ibuf;
			sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
			DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
				     endpt, isize));
			err = usbd_open_pipe(sce->iface,
				  edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err) {
				free(sce->ibuf, M_USBDEV);
				sce->ibuf = NULL;
				return (EIO);
			}
			/* Queue a ring of always-outstanding iso requests. */
			for(i = 0; i < UGEN_NISOREQS; ++i) {
				sce->isoreqs[i].sce = sce;
				xfer = usbd_alloc_xfer(sc->sc_udev);
				if (xfer == 0)
					goto bad;
				sce->isoreqs[i].xfer = xfer;
				tbuf = usbd_alloc_buffer
					(xfer, isize * UGEN_NISORFRMS);
				if (tbuf == 0) {
					/* Count the xfer so `bad' frees it. */
					i++;
					goto bad;
				}
				sce->isoreqs[i].dmabuf = tbuf;
				for(j = 0; j < UGEN_NISORFRMS; ++j)
					sce->isoreqs[i].sizes[j] = isize;
				usbd_setup_isoc_xfer
					(xfer, sce->pipeh, &sce->isoreqs[i],
					 sce->isoreqs[i].sizes,
					 UGEN_NISORFRMS, USBD_NO_COPY,
					 ugen_isoc_rintr);
				(void)usbd_transfer(xfer);
			}
			DPRINTFN(5, ("ugenopen: isoc open done\n"));
			break;
		bad:
			/* Unwind iso setup: xfers 0..i-1 were allocated. */
			while (--i >= 0) /* implicit buffer free */
				usbd_free_xfer(sce->isoreqs[i].xfer);
			usbd_close_pipe(sce->pipeh);
			sce->pipeh = NULL;
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
			return (ENOMEM);
		case UE_CONTROL:
			/* Non-default control endpoints are not supported. */
			sce->timeout = USBD_DEFAULT_TIMEOUT;
			return (EINVAL);
		}
	}
	sc->sc_is_open[endpt] = 1;
	return (0);
}
491 
/*
 * Close an endpoint node: abort and close each open pipe for the
 * directions this descriptor had access to, then release the
 * per-transfer-type resources (clist, iso xfer ring, RA/WB xfer)
 * and the input buffer.
 */
int
ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	int dir;
	int i;

	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
		     flag, mode, UGENUNIT(dev), endpt));

#ifdef DIAGNOSTIC
	if (!sc->sc_is_open[endpt]) {
		printf("ugenclose: not open\n");
		return (EINVAL);
	}
#endif

	if (endpt == USB_CONTROL_ENDPOINT) {
		DPRINTFN(5, ("ugenclose: close control\n"));
		sc->sc_is_open[endpt] = 0;
		return (0);
	}

	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		if (sce == NULL || sce->pipeh == NULL)
			continue;
		DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
			     endpt, dir, sce));

		/* Abort first so in-flight transfers complete/cancel. */
		usbd_abort_pipe(sce->pipeh);
		usbd_close_pipe(sce->pipeh);
		sce->pipeh = NULL;

		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			/* Drain and free the interrupt-in clist. */
			ndflush(&sce->q, sce->q.c_cc);
			clfree(&sce->q);
			break;
		case UE_ISOCHRONOUS:
			for (i = 0; i < UGEN_NISOREQS; ++i)
				usbd_free_xfer(sce->isoreqs[i].xfer);
			break;
		case UE_BULK:
			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB))
				/* ibuf freed below */
				usbd_free_xfer(sce->ra_wb_xfer);
			break;
		default:
			break;
		}

		if (sce->ibuf != NULL) {
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
	}
	sc->sc_is_open[endpt] = 0;

	return (0);
}
561 
/*
 * Read from an IN endpoint.  Behaviour depends on transfer type:
 *  - interrupt: sleep until the interrupt callback has queued data
 *    into the clist, then copy it out in sc_buffer-sized chunks;
 *  - bulk with read-ahead enabled: consume from the circular RA
 *    buffer filled by ugen_bulkra_intr, restarting the transfer
 *    chain if it stopped because the buffer was full;
 *  - bulk without RA: perform synchronous bulk transfers directly
 *    into sc_buffer;
 *  - isochronous: copy whatever the iso ring has accumulated.
 * Returns 0 or an errno; EWOULDBLOCK for non-blocking reads with no
 * data available.
 */
Static int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
	u_int32_t n, tn;
	usbd_xfer_handle xfer;
	usbd_status err;
	int error = 0;

	DPRINTFN(5, ("%s: ugenread: %d\n", device_xname(sc->sc_dev), endpt));

	if (sc->sc_dying)
		return (EIO);

	/* Control transfers go through ugenioctl, not read(2). */
	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenread: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenread: no pipe\n");
		return (EIO);
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_INTERRUPT:
		/* Block until activity occurred. */
		mutex_enter(&sc->sc_lock);
		while (sce->q.c_cc == 0) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return (EWOULDBLOCK);
			}
			/* ugenintr() clears ASLP and signals cv. */
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			/* "ugenri" */
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}
		mutex_exit(&sc->sc_lock);

		/* Transfer as many chunks as possible. */
		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
			n = min(sce->q.c_cc, uio->uio_resid);
			if (n > sizeof(sc->sc_buffer))
				n = sizeof(sc->sc_buffer);

			/* Remove a small chunk from the input queue. */
			q_to_b(&sce->q, sc->sc_buffer, n);
			DPRINTFN(5, ("ugenread: got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
		}
		break;
	case UE_BULK:
		if (sce->state & UGEN_BULK_RA) {
			DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return (EWOULDBLOCK);
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for the read-ahead buffer to fill. */
				while (sce->ra_wb_used == 0) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenread: sleep on %p\n",
						  sce));
					/* "ugenrb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
						 ("ugenread: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data to the process. */
				while (uio->uio_resid > 0
				       && sce->ra_wb_used > 0) {
					n = min(uio->uio_resid,
						sce->ra_wb_used);
					/* Don't copy past the wrap point. */
					n = min(n, sce->limit - sce->cur);
					error = uiomove(sce->cur, n, uio);
					if (error)
						break;
					sce->cur += n;
					sce->ra_wb_used -= n;
					if (sce->cur == sce->limit)
						sce->cur = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was full, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = (sce->limit - sce->ibuf)
					    - sce->ra_wb_used;
					usbd_setup_xfer(xfer,
					    sce->pipeh, sce, NULL,
					    min(n, sce->ra_wb_xferlen),
					    USBD_NO_COPY, USBD_NO_TIMEOUT,
					    ugen_bulkra_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try
						 * again at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain synchronous bulk read, UGEN_BBSIZE at a time. */
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (ENOMEM);
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
			tn = n;
			err = usbd_bulk_transfer(
				  xfer, sce->pipeh,
				  sce->state & UGEN_SHORT_OK ?
				      USBD_SHORT_XFER_OK : 0,
				  sce->timeout, sc->sc_buffer, &tn, "ugenrb");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
			DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
			error = uiomove(sc->sc_buffer, tn, uio);
			/* A short transfer ends the read. */
			if (error || tn < n)
				break;
		}
		usbd_free_xfer(xfer);
		break;
	case UE_ISOCHRONOUS:
		mutex_enter(&sc->sc_lock);
		/* cur == fill means the iso ring is empty. */
		while (sce->cur == sce->fill) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			/* "ugenri" */
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}

		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
			/* Copy up to the fill point or the wrap, whichever
			 * comes first. */
			if(sce->fill > sce->cur)
				n = min(sce->fill - sce->cur, uio->uio_resid);
			else
				n = min(sce->limit - sce->cur, uio->uio_resid);

			DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sce->cur, n, uio);
			if (error)
				break;
			sce->cur += n;
			if (sce->cur >= sce->limit)
				sce->cur = sce->ibuf;
		}
		mutex_exit(&sc->sc_lock);
		break;


	default:
		return (ENXIO);
	}
	return (error);
}
777 
778 int
779 ugenread(dev_t dev, struct uio *uio, int flag)
780 {
781 	int endpt = UGENENDPOINT(dev);
782 	struct ugen_softc *sc;
783 	int error;
784 
785 	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
786 	if (sc == NULL)
787 		return ENXIO;
788 
789 	mutex_enter(&sc->sc_lock);
790 	sc->sc_refcnt++;
791 	mutex_exit(&sc->sc_lock);
792 
793 	error = ugen_do_read(sc, endpt, uio, flag);
794 
795 	mutex_enter(&sc->sc_lock);
796 	if (--sc->sc_refcnt < 0)
797 		usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
798 	mutex_exit(&sc->sc_lock);
799 
800 	return (error);
801 }
802 
/*
 * Write to an OUT endpoint.  Behaviour depends on transfer type:
 *  - bulk with write-behind enabled: copy user data into the circular
 *    WB buffer and (re)start the transfer chain drained by
 *    ugen_bulkwb_intr when it had stopped on an empty buffer;
 *  - bulk without WB: synchronous bulk transfers from sc_buffer;
 *  - interrupt: synchronous interrupt transfers, one wMaxPacketSize
 *    chunk at a time.
 * Returns 0 or an errno; EWOULDBLOCK for non-blocking writes when the
 * WB buffer is full.
 */
Static int
ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
	int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
	u_int32_t n;
	int error = 0;
	u_int32_t tn;
	char *dbuf;
	usbd_xfer_handle xfer;
	usbd_status err;

	DPRINTFN(5, ("%s: ugenwrite: %d\n", device_xname(sc->sc_dev), endpt));

	if (sc->sc_dying)
		return (EIO);

	/* Control transfers go through ugenioctl, not write(2). */
	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenwrite: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenwrite: no pipe\n");
		return (EIO);
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_BULK:
		if (sce->state & UGEN_BULK_WB) {
			DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
			    flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return (EWOULDBLOCK);
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for room in the write-behind buffer
				 * (drained by ugen_bulkwb_intr). */
				while (sce->ra_wb_used ==
				       sce->limit - sce->ibuf) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenwrite: sleep on %p\n",
						  sce));
					/* "ugenwb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
						 ("ugenwrite: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data from the process. */
				while (uio->uio_resid > 0 &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = min(uio->uio_resid,
						(sce->limit - sce->ibuf)
						 - sce->ra_wb_used);
					/* Don't copy past the wrap point. */
					n = min(n, sce->limit - sce->fill);
					error = uiomove(sce->fill, n, uio);
					if (error)
						break;
					sce->fill += n;
					sce->ra_wb_used += n;
					if (sce->fill == sce->limit)
						sce->fill = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was empty, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used > 0) {
					/* Copy (possibly wrapped) data into
					 * the xfer's DMA buffer. */
					dbuf = (char *)usbd_get_buffer(xfer);
					n = min(sce->ra_wb_used,
						sce->ra_wb_xferlen);
					tn = min(n, sce->limit - sce->cur);
					memcpy(dbuf, sce->cur, tn);
					dbuf += tn;
					if (n - tn > 0)
						memcpy(dbuf, sce->ibuf,
						       n - tn);
					usbd_setup_xfer(xfer,
					    sce->pipeh, sce, NULL, n,
					    USBD_NO_COPY, USBD_NO_TIMEOUT,
					    ugen_bulkwb_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try again
						 * at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain synchronous bulk write, UGEN_BBSIZE at a time. */
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (EIO);
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_bulk_transfer(xfer, sce->pipeh, 0,
				  sce->timeout, sc->sc_buffer, &n,"ugenwb");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_free_xfer(xfer);
		break;
	case UE_INTERRUPT:
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (EIO);
		/* One wMaxPacketSize-sized transfer per iteration. */
		while ((n = min(UGETW(sce->edesc->wMaxPacketSize),
		    uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_intr_transfer(xfer, sce->pipeh, 0,
			    sce->timeout, sc->sc_buffer, &n, "ugenwi");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_free_xfer(xfer);
		break;
	default:
		return (ENXIO);
	}
	return (error);
}
969 
970 int
971 ugenwrite(dev_t dev, struct uio *uio, int flag)
972 {
973 	int endpt = UGENENDPOINT(dev);
974 	struct ugen_softc *sc;
975 	int error;
976 
977 	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
978 	if (sc == NULL)
979 		return ENXIO;
980 
981 	mutex_enter(&sc->sc_lock);
982 	sc->sc_refcnt++;
983 	mutex_exit(&sc->sc_lock);
984 
985 	error = ugen_do_write(sc, endpt, uio, flag);
986 
987 	mutex_enter(&sc->sc_lock);
988 	if (--sc->sc_refcnt < 0)
989 		usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
990 	mutex_exit(&sc->sc_lock);
991 
992 	return (error);
993 }
994 
995 int
996 ugen_activate(device_t self, enum devact act)
997 {
998 	struct ugen_softc *sc = device_private(self);
999 
1000 	switch (act) {
1001 	case DVACT_DEACTIVATE:
1002 		sc->sc_dying = 1;
1003 		return 0;
1004 	default:
1005 		return EOPNOTSUPP;
1006 	}
1007 }
1008 
/*
 * Final detach: mark the device dying, abort every open pipe (which
 * wakes processes sleeping on a transfer), wait for the reference
 * count to drain, revoke any open vnodes (forcing close), and tear
 * down per-endpoint and softc synchronization state.
 */
int
ugen_detach(device_t self, int flags)
{
	struct ugen_softc *sc = device_private(self);
	struct ugen_endpoint *sce;
	int i, dir;
	int maj, mn;

	DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));

	sc->sc_dying = 1;
	pmf_device_deregister(self);
	/* Abort all pipes.  Causes processes waiting for transfer to wake. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			if (sce && sce->pipeh)
				usbd_abort_pipe(sce->pipeh);
		}
	}

	mutex_enter(&sc->sc_lock);
	if (--sc->sc_refcnt >= 0) {
		/* Wake everyone */
		/* NOTE(review): only the IN-side cv of each endpoint is
		   signalled here; OUT-side sleepers are presumably woken
		   by the pipe aborts above -- confirm. */
		for (i = 0; i < USB_MAX_ENDPOINTS; i++)
			cv_signal(&sc->sc_endpoints[i][IN].cv);
		/* Wait for processes to go away. */
		usb_detach_wait(sc->sc_dev, &sc->sc_detach_cv, &sc->sc_lock);
	}
	mutex_exit(&sc->sc_lock);

	/* locate the major number */
	maj = cdevsw_lookup_major(&ugen_cdevsw);

	/* Nuke the vnodes for any open instances (calls close). */
	mn = device_unit(self) * USB_MAX_ENDPOINTS;
	vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev,
			   sc->sc_dev);

	/* Release per-endpoint select/kqueue and condvar resources. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			seldestroy(&sce->rsel);
			cv_destroy(&sce->cv);
		}
	}

	cv_destroy(&sc->sc_detach_cv);
	mutex_destroy(&sc->sc_lock);

	return (0);
}
1063 
/*
 * Interrupt-endpoint read completion: append the received bytes to the
 * endpoint's clist and wake any sleeping reader / poller.  Runs in USB
 * completion (interrupt) context.
 */
Static void
ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr, usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	u_int32_t count;
	u_char *ibuf;

	/* Pipe is being aborted; do not touch endpoint state. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugenintr: status=%d\n", status));
		/* Clear the stall asynchronously; we cannot sleep here. */
		if (status == USBD_STALLED)
		    usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	ibuf = sce->ibuf;

	DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
		     xfer, status, count));
	DPRINTFN(5, ("          data = %02x %02x %02x\n",
		     ibuf[0], ibuf[1], ibuf[2]));

	/* Queue the data; b_to_q's "bytes not moved" result is ignored. */
	(void)b_to_q(ibuf, count, &sce->q);

	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1101 
1102 Static void
1103 ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
1104 		usbd_status status)
1105 {
1106 	struct isoreq *req = addr;
1107 	struct ugen_endpoint *sce = req->sce;
1108 	struct ugen_softc *sc = sce->sc;
1109 	u_int32_t count, n;
1110 	int i, isize;
1111 
1112 	/* Return if we are aborting. */
1113 	if (status == USBD_CANCELLED)
1114 		return;
1115 
1116 	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1117 	DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
1118 	    (long)(req - sce->isoreqs), count));
1119 
1120 	/* throw away oldest input if the buffer is full */
1121 	if(sce->fill < sce->cur && sce->cur <= sce->fill + count) {
1122 		sce->cur += count;
1123 		if(sce->cur >= sce->limit)
1124 			sce->cur = sce->ibuf + (sce->limit - sce->cur);
1125 		DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
1126 			     count));
1127 	}
1128 
1129 	isize = UGETW(sce->edesc->wMaxPacketSize);
1130 	for (i = 0; i < UGEN_NISORFRMS; i++) {
1131 		u_int32_t actlen = req->sizes[i];
1132 		char const *tbuf = (char const *)req->dmabuf + isize * i;
1133 
1134 		/* copy data to buffer */
1135 		while (actlen > 0) {
1136 			n = min(actlen, sce->limit - sce->fill);
1137 			memcpy(sce->fill, tbuf, n);
1138 
1139 			tbuf += n;
1140 			actlen -= n;
1141 			sce->fill += n;
1142 			if(sce->fill == sce->limit)
1143 				sce->fill = sce->ibuf;
1144 		}
1145 
1146 		/* setup size for next transfer */
1147 		req->sizes[i] = isize;
1148 	}
1149 
1150 	usbd_setup_isoc_xfer(xfer, sce->pipeh, req, req->sizes, UGEN_NISORFRMS,
1151 			     USBD_NO_COPY, ugen_isoc_rintr);
1152 	(void)usbd_transfer(xfer);
1153 
1154 	mutex_enter(&sc->sc_lock);
1155 	if (sce->state & UGEN_ASLP) {
1156 		sce->state &= ~UGEN_ASLP;
1157 		DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce));
1158 		cv_signal(&sce->cv);
1159 	}
1160 	mutex_exit(&sc->sc_lock);
1161 	selnotify(&sce->rsel, 0, 0);
1162 }
1163 
/*
 * Bulk read-ahead completion: copy the freshly read data into the
 * endpoint's ring buffer, queue the next read while there is still
 * room, and wake any sleeping reader / poller.  Runs in USB completion
 * (interrupt) context.
 */
Static void
ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	u_int32_t count, n;
	char const *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
		/* Stop read-ahead on error; reads may restart it later. */
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
		    usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used += count;

	/* Copy data to buffer, wrapping at the end of the ring. */
	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
	n = min(count, sce->limit - sce->fill);
	memcpy(sce->fill, tbuf, n);
	tbuf += n;
	count -= n;
	sce->fill += n;
	if (sce->fill == sce->limit)
		sce->fill = sce->ibuf;
	if (count > 0) {
		/* Wrapped: the remainder goes at the start of the ring. */
		memcpy(sce->fill, tbuf, count);
		sce->fill += count;
	}

	/* Set up the next request if necessary. */
	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
	if (n > 0) {
		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
		    min(n, sce->ra_wb_xferlen), USBD_NO_COPY,
		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkra_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next read.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		/* Ring full: pause read-ahead until a read drains it. */
		sce->state |= UGEN_RA_WB_STOP;

	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1233 
/*
 * Bulk write-behind completion: the device consumed `count' bytes from
 * the staging xfer; advance the ring's read pointer and, if more data
 * is buffered, stage and queue the next write.  Wakes any sleeping
 * writer / poller.  Runs in USB completion (interrupt) context.
 */
Static void
ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	u_int32_t count, n;
	char *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
		/* Stop write-behind on error; writes may restart it later. */
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
		    usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used -= count;

	/* Update buffer pointers, wrapping past the end of the ring. */
	sce->cur += count;
	if (sce->cur >= sce->limit)
		sce->cur = sce->ibuf + (sce->cur - sce->limit);

	/* Set up next request if necessary. */
	if (sce->ra_wb_used > 0) {
		/* copy data from buffer (possibly in two chunks) */
		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
		count = min(sce->ra_wb_used, sce->ra_wb_xferlen);
		n = min(count, sce->limit - sce->cur);
		memcpy(tbuf, sce->cur, n);
		tbuf += n;
		if (count - n > 0)
			memcpy(tbuf, sce->ibuf, count - n);

		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
		    count, USBD_NO_COPY, USBD_NO_TIMEOUT, ugen_bulkwb_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkwb_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next write.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		/* Ring drained: idle until the next write restarts us. */
		sce->state |= UGEN_RA_WB_STOP;

	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1301 
1302 Static usbd_status
1303 ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
1304 {
1305 	usbd_interface_handle iface;
1306 	usb_endpoint_descriptor_t *ed;
1307 	usbd_status err;
1308 	struct ugen_endpoint *sce;
1309 	u_int8_t niface, nendpt, endptno, endpt;
1310 	int dir;
1311 
1312 	DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));
1313 
1314 	err = usbd_interface_count(sc->sc_udev, &niface);
1315 	if (err)
1316 		return (err);
1317 	if (ifaceidx < 0 || ifaceidx >= niface)
1318 		return (USBD_INVAL);
1319 
1320 	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1321 	if (err)
1322 		return (err);
1323 	err = usbd_endpoint_count(iface, &nendpt);
1324 	if (err)
1325 		return (err);
1326 	/* XXX should only do this after setting new altno has succeeded */
1327 	for (endptno = 0; endptno < nendpt; endptno++) {
1328 		ed = usbd_interface2endpoint_descriptor(iface,endptno);
1329 		endpt = ed->bEndpointAddress;
1330 		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1331 		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1332 		sce->sc = 0;
1333 		sce->edesc = 0;
1334 		sce->iface = 0;
1335 	}
1336 
1337 	/* change setting */
1338 	err = usbd_set_interface(iface, altno);
1339 	if (err)
1340 		return (err);
1341 
1342 	err = usbd_endpoint_count(iface, &nendpt);
1343 	if (err)
1344 		return (err);
1345 	for (endptno = 0; endptno < nendpt; endptno++) {
1346 		ed = usbd_interface2endpoint_descriptor(iface,endptno);
1347 		KASSERT(ed != NULL);
1348 		endpt = ed->bEndpointAddress;
1349 		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1350 		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1351 		sce->sc = sc;
1352 		sce->edesc = ed;
1353 		sce->iface = iface;
1354 	}
1355 	return (0);
1356 }
1357 
/*
 * Retrieve a complete descriptor for a certain device and index.
 * Returns a malloc(M_TEMP, M_WAITOK)-allocated copy which the caller
 * must free, or NULL (0) on failure.  If `lenp' is non-NULL it is set
 * to the descriptor's total length.
 */
Static usb_config_descriptor_t *
ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
{
	usb_config_descriptor_t *cdesc, *tdesc, cdescr;
	int len;
	usbd_status err;

	if (index == USB_CURRENT_CONFIG_INDEX) {
		/* Copy the cached descriptor of the current config. */
		tdesc = usbd_get_config_descriptor(sc->sc_udev);
		len = UGETW(tdesc->wTotalLength);
		if (lenp)
			*lenp = len;
		cdesc = malloc(len, M_TEMP, M_WAITOK);
		memcpy(cdesc, tdesc, len);
		DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
	} else {
		/* Fetch the header first to learn the full length. */
		err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
		if (err)
			return (0);
		/* NOTE(review): wTotalLength comes from the device and is
		   used unvalidated as the allocation/read size -- confirm
		   it is bounded elsewhere. */
		len = UGETW(cdescr.wTotalLength);
		DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
		if (lenp)
			*lenp = len;
		cdesc = malloc(len, M_TEMP, M_WAITOK);
		err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
		if (err) {
			free(cdesc, M_TEMP);
			return (0);
		}
	}
	return (cdesc);
}
1391 
1392 Static int
1393 ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
1394 {
1395 	usbd_interface_handle iface;
1396 	usbd_status err;
1397 
1398 	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1399 	if (err)
1400 		return (-1);
1401 	return (usbd_get_interface_altindex(iface));
1402 }
1403 
/*
 * Handle all ugen ioctls.  The first switch covers per-endpoint
 * settings (short-transfer flag, timeout, bulk read-ahead and
 * write-behind); everything after it is only valid on the control
 * endpoint.  Called with a device reference held; may sleep.
 * Returns an errno (0 on success).
 */
Static int
ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
	      void *addr, int flag, struct lwp *l)
{
	struct ugen_endpoint *sce;
	usbd_status err;
	usbd_interface_handle iface;
	struct usb_config_desc *cd;
	usb_config_descriptor_t *cdesc;
	struct usb_interface_desc *id;
	usb_interface_descriptor_t *idesc;
	struct usb_endpoint_desc *ed;
	usb_endpoint_descriptor_t *edesc;
	struct usb_alt_interface *ai;
	struct usb_string_desc *si;
	u_int8_t conf, alt;

	DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
	if (sc->sc_dying)
		return (EIO);

	switch (cmd) {
	case FIONBIO:
		/* All handled in the upper FS layer. */
		return (0);
	case USB_SET_SHORT_XFER:
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		/* This flag only affects read */
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		if (*(int *)addr)
			sce->state |= UGEN_SHORT_OK;
		else
			sce->state &= ~UGEN_SHORT_OK;
		return (0);
	case USB_SET_TIMEOUT:
		/* NOTE(review): only the IN side's timeout is updated;
		   confirm whether the OUT side should be set as well. */
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL
		    /* XXX this shouldn't happen, but the distinction between
		       input and output pipes isn't clear enough.
		       || sce->pipeh == NULL */
			)
			return (EINVAL);
		sce->timeout = *(int *)addr;
		return (0);
	case USB_SET_BULK_RA:
		/* Enable/disable read-ahead on a bulk IN endpoint. */
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return (EINVAL);

		if (*(int *)addr) {
			/* Only turn RA on if it's currently off. */
			if (sce->state & UGEN_BULK_RA)
				return (0);

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return (EINVAL);
			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (sce->ra_wb_xfer == NULL)
				return (ENOMEM);
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			/*
			 * Set up a dmabuf because we reuse the xfer with
			 * the same (max) request length like isoc.
			 */
			if (usbd_alloc_buffer(sce->ra_wb_xfer,
					      sce->ra_wb_xferlen) == 0) {
				usbd_free_xfer(sce->ra_wb_xfer);
				return (ENOMEM);
			}
			/* Ring buffer: fill/cur within [ibuf, limit). */
			sce->ibuf = malloc(sce->ra_wb_bufsize,
					   M_USBDEV, M_WAITOK);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			sce->state |= UGEN_BULK_RA;
			sce->state &= ~UGEN_RA_WB_STOP;
			/* Now start reading. */
			usbd_setup_xfer(sce->ra_wb_xfer, sce->pipeh, sce,
			    NULL,
			    min(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
			    USBD_NO_COPY, USBD_NO_TIMEOUT,
			    ugen_bulkra_intr);
			err = usbd_transfer(sce->ra_wb_xfer);
			if (err != USBD_IN_PROGRESS) {
				/* Undo everything set up above. */
				sce->state &= ~UGEN_BULK_RA;
				free(sce->ibuf, M_USBDEV);
				sce->ibuf = NULL;
				usbd_free_xfer(sce->ra_wb_xfer);
				return (EIO);
			}
		} else {
			/* Only turn RA off if it's currently on. */
			if (!(sce->state & UGEN_BULK_RA))
				return (0);

			sce->state &= ~UGEN_BULK_RA;
			usbd_abort_pipe(sce->pipeh);
			usbd_free_xfer(sce->ra_wb_xfer);
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and drain the buffer
			 * instead.
			 */
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
		return (0);
	case USB_SET_BULK_WB:
		/* Enable/disable write-behind on a bulk OUT endpoint. */
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return (EINVAL);

		if (*(int *)addr) {
			/* Only turn WB on if it's currently off. */
			if (sce->state & UGEN_BULK_WB)
				return (0);

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return (EINVAL);
			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (sce->ra_wb_xfer == NULL)
				return (ENOMEM);
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			/*
			 * Set up a dmabuf because we reuse the xfer with
			 * the same (max) request length like isoc.
			 */
			if (usbd_alloc_buffer(sce->ra_wb_xfer,
					      sce->ra_wb_xferlen) == 0) {
				usbd_free_xfer(sce->ra_wb_xfer);
				return (ENOMEM);
			}
			/* Ring buffer; transfers start at the next write. */
			sce->ibuf = malloc(sce->ra_wb_bufsize,
					   M_USBDEV, M_WAITOK);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
		} else {
			/* Only turn WB off if it's currently on. */
			if (!(sce->state & UGEN_BULK_WB))
				return (0);

			sce->state &= ~UGEN_BULK_WB;
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and keep writing to
			 * drain the buffer instead.
			 */
			usbd_abort_pipe(sce->pipeh);
			usbd_free_xfer(sce->ra_wb_xfer);
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
		return (0);
	case USB_SET_BULK_RA_OPT:
	case USB_SET_BULK_WB_OPT:
	{
		/* Tune buffer/request sizes for RA (IN) or WB (OUT). */
		struct usb_bulk_ra_wb_opt *opt;

		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		opt = (struct usb_bulk_ra_wb_opt *)addr;
		if (cmd == USB_SET_BULK_RA_OPT)
			sce = &sc->sc_endpoints[endpt][IN];
		else
			sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		if (opt->ra_wb_buffer_size < 1 ||
		    opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
		    opt->ra_wb_request_size < 1 ||
		    opt->ra_wb_request_size > opt->ra_wb_buffer_size)
			return (EINVAL);
		/*
		 * XXX These changes do not take effect until the
		 * next time RA/WB mode is enabled but they ought to
		 * take effect immediately.
		 */
		sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
		sce->ra_wb_reqsize = opt->ra_wb_request_size;
		return (0);
	}
	default:
		break;
	}

	/* The remaining ioctls apply to the control endpoint only. */
	if (endpt != USB_CONTROL_ENDPOINT)
		return (EINVAL);

	switch (cmd) {
#ifdef UGEN_DEBUG
	case USB_SETDEBUG:
		ugendebug = *(int *)addr;
		break;
#endif
	case USB_GET_CONFIG:
		err = usbd_get_config(sc->sc_udev, &conf);
		if (err)
			return (EIO);
		*(int *)addr = conf;
		break;
	case USB_SET_CONFIG:
		if (!(flag & FWRITE))
			return (EPERM);
		err = ugen_set_config(sc, *(int *)addr);
		switch (err) {
		case USBD_NORMAL_COMPLETION:
			break;
		case USBD_IN_USE:
			return (EBUSY);
		default:
			return (EIO);
		}
		break;
	case USB_GET_ALTINTERFACE:
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		idesc = usbd_get_interface_descriptor(iface);
		if (idesc == NULL)
			return (EIO);
		ai->uai_alt_no = idesc->bAlternateSetting;
		break;
	case USB_SET_ALTINTERFACE:
		if (!(flag & FWRITE))
			return (EPERM);
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		err = ugen_set_interface(sc, ai->uai_interface_index,
		    ai->uai_alt_no);
		if (err)
			return (EINVAL);
		break;
	case USB_GET_NO_ALT:
		/* Count the alternate settings of an interface. */
		ai = (struct usb_alt_interface *)addr;
		cdesc = ugen_get_cdesc(sc, ai->uai_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
		if (idesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		ai->uai_alt_no = usbd_get_no_alts(cdesc,
		    idesc->bInterfaceNumber);
		free(cdesc, M_TEMP);
		break;
	case USB_GET_DEVICE_DESC:
		*(usb_device_descriptor_t *)addr =
			*usbd_get_device_descriptor(sc->sc_udev);
		break;
	case USB_GET_CONFIG_DESC:
		cd = (struct usb_config_desc *)addr;
		cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		cd->ucd_desc = *cdesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_INTERFACE_DESC:
		id = (struct usb_interface_desc *)addr;
		cdesc = ugen_get_cdesc(sc, id->uid_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
		    id->uid_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, id->uid_interface_index);
		else
			alt = id->uid_alt_index;
		idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
		if (idesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		id->uid_desc = *idesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_ENDPOINT_DESC:
		ed = (struct usb_endpoint_desc *)addr;
		cdesc = ugen_get_cdesc(sc, ed->ued_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
		    ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, ed->ued_interface_index);
		else
			alt = ed->ued_alt_index;
		edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
					alt, ed->ued_endpoint_index);
		if (edesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		ed->ued_desc = *edesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_FULL_DESC:
	{
		/* Copy the complete config descriptor out to userland. */
		int len;
		struct iovec iov;
		struct uio uio;
		struct usb_full_desc *fd = (struct usb_full_desc *)addr;
		int error;

		cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &len);
		if (cdesc == NULL)
			return (EINVAL);
		if (len > fd->ufd_size)
			len = fd->ufd_size;
		iov.iov_base = (void *)fd->ufd_data;
		iov.iov_len = len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_resid = len;
		uio.uio_offset = 0;
		uio.uio_rw = UIO_READ;
		uio.uio_vmspace = l->l_proc->p_vmspace;
		error = uiomove((void *)cdesc, len, &uio);
		free(cdesc, M_TEMP);
		return (error);
	}
	case USB_GET_STRING_DESC: {
		int len;
		si = (struct usb_string_desc *)addr;
		err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
			  si->usd_language_id, &si->usd_desc, &len);
		if (err)
			return (EINVAL);
		break;
	}
	case USB_DO_REQUEST:
	{
		/* Issue a raw control request supplied by userland. */
		struct usb_ctl_request *ur = (void *)addr;
		int len = UGETW(ur->ucr_request.wLength);
		struct iovec iov;
		struct uio uio;
		void *ptr = 0;
		usbd_status xerr;
		int error = 0;

		if (!(flag & FWRITE))
			return (EPERM);
		/* Avoid requests that would damage the bus integrity. */
		if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_CONFIG) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
		     ur->ucr_request.bRequest == UR_SET_INTERFACE))
			return (EINVAL);

		if (len < 0 || len > 32767)
			return (EINVAL);
		if (len != 0) {
			/* Stage the data in a kernel buffer. */
			iov.iov_base = (void *)ur->ucr_data;
			iov.iov_len = len;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_resid = len;
			uio.uio_offset = 0;
			uio.uio_rw =
				ur->ucr_request.bmRequestType & UT_READ ?
				UIO_READ : UIO_WRITE;
			uio.uio_vmspace = l->l_proc->p_vmspace;
			ptr = malloc(len, M_TEMP, M_WAITOK);
			if (uio.uio_rw == UIO_WRITE) {
				/* Host-to-device: copy in before the request. */
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
		sce = &sc->sc_endpoints[endpt][IN];
		xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
			  ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
		if (xerr) {
			error = EIO;
			goto ret;
		}
		if (len != 0) {
			if (uio.uio_rw == UIO_READ) {
				/* Device-to-host: copy the result back out. */
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
	ret:
		if (ptr)
			free(ptr, M_TEMP);
		return (error);
	}
	case USB_GET_DEVICEINFO:
		usbd_fill_deviceinfo(sc->sc_udev,
				     (struct usb_device_info *)addr, 0);
		break;
#ifdef COMPAT_30
	case USB_GET_DEVICEINFO_OLD:
		usbd_fill_deviceinfo_old(sc->sc_udev,
					 (struct usb_device_info_old *)addr, 0);

		break;
#endif
	default:
		return (EINVAL);
	}
	return (0);
}
1831 
1832 int
1833 ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1834 {
1835 	int endpt = UGENENDPOINT(dev);
1836 	struct ugen_softc *sc;
1837 	int error;
1838 
1839 	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
1840 	if (sc == NULL)
1841 		return ENXIO;
1842 
1843 	sc->sc_refcnt++;
1844 	error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
1845 	if (--sc->sc_refcnt < 0)
1846 		usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
1847 	return (error);
1848 }
1849 
/*
 * Poll support.  Interrupt and isochronous IN endpoints report
 * readability from their buffered data; bulk endpoints have precise
 * state only in read-ahead/write-behind mode and otherwise claim to
 * be ready ("seltrue" behaviour).
 */
int
ugenpoll(dev_t dev, int events, struct lwp *l)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce_in, *sce_out;
	int revents = 0;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (sc->sc_dying)
		return (POLLHUP);

	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
	if (sce_in == NULL && sce_out == NULL)
		return (POLLERR);
#ifdef DIAGNOSTIC
	if (!sce_in->edesc && !sce_out->edesc) {
		printf("ugenpoll: no edesc\n");
		return (POLLERR);
	}
	/* It's possible to have only one pipe open. */
	if (!sce_in->pipeh && !sce_out->pipeh) {
		printf("ugenpoll: no pipe\n");
		return (POLLERR);
	}
#endif

	/* Buffer state is examined under the softc lock. */
	mutex_enter(&sc->sc_lock);
	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			/* Readable when the clist holds queued bytes. */
			if (sce_in->q.c_cc > 0)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_ISOCHRONOUS:
			/* Readable when the ring buffer is non-empty. */
			if (sce_in->cur != sce_in->fill)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_BULK:
			if (sce_in->state & UGEN_BULK_RA) {
				if (sce_in->ra_wb_used > 0)
					revents |= events &
					    (POLLIN | POLLRDNORM);
				else
					selrecord(l, &sce_in->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			 revents |= events & (POLLIN | POLLRDNORM);
			 break;
		default:
			break;
		}
	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX unimplemented */
			break;
		case UE_BULK:
			if (sce_out->state & UGEN_BULK_WB) {
				/* Writable while the ring has free space. */
				if (sce_out->ra_wb_used <
				    sce_out->limit - sce_out->ibuf)
					revents |= events &
					    (POLLOUT | POLLWRNORM);
				else
					selrecord(l, &sce_out->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			 revents |= events & (POLLOUT | POLLWRNORM);
			 break;
		default:
			break;
		}

	mutex_exit(&sc->sc_lock);

	return (revents);
}
1948 
1949 static void
1950 filt_ugenrdetach(struct knote *kn)
1951 {
1952 	struct ugen_endpoint *sce = kn->kn_hook;
1953 	struct ugen_softc *sc = sce->sc;
1954 
1955 	mutex_enter(&sc->sc_lock);
1956 	SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
1957 	mutex_exit(&sc->sc_lock);
1958 }
1959 
1960 static int
1961 filt_ugenread_intr(struct knote *kn, long hint)
1962 {
1963 	struct ugen_endpoint *sce = kn->kn_hook;
1964 
1965 	kn->kn_data = sce->q.c_cc;
1966 	return (kn->kn_data > 0);
1967 }
1968 
1969 static int
1970 filt_ugenread_isoc(struct knote *kn, long hint)
1971 {
1972 	struct ugen_endpoint *sce = kn->kn_hook;
1973 
1974 	if (sce->cur == sce->fill)
1975 		return (0);
1976 
1977 	if (sce->cur < sce->fill)
1978 		kn->kn_data = sce->fill - sce->cur;
1979 	else
1980 		kn->kn_data = (sce->limit - sce->cur) +
1981 		    (sce->fill - sce->ibuf);
1982 
1983 	return (1);
1984 }
1985 
1986 static int
1987 filt_ugenread_bulk(struct knote *kn, long hint)
1988 {
1989 	struct ugen_endpoint *sce = kn->kn_hook;
1990 
1991 	if (!(sce->state & UGEN_BULK_RA))
1992 		/*
1993 		 * We have no easy way of determining if a read will
1994 		 * yield any data or a write will happen.
1995 		 * So, emulate "seltrue".
1996 		 */
1997 		return (filt_seltrue(kn, hint));
1998 
1999 	if (sce->ra_wb_used == 0)
2000 		return (0);
2001 
2002 	kn->kn_data = sce->ra_wb_used;
2003 
2004 	return (1);
2005 }
2006 
2007 static int
2008 filt_ugenwrite_bulk(struct knote *kn, long hint)
2009 {
2010 	struct ugen_endpoint *sce = kn->kn_hook;
2011 
2012 	if (!(sce->state & UGEN_BULK_WB))
2013 		/*
2014 		 * We have no easy way of determining if a read will
2015 		 * yield any data or a write will happen.
2016 		 * So, emulate "seltrue".
2017 		 */
2018 		return (filt_seltrue(kn, hint));
2019 
2020 	if (sce->ra_wb_used == sce->limit - sce->ibuf)
2021 		return (0);
2022 
2023 	kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;
2024 
2025 	return (1);
2026 }
2027 
/*
 * kqueue filterops tables: all share one detach routine; the event
 * routine matches the endpoint's transfer type.  The write filter
 * also detaches via filt_ugenrdetach since IN and OUT endpoints are
 * distinct structures with their own rsel.
 */
static const struct filterops ugenread_intr_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_intr };

static const struct filterops ugenread_isoc_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_isoc };

static const struct filterops ugenread_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_bulk };

static const struct filterops ugenwrite_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenwrite_bulk };
2039 
/*
 * kqueue attach: select the filterops matching the endpoint's
 * direction and transfer type, then hook the knote onto that
 * endpoint's selinfo klist under the softc lock.
 */
int
ugenkqfilter(dev_t dev, struct knote *kn)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	struct klist *klist;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (sc->sc_dying)
		return (ENXIO);

	/* The control endpoint supports no kqueue events. */
	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
		if (sce == NULL)
			return (EINVAL);

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			kn->kn_fop = &ugenread_intr_filtops;
			break;
		case UE_ISOCHRONOUS:
			kn->kn_fop = &ugenread_isoc_filtops;
			break;
		case UE_BULK:
			kn->kn_fop = &ugenread_bulk_filtops;
			break;
		default:
			return (EINVAL);
		}
		break;

	case EVFILT_WRITE:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
		if (sce == NULL)
			return (EINVAL);

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX poll doesn't support this */
			return (EINVAL);

		case UE_BULK:
			kn->kn_fop = &ugenwrite_bulk_filtops;
			break;
		default:
			return (EINVAL);
		}
		break;

	default:
		return (EINVAL);
	}

	/* The endpoint pointer is what the filter routines read back. */
	kn->kn_hook = sce;

	mutex_enter(&sc->sc_lock);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	mutex_exit(&sc->sc_lock);

	return (0);
}
2111