xref: /netbsd-src/sys/dev/usb/ugen.c (revision 80d9064ac03cbb6a4174695f0d5b237c8766d3d0)
1 /*	$NetBSD: ugen.c,v 1.125 2014/09/05 05:31:15 matt Exp $	*/
2 
3 /*
4  * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Lennart Augustsson (lennart@augustsson.net) at
9  * Carlstedt Research & Technology.
10  *
11  * Copyright (c) 2006 BBN Technologies Corp.  All rights reserved.
12  * Effort sponsored in part by the Defense Advanced Research Projects
13  * Agency (DARPA) and the Department of the Interior National Business
14  * Center under agreement number NBCHC050166.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.125 2014/09/05 05:31:15 matt Exp $");
41 
42 #ifdef _KERNEL_OPT
43 #include "opt_compat_netbsd.h"
44 #endif
45 
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/malloc.h>
50 #include <sys/device.h>
51 #include <sys/ioctl.h>
52 #include <sys/conf.h>
53 #include <sys/tty.h>
54 #include <sys/file.h>
55 #include <sys/select.h>
56 #include <sys/proc.h>
57 #include <sys/vnode.h>
58 #include <sys/poll.h>
59 
60 #include <dev/usb/usb.h>
61 #include <dev/usb/usbdi.h>
62 #include <dev/usb/usbdi_util.h>
63 
64 #ifdef UGEN_DEBUG
65 #define DPRINTF(x)	if (ugendebug) printf x
66 #define DPRINTFN(n,x)	if (ugendebug>(n)) printf x
67 int	ugendebug = 0;
68 #else
69 #define DPRINTF(x)
70 #define DPRINTFN(n,x)
71 #endif
72 
73 #define	UGEN_CHUNK	128	/* chunk size for read */
74 #define	UGEN_IBSIZE	1020	/* interrupt input queue (clist) size */
75 #define	UGEN_BBSIZE	1024	/* bulk transfer staging buffer size */
76 
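/*
 * The isochronous read path keeps UGEN_NISOREQS transfers in flight, each
 * carrying UGEN_NISORFRMS frames, so the ring buffer allocated in ugenopen()
 * holds UGEN_NISOFRAMES (32) packets of wMaxPacketSize bytes each.
 */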
77 #define UGEN_NISOREQS	4	/* number of outstanding xfer requests */
78 #define UGEN_NISORFRMS	8	/* number of transactions per req */
79 #define UGEN_NISOFRAMES	(UGEN_NISORFRMS * UGEN_NISOREQS)
80 
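/*
 * These are only defaults: ugenopen() starts every bulk endpoint with
 * UGEN_BULK_RA_WB_BUFSIZE, and userland may adjust the buffer and request
 * sizes (up to UGEN_BULK_RA_WB_BUFMAX) with the USB_SET_BULK_RA_OPT and
 * USB_SET_BULK_WB_OPT ioctls before enabling read-ahead/write-behind.
 */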
81 #define UGEN_BULK_RA_WB_BUFSIZE	16384		/* default buffer size */
82 #define UGEN_BULK_RA_WB_BUFMAX	(1 << 20)	/* maximum allowed buffer */
83 
84 struct isoreq {
85 	struct ugen_endpoint *sce;
86 	usbd_xfer_handle xfer;
87 	void *dmabuf;
88 	u_int16_t sizes[UGEN_NISORFRMS];
89 };
90 
91 struct ugen_endpoint {
92 	struct ugen_softc *sc;
93 	usb_endpoint_descriptor_t *edesc;
94 	usbd_interface_handle iface;
95 	int state;
96 #define	UGEN_ASLP	0x02	/* waiting for data */
97 #define UGEN_SHORT_OK	0x04	/* short xfers are OK */
98 #define UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
99 #define UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
100 #define UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
101 	usbd_pipe_handle pipeh;
102 	struct clist q;
103 	u_char *ibuf;		/* start of buffer (circular for isoc) */
104 	u_char *fill;		/* location for input (isoc) */
105 	u_char *limit;		/* end of circular buffer (isoc) */
106 	u_char *cur;		/* current read location (isoc) */
107 	u_int32_t timeout;
108 	u_int32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
109 	u_int32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
110 	u_int32_t ra_wb_used;	 /* how much is in buffer */
111 	u_int32_t ra_wb_xferlen; /* current xfer length for RA/WB */
112 	usbd_xfer_handle ra_wb_xfer;
113 	struct isoreq isoreqs[UGEN_NISOREQS];
114 	/* Keep these last; we don't overwrite them in ugen_set_config() */
115 #define UGEN_ENDPOINT_NONZERO_CRUFT	offsetof(struct ugen_endpoint, rsel)
116 	struct selinfo rsel;
117 	kcondvar_t cv;
118 };
119 
120 struct ugen_softc {
121 	device_t sc_dev;		/* base device */
122 	usbd_device_handle sc_udev;
123 
124 	kmutex_t		sc_lock;
125 	kcondvar_t		sc_detach_cv;
126 
127 	char sc_is_open[USB_MAX_ENDPOINTS];
128 	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
129 #define OUT 0
130 #define IN  1
131 
132 	int sc_refcnt;
133 	char sc_buffer[UGEN_BBSIZE];
134 	u_char sc_dying;
135 };
136 
137 dev_type_open(ugenopen);
138 dev_type_close(ugenclose);
139 dev_type_read(ugenread);
140 dev_type_write(ugenwrite);
141 dev_type_ioctl(ugenioctl);
142 dev_type_poll(ugenpoll);
143 dev_type_kqfilter(ugenkqfilter);
144 
145 const struct cdevsw ugen_cdevsw = {
146 	.d_open = ugenopen,
147 	.d_close = ugenclose,
148 	.d_read = ugenread,
149 	.d_write = ugenwrite,
150 	.d_ioctl = ugenioctl,
151 	.d_stop = nostop,
152 	.d_tty = notty,
153 	.d_poll = ugenpoll,
154 	.d_mmap = nommap,
155 	.d_kqfilter = ugenkqfilter,
156 	.d_discard = nodiscard,
157 	.d_flag = D_OTHER,
158 };
159 
160 Static void ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr,
161 		     usbd_status status);
162 Static void ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
163 			    usbd_status status);
164 Static void ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
165 			     usbd_status status);
166 Static void ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
167 			     usbd_status status);
168 Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
169 Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
170 Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
171 			 void *, int, struct lwp *);
172 Static int ugen_set_config(struct ugen_softc *sc, int configno);
173 Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *sc,
174 					       int index, int *lenp);
175 Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
176 Static int ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx);
177 
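/*
 * The character device minor number packs the unit in bits 4-7 and the
 * endpoint address in bits 0-3; e.g. minor 0x23 selects unit 2, endpoint 3
 * (conventionally the /dev/ugen2.03 node).
 */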
178 #define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
179 #define UGENENDPOINT(n) (minor(n) & 0xf)
180 #define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))
181 
182 int             ugen_match(device_t, cfdata_t, void *);
183 void            ugen_attach(device_t, device_t, void *);
184 int             ugen_detach(device_t, int);
185 int             ugen_activate(device_t, enum devact);
186 extern struct cfdriver ugen_cd;
187 CFATTACH_DECL_NEW(ugen, sizeof(struct ugen_softc), ugen_match, ugen_attach, ugen_detach, ugen_activate);
188 
189 /* toggle to control attach priority. -1 means "let autoconf decide" */
190 int ugen_override = -1;
191 
192 int
193 ugen_match(device_t parent, cfdata_t match, void *aux)
194 {
195 	struct usb_attach_arg *uaa = aux;
196 	int override;
197 
198 	if (ugen_override != -1)
199 		override = ugen_override;
200 	else
201 		override = match->cf_flags & 1;
202 
203 	if (override)
204 		return (UMATCH_HIGHEST);
205 	else if (uaa->usegeneric)
206 		return (UMATCH_GENERIC);
207 	else
208 		return (UMATCH_NONE);
209 }
210 
211 void
212 ugen_attach(device_t parent, device_t self, void *aux)
213 {
214 	struct ugen_softc *sc = device_private(self);
215 	struct usb_attach_arg *uaa = aux;
216 	usbd_device_handle udev;
217 	char *devinfop;
218 	usbd_status err;
219 	int i, dir, conf;
220 
221 	aprint_naive("\n");
222 	aprint_normal("\n");
223 
224 	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_USB);
225 	cv_init(&sc->sc_detach_cv, "ugendet");
226 
227 	devinfop = usbd_devinfo_alloc(uaa->device, 0);
228 	aprint_normal_dev(self, "%s\n", devinfop);
229 	usbd_devinfo_free(devinfop);
230 
231 	sc->sc_dev = self;
232 	sc->sc_udev = udev = uaa->device;
233 
234 	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
235 		for (dir = OUT; dir <= IN; dir++) {
236 			struct ugen_endpoint *sce;
237 
238 			sce = &sc->sc_endpoints[i][dir];
239 			selinit(&sce->rsel);
240 			cv_init(&sce->cv, "ugensce");
241 		}
242 	}
243 
244 	/* First set configuration index 0, the default one for ugen. */
245 	err = usbd_set_config_index(udev, 0, 0);
246 	if (err) {
247 		aprint_error_dev(self,
248 		    "setting configuration index 0 failed\n");
249 		sc->sc_dying = 1;
250 		return;
251 	}
252 	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;
253 
254 	/* Set up all the local state for this configuration. */
255 	err = ugen_set_config(sc, conf);
256 	if (err) {
257 		aprint_error_dev(self, "setting configuration %d failed\n",
258 		    conf);
259 		sc->sc_dying = 1;
260 		return;
261 	}
262 
263 	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev,
264 			   sc->sc_dev);
265 
266 	if (!pmf_device_register(self, NULL, NULL))
267 		aprint_error_dev(self, "couldn't establish power handler\n");
268 
269 	return;
270 }
271 
272 Static int
273 ugen_set_config(struct ugen_softc *sc, int configno)
274 {
275 	usbd_device_handle dev = sc->sc_udev;
276 	usb_config_descriptor_t *cdesc;
277 	usbd_interface_handle iface;
278 	usb_endpoint_descriptor_t *ed;
279 	struct ugen_endpoint *sce;
280 	u_int8_t niface, nendpt;
281 	int ifaceno, endptno, endpt;
282 	usbd_status err;
283 	int dir, i;
284 
285 	DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
286 		    device_xname(sc->sc_dev), configno, sc));
287 
288 	/*
289 	 * We start at 1, not 0, because we don't care whether the
290 	 * control endpoint is open or not. It is always present.
291 	 */
292 	for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
293 		if (sc->sc_is_open[endptno]) {
294 			DPRINTFN(1,
295 			     ("ugen_set_config: %s - endpoint %d is open\n",
296 			      device_xname(sc->sc_dev), endptno));
297 			return (USBD_IN_USE);
298 		}
299 
300 	/* Avoid setting the current value. */
301 	cdesc = usbd_get_config_descriptor(dev);
302 	if (!cdesc || cdesc->bConfigurationValue != configno) {
303 		err = usbd_set_config_no(dev, configno, 1);
304 		if (err)
305 			return (err);
306 	}
307 
308 	err = usbd_interface_count(dev, &niface);
309 	if (err)
310 		return (err);
311 
312 	/* Clear out the old info, but leave the selinfo and cv initialised. */
313 	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
314 		for (dir = OUT; dir <= IN; dir++) {
315 			sce = &sc->sc_endpoints[i][dir];
316 			memset(sce, 0, UGEN_ENDPOINT_NONZERO_CRUFT);
317 		}
318 	}
319 
320 	for (ifaceno = 0; ifaceno < niface; ifaceno++) {
321 		DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
322 		err = usbd_device2interface_handle(dev, ifaceno, &iface);
323 		if (err)
324 			return (err);
325 		err = usbd_endpoint_count(iface, &nendpt);
326 		if (err)
327 			return (err);
328 		for (endptno = 0; endptno < nendpt; endptno++) {
329 			ed = usbd_interface2endpoint_descriptor(iface,endptno);
330 			KASSERT(ed != NULL);
331 			endpt = ed->bEndpointAddress;
332 			dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
333 			sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
334 			DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
335 				    "(%d,%d), sce=%p\n",
336 				    endptno, endpt, UE_GET_ADDR(endpt),
337 				    UE_GET_DIR(endpt), sce));
338 			sce->sc = sc;
339 			sce->edesc = ed;
340 			sce->iface = iface;
341 		}
342 	}
343 	return (USBD_NORMAL_COMPLETION);
344 }
345 
346 int
347 ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
348 {
349 	struct ugen_softc *sc;
350 	int unit = UGENUNIT(dev);
351 	int endpt = UGENENDPOINT(dev);
352 	usb_endpoint_descriptor_t *edesc;
353 	struct ugen_endpoint *sce;
354 	int dir, isize;
355 	usbd_status err;
356 	usbd_xfer_handle xfer;
357 	void *tbuf;
358 	int i, j;
359 
360 	sc = device_lookup_private(&ugen_cd, unit);
361 	if (sc == NULL)
362 		return ENXIO;
363 
364 	DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
365 		     flag, mode, unit, endpt));
366 
367 	if (sc->sc_dying)
368 		return (ENXIO);
369 
370 	/* The control endpoint allows multiple opens. */
371 	if (endpt == USB_CONTROL_ENDPOINT) {
372 		sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
373 		return (0);
374 	}
375 
376 	if (sc->sc_is_open[endpt])
377 		return (EBUSY);
378 
379 	/* Make sure there are pipes for all directions. */
380 	for (dir = OUT; dir <= IN; dir++) {
381 		if (flag & (dir == OUT ? FWRITE : FREAD)) {
382 			sce = &sc->sc_endpoints[endpt][dir];
383 			if (sce == 0 || sce->edesc == 0)
384 				return (ENXIO);
385 		}
386 	}
387 
388 	/* Actually open the pipes. */
389 	/* XXX Should back out properly if it fails. */
390 	for (dir = OUT; dir <= IN; dir++) {
391 		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
392 			continue;
393 		sce = &sc->sc_endpoints[endpt][dir];
394 		sce->state = 0;
395 		sce->timeout = USBD_NO_TIMEOUT;
396 		DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
397 			     sc, endpt, dir, sce));
398 		edesc = sce->edesc;
399 		switch (edesc->bmAttributes & UE_XFERTYPE) {
400 		case UE_INTERRUPT:
401 			if (dir == OUT) {
402 				err = usbd_open_pipe(sce->iface,
403 				    edesc->bEndpointAddress, 0, &sce->pipeh);
404 				if (err)
405 					return (EIO);
406 				break;
407 			}
408 			isize = UGETW(edesc->wMaxPacketSize);
409 			if (isize == 0)	/* shouldn't happen */
410 				return (EINVAL);
411 			sce->ibuf = malloc(isize, M_USBDEV, M_WAITOK);
412 			DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
413 				     endpt, isize));
414 			if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1) {
415 				free(sce->ibuf, M_USBDEV);
416 				sce->ibuf = NULL;
417 				return (ENOMEM);
418 			}
419 			err = usbd_open_pipe_intr(sce->iface,
420 				  edesc->bEndpointAddress,
421 				  USBD_SHORT_XFER_OK, &sce->pipeh, sce,
422 				  sce->ibuf, isize, ugenintr,
423 				  USBD_DEFAULT_INTERVAL);
424 			if (err) {
425 				clfree(&sce->q);
426 				free(sce->ibuf, M_USBDEV);
427 				sce->ibuf = NULL;
428 				return (EIO);
429 			}
430 			DPRINTFN(5, ("ugenopen: interrupt open done\n"));
431 			break;
432 		case UE_BULK:
433 			err = usbd_open_pipe(sce->iface,
434 				  edesc->bEndpointAddress, 0, &sce->pipeh);
435 			if (err)
436 				return (EIO);
437 			sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
438 			/*
439 			 * Use request size for non-RA/WB transfers
440 			 * as the default.
441 			 */
442 			sce->ra_wb_reqsize = UGEN_BBSIZE;
443 			break;
444 		case UE_ISOCHRONOUS:
445 			if (dir == OUT)
446 				return (EINVAL);
447 			isize = UGETW(edesc->wMaxPacketSize);
448 			if (isize == 0)	/* shouldn't happen */
449 				return (EINVAL);
450 			sce->ibuf = malloc(isize * UGEN_NISOFRAMES,
451 				M_USBDEV, M_WAITOK);
452 			sce->cur = sce->fill = sce->ibuf;
453 			sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
454 			DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
455 				     endpt, isize));
456 			err = usbd_open_pipe(sce->iface,
457 				  edesc->bEndpointAddress, 0, &sce->pipeh);
458 			if (err) {
459 				free(sce->ibuf, M_USBDEV);
460 				sce->ibuf = NULL;
461 				return (EIO);
462 			}
463 			for(i = 0; i < UGEN_NISOREQS; ++i) {
464 				sce->isoreqs[i].sce = sce;
465 				xfer = usbd_alloc_xfer(sc->sc_udev);
466 				if (xfer == 0)
467 					goto bad;
468 				sce->isoreqs[i].xfer = xfer;
469 				tbuf = usbd_alloc_buffer
470 					(xfer, isize * UGEN_NISORFRMS);
471 				if (tbuf == 0) {
472 					i++;
473 					goto bad;
474 				}
475 				sce->isoreqs[i].dmabuf = tbuf;
476 				for(j = 0; j < UGEN_NISORFRMS; ++j)
477 					sce->isoreqs[i].sizes[j] = isize;
478 				usbd_setup_isoc_xfer
479 					(xfer, sce->pipeh, &sce->isoreqs[i],
480 					 sce->isoreqs[i].sizes,
481 					 UGEN_NISORFRMS, USBD_NO_COPY,
482 					 ugen_isoc_rintr);
483 				(void)usbd_transfer(xfer);
484 			}
485 			DPRINTFN(5, ("ugenopen: isoc open done\n"));
486 			break;
487 		bad:
488 			while (--i >= 0) /* implicit buffer free */
489 				usbd_free_xfer(sce->isoreqs[i].xfer);
490 			usbd_close_pipe(sce->pipeh);
491 			sce->pipeh = NULL;
492 			free(sce->ibuf, M_USBDEV);
493 			sce->ibuf = NULL;
494 			return (ENOMEM);
495 		case UE_CONTROL:
496 			sce->timeout = USBD_DEFAULT_TIMEOUT;
497 			return (EINVAL);
498 		}
499 	}
500 	sc->sc_is_open[endpt] = 1;
501 	return (0);
502 }
503 
504 int
505 ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
506 {
507 	int endpt = UGENENDPOINT(dev);
508 	struct ugen_softc *sc;
509 	struct ugen_endpoint *sce;
510 	int dir;
511 	int i;
512 
513 	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
514 	if (sc == NULL)
515 		return ENXIO;
516 
517 	DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
518 		     flag, mode, UGENUNIT(dev), endpt));
519 
520 #ifdef DIAGNOSTIC
521 	if (!sc->sc_is_open[endpt]) {
522 		printf("ugenclose: not open\n");
523 		return (EINVAL);
524 	}
525 #endif
526 
527 	if (endpt == USB_CONTROL_ENDPOINT) {
528 		DPRINTFN(5, ("ugenclose: close control\n"));
529 		sc->sc_is_open[endpt] = 0;
530 		return (0);
531 	}
532 
533 	for (dir = OUT; dir <= IN; dir++) {
534 		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
535 			continue;
536 		sce = &sc->sc_endpoints[endpt][dir];
537 		if (sce == NULL || sce->pipeh == NULL)
538 			continue;
539 		DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
540 			     endpt, dir, sce));
541 
542 		usbd_abort_pipe(sce->pipeh);
543 		usbd_close_pipe(sce->pipeh);
544 		sce->pipeh = NULL;
545 
546 		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
547 		case UE_INTERRUPT:
548 			ndflush(&sce->q, sce->q.c_cc);
549 			clfree(&sce->q);
550 			break;
551 		case UE_ISOCHRONOUS:
552 			for (i = 0; i < UGEN_NISOREQS; ++i)
553 				usbd_free_xfer(sce->isoreqs[i].xfer);
554 			break;
555 		case UE_BULK:
556 			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB))
557 				/* ibuf freed below */
558 				usbd_free_xfer(sce->ra_wb_xfer);
559 			break;
560 		default:
561 			break;
562 		}
563 
564 		if (sce->ibuf != NULL) {
565 			free(sce->ibuf, M_USBDEV);
566 			sce->ibuf = NULL;
567 		}
568 	}
569 	sc->sc_is_open[endpt] = 0;
570 
571 	return (0);
572 }
573 
574 Static int
575 ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
576 {
577 	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
578 	u_int32_t n, tn;
579 	usbd_xfer_handle xfer;
580 	usbd_status err;
581 	int error = 0;
582 
583 	DPRINTFN(5, ("%s: ugenread: %d\n", device_xname(sc->sc_dev), endpt));
584 
585 	if (sc->sc_dying)
586 		return (EIO);
587 
588 	if (endpt == USB_CONTROL_ENDPOINT)
589 		return (ENODEV);
590 
591 #ifdef DIAGNOSTIC
592 	if (sce->edesc == NULL) {
593 		printf("ugenread: no edesc\n");
594 		return (EIO);
595 	}
596 	if (sce->pipeh == NULL) {
597 		printf("ugenread: no pipe\n");
598 		return (EIO);
599 	}
600 #endif
601 
602 	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
603 	case UE_INTERRUPT:
604 		/* Block until activity occurs. */
605 		mutex_enter(&sc->sc_lock);
606 		while (sce->q.c_cc == 0) {
607 			if (flag & IO_NDELAY) {
608 				mutex_exit(&sc->sc_lock);
609 				return (EWOULDBLOCK);
610 			}
611 			sce->state |= UGEN_ASLP;
612 			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
613 			/* "ugenri" */
614 			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
615 			    mstohz(sce->timeout));
616 			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
617 			if (sc->sc_dying)
618 				error = EIO;
619 			if (error) {
620 				sce->state &= ~UGEN_ASLP;
621 				break;
622 			}
623 		}
624 		mutex_exit(&sc->sc_lock);
625 
626 		/* Transfer as many chunks as possible. */
627 		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
628 			n = min(sce->q.c_cc, uio->uio_resid);
629 			if (n > sizeof(sc->sc_buffer))
630 				n = sizeof(sc->sc_buffer);
631 
632 			/* Remove a small chunk from the input queue. */
633 			q_to_b(&sce->q, sc->sc_buffer, n);
634 			DPRINTFN(5, ("ugenread: got %d chars\n", n));
635 
636 			/* Copy the data to the user process. */
637 			error = uiomove(sc->sc_buffer, n, uio);
638 			if (error)
639 				break;
640 		}
641 		break;
642 	case UE_BULK:
643 		if (sce->state & UGEN_BULK_RA) {
644 			DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
645 				     uio->uio_resid, sce->ra_wb_used));
646 			xfer = sce->ra_wb_xfer;
647 
648 			mutex_enter(&sc->sc_lock);
649 			if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
650 				mutex_exit(&sc->sc_lock);
651 				return (EWOULDBLOCK);
652 			}
653 			while (uio->uio_resid > 0 && !error) {
654 				while (sce->ra_wb_used == 0) {
655 					sce->state |= UGEN_ASLP;
656 					DPRINTFN(5,
657 						 ("ugenread: sleep on %p\n",
658 						  sce));
659 					/* "ugenrb" */
660 					error = cv_timedwait_sig(&sce->cv,
661 					    &sc->sc_lock, mstohz(sce->timeout));
662 					DPRINTFN(5,
663 						 ("ugenread: woke, error=%d\n",
664 						  error));
665 					if (sc->sc_dying)
666 						error = EIO;
667 					if (error) {
668 						sce->state &= ~UGEN_ASLP;
669 						break;
670 					}
671 				}
672 
673 				/* Copy data to the process. */
674 				while (uio->uio_resid > 0
675 				       && sce->ra_wb_used > 0) {
676 					n = min(uio->uio_resid,
677 						sce->ra_wb_used);
678 					n = min(n, sce->limit - sce->cur);
679 					error = uiomove(sce->cur, n, uio);
680 					if (error)
681 						break;
682 					sce->cur += n;
683 					sce->ra_wb_used -= n;
684 					if (sce->cur == sce->limit)
685 						sce->cur = sce->ibuf;
686 				}
687 
688 				/*
689 				 * If the transfers stopped because the
690 				 * buffer was full, restart them.
691 				 */
692 				if (sce->state & UGEN_RA_WB_STOP &&
693 				    sce->ra_wb_used < sce->limit - sce->ibuf) {
694 					n = (sce->limit - sce->ibuf)
695 					    - sce->ra_wb_used;
696 					usbd_setup_xfer(xfer,
697 					    sce->pipeh, sce, NULL,
698 					    min(n, sce->ra_wb_xferlen),
699 					    USBD_NO_COPY, USBD_NO_TIMEOUT,
700 					    ugen_bulkra_intr);
701 					sce->state &= ~UGEN_RA_WB_STOP;
702 					err = usbd_transfer(xfer);
703 					if (err != USBD_IN_PROGRESS)
704 						/*
705 						 * The transfer has not been
706 						 * queued.  Setting STOP
707 						 * will make us try
708 						 * again at the next read.
709 						 */
710 						sce->state |= UGEN_RA_WB_STOP;
711 				}
712 			}
713 			mutex_exit(&sc->sc_lock);
714 			break;
715 		}
716 		xfer = usbd_alloc_xfer(sc->sc_udev);
717 		if (xfer == 0)
718 			return (ENOMEM);
719 		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
720 			DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
721 			tn = n;
722 			err = usbd_bulk_transfer(
723 				  xfer, sce->pipeh,
724 				  sce->state & UGEN_SHORT_OK ?
725 				      USBD_SHORT_XFER_OK : 0,
726 				  sce->timeout, sc->sc_buffer, &tn, "ugenrb");
727 			if (err) {
728 				if (err == USBD_INTERRUPTED)
729 					error = EINTR;
730 				else if (err == USBD_TIMEOUT)
731 					error = ETIMEDOUT;
732 				else
733 					error = EIO;
734 				break;
735 			}
736 			DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
737 			error = uiomove(sc->sc_buffer, tn, uio);
738 			if (error || tn < n)
739 				break;
740 		}
741 		usbd_free_xfer(xfer);
742 		break;
743 	case UE_ISOCHRONOUS:
744 		mutex_enter(&sc->sc_lock);
745 		while (sce->cur == sce->fill) {
746 			if (flag & IO_NDELAY) {
747 				mutex_exit(&sc->sc_lock);
748 				return (EWOULDBLOCK);
749 			}
750 			sce->state |= UGEN_ASLP;
751 			/* "ugenri" */
752 			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
753 			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
754 			    mstohz(sce->timeout));
755 			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
756 			if (sc->sc_dying)
757 				error = EIO;
758 			if (error) {
759 				sce->state &= ~UGEN_ASLP;
760 				break;
761 			}
762 		}
763 
764 		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
765 			if(sce->fill > sce->cur)
766 				n = min(sce->fill - sce->cur, uio->uio_resid);
767 			else
768 				n = min(sce->limit - sce->cur, uio->uio_resid);
769 
770 			DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));
771 
772 			/* Copy the data to the user process. */
773 			error = uiomove(sce->cur, n, uio);
774 			if (error)
775 				break;
776 			sce->cur += n;
777 			if (sce->cur >= sce->limit)
778 				sce->cur = sce->ibuf;
779 		}
780 		mutex_exit(&sc->sc_lock);
781 		break;
782 
783 
784 	default:
785 		return (ENXIO);
786 	}
787 	return (error);
788 }
789 
790 int
791 ugenread(dev_t dev, struct uio *uio, int flag)
792 {
793 	int endpt = UGENENDPOINT(dev);
794 	struct ugen_softc *sc;
795 	int error;
796 
797 	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
798 	if (sc == NULL)
799 		return ENXIO;
800 
801 	mutex_enter(&sc->sc_lock);
802 	sc->sc_refcnt++;
803 	mutex_exit(&sc->sc_lock);
804 
805 	error = ugen_do_read(sc, endpt, uio, flag);
806 
807 	mutex_enter(&sc->sc_lock);
808 	if (--sc->sc_refcnt < 0)
809 		usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
810 	mutex_exit(&sc->sc_lock);
811 
812 	return (error);
813 }
814 
815 Static int
816 ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
817 	int flag)
818 {
819 	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
820 	u_int32_t n;
821 	int error = 0;
822 	u_int32_t tn;
823 	char *dbuf;
824 	usbd_xfer_handle xfer;
825 	usbd_status err;
826 
827 	DPRINTFN(5, ("%s: ugenwrite: %d\n", device_xname(sc->sc_dev), endpt));
828 
829 	if (sc->sc_dying)
830 		return (EIO);
831 
832 	if (endpt == USB_CONTROL_ENDPOINT)
833 		return (ENODEV);
834 
835 #ifdef DIAGNOSTIC
836 	if (sce->edesc == NULL) {
837 		printf("ugenwrite: no edesc\n");
838 		return (EIO);
839 	}
840 	if (sce->pipeh == NULL) {
841 		printf("ugenwrite: no pipe\n");
842 		return (EIO);
843 	}
844 #endif
845 
846 	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
847 	case UE_BULK:
848 		if (sce->state & UGEN_BULK_WB) {
849 			DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
850 				     uio->uio_resid, sce->ra_wb_used));
851 			xfer = sce->ra_wb_xfer;
852 
853 			mutex_enter(&sc->sc_lock);
854 			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
855 			    flag & IO_NDELAY) {
856 				mutex_exit(&sc->sc_lock);
857 				return (EWOULDBLOCK);
858 			}
859 			while (uio->uio_resid > 0 && !error) {
860 				while (sce->ra_wb_used ==
861 				       sce->limit - sce->ibuf) {
862 					sce->state |= UGEN_ASLP;
863 					DPRINTFN(5,
864 						 ("ugenwrite: sleep on %p\n",
865 						  sce));
866 					/* "ugenwb" */
867 					error = cv_timedwait_sig(&sce->cv,
868 					    &sc->sc_lock, mstohz(sce->timeout));
869 					DPRINTFN(5,
870 						 ("ugenwrite: woke, error=%d\n",
871 						  error));
872 					if (sc->sc_dying)
873 						error = EIO;
874 					if (error) {
875 						sce->state &= ~UGEN_ASLP;
876 						break;
877 					}
878 				}
879 
880 				/* Copy data from the process. */
881 				while (uio->uio_resid > 0 &&
882 				    sce->ra_wb_used < sce->limit - sce->ibuf) {
883 					n = min(uio->uio_resid,
884 						(sce->limit - sce->ibuf)
885 						 - sce->ra_wb_used);
886 					n = min(n, sce->limit - sce->fill);
887 					error = uiomove(sce->fill, n, uio);
888 					if (error)
889 						break;
890 					sce->fill += n;
891 					sce->ra_wb_used += n;
892 					if (sce->fill == sce->limit)
893 						sce->fill = sce->ibuf;
894 				}
895 
896 				/*
897 				 * If the transfers stopped because the
898 				 * buffer was empty, restart them.
899 				 */
900 				if (sce->state & UGEN_RA_WB_STOP &&
901 				    sce->ra_wb_used > 0) {
902 					dbuf = (char *)usbd_get_buffer(xfer);
903 					n = min(sce->ra_wb_used,
904 						sce->ra_wb_xferlen);
905 					tn = min(n, sce->limit - sce->cur);
906 					memcpy(dbuf, sce->cur, tn);
907 					dbuf += tn;
908 					if (n - tn > 0)
909 						memcpy(dbuf, sce->ibuf,
910 						       n - tn);
911 					usbd_setup_xfer(xfer,
912 					    sce->pipeh, sce, NULL, n,
913 					    USBD_NO_COPY, USBD_NO_TIMEOUT,
914 					    ugen_bulkwb_intr);
915 					sce->state &= ~UGEN_RA_WB_STOP;
916 					err = usbd_transfer(xfer);
917 					if (err != USBD_IN_PROGRESS)
918 						/*
919 						 * The transfer has not been
920 						 * queued.  Setting STOP
921 						 * will make us try again
922 						 * at the next write.
923 						 */
924 						sce->state |= UGEN_RA_WB_STOP;
925 				}
926 			}
927 			mutex_exit(&sc->sc_lock);
928 			break;
929 		}
930 		xfer = usbd_alloc_xfer(sc->sc_udev);
931 		if (xfer == 0)
932 			return (EIO);
933 		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
934 			error = uiomove(sc->sc_buffer, n, uio);
935 			if (error)
936 				break;
937 			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
938 			err = usbd_bulk_transfer(xfer, sce->pipeh, 0,
939 				  sce->timeout, sc->sc_buffer, &n,"ugenwb");
940 			if (err) {
941 				if (err == USBD_INTERRUPTED)
942 					error = EINTR;
943 				else if (err == USBD_TIMEOUT)
944 					error = ETIMEDOUT;
945 				else
946 					error = EIO;
947 				break;
948 			}
949 		}
950 		usbd_free_xfer(xfer);
951 		break;
952 	case UE_INTERRUPT:
953 		xfer = usbd_alloc_xfer(sc->sc_udev);
954 		if (xfer == 0)
955 			return (EIO);
956 		while ((n = min(UGETW(sce->edesc->wMaxPacketSize),
957 		    uio->uio_resid)) != 0) {
958 			error = uiomove(sc->sc_buffer, n, uio);
959 			if (error)
960 				break;
961 			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
962 			err = usbd_intr_transfer(xfer, sce->pipeh, 0,
963 			    sce->timeout, sc->sc_buffer, &n, "ugenwi");
964 			if (err) {
965 				if (err == USBD_INTERRUPTED)
966 					error = EINTR;
967 				else if (err == USBD_TIMEOUT)
968 					error = ETIMEDOUT;
969 				else
970 					error = EIO;
971 				break;
972 			}
973 		}
974 		usbd_free_xfer(xfer);
975 		break;
976 	default:
977 		return (ENXIO);
978 	}
979 	return (error);
980 }
981 
982 int
983 ugenwrite(dev_t dev, struct uio *uio, int flag)
984 {
985 	int endpt = UGENENDPOINT(dev);
986 	struct ugen_softc *sc;
987 	int error;
988 
989 	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
990 	if (sc == NULL)
991 		return ENXIO;
992 
993 	mutex_enter(&sc->sc_lock);
994 	sc->sc_refcnt++;
995 	mutex_exit(&sc->sc_lock);
996 
997 	error = ugen_do_write(sc, endpt, uio, flag);
998 
999 	mutex_enter(&sc->sc_lock);
1000 	if (--sc->sc_refcnt < 0)
1001 		usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
1002 	mutex_exit(&sc->sc_lock);
1003 
1004 	return (error);
1005 }
1006 
1007 int
1008 ugen_activate(device_t self, enum devact act)
1009 {
1010 	struct ugen_softc *sc = device_private(self);
1011 
1012 	switch (act) {
1013 	case DVACT_DEACTIVATE:
1014 		sc->sc_dying = 1;
1015 		return 0;
1016 	default:
1017 		return EOPNOTSUPP;
1018 	}
1019 }
1020 
1021 int
1022 ugen_detach(device_t self, int flags)
1023 {
1024 	struct ugen_softc *sc = device_private(self);
1025 	struct ugen_endpoint *sce;
1026 	int i, dir;
1027 	int maj, mn;
1028 
1029 	DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));
1030 
1031 	sc->sc_dying = 1;
1032 	pmf_device_deregister(self);
1033 	/* Abort all pipes.  Causes processes waiting for transfer to wake. */
1034 	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
1035 		for (dir = OUT; dir <= IN; dir++) {
1036 			sce = &sc->sc_endpoints[i][dir];
1037 			if (sce && sce->pipeh)
1038 				usbd_abort_pipe(sce->pipeh);
1039 		}
1040 	}
1041 
1042 	mutex_enter(&sc->sc_lock);
1043 	if (--sc->sc_refcnt >= 0) {
1044 		/* Wake everyone */
1045 		for (i = 0; i < USB_MAX_ENDPOINTS; i++)
1046 			cv_signal(&sc->sc_endpoints[i][IN].cv);
1047 		/* Wait for processes to go away. */
1048 		usb_detach_wait(sc->sc_dev, &sc->sc_detach_cv, &sc->sc_lock);
1049 	}
1050 	mutex_exit(&sc->sc_lock);
1051 
1052 	/* locate the major number */
1053 	maj = cdevsw_lookup_major(&ugen_cdevsw);
1054 
1055 	/* Nuke the vnodes for any open instances (calls close). */
1056 	mn = device_unit(self) * USB_MAX_ENDPOINTS;
1057 	vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);
1058 
1059 	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev,
1060 			   sc->sc_dev);
1061 
1062 	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
1063 		for (dir = OUT; dir <= IN; dir++) {
1064 			sce = &sc->sc_endpoints[i][dir];
1065 			seldestroy(&sce->rsel);
1066 			cv_destroy(&sce->cv);
1067 		}
1068 	}
1069 
1070 	cv_destroy(&sc->sc_detach_cv);
1071 	mutex_destroy(&sc->sc_lock);
1072 
1073 	return (0);
1074 }
1075 
1076 Static void
1077 ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr, usbd_status status)
1078 {
1079 	struct ugen_endpoint *sce = addr;
1080 	struct ugen_softc *sc = sce->sc;
1081 	u_int32_t count;
1082 	u_char *ibuf;
1083 
1084 	if (status == USBD_CANCELLED)
1085 		return;
1086 
1087 	if (status != USBD_NORMAL_COMPLETION) {
1088 		DPRINTF(("ugenintr: status=%d\n", status));
1089 		if (status == USBD_STALLED)
1090 		    usbd_clear_endpoint_stall_async(sce->pipeh);
1091 		return;
1092 	}
1093 
1094 	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1095 	ibuf = sce->ibuf;
1096 
1097 	DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
1098 		     xfer, status, count));
1099 	DPRINTFN(5, ("          data = %02x %02x %02x\n",
1100 		     ibuf[0], ibuf[1], ibuf[2]));
1101 
1102 	(void)b_to_q(ibuf, count, &sce->q);
1103 
1104 	mutex_enter(&sc->sc_lock);
1105 	if (sce->state & UGEN_ASLP) {
1106 		sce->state &= ~UGEN_ASLP;
1107 		DPRINTFN(5, ("ugenintr: waking %p\n", sce));
1108 		cv_signal(&sce->cv);
1109 	}
1110 	mutex_exit(&sc->sc_lock);
1111 	selnotify(&sce->rsel, 0, 0);
1112 }
1113 
1114 Static void
1115 ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
1116 		usbd_status status)
1117 {
1118 	struct isoreq *req = addr;
1119 	struct ugen_endpoint *sce = req->sce;
1120 	struct ugen_softc *sc = sce->sc;
1121 	u_int32_t count, n;
1122 	int i, isize;
1123 
1124 	/* Return if we are aborting. */
1125 	if (status == USBD_CANCELLED)
1126 		return;
1127 
1128 	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1129 	DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
1130 	    (long)(req - sce->isoreqs), count));
1131 
1132 	/* throw away oldest input if the buffer is full */
1133 	if(sce->fill < sce->cur && sce->cur <= sce->fill + count) {
1134 		sce->cur += count;
1135 		if(sce->cur >= sce->limit)
1136 			sce->cur = sce->ibuf + (sce->cur - sce->limit);
1137 		DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
1138 			     count));
1139 	}
1140 
1141 	isize = UGETW(sce->edesc->wMaxPacketSize);
1142 	for (i = 0; i < UGEN_NISORFRMS; i++) {
1143 		u_int32_t actlen = req->sizes[i];
1144 		char const *tbuf = (char const *)req->dmabuf + isize * i;
1145 
1146 		/* copy data to buffer */
1147 		while (actlen > 0) {
1148 			n = min(actlen, sce->limit - sce->fill);
1149 			memcpy(sce->fill, tbuf, n);
1150 
1151 			tbuf += n;
1152 			actlen -= n;
1153 			sce->fill += n;
1154 			if(sce->fill == sce->limit)
1155 				sce->fill = sce->ibuf;
1156 		}
1157 
1158 		/* setup size for next transfer */
1159 		req->sizes[i] = isize;
1160 	}
1161 
1162 	usbd_setup_isoc_xfer(xfer, sce->pipeh, req, req->sizes, UGEN_NISORFRMS,
1163 			     USBD_NO_COPY, ugen_isoc_rintr);
1164 	(void)usbd_transfer(xfer);
1165 
1166 	mutex_enter(&sc->sc_lock);
1167 	if (sce->state & UGEN_ASLP) {
1168 		sce->state &= ~UGEN_ASLP;
1169 		DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce));
1170 		cv_signal(&sce->cv);
1171 	}
1172 	mutex_exit(&sc->sc_lock);
1173 	selnotify(&sce->rsel, 0, 0);
1174 }
1175 
1176 Static void
1177 ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
1178 		 usbd_status status)
1179 {
1180 	struct ugen_endpoint *sce = addr;
1181 	struct ugen_softc *sc = sce->sc;
1182 	u_int32_t count, n;
1183 	char const *tbuf;
1184 	usbd_status err;
1185 
1186 	/* Return if we are aborting. */
1187 	if (status == USBD_CANCELLED)
1188 		return;
1189 
1190 	if (status != USBD_NORMAL_COMPLETION) {
1191 		DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
1192 		sce->state |= UGEN_RA_WB_STOP;
1193 		if (status == USBD_STALLED)
1194 		    usbd_clear_endpoint_stall_async(sce->pipeh);
1195 		return;
1196 	}
1197 
1198 	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1199 
1200 	/* Keep track of how much is in the buffer. */
1201 	sce->ra_wb_used += count;
1202 
1203 	/* Copy data to buffer. */
1204 	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
1205 	n = min(count, sce->limit - sce->fill);
1206 	memcpy(sce->fill, tbuf, n);
1207 	tbuf += n;
1208 	count -= n;
1209 	sce->fill += n;
1210 	if (sce->fill == sce->limit)
1211 		sce->fill = sce->ibuf;
1212 	if (count > 0) {
1213 		memcpy(sce->fill, tbuf, count);
1214 		sce->fill += count;
1215 	}
1216 
1217 	/* Set up the next request if necessary. */
1218 	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
1219 	if (n > 0) {
1220 		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
1221 		    min(n, sce->ra_wb_xferlen), USBD_NO_COPY,
1222 		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
1223 		err = usbd_transfer(xfer);
1224 		if (err != USBD_IN_PROGRESS) {
1225 			printf("ugen_bulkra_intr: error=%d\n", err);
1226 			/*
1227 			 * The transfer has not been queued.  Setting STOP
1228 			 * will make us try again at the next read.
1229 			 */
1230 			sce->state |= UGEN_RA_WB_STOP;
1231 		}
1232 	}
1233 	else
1234 		sce->state |= UGEN_RA_WB_STOP;
1235 
1236 	mutex_enter(&sc->sc_lock);
1237 	if (sce->state & UGEN_ASLP) {
1238 		sce->state &= ~UGEN_ASLP;
1239 		DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce));
1240 		cv_signal(&sce->cv);
1241 	}
1242 	mutex_exit(&sc->sc_lock);
1243 	selnotify(&sce->rsel, 0, 0);
1244 }
1245 
1246 Static void
1247 ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
1248 		 usbd_status status)
1249 {
1250 	struct ugen_endpoint *sce = addr;
1251 	struct ugen_softc *sc = sce->sc;
1252 	u_int32_t count, n;
1253 	char *tbuf;
1254 	usbd_status err;
1255 
1256 	/* Return if we are aborting. */
1257 	if (status == USBD_CANCELLED)
1258 		return;
1259 
1260 	if (status != USBD_NORMAL_COMPLETION) {
1261 		DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
1262 		sce->state |= UGEN_RA_WB_STOP;
1263 		if (status == USBD_STALLED)
1264 		    usbd_clear_endpoint_stall_async(sce->pipeh);
1265 		return;
1266 	}
1267 
1268 	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1269 
1270 	/* Keep track of how much is in the buffer. */
1271 	sce->ra_wb_used -= count;
1272 
1273 	/* Update buffer pointers. */
1274 	sce->cur += count;
1275 	if (sce->cur >= sce->limit)
1276 		sce->cur = sce->ibuf + (sce->cur - sce->limit);
1277 
1278 	/* Set up next request if necessary. */
1279 	if (sce->ra_wb_used > 0) {
1280 		/* copy data from buffer */
1281 		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
1282 		count = min(sce->ra_wb_used, sce->ra_wb_xferlen);
1283 		n = min(count, sce->limit - sce->cur);
1284 		memcpy(tbuf, sce->cur, n);
1285 		tbuf += n;
1286 		if (count - n > 0)
1287 			memcpy(tbuf, sce->ibuf, count - n);
1288 
1289 		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
1290 		    count, USBD_NO_COPY, USBD_NO_TIMEOUT, ugen_bulkwb_intr);
1291 		err = usbd_transfer(xfer);
1292 		if (err != USBD_IN_PROGRESS) {
1293 			printf("ugen_bulkwb_intr: error=%d\n", err);
1294 			/*
1295 			 * The transfer has not been queued.  Setting STOP
1296 			 * will make us try again at the next write.
1297 			 */
1298 			sce->state |= UGEN_RA_WB_STOP;
1299 		}
1300 	}
1301 	else
1302 		sce->state |= UGEN_RA_WB_STOP;
1303 
1304 	mutex_enter(&sc->sc_lock);
1305 	if (sce->state & UGEN_ASLP) {
1306 		sce->state &= ~UGEN_ASLP;
1307 		DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce));
1308 		cv_signal(&sce->cv);
1309 	}
1310 	mutex_exit(&sc->sc_lock);
1311 	selnotify(&sce->rsel, 0, 0);
1312 }
1313 
1314 Static usbd_status
1315 ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
1316 {
1317 	usbd_interface_handle iface;
1318 	usb_endpoint_descriptor_t *ed;
1319 	usbd_status err;
1320 	struct ugen_endpoint *sce;
1321 	u_int8_t niface, nendpt, endptno, endpt;
1322 	int dir;
1323 
1324 	DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));
1325 
1326 	err = usbd_interface_count(sc->sc_udev, &niface);
1327 	if (err)
1328 		return (err);
1329 	if (ifaceidx < 0 || ifaceidx >= niface)
1330 		return (USBD_INVAL);
1331 
1332 	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1333 	if (err)
1334 		return (err);
1335 	err = usbd_endpoint_count(iface, &nendpt);
1336 	if (err)
1337 		return (err);
1338 	/* XXX should only do this after setting new altno has succeeded */
1339 	for (endptno = 0; endptno < nendpt; endptno++) {
1340 		ed = usbd_interface2endpoint_descriptor(iface,endptno);
1341 		endpt = ed->bEndpointAddress;
1342 		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1343 		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1344 		sce->sc = 0;
1345 		sce->edesc = 0;
1346 		sce->iface = 0;
1347 	}
1348 
1349 	/* change setting */
1350 	err = usbd_set_interface(iface, altno);
1351 	if (err)
1352 		return (err);
1353 
1354 	err = usbd_endpoint_count(iface, &nendpt);
1355 	if (err)
1356 		return (err);
1357 	for (endptno = 0; endptno < nendpt; endptno++) {
1358 		ed = usbd_interface2endpoint_descriptor(iface,endptno);
1359 		KASSERT(ed != NULL);
1360 		endpt = ed->bEndpointAddress;
1361 		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1362 		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1363 		sce->sc = sc;
1364 		sce->edesc = ed;
1365 		sce->iface = iface;
1366 	}
1367 	return (0);
1368 }
1369 
1370 /* Retrieve a complete descriptor for a certain device and index. */
1371 Static usb_config_descriptor_t *
1372 ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
1373 {
1374 	usb_config_descriptor_t *cdesc, *tdesc, cdescr;
1375 	int len;
1376 	usbd_status err;
1377 
1378 	if (index == USB_CURRENT_CONFIG_INDEX) {
1379 		tdesc = usbd_get_config_descriptor(sc->sc_udev);
1380 		len = UGETW(tdesc->wTotalLength);
1381 		if (lenp)
1382 			*lenp = len;
1383 		cdesc = malloc(len, M_TEMP, M_WAITOK);
1384 		memcpy(cdesc, tdesc, len);
1385 		DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
1386 	} else {
1387 		err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
1388 		if (err)
1389 			return (0);
1390 		len = UGETW(cdescr.wTotalLength);
1391 		DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
1392 		if (lenp)
1393 			*lenp = len;
1394 		cdesc = malloc(len, M_TEMP, M_WAITOK);
1395 		err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
1396 		if (err) {
1397 			free(cdesc, M_TEMP);
1398 			return (0);
1399 		}
1400 	}
1401 	return (cdesc);
1402 }
1403 
1404 Static int
1405 ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
1406 {
1407 	usbd_interface_handle iface;
1408 	usbd_status err;
1409 
1410 	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1411 	if (err)
1412 		return (-1);
1413 	return (usbd_get_interface_altindex(iface));
1414 }
1415 
1416 Static int
1417 ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
1418 	      void *addr, int flag, struct lwp *l)
1419 {
1420 	struct ugen_endpoint *sce;
1421 	usbd_status err;
1422 	usbd_interface_handle iface;
1423 	struct usb_config_desc *cd;
1424 	usb_config_descriptor_t *cdesc;
1425 	struct usb_interface_desc *id;
1426 	usb_interface_descriptor_t *idesc;
1427 	struct usb_endpoint_desc *ed;
1428 	usb_endpoint_descriptor_t *edesc;
1429 	struct usb_alt_interface *ai;
1430 	struct usb_string_desc *si;
1431 	u_int8_t conf, alt;
1432 
1433 	DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
1434 	if (sc->sc_dying)
1435 		return (EIO);
1436 
1437 	switch (cmd) {
1438 	case FIONBIO:
1439 		/* All handled in the upper FS layer. */
1440 		return (0);
1441 	case USB_SET_SHORT_XFER:
1442 		if (endpt == USB_CONTROL_ENDPOINT)
1443 			return (EINVAL);
1444 		/* This flag only affects read */
1445 		sce = &sc->sc_endpoints[endpt][IN];
1446 		if (sce == NULL || sce->pipeh == NULL)
1447 			return (EINVAL);
1448 		if (*(int *)addr)
1449 			sce->state |= UGEN_SHORT_OK;
1450 		else
1451 			sce->state &= ~UGEN_SHORT_OK;
1452 		return (0);
1453 	case USB_SET_TIMEOUT:
1454 		sce = &sc->sc_endpoints[endpt][IN];
1455 		if (sce == NULL
1456 		    /* XXX this shouldn't happen, but the distinction between
1457 		       input and output pipes isn't clear enough.
1458 		       || sce->pipeh == NULL */
1459 			)
1460 			return (EINVAL);
1461 		sce->timeout = *(int *)addr;
1462 		return (0);
1463 	case USB_SET_BULK_RA:
1464 		if (endpt == USB_CONTROL_ENDPOINT)
1465 			return (EINVAL);
1466 		sce = &sc->sc_endpoints[endpt][IN];
1467 		if (sce == NULL || sce->pipeh == NULL)
1468 			return (EINVAL);
1469 		edesc = sce->edesc;
1470 		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1471 			return (EINVAL);
1472 
1473 		if (*(int *)addr) {
1474 			/* Only turn RA on if it's currently off. */
1475 			if (sce->state & UGEN_BULK_RA)
1476 				return (0);
1477 
1478 			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1479 				/* shouldn't happen */
1480 				return (EINVAL);
1481 			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
1482 			if (sce->ra_wb_xfer == NULL)
1483 				return (ENOMEM);
1484 			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1485 			/*
1486 			 * Set up a dmabuf because we reuse the xfer with
1487 			 * the same (max) request length like isoc.
1488 			 */
1489 			if (usbd_alloc_buffer(sce->ra_wb_xfer,
1490 					      sce->ra_wb_xferlen) == 0) {
1491 				usbd_free_xfer(sce->ra_wb_xfer);
1492 				return (ENOMEM);
1493 			}
1494 			sce->ibuf = malloc(sce->ra_wb_bufsize,
1495 					   M_USBDEV, M_WAITOK);
1496 			sce->fill = sce->cur = sce->ibuf;
1497 			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1498 			sce->ra_wb_used = 0;
1499 			sce->state |= UGEN_BULK_RA;
1500 			sce->state &= ~UGEN_RA_WB_STOP;
1501 			/* Now start reading. */
1502 			usbd_setup_xfer(sce->ra_wb_xfer, sce->pipeh, sce,
1503 			    NULL,
1504 			    min(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
1505 			    USBD_NO_COPY, USBD_NO_TIMEOUT,
1506 			    ugen_bulkra_intr);
1507 			err = usbd_transfer(sce->ra_wb_xfer);
1508 			if (err != USBD_IN_PROGRESS) {
1509 				sce->state &= ~UGEN_BULK_RA;
1510 				free(sce->ibuf, M_USBDEV);
1511 				sce->ibuf = NULL;
1512 				usbd_free_xfer(sce->ra_wb_xfer);
1513 				return (EIO);
1514 			}
1515 		} else {
1516 			/* Only turn RA off if it's currently on. */
1517 			if (!(sce->state & UGEN_BULK_RA))
1518 				return (0);
1519 
1520 			sce->state &= ~UGEN_BULK_RA;
1521 			usbd_abort_pipe(sce->pipeh);
1522 			usbd_free_xfer(sce->ra_wb_xfer);
1523 			/*
1524 			 * XXX Discard whatever's in the buffer, but we
1525 			 * should keep it around and drain the buffer
1526 			 * instead.
1527 			 */
1528 			free(sce->ibuf, M_USBDEV);
1529 			sce->ibuf = NULL;
1530 		}
1531 		return (0);
1532 	case USB_SET_BULK_WB:
1533 		if (endpt == USB_CONTROL_ENDPOINT)
1534 			return (EINVAL);
1535 		sce = &sc->sc_endpoints[endpt][OUT];
1536 		if (sce == NULL || sce->pipeh == NULL)
1537 			return (EINVAL);
1538 		edesc = sce->edesc;
1539 		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1540 			return (EINVAL);
1541 
1542 		if (*(int *)addr) {
1543 			/* Only turn WB on if it's currently off. */
1544 			if (sce->state & UGEN_BULK_WB)
1545 				return (0);
1546 
1547 			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1548 				/* shouldn't happen */
1549 				return (EINVAL);
1550 			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
1551 			if (sce->ra_wb_xfer == NULL)
1552 				return (ENOMEM);
1553 			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1554 			/*
1555 			 * Set up a dmabuf because we reuse the xfer with
1556 			 * the same (max) request length like isoc.
1557 			 */
1558 			if (usbd_alloc_buffer(sce->ra_wb_xfer,
1559 					      sce->ra_wb_xferlen) == 0) {
1560 				usbd_free_xfer(sce->ra_wb_xfer);
1561 				return (ENOMEM);
1562 			}
1563 			sce->ibuf = malloc(sce->ra_wb_bufsize,
1564 					   M_USBDEV, M_WAITOK);
1565 			sce->fill = sce->cur = sce->ibuf;
1566 			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1567 			sce->ra_wb_used = 0;
1568 			sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
1569 		} else {
1570 			/* Only turn WB off if it's currently on. */
1571 			if (!(sce->state & UGEN_BULK_WB))
1572 				return (0);
1573 
1574 			sce->state &= ~UGEN_BULK_WB;
1575 			/*
1576 			 * XXX Discard whatever's in the buffer, but we
1577 			 * should keep it around and keep writing to
1578 			 * drain the buffer instead.
1579 			 */
1580 			usbd_abort_pipe(sce->pipeh);
1581 			usbd_free_xfer(sce->ra_wb_xfer);
1582 			free(sce->ibuf, M_USBDEV);
1583 			sce->ibuf = NULL;
1584 		}
1585 		return (0);
1586 	case USB_SET_BULK_RA_OPT:
1587 	case USB_SET_BULK_WB_OPT:
1588 	{
1589 		struct usb_bulk_ra_wb_opt *opt;
1590 
1591 		if (endpt == USB_CONTROL_ENDPOINT)
1592 			return (EINVAL);
1593 		opt = (struct usb_bulk_ra_wb_opt *)addr;
1594 		if (cmd == USB_SET_BULK_RA_OPT)
1595 			sce = &sc->sc_endpoints[endpt][IN];
1596 		else
1597 			sce = &sc->sc_endpoints[endpt][OUT];
1598 		if (sce == NULL || sce->pipeh == NULL)
1599 			return (EINVAL);
1600 		if (opt->ra_wb_buffer_size < 1 ||
1601 		    opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
1602 		    opt->ra_wb_request_size < 1 ||
1603 		    opt->ra_wb_request_size > opt->ra_wb_buffer_size)
1604 			return (EINVAL);
1605 		/*
1606 		 * XXX These changes do not take effect until the
1607 		 * next time RA/WB mode is enabled but they ought to
1608 		 * take effect immediately.
1609 		 */
1610 		sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
1611 		sce->ra_wb_reqsize = opt->ra_wb_request_size;
1612 		return (0);
1613 	}
1614 	default:
1615 		break;
1616 	}
1617 
1618 	if (endpt != USB_CONTROL_ENDPOINT)
1619 		return (EINVAL);
1620 
1621 	switch (cmd) {
1622 #ifdef UGEN_DEBUG
1623 	case USB_SETDEBUG:
1624 		ugendebug = *(int *)addr;
1625 		break;
1626 #endif
1627 	case USB_GET_CONFIG:
1628 		err = usbd_get_config(sc->sc_udev, &conf);
1629 		if (err)
1630 			return (EIO);
1631 		*(int *)addr = conf;
1632 		break;
1633 	case USB_SET_CONFIG:
1634 		if (!(flag & FWRITE))
1635 			return (EPERM);
1636 		err = ugen_set_config(sc, *(int *)addr);
1637 		switch (err) {
1638 		case USBD_NORMAL_COMPLETION:
1639 			break;
1640 		case USBD_IN_USE:
1641 			return (EBUSY);
1642 		default:
1643 			return (EIO);
1644 		}
1645 		break;
1646 	case USB_GET_ALTINTERFACE:
1647 		ai = (struct usb_alt_interface *)addr;
1648 		err = usbd_device2interface_handle(sc->sc_udev,
1649 			  ai->uai_interface_index, &iface);
1650 		if (err)
1651 			return (EINVAL);
1652 		idesc = usbd_get_interface_descriptor(iface);
1653 		if (idesc == NULL)
1654 			return (EIO);
1655 		ai->uai_alt_no = idesc->bAlternateSetting;
1656 		break;
1657 	case USB_SET_ALTINTERFACE:
1658 		if (!(flag & FWRITE))
1659 			return (EPERM);
1660 		ai = (struct usb_alt_interface *)addr;
1661 		err = usbd_device2interface_handle(sc->sc_udev,
1662 			  ai->uai_interface_index, &iface);
1663 		if (err)
1664 			return (EINVAL);
1665 		err = ugen_set_interface(sc, ai->uai_interface_index,
1666 		    ai->uai_alt_no);
1667 		if (err)
1668 			return (EINVAL);
1669 		break;
1670 	case USB_GET_NO_ALT:
1671 		ai = (struct usb_alt_interface *)addr;
1672 		cdesc = ugen_get_cdesc(sc, ai->uai_config_index, 0);
1673 		if (cdesc == NULL)
1674 			return (EINVAL);
1675 		idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
1676 		if (idesc == NULL) {
1677 			free(cdesc, M_TEMP);
1678 			return (EINVAL);
1679 		}
1680 		ai->uai_alt_no = usbd_get_no_alts(cdesc,
1681 		    idesc->bInterfaceNumber);
1682 		free(cdesc, M_TEMP);
1683 		break;
1684 	case USB_GET_DEVICE_DESC:
1685 		*(usb_device_descriptor_t *)addr =
1686 			*usbd_get_device_descriptor(sc->sc_udev);
1687 		break;
1688 	case USB_GET_CONFIG_DESC:
1689 		cd = (struct usb_config_desc *)addr;
1690 		cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, 0);
1691 		if (cdesc == NULL)
1692 			return (EINVAL);
1693 		cd->ucd_desc = *cdesc;
1694 		free(cdesc, M_TEMP);
1695 		break;
1696 	case USB_GET_INTERFACE_DESC:
1697 		id = (struct usb_interface_desc *)addr;
1698 		cdesc = ugen_get_cdesc(sc, id->uid_config_index, 0);
1699 		if (cdesc == NULL)
1700 			return (EINVAL);
1701 		if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
1702 		    id->uid_alt_index == USB_CURRENT_ALT_INDEX)
1703 			alt = ugen_get_alt_index(sc, id->uid_interface_index);
1704 		else
1705 			alt = id->uid_alt_index;
1706 		idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
1707 		if (idesc == NULL) {
1708 			free(cdesc, M_TEMP);
1709 			return (EINVAL);
1710 		}
1711 		id->uid_desc = *idesc;
1712 		free(cdesc, M_TEMP);
1713 		break;
1714 	case USB_GET_ENDPOINT_DESC:
1715 		ed = (struct usb_endpoint_desc *)addr;
1716 		cdesc = ugen_get_cdesc(sc, ed->ued_config_index, 0);
1717 		if (cdesc == NULL)
1718 			return (EINVAL);
1719 		if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
1720 		    ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
1721 			alt = ugen_get_alt_index(sc, ed->ued_interface_index);
1722 		else
1723 			alt = ed->ued_alt_index;
1724 		edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
1725 					alt, ed->ued_endpoint_index);
1726 		if (edesc == NULL) {
1727 			free(cdesc, M_TEMP);
1728 			return (EINVAL);
1729 		}
1730 		ed->ued_desc = *edesc;
1731 		free(cdesc, M_TEMP);
1732 		break;
1733 	case USB_GET_FULL_DESC:
1734 	{
1735 		int len;
1736 		struct iovec iov;
1737 		struct uio uio;
1738 		struct usb_full_desc *fd = (struct usb_full_desc *)addr;
1739 		int error;
1740 
1741 		cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &len);
1742 		if (cdesc == NULL)
1743 			return (EINVAL);
1744 		if (len > fd->ufd_size)
1745 			len = fd->ufd_size;
1746 		iov.iov_base = (void *)fd->ufd_data;
1747 		iov.iov_len = len;
1748 		uio.uio_iov = &iov;
1749 		uio.uio_iovcnt = 1;
1750 		uio.uio_resid = len;
1751 		uio.uio_offset = 0;
1752 		uio.uio_rw = UIO_READ;
1753 		uio.uio_vmspace = l->l_proc->p_vmspace;
1754 		error = uiomove((void *)cdesc, len, &uio);
1755 		free(cdesc, M_TEMP);
1756 		return (error);
1757 	}
1758 	case USB_GET_STRING_DESC: {
1759 		int len;
1760 		si = (struct usb_string_desc *)addr;
1761 		err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
1762 			  si->usd_language_id, &si->usd_desc, &len);
1763 		if (err)
1764 			return (EINVAL);
1765 		break;
1766 	}
1767 	case USB_DO_REQUEST:
1768 	{
1769 		struct usb_ctl_request *ur = (void *)addr;
1770 		int len = UGETW(ur->ucr_request.wLength);
1771 		struct iovec iov;
1772 		struct uio uio;
1773 		void *ptr = 0;
1774 		usbd_status xerr;
1775 		int error = 0;
1776 
1777 		if (!(flag & FWRITE))
1778 			return (EPERM);
1779 		/* Avoid requests that would damage the bus integrity. */
1780 		if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
1781 		     ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
1782 		    (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
1783 		     ur->ucr_request.bRequest == UR_SET_CONFIG) ||
1784 		    (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
1785 		     ur->ucr_request.bRequest == UR_SET_INTERFACE))
1786 			return (EINVAL);
1787 
1788 		if (len < 0 || len > 32767)
1789 			return (EINVAL);
1790 		if (len != 0) {
1791 			iov.iov_base = (void *)ur->ucr_data;
1792 			iov.iov_len = len;
1793 			uio.uio_iov = &iov;
1794 			uio.uio_iovcnt = 1;
1795 			uio.uio_resid = len;
1796 			uio.uio_offset = 0;
1797 			uio.uio_rw =
1798 				ur->ucr_request.bmRequestType & UT_READ ?
1799 				UIO_READ : UIO_WRITE;
1800 			uio.uio_vmspace = l->l_proc->p_vmspace;
1801 			ptr = malloc(len, M_TEMP, M_WAITOK);
1802 			if (uio.uio_rw == UIO_WRITE) {
1803 				error = uiomove(ptr, len, &uio);
1804 				if (error)
1805 					goto ret;
1806 			}
1807 		}
1808 		sce = &sc->sc_endpoints[endpt][IN];
1809 		xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
1810 			  ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
1811 		if (xerr) {
1812 			error = EIO;
1813 			goto ret;
1814 		}
1815 		if (len != 0) {
1816 			if (uio.uio_rw == UIO_READ) {
1817 				error = uiomove(ptr, len, &uio);
1818 				if (error)
1819 					goto ret;
1820 			}
1821 		}
1822 	ret:
1823 		if (ptr)
1824 			free(ptr, M_TEMP);
1825 		return (error);
1826 	}
1827 	case USB_GET_DEVICEINFO:
1828 		usbd_fill_deviceinfo(sc->sc_udev,
1829 				     (struct usb_device_info *)addr, 0);
1830 		break;
1831 #ifdef COMPAT_30
1832 	case USB_GET_DEVICEINFO_OLD:
1833 		usbd_fill_deviceinfo_old(sc->sc_udev,
1834 					 (struct usb_device_info_old *)addr, 0);
1835 
1836 		break;
1837 #endif
1838 	default:
1839 		return (EINVAL);
1840 	}
1841 	return (0);
1842 }
1843 
1844 int
1845 ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1846 {
1847 	int endpt = UGENENDPOINT(dev);
1848 	struct ugen_softc *sc;
1849 	int error;
1850 
1851 	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
1852 	if (sc == NULL)
1853 		return ENXIO;
1854 
1855 	sc->sc_refcnt++;
1856 	error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
1857 	if (--sc->sc_refcnt < 0)
1858 		usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
1859 	return (error);
1860 }
1861 
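/*
 * poll(2) entry point.  The control endpoint cannot be polled; for
 * other endpoints, readiness is reported per transfer type under
 * sc_lock, and the caller is recorded with selrecord() when no data
 * (or buffer space) is available yet.
 */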
1862 int
1863 ugenpoll(dev_t dev, int events, struct lwp *l)
1864 {
1865 	struct ugen_softc *sc;
1866 	struct ugen_endpoint *sce_in, *sce_out;
1867 	int revents = 0;
1868 
1869 	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
1870 	if (sc == NULL)
1871 		return ENXIO;
1872 
1873 	if (sc->sc_dying)
1874 		return (POLLHUP);
1875 
1876 	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
1877 		return ENODEV;
1878 
1879 	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
1880 	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
1881 	if (sce_in == NULL && sce_out == NULL)
1882 		return (POLLERR);
1883 #ifdef DIAGNOSTIC
1884 	if (!sce_in->edesc && !sce_out->edesc) {
1885 		printf("ugenpoll: no edesc\n");
1886 		return (POLLERR);
1887 	}
1888 	/* It's possible to have only one pipe open. */
1889 	if (!sce_in->pipeh && !sce_out->pipeh) {
1890 		printf("ugenpoll: no pipe\n");
1891 		return (POLLERR);
1892 	}
1893 #endif
1894 
1895 	mutex_enter(&sc->sc_lock);
1896 	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
1897 		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
1898 		case UE_INTERRUPT:
1899 			if (sce_in->q.c_cc > 0)
1900 				revents |= events & (POLLIN | POLLRDNORM);
1901 			else
1902 				selrecord(l, &sce_in->rsel);
1903 			break;
1904 		case UE_ISOCHRONOUS:
1905 			if (sce_in->cur != sce_in->fill)
1906 				revents |= events & (POLLIN | POLLRDNORM);
1907 			else
1908 				selrecord(l, &sce_in->rsel);
1909 			break;
1910 		case UE_BULK:
1911 			if (sce_in->state & UGEN_BULK_RA) {
1912 				if (sce_in->ra_wb_used > 0)
1913 					revents |= events &
1914 					    (POLLIN | POLLRDNORM);
1915 				else
1916 					selrecord(l, &sce_in->rsel);
1917 				break;
1918 			}
1919 			/*
1920 			 * We have no easy way of determining if a read will
1921 			 * yield any data or a write will happen.
1922 			 * Pretend they will.
1923 			 */
1924 			revents |= events & (POLLIN | POLLRDNORM);
1925 			break;
1926 		default:
1927 			break;
1928 		}
1929 	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
1930 		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
1931 		case UE_INTERRUPT:
1932 		case UE_ISOCHRONOUS:
1933 			/* XXX unimplemented */
1934 			break;
1935 		case UE_BULK:
1936 			if (sce_out->state & UGEN_BULK_WB) {
1937 				if (sce_out->ra_wb_used <
1938 				    sce_out->limit - sce_out->ibuf)
1939 					revents |= events &
1940 					    (POLLOUT | POLLWRNORM);
1941 				else
1942 					selrecord(l, &sce_out->rsel);
1943 				break;
1944 			}
1945 			/*
1946 			 * We have no easy way of determining whether a write
1947 			 * will complete without blocking.
1948 			 * Pretend it will.
1949 			 */
1950 			revents |= events & (POLLOUT | POLLWRNORM);
1951 			break;
1952 		default:
1953 			break;
1954 		}
1955 
1956 	mutex_exit(&sc->sc_lock);
1957 
1958 	return (revents);
1959 }
1960 
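/*
 * Knote detach: unhook the note from the endpoint's selinfo klist,
 * under the softc lock.
 */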
1961 static void
1962 filt_ugenrdetach(struct knote *kn)
1963 {
1964 	struct ugen_endpoint *sce = kn->kn_hook;
1965 	struct ugen_softc *sc = sce->sc;
1966 
1967 	mutex_enter(&sc->sc_lock);
1968 	SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
1969 	mutex_exit(&sc->sc_lock);
1970 }
1971 
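/*
 * kqueue read filter for interrupt-in endpoints: ready when the
 * endpoint's clist holds buffered data.
 */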
1972 static int
1973 filt_ugenread_intr(struct knote *kn, long hint)
1974 {
1975 	struct ugen_endpoint *sce = kn->kn_hook;
1976 
1977 	kn->kn_data = sce->q.c_cc;
1978 	return (kn->kn_data > 0);
1979 }
1980 
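/*
 * kqueue read filter for isochronous-in endpoints: ready when the ring
 * buffer is non-empty; kn_data is the number of buffered bytes,
 * allowing for wrap-around at the buffer limit.
 */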
1981 static int
1982 filt_ugenread_isoc(struct knote *kn, long hint)
1983 {
1984 	struct ugen_endpoint *sce = kn->kn_hook;
1985 
1986 	if (sce->cur == sce->fill)
1987 		return (0);
1988 
1989 	if (sce->cur < sce->fill)
1990 		kn->kn_data = sce->fill - sce->cur;
1991 	else
1992 		kn->kn_data = (sce->limit - sce->cur) +
1993 		    (sce->fill - sce->ibuf);
1994 
1995 	return (1);
1996 }
1997 
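/*
 * kqueue read filter for bulk-in endpoints: without read-ahead
 * (UGEN_BULK_RA) there is no way to tell whether a read would block,
 * so emulate "seltrue"; with read-ahead, ready when the buffer holds
 * data.
 */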
1998 static int
1999 filt_ugenread_bulk(struct knote *kn, long hint)
2000 {
2001 	struct ugen_endpoint *sce = kn->kn_hook;
2002 
2003 	if (!(sce->state & UGEN_BULK_RA))
2004 		/*
2005 		 * We have no easy way of determining if a read will
2006 		 * yield any data or a write will happen.
2007 		 * So, emulate "seltrue".
2008 		 */
2009 		return (filt_seltrue(kn, hint));
2010 
2011 	if (sce->ra_wb_used == 0)
2012 		return (0);
2013 
2014 	kn->kn_data = sce->ra_wb_used;
2015 
2016 	return (1);
2017 }
2018 
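/*
 * kqueue write filter for bulk-out endpoints: without write-behind
 * (UGEN_BULK_WB) emulate "seltrue"; with write-behind, ready while the
 * buffer still has free space.
 */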
2019 static int
2020 filt_ugenwrite_bulk(struct knote *kn, long hint)
2021 {
2022 	struct ugen_endpoint *sce = kn->kn_hook;
2023 
2024 	if (!(sce->state & UGEN_BULK_WB))
2025 		/*
2026 		 * We have no easy way of determining if a write
2027 		 * will complete without blocking.
2028 		 * So, emulate "seltrue".
2029 		 */
2030 		return (filt_seltrue(kn, hint));
2031 
2032 	if (sce->ra_wb_used == sce->limit - sce->ibuf)
2033 		return (0);
2034 
2035 	kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;
2036 
2037 	return (1);
2038 }
2039 
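/* Filter ops per transfer type; all share the same detach hook. */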
2040 static const struct filterops ugenread_intr_filtops =
2041 	{ 1, NULL, filt_ugenrdetach, filt_ugenread_intr };
2042 
2043 static const struct filterops ugenread_isoc_filtops =
2044 	{ 1, NULL, filt_ugenrdetach, filt_ugenread_isoc };
2045 
2046 static const struct filterops ugenread_bulk_filtops =
2047 	{ 1, NULL, filt_ugenrdetach, filt_ugenread_bulk };
2048 
2049 static const struct filterops ugenwrite_bulk_filtops =
2050 	{ 1, NULL, filt_ugenrdetach, filt_ugenwrite_bulk };
2051 
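/*
 * kqueue(2) attach routine.  Select the filter ops matching the
 * endpoint's transfer type and direction, then hook the knote onto the
 * endpoint's klist under the softc lock.  The control endpoint is not
 * supported.
 */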
2052 int
2053 ugenkqfilter(dev_t dev, struct knote *kn)
2054 {
2055 	struct ugen_softc *sc;
2056 	struct ugen_endpoint *sce;
2057 	struct klist *klist;
2058 
2059 	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
2060 	if (sc == NULL)
2061 		return ENXIO;
2062 
2063 	if (sc->sc_dying)
2064 		return (ENXIO);
2065 
2066 	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
2067 		return ENODEV;
2068 
2069 	switch (kn->kn_filter) {
2070 	case EVFILT_READ:
2071 		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
2072 		if (sce == NULL)
2073 			return (EINVAL);
2074 
2075 		klist = &sce->rsel.sel_klist;
2076 		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
2077 		case UE_INTERRUPT:
2078 			kn->kn_fop = &ugenread_intr_filtops;
2079 			break;
2080 		case UE_ISOCHRONOUS:
2081 			kn->kn_fop = &ugenread_isoc_filtops;
2082 			break;
2083 		case UE_BULK:
2084 			kn->kn_fop = &ugenread_bulk_filtops;
2085 			break;
2086 		default:
2087 			return (EINVAL);
2088 		}
2089 		break;
2090 
2091 	case EVFILT_WRITE:
2092 		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
2093 		if (sce == NULL)
2094 			return (EINVAL);
2095 
2096 		klist = &sce->rsel.sel_klist;
2097 		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
2098 		case UE_INTERRUPT:
2099 		case UE_ISOCHRONOUS:
2100 			/* XXX poll doesn't support this */
2101 			return (EINVAL);
2102 
2103 		case UE_BULK:
2104 			kn->kn_fop = &ugenwrite_bulk_filtops;
2105 			break;
2106 		default:
2107 			return (EINVAL);
2108 		}
2109 		break;
2110 
2111 	default:
2112 		return (EINVAL);
2113 	}
2114 
2115 	kn->kn_hook = sce;
2116 
2117 	mutex_enter(&sc->sc_lock);
2118 	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
2119 	mutex_exit(&sc->sc_lock);
2120 
2121 	return (0);
2122 }
2123