1 /* $NetBSD: ugen.c,v 1.177 2024/03/29 19:30:09 thorpej Exp $ */
2
3 /*
4 * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Lennart Augustsson (lennart@augustsson.net) at
9 * Carlstedt Research & Technology.
10 *
11 * Copyright (c) 2006 BBN Technologies Corp. All rights reserved.
12 * Effort sponsored in part by the Defense Advanced Research Projects
13 * Agency (DARPA) and the Department of the Interior National Business
14 * Center under agreement number NBCHC050166.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.177 2024/03/29 19:30:09 thorpej Exp $");
41
42 #ifdef _KERNEL_OPT
43 #include "opt_compat_netbsd.h"
44 #include "opt_usb.h"
45 #endif
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/kernel.h>
50 #include <sys/kmem.h>
51 #include <sys/device.h>
52 #include <sys/ioctl.h>
53 #include <sys/conf.h>
54 #include <sys/tty.h>
55 #include <sys/file.h>
56 #include <sys/select.h>
57 #include <sys/proc.h>
58 #include <sys/vnode.h>
59 #include <sys/poll.h>
60 #include <sys/compat_stub.h>
61 #include <sys/module.h>
62 #include <sys/rbtree.h>
63
64 #include <dev/usb/usb.h>
65 #include <dev/usb/usbdi.h>
66 #include <dev/usb/usbdi_util.h>
67 #include <dev/usb/usbhist.h>
68
69 #include "ioconf.h"
70
71 #ifdef USB_DEBUG
72 #ifndef UGEN_DEBUG
73 #define ugendebug 0
74 #else
75
76 #ifndef UGEN_DEBUG_DEFAULT
77 #define UGEN_DEBUG_DEFAULT 0
78 #endif
79
80 int ugendebug = UGEN_DEBUG_DEFAULT;
81
/*
 * Create the hw.ugen sysctl subtree: a permanent "ugen" node under
 * CTL_HW and a read/write "debug" integer backed by ugendebug.
 * Invoked automatically at boot via the SYSCTL_SETUP linkset.
 */
SYSCTL_SETUP(sysctl_hw_ugen_setup, "sysctl hw.ugen setup")
{
	int err;
	const struct sysctlnode *rnode;
	const struct sysctlnode *cnode;

	/* hw.ugen parent node */
	err = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "ugen",
	    SYSCTL_DESCR("ugen global controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);

	if (err)
		goto fail;

	/* control debugging printfs */
	err = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Enable debugging output"),
	    NULL, 0, &ugendebug, sizeof(ugendebug), CTL_CREATE, CTL_EOL);
	if (err)
		goto fail;

	return;
fail:
	aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
}
108
109 #endif /* UGEN_DEBUG */
110 #endif /* USB_DEBUG */
111
112 #define DPRINTF(FMT,A,B,C,D) USBHIST_LOGN(ugendebug,1,FMT,A,B,C,D)
113 #define DPRINTFN(N,FMT,A,B,C,D) USBHIST_LOGN(ugendebug,N,FMT,A,B,C,D)
114 #define UGENHIST_FUNC() USBHIST_FUNC()
115 #define UGENHIST_CALLED(name) USBHIST_CALLED(ugendebug)
116 #define UGENHIST_CALLARGS(FMT,A,B,C,D) \
117 USBHIST_CALLARGS(ugendebug,FMT,A,B,C,D)
118 #define UGENHIST_CALLARGSN(N,FMT,A,B,C,D) \
119 USBHIST_CALLARGSN(ugendebug,N,FMT,A,B,C,D)
120
121 #define UGEN_CHUNK 128 /* chunk size for read */
122 #define UGEN_IBSIZE 1020 /* buffer size */
123 #define UGEN_BBSIZE 1024
124
125 #define UGEN_NISOREQS 4 /* number of outstanding xfer requests */
126 #define UGEN_NISORFRMS 8 /* number of transactions per req */
127 #define UGEN_NISOFRAMES (UGEN_NISORFRMS * UGEN_NISOREQS)
128
129 #define UGEN_BULK_RA_WB_BUFSIZE 16384 /* default buffer size */
130 #define UGEN_BULK_RA_WB_BUFMAX (1 << 20) /* maximum allowed buffer */
131
/*
 * One outstanding isochronous read request.  Each request carries
 * UGEN_NISORFRMS frames; UGEN_NISOREQS of these are kept in flight
 * per open isochronous IN endpoint (see ugenopen()).
 */
struct isoreq {
	struct ugen_endpoint *sce;	/* back pointer to owning endpoint */
	struct usbd_xfer *xfer;		/* the transfer itself */
	void *dmabuf;			/* xfer's buffer (usbd_get_buffer) */
	uint16_t sizes[UGEN_NISORFRMS];	/* per-frame lengths for this xfer */
};
138
/*
 * Per-endpoint, per-direction state.  The rsel/cv members at the tail
 * persist across configuration changes; everything before them is
 * wiped by ugen_clear_endpoints() (see UGEN_ENDPOINT_NONZERO_CRUFT).
 */
struct ugen_endpoint {
	struct ugen_softc *sc;		/* back pointer to device softc */
	usb_endpoint_descriptor_t *edesc; /* descriptor; NULL if absent */
	struct usbd_interface *iface;	/* interface the endpoint lives on */
	int state;			/* UGEN_* flag bits below */
#define UGEN_SHORT_OK	0x04	/* short xfers are OK */
#define UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
#define UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
#define UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
	struct usbd_pipe *pipeh;	/* open pipe, NULL while closed */
	struct clist q;			/* interrupt-IN character queue */
	u_char *ibuf;		/* start of buffer (circular for isoc) */
	u_char *fill;		/* location for input (isoc) */
	u_char *limit;		/* end of circular buffer (isoc) */
	u_char *cur;		/* current read location (isoc) */
	uint32_t timeout;	/* I/O timeout in ms; USBD_NO_TIMEOUT = none */
	uint32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
	uint32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
	uint32_t ra_wb_used;	 /* how much is in buffer */
	uint32_t ra_wb_xferlen; /* current xfer length for RA/WB */
	struct usbd_xfer *ra_wb_xfer;	/* the single RA/WB transfer */
	struct isoreq isoreqs[UGEN_NISOREQS]; /* in-flight isoc requests */
	/* Keep these last; we don't overwrite them in ugen_set_config() */
#define UGEN_ENDPOINT_NONZERO_CRUFT	offsetof(struct ugen_endpoint, rsel)
	struct selinfo rsel;		/* select/poll/kqueue bookkeeping */
	kcondvar_t cv;			/* sleepers waiting for I/O activity */
};
166
/*
 * Per-instance state.  One unit covers a whole USB device (ugen) or a
 * single interface (ugenif); both attachments share this softc.
 */
struct ugen_softc {
	device_t sc_dev;		/* base device */
	struct usbd_device *sc_udev;	/* underlying USB device handle */
	struct rb_node sc_node;		/* linkage in the global unit tree */
	unsigned sc_unit;		/* unit number; key in the tree */

	kmutex_t sc_lock;		/* protects endpoint and ref state */
	kcondvar_t sc_detach_cv;	/* detach waits here for last user */

	char sc_is_open[USB_MAX_ENDPOINTS];	/* per-endpoint open flag */
	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
#define OUT 0	/* index of the OUT-direction endpoint state */
#define IN 1	/* index of the IN-direction endpoint state */

	int sc_refcnt;			/* active users; see ugenif_release */
	char sc_buffer[UGEN_BBSIZE];	/* bounce buffer for bulk transfers */
	u_char sc_dying;		/* set when the device is going away */
	u_char sc_attached;		/* attach completed successfully */
};
186
/*
 * Global registry of attached ugen units, keyed by unit number, used
 * to translate a minor device number into its softc.
 */
static struct {
	kmutex_t lock;		/* guards tree */
	rb_tree_t tree;		/* ugen_softc nodes ordered by sc_unit */
} ugenif __cacheline_aligned;
191
192 static int
compare_ugen(void * cookie,const void * vsca,const void * vscb)193 compare_ugen(void *cookie, const void *vsca, const void *vscb)
194 {
195 const struct ugen_softc *sca = vsca;
196 const struct ugen_softc *scb = vscb;
197
198 if (sca->sc_unit < scb->sc_unit)
199 return -1;
200 if (sca->sc_unit > scb->sc_unit)
201 return +1;
202 return 0;
203 }
204
205 static int
compare_ugen_key(void * cookie,const void * vsc,const void * vk)206 compare_ugen_key(void *cookie, const void *vsc, const void *vk)
207 {
208 const struct ugen_softc *sc = vsc;
209 const unsigned *k = vk;
210
211 if (sc->sc_unit < *k)
212 return -1;
213 if (sc->sc_unit > *k)
214 return +1;
215 return 0;
216 }
217
/* rbtree ops for the unit registry: keyed by sc_unit (unsigned). */
static const rb_tree_ops_t ugenif_tree_ops = {
	.rbto_compare_nodes = compare_ugen,
	.rbto_compare_key = compare_ugen_key,
	.rbto_node_offset = offsetof(struct ugen_softc, sc_node),
};
223
/*
 * Assign the lowest free unit number to sc, insert it into the global
 * tree, and publish the number as the "ugen-unit" device property.
 */
static void
ugenif_get_unit(struct ugen_softc *sc)
{
	struct ugen_softc *sc0;
	unsigned i;

	/*
	 * Walk existing units in ascending order; the first index i
	 * that does not match the i-th smallest unit is the lowest
	 * free number.
	 */
	mutex_enter(&ugenif.lock);
	for (i = 0, sc0 = RB_TREE_MIN(&ugenif.tree);
	     sc0 != NULL && i == sc0->sc_unit;
	     i++, sc0 = RB_TREE_NEXT(&ugenif.tree, sc0))
		KASSERT(i < UINT_MAX);
	KASSERT(rb_tree_find_node(&ugenif.tree, &i) == NULL);
	sc->sc_unit = i;
	sc0 = rb_tree_insert_node(&ugenif.tree, sc);
	KASSERT(sc0 == sc);
	KASSERT(rb_tree_find_node(&ugenif.tree, &i) == sc);
	mutex_exit(&ugenif.lock);

	prop_dictionary_set_uint(device_properties(sc->sc_dev),
	    "ugen-unit", sc->sc_unit);
}
245
246 static void
ugenif_put_unit(struct ugen_softc * sc)247 ugenif_put_unit(struct ugen_softc *sc)
248 {
249
250 prop_dictionary_remove(device_properties(sc->sc_dev),
251 "ugen-unit");
252
253 mutex_enter(&ugenif.lock);
254 KASSERT(rb_tree_find_node(&ugenif.tree, &sc->sc_unit) == sc);
255 rb_tree_remove_node(&ugenif.tree, sc);
256 sc->sc_unit = -1;
257 mutex_exit(&ugenif.lock);
258 }
259
/*
 * Look up the softc for a unit and take a reference on it.  Returns
 * NULL if no such unit exists or the device is being detached.  A
 * successful return must be paired with ugenif_release().
 */
static struct ugen_softc *
ugenif_acquire(unsigned unit)
{
	struct ugen_softc *sc;

	mutex_enter(&ugenif.lock);
	sc = rb_tree_find_node(&ugenif.tree, &unit);
	if (sc == NULL)
		goto out;
	/* Take sc_lock while still holding the registry lock so the
	 * softc cannot disappear between lookup and reference. */
	mutex_enter(&sc->sc_lock);
	if (sc->sc_dying) {
		mutex_exit(&sc->sc_lock);
		sc = NULL;
		goto out;
	}
	KASSERT(sc->sc_refcnt < INT_MAX);
	sc->sc_refcnt++;
	mutex_exit(&sc->sc_lock);
out:	mutex_exit(&ugenif.lock);

	return sc;
}
282
/*
 * Drop a reference taken by ugenif_acquire().  When the count goes
 * negative, wake anyone sleeping on sc_detach_cv.
 * NOTE(review): the below-zero test implies the detach path biases
 * sc_refcnt negative while draining users -- confirm against
 * ugen_detach(), which is not visible in this chunk.
 */
static void
ugenif_release(struct ugen_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	if (--sc->sc_refcnt < 0)
		cv_broadcast(&sc->sc_detach_cv);
	mutex_exit(&sc->sc_lock);
}
292
293 static dev_type_open(ugenopen);
294 static dev_type_close(ugenclose);
295 static dev_type_read(ugenread);
296 static dev_type_write(ugenwrite);
297 static dev_type_ioctl(ugenioctl);
298 static dev_type_poll(ugenpoll);
299 static dev_type_kqfilter(ugenkqfilter);
300
/* Character device switch for the /dev/ugenN.EE nodes. */
const struct cdevsw ugen_cdevsw = {
	.d_open = ugenopen,
	.d_close = ugenclose,
	.d_read = ugenread,
	.d_write = ugenwrite,
	.d_ioctl = ugenioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = ugenpoll,
	.d_mmap = nommap,
	.d_kqfilter = ugenkqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER,
};
315
316 Static void ugenintr(struct usbd_xfer *, void *,
317 usbd_status);
318 Static void ugen_isoc_rintr(struct usbd_xfer *, void *,
319 usbd_status);
320 Static void ugen_bulkra_intr(struct usbd_xfer *, void *,
321 usbd_status);
322 Static void ugen_bulkwb_intr(struct usbd_xfer *, void *,
323 usbd_status);
324 Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
325 Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
326 Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
327 void *, int, struct lwp *);
328 Static int ugen_set_config(struct ugen_softc *, int, int);
329 Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *,
330 int, int *);
331 Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
332 Static int ugen_get_alt_index(struct ugen_softc *, int);
333 Static void ugen_clear_endpoints(struct ugen_softc *);
334
335 #define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
336 #define UGENENDPOINT(n) (minor(n) & 0xf)
337 #define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))
338
339 static int ugenif_match(device_t, cfdata_t, void *);
340 static void ugenif_attach(device_t, device_t, void *);
341 static int ugen_match(device_t, cfdata_t, void *);
342 static void ugen_attach(device_t, device_t, void *);
343 static int ugen_detach(device_t, int);
344 static int ugen_activate(device_t, enum devact);
345
346 CFATTACH_DECL_NEW(ugen, sizeof(struct ugen_softc), ugen_match,
347 ugen_attach, ugen_detach, ugen_activate);
348 CFATTACH_DECL_NEW(ugenif, sizeof(struct ugen_softc), ugenif_match,
349 ugenif_attach, ugen_detach, ugen_activate);
350
351 /* toggle to control attach priority. -1 means "let autoconf decide" */
352 int ugen_override = -1;
353
354 static int
ugen_match(device_t parent,cfdata_t match,void * aux)355 ugen_match(device_t parent, cfdata_t match, void *aux)
356 {
357 struct usb_attach_arg *uaa = aux;
358 int override;
359
360 if (ugen_override != -1)
361 override = ugen_override;
362 else
363 override = match->cf_flags & 1;
364
365 if (override)
366 return UMATCH_HIGHEST;
367 else if (uaa->uaa_usegeneric)
368 return UMATCH_GENERIC;
369 else
370 return UMATCH_NONE;
371 }
372
373 static int
ugenif_match(device_t parent,cfdata_t match,void * aux)374 ugenif_match(device_t parent, cfdata_t match, void *aux)
375 {
376 /*
377 * Like ugen(4), ugenif(4) also has an override flag. It has the
378 * opposite effect, however, causing us to match with GENERIC
379 * priority rather than HIGHEST.
380 */
381 return (match->cf_flags & 1) ? UMATCH_GENERIC : UMATCH_HIGHEST;
382 }
383
384 static void
ugen_attach(device_t parent,device_t self,void * aux)385 ugen_attach(device_t parent, device_t self, void *aux)
386 {
387 struct usb_attach_arg *uaa = aux;
388 struct usbif_attach_arg uiaa;
389
390 memset(&uiaa, 0, sizeof(uiaa));
391 uiaa.uiaa_port = uaa->uaa_port;
392 uiaa.uiaa_vendor = uaa->uaa_vendor;
393 uiaa.uiaa_product = uaa->uaa_product;
394 uiaa.uiaa_release = uaa->uaa_release;
395 uiaa.uiaa_device = uaa->uaa_device;
396 uiaa.uiaa_configno = -1;
397 uiaa.uiaa_ifaceno = -1;
398
399 ugenif_attach(parent, self, &uiaa);
400 }
401
/*
 * Common attach path for ugen (whole device, uiaa_ifaceno < 0) and
 * ugenif (single interface).  Sets up locks and per-endpoint state,
 * optionally selects the default configuration, then registers the
 * unit so device nodes can reach it.
 */
static void
ugenif_attach(device_t parent, device_t self, void *aux)
{
	struct ugen_softc *sc = device_private(self);
	struct usbif_attach_arg *uiaa = aux;
	struct usbd_device *udev;
	char *devinfop;
	usbd_status err;
	int i, dir, conf;

	aprint_naive("\n");
	aprint_normal("\n");

	/* IPL_SOFTUSB: sc_lock is taken from USB soft interrupt context. */
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
	cv_init(&sc->sc_detach_cv, "ugendet");

	/* Announce what we attached to. */
	devinfop = usbd_devinfo_alloc(uiaa->uiaa_device, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_dev = self;
	sc->sc_udev = udev = uiaa->uiaa_device;

	/*
	 * Initialize selinfo/cv for every possible endpoint once;
	 * ugen_set_config() deliberately leaves these untouched when
	 * it wipes the rest of the endpoint state.
	 */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce;

			sce = &sc->sc_endpoints[i][dir];
			selinit(&sce->rsel);
			cv_init(&sce->cv, "ugensce");
		}
	}

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	if (uiaa->uiaa_ifaceno < 0) {
		/*
		 * If we attach the whole device,
		 * set configuration index 0, the default one.
		 */
		err = usbd_set_config_index(udev, 0, 0);
		if (err) {
			aprint_error_dev(self,
			    "setting configuration index 0 failed\n");
			return;
		}
	}

	/* Get current configuration */
	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;

	/* Set up all the local state for this configuration. */
	err = ugen_set_config(sc, conf, uiaa->uiaa_ifaceno < 0);
	if (err) {
		aprint_error_dev(self, "setting configuration %d failed\n",
		    conf);
		return;
	}

	/* Publish a unit number so opens can find us; then go live. */
	ugenif_get_unit(sc);
	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev, sc->sc_dev);
	sc->sc_attached = 1;
}
466
467 Static void
ugen_clear_endpoints(struct ugen_softc * sc)468 ugen_clear_endpoints(struct ugen_softc *sc)
469 {
470
471 /* Clear out the old info, but leave the selinfo and cv initialised. */
472 for (int i = 0; i < USB_MAX_ENDPOINTS; i++) {
473 for (int dir = OUT; dir <= IN; dir++) {
474 struct ugen_endpoint *sce = &sc->sc_endpoints[i][dir];
475 memset(sce, 0, UGEN_ENDPOINT_NONZERO_CRUFT);
476 }
477 }
478 }
479
480 Static int
ugen_set_config(struct ugen_softc * sc,int configno,int chkopen)481 ugen_set_config(struct ugen_softc *sc, int configno, int chkopen)
482 {
483 struct usbd_device *dev = sc->sc_udev;
484 usb_config_descriptor_t *cdesc;
485 struct usbd_interface *iface;
486 usb_endpoint_descriptor_t *ed;
487 struct ugen_endpoint *sce;
488 uint8_t niface, nendpt;
489 int ifaceno, endptno, endpt;
490 usbd_status err;
491 int dir;
492
493 UGENHIST_FUNC();
494 UGENHIST_CALLARGSN(1, "ugen%jd: to configno %jd, sc=%jx",
495 device_unit(sc->sc_dev), configno, (uintptr_t)sc, 0);
496
497 KASSERT(KERNEL_LOCKED_P()); /* sc_is_open */
498
499 if (chkopen) {
500 /*
501 * We start at 1, not 0, because we don't care whether the
502 * control endpoint is open or not. It is always present.
503 */
504 for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
505 if (sc->sc_is_open[endptno]) {
506 DPRINTFN(1,
507 "ugen%jd - endpoint %d is open",
508 device_unit(sc->sc_dev), endptno, 0, 0);
509 return USBD_IN_USE;
510 }
511
512 /* Prevent opening while we're setting the config. */
513 for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++) {
514 KASSERT(!sc->sc_is_open[endptno]);
515 sc->sc_is_open[endptno] = 1;
516 }
517 }
518
519 /* Avoid setting the current value. */
520 cdesc = usbd_get_config_descriptor(dev);
521 if (!cdesc || cdesc->bConfigurationValue != configno) {
522 err = usbd_set_config_no(dev, configno, 1);
523 if (err)
524 goto out;
525 }
526
527 ugen_clear_endpoints(sc);
528
529 err = usbd_interface_count(dev, &niface);
530 if (err)
531 goto out;
532
533 for (ifaceno = 0; ifaceno < niface; ifaceno++) {
534 DPRINTFN(1, "ifaceno %jd", ifaceno, 0, 0, 0);
535 err = usbd_device2interface_handle(dev, ifaceno, &iface);
536 if (err)
537 goto out;
538 err = usbd_endpoint_count(iface, &nendpt);
539 if (err)
540 goto out;
541 for (endptno = 0; endptno < nendpt; endptno++) {
542 ed = usbd_interface2endpoint_descriptor(iface, endptno);
543 KASSERT(ed != NULL);
544 endpt = ed->bEndpointAddress;
545 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
546 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
547 DPRINTFN(1, "endptno %jd, endpt=0x%02jx (%jd,%jd)",
548 endptno, endpt, UE_GET_ADDR(endpt),
549 UE_GET_DIR(endpt));
550 sce->sc = sc;
551 sce->edesc = ed;
552 sce->iface = iface;
553 }
554 }
555 err = USBD_NORMAL_COMPLETION;
556
557 out: if (chkopen) {
558 /*
559 * Allow open again now that we're done trying to set
560 * the config.
561 */
562 for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++) {
563 KASSERT(sc->sc_is_open[endptno]);
564 sc->sc_is_open[endptno] = 0;
565 }
566 }
567 return err;
568 }
569
570 static int
ugenopen(dev_t dev,int flag,int mode,struct lwp * l)571 ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
572 {
573 struct ugen_softc *sc;
574 int unit = UGENUNIT(dev);
575 int endpt = UGENENDPOINT(dev);
576 usb_endpoint_descriptor_t *edesc;
577 struct ugen_endpoint *sce;
578 int dir, isize;
579 usbd_status err;
580 struct usbd_xfer *xfer;
581 int i, j;
582 int error;
583 int opened = 0;
584
585 UGENHIST_FUNC();
586 UGENHIST_CALLARGS("flag=%jd, mode=%jd, unit=%jd endpt=%jd",
587 flag, mode, unit, endpt);
588
589 KASSERT(KERNEL_LOCKED_P()); /* sc_is_open */
590
591 if ((sc = ugenif_acquire(unit)) == NULL)
592 return ENXIO;
593
594 /* The control endpoint allows multiple opens. */
595 if (endpt == USB_CONTROL_ENDPOINT) {
596 opened = sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
597 error = 0;
598 goto out;
599 }
600
601 if (sc->sc_is_open[endpt]) {
602 error = EBUSY;
603 goto out;
604 }
605 opened = sc->sc_is_open[endpt] = 1;
606
607 /* Make sure there are pipes for all directions. */
608 for (dir = OUT; dir <= IN; dir++) {
609 if (flag & (dir == OUT ? FWRITE : FREAD)) {
610 sce = &sc->sc_endpoints[endpt][dir];
611 if (sce->edesc == NULL) {
612 error = ENXIO;
613 goto out;
614 }
615 }
616 }
617
618 /* Actually open the pipes. */
619 /* XXX Should back out properly if it fails. */
620 for (dir = OUT; dir <= IN; dir++) {
621 if (!(flag & (dir == OUT ? FWRITE : FREAD)))
622 continue;
623 sce = &sc->sc_endpoints[endpt][dir];
624 sce->state = 0;
625 sce->timeout = USBD_NO_TIMEOUT;
626 DPRINTFN(5, "sc=%jx, endpt=%jd, dir=%jd, sce=%jp",
627 (uintptr_t)sc, endpt, dir, (uintptr_t)sce);
628 edesc = sce->edesc;
629 switch (edesc->bmAttributes & UE_XFERTYPE) {
630 case UE_INTERRUPT:
631 if (dir == OUT) {
632 err = usbd_open_pipe(sce->iface,
633 edesc->bEndpointAddress, 0, &sce->pipeh);
634 if (err) {
635 error = EIO;
636 goto out;
637 }
638 break;
639 }
640 isize = UGETW(edesc->wMaxPacketSize);
641 if (isize == 0) { /* shouldn't happen */
642 error = EINVAL;
643 goto out;
644 }
645 sce->ibuf = kmem_alloc(isize, KM_SLEEP);
646 DPRINTFN(5, "intr endpt=%d, isize=%d",
647 endpt, isize, 0, 0);
648 if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1) {
649 kmem_free(sce->ibuf, isize);
650 sce->ibuf = NULL;
651 error = ENOMEM;
652 goto out;
653 }
654 err = usbd_open_pipe_intr(sce->iface,
655 edesc->bEndpointAddress,
656 USBD_SHORT_XFER_OK, &sce->pipeh, sce,
657 sce->ibuf, isize, ugenintr,
658 USBD_DEFAULT_INTERVAL);
659 if (err) {
660 clfree(&sce->q);
661 kmem_free(sce->ibuf, isize);
662 sce->ibuf = NULL;
663 error = EIO;
664 goto out;
665 }
666 DPRINTFN(5, "interrupt open done", 0, 0, 0, 0);
667 break;
668 case UE_BULK:
669 err = usbd_open_pipe(sce->iface,
670 edesc->bEndpointAddress, 0, &sce->pipeh);
671 if (err) {
672 error = EIO;
673 goto out;
674 }
675 sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
676 /*
677 * Use request size for non-RA/WB transfers
678 * as the default.
679 */
680 sce->ra_wb_reqsize = UGEN_BBSIZE;
681 break;
682 case UE_ISOCHRONOUS:
683 if (dir == OUT) {
684 error = EINVAL;
685 goto out;
686 }
687 isize = UGETW(edesc->wMaxPacketSize);
688 if (isize == 0) { /* shouldn't happen */
689 error = EINVAL;
690 goto out;
691 }
692 sce->ibuf = kmem_alloc(isize * UGEN_NISOFRAMES,
693 KM_SLEEP);
694 sce->cur = sce->fill = sce->ibuf;
695 sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
696 DPRINTFN(5, "isoc endpt=%d, isize=%d",
697 endpt, isize, 0, 0);
698 err = usbd_open_pipe(sce->iface,
699 edesc->bEndpointAddress, 0, &sce->pipeh);
700 if (err) {
701 kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
702 sce->ibuf = NULL;
703 error = EIO;
704 goto out;
705 }
706 for (i = 0; i < UGEN_NISOREQS; ++i) {
707 sce->isoreqs[i].sce = sce;
708 err = usbd_create_xfer(sce->pipeh,
709 isize * UGEN_NISORFRMS, 0, UGEN_NISORFRMS,
710 &xfer);
711 if (err)
712 goto bad;
713 sce->isoreqs[i].xfer = xfer;
714 sce->isoreqs[i].dmabuf = usbd_get_buffer(xfer);
715 for (j = 0; j < UGEN_NISORFRMS; ++j)
716 sce->isoreqs[i].sizes[j] = isize;
717 usbd_setup_isoc_xfer(xfer, &sce->isoreqs[i],
718 sce->isoreqs[i].sizes, UGEN_NISORFRMS, 0,
719 ugen_isoc_rintr);
720 (void)usbd_transfer(xfer);
721 }
722 DPRINTFN(5, "isoc open done", 0, 0, 0, 0);
723 break;
724 bad:
725 while (--i >= 0) { /* implicit buffer free */
726 usbd_destroy_xfer(sce->isoreqs[i].xfer);
727 sce->isoreqs[i].xfer = NULL;
728 }
729 usbd_close_pipe(sce->pipeh);
730 sce->pipeh = NULL;
731 kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
732 sce->ibuf = NULL;
733 error = ENOMEM;
734 goto out;
735 case UE_CONTROL:
736 sce->timeout = USBD_DEFAULT_TIMEOUT;
737 error = EINVAL;
738 goto out;
739 }
740 }
741 error = 0;
742 out: if (error && opened)
743 sc->sc_is_open[endpt] = 0;
744 ugenif_release(sc);
745 return error;
746 }
747
/*
 * Common close path: for each direction selected by `flag', abort any
 * transfer in flight, free the type-specific buffering set up by
 * ugenopen(), and close the pipe.  Caller holds the kernel lock
 * (protects sc_is_open).
 */
static void
ugen_do_close(struct ugen_softc *sc, int flag, int endpt)
{
	struct ugen_endpoint *sce;
	int dir;
	int i;

	UGENHIST_FUNC();
	UGENHIST_CALLARGS("flag=%jd endpt=%jd", flag, endpt, 0, 0);

	KASSERT(KERNEL_LOCKED_P()); /* sc_is_open */

	if (!sc->sc_is_open[endpt])
		goto out;

	if (endpt == USB_CONTROL_ENDPOINT) {
		/* Control endpoint holds no per-open resources. */
		DPRINTFN(5, "close control", 0, 0, 0, 0);
		goto out;
	}

	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		if (sce->pipeh == NULL)
			continue;
		DPRINTFN(5, "endpt=%jd dir=%jd sce=%jx",
		    endpt, dir, (uintptr_t)sce, 0);

		/* Cancel in-flight I/O before tearing down its state. */
		usbd_abort_pipe(sce->pipeh);

		int isize = UGETW(sce->edesc->wMaxPacketSize);
		int msize = 0;	/* size of ibuf to free, per xfer type */

		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			ndflush(&sce->q, sce->q.c_cc);
			clfree(&sce->q);
			msize = isize;
			break;
		case UE_ISOCHRONOUS:
			for (i = 0; i < UGEN_NISOREQS; ++i) {
				usbd_destroy_xfer(sce->isoreqs[i].xfer);
				sce->isoreqs[i].xfer = NULL;
			}
			msize = isize * UGEN_NISOFRAMES;
			break;
		case UE_BULK:
			/* Only RA/WB mode allocates a buffer for bulk. */
			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB)) {
				usbd_destroy_xfer(sce->ra_wb_xfer);
				sce->ra_wb_xfer = NULL;
				msize = sce->ra_wb_bufsize;
			}
			break;
		default:
			break;
		}
		usbd_close_pipe(sce->pipeh);
		sce->pipeh = NULL;
		if (sce->ibuf != NULL) {
			kmem_free(sce->ibuf, msize);
			sce->ibuf = NULL;
		}
	}

out:	sc->sc_is_open[endpt] = 0;
	/* Both directions must now be fully quiescent. */
	for (dir = OUT; dir <= IN; dir++) {
		sce = &sc->sc_endpoints[endpt][dir];
		KASSERT(sce->pipeh == NULL);
		KASSERT(sce->ibuf == NULL);
		KASSERT(sce->ra_wb_xfer == NULL);
		for (i = 0; i < UGEN_NISOREQS; i++)
			KASSERT(sce->isoreqs[i].xfer == NULL);
	}
}
823
824 static int
ugenclose(dev_t dev,int flag,int mode,struct lwp * l)825 ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
826 {
827 int endpt = UGENENDPOINT(dev);
828 struct ugen_softc *sc;
829
830 UGENHIST_FUNC();
831 UGENHIST_CALLARGS("flag=%jd, mode=%jd, unit=%jd, endpt=%jd",
832 flag, mode, UGENUNIT(dev), endpt);
833
834 KASSERT(KERNEL_LOCKED_P()); /* ugen_do_close */
835
836 if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
837 return ENXIO;
838
839 KASSERT(sc->sc_is_open[endpt]);
840 ugen_do_close(sc, flag, endpt);
841 KASSERT(!sc->sc_is_open[endpt]);
842
843 ugenif_release(sc);
844
845 return 0;
846 }
847
848 Static int
ugen_do_read(struct ugen_softc * sc,int endpt,struct uio * uio,int flag)849 ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
850 {
851 struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
852 uint32_t n, tn;
853 struct usbd_xfer *xfer;
854 usbd_status err;
855 int error = 0;
856
857 UGENHIST_FUNC();
858 UGENHIST_CALLARGS("ugen%d: %jd", device_unit(sc->sc_dev), endpt, 0, 0);
859
860 if (endpt == USB_CONTROL_ENDPOINT)
861 return ENODEV;
862
863 KASSERT(sce->edesc);
864 KASSERT(sce->pipeh);
865
866 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
867 case UE_INTERRUPT:
868 /* Block until activity occurred. */
869 mutex_enter(&sc->sc_lock);
870 while (sce->q.c_cc == 0) {
871 if (flag & IO_NDELAY) {
872 mutex_exit(&sc->sc_lock);
873 return EWOULDBLOCK;
874 }
875 DPRINTFN(5, "sleep on %jx", (uintptr_t)sce, 0, 0, 0);
876 /* "ugenri" */
877 error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
878 mstohz(sce->timeout));
879 DPRINTFN(5, "woke, error=%jd",
880 error, 0, 0, 0);
881 if (sc->sc_dying)
882 error = EIO;
883 if (error)
884 break;
885 }
886 mutex_exit(&sc->sc_lock);
887
888 /* Transfer as many chunks as possible. */
889 while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
890 n = uimin(sce->q.c_cc, uio->uio_resid);
891 if (n > sizeof(sc->sc_buffer))
892 n = sizeof(sc->sc_buffer);
893
894 /* Remove a small chunk from the input queue. */
895 q_to_b(&sce->q, sc->sc_buffer, n);
896 DPRINTFN(5, "got %jd chars", n, 0, 0, 0);
897
898 /* Copy the data to the user process. */
899 error = uiomove(sc->sc_buffer, n, uio);
900 if (error)
901 break;
902 }
903 break;
904 case UE_BULK:
905 if (sce->state & UGEN_BULK_RA) {
906 DPRINTFN(5, "BULK_RA req: %zd used: %d",
907 uio->uio_resid, sce->ra_wb_used, 0, 0);
908 xfer = sce->ra_wb_xfer;
909
910 mutex_enter(&sc->sc_lock);
911 if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
912 mutex_exit(&sc->sc_lock);
913 return EWOULDBLOCK;
914 }
915 while (uio->uio_resid > 0 && !error) {
916 while (sce->ra_wb_used == 0) {
917 DPRINTFN(5, "sleep on %jx",
918 (uintptr_t)sce, 0, 0, 0);
919 /* "ugenrb" */
920 error = cv_timedwait_sig(&sce->cv,
921 &sc->sc_lock, mstohz(sce->timeout));
922 DPRINTFN(5, "woke, error=%jd",
923 error, 0, 0, 0);
924 if (sc->sc_dying)
925 error = EIO;
926 if (error)
927 break;
928 }
929
930 /* Copy data to the process. */
931 while (uio->uio_resid > 0
932 && sce->ra_wb_used > 0) {
933 n = uimin(uio->uio_resid,
934 sce->ra_wb_used);
935 n = uimin(n, sce->limit - sce->cur);
936 error = uiomove(sce->cur, n, uio);
937 if (error)
938 break;
939 sce->cur += n;
940 sce->ra_wb_used -= n;
941 if (sce->cur == sce->limit)
942 sce->cur = sce->ibuf;
943 }
944
945 /*
946 * If the transfers stopped because the
947 * buffer was full, restart them.
948 */
949 if (sce->state & UGEN_RA_WB_STOP &&
950 sce->ra_wb_used < sce->limit - sce->ibuf) {
951 n = (sce->limit - sce->ibuf)
952 - sce->ra_wb_used;
953 usbd_setup_xfer(xfer, sce, NULL,
954 uimin(n, sce->ra_wb_xferlen),
955 0, USBD_NO_TIMEOUT,
956 ugen_bulkra_intr);
957 sce->state &= ~UGEN_RA_WB_STOP;
958 err = usbd_transfer(xfer);
959 if (err != USBD_IN_PROGRESS)
960 /*
961 * The transfer has not been
962 * queued. Setting STOP
963 * will make us try
964 * again at the next read.
965 */
966 sce->state |= UGEN_RA_WB_STOP;
967 }
968 }
969 mutex_exit(&sc->sc_lock);
970 break;
971 }
972 error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE,
973 0, 0, &xfer);
974 if (error)
975 return error;
976 while ((n = uimin(UGEN_BBSIZE, uio->uio_resid)) != 0) {
977 DPRINTFN(1, "start transfer %jd bytes", n, 0, 0, 0);
978 tn = n;
979 err = usbd_bulk_transfer(xfer, sce->pipeh,
980 sce->state & UGEN_SHORT_OK ? USBD_SHORT_XFER_OK : 0,
981 sce->timeout, sc->sc_buffer, &tn);
982 if (err) {
983 if (err == USBD_INTERRUPTED)
984 error = EINTR;
985 else if (err == USBD_TIMEOUT)
986 error = ETIMEDOUT;
987 else
988 error = EIO;
989 break;
990 }
991 DPRINTFN(1, "got %jd bytes", tn, 0, 0, 0);
992 error = uiomove(sc->sc_buffer, tn, uio);
993 if (error || tn < n)
994 break;
995 }
996 usbd_destroy_xfer(xfer);
997 break;
998 case UE_ISOCHRONOUS:
999 mutex_enter(&sc->sc_lock);
1000 while (sce->cur == sce->fill) {
1001 if (flag & IO_NDELAY) {
1002 mutex_exit(&sc->sc_lock);
1003 return EWOULDBLOCK;
1004 }
1005 /* "ugenri" */
1006 DPRINTFN(5, "sleep on %jx", (uintptr_t)sce, 0, 0, 0);
1007 error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
1008 mstohz(sce->timeout));
1009 DPRINTFN(5, "woke, error=%jd", error, 0, 0, 0);
1010 if (sc->sc_dying)
1011 error = EIO;
1012 if (error)
1013 break;
1014 }
1015
1016 while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
1017 if(sce->fill > sce->cur)
1018 n = uimin(sce->fill - sce->cur, uio->uio_resid);
1019 else
1020 n = uimin(sce->limit - sce->cur, uio->uio_resid);
1021
1022 DPRINTFN(5, "isoc got %jd chars", n, 0, 0, 0);
1023
1024 /* Copy the data to the user process. */
1025 error = uiomove(sce->cur, n, uio);
1026 if (error)
1027 break;
1028 sce->cur += n;
1029 if (sce->cur >= sce->limit)
1030 sce->cur = sce->ibuf;
1031 }
1032 mutex_exit(&sc->sc_lock);
1033 break;
1034
1035
1036 default:
1037 return ENXIO;
1038 }
1039 return error;
1040 }
1041
1042 static int
ugenread(dev_t dev,struct uio * uio,int flag)1043 ugenread(dev_t dev, struct uio *uio, int flag)
1044 {
1045 int endpt = UGENENDPOINT(dev);
1046 struct ugen_softc *sc;
1047 int error;
1048
1049 if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
1050 return ENXIO;
1051 error = ugen_do_read(sc, endpt, uio, flag);
1052 ugenif_release(sc);
1053
1054 return error;
1055 }
1056
/*
 * ugen_do_write(sc, endpt, uio, flag):
 *
 *	Write user data to OUT endpoint `endpt'.  Bulk endpoints in
 *	write-behind (UGEN_BULK_WB) mode copy into a ring buffer and
 *	let ugen_bulkwb_intr drain it asynchronously; plain bulk and
 *	interrupt endpoints do synchronous transfers in UGEN_BBSIZE /
 *	wMaxPacketSize chunks through sc->sc_buffer.
 *
 *	Returns 0 on success or an errno.  Control-endpoint writes are
 *	rejected (use the USB_DO_REQUEST ioctl instead).
 */
Static int
ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
    int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
	uint32_t n;
	int error = 0;
	uint32_t tn;
	char *dbuf;
	struct usbd_xfer *xfer;
	usbd_status err;

	UGENHIST_FUNC();
	UGENHIST_CALLARGSN(5, "ugen%jd: %jd",
	    device_unit(sc->sc_dev), endpt, 0, 0);

	/* No write(2) on the control endpoint. */
	if (endpt == USB_CONTROL_ENDPOINT)
		return ENODEV;

	/* The endpoint must have been set up by ugenopen(). */
	KASSERT(sce->edesc);
	KASSERT(sce->pipeh);

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_BULK:
		if (sce->state & UGEN_BULK_WB) {
			/*
			 * Write-behind: stage the user's data in the
			 * ring buffer [ibuf, limit); `fill' is the
			 * producer pointer, `ra_wb_used' the byte count.
			 */
			DPRINTFN(5, "BULK_WB req: %jd used: %jd",
			    uio->uio_resid, sce->ra_wb_used, 0, 0);
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			/* Non-blocking write against a full buffer fails. */
			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
			    flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for room in the ring buffer. */
				while (sce->ra_wb_used ==
				    sce->limit - sce->ibuf) {
					DPRINTFN(5, "sleep on %#jx",
					    (uintptr_t)sce, 0, 0, 0);
					/* "ugenwb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5, "woke, error=%d",
					    error, 0, 0, 0);
					if (sc->sc_dying)
						error = EIO;
					if (error)
						break;
				}

				/* Copy data from the process. */
				while (uio->uio_resid > 0 &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					/* Bounded by free space... */
					n = uimin(uio->uio_resid,
					    (sce->limit - sce->ibuf)
					    - sce->ra_wb_used);
					/* ...and by wrap at `limit'. */
					n = uimin(n, sce->limit - sce->fill);
					error = uiomove(sce->fill, n, uio);
					if (error)
						break;
					sce->fill += n;
					sce->ra_wb_used += n;
					if (sce->fill == sce->limit)
						sce->fill = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was empty, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used > 0) {
					dbuf = (char *)usbd_get_buffer(xfer);
					n = uimin(sce->ra_wb_used,
					    sce->ra_wb_xferlen);
					/* Possibly wrapped: copy in two parts. */
					tn = uimin(n, sce->limit - sce->cur);
					memcpy(dbuf, sce->cur, tn);
					dbuf += tn;
					if (n - tn > 0)
						memcpy(dbuf, sce->ibuf,
						    n - tn);
					usbd_setup_xfer(xfer, sce, NULL, n,
					    0, USBD_NO_TIMEOUT,
					    ugen_bulkwb_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try again
						 * at the next write.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain bulk: synchronous transfers via sc_buffer. */
		error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE,
		    0, 0, &xfer);
		if (error)
			return error;
		while ((n = uimin(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, "transfer %jd bytes", n, 0, 0, 0);
			err = usbd_bulk_transfer(xfer, sce->pipeh, 0, sce->timeout,
			    sc->sc_buffer, &n);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_destroy_xfer(xfer);
		break;
	case UE_INTERRUPT:
		/* Synchronous interrupt-OUT, one max-packet at a time. */
		error = usbd_create_xfer(sce->pipeh,
		    UGETW(sce->edesc->wMaxPacketSize), 0, 0, &xfer);
		if (error)
			return error;
		while ((n = uimin(UGETW(sce->edesc->wMaxPacketSize),
		    uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, "transfer %jd bytes", n, 0, 0, 0);
			err = usbd_intr_transfer(xfer, sce->pipeh, 0,
			    sce->timeout, sc->sc_buffer, &n);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_destroy_xfer(xfer);
		break;
	default:
		return ENXIO;
	}
	return error;
}
1210
1211 static int
ugenwrite(dev_t dev,struct uio * uio,int flag)1212 ugenwrite(dev_t dev, struct uio *uio, int flag)
1213 {
1214 int endpt = UGENENDPOINT(dev);
1215 struct ugen_softc *sc;
1216 int error;
1217
1218 if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
1219 return ENXIO;
1220 error = ugen_do_write(sc, endpt, uio, flag);
1221 ugenif_release(sc);
1222
1223 return error;
1224 }
1225
1226 static int
ugen_activate(device_t self,enum devact act)1227 ugen_activate(device_t self, enum devact act)
1228 {
1229 struct ugen_softc *sc = device_private(self);
1230
1231 switch (act) {
1232 case DVACT_DEACTIVATE:
1233 sc->sc_dying = 1;
1234 return 0;
1235 default:
1236 return EOPNOTSUPP;
1237 }
1238 }
1239
/*
 * ugen_detach(self, flags):
 *
 *	Autoconf detach.  Unless forced, fail while userland holds any
 *	endpoint open.  Otherwise mark the device dying, abort all
 *	pipes, wait for in-flight I/O to drain, revoke open vnodes,
 *	close lingering pipes, and free per-endpoint resources.
 */
static int
ugen_detach(device_t self, int flags)
{
	struct ugen_softc *sc = device_private(self);
	struct ugen_endpoint *sce;
	int i, dir;
	int maj, mn;

	UGENHIST_FUNC();
	UGENHIST_CALLARGS("sc=%ju flags=%ju", (uintptr_t)sc, flags, 0, 0);

	KASSERT(KERNEL_LOCKED_P()); /* sc_is_open */

	/*
	 * Fail if we're not forced to detach and userland has any
	 * endpoints open.
	 */
	if ((flags & DETACH_FORCE) == 0) {
		for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
			if (sc->sc_is_open[i])
				return EBUSY;
		}
	}

	/* Prevent new users.  Prevent suspend/resume. */
	sc->sc_dying = 1;
	pmf_device_deregister(self);

	/*
	 * If we never finished attaching, skip nixing endpoints and
	 * users because there aren't any.
	 */
	if (!sc->sc_attached)
		goto out;

	/* Abort all pipes.  Causes blocked transfers to complete. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			if (sce->pipeh)
				usbd_abort_pipe(sce->pipeh);
		}
	}

	/*
	 * Wait for users to drain.  Before this point there can be no
	 * more I/O operations started because we set sc_dying; after
	 * this, there can be no more I/O operations in progress, so it
	 * will be safe to free things.
	 */
	mutex_enter(&sc->sc_lock);
	if (--sc->sc_refcnt >= 0) {
		/* Wake everyone */
		for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
			for (dir = OUT; dir <= IN; dir++)
				cv_broadcast(&sc->sc_endpoints[i][dir].cv);
		}
		/* Wait for processes to go away. */
		do {
			cv_wait(&sc->sc_detach_cv, &sc->sc_lock);
		} while (sc->sc_refcnt >= 0);
	}
	mutex_exit(&sc->sc_lock);

	/* locate the major number */
	maj = cdevsw_lookup_major(&ugen_cdevsw);

	/*
	 * Nuke the vnodes for any open instances (calls ugenclose, but
	 * with no effect because we already set sc_dying).
	 */
	mn = sc->sc_unit * USB_MAX_ENDPOINTS;
	vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);

	/* Actually close any lingering pipes. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++)
		ugen_do_close(sc, FREAD|FWRITE, i);

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev, sc->sc_dev);
	ugenif_put_unit(sc);

	/* Per-endpoint select/cv state exists even if attach failed. */
out:	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			seldestroy(&sce->rsel);
			cv_destroy(&sce->cv);
		}
	}

	cv_destroy(&sc->sc_detach_cv);
	mutex_destroy(&sc->sc_lock);

	return 0;
}
1334
/*
 * ugenintr(xfer, addr, status):
 *
 *	Completion callback for interrupt-IN endpoints in the default
 *	(non-RA) mode: append the received bytes to the endpoint's
 *	clist queue and wake readers and pollers.
 */
Static void
ugenintr(struct usbd_xfer *xfer, void *addr, usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count;
	u_char *ibuf;

	UGENHIST_FUNC();
	UGENHIST_CALLARGS("xfer %jx status %d", (uintptr_t)xfer, status, 0, 0);

	/* Pipe is being aborted or closed; nothing more to do. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF("status=%jd", status, 0, 0, 0);
		/* Clear a stall asynchronously so transfers can resume. */
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	ibuf = sce->ibuf;

	DPRINTFN(5, "xfer=%#jx status=%d count=%d",
	    (uintptr_t)xfer, status, count, 0);
	DPRINTFN(5, " data = %02x %02x %02x",
	    ibuf[0], ibuf[1], ibuf[2], 0);

	/*
	 * Queue the data; b_to_q's return value (bytes that did not
	 * fit) is deliberately ignored, so overflow is dropped.
	 */
	mutex_enter(&sc->sc_lock);
	(void)b_to_q(ibuf, count, &sce->q);
	cv_signal(&sce->cv);
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1370
1371 Static void
ugen_isoc_rintr(struct usbd_xfer * xfer,void * addr,usbd_status status)1372 ugen_isoc_rintr(struct usbd_xfer *xfer, void *addr,
1373 usbd_status status)
1374 {
1375 struct isoreq *req = addr;
1376 struct ugen_endpoint *sce = req->sce;
1377 struct ugen_softc *sc = sce->sc;
1378 uint32_t count, n;
1379 int i, isize;
1380
1381 UGENHIST_FUNC();
1382 UGENHIST_CALLARGS("xfer=%jx status=%jd", (uintptr_t)xfer, status, 0, 0);
1383
1384 /* Return if we are aborting. */
1385 if (status == USBD_CANCELLED)
1386 return;
1387
1388 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1389 DPRINTFN(5, "xfer %ld, count=%d",
1390 (long)(req - sce->isoreqs), count, 0, 0);
1391
1392 mutex_enter(&sc->sc_lock);
1393
1394 /* throw away oldest input if the buffer is full */
1395 if (sce->fill < sce->cur && sce->cur <= sce->fill + count) {
1396 sce->cur += count;
1397 if (sce->cur >= sce->limit)
1398 sce->cur = sce->ibuf + (sce->limit - sce->cur);
1399 DPRINTFN(5, "throwing away %jd bytes",
1400 count, 0, 0, 0);
1401 }
1402
1403 isize = UGETW(sce->edesc->wMaxPacketSize);
1404 for (i = 0; i < UGEN_NISORFRMS; i++) {
1405 uint32_t actlen = req->sizes[i];
1406 char const *tbuf = (char const *)req->dmabuf + isize * i;
1407
1408 /* copy data to buffer */
1409 while (actlen > 0) {
1410 n = uimin(actlen, sce->limit - sce->fill);
1411 memcpy(sce->fill, tbuf, n);
1412
1413 tbuf += n;
1414 actlen -= n;
1415 sce->fill += n;
1416 if (sce->fill == sce->limit)
1417 sce->fill = sce->ibuf;
1418 }
1419
1420 /* setup size for next transfer */
1421 req->sizes[i] = isize;
1422 }
1423
1424 usbd_setup_isoc_xfer(xfer, req, req->sizes, UGEN_NISORFRMS, 0,
1425 ugen_isoc_rintr);
1426 (void)usbd_transfer(xfer);
1427
1428 cv_signal(&sce->cv);
1429 mutex_exit(&sc->sc_lock);
1430 selnotify(&sce->rsel, 0, 0);
1431 }
1432
/*
 * ugen_bulkra_intr(xfer, addr, status):
 *
 *	Completion callback for bulk read-ahead (UGEN_BULK_RA):
 *	copy the received bytes into the endpoint's ring buffer
 *	[ibuf, limit), queue the next read if there is still room,
 *	and wake readers and pollers.  On error (or a full buffer)
 *	UGEN_RA_WB_STOP is set so ugen_do_read restarts us later.
 */
Static void
ugen_bulkra_intr(struct usbd_xfer *xfer, void *addr,
    usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	char const *tbuf;
	usbd_status err;

	UGENHIST_FUNC();
	UGENHIST_CALLARGS("xfer=%jx status=%jd", (uintptr_t)xfer, status, 0, 0);

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF("status=%jd", status, 0, 0, 0);
		/* Stop read-ahead; ugen_do_read will restart it. */
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	mutex_enter(&sc->sc_lock);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used += count;

	/* Copy data to buffer.  May wrap around `limit'; copy in two parts. */
	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
	n = uimin(count, sce->limit - sce->fill);
	memcpy(sce->fill, tbuf, n);
	tbuf += n;
	count -= n;
	sce->fill += n;
	if (sce->fill == sce->limit)
		sce->fill = sce->ibuf;
	if (count > 0) {
		/* Second part after the wrap. */
		memcpy(sce->fill, tbuf, count);
		sce->fill += count;
	}

	/* Set up the next request if necessary. */
	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
	if (n > 0) {
		usbd_setup_xfer(xfer, sce, NULL, uimin(n, sce->ra_wb_xferlen), 0,
		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("error=%d", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next read.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		/* Buffer full: pause read-ahead until a read drains it. */
		sce->state |= UGEN_RA_WB_STOP;

	cv_signal(&sce->cv);
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1501
/*
 * ugen_bulkwb_intr(xfer, addr, status):
 *
 *	Completion callback for bulk write-behind (UGEN_BULK_WB):
 *	consume the bytes just transferred from the endpoint's ring
 *	buffer [ibuf, limit), queue the next write if more data is
 *	buffered, and wake writers and pollers.  On error (or an
 *	empty buffer) UGEN_RA_WB_STOP is set so ugen_do_write
 *	restarts us later.
 */
Static void
ugen_bulkwb_intr(struct usbd_xfer *xfer, void *addr,
    usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	char *tbuf;
	usbd_status err;

	UGENHIST_FUNC();
	UGENHIST_CALLARGS("xfer=%jx status=%jd", (uintptr_t)xfer, status, 0, 0);

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF("status=%jd", status, 0, 0, 0);
		/* Stop write-behind; ugen_do_write will restart it. */
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	mutex_enter(&sc->sc_lock);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used -= count;

	/* Update buffer pointers (wrap `cur' modulo the buffer size). */
	sce->cur += count;
	if (sce->cur >= sce->limit)
		sce->cur = sce->ibuf + (sce->cur - sce->limit);

	/* Set up next request if necessary. */
	if (sce->ra_wb_used > 0) {
		/* copy data from buffer; may wrap, so copy in two parts */
		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
		count = uimin(sce->ra_wb_used, sce->ra_wb_xferlen);
		n = uimin(count, sce->limit - sce->cur);
		memcpy(tbuf, sce->cur, n);
		tbuf += n;
		if (count - n > 0)
			memcpy(tbuf, sce->ibuf, count - n);

		usbd_setup_xfer(xfer, sce, NULL, count, 0, USBD_NO_TIMEOUT,
		    ugen_bulkwb_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("error=%d", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next write.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		/* Buffer drained: pause until a write refills it. */
		sce->state |= UGEN_RA_WB_STOP;

	cv_signal(&sce->cv);
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1569
/*
 * ugen_set_interface(sc, ifaceidx, altno):
 *
 *	Select alternate setting `altno' of interface `ifaceidx',
 *	then rebuild the per-endpoint table (sc_endpoints) from the
 *	new setting's endpoint descriptors.
 *
 *	Returns a usbd_status; USBD_INVAL for a bad interface index.
 */
Static usbd_status
ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
{
	struct usbd_interface *iface;
	usb_endpoint_descriptor_t *ed;
	usbd_status err;
	struct ugen_endpoint *sce;
	uint8_t niface, nendpt, endptno, endpt;
	int dir;

	UGENHIST_FUNC();
	UGENHIST_CALLARGSN(15, "ifaceidx=%jd altno=%jd", ifaceidx, altno, 0, 0);

	err = usbd_interface_count(sc->sc_udev, &niface);
	if (err)
		return err;
	if (ifaceidx < 0 || ifaceidx >= niface)
		return USBD_INVAL;

	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
	if (err)
		return err;
	/*
	 * NOTE(review): this count is repeated below after the setting
	 * change; this first call appears to serve only as a validity
	 * pre-check before mutating device state — confirm before
	 * removing it.
	 */
	err = usbd_endpoint_count(iface, &nendpt);
	if (err)
		return err;

	/* change setting */
	err = usbd_set_interface(iface, altno);
	if (err)
		return err;

	/* Re-count: the new alternate setting may differ. */
	err = usbd_endpoint_count(iface, &nendpt);
	if (err)
		return err;

	/* Drop stale endpoint state before repopulating. */
	ugen_clear_endpoints(sc);

	for (endptno = 0; endptno < nendpt; endptno++) {
		ed = usbd_interface2endpoint_descriptor(iface, endptno);
		KASSERT(ed != NULL);
		endpt = ed->bEndpointAddress;
		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
		sce->sc = sc;
		sce->edesc = ed;
		sce->iface = iface;
	}
	return 0;
}
1619
1620 /* Retrieve a complete descriptor for a certain device and index. */
1621 Static usb_config_descriptor_t *
ugen_get_cdesc(struct ugen_softc * sc,int index,int * lenp)1622 ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
1623 {
1624 usb_config_descriptor_t *cdesc = NULL, *tdesc, cdescr;
1625 int len = 0;
1626 usbd_status err;
1627
1628 UGENHIST_FUNC(); UGENHIST_CALLARGS("index=%jd", index, 0, 0, 0);
1629
1630 switch (index) {
1631 case USB_CURRENT_CONFIG_INDEX:
1632 tdesc = usbd_get_config_descriptor(sc->sc_udev);
1633 if (tdesc == NULL)
1634 break;
1635 len = UGETW(tdesc->wTotalLength);
1636 cdesc = kmem_alloc(len, KM_SLEEP);
1637 memcpy(cdesc, tdesc, len);
1638 break;
1639 default:
1640 err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
1641 if (err)
1642 break;
1643 len = UGETW(cdescr.wTotalLength);
1644 cdesc = kmem_alloc(len, KM_SLEEP);
1645 err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
1646 if (err) {
1647 kmem_free(cdesc, len);
1648 cdesc = NULL;
1649 }
1650 break;
1651 }
1652 DPRINTFN(5, "req len=%jd cdesc=%jx", len, (uintptr_t)cdesc, 0, 0);
1653 if (cdesc && lenp)
1654 *lenp = len;
1655 return cdesc;
1656 }
1657
1658 Static int
ugen_get_alt_index(struct ugen_softc * sc,int ifaceidx)1659 ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
1660 {
1661 struct usbd_interface *iface;
1662 usbd_status err;
1663
1664 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1665 if (err)
1666 return -1;
1667 return usbd_get_interface_altindex(iface);
1668 }
1669
1670 Static int
ugen_do_ioctl(struct ugen_softc * sc,int endpt,u_long cmd,void * addr,int flag,struct lwp * l)1671 ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
1672 void *addr, int flag, struct lwp *l)
1673 {
1674 struct ugen_endpoint *sce;
1675 usbd_status err;
1676 struct usbd_interface *iface;
1677 struct usb_config_desc *cd;
1678 usb_config_descriptor_t *cdesc;
1679 struct usb_interface_desc *id;
1680 usb_interface_descriptor_t *idesc;
1681 struct usb_endpoint_desc *ed;
1682 usb_endpoint_descriptor_t *edesc;
1683 struct usb_alt_interface *ai;
1684 struct usb_string_desc *si;
1685 uint8_t conf, alt;
1686 int cdesclen;
1687 int error;
1688 int dir;
1689
1690 UGENHIST_FUNC();
1691 UGENHIST_CALLARGS("ugen%d: endpt=%ju cmd=%08jx flag=%jx",
1692 device_unit(sc->sc_dev), endpt, cmd, flag);
1693
1694 KASSERT(KERNEL_LOCKED_P()); /* ugen_set_config */
1695
1696 switch (cmd) {
1697 case FIONBIO:
1698 /* All handled in the upper FS layer. */
1699 return 0;
1700 case USB_SET_SHORT_XFER:
1701 if (endpt == USB_CONTROL_ENDPOINT)
1702 return EINVAL;
1703 /* This flag only affects read */
1704 sce = &sc->sc_endpoints[endpt][IN];
1705 if (sce == NULL || sce->pipeh == NULL)
1706 return EINVAL;
1707 if (*(int *)addr)
1708 sce->state |= UGEN_SHORT_OK;
1709 else
1710 sce->state &= ~UGEN_SHORT_OK;
1711 DPRINTFN(5, "pipe=%jx short xfer=%ju",
1712 (uintptr_t)sce->pipeh, sce->state & UGEN_SHORT_OK, 0, 0);
1713 return 0;
1714 case USB_SET_TIMEOUT:
1715 for (dir = OUT; dir <= IN; dir++) {
1716 sce = &sc->sc_endpoints[endpt][dir];
1717 if (sce == NULL)
1718 return EINVAL;
1719
1720 sce->timeout = *(int *)addr;
1721 DPRINTFN(5, "pipe=%jx timeout[dir=%ju] timeout=%ju",
1722 (uintptr_t)sce->pipeh, dir, sce->timeout, 0);
1723 }
1724 return 0;
1725 case USB_SET_BULK_RA:
1726 if (endpt == USB_CONTROL_ENDPOINT)
1727 return EINVAL;
1728 sce = &sc->sc_endpoints[endpt][IN];
1729 if (sce == NULL || sce->pipeh == NULL)
1730 return EINVAL;
1731 edesc = sce->edesc;
1732 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1733 return EINVAL;
1734
1735 if (*(int *)addr) {
1736 /* Only turn RA on if it's currently off. */
1737 if (sce->state & UGEN_BULK_RA)
1738 return 0;
1739 KASSERT(sce->ra_wb_xfer == NULL);
1740 KASSERT(sce->ibuf == NULL);
1741
1742 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1743 /* shouldn't happen */
1744 return EINVAL;
1745 error = usbd_create_xfer(sce->pipeh,
1746 sce->ra_wb_reqsize, 0, 0, &sce->ra_wb_xfer);
1747 if (error)
1748 return error;
1749 sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1750 sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
1751 sce->fill = sce->cur = sce->ibuf;
1752 sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1753 sce->ra_wb_used = 0;
1754 sce->state |= UGEN_BULK_RA;
1755 sce->state &= ~UGEN_RA_WB_STOP;
1756 /* Now start reading. */
1757 usbd_setup_xfer(sce->ra_wb_xfer, sce, NULL,
1758 uimin(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
1759 0, USBD_NO_TIMEOUT, ugen_bulkra_intr);
1760 err = usbd_transfer(sce->ra_wb_xfer);
1761 if (err != USBD_IN_PROGRESS) {
1762 sce->state &= ~UGEN_BULK_RA;
1763 kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1764 sce->ibuf = NULL;
1765 usbd_destroy_xfer(sce->ra_wb_xfer);
1766 sce->ra_wb_xfer = NULL;
1767 return EIO;
1768 }
1769 } else {
1770 /* Only turn RA off if it's currently on. */
1771 if (!(sce->state & UGEN_BULK_RA))
1772 return 0;
1773
1774 sce->state &= ~UGEN_BULK_RA;
1775 usbd_abort_pipe(sce->pipeh);
1776 usbd_destroy_xfer(sce->ra_wb_xfer);
1777 sce->ra_wb_xfer = NULL;
1778 /*
1779 * XXX Discard whatever's in the buffer, but we
1780 * should keep it around and drain the buffer
1781 * instead.
1782 */
1783 kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1784 sce->ibuf = NULL;
1785 }
1786 return 0;
1787 case USB_SET_BULK_WB:
1788 if (endpt == USB_CONTROL_ENDPOINT)
1789 return EINVAL;
1790 sce = &sc->sc_endpoints[endpt][OUT];
1791 if (sce == NULL || sce->pipeh == NULL)
1792 return EINVAL;
1793 edesc = sce->edesc;
1794 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1795 return EINVAL;
1796
1797 if (*(int *)addr) {
1798 /* Only turn WB on if it's currently off. */
1799 if (sce->state & UGEN_BULK_WB)
1800 return 0;
1801 KASSERT(sce->ra_wb_xfer == NULL);
1802 KASSERT(sce->ibuf == NULL);
1803
1804 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1805 /* shouldn't happen */
1806 return EINVAL;
1807 error = usbd_create_xfer(sce->pipeh, sce->ra_wb_reqsize,
1808 0, 0, &sce->ra_wb_xfer);
1809 /* XXX check error??? */
1810 sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1811 sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
1812 sce->fill = sce->cur = sce->ibuf;
1813 sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1814 sce->ra_wb_used = 0;
1815 sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
1816 } else {
1817 /* Only turn WB off if it's currently on. */
1818 if (!(sce->state & UGEN_BULK_WB))
1819 return 0;
1820
1821 sce->state &= ~UGEN_BULK_WB;
1822 /*
1823 * XXX Discard whatever's in the buffer, but we
1824 * should keep it around and keep writing to
1825 * drain the buffer instead.
1826 */
1827 usbd_abort_pipe(sce->pipeh);
1828 usbd_destroy_xfer(sce->ra_wb_xfer);
1829 sce->ra_wb_xfer = NULL;
1830 kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1831 sce->ibuf = NULL;
1832 }
1833 return 0;
1834 case USB_SET_BULK_RA_OPT:
1835 case USB_SET_BULK_WB_OPT:
1836 {
1837 struct usb_bulk_ra_wb_opt *opt;
1838
1839 if (endpt == USB_CONTROL_ENDPOINT)
1840 return EINVAL;
1841 opt = (struct usb_bulk_ra_wb_opt *)addr;
1842 if (cmd == USB_SET_BULK_RA_OPT)
1843 sce = &sc->sc_endpoints[endpt][IN];
1844 else
1845 sce = &sc->sc_endpoints[endpt][OUT];
1846 if (sce == NULL || sce->pipeh == NULL)
1847 return EINVAL;
1848 if (opt->ra_wb_buffer_size < 1 ||
1849 opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
1850 opt->ra_wb_request_size < 1 ||
1851 opt->ra_wb_request_size > opt->ra_wb_buffer_size)
1852 return EINVAL;
1853 /*
1854 * XXX These changes do not take effect until the
1855 * next time RA/WB mode is enabled but they ought to
1856 * take effect immediately.
1857 */
1858 sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
1859 sce->ra_wb_reqsize = opt->ra_wb_request_size;
1860 return 0;
1861 }
1862 default:
1863 break;
1864 }
1865
1866 if (endpt != USB_CONTROL_ENDPOINT)
1867 return EINVAL;
1868
1869 switch (cmd) {
1870 #ifdef UGEN_DEBUG
1871 case USB_SETDEBUG:
1872 ugendebug = *(int *)addr;
1873 break;
1874 #endif
1875 case USB_GET_CONFIG:
1876 err = usbd_get_config(sc->sc_udev, &conf);
1877 if (err)
1878 return EIO;
1879 *(int *)addr = conf;
1880 break;
1881 case USB_SET_CONFIG:
1882 if (!(flag & FWRITE))
1883 return EPERM;
1884 err = ugen_set_config(sc, *(int *)addr, 1);
1885 switch (err) {
1886 case USBD_NORMAL_COMPLETION:
1887 break;
1888 case USBD_IN_USE:
1889 return EBUSY;
1890 default:
1891 return EIO;
1892 }
1893 break;
1894 case USB_GET_ALTINTERFACE:
1895 ai = (struct usb_alt_interface *)addr;
1896 err = usbd_device2interface_handle(sc->sc_udev,
1897 ai->uai_interface_index, &iface);
1898 if (err)
1899 return EINVAL;
1900 idesc = usbd_get_interface_descriptor(iface);
1901 if (idesc == NULL)
1902 return EIO;
1903 ai->uai_alt_no = idesc->bAlternateSetting;
1904 break;
1905 case USB_SET_ALTINTERFACE:
1906 if (!(flag & FWRITE))
1907 return EPERM;
1908 ai = (struct usb_alt_interface *)addr;
1909 err = usbd_device2interface_handle(sc->sc_udev,
1910 ai->uai_interface_index, &iface);
1911 if (err)
1912 return EINVAL;
1913 err = ugen_set_interface(sc, ai->uai_interface_index,
1914 ai->uai_alt_no);
1915 if (err)
1916 return EINVAL;
1917 break;
1918 case USB_GET_NO_ALT:
1919 ai = (struct usb_alt_interface *)addr;
1920 cdesc = ugen_get_cdesc(sc, ai->uai_config_index, &cdesclen);
1921 if (cdesc == NULL)
1922 return EINVAL;
1923 idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
1924 if (idesc == NULL) {
1925 kmem_free(cdesc, cdesclen);
1926 return EINVAL;
1927 }
1928 ai->uai_alt_no = usbd_get_no_alts(cdesc,
1929 idesc->bInterfaceNumber);
1930 kmem_free(cdesc, cdesclen);
1931 break;
1932 case USB_GET_DEVICE_DESC:
1933 *(usb_device_descriptor_t *)addr =
1934 *usbd_get_device_descriptor(sc->sc_udev);
1935 break;
1936 case USB_GET_CONFIG_DESC:
1937 cd = (struct usb_config_desc *)addr;
1938 cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, &cdesclen);
1939 if (cdesc == NULL)
1940 return EINVAL;
1941 cd->ucd_desc = *cdesc;
1942 kmem_free(cdesc, cdesclen);
1943 break;
1944 case USB_GET_INTERFACE_DESC:
1945 id = (struct usb_interface_desc *)addr;
1946 cdesc = ugen_get_cdesc(sc, id->uid_config_index, &cdesclen);
1947 if (cdesc == NULL)
1948 return EINVAL;
1949 if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
1950 id->uid_alt_index == USB_CURRENT_ALT_INDEX)
1951 alt = ugen_get_alt_index(sc, id->uid_interface_index);
1952 else
1953 alt = id->uid_alt_index;
1954 idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
1955 if (idesc == NULL) {
1956 kmem_free(cdesc, cdesclen);
1957 return EINVAL;
1958 }
1959 id->uid_desc = *idesc;
1960 kmem_free(cdesc, cdesclen);
1961 break;
1962 case USB_GET_ENDPOINT_DESC:
1963 ed = (struct usb_endpoint_desc *)addr;
1964 cdesc = ugen_get_cdesc(sc, ed->ued_config_index, &cdesclen);
1965 if (cdesc == NULL)
1966 return EINVAL;
1967 if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
1968 ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
1969 alt = ugen_get_alt_index(sc, ed->ued_interface_index);
1970 else
1971 alt = ed->ued_alt_index;
1972 edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
1973 alt, ed->ued_endpoint_index);
1974 if (edesc == NULL) {
1975 kmem_free(cdesc, cdesclen);
1976 return EINVAL;
1977 }
1978 ed->ued_desc = *edesc;
1979 kmem_free(cdesc, cdesclen);
1980 break;
1981 case USB_GET_FULL_DESC:
1982 {
1983 int len;
1984 struct iovec iov;
1985 struct uio uio;
1986 struct usb_full_desc *fd = (struct usb_full_desc *)addr;
1987
1988 cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &cdesclen);
1989 if (cdesc == NULL)
1990 return EINVAL;
1991 len = cdesclen;
1992 if (len > fd->ufd_size)
1993 len = fd->ufd_size;
1994 iov.iov_base = (void *)fd->ufd_data;
1995 iov.iov_len = len;
1996 uio.uio_iov = &iov;
1997 uio.uio_iovcnt = 1;
1998 uio.uio_resid = len;
1999 uio.uio_offset = 0;
2000 uio.uio_rw = UIO_READ;
2001 uio.uio_vmspace = l->l_proc->p_vmspace;
2002 error = uiomove((void *)cdesc, len, &uio);
2003 kmem_free(cdesc, cdesclen);
2004 return error;
2005 }
2006 case USB_GET_STRING_DESC: {
2007 int len;
2008 si = (struct usb_string_desc *)addr;
2009 err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
2010 si->usd_language_id, &si->usd_desc, &len);
2011 if (err)
2012 return EINVAL;
2013 break;
2014 }
2015 case USB_DO_REQUEST:
2016 {
2017 struct usb_ctl_request *ur = (void *)addr;
2018 int len = UGETW(ur->ucr_request.wLength);
2019 struct iovec iov;
2020 struct uio uio;
2021 void *ptr = 0;
2022 usbd_status xerr;
2023
2024 error = 0;
2025
2026 if (!(flag & FWRITE))
2027 return EPERM;
2028 /* Avoid requests that would damage the bus integrity. */
2029 if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
2030 ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
2031 (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
2032 ur->ucr_request.bRequest == UR_SET_CONFIG) ||
2033 (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
2034 ur->ucr_request.bRequest == UR_SET_INTERFACE))
2035 return EINVAL;
2036
2037 if (len < 0 || len > 32767)
2038 return EINVAL;
2039 if (len != 0) {
2040 iov.iov_base = (void *)ur->ucr_data;
2041 iov.iov_len = len;
2042 uio.uio_iov = &iov;
2043 uio.uio_iovcnt = 1;
2044 uio.uio_resid = len;
2045 uio.uio_offset = 0;
2046 uio.uio_rw =
2047 ur->ucr_request.bmRequestType & UT_READ ?
2048 UIO_READ : UIO_WRITE;
2049 uio.uio_vmspace = l->l_proc->p_vmspace;
2050 ptr = kmem_alloc(len, KM_SLEEP);
2051 if (uio.uio_rw == UIO_WRITE) {
2052 error = uiomove(ptr, len, &uio);
2053 if (error)
2054 goto ret;
2055 }
2056 }
2057 sce = &sc->sc_endpoints[endpt][IN];
2058 xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
2059 ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
2060 if (xerr) {
2061 error = EIO;
2062 goto ret;
2063 }
2064 if (len != 0) {
2065 if (uio.uio_rw == UIO_READ) {
2066 size_t alen = uimin(len, ur->ucr_actlen);
2067 error = uiomove(ptr, alen, &uio);
2068 if (error)
2069 goto ret;
2070 }
2071 }
2072 ret:
2073 if (ptr)
2074 kmem_free(ptr, len);
2075 return error;
2076 }
2077 case USB_GET_DEVICEINFO:
2078 usbd_fill_deviceinfo(sc->sc_udev,
2079 (struct usb_device_info *)addr, 0);
2080 break;
2081 case USB_GET_DEVICEINFO_30:
2082 {
2083 int ret;
2084 MODULE_HOOK_CALL(usb_subr_fill_30_hook,
2085 (sc->sc_udev, (struct usb_device_info30 *)addr, 0,
2086 usbd_devinfo_vp, usbd_printBCD),
2087 enosys(), ret);
2088 if (ret == 0)
2089 return 0;
2090 return EINVAL;
2091 }
2092 default:
2093 return EINVAL;
2094 }
2095 return 0;
2096 }
2097
2098 static int
ugenioctl(dev_t dev,u_long cmd,void * addr,int flag,struct lwp * l)2099 ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
2100 {
2101 int endpt = UGENENDPOINT(dev);
2102 struct ugen_softc *sc;
2103 int error;
2104
2105 if ((sc = ugenif_acquire(UGENUNIT(dev))) == 0)
2106 return ENXIO;
2107 error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
2108 ugenif_release(sc);
2109
2110 return error;
2111 }
2112
/*
 * ugenpoll(dev, events, l):
 *
 *	poll(2) entry point.  Report readability/writability for the
 *	endpoint pair behind `dev' according to its transfer type, or
 *	record the caller for later wakeup via selnotify().  The
 *	control endpoint is not pollable (POLLERR).
 */
static int
ugenpoll(dev_t dev, int events, struct lwp *l)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce_in, *sce_out;
	int revents = 0;

	if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
		return POLLHUP;

	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT) {
		revents |= POLLERR;
		goto out;
	}

	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
	/* At least one direction must have been opened/configured. */
	KASSERT(sce_in->edesc || sce_out->edesc);
	KASSERT(sce_in->pipeh || sce_out->pipeh);

	mutex_enter(&sc->sc_lock);
	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			/* Readable when the clist queue has data. */
			if (sce_in->q.c_cc > 0)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_ISOCHRONOUS:
			/* Readable when the ring buffer is non-empty. */
			if (sce_in->cur != sce_in->fill)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_BULK:
			if (sce_in->state & UGEN_BULK_RA) {
				/* Read-ahead: readable when buffered. */
				if (sce_in->ra_wb_used > 0)
					revents |= events &
					    (POLLIN | POLLRDNORM);
				else
					selrecord(l, &sce_in->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLIN | POLLRDNORM);
			break;
		default:
			break;
		}
	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX unimplemented */
			break;
		case UE_BULK:
			if (sce_out->state & UGEN_BULK_WB) {
				/* Write-behind: writable when room exists. */
				if (sce_out->ra_wb_used <
				    sce_out->limit - sce_out->ibuf)
					revents |= events &
					    (POLLOUT | POLLWRNORM);
				else
					selrecord(l, &sce_out->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLOUT | POLLWRNORM);
			break;
		default:
			break;
		}

	mutex_exit(&sc->sc_lock);

out:	ugenif_release(sc);
	return revents;
}
2199
2200 static void
filt_ugenrdetach(struct knote * kn)2201 filt_ugenrdetach(struct knote *kn)
2202 {
2203 struct ugen_endpoint *sce = kn->kn_hook;
2204 struct ugen_softc *sc = sce->sc;
2205
2206 mutex_enter(&sc->sc_lock);
2207 selremove_knote(&sce->rsel, kn);
2208 mutex_exit(&sc->sc_lock);
2209 }
2210
2211 static int
filt_ugenread_intr(struct knote * kn,long hint)2212 filt_ugenread_intr(struct knote *kn, long hint)
2213 {
2214 struct ugen_endpoint *sce = kn->kn_hook;
2215 struct ugen_softc *sc = sce->sc;
2216 int ret;
2217
2218 mutex_enter(&sc->sc_lock);
2219 if (sc->sc_dying) {
2220 ret = 0;
2221 } else {
2222 kn->kn_data = sce->q.c_cc;
2223 ret = kn->kn_data > 0;
2224 }
2225 mutex_exit(&sc->sc_lock);
2226
2227 return ret;
2228 }
2229
2230 static int
filt_ugenread_isoc(struct knote * kn,long hint)2231 filt_ugenread_isoc(struct knote *kn, long hint)
2232 {
2233 struct ugen_endpoint *sce = kn->kn_hook;
2234 struct ugen_softc *sc = sce->sc;
2235 int ret;
2236
2237 mutex_enter(&sc->sc_lock);
2238 if (sc->sc_dying) {
2239 ret = 0;
2240 } else if (sce->cur == sce->fill) {
2241 ret = 0;
2242 } else if (sce->cur < sce->fill) {
2243 kn->kn_data = sce->fill - sce->cur;
2244 ret = 1;
2245 } else {
2246 kn->kn_data = (sce->limit - sce->cur) +
2247 (sce->fill - sce->ibuf);
2248 ret = 1;
2249 }
2250 mutex_exit(&sc->sc_lock);
2251
2252 return ret;
2253 }
2254
2255 static int
filt_ugenread_bulk(struct knote * kn,long hint)2256 filt_ugenread_bulk(struct knote *kn, long hint)
2257 {
2258 struct ugen_endpoint *sce = kn->kn_hook;
2259 struct ugen_softc *sc = sce->sc;
2260 int ret;
2261
2262 mutex_enter(&sc->sc_lock);
2263 if (sc->sc_dying) {
2264 ret = 0;
2265 } else if (!(sce->state & UGEN_BULK_RA)) {
2266 /*
2267 * We have no easy way of determining if a read will
2268 * yield any data or a write will happen.
2269 * So, emulate "seltrue".
2270 */
2271 ret = filt_seltrue(kn, hint);
2272 } else if (sce->ra_wb_used == 0) {
2273 ret = 0;
2274 } else {
2275 kn->kn_data = sce->ra_wb_used;
2276 ret = 1;
2277 }
2278 mutex_exit(&sc->sc_lock);
2279
2280 return ret;
2281 }
2282
2283 static int
filt_ugenwrite_bulk(struct knote * kn,long hint)2284 filt_ugenwrite_bulk(struct knote *kn, long hint)
2285 {
2286 struct ugen_endpoint *sce = kn->kn_hook;
2287 struct ugen_softc *sc = sce->sc;
2288 int ret;
2289
2290 mutex_enter(&sc->sc_lock);
2291 if (sc->sc_dying) {
2292 ret = 0;
2293 } else if (!(sce->state & UGEN_BULK_WB)) {
2294 /*
2295 * We have no easy way of determining if a read will
2296 * yield any data or a write will happen.
2297 * So, emulate "seltrue".
2298 */
2299 ret = filt_seltrue(kn, hint);
2300 } else if (sce->ra_wb_used == sce->limit - sce->ibuf) {
2301 ret = 0;
2302 } else {
2303 kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;
2304 ret = 1;
2305 }
2306 mutex_exit(&sc->sc_lock);
2307
2308 return ret;
2309 }
2310
/*
 * kqueue filter tables, one per supported endpoint transfer type and
 * direction.  All share filt_ugenrdetach, since every knote is hung
 * off the endpoint's rsel selinfo regardless of type.
 */
static const struct filterops ugenread_intr_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_intr,
};

static const struct filterops ugenread_isoc_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_isoc,
};

static const struct filterops ugenread_bulk_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_bulk,
};

static const struct filterops ugenwrite_bulk_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenwrite_bulk,
};
2338
2339 static int
ugenkqfilter(dev_t dev,struct knote * kn)2340 ugenkqfilter(dev_t dev, struct knote *kn)
2341 {
2342 struct ugen_softc *sc;
2343 struct ugen_endpoint *sce;
2344 struct selinfo *sip;
2345 int error;
2346
2347 if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
2348 return ENXIO;
2349
2350 if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT) {
2351 error = ENODEV;
2352 goto out;
2353 }
2354
2355 switch (kn->kn_filter) {
2356 case EVFILT_READ:
2357 sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
2358 if (sce == NULL) {
2359 error = EINVAL;
2360 goto out;
2361 }
2362
2363 sip = &sce->rsel;
2364 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
2365 case UE_INTERRUPT:
2366 kn->kn_fop = &ugenread_intr_filtops;
2367 break;
2368 case UE_ISOCHRONOUS:
2369 kn->kn_fop = &ugenread_isoc_filtops;
2370 break;
2371 case UE_BULK:
2372 kn->kn_fop = &ugenread_bulk_filtops;
2373 break;
2374 default:
2375 error = EINVAL;
2376 goto out;
2377 }
2378 break;
2379
2380 case EVFILT_WRITE:
2381 sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
2382 if (sce == NULL) {
2383 error = EINVAL;
2384 goto out;
2385 }
2386
2387 sip = &sce->rsel;
2388 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
2389 case UE_INTERRUPT:
2390 case UE_ISOCHRONOUS:
2391 /* XXX poll doesn't support this */
2392 error = EINVAL;
2393 goto out;
2394
2395 case UE_BULK:
2396 kn->kn_fop = &ugenwrite_bulk_filtops;
2397 break;
2398 default:
2399 error = EINVAL;
2400 goto out;
2401 }
2402 break;
2403
2404 default:
2405 error = EINVAL;
2406 goto out;
2407 }
2408
2409 kn->kn_hook = sce;
2410
2411 mutex_enter(&sc->sc_lock);
2412 selrecord_knote(sip, kn);
2413 mutex_exit(&sc->sc_lock);
2414
2415 error = 0;
2416
2417 out: ugenif_release(sc);
2418 return error;
2419 }
2420
/* Autoloadable driver module glue; no dependencies beyond the class. */
MODULE(MODULE_CLASS_DRIVER, ugen, NULL);
2422
2423 static int
ugen_modcmd(modcmd_t cmd,void * aux)2424 ugen_modcmd(modcmd_t cmd, void *aux)
2425 {
2426
2427 switch (cmd) {
2428 case MODULE_CMD_INIT:
2429 mutex_init(&ugenif.lock, MUTEX_DEFAULT, IPL_NONE);
2430 rb_tree_init(&ugenif.tree, &ugenif_tree_ops);
2431 return 0;
2432 default:
2433 return ENOTTY;
2434 }
2435 }
2436