1 /* $NetBSD: ugen.c,v 1.171 2022/10/23 11:06:37 riastradh Exp $ */ 2 3 /* 4 * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Lennart Augustsson (lennart@augustsson.net) at 9 * Carlstedt Research & Technology. 10 * 11 * Copyright (c) 2006 BBN Technologies Corp. All rights reserved. 12 * Effort sponsored in part by the Defense Advanced Research Projects 13 * Agency (DARPA) and the Department of the Interior National Business 14 * Center under agreement number NBCHC050166. 15 * 16 * Redistribution and use in source and binary forms, with or without 17 * modification, are permitted provided that the following conditions 18 * are met: 19 * 1. Redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer. 21 * 2. Redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38 39 #include <sys/cdefs.h> 40 __KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.171 2022/10/23 11:06:37 riastradh Exp $"); 41 42 #ifdef _KERNEL_OPT 43 #include "opt_compat_netbsd.h" 44 #include "opt_usb.h" 45 #endif 46 47 #include <sys/param.h> 48 #include <sys/systm.h> 49 #include <sys/kernel.h> 50 #include <sys/kmem.h> 51 #include <sys/device.h> 52 #include <sys/ioctl.h> 53 #include <sys/conf.h> 54 #include <sys/tty.h> 55 #include <sys/file.h> 56 #include <sys/select.h> 57 #include <sys/proc.h> 58 #include <sys/vnode.h> 59 #include <sys/poll.h> 60 #include <sys/compat_stub.h> 61 #include <sys/module.h> 62 #include <sys/rbtree.h> 63 64 #include <dev/usb/usb.h> 65 #include <dev/usb/usbdi.h> 66 #include <dev/usb/usbdi_util.h> 67 #include <dev/usb/usbhist.h> 68 69 #include "ioconf.h" 70 71 #ifdef USB_DEBUG 72 #ifndef UGEN_DEBUG 73 #define ugendebug 0 74 #else 75 int ugendebug = 0; 76 77 SYSCTL_SETUP(sysctl_hw_ugen_setup, "sysctl hw.ugen setup") 78 { 79 int err; 80 const struct sysctlnode *rnode; 81 const struct sysctlnode *cnode; 82 83 err = sysctl_createv(clog, 0, NULL, &rnode, 84 CTLFLAG_PERMANENT, CTLTYPE_NODE, "ugen", 85 SYSCTL_DESCR("ugen global controls"), 86 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL); 87 88 if (err) 89 goto fail; 90 91 /* control debugging printfs */ 92 err = sysctl_createv(clog, 0, &rnode, &cnode, 93 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, 94 "debug", SYSCTL_DESCR("Enable debugging output"), 95 NULL, 0, &ugendebug, sizeof(ugendebug), CTL_CREATE, CTL_EOL); 96 if (err) 97 goto fail; 98 99 return; 100 fail: 101 aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err); 102 } 103 104 #endif /* UGEN_DEBUG */ 105 #endif /* USB_DEBUG */ 106 107 #define DPRINTF(FMT,A,B,C,D) USBHIST_LOGN(ugendebug,1,FMT,A,B,C,D) 108 #define DPRINTFN(N,FMT,A,B,C,D) USBHIST_LOGN(ugendebug,N,FMT,A,B,C,D) 109 #define UGENHIST_FUNC() USBHIST_FUNC() 110 #define UGENHIST_CALLED(name) USBHIST_CALLED(ugendebug) 111 112 #define UGEN_CHUNK 128 /* chunk size for read */ 113 #define UGEN_IBSIZE 1020 /* buffer size */ 114 #define UGEN_BBSIZE 1024 115 116 #define UGEN_NISOREQS 4 /* number of outstanding xfer requests */ 117 #define UGEN_NISORFRMS 8 /* number of transactions per req */ 118 #define UGEN_NISOFRAMES (UGEN_NISORFRMS * UGEN_NISOREQS) 119 120 #define UGEN_BULK_RA_WB_BUFSIZE 16384 /* default buffer size */ 121 #define UGEN_BULK_RA_WB_BUFMAX (1 << 20) /* maximum allowed buffer */ 122 123 struct isoreq { 124 struct ugen_endpoint *sce; 125 struct usbd_xfer *xfer; 126 void *dmabuf; 127 uint16_t sizes[UGEN_NISORFRMS]; 128 }; 129 130 struct ugen_endpoint { 131 struct ugen_softc *sc; 132 usb_endpoint_descriptor_t *edesc; 133 struct usbd_interface *iface; 134 int state; 135 #define UGEN_SHORT_OK 0x04 /* short xfers are OK */ 136 #define UGEN_BULK_RA 0x08 /* in bulk read-ahead mode */ 137 #define UGEN_BULK_WB 0x10 /* in bulk write-behind mode */ 138 #define UGEN_RA_WB_STOP 0x20 /* RA/WB xfer is stopped (buffer full/empty) */ 139 struct usbd_pipe *pipeh; 140 struct clist q; 141 u_char *ibuf; /* start of buffer (circular for isoc) */ 142 u_char *fill; /* location for input (isoc) */ 143 u_char *limit; /* end of circular buffer (isoc) */ 144 u_char *cur; /* current read location (isoc) */ 145 uint32_t timeout; 146 uint32_t ra_wb_bufsize; /* requested size for RA/WB buffer */ 147 uint32_t ra_wb_reqsize; /* requested xfer length for RA/WB */ 148 uint32_t ra_wb_used; /* how much is in buffer */ 149 uint32_t ra_wb_xferlen; /* current xfer length for RA/WB */ 150 struct usbd_xfer *ra_wb_xfer; 151 struct isoreq isoreqs[UGEN_NISOREQS]; 152 /* Keep these last; we don't overwrite them in ugen_set_config() */ 153 #define UGEN_ENDPOINT_NONZERO_CRUFT offsetof(struct ugen_endpoint, rsel) 154 struct selinfo rsel; 155 kcondvar_t cv; 156 }; 157 158 struct ugen_softc { 159 device_t sc_dev; /* base device */ 160 struct usbd_device *sc_udev; 161 struct rb_node sc_node; 162 unsigned sc_unit; 163 164 kmutex_t sc_lock; 165 kcondvar_t sc_detach_cv; 166 167 char sc_is_open[USB_MAX_ENDPOINTS]; 168 struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2]; 169 #define OUT 0 170 #define IN 1 171 172 int sc_refcnt; 173 char sc_buffer[UGEN_BBSIZE]; 174 u_char sc_dying; 175 u_char sc_attached; 176 }; 177 178 static struct { 179 kmutex_t lock; 180 rb_tree_t tree; 181 } ugenif __cacheline_aligned; 182 183 static int 184 compare_ugen(void *cookie, const void *vsca, const void *vscb) 185 { 186 const struct ugen_softc *sca = vsca; 187 const struct ugen_softc *scb = vscb; 188 189 if (sca->sc_unit < scb->sc_unit) 190 return -1; 191 if (sca->sc_unit > scb->sc_unit) 192 return +1; 193 return 0; 194 } 195 196 static int 197 compare_ugen_key(void *cookie, const void *vsc, const void *vk) 198 { 199 const struct ugen_softc *sc = vsc; 200 const unsigned *k = vk; 201 202 if (sc->sc_unit < *k) 203 return -1; 204 if (sc->sc_unit > *k) 205 return +1; 206 return 0; 207 } 208 209 static const rb_tree_ops_t ugenif_tree_ops = { 210 .rbto_compare_nodes = compare_ugen, 211 .rbto_compare_key = compare_ugen_key, 212 .rbto_node_offset = offsetof(struct ugen_softc, sc_node), 213 }; 214 215 static void 216 ugenif_get_unit(struct ugen_softc *sc) 217 { 218 struct ugen_softc *sc0; 219 unsigned i; 220 221 mutex_enter(&ugenif.lock); 222 for (i = 0, sc0 = RB_TREE_MIN(&ugenif.tree); 223 sc0 != NULL && i == sc0->sc_unit; 224 i++, sc0 = RB_TREE_NEXT(&ugenif.tree, sc0)) 225 KASSERT(i < UINT_MAX); 226 KASSERT(rb_tree_find_node(&ugenif.tree, &i) == NULL); 227 sc->sc_unit = i; 228 sc0 = rb_tree_insert_node(&ugenif.tree, sc); 229 KASSERT(sc0 == sc); 230 KASSERT(rb_tree_find_node(&ugenif.tree, &i) == sc); 231 mutex_exit(&ugenif.lock); 232 } 233 234 static void 235 ugenif_put_unit(struct ugen_softc *sc) 236 { 237 238 mutex_enter(&ugenif.lock); 239 KASSERT(rb_tree_find_node(&ugenif.tree, &sc->sc_unit) == sc); 240 rb_tree_remove_node(&ugenif.tree, sc); 241 sc->sc_unit = -1; 242 mutex_exit(&ugenif.lock); 243 } 244 245 static struct ugen_softc * 246 ugenif_acquire(unsigned unit) 247 { 248 struct ugen_softc *sc; 249 250 mutex_enter(&ugenif.lock); 251 sc = rb_tree_find_node(&ugenif.tree, &unit); 252 if (sc == NULL) 253 goto out; 254 mutex_enter(&sc->sc_lock); 255 if (sc->sc_dying) { 256 mutex_exit(&sc->sc_lock); 257 sc = NULL; 258 goto out; 259 } 260 KASSERT(sc->sc_refcnt < INT_MAX); 261 sc->sc_refcnt++; 262 mutex_exit(&sc->sc_lock); 263 out: mutex_exit(&ugenif.lock); 264 265 return sc; 266 } 267 268 static void 269 ugenif_release(struct ugen_softc *sc) 270 { 271 272 mutex_enter(&sc->sc_lock); 273 if (--sc->sc_refcnt < 0) 274 cv_broadcast(&sc->sc_detach_cv); 275 mutex_exit(&sc->sc_lock); 276 } 277 278 static dev_type_open(ugenopen); 279 static dev_type_close(ugenclose); 280 static dev_type_read(ugenread); 281 static dev_type_write(ugenwrite); 282 static dev_type_ioctl(ugenioctl); 283 static dev_type_poll(ugenpoll); 284 static dev_type_kqfilter(ugenkqfilter); 285 286 const struct cdevsw ugen_cdevsw = { 287 .d_open = ugenopen, 288 .d_close = ugenclose, 289 .d_read = ugenread, 290 .d_write = ugenwrite, 291 .d_ioctl = ugenioctl, 292 .d_stop = nostop, 293 .d_tty = notty, 294 .d_poll = ugenpoll, 295 .d_mmap = nommap, 296 .d_kqfilter = ugenkqfilter, 297 .d_discard = nodiscard, 298 .d_flag = D_OTHER, 299 }; 300 301 Static void ugenintr(struct usbd_xfer *, void *, 302 usbd_status); 303 Static void ugen_isoc_rintr(struct usbd_xfer *, void *, 304 usbd_status); 305 Static void ugen_bulkra_intr(struct usbd_xfer *, void *, 306 usbd_status); 307 Static void ugen_bulkwb_intr(struct usbd_xfer *, void *, 308 usbd_status); 309 Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int); 310 Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int); 311 Static int ugen_do_ioctl(struct ugen_softc *, int, u_long, 312 void *, int, struct lwp *); 313 Static int ugen_set_config(struct ugen_softc *, int, int); 314 Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *, 315 int, int *); 316 Static usbd_status ugen_set_interface(struct ugen_softc *, int, int); 317 Static int ugen_get_alt_index(struct ugen_softc *, int); 318 Static void ugen_clear_endpoints(struct ugen_softc *); 319 320 #define UGENUNIT(n) ((minor(n) >> 4) & 0xf) 321 #define UGENENDPOINT(n) (minor(n) & 0xf) 322 #define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e))) 323 324 static int ugenif_match(device_t, cfdata_t, void *); 325 static void ugenif_attach(device_t, device_t, void *); 326 static int ugen_match(device_t, cfdata_t, void *); 327 static void ugen_attach(device_t, device_t, void *); 328 static int ugen_detach(device_t, int); 329 static int ugen_activate(device_t, enum devact); 330 331 CFATTACH_DECL_NEW(ugen, sizeof(struct ugen_softc), ugen_match, 332 ugen_attach, ugen_detach, ugen_activate); 333 CFATTACH_DECL_NEW(ugenif, sizeof(struct ugen_softc), ugenif_match, 334 ugenif_attach, ugen_detach, ugen_activate); 335 336 /* toggle to control attach priority. -1 means "let autoconf decide" */ 337 int ugen_override = -1; 338 339 static int 340 ugen_match(device_t parent, cfdata_t match, void *aux) 341 { 342 struct usb_attach_arg *uaa = aux; 343 int override; 344 345 if (ugen_override != -1) 346 override = ugen_override; 347 else 348 override = match->cf_flags & 1; 349 350 if (override) 351 return UMATCH_HIGHEST; 352 else if (uaa->uaa_usegeneric) 353 return UMATCH_GENERIC; 354 else 355 return UMATCH_NONE; 356 } 357 358 static int 359 ugenif_match(device_t parent, cfdata_t match, void *aux) 360 { 361 /* Assume that they knew what they configured! (see ugenif(4)) */ 362 return UMATCH_HIGHEST; 363 } 364 365 static void 366 ugen_attach(device_t parent, device_t self, void *aux) 367 { 368 struct usb_attach_arg *uaa = aux; 369 struct usbif_attach_arg uiaa; 370 371 memset(&uiaa, 0, sizeof(uiaa)); 372 uiaa.uiaa_port = uaa->uaa_port; 373 uiaa.uiaa_vendor = uaa->uaa_vendor; 374 uiaa.uiaa_product = uaa->uaa_product; 375 uiaa.uiaa_release = uaa->uaa_release; 376 uiaa.uiaa_device = uaa->uaa_device; 377 uiaa.uiaa_configno = -1; 378 uiaa.uiaa_ifaceno = -1; 379 380 ugenif_attach(parent, self, &uiaa); 381 } 382 383 static void 384 ugenif_attach(device_t parent, device_t self, void *aux) 385 { 386 struct ugen_softc *sc = device_private(self); 387 struct usbif_attach_arg *uiaa = aux; 388 struct usbd_device *udev; 389 char *devinfop; 390 usbd_status err; 391 int i, dir, conf; 392 393 aprint_naive("\n"); 394 aprint_normal("\n"); 395 396 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB); 397 cv_init(&sc->sc_detach_cv, "ugendet"); 398 399 devinfop = usbd_devinfo_alloc(uiaa->uiaa_device, 0); 400 aprint_normal_dev(self, "%s\n", devinfop); 401 usbd_devinfo_free(devinfop); 402 403 sc->sc_dev = self; 404 sc->sc_udev = udev = uiaa->uiaa_device; 405 406 for (i = 0; i < USB_MAX_ENDPOINTS; i++) { 407 for (dir = OUT; dir <= IN; dir++) { 408 struct ugen_endpoint *sce; 409 410 sce = &sc->sc_endpoints[i][dir]; 411 selinit(&sce->rsel); 412 cv_init(&sce->cv, "ugensce"); 413 } 414 } 415 416 if (!pmf_device_register(self, NULL, NULL)) 417 aprint_error_dev(self, "couldn't establish power handler\n"); 418 419 if (uiaa->uiaa_ifaceno < 0) { 420 /* 421 * If we attach the whole device, 422 * set configuration index 0, the default one. 423 */ 424 err = usbd_set_config_index(udev, 0, 0); 425 if (err) { 426 aprint_error_dev(self, 427 "setting configuration index 0 failed\n"); 428 return; 429 } 430 } 431 432 /* Get current configuration */ 433 conf = usbd_get_config_descriptor(udev)->bConfigurationValue; 434 435 /* Set up all the local state for this configuration. */ 436 err = ugen_set_config(sc, conf, uiaa->uiaa_ifaceno < 0); 437 if (err) { 438 aprint_error_dev(self, "setting configuration %d failed\n", 439 conf); 440 return; 441 } 442 443 ugenif_get_unit(sc); 444 usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev, sc->sc_dev); 445 sc->sc_attached = 1; 446 } 447 448 Static void 449 ugen_clear_endpoints(struct ugen_softc *sc) 450 { 451 452 /* Clear out the old info, but leave the selinfo and cv initialised. */ 453 for (int i = 0; i < USB_MAX_ENDPOINTS; i++) { 454 for (int dir = OUT; dir <= IN; dir++) { 455 struct ugen_endpoint *sce = &sc->sc_endpoints[i][dir]; 456 memset(sce, 0, UGEN_ENDPOINT_NONZERO_CRUFT); 457 } 458 } 459 } 460 461 Static int 462 ugen_set_config(struct ugen_softc *sc, int configno, int chkopen) 463 { 464 struct usbd_device *dev = sc->sc_udev; 465 usb_config_descriptor_t *cdesc; 466 struct usbd_interface *iface; 467 usb_endpoint_descriptor_t *ed; 468 struct ugen_endpoint *sce; 469 uint8_t niface, nendpt; 470 int ifaceno, endptno, endpt; 471 usbd_status err; 472 int dir; 473 474 UGENHIST_FUNC(); UGENHIST_CALLED(); 475 476 DPRINTFN(1, "ugen%jd: to configno %jd, sc=%jx", 477 device_unit(sc->sc_dev), configno, (uintptr_t)sc, 0); 478 479 KASSERT(KERNEL_LOCKED_P()); /* sc_is_open */ 480 481 if (chkopen) { 482 /* 483 * We start at 1, not 0, because we don't care whether the 484 * control endpoint is open or not. It is always present. 485 */ 486 for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++) 487 if (sc->sc_is_open[endptno]) { 488 DPRINTFN(1, 489 "ugen%jd - endpoint %d is open", 490 device_unit(sc->sc_dev), endptno, 0, 0); 491 return USBD_IN_USE; 492 } 493 494 /* Prevent opening while we're setting the config. */ 495 for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++) { 496 KASSERT(!sc->sc_is_open[endptno]); 497 sc->sc_is_open[endptno] = 1; 498 } 499 } 500 501 /* Avoid setting the current value. */ 502 cdesc = usbd_get_config_descriptor(dev); 503 if (!cdesc || cdesc->bConfigurationValue != configno) { 504 err = usbd_set_config_no(dev, configno, 1); 505 if (err) 506 goto out; 507 } 508 509 ugen_clear_endpoints(sc); 510 511 err = usbd_interface_count(dev, &niface); 512 if (err) 513 goto out; 514 515 for (ifaceno = 0; ifaceno < niface; ifaceno++) { 516 DPRINTFN(1, "ifaceno %jd", ifaceno, 0, 0, 0); 517 err = usbd_device2interface_handle(dev, ifaceno, &iface); 518 if (err) 519 goto out; 520 err = usbd_endpoint_count(iface, &nendpt); 521 if (err) 522 goto out; 523 for (endptno = 0; endptno < nendpt; endptno++) { 524 ed = usbd_interface2endpoint_descriptor(iface,endptno); 525 KASSERT(ed != NULL); 526 endpt = ed->bEndpointAddress; 527 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT; 528 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir]; 529 DPRINTFN(1, "endptno %jd, endpt=0x%02jx (%jd,%jd)", 530 endptno, endpt, UE_GET_ADDR(endpt), 531 UE_GET_DIR(endpt)); 532 sce->sc = sc; 533 sce->edesc = ed; 534 sce->iface = iface; 535 } 536 } 537 err = USBD_NORMAL_COMPLETION; 538 539 out: if (chkopen) { 540 /* 541 * Allow open again now that we're done trying to set 542 * the config. 543 */ 544 for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++) { 545 KASSERT(sc->sc_is_open[endptno]); 546 sc->sc_is_open[endptno] = 0; 547 } 548 } 549 return err; 550 } 551 552 static int 553 ugenopen(dev_t dev, int flag, int mode, struct lwp *l) 554 { 555 struct ugen_softc *sc; 556 int unit = UGENUNIT(dev); 557 int endpt = UGENENDPOINT(dev); 558 usb_endpoint_descriptor_t *edesc; 559 struct ugen_endpoint *sce; 560 int dir, isize; 561 usbd_status err; 562 struct usbd_xfer *xfer; 563 int i, j; 564 int error; 565 int opened = 0; 566 567 UGENHIST_FUNC(); UGENHIST_CALLED(); 568 569 KASSERT(KERNEL_LOCKED_P()); /* sc_is_open */ 570 571 if ((sc = ugenif_acquire(unit)) == NULL) 572 return ENXIO; 573 574 DPRINTFN(5, "flag=%jd, mode=%jd, unit=%jd endpt=%jd", 575 flag, mode, unit, endpt); 576 577 /* The control endpoint allows multiple opens. */ 578 if (endpt == USB_CONTROL_ENDPOINT) { 579 opened = sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1; 580 error = 0; 581 goto out; 582 } 583 584 if (sc->sc_is_open[endpt]) { 585 error = EBUSY; 586 goto out; 587 } 588 opened = sc->sc_is_open[endpt] = 1; 589 590 /* Make sure there are pipes for all directions. */ 591 for (dir = OUT; dir <= IN; dir++) { 592 if (flag & (dir == OUT ? FWRITE : FREAD)) { 593 sce = &sc->sc_endpoints[endpt][dir]; 594 if (sce->edesc == NULL) { 595 error = ENXIO; 596 goto out; 597 } 598 } 599 } 600 601 /* Actually open the pipes. */ 602 /* XXX Should back out properly if it fails. */ 603 for (dir = OUT; dir <= IN; dir++) { 604 if (!(flag & (dir == OUT ? FWRITE : FREAD))) 605 continue; 606 sce = &sc->sc_endpoints[endpt][dir]; 607 sce->state = 0; 608 sce->timeout = USBD_NO_TIMEOUT; 609 DPRINTFN(5, "sc=%jx, endpt=%jd, dir=%jd, sce=%jp", 610 (uintptr_t)sc, endpt, dir, (uintptr_t)sce); 611 edesc = sce->edesc; 612 switch (edesc->bmAttributes & UE_XFERTYPE) { 613 case UE_INTERRUPT: 614 if (dir == OUT) { 615 err = usbd_open_pipe(sce->iface, 616 edesc->bEndpointAddress, 0, &sce->pipeh); 617 if (err) { 618 error = EIO; 619 goto out; 620 } 621 break; 622 } 623 isize = UGETW(edesc->wMaxPacketSize); 624 if (isize == 0) { /* shouldn't happen */ 625 error = EINVAL; 626 goto out; 627 } 628 sce->ibuf = kmem_alloc(isize, KM_SLEEP); 629 DPRINTFN(5, "intr endpt=%d, isize=%d", 630 endpt, isize, 0, 0); 631 if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1) { 632 kmem_free(sce->ibuf, isize); 633 sce->ibuf = NULL; 634 error = ENOMEM; 635 goto out; 636 } 637 err = usbd_open_pipe_intr(sce->iface, 638 edesc->bEndpointAddress, 639 USBD_SHORT_XFER_OK, &sce->pipeh, sce, 640 sce->ibuf, isize, ugenintr, 641 USBD_DEFAULT_INTERVAL); 642 if (err) { 643 clfree(&sce->q); 644 kmem_free(sce->ibuf, isize); 645 sce->ibuf = NULL; 646 error = EIO; 647 goto out; 648 } 649 DPRINTFN(5, "interrupt open done", 0, 0, 0, 0); 650 break; 651 case UE_BULK: 652 err = usbd_open_pipe(sce->iface, 653 edesc->bEndpointAddress, 0, &sce->pipeh); 654 if (err) { 655 error = EIO; 656 goto out; 657 } 658 sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE; 659 /* 660 * Use request size for non-RA/WB transfers 661 * as the default. 662 */ 663 sce->ra_wb_reqsize = UGEN_BBSIZE; 664 break; 665 case UE_ISOCHRONOUS: 666 if (dir == OUT) { 667 error = EINVAL; 668 goto out; 669 } 670 isize = UGETW(edesc->wMaxPacketSize); 671 if (isize == 0) { /* shouldn't happen */ 672 error = EINVAL; 673 goto out; 674 } 675 sce->ibuf = kmem_alloc(isize * UGEN_NISOFRAMES, 676 KM_SLEEP); 677 sce->cur = sce->fill = sce->ibuf; 678 sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES; 679 DPRINTFN(5, "isoc endpt=%d, isize=%d", 680 endpt, isize, 0, 0); 681 err = usbd_open_pipe(sce->iface, 682 edesc->bEndpointAddress, 0, &sce->pipeh); 683 if (err) { 684 kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES); 685 sce->ibuf = NULL; 686 error = EIO; 687 goto out; 688 } 689 for (i = 0; i < UGEN_NISOREQS; ++i) { 690 sce->isoreqs[i].sce = sce; 691 err = usbd_create_xfer(sce->pipeh, 692 isize * UGEN_NISORFRMS, 0, UGEN_NISORFRMS, 693 &xfer); 694 if (err) 695 goto bad; 696 sce->isoreqs[i].xfer = xfer; 697 sce->isoreqs[i].dmabuf = usbd_get_buffer(xfer); 698 for (j = 0; j < UGEN_NISORFRMS; ++j) 699 sce->isoreqs[i].sizes[j] = isize; 700 usbd_setup_isoc_xfer(xfer, &sce->isoreqs[i], 701 sce->isoreqs[i].sizes, UGEN_NISORFRMS, 0, 702 ugen_isoc_rintr); 703 (void)usbd_transfer(xfer); 704 } 705 DPRINTFN(5, "isoc open done", 0, 0, 0, 0); 706 break; 707 bad: 708 while (--i >= 0) { /* implicit buffer free */ 709 usbd_destroy_xfer(sce->isoreqs[i].xfer); 710 sce->isoreqs[i].xfer = NULL; 711 } 712 usbd_close_pipe(sce->pipeh); 713 sce->pipeh = NULL; 714 kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES); 715 sce->ibuf = NULL; 716 error = ENOMEM; 717 goto out; 718 case UE_CONTROL: 719 sce->timeout = USBD_DEFAULT_TIMEOUT; 720 error = EINVAL; 721 goto out; 722 } 723 } 724 error = 0; 725 out: if (error && opened) 726 sc->sc_is_open[endpt] = 0; 727 ugenif_release(sc); 728 return error; 729 } 730 731 static void 732 ugen_do_close(struct ugen_softc *sc, int flag, int endpt) 733 { 734 struct ugen_endpoint *sce; 735 int dir; 736 int i; 737 738 UGENHIST_FUNC(); UGENHIST_CALLED(); 739 740 KASSERT(KERNEL_LOCKED_P()); /* sc_is_open */ 741 742 if (!sc->sc_is_open[endpt]) 743 goto out; 744 745 if (endpt == USB_CONTROL_ENDPOINT) { 746 DPRINTFN(5, "close control", 0, 0, 0, 0); 747 goto out; 748 } 749 750 for (dir = OUT; dir <= IN; dir++) { 751 if (!(flag & (dir == OUT ? FWRITE : FREAD))) 752 continue; 753 sce = &sc->sc_endpoints[endpt][dir]; 754 if (sce->pipeh == NULL) 755 continue; 756 DPRINTFN(5, "endpt=%jd dir=%jd sce=%jx", 757 endpt, dir, (uintptr_t)sce, 0); 758 759 usbd_abort_pipe(sce->pipeh); 760 761 int isize = UGETW(sce->edesc->wMaxPacketSize); 762 int msize = 0; 763 764 switch (sce->edesc->bmAttributes & UE_XFERTYPE) { 765 case UE_INTERRUPT: 766 ndflush(&sce->q, sce->q.c_cc); 767 clfree(&sce->q); 768 msize = isize; 769 break; 770 case UE_ISOCHRONOUS: 771 for (i = 0; i < UGEN_NISOREQS; ++i) { 772 usbd_destroy_xfer(sce->isoreqs[i].xfer); 773 sce->isoreqs[i].xfer = NULL; 774 } 775 msize = isize * UGEN_NISOFRAMES; 776 break; 777 case UE_BULK: 778 if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB)) { 779 usbd_destroy_xfer(sce->ra_wb_xfer); 780 sce->ra_wb_xfer = NULL; 781 msize = sce->ra_wb_bufsize; 782 } 783 break; 784 default: 785 break; 786 } 787 usbd_close_pipe(sce->pipeh); 788 sce->pipeh = NULL; 789 if (sce->ibuf != NULL) { 790 kmem_free(sce->ibuf, msize); 791 sce->ibuf = NULL; 792 } 793 } 794 795 out: sc->sc_is_open[endpt] = 0; 796 for (dir = OUT; dir <= IN; dir++) { 797 sce = &sc->sc_endpoints[endpt][dir]; 798 KASSERT(sce->pipeh == NULL); 799 KASSERT(sce->ibuf == NULL); 800 KASSERT(sce->ra_wb_xfer == NULL); 801 for (i = 0; i < UGEN_NISOREQS; i++) 802 KASSERT(sce->isoreqs[i].xfer == NULL); 803 } 804 } 805 806 static int 807 ugenclose(dev_t dev, int flag, int mode, struct lwp *l) 808 { 809 int endpt = UGENENDPOINT(dev); 810 struct ugen_softc *sc; 811 812 UGENHIST_FUNC(); UGENHIST_CALLED(); 813 814 DPRINTFN(5, "flag=%jd, mode=%jd, unit=%jd, endpt=%jd", 815 flag, mode, UGENUNIT(dev), endpt); 816 817 KASSERT(KERNEL_LOCKED_P()); /* ugen_do_close */ 818 819 if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL) 820 return ENXIO; 821 822 KASSERT(sc->sc_is_open[endpt]); 823 ugen_do_close(sc, flag, endpt); 824 KASSERT(!sc->sc_is_open[endpt]); 825 826 ugenif_release(sc); 827 828 return 0; 829 } 830 831 Static int 832 ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag) 833 { 834 struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN]; 835 uint32_t n, tn; 836 struct usbd_xfer *xfer; 837 usbd_status err; 838 int error = 0; 839 840 UGENHIST_FUNC(); UGENHIST_CALLED(); 841 842 DPRINTFN(5, "ugen%d: %jd", device_unit(sc->sc_dev), endpt, 0, 0); 843 844 if (endpt == USB_CONTROL_ENDPOINT) 845 return ENODEV; 846 847 KASSERT(sce->edesc); 848 KASSERT(sce->pipeh); 849 850 switch (sce->edesc->bmAttributes & UE_XFERTYPE) { 851 case UE_INTERRUPT: 852 /* Block until activity occurred. */ 853 mutex_enter(&sc->sc_lock); 854 while (sce->q.c_cc == 0) { 855 if (flag & IO_NDELAY) { 856 mutex_exit(&sc->sc_lock); 857 return EWOULDBLOCK; 858 } 859 DPRINTFN(5, "sleep on %jx", (uintptr_t)sce, 0, 0, 0); 860 /* "ugenri" */ 861 error = cv_timedwait_sig(&sce->cv, &sc->sc_lock, 862 mstohz(sce->timeout)); 863 DPRINTFN(5, "woke, error=%jd", 864 error, 0, 0, 0); 865 if (sc->sc_dying) 866 error = EIO; 867 if (error) 868 break; 869 } 870 mutex_exit(&sc->sc_lock); 871 872 /* Transfer as many chunks as possible. */ 873 while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) { 874 n = uimin(sce->q.c_cc, uio->uio_resid); 875 if (n > sizeof(sc->sc_buffer)) 876 n = sizeof(sc->sc_buffer); 877 878 /* Remove a small chunk from the input queue. */ 879 q_to_b(&sce->q, sc->sc_buffer, n); 880 DPRINTFN(5, "got %jd chars", n, 0, 0, 0); 881 882 /* Copy the data to the user process. */ 883 error = uiomove(sc->sc_buffer, n, uio); 884 if (error) 885 break; 886 } 887 break; 888 case UE_BULK: 889 if (sce->state & UGEN_BULK_RA) { 890 DPRINTFN(5, "BULK_RA req: %zd used: %d", 891 uio->uio_resid, sce->ra_wb_used, 0, 0); 892 xfer = sce->ra_wb_xfer; 893 894 mutex_enter(&sc->sc_lock); 895 if (sce->ra_wb_used == 0 && flag & IO_NDELAY) { 896 mutex_exit(&sc->sc_lock); 897 return EWOULDBLOCK; 898 } 899 while (uio->uio_resid > 0 && !error) { 900 while (sce->ra_wb_used == 0) { 901 DPRINTFN(5, "sleep on %jx", 902 (uintptr_t)sce, 0, 0, 0); 903 /* "ugenrb" */ 904 error = cv_timedwait_sig(&sce->cv, 905 &sc->sc_lock, mstohz(sce->timeout)); 906 DPRINTFN(5, "woke, error=%jd", 907 error, 0, 0, 0); 908 if (sc->sc_dying) 909 error = EIO; 910 if (error) 911 break; 912 } 913 914 /* Copy data to the process. */ 915 while (uio->uio_resid > 0 916 && sce->ra_wb_used > 0) { 917 n = uimin(uio->uio_resid, 918 sce->ra_wb_used); 919 n = uimin(n, sce->limit - sce->cur); 920 error = uiomove(sce->cur, n, uio); 921 if (error) 922 break; 923 sce->cur += n; 924 sce->ra_wb_used -= n; 925 if (sce->cur == sce->limit) 926 sce->cur = sce->ibuf; 927 } 928 929 /* 930 * If the transfers stopped because the 931 * buffer was full, restart them. 932 */ 933 if (sce->state & UGEN_RA_WB_STOP && 934 sce->ra_wb_used < sce->limit - sce->ibuf) { 935 n = (sce->limit - sce->ibuf) 936 - sce->ra_wb_used; 937 usbd_setup_xfer(xfer, sce, NULL, 938 uimin(n, sce->ra_wb_xferlen), 939 0, USBD_NO_TIMEOUT, 940 ugen_bulkra_intr); 941 sce->state &= ~UGEN_RA_WB_STOP; 942 err = usbd_transfer(xfer); 943 if (err != USBD_IN_PROGRESS) 944 /* 945 * The transfer has not been 946 * queued. Setting STOP 947 * will make us try 948 * again at the next read. 949 */ 950 sce->state |= UGEN_RA_WB_STOP; 951 } 952 } 953 mutex_exit(&sc->sc_lock); 954 break; 955 } 956 error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE, 957 0, 0, &xfer); 958 if (error) 959 return error; 960 while ((n = uimin(UGEN_BBSIZE, uio->uio_resid)) != 0) { 961 DPRINTFN(1, "start transfer %jd bytes", n, 0, 0, 0); 962 tn = n; 963 err = usbd_bulk_transfer(xfer, sce->pipeh, 964 sce->state & UGEN_SHORT_OK ? USBD_SHORT_XFER_OK : 0, 965 sce->timeout, sc->sc_buffer, &tn); 966 if (err) { 967 if (err == USBD_INTERRUPTED) 968 error = EINTR; 969 else if (err == USBD_TIMEOUT) 970 error = ETIMEDOUT; 971 else 972 error = EIO; 973 break; 974 } 975 DPRINTFN(1, "got %jd bytes", tn, 0, 0, 0); 976 error = uiomove(sc->sc_buffer, tn, uio); 977 if (error || tn < n) 978 break; 979 } 980 usbd_destroy_xfer(xfer); 981 break; 982 case UE_ISOCHRONOUS: 983 mutex_enter(&sc->sc_lock); 984 while (sce->cur == sce->fill) { 985 if (flag & IO_NDELAY) { 986 mutex_exit(&sc->sc_lock); 987 return EWOULDBLOCK; 988 } 989 /* "ugenri" */ 990 DPRINTFN(5, "sleep on %jx", (uintptr_t)sce, 0, 0, 0); 991 error = cv_timedwait_sig(&sce->cv, &sc->sc_lock, 992 mstohz(sce->timeout)); 993 DPRINTFN(5, "woke, error=%jd", error, 0, 0, 0); 994 if (sc->sc_dying) 995 error = EIO; 996 if (error) 997 break; 998 } 999 1000 while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) { 1001 if(sce->fill > sce->cur) 1002 n = uimin(sce->fill - sce->cur, uio->uio_resid); 1003 else 1004 n = uimin(sce->limit - sce->cur, uio->uio_resid); 1005 1006 DPRINTFN(5, "isoc got %jd chars", n, 0, 0, 0); 1007 1008 /* Copy the data to the user process. */ 1009 error = uiomove(sce->cur, n, uio); 1010 if (error) 1011 break; 1012 sce->cur += n; 1013 if (sce->cur >= sce->limit) 1014 sce->cur = sce->ibuf; 1015 } 1016 mutex_exit(&sc->sc_lock); 1017 break; 1018 1019 1020 default: 1021 return ENXIO; 1022 } 1023 return error; 1024 } 1025 1026 static int 1027 ugenread(dev_t dev, struct uio *uio, int flag) 1028 { 1029 int endpt = UGENENDPOINT(dev); 1030 struct ugen_softc *sc; 1031 int error; 1032 1033 if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL) 1034 return ENXIO; 1035 error = ugen_do_read(sc, endpt, uio, flag); 1036 ugenif_release(sc); 1037 1038 return error; 1039 } 1040 1041 Static int 1042 ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio, 1043 int flag) 1044 { 1045 struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT]; 1046 uint32_t n; 1047 int error = 0; 1048 uint32_t tn; 1049 char *dbuf; 1050 struct usbd_xfer *xfer; 1051 usbd_status err; 1052 1053 UGENHIST_FUNC(); UGENHIST_CALLED(); 1054 1055 DPRINTFN(5, "ugen%jd: %jd", device_unit(sc->sc_dev), endpt, 0, 0); 1056 1057 if (endpt == USB_CONTROL_ENDPOINT) 1058 return ENODEV; 1059 1060 KASSERT(sce->edesc); 1061 KASSERT(sce->pipeh); 1062 1063 switch (sce->edesc->bmAttributes & UE_XFERTYPE) { 1064 case UE_BULK: 1065 if (sce->state & UGEN_BULK_WB) { 1066 DPRINTFN(5, "BULK_WB req: %jd used: %jd", 1067 uio->uio_resid, sce->ra_wb_used, 0, 0); 1068 xfer = sce->ra_wb_xfer; 1069 1070 mutex_enter(&sc->sc_lock); 1071 if (sce->ra_wb_used == sce->limit - sce->ibuf && 1072 flag & IO_NDELAY) { 1073 mutex_exit(&sc->sc_lock); 1074 return EWOULDBLOCK; 1075 } 1076 while (uio->uio_resid > 0 && !error) { 1077 while (sce->ra_wb_used == 1078 sce->limit - sce->ibuf) { 1079 DPRINTFN(5, "sleep on %#jx", 1080 (uintptr_t)sce, 0, 0, 0); 1081 /* "ugenwb" */ 1082 error = cv_timedwait_sig(&sce->cv, 1083 &sc->sc_lock, mstohz(sce->timeout)); 1084 DPRINTFN(5, "woke, error=%d", 1085 error, 0, 0, 0); 1086 if (sc->sc_dying) 1087 error = EIO; 1088 if (error) 1089 break; 1090 } 1091 1092 /* Copy data from the process. */ 1093 while (uio->uio_resid > 0 && 1094 sce->ra_wb_used < sce->limit - sce->ibuf) { 1095 n = uimin(uio->uio_resid, 1096 (sce->limit - sce->ibuf) 1097 - sce->ra_wb_used); 1098 n = uimin(n, sce->limit - sce->fill); 1099 error = uiomove(sce->fill, n, uio); 1100 if (error) 1101 break; 1102 sce->fill += n; 1103 sce->ra_wb_used += n; 1104 if (sce->fill == sce->limit) 1105 sce->fill = sce->ibuf; 1106 } 1107 1108 /* 1109 * If the transfers stopped because the 1110 * buffer was empty, restart them. 1111 */ 1112 if (sce->state & UGEN_RA_WB_STOP && 1113 sce->ra_wb_used > 0) { 1114 dbuf = (char *)usbd_get_buffer(xfer); 1115 n = uimin(sce->ra_wb_used, 1116 sce->ra_wb_xferlen); 1117 tn = uimin(n, sce->limit - sce->cur); 1118 memcpy(dbuf, sce->cur, tn); 1119 dbuf += tn; 1120 if (n - tn > 0) 1121 memcpy(dbuf, sce->ibuf, 1122 n - tn); 1123 usbd_setup_xfer(xfer, sce, NULL, n, 1124 0, USBD_NO_TIMEOUT, 1125 ugen_bulkwb_intr); 1126 sce->state &= ~UGEN_RA_WB_STOP; 1127 err = usbd_transfer(xfer); 1128 if (err != USBD_IN_PROGRESS) 1129 /* 1130 * The transfer has not been 1131 * queued. Setting STOP 1132 * will make us try again 1133 * at the next read. 1134 */ 1135 sce->state |= UGEN_RA_WB_STOP; 1136 } 1137 } 1138 mutex_exit(&sc->sc_lock); 1139 break; 1140 } 1141 error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE, 1142 0, 0, &xfer); 1143 if (error) 1144 return error; 1145 while ((n = uimin(UGEN_BBSIZE, uio->uio_resid)) != 0) { 1146 error = uiomove(sc->sc_buffer, n, uio); 1147 if (error) 1148 break; 1149 DPRINTFN(1, "transfer %jd bytes", n, 0, 0, 0); 1150 err = usbd_bulk_transfer(xfer, sce->pipeh, 0, sce->timeout, 1151 sc->sc_buffer, &n); 1152 if (err) { 1153 if (err == USBD_INTERRUPTED) 1154 error = EINTR; 1155 else if (err == USBD_TIMEOUT) 1156 error = ETIMEDOUT; 1157 else 1158 error = EIO; 1159 break; 1160 } 1161 } 1162 usbd_destroy_xfer(xfer); 1163 break; 1164 case UE_INTERRUPT: 1165 error = usbd_create_xfer(sce->pipeh, 1166 UGETW(sce->edesc->wMaxPacketSize), 0, 0, &xfer); 1167 if (error) 1168 return error; 1169 while ((n = uimin(UGETW(sce->edesc->wMaxPacketSize), 1170 uio->uio_resid)) != 0) { 1171 error = uiomove(sc->sc_buffer, n, uio); 1172 if (error) 1173 break; 1174 DPRINTFN(1, "transfer %jd bytes", n, 0, 0, 0); 1175 err = usbd_intr_transfer(xfer, sce->pipeh, 0, 1176 sce->timeout, sc->sc_buffer, &n); 1177 if (err) { 1178 if (err == USBD_INTERRUPTED) 1179 error = EINTR; 1180 else if (err == USBD_TIMEOUT) 1181 error = ETIMEDOUT; 1182 else 1183 error = EIO; 1184 break; 1185 } 1186 } 1187 usbd_destroy_xfer(xfer); 1188 break; 1189 default: 1190 return ENXIO; 1191 } 1192 return error; 1193 } 1194 1195 static int 1196 ugenwrite(dev_t dev, struct uio *uio, int flag) 1197 { 1198 int endpt = UGENENDPOINT(dev); 1199 struct ugen_softc *sc; 1200 int error; 1201 1202 if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL) 1203 return ENXIO; 1204 error = ugen_do_write(sc, endpt, uio, flag); 1205 ugenif_release(sc); 1206 1207 return error; 1208 } 1209 1210 static int 1211 ugen_activate(device_t self, enum devact act) 1212 { 1213 struct ugen_softc *sc = device_private(self); 1214 1215 switch (act) { 1216 case DVACT_DEACTIVATE: 1217 sc->sc_dying = 1; 1218 return 0; 1219 default: 1220 return EOPNOTSUPP; 1221 } 1222 } 1223 1224 static int 1225 ugen_detach(device_t self, int flags) 1226 { 1227 struct ugen_softc *sc = device_private(self); 1228 struct ugen_endpoint *sce; 1229 int i, dir; 1230 int maj, mn; 1231 1232 UGENHIST_FUNC(); UGENHIST_CALLED(); 1233 1234 DPRINTF("sc=%ju flags=%ju", (uintptr_t)sc, flags, 0, 0); 1235 1236 KASSERT(KERNEL_LOCKED_P()); /* sc_is_open */ 1237 1238 /* 1239 * Fail if we're not forced to detach and userland has any 1240 * endpoints open. 1241 */ 1242 if ((flags & DETACH_FORCE) == 0) { 1243 for (i = 0; i < USB_MAX_ENDPOINTS; i++) { 1244 if (sc->sc_is_open[i]) 1245 return EBUSY; 1246 } 1247 } 1248 1249 /* Prevent new users. Prevent suspend/resume. */ 1250 sc->sc_dying = 1; 1251 pmf_device_deregister(self); 1252 1253 /* 1254 * If we never finished attaching, skip nixing endpoints and 1255 * users because there aren't any. 1256 */ 1257 if (!sc->sc_attached) 1258 goto out; 1259 1260 /* Abort all pipes. */ 1261 for (i = 0; i < USB_MAX_ENDPOINTS; i++) { 1262 for (dir = OUT; dir <= IN; dir++) { 1263 sce = &sc->sc_endpoints[i][dir]; 1264 if (sce->pipeh) 1265 usbd_abort_pipe(sce->pipeh); 1266 } 1267 } 1268 1269 /* 1270 * Wait for users to drain. Before this point there can be no 1271 * more I/O operations started because we set sc_dying; after 1272 * this, there can be no more I/O operations in progress, so it 1273 * will be safe to free things. 1274 */ 1275 mutex_enter(&sc->sc_lock); 1276 if (--sc->sc_refcnt >= 0) { 1277 /* Wake everyone */ 1278 for (i = 0; i < USB_MAX_ENDPOINTS; i++) { 1279 for (dir = OUT; dir <= IN; dir++) 1280 cv_broadcast(&sc->sc_endpoints[i][dir].cv); 1281 } 1282 /* Wait for processes to go away. */ 1283 do { 1284 cv_wait(&sc->sc_detach_cv, &sc->sc_lock); 1285 } while (sc->sc_refcnt >= 0); 1286 } 1287 mutex_exit(&sc->sc_lock); 1288 1289 /* locate the major number */ 1290 maj = cdevsw_lookup_major(&ugen_cdevsw); 1291 1292 /* 1293 * Nuke the vnodes for any open instances (calls ugenclose, but 1294 * with no effect because we already set sc_dying). 1295 */ 1296 mn = sc->sc_unit * USB_MAX_ENDPOINTS; 1297 vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR); 1298 1299 /* Actually close any lingering pipes. */ 1300 for (i = 0; i < USB_MAX_ENDPOINTS; i++) 1301 ugen_do_close(sc, FREAD|FWRITE, i); 1302 1303 usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev, sc->sc_dev); 1304 ugenif_put_unit(sc); 1305 1306 out: for (i = 0; i < USB_MAX_ENDPOINTS; i++) { 1307 for (dir = OUT; dir <= IN; dir++) { 1308 sce = &sc->sc_endpoints[i][dir]; 1309 seldestroy(&sce->rsel); 1310 cv_destroy(&sce->cv); 1311 } 1312 } 1313 1314 cv_destroy(&sc->sc_detach_cv); 1315 mutex_destroy(&sc->sc_lock); 1316 1317 return 0; 1318 } 1319 1320 Static void 1321 ugenintr(struct usbd_xfer *xfer, void *addr, usbd_status status) 1322 { 1323 struct ugen_endpoint *sce = addr; 1324 struct ugen_softc *sc = sce->sc; 1325 uint32_t count; 1326 u_char *ibuf; 1327 1328 UGENHIST_FUNC(); UGENHIST_CALLED(); 1329 1330 if (status == USBD_CANCELLED) 1331 return; 1332 1333 if (status != USBD_NORMAL_COMPLETION) { 1334 DPRINTF("status=%jd", status, 0, 0, 0); 1335 if (status == USBD_STALLED) 1336 usbd_clear_endpoint_stall_async(sce->pipeh); 1337 return; 1338 } 1339 1340 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL); 1341 ibuf = sce->ibuf; 1342 1343 DPRINTFN(5, "xfer=%#jx status=%d count=%d", 1344 (uintptr_t)xfer, status, count, 0); 1345 DPRINTFN(5, " data = %02x %02x %02x", 1346 ibuf[0], ibuf[1], ibuf[2], 0); 1347 1348 mutex_enter(&sc->sc_lock); 1349 (void)b_to_q(ibuf, count, &sce->q); 1350 cv_signal(&sce->cv); 1351 mutex_exit(&sc->sc_lock); 1352 selnotify(&sce->rsel, 0, 0); 1353 } 1354 1355 Static void 1356 ugen_isoc_rintr(struct usbd_xfer *xfer, void *addr, 1357 usbd_status status) 1358 { 1359 struct isoreq *req = addr; 1360 struct ugen_endpoint *sce = req->sce; 1361 struct ugen_softc *sc = sce->sc; 1362 uint32_t count, n; 1363 int i, isize; 1364 1365 UGENHIST_FUNC(); UGENHIST_CALLED(); 1366 1367 /* Return if we are aborting. */ 1368 if (status == USBD_CANCELLED) 1369 return; 1370 1371 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL); 1372 DPRINTFN(5, "xfer %ld, count=%d", 1373 (long)(req - sce->isoreqs), count, 0, 0); 1374 1375 mutex_enter(&sc->sc_lock); 1376 1377 /* throw away oldest input if the buffer is full */ 1378 if (sce->fill < sce->cur && sce->cur <= sce->fill + count) { 1379 sce->cur += count; 1380 if (sce->cur >= sce->limit) 1381 sce->cur = sce->ibuf + (sce->limit - sce->cur); 1382 DPRINTFN(5, "throwing away %jd bytes", 1383 count, 0, 0, 0); 1384 } 1385 1386 isize = UGETW(sce->edesc->wMaxPacketSize); 1387 for (i = 0; i < UGEN_NISORFRMS; i++) { 1388 uint32_t actlen = req->sizes[i]; 1389 char const *tbuf = (char const *)req->dmabuf + isize * i; 1390 1391 /* copy data to buffer */ 1392 while (actlen > 0) { 1393 n = uimin(actlen, sce->limit - sce->fill); 1394 memcpy(sce->fill, tbuf, n); 1395 1396 tbuf += n; 1397 actlen -= n; 1398 sce->fill += n; 1399 if (sce->fill == sce->limit) 1400 sce->fill = sce->ibuf; 1401 } 1402 1403 /* setup size for next transfer */ 1404 req->sizes[i] = isize; 1405 } 1406 1407 usbd_setup_isoc_xfer(xfer, req, req->sizes, UGEN_NISORFRMS, 0, 1408 ugen_isoc_rintr); 1409 (void)usbd_transfer(xfer); 1410 1411 cv_signal(&sce->cv); 1412 mutex_exit(&sc->sc_lock); 1413 selnotify(&sce->rsel, 0, 0); 1414 } 1415 1416 Static void 1417 ugen_bulkra_intr(struct usbd_xfer *xfer, void *addr, 1418 usbd_status status) 1419 { 1420 struct ugen_endpoint *sce = addr; 1421 struct ugen_softc *sc = sce->sc; 1422 uint32_t count, n; 1423 char const *tbuf; 1424 usbd_status err; 1425 1426 UGENHIST_FUNC(); UGENHIST_CALLED(); 1427 1428 /* Return if we are aborting. */ 1429 if (status == USBD_CANCELLED) 1430 return; 1431 1432 if (status != USBD_NORMAL_COMPLETION) { 1433 DPRINTF("status=%jd", status, 0, 0, 0); 1434 sce->state |= UGEN_RA_WB_STOP; 1435 if (status == USBD_STALLED) 1436 usbd_clear_endpoint_stall_async(sce->pipeh); 1437 return; 1438 } 1439 1440 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL); 1441 1442 mutex_enter(&sc->sc_lock); 1443 1444 /* Keep track of how much is in the buffer. */ 1445 sce->ra_wb_used += count; 1446 1447 /* Copy data to buffer. */ 1448 tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer); 1449 n = uimin(count, sce->limit - sce->fill); 1450 memcpy(sce->fill, tbuf, n); 1451 tbuf += n; 1452 count -= n; 1453 sce->fill += n; 1454 if (sce->fill == sce->limit) 1455 sce->fill = sce->ibuf; 1456 if (count > 0) { 1457 memcpy(sce->fill, tbuf, count); 1458 sce->fill += count; 1459 } 1460 1461 /* Set up the next request if necessary. */ 1462 n = (sce->limit - sce->ibuf) - sce->ra_wb_used; 1463 if (n > 0) { 1464 usbd_setup_xfer(xfer, sce, NULL, uimin(n, sce->ra_wb_xferlen), 0, 1465 USBD_NO_TIMEOUT, ugen_bulkra_intr); 1466 err = usbd_transfer(xfer); 1467 if (err != USBD_IN_PROGRESS) { 1468 printf("error=%d", err); 1469 /* 1470 * The transfer has not been queued. Setting STOP 1471 * will make us try again at the next read. 1472 */ 1473 sce->state |= UGEN_RA_WB_STOP; 1474 } 1475 } 1476 else 1477 sce->state |= UGEN_RA_WB_STOP; 1478 1479 cv_signal(&sce->cv); 1480 mutex_exit(&sc->sc_lock); 1481 selnotify(&sce->rsel, 0, 0); 1482 } 1483 1484 Static void 1485 ugen_bulkwb_intr(struct usbd_xfer *xfer, void *addr, 1486 usbd_status status) 1487 { 1488 struct ugen_endpoint *sce = addr; 1489 struct ugen_softc *sc = sce->sc; 1490 uint32_t count, n; 1491 char *tbuf; 1492 usbd_status err; 1493 1494 UGENHIST_FUNC(); UGENHIST_CALLED(); 1495 1496 /* Return if we are aborting. */ 1497 if (status == USBD_CANCELLED) 1498 return; 1499 1500 if (status != USBD_NORMAL_COMPLETION) { 1501 DPRINTF("status=%jd", status, 0, 0, 0); 1502 sce->state |= UGEN_RA_WB_STOP; 1503 if (status == USBD_STALLED) 1504 usbd_clear_endpoint_stall_async(sce->pipeh); 1505 return; 1506 } 1507 1508 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL); 1509 1510 mutex_enter(&sc->sc_lock); 1511 1512 /* Keep track of how much is in the buffer. */ 1513 sce->ra_wb_used -= count; 1514 1515 /* Update buffer pointers. */ 1516 sce->cur += count; 1517 if (sce->cur >= sce->limit) 1518 sce->cur = sce->ibuf + (sce->cur - sce->limit); 1519 1520 /* Set up next request if necessary. */ 1521 if (sce->ra_wb_used > 0) { 1522 /* copy data from buffer */ 1523 tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer); 1524 count = uimin(sce->ra_wb_used, sce->ra_wb_xferlen); 1525 n = uimin(count, sce->limit - sce->cur); 1526 memcpy(tbuf, sce->cur, n); 1527 tbuf += n; 1528 if (count - n > 0) 1529 memcpy(tbuf, sce->ibuf, count - n); 1530 1531 usbd_setup_xfer(xfer, sce, NULL, count, 0, USBD_NO_TIMEOUT, 1532 ugen_bulkwb_intr); 1533 err = usbd_transfer(xfer); 1534 if (err != USBD_IN_PROGRESS) { 1535 printf("error=%d", err); 1536 /* 1537 * The transfer has not been queued. Setting STOP 1538 * will make us try again at the next write. 1539 */ 1540 sce->state |= UGEN_RA_WB_STOP; 1541 } 1542 } 1543 else 1544 sce->state |= UGEN_RA_WB_STOP; 1545 1546 cv_signal(&sce->cv); 1547 mutex_exit(&sc->sc_lock); 1548 selnotify(&sce->rsel, 0, 0); 1549 } 1550 1551 Static usbd_status 1552 ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno) 1553 { 1554 struct usbd_interface *iface; 1555 usb_endpoint_descriptor_t *ed; 1556 usbd_status err; 1557 struct ugen_endpoint *sce; 1558 uint8_t niface, nendpt, endptno, endpt; 1559 int dir; 1560 1561 UGENHIST_FUNC(); UGENHIST_CALLED(); 1562 1563 DPRINTFN(15, "%d %d", ifaceidx, altno, 0, 0); 1564 1565 err = usbd_interface_count(sc->sc_udev, &niface); 1566 if (err) 1567 return err; 1568 if (ifaceidx < 0 || ifaceidx >= niface) 1569 return USBD_INVAL; 1570 1571 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface); 1572 if (err) 1573 return err; 1574 err = usbd_endpoint_count(iface, &nendpt); 1575 if (err) 1576 return err; 1577 1578 /* change setting */ 1579 err = usbd_set_interface(iface, altno); 1580 if (err) 1581 return err; 1582 1583 err = usbd_endpoint_count(iface, &nendpt); 1584 if (err) 1585 return err; 1586 1587 ugen_clear_endpoints(sc); 1588 1589 for (endptno = 0; endptno < nendpt; endptno++) { 1590 ed = usbd_interface2endpoint_descriptor(iface,endptno); 1591 KASSERT(ed != NULL); 1592 endpt = ed->bEndpointAddress; 1593 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT; 1594 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir]; 1595 sce->sc = sc; 1596 sce->edesc = ed; 1597 sce->iface = iface; 1598 } 1599 return 0; 1600 } 1601 1602 /* Retrieve a complete descriptor for a certain device and index. */ 1603 Static usb_config_descriptor_t * 1604 ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp) 1605 { 1606 usb_config_descriptor_t *cdesc, *tdesc, cdescr; 1607 int len; 1608 usbd_status err; 1609 1610 UGENHIST_FUNC(); UGENHIST_CALLED(); 1611 1612 if (index == USB_CURRENT_CONFIG_INDEX) { 1613 tdesc = usbd_get_config_descriptor(sc->sc_udev); 1614 if (tdesc == NULL) 1615 return NULL; 1616 len = UGETW(tdesc->wTotalLength); 1617 if (lenp) 1618 *lenp = len; 1619 cdesc = kmem_alloc(len, KM_SLEEP); 1620 memcpy(cdesc, tdesc, len); 1621 DPRINTFN(5, "current, len=%jd", len, 0, 0, 0); 1622 } else { 1623 err = usbd_get_config_desc(sc->sc_udev, index, &cdescr); 1624 if (err) 1625 return 0; 1626 len = UGETW(cdescr.wTotalLength); 1627 DPRINTFN(5, "index=%jd, len=%jd", index, len, 0, 0); 1628 if (lenp) 1629 *lenp = len; 1630 cdesc = kmem_alloc(len, KM_SLEEP); 1631 err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len); 1632 if (err) { 1633 kmem_free(cdesc, len); 1634 return 0; 1635 } 1636 } 1637 return cdesc; 1638 } 1639 1640 Static int 1641 ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx) 1642 { 1643 struct usbd_interface *iface; 1644 usbd_status err; 1645 1646 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface); 1647 if (err) 1648 return -1; 1649 return usbd_get_interface_altindex(iface); 1650 } 1651 1652 Static int 1653 ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd, 1654 void *addr, int flag, struct lwp *l) 1655 { 1656 struct ugen_endpoint *sce; 1657 usbd_status err; 1658 struct usbd_interface *iface; 1659 struct usb_config_desc *cd; 1660 usb_config_descriptor_t *cdesc; 1661 struct usb_interface_desc *id; 1662 usb_interface_descriptor_t *idesc; 1663 struct usb_endpoint_desc *ed; 1664 usb_endpoint_descriptor_t *edesc; 1665 struct usb_alt_interface *ai; 1666 struct usb_string_desc *si; 1667 uint8_t conf, alt; 1668 int cdesclen; 1669 int error; 1670 int dir; 1671 1672 UGENHIST_FUNC(); UGENHIST_CALLED(); 1673 1674 KASSERT(KERNEL_LOCKED_P()); /* ugen_set_config */ 1675 1676 DPRINTFN(5, "cmd=%08jx", cmd, 0, 0, 0); 1677 1678 switch (cmd) { 1679 case FIONBIO: 1680 /* All handled in the upper FS layer. */ 1681 return 0; 1682 case USB_SET_SHORT_XFER: 1683 if (endpt == USB_CONTROL_ENDPOINT) 1684 return EINVAL; 1685 /* This flag only affects read */ 1686 sce = &sc->sc_endpoints[endpt][IN]; 1687 if (sce == NULL || sce->pipeh == NULL) 1688 return EINVAL; 1689 if (*(int *)addr) 1690 sce->state |= UGEN_SHORT_OK; 1691 else 1692 sce->state &= ~UGEN_SHORT_OK; 1693 return 0; 1694 case USB_SET_TIMEOUT: 1695 for (dir = OUT; dir <= IN; dir++) { 1696 sce = &sc->sc_endpoints[endpt][dir]; 1697 if (sce == NULL) 1698 return EINVAL; 1699 1700 sce->timeout = *(int *)addr; 1701 } 1702 return 0; 1703 case USB_SET_BULK_RA: 1704 if (endpt == USB_CONTROL_ENDPOINT) 1705 return EINVAL; 1706 sce = &sc->sc_endpoints[endpt][IN]; 1707 if (sce == NULL || sce->pipeh == NULL) 1708 return EINVAL; 1709 edesc = sce->edesc; 1710 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK) 1711 return EINVAL; 1712 1713 if (*(int *)addr) { 1714 /* Only turn RA on if it's currently off. */ 1715 if (sce->state & UGEN_BULK_RA) 1716 return 0; 1717 KASSERT(sce->ra_wb_xfer == NULL); 1718 KASSERT(sce->ibuf == NULL); 1719 1720 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0) 1721 /* shouldn't happen */ 1722 return EINVAL; 1723 error = usbd_create_xfer(sce->pipeh, 1724 sce->ra_wb_reqsize, 0, 0, &sce->ra_wb_xfer); 1725 if (error) 1726 return error; 1727 sce->ra_wb_xferlen = sce->ra_wb_reqsize; 1728 sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP); 1729 sce->fill = sce->cur = sce->ibuf; 1730 sce->limit = sce->ibuf + sce->ra_wb_bufsize; 1731 sce->ra_wb_used = 0; 1732 sce->state |= UGEN_BULK_RA; 1733 sce->state &= ~UGEN_RA_WB_STOP; 1734 /* Now start reading. */ 1735 usbd_setup_xfer(sce->ra_wb_xfer, sce, NULL, 1736 uimin(sce->ra_wb_xferlen, sce->ra_wb_bufsize), 1737 0, USBD_NO_TIMEOUT, ugen_bulkra_intr); 1738 err = usbd_transfer(sce->ra_wb_xfer); 1739 if (err != USBD_IN_PROGRESS) { 1740 sce->state &= ~UGEN_BULK_RA; 1741 kmem_free(sce->ibuf, sce->ra_wb_bufsize); 1742 sce->ibuf = NULL; 1743 usbd_destroy_xfer(sce->ra_wb_xfer); 1744 sce->ra_wb_xfer = NULL; 1745 return EIO; 1746 } 1747 } else { 1748 /* Only turn RA off if it's currently on. */ 1749 if (!(sce->state & UGEN_BULK_RA)) 1750 return 0; 1751 1752 sce->state &= ~UGEN_BULK_RA; 1753 usbd_abort_pipe(sce->pipeh); 1754 usbd_destroy_xfer(sce->ra_wb_xfer); 1755 sce->ra_wb_xfer = NULL; 1756 /* 1757 * XXX Discard whatever's in the buffer, but we 1758 * should keep it around and drain the buffer 1759 * instead. 1760 */ 1761 kmem_free(sce->ibuf, sce->ra_wb_bufsize); 1762 sce->ibuf = NULL; 1763 } 1764 return 0; 1765 case USB_SET_BULK_WB: 1766 if (endpt == USB_CONTROL_ENDPOINT) 1767 return EINVAL; 1768 sce = &sc->sc_endpoints[endpt][OUT]; 1769 if (sce == NULL || sce->pipeh == NULL) 1770 return EINVAL; 1771 edesc = sce->edesc; 1772 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK) 1773 return EINVAL; 1774 1775 if (*(int *)addr) { 1776 /* Only turn WB on if it's currently off. */ 1777 if (sce->state & UGEN_BULK_WB) 1778 return 0; 1779 KASSERT(sce->ra_wb_xfer == NULL); 1780 KASSERT(sce->ibuf == NULL); 1781 1782 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0) 1783 /* shouldn't happen */ 1784 return EINVAL; 1785 error = usbd_create_xfer(sce->pipeh, sce->ra_wb_reqsize, 1786 0, 0, &sce->ra_wb_xfer); 1787 /* XXX check error??? */ 1788 sce->ra_wb_xferlen = sce->ra_wb_reqsize; 1789 sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP); 1790 sce->fill = sce->cur = sce->ibuf; 1791 sce->limit = sce->ibuf + sce->ra_wb_bufsize; 1792 sce->ra_wb_used = 0; 1793 sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP; 1794 } else { 1795 /* Only turn WB off if it's currently on. */ 1796 if (!(sce->state & UGEN_BULK_WB)) 1797 return 0; 1798 1799 sce->state &= ~UGEN_BULK_WB; 1800 /* 1801 * XXX Discard whatever's in the buffer, but we 1802 * should keep it around and keep writing to 1803 * drain the buffer instead. 1804 */ 1805 usbd_abort_pipe(sce->pipeh); 1806 usbd_destroy_xfer(sce->ra_wb_xfer); 1807 sce->ra_wb_xfer = NULL; 1808 kmem_free(sce->ibuf, sce->ra_wb_bufsize); 1809 sce->ibuf = NULL; 1810 } 1811 return 0; 1812 case USB_SET_BULK_RA_OPT: 1813 case USB_SET_BULK_WB_OPT: 1814 { 1815 struct usb_bulk_ra_wb_opt *opt; 1816 1817 if (endpt == USB_CONTROL_ENDPOINT) 1818 return EINVAL; 1819 opt = (struct usb_bulk_ra_wb_opt *)addr; 1820 if (cmd == USB_SET_BULK_RA_OPT) 1821 sce = &sc->sc_endpoints[endpt][IN]; 1822 else 1823 sce = &sc->sc_endpoints[endpt][OUT]; 1824 if (sce == NULL || sce->pipeh == NULL) 1825 return EINVAL; 1826 if (opt->ra_wb_buffer_size < 1 || 1827 opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX || 1828 opt->ra_wb_request_size < 1 || 1829 opt->ra_wb_request_size > opt->ra_wb_buffer_size) 1830 return EINVAL; 1831 /* 1832 * XXX These changes do not take effect until the 1833 * next time RA/WB mode is enabled but they ought to 1834 * take effect immediately. 1835 */ 1836 sce->ra_wb_bufsize = opt->ra_wb_buffer_size; 1837 sce->ra_wb_reqsize = opt->ra_wb_request_size; 1838 return 0; 1839 } 1840 default: 1841 break; 1842 } 1843 1844 if (endpt != USB_CONTROL_ENDPOINT) 1845 return EINVAL; 1846 1847 switch (cmd) { 1848 #ifdef UGEN_DEBUG 1849 case USB_SETDEBUG: 1850 ugendebug = *(int *)addr; 1851 break; 1852 #endif 1853 case USB_GET_CONFIG: 1854 err = usbd_get_config(sc->sc_udev, &conf); 1855 if (err) 1856 return EIO; 1857 *(int *)addr = conf; 1858 break; 1859 case USB_SET_CONFIG: 1860 if (!(flag & FWRITE)) 1861 return EPERM; 1862 err = ugen_set_config(sc, *(int *)addr, 1); 1863 switch (err) { 1864 case USBD_NORMAL_COMPLETION: 1865 break; 1866 case USBD_IN_USE: 1867 return EBUSY; 1868 default: 1869 return EIO; 1870 } 1871 break; 1872 case USB_GET_ALTINTERFACE: 1873 ai = (struct usb_alt_interface *)addr; 1874 err = usbd_device2interface_handle(sc->sc_udev, 1875 ai->uai_interface_index, &iface); 1876 if (err) 1877 return EINVAL; 1878 idesc = usbd_get_interface_descriptor(iface); 1879 if (idesc == NULL) 1880 return EIO; 1881 ai->uai_alt_no = idesc->bAlternateSetting; 1882 break; 1883 case USB_SET_ALTINTERFACE: 1884 if (!(flag & FWRITE)) 1885 return EPERM; 1886 ai = (struct usb_alt_interface *)addr; 1887 err = usbd_device2interface_handle(sc->sc_udev, 1888 ai->uai_interface_index, &iface); 1889 if (err) 1890 return EINVAL; 1891 err = ugen_set_interface(sc, ai->uai_interface_index, 1892 ai->uai_alt_no); 1893 if (err) 1894 return EINVAL; 1895 break; 1896 case USB_GET_NO_ALT: 1897 ai = (struct usb_alt_interface *)addr; 1898 cdesc = ugen_get_cdesc(sc, ai->uai_config_index, &cdesclen); 1899 if (cdesc == NULL) 1900 return EINVAL; 1901 idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0); 1902 if (idesc == NULL) { 1903 kmem_free(cdesc, cdesclen); 1904 return EINVAL; 1905 } 1906 ai->uai_alt_no = usbd_get_no_alts(cdesc, 1907 idesc->bInterfaceNumber); 1908 kmem_free(cdesc, cdesclen); 1909 break; 1910 case USB_GET_DEVICE_DESC: 1911 *(usb_device_descriptor_t *)addr = 1912 *usbd_get_device_descriptor(sc->sc_udev); 1913 break; 1914 case USB_GET_CONFIG_DESC: 1915 cd = (struct usb_config_desc *)addr; 1916 cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, &cdesclen); 1917 if (cdesc == NULL) 1918 return EINVAL; 1919 cd->ucd_desc = *cdesc; 1920 kmem_free(cdesc, cdesclen); 1921 break; 1922 case USB_GET_INTERFACE_DESC: 1923 id = (struct usb_interface_desc *)addr; 1924 cdesc = ugen_get_cdesc(sc, id->uid_config_index, &cdesclen); 1925 if (cdesc == NULL) 1926 return EINVAL; 1927 if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX && 1928 id->uid_alt_index == USB_CURRENT_ALT_INDEX) 1929 alt = ugen_get_alt_index(sc, id->uid_interface_index); 1930 else 1931 alt = id->uid_alt_index; 1932 idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt); 1933 if (idesc == NULL) { 1934 kmem_free(cdesc, cdesclen); 1935 return EINVAL; 1936 } 1937 id->uid_desc = *idesc; 1938 kmem_free(cdesc, cdesclen); 1939 break; 1940 case USB_GET_ENDPOINT_DESC: 1941 ed = (struct usb_endpoint_desc *)addr; 1942 cdesc = ugen_get_cdesc(sc, ed->ued_config_index, &cdesclen); 1943 if (cdesc == NULL) 1944 return EINVAL; 1945 if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX && 1946 ed->ued_alt_index == USB_CURRENT_ALT_INDEX) 1947 alt = ugen_get_alt_index(sc, ed->ued_interface_index); 1948 else 1949 alt = ed->ued_alt_index; 1950 edesc = usbd_find_edesc(cdesc, ed->ued_interface_index, 1951 alt, ed->ued_endpoint_index); 1952 if (edesc == NULL) { 1953 kmem_free(cdesc, cdesclen); 1954 return EINVAL; 1955 } 1956 ed->ued_desc = *edesc; 1957 kmem_free(cdesc, cdesclen); 1958 break; 1959 case USB_GET_FULL_DESC: 1960 { 1961 int len; 1962 struct iovec iov; 1963 struct uio uio; 1964 struct usb_full_desc *fd = (struct usb_full_desc *)addr; 1965 1966 cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &cdesclen); 1967 if (cdesc == NULL) 1968 return EINVAL; 1969 len = cdesclen; 1970 if (len > fd->ufd_size) 1971 len = fd->ufd_size; 1972 iov.iov_base = (void *)fd->ufd_data; 1973 iov.iov_len = len; 1974 uio.uio_iov = &iov; 1975 uio.uio_iovcnt = 1; 1976 uio.uio_resid = len; 1977 uio.uio_offset = 0; 1978 uio.uio_rw = UIO_READ; 1979 uio.uio_vmspace = l->l_proc->p_vmspace; 1980 error = uiomove((void *)cdesc, len, &uio); 1981 kmem_free(cdesc, cdesclen); 1982 return error; 1983 } 1984 case USB_GET_STRING_DESC: { 1985 int len; 1986 si = (struct usb_string_desc *)addr; 1987 err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index, 1988 si->usd_language_id, &si->usd_desc, &len); 1989 if (err) 1990 return EINVAL; 1991 break; 1992 } 1993 case USB_DO_REQUEST: 1994 { 1995 struct usb_ctl_request *ur = (void *)addr; 1996 int len = UGETW(ur->ucr_request.wLength); 1997 struct iovec iov; 1998 struct uio uio; 1999 void *ptr = 0; 2000 usbd_status xerr; 2001 2002 error = 0; 2003 2004 if (!(flag & FWRITE)) 2005 return EPERM; 2006 /* Avoid requests that would damage the bus integrity. */ 2007 if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE && 2008 ur->ucr_request.bRequest == UR_SET_ADDRESS) || 2009 (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE && 2010 ur->ucr_request.bRequest == UR_SET_CONFIG) || 2011 (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE && 2012 ur->ucr_request.bRequest == UR_SET_INTERFACE)) 2013 return EINVAL; 2014 2015 if (len < 0 || len > 32767) 2016 return EINVAL; 2017 if (len != 0) { 2018 iov.iov_base = (void *)ur->ucr_data; 2019 iov.iov_len = len; 2020 uio.uio_iov = &iov; 2021 uio.uio_iovcnt = 1; 2022 uio.uio_resid = len; 2023 uio.uio_offset = 0; 2024 uio.uio_rw = 2025 ur->ucr_request.bmRequestType & UT_READ ? 2026 UIO_READ : UIO_WRITE; 2027 uio.uio_vmspace = l->l_proc->p_vmspace; 2028 ptr = kmem_alloc(len, KM_SLEEP); 2029 if (uio.uio_rw == UIO_WRITE) { 2030 error = uiomove(ptr, len, &uio); 2031 if (error) 2032 goto ret; 2033 } 2034 } 2035 sce = &sc->sc_endpoints[endpt][IN]; 2036 xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request, 2037 ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout); 2038 if (xerr) { 2039 error = EIO; 2040 goto ret; 2041 } 2042 if (len != 0) { 2043 if (uio.uio_rw == UIO_READ) { 2044 size_t alen = uimin(len, ur->ucr_actlen); 2045 error = uiomove(ptr, alen, &uio); 2046 if (error) 2047 goto ret; 2048 } 2049 } 2050 ret: 2051 if (ptr) 2052 kmem_free(ptr, len); 2053 return error; 2054 } 2055 case USB_GET_DEVICEINFO: 2056 usbd_fill_deviceinfo(sc->sc_udev, 2057 (struct usb_device_info *)addr, 0); 2058 break; 2059 case USB_GET_DEVICEINFO_OLD: 2060 { 2061 int ret; 2062 MODULE_HOOK_CALL(usb_subr_fill_30_hook, 2063 (sc->sc_udev, (struct usb_device_info_old *)addr, 0, 2064 usbd_devinfo_vp, usbd_printBCD), 2065 enosys(), ret); 2066 if (ret == 0) 2067 return 0; 2068 return EINVAL; 2069 } 2070 default: 2071 return EINVAL; 2072 } 2073 return 0; 2074 } 2075 2076 static int 2077 ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l) 2078 { 2079 int endpt = UGENENDPOINT(dev); 2080 struct ugen_softc *sc; 2081 int error; 2082 2083 if ((sc = ugenif_acquire(UGENUNIT(dev))) == 0) 2084 return ENXIO; 2085 error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l); 2086 ugenif_release(sc); 2087 2088 return error; 2089 } 2090 2091 static int 2092 ugenpoll(dev_t dev, int events, struct lwp *l) 2093 { 2094 struct ugen_softc *sc; 2095 struct ugen_endpoint *sce_in, *sce_out; 2096 int revents = 0; 2097 2098 if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL) 2099 return POLLHUP; 2100 2101 if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT) { 2102 revents |= POLLERR; 2103 goto out; 2104 } 2105 2106 sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN]; 2107 sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT]; 2108 KASSERT(sce_in->edesc || sce_out->edesc); 2109 KASSERT(sce_in->pipeh || sce_out->pipeh); 2110 2111 mutex_enter(&sc->sc_lock); 2112 if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM))) 2113 switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) { 2114 case UE_INTERRUPT: 2115 if (sce_in->q.c_cc > 0) 2116 revents |= events & (POLLIN | POLLRDNORM); 2117 else 2118 selrecord(l, &sce_in->rsel); 2119 break; 2120 case UE_ISOCHRONOUS: 2121 if (sce_in->cur != sce_in->fill) 2122 revents |= events & (POLLIN | POLLRDNORM); 2123 else 2124 selrecord(l, &sce_in->rsel); 2125 break; 2126 case UE_BULK: 2127 if (sce_in->state & UGEN_BULK_RA) { 2128 if (sce_in->ra_wb_used > 0) 2129 revents |= events & 2130 (POLLIN | POLLRDNORM); 2131 else 2132 selrecord(l, &sce_in->rsel); 2133 break; 2134 } 2135 /* 2136 * We have no easy way of determining if a read will 2137 * yield any data or a write will happen. 2138 * Pretend they will. 2139 */ 2140 revents |= events & (POLLIN | POLLRDNORM); 2141 break; 2142 default: 2143 break; 2144 } 2145 if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM))) 2146 switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) { 2147 case UE_INTERRUPT: 2148 case UE_ISOCHRONOUS: 2149 /* XXX unimplemented */ 2150 break; 2151 case UE_BULK: 2152 if (sce_out->state & UGEN_BULK_WB) { 2153 if (sce_out->ra_wb_used < 2154 sce_out->limit - sce_out->ibuf) 2155 revents |= events & 2156 (POLLOUT | POLLWRNORM); 2157 else 2158 selrecord(l, &sce_out->rsel); 2159 break; 2160 } 2161 /* 2162 * We have no easy way of determining if a read will 2163 * yield any data or a write will happen. 2164 * Pretend they will. 2165 */ 2166 revents |= events & (POLLOUT | POLLWRNORM); 2167 break; 2168 default: 2169 break; 2170 } 2171 2172 mutex_exit(&sc->sc_lock); 2173 2174 out: ugenif_release(sc); 2175 return revents; 2176 } 2177 2178 static void 2179 filt_ugenrdetach(struct knote *kn) 2180 { 2181 struct ugen_endpoint *sce = kn->kn_hook; 2182 struct ugen_softc *sc = sce->sc; 2183 2184 mutex_enter(&sc->sc_lock); 2185 selremove_knote(&sce->rsel, kn); 2186 mutex_exit(&sc->sc_lock); 2187 } 2188 2189 static int 2190 filt_ugenread_intr(struct knote *kn, long hint) 2191 { 2192 struct ugen_endpoint *sce = kn->kn_hook; 2193 struct ugen_softc *sc = sce->sc; 2194 int ret; 2195 2196 mutex_enter(&sc->sc_lock); 2197 if (sc->sc_dying) { 2198 ret = 0; 2199 } else { 2200 kn->kn_data = sce->q.c_cc; 2201 ret = kn->kn_data > 0; 2202 } 2203 mutex_exit(&sc->sc_lock); 2204 2205 return ret; 2206 } 2207 2208 static int 2209 filt_ugenread_isoc(struct knote *kn, long hint) 2210 { 2211 struct ugen_endpoint *sce = kn->kn_hook; 2212 struct ugen_softc *sc = sce->sc; 2213 int ret; 2214 2215 mutex_enter(&sc->sc_lock); 2216 if (sc->sc_dying) { 2217 ret = 0; 2218 } else if (sce->cur == sce->fill) { 2219 ret = 0; 2220 } else if (sce->cur < sce->fill) { 2221 kn->kn_data = sce->fill - sce->cur; 2222 ret = 1; 2223 } else { 2224 kn->kn_data = (sce->limit - sce->cur) + 2225 (sce->fill - sce->ibuf); 2226 ret = 1; 2227 } 2228 mutex_exit(&sc->sc_lock); 2229 2230 return ret; 2231 } 2232 2233 static int 2234 filt_ugenread_bulk(struct knote *kn, long hint) 2235 { 2236 struct ugen_endpoint *sce = kn->kn_hook; 2237 struct ugen_softc *sc = sce->sc; 2238 int ret; 2239 2240 mutex_enter(&sc->sc_lock); 2241 if (sc->sc_dying) { 2242 ret = 0; 2243 } else if (!(sce->state & UGEN_BULK_RA)) { 2244 /* 2245 * We have no easy way of determining if a read will 2246 * yield any data or a write will happen. 2247 * So, emulate "seltrue". 2248 */ 2249 ret = filt_seltrue(kn, hint); 2250 } else if (sce->ra_wb_used == 0) { 2251 ret = 0; 2252 } else { 2253 kn->kn_data = sce->ra_wb_used; 2254 ret = 1; 2255 } 2256 mutex_exit(&sc->sc_lock); 2257 2258 return ret; 2259 } 2260 2261 static int 2262 filt_ugenwrite_bulk(struct knote *kn, long hint) 2263 { 2264 struct ugen_endpoint *sce = kn->kn_hook; 2265 struct ugen_softc *sc = sce->sc; 2266 int ret; 2267 2268 mutex_enter(&sc->sc_lock); 2269 if (sc->sc_dying) { 2270 ret = 0; 2271 } else if (!(sce->state & UGEN_BULK_WB)) { 2272 /* 2273 * We have no easy way of determining if a read will 2274 * yield any data or a write will happen. 2275 * So, emulate "seltrue". 2276 */ 2277 ret = filt_seltrue(kn, hint); 2278 } else if (sce->ra_wb_used == sce->limit - sce->ibuf) { 2279 ret = 0; 2280 } else { 2281 kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used; 2282 ret = 1; 2283 } 2284 mutex_exit(&sc->sc_lock); 2285 2286 return ret; 2287 } 2288 2289 static const struct filterops ugenread_intr_filtops = { 2290 .f_flags = FILTEROP_ISFD, 2291 .f_attach = NULL, 2292 .f_detach = filt_ugenrdetach, 2293 .f_event = filt_ugenread_intr, 2294 }; 2295 2296 static const struct filterops ugenread_isoc_filtops = { 2297 .f_flags = FILTEROP_ISFD, 2298 .f_attach = NULL, 2299 .f_detach = filt_ugenrdetach, 2300 .f_event = filt_ugenread_isoc, 2301 }; 2302 2303 static const struct filterops ugenread_bulk_filtops = { 2304 .f_flags = FILTEROP_ISFD, 2305 .f_attach = NULL, 2306 .f_detach = filt_ugenrdetach, 2307 .f_event = filt_ugenread_bulk, 2308 }; 2309 2310 static const struct filterops ugenwrite_bulk_filtops = { 2311 .f_flags = FILTEROP_ISFD, 2312 .f_attach = NULL, 2313 .f_detach = filt_ugenrdetach, 2314 .f_event = filt_ugenwrite_bulk, 2315 }; 2316 2317 static int 2318 ugenkqfilter(dev_t dev, struct knote *kn) 2319 { 2320 struct ugen_softc *sc; 2321 struct ugen_endpoint *sce; 2322 struct selinfo *sip; 2323 int error; 2324 2325 if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL) 2326 return ENXIO; 2327 2328 if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT) { 2329 error = ENODEV; 2330 goto out; 2331 } 2332 2333 switch (kn->kn_filter) { 2334 case EVFILT_READ: 2335 sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN]; 2336 if (sce == NULL) { 2337 error = EINVAL; 2338 goto out; 2339 } 2340 2341 sip = &sce->rsel; 2342 switch (sce->edesc->bmAttributes & UE_XFERTYPE) { 2343 case UE_INTERRUPT: 2344 kn->kn_fop = &ugenread_intr_filtops; 2345 break; 2346 case UE_ISOCHRONOUS: 2347 kn->kn_fop = &ugenread_isoc_filtops; 2348 break; 2349 case UE_BULK: 2350 kn->kn_fop = &ugenread_bulk_filtops; 2351 break; 2352 default: 2353 error = EINVAL; 2354 goto out; 2355 } 2356 break; 2357 2358 case EVFILT_WRITE: 2359 sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT]; 2360 if (sce == NULL) { 2361 error = EINVAL; 2362 goto out; 2363 } 2364 2365 sip = &sce->rsel; 2366 switch (sce->edesc->bmAttributes & UE_XFERTYPE) { 2367 case UE_INTERRUPT: 2368 case UE_ISOCHRONOUS: 2369 /* XXX poll doesn't support this */ 2370 error = EINVAL; 2371 goto out; 2372 2373 case UE_BULK: 2374 kn->kn_fop = &ugenwrite_bulk_filtops; 2375 break; 2376 default: 2377 error = EINVAL; 2378 goto out; 2379 } 2380 break; 2381 2382 default: 2383 error = EINVAL; 2384 goto out; 2385 } 2386 2387 kn->kn_hook = sce; 2388 2389 mutex_enter(&sc->sc_lock); 2390 selrecord_knote(sip, kn); 2391 mutex_exit(&sc->sc_lock); 2392 2393 error = 0; 2394 2395 out: ugenif_release(sc); 2396 return error; 2397 } 2398 2399 MODULE(MODULE_CLASS_DRIVER, ugen, NULL); 2400 2401 static int 2402 ugen_modcmd(modcmd_t cmd, void *aux) 2403 { 2404 2405 switch (cmd) { 2406 case MODULE_CMD_INIT: 2407 mutex_init(&ugenif.lock, MUTEX_DEFAULT, IPL_NONE); 2408 rb_tree_init(&ugenif.tree, &ugenif_tree_ops); 2409 return 0; 2410 default: 2411 return ENOTTY; 2412 } 2413 } 2414