/*	$NetBSD: ugen.c,v 1.133 2016/04/23 10:15:32 skrll Exp $	*/

/*
 * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Lennart Augustsson (lennart@augustsson.net) at
 * Carlstedt Research & Technology.
 *
 * Copyright (c) 2006 BBN Technologies Corp.  All rights reserved.
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and the Department of the Interior National Business
 * Center under agreement number NBCHC050166.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.133 2016/04/23 10:15:32 skrll Exp $");

#ifdef _KERNEL_OPT
#include "opt_compat_netbsd.h"
#include "opt_usb.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/conf.h>
#include <sys/tty.h>
#include <sys/file.h>
#include <sys/select.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>

#ifdef UGEN_DEBUG
#define DPRINTF(x)	if (ugendebug) printf x
#define DPRINTFN(n,x)	if (ugendebug>(n)) printf x
int	ugendebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

#define	UGEN_CHUNK	128	/* chunk size for read */
#define	UGEN_IBSIZE	1020	/* buffer size */
#define	UGEN_BBSIZE	1024

#define	UGEN_NISOREQS	4	/* number of outstanding xfer requests */
#define	UGEN_NISORFRMS	8	/* number of transactions per req */
#define	UGEN_NISOFRAMES	(UGEN_NISORFRMS * UGEN_NISOREQS)

#define	UGEN_BULK_RA_WB_BUFSIZE	16384		/* default buffer size */
#define	UGEN_BULK_RA_WB_BUFMAX	(1 << 20)	/* maximum allowed buffer */

struct isoreq {
	struct ugen_endpoint *sce;
	struct usbd_xfer *xfer;
	void *dmabuf;
	uint16_t sizes[UGEN_NISORFRMS];
};

struct ugen_endpoint {
	struct ugen_softc *sc;
	usb_endpoint_descriptor_t *edesc;
	struct usbd_interface *iface;
	int state;
#define	UGEN_ASLP	0x02	/* waiting for data */
#define	UGEN_SHORT_OK	0x04	/* short xfers are OK */
#define	UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
#define	UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
#define	UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
	struct usbd_pipe *pipeh;
	struct clist q;
	u_char *ibuf;		/* start of buffer (circular for isoc) */
	u_char *fill;		/* location for input (isoc) */
	u_char *limit;		/* end of circular buffer (isoc) */
	u_char *cur;		/* current read location (isoc) */
	uint32_t timeout;
	uint32_t ra_wb_bufsize;	/* requested size for RA/WB buffer */
	uint32_t ra_wb_reqsize;	/* requested xfer length for RA/WB */
	uint32_t ra_wb_used;	/* how much is in buffer */
	uint32_t ra_wb_xferlen;	/* current xfer length for RA/WB */
	struct usbd_xfer *ra_wb_xfer;
	struct isoreq isoreqs[UGEN_NISOREQS];
	/* Keep these last; we don't overwrite them in ugen_set_config() */
#define UGEN_ENDPOINT_NONZERO_CRUFT	offsetof(struct ugen_endpoint, rsel)
	struct selinfo rsel;
	kcondvar_t cv;
};
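/*
 * Note on the circular buffer above: ibuf..limit is a ring shared by the
 * isochronous read path and the bulk read-ahead/write-behind (RA/WB)
 * paths.  "fill" is where the next byte produced goes (device data for
 * reads, user data for writes) and "cur" is where the next byte is
 * consumed; for RA/WB, ra_wb_used counts the bytes currently in the
 * ring.  When the ring is full (RA) or empty (WB), the transfer chain
 * parks itself by setting UGEN_RA_WB_STOP and is restarted from the
 * read()/write() paths below.
 */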
struct ugen_softc {
	device_t sc_dev;		/* base device */
	struct usbd_device *sc_udev;

	kmutex_t sc_lock;
	kcondvar_t sc_detach_cv;

	char sc_is_open[USB_MAX_ENDPOINTS];
	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
#define OUT 0
#define IN  1

	int sc_refcnt;
	char sc_buffer[UGEN_BBSIZE];
	u_char sc_dying;
};

dev_type_open(ugenopen);
dev_type_close(ugenclose);
dev_type_read(ugenread);
dev_type_write(ugenwrite);
dev_type_ioctl(ugenioctl);
dev_type_poll(ugenpoll);
dev_type_kqfilter(ugenkqfilter);

const struct cdevsw ugen_cdevsw = {
	.d_open = ugenopen,
	.d_close = ugenclose,
	.d_read = ugenread,
	.d_write = ugenwrite,
	.d_ioctl = ugenioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = ugenpoll,
	.d_mmap = nommap,
	.d_kqfilter = ugenkqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER,
};

Static void	ugenintr(struct usbd_xfer *, void *, usbd_status);
Static void	ugen_isoc_rintr(struct usbd_xfer *, void *, usbd_status);
Static void	ugen_bulkra_intr(struct usbd_xfer *, void *, usbd_status);
Static void	ugen_bulkwb_intr(struct usbd_xfer *, void *, usbd_status);
Static int	ugen_do_read(struct ugen_softc *, int, struct uio *, int);
Static int	ugen_do_write(struct ugen_softc *, int, struct uio *, int);
Static int	ugen_do_ioctl(struct ugen_softc *, int, u_long, void *, int,
		    struct lwp *);
Static int	ugen_set_config(struct ugen_softc *, int);
Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *, int,
		    int *);
Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
Static int	ugen_get_alt_index(struct ugen_softc *, int);
Static void	ugen_clear_endpoints(struct ugen_softc *);

#define UGENUNIT(n)	((minor(n) >> 4) & 0xf)
#define UGENENDPOINT(n)	(minor(n) & 0xf)
#define UGENDEV(u, e)	(makedev(0, ((u) << 4) | (e)))
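/*
 * The minor number packs the unit in bits 4-7 and the endpoint address
 * in bits 0-3, so each unit exposes up to 16 endpoint nodes.  For
 * example, minor 0x23 maps to unit 2, endpoint 3, i.e. the node that
 * ugen(4) conventionally names /dev/ugen2.03 (endpoint 0 is the control
 * endpoint, /dev/ugen2.00).
 */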
int	ugen_match(device_t, cfdata_t, void *);
void	ugen_attach(device_t, device_t, void *);
int	ugen_detach(device_t, int);
int	ugen_activate(device_t, enum devact);

extern struct cfdriver ugen_cd;

CFATTACH_DECL_NEW(ugen, sizeof(struct ugen_softc), ugen_match, ugen_attach,
    ugen_detach, ugen_activate);

/* toggle to control attach priority. -1 means "let autoconf decide" */
int ugen_override = -1;

int
ugen_match(device_t parent, cfdata_t match, void *aux)
{
	struct usb_attach_arg *uaa = aux;
	int override;

	if (ugen_override != -1)
		override = ugen_override;
	else
		override = match->cf_flags & 1;

	if (override)
		return UMATCH_HIGHEST;
	else if (uaa->uaa_usegeneric)
		return UMATCH_GENERIC;
	else
		return UMATCH_NONE;
}

void
ugen_attach(device_t parent, device_t self, void *aux)
{
	struct ugen_softc *sc = device_private(self);
	struct usb_attach_arg *uaa = aux;
	struct usbd_device *udev;
	char *devinfop;
	usbd_status err;
	int i, dir, conf;

	aprint_naive("\n");
	aprint_normal("\n");

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
	cv_init(&sc->sc_detach_cv, "ugendet");

	devinfop = usbd_devinfo_alloc(uaa->uaa_device, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_dev = self;
	sc->sc_udev = udev = uaa->uaa_device;

	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce;

			sce = &sc->sc_endpoints[i][dir];
			selinit(&sce->rsel);
			cv_init(&sce->cv, "ugensce");
		}
	}

	/* First set configuration index 0, the default one for ugen. */
	err = usbd_set_config_index(udev, 0, 0);
	if (err) {
		aprint_error_dev(self,
		    "setting configuration index 0 failed\n");
		sc->sc_dying = 1;
		return;
	}
	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;

	/* Set up all the local state for this configuration. */
	err = ugen_set_config(sc, conf);
	if (err) {
		aprint_error_dev(self, "setting configuration %d failed\n",
		    conf);
		sc->sc_dying = 1;
		return;
	}

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev, sc->sc_dev);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;
}

Static void
ugen_clear_endpoints(struct ugen_softc *sc)
{

	/* Clear out the old info, but leave the selinfo and cv initialised. */
	for (int i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (int dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce = &sc->sc_endpoints[i][dir];
			memset(sce, 0, UGEN_ENDPOINT_NONZERO_CRUFT);
		}
	}
}

Static int
ugen_set_config(struct ugen_softc *sc, int configno)
{
	struct usbd_device *dev = sc->sc_udev;
	usb_config_descriptor_t *cdesc;
	struct usbd_interface *iface;
	usb_endpoint_descriptor_t *ed;
	struct ugen_endpoint *sce;
	uint8_t niface, nendpt;
	int ifaceno, endptno, endpt;
	usbd_status err;
	int dir;

	DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
	    device_xname(sc->sc_dev), configno, sc));

	/*
	 * We start at 1, not 0, because we don't care whether the
	 * control endpoint is open or not.  It is always present.
	 */
	for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
		if (sc->sc_is_open[endptno]) {
			DPRINTFN(1,
			    ("ugen_set_config: %s - endpoint %d is open\n",
			    device_xname(sc->sc_dev), endptno));
			return USBD_IN_USE;
		}

	/* Avoid setting the current value. */
	cdesc = usbd_get_config_descriptor(dev);
	if (!cdesc || cdesc->bConfigurationValue != configno) {
		err = usbd_set_config_no(dev, configno, 1);
		if (err)
			return err;
	}

	err = usbd_interface_count(dev, &niface);
	if (err)
		return err;

	ugen_clear_endpoints(sc);

	for (ifaceno = 0; ifaceno < niface; ifaceno++) {
		DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
		err = usbd_device2interface_handle(dev, ifaceno, &iface);
		if (err)
			return err;
		err = usbd_endpoint_count(iface, &nendpt);
		if (err)
			return err;
		for (endptno = 0; endptno < nendpt; endptno++) {
			ed = usbd_interface2endpoint_descriptor(iface, endptno);
			KASSERT(ed != NULL);
			endpt = ed->bEndpointAddress;
			dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
			sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
			DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
			    "(%d,%d), sce=%p\n",
			    endptno, endpt, UE_GET_ADDR(endpt),
			    UE_GET_DIR(endpt), sce));
			sce->sc = sc;
			sce->edesc = ed;
			sce->iface = iface;
		}
	}
	return USBD_NORMAL_COMPLETION;
}

int
ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct ugen_softc *sc;
	int unit = UGENUNIT(dev);
	int endpt = UGENENDPOINT(dev);
	usb_endpoint_descriptor_t *edesc;
	struct ugen_endpoint *sce;
	int dir, isize;
	usbd_status err;
	struct usbd_xfer *xfer;
	int i, j;

	sc = device_lookup_private(&ugen_cd, unit);
	if (sc == NULL)
		return ENXIO;

	DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
	    flag, mode, unit, endpt));

	if (sc->sc_dying)
		return ENXIO;

	/* The control endpoint allows multiple opens. */
	if (endpt == USB_CONTROL_ENDPOINT) {
		sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
		return 0;
	}

	if (sc->sc_is_open[endpt])
		return EBUSY;

	/* Make sure there are pipes for all directions. */
	for (dir = OUT; dir <= IN; dir++) {
		if (flag & (dir == OUT ? FWRITE : FREAD)) {
			sce = &sc->sc_endpoints[endpt][dir];
			if (sce->edesc == NULL)
				return ENXIO;
		}
	}

	/* Actually open the pipes. */
	/* XXX Should back out properly if it fails. */
	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		sce->state = 0;
		sce->timeout = USBD_NO_TIMEOUT;
		DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
		    sc, endpt, dir, sce));
		edesc = sce->edesc;
		switch (edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (dir == OUT) {
				err = usbd_open_pipe(sce->iface,
				    edesc->bEndpointAddress, 0, &sce->pipeh);
				if (err)
					return EIO;
				break;
			}
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return EINVAL;
			sce->ibuf = kmem_alloc(isize, KM_SLEEP);
			DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
			    endpt, isize));
			if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1) {
				kmem_free(sce->ibuf, isize);
				sce->ibuf = NULL;
				return ENOMEM;
			}
			err = usbd_open_pipe_intr(sce->iface,
			    edesc->bEndpointAddress,
			    USBD_SHORT_XFER_OK, &sce->pipeh, sce,
			    sce->ibuf, isize, ugenintr,
			    USBD_DEFAULT_INTERVAL);
			if (err) {
				clfree(&sce->q);
				kmem_free(sce->ibuf, isize);
				sce->ibuf = NULL;
				return EIO;
			}
			DPRINTFN(5, ("ugenopen: interrupt open done\n"));
			break;
		case UE_BULK:
			err = usbd_open_pipe(sce->iface,
			    edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err)
				return EIO;
			sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
			/*
			 * Use request size for non-RA/WB transfers
			 * as the default.
			 */
			sce->ra_wb_reqsize = UGEN_BBSIZE;
			break;
		case UE_ISOCHRONOUS:
			if (dir == OUT)
				return EINVAL;
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return EINVAL;
			sce->ibuf = kmem_alloc(isize * UGEN_NISOFRAMES,
			    KM_SLEEP);
			sce->cur = sce->fill = sce->ibuf;
			sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
			DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
			    endpt, isize));
			err = usbd_open_pipe(sce->iface,
			    edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err) {
				kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
				sce->ibuf = NULL;
				return EIO;
			}
			for (i = 0; i < UGEN_NISOREQS; ++i) {
				sce->isoreqs[i].sce = sce;
				err = usbd_create_xfer(sce->pipeh,
				    isize * UGEN_NISORFRMS, 0, UGEN_NISORFRMS,
				    &xfer);
				if (err)
					goto bad;
				sce->isoreqs[i].xfer = xfer;
				sce->isoreqs[i].dmabuf = usbd_get_buffer(xfer);
				for (j = 0; j < UGEN_NISORFRMS; ++j)
					sce->isoreqs[i].sizes[j] = isize;
				usbd_setup_isoc_xfer(xfer, &sce->isoreqs[i],
				    sce->isoreqs[i].sizes, UGEN_NISORFRMS, 0,
				    ugen_isoc_rintr);
				(void)usbd_transfer(xfer);
			}
			DPRINTFN(5, ("ugenopen: isoc open done\n"));
			break;
		bad:
			while (--i >= 0) /* implicit buffer free */
				usbd_destroy_xfer(sce->isoreqs[i].xfer);
			usbd_close_pipe(sce->pipeh);
			sce->pipeh = NULL;
			kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
			sce->ibuf = NULL;
			return ENOMEM;
		case UE_CONTROL:
			sce->timeout = USBD_DEFAULT_TIMEOUT;
			return EINVAL;
		}
	}
	sc->sc_is_open[endpt] = 1;
	return 0;
}
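/*
 * Userland reaches the open path above through the per-endpoint device
 * nodes.  A minimal sketch (device name and endpoint number are
 * hypothetical; a real program should check for errors):
 *
 *	int fd = open("/dev/ugen0.01", O_RDONLY);
 *
 * Opening with O_RDONLY/O_WRONLY/O_RDWR selects which of the IN/OUT
 * pipes for that endpoint address get opened, per the FREAD/FWRITE
 * checks above.
 */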
int
ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	int dir;
	int i;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
	    flag, mode, UGENUNIT(dev), endpt));

#ifdef DIAGNOSTIC
	if (!sc->sc_is_open[endpt]) {
		printf("ugenclose: not open\n");
		return EINVAL;
	}
#endif

	if (endpt == USB_CONTROL_ENDPOINT) {
		DPRINTFN(5, ("ugenclose: close control\n"));
		sc->sc_is_open[endpt] = 0;
		return 0;
	}

	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		if (sce->pipeh == NULL)
			continue;
		DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
		    endpt, dir, sce));

		usbd_abort_pipe(sce->pipeh);

		int isize = UGETW(sce->edesc->wMaxPacketSize);
		int msize = 0;

		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			ndflush(&sce->q, sce->q.c_cc);
			clfree(&sce->q);
			msize = isize;
			break;
		case UE_ISOCHRONOUS:
			for (i = 0; i < UGEN_NISOREQS; ++i)
				usbd_destroy_xfer(sce->isoreqs[i].xfer);
			msize = isize * UGEN_NISOFRAMES;
			break;
		case UE_BULK:
			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB)) {
				usbd_destroy_xfer(sce->ra_wb_xfer);
				msize = sce->ra_wb_bufsize;
			}
			break;
		default:
			break;
		}
		usbd_close_pipe(sce->pipeh);
		sce->pipeh = NULL;
		if (sce->ibuf != NULL) {
			kmem_free(sce->ibuf, msize);
			sce->ibuf = NULL;
		}
	}
	sc->sc_is_open[endpt] = 0;

	return 0;
}

Static int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
	uint32_t n, tn;
	struct usbd_xfer *xfer;
	usbd_status err;
	int error = 0;

	DPRINTFN(5, ("%s: ugenread: %d\n", device_xname(sc->sc_dev), endpt));

	if (sc->sc_dying)
		return EIO;

	if (endpt == USB_CONTROL_ENDPOINT)
		return ENODEV;

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenread: no edesc\n");
		return EIO;
	}
	if (sce->pipeh == NULL) {
		printf("ugenread: no pipe\n");
		return EIO;
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_INTERRUPT:
		/* Block until activity occurs. */
		mutex_enter(&sc->sc_lock);
		while (sce->q.c_cc == 0) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			/* "ugenri" */
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}
		mutex_exit(&sc->sc_lock);

		/* Transfer as many chunks as possible. */
		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
			n = min(sce->q.c_cc, uio->uio_resid);
			if (n > sizeof(sc->sc_buffer))
				n = sizeof(sc->sc_buffer);

			/* Remove a small chunk from the input queue. */
			q_to_b(&sce->q, sc->sc_buffer, n);
			DPRINTFN(5, ("ugenread: got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
		}
		break;
	case UE_BULK:
		if (sce->state & UGEN_BULK_RA) {
			DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
			    uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			while (uio->uio_resid > 0 && !error) {
				while (sce->ra_wb_used == 0) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
					    ("ugenread: sleep on %p\n",
					    sce));
					/* "ugenrb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
					    ("ugenread: woke, error=%d\n",
					    error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data to the process. */
				while (uio->uio_resid > 0
				    && sce->ra_wb_used > 0) {
					n = min(uio->uio_resid,
					    sce->ra_wb_used);
					n = min(n, sce->limit - sce->cur);
					error = uiomove(sce->cur, n, uio);
					if (error)
						break;
					sce->cur += n;
					sce->ra_wb_used -= n;
					if (sce->cur == sce->limit)
						sce->cur = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was full, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = (sce->limit - sce->ibuf)
					    - sce->ra_wb_used;
					usbd_setup_xfer(xfer, sce, NULL,
					    min(n, sce->ra_wb_xferlen),
					    0, USBD_NO_TIMEOUT,
					    ugen_bulkra_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try
						 * again at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE,
		    sce->state & UGEN_SHORT_OK ? USBD_SHORT_XFER_OK : 0,
		    0, &xfer);
		if (error)
			return error;
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			DPRINTFN(1, ("ugenread: start transfer %d bytes\n", n));
			tn = n;
			err = usbd_bulk_transfer(xfer, sce->pipeh,
			    sce->state & UGEN_SHORT_OK ? USBD_SHORT_XFER_OK : 0,
			    sce->timeout, sc->sc_buffer, &tn);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
			DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
			error = uiomove(sc->sc_buffer, tn, uio);
			if (error || tn < n)
				break;
		}
		usbd_destroy_xfer(xfer);
		break;
	case UE_ISOCHRONOUS:
		mutex_enter(&sc->sc_lock);
		while (sce->cur == sce->fill) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			sce->state |= UGEN_ASLP;
			/* "ugenri" */
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}

		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
			if (sce->fill > sce->cur)
				n = min(sce->fill - sce->cur, uio->uio_resid);
			else
				n = min(sce->limit - sce->cur, uio->uio_resid);

			DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sce->cur, n, uio);
			if (error)
				break;
			sce->cur += n;
			if (sce->cur >= sce->limit)
				sce->cur = sce->ibuf;
		}
		mutex_exit(&sc->sc_lock);
		break;

	default:
		return ENXIO;
	}
	return error;
}

int
ugenread(dev_t dev, struct uio *uio, int flag)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	int error;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	mutex_enter(&sc->sc_lock);
	sc->sc_refcnt++;
	mutex_exit(&sc->sc_lock);

	error = ugen_do_read(sc, endpt, uio, flag);

	mutex_enter(&sc->sc_lock);
	if (--sc->sc_refcnt < 0)
		usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
	mutex_exit(&sc->sc_lock);

	return error;
}
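/*
 * Sketch of the corresponding userland read (endpoint node and timeout
 * value are hypothetical).  USB_SET_TIMEOUT, handled in ugen_do_ioctl()
 * below, sets sce->timeout, which bounds both the cv_timedwait_sig()
 * sleeps above and the synchronous usbd_bulk_transfer() (where a
 * timeout surfaces as ETIMEDOUT); 0 means wait indefinitely:
 *
 *	int ms = 1000;
 *	char buf[1024];
 *	ioctl(fd, USB_SET_TIMEOUT, &ms);
 *	ssize_t len = read(fd, buf, sizeof(buf));
 */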
Static int
ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
	uint32_t n;
	int error = 0;
	uint32_t tn;
	char *dbuf;
	struct usbd_xfer *xfer;
	usbd_status err;

	DPRINTFN(5, ("%s: ugenwrite: %d\n", device_xname(sc->sc_dev), endpt));

	if (sc->sc_dying)
		return EIO;

	if (endpt == USB_CONTROL_ENDPOINT)
		return ENODEV;

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenwrite: no edesc\n");
		return EIO;
	}
	if (sce->pipeh == NULL) {
		printf("ugenwrite: no pipe\n");
		return EIO;
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_BULK:
		if (sce->state & UGEN_BULK_WB) {
			DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
			    uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
			    flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			while (uio->uio_resid > 0 && !error) {
				while (sce->ra_wb_used ==
				    sce->limit - sce->ibuf) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
					    ("ugenwrite: sleep on %p\n",
					    sce));
					/* "ugenwb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
					    ("ugenwrite: woke, error=%d\n",
					    error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data from the process. */
				while (uio->uio_resid > 0 &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = min(uio->uio_resid,
					    (sce->limit - sce->ibuf)
					    - sce->ra_wb_used);
					n = min(n, sce->limit - sce->fill);
					error = uiomove(sce->fill, n, uio);
					if (error)
						break;
					sce->fill += n;
					sce->ra_wb_used += n;
					if (sce->fill == sce->limit)
						sce->fill = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was empty, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used > 0) {
					dbuf = (char *)usbd_get_buffer(xfer);
					n = min(sce->ra_wb_used,
					    sce->ra_wb_xferlen);
					tn = min(n, sce->limit - sce->cur);
					memcpy(dbuf, sce->cur, tn);
					dbuf += tn;
					if (n - tn > 0)
						memcpy(dbuf, sce->ibuf,
						    n - tn);
					usbd_setup_xfer(xfer, sce, NULL, n,
					    0, USBD_NO_TIMEOUT,
					    ugen_bulkwb_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try again
						 * at the next write.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE,
		    sce->state & UGEN_SHORT_OK ? USBD_SHORT_XFER_OK : 0,
		    0, &xfer);
		if (error)
			return error;
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_bulk_transfer(xfer, sce->pipeh, 0,
			    sce->timeout, sc->sc_buffer, &n);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_destroy_xfer(xfer);
		break;
	case UE_INTERRUPT:
		error = usbd_create_xfer(sce->pipeh,
		    UGETW(sce->edesc->wMaxPacketSize), 0, 0, &xfer);
		if (error)
			return error;
		while ((n = min(UGETW(sce->edesc->wMaxPacketSize),
		    uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_intr_transfer(xfer, sce->pipeh, 0,
			    sce->timeout, sc->sc_buffer, &n);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_destroy_xfer(xfer);
		break;
	default:
		return ENXIO;
	}
	return error;
}

int
ugenwrite(dev_t dev, struct uio *uio, int flag)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	int error;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	mutex_enter(&sc->sc_lock);
	sc->sc_refcnt++;
	mutex_exit(&sc->sc_lock);

	error = ugen_do_write(sc, endpt, uio, flag);

	mutex_enter(&sc->sc_lock);
	if (--sc->sc_refcnt < 0)
		usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
	mutex_exit(&sc->sc_lock);

	return error;
}

int
ugen_activate(device_t self, enum devact act)
{
	struct ugen_softc *sc = device_private(self);

	switch (act) {
	case DVACT_DEACTIVATE:
		sc->sc_dying = 1;
		return 0;
	default:
		return EOPNOTSUPP;
	}
}

int
ugen_detach(device_t self, int flags)
{
	struct ugen_softc *sc = device_private(self);
	struct ugen_endpoint *sce;
	int i, dir;
	int maj, mn;

	DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));

	sc->sc_dying = 1;
	pmf_device_deregister(self);
	/* Abort all pipes.  Causes processes waiting for transfer to wake. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			if (sce->pipeh)
				usbd_abort_pipe(sce->pipeh);
		}
	}

	mutex_enter(&sc->sc_lock);
	if (--sc->sc_refcnt >= 0) {
		/* Wake everyone */
		for (i = 0; i < USB_MAX_ENDPOINTS; i++)
			cv_signal(&sc->sc_endpoints[i][IN].cv);
		/* Wait for processes to go away. */
		usb_detach_wait(sc->sc_dev, &sc->sc_detach_cv, &sc->sc_lock);
	}
	mutex_exit(&sc->sc_lock);

	/* locate the major number */
	maj = cdevsw_lookup_major(&ugen_cdevsw);

	/* Nuke the vnodes for any open instances (calls close). */
	mn = device_unit(self) * USB_MAX_ENDPOINTS;
	vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev, sc->sc_dev);

	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			seldestroy(&sce->rsel);
			cv_destroy(&sce->cv);
		}
	}

	cv_destroy(&sc->sc_detach_cv);
	mutex_destroy(&sc->sc_lock);

	return 0;
}

Static void
ugenintr(struct usbd_xfer *xfer, void *addr, usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count;
	u_char *ibuf;

	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugenintr: status=%d\n", status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	ibuf = sce->ibuf;

	DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
	    xfer, status, count));
	DPRINTFN(5, (" data = %02x %02x %02x\n",
	    ibuf[0], ibuf[1], ibuf[2]));

	(void)b_to_q(ibuf, count, &sce->q);

	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
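/*
 * The isochronous read path keeps UGEN_NISOREQS transfers in flight
 * (queued at open time), each carrying UGEN_NISORFRMS frames, and the
 * handler below requeues each transfer as it completes.  Completions
 * land in the ibuf..limit ring at "fill"; if the ring would overflow,
 * the oldest unread data is discarded by advancing "cur" before the
 * copy-in.
 */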
Static void
ugen_isoc_rintr(struct usbd_xfer *xfer, void *addr, usbd_status status)
{
	struct isoreq *req = addr;
	struct ugen_endpoint *sce = req->sce;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	int i, isize;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
	    (long)(req - sce->isoreqs), count));

	/* throw away oldest input if the buffer is full */
	if (sce->fill < sce->cur && sce->cur <= sce->fill + count) {
		sce->cur += count;
		if (sce->cur >= sce->limit)
			sce->cur = sce->ibuf + (sce->cur - sce->limit);
		DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
		    count));
	}

	isize = UGETW(sce->edesc->wMaxPacketSize);
	for (i = 0; i < UGEN_NISORFRMS; i++) {
		uint32_t actlen = req->sizes[i];
		char const *tbuf = (char const *)req->dmabuf + isize * i;

		/* copy data to buffer */
		while (actlen > 0) {
			n = min(actlen, sce->limit - sce->fill);
			memcpy(sce->fill, tbuf, n);

			tbuf += n;
			actlen -= n;
			sce->fill += n;
			if (sce->fill == sce->limit)
				sce->fill = sce->ibuf;
		}

		/* setup size for next transfer */
		req->sizes[i] = isize;
	}

	usbd_setup_isoc_xfer(xfer, req, req->sizes, UGEN_NISORFRMS, 0,
	    ugen_isoc_rintr);
	(void)usbd_transfer(xfer);

	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}

Static void
ugen_bulkra_intr(struct usbd_xfer *xfer, void *addr, usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	char const *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used += count;

	/* Copy data to buffer. */
	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
	n = min(count, sce->limit - sce->fill);
	memcpy(sce->fill, tbuf, n);
	tbuf += n;
	count -= n;
	sce->fill += n;
	if (sce->fill == sce->limit)
		sce->fill = sce->ibuf;
	if (count > 0) {
		memcpy(sce->fill, tbuf, count);
		sce->fill += count;
	}

	/* Set up the next request if necessary. */
	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
	if (n > 0) {
		usbd_setup_xfer(xfer, sce, NULL, min(n, sce->ra_wb_xferlen), 0,
		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkra_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next read.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	} else
		sce->state |= UGEN_RA_WB_STOP;

	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}

Static void
ugen_bulkwb_intr(struct usbd_xfer *xfer, void *addr, usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	char *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used -= count;

	/* Update buffer pointers. */
	sce->cur += count;
	if (sce->cur >= sce->limit)
		sce->cur = sce->ibuf + (sce->cur - sce->limit);

	/* Set up next request if necessary. */
	if (sce->ra_wb_used > 0) {
		/* copy data from buffer */
		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
		count = min(sce->ra_wb_used, sce->ra_wb_xferlen);
		n = min(count, sce->limit - sce->cur);
		memcpy(tbuf, sce->cur, n);
		tbuf += n;
		if (count - n > 0)
			memcpy(tbuf, sce->ibuf, count - n);

		usbd_setup_xfer(xfer, sce, NULL, count, 0, USBD_NO_TIMEOUT,
		    ugen_bulkwb_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkwb_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next write.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	} else
		sce->state |= UGEN_RA_WB_STOP;

	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}

Static usbd_status
ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
{
	struct usbd_interface *iface;
	usb_endpoint_descriptor_t *ed;
	usbd_status err;
	struct ugen_endpoint *sce;
	uint8_t niface, nendpt, endptno, endpt;
	int dir;

	DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));

	err = usbd_interface_count(sc->sc_udev, &niface);
	if (err)
		return err;
	if (ifaceidx < 0 || ifaceidx >= niface)
		return USBD_INVAL;

	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
	if (err)
		return err;
	err = usbd_endpoint_count(iface, &nendpt);
	if (err)
		return err;

	/* change setting */
	err = usbd_set_interface(iface, altno);
	if (err)
		return err;

	err = usbd_endpoint_count(iface, &nendpt);
	if (err)
		return err;

	ugen_clear_endpoints(sc);

	for (endptno = 0; endptno < nendpt; endptno++) {
		ed = usbd_interface2endpoint_descriptor(iface, endptno);
		KASSERT(ed != NULL);
		endpt = ed->bEndpointAddress;
		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
		sce->sc = sc;
		sce->edesc = ed;
		sce->iface = iface;
	}
	return 0;
}

/* Retrieve a complete descriptor for a certain device and index. */
Static usb_config_descriptor_t *
ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
{
	usb_config_descriptor_t *cdesc, *tdesc, cdescr;
	int len;
	usbd_status err;

	if (index == USB_CURRENT_CONFIG_INDEX) {
		tdesc = usbd_get_config_descriptor(sc->sc_udev);
		len = UGETW(tdesc->wTotalLength);
		if (lenp)
			*lenp = len;
		cdesc = kmem_alloc(len, KM_SLEEP);
		memcpy(cdesc, tdesc, len);
		DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
	} else {
		err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
		if (err)
			return 0;
		len = UGETW(cdescr.wTotalLength);
		DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
		if (lenp)
			*lenp = len;
		cdesc = kmem_alloc(len, KM_SLEEP);
		err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
		if (err) {
			kmem_free(cdesc, len);
			return 0;
		}
	}
	return cdesc;
}

Static int
ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
{
	struct usbd_interface *iface;
	usbd_status err;

	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
	if (err)
		return -1;
	return usbd_get_interface_altindex(iface);
}

Static int
ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
	void *addr, int flag, struct lwp *l)
{
	struct ugen_endpoint *sce;
	usbd_status err;
	struct usbd_interface *iface;
	struct usb_config_desc *cd;
	usb_config_descriptor_t *cdesc;
	struct usb_interface_desc *id;
	usb_interface_descriptor_t *idesc;
	struct usb_endpoint_desc *ed;
	usb_endpoint_descriptor_t *edesc;
	struct usb_alt_interface *ai;
	struct usb_string_desc *si;
	uint8_t conf, alt;
	int cdesclen;
	int error;

	DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
	if (sc->sc_dying)
		return EIO;

	switch (cmd) {
	case FIONBIO:
		/* All handled in the upper FS layer. */
		return 0;
	case USB_SET_SHORT_XFER:
		if (endpt == USB_CONTROL_ENDPOINT)
			return EINVAL;
		/* This flag only affects read */
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return EINVAL;
		if (*(int *)addr)
			sce->state |= UGEN_SHORT_OK;
		else
			sce->state &= ~UGEN_SHORT_OK;
		return 0;
	case USB_SET_TIMEOUT:
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL
		    /* XXX this shouldn't happen, but the distinction between
		       input and output pipes isn't clear enough.
		       || sce->pipeh == NULL */
		    )
			return EINVAL;
		sce->timeout = *(int *)addr;
		return 0;
	case USB_SET_BULK_RA:
		if (endpt == USB_CONTROL_ENDPOINT)
			return EINVAL;
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return EINVAL;
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return EINVAL;

		if (*(int *)addr) {
			/* Only turn RA on if it's currently off. */
			if (sce->state & UGEN_BULK_RA)
				return 0;

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return EINVAL;
			error = usbd_create_xfer(sce->pipeh,
			    sce->ra_wb_reqsize, 0, 0, &sce->ra_wb_xfer);
			if (error)
				return error;
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			sce->state |= UGEN_BULK_RA;
			sce->state &= ~UGEN_RA_WB_STOP;
			/* Now start reading. */
			usbd_setup_xfer(sce->ra_wb_xfer, sce, NULL,
			    min(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
			    0, USBD_NO_TIMEOUT, ugen_bulkra_intr);
			err = usbd_transfer(sce->ra_wb_xfer);
			if (err != USBD_IN_PROGRESS) {
				sce->state &= ~UGEN_BULK_RA;
				kmem_free(sce->ibuf, sce->ra_wb_bufsize);
				sce->ibuf = NULL;
				usbd_destroy_xfer(sce->ra_wb_xfer);
				return EIO;
			}
		} else {
			/* Only turn RA off if it's currently on. */
			if (!(sce->state & UGEN_BULK_RA))
				return 0;

			sce->state &= ~UGEN_BULK_RA;
			usbd_abort_pipe(sce->pipeh);
			usbd_destroy_xfer(sce->ra_wb_xfer);
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and drain the buffer
			 * instead.
			 */
			kmem_free(sce->ibuf, sce->ra_wb_bufsize);
			sce->ibuf = NULL;
		}
		return 0;
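	/*
	 * Sketch of enabling read-ahead from userland (the sizes are
	 * hypothetical; both ioctls are handled in this switch).  The
	 * buffer size must be 1..UGEN_BULK_RA_WB_BUFMAX and the request
	 * size must not exceed it:
	 *
	 *	struct usb_bulk_ra_wb_opt opt = {
	 *		.ra_wb_buffer_size = 65536,
	 *		.ra_wb_request_size = 4096,
	 *	};
	 *	int on = 1;
	 *	ioctl(fd, USB_SET_BULK_RA_OPT, &opt);
	 *	ioctl(fd, USB_SET_BULK_RA, &on);
	 *
	 * Per the XXX below, option changes only apply the next time
	 * RA/WB is switched on, so set options first.
	 */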
	case USB_SET_BULK_WB:
		if (endpt == USB_CONTROL_ENDPOINT)
			return EINVAL;
		sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return EINVAL;
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return EINVAL;

		if (*(int *)addr) {
			/* Only turn WB on if it's currently off. */
			if (sce->state & UGEN_BULK_WB)
				return 0;

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return EINVAL;
			error = usbd_create_xfer(sce->pipeh, sce->ra_wb_reqsize,
			    0, 0, &sce->ra_wb_xfer);
			if (error)
				return error;
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
		} else {
			/* Only turn WB off if it's currently on. */
			if (!(sce->state & UGEN_BULK_WB))
				return 0;

			sce->state &= ~UGEN_BULK_WB;
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and keep writing to
			 * drain the buffer instead.
			 */
			usbd_abort_pipe(sce->pipeh);
			usbd_destroy_xfer(sce->ra_wb_xfer);
			kmem_free(sce->ibuf, sce->ra_wb_bufsize);
			sce->ibuf = NULL;
		}
		return 0;
	case USB_SET_BULK_RA_OPT:
	case USB_SET_BULK_WB_OPT:
	{
		struct usb_bulk_ra_wb_opt *opt;

		if (endpt == USB_CONTROL_ENDPOINT)
			return EINVAL;
		opt = (struct usb_bulk_ra_wb_opt *)addr;
		if (cmd == USB_SET_BULK_RA_OPT)
			sce = &sc->sc_endpoints[endpt][IN];
		else
			sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return EINVAL;
		if (opt->ra_wb_buffer_size < 1 ||
		    opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
		    opt->ra_wb_request_size < 1 ||
		    opt->ra_wb_request_size > opt->ra_wb_buffer_size)
			return EINVAL;
		/*
		 * XXX These changes do not take effect until the
		 * next time RA/WB mode is enabled but they ought to
		 * take effect immediately.
		 */
		sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
		sce->ra_wb_reqsize = opt->ra_wb_request_size;
		return 0;
	}
	default:
		break;
	}

	if (endpt != USB_CONTROL_ENDPOINT)
		return EINVAL;

	switch (cmd) {
#ifdef UGEN_DEBUG
	case USB_SETDEBUG:
		ugendebug = *(int *)addr;
		break;
#endif
	case USB_GET_CONFIG:
		err = usbd_get_config(sc->sc_udev, &conf);
		if (err)
			return EIO;
		*(int *)addr = conf;
		break;
	case USB_SET_CONFIG:
		if (!(flag & FWRITE))
			return EPERM;
		err = ugen_set_config(sc, *(int *)addr);
		switch (err) {
		case USBD_NORMAL_COMPLETION:
			break;
		case USBD_IN_USE:
			return EBUSY;
		default:
			return EIO;
		}
		break;
	case USB_GET_ALTINTERFACE:
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
		    ai->uai_interface_index, &iface);
		if (err)
			return EINVAL;
		idesc = usbd_get_interface_descriptor(iface);
		if (idesc == NULL)
			return EIO;
		ai->uai_alt_no = idesc->bAlternateSetting;
		break;
	case USB_SET_ALTINTERFACE:
		if (!(flag & FWRITE))
			return EPERM;
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
		    ai->uai_interface_index, &iface);
		if (err)
			return EINVAL;
		err = ugen_set_interface(sc, ai->uai_interface_index,
		    ai->uai_alt_no);
		if (err)
			return EINVAL;
		break;
	case USB_GET_NO_ALT:
		ai = (struct usb_alt_interface *)addr;
		cdesc = ugen_get_cdesc(sc, ai->uai_config_index, &cdesclen);
		if (cdesc == NULL)
			return EINVAL;
		idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
		if (idesc == NULL) {
			kmem_free(cdesc, cdesclen);
			return EINVAL;
		}
		ai->uai_alt_no = usbd_get_no_alts(cdesc,
		    idesc->bInterfaceNumber);
		kmem_free(cdesc, cdesclen);
		break;
	case USB_GET_DEVICE_DESC:
		*(usb_device_descriptor_t *)addr =
		    *usbd_get_device_descriptor(sc->sc_udev);
		break;
	case USB_GET_CONFIG_DESC:
		cd = (struct usb_config_desc *)addr;
		cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, &cdesclen);
		if (cdesc == NULL)
			return EINVAL;
		cd->ucd_desc = *cdesc;
		kmem_free(cdesc, cdesclen);
		break;
	case USB_GET_INTERFACE_DESC:
		id = (struct usb_interface_desc *)addr;
		cdesc = ugen_get_cdesc(sc, id->uid_config_index, &cdesclen);
		if (cdesc == NULL)
			return EINVAL;
		if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
		    id->uid_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, id->uid_interface_index);
		else
			alt = id->uid_alt_index;
		idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
		if (idesc == NULL) {
			kmem_free(cdesc, cdesclen);
			return EINVAL;
		}
		id->uid_desc = *idesc;
		kmem_free(cdesc, cdesclen);
		break;
	case USB_GET_ENDPOINT_DESC:
		ed = (struct usb_endpoint_desc *)addr;
		cdesc = ugen_get_cdesc(sc, ed->ued_config_index, &cdesclen);
		if (cdesc == NULL)
			return EINVAL;
		if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
		    ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, ed->ued_interface_index);
		else
			alt = ed->ued_alt_index;
		edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
		    alt, ed->ued_endpoint_index);
		if (edesc == NULL) {
			kmem_free(cdesc, cdesclen);
			return EINVAL;
		}
		ed->ued_desc = *edesc;
		kmem_free(cdesc, cdesclen);
		break;
	case USB_GET_FULL_DESC:
	{
		int len;
		struct iovec iov;
		struct uio uio;
		struct usb_full_desc *fd = (struct usb_full_desc *)addr;

		cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &cdesclen);
		if (cdesc == NULL)
			return EINVAL;
		len = cdesclen;
		if (len > fd->ufd_size)
			len = fd->ufd_size;
		iov.iov_base = (void *)fd->ufd_data;
		iov.iov_len = len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_resid = len;
		uio.uio_offset = 0;
		uio.uio_rw = UIO_READ;
		uio.uio_vmspace = l->l_proc->p_vmspace;
		error = uiomove((void *)cdesc, len, &uio);
		kmem_free(cdesc, cdesclen);
		return error;
	}
	case USB_GET_STRING_DESC: {
		int len;
		si = (struct usb_string_desc *)addr;
		err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
		    si->usd_language_id, &si->usd_desc, &len);
		if (err)
			return EINVAL;
		break;
	}
	case USB_DO_REQUEST:
	{
		struct usb_ctl_request *ur = (void *)addr;
		int len = UGETW(ur->ucr_request.wLength);
		struct iovec iov;
		struct uio uio;
		void *ptr = 0;
		usbd_status xerr;

		error = 0;

		if (!(flag & FWRITE))
			return EPERM;
		/* Avoid requests that would damage the bus integrity. */
		if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_CONFIG) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
		     ur->ucr_request.bRequest == UR_SET_INTERFACE))
			return EINVAL;

		if (len < 0 || len > 32767)
			return EINVAL;
		if (len != 0) {
			iov.iov_base = (void *)ur->ucr_data;
			iov.iov_len = len;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_resid = len;
			uio.uio_offset = 0;
			uio.uio_rw =
			    ur->ucr_request.bmRequestType & UT_READ ?
			    UIO_READ : UIO_WRITE;
			uio.uio_vmspace = l->l_proc->p_vmspace;
			ptr = kmem_alloc(len, KM_SLEEP);
			if (uio.uio_rw == UIO_WRITE) {
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
		sce = &sc->sc_endpoints[endpt][IN];
		xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
		    ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
		if (xerr) {
			error = EIO;
			goto ret;
		}
		if (len != 0) {
			if (uio.uio_rw == UIO_READ) {
				size_t alen = min(len, ur->ucr_actlen);
				error = uiomove(ptr, alen, &uio);
				if (error)
					goto ret;
			}
		}
	ret:
		if (ptr)
			kmem_free(ptr, len);
		return error;
	}
	case USB_GET_DEVICEINFO:
		usbd_fill_deviceinfo(sc->sc_udev,
		    (struct usb_device_info *)addr, 0);
		break;
#ifdef COMPAT_30
	case USB_GET_DEVICEINFO_OLD:
		usbd_fill_deviceinfo_old(sc->sc_udev,
		    (struct usb_device_info_old *)addr, 0);
		break;
#endif
	default:
		return EINVAL;
	}
	return 0;
}

int
ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	int error;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	sc->sc_refcnt++;
	error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
	if (--sc->sc_refcnt < 0)
		usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
	return error;
}
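/*
 * Control requests reach USB_DO_REQUEST above through the control
 * endpoint node (e.g. /dev/ugenN.00), opened for writing.  A sketch
 * reading the standard device descriptor (values per the USB spec;
 * error handling omitted):
 *
 *	struct usb_ctl_request req;
 *	usb_device_descriptor_t dd;
 *	memset(&req, 0, sizeof(req));
 *	req.ucr_request.bmRequestType = UT_READ_DEVICE;
 *	req.ucr_request.bRequest = UR_GET_DESCRIPTOR;
 *	USETW2(req.ucr_request.wValue, UDESC_DEVICE, 0);
 *	USETW(req.ucr_request.wIndex, 0);
 *	USETW(req.ucr_request.wLength, sizeof(dd));
 *	req.ucr_data = &dd;
 *	ioctl(fd, USB_DO_REQUEST, &req);
 */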
int
ugenpoll(dev_t dev, int events, struct lwp *l)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce_in, *sce_out;
	int revents = 0;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (sc->sc_dying)
		return POLLHUP;

	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
	if (sce_in == NULL && sce_out == NULL)
		return POLLERR;
#ifdef DIAGNOSTIC
	if (!sce_in->edesc && !sce_out->edesc) {
		printf("ugenpoll: no edesc\n");
		return POLLERR;
	}
	/* It's possible to have only one pipe open. */
	if (!sce_in->pipeh && !sce_out->pipeh) {
		printf("ugenpoll: no pipe\n");
		return POLLERR;
	}
#endif

	mutex_enter(&sc->sc_lock);
	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (sce_in->q.c_cc > 0)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_ISOCHRONOUS:
			if (sce_in->cur != sce_in->fill)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_BULK:
			if (sce_in->state & UGEN_BULK_RA) {
				if (sce_in->ra_wb_used > 0)
					revents |= events &
					    (POLLIN | POLLRDNORM);
				else
					selrecord(l, &sce_in->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLIN | POLLRDNORM);
			break;
		default:
			break;
		}
	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX unimplemented */
			break;
		case UE_BULK:
			if (sce_out->state & UGEN_BULK_WB) {
				if (sce_out->ra_wb_used <
				    sce_out->limit - sce_out->ibuf)
					revents |= events &
					    (POLLOUT | POLLWRNORM);
				else
					selrecord(l, &sce_out->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLOUT | POLLWRNORM);
			break;
		default:
			break;
		}

	mutex_exit(&sc->sc_lock);

	return revents;
}

static void
filt_ugenrdetach(struct knote *kn)
{
	struct ugen_endpoint *sce = kn->kn_hook;
	struct ugen_softc *sc = sce->sc;

	mutex_enter(&sc->sc_lock);
	SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
	mutex_exit(&sc->sc_lock);
}

static int
filt_ugenread_intr(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;

	kn->kn_data = sce->q.c_cc;
	return kn->kn_data > 0;
}

static int
filt_ugenread_isoc(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;

	if (sce->cur == sce->fill)
		return 0;

	if (sce->cur < sce->fill)
		kn->kn_data = sce->fill - sce->cur;
	else
		kn->kn_data = (sce->limit - sce->cur) +
		    (sce->fill - sce->ibuf);

	return 1;
}

static int
filt_ugenread_bulk(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;

	if (!(sce->state & UGEN_BULK_RA))
		/*
		 * We have no easy way of determining if a read will
		 * yield any data or a write will happen.
		 * So, emulate "seltrue".
		 */
		return filt_seltrue(kn, hint);

	if (sce->ra_wb_used == 0)
		return 0;

	kn->kn_data = sce->ra_wb_used;

	return 1;
}

static int
filt_ugenwrite_bulk(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;

	if (!(sce->state & UGEN_BULK_WB))
		/*
		 * We have no easy way of determining if a read will
		 * yield any data or a write will happen.
		 * So, emulate "seltrue".
		 */
		return filt_seltrue(kn, hint);

	if (sce->ra_wb_used == sce->limit - sce->ibuf)
		return 0;

	kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;

	return 1;
}

static const struct filterops ugenread_intr_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_intr };

static const struct filterops ugenread_isoc_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_isoc };

static const struct filterops ugenread_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_bulk };

static const struct filterops ugenwrite_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenwrite_bulk };

int
ugenkqfilter(dev_t dev, struct knote *kn)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	struct klist *klist;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (sc->sc_dying)
		return ENXIO;

	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
		if (sce == NULL)
			return EINVAL;

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			kn->kn_fop = &ugenread_intr_filtops;
			break;
		case UE_ISOCHRONOUS:
			kn->kn_fop = &ugenread_isoc_filtops;
			break;
		case UE_BULK:
			kn->kn_fop = &ugenread_bulk_filtops;
			break;
		default:
			return EINVAL;
		}
		break;

	case EVFILT_WRITE:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
		if (sce == NULL)
			return EINVAL;

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX poll doesn't support this */
			return EINVAL;

		case UE_BULK:
			kn->kn_fop = &ugenwrite_bulk_filtops;
			break;
		default:
			return EINVAL;
		}
		break;

	default:
		return EINVAL;
	}

	kn->kn_hook = sce;

	mutex_enter(&sc->sc_lock);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	mutex_exit(&sc->sc_lock);

	return 0;
}