/*	$NetBSD: ugen.c,v 1.102 2009/03/20 20:47:43 drochner Exp $	*/

/*
 * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Lennart Augustsson (lennart@augustsson.net) at
 * Carlstedt Research & Technology.
 *
 * Copyright (c) 2006 BBN Technologies Corp.  All rights reserved.
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and the Department of the Interior National Business
 * Center under agreement number NBCHC050166.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.102 2009/03/20 20:47:43 drochner Exp $");

#include "opt_ugen_bulk_ra_wb.h"
#include "opt_compat_netbsd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#if defined(__NetBSD__) || defined(__OpenBSD__)
#include <sys/device.h>
#include <sys/ioctl.h>
#elif defined(__FreeBSD__)
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/ioccom.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#endif
#include <sys/conf.h>
#include <sys/tty.h>
#include <sys/file.h>
#include <sys/select.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>

#ifdef UGEN_DEBUG
#define DPRINTF(x)	if (ugendebug) logprintf x
#define DPRINTFN(n,x)	if (ugendebug>(n)) logprintf x
int	ugendebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

#define	UGEN_CHUNK	128	/* chunk size for read */
#define	UGEN_IBSIZE	1020	/* buffer size */
#define	UGEN_BBSIZE	1024

#define	UGEN_NISOFRAMES	500	/* 0.5 seconds worth */
#define	UGEN_NISOREQS	6	/* number of outstanding xfer requests */
#define	UGEN_NISORFRMS	4	/* number of frames (milliseconds) per req */

#define UGEN_BULK_RA_WB_BUFSIZE	16384		/* default buffer size */
#define UGEN_BULK_RA_WB_BUFMAX	(1 << 20)	/* maximum allowed buffer */

struct ugen_endpoint {
	struct ugen_softc *sc;
	usb_endpoint_descriptor_t *edesc;
	usbd_interface_handle iface;
	int state;
#define	UGEN_ASLP	0x02	/* waiting for data */
#define UGEN_SHORT_OK	0x04	/* short xfers are OK */
#define UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
#define UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
#define UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
	usbd_pipe_handle pipeh;
	struct clist q;
	struct selinfo rsel;
	u_char *ibuf;		/* start of buffer (circular for isoc) */
	u_char *fill;		/* location for input (isoc) */
	u_char *limit;		/* end of circular buffer (isoc) */
	u_char *cur;		/* current read location (isoc) */
	u_int32_t timeout;
#ifdef UGEN_BULK_RA_WB
	u_int32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
	u_int32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
	u_int32_t ra_wb_used;	 /* how much is in buffer */
	u_int32_t ra_wb_xferlen; /* current xfer length for RA/WB */
	usbd_xfer_handle ra_wb_xfer;
#endif
	struct isoreq {
		struct ugen_endpoint *sce;
		usbd_xfer_handle xfer;
		void *dmabuf;
		u_int16_t sizes[UGEN_NISORFRMS];
	} isoreqs[UGEN_NISOREQS];
};

struct ugen_softc {
	USBBASEDEVICE sc_dev;		/* base device */
	usbd_device_handle sc_udev;

	char sc_is_open[USB_MAX_ENDPOINTS];
	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
#define OUT 0
#define IN  1

	int sc_refcnt;
	char sc_buffer[UGEN_BBSIZE];
	u_char sc_dying;
};

#if defined(__NetBSD__)
dev_type_open(ugenopen);
dev_type_close(ugenclose);
dev_type_read(ugenread);
dev_type_write(ugenwrite);
dev_type_ioctl(ugenioctl);
dev_type_poll(ugenpoll);
dev_type_kqfilter(ugenkqfilter);

const struct cdevsw ugen_cdevsw = {
	ugenopen, ugenclose, ugenread, ugenwrite, ugenioctl,
	nostop, notty, ugenpoll, nommap, ugenkqfilter, D_OTHER,
};
#elif defined(__OpenBSD__)
cdev_decl(ugen);
#elif defined(__FreeBSD__)
d_open_t  ugenopen;
d_close_t ugenclose;
d_read_t  ugenread;
d_write_t ugenwrite;
d_ioctl_t ugenioctl;
d_poll_t  ugenpoll;

#define UGEN_CDEV_MAJOR	114

Static struct cdevsw ugen_cdevsw = {
	/* open */	ugenopen,
	/* close */	ugenclose,
	/* read */	ugenread,
	/* write */	ugenwrite,
	/* ioctl */	ugenioctl,
	/* poll */	ugenpoll,
	/* mmap */	nommap,
	/* strategy */	nostrategy,
	/* name */	"ugen",
	/* maj */	UGEN_CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	0,
	/* bmaj */	-1
};
#endif

Static void ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr,
		     usbd_status status);
Static void ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
			    usbd_status status);
#ifdef UGEN_BULK_RA_WB
Static void ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
			     usbd_status status);
Static void ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
			     usbd_status status);
#endif
Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
			 void *, int, struct lwp *);
Static int ugen_set_config(struct ugen_softc *sc, int configno);
Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *sc,
					       int index, int *lenp);
Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
Static int ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx);

#define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
#define UGENENDPOINT(n) (minor(n) & 0xf)
#define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))

USB_DECLARE_DRIVER(ugen);

USB_MATCH(ugen)
{
	USB_MATCH_START(ugen, uaa);

	if (match->cf_flags & 1)
		return (UMATCH_HIGHEST);
	else if (uaa->usegeneric)
		return (UMATCH_GENERIC);
	else
		return (UMATCH_NONE);
}

USB_ATTACH(ugen)
{
	USB_ATTACH_START(ugen, sc, uaa);
	usbd_device_handle udev;
	char *devinfop;
	usbd_status err;
	int i, dir, conf;

	devinfop = usbd_devinfo_alloc(uaa->device, 0);
	USB_ATTACH_SETUP;
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_dev = self;
	sc->sc_udev = udev = uaa->device;

	/* First set configuration index 0, the default one for ugen. */
	err = usbd_set_config_index(udev, 0, 0);
	if (err) {
		aprint_error_dev(self,
		    "setting configuration index 0 failed\n");
		sc->sc_dying = 1;
		USB_ATTACH_ERROR_RETURN;
	}
	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;

	/* Set up all the local state for this configuration. */
	err = ugen_set_config(sc, conf);
	if (err) {
		aprint_error_dev(self, "setting configuration %d failed\n",
		    conf);
		sc->sc_dying = 1;
		USB_ATTACH_ERROR_RETURN;
	}

#ifdef __FreeBSD__
	{
		static int global_init_done = 0;
		if (!global_init_done) {
			cdevsw_add(&ugen_cdevsw);
			global_init_done = 1;
		}
	}
#endif
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce;

			sce = &sc->sc_endpoints[i][dir];
			selinit(&sce->rsel);
		}
	}

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev,
			   USBDEV(sc->sc_dev));

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	USB_ATTACH_SUCCESS_RETURN;
}

Static int
ugen_set_config(struct ugen_softc *sc, int configno)
{
	usbd_device_handle dev = sc->sc_udev;
	usb_config_descriptor_t *cdesc;
	usbd_interface_handle iface;
	usb_endpoint_descriptor_t *ed;
	struct ugen_endpoint *sce;
	u_int8_t niface, nendpt;
	int ifaceno, endptno, endpt;
	usbd_status err;
	int dir;

	DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
		    USBDEVNAME(sc->sc_dev), configno, sc));

	/*
	 * We start at 1, not 0, because we don't care whether the
	 * control endpoint is open or not. It is always present.
	 */
	for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
		if (sc->sc_is_open[endptno]) {
			DPRINTFN(1,
			     ("ugen_set_config: %s - endpoint %d is open\n",
			      USBDEVNAME(sc->sc_dev), endptno));
			return (USBD_IN_USE);
		}

	/* Avoid setting the current value. */
	cdesc = usbd_get_config_descriptor(dev);
	if (!cdesc || cdesc->bConfigurationValue != configno) {
		err = usbd_set_config_no(dev, configno, 1);
		if (err)
			return (err);
	}

	err = usbd_interface_count(dev, &niface);
	if (err)
		return (err);
	memset(sc->sc_endpoints, 0, sizeof sc->sc_endpoints);
	for (ifaceno = 0; ifaceno < niface; ifaceno++) {
		DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
		err = usbd_device2interface_handle(dev, ifaceno, &iface);
		if (err)
			return (err);
		err = usbd_endpoint_count(iface, &nendpt);
		if (err)
			return (err);
		for (endptno = 0; endptno < nendpt; endptno++) {
			ed = usbd_interface2endpoint_descriptor(iface,endptno);
			KASSERT(ed != NULL);
			endpt = ed->bEndpointAddress;
			dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
			sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
			DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
				    "(%d,%d), sce=%p\n",
				    endptno, endpt, UE_GET_ADDR(endpt),
				    UE_GET_DIR(endpt), sce));
			sce->sc = sc;
			sce->edesc = ed;
			sce->iface = iface;
		}
	}
	return (USBD_NORMAL_COMPLETION);
}

int
ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct ugen_softc *sc;
	int unit = UGENUNIT(dev);
	int endpt = UGENENDPOINT(dev);
	usb_endpoint_descriptor_t *edesc;
	struct ugen_endpoint *sce;
	int dir, isize;
	usbd_status err;
	usbd_xfer_handle xfer;
	void *tbuf;
	int i, j;

	USB_GET_SC_OPEN(ugen, unit, sc);

	DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
		     flag, mode, unit, endpt));

	if (sc == NULL || sc->sc_dying)
		return (ENXIO);

	/* The control endpoint allows multiple opens. */
	if (endpt == USB_CONTROL_ENDPOINT) {
		sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
		return (0);
	}

	if (sc->sc_is_open[endpt])
		return (EBUSY);

	/* Make sure there are pipes for all directions. */
	for (dir = OUT; dir <= IN; dir++) {
		if (flag & (dir == OUT ? FWRITE : FREAD)) {
			sce = &sc->sc_endpoints[endpt][dir];
			if (sce == 0 || sce->edesc == 0)
				return (ENXIO);
		}
	}

	/* Actually open the pipes. */
	/* XXX Should back out properly if it fails. */
	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		sce->state = 0;
		sce->timeout = USBD_NO_TIMEOUT;
		DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
			     sc, endpt, dir, sce));
		edesc = sce->edesc;
		switch (edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (dir == OUT) {
				err = usbd_open_pipe(sce->iface,
				    edesc->bEndpointAddress, 0, &sce->pipeh);
				if (err)
					return (EIO);
				break;
			}
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return (EINVAL);
			sce->ibuf = malloc(isize, M_USBDEV, M_WAITOK);
			DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
				     endpt, isize));
			if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1)
				return (ENOMEM);
			err = usbd_open_pipe_intr(sce->iface,
				  edesc->bEndpointAddress,
				  USBD_SHORT_XFER_OK, &sce->pipeh, sce,
				  sce->ibuf, isize, ugenintr,
				  USBD_DEFAULT_INTERVAL);
			if (err) {
				free(sce->ibuf, M_USBDEV);
				clfree(&sce->q);
				return (EIO);
			}
			DPRINTFN(5, ("ugenopen: interrupt open done\n"));
			break;
		case UE_BULK:
			err = usbd_open_pipe(sce->iface,
				  edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err)
				return (EIO);
#ifdef UGEN_BULK_RA_WB
			sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
			/*
			 * Use request size for non-RA/WB transfers
			 * as the default.
			 */
			sce->ra_wb_reqsize = UGEN_BBSIZE;
#endif
			break;
		case UE_ISOCHRONOUS:
			if (dir == OUT)
				return (EINVAL);
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return (EINVAL);
			sce->ibuf = malloc(isize * UGEN_NISOFRAMES,
				M_USBDEV, M_WAITOK);
			sce->cur = sce->fill = sce->ibuf;
			sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
			DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
				     endpt, isize));
			err = usbd_open_pipe(sce->iface,
				  edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err) {
				free(sce->ibuf, M_USBDEV);
				return (EIO);
			}
			for(i = 0; i < UGEN_NISOREQS; ++i) {
				sce->isoreqs[i].sce = sce;
				xfer = usbd_alloc_xfer(sc->sc_udev);
				if (xfer == 0)
					goto bad;
				sce->isoreqs[i].xfer = xfer;
				tbuf = usbd_alloc_buffer
					(xfer, isize * UGEN_NISORFRMS);
				if (tbuf == 0) {
					i++;
					goto bad;
				}
				sce->isoreqs[i].dmabuf = tbuf;
				for(j = 0; j < UGEN_NISORFRMS; ++j)
					sce->isoreqs[i].sizes[j] = isize;
				usbd_setup_isoc_xfer
					(xfer, sce->pipeh, &sce->isoreqs[i],
					 sce->isoreqs[i].sizes,
					 UGEN_NISORFRMS, USBD_NO_COPY,
					 ugen_isoc_rintr);
				(void)usbd_transfer(xfer);
			}
			DPRINTFN(5, ("ugenopen: isoc open done\n"));
			break;
		bad:
			while (--i >= 0) /* implicit buffer free */
				usbd_free_xfer(sce->isoreqs[i].xfer);
			return (ENOMEM);
		case UE_CONTROL:
			sce->timeout = USBD_DEFAULT_TIMEOUT;
			return (EINVAL);
		}
	}
	sc->sc_is_open[endpt] = 1;
	return (0);
}

int
ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	int dir;
	int i;

	USB_GET_SC(ugen, UGENUNIT(dev), sc);

	DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
		     flag, mode, UGENUNIT(dev), endpt));

#ifdef DIAGNOSTIC
	if (!sc->sc_is_open[endpt]) {
		printf("ugenclose: not open\n");
		return (EINVAL);
	}
#endif

	if (endpt == USB_CONTROL_ENDPOINT) {
		DPRINTFN(5, ("ugenclose: close control\n"));
		sc->sc_is_open[endpt] = 0;
		return (0);
	}

	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		if (sce == NULL || sce->pipeh == NULL)
			continue;
		DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
			     endpt, dir, sce));

		usbd_abort_pipe(sce->pipeh);
		usbd_close_pipe(sce->pipeh);
		sce->pipeh = NULL;

		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			ndflush(&sce->q, sce->q.c_cc);
			clfree(&sce->q);
			break;
		case UE_ISOCHRONOUS:
			for (i = 0; i < UGEN_NISOREQS; ++i)
				usbd_free_xfer(sce->isoreqs[i].xfer);
			break;
#ifdef UGEN_BULK_RA_WB
		case UE_BULK:
			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB))
				/* ibuf freed below */
				usbd_free_xfer(sce->ra_wb_xfer);
			break;
#endif
		default:
			break;
		}

		if (sce->ibuf != NULL) {
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
			clfree(&sce->q);
		}
	}
	sc->sc_is_open[endpt] = 0;

	return (0);
}

Static int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
	u_int32_t n, tn;
	usbd_xfer_handle xfer;
	usbd_status err;
	int s;
	int error = 0;

	DPRINTFN(5, ("%s: ugenread: %d\n", USBDEVNAME(sc->sc_dev), endpt));

	if (sc->sc_dying)
		return (EIO);

	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenread: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenread: no pipe\n");
		return (EIO);
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_INTERRUPT:
		/* Block until activity occurs. */
		s = splusb();
		while (sce->q.c_cc == 0) {
			if (flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = tsleep(sce, PZERO | PCATCH, "ugenri", 0);
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}
		splx(s);

		/* Transfer as many chunks as possible. */
		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
			n = min(sce->q.c_cc, uio->uio_resid);
			if (n > sizeof(sc->sc_buffer))
				n = sizeof(sc->sc_buffer);

			/* Remove a small chunk from the input queue. */
			q_to_b(&sce->q, sc->sc_buffer, n);
			DPRINTFN(5, ("ugenread: got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
		}
		break;
	case UE_BULK:
#ifdef UGEN_BULK_RA_WB
		if (sce->state & UGEN_BULK_RA) {
			DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			s = splusb();
			if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			while (uio->uio_resid > 0 && !error) {
				while (sce->ra_wb_used == 0) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenread: sleep on %p\n",
						  sce));
					error = tsleep(sce, PZERO | PCATCH,
						       "ugenrb", 0);
					DPRINTFN(5,
						 ("ugenread: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data to the process. */
				while (uio->uio_resid > 0
				       && sce->ra_wb_used > 0) {
					n = min(uio->uio_resid,
						sce->ra_wb_used);
					n = min(n, sce->limit - sce->cur);
					error = uiomove(sce->cur, n, uio);
					if (error)
						break;
					sce->cur += n;
					sce->ra_wb_used -= n;
					if (sce->cur == sce->limit)
						sce->cur = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was full, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = (sce->limit - sce->ibuf)
					    - sce->ra_wb_used;
					usbd_setup_xfer(xfer,
					    sce->pipeh, sce, NULL,
					    min(n, sce->ra_wb_xferlen),
					    USBD_NO_COPY, USBD_NO_TIMEOUT,
					    ugen_bulkra_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try
						 * again at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			splx(s);
			break;
		}
#endif
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (ENOMEM);
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
			tn = n;
			err = usbd_bulk_transfer(
				  xfer, sce->pipeh,
				  sce->state & UGEN_SHORT_OK ?
				      USBD_SHORT_XFER_OK : 0,
				  sce->timeout, sc->sc_buffer, &tn, "ugenrb");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
			DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
			error = uiomove(sc->sc_buffer, tn, uio);
			if (error || tn < n)
				break;
		}
		usbd_free_xfer(xfer);
		break;
	case UE_ISOCHRONOUS:
		s = splusb();
		while (sce->cur == sce->fill) {
			if (flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = tsleep(sce, PZERO | PCATCH, "ugenri", 0);
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}

		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
			if(sce->fill > sce->cur)
				n = min(sce->fill - sce->cur, uio->uio_resid);
			else
				n = min(sce->limit - sce->cur, uio->uio_resid);

			DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sce->cur, n, uio);
			if (error)
				break;
			sce->cur += n;
			if(sce->cur >= sce->limit)
				sce->cur = sce->ibuf;
		}
		splx(s);
		break;

	default:
		return (ENXIO);
	}
	return (error);
}

int
ugenread(dev_t dev, struct uio *uio, int flag)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	int error;

	USB_GET_SC(ugen, UGENUNIT(dev), sc);

	sc->sc_refcnt++;
	error = ugen_do_read(sc, endpt, uio, flag);
	if (--sc->sc_refcnt < 0)
		usb_detach_wakeup(USBDEV(sc->sc_dev));
	return (error);
}

Static int
ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
	      int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
	u_int32_t n;
	int error = 0;
#ifdef UGEN_BULK_RA_WB
	int s;
	u_int32_t tn;
	char *dbuf;
#endif
	usbd_xfer_handle xfer;
	usbd_status err;

	DPRINTFN(5, ("%s: ugenwrite: %d\n", USBDEVNAME(sc->sc_dev), endpt));

	if (sc->sc_dying)
		return (EIO);

	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenwrite: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenwrite: no pipe\n");
		return (EIO);
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_BULK:
#ifdef UGEN_BULK_RA_WB
		if (sce->state & UGEN_BULK_WB) {
			DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			s = splusb();
			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
			    flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			while (uio->uio_resid > 0 && !error) {
				while (sce->ra_wb_used ==
				       sce->limit - sce->ibuf) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenwrite: sleep on %p\n",
						  sce));
					error = tsleep(sce, PZERO | PCATCH,
						       "ugenwb", 0);
					DPRINTFN(5,
						 ("ugenwrite: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data from the process. */
				while (uio->uio_resid > 0 &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = min(uio->uio_resid,
						(sce->limit - sce->ibuf)
						- sce->ra_wb_used);
					n = min(n, sce->limit - sce->fill);
					error = uiomove(sce->fill, n, uio);
					if (error)
						break;
					sce->fill += n;
					sce->ra_wb_used += n;
					if (sce->fill == sce->limit)
						sce->fill = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was empty, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used > 0) {
					dbuf = (char *)usbd_get_buffer(xfer);
					n = min(sce->ra_wb_used,
						sce->ra_wb_xferlen);
					tn = min(n, sce->limit - sce->cur);
					memcpy(dbuf, sce->cur, tn);
					dbuf += tn;
					if (n - tn > 0)
						memcpy(dbuf, sce->ibuf,
						       n - tn);
					usbd_setup_xfer(xfer,
					    sce->pipeh, sce, NULL, n,
					    USBD_NO_COPY, USBD_NO_TIMEOUT,
					    ugen_bulkwb_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try again
						 * at the next write.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			splx(s);
			break;
		}
#endif
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (EIO);
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_bulk_transfer(xfer, sce->pipeh, 0,
				  sce->timeout, sc->sc_buffer, &n, "ugenwb");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_free_xfer(xfer);
		break;
	case UE_INTERRUPT:
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (EIO);
		while ((n = min(UGETW(sce->edesc->wMaxPacketSize),
		    uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_intr_transfer(xfer, sce->pipeh, 0,
				  sce->timeout, sc->sc_buffer, &n, "ugenwi");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_free_xfer(xfer);
		break;
	default:
		return (ENXIO);
	}
	return (error);
}

int
ugenwrite(dev_t dev, struct uio *uio, int flag)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	int error;

	USB_GET_SC(ugen, UGENUNIT(dev), sc);

	sc->sc_refcnt++;
	error = ugen_do_write(sc, endpt, uio, flag);
	if (--sc->sc_refcnt < 0)
		usb_detach_wakeup(USBDEV(sc->sc_dev));
	return (error);
}

#if defined(__NetBSD__) || defined(__OpenBSD__)
int
ugen_activate(device_ptr_t self, enum devact act)
{
	struct ugen_softc *sc = device_private(self);

	switch (act) {
	case DVACT_ACTIVATE:
		return (EOPNOTSUPP);

	case DVACT_DEACTIVATE:
		sc->sc_dying = 1;
		break;
	}
	return (0);
}
#endif

USB_DETACH(ugen)
{
	USB_DETACH_START(ugen, sc);
	struct ugen_endpoint *sce;
	int i, dir;
	int s;
#if defined(__NetBSD__) || defined(__OpenBSD__)
	int maj, mn;

	DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));
#elif defined(__FreeBSD__)
	DPRINTF(("ugen_detach: sc=%p\n", sc));
#endif

	sc->sc_dying = 1;
	pmf_device_deregister(self);
	/* Abort all pipes.  Causes processes waiting for transfer to wake. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			if (sce && sce->pipeh)
				usbd_abort_pipe(sce->pipeh);
		}
	}

	s = splusb();
	if (--sc->sc_refcnt >= 0) {
		/* Wake everyone */
		for (i = 0; i < USB_MAX_ENDPOINTS; i++)
			wakeup(&sc->sc_endpoints[i][IN]);
		/* Wait for processes to go away. */
		usb_detach_wait(USBDEV(sc->sc_dev));
	}
	splx(s);

#if defined(__NetBSD__) || defined(__OpenBSD__)
	/* locate the major number */
#if defined(__NetBSD__)
	maj = cdevsw_lookup_major(&ugen_cdevsw);
#elif defined(__OpenBSD__)
	for (maj = 0; maj < nchrdev; maj++)
		if (cdevsw[maj].d_open == ugenopen)
			break;
#endif

	/* Nuke the vnodes for any open instances (calls close). */
	mn = device_unit(self) * USB_MAX_ENDPOINTS;
	vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);
#elif defined(__FreeBSD__)
	/* XXX not implemented yet */
#endif

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev,
			   USBDEV(sc->sc_dev));

	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			seldestroy(&sce->rsel);
		}
	}

	return (0);
}

Static void
ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr, usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	/*struct ugen_softc *sc = sce->sc;*/
	u_int32_t count;
	u_char *ibuf;

	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugenintr: status=%d\n", status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	ibuf = sce->ibuf;

	DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
		     xfer, status, count));
	DPRINTFN(5, ("          data = %02x %02x %02x\n",
		     ibuf[0], ibuf[1], ibuf[2]));

	(void)b_to_q(ibuf, count, &sce->q);

	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_intr: waking %p\n", sce));
		wakeup(sce);
	}
	selnotify(&sce->rsel, 0, 0);
}

Static void
ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
		usbd_status status)
{
	struct isoreq *req = addr;
	struct ugen_endpoint *sce = req->sce;
	u_int32_t count, n;
	int i, isize;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
		    (long)(req - sce->isoreqs), count));

	/* throw away oldest input if the buffer is full */
	if(sce->fill < sce->cur && sce->cur <= sce->fill + count) {
		sce->cur += count;
		if(sce->cur >= sce->limit)
			sce->cur = sce->ibuf + (sce->limit - sce->cur);
		DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
			     count));
	}

	isize = UGETW(sce->edesc->wMaxPacketSize);
	for (i = 0; i < UGEN_NISORFRMS; i++) {
		u_int32_t actlen = req->sizes[i];
		char const *tbuf = (char const *)req->dmabuf + isize * i;

		/* copy data to buffer */
		while (actlen > 0) {
			n = min(actlen, sce->limit - sce->fill);
			memcpy(sce->fill, tbuf, n);

			tbuf += n;
			actlen -= n;
			sce->fill += n;
			if(sce->fill == sce->limit)
				sce->fill = sce->ibuf;
		}

		/* setup size for next transfer */
		req->sizes[i] = isize;
	}

	usbd_setup_isoc_xfer(xfer, sce->pipeh, req, req->sizes, UGEN_NISORFRMS,
			     USBD_NO_COPY, ugen_isoc_rintr);
	(void)usbd_transfer(xfer);

	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce));
		wakeup(sce);
	}
	selnotify(&sce->rsel, 0, 0);
}

#ifdef UGEN_BULK_RA_WB
Static void
ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	u_int32_t count, n;
	char const *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used += count;

	/* Copy data to buffer. */
	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
	n = min(count, sce->limit - sce->fill);
	memcpy(sce->fill, tbuf, n);
	tbuf += n;
	count -= n;
	sce->fill += n;
	if (sce->fill == sce->limit)
		sce->fill = sce->ibuf;
	if (count > 0) {
		memcpy(sce->fill, tbuf, count);
		sce->fill += count;
	}

	/* Set up the next request if necessary. */
	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
	if (n > 0) {
		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
		    min(n, sce->ra_wb_xferlen), USBD_NO_COPY,
		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("ugen_bulkra_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next read.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		sce->state |= UGEN_RA_WB_STOP;

	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce));
		wakeup(sce);
	}
	selnotify(&sce->rsel, 0, 0);
}

Static void
ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	u_int32_t count, n;
	char *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used -= count;

	/* Update buffer pointers. */
	sce->cur += count;
	if (sce->cur >= sce->limit)
		sce->cur = sce->ibuf + (sce->cur - sce->limit);

	/* Set up next request if necessary. */
	if (sce->ra_wb_used > 0) {
		/* copy data from buffer */
		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
		count = min(sce->ra_wb_used, sce->ra_wb_xferlen);
		n = min(count, sce->limit - sce->cur);
		memcpy(tbuf, sce->cur, n);
		tbuf += n;
		if (count - n > 0)
			memcpy(tbuf, sce->ibuf, count - n);

		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
		    count, USBD_NO_COPY, USBD_NO_TIMEOUT, ugen_bulkwb_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("ugen_bulkwb_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next write.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		sce->state |= UGEN_RA_WB_STOP;

	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce));
		wakeup(sce);
	}
	selnotify(&sce->rsel, 0, 0);
}
#endif

Static usbd_status
ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
{
	usbd_interface_handle iface;
	usb_endpoint_descriptor_t *ed;
	usbd_status err;
	struct ugen_endpoint *sce;
	u_int8_t niface, nendpt, endptno, endpt;
	int dir;

	DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));

	err = usbd_interface_count(sc->sc_udev, &niface);
	if (err)
		return (err);
	if (ifaceidx < 0 || ifaceidx >= niface)
		return (USBD_INVAL);

	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
	if (err)
		return (err);
	err = usbd_endpoint_count(iface, &nendpt);
	if (err)
		return (err);
	/* XXX should only do this after setting new altno has succeeded */
	for (endptno = 0; endptno < nendpt; endptno++) {
		ed = usbd_interface2endpoint_descriptor(iface,endptno);
		endpt = ed->bEndpointAddress;
		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
		sce->sc = 0;
		sce->edesc = 0;
		sce->iface = 0;
	}

	/* change setting */
	err = usbd_set_interface(iface, altno);
	if (err)
		return (err);

	err = usbd_endpoint_count(iface, &nendpt);
	if (err)
		return (err);
	for (endptno = 0; endptno < nendpt; endptno++) {
		ed = usbd_interface2endpoint_descriptor(iface,endptno);
		KASSERT(ed != NULL);
		endpt = ed->bEndpointAddress;
		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
		sce->sc = sc;
		sce->edesc = ed;
		sce->iface = iface;
	}
	return (0);
}

/* Retrieve a complete descriptor for a certain device and index. */
Static usb_config_descriptor_t *
ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
{
	usb_config_descriptor_t *cdesc, *tdesc, cdescr;
	int len;
	usbd_status err;

	if (index == USB_CURRENT_CONFIG_INDEX) {
		tdesc = usbd_get_config_descriptor(sc->sc_udev);
		len = UGETW(tdesc->wTotalLength);
		if (lenp)
			*lenp = len;
		cdesc = malloc(len, M_TEMP, M_WAITOK);
		memcpy(cdesc, tdesc, len);
		DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
	} else {
		err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
		if (err)
			return (0);
		len = UGETW(cdescr.wTotalLength);
		DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
		if (lenp)
			*lenp = len;
		cdesc = malloc(len, M_TEMP, M_WAITOK);
		err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
		if (err) {
			free(cdesc, M_TEMP);
			return (0);
		}
	}
	return (cdesc);
}

Static int
ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
{
	usbd_interface_handle iface;
	usbd_status err;

	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
	if (err)
		return (-1);
	return (usbd_get_interface_altindex(iface));
}

Static int
ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
	      void *addr, int flag, struct lwp *l)
{
	struct ugen_endpoint *sce;
	usbd_status err;
	usbd_interface_handle iface;
	struct usb_config_desc *cd;
	usb_config_descriptor_t *cdesc;
	struct usb_interface_desc *id;
	usb_interface_descriptor_t *idesc;
	struct usb_endpoint_desc *ed;
	usb_endpoint_descriptor_t *edesc;
	struct usb_alt_interface *ai;
	struct usb_string_desc *si;
	u_int8_t conf, alt;

	DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
	if (sc->sc_dying)
		return (EIO);

	switch (cmd) {
	case FIONBIO:
		/* All handled in the upper FS layer. */
		return (0);
	case USB_SET_SHORT_XFER:
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		/* This flag only affects read */
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		if (*(int *)addr)
			sce->state |= UGEN_SHORT_OK;
		else
			sce->state &= ~UGEN_SHORT_OK;
		return (0);
	case USB_SET_TIMEOUT:
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL
		    /* XXX this shouldn't happen, but the distinction between
		       input and output pipes isn't clear enough.
		       || sce->pipeh == NULL */
		    )
			return (EINVAL);
		sce->timeout = *(int *)addr;
		return (0);
	case USB_SET_BULK_RA:
#ifdef UGEN_BULK_RA_WB
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return (EINVAL);

		if (*(int *)addr) {
			/* Only turn RA on if it's currently off. */
			if (sce->state & UGEN_BULK_RA)
				return (0);

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return (EINVAL);
			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (sce->ra_wb_xfer == NULL)
				return (ENOMEM);
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			/*
			 * Set up a dmabuf because we reuse the xfer with
			 * the same (max) request length like isoc.
			 */
			if (usbd_alloc_buffer(sce->ra_wb_xfer,
					      sce->ra_wb_xferlen) == 0) {
				usbd_free_xfer(sce->ra_wb_xfer);
				return (ENOMEM);
			}
			sce->ibuf = malloc(sce->ra_wb_bufsize,
					   M_USBDEV, M_WAITOK);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			sce->state |= UGEN_BULK_RA;
			sce->state &= ~UGEN_RA_WB_STOP;
			/* Now start reading. */
			usbd_setup_xfer(sce->ra_wb_xfer, sce->pipeh, sce,
			    NULL,
			    min(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
			    USBD_NO_COPY, USBD_NO_TIMEOUT,
			    ugen_bulkra_intr);
			err = usbd_transfer(sce->ra_wb_xfer);
			if (err != USBD_IN_PROGRESS) {
				sce->state &= ~UGEN_BULK_RA;
				free(sce->ibuf, M_USBDEV);
				sce->ibuf = NULL;
				usbd_free_xfer(sce->ra_wb_xfer);
				return (EIO);
			}
		} else {
			/* Only turn RA off if it's currently on. */
			if (!(sce->state & UGEN_BULK_RA))
				return (0);

			sce->state &= ~UGEN_BULK_RA;
			usbd_abort_pipe(sce->pipeh);
			usbd_free_xfer(sce->ra_wb_xfer);
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and drain the buffer
			 * instead.
			 */
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	case USB_SET_BULK_WB:
#ifdef UGEN_BULK_RA_WB
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return (EINVAL);

		if (*(int *)addr) {
			/* Only turn WB on if it's currently off. */
			if (sce->state & UGEN_BULK_WB)
				return (0);

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return (EINVAL);
			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (sce->ra_wb_xfer == NULL)
				return (ENOMEM);
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			/*
			 * Set up a dmabuf because we reuse the xfer with
			 * the same (max) request length like isoc.
			 */
			if (usbd_alloc_buffer(sce->ra_wb_xfer,
					      sce->ra_wb_xferlen) == 0) {
				usbd_free_xfer(sce->ra_wb_xfer);
				return (ENOMEM);
			}
			sce->ibuf = malloc(sce->ra_wb_bufsize,
					   M_USBDEV, M_WAITOK);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
		} else {
			/* Only turn WB off if it's currently on. */
			if (!(sce->state & UGEN_BULK_WB))
				return (0);

			sce->state &= ~UGEN_BULK_WB;
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and keep writing to
			 * drain the buffer instead.
			 */
			usbd_abort_pipe(sce->pipeh);
			usbd_free_xfer(sce->ra_wb_xfer);
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	case USB_SET_BULK_RA_OPT:
	case USB_SET_BULK_WB_OPT:
#ifdef UGEN_BULK_RA_WB
	{
		struct usb_bulk_ra_wb_opt *opt;

		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		opt = (struct usb_bulk_ra_wb_opt *)addr;
		if (cmd == USB_SET_BULK_RA_OPT)
			sce = &sc->sc_endpoints[endpt][IN];
		else
			sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		if (opt->ra_wb_buffer_size < 1 ||
		    opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
		    opt->ra_wb_request_size < 1 ||
		    opt->ra_wb_request_size > opt->ra_wb_buffer_size)
			return (EINVAL);
		/*
		 * XXX These changes do not take effect until the
		 * next time RA/WB mode is enabled but they ought to
		 * take effect immediately.
		 */
		sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
		sce->ra_wb_reqsize = opt->ra_wb_request_size;
		return (0);
	}
#else
		return (EOPNOTSUPP);
#endif
	default:
		break;
	}

	if (endpt != USB_CONTROL_ENDPOINT)
		return (EINVAL);

	switch (cmd) {
#ifdef UGEN_DEBUG
	case USB_SETDEBUG:
		ugendebug = *(int *)addr;
		break;
#endif
	case USB_GET_CONFIG:
		err = usbd_get_config(sc->sc_udev, &conf);
		if (err)
			return (EIO);
		*(int *)addr = conf;
		break;
	case USB_SET_CONFIG:
		if (!(flag & FWRITE))
			return (EPERM);
		err = ugen_set_config(sc, *(int *)addr);
		switch (err) {
		case USBD_NORMAL_COMPLETION:
			break;
		case USBD_IN_USE:
			return (EBUSY);
		default:
			return (EIO);
		}
		break;
	case USB_GET_ALTINTERFACE:
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		idesc = usbd_get_interface_descriptor(iface);
		if (idesc == NULL)
			return (EIO);
		ai->uai_alt_no = idesc->bAlternateSetting;
		break;
	case USB_SET_ALTINTERFACE:
		if (!(flag & FWRITE))
			return (EPERM);
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		err = ugen_set_interface(sc, ai->uai_interface_index,
			  ai->uai_alt_no);
		if (err)
			return (EINVAL);
		break;
	case USB_GET_NO_ALT:
		ai = (struct usb_alt_interface *)addr;
		cdesc = ugen_get_cdesc(sc, ai->uai_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
		if (idesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		ai->uai_alt_no = usbd_get_no_alts(cdesc,
		    idesc->bInterfaceNumber);
		free(cdesc, M_TEMP);
		break;
	case USB_GET_DEVICE_DESC:
		*(usb_device_descriptor_t *)addr =
			*usbd_get_device_descriptor(sc->sc_udev);
		break;
	case USB_GET_CONFIG_DESC:
		cd = (struct usb_config_desc *)addr;
		cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		cd->ucd_desc = *cdesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_INTERFACE_DESC:
		id = (struct usb_interface_desc *)addr;
		cdesc = ugen_get_cdesc(sc, id->uid_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
		    id->uid_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, id->uid_interface_index);
		else
			alt = id->uid_alt_index;
		idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
		if (idesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		id->uid_desc = *idesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_ENDPOINT_DESC:
		ed = (struct usb_endpoint_desc *)addr;
		cdesc = ugen_get_cdesc(sc, ed->ued_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
		    ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, ed->ued_interface_index);
		else
			alt = ed->ued_alt_index;
		edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
					alt, ed->ued_endpoint_index);
		if (edesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		ed->ued_desc = *edesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_FULL_DESC:
	{
		int len;
		struct iovec iov;
		struct uio uio;
		struct usb_full_desc *fd = (struct usb_full_desc *)addr;
		int error;

		cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &len);
		if (cdesc == NULL)	/* don't dereference a failed lookup */
			return (EINVAL);
		if (len > fd->ufd_size)
			len = fd->ufd_size;
		iov.iov_base = (void *)fd->ufd_data;
		iov.iov_len = len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_resid = len;
		uio.uio_offset = 0;
		uio.uio_rw = UIO_READ;
		uio.uio_vmspace = l->l_proc->p_vmspace;
		error = uiomove((void *)cdesc, len, &uio);
		free(cdesc, M_TEMP);
		return (error);
	}
	case USB_GET_STRING_DESC: {
		int len;
		si = (struct usb_string_desc *)addr;
		err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
			  si->usd_language_id, &si->usd_desc, &len);
		if (err)
			return (EINVAL);
		break;
	}
	case USB_DO_REQUEST:
	{
		struct usb_ctl_request *ur = (void *)addr;
		int len = UGETW(ur->ucr_request.wLength);
		struct iovec iov;
		struct uio uio;
		void *ptr = 0;
		usbd_status xerr;
		int error = 0;

		if (!(flag & FWRITE))
			return (EPERM);
		/* Avoid requests that would damage the bus integrity. */
		if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_CONFIG) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
		     ur->ucr_request.bRequest == UR_SET_INTERFACE))
			return (EINVAL);

		if (len < 0 || len > 32767)
			return (EINVAL);
		if (len != 0) {
			iov.iov_base = (void *)ur->ucr_data;
			iov.iov_len = len;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_resid = len;
			uio.uio_offset = 0;
			uio.uio_rw =
				ur->ucr_request.bmRequestType & UT_READ ?
				UIO_READ : UIO_WRITE;
			uio.uio_vmspace = l->l_proc->p_vmspace;
			ptr = malloc(len, M_TEMP, M_WAITOK);
			if (uio.uio_rw == UIO_WRITE) {
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
		sce = &sc->sc_endpoints[endpt][IN];
		xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
			  ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
		if (xerr) {
			error = EIO;
			goto ret;
		}
		if (len != 0) {
			if (uio.uio_rw == UIO_READ) {
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
	ret:
		if (ptr)
			free(ptr, M_TEMP);
		return (error);
	}
	case USB_GET_DEVICEINFO:
		usbd_fill_deviceinfo(sc->sc_udev,
				     (struct usb_device_info *)addr, 0);
		break;
#ifdef COMPAT_30
	case USB_GET_DEVICEINFO_OLD:
		usbd_fill_deviceinfo_old(sc->sc_udev,
					 (struct usb_device_info_old *)addr, 0);
		break;
#endif
	default:
		return (EINVAL);
	}
	return (0);
}

int
ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	int error;

	USB_GET_SC(ugen, UGENUNIT(dev), sc);

	sc->sc_refcnt++;
	error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
	if (--sc->sc_refcnt < 0)
		usb_detach_wakeup(USBDEV(sc->sc_dev));
	return (error);
}

int
ugenpoll(dev_t dev, int events, struct lwp *l)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce_in, *sce_out;
	int revents = 0;
	int s;

	USB_GET_SC(ugen, UGENUNIT(dev), sc);

	if (sc->sc_dying)
		return (POLLHUP);

	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
	if (sce_in == NULL && sce_out == NULL)
		return (POLLERR);
#ifdef DIAGNOSTIC
	if (!sce_in->edesc && !sce_out->edesc) {
		printf("ugenpoll: no edesc\n");
		return (POLLERR);
	}
	/* It's possible to have only one pipe open. */
	if (!sce_in->pipeh && !sce_out->pipeh) {
		printf("ugenpoll: no pipe\n");
		return (POLLERR);
	}
#endif
	s = splusb();
	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (sce_in->q.c_cc > 0)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_ISOCHRONOUS:
			if (sce_in->cur != sce_in->fill)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_BULK:
#ifdef UGEN_BULK_RA_WB
			if (sce_in->state & UGEN_BULK_RA) {
				if (sce_in->ra_wb_used > 0)
					revents |= events &
					    (POLLIN | POLLRDNORM);
				else
					selrecord(l, &sce_in->rsel);
				break;
			}
#endif
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLIN | POLLRDNORM);
			break;
		default:
			break;
		}
	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX unimplemented */
			break;
		case UE_BULK:
#ifdef UGEN_BULK_RA_WB
			if (sce_out->state & UGEN_BULK_WB) {
				if (sce_out->ra_wb_used <
				    sce_out->limit - sce_out->ibuf)
					revents |= events &
					    (POLLOUT | POLLWRNORM);
				else
					selrecord(l, &sce_out->rsel);
				break;
			}
#endif
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLOUT | POLLWRNORM);
			break;
		default:
			break;
		}

	splx(s);
	return (revents);
}

static void
filt_ugenrdetach(struct knote *kn)
{
	struct ugen_endpoint *sce = kn->kn_hook;
	int s;

	s = splusb();
	SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
	splx(s);
}

static int
filt_ugenread_intr(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;

	kn->kn_data = sce->q.c_cc;
	return (kn->kn_data > 0);
}

static int
filt_ugenread_isoc(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;

	if (sce->cur == sce->fill)
		return (0);

	if (sce->cur < sce->fill)
		kn->kn_data = sce->fill - sce->cur;
	else
		kn->kn_data = (sce->limit - sce->cur) +
		    (sce->fill - sce->ibuf);

	return (1);
}

#ifdef UGEN_BULK_RA_WB
static int
filt_ugenread_bulk(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;

	if (!(sce->state & UGEN_BULK_RA))
		/*
		 * We have no easy way of determining if a read will
		 * yield any data or a write will happen.
		 * So, emulate "seltrue".
		 */
		return (filt_seltrue(kn, hint));

	if (sce->ra_wb_used == 0)
		return (0);

	kn->kn_data = sce->ra_wb_used;

	return (1);
}

static int
filt_ugenwrite_bulk(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;

	if (!(sce->state & UGEN_BULK_WB))
		/*
		 * We have no easy way of determining if a read will
		 * yield any data or a write will happen.
		 * So, emulate "seltrue".
		 */
		return (filt_seltrue(kn, hint));

	if (sce->ra_wb_used == sce->limit - sce->ibuf)
		return (0);

	kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;

	return (1);
}
#endif

static const struct filterops ugenread_intr_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_intr };

static const struct filterops ugenread_isoc_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_isoc };

#ifdef UGEN_BULK_RA_WB
static const struct filterops ugenread_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_bulk };

static const struct filterops ugenwrite_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenwrite_bulk };
#else
static const struct filterops ugen_seltrue_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_seltrue };
#endif

int
ugenkqfilter(dev_t dev, struct knote *kn)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	struct klist *klist;
	int s;

	USB_GET_SC(ugen, UGENUNIT(dev), sc);

	if (sc->sc_dying)
		return (ENXIO);

	switch (kn->kn_filter) {
	case EVFILT_READ:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
		if (sce == NULL)
			return (EINVAL);

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			kn->kn_fop = &ugenread_intr_filtops;
			break;
		case UE_ISOCHRONOUS:
			kn->kn_fop = &ugenread_isoc_filtops;
			break;
		case UE_BULK:
#ifdef UGEN_BULK_RA_WB
			kn->kn_fop = &ugenread_bulk_filtops;
			break;
#else
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * So, emulate "seltrue".
			 */
			kn->kn_fop = &ugen_seltrue_filtops;
#endif
			break;
		default:
			return (EINVAL);
		}
		break;

	case EVFILT_WRITE:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
		if (sce == NULL)
			return (EINVAL);

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX poll doesn't support this */
			return (EINVAL);

		case UE_BULK:
#ifdef UGEN_BULK_RA_WB
			kn->kn_fop = &ugenwrite_bulk_filtops;
#else
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * So, emulate "seltrue".
			 */
			kn->kn_fop = &ugen_seltrue_filtops;
#endif
			break;
		default:
			return (EINVAL);
		}
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = sce;

	s = splusb();
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	splx(s);

	return (0);
}

#if defined(__FreeBSD__)
DRIVER_MODULE(ugen, uhub, ugen_driver, ugen_devclass, usbd_driver_load, 0);
#endif
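
/*
 * Illustrative note (not part of the driver): a minimal userland sketch of
 * the ioctl/read interface implemented above, kept inside a comment so it
 * does not enter the kernel build.  The device paths, unit and endpoint
 * numbers below are assumptions (conventionally /dev/ugenN.00 is the control
 * endpoint and /dev/ugenN.EE the other endpoints); error handling is
 * deliberately simplified.
 *
 *	#include <sys/ioctl.h>
 *	#include <dev/usb/usb.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		usb_device_descriptor_t dd;
 *		int ctl, ep, on = 1, timeout = 1000;
 *		char buf[512];
 *		ssize_t n;
 *
 *		// Control endpoint: fetch the device descriptor
 *		// (USB_GET_DEVICE_DESC is handled in ugen_do_ioctl()).
 *		ctl = open("/dev/ugen0.00", O_RDWR);	// hypothetical unit
 *		if (ctl < 0 || ioctl(ctl, USB_GET_DEVICE_DESC, &dd) < 0)
 *			return (1);
 *		printf("vendor 0x%04x product 0x%04x\n",
 *		    UGETW(dd.idVendor), UGETW(dd.idProduct));
 *
 *		// Bulk-in endpoint: set a transfer timeout, optionally enable
 *		// read-ahead (requires a kernel built with UGEN_BULK_RA_WB),
 *		// then read as with any character device.
 *		ep = open("/dev/ugen0.01", O_RDONLY);	// hypothetical endpoint
 *		if (ep < 0)
 *			return (1);
 *		(void)ioctl(ep, USB_SET_TIMEOUT, &timeout);
 *		(void)ioctl(ep, USB_SET_BULK_RA, &on);
 *		n = read(ep, buf, sizeof(buf));
 *		printf("read %zd bytes\n", n);
 *
 *		close(ep);
 *		close(ctl);
 *		return (0);
 *	}
 */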