/*	$NetBSD: ugen.c,v 1.108 2009/12/24 01:32:22 jakllsch Exp $	*/

/*
 * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Lennart Augustsson (lennart@augustsson.net) at
 * Carlstedt Research & Technology.
 *
 * Copyright (c) 2006 BBN Technologies Corp.  All rights reserved.
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and the Department of the Interior National Business
 * Center under agreement number NBCHC050166.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.108 2009/12/24 01:32:22 jakllsch Exp $");

#include "opt_compat_netbsd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#if defined(__NetBSD__) || defined(__OpenBSD__)
#include <sys/device.h>
#include <sys/ioctl.h>
#elif defined(__FreeBSD__)
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/ioccom.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#endif
#include <sys/conf.h>
#include <sys/tty.h>
#include <sys/file.h>
#include <sys/select.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>

#ifdef UGEN_DEBUG
#define DPRINTF(x)	if (ugendebug) logprintf x
#define DPRINTFN(n,x)	if (ugendebug>(n)) logprintf x
int	ugendebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

#define	UGEN_CHUNK	128	/* chunk size for read */
#define	UGEN_IBSIZE	1020	/* buffer size */
#define	UGEN_BBSIZE	1024

#define UGEN_NISOFRAMES	500	/* 0.5 seconds worth */
#define UGEN_NISOREQS	6	/* number of outstanding xfer requests */
#define UGEN_NISORFRMS	4	/* number of frames (milliseconds) per req */

#define UGEN_BULK_RA_WB_BUFSIZE	16384		/* default buffer size */
#define UGEN_BULK_RA_WB_BUFMAX	(1 << 20)	/* maximum allowed buffer */

struct ugen_endpoint {
	struct ugen_softc *sc;
	usb_endpoint_descriptor_t *edesc;
	usbd_interface_handle iface;
	int state;
#define	UGEN_ASLP	0x02	/* waiting for data */
#define UGEN_SHORT_OK	0x04	/* short xfers are OK */
#define UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
#define UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
#define UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
	usbd_pipe_handle pipeh;
	struct clist q;
	struct selinfo rsel;
	u_char *ibuf;		/* start of buffer (circular for isoc) */
	u_char *fill;		/* location for input (isoc) */
	u_char *limit;		/* end of circular buffer (isoc) */
	u_char *cur;		/* current read location (isoc) */
	u_int32_t timeout;
	u_int32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
	u_int32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
	u_int32_t ra_wb_used;	 /* how much is in buffer */
	u_int32_t ra_wb_xferlen; /* current xfer length for RA/WB */
	usbd_xfer_handle ra_wb_xfer;
	struct isoreq {
		struct ugen_endpoint *sce;
		usbd_xfer_handle xfer;
		void *dmabuf;
		u_int16_t sizes[UGEN_NISORFRMS];
	} isoreqs[UGEN_NISOREQS];
};

struct ugen_softc {
	USBBASEDEVICE sc_dev;		/* base device */
	usbd_device_handle sc_udev;

	char sc_is_open[USB_MAX_ENDPOINTS];
	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
#define OUT 0
#define IN  1

	int sc_refcnt;
	char sc_buffer[UGEN_BBSIZE];
	u_char sc_dying;
};

#if defined(__NetBSD__)
dev_type_open(ugenopen);
dev_type_close(ugenclose);
dev_type_read(ugenread);
dev_type_write(ugenwrite);
dev_type_ioctl(ugenioctl);
dev_type_poll(ugenpoll);
dev_type_kqfilter(ugenkqfilter);

const struct cdevsw ugen_cdevsw = {
	ugenopen, ugenclose, ugenread, ugenwrite, ugenioctl,
	nostop, notty, ugenpoll, nommap, ugenkqfilter, D_OTHER,
};
#elif defined(__OpenBSD__)
cdev_decl(ugen);
#elif defined(__FreeBSD__)
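/*
 * FreeBSD-style character device entry points and switch table,
 * mirroring the NetBSD cdevsw declared above.
 */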
d_open_t  ugenopen;
d_close_t ugenclose;
d_read_t  ugenread;
d_write_t ugenwrite;
d_ioctl_t ugenioctl;
d_poll_t  ugenpoll;

#define UGEN_CDEV_MAJOR	114

Static struct cdevsw ugen_cdevsw = {
	/* open */	ugenopen,
	/* close */	ugenclose,
	/* read */	ugenread,
	/* write */	ugenwrite,
	/* ioctl */	ugenioctl,
	/* poll */	ugenpoll,
	/* mmap */	nommap,
	/* strategy */	nostrategy,
	/* name */	"ugen",
	/* maj */	UGEN_CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	0,
	/* bmaj */	-1
};
#endif

Static void ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr,
		     usbd_status status);
Static void ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
			    usbd_status status);
Static void ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
			     usbd_status status);
Static void ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
			     usbd_status status);
Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
			 void *, int, struct lwp *);
Static int ugen_set_config(struct ugen_softc *sc, int configno);
Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *sc,
					       int index, int *lenp);
Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
Static int ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx);

#define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
#define UGENENDPOINT(n) (minor(n) & 0xf)
#define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))

USB_DECLARE_DRIVER(ugen);

/* toggle to control attach priority. -1 means "let autoconf decide" */
int ugen_override = -1;

USB_MATCH(ugen)
{
	USB_MATCH_START(ugen, uaa);
	int override;

	if (ugen_override != -1)
		override = ugen_override;
	else
		override = match->cf_flags & 1;

	if (override)
		return (UMATCH_HIGHEST);
	else if (uaa->usegeneric)
		return (UMATCH_GENERIC);
	else
		return (UMATCH_NONE);
}

USB_ATTACH(ugen)
{
	USB_ATTACH_START(ugen, sc, uaa);
	usbd_device_handle udev;
	char *devinfop;
	usbd_status err;
	int i, dir, conf;

	aprint_naive("\n");
	aprint_normal("\n");

	devinfop = usbd_devinfo_alloc(uaa->device, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_dev = self;
	sc->sc_udev = udev = uaa->device;

	/* First set configuration index 0, the default one for ugen. */
	err = usbd_set_config_index(udev, 0, 0);
	if (err) {
		aprint_error_dev(self,
		    "setting configuration index 0 failed\n");
		sc->sc_dying = 1;
		USB_ATTACH_ERROR_RETURN;
	}
	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;

	/* Set up all the local state for this configuration. */
	err = ugen_set_config(sc, conf);
	if (err) {
		aprint_error_dev(self, "setting configuration %d failed\n",
		    conf);
		sc->sc_dying = 1;
		USB_ATTACH_ERROR_RETURN;
	}

#ifdef __FreeBSD__
	{
		static int global_init_done = 0;
		if (!global_init_done) {
			cdevsw_add(&ugen_cdevsw);
			global_init_done = 1;
		}
	}
#endif
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce;

			sce = &sc->sc_endpoints[i][dir];
			selinit(&sce->rsel);
		}
	}

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev,
			   USBDEV(sc->sc_dev));

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	USB_ATTACH_SUCCESS_RETURN;
}

Static int
ugen_set_config(struct ugen_softc *sc, int configno)
{
	usbd_device_handle dev = sc->sc_udev;
	usb_config_descriptor_t *cdesc;
	usbd_interface_handle iface;
	usb_endpoint_descriptor_t *ed;
	struct ugen_endpoint *sce;
	u_int8_t niface, nendpt;
	int ifaceno, endptno, endpt;
	usbd_status err;
	int dir;

	DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
		    USBDEVNAME(sc->sc_dev), configno, sc));

	/*
	 * We start at 1, not 0, because we don't care whether the
	 * control endpoint is open or not. It is always present.
	 */
	for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
		if (sc->sc_is_open[endptno]) {
			DPRINTFN(1,
			     ("ugen_set_config: %s - endpoint %d is open\n",
			      USBDEVNAME(sc->sc_dev), endptno));
			return (USBD_IN_USE);
		}

	/* Avoid setting the current value. */
	cdesc = usbd_get_config_descriptor(dev);
	if (!cdesc || cdesc->bConfigurationValue != configno) {
		err = usbd_set_config_no(dev, configno, 1);
		if (err)
			return (err);
	}

	err = usbd_interface_count(dev, &niface);
	if (err)
		return (err);
	memset(sc->sc_endpoints, 0, sizeof sc->sc_endpoints);
	for (ifaceno = 0; ifaceno < niface; ifaceno++) {
		DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
		err = usbd_device2interface_handle(dev, ifaceno, &iface);
		if (err)
			return (err);
		err = usbd_endpoint_count(iface, &nendpt);
		if (err)
			return (err);
		for (endptno = 0; endptno < nendpt; endptno++) {
			ed = usbd_interface2endpoint_descriptor(iface,endptno);
			KASSERT(ed != NULL);
			endpt = ed->bEndpointAddress;
			dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
			sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
			DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
				    "(%d,%d), sce=%p\n",
				    endptno, endpt, UE_GET_ADDR(endpt),
				    UE_GET_DIR(endpt), sce));
			sce->sc = sc;
			sce->edesc = ed;
			sce->iface = iface;
		}
	}
	return (USBD_NORMAL_COMPLETION);
}

int
ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct ugen_softc *sc;
	int unit = UGENUNIT(dev);
	int endpt = UGENENDPOINT(dev);
	usb_endpoint_descriptor_t *edesc;
	struct ugen_endpoint *sce;
	int dir, isize;
	usbd_status err;
	usbd_xfer_handle xfer;
	void *tbuf;
	int i, j;

	USB_GET_SC_OPEN(ugen, unit, sc);

	DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
		     flag, mode, unit, endpt));

	if (sc == NULL || sc->sc_dying)
		return (ENXIO);

	/* The control endpoint allows multiple opens. */
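	/* Opens of any other endpoint are exclusive; see the EBUSY check below. */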
	if (endpt == USB_CONTROL_ENDPOINT) {
		sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
		return (0);
	}

	if (sc->sc_is_open[endpt])
		return (EBUSY);

	/* Make sure there are pipes for all directions. */
	for (dir = OUT; dir <= IN; dir++) {
		if (flag & (dir == OUT ? FWRITE : FREAD)) {
			sce = &sc->sc_endpoints[endpt][dir];
			if (sce == 0 || sce->edesc == 0)
				return (ENXIO);
		}
	}

	/* Actually open the pipes. */
	/* XXX Should back out properly if it fails. */
	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		sce->state = 0;
		sce->timeout = USBD_NO_TIMEOUT;
		DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
			     sc, endpt, dir, sce));
		edesc = sce->edesc;
		switch (edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (dir == OUT) {
				err = usbd_open_pipe(sce->iface,
				    edesc->bEndpointAddress, 0, &sce->pipeh);
				if (err)
					return (EIO);
				break;
			}
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return (EINVAL);
			sce->ibuf = malloc(isize, M_USBDEV, M_WAITOK);
			DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
				     endpt, isize));
			if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1)
				return (ENOMEM);
			err = usbd_open_pipe_intr(sce->iface,
				  edesc->bEndpointAddress,
				  USBD_SHORT_XFER_OK, &sce->pipeh, sce,
				  sce->ibuf, isize, ugenintr,
				  USBD_DEFAULT_INTERVAL);
			if (err) {
				free(sce->ibuf, M_USBDEV);
				clfree(&sce->q);
				return (EIO);
			}
			DPRINTFN(5, ("ugenopen: interrupt open done\n"));
			break;
		case UE_BULK:
			err = usbd_open_pipe(sce->iface,
				  edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err)
				return (EIO);
			sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
			/*
			 * Use request size for non-RA/WB transfers
			 * as the default.
			 */
			sce->ra_wb_reqsize = UGEN_BBSIZE;
			break;
		case UE_ISOCHRONOUS:
			if (dir == OUT)
				return (EINVAL);
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return (EINVAL);
			sce->ibuf = malloc(isize * UGEN_NISOFRAMES,
				M_USBDEV, M_WAITOK);
			sce->cur = sce->fill = sce->ibuf;
			sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
			DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
				     endpt, isize));
			err = usbd_open_pipe(sce->iface,
				  edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err) {
				free(sce->ibuf, M_USBDEV);
				return (EIO);
			}
			for(i = 0; i < UGEN_NISOREQS; ++i) {
				sce->isoreqs[i].sce = sce;
				xfer = usbd_alloc_xfer(sc->sc_udev);
				if (xfer == 0)
					goto bad;
				sce->isoreqs[i].xfer = xfer;
				tbuf = usbd_alloc_buffer
					(xfer, isize * UGEN_NISORFRMS);
				if (tbuf == 0) {
					i++;
					goto bad;
				}
				sce->isoreqs[i].dmabuf = tbuf;
				for(j = 0; j < UGEN_NISORFRMS; ++j)
					sce->isoreqs[i].sizes[j] = isize;
				usbd_setup_isoc_xfer
					(xfer, sce->pipeh, &sce->isoreqs[i],
					 sce->isoreqs[i].sizes,
					 UGEN_NISORFRMS, USBD_NO_COPY,
					 ugen_isoc_rintr);
				(void)usbd_transfer(xfer);
			}
			DPRINTFN(5, ("ugenopen: isoc open done\n"));
			break;
		bad:
			while (--i >= 0) /* implicit buffer free */
				usbd_free_xfer(sce->isoreqs[i].xfer);
			return (ENOMEM);
		case UE_CONTROL:
			sce->timeout = USBD_DEFAULT_TIMEOUT;
			return (EINVAL);
		}
	}
	sc->sc_is_open[endpt] = 1;
	return (0);
}

int
ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	int dir;
	int i;

	USB_GET_SC(ugen, UGENUNIT(dev), sc);

	DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
		     flag, mode, UGENUNIT(dev), endpt));

#ifdef DIAGNOSTIC
	if (!sc->sc_is_open[endpt]) {
		printf("ugenclose: not open\n");
		return (EINVAL);
	}
#endif

	if (endpt == USB_CONTROL_ENDPOINT) {
		DPRINTFN(5, ("ugenclose: close control\n"));
		sc->sc_is_open[endpt] = 0;
		return (0);
	}

	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		if (sce == NULL || sce->pipeh == NULL)
			continue;
		DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
			     endpt, dir, sce));

		usbd_abort_pipe(sce->pipeh);
		usbd_close_pipe(sce->pipeh);
		sce->pipeh = NULL;

		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			ndflush(&sce->q, sce->q.c_cc);
			clfree(&sce->q);
			break;
		case UE_ISOCHRONOUS:
			for (i = 0; i < UGEN_NISOREQS; ++i)
				usbd_free_xfer(sce->isoreqs[i].xfer);
			break;
		case UE_BULK:
			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB))
				/* ibuf freed below */
				usbd_free_xfer(sce->ra_wb_xfer);
			break;
		default:
			break;
		}

		if (sce->ibuf != NULL) {
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
			clfree(&sce->q);
		}
	}
	sc->sc_is_open[endpt] = 0;

	return (0);
}

Static int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
	u_int32_t n, tn;
	usbd_xfer_handle xfer;
	usbd_status err;
	int s;
	int error = 0;

	DPRINTFN(5, ("%s: ugenread: %d\n", USBDEVNAME(sc->sc_dev), endpt));

	if (sc->sc_dying)
		return (EIO);

	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenread: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenread: no pipe\n");
		return (EIO);
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_INTERRUPT:
		/* Block until activity occurred. */
		s = splusb();
		while (sce->q.c_cc == 0) {
			if (flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = tsleep(sce, PZERO | PCATCH, "ugenri", 0);
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}
		splx(s);

		/* Transfer as many chunks as possible. */
		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
			n = min(sce->q.c_cc, uio->uio_resid);
			if (n > sizeof(sc->sc_buffer))
				n = sizeof(sc->sc_buffer);

			/* Remove a small chunk from the input queue. */
			q_to_b(&sce->q, sc->sc_buffer, n);
			DPRINTFN(5, ("ugenread: got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
		}
		break;
	case UE_BULK:
		if (sce->state & UGEN_BULK_RA) {
			DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			s = splusb();
			if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			while (uio->uio_resid > 0 && !error) {
				while (sce->ra_wb_used == 0) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenread: sleep on %p\n",
						  sce));
					error = tsleep(sce, PZERO | PCATCH,
						       "ugenrb", 0);
					DPRINTFN(5,
						 ("ugenread: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data to the process. */
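				/*
				 * The read-ahead buffer is circular: copy up
				 * to the wrap point, then continue from the
				 * start of the buffer.
				 */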
				while (uio->uio_resid > 0
				       && sce->ra_wb_used > 0) {
					n = min(uio->uio_resid,
						sce->ra_wb_used);
					n = min(n, sce->limit - sce->cur);
					error = uiomove(sce->cur, n, uio);
					if (error)
						break;
					sce->cur += n;
					sce->ra_wb_used -= n;
					if (sce->cur == sce->limit)
						sce->cur = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was full, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = (sce->limit - sce->ibuf)
					    - sce->ra_wb_used;
					usbd_setup_xfer(xfer,
					    sce->pipeh, sce, NULL,
					    min(n, sce->ra_wb_xferlen),
					    USBD_NO_COPY, USBD_NO_TIMEOUT,
					    ugen_bulkra_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try
						 * again at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			splx(s);
			break;
		}
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (ENOMEM);
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
			tn = n;
			err = usbd_bulk_transfer(
				  xfer, sce->pipeh,
				  sce->state & UGEN_SHORT_OK ?
				      USBD_SHORT_XFER_OK : 0,
				  sce->timeout, sc->sc_buffer, &tn, "ugenrb");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
			DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
			error = uiomove(sc->sc_buffer, tn, uio);
			if (error || tn < n)
				break;
		}
		usbd_free_xfer(xfer);
		break;
	case UE_ISOCHRONOUS:
		s = splusb();
		while (sce->cur == sce->fill) {
			if (flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = tsleep(sce, PZERO | PCATCH, "ugenri", 0);
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}

		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
			if(sce->fill > sce->cur)
				n = min(sce->fill - sce->cur, uio->uio_resid);
			else
				n = min(sce->limit - sce->cur, uio->uio_resid);

			DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sce->cur, n, uio);
			if (error)
				break;
			sce->cur += n;
			if(sce->cur >= sce->limit)
				sce->cur = sce->ibuf;
		}
		splx(s);
		break;


	default:
		return (ENXIO);
	}
	return (error);
}

int
ugenread(dev_t dev, struct uio *uio, int flag)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	int error;

	USB_GET_SC(ugen, UGENUNIT(dev), sc);

	sc->sc_refcnt++;
	error = ugen_do_read(sc, endpt, uio, flag);
	if (--sc->sc_refcnt < 0)
		usb_detach_wakeup(USBDEV(sc->sc_dev));
	return (error);
}

Static int
ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
	int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
	u_int32_t n;
	int error = 0;
	int s;
	u_int32_t tn;
	char *dbuf;
	usbd_xfer_handle xfer;
	usbd_status err;

	DPRINTFN(5, ("%s: ugenwrite: %d\n", USBDEVNAME(sc->sc_dev), endpt));

	if (sc->sc_dying)
		return (EIO);

	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenwrite: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenwrite: no pipe\n");
		return (EIO);
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_BULK:
		if (sce->state & UGEN_BULK_WB) {
			DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			s = splusb();
			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
			    flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			while (uio->uio_resid > 0 && !error) {
				while (sce->ra_wb_used ==
				       sce->limit - sce->ibuf) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenwrite: sleep on %p\n",
						  sce));
					error = tsleep(sce, PZERO | PCATCH,
						       "ugenwb", 0);
					DPRINTFN(5,
						 ("ugenwrite: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data from the process. */
				while (uio->uio_resid > 0 &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = min(uio->uio_resid,
						(sce->limit - sce->ibuf)
						 - sce->ra_wb_used);
					n = min(n, sce->limit - sce->fill);
					error = uiomove(sce->fill, n, uio);
					if (error)
						break;
					sce->fill += n;
					sce->ra_wb_used += n;
					if (sce->fill == sce->limit)
						sce->fill = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was empty, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used > 0) {
					dbuf = (char *)usbd_get_buffer(xfer);
					n = min(sce->ra_wb_used,
						sce->ra_wb_xferlen);
					tn = min(n, sce->limit - sce->cur);
					memcpy(dbuf, sce->cur, tn);
					dbuf += tn;
					if (n - tn > 0)
						memcpy(dbuf, sce->ibuf,
						       n - tn);
					usbd_setup_xfer(xfer,
					    sce->pipeh, sce, NULL, n,
					    USBD_NO_COPY, USBD_NO_TIMEOUT,
					    ugen_bulkwb_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try again
						 * at the next write.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			splx(s);
			break;
		}
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (EIO);
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_bulk_transfer(xfer, sce->pipeh, 0,
				  sce->timeout, sc->sc_buffer, &n,"ugenwb");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_free_xfer(xfer);
		break;
	case UE_INTERRUPT:
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (EIO);
		while ((n = min(UGETW(sce->edesc->wMaxPacketSize),
		    uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_intr_transfer(xfer, sce->pipeh, 0,
			    sce->timeout, sc->sc_buffer, &n, "ugenwi");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_free_xfer(xfer);
		break;
	default:
		return (ENXIO);
	}
	return (error);
}

int
ugenwrite(dev_t dev, struct uio *uio, int flag)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	int error;

	USB_GET_SC(ugen, UGENUNIT(dev), sc);

	sc->sc_refcnt++;
	error = ugen_do_write(sc, endpt, uio, flag);
	if (--sc->sc_refcnt < 0)
		usb_detach_wakeup(USBDEV(sc->sc_dev));
	return (error);
}

#if defined(__NetBSD__) || defined(__OpenBSD__)
int
ugen_activate(device_ptr_t self, enum devact act)
{
	struct ugen_softc *sc = device_private(self);

	switch (act) {
	case DVACT_DEACTIVATE:
		sc->sc_dying = 1;
		return 0;
	default:
		return EOPNOTSUPP;
	}
}
#endif

USB_DETACH(ugen)
{
	USB_DETACH_START(ugen, sc);
	struct ugen_endpoint *sce;
	int i, dir;
	int s;
#if defined(__NetBSD__) || defined(__OpenBSD__)
	int maj, mn;

	DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));
#elif defined(__FreeBSD__)
	DPRINTF(("ugen_detach: sc=%p\n", sc));
#endif

	sc->sc_dying = 1;
	pmf_device_deregister(self);
	/* Abort all pipes.  Causes processes waiting for transfer to wake. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			if (sce && sce->pipeh)
				usbd_abort_pipe(sce->pipeh);
		}
	}

	s = splusb();
	if (--sc->sc_refcnt >= 0) {
		/* Wake everyone */
		for (i = 0; i < USB_MAX_ENDPOINTS; i++)
			wakeup(&sc->sc_endpoints[i][IN]);
		/* Wait for processes to go away. */
		usb_detach_wait(USBDEV(sc->sc_dev));
	}
	splx(s);

#if defined(__NetBSD__) || defined(__OpenBSD__)
	/* locate the major number */
#if defined(__NetBSD__)
	maj = cdevsw_lookup_major(&ugen_cdevsw);
#elif defined(__OpenBSD__)
	for (maj = 0; maj < nchrdev; maj++)
		if (cdevsw[maj].d_open == ugenopen)
			break;
#endif

	/* Nuke the vnodes for any open instances (calls close). */
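	/* Each unit uses USB_MAX_ENDPOINTS consecutive minor numbers. */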
	mn = device_unit(self) * USB_MAX_ENDPOINTS;
	vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);
#elif defined(__FreeBSD__)
	/* XXX not implemented yet */
#endif

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev,
			   USBDEV(sc->sc_dev));

	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			seldestroy(&sce->rsel);
		}
	}

	return (0);
}

Static void
ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr, usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	/*struct ugen_softc *sc = sce->sc;*/
	u_int32_t count;
	u_char *ibuf;

	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugenintr: status=%d\n", status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	ibuf = sce->ibuf;

	DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
		     xfer, status, count));
	DPRINTFN(5, (" data = %02x %02x %02x\n",
		     ibuf[0], ibuf[1], ibuf[2]));

	(void)b_to_q(ibuf, count, &sce->q);

	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_intr: waking %p\n", sce));
		wakeup(sce);
	}
	selnotify(&sce->rsel, 0, 0);
}

Static void
ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
		usbd_status status)
{
	struct isoreq *req = addr;
	struct ugen_endpoint *sce = req->sce;
	u_int32_t count, n;
	int i, isize;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
		    (long)(req - sce->isoreqs), count));

	/* throw away oldest input if the buffer is full */
	if(sce->fill < sce->cur && sce->cur <= sce->fill + count) {
		sce->cur += count;
		if(sce->cur >= sce->limit)
			sce->cur = sce->ibuf + (sce->limit - sce->cur);
		DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
			     count));
	}

	isize = UGETW(sce->edesc->wMaxPacketSize);
	for (i = 0; i < UGEN_NISORFRMS; i++) {
		u_int32_t actlen = req->sizes[i];
		char const *tbuf = (char const *)req->dmabuf + isize * i;

		/* copy data to buffer */
		while (actlen > 0) {
			n = min(actlen, sce->limit - sce->fill);
			memcpy(sce->fill, tbuf, n);

			tbuf += n;
			actlen -= n;
			sce->fill += n;
			if(sce->fill == sce->limit)
				sce->fill = sce->ibuf;
		}

		/* setup size for next transfer */
		req->sizes[i] = isize;
	}

	usbd_setup_isoc_xfer(xfer, sce->pipeh, req, req->sizes, UGEN_NISORFRMS,
			     USBD_NO_COPY, ugen_isoc_rintr);
	(void)usbd_transfer(xfer);

	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce));
		wakeup(sce);
	}
	selnotify(&sce->rsel, 0, 0);
}

Static void
ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	u_int32_t count, n;
	char const *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used += count;

	/* Copy data to buffer. */
	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
	n = min(count, sce->limit - sce->fill);
	memcpy(sce->fill, tbuf, n);
	tbuf += n;
	count -= n;
	sce->fill += n;
	if (sce->fill == sce->limit)
		sce->fill = sce->ibuf;
	if (count > 0) {
		memcpy(sce->fill, tbuf, count);
		sce->fill += count;
	}

	/* Set up the next request if necessary. */
	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
	if (n > 0) {
		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
		    min(n, sce->ra_wb_xferlen), USBD_NO_COPY,
		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkra_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next read.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		sce->state |= UGEN_RA_WB_STOP;

	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce));
		wakeup(sce);
	}
	selnotify(&sce->rsel, 0, 0);
}

Static void
ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	u_int32_t count, n;
	char *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used -= count;

	/* Update buffer pointers. */
	sce->cur += count;
	if (sce->cur >= sce->limit)
		sce->cur = sce->ibuf + (sce->cur - sce->limit);

	/* Set up next request if necessary. */
	if (sce->ra_wb_used > 0) {
		/* copy data from buffer */
		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
		count = min(sce->ra_wb_used, sce->ra_wb_xferlen);
		n = min(count, sce->limit - sce->cur);
		memcpy(tbuf, sce->cur, n);
		tbuf += n;
		if (count - n > 0)
			memcpy(tbuf, sce->ibuf, count - n);

		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
		    count, USBD_NO_COPY, USBD_NO_TIMEOUT, ugen_bulkwb_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkwb_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next write.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		sce->state |= UGEN_RA_WB_STOP;

	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce));
		wakeup(sce);
	}
	selnotify(&sce->rsel, 0, 0);
}

Static usbd_status
ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
{
	usbd_interface_handle iface;
	usb_endpoint_descriptor_t *ed;
	usbd_status err;
	struct ugen_endpoint *sce;
	u_int8_t niface, nendpt, endptno, endpt;
	int dir;

	DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));

	err = usbd_interface_count(sc->sc_udev, &niface);
	if (err)
		return (err);
	if (ifaceidx < 0 || ifaceidx >= niface)
		return (USBD_INVAL);

	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
	if (err)
		return (err);
	err = usbd_endpoint_count(iface, &nendpt);
	if (err)
		return (err);
	/* XXX should only do this after setting new altno has succeeded */
	for (endptno = 0; endptno < nendpt; endptno++) {
		ed = usbd_interface2endpoint_descriptor(iface,endptno);
		endpt = ed->bEndpointAddress;
		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
		sce->sc = 0;
		sce->edesc = 0;
		sce->iface = 0;
	}

	/* change setting */
	err = usbd_set_interface(iface, altno);
	if (err)
		return (err);

	err = usbd_endpoint_count(iface, &nendpt);
	if (err)
		return (err);
	for (endptno = 0; endptno < nendpt; endptno++) {
		ed = usbd_interface2endpoint_descriptor(iface,endptno);
		KASSERT(ed != NULL);
		endpt = ed->bEndpointAddress;
		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
		sce->sc = sc;
		sce->edesc = ed;
		sce->iface = iface;
	}
	return (0);
}

/* Retrieve a complete descriptor for a certain device and index. */
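/* The descriptor is allocated with malloc(9) (M_TEMP); the caller frees it. */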
Static usb_config_descriptor_t *
ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
{
	usb_config_descriptor_t *cdesc, *tdesc, cdescr;
	int len;
	usbd_status err;

	if (index == USB_CURRENT_CONFIG_INDEX) {
		tdesc = usbd_get_config_descriptor(sc->sc_udev);
		len = UGETW(tdesc->wTotalLength);
		if (lenp)
			*lenp = len;
		cdesc = malloc(len, M_TEMP, M_WAITOK);
		memcpy(cdesc, tdesc, len);
		DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
	} else {
		err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
		if (err)
			return (0);
		len = UGETW(cdescr.wTotalLength);
		DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
		if (lenp)
			*lenp = len;
		cdesc = malloc(len, M_TEMP, M_WAITOK);
		err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
		if (err) {
			free(cdesc, M_TEMP);
			return (0);
		}
	}
	return (cdesc);
}

Static int
ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
{
	usbd_interface_handle iface;
	usbd_status err;

	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
	if (err)
		return (-1);
	return (usbd_get_interface_altindex(iface));
}

Static int
ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
	      void *addr, int flag, struct lwp *l)
{
	struct ugen_endpoint *sce;
	usbd_status err;
	usbd_interface_handle iface;
	struct usb_config_desc *cd;
	usb_config_descriptor_t *cdesc;
	struct usb_interface_desc *id;
	usb_interface_descriptor_t *idesc;
	struct usb_endpoint_desc *ed;
	usb_endpoint_descriptor_t *edesc;
	struct usb_alt_interface *ai;
	struct usb_string_desc *si;
	u_int8_t conf, alt;

	DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
	if (sc->sc_dying)
		return (EIO);

	switch (cmd) {
	case FIONBIO:
		/* All handled in the upper FS layer. */
		return (0);
	case USB_SET_SHORT_XFER:
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		/* This flag only affects read */
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		if (*(int *)addr)
			sce->state |= UGEN_SHORT_OK;
		else
			sce->state &= ~UGEN_SHORT_OK;
		return (0);
	case USB_SET_TIMEOUT:
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL
		    /* XXX this shouldn't happen, but the distinction between
		       input and output pipes isn't clear enough.
		       || sce->pipeh == NULL */
		   )
			return (EINVAL);
		sce->timeout = *(int *)addr;
		return (0);
	case USB_SET_BULK_RA:
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return (EINVAL);

		if (*(int *)addr) {
			/* Only turn RA on if it's currently off. */
			if (sce->state & UGEN_BULK_RA)
				return (0);

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return (EINVAL);
			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (sce->ra_wb_xfer == NULL)
				return (ENOMEM);
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			/*
			 * Set up a dmabuf because we reuse the xfer with
			 * the same (max) request length like isoc.
			 */
			if (usbd_alloc_buffer(sce->ra_wb_xfer,
					      sce->ra_wb_xferlen) == 0) {
				usbd_free_xfer(sce->ra_wb_xfer);
				return (ENOMEM);
			}
			sce->ibuf = malloc(sce->ra_wb_bufsize,
					   M_USBDEV, M_WAITOK);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			sce->state |= UGEN_BULK_RA;
			sce->state &= ~UGEN_RA_WB_STOP;
			/* Now start reading. */
			usbd_setup_xfer(sce->ra_wb_xfer, sce->pipeh, sce,
			    NULL,
			    min(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
			    USBD_NO_COPY, USBD_NO_TIMEOUT,
			    ugen_bulkra_intr);
			err = usbd_transfer(sce->ra_wb_xfer);
			if (err != USBD_IN_PROGRESS) {
				sce->state &= ~UGEN_BULK_RA;
				free(sce->ibuf, M_USBDEV);
				sce->ibuf = NULL;
				usbd_free_xfer(sce->ra_wb_xfer);
				return (EIO);
			}
		} else {
			/* Only turn RA off if it's currently on. */
			if (!(sce->state & UGEN_BULK_RA))
				return (0);

			sce->state &= ~UGEN_BULK_RA;
			usbd_abort_pipe(sce->pipeh);
			usbd_free_xfer(sce->ra_wb_xfer);
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and drain the buffer
			 * instead.
			 */
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
		return (0);
	case USB_SET_BULK_WB:
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return (EINVAL);

		if (*(int *)addr) {
			/* Only turn WB on if it's currently off. */
			if (sce->state & UGEN_BULK_WB)
				return (0);

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return (EINVAL);
			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (sce->ra_wb_xfer == NULL)
				return (ENOMEM);
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			/*
			 * Set up a dmabuf because we reuse the xfer with
			 * the same (max) request length like isoc.
			 */
			if (usbd_alloc_buffer(sce->ra_wb_xfer,
					      sce->ra_wb_xferlen) == 0) {
				usbd_free_xfer(sce->ra_wb_xfer);
				return (ENOMEM);
			}
			sce->ibuf = malloc(sce->ra_wb_bufsize,
					   M_USBDEV, M_WAITOK);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
		} else {
			/* Only turn WB off if it's currently on. */
			if (!(sce->state & UGEN_BULK_WB))
				return (0);

			sce->state &= ~UGEN_BULK_WB;
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and keep writing to
			 * drain the buffer instead.
			 */
			usbd_abort_pipe(sce->pipeh);
			usbd_free_xfer(sce->ra_wb_xfer);
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
		return (0);
	case USB_SET_BULK_RA_OPT:
	case USB_SET_BULK_WB_OPT:
	{
		struct usb_bulk_ra_wb_opt *opt;

		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		opt = (struct usb_bulk_ra_wb_opt *)addr;
		if (cmd == USB_SET_BULK_RA_OPT)
			sce = &sc->sc_endpoints[endpt][IN];
		else
			sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		if (opt->ra_wb_buffer_size < 1 ||
		    opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
		    opt->ra_wb_request_size < 1 ||
		    opt->ra_wb_request_size > opt->ra_wb_buffer_size)
			return (EINVAL);
		/*
		 * XXX These changes do not take effect until the
		 * next time RA/WB mode is enabled but they ought to
		 * take effect immediately.
		 */
		sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
		sce->ra_wb_reqsize = opt->ra_wb_request_size;
		return (0);
	}
	default:
		break;
	}

	if (endpt != USB_CONTROL_ENDPOINT)
		return (EINVAL);

	switch (cmd) {
#ifdef UGEN_DEBUG
	case USB_SETDEBUG:
		ugendebug = *(int *)addr;
		break;
#endif
	case USB_GET_CONFIG:
		err = usbd_get_config(sc->sc_udev, &conf);
		if (err)
			return (EIO);
		*(int *)addr = conf;
		break;
	case USB_SET_CONFIG:
		if (!(flag & FWRITE))
			return (EPERM);
		err = ugen_set_config(sc, *(int *)addr);
		switch (err) {
		case USBD_NORMAL_COMPLETION:
			break;
		case USBD_IN_USE:
			return (EBUSY);
		default:
			return (EIO);
		}
		break;
	case USB_GET_ALTINTERFACE:
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		idesc = usbd_get_interface_descriptor(iface);
		if (idesc == NULL)
			return (EIO);
		ai->uai_alt_no = idesc->bAlternateSetting;
		break;
	case USB_SET_ALTINTERFACE:
		if (!(flag & FWRITE))
			return (EPERM);
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		err = ugen_set_interface(sc, ai->uai_interface_index,
		    ai->uai_alt_no);
		if (err)
			return (EINVAL);
		break;
	case USB_GET_NO_ALT:
		ai = (struct usb_alt_interface *)addr;
		cdesc = ugen_get_cdesc(sc, ai->uai_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
		if (idesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		ai->uai_alt_no = usbd_get_no_alts(cdesc,
		    idesc->bInterfaceNumber);
		free(cdesc, M_TEMP);
		break;
	case USB_GET_DEVICE_DESC:
		*(usb_device_descriptor_t *)addr =
			*usbd_get_device_descriptor(sc->sc_udev);
		break;
	case USB_GET_CONFIG_DESC:
		cd = (struct usb_config_desc *)addr;
		cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		cd->ucd_desc = *cdesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_INTERFACE_DESC:
		id = (struct usb_interface_desc *)addr;
		cdesc = ugen_get_cdesc(sc, id->uid_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
		    id->uid_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, id->uid_interface_index);
		else
			alt = id->uid_alt_index;
		idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
		if (idesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		id->uid_desc = *idesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_ENDPOINT_DESC:
		ed = (struct usb_endpoint_desc *)addr;
		cdesc = ugen_get_cdesc(sc, ed->ued_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
		    ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, ed->ued_interface_index);
		else
			alt = ed->ued_alt_index;
		edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
					alt, ed->ued_endpoint_index);
		if (edesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		ed->ued_desc = *edesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_FULL_DESC:
	{
		int len;
		struct iovec iov;
		struct uio uio;
		struct usb_full_desc *fd = (struct usb_full_desc *)addr;
		int error;

		cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &len);
		if (cdesc == NULL)
			return (EINVAL);
		if (len > fd->ufd_size)
			len = fd->ufd_size;
		iov.iov_base = (void *)fd->ufd_data;
		iov.iov_len = len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_resid = len;
		uio.uio_offset = 0;
		uio.uio_rw = UIO_READ;
		uio.uio_vmspace = l->l_proc->p_vmspace;
		error = uiomove((void *)cdesc, len, &uio);
		free(cdesc, M_TEMP);
		return (error);
	}
	case USB_GET_STRING_DESC: {
		int len;
		si = (struct usb_string_desc *)addr;
		err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
			  si->usd_language_id, &si->usd_desc, &len);
		if (err)
			return (EINVAL);
		break;
	}
	case USB_DO_REQUEST:
	{
		struct usb_ctl_request *ur = (void *)addr;
		int len = UGETW(ur->ucr_request.wLength);
		struct iovec iov;
		struct uio uio;
		void *ptr = 0;
		usbd_status xerr;
		int error = 0;

		if (!(flag & FWRITE))
			return (EPERM);
		/* Avoid requests that would damage the bus integrity. */
		if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_CONFIG) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
		     ur->ucr_request.bRequest == UR_SET_INTERFACE))
			return (EINVAL);

		if (len < 0 || len > 32767)
			return (EINVAL);
		if (len != 0) {
			iov.iov_base = (void *)ur->ucr_data;
			iov.iov_len = len;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_resid = len;
			uio.uio_offset = 0;
			uio.uio_rw =
				ur->ucr_request.bmRequestType & UT_READ ?
				UIO_READ : UIO_WRITE;
			uio.uio_vmspace = l->l_proc->p_vmspace;
			ptr = malloc(len, M_TEMP, M_WAITOK);
			if (uio.uio_rw == UIO_WRITE) {
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
		sce = &sc->sc_endpoints[endpt][IN];
		xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
			  ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
		if (xerr) {
			error = EIO;
			goto ret;
		}
		if (len != 0) {
			if (uio.uio_rw == UIO_READ) {
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
	ret:
		if (ptr)
			free(ptr, M_TEMP);
		return (error);
	}
	case USB_GET_DEVICEINFO:
		usbd_fill_deviceinfo(sc->sc_udev,
		    (struct usb_device_info *)addr, 0);
		break;
#ifdef COMPAT_30
	case USB_GET_DEVICEINFO_OLD:
		usbd_fill_deviceinfo_old(sc->sc_udev,
		    (struct usb_device_info_old *)addr, 0);

		break;
#endif
	default:
		return (EINVAL);
	}
	return (0);
}

int
ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	int error;

	USB_GET_SC(ugen, UGENUNIT(dev), sc);

	sc->sc_refcnt++;
	error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
	if (--sc->sc_refcnt < 0)
		usb_detach_wakeup(USBDEV(sc->sc_dev));
	return (error);
}

int
ugenpoll(dev_t dev, int events, struct lwp *l)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce_in, *sce_out;
	int revents = 0;
	int s;

	USB_GET_SC(ugen, UGENUNIT(dev), sc);

	if (sc->sc_dying)
		return (POLLHUP);

	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
	if (sce_in == NULL && sce_out == NULL)
		return (POLLERR);
#ifdef DIAGNOSTIC
	if (!sce_in->edesc && !sce_out->edesc) {
		printf("ugenpoll: no edesc\n");
		return (POLLERR);
	}
	/* It's possible to have only one pipe open. */
	if (!sce_in->pipeh && !sce_out->pipeh) {
		printf("ugenpoll: no pipe\n");
		return (POLLERR);
	}
#endif
	s = splusb();
	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (sce_in->q.c_cc > 0)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_ISOCHRONOUS:
			if (sce_in->cur != sce_in->fill)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_BULK:
			if (sce_in->state & UGEN_BULK_RA) {
				if (sce_in->ra_wb_used > 0)
					revents |= events &
					    (POLLIN | POLLRDNORM);
				else
					selrecord(l, &sce_in->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLIN | POLLRDNORM);
			break;
		default:
			break;
		}
	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX unimplemented */
			break;
		case UE_BULK:
			if (sce_out->state & UGEN_BULK_WB) {
				if (sce_out->ra_wb_used <
				    sce_out->limit - sce_out->ibuf)
					revents |= events &
					    (POLLOUT | POLLWRNORM);
				else
					selrecord(l, &sce_out->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLOUT | POLLWRNORM);
			break;
		default:
			break;
		}


	splx(s);
	return (revents);
}

static void
filt_ugenrdetach(struct knote *kn)
{
	struct ugen_endpoint *sce = kn->kn_hook;
	int s;

	s = splusb();
	SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
	splx(s);
}

static int
filt_ugenread_intr(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;

	kn->kn_data = sce->q.c_cc;
	return (kn->kn_data > 0);
}

static int
filt_ugenread_isoc(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;

	if (sce->cur == sce->fill)
		return (0);

	if (sce->cur < sce->fill)
		kn->kn_data = sce->fill - sce->cur;
	else
		kn->kn_data = (sce->limit - sce->cur) +
		    (sce->fill - sce->ibuf);

	return (1);
}

static int
filt_ugenread_bulk(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;

	if (!(sce->state & UGEN_BULK_RA))
		/*
		 * We have no easy way of determining if a read will
		 * yield any data or a write will happen.
		 * So, emulate "seltrue".
		 */
		return (filt_seltrue(kn, hint));

	if (sce->ra_wb_used == 0)
		return (0);

	kn->kn_data = sce->ra_wb_used;

	return (1);
}

static int
filt_ugenwrite_bulk(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;

	if (!(sce->state & UGEN_BULK_WB))
		/*
		 * We have no easy way of determining if a read will
		 * yield any data or a write will happen.
		 * So, emulate "seltrue".
		 */
		return (filt_seltrue(kn, hint));

	if (sce->ra_wb_used == sce->limit - sce->ibuf)
		return (0);

	kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;

	return (1);
}

static const struct filterops ugenread_intr_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_intr };

static const struct filterops ugenread_isoc_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_isoc };

static const struct filterops ugenread_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_bulk };

static const struct filterops ugenwrite_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenwrite_bulk };

int
ugenkqfilter(dev_t dev, struct knote *kn)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	struct klist *klist;
	int s;

	USB_GET_SC(ugen, UGENUNIT(dev), sc);

	if (sc->sc_dying)
		return (ENXIO);

	switch (kn->kn_filter) {
	case EVFILT_READ:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
		if (sce == NULL)
			return (EINVAL);

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			kn->kn_fop = &ugenread_intr_filtops;
			break;
		case UE_ISOCHRONOUS:
			kn->kn_fop = &ugenread_isoc_filtops;
			break;
		case UE_BULK:
			kn->kn_fop = &ugenread_bulk_filtops;
			break;
		default:
			return (EINVAL);
		}
		break;

	case EVFILT_WRITE:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
		if (sce == NULL)
			return (EINVAL);

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX poll doesn't support this */
			return (EINVAL);

		case UE_BULK:
			kn->kn_fop = &ugenwrite_bulk_filtops;
			break;
		default:
			return (EINVAL);
		}
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = sce;

	s = splusb();
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	splx(s);

	return (0);
}

#if defined(__FreeBSD__)
DRIVER_MODULE(ugen, uhub, ugen_driver, ugen_devclass, usbd_driver_load, 0);
#endif