/*	$NetBSD: ugen.c,v 1.105 2009/09/24 22:33:04 pooka Exp $	*/

/*
 * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Lennart Augustsson (lennart@augustsson.net) at
 * Carlstedt Research & Technology.
 *
 * Copyright (c) 2006 BBN Technologies Corp.  All rights reserved.
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and the Department of the Interior National Business
 * Center under agreement number NBCHC050166.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.105 2009/09/24 22:33:04 pooka Exp $");

#include "opt_ugen_bulk_ra_wb.h"
#include "opt_compat_netbsd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#if defined(__NetBSD__) || defined(__OpenBSD__)
#include <sys/device.h>
#include <sys/ioctl.h>
#elif defined(__FreeBSD__)
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/ioccom.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#endif
#include <sys/conf.h>
#include <sys/tty.h>
#include <sys/file.h>
#include <sys/select.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>

#ifdef UGEN_DEBUG
#define DPRINTF(x)	if (ugendebug) logprintf x
#define DPRINTFN(n,x)	if (ugendebug>(n)) logprintf x
int	ugendebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

#define	UGEN_CHUNK	128	/* chunk size for read */
#define	UGEN_IBSIZE	1020	/* buffer size */
#define	UGEN_BBSIZE	1024

#define UGEN_NISOFRAMES	500	/* 0.5 seconds worth */
#define UGEN_NISOREQS	6	/* number of outstanding xfer requests */
#define UGEN_NISORFRMS	4	/* number of frames (milliseconds) per req */

#define UGEN_BULK_RA_WB_BUFSIZE	16384		/* default buffer size */
#define UGEN_BULK_RA_WB_BUFMAX	(1 << 20)	/* maximum allowed buffer */
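
/*
 * Rough sizing sketch (editorial note, not from the original source):
 * with the defaults above, ugenopen() allocates an isochronous ring of
 * wMaxPacketSize * UGEN_NISOFRAMES bytes and keeps UGEN_NISOREQS
 * transfers of UGEN_NISORFRMS frames each outstanding.  Assuming 1 ms
 * frames and an endpoint with wMaxPacketSize = 1023:
 *
 *	ring size = 1023 * 500 = 511500 bytes (~0.5 s of data)
 *	in flight = 6 * 4 = 24 frames (~24 ms of outstanding requests)
 *
 * A smaller packet size simply scales the ring down proportionally.
 */
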
struct ugen_endpoint {
	struct ugen_softc *sc;
	usb_endpoint_descriptor_t *edesc;
	usbd_interface_handle iface;
	int state;
#define	UGEN_ASLP	0x02	/* waiting for data */
#define UGEN_SHORT_OK	0x04	/* short xfers are OK */
#define UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
#define UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
#define UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
	usbd_pipe_handle pipeh;
	struct clist q;
	struct selinfo rsel;
	u_char *ibuf;		/* start of buffer (circular for isoc) */
	u_char *fill;		/* location for input (isoc) */
	u_char *limit;		/* end of circular buffer (isoc) */
	u_char *cur;		/* current read location (isoc) */
	u_int32_t timeout;
#ifdef UGEN_BULK_RA_WB
	u_int32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
	u_int32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
	u_int32_t ra_wb_used;	 /* how much is in buffer */
	u_int32_t ra_wb_xferlen; /* current xfer length for RA/WB */
	usbd_xfer_handle ra_wb_xfer;
#endif
	struct isoreq {
		struct ugen_endpoint *sce;
		usbd_xfer_handle xfer;
		void *dmabuf;
		u_int16_t sizes[UGEN_NISORFRMS];
	} isoreqs[UGEN_NISOREQS];
};

struct ugen_softc {
	USBBASEDEVICE sc_dev;		/* base device */
	usbd_device_handle sc_udev;

	char sc_is_open[USB_MAX_ENDPOINTS];
	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
#define OUT 0
#define IN  1

	int sc_refcnt;
	char sc_buffer[UGEN_BBSIZE];
	u_char sc_dying;
};

#if defined(__NetBSD__)
dev_type_open(ugenopen);
dev_type_close(ugenclose);
dev_type_read(ugenread);
dev_type_write(ugenwrite);
dev_type_ioctl(ugenioctl);
dev_type_poll(ugenpoll);
dev_type_kqfilter(ugenkqfilter);

const struct cdevsw ugen_cdevsw = {
	ugenopen, ugenclose, ugenread, ugenwrite, ugenioctl,
	nostop, notty, ugenpoll, nommap, ugenkqfilter, D_OTHER,
};
#elif defined(__OpenBSD__)
cdev_decl(ugen);
#elif defined(__FreeBSD__)
d_open_t  ugenopen;
d_close_t ugenclose;
d_read_t  ugenread;
d_write_t ugenwrite;
d_ioctl_t ugenioctl;
d_poll_t  ugenpoll;

#define UGEN_CDEV_MAJOR	114

Static struct cdevsw ugen_cdevsw = {
	/* open */	ugenopen,
	/* close */	ugenclose,
	/* read */	ugenread,
	/* write */	ugenwrite,
	/* ioctl */	ugenioctl,
	/* poll */	ugenpoll,
	/* mmap */	nommap,
	/* strategy */	nostrategy,
	/* name */	"ugen",
	/* maj */	UGEN_CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	0,
	/* bmaj */	-1
};
#endif

Static void ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr,
		     usbd_status status);
Static void ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
			    usbd_status status);
#ifdef UGEN_BULK_RA_WB
Static void ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
			     usbd_status status);
Static void ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
			     usbd_status status);
#endif
Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
			 void *, int, struct lwp *);
Static int ugen_set_config(struct ugen_softc *sc, int configno);
Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *sc,
					       int index, int *lenp);
Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
Static int ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx);

#define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
#define UGENENDPOINT(n) (minor(n) & 0xf)
#define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))
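
/*
 * Illustrative sketch (editorial note, not part of the original source):
 * the minor number packs the unit in bits 4-7 and the endpoint address in
 * bits 0-3, so one ugen instance exposes up to 16 endpoint nodes.  On
 * NetBSD these are conventionally named /dev/ugen<unit>.<endpoint>; the
 * exact paths below are assumptions used only for illustration.
 *
 *	// userland: endpoint 0 is the control endpoint, always present
 *	int cfd = open("/dev/ugen0.00", O_RDWR);   // unit 0, endpoint 0
 *	int bfd = open("/dev/ugen0.01", O_RDWR);   // unit 0, endpoint 1
 *
 * With UGENUNIT()/UGENENDPOINT() above, minor 0x01 decodes to unit 0,
 * endpoint 1, and minor 0x12 would be unit 1, endpoint 2.
 */
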
USB_DECLARE_DRIVER(ugen);

/* toggle to control attach priority. -1 means "let autoconf decide" */
int ugen_override = -1;

USB_MATCH(ugen)
{
	USB_MATCH_START(ugen, uaa);
	int override;

	if (ugen_override != -1)
		override = ugen_override;
	else
		override = match->cf_flags & 1;

	if (override)
		return (UMATCH_HIGHEST);
	else if (uaa->usegeneric)
		return (UMATCH_GENERIC);
	else
		return (UMATCH_NONE);
}

USB_ATTACH(ugen)
{
	USB_ATTACH_START(ugen, sc, uaa);
	usbd_device_handle udev;
	char *devinfop;
	usbd_status err;
	int i, dir, conf;

	aprint_naive("\n");
	aprint_normal("\n");

	devinfop = usbd_devinfo_alloc(uaa->device, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_dev = self;
	sc->sc_udev = udev = uaa->device;

	/* First set configuration index 0, the default one for ugen. */
	err = usbd_set_config_index(udev, 0, 0);
	if (err) {
		aprint_error_dev(self,
		    "setting configuration index 0 failed\n");
		sc->sc_dying = 1;
		USB_ATTACH_ERROR_RETURN;
	}
	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;

	/* Set up all the local state for this configuration. */
	err = ugen_set_config(sc, conf);
	if (err) {
		aprint_error_dev(self, "setting configuration %d failed\n",
		    conf);
		sc->sc_dying = 1;
		USB_ATTACH_ERROR_RETURN;
	}

#ifdef __FreeBSD__
	{
		static int global_init_done = 0;
		if (!global_init_done) {
			cdevsw_add(&ugen_cdevsw);
			global_init_done = 1;
		}
	}
#endif
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce;

			sce = &sc->sc_endpoints[i][dir];
			selinit(&sce->rsel);
		}
	}

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev,
			   USBDEV(sc->sc_dev));

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	USB_ATTACH_SUCCESS_RETURN;
}

Static int
ugen_set_config(struct ugen_softc *sc, int configno)
{
	usbd_device_handle dev = sc->sc_udev;
	usb_config_descriptor_t *cdesc;
	usbd_interface_handle iface;
	usb_endpoint_descriptor_t *ed;
	struct ugen_endpoint *sce;
	u_int8_t niface, nendpt;
	int ifaceno, endptno, endpt;
	usbd_status err;
	int dir;

	DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
		    USBDEVNAME(sc->sc_dev), configno, sc));

	/*
	 * We start at 1, not 0, because we don't care whether the
	 * control endpoint is open or not. It is always present.
	 */
	for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
		if (sc->sc_is_open[endptno]) {
			DPRINTFN(1,
			     ("ugen_set_config: %s - endpoint %d is open\n",
			      USBDEVNAME(sc->sc_dev), endptno));
			return (USBD_IN_USE);
		}

	/* Avoid setting the current value. */
	cdesc = usbd_get_config_descriptor(dev);
	if (!cdesc || cdesc->bConfigurationValue != configno) {
		err = usbd_set_config_no(dev, configno, 1);
		if (err)
			return (err);
	}

	err = usbd_interface_count(dev, &niface);
	if (err)
		return (err);
	memset(sc->sc_endpoints, 0, sizeof sc->sc_endpoints);
	for (ifaceno = 0; ifaceno < niface; ifaceno++) {
		DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
		err = usbd_device2interface_handle(dev, ifaceno, &iface);
		if (err)
			return (err);
		err = usbd_endpoint_count(iface, &nendpt);
		if (err)
			return (err);
		for (endptno = 0; endptno < nendpt; endptno++) {
			ed = usbd_interface2endpoint_descriptor(iface,endptno);
			KASSERT(ed != NULL);
			endpt = ed->bEndpointAddress;
			dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
			sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
			DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
				    "(%d,%d), sce=%p\n",
				    endptno, endpt, UE_GET_ADDR(endpt),
				    UE_GET_DIR(endpt), sce));
			sce->sc = sc;
			sce->edesc = ed;
			sce->iface = iface;
		}
	}
	return (USBD_NORMAL_COMPLETION);
}
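
/*
 * Usage sketch (editorial note, not from the original source): userland
 * selects a configuration through the USB_SET_CONFIG ioctl on the control
 * endpoint node, which lands in ugen_set_config() above and fails with
 * EBUSY while any non-control endpoint of the device is open.  The device
 * path is an assumed example.
 *
 *	#include <dev/usb/usb.h>
 *
 *	int cfd = open("/dev/ugen0.00", O_RDWR);
 *	int conf = 1;				// bConfigurationValue
 *	if (ioctl(cfd, USB_SET_CONFIG, &conf) == -1)
 *		err(1, "USB_SET_CONFIG");	// EBUSY if endpoints open
 */
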
int
ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct ugen_softc *sc;
	int unit = UGENUNIT(dev);
	int endpt = UGENENDPOINT(dev);
	usb_endpoint_descriptor_t *edesc;
	struct ugen_endpoint *sce;
	int dir, isize;
	usbd_status err;
	usbd_xfer_handle xfer;
	void *tbuf;
	int i, j;

	USB_GET_SC_OPEN(ugen, unit, sc);

	DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
		     flag, mode, unit, endpt));

	if (sc == NULL || sc->sc_dying)
		return (ENXIO);

	/* The control endpoint allows multiple opens. */
	if (endpt == USB_CONTROL_ENDPOINT) {
		sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
		return (0);
	}

	if (sc->sc_is_open[endpt])
		return (EBUSY);

	/* Make sure there are pipes for all directions. */
	for (dir = OUT; dir <= IN; dir++) {
		if (flag & (dir == OUT ? FWRITE : FREAD)) {
			sce = &sc->sc_endpoints[endpt][dir];
			if (sce == 0 || sce->edesc == 0)
				return (ENXIO);
		}
	}

	/* Actually open the pipes. */
	/* XXX Should back out properly if it fails. */
	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		sce->state = 0;
		sce->timeout = USBD_NO_TIMEOUT;
		DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
			     sc, endpt, dir, sce));
		edesc = sce->edesc;
		switch (edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (dir == OUT) {
				err = usbd_open_pipe(sce->iface,
				    edesc->bEndpointAddress, 0, &sce->pipeh);
				if (err)
					return (EIO);
				break;
			}
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return (EINVAL);
			sce->ibuf = malloc(isize, M_USBDEV, M_WAITOK);
			DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
				     endpt, isize));
			if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1)
				return (ENOMEM);
			err = usbd_open_pipe_intr(sce->iface,
				  edesc->bEndpointAddress,
				  USBD_SHORT_XFER_OK, &sce->pipeh, sce,
				  sce->ibuf, isize, ugenintr,
				  USBD_DEFAULT_INTERVAL);
			if (err) {
				free(sce->ibuf, M_USBDEV);
				clfree(&sce->q);
				return (EIO);
			}
			DPRINTFN(5, ("ugenopen: interrupt open done\n"));
			break;
		case UE_BULK:
			err = usbd_open_pipe(sce->iface,
				  edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err)
				return (EIO);
#ifdef UGEN_BULK_RA_WB
			sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
			/*
			 * Use request size for non-RA/WB transfers
			 * as the default.
			 */
			sce->ra_wb_reqsize = UGEN_BBSIZE;
#endif
			break;
		case UE_ISOCHRONOUS:
			if (dir == OUT)
				return (EINVAL);
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return (EINVAL);
			sce->ibuf = malloc(isize * UGEN_NISOFRAMES,
				M_USBDEV, M_WAITOK);
			sce->cur = sce->fill = sce->ibuf;
			sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
			DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
				     endpt, isize));
			err = usbd_open_pipe(sce->iface,
				  edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err) {
				free(sce->ibuf, M_USBDEV);
				return (EIO);
			}
			for(i = 0; i < UGEN_NISOREQS; ++i) {
				sce->isoreqs[i].sce = sce;
				xfer = usbd_alloc_xfer(sc->sc_udev);
				if (xfer == 0)
					goto bad;
				sce->isoreqs[i].xfer = xfer;
				tbuf = usbd_alloc_buffer
					(xfer, isize * UGEN_NISORFRMS);
				if (tbuf == 0) {
					i++;
					goto bad;
				}
				sce->isoreqs[i].dmabuf = tbuf;
				for(j = 0; j < UGEN_NISORFRMS; ++j)
					sce->isoreqs[i].sizes[j] = isize;
				usbd_setup_isoc_xfer
					(xfer, sce->pipeh, &sce->isoreqs[i],
					 sce->isoreqs[i].sizes,
					 UGEN_NISORFRMS, USBD_NO_COPY,
					 ugen_isoc_rintr);
				(void)usbd_transfer(xfer);
			}
			DPRINTFN(5, ("ugenopen: isoc open done\n"));
			break;
		bad:
			while (--i >= 0) /* implicit buffer free */
				usbd_free_xfer(sce->isoreqs[i].xfer);
			return (ENOMEM);
		case UE_CONTROL:
			sce->timeout = USBD_DEFAULT_TIMEOUT;
			return (EINVAL);
		}
	}
	sc->sc_is_open[endpt] = 1;
	return (0);
}

int
ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	int dir;
	int i;

	USB_GET_SC(ugen, UGENUNIT(dev), sc);

	DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
		     flag, mode, UGENUNIT(dev), endpt));

#ifdef DIAGNOSTIC
	if (!sc->sc_is_open[endpt]) {
		printf("ugenclose: not open\n");
		return (EINVAL);
	}
#endif

	if (endpt == USB_CONTROL_ENDPOINT) {
		DPRINTFN(5, ("ugenclose: close control\n"));
		sc->sc_is_open[endpt] = 0;
		return (0);
	}

	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ?
FWRITE : FREAD))) 537 continue; 538 sce = &sc->sc_endpoints[endpt][dir]; 539 if (sce == NULL || sce->pipeh == NULL) 540 continue; 541 DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n", 542 endpt, dir, sce)); 543 544 usbd_abort_pipe(sce->pipeh); 545 usbd_close_pipe(sce->pipeh); 546 sce->pipeh = NULL; 547 548 switch (sce->edesc->bmAttributes & UE_XFERTYPE) { 549 case UE_INTERRUPT: 550 ndflush(&sce->q, sce->q.c_cc); 551 clfree(&sce->q); 552 break; 553 case UE_ISOCHRONOUS: 554 for (i = 0; i < UGEN_NISOREQS; ++i) 555 usbd_free_xfer(sce->isoreqs[i].xfer); 556 break; 557 #ifdef UGEN_BULK_RA_WB 558 case UE_BULK: 559 if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB)) 560 /* ibuf freed below */ 561 usbd_free_xfer(sce->ra_wb_xfer); 562 break; 563 #endif 564 default: 565 break; 566 } 567 568 if (sce->ibuf != NULL) { 569 free(sce->ibuf, M_USBDEV); 570 sce->ibuf = NULL; 571 clfree(&sce->q); 572 } 573 } 574 sc->sc_is_open[endpt] = 0; 575 576 return (0); 577 } 578 579 Static int 580 ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag) 581 { 582 struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN]; 583 u_int32_t n, tn; 584 usbd_xfer_handle xfer; 585 usbd_status err; 586 int s; 587 int error = 0; 588 589 DPRINTFN(5, ("%s: ugenread: %d\n", USBDEVNAME(sc->sc_dev), endpt)); 590 591 if (sc->sc_dying) 592 return (EIO); 593 594 if (endpt == USB_CONTROL_ENDPOINT) 595 return (ENODEV); 596 597 #ifdef DIAGNOSTIC 598 if (sce->edesc == NULL) { 599 printf("ugenread: no edesc\n"); 600 return (EIO); 601 } 602 if (sce->pipeh == NULL) { 603 printf("ugenread: no pipe\n"); 604 return (EIO); 605 } 606 #endif 607 608 switch (sce->edesc->bmAttributes & UE_XFERTYPE) { 609 case UE_INTERRUPT: 610 /* Block until activity occurred. */ 611 s = splusb(); 612 while (sce->q.c_cc == 0) { 613 if (flag & IO_NDELAY) { 614 splx(s); 615 return (EWOULDBLOCK); 616 } 617 sce->state |= UGEN_ASLP; 618 DPRINTFN(5, ("ugenread: sleep on %p\n", sce)); 619 error = tsleep(sce, PZERO | PCATCH, "ugenri", 0); 620 DPRINTFN(5, ("ugenread: woke, error=%d\n", error)); 621 if (sc->sc_dying) 622 error = EIO; 623 if (error) { 624 sce->state &= ~UGEN_ASLP; 625 break; 626 } 627 } 628 splx(s); 629 630 /* Transfer as many chunks as possible. */ 631 while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) { 632 n = min(sce->q.c_cc, uio->uio_resid); 633 if (n > sizeof(sc->sc_buffer)) 634 n = sizeof(sc->sc_buffer); 635 636 /* Remove a small chunk from the input queue. */ 637 q_to_b(&sce->q, sc->sc_buffer, n); 638 DPRINTFN(5, ("ugenread: got %d chars\n", n)); 639 640 /* Copy the data to the user process. */ 641 error = uiomove(sc->sc_buffer, n, uio); 642 if (error) 643 break; 644 } 645 break; 646 case UE_BULK: 647 #ifdef UGEN_BULK_RA_WB 648 if (sce->state & UGEN_BULK_RA) { 649 DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n", 650 uio->uio_resid, sce->ra_wb_used)); 651 xfer = sce->ra_wb_xfer; 652 653 s = splusb(); 654 if (sce->ra_wb_used == 0 && flag & IO_NDELAY) { 655 splx(s); 656 return (EWOULDBLOCK); 657 } 658 while (uio->uio_resid > 0 && !error) { 659 while (sce->ra_wb_used == 0) { 660 sce->state |= UGEN_ASLP; 661 DPRINTFN(5, 662 ("ugenread: sleep on %p\n", 663 sce)); 664 error = tsleep(sce, PZERO | PCATCH, 665 "ugenrb", 0); 666 DPRINTFN(5, 667 ("ugenread: woke, error=%d\n", 668 error)); 669 if (sc->sc_dying) 670 error = EIO; 671 if (error) { 672 sce->state &= ~UGEN_ASLP; 673 break; 674 } 675 } 676 677 /* Copy data to the process. 
*/ 678 while (uio->uio_resid > 0 679 && sce->ra_wb_used > 0) { 680 n = min(uio->uio_resid, 681 sce->ra_wb_used); 682 n = min(n, sce->limit - sce->cur); 683 error = uiomove(sce->cur, n, uio); 684 if (error) 685 break; 686 sce->cur += n; 687 sce->ra_wb_used -= n; 688 if (sce->cur == sce->limit) 689 sce->cur = sce->ibuf; 690 } 691 692 /* 693 * If the transfers stopped because the 694 * buffer was full, restart them. 695 */ 696 if (sce->state & UGEN_RA_WB_STOP && 697 sce->ra_wb_used < sce->limit - sce->ibuf) { 698 n = (sce->limit - sce->ibuf) 699 - sce->ra_wb_used; 700 usbd_setup_xfer(xfer, 701 sce->pipeh, sce, NULL, 702 min(n, sce->ra_wb_xferlen), 703 USBD_NO_COPY, USBD_NO_TIMEOUT, 704 ugen_bulkra_intr); 705 sce->state &= ~UGEN_RA_WB_STOP; 706 err = usbd_transfer(xfer); 707 if (err != USBD_IN_PROGRESS) 708 /* 709 * The transfer has not been 710 * queued. Setting STOP 711 * will make us try 712 * again at the next read. 713 */ 714 sce->state |= UGEN_RA_WB_STOP; 715 } 716 } 717 splx(s); 718 break; 719 } 720 #endif 721 xfer = usbd_alloc_xfer(sc->sc_udev); 722 if (xfer == 0) 723 return (ENOMEM); 724 while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) { 725 DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n)); 726 tn = n; 727 err = usbd_bulk_transfer( 728 xfer, sce->pipeh, 729 sce->state & UGEN_SHORT_OK ? 730 USBD_SHORT_XFER_OK : 0, 731 sce->timeout, sc->sc_buffer, &tn, "ugenrb"); 732 if (err) { 733 if (err == USBD_INTERRUPTED) 734 error = EINTR; 735 else if (err == USBD_TIMEOUT) 736 error = ETIMEDOUT; 737 else 738 error = EIO; 739 break; 740 } 741 DPRINTFN(1, ("ugenread: got %d bytes\n", tn)); 742 error = uiomove(sc->sc_buffer, tn, uio); 743 if (error || tn < n) 744 break; 745 } 746 usbd_free_xfer(xfer); 747 break; 748 case UE_ISOCHRONOUS: 749 s = splusb(); 750 while (sce->cur == sce->fill) { 751 if (flag & IO_NDELAY) { 752 splx(s); 753 return (EWOULDBLOCK); 754 } 755 sce->state |= UGEN_ASLP; 756 DPRINTFN(5, ("ugenread: sleep on %p\n", sce)); 757 error = tsleep(sce, PZERO | PCATCH, "ugenri", 0); 758 DPRINTFN(5, ("ugenread: woke, error=%d\n", error)); 759 if (sc->sc_dying) 760 error = EIO; 761 if (error) { 762 sce->state &= ~UGEN_ASLP; 763 break; 764 } 765 } 766 767 while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) { 768 if(sce->fill > sce->cur) 769 n = min(sce->fill - sce->cur, uio->uio_resid); 770 else 771 n = min(sce->limit - sce->cur, uio->uio_resid); 772 773 DPRINTFN(5, ("ugenread: isoc got %d chars\n", n)); 774 775 /* Copy the data to the user process. 
*/ 776 error = uiomove(sce->cur, n, uio); 777 if (error) 778 break; 779 sce->cur += n; 780 if(sce->cur >= sce->limit) 781 sce->cur = sce->ibuf; 782 } 783 splx(s); 784 break; 785 786 787 default: 788 return (ENXIO); 789 } 790 return (error); 791 } 792 793 int 794 ugenread(dev_t dev, struct uio *uio, int flag) 795 { 796 int endpt = UGENENDPOINT(dev); 797 struct ugen_softc *sc; 798 int error; 799 800 USB_GET_SC(ugen, UGENUNIT(dev), sc); 801 802 sc->sc_refcnt++; 803 error = ugen_do_read(sc, endpt, uio, flag); 804 if (--sc->sc_refcnt < 0) 805 usb_detach_wakeup(USBDEV(sc->sc_dev)); 806 return (error); 807 } 808 809 Static int 810 ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio, 811 int flag) 812 { 813 struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT]; 814 u_int32_t n; 815 int error = 0; 816 #ifdef UGEN_BULK_RA_WB 817 int s; 818 u_int32_t tn; 819 char *dbuf; 820 #endif 821 usbd_xfer_handle xfer; 822 usbd_status err; 823 824 DPRINTFN(5, ("%s: ugenwrite: %d\n", USBDEVNAME(sc->sc_dev), endpt)); 825 826 if (sc->sc_dying) 827 return (EIO); 828 829 if (endpt == USB_CONTROL_ENDPOINT) 830 return (ENODEV); 831 832 #ifdef DIAGNOSTIC 833 if (sce->edesc == NULL) { 834 printf("ugenwrite: no edesc\n"); 835 return (EIO); 836 } 837 if (sce->pipeh == NULL) { 838 printf("ugenwrite: no pipe\n"); 839 return (EIO); 840 } 841 #endif 842 843 switch (sce->edesc->bmAttributes & UE_XFERTYPE) { 844 case UE_BULK: 845 #ifdef UGEN_BULK_RA_WB 846 if (sce->state & UGEN_BULK_WB) { 847 DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n", 848 uio->uio_resid, sce->ra_wb_used)); 849 xfer = sce->ra_wb_xfer; 850 851 s = splusb(); 852 if (sce->ra_wb_used == sce->limit - sce->ibuf && 853 flag & IO_NDELAY) { 854 splx(s); 855 return (EWOULDBLOCK); 856 } 857 while (uio->uio_resid > 0 && !error) { 858 while (sce->ra_wb_used == 859 sce->limit - sce->ibuf) { 860 sce->state |= UGEN_ASLP; 861 DPRINTFN(5, 862 ("ugenwrite: sleep on %p\n", 863 sce)); 864 error = tsleep(sce, PZERO | PCATCH, 865 "ugenwb", 0); 866 DPRINTFN(5, 867 ("ugenwrite: woke, error=%d\n", 868 error)); 869 if (sc->sc_dying) 870 error = EIO; 871 if (error) { 872 sce->state &= ~UGEN_ASLP; 873 break; 874 } 875 } 876 877 /* Copy data from the process. */ 878 while (uio->uio_resid > 0 && 879 sce->ra_wb_used < sce->limit - sce->ibuf) { 880 n = min(uio->uio_resid, 881 (sce->limit - sce->ibuf) 882 - sce->ra_wb_used); 883 n = min(n, sce->limit - sce->fill); 884 error = uiomove(sce->fill, n, uio); 885 if (error) 886 break; 887 sce->fill += n; 888 sce->ra_wb_used += n; 889 if (sce->fill == sce->limit) 890 sce->fill = sce->ibuf; 891 } 892 893 /* 894 * If the transfers stopped because the 895 * buffer was empty, restart them. 896 */ 897 if (sce->state & UGEN_RA_WB_STOP && 898 sce->ra_wb_used > 0) { 899 dbuf = (char *)usbd_get_buffer(xfer); 900 n = min(sce->ra_wb_used, 901 sce->ra_wb_xferlen); 902 tn = min(n, sce->limit - sce->cur); 903 memcpy(dbuf, sce->cur, tn); 904 dbuf += tn; 905 if (n - tn > 0) 906 memcpy(dbuf, sce->ibuf, 907 n - tn); 908 usbd_setup_xfer(xfer, 909 sce->pipeh, sce, NULL, n, 910 USBD_NO_COPY, USBD_NO_TIMEOUT, 911 ugen_bulkwb_intr); 912 sce->state &= ~UGEN_RA_WB_STOP; 913 err = usbd_transfer(xfer); 914 if (err != USBD_IN_PROGRESS) 915 /* 916 * The transfer has not been 917 * queued. Setting STOP 918 * will make us try again 919 * at the next read. 
920 */ 921 sce->state |= UGEN_RA_WB_STOP; 922 } 923 } 924 splx(s); 925 break; 926 } 927 #endif 928 xfer = usbd_alloc_xfer(sc->sc_udev); 929 if (xfer == 0) 930 return (EIO); 931 while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) { 932 error = uiomove(sc->sc_buffer, n, uio); 933 if (error) 934 break; 935 DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n)); 936 err = usbd_bulk_transfer(xfer, sce->pipeh, 0, 937 sce->timeout, sc->sc_buffer, &n,"ugenwb"); 938 if (err) { 939 if (err == USBD_INTERRUPTED) 940 error = EINTR; 941 else if (err == USBD_TIMEOUT) 942 error = ETIMEDOUT; 943 else 944 error = EIO; 945 break; 946 } 947 } 948 usbd_free_xfer(xfer); 949 break; 950 case UE_INTERRUPT: 951 xfer = usbd_alloc_xfer(sc->sc_udev); 952 if (xfer == 0) 953 return (EIO); 954 while ((n = min(UGETW(sce->edesc->wMaxPacketSize), 955 uio->uio_resid)) != 0) { 956 error = uiomove(sc->sc_buffer, n, uio); 957 if (error) 958 break; 959 DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n)); 960 err = usbd_intr_transfer(xfer, sce->pipeh, 0, 961 sce->timeout, sc->sc_buffer, &n, "ugenwi"); 962 if (err) { 963 if (err == USBD_INTERRUPTED) 964 error = EINTR; 965 else if (err == USBD_TIMEOUT) 966 error = ETIMEDOUT; 967 else 968 error = EIO; 969 break; 970 } 971 } 972 usbd_free_xfer(xfer); 973 break; 974 default: 975 return (ENXIO); 976 } 977 return (error); 978 } 979 980 int 981 ugenwrite(dev_t dev, struct uio *uio, int flag) 982 { 983 int endpt = UGENENDPOINT(dev); 984 struct ugen_softc *sc; 985 int error; 986 987 USB_GET_SC(ugen, UGENUNIT(dev), sc); 988 989 sc->sc_refcnt++; 990 error = ugen_do_write(sc, endpt, uio, flag); 991 if (--sc->sc_refcnt < 0) 992 usb_detach_wakeup(USBDEV(sc->sc_dev)); 993 return (error); 994 } 995 996 #if defined(__NetBSD__) || defined(__OpenBSD__) 997 int 998 ugen_activate(device_ptr_t self, enum devact act) 999 { 1000 struct ugen_softc *sc = device_private(self); 1001 1002 switch (act) { 1003 case DVACT_ACTIVATE: 1004 return (EOPNOTSUPP); 1005 1006 case DVACT_DEACTIVATE: 1007 sc->sc_dying = 1; 1008 break; 1009 } 1010 return (0); 1011 } 1012 #endif 1013 1014 USB_DETACH(ugen) 1015 { 1016 USB_DETACH_START(ugen, sc); 1017 struct ugen_endpoint *sce; 1018 int i, dir; 1019 int s; 1020 #if defined(__NetBSD__) || defined(__OpenBSD__) 1021 int maj, mn; 1022 1023 DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags)); 1024 #elif defined(__FreeBSD__) 1025 DPRINTF(("ugen_detach: sc=%p\n", sc)); 1026 #endif 1027 1028 sc->sc_dying = 1; 1029 pmf_device_deregister(self); 1030 /* Abort all pipes. Causes processes waiting for transfer to wake. */ 1031 for (i = 0; i < USB_MAX_ENDPOINTS; i++) { 1032 for (dir = OUT; dir <= IN; dir++) { 1033 sce = &sc->sc_endpoints[i][dir]; 1034 if (sce && sce->pipeh) 1035 usbd_abort_pipe(sce->pipeh); 1036 } 1037 } 1038 1039 s = splusb(); 1040 if (--sc->sc_refcnt >= 0) { 1041 /* Wake everyone */ 1042 for (i = 0; i < USB_MAX_ENDPOINTS; i++) 1043 wakeup(&sc->sc_endpoints[i][IN]); 1044 /* Wait for processes to go away. */ 1045 usb_detach_wait(USBDEV(sc->sc_dev)); 1046 } 1047 splx(s); 1048 1049 #if defined(__NetBSD__) || defined(__OpenBSD__) 1050 /* locate the major number */ 1051 #if defined(__NetBSD__) 1052 maj = cdevsw_lookup_major(&ugen_cdevsw); 1053 #elif defined(__OpenBSD__) 1054 for (maj = 0; maj < nchrdev; maj++) 1055 if (cdevsw[maj].d_open == ugenopen) 1056 break; 1057 #endif 1058 1059 /* Nuke the vnodes for any open instances (calls close). 
*/ 1060 mn = device_unit(self) * USB_MAX_ENDPOINTS; 1061 vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR); 1062 #elif defined(__FreeBSD__) 1063 /* XXX not implemented yet */ 1064 #endif 1065 1066 usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev, 1067 USBDEV(sc->sc_dev)); 1068 1069 for (i = 0; i < USB_MAX_ENDPOINTS; i++) { 1070 for (dir = OUT; dir <= IN; dir++) { 1071 sce = &sc->sc_endpoints[i][dir]; 1072 seldestroy(&sce->rsel); 1073 } 1074 } 1075 1076 return (0); 1077 } 1078 1079 Static void 1080 ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr, usbd_status status) 1081 { 1082 struct ugen_endpoint *sce = addr; 1083 /*struct ugen_softc *sc = sce->sc;*/ 1084 u_int32_t count; 1085 u_char *ibuf; 1086 1087 if (status == USBD_CANCELLED) 1088 return; 1089 1090 if (status != USBD_NORMAL_COMPLETION) { 1091 DPRINTF(("ugenintr: status=%d\n", status)); 1092 if (status == USBD_STALLED) 1093 usbd_clear_endpoint_stall_async(sce->pipeh); 1094 return; 1095 } 1096 1097 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL); 1098 ibuf = sce->ibuf; 1099 1100 DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n", 1101 xfer, status, count)); 1102 DPRINTFN(5, (" data = %02x %02x %02x\n", 1103 ibuf[0], ibuf[1], ibuf[2])); 1104 1105 (void)b_to_q(ibuf, count, &sce->q); 1106 1107 if (sce->state & UGEN_ASLP) { 1108 sce->state &= ~UGEN_ASLP; 1109 DPRINTFN(5, ("ugen_intr: waking %p\n", sce)); 1110 wakeup(sce); 1111 } 1112 selnotify(&sce->rsel, 0, 0); 1113 } 1114 1115 Static void 1116 ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr, 1117 usbd_status status) 1118 { 1119 struct isoreq *req = addr; 1120 struct ugen_endpoint *sce = req->sce; 1121 u_int32_t count, n; 1122 int i, isize; 1123 1124 /* Return if we are aborting. */ 1125 if (status == USBD_CANCELLED) 1126 return; 1127 1128 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL); 1129 DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n", 1130 (long)(req - sce->isoreqs), count)); 1131 1132 /* throw away oldest input if the buffer is full */ 1133 if(sce->fill < sce->cur && sce->cur <= sce->fill + count) { 1134 sce->cur += count; 1135 if(sce->cur >= sce->limit) 1136 sce->cur = sce->ibuf + (sce->limit - sce->cur); 1137 DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n", 1138 count)); 1139 } 1140 1141 isize = UGETW(sce->edesc->wMaxPacketSize); 1142 for (i = 0; i < UGEN_NISORFRMS; i++) { 1143 u_int32_t actlen = req->sizes[i]; 1144 char const *tbuf = (char const *)req->dmabuf + isize * i; 1145 1146 /* copy data to buffer */ 1147 while (actlen > 0) { 1148 n = min(actlen, sce->limit - sce->fill); 1149 memcpy(sce->fill, tbuf, n); 1150 1151 tbuf += n; 1152 actlen -= n; 1153 sce->fill += n; 1154 if(sce->fill == sce->limit) 1155 sce->fill = sce->ibuf; 1156 } 1157 1158 /* setup size for next transfer */ 1159 req->sizes[i] = isize; 1160 } 1161 1162 usbd_setup_isoc_xfer(xfer, sce->pipeh, req, req->sizes, UGEN_NISORFRMS, 1163 USBD_NO_COPY, ugen_isoc_rintr); 1164 (void)usbd_transfer(xfer); 1165 1166 if (sce->state & UGEN_ASLP) { 1167 sce->state &= ~UGEN_ASLP; 1168 DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce)); 1169 wakeup(sce); 1170 } 1171 selnotify(&sce->rsel, 0, 0); 1172 } 1173 1174 #ifdef UGEN_BULK_RA_WB 1175 Static void 1176 ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr, 1177 usbd_status status) 1178 { 1179 struct ugen_endpoint *sce = addr; 1180 u_int32_t count, n; 1181 char const *tbuf; 1182 usbd_status err; 1183 1184 /* Return if we are aborting. 
*/ 1185 if (status == USBD_CANCELLED) 1186 return; 1187 1188 if (status != USBD_NORMAL_COMPLETION) { 1189 DPRINTF(("ugen_bulkra_intr: status=%d\n", status)); 1190 sce->state |= UGEN_RA_WB_STOP; 1191 if (status == USBD_STALLED) 1192 usbd_clear_endpoint_stall_async(sce->pipeh); 1193 return; 1194 } 1195 1196 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL); 1197 1198 /* Keep track of how much is in the buffer. */ 1199 sce->ra_wb_used += count; 1200 1201 /* Copy data to buffer. */ 1202 tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer); 1203 n = min(count, sce->limit - sce->fill); 1204 memcpy(sce->fill, tbuf, n); 1205 tbuf += n; 1206 count -= n; 1207 sce->fill += n; 1208 if (sce->fill == sce->limit) 1209 sce->fill = sce->ibuf; 1210 if (count > 0) { 1211 memcpy(sce->fill, tbuf, count); 1212 sce->fill += count; 1213 } 1214 1215 /* Set up the next request if necessary. */ 1216 n = (sce->limit - sce->ibuf) - sce->ra_wb_used; 1217 if (n > 0) { 1218 usbd_setup_xfer(xfer, sce->pipeh, sce, NULL, 1219 min(n, sce->ra_wb_xferlen), USBD_NO_COPY, 1220 USBD_NO_TIMEOUT, ugen_bulkra_intr); 1221 err = usbd_transfer(xfer); 1222 if (err != USBD_IN_PROGRESS) { 1223 printf("usbd_bulkra_intr: error=%d\n", err); 1224 /* 1225 * The transfer has not been queued. Setting STOP 1226 * will make us try again at the next read. 1227 */ 1228 sce->state |= UGEN_RA_WB_STOP; 1229 } 1230 } 1231 else 1232 sce->state |= UGEN_RA_WB_STOP; 1233 1234 if (sce->state & UGEN_ASLP) { 1235 sce->state &= ~UGEN_ASLP; 1236 DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce)); 1237 wakeup(sce); 1238 } 1239 selnotify(&sce->rsel, 0, 0); 1240 } 1241 1242 Static void 1243 ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr, 1244 usbd_status status) 1245 { 1246 struct ugen_endpoint *sce = addr; 1247 u_int32_t count, n; 1248 char *tbuf; 1249 usbd_status err; 1250 1251 /* Return if we are aborting. */ 1252 if (status == USBD_CANCELLED) 1253 return; 1254 1255 if (status != USBD_NORMAL_COMPLETION) { 1256 DPRINTF(("ugen_bulkwb_intr: status=%d\n", status)); 1257 sce->state |= UGEN_RA_WB_STOP; 1258 if (status == USBD_STALLED) 1259 usbd_clear_endpoint_stall_async(sce->pipeh); 1260 return; 1261 } 1262 1263 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL); 1264 1265 /* Keep track of how much is in the buffer. */ 1266 sce->ra_wb_used -= count; 1267 1268 /* Update buffer pointers. */ 1269 sce->cur += count; 1270 if (sce->cur >= sce->limit) 1271 sce->cur = sce->ibuf + (sce->cur - sce->limit); 1272 1273 /* Set up next request if necessary. */ 1274 if (sce->ra_wb_used > 0) { 1275 /* copy data from buffer */ 1276 tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer); 1277 count = min(sce->ra_wb_used, sce->ra_wb_xferlen); 1278 n = min(count, sce->limit - sce->cur); 1279 memcpy(tbuf, sce->cur, n); 1280 tbuf += n; 1281 if (count - n > 0) 1282 memcpy(tbuf, sce->ibuf, count - n); 1283 1284 usbd_setup_xfer(xfer, sce->pipeh, sce, NULL, 1285 count, USBD_NO_COPY, USBD_NO_TIMEOUT, ugen_bulkwb_intr); 1286 err = usbd_transfer(xfer); 1287 if (err != USBD_IN_PROGRESS) { 1288 printf("usbd_bulkwb_intr: error=%d\n", err); 1289 /* 1290 * The transfer has not been queued. Setting STOP 1291 * will make us try again at the next write. 
1292 */ 1293 sce->state |= UGEN_RA_WB_STOP; 1294 } 1295 } 1296 else 1297 sce->state |= UGEN_RA_WB_STOP; 1298 1299 if (sce->state & UGEN_ASLP) { 1300 sce->state &= ~UGEN_ASLP; 1301 DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce)); 1302 wakeup(sce); 1303 } 1304 selnotify(&sce->rsel, 0, 0); 1305 } 1306 #endif 1307 1308 Static usbd_status 1309 ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno) 1310 { 1311 usbd_interface_handle iface; 1312 usb_endpoint_descriptor_t *ed; 1313 usbd_status err; 1314 struct ugen_endpoint *sce; 1315 u_int8_t niface, nendpt, endptno, endpt; 1316 int dir; 1317 1318 DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno)); 1319 1320 err = usbd_interface_count(sc->sc_udev, &niface); 1321 if (err) 1322 return (err); 1323 if (ifaceidx < 0 || ifaceidx >= niface) 1324 return (USBD_INVAL); 1325 1326 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface); 1327 if (err) 1328 return (err); 1329 err = usbd_endpoint_count(iface, &nendpt); 1330 if (err) 1331 return (err); 1332 /* XXX should only do this after setting new altno has succeeded */ 1333 for (endptno = 0; endptno < nendpt; endptno++) { 1334 ed = usbd_interface2endpoint_descriptor(iface,endptno); 1335 endpt = ed->bEndpointAddress; 1336 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT; 1337 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir]; 1338 sce->sc = 0; 1339 sce->edesc = 0; 1340 sce->iface = 0; 1341 } 1342 1343 /* change setting */ 1344 err = usbd_set_interface(iface, altno); 1345 if (err) 1346 return (err); 1347 1348 err = usbd_endpoint_count(iface, &nendpt); 1349 if (err) 1350 return (err); 1351 for (endptno = 0; endptno < nendpt; endptno++) { 1352 ed = usbd_interface2endpoint_descriptor(iface,endptno); 1353 KASSERT(ed != NULL); 1354 endpt = ed->bEndpointAddress; 1355 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT; 1356 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir]; 1357 sce->sc = sc; 1358 sce->edesc = ed; 1359 sce->iface = iface; 1360 } 1361 return (0); 1362 } 1363 1364 /* Retrieve a complete descriptor for a certain device and index. 
*/ 1365 Static usb_config_descriptor_t * 1366 ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp) 1367 { 1368 usb_config_descriptor_t *cdesc, *tdesc, cdescr; 1369 int len; 1370 usbd_status err; 1371 1372 if (index == USB_CURRENT_CONFIG_INDEX) { 1373 tdesc = usbd_get_config_descriptor(sc->sc_udev); 1374 len = UGETW(tdesc->wTotalLength); 1375 if (lenp) 1376 *lenp = len; 1377 cdesc = malloc(len, M_TEMP, M_WAITOK); 1378 memcpy(cdesc, tdesc, len); 1379 DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len)); 1380 } else { 1381 err = usbd_get_config_desc(sc->sc_udev, index, &cdescr); 1382 if (err) 1383 return (0); 1384 len = UGETW(cdescr.wTotalLength); 1385 DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len)); 1386 if (lenp) 1387 *lenp = len; 1388 cdesc = malloc(len, M_TEMP, M_WAITOK); 1389 err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len); 1390 if (err) { 1391 free(cdesc, M_TEMP); 1392 return (0); 1393 } 1394 } 1395 return (cdesc); 1396 } 1397 1398 Static int 1399 ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx) 1400 { 1401 usbd_interface_handle iface; 1402 usbd_status err; 1403 1404 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface); 1405 if (err) 1406 return (-1); 1407 return (usbd_get_interface_altindex(iface)); 1408 } 1409 1410 Static int 1411 ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd, 1412 void *addr, int flag, struct lwp *l) 1413 { 1414 struct ugen_endpoint *sce; 1415 usbd_status err; 1416 usbd_interface_handle iface; 1417 struct usb_config_desc *cd; 1418 usb_config_descriptor_t *cdesc; 1419 struct usb_interface_desc *id; 1420 usb_interface_descriptor_t *idesc; 1421 struct usb_endpoint_desc *ed; 1422 usb_endpoint_descriptor_t *edesc; 1423 struct usb_alt_interface *ai; 1424 struct usb_string_desc *si; 1425 u_int8_t conf, alt; 1426 1427 DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd)); 1428 if (sc->sc_dying) 1429 return (EIO); 1430 1431 switch (cmd) { 1432 case FIONBIO: 1433 /* All handled in the upper FS layer. */ 1434 return (0); 1435 case USB_SET_SHORT_XFER: 1436 if (endpt == USB_CONTROL_ENDPOINT) 1437 return (EINVAL); 1438 /* This flag only affects read */ 1439 sce = &sc->sc_endpoints[endpt][IN]; 1440 if (sce == NULL || sce->pipeh == NULL) 1441 return (EINVAL); 1442 if (*(int *)addr) 1443 sce->state |= UGEN_SHORT_OK; 1444 else 1445 sce->state &= ~UGEN_SHORT_OK; 1446 return (0); 1447 case USB_SET_TIMEOUT: 1448 sce = &sc->sc_endpoints[endpt][IN]; 1449 if (sce == NULL 1450 /* XXX this shouldn't happen, but the distinction between 1451 input and output pipes isn't clear enough. 1452 || sce->pipeh == NULL */ 1453 ) 1454 return (EINVAL); 1455 sce->timeout = *(int *)addr; 1456 return (0); 1457 case USB_SET_BULK_RA: 1458 #ifdef UGEN_BULK_RA_WB 1459 if (endpt == USB_CONTROL_ENDPOINT) 1460 return (EINVAL); 1461 sce = &sc->sc_endpoints[endpt][IN]; 1462 if (sce == NULL || sce->pipeh == NULL) 1463 return (EINVAL); 1464 edesc = sce->edesc; 1465 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK) 1466 return (EINVAL); 1467 1468 if (*(int *)addr) { 1469 /* Only turn RA on if it's currently off. */ 1470 if (sce->state & UGEN_BULK_RA) 1471 return (0); 1472 1473 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0) 1474 /* shouldn't happen */ 1475 return (EINVAL); 1476 sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev); 1477 if (sce->ra_wb_xfer == NULL) 1478 return (ENOMEM); 1479 sce->ra_wb_xferlen = sce->ra_wb_reqsize; 1480 /* 1481 * Set up a dmabuf because we reuse the xfer with 1482 * the same (max) request length like isoc. 
1483 */ 1484 if (usbd_alloc_buffer(sce->ra_wb_xfer, 1485 sce->ra_wb_xferlen) == 0) { 1486 usbd_free_xfer(sce->ra_wb_xfer); 1487 return (ENOMEM); 1488 } 1489 sce->ibuf = malloc(sce->ra_wb_bufsize, 1490 M_USBDEV, M_WAITOK); 1491 sce->fill = sce->cur = sce->ibuf; 1492 sce->limit = sce->ibuf + sce->ra_wb_bufsize; 1493 sce->ra_wb_used = 0; 1494 sce->state |= UGEN_BULK_RA; 1495 sce->state &= ~UGEN_RA_WB_STOP; 1496 /* Now start reading. */ 1497 usbd_setup_xfer(sce->ra_wb_xfer, sce->pipeh, sce, 1498 NULL, 1499 min(sce->ra_wb_xferlen, sce->ra_wb_bufsize), 1500 USBD_NO_COPY, USBD_NO_TIMEOUT, 1501 ugen_bulkra_intr); 1502 err = usbd_transfer(sce->ra_wb_xfer); 1503 if (err != USBD_IN_PROGRESS) { 1504 sce->state &= ~UGEN_BULK_RA; 1505 free(sce->ibuf, M_USBDEV); 1506 sce->ibuf = NULL; 1507 usbd_free_xfer(sce->ra_wb_xfer); 1508 return (EIO); 1509 } 1510 } else { 1511 /* Only turn RA off if it's currently on. */ 1512 if (!(sce->state & UGEN_BULK_RA)) 1513 return (0); 1514 1515 sce->state &= ~UGEN_BULK_RA; 1516 usbd_abort_pipe(sce->pipeh); 1517 usbd_free_xfer(sce->ra_wb_xfer); 1518 /* 1519 * XXX Discard whatever's in the buffer, but we 1520 * should keep it around and drain the buffer 1521 * instead. 1522 */ 1523 free(sce->ibuf, M_USBDEV); 1524 sce->ibuf = NULL; 1525 } 1526 return (0); 1527 #else 1528 return (EOPNOTSUPP); 1529 #endif 1530 case USB_SET_BULK_WB: 1531 #ifdef UGEN_BULK_RA_WB 1532 if (endpt == USB_CONTROL_ENDPOINT) 1533 return (EINVAL); 1534 sce = &sc->sc_endpoints[endpt][OUT]; 1535 if (sce == NULL || sce->pipeh == NULL) 1536 return (EINVAL); 1537 edesc = sce->edesc; 1538 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK) 1539 return (EINVAL); 1540 1541 if (*(int *)addr) { 1542 /* Only turn WB on if it's currently off. */ 1543 if (sce->state & UGEN_BULK_WB) 1544 return (0); 1545 1546 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0) 1547 /* shouldn't happen */ 1548 return (EINVAL); 1549 sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev); 1550 if (sce->ra_wb_xfer == NULL) 1551 return (ENOMEM); 1552 sce->ra_wb_xferlen = sce->ra_wb_reqsize; 1553 /* 1554 * Set up a dmabuf because we reuse the xfer with 1555 * the same (max) request length like isoc. 1556 */ 1557 if (usbd_alloc_buffer(sce->ra_wb_xfer, 1558 sce->ra_wb_xferlen) == 0) { 1559 usbd_free_xfer(sce->ra_wb_xfer); 1560 return (ENOMEM); 1561 } 1562 sce->ibuf = malloc(sce->ra_wb_bufsize, 1563 M_USBDEV, M_WAITOK); 1564 sce->fill = sce->cur = sce->ibuf; 1565 sce->limit = sce->ibuf + sce->ra_wb_bufsize; 1566 sce->ra_wb_used = 0; 1567 sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP; 1568 } else { 1569 /* Only turn WB off if it's currently on. */ 1570 if (!(sce->state & UGEN_BULK_WB)) 1571 return (0); 1572 1573 sce->state &= ~UGEN_BULK_WB; 1574 /* 1575 * XXX Discard whatever's in the buffer, but we 1576 * should keep it around and keep writing to 1577 * drain the buffer instead. 
1578 */ 1579 usbd_abort_pipe(sce->pipeh); 1580 usbd_free_xfer(sce->ra_wb_xfer); 1581 free(sce->ibuf, M_USBDEV); 1582 sce->ibuf = NULL; 1583 } 1584 return (0); 1585 #else 1586 return (EOPNOTSUPP); 1587 #endif 1588 case USB_SET_BULK_RA_OPT: 1589 case USB_SET_BULK_WB_OPT: 1590 #ifdef UGEN_BULK_RA_WB 1591 { 1592 struct usb_bulk_ra_wb_opt *opt; 1593 1594 if (endpt == USB_CONTROL_ENDPOINT) 1595 return (EINVAL); 1596 opt = (struct usb_bulk_ra_wb_opt *)addr; 1597 if (cmd == USB_SET_BULK_RA_OPT) 1598 sce = &sc->sc_endpoints[endpt][IN]; 1599 else 1600 sce = &sc->sc_endpoints[endpt][OUT]; 1601 if (sce == NULL || sce->pipeh == NULL) 1602 return (EINVAL); 1603 if (opt->ra_wb_buffer_size < 1 || 1604 opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX || 1605 opt->ra_wb_request_size < 1 || 1606 opt->ra_wb_request_size > opt->ra_wb_buffer_size) 1607 return (EINVAL); 1608 /* 1609 * XXX These changes do not take effect until the 1610 * next time RA/WB mode is enabled but they ought to 1611 * take effect immediately. 1612 */ 1613 sce->ra_wb_bufsize = opt->ra_wb_buffer_size; 1614 sce->ra_wb_reqsize = opt->ra_wb_request_size; 1615 return (0); 1616 } 1617 #else 1618 return (EOPNOTSUPP); 1619 #endif 1620 default: 1621 break; 1622 } 1623 1624 if (endpt != USB_CONTROL_ENDPOINT) 1625 return (EINVAL); 1626 1627 switch (cmd) { 1628 #ifdef UGEN_DEBUG 1629 case USB_SETDEBUG: 1630 ugendebug = *(int *)addr; 1631 break; 1632 #endif 1633 case USB_GET_CONFIG: 1634 err = usbd_get_config(sc->sc_udev, &conf); 1635 if (err) 1636 return (EIO); 1637 *(int *)addr = conf; 1638 break; 1639 case USB_SET_CONFIG: 1640 if (!(flag & FWRITE)) 1641 return (EPERM); 1642 err = ugen_set_config(sc, *(int *)addr); 1643 switch (err) { 1644 case USBD_NORMAL_COMPLETION: 1645 break; 1646 case USBD_IN_USE: 1647 return (EBUSY); 1648 default: 1649 return (EIO); 1650 } 1651 break; 1652 case USB_GET_ALTINTERFACE: 1653 ai = (struct usb_alt_interface *)addr; 1654 err = usbd_device2interface_handle(sc->sc_udev, 1655 ai->uai_interface_index, &iface); 1656 if (err) 1657 return (EINVAL); 1658 idesc = usbd_get_interface_descriptor(iface); 1659 if (idesc == NULL) 1660 return (EIO); 1661 ai->uai_alt_no = idesc->bAlternateSetting; 1662 break; 1663 case USB_SET_ALTINTERFACE: 1664 if (!(flag & FWRITE)) 1665 return (EPERM); 1666 ai = (struct usb_alt_interface *)addr; 1667 err = usbd_device2interface_handle(sc->sc_udev, 1668 ai->uai_interface_index, &iface); 1669 if (err) 1670 return (EINVAL); 1671 err = ugen_set_interface(sc, ai->uai_interface_index, 1672 ai->uai_alt_no); 1673 if (err) 1674 return (EINVAL); 1675 break; 1676 case USB_GET_NO_ALT: 1677 ai = (struct usb_alt_interface *)addr; 1678 cdesc = ugen_get_cdesc(sc, ai->uai_config_index, 0); 1679 if (cdesc == NULL) 1680 return (EINVAL); 1681 idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0); 1682 if (idesc == NULL) { 1683 free(cdesc, M_TEMP); 1684 return (EINVAL); 1685 } 1686 ai->uai_alt_no = usbd_get_no_alts(cdesc, 1687 idesc->bInterfaceNumber); 1688 free(cdesc, M_TEMP); 1689 break; 1690 case USB_GET_DEVICE_DESC: 1691 *(usb_device_descriptor_t *)addr = 1692 *usbd_get_device_descriptor(sc->sc_udev); 1693 break; 1694 case USB_GET_CONFIG_DESC: 1695 cd = (struct usb_config_desc *)addr; 1696 cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, 0); 1697 if (cdesc == NULL) 1698 return (EINVAL); 1699 cd->ucd_desc = *cdesc; 1700 free(cdesc, M_TEMP); 1701 break; 1702 case USB_GET_INTERFACE_DESC: 1703 id = (struct usb_interface_desc *)addr; 1704 cdesc = ugen_get_cdesc(sc, id->uid_config_index, 0); 1705 if (cdesc == NULL) 
1706 return (EINVAL); 1707 if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX && 1708 id->uid_alt_index == USB_CURRENT_ALT_INDEX) 1709 alt = ugen_get_alt_index(sc, id->uid_interface_index); 1710 else 1711 alt = id->uid_alt_index; 1712 idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt); 1713 if (idesc == NULL) { 1714 free(cdesc, M_TEMP); 1715 return (EINVAL); 1716 } 1717 id->uid_desc = *idesc; 1718 free(cdesc, M_TEMP); 1719 break; 1720 case USB_GET_ENDPOINT_DESC: 1721 ed = (struct usb_endpoint_desc *)addr; 1722 cdesc = ugen_get_cdesc(sc, ed->ued_config_index, 0); 1723 if (cdesc == NULL) 1724 return (EINVAL); 1725 if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX && 1726 ed->ued_alt_index == USB_CURRENT_ALT_INDEX) 1727 alt = ugen_get_alt_index(sc, ed->ued_interface_index); 1728 else 1729 alt = ed->ued_alt_index; 1730 edesc = usbd_find_edesc(cdesc, ed->ued_interface_index, 1731 alt, ed->ued_endpoint_index); 1732 if (edesc == NULL) { 1733 free(cdesc, M_TEMP); 1734 return (EINVAL); 1735 } 1736 ed->ued_desc = *edesc; 1737 free(cdesc, M_TEMP); 1738 break; 1739 case USB_GET_FULL_DESC: 1740 { 1741 int len; 1742 struct iovec iov; 1743 struct uio uio; 1744 struct usb_full_desc *fd = (struct usb_full_desc *)addr; 1745 int error; 1746 1747 cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &len); 1748 if (cdesc == NULL) 1749 return (EINVAL); 1750 if (len > fd->ufd_size) 1751 len = fd->ufd_size; 1752 iov.iov_base = (void *)fd->ufd_data; 1753 iov.iov_len = len; 1754 uio.uio_iov = &iov; 1755 uio.uio_iovcnt = 1; 1756 uio.uio_resid = len; 1757 uio.uio_offset = 0; 1758 uio.uio_rw = UIO_READ; 1759 uio.uio_vmspace = l->l_proc->p_vmspace; 1760 error = uiomove((void *)cdesc, len, &uio); 1761 free(cdesc, M_TEMP); 1762 return (error); 1763 } 1764 case USB_GET_STRING_DESC: { 1765 int len; 1766 si = (struct usb_string_desc *)addr; 1767 err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index, 1768 si->usd_language_id, &si->usd_desc, &len); 1769 if (err) 1770 return (EINVAL); 1771 break; 1772 } 1773 case USB_DO_REQUEST: 1774 { 1775 struct usb_ctl_request *ur = (void *)addr; 1776 int len = UGETW(ur->ucr_request.wLength); 1777 struct iovec iov; 1778 struct uio uio; 1779 void *ptr = 0; 1780 usbd_status xerr; 1781 int error = 0; 1782 1783 if (!(flag & FWRITE)) 1784 return (EPERM); 1785 /* Avoid requests that would damage the bus integrity. */ 1786 if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE && 1787 ur->ucr_request.bRequest == UR_SET_ADDRESS) || 1788 (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE && 1789 ur->ucr_request.bRequest == UR_SET_CONFIG) || 1790 (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE && 1791 ur->ucr_request.bRequest == UR_SET_INTERFACE)) 1792 return (EINVAL); 1793 1794 if (len < 0 || len > 32767) 1795 return (EINVAL); 1796 if (len != 0) { 1797 iov.iov_base = (void *)ur->ucr_data; 1798 iov.iov_len = len; 1799 uio.uio_iov = &iov; 1800 uio.uio_iovcnt = 1; 1801 uio.uio_resid = len; 1802 uio.uio_offset = 0; 1803 uio.uio_rw = 1804 ur->ucr_request.bmRequestType & UT_READ ? 
1805 UIO_READ : UIO_WRITE; 1806 uio.uio_vmspace = l->l_proc->p_vmspace; 1807 ptr = malloc(len, M_TEMP, M_WAITOK); 1808 if (uio.uio_rw == UIO_WRITE) { 1809 error = uiomove(ptr, len, &uio); 1810 if (error) 1811 goto ret; 1812 } 1813 } 1814 sce = &sc->sc_endpoints[endpt][IN]; 1815 xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request, 1816 ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout); 1817 if (xerr) { 1818 error = EIO; 1819 goto ret; 1820 } 1821 if (len != 0) { 1822 if (uio.uio_rw == UIO_READ) { 1823 error = uiomove(ptr, len, &uio); 1824 if (error) 1825 goto ret; 1826 } 1827 } 1828 ret: 1829 if (ptr) 1830 free(ptr, M_TEMP); 1831 return (error); 1832 } 1833 case USB_GET_DEVICEINFO: 1834 usbd_fill_deviceinfo(sc->sc_udev, 1835 (struct usb_device_info *)addr, 0); 1836 break; 1837 #ifdef COMPAT_30 1838 case USB_GET_DEVICEINFO_OLD: 1839 usbd_fill_deviceinfo_old(sc->sc_udev, 1840 (struct usb_device_info_old *)addr, 0); 1841 1842 break; 1843 #endif 1844 default: 1845 return (EINVAL); 1846 } 1847 return (0); 1848 } 1849 1850 int 1851 ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l) 1852 { 1853 int endpt = UGENENDPOINT(dev); 1854 struct ugen_softc *sc; 1855 int error; 1856 1857 USB_GET_SC(ugen, UGENUNIT(dev), sc); 1858 1859 sc->sc_refcnt++; 1860 error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l); 1861 if (--sc->sc_refcnt < 0) 1862 usb_detach_wakeup(USBDEV(sc->sc_dev)); 1863 return (error); 1864 } 1865 1866 int 1867 ugenpoll(dev_t dev, int events, struct lwp *l) 1868 { 1869 struct ugen_softc *sc; 1870 struct ugen_endpoint *sce_in, *sce_out; 1871 int revents = 0; 1872 int s; 1873 1874 USB_GET_SC(ugen, UGENUNIT(dev), sc); 1875 1876 if (sc->sc_dying) 1877 return (POLLHUP); 1878 1879 sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN]; 1880 sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT]; 1881 if (sce_in == NULL && sce_out == NULL) 1882 return (POLLERR); 1883 #ifdef DIAGNOSTIC 1884 if (!sce_in->edesc && !sce_out->edesc) { 1885 printf("ugenpoll: no edesc\n"); 1886 return (POLLERR); 1887 } 1888 /* It's possible to have only one pipe open. */ 1889 if (!sce_in->pipeh && !sce_out->pipeh) { 1890 printf("ugenpoll: no pipe\n"); 1891 return (POLLERR); 1892 } 1893 #endif 1894 s = splusb(); 1895 if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM))) 1896 switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) { 1897 case UE_INTERRUPT: 1898 if (sce_in->q.c_cc > 0) 1899 revents |= events & (POLLIN | POLLRDNORM); 1900 else 1901 selrecord(l, &sce_in->rsel); 1902 break; 1903 case UE_ISOCHRONOUS: 1904 if (sce_in->cur != sce_in->fill) 1905 revents |= events & (POLLIN | POLLRDNORM); 1906 else 1907 selrecord(l, &sce_in->rsel); 1908 break; 1909 case UE_BULK: 1910 #ifdef UGEN_BULK_RA_WB 1911 if (sce_in->state & UGEN_BULK_RA) { 1912 if (sce_in->ra_wb_used > 0) 1913 revents |= events & 1914 (POLLIN | POLLRDNORM); 1915 else 1916 selrecord(l, &sce_in->rsel); 1917 break; 1918 } 1919 #endif 1920 /* 1921 * We have no easy way of determining if a read will 1922 * yield any data or a write will happen. 1923 * Pretend they will. 
1924 */ 1925 revents |= events & (POLLIN | POLLRDNORM); 1926 break; 1927 default: 1928 break; 1929 } 1930 if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM))) 1931 switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) { 1932 case UE_INTERRUPT: 1933 case UE_ISOCHRONOUS: 1934 /* XXX unimplemented */ 1935 break; 1936 case UE_BULK: 1937 #ifdef UGEN_BULK_RA_WB 1938 if (sce_out->state & UGEN_BULK_WB) { 1939 if (sce_out->ra_wb_used < 1940 sce_out->limit - sce_out->ibuf) 1941 revents |= events & 1942 (POLLOUT | POLLWRNORM); 1943 else 1944 selrecord(l, &sce_out->rsel); 1945 break; 1946 } 1947 #endif 1948 /* 1949 * We have no easy way of determining if a read will 1950 * yield any data or a write will happen. 1951 * Pretend they will. 1952 */ 1953 revents |= events & (POLLOUT | POLLWRNORM); 1954 break; 1955 default: 1956 break; 1957 } 1958 1959 1960 splx(s); 1961 return (revents); 1962 } 1963 1964 static void 1965 filt_ugenrdetach(struct knote *kn) 1966 { 1967 struct ugen_endpoint *sce = kn->kn_hook; 1968 int s; 1969 1970 s = splusb(); 1971 SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext); 1972 splx(s); 1973 } 1974 1975 static int 1976 filt_ugenread_intr(struct knote *kn, long hint) 1977 { 1978 struct ugen_endpoint *sce = kn->kn_hook; 1979 1980 kn->kn_data = sce->q.c_cc; 1981 return (kn->kn_data > 0); 1982 } 1983 1984 static int 1985 filt_ugenread_isoc(struct knote *kn, long hint) 1986 { 1987 struct ugen_endpoint *sce = kn->kn_hook; 1988 1989 if (sce->cur == sce->fill) 1990 return (0); 1991 1992 if (sce->cur < sce->fill) 1993 kn->kn_data = sce->fill - sce->cur; 1994 else 1995 kn->kn_data = (sce->limit - sce->cur) + 1996 (sce->fill - sce->ibuf); 1997 1998 return (1); 1999 } 2000 2001 #ifdef UGEN_BULK_RA_WB 2002 static int 2003 filt_ugenread_bulk(struct knote *kn, long hint) 2004 { 2005 struct ugen_endpoint *sce = kn->kn_hook; 2006 2007 if (!(sce->state & UGEN_BULK_RA)) 2008 /* 2009 * We have no easy way of determining if a read will 2010 * yield any data or a write will happen. 2011 * So, emulate "seltrue". 2012 */ 2013 return (filt_seltrue(kn, hint)); 2014 2015 if (sce->ra_wb_used == 0) 2016 return (0); 2017 2018 kn->kn_data = sce->ra_wb_used; 2019 2020 return (1); 2021 } 2022 2023 static int 2024 filt_ugenwrite_bulk(struct knote *kn, long hint) 2025 { 2026 struct ugen_endpoint *sce = kn->kn_hook; 2027 2028 if (!(sce->state & UGEN_BULK_WB)) 2029 /* 2030 * We have no easy way of determining if a read will 2031 * yield any data or a write will happen. 2032 * So, emulate "seltrue". 
		 */
		return (filt_seltrue(kn, hint));

	if (sce->ra_wb_used == sce->limit - sce->ibuf)
		return (0);

	kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;

	return (1);
}
#endif

static const struct filterops ugenread_intr_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_intr };

static const struct filterops ugenread_isoc_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_isoc };

#ifdef UGEN_BULK_RA_WB
static const struct filterops ugenread_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_bulk };

static const struct filterops ugenwrite_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenwrite_bulk };
#else
static const struct filterops ugen_seltrue_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_seltrue };
#endif

int
ugenkqfilter(dev_t dev, struct knote *kn)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	struct klist *klist;
	int s;

	USB_GET_SC(ugen, UGENUNIT(dev), sc);

	if (sc->sc_dying)
		return (ENXIO);

	switch (kn->kn_filter) {
	case EVFILT_READ:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
		if (sce == NULL)
			return (EINVAL);

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			kn->kn_fop = &ugenread_intr_filtops;
			break;
		case UE_ISOCHRONOUS:
			kn->kn_fop = &ugenread_isoc_filtops;
			break;
		case UE_BULK:
#ifdef UGEN_BULK_RA_WB
			kn->kn_fop = &ugenread_bulk_filtops;
			break;
#else
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * So, emulate "seltrue".
			 */
			kn->kn_fop = &ugen_seltrue_filtops;
#endif
			break;
		default:
			return (EINVAL);
		}
		break;

	case EVFILT_WRITE:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
		if (sce == NULL)
			return (EINVAL);

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX poll doesn't support this */
			return (EINVAL);

		case UE_BULK:
#ifdef UGEN_BULK_RA_WB
			kn->kn_fop = &ugenwrite_bulk_filtops;
#else
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * So, emulate "seltrue".
			 */
			kn->kn_fop = &ugen_seltrue_filtops;
#endif
			break;
		default:
			return (EINVAL);
		}
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = sce;

	s = splusb();
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	splx(s);

	return (0);
}

#if defined(__FreeBSD__)
DRIVER_MODULE(ugen, uhub, ugen_driver, ugen_devclass, usbd_driver_load, 0);
#endif
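
/*
 * Userland usage sketch (editorial note, illustrative only, not part of
 * the driver): a bulk-IN endpoint is read through its own device node;
 * the timeout, short-transfer and read-ahead behaviour configured below
 * map onto the USB_SET_TIMEOUT, USB_SET_SHORT_XFER, USB_SET_BULK_RA_OPT
 * and USB_SET_BULK_RA cases in ugen_do_ioctl().  The device path and the
 * buffer sizes are assumptions for the example; read-ahead is available
 * only when the driver is built with UGEN_BULK_RA_WB.
 *
 *	#include <sys/ioctl.h>
 *	#include <dev/usb/usb.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/ugen0.02", O_RDONLY);	// bulk-IN endpoint 2
 *	int tmo = 2000, on = 1;
 *	struct usb_bulk_ra_wb_opt opt = {
 *		.ra_wb_buffer_size  = 65536,	// ring buffer (<= 1 MB max)
 *		.ra_wb_request_size = 16384,	// per-transfer length
 *	};
 *	char buf[16384];
 *
 *	ioctl(fd, USB_SET_TIMEOUT, &tmo);	// per-endpoint timeout
 *	ioctl(fd, USB_SET_SHORT_XFER, &on);	// accept short reads
 *	ioctl(fd, USB_SET_BULK_RA_OPT, &opt);	// size the RA buffer
 *	ioctl(fd, USB_SET_BULK_RA, &on);	// start read-ahead
 *	ssize_t n = read(fd, buf, sizeof(buf));	// drains the RA buffer
 */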