/*	$NetBSD: ugen.c,v 1.98 2008/04/28 20:23:59 martin Exp $	*/

/*
 * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Lennart Augustsson (lennart@augustsson.net) at
 * Carlstedt Research & Technology.
 *
 * Copyright (c) 2006 BBN Technologies Corp.  All rights reserved.
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and the Department of the Interior National Business
 * Center under agreement number NBCHC050166.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.98 2008/04/28 20:23:59 martin Exp $");

#include "opt_ugen_bulk_ra_wb.h"
#include "opt_compat_netbsd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#if defined(__NetBSD__) || defined(__OpenBSD__)
#include <sys/device.h>
#include <sys/ioctl.h>
#elif defined(__FreeBSD__)
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/ioccom.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#endif
#include <sys/conf.h>
#include <sys/tty.h>
#include <sys/file.h>
#include <sys/select.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>

#ifdef UGEN_DEBUG
#define DPRINTF(x)	if (ugendebug) logprintf x
#define DPRINTFN(n,x)	if (ugendebug>(n)) logprintf x
int	ugendebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

#define	UGEN_CHUNK	128	/* chunk size for read */
#define	UGEN_IBSIZE	1020	/* buffer size */
#define	UGEN_BBSIZE	1024

#define	UGEN_NISOFRAMES	500	/* 0.5 seconds worth */
#define	UGEN_NISOREQS	6	/* number of outstanding xfer requests */
#define	UGEN_NISORFRMS	4	/* number of frames (milliseconds) per req */

#define	UGEN_BULK_RA_WB_BUFSIZE	16384		/* default buffer size */
#define	UGEN_BULK_RA_WB_BUFMAX	(1 << 20)	/* maximum allowed buffer */

struct ugen_endpoint {
	struct ugen_softc *sc;
	usb_endpoint_descriptor_t *edesc;
	usbd_interface_handle iface;
	int state;
#define	UGEN_ASLP	0x02	/* waiting for data */
#define	UGEN_SHORT_OK	0x04	/* short xfers are OK */
#define	UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
#define	UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
#define	UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
	usbd_pipe_handle pipeh;
	struct clist q;
	struct selinfo rsel;
	u_char *ibuf;		/* start of buffer (circular for isoc) */
	u_char *fill;		/* location for input (isoc) */
	u_char *limit;		/* end of circular buffer (isoc) */
	u_char *cur;		/* current read location (isoc) */
	u_int32_t timeout;
#ifdef UGEN_BULK_RA_WB
	u_int32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
	u_int32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
	u_int32_t ra_wb_used;	 /* how much is in buffer */
	u_int32_t ra_wb_xferlen; /* current xfer length for RA/WB */
	usbd_xfer_handle ra_wb_xfer;
#endif
	struct isoreq {
		struct ugen_endpoint *sce;
		usbd_xfer_handle xfer;
		void *dmabuf;
		u_int16_t sizes[UGEN_NISORFRMS];
	} isoreqs[UGEN_NISOREQS];
};

struct ugen_softc {
	USBBASEDEVICE sc_dev;		/* base device */
	usbd_device_handle sc_udev;

	char sc_is_open[USB_MAX_ENDPOINTS];
	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
#define OUT 0
#define IN  1

	int sc_refcnt;
	char sc_buffer[UGEN_BBSIZE];
	u_char sc_dying;
};

#if defined(__NetBSD__)
dev_type_open(ugenopen);
dev_type_close(ugenclose);
dev_type_read(ugenread);
dev_type_write(ugenwrite);
dev_type_ioctl(ugenioctl);
dev_type_poll(ugenpoll);
dev_type_kqfilter(ugenkqfilter);

const struct cdevsw ugen_cdevsw = {
	ugenopen, ugenclose, ugenread, ugenwrite, ugenioctl,
	nostop, notty, ugenpoll, nommap, ugenkqfilter, D_OTHER,
};
#elif defined(__OpenBSD__)
cdev_decl(ugen);
#elif defined(__FreeBSD__)
d_open_t  ugenopen;
d_close_t ugenclose;
d_read_t  ugenread;
d_write_t ugenwrite;
d_ioctl_t ugenioctl;
d_poll_t  ugenpoll;

#define UGEN_CDEV_MAJOR	114

Static struct cdevsw ugen_cdevsw = {
	/* open */	ugenopen,
	/* close */	ugenclose,
	/* read */	ugenread,
	/* write */	ugenwrite,
	/* ioctl */	ugenioctl,
	/* poll */	ugenpoll,
	/* mmap */	nommap,
	/* strategy */	nostrategy,
	/* name */	"ugen",
	/* maj */	UGEN_CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	0,
	/* bmaj */	-1
};
#endif

Static void ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr,
		     usbd_status status);
Static void ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
			    usbd_status status);
#ifdef UGEN_BULK_RA_WB
Static void ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
			     usbd_status status);
Static void ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
			     usbd_status status);
#endif
Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
			 void *, int, struct lwp *);
Static int ugen_set_config(struct ugen_softc *sc, int configno);
Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *sc,
					       int index, int *lenp);
Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
Static int ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx);

#define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
#define UGENENDPOINT(n) (minor(n) & 0xf)
#define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))

USB_DECLARE_DRIVER(ugen);

USB_MATCH(ugen)
{
	USB_MATCH_START(ugen, uaa);

	if (match->cf_flags & 1)
		return (UMATCH_HIGHEST);
	else if (uaa->usegeneric)
		return (UMATCH_GENERIC);
	else
		return (UMATCH_NONE);
}

USB_ATTACH(ugen)
{
	USB_ATTACH_START(ugen, sc, uaa);
	usbd_device_handle udev;
	char *devinfop;
	usbd_status err;
	int i, dir, conf;

	devinfop = usbd_devinfo_alloc(uaa->device, 0);
	USB_ATTACH_SETUP;
	aprint_normal("%s: %s\n", USBDEVNAME(sc->sc_dev), devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_udev = udev = uaa->device;

	/* First set configuration index 0, the default one for ugen. */
	err = usbd_set_config_index(udev, 0, 0);
	if (err) {
		aprint_error("%s: setting configuration index 0 failed\n",
		    USBDEVNAME(sc->sc_dev));
		sc->sc_dying = 1;
		USB_ATTACH_ERROR_RETURN;
	}
	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;

	/* Set up all the local state for this configuration. */
	err = ugen_set_config(sc, conf);
	if (err) {
		aprint_error("%s: setting configuration %d failed\n",
		    USBDEVNAME(sc->sc_dev), conf);
		sc->sc_dying = 1;
		USB_ATTACH_ERROR_RETURN;
	}

#ifdef __FreeBSD__
	{
		static int global_init_done = 0;
		if (!global_init_done) {
			cdevsw_add(&ugen_cdevsw);
			global_init_done = 1;
		}
	}
#endif
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce;

			sce = &sc->sc_endpoints[i][dir];
			selinit(&sce->rsel);
		}
	}

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev,
			   USBDEV(sc->sc_dev));

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	USB_ATTACH_SUCCESS_RETURN;
}

Static int
ugen_set_config(struct ugen_softc *sc, int configno)
{
	usbd_device_handle dev = sc->sc_udev;
	usbd_interface_handle iface;
	usb_endpoint_descriptor_t *ed;
	struct ugen_endpoint *sce;
	u_int8_t niface, nendpt;
	int ifaceno, endptno, endpt;
	usbd_status err;
	int dir;

	DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
		    USBDEVNAME(sc->sc_dev), configno, sc));

	/*
	 * We start at 1, not 0, because we don't care whether the
	 * control endpoint is open or not. It is always present.
	 */
	for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
		if (sc->sc_is_open[endptno]) {
			DPRINTFN(1,
			     ("ugen_set_config: %s - endpoint %d is open\n",
			      USBDEVNAME(sc->sc_dev), endptno));
			return (USBD_IN_USE);
		}

	/* Avoid setting the current value. */
	if (usbd_get_config_descriptor(dev)->bConfigurationValue != configno) {
		err = usbd_set_config_no(dev, configno, 1);
		if (err)
			return (err);
	}

	err = usbd_interface_count(dev, &niface);
	if (err)
		return (err);
	memset(sc->sc_endpoints, 0, sizeof sc->sc_endpoints);
	for (ifaceno = 0; ifaceno < niface; ifaceno++) {
		DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
		err = usbd_device2interface_handle(dev, ifaceno, &iface);
		if (err)
			return (err);
		err = usbd_endpoint_count(iface, &nendpt);
		if (err)
			return (err);
		for (endptno = 0; endptno < nendpt; endptno++) {
			ed = usbd_interface2endpoint_descriptor(iface,endptno);
			KASSERT(ed != NULL);
			endpt = ed->bEndpointAddress;
			dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
			sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
			DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
				    "(%d,%d), sce=%p\n",
				    endptno, endpt, UE_GET_ADDR(endpt),
				    UE_GET_DIR(endpt), sce));
			sce->sc = sc;
			sce->edesc = ed;
			sce->iface = iface;
		}
	}
	return (USBD_NORMAL_COMPLETION);
}

int
ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct ugen_softc *sc;
	int unit = UGENUNIT(dev);
	int endpt = UGENENDPOINT(dev);
	usb_endpoint_descriptor_t *edesc;
	struct ugen_endpoint *sce;
	int dir, isize;
	usbd_status err;
	usbd_xfer_handle xfer;
	void *tbuf;
	int i, j;

	USB_GET_SC_OPEN(ugen, unit, sc);

	DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
		     flag, mode, unit, endpt));

	if (sc == NULL || sc->sc_dying)
		return (ENXIO);

	/* The control endpoint allows multiple opens. */
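	/*
	 * Note that sc_is_open[] is a simple flag, not a reference count,
	 * so the control endpoint's open state is shared by all openers.
	 */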
	if (endpt == USB_CONTROL_ENDPOINT) {
		sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
		return (0);
	}

	if (sc->sc_is_open[endpt])
		return (EBUSY);

	/* Make sure there are pipes for all directions. */
	for (dir = OUT; dir <= IN; dir++) {
		if (flag & (dir == OUT ? FWRITE : FREAD)) {
			sce = &sc->sc_endpoints[endpt][dir];
			if (sce == 0 || sce->edesc == 0)
				return (ENXIO);
		}
	}

	/* Actually open the pipes. */
	/* XXX Should back out properly if it fails. */
	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		sce->state = 0;
		sce->timeout = USBD_NO_TIMEOUT;
		DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
			     sc, endpt, dir, sce));
		edesc = sce->edesc;
		switch (edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (dir == OUT) {
				err = usbd_open_pipe(sce->iface,
				    edesc->bEndpointAddress, 0, &sce->pipeh);
				if (err)
					return (EIO);
				break;
			}
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return (EINVAL);
			sce->ibuf = malloc(isize, M_USBDEV, M_WAITOK);
			DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
				     endpt, isize));
			if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1)
				return (ENOMEM);
			err = usbd_open_pipe_intr(sce->iface,
				  edesc->bEndpointAddress,
				  USBD_SHORT_XFER_OK, &sce->pipeh, sce,
				  sce->ibuf, isize, ugenintr,
				  USBD_DEFAULT_INTERVAL);
			if (err) {
				free(sce->ibuf, M_USBDEV);
				clfree(&sce->q);
				return (EIO);
			}
			DPRINTFN(5, ("ugenopen: interrupt open done\n"));
			break;
		case UE_BULK:
			err = usbd_open_pipe(sce->iface,
				  edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err)
				return (EIO);
#ifdef UGEN_BULK_RA_WB
			sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
			/*
			 * Use request size for non-RA/WB transfers
			 * as the default.
			 */
			sce->ra_wb_reqsize = UGEN_BBSIZE;
#endif
			break;
		case UE_ISOCHRONOUS:
			if (dir == OUT)
				return (EINVAL);
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return (EINVAL);
			sce->ibuf = malloc(isize * UGEN_NISOFRAMES,
				M_USBDEV, M_WAITOK);
			sce->cur = sce->fill = sce->ibuf;
			sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
			DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
				     endpt, isize));
			err = usbd_open_pipe(sce->iface,
				  edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err) {
				free(sce->ibuf, M_USBDEV);
				return (EIO);
			}
			for(i = 0; i < UGEN_NISOREQS; ++i) {
				sce->isoreqs[i].sce = sce;
				xfer = usbd_alloc_xfer(sc->sc_udev);
				if (xfer == 0)
					goto bad;
				sce->isoreqs[i].xfer = xfer;
				tbuf = usbd_alloc_buffer
					(xfer, isize * UGEN_NISORFRMS);
				if (tbuf == 0) {
					i++;
					goto bad;
				}
				sce->isoreqs[i].dmabuf = tbuf;
				for(j = 0; j < UGEN_NISORFRMS; ++j)
					sce->isoreqs[i].sizes[j] = isize;
				usbd_setup_isoc_xfer
					(xfer, sce->pipeh, &sce->isoreqs[i],
					 sce->isoreqs[i].sizes,
					 UGEN_NISORFRMS, USBD_NO_COPY,
					 ugen_isoc_rintr);
				(void)usbd_transfer(xfer);
			}
			DPRINTFN(5, ("ugenopen: isoc open done\n"));
			break;
		bad:
			while (--i >= 0) /* implicit buffer free */
				usbd_free_xfer(sce->isoreqs[i].xfer);
			return (ENOMEM);
		case UE_CONTROL:
			sce->timeout = USBD_DEFAULT_TIMEOUT;
			return (EINVAL);
		}
	}
	sc->sc_is_open[endpt] = 1;
	return (0);
}

int
ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	int dir;
	int i;

	USB_GET_SC(ugen, UGENUNIT(dev), sc);

	DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
		     flag, mode, UGENUNIT(dev), endpt));

#ifdef DIAGNOSTIC
	if (!sc->sc_is_open[endpt]) {
		printf("ugenclose: not open\n");
		return (EINVAL);
	}
#endif

	if (endpt == USB_CONTROL_ENDPOINT) {
		DPRINTFN(5, ("ugenclose: close control\n"));
		sc->sc_is_open[endpt] = 0;
		return (0);
	}

	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		if (sce == NULL || sce->pipeh == NULL)
			continue;
		DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
			     endpt, dir, sce));

		usbd_abort_pipe(sce->pipeh);
		usbd_close_pipe(sce->pipeh);
		sce->pipeh = NULL;

		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			ndflush(&sce->q, sce->q.c_cc);
			clfree(&sce->q);
			break;
		case UE_ISOCHRONOUS:
			for (i = 0; i < UGEN_NISOREQS; ++i)
				usbd_free_xfer(sce->isoreqs[i].xfer);
			break;
#ifdef UGEN_BULK_RA_WB
		case UE_BULK:
			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB))
				/* ibuf freed below */
				usbd_free_xfer(sce->ra_wb_xfer);
			break;
#endif
		default:
			break;
		}

		if (sce->ibuf != NULL) {
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
			clfree(&sce->q);
		}
	}
	sc->sc_is_open[endpt] = 0;

	return (0);
}

Static int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
	u_int32_t n, tn;
	usbd_xfer_handle xfer;
	usbd_status err;
	int s;
	int error = 0;

	DPRINTFN(5, ("%s: ugenread: %d\n", USBDEVNAME(sc->sc_dev), endpt));

	if (sc->sc_dying)
		return (EIO);

	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenread: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenread: no pipe\n");
		return (EIO);
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_INTERRUPT:
		/* Block until activity occurs. */
		s = splusb();
		while (sce->q.c_cc == 0) {
			if (flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = tsleep(sce, PZERO | PCATCH, "ugenri", 0);
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}
		splx(s);

		/* Transfer as many chunks as possible. */
		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
			n = min(sce->q.c_cc, uio->uio_resid);
			if (n > sizeof(sc->sc_buffer))
				n = sizeof(sc->sc_buffer);

			/* Remove a small chunk from the input queue. */
			q_to_b(&sce->q, sc->sc_buffer, n);
			DPRINTFN(5, ("ugenread: got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
		}
		break;
	case UE_BULK:
#ifdef UGEN_BULK_RA_WB
		if (sce->state & UGEN_BULK_RA) {
			DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			s = splusb();
			if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			while (uio->uio_resid > 0 && !error) {
				while (sce->ra_wb_used == 0) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenread: sleep on %p\n",
						  sce));
					error = tsleep(sce, PZERO | PCATCH,
						       "ugenrb", 0);
					DPRINTFN(5,
						 ("ugenread: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data to the process. */
				while (uio->uio_resid > 0
				       && sce->ra_wb_used > 0) {
					n = min(uio->uio_resid,
						sce->ra_wb_used);
					n = min(n, sce->limit - sce->cur);
					error = uiomove(sce->cur, n, uio);
					if (error)
						break;
					sce->cur += n;
					sce->ra_wb_used -= n;
					if (sce->cur == sce->limit)
						sce->cur = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was full, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = (sce->limit - sce->ibuf)
					    - sce->ra_wb_used;
					usbd_setup_xfer(xfer,
					    sce->pipeh, sce, NULL,
					    min(n, sce->ra_wb_xferlen),
					    USBD_NO_COPY, USBD_NO_TIMEOUT,
					    ugen_bulkra_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try
						 * again at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			splx(s);
			break;
		}
#endif
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (ENOMEM);
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
			tn = n;
			err = usbd_bulk_transfer(
				  xfer, sce->pipeh,
				  sce->state & UGEN_SHORT_OK ?
				      USBD_SHORT_XFER_OK : 0,
				  sce->timeout, sc->sc_buffer, &tn, "ugenrb");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
			DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
			error = uiomove(sc->sc_buffer, tn, uio);
			if (error || tn < n)
				break;
		}
		usbd_free_xfer(xfer);
		break;
	case UE_ISOCHRONOUS:
		s = splusb();
		while (sce->cur == sce->fill) {
			if (flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = tsleep(sce, PZERO | PCATCH, "ugenri", 0);
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}

		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
			if(sce->fill > sce->cur)
				n = min(sce->fill - sce->cur, uio->uio_resid);
			else
				n = min(sce->limit - sce->cur, uio->uio_resid);

			DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sce->cur, n, uio);
			if (error)
				break;
			sce->cur += n;
			if(sce->cur >= sce->limit)
				sce->cur = sce->ibuf;
		}
		splx(s);
		break;

	default:
		return (ENXIO);
	}
	return (error);
}

int
ugenread(dev_t dev, struct uio *uio, int flag)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	int error;

	USB_GET_SC(ugen, UGENUNIT(dev), sc);

	sc->sc_refcnt++;
	error = ugen_do_read(sc, endpt, uio, flag);
	if (--sc->sc_refcnt < 0)
		usb_detach_wakeup(USBDEV(sc->sc_dev));
	return (error);
}

Static int
ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
	int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
	u_int32_t n;
	int error = 0;
#ifdef UGEN_BULK_RA_WB
	int s;
	u_int32_t tn;
	char *dbuf;
#endif
	usbd_xfer_handle xfer;
	usbd_status err;

	DPRINTFN(5, ("%s: ugenwrite: %d\n", USBDEVNAME(sc->sc_dev), endpt));

	if (sc->sc_dying)
		return (EIO);

	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenwrite: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenwrite: no pipe\n");
		return (EIO);
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_BULK:
#ifdef UGEN_BULK_RA_WB
		if (sce->state & UGEN_BULK_WB) {
			DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			s = splusb();
			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
			    flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			while (uio->uio_resid > 0 && !error) {
				while (sce->ra_wb_used ==
				       sce->limit - sce->ibuf) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenwrite: sleep on %p\n",
						  sce));
					error = tsleep(sce, PZERO | PCATCH,
						       "ugenwb", 0);
					DPRINTFN(5,
						 ("ugenwrite: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data from the process. */
				while (uio->uio_resid > 0 &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = min(uio->uio_resid,
						(sce->limit - sce->ibuf)
						 - sce->ra_wb_used);
					n = min(n, sce->limit - sce->fill);
					error = uiomove(sce->fill, n, uio);
					if (error)
						break;
					sce->fill += n;
					sce->ra_wb_used += n;
					if (sce->fill == sce->limit)
						sce->fill = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was empty, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used > 0) {
					dbuf = (char *)usbd_get_buffer(xfer);
					n = min(sce->ra_wb_used,
						sce->ra_wb_xferlen);
					tn = min(n, sce->limit - sce->cur);
					memcpy(dbuf, sce->cur, tn);
					dbuf += tn;
					if (n - tn > 0)
						memcpy(dbuf, sce->ibuf,
						       n - tn);
					usbd_setup_xfer(xfer,
					    sce->pipeh, sce, NULL, n,
					    USBD_NO_COPY, USBD_NO_TIMEOUT,
					    ugen_bulkwb_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try again
						 * at the next write.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			splx(s);
			break;
		}
#endif
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (EIO);
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_bulk_transfer(xfer, sce->pipeh, 0,
				  sce->timeout, sc->sc_buffer, &n, "ugenwb");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_free_xfer(xfer);
		break;
	case UE_INTERRUPT:
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (EIO);
		while ((n = min(UGETW(sce->edesc->wMaxPacketSize),
		    uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_intr_transfer(xfer, sce->pipeh, 0,
				  sce->timeout, sc->sc_buffer, &n, "ugenwi");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_free_xfer(xfer);
		break;
	default:
		return (ENXIO);
	}
	return (error);
}

int
ugenwrite(dev_t dev, struct uio *uio, int flag)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	int error;

	USB_GET_SC(ugen, UGENUNIT(dev), sc);

	sc->sc_refcnt++;
	error = ugen_do_write(sc, endpt, uio, flag);
	if (--sc->sc_refcnt < 0)
		usb_detach_wakeup(USBDEV(sc->sc_dev));
	return (error);
}

#if defined(__NetBSD__) || defined(__OpenBSD__)
int
ugen_activate(device_ptr_t self, enum devact act)
{
	struct ugen_softc *sc = (struct ugen_softc *)self;

	switch (act) {
	case DVACT_ACTIVATE:
		return (EOPNOTSUPP);

	case DVACT_DEACTIVATE:
		sc->sc_dying = 1;
		break;
	}
	return (0);
}
#endif

USB_DETACH(ugen)
{
	USB_DETACH_START(ugen, sc);
	struct ugen_endpoint *sce;
	int i, dir;
	int s;
#if defined(__NetBSD__) || defined(__OpenBSD__)
	int maj, mn;

	DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));
#elif defined(__FreeBSD__)
	DPRINTF(("ugen_detach: sc=%p\n", sc));
#endif

	sc->sc_dying = 1;
	pmf_device_deregister(self);
	/* Abort all pipes.  Causes processes waiting for transfer to wake. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			if (sce && sce->pipeh)
				usbd_abort_pipe(sce->pipeh);
		}
	}

	s = splusb();
	if (--sc->sc_refcnt >= 0) {
		/* Wake everyone */
		for (i = 0; i < USB_MAX_ENDPOINTS; i++)
			wakeup(&sc->sc_endpoints[i][IN]);
		/* Wait for processes to go away. */
		usb_detach_wait(USBDEV(sc->sc_dev));
	}
	splx(s);

#if defined(__NetBSD__) || defined(__OpenBSD__)
	/* locate the major number */
#if defined(__NetBSD__)
	maj = cdevsw_lookup_major(&ugen_cdevsw);
#elif defined(__OpenBSD__)
	for (maj = 0; maj < nchrdev; maj++)
		if (cdevsw[maj].d_open == ugenopen)
			break;
#endif

	/* Nuke the vnodes for any open instances (calls close). */
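	/*
	 * Each unit owns USB_MAX_ENDPOINTS consecutive minor numbers;
	 * see the UGENUNIT()/UGENENDPOINT() encoding above.
	 */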
	mn = device_unit(self) * USB_MAX_ENDPOINTS;
	vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);
#elif defined(__FreeBSD__)
	/* XXX not implemented yet */
#endif

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev,
			   USBDEV(sc->sc_dev));

	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			seldestroy(&sce->rsel);
		}
	}

	return (0);
}

Static void
ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr, usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	/*struct ugen_softc *sc = sce->sc;*/
	u_int32_t count;
	u_char *ibuf;

	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugenintr: status=%d\n", status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	ibuf = sce->ibuf;

	DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
		     xfer, status, count));
	DPRINTFN(5, ("          data = %02x %02x %02x\n",
		     ibuf[0], ibuf[1], ibuf[2]));

	(void)b_to_q(ibuf, count, &sce->q);

	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_intr: waking %p\n", sce));
		wakeup(sce);
	}
	selnotify(&sce->rsel, 0, 0);
}

Static void
ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
		usbd_status status)
{
	struct isoreq *req = addr;
	struct ugen_endpoint *sce = req->sce;
	u_int32_t count, n;
	int i, isize;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
	    (long)(req - sce->isoreqs), count));

	/* throw away oldest input if the buffer is full */
	if(sce->fill < sce->cur && sce->cur <= sce->fill + count) {
		sce->cur += count;
		if(sce->cur >= sce->limit)
			sce->cur = sce->ibuf + (sce->limit - sce->cur);
		DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
			     count));
	}

	isize = UGETW(sce->edesc->wMaxPacketSize);
	for (i = 0; i < UGEN_NISORFRMS; i++) {
		u_int32_t actlen = req->sizes[i];
		char const *tbuf = (char const *)req->dmabuf + isize * i;

		/* copy data to buffer */
		while (actlen > 0) {
			n = min(actlen, sce->limit - sce->fill);
			memcpy(sce->fill, tbuf, n);

			tbuf += n;
			actlen -= n;
			sce->fill += n;
			if(sce->fill == sce->limit)
				sce->fill = sce->ibuf;
		}

		/* setup size for next transfer */
		req->sizes[i] = isize;
	}

	usbd_setup_isoc_xfer(xfer, sce->pipeh, req, req->sizes, UGEN_NISORFRMS,
			     USBD_NO_COPY, ugen_isoc_rintr);
	(void)usbd_transfer(xfer);

	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce));
		wakeup(sce);
	}
	selnotify(&sce->rsel, 0, 0);
}

#ifdef UGEN_BULK_RA_WB
Static void
ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	u_int32_t count, n;
	char const *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used += count;

	/* Copy data to buffer. */
	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
	n = min(count, sce->limit - sce->fill);
	memcpy(sce->fill, tbuf, n);
	tbuf += n;
	count -= n;
	sce->fill += n;
	if (sce->fill == sce->limit)
		sce->fill = sce->ibuf;
	if (count > 0) {
		memcpy(sce->fill, tbuf, count);
		sce->fill += count;
	}

	/* Set up the next request if necessary. */
	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
	if (n > 0) {
		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
		    min(n, sce->ra_wb_xferlen), USBD_NO_COPY,
		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkra_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next read.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		sce->state |= UGEN_RA_WB_STOP;

	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce));
		wakeup(sce);
	}
	selnotify(&sce->rsel, 0, 0);
}

Static void
ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	u_int32_t count, n;
	char *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used -= count;

	/* Update buffer pointers. */
	sce->cur += count;
	if (sce->cur >= sce->limit)
		sce->cur = sce->ibuf + (sce->cur - sce->limit);

	/* Set up next request if necessary. */
	if (sce->ra_wb_used > 0) {
		/* copy data from buffer */
		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
		count = min(sce->ra_wb_used, sce->ra_wb_xferlen);
		n = min(count, sce->limit - sce->cur);
		memcpy(tbuf, sce->cur, n);
		tbuf += n;
		if (count - n > 0)
			memcpy(tbuf, sce->ibuf, count - n);

		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
		    count, USBD_NO_COPY, USBD_NO_TIMEOUT, ugen_bulkwb_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkwb_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next write.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		sce->state |= UGEN_RA_WB_STOP;

	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce));
		wakeup(sce);
	}
	selnotify(&sce->rsel, 0, 0);
}
#endif

Static usbd_status
ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
{
	usbd_interface_handle iface;
	usb_endpoint_descriptor_t *ed;
	usbd_status err;
	struct ugen_endpoint *sce;
	u_int8_t niface, nendpt, endptno, endpt;
	int dir;

	DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));

	err = usbd_interface_count(sc->sc_udev, &niface);
	if (err)
		return (err);
	if (ifaceidx < 0 || ifaceidx >= niface)
		return (USBD_INVAL);

	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
	if (err)
		return (err);
	err = usbd_endpoint_count(iface, &nendpt);
	if (err)
		return (err);
	/* XXX should only do this after setting new altno has succeeded */
	for (endptno = 0; endptno < nendpt; endptno++) {
		ed = usbd_interface2endpoint_descriptor(iface,endptno);
		endpt = ed->bEndpointAddress;
		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
		sce->sc = 0;
		sce->edesc = 0;
		sce->iface = 0;
	}

	/* change setting */
	err = usbd_set_interface(iface, altno);
	if (err)
		return (err);

	err = usbd_endpoint_count(iface, &nendpt);
	if (err)
		return (err);
	for (endptno = 0; endptno < nendpt; endptno++) {
		ed = usbd_interface2endpoint_descriptor(iface,endptno);
		KASSERT(ed != NULL);
		endpt = ed->bEndpointAddress;
		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
		sce->sc = sc;
		sce->edesc = ed;
		sce->iface = iface;
	}
	return (0);
}

/* Retrieve a complete descriptor for a certain device and index. */
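/* The descriptor is allocated from M_TEMP; the caller must free it. */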
Static usb_config_descriptor_t *
ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
{
	usb_config_descriptor_t *cdesc, *tdesc, cdescr;
	int len;
	usbd_status err;

	if (index == USB_CURRENT_CONFIG_INDEX) {
		tdesc = usbd_get_config_descriptor(sc->sc_udev);
		len = UGETW(tdesc->wTotalLength);
		if (lenp)
			*lenp = len;
		cdesc = malloc(len, M_TEMP, M_WAITOK);
		memcpy(cdesc, tdesc, len);
		DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
	} else {
		err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
		if (err)
			return (0);
		len = UGETW(cdescr.wTotalLength);
		DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
		if (lenp)
			*lenp = len;
		cdesc = malloc(len, M_TEMP, M_WAITOK);
		err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
		if (err) {
			free(cdesc, M_TEMP);
			return (0);
		}
	}
	return (cdesc);
}

Static int
ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
{
	usbd_interface_handle iface;
	usbd_status err;

	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
	if (err)
		return (-1);
	return (usbd_get_interface_altindex(iface));
}

Static int
ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
	      void *addr, int flag, struct lwp *l)
{
	struct ugen_endpoint *sce;
	usbd_status err;
	usbd_interface_handle iface;
	struct usb_config_desc *cd;
	usb_config_descriptor_t *cdesc;
	struct usb_interface_desc *id;
	usb_interface_descriptor_t *idesc;
	struct usb_endpoint_desc *ed;
	usb_endpoint_descriptor_t *edesc;
	struct usb_alt_interface *ai;
	struct usb_string_desc *si;
	u_int8_t conf, alt;

	DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
	if (sc->sc_dying)
		return (EIO);

	switch (cmd) {
	case FIONBIO:
		/* All handled in the upper FS layer. */
		return (0);
	case USB_SET_SHORT_XFER:
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		/* This flag only affects read */
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		if (*(int *)addr)
			sce->state |= UGEN_SHORT_OK;
		else
			sce->state &= ~UGEN_SHORT_OK;
		return (0);
	case USB_SET_TIMEOUT:
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL
		    /* XXX this shouldn't happen, but the distinction between
		       input and output pipes isn't clear enough.
		       || sce->pipeh == NULL */
		    )
			return (EINVAL);
		sce->timeout = *(int *)addr;
		return (0);
	case USB_SET_BULK_RA:
#ifdef UGEN_BULK_RA_WB
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return (EINVAL);

		if (*(int *)addr) {
			/* Only turn RA on if it's currently off. */
			if (sce->state & UGEN_BULK_RA)
				return (0);

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return (EINVAL);
			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (sce->ra_wb_xfer == NULL)
				return (ENOMEM);
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			/*
			 * Set up a dmabuf because we reuse the xfer with
			 * the same (max) request length like isoc.
			 */
			if (usbd_alloc_buffer(sce->ra_wb_xfer,
					      sce->ra_wb_xferlen) == 0) {
				usbd_free_xfer(sce->ra_wb_xfer);
				return (ENOMEM);
			}
			sce->ibuf = malloc(sce->ra_wb_bufsize,
					   M_USBDEV, M_WAITOK);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			sce->state |= UGEN_BULK_RA;
			sce->state &= ~UGEN_RA_WB_STOP;
			/* Now start reading. */
			usbd_setup_xfer(sce->ra_wb_xfer, sce->pipeh, sce,
			    NULL,
			    min(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
			    USBD_NO_COPY, USBD_NO_TIMEOUT,
			    ugen_bulkra_intr);
			err = usbd_transfer(sce->ra_wb_xfer);
			if (err != USBD_IN_PROGRESS) {
				sce->state &= ~UGEN_BULK_RA;
				free(sce->ibuf, M_USBDEV);
				sce->ibuf = NULL;
				usbd_free_xfer(sce->ra_wb_xfer);
				return (EIO);
			}
		} else {
			/* Only turn RA off if it's currently on. */
			if (!(sce->state & UGEN_BULK_RA))
				return (0);

			sce->state &= ~UGEN_BULK_RA;
			usbd_abort_pipe(sce->pipeh);
			usbd_free_xfer(sce->ra_wb_xfer);
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and drain the buffer
			 * instead.
			 */
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	case USB_SET_BULK_WB:
#ifdef UGEN_BULK_RA_WB
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return (EINVAL);

		if (*(int *)addr) {
			/* Only turn WB on if it's currently off. */
			if (sce->state & UGEN_BULK_WB)
				return (0);

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return (EINVAL);
			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (sce->ra_wb_xfer == NULL)
				return (ENOMEM);
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			/*
			 * Set up a dmabuf because we reuse the xfer with
			 * the same (max) request length like isoc.
			 */
			if (usbd_alloc_buffer(sce->ra_wb_xfer,
					      sce->ra_wb_xferlen) == 0) {
				usbd_free_xfer(sce->ra_wb_xfer);
				return (ENOMEM);
			}
			sce->ibuf = malloc(sce->ra_wb_bufsize,
					   M_USBDEV, M_WAITOK);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
		} else {
			/* Only turn WB off if it's currently on. */
			if (!(sce->state & UGEN_BULK_WB))
				return (0);

			sce->state &= ~UGEN_BULK_WB;
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and keep writing to
			 * drain the buffer instead.
			 */
			usbd_abort_pipe(sce->pipeh);
			usbd_free_xfer(sce->ra_wb_xfer);
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	case USB_SET_BULK_RA_OPT:
	case USB_SET_BULK_WB_OPT:
#ifdef UGEN_BULK_RA_WB
	{
		struct usb_bulk_ra_wb_opt *opt;

		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		opt = (struct usb_bulk_ra_wb_opt *)addr;
		if (cmd == USB_SET_BULK_RA_OPT)
			sce = &sc->sc_endpoints[endpt][IN];
		else
			sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		if (opt->ra_wb_buffer_size < 1 ||
		    opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
		    opt->ra_wb_request_size < 1 ||
		    opt->ra_wb_request_size > opt->ra_wb_buffer_size)
			return (EINVAL);
		/*
		 * XXX These changes do not take effect until the
		 * next time RA/WB mode is enabled but they ought to
		 * take effect immediately.
		 */
		sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
		sce->ra_wb_reqsize = opt->ra_wb_request_size;
		return (0);
	}
#else
		return (EOPNOTSUPP);
#endif
	default:
		break;
	}

	if (endpt != USB_CONTROL_ENDPOINT)
		return (EINVAL);

	switch (cmd) {
#ifdef UGEN_DEBUG
	case USB_SETDEBUG:
		ugendebug = *(int *)addr;
		break;
#endif
	case USB_GET_CONFIG:
		err = usbd_get_config(sc->sc_udev, &conf);
		if (err)
			return (EIO);
		*(int *)addr = conf;
		break;
	case USB_SET_CONFIG:
		if (!(flag & FWRITE))
			return (EPERM);
		err = ugen_set_config(sc, *(int *)addr);
		switch (err) {
		case USBD_NORMAL_COMPLETION:
			break;
		case USBD_IN_USE:
			return (EBUSY);
		default:
			return (EIO);
		}
		break;
	case USB_GET_ALTINTERFACE:
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		idesc = usbd_get_interface_descriptor(iface);
		if (idesc == NULL)
			return (EIO);
		ai->uai_alt_no = idesc->bAlternateSetting;
		break;
	case USB_SET_ALTINTERFACE:
		if (!(flag & FWRITE))
			return (EPERM);
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		err = ugen_set_interface(sc, ai->uai_interface_index,
		    ai->uai_alt_no);
		if (err)
			return (EINVAL);
		break;
	case USB_GET_NO_ALT:
		ai = (struct usb_alt_interface *)addr;
		cdesc = ugen_get_cdesc(sc, ai->uai_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
		if (idesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		ai->uai_alt_no = usbd_get_no_alts(cdesc,
		    idesc->bInterfaceNumber);
		free(cdesc, M_TEMP);
		break;
	case USB_GET_DEVICE_DESC:
		*(usb_device_descriptor_t *)addr =
			*usbd_get_device_descriptor(sc->sc_udev);
		break;
	case USB_GET_CONFIG_DESC:
		cd = (struct usb_config_desc *)addr;
		cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		cd->ucd_desc = *cdesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_INTERFACE_DESC:
		id = (struct usb_interface_desc *)addr;
		cdesc = ugen_get_cdesc(sc, id->uid_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
		    id->uid_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, id->uid_interface_index);
		else
			alt = id->uid_alt_index;
		idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
		if (idesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		id->uid_desc = *idesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_ENDPOINT_DESC:
		ed = (struct usb_endpoint_desc *)addr;
		cdesc = ugen_get_cdesc(sc, ed->ued_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
		    ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, ed->ued_interface_index);
		else
			alt = ed->ued_alt_index;
		edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
					alt, ed->ued_endpoint_index);
		if (edesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		ed->ued_desc = *edesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_FULL_DESC:
	{
		int len;
		struct iovec iov;
		struct uio uio;
		struct usb_full_desc *fd = (struct usb_full_desc *)addr;
		int error;

		cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &len);
		if (len > fd->ufd_size)
			len = fd->ufd_size;
		iov.iov_base = (void *)fd->ufd_data;
		iov.iov_len = len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_resid = len;
		uio.uio_offset = 0;
		uio.uio_rw = UIO_READ;
		uio.uio_vmspace = l->l_proc->p_vmspace;
		error = uiomove((void *)cdesc, len, &uio);
		free(cdesc, M_TEMP);
		return (error);
	}
	case USB_GET_STRING_DESC: {
		int len;
		si = (struct usb_string_desc *)addr;
		err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
			  si->usd_language_id, &si->usd_desc, &len);
		if (err)
			return (EINVAL);
		break;
	}
	case USB_DO_REQUEST:
	{
		struct usb_ctl_request *ur = (void *)addr;
		int len = UGETW(ur->ucr_request.wLength);
		struct iovec iov;
		struct uio uio;
		void *ptr = 0;
		usbd_status xerr;
		int error = 0;

		if (!(flag & FWRITE))
			return (EPERM);
		/* Avoid requests that would damage the bus integrity. */
		if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_CONFIG) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
		     ur->ucr_request.bRequest == UR_SET_INTERFACE))
			return (EINVAL);

		if (len < 0 || len > 32767)
			return (EINVAL);
		if (len != 0) {
			iov.iov_base = (void *)ur->ucr_data;
			iov.iov_len = len;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_resid = len;
			uio.uio_offset = 0;
			uio.uio_rw =
				ur->ucr_request.bmRequestType & UT_READ ?
				UIO_READ : UIO_WRITE;
			uio.uio_vmspace = l->l_proc->p_vmspace;
			ptr = malloc(len, M_TEMP, M_WAITOK);
			if (uio.uio_rw == UIO_WRITE) {
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
		sce = &sc->sc_endpoints[endpt][IN];
		xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
			  ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
		if (xerr) {
			error = EIO;
			goto ret;
		}
		if (len != 0) {
			if (uio.uio_rw == UIO_READ) {
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
	ret:
		if (ptr)
			free(ptr, M_TEMP);
		return (error);
	}
	case USB_GET_DEVICEINFO:
		usbd_fill_deviceinfo(sc->sc_udev,
				     (struct usb_device_info *)addr, 0);
		break;
#ifdef COMPAT_30
	case USB_GET_DEVICEINFO_OLD:
		usbd_fill_deviceinfo_old(sc->sc_udev,
					 (struct usb_device_info_old *)addr, 0);
		break;
#endif
	default:
		return (EINVAL);
	}
	return (0);
}

int
ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	int error;

	USB_GET_SC(ugen, UGENUNIT(dev), sc);

	sc->sc_refcnt++;
	error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
	if (--sc->sc_refcnt < 0)
		usb_detach_wakeup(USBDEV(sc->sc_dev));
	return (error);
}

int
ugenpoll(dev_t dev, int events, struct lwp *l)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce_in, *sce_out;
	int revents = 0;
	int s;

	USB_GET_SC(ugen, UGENUNIT(dev), sc);

	if (sc->sc_dying)
		return (POLLHUP);

	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
	if (sce_in == NULL && sce_out == NULL)
		return (POLLERR);
#ifdef DIAGNOSTIC
	if (!sce_in->edesc && !sce_out->edesc) {
		printf("ugenpoll: no edesc\n");
		return (POLLERR);
	}
	/* It's possible to have only one pipe open. */
	if (!sce_in->pipeh && !sce_out->pipeh) {
		printf("ugenpoll: no pipe\n");
		return (POLLERR);
	}
#endif
	s = splusb();
	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (sce_in->q.c_cc > 0)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_ISOCHRONOUS:
			if (sce_in->cur != sce_in->fill)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_BULK:
#ifdef UGEN_BULK_RA_WB
			if (sce_in->state & UGEN_BULK_RA) {
				if (sce_in->ra_wb_used > 0)
					revents |= events &
					    (POLLIN | POLLRDNORM);
				else
					selrecord(l, &sce_in->rsel);
				break;
			}
#endif
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLIN | POLLRDNORM);
			break;
		default:
			break;
		}
	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX unimplemented */
			break;
		case UE_BULK:
#ifdef UGEN_BULK_RA_WB
			if (sce_out->state & UGEN_BULK_WB) {
				if (sce_out->ra_wb_used <
				    sce_out->limit - sce_out->ibuf)
					revents |= events &
					    (POLLOUT | POLLWRNORM);
				else
					selrecord(l, &sce_out->rsel);
				break;
			}
#endif
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLOUT | POLLWRNORM);
			break;
		default:
			break;
		}

	splx(s);
	return (revents);
}

static void
filt_ugenrdetach(struct knote *kn)
{
	struct ugen_endpoint *sce = kn->kn_hook;
	int s;

	s = splusb();
	SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
	splx(s);
}

static int
filt_ugenread_intr(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;

	kn->kn_data = sce->q.c_cc;
	return (kn->kn_data > 0);
}

static int
filt_ugenread_isoc(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;

	if (sce->cur == sce->fill)
		return (0);

	if (sce->cur < sce->fill)
		kn->kn_data = sce->fill - sce->cur;
	else
		kn->kn_data = (sce->limit - sce->cur) +
		    (sce->fill - sce->ibuf);

	return (1);
}

#ifdef UGEN_BULK_RA_WB
static int
filt_ugenread_bulk(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;

	if (!(sce->state & UGEN_BULK_RA))
		/*
		 * We have no easy way of determining if a read will
		 * yield any data or a write will happen.
		 * So, emulate "seltrue".
		 */
		return (filt_seltrue(kn, hint));

	if (sce->ra_wb_used == 0)
		return (0);

	kn->kn_data = sce->ra_wb_used;

	return (1);
}

static int
filt_ugenwrite_bulk(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;

	if (!(sce->state & UGEN_BULK_WB))
		/*
		 * We have no easy way of determining if a read will
		 * yield any data or a write will happen.
		 * So, emulate "seltrue".
		 */
		return (filt_seltrue(kn, hint));

	if (sce->ra_wb_used == sce->limit - sce->ibuf)
		return (0);

	kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;

	return (1);
}
#endif

static const struct filterops ugenread_intr_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_intr };

static const struct filterops ugenread_isoc_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_isoc };

#ifdef UGEN_BULK_RA_WB
static const struct filterops ugenread_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_bulk };

static const struct filterops ugenwrite_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenwrite_bulk };
#else
static const struct filterops ugen_seltrue_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_seltrue };
#endif

int
ugenkqfilter(dev_t dev, struct knote *kn)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	struct klist *klist;
	int s;

	USB_GET_SC(ugen, UGENUNIT(dev), sc);

	if (sc->sc_dying)
		return (ENXIO);

	switch (kn->kn_filter) {
	case EVFILT_READ:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
		if (sce == NULL)
			return (EINVAL);

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			kn->kn_fop = &ugenread_intr_filtops;
			break;
		case UE_ISOCHRONOUS:
			kn->kn_fop = &ugenread_isoc_filtops;
			break;
		case UE_BULK:
#ifdef UGEN_BULK_RA_WB
			kn->kn_fop = &ugenread_bulk_filtops;
			break;
#else
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * So, emulate "seltrue".
			 */
			kn->kn_fop = &ugen_seltrue_filtops;
#endif
			break;
		default:
			return (EINVAL);
		}
		break;

	case EVFILT_WRITE:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
		if (sce == NULL)
			return (EINVAL);

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX poll doesn't support this */
			return (EINVAL);

		case UE_BULK:
#ifdef UGEN_BULK_RA_WB
			kn->kn_fop = &ugenwrite_bulk_filtops;
#else
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * So, emulate "seltrue".
			 */
			kn->kn_fop = &ugen_seltrue_filtops;
#endif
			break;
		default:
			return (EINVAL);
		}
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = sce;

	s = splusb();
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	splx(s);

	return (0);
}

#if defined(__FreeBSD__)
DRIVER_MODULE(ugen, uhub, ugen_driver, ugen_devclass, usbd_driver_load, 0);
#endif