/*	$NetBSD: ugen.c,v 1.116 2011/12/23 00:51:46 jakllsch Exp $	*/

/*
 * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Lennart Augustsson (lennart@augustsson.net) at
 * Carlstedt Research & Technology.
 *
 * Copyright (c) 2006 BBN Technologies Corp.  All rights reserved.
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and the Department of the Interior National Business
 * Center under agreement number NBCHC050166.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.116 2011/12/23 00:51:46 jakllsch Exp $");

#include "opt_compat_netbsd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#if defined(__NetBSD__) || defined(__OpenBSD__)
#include <sys/device.h>
#include <sys/ioctl.h>
#elif defined(__FreeBSD__)
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/ioccom.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#endif
#include <sys/conf.h>
#include <sys/tty.h>
#include <sys/file.h>
#include <sys/select.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>

#ifdef UGEN_DEBUG
#define DPRINTF(x)	if (ugendebug) printf x
#define DPRINTFN(n,x)	if (ugendebug>(n)) printf x
int	ugendebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

#define	UGEN_CHUNK	128	/* chunk size for read */
#define	UGEN_IBSIZE	1020	/* buffer size */
#define	UGEN_BBSIZE	1024

#define UGEN_NISOREQS	4	/* number of outstanding xfer requests */
#define UGEN_NISORFRMS	8	/* number of transactions per req */
#define UGEN_NISOFRAMES	(UGEN_NISORFRMS * UGEN_NISOREQS)

#define UGEN_BULK_RA_WB_BUFSIZE	16384		/* default buffer size */
#define UGEN_BULK_RA_WB_BUFMAX	(1 << 20)	/* maximum allowed buffer */

struct ugen_endpoint {
	struct ugen_softc *sc;
	usb_endpoint_descriptor_t *edesc;
	usbd_interface_handle iface;
	int state;
#define	UGEN_ASLP	0x02	/* waiting for data */
#define UGEN_SHORT_OK	0x04	/* short xfers are OK */
#define UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
#define UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
#define UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
	usbd_pipe_handle pipeh;
	struct clist q;
	struct selinfo rsel;
	u_char *ibuf;		/* start of buffer (circular for isoc) */
	u_char *fill;		/* location for input (isoc) */
	u_char *limit;		/* end of circular buffer (isoc) */
	u_char *cur;		/* current read location (isoc) */
	u_int32_t timeout;
	u_int32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
	u_int32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
	u_int32_t ra_wb_used;	 /* how much is in buffer */
	u_int32_t ra_wb_xferlen; /* current xfer length for RA/WB */
	usbd_xfer_handle ra_wb_xfer;
	struct isoreq {
		struct ugen_endpoint *sce;
		usbd_xfer_handle xfer;
		void *dmabuf;
		u_int16_t sizes[UGEN_NISORFRMS];
	} isoreqs[UGEN_NISOREQS];
};

struct ugen_softc {
	device_t sc_dev;		/* base device */
	usbd_device_handle sc_udev;

	char sc_is_open[USB_MAX_ENDPOINTS];
	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
#define OUT 0
#define IN  1

	int sc_refcnt;
	char sc_buffer[UGEN_BBSIZE];
	u_char sc_dying;
};

#if defined(__NetBSD__)
dev_type_open(ugenopen);
dev_type_close(ugenclose);
dev_type_read(ugenread);
dev_type_write(ugenwrite);
dev_type_ioctl(ugenioctl);
dev_type_poll(ugenpoll);
dev_type_kqfilter(ugenkqfilter);

const struct cdevsw ugen_cdevsw = {
	ugenopen, ugenclose, ugenread, ugenwrite, ugenioctl,
	nostop, notty, ugenpoll, nommap, ugenkqfilter, D_OTHER,
};
#elif defined(__OpenBSD__)
cdev_decl(ugen);
#elif defined(__FreeBSD__)
d_open_t  ugenopen;
d_close_t ugenclose;
d_read_t  ugenread;
d_write_t ugenwrite;
d_ioctl_t ugenioctl;
d_poll_t  ugenpoll;

#define UGEN_CDEV_MAJOR	114

Static struct cdevsw ugen_cdevsw = {
	/* open */	ugenopen,
	/* close */	ugenclose,
	/* read */	ugenread,
	/* write */	ugenwrite,
	/* ioctl */	ugenioctl,
	/* poll */	ugenpoll,
	/* mmap */	nommap,
	/* strategy */	nostrategy,
	/* name */	"ugen",
	/* maj */	UGEN_CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	0,
	/* bmaj */	-1
};
#endif

Static void ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr,
		     usbd_status status);
Static void ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
			    usbd_status status);
Static void ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
			     usbd_status status);
Static void ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
			     usbd_status status);
Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
			 void *, int, struct lwp *);
Static int ugen_set_config(struct ugen_softc *sc, int configno);
Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *sc,
					       int index, int *lenp);
Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
Static int ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx);

#define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
#define UGENENDPOINT(n) (minor(n) & 0xf)
#define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))

int	ugen_match(device_t, cfdata_t, void *);
void	ugen_attach(device_t, device_t, void *);
int	ugen_detach(device_t, int);
int	ugen_activate(device_t, enum devact);
extern struct cfdriver ugen_cd;
CFATTACH_DECL_NEW(ugen, sizeof(struct ugen_softc), ugen_match, ugen_attach,
    ugen_detach, ugen_activate);

/* toggle to control attach priority. -1 means "let autoconf decide" */
int ugen_override = -1;

int
ugen_match(device_t parent, cfdata_t match, void *aux)
{
	struct usb_attach_arg *uaa = aux;
	int override;

	if (ugen_override != -1)
		override = ugen_override;
	else
		override = match->cf_flags & 1;

	if (override)
		return (UMATCH_HIGHEST);
	else if (uaa->usegeneric)
		return (UMATCH_GENERIC);
	else
		return (UMATCH_NONE);
}

void
ugen_attach(device_t parent, device_t self, void *aux)
{
	struct ugen_softc *sc = device_private(self);
	struct usb_attach_arg *uaa = aux;
	usbd_device_handle udev;
	char *devinfop;
	usbd_status err;
	int i, dir, conf;

	aprint_naive("\n");
	aprint_normal("\n");

	devinfop = usbd_devinfo_alloc(uaa->device, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_dev = self;
	sc->sc_udev = udev = uaa->device;

	/* First set configuration index 0, the default one for ugen. */
	err = usbd_set_config_index(udev, 0, 0);
	if (err) {
		aprint_error_dev(self,
		    "setting configuration index 0 failed\n");
		sc->sc_dying = 1;
		return;
	}
	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;

	/* Set up all the local state for this configuration. */
	err = ugen_set_config(sc, conf);
	if (err) {
		aprint_error_dev(self, "setting configuration %d failed\n",
		    conf);
		sc->sc_dying = 1;
		return;
	}

#ifdef __FreeBSD__
	{
		static int global_init_done = 0;
		if (!global_init_done) {
			cdevsw_add(&ugen_cdevsw);
			global_init_done = 1;
		}
	}
#endif
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce;

			sce = &sc->sc_endpoints[i][dir];
			selinit(&sce->rsel);
		}
	}

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev,
			   sc->sc_dev);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;
}

Static int
ugen_set_config(struct ugen_softc *sc, int configno)
{
	usbd_device_handle dev = sc->sc_udev;
	usb_config_descriptor_t *cdesc;
	usbd_interface_handle iface;
	usb_endpoint_descriptor_t *ed;
	struct ugen_endpoint *sce;
	u_int8_t niface, nendpt;
	int ifaceno, endptno, endpt;
	usbd_status err;
	int dir;

	DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
		    device_xname(sc->sc_dev), configno, sc));

	/*
	 * We start at 1, not 0, because we don't care whether the
	 * control endpoint is open or not. It is always present.
	 */
	for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
		if (sc->sc_is_open[endptno]) {
			DPRINTFN(1,
			    ("ugen_set_config: %s - endpoint %d is open\n",
			      device_xname(sc->sc_dev), endptno));
			return (USBD_IN_USE);
		}

	/* Avoid setting the current value. */
	cdesc = usbd_get_config_descriptor(dev);
	if (!cdesc || cdesc->bConfigurationValue != configno) {
		err = usbd_set_config_no(dev, configno, 1);
		if (err)
			return (err);
	}

	err = usbd_interface_count(dev, &niface);
	if (err)
		return (err);
	memset(sc->sc_endpoints, 0, sizeof sc->sc_endpoints);
	for (ifaceno = 0; ifaceno < niface; ifaceno++) {
		DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
		err = usbd_device2interface_handle(dev, ifaceno, &iface);
		if (err)
			return (err);
		err = usbd_endpoint_count(iface, &nendpt);
		if (err)
			return (err);
		for (endptno = 0; endptno < nendpt; endptno++) {
			ed = usbd_interface2endpoint_descriptor(iface,endptno);
			KASSERT(ed != NULL);
			endpt = ed->bEndpointAddress;
			dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
			sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
			DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
				    "(%d,%d), sce=%p\n",
				    endptno, endpt, UE_GET_ADDR(endpt),
				    UE_GET_DIR(endpt), sce));
			sce->sc = sc;
			sce->edesc = ed;
			sce->iface = iface;
		}
	}
	return (USBD_NORMAL_COMPLETION);
}

int
ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct ugen_softc *sc;
	int unit = UGENUNIT(dev);
	int endpt = UGENENDPOINT(dev);
	usb_endpoint_descriptor_t *edesc;
	struct ugen_endpoint *sce;
	int dir, isize;
	usbd_status err;
	usbd_xfer_handle xfer;
	void *tbuf;
	int i, j;

	sc = device_lookup_private(&ugen_cd, unit);
	if (sc == NULL)
		return ENXIO;

	DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
		     flag, mode, unit, endpt));

	if (sc == NULL || sc->sc_dying)
		return (ENXIO);

	/* The control endpoint allows multiple opens. */
	if (endpt == USB_CONTROL_ENDPOINT) {
		sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
		return (0);
	}

	if (sc->sc_is_open[endpt])
		return (EBUSY);

	/* Make sure there are pipes for all directions. */
	for (dir = OUT; dir <= IN; dir++) {
		if (flag & (dir == OUT ? FWRITE : FREAD)) {
			sce = &sc->sc_endpoints[endpt][dir];
			if (sce == 0 || sce->edesc == 0)
				return (ENXIO);
		}
	}

	/* Actually open the pipes. */
	/* XXX Should back out properly if it fails. */
	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		sce->state = 0;
		sce->timeout = USBD_NO_TIMEOUT;
		DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
			     sc, endpt, dir, sce));
		edesc = sce->edesc;
		switch (edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (dir == OUT) {
				err = usbd_open_pipe(sce->iface,
				    edesc->bEndpointAddress, 0, &sce->pipeh);
				if (err)
					return (EIO);
				break;
			}
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return (EINVAL);
			sce->ibuf = malloc(isize, M_USBDEV, M_WAITOK);
			DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
				     endpt, isize));
			if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1)
				return (ENOMEM);
			err = usbd_open_pipe_intr(sce->iface,
				  edesc->bEndpointAddress,
				  USBD_SHORT_XFER_OK, &sce->pipeh, sce,
				  sce->ibuf, isize, ugenintr,
				  USBD_DEFAULT_INTERVAL);
			if (err) {
				free(sce->ibuf, M_USBDEV);
				clfree(&sce->q);
				return (EIO);
			}
			DPRINTFN(5, ("ugenopen: interrupt open done\n"));
			break;
		case UE_BULK:
			err = usbd_open_pipe(sce->iface,
				  edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err)
				return (EIO);
			sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
			/*
			 * Use request size for non-RA/WB transfers
			 * as the default.
			 */
			sce->ra_wb_reqsize = UGEN_BBSIZE;
			break;
		case UE_ISOCHRONOUS:
			if (dir == OUT)
				return (EINVAL);
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return (EINVAL);
			sce->ibuf = malloc(isize * UGEN_NISOFRAMES,
				M_USBDEV, M_WAITOK);
			sce->cur = sce->fill = sce->ibuf;
			sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
			DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
				     endpt, isize));
			err = usbd_open_pipe(sce->iface,
				  edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err) {
				free(sce->ibuf, M_USBDEV);
				return (EIO);
			}
			for(i = 0; i < UGEN_NISOREQS; ++i) {
				sce->isoreqs[i].sce = sce;
				xfer = usbd_alloc_xfer(sc->sc_udev);
				if (xfer == 0)
					goto bad;
				sce->isoreqs[i].xfer = xfer;
				tbuf = usbd_alloc_buffer
					(xfer, isize * UGEN_NISORFRMS);
				if (tbuf == 0) {
					i++;
					goto bad;
				}
				sce->isoreqs[i].dmabuf = tbuf;
				for(j = 0; j < UGEN_NISORFRMS; ++j)
					sce->isoreqs[i].sizes[j] = isize;
				usbd_setup_isoc_xfer
					(xfer, sce->pipeh, &sce->isoreqs[i],
					 sce->isoreqs[i].sizes,
					 UGEN_NISORFRMS, USBD_NO_COPY,
					 ugen_isoc_rintr);
				(void)usbd_transfer(xfer);
			}
			DPRINTFN(5, ("ugenopen: isoc open done\n"));
			break;
		bad:
			while (--i >= 0) /* implicit buffer free */
				usbd_free_xfer(sce->isoreqs[i].xfer);
			return (ENOMEM);
		case UE_CONTROL:
			sce->timeout = USBD_DEFAULT_TIMEOUT;
			return (EINVAL);
		}
	}
	sc->sc_is_open[endpt] = 1;
	return (0);
}

int
ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	int dir;
	int i;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
		     flag, mode, UGENUNIT(dev), endpt));

#ifdef DIAGNOSTIC
	if (!sc->sc_is_open[endpt]) {
		printf("ugenclose: not open\n");
		return (EINVAL);
	}
#endif

	if (endpt == USB_CONTROL_ENDPOINT) {
		DPRINTFN(5, ("ugenclose: close control\n"));
		sc->sc_is_open[endpt] = 0;
		return (0);
	}

	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		if (sce == NULL || sce->pipeh == NULL)
			continue;
		DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
			     endpt, dir, sce));

		usbd_abort_pipe(sce->pipeh);
		usbd_close_pipe(sce->pipeh);
		sce->pipeh = NULL;

		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			ndflush(&sce->q, sce->q.c_cc);
			clfree(&sce->q);
			break;
		case UE_ISOCHRONOUS:
			for (i = 0; i < UGEN_NISOREQS; ++i)
				usbd_free_xfer(sce->isoreqs[i].xfer);
			break;
		case UE_BULK:
			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB))
				/* ibuf freed below */
				usbd_free_xfer(sce->ra_wb_xfer);
			break;
		default:
			break;
		}

		if (sce->ibuf != NULL) {
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
	}
	sc->sc_is_open[endpt] = 0;

	return (0);
}

Static int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
	u_int32_t n, tn;
	usbd_xfer_handle xfer;
	usbd_status err;
	int s;
	int error = 0;

	DPRINTFN(5, ("%s: ugenread: %d\n", device_xname(sc->sc_dev), endpt));

	if (sc->sc_dying)
		return (EIO);

	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenread: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenread: no pipe\n");
		return (EIO);
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_INTERRUPT:
		/* Block until activity occurred. */
		s = splusb();
		while (sce->q.c_cc == 0) {
			if (flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = tsleep(sce, PZERO | PCATCH, "ugenri",
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}
		splx(s);

		/* Transfer as many chunks as possible. */
		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
			n = min(sce->q.c_cc, uio->uio_resid);
			if (n > sizeof(sc->sc_buffer))
				n = sizeof(sc->sc_buffer);

			/* Remove a small chunk from the input queue. */
			q_to_b(&sce->q, sc->sc_buffer, n);
			DPRINTFN(5, ("ugenread: got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
		}
		break;
	case UE_BULK:
		if (sce->state & UGEN_BULK_RA) {
			DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			s = splusb();
			if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			while (uio->uio_resid > 0 && !error) {
				while (sce->ra_wb_used == 0) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenread: sleep on %p\n",
						  sce));
					error = tsleep(sce, PZERO | PCATCH,
					    "ugenrb", mstohz(sce->timeout));
					DPRINTFN(5,
						 ("ugenread: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data to the process. */
				while (uio->uio_resid > 0
				       && sce->ra_wb_used > 0) {
					n = min(uio->uio_resid,
						sce->ra_wb_used);
					n = min(n, sce->limit - sce->cur);
					error = uiomove(sce->cur, n, uio);
					if (error)
						break;
					sce->cur += n;
					sce->ra_wb_used -= n;
					if (sce->cur == sce->limit)
						sce->cur = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was full, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = (sce->limit - sce->ibuf)
					    - sce->ra_wb_used;
					usbd_setup_xfer(xfer,
					    sce->pipeh, sce, NULL,
					    min(n, sce->ra_wb_xferlen),
					    USBD_NO_COPY, USBD_NO_TIMEOUT,
					    ugen_bulkra_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try
						 * again at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			splx(s);
			break;
		}
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (ENOMEM);
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
			tn = n;
			err = usbd_bulk_transfer(
				  xfer, sce->pipeh,
				  sce->state & UGEN_SHORT_OK ?
				      USBD_SHORT_XFER_OK : 0,
				  sce->timeout, sc->sc_buffer, &tn, "ugenrb");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
			DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
			error = uiomove(sc->sc_buffer, tn, uio);
			if (error || tn < n)
				break;
		}
		usbd_free_xfer(xfer);
		break;
	case UE_ISOCHRONOUS:
		s = splusb();
		while (sce->cur == sce->fill) {
			if (flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = tsleep(sce, PZERO | PCATCH, "ugenri",
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}

		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
			if(sce->fill > sce->cur)
				n = min(sce->fill - sce->cur, uio->uio_resid);
			else
				n = min(sce->limit - sce->cur, uio->uio_resid);

			DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sce->cur, n, uio);
			if (error)
				break;
			sce->cur += n;
			if(sce->cur >= sce->limit)
				sce->cur = sce->ibuf;
		}
		splx(s);
		break;


	default:
		return (ENXIO);
	}
	return (error);
}

int
ugenread(dev_t dev, struct uio *uio, int flag)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	int error;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	sc->sc_refcnt++;
	error = ugen_do_read(sc, endpt, uio, flag);
	if (--sc->sc_refcnt < 0)
		usb_detach_wakeup(sc->sc_dev);
	return (error);
}

Static int
ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
	int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
	u_int32_t n;
	int error = 0;
	int s;
	u_int32_t tn;
	char *dbuf;
	usbd_xfer_handle xfer;
	usbd_status err;

	DPRINTFN(5, ("%s: ugenwrite: %d\n", device_xname(sc->sc_dev), endpt));

	if (sc->sc_dying)
		return (EIO);

	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenwrite: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenwrite: no pipe\n");
		return (EIO);
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_BULK:
		if (sce->state & UGEN_BULK_WB) {
			DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			s = splusb();
			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
			    flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			while (uio->uio_resid > 0 && !error) {
				while (sce->ra_wb_used ==
				       sce->limit - sce->ibuf) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenwrite: sleep on %p\n",
						  sce));
					error = tsleep(sce, PZERO | PCATCH,
					    "ugenwb", mstohz(sce->timeout));
					DPRINTFN(5,
						 ("ugenwrite: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data from the process. */
				while (uio->uio_resid > 0 &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = min(uio->uio_resid,
						(sce->limit - sce->ibuf)
						 - sce->ra_wb_used);
					n = min(n, sce->limit - sce->fill);
					error = uiomove(sce->fill, n, uio);
					if (error)
						break;
					sce->fill += n;
					sce->ra_wb_used += n;
					if (sce->fill == sce->limit)
						sce->fill = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was empty, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used > 0) {
					dbuf = (char *)usbd_get_buffer(xfer);
					n = min(sce->ra_wb_used,
						sce->ra_wb_xferlen);
					tn = min(n, sce->limit - sce->cur);
					memcpy(dbuf, sce->cur, tn);
					dbuf += tn;
					if (n - tn > 0)
						memcpy(dbuf, sce->ibuf,
						       n - tn);
					usbd_setup_xfer(xfer,
					    sce->pipeh, sce, NULL, n,
					    USBD_NO_COPY, USBD_NO_TIMEOUT,
					    ugen_bulkwb_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try again
						 * at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			splx(s);
			break;
		}
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (EIO);
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_bulk_transfer(xfer, sce->pipeh, 0,
				  sce->timeout, sc->sc_buffer, &n,"ugenwb");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_free_xfer(xfer);
		break;
	case UE_INTERRUPT:
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (EIO);
		while ((n = min(UGETW(sce->edesc->wMaxPacketSize),
		    uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_intr_transfer(xfer, sce->pipeh, 0,
			    sce->timeout, sc->sc_buffer, &n, "ugenwi");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_free_xfer(xfer);
		break;
	default:
		return (ENXIO);
	}
	return (error);
}

int
ugenwrite(dev_t dev, struct uio *uio, int flag)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	int error;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	sc->sc_refcnt++;
	error = ugen_do_write(sc, endpt, uio, flag);
	if (--sc->sc_refcnt < 0)
		usb_detach_wakeup(sc->sc_dev);
	return (error);
}

#if defined(__NetBSD__) || defined(__OpenBSD__)
int
ugen_activate(device_t self, enum devact act)
{
	struct ugen_softc *sc = device_private(self);

	switch (act) {
	case DVACT_DEACTIVATE:
		sc->sc_dying = 1;
		return 0;
	default:
		return EOPNOTSUPP;
	}
}
#endif

int
ugen_detach(device_t self, int flags)
{
	struct ugen_softc *sc = device_private(self);
	struct ugen_endpoint *sce;
	int i, dir;
	int s;
#if defined(__NetBSD__) || defined(__OpenBSD__)
	int maj, mn;

	DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));
#elif defined(__FreeBSD__)
	DPRINTF(("ugen_detach: sc=%p\n", sc));
#endif

	sc->sc_dying = 1;
	pmf_device_deregister(self);
	/* Abort all pipes.  Causes processes waiting for transfer to wake. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			if (sce && sce->pipeh)
				usbd_abort_pipe(sce->pipeh);
		}
	}

	s = splusb();
	if (--sc->sc_refcnt >= 0) {
		/* Wake everyone */
		for (i = 0; i < USB_MAX_ENDPOINTS; i++)
			wakeup(&sc->sc_endpoints[i][IN]);
		/* Wait for processes to go away. */
		usb_detach_wait(sc->sc_dev);
	}
	splx(s);

#if defined(__NetBSD__) || defined(__OpenBSD__)
	/* locate the major number */
#if defined(__NetBSD__)
	maj = cdevsw_lookup_major(&ugen_cdevsw);
#elif defined(__OpenBSD__)
	for (maj = 0; maj < nchrdev; maj++)
		if (cdevsw[maj].d_open == ugenopen)
			break;
#endif

	/* Nuke the vnodes for any open instances (calls close). */
	mn = device_unit(self) * USB_MAX_ENDPOINTS;
	vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);
#elif defined(__FreeBSD__)
	/* XXX not implemented yet */
#endif

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev,
			   sc->sc_dev);

	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			seldestroy(&sce->rsel);
		}
	}

	return (0);
}

Static void
ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr, usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	/*struct ugen_softc *sc = sce->sc;*/
	u_int32_t count;
	u_char *ibuf;

	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugenintr: status=%d\n", status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	ibuf = sce->ibuf;

	DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
		     xfer, status, count));
	DPRINTFN(5, ("          data = %02x %02x %02x\n",
		     ibuf[0], ibuf[1], ibuf[2]));

	(void)b_to_q(ibuf, count, &sce->q);

	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_intr: waking %p\n", sce));
		wakeup(sce);
	}
	selnotify(&sce->rsel, 0, 0);
}

Static void
ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
		usbd_status status)
{
	struct isoreq *req = addr;
	struct ugen_endpoint *sce = req->sce;
	u_int32_t count, n;
	int i, isize;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
	    (long)(req - sce->isoreqs), count));

	/* throw away oldest input if the buffer is full */
	if(sce->fill < sce->cur && sce->cur <= sce->fill + count) {
		sce->cur += count;
		if(sce->cur >= sce->limit)
			sce->cur = sce->ibuf + (sce->limit - sce->cur);
		DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
			     count));
	}

	isize = UGETW(sce->edesc->wMaxPacketSize);
	for (i = 0; i < UGEN_NISORFRMS; i++) {
		u_int32_t actlen = req->sizes[i];
		char const *tbuf = (char const *)req->dmabuf + isize * i;

		/* copy data to buffer */
		while (actlen > 0) {
			n = min(actlen, sce->limit - sce->fill);
			memcpy(sce->fill, tbuf, n);

			tbuf += n;
			actlen -= n;
			sce->fill += n;
			if(sce->fill == sce->limit)
				sce->fill = sce->ibuf;
		}

		/* setup size for next transfer */
		req->sizes[i] = isize;
	}

	usbd_setup_isoc_xfer(xfer, sce->pipeh, req, req->sizes, UGEN_NISORFRMS,
			     USBD_NO_COPY, ugen_isoc_rintr);
	(void)usbd_transfer(xfer);

	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce));
		wakeup(sce);
	}
	selnotify(&sce->rsel, 0, 0);
}

Static void
ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	u_int32_t count, n;
	char const *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used += count;

	/* Copy data to buffer. */
	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
	n = min(count, sce->limit - sce->fill);
	memcpy(sce->fill, tbuf, n);
	tbuf += n;
	count -= n;
	sce->fill += n;
	if (sce->fill == sce->limit)
		sce->fill = sce->ibuf;
	if (count > 0) {
		memcpy(sce->fill, tbuf, count);
		sce->fill += count;
	}

	/* Set up the next request if necessary. */
	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
	if (n > 0) {
		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
		    min(n, sce->ra_wb_xferlen), USBD_NO_COPY,
		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkra_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next read.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		sce->state |= UGEN_RA_WB_STOP;

	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce));
		wakeup(sce);
	}
	selnotify(&sce->rsel, 0, 0);
}

Static void
ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	u_int32_t count, n;
	char *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used -= count;

	/* Update buffer pointers. */
	sce->cur += count;
	if (sce->cur >= sce->limit)
		sce->cur = sce->ibuf + (sce->cur - sce->limit);

	/* Set up next request if necessary. */
	if (sce->ra_wb_used > 0) {
		/* copy data from buffer */
		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
		count = min(sce->ra_wb_used, sce->ra_wb_xferlen);
		n = min(count, sce->limit - sce->cur);
		memcpy(tbuf, sce->cur, n);
		tbuf += n;
		if (count - n > 0)
			memcpy(tbuf, sce->ibuf, count - n);

		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
		    count, USBD_NO_COPY, USBD_NO_TIMEOUT, ugen_bulkwb_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkwb_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next write.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		sce->state |= UGEN_RA_WB_STOP;

	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce));
		wakeup(sce);
	}
	selnotify(&sce->rsel, 0, 0);
}

Static usbd_status
ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
{
	usbd_interface_handle iface;
	usb_endpoint_descriptor_t *ed;
	usbd_status err;
	struct ugen_endpoint *sce;
	u_int8_t niface, nendpt, endptno, endpt;
	int dir;

	DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));

	err = usbd_interface_count(sc->sc_udev, &niface);
	if (err)
		return (err);
	if (ifaceidx < 0 || ifaceidx >= niface)
		return (USBD_INVAL);

	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
	if (err)
		return (err);
	err = usbd_endpoint_count(iface, &nendpt);
	if (err)
		return (err);
	/* XXX should only do this after setting new altno has succeeded */
	for (endptno = 0; endptno < nendpt; endptno++) {
		ed = usbd_interface2endpoint_descriptor(iface,endptno);
		endpt = ed->bEndpointAddress;
		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
		sce->sc = 0;
		sce->edesc = 0;
		sce->iface = 0;
	}

	/* change setting */
	err = usbd_set_interface(iface, altno);
	if (err)
		return (err);

	err = usbd_endpoint_count(iface, &nendpt);
	if (err)
		return (err);
	for (endptno = 0; endptno < nendpt; endptno++) {
		ed = usbd_interface2endpoint_descriptor(iface,endptno);
		KASSERT(ed != NULL);
		endpt = ed->bEndpointAddress;
		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
		sce->sc = sc;
		sce->edesc = ed;
		sce->iface = iface;
	}
	return (0);
}

/* Retrieve a complete descriptor for a certain device and index. */
Static usb_config_descriptor_t *
ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
{
	usb_config_descriptor_t *cdesc, *tdesc, cdescr;
	int len;
	usbd_status err;

	if (index == USB_CURRENT_CONFIG_INDEX) {
		tdesc = usbd_get_config_descriptor(sc->sc_udev);
		len = UGETW(tdesc->wTotalLength);
		if (lenp)
			*lenp = len;
		cdesc = malloc(len, M_TEMP, M_WAITOK);
		memcpy(cdesc, tdesc, len);
		DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
	} else {
		err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
		if (err)
			return (0);
		len = UGETW(cdescr.wTotalLength);
		DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
		if (lenp)
			*lenp = len;
		cdesc = malloc(len, M_TEMP, M_WAITOK);
		err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
		if (err) {
			free(cdesc, M_TEMP);
			return (0);
		}
	}
	return (cdesc);
}

Static int
ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
{
	usbd_interface_handle iface;
	usbd_status err;

	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
	if (err)
		return (-1);
	return (usbd_get_interface_altindex(iface));
}

Static int
ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
	      void *addr, int flag, struct lwp *l)
{
	struct ugen_endpoint *sce;
	usbd_status err;
	usbd_interface_handle iface;
	struct usb_config_desc *cd;
	usb_config_descriptor_t *cdesc;
	struct usb_interface_desc *id;
	usb_interface_descriptor_t *idesc;
	struct usb_endpoint_desc *ed;
	usb_endpoint_descriptor_t *edesc;
	struct usb_alt_interface *ai;
	struct usb_string_desc *si;
	u_int8_t conf, alt;

	DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
	if (sc->sc_dying)
		return (EIO);

	switch (cmd) {
	case FIONBIO:
		/* All handled in the upper FS layer. */
		return (0);
	case USB_SET_SHORT_XFER:
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		/* This flag only affects read */
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		if (*(int *)addr)
			sce->state |= UGEN_SHORT_OK;
		else
			sce->state &= ~UGEN_SHORT_OK;
		return (0);
	case USB_SET_TIMEOUT:
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL
		    /* XXX this shouldn't happen, but the distinction between
		       input and output pipes isn't clear enough.
		       || sce->pipeh == NULL */
		    )
			return (EINVAL);
		sce->timeout = *(int *)addr;
		return (0);
	case USB_SET_BULK_RA:
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return (EINVAL);

		if (*(int *)addr) {
			/* Only turn RA on if it's currently off. */
			if (sce->state & UGEN_BULK_RA)
				return (0);

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return (EINVAL);
			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (sce->ra_wb_xfer == NULL)
				return (ENOMEM);
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			/*
			 * Set up a dmabuf because we reuse the xfer with
			 * the same (max) request length like isoc.
			 */
			if (usbd_alloc_buffer(sce->ra_wb_xfer,
					      sce->ra_wb_xferlen) == 0) {
				usbd_free_xfer(sce->ra_wb_xfer);
				return (ENOMEM);
			}
			sce->ibuf = malloc(sce->ra_wb_bufsize,
					   M_USBDEV, M_WAITOK);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			sce->state |= UGEN_BULK_RA;
			sce->state &= ~UGEN_RA_WB_STOP;
			/* Now start reading. */
			usbd_setup_xfer(sce->ra_wb_xfer, sce->pipeh, sce,
			    NULL,
			    min(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
			    USBD_NO_COPY, USBD_NO_TIMEOUT,
			    ugen_bulkra_intr);
			err = usbd_transfer(sce->ra_wb_xfer);
			if (err != USBD_IN_PROGRESS) {
				sce->state &= ~UGEN_BULK_RA;
				free(sce->ibuf, M_USBDEV);
				sce->ibuf = NULL;
				usbd_free_xfer(sce->ra_wb_xfer);
				return (EIO);
			}
		} else {
			/* Only turn RA off if it's currently on. */
			if (!(sce->state & UGEN_BULK_RA))
				return (0);

			sce->state &= ~UGEN_BULK_RA;
			usbd_abort_pipe(sce->pipeh);
			usbd_free_xfer(sce->ra_wb_xfer);
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and drain the buffer
			 * instead.
			 */
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
		return (0);
	case USB_SET_BULK_WB:
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return (EINVAL);

		if (*(int *)addr) {
			/* Only turn WB on if it's currently off. */
			if (sce->state & UGEN_BULK_WB)
				return (0);

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return (EINVAL);
			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (sce->ra_wb_xfer == NULL)
				return (ENOMEM);
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			/*
			 * Set up a dmabuf because we reuse the xfer with
			 * the same (max) request length like isoc.
			 */
			if (usbd_alloc_buffer(sce->ra_wb_xfer,
					      sce->ra_wb_xferlen) == 0) {
				usbd_free_xfer(sce->ra_wb_xfer);
				return (ENOMEM);
			}
			sce->ibuf = malloc(sce->ra_wb_bufsize,
					   M_USBDEV, M_WAITOK);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
		} else {
			/* Only turn WB off if it's currently on. */
			if (!(sce->state & UGEN_BULK_WB))
				return (0);

			sce->state &= ~UGEN_BULK_WB;
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and keep writing to
			 * drain the buffer instead.
			 */
			usbd_abort_pipe(sce->pipeh);
			usbd_free_xfer(sce->ra_wb_xfer);
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
		return (0);
	case USB_SET_BULK_RA_OPT:
	case USB_SET_BULK_WB_OPT:
	{
		struct usb_bulk_ra_wb_opt *opt;

		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		opt = (struct usb_bulk_ra_wb_opt *)addr;
		if (cmd == USB_SET_BULK_RA_OPT)
			sce = &sc->sc_endpoints[endpt][IN];
		else
			sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		if (opt->ra_wb_buffer_size < 1 ||
		    opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
		    opt->ra_wb_request_size < 1 ||
		    opt->ra_wb_request_size > opt->ra_wb_buffer_size)
			return (EINVAL);
		/*
		 * XXX These changes do not take effect until the
		 * next time RA/WB mode is enabled but they ought to
		 * take effect immediately.
		 */
		sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
		sce->ra_wb_reqsize = opt->ra_wb_request_size;
		return (0);
	}
	default:
		break;
	}

	if (endpt != USB_CONTROL_ENDPOINT)
		return (EINVAL);

	switch (cmd) {
#ifdef UGEN_DEBUG
	case USB_SETDEBUG:
		ugendebug = *(int *)addr;
		break;
#endif
	case USB_GET_CONFIG:
		err = usbd_get_config(sc->sc_udev, &conf);
		if (err)
			return (EIO);
		*(int *)addr = conf;
		break;
	case USB_SET_CONFIG:
		if (!(flag & FWRITE))
			return (EPERM);
		err = ugen_set_config(sc, *(int *)addr);
		switch (err) {
		case USBD_NORMAL_COMPLETION:
			break;
		case USBD_IN_USE:
			return (EBUSY);
		default:
			return (EIO);
		}
		break;
	case USB_GET_ALTINTERFACE:
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		idesc = usbd_get_interface_descriptor(iface);
		if (idesc == NULL)
			return (EIO);
		ai->uai_alt_no = idesc->bAlternateSetting;
		break;
	case USB_SET_ALTINTERFACE:
		if (!(flag & FWRITE))
			return (EPERM);
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		err = ugen_set_interface(sc, ai->uai_interface_index,
		    ai->uai_alt_no);
		if (err)
			return (EINVAL);
		break;
	case USB_GET_NO_ALT:
		ai = (struct usb_alt_interface *)addr;
		cdesc = ugen_get_cdesc(sc, ai->uai_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
		if (idesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		ai->uai_alt_no = usbd_get_no_alts(cdesc,
		    idesc->bInterfaceNumber);
		free(cdesc, M_TEMP);
		break;
	case USB_GET_DEVICE_DESC:
		*(usb_device_descriptor_t *)addr =
			*usbd_get_device_descriptor(sc->sc_udev);
		break;
	case USB_GET_CONFIG_DESC:
		cd = (struct usb_config_desc *)addr;
		cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		cd->ucd_desc = *cdesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_INTERFACE_DESC:
		id = (struct usb_interface_desc *)addr;
		cdesc = ugen_get_cdesc(sc, id->uid_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
		    id->uid_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, id->uid_interface_index);
		else
			alt = id->uid_alt_index;
		idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
		if (idesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		id->uid_desc = *idesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_ENDPOINT_DESC:
		ed = (struct usb_endpoint_desc *)addr;
		cdesc = ugen_get_cdesc(sc, ed->ued_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
		    ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, ed->ued_interface_index);
		else
			alt = ed->ued_alt_index;
		edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
					alt, ed->ued_endpoint_index);
		if (edesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		ed->ued_desc = *edesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_FULL_DESC:
	{
		int len;
		struct iovec iov;
		struct uio uio;
		struct usb_full_desc *fd = (struct usb_full_desc *)addr;
		int error;

		cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &len);
		if (cdesc == NULL)
			return (EINVAL);
		if (len > fd->ufd_size)
			len = fd->ufd_size;
		iov.iov_base = (void *)fd->ufd_data;
		iov.iov_len = len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_resid = len;
		uio.uio_offset = 0;
		uio.uio_rw = UIO_READ;
		uio.uio_vmspace = l->l_proc->p_vmspace;
		error = uiomove((void *)cdesc, len, &uio);
		free(cdesc, M_TEMP);
		return (error);
	}
	case USB_GET_STRING_DESC: {
		int len;
		si = (struct usb_string_desc *)addr;
		err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
			  si->usd_language_id, &si->usd_desc, &len);
		if (err)
			return (EINVAL);
		break;
	}
	case USB_DO_REQUEST:
	{
		struct usb_ctl_request *ur = (void *)addr;
		int len = UGETW(ur->ucr_request.wLength);
		struct iovec iov;
		struct uio uio;
		void *ptr = 0;
		usbd_status xerr;
		int error = 0;

		if (!(flag & FWRITE))
			return (EPERM);
		/* Avoid requests that would damage the bus integrity. */
		if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_CONFIG) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
		     ur->ucr_request.bRequest == UR_SET_INTERFACE))
			return (EINVAL);

		if (len < 0 || len > 32767)
			return (EINVAL);
		if (len != 0) {
			iov.iov_base = (void *)ur->ucr_data;
			iov.iov_len = len;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_resid = len;
			uio.uio_offset = 0;
			uio.uio_rw =
				ur->ucr_request.bmRequestType & UT_READ ?
				UIO_READ : UIO_WRITE;
			uio.uio_vmspace = l->l_proc->p_vmspace;
			ptr = malloc(len, M_TEMP, M_WAITOK);
			if (uio.uio_rw == UIO_WRITE) {
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
		sce = &sc->sc_endpoints[endpt][IN];
		xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
			  ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
		if (xerr) {
			error = EIO;
			goto ret;
		}
		if (len != 0) {
			if (uio.uio_rw == UIO_READ) {
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
	ret:
		if (ptr)
			free(ptr, M_TEMP);
		return (error);
	}
	case USB_GET_DEVICEINFO:
		usbd_fill_deviceinfo(sc->sc_udev,
				     (struct usb_device_info *)addr, 0);
		break;
#ifdef COMPAT_30
	case USB_GET_DEVICEINFO_OLD:
		usbd_fill_deviceinfo_old(sc->sc_udev,
					 (struct usb_device_info_old *)addr, 0);
		break;
#endif
	default:
		return (EINVAL);
	}
	return (0);
}

int
ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	int error;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	sc->sc_refcnt++;
	error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
	if (--sc->sc_refcnt < 0)
		usb_detach_wakeup(sc->sc_dev);
	return (error);
}

int
ugenpoll(dev_t dev, int events, struct lwp *l)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce_in, *sce_out;
	int revents = 0;
	int s;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (sc->sc_dying)
		return (POLLHUP);

	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
	if (sce_in == NULL && sce_out == NULL)
		return (POLLERR);
#ifdef DIAGNOSTIC
	if (!sce_in->edesc && !sce_out->edesc) {
		printf("ugenpoll: no edesc\n");
		return (POLLERR);
	}
	/* It's possible to have only one pipe open. */
	if (!sce_in->pipeh && !sce_out->pipeh) {
		printf("ugenpoll: no pipe\n");
		return (POLLERR);
	}
#endif
	s = splusb();
	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (sce_in->q.c_cc > 0)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_ISOCHRONOUS:
			if (sce_in->cur != sce_in->fill)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_BULK:
			if (sce_in->state & UGEN_BULK_RA) {
				if (sce_in->ra_wb_used > 0)
					revents |= events &
					    (POLLIN | POLLRDNORM);
				else
					selrecord(l, &sce_in->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLIN | POLLRDNORM);
			break;
		default:
			break;
		}
	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX unimplemented */
			break;
		case UE_BULK:
			if (sce_out->state & UGEN_BULK_WB) {
				if (sce_out->ra_wb_used <
				    sce_out->limit - sce_out->ibuf)
					revents |= events &
					    (POLLOUT | POLLWRNORM);
				else
					selrecord(l, &sce_out->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLOUT | POLLWRNORM);
			break;
		default:
			break;
		}

	splx(s);
	return (revents);
}

static void
filt_ugenrdetach(struct knote *kn)
{
	struct ugen_endpoint *sce = kn->kn_hook;
	int s;

	s = splusb();
	SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
	splx(s);
}

static int
filt_ugenread_intr(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;

	kn->kn_data = sce->q.c_cc;
	return (kn->kn_data > 0);
}

static int
filt_ugenread_isoc(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;

	if (sce->cur == sce->fill)
		return (0);

	if (sce->cur < sce->fill)
		kn->kn_data = sce->fill - sce->cur;
	else
		kn->kn_data = (sce->limit - sce->cur) +
		    (sce->fill - sce->ibuf);

	return (1);
}

static int
filt_ugenread_bulk(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;

	if (!(sce->state & UGEN_BULK_RA))
		/*
		 * We have no easy way of determining if a read will
		 * yield any data or a write will happen.
		 * So, emulate "seltrue".
		 */
		return (filt_seltrue(kn, hint));

	if (sce->ra_wb_used == 0)
		return (0);

	kn->kn_data = sce->ra_wb_used;

	return (1);
}

static int
filt_ugenwrite_bulk(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;

	if (!(sce->state & UGEN_BULK_WB))
		/*
		 * We have no easy way of determining if a read will
		 * yield any data or a write will happen.
		 * So, emulate "seltrue".
		 */
		return (filt_seltrue(kn, hint));

	if (sce->ra_wb_used == sce->limit - sce->ibuf)
		return (0);

	kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;

	return (1);
}

static const struct filterops ugenread_intr_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_intr };

static const struct filterops ugenread_isoc_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_isoc };

static const struct filterops ugenread_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_bulk };

static const struct filterops ugenwrite_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenwrite_bulk };

int
ugenkqfilter(dev_t dev, struct knote *kn)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	struct klist *klist;
	int s;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (sc->sc_dying)
		return (ENXIO);

	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
		if (sce == NULL)
			return (EINVAL);

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			kn->kn_fop = &ugenread_intr_filtops;
			break;
		case UE_ISOCHRONOUS:
			kn->kn_fop = &ugenread_isoc_filtops;
			break;
		case UE_BULK:
			kn->kn_fop = &ugenread_bulk_filtops;
			break;
		default:
			return (EINVAL);
		}
		break;

	case EVFILT_WRITE:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
		if (sce == NULL)
			return (EINVAL);

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX poll doesn't support this */
			return (EINVAL);

		case UE_BULK:
			kn->kn_fop = &ugenwrite_bulk_filtops;
			break;
		default:
			return (EINVAL);
		}
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = sce;

	s = splusb();
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	splx(s);

	return (0);
}

#if defined(__FreeBSD__)
DRIVER_MODULE(ugen, uhub, ugen_driver, ugen_devclass, usbd_driver_load, 0);
#endif