/*	$NetBSD: udsir.c,v 1.11 2020/03/14 02:35:33 christos Exp $	*/

/*
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by David Sainty <dsainty@NetBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: udsir.c,v 1.11 2020/03/14 02:35:33 christos Exp $");

#include <sys/param.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/proc.h>
#include <sys/kthread.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdevs.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>

#include <dev/ir/ir.h>
#include <dev/ir/irdaio.h>
#include <dev/ir/irframevar.h>
#include <dev/ir/sir.h>

#ifdef UDSIR_DEBUG
#define DPRINTFN(n,x)	if (udsirdebug > (n)) printf x
int udsirdebug = 0;
#else
#define DPRINTFN(n,x)
#endif

/* Max size with framing. */
#define MAX_UDSIR_OUTPUT_FRAME	(2 * IRDA_MAX_FRAME_SIZE + IRDA_MAX_EBOFS + 4)

struct udsir_softc {
	device_t		sc_dev;
	struct usbd_device	*sc_udev;
	struct usbd_interface	*sc_iface;

	uint8_t			*sc_ur_buf;	/* Unencapsulated frame */
	u_int			sc_ur_framelen;

	uint8_t			*sc_rd_buf;	/* Raw incoming data stream */
	int			sc_rd_maxpsz;
	size_t			sc_rd_index;
	int			sc_rd_addr;
	struct usbd_pipe	*sc_rd_pipe;
	struct usbd_xfer	*sc_rd_xfer;
	u_int			sc_rd_count;
	int			sc_rd_readinprogress;
	int			sc_rd_expectdataticks;
	u_char			sc_rd_err;
	struct framestate	sc_framestate;
	struct lwp		*sc_thread;
	struct selinfo		sc_rd_sel;

	uint8_t			*sc_wr_buf;
	int			sc_wr_maxpsz;
	int			sc_wr_addr;
	int			sc_wr_stalewrite;
	struct usbd_xfer	*sc_wr_xfer;
	struct usbd_pipe	*sc_wr_pipe;
	struct selinfo		sc_wr_sel;

	enum {
		udir_input,	/* Receiving data */
		udir_output,	/* Transmitting data */
		udir_stalled,	/* Error preventing data flow */
		udir_idle	/* Neither receiving nor transmitting */
	}			sc_direction;

	device_t		sc_child;
	struct irda_params	sc_params;

	int			sc_refcnt;
	char			sc_closing;
	char			sc_dying;
};

/* True if we cannot safely read data from the device */
#define UDSIR_BLOCK_RX_DATA(sc)	((sc)->sc_ur_framelen != 0)

#define UDSIR_WR_TIMEOUT	200

static int udsir_match(device_t, cfdata_t, void *);
static void udsir_attach(device_t, device_t, void *);
static int udsir_detach(device_t, int);
static void udsir_childdet(device_t, device_t);
static int udsir_activate(device_t, enum devact);

static int udsir_open(void *, int, int, struct lwp *);
static int udsir_close(void *, int, int, struct lwp *);
static int udsir_read(void *, struct uio *, int);
static int udsir_write(void *, struct uio *, int);
static int udsir_poll(void *, int, struct lwp *);
static int udsir_kqfilter(void *, struct knote *);
static int udsir_set_params(void *, struct irda_params *);
static int udsir_get_speeds(void *, int *);
static int udsir_get_turnarounds(void *, int *);

static void filt_udsirrdetach(struct knote *);
static int filt_udsirread(struct knote *, long);
static void filt_udsirwdetach(struct knote *);
static int filt_udsirwrite(struct knote *, long);

static void udsir_thread(void *);

#ifdef UDSIR_DEBUG
static void udsir_dumpdata(uint8_t const *, size_t, char const *);
#endif
static int deframe_rd_ur(struct udsir_softc *);
static void udsir_periodic(struct udsir_softc *);
static void udsir_rd_cb(struct usbd_xfer *, void *, usbd_status);
static usbd_status udsir_start_read(struct udsir_softc *);

CFATTACH_DECL2_NEW(udsir, sizeof(struct udsir_softc),
    udsir_match, udsir_attach, udsir_detach,
    udsir_activate, NULL, udsir_childdet);

static struct irframe_methods const udsir_methods = {
	udsir_open, udsir_close, udsir_read, udsir_write, udsir_poll,
	udsir_kqfilter, udsir_set_params, udsir_get_speeds, udsir_get_turnarounds,
};

static int
udsir_match(device_t parent, cfdata_t match, void *aux)
{
	struct usbif_attach_arg *uiaa = aux;

	DPRINTFN(50, ("udsir_match\n"));

	if (uiaa->uiaa_vendor == USB_VENDOR_KINGSUN &&
	    uiaa->uiaa_product == USB_PRODUCT_KINGSUN_IRDA)
		return UMATCH_VENDOR_PRODUCT;

	return UMATCH_NONE;
}

static void
udsir_attach(device_t parent, device_t self, void *aux)
{
	struct udsir_softc *sc = device_private(self);
	struct usbif_attach_arg *uiaa = aux;
	struct usbd_device *dev = uiaa->uiaa_device;
	struct usbd_interface *iface = uiaa->uiaa_iface;
	char *devinfop;
	usb_endpoint_descriptor_t *ed;
	uint8_t epcount;
	int i;
	struct ir_attach_args ia;

	DPRINTFN(10, ("udsir_attach: sc=%p\n", sc));

	sc->sc_dev = self;

	aprint_naive("\n");
	aprint_normal("\n");

	devinfop = usbd_devinfo_alloc(dev, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_udev = dev;
	sc->sc_iface = iface;

	epcount = 0;
	(void)usbd_endpoint_count(iface, &epcount);

	sc->sc_rd_addr = -1;
	sc->sc_wr_addr = -1;
	for (i = 0; i < epcount; i++) {
		ed = usbd_interface2endpoint_descriptor(iface, i);
		if (ed == NULL) {
			aprint_error_dev(self, "couldn't get ep %d\n", i);
			return;
		}
		if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_INTERRUPT) {
			sc->sc_rd_addr = ed->bEndpointAddress;
			sc->sc_rd_maxpsz = UGETW(ed->wMaxPacketSize);
		} else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_INTERRUPT) {
			sc->sc_wr_addr = ed->bEndpointAddress;
			sc->sc_wr_maxpsz = UGETW(ed->wMaxPacketSize);
		}
	}
	if (sc->sc_rd_addr == -1 || sc->sc_wr_addr == -1) {
		aprint_error_dev(self, "missing endpoint\n");
		return;
	}

	DPRINTFN(10, ("udsir_attach: %p\n", sc->sc_udev));

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev, sc->sc_dev);

	ia.ia_type = IR_TYPE_IRFRAME;
	ia.ia_methods = &udsir_methods;
	ia.ia_handle = sc;

	sc->sc_child = config_found(self, &ia, ir_print);
	selinit(&sc->sc_rd_sel);
	selinit(&sc->sc_wr_sel);

	return;
}

static int
udsir_detach(device_t self, int flags)
{
	struct udsir_softc *sc = device_private(self);
	int s;
	int rv = 0;

	DPRINTFN(0, ("udsir_detach: sc=%p flags=%d\n", sc, flags));

	sc->sc_closing = sc->sc_dying = 1;

	wakeup(&sc->sc_thread);

	while (sc->sc_thread != NULL)
		tsleep(&sc->sc_closing, PWAIT, "usircl", 0);

	/* Abort all pipes.  Causes processes waiting for transfer to wake. */
	if (sc->sc_rd_pipe != NULL) {
		usbd_abort_pipe(sc->sc_rd_pipe);
	}
	if (sc->sc_wr_pipe != NULL) {
		usbd_abort_pipe(sc->sc_wr_pipe);
	}
	if (sc->sc_rd_xfer != NULL) {
		usbd_destroy_xfer(sc->sc_rd_xfer);
		sc->sc_rd_xfer = NULL;
		sc->sc_rd_buf = NULL;
	}
	if (sc->sc_wr_xfer != NULL) {
		usbd_destroy_xfer(sc->sc_wr_xfer);
		sc->sc_wr_xfer = NULL;
		sc->sc_wr_buf = NULL;
	}
	/* Close pipes. */
	if (sc->sc_rd_pipe != NULL) {
		usbd_close_pipe(sc->sc_rd_pipe);
		sc->sc_rd_pipe = NULL;
	}
	if (sc->sc_wr_pipe != NULL) {
		usbd_close_pipe(sc->sc_wr_pipe);
		sc->sc_wr_pipe = NULL;
	}
	wakeup(&sc->sc_ur_framelen);
	wakeup(&sc->sc_wr_buf);

	s = splusb();
	if (--sc->sc_refcnt >= 0) {
		/* Wait for processes to go away. */
		usb_detach_waitold(sc->sc_dev);
	}
	splx(s);

	if (sc->sc_child != NULL)
		rv = config_detach(sc->sc_child, flags);

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev, sc->sc_dev);

	seldestroy(&sc->sc_rd_sel);
	seldestroy(&sc->sc_wr_sel);

	return rv;
}

static void
udsir_childdet(device_t self, device_t child)
{
	struct udsir_softc *sc = device_private(self);

	KASSERT(sc->sc_child == child);
	sc->sc_child = NULL;
}

static int
udsir_activate(device_t self, enum devact act)
{
	struct udsir_softc *sc = device_private(self);

	switch (act) {
	case DVACT_DEACTIVATE:
		sc->sc_dying = 1;
		return 0;
	default:
		return EOPNOTSUPP;
	}
}

/* ARGSUSED */
static int
udsir_open(void *h, int flag, int mode, struct lwp *l)
{
	struct udsir_softc *sc = h;
	int error;
	usbd_status err;

	DPRINTFN(0, ("%s: sc=%p\n", __func__, sc));

	err = usbd_open_pipe(sc->sc_iface, sc->sc_rd_addr, 0, &sc->sc_rd_pipe);
	if (err != USBD_NORMAL_COMPLETION) {
		error = EIO;
		goto bad1;
	}
	err = usbd_open_pipe(sc->sc_iface, sc->sc_wr_addr, 0, &sc->sc_wr_pipe);
	if (err != USBD_NORMAL_COMPLETION) {
		error = EIO;
		goto bad2;
	}
	error = usbd_create_xfer(sc->sc_rd_pipe, sc->sc_rd_maxpsz,
	    0, 0, &sc->sc_rd_xfer);
	if (error)
		goto bad3;

	error = usbd_create_xfer(sc->sc_wr_pipe, IRDA_MAX_FRAME_SIZE,
	    USBD_FORCE_SHORT_XFER, 0, &sc->sc_wr_xfer);
	if (error)
		goto bad4;

	sc->sc_rd_buf = usbd_get_buffer(sc->sc_rd_xfer);
	sc->sc_wr_buf = usbd_get_buffer(sc->sc_wr_xfer);

	sc->sc_ur_buf = kmem_alloc(IRDA_MAX_FRAME_SIZE, KM_SLEEP);
	sc->sc_rd_index = sc->sc_rd_count = 0;
	sc->sc_closing = 0;
	sc->sc_rd_readinprogress = 0;
	sc->sc_rd_expectdataticks = 0;
	sc->sc_ur_framelen = 0;
	sc->sc_rd_err = 0;
	sc->sc_wr_stalewrite = 0;
	sc->sc_direction = udir_idle;
	sc->sc_params.speed = 0;
	sc->sc_params.ebofs = 0;
	sc->sc_params.maxsize = uimin(sc->sc_rd_maxpsz, sc->sc_wr_maxpsz);

	deframe_init(&sc->sc_framestate, sc->sc_ur_buf, IRDA_MAX_FRAME_SIZE);

	/* Increment reference for thread */
	sc->sc_refcnt++;

	error = kthread_create(PRI_NONE, 0, NULL, udsir_thread, sc,
	    &sc->sc_thread, "%s", device_xname(sc->sc_dev));
	if (error) {
		sc->sc_refcnt--;
		goto bad5;
	}

	return 0;

 bad5:
	usbd_destroy_xfer(sc->sc_wr_xfer);
	sc->sc_wr_xfer = NULL;
 bad4:
	usbd_destroy_xfer(sc->sc_rd_xfer);
	sc->sc_rd_xfer = NULL;
 bad3:
	usbd_close_pipe(sc->sc_wr_pipe);
	sc->sc_wr_pipe = NULL;
 bad2:
	usbd_close_pipe(sc->sc_rd_pipe);
	sc->sc_rd_pipe = NULL;
 bad1:
	return error;
}

/* ARGSUSED */
static int
udsir_close(void *h, int flag, int mode, struct lwp *l)
{
	struct udsir_softc *sc = h;

	DPRINTFN(0, ("%s: sc=%p\n", __func__, sc));

	sc->sc_refcnt++;

	sc->sc_rd_readinprogress = 1;
	sc->sc_closing = 1;

	wakeup(&sc->sc_thread);

	while (sc->sc_thread != NULL)
		tsleep(&sc->sc_closing, PWAIT, "usircl", 0);

	if (sc->sc_rd_pipe != NULL) {
		usbd_abort_pipe(sc->sc_rd_pipe);
	}
	if (sc->sc_wr_pipe != NULL) {
		usbd_abort_pipe(sc->sc_wr_pipe);
	}
	if (sc->sc_rd_xfer != NULL) {
		usbd_destroy_xfer(sc->sc_rd_xfer);
		sc->sc_rd_xfer = NULL;
		sc->sc_rd_buf = NULL;
	}
	if (sc->sc_wr_xfer != NULL) {
		usbd_destroy_xfer(sc->sc_wr_xfer);
		sc->sc_wr_xfer = NULL;
		sc->sc_wr_buf = NULL;
	}
	if (sc->sc_rd_pipe != NULL) {
		usbd_close_pipe(sc->sc_rd_pipe);
		sc->sc_rd_pipe = NULL;
	}
	if (sc->sc_wr_pipe != NULL) {
		usbd_close_pipe(sc->sc_wr_pipe);
		sc->sc_wr_pipe = NULL;
	}
	if (sc->sc_ur_buf != NULL) {
		kmem_free(sc->sc_ur_buf, IRDA_MAX_FRAME_SIZE);
		sc->sc_ur_buf = NULL;
	}

	if (--sc->sc_refcnt < 0)
		usb_detach_wakeupold(sc->sc_dev);

	return 0;
}

/* ARGSUSED */
static int
udsir_read(void *h, struct uio *uio, int flag)
{
	struct udsir_softc *sc = h;
	int s;
	int error;
	u_int uframelen;

	DPRINTFN(1, ("%s: sc=%p\n", __func__, sc));

	if (sc->sc_dying)
		return EIO;

#ifdef DIAGNOSTIC
	if (sc->sc_rd_buf == NULL)
		return EINVAL;
#endif

	sc->sc_refcnt++;

	if (!sc->sc_rd_readinprogress && !UDSIR_BLOCK_RX_DATA(sc))
		/* Possibly wake up polling thread */
		wakeup(&sc->sc_thread);

	do {
		s = splusb();
		while (sc->sc_ur_framelen == 0) {
			DPRINTFN(5, ("%s: calling tsleep()\n", __func__));
			error = tsleep(&sc->sc_ur_framelen, PZERO | PCATCH,
			    "usirrd", 0);
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				splx(s);
				DPRINTFN(0, ("%s: tsleep() = %d\n",
				    __func__, error));
				goto ret;
			}
		}
		splx(s);

		uframelen = sc->sc_ur_framelen;
		DPRINTFN(1, ("%s: sc=%p framelen=%u, hdr=0x%02x\n",
		    __func__, sc, uframelen, sc->sc_ur_buf[0]));
		if (uframelen > uio->uio_resid)
			error = EINVAL;
		else
			error = uiomove(sc->sc_ur_buf, uframelen, uio);
		sc->sc_ur_framelen = 0;

		if (deframe_rd_ur(sc) == 0 && uframelen > 0) {
			/*
			 * Need to wait for another read to obtain a
			 * complete frame...  If we also obtained
			 * actual data, wake up the possibly sleeping
			 * thread immediately...
			 */
			wakeup(&sc->sc_thread);
		}
	} while (uframelen == 0);

	DPRINTFN(1, ("%s: return %d\n", __func__, error));

 ret:
	if (--sc->sc_refcnt < 0)
		usb_detach_wakeupold(sc->sc_dev);
	return error;
}

/* ARGSUSED */
static int
udsir_write(void *h, struct uio *uio, int flag)
{
	struct udsir_softc *sc = h;
	usbd_status err;
	uint32_t wrlen;
	int error, sirlength;
	uint8_t *wrbuf;
	int s;

	DPRINTFN(1, ("%s: sc=%p\n", __func__, sc));

	if (sc->sc_dying)
		return EIO;

#ifdef DIAGNOSTIC
	if (sc->sc_wr_buf == NULL)
		return EINVAL;
#endif

	wrlen = uio->uio_resid;
	if (wrlen > sc->sc_wr_maxpsz)
		return EINVAL;

	sc->sc_refcnt++;

	if (!UDSIR_BLOCK_RX_DATA(sc)) {
		/*
		 * If reads are not blocked, determine what action we
		 * should potentially take...
		 */
		if (sc->sc_direction == udir_output) {
			/*
			 * If the last operation was an output, wait for the
			 * polling thread to check for incoming data.
			 */
			sc->sc_wr_stalewrite = 1;
			wakeup(&sc->sc_thread);
		} else if (!sc->sc_rd_readinprogress &&
		    (sc->sc_direction == udir_idle ||
		    sc->sc_direction == udir_input)) {
			/* If idle, check for input before outputting */
			udsir_start_read(sc);
		}
	}

	s = splusb();
	while (sc->sc_wr_stalewrite ||
	    (sc->sc_direction != udir_output &&
	    sc->sc_direction != udir_idle)) {
		DPRINTFN(5, ("%s: sc=%p stalewrite=%d direction=%d, "
		    "calling tsleep()\n",
		    __func__, sc, sc->sc_wr_stalewrite,
		    sc->sc_direction));
		error = tsleep(&sc->sc_wr_buf, PZERO | PCATCH, "usirwr", 0);
		if (sc->sc_dying)
			error = EIO;
		if (error) {
			splx(s);
			DPRINTFN(0, ("%s: tsleep() = %d\n", __func__, error));
			goto ret;
		}
	}
	splx(s);

	wrbuf = sc->sc_wr_buf;

	sirlength = irda_sir_frame(wrbuf, MAX_UDSIR_OUTPUT_FRAME,
	    uio, sc->sc_params.ebofs);
	if (sirlength < 0)
		error = -sirlength;
	else {
		uint32_t btlen;

		DPRINTFN(1, ("%s: transfer %u bytes\n",
		    __func__, (unsigned int)wrlen));

		btlen = sirlength;

		sc->sc_direction = udir_output;

#ifdef UDSIR_DEBUG
		if (udsirdebug >= 20)
			udsir_dumpdata(wrbuf, btlen, __func__);
#endif

		err = usbd_intr_transfer(sc->sc_wr_xfer, sc->sc_wr_pipe,
		    USBD_FORCE_SHORT_XFER, UDSIR_WR_TIMEOUT,
		    wrbuf, &btlen);
		DPRINTFN(2, ("%s: err=%d\n", __func__, err));
		if (err != USBD_NORMAL_COMPLETION) {
			if (err == USBD_INTERRUPTED)
				error = EINTR;
			else if (err == USBD_TIMEOUT)
				error = ETIMEDOUT;
			else
				error = EIO;
		} else
			error = 0;
	}

 ret:
	if (--sc->sc_refcnt < 0)
		usb_detach_wakeupold(sc->sc_dev);

	DPRINTFN(1, ("%s: sc=%p done\n", __func__, sc));
	return error;
}

static int
udsir_poll(void *h, int events, struct lwp *l)
{
	struct udsir_softc *sc = h;
	int revents = 0;

	DPRINTFN(1, ("%s: sc=%p\n", __func__, sc));

	if (events & (POLLOUT | POLLWRNORM)) {
		if (sc->sc_direction != udir_input)
			revents |= events & (POLLOUT | POLLWRNORM);
		else {
			DPRINTFN(2, ("%s: recording write select\n", __func__));
			selrecord(l, &sc->sc_wr_sel);
		}
	}

	if (events & (POLLIN | POLLRDNORM)) {
		if (sc->sc_ur_framelen != 0) {
			DPRINTFN(2, ("%s: have data\n", __func__));
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			DPRINTFN(2, ("%s: recording read select\n", __func__));
			selrecord(l, &sc->sc_rd_sel);
		}
	}

	return revents;
}

static const struct filterops udsirread_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_udsirrdetach,
	.f_event = filt_udsirread,
};

static const struct filterops udsirwrite_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_udsirwdetach,
	.f_event = filt_udsirwrite,
};

static int
udsir_kqfilter(void *h, struct knote *kn)
{
	struct udsir_softc *sc = h;
	struct klist *klist;
	int s;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &sc->sc_rd_sel.sel_klist;
		kn->kn_fop = &udsirread_filtops;
		break;
	case EVFILT_WRITE:
		klist = &sc->sc_wr_sel.sel_klist;
		kn->kn_fop = &udsirwrite_filtops;
		break;
	default:
		return EINVAL;
	}

	kn->kn_hook = sc;

	s = splusb();
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	splx(s);

	return 0;
}

static int
udsir_set_params(void *h, struct irda_params *p)
{
	struct udsir_softc *sc = h;

	DPRINTFN(0, ("%s: sc=%p, speed=%d ebofs=%d maxsize=%d\n",
	    __func__, sc, p->speed, p->ebofs, p->maxsize));

	if (sc->sc_dying)
		return EIO;

	if (p->speed != 9600)
		return EINVAL;

	if (p->maxsize != sc->sc_params.maxsize) {
		if (p->maxsize > uimin(sc->sc_rd_maxpsz, sc->sc_wr_maxpsz))
			return EINVAL;
		sc->sc_params.maxsize = p->maxsize;
	}

	sc->sc_params = *p;

	return 0;
}

static int
udsir_get_speeds(void *h, int *speeds)
{
	struct udsir_softc *sc = h;

	DPRINTFN(0, ("%s: sc=%p\n", __func__, sc));

	if (sc->sc_dying)
		return EIO;

	/* Support only 9600bps now. */
	*speeds = IRDA_SPEED_9600;

	return 0;
}

static int
udsir_get_turnarounds(void *h, int *turnarounds)
{
	struct udsir_softc *sc = h;

	DPRINTFN(0, ("%s: sc=%p\n", __func__, sc));

	if (sc->sc_dying)
		return EIO;

	/*
	 * Documentation is on the light side with respect to
	 * turnaround time for this device.
	 */
	*turnarounds = IRDA_TURNT_10000;

	return 0;
}

static void
filt_udsirrdetach(struct knote *kn)
{
	struct udsir_softc *sc = kn->kn_hook;
	int s;

	s = splusb();
	SLIST_REMOVE(&sc->sc_rd_sel.sel_klist, kn, knote, kn_selnext);
	splx(s);
}

/* ARGSUSED */
static int
filt_udsirread(struct knote *kn, long hint)
{
	struct udsir_softc *sc = kn->kn_hook;

	kn->kn_data = sc->sc_ur_framelen;
	return kn->kn_data > 0;
}

static void
filt_udsirwdetach(struct knote *kn)
{
	struct udsir_softc *sc = kn->kn_hook;
	int s;

	s = splusb();
	SLIST_REMOVE(&sc->sc_wr_sel.sel_klist, kn, knote, kn_selnext);
	splx(s);
}

/* ARGSUSED */
static int
filt_udsirwrite(struct knote *kn, long hint)
{
	struct udsir_softc *sc = kn->kn_hook;

	kn->kn_data = 0;
	return sc->sc_direction != udir_input;
}

static void
udsir_thread(void *arg)
{
	struct udsir_softc *sc = arg;
	int error;

	DPRINTFN(20, ("%s: starting polling thread\n", __func__));

	while (!sc->sc_closing) {
		if (!sc->sc_rd_readinprogress && !UDSIR_BLOCK_RX_DATA(sc))
			udsir_periodic(sc);

		if (!sc->sc_closing) {
			error = tsleep(&sc->sc_thread, PWAIT, "udsir", hz / 10);
			if (error == EWOULDBLOCK &&
			    sc->sc_rd_expectdataticks > 0)
				/*
				 * After a timeout decrement the tick
				 * counter within which time we expect
				 * data to arrive if we are receiving
				 * data...
				 */
				sc->sc_rd_expectdataticks--;
		}
	}

	DPRINTFN(20, ("%s: exiting polling thread\n", __func__));

	sc->sc_thread = NULL;

	wakeup(&sc->sc_closing);

	if (--sc->sc_refcnt < 0)
		usb_detach_wakeupold(sc->sc_dev);

	kthread_exit(0);
}

#ifdef UDSIR_DEBUG
static void
udsir_dumpdata(uint8_t const *data, size_t dlen, char const *desc)
{
	size_t bdindex;

	printf("%s: (%lx)", desc, (unsigned long)dlen);
	for (bdindex = 0; bdindex < dlen; bdindex++)
		printf(" %02x", (unsigned int)data[bdindex]);
	printf("\n");
}
#endif

/* Returns 0 if more data required, 1 if a complete frame was extracted */
static int
deframe_rd_ur(struct udsir_softc *sc)
{

	if (sc->sc_rd_index == 0) {
		KASSERT(sc->sc_rd_count == sc->sc_rd_maxpsz);
		/* valid count */
		sc->sc_rd_count = sc->sc_rd_buf[sc->sc_rd_index++] + 1;
		KASSERT(sc->sc_rd_count < sc->sc_rd_maxpsz);
	}

	while (sc->sc_rd_index < sc->sc_rd_count) {
		uint8_t const *buf;
		size_t buflen;
		enum frameresult fresult;

		buf = &sc->sc_rd_buf[sc->sc_rd_index];
		buflen = sc->sc_rd_count - sc->sc_rd_index;

		fresult = deframe_process(&sc->sc_framestate, &buf, &buflen);

		sc->sc_rd_index = sc->sc_rd_count - buflen;

		DPRINTFN(1, ("%s: result=%d\n", __func__, (int)fresult));

		switch (fresult) {
		case FR_IDLE:
		case FR_INPROGRESS:
		case FR_FRAMEBADFCS:
		case FR_FRAMEMALFORMED:
		case FR_BUFFEROVERRUN:
			break;
		case FR_FRAMEOK:
			sc->sc_ur_framelen = sc->sc_framestate.bufindex;
			wakeup(&sc->sc_ur_framelen); /* XXX should use flag */
			selnotify(&sc->sc_rd_sel, 0, 0);
			return 1;
		}
	}

	/* Reset indices into USB-side buffer */
	sc->sc_rd_index = sc->sc_rd_count = 0;

	return 0;
}

/*
 * Direction transitions:
 *
 * udsir_periodic() can switch the direction from:
 *
 *	output -> idle
 *	output -> stalled
 *	stalled -> idle
 *	idle -> input
 *
 * udsir_rd_cb() can switch the direction from:
 *
 *	input -> stalled
 *	input -> idle
 *
 * udsir_write() can switch the direction from:
 *
 *	idle -> output
 */
static void
udsir_periodic(struct udsir_softc *sc)
{

	DPRINTFN(60, ("%s: direction = %d\n", __func__, sc->sc_direction));

	if (sc->sc_wr_stalewrite && sc->sc_direction == udir_idle) {
		/*
		 * In a stale write case, we need to check if the
		 * write has completed.  Once that has happened, the
		 * write is no longer stale.
		 *
		 * But note that we may immediately start a read poll...
		 */
		sc->sc_wr_stalewrite = 0;
		wakeup(&sc->sc_wr_buf);
	}

	if (!sc->sc_rd_readinprogress &&
	    (sc->sc_direction == udir_idle ||
	    sc->sc_direction == udir_input))
		/* Do a read poll if appropriate... */
		udsir_start_read(sc);
}

static void
udsir_rd_cb(struct usbd_xfer *xfer, void *priv, usbd_status status)
{
	struct udsir_softc *sc = priv;
	uint32_t size;

	DPRINTFN(60, ("%s: sc=%p\n", __func__, sc));

	/* Read is no longer in progress */
	sc->sc_rd_readinprogress = 0;

	if (status == USBD_CANCELLED || sc->sc_closing) /* this is normal */
		return;
	if (status) {
		size = 0;
		sc->sc_rd_err = 1;

		if (sc->sc_direction == udir_input ||
		    sc->sc_direction == udir_idle) {
			/*
			 * Receive error, probably need to clear error
			 * condition.
			 */
			sc->sc_direction = udir_stalled;
		}
	} else
		usbd_get_xfer_status(xfer, NULL, NULL, &size, NULL);

	sc->sc_rd_index = 0;
	sc->sc_rd_count = size;

	DPRINTFN(((size > 0 || sc->sc_rd_err != 0) ? 20 : 60),
	    ("%s: sc=%p size=%u, err=%d\n",
	    __func__, sc, size, sc->sc_rd_err));

#ifdef UDSIR_DEBUG
	if (udsirdebug >= 20 && size > 0)
		udsir_dumpdata(sc->sc_rd_buf, size, __func__);
#endif

	if (deframe_rd_ur(sc) == 0) {
		if (!deframe_isclear(&sc->sc_framestate) && size == 0 &&
		    sc->sc_rd_expectdataticks == 0) {
			/*
			 * Expected data, but didn't get it
			 * within expected time...
			 */
			DPRINTFN(5, ("%s: incoming packet timeout\n",
			    __func__));
			deframe_clear(&sc->sc_framestate);
		} else if (size > 0) {
			/*
			 * If we also received actual data, reset the
			 * data read timeout and wake up the possibly
			 * sleeping thread...
			 */
			sc->sc_rd_expectdataticks = 2;
			wakeup(&sc->sc_thread);
		}
	}

	/*
	 * Check if incoming data has stopped, or that we cannot
	 * safely read any more data.  In the case of the latter we
	 * must switch to idle so that a write will not block...
	 */
	if (sc->sc_direction == udir_input &&
	    ((size == 0 && sc->sc_rd_expectdataticks == 0) ||
	    UDSIR_BLOCK_RX_DATA(sc))) {
		DPRINTFN(8, ("%s: idling on packet timeout, "
		    "complete frame, or no data\n", __func__));
		sc->sc_direction = udir_idle;

		/* Wake up for possible output */
		wakeup(&sc->sc_wr_buf);
		selnotify(&sc->sc_wr_sel, 0, 0);
	}
}

static usbd_status
udsir_start_read(struct udsir_softc *sc)
{
	usbd_status err;

	DPRINTFN(60, ("%s: sc=%p, size=%d\n", __func__, sc, sc->sc_rd_maxpsz));

	if (sc->sc_dying)
		return USBD_IOERROR;

	if (UDSIR_BLOCK_RX_DATA(sc) || deframe_rd_ur(sc)) {
		/*
		 * Can't start reading just yet.  Since we aren't
		 * going to start a read, have to switch direction to
		 * idle.
		 */
		sc->sc_direction = udir_idle;
		return USBD_NORMAL_COMPLETION;
	}

	/* Starting a read... */
	sc->sc_rd_readinprogress = 1;
	sc->sc_direction = udir_input;

	if (sc->sc_rd_err) {
		sc->sc_rd_err = 0;
		DPRINTFN(0, ("%s: clear stall\n", __func__));
		usbd_clear_endpoint_stall(sc->sc_rd_pipe);
	}

	usbd_setup_xfer(sc->sc_rd_xfer, sc, sc->sc_rd_buf, sc->sc_rd_maxpsz,
	    USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, udsir_rd_cb);
	err = usbd_transfer(sc->sc_rd_xfer);
	if (err != USBD_IN_PROGRESS) {
		DPRINTFN(0, ("%s: err=%d\n", __func__, (int)err));
		return err;
	}
	return USBD_NORMAL_COMPLETION;
}