xref: /netbsd-src/sys/dev/usb/uhid.c (revision 53b02e147d4ed531c0d2a5ca9b3e8026ba3e99b5)
1 /*	$NetBSD: uhid.c,v 1.119 2021/09/26 15:07:17 thorpej Exp $	*/
2 
3 /*
4  * Copyright (c) 1998, 2004, 2008, 2012 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Lennart Augustsson (lennart@augustsson.net) at
9  * Carlstedt Research & Technology and Matthew R. Green (mrg@eterna.com.au).
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * HID spec: http://www.usb.org/developers/devclass_docs/HID1_11.pdf
35  */
36 
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: uhid.c,v 1.119 2021/09/26 15:07:17 thorpej Exp $");
39 
40 #ifdef _KERNEL_OPT
41 #include "opt_compat_netbsd.h"
42 #include "opt_usb.h"
43 #endif
44 
45 #include <sys/param.h>
46 #include <sys/types.h>
47 
48 #include <sys/atomic.h>
49 #include <sys/compat_stub.h>
50 #include <sys/conf.h>
51 #include <sys/device.h>
52 #include <sys/file.h>
53 #include <sys/intr.h>
54 #include <sys/ioctl.h>
55 #include <sys/kernel.h>
56 #include <sys/kmem.h>
57 #include <sys/poll.h>
58 #include <sys/proc.h>
59 #include <sys/select.h>
60 #include <sys/signalvar.h>
61 #include <sys/systm.h>
62 #include <sys/tty.h>
63 #include <sys/vnode.h>
64 
65 #include <dev/usb/usb.h>
66 #include <dev/usb/usbhid.h>
67 
68 #include <dev/usb/usbdevs.h>
69 #include <dev/usb/usbdi.h>
70 #include <dev/usb/usbdi_util.h>
71 #include <dev/usb/usb_quirks.h>
72 #include <dev/hid/hid.h>
73 
74 #include <dev/usb/uhidev.h>
75 
76 #include "ioconf.h"
77 
#ifdef UHID_DEBUG
/* Debug print helpers, gated on the run-time uhiddebug level (0 = silent). */
#define DPRINTF(x)	if (uhiddebug) printf x
#define DPRINTFN(n,x)	if (uhiddebug>(n)) printf x
int	uhiddebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif
86 
struct uhid_softc {
	struct uhidev sc_hdev;		/* proxy device; must be first member */

	kmutex_t sc_lock;		/* serializes softc state below */
	kcondvar_t sc_cv;		/* signalled when input data queued */
	kcondvar_t sc_detach_cv;	/* signalled when refcnt/open drops */

	int sc_isize;			/* input report size, bytes */
	int sc_osize;			/* output report size, bytes */
	int sc_fsize;			/* feature report size, bytes */

	u_char *sc_obuf;		/* write staging buffer, sc_osize bytes */

	struct clist sc_q;	/* input queue; protected by sc_lock */
	struct selinfo sc_rsel;	/* read select/poll/kqueue bookkeeping */
	proc_t *sc_async;	/* process that wants SIGIO; proc_lock */
	void *sc_sih;		/* softint handle (not used in this file) */
	volatile uint32_t sc_state;	/* driver state */
#define UHID_IMMED	0x02	/* return read data immediately */

	int sc_refcnt;		/* in-flight read/write/ioctl/poll count */
	int sc_raw;		/* raw output (e.g. FIDO/U2F) instead of
				 * SET_REPORT; toggled by USB_HID_SET_RAW */
	u_char sc_open;		/* 0 = closed, 1 = transitioning, 2 = open */
	u_char sc_dying;	/* detaching; fail new opens and I/O */
};
112 
113 #define	UHIDUNIT(dev)	(minor(dev))
114 #define	UHID_CHUNK	128	/* chunk size for read */
115 #define	UHID_BSIZE	1020	/* buffer size */
116 
/* Character-device entry points (see cdevsw below). */
static dev_type_open(uhidopen);
static dev_type_close(uhidclose);
static dev_type_read(uhidread);
static dev_type_write(uhidwrite);
static dev_type_ioctl(uhidioctl);
static dev_type_poll(uhidpoll);
static dev_type_kqfilter(uhidkqfilter);

/*
 * Character-device switch for /dev/uhidN.  No stop/tty/mmap/discard
 * support; D_OTHER marks this as a non-disk, non-tty device.
 */
const struct cdevsw uhid_cdevsw = {
	.d_open = uhidopen,
	.d_close = uhidclose,
	.d_read = uhidread,
	.d_write = uhidwrite,
	.d_ioctl = uhidioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = uhidpoll,
	.d_mmap = nommap,
	.d_kqfilter = uhidkqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER
};
139 
/* Interrupt callback installed on the parent uhidev. */
Static void uhid_intr(struct uhidev *, void *, u_int);

/* Worker routines shared by the cdevsw entry points. */
Static int uhid_do_read(struct uhid_softc *, struct uio *, int);
Static int uhid_do_write(struct uhid_softc *, struct uio *, int);
Static int uhid_do_ioctl(struct uhid_softc*, u_long, void *, int, struct lwp *);

/* Autoconf glue. */
static int	uhid_match(device_t, cfdata_t, void *);
static void	uhid_attach(device_t, device_t, void *);
static int	uhid_detach(device_t, int);
static int	uhid_activate(device_t, enum devact);

CFATTACH_DECL_NEW(uhid, sizeof(struct uhid_softc), uhid_match, uhid_attach,
    uhid_detach, uhid_activate);
153 
154 static int
155 uhid_match(device_t parent, cfdata_t match, void *aux)
156 {
157 #ifdef UHID_DEBUG
158 	struct uhidev_attach_arg *uha = aux;
159 #endif
160 
161 	DPRINTF(("uhid_match: report=%d\n", uha->reportid));
162 
163 	if (match->cf_flags & 1)
164 		return UMATCH_HIGHEST;
165 	else
166 		return UMATCH_IFACECLASS_GENERIC;
167 }
168 
/*
 * Autoconf attach: hook ourselves up to the parent uhidev, size the
 * report buffers for our report id, and initialize synchronization
 * primitives.  No USB I/O is started here; that happens at open time.
 */
static void
uhid_attach(device_t parent, device_t self, void *aux)
{
	struct uhid_softc *sc = device_private(self);
	struct uhidev_attach_arg *uha = aux;
	int size, repid;
	void *desc;

	sc->sc_hdev.sc_dev = self;
	selinit(&sc->sc_rsel);
	sc->sc_hdev.sc_intr = uhid_intr;
	sc->sc_hdev.sc_parent = uha->parent;
	sc->sc_hdev.sc_report_id = uha->reportid;

	/* Compute input/output/feature report sizes for our report id. */
	uhidev_get_report_desc(uha->parent, &desc, &size);
	repid = uha->reportid;
	sc->sc_isize = hid_report_size(desc, size, hid_input,   repid);
	sc->sc_osize = hid_report_size(desc, size, hid_output,  repid);
	sc->sc_fsize = hid_report_size(desc, size, hid_feature, repid);
	/* FIDO/U2F tokens want raw interrupt-pipe writes, not SET_REPORT. */
	sc->sc_raw =  hid_is_collection(desc, size, uha->reportid,
	    HID_USAGE2(HUP_FIDO, HUF_U2FHID));

	aprint_naive("\n");
	aprint_normal(": input=%d, output=%d, feature=%d\n",
	       sc->sc_isize, sc->sc_osize, sc->sc_fsize);

	/* IPL_SOFTUSB: sc_lock is taken from the USB soft interrupt path. */
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
	cv_init(&sc->sc_cv, "uhidrea");
	cv_init(&sc->sc_detach_cv, "uhiddet");

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;
}
204 
205 static int
206 uhid_activate(device_t self, enum devact act)
207 {
208 	struct uhid_softc *sc = device_private(self);
209 
210 	switch (act) {
211 	case DVACT_DEACTIVATE:
212 		sc->sc_dying = 1;
213 		return 0;
214 	default:
215 		return EOPNOTSUPP;
216 	}
217 }
218 
/*
 * Autoconf detach: drain all I/O, revoke open vnodes, wait for close
 * to complete, then tear down the synchronization primitives.  The
 * ordering here is load-bearing -- see the inline comments.
 */
static int
uhid_detach(device_t self, int flags)
{
	struct uhid_softc *sc = device_private(self);
	int maj, mn;

	DPRINTF(("uhid_detach: sc=%p flags=%d\n", sc, flags));

	/* Prevent new I/O operations, and interrupt any pending reads.  */
	mutex_enter(&sc->sc_lock);
	sc->sc_dying = 1;
	cv_broadcast(&sc->sc_cv);
	mutex_exit(&sc->sc_lock);

	/* Interrupt any pending uhidev_write.  */
	uhidev_stop(&sc->sc_hdev);

	/* Wait for I/O operations to complete.  */
	mutex_enter(&sc->sc_lock);
	while (sc->sc_refcnt) {
		DPRINTF(("%s: open=%d refcnt=%d\n", __func__,
			sc->sc_open, sc->sc_refcnt));
		/* uhid_exit broadcasts sc_detach_cv when refcnt hits 0. */
		cv_wait(&sc->sc_detach_cv, &sc->sc_lock);
	}
	mutex_exit(&sc->sc_lock);

	pmf_device_deregister(self);

	/* locate the major number */
	maj = cdevsw_lookup_major(&uhid_cdevsw);

	/* Nuke the vnodes for any open instances (calls close). */
	mn = device_unit(self);
	vdevgone(maj, mn, mn, VCHR);

	/*
	 * Wait for close to finish.
	 *
	 * XXX I assumed that vdevgone would synchronously call close,
	 * and not return before it has completed, but empirically the
	 * assertion of sc->sc_open == 0 below fires if we don't wait
	 * here.  Someone^TM should carefully examine vdevgone to
	 * ascertain what it guarantees, and audit all other users of
	 * it accordingly.
	 */
	mutex_enter(&sc->sc_lock);
	while (sc->sc_open) {
		DPRINTF(("%s: open=%d\n", __func__, sc->sc_open));
		cv_wait(&sc->sc_detach_cv, &sc->sc_lock);
	}
	mutex_exit(&sc->sc_lock);

	KASSERT(sc->sc_open == 0);
	KASSERT(sc->sc_refcnt == 0);

	/* Safe to destroy now: nothing can reach the softc any more. */
	cv_destroy(&sc->sc_cv);
	cv_destroy(&sc->sc_detach_cv);
	mutex_destroy(&sc->sc_lock);
	seldestroy(&sc->sc_rsel);

	return 0;
}
281 
/*
 * Input interrupt callback from the parent uhidev: queue the report
 * data and wake up anyone waiting for input (blocked readers,
 * select/poll/kqueue, and SIGIO recipients).  Runs at USB soft
 * interrupt level.
 */
void
uhid_intr(struct uhidev *addr, void *data, u_int len)
{
	struct uhid_softc *sc = (struct uhid_softc *)addr;

#ifdef UHID_DEBUG
	if (uhiddebug > 5) {
		uint32_t i;

		DPRINTF(("uhid_intr: data ="));
		for (i = 0; i < len; i++)
			DPRINTF((" %02x", ((u_char *)data)[i]));
		DPRINTF(("\n"));
	}
#endif

	mutex_enter(&sc->sc_lock);
	/* Return value deliberately ignored: if the clist is full the
	 * excess input is silently dropped. */
	(void)b_to_q(data, len, &sc->sc_q);

	DPRINTFN(5, ("uhid_intr: waking %p\n", &sc->sc_q));
	cv_broadcast(&sc->sc_cv);
	selnotify(&sc->sc_rsel, 0, NOTE_SUBMIT);
	/*
	 * Unlocked peek at sc_async to avoid taking proc_lock in the
	 * common no-SIGIO case; re-checked under proc_lock before use.
	 */
	if (atomic_load_relaxed(&sc->sc_async) != NULL) {
		mutex_enter(&proc_lock);
		if (sc->sc_async != NULL) {
			DPRINTFN(3, ("uhid_intr: sending SIGIO to %jd\n",
				(intmax_t)sc->sc_async->p_pid));
			psignal(sc->sc_async, SIGIO);
		}
		mutex_exit(&proc_lock);
	}
	mutex_exit(&sc->sc_lock);
}
315 
/*
 * Open /dev/uhidN.  Opens are exclusive: sc_open goes 0 -> 1
 * (transitioning) -> 2 (fully open).  Resources are set up before
 * uhidev_open enables interrupts, and torn down in reverse order via
 * the failN labels on error.
 */
static int
uhidopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct uhid_softc *sc;
	int error;

	sc = device_lookup_private(&uhid_cd, UHIDUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	DPRINTF(("uhidopen: sc=%p\n", sc));

	/*
	 * Try to open.  If dying, or if already open (or opening),
	 * fail -- opens are exclusive.
	 */
	mutex_enter(&sc->sc_lock);
	if (sc->sc_dying) {
		mutex_exit(&sc->sc_lock);
		return ENXIO;
	}
	if (sc->sc_open) {
		mutex_exit(&sc->sc_lock);
		return EBUSY;
	}
	sc->sc_open = 1;
	atomic_store_relaxed(&sc->sc_state, 0);
	mutex_exit(&sc->sc_lock);

	/* uhid interrupts aren't enabled yet, so setup sc_q now */
	if (clalloc(&sc->sc_q, UHID_BSIZE, 0) == -1) {
		error = ENOMEM;
		goto fail0;
	}

	/* Allocate an output buffer if needed.  */
	if (sc->sc_osize > 0)
		sc->sc_obuf = kmem_alloc(sc->sc_osize, KM_SLEEP);
	else
		sc->sc_obuf = NULL;

	/* Paranoia: reset SIGIO before enabling interrputs.  */
	mutex_enter(&proc_lock);
	atomic_store_relaxed(&sc->sc_async, NULL);
	mutex_exit(&proc_lock);

	/* Open the uhidev -- after this point we can get interrupts.  */
	error = uhidev_open(&sc->sc_hdev);
	if (error)
		goto fail1;

	/* We are open for business.  */
	mutex_enter(&sc->sc_lock);
	sc->sc_open = 2;
	mutex_exit(&sc->sc_lock);

	return 0;

	/* fail2 is unreachable today; kept so future steps after
	 * "open for business" have an unwind point. */
fail2: __unused
	mutex_enter(&sc->sc_lock);
	KASSERT(sc->sc_open == 2);
	sc->sc_open = 1;
	mutex_exit(&sc->sc_lock);
	uhidev_close(&sc->sc_hdev);
fail1:	selnotify(&sc->sc_rsel, POLLHUP, 0);
	mutex_enter(&proc_lock);
	atomic_store_relaxed(&sc->sc_async, NULL);
	mutex_exit(&proc_lock);
	if (sc->sc_osize > 0) {
		kmem_free(sc->sc_obuf, sc->sc_osize);
		sc->sc_obuf = NULL;
	}
	clfree(&sc->sc_q);
fail0:	mutex_enter(&sc->sc_lock);
	KASSERT(sc->sc_open == 1);
	sc->sc_open = 0;
	/* Wake uhid_detach, which may be waiting for sc_open to drop. */
	cv_broadcast(&sc->sc_detach_cv);
	atomic_store_relaxed(&sc->sc_state, 0);
	mutex_exit(&sc->sc_lock);
	return error;
}
397 
/*
 * Close /dev/uhidN: the exact reverse of uhidopen.  sc_open goes
 * 2 -> 1 (transitioning, blocks new opens) -> 0, and uhid_detach is
 * notified via sc_detach_cv once we are fully closed.
 */
static int
uhidclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct uhid_softc *sc;

	sc = device_lookup_private(&uhid_cd, UHIDUNIT(dev));

	DPRINTF(("uhidclose: sc=%p\n", sc));

	/* We are closing up shop.  Prevent new opens until we're done.  */
	mutex_enter(&sc->sc_lock);
	KASSERT(sc->sc_open == 2);
	sc->sc_open = 1;
	mutex_exit(&sc->sc_lock);

	/* Prevent further interrupts.  */
	uhidev_close(&sc->sc_hdev);

	/* Hang up all select/poll.  */
	selnotify(&sc->sc_rsel, POLLHUP, 0);

	/* Reset SIGIO.  */
	mutex_enter(&proc_lock);
	atomic_store_relaxed(&sc->sc_async, NULL);
	mutex_exit(&proc_lock);

	/* Free the buffer and queue.  */
	if (sc->sc_osize > 0) {
		kmem_free(sc->sc_obuf, sc->sc_osize);
		sc->sc_obuf = NULL;
	}
	clfree(&sc->sc_q);

	/* All set.  We are now closed.  */
	mutex_enter(&sc->sc_lock);
	KASSERT(sc->sc_open == 1);
	sc->sc_open = 0;
	cv_broadcast(&sc->sc_detach_cv);
	atomic_store_relaxed(&sc->sc_state, 0);
	mutex_exit(&sc->sc_lock);

	return 0;
}
441 
442 static int
443 uhid_enter(dev_t dev, struct uhid_softc **scp)
444 {
445 	struct uhid_softc *sc;
446 	int error;
447 
448 	/* XXX need to hold reference to device */
449 	sc = device_lookup_private(&uhid_cd, UHIDUNIT(dev));
450 	if (sc == NULL)
451 		return ENXIO;
452 
453 	mutex_enter(&sc->sc_lock);
454 	KASSERT(sc->sc_open == 2);
455 	if (sc->sc_dying) {
456 		error = ENXIO;
457 	} else if (sc->sc_refcnt == INT_MAX) {
458 		error = EBUSY;
459 	} else {
460 		*scp = sc;
461 		sc->sc_refcnt++;
462 		error = 0;
463 	}
464 	mutex_exit(&sc->sc_lock);
465 
466 	return error;
467 }
468 
469 static void
470 uhid_exit(struct uhid_softc *sc)
471 {
472 
473 	mutex_enter(&sc->sc_lock);
474 	KASSERT(sc->sc_open == 2);
475 	KASSERT(sc->sc_refcnt > 0);
476 	if (--sc->sc_refcnt == 0)
477 		cv_broadcast(&sc->sc_detach_cv);
478 	mutex_exit(&sc->sc_lock);
479 }
480 
/*
 * Read worker.  In UHID_IMMED mode, synchronously fetch the current
 * input report with a GET_REPORT control request.  Otherwise block
 * (unless IO_NDELAY) until interrupt data is queued on sc_q, then
 * copy it out in UHID_CHUNK-sized pieces.
 */
Static int
uhid_do_read(struct uhid_softc *sc, struct uio *uio, int flag)
{
	int error = 0;
	int extra;
	size_t length;
	u_char buffer[UHID_CHUNK];
	usbd_status err;

	DPRINTFN(1, ("uhidread\n"));
	if (atomic_load_relaxed(&sc->sc_state) & UHID_IMMED) {
		DPRINTFN(1, ("uhidread immed\n"));
		/* Non-zero report ids prepend one id byte to the report. */
		extra = sc->sc_hdev.sc_report_id != 0;
		if (sc->sc_isize + extra > sizeof(buffer))
			return ENOBUFS;
		err = uhidev_get_report(&sc->sc_hdev, UHID_INPUT_REPORT,
					buffer, sc->sc_isize + extra);
		if (err)
			return EIO;
		/* Skip the report-id byte; hand the caller the payload. */
		return uiomove(buffer+extra, sc->sc_isize, uio);
	}

	mutex_enter(&sc->sc_lock);
	while (sc->sc_q.c_cc == 0) {
		if (flag & IO_NDELAY) {
			mutex_exit(&sc->sc_lock);
			return EWOULDBLOCK;
		}
		if (sc->sc_dying) {
			mutex_exit(&sc->sc_lock);
			return EIO;
		}
		DPRINTFN(5, ("uhidread: sleep on %p\n", &sc->sc_q));
		error = cv_wait_sig(&sc->sc_cv, &sc->sc_lock);
		DPRINTFN(5, ("uhidread: woke, error=%d\n", error));
		if (error) {
			/* Interrupted by a signal; fall through -- the
			 * transfer loop below is a no-op when error != 0. */
			break;
		}
	}

	/* Transfer as many chunks as possible. */
	while (sc->sc_q.c_cc > 0 && uio->uio_resid > 0 && !error) {
		length = uimin(sc->sc_q.c_cc, uio->uio_resid);
		if (length > sizeof(buffer))
			length = sizeof(buffer);

		/* Remove a small chunk from the input queue. */
		(void) q_to_b(&sc->sc_q, buffer, length);
		DPRINTFN(5, ("uhidread: got %lu chars\n", (u_long)length));

		/* Copy the data to the user process. */
		/* Drop sc_lock across uiomove: it may fault and sleep.
		 * New interrupt data may be queued meanwhile, which the
		 * loop condition re-checks after relocking. */
		mutex_exit(&sc->sc_lock);
		if ((error = uiomove(buffer, length, uio)) != 0)
			return error;
		mutex_enter(&sc->sc_lock);
	}

	mutex_exit(&sc->sc_lock);
	return error;
}
541 
542 static int
543 uhidread(dev_t dev, struct uio *uio, int flag)
544 {
545 	struct uhid_softc *sc;
546 	int error;
547 
548 	error = uhid_enter(dev, &sc);
549 	if (error)
550 		return error;
551 	error = uhid_do_read(sc, uio, flag);
552 	uhid_exit(sc);
553 	return error;
554 }
555 
/*
 * Write worker.  Exactly one complete output report must be written
 * per call (uio_resid == sc_osize), which is sent either raw on the
 * interrupt pipe (FIDO/U2F mode) or via a SET_REPORT control request.
 */
Static int
uhid_do_write(struct uhid_softc *sc, struct uio *uio, int flag)
{
	int error;
	int size;
	usbd_status err;

	DPRINTFN(1, ("uhidwrite\n"));

	size = sc->sc_osize;
	if (uio->uio_resid != size || size == 0)
		return EINVAL;
	error = uiomove(sc->sc_obuf, size, uio);
#ifdef UHID_DEBUG
	if (uhiddebug > 5) {
		uint32_t i;

		/* NOTE(review): "%d" here prints the uiomove error, not a
		 * length; i (uint32_t) is compared against size (int). */
		DPRINTF(("%s: outdata[%d] =", device_xname(sc->sc_hdev.sc_dev),
		    error));
		for (i = 0; i < size; i++)
			DPRINTF((" %02x", sc->sc_obuf[i]));
		DPRINTF(("\n"));
	}
#endif
	if (!error) {
		if (sc->sc_raw)
			err = uhidev_write(sc->sc_hdev.sc_parent, sc->sc_obuf,
			    size);
		else
			err = uhidev_set_report(&sc->sc_hdev,
			    UHID_OUTPUT_REPORT, sc->sc_obuf, size);
		if (err) {
			DPRINTF(("%s: err = %d\n",
			    device_xname(sc->sc_hdev.sc_dev), err));
			error = EIO;
		}
	}

	return error;
}
596 
597 int
598 uhidwrite(dev_t dev, struct uio *uio, int flag)
599 {
600 	struct uhid_softc *sc;
601 	int error;
602 
603 	error = uhid_enter(dev, &sc);
604 	if (error)
605 		return error;
606 	error = uhid_do_write(sc, uio, flag);
607 	uhid_exit(sc);
608 	return error;
609 }
610 
/*
 * Ioctl worker.  Handles async-I/O plumbing (FIOASYNC/TIOCSPGRP/
 * FIOSETOWN), raw-mode toggling, report descriptor and report
 * get/set, and generic USB device information queries.  Returns 0 on
 * success or an errno.
 */
int
uhid_do_ioctl(struct uhid_softc *sc, u_long cmd, void *addr,
    int flag, struct lwp *l)
{
	struct usb_ctl_report_desc *rd;
	struct usb_ctl_report *re;
	u_char buffer[UHID_CHUNK];
	int size, extra;
	usbd_status err;
	void *desc;

	DPRINTFN(2, ("uhidioctl: cmd=%lx\n", cmd));

	switch (cmd) {
	case FIONBIO:
		/* All handled in the upper FS layer. */
		break;

	case FIOASYNC:
		/* Enable/disable SIGIO delivery to the calling process. */
		mutex_enter(&proc_lock);
		if (*(int *)addr) {
			if (sc->sc_async != NULL) {
				mutex_exit(&proc_lock);
				return EBUSY;
			}
			atomic_store_relaxed(&sc->sc_async, l->l_proc);
			DPRINTF(("uhid_do_ioctl: FIOASYNC %p\n", l->l_proc));
		} else
			atomic_store_relaxed(&sc->sc_async, NULL);
		mutex_exit(&proc_lock);
		break;

	/* XXX this is not the most general solution. */
	case TIOCSPGRP:
		/* Only accepts the SIGIO owner's existing process group. */
		mutex_enter(&proc_lock);
		if (sc->sc_async == NULL) {
			mutex_exit(&proc_lock);
			return EINVAL;
		}
		if (*(int *)addr != sc->sc_async->p_pgid) {
			mutex_exit(&proc_lock);
			return EPERM;
		}
		mutex_exit(&proc_lock);
		break;

	case FIOSETOWN:
		/* Accepts the owner's pid, or its pgid in negative form. */
		mutex_enter(&proc_lock);
		if (sc->sc_async == NULL) {
			mutex_exit(&proc_lock);
			return EINVAL;
		}
		if (-*(int *)addr != sc->sc_async->p_pgid
		    && *(int *)addr != sc->sc_async->p_pid) {
			mutex_exit(&proc_lock);
			return EPERM;
		}
		mutex_exit(&proc_lock);
		break;

	case USB_HID_GET_RAW:
		*(int *)addr = sc->sc_raw;
		break;

	case USB_HID_SET_RAW:
		sc->sc_raw = *(int *)addr;
		break;

	case USB_GET_REPORT_DESC:
		/* Copy out the (possibly truncated) report descriptor. */
		uhidev_get_report_desc(sc->sc_hdev.sc_parent, &desc, &size);
		rd = (struct usb_ctl_report_desc *)addr;
		size = uimin(size, sizeof(rd->ucrd_data));
		rd->ucrd_size = size;
		memcpy(rd->ucrd_data, desc, size);
		break;

	case USB_SET_IMMED:
		if (*(int *)addr) {
			/* Probe GET_REPORT once so we can fail up front
			 * if the device doesn't support immediate mode. */
			extra = sc->sc_hdev.sc_report_id != 0;
			if (sc->sc_isize + extra > sizeof(buffer))
				return ENOBUFS;
			err = uhidev_get_report(&sc->sc_hdev, UHID_INPUT_REPORT,
						buffer, sc->sc_isize + extra);
			if (err)
				return EOPNOTSUPP;

			atomic_or_32(&sc->sc_state, UHID_IMMED);
		} else
			atomic_and_32(&sc->sc_state, ~UHID_IMMED);
		break;

	case USB_GET_REPORT:
		re = (struct usb_ctl_report *)addr;
		switch (re->ucr_report) {
		case UHID_INPUT_REPORT:
			size = sc->sc_isize;
			break;
		case UHID_OUTPUT_REPORT:
			size = sc->sc_osize;
			break;
		case UHID_FEATURE_REPORT:
			size = sc->sc_fsize;
			break;
		default:
			return EINVAL;
		}
		/* Non-zero report ids prepend one id byte; strip it below. */
		extra = sc->sc_hdev.sc_report_id != 0;
		if (size + extra > sizeof(re->ucr_data))
			return ENOBUFS;
		err = uhidev_get_report(&sc->sc_hdev, re->ucr_report,
		    re->ucr_data, size + extra);
		/* NOTE(review): the memmove runs even when err != 0;
		 * harmless since the buffer is discarded on EIO. */
		if (extra)
			memmove(re->ucr_data, re->ucr_data+1, size);
		if (err)
			return EIO;
		break;

	case USB_SET_REPORT:
		re = (struct usb_ctl_report *)addr;
		switch (re->ucr_report) {
		case UHID_INPUT_REPORT:
			size = sc->sc_isize;
			break;
		case UHID_OUTPUT_REPORT:
			size = sc->sc_osize;
			break;
		case UHID_FEATURE_REPORT:
			size = sc->sc_fsize;
			break;
		default:
			return EINVAL;
		}
		if (size > sizeof(re->ucr_data))
			return ENOBUFS;
		err = uhidev_set_report(&sc->sc_hdev, re->ucr_report,
		    re->ucr_data, size);
		if (err)
			return EIO;
		break;

	case USB_GET_REPORT_ID:
		*(int *)addr = sc->sc_hdev.sc_report_id;
		break;

	case USB_GET_DEVICE_DESC:
		*(usb_device_descriptor_t *)addr =
			*usbd_get_device_descriptor(sc->sc_hdev.sc_parent->sc_udev);
		break;

	case USB_GET_DEVICEINFO:
		usbd_fill_deviceinfo(sc->sc_hdev.sc_parent->sc_udev,
				     (struct usb_device_info *)addr, 0);
		break;
	case USB_GET_DEVICEINFO_OLD:
		/* Compat hook; enosys() if the compat module is absent. */
		MODULE_HOOK_CALL(usb_subr_fill_30_hook,
                    (sc->sc_hdev.sc_parent->sc_udev,
		      (struct usb_device_info_old *)addr, 0,
                      usbd_devinfo_vp, usbd_printBCD),
                    enosys(), err);
		if (err == 0)
			return 0;
		break;
	case USB_GET_STRING_DESC:
	    {
		struct usb_string_desc *si = (struct usb_string_desc *)addr;
		err = usbd_get_string_desc(sc->sc_hdev.sc_parent->sc_udev,
			si->usd_string_index,
			si->usd_language_id, &si->usd_desc, &size);
		if (err)
			return EINVAL;
		break;
	    }

	default:
		return EINVAL;
	}
	return 0;
}
789 
790 static int
791 uhidioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
792 {
793 	struct uhid_softc *sc;
794 	int error;
795 
796 	error = uhid_enter(dev, &sc);
797 	if (error)
798 		return error;
799 	error = uhid_do_ioctl(sc, cmd, addr, flag, l);
800 	uhid_exit(sc);
801 	return error;
802 }
803 
804 static int
805 uhidpoll(dev_t dev, int events, struct lwp *l)
806 {
807 	struct uhid_softc *sc;
808 	int revents = 0;
809 
810 	if (uhid_enter(dev, &sc) != 0)
811 		return POLLHUP;
812 
813 	mutex_enter(&sc->sc_lock);
814 	if (events & (POLLOUT | POLLWRNORM))
815 		revents |= events & (POLLOUT | POLLWRNORM);
816 	if (events & (POLLIN | POLLRDNORM)) {
817 		if (sc->sc_q.c_cc > 0)
818 			revents |= events & (POLLIN | POLLRDNORM);
819 		else
820 			selrecord(l, &sc->sc_rsel);
821 	}
822 	mutex_exit(&sc->sc_lock);
823 
824 	uhid_exit(sc);
825 	return revents;
826 }
827 
828 static void
829 filt_uhidrdetach(struct knote *kn)
830 {
831 	struct uhid_softc *sc = kn->kn_hook;
832 
833 	mutex_enter(&sc->sc_lock);
834 	selremove_knote(&sc->sc_rsel, kn);
835 	mutex_exit(&sc->sc_lock);
836 }
837 
838 static int
839 filt_uhidread(struct knote *kn, long hint)
840 {
841 	struct uhid_softc *sc = kn->kn_hook;
842 
843 	if (hint == NOTE_SUBMIT)
844 		KASSERT(mutex_owned(&sc->sc_lock));
845 	else
846 		mutex_enter(&sc->sc_lock);
847 
848 	kn->kn_data = sc->sc_q.c_cc;
849 
850 	if (hint == NOTE_SUBMIT)
851 		KASSERT(mutex_owned(&sc->sc_lock));
852 	else
853 		mutex_exit(&sc->sc_lock);
854 
855 	return kn->kn_data > 0;
856 }
857 
/* Read-filter ops: attached manually in uhidkqfilter, so no f_attach. */
static const struct filterops uhidread_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_uhidrdetach,
	.f_event = filt_uhidread,
};
864 
865 static int
866 uhidkqfilter(dev_t dev, struct knote *kn)
867 {
868 	struct uhid_softc *sc;
869 	int error;
870 
871 	error = uhid_enter(dev, &sc);
872 	if (error)
873 		return error;
874 
875 	switch (kn->kn_filter) {
876 	case EVFILT_READ:
877 		kn->kn_fop = &uhidread_filtops;
878 		kn->kn_hook = sc;
879 		mutex_enter(&sc->sc_lock);
880 		selrecord_knote(&sc->sc_rsel, kn);
881 		mutex_exit(&sc->sc_lock);
882 		break;
883 
884 	case EVFILT_WRITE:
885 		kn->kn_fop = &seltrue_filtops;
886 		break;
887 
888 	default:
889 		error = EINVAL;
890 		goto out;
891 	}
892 
893 out:	uhid_exit(sc);
894 	return error;
895 }
896