/*	$NetBSD: sys_select.c,v 1.61 2023/07/17 12:54:29 riastradh Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009, 2010, 2019, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran and Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)sys_generic.c	8.9 (Berkeley) 2/14/95
 */

/*
 * System calls of the synchronous I/O multiplexing subsystem.
 *
 * Locking
 *
 * Two locks are used: <object-lock> and selcluster_t::sc_lock.
 *
 * The <object-lock> might be a device driver or another subsystem, e.g.
 * socket or pipe.  This lock is not exported, and thus invisible to this
 * subsystem.  Mainly, synchronisation between selrecord() and selnotify()
 * routines depends on this lock, as described in the comments below.
 *
 * Lock order
 *
 *	<object-lock> ->
 *		selcluster_t::sc_lock
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_select.c,v 1.61 2023/07/17 12:54:29 riastradh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/kmem.h>		/* kmem_alloc()/kmem_free() */
#include <sys/lwp.h>
#include <sys/poll.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/sleepq.h>
#include <sys/sysctl.h>
#include <sys/bitops.h>

/* Flags for lwp::l_selflag. */
#define	SEL_RESET	0	/* awoken, interrupted, or not yet polling */
#define	SEL_SCANNING	1	/* polling descriptors */
#define	SEL_BLOCKING	2	/* blocking and waiting for event */
#define	SEL_EVENT	3	/* interrupted, events set directly */

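/*
 * Roughly: sel_do_scan() sets the flag to SEL_SCANNING (or SEL_RESET for
 * a non-blocking scan without direct select) before polling descriptors,
 * and to SEL_BLOCKING before sleeping.  selnotify() then moves the LWP
 * either back to SEL_RESET (full re-scan required) or, with direct
 * select, to SEL_EVENT (the results were recorded on its behalf).
 */
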
/*
 * Per-cluster state for select()/poll().  For a system with fewer
 * than 64 CPUs, this gives us per-CPU clusters.
 */
#define	SELCLUSTERS	64
#define	SELCLUSTERMASK	(SELCLUSTERS - 1)

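/*
 * A cluster bundles a spin lock, a sleep queue for blocking waiters, and
 * a collision counter.  sc_mask is this cluster's bit in a selinfo's
 * sel_collision mask.
 */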
typedef struct selcluster {
	kmutex_t	*sc_lock;
	sleepq_t	sc_sleepq;
	uint64_t	sc_mask;
	int		sc_ncoll;
} selcluster_t;

static inline int	selscan(char *, const int, const size_t, register_t *);
static inline int	pollscan(struct pollfd *, const int, register_t *);
static void		selclear(void);

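/*
 * Poll events checked for each of the three select() fd_sets, indexed
 * in the order the bitmaps are laid out: read, write, exceptional.
 */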
static const int sel_flag[] = {
	POLLRDNORM | POLLHUP | POLLERR,
	POLLWRNORM | POLLHUP | POLLERR,
	POLLRDBAND
};

/*
 * LWPs are woken using the sleep queue only due to a collision, the case
 * with the maximum Suck Factor.  Save the cost of sorting for named waiters
 * by inserting in LIFO order.  In the future it would be preferable to not
 * enqueue LWPs at all, unless subject to a collision.
 */
syncobj_t select_sobj = {
	.sobj_name	= "select",
	.sobj_flag	= SOBJ_SLEEPQ_LIFO,
	.sobj_unsleep	= sleepq_unsleep,
	.sobj_changepri	= sleepq_changepri,
	.sobj_lendpri	= sleepq_lendpri,
	.sobj_owner	= syncobj_noowner,
};

static selcluster_t	*selcluster[SELCLUSTERS] __read_mostly;
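/*
 * When non-zero, selnotify() delivers events directly into the waiting
 * LWP's result bits via sel_setevents() instead of forcing a full
 * re-scan.  Tunable via the sysctl node created at the bottom of this
 * file.
 */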
static int		direct_select __read_mostly = 0;

/* Operations: either select() or poll(). */
const char		selop_select[] = "select";
const char		selop_poll[] = "poll";

/*
 * Select system call.
 */
int
sys___pselect50(struct lwp *l, const struct sys___pselect50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int)				nd;
		syscallarg(fd_set *)			in;
		syscallarg(fd_set *)			ou;
		syscallarg(fd_set *)			ex;
		syscallarg(const struct timespec *)	ts;
		syscallarg(sigset_t *)			mask;
	} */
	struct timespec	ats, *ts = NULL;
	sigset_t	amask, *mask = NULL;
	int		error;

	if (SCARG(uap, ts)) {
		error = copyin(SCARG(uap, ts), &ats, sizeof(ats));
		if (error)
			return error;
		ts = &ats;
	}
	if (SCARG(uap, mask) != NULL) {
		error = copyin(SCARG(uap, mask), &amask, sizeof(amask));
		if (error)
			return error;
		mask = &amask;
	}

	return selcommon(retval, SCARG(uap, nd), SCARG(uap, in),
	    SCARG(uap, ou), SCARG(uap, ex), ts, mask);
}

int
sys___select50(struct lwp *l, const struct sys___select50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int)			nd;
		syscallarg(fd_set *)		in;
		syscallarg(fd_set *)		ou;
		syscallarg(fd_set *)		ex;
		syscallarg(struct timeval *)	tv;
	} */
	struct timeval atv;
	struct timespec ats, *ts = NULL;
	int error;

	if (SCARG(uap, tv)) {
		error = copyin(SCARG(uap, tv), (void *)&atv, sizeof(atv));
		if (error)
			return error;

		if (atv.tv_usec < 0 || atv.tv_usec >= 1000000)
			return EINVAL;

		TIMEVAL_TO_TIMESPEC(&atv, &ats);
		ts = &ats;
	}

	return selcommon(retval, SCARG(uap, nd), SCARG(uap, in),
	    SCARG(uap, ou), SCARG(uap, ex), ts, NULL);
}

/*
 * sel_do_scan: common code to perform the scan on descriptors.
 */
static int
sel_do_scan(const char *opname, void *fds, const int nf, const size_t ni,
    struct timespec *ts, sigset_t *mask, register_t *retval)
{
	lwp_t		* const l = curlwp;
	selcluster_t	*sc;
	kmutex_t	*lock;
	struct timespec	sleepts;
	int		error, timo;

	timo = 0;
	if (ts && inittimeleft(ts, &sleepts) == -1) {
		return EINVAL;
	}

	if (__predict_false(mask))
		sigsuspendsetup(l, mask);

	/*
	 * We may context switch during or at any time after picking a CPU
	 * and cluster to associate with, but it doesn't matter.  In the
	 * unlikely event we migrate elsewhere all we risk is a little lock
	 * contention; correctness is not sacrificed.
	 */
	sc = curcpu()->ci_data.cpu_selcluster;
	lock = sc->sc_lock;
	l->l_selcluster = sc;

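	/*
	 * Stash the select() bitmap details: sel_setevents() relies on a
	 * NULL l_selbits to tell a poll() caller apart from a select()
	 * caller.
	 */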
	if (opname == selop_select) {
		l->l_selbits = fds;
		l->l_selni = ni;
	} else {
		l->l_selbits = NULL;
	}

	for (;;) {
		int ncoll;

		SLIST_INIT(&l->l_selwait);
		l->l_selret = 0;

		/*
		 * No need to lock.  If l_selflag is overwritten by another
		 * value while scanning, we will retry below.  We only need
		 * to see exact state from the descriptors that we are about
		 * to poll, and lock activity resulting from fo_poll is
		 * enough to provide an up to date value for new polling
		 * activity.
		 */
		if (ts && (ts->tv_sec | ts->tv_nsec | direct_select) == 0) {
			/* Non-blocking: no need for selrecord()/selclear() */
			l->l_selflag = SEL_RESET;
		} else {
			l->l_selflag = SEL_SCANNING;
		}
		ncoll = sc->sc_ncoll;
		membar_release();

		if (opname == selop_select) {
			error = selscan((char *)fds, nf, ni, retval);
		} else {
			error = pollscan((struct pollfd *)fds, nf, retval);
		}
		if (error || *retval)
			break;
		if (ts && (timo = gettimeleft(ts, &sleepts)) <= 0)
			break;
		/*
		 * Acquire the lock and perform the (re)checks.  Note that if
		 * a collision has occurred, our state does not matter, as we
		 * must perform a re-scan.  Therefore, check it first.
		 */
state_check:
		mutex_spin_enter(lock);
		if (__predict_false(sc->sc_ncoll != ncoll)) {
			/* Collision: perform re-scan. */
			mutex_spin_exit(lock);
			selclear();
			continue;
		}
		if (__predict_true(l->l_selflag == SEL_EVENT)) {
			/* Events occurred, they are set directly. */
			mutex_spin_exit(lock);
			break;
		}
		if (__predict_true(l->l_selflag == SEL_RESET)) {
			/* Events occurred, but a re-scan is requested. */
			mutex_spin_exit(lock);
			selclear();
			continue;
		}
		/* Nothing happened, therefore: sleep. */
		l->l_selflag = SEL_BLOCKING;
		l->l_kpriority = true;
		sleepq_enter(&sc->sc_sleepq, l, lock);
		sleepq_enqueue(&sc->sc_sleepq, sc, opname, &select_sobj, true);
		error = sleepq_block(timo, true, &select_sobj);
		if (error != 0) {
			break;
		}
		/* Awoken: need to check the state. */
		goto state_check;
	}
	selclear();

	/* Add direct events if any. */
	if (l->l_selflag == SEL_EVENT) {
		KASSERT(l->l_selret != 0);
		*retval += l->l_selret;
	}

	if (__predict_false(mask))
		sigsuspendteardown(l);

	/* select and poll are not restarted after signals... */
	if (error == ERESTART)
		return EINTR;
	if (error == EWOULDBLOCK)
		return 0;
	return error;
}

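/*
 * selcommon: common code for the select() family.  The bits buffer holds
 * six bitmaps of ni bytes each: the three input sets (in/ou/ex), followed
 * by the three output sets that selscan() fills in.
 */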
int
selcommon(register_t *retval, int nd, fd_set *u_in, fd_set *u_ou,
    fd_set *u_ex, struct timespec *ts, sigset_t *mask)
{
	char		smallbits[howmany(FD_SETSIZE, NFDBITS) *
			    sizeof(fd_mask) * 6];
	char		*bits;
	int		error, nf;
	size_t		ni;

	if (nd < 0)
		return (EINVAL);
	nf = atomic_load_consume(&curlwp->l_fd->fd_dt)->dt_nfiles;
	if (nd > nf) {
		/* forgiving; slightly wrong */
		nd = nf;
	}
	ni = howmany(nd, NFDBITS) * sizeof(fd_mask);
	if (ni * 6 > sizeof(smallbits))
		bits = kmem_alloc(ni * 6, KM_SLEEP);
	else
		bits = smallbits;

#define	getbits(name, x)						\
	if (u_ ## name) {						\
		error = copyin(u_ ## name, bits + ni * x, ni);		\
		if (error)						\
			goto fail;					\
	} else								\
		memset(bits + ni * x, 0, ni);
	getbits(in, 0);
	getbits(ou, 1);
	getbits(ex, 2);
#undef	getbits

	error = sel_do_scan(selop_select, bits, nd, ni, ts, mask, retval);
	if (error == 0 && u_in != NULL)
		error = copyout(bits + ni * 3, u_in, ni);
	if (error == 0 && u_ou != NULL)
		error = copyout(bits + ni * 4, u_ou, ni);
	if (error == 0 && u_ex != NULL)
		error = copyout(bits + ni * 5, u_ex, ni);
 fail:
	if (bits != smallbits)
		kmem_free(bits, ni * 6);
	return (error);
}

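/*
 * selscan: poll the descriptors named in the three input bitmaps, set the
 * corresponding bits in the output bitmaps for each ready descriptor, and
 * count the number of hits into *retval.
 */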
static inline int
selscan(char *bits, const int nfd, const size_t ni, register_t *retval)
{
	fd_mask *ibitp, *obitp;
	int msk, i, j, fd, n;
	file_t *fp;
	lwp_t *l;

	ibitp = (fd_mask *)(bits + ni * 0);
	obitp = (fd_mask *)(bits + ni * 3);
	n = 0;
	l = curlwp;

	memset(obitp, 0, ni * 3);
	for (msk = 0; msk < 3; msk++) {
		for (i = 0; i < nfd; i += NFDBITS) {
			fd_mask ibits, obits;

			ibits = *ibitp;
			obits = 0;
			while ((j = ffs(ibits)) && (fd = i + --j) < nfd) {
				ibits &= ~(1U << j);
				if ((fp = fd_getfile(fd)) == NULL)
					return (EBADF);
				/*
				 * Set up the argument to selrecord(), which
				 * is the file descriptor number.
				 */
				l->l_selrec = fd;
				if ((*fp->f_ops->fo_poll)(fp, sel_flag[msk])) {
					if (!direct_select) {
						/*
						 * Have events: do nothing in
						 * selrecord().
						 */
						l->l_selflag = SEL_RESET;
					}
					obits |= (1U << j);
					n++;
				}
				fd_putfile(fd);
			}
			if (obits != 0) {
				if (direct_select) {
					kmutex_t *lock;
					lock = l->l_selcluster->sc_lock;
					mutex_spin_enter(lock);
					*obitp |= obits;
					mutex_spin_exit(lock);
				} else {
					*obitp |= obits;
				}
			}
			ibitp++;
			obitp++;
		}
	}
	*retval = n;
	return (0);
}

/*
 * Poll system call.
 */
int
sys_poll(struct lwp *l, const struct sys_poll_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct pollfd *)	fds;
		syscallarg(u_int)		nfds;
		syscallarg(int)			timeout;
	} */
	struct timespec	ats, *ts = NULL;

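	/*
	 * Convert the millisecond timeout to a timespec, e.g. 1500 becomes
	 * { .tv_sec = 1, .tv_nsec = 500000000 }.  INFTIM means no timeout.
	 */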
	if (SCARG(uap, timeout) != INFTIM) {
		ats.tv_sec = SCARG(uap, timeout) / 1000;
		ats.tv_nsec = (SCARG(uap, timeout) % 1000) * 1000000;
		ts = &ats;
	}

	return pollcommon(retval, SCARG(uap, fds), SCARG(uap, nfds), ts, NULL);
}

/*
 * Poll system call, with a timespec timeout and signal mask.
 */
int
sys___pollts50(struct lwp *l, const struct sys___pollts50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct pollfd *)		fds;
		syscallarg(u_int)			nfds;
		syscallarg(const struct timespec *)	ts;
		syscallarg(const sigset_t *)		mask;
	} */
	struct timespec	ats, *ts = NULL;
	sigset_t	amask, *mask = NULL;
	int		error;

	if (SCARG(uap, ts)) {
		error = copyin(SCARG(uap, ts), &ats, sizeof(ats));
		if (error)
			return error;
		ts = &ats;
	}
	if (SCARG(uap, mask)) {
		error = copyin(SCARG(uap, mask), &amask, sizeof(amask));
		if (error)
			return error;
		mask = &amask;
	}

	return pollcommon(retval, SCARG(uap, fds), SCARG(uap, nfds), ts, mask);
}

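/*
 * pollcommon: common code for the poll() family.  Copies in the pollfd
 * array, runs the scan and copies the updated array back out.
 */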
int
pollcommon(register_t *retval, struct pollfd *u_fds, u_int nfds,
    struct timespec *ts, sigset_t *mask)
{
	struct pollfd	smallfds[32];
	struct pollfd	*fds;
	int		error;
	size_t		ni;

	if (nfds > curlwp->l_proc->p_rlimit[RLIMIT_NOFILE].rlim_max + 1000) {
		/*
		 * Prevent userland from causing over-allocation.
		 * Raising the default limit too high can still cause
		 * a lot of memory to be allocated, but then the file
		 * descriptor array will be large too.
		 *
		 * To reduce the memory requirements here, we could
		 * process the 'fds' array in chunks, but that
		 * is a lot of code that isn't normally useful.
		 * (Or just move the copyin/out into pollscan().)
		 *
		 * Historically the code silently truncated 'fds' to
		 * dt_nfiles entries - but that does cause issues.
		 *
		 * Using the max limit equivalent to sysctl
		 * kern.maxfiles is the moral equivalent of OPEN_MAX
		 * as specified by POSIX.
		 *
		 * We add a slop of 1000 in case the resource limit was
		 * changed after opening descriptors or the same descriptor
		 * was specified more than once.
		 */
		return EINVAL;
	}
	ni = nfds * sizeof(struct pollfd);
	if (ni > sizeof(smallfds))
		fds = kmem_alloc(ni, KM_SLEEP);
	else
		fds = smallfds;

	error = copyin(u_fds, fds, ni);
	if (error)
		goto fail;

	error = sel_do_scan(selop_poll, fds, nfds, ni, ts, mask, retval);
	if (error == 0)
		error = copyout(fds, u_fds, ni);
 fail:
	if (fds != smallfds)
		kmem_free(fds, ni);
	return (error);
}

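/*
 * pollscan: poll each descriptor in the array, store any returned events
 * in its revents field, and count the ready descriptors into *retval.
 */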
static inline int
pollscan(struct pollfd *fds, const int nfd, register_t *retval)
{
	file_t *fp;
	int i, n = 0, revents;

	for (i = 0; i < nfd; i++, fds++) {
		fds->revents = 0;
		if (fds->fd < 0) {
			revents = 0;
		} else if ((fp = fd_getfile(fds->fd)) == NULL) {
			revents = POLLNVAL;
		} else {
			/*
			 * Perform poll: registers select request or returns
			 * the events which are set.  Set up the argument for
			 * selrecord(), which is a pointer to struct pollfd.
			 */
			curlwp->l_selrec = (uintptr_t)fds;
			revents = (*fp->f_ops->fo_poll)(fp,
			    fds->events | POLLERR | POLLHUP);
			fd_putfile(fds->fd);
		}
		if (revents) {
			if (!direct_select) {
				/* Have events: do nothing in selrecord(). */
				curlwp->l_selflag = SEL_RESET;
			}
			fds->revents = revents;
			n++;
		}
	}
	*retval = n;
	return (0);
}

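/*
 * seltrue: generic fo_poll routine for devices which never block, i.e.
 * which are always ready for reading and writing.
 */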
int
seltrue(dev_t dev, int events, lwp_t *l)
{

	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Record a select request.  Concurrency issues:
 *
 * The caller holds the same lock across calls to selrecord() and
 * selnotify(), so we don't need to consider a concurrent wakeup
 * while in this routine.
 *
 * The only activity we need to guard against is selclear(), called by
 * another thread that is exiting sel_do_scan().
 * `sel_lwp' can only become non-NULL while the caller's lock is held,
 * so it cannot become non-NULL due to a change made by another thread
 * while we are in this routine.  It can only become _NULL_ due to a
 * call to selclear().
 *
 * If it is non-NULL and != selector there is the potential for
 * selclear() to be called by another thread.  If either of those
 * conditions is true, we're not interested in touching the `named
 * waiter' part of the selinfo record because we need to record a
 * collision.  Hence there is no need for additional locking in this
 * routine.
 */
void
selrecord(lwp_t *selector, struct selinfo *sip)
{
	selcluster_t *sc;
	lwp_t *other;

	KASSERT(selector == curlwp);

	sc = selector->l_selcluster;
	other = sip->sel_lwp;

	if (selector->l_selflag == SEL_RESET) {
		/* 0. We're not going to block - will poll again if needed. */
	} else if (other == selector) {
		/* 1. We (selector) already claimed to be the first LWP. */
		KASSERT(sip->sel_cluster == sc);
	} else if (other == NULL) {
		/*
		 * 2. No first LWP, therefore we (selector) are the first.
		 *
		 * There may be unnamed waiters (collisions).  Issue a memory
		 * barrier to ensure that we access sel_lwp (above) before
		 * other fields - this guards against a call to selclear().
		 */
		membar_acquire();
		sip->sel_lwp = selector;
		SLIST_INSERT_HEAD(&selector->l_selwait, sip, sel_chain);
		/* Copy the argument, which is for selnotify(). */
		sip->sel_fdinfo = selector->l_selrec;
		/* Replace selinfo's lock with the chosen cluster's lock. */
		sip->sel_cluster = sc;
	} else {
		/* 3. Multiple waiters: record a collision. */
		sip->sel_collision |= sc->sc_mask;
		KASSERT(sip->sel_cluster != NULL);
	}
}

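/*
 * A minimal sketch of how a driver might pair selrecord() with
 * selnotify(); the names foo_softc, sc_rsel and foo_data_ready are
 * illustrative only:
 *
 *	int
 *	foo_poll(struct file *fp, int events)
 *	{
 *		struct foo_softc *sc = fp->f_data;
 *		int revents = 0;
 *
 *		mutex_enter(&sc->sc_lock);	(this is the <object-lock>)
 *		if ((events & (POLLIN | POLLRDNORM)) != 0) {
 *			if (foo_data_ready(sc))
 *				revents |= events & (POLLIN | POLLRDNORM);
 *			else
 *				selrecord(curlwp, &sc->sc_rsel);
 *		}
 *		mutex_exit(&sc->sc_lock);
 *		return revents;
 *	}
 *
 * and later, when data arrives with the same lock held:
 *
 *	selnotify(&sc->sc_rsel, POLLIN | POLLRDNORM, 0);
 */
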
/*
 * Record a knote.
 *
 * The caller holds the same lock as for selrecord().
 */
void
selrecord_knote(struct selinfo *sip, struct knote *kn)
{
	klist_insert(&sip->sel_klist, kn);
}

/*
 * Remove a knote.
 *
 * The caller holds the same lock as for selrecord().
 *
 * Returns true if the last knote was removed and the list
 * is now empty.
 */
bool
selremove_knote(struct selinfo *sip, struct knote *kn)
{
	return klist_remove(&sip->sel_klist, kn);
}

/*
 * sel_setevents: a helper function for selnotify(), to set the events
 * for an LWP sleeping in selcommon() or pollcommon().
 */
static inline bool
sel_setevents(lwp_t *l, struct selinfo *sip, const int events)
{
	const int oflag = l->l_selflag;
	int ret = 0;

	/*
	 * If we require a re-scan, or one was required by somebody else,
	 * then just (re)set SEL_RESET and return.
	 */
	if (__predict_false(events == 0 || oflag == SEL_RESET)) {
		l->l_selflag = SEL_RESET;
		return true;
	}
	/*
	 * Direct set.  Note: the select state of the LWP is locked.  First,
	 * determine whether it is selcommon() or pollcommon().
	 */
	if (l->l_selbits != NULL) {
		const size_t ni = l->l_selni;
		fd_mask *fds = (fd_mask *)l->l_selbits;
		fd_mask *ofds = (fd_mask *)((char *)fds + ni * 3);
		const int fd = sip->sel_fdinfo, fbit = 1 << (fd & __NFDMASK);
		const int idx = fd >> __NFDSHIFT;
		int n;

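		/*
		 * Locate the descriptor's word and bit; e.g. with 32-bit
		 * fd_mask words, fd 70 gives idx 2 and fbit 1 << 6.  Set
		 * the bit in every output set whose input set names the
		 * descriptor and whose events match.
		 */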
		for (n = 0; n < 3; n++) {
			if ((fds[idx] & fbit) != 0 &&
			    (ofds[idx] & fbit) == 0 &&
			    (sel_flag[n] & events)) {
				ofds[idx] |= fbit;
				ret++;
			}
			fds = (fd_mask *)((char *)fds + ni);
			ofds = (fd_mask *)((char *)ofds + ni);
		}
	} else {
		struct pollfd *pfd = (void *)sip->sel_fdinfo;
		int revents = events & (pfd->events | POLLERR | POLLHUP);

		if (revents) {
			if (pfd->revents == 0)
				ret = 1;
			pfd->revents |= revents;
		}
	}
	/* Check whether there are any events to return. */
	if (!ret) {
		return false;
	}
	/* Indicate direct set and note the event (cluster lock is held). */
	l->l_selflag = SEL_EVENT;
	l->l_selret += ret;
	return true;
}

/*
 * Do a wakeup when a selectable event occurs.  Concurrency issues:
 *
 * As per selrecord(), the caller's object lock is held.  If there
 * is a named waiter, we must acquire the associated selcluster's lock
 * in order to synchronize with selclear() and pollers going to sleep
 * in sel_do_scan().
 *
 * sip->sel_cluster cannot change at this point, as it is only changed
 * in selrecord(), and concurrent calls to selrecord() are locked
 * out by the caller.
 */
void
selnotify(struct selinfo *sip, int events, long knhint)
{
	selcluster_t *sc;
	uint64_t mask;
	int index, oflag;
	lwp_t *l;
	kmutex_t *lock;

	KNOTE(&sip->sel_klist, knhint);

	if (sip->sel_lwp != NULL) {
		/* One named LWP is waiting. */
		sc = sip->sel_cluster;
		lock = sc->sc_lock;
		mutex_spin_enter(lock);
		/* Still there? */
		if (sip->sel_lwp != NULL) {
			/*
			 * Set the events for our LWP and indicate that.
			 * Otherwise, request a full re-scan.
			 */
			l = sip->sel_lwp;
			oflag = l->l_selflag;

			if (!direct_select) {
				l->l_selflag = SEL_RESET;
			} else if (!sel_setevents(l, sip, events)) {
				/* No events to return. */
				mutex_spin_exit(lock);
				return;
			}

			/*
			 * If the thread is sleeping, wake it up.  If it's
			 * not yet asleep, it will notice the change in state
			 * and will re-poll the descriptors.
			 */
			if (oflag == SEL_BLOCKING && l->l_mutex == lock) {
				KASSERT(l->l_wchan == sc);
				sleepq_unsleep(l, false);
			}
		}
		mutex_spin_exit(lock);
	}

	if ((mask = sip->sel_collision) != 0) {
		/*
		 * There was a collision (multiple waiters): we must
		 * inform all potentially interested waiters.
		 */
		sip->sel_collision = 0;
		do {
			index = ffs64(mask) - 1;
			mask ^= __BIT(index);
			sc = selcluster[index];
			lock = sc->sc_lock;
			mutex_spin_enter(lock);
			sc->sc_ncoll++;
			sleepq_wake(&sc->sc_sleepq, sc, (u_int)-1, lock);
		} while (__predict_false(mask != 0));
	}
}

/*
 * Remove an LWP from all objects that it is waiting for.  Concurrency
 * issues:
 *
 * The object owner's (e.g. device driver) lock is not held here.  Calls
 * can be made to selrecord() and we do not synchronize against those
 * directly using locks.  However, we use `sel_lwp' to lock out changes.
 * Before clearing it we must use memory barriers to ensure that we can
 * safely traverse the list of selinfo records.
 */
static void
selclear(void)
{
	struct selinfo *sip, *next;
	selcluster_t *sc;
	lwp_t *l;
	kmutex_t *lock;

	l = curlwp;
	sc = l->l_selcluster;
	lock = sc->sc_lock;

	/*
	 * If the request was non-blocking, or we found events on the first
	 * descriptor, there will be no need to clear anything - avoid
	 * taking the lock.
	 */
	if (SLIST_EMPTY(&l->l_selwait)) {
		return;
	}

	mutex_spin_enter(lock);
	for (sip = SLIST_FIRST(&l->l_selwait); sip != NULL; sip = next) {
		KASSERT(sip->sel_lwp == l);
		KASSERT(sip->sel_cluster == l->l_selcluster);

		/*
		 * Read link to next selinfo record, if any.
		 * It's no longer safe to touch `sip' after clearing
		 * `sel_lwp', so ensure that the read of `sel_chain'
		 * completes before the clearing of sel_lwp becomes
		 * globally visible.
		 */
		next = SLIST_NEXT(sip, sel_chain);
		/* Release the record for another named waiter to use. */
		atomic_store_release(&sip->sel_lwp, NULL);
	}
	mutex_spin_exit(lock);
}

/*
 * Initialize the select/poll system calls.  Called once for each
 * CPU in the system, as they are attached.
 */
void
selsysinit(struct cpu_info *ci)
{
	selcluster_t *sc;
	u_int index;

	/* If a cluster is already in place for this index, re-use it. */
	index = cpu_index(ci) & SELCLUSTERMASK;
	sc = selcluster[index];
	if (sc == NULL) {
		sc = kmem_alloc(roundup2(sizeof(selcluster_t),
		    coherency_unit) + coherency_unit, KM_SLEEP);
		sc = (void *)roundup2((uintptr_t)sc, coherency_unit);
		sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
		sleepq_init(&sc->sc_sleepq);
		sc->sc_ncoll = 0;
		sc->sc_mask = __BIT(index);
		selcluster[index] = sc;
	}
	ci->ci_data.cpu_selcluster = sc;
}

/*
 * Initialize a selinfo record.
 */
void
selinit(struct selinfo *sip)
{

	memset(sip, 0, sizeof(*sip));
	klist_init(&sip->sel_klist);
}

/*
 * Destroy a selinfo record.  The owning object must not gain new
 * references while this is in progress: all activity on the record
 * must be stopped.
 *
 * Concurrency issues: we only need to guard against a call to selclear()
 * by a thread exiting sel_do_scan().  The caller has prevented further
 * references being made to the selinfo record via selrecord(), and it
 * will not call selnotify() again.
 */
void
seldestroy(struct selinfo *sip)
{
	selcluster_t *sc;
	kmutex_t *lock;
	lwp_t *l;

	klist_fini(&sip->sel_klist);

	if (sip->sel_lwp == NULL)
		return;

	/*
	 * Lock out selclear().  The selcluster pointer can't change while
	 * we are here since it is only ever changed in selrecord(),
	 * and that will not be entered again for this record because
	 * it is dying.
	 */
	KASSERT(sip->sel_cluster != NULL);
	sc = sip->sel_cluster;
	lock = sc->sc_lock;
	mutex_spin_enter(lock);
	if ((l = sip->sel_lwp) != NULL) {
		/*
		 * This should rarely happen, so although SLIST_REMOVE()
		 * is slow, using it here is not a problem.
		 */
		KASSERT(l->l_selcluster == sc);
		SLIST_REMOVE(&l->l_selwait, sip, selinfo, sel_chain);
		sip->sel_lwp = NULL;
	}
	mutex_spin_exit(lock);
}

/*
 * System control nodes.
 */
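/*
 * The node is created under CTL_KERN with a dynamically assigned number
 * (CTL_CREATE), so it should show up as kern.direct_select; e.g.
 *
 *	sysctl -w kern.direct_select=1
 */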
SYSCTL_SETUP(sysctl_select_setup, "sysctl select setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "direct_select",
		SYSCTL_DESCR("Enable/disable direct select (for testing)"),
		NULL, 0, &direct_select, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);
}
976