/*	$NetBSD: sys_select.c,v 1.33 2011/05/28 15:33:41 christos Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009, 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran and Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)sys_generic.c	8.9 (Berkeley) 2/14/95
 */

/*
 * System calls of synchronous I/O multiplexing subsystem.
 *
 * Locking
 *
 * Two locks are used: <object-lock> and selcluster_t::sc_lock.
 *
 * The <object-lock> may belong to a device driver or to another subsystem,
 * e.g. a socket or a pipe.  That lock is not exported, and is thus invisible
 * to this subsystem.  Synchronisation between the selrecord() and selnotify()
 * routines depends mainly on this lock, as described in the comments below.
 *
 * Lock order
 *
 *	<object-lock> ->
 *		selcluster_t::sc_lock
 */
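
/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * consumer of this subsystem takes its own <object-lock> first, and the
 * routines here take selcluster_t::sc_lock beneath it, e.g.:
 *
 *	mutex_enter(&sc->sc_objlock);		   the <object-lock>
 *	selrecord(curlwp, &sc->sc_rsel);	   may take sc_lock inside
 *	mutex_exit(&sc->sc_objlock);
 */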

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_select.c,v 1.33 2011/05/28 15:33:41 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/lwp.h>
#include <sys/poll.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/sleepq.h>

/* Flags for lwp::l_selflag. */
#define	SEL_RESET	0	/* awoken, interrupted, or not yet polling */
#define	SEL_SCANNING	1	/* polling descriptors */
#define	SEL_BLOCKING	2	/* blocking and waiting for event */
#define	SEL_EVENT	3	/* interrupted, events set directly */

/* Operations: either select() or poll(). */
#define	SELOP_SELECT	1
#define	SELOP_POLL	2

/*
 * Per-cluster state for select()/poll().  For a system with fewer
 * than 32 CPUs, this gives us per-CPU clusters.
 */
#define	SELCLUSTERS	32
#define	SELCLUSTERMASK	(SELCLUSTERS - 1)

typedef struct selcluster {
	kmutex_t	*sc_lock;
	sleepq_t	sc_sleepq;
	int		sc_ncoll;
	uint32_t	sc_mask;
} selcluster_t;

static inline int	selscan(char *, const int, const size_t, register_t *);
static inline int	pollscan(struct pollfd *, const int, register_t *);
static void		selclear(void);

static const int sel_flag[] = {
	POLLRDNORM | POLLHUP | POLLERR,
	POLLWRNORM | POLLHUP | POLLERR,
	POLLRDBAND
};

static syncobj_t select_sobj = {
	SOBJ_SLEEPQ_FIFO,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

static selcluster_t	*selcluster[SELCLUSTERS] __read_mostly;

/*
 * Select system call.
 */
int
sys___pselect50(struct lwp *l, const struct sys___pselect50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int)				nd;
		syscallarg(fd_set *)			in;
		syscallarg(fd_set *)			ou;
		syscallarg(fd_set *)			ex;
		syscallarg(const struct timespec *)	ts;
		syscallarg(sigset_t *)			mask;
	} */
	struct timespec	ats, *ts = NULL;
	sigset_t	amask, *mask = NULL;
	int		error;

	if (SCARG(uap, ts)) {
		error = copyin(SCARG(uap, ts), &ats, sizeof(ats));
		if (error)
			return error;
		ts = &ats;
	}
	if (SCARG(uap, mask) != NULL) {
		error = copyin(SCARG(uap, mask), &amask, sizeof(amask));
		if (error)
			return error;
		mask = &amask;
	}

	return selcommon(retval, SCARG(uap, nd), SCARG(uap, in),
	    SCARG(uap, ou), SCARG(uap, ex), ts, mask);
}

int
sys___select50(struct lwp *l, const struct sys___select50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int)			nd;
		syscallarg(fd_set *)		in;
		syscallarg(fd_set *)		ou;
		syscallarg(fd_set *)		ex;
		syscallarg(struct timeval *)	tv;
	} */
	struct timeval atv;
	struct timespec ats, *ts = NULL;
	int error;

	if (SCARG(uap, tv)) {
		error = copyin(SCARG(uap, tv), (void *)&atv, sizeof(atv));
		if (error)
			return error;
		TIMEVAL_TO_TIMESPEC(&atv, &ats);
		ts = &ats;
	}

	return selcommon(retval, SCARG(uap, nd), SCARG(uap, in),
	    SCARG(uap, ou), SCARG(uap, ex), ts, NULL);
}
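
/*
 * For reference (illustrative userland sketch, not part of this file;
 * 'sock' stands for any open descriptor), a call reaching
 * sys___select50() typically looks like:
 *
 *	fd_set rfds;
 *	struct timeval tv = { 5, 0 };
 *
 *	FD_ZERO(&rfds);
 *	FD_SET(sock, &rfds);
 *	if (select(sock + 1, &rfds, NULL, NULL, &tv) > 0)
 *		... sock is readable ...
 */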

/*
 * sel_do_scan: common code to perform the scan on descriptors.
 */
static int
sel_do_scan(const int op, void *fds, const int nf, const size_t ni,
    struct timespec *ts, sigset_t *mask, register_t *retval)
{
	lwp_t		* const l = curlwp;
	selcluster_t	*sc;
	kmutex_t	*lock;
	struct timespec	sleepts;
	int		error, timo;

	timo = 0;
	if (ts && inittimeleft(ts, &sleepts) == -1) {
		return EINVAL;
	}

	if (__predict_false(mask))
		sigsuspendsetup(l, mask);

	sc = curcpu()->ci_data.cpu_selcluster;
	lock = sc->sc_lock;
	l->l_selcluster = sc;
	SLIST_INIT(&l->l_selwait);

	l->l_selret = 0;
	if (op == SELOP_SELECT) {
		l->l_selbits = fds;
		l->l_selni = ni;
	} else {
		l->l_selbits = NULL;
	}
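
	/*
	 * Informal summary of the l_selflag state machine driven by the
	 * loop below (derived from this code): each iteration starts in
	 * SEL_SCANNING; selnotify() may switch the LWP to SEL_EVENT
	 * (events delivered directly into l_selret and the outputs) or
	 * to SEL_RESET (a full re-scan is required); if nothing fired,
	 * we move to SEL_BLOCKING and sleep.
	 */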
	for (;;) {
		int ncoll;

		/*
		 * No need to lock.  If this is overwritten by another value
		 * while scanning, we will retry below.  We only need to see
		 * exact state from the descriptors that we are about to poll,
		 * and lock activity resulting from fo_poll is enough to
		 * provide an up to date value for new polling activity.
		 */
		l->l_selflag = SEL_SCANNING;
		ncoll = sc->sc_ncoll;

		if (op == SELOP_SELECT) {
			error = selscan((char *)fds, nf, ni, retval);
		} else {
			error = pollscan((struct pollfd *)fds, nf, retval);
		}
		if (error || *retval)
			break;
		if (ts && (timo = gettimeleft(ts, &sleepts)) <= 0)
			break;
		/*
		 * Acquire the lock and perform the (re)checks.  Note, if
		 * a collision has occurred, then our state does not matter,
		 * as we must perform a re-scan.  Therefore, check it first.
		 */
state_check:
		mutex_spin_enter(lock);
		if (__predict_false(sc->sc_ncoll != ncoll)) {
			/* Collision: perform a re-scan. */
			mutex_spin_exit(lock);
			continue;
		}
		if (__predict_true(l->l_selflag == SEL_EVENT)) {
			/* Events occurred; they are set directly. */
			mutex_spin_exit(lock);
			KASSERT(l->l_selret != 0);
			*retval = l->l_selret;
			break;
		}
		if (__predict_true(l->l_selflag == SEL_RESET)) {
			/* Events occurred, but a re-scan is requested. */
			mutex_spin_exit(lock);
			continue;
		}
		/* Nothing happened, therefore sleep. */
		l->l_selflag = SEL_BLOCKING;
		l->l_kpriority = true;
		sleepq_enter(&sc->sc_sleepq, l, lock);
		sleepq_enqueue(&sc->sc_sleepq, sc, "select", &select_sobj);
		error = sleepq_block(timo, true);
		if (error != 0) {
			break;
		}
		/* Awoken: need to check the state. */
		goto state_check;
	}
	selclear();

	if (__predict_false(mask))
		sigsuspendteardown(l);

	/* select and poll are not restarted after signals... */
	if (error == ERESTART)
		return EINTR;
	if (error == EWOULDBLOCK)
		return 0;
	return error;
}

int
selcommon(register_t *retval, int nd, fd_set *u_in, fd_set *u_ou,
    fd_set *u_ex, struct timespec *ts, sigset_t *mask)
{
	char		smallbits[howmany(FD_SETSIZE, NFDBITS) *
			    sizeof(fd_mask) * 6];
	char		*bits;
	int		error, nf;
	size_t		ni;

	if (nd < 0)
		return (EINVAL);
	nf = curlwp->l_fd->fd_dt->dt_nfiles;
	if (nd > nf) {
		/* forgiving; slightly wrong */
		nd = nf;
	}
	ni = howmany(nd, NFDBITS) * sizeof(fd_mask);
	if (ni * 6 > sizeof(smallbits)) {
		bits = kmem_alloc(ni * 6, KM_SLEEP);
		if (bits == NULL)
			return ENOMEM;
	} else
		bits = smallbits;
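
	/*
	 * Layout of the 'bits' buffer (six slabs of 'ni' bytes each,
	 * derived from the code below): slabs 0-2 hold the in/ou/ex
	 * input sets copied in by getbits(); slabs 3-5 receive the
	 * matching output sets that selscan() fills and that we copy
	 * back out to userland.
	 */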

#define	getbits(name, x)						\
	if (u_ ## name) {						\
		error = copyin(u_ ## name, bits + ni * x, ni);		\
		if (error)						\
			goto fail;					\
	} else								\
		memset(bits + ni * x, 0, ni);
	getbits(in, 0);
	getbits(ou, 1);
	getbits(ex, 2);
#undef	getbits

	error = sel_do_scan(SELOP_SELECT, bits, nd, ni, ts, mask, retval);
	if (error == 0 && u_in != NULL)
		error = copyout(bits + ni * 3, u_in, ni);
	if (error == 0 && u_ou != NULL)
		error = copyout(bits + ni * 4, u_ou, ni);
	if (error == 0 && u_ex != NULL)
		error = copyout(bits + ni * 5, u_ex, ni);
 fail:
	if (bits != smallbits)
		kmem_free(bits, ni * 6);
	return (error);
}

static inline int
selscan(char *bits, const int nfd, const size_t ni, register_t *retval)
{
	fd_mask *ibitp, *obitp;
	int msk, i, j, fd, n;
	file_t *fp;

	ibitp = (fd_mask *)(bits + ni * 0);
	obitp = (fd_mask *)(bits + ni * 3);
	n = 0;

	for (msk = 0; msk < 3; msk++) {
		for (i = 0; i < nfd; i += NFDBITS) {
			fd_mask ibits, obits;

			ibits = *ibitp++;
			obits = 0;
			while ((j = ffs(ibits)) && (fd = i + --j) < nfd) {
				ibits &= ~(1 << j);
				if ((fp = fd_getfile(fd)) == NULL)
					return (EBADF);
				/*
				 * Set up the argument to selrecord(),
				 * which is the file descriptor number.
				 */
				curlwp->l_selrec = fd;
				if ((*fp->f_ops->fo_poll)(fp, sel_flag[msk])) {
					obits |= (1 << j);
					n++;
				}
				fd_putfile(fd);
			}
			*obitp++ = obits;
		}
	}
	*retval = n;
	return (0);
}

/*
 * Poll system call.
 */
int
sys_poll(struct lwp *l, const struct sys_poll_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct pollfd *)	fds;
		syscallarg(u_int)		nfds;
		syscallarg(int)			timeout;
	} */
	struct timespec	ats, *ts = NULL;

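	/*
	 * Convert the millisecond timeout to a timespec; e.g. a timeout
	 * of 1500 ms becomes { 1, 500000000 }.  INFTIM (-1) means wait
	 * indefinitely, which is expressed by leaving 'ts' NULL.
	 */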
	if (SCARG(uap, timeout) != INFTIM) {
		ats.tv_sec = SCARG(uap, timeout) / 1000;
		ats.tv_nsec = (SCARG(uap, timeout) % 1000) * 1000000;
		ts = &ats;
	}

	return pollcommon(retval, SCARG(uap, fds), SCARG(uap, nfds), ts, NULL);
}

/*
 * Poll system call, with a timespec and signal mask (pollts(2)).
 */
int
sys___pollts50(struct lwp *l, const struct sys___pollts50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct pollfd *)		fds;
		syscallarg(u_int)			nfds;
		syscallarg(const struct timespec *)	ts;
		syscallarg(const sigset_t *)		mask;
	} */
	struct timespec	ats, *ts = NULL;
	sigset_t	amask, *mask = NULL;
	int		error;

	if (SCARG(uap, ts)) {
		error = copyin(SCARG(uap, ts), &ats, sizeof(ats));
		if (error)
			return error;
		ts = &ats;
	}
	if (SCARG(uap, mask)) {
		error = copyin(SCARG(uap, mask), &amask, sizeof(amask));
		if (error)
			return error;
		mask = &amask;
	}

	return pollcommon(retval, SCARG(uap, fds), SCARG(uap, nfds), ts, mask);
}

int
pollcommon(register_t *retval, struct pollfd *u_fds, u_int nfds,
    struct timespec *ts, sigset_t *mask)
{
	struct pollfd	smallfds[32];
	struct pollfd	*fds;
	int		error;
	size_t		ni;

	if (nfds > 1000 + curlwp->l_fd->fd_dt->dt_nfiles) {
		/*
		 * Either the user passed in a very sparse 'fds' or junk!
		 * The kmem_alloc() call below would be bad news.
		 * We could process the 'fds' array in chunks, but that
		 * is a lot of code that isn't normally useful.
		 * (Or just move the copyin/out into pollscan().)
		 * Historically the code silently truncated 'fds' to
		 * dt_nfiles entries - but that does cause issues.
		 */
		return EINVAL;
	}
	ni = nfds * sizeof(struct pollfd);
	if (ni > sizeof(smallfds)) {
		fds = kmem_alloc(ni, KM_SLEEP);
		if (fds == NULL)
			return ENOMEM;
	} else
		fds = smallfds;

	error = copyin(u_fds, fds, ni);
	if (error)
		goto fail;

	error = sel_do_scan(SELOP_POLL, fds, nfds, ni, ts, mask, retval);
	if (error == 0)
		error = copyout(fds, u_fds, ni);
 fail:
	if (fds != smallfds)
		kmem_free(fds, ni);
	return (error);
}

static inline int
pollscan(struct pollfd *fds, const int nfd, register_t *retval)
{
	file_t *fp;
	int i, n = 0;

	for (i = 0; i < nfd; i++, fds++) {
		if (fds->fd < 0) {
			fds->revents = 0;
		} else if ((fp = fd_getfile(fds->fd)) == NULL) {
			fds->revents = POLLNVAL;
			n++;
		} else {
			/*
			 * Perform the poll: this registers a select request
			 * or returns the events that are already set.  Set
			 * up the argument for selrecord(): a pointer to the
			 * struct pollfd.
			 */
			curlwp->l_selrec = (uintptr_t)fds;
			fds->revents = (*fp->f_ops->fo_poll)(fp,
			    fds->events | POLLERR | POLLHUP);
			if (fds->revents != 0)
				n++;
			fd_putfile(fds->fd);
		}
	}
	*retval = n;
	return (0);
}

int
seltrue(dev_t dev, int events, lwp_t *l)
{

	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
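
/*
 * Illustrative sketch (hypothetical driver, not part of this file):
 * a device that is always ready for I/O can use seltrue() directly
 * as its poll entry point, e.g.:
 *
 *	const struct cdevsw mydev_cdevsw = {
 *		...
 *		.d_poll = seltrue,
 *		...
 *	};
 */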

/*
 * Record a select request.  Concurrency issues:
 *
 * The caller holds the same lock across calls to selrecord() and
 * selnotify(), so we don't need to consider a concurrent wakeup
 * while in this routine.
 *
 * The only activity we need to guard against is selclear(), called by
 * another thread that is exiting sel_do_scan().
 * `sel_lwp' can only become non-NULL while the caller's lock is held,
 * so it cannot become non-NULL due to a change made by another thread
 * while we are in this routine.  It can only become _NULL_ due to a
 * call to selclear().
 *
 * If it is non-NULL and != selector, there is the potential for
 * selclear() to be called by another thread.  If either of those
 * conditions is true, we're not interested in touching the `named
 * waiter' part of the selinfo record because we need to record a
 * collision.  Hence there is no need for additional locking in this
 * routine.
 */
void
selrecord(lwp_t *selector, struct selinfo *sip)
{
	selcluster_t *sc;
	lwp_t *other;

	KASSERT(selector == curlwp);

	sc = selector->l_selcluster;
	other = sip->sel_lwp;

	if (other == selector) {
		/* 1. We (selector) already claimed to be the first LWP. */
		KASSERT(sip->sel_cluster == sc);
	} else if (other == NULL) {
		/*
		 * 2. No first LWP, therefore we (selector) are the first.
		 *
		 * There may be unnamed waiters (collisions).  Issue a memory
		 * barrier to ensure that we access sel_lwp (above) before
		 * other fields - this guards against a call to selclear().
		 */
		membar_enter();
		sip->sel_lwp = selector;
		SLIST_INSERT_HEAD(&selector->l_selwait, sip, sel_chain);
		/* Copy the argument, which is for selnotify(). */
		sip->sel_fdinfo = selector->l_selrec;
		/* Replace selinfo's lock with the chosen cluster's lock. */
		sip->sel_cluster = sc;
	} else {
		/* 3. Multiple waiters: record a collision. */
		sip->sel_collision |= sc->sc_mask;
		KASSERT(sip->sel_cluster != NULL);
	}
}
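
/*
 * Illustrative sketch (hypothetical driver, not part of this file):
 * the canonical fo_poll pattern that selrecord() pairs with, executed
 * with the driver's own <object-lock> held:
 *
 *	static int
 *	mydev_poll(file_t *fp, int events)
 *	{
 *		struct mydev_softc *sc = fp->f_data;
 *		int revents = 0;
 *
 *		mutex_enter(&sc->sc_objlock);
 *		if ((events & (POLLIN | POLLRDNORM)) != 0 &&
 *		    mydev_has_data(sc))
 *			revents |= events & (POLLIN | POLLRDNORM);
 *		else
 *			selrecord(curlwp, &sc->sc_rsel);
 *		mutex_exit(&sc->sc_objlock);
 *		return revents;
 *	}
 */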

/*
 * sel_setevents: a helper function for selnotify(), to set the events
 * for an LWP sleeping in selcommon() or pollcommon().
 */
static inline bool
sel_setevents(lwp_t *l, struct selinfo *sip, const int events)
{
	const int oflag = l->l_selflag;
	int ret = 0;

	/*
	 * If we require a re-scan, or one was required by somebody else,
	 * then just (re)set SEL_RESET and return.
	 */
	if (__predict_false(events == 0 || oflag == SEL_RESET)) {
		l->l_selflag = SEL_RESET;
		return true;
	}
	/*
	 * Direct set.  Note: the LWP's select state is locked.  First,
	 * determine whether this is selcommon() or pollcommon().
	 */
	if (l->l_selbits != NULL) {
		const size_t ni = l->l_selni;
		fd_mask *fds = (fd_mask *)l->l_selbits;
		fd_mask *ofds = (fd_mask *)((char *)fds + ni * 3);
		const int fd = sip->sel_fdinfo, fbit = 1 << (fd & __NFDMASK);
		const int idx = fd >> __NFDSHIFT;
		int n;

		for (n = 0; n < 3; n++) {
			if ((fds[idx] & fbit) != 0 && (sel_flag[n] & events)) {
				ofds[idx] |= fbit;
				ret++;
			}
			fds = (fd_mask *)((char *)fds + ni);
			ofds = (fd_mask *)((char *)ofds + ni);
		}
	} else {
		struct pollfd *pfd = (void *)sip->sel_fdinfo;
		int revents = events & (pfd->events | POLLERR | POLLHUP);

		if (revents) {
			pfd->revents |= revents;
			ret = 1;
		}
	}
	/* Check whether there are any events to return. */
	if (!ret) {
		return false;
	}
	/* Indicate direct set and note the event (cluster lock is held). */
	l->l_selflag = SEL_EVENT;
	l->l_selret += ret;
	return true;
}

/*
 * Do a wakeup when a selectable event occurs.  Concurrency issues:
 *
 * As per selrecord(), the caller's object lock is held.  If there
 * is a named waiter, we must acquire the associated selcluster's lock
 * in order to synchronize with selclear() and pollers going to sleep
 * in sel_do_scan().
 *
 * sip->sel_cluster cannot change at this point, as it is only changed
 * in selrecord(), and concurrent calls to selrecord() are locked
 * out by the caller.
 */
void
selnotify(struct selinfo *sip, int events, long knhint)
{
	selcluster_t *sc;
	uint32_t mask;
	int index, oflag;
	lwp_t *l;
	kmutex_t *lock;

	KNOTE(&sip->sel_klist, knhint);

	if (sip->sel_lwp != NULL) {
		/* One named LWP is waiting. */
		sc = sip->sel_cluster;
		lock = sc->sc_lock;
		mutex_spin_enter(lock);
		/* Still there? */
		if (sip->sel_lwp != NULL) {
			/*
			 * Set the events for our LWP directly if we can;
			 * otherwise, request a full re-scan.
			 */
			l = sip->sel_lwp;
			oflag = l->l_selflag;
#ifndef NO_DIRECT_SELECT
			if (!sel_setevents(l, sip, events)) {
				/* No events to return. */
				mutex_spin_exit(lock);
				return;
			}
#else
			l->l_selflag = SEL_RESET;
#endif
			/*
			 * If thread is sleeping, wake it up.  If it's not
			 * yet asleep, it will notice the change in state
			 * and will re-poll the descriptors.
			 */
			if (oflag == SEL_BLOCKING && l->l_mutex == lock) {
				KASSERT(l->l_wchan == sc);
				sleepq_unsleep(l, false);
			}
		}
		mutex_spin_exit(lock);
	}

	if ((mask = sip->sel_collision) != 0) {
		/*
		 * There was a collision (multiple waiters): we must
		 * inform all potentially interested waiters.
		 */
		sip->sel_collision = 0;
		do {
			index = ffs(mask) - 1;
			mask &= ~(1 << index);
			sc = selcluster[index];
			lock = sc->sc_lock;
			mutex_spin_enter(lock);
			sc->sc_ncoll++;
			sleepq_wake(&sc->sc_sleepq, sc, (u_int)-1, lock);
		} while (__predict_false(mask != 0));
	}
}
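
/*
 * Illustrative sketch (hypothetical driver, not part of this file):
 * the producer side that pairs with the fo_poll pattern shown above,
 * called with the same <object-lock> held; the third argument is the
 * hint passed through to KNOTE():
 *
 *	mutex_enter(&sc->sc_objlock);
 *	mydev_enqueue(sc, buf);
 *	selnotify(&sc->sc_rsel, POLLIN | POLLRDNORM, 0);
 *	mutex_exit(&sc->sc_objlock);
 */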

/*
 * Remove an LWP from all objects that it is waiting for.  Concurrency
 * issues:
 *
 * The object owner's (e.g. device driver) lock is not held here.  Calls
 * can be made to selrecord() and we do not synchronize against those
 * directly using locks.  However, we use `sel_lwp' to lock out changes.
 * Before clearing it we must use memory barriers to ensure that we can
 * safely traverse the list of selinfo records.
 */
static void
selclear(void)
{
	struct selinfo *sip, *next;
	selcluster_t *sc;
	lwp_t *l;
	kmutex_t *lock;

	l = curlwp;
	sc = l->l_selcluster;
	lock = sc->sc_lock;

	mutex_spin_enter(lock);
	for (sip = SLIST_FIRST(&l->l_selwait); sip != NULL; sip = next) {
		KASSERT(sip->sel_lwp == l);
		KASSERT(sip->sel_cluster == l->l_selcluster);

		/*
		 * Read link to next selinfo record, if any.
		 * It's no longer safe to touch `sip' after clearing
		 * `sel_lwp', so ensure that the read of `sel_chain'
		 * completes before the clearing of sel_lwp becomes
		 * globally visible.
		 */
		next = SLIST_NEXT(sip, sel_chain);
		membar_exit();
		/* Release the record for another named waiter to use. */
		sip->sel_lwp = NULL;
	}
	mutex_spin_exit(lock);
}

/*
 * Initialize the select/poll system calls.  Called once for each
 * CPU in the system, as they are attached.
 */
void
selsysinit(struct cpu_info *ci)
{
	selcluster_t *sc;
	u_int index;

	/* If a cluster is already in place for this bit, re-use it. */
	index = cpu_index(ci) & SELCLUSTERMASK;
	sc = selcluster[index];
	if (sc == NULL) {
		sc = kmem_alloc(roundup2(sizeof(selcluster_t),
		    coherency_unit) + coherency_unit, KM_SLEEP);
		sc = (void *)roundup2((uintptr_t)sc, coherency_unit);
		sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
		sleepq_init(&sc->sc_sleepq);
		sc->sc_ncoll = 0;
		sc->sc_mask = (1 << index);
		selcluster[index] = sc;
	}
	ci->ci_data.cpu_selcluster = sc;
}

/*
 * Initialize a selinfo record.
 */
void
selinit(struct selinfo *sip)
{

	memset(sip, 0, sizeof(*sip));
}
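
/*
 * Illustrative sketch (hypothetical driver, not part of this file):
 * typical lifecycle of a selinfo record embedded in a driver softc:
 *
 *	struct mydev_softc {
 *		kmutex_t	sc_objlock;	   the <object-lock>
 *		struct selinfo	sc_rsel;
 *		...
 *	};
 *
 *	attach:	selinit(&sc->sc_rsel);
 *	detach:	seldestroy(&sc->sc_rsel);
 *
 * seldestroy() must run only once all selrecord()/selnotify() activity
 * on the record has been stopped (see below).
 */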

/*
 * Destroy a selinfo record.  The owning object must not gain new
 * references while this is in progress: all activity on the record
 * must be stopped.
 *
 * Concurrency issues: we need only guard against a call to selclear()
 * by a thread exiting sel_do_scan().  The caller has prevented further
 * references being made to the selinfo record via selrecord(), and it
 * will not call selnotify() again.
 */
void
seldestroy(struct selinfo *sip)
{
	selcluster_t *sc;
	kmutex_t *lock;
	lwp_t *l;

	if (sip->sel_lwp == NULL)
		return;

	/*
	 * Lock out selclear().  The selcluster pointer can't change while
	 * we are here since it is only ever changed in selrecord(),
	 * and that will not be entered again for this record because
	 * it is dying.
	 */
	KASSERT(sip->sel_cluster != NULL);
	sc = sip->sel_cluster;
	lock = sc->sc_lock;
	mutex_spin_enter(lock);
	if ((l = sip->sel_lwp) != NULL) {
		/*
		 * This should rarely happen, so although SLIST_REMOVE()
		 * is slow, using it here is not a problem.
		 */
		KASSERT(l->l_selcluster == sc);
		SLIST_REMOVE(&l->l_selwait, sip, selinfo, sel_chain);
		sip->sel_lwp = NULL;
	}
	mutex_spin_exit(lock);
}

int
pollsock(struct socket *so, const struct timespec *tsp, int events)
{
	int		ncoll, error, timo;
	struct timespec	sleepts, ts;
	selcluster_t	*sc;
	lwp_t		*l;
	kmutex_t	*lock;

	timo = 0;
	if (tsp != NULL) {
		ts = *tsp;
		if (inittimeleft(&ts, &sleepts) == -1)
			return EINVAL;
	}

	l = curlwp;
	sc = curcpu()->ci_data.cpu_selcluster;
	lock = sc->sc_lock;
	l->l_selcluster = sc;
	SLIST_INIT(&l->l_selwait);
	error = 0;
	for (;;) {
		/*
		 * No need to lock.  If this is overwritten by another
		 * value while scanning, we will retry below.  We only
		 * need to see exact state from the descriptors that
		 * we are about to poll, and lock activity resulting
		 * from fo_poll is enough to provide an up to date value
		 * for new polling activity.
		 */
		ncoll = sc->sc_ncoll;
		l->l_selflag = SEL_SCANNING;
		if (sopoll(so, events) != 0)
			break;
		if (tsp && (timo = gettimeleft(&ts, &sleepts)) <= 0)
			break;
		mutex_spin_enter(lock);
		if (l->l_selflag != SEL_SCANNING || sc->sc_ncoll != ncoll) {
			mutex_spin_exit(lock);
			continue;
		}
		l->l_selflag = SEL_BLOCKING;
		sleepq_enter(&sc->sc_sleepq, l, lock);
		sleepq_enqueue(&sc->sc_sleepq, sc, "pollsock", &select_sobj);
		error = sleepq_block(timo, true);
		if (error != 0)
			break;
	}
	selclear();
	/* poll is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	return (error);
}
908