/*	$NetBSD: sys_select.c,v 1.13 2009/03/21 13:11:14 ad Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)sys_generic.c	8.9 (Berkeley) 2/14/95
 */
/*
 * System calls relating to descriptor polling: select() and poll().
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_select.c,v 1.13 2009/03/21 13:11:14 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/stat.h>
#include <sys/poll.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/sleepq.h>

/* Flags for lwp::l_selflag. */
#define	SEL_RESET	0	/* awoken, interrupted, or not yet polling */
#define	SEL_SCANNING	1	/* polling descriptors */
#define	SEL_BLOCKING	2	/* about to block on the per-CPU sleep queue */

/* Per-CPU state for select()/poll(). */
#if MAXCPUS > 32
#error adjust this code
#endif
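/*
 * Each CPU owns one of these: sc_lock and sc_sleepq form the queue
 * that waiters block on, sc_ncoll counts wakeup collisions (used to
 * detect that a wakeup raced with a scan), and sc_mask is this CPU's
 * bit in selinfo::sel_collision - which is why MAXCPUS is limited to
 * 32 above.
 */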
typedef struct selcpu {
	kmutex_t	*sc_lock;
	sleepq_t	sc_sleepq;
	int		sc_ncoll;
	uint32_t	sc_mask;
} selcpu_t;

static int	selscan(lwp_t *, fd_mask *, fd_mask *, int, register_t *);
static int	pollscan(lwp_t *, struct pollfd *, int, register_t *);
static void	selclear(void);

static syncobj_t select_sobj = {
	SOBJ_SLEEPQ_FIFO,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

/*
 * Select system call.
 */
int
sys___pselect50(struct lwp *l, const struct sys___pselect50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int)				nd;
		syscallarg(fd_set *)			in;
		syscallarg(fd_set *)			ou;
		syscallarg(fd_set *)			ex;
		syscallarg(const struct timespec *)	ts;
		syscallarg(sigset_t *)			mask;
	} */
	struct timespec	ats;
	struct timeval	atv, *tv = NULL;
	sigset_t	amask, *mask = NULL;
	int		error;

	if (SCARG(uap, ts)) {
		error = copyin(SCARG(uap, ts), &ats, sizeof(ats));
		if (error)
			return error;
		atv.tv_sec = ats.tv_sec;
		atv.tv_usec = ats.tv_nsec / 1000;
		tv = &atv;
	}
	if (SCARG(uap, mask) != NULL) {
		error = copyin(SCARG(uap, mask), &amask, sizeof(amask));
		if (error)
			return error;
		mask = &amask;
	}

	return selcommon(l, retval, SCARG(uap, nd), SCARG(uap, in),
	    SCARG(uap, ou), SCARG(uap, ex), tv, mask);
}

int
inittimeleft(struct timeval *tv, struct timeval *sleeptv)
{
	if (itimerfix(tv))
		return -1;
	getmicrouptime(sleeptv);
	return 0;
}

int
gettimeleft(struct timeval *tv, struct timeval *sleeptv)
{
	/*
	 * We have to recalculate the timeout on every retry.
	 */
	struct timeval slepttv;
	/*
	 * Reduce tv by the time elapsed since the last call,
	 * measured on the monotonic time scale.
	 */
	getmicrouptime(&slepttv);
	timeradd(tv, sleeptv, tv);
	timersub(tv, &slepttv, tv);
	*sleeptv = slepttv;
	return tvtohz(tv);
}
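
/*
 * Usage sketch for the two helpers above (this is the pattern the
 * scan loops below follow): seed the pair once, then recompute the
 * remaining timeout, in ticks, before each sleep.
 *
 *	struct timeval tv, sleeptv;
 *	int timo = 0;
 *
 *	if (tv_arg != NULL) {
 *		tv = *tv_arg;
 *		if (inittimeleft(&tv, &sleeptv) == -1)
 *			return EINVAL;
 *	}
 *	for (;;) {
 *		// ... scan descriptors ...
 *		if (tv_arg && (timo = gettimeleft(&tv, &sleeptv)) <= 0)
 *			break;		// timed out
 *		// ... block for at most timo ticks, then rescan ...
 *	}
 *
 * `tv_arg' is illustrative only; see selcommon()/pollcommon() below
 * for the real callers.
 */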

int
sys___select50(struct lwp *l, const struct sys___select50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int)			nd;
		syscallarg(fd_set *)		in;
		syscallarg(fd_set *)		ou;
		syscallarg(fd_set *)		ex;
		syscallarg(struct timeval *)	tv;
	} */
	struct timeval atv, *tv = NULL;
	int error;

	if (SCARG(uap, tv)) {
		error = copyin(SCARG(uap, tv), &atv, sizeof(atv));
		if (error)
			return error;
		tv = &atv;
	}

	return selcommon(l, retval, SCARG(uap, nd), SCARG(uap, in),
	    SCARG(uap, ou), SCARG(uap, ex), tv, NULL);
}

int
selcommon(lwp_t *l, register_t *retval, int nd, fd_set *u_in,
	  fd_set *u_ou, fd_set *u_ex, struct timeval *tv, sigset_t *mask)
{
	char		smallbits[howmany(FD_SETSIZE, NFDBITS) *
			    sizeof(fd_mask) * 6];
	proc_t		* const p = l->l_proc;
	char		*bits;
	int		ncoll, error, timo;
	size_t		ni;
	sigset_t	oldmask;
	struct timeval	sleeptv;
	selcpu_t	*sc;
	kmutex_t	*lock;

	error = 0;
	if (nd < 0)
		return (EINVAL);
	if (nd > p->p_fd->fd_nfiles) {
		/* Forgiving: clamp to the fd table size (slightly wrong). */
		nd = p->p_fd->fd_nfiles;
	}
	ni = howmany(nd, NFDBITS) * sizeof(fd_mask);
	if (ni * 6 > sizeof(smallbits)) {
		bits = kmem_alloc(ni * 6, KM_SLEEP);
		if (bits == NULL)
			return ENOMEM;
	} else
		bits = smallbits;

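	/*
	 * The bits buffer holds six arrays of ni bytes each: slots 0-2
	 * are the in/ou/ex input sets copied in from userspace, and
	 * slots 3-5 receive the corresponding output sets.
	 */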
#define	getbits(name, x)						\
	if (u_ ## name) {						\
		error = copyin(u_ ## name, bits + ni * x, ni);		\
		if (error)						\
			goto done;					\
	} else								\
		memset(bits + ni * x, 0, ni);
	getbits(in, 0);
	getbits(ou, 1);
	getbits(ex, 2);
#undef	getbits

	timo = 0;
	if (tv && inittimeleft(tv, &sleeptv) == -1) {
		error = EINVAL;
		goto done;
	}

	if (mask) {
		sigminusset(&sigcantmask, mask);
		mutex_enter(p->p_lock);
		oldmask = l->l_sigmask;
		l->l_sigmask = *mask;
		mutex_exit(p->p_lock);
	} else
		oldmask = l->l_sigmask;	/* XXXgcc */

	sc = curcpu()->ci_data.cpu_selcpu;
	lock = sc->sc_lock;
	l->l_selcpu = sc;
	SLIST_INIT(&l->l_selwait);
	for (;;) {
		/*
		 * No need to lock.  If this is overwritten by another
		 * value while scanning, we will retry below.  We only
		 * need to see exact state from the descriptors that
		 * we are about to poll, and lock activity resulting
		 * from fo_poll is enough to provide an up to date value
		 * for new polling activity.
		 */
		l->l_selflag = SEL_SCANNING;
		ncoll = sc->sc_ncoll;

		error = selscan(l, (fd_mask *)(bits + ni * 0),
		    (fd_mask *)(bits + ni * 3), nd, retval);

		if (error || *retval)
			break;
		if (tv && (timo = gettimeleft(tv, &sleeptv)) <= 0)
			break;
		mutex_spin_enter(lock);
		if (l->l_selflag != SEL_SCANNING || sc->sc_ncoll != ncoll) {
			mutex_spin_exit(lock);
			continue;
		}
		l->l_selflag = SEL_BLOCKING;
		l->l_kpriority = true;
		sleepq_enter(&sc->sc_sleepq, l, lock);
		sleepq_enqueue(&sc->sc_sleepq, sc, "select", &select_sobj);
		error = sleepq_block(timo, true);
		if (error != 0)
			break;
	}
	selclear();

	if (mask) {
		mutex_enter(p->p_lock);
		l->l_sigmask = oldmask;
		mutex_exit(p->p_lock);
	}

 done:
	/* select is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0 && u_in != NULL)
		error = copyout(bits + ni * 3, u_in, ni);
	if (error == 0 && u_ou != NULL)
		error = copyout(bits + ni * 4, u_ou, ni);
	if (error == 0 && u_ex != NULL)
		error = copyout(bits + ni * 5, u_ex, ni);
	if (bits != smallbits)
		kmem_free(bits, ni * 6);
	return (error);
}

static int
selscan(lwp_t *l, fd_mask *ibitp, fd_mask *obitp, int nfd,
	register_t *retval)
{
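	/*
	 * Map each of the three fd_set arguments onto the poll(2)
	 * events that mark a descriptor ready for it: readable,
	 * writable, and exceptional (priority band) data, respectively.
	 */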
	static const int flag[3] = { POLLRDNORM | POLLHUP | POLLERR,
			       POLLWRNORM | POLLHUP | POLLERR,
			       POLLRDBAND };
	int msk, i, j, fd, n;
	fd_mask ibits, obits;
	file_t *fp;

	n = 0;
	for (msk = 0; msk < 3; msk++) {
		for (i = 0; i < nfd; i += NFDBITS) {
			ibits = *ibitp++;
			obits = 0;
			while ((j = ffs(ibits)) && (fd = i + --j) < nfd) {
				/* Unsigned shift: bit 31 must not
				   overflow a signed int. */
				ibits &= ~(1U << j);
				if ((fp = fd_getfile(fd)) == NULL)
					return (EBADF);
				if ((*fp->f_ops->fo_poll)(fp, flag[msk])) {
					obits |= (1U << j);
					n++;
				}
				fd_putfile(fd);
			}
			*obitp++ = obits;
		}
	}
	*retval = n;
	return (0);
}

/*
 * Poll system call.
 */
int
sys_poll(struct lwp *l, const struct sys_poll_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct pollfd *)	fds;
		syscallarg(u_int)		nfds;
		syscallarg(int)			timeout;
	} */
	struct timeval	atv, *tv = NULL;

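	/* A timeout of INFTIM (-1) means wait indefinitely. */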
	if (SCARG(uap, timeout) != INFTIM) {
		atv.tv_sec = SCARG(uap, timeout) / 1000;
		atv.tv_usec = (SCARG(uap, timeout) % 1000) * 1000;
		tv = &atv;
	}

	return pollcommon(l, retval, SCARG(uap, fds), SCARG(uap, nfds),
		tv, NULL);
}

/*
 * Poll system call with a timespec timeout and a signal mask: pollts().
 */
int
sys___pollts50(struct lwp *l, const struct sys___pollts50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct pollfd *)		fds;
		syscallarg(u_int)			nfds;
		syscallarg(const struct timespec *)	ts;
		syscallarg(const sigset_t *)		mask;
	} */
	struct timespec	ats;
	struct timeval	atv, *tv = NULL;
	sigset_t	amask, *mask = NULL;
	int		error;

	if (SCARG(uap, ts)) {
		error = copyin(SCARG(uap, ts), &ats, sizeof(ats));
		if (error)
			return error;
		atv.tv_sec = ats.tv_sec;
		atv.tv_usec = ats.tv_nsec / 1000;
		tv = &atv;
	}
	if (SCARG(uap, mask)) {
		error = copyin(SCARG(uap, mask), &amask, sizeof(amask));
		if (error)
			return error;
		mask = &amask;
	}

	return pollcommon(l, retval, SCARG(uap, fds), SCARG(uap, nfds),
		tv, mask);
}

int
pollcommon(lwp_t *l, register_t *retval,
	struct pollfd *u_fds, u_int nfds,
	struct timeval *tv, sigset_t *mask)
{
	struct pollfd	smallfds[32];
	struct pollfd	*fds;
	proc_t		* const p = l->l_proc;
	sigset_t	oldmask;
	int		ncoll, error, timo;
	size_t		ni;
	struct timeval	sleeptv;
	selcpu_t	*sc;
	kmutex_t	*lock;

	if (nfds > p->p_fd->fd_nfiles) {
		/* Forgiving: clamp to the fd table size (slightly wrong). */
		nfds = p->p_fd->fd_nfiles;
	}
	ni = nfds * sizeof(struct pollfd);
	if (ni > sizeof(smallfds)) {
		fds = kmem_alloc(ni, KM_SLEEP);
		if (fds == NULL)
			return ENOMEM;
	} else
		fds = smallfds;

	error = copyin(u_fds, fds, ni);
	if (error)
		goto done;

	timo = 0;
	if (tv && inittimeleft(tv, &sleeptv) == -1) {
		error = EINVAL;
		goto done;
	}

	if (mask) {
		sigminusset(&sigcantmask, mask);
		mutex_enter(p->p_lock);
		oldmask = l->l_sigmask;
		l->l_sigmask = *mask;
		mutex_exit(p->p_lock);
	} else
		oldmask = l->l_sigmask;	/* XXXgcc */

	sc = curcpu()->ci_data.cpu_selcpu;
	lock = sc->sc_lock;
	l->l_selcpu = sc;
	SLIST_INIT(&l->l_selwait);
	for (;;) {
		/*
		 * No need to lock.  If this is overwritten by another
		 * value while scanning, we will retry below.  We only
		 * need to see exact state from the descriptors that
		 * we are about to poll, and lock activity resulting
		 * from fo_poll is enough to provide an up to date value
		 * for new polling activity.
		 */
		ncoll = sc->sc_ncoll;
		l->l_selflag = SEL_SCANNING;

		error = pollscan(l, fds, nfds, retval);

		if (error || *retval)
			break;
		if (tv && (timo = gettimeleft(tv, &sleeptv)) <= 0)
			break;
		mutex_spin_enter(lock);
		if (l->l_selflag != SEL_SCANNING || sc->sc_ncoll != ncoll) {
			mutex_spin_exit(lock);
			continue;
		}
		l->l_selflag = SEL_BLOCKING;
		l->l_kpriority = true;
		sleepq_enter(&sc->sc_sleepq, l, lock);
		sleepq_enqueue(&sc->sc_sleepq, sc, "select", &select_sobj);
		error = sleepq_block(timo, true);
		if (error != 0)
			break;
	}
	selclear();

	if (mask) {
		mutex_enter(p->p_lock);
		l->l_sigmask = oldmask;
		mutex_exit(p->p_lock);
	}
 done:
	/* poll is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0)
		error = copyout(fds, u_fds, ni);
	if (fds != smallfds)
		kmem_free(fds, ni);
	return (error);
}

static int
pollscan(lwp_t *l, struct pollfd *fds, int nfd, register_t *retval)
{
	int i, n;
	file_t *fp;

	n = 0;
	for (i = 0; i < nfd; i++, fds++) {
		if (fds->fd < 0) {
			fds->revents = 0;
		} else if ((fp = fd_getfile(fds->fd)) == NULL) {
			fds->revents = POLLNVAL;
			n++;
		} else {
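			/*
			 * POLLERR and POLLHUP are always polled for:
			 * POSIX requires them to be reported in
			 * revents even when not requested in events.
			 */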
			fds->revents = (*fp->f_ops->fo_poll)(fp,
			    fds->events | POLLERR | POLLHUP);
			if (fds->revents != 0)
				n++;
			fd_putfile(fds->fd);
		}
	}
	*retval = n;
	return (0);
}

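/*
 * seltrue() is a helper for drivers whose devices are always ready
 * for I/O: it reports whichever read/write events the caller asked
 * about as immediately true.
 */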
/*ARGSUSED*/
int
seltrue(dev_t dev, int events, lwp_t *l)
{

	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Record a select request.  Concurrency issues:
 *
 * The caller holds the same lock across calls to selrecord() and
 * selnotify(), so we don't need to consider a concurrent wakeup
 * while in this routine.
 *
 * The only activity we need to guard against is selclear(), called by
 * another thread that is exiting selcommon() or pollcommon().
 * `sel_lwp' can only become non-NULL while the caller's lock is held,
 * so it cannot become non-NULL due to a change made by another thread
 * while we are in this routine.  It can only become _NULL_ due to a
 * call to selclear().
 *
 * If it is non-NULL and != selector there is the potential for
 * selclear() to be called by another thread.  If either of those
 * conditions is true, we're not interested in touching the `named
 * waiter' part of the selinfo record because we need to record a
 * collision.  Hence there is no need for additional locking in this
 * routine.
 */
void
selrecord(lwp_t *selector, struct selinfo *sip)
{
	selcpu_t *sc;
	lwp_t *other;

	KASSERT(selector == curlwp);

	sc = selector->l_selcpu;
	other = sip->sel_lwp;

	if (other == selector) {
		/* `selector' has already claimed it. */
		KASSERT(sip->sel_cpu == sc);
	} else if (other == NULL) {
		/*
		 * First named waiter, although there may be unnamed
		 * waiters (collisions).  Issue a memory barrier to
		 * ensure that we access sel_lwp (above) before other
		 * fields - this guards against a call to selclear().
		 */
		membar_enter();
		sip->sel_lwp = selector;
		SLIST_INSERT_HEAD(&selector->l_selwait, sip, sel_chain);
		/* Replace selinfo's lock with our chosen CPU's lock. */
		sip->sel_cpu = sc;
	} else {
		/* Multiple waiters: record a collision. */
		sip->sel_collision |= sc->sc_mask;
		KASSERT(sip->sel_cpu != NULL);
	}
}
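
/*
 * Usage sketch (driver side): a driver's poll routine calls
 * selrecord() under its own lock when the condition is not yet true,
 * and the matching notification path calls selnotify() under the same
 * lock (see below).  All names here are illustrative, not a real
 * driver:
 *
 *	int
 *	xxpoll(dev_t dev, int events, lwp_t *l)
 *	{
 *		struct xx_softc *sc = device_lookup_private(&xx_cd,
 *		    minor(dev));
 *		int revents = 0;
 *
 *		mutex_enter(&sc->sc_lock);
 *		if ((events & (POLLIN | POLLRDNORM)) != 0) {
 *			if (sc->sc_havedata)
 *				revents |= events & (POLLIN | POLLRDNORM);
 *			else
 *				selrecord(l, &sc->sc_rsel);
 *		}
 *		mutex_exit(&sc->sc_lock);
 *		return revents;
 *	}
 */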

/*
 * Do a wakeup when a selectable event occurs.  Concurrency issues:
 *
 * As per selrecord(), the caller's object lock is held.  If there
 * is a named waiter, we must acquire the associated selcpu's lock
 * in order to synchronize with selclear() and pollers going to sleep
 * in selcommon() and/or pollcommon().
 *
 * sip->sel_cpu cannot change at this point, as it is only changed
 * in selrecord(), and concurrent calls to selrecord() are locked
 * out by the caller.
 */
void
selnotify(struct selinfo *sip, int events, long knhint)
{
	selcpu_t *sc;
	uint32_t mask;
	int index, oflag, swapin;
	lwp_t *l;
	kmutex_t *lock;

	KNOTE(&sip->sel_klist, knhint);

	if (sip->sel_lwp != NULL) {
		/* One named LWP is waiting. */
		swapin = 0;
		sc = sip->sel_cpu;
		lock = sc->sc_lock;
		mutex_spin_enter(lock);
		/* Still there? */
		if (sip->sel_lwp != NULL) {
			l = sip->sel_lwp;
			/*
			 * If thread is sleeping, wake it up.  If it's not
			 * yet asleep, it will notice the change in state
			 * and will re-poll the descriptors.
			 */
			oflag = l->l_selflag;
			l->l_selflag = SEL_RESET;
			if (oflag == SEL_BLOCKING && l->l_mutex == lock) {
				KASSERT(l->l_wchan == sc);
				swapin = sleepq_unsleep(l, false);
			}
		}
		mutex_spin_exit(lock);
		if (swapin)
			uvm_kick_scheduler();
	}

	if ((mask = sip->sel_collision) != 0) {
		/*
		 * There was a collision (multiple waiters): we must
		 * inform all potentially interested waiters.
		 */
		sip->sel_collision = 0;
		do {
			index = ffs(mask) - 1;
			mask &= ~(1U << index);
			sc = cpu_lookup(index)->ci_data.cpu_selcpu;
			lock = sc->sc_lock;
			mutex_spin_enter(lock);
			sc->sc_ncoll++;
			sleepq_wake(&sc->sc_sleepq, sc, (u_int)-1, lock);
		} while (__predict_false(mask != 0));
	}
}
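
/*
 * Continuing the sketch above selrecord(): the driver's notification
 * path (e.g. its interrupt handler) wakes the waiters recorded in the
 * selinfo while holding the same lock.  Again illustrative only:
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_havedata = true;
 *	selnotify(&sc->sc_rsel, POLLIN | POLLRDNORM, 0);
 *	mutex_exit(&sc->sc_lock);
 */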

/*
 * Remove an LWP from all objects that it is waiting for.  Concurrency
 * issues:
 *
 * The object owner's (e.g. device driver) lock is not held here.  Calls
 * can be made to selrecord() and we do not synchronize against those
 * directly using locks.  However, we use `sel_lwp' to lock out changes.
 * Before clearing it we must use memory barriers to ensure that we can
 * safely traverse the list of selinfo records.
 */
static void
selclear(void)
{
	struct selinfo *sip, *next;
	selcpu_t *sc;
	lwp_t *l;
	kmutex_t *lock;

	l = curlwp;
	sc = l->l_selcpu;
	lock = sc->sc_lock;

	mutex_spin_enter(lock);
	for (sip = SLIST_FIRST(&l->l_selwait); sip != NULL; sip = next) {
		KASSERT(sip->sel_lwp == l);
		KASSERT(sip->sel_cpu == l->l_selcpu);
		/*
		 * Read link to next selinfo record, if any.
		 * It's no longer safe to touch `sip' after clearing
		 * `sel_lwp', so ensure that the read of `sel_chain'
		 * completes before the clearing of sel_lwp becomes
		 * globally visible.
		 */
		next = SLIST_NEXT(sip, sel_chain);
		membar_exit();
		/* Release the record for another named waiter to use. */
		sip->sel_lwp = NULL;
	}
	mutex_spin_exit(lock);
}

/*
 * Initialize the select/poll system calls.  Called once for each
 * CPU in the system, as they are attached.
 */
void
selsysinit(struct cpu_info *ci)
{
	selcpu_t *sc;

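	/*
	 * Overallocate and round the pointer up so that each per-CPU
	 * structure sits in its own cache line, avoiding false sharing
	 * between CPUs.
	 */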
	sc = kmem_alloc(roundup2(sizeof(selcpu_t), coherency_unit) +
	    coherency_unit, KM_SLEEP);
	sc = (void *)roundup2((uintptr_t)sc, coherency_unit);
	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
	sleepq_init(&sc->sc_sleepq);
	sc->sc_ncoll = 0;
	sc->sc_mask = (1U << cpu_index(ci));
	ci->ci_data.cpu_selcpu = sc;
}

/*
 * Initialize a selinfo record.
 */
void
selinit(struct selinfo *sip)
{

	memset(sip, 0, sizeof(*sip));
}
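
/*
 * Lifecycle note: selinit() is typically paired with seldestroy()
 * (below), called once the owning object can no longer be reached by
 * selrecord() or selnotify().
 */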

/*
 * Destroy a selinfo record.  The owning object must not gain new
 * references while this is in progress: all activity on the record
 * must be stopped.
 *
 * Concurrency issues: we only need to guard against a call to
 * selclear() by a thread exiting selcommon() and/or pollcommon().
 * The caller has prevented further references being made to the
 * selinfo record via selrecord(), and it won't call selwakeup() again.
 */
void
seldestroy(struct selinfo *sip)
{
	selcpu_t *sc;
	kmutex_t *lock;
	lwp_t *l;

	if (sip->sel_lwp == NULL)
		return;

	/*
	 * Lock out selclear().  The selcpu pointer can't change while
	 * we are here since it is only ever changed in selrecord(),
	 * and that will not be entered again for this record because
	 * it is dying.
	 */
	KASSERT(sip->sel_cpu != NULL);
	sc = sip->sel_cpu;
	lock = sc->sc_lock;
	mutex_spin_enter(lock);
	if ((l = sip->sel_lwp) != NULL) {
		/*
		 * This should rarely happen, so although SLIST_REMOVE()
		 * is slow, using it here is not a problem.
		 */
		KASSERT(l->l_selcpu == sc);
		SLIST_REMOVE(&l->l_selwait, sip, selinfo, sel_chain);
		sip->sel_lwp = NULL;
	}
	mutex_spin_exit(lock);
}

int
pollsock(struct socket *so, const struct timeval *tvp, int events)
{
	int		ncoll, error, timo;
	struct timeval	sleeptv, tv;
	selcpu_t	*sc;
	lwp_t		*l;
	kmutex_t	*lock;

	timo = 0;
	if (tvp != NULL) {
		tv = *tvp;
		if (inittimeleft(&tv, &sleeptv) == -1)
			return EINVAL;
	}

	l = curlwp;
	sc = l->l_cpu->ci_data.cpu_selcpu;
	lock = sc->sc_lock;
	l->l_selcpu = sc;
	SLIST_INIT(&l->l_selwait);
	error = 0;
	for (;;) {
		/*
		 * No need to lock.  If this is overwritten by another
		 * value while scanning, we will retry below.  We only
		 * need to see exact state from the descriptors that
		 * we are about to poll, and lock activity resulting
		 * from fo_poll is enough to provide an up to date value
		 * for new polling activity.
		 */
		ncoll = sc->sc_ncoll;
		l->l_selflag = SEL_SCANNING;
		if (sopoll(so, events) != 0)
			break;
		if (tvp && (timo = gettimeleft(&tv, &sleeptv)) <= 0)
			break;
		mutex_spin_enter(lock);
		if (l->l_selflag != SEL_SCANNING || sc->sc_ncoll != ncoll) {
			mutex_spin_exit(lock);
			continue;
		}
		l->l_selflag = SEL_BLOCKING;
		sleepq_enter(&sc->sc_sleepq, l, lock);
		sleepq_enqueue(&sc->sc_sleepq, sc, "pollsock", &select_sobj);
		error = sleepq_block(timo, true);
		if (error != 0)
			break;
	}
	selclear();
	/* poll is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	return (error);
}