/*	$NetBSD: sys_select.c,v 1.22 2010/04/25 15:55:24 ad Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009, 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)sys_generic.c	8.9 (Berkeley) 2/14/95
 */

/*
 * System calls of synchronous I/O multiplexing subsystem.
 *
 * Locking
 *
 * Two locks are used: <object-lock> and selcluster_t::sc_lock.
 *
 * The <object-lock> is a lock belonging to a device driver or another
 * subsystem, e.g. socket or pipe.  It is not exported, and thus invisible
 * to this subsystem.  Mainly, synchronisation between the selrecord() and
 * selnotify() routines depends on this lock, as described in the comments
 * below.
 *
 * Lock order
 *
 *	<object-lock> ->
 *		selcluster_t::sc_lock
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_select.c,v 1.22 2010/04/25 15:55:24 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/stat.h>
#include <sys/poll.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/sleepq.h>

/* Flags for lwp::l_selflag. */
#define	SEL_RESET	0	/* awoken, interrupted, or not yet polling */
#define	SEL_SCANNING	1	/* polling descriptors */
#define	SEL_BLOCKING	2	/* about to block on the cluster's sleep queue */

/*
 * Per-cluster state for select()/poll().  For a system with fewer
 * than 32 CPUs, this gives us per-CPU clusters.
 */
#define	SELCLUSTERS	32
#define	SELCLUSTERMASK	(SELCLUSTERS - 1)

typedef struct selcluster {
	kmutex_t	*sc_lock;
	sleepq_t	sc_sleepq;
	int		sc_ncoll;
	uint32_t	sc_mask;
} selcluster_t;

static inline int	selscan(char *, u_int, register_t *);
static inline int	pollscan(struct pollfd *, u_int, register_t *);
static void		selclear(void);

static syncobj_t select_sobj = {
	SOBJ_SLEEPQ_FIFO,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

static selcluster_t *selcluster[SELCLUSTERS];
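/*
 * Illustrative sketch of the protocol described under "Locking" above:
 * a driver's fo_poll routine takes its own <object-lock> and, if the
 * device is not ready, calls selrecord() while still holding it.  The
 * mydev_softc structure and its sc_lock/sc_ready/sc_rsel members are
 * hypothetical names used only for this sketch.
 */
#if 0
static int
mydev_poll(file_t *fp, int events)
{
	struct mydev_softc *sc = fp->f_data;	/* hypothetical softc */
	int revents = 0;

	mutex_enter(&sc->sc_lock);		/* the <object-lock> */
	if ((events & (POLLIN | POLLRDNORM)) != 0) {
		if (sc->sc_ready)
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(curlwp, &sc->sc_rsel); /* struct selinfo */
	}
	mutex_exit(&sc->sc_lock);
	return revents;
}
#endif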
/*
 * Select system call.
 */
int
sys___pselect50(struct lwp *l, const struct sys___pselect50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int)				nd;
		syscallarg(fd_set *)			in;
		syscallarg(fd_set *)			ou;
		syscallarg(fd_set *)			ex;
		syscallarg(const struct timespec *)	ts;
		syscallarg(sigset_t *)			mask;
	} */
	struct timespec ats, *ts = NULL;
	sigset_t amask, *mask = NULL;
	int error;

	if (SCARG(uap, ts)) {
		error = copyin(SCARG(uap, ts), &ats, sizeof(ats));
		if (error)
			return error;
		ts = &ats;
	}
	if (SCARG(uap, mask) != NULL) {
		error = copyin(SCARG(uap, mask), &amask, sizeof(amask));
		if (error)
			return error;
		mask = &amask;
	}

	return selcommon(retval, SCARG(uap, nd), SCARG(uap, in),
	    SCARG(uap, ou), SCARG(uap, ex), ts, mask);
}

int
sys___select50(struct lwp *l, const struct sys___select50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int)			nd;
		syscallarg(fd_set *)		in;
		syscallarg(fd_set *)		ou;
		syscallarg(fd_set *)		ex;
		syscallarg(struct timeval *)	tv;
	} */
	struct timeval atv;
	struct timespec ats, *ts = NULL;
	int error;

	if (SCARG(uap, tv)) {
		error = copyin(SCARG(uap, tv), (void *)&atv, sizeof(atv));
		if (error)
			return error;
		TIMEVAL_TO_TIMESPEC(&atv, &ats);
		ts = &ats;
	}

	return selcommon(retval, SCARG(uap, nd), SCARG(uap, in),
	    SCARG(uap, ou), SCARG(uap, ex), ts, NULL);
}

/*
 * sel_do_scan: common code to perform the scan on descriptors.
 */
static int
sel_do_scan(void *fds, u_int nfds, struct timespec *ts, sigset_t *mask,
    register_t *retval, int selpoll)
{
	lwp_t * const l = curlwp;
	proc_t * const p = l->l_proc;
	selcluster_t *sc;
	kmutex_t *lock;
	sigset_t oldmask;
	struct timespec sleepts;
	int error, timo;

	timo = 0;
	if (ts && inittimeleft(ts, &sleepts) == -1) {
		return EINVAL;
	}

	if (__predict_false(mask)) {
		sigminusset(&sigcantmask, mask);
		mutex_enter(p->p_lock);
		oldmask = l->l_sigmask;
		l->l_sigmask = *mask;
		mutex_exit(p->p_lock);
	} else {
		/* XXXgcc */
		oldmask = l->l_sigmask;
	}

	sc = curcpu()->ci_data.cpu_selcluster;
	lock = sc->sc_lock;
	l->l_selcluster = sc;
	SLIST_INIT(&l->l_selwait);
	for (;;) {
		int ncoll;

		/*
		 * No need to lock.  If this is overwritten by another value
		 * while scanning, we will retry below.  We only need to see
		 * exact state from the descriptors that we are about to poll,
		 * and lock activity resulting from fo_poll is enough to
		 * provide an up to date value for new polling activity.
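		 *
		 * (Concretely: a selnotify() that finds this LWP as the
		 * named waiter resets l_selflag, while a collision wakeup
		 * advances sc_ncoll; either way the re-check under sc_lock
		 * below fails and we rescan instead of sleeping.)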
		 */
		l->l_selflag = SEL_SCANNING;
		ncoll = sc->sc_ncoll;

		if (selpoll) {
			error = selscan((char *)fds, nfds, retval);
		} else {
			error = pollscan((struct pollfd *)fds, nfds, retval);
		}

		if (error || *retval)
			break;
		if (ts && (timo = gettimeleft(ts, &sleepts)) <= 0)
			break;
		mutex_spin_enter(lock);
		if (l->l_selflag != SEL_SCANNING || sc->sc_ncoll != ncoll) {
			mutex_spin_exit(lock);
			continue;
		}
		l->l_selflag = SEL_BLOCKING;
		l->l_kpriority = true;
		sleepq_enter(&sc->sc_sleepq, l, lock);
		sleepq_enqueue(&sc->sc_sleepq, sc, "select", &select_sobj);
		error = sleepq_block(timo, true);
		if (error != 0)
			break;
	}
	selclear();

	if (__predict_false(mask)) {
		mutex_enter(p->p_lock);
		l->l_sigmask = oldmask;
		mutex_exit(p->p_lock);
	}

	/* select and poll are not restarted after signals... */
	if (error == ERESTART)
		return EINTR;
	if (error == EWOULDBLOCK)
		return 0;
	return error;
}

int
selcommon(register_t *retval, int nd, fd_set *u_in, fd_set *u_ou,
    fd_set *u_ex, struct timespec *ts, sigset_t *mask)
{
	char smallbits[howmany(FD_SETSIZE, NFDBITS) *
	    sizeof(fd_mask) * 6];
	char *bits;
	int error, nf;
	size_t ni;

	if (nd < 0)
		return (EINVAL);
	nf = curlwp->l_fd->fd_dt->dt_nfiles;
	if (nd > nf) {
		/* forgiving; slightly wrong */
		nd = nf;
	}
	ni = howmany(nd, NFDBITS) * sizeof(fd_mask);
	if (ni * 6 > sizeof(smallbits)) {
		bits = kmem_alloc(ni * 6, KM_SLEEP);
		if (bits == NULL)
			return ENOMEM;
	} else
		bits = smallbits;

#define	getbits(name, x)						\
	if (u_ ## name) {						\
		error = copyin(u_ ## name, bits + ni * x, ni);		\
		if (error)						\
			goto fail;					\
	} else								\
		memset(bits + ni * x, 0, ni);
	getbits(in, 0);
	getbits(ou, 1);
	getbits(ex, 2);
#undef	getbits

	error = sel_do_scan(bits, nd, ts, mask, retval, 1);
	if (error == 0 && u_in != NULL)
		error = copyout(bits + ni * 3, u_in, ni);
	if (error == 0 && u_ou != NULL)
		error = copyout(bits + ni * 4, u_ou, ni);
	if (error == 0 && u_ex != NULL)
		error = copyout(bits + ni * 5, u_ex, ni);
 fail:
	if (bits != smallbits)
		kmem_free(bits, ni * 6);
	return (error);
}
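/*
 * Layout note for the `bits' buffer shared by selcommon() and selscan()
 * (summarizing the code): six slabs of `ni' bytes each.  Slabs 0-2 hold
 * the in/ou/ex input sets copied in from userspace; selscan() writes the
 * matching result sets into slabs 3-5, which selcommon() copies back out.
 */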
static inline int
selscan(char *bits, u_int nfd, register_t *retval)
{
	/* Events that mark a descriptor ready for each of in/ou/ex. */
	static const int flag[3] = { POLLRDNORM | POLLHUP | POLLERR,
			       POLLWRNORM | POLLHUP | POLLERR,
			       POLLRDBAND };
	fd_mask *ibitp, *obitp;
	int msk, i, j, fd, ni, n;
	fd_mask ibits, obits;
	file_t *fp;

	ni = howmany(nfd, NFDBITS) * sizeof(fd_mask);
	ibitp = (fd_mask *)(bits + ni * 0);
	obitp = (fd_mask *)(bits + ni * 3);
	n = 0;

	for (msk = 0; msk < 3; msk++) {
		for (i = 0; i < nfd; i += NFDBITS) {
			ibits = *ibitp++;
			obits = 0;
			while ((j = ffs(ibits)) && (fd = i + --j) < nfd) {
				ibits &= ~(1U << j);
				if ((fp = fd_getfile(fd)) == NULL)
					return (EBADF);
				if ((*fp->f_ops->fo_poll)(fp, flag[msk])) {
					obits |= (1U << j);
					n++;
				}
				fd_putfile(fd);
			}
			*obitp++ = obits;
		}
	}
	*retval = n;
	return (0);
}

/*
 * Poll system call.
 */
int
sys_poll(struct lwp *l, const struct sys_poll_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct pollfd *)	fds;
		syscallarg(u_int)		nfds;
		syscallarg(int)			timeout;
	} */
	struct timespec ats, *ts = NULL;

	if (SCARG(uap, timeout) != INFTIM) {
		ats.tv_sec = SCARG(uap, timeout) / 1000;
		ats.tv_nsec = (SCARG(uap, timeout) % 1000) * 1000000;
		ts = &ats;
	}

	return pollcommon(retval, SCARG(uap, fds), SCARG(uap, nfds), ts, NULL);
}

/*
 * Poll system call with a timespec and signal mask (pollts).
 */
int
sys___pollts50(struct lwp *l, const struct sys___pollts50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct pollfd *)		fds;
		syscallarg(u_int)			nfds;
		syscallarg(const struct timespec *)	ts;
		syscallarg(const sigset_t *)		mask;
	} */
	struct timespec ats, *ts = NULL;
	sigset_t amask, *mask = NULL;
	int error;

	if (SCARG(uap, ts)) {
		error = copyin(SCARG(uap, ts), &ats, sizeof(ats));
		if (error)
			return error;
		ts = &ats;
	}
	if (SCARG(uap, mask)) {
		error = copyin(SCARG(uap, mask), &amask, sizeof(amask));
		if (error)
			return error;
		mask = &amask;
	}

	return pollcommon(retval, SCARG(uap, fds), SCARG(uap, nfds), ts, mask);
}

int
pollcommon(register_t *retval, struct pollfd *u_fds, u_int nfds,
    struct timespec *ts, sigset_t *mask)
{
	struct pollfd smallfds[32];
	struct pollfd *fds;
	int error;
	size_t ni;

	if (nfds > 1000 + curlwp->l_fd->fd_dt->dt_nfiles) {
		/*
		 * Either the user passed in a very sparse 'fds' or junk!
		 * The kmem_alloc() call below would be bad news.
		 * We could process the 'fds' array in chunks, but that
		 * is a lot of code that isn't normally useful.
		 * (Or just move the copyin/out into pollscan().)
		 * Historically the code silently truncated 'fds' to
		 * dt_nfiles entries - but that does cause issues.
		 */
		return EINVAL;
	}
	ni = nfds * sizeof(struct pollfd);
	if (ni > sizeof(smallfds)) {
		fds = kmem_alloc(ni, KM_SLEEP);
		if (fds == NULL)
			return ENOMEM;
	} else
		fds = smallfds;

	error = copyin(u_fds, fds, ni);
	if (error)
		goto fail;

	error = sel_do_scan(fds, nfds, ts, mask, retval, 0);
	if (error == 0)
		error = copyout(fds, u_fds, ni);
 fail:
	if (fds != smallfds)
		kmem_free(fds, ni);
	return (error);
}

static inline int
pollscan(struct pollfd *fds, u_int nfd, register_t *retval)
{
	int i, n;
	file_t *fp;

	n = 0;
	for (i = 0; i < nfd; i++, fds++) {
		if (fds->fd < 0) {
			fds->revents = 0;
		} else if ((fp = fd_getfile(fds->fd)) == NULL) {
			fds->revents = POLLNVAL;
			n++;
		} else {
			fds->revents = (*fp->f_ops->fo_poll)(fp,
			    fds->events | POLLERR | POLLHUP);
			if (fds->revents != 0)
				n++;
			fd_putfile(fds->fd);
		}
	}
	*retval = n;
	return (0);
}

/*ARGSUSED*/
int
seltrue(dev_t dev, int events, lwp_t *l)
{

	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
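/*
 * Illustrative note: a driver whose device is always ready for I/O can
 * point the d_poll member of its cdevsw directly at seltrue() and then
 * has no need for selrecord()/selnotify() at all.
 */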
/*
 * Record a select request.  Concurrency issues:
 *
 *	The caller holds the same lock across calls to selrecord() and
 *	selnotify(), so we don't need to consider a concurrent wakeup
 *	while in this routine.
 *
 *	The only activity we need to guard against is selclear(), called by
 *	another thread that is exiting sel_do_scan().
 *	`sel_lwp' can only become non-NULL while the caller's lock is held,
 *	so it cannot become non-NULL due to a change made by another thread
 *	while we are in this routine.  It can only become _NULL_ due to a
 *	call to selclear().
 *
 *	If it is non-NULL and != selector there is the potential for
 *	selclear() to be called by another thread.  If either of those
 *	conditions is true, we're not interested in touching the `named
 *	waiter' part of the selinfo record because we need to record a
 *	collision.  Hence there is no need for additional locking in this
 *	routine.
 */
void
selrecord(lwp_t *selector, struct selinfo *sip)
{
	selcluster_t *sc;
	lwp_t *other;

	KASSERT(selector == curlwp);

	sc = selector->l_selcluster;
	other = sip->sel_lwp;

	if (other == selector) {
		/* `selector' has already claimed it. */
		KASSERT(sip->sel_cluster == sc);
	} else if (other == NULL) {
		/*
		 * First named waiter, although there may be unnamed
		 * waiters (collisions).  Issue a memory barrier to
		 * ensure that we access sel_lwp (above) before other
		 * fields - this guards against a call to selclear().
		 */
		membar_enter();
		sip->sel_lwp = selector;
		SLIST_INSERT_HEAD(&selector->l_selwait, sip, sel_chain);
		/* Record the cluster, and thereby its lock, for selnotify(). */
		sip->sel_cluster = sc;
	} else {
		/* Multiple waiters: record a collision. */
		sip->sel_collision |= sc->sc_mask;
		KASSERT(sip->sel_cluster != NULL);
	}
}

/*
 * Do a wakeup when a selectable event occurs.  Concurrency issues:
 *
 *	As per selrecord(), the caller's object lock is held.  If there
 *	is a named waiter, we must acquire the associated selcluster's lock
 *	in order to synchronize with selclear() and pollers going to sleep
 *	in sel_do_scan().
 *
 *	sip->sel_cluster cannot change at this point, as it is only changed
 *	in selrecord(), and concurrent calls to selrecord() are locked
 *	out by the caller.
 */
void
selnotify(struct selinfo *sip, int events, long knhint)
{
	selcluster_t *sc;
	uint32_t mask;
	int index, oflag;
	lwp_t *l;
	kmutex_t *lock;

	KNOTE(&sip->sel_klist, knhint);

	if (sip->sel_lwp != NULL) {
		/* One named LWP is waiting. */
		sc = sip->sel_cluster;
		lock = sc->sc_lock;
		mutex_spin_enter(lock);
		/* Still there? */
		if (sip->sel_lwp != NULL) {
			l = sip->sel_lwp;
			/*
			 * If thread is sleeping, wake it up.  If it's not
			 * yet asleep, it will notice the change in state
			 * and will re-poll the descriptors.
			 */
			oflag = l->l_selflag;
			l->l_selflag = SEL_RESET;
			if (oflag == SEL_BLOCKING && l->l_mutex == lock) {
				KASSERT(l->l_wchan == sc);
				sleepq_unsleep(l, false);
			}
		}
		mutex_spin_exit(lock);
	}

	if ((mask = sip->sel_collision) != 0) {
		/*
		 * There was a collision (multiple waiters): we must
		 * inform all potentially interested waiters.
		 */
		sip->sel_collision = 0;
		do {
			index = ffs(mask) - 1;
			mask &= ~(1U << index);
			sc = selcluster[index];
			lock = sc->sc_lock;
			mutex_spin_enter(lock);
			sc->sc_ncoll++;
			sleepq_wake(&sc->sc_sleepq, sc, (u_int)-1, lock);
		} while (__predict_false(mask != 0));
	}
}
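/*
 * Illustrative sketch, continuing the hypothetical mydev driver from the
 * example near the top of this file: on data arrival the driver marks
 * itself ready and calls selnotify() while holding the same <object-lock>
 * it held around selrecord().
 */
#if 0
static void
mydev_rx(struct mydev_softc *sc)	/* hypothetical receive handler */
{

	mutex_enter(&sc->sc_lock);
	sc->sc_ready = true;
	selnotify(&sc->sc_rsel, POLLIN | POLLRDNORM, 0);
	mutex_exit(&sc->sc_lock);
}
#endif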
/*
 * Remove an LWP from all objects that it is waiting for.  Concurrency
 * issues:
 *
 *	The object owner's (e.g. device driver) lock is not held here.  Calls
 *	can be made to selrecord() and we do not synchronize against those
 *	directly using locks.  However, we use `sel_lwp' to lock out changes.
 *	Before clearing it we must use memory barriers to ensure that we can
 *	safely traverse the list of selinfo records.
 */
static void
selclear(void)
{
	struct selinfo *sip, *next;
	selcluster_t *sc;
	lwp_t *l;
	kmutex_t *lock;

	l = curlwp;
	sc = l->l_selcluster;
	lock = sc->sc_lock;

	mutex_spin_enter(lock);
	for (sip = SLIST_FIRST(&l->l_selwait); sip != NULL; sip = next) {
		KASSERT(sip->sel_lwp == l);
		KASSERT(sip->sel_cluster == l->l_selcluster);

		/*
		 * Read link to next selinfo record, if any.
		 * It's no longer safe to touch `sip' after clearing
		 * `sel_lwp', so ensure that the read of `sel_chain'
		 * completes before the clearing of sel_lwp becomes
		 * globally visible.
		 */
		next = SLIST_NEXT(sip, sel_chain);
		membar_exit();
		/* Release the record for another named waiter to use. */
		sip->sel_lwp = NULL;
	}
	mutex_spin_exit(lock);
}

/*
 * Initialize the select/poll system calls.  Called once for each
 * CPU in the system, as they are attached.
 */
void
selsysinit(struct cpu_info *ci)
{
	selcluster_t *sc;
	u_int index;

	/* If already a cluster in place for this bit, re-use. */
	index = cpu_index(ci) & SELCLUSTERMASK;
	sc = selcluster[index];
	if (sc == NULL) {
		sc = kmem_alloc(roundup2(sizeof(selcluster_t),
		    coherency_unit) + coherency_unit, KM_SLEEP);
		sc = (void *)roundup2((uintptr_t)sc, coherency_unit);
		sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
		sleepq_init(&sc->sc_sleepq);
		sc->sc_ncoll = 0;
		sc->sc_mask = (1 << index);
		selcluster[index] = sc;
	}
	ci->ci_data.cpu_selcluster = sc;
}

/*
 * Initialize a selinfo record.
 */
void
selinit(struct selinfo *sip)
{

	memset(sip, 0, sizeof(*sip));
}
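/*
 * Illustrative sketch: the usual lifetime of a selinfo record in the
 * hypothetical mydev driver from the earlier examples.  selinit() runs
 * at attach time; seldestroy() (below) runs at detach time, once no
 * further references to the record can be taken.
 */
#if 0
static void
mydev_attach(struct mydev_softc *sc)
{

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
	selinit(&sc->sc_rsel);
}

static void
mydev_detach(struct mydev_softc *sc)
{

	/* All references are gone: no more selrecord()/selnotify(). */
	seldestroy(&sc->sc_rsel);
	mutex_destroy(&sc->sc_lock);
}
#endif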
/*
 * Destroy a selinfo record.  The owning object must not gain new
 * references while this is in progress: all activity on the record
 * must be stopped.
 *
 * Concurrency issues: we only need to guard against a call to selclear()
 * by a thread exiting sel_do_scan().  The caller has prevented further
 * references being made to the selinfo record via selrecord(), and it
 * won't call selnotify() again.
 */
void
seldestroy(struct selinfo *sip)
{
	selcluster_t *sc;
	kmutex_t *lock;
	lwp_t *l;

	if (sip->sel_lwp == NULL)
		return;

	/*
	 * Lock out selclear().  The selcluster pointer can't change while
	 * we are here since it is only ever changed in selrecord(),
	 * and that will not be entered again for this record because
	 * it is dying.
	 */
	KASSERT(sip->sel_cluster != NULL);
	sc = sip->sel_cluster;
	lock = sc->sc_lock;
	mutex_spin_enter(lock);
	if ((l = sip->sel_lwp) != NULL) {
		/*
		 * This should rarely happen, so although SLIST_REMOVE()
		 * is slow, using it here is not a problem.
		 */
		KASSERT(l->l_selcluster == sc);
		SLIST_REMOVE(&l->l_selwait, sip, selinfo, sel_chain);
		sip->sel_lwp = NULL;
	}
	mutex_spin_exit(lock);
}

int
pollsock(struct socket *so, const struct timespec *tsp, int events)
{
	int ncoll, error, timo;
	struct timespec sleepts, ts;
	selcluster_t *sc;
	lwp_t *l;
	kmutex_t *lock;

	timo = 0;
	if (tsp != NULL) {
		ts = *tsp;
		if (inittimeleft(&ts, &sleepts) == -1)
			return EINVAL;
	}

	l = curlwp;
	sc = curcpu()->ci_data.cpu_selcluster;
	lock = sc->sc_lock;
	l->l_selcluster = sc;
	SLIST_INIT(&l->l_selwait);
	error = 0;
	for (;;) {
		/*
		 * No need to lock.  If this is overwritten by another
		 * value while scanning, we will retry below.  We only
		 * need to see exact state from the descriptors that
		 * we are about to poll, and lock activity resulting
		 * from fo_poll is enough to provide an up to date value
		 * for new polling activity.
		 */
		ncoll = sc->sc_ncoll;
		l->l_selflag = SEL_SCANNING;
		if (sopoll(so, events) != 0)
			break;
		if (tsp && (timo = gettimeleft(&ts, &sleepts)) <= 0)
			break;
		mutex_spin_enter(lock);
		if (l->l_selflag != SEL_SCANNING || sc->sc_ncoll != ncoll) {
			mutex_spin_exit(lock);
			continue;
		}
		l->l_selflag = SEL_BLOCKING;
		sleepq_enter(&sc->sc_sleepq, l, lock);
		sleepq_enqueue(&sc->sc_sleepq, sc, "pollsock", &select_sobj);
		error = sleepq_block(timo, true);
		if (error != 0)
			break;
	}
	selclear();
	/* poll is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	return (error);
}
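/*
 * Illustrative sketch: an in-kernel consumer waiting up to five seconds
 * for a socket `so' to become readable via pollsock().  Note that a
 * return of 0 means either that an event fired or that the timeout
 * expired; the caller distinguishes the two by re-checking the socket.
 * The surrounding code is hypothetical.
 */
#if 0
	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
	int error;

	error = pollsock(so, &ts, POLLIN | POLLRDNORM);
	if (error == 0) {
		if (sopoll(so, POLLIN | POLLRDNORM) != 0) {
			/* ... socket is ready; go read from it ... */
		} else {
			/* ... timed out ... */
		}
	}
#endif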