/*	$OpenBSD: kern_synch.c,v 1.155 2019/11/30 11:19:17 visa Exp $	*/
/*	$NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.6 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/timeout.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/pool.h>
#include <sys/refcnt.h>
#include <sys/atomic.h>
#include <sys/witness.h>
#include <ddb/db_output.h>

#include <machine/spinlock.h>

#ifdef DIAGNOSTIC
#include <sys/syslog.h>
#endif

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

int	thrsleep(struct proc *, struct sys___thrsleep_args *);
int	thrsleep_unlock(void *);

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
#define LOOKUP(x)	(((long)(x) >> 8) & (TABLESIZE - 1))
TAILQ_HEAD(slpque,proc) slpque[TABLESIZE];

void
sleep_queue_init(void)
{
	int i;

	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}
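/*
 * Worked example of the wait-channel hash above (added illustration, not
 * part of the original description; the address is hypothetical): for a
 * channel at 0x...45a00, LOOKUP() shifts right by 8 to get 0x...45a and
 * masks with TABLESIZE - 1 (0x7f), selecting bucket 0x5a (90).  Channels
 * whose addresses differ only in their low 8 bits share a bucket.
 */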
/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
extern int safepri;

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 */
int
tsleep(const volatile void *ident, int priority, const char *wmesg, int timo)
{
	struct sleep_state sls;
#ifdef MULTIPROCESSOR
	int hold_count;
#endif

	KASSERT((priority & ~(PRIMASK | PCATCH)) == 0);

#ifdef MULTIPROCESSOR
	KASSERT(timo || _kernel_lock_held());
#endif

#ifdef DDB
	if (cold == 2)
		db_stack_dump();
#endif
	if (cold || panicstr) {
		int s;
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		s = splhigh();
		splx(safepri);
#ifdef MULTIPROCESSOR
		if (_kernel_lock_held()) {
			hold_count = __mp_release_all(&kernel_lock);
			__mp_acquire_count(&kernel_lock, hold_count);
		}
#endif
		splx(s);
		return (0);
	}

	sleep_setup(&sls, ident, priority, wmesg);
	sleep_setup_timeout(&sls, timo);
	sleep_setup_signal(&sls);

	return sleep_finish_all(&sls, 1);
}

int
tsleep_nsec(const volatile void *ident, int priority, const char *wmesg,
    uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return tsleep(ident, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING, "%s: %s: trying to sleep zero nanoseconds\n",
		    __func__, wmesg);
	}
#endif
	to_ticks = nsecs / (tick * 1000);
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	if (to_ticks == 0)
		to_ticks = 1;
	return tsleep(ident, priority, wmesg, (int)to_ticks);
}

int
sleep_finish_all(struct sleep_state *sls, int do_sleep)
{
	int error, error1;

	sleep_finish(sls, do_sleep);
	error1 = sleep_finish_timeout(sls);
	error = sleep_finish_signal(sls);

	/* Signal errors are higher priority than timeouts. */
	if (error == 0 && error1 != 0)
		error = error1;

	return error;
}
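/*
 * Hedged usage sketch for tsleep_nsec() (illustration only; the softc
 * field, wait message and surrounding driver are hypothetical and not
 * part of this file).  The condition is re-checked in a loop because a
 * wakeup on the channel does not by itself guarantee the condition now
 * holds, and PCATCH lets a signal interrupt the wait:
 *
 *	while (sc->sc_buf_ready == 0) {
 *		error = tsleep_nsec(&sc->sc_buf_ready, PWAIT | PCATCH,
 *		    "xyzrd", INFSLP);
 *		if (error != 0)
 *			return (error);
 *	}
 */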
/*
 * Same as tsleep, but if we have a mutex provided, then once we've
 * entered the sleep queue we drop the mutex.  After sleeping we re-lock.
 */
int
msleep(const volatile void *ident, struct mutex *mtx, int priority,
    const char *wmesg, int timo)
{
	struct sleep_state sls;
	int error, spl;
#ifdef MULTIPROCESSOR
	int hold_count;
#endif

	KASSERT((priority & ~(PRIMASK | PCATCH | PNORELOCK)) == 0);
	KASSERT(mtx != NULL);

	if (priority & PCATCH)
		KERNEL_ASSERT_LOCKED();

	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		spl = MUTEX_OLDIPL(mtx);
		MUTEX_OLDIPL(mtx) = safepri;
		mtx_leave(mtx);
#ifdef MULTIPROCESSOR
		if (_kernel_lock_held()) {
			hold_count = __mp_release_all(&kernel_lock);
			__mp_acquire_count(&kernel_lock, hold_count);
		}
#endif
		if ((priority & PNORELOCK) == 0) {
			mtx_enter(mtx);
			MUTEX_OLDIPL(mtx) = spl;
		} else
			splx(spl);
		return (0);
	}

	sleep_setup(&sls, ident, priority, wmesg);
	sleep_setup_timeout(&sls, timo);
	sleep_setup_signal(&sls);

	/*
	 * XXX - We need to make sure that the mutex doesn't
	 * unblock splsched.  This can be made a bit more
	 * correct when the sched_lock is a mutex.
	 */
	spl = MUTEX_OLDIPL(mtx);
	MUTEX_OLDIPL(mtx) = splsched();
	mtx_leave(mtx);

	error = sleep_finish_all(&sls, 1);

	if ((priority & PNORELOCK) == 0) {
		mtx_enter(mtx);
		MUTEX_OLDIPL(mtx) = spl;	/* put the ipl back */
	} else
		splx(spl);

	return error;
}

int
msleep_nsec(const volatile void *ident, struct mutex *mtx, int priority,
    const char *wmesg, uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return msleep(ident, mtx, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING, "%s: %s: trying to sleep zero nanoseconds\n",
		    __func__, wmesg);
	}
#endif
	to_ticks = nsecs / (tick * 1000);
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	if (to_ticks == 0)
		to_ticks = 1;
	return msleep(ident, mtx, priority, wmesg, (int)to_ticks);
}
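/*
 * Hedged usage sketch for msleep_nsec() (illustration only; the mutex,
 * flag and wait message are hypothetical).  The mutex protecting the
 * condition is handed to msleep_nsec(), which releases it only after the
 * process is on the sleep queue and reacquires it before returning, so a
 * wakeup cannot be lost between the check and the sleep:
 *
 *	mtx_enter(&sc->sc_mtx);
 *	while (sc->sc_busy)
 *		msleep_nsec(&sc->sc_busy, &sc->sc_mtx, PWAIT,
 *		    "xyzbusy", INFSLP);
 *	sc->sc_busy = 1;
 *	mtx_leave(&sc->sc_mtx);
 */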
/*
 * Same as tsleep, but if we have a rwlock provided, then once we've
 * entered the sleep queue we drop it.  After sleeping we re-lock.
 */
int
rwsleep(const volatile void *ident, struct rwlock *rwl, int priority,
    const char *wmesg, int timo)
{
	struct sleep_state sls;
	int error, status;

	KASSERT((priority & ~(PRIMASK | PCATCH | PNORELOCK)) == 0);
	rw_assert_anylock(rwl);
	status = rw_status(rwl);

	sleep_setup(&sls, ident, priority, wmesg);
	sleep_setup_timeout(&sls, timo);
	sleep_setup_signal(&sls);

	rw_exit(rwl);

	error = sleep_finish_all(&sls, 1);

	if ((priority & PNORELOCK) == 0)
		rw_enter(rwl, status);

	return error;
}

int
rwsleep_nsec(const volatile void *ident, struct rwlock *rwl, int priority,
    const char *wmesg, uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return rwsleep(ident, rwl, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING, "%s: %s: trying to sleep zero nanoseconds\n",
		    __func__, wmesg);
	}
#endif
	to_ticks = nsecs / (tick * 1000);
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	if (to_ticks == 0)
		to_ticks = 1;
	return rwsleep(ident, rwl, priority, wmesg, (int)to_ticks);
}

void
sleep_setup(struct sleep_state *sls, const volatile void *ident, int prio,
    const char *wmesg)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (p->p_flag & P_CANTSLEEP)
		panic("sleep: %s failed insomnia", p->p_p->ps_comm);
	if (ident == NULL)
		panic("tsleep: no ident");
	if (p->p_stat != SONPROC)
		panic("tsleep: not SONPROC");
#endif

	sls->sls_catch = prio & PCATCH;
	sls->sls_do_sleep = 1;
	sls->sls_locked = 0;
	sls->sls_sig = 1;
	sls->sls_timeout = 0;

	/*
	 * The kernel has to be locked for signal processing.
	 * This is done here and not in sleep_setup_signal() because
	 * KERNEL_LOCK() has to be taken before SCHED_LOCK().
	 */
	if (sls->sls_catch != 0) {
		KERNEL_LOCK();
		sls->sls_locked = 1;
	}

	SCHED_LOCK(sls->sls_s);

	p->p_wchan = ident;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_priority = prio & PRIMASK;
	TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_runq);
}

void
sleep_finish(struct sleep_state *sls, int do_sleep)
{
	struct proc *p = curproc;

	if (sls->sls_do_sleep && do_sleep) {
		p->p_stat = SSLEEP;
		p->p_ru.ru_nvcsw++;
		SCHED_ASSERT_LOCKED();
		mi_switch();
	} else if (!do_sleep) {
		unsleep(p);
	}

#ifdef DIAGNOSTIC
	if (p->p_stat != SONPROC)
		panic("sleep_finish !SONPROC");
#endif

	p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
	SCHED_UNLOCK(sls->sls_s);

	/*
	 * Even though this belongs to the signal handling part of sleep,
	 * we need to clear it before the ktrace.
	 */
	atomic_clearbits_int(&p->p_flag, P_SINTR);
}

void
sleep_setup_timeout(struct sleep_state *sls, int timo)
{
	struct proc *p = curproc;

	if (timo) {
		KASSERT((p->p_flag & P_TIMEOUT) == 0);
		sls->sls_timeout = 1;
		timeout_add(&p->p_sleep_to, timo);
	}
}
int
sleep_finish_timeout(struct sleep_state *sls)
{
	struct proc *p = curproc;

	if (sls->sls_timeout) {
		if (p->p_flag & P_TIMEOUT) {
			atomic_clearbits_int(&p->p_flag, P_TIMEOUT);
			return (EWOULDBLOCK);
		} else {
			/* This must not sleep. */
			timeout_del_barrier(&p->p_sleep_to);
			KASSERT((p->p_flag & P_TIMEOUT) == 0);
		}
	}

	return (0);
}

void
sleep_setup_signal(struct sleep_state *sls)
{
	struct proc *p = curproc;

	if (sls->sls_catch == 0)
		return;

	/* sleep_setup() has locked the kernel. */
	KERNEL_ASSERT_LOCKED();

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, p->p_wchan will be 0 upon return from CURSIG.
	 */
	atomic_setbits_int(&p->p_flag, P_SINTR);
	if (p->p_p->ps_single != NULL || (sls->sls_sig = CURSIG(p)) != 0) {
		if (p->p_wchan)
			unsleep(p);
		p->p_stat = SONPROC;
		sls->sls_do_sleep = 0;
	} else if (p->p_wchan == 0) {
		sls->sls_catch = 0;
		sls->sls_do_sleep = 0;
	}
}

int
sleep_finish_signal(struct sleep_state *sls)
{
	struct proc *p = curproc;
	int error = 0;

	if (sls->sls_catch != 0) {
		KERNEL_ASSERT_LOCKED();

		error = single_thread_check(p, 1);
		if (error == 0 &&
		    (sls->sls_sig != 0 || (sls->sls_sig = CURSIG(p)) != 0)) {
			if (p->p_p->ps_sigacts->ps_sigintr &
			    sigmask(sls->sls_sig))
				error = EINTR;
			else
				error = ERESTART;
		}
	}

	if (sls->sls_locked)
		KERNEL_UNLOCK();

	return (error);
}

/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
void
endtsleep(void *arg)
{
	struct proc *p = arg;
	int s;

	SCHED_LOCK(s);
	if (p->p_wchan) {
		if (p->p_stat == SSLEEP)
			setrunnable(p);
		else
			unsleep(p);
		atomic_setbits_int(&p->p_flag, P_TIMEOUT);
	}
	SCHED_UNLOCK(s);
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(struct proc *p)
{
	SCHED_ASSERT_LOCKED();

	if (p->p_wchan) {
		TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_runq);
		p->p_wchan = NULL;
	}
}

/*
 * Make a number of processes sleeping on the specified identifier runnable.
 */
void
wakeup_n(const volatile void *ident, int n)
{
	struct slpque *qp;
	struct proc *p;
	struct proc *pnext;
	int s;

	SCHED_LOCK(s);
	qp = &slpque[LOOKUP(ident)];
	for (p = TAILQ_FIRST(qp); p != NULL && n != 0; p = pnext) {
		pnext = TAILQ_NEXT(p, p_runq);
#ifdef DIAGNOSTIC
		/*
		 * If the rwlock passed to rwsleep() is contended, the
		 * CPU will end up calling wakeup() between sleep_setup()
		 * and sleep_finish().
		 */
		if (p == curproc) {
			KASSERT(p->p_stat == SONPROC);
			continue;
		}
		if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("wakeup: p_stat is %d", (int)p->p_stat);
#endif
		if (p->p_wchan == ident) {
			--n;
			p->p_wchan = 0;
			TAILQ_REMOVE(qp, p, p_runq);
			if (p->p_stat == SSLEEP)
				setrunnable(p);
		}
	}
	SCHED_UNLOCK(s);
}
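/*
 * Hedged sketch of the producer side that pairs with the tsleep_nsec()
 * example earlier in this file (the softc field is the same hypothetical
 * one).  The waker updates the condition before calling wakeup(), so a
 * process taken off the sleep queue observes the new state when it
 * re-checks its loop condition:
 *
 *	sc->sc_buf_ready = 1;
 *	wakeup(&sc->sc_buf_ready);
 */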
/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(const volatile void *chan)
{
	wakeup_n(chan, -1);
}

int
sys_sched_yield(struct proc *p, void *v, register_t *retval)
{
	struct proc *q;
	uint8_t newprio;
	int s;

	SCHED_LOCK(s);
	/*
	 * If one of the threads of a multi-threaded process called
	 * sched_yield(2), drop its priority to ensure its siblings
	 * can make some progress.
	 */
	newprio = p->p_usrpri;
	TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link)
		newprio = max(newprio, q->p_priority);
	setrunqueue(p->p_cpu, p, newprio);
	p->p_ru.ru_nvcsw++;
	mi_switch();
	SCHED_UNLOCK(s);

	return (0);
}

int
thrsleep_unlock(void *lock)
{
	static _atomic_lock_t unlocked = _ATOMIC_LOCK_UNLOCKED;
	_atomic_lock_t *atomiclock = lock;

	if (!lock)
		return 0;

	return copyout(&unlocked, atomiclock, sizeof(unlocked));
}

static int globalsleepaddr;

int
thrsleep(struct proc *p, struct sys___thrsleep_args *v)
{
	struct sys___thrsleep_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
		syscallarg(void *) lock;
		syscallarg(const int *) abort;
	} */ *uap = v;
	long ident = (long)SCARG(uap, ident);
	struct timespec *tsp = (struct timespec *)SCARG(uap, tp);
	void *lock = SCARG(uap, lock);
	uint64_t to_ticks = 0;
	int abort, error;
	clockid_t clock_id = SCARG(uap, clock_id);

	if (ident == 0)
		return (EINVAL);
	if (tsp != NULL) {
		struct timespec now;

		if ((error = clock_gettime(p, clock_id, &now)))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrabstimespec(p, tsp);
#endif

		if (timespeccmp(tsp, &now, <)) {
			/* already passed: still do the unlock */
			if ((error = thrsleep_unlock(lock)))
				return (error);
			return (EWOULDBLOCK);
		}

		timespecsub(tsp, &now, tsp);
		to_ticks = (uint64_t)hz * tsp->tv_sec +
		    (tsp->tv_nsec + tick * 1000 - 1) / (tick * 1000) + 1;
		if (to_ticks > INT_MAX)
			to_ticks = INT_MAX;
	}

	p->p_thrslpid = ident;

	if ((error = thrsleep_unlock(lock)))
		goto out;

	if (SCARG(uap, abort) != NULL) {
		if ((error = copyin(SCARG(uap, abort), &abort,
		    sizeof(abort))) != 0)
			goto out;
		if (abort) {
			error = EINTR;
			goto out;
		}
	}

	if (p->p_thrslpid == 0)
		error = 0;
	else {
		void *sleepaddr = &p->p_thrslpid;
		if (ident == -1)
			sleepaddr = &globalsleepaddr;
		error = tsleep(sleepaddr, PWAIT|PCATCH, "thrsleep",
		    (int)to_ticks);
	}

out:
	p->p_thrslpid = 0;

	if (error == ERESTART)
		error = ECANCELED;

	return (error);
}

int
sys___thrsleep(struct proc *p, void *v, register_t *retval)
{
	struct sys___thrsleep_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
		syscallarg(void *) lock;
		syscallarg(const int *) abort;
	} */ *uap = v;
	struct timespec ts;
	int error;

	if (SCARG(uap, tp) != NULL) {
		if ((error = copyin(SCARG(uap, tp), &ts, sizeof(ts)))) {
			*retval = error;
			return 0;
		}
		if (!timespecisvalid(&ts)) {
			*retval = EINVAL;
			return 0;
		}
		SCARG(uap, tp) = &ts;
	}

	*retval = thrsleep(p, uap);
	return 0;
}
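/*
 * Hedged sketch of how a userland thread library is expected to pair the
 * two syscalls handled above (illustration only; the structure and field
 * names are hypothetical).  The waiter publishes an identifier and passes
 * its _atomic_lock_t spinlock, which thrsleep_unlock() releases on its
 * behalf once the sleep has been set up; the waker then wakes threads
 * sleeping on that identifier:
 *
 *	// waiting thread
 *	__thrsleep(&cv->cv_ident, CLOCK_REALTIME, NULL, &cv->cv_spin, NULL);
 *
 *	// waking thread
 *	__thrwakeup(&cv->cv_ident, 1);
 */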
int
sys___thrwakeup(struct proc *p, void *v, register_t *retval)
{
	struct sys___thrwakeup_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(int) n;
	} */ *uap = v;
	long ident = (long)SCARG(uap, ident);
	int n = SCARG(uap, n);
	struct proc *q;
	int found = 0;

	if (ident == 0)
		*retval = EINVAL;
	else if (ident == -1)
		wakeup(&globalsleepaddr);
	else {
		TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link) {
			if (q->p_thrslpid == ident) {
				wakeup_one(&q->p_thrslpid);
				q->p_thrslpid = 0;
				if (++found == n)
					break;
			}
		}
		*retval = found ? 0 : ESRCH;
	}

	return (0);
}

void
refcnt_init(struct refcnt *r)
{
	r->refs = 1;
}

void
refcnt_take(struct refcnt *r)
{
#ifdef DIAGNOSTIC
	u_int refcnt;

	refcnt = atomic_inc_int_nv(&r->refs);
	KASSERT(refcnt != 0);
#else
	atomic_inc_int(&r->refs);
#endif
}

int
refcnt_rele(struct refcnt *r)
{
	u_int refcnt;

	refcnt = atomic_dec_int_nv(&r->refs);
	KASSERT(refcnt != ~0);

	return (refcnt == 0);
}

void
refcnt_rele_wake(struct refcnt *r)
{
	if (refcnt_rele(r))
		wakeup_one(r);
}

/* Drop our reference and sleep until all remaining references are gone. */
void
refcnt_finalize(struct refcnt *r, const char *wmesg)
{
	struct sleep_state sls;
	u_int refcnt;

	refcnt = atomic_dec_int_nv(&r->refs);
	while (refcnt) {
		sleep_setup(&sls, r, PWAIT, wmesg);
		refcnt = r->refs;
		sleep_finish(&sls, refcnt);
	}
}

void
cond_init(struct cond *c)
{
	c->c_wait = 1;
}

void
cond_signal(struct cond *c)
{
	c->c_wait = 0;

	wakeup_one(c);
}

/* Sleep until cond_signal() is called on the condition. */
void
cond_wait(struct cond *c, const char *wmesg)
{
	struct sleep_state sls;
	int wait;

	wait = c->c_wait;
	while (wait) {
		sleep_setup(&sls, c, PWAIT, wmesg);
		wait = c->c_wait;
		sleep_finish(&sls, wait);
	}
}
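/*
 * Hedged sketch of the reference-count pattern the refcnt_*() functions
 * above implement (illustration only; the softc, function names and wait
 * message are hypothetical).  The object starts with one reference held
 * by its creator; users take and release references around each use; the
 * teardown path gives up the creator's reference and waits in
 * refcnt_finalize() until every other holder is gone before freeing:
 *
 *	attach:		refcnt_init(&sc->sc_refs);
 *	per use:	refcnt_take(&sc->sc_refs);
 *			... use the object ...
 *			refcnt_rele_wake(&sc->sc_refs);
 *	detach:		refcnt_finalize(&sc->sc_refs, "xyzfin");
 *			free(sc, M_DEVBUF, sizeof(*sc));
 */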