/*	$OpenBSD: kern_synch.c,v 1.189 2022/06/28 09:32:27 bluhm Exp $	*/
/*	$NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.6 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/timeout.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/pool.h>
#include <sys/refcnt.h>
#include <sys/atomic.h>
#include <sys/witness.h>
#include <sys/tracepoint.h>

#include <ddb/db_output.h>

#include <machine/spinlock.h>

#ifdef DIAGNOSTIC
#include <sys/syslog.h>
#endif

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

int	sleep_signal_check(void);
int	thrsleep(struct proc *, struct sys___thrsleep_args *);
int	thrsleep_unlock(void *);

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
#define LOOKUP(x)	(((long)(x) >> 8) & (TABLESIZE - 1))
TAILQ_HEAD(slpque,proc) slpque[TABLESIZE];

void
sleep_queue_init(void)
{
	int i;

	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}
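
/*
 * A worked example of the hash above, for an illustrative (made up)
 * kernel address: LOOKUP(0xffff800012345678) shifts right by 8,
 * giving 0xffff8000123456, then masks with TABLESIZE - 1 (0x7f) to
 * keep the low 7 bits, selecting bucket 0x56, i.e. slpque[86].
 */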

/*
 * Global sleep channel for threads that do not want to
 * receive wakeup(9) broadcasts.
 */
int nowake;

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
extern int safepri;

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 */
int
tsleep(const volatile void *ident, int priority, const char *wmesg, int timo)
{
	struct sleep_state sls;
#ifdef MULTIPROCESSOR
	int hold_count;
#endif

	KASSERT((priority & ~(PRIMASK | PCATCH)) == 0);
	KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0);

#ifdef MULTIPROCESSOR
	KASSERT(timo || _kernel_lock_held());
#endif

#ifdef DDB
	if (cold == 2)
		db_stack_dump();
#endif
	if (cold || panicstr) {
		int s;
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		s = splhigh();
		splx(safepri);
#ifdef MULTIPROCESSOR
		if (_kernel_lock_held()) {
			hold_count = __mp_release_all(&kernel_lock);
			__mp_acquire_count(&kernel_lock, hold_count);
		}
#endif
		splx(s);
		return (0);
	}

	sleep_setup(&sls, ident, priority, wmesg, timo);
	return sleep_finish(&sls, 1);
}
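
/*
 * A minimal usage sketch for tsleep(); the softc and its sc_done flag
 * are hypothetical.  A driver waits up to one second per loop for an
 * interrupt handler to signal completion:
 *
 *	while (!sc->sc_done) {
 *		error = tsleep(&sc->sc_done, PWAIT | PCATCH, "scdone", hz);
 *		if (error)
 *			break;
 *	}
 *
 * A nonzero error is EWOULDBLOCK, EINTR or ERESTART.  The condition is
 * re-tested after every wakeup because a thread sleeping on a colliding
 * channel may be woken spuriously.
 */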

int
tsleep_nsec(const volatile void *ident, int priority, const char *wmesg,
    uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return tsleep(ident, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING,
		    "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n",
		    __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid,
		    wmesg);
	}
#endif
	/*
	 * We want to sleep at least nsecs nanoseconds worth of ticks.
	 *
	 *  - Clamp nsecs to prevent arithmetic overflow.
	 *
	 *  - Round nsecs up to account for any nanoseconds that do not
	 *    divide evenly into tick_nsec, otherwise we'll lose them to
	 *    integer division in the next step.  We add (tick_nsec - 1)
	 *    to keep from introducing a spurious tick if there are no
	 *    such nanoseconds, i.e. nsecs % tick_nsec == 0.
	 *
	 *  - Divide the rounded value to a count of ticks.  We divide
	 *    by (tick_nsec + 1) to discard the extra tick introduced if,
	 *    before rounding, nsecs % tick_nsec == 1.
	 *
	 *  - Finally, add a tick to the result.  We need to wait out
	 *    the current tick before we can begin counting our interval,
	 *    as we do not know how much time has elapsed since the
	 *    current tick began.
	 */
	nsecs = MIN(nsecs, UINT64_MAX - tick_nsec);
	to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	return tsleep(ident, priority, wmesg, (int)to_ticks);
}
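
/*
 * A worked example of the conversion above, assuming hz = 100 and
 * hence tick_nsec = 10000000 (illustrative values): a request to
 * sleep 25 ms, i.e. nsecs = 25000000, yields
 *
 *	(25000000 + 9999999) / 10000001 + 1 = 3 + 1 = 4 ticks
 *
 * that is, the requested 2.5 ticks rounded up to 3, plus one tick to
 * wait out the remainder of the tick already in progress.
 */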

/*
 * Same as tsleep, but if we have a mutex provided, then once we've
 * entered the sleep queue we drop the mutex.  After sleeping we re-lock.
 */
int
msleep(const volatile void *ident, struct mutex *mtx, int priority,
    const char *wmesg, int timo)
{
	struct sleep_state sls;
	int error, spl;
#ifdef MULTIPROCESSOR
	int hold_count;
#endif

	KASSERT((priority & ~(PRIMASK | PCATCH | PNORELOCK)) == 0);
	KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0);
	KASSERT(mtx != NULL);

#ifdef DDB
	if (cold == 2)
		db_stack_dump();
#endif
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		spl = MUTEX_OLDIPL(mtx);
		MUTEX_OLDIPL(mtx) = safepri;
		mtx_leave(mtx);
#ifdef MULTIPROCESSOR
		if (_kernel_lock_held()) {
			hold_count = __mp_release_all(&kernel_lock);
			__mp_acquire_count(&kernel_lock, hold_count);
		}
#endif
		if ((priority & PNORELOCK) == 0) {
			mtx_enter(mtx);
			MUTEX_OLDIPL(mtx) = spl;
		} else
			splx(spl);
		return (0);
	}

	sleep_setup(&sls, ident, priority, wmesg, timo);

	/* XXX - We need to make sure that the mutex doesn't
	 * unblock splsched.  This can be made a bit more
	 * correct when the sched_lock is a mutex.
	 */
	spl = MUTEX_OLDIPL(mtx);
	MUTEX_OLDIPL(mtx) = splsched();
	mtx_leave(mtx);
	/* signal may stop the process, release mutex before that */
	error = sleep_finish(&sls, 1);

	if ((priority & PNORELOCK) == 0) {
		mtx_enter(mtx);
		MUTEX_OLDIPL(mtx) = spl; /* put the ipl back */
	} else
		splx(spl);

	return error;
}

int
msleep_nsec(const volatile void *ident, struct mutex *mtx, int priority,
    const char *wmesg, uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return msleep(ident, mtx, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING,
		    "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n",
		    __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid,
		    wmesg);
	}
#endif
	nsecs = MIN(nsecs, UINT64_MAX - tick_nsec);
	to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	return msleep(ident, mtx, priority, wmesg, (int)to_ticks);
}

/*
 * Same as tsleep, but if we have a rwlock provided, then once we've
 * entered the sleep queue we drop the rwlock.  After sleeping we re-lock.
 */
int
rwsleep(const volatile void *ident, struct rwlock *rwl, int priority,
    const char *wmesg, int timo)
{
	struct sleep_state sls;
	int error, status;

	KASSERT((priority & ~(PRIMASK | PCATCH | PNORELOCK)) == 0);
	KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0);
	rw_assert_anylock(rwl);
	status = rw_status(rwl);

	sleep_setup(&sls, ident, priority, wmesg, timo);

	rw_exit(rwl);
	/* signal may stop the process, release rwlock before that */
	error = sleep_finish(&sls, 1);

	if ((priority & PNORELOCK) == 0)
		rw_enter(rwl, status);

	return error;
}

int
rwsleep_nsec(const volatile void *ident, struct rwlock *rwl, int priority,
    const char *wmesg, uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return rwsleep(ident, rwl, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING,
		    "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n",
		    __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid,
		    wmesg);
	}
#endif
	nsecs = MIN(nsecs, UINT64_MAX - tick_nsec);
	to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	return rwsleep(ident, rwl, priority, wmesg, (int)to_ticks);
}
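
/*
 * A sketch of the lock-aware variants; the queue structure, its mutex
 * and its item list are hypothetical.  The mutex is held across the
 * test and released only once the thread is on the sleep queue,
 * closing the missed-wakeup window:
 *
 *	mtx_enter(&q->q_mtx);
 *	while (TAILQ_EMPTY(&q->q_items))
 *		msleep_nsec(&q->q_items, &q->q_mtx, PWAIT, "qwait", INFSLP);
 *	item = TAILQ_FIRST(&q->q_items);
 *	mtx_leave(&q->q_mtx);
 *
 * With PNORELOCK the lock is left released on return, saving a lock
 * round trip when the caller would drop it immediately anyway.
 */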

void
sleep_setup(struct sleep_state *sls, const volatile void *ident, int prio,
    const char *wmesg, int timo)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (p->p_flag & P_CANTSLEEP)
		panic("sleep: %s failed insomnia", p->p_p->ps_comm);
	if (ident == NULL)
		panic("tsleep: no ident");
	if (p->p_stat != SONPROC)
		panic("tsleep: not SONPROC");
#endif

	sls->sls_catch = prio & PCATCH;
	sls->sls_timeout = 0;

	SCHED_LOCK(sls->sls_s);

	TRACEPOINT(sched, sleep, NULL);

	p->p_wchan = ident;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_slppri = prio & PRIMASK;
	TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_runq);

	if (timo) {
		KASSERT((p->p_flag & P_TIMEOUT) == 0);
		sls->sls_timeout = 1;
		timeout_add(&p->p_sleep_to, timo);
	}
}

int
sleep_finish(struct sleep_state *sls, int do_sleep)
{
	struct proc *p = curproc;
	int error = 0, error1 = 0;

	if (sls->sls_catch != 0) {
		/*
		 * We put ourselves on the sleep queue and start our
		 * timeout before calling sleep_signal_check(), as we could
		 * stop there, and a wakeup or a SIGCONT (or both) could
		 * occur while we were stopped.  A SIGCONT would cause
		 * us to be marked as SSLEEP without resuming us, thus
		 * we must be ready for sleep when sleep_signal_check() is
		 * called.
		 * If the wakeup happens while we're stopped, p->p_wchan
		 * will be NULL upon return from sleep_signal_check().  In
		 * that case we need to unwind immediately.
		 */
		atomic_setbits_int(&p->p_flag, P_SINTR);
		if ((error = sleep_signal_check()) != 0) {
			p->p_stat = SONPROC;
			sls->sls_catch = 0;
			do_sleep = 0;
		} else if (p->p_wchan == NULL) {
			sls->sls_catch = 0;
			do_sleep = 0;
		}
	}

	if (do_sleep) {
		p->p_stat = SSLEEP;
		p->p_ru.ru_nvcsw++;
		SCHED_ASSERT_LOCKED();
		mi_switch();
	} else {
		unsleep(p);
	}

#ifdef DIAGNOSTIC
	if (p->p_stat != SONPROC)
		panic("sleep_finish !SONPROC");
#endif

	p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
	SCHED_UNLOCK(sls->sls_s);

	/*
	 * Even though this belongs to the signal handling part of sleep,
	 * we need to clear it before the ktrace.
	 */
	atomic_clearbits_int(&p->p_flag, P_SINTR);

	if (sls->sls_timeout) {
		if (p->p_flag & P_TIMEOUT) {
			error1 = EWOULDBLOCK;
		} else {
			/* This can sleep.  It must not use timeouts. */
			timeout_del_barrier(&p->p_sleep_to);
		}
		atomic_clearbits_int(&p->p_flag, P_TIMEOUT);
	}

	/* Check if the thread was woken up because of an unwind or signal */
	if (sls->sls_catch != 0)
		error = sleep_signal_check();

	/* Signal errors are higher priority than timeouts. */
	if (error == 0 && error1 != 0)
		error = error1;

	return error;
}

/*
 * Check and handle signals and suspensions around a sleep cycle.
 */
int
sleep_signal_check(void)
{
	struct proc *p = curproc;
	struct sigctx ctx;
	int err, sig;

	if ((err = single_thread_check(p, 1)) != 0)
		return err;
	if ((sig = cursig(p, &ctx)) != 0) {
		if (ctx.sig_intr)
			return EINTR;
		else
			return ERESTART;
	}
	return 0;
}
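
/*
 * sleep_setup() and sleep_finish() can also be used directly to build
 * a race-free "test condition, then sleep" loop, as refcnt_finalize()
 * and cond_wait() below do.  A minimal sketch, for a hypothetical
 * atomically updated flag:
 *
 *	struct sleep_state sls;
 *	unsigned int busy;
 *
 *	busy = atomic_load_int(&flag);
 *	while (busy) {
 *		sleep_setup(&sls, &flag, PWAIT, "flag", 0);
 *		busy = atomic_load_int(&flag);
 *		sleep_finish(&sls, busy);
 *	}
 *
 * The flag is re-read between setup and finish: once sleep_setup()
 * has queued the thread, a concurrent wakeup(9) cannot be lost, and
 * sleep_finish() only blocks if the condition still holds.
 */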

int
wakeup_proc(struct proc *p, const volatile void *chan)
{
	int s, awakened = 0;

	SCHED_LOCK(s);
	if (p->p_wchan != NULL &&
	    ((chan == NULL) || (p->p_wchan == chan))) {
		awakened = 1;
		if (p->p_stat == SSLEEP)
			setrunnable(p);
		else
			unsleep(p);
	}
	SCHED_UNLOCK(s);

	return awakened;
}

/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
void
endtsleep(void *arg)
{
	struct proc *p = arg;
	int s;

	SCHED_LOCK(s);
	if (wakeup_proc(p, NULL))
		atomic_setbits_int(&p->p_flag, P_TIMEOUT);
	SCHED_UNLOCK(s);
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(struct proc *p)
{
	SCHED_ASSERT_LOCKED();

	if (p->p_wchan != NULL) {
		TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_runq);
		p->p_wchan = NULL;
		TRACEPOINT(sched, wakeup, p->p_tid + THREAD_PID_OFFSET,
		    p->p_p->ps_pid);
	}
}

/*
 * Make a number of processes sleeping on the specified identifier runnable.
 */
void
wakeup_n(const volatile void *ident, int n)
{
	struct slpque *qp;
	struct proc *p;
	struct proc *pnext;
	int s;

	SCHED_LOCK(s);
	qp = &slpque[LOOKUP(ident)];
	for (p = TAILQ_FIRST(qp); p != NULL && n != 0; p = pnext) {
		pnext = TAILQ_NEXT(p, p_runq);
		/*
		 * This happens if wakeup(9) is called after the current
		 * thread has enqueued itself on the sleep queue and both
		 * `ident's collide.
		 */
		if (p == curproc)
			continue;
#ifdef DIAGNOSTIC
		if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("wakeup: p_stat is %d", (int)p->p_stat);
#endif
		if (wakeup_proc(p, ident))
			--n;
	}
	SCHED_UNLOCK(s);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(const volatile void *chan)
{
	wakeup_n(chan, -1);
}
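
/*
 * The producer side of the tsleep() sketch earlier in this file
 * (again with a hypothetical softc) is simply:
 *
 *	sc->sc_done = 1;
 *	wakeup(&sc->sc_done);
 *
 * The flag must be set before the wakeup: a waiter woken before the
 * flag is visible would re-test it, find it clear, and go back to
 * sleep, losing the notification.
 */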

int
sys_sched_yield(struct proc *p, void *v, register_t *retval)
{
	struct proc *q;
	uint8_t newprio;
	int s;

	SCHED_LOCK(s);
	/*
	 * If one of the threads of a multi-threaded process called
	 * sched_yield(2), drop its priority to ensure its siblings
	 * can make some progress.
	 */
	newprio = p->p_usrpri;
	TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link)
		newprio = max(newprio, q->p_runpri);
	setrunqueue(p->p_cpu, p, newprio);
	p->p_ru.ru_nvcsw++;
	mi_switch();
	SCHED_UNLOCK(s);

	return (0);
}

int
thrsleep_unlock(void *lock)
{
	static _atomic_lock_t unlocked = _ATOMIC_LOCK_UNLOCKED;
	_atomic_lock_t *atomiclock = lock;

	if (!lock)
		return 0;

	return copyout(&unlocked, atomiclock, sizeof(unlocked));
}

struct tslpentry {
	TAILQ_ENTRY(tslpentry) tslp_link;
	long tslp_ident;
};

/* thrsleep queue shared between processes */
static struct tslpqueue thrsleep_queue = TAILQ_HEAD_INITIALIZER(thrsleep_queue);
static struct rwlock thrsleep_lock = RWLOCK_INITIALIZER("thrsleeplk");

int
thrsleep(struct proc *p, struct sys___thrsleep_args *v)
{
	struct sys___thrsleep_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
		syscallarg(void *) lock;
		syscallarg(const int *) abort;
	} */ *uap = v;
	long ident = (long)SCARG(uap, ident);
	struct tslpentry entry;
	struct tslpqueue *queue;
	struct rwlock *qlock;
	struct timespec *tsp = (struct timespec *)SCARG(uap, tp);
	void *lock = SCARG(uap, lock);
	uint64_t nsecs = INFSLP;
	int abort = 0, error;
	clockid_t clock_id = SCARG(uap, clock_id);

	if (ident == 0)
		return (EINVAL);
	if (tsp != NULL) {
		struct timespec now;

		if ((error = clock_gettime(p, clock_id, &now)))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrabstimespec(p, tsp);
#endif

		if (timespeccmp(tsp, &now, <=)) {
			/* already passed: still do the unlock */
			if ((error = thrsleep_unlock(lock)))
				return (error);
			return (EWOULDBLOCK);
		}

		timespecsub(tsp, &now, tsp);
		nsecs = MIN(TIMESPEC_TO_NSEC(tsp), MAXTSLP);
	}

	if (ident == -1) {
		queue = &thrsleep_queue;
		qlock = &thrsleep_lock;
	} else {
		queue = &p->p_p->ps_tslpqueue;
		qlock = &p->p_p->ps_lock;
	}

	/* Interlock with wakeup. */
	entry.tslp_ident = ident;
	rw_enter_write(qlock);
	TAILQ_INSERT_TAIL(queue, &entry, tslp_link);
	rw_exit_write(qlock);

	error = thrsleep_unlock(lock);

	if (error == 0 && SCARG(uap, abort) != NULL)
		error = copyin(SCARG(uap, abort), &abort, sizeof(abort));

	rw_enter_write(qlock);
	if (error != 0)
		goto out;
	if (abort != 0) {
		error = EINTR;
		goto out;
	}
	if (entry.tslp_ident != 0) {
		error = rwsleep_nsec(&entry, qlock, PWAIT|PCATCH, "thrsleep",
		    nsecs);
	}

out:
	if (entry.tslp_ident != 0)
		TAILQ_REMOVE(queue, &entry, tslp_link);
	rw_exit_write(qlock);

	if (error == ERESTART)
		error = ECANCELED;

	return (error);
}

int
sys___thrsleep(struct proc *p, void *v, register_t *retval)
{
	struct sys___thrsleep_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
		syscallarg(void *) lock;
		syscallarg(const int *) abort;
	} */ *uap = v;
	struct timespec ts;
	int error;

	if (SCARG(uap, tp) != NULL) {
		if ((error = copyin(SCARG(uap, tp), &ts, sizeof(ts)))) {
			*retval = error;
			return 0;
		}
		if (!timespecisvalid(&ts)) {
			*retval = EINVAL;
			return 0;
		}
		SCARG(uap, tp) = &ts;
	}

	*retval = thrsleep(p, uap);
	return 0;
}

int
sys___thrwakeup(struct proc *p, void *v, register_t *retval)
{
	struct sys___thrwakeup_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(int) n;
	} */ *uap = v;
	struct tslpentry *entry, *tmp;
	struct tslpqueue *queue;
	struct rwlock *qlock;
	long ident = (long)SCARG(uap, ident);
	int n = SCARG(uap, n);
	int found = 0;

	if (ident == 0)
		*retval = EINVAL;
	else {
		if (ident == -1) {
			queue = &thrsleep_queue;
			qlock = &thrsleep_lock;
			/*
			 * Wake up all waiters with ident -1.  This is needed
			 * because ident -1 can be shared by multiple userspace
			 * lock state machines concurrently.  The implementation
			 * has no way to direct the wakeup to a particular
			 * state machine.
			 */
			n = 0;
		} else {
			queue = &p->p_p->ps_tslpqueue;
			qlock = &p->p_p->ps_lock;
		}

		rw_enter_write(qlock);
		TAILQ_FOREACH_SAFE(entry, queue, tslp_link, tmp) {
			if (entry->tslp_ident == ident) {
				TAILQ_REMOVE(queue, entry, tslp_link);
				entry->tslp_ident = 0;
				wakeup_one(entry);
				if (++found == n)
					break;
			}
		}
		rw_exit_write(qlock);

		if (ident == -1)
			*retval = 0;
		else
			*retval = found ? 0 : ESRCH;
	}

	return (0);
}
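
/*
 * Userland pairs these two syscalls into a futex-like primitive; the
 * real consumers are the libc/librthread synchronization routines.
 * A hedged sketch of the protocol (the shared-state layout is
 * hypothetical): a waiter publishes the ident it will block on, then
 * calls __thrsleep(ident, clock, timeout, &spinlock, &abort), which
 * releases the userland spinlock and blocks as one step; the waker
 * updates the shared state and calls __thrwakeup(ident, n) to release
 * up to n waiters.  The abort pointer, checked once after the unlock
 * but before sleeping, lets userland cancel a sleep that has not yet
 * blocked.
 */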

void
refcnt_init(struct refcnt *r)
{
	refcnt_init_trace(r, 0);
}

void
refcnt_init_trace(struct refcnt *r, int idx)
{
	r->r_traceidx = idx;
	atomic_store_int(&r->r_refs, 1);
	TRACEINDEX(refcnt, r->r_traceidx, r, 0, +1);
}

void
refcnt_take(struct refcnt *r)
{
	u_int refs;

	refs = atomic_inc_int_nv(&r->r_refs);
	KASSERT(refs != 0);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs - 1, +1);
	(void)refs;
}

int
refcnt_rele(struct refcnt *r)
{
	u_int refs;

	membar_exit_before_atomic();
	refs = atomic_dec_int_nv(&r->r_refs);
	KASSERT(refs != ~0);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs + 1, -1);
	if (refs == 0) {
		membar_enter_after_atomic();
		return (1);
	}
	return (0);
}

void
refcnt_rele_wake(struct refcnt *r)
{
	if (refcnt_rele(r))
		wakeup_one(r);
}

void
refcnt_finalize(struct refcnt *r, const char *wmesg)
{
	struct sleep_state sls;
	u_int refs;

	membar_exit_before_atomic();
	refs = atomic_dec_int_nv(&r->r_refs);
	KASSERT(refs != ~0);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs + 1, -1);
	while (refs) {
		sleep_setup(&sls, r, PWAIT, wmesg, 0);
		refs = atomic_load_int(&r->r_refs);
		sleep_finish(&sls, refs);
	}
	TRACEINDEX(refcnt, r->r_traceidx, r, refs, 0);
	/* Order subsequent loads and stores after refs == 0 load. */
	membar_sync();
}

int
refcnt_shared(struct refcnt *r)
{
	u_int refs;

	refs = atomic_load_int(&r->r_refs);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs, 0);
	return (refs > 1);
}

unsigned int
refcnt_read(struct refcnt *r)
{
	u_int refs;

	refs = atomic_load_int(&r->r_refs);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs, 0);
	return (refs);
}

void
cond_init(struct cond *c)
{
	atomic_store_int(&c->c_wait, 1);
}

void
cond_signal(struct cond *c)
{
	atomic_store_int(&c->c_wait, 0);

	wakeup_one(c);
}

void
cond_wait(struct cond *c, const char *wmesg)
{
	struct sleep_state sls;
	unsigned int wait;

	wait = atomic_load_int(&c->c_wait);
	while (wait) {
		sleep_setup(&sls, c, PWAIT, wmesg, 0);
		wait = atomic_load_int(&c->c_wait);
		sleep_finish(&sls, wait);
	}
}
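
/*
 * Usage sketches for the two helper primitives above, with a
 * hypothetical object `sc' embedding a struct refcnt sc_refs and a
 * struct cond sc_cv.
 *
 * Reference counting: the creator holds the initial reference from
 * refcnt_init(); every other user brackets its access:
 *
 *	refcnt_take(&sc->sc_refs);
 *	(use sc)
 *	refcnt_rele_wake(&sc->sc_refs);
 *
 * and the destructor drops its own reference, waits for the count to
 * reach zero, then frees:
 *
 *	refcnt_finalize(&sc->sc_refs, "scfree");
 *	free(sc, M_DEVBUF, sizeof(*sc));
 *
 * Condition signalling: after cond_init(&sc->sc_cv), one waiter calls
 * cond_wait(&sc->sc_cv, "sccv") and exactly one other thread later
 * calls cond_signal(&sc->sc_cv) to release it; the pair is single-use.
 */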