/*	$OpenBSD: kern_synch.c,v 1.163 2020/03/02 13:55:15 bluhm Exp $	*/
/*	$NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.6 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/timeout.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/pool.h>
#include <sys/refcnt.h>
#include <sys/atomic.h>
#include <sys/witness.h>
#include <sys/tracepoint.h>

#include <ddb/db_output.h>

#include <machine/spinlock.h>

#ifdef DIAGNOSTIC
#include <sys/syslog.h>
#endif

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

int	thrsleep(struct proc *, struct sys___thrsleep_args *);
int	thrsleep_unlock(void *);

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
#define LOOKUP(x)	(((long)(x) >> 8) & (TABLESIZE - 1))
TAILQ_HEAD(slpque,proc) slpque[TABLESIZE];
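/*
 * A worked example of the hash above (illustrative only, using a
 * made-up kernel address): for an ident of 0xffff800000123456,
 * shifting right by 8 drops the low byte, leaving 0x34 in the low
 * bits, and masking with (TABLESIZE - 1) == 0x7f keeps those 7 bits,
 * so the sleeper is queued on slpque[0x34], i.e. bucket 52.
 */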
void
sleep_queue_init(void)
{
	int i;

	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}


/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
extern int safepri;

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 */
int
tsleep(const volatile void *ident, int priority, const char *wmesg, int timo)
{
	struct sleep_state sls;
#ifdef MULTIPROCESSOR
	int hold_count;
#endif

	KASSERT((priority & ~(PRIMASK | PCATCH)) == 0);

#ifdef MULTIPROCESSOR
	KASSERT(timo || _kernel_lock_held());
#endif

#ifdef DDB
	if (cold == 2)
		db_stack_dump();
#endif
	if (cold || panicstr) {
		int s;
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		s = splhigh();
		splx(safepri);
#ifdef MULTIPROCESSOR
		if (_kernel_lock_held()) {
			hold_count = __mp_release_all(&kernel_lock);
			__mp_acquire_count(&kernel_lock, hold_count);
		}
#endif
		splx(s);
		return (0);
	}

	sleep_setup(&sls, ident, priority, wmesg);
	sleep_setup_timeout(&sls, timo);
	sleep_setup_signal(&sls);

	return sleep_finish_all(&sls, 1);
}

int
tsleep_nsec(const volatile void *ident, int priority, const char *wmesg,
    uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return tsleep(ident, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING,
		    "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n",
		    __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid,
		    wmesg);
	}
#endif
	/*
	 * We want to sleep at least nsecs nanoseconds worth of ticks.
	 *
	 *  - Clamp nsecs to prevent arithmetic overflow.
	 *
	 *  - Round nsecs up to account for any nanoseconds that do not
	 *    divide evenly into tick_nsec, otherwise we'll lose them to
	 *    integer division in the next step.  We add (tick_nsec - 1)
	 *    to keep from introducing a spurious tick if there are no
	 *    such nanoseconds, i.e. nsecs % tick_nsec == 0.
	 *
	 *  - Divide the rounded value into a count of ticks.  We divide
	 *    by (tick_nsec + 1) to discard the extra tick introduced if,
	 *    before rounding, nsecs % tick_nsec == 1.
	 *
	 *  - Finally, add a tick to the result.  We need to wait out
	 *    the current tick before we can begin counting our interval,
	 *    as we do not know how much time has elapsed since the
	 *    current tick began.
	 */
	nsecs = MIN(nsecs, UINT64_MAX - tick_nsec);
	to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	return tsleep(ident, priority, wmesg, (int)to_ticks);
}
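/*
 * A worked example of the conversion above (illustrative numbers, not
 * taken from this file): assuming HZ=100, so tick_nsec == 10000000, a
 * request for nsecs == 25000000 (25ms) is rounded up to 34999999,
 * divided by 10000001 to give 3 ticks, and one more tick is added for
 * the partial tick we are currently in, so to_ticks == 4.  The caller
 * is therefore guaranteed at least three full 10ms ticks, i.e. at
 * least the 25ms it asked for.
 */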
int
sleep_finish_all(struct sleep_state *sls, int do_sleep)
{
	int error, error1;

	sleep_finish(sls, do_sleep);
	error1 = sleep_finish_timeout(sls);
	error = sleep_finish_signal(sls);

	/* Signal errors are higher priority than timeouts. */
	if (error == 0 && error1 != 0)
		error = error1;

	return error;
}

/*
 * Same as tsleep, but if we have a mutex provided, then once we've
 * entered the sleep queue we drop the mutex.  After sleeping we re-lock.
 */
int
msleep(const volatile void *ident, struct mutex *mtx, int priority,
    const char *wmesg, int timo)
{
	struct sleep_state sls;
	int error, spl;
#ifdef MULTIPROCESSOR
	int hold_count;
#endif

	KASSERT((priority & ~(PRIMASK | PCATCH | PNORELOCK)) == 0);
	KASSERT(mtx != NULL);

	if (priority & PCATCH)
		KERNEL_ASSERT_LOCKED();

	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		spl = MUTEX_OLDIPL(mtx);
		MUTEX_OLDIPL(mtx) = safepri;
		mtx_leave(mtx);
#ifdef MULTIPROCESSOR
		if (_kernel_lock_held()) {
			hold_count = __mp_release_all(&kernel_lock);
			__mp_acquire_count(&kernel_lock, hold_count);
		}
#endif
		if ((priority & PNORELOCK) == 0) {
			mtx_enter(mtx);
			MUTEX_OLDIPL(mtx) = spl;
		} else
			splx(spl);
		return (0);
	}

	sleep_setup(&sls, ident, priority, wmesg);
	sleep_setup_timeout(&sls, timo);

	/* XXX - We need to make sure that the mutex doesn't
	 * unblock splsched. This can be made a bit more
	 * correct when the sched_lock is a mutex.
	 */
	spl = MUTEX_OLDIPL(mtx);
	MUTEX_OLDIPL(mtx) = splsched();
	mtx_leave(mtx);
	/* signal may stop the process, release mutex before that */
	sleep_setup_signal(&sls);

	error = sleep_finish_all(&sls, 1);

	if ((priority & PNORELOCK) == 0) {
		mtx_enter(mtx);
		MUTEX_OLDIPL(mtx) = spl; /* put the ipl back */
	} else
		splx(spl);

	return error;
}

int
msleep_nsec(const volatile void *ident, struct mutex *mtx, int priority,
    const char *wmesg, uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return msleep(ident, mtx, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING,
		    "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n",
		    __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid,
		    wmesg);
	}
#endif
	nsecs = MIN(nsecs, UINT64_MAX - tick_nsec);
	to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	return msleep(ident, mtx, priority, wmesg, (int)to_ticks);
}
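/*
 * A minimal usage sketch for msleep_nsec() (illustrative only; "sc"
 * and its members are hypothetical, not code from this file): the
 * caller holds the mutex, re-checks its condition in a loop, and lets
 * msleep_nsec() drop and re-take the mutex around the sleep, while the
 * other side changes the condition under the mutex and calls wakeup().
 *
 *	mtx_enter(&sc->sc_mtx);
 *	while (sc->sc_busy)
 *		msleep_nsec(&sc->sc_busy, &sc->sc_mtx, PWAIT,
 *		    "scbusy", INFSLP);
 *	sc->sc_busy = 1;
 *	mtx_leave(&sc->sc_mtx);
 */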
/*
 * Same as tsleep, but if we have an rwlock provided, then once we've
 * entered the sleep queue we drop the rwlock.  After sleeping we re-lock.
 */
int
rwsleep(const volatile void *ident, struct rwlock *rwl, int priority,
    const char *wmesg, int timo)
{
	struct sleep_state sls;
	int error, status;

	KASSERT((priority & ~(PRIMASK | PCATCH | PNORELOCK)) == 0);
	rw_assert_anylock(rwl);
	status = rw_status(rwl);

	sleep_setup(&sls, ident, priority, wmesg);
	sleep_setup_timeout(&sls, timo);

	rw_exit(rwl);
	/* signal may stop the process, release rwlock before that */
	sleep_setup_signal(&sls);

	error = sleep_finish_all(&sls, 1);

	if ((priority & PNORELOCK) == 0)
		rw_enter(rwl, status);

	return error;
}

int
rwsleep_nsec(const volatile void *ident, struct rwlock *rwl, int priority,
    const char *wmesg, uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return rwsleep(ident, rwl, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING,
		    "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n",
		    __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid,
		    wmesg);
	}
#endif
	nsecs = MIN(nsecs, UINT64_MAX - tick_nsec);
	to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	return rwsleep(ident, rwl, priority, wmesg, (int)to_ticks);
}

void
sleep_setup(struct sleep_state *sls, const volatile void *ident, int prio,
    const char *wmesg)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (p->p_flag & P_CANTSLEEP)
		panic("sleep: %s failed insomnia", p->p_p->ps_comm);
	if (ident == NULL)
		panic("tsleep: no ident");
	if (p->p_stat != SONPROC)
		panic("tsleep: not SONPROC");
#endif

	sls->sls_catch = prio & PCATCH;
	sls->sls_do_sleep = 1;
	sls->sls_locked = 0;
	sls->sls_sig = 1;
	sls->sls_timeout = 0;

	/*
	 * The kernel has to be locked for signal processing.
	 * This is done here and not in sleep_setup_signal() because
	 * KERNEL_LOCK() has to be taken before SCHED_LOCK().
	 */
	if (sls->sls_catch != 0) {
		KERNEL_LOCK();
		sls->sls_locked = 1;
	}

	SCHED_LOCK(sls->sls_s);

	TRACEPOINT(sched, sleep, NULL);

	p->p_wchan = ident;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_slppri = prio & PRIMASK;
	TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_runq);
}

void
sleep_finish(struct sleep_state *sls, int do_sleep)
{
	struct proc *p = curproc;

	if (sls->sls_do_sleep && do_sleep) {
		p->p_stat = SSLEEP;
		p->p_ru.ru_nvcsw++;
		SCHED_ASSERT_LOCKED();
		mi_switch();
	} else if (!do_sleep) {
		unsleep(p);
	}

#ifdef DIAGNOSTIC
	if (p->p_stat != SONPROC)
		panic("sleep_finish !SONPROC");
#endif

	p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
	SCHED_UNLOCK(sls->sls_s);

	/*
	 * Even though this belongs to the signal handling part of sleep,
	 * we need to clear it before the ktrace.
	 */
	atomic_clearbits_int(&p->p_flag, P_SINTR);
}
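/*
 * Illustrative sketch of how the low-level sleep_setup()/sleep_finish()
 * pair is composed by callers that do not need the timeout or signal
 * machinery (names here are hypothetical; refcnt_finalize() and
 * cond_wait() below are the in-tree users of this pattern): the
 * condition is re-checked after the proc is on the sleep queue, and
 * sleep_finish() only really sleeps if it is still unmet, so a wakeup
 * that races with the re-check is never lost.
 *
 *	struct sleep_state sls;
 *	int unmet;
 *
 *	unmet = condition_is_unmet();
 *	while (unmet) {
 *		sleep_setup(&sls, &chan, PWAIT, "example");
 *		unmet = condition_is_unmet();
 *		sleep_finish(&sls, unmet);
 *	}
 */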
void
sleep_setup_timeout(struct sleep_state *sls, int timo)
{
	struct proc *p = curproc;

	if (timo) {
		KASSERT((p->p_flag & P_TIMEOUT) == 0);
		sls->sls_timeout = 1;
		timeout_add(&p->p_sleep_to, timo);
	}
}

int
sleep_finish_timeout(struct sleep_state *sls)
{
	struct proc *p = curproc;

	if (sls->sls_timeout) {
		if (p->p_flag & P_TIMEOUT) {
			atomic_clearbits_int(&p->p_flag, P_TIMEOUT);
			return (EWOULDBLOCK);
		} else {
			/* This must not sleep. */
			timeout_del_barrier(&p->p_sleep_to);
			KASSERT((p->p_flag & P_TIMEOUT) == 0);
		}
	}

	return (0);
}

void
sleep_setup_signal(struct sleep_state *sls)
{
	struct proc *p = curproc;

	if (sls->sls_catch == 0)
		return;

	/* sleep_setup() has locked the kernel. */
	KERNEL_ASSERT_LOCKED();

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, p->p_wchan will be 0 upon return from CURSIG.
	 */
	atomic_setbits_int(&p->p_flag, P_SINTR);
	if (p->p_p->ps_single != NULL || (sls->sls_sig = CURSIG(p)) != 0) {
		unsleep(p);
		p->p_stat = SONPROC;
		sls->sls_do_sleep = 0;
	} else if (p->p_wchan == 0) {
		sls->sls_catch = 0;
		sls->sls_do_sleep = 0;
	}
}

int
sleep_finish_signal(struct sleep_state *sls)
{
	struct proc *p = curproc;
	int error = 0;

	if (sls->sls_catch != 0) {
		KERNEL_ASSERT_LOCKED();

		error = single_thread_check(p, 1);
		if (error == 0 &&
		    (sls->sls_sig != 0 || (sls->sls_sig = CURSIG(p)) != 0)) {
			if (p->p_p->ps_sigacts->ps_sigintr &
			    sigmask(sls->sls_sig))
				error = EINTR;
			else
				error = ERESTART;
		}
	}

	if (sls->sls_locked)
		KERNEL_UNLOCK();

	return (error);
}

int
wakeup_proc(struct proc *p, const volatile void *chan)
{
	int s, awakened = 0;

	SCHED_LOCK(s);
	if (p->p_wchan != NULL &&
	    ((chan == NULL) || (p->p_wchan == chan))) {
		awakened = 1;
		if (p->p_stat == SSLEEP)
			setrunnable(p);
		else
			unsleep(p);
	}
	SCHED_UNLOCK(s);

	return awakened;
}

/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
void
endtsleep(void *arg)
{
	struct proc *p = arg;
	int s;

	SCHED_LOCK(s);
	if (wakeup_proc(p, NULL))
		atomic_setbits_int(&p->p_flag, P_TIMEOUT);
	SCHED_UNLOCK(s);
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(struct proc *p)
{
	SCHED_ASSERT_LOCKED();

	if (p->p_wchan != NULL) {
		TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_runq);
		p->p_wchan = NULL;
		TRACEPOINT(sched, wakeup, p->p_tid, p->p_p->ps_pid);
	}
}
/*
 * Make a number of processes sleeping on the specified identifier runnable.
 */
void
wakeup_n(const volatile void *ident, int n)
{
	struct slpque *qp;
	struct proc *p;
	struct proc *pnext;
	int s;

	SCHED_LOCK(s);
	qp = &slpque[LOOKUP(ident)];
	for (p = TAILQ_FIRST(qp); p != NULL && n != 0; p = pnext) {
		pnext = TAILQ_NEXT(p, p_runq);
#ifdef DIAGNOSTIC
		/*
		 * If the rwlock passed to rwsleep() is contended, the
		 * CPU will end up calling wakeup() between sleep_setup()
		 * and sleep_finish().
		 */
		if (p == curproc) {
			KASSERT(p->p_stat == SONPROC);
			continue;
		}
		if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("wakeup: p_stat is %d", (int)p->p_stat);
#endif
		if (wakeup_proc(p, ident))
			--n;
	}
	SCHED_UNLOCK(s);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(const volatile void *chan)
{
	wakeup_n(chan, -1);
}
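/*
 * A minimal sketch of the tsleep_nsec()/wakeup() pairing (illustrative
 * only; "sc" and its members are hypothetical): the sleeping side loops
 * on its condition and sleeps on an address, the waking side changes
 * the condition first and then wakes the same address.  Some interlock,
 * such as the kernel lock, must serialize the check against the wakeup.
 *
 *	while (sc->sc_ready == 0)				(waiter)
 *		tsleep_nsec(&sc->sc_ready, PWAIT, "scrdy", INFSLP);
 *
 *	sc->sc_ready = 1;					(waker)
 *	wakeup(&sc->sc_ready);
 */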
int
sys_sched_yield(struct proc *p, void *v, register_t *retval)
{
	struct proc *q;
	uint8_t newprio;
	int s;

	SCHED_LOCK(s);
	/*
	 * If one of the threads of a multi-threaded process called
	 * sched_yield(2), drop its priority to ensure its siblings
	 * can make some progress.
	 */
	newprio = p->p_usrpri;
	TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link)
		newprio = max(newprio, q->p_runpri);
	setrunqueue(p->p_cpu, p, newprio);
	p->p_ru.ru_nvcsw++;
	mi_switch();
	SCHED_UNLOCK(s);

	return (0);
}

int
thrsleep_unlock(void *lock)
{
	static _atomic_lock_t unlocked = _ATOMIC_LOCK_UNLOCKED;
	_atomic_lock_t *atomiclock = lock;

	if (!lock)
		return 0;

	return copyout(&unlocked, atomiclock, sizeof(unlocked));
}

struct tslpentry {
	TAILQ_ENTRY(tslpentry)	tslp_link;
	long			tslp_ident;
};

/* thrsleep queue shared between processes */
static struct tslpqueue thrsleep_queue = TAILQ_HEAD_INITIALIZER(thrsleep_queue);
static struct rwlock thrsleep_lock = RWLOCK_INITIALIZER("thrsleeplk");

int
thrsleep(struct proc *p, struct sys___thrsleep_args *v)
{
	struct sys___thrsleep_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
		syscallarg(void *) lock;
		syscallarg(const int *) abort;
	} */ *uap = v;
	long ident = (long)SCARG(uap, ident);
	struct tslpentry entry;
	struct tslpqueue *queue;
	struct rwlock *qlock;
	struct timespec *tsp = (struct timespec *)SCARG(uap, tp);
	void *lock = SCARG(uap, lock);
	uint64_t nsecs = INFSLP;
	int abort = 0, error;
	clockid_t clock_id = SCARG(uap, clock_id);

	if (ident == 0)
		return (EINVAL);
	if (tsp != NULL) {
		struct timespec now;

		if ((error = clock_gettime(p, clock_id, &now)))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrabstimespec(p, tsp);
#endif

		if (timespeccmp(tsp, &now, <)) {
			/* already passed: still do the unlock */
			if ((error = thrsleep_unlock(lock)))
				return (error);
			return (EWOULDBLOCK);
		}

		timespecsub(tsp, &now, tsp);
		nsecs = TIMESPEC_TO_NSEC(tsp);
	}

	if (ident == -1) {
		queue = &thrsleep_queue;
		qlock = &thrsleep_lock;
	} else {
		queue = &p->p_p->ps_tslpqueue;
		qlock = &p->p_p->ps_lock;
	}

	/* Interlock with wakeup. */
	entry.tslp_ident = ident;
	rw_enter_write(qlock);
	TAILQ_INSERT_TAIL(queue, &entry, tslp_link);
	rw_exit_write(qlock);

	error = thrsleep_unlock(lock);

	if (error == 0 && SCARG(uap, abort) != NULL)
		error = copyin(SCARG(uap, abort), &abort, sizeof(abort));

	rw_enter_write(qlock);
	if (error != 0)
		goto out;
	if (abort != 0) {
		error = EINTR;
		goto out;
	}
	if (entry.tslp_ident != 0) {
		error = rwsleep_nsec(&entry, qlock, PWAIT|PCATCH, "thrsleep",
		    nsecs);
	}

out:
	if (entry.tslp_ident != 0)
		TAILQ_REMOVE(queue, &entry, tslp_link);
	rw_exit_write(qlock);

	if (error == ERESTART)
		error = ECANCELED;

	return (error);

}
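/*
 * A hedged sketch of the userland side of these primitives (illustrative
 * only; "l" is a hypothetical structure and the real librthread code is
 * more involved): the waiter passes the spinlock protecting its state as
 * the "lock" argument, so __thrsleep(2) releases it only after the waiter
 * has been queued, closing the race with a concurrent __thrwakeup(2).
 *
 *	_spinlock(&l->spin);					(waiter)
 *	while (l->wchan == 0) {
 *		__thrsleep(&l->wchan, CLOCK_MONOTONIC, NULL,
 *		    &l->spin, NULL);
 *		_spinlock(&l->spin);
 *	}
 *	_spinunlock(&l->spin);
 *
 *	_spinlock(&l->spin);					(waker)
 *	l->wchan = 1;
 *	_spinunlock(&l->spin);
 *	__thrwakeup(&l->wchan, 1);
 */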
int
sys___thrsleep(struct proc *p, void *v, register_t *retval)
{
	struct sys___thrsleep_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
		syscallarg(void *) lock;
		syscallarg(const int *) abort;
	} */ *uap = v;
	struct timespec ts;
	int error;

	if (SCARG(uap, tp) != NULL) {
		if ((error = copyin(SCARG(uap, tp), &ts, sizeof(ts)))) {
			*retval = error;
			return 0;
		}
		if (!timespecisvalid(&ts)) {
			*retval = EINVAL;
			return 0;
		}
		SCARG(uap, tp) = &ts;
	}

	*retval = thrsleep(p, uap);
	return 0;
}

int
sys___thrwakeup(struct proc *p, void *v, register_t *retval)
{
	struct sys___thrwakeup_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(int) n;
	} */ *uap = v;
	struct tslpentry *entry, *tmp;
	struct tslpqueue *queue;
	struct rwlock *qlock;
	long ident = (long)SCARG(uap, ident);
	int n = SCARG(uap, n);
	int found = 0;

	if (ident == 0)
		*retval = EINVAL;
	else {
		if (ident == -1) {
			queue = &thrsleep_queue;
			qlock = &thrsleep_lock;
			/*
			 * Wake up all waiters with ident -1. This is needed
			 * because ident -1 can be shared by multiple userspace
			 * lock state machines concurrently. The implementation
			 * has no way to direct the wakeup to a particular
			 * state machine.
			 */
			n = 0;
		} else {
			queue = &p->p_p->ps_tslpqueue;
			qlock = &p->p_p->ps_lock;
		}

		rw_enter_write(qlock);
		TAILQ_FOREACH_SAFE(entry, queue, tslp_link, tmp) {
			if (entry->tslp_ident == ident) {
				TAILQ_REMOVE(queue, entry, tslp_link);
				entry->tslp_ident = 0;
				wakeup_one(entry);
				if (++found == n)
					break;
			}
		}
		rw_exit_write(qlock);

		if (ident == -1)
			*retval = 0;
		else
			*retval = found ? 0 : ESRCH;
	}

	return (0);
}

void
refcnt_init(struct refcnt *r)
{
	r->refs = 1;
}

void
refcnt_take(struct refcnt *r)
{
#ifdef DIAGNOSTIC
	u_int refcnt;

	refcnt = atomic_inc_int_nv(&r->refs);
	KASSERT(refcnt != 0);
#else
	atomic_inc_int(&r->refs);
#endif
}

int
refcnt_rele(struct refcnt *r)
{
	u_int refcnt;

	refcnt = atomic_dec_int_nv(&r->refs);
	KASSERT(refcnt != ~0);

	return (refcnt == 0);
}

void
refcnt_rele_wake(struct refcnt *r)
{
	if (refcnt_rele(r))
		wakeup_one(r);
}

void
refcnt_finalize(struct refcnt *r, const char *wmesg)
{
	struct sleep_state sls;
	u_int refcnt;

	refcnt = atomic_dec_int_nv(&r->refs);
	while (refcnt) {
		sleep_setup(&sls, r, PWAIT, wmesg);
		refcnt = r->refs;
		sleep_finish(&sls, refcnt);
	}
}

void
cond_init(struct cond *c)
{
	c->c_wait = 1;
}

void
cond_signal(struct cond *c)
{
	c->c_wait = 0;

	wakeup_one(c);
}

void
cond_wait(struct cond *c, const char *wmesg)
{
	struct sleep_state sls;
	int wait;

	wait = c->c_wait;
	while (wait) {
		sleep_setup(&sls, c, PWAIT, wmesg);
		wait = c->c_wait;
		sleep_finish(&sls, wait);
	}
}
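/*
 * A minimal usage sketch for the refcnt API above (illustrative only;
 * "sc" and the "scfin" wait message are hypothetical): each user takes
 * a reference and drops it with refcnt_rele_wake(), while the thread
 * tearing the object down calls refcnt_finalize() to drop the initial
 * reference and sleep until every other reference is gone.
 *
 *	refcnt_init(&sc->sc_refcnt);			(count starts at 1)
 *
 *	refcnt_take(&sc->sc_refcnt);			(per user)
 *	...
 *	refcnt_rele_wake(&sc->sc_refcnt);
 *
 *	refcnt_finalize(&sc->sc_refcnt, "scfin");	(tear down)
 *	free(sc, M_DEVBUF, sizeof(*sc));
 */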