/*	$OpenBSD: kern_synch.c,v 1.149 2019/06/18 15:53:11 visa Exp $	*/
/*	$NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.6 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/timeout.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/pool.h>
#include <sys/refcnt.h>
#include <sys/atomic.h>
#include <sys/witness.h>
#include <ddb/db_output.h>

#include <machine/spinlock.h>

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

int	thrsleep(struct proc *, struct sys___thrsleep_args *);
int	thrsleep_unlock(void *);

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
#define LOOKUP(x)	(((long)(x) >> 8) & (TABLESIZE - 1))
TAILQ_HEAD(slpque,proc) slpque[TABLESIZE];

void
sleep_queue_init(void)
{
	int i;

	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
extern int safepri;
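
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): how a wait channel maps to its sleep queue bucket.  With
 * TABLESIZE == 128, idents 0x1000 and 0x9000 both hash to bucket 16
 * ((0x1000 >> 8) & 127 == 16, (0x9000 >> 8) & 127 == 16), so unrelated
 * channels can share a bucket and wakeup_n() must still compare
 * p_wchan against the ident.
 */
#if 0
static struct slpque *
example_slpque_of(const volatile void *ident)
{
	return (&slpque[LOOKUP(ident)]);
}
#endif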

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, otherwise signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 */
int
tsleep(const volatile void *ident, int priority, const char *wmesg, int timo)
{
	struct sleep_state sls;
#ifdef MULTIPROCESSOR
	int hold_count;
#endif

	KASSERT((priority & ~(PRIMASK | PCATCH)) == 0);

#ifdef MULTIPROCESSOR
	KASSERT(timo || _kernel_lock_held());
#endif

#ifdef DDB
	if (cold == 2)
		db_stack_dump();
#endif
	if (cold || panicstr) {
		int s;
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		s = splhigh();
		splx(safepri);
#ifdef MULTIPROCESSOR
		if (_kernel_lock_held()) {
			hold_count = __mp_release_all(&kernel_lock);
			__mp_acquire_count(&kernel_lock, hold_count);
		}
#endif
		splx(s);
		return (0);
	}

	sleep_setup(&sls, ident, priority, wmesg);
	sleep_setup_timeout(&sls, timo);
	sleep_setup_signal(&sls, priority);

	return sleep_finish_all(&sls, 1);
}

int
sleep_finish_all(struct sleep_state *sls, int do_sleep)
{
	int error, error1;

	sleep_finish(sls, do_sleep);
	error1 = sleep_finish_timeout(sls);
	error = sleep_finish_signal(sls);

	/* Signal errors are higher priority than timeouts. */
	if (error == 0 && error1 != 0)
		error = error1;

	return error;
}
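
/*
 * Illustrative sketch (hypothetical consumer, not in the original
 * file): the canonical tsleep() pattern re-checks its condition in a
 * loop and distinguishes the timeout and signal return values.
 */
#if 0
static int
example_wait_for_flag(volatile int *flagp)
{
	int error;

	while (*flagp == 0) {
		/* Interruptible sleep at PWAIT, capped at one second. */
		error = tsleep(flagp, PWAIT | PCATCH, "exflag", hz);
		if (error == EWOULDBLOCK)
			continue;		/* timed out, re-check */
		if (error != 0)
			return (error);		/* EINTR or ERESTART */
	}
	return (0);
}
#endif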

/*
 * Same as tsleep, but if we have a mutex provided, then once we've
 * entered the sleep queue we drop the mutex.  After sleeping we re-lock.
 */
int
msleep(const volatile void *ident, struct mutex *mtx, int priority,
    const char *wmesg, int timo)
{
	struct sleep_state sls;
	int error, spl;
#ifdef MULTIPROCESSOR
	int hold_count;
#endif

	KASSERT((priority & ~(PRIMASK | PCATCH | PNORELOCK)) == 0);
	KASSERT(mtx != NULL);

	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		spl = MUTEX_OLDIPL(mtx);
		MUTEX_OLDIPL(mtx) = safepri;
		mtx_leave(mtx);
#ifdef MULTIPROCESSOR
		if (_kernel_lock_held()) {
			hold_count = __mp_release_all(&kernel_lock);
			__mp_acquire_count(&kernel_lock, hold_count);
		}
#endif
		if ((priority & PNORELOCK) == 0) {
			mtx_enter(mtx);
			MUTEX_OLDIPL(mtx) = spl;
		} else
			splx(spl);
		return (0);
	}

	sleep_setup(&sls, ident, priority, wmesg);
	sleep_setup_timeout(&sls, timo);
	sleep_setup_signal(&sls, priority);

	/*
	 * XXX - We need to make sure that the mutex doesn't
	 * unblock splsched.  This can be made a bit more
	 * correct when the sched_lock is a mutex.
	 */
	spl = MUTEX_OLDIPL(mtx);
	MUTEX_OLDIPL(mtx) = splsched();
	mtx_leave(mtx);

	error = sleep_finish_all(&sls, 1);

	if ((priority & PNORELOCK) == 0) {
		mtx_enter(mtx);
		MUTEX_OLDIPL(mtx) = spl;	/* put the ipl back */
	} else
		splx(spl);

	return error;
}

/*
 * Same as tsleep, but if we have a rwlock provided, then once we've
 * entered the sleep queue we drop it.  After sleeping we re-lock.
 */
int
rwsleep(const volatile void *ident, struct rwlock *rwl, int priority,
    const char *wmesg, int timo)
{
	struct sleep_state sls;
	int error, status;

	KASSERT((priority & ~(PRIMASK | PCATCH | PNORELOCK)) == 0);
	rw_assert_anylock(rwl);
	status = rw_status(rwl);

	sleep_setup(&sls, ident, priority, wmesg);
	sleep_setup_timeout(&sls, timo);
	sleep_setup_signal(&sls, priority);

	rw_exit(rwl);

	error = sleep_finish_all(&sls, 1);

	if ((priority & PNORELOCK) == 0)
		rw_enter(rwl, status);

	return error;
}
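
/*
 * Illustrative sketch (hypothetical consumer, not in the original
 * file): msleep() releases the mutex only after the process is on the
 * sleep queue, so a wakeup between the condition check and the sleep
 * cannot be lost.  With PNORELOCK the mutex is left unlocked on return.
 */
#if 0
static int
example_wait_locked(struct mutex *mtx, volatile int *flagp)
{
	int error = 0;

	mtx_enter(mtx);
	while (*flagp == 0) {
		/* The mutex is dropped while asleep and re-taken after. */
		error = msleep(flagp, mtx, PWAIT | PCATCH, "exlock", 0);
		if (error != 0)
			break;
	}
	mtx_leave(mtx);
	return (error);
}
#endif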

void
sleep_setup(struct sleep_state *sls, const volatile void *ident, int prio,
    const char *wmesg)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (p->p_flag & P_CANTSLEEP)
		panic("sleep: %s failed insomnia", p->p_p->ps_comm);
	if (ident == NULL)
		panic("tsleep: no ident");
	if (p->p_stat != SONPROC)
		panic("tsleep: not SONPROC");
#endif

	sls->sls_catch = 0;
	sls->sls_do_sleep = 1;
	sls->sls_sig = 1;

	SCHED_LOCK(sls->sls_s);

	p->p_wchan = ident;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_priority = prio & PRIMASK;
	TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_runq);
}

void
sleep_finish(struct sleep_state *sls, int do_sleep)
{
	struct proc *p = curproc;

	if (sls->sls_do_sleep && do_sleep) {
		p->p_stat = SSLEEP;
		p->p_ru.ru_nvcsw++;
		SCHED_ASSERT_LOCKED();
		mi_switch();
	} else if (!do_sleep) {
		unsleep(p);
	}

#ifdef DIAGNOSTIC
	if (p->p_stat != SONPROC)
		panic("sleep_finish !SONPROC");
#endif

	p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
	SCHED_UNLOCK(sls->sls_s);

	/*
	 * Even though this belongs to the signal handling part of sleep,
	 * we need to clear it before the ktrace.
	 */
	atomic_clearbits_int(&p->p_flag, P_SINTR);
}

void
sleep_setup_timeout(struct sleep_state *sls, int timo)
{
	if (timo)
		timeout_add(&curproc->p_sleep_to, timo);
}

int
sleep_finish_timeout(struct sleep_state *sls)
{
	struct proc *p = curproc;

	if (p->p_flag & P_TIMEOUT) {
		atomic_clearbits_int(&p->p_flag, P_TIMEOUT);
		return (EWOULDBLOCK);
	} else {
		/* This must not sleep. */
		timeout_del_barrier(&p->p_sleep_to);
		KASSERT((p->p_flag & P_TIMEOUT) == 0);
	}

	return (0);
}

void
sleep_setup_signal(struct sleep_state *sls, int prio)
{
	struct proc *p = curproc;

	if ((sls->sls_catch = (prio & PCATCH)) == 0)
		return;

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, p->p_wchan will be 0 upon return from CURSIG.
	 */
	atomic_setbits_int(&p->p_flag, P_SINTR);
	if (p->p_p->ps_single != NULL || (sls->sls_sig = CURSIG(p)) != 0) {
		if (p->p_wchan)
			unsleep(p);
		p->p_stat = SONPROC;
		sls->sls_do_sleep = 0;
	} else if (p->p_wchan == 0) {
		sls->sls_catch = 0;
		sls->sls_do_sleep = 0;
	}
}

int
sleep_finish_signal(struct sleep_state *sls)
{
	struct proc *p = curproc;
	int error;

	if (sls->sls_catch != 0) {
		if ((error = single_thread_check(p, 1)))
			return (error);
		if (sls->sls_sig != 0 || (sls->sls_sig = CURSIG(p)) != 0) {
			if (p->p_p->ps_sigacts->ps_sigintr &
			    sigmask(sls->sls_sig))
				return (EINTR);
			return (ERESTART);
		}
	}

	return (0);
}

/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
void
endtsleep(void *arg)
{
	struct proc *p = arg;
	int s;

	SCHED_LOCK(s);
	if (p->p_wchan) {
		if (p->p_stat == SSLEEP)
			setrunnable(p);
		else
			unsleep(p);
		atomic_setbits_int(&p->p_flag, P_TIMEOUT);
	}
	SCHED_UNLOCK(s);
}

/*
 * Remove a process from its wait queue.
 */
void
unsleep(struct proc *p)
{
	SCHED_ASSERT_LOCKED();

	if (p->p_wchan) {
		TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_runq);
		p->p_wchan = NULL;
	}
}

/*
 * Make a number of processes sleeping on the specified identifier runnable.
 */
void
wakeup_n(const volatile void *ident, int n)
{
	struct slpque *qp;
	struct proc *p;
	struct proc *pnext;
	int s;

	SCHED_LOCK(s);
	qp = &slpque[LOOKUP(ident)];
	for (p = TAILQ_FIRST(qp); p != NULL && n != 0; p = pnext) {
		pnext = TAILQ_NEXT(p, p_runq);
#ifdef DIAGNOSTIC
		/*
		 * If the rwlock passed to rwsleep() is contended, the
		 * CPU will end up calling wakeup() between sleep_setup()
		 * and sleep_finish().
		 */
		if (p == curproc) {
			KASSERT(p->p_stat == SONPROC);
			continue;
		}
		if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("wakeup: p_stat is %d", (int)p->p_stat);
#endif
		if (p->p_wchan == ident) {
			--n;
			p->p_wchan = 0;
			TAILQ_REMOVE(qp, p, p_runq);
			if (p->p_stat == SSLEEP)
				setrunnable(p);
		}
	}
	SCHED_UNLOCK(s);
}
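
/*
 * Illustrative sketch (hypothetical producer, not in the original
 * file): the producer side that pairs with the tsleep()/msleep()
 * consumers above sets the condition first, then calls wakeup(), so a
 * sleeper that re-checks the condition cannot block forever.
 */
#if 0
static void
example_post_flag(volatile int *flagp)
{
	*flagp = 1;		/* publish the condition... */
	wakeup(flagp);		/* ...then wake all sleepers on it */
}
#endif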

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(const volatile void *chan)
{
	wakeup_n(chan, -1);
}

int
sys_sched_yield(struct proc *p, void *v, register_t *retval)
{
	struct proc *q;
	int s;

	SCHED_LOCK(s);
	/*
	 * If one of the threads of a multi-threaded process called
	 * sched_yield(2), drop its priority to ensure its siblings
	 * can make some progress.
	 */
	p->p_priority = p->p_usrpri;
	TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link)
		p->p_priority = max(p->p_priority, q->p_priority);
	p->p_stat = SRUN;
	setrunqueue(p);
	p->p_ru.ru_nvcsw++;
	mi_switch();
	SCHED_UNLOCK(s);

	return (0);
}

int
thrsleep_unlock(void *lock)
{
	static _atomic_lock_t unlocked = _ATOMIC_LOCK_UNLOCKED;
	_atomic_lock_t *atomiclock = lock;

	if (!lock)
		return 0;

	return copyout(&unlocked, atomiclock, sizeof(unlocked));
}

static int globalsleepaddr;

int
thrsleep(struct proc *p, struct sys___thrsleep_args *v)
{
	struct sys___thrsleep_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
		syscallarg(void *) lock;
		syscallarg(const int *) abort;
	} */ *uap = v;
	long ident = (long)SCARG(uap, ident);
	struct timespec *tsp = (struct timespec *)SCARG(uap, tp);
	void *lock = SCARG(uap, lock);
	uint64_t to_ticks = 0;
	int abort, error;
	clockid_t clock_id = SCARG(uap, clock_id);

	if (ident == 0)
		return (EINVAL);
	if (tsp != NULL) {
		struct timespec now;

		if ((error = clock_gettime(p, clock_id, &now)))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrabstimespec(p, tsp);
#endif

		if (timespeccmp(tsp, &now, <)) {
			/* already passed: still do the unlock */
			if ((error = thrsleep_unlock(lock)))
				return (error);
			return (EWOULDBLOCK);
		}

		timespecsub(tsp, &now, tsp);
		to_ticks = (uint64_t)hz * tsp->tv_sec +
		    (tsp->tv_nsec + tick * 1000 - 1) / (tick * 1000) + 1;
		if (to_ticks > INT_MAX)
			to_ticks = INT_MAX;
	}

	p->p_thrslpid = ident;

	if ((error = thrsleep_unlock(lock)))
		goto out;

	if (SCARG(uap, abort) != NULL) {
		if ((error = copyin(SCARG(uap, abort), &abort,
		    sizeof(abort))) != 0)
			goto out;
		if (abort) {
			error = EINTR;
			goto out;
		}
	}

	if (p->p_thrslpid == 0)
		error = 0;
	else {
		void *sleepaddr = &p->p_thrslpid;
		if (ident == -1)
			sleepaddr = &globalsleepaddr;
		error = tsleep(sleepaddr, PUSER | PCATCH, "thrsleep",
		    (int)to_ticks);
	}

out:
	p->p_thrslpid = 0;

	if (error == ERESTART)
		error = ECANCELED;

	return (error);
}

int
sys___thrsleep(struct proc *p, void *v, register_t *retval)
{
	struct sys___thrsleep_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
		syscallarg(void *) lock;
		syscallarg(const int *) abort;
	} */ *uap = v;
	struct timespec ts;
	int error;

	if (SCARG(uap, tp) != NULL) {
		if ((error = copyin(SCARG(uap, tp), &ts, sizeof(ts)))) {
			*retval = error;
			return 0;
		}
		if (!timespecisvalid(&ts)) {
			*retval = EINVAL;
			return 0;
		}
		SCARG(uap, tp) = &ts;
	}

	*retval = thrsleep(p, uap);
	return 0;
}
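
/*
 * Worked example (illustrative, not in the original file): the timeout
 * conversion in thrsleep() rounds the remaining time up to whole ticks
 * and adds one tick of slop.  With hz == 100 (tick == 10000
 * microseconds), a remaining delta of 1.5 seconds gives
 *
 *	to_ticks = 100 * 1 + (500000000 + 9999999) / 10000000 + 1
 *	         = 100 + 50 + 1 = 151 ticks,
 *
 * i.e. slightly more than 1.5 seconds, so the sleep never expires
 * early due to truncation.
 */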

int
sys___thrwakeup(struct proc *p, void *v, register_t *retval)
{
	struct sys___thrwakeup_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(int) n;
	} */ *uap = v;
	long ident = (long)SCARG(uap, ident);
	int n = SCARG(uap, n);
	struct proc *q;
	int found = 0;

	if (ident == 0)
		*retval = EINVAL;
	else if (ident == -1)
		wakeup(&globalsleepaddr);
	else {
		TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link) {
			if (q->p_thrslpid == ident) {
				wakeup_one(&q->p_thrslpid);
				q->p_thrslpid = 0;
				if (++found == n)
					break;
			}
		}
		*retval = found ? 0 : ESRCH;
	}

	return (0);
}

void
refcnt_init(struct refcnt *r)
{
	r->refs = 1;
}

void
refcnt_take(struct refcnt *r)
{
#ifdef DIAGNOSTIC
	u_int refcnt;

	refcnt = atomic_inc_int_nv(&r->refs);
	KASSERT(refcnt != 0);
#else
	atomic_inc_int(&r->refs);
#endif
}

int
refcnt_rele(struct refcnt *r)
{
	u_int refcnt;

	refcnt = atomic_dec_int_nv(&r->refs);
	KASSERT(refcnt != ~0);

	return (refcnt == 0);
}

void
refcnt_rele_wake(struct refcnt *r)
{
	if (refcnt_rele(r))
		wakeup_one(r);
}

void
refcnt_finalize(struct refcnt *r, const char *wmesg)
{
	struct sleep_state sls;
	u_int refcnt;

	refcnt = atomic_dec_int_nv(&r->refs);
	while (refcnt) {
		sleep_setup(&sls, r, PWAIT, wmesg);
		refcnt = r->refs;
		sleep_finish(&sls, refcnt);
	}
}

void
cond_init(struct cond *c)
{
	c->c_wait = 1;
}

void
cond_signal(struct cond *c)
{
	c->c_wait = 0;

	wakeup_one(c);
}

void
cond_wait(struct cond *c, const char *wmesg)
{
	struct sleep_state sls;
	int wait;

	wait = c->c_wait;
	while (wait) {
		sleep_setup(&sls, c, PWAIT, wmesg);
		wait = c->c_wait;
		sleep_finish(&sls, wait);
	}
}
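
/*
 * Illustrative sketch (hypothetical object, not in the original file):
 * the refcnt API above pairs one refcnt_take() per reference with one
 * refcnt_rele_wake(), while the destroying thread calls
 * refcnt_finalize() to drop its own reference and sleep until the
 * count reaches zero.
 */
#if 0
struct example_obj {
	struct refcnt	o_refs;
	/* ... object state ... */
};

static void
example_obj_destroy(struct example_obj *o)
{
	/* Blocks until every other holder has called refcnt_rele_wake(). */
	refcnt_finalize(&o->o_refs, "exfin");
	free(o, M_DEVBUF, sizeof(*o));
}
#endif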