/*	$NetBSD: kern_sig.c,v 1.284 2008/05/19 17:06:02 ad Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sig.c	8.14 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.284 2008/05/19 17:06:02 ad Exp $");

#include "opt_ptrace.h"
#include "opt_compat_sunos.h"
#include "opt_compat_netbsd.h"
#include "opt_compat_netbsd32.h"
#include "opt_pax.h"

#define	SIGPROP		/* include signal properties table */
#include <sys/param.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/wait.h>
#include <sys/ktrace.h>
#include <sys/syslog.h>
#include <sys/filedesc.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/ucontext.h>
#include <sys/exec.h>
#include <sys/kauth.h>
#include <sys/acct.h>
#include <sys/callout.h>
#include <sys/atomic.h>
#include <sys/cpu.h>

#ifdef PAX_SEGVGUARD
#include <sys/pax.h>
#endif /* PAX_SEGVGUARD */

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>

static void	ksiginfo_exechook(struct proc *, void *);
static void	proc_stop_callout(void *);

int	sigunwait(struct proc *, const ksiginfo_t *);
void	sigput(sigpend_t *, struct proc *, ksiginfo_t *);
int	sigpost(struct lwp *, sig_t, int, int);
int	sigchecktrace(sigpend_t **);
void	sigswitch(bool, int, int);
void	sigrealloc(ksiginfo_t *);

sigset_t	contsigmask, stopsigmask, sigcantmask;
static pool_cache_t sigacts_cache;	/* memory pool for sigacts structures */
static void	sigacts_poolpage_free(struct pool *, void *);
static void	*sigacts_poolpage_alloc(struct pool *, int);
static callout_t proc_stop_ch;

static struct pool_allocator sigactspool_allocator = {
	.pa_alloc = sigacts_poolpage_alloc,
	.pa_free = sigacts_poolpage_free,
};

#ifdef DEBUG
int	kern_logsigexit = 1;
#else
int	kern_logsigexit = 0;
#endif

static const char logcoredump[] =
    "pid %d (%s), uid %d: exited on signal %d (core dumped)\n";
static const char lognocoredump[] =
    "pid %d (%s), uid %d: exited on signal %d (core not dumped, err = %d)\n";

POOL_INIT(siginfo_pool, sizeof(siginfo_t), 0, 0, 0, "siginfo",
    &pool_allocator_nointr, IPL_NONE);
POOL_INIT(ksiginfo_pool, sizeof(ksiginfo_t), 0, 0, 0, "ksiginfo",
    NULL, IPL_VM);

/*
 * signal_init:
 *
 *	Initialize global signal-related data structures.
 */
void
signal_init(void)
{

	sigactspool_allocator.pa_pagesz = (PAGE_SIZE)*2;

	sigacts_cache = pool_cache_init(sizeof(struct sigacts), 0, 0, 0,
	    "sigacts", sizeof(struct sigacts) > PAGE_SIZE ?
	    &sigactspool_allocator : NULL, IPL_NONE, NULL, NULL, NULL);

	exechook_establish(ksiginfo_exechook, NULL);

	callout_init(&proc_stop_ch, CALLOUT_MPSAFE);
	callout_setfunc(&proc_stop_ch, proc_stop_callout, NULL);
}

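/*
 * Note: struct sigacts can be larger than PAGE_SIZE on some platforms,
 * in which case the cache above is backed by sigactspool_allocator,
 * which hands out wired, two-page chunks from kernel_map (see
 * sigacts_poolpage_alloc() below).  Otherwise the pool's default page
 * allocator is used.
 */
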
/*
 * sigacts_poolpage_alloc:
 *
 *	Allocate a page for the sigacts memory pool.
 */
static void *
sigacts_poolpage_alloc(struct pool *pp, int flags)
{

	return (void *)uvm_km_alloc(kernel_map,
	    (PAGE_SIZE)*2, (PAGE_SIZE)*2,
	    ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)
	    | UVM_KMF_WIRED);
}

/*
 * sigacts_poolpage_free:
 *
 *	Free a page on behalf of the sigacts memory pool.
 */
static void
sigacts_poolpage_free(struct pool *pp, void *v)
{

	uvm_km_free(kernel_map, (vaddr_t)v, (PAGE_SIZE)*2, UVM_KMF_WIRED);
}

/*
 * sigactsinit:
 *
 *	Create an initial sigctx structure, using the same signal state as
 *	p.  If 'share' is set, share the sigctx_proc part, otherwise just
 *	copy it from parent.
 */
struct sigacts *
sigactsinit(struct proc *pp, int share)
{
	struct sigacts *ps, *ps2;

	ps = pp->p_sigacts;

	if (share) {
		atomic_inc_uint(&ps->sa_refcnt);
		ps2 = ps;
	} else {
		ps2 = pool_cache_get(sigacts_cache, PR_WAITOK);
		/* XXXAD get rid of this */
		mutex_init(&ps2->sa_mutex, MUTEX_DEFAULT, IPL_SCHED);
		mutex_enter(&ps->sa_mutex);
		memcpy(&ps2->sa_sigdesc, ps->sa_sigdesc,
		    sizeof(ps2->sa_sigdesc));
		mutex_exit(&ps->sa_mutex);
		ps2->sa_refcnt = 1;
	}

	return ps2;
}

/*
 * sigactsunshare:
 *
 *	Make this process not share its sigctx, maintaining all
 *	signal state.
 */
void
sigactsunshare(struct proc *p)
{
	struct sigacts *ps, *oldps;

	oldps = p->p_sigacts;
	if (oldps->sa_refcnt == 1)
		return;
	ps = pool_cache_get(sigacts_cache, PR_WAITOK);
	/* XXXAD get rid of this */
	mutex_init(&ps->sa_mutex, MUTEX_DEFAULT, IPL_SCHED);
	memset(&ps->sa_sigdesc, 0, sizeof(ps->sa_sigdesc));
	p->p_sigacts = ps;
	sigactsfree(oldps);
}

/*
 * sigactsfree:
 *
 *	Release a sigctx structure.
 */
void
sigactsfree(struct sigacts *ps)
{

	if (atomic_dec_uint_nv(&ps->sa_refcnt) == 0) {
		mutex_destroy(&ps->sa_mutex);
		pool_cache_put(sigacts_cache, ps);
	}
}

/*
 * siginit:
 *
 *	Initialize signal state for process 0; set to ignore signals that
 *	are ignored by default and disable the signal stack.  Locking not
 *	required as the system is still cold.
 */
void
siginit(struct proc *p)
{
	struct lwp *l;
	struct sigacts *ps;
	int signo, prop;

	ps = p->p_sigacts;
	sigemptyset(&contsigmask);
	sigemptyset(&stopsigmask);
	sigemptyset(&sigcantmask);
	for (signo = 1; signo < NSIG; signo++) {
		prop = sigprop[signo];
		if (prop & SA_CONT)
			sigaddset(&contsigmask, signo);
		if (prop & SA_STOP)
			sigaddset(&stopsigmask, signo);
		if (prop & SA_CANTMASK)
			sigaddset(&sigcantmask, signo);
		if (prop & SA_IGNORE && signo != SIGCONT)
			sigaddset(&p->p_sigctx.ps_sigignore, signo);
		sigemptyset(&SIGACTION_PS(ps, signo).sa_mask);
		SIGACTION_PS(ps, signo).sa_flags = SA_RESTART;
	}
	sigemptyset(&p->p_sigctx.ps_sigcatch);
	p->p_sflag &= ~PS_NOCLDSTOP;

	ksiginfo_queue_init(&p->p_sigpend.sp_info);
	sigemptyset(&p->p_sigpend.sp_set);

	/*
	 * Reset per LWP state.
	 */
	l = LIST_FIRST(&p->p_lwps);
	l->l_sigwaited = NULL;
	l->l_sigstk.ss_flags = SS_DISABLE;
	l->l_sigstk.ss_size = 0;
	l->l_sigstk.ss_sp = 0;
	ksiginfo_queue_init(&l->l_sigpend.sp_info);
	sigemptyset(&l->l_sigpend.sp_set);

	/* One reference. */
	ps->sa_refcnt = 1;
}

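/*
 * Note: siginit() runs only for process 0, while the system is still
 * cold.  The global contsigmask/stopsigmask/sigcantmask sets it fills
 * in from sigprop[] classify signals for the delivery code below, e.g.
 * kpsignal2() uses stopsigmask/contsigmask to cancel pending signals
 * that a new continue/stop signal makes obsolete.
 */
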
/*
 * execsigs:
 *
 *	Reset signals for an exec of the specified process.
 */
void
execsigs(struct proc *p)
{
	struct sigacts *ps;
	struct lwp *l;
	int signo, prop;
	sigset_t tset;
	ksiginfoq_t kq;

	KASSERT(p->p_nlwps == 1);

	sigactsunshare(p);
	ps = p->p_sigacts;

	/*
	 * Reset caught signals.  Held signals remain held through
	 * l->l_sigmask (unless they were caught, and are now ignored
	 * by default).
	 *
	 * No need to lock yet, the process has only one LWP and
	 * at this point the sigacts are private to the process.
	 */
	sigemptyset(&tset);
	for (signo = 1; signo < NSIG; signo++) {
		if (sigismember(&p->p_sigctx.ps_sigcatch, signo)) {
			prop = sigprop[signo];
			if (prop & SA_IGNORE) {
				if ((prop & SA_CONT) == 0)
					sigaddset(&p->p_sigctx.ps_sigignore,
					    signo);
				sigaddset(&tset, signo);
			}
			SIGACTION_PS(ps, signo).sa_handler = SIG_DFL;
		}
		sigemptyset(&SIGACTION_PS(ps, signo).sa_mask);
		SIGACTION_PS(ps, signo).sa_flags = SA_RESTART;
	}
	ksiginfo_queue_init(&kq);

	mutex_enter(p->p_lock);
	sigclearall(p, &tset, &kq);
	sigemptyset(&p->p_sigctx.ps_sigcatch);

	/*
	 * Reset the 'no zombies if child dies' flag, as Solaris does.
	 */
	p->p_flag &= ~(PK_NOCLDWAIT | PK_CLDSIGIGN);
	if (SIGACTION_PS(ps, SIGCHLD).sa_handler == SIG_IGN)
		SIGACTION_PS(ps, SIGCHLD).sa_handler = SIG_DFL;

	/*
	 * Reset per-LWP state.
	 */
	l = LIST_FIRST(&p->p_lwps);
	l->l_sigwaited = NULL;
	l->l_sigstk.ss_flags = SS_DISABLE;
	l->l_sigstk.ss_size = 0;
	l->l_sigstk.ss_sp = 0;
	ksiginfo_queue_init(&l->l_sigpend.sp_info);
	sigemptyset(&l->l_sigpend.sp_set);
	mutex_exit(p->p_lock);

	ksiginfo_queue_drain(&kq);
}

/*
 * ksiginfo_exechook:
 *
 *	Free all pending ksiginfo entries from a process on exec.
 *	Additionally, drain any unused ksiginfo structures in the
 *	system back to the pool.
 *
 *	XXX This should not be a hook, every process has signals.
 */
static void
ksiginfo_exechook(struct proc *p, void *v)
{
	ksiginfoq_t kq;

	ksiginfo_queue_init(&kq);

	mutex_enter(p->p_lock);
	sigclearall(p, NULL, &kq);
	mutex_exit(p->p_lock);

	ksiginfo_queue_drain(&kq);
}

/*
 * ksiginfo_alloc:
 *
 *	Allocate a new ksiginfo structure from the pool, and optionally copy
 *	an existing one.  If the existing ksiginfo_t is from the pool, and
 *	has not been queued somewhere, then just return it.  Additionally,
 *	if the existing ksiginfo_t does not contain any information beyond
 *	the signal number, then just return it.
 */
ksiginfo_t *
ksiginfo_alloc(struct proc *p, ksiginfo_t *ok, int flags)
{
	ksiginfo_t *kp;

	if (ok != NULL) {
		if ((ok->ksi_flags & (KSI_QUEUED | KSI_FROMPOOL)) ==
		    KSI_FROMPOOL)
			return ok;
		if (KSI_EMPTY_P(ok))
			return ok;
	}

	kp = pool_get(&ksiginfo_pool, flags);
	if (kp == NULL) {
#ifdef DIAGNOSTIC
		printf("Out of memory allocating ksiginfo for pid %d\n",
		    p->p_pid);
#endif
		return NULL;
	}

	if (ok != NULL) {
		memcpy(kp, ok, sizeof(*kp));
		kp->ksi_flags &= ~KSI_QUEUED;
	} else
		KSI_INIT_EMPTY(kp);

	kp->ksi_flags |= KSI_FROMPOOL;

	return kp;
}

/*
 * ksiginfo_free:
 *
 *	If the given ksiginfo_t is from the pool and has not been queued,
 *	then free it.
 */
void
ksiginfo_free(ksiginfo_t *kp)
{

	if ((kp->ksi_flags & (KSI_QUEUED | KSI_FROMPOOL)) != KSI_FROMPOOL)
		return;
	pool_put(&ksiginfo_pool, kp);
}

/*
 * ksiginfo_queue_drain0:
 *
 *	Drain a non-empty ksiginfo_t queue.
 */
void
ksiginfo_queue_drain0(ksiginfoq_t *kq)
{
	ksiginfo_t *ksi;

	KASSERT(!CIRCLEQ_EMPTY(kq));

	while (!CIRCLEQ_EMPTY(kq)) {
		ksi = CIRCLEQ_FIRST(kq);
		CIRCLEQ_REMOVE(kq, ksi, ksi_list);
		pool_put(&ksiginfo_pool, ksi);
	}
}

/*
 * sigget:
 *
 *	Fetch the first pending signal from a set.  Optionally, also fetch
 *	or manufacture a ksiginfo element.  Returns the number of the first
 *	pending signal, or zero.
 */
int
sigget(sigpend_t *sp, ksiginfo_t *out, int signo, const sigset_t *mask)
{
	ksiginfo_t *ksi;
	sigset_t tset;

	/* If there's no pending set, the signal is from the debugger. */
	if (sp == NULL) {
		if (out != NULL) {
			KSI_INIT(out);
			out->ksi_info._signo = signo;
			out->ksi_info._code = SI_USER;
		}
		return signo;
	}

	/* Construct mask from signo, and 'mask'. */
	if (signo == 0) {
		if (mask != NULL) {
			tset = *mask;
			__sigandset(&sp->sp_set, &tset);
		} else
			tset = sp->sp_set;

		/* If there are no signals pending, that's it. */
		if ((signo = firstsig(&tset)) == 0)
			return 0;
	} else {
		KASSERT(sigismember(&sp->sp_set, signo));
	}

	sigdelset(&sp->sp_set, signo);

	/* Find siginfo and copy it out. */
	CIRCLEQ_FOREACH(ksi, &sp->sp_info, ksi_list) {
		if (ksi->ksi_signo == signo) {
			CIRCLEQ_REMOVE(&sp->sp_info, ksi, ksi_list);
			KASSERT((ksi->ksi_flags & KSI_FROMPOOL) != 0);
			KASSERT((ksi->ksi_flags & KSI_QUEUED) != 0);
			ksi->ksi_flags &= ~KSI_QUEUED;
			if (out != NULL) {
				memcpy(out, ksi, sizeof(*out));
				out->ksi_flags &= ~(KSI_FROMPOOL | KSI_QUEUED);
			}
			ksiginfo_free(ksi);
			return signo;
		}
	}

	/* If there's no siginfo, then manufacture it. */
	if (out != NULL) {
		KSI_INIT(out);
		out->ksi_info._signo = signo;
		out->ksi_info._code = SI_USER;
	}

	return signo;
}

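/*
 * Usage sketch (not taken verbatim from any one caller): with
 * p->p_lock held, pass signo == 0 to dequeue the first pending signal,
 * with 'mask' (if non-NULL) selecting which signals are acceptable:
 *
 *	ksiginfo_t ksi;
 *	int signo;
 *
 *	signo = sigget(&l->l_sigpend, &ksi, 0, &waitset);
 *
 * Passing a non-zero signo instead dequeues that specific signal,
 * which must already be pending in sp->sp_set.
 */
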
/*
 * sigput:
 *
 *	Append a new ksiginfo element to the list of pending ksiginfo's, if
 *	we need to (e.g. SA_SIGINFO was requested).
 */
void
sigput(sigpend_t *sp, struct proc *p, ksiginfo_t *ksi)
{
	ksiginfo_t *kp;
	struct sigaction *sa = &SIGACTION_PS(p->p_sigacts, ksi->ksi_signo);

	KASSERT(mutex_owned(p->p_lock));
	KASSERT((ksi->ksi_flags & KSI_QUEUED) == 0);

	sigaddset(&sp->sp_set, ksi->ksi_signo);

	/*
	 * If siginfo is not required, or there is none, then just mark the
	 * signal as pending.
	 */
	if ((sa->sa_flags & SA_SIGINFO) == 0 || KSI_EMPTY_P(ksi))
		return;

	KASSERT((ksi->ksi_flags & KSI_FROMPOOL) != 0);

#ifdef notyet	/* XXX: QUEUING */
	if (ksi->ksi_signo < SIGRTMIN)
#endif
	{
		CIRCLEQ_FOREACH(kp, &sp->sp_info, ksi_list) {
			if (kp->ksi_signo == ksi->ksi_signo) {
				KSI_COPY(ksi, kp);
				kp->ksi_flags |= KSI_QUEUED;
				return;
			}
		}
	}

	ksi->ksi_flags |= KSI_QUEUED;
	CIRCLEQ_INSERT_TAIL(&sp->sp_info, ksi, ksi_list);
}

/*
 * sigclear:
 *
 *	Clear all pending signals in the specified set.
 */
void
sigclear(sigpend_t *sp, const sigset_t *mask, ksiginfoq_t *kq)
{
	ksiginfo_t *ksi, *next;

	if (mask == NULL)
		sigemptyset(&sp->sp_set);
	else
		sigminusset(mask, &sp->sp_set);

	ksi = CIRCLEQ_FIRST(&sp->sp_info);
	for (; ksi != (void *)&sp->sp_info; ksi = next) {
		next = CIRCLEQ_NEXT(ksi, ksi_list);
		if (mask == NULL || sigismember(mask, ksi->ksi_signo)) {
			CIRCLEQ_REMOVE(&sp->sp_info, ksi, ksi_list);
			KASSERT((ksi->ksi_flags & KSI_FROMPOOL) != 0);
			KASSERT((ksi->ksi_flags & KSI_QUEUED) != 0);
			CIRCLEQ_INSERT_TAIL(kq, ksi, ksi_list);
		}
	}
}

/*
 * sigclearall:
 *
 *	Clear all pending signals in the specified set from a process and
 *	its LWPs.
 */
void
sigclearall(struct proc *p, const sigset_t *mask, ksiginfoq_t *kq)
{
	struct lwp *l;

	KASSERT(mutex_owned(p->p_lock));

	sigclear(&p->p_sigpend, mask, kq);

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		sigclear(&l->l_sigpend, mask, kq);
	}
}

/*
 * sigispending:
 *
 *	Return true if there are pending signals for the current LWP.  May
 *	be called unlocked provided that LW_PENDSIG is set, and that the
 *	signal has been posted to the appropriate queue before LW_PENDSIG is
 *	set.
 */
int
sigispending(struct lwp *l, int signo)
{
	struct proc *p = l->l_proc;
	sigset_t tset;

	membar_consumer();

	tset = l->l_sigpend.sp_set;
	sigplusset(&p->p_sigpend.sp_set, &tset);
	sigminusset(&p->p_sigctx.ps_sigignore, &tset);
	sigminusset(&l->l_sigmask, &tset);

	if (signo == 0) {
		if (firstsig(&tset) != 0)
			return EINTR;
	} else if (sigismember(&tset, signo))
		return EINTR;

	return 0;
}

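/*
 * Note: the membar_consumer() in sigispending() pairs with the
 * membar_producer() issued by kpsignal2() before it sets LW_PENDSIG
 * on any LWP, so a caller that has observed LW_PENDSIG is guaranteed
 * to see the signal in the pending sets read above.
 */
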
/*
 * siginfo_alloc:
 *
 *	Allocate a new siginfo_t structure from the pool.
 */
siginfo_t *
siginfo_alloc(int flags)
{

	return pool_get(&siginfo_pool, flags);
}

/*
 * siginfo_free:
 *
 *	Return a siginfo_t structure to the pool.
 */
void
siginfo_free(void *arg)
{

	pool_put(&siginfo_pool, arg);
}

void
getucontext(struct lwp *l, ucontext_t *ucp)
{
	struct proc *p = l->l_proc;

	KASSERT(mutex_owned(p->p_lock));

	ucp->uc_flags = 0;
	ucp->uc_link = l->l_ctxlink;

	ucp->uc_sigmask = l->l_sigmask;
	ucp->uc_flags |= _UC_SIGMASK;

	/*
	 * The (unsupplied) definition of the `current execution stack'
	 * in the System V Interface Definition appears to allow returning
	 * the main context stack.
	 */
	if ((l->l_sigstk.ss_flags & SS_ONSTACK) == 0) {
		ucp->uc_stack.ss_sp = (void *)l->l_proc->p_stackbase;
		ucp->uc_stack.ss_size = ctob(l->l_proc->p_vmspace->vm_ssize);
		ucp->uc_stack.ss_flags = 0;	/* XXX, def. is Very Fishy */
	} else {
		/* Simply copy alternate signal execution stack. */
		ucp->uc_stack = l->l_sigstk;
	}
	ucp->uc_flags |= _UC_STACK;
	mutex_exit(p->p_lock);
	cpu_getmcontext(l, &ucp->uc_mcontext, &ucp->uc_flags);
	mutex_enter(p->p_lock);
}

int
setucontext(struct lwp *l, const ucontext_t *ucp)
{
	struct proc *p = l->l_proc;
	int error;

	KASSERT(mutex_owned(p->p_lock));

	if ((ucp->uc_flags & _UC_SIGMASK) != 0) {
		error = sigprocmask1(l, SIG_SETMASK, &ucp->uc_sigmask, NULL);
		if (error != 0)
			return error;
	}

	mutex_exit(p->p_lock);
	error = cpu_setmcontext(l, &ucp->uc_mcontext, ucp->uc_flags);
	mutex_enter(p->p_lock);
	if (error != 0)
		return (error);

	l->l_ctxlink = ucp->uc_link;

	/*
	 * If there was stack information, update whether or not we are
	 * still running on an alternate signal stack.
	 */
	if ((ucp->uc_flags & _UC_STACK) != 0) {
		if (ucp->uc_stack.ss_flags & SS_ONSTACK)
			l->l_sigstk.ss_flags |= SS_ONSTACK;
		else
			l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	}

	return 0;
}

/*
 * Common code for kill process group/broadcast kill.  'cp' is the
 * calling process.
 */
int
killpg1(struct lwp *l, ksiginfo_t *ksi, int pgid, int all)
{
	struct proc *p, *cp;
	kauth_cred_t pc;
	struct pgrp *pgrp;
	int nfound;
	int signo = ksi->ksi_signo;

	cp = l->l_proc;
	pc = l->l_cred;
	nfound = 0;

	mutex_enter(proc_lock);
	if (all) {
		/*
		 * broadcast
		 */
		PROCLIST_FOREACH(p, &allproc) {
			if (p->p_pid <= 1 || p == cp ||
			    p->p_flag & (PK_SYSTEM|PK_MARKER))
				continue;
			mutex_enter(p->p_lock);
			if (kauth_authorize_process(pc,
			    KAUTH_PROCESS_SIGNAL, p, KAUTH_ARG(signo), NULL,
			    NULL) == 0) {
				nfound++;
				if (signo)
					kpsignal2(p, ksi);
			}
			mutex_exit(p->p_lock);
		}
	} else {
		if (pgid == 0)
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = cp->p_pgrp;
		else {
			pgrp = pg_find(pgid, PFIND_LOCKED);
			if (pgrp == NULL)
				goto out;
		}
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			if (p->p_pid <= 1 || p->p_flag & PK_SYSTEM)
				continue;
			mutex_enter(p->p_lock);
			if (kauth_authorize_process(pc, KAUTH_PROCESS_SIGNAL,
			    p, KAUTH_ARG(signo), NULL, NULL) == 0) {
				nfound++;
				if (signo && P_ZOMBIE(p) == 0)
					kpsignal2(p, ksi);
			}
			mutex_exit(p->p_lock);
		}
	}
 out:
	mutex_exit(proc_lock);
	return (nfound ? 0 : ESRCH);
}

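/*
 * For reference (a sketch of how kill(2)-style callers map onto
 * killpg1(), not a verbatim copy of sys_kill()): pid == -1 requests a
 * broadcast (all != 0); pid == 0 signals the caller's own process
 * group (pgid == 0); and a negative pid signals process group -pid.
 */
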
/*
 * Send a signal to a process group.  If checkctty is 1, limit to members
 * which have a controlling terminal.
 */
void
pgsignal(struct pgrp *pgrp, int sig, int checkctty)
{
	ksiginfo_t ksi;

	KASSERT(!cpu_intr_p());
	KASSERT(mutex_owned(proc_lock));

	KSI_INIT_EMPTY(&ksi);
	ksi.ksi_signo = sig;
	kpgsignal(pgrp, &ksi, NULL, checkctty);
}

void
kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty)
{
	struct proc *p;

	KASSERT(!cpu_intr_p());
	KASSERT(mutex_owned(proc_lock));

	if (pgrp)
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist)
			if (checkctty == 0 || p->p_lflag & PL_CONTROLT)
				kpsignal(p, ksi, data);
}

/*
 * Send a signal caused by a trap to the current LWP.  If it will be caught
 * immediately, deliver it with correct code.  Otherwise, post it normally.
 */
void
trapsignal(struct lwp *l, ksiginfo_t *ksi)
{
	struct proc *p;
	struct sigacts *ps;
	int signo = ksi->ksi_signo;

	KASSERT(KSI_TRAP_P(ksi));

	ksi->ksi_lid = l->l_lid;
	p = l->l_proc;

	KASSERT(!cpu_intr_p());
	mutex_enter(proc_lock);
	mutex_enter(p->p_lock);
	ps = p->p_sigacts;
	if ((p->p_slflag & PSL_TRACED) == 0 &&
	    sigismember(&p->p_sigctx.ps_sigcatch, signo) &&
	    !sigismember(&l->l_sigmask, signo)) {
		mutex_exit(proc_lock);
		l->l_ru.ru_nsignals++;
		kpsendsig(l, ksi, &l->l_sigmask);
		mutex_exit(p->p_lock);
		ktrpsig(signo, SIGACTION_PS(ps, signo).sa_handler,
		    &l->l_sigmask, ksi);
	} else {
		/* XXX for core dump/debugger */
		p->p_sigctx.ps_lwp = l->l_lid;
		p->p_sigctx.ps_signo = ksi->ksi_signo;
		p->p_sigctx.ps_code = ksi->ksi_trap;
		kpsignal2(p, ksi);
		mutex_exit(p->p_lock);
		mutex_exit(proc_lock);
	}
}

/*
 * Fill in signal information and signal the parent for a child status change.
 */
void
child_psignal(struct proc *p, int mask)
{
	ksiginfo_t ksi;
	struct proc *q;
	int xstat;

	KASSERT(mutex_owned(proc_lock));
	KASSERT(mutex_owned(p->p_lock));

	xstat = p->p_xstat;

	KSI_INIT(&ksi);
	ksi.ksi_signo = SIGCHLD;
	ksi.ksi_code = (xstat == SIGCONT ? CLD_CONTINUED : CLD_STOPPED);
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(p->p_cred);
	ksi.ksi_status = xstat;
	ksi.ksi_utime = p->p_stats->p_ru.ru_utime.tv_sec;
	ksi.ksi_stime = p->p_stats->p_ru.ru_stime.tv_sec;

	q = p->p_pptr;

	mutex_exit(p->p_lock);
	mutex_enter(q->p_lock);

	if ((q->p_sflag & mask) == 0)
		kpsignal2(q, &ksi);

	mutex_exit(q->p_lock);
	mutex_enter(p->p_lock);
}

void
psignal(struct proc *p, int signo)
{
	ksiginfo_t ksi;

	KASSERT(!cpu_intr_p());
	KASSERT(mutex_owned(proc_lock));

	KSI_INIT_EMPTY(&ksi);
	ksi.ksi_signo = signo;
	mutex_enter(p->p_lock);
	kpsignal2(p, &ksi);
	mutex_exit(p->p_lock);
}

void
kpsignal(struct proc *p, ksiginfo_t *ksi, void *data)
{
	fdfile_t *ff;
	file_t *fp;

	KASSERT(!cpu_intr_p());
	KASSERT(mutex_owned(proc_lock));

	if ((p->p_sflag & PS_WEXIT) == 0 && data) {
		size_t fd;
		filedesc_t *fdp = p->p_fd;

		/* XXXSMP locking */
		ksi->ksi_fd = -1;
		for (fd = 0; fd < fdp->fd_nfiles; fd++) {
			if ((ff = fdp->fd_ofiles[fd]) == NULL)
				continue;
			if ((fp = ff->ff_file) == NULL)
				continue;
			if (fp->f_data == data) {
				ksi->ksi_fd = fd;
				break;
			}
		}
	}
	mutex_enter(p->p_lock);
	kpsignal2(p, ksi);
	mutex_exit(p->p_lock);
}

/*
 * sigismasked:
 *
 *	Returns true if signal is ignored or masked for the specified LWP.
 */
int
sigismasked(struct lwp *l, int sig)
{
	struct proc *p = l->l_proc;

	return (sigismember(&p->p_sigctx.ps_sigignore, sig) ||
	    sigismember(&l->l_sigmask, sig));
}

/*
 * sigpost:
 *
 *	Post a pending signal to an LWP.  Returns non-zero if the LWP was
 *	able to take the signal.
 */
int
sigpost(struct lwp *l, sig_t action, int prop, int sig)
{
	int rv, masked;

	KASSERT(mutex_owned(l->l_proc->p_lock));

	/*
	 * If the LWP is on the way out, sigclear() will be busy draining all
	 * pending signals.  Don't give it more.
	 */
	if (l->l_refcnt == 0)
		return 0;

	lwp_lock(l);

	/*
	 * Have the LWP check for signals.  This ensures that even if no LWP
	 * is found to take the signal immediately, it should be taken soon.
	 */
	l->l_flag |= LW_PENDSIG;

	/*
	 * SIGCONT can be masked, but must always restart stopped LWPs.
	 */
	masked = sigismember(&l->l_sigmask, sig);
	if (masked && ((prop & SA_CONT) == 0 || l->l_stat != LSSTOP)) {
		lwp_unlock(l);
		return 0;
	}

	/*
	 * If killing the process, make it run fast.
	 */
	if (__predict_false((prop & SA_KILL) != 0) &&
	    action == SIG_DFL && l->l_priority < MAXPRI_USER) {
		KASSERT(l->l_class == SCHED_OTHER);
		lwp_changepri(l, MAXPRI_USER);
	}

	/*
	 * If the LWP is running or on a run queue, then we win.  If it's
	 * sleeping interruptibly, wake it and make it take the signal.  If
	 * the sleep isn't interruptible, then the chances are it will get
	 * to see the signal soon anyhow.  If suspended, it can't take the
	 * signal right now.  If it's LWP private or for all LWPs, save it
	 * for later; otherwise punt.
	 */
	rv = 0;

	switch (l->l_stat) {
	case LSRUN:
	case LSONPROC:
		lwp_need_userret(l);
		rv = 1;
		break;

	case LSSLEEP:
		if ((l->l_flag & LW_SINTR) != 0) {
			/* setrunnable() will release the lock. */
			setrunnable(l);
			return 1;
		}
		break;

	case LSSUSPENDED:
		if ((prop & SA_KILL) != 0) {
			/* lwp_continue() will release the lock. */
			lwp_continue(l);
			return 1;
		}
		break;

	case LSSTOP:
		if ((prop & SA_STOP) != 0)
			break;

		/*
		 * If the LWP is stopped and we are sending a continue
		 * signal, then start it again.
		 */
		if ((prop & SA_CONT) != 0) {
			if (l->l_wchan != NULL) {
				l->l_stat = LSSLEEP;
				l->l_proc->p_nrlwps++;
				rv = 1;
				break;
			}
			/* setrunnable() will release the lock. */
			setrunnable(l);
			return 1;
		} else if (l->l_wchan == NULL || (l->l_flag & LW_SINTR) != 0) {
			/* setrunnable() will release the lock. */
			setrunnable(l);
			return 1;
		}
		break;

	default:
		break;
	}

	lwp_unlock(l);
	return rv;
}

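/*
 * Note on locking in sigpost(): the caller holds p->p_lock throughout.
 * The LWP lock taken above is released on every path: explicitly via
 * lwp_unlock() here, or on our behalf by setrunnable()/lwp_continue()
 * on the early-return paths noted inline.
 */
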
/*
 * Notify an LWP that it has a pending signal.
 */
void
signotify(struct lwp *l)
{
	KASSERT(lwp_locked(l, NULL));

	l->l_flag |= LW_PENDSIG;
	lwp_need_userret(l);
}

/*
 * Find an LWP within process p that is waiting on signal ksi, and hand
 * it on.
 */
int
sigunwait(struct proc *p, const ksiginfo_t *ksi)
{
	struct lwp *l;
	int signo;

	KASSERT(mutex_owned(p->p_lock));

	signo = ksi->ksi_signo;

	if (ksi->ksi_lid != 0) {
		/*
		 * Signal came via _lwp_kill().  Find the LWP and see if
		 * it's interested.
		 */
		if ((l = lwp_find(p, ksi->ksi_lid)) == NULL)
			return 0;
		if (l->l_sigwaited == NULL ||
		    !sigismember(&l->l_sigwaitset, signo))
			return 0;
	} else {
		/*
		 * Look for any LWP that may be interested.
		 */
		LIST_FOREACH(l, &p->p_sigwaiters, l_sigwaiter) {
			KASSERT(l->l_sigwaited != NULL);
			if (sigismember(&l->l_sigwaitset, signo))
				break;
		}
	}

	if (l != NULL) {
		l->l_sigwaited->ksi_info = ksi->ksi_info;
		l->l_sigwaited = NULL;
		LIST_REMOVE(l, l_sigwaiter);
		cv_signal(&l->l_sigcv);
		return 1;
	}

	return 0;
}

/*
 * Send the signal to the process.  If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *   o When a stop signal is sent to a sleeping process that takes the
 *     default action, the process is stopped without awakening it.
 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 *     regardless of the signal action (e.g., blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 */
void
kpsignal2(struct proc *p, ksiginfo_t *ksi)
{
	int prop, lid, toall, signo = ksi->ksi_signo;
	struct sigacts *sa;
	struct lwp *l;
	ksiginfo_t *kp;
	ksiginfoq_t kq;
	sig_t action;

	KASSERT(!cpu_intr_p());
	KASSERT(mutex_owned(proc_lock));
	KASSERT(mutex_owned(p->p_lock));
	KASSERT((ksi->ksi_flags & KSI_QUEUED) == 0);
	KASSERT(signo > 0 && signo < NSIG);

	/*
	 * If the process is being created by fork, is a zombie or is
	 * exiting, then just drop the signal here and bail out.
	 */
	if (p->p_stat != SACTIVE && p->p_stat != SSTOP)
		return;

	/*
	 * Notify any interested parties of the signal.
	 */
	KNOTE(&p->p_klist, NOTE_SIGNAL | signo);

	/*
	 * Some signals including SIGKILL must act on the entire process.
	 */
	kp = NULL;
	prop = sigprop[signo];
	toall = ((prop & SA_TOALL) != 0);

	if (toall)
		lid = 0;
	else
		lid = ksi->ksi_lid;

	/*
	 * If proc is traced, always give parent a chance.
	 */
	if (p->p_slflag & PSL_TRACED) {
		action = SIG_DFL;

		if (lid == 0) {
			/*
			 * If the process is being traced and the signal
			 * is being caught, make sure to save any ksiginfo.
			 */
			if ((kp = ksiginfo_alloc(p, ksi, PR_NOWAIT)) == NULL)
				return;
			sigput(&p->p_sigpend, p, kp);
		}
	} else {
		/*
		 * If the signal was the result of a trap and is not being
		 * caught, then reset it to default action so that the
		 * process dumps core immediately.
		 */
		if (KSI_TRAP_P(ksi)) {
			sa = p->p_sigacts;
			mutex_enter(&sa->sa_mutex);
			if (!sigismember(&p->p_sigctx.ps_sigcatch, signo)) {
				sigdelset(&p->p_sigctx.ps_sigignore, signo);
				SIGACTION(p, signo).sa_handler = SIG_DFL;
			}
			mutex_exit(&sa->sa_mutex);
		}

		/*
		 * If the signal is being ignored, then drop it.  Note: we
		 * don't set SIGCONT in ps_sigignore, and if it is set to
		 * SIG_IGN, action will be SIG_DFL here.
		 */
		if (sigismember(&p->p_sigctx.ps_sigignore, signo))
			return;

		else if (sigismember(&p->p_sigctx.ps_sigcatch, signo))
			action = SIG_CATCH;
		else {
			action = SIG_DFL;

			/*
			 * If sending a tty stop signal to a member of an
			 * orphaned process group, discard the signal here if
			 * the action is default; don't stop the process below
			 * if sleeping, and don't clear any pending SIGCONT.
			 */
			if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0)
				return;

			if (prop & SA_KILL && p->p_nice > NZERO)
				p->p_nice = NZERO;
		}
	}

	/*
	 * If stopping or continuing a process, discard any pending
	 * signals that would do the inverse.
	 */
	if ((prop & (SA_CONT | SA_STOP)) != 0) {
		ksiginfo_queue_init(&kq);
		if ((prop & SA_CONT) != 0)
			sigclear(&p->p_sigpend, &stopsigmask, &kq);
		if ((prop & SA_STOP) != 0)
			sigclear(&p->p_sigpend, &contsigmask, &kq);
		ksiginfo_queue_drain(&kq);	/* XXXSMP */
	}

	/*
	 * If the signal doesn't have SA_CANTMASK (no override for SIGKILL,
	 * please!), check if any LWPs are waiting on it.  If yes, pass on
	 * the signal info.  The signal won't be processed further here.
	 */
	if ((prop & SA_CANTMASK) == 0 && !LIST_EMPTY(&p->p_sigwaiters) &&
	    p->p_stat == SACTIVE && (p->p_sflag & PS_STOPPING) == 0 &&
	    sigunwait(p, ksi))
		return;

	/*
	 * XXXSMP Should be allocated by the caller, we're holding locks
	 * here.
	 */
	if (kp == NULL && (kp = ksiginfo_alloc(p, ksi, PR_NOWAIT)) == NULL)
		return;

	/*
	 * LWP private signals are easy - just find the LWP and post
	 * the signal to it.
	 */
	if (lid != 0) {
		l = lwp_find(p, lid);
		if (l != NULL) {
			sigput(&l->l_sigpend, p, kp);
			membar_producer();
			(void)sigpost(l, action, prop, kp->ksi_signo);
		}
		goto out;
	}

	/*
	 * Some signals go to all LWPs, even if posted with _lwp_kill().
	 */
	if (p->p_stat == SACTIVE && (p->p_sflag & PS_STOPPING) == 0) {
		if ((p->p_slflag & PSL_TRACED) != 0)
			goto deliver;

		/*
		 * If SIGCONT is default (or ignored) and process is
		 * asleep, we are finished; the process should not
		 * be awakened.
		 */
		if ((prop & SA_CONT) != 0 && action == SIG_DFL)
			goto out;

		sigput(&p->p_sigpend, p, kp);
	} else {
		/*
		 * Process is stopped or stopping.  If traced, then no
		 * further action is necessary.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && signo != SIGKILL)
			goto out;

		if ((prop & (SA_CONT | SA_KILL)) != 0) {
			/*
			 * Re-adjust p_nstopchild if the process wasn't
			 * collected by its parent.
			 */
			p->p_stat = SACTIVE;
			p->p_sflag &= ~PS_STOPPING;
			if (!p->p_waited)
				p->p_pptr->p_nstopchild--;

			/*
			 * If SIGCONT is default (or ignored), we continue
			 * the process but don't leave the signal in
			 * ps_siglist, as it has no further action.  If
			 * SIGCONT is held, we continue the process and
			 * leave the signal in ps_siglist.  If the process
			 * catches SIGCONT, let it handle the signal itself.
			 * If it isn't waiting on an event, then it goes
			 * back to run state.  Otherwise, process goes back
			 * to sleep state.
			 */
			if ((prop & SA_CONT) == 0 || action != SIG_DFL)
				sigput(&p->p_sigpend, p, kp);
		} else if ((prop & SA_STOP) != 0) {
			/*
			 * Already stopped, don't need to stop again.
			 * (If we did the shell could get confused.)
			 */
			goto out;
		} else
			sigput(&p->p_sigpend, p, kp);
	}

 deliver:
	/*
	 * Before we set LW_PENDSIG on any LWP, ensure that the signal is
	 * visible on the per process list (for sigispending()).  This
	 * is unlikely to be needed in practice, but...
	 */
	membar_producer();

	/*
	 * Try to find an LWP that can take the signal.
	 */
	LIST_FOREACH(l, &p->p_lwps, l_sibling)
		if (sigpost(l, action, prop, kp->ksi_signo) && !toall)
			break;

 out:
	/*
	 * If the ksiginfo wasn't used, then bin it.  XXXSMP freeing memory
	 * with locks held.  The caller should take care of this.
	 */
	ksiginfo_free(kp);
}

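/*
 * In summary (a reading aid, not new policy), kpsignal2() decides in
 * this order: drop the signal if the process is half-created, a zombie
 * or exiting; route to the debugger if traced; drop ignored signals;
 * cancel pending signals that a stop/continue signal makes obsolete;
 * hand the signal to any sigwait()ing LWP; then queue it either
 * per-LWP (lid != 0) or per-process, and finally nudge candidate LWPs
 * via sigpost().
 */
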
void
kpsendsig(struct lwp *l, const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct proc *p = l->l_proc;

	KASSERT(mutex_owned(p->p_lock));

	(*p->p_emul->e_sendsig)(ksi, mask);
}

/*
 * Stop any LWPs sleeping interruptibly.
 */
static void
proc_stop_lwps(struct proc *p)
{
	struct lwp *l;

	KASSERT(mutex_owned(p->p_lock));
	KASSERT((p->p_sflag & PS_STOPPING) != 0);

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		if (l->l_stat == LSSLEEP && (l->l_flag & LW_SINTR) != 0) {
			l->l_stat = LSSTOP;
			p->p_nrlwps--;
		}
		lwp_unlock(l);
	}
}

/*
 * Finish stopping of a process.  Mark it stopped and notify the parent.
 *
 * Drop p_lock briefly if PS_NOTIFYSTOP is set and ppsig is true.
 */
static void
proc_stop_done(struct proc *p, bool ppsig, int ppmask)
{

	KASSERT(mutex_owned(proc_lock));
	KASSERT(mutex_owned(p->p_lock));
	KASSERT((p->p_sflag & PS_STOPPING) != 0);
	KASSERT(p->p_nrlwps == 0 || (p->p_nrlwps == 1 && p == curproc));

	p->p_sflag &= ~PS_STOPPING;
	p->p_stat = SSTOP;
	p->p_waited = 0;
	p->p_pptr->p_nstopchild++;
	if ((p->p_sflag & PS_NOTIFYSTOP) != 0) {
		if (ppsig) {
			/* child_psignal drops p_lock briefly. */
			child_psignal(p, ppmask);
		}
		cv_broadcast(&p->p_pptr->p_waitcv);
	}
}

/*
 * Stop the current process and switch away when being stopped or traced.
 */
void
sigswitch(bool ppsig, int ppmask, int signo)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	int biglocks;

	KASSERT(mutex_owned(p->p_lock));
	KASSERT(l->l_stat == LSONPROC);
	KASSERT(p->p_nrlwps > 0);

	/*
	 * On entry we know that the process needs to stop.  If it's
	 * the result of a 'sideways' stop signal that has been sourced
	 * through issignal(), then stop other LWPs in the process too.
	 */
	if (p->p_stat == SACTIVE && (p->p_sflag & PS_STOPPING) == 0) {
		KASSERT(signo != 0);
		proc_stop(p, 1, signo);
		KASSERT(p->p_nrlwps > 0);
	}

	/*
	 * If we are the last live LWP, and the stop was a result of
	 * a new signal, then signal the parent.
	 */
	if ((p->p_sflag & PS_STOPPING) != 0) {
		if (!mutex_tryenter(proc_lock)) {
			mutex_exit(p->p_lock);
			mutex_enter(proc_lock);
			mutex_enter(p->p_lock);
		}

		if (p->p_nrlwps == 1 && (p->p_sflag & PS_STOPPING) != 0) {
			/*
			 * Note that proc_stop_done() can drop
			 * p->p_lock briefly.
			 */
			proc_stop_done(p, ppsig, ppmask);
		}

		mutex_exit(proc_lock);
	}

	/*
	 * Unlock and switch away.
	 */
	KERNEL_UNLOCK_ALL(l, &biglocks);
	if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
		p->p_nrlwps--;
		lwp_lock(l);
		KASSERT(l->l_stat == LSONPROC || l->l_stat == LSSLEEP);
		l->l_stat = LSSTOP;
		lwp_unlock(l);
	}

	mutex_exit(p->p_lock);
	lwp_lock(l);
	mi_switch(l);
	KERNEL_LOCK(biglocks, l);
	mutex_enter(p->p_lock);
}

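/*
 * Lock ordering note: proc_lock is taken before p->p_lock elsewhere,
 * so sigswitch() may not simply mutex_enter(proc_lock) while holding
 * p->p_lock.  Hence the mutex_tryenter() above, with the drop-and-
 * retake fallback when the try fails.
 */
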
/*
 * Check for a signal from the debugger.
 */
int
sigchecktrace(sigpend_t **spp)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	int signo;

	KASSERT(mutex_owned(p->p_lock));

	/*
	 * If we are no longer being traced, or the parent didn't
	 * give us a signal, look for more signals.
	 */
	if ((p->p_slflag & PSL_TRACED) == 0 || p->p_xstat == 0)
		return 0;

	/* If there's a pending SIGKILL, process it immediately. */
	if (sigismember(&p->p_sigpend.sp_set, SIGKILL))
		return 0;

	/*
	 * If the new signal is being masked, look for other signals.
	 * `p->p_sigctx.ps_siglist |= mask' is done in setrunnable().
	 */
	signo = p->p_xstat;
	p->p_xstat = 0;
	if ((sigprop[signo] & SA_TOLWP) != 0)
		*spp = &l->l_sigpend;
	else
		*spp = &p->p_sigpend;
	if (sigismember(&l->l_sigmask, signo))
		signo = 0;

	return signo;
}

/*
 * If the current process has received a signal (should be caught or cause
 * termination, should interrupt current syscall), return the signal number.
 *
 * Stop signals with default action are processed immediately, then cleared;
 * they aren't returned.  This is checked after each entry to the system for
 * a syscall or trap.
 *
 * We will also return -1 if the process is exiting and the current LWP must
 * follow suit.
 *
 * Note that we may be called while on a sleep queue, so MUST NOT sleep.  We
 * can switch away, though.
 */
int
issignal(struct lwp *l)
{
	struct proc *p = l->l_proc;
	int signo = 0, prop;
	sigpend_t *sp = NULL;
	sigset_t ss;

	KASSERT(mutex_owned(p->p_lock));

	for (;;) {
		/* Discard any signals that we have decided not to take. */
		if (signo != 0)
			(void)sigget(sp, NULL, signo, NULL);

		/*
		 * If the process is stopped/stopping, then stop ourselves
		 * now that we're on the kernel/userspace boundary.  When
		 * we awaken, check for a signal from the debugger.
		 */
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
			sigswitch(true, PS_NOCLDSTOP, 0);
			signo = sigchecktrace(&sp);
		} else
			signo = 0;

		/*
		 * If the debugger didn't provide a signal, find a pending
		 * signal from our set.  Check per-LWP signals first, and
		 * then per-process.
		 */
		if (signo == 0) {
			sp = &l->l_sigpend;
			ss = sp->sp_set;
			if ((p->p_sflag & PS_PPWAIT) != 0)
				sigminusset(&stopsigmask, &ss);
			sigminusset(&l->l_sigmask, &ss);

			if ((signo = firstsig(&ss)) == 0) {
				sp = &p->p_sigpend;
				ss = sp->sp_set;
				if ((p->p_sflag & PS_PPWAIT) != 0)
					sigminusset(&stopsigmask, &ss);
				sigminusset(&l->l_sigmask, &ss);

				if ((signo = firstsig(&ss)) == 0) {
					/*
					 * No signal pending - clear the
					 * indicator and bail out.
					 */
					lwp_lock(l);
					l->l_flag &= ~LW_PENDSIG;
					lwp_unlock(l);
					sp = NULL;
					break;
				}
			}
		}

		/*
		 * We should see pending but ignored signals only if
		 * we are being traced.
		 */
		if (sigismember(&p->p_sigctx.ps_sigignore, signo) &&
		    (p->p_slflag & PSL_TRACED) == 0) {
			/* Discard the signal. */
			continue;
		}

		/*
		 * If traced, always stop, and stay stopped until released
		 * by the debugger.  If our parent process is waiting for
		 * us, don't hang as we could deadlock.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 &&
		    (p->p_sflag & PS_PPWAIT) == 0 && signo != SIGKILL) {
			/* Take the signal. */
			(void)sigget(sp, NULL, signo, NULL);
			p->p_xstat = signo;

			/* Emulation-specific handling of signal trace */
			if (p->p_emul->e_tracesig == NULL ||
			    (*p->p_emul->e_tracesig)(p, signo) == 0)
				sigswitch(!(p->p_slflag & PSL_FSTRACE), 0,
				    signo);

			/* Check for a signal from the debugger. */
			if ((signo = sigchecktrace(&sp)) == 0)
				continue;
		}

		prop = sigprop[signo];

		/*
		 * Decide whether the signal should be returned.
		 */
		switch ((long)SIGACTION(p, signo).sa_handler) {
		case (long)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (p->p_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				printf_nolog("Process (pid %d) got sig %d\n",
				    p->p_pid, signo);
#endif
				continue;
			}

			/*
			 * If there is a pending stop signal to process with
			 * default action, stop here, then clear the signal.
			 * However, if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				/*
				 * XXX Don't hold proc_lock for p_lflag,
				 * but it's not a big deal.
				 */
				if (p->p_slflag & PSL_TRACED ||
				    ((p->p_lflag & PL_ORPHANPG) != 0 &&
				    prop & SA_TTYSTOP)) {
					/* Ignore the signal. */
					continue;
				}
				/* Take the signal. */
				(void)sigget(sp, NULL, signo, NULL);
				p->p_xstat = signo;
				signo = 0;
				sigswitch(true, PS_NOCLDSTOP, p->p_xstat);
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				continue;
			}
			break;

		case (long)SIG_IGN:
#ifdef DEBUG_ISSIGNAL
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
			if ((prop & SA_CONT) == 0 &&
			    (p->p_slflag & PSL_TRACED) == 0)
				printf_nolog("issignal\n");
#endif
			continue;

		default:
			/*
			 * This signal has an action, let postsig() process
			 * it.
			 */
			break;
		}

		break;
	}

	l->l_sigpendset = sp;
	return signo;
}

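/*
 * Typical caller pattern (a sketch of the userret path, simplified):
 * with p->p_lock held, take signals until none remain:
 *
 *	mutex_enter(p->p_lock);
 *	while ((signo = issignal(l)) != 0)
 *		postsig(signo);
 *	mutex_exit(p->p_lock);
 */
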
/*
 * Take the action for the specified signal
 * from the current set of pending signals.
 */
void
postsig(int signo)
{
	struct lwp *l;
	struct proc *p;
	struct sigacts *ps;
	sig_t action;
	sigset_t *returnmask;
	ksiginfo_t ksi;

	l = curlwp;
	p = l->l_proc;
	ps = p->p_sigacts;

	KASSERT(mutex_owned(p->p_lock));
	KASSERT(signo > 0);

	/*
	 * Set the new mask value and also defer further occurrences of this
	 * signal.
	 *
	 * Special case: user has done a sigsuspend.  Here the current mask is
	 * not of interest, but rather the mask from before the sigsuspend is
	 * what we want restored after the signal processing is completed.
	 */
	if (l->l_sigrestore) {
		returnmask = &l->l_sigoldmask;
		l->l_sigrestore = 0;
	} else
		returnmask = &l->l_sigmask;

	/*
	 * Commit to taking the signal before releasing the mutex.
	 */
	action = SIGACTION_PS(ps, signo).sa_handler;
	l->l_ru.ru_nsignals++;
	sigget(l->l_sigpendset, &ksi, signo, NULL);

	if (ktrpoint(KTR_PSIG)) {
		mutex_exit(p->p_lock);
		ktrpsig(signo, action, returnmask, NULL);
		mutex_enter(p->p_lock);
	}

	if (action == SIG_DFL) {
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		sigexit(l, signo);
		return;
	}

	/*
	 * If we get here, the signal must be caught.
	 */
#ifdef DIAGNOSTIC
	if (action == SIG_IGN || sigismember(&l->l_sigmask, signo))
		panic("postsig action");
#endif

	kpsendsig(l, &ksi, returnmask);
}

/*
 * sendsig_reset:
 *
 *	Reset the signal action.  Called from emulation specific sendsig()
 *	before unlocking to deliver the signal.
 */
void
sendsig_reset(struct lwp *l, int signo)
{
	struct proc *p = l->l_proc;
	struct sigacts *ps = p->p_sigacts;

	KASSERT(mutex_owned(p->p_lock));

	p->p_sigctx.ps_lwp = 0;
	p->p_sigctx.ps_code = 0;
	p->p_sigctx.ps_signo = 0;

	mutex_enter(&ps->sa_mutex);
	sigplusset(&SIGACTION_PS(ps, signo).sa_mask, &l->l_sigmask);
	if (SIGACTION_PS(ps, signo).sa_flags & SA_RESETHAND) {
		sigdelset(&p->p_sigctx.ps_sigcatch, signo);
		if (signo != SIGCONT && sigprop[signo] & SA_IGNORE)
			sigaddset(&p->p_sigctx.ps_sigignore, signo);
		SIGACTION_PS(ps, signo).sa_handler = SIG_DFL;
	}
	mutex_exit(&ps->sa_mutex);
}

/*
 * Kill the given process for the stated reason.
 */
void
killproc(struct proc *p, const char *why)
{

	KASSERT(mutex_owned(proc_lock));

	log(LOG_ERR, "pid %d was killed: %s\n", p->p_pid, why);
	uprintf_locked("sorry, pid %d was killed: %s\n", p->p_pid, why);
	psignal(p, SIGKILL);
}

/*
 * Force the current process to exit with the specified signal, dumping core
 * if appropriate.  We bypass the normal tests for masked and caught
 * signals, allowing unrecoverable failures to terminate the process without
 * changing signal state.  Mark the accounting record with the signal
 * termination.  If dumping core, save the signal number for the debugger.
 * Calls exit and does not return.
 */
void
sigexit(struct lwp *l, int signo)
{
	int exitsig, error, docore;
	struct proc *p;
	struct lwp *t;

	p = l->l_proc;

	KASSERT(mutex_owned(p->p_lock));
	KERNEL_UNLOCK_ALL(l, NULL);

	/*
	 * Don't permit coredump() multiple times in the same process.
	 * Call back into sigexit, where we will be suspended until
	 * the deed is done.  Note that this is a recursive call, but
	 * LW_WCORE will prevent us from coming back this way.
	 */
	if ((p->p_sflag & PS_WCORE) != 0) {
		lwp_lock(l);
		l->l_flag |= (LW_WCORE | LW_WEXIT | LW_WSUSPEND);
		lwp_unlock(l);
		mutex_exit(p->p_lock);
		lwp_userret(l);
		panic("sigexit 1");
		/* NOTREACHED */
	}

	/* If process is already on the way out, then bail now. */
	if ((p->p_sflag & PS_WEXIT) != 0) {
		mutex_exit(p->p_lock);
		lwp_exit(l);
		panic("sigexit 2");
		/* NOTREACHED */
	}

	/*
	 * Prepare all other LWPs for exit.  If dumping core, suspend them
	 * so that their registers are available long enough to be dumped.
	 */
	if ((docore = (sigprop[signo] & SA_CORE)) != 0) {
		p->p_sflag |= PS_WCORE;
		for (;;) {
			LIST_FOREACH(t, &p->p_lwps, l_sibling) {
				lwp_lock(t);
				if (t == l) {
					t->l_flag &= ~LW_WSUSPEND;
					lwp_unlock(t);
					continue;
				}
				t->l_flag |= (LW_WCORE | LW_WEXIT);
				lwp_suspend(l, t);
			}

			if (p->p_nrlwps == 1)
				break;

			/*
			 * Kick any LWPs sitting in lwp_wait1(), and wait
			 * for everyone else to stop before proceeding.
			 */
			p->p_nlwpwait++;
			cv_broadcast(&p->p_lwpcv);
			cv_wait(&p->p_lwpcv, p->p_lock);
			p->p_nlwpwait--;
		}
	}

	exitsig = signo;
	p->p_acflag |= AXSIG;
	p->p_sigctx.ps_signo = signo;

	if (docore) {
		mutex_exit(p->p_lock);
		if ((error = coredump(l, NULL)) == 0)
			exitsig |= WCOREFLAG;

		if (kern_logsigexit) {
			int uid = l->l_cred ?
			    (int)kauth_cred_geteuid(l->l_cred) : -1;

			if (error)
				log(LOG_INFO, lognocoredump, p->p_pid,
				    p->p_comm, uid, signo, error);
			else
				log(LOG_INFO, logcoredump, p->p_pid,
				    p->p_comm, uid, signo);
		}

#ifdef PAX_SEGVGUARD
		pax_segvguard(l, p->p_textvp, p->p_comm, true);
#endif /* PAX_SEGVGUARD */
		/* Acquire the sched state mutex.  exit1() will release it. */
		mutex_enter(p->p_lock);
	}

	/* No longer dumping core. */
	p->p_sflag &= ~PS_WCORE;

	exit1(l, W_EXITCODE(0, exitsig));
	/* NOTREACHED */
}

/*
 * Put process 'p' into the stopped state and optionally, notify the parent.
 */
void
proc_stop(struct proc *p, int notify, int signo)
{
	struct lwp *l;

	KASSERT(mutex_owned(p->p_lock));

	/*
	 * First off, set the stopping indicator and bring all sleeping
	 * LWPs to a halt so they are included in p->p_nrlwps.  We mustn't
	 * unlock between here and the p->p_nrlwps check below.
	 */
	p->p_sflag |= PS_STOPPING;
	if (notify)
		p->p_sflag |= PS_NOTIFYSTOP;
	else
		p->p_sflag &= ~PS_NOTIFYSTOP;
	membar_producer();

	proc_stop_lwps(p);

	/*
	 * If there are no LWPs available to take the signal, then we
	 * signal the parent process immediately.  Otherwise, the last
	 * LWP to stop will take care of it.
	 */

	if (p->p_nrlwps == 0) {
		proc_stop_done(p, true, PS_NOCLDSTOP);
	} else {
		/*
		 * Have the remaining LWPs come to a halt, and trigger
		 * proc_stop_callout() to ensure that they do.
		 */
		LIST_FOREACH(l, &p->p_lwps, l_sibling)
			sigpost(l, SIG_DFL, SA_STOP, signo);
		callout_schedule(&proc_stop_ch, 1);
	}
}

/*
 * When stopping a process, we do not immediately set sleeping LWPs stopped,
 * but wait for them to come to a halt at the kernel-user boundary.  This is
 * to allow LWPs to release any locks that they may hold before stopping.
 *
 * Non-interruptible sleeps can be long, and there is the potential for an
 * LWP to begin sleeping interruptibly soon after the process has been set
 * stopping (PS_STOPPING).  These LWPs will not notice that the process is
 * stopping, and so complete halt of the process and the return of status
 * information to the parent could be delayed indefinitely.
 *
 * To handle this race, proc_stop_callout() runs once per tick while there
 * are stopping processes in the system.  It sets LWPs that are sleeping
 * interruptibly into the LSSTOP state.
 *
 * Note that we are not concerned about keeping all LWPs stopped while the
 * process is stopped: stopped LWPs can awaken briefly to handle signals.
 * What we do need to ensure is that all LWPs in a stopping process have
 * stopped at least once, so that notification can be sent to the parent
 * process.
 */
static void
proc_stop_callout(void *cookie)
{
	bool more, restart;
	struct proc *p;

	(void)cookie;

	do {
		restart = false;
		more = false;

		mutex_enter(proc_lock);
		PROCLIST_FOREACH(p, &allproc) {
			if ((p->p_flag & PK_MARKER) != 0)
				continue;
			mutex_enter(p->p_lock);

			if ((p->p_sflag & PS_STOPPING) == 0) {
				mutex_exit(p->p_lock);
				continue;
			}

			/* Stop any LWPs sleeping interruptibly. */
			proc_stop_lwps(p);
			if (p->p_nrlwps == 0) {
				/*
				 * We brought the process to a halt.
				 * Mark it as stopped and notify the
				 * parent.
				 */
				if ((p->p_sflag & PS_NOTIFYSTOP) != 0) {
					/*
					 * Note that proc_stop_done() will
					 * drop p->p_lock briefly.
					 * Arrange to restart and check
					 * all processes again.
					 */
					restart = true;
				}
				proc_stop_done(p, true, PS_NOCLDSTOP);
			} else
				more = true;

			mutex_exit(p->p_lock);
			if (restart)
				break;
		}
		mutex_exit(proc_lock);
	} while (restart);

	/*
	 * If we noted processes that are stopping but still have
	 * running LWPs, then arrange to check again in 1 tick.
	 */
	if (more)
		callout_schedule(&proc_stop_ch, 1);
}

/*
 * Given a process in state SSTOP, set the state back to SACTIVE and
 * move LSSTOP'd LWPs to LSSLEEP or make them runnable.
 */
void
proc_unstop(struct proc *p)
{
	struct lwp *l;
	int sig;

	KASSERT(mutex_owned(proc_lock));
	KASSERT(mutex_owned(p->p_lock));

	p->p_stat = SACTIVE;
	p->p_sflag &= ~PS_STOPPING;
	sig = p->p_xstat;

	if (!p->p_waited)
		p->p_pptr->p_nstopchild--;

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		if (l->l_stat != LSSTOP) {
			lwp_unlock(l);
			continue;
		}
		if (l->l_wchan == NULL) {
			setrunnable(l);
			continue;
		}
		if (sig && (l->l_flag & LW_SINTR) != 0) {
			setrunnable(l);
			sig = 0;
		} else {
			l->l_stat = LSSLEEP;
			p->p_nrlwps++;
			lwp_unlock(l);
		}
	}
}

static int
filt_sigattach(struct knote *kn)
{
	struct proc *p = curproc;

	kn->kn_obj = p;
	kn->kn_flags |= EV_CLEAR;	/* automatically set */

	mutex_enter(p->p_lock);
	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
	mutex_exit(p->p_lock);

	return (0);
}

static void
filt_sigdetach(struct knote *kn)
{
	struct proc *p = kn->kn_obj;

	mutex_enter(p->p_lock);
	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
	mutex_exit(p->p_lock);
}

/*
 * Signal knotes are shared with proc knotes, so we apply a mask to
 * the hint in order to differentiate them from process hints.  This
 * could be avoided by using a signal-specific knote list, but probably
 * isn't worth the trouble.
 */
static int
filt_signal(struct knote *kn, long hint)
{

	if (hint & NOTE_SIGNAL) {
		hint &= ~NOTE_SIGNAL;

		if (kn->kn_id == hint)
			kn->kn_data++;
	}
	return (kn->kn_data != 0);
}

const struct filterops sig_filtops = {
	0, filt_sigattach, filt_sigdetach, filt_signal
};

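/*
 * Userland view (a sketch, not part of this file's interfaces): the
 * filter above backs EVFILT_SIGNAL, which counts deliveries of a given
 * signal to the registering process, e.g.:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, 0);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */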