/*	$NetBSD: kern_sig.c,v 1.240 2006/11/22 02:02:51 elad Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sig.c	8.14 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.240 2006/11/22 02:02:51 elad Exp $");

#include "opt_coredump.h"
#include "opt_ktrace.h"
#include "opt_ptrace.h"
#include "opt_multiprocessor.h"
#include "opt_compat_sunos.h"
#include "opt_compat_netbsd.h"
#include "opt_compat_netbsd32.h"
#include "opt_pax.h"

#define	SIGPROP		/* include signal properties table */
#include <sys/param.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/buf.h>
#include <sys/acct.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/wait.h>
#include <sys/ktrace.h>
#include <sys/syslog.h>
#include <sys/stat.h>
#include <sys/core.h>
#include <sys/filedesc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/ucontext.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/exec.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <machine/cpu.h>

#include <sys/user.h>		/* for coredump */

#ifdef PAX_SEGVGUARD
#include <sys/pax.h>
#endif /* PAX_SEGVGUARD */

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>

#ifdef COREDUMP
static int	build_corename(struct proc *, char *, const char *, size_t);
#endif
static void	ksiginfo_exithook(struct proc *, void *);
static void	ksiginfo_queue(struct proc *, const ksiginfo_t *, ksiginfo_t **);
static ksiginfo_t *ksiginfo_dequeue(struct proc *, int);
static void	kpsignal2(struct proc *, const ksiginfo_t *);

sigset_t	contsigmask, stopsigmask, sigcantmask;

struct pool	sigacts_pool;	/* memory pool for sigacts structures */

/*
 * struct sigacts memory pool allocator.
 */

static void *
sigacts_poolpage_alloc(struct pool *pp, int flags)
{

	return (void *)uvm_km_alloc(kernel_map,
	    (PAGE_SIZE)*2, (PAGE_SIZE)*2,
	    ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)
	    | UVM_KMF_WIRED);
}

static void
sigacts_poolpage_free(struct pool *pp, void *v)
{
	uvm_km_free(kernel_map, (vaddr_t)v, (PAGE_SIZE)*2, UVM_KMF_WIRED);
}

static struct pool_allocator sigactspool_allocator = {
	.pa_alloc = sigacts_poolpage_alloc,
	.pa_free = sigacts_poolpage_free,
};

static POOL_INIT(siginfo_pool, sizeof(siginfo_t), 0, 0, 0, "siginfo",
    &pool_allocator_nointr);
static POOL_INIT(ksiginfo_pool, sizeof(ksiginfo_t), 0, 0, 0, "ksiginfo", NULL);

static ksiginfo_t *
ksiginfo_alloc(int prflags)
{
	int s;
	ksiginfo_t *ksi;

	s = splsoftclock();
	ksi = pool_get(&ksiginfo_pool, prflags);
	splx(s);
	return ksi;
}

static void
ksiginfo_free(ksiginfo_t *ksi)
{
	int s;

	s = splsoftclock();
	pool_put(&ksiginfo_pool, ksi);
	splx(s);
}
/*
 * Remove and return the first ksiginfo element that matches our requested
 * signal, or return NULL if none is found.
 */
static ksiginfo_t *
ksiginfo_dequeue(struct proc *p, int signo)
{
	ksiginfo_t *ksi;
	int s;

	s = splsoftclock();
	simple_lock(&p->p_sigctx.ps_silock);
	CIRCLEQ_FOREACH(ksi, &p->p_sigctx.ps_siginfo, ksi_list) {
		if (ksi->ksi_signo == signo) {
			CIRCLEQ_REMOVE(&p->p_sigctx.ps_siginfo, ksi, ksi_list);
			goto out;
		}
	}
	ksi = NULL;
out:
	simple_unlock(&p->p_sigctx.ps_silock);
	splx(s);
	return ksi;
}

/*
 * Append a new ksiginfo element to the list of pending ksiginfo's, if
 * we need to (SA_SIGINFO was requested).  For a non-RT signal we replace
 * the entry if one already exists in the queue; for RT signals, or for
 * non-RT signals without an existing entry, we add a new entry.
 */
static void
ksiginfo_queue(struct proc *p, const ksiginfo_t *ksi, ksiginfo_t **newkp)
{
	ksiginfo_t *kp;
	struct sigaction *sa = &SIGACTION_PS(p->p_sigacts, ksi->ksi_signo);
	int s;

	if ((sa->sa_flags & SA_SIGINFO) == 0)
		return;

	/*
	 * If there's no info, don't save it.
	 */
	if (KSI_EMPTY_P(ksi))
		return;

	s = splsoftclock();
	simple_lock(&p->p_sigctx.ps_silock);
#ifdef notyet	/* XXX: QUEUING */
	if (ksi->ksi_signo < SIGRTMIN)
#endif
	{
		CIRCLEQ_FOREACH(kp, &p->p_sigctx.ps_siginfo, ksi_list) {
			if (kp->ksi_signo == ksi->ksi_signo) {
				KSI_COPY(ksi, kp);
				goto out;
			}
		}
	}
	if (newkp && *newkp) {
		kp = *newkp;
		*newkp = NULL;
	} else {
		SCHED_ASSERT_UNLOCKED();
		kp = ksiginfo_alloc(PR_NOWAIT);
		if (kp == NULL) {
#ifdef DIAGNOSTIC
			printf("Out of memory allocating siginfo for pid %d\n",
			    p->p_pid);
#endif
			goto out;
		}
	}
	*kp = *ksi;
	CIRCLEQ_INSERT_TAIL(&p->p_sigctx.ps_siginfo, kp, ksi_list);
out:
	simple_unlock(&p->p_sigctx.ps_silock);
	splx(s);
}

/*
 * Free all pending ksiginfo on exit.
 */
static void
ksiginfo_exithook(struct proc *p, void *v)
{
	int s;

	s = splsoftclock();
	simple_lock(&p->p_sigctx.ps_silock);
	while (!CIRCLEQ_EMPTY(&p->p_sigctx.ps_siginfo)) {
		ksiginfo_t *ksi = CIRCLEQ_FIRST(&p->p_sigctx.ps_siginfo);
		CIRCLEQ_REMOVE(&p->p_sigctx.ps_siginfo, ksi, ksi_list);
		ksiginfo_free(ksi);
	}
	simple_unlock(&p->p_sigctx.ps_silock);
	splx(s);
}

/*
 * Initialize signal-related data structures.
 */
void
signal_init(void)
{

	sigactspool_allocator.pa_pagesz = (PAGE_SIZE)*2;

	pool_init(&sigacts_pool, sizeof(struct sigacts), 0, 0, 0, "sigapl",
	    sizeof(struct sigacts) > PAGE_SIZE ?
	    &sigactspool_allocator : &pool_allocator_nointr);

	exithook_establish(ksiginfo_exithook, NULL);
	exechook_establish(ksiginfo_exithook, NULL);
}

/*
 * Create an initial sigctx structure for the new process np, using the
 * same signal state as pp.  If 'share' is set, share the sigctx_proc
 * part, otherwise just copy it from the parent.
 */
void
sigactsinit(struct proc *np, struct proc *pp, int share)
{
	struct sigacts *ps;

	if (share) {
		np->p_sigacts = pp->p_sigacts;
		pp->p_sigacts->sa_refcnt++;
	} else {
		ps = pool_get(&sigacts_pool, PR_WAITOK);
		if (pp)
			memcpy(ps, pp->p_sigacts, sizeof(struct sigacts));
		else
			memset(ps, '\0', sizeof(struct sigacts));
		ps->sa_refcnt = 1;
		np->p_sigacts = ps;
	}
}
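
/*
 * Illustrative only (not part of the original source): the expected call
 * pattern for sigactsinit() is from process creation, roughly:
 *
 *	sigactsinit(child, parent, 1);	share the parent's sigacts, as
 *					for a clone(2)-style fork that
 *					shares signal handlers
 *	sigactsinit(child, parent, 0);	give the child a private copy,
 *					as for an ordinary fork(2)
 *
 * The reference taken here is dropped in sigactsfree() below.
 */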
/*
 * Make this process not share its sigctx, maintaining all
 * signal state.
 */
void
sigactsunshare(struct proc *p)
{
	struct sigacts *oldps;

	if (p->p_sigacts->sa_refcnt == 1)
		return;

	oldps = p->p_sigacts;
	sigactsinit(p, NULL, 0);

	if (--oldps->sa_refcnt == 0)
		pool_put(&sigacts_pool, oldps);
}

/*
 * Release a sigctx structure.
 */
void
sigactsfree(struct sigacts *ps)
{

	if (--ps->sa_refcnt > 0)
		return;

	pool_put(&sigacts_pool, ps);
}

int
sigaction1(struct proc *p, int signum, const struct sigaction *nsa,
	struct sigaction *osa, const void *tramp, int vers)
{
	struct sigacts *ps;
	int prop;

	ps = p->p_sigacts;
	if (signum <= 0 || signum >= NSIG)
		return (EINVAL);

	/*
	 * Trampoline ABI version 0 is reserved for the legacy
	 * kernel-provided on-stack trampoline.  Conversely, if we are
	 * using a non-0 ABI version, we must have a trampoline.  Only
	 * validate the vers if a new sigaction was supplied.  Emulations
	 * use legacy kernel trampolines with version 0, so check for
	 * that as well.
	 */
	if ((vers != 0 && tramp == NULL) ||
#ifdef SIGTRAMP_VALID
	    (nsa != NULL &&
	    ((vers == 0) ?
		(p->p_emul->e_sigcode == NULL) :
		!SIGTRAMP_VALID(vers))) ||
#endif
	    (vers == 0 && tramp != NULL))
		return (EINVAL);

	if (osa)
		*osa = SIGACTION_PS(ps, signum);

	if (nsa) {
		if (nsa->sa_flags & ~SA_ALLBITS)
			return (EINVAL);

		prop = sigprop[signum];
		if (prop & SA_CANTMASK)
			return (EINVAL);

		(void) splsched();	/* XXXSMP */
		SIGACTION_PS(ps, signum) = *nsa;
		ps->sa_sigdesc[signum].sd_tramp = tramp;
		ps->sa_sigdesc[signum].sd_vers = vers;
		sigminusset(&sigcantmask, &SIGACTION_PS(ps, signum).sa_mask);
		if ((prop & SA_NORESET) != 0)
			SIGACTION_PS(ps, signum).sa_flags &= ~SA_RESETHAND;
		if (signum == SIGCHLD) {
			if (nsa->sa_flags & SA_NOCLDSTOP)
				p->p_flag |= P_NOCLDSTOP;
			else
				p->p_flag &= ~P_NOCLDSTOP;
			if (nsa->sa_flags & SA_NOCLDWAIT) {
				/*
				 * Paranoia: since SA_NOCLDWAIT is implemented
				 * by reparenting the dying child to PID 1 (and
				 * trust it to reap the zombie), PID 1 itself
				 * is forbidden to set SA_NOCLDWAIT.
				 */
				if (p->p_pid == 1)
					p->p_flag &= ~P_NOCLDWAIT;
				else
					p->p_flag |= P_NOCLDWAIT;
			} else
				p->p_flag &= ~P_NOCLDWAIT;

			if (nsa->sa_handler == SIG_IGN) {
				/*
				 * Paranoia: same as above.
				 */
				if (p->p_pid == 1)
					p->p_flag &= ~P_CLDSIGIGN;
				else
					p->p_flag |= P_CLDSIGIGN;
			} else
				p->p_flag &= ~P_CLDSIGIGN;

		}
		if ((nsa->sa_flags & SA_NODEFER) == 0)
			sigaddset(&SIGACTION_PS(ps, signum).sa_mask, signum);
		else
			sigdelset(&SIGACTION_PS(ps, signum).sa_mask, signum);
		/*
		 * Set bit in p_sigctx.ps_sigignore for signals that are set to
		 * SIG_IGN, and for signals set to SIG_DFL where the default is
		 * to ignore.  However, don't put SIGCONT in
		 * p_sigctx.ps_sigignore, as we have to restart the process.
		 */
		if (nsa->sa_handler == SIG_IGN ||
		    (nsa->sa_handler == SIG_DFL && (prop & SA_IGNORE) != 0)) {
			/* never to be seen again */
			sigdelset(&p->p_sigctx.ps_siglist, signum);
			if (signum != SIGCONT) {
				/* easier in psignal */
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			}
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
		} else {
			sigdelset(&p->p_sigctx.ps_sigignore, signum);
			if (nsa->sa_handler == SIG_DFL)
				sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			else
				sigaddset(&p->p_sigctx.ps_sigcatch, signum);
		}
		(void) spl0();
	}

	return (0);
}
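
/*
 * Illustrative only (not part of the original source): from userland the
 * SIGCHLD special cases above are reached through sigaction(2).  For
 * example, a process that never wants to reap its children could do:
 *
 *	struct sigaction sa;
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_handler = SIG_DFL;
 *	sa.sa_flags = SA_NOCLDWAIT;
 *	sigaction(SIGCHLD, &sa, NULL);
 *
 * which sets P_NOCLDWAIT above, so exiting children are reparented to
 * init and reaped there instead of lingering as zombies.
 */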
#ifdef COMPAT_16
/* ARGSUSED */
int
compat_16_sys___sigaction14(struct lwp *l, void *v, register_t *retval)
{
	struct compat_16_sys___sigaction14_args /* {
		syscallarg(int)				signum;
		syscallarg(const struct sigaction *)	nsa;
		syscallarg(struct sigaction *)		osa;
	} */ *uap = v;
	struct proc *p;
	struct sigaction nsa, osa;
	int error;

	if (SCARG(uap, nsa)) {
		error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
		if (error)
			return (error);
	}
	p = l->l_proc;
	error = sigaction1(p, SCARG(uap, signum),
	    SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
	    NULL, 0);
	if (error)
		return (error);
	if (SCARG(uap, osa)) {
		error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
		if (error)
			return (error);
	}
	return (0);
}
#endif

/* ARGSUSED */
int
sys___sigaction_sigtramp(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigaction_sigtramp_args /* {
		syscallarg(int)				signum;
		syscallarg(const struct sigaction *)	nsa;
		syscallarg(struct sigaction *)		osa;
		syscallarg(void *)			tramp;
		syscallarg(int)				vers;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct sigaction nsa, osa;
	int error;

	if (SCARG(uap, nsa)) {
		error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
		if (error)
			return (error);
	}
	error = sigaction1(p, SCARG(uap, signum),
	    SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
	    SCARG(uap, tramp), SCARG(uap, vers));
	if (error)
		return (error);
	if (SCARG(uap, osa)) {
		error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
		if (error)
			return (error);
	}
	return (0);
}

/*
 * Initialize signal state for process 0;
 * set to ignore signals that are ignored by default and disable the signal
 * stack.
 */
void
siginit(struct proc *p)
{
	struct sigacts *ps;
	int signum, prop;

	ps = p->p_sigacts;
	sigemptyset(&contsigmask);
	sigemptyset(&stopsigmask);
	sigemptyset(&sigcantmask);
	for (signum = 1; signum < NSIG; signum++) {
		prop = sigprop[signum];
		if (prop & SA_CONT)
			sigaddset(&contsigmask, signum);
		if (prop & SA_STOP)
			sigaddset(&stopsigmask, signum);
		if (prop & SA_CANTMASK)
			sigaddset(&sigcantmask, signum);
		if (prop & SA_IGNORE && signum != SIGCONT)
			sigaddset(&p->p_sigctx.ps_sigignore, signum);
		sigemptyset(&SIGACTION_PS(ps, signum).sa_mask);
		SIGACTION_PS(ps, signum).sa_flags = SA_RESTART;
	}
	sigemptyset(&p->p_sigctx.ps_sigcatch);
	p->p_sigctx.ps_sigwaited = NULL;
	p->p_flag &= ~P_NOCLDSTOP;

	/*
	 * Reset stack state to the user stack.
	 */
	p->p_sigctx.ps_sigstk.ss_flags = SS_DISABLE;
	p->p_sigctx.ps_sigstk.ss_size = 0;
	p->p_sigctx.ps_sigstk.ss_sp = 0;

	/* One reference. */
	ps->sa_refcnt = 1;
}

/*
 * Reset signals for an exec of the specified process.
 */
void
execsigs(struct proc *p)
{
	struct sigacts *ps;
	int signum, prop;

	sigactsunshare(p);

	ps = p->p_sigacts;

	/*
	 * Reset caught signals.  Held signals remain held
	 * through p_sigctx.ps_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	for (signum = 1; signum < NSIG; signum++) {
		if (sigismember(&p->p_sigctx.ps_sigcatch, signum)) {
			prop = sigprop[signum];
			if (prop & SA_IGNORE) {
				if ((prop & SA_CONT) == 0)
					sigaddset(&p->p_sigctx.ps_sigignore,
					    signum);
				sigdelset(&p->p_sigctx.ps_siglist, signum);
			}
			SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
		}
		sigemptyset(&SIGACTION_PS(ps, signum).sa_mask);
		SIGACTION_PS(ps, signum).sa_flags = SA_RESTART;
	}
	sigemptyset(&p->p_sigctx.ps_sigcatch);
	p->p_sigctx.ps_sigwaited = NULL;

	/*
	 * Reset the "no zombies if child dies" flag, as Solaris does.
	 */
	p->p_flag &= ~(P_NOCLDWAIT | P_CLDSIGIGN);
	if (SIGACTION_PS(ps, SIGCHLD).sa_handler == SIG_IGN)
		SIGACTION_PS(ps, SIGCHLD).sa_handler = SIG_DFL;

	/*
	 * Reset stack state to the user stack.
	 */
	p->p_sigctx.ps_sigstk.ss_flags = SS_DISABLE;
	p->p_sigctx.ps_sigstk.ss_size = 0;
	p->p_sigctx.ps_sigstk.ss_sp = 0;
}

int
sigprocmask1(struct proc *p, int how, const sigset_t *nss, sigset_t *oss)
{

	if (oss)
		*oss = p->p_sigctx.ps_sigmask;

	if (nss) {
		(void)splsched();	/* XXXSMP */
		switch (how) {
		case SIG_BLOCK:
			sigplusset(nss, &p->p_sigctx.ps_sigmask);
			break;
		case SIG_UNBLOCK:
			sigminusset(nss, &p->p_sigctx.ps_sigmask);
			CHECKSIGS(p);
			break;
		case SIG_SETMASK:
			p->p_sigctx.ps_sigmask = *nss;
			CHECKSIGS(p);
			break;
		default:
			(void)spl0();	/* XXXSMP */
			return (EINVAL);
		}
		sigminusset(&sigcantmask, &p->p_sigctx.ps_sigmask);
		(void)spl0();	/* XXXSMP */
	}

	return (0);
}
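
/*
 * Illustrative only (not part of the original source): sigprocmask1()
 * implements the semantics seen from sigprocmask(2), e.g.:
 *
 *	sigset_t set;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, NULL);	add SIGINT to the mask
 *	sigprocmask(SIG_UNBLOCK, &set, NULL);	remove it again
 *
 * SIGKILL and SIGSTOP are silently stripped from any new mask via
 * sigcantmask above, so they can never be blocked.
 */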
/*
 * Manipulate signal mask.
 * Note that we receive new mask, not pointer,
 * and return old mask as return value;
 * the library stub does the rest.
 */
int
sys___sigprocmask14(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigprocmask14_args /* {
		syscallarg(int)			how;
		syscallarg(const sigset_t *)	set;
		syscallarg(sigset_t *)		oset;
	} */ *uap = v;
	struct proc *p;
	sigset_t nss, oss;
	int error;

	if (SCARG(uap, set)) {
		error = copyin(SCARG(uap, set), &nss, sizeof(nss));
		if (error)
			return (error);
	}
	p = l->l_proc;
	error = sigprocmask1(p, SCARG(uap, how),
	    SCARG(uap, set) ? &nss : 0, SCARG(uap, oset) ? &oss : 0);
	if (error)
		return (error);
	if (SCARG(uap, oset)) {
		error = copyout(&oss, SCARG(uap, oset), sizeof(oss));
		if (error)
			return (error);
	}
	return (0);
}

void
sigpending1(struct proc *p, sigset_t *ss)
{

	*ss = p->p_sigctx.ps_siglist;
	sigminusset(&p->p_sigctx.ps_sigmask, ss);
}

/* ARGSUSED */
int
sys___sigpending14(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigpending14_args /* {
		syscallarg(sigset_t *)	set;
	} */ *uap = v;
	struct proc *p;
	sigset_t ss;

	p = l->l_proc;
	sigpending1(p, &ss);
	return (copyout(&ss, SCARG(uap, set), sizeof(ss)));
}

int
sigsuspend1(struct proc *p, const sigset_t *ss)
{
	struct sigacts *ps;

	ps = p->p_sigacts;
	if (ss) {
		/*
		 * When returning from sigpause, we want
		 * the old mask to be restored after the
		 * signal handler has finished.  Thus, we
		 * save it here and mark the sigctx structure
		 * to indicate this.
		 */
		p->p_sigctx.ps_oldmask = p->p_sigctx.ps_sigmask;
		p->p_sigctx.ps_flags |= SAS_OLDMASK;
		(void) splsched();	/* XXXSMP */
		p->p_sigctx.ps_sigmask = *ss;
		CHECKSIGS(p);
		sigminusset(&sigcantmask, &p->p_sigctx.ps_sigmask);
		(void) spl0();	/* XXXSMP */
	}

	while (tsleep((caddr_t) ps, PPAUSE|PCATCH, "pause", 0) == 0)
		/* void */;

	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}

/*
 * Suspend process until signal, providing mask to be set
 * in the meantime.  Note nonstandard calling convention:
 * libc stub passes mask, not pointer, to save a copyin.
 */
/* ARGSUSED */
int
sys___sigsuspend14(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigsuspend14_args /* {
		syscallarg(const sigset_t *)	set;
	} */ *uap = v;
	struct proc *p;
	sigset_t ss;
	int error;

	if (SCARG(uap, set)) {
		error = copyin(SCARG(uap, set), &ss, sizeof(ss));
		if (error)
			return (error);
	}

	p = l->l_proc;
	return (sigsuspend1(p, SCARG(uap, set) ? &ss : 0));
}
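
/*
 * Illustrative only (not part of the original source): the classic
 * race-free wait-for-signal pattern served by sigsuspend1() is:
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!got_usr1)		flag set by the SIGUSR1 handler
 *		sigsuspend(&old);	atomically unblock and sleep
 *
 * The SAS_OLDMASK handling above is what lets postsig() restore the
 * pre-sigsuspend mask after the handler has run.
 */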
int
sigaltstack1(struct proc *p, const struct sigaltstack *nss,
	struct sigaltstack *oss)
{

	if (oss)
		*oss = p->p_sigctx.ps_sigstk;

	if (nss) {
		if (nss->ss_flags & ~SS_ALLBITS)
			return (EINVAL);

		if (nss->ss_flags & SS_DISABLE) {
			if (p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK)
				return (EINVAL);
		} else {
			if (nss->ss_size < MINSIGSTKSZ)
				return (ENOMEM);
		}
		p->p_sigctx.ps_sigstk = *nss;
	}

	return (0);
}

/* ARGSUSED */
int
sys___sigaltstack14(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigaltstack14_args /* {
		syscallarg(const struct sigaltstack *)	nss;
		syscallarg(struct sigaltstack *)	oss;
	} */ *uap = v;
	struct proc *p;
	struct sigaltstack nss, oss;
	int error;

	if (SCARG(uap, nss)) {
		error = copyin(SCARG(uap, nss), &nss, sizeof(nss));
		if (error)
			return (error);
	}
	p = l->l_proc;
	error = sigaltstack1(p,
	    SCARG(uap, nss) ? &nss : 0, SCARG(uap, oss) ? &oss : 0);
	if (error)
		return (error);
	if (SCARG(uap, oss)) {
		error = copyout(&oss, SCARG(uap, oss), sizeof(oss));
		if (error)
			return (error);
	}
	return (0);
}

/* ARGSUSED */
int
sys_kill(struct lwp *l, void *v, register_t *retval)
{
	struct sys_kill_args /* {
		syscallarg(int)	pid;
		syscallarg(int)	signum;
	} */ *uap = v;
	struct proc *p;
	ksiginfo_t ksi;
	int signum = SCARG(uap, signum);
	int error;

	if ((u_int)signum >= NSIG)
		return (EINVAL);
	KSI_INIT(&ksi);
	ksi.ksi_signo = signum;
	ksi.ksi_code = SI_USER;
	ksi.ksi_pid = l->l_proc->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
	if (SCARG(uap, pid) > 0) {
		/* kill single process */
		if ((p = pfind(SCARG(uap, pid))) == NULL)
			return (ESRCH);
		error = kauth_authorize_process(l->l_cred,
		    KAUTH_PROCESS_CANSIGNAL, p, (void *)(uintptr_t)signum,
		    NULL, NULL);
		if (error)
			return error;
		if (signum)
			kpsignal2(p, &ksi);
		return (0);
	}
	switch (SCARG(uap, pid)) {
	case -1:		/* broadcast signal */
		return (killpg1(l, &ksi, 0, 1));
	case 0:			/* signal own process group */
		return (killpg1(l, &ksi, 0, 0));
	default:		/* negative explicit process group */
		return (killpg1(l, &ksi, -SCARG(uap, pid), 0));
	}
	/* NOTREACHED */
}
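
/*
 * Illustrative only (not part of the original source): kill(2) encodes
 * its target in the pid argument, which maps onto the cases above:
 *
 *	kill(123, SIGTERM);	signal process 123
 *	kill(0, SIGTERM);	signal the caller's own process group
 *	kill(-123, SIGTERM);	signal process group 123
 *	kill(-1, SIGTERM);	broadcast to all signalable processes
 *
 * A signum of 0 performs only the lookup and permission checks, which
 * is the usual way to probe whether a process exists.
 */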
/*
 * Common code for kill process group/broadcast kill.
 * cp is calling process.
 */
int
killpg1(struct lwp *l, ksiginfo_t *ksi, int pgid, int all)
{
	struct proc *p, *cp;
	kauth_cred_t pc;
	struct pgrp *pgrp;
	int nfound;
	int signum = ksi->ksi_signo;

	cp = l->l_proc;
	pc = l->l_cred;
	nfound = 0;
	if (all) {
		/*
		 * broadcast
		 */
		proclist_lock_read();
		PROCLIST_FOREACH(p, &allproc) {
			if (p->p_pid <= 1 || p->p_flag & P_SYSTEM || p == cp ||
			    kauth_authorize_process(pc, KAUTH_PROCESS_CANSIGNAL,
			    p, (void *)(uintptr_t)signum, NULL, NULL) != 0)
				continue;
			nfound++;
			if (signum)
				kpsignal2(p, ksi);
		}
		proclist_unlock_read();
	} else {
		if (pgid == 0)
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = cp->p_pgrp;
		else {
			pgrp = pgfind(pgid);
			if (pgrp == NULL)
				return (ESRCH);
		}
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
			    kauth_authorize_process(pc, KAUTH_PROCESS_CANSIGNAL,
			    p, (void *)(uintptr_t)signum, NULL, NULL) != 0)
				continue;
			nfound++;
			if (signum && P_ZOMBIE(p) == 0)
				kpsignal2(p, ksi);
		}
	}
	return (nfound ? 0 : ESRCH);
}

/*
 * Send a signal to a process group.
 */
void
gsignal(int pgid, int signum)
{
	ksiginfo_t ksi;

	KSI_INIT_EMPTY(&ksi);
	ksi.ksi_signo = signum;
	kgsignal(pgid, &ksi, NULL);
}

void
kgsignal(int pgid, ksiginfo_t *ksi, void *data)
{
	struct pgrp *pgrp;

	if (pgid && (pgrp = pgfind(pgid)))
		kpgsignal(pgrp, ksi, data, 0);
}

/*
 * Send a signal to a process group.  If checkctty is 1,
 * limit to members which have a controlling terminal.
 */
void
pgsignal(struct pgrp *pgrp, int sig, int checkctty)
{
	ksiginfo_t ksi;

	KSI_INIT_EMPTY(&ksi);
	ksi.ksi_signo = sig;
	kpgsignal(pgrp, &ksi, NULL, checkctty);
}

void
kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty)
{
	struct proc *p;

	if (pgrp)
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist)
			if (checkctty == 0 || p->p_flag & P_CONTROLT)
				kpsignal(p, ksi, data);
}

/*
 * Send a signal caused by a trap to the current process.
 * If it will be caught immediately, deliver it with correct code.
 * Otherwise, post it normally.
 */
void
trapsignal(struct lwp *l, const ksiginfo_t *ksi)
{
	struct proc *p;
	struct sigacts *ps;
	int signum = ksi->ksi_signo;

	KASSERT(KSI_TRAP_P(ksi));

	p = l->l_proc;
	ps = p->p_sigacts;
	if ((p->p_flag & P_TRACED) == 0 &&
	    sigismember(&p->p_sigctx.ps_sigcatch, signum) &&
	    !sigismember(&p->p_sigctx.ps_sigmask, signum)) {
		p->p_stats->p_ru.ru_nsignals++;
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG))
			ktrpsig(l, signum, SIGACTION_PS(ps, signum).sa_handler,
			    &p->p_sigctx.ps_sigmask, ksi);
#endif
		kpsendsig(l, ksi, &p->p_sigctx.ps_sigmask);
		(void) splsched();	/* XXXSMP */
		sigplusset(&SIGACTION_PS(ps, signum).sa_mask,
		    &p->p_sigctx.ps_sigmask);
		if (SIGACTION_PS(ps, signum).sa_flags & SA_RESETHAND) {
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
		}
		(void) spl0();	/* XXXSMP */
	} else {
		p->p_sigctx.ps_lwp = l->l_lid;
		/* XXX for core dump/debugger */
		p->p_sigctx.ps_signo = ksi->ksi_signo;
		p->p_sigctx.ps_code = ksi->ksi_trap;
		kpsignal2(p, ksi);
	}
}

/*
 * Fill in signal information and signal the parent for a child status change.
 */
void
child_psignal(struct proc *p)
{
	ksiginfo_t ksi;

	KSI_INIT(&ksi);
	ksi.ksi_signo = SIGCHLD;
	ksi.ksi_code = p->p_xstat == SIGCONT ? CLD_CONTINUED : CLD_STOPPED;
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(p->p_cred);
	ksi.ksi_status = p->p_xstat;
	ksi.ksi_utime = p->p_stats->p_ru.ru_utime.tv_sec;
	ksi.ksi_stime = p->p_stats->p_ru.ru_stime.tv_sec;
	kpsignal2(p->p_pptr, &ksi);
}

/*
 * Send the signal to the process.  If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *   o When a stop signal is sent to a sleeping process that takes the
 *     default action, the process is stopped without awakening it.
 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 *     regardless of the signal action (eg, blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 */
void
psignal(struct proc *p, int signum)
{
	ksiginfo_t ksi;

	KSI_INIT_EMPTY(&ksi);
	ksi.ksi_signo = signum;
	kpsignal2(p, &ksi);
}
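
/*
 * Illustrative only (not part of the original source): kpsignal() below
 * extends psignal() with a driver-supplied object pointer.  A driver
 * delivering, say, SIGIO can pass the file's f_data as 'data'; the scan
 * below then recovers the matching descriptor number so the handler's
 * siginfo can carry it in ksi_fd.
 */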
void
kpsignal(struct proc *p, ksiginfo_t *ksi, void *data)
{

	if ((p->p_flag & P_WEXIT) == 0 && data) {
		size_t fd;
		struct filedesc *fdp = p->p_fd;

		ksi->ksi_fd = -1;
		for (fd = 0; fd < fdp->fd_nfiles; fd++) {
			struct file *fp = fdp->fd_ofiles[fd];
			/* XXX: lock? */
			if (fp && fp->f_data == data) {
				ksi->ksi_fd = fd;
				break;
			}
		}
	}
	kpsignal2(p, ksi);
}

static void
kpsignal2(struct proc *p, const ksiginfo_t *ksi)
{
	struct lwp *l, *suspended = NULL;
	struct sadata_vp *vp;
	ksiginfo_t *newkp;
	int s = 0, prop, allsusp;
	sig_t action;
	int signum = ksi->ksi_signo;

#ifdef DIAGNOSTIC
	if (signum <= 0 || signum >= NSIG)
		panic("psignal signal number %d", signum);

	SCHED_ASSERT_UNLOCKED();
#endif

	/*
	 * Notify any interested parties in the signal.
	 */
	KNOTE(&p->p_klist, NOTE_SIGNAL | signum);

	prop = sigprop[signum];

	/*
	 * If proc is traced, always give parent a chance.
	 */
	if (p->p_flag & P_TRACED) {
		action = SIG_DFL;

		/*
		 * If the process is being traced and the signal is being
		 * caught, make sure to save any ksiginfo.
		 */
		if (sigismember(&p->p_sigctx.ps_sigcatch, signum)) {
			SCHED_ASSERT_UNLOCKED();
			ksiginfo_queue(p, ksi, NULL);
		}
	} else {
		/*
		 * If the signal was the result of a trap and is currently
		 * masked but not caught, reset it to the default action,
		 * so that the process dumps core immediately instead of
		 * repeatedly taking the signal.
		 */
		if (KSI_TRAP_P(ksi)
		    && sigismember(&p->p_sigctx.ps_sigmask, signum)
		    && !sigismember(&p->p_sigctx.ps_sigcatch, signum)) {
			sigdelset(&p->p_sigctx.ps_sigignore, signum);
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			sigdelset(&p->p_sigctx.ps_sigmask, signum);
			SIGACTION(p, signum).sa_handler = SIG_DFL;
		}

		/*
		 * If the signal is being ignored,
		 * then we forget about it immediately.
		 * (Note: we don't set SIGCONT in p_sigctx.ps_sigignore,
		 * and if it is set to SIG_IGN,
		 * action will be SIG_DFL here.)
		 */
		if (sigismember(&p->p_sigctx.ps_sigignore, signum))
			return;
		if (sigismember(&p->p_sigctx.ps_sigmask, signum))
			action = SIG_HOLD;
		else if (sigismember(&p->p_sigctx.ps_sigcatch, signum))
			action = SIG_CATCH;
		else {
			action = SIG_DFL;

			if (prop & SA_KILL && p->p_nice > NZERO)
				p->p_nice = NZERO;

			/*
			 * If sending a tty stop signal to a member of an
			 * orphaned process group, discard the signal here if
			 * the action is default; don't stop the process below
			 * if sleeping, and don't clear any pending SIGCONT.
			 */
			if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0)
				return;
		}
	}

	if (prop & SA_CONT)
		sigminusset(&stopsigmask, &p->p_sigctx.ps_siglist);

	if (prop & SA_STOP)
		sigminusset(&contsigmask, &p->p_sigctx.ps_siglist);

	/*
	 * If the signal doesn't have SA_CANTMASK (no override for SIGKILL,
	 * please!), check if anything waits on it.  If yes, save the
	 * info into the provided ps_sigwaited, and wake up the waiter.
	 * The signal won't be processed further here.
	 */
	if ((prop & SA_CANTMASK) == 0
	    && p->p_sigctx.ps_sigwaited
	    && sigismember(p->p_sigctx.ps_sigwait, signum)
	    && p->p_stat != SSTOP) {
		p->p_sigctx.ps_sigwaited->ksi_info = ksi->ksi_info;
		p->p_sigctx.ps_sigwaited = NULL;
		wakeup_one(&p->p_sigctx.ps_sigwait);
		return;
	}

	sigaddset(&p->p_sigctx.ps_siglist, signum);
	/* CHECKSIGS() is "inlined" here. */
	p->p_sigctx.ps_sigcheck = 1;

	/*
	 * Defer further processing for signals which are held,
	 * except that stopped processes must be continued by SIGCONT.
	 */
	if (action == SIG_HOLD &&
	    ((prop & SA_CONT) == 0 || p->p_stat != SSTOP)) {
		SCHED_ASSERT_UNLOCKED();
		ksiginfo_queue(p, ksi, NULL);
		return;
	}

	/*
	 * Allocate a ksiginfo_t in case we need to insert it with the
	 * scheduler lock held, but only if this ksiginfo_t isn't empty.
	 */
	if (!KSI_EMPTY_P(ksi)) {
		newkp = ksiginfo_alloc(PR_NOWAIT);
		if (newkp == NULL) {
#ifdef DIAGNOSTIC
			printf("kpsignal2: couldn't allocate ksiginfo\n");
#endif
			return;
		}
	} else
		newkp = NULL;

	SCHED_LOCK(s);

	if (p->p_flag & P_SA) {
		allsusp = 0;
		l = NULL;
		if (p->p_stat == SACTIVE) {
			SLIST_FOREACH(vp, &p->p_sa->sa_vps, savp_next) {
				l = vp->savp_lwp;
				KDASSERT(l != NULL);
				if (l->l_flag & L_SA_IDLE) {
					/* wakeup idle LWP */
					goto found;
					/*NOTREACHED*/
				} else if (l->l_flag & L_SA_YIELD) {
					/* idle LWP is already waking up */
					goto out;
					/*NOTREACHED*/
				}
			}
			SLIST_FOREACH(vp, &p->p_sa->sa_vps, savp_next) {
				l = vp->savp_lwp;
				if (l->l_stat == LSRUN ||
				    l->l_stat == LSONPROC) {
					signotify(p);
					goto out;
					/*NOTREACHED*/
				}
				if (l->l_stat == LSSLEEP &&
				    l->l_flag & L_SINTR) {
					/* ok to signal vp lwp */
					break;
				} else
					l = NULL;
			}
		} else if (p->p_stat == SSTOP) {
			SLIST_FOREACH(vp, &p->p_sa->sa_vps, savp_next) {
				l = vp->savp_lwp;
				if (l->l_stat == LSSLEEP &&
				    (l->l_flag & L_SINTR) != 0)
					break;
				l = NULL;
			}
		}
	} else if (p->p_nrlwps > 0 && (p->p_stat != SSTOP)) {
		/*
		 * At least one LWP is running or on a run queue.
		 * The signal will be noticed when one of them returns
		 * to userspace.
		 */
		signotify(p);
		/*
		 * The signal will be noticed very soon.
		 */
		goto out;
		/*NOTREACHED*/
	} else {
		/*
		 * Find out if any of the sleeps are interruptible,
		 * and if all the live LWPs remaining are suspended.
		 */
		allsusp = 1;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l->l_stat == LSSLEEP &&
			    l->l_flag & L_SINTR)
				break;
			if (l->l_stat == LSSUSPENDED)
				suspended = l;
			else if ((l->l_stat != LSZOMB) &&
			    (l->l_stat != LSDEAD))
				allsusp = 0;
		}
	}

found:
	switch (p->p_stat) {
	case SACTIVE:

		if (l != NULL && (p->p_flag & P_TRACED))
			goto run;

		/*
		 * If SIGCONT is default (or ignored) and process is
		 * asleep, we are finished; the process should not
		 * be awakened.
		 */
		if ((prop & SA_CONT) && action == SIG_DFL) {
			sigdelset(&p->p_sigctx.ps_siglist, signum);
			goto done;
		}

		/*
		 * When a sleeping process receives a stop
		 * signal, process immediately if possible.
		 */
		if ((prop & SA_STOP) && action == SIG_DFL) {
			/*
			 * If a child holding parent blocked,
			 * stopping could cause deadlock.
			 */
			if (p->p_flag & P_PPWAIT) {
				goto out;
			}
			sigdelset(&p->p_sigctx.ps_siglist, signum);
			p->p_xstat = signum;
			proc_stop(p, 1);	/* XXXSMP: recurse? */
			SCHED_UNLOCK(s);
			if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) {
				child_psignal(p);
			}
			goto done_unlocked;
		}

		if (l == NULL) {
			/*
			 * Special case: SIGKILL of a process
			 * which is entirely composed of
			 * suspended LWPs should succeed.  We
			 * make this happen by unsuspending one of
			 * them.
			 */
			if (allsusp && (signum == SIGKILL)) {
				lwp_continue(suspended);
			}
			goto done;
		}
		/*
		 * All other (caught or default) signals
		 * cause the process to run.
		 */
		goto runfast;
		/*NOTREACHED*/
	case SSTOP:
		/* Process is stopped */
		/*
		 * If traced process is already stopped,
		 * then no further action is necessary.
		 */
		if (p->p_flag & P_TRACED)
			goto done;

		/*
		 * Kill signal always sets processes running,
		 * if possible.
		 */
		if (signum == SIGKILL) {
			l = proc_unstop(p);
			if (l)
				goto runfast;
			goto done;
		}

		if (prop & SA_CONT) {
			/*
			 * If SIGCONT is default (or ignored),
			 * we continue the process but don't
			 * leave the signal in ps_siglist, as
			 * it has no further action.  If
			 * SIGCONT is held, we continue the
			 * process and leave the signal in
			 * ps_siglist.  If the process catches
			 * SIGCONT, let it handle the signal
			 * itself.  If it isn't waiting on an
			 * event, then it goes back to run
			 * state.  Otherwise, process goes
			 * back to sleep state.
			 */
			if (action == SIG_DFL)
				sigdelset(&p->p_sigctx.ps_siglist,
				    signum);
			l = proc_unstop(p);
			if (l && (action == SIG_CATCH))
				goto runfast;
			goto out;
		}

		if (prop & SA_STOP) {
			/*
			 * Already stopped, don't need to stop again.
			 * (If we did the shell could get confused.)
			 */
			sigdelset(&p->p_sigctx.ps_siglist, signum);
			goto done;
		}

		/*
		 * If a lwp is sleeping interruptibly, then
		 * wake it up; it will run until the kernel
		 * boundary, where it will stop in issignal(),
		 * since p->p_stat is still SSTOP.  When the
		 * process is continued, it will be made
		 * runnable and can look at the signal.
		 */
		if (l)
			goto run;
		goto out;
	case SIDL:
		/* Process is being created by fork */
		/* XXX: We are not ready to receive signals yet */
		goto done;
	default:
		/* Else what? */
		panic("psignal: Invalid process state %d.", p->p_stat);
	}
	/*NOTREACHED*/

runfast:
	if (action == SIG_CATCH) {
		ksiginfo_queue(p, ksi, &newkp);
		action = SIG_HOLD;
	}
	/*
	 * Raise priority to at least PUSER.
	 */
	if (l->l_priority > PUSER)
		l->l_priority = PUSER;
run:
	if (action == SIG_CATCH) {
		ksiginfo_queue(p, ksi, &newkp);
		action = SIG_HOLD;
	}

	setrunnable(l);		/* XXXSMP: recurse? */
out:
	if (action == SIG_CATCH)
		ksiginfo_queue(p, ksi, &newkp);
done:
	SCHED_UNLOCK(s);

done_unlocked:
	if (newkp)
		ksiginfo_free(newkp);
}

siginfo_t *
siginfo_alloc(int flags)
{

	return pool_get(&siginfo_pool, flags);
}

void
siginfo_free(void *arg)
{

	pool_put(&siginfo_pool, arg);
}

void
kpsendsig(struct lwp *l, const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct proc *p = l->l_proc;
	struct lwp *le, *li;
	siginfo_t *si;
	int f;

	if (p->p_flag & P_SA) {

		/* XXXUPSXXX What if not on sa_vp? */

		f = l->l_flag & L_SA;
		l->l_flag &= ~L_SA;
		si = siginfo_alloc(PR_WAITOK);
		si->_info = ksi->ksi_info;
		le = li = NULL;
		if (KSI_TRAP_P(ksi))
			le = l;
		else
			li = l;
		if (sa_upcall(l, SA_UPCALL_SIGNAL | SA_UPCALL_DEFER, le, li,
		    sizeof(*si), si, siginfo_free) != 0) {
			siginfo_free(si);
#if 0
			if (KSI_TRAP_P(ksi))
				/* XXX What do we do here?? */;
#endif
		}
		l->l_flag |= f;
		return;
	}

	(*p->p_emul->e_sendsig)(ksi, mask);
}

static inline int firstsig(const sigset_t *);

static inline int
firstsig(const sigset_t *ss)
{
	int sig;

	sig = ffs(ss->__bits[0]);
	if (sig != 0)
		return (sig);
#if NSIG > 33
	sig = ffs(ss->__bits[1]);
	if (sig != 0)
		return (sig + 32);
#endif
#if NSIG > 65
	sig = ffs(ss->__bits[2]);
	if (sig != 0)
		return (sig + 64);
#endif
#if NSIG > 97
	sig = ffs(ss->__bits[3]);
	if (sig != 0)
		return (sig + 96);
#endif
	return (0);
}
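
/*
 * Illustrative only (not part of the original source): signals are
 * stored one bit per signal, with signal n occupying bit (n - 1) of
 * word (n - 1) / 32.  So if only SIGINT (2) is pending, __bits[0] is
 * 0x2 and ffs() returns 2 directly; for a signal in a later word,
 * e.g. signal 35, ffs(ss->__bits[1]) returns 3 and firstsig() yields
 * 3 + 32 = 35.
 */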
/*
 * If the current process has received a signal (should be caught or cause
 * termination, should interrupt current syscall), return the signal number.
 * Stop signals with default action are processed immediately, then cleared;
 * they aren't returned.  This is checked after each entry to the system for
 * a syscall or trap (though this can usually be done without calling issignal
 * by checking the pending signal masks in the CURSIG macro.)  The normal call
 * sequence is
 *
 *	while (signum = CURSIG(curlwp))
 *		postsig(signum);
 */
int
issignal(struct lwp *l)
{
	struct proc *p = l->l_proc;
	int s, signum, prop;
	sigset_t ss;

	/* Bail out if we do not own the virtual processor */
	if (l->l_flag & L_SA && l->l_savp->savp_lwp != l)
		return 0;

	KERNEL_PROC_LOCK(l);

	if (p->p_stat == SSTOP) {
		/*
		 * The process is stopped/stopping.  Stop ourselves now that
		 * we're on the kernel/userspace boundary.
		 */
		SCHED_LOCK(s);
		l->l_stat = LSSTOP;
		p->p_nrlwps--;
		if (p->p_flag & P_TRACED)
			goto sigtraceswitch;
		else
			goto sigswitch;
	}
	for (;;) {
		sigpending1(p, &ss);
		if (p->p_flag & P_PPWAIT)
			sigminusset(&stopsigmask, &ss);
		signum = firstsig(&ss);
		if (signum == 0) {			/* no signal to send */
			p->p_sigctx.ps_sigcheck = 0;
			KERNEL_PROC_UNLOCK(l);
			return (0);
		}
		/* take the signal! */
		sigdelset(&p->p_sigctx.ps_siglist, signum);

		/*
		 * We should see pending but ignored signals
		 * only if P_TRACED was on when they were posted.
		 */
		if (sigismember(&p->p_sigctx.ps_sigignore, signum) &&
		    (p->p_flag & P_TRACED) == 0)
			continue;

		if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
			/*
			 * If traced, always stop, and stay
			 * stopped until released by the debugger.
			 */
			p->p_xstat = signum;

			/* Emulation-specific handling of signal trace */
			if ((p->p_emul->e_tracesig != NULL) &&
			    ((*p->p_emul->e_tracesig)(p, signum) != 0))
				goto childresumed;

			if ((p->p_flag & P_FSTRACE) == 0)
				child_psignal(p);
			SCHED_LOCK(s);
			proc_stop(p, 1);
		sigtraceswitch:
			mi_switch(l, NULL);
			SCHED_ASSERT_UNLOCKED();
			splx(s);

		childresumed:
			/*
			 * If we are no longer being traced, or the parent
			 * didn't give us a signal, look for more signals.
			 */
			if ((p->p_flag & P_TRACED) == 0 || p->p_xstat == 0)
				continue;

			/*
			 * If the new signal is being masked, look for other
			 * signals.
			 */
			signum = p->p_xstat;
			p->p_xstat = 0;
			/*
			 * `p->p_sigctx.ps_siglist |= mask' is done
			 * in setrunnable().
			 */
			if (sigismember(&p->p_sigctx.ps_sigmask, signum))
				continue;
			/* take the signal! */
			sigdelset(&p->p_sigctx.ps_siglist, signum);
		}

		prop = sigprop[signum];

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */
		switch ((long)SIGACTION(p, signum).sa_handler) {

		case (long)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (p->p_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				printf("Process (pid %d) got signal %d\n",
				    p->p_pid, signum);
#endif
				break;		/* == ignore */
			}
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal.  However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				if (p->p_flag & P_TRACED ||
				    (p->p_pgrp->pg_jobc == 0 &&
				    prop & SA_TTYSTOP))
					break;	/* == ignore */
				p->p_xstat = signum;
				if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0)
					child_psignal(p);
				SCHED_LOCK(s);
				proc_stop(p, 1);
			sigswitch:
				mi_switch(l, NULL);
				SCHED_ASSERT_UNLOCKED();
				splx(s);
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else
				goto keep;
			/*NOTREACHED*/

		case (long)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
#ifdef DEBUG_ISSIGNAL
			if ((prop & SA_CONT) == 0 &&
			    (p->p_flag & P_TRACED) == 0)
				printf("issignal\n");
#endif
			break;		/* == ignore */

		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			goto keep;
		}
	}
	/* NOTREACHED */

keep:
	/* leave the signal for later */
	sigaddset(&p->p_sigctx.ps_siglist, signum);
	CHECKSIGS(p);
	KERNEL_PROC_UNLOCK(l);
	return (signum);
}
/*
 * Put the argument process into the stopped state and notify the parent
 * via wakeup.  Signals are handled elsewhere.  The process must not be
 * on the run queue.
 */
void
proc_stop(struct proc *p, int dowakeup)
{
	struct lwp *l;
	struct proc *parent;
	struct sadata_vp *vp;

	SCHED_ASSERT_LOCKED();

	/* XXX lock process LWP state */
	p->p_flag &= ~P_WAITED;
	p->p_stat = SSTOP;
	parent = p->p_pptr;
	parent->p_nstopchild++;

	if (p->p_flag & P_SA) {
		/*
		 * Only (try to) put the LWP on the VP in stopped
		 * state.
		 * All other LWPs will suspend in sa_setwoken()
		 * because the VP-LWP in stopped state cannot be
		 * repossessed.
		 */
		SLIST_FOREACH(vp, &p->p_sa->sa_vps, savp_next) {
			l = vp->savp_lwp;
			if (l->l_stat == LSONPROC && l->l_cpu == curcpu()) {
				l->l_stat = LSSTOP;
				p->p_nrlwps--;
			} else if (l->l_stat == LSRUN) {
				/* Remove LWP from the run queue */
				remrunqueue(l);
				l->l_stat = LSSTOP;
				p->p_nrlwps--;
			} else if (l->l_stat == LSSLEEP &&
			    l->l_flag & L_SA_IDLE) {
				l->l_flag &= ~L_SA_IDLE;
				l->l_stat = LSSTOP;
			}
		}
		goto out;
	}

	/*
	 * Put as many LWPs as possible in stopped state.
	 * Sleeping ones will notice the stopped state as they try to
	 * return to userspace.
	 */

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if (l->l_stat == LSONPROC) {
			/* XXX SMP this assumes that a LWP that is LSONPROC
			 * is curlwp and hence is about to be mi_switched
			 * away; the only callers of proc_stop() are:
			 * - psignal
			 * - issignal()
			 * For the former, proc_stop() is only called when
			 * no processes are running, so we don't worry.
			 * For the latter, proc_stop() is called right
			 * before mi_switch().
			 */
			l->l_stat = LSSTOP;
			p->p_nrlwps--;
		} else if (l->l_stat == LSRUN) {
			/* Remove LWP from the run queue */
			remrunqueue(l);
			l->l_stat = LSSTOP;
			p->p_nrlwps--;
		} else if ((l->l_stat == LSSLEEP) ||
		    (l->l_stat == LSSUSPENDED) ||
		    (l->l_stat == LSZOMB) ||
		    (l->l_stat == LSDEAD)) {
			/*
			 * Don't do anything; let sleeping LWPs
			 * discover the stopped state of the process
			 * on their way out of the kernel; otherwise,
			 * things like NFS threads that sleep with
			 * locks will block the rest of the system
			 * from getting any work done.
			 *
			 * Suspended/dead/zombie LWPs aren't going
			 * anywhere, so we don't need to touch them.
			 */
		}
#ifdef DIAGNOSTIC
		else {
			panic("proc_stop: process %d lwp %d "
			    "in unstoppable state %d.\n",
			    p->p_pid, l->l_lid, l->l_stat);
		}
#endif
	}

out:
	/* XXX unlock process LWP state */

	if (dowakeup)
		sched_wakeup((caddr_t)p->p_pptr);
}
/*
 * Given a process in state SSTOP, set the state back to SACTIVE and
 * move LSSTOP'd LWPs to LSSLEEP or make them runnable.
 *
 * If no LWPs ended up runnable (and therefore able to take a signal),
 * return a LWP that is sleeping interruptibly.  The caller can wake
 * that LWP up to take a signal.
 */
struct lwp *
proc_unstop(struct proc *p)
{
	struct lwp *l, *lr = NULL;
	struct sadata_vp *vp;
	int cantake = 0;

	SCHED_ASSERT_LOCKED();

	/*
	 * Our caller wants to be informed if there are only sleeping
	 * and interruptible LWPs left after we have run so that it
	 * can invoke setrunnable() if required - return one of the
	 * interruptible LWPs if this is the case.
	 */

	if (!(p->p_flag & P_WAITED))
		p->p_pptr->p_nstopchild--;
	p->p_stat = SACTIVE;
	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if (l->l_stat == LSRUN) {
			lr = NULL;
			cantake = 1;
		}
		if (l->l_stat != LSSTOP)
			continue;

		if (l->l_wchan != NULL) {
			l->l_stat = LSSLEEP;
			if ((cantake == 0) && (l->l_flag & L_SINTR)) {
				lr = l;
				cantake = 1;
			}
		} else {
			setrunnable(l);
			lr = NULL;
			cantake = 1;
		}
	}
	if (p->p_flag & P_SA) {
		/* Only consider returning the LWP on the VP. */
		SLIST_FOREACH(vp, &p->p_sa->sa_vps, savp_next) {
			lr = vp->savp_lwp;
			if (lr->l_stat == LSSLEEP) {
				if (lr->l_flag & L_SA_YIELD) {
					setrunnable(lr);
					break;
				} else if (lr->l_flag & L_SINTR)
					return lr;
			}
		}
		return NULL;
	}
	return lr;
}
/*
 * Take the action for the specified signal
 * from the current set of pending signals.
 */
void
postsig(int signum)
{
	struct lwp *l;
	struct proc *p;
	struct sigacts *ps;
	sig_t action;
	sigset_t *returnmask;

	l = curlwp;
	p = l->l_proc;
	ps = p->p_sigacts;
#ifdef DIAGNOSTIC
	if (signum == 0)
		panic("postsig");
#endif

	KERNEL_PROC_LOCK(l);

#ifdef MULTIPROCESSOR
	/*
	 * On MP, issignal() can return the same signal to multiple
	 * LWPs.  The LWPs will block above waiting for the kernel
	 * lock and the first LWP which gets through will then remove
	 * the signal from ps_siglist.  All other LWPs exit here.
	 */
	if (!sigismember(&p->p_sigctx.ps_siglist, signum)) {
		KERNEL_PROC_UNLOCK(l);
		return;
	}
#endif
	sigdelset(&p->p_sigctx.ps_siglist, signum);
	action = SIGACTION_PS(ps, signum).sa_handler;
	if (action == SIG_DFL) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG))
			ktrpsig(l, signum, action,
			    p->p_sigctx.ps_flags & SAS_OLDMASK ?
			    &p->p_sigctx.ps_oldmask : &p->p_sigctx.ps_sigmask,
			    NULL);
#endif
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		sigexit(l, signum);
		/* NOTREACHED */
	} else {
		ksiginfo_t *ksi;
		/*
		 * If we get here, the signal must be caught.
		 */
#ifdef DIAGNOSTIC
		if (action == SIG_IGN ||
		    sigismember(&p->p_sigctx.ps_sigmask, signum))
			panic("postsig action");
#endif
		/*
		 * Set the new mask value and also defer further
		 * occurrences of this signal.
		 *
		 * Special case: user has done a sigpause.  Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigpause is what we want
		 * restored after the signal processing is completed.
		 */
		if (p->p_sigctx.ps_flags & SAS_OLDMASK) {
			returnmask = &p->p_sigctx.ps_oldmask;
			p->p_sigctx.ps_flags &= ~SAS_OLDMASK;
		} else
			returnmask = &p->p_sigctx.ps_sigmask;
		p->p_stats->p_ru.ru_nsignals++;
		ksi = ksiginfo_dequeue(p, signum);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG))
			ktrpsig(l, signum, action,
			    p->p_sigctx.ps_flags & SAS_OLDMASK ?
			    &p->p_sigctx.ps_oldmask : &p->p_sigctx.ps_sigmask,
			    ksi);
#endif
		if (ksi == NULL) {
			ksiginfo_t ksi1;
			/*
			 * we did not save any siginfo for this, either
			 * because the signal was not caught, or because the
			 * user did not request SA_SIGINFO
			 */
			KSI_INIT_EMPTY(&ksi1);
			ksi1.ksi_signo = signum;
			kpsendsig(l, &ksi1, returnmask);
		} else {
			kpsendsig(l, ksi, returnmask);
			ksiginfo_free(ksi);
		}
		p->p_sigctx.ps_lwp = 0;
		p->p_sigctx.ps_code = 0;
		p->p_sigctx.ps_signo = 0;
		(void) splsched();	/* XXXSMP */
		sigplusset(&SIGACTION_PS(ps, signum).sa_mask,
		    &p->p_sigctx.ps_sigmask);
		if (SIGACTION_PS(ps, signum).sa_flags & SA_RESETHAND) {
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
		}
		(void) spl0();	/* XXXSMP */
	}

	KERNEL_PROC_UNLOCK(l);
}

/*
 * Kill the current process for stated reason.
 */
void
killproc(struct proc *p, const char *why)
{
	log(LOG_ERR, "pid %d was killed: %s\n", p->p_pid, why);
	uprintf("sorry, pid %d was killed: %s\n", p->p_pid, why);
	psignal(p, SIGKILL);
}

/*
 * Force the current process to exit with the specified signal, dumping core
 * if appropriate.  We bypass the normal tests for masked and caught signals,
 * allowing unrecoverable failures to terminate the process without changing
 * signal state.  Mark the accounting record with the signal termination.
 * If dumping core, save the signal number for the debugger.  Calls exit and
 * does not return.
 */

#if defined(DEBUG)
int	kern_logsigexit = 1;	/* not static to make public for sysctl */
#else
int	kern_logsigexit = 0;	/* not static to make public for sysctl */
#endif

static const char logcoredump[] =
    "pid %d (%s), uid %d: exited on signal %d (core dumped)\n";
static const char lognocoredump[] =
    "pid %d (%s), uid %d: exited on signal %d (core not dumped, err = %d)\n";
/* Wrapper function for use in p_userret */
static void
lwp_coredump_hook(struct lwp *l, void *arg)
{
	int s;

	/*
	 * Suspend ourselves, so that the kernel stack and therefore
	 * the userland registers saved in the trapframe are around
	 * for coredump() to write them out.
	 */
	KERNEL_PROC_LOCK(l);
	l->l_flag &= ~L_DETACHED;
	SCHED_LOCK(s);
	l->l_stat = LSSUSPENDED;
	l->l_proc->p_nrlwps--;
	/* XXX NJWLWP check if this makes sense here: */
	l->l_proc->p_stats->p_ru.ru_nvcsw++;
	mi_switch(l, NULL);
	SCHED_ASSERT_UNLOCKED();
	splx(s);

	lwp_exit(l);
}

void
sigexit(struct lwp *l, int signum)
{
	struct proc *p;
#if 0
	struct lwp *l2;
#endif
	int exitsig;
#ifdef COREDUMP
	int error;
#endif

	p = l->l_proc;

	/*
	 * Don't permit coredump() or exit1() multiple times
	 * in the same process.
	 */
	if (p->p_flag & P_WEXIT) {
		KERNEL_PROC_UNLOCK(l);
		(*p->p_userret)(l, p->p_userret_arg);
	}
	p->p_flag |= P_WEXIT;
	/* We don't want to switch away from exiting. */
	/* XXX multiprocessor: stop LWPs on other processors. */
#if 0
	if (p->p_flag & P_SA) {
		LIST_FOREACH(l2, &p->p_lwps, l_sibling)
			l2->l_flag &= ~L_SA;
		p->p_flag &= ~P_SA;
	}
#endif

	/* Make other LWPs stick around long enough to be dumped */
	p->p_userret = lwp_coredump_hook;
	p->p_userret_arg = NULL;

	exitsig = signum;
	p->p_acflag |= AXSIG;
	if (sigprop[signum] & SA_CORE) {
		p->p_sigctx.ps_signo = signum;
#ifdef COREDUMP
		if ((error = coredump(l, NULL)) == 0)
			exitsig |= WCOREFLAG;
#endif

		if (kern_logsigexit) {
			/* XXX What if we ever have really large UIDs? */
			int uid = l->l_cred ?
			    (int)kauth_cred_geteuid(l->l_cred) : -1;

#ifdef COREDUMP
			if (error)
				log(LOG_INFO, lognocoredump, p->p_pid,
				    p->p_comm, uid, signum, error);
			else
#endif
				log(LOG_INFO, logcoredump, p->p_pid,
				    p->p_comm, uid, signum);
		}

#ifdef PAX_SEGVGUARD
		pax_segvguard(l, p->p_textvp, p->p_comm, TRUE);
#endif /* PAX_SEGVGUARD */
	}

	exit1(l, W_EXITCODE(0, exitsig));
	/* NOTREACHED */
}

#ifdef COREDUMP
struct coredump_iostate {
	struct lwp *io_lwp;
	struct vnode *io_vp;
	kauth_cred_t io_cred;
	off_t io_offset;
};

int
coredump_write(void *cookie, enum uio_seg segflg, const void *data, size_t len)
{
	struct coredump_iostate *io = cookie;
	int error;

	error = vn_rdwr(UIO_WRITE, io->io_vp, __UNCONST(data), len,
	    io->io_offset, segflg,
	    IO_NODELOCKED|IO_UNIT, io->io_cred, NULL,
	    segflg == UIO_USERSPACE ? io->io_lwp : NULL);
	if (error) {
		printf("pid %d (%s): %s write of %zu@%p at %lld failed: %d\n",
		    io->io_lwp->l_proc->p_pid, io->io_lwp->l_proc->p_comm,
		    segflg == UIO_USERSPACE ? "user" : "system",
		    len, data, (long long) io->io_offset, error);
		return (error);
	}

	io->io_offset += len;
	return (0);
}
/*
 * Dump core to a file whose name is built from the core name template
 * (the pattern argument if supplied, else the process's pl_corename
 * limit), unless the process was set-id and set-id coredumps have not
 * been explicitly allowed.
 */
int
coredump(struct lwp *l, const char *pattern)
{
	struct vnode *vp;
	struct proc *p;
	struct vmspace *vm;
	kauth_cred_t cred;
	struct nameidata nd;
	struct vattr vattr;
	struct mount *mp;
	struct coredump_iostate io;
	int error, error1;
	char *name = NULL;

	p = l->l_proc;
	vm = p->p_vmspace;
	cred = l->l_cred;

	/*
	 * Make sure the process is not set-id, to prevent data leaks,
	 * unless set-id coredumps were specifically requested.
	 */
	if ((p->p_flag & P_SUGID) && !security_setidcore_dump)
		return EPERM;

	/*
	 * Refuse to core if the data + stack + user size is larger than
	 * the core dump limit.  XXX THIS IS WRONG, because of mapped
	 * data.
	 */
	if (USPACE + ctob(vm->vm_dsize + vm->vm_ssize) >=
	    p->p_rlimit[RLIMIT_CORE].rlim_cur)
		return EFBIG;		/* better error code? */

restart:
	/*
	 * The core dump will go in the current working directory.  Make
	 * sure that the directory is still there and that the mount flags
	 * allow us to write core dumps there.
	 */
	vp = p->p_cwdi->cwdi_cdir;
	if (vp->v_mount == NULL ||
	    (vp->v_mount->mnt_flag & MNT_NOCOREDUMP) != 0) {
		error = EPERM;
		goto done;
	}

	if ((p->p_flag & P_SUGID) && security_setidcore_dump)
		pattern = security_setidcore_path;

	if (pattern == NULL)
		pattern = p->p_limit->pl_corename;
	if (name == NULL) {
		name = PNBUF_GET();
	}
	if ((error = build_corename(p, name, pattern, MAXPATHLEN)) != 0)
		goto done;
	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, l);
	if ((error = vn_open(&nd, O_CREAT | O_NOFOLLOW | FWRITE,
	    S_IRUSR | S_IWUSR)) != 0)
		goto done;
	vp = nd.ni_vp;

	if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
		VOP_UNLOCK(vp, 0);
		if ((error = vn_close(vp, FWRITE, cred, l)) != 0)
			goto done;
		if ((error = vn_start_write(NULL, &mp,
		    V_WAIT | V_SLEEPONLY | V_PCATCH)) != 0)
			goto done;
		goto restart;
	}

	/* Don't dump to non-regular files or files with links. */
	if (vp->v_type != VREG ||
	    VOP_GETATTR(vp, &vattr, cred, l) || vattr.va_nlink != 1) {
		error = EINVAL;
		goto out;
	}
	VATTR_NULL(&vattr);
	vattr.va_size = 0;

	if ((p->p_flag & P_SUGID) && security_setidcore_dump) {
		vattr.va_uid = security_setidcore_owner;
		vattr.va_gid = security_setidcore_group;
		vattr.va_mode = security_setidcore_mode;
	}

	VOP_LEASE(vp, l, cred, LEASE_WRITE);
	VOP_SETATTR(vp, &vattr, cred, l);
	p->p_acflag |= ACORE;

	io.io_lwp = l;
	io.io_vp = vp;
	io.io_cred = cred;
	io.io_offset = 0;

	/* Now dump the actual core file. */
	error = (*p->p_execsw->es_coredump)(l, &io);
out:
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp, 0);
	error1 = vn_close(vp, FWRITE, cred, l);
	if (error == 0)
		error = error1;
done:
	if (name != NULL)
		PNBUF_PUT(name);
	return error;
}
#endif /* COREDUMP */
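/*
 * For illustration (all values hypothetical): for a process with p_comm
 * "httpd", pid 153, session login "www" and a start time of 1100000000
 * seconds, the format characters handled by build_corename() below
 * expand as
 *
 *	"%n.core"			-> "httpd.core"
 *	"/var/crash/%n.%p.core"		-> "/var/crash/httpd.153.core"
 *	"%u-%t.core"			-> "www-1100000000.core"
 */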
/*
 * Nonexistent system call: signal the process (which may want to
 * handle it).  Flag an error in case the process won't see the signal
 * immediately (it may be blocked or ignored).
 */
#ifndef PTRACE
__weak_alias(sys_ptrace, sys_nosys);
#endif

/* ARGSUSED */
int
sys_nosys(struct lwp *l, void *v, register_t *retval)
{
	struct proc *p;

	p = l->l_proc;
	psignal(p, SIGSYS);
	return (ENOSYS);
}

#ifdef COREDUMP
static int
build_corename(struct proc *p, char *dst, const char *src, size_t len)
{
	const char *s;
	char *d, *end;
	int i;

	for (s = src, d = dst, end = d + len; *s != '\0'; s++) {
		if (*s == '%') {
			switch (*(s + 1)) {
			case 'n':
				i = snprintf(d, end - d, "%s", p->p_comm);
				break;
			case 'p':
				i = snprintf(d, end - d, "%d", p->p_pid);
				break;
			case 'u':
				i = snprintf(d, end - d, "%.*s",
				    (int)sizeof p->p_pgrp->pg_session->s_login,
				    p->p_pgrp->pg_session->s_login);
				break;
			case 't':
				i = snprintf(d, end - d, "%ld",
				    p->p_stats->p_start.tv_sec);
				break;
			default:
				goto copy;
			}
			d += i;
			s++;
		} else {
copy:			*d = *s;
			d++;
		}
		if (d >= end)
			return (ENAMETOOLONG);
	}
	*d = '\0';
	return 0;
}
#endif /* COREDUMP */

void
getucontext(struct lwp *l, ucontext_t *ucp)
{
	struct proc *p;

	p = l->l_proc;

	ucp->uc_flags = 0;
	ucp->uc_link = l->l_ctxlink;

	(void)sigprocmask1(p, 0, NULL, &ucp->uc_sigmask);
	ucp->uc_flags |= _UC_SIGMASK;

	/*
	 * The (unsupplied) definition of the `current execution stack'
	 * in the System V Interface Definition appears to allow returning
	 * the main context stack.
	 */
	if ((p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK) == 0) {
		ucp->uc_stack.ss_sp = (void *)USRSTACK;
		ucp->uc_stack.ss_size = ctob(p->p_vmspace->vm_ssize);
		ucp->uc_stack.ss_flags = 0;	/* XXX, def. is Very Fishy */
	} else {
		/* Simply copy alternate signal execution stack. */
		ucp->uc_stack = p->p_sigctx.ps_sigstk;
	}
	ucp->uc_flags |= _UC_STACK;

	cpu_getmcontext(l, &ucp->uc_mcontext, &ucp->uc_flags);
}

/* ARGSUSED */
int
sys_getcontext(struct lwp *l, void *v, register_t *retval)
{
	struct sys_getcontext_args /* {
		syscallarg(struct __ucontext *) ucp;
	} */ *uap = v;
	ucontext_t uc;

	getucontext(l, &uc);

	return (copyout(&uc, SCARG(uap, ucp), sizeof (*SCARG(uap, ucp))));
}
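/*
 * A minimal user-land sketch (not compiled) of the libc interface these
 * syscalls back: getcontext(3) lands in sys_getcontext() above, and
 * setcontext(3) in sys_setcontext() below.
 */
#if 0
#include <ucontext.h>
#include <stdio.h>

int
main(void)
{
	ucontext_t uc;
	volatile int resumed = 0;

	getcontext(&uc);		/* capture CPU state, sigmask, stack */
	if (resumed++ == 0) {
		printf("first pass\n");
		setcontext(&uc);	/* resume at the getcontext() point */
	}
	printf("second pass\n");	/* reached after setcontext() */
	return 0;
}
#endif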
int
setucontext(struct lwp *l, const ucontext_t *ucp)
{
	struct proc *p;
	int error;

	p = l->l_proc;
	if ((error = cpu_setmcontext(l, &ucp->uc_mcontext, ucp->uc_flags)) != 0)
		return (error);
	l->l_ctxlink = ucp->uc_link;

	if ((ucp->uc_flags & _UC_SIGMASK) != 0)
		sigprocmask1(p, SIG_SETMASK, &ucp->uc_sigmask, NULL);

	/*
	 * If there was stack information, update whether or not we are
	 * still running on an alternate signal stack.
	 */
	if ((ucp->uc_flags & _UC_STACK) != 0) {
		if (ucp->uc_stack.ss_flags & SS_ONSTACK)
			p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;
		else
			p->p_sigctx.ps_sigstk.ss_flags &= ~SS_ONSTACK;
	}

	return 0;
}

/* ARGSUSED */
int
sys_setcontext(struct lwp *l, void *v, register_t *retval)
{
	struct sys_setcontext_args /* {
		syscallarg(const ucontext_t *) ucp;
	} */ *uap = v;
	ucontext_t uc;
	int error;

	error = copyin(SCARG(uap, ucp), &uc, sizeof (uc));
	if (error)
		return (error);
	if (!(uc.uc_flags & _UC_CPU))
		return (EINVAL);
	error = setucontext(l, &uc);
	if (error)
		return (error);

	return (EJUSTRETURN);
}

/*
 * sigtimedwait(2) system call, also used for the implementation
 * of sigwaitinfo() and sigwait().
 *
 * This only handles a single LWP in signal wait.  libpthread provides
 * its own sigtimedwait() wrapper to do the right thing for individual
 * threads.
 */
int
sys___sigtimedwait(struct lwp *l, void *v, register_t *retval)
{
	return __sigtimedwait1(l, v, retval, copyout, copyin, copyout);
}
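/*
 * A minimal user-land sketch (not compiled) of the interface implemented
 * below: block SIGUSR1, then wait up to five seconds for it to arrive.
 * The waited-for signals must be blocked, or normal delivery wins.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <time.h>

int
main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { 5, 0 };	/* five second timeout */

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	if (sigtimedwait(&set, &info, &ts) == -1)
		perror("sigtimedwait");	/* EAGAIN on timeout */
	else
		printf("got signal %d from pid %d\n",
		    info.si_signo, (int)info.si_pid);
	return 0;
}
#endif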
int
__sigtimedwait1(struct lwp *l, void *v, register_t *retval,
    copyout_t put_info, copyin_t fetch_timeout, copyout_t put_timeout)
{
	struct sys___sigtimedwait_args /* {
		syscallarg(const sigset_t *) set;
		syscallarg(siginfo_t *) info;
		syscallarg(struct timespec *) timeout;
	} */ *uap = v;
	sigset_t *waitset, twaitset;
	struct proc *p = l->l_proc;
	int error, signum;
	int timo = 0;
	struct timespec ts, tsstart;
	ksiginfo_t *ksi;

	memset(&tsstart, 0, sizeof tsstart);	/* XXX gcc */

	MALLOC(waitset, sigset_t *, sizeof(sigset_t), M_TEMP, M_WAITOK);

	if ((error = copyin(SCARG(uap, set), waitset, sizeof(sigset_t)))) {
		FREE(waitset, M_TEMP);
		return (error);
	}

	/*
	 * Silently ignore SA_CANTMASK signals.  psignal() would ignore
	 * SA_CANTMASK signals in waitset anyway; we strip them here only
	 * for the siglist check below.
	 */
	sigminusset(&sigcantmask, waitset);

	/*
	 * First scan siglist and check if there is a signal from
	 * our waitset already pending.
	 */
	twaitset = *waitset;
	__sigandset(&p->p_sigctx.ps_siglist, &twaitset);
	if ((signum = firstsig(&twaitset))) {
		/* found pending signal */
		sigdelset(&p->p_sigctx.ps_siglist, signum);
		ksi = ksiginfo_dequeue(p, signum);
		if (!ksi) {
			/* No queued siginfo, manufacture one */
			ksi = ksiginfo_alloc(PR_WAITOK);
			KSI_INIT(ksi);
			ksi->ksi_info._signo = signum;
			ksi->ksi_info._code = SI_USER;
		}

		goto sig;
	}

	/*
	 * Calculate timeout, if it was specified.  Don't leak waitset
	 * on the early-return paths.
	 */
	if (SCARG(uap, timeout)) {
		uint64_t ms;

		if ((error = (*fetch_timeout)(SCARG(uap, timeout), &ts,
		    sizeof(ts)))) {
			FREE(waitset, M_TEMP);
			return (error);
		}

		ms = (ts.tv_sec * 1000) + (ts.tv_nsec / 1000000);
		timo = mstohz(ms);
		if (timo == 0 && ts.tv_sec == 0 && ts.tv_nsec > 0)
			timo = 1;
		if (timo <= 0) {
			FREE(waitset, M_TEMP);
			return (EAGAIN);
		}

		/*
		 * Remember the current uptime; it is used in the
		 * ECANCELED/ERESTART case.
		 */
		getnanouptime(&tsstart);
	}

	/*
	 * Set up the ps_sigwait state.  Pass a pointer to malloced memory
	 * here; it is not possible to pass a pointer to a structure on the
	 * current process's stack, since the process might be swapped out
	 * by the time the signal is delivered.
	 */
	ksi = ksiginfo_alloc(PR_WAITOK);
	p->p_sigctx.ps_sigwaited = ksi;
	p->p_sigctx.ps_sigwait = waitset;

	/*
	 * Wait for signal to arrive.  We can either be woken up or
	 * time out.
	 */
	error = tsleep(&p->p_sigctx.ps_sigwait, PPAUSE|PCATCH, "sigwait", timo);

	/*
	 * Need to find out if we woke as a result of lwp_wakeup()
	 * or a signal outside our wait set.
	 */
	if (error == EINTR && p->p_sigctx.ps_sigwaited
	    && !firstsig(&p->p_sigctx.ps_siglist)) {
		/* wakeup via _lwp_wakeup() */
		error = ECANCELED;
	} else if (!error && p->p_sigctx.ps_sigwaited) {
		/* spurious wakeup - arrange for syscall restart */
		error = ERESTART;
		goto out;
	}

	/*
	 * On error, clear the sigwait indication.  psignal() clears it
	 * in the !error case.
	 */
	if (error) {
		p->p_sigctx.ps_sigwaited = NULL;

		/*
		 * If the sleep was interrupted (either by signal or wakeup),
		 * update the timeout and copy the new value back out.  It
		 * will be used when the syscall is restarted or called again.
		 */
		if (timo && (error == ERESTART || error == ECANCELED)) {
			struct timespec tsnow;
			int err;

			/* XXX double check the following change */
			getnanouptime(&tsnow);

			/* compute how much time has passed since start */
			timespecsub(&tsnow, &tsstart, &tsnow);
			/* subtract elapsed time from the timeout */
			timespecsub(&ts, &tsnow, &ts);

			if (ts.tv_sec < 0) {
				error = EAGAIN;
				goto out;
			}
			/* XXX double check the previous change */

			/* copy updated timeout to userland */
			if ((err = (*put_timeout)(&ts, SCARG(uap, timeout),
			    sizeof(ts)))) {
				error = err;
				goto out;
			}
		}

		goto out;
	}

	/*
	 * If a signal from the wait set arrived, copy it to userland.
	 * Copy only the used part of siginfo, the padding part is
	 * left unchanged (userland is not supposed to touch it anyway).
	 * Fall through to the cleanup below, so that waitset and ksi
	 * are released in the success path, too.
	 */
sig:
	error = (*put_info)(&ksi->ksi_info, SCARG(uap, info),
	    sizeof(ksi->ksi_info));

out:
	FREE(waitset, M_TEMP);
	ksiginfo_free(ksi);
	p->p_sigctx.ps_sigwait = NULL;

	return (error);
}

/*
 * Returns true if the signal is ignored or masked for the passed process.
 */
int
sigismasked(struct proc *p, int sig)
{

	return (sigismember(&p->p_sigctx.ps_sigignore, sig) ||
	    sigismember(&p->p_sigctx.ps_sigmask, sig));
}

static int
filt_sigattach(struct knote *kn)
{
	struct proc *p = curproc;

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;	/* automatically set */

	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);

	return (0);
}

static void
filt_sigdetach(struct knote *kn)
{
	struct proc *p = kn->kn_ptr.p_proc;

	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
}
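/*
 * For context (a sketch of the delivery side, not a verbatim quote of
 * it): signal delivery activates these knotes with the signal number
 * OR'ed into the NOTE_SIGNAL hint, roughly
 *
 *	KNOTE(&p->p_klist, NOTE_SIGNAL | signum);
 *
 * which is what filt_signal() below unpacks.
 */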
/*
 * Signal knotes are shared with proc knotes, so we apply a mask to
 * the hint in order to differentiate them from process hints.  This
 * could be avoided by using a signal-specific knote list, but probably
 * isn't worth the trouble.
 */
static int
filt_signal(struct knote *kn, long hint)
{

	if (hint & NOTE_SIGNAL) {
		hint &= ~NOTE_SIGNAL;

		if (kn->kn_id == hint)
			kn->kn_data++;
	}
	return (kn->kn_data != 0);
}

const struct filterops sig_filtops = {
	0, filt_sigattach, filt_sigdetach, filt_signal
};
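/*
 * A minimal user-land sketch (not compiled) of the EVFILT_SIGNAL filter
 * that sig_filtops implements: count SIGINT deliveries via kqueue.
 * Because filt_sigattach() forces EV_CLEAR, kev.data reports deliveries
 * since the previous kevent() retrieval.  SIGINT is ignored here only
 * so the default action doesn't terminate the sketch; the filter counts
 * delivery attempts independently of the signal's disposition.
 */
#if 0
#include <sys/event.h>
#include <signal.h>
#include <stdio.h>

int
main(void)
{
	struct kevent kev;
	int kq = kqueue();

	signal(SIGINT, SIG_IGN);
	EV_SET(&kev, SIGINT, EVFILT_SIGNAL, EV_ADD, 0, 0, 0);
	kevent(kq, &kev, 1, NULL, 0, NULL);

	for (;;) {
		if (kevent(kq, NULL, 0, &kev, 1, NULL) == 1)
			printf("SIGINT delivered %d time(s)\n",
			    (int)kev.data);
	}
	/* NOTREACHED */
	return 0;
}
#endif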