/*	$OpenBSD: kern_sig.c,v 1.183 2015/07/27 18:22:37 deraadt Exp $	*/
/*	$NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $	*/

/*
 * Copyright (c) 1997 Theo de Raadt. All rights reserved.
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sig.c	8.7 (Berkeley) 4/18/94
 */

#define	SIGPROP		/* include signal properties table */
#include <sys/param.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/queue.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/acct.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/wait.h>
#include <sys/ktrace.h>
#include <sys/stat.h>
#include <sys/core.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/ptrace.h>
#include <sys/sched.h>
#include <sys/user.h>
#include <sys/syslog.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

int	filt_sigattach(struct knote *kn);
void	filt_sigdetach(struct knote *kn);
int	filt_signal(struct knote *kn, long hint);

struct filterops sig_filtops =
	{ 0, filt_sigattach, filt_sigdetach, filt_signal };

void proc_stop(struct proc *p, int);
void proc_stop_sweep(void *);
struct timeout proc_stop_to;

int cansignal(struct proc *, struct process *, int);

struct pool sigacts_pool;	/* memory pool for sigacts structures */

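/*
 * Example of the check implemented by cansignal() below (illustrative,
 * not from the original file): a non-root process may kill(2) a setuid
 * child it started with SIGTERM or SIGINT, since those are on the
 * explicit PS_SUGID list, but SIGIO is refused unless sender and target
 * share the same ucred or the sender is root.
 */
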
/*
 * Can thread p send the signal signum to process qr?
 */
int
cansignal(struct proc *p, struct process *qr, int signum)
{
	struct process *pr = p->p_p;
	struct ucred *uc = p->p_ucred;
	struct ucred *quc = qr->ps_ucred;

	if (uc->cr_uid == 0)
		return (1);		/* root can always signal */

	if (pr == qr)
		return (1);		/* process can always signal itself */

	/* optimization: if the same creds then the tests below will pass */
	if (uc == quc)
		return (1);

	if (signum == SIGCONT && qr->ps_session == pr->ps_session)
		return (1);		/* SIGCONT in session */

	/*
	 * Using kill(), only certain signals can be sent to setugid
	 * child processes
	 */
	if (qr->ps_flags & PS_SUGID) {
		switch (signum) {
		case 0:
		case SIGKILL:
		case SIGINT:
		case SIGTERM:
		case SIGALRM:
		case SIGSTOP:
		case SIGTTIN:
		case SIGTTOU:
		case SIGTSTP:
		case SIGHUP:
		case SIGUSR1:
		case SIGUSR2:
			if (uc->cr_ruid == quc->cr_ruid ||
			    uc->cr_uid == quc->cr_ruid)
				return (1);
		}
		return (0);
	}

	if (uc->cr_ruid == quc->cr_ruid ||
	    uc->cr_ruid == quc->cr_svuid ||
	    uc->cr_uid == quc->cr_ruid ||
	    uc->cr_uid == quc->cr_svuid)
		return (1);
	return (0);
}

/*
 * Initialize signal-related data structures.
 */
void
signal_init(void)
{
	timeout_set(&proc_stop_to, proc_stop_sweep, NULL);

	pool_init(&sigacts_pool, sizeof(struct sigacts), 0, 0, PR_WAITOK,
	    "sigapl", NULL);
}

/*
 * Create an initial sigacts structure, using the same signal state
 * as p.
 */
struct sigacts *
sigactsinit(struct process *pr)
{
	struct sigacts *ps;

	ps = pool_get(&sigacts_pool, PR_WAITOK);
	memcpy(ps, pr->ps_sigacts, sizeof(struct sigacts));
	ps->ps_refcnt = 1;
	return (ps);
}

/*
 * Share a sigacts structure.
 */
struct sigacts *
sigactsshare(struct process *pr)
{
	struct sigacts *ps = pr->ps_sigacts;

	ps->ps_refcnt++;
	return ps;
}

/*
 * Initialize a new sigaltstack structure.
 */
void
sigstkinit(struct sigaltstack *ss)
{
	ss->ss_flags = SS_DISABLE;
	ss->ss_size = 0;
	ss->ss_sp = 0;
}

/*
 * Make this process not share its sigacts, maintaining all
 * signal state.
 */
void
sigactsunshare(struct process *pr)
{
	struct sigacts *newps;

	if (pr->ps_sigacts->ps_refcnt == 1)
		return;

	newps = sigactsinit(pr);
	sigactsfree(pr);
	pr->ps_sigacts = newps;
}

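/*
 * Illustrative sketch of how the constructors above are used at fork
 * time (see kern_fork.c for the real code; FORK_SIGHAND is the flag
 * requesting shared signal actions):
 *
 *	struct sigacts *sa;
 *
 *	if (flags & FORK_SIGHAND)
 *		sa = sigactsshare(parent);	(bump ps_refcnt)
 *	else
 *		sa = sigactsinit(parent);	(private copy, refcnt 1)
 *
 * execsigs() below calls sigactsunshare() so that exec never mutates
 * a sigacts that another process still references.
 */
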
/*
 * Release a sigacts structure.
 */
void
sigactsfree(struct process *pr)
{
	struct sigacts *ps = pr->ps_sigacts;

	if (--ps->ps_refcnt > 0)
		return;

	pr->ps_sigacts = NULL;

	pool_put(&sigacts_pool, ps);
}

/* ARGSUSED */
int
sys_sigaction(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigaction_args /* {
		syscallarg(int) signum;
		syscallarg(const struct sigaction *) nsa;
		syscallarg(struct sigaction *) osa;
	} */ *uap = v;
	struct sigaction vec;
#ifdef KTRACE
	struct sigaction ovec;
#endif
	struct sigaction *sa;
	const struct sigaction *nsa;
	struct sigaction *osa;
	struct sigacts *ps = p->p_p->ps_sigacts;
	int signum;
	int bit, error;

	signum = SCARG(uap, signum);
	nsa = SCARG(uap, nsa);
	osa = SCARG(uap, osa);

	if (signum <= 0 || signum >= NSIG ||
	    (nsa && (signum == SIGKILL || signum == SIGSTOP)))
		return (EINVAL);
	sa = &vec;
	if (osa) {
		sa->sa_handler = ps->ps_sigact[signum];
		sa->sa_mask = ps->ps_catchmask[signum];
		bit = sigmask(signum);
		sa->sa_flags = 0;
		if ((ps->ps_sigonstack & bit) != 0)
			sa->sa_flags |= SA_ONSTACK;
		if ((ps->ps_sigintr & bit) == 0)
			sa->sa_flags |= SA_RESTART;
		if ((ps->ps_sigreset & bit) != 0)
			sa->sa_flags |= SA_RESETHAND;
		if ((ps->ps_siginfo & bit) != 0)
			sa->sa_flags |= SA_SIGINFO;
		if (signum == SIGCHLD) {
			if ((ps->ps_flags & SAS_NOCLDSTOP) != 0)
				sa->sa_flags |= SA_NOCLDSTOP;
			if ((ps->ps_flags & SAS_NOCLDWAIT) != 0)
				sa->sa_flags |= SA_NOCLDWAIT;
		}
		if ((sa->sa_mask & bit) == 0)
			sa->sa_flags |= SA_NODEFER;
		sa->sa_mask &= ~bit;
		error = copyout(sa, osa, sizeof (vec));
		if (error)
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ovec = vec;
#endif
	}
	if (nsa) {
		error = copyin(nsa, sa, sizeof (vec));
		if (error)
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrsigaction(p, sa);
#endif
		setsigvec(p, signum, sa);
	}
#ifdef KTRACE
	if (osa && KTRPOINT(p, KTR_STRUCT))
		ktrsigaction(p, &ovec);
#endif
	return (0);
}

void
setsigvec(struct proc *p, int signum, struct sigaction *sa)
{
	struct sigacts *ps = p->p_p->ps_sigacts;
	int bit;
	int s;

	bit = sigmask(signum);
	/*
	 * Change setting atomically.
	 */
	s = splhigh();
	ps->ps_sigact[signum] = sa->sa_handler;
	if ((sa->sa_flags & SA_NODEFER) == 0)
		sa->sa_mask |= sigmask(signum);
	ps->ps_catchmask[signum] = sa->sa_mask &~ sigcantmask;
	if (signum == SIGCHLD) {
		if (sa->sa_flags & SA_NOCLDSTOP)
			atomic_setbits_int(&ps->ps_flags, SAS_NOCLDSTOP);
		else
			atomic_clearbits_int(&ps->ps_flags, SAS_NOCLDSTOP);
		/*
		 * If the SA_NOCLDWAIT flag is set or the handler
		 * is SIG_IGN we reparent the dying child to PID 1
		 * (init) which will reap the zombie.  Because we use
		 * init to do our dirty work we never set SAS_NOCLDWAIT
		 * for PID 1.
		 * XXX exit1 rework means this is unnecessary?
		 */
		if (initprocess->ps_sigacts != ps &&
		    ((sa->sa_flags & SA_NOCLDWAIT) ||
		    sa->sa_handler == SIG_IGN))
			atomic_setbits_int(&ps->ps_flags, SAS_NOCLDWAIT);
		else
			atomic_clearbits_int(&ps->ps_flags, SAS_NOCLDWAIT);
	}
	if ((sa->sa_flags & SA_RESETHAND) != 0)
		ps->ps_sigreset |= bit;
	else
		ps->ps_sigreset &= ~bit;
	if ((sa->sa_flags & SA_SIGINFO) != 0)
		ps->ps_siginfo |= bit;
	else
		ps->ps_siginfo &= ~bit;
	if ((sa->sa_flags & SA_RESTART) == 0)
		ps->ps_sigintr |= bit;
	else
		ps->ps_sigintr &= ~bit;
	if ((sa->sa_flags & SA_ONSTACK) != 0)
		ps->ps_sigonstack |= bit;
	else
		ps->ps_sigonstack &= ~bit;
	/*
	 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
	 * and for signals set to SIG_DFL where the default is to ignore.
	 * However, don't put SIGCONT in ps_sigignore,
	 * as we have to restart the process.
	 */
	if (sa->sa_handler == SIG_IGN ||
	    (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) {
		atomic_clearbits_int(&p->p_siglist, bit);
		if (signum != SIGCONT)
			ps->ps_sigignore |= bit;	/* easier in psignal */
		ps->ps_sigcatch &= ~bit;
	} else {
		ps->ps_sigignore &= ~bit;
		if (sa->sa_handler == SIG_DFL)
			ps->ps_sigcatch &= ~bit;
		else
			ps->ps_sigcatch |= bit;
	}
	splx(s);
}

/*
 * Initialize signal state for process 0;
 * set to ignore signals that are ignored by default.
 */
void
siginit(struct process *pr)
{
	struct sigacts *ps = pr->ps_sigacts;
	int i;

	for (i = 0; i < NSIG; i++)
		if (sigprop[i] & SA_IGNORE && i != SIGCONT)
			ps->ps_sigignore |= sigmask(i);
	ps->ps_flags = SAS_NOCLDWAIT | SAS_NOCLDSTOP;
}

/*
 * Reset signals for an exec by the specified thread.
 */
void
execsigs(struct proc *p)
{
	struct sigacts *ps;
	int nc, mask;

	sigactsunshare(p->p_p);
	ps = p->p_p->ps_sigacts;

	/*
	 * Reset caught signals.  Held signals remain held
	 * through p_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	while (ps->ps_sigcatch) {
		nc = ffs((long)ps->ps_sigcatch);
		mask = sigmask(nc);
		ps->ps_sigcatch &= ~mask;
		if (sigprop[nc] & SA_IGNORE) {
			if (nc != SIGCONT)
				ps->ps_sigignore |= mask;
			atomic_clearbits_int(&p->p_siglist, mask);
		}
		ps->ps_sigact[nc] = SIG_DFL;
	}
	/*
	 * Reset stack state to the user stack.
	 * Clear set of signals caught on the signal stack.
	 */
	sigstkinit(&p->p_sigstk);
	ps->ps_flags &= ~SAS_NOCLDWAIT;
	if (ps->ps_sigact[SIGCHLD] == SIG_IGN)
		ps->ps_sigact[SIGCHLD] = SIG_DFL;
}

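/*
 * Illustrative userland view of the mask-by-value convention used by
 * sys_sigprocmask() below (a sketch, not the actual libc stub):
 *
 *	int
 *	sigprocmask(int how, const sigset_t *set, sigset_t *oset)
 *	{
 *		sigset_t mask = 0, old;
 *
 *		if (set == NULL)
 *			how = SIG_BLOCK;	(block nothing: pure query)
 *		else
 *			mask = *set;
 *		old = syscall(SYS_sigprocmask, how, mask);
 *		if (oset != NULL)
 *			*oset = old;
 *		return (0);
 *	}
 *
 * The old mask comes back as the return value, so the kernel needs
 * neither a copyin nor a copyout.
 */
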
/*
 * Manipulate signal mask.
 * Note that we receive new mask, not pointer,
 * and return old mask as return value;
 * the library stub does the rest.
 */
int
sys_sigprocmask(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigprocmask_args /* {
		syscallarg(int) how;
		syscallarg(sigset_t) mask;
	} */ *uap = v;
	int error = 0;
	sigset_t mask;

	*retval = p->p_sigmask;
	mask = SCARG(uap, mask) &~ sigcantmask;

	switch (SCARG(uap, how)) {
	case SIG_BLOCK:
		atomic_setbits_int(&p->p_sigmask, mask);
		break;
	case SIG_UNBLOCK:
		atomic_clearbits_int(&p->p_sigmask, mask);
		break;
	case SIG_SETMASK:
		p->p_sigmask = mask;
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

/* ARGSUSED */
int
sys_sigpending(struct proc *p, void *v, register_t *retval)
{

	*retval = p->p_siglist;
	return (0);
}

/*
 * Temporarily replace calling proc's signal mask for the duration of a
 * system call.  Original signal mask will be restored by userret().
 */
void
dosigsuspend(struct proc *p, sigset_t newmask)
{
	KASSERT(p == curproc);

	p->p_oldmask = p->p_sigmask;
	atomic_setbits_int(&p->p_flag, P_SIGSUSPEND);
	p->p_sigmask = newmask;
}

/*
 * Suspend process until signal, providing mask to be set
 * in the meantime.  Note nonstandard calling convention:
 * libc stub passes mask, not pointer, to save a copyin.
 */
/* ARGSUSED */
int
sys_sigsuspend(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigsuspend_args /* {
		syscallarg(int) mask;
	} */ *uap = v;
	struct process *pr = p->p_p;
	struct sigacts *ps = pr->ps_sigacts;

	dosigsuspend(p, SCARG(uap, mask) &~ sigcantmask);
	while (tsleep(ps, PPAUSE|PCATCH, "pause", 0) == 0)
		/* void */;
	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}

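/*
 * The classic race-free wait served by the above (illustrative userland
 * sketch; child_exited is a hypothetical flag set by a SIGCHLD handler):
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!child_exited)
 *		sigsuspend(&old);	(atomically unblock and sleep)
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */
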
int
sigonstack(size_t stack)
{
	const struct sigaltstack *ss = &curproc->p_sigstk;

	return (ss->ss_flags & SS_DISABLE ? 0 :
	    (stack - (size_t)ss->ss_sp < ss->ss_size));
}

int
sys_sigaltstack(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigaltstack_args /* {
		syscallarg(const struct sigaltstack *) nss;
		syscallarg(struct sigaltstack *) oss;
	} */ *uap = v;
	struct sigaltstack ss;
	const struct sigaltstack *nss;
	struct sigaltstack *oss;
	int onstack = sigonstack(PROC_STACK(p));
	int error;

	nss = SCARG(uap, nss);
	oss = SCARG(uap, oss);

	if (oss != NULL) {
		ss = p->p_sigstk;
		if (onstack)
			ss.ss_flags |= SS_ONSTACK;
		if ((error = copyout(&ss, oss, sizeof(ss))))
			return (error);
	}
	if (nss == NULL)
		return (0);
	error = copyin(nss, &ss, sizeof(ss));
	if (error)
		return (error);
	if (onstack)
		return (EPERM);
	if (ss.ss_flags & ~SS_DISABLE)
		return (EINVAL);
	if (ss.ss_flags & SS_DISABLE) {
		p->p_sigstk.ss_flags = ss.ss_flags;
		return (0);
	}
	if (ss.ss_size < MINSIGSTKSZ)
		return (ENOMEM);
	p->p_sigstk = ss;
	return (0);
}

/* ARGSUSED */
int
sys_kill(struct proc *cp, void *v, register_t *retval)
{
	struct sys_kill_args /* {
		syscallarg(int) pid;
		syscallarg(int) signum;
	} */ *uap = v;
	struct proc *p;
	int pid = SCARG(uap, pid);
	int signum = SCARG(uap, signum);

	if (((u_int)signum) >= NSIG)
		return (EINVAL);
	if (pid > 0) {
		enum signal_type type = SPROCESS;

		/*
		 * If the target pid is > THREAD_PID_OFFSET then this
		 * must be a kill of another thread in the same process.
		 * Otherwise, this is a process kill and the target must
		 * be a main thread.
		 */
		if (pid > THREAD_PID_OFFSET) {
			if ((p = pfind(pid - THREAD_PID_OFFSET)) == NULL)
				return (ESRCH);
			if (p->p_p != cp->p_p)
				return (ESRCH);
			type = STHREAD;
		} else {
			/* XXX use prfind() */
			if ((p = pfind(pid)) == NULL)
				return (ESRCH);
			if (p->p_flag & P_THREAD)
				return (ESRCH);
			if (!cansignal(cp, p->p_p, signum))
				return (EPERM);
		}

		/* kill single process or thread */
		if (signum)
			ptsignal(p, signum, type);
		return (0);
	}
	switch (pid) {
	case -1:		/* broadcast signal */
		return (killpg1(cp, signum, 0, 1));
	case 0:			/* signal own process group */
		return (killpg1(cp, signum, 0, 0));
	default:		/* negative explicit process group */
		return (killpg1(cp, signum, -pid, 0));
	}
	/* NOTREACHED */
}

/*
 * Common code for kill process group/broadcast kill.
 * cp is calling process.
 */
int
killpg1(struct proc *cp, int signum, int pgid, int all)
{
	struct process *pr;
	struct pgrp *pgrp;
	int nfound = 0;

	if (all)
		/*
		 * broadcast
		 */
		LIST_FOREACH(pr, &allprocess, ps_list) {
			if (pr->ps_pid <= 1 ||
			    pr->ps_flags & (PS_SYSTEM | PS_NOBROADCASTKILL) ||
			    pr == cp->p_p || !cansignal(cp, pr, signum))
				continue;
			nfound++;
			if (signum)
				prsignal(pr, signum);
		}
	else {
		if (pgid == 0)
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = cp->p_p->ps_pgrp;
		else {
			pgrp = pgfind(pgid);
			if (pgrp == NULL)
				return (ESRCH);
		}
		LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist) {
			if (pr->ps_pid <= 1 || pr->ps_flags & PS_SYSTEM ||
			    !cansignal(cp, pr, signum))
				continue;
			nfound++;
			if (signum)
				prsignal(pr, signum);
		}
	}
	return (nfound ? 0 : ESRCH);
}

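/*
 * Summary of the pid encodings handled above (illustrative):
 *
 *	kill(pid, sig)		pid > 0: signal that process
 *	kill(tid + THREAD_PID_OFFSET, sig)
 *				a particular thread in the caller's process
 *	kill(0, sig)		every process in the caller's process group
 *	kill(-pgid, sig)	every process in process group pgid
 *	kill(-1, sig)		broadcast to every process the caller may
 *				signal (excluding system processes, init,
 *				and the caller itself)
 */
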
#define CANDELIVER(uid, euid, pr) \
	(euid == 0 || \
	(uid) == (pr)->ps_ucred->cr_ruid || \
	(uid) == (pr)->ps_ucred->cr_svuid || \
	(uid) == (pr)->ps_ucred->cr_uid || \
	(euid) == (pr)->ps_ucred->cr_ruid || \
	(euid) == (pr)->ps_ucred->cr_svuid || \
	(euid) == (pr)->ps_ucred->cr_uid)

/*
 * Deliver signum to pgid, but first check uid/euid against each
 * process and see if it is permitted.
 */
void
csignal(pid_t pgid, int signum, uid_t uid, uid_t euid)
{
	struct pgrp *pgrp;
	struct process *pr;

	if (pgid == 0)
		return;
	if (pgid < 0) {
		pgid = -pgid;
		if ((pgrp = pgfind(pgid)) == NULL)
			return;
		LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist)
			if (CANDELIVER(uid, euid, pr))
				prsignal(pr, signum);
	} else {
		if ((pr = prfind(pgid)) == NULL)
			return;
		if (CANDELIVER(uid, euid, pr))
			prsignal(pr, signum);
	}
}

/*
 * Send a signal to a process group.
 */
void
gsignal(int pgid, int signum)
{
	struct pgrp *pgrp;

	if (pgid && (pgrp = pgfind(pgid)))
		pgsignal(pgrp, signum, 0);
}

/*
 * Send a signal to a process group.  If checktty is 1,
 * limit to members which have a controlling terminal.
 */
void
pgsignal(struct pgrp *pgrp, int signum, int checkctty)
{
	struct process *pr;

	if (pgrp)
		LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist)
			if (checkctty == 0 || pr->ps_flags & PS_CONTROLT)
				prsignal(pr, signum);
}

/*
 * Send a signal caused by a trap to the current process.
 * If it will be caught immediately, deliver it with correct code.
 * Otherwise, post it normally.
 */
void
trapsignal(struct proc *p, int signum, u_long trapno, int code,
    union sigval sigval)
{
	struct process *pr = p->p_p;
	struct sigacts *ps = pr->ps_sigacts;
	int mask;

	mask = sigmask(signum);
	if ((pr->ps_flags & PS_TRACED) == 0 &&
	    (ps->ps_sigcatch & mask) != 0 &&
	    (p->p_sigmask & mask) == 0) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG)) {
			siginfo_t si;

			initsiginfo(&si, signum, trapno, code, sigval);
			ktrpsig(p, signum, ps->ps_sigact[signum],
			    p->p_sigmask, code, &si);
		}
#endif
		p->p_ru.ru_nsignals++;
		(*pr->ps_emul->e_sendsig)(ps->ps_sigact[signum], signum,
		    p->p_sigmask, trapno, code, sigval);
		atomic_setbits_int(&p->p_sigmask, ps->ps_catchmask[signum]);
		if ((ps->ps_sigreset & mask) != 0) {
			ps->ps_sigcatch &= ~mask;
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				ps->ps_sigignore |= mask;
			ps->ps_sigact[signum] = SIG_DFL;
		}
	} else {
		p->p_sisig = signum;
		p->p_sitrapno = trapno;	/* XXX for core dump/debugger */
		p->p_sicode = code;
		p->p_sigval = sigval;

		/*
		 * Signals like SIGBUS and SIGSEGV should not, when
		 * generated by the kernel, be ignorable or blockable.
		 * If it is and we're not being traced, then just kill
		 * the process.
		 */
		if ((pr->ps_flags & PS_TRACED) == 0 &&
		    (sigprop[signum] & SA_KILL) &&
		    ((p->p_sigmask & mask) || (ps->ps_sigignore & mask)))
			sigexit(p, signum);
		ptsignal(p, signum, STHREAD);
	}
}

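/*
 * Illustrative caller of the above (a sketch of what machine-dependent
 * trap() code does; the trap number name is the i386/amd64 one and the
 * fault address variable is hypothetical):
 *
 *	union sigval sv;
 *
 *	sv.sival_ptr = (void *)fault_addr;
 *	trapsignal(p, SIGSEGV, T_PAGEFLT, SEGV_MAPERR, sv);
 */
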
/*
 * Send the signal to the process.  If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *   o When a stop signal is sent to a sleeping process that takes the
 *     default action, the process is stopped without awakening it.
 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 *     regardless of the signal action (eg, blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 */
void
psignal(struct proc *p, int signum)
{
	ptsignal(p, signum, SPROCESS);
}

/*
 * type = SPROCESS	process signal, can be diverted (sigwait())
 *	XXX if blocked in all threads, mark as pending in struct process
 * type = STHREAD	thread signal, but should be propagated if unhandled
 * type = SPROPAGATED	propagated to this thread, so don't propagate again
 */
void
ptsignal(struct proc *p, int signum, enum signal_type type)
{
	int s, prop;
	sig_t action;
	int mask;
	struct process *pr = p->p_p;
	struct proc *q;
	int wakeparent = 0;

#ifdef DIAGNOSTIC
	if ((u_int)signum >= NSIG || signum == 0)
		panic("psignal signal number");
#endif

	/* Ignore signal if we are exiting */
	if (pr->ps_flags & PS_EXITING)
		return;

	mask = sigmask(signum);

	if (type == SPROCESS) {
		/* Accept SIGKILL to coredumping processes */
		if (pr->ps_flags & PS_COREDUMP && signum == SIGKILL) {
			if (pr->ps_single != NULL)
				p = pr->ps_single;
			atomic_setbits_int(&p->p_siglist, mask);
			return;
		}

		/*
		 * If the current thread can process the signal
		 * immediately (it's unblocked) then have it take it.
		 */
		q = curproc;
		if (q != NULL && q->p_p == pr && (q->p_flag & P_WEXIT) == 0 &&
		    (q->p_sigmask & mask) == 0)
			p = q;
		else {
			/*
			 * A process-wide signal can be diverted to a
			 * different thread that's in sigwait() for this
			 * signal.  If there isn't such a thread, then
			 * pick a thread that doesn't have it blocked so
			 * that the stop/kill consideration isn't
			 * delayed.  Otherwise, mark it pending on the
			 * main thread.
			 */
			TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
				/* ignore exiting threads */
				if (q->p_flag & P_WEXIT)
					continue;

				/* skip threads that have the signal blocked */
				if ((q->p_sigmask & mask) != 0)
					continue;

				/* okay, could send to this thread */
				p = q;

				/*
				 * sigsuspend, sigwait, ppoll/pselect, etc?
				 * Definitely go to this thread, as it's
				 * already blocked in the kernel.
				 */
				if (q->p_flag & P_SIGSUSPEND)
					break;
			}
		}
	}

	if (type != SPROPAGATED)
		KNOTE(&pr->ps_klist, NOTE_SIGNAL | signum);

	prop = sigprop[signum];

	/*
	 * If proc is traced, always give parent a chance.
	 */
	if (pr->ps_flags & PS_TRACED) {
		action = SIG_DFL;
		atomic_setbits_int(&p->p_siglist, mask);
	} else {
		/*
		 * If the signal is being ignored,
		 * then we forget about it immediately.
		 * (Note: we don't set SIGCONT in ps_sigignore,
		 * and if it is set to SIG_IGN,
		 * action will be SIG_DFL here.)
		 */
		if (pr->ps_sigacts->ps_sigignore & mask)
			return;
		if (p->p_sigmask & mask) {
			action = SIG_HOLD;
		} else if (pr->ps_sigacts->ps_sigcatch & mask) {
			action = SIG_CATCH;
		} else {
			action = SIG_DFL;

			if (prop & SA_KILL && pr->ps_nice > NZERO)
				pr->ps_nice = NZERO;

			/*
			 * If sending a tty stop signal to a member of an
			 * orphaned process group, discard the signal here if
			 * the action is default; don't stop the process below
			 * if sleeping, and don't clear any pending SIGCONT.
			 */
			if (prop & SA_TTYSTOP && pr->ps_pgrp->pg_jobc == 0)
				return;
		}

		atomic_setbits_int(&p->p_siglist, mask);
	}

	if (prop & SA_CONT)
		atomic_clearbits_int(&p->p_siglist, stopsigmask);

	if (prop & SA_STOP) {
		atomic_clearbits_int(&p->p_siglist, contsigmask);
		atomic_clearbits_int(&p->p_flag, P_CONTINUED);
	}

	/*
	 * XXX delay processing of SA_STOP signals unless action == SIG_DFL?
	 */
	if (prop & (SA_CONT | SA_STOP) && type != SPROPAGATED)
		TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link)
			if (q != p)
				ptsignal(q, signum, SPROPAGATED);

	/*
	 * Defer further processing for signals which are held,
	 * except that stopped processes must be continued by SIGCONT.
	 */
	if (action == SIG_HOLD && ((prop & SA_CONT) == 0 || p->p_stat != SSTOP))
		return;

	SCHED_LOCK(s);

	switch (p->p_stat) {

	case SSLEEP:
		/*
		 * If process is sleeping uninterruptibly
		 * we can't interrupt the sleep... the signal will
		 * be noticed when the process returns through
		 * trap() or syscall().
		 */
		if ((p->p_flag & P_SINTR) == 0)
			goto out;
		/*
		 * Process is sleeping and traced... make it runnable
		 * so it can discover the signal in issignal() and stop
		 * for the parent.
		 */
		if (pr->ps_flags & PS_TRACED)
			goto run;
		/*
		 * If SIGCONT is default (or ignored) and process is
		 * asleep, we are finished; the process should not
		 * be awakened.
		 */
		if ((prop & SA_CONT) && action == SIG_DFL) {
			atomic_clearbits_int(&p->p_siglist, mask);
			goto out;
		}
		/*
		 * When a sleeping process receives a stop
		 * signal, process immediately if possible.
		 */
		if ((prop & SA_STOP) && action == SIG_DFL) {
			/*
			 * If a child holding parent blocked,
			 * stopping could cause deadlock.
			 */
			if (pr->ps_flags & PS_PPWAIT)
				goto out;
			atomic_clearbits_int(&p->p_siglist, mask);
			p->p_xstat = signum;
			proc_stop(p, 0);
			goto out;
		}
		/*
		 * All other (caught or default) signals
		 * cause the process to run.
		 */
		goto runfast;
		/*NOTREACHED*/

	case SSTOP:
		/*
		 * If traced process is already stopped,
		 * then no further action is necessary.
		 */
		if (pr->ps_flags & PS_TRACED)
			goto out;

		/*
		 * Kill signal always sets processes running.
		 */
		if (signum == SIGKILL) {
			atomic_clearbits_int(&p->p_flag, P_SUSPSIG);
			goto runfast;
		}

		if (prop & SA_CONT) {
			/*
			 * If SIGCONT is default (or ignored), we continue the
			 * process but don't leave the signal in p_siglist, as
			 * it has no further action.  If SIGCONT is held, we
			 * continue the process and leave the signal in
			 * p_siglist.  If the process catches SIGCONT, let it
			 * handle the signal itself.  If it isn't waiting on
			 * an event, then it goes back to run state.
			 * Otherwise, process goes back to sleep state.
			 */
			atomic_setbits_int(&p->p_flag, P_CONTINUED);
			atomic_clearbits_int(&p->p_flag, P_SUSPSIG);
			wakeparent = 1;
			if (action == SIG_DFL)
				atomic_clearbits_int(&p->p_siglist, mask);
			if (action == SIG_CATCH)
				goto runfast;
			if (p->p_wchan == 0)
				goto run;
			p->p_stat = SSLEEP;
			goto out;
		}

		if (prop & SA_STOP) {
			/*
			 * Already stopped, don't need to stop again.
			 * (If we did the shell could get confused.)
			 */
			atomic_clearbits_int(&p->p_siglist, mask);
			goto out;
		}

		/*
		 * If process is sleeping interruptibly, then simulate a
		 * wakeup so that when it is continued, it will be made
		 * runnable and can look at the signal.  But don't make
		 * the process runnable, leave it stopped.
		 */
		if (p->p_wchan && p->p_flag & P_SINTR)
			unsleep(p);
		goto out;

	case SONPROC:
		signotify(p);
		/* FALLTHROUGH */
	default:
		/*
		 * SRUN, SIDL, SDEAD do nothing with the signal,
		 * other than kicking ourselves if we are running.
		 * It will either never be noticed, or noticed very soon.
		 */
		goto out;
	}
	/*NOTREACHED*/

runfast:
	/*
	 * Raise priority to at least PUSER.
	 */
	if (p->p_priority > PUSER)
		p->p_priority = PUSER;
run:
	setrunnable(p);
out:
	SCHED_UNLOCK(s);
	if (wakeparent)
		wakeup(pr->ps_pptr);
}

/*
 * If the current process has received a signal (should be caught or cause
 * termination, should interrupt current syscall), return the signal number.
 * Stop signals with default action are processed immediately, then cleared;
 * they aren't returned.  This is checked after each entry to the system for
 * a syscall or trap (though this can usually be done without calling issignal
 * by checking the pending signal masks in the CURSIG macro.)  The normal call
 * sequence is
 *
 *	while (signum = CURSIG(curproc))
 *		postsig(signum);
 *
 * Assumes that if the P_SINTR flag is set, we're holding both the
 * kernel and scheduler locks.
 */
int
issignal(struct proc *p)
{
	struct process *pr = p->p_p;
	int signum, mask, prop;
	int dolock = (p->p_flag & P_SINTR) == 0;
	int s;

	for (;;) {
		mask = p->p_siglist & ~p->p_sigmask;
		if (pr->ps_flags & PS_PPWAIT)
			mask &= ~stopsigmask;
		if (mask == 0)		/* no signal to send */
			return (0);
		signum = ffs((long)mask);
		mask = sigmask(signum);
		atomic_clearbits_int(&p->p_siglist, mask);

		/*
		 * We should see pending but ignored signals
		 * only if PS_TRACED was on when they were posted.
		 */
		if (mask & pr->ps_sigacts->ps_sigignore &&
		    (pr->ps_flags & PS_TRACED) == 0)
			continue;

		if ((pr->ps_flags & (PS_TRACED | PS_PPWAIT)) == PS_TRACED) {
			/*
			 * If traced, always stop, and stay
			 * stopped until released by the debugger.
			 */
			p->p_xstat = signum;

			if (dolock)
				KERNEL_LOCK();
			single_thread_set(p, SINGLE_PTRACE, 0);
			if (dolock)
				KERNEL_UNLOCK();

			if (dolock)
				SCHED_LOCK(s);
			proc_stop(p, 1);
			if (dolock)
				SCHED_UNLOCK(s);

			if (dolock)
				KERNEL_LOCK();
			single_thread_clear(p, 0);
			if (dolock)
				KERNEL_UNLOCK();

			/*
			 * If we are no longer being traced, or the parent
			 * didn't give us a signal, look for more signals.
			 */
			if ((pr->ps_flags & PS_TRACED) == 0 || p->p_xstat == 0)
				continue;

			/*
			 * If the new signal is being masked, look for other
			 * signals.
			 */
			signum = p->p_xstat;
			mask = sigmask(signum);
			if ((p->p_sigmask & mask) != 0)
				continue;

			/* take the signal! */
			atomic_clearbits_int(&p->p_siglist, mask);
		}

		prop = sigprop[signum];

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */
		switch ((long)pr->ps_sigacts->ps_sigact[signum]) {
		case (long)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (p->p_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				printf("Process (pid %d) got signal %d\n",
				    p->p_pid, signum);
#endif
				break;		/* == ignore */
			}
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal.  However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				if (pr->ps_flags & PS_TRACED ||
				    (pr->ps_pgrp->pg_jobc == 0 &&
				    prop & SA_TTYSTOP))
					break;	/* == ignore */
				p->p_xstat = signum;
				if (dolock)
					SCHED_LOCK(s);
				proc_stop(p, 1);
				if (dolock)
					SCHED_UNLOCK(s);
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else
				goto keep;
			/*NOTREACHED*/
		case (long)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
			if ((prop & SA_CONT) == 0 &&
			    (pr->ps_flags & PS_TRACED) == 0)
				printf("issignal\n");
			break;		/* == ignore */
		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			goto keep;
		}
	}
	/* NOTREACHED */

keep:
	atomic_setbits_int(&p->p_siglist, mask);	/* leave the signal for later */
	return (signum);
}

/*
 * Put the argument process into the stopped state and notify the parent
 * via wakeup.  Signals are handled elsewhere.  The process must not be
 * on the run queue.
 */
void
proc_stop(struct proc *p, int sw)
{
	struct process *pr = p->p_p;
	extern void *softclock_si;

#ifdef MULTIPROCESSOR
	SCHED_ASSERT_LOCKED();
#endif

	p->p_stat = SSTOP;
	atomic_clearbits_int(&pr->ps_flags, PS_WAITED);
	atomic_setbits_int(&pr->ps_flags, PS_STOPPED);
	atomic_setbits_int(&p->p_flag, P_SUSPSIG);
	if (!timeout_pending(&proc_stop_to)) {
		timeout_add(&proc_stop_to, 0);
		/*
		 * We need this soft interrupt to be handled fast.
		 * Extra calls to softclock don't hurt.
		 */
		softintr_schedule(softclock_si);
	}
	if (sw)
		mi_switch();
}

/*
 * Called from a timeout to send signals to the parents of stopped processes.
 * We can't do this in proc_stop because it's called with nasty locks held
 * and we would need recursive scheduler lock to deal with that.
 */
void
proc_stop_sweep(void *v)
{
	struct process *pr;

	LIST_FOREACH(pr, &allprocess, ps_list) {
		if ((pr->ps_flags & PS_STOPPED) == 0)
			continue;
		atomic_clearbits_int(&pr->ps_flags, PS_STOPPED);

		if ((pr->ps_pptr->ps_sigacts->ps_flags & SAS_NOCLDSTOP) == 0)
			prsignal(pr->ps_pptr, SIGCHLD);
		wakeup(pr->ps_pptr);
	}
}

/*
 * Take the action for the specified signal
 * from the current set of pending signals.
 */
void
postsig(int signum)
{
	struct proc *p = curproc;
	struct process *pr = p->p_p;
	struct sigacts *ps = pr->ps_sigacts;
	sig_t action;
	u_long trapno;
	int mask, returnmask;
	union sigval sigval;
	int s, code;

#ifdef DIAGNOSTIC
	if (signum == 0)
		panic("postsig");
#endif

	KERNEL_LOCK();

	mask = sigmask(signum);
	atomic_clearbits_int(&p->p_siglist, mask);
	action = ps->ps_sigact[signum];
	sigval.sival_ptr = 0;

	if (p->p_sisig != signum) {
		trapno = 0;
		code = SI_USER;
		sigval.sival_ptr = 0;
	} else {
		trapno = p->p_sitrapno;
		code = p->p_sicode;
		sigval = p->p_sigval;
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_PSIG)) {
		siginfo_t si;

		initsiginfo(&si, signum, trapno, code, sigval);
		ktrpsig(p, signum, action, p->p_flag & P_SIGSUSPEND ?
		    p->p_oldmask : p->p_sigmask, code, &si);
	}
#endif
	if (action == SIG_DFL) {
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		sigexit(p, signum);
		/* NOTREACHED */
	} else {
		/*
		 * If we get here, the signal must be caught.
		 */
#ifdef DIAGNOSTIC
		if (action == SIG_IGN || (p->p_sigmask & mask))
			panic("postsig action");
#endif
		/*
		 * Set the new mask value and also defer further
		 * occurrences of this signal.
		 *
		 * Special case: user has done a sigpause.  Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigpause is what we want
		 * restored after the signal processing is completed.
		 */
#ifdef MULTIPROCESSOR
		s = splsched();
#else
		s = splhigh();
#endif
		if (p->p_flag & P_SIGSUSPEND) {
			atomic_clearbits_int(&p->p_flag, P_SIGSUSPEND);
			returnmask = p->p_oldmask;
		} else {
			returnmask = p->p_sigmask;
		}
		atomic_setbits_int(&p->p_sigmask, ps->ps_catchmask[signum]);
		if ((ps->ps_sigreset & mask) != 0) {
			ps->ps_sigcatch &= ~mask;
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				ps->ps_sigignore |= mask;
			ps->ps_sigact[signum] = SIG_DFL;
		}
		splx(s);
		p->p_ru.ru_nsignals++;
		if (p->p_sisig == signum) {
			p->p_sisig = 0;
			p->p_sitrapno = 0;
			p->p_sicode = SI_USER;
			p->p_sigval.sival_ptr = NULL;
		}

		(*pr->ps_emul->e_sendsig)(action, signum, returnmask, trapno,
		    code, sigval);
	}

	KERNEL_UNLOCK();
}

/*
 * Force the current process to exit with the specified signal, dumping core
 * if appropriate.  We bypass the normal tests for masked and caught signals,
 * allowing unrecoverable failures to terminate the process without changing
 * signal state.  Mark the accounting record with the signal termination.
 * If dumping core, save the signal number for the debugger.  Calls exit and
 * does not return.
 */
void
sigexit(struct proc *p, int signum)
{
	/* Mark process as going away */
	atomic_setbits_int(&p->p_flag, P_WEXIT);

	p->p_p->ps_acflag |= AXSIG;
	if (sigprop[signum] & SA_CORE) {
		p->p_sisig = signum;

		/* if there are other threads, pause them */
		if (TAILQ_FIRST(&p->p_p->ps_threads) != p ||
		    TAILQ_NEXT(p, p_thr_link) != NULL)
			single_thread_set(p, SINGLE_SUSPEND, 0);

		if (coredump(p) == 0)
			signum |= WCOREFLAG;
	}
	exit1(p, W_EXITCODE(0, signum), EXIT_NORMAL);
	/* NOTREACHED */
}

int nosuidcoredump = 1;

struct coredump_iostate {
	struct proc *io_proc;
	struct vnode *io_vp;
	struct ucred *io_cred;
	off_t io_offset;
};

/*
 * Dump core, into a file named "progname.core", unless the process was
 * setuid/setgid.
 */
int
coredump(struct proc *p)
{
#ifdef SMALL_KERNEL
	return EPERM;
#else
	struct process *pr = p->p_p;
	struct vnode *vp;
	struct ucred *cred = p->p_ucred;
	struct vmspace *vm = p->p_vmspace;
	struct nameidata nd;
	struct vattr vattr;
	struct coredump_iostate io;
	int error, len, incrash = 0;
	char name[MAXPATHLEN];
	const char *dir = "/var/crash";

	if (pr->ps_emul->e_coredump == NULL)
		return (EINVAL);

	pr->ps_flags |= PS_COREDUMP;

	/*
	 * If the process has inconsistent uids, nosuidcoredump
	 * determines coredump placement policy.
	 */
	if (((pr->ps_flags & PS_SUGID) && (error = suser(p, 0))) ||
	    ((pr->ps_flags & PS_SUGID) && nosuidcoredump)) {
		if (nosuidcoredump == 3 || nosuidcoredump == 2)
			incrash = 1;
		else
			return (EPERM);
	}

	/* Don't dump if will exceed file size limit. */
	if (USPACE + ptoa(vm->vm_dsize + vm->vm_ssize) >=
	    p->p_rlimit[RLIMIT_CORE].rlim_cur)
		return (EFBIG);

	if (incrash && nosuidcoredump == 3) {
		/*
		 * If the program directory does not exist, dumps of
		 * that core will silently fail.
		 */
		len = snprintf(name, sizeof(name), "%s/%s/%u.core",
		    dir, p->p_comm, p->p_pid);
	} else if (incrash && nosuidcoredump == 2)
		len = snprintf(name, sizeof(name), "%s/%s.core",
		    dir, p->p_comm);
	else
		len = snprintf(name, sizeof(name), "%s.core", p->p_comm);
	if (len >= sizeof(name))
		return (EACCES);
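
	/*
	 * The name chosen above reflects the nosuidcoredump policy
	 * (reachable as sysctl kern.nosuidcoredump; summary below is
	 * illustrative):
	 *
	 *	1	(default) setugid processes do not dump core
	 *	2	setugid dumps are written, as root, to
	 *		/var/crash/progname.core
	 *	3	setugid dumps are written to
	 *		/var/crash/progname/pid.core
	 *
	 * Everything else dumps to progname.core, resolved relative to
	 * the process's current directory.
	 */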

	/*
	 * Control the UID used to write out.  The normal case uses
	 * the real UID.  If the sugid case is going to write into the
	 * controlled directory, we do so as root.
	 */
	if (incrash == 0) {
		cred = crdup(cred);
		cred->cr_uid = cred->cr_ruid;
		cred->cr_gid = cred->cr_rgid;
	} else {
		if (p->p_fd->fd_rdir) {
			vrele(p->p_fd->fd_rdir);
			p->p_fd->fd_rdir = NULL;
		}
		p->p_ucred = crdup(p->p_ucred);
		crfree(cred);
		cred = p->p_ucred;
		crhold(cred);
		cred->cr_uid = 0;
		cred->cr_gid = 0;
	}

	p->p_tamenote = TMN_COREDUMP;
	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, p);

	error = vn_open(&nd, O_CREAT | FWRITE | O_NOFOLLOW, S_IRUSR | S_IWUSR);

	if (error)
		goto out;

	/*
	 * Don't dump to non-regular files, files with links, or files
	 * owned by someone else.
	 */
	vp = nd.ni_vp;
	if ((error = VOP_GETATTR(vp, &vattr, cred, p)) != 0) {
		VOP_UNLOCK(vp, 0, p);
		vn_close(vp, FWRITE, cred, p);
		goto out;
	}
	if (vp->v_type != VREG || vattr.va_nlink != 1 ||
	    vattr.va_mode & ((VREAD | VWRITE) >> 3 | (VREAD | VWRITE) >> 6) ||
	    vattr.va_uid != cred->cr_uid) {
		error = EACCES;
		VOP_UNLOCK(vp, 0, p);
		vn_close(vp, FWRITE, cred, p);
		goto out;
	}
	VATTR_NULL(&vattr);
	vattr.va_size = 0;
	VOP_SETATTR(vp, &vattr, cred, p);
	pr->ps_acflag |= ACORE;

	io.io_proc = p;
	io.io_vp = vp;
	io.io_cred = cred;
	io.io_offset = 0;
	VOP_UNLOCK(vp, 0, p);
	vref(vp);
	error = vn_close(vp, FWRITE, cred, p);
	if (error == 0)
		error = (*pr->ps_emul->e_coredump)(p, &io);
	vrele(vp);
out:
	crfree(cred);
	return (error);
#endif
}

#ifndef SMALL_KERNEL
int
coredump_write(void *cookie, enum uio_seg segflg, const void *data, size_t len)
{
	struct coredump_iostate *io = cookie;
	off_t coffset = 0;
	size_t csize;
	int chunk, error;

	csize = len;
	do {
		if (io->io_proc->p_siglist & sigmask(SIGKILL))
			return (EINTR);

		/* Rest of the loop sleeps with lock held, so... */
		yield();

		chunk = MIN(csize, MAXPHYS);
		error = vn_rdwr(UIO_WRITE, io->io_vp,
		    (caddr_t)data + coffset, chunk,
		    io->io_offset + coffset, segflg,
		    IO_UNIT, io->io_cred, NULL, io->io_proc);
		if (error) {
			if (error == ENOSPC)
				log(LOG_ERR, "coredump of %s(%d) failed, filesystem full",
				    io->io_proc->p_comm, io->io_proc->p_pid);
			else
				log(LOG_ERR, "coredump of %s(%d), write failed: errno %d",
				    io->io_proc->p_comm, io->io_proc->p_pid, error);
			return (error);
		}

		coffset += chunk;
		csize -= chunk;
	} while (csize > 0);

	io->io_offset += len;
	return (0);
}

void
coredump_unmap(void *cookie, vaddr_t start, vaddr_t end)
{
	struct coredump_iostate *io = cookie;

	uvm_unmap(&io->io_proc->p_vmspace->vm_map, start, end);
}

#endif	/* !SMALL_KERNEL */
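
/*
 * Illustrative use of the helpers above (a sketch; the real callers are
 * the emulation e_coredump hooks): such a hook walks the address space,
 * emitting headers from kernel memory and segments from user memory,
 * then releases each segment once written:
 *
 *	coredump_write(cookie, UIO_SYSSPACE, &ehdr, sizeof(ehdr));
 *	coredump_write(cookie, UIO_USERSPACE, (void *)seg_start, seg_len);
 *	coredump_unmap(cookie, seg_start, seg_end);
 *
 * ehdr, seg_start, seg_len, and seg_end are hypothetical locals of
 * such a hook.
 */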

/*
 * Nonexistent system call-- signal process (may want to handle it).
 * Flag error in case process won't see signal immediately (blocked or
 * ignored).
 */
/* ARGSUSED */
int
sys_nosys(struct proc *p, void *v, register_t *retval)
{

	ptsignal(p, SIGSYS, STHREAD);
	return (ENOSYS);
}

int
sys___thrsigdivert(struct proc *p, void *v, register_t *retval)
{
	static int sigwaitsleep;
	struct sys___thrsigdivert_args /* {
		syscallarg(sigset_t) sigmask;
		syscallarg(siginfo_t *) info;
		syscallarg(const struct timespec *) timeout;
	} */ *uap = v;
	struct process *pr = p->p_p;
	sigset_t *m;
	sigset_t mask = SCARG(uap, sigmask) &~ sigcantmask;
	siginfo_t si;
	long long to_ticks = 0;
	int timeinvalid = 0;
	int error = 0;

	memset(&si, 0, sizeof(si));

	if (SCARG(uap, timeout) != NULL) {
		struct timespec ts;
		if ((error = copyin(SCARG(uap, timeout), &ts, sizeof(ts))) != 0)
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &ts);
#endif
		if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000)
			timeinvalid = 1;
		else {
			to_ticks = (long long)hz * ts.tv_sec +
			    ts.tv_nsec / (tick * 1000);
			if (to_ticks > INT_MAX)
				to_ticks = INT_MAX;
		}
	}

	dosigsuspend(p, p->p_sigmask &~ mask);
	for (;;) {
		si.si_signo = CURSIG(p);
		if (si.si_signo != 0) {
			sigset_t smask = sigmask(si.si_signo);
			if (smask & mask) {
				if (p->p_siglist & smask)
					m = &p->p_siglist;
				else if (pr->ps_mainproc->p_siglist & smask)
					m = &pr->ps_mainproc->p_siglist;
				else {
					/* signal got eaten by someone else? */
					continue;
				}
				atomic_clearbits_int(m, smask);
				error = 0;
				break;
			}
		}

		/* per-POSIX, delay this error until after the above */
		if (timeinvalid)
			error = EINVAL;

		if (error != 0)
			break;

		error = tsleep(&sigwaitsleep, PPAUSE|PCATCH, "sigwait",
		    (int)to_ticks);
	}

	if (error == 0) {
		*retval = si.si_signo;
		if (SCARG(uap, info) != NULL)
			error = copyout(&si, SCARG(uap, info), sizeof(si));
	} else if (error == ERESTART && SCARG(uap, timeout) != NULL) {
		/*
		 * Restarting is wrong if there's a timeout, as it'll be
		 * for the same interval again
		 */
		error = EINTR;
	}

	return (error);
}

void
initsiginfo(siginfo_t *si, int sig, u_long trapno, int code, union sigval val)
{
	memset(si, 0, sizeof(*si));

	si->si_signo = sig;
	si->si_code = code;
	if (code == SI_USER) {
		si->si_value = val;
	} else {
		switch (sig) {
		case SIGSEGV:
		case SIGILL:
		case SIGBUS:
		case SIGFPE:
			si->si_addr = val.sival_ptr;
			si->si_trapno = trapno;
			break;
		case SIGXFSZ:
			break;
		}
	}
}

int
filt_sigattach(struct knote *kn)
{
	struct process *pr = curproc->p_p;

	kn->kn_ptr.p_process = pr;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/* XXX lock the proc here while adding to the list? */
	SLIST_INSERT_HEAD(&pr->ps_klist, kn, kn_selnext);

	return (0);
}

void
filt_sigdetach(struct knote *kn)
{
	struct process *pr = kn->kn_ptr.p_process;

	SLIST_REMOVE(&pr->ps_klist, kn, knote, kn_selnext);
}

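/*
 * Userland view of the signal filter implemented here (illustrative):
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	signal(SIGUSR1, SIG_IGN);	(reported even if ignored)
 *	EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * kn_data then counts deliveries between kevent() calls, courtesy of
 * the EV_CLEAR set in filt_sigattach() above.
 */
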
/*
 * signal knotes are shared with proc knotes, so we apply a mask to
 * the hint in order to differentiate them from process hints.  This
 * could be avoided by using a signal-specific knote list, but probably
 * isn't worth the trouble.
 */
int
filt_signal(struct knote *kn, long hint)
{

	if (hint & NOTE_SIGNAL) {
		hint &= ~NOTE_SIGNAL;

		if (kn->kn_id == hint)
			kn->kn_data++;
	}
	return (kn->kn_data != 0);
}

void
userret(struct proc *p)
{
	int sig;

	/* send SIGPROF or SIGVTALRM if their timers interrupted this thread */
	if (p->p_flag & P_PROFPEND) {
		atomic_clearbits_int(&p->p_flag, P_PROFPEND);
		KERNEL_LOCK();
		psignal(p, SIGPROF);
		KERNEL_UNLOCK();
	}
	if (p->p_flag & P_ALRMPEND) {
		atomic_clearbits_int(&p->p_flag, P_ALRMPEND);
		KERNEL_LOCK();
		psignal(p, SIGVTALRM);
		KERNEL_UNLOCK();
	}

	while ((sig = CURSIG(p)) != 0)
		postsig(sig);

	/*
	 * If P_SIGSUSPEND is still set here, then we still need to restore
	 * the original sigmask before returning to userspace.  Also, this
	 * might unmask some pending signals, so we need to check a second
	 * time for signals to post.
	 */
	if (p->p_flag & P_SIGSUSPEND) {
		atomic_clearbits_int(&p->p_flag, P_SIGSUSPEND);
		p->p_sigmask = p->p_oldmask;

		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
	}

	if (p->p_flag & P_SUSPSINGLE) {
		KERNEL_LOCK();
		single_thread_check(p, 0);
		KERNEL_UNLOCK();
	}

	p->p_cpu->ci_schedstate.spc_curpriority = p->p_priority = p->p_usrpri;
}

int
single_thread_check(struct proc *p, int deep)
{
	struct process *pr = p->p_p;

	if (pr->ps_single != NULL && pr->ps_single != p) {
		do {
			int s;

			/* if we're in deep, we need to unwind to the edge */
			if (deep) {
				if (pr->ps_flags & PS_SINGLEUNWIND)
					return (ERESTART);
				if (pr->ps_flags & PS_SINGLEEXIT)
					return (EINTR);
			}

			if (--pr->ps_singlecount == 0)
				wakeup(&pr->ps_singlecount);
			if (pr->ps_flags & PS_SINGLEEXIT)
				exit1(p, 0, EXIT_THREAD_NOCHECK);

			/* not exiting and don't need to unwind, so suspend */
			SCHED_LOCK(s);
			p->p_stat = SSTOP;
			mi_switch();
			SCHED_UNLOCK(s);
		} while (pr->ps_single != NULL);
	}

	return (0);
}

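/*
 * Typical use of the single-threading interface below, as in sigexit()
 * above and the ptrace stop in issignal() (illustrative outline):
 *
 *	single_thread_set(p, SINGLE_SUSPEND, 0);
 *	... operate with the other threads parked ...
 *	single_thread_clear(p, 0);
 *
 * SINGLE_PTRACE callers skip the implicit wait inside
 * single_thread_set() and call single_thread_wait() themselves.
 */
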
/*
 * Stop other threads in the process.  The mode controls how and
 * where the other threads should stop:
 *  - SINGLE_SUSPEND: stop wherever they are, will later either be told to exit
 *    (by setting to SINGLE_EXIT) or be released (via single_thread_clear())
 *  - SINGLE_PTRACE: stop wherever they are, will wait for them to stop
 *    later (via single_thread_wait()) and released as with SINGLE_SUSPEND
 *  - SINGLE_UNWIND: just unwind to kernel boundary, will be told to exit
 *    or released as with SINGLE_SUSPEND
 *  - SINGLE_EXIT: unwind to kernel boundary and exit
 */
int
single_thread_set(struct proc *p, enum single_thread_mode mode, int deep)
{
	struct process *pr = p->p_p;
	struct proc *q;
	int error;

	KERNEL_ASSERT_LOCKED();

	if ((error = single_thread_check(p, deep)))
		return error;

	switch (mode) {
	case SINGLE_SUSPEND:
	case SINGLE_PTRACE:
		break;
	case SINGLE_UNWIND:
		atomic_setbits_int(&pr->ps_flags, PS_SINGLEUNWIND);
		break;
	case SINGLE_EXIT:
		atomic_setbits_int(&pr->ps_flags, PS_SINGLEEXIT);
		atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND);
		break;
#ifdef DIAGNOSTIC
	default:
		panic("single_thread_mode = %d", mode);
#endif
	}
	pr->ps_single = p;
	pr->ps_singlecount = 0;
	TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
		int s;

		if (q == p)
			continue;
		if (q->p_flag & P_WEXIT) {
			if (mode == SINGLE_EXIT) {
				SCHED_LOCK(s);
				if (q->p_stat == SSTOP) {
					setrunnable(q);
					pr->ps_singlecount++;
				}
				SCHED_UNLOCK(s);
			}
			continue;
		}
		SCHED_LOCK(s);
		atomic_setbits_int(&q->p_flag, P_SUSPSINGLE);
		switch (q->p_stat) {
		case SIDL:
		case SRUN:
			pr->ps_singlecount++;
			break;
		case SSLEEP:
			/* if it's not interruptible, then just have to wait */
			if (q->p_flag & P_SINTR) {
				/* merely need to suspend?  just stop it */
				if (mode == SINGLE_SUSPEND ||
				    mode == SINGLE_PTRACE) {
					q->p_stat = SSTOP;
					break;
				}
				/* need to unwind or exit, so wake it */
				setrunnable(q);
			}
			pr->ps_singlecount++;
			break;
		case SSTOP:
			if (mode == SINGLE_EXIT) {
				setrunnable(q);
				pr->ps_singlecount++;
			}
			break;
		case SDEAD:
			break;
		case SONPROC:
			pr->ps_singlecount++;
			signotify(q);
			break;
		}
		SCHED_UNLOCK(s);
	}

	if (mode != SINGLE_PTRACE)
		single_thread_wait(pr);

	return 0;
}

void
single_thread_wait(struct process *pr)
{
	/* wait until they're all suspended */
	while (pr->ps_singlecount > 0)
		tsleep(&pr->ps_singlecount, PUSER, "suspend", 0);
}

void
single_thread_clear(struct proc *p, int flag)
{
	struct process *pr = p->p_p;
	struct proc *q;

	KASSERT(pr->ps_single == p);
	KERNEL_ASSERT_LOCKED();

	pr->ps_single = NULL;
	atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND | PS_SINGLEEXIT);
	TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
		int s;

		if (q == p || (q->p_flag & P_SUSPSINGLE) == 0)
			continue;
		atomic_clearbits_int(&q->p_flag, P_SUSPSINGLE);

		/*
		 * if the thread was only stopped for single threading
		 * then clearing that either makes it runnable or puts
		 * it back into some sleep queue
		 */
		SCHED_LOCK(s);
		if (q->p_stat == SSTOP && (q->p_flag & flag) == 0) {
			if (q->p_wchan == 0)
				setrunnable(q);
			else
				q->p_stat = SSLEEP;
		}
		SCHED_UNLOCK(s);
	}
}