/*	$OpenBSD: kern_sig.c,v 1.175 2014/11/16 05:42:21 guenther Exp $	*/
/*	$NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $	*/

/*
 * Copyright (c) 1997 Theo de Raadt. All rights reserved.
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sig.c	8.7 (Berkeley) 4/18/94
 */

#define	SIGPROP		/* include signal properties table */
#include <sys/param.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/queue.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/acct.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/wait.h>
#include <sys/ktrace.h>
#include <sys/stat.h>
#include <sys/core.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/ptrace.h>
#include <sys/sched.h>
#include <sys/user.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

int	filt_sigattach(struct knote *kn);
void	filt_sigdetach(struct knote *kn);
int	filt_signal(struct knote *kn, long hint);

struct filterops sig_filtops =
	{ 0, filt_sigattach, filt_sigdetach, filt_signal };

void proc_stop(struct proc *p, int);
void proc_stop_sweep(void *);
struct timeout proc_stop_to;

int cansignal(struct proc *, struct process *, int);

struct pool sigacts_pool;	/* memory pool for sigacts structures */
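/*
 * Informational note: signal sets in this file are plain int bit
 * masks; sigmask(n) expands to roughly 1 << (n - 1), so for example
 * sigmask(SIGKILL), with SIGKILL == 9, is 0x100.  sigcantmask is the
 * pair of bits for SIGKILL and SIGSTOP, which can never be blocked
 * or ignored.
 */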
/*
 * Can thread p send the signal signum to process qr?
 */
int
cansignal(struct proc *p, struct process *qr, int signum)
{
	struct process *pr = p->p_p;
	struct ucred *uc = p->p_ucred;
	struct ucred *quc = qr->ps_ucred;

	if (uc->cr_uid == 0)
		return (1);		/* root can always signal */

	if (pr == qr)
		return (1);		/* process can always signal itself */

	/* optimization: if the same creds then the tests below will pass */
	if (uc == quc)
		return (1);

	if (signum == SIGCONT && qr->ps_session == pr->ps_session)
		return (1);		/* SIGCONT in session */

	/*
	 * Using kill(), only certain signals can be sent to setugid
	 * child processes
	 */
	if (qr->ps_flags & PS_SUGID) {
		switch (signum) {
		case 0:
		case SIGKILL:
		case SIGINT:
		case SIGTERM:
		case SIGALRM:
		case SIGSTOP:
		case SIGTTIN:
		case SIGTTOU:
		case SIGTSTP:
		case SIGHUP:
		case SIGUSR1:
		case SIGUSR2:
			if (uc->cr_ruid == quc->cr_ruid ||
			    uc->cr_uid == quc->cr_ruid)
				return (1);
		}
		return (0);
	}

	if (uc->cr_ruid == quc->cr_ruid ||
	    uc->cr_ruid == quc->cr_svuid ||
	    uc->cr_uid == quc->cr_ruid ||
	    uc->cr_uid == quc->cr_svuid)
		return (1);
	return (0);
}

/*
 * Initialize signal-related data structures.
 */
void
signal_init(void)
{
	timeout_set(&proc_stop_to, proc_stop_sweep, NULL);

	pool_init(&sigacts_pool, sizeof(struct sigacts), 0, 0, 0, "sigapl",
	    &pool_allocator_nointr);
}

/*
 * Create an initial sigacts structure, using the same signal state
 * as p.
 */
struct sigacts *
sigactsinit(struct process *pr)
{
	struct sigacts *ps;

	ps = pool_get(&sigacts_pool, PR_WAITOK);
	memcpy(ps, pr->ps_sigacts, sizeof(struct sigacts));
	ps->ps_refcnt = 1;
	return (ps);
}

/*
 * Share a sigacts structure.
 */
struct sigacts *
sigactsshare(struct process *pr)
{
	struct sigacts *ps = pr->ps_sigacts;

	ps->ps_refcnt++;
	return ps;
}

/*
 * Initialize a new sigaltstack structure.
 */
void
sigstkinit(struct sigaltstack *ss)
{
	ss->ss_flags = SS_DISABLE;
	ss->ss_size = 0;
	ss->ss_sp = 0;
}

/*
 * Make this process not share its sigacts, maintaining all
 * signal state.
 */
void
sigactsunshare(struct process *pr)
{
	struct sigacts *newps;

	if (pr->ps_sigacts->ps_refcnt == 1)
		return;

	newps = sigactsinit(pr);
	sigactsfree(pr);
	pr->ps_sigacts = newps;
}
/*
 * Release a sigacts structure.
 */
void
sigactsfree(struct process *pr)
{
	struct sigacts *ps = pr->ps_sigacts;

	if (--ps->ps_refcnt > 0)
		return;

	pr->ps_sigacts = NULL;

	pool_put(&sigacts_pool, ps);
}

/* ARGSUSED */
int
sys_sigaction(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigaction_args /* {
		syscallarg(int) signum;
		syscallarg(const struct sigaction *) nsa;
		syscallarg(struct sigaction *) osa;
	} */ *uap = v;
	struct sigaction vec;
#ifdef KTRACE
	struct sigaction ovec;
#endif
	struct sigaction *sa;
	const struct sigaction *nsa;
	struct sigaction *osa;
	struct sigacts *ps = p->p_p->ps_sigacts;
	int signum;
	int bit, error;

	signum = SCARG(uap, signum);
	nsa = SCARG(uap, nsa);
	osa = SCARG(uap, osa);

	if (signum <= 0 || signum >= NSIG ||
	    (nsa && (signum == SIGKILL || signum == SIGSTOP)))
		return (EINVAL);
	sa = &vec;
	if (osa) {
		sa->sa_handler = ps->ps_sigact[signum];
		sa->sa_mask = ps->ps_catchmask[signum];
		bit = sigmask(signum);
		sa->sa_flags = 0;
		if ((ps->ps_sigonstack & bit) != 0)
			sa->sa_flags |= SA_ONSTACK;
		if ((ps->ps_sigintr & bit) == 0)
			sa->sa_flags |= SA_RESTART;
		if ((ps->ps_sigreset & bit) != 0)
			sa->sa_flags |= SA_RESETHAND;
		if ((ps->ps_siginfo & bit) != 0)
			sa->sa_flags |= SA_SIGINFO;
		if (signum == SIGCHLD) {
			if ((ps->ps_flags & SAS_NOCLDSTOP) != 0)
				sa->sa_flags |= SA_NOCLDSTOP;
			if ((ps->ps_flags & SAS_NOCLDWAIT) != 0)
				sa->sa_flags |= SA_NOCLDWAIT;
		}
		if ((sa->sa_mask & bit) == 0)
			sa->sa_flags |= SA_NODEFER;
		sa->sa_mask &= ~bit;
		error = copyout(sa, osa, sizeof (vec));
		if (error)
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ovec = vec;
#endif
	}
	if (nsa) {
		error = copyin(nsa, sa, sizeof (vec));
		if (error)
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrsigaction(p, sa);
#endif
		setsigvec(p, signum, sa);
	}
#ifdef KTRACE
	if (osa && KTRPOINT(p, KTR_STRUCT))
		ktrsigaction(p, &ovec);
#endif
	return (0);
}
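/*
 * Explanatory note: setsigvec() below stores SA_RESTART inverted --
 * a set bit in ps_sigintr means the signal interrupts system calls
 * rather than restarting them -- and sys_sigaction() above reverses
 * that encoding when reporting the old action back to userland.
 */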
void
setsigvec(struct proc *p, int signum, struct sigaction *sa)
{
	struct sigacts *ps = p->p_p->ps_sigacts;
	int bit;
	int s;

	bit = sigmask(signum);
	/*
	 * Change setting atomically.
	 */
	s = splhigh();
	ps->ps_sigact[signum] = sa->sa_handler;
	if ((sa->sa_flags & SA_NODEFER) == 0)
		sa->sa_mask |= sigmask(signum);
	ps->ps_catchmask[signum] = sa->sa_mask &~ sigcantmask;
	if (signum == SIGCHLD) {
		if (sa->sa_flags & SA_NOCLDSTOP)
			atomic_setbits_int(&ps->ps_flags, SAS_NOCLDSTOP);
		else
			atomic_clearbits_int(&ps->ps_flags, SAS_NOCLDSTOP);
		/*
		 * If the SA_NOCLDWAIT flag is set or the handler
		 * is SIG_IGN we reparent the dying child to PID 1
		 * (init) which will reap the zombie.  Because we use
		 * init to do our dirty work we never set SAS_NOCLDWAIT
		 * for PID 1.
		 * XXX exit1 rework means this is unnecessary?
		 */
		if (initprocess->ps_sigacts != ps &&
		    ((sa->sa_flags & SA_NOCLDWAIT) ||
		    sa->sa_handler == SIG_IGN))
			atomic_setbits_int(&ps->ps_flags, SAS_NOCLDWAIT);
		else
			atomic_clearbits_int(&ps->ps_flags, SAS_NOCLDWAIT);
	}
	if ((sa->sa_flags & SA_RESETHAND) != 0)
		ps->ps_sigreset |= bit;
	else
		ps->ps_sigreset &= ~bit;
	if ((sa->sa_flags & SA_SIGINFO) != 0)
		ps->ps_siginfo |= bit;
	else
		ps->ps_siginfo &= ~bit;
	if ((sa->sa_flags & SA_RESTART) == 0)
		ps->ps_sigintr |= bit;
	else
		ps->ps_sigintr &= ~bit;
	if ((sa->sa_flags & SA_ONSTACK) != 0)
		ps->ps_sigonstack |= bit;
	else
		ps->ps_sigonstack &= ~bit;
	/*
	 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
	 * and for signals set to SIG_DFL where the default is to ignore.
	 * However, don't put SIGCONT in ps_sigignore,
	 * as we have to restart the process.
	 */
	if (sa->sa_handler == SIG_IGN ||
	    (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) {
		atomic_clearbits_int(&p->p_siglist, bit);
		if (signum != SIGCONT)
			ps->ps_sigignore |= bit;	/* easier in psignal */
		ps->ps_sigcatch &= ~bit;
	} else {
		ps->ps_sigignore &= ~bit;
		if (sa->sa_handler == SIG_DFL)
			ps->ps_sigcatch &= ~bit;
		else
			ps->ps_sigcatch |= bit;
	}
	splx(s);
}

/*
 * Initialize signal state for process 0;
 * set to ignore signals that are ignored by default.
 */
void
siginit(struct process *pr)
{
	struct sigacts *ps = pr->ps_sigacts;
	int i;

	for (i = 0; i < NSIG; i++)
		if (sigprop[i] & SA_IGNORE && i != SIGCONT)
			ps->ps_sigignore |= sigmask(i);
	ps->ps_flags = SAS_NOCLDWAIT | SAS_NOCLDSTOP;
}

/*
 * Reset signals for an exec by the specified thread.
 */
void
execsigs(struct proc *p)
{
	struct sigacts *ps;
	int nc, mask;

	sigactsunshare(p->p_p);
	ps = p->p_p->ps_sigacts;

	/*
	 * Reset caught signals.  Held signals remain held
	 * through p_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	while (ps->ps_sigcatch) {
		nc = ffs((long)ps->ps_sigcatch);
		mask = sigmask(nc);
		ps->ps_sigcatch &= ~mask;
		if (sigprop[nc] & SA_IGNORE) {
			if (nc != SIGCONT)
				ps->ps_sigignore |= mask;
			atomic_clearbits_int(&p->p_siglist, mask);
		}
		ps->ps_sigact[nc] = SIG_DFL;
	}
	/*
	 * Reset stack state to the user stack.
	 * Clear set of signals caught on the signal stack.
	 */
	sigstkinit(&p->p_sigstk);
	ps->ps_flags &= ~SAS_NOCLDWAIT;
	if (ps->ps_sigact[SIGCHLD] == SIG_IGN)
		ps->ps_sigact[SIGCHLD] = SIG_DFL;
}

/*
 * Manipulate signal mask.
 * Note that we receive new mask, not pointer,
 * and return old mask as return value;
 * the library stub does the rest.
 */
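/*
 * Illustrative sketch of such a stub (userland, not kernel code; the
 * raw syscall name below is hypothetical):
 *
 *	int
 *	sigprocmask(int how, const sigset_t *set, sigset_t *oset)
 *	{
 *		sigset_t old;
 *
 *		old = __syscall_sigprocmask(set ? how : SIG_BLOCK,
 *		    set ? *set : 0);
 *		if (oset)
 *			*oset = old;
 *		return (0);
 *	}
 *
 * Blocking an empty mask when set == NULL leaves the mask unchanged
 * while still returning the old value, which is all a query needs.
 */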
int
sys_sigprocmask(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigprocmask_args /* {
		syscallarg(int) how;
		syscallarg(sigset_t) mask;
	} */ *uap = v;
	int error = 0;
	int s;
	sigset_t mask;

	*retval = p->p_sigmask;
	mask = SCARG(uap, mask);
	s = splhigh();

	switch (SCARG(uap, how)) {
	case SIG_BLOCK:
		p->p_sigmask |= mask &~ sigcantmask;
		break;
	case SIG_UNBLOCK:
		p->p_sigmask &= ~mask;
		break;
	case SIG_SETMASK:
		p->p_sigmask = mask &~ sigcantmask;
		break;
	default:
		error = EINVAL;
		break;
	}
	splx(s);
	return (error);
}

/* ARGSUSED */
int
sys_sigpending(struct proc *p, void *v, register_t *retval)
{

	*retval = p->p_siglist;
	return (0);
}

/*
 * Temporarily replace calling proc's signal mask for the duration of a
 * system call.  Original signal mask will be restored by userret().
 */
void
dosigsuspend(struct proc *p, sigset_t newmask)
{
	KASSERT(p == curproc);

	p->p_oldmask = p->p_sigmask;
	atomic_setbits_int(&p->p_flag, P_SIGSUSPEND);
	p->p_sigmask = newmask;
}

/*
 * Suspend process until signal, providing mask to be set
 * in the meantime.  Note nonstandard calling convention:
 * libc stub passes mask, not pointer, to save a copyin.
 */
/* ARGSUSED */
int
sys_sigsuspend(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigsuspend_args /* {
		syscallarg(int) mask;
	} */ *uap = v;
	struct process *pr = p->p_p;
	struct sigacts *ps = pr->ps_sigacts;

	dosigsuspend(p, SCARG(uap, mask) &~ sigcantmask);
	while (tsleep(ps, PPAUSE|PCATCH, "pause", 0) == 0)
		/* void */;
	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}

int
sigonstack(size_t stack)
{
	const struct sigaltstack *ss = &curproc->p_sigstk;

	return (ss->ss_flags & SS_DISABLE ? 0 :
	    (stack - (size_t)ss->ss_sp < ss->ss_size));
}
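/*
 * Worked example for sigonstack() (illustrative figures): with
 * ss_sp == 0x1000 and ss_size == 0x2000, a stack pointer of 0x1800
 * gives 0x1800 - 0x1000 == 0x800 < 0x2000, so the pointer is on the
 * signal stack.  A pointer below ss_sp wraps around in the unsigned
 * subtraction to a huge value, so the single comparison also rejects
 * that case.
 */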
int
sys_sigaltstack(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigaltstack_args /* {
		syscallarg(const struct sigaltstack *) nss;
		syscallarg(struct sigaltstack *) oss;
	} */ *uap = v;
	struct sigaltstack ss;
	const struct sigaltstack *nss;
	struct sigaltstack *oss;
	int onstack = sigonstack(PROC_STACK(p));
	int error;

	nss = SCARG(uap, nss);
	oss = SCARG(uap, oss);

	if (oss != NULL) {
		ss = p->p_sigstk;
		if (onstack)
			ss.ss_flags |= SS_ONSTACK;
		if ((error = copyout(&ss, oss, sizeof(ss))))
			return (error);
	}
	if (nss == NULL)
		return (0);
	error = copyin(nss, &ss, sizeof(ss));
	if (error)
		return (error);
	if (onstack)
		return (EPERM);
	if (ss.ss_flags & ~SS_DISABLE)
		return (EINVAL);
	if (ss.ss_flags & SS_DISABLE) {
		p->p_sigstk.ss_flags = ss.ss_flags;
		return (0);
	}
	if (ss.ss_size < MINSIGSTKSZ)
		return (ENOMEM);
	p->p_sigstk = ss;
	return (0);
}

/* ARGSUSED */
int
sys_kill(struct proc *cp, void *v, register_t *retval)
{
	struct sys_kill_args /* {
		syscallarg(int) pid;
		syscallarg(int) signum;
	} */ *uap = v;
	struct proc *p;
	int pid = SCARG(uap, pid);
	int signum = SCARG(uap, signum);

	if (((u_int)signum) >= NSIG)
		return (EINVAL);
	if (pid > 0) {
		enum signal_type type = SPROCESS;

		/*
		 * If the target pid is > THREAD_PID_OFFSET then this
		 * must be a kill of another thread in the same process.
		 * Otherwise, this is a process kill and the target must
		 * be a main thread.
		 */
		if (pid > THREAD_PID_OFFSET) {
			if ((p = pfind(pid - THREAD_PID_OFFSET)) == NULL)
				return (ESRCH);
			if (p->p_p != cp->p_p)
				return (ESRCH);
			type = STHREAD;
		} else {
			/* XXX use prfind() */
			if ((p = pfind(pid)) == NULL)
				return (ESRCH);
			if (p->p_flag & P_THREAD)
				return (ESRCH);
			if (!cansignal(cp, p->p_p, signum))
				return (EPERM);
		}

		/* kill single process or thread */
		if (signum)
			ptsignal(p, signum, type);
		return (0);
	}
	switch (pid) {
	case -1:		/* broadcast signal */
		return (killpg1(cp, signum, 0, 1));
	case 0:			/* signal own process group */
		return (killpg1(cp, signum, 0, 0));
	default:		/* negative explicit process group */
		return (killpg1(cp, signum, -pid, 0));
	}
	/* NOTREACHED */
}

/*
 * Common code for kill process group/broadcast kill.
 * cp is calling process.
 */
int
killpg1(struct proc *cp, int signum, int pgid, int all)
{
	struct process *pr;
	struct pgrp *pgrp;
	int nfound = 0;

	if (all)
		/*
		 * broadcast
		 */
		LIST_FOREACH(pr, &allprocess, ps_list) {
			if (pr->ps_pid <= 1 ||
			    pr->ps_flags & (PS_SYSTEM | PS_NOBROADCASTKILL) ||
			    pr == cp->p_p || !cansignal(cp, pr, signum))
				continue;
			nfound++;
			if (signum)
				prsignal(pr, signum);
		}
	else {
		if (pgid == 0)
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = cp->p_p->ps_pgrp;
		else {
			pgrp = pgfind(pgid);
			if (pgrp == NULL)
				return (ESRCH);
		}
		LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist) {
			if (pr->ps_pid <= 1 || pr->ps_flags & PS_SYSTEM ||
			    !cansignal(cp, pr, signum))
				continue;
			nfound++;
			if (signum)
				prsignal(pr, signum);
		}
	}
	return (nfound ? 0 : ESRCH);
}
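/*
 * Illustrative userland view of the pid encoding handled above
 * (example values):
 *
 *	kill(123, SIGTERM)	signal process 123
 *	kill(0, SIGTERM)	signal the caller's process group
 *	kill(-123, SIGTERM)	signal process group 123
 *	kill(-1, SIGTERM)	broadcast to all signalable processes
 */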
#define CANDELIVER(uid, euid, pr) \
	(euid == 0 || \
	(uid) == (pr)->ps_ucred->cr_ruid || \
	(uid) == (pr)->ps_ucred->cr_svuid || \
	(uid) == (pr)->ps_ucred->cr_uid || \
	(euid) == (pr)->ps_ucred->cr_ruid || \
	(euid) == (pr)->ps_ucred->cr_svuid || \
	(euid) == (pr)->ps_ucred->cr_uid)

/*
 * Deliver signum to pgid, but first check uid/euid against each
 * process and see if it is permitted.
 */
void
csignal(pid_t pgid, int signum, uid_t uid, uid_t euid)
{
	struct pgrp *pgrp;
	struct process *pr;

	if (pgid == 0)
		return;
	if (pgid < 0) {
		pgid = -pgid;
		if ((pgrp = pgfind(pgid)) == NULL)
			return;
		LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist)
			if (CANDELIVER(uid, euid, pr))
				prsignal(pr, signum);
	} else {
		if ((pr = prfind(pgid)) == NULL)
			return;
		if (CANDELIVER(uid, euid, pr))
			prsignal(pr, signum);
	}
}

/*
 * Send a signal to a process group.
 */
void
gsignal(int pgid, int signum)
{
	struct pgrp *pgrp;

	if (pgid && (pgrp = pgfind(pgid)))
		pgsignal(pgrp, signum, 0);
}

/*
 * Send a signal to a process group.  If checktty is 1,
 * limit to members which have a controlling terminal.
 */
void
pgsignal(struct pgrp *pgrp, int signum, int checkctty)
{
	struct process *pr;

	if (pgrp)
		LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist)
			if (checkctty == 0 || pr->ps_flags & PS_CONTROLT)
				prsignal(pr, signum);
}

/*
 * Send a signal caused by a trap to the current process.
 * If it will be caught immediately, deliver it with correct code.
 * Otherwise, post it normally.
 */
void
trapsignal(struct proc *p, int signum, u_long trapno, int code,
    union sigval sigval)
{
	struct process *pr = p->p_p;
	struct sigacts *ps = pr->ps_sigacts;
	int mask;

	mask = sigmask(signum);
	if ((pr->ps_flags & PS_TRACED) == 0 &&
	    (ps->ps_sigcatch & mask) != 0 &&
	    (p->p_sigmask & mask) == 0) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG)) {
			siginfo_t si;

			initsiginfo(&si, signum, trapno, code, sigval);
			ktrpsig(p, signum, ps->ps_sigact[signum],
			    p->p_sigmask, code, &si);
		}
#endif
		p->p_ru.ru_nsignals++;
		(*pr->ps_emul->e_sendsig)(ps->ps_sigact[signum], signum,
		    p->p_sigmask, trapno, code, sigval);
		p->p_sigmask |= ps->ps_catchmask[signum];
		if ((ps->ps_sigreset & mask) != 0) {
			ps->ps_sigcatch &= ~mask;
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				ps->ps_sigignore |= mask;
			ps->ps_sigact[signum] = SIG_DFL;
		}
	} else {
		p->p_sisig = signum;
		p->p_sitrapno = trapno;	/* XXX for core dump/debugger */
		p->p_sicode = code;
		p->p_sigval = sigval;

		/*
		 * Signals like SIGBUS and SIGSEGV should not, when
		 * generated by the kernel, be ignorable or blockable.
		 * If it is and we're not being traced, then just kill
		 * the process.
		 */
		if ((pr->ps_flags & PS_TRACED) == 0 &&
		    (sigprop[signum] & SA_KILL) &&
		    ((p->p_sigmask & mask) || (ps->ps_sigignore & mask)))
			sigexit(p, signum);
		ptsignal(p, signum, STHREAD);
	}
}
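/*
 * Illustrative contrast: a SIGSEGV raised by a faulting load arrives
 * via trapsignal() above carrying a machine trap number and fault
 * address, and if caught is delivered to the handler before the
 * thread returns to userland; the same signal sent with kill(2)
 * arrives via ptsignal() below, is merely marked pending, and later
 * shows up in postsig() with code SI_USER and no trap information.
 */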
/*
 * Send the signal to the process.  If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *   o When a stop signal is sent to a sleeping process that takes the
 *     default action, the process is stopped without awakening it.
 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 *     regardless of the signal action (eg, blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 */
void
psignal(struct proc *p, int signum)
{
	ptsignal(p, signum, SPROCESS);
}

/*
 * type = SPROCESS	process signal, can be diverted (sigwait())
 *	XXX if blocked in all threads, mark as pending in struct process
 * type = STHREAD	thread signal, but should be propagated if unhandled
 * type = SPROPAGATED	propagated to this thread, so don't propagate again
 */
void
ptsignal(struct proc *p, int signum, enum signal_type type)
{
	int s, prop;
	sig_t action;
	int mask;
	struct process *pr = p->p_p;
	struct proc *q;
	int wakeparent = 0;

#ifdef DIAGNOSTIC
	if ((u_int)signum >= NSIG || signum == 0)
		panic("psignal signal number");
#endif

	/* Ignore signal if we are exiting */
	if (pr->ps_flags & PS_EXITING)
		return;

	mask = sigmask(signum);

	if (type == SPROCESS) {
		/* Accept SIGKILL to coredumping processes */
		if (pr->ps_flags & PS_COREDUMP && signum == SIGKILL) {
			if (pr->ps_single != NULL)
				p = pr->ps_single;
			atomic_setbits_int(&p->p_siglist, mask);
			return;
		}

		/*
		 * If the current thread can process the signal
		 * immediately (it's unblocked) then have it take it.
		 */
		q = curproc;
		if (q != NULL && q->p_p == pr && (q->p_flag & P_WEXIT) == 0 &&
		    (q->p_sigmask & mask) == 0)
			p = q;
		else {
			/*
			 * A process-wide signal can be diverted to a
			 * different thread that's in sigwait() for this
			 * signal.  If there isn't such a thread, then
			 * pick a thread that doesn't have it blocked so
			 * that the stop/kill consideration isn't
			 * delayed.  Otherwise, mark it pending on the
			 * main thread.
			 */
			TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
				/* ignore exiting threads */
				if (q->p_flag & P_WEXIT)
					continue;

				/* skip threads that have the signal blocked */
				if ((q->p_sigmask & mask) != 0)
					continue;

				/* okay, could send to this thread */
				p = q;

				/*
				 * sigsuspend, sigwait, ppoll/pselect, etc?
				 * Definitely go to this thread, as it's
				 * already blocked in the kernel.
				 */
				if (q->p_flag & P_SIGSUSPEND)
					break;
			}
		}
	}

	if (type != SPROPAGATED)
		KNOTE(&pr->ps_klist, NOTE_SIGNAL | signum);

	prop = sigprop[signum];

	/*
	 * If proc is traced, always give parent a chance.
	 */
	if (pr->ps_flags & PS_TRACED) {
		action = SIG_DFL;
		atomic_setbits_int(&p->p_siglist, mask);
	} else {
		/*
		 * If the signal is being ignored,
		 * then we forget about it immediately.
		 * (Note: we don't set SIGCONT in ps_sigignore,
		 * and if it is set to SIG_IGN,
		 * action will be SIG_DFL here.)
		 */
		if (pr->ps_sigacts->ps_sigignore & mask)
			return;
		if (p->p_sigmask & mask) {
			action = SIG_HOLD;
		} else if (pr->ps_sigacts->ps_sigcatch & mask) {
			action = SIG_CATCH;
		} else {
			action = SIG_DFL;

			if (prop & SA_KILL && pr->ps_nice > NZERO)
				pr->ps_nice = NZERO;

			/*
			 * If sending a tty stop signal to a member of an
			 * orphaned process group, discard the signal here if
			 * the action is default; don't stop the process below
			 * if sleeping, and don't clear any pending SIGCONT.
			 */
			if (prop & SA_TTYSTOP && pr->ps_pgrp->pg_jobc == 0)
				return;
		}

		atomic_setbits_int(&p->p_siglist, mask);
	}

	if (prop & SA_CONT)
		atomic_clearbits_int(&p->p_siglist, stopsigmask);

	if (prop & SA_STOP) {
		atomic_clearbits_int(&p->p_siglist, contsigmask);
		atomic_clearbits_int(&p->p_flag, P_CONTINUED);
	}

	/*
	 * XXX delay processing of SA_STOP signals unless action == SIG_DFL?
	 */
	if (prop & (SA_CONT | SA_STOP) && type != SPROPAGATED)
		TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link)
			if (q != p)
				ptsignal(q, signum, SPROPAGATED);

	/*
	 * Defer further processing for signals which are held,
	 * except that stopped processes must be continued by SIGCONT.
	 */
	if (action == SIG_HOLD && ((prop & SA_CONT) == 0 || p->p_stat != SSTOP))
		return;

	SCHED_LOCK(s);

	switch (p->p_stat) {

	case SSLEEP:
		/*
		 * If process is sleeping uninterruptibly
		 * we can't interrupt the sleep... the signal will
		 * be noticed when the process returns through
		 * trap() or syscall().
		 */
		if ((p->p_flag & P_SINTR) == 0)
			goto out;
		/*
		 * Process is sleeping and traced... make it runnable
		 * so it can discover the signal in issignal() and stop
		 * for the parent.
		 */
		if (pr->ps_flags & PS_TRACED)
			goto run;
		/*
		 * If SIGCONT is default (or ignored) and process is
		 * asleep, we are finished; the process should not
		 * be awakened.
		 */
		if ((prop & SA_CONT) && action == SIG_DFL) {
			atomic_clearbits_int(&p->p_siglist, mask);
			goto out;
		}
		/*
		 * When a sleeping process receives a stop
		 * signal, process immediately if possible.
		 */
		if ((prop & SA_STOP) && action == SIG_DFL) {
			/*
			 * If a child holding parent blocked,
			 * stopping could cause deadlock.
			 */
			if (pr->ps_flags & PS_PPWAIT)
				goto out;
			atomic_clearbits_int(&p->p_siglist, mask);
			p->p_xstat = signum;
			proc_stop(p, 0);
			goto out;
		}
		/*
		 * All other (caught or default) signals
		 * cause the process to run.
		 */
		goto runfast;
		/*NOTREACHED*/

	case SSTOP:
		/*
		 * If traced process is already stopped,
		 * then no further action is necessary.
		 */
		if (pr->ps_flags & PS_TRACED)
			goto out;

		/*
		 * Kill signal always sets processes running.
		 */
		if (signum == SIGKILL) {
			atomic_clearbits_int(&p->p_flag, P_SUSPSIG);
			goto runfast;
		}

		if (prop & SA_CONT) {
			/*
			 * If SIGCONT is default (or ignored), we continue the
			 * process but don't leave the signal in p_siglist, as
			 * it has no further action.  If SIGCONT is held, we
			 * continue the process and leave the signal in
			 * p_siglist.  If the process catches SIGCONT, let it
			 * handle the signal itself.  If it isn't waiting on
			 * an event, then it goes back to run state.
			 * Otherwise, process goes back to sleep state.
			 */
			atomic_setbits_int(&p->p_flag, P_CONTINUED);
			atomic_clearbits_int(&p->p_flag, P_SUSPSIG);
			wakeparent = 1;
			if (action == SIG_DFL)
				atomic_clearbits_int(&p->p_siglist, mask);
			if (action == SIG_CATCH)
				goto runfast;
			if (p->p_wchan == 0)
				goto run;
			p->p_stat = SSLEEP;
			goto out;
		}

		if (prop & SA_STOP) {
			/*
			 * Already stopped, don't need to stop again.
			 * (If we did the shell could get confused.)
			 */
			atomic_clearbits_int(&p->p_siglist, mask);
			goto out;
		}

		/*
		 * If process is sleeping interruptibly, then simulate a
		 * wakeup so that when it is continued, it will be made
		 * runnable and can look at the signal.  But don't make
		 * the process runnable, leave it stopped.
		 */
		if (p->p_wchan && p->p_flag & P_SINTR)
			unsleep(p);
		goto out;

	case SONPROC:
		signotify(p);
		/* FALLTHROUGH */
	default:
		/*
		 * SRUN, SIDL, SDEAD do nothing with the signal,
		 * other than kicking ourselves if we are running.
		 * It will either never be noticed, or noticed very soon.
		 */
		goto out;
	}
	/*NOTREACHED*/

runfast:
	/*
	 * Raise priority to at least PUSER.
	 */
	if (p->p_priority > PUSER)
		p->p_priority = PUSER;
run:
	setrunnable(p);
out:
	SCHED_UNLOCK(s);
	if (wakeparent)
		wakeup(pr->ps_pptr);
}

/*
 * If the current process has received a signal (should be caught or cause
 * termination, should interrupt current syscall), return the signal number.
 * Stop signals with default action are processed immediately, then cleared;
 * they aren't returned.  This is checked after each entry to the system for
 * a syscall or trap (though this can usually be done without calling issignal
 * by checking the pending signal masks in the CURSIG macro.)  The normal call
 * sequence is
 *
 *	while (signum = CURSIG(curproc))
 *		postsig(signum);
 *
 * Assumes that if the P_SINTR flag is set, we're holding both the
 * kernel and scheduler locks.
 */
int
issignal(struct proc *p)
{
	struct process *pr = p->p_p;
	int signum, mask, prop;
	int dolock = (p->p_flag & P_SINTR) == 0;
	int s;

	for (;;) {
		mask = p->p_siglist & ~p->p_sigmask;
		if (pr->ps_flags & PS_PPWAIT)
			mask &= ~stopsigmask;
		if (mask == 0)		/* no signal to send */
			return (0);
		signum = ffs((long)mask);
		mask = sigmask(signum);
		atomic_clearbits_int(&p->p_siglist, mask);

		/*
		 * We should see pending but ignored signals
		 * only if PS_TRACED was on when they were posted.
		 */
		if (mask & pr->ps_sigacts->ps_sigignore &&
		    (pr->ps_flags & PS_TRACED) == 0)
			continue;

		if ((pr->ps_flags & (PS_TRACED | PS_PPWAIT)) == PS_TRACED) {
			/*
			 * If traced, always stop, and stay
			 * stopped until released by the debugger.
			 */
			p->p_xstat = signum;

			if (dolock)
				KERNEL_LOCK();
			single_thread_set(p, SINGLE_PTRACE, 0);
			if (dolock)
				KERNEL_UNLOCK();

			if (dolock)
				SCHED_LOCK(s);
			proc_stop(p, 1);
			if (dolock)
				SCHED_UNLOCK(s);

			if (dolock)
				KERNEL_LOCK();
			single_thread_clear(p, 0);
			if (dolock)
				KERNEL_UNLOCK();

			/*
			 * If we are no longer being traced, or the parent
			 * didn't give us a signal, look for more signals.
			 */
			if ((pr->ps_flags & PS_TRACED) == 0 || p->p_xstat == 0)
				continue;

			/*
			 * If the new signal is being masked, look for other
			 * signals.
			 */
			signum = p->p_xstat;
			mask = sigmask(signum);
			if ((p->p_sigmask & mask) != 0)
				continue;

			/* take the signal! */
			atomic_clearbits_int(&p->p_siglist, mask);
		}

		prop = sigprop[signum];

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */
		switch ((long)pr->ps_sigacts->ps_sigact[signum]) {
		case (long)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (p->p_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				printf("Process (pid %d) got signal %d\n",
				    p->p_pid, signum);
#endif
				break;		/* == ignore */
			}
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal.  However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				if (pr->ps_flags & PS_TRACED ||
				    (pr->ps_pgrp->pg_jobc == 0 &&
				    prop & SA_TTYSTOP))
					break;	/* == ignore */
				p->p_xstat = signum;
				if (dolock)
					SCHED_LOCK(s);
				proc_stop(p, 1);
				if (dolock)
					SCHED_UNLOCK(s);
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else
				goto keep;
			/*NOTREACHED*/
		case (long)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
			if ((prop & SA_CONT) == 0 &&
			    (pr->ps_flags & PS_TRACED) == 0)
				printf("issignal\n");
			break;		/* == ignore */
		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			goto keep;
		}
	}
	/* NOTREACHED */

keep:
	atomic_setbits_int(&p->p_siglist, mask);	/* leave the signal for later */
	return (signum);
}

/*
 * Put the argument process into the stopped state and notify the parent
 * via wakeup.  Signals are handled elsewhere.  The process must not be
 * on the run queue.
 */
void
proc_stop(struct proc *p, int sw)
{
	struct process *pr = p->p_p;
	extern void *softclock_si;

#ifdef MULTIPROCESSOR
	SCHED_ASSERT_LOCKED();
#endif

	p->p_stat = SSTOP;
	atomic_clearbits_int(&pr->ps_flags, PS_WAITED);
	atomic_setbits_int(&pr->ps_flags, PS_STOPPED);
	atomic_setbits_int(&p->p_flag, P_SUSPSIG);
	if (!timeout_pending(&proc_stop_to)) {
		timeout_add(&proc_stop_to, 0);
		/*
		 * We need this soft interrupt to be handled fast.
		 * Extra calls to softclock don't hurt.
		 */
		softintr_schedule(softclock_si);
	}
	if (sw)
		mi_switch();
}

/*
 * Called from a timeout to send signals to the parents of stopped processes.
 * We can't do this in proc_stop because it's called with nasty locks held
 * and we would need recursive scheduler lock to deal with that.
 */
void
proc_stop_sweep(void *v)
{
	struct process *pr;

	LIST_FOREACH(pr, &allprocess, ps_list) {
		if ((pr->ps_flags & PS_STOPPED) == 0)
			continue;
		atomic_clearbits_int(&pr->ps_flags, PS_STOPPED);

		if ((pr->ps_pptr->ps_sigacts->ps_flags & SAS_NOCLDSTOP) == 0)
			prsignal(pr->ps_pptr, SIGCHLD);
		wakeup(pr->ps_pptr);
	}
}

/*
 * Take the action for the specified signal
 * from the current set of pending signals.
 */
void
postsig(int signum)
{
	struct proc *p = curproc;
	struct process *pr = p->p_p;
	struct sigacts *ps = pr->ps_sigacts;
	sig_t action;
	u_long trapno;
	int mask, returnmask;
	union sigval sigval;
	int s, code;

#ifdef DIAGNOSTIC
	if (signum == 0)
		panic("postsig");
#endif

	KERNEL_LOCK();

	mask = sigmask(signum);
	atomic_clearbits_int(&p->p_siglist, mask);
	action = ps->ps_sigact[signum];
	sigval.sival_ptr = 0;

	if (p->p_sisig != signum) {
		trapno = 0;
		code = SI_USER;
		sigval.sival_ptr = 0;
	} else {
		trapno = p->p_sitrapno;
		code = p->p_sicode;
		sigval = p->p_sigval;
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_PSIG)) {
		siginfo_t si;

		initsiginfo(&si, signum, trapno, code, sigval);
		ktrpsig(p, signum, action, p->p_flag & P_SIGSUSPEND ?
		    p->p_oldmask : p->p_sigmask, code, &si);
	}
#endif
	if (action == SIG_DFL) {
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		sigexit(p, signum);
		/* NOTREACHED */
	} else {
		/*
		 * If we get here, the signal must be caught.
		 */
#ifdef DIAGNOSTIC
		if (action == SIG_IGN || (p->p_sigmask & mask))
			panic("postsig action");
#endif
		/*
		 * Set the new mask value and also defer further
		 * occurrences of this signal.
		 *
		 * Special case: user has done a sigpause.  Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigpause is what we want
		 * restored after the signal processing is completed.
		 */
#ifdef MULTIPROCESSOR
		s = splsched();
#else
		s = splhigh();
#endif
		if (p->p_flag & P_SIGSUSPEND) {
			atomic_clearbits_int(&p->p_flag, P_SIGSUSPEND);
			returnmask = p->p_oldmask;
		} else {
			returnmask = p->p_sigmask;
		}
		p->p_sigmask |= ps->ps_catchmask[signum];
		if ((ps->ps_sigreset & mask) != 0) {
			ps->ps_sigcatch &= ~mask;
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				ps->ps_sigignore |= mask;
			ps->ps_sigact[signum] = SIG_DFL;
		}
		splx(s);
		p->p_ru.ru_nsignals++;
		if (p->p_sisig == signum) {
			p->p_sisig = 0;
			p->p_sitrapno = 0;
			p->p_sicode = SI_USER;
			p->p_sigval.sival_ptr = NULL;
		}

		(*pr->ps_emul->e_sendsig)(action, signum, returnmask, trapno,
		    code, sigval);
	}

	KERNEL_UNLOCK();
}
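/*
 * Illustrative note on the exit status built by sigexit() below:
 * with the conventional BSD wait(2) encoding, W_EXITCODE(0, SIGSEGV)
 * with WCOREFLAG or'ed in is reported to the parent as
 * WIFSIGNALED(status) true, WTERMSIG(status) == SIGSEGV, and
 * WCOREDUMP(status) true.
 */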
/*
 * Force the current process to exit with the specified signal, dumping core
 * if appropriate.  We bypass the normal tests for masked and caught signals,
 * allowing unrecoverable failures to terminate the process without changing
 * signal state.  Mark the accounting record with the signal termination.
 * If dumping core, save the signal number for the debugger.  Calls exit and
 * does not return.
 */
void
sigexit(struct proc *p, int signum)
{
	/* Mark process as going away */
	atomic_setbits_int(&p->p_flag, P_WEXIT);

	p->p_p->ps_acflag |= AXSIG;
	if (sigprop[signum] & SA_CORE) {
		p->p_sisig = signum;

		/* if there are other threads, pause them */
		if (TAILQ_FIRST(&p->p_p->ps_threads) != p ||
		    TAILQ_NEXT(p, p_thr_link) != NULL)
			single_thread_set(p, SINGLE_SUSPEND, 0);

		if (coredump(p) == 0)
			signum |= WCOREFLAG;
	}
	exit1(p, W_EXITCODE(0, signum), EXIT_NORMAL);
	/* NOTREACHED */
}

int nosuidcoredump = 1;

struct coredump_iostate {
	struct proc *io_proc;
	struct vnode *io_vp;
	struct ucred *io_cred;
	off_t io_offset;
};

/*
 * Dump core, into a file named "progname.core", unless the process was
 * setuid/setgid.
 */
int
coredump(struct proc *p)
{
#ifdef SMALL_KERNEL
	return EPERM;
#else
	struct process *pr = p->p_p;
	struct vnode *vp;
	struct ucred *cred = p->p_ucred;
	struct vmspace *vm = p->p_vmspace;
	struct nameidata nd;
	struct vattr vattr;
	struct coredump_iostate io;
	int error, len, incrash = 0;
	char name[MAXPATHLEN];
	const char *dir = "/var/crash";

	pr->ps_flags |= PS_COREDUMP;

	/*
	 * If the process has inconsistent uids, nosuidcoredump
	 * determines coredump placement policy.
	 */
	if (((pr->ps_flags & PS_SUGID) && (error = suser(p, 0))) ||
	    ((pr->ps_flags & PS_SUGID) && nosuidcoredump)) {
		if (nosuidcoredump == 3 || nosuidcoredump == 2)
			incrash = 1;
		else
			return (EPERM);
	}

	/* Don't dump if will exceed file size limit. */
	if (USPACE + ptoa(vm->vm_dsize + vm->vm_ssize) >=
	    p->p_rlimit[RLIMIT_CORE].rlim_cur)
		return (EFBIG);

	if (incrash && nosuidcoredump == 3) {
		/*
		 * If the program directory does not exist, dumps of
		 * that core will silently fail.
		 */
		len = snprintf(name, sizeof(name), "%s/%s/%u.core",
		    dir, p->p_comm, p->p_pid);
	} else if (incrash && nosuidcoredump == 2)
		len = snprintf(name, sizeof(name), "%s/%s.core",
		    dir, p->p_comm);
	else
		len = snprintf(name, sizeof(name), "%s.core", p->p_comm);
	if (len >= sizeof(name))
		return (EACCES);

	/*
	 * Control the UID used to write out.  The normal case uses
	 * the real UID.  If the sugid case is going to write into the
	 * controlled directory, we do so as root.
	 */
	if (incrash == 0) {
		cred = crdup(cred);
		cred->cr_uid = cred->cr_ruid;
		cred->cr_gid = cred->cr_rgid;
	} else {
		if (p->p_fd->fd_rdir) {
			vrele(p->p_fd->fd_rdir);
			p->p_fd->fd_rdir = NULL;
		}
		p->p_ucred = crdup(p->p_ucred);
		crfree(cred);
		cred = p->p_ucred;
		crhold(cred);
		cred->cr_uid = 0;
		cred->cr_gid = 0;
	}

	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, p);

	error = vn_open(&nd, O_CREAT | FWRITE | O_NOFOLLOW, S_IRUSR | S_IWUSR);

	if (error)
		goto out;

	/*
	 * Don't dump to non-regular files, files with links, or files
	 * owned by someone else.
	 */
	vp = nd.ni_vp;
	if ((error = VOP_GETATTR(vp, &vattr, cred, p)) != 0) {
		VOP_UNLOCK(vp, 0, p);
		vn_close(vp, FWRITE, cred, p);
		goto out;
	}
	if (vp->v_type != VREG || vattr.va_nlink != 1 ||
	    vattr.va_mode & ((VREAD | VWRITE) >> 3 | (VREAD | VWRITE) >> 6) ||
	    vattr.va_uid != cred->cr_uid) {
		error = EACCES;
		VOP_UNLOCK(vp, 0, p);
		vn_close(vp, FWRITE, cred, p);
		goto out;
	}
	VATTR_NULL(&vattr);
	vattr.va_size = 0;
	VOP_SETATTR(vp, &vattr, cred, p);
	pr->ps_acflag |= ACORE;

	io.io_proc = p;
	io.io_vp = vp;
	io.io_cred = cred;
	io.io_offset = 0;
	VOP_UNLOCK(vp, 0, p);
	vref(vp);
	error = vn_close(vp, FWRITE, cred, p);
	if (error == 0)
		error = (*pr->ps_emul->e_coredump)(p, &io);
	vrele(vp);
out:
	crfree(cred);
	return (error);
#endif
}

int
coredump_trad(struct proc *p, void *cookie)
{
#ifdef SMALL_KERNEL
	return EPERM;
#else
	struct coredump_iostate *io = cookie;
	struct vmspace *vm = io->io_proc->p_vmspace;
	struct vnode *vp = io->io_vp;
	struct ucred *cred = io->io_cred;
	struct core core;
	int error;

	core.c_midmag = 0;
	strlcpy(core.c_name, p->p_comm, sizeof(core.c_name));
	core.c_nseg = 0;
	core.c_signo = p->p_sisig;
	core.c_ucode = p->p_sitrapno;
	core.c_cpusize = 0;
	core.c_tsize = (u_long)ptoa(vm->vm_tsize);
	core.c_dsize = (u_long)ptoa(vm->vm_dsize);
	core.c_ssize = (u_long)round_page(ptoa(vm->vm_ssize));
	error = cpu_coredump(p, vp, cred, &core);
	if (error)
		return (error);
	/*
	 * uvm_coredump() spits out all appropriate segments.
	 * All that's left to do is to write the core header.
	 */
	error = uvm_coredump(p, vp, cred, &core);
	if (error)
		return (error);
	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&core,
	    (int)core.c_hdrsize, (off_t)0,
	    UIO_SYSSPACE, IO_UNIT, cred, NULL, p);
	return (error);
#endif
}

#ifndef SMALL_KERNEL
int
coredump_write(void *cookie, enum uio_seg segflg, const void *data, size_t len)
{
	struct coredump_iostate *io = cookie;
	off_t coffset = 0;
	size_t csize;
	int chunk, error;

	csize = len;
	do {
		if (io->io_proc->p_siglist & sigmask(SIGKILL))
			return (EINTR);

		/* Rest of the loop sleeps with lock held, so... */
		yield();

		chunk = MIN(csize, MAXPHYS);
		error = vn_rdwr(UIO_WRITE, io->io_vp,
		    (caddr_t)data + coffset, chunk,
		    io->io_offset + coffset, segflg,
		    IO_UNIT, io->io_cred, NULL, io->io_proc);
		if (error) {
			printf("pid %d (%s): %s write of %lu@%p"
			    " at %lld failed: %d\n",
			    io->io_proc->p_pid, io->io_proc->p_comm,
			    segflg == UIO_USERSPACE ? "user" : "system",
			    len, data, (long long)io->io_offset, error);
			return (error);
		}

		coffset += chunk;
		csize -= chunk;
	} while (csize > 0);

	io->io_offset += len;
	return (0);
}

void
coredump_unmap(void *cookie, vaddr_t start, vaddr_t end)
{
	struct coredump_iostate *io = cookie;

	uvm_unmap(&io->io_proc->p_vmspace->vm_map, start, end);
}

#endif	/* !SMALL_KERNEL */
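/*
 * Worked example for coredump_write() (illustrative numbers): with
 * MAXPHYS of 64KB, a 3MB segment is written in 48 chunks of 64KB,
 * yielding the CPU and re-checking for a pending SIGKILL between
 * chunks; io_offset advances by the full length only once the whole
 * segment has been written.
 */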
/*
 * Nonexistent system call-- signal process (may want to handle it).
 * Flag error in case process won't see signal immediately (blocked or
 * ignored).
 */
/* ARGSUSED */
int
sys_nosys(struct proc *p, void *v, register_t *retval)
{

	ptsignal(p, SIGSYS, STHREAD);
	return (ENOSYS);
}

int
sys___thrsigdivert(struct proc *p, void *v, register_t *retval)
{
	static int sigwaitsleep;
	struct sys___thrsigdivert_args /* {
		syscallarg(sigset_t) sigmask;
		syscallarg(siginfo_t *) info;
		syscallarg(const struct timespec *) timeout;
	} */ *uap = v;
	struct process *pr = p->p_p;
	sigset_t *m;
	sigset_t mask = SCARG(uap, sigmask) &~ sigcantmask;
	siginfo_t si;
	long long to_ticks = 0;
	int timeinvalid = 0;
	int error = 0;

	memset(&si, 0, sizeof(si));

	if (SCARG(uap, timeout) != NULL) {
		struct timespec ts;
		if ((error = copyin(SCARG(uap, timeout), &ts, sizeof(ts))) != 0)
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &ts);
#endif
		if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000)
			timeinvalid = 1;
		else {
			to_ticks = (long long)hz * ts.tv_sec +
			    ts.tv_nsec / (tick * 1000);
			if (to_ticks > INT_MAX)
				to_ticks = INT_MAX;
		}
	}

	dosigsuspend(p, p->p_sigmask &~ mask);
	for (;;) {
		si.si_signo = CURSIG(p);
		if (si.si_signo != 0) {
			sigset_t smask = sigmask(si.si_signo);
			if (smask & mask) {
				if (p->p_siglist & smask)
					m = &p->p_siglist;
				else if (pr->ps_mainproc->p_siglist & smask)
					m = &pr->ps_mainproc->p_siglist;
				else {
					/* signal got eaten by someone else? */
					continue;
				}
				atomic_clearbits_int(m, smask);
				error = 0;
				break;
			}
		}

		/* per-POSIX, delay this error until after the above */
		if (timeinvalid)
			error = EINVAL;

		if (error != 0)
			break;

		error = tsleep(&sigwaitsleep, PPAUSE|PCATCH, "sigwait",
		    (int)to_ticks);
	}

	if (error == 0) {
		*retval = si.si_signo;
		if (SCARG(uap, info) != NULL)
			error = copyout(&si, SCARG(uap, info), sizeof(si));
	} else if (error == ERESTART && SCARG(uap, timeout) != NULL) {
		/*
		 * Restarting is wrong if there's a timeout, as it'll be
		 * for the same interval again
		 */
		error = EINTR;
	}

	return (error);
}

void
initsiginfo(siginfo_t *si, int sig, u_long trapno, int code, union sigval val)
{
	memset(si, 0, sizeof(*si));

	si->si_signo = sig;
	si->si_code = code;
	if (code == SI_USER) {
		si->si_value = val;
	} else {
		switch (sig) {
		case SIGSEGV:
		case SIGILL:
		case SIGBUS:
		case SIGFPE:
			si->si_addr = val.sival_ptr;
			si->si_trapno = trapno;
			break;
		case SIGXFSZ:
			break;
		}
	}
}

int
filt_sigattach(struct knote *kn)
{
	struct process *pr = curproc->p_p;

	kn->kn_ptr.p_process = pr;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/* XXX lock the proc here while adding to the list? */
	SLIST_INSERT_HEAD(&pr->ps_klist, kn, kn_selnext);

	return (0);
}

void
filt_sigdetach(struct knote *kn)
{
	struct process *pr = kn->kn_ptr.p_process;

	SLIST_REMOVE(&pr->ps_klist, kn, knote, kn_selnext);
}
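/*
 * Illustrative userland usage of these filter ops (a sketch, not
 * kernel code): a signal knote counts deliveries of a given signal:
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	...
 *	kevent(kq, NULL, 0, &kev, 1, NULL);
 *	... kev.data now holds the SIGUSR1 count since the last read,
 *	because EV_CLEAR is set automatically in filt_sigattach().
 */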
/*
 * signal knotes are shared with proc knotes, so we apply a mask to
 * the hint in order to differentiate them from process hints.  This
 * could be avoided by using a signal-specific knote list, but probably
 * isn't worth the trouble.
 */
int
filt_signal(struct knote *kn, long hint)
{

	if (hint & NOTE_SIGNAL) {
		hint &= ~NOTE_SIGNAL;

		if (kn->kn_id == hint)
			kn->kn_data++;
	}
	return (kn->kn_data != 0);
}

void
userret(struct proc *p)
{
	int sig;

	/* send SIGPROF or SIGVTALRM if their timers interrupted this thread */
	if (p->p_flag & P_PROFPEND) {
		atomic_clearbits_int(&p->p_flag, P_PROFPEND);
		KERNEL_LOCK();
		psignal(p, SIGPROF);
		KERNEL_UNLOCK();
	}
	if (p->p_flag & P_ALRMPEND) {
		atomic_clearbits_int(&p->p_flag, P_ALRMPEND);
		KERNEL_LOCK();
		psignal(p, SIGVTALRM);
		KERNEL_UNLOCK();
	}

	while ((sig = CURSIG(p)) != 0)
		postsig(sig);

	/*
	 * If P_SIGSUSPEND is still set here, then we still need to restore
	 * the original sigmask before returning to userspace.  Also, this
	 * might unmask some pending signals, so we need to check a second
	 * time for signals to post.
	 */
	if (p->p_flag & P_SIGSUSPEND) {
		atomic_clearbits_int(&p->p_flag, P_SIGSUSPEND);
		p->p_sigmask = p->p_oldmask;

		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
	}

	if (p->p_flag & P_SUSPSINGLE) {
		KERNEL_LOCK();
		single_thread_check(p, 0);
		KERNEL_UNLOCK();
	}

	p->p_cpu->ci_schedstate.spc_curpriority = p->p_priority = p->p_usrpri;
}

int
single_thread_check(struct proc *p, int deep)
{
	struct process *pr = p->p_p;

	if (pr->ps_single != NULL && pr->ps_single != p) {
		do {
			int s;

			/* if we're in deep, we need to unwind to the edge */
			if (deep) {
				if (pr->ps_flags & PS_SINGLEUNWIND)
					return (ERESTART);
				if (pr->ps_flags & PS_SINGLEEXIT)
					return (EINTR);
			}

			if (--pr->ps_singlecount == 0)
				wakeup(&pr->ps_singlecount);
			if (pr->ps_flags & PS_SINGLEEXIT)
				exit1(p, 0, EXIT_THREAD_NOCHECK);

			/* not exiting and don't need to unwind, so suspend */
			SCHED_LOCK(s);
			p->p_stat = SSTOP;
			mi_switch();
			SCHED_UNLOCK(s);
		} while (pr->ps_single != NULL);
	}

	return (0);
}
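/*
 * Illustrative walk-through of the single-thread protocol (a reading
 * of the code above and below, not new behaviour): single_thread_set()
 * counts, in ps_singlecount, the threads that cannot be parked on the
 * spot; each of those eventually reaches single_thread_check() above,
 * which decrements the count and wakes the waiter when it hits zero,
 * so single_thread_wait() returns once every other thread is stopped,
 * unwound, or exited.
 */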
/*
 * Stop other threads in the process.  The mode controls how and
 * where the other threads should stop:
 *  - SINGLE_SUSPEND: stop wherever they are, will later either be told to exit
 *    (by setting to SINGLE_EXIT) or be released (via single_thread_clear())
 *  - SINGLE_PTRACE: stop wherever they are, will wait for them to stop
 *    later (via single_thread_wait()) and released as with SINGLE_SUSPEND
 *  - SINGLE_UNWIND: just unwind to kernel boundary, will be told to exit
 *    or released as with SINGLE_SUSPEND
 *  - SINGLE_EXIT: unwind to kernel boundary and exit
 */
int
single_thread_set(struct proc *p, enum single_thread_mode mode, int deep)
{
	struct process *pr = p->p_p;
	struct proc *q;
	int error;

	KERNEL_ASSERT_LOCKED();

	if ((error = single_thread_check(p, deep)))
		return error;

	switch (mode) {
	case SINGLE_SUSPEND:
	case SINGLE_PTRACE:
		break;
	case SINGLE_UNWIND:
		atomic_setbits_int(&pr->ps_flags, PS_SINGLEUNWIND);
		break;
	case SINGLE_EXIT:
		atomic_setbits_int(&pr->ps_flags, PS_SINGLEEXIT);
		atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND);
		break;
#ifdef DIAGNOSTIC
	default:
		panic("single_thread_mode = %d", mode);
#endif
	}
	pr->ps_single = p;
	pr->ps_singlecount = 0;
	TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
		int s;

		if (q == p)
			continue;
		if (q->p_flag & P_WEXIT) {
			if (mode == SINGLE_EXIT) {
				SCHED_LOCK(s);
				if (q->p_stat == SSTOP) {
					setrunnable(q);
					pr->ps_singlecount++;
				}
				SCHED_UNLOCK(s);
			}
			continue;
		}
		SCHED_LOCK(s);
		atomic_setbits_int(&q->p_flag, P_SUSPSINGLE);
		switch (q->p_stat) {
		case SIDL:
		case SRUN:
			pr->ps_singlecount++;
			break;
		case SSLEEP:
			/* if it's not interruptible, then just have to wait */
			if (q->p_flag & P_SINTR) {
				/* merely need to suspend?  just stop it */
				if (mode == SINGLE_SUSPEND ||
				    mode == SINGLE_PTRACE) {
					q->p_stat = SSTOP;
					break;
				}
				/* need to unwind or exit, so wake it */
				setrunnable(q);
			}
			pr->ps_singlecount++;
			break;
		case SSTOP:
			if (mode == SINGLE_EXIT) {
				setrunnable(q);
				pr->ps_singlecount++;
			}
			break;
		case SDEAD:
			break;
		case SONPROC:
			pr->ps_singlecount++;
			signotify(q);
			break;
		}
		SCHED_UNLOCK(s);
	}

	if (mode != SINGLE_PTRACE)
		single_thread_wait(pr);

	return 0;
}

void
single_thread_wait(struct process *pr)
{
	/* wait until they're all suspended */
	while (pr->ps_singlecount > 0)
		tsleep(&pr->ps_singlecount, PUSER, "suspend", 0);
}

void
single_thread_clear(struct proc *p, int flag)
{
	struct process *pr = p->p_p;
	struct proc *q;

	KASSERT(pr->ps_single == p);
	KERNEL_ASSERT_LOCKED();

	pr->ps_single = NULL;
	atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND | PS_SINGLEEXIT);
	TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
		int s;

		if (q == p || (q->p_flag & P_SUSPSINGLE) == 0)
			continue;
		atomic_clearbits_int(&q->p_flag, P_SUSPSINGLE);

		/*
		 * if the thread was only stopped for single threading
		 * then clearing that either makes it runnable or puts
		 * it back into some sleep queue
		 */
		SCHED_LOCK(s);
		if (q->p_stat == SSTOP && (q->p_flag & flag) == 0) {
			if (q->p_wchan == 0)
				setrunnable(q);
			else
				q->p_stat = SSLEEP;
		}
		SCHED_UNLOCK(s);
	}
}