1 /* $OpenBSD: kern_sig.c,v 1.143 2012/07/11 08:45:21 guenther Exp $ */ 2 /* $NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $ */ 3 4 /* 5 * Copyright (c) 1997 Theo de Raadt. All rights reserved. 6 * Copyright (c) 1982, 1986, 1989, 1991, 1993 7 * The Regents of the University of California. All rights reserved. 8 * (c) UNIX System Laboratories, Inc. 9 * All or some portions of this file are derived from material licensed 10 * to the University of California by American Telephone and Telegraph 11 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 12 * the permission of UNIX System Laboratories, Inc. 13 * 14 * Redistribution and use in source and binary forms, with or without 15 * modification, are permitted provided that the following conditions 16 * are met: 17 * 1. Redistributions of source code must retain the above copyright 18 * notice, this list of conditions and the following disclaimer. 19 * 2. Redistributions in binary form must reproduce the above copyright 20 * notice, this list of conditions and the following disclaimer in the 21 * documentation and/or other materials provided with the distribution. 22 * 3. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94 39 */ 40 41 #define SIGPROP /* include signal properties table */ 42 #include <sys/param.h> 43 #include <sys/signalvar.h> 44 #include <sys/resourcevar.h> 45 #include <sys/queue.h> 46 #include <sys/namei.h> 47 #include <sys/vnode.h> 48 #include <sys/event.h> 49 #include <sys/proc.h> 50 #include <sys/systm.h> 51 #include <sys/times.h> 52 #include <sys/buf.h> 53 #include <sys/acct.h> 54 #include <sys/file.h> 55 #include <sys/kernel.h> 56 #include <sys/wait.h> 57 #include <sys/ktrace.h> 58 #include <sys/stat.h> 59 #include <sys/core.h> 60 #include <sys/malloc.h> 61 #include <sys/pool.h> 62 #include <sys/ptrace.h> 63 #include <sys/sched.h> 64 65 #include <sys/mount.h> 66 #include <sys/syscallargs.h> 67 68 #include <machine/cpu.h> 69 70 #include <uvm/uvm_extern.h> 71 72 int filt_sigattach(struct knote *kn); 73 void filt_sigdetach(struct knote *kn); 74 int filt_signal(struct knote *kn, long hint); 75 76 struct filterops sig_filtops = 77 { 0, filt_sigattach, filt_sigdetach, filt_signal }; 78 79 void proc_stop(struct proc *p, int); 80 void proc_stop_sweep(void *); 81 struct timeout proc_stop_to; 82 83 int cansignal(struct proc *, struct pcred *, struct proc *, int); 84 85 struct pool sigacts_pool; /* memory pool for sigacts structures */ 86 87 /* 88 * Can process p, with pcred pc, send the signal signum to process q? 
 */
/*
 * Permission check only — returns 1 if allowed, 0 if denied; sends nothing.
 */
int
cansignal(struct proc *p, struct pcred *pc, struct proc *q, int signum)
{
	if (pc->pc_ucred->cr_uid == 0)
		return (1);		/* root can always signal */

	if (p == q)
		return (1);		/* process can always signal itself */

	if (signum == SIGCONT && q->p_p->ps_session == p->p_p->ps_session)
		return (1);		/* SIGCONT in session */

	/*
	 * Using kill(), only certain signals can be sent to setugid
	 * child processes
	 */
	if (q->p_p->ps_flags & PS_SUGID) {
		switch (signum) {
		case 0:
		case SIGKILL:
		case SIGINT:
		case SIGTERM:
		case SIGALRM:
		case SIGSTOP:
		case SIGTTIN:
		case SIGTTOU:
		case SIGTSTP:
		case SIGHUP:
		case SIGUSR1:
		case SIGUSR2:
			/* allowed only if real or effective uid matches
			 * the target's real uid */
			if (pc->p_ruid == q->p_cred->p_ruid ||
			    pc->pc_ucred->cr_uid == q->p_cred->p_ruid)
				return (1);
		}
		return (0);
	}

	/* normal case: real/effective uid vs target's real/saved uid */
	if (pc->p_ruid == q->p_cred->p_ruid ||
	    pc->p_ruid == q->p_cred->p_svuid ||
	    pc->pc_ucred->cr_uid == q->p_cred->p_ruid ||
	    pc->pc_ucred->cr_uid == q->p_cred->p_svuid)
		return (1);
	return (0);
}

/*
 * Initialize signal-related data structures.
 */
void
signal_init(void)
{
	/* timeout used by proc_stop() to defer parent notification */
	timeout_set(&proc_stop_to, proc_stop_sweep, NULL);

	pool_init(&sigacts_pool, sizeof(struct sigacts), 0, 0, 0, "sigapl",
	    &pool_allocator_nointr);
}

/*
 * Create an initial sigacts structure, using the same signal state
 * as p.  Returns a new structure with refcount 1; may sleep (PR_WAITOK).
 */
struct sigacts *
sigactsinit(struct proc *p)
{
	struct sigacts *ps;

	ps = pool_get(&sigacts_pool, PR_WAITOK);
	memcpy(ps, p->p_sigacts, sizeof(struct sigacts));
	ps->ps_refcnt = 1;
	return (ps);
}

/*
 * Share a sigacts structure: bump the refcount and return it.
 */
struct sigacts *
sigactsshare(struct proc *p)
{
	p->p_sigacts->ps_refcnt++;
	return p->p_sigacts;
}

/*
 * Initialize a new sigaltstack structure.
 */
void
sigstkinit(struct sigaltstack *ss)
{
	/* disabled, empty, no base address — the default state */
	ss->ss_flags = SS_DISABLE;
	ss->ss_size = 0;
	ss->ss_sp = 0;
}

/*
 * Make this process not share its sigacts, maintaining all
 * signal state.
 */
void
sigactsunshare(struct proc *p)
{
	struct sigacts *newps;

	if (p->p_sigacts->ps_refcnt == 1)
		return;		/* already unshared */

	/* copy first, then drop our reference on the shared one */
	newps = sigactsinit(p);
	sigactsfree(p);
	p->p_sigacts = newps;
}

/*
 * Release a sigacts structure: drop one reference and return the
 * memory to the pool when the last reference goes away.
 */
void
sigactsfree(struct proc *p)
{
	struct sigacts *ps = p->p_sigacts;

	if (--ps->ps_refcnt > 0)
		return;

	p->p_sigacts = NULL;

	pool_put(&sigacts_pool, ps);
}

/*
 * sigaction(2) syscall: fetch and/or set the action for one signal.
 * Rejects attempts to change the action for SIGKILL/SIGSTOP.
 */
/* ARGSUSED */
int
sys_sigaction(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigaction_args /* {
		syscallarg(int) signum;
		syscallarg(const struct sigaction *) nsa;
		syscallarg(struct sigaction *) osa;
	} */ *uap = v;
	struct sigaction vec;
#ifdef KTRACE
	struct sigaction ovec;
#endif
	struct sigaction *sa;
	const struct sigaction *nsa;
	struct sigaction *osa;
	struct sigacts *ps = p->p_sigacts;
	int signum;
	int bit, error;

	signum = SCARG(uap, signum);
	nsa = SCARG(uap, nsa);
	osa = SCARG(uap, osa);

	if (signum <= 0 || signum >= NSIG ||
	    (nsa && (signum == SIGKILL || signum == SIGSTOP)))
		return (EINVAL);
	sa = &vec;
	if (osa) {
		/* reconstruct the old sigaction from the per-flag masks */
		sa->sa_handler = ps->ps_sigact[signum];
		sa->sa_mask = ps->ps_catchmask[signum];
		bit = sigmask(signum);
		sa->sa_flags = 0;
		if ((ps->ps_sigonstack & bit) != 0)
			sa->sa_flags |= SA_ONSTACK;
		if ((ps->ps_sigintr & bit) == 0)
			sa->sa_flags |= SA_RESTART;
		if ((ps->ps_sigreset & bit) != 0)
			sa->sa_flags |= SA_RESETHAND;
		if ((ps->ps_siginfo & bit) != 0)
			sa->sa_flags |= SA_SIGINFO;
		if (signum == SIGCHLD) {
			if ((ps->ps_flags & SAS_NOCLDSTOP) != 0)
				sa->sa_flags |= SA_NOCLDSTOP;
			if ((ps->ps_flags & SAS_NOCLDWAIT) != 0)
				sa->sa_flags |= SA_NOCLDWAIT;
		}
		if ((sa->sa_mask & bit) == 0)
			sa->sa_flags |= SA_NODEFER;
		sa->sa_mask &= ~bit;
		error = copyout(sa, osa, sizeof (vec));
		if (error)
			return (error);
#ifdef KTRACE
		/* save a copy: vec is reused below for the new action */
		if (KTRPOINT(p, KTR_STRUCT))
			ovec = vec;
#endif
	}
	if (nsa) {
		error = copyin(nsa, sa, sizeof (vec));
		if (error)
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrsigaction(p, sa);
#endif
		setsigvec(p, signum, sa);
	}
#ifdef KTRACE
	if (osa && KTRPOINT(p, KTR_STRUCT))
		ktrsigaction(p, &ovec);
#endif
	return (0);
}

/*
 * Install a new sigaction for signum, updating all the per-flag
 * bitmasks in the sigacts structure.  sa may be modified (SA_NODEFER
 * handling adds signum to sa_mask).
 */
void
setsigvec(struct proc *p, int signum, struct sigaction *sa)
{
	struct sigacts *ps = p->p_sigacts;
	int bit;
	int s;

	bit = sigmask(signum);
	/*
	 * Change setting atomically.
	 */
	s = splhigh();
	ps->ps_sigact[signum] = sa->sa_handler;
	if ((sa->sa_flags & SA_NODEFER) == 0)
		sa->sa_mask |= sigmask(signum);
	ps->ps_catchmask[signum] = sa->sa_mask &~ sigcantmask;
	if (signum == SIGCHLD) {
		if (sa->sa_flags & SA_NOCLDSTOP)
			atomic_setbits_int(&ps->ps_flags, SAS_NOCLDSTOP);
		else
			atomic_clearbits_int(&ps->ps_flags, SAS_NOCLDSTOP);
		/*
		 * If the SA_NOCLDWAIT flag is set or the handler
		 * is SIG_IGN we reparent the dying child to PID 1
		 * (init) which will reap the zombie.  Because we use
		 * init to do our dirty work we never set SAS_NOCLDWAIT
		 * for PID 1.
		 */
		if (initproc->p_sigacts != ps &&
		    ((sa->sa_flags & SA_NOCLDWAIT) ||
		    sa->sa_handler == SIG_IGN))
			atomic_setbits_int(&ps->ps_flags, SAS_NOCLDWAIT);
		else
			atomic_clearbits_int(&ps->ps_flags, SAS_NOCLDWAIT);
	}
	if ((sa->sa_flags & SA_RESETHAND) != 0)
		ps->ps_sigreset |= bit;
	else
		ps->ps_sigreset &= ~bit;
	if ((sa->sa_flags & SA_SIGINFO) != 0)
		ps->ps_siginfo |= bit;
	else
		ps->ps_siginfo &= ~bit;
	/* note the inverted sense: ps_sigintr set means no SA_RESTART */
	if ((sa->sa_flags & SA_RESTART) == 0)
		ps->ps_sigintr |= bit;
	else
		ps->ps_sigintr &= ~bit;
	if ((sa->sa_flags & SA_ONSTACK) != 0)
		ps->ps_sigonstack |= bit;
	else
		ps->ps_sigonstack &= ~bit;
	/*
	 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
	 * and for signals set to SIG_DFL where the default is to ignore.
	 * However, don't put SIGCONT in ps_sigignore,
	 * as we have to restart the process.
	 */
	if (sa->sa_handler == SIG_IGN ||
	    (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) {
		atomic_clearbits_int(&p->p_siglist, bit);
		if (signum != SIGCONT)
			ps->ps_sigignore |= bit;	/* easier in psignal */
		ps->ps_sigcatch &= ~bit;
	} else {
		ps->ps_sigignore &= ~bit;
		if (sa->sa_handler == SIG_DFL)
			ps->ps_sigcatch &= ~bit;
		else
			ps->ps_sigcatch |= bit;
	}
	splx(s);
}

/*
 * Initialize signal state for process 0;
 * set to ignore signals that are ignored by default.
 */
void
siginit(struct proc *p)
{
	struct sigacts *ps = p->p_sigacts;
	int i;

	for (i = 0; i < NSIG; i++)
		if (sigprop[i] & SA_IGNORE && i != SIGCONT)
			ps->ps_sigignore |= sigmask(i);
	ps->ps_flags = SAS_NOCLDWAIT | SAS_NOCLDSTOP;
}

/*
 * Reset signals for an exec of the specified process.
 * Unshares the sigacts first so the changes are private.
 */
void
execsigs(struct proc *p)
{
	struct sigacts *ps;
	int nc, mask;

	sigactsunshare(p);
	ps = p->p_sigacts;

	/*
	 * Reset caught signals.  Held signals remain held
	 * through p_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	while (ps->ps_sigcatch) {
		nc = ffs((long)ps->ps_sigcatch);
		mask = sigmask(nc);
		ps->ps_sigcatch &= ~mask;
		if (sigprop[nc] & SA_IGNORE) {
			if (nc != SIGCONT)
				ps->ps_sigignore |= mask;
			atomic_clearbits_int(&p->p_siglist, mask);
		}
		ps->ps_sigact[nc] = SIG_DFL;
	}
	/*
	 * Reset stack state to the user stack.
	 * Clear set of signals caught on the signal stack.
	 */
	sigstkinit(&p->p_sigstk);
	ps->ps_flags &= ~SAS_NOCLDWAIT;
	if (ps->ps_sigact[SIGCHLD] == SIG_IGN)
		ps->ps_sigact[SIGCHLD] = SIG_DFL;
}

/*
 * Manipulate signal mask.
 * Note that we receive new mask, not pointer,
 * and return old mask as return value;
 * the library stub does the rest.
 */
int
sys_sigprocmask(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigprocmask_args /* {
		syscallarg(int) how;
		syscallarg(sigset_t) mask;
	} */ *uap = v;
	int error = 0;
	int s;
	sigset_t mask;

	/* old mask is the return value, set before any change */
	*retval = p->p_sigmask;
	mask = SCARG(uap, mask);
	s = splhigh();

	switch (SCARG(uap, how)) {
	case SIG_BLOCK:
		p->p_sigmask |= mask &~ sigcantmask;
		break;
	case SIG_UNBLOCK:
		p->p_sigmask &= ~mask;
		break;
	case SIG_SETMASK:
		p->p_sigmask = mask &~ sigcantmask;
		break;
	default:
		error = EINVAL;
		break;
	}
	splx(s);
	return (error);
}

/*
 * sigpending(2): return the set of pending signals directly in *retval.
 */
/* ARGSUSED */
int
sys_sigpending(struct proc *p, void *v, register_t *retval)
{

	*retval = p->p_siglist;
	return (0);
}

/*
 * Suspend process until signal, providing mask to be set
 * in the meantime.  Note nonstandard calling convention:
 * libc stub passes mask, not pointer, to save a copyin.
 */
/* ARGSUSED */
int
sys_sigsuspend(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigsuspend_args /* {
		syscallarg(int) mask;
	} */ *uap = v;
	struct sigacts *ps = p->p_sigacts;

	/*
	 * When returning from sigpause, we want
	 * the old mask to be restored after the
	 * signal handler has finished.  Thus, we
	 * save it here and mark the sigacts structure
	 * to indicate this.
	 */
	p->p_oldmask = p->p_sigmask;
	atomic_setbits_int(&p->p_flag, P_SIGSUSPEND);
	p->p_sigmask = SCARG(uap, mask) &~ sigcantmask;
	/* sleep until interrupted by a signal (PCATCH) */
	while (tsleep(ps, PPAUSE|PCATCH, "pause", 0) == 0)
		/* void */;
	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}

/*
 * sigaltstack(2): query and/or install the alternate signal stack.
 * Cannot change the stack while currently running on it (EPERM).
 */
int
sys_sigaltstack(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigaltstack_args /* {
		syscallarg(const struct sigaltstack *) nss;
		syscallarg(struct sigaltstack *) oss;
	} */ *uap = v;
	struct sigaltstack ss;
	const struct sigaltstack *nss;
	struct sigaltstack *oss;
	int error;

	nss = SCARG(uap, nss);
	oss = SCARG(uap, oss);

	if (oss && (error = copyout(&p->p_sigstk, oss, sizeof(p->p_sigstk))))
		return (error);
	if (nss == NULL)
		return (0);
	error = copyin(nss, &ss, sizeof(ss));
	if (error)
		return (error);
	if (p->p_sigstk.ss_flags & SS_ONSTACK)
		return (EPERM);
	if (ss.ss_flags & ~SS_DISABLE)
		return (EINVAL);
	if (ss.ss_flags & SS_DISABLE) {
		p->p_sigstk.ss_flags = ss.ss_flags;
		return (0);
	}
	if (ss.ss_size < MINSIGSTKSZ)
		return (ENOMEM);
	p->p_sigstk = ss;
	return (0);
}

/*
 * kill(2): pid > 0 targets one process (or, above THREAD_PID_OFFSET,
 * one thread of the caller's process); pid <= 0 is a group/broadcast
 * kill handled by killpg1().
 */
/* ARGSUSED */
int
sys_kill(struct proc *cp, void *v, register_t *retval)
{
	struct sys_kill_args /* {
		syscallarg(int) pid;
		syscallarg(int) signum;
	} */ *uap = v;
	struct proc *p;
	struct pcred *pc = cp->p_cred;
	int pid = SCARG(uap, pid);
	int signum = SCARG(uap, signum);

	if (((u_int)signum) >= NSIG)
		return (EINVAL);
	if (pid > 0) {
		enum signal_type type = SPROCESS;

		/*
		 * If the target pid is > THREAD_PID_OFFSET then this
		 * must be a kill of another thread in the same process.
		 * Otherwise, this is a process kill and the target must
		 * be a main thread.
		 */
		if (pid > THREAD_PID_OFFSET) {
			if ((p = pfind(pid - THREAD_PID_OFFSET)) == NULL)
				return (ESRCH);
			/* same-process threads only; no cansignal() needed */
			if (p->p_p != cp->p_p)
				return (ESRCH);
			type = STHREAD;
		} else {
			if ((p = pfind(pid)) == NULL)
				return (ESRCH);
			if (p->p_flag & P_THREAD)
				return (ESRCH);
			if (!cansignal(cp, pc, p, signum))
				return (EPERM);
		}

		/* kill single process or thread; signum 0 = permission probe */
		if (signum)
			ptsignal(p, signum, type);
		return (0);
	}
	switch (pid) {
	case -1:		/* broadcast signal */
		return (killpg1(cp, signum, 0, 1));
	case 0:			/* signal own process group */
		return (killpg1(cp, signum, 0, 0));
	default:		/* negative explicit process group */
		return (killpg1(cp, signum, -pid, 0));
	}
	/* NOTREACHED */
}

/*
 * Common code for kill process group/broadcast kill.
 * cp is calling process.  Returns ESRCH if no eligible target found.
 */
int
killpg1(struct proc *cp, int signum, int pgid, int all)
{
	struct proc *p;
	struct process *pr;
	struct pcred *pc = cp->p_cred;
	struct pgrp *pgrp;
	int nfound = 0;

	if (all)
		/*
		 * broadcast
		 */
		LIST_FOREACH(p, &allproc, p_list) {
			/* skip init, system procs, threads, ourselves */
			if (p->p_pid <= 1 || p->p_flag & (P_SYSTEM|P_THREAD) ||
			    p == cp || !cansignal(cp, pc, p, signum))
				continue;
			nfound++;
			if (signum)
				psignal(p, signum);
		}
	else {
		if (pgid == 0)
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = cp->p_p->ps_pgrp;
		else {
			pgrp = pgfind(pgid);
			if (pgrp == NULL)
				return (ESRCH);
		}
		LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist) {
			p = pr->ps_mainproc;
			if (p->p_pid <= 1 || p->p_flag & (P_SYSTEM|P_THREAD) ||
			    !cansignal(cp, pc, p, signum))
				continue;
			nfound++;
			if (signum && P_ZOMBIE(p) == 0)
				psignal(p, signum);
		}
	}
	return (nfound ? 0 : ESRCH);
}

/*
 * Delivery permission check against explicit uid/euid rather than the
 * caller's pcred (used by csignal() below).
 */
#define CANDELIVER(uid, euid, pr) \
	(euid == 0 || \
	(uid) == (pr)->ps_cred->p_ruid || \
	(uid) == (pr)->ps_cred->p_svuid || \
	(uid) == (pr)->ps_cred->pc_ucred->cr_uid || \
	(euid) == (pr)->ps_cred->p_ruid || \
	(euid) == (pr)->ps_cred->p_svuid || \
	(euid) == (pr)->ps_cred->pc_ucred->cr_uid)

/*
 * Deliver signum to pgid, but first check uid/euid against each
 * process and see if it is permitted.  Negative pgid names a process
 * group; positive pgid names a single process.
 */
void
csignal(pid_t pgid, int signum, uid_t uid, uid_t euid)
{
	struct pgrp *pgrp;
	struct process *pr;

	if (pgid == 0)
		return;
	if (pgid < 0) {
		pgid = -pgid;
		if ((pgrp = pgfind(pgid)) == NULL)
			return;
		LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist)
			if (CANDELIVER(uid, euid, pr))
				prsignal(pr, signum);
	} else {
		if ((pr = prfind(pgid)) == NULL)
			return;
		if (CANDELIVER(uid, euid, pr))
			prsignal(pr, signum);
	}
}

/*
 * Send a signal to a process group.  No-op if pgid is 0 or unknown.
 */
void
gsignal(int pgid, int signum)
{
	struct pgrp *pgrp;

	if (pgid && (pgrp = pgfind(pgid)))
		pgsignal(pgrp, signum, 0);
}

/*
 * Send a signal to a process group.  If checktty is 1,
 * limit to members which have a controlling terminal.
 */
void
pgsignal(struct pgrp *pgrp, int signum, int checkctty)
{
	struct process *pr;

	/* tolerate a NULL pgrp: simply nothing to signal */
	if (pgrp)
		LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist)
			if (checkctty == 0 || pr->ps_flags & PS_CONTROLT)
				prsignal(pr, signum);
}

/*
 * Send a signal caused by a trap to the current process.
 * If it will be caught immediately, deliver it with correct code.
 * Otherwise, post it normally.
 */
void
trapsignal(struct proc *p, int signum, u_long trapno, int code,
    union sigval sigval)
{
	struct sigacts *ps = p->p_sigacts;
	int mask;

	mask = sigmask(signum);
	/* fast path: not traced, caught, and not blocked — deliver now */
	if ((p->p_p->ps_flags & PS_TRACED) == 0 &&
	    (ps->ps_sigcatch & mask) != 0 &&
	    (p->p_sigmask & mask) == 0) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG)) {
			siginfo_t si;

			initsiginfo(&si, signum, trapno, code, sigval);
			ktrpsig(p, signum, ps->ps_sigact[signum],
			    p->p_sigmask, code, &si);
		}
#endif
		p->p_ru.ru_nsignals++;
		(*p->p_emul->e_sendsig)(ps->ps_sigact[signum], signum,
		    p->p_sigmask, trapno, code, sigval);
		p->p_sigmask |= ps->ps_catchmask[signum];
		/* honour SA_RESETHAND: back to SIG_DFL after one delivery */
		if ((ps->ps_sigreset & mask) != 0) {
			ps->ps_sigcatch &= ~mask;
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				ps->ps_sigignore |= mask;
			ps->ps_sigact[signum] = SIG_DFL;
		}
	} else {
		/* slow path: record siginfo and post through ptsignal() */
		p->p_sisig = signum;
		p->p_sitrapno = trapno;	/* XXX for core dump/debugger */
		p->p_sicode = code;
		p->p_sigval = sigval;
		ptsignal(p, signum, STHREAD);
	}
}

/*
 * Send the signal to the process.  If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *   o When a stop signal is sent to a sleeping process that takes the
 *     default action, the process is stopped without awakening it.
 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 *     regardless of the signal action (eg, blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 */
void
psignal(struct proc *p, int signum)
{
	/* convenience wrapper: process-wide signal */
	ptsignal(p, signum, SPROCESS);
}

/*
 * type = SPROCESS	process signal, can be diverted (sigwait())
 *	XXX if blocked in all threads, mark as pending in struct process
 * type = STHREAD	thread signal, but should be propagated if unhandled
 * type = SPROPAGATED	propagated to this thread, so don't propagate again
 */
void
ptsignal(struct proc *p, int signum, enum signal_type type)
{
	int s, prop;
	sig_t action;
	int mask;
	struct process *pr = p->p_p;
	struct proc *q;
	int wakeparent = 0;

#ifdef DIAGNOSTIC
	if ((u_int)signum >= NSIG || signum == 0)
		panic("psignal signal number");
#endif

	/* Ignore signal if we are exiting */
	if (pr->ps_flags & PS_EXITING)
		return;

	mask = sigmask(signum);

	if (type == SPROCESS) {
		/*
		 * A process-wide signal can be diverted to a different
		 * thread that's in sigwait() for this signal.  If there
		 * isn't such a thread, then pick a thread that doesn't
		 * have it blocked so that the stop/kill consideration
		 * isn't delayed.  Otherwise, mark it pending on the
		 * main thread.
		 */
		TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
			/* ignore exiting threads */
			if (q->p_flag & P_WEXIT)
				continue;

			/* sigwait: definitely go to this thread */
			if (q->p_sigdivert & mask) {
				p = q;
				break;
			}

			/* unblocked: possibly go to this thread */
			if ((q->p_sigmask & mask) == 0)
				p = q;
		}
	}

	if (type != SPROPAGATED)
		KNOTE(&pr->ps_klist, NOTE_SIGNAL | signum);

	prop = sigprop[signum];

	/*
	 * If proc is traced, always give parent a chance.
	 * XXX give sigwait() priority until it's fixed to do this
	 * XXX from issignal/postsig
	 */
	if (p->p_sigdivert & mask) {
		/* sigwait(): hand over the signal and wake the waiter */
		p->p_sigwait = signum;
		atomic_clearbits_int(&p->p_sigdivert, ~0);
		action = SIG_CATCH;
		wakeup(&p->p_sigdivert);
	} else if (pr->ps_flags & PS_TRACED) {
		action = SIG_DFL;
		atomic_setbits_int(&p->p_siglist, mask);
	} else {
		/*
		 * If the signal is being ignored,
		 * then we forget about it immediately.
		 * (Note: we don't set SIGCONT in ps_sigignore,
		 * and if it is set to SIG_IGN,
		 * action will be SIG_DFL here.)
		 */
		if (p->p_sigacts->ps_sigignore & mask)
			return;
		if (p->p_sigmask & mask)
			action = SIG_HOLD;
		else if (p->p_sigacts->ps_sigcatch & mask)
			action = SIG_CATCH;
		else {
			action = SIG_DFL;

			/* fatal default: strip any nice bonus first */
			if (prop & SA_KILL && pr->ps_nice > NZERO)
				pr->ps_nice = NZERO;

			/*
			 * If sending a tty stop signal to a member of an
			 * orphaned process group, discard the signal here if
			 * the action is default; don't stop the process below
			 * if sleeping, and don't clear any pending SIGCONT.
			 */
			if (prop & SA_TTYSTOP && pr->ps_pgrp->pg_jobc == 0)
				return;
		}

		atomic_setbits_int(&p->p_siglist, mask);
	}

	/* SIGCONT cancels pending stops; stops cancel pending SIGCONT */
	if (prop & SA_CONT) {
		atomic_clearbits_int(&p->p_siglist, stopsigmask);
	}

	if (prop & SA_STOP) {
		atomic_clearbits_int(&p->p_siglist, contsigmask);
		atomic_clearbits_int(&p->p_flag, P_CONTINUED);
	}

	/*
	 * XXX delay processing of SA_STOP signals unless action == SIG_DFL?
	 */
	if (prop & (SA_CONT | SA_STOP) && type != SPROPAGATED) {
		TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
			if (q != p)
				ptsignal(q, signum, SPROPAGATED);
		}
	}

	/*
	 * Defer further processing for signals which are held,
	 * except that stopped processes must be continued by SIGCONT.
	 */
	if (action == SIG_HOLD && ((prop & SA_CONT) == 0 || p->p_stat != SSTOP))
		return;

	SCHED_LOCK(s);

	switch (p->p_stat) {

	case SSLEEP:
		/*
		 * If process is sleeping uninterruptibly
		 * we can't interrupt the sleep... the signal will
		 * be noticed when the process returns through
		 * trap() or syscall().
		 */
		if ((p->p_flag & P_SINTR) == 0)
			goto out;
		/*
		 * Process is sleeping and traced... make it runnable
		 * so it can discover the signal in issignal() and stop
		 * for the parent.
		 */
		if (pr->ps_flags & PS_TRACED)
			goto run;
		/*
		 * If SIGCONT is default (or ignored) and process is
		 * asleep, we are finished; the process should not
		 * be awakened.
		 */
		if ((prop & SA_CONT) && action == SIG_DFL) {
			atomic_clearbits_int(&p->p_siglist, mask);
			goto out;
		}
		/*
		 * When a sleeping process receives a stop
		 * signal, process immediately if possible.
		 */
		if ((prop & SA_STOP) && action == SIG_DFL) {
			/*
			 * If a child holding parent blocked,
			 * stopping could cause deadlock.
			 */
			if (pr->ps_flags & PS_PPWAIT)
				goto out;
			atomic_clearbits_int(&p->p_siglist, mask);
			p->p_xstat = signum;
			proc_stop(p, 0);
			goto out;
		}
		/*
		 * All other (caught or default) signals
		 * cause the process to run.
		 */
		goto runfast;
		/*NOTREACHED*/

	case SSTOP:
		/*
		 * If traced process is already stopped,
		 * then no further action is necessary.
		 */
		if (pr->ps_flags & PS_TRACED)
			goto out;

		/*
		 * Kill signal always sets processes running.
		 */
		if (signum == SIGKILL) {
			atomic_clearbits_int(&p->p_flag, P_SUSPSIG);
			goto runfast;
		}

		if (prop & SA_CONT) {
			/*
			 * If SIGCONT is default (or ignored), we continue the
			 * process but don't leave the signal in p_siglist, as
			 * it has no further action.  If SIGCONT is held, we
			 * continue the process and leave the signal in
			 * p_siglist.  If the process catches SIGCONT, let it
			 * handle the signal itself.  If it isn't waiting on
			 * an event, then it goes back to run state.
			 * Otherwise, process goes back to sleep state.
			 */
			atomic_setbits_int(&p->p_flag, P_CONTINUED);
			atomic_clearbits_int(&p->p_flag, P_SUSPSIG);
			wakeparent = 1;
			if (action == SIG_DFL)
				atomic_clearbits_int(&p->p_siglist, mask);
			if (action == SIG_CATCH)
				goto runfast;
			if (p->p_wchan == 0)
				goto run;
			p->p_stat = SSLEEP;
			goto out;
		}

		if (prop & SA_STOP) {
			/*
			 * Already stopped, don't need to stop again.
			 * (If we did the shell could get confused.)
			 */
			atomic_clearbits_int(&p->p_siglist, mask);
			goto out;
		}

		/*
		 * If process is sleeping interruptibly, then simulate a
		 * wakeup so that when it is continued, it will be made
		 * runnable and can look at the signal.  But don't make
		 * the process runnable, leave it stopped.
		 */
		if (p->p_wchan && p->p_flag & P_SINTR)
			unsleep(p);
		goto out;

	case SONPROC:
		signotify(p);
		/* FALLTHROUGH */
	default:
		/*
		 * SRUN, SIDL, SZOMB do nothing with the signal,
		 * other than kicking ourselves if we are running.
		 * It will either never be noticed, or noticed very soon.
		 */
		goto out;
	}
	/*NOTREACHED*/

runfast:
	/*
	 * Raise priority to at least PUSER.
	 */
	if (p->p_priority > PUSER)
		p->p_priority = PUSER;
run:
	setrunnable(p);
out:
	SCHED_UNLOCK(s);
	if (wakeparent)
		wakeup(pr->ps_pptr);
}

/*
 * If the current process has received a signal (should be caught or cause
 * termination, should interrupt current syscall), return the signal number.
 * Stop signals with default action are processed immediately, then cleared;
 * they aren't returned.
 * This is checked after each entry to the system for
 * a syscall or trap (though this can usually be done without calling issignal
 * by checking the pending signal masks in the CURSIG macro.)  The normal call
 * sequence is
 *
 *	while (signum = CURSIG(curproc))
 *		postsig(signum);
 */
int
issignal(struct proc *p)
{
	struct process *pr = p->p_p;
	int signum, mask, prop;
	int dolock = (p->p_flag & P_SINTR) == 0;
	int s;

	for (;;) {
		/* pending and not blocked */
		mask = p->p_siglist & ~p->p_sigmask;
		if (pr->ps_flags & PS_PPWAIT)
			mask &= ~stopsigmask;
		if (mask == 0)		/* no signal to send */
			return (0);
		signum = ffs((long)mask);
		mask = sigmask(signum);
		atomic_clearbits_int(&p->p_siglist, mask);

		/*
		 * We should see pending but ignored signals
		 * only if P_TRACED was on when they were posted.
		 */
		if (mask & p->p_sigacts->ps_sigignore &&
		    (pr->ps_flags & PS_TRACED) == 0)
			continue;

		if ((pr->ps_flags & (PS_TRACED | PS_PPWAIT)) == PS_TRACED) {
			/*
			 * If traced, always stop, and stay
			 * stopped until released by the debugger.
			 */
			p->p_xstat = signum;

			KERNEL_LOCK();
			single_thread_set(p, SINGLE_SUSPEND, 0);
			KERNEL_UNLOCK();

			if (dolock)
				SCHED_LOCK(s);
			proc_stop(p, 1);
			if (dolock)
				SCHED_UNLOCK(s);

			KERNEL_LOCK();
			single_thread_clear(p, 0);
			KERNEL_UNLOCK();

			/*
			 * If we are no longer being traced, or the parent
			 * didn't give us a signal, look for more signals.
			 */
			if ((pr->ps_flags & PS_TRACED) == 0 || p->p_xstat == 0)
				continue;

			/*
			 * If the new signal is being masked, look for other
			 * signals.  (The debugger may have substituted a
			 * different signal via p_xstat.)
			 */
			signum = p->p_xstat;
			mask = sigmask(signum);
			if ((p->p_sigmask & mask) != 0)
				continue;

			/* take the signal! */
			atomic_clearbits_int(&p->p_siglist, mask);
		}

		prop = sigprop[signum];

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */
		switch ((long)p->p_sigacts->ps_sigact[signum]) {

		case (long)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (p->p_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				printf("Process (pid %d) got signal %d\n",
				    p->p_pid, signum);
#endif
				break;		/* == ignore */
			}
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal.  However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				if (pr->ps_flags & PS_TRACED ||
				    (pr->ps_pgrp->pg_jobc == 0 &&
				    prop & SA_TTYSTOP))
					break;	/* == ignore */
				p->p_xstat = signum;
				if (dolock)
					SCHED_LOCK(s);
				proc_stop(p, 1);
				if (dolock)
					SCHED_UNLOCK(s);
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else
				goto keep;
			/*NOTREACHED*/

		case (long)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
			if ((prop & SA_CONT) == 0 &&
			    (pr->ps_flags & PS_TRACED) == 0)
				printf("issignal\n");
			break;		/* == ignore */

		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			goto keep;
		}
	}
	/* NOTREACHED */

keep:
	atomic_setbits_int(&p->p_siglist, mask);	/*leave the signal for later */
	return (signum);
}

/*
 * Put the argument process into the stopped state and notify the parent
 * via wakeup.  Signals are handled elsewhere.  The process must not be
 * on the run queue.  If sw is set, context-switch away immediately.
 */
void
proc_stop(struct proc *p, int sw)
{
	extern void *softclock_si;

#ifdef MULTIPROCESSOR
	SCHED_ASSERT_LOCKED();
#endif

	p->p_stat = SSTOP;
	atomic_clearbits_int(&p->p_p->ps_flags, PS_WAITED);
	atomic_setbits_int(&p->p_flag, P_STOPPED|P_SUSPSIG);
	/* parent notification is deferred to proc_stop_sweep() */
	if (!timeout_pending(&proc_stop_to)) {
		timeout_add(&proc_stop_to, 0);
		/*
		 * We need this soft interrupt to be handled fast.
		 * Extra calls to softclock don't hurt.
		 */
		softintr_schedule(softclock_si);
	}
	if (sw)
		mi_switch();
}

/*
 * Called from a timeout to send signals to the parents of stopped processes.
 * We can't do this in proc_stop because it's called with nasty locks held
 * and we would need recursive scheduler lock to deal with that.
 */
void
proc_stop_sweep(void *v)
{
	struct proc *p;

	LIST_FOREACH(p, &allproc, p_list) {
		if ((p->p_flag & P_STOPPED) == 0)
			continue;
		atomic_clearbits_int(&p->p_flag, P_STOPPED);

		/* honour the parent's SA_NOCLDSTOP setting */
		if ((p->p_p->ps_pptr->ps_mainproc->p_sigacts->ps_flags &
		    SAS_NOCLDSTOP) == 0)
			prsignal(p->p_p->ps_pptr, SIGCHLD);
		wakeup(p->p_p->ps_pptr);
	}
}

/*
 * Take the action for the specified signal
 * from the current set of pending signals.
 */
void
postsig(int signum)
{
	struct proc *p = curproc;
	struct sigacts *ps = p->p_sigacts;
	sig_t action;
	u_long trapno;
	int mask, returnmask;
	union sigval sigval;
	int s, code;

#ifdef DIAGNOSTIC
	if (signum == 0)
		panic("postsig");
#endif

	KERNEL_LOCK();

	/* remove the signal from the pending set before acting on it */
	mask = sigmask(signum);
	atomic_clearbits_int(&p->p_siglist, mask);
	action = ps->ps_sigact[signum];
	sigval.sival_ptr = 0;

	/*
	 * Only use the saved siginfo (trap number, code, value) if it
	 * belongs to this signal; otherwise report a plain SI_USER.
	 */
	if (p->p_sisig != signum) {
		trapno = 0;
		code = SI_USER;
		sigval.sival_ptr = 0;
	} else {
		trapno = p->p_sitrapno;
		code = p->p_sicode;
		sigval = p->p_sigval;
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_PSIG)) {
		siginfo_t si;

		initsiginfo(&si, signum, trapno, code, sigval);
		/* trace the mask the handler will return to */
		ktrpsig(p, signum, action, p->p_flag & P_SIGSUSPEND ?
		    p->p_oldmask : p->p_sigmask, code, &si);
	}
#endif
	if (action == SIG_DFL) {
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		sigexit(p, signum);
		/* NOTREACHED */
	} else {
		/*
		 * If we get here, the signal must be caught.
		 */
#ifdef DIAGNOSTIC
		if (action == SIG_IGN || (p->p_sigmask & mask))
			panic("postsig action");
#endif
		/*
		 * Set the new mask value and also defer further
		 * occurrences of this signal.
		 *
		 * Special case: user has done a sigpause.  Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigpause is what we want
		 * restored after the signal processing is completed.
		 */
#ifdef MULTIPROCESSOR
		s = splsched();
#else
		s = splhigh();
#endif
		if (p->p_flag & P_SIGSUSPEND) {
			atomic_clearbits_int(&p->p_flag, P_SIGSUSPEND);
			returnmask = p->p_oldmask;
		} else
			returnmask = p->p_sigmask;
		p->p_sigmask |= ps->ps_catchmask[signum];
		/* SA_RESETHAND semantics: revert to default after delivery */
		if ((ps->ps_sigreset & mask) != 0) {
			ps->ps_sigcatch &= ~mask;
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				ps->ps_sigignore |= mask;
			ps->ps_sigact[signum] = SIG_DFL;
		}
		splx(s);
		p->p_ru.ru_nsignals++;
		/* consume the saved siginfo now that it has been delivered */
		if (p->p_sisig == signum) {
			p->p_sisig = 0;
			p->p_sitrapno = 0;
			p->p_sicode = SI_USER;
			p->p_sigval.sival_ptr = NULL;
		}

		/* hand off to the emulation-specific trampoline setup */
		(*p->p_emul->e_sendsig)(action, signum, returnmask, trapno,
		    code, sigval);
	}

	KERNEL_UNLOCK();
}

/*
 * Force the current process to exit with the specified signal, dumping core
 * if appropriate.  We bypass the normal tests for masked and caught signals,
 * allowing unrecoverable failures to terminate the process without changing
 * signal state.  Mark the accounting record with the signal termination.
 * If dumping core, save the signal number for the debugger.  Calls exit and
 * does not return.
 */
void
sigexit(struct proc *p, int signum)
{
	/* Mark process as going away */
	atomic_setbits_int(&p->p_flag, P_WEXIT);

	p->p_p->ps_acflag |= AXSIG;
	if (sigprop[signum] & SA_CORE) {
		/* remember the signal for the core header / debugger */
		p->p_sisig = signum;

		/* if there are other threads, pause them */
		if (TAILQ_FIRST(&p->p_p->ps_threads) != p ||
		    TAILQ_NEXT(p, p_thr_link) != NULL)
			single_thread_set(p, SINGLE_SUSPEND, 0);

		if (coredump(p) == 0)
			signum |= WCOREFLAG;
	}
	exit1(p, W_EXITCODE(0, signum), EXIT_NORMAL);
	/* NOTREACHED */
}

/*
 * Sysctl-controlled policy for core dumps of set-id processes:
 * 1 = never dump, 2 = dump into /var/crash/ instead of the cwd.
 */
int nosuidcoredump = 1;

/* State threaded through the emulation coredump callbacks. */
struct coredump_iostate {
	struct proc *io_proc;
	struct vnode *io_vp;
	struct ucred *io_cred;
	off_t io_offset;	/* next write offset in the core file */
};

/*
 * Dump core, into a file named "progname.core", unless the process was
 * setuid/setgid.
 */
int
coredump(struct proc *p)
{
#ifdef SMALL_KERNEL
	return EPERM;
#else
	struct vnode *vp;
	struct ucred *cred = p->p_ucred;
	struct vmspace *vm = p->p_vmspace;
	struct nameidata nd;
	struct vattr vattr;
	struct coredump_iostate io;
	int error, error1, len;
	char name[sizeof("/var/crash/") + MAXCOMLEN + sizeof(".core")];
	char *dir = "";

	/*
	 * Don't dump if not root and the process has used set user or
	 * group privileges, unless the nosuidcoredump sysctl is set to 2,
	 * in which case dumps are put into /var/crash/.
	 */
	if (((p->p_p->ps_flags & PS_SUGID) && (error = suser(p, 0))) ||
	    ((p->p_p->ps_flags & PS_SUGID) && nosuidcoredump)) {
		if (nosuidcoredump == 2)
			dir = "/var/crash/";
		else
			return (EPERM);
	}

	/* Don't dump if will exceed file size limit. */
	if (USPACE + ptoa(vm->vm_dsize + vm->vm_ssize) >=
	    p->p_rlimit[RLIMIT_CORE].rlim_cur)
		return (EFBIG);

	len = snprintf(name, sizeof(name), "%s%s.core", dir, p->p_comm);
	if (len >= sizeof(name))
		return (EACCES);

	/*
	 * ... but actually write it as UID
	 */
	cred = crdup(cred);
	cred->cr_uid = p->p_cred->p_ruid;
	cred->cr_gid = p->p_cred->p_rgid;

	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, p);

	error = vn_open(&nd, O_CREAT | FWRITE | O_NOFOLLOW, S_IRUSR | S_IWUSR);

	if (error) {
		crfree(cred);
		return (error);
	}

	/*
	 * Don't dump to non-regular files, files with links, or files
	 * owned by someone else.
	 */
	vp = nd.ni_vp;
	if ((error = VOP_GETATTR(vp, &vattr, cred, p)) != 0)
		goto out;
	if (vp->v_type != VREG || vattr.va_nlink != 1 ||
	    vattr.va_mode & ((VREAD | VWRITE) >> 3 | (VREAD | VWRITE) >> 6) ||
	    vattr.va_uid != cred->cr_uid) {
		error = EACCES;
		goto out;
	}
	/* truncate any preexisting content before writing the new core */
	VATTR_NULL(&vattr);
	vattr.va_size = 0;
	VOP_SETATTR(vp, &vattr, cred, p);
	p->p_p->ps_acflag |= ACORE;

	io.io_proc = p;
	io.io_vp = vp;
	io.io_cred = cred;
	io.io_offset = 0;

	/* the emulation decides the actual core file format */
	error = (*p->p_emul->e_coredump)(p, &io);
out:
	VOP_UNLOCK(vp, 0, p);
	error1 = vn_close(vp, FWRITE, cred, p);
	crfree(cred);
	if (error == 0)
		error = error1;
	return (error);
#endif
}

/*
 * Traditional (a.out-style "struct core") core dumper, installed as the
 * e_coredump hook for native emulation.  Writes the machine-dependent
 * state and the memory segments, then the header.
 */
int
coredump_trad(struct proc *p, void *cookie)
{
#ifdef SMALL_KERNEL
	return EPERM;
#else
	struct coredump_iostate *io = cookie;
	struct vmspace *vm = io->io_proc->p_vmspace;
	struct vnode *vp = io->io_vp;
	struct ucred *cred = io->io_cred;
	struct core core;
	int error;

	core.c_midmag = 0;
	strlcpy(core.c_name, p->p_comm, sizeof(core.c_name));
	core.c_nseg = 0;
	core.c_signo = p->p_sisig;
	core.c_ucode = p->p_sitrapno;
	core.c_cpusize = 0;
	core.c_tsize = (u_long)ptoa(vm->vm_tsize);
	core.c_dsize = (u_long)ptoa(vm->vm_dsize);
	core.c_ssize = (u_long)round_page(ptoa(vm->vm_ssize));
	/* machine-dependent registers etc.; may update the header fields */
	error = cpu_coredump(p, vp, cred, &core);
	if (error)
		return (error);
	/*
	 * uvm_coredump() spits out all appropriate segments.
	 * All that's left to do is to write the core header.
	 */
	error = uvm_coredump(p, vp, cred, &core);
	if (error)
		return (error);
	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&core,
	    (int)core.c_hdrsize, (off_t)0,
	    UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, NULL, p);
	return (error);
#endif
}

#ifndef SMALL_KERNEL
/*
 * Append "len" bytes from "data" (in the given address space) to the
 * core file, advancing the offset in the iostate on success.
 * Logs and returns the error on a failed write.
 */
int
coredump_write(void *cookie, enum uio_seg segflg, const void *data, size_t len)
{
	struct coredump_iostate *io = cookie;
	int error;

	error = vn_rdwr(UIO_WRITE, io->io_vp, (void *)data, len,
	    io->io_offset, segflg,
	    IO_NODELOCKED|IO_UNIT, io->io_cred, NULL, io->io_proc);
	if (error) {
		printf("pid %d (%s): %s write of %lu@%p at %lld failed: %d\n",
		    io->io_proc->p_pid, io->io_proc->p_comm,
		    segflg == UIO_USERSPACE ? "user" : "system",
		    len, data, (long long) io->io_offset, error);
		return (error);
	}

	io->io_offset += len;
	return (0);
}
#endif	/* !SMALL_KERNEL */

/*
 * Nonexistent system call-- signal process (may want to handle it).
 * Flag error in case process won't see signal immediately (blocked or ignored).
 */
/* ARGSUSED */
int
sys_nosys(struct proc *p, void *v, register_t *retval)
{

	ptsignal(p, SIGSYS, STHREAD);
	return (ENOSYS);
}

/*
 * sigwait()-style syscall for rthreads: wait for one of the signals in
 * "sigmask" to become pending, return its number, and optionally copy a
 * (minimal) siginfo out to userland.
 */
int
sys___thrsigdivert(struct proc *p, void *v, register_t *retval)
{
	struct sys___thrsigdivert_args /* {
		syscallarg(sigset_t) sigmask;
		syscallarg(siginfo_t *) info;
		syscallarg(const struct timespec *) timeout;
	} */ *uap = v;
	sigset_t mask;
	sigset_t *m;
	long long to_ticks = 0;
	int error;

	if (!rthreads_enabled)
		return (ENOTSUP);

	m = NULL;
	/* SIGKILL/SIGSTOP can never be waited for */
	mask = SCARG(uap, sigmask) &~ sigcantmask;

	/* pending signal for this thread? */
	if (p->p_siglist & mask)
		m = &p->p_siglist;
	else if (p->p_p->ps_mainproc->p_siglist & mask)
		m = &p->p_p->ps_mainproc->p_siglist;
	if (m != NULL) {
		/* take the lowest-numbered pending signal without sleeping */
		int sig = ffs((long)(*m & mask));
		atomic_clearbits_int(m, sigmask(sig));
		*retval = sig;
		return (0);
	}

	if (SCARG(uap, timeout) != NULL) {
		struct timespec ts;
		if ((error = copyin(SCARG(uap, timeout), &ts, sizeof(ts))) != 0)
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &ts);
#endif
		to_ticks = (long long)hz * ts.tv_sec +
		    ts.tv_nsec / (tick * 1000);
		if (to_ticks > INT_MAX)
			to_ticks = INT_MAX;
	}

	/* p_sigwait will be filled in by the thread that delivers a signal */
	p->p_sigwait = 0;
	atomic_setbits_int(&p->p_sigdivert, mask);
	error = tsleep(&p->p_sigdivert, PPAUSE|PCATCH, "sigwait",
	    (int)to_ticks);
	if (p->p_sigdivert) {
		/* interrupted: nobody consumed the divert mask for us */
		KASSERT(error != 0);
		atomic_clearbits_int(&p->p_sigdivert, ~0);
		if (error == EINTR)
			error = ERESTART;
		else if (error == ETIMEDOUT)
			error = EAGAIN;
		/*
		 * NOTE(review): tsleep() traditionally reports a timeout
		 * as EWOULDBLOCK rather than ETIMEDOUT -- confirm the
		 * ETIMEDOUT branch above is actually reachable.
		 */
		return (error);

	}
	KASSERT(p->p_sigwait != 0);
	*retval = p->p_sigwait;

	if (SCARG(uap, info) == NULL) {
		error = 0;
	} else {
		siginfo_t si;

		bzero(&si, sizeof si);
		si.si_signo = p->p_sigwait;
		error = copyout(&si, SCARG(uap, info), sizeof(si));
	}
	return (error);
}

/*
 * Fill in a siginfo_t for delivery/tracing: always records signo and
 * code; for SI_USER the value is passed through, while for hardware
 * faults the fault address and trap number are recorded instead.
 */
void
initsiginfo(siginfo_t *si, int sig, u_long trapno, int code, union sigval val)
{
	bzero(si, sizeof *si);

	si->si_signo = sig;
	si->si_code = code;
	if (code == SI_USER) {
		si->si_value = val;
	} else {
		switch (sig) {
		case SIGSEGV:
		case SIGILL:
		case SIGBUS:
		case SIGFPE:
			si->si_addr = val.sival_ptr;
			si->si_trapno = trapno;
			break;
		case SIGXFSZ:
			break;
		}
	}
}

/*
 * Attach an EVFILT_SIGNAL knote to the current process's knote list.
 */
int
filt_sigattach(struct knote *kn)
{
	struct process *pr = curproc->p_p;

	kn->kn_ptr.p_process = pr;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/* XXX lock the proc here while adding to the list? */
	SLIST_INSERT_HEAD(&pr->ps_klist, kn, kn_selnext);

	return (0);
}

/*
 * Detach an EVFILT_SIGNAL knote from its process's knote list.
 */
void
filt_sigdetach(struct knote *kn)
{
	struct process *pr = kn->kn_ptr.p_process;

	SLIST_REMOVE(&pr->ps_klist, kn, knote, kn_selnext);
}

/*
 * signal knotes are shared with proc knotes, so we apply a mask to
 * the hint in order to differentiate them from process hints.  This
 * could be avoided by using a signal-specific knote list, but probably
 * isn't worth the trouble.
 */
int
filt_signal(struct knote *kn, long hint)
{

	if (hint & NOTE_SIGNAL) {
		hint &= ~NOTE_SIGNAL;

		/* count each occurrence of the watched signal number */
		if (kn->kn_id == hint)
			kn->kn_data++;
	}
	return (kn->kn_data != 0);
}

/*
 * Common code run on every return to userspace: deliver pending
 * signals, honor a pending single-thread suspension, and refresh the
 * scheduling priority.
 */
void
userret(struct proc *p)
{
	int sig;

	while ((sig = CURSIG(p)) != 0)
		postsig(sig);

	if (p->p_flag & P_SUSPSINGLE) {
		KERNEL_LOCK();
		single_thread_check(p, 0);
		KERNEL_UNLOCK();
	}

	p->p_cpu->ci_schedstate.spc_curpriority = p->p_priority = p->p_usrpri;
}

/*
 * If some other thread has single-threaded the process, park this
 * thread (or tell it to unwind/exit).  Returns 0 once single-threading
 * has been released, or ERESTART/EINTR when "deep" callers must unwind
 * to the kernel boundary first.  May not return at all (exit1).
 */
int
single_thread_check(struct proc *p, int deep)
{
	struct process *pr = p->p_p;

	if (pr->ps_single != NULL && pr->ps_single != p) {
		do {
			int s;

			/* if we're in deep, we need to unwind to the edge */
			if (deep) {
				if (pr->ps_flags & PS_SINGLEUNWIND)
					return (ERESTART);
				if (pr->ps_flags & PS_SINGLEEXIT)
					return (EINTR);
			}

			/* last thread to park wakes the single-threader */
			if (--pr->ps_singlecount == 0)
				wakeup(&pr->ps_singlecount);
			if (pr->ps_flags & PS_SINGLEEXIT)
				exit1(p, 0, EXIT_THREAD_NOCHECK);

			/* not exiting and don't need to unwind, so suspend */
			SCHED_LOCK(s);
			p->p_stat = SSTOP;
			mi_switch();
			SCHED_UNLOCK(s);
		} while (pr->ps_single != NULL);
	}

	return (0);
}

/*
 * Stop other threads in the process.  The mode controls how and
 * where the other threads should stop:
 *  - SINGLE_SUSPEND: stop wherever they are, will later either be told to exit
 *    (by setting to SINGLE_EXIT) or be released (via single_thread_clear())
 *  - SINGLE_UNWIND: just unwind to kernel boundary, will be told to exit
 *    or released as with SINGLE_SUSPEND
 *  - SINGLE_EXIT: unwind to kernel boundary and exit
 */
int
single_thread_set(struct proc *p, enum single_thread_mode mode, int deep)
{
	struct process *pr = p->p_p;
	struct proc *q;
	int error;

	/* we might lose a race with another single-threader; defer to it */
	if ((error = single_thread_check(p, deep)))
		return error;

	switch (mode) {
	case SINGLE_SUSPEND:
		break;
	case SINGLE_UNWIND:
		atomic_setbits_int(&pr->ps_flags, PS_SINGLEUNWIND);
		break;
	case SINGLE_EXIT:
		atomic_setbits_int(&pr->ps_flags, PS_SINGLEEXIT);
		atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND);
		break;
#ifdef DIAGNOSTIC
	default:
		panic("single_thread_mode = %d", mode);
#endif
	}
	pr->ps_single = p;
	/* count of threads that still have to park in single_thread_check() */
	pr->ps_singlecount = 0;
	TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
		int s;

		if (q == p || ISSET(q->p_flag, P_WEXIT))
			continue;
		SCHED_LOCK(s);
		atomic_setbits_int(&q->p_flag, P_SUSPSINGLE);
		switch (q->p_stat) {
		case SIDL:
		case SRUN:
			/* will notice P_SUSPSINGLE on its way to userret */
			pr->ps_singlecount++;
			break;
		case SSLEEP:
			/* if it's not interruptible, then just have to wait */
			if (q->p_flag & P_SINTR) {
				/* merely need to suspend?  just stop it */
				if (mode == SINGLE_SUSPEND) {
					q->p_stat = SSTOP;
					break;
				}
				/* need to unwind or exit, so wake it */
				setrunnable(q);
			}
			pr->ps_singlecount++;
			break;
		case SSTOP:
			if (mode == SINGLE_EXIT) {
				setrunnable(q);
				pr->ps_singlecount++;
			}
			break;
		case SZOMB:
		case SDEAD:
			break;
		case SONPROC:
			pr->ps_singlecount++;
			/* kick the other CPU so the thread traps out */
			signotify(q);
			break;
		}
		SCHED_UNLOCK(s);
	}

	/* wait until they're all suspended */
	while (pr->ps_singlecount > 0)
		tsleep(&pr->ps_singlecount, PUSER, "suspend", 0);
	return 0;
}

/*
 * Release single-threading: let the other threads of the process run
 * again.  "flag" names a p_flag bit (e.g. a suspend reason) that, when
 * still set on a thread, keeps it stopped despite the release.
 */
void
single_thread_clear(struct proc *p, int flag)
{
	struct process *pr = p->p_p;
	struct proc *q;

	KASSERT(pr->ps_single == p);

	pr->ps_single = NULL;
	atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND | PS_SINGLEEXIT);
	TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
		int s;

		if (q == p || (q->p_flag & P_SUSPSINGLE) == 0)
			continue;
		atomic_clearbits_int(&q->p_flag, P_SUSPSINGLE);

		/*
		 * if the thread was only stopped for single threading
		 * then clearing that either makes it runnable or puts
		 * it back into some sleep queue
		 */
		SCHED_LOCK(s);
		if (q->p_stat == SSTOP && (q->p_flag & flag) == 0) {
			if (q->p_wchan == 0)
				setrunnable(q);
			else
				q->p_stat = SSLEEP;
		}
		SCHED_UNLOCK(s);
	}
}