/*	$OpenBSD: kern_sig.c,v 1.103 2009/03/05 19:52:24 kettenis Exp $	*/
/*	$NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $	*/

/*
 * Copyright (c) 1997 Theo de Raadt. All rights reserved.
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sig.c	8.7 (Berkeley) 4/18/94
 */

#define	SIGPROP		/* include signal properties table */
#include <sys/param.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/queue.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/buf.h>
#include <sys/acct.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/wait.h>
#include <sys/ktrace.h>
#include <sys/stat.h>
#include <sys/core.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/ptrace.h>
#include <sys/sched.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <machine/cpu.h>

#include <uvm/uvm_extern.h>
#include <sys/user.h>		/* for coredump */

int	filt_sigattach(struct knote *kn);
void	filt_sigdetach(struct knote *kn);
int	filt_signal(struct knote *kn, long hint);

struct filterops sig_filtops =
	{ 0, filt_sigattach, filt_sigdetach, filt_signal };

void proc_stop(struct proc *p, int);
void proc_stop_sweep(void *);
struct timeout proc_stop_to;

int cansignal(struct proc *, struct pcred *, struct proc *, int);

struct pool sigacts_pool;	/* memory pool for sigacts structures */

/*
 * Can process p, with pcred pc, send the signal signum to process q?
 */
int
cansignal(struct proc *p, struct pcred *pc, struct proc *q, int signum)
{
	if (pc->pc_ucred->cr_uid == 0)
		return (1);		/* root can always signal */

	if (p == q)
		return (1);		/* process can always signal itself */

#ifdef RTHREADS
	/* a thread can only be signalled from within the same process */
	if (q->p_flag & P_THREAD) {
		return (p->p_p == q->p_p);
	}
#endif

	if (signum == SIGCONT && q->p_session == p->p_session)
		return (1);		/* SIGCONT in session */

	/*
	 * Using kill(), only certain signals can be sent to setugid
	 * child processes
	 */
	if (q->p_flag & P_SUGID) {
		switch (signum) {
		case 0:
		case SIGKILL:
		case SIGINT:
		case SIGTERM:
		case SIGALRM:
		case SIGSTOP:
		case SIGTTIN:
		case SIGTTOU:
		case SIGTSTP:
		case SIGHUP:
		case SIGUSR1:
		case SIGUSR2:
			if (pc->p_ruid == q->p_cred->p_ruid ||
			    pc->pc_ucred->cr_uid == q->p_cred->p_ruid)
				return (1);
		}
		return (0);
	}

	if (pc->p_ruid == q->p_cred->p_ruid ||
	    pc->p_ruid == q->p_cred->p_svuid ||
	    pc->pc_ucred->cr_uid == q->p_cred->p_ruid ||
	    pc->pc_ucred->cr_uid == q->p_cred->p_svuid)
		return (1);
	return (0);
}

/*
 * Initialize signal-related data structures.
 */
void
signal_init(void)
{
	timeout_set(&proc_stop_to, proc_stop_sweep, NULL);

	pool_init(&sigacts_pool, sizeof(struct sigacts), 0, 0, 0, "sigapl",
	    &pool_allocator_nointr);
}

/*
 * Create an initial sigacts structure, using the same signal state
 * as p.
 */
struct sigacts *
sigactsinit(struct proc *p)
{
	struct sigacts *ps;

	ps = pool_get(&sigacts_pool, PR_WAITOK);
	memcpy(ps, p->p_sigacts, sizeof(struct sigacts));
	ps->ps_refcnt = 1;
	return (ps);
}

/*
 * Make p2 share p1's sigacts.
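 * The shared structure's reference count is bumped here and only drops
 * in sigactsfree(), so the sigacts remains valid until its last user
 * releases it.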
 */
void
sigactsshare(struct proc *p1, struct proc *p2)
{

	p2->p_sigacts = p1->p_sigacts;
	p1->p_sigacts->ps_refcnt++;
}

/*
 * Make this process not share its sigacts, maintaining all
 * signal state.
 */
void
sigactsunshare(struct proc *p)
{
	struct sigacts *newps;

	if (p->p_sigacts->ps_refcnt == 1)
		return;

	newps = sigactsinit(p);
	sigactsfree(p);
	p->p_sigacts = newps;
}

/*
 * Release a sigacts structure.
 */
void
sigactsfree(struct proc *p)
{
	struct sigacts *ps = p->p_sigacts;

	if (--ps->ps_refcnt > 0)
		return;

	p->p_sigacts = NULL;

	pool_put(&sigacts_pool, ps);
}

/* ARGSUSED */
int
sys_sigaction(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigaction_args /* {
		syscallarg(int) signum;
		syscallarg(const struct sigaction *) nsa;
		syscallarg(struct sigaction *) osa;
	} */ *uap = v;
	struct sigaction vec;
	struct sigaction *sa;
	struct sigacts *ps = p->p_sigacts;
	int signum;
	int bit, error;

	signum = SCARG(uap, signum);
	if (signum <= 0 || signum >= NSIG ||
	    (SCARG(uap, nsa) && (signum == SIGKILL || signum == SIGSTOP)))
		return (EINVAL);
	sa = &vec;
	if (SCARG(uap, osa)) {
		sa->sa_handler = ps->ps_sigact[signum];
		sa->sa_mask = ps->ps_catchmask[signum];
		bit = sigmask(signum);
		sa->sa_flags = 0;
		if ((ps->ps_sigonstack & bit) != 0)
			sa->sa_flags |= SA_ONSTACK;
		if ((ps->ps_sigintr & bit) == 0)
			sa->sa_flags |= SA_RESTART;
		if ((ps->ps_sigreset & bit) != 0)
			sa->sa_flags |= SA_RESETHAND;
		if ((ps->ps_siginfo & bit) != 0)
			sa->sa_flags |= SA_SIGINFO;
		if (signum == SIGCHLD) {
			if ((p->p_flag & P_NOCLDSTOP) != 0)
				sa->sa_flags |= SA_NOCLDSTOP;
			if ((p->p_flag & P_NOCLDWAIT) != 0)
				sa->sa_flags |= SA_NOCLDWAIT;
		}
		if ((sa->sa_mask & bit) == 0)
			sa->sa_flags |= SA_NODEFER;
		sa->sa_mask &= ~bit;
		error = copyout(sa, SCARG(uap, osa), sizeof (vec));
		if (error)
			return (error);
	}
	if (SCARG(uap, nsa)) {
		error = copyin(SCARG(uap, nsa), sa, sizeof (vec));
		if (error)
			return (error);
		setsigvec(p, signum, sa);
	}
	return (0);
}

void
setsigvec(struct proc *p, int signum, struct sigaction *sa)
{
	struct sigacts *ps = p->p_sigacts;
	int bit;
	int s;

	bit = sigmask(signum);
	/*
	 * Change setting atomically.
	 */
	s = splhigh();
	ps->ps_sigact[signum] = sa->sa_handler;
	if ((sa->sa_flags & SA_NODEFER) == 0)
		sa->sa_mask |= sigmask(signum);
	ps->ps_catchmask[signum] = sa->sa_mask &~ sigcantmask;
	if (signum == SIGCHLD) {
		if (sa->sa_flags & SA_NOCLDSTOP)
			atomic_setbits_int(&p->p_flag, P_NOCLDSTOP);
		else
			atomic_clearbits_int(&p->p_flag, P_NOCLDSTOP);
		/*
		 * If the SA_NOCLDWAIT flag is set or the handler
		 * is SIG_IGN we reparent the dying child to PID 1
		 * (init) which will reap the zombie.  Because we use
		 * init to do our dirty work we never set P_NOCLDWAIT
		 * for PID 1.
		 */
		if (p->p_pid != 1 && ((sa->sa_flags & SA_NOCLDWAIT) ||
		    sa->sa_handler == SIG_IGN))
			atomic_setbits_int(&p->p_flag, P_NOCLDWAIT);
		else
			atomic_clearbits_int(&p->p_flag, P_NOCLDWAIT);
	}
	if ((sa->sa_flags & SA_RESETHAND) != 0)
		ps->ps_sigreset |= bit;
	else
		ps->ps_sigreset &= ~bit;
	if ((sa->sa_flags & SA_SIGINFO) != 0)
		ps->ps_siginfo |= bit;
	else
		ps->ps_siginfo &= ~bit;
	if ((sa->sa_flags & SA_RESTART) == 0)
		ps->ps_sigintr |= bit;
	else
		ps->ps_sigintr &= ~bit;
	if ((sa->sa_flags & SA_ONSTACK) != 0)
		ps->ps_sigonstack |= bit;
	else
		ps->ps_sigonstack &= ~bit;
#ifdef COMPAT_SUNOS
	{
		extern struct emul emul_sunos;
		if (p->p_emul == &emul_sunos) {
			if (sa->sa_flags & SA_USERTRAMP)
				ps->ps_usertramp |= bit;
			else
				ps->ps_usertramp &= ~bit;
		}
	}
#endif
	/*
	 * Set bit in p_sigignore for signals that are set to SIG_IGN,
	 * and for signals set to SIG_DFL where the default is to ignore.
	 * However, don't put SIGCONT in p_sigignore,
	 * as we have to restart the process.
	 */
	if (sa->sa_handler == SIG_IGN ||
	    (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) {
		atomic_clearbits_int(&p->p_siglist, bit);
		if (signum != SIGCONT)
			p->p_sigignore |= bit;	/* easier in psignal */
		p->p_sigcatch &= ~bit;
	} else {
		p->p_sigignore &= ~bit;
		if (sa->sa_handler == SIG_DFL)
			p->p_sigcatch &= ~bit;
		else
			p->p_sigcatch |= bit;
	}
	splx(s);
}

/*
 * Initialize signal state for process 0;
 * set to ignore signals that are ignored by default.
 */
void
siginit(struct proc *p)
{
	int i;

	for (i = 0; i < NSIG; i++)
		if (sigprop[i] & SA_IGNORE && i != SIGCONT)
			p->p_sigignore |= sigmask(i);
}

/*
 * Reset signals for an exec of the specified process.
 */
void
execsigs(struct proc *p)
{
	struct sigacts *ps;
	int nc, mask;

	sigactsunshare(p);
	ps = p->p_sigacts;

	/*
	 * Reset caught signals.  Held signals remain held
	 * through p_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	while (p->p_sigcatch) {
		nc = ffs((long)p->p_sigcatch);
		mask = sigmask(nc);
		p->p_sigcatch &= ~mask;
		if (sigprop[nc] & SA_IGNORE) {
			if (nc != SIGCONT)
				p->p_sigignore |= mask;
			atomic_clearbits_int(&p->p_siglist, mask);
		}
		ps->ps_sigact[nc] = SIG_DFL;
	}
	/*
	 * Reset stack state to the user stack.
	 * Clear set of signals caught on the signal stack.
	 */
	ps->ps_sigstk.ss_flags = SS_DISABLE;
	ps->ps_sigstk.ss_size = 0;
	ps->ps_sigstk.ss_sp = 0;
	ps->ps_flags = 0;
	atomic_clearbits_int(&p->p_flag, P_NOCLDWAIT);
	if (ps->ps_sigact[SIGCHLD] == SIG_IGN)
		ps->ps_sigact[SIGCHLD] = SIG_DFL;
}

/*
 * Manipulate signal mask.
 * Note that we receive new mask, not pointer,
 * and return old mask as return value;
 * the library stub does the rest.
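 * Bits in sigcantmask can never be blocked; they are stripped from the
 * new mask in the SIG_BLOCK and SIG_SETMASK cases below.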
 */
int
sys_sigprocmask(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigprocmask_args /* {
		syscallarg(int) how;
		syscallarg(sigset_t) mask;
	} */ *uap = v;
	int error = 0;
	int s;

	*retval = p->p_sigmask;
	s = splhigh();

	switch (SCARG(uap, how)) {
	case SIG_BLOCK:
		p->p_sigmask |= SCARG(uap, mask) &~ sigcantmask;
		break;

	case SIG_UNBLOCK:
		p->p_sigmask &= ~SCARG(uap, mask);
		break;

	case SIG_SETMASK:
		p->p_sigmask = SCARG(uap, mask) &~ sigcantmask;
		break;

	default:
		error = EINVAL;
		break;
	}
	splx(s);
	return (error);
}

/* ARGSUSED */
int
sys_sigpending(struct proc *p, void *v, register_t *retval)
{

	*retval = p->p_siglist;
	return (0);
}

/*
 * Suspend process until signal, providing mask to be set
 * in the meantime.  Note nonstandard calling convention:
 * libc stub passes mask, not pointer, to save a copyin.
 */
/* ARGSUSED */
int
sys_sigsuspend(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigsuspend_args /* {
		syscallarg(int) mask;
	} */ *uap = v;
	struct sigacts *ps = p->p_sigacts;

	/*
	 * When returning from sigpause, we want
	 * the old mask to be restored after the
	 * signal handler has finished.  Thus, we
	 * save it here and mark the sigacts structure
	 * to indicate this.
	 */
	ps->ps_oldmask = p->p_sigmask;
	ps->ps_flags |= SAS_OLDMASK;
	p->p_sigmask = SCARG(uap, mask) &~ sigcantmask;
	while (tsleep(ps, PPAUSE|PCATCH, "pause", 0) == 0)
		/* void */;
	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}

/* ARGSUSED */
int
sys_osigaltstack(struct proc *p, void *v, register_t *retval)
{
	struct sys_osigaltstack_args /* {
		syscallarg(const struct osigaltstack *) nss;
		syscallarg(struct osigaltstack *) oss;
	} */ *uap = v;
	struct sigacts *psp;
	struct osigaltstack ss;
	int error;

	psp = p->p_sigacts;
	if ((psp->ps_flags & SAS_ALTSTACK) == 0)
		psp->ps_sigstk.ss_flags |= SS_DISABLE;
	if (SCARG(uap, oss)) {
		ss.ss_sp = psp->ps_sigstk.ss_sp;
		ss.ss_size = psp->ps_sigstk.ss_size;
		ss.ss_flags = psp->ps_sigstk.ss_flags;
		if ((error = copyout(&ss, SCARG(uap, oss), sizeof(ss))))
			return (error);
	}
	if (SCARG(uap, nss) == NULL)
		return (0);
	error = copyin(SCARG(uap, nss), &ss, sizeof(ss));
	if (error)
		return (error);
	if (ss.ss_flags & SS_DISABLE) {
		if (psp->ps_sigstk.ss_flags & SS_ONSTACK)
			return (EINVAL);
		psp->ps_flags &= ~SAS_ALTSTACK;
		psp->ps_sigstk.ss_flags = ss.ss_flags;
		return (0);
	}
	if (ss.ss_size < MINSIGSTKSZ)
		return (ENOMEM);
	psp->ps_flags |= SAS_ALTSTACK;
	psp->ps_sigstk.ss_sp = ss.ss_sp;
	psp->ps_sigstk.ss_size = ss.ss_size;
	psp->ps_sigstk.ss_flags = ss.ss_flags;
	return (0);
}

int
sys_sigaltstack(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigaltstack_args /* {
		syscallarg(const struct sigaltstack *) nss;
		syscallarg(struct sigaltstack *) oss;
	} */ *uap = v;
	struct sigacts *psp;
	struct sigaltstack ss;
	int error;

	psp = p->p_sigacts;
	if ((psp->ps_flags & SAS_ALTSTACK) == 0)
		psp->ps_sigstk.ss_flags |= SS_DISABLE;
	if (SCARG(uap, oss) && (error = copyout(&psp->ps_sigstk,
	    SCARG(uap, oss), sizeof(struct sigaltstack))))
		return (error);
	if (SCARG(uap, nss) == NULL)
		return (0);
	error = copyin(SCARG(uap, nss), &ss, sizeof(ss));
	if (error)
		return (error);
	if (ss.ss_flags & SS_DISABLE) {
		if (psp->ps_sigstk.ss_flags & SS_ONSTACK)
			return (EINVAL);
		psp->ps_flags &= ~SAS_ALTSTACK;
		psp->ps_sigstk.ss_flags = ss.ss_flags;
		return (0);
	}
	if (ss.ss_size < MINSIGSTKSZ)
		return (ENOMEM);
	psp->ps_flags |= SAS_ALTSTACK;
	psp->ps_sigstk = ss;
	return (0);
}

/* ARGSUSED */
int
sys_kill(struct proc *cp, void *v, register_t *retval)
{
	struct sys_kill_args /* {
		syscallarg(int) pid;
		syscallarg(int) signum;
	} */ *uap = v;
	struct proc *p;
	struct pcred *pc = cp->p_cred;

	if ((u_int)SCARG(uap, signum) >= NSIG)
		return (EINVAL);
	if (SCARG(uap, pid) > 0) {
		enum signal_type type = SPROCESS;

#ifdef RTHREADS
		if (SCARG(uap, pid) > THREAD_PID_OFFSET) {
			if ((p = pfind(SCARG(uap, pid)
			    - THREAD_PID_OFFSET)) == NULL)
				return (ESRCH);
			if (p->p_flag & P_THREAD)
				return (ESRCH);
			type = STHREAD;
		} else
#endif
		{
			if ((p = pfind(SCARG(uap, pid))) == NULL)
				return (ESRCH);
#ifdef RTHREADS
			if (p->p_flag & P_THREAD)
				type = STHREAD;
#endif
		}

		/* kill single process */
		if (!cansignal(cp, pc, p, SCARG(uap, signum)))
			return (EPERM);
		if (SCARG(uap, signum))
			ptsignal(p, SCARG(uap, signum), type);
		return (0);
	}
	switch (SCARG(uap, pid)) {
	case -1:		/* broadcast signal */
		return (killpg1(cp, SCARG(uap, signum), 0, 1));
	case 0:			/* signal own process group */
		return (killpg1(cp, SCARG(uap, signum), 0, 0));
	default:		/* negative explicit process group */
		return (killpg1(cp, SCARG(uap, signum), -SCARG(uap, pid), 0));
	}
	/* NOTREACHED */
}

/*
 * Common code for kill process group/broadcast kill.
 * cp is calling process.
 */
int
killpg1(struct proc *cp, int signum, int pgid, int all)
{
	struct proc *p;
	struct pcred *pc = cp->p_cred;
	struct pgrp *pgrp;
	int nfound = 0;

	if (all)
		/*
		 * broadcast
		 */
		LIST_FOREACH(p, &allproc, p_list) {
			if (p->p_pid <= 1 || p->p_flag & (P_SYSTEM|P_THREAD) ||
			    p == cp || !cansignal(cp, pc, p, signum))
				continue;
			nfound++;
			if (signum)
				psignal(p, signum);
		}
	else {
		if (pgid == 0)
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = cp->p_pgrp;
		else {
			pgrp = pgfind(pgid);
			if (pgrp == NULL)
				return (ESRCH);
		}
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			if (p->p_pid <= 1 || p->p_flag & (P_SYSTEM|P_THREAD) ||
			    !cansignal(cp, pc, p, signum))
				continue;
			nfound++;
			if (signum && P_ZOMBIE(p) == 0)
				psignal(p, signum);
		}
	}
	return (nfound ? 0 : ESRCH);
}

#define CANDELIVER(uid, euid, p) \
	(euid == 0 || \
	(uid) == (p)->p_cred->p_ruid || \
	(uid) == (p)->p_cred->p_svuid || \
	(uid) == (p)->p_ucred->cr_uid || \
	(euid) == (p)->p_cred->p_ruid || \
	(euid) == (p)->p_cred->p_svuid || \
	(euid) == (p)->p_ucred->cr_uid)

/*
 * Deliver signum to pgid, but first check uid/euid against each
 * process and see if it is permitted.
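 * A negative pgid names a process group (every member is checked);
 * a positive pgid names a single process.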
 */
void
csignal(pid_t pgid, int signum, uid_t uid, uid_t euid)
{
	struct pgrp *pgrp;
	struct proc *p;

	if (pgid == 0)
		return;
	if (pgid < 0) {
		pgid = -pgid;
		if ((pgrp = pgfind(pgid)) == NULL)
			return;
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist)
			if (CANDELIVER(uid, euid, p))
				psignal(p, signum);
	} else {
		if ((p = pfind(pgid)) == NULL)
			return;
		if (CANDELIVER(uid, euid, p))
			psignal(p, signum);
	}
}

/*
 * Send a signal to a process group.
 */
void
gsignal(int pgid, int signum)
{
	struct pgrp *pgrp;

	if (pgid && (pgrp = pgfind(pgid)))
		pgsignal(pgrp, signum, 0);
}

/*
 * Send a signal to a process group.  If checktty is 1,
 * limit to members which have a controlling terminal.
 */
void
pgsignal(struct pgrp *pgrp, int signum, int checkctty)
{
	struct proc *p;

	if (pgrp)
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist)
			if ((checkctty == 0 || p->p_flag & P_CONTROLT) &&
			    (p->p_flag & P_THREAD) == 0)
				psignal(p, signum);
}

/*
 * Send a signal caused by a trap to the current process.
 * If it will be caught immediately, deliver it with correct code.
 * Otherwise, post it normally.
 */
void
trapsignal(struct proc *p, int signum, u_long code, int type,
    union sigval sigval)
{
	struct sigacts *ps = p->p_sigacts;
	int mask;

	mask = sigmask(signum);
	if ((p->p_flag & P_TRACED) == 0 && (p->p_sigcatch & mask) != 0 &&
	    (p->p_sigmask & mask) == 0) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG)) {
			siginfo_t si;

			initsiginfo(&si, signum, code, type, sigval);
			ktrpsig(p, signum, ps->ps_sigact[signum],
			    p->p_sigmask, type, &si);
		}
#endif
		p->p_stats->p_ru.ru_nsignals++;
		(*p->p_emul->e_sendsig)(ps->ps_sigact[signum], signum,
		    p->p_sigmask, code, type, sigval);
		p->p_sigmask |= ps->ps_catchmask[signum];
		if ((ps->ps_sigreset & mask) != 0) {
			p->p_sigcatch &= ~mask;
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				p->p_sigignore |= mask;
			ps->ps_sigact[signum] = SIG_DFL;
		}
	} else {
		ps->ps_sig = signum;
		ps->ps_code = code;	/* XXX for core dump/debugger */
		ps->ps_type = type;
		ps->ps_sigval = sigval;
		ptsignal(p, signum, STHREAD);
	}
}

/*
 * Send the signal to the process.  If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *   o When a stop signal is sent to a sleeping process that takes the
 *     default action, the process is stopped without awakening it.
 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 *     regardless of the signal action (eg, blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 */
void
psignal(struct proc *p, int signum)
{
	ptsignal(p, signum, SPROCESS);
}

/*
 * type = SPROCESS	process signal, can be diverted (sigwait())
 *	XXX if blocked in all threads, mark as pending in struct process
 * type = STHREAD	thread signal, but should be propagated if unhandled
 * type = SPROPAGATED	propagated to this thread, so don't propagate again
 */
void
ptsignal(struct proc *p, int signum, enum signal_type type)
{
	int s, prop;
	sig_t action;
	int mask;
#ifdef RTHREADS
	struct proc *q;
#endif
	int wakeparent = 0;

#ifdef DIAGNOSTIC
	if ((u_int)signum >= NSIG || signum == 0)
		panic("psignal signal number");
#endif

	/* Ignore signal if we are exiting */
	if (p->p_flag & P_WEXIT)
		return;

	mask = sigmask(signum);

#ifdef RTHREADS
	if (type == SPROCESS) {
		TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link) {
			/* ignore exiting threads */
			if (q->p_flag & P_WEXIT)
				continue;
			if (q->p_sigdivert & mask) {
				/* sigwait: convert to thread-specific */
				type = STHREAD;
				p = q;
				break;
			}
		}
	}
#endif

	if (type != SPROPAGATED)
		KNOTE(&p->p_klist, NOTE_SIGNAL | signum);

	prop = sigprop[signum];

	/*
	 * If proc is traced, always give parent a chance.
	 */
	if (p->p_flag & P_TRACED)
		action = SIG_DFL;
#ifdef RTHREADS
	else if (p->p_sigdivert & mask) {
		p->p_sigwait = signum;
		atomic_clearbits_int(&p->p_sigdivert, ~0);
		action = SIG_CATCH;
		wakeup(&p->p_sigdivert);
	}
#endif
	else {
		/*
		 * If the signal is being ignored,
		 * then we forget about it immediately.
		 * (Note: we don't set SIGCONT in p_sigignore,
		 * and if it is set to SIG_IGN,
		 * action will be SIG_DFL here.)
		 */
		if (p->p_sigignore & mask)
			return;
		if (p->p_sigmask & mask)
			action = SIG_HOLD;
		else if (p->p_sigcatch & mask)
			action = SIG_CATCH;
		else {
			action = SIG_DFL;

			if (prop & SA_KILL && p->p_nice > NZERO)
				p->p_nice = NZERO;

			/*
			 * If sending a tty stop signal to a member of an
			 * orphaned process group, discard the signal here if
			 * the action is default; don't stop the process below
			 * if sleeping, and don't clear any pending SIGCONT.
			 */
			if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0)
				return;
		}
	}

	if (prop & SA_CONT) {
		atomic_clearbits_int(&p->p_siglist, stopsigmask);
	}

	if (prop & SA_STOP) {
		atomic_clearbits_int(&p->p_siglist, contsigmask);
		atomic_clearbits_int(&p->p_flag, P_CONTINUED);
	}

	atomic_setbits_int(&p->p_siglist, mask);

#ifdef RTHREADS
	/*
	 * XXX delay processing of SA_STOP signals unless action == SIG_DFL?
	 */
	if (prop & (SA_CONT | SA_STOP) && type != SPROPAGATED) {
		TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link) {
			if (q != p)
				ptsignal(q, signum, SPROPAGATED);
		}
	}
#endif

	/*
	 * Defer further processing for signals which are held,
	 * except that stopped processes must be continued by SIGCONT.
	 */
	if (action == SIG_HOLD && ((prop & SA_CONT) == 0 || p->p_stat != SSTOP))
		return;

	SCHED_LOCK(s);

	switch (p->p_stat) {

	case SSLEEP:
		/*
		 * If process is sleeping uninterruptibly
		 * we can't interrupt the sleep... the signal will
		 * be noticed when the process returns through
		 * trap() or syscall().
		 */
		if ((p->p_flag & P_SINTR) == 0)
			goto out;
		/*
		 * Process is sleeping and traced... make it runnable
		 * so it can discover the signal in issignal() and stop
		 * for the parent.
		 */
		if (p->p_flag & P_TRACED)
			goto run;
		/*
		 * If SIGCONT is default (or ignored) and process is
		 * asleep, we are finished; the process should not
		 * be awakened.
		 */
		if ((prop & SA_CONT) && action == SIG_DFL) {
			atomic_clearbits_int(&p->p_siglist, mask);
			goto out;
		}
		/*
		 * When a sleeping process receives a stop
		 * signal, process immediately if possible.
		 */
		if ((prop & SA_STOP) && action == SIG_DFL) {
			/*
			 * If a child holding parent blocked,
			 * stopping could cause deadlock.
			 */
			if (p->p_flag & P_PPWAIT)
				goto out;
			atomic_clearbits_int(&p->p_siglist, mask);
			p->p_xstat = signum;
			proc_stop(p, 0);
			goto out;
		}
		/*
		 * All other (caught or default) signals
		 * cause the process to run.
		 */
		goto runfast;
		/*NOTREACHED*/

	case SSTOP:
		/*
		 * If traced process is already stopped,
		 * then no further action is necessary.
		 */
		if (p->p_flag & P_TRACED)
			goto out;

		/*
		 * Kill signal always sets processes running.
		 */
		if (signum == SIGKILL)
			goto runfast;

		if (prop & SA_CONT) {
			/*
			 * If SIGCONT is default (or ignored), we continue the
			 * process but don't leave the signal in p_siglist, as
			 * it has no further action.  If SIGCONT is held, we
			 * continue the process and leave the signal in
			 * p_siglist.  If the process catches SIGCONT, let it
			 * handle the signal itself.  If it isn't waiting on
			 * an event, then it goes back to run state.
			 * Otherwise, process goes back to sleep state.
			 */
			atomic_setbits_int(&p->p_flag, P_CONTINUED);
			wakeparent = 1;
			if (action == SIG_DFL)
				atomic_clearbits_int(&p->p_siglist, mask);
			if (action == SIG_CATCH)
				goto runfast;
			if (p->p_wchan == 0)
				goto run;
			p->p_stat = SSLEEP;
			goto out;
		}

		if (prop & SA_STOP) {
			/*
			 * Already stopped, don't need to stop again.
			 * (If we did the shell could get confused.)
			 */
			atomic_clearbits_int(&p->p_siglist, mask);
			goto out;
		}

		/*
		 * If process is sleeping interruptibly, then simulate a
		 * wakeup so that when it is continued, it will be made
		 * runnable and can look at the signal.  But don't make
		 * the process runnable, leave it stopped.
		 */
		if (p->p_wchan && p->p_flag & P_SINTR)
			unsleep(p);
		goto out;

	case SONPROC:
		signotify(p);
		/* FALLTHROUGH */
	default:
		/*
		 * SRUN, SIDL, SZOMB do nothing with the signal,
		 * other than kicking ourselves if we are running.
		 * It will either never be noticed, or noticed very soon.
		 */
		goto out;
	}
	/*NOTREACHED*/

runfast:
	/*
	 * Raise priority to at least PUSER.
	 */
	if (p->p_priority > PUSER)
		p->p_priority = PUSER;
run:
	setrunnable(p);
out:
	SCHED_UNLOCK(s);
	if (wakeparent)
		wakeup(p->p_pptr);
}

/*
 * If the current process has received a signal (should be caught or cause
 * termination, should interrupt current syscall), return the signal number.
 * Stop signals with default action are processed immediately, then cleared;
 * they aren't returned.
 * This is checked after each entry to the system for
 * a syscall or trap (though this can usually be done without calling issignal
 * by checking the pending signal masks in the CURSIG macro.)  The normal call
 * sequence is
 *
 *	while (signum = CURSIG(curproc))
 *		postsig(signum);
 */
int
issignal(struct proc *p)
{
	int signum, mask, prop;
	int dolock = (p->p_flag & P_SINTR) == 0;
	int s;

	for (;;) {
		mask = p->p_siglist & ~p->p_sigmask;
		if (p->p_flag & P_PPWAIT)
			mask &= ~stopsigmask;
		if (mask == 0)		/* no signal to send */
			return (0);
		signum = ffs((long)mask);
		mask = sigmask(signum);
		atomic_clearbits_int(&p->p_siglist, mask);

		/*
		 * We should see pending but ignored signals
		 * only if P_TRACED was on when they were posted.
		 */
		if (mask & p->p_sigignore && (p->p_flag & P_TRACED) == 0)
			continue;

		if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
			/*
			 * If traced, always stop, and stay
			 * stopped until released by the debugger.
			 */
			p->p_xstat = signum;

			if (dolock)
				SCHED_LOCK(s);
			proc_stop(p, 1);
			if (dolock)
				SCHED_UNLOCK(s);

			/*
			 * If we are no longer being traced, or the parent
			 * didn't give us a signal, look for more signals.
			 */
			if ((p->p_flag & P_TRACED) == 0 || p->p_xstat == 0)
				continue;

			/*
			 * If the new signal is being masked, look for other
			 * signals.
			 */
			signum = p->p_xstat;
			mask = sigmask(signum);
			if ((p->p_sigmask & mask) != 0)
				continue;

			/* take the signal! */
			atomic_clearbits_int(&p->p_siglist, mask);
		}

		prop = sigprop[signum];

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */
		switch ((long)p->p_sigacts->ps_sigact[signum]) {

		case (long)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (p->p_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				printf("Process (pid %d) got signal %d\n",
				    p->p_pid, signum);
#endif
				break;		/* == ignore */
			}
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal.  However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				if (p->p_flag & P_TRACED ||
				    (p->p_pgrp->pg_jobc == 0 &&
				    prop & SA_TTYSTOP))
					break;	/* == ignore */
				p->p_xstat = signum;
				if (dolock)
					SCHED_LOCK(s);
				proc_stop(p, 1);
				if (dolock)
					SCHED_UNLOCK(s);
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else
				goto keep;
			/*NOTREACHED*/

		case (long)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
			if ((prop & SA_CONT) == 0 &&
			    (p->p_flag & P_TRACED) == 0)
				printf("issignal\n");
			break;		/* == ignore */

		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			goto keep;
		}
	}
	/* NOTREACHED */

keep:
	atomic_setbits_int(&p->p_siglist, mask);	/*leave the signal for later */
	return (signum);
}

/*
 * Put the argument process into the stopped state and notify the parent
 * via wakeup.  Signals are handled elsewhere.  The process must not be
 * on the run queue.
 */
void
proc_stop(struct proc *p, int sw)
{
#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
	extern void *softclock_si;
#endif
#ifdef MULTIPROCESSOR
	SCHED_ASSERT_LOCKED();
#endif

	p->p_stat = SSTOP;
	atomic_clearbits_int(&p->p_flag, P_WAITED);
	atomic_setbits_int(&p->p_flag, P_STOPPED);
	if (!timeout_pending(&proc_stop_to)) {
		timeout_add(&proc_stop_to, 0);
		/*
		 * We need this soft interrupt to be handled fast.
		 * Extra calls to softclock don't hurt.
		 */
#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
		softintr_schedule(softclock_si);
#else
		setsoftclock();
#endif
	}
	if (sw)
		mi_switch();
}

/*
 * Called from a timeout to send signals to the parents of stopped processes.
 * We can't do this in proc_stop because it's called with nasty locks held
 * and we would need recursive scheduler lock to deal with that.
 */
void
proc_stop_sweep(void *v)
{
	struct proc *p;

	LIST_FOREACH(p, &allproc, p_list) {
		if ((p->p_flag & P_STOPPED) == 0)
			continue;
		atomic_clearbits_int(&p->p_flag, P_STOPPED);

		if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0)
			psignal(p->p_pptr, SIGCHLD);
		wakeup(p->p_pptr);
	}
}

/*
 * Take the action for the specified signal
 * from the current set of pending signals.
 */
void
postsig(int signum)
{
	struct proc *p = curproc;
	struct sigacts *ps = p->p_sigacts;
	sig_t action;
	u_long code;
	int mask, returnmask;
	union sigval sigval;
	int s, type;

#ifdef DIAGNOSTIC
	if (signum == 0)
		panic("postsig");
#endif

	KERNEL_PROC_LOCK(p);

	mask = sigmask(signum);
	atomic_clearbits_int(&p->p_siglist, mask);
	action = ps->ps_sigact[signum];
	sigval.sival_ptr = 0;
	type = SI_USER;

	if (ps->ps_sig != signum) {
		code = 0;
		type = SI_USER;
		sigval.sival_ptr = 0;
	} else {
		code = ps->ps_code;
		type = ps->ps_type;
		sigval = ps->ps_sigval;
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_PSIG)) {
		siginfo_t si;

		initsiginfo(&si, signum, code, type, sigval);
		ktrpsig(p, signum, action, ps->ps_flags & SAS_OLDMASK ?
		    ps->ps_oldmask : p->p_sigmask, type, &si);
	}
#endif
	if (action == SIG_DFL) {
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		sigexit(p, signum);
		/* NOTREACHED */
	} else {
		/*
		 * If we get here, the signal must be caught.
		 */
#ifdef DIAGNOSTIC
		if (action == SIG_IGN || (p->p_sigmask & mask))
			panic("postsig action");
#endif
		/*
		 * Set the new mask value and also defer further
		 * occurrences of this signal.
		 *
		 * Special case: user has done a sigpause.  Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigpause is what we want
		 * restored after the signal processing is completed.
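		 * (sys_sigsuspend() saved that mask in ps_oldmask and set
		 * SAS_OLDMASK to flag it; both are consumed below.)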
		 */
#ifdef MULTIPROCESSOR
		s = splsched();
#else
		s = splhigh();
#endif
		if (ps->ps_flags & SAS_OLDMASK) {
			returnmask = ps->ps_oldmask;
			ps->ps_flags &= ~SAS_OLDMASK;
		} else
			returnmask = p->p_sigmask;
		p->p_sigmask |= ps->ps_catchmask[signum];
		if ((ps->ps_sigreset & mask) != 0) {
			p->p_sigcatch &= ~mask;
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				p->p_sigignore |= mask;
			ps->ps_sigact[signum] = SIG_DFL;
		}
		splx(s);
		p->p_stats->p_ru.ru_nsignals++;
		if (ps->ps_sig == signum) {
			ps->ps_sig = 0;
			ps->ps_code = 0;
			ps->ps_type = SI_USER;
			ps->ps_sigval.sival_ptr = NULL;
		}

		(*p->p_emul->e_sendsig)(action, signum, returnmask, code,
		    type, sigval);
	}

	KERNEL_PROC_UNLOCK(p);
}

/*
 * Force the current process to exit with the specified signal, dumping core
 * if appropriate.  We bypass the normal tests for masked and caught signals,
 * allowing unrecoverable failures to terminate the process without changing
 * signal state.  Mark the accounting record with the signal termination.
 * If dumping core, save the signal number for the debugger.  Calls exit and
 * does not return.
 */
void
sigexit(struct proc *p, int signum)
{
	/* Mark process as going away */
	atomic_setbits_int(&p->p_flag, P_WEXIT);

	p->p_acflag |= AXSIG;
	if (sigprop[signum] & SA_CORE) {
		p->p_sigacts->ps_sig = signum;
		if (coredump(p) == 0)
			signum |= WCOREFLAG;
	}
	exit1(p, W_EXITCODE(0, signum), EXIT_NORMAL);
	/* NOTREACHED */
}

int nosuidcoredump = 1;

struct coredump_iostate {
	struct proc *io_proc;
	struct vnode *io_vp;
	struct ucred *io_cred;
	off_t io_offset;
};

/*
 * Dump core, into a file named "progname.core", unless the process was
 * setuid/setgid.
 */
int
coredump(struct proc *p)
{
	struct vnode *vp;
	struct ucred *cred = p->p_ucred;
	struct vmspace *vm = p->p_vmspace;
	struct nameidata nd;
	struct vattr vattr;
	struct coredump_iostate io;
	int error, error1, len;
	char name[sizeof("/var/crash/") + MAXCOMLEN + sizeof(".core")];
	char *dir = "";

	/*
	 * Don't dump if not root and the process has used set user or
	 * group privileges, unless the nosuidcoredump sysctl is set to 2,
	 * in which case dumps are put into /var/crash/.
	 */
	if (((p->p_flag & P_SUGID) && (error = suser(p, 0))) ||
	    ((p->p_flag & P_SUGID) && nosuidcoredump)) {
		if (nosuidcoredump == 2)
			dir = "/var/crash/";
		else
			return (EPERM);
	}

	/* Don't dump if will exceed file size limit. */
	if (USPACE + ptoa(vm->vm_dsize + vm->vm_ssize) >=
	    p->p_rlimit[RLIMIT_CORE].rlim_cur)
		return (EFBIG);

	len = snprintf(name, sizeof(name), "%s%s.core", dir, p->p_comm);
	if (len >= sizeof(name))
		return (EACCES);

	/*
	 * ... but actually write it as UID
	 */
	cred = crdup(cred);
	cred->cr_uid = p->p_cred->p_ruid;
	cred->cr_gid = p->p_cred->p_rgid;

	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, p);

	error = vn_open(&nd, O_CREAT | FWRITE | O_NOFOLLOW, S_IRUSR | S_IWUSR);

	if (error) {
		crfree(cred);
		return (error);
	}

	/*
	 * Don't dump to non-regular files, files with links, or files
	 * owned by someone else.
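	 * The existing file must also not be readable or writable by group
	 * or other, and it must be owned by the (real) user the dump is
	 * written as.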
	 */
	vp = nd.ni_vp;
	if ((error = VOP_GETATTR(vp, &vattr, cred, p)) != 0)
		goto out;
	if (vp->v_type != VREG || vattr.va_nlink != 1 ||
	    vattr.va_mode & ((VREAD | VWRITE) >> 3 | (VREAD | VWRITE) >> 6) ||
	    vattr.va_uid != cred->cr_uid) {
		error = EACCES;
		goto out;
	}
	VATTR_NULL(&vattr);
	vattr.va_size = 0;
	VOP_SETATTR(vp, &vattr, cred, p);
	p->p_acflag |= ACORE;
	bcopy(p, &p->p_addr->u_kproc.kp_proc, sizeof(struct proc));
	fill_eproc(p, &p->p_addr->u_kproc.kp_eproc);

	io.io_proc = p;
	io.io_vp = vp;
	io.io_cred = cred;
	io.io_offset = 0;

	error = (*p->p_emul->e_coredump)(p, &io);
out:
	VOP_UNLOCK(vp, 0, p);
	error1 = vn_close(vp, FWRITE, cred, p);
	crfree(cred);
	if (error == 0)
		error = error1;
	return (error);
}

int
coredump_trad(struct proc *p, void *cookie)
{
	struct coredump_iostate *io = cookie;
	struct vmspace *vm = io->io_proc->p_vmspace;
	struct vnode *vp = io->io_vp;
	struct ucred *cred = io->io_cred;
	struct core core;
	int error;

	core.c_midmag = 0;
	strlcpy(core.c_name, p->p_comm, sizeof(core.c_name));
	core.c_nseg = 0;
	core.c_signo = p->p_sigacts->ps_sig;
	core.c_ucode = p->p_sigacts->ps_code;
	core.c_cpusize = 0;
	core.c_tsize = (u_long)ptoa(vm->vm_tsize);
	core.c_dsize = (u_long)ptoa(vm->vm_dsize);
	core.c_ssize = (u_long)round_page(ptoa(vm->vm_ssize));
	error = cpu_coredump(p, vp, cred, &core);
	if (error)
		return (error);
	/*
	 * uvm_coredump() spits out all appropriate segments.
	 * All that's left to do is to write the core header.
	 */
	error = uvm_coredump(p, vp, cred, &core);
	if (error)
		return (error);
	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&core,
	    (int)core.c_hdrsize, (off_t)0,
	    UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, NULL, p);
	return (error);
}

int
coredump_write(void *cookie, enum uio_seg segflg, const void *data, size_t len)
{
	struct coredump_iostate *io = cookie;
	int error;

	error = vn_rdwr(UIO_WRITE, io->io_vp, (void *)data, len,
	    io->io_offset, segflg,
	    IO_NODELOCKED|IO_UNIT, io->io_cred, NULL, io->io_proc);
	if (error) {
		printf("pid %d (%s): %s write of %zu@%p at %lld failed: %d\n",
		    io->io_proc->p_pid, io->io_proc->p_comm,
		    segflg == UIO_USERSPACE ? "user" : "system",
		    len, data, (long long) io->io_offset, error);
		return (error);
	}

	io->io_offset += len;
	return (0);
}

/*
 * Nonexistent system call-- signal process (may want to handle it).
 * Flag error in case process won't see signal immediately (blocked or ignored).
 */
/* ARGSUSED */
int
sys_nosys(struct proc *p, void *v, register_t *retval)
{

	ptsignal(p, SIGSYS, STHREAD);
	return (ENOSYS);
}

#ifdef RTHREADS
int
sys_thrsigdivert(struct proc *p, void *v, register_t *retval)
{
	struct sys_thrsigdivert_args /* {
		syscallarg(sigset_t) sigmask;
	} */ *uap = v;
	sigset_t mask;
	sigset_t *m;
	int error;

	m = NULL;
	mask = SCARG(uap, sigmask) &~ sigcantmask;

	/* pending signal for this thread? */
	if (p->p_siglist & mask)
		m = &p->p_siglist;
	else if (p->p_p->ps_mainproc->p_siglist & mask)
		m = &p->p_p->ps_mainproc->p_siglist;
	if (m != NULL) {
		int sig = ffs((long)(*m & mask));
		atomic_clearbits_int(m, sigmask(sig));
		*retval = sig;
		return (0);
	}

	p->p_sigwait = 0;
	atomic_setbits_int(&p->p_sigdivert, mask);
	error = tsleep(&p->p_sigdivert, PPAUSE|PCATCH, "sigwait", 0);
	if (p->p_sigdivert) {
		/* interrupted */
		KASSERT(error != 0);
		atomic_clearbits_int(&p->p_sigdivert, ~0);
		if (error == EINTR)
			error = ERESTART;
		return (error);

	}
	KASSERT(p->p_sigwait != 0);
	*retval = p->p_sigwait;
	return (0);
}
#endif

void
initsiginfo(siginfo_t *si, int sig, u_long code, int type, union sigval val)
{
	bzero(si, sizeof *si);

	si->si_signo = sig;
	si->si_code = type;
	if (type == SI_USER) {
		si->si_value = val;
	} else {
		switch (sig) {
		case SIGSEGV:
		case SIGILL:
		case SIGBUS:
		case SIGFPE:
			si->si_addr = val.sival_ptr;
			si->si_trapno = code;
			break;
		case SIGXFSZ:
			break;
		}
	}
}

int
filt_sigattach(struct knote *kn)
{
	struct proc *p = curproc;

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/* XXX lock the proc here while adding to the list? */
	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);

	return (0);
}

void
filt_sigdetach(struct knote *kn)
{
	struct proc *p = kn->kn_ptr.p_proc;

	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
}

/*
 * signal knotes are shared with proc knotes, so we apply a mask to
 * the hint in order to differentiate them from process hints.  This
 * could be avoided by using a signal-specific knote list, but probably
 * isn't worth the trouble.
 */
int
filt_signal(struct knote *kn, long hint)
{

	if (hint & NOTE_SIGNAL) {
		hint &= ~NOTE_SIGNAL;

		if (kn->kn_id == hint)
			kn->kn_data++;
	}
	return (kn->kn_data != 0);
}