/*	$OpenBSD: kern_sig.c,v 1.295 2022/03/11 10:05:38 claudio Exp $	*/
/*	$NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $	*/

/*
 * Copyright (c) 1997 Theo de Raadt. All rights reserved.
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sig.c	8.7 (Berkeley) 4/18/94
 */

#include <sys/param.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/queue.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/acct.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/wait.h>
#include <sys/ktrace.h>
#include <sys/stat.h>
#include <sys/core.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/ptrace.h>
#include <sys/sched.h>
#include <sys/user.h>
#include <sys/syslog.h>
#include <sys/ttycom.h>
#include <sys/pledge.h>
#include <sys/witness.h>
#include <sys/exec_elf.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>
#include <machine/tcb.h>

int	nosuidcoredump = 1;

int	filt_sigattach(struct knote *kn);
void	filt_sigdetach(struct knote *kn);
int	filt_signal(struct knote *kn, long hint);

const struct filterops sig_filtops = {
	.f_flags	= 0,
	.f_attach	= filt_sigattach,
	.f_detach	= filt_sigdetach,
	.f_event	= filt_signal,
};
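
/*
 * sig_filtops backs the EVFILT_SIGNAL kqueue filter (see filt_signal()
 * below, which bumps kn_data once per delivery).  Because ptsignal()
 * runs KNOTE() before its ignore check, even ignored signals are
 * recorded.  An illustrative userland sketch, not part of this file,
 * counting SIGUSR1 deliveries via kqueue(2):
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <signal.h>
 *	#include <err.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct kevent kev;
 *		int kq;
 *
 *		signal(SIGUSR1, SIG_IGN);	// still seen by the filter
 *		if ((kq = kqueue()) == -1)
 *			err(1, "kqueue");
 *		EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
 *		if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
 *			err(1, "kevent");
 *		if (kevent(kq, NULL, 0, &kev, 1, NULL) == 1)
 *			printf("%lld SIGUSR1(s)\n", (long long)kev.data);
 *		return 0;
 *	}
 */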
/*
 * The array below categorizes the signals and their default actions.
 */
const int sigprop[NSIG] = {
	0,			/* unused */
	SA_KILL,		/* SIGHUP */
	SA_KILL,		/* SIGINT */
	SA_KILL|SA_CORE,	/* SIGQUIT */
	SA_KILL|SA_CORE,	/* SIGILL */
	SA_KILL|SA_CORE,	/* SIGTRAP */
	SA_KILL|SA_CORE,	/* SIGABRT */
	SA_KILL|SA_CORE,	/* SIGEMT */
	SA_KILL|SA_CORE,	/* SIGFPE */
	SA_KILL,		/* SIGKILL */
	SA_KILL|SA_CORE,	/* SIGBUS */
	SA_KILL|SA_CORE,	/* SIGSEGV */
	SA_KILL|SA_CORE,	/* SIGSYS */
	SA_KILL,		/* SIGPIPE */
	SA_KILL,		/* SIGALRM */
	SA_KILL,		/* SIGTERM */
	SA_IGNORE,		/* SIGURG */
	SA_STOP,		/* SIGSTOP */
	SA_STOP|SA_TTYSTOP,	/* SIGTSTP */
	SA_IGNORE|SA_CONT,	/* SIGCONT */
	SA_IGNORE,		/* SIGCHLD */
	SA_STOP|SA_TTYSTOP,	/* SIGTTIN */
	SA_STOP|SA_TTYSTOP,	/* SIGTTOU */
	SA_IGNORE,		/* SIGIO */
	SA_KILL,		/* SIGXCPU */
	SA_KILL,		/* SIGXFSZ */
	SA_KILL,		/* SIGVTALRM */
	SA_KILL,		/* SIGPROF */
	SA_IGNORE,		/* SIGWINCH */
	SA_IGNORE,		/* SIGINFO */
	SA_KILL,		/* SIGUSR1 */
	SA_KILL,		/* SIGUSR2 */
	SA_IGNORE,		/* SIGTHR */
};

#define	CONTSIGMASK	(sigmask(SIGCONT))
#define	STOPSIGMASK	(sigmask(SIGSTOP) | sigmask(SIGTSTP) | \
			    sigmask(SIGTTIN) | sigmask(SIGTTOU))

void setsigvec(struct proc *, int, struct sigaction *);

void proc_stop(struct proc *p, int);
void proc_stop_sweep(void *);
void *proc_stop_si;

void setsigctx(struct proc *, int, struct sigctx *);
void postsig_done(struct proc *, int, sigset_t, int);
void postsig(struct proc *, int, struct sigctx *);
int cansignal(struct proc *, struct process *, int);

struct pool sigacts_pool;	/* memory pool for sigacts structures */

void sigio_del(struct sigiolst *);
void sigio_unlink(struct sigio_ref *, struct sigiolst *);
struct mutex sigio_lock = MUTEX_INITIALIZER(IPL_HIGH);

/*
 * Can thread p send the signal signum to process qr?
 */
int
cansignal(struct proc *p, struct process *qr, int signum)
{
	struct process *pr = p->p_p;
	struct ucred *uc = p->p_ucred;
	struct ucred *quc = qr->ps_ucred;

	if (uc->cr_uid == 0)
		return (1);		/* root can always signal */

	if (pr == qr)
		return (1);		/* process can always signal itself */

	/* optimization: if the same creds then the tests below will pass */
	if (uc == quc)
		return (1);

	if (signum == SIGCONT && qr->ps_session == pr->ps_session)
		return (1);		/* SIGCONT in session */

	/*
	 * Using kill(), only certain signals can be sent to setugid
	 * child processes.
	 */
	if (qr->ps_flags & PS_SUGID) {
		switch (signum) {
		case 0:
		case SIGKILL:
		case SIGINT:
		case SIGTERM:
		case SIGALRM:
		case SIGSTOP:
		case SIGTTIN:
		case SIGTTOU:
		case SIGTSTP:
		case SIGHUP:
		case SIGUSR1:
		case SIGUSR2:
			if (uc->cr_ruid == quc->cr_ruid ||
			    uc->cr_uid == quc->cr_ruid)
				return (1);
		}
		return (0);
	}

	if (uc->cr_ruid == quc->cr_ruid ||
	    uc->cr_ruid == quc->cr_svuid ||
	    uc->cr_uid == quc->cr_ruid ||
	    uc->cr_uid == quc->cr_svuid)
		return (1);
	return (0);
}
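
/*
 * The signum == 0 case in the switch above is what makes the common
 * liveness probe work: kill(pid, 0) runs the existence and permission
 * checks without delivering anything.  Illustrative userland sketch,
 * not part of this file:
 *
 *	#include <sys/types.h>
 *	#include <signal.h>
 *	#include <errno.h>
 *
 *	int
 *	process_exists(pid_t pid)
 *	{
 *		if (kill(pid, 0) == 0)
 *			return 1;		// exists and signalable
 *		return errno == EPERM;		// exists, but not ours to signal
 *	}
 */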
/*
 * Initialize signal-related data structures.
 */
void
signal_init(void)
{
	proc_stop_si = softintr_establish(IPL_SOFTCLOCK, proc_stop_sweep,
	    NULL);
	if (proc_stop_si == NULL)
		panic("signal_init failed to register softintr");

	pool_init(&sigacts_pool, sizeof(struct sigacts), 0, IPL_NONE,
	    PR_WAITOK, "sigapl", NULL);
}

/*
 * Initialize a new sigaltstack structure.
 */
void
sigstkinit(struct sigaltstack *ss)
{
	ss->ss_flags = SS_DISABLE;
	ss->ss_size = 0;
	ss->ss_sp = NULL;
}

/*
 * Create an initial sigacts structure, using the same signal state
 * as pr.
 */
struct sigacts *
sigactsinit(struct process *pr)
{
	struct sigacts *ps;

	ps = pool_get(&sigacts_pool, PR_WAITOK);
	memcpy(ps, pr->ps_sigacts, sizeof(struct sigacts));
	return (ps);
}

/*
 * Release a sigacts structure.
 */
void
sigactsfree(struct sigacts *ps)
{
	pool_put(&sigacts_pool, ps);
}

int
sys_sigaction(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigaction_args /* {
		syscallarg(int) signum;
		syscallarg(const struct sigaction *) nsa;
		syscallarg(struct sigaction *) osa;
	} */ *uap = v;
	struct sigaction vec;
#ifdef KTRACE
	struct sigaction ovec;
#endif
	struct sigaction *sa;
	const struct sigaction *nsa;
	struct sigaction *osa;
	struct sigacts *ps = p->p_p->ps_sigacts;
	int signum;
	int bit, error;

	signum = SCARG(uap, signum);
	nsa = SCARG(uap, nsa);
	osa = SCARG(uap, osa);

	if (signum <= 0 || signum >= NSIG ||
	    (nsa && (signum == SIGKILL || signum == SIGSTOP)))
		return (EINVAL);
	sa = &vec;
	if (osa) {
		sa->sa_handler = ps->ps_sigact[signum];
		sa->sa_mask = ps->ps_catchmask[signum];
		bit = sigmask(signum);
		sa->sa_flags = 0;
		if ((ps->ps_sigonstack & bit) != 0)
			sa->sa_flags |= SA_ONSTACK;
		if ((ps->ps_sigintr & bit) == 0)
			sa->sa_flags |= SA_RESTART;
		if ((ps->ps_sigreset & bit) != 0)
			sa->sa_flags |= SA_RESETHAND;
		if ((ps->ps_siginfo & bit) != 0)
			sa->sa_flags |= SA_SIGINFO;
		if (signum == SIGCHLD) {
			if ((ps->ps_sigflags & SAS_NOCLDSTOP) != 0)
				sa->sa_flags |= SA_NOCLDSTOP;
			if ((ps->ps_sigflags & SAS_NOCLDWAIT) != 0)
				sa->sa_flags |= SA_NOCLDWAIT;
		}
		if ((sa->sa_mask & bit) == 0)
			sa->sa_flags |= SA_NODEFER;
		sa->sa_mask &= ~bit;
		error = copyout(sa, osa, sizeof (vec));
		if (error)
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ovec = vec;
#endif
	}
	if (nsa) {
		error = copyin(nsa, sa, sizeof (vec));
		if (error)
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrsigaction(p, sa);
#endif
		setsigvec(p, signum, sa);
	}
#ifdef KTRACE
	if (osa && KTRPOINT(p, KTR_STRUCT))
		ktrsigaction(p, &ovec);
#endif
	return (0);
}
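
/*
 * For reference, the userland side of the syscall above: a minimal,
 * illustrative sketch (not part of this file) installing a SA_SIGINFO
 * handler in the form sys_sigaction() will receive it:
 *
 *	#include <signal.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static void
 *	on_term(int sig, siginfo_t *si, void *ctx)
 *	{
 *		// si->si_code == SI_USER for kill(2)-originated signals
 *		write(2, "SIGTERM\n", 8);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		struct sigaction sa;
 *
 *		memset(&sa, 0, sizeof sa);
 *		sa.sa_sigaction = on_term;
 *		sa.sa_flags = SA_SIGINFO | SA_RESTART;
 *		sigemptyset(&sa.sa_mask);
 *		if (sigaction(SIGTERM, &sa, NULL) == -1)
 *			return 1;
 *		for (;;)
 *			pause();
 *	}
 */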
void
setsigvec(struct proc *p, int signum, struct sigaction *sa)
{
	struct sigacts *ps = p->p_p->ps_sigacts;
	int bit;
	int s;

	bit = sigmask(signum);
	/*
	 * Change setting atomically.
	 */
	s = splhigh();
	ps->ps_sigact[signum] = sa->sa_handler;
	if ((sa->sa_flags & SA_NODEFER) == 0)
		sa->sa_mask |= sigmask(signum);
	ps->ps_catchmask[signum] = sa->sa_mask &~ sigcantmask;
	if (signum == SIGCHLD) {
		if (sa->sa_flags & SA_NOCLDSTOP)
			atomic_setbits_int(&ps->ps_sigflags, SAS_NOCLDSTOP);
		else
			atomic_clearbits_int(&ps->ps_sigflags, SAS_NOCLDSTOP);
		/*
		 * If the SA_NOCLDWAIT flag is set or the handler
		 * is SIG_IGN we reparent the dying child to PID 1
		 * (init) which will reap the zombie.  Because we use
		 * init to do our dirty work we never set SAS_NOCLDWAIT
		 * for PID 1.
		 * XXX exit1 rework means this is unnecessary?
		 */
		if (initprocess->ps_sigacts != ps &&
		    ((sa->sa_flags & SA_NOCLDWAIT) ||
		    sa->sa_handler == SIG_IGN))
			atomic_setbits_int(&ps->ps_sigflags, SAS_NOCLDWAIT);
		else
			atomic_clearbits_int(&ps->ps_sigflags, SAS_NOCLDWAIT);
	}
	if ((sa->sa_flags & SA_RESETHAND) != 0)
		ps->ps_sigreset |= bit;
	else
		ps->ps_sigreset &= ~bit;
	if ((sa->sa_flags & SA_SIGINFO) != 0)
		ps->ps_siginfo |= bit;
	else
		ps->ps_siginfo &= ~bit;
	if ((sa->sa_flags & SA_RESTART) == 0)
		ps->ps_sigintr |= bit;
	else
		ps->ps_sigintr &= ~bit;
	if ((sa->sa_flags & SA_ONSTACK) != 0)
		ps->ps_sigonstack |= bit;
	else
		ps->ps_sigonstack &= ~bit;
	/*
	 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
	 * and for signals set to SIG_DFL where the default is to ignore.
	 * However, don't put SIGCONT in ps_sigignore,
	 * as we have to restart the process.
	 */
	if (sa->sa_handler == SIG_IGN ||
	    (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) {
		atomic_clearbits_int(&p->p_siglist, bit);
		atomic_clearbits_int(&p->p_p->ps_siglist, bit);
		if (signum != SIGCONT)
			ps->ps_sigignore |= bit;	/* easier in psignal */
		ps->ps_sigcatch &= ~bit;
	} else {
		ps->ps_sigignore &= ~bit;
		if (sa->sa_handler == SIG_DFL)
			ps->ps_sigcatch &= ~bit;
		else
			ps->ps_sigcatch |= bit;
	}
	splx(s);
}

/*
 * Initialize signal state for process 0;
 * set to ignore signals that are ignored by default.
 */
void
siginit(struct sigacts *ps)
{
	int i;

	for (i = 0; i < NSIG; i++)
		if (sigprop[i] & SA_IGNORE && i != SIGCONT)
			ps->ps_sigignore |= sigmask(i);
	ps->ps_sigflags = SAS_NOCLDWAIT | SAS_NOCLDSTOP;
}

/*
 * Reset signals for an exec by the specified thread.
 */
void
execsigs(struct proc *p)
{
	struct sigacts *ps;
	int nc, mask;

	ps = p->p_p->ps_sigacts;

	/*
	 * Reset caught signals.  Held signals remain held
	 * through p_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	while (ps->ps_sigcatch) {
		nc = ffs((long)ps->ps_sigcatch);
		mask = sigmask(nc);
		ps->ps_sigcatch &= ~mask;
		if (sigprop[nc] & SA_IGNORE) {
			if (nc != SIGCONT)
				ps->ps_sigignore |= mask;
			atomic_clearbits_int(&p->p_siglist, mask);
			atomic_clearbits_int(&p->p_p->ps_siglist, mask);
		}
		ps->ps_sigact[nc] = SIG_DFL;
	}
	/*
	 * Reset stack state to the user stack.
	 * Clear set of signals caught on the signal stack.
	 */
	sigstkinit(&p->p_sigstk);
	atomic_clearbits_int(&ps->ps_sigflags, SAS_NOCLDWAIT);
	if (ps->ps_sigact[SIGCHLD] == SIG_IGN)
		ps->ps_sigact[SIGCHLD] = SIG_DFL;
}
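
/*
 * The SAS_NOCLDWAIT handling in setsigvec() above is what gives SIG_IGN
 * on SIGCHLD its POSIX auto-reap semantics: dying children are reparented
 * to init and never become zombies of the caller.  Illustrative userland
 * sketch, not part of this file:
 *
 *	#include <sys/wait.h>
 *	#include <signal.h>
 *	#include <errno.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		signal(SIGCHLD, SIG_IGN);	// request auto-reaping
 *		if (fork() == 0)
 *			_exit(0);
 *		sleep(1);
 *		// no zombie is left; wait(2) reports ECHILD
 *		return (wait(NULL) == -1 && errno == ECHILD) ? 0 : 1;
 *	}
 */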
/*
 * Manipulate signal mask.
 * Note that we receive new mask, not pointer,
 * and return old mask as return value;
 * the library stub does the rest.
 */
int
sys_sigprocmask(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigprocmask_args /* {
		syscallarg(int) how;
		syscallarg(sigset_t) mask;
	} */ *uap = v;
	int error = 0;
	sigset_t mask;

	KASSERT(p == curproc);

	*retval = p->p_sigmask;
	mask = SCARG(uap, mask) &~ sigcantmask;

	switch (SCARG(uap, how)) {
	case SIG_BLOCK:
		atomic_setbits_int(&p->p_sigmask, mask);
		break;
	case SIG_UNBLOCK:
		atomic_clearbits_int(&p->p_sigmask, mask);
		break;
	case SIG_SETMASK:
		p->p_sigmask = mask;
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

int
sys_sigpending(struct proc *p, void *v, register_t *retval)
{
	*retval = p->p_siglist | p->p_p->ps_siglist;
	return (0);
}

/*
 * Temporarily replace calling proc's signal mask for the duration of a
 * system call.  Original signal mask will be restored by userret().
 */
void
dosigsuspend(struct proc *p, sigset_t newmask)
{
	KASSERT(p == curproc);

	p->p_oldmask = p->p_sigmask;
	atomic_setbits_int(&p->p_flag, P_SIGSUSPEND);
	p->p_sigmask = newmask;
}

/*
 * Suspend thread until signal, providing mask to be set
 * in the meantime.  Note nonstandard calling convention:
 * libc stub passes mask, not pointer, to save a copyin.
 */
int
sys_sigsuspend(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigsuspend_args /* {
		syscallarg(int) mask;
	} */ *uap = v;

	dosigsuspend(p, SCARG(uap, mask) &~ sigcantmask);
	while (tsleep_nsec(&nowake, PPAUSE|PCATCH, "sigsusp", INFSLP) == 0)
		continue;
	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}
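
/*
 * The mask juggling above supports the classic race-free wait pattern:
 * block the signal, test the condition, then atomically restore the old
 * mask and sleep.  Illustrative userland sketch, not part of this file:
 *
 *	#include <signal.h>
 *
 *	static volatile sig_atomic_t got_usr1;
 *
 *	static void
 *	on_usr1(int sig)
 *	{
 *		got_usr1 = 1;
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		sigset_t block, old;
 *
 *		signal(SIGUSR1, on_usr1);
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, &old);
 *		while (!got_usr1)
 *			sigsuspend(&old);	// always fails with EINTR
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *		return 0;
 *	}
 */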
int
sigonstack(size_t stack)
{
	const struct sigaltstack *ss = &curproc->p_sigstk;

	return (ss->ss_flags & SS_DISABLE ? 0 :
	    (stack - (size_t)ss->ss_sp < ss->ss_size));
}

int
sys_sigaltstack(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigaltstack_args /* {
		syscallarg(const struct sigaltstack *) nss;
		syscallarg(struct sigaltstack *) oss;
	} */ *uap = v;
	struct sigaltstack ss;
	const struct sigaltstack *nss;
	struct sigaltstack *oss;
	int onstack = sigonstack(PROC_STACK(p));
	int error;

	nss = SCARG(uap, nss);
	oss = SCARG(uap, oss);

	if (oss != NULL) {
		ss = p->p_sigstk;
		if (onstack)
			ss.ss_flags |= SS_ONSTACK;
		if ((error = copyout(&ss, oss, sizeof(ss))))
			return (error);
	}
	if (nss == NULL)
		return (0);
	error = copyin(nss, &ss, sizeof(ss));
	if (error)
		return (error);
	if (onstack)
		return (EPERM);
	if (ss.ss_flags & ~SS_DISABLE)
		return (EINVAL);
	if (ss.ss_flags & SS_DISABLE) {
		p->p_sigstk.ss_flags = ss.ss_flags;
		return (0);
	}
	if (ss.ss_size < MINSIGSTKSZ)
		return (ENOMEM);

	error = uvm_map_remap_as_stack(p, (vaddr_t)ss.ss_sp, ss.ss_size);
	if (error)
		return (error);

	p->p_sigstk = ss;
	return (0);
}

int
sys_kill(struct proc *cp, void *v, register_t *retval)
{
	struct sys_kill_args /* {
		syscallarg(int) pid;
		syscallarg(int) signum;
	} */ *uap = v;
	struct process *pr;
	int pid = SCARG(uap, pid);
	int signum = SCARG(uap, signum);
	int error;
	int zombie = 0;

	if ((error = pledge_kill(cp, pid)) != 0)
		return (error);
	if (((u_int)signum) >= NSIG)
		return (EINVAL);
	if (pid > 0) {
		if ((pr = prfind(pid)) == NULL) {
			if ((pr = zombiefind(pid)) == NULL)
				return (ESRCH);
			else
				zombie = 1;
		}
		if (!cansignal(cp, pr, signum))
			return (EPERM);

		/* kill single process */
		if (signum && !zombie)
			prsignal(pr, signum);
		return (0);
	}
	switch (pid) {
	case -1:		/* broadcast signal */
		return (killpg1(cp, signum, 0, 1));
	case 0:			/* signal own process group */
		return (killpg1(cp, signum, 0, 0));
	default:		/* negative explicit process group */
		return (killpg1(cp, signum, -pid, 0));
	}
}

int
sys_thrkill(struct proc *cp, void *v, register_t *retval)
{
	struct sys_thrkill_args /* {
		syscallarg(pid_t) tid;
		syscallarg(int) signum;
		syscallarg(void *) tcb;
	} */ *uap = v;
	struct proc *p;
	int tid = SCARG(uap, tid);
	int signum = SCARG(uap, signum);
	void *tcb;

	if (((u_int)signum) >= NSIG)
		return (EINVAL);
	if (tid > THREAD_PID_OFFSET) {
		if ((p = tfind(tid - THREAD_PID_OFFSET)) == NULL)
			return (ESRCH);

		/* can only kill threads in the same process */
		if (p->p_p != cp->p_p)
			return (ESRCH);
	} else if (tid == 0)
		p = cp;
	else
		return (EINVAL);

	/* optionally require the target thread to have the given tcb addr */
	tcb = SCARG(uap, tcb);
	if (tcb != NULL && tcb != TCB_GET(p))
		return (ESRCH);

	if (signum)
		ptsignal(p, signum, STHREAD);
	return (0);
}
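
/*
 * The pid encodings handled by sys_kill() above (pid > 0, pid == 0,
 * pid == -1, pid < -1) are exactly what kill(2) documents.  Illustrative
 * userland sketch, not part of this file, signalling the caller's own
 * process group:
 *
 *	#include <signal.h>
 *
 *	int
 *	hup_my_group(void)
 *	{
 *		return kill(0, SIGHUP);	// same path as killpg(0, SIGHUP)
 *	}
 */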
/*
 * Common code for kill process group/broadcast kill.
 * cp is calling process.
 */
int
killpg1(struct proc *cp, int signum, int pgid, int all)
{
	struct process *pr;
	struct pgrp *pgrp;
	int nfound = 0;

	if (all) {
		/*
		 * broadcast
		 */
		LIST_FOREACH(pr, &allprocess, ps_list) {
			if (pr->ps_pid <= 1 ||
			    pr->ps_flags & (PS_SYSTEM | PS_NOBROADCASTKILL) ||
			    pr == cp->p_p || !cansignal(cp, pr, signum))
				continue;
			nfound++;
			if (signum)
				prsignal(pr, signum);
		}
	} else {
		if (pgid == 0)
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = cp->p_p->ps_pgrp;
		else {
			pgrp = pgfind(pgid);
			if (pgrp == NULL)
				return (ESRCH);
		}
		LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist) {
			if (pr->ps_pid <= 1 || pr->ps_flags & PS_SYSTEM ||
			    !cansignal(cp, pr, signum))
				continue;
			nfound++;
			if (signum)
				prsignal(pr, signum);
		}
	}
	return (nfound ? 0 : ESRCH);
}

#define	CANDELIVER(uid, euid, pr) \
	(euid == 0 || \
	(uid) == (pr)->ps_ucred->cr_ruid || \
	(uid) == (pr)->ps_ucred->cr_svuid || \
	(uid) == (pr)->ps_ucred->cr_uid || \
	(euid) == (pr)->ps_ucred->cr_ruid || \
	(euid) == (pr)->ps_ucred->cr_svuid || \
	(euid) == (pr)->ps_ucred->cr_uid)

#define	CANSIGIO(cr, pr) \
	CANDELIVER((cr)->cr_ruid, (cr)->cr_uid, (pr))

/*
 * Send a signal to a process group.  If checktty is 1,
 * limit to members which have a controlling terminal.
 */
void
pgsignal(struct pgrp *pgrp, int signum, int checkctty)
{
	struct process *pr;

	if (pgrp)
		LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist)
			if (checkctty == 0 || pr->ps_flags & PS_CONTROLT)
				prsignal(pr, signum);
}

/*
 * Send a SIGIO or SIGURG signal to a process or process group using stored
 * credentials rather than those of the current process.
 */
void
pgsigio(struct sigio_ref *sir, int sig, int checkctty)
{
	struct process *pr;
	struct sigio *sigio;

	if (sir->sir_sigio == NULL)
		return;

	KERNEL_LOCK();
	mtx_enter(&sigio_lock);
	sigio = sir->sir_sigio;
	if (sigio == NULL)
		goto out;
	if (sigio->sio_pgid > 0) {
		if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc))
			prsignal(sigio->sio_proc, sig);
	} else if (sigio->sio_pgid < 0) {
		LIST_FOREACH(pr, &sigio->sio_pgrp->pg_members, ps_pglist) {
			if (CANSIGIO(sigio->sio_ucred, pr) &&
			    (checkctty == 0 || (pr->ps_flags & PS_CONTROLT)))
				prsignal(pr, sig);
		}
	}
out:
	mtx_leave(&sigio_lock);
	KERNEL_UNLOCK();
}

/*
 * Recalculate the signal mask and reset the signal disposition after
 * usermode frame for delivery is formed.
 */
void
postsig_done(struct proc *p, int signum, sigset_t catchmask, int reset)
{
	KERNEL_ASSERT_LOCKED();

	p->p_ru.ru_nsignals++;
	atomic_setbits_int(&p->p_sigmask, catchmask);
	if (reset != 0) {
		sigset_t mask = sigmask(signum);
		struct sigacts *ps = p->p_p->ps_sigacts;

		ps->ps_sigcatch &= ~mask;
		if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
			ps->ps_sigignore |= mask;
		ps->ps_sigact[signum] = SIG_DFL;
	}
}
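
/*
 * pgsigio() above is reached when a descriptor configured for async I/O
 * becomes ready; the owner registered via F_SETOWN/FIOSETOWN becomes
 * sio_pgid, and the stored credentials are checked with CANSIGIO.
 * Illustrative userland sketch, not part of this file, arming SIGIO on a
 * descriptor fd:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int
 *	arm_sigio(int fd)
 *	{
 *		int fl;
 *
 *		if (fcntl(fd, F_SETOWN, getpid()) == -1)
 *			return -1;		// stored as sio_pgid here
 *		if ((fl = fcntl(fd, F_GETFL)) == -1)
 *			return -1;
 *		return fcntl(fd, F_SETFL, fl | O_ASYNC);
 *	}
 */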
/*
 * Send a signal caused by a trap to the current thread.
 * If it will be caught immediately, deliver it with correct code.
 * Otherwise, post it normally.
 */
void
trapsignal(struct proc *p, int signum, u_long trapno, int code,
    union sigval sigval)
{
	struct process *pr = p->p_p;
	struct sigacts *ps = pr->ps_sigacts;
	int mask;

	KERNEL_LOCK();
	switch (signum) {
	case SIGILL:
	case SIGBUS:
	case SIGSEGV:
		pr->ps_acflag |= ATRAP;
		break;
	}

	mask = sigmask(signum);
	if ((pr->ps_flags & PS_TRACED) == 0 &&
	    (ps->ps_sigcatch & mask) != 0 &&
	    (p->p_sigmask & mask) == 0) {
		siginfo_t si;
		sigset_t catchmask = ps->ps_catchmask[signum];
		int info = (ps->ps_siginfo & mask) != 0;
		int onstack = (ps->ps_sigonstack & mask) != 0;
		int reset = (ps->ps_sigreset & mask) != 0;

		initsiginfo(&si, signum, trapno, code, sigval);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG)) {
			ktrpsig(p, signum, ps->ps_sigact[signum],
			    p->p_sigmask, code, &si);
		}
#endif
		if (sendsig(ps->ps_sigact[signum], signum, p->p_sigmask, &si,
		    info, onstack)) {
			sigexit(p, SIGILL);
			/* NOTREACHED */
		}
		postsig_done(p, signum, catchmask, reset);
	} else {
		p->p_sisig = signum;
		p->p_sitrapno = trapno;	/* XXX for core dump/debugger */
		p->p_sicode = code;
		p->p_sigval = sigval;

		/*
		 * If traced, stop if signal is masked, and stay stopped
		 * until released by the debugger.  If our parent process
		 * is waiting for us, don't hang as we could deadlock.
		 */
		if (((pr->ps_flags & (PS_TRACED | PS_PPWAIT)) == PS_TRACED) &&
		    signum != SIGKILL && (p->p_sigmask & mask) != 0) {
			int s;

			pr->ps_xsig = signum;

			single_thread_set(p, SINGLE_SUSPEND, 0);

			SCHED_LOCK(s);
			proc_stop(p, 1);
			SCHED_UNLOCK(s);

			single_thread_clear(p, 0);

			/*
			 * If we are no longer being traced, or the parent
			 * didn't give us a signal, skip sending the signal.
			 */
			if ((pr->ps_flags & PS_TRACED) == 0 ||
			    pr->ps_xsig == 0) {
				KERNEL_UNLOCK();
				return;
			}

			/* update signal info */
			signum = pr->ps_xsig;
			p->p_sisig = signum;
			mask = sigmask(signum);
		}

		/*
		 * Signals like SIGBUS and SIGSEGV should not, when
		 * generated by the kernel, be ignorable or blockable.
		 * If it is and we're not being traced, then just kill
		 * the process.
		 * After vfs_shutdown(9), init(8) cannot receive signals
		 * because new code pages of the signal handler cannot be
		 * mapped from halted storage.  init(8) may not die or the
		 * kernel panics.  Better loop between signal handler and
		 * page fault trap until the machine is halted.
		 */
		if ((pr->ps_flags & PS_TRACED) == 0 &&
		    (sigprop[signum] & SA_KILL) &&
		    ((p->p_sigmask & mask) || (ps->ps_sigignore & mask)) &&
		    pr->ps_pid != 1)
			sigexit(p, signum);
		ptsignal(p, signum, STHREAD);
	}
	KERNEL_UNLOCK();
}
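
/*
 * When trapsignal() delivers a catchable SIGSEGV/SIGBUS/SIGILL, the
 * handler gets the machine details filled in by initsiginfo() below
 * (si_addr, si_trapno).  Illustrative userland sketch, not part of this
 * file, of a SA_SIGINFO fault handler:
 *
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	static void
 *	on_segv(int sig, siginfo_t *si, void *ctx)
 *	{
 *		// si->si_addr is the faulting address (set in initsiginfo())
 *		write(2, "segv\n", 5);	// write(2) is async-signal-safe
 *		_exit(1);
 *	}
 *
 * installed with SA_SIGINFO as in the sigaction sketch earlier.
 */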
/*
 * Send the signal to the process.  If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *   o When a stop signal is sent to a sleeping process that takes the
 *     default action, the process is stopped without awakening it.
 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 *     regardless of the signal action (eg, blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 */
void
psignal(struct proc *p, int signum)
{
	ptsignal(p, signum, SPROCESS);
}

/*
 * type = SPROCESS	process signal, can be diverted (sigwait())
 * type = STHREAD	thread signal, but should be propagated if unhandled
 * type = SPROPAGATED	propagated to this thread, so don't propagate again
 */
void
ptsignal(struct proc *p, int signum, enum signal_type type)
{
	int s, prop;
	sig_t action;
	int mask;
	int *siglist;
	struct process *pr = p->p_p;
	struct proc *q;
	int wakeparent = 0;

	KERNEL_ASSERT_LOCKED();

#ifdef DIAGNOSTIC
	if ((u_int)signum >= NSIG || signum == 0)
		panic("psignal signal number");
#endif

	/* Ignore signal if the target process is exiting */
	if (pr->ps_flags & PS_EXITING)
		return;

	mask = sigmask(signum);

	if (type == SPROCESS) {
		/* Accept SIGKILL to coredumping processes */
		if (pr->ps_flags & PS_COREDUMP && signum == SIGKILL) {
			atomic_setbits_int(&pr->ps_siglist, mask);
			return;
		}

		/*
		 * If the current thread can process the signal
		 * immediately (it's unblocked) then have it take it.
		 */
		q = curproc;
		if (q != NULL && q->p_p == pr && (q->p_flag & P_WEXIT) == 0 &&
		    (q->p_sigmask & mask) == 0)
			p = q;
		else {
			/*
			 * A process-wide signal can be diverted to a
			 * different thread that's in sigwait() for this
			 * signal.  If there isn't such a thread, then
			 * pick a thread that doesn't have it blocked so
			 * that the stop/kill consideration isn't
			 * delayed.  Otherwise, mark it pending on the
			 * main thread.
			 */
			TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
				/* ignore exiting threads */
				if (q->p_flag & P_WEXIT)
					continue;

				/* skip threads that have the signal blocked */
				if ((q->p_sigmask & mask) != 0)
					continue;

				/* okay, could send to this thread */
				p = q;

				/*
				 * sigsuspend, sigwait, ppoll/pselect, etc?
				 * Definitely go to this thread, as it's
				 * already blocked in the kernel.
				 */
				if (q->p_flag & P_SIGSUSPEND)
					break;
			}
		}
	}

	if (type != SPROPAGATED)
		KNOTE(&pr->ps_klist, NOTE_SIGNAL | signum);

	prop = sigprop[signum];

	/*
	 * If proc is traced, always give parent a chance.
	 */
	if (pr->ps_flags & PS_TRACED) {
		action = SIG_DFL;
	} else {
		/*
		 * If the signal is being ignored,
		 * then we forget about it immediately.
		 * (Note: we don't set SIGCONT in ps_sigignore,
		 * and if it is set to SIG_IGN,
		 * action will be SIG_DFL here.)
		 */
		if (pr->ps_sigacts->ps_sigignore & mask)
			return;
		if (p->p_sigmask & mask) {
			action = SIG_HOLD;
		} else if (pr->ps_sigacts->ps_sigcatch & mask) {
			action = SIG_CATCH;
		} else {
			action = SIG_DFL;

			if (prop & SA_KILL && pr->ps_nice > NZERO)
				pr->ps_nice = NZERO;

			/*
			 * If sending a tty stop signal to a member of an
			 * orphaned process group, discard the signal here if
			 * the action is default; don't stop the process below
			 * if sleeping, and don't clear any pending SIGCONT.
			 */
			if (prop & SA_TTYSTOP && pr->ps_pgrp->pg_jobc == 0)
				return;
		}
	}
	/*
	 * If delivered to process, mark as pending there.  Continue and stop
	 * signals will be propagated to all threads.  So they are always
	 * marked at thread level.
	 */
	siglist = (type == SPROCESS) ?
	    &pr->ps_siglist : &p->p_siglist;
	if (prop & SA_CONT) {
		siglist = &p->p_siglist;
		atomic_clearbits_int(siglist, STOPSIGMASK);
	}
	if (prop & SA_STOP) {
		siglist = &p->p_siglist;
		atomic_clearbits_int(siglist, CONTSIGMASK);
		atomic_clearbits_int(&p->p_flag, P_CONTINUED);
	}
	atomic_setbits_int(siglist, mask);

	/*
	 * XXX delay processing of SA_STOP signals unless action == SIG_DFL?
	 */
	if (prop & (SA_CONT | SA_STOP) && type != SPROPAGATED)
		TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link)
			if (q != p)
				ptsignal(q, signum, SPROPAGATED);

	/*
	 * Defer further processing for signals which are held,
	 * except that stopped processes must be continued by SIGCONT.
	 */
	if (action == SIG_HOLD && ((prop & SA_CONT) == 0 || p->p_stat != SSTOP))
		return;

	SCHED_LOCK(s);

	switch (p->p_stat) {

	case SSLEEP:
		/*
		 * If process is sleeping uninterruptibly
		 * we can't interrupt the sleep... the signal will
		 * be noticed when the process returns through
		 * trap() or syscall().
		 */
		if ((p->p_flag & P_SINTR) == 0)
			goto out;
		/*
		 * Process is sleeping and traced... make it runnable
		 * so it can discover the signal in cursig() and stop
		 * for the parent.
		 */
		if (pr->ps_flags & PS_TRACED)
			goto run;
		/*
		 * If SIGCONT is default (or ignored) and process is
		 * asleep, we are finished; the process should not
		 * be awakened.
		 */
		if ((prop & SA_CONT) && action == SIG_DFL) {
			atomic_clearbits_int(siglist, mask);
			goto out;
		}
		/*
		 * When a sleeping process receives a stop
		 * signal, process immediately if possible.
		 */
		if ((prop & SA_STOP) && action == SIG_DFL) {
			/*
			 * If a child holding parent blocked,
			 * stopping could cause deadlock.
			 */
			if (pr->ps_flags & PS_PPWAIT)
				goto out;
			atomic_clearbits_int(siglist, mask);
			pr->ps_xsig = signum;
			proc_stop(p, 0);
			goto out;
		}
		/*
		 * All other (caught or default) signals
		 * cause the process to run.
		 */
		goto runfast;
		/* NOTREACHED */

	case SSTOP:
		/*
		 * If traced process is already stopped,
		 * then no further action is necessary.
		 */
		if (pr->ps_flags & PS_TRACED)
			goto out;

		/*
		 * Kill signal always sets processes running.
		 */
		if (signum == SIGKILL) {
			atomic_clearbits_int(&p->p_flag, P_SUSPSIG);
			goto runfast;
		}

		if (prop & SA_CONT) {
			/*
			 * If SIGCONT is default (or ignored), we continue the
			 * process but don't leave the signal in p_siglist, as
			 * it has no further action.  If SIGCONT is held, we
			 * continue the process and leave the signal in
			 * p_siglist.  If the process catches SIGCONT, let it
			 * handle the signal itself.  If it isn't waiting on
			 * an event, then it goes back to run state.
			 * Otherwise, process goes back to sleep state.
			 */
			atomic_setbits_int(&p->p_flag, P_CONTINUED);
			atomic_clearbits_int(&p->p_flag, P_SUSPSIG);
			wakeparent = 1;
			if (action == SIG_DFL)
				atomic_clearbits_int(siglist, mask);
			if (action == SIG_CATCH)
				goto runfast;
			if (p->p_wchan == NULL)
				goto run;
			p->p_stat = SSLEEP;
			goto out;
		}

		if (prop & SA_STOP) {
			/*
			 * Already stopped, don't need to stop again.
			 * (If we did the shell could get confused.)
			 */
			atomic_clearbits_int(siglist, mask);
			goto out;
		}

		/*
		 * If process is sleeping interruptibly, then simulate a
		 * wakeup so that when it is continued, it will be made
		 * runnable and can look at the signal.  But don't make
		 * the process runnable, leave it stopped.
		 */
		if (p->p_flag & P_SINTR)
			unsleep(p);
		goto out;

	case SONPROC:
		signotify(p);
		/* FALLTHROUGH */
	default:
		/*
		 * SRUN, SIDL, SDEAD do nothing with the signal,
		 * other than kicking ourselves if we are running.
		 * It will either never be noticed, or noticed very soon.
		 */
		goto out;
	}
	/* NOTREACHED */

runfast:
	/*
	 * Raise priority to at least PUSER.
	 */
	if (p->p_usrpri > PUSER)
		p->p_usrpri = PUSER;
run:
	setrunnable(p);
out:
	SCHED_UNLOCK(s);
	if (wakeparent)
		wakeup(pr->ps_pptr);
}

/* fill the signal context which should be used by postsig() and issignal() */
void
setsigctx(struct proc *p, int signum, struct sigctx *sctx)
{
	struct sigacts *ps = p->p_p->ps_sigacts;
	sigset_t mask;

	mask = sigmask(signum);
	sctx->sig_action = ps->ps_sigact[signum];
	sctx->sig_catchmask = ps->ps_catchmask[signum];
	sctx->sig_reset = (ps->ps_sigreset & mask) != 0;
	sctx->sig_info = (ps->ps_siginfo & mask) != 0;
	sctx->sig_intr = (ps->ps_sigintr & mask) != 0;
	sctx->sig_onstack = (ps->ps_sigonstack & mask) != 0;
	sctx->sig_ignore = (ps->ps_sigignore & mask) != 0;
}

/*
 * Determine the signal that should be delivered to process p, the current
 * process; return 0 if none.
 *
 * If the current process has received a signal (should be caught or cause
 * termination, should interrupt current syscall), return the signal number.
 * Stop signals with default action are processed immediately, then cleared;
 * they aren't returned.  This is checked after each entry to the system for
 * a syscall or trap.  The normal call sequence is
 *
 *	while (signum = cursig(curproc, &ctx))
 *		postsig(signum, &ctx);
 *
 * Assumes that if the P_SINTR flag is set, we're holding both the
 * kernel and scheduler locks.
 */
int
cursig(struct proc *p, struct sigctx *sctx)
{
	struct process *pr = p->p_p;
	int signum, mask, prop;
	int dolock = (p->p_flag & P_SINTR) == 0;
	int s;

	KERNEL_ASSERT_LOCKED();
	KASSERT(p == curproc);

	for (;;) {
		mask = SIGPENDING(p);
		if (pr->ps_flags & PS_PPWAIT)
			mask &= ~STOPSIGMASK;
		if (mask == 0)		/* no signal to send */
			return (0);
		signum = ffs((long)mask);
		mask = sigmask(signum);

		/* take the signal! */
		atomic_clearbits_int(&p->p_siglist, mask);
		atomic_clearbits_int(&pr->ps_siglist, mask);
		setsigctx(p, signum, sctx);

		/*
		 * We should see pending but ignored signals
		 * only if PS_TRACED was on when they were posted.
		 */
		if (sctx->sig_ignore && (pr->ps_flags & PS_TRACED) == 0)
			continue;

		/*
		 * If traced, always stop, and stay stopped until released
		 * by the debugger.  If our parent process is waiting for
		 * us, don't hang as we could deadlock.
		 */
		if (((pr->ps_flags & (PS_TRACED | PS_PPWAIT)) == PS_TRACED) &&
		    signum != SIGKILL) {
			pr->ps_xsig = signum;

			single_thread_set(p, SINGLE_SUSPEND, 0);

			if (dolock)
				SCHED_LOCK(s);
			proc_stop(p, 1);
			if (dolock)
				SCHED_UNLOCK(s);

			single_thread_clear(p, 0);

			/*
			 * If we are no longer being traced, or the parent
			 * didn't give us a signal, look for more signals.
			 */
			if ((pr->ps_flags & PS_TRACED) == 0 ||
			    pr->ps_xsig == 0)
				continue;

			/*
			 * If the new signal is being masked, look for other
			 * signals.
			 */
			signum = pr->ps_xsig;
			mask = sigmask(signum);
			if ((p->p_sigmask & mask) != 0)
				continue;

			/* take the signal! */
			atomic_clearbits_int(&p->p_siglist, mask);
			atomic_clearbits_int(&pr->ps_siglist, mask);
			setsigctx(p, signum, sctx);
		}

		prop = sigprop[signum];

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */
		switch ((long)sctx->sig_action) {
		case (long)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (pr->ps_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				printf("Process (pid %d) got signal"
				    " %d\n", pr->ps_pid, signum);
#endif
				break;		/* == ignore */
			}
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal.  However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				if (pr->ps_flags & PS_TRACED ||
				    (pr->ps_pgrp->pg_jobc == 0 &&
				    prop & SA_TTYSTOP))
					break;	/* == ignore */
				pr->ps_xsig = signum;
				if (dolock)
					SCHED_LOCK(s);
				proc_stop(p, 1);
				if (dolock)
					SCHED_UNLOCK(s);
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else
				goto keep;
			/* NOTREACHED */
		case (long)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
			if ((prop & SA_CONT) == 0 &&
			    (pr->ps_flags & PS_TRACED) == 0)
				printf("%s\n", __func__);
			break;		/* == ignore */
		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			goto keep;
		}
	}
	/* NOTREACHED */

keep:
	atomic_setbits_int(&p->p_siglist, mask);	/* leave the signal for later */
	return (signum);
}

/*
 * Put the argument process into the stopped state and notify the parent
 * via wakeup.  Signals are handled elsewhere.  The process must not be
 * on the run queue.
 */
void
proc_stop(struct proc *p, int sw)
{
	struct process *pr = p->p_p;

#ifdef MULTIPROCESSOR
	SCHED_ASSERT_LOCKED();
#endif

	p->p_stat = SSTOP;
	atomic_clearbits_int(&pr->ps_flags, PS_WAITED);
	atomic_setbits_int(&pr->ps_flags, PS_STOPPED);
	atomic_setbits_int(&p->p_flag, P_SUSPSIG);
	/*
	 * We need this soft interrupt to be handled fast.
	 * Extra calls to softclock don't hurt.
	 */
	softintr_schedule(proc_stop_si);
	if (sw)
		mi_switch();
}

/*
 * Called from a soft interrupt to send signals to the parents of stopped
 * processes.
 * We can't do this in proc_stop because it's called with nasty locks held
 * and we would need recursive scheduler lock to deal with that.
 */
void
proc_stop_sweep(void *v)
{
	struct process *pr;

	LIST_FOREACH(pr, &allprocess, ps_list) {
		if ((pr->ps_flags & PS_STOPPED) == 0)
			continue;
		atomic_clearbits_int(&pr->ps_flags, PS_STOPPED);

		if ((pr->ps_pptr->ps_sigacts->ps_sigflags & SAS_NOCLDSTOP) == 0)
			prsignal(pr->ps_pptr, SIGCHLD);
		wakeup(pr->ps_pptr);
	}
}

/*
 * Take the action for the specified signal
 * from the current set of pending signals.
 */
void
postsig(struct proc *p, int signum, struct sigctx *sctx)
{
	u_long trapno;
	int mask, returnmask;
	siginfo_t si;
	union sigval sigval;
	int s, code;

	KASSERT(signum != 0);
	KERNEL_ASSERT_LOCKED();

	mask = sigmask(signum);
	atomic_clearbits_int(&p->p_siglist, mask);
	sigval.sival_ptr = NULL;

	if (p->p_sisig != signum) {
		trapno = 0;
		code = SI_USER;
		sigval.sival_ptr = NULL;
	} else {
		trapno = p->p_sitrapno;
		code = p->p_sicode;
		sigval = p->p_sigval;
	}
	initsiginfo(&si, signum, trapno, code, sigval);

#ifdef KTRACE
	if (KTRPOINT(p, KTR_PSIG)) {
		ktrpsig(p, signum, sctx->sig_action, p->p_flag & P_SIGSUSPEND ?
		    p->p_oldmask : p->p_sigmask, code, &si);
	}
#endif
	if (sctx->sig_action == SIG_DFL) {
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		sigexit(p, signum);
		/* NOTREACHED */
	} else {
		/*
		 * If we get here, the signal must be caught.
		 */
#ifdef DIAGNOSTIC
		if (sctx->sig_action == SIG_IGN || (p->p_sigmask & mask))
			panic("postsig action");
#endif
		/*
		 * Set the new mask value and also defer further
		 * occurrences of this signal.
		 *
		 * Special case: user has done a sigpause.  Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigpause is what we want
		 * restored after the signal processing is completed.
		 */
#ifdef MULTIPROCESSOR
		s = splsched();
#else
		s = splhigh();
#endif
		if (p->p_flag & P_SIGSUSPEND) {
			atomic_clearbits_int(&p->p_flag, P_SIGSUSPEND);
			returnmask = p->p_oldmask;
		} else {
			returnmask = p->p_sigmask;
		}
		if (p->p_sisig == signum) {
			p->p_sisig = 0;
			p->p_sitrapno = 0;
			p->p_sicode = SI_USER;
			p->p_sigval.sival_ptr = NULL;
		}

		if (sendsig(sctx->sig_action, signum, returnmask, &si,
		    sctx->sig_info, sctx->sig_onstack)) {
			sigexit(p, SIGILL);
			/* NOTREACHED */
		}
		postsig_done(p, signum, sctx->sig_catchmask, sctx->sig_reset);
		splx(s);
	}
}
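
/*
 * proc_stop_sweep() above skips the SIGCHLD notification when the parent
 * installed its SIGCHLD handler with SA_NOCLDSTOP (recorded as
 * SAS_NOCLDSTOP in setsigvec()).  Illustrative userland sketch, not part
 * of this file:
 *
 *	#include <signal.h>
 *	#include <string.h>
 *
 *	static void
 *	on_chld(int sig)
 *	{
 *		// runs on child exit only, not on child stop/continue
 *	}
 *
 *	void
 *	setup_chld(void)
 *	{
 *		struct sigaction sa;
 *
 *		memset(&sa, 0, sizeof sa);
 *		sa.sa_handler = on_chld;
 *		sa.sa_flags = SA_NOCLDSTOP;
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGCHLD, &sa, NULL);
 *	}
 */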
/*
 * Force the current process to exit with the specified signal, dumping core
 * if appropriate.  We bypass the normal tests for masked and caught signals,
 * allowing unrecoverable failures to terminate the process without changing
 * signal state.  Mark the accounting record with the signal termination.
 * If dumping core, save the signal number for the debugger.  Calls exit and
 * does not return.
 */
void
sigexit(struct proc *p, int signum)
{
	/* Mark process as going away */
	atomic_setbits_int(&p->p_flag, P_WEXIT);

	p->p_p->ps_acflag |= AXSIG;
	if (sigprop[signum] & SA_CORE) {
		p->p_sisig = signum;

		/* if there are other threads, pause them */
		if (P_HASSIBLING(p))
			single_thread_set(p, SINGLE_SUSPEND, 1);

		if (coredump(p) == 0)
			signum |= WCOREFLAG;
	}
	exit1(p, 0, signum, EXIT_NORMAL);
	/* NOTREACHED */
}

/*
 * Send uncatchable SIGABRT for coredump.
 */
void
sigabort(struct proc *p)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof sa);
	sa.sa_handler = SIG_DFL;
	setsigvec(p, SIGABRT, &sa);
	atomic_clearbits_int(&p->p_sigmask, sigmask(SIGABRT));
	psignal(p, SIGABRT);
}

/*
 * Return 1 if `sig', a given signal, is ignored or masked for `p', a given
 * thread, and 0 otherwise.
 */
int
sigismasked(struct proc *p, int sig)
{
	struct process *pr = p->p_p;

	if ((pr->ps_sigacts->ps_sigignore & sigmask(sig)) ||
	    (p->p_sigmask & sigmask(sig)))
		return 1;

	return 0;
}

struct coredump_iostate {
	struct proc *io_proc;
	struct vnode *io_vp;
	struct ucred *io_cred;
	off_t io_offset;
};
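
/*
 * The WCOREFLAG that sigexit() above folds into the exit status is what
 * userland later observes through the wait(2) status macros.  Illustrative
 * sketch, not part of this file:
 *
 *	#include <sys/types.h>
 *	#include <sys/wait.h>
 *	#include <stdio.h>
 *
 *	void
 *	report(pid_t pid)
 *	{
 *		int status;
 *
 *		if (waitpid(pid, &status, 0) == -1)
 *			return;
 *		if (WIFSIGNALED(status))
 *			printf("killed by %d%s\n", WTERMSIG(status),
 *			    WCOREDUMP(status) ? " (core dumped)" : "");
 *	}
 */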
/*
 * Dump core into a file named "progname.core", unless the process was
 * setuid/setgid.
 */
int
coredump(struct proc *p)
{
#ifdef SMALL_KERNEL
	return EPERM;
#else
	struct process *pr = p->p_p;
	struct vnode *vp;
	struct ucred *cred = p->p_ucred;
	struct vmspace *vm = p->p_vmspace;
	struct nameidata nd;
	struct vattr vattr;
	struct coredump_iostate io;
	int error, len, incrash = 0;
	char *name;
	const char *dir = "/var/crash";

	atomic_setbits_int(&pr->ps_flags, PS_COREDUMP);

	/* Don't dump if will exceed file size limit. */
	if (USPACE + ptoa(vm->vm_dsize + vm->vm_ssize) >= lim_cur(RLIMIT_CORE))
		return (EFBIG);

	name = pool_get(&namei_pool, PR_WAITOK);

	/*
	 * If the process has inconsistent uids, nosuidcoredump
	 * determines coredump placement policy.
	 */
	if (((pr->ps_flags & PS_SUGID) && (error = suser(p))) ||
	    ((pr->ps_flags & PS_SUGID) && nosuidcoredump)) {
		if (nosuidcoredump == 3) {
			/*
			 * If the program directory does not exist, dumps of
			 * that core will silently fail.
			 */
			len = snprintf(name, MAXPATHLEN, "%s/%s/%u.core",
			    dir, pr->ps_comm, pr->ps_pid);
			incrash = KERNELPATH;
		} else if (nosuidcoredump == 2) {
			len = snprintf(name, MAXPATHLEN, "%s/%s.core",
			    dir, pr->ps_comm);
			incrash = KERNELPATH;
		} else {
			pool_put(&namei_pool, name);
			return (EPERM);
		}
	} else
		len = snprintf(name, MAXPATHLEN, "%s.core", pr->ps_comm);

	if (len >= MAXPATHLEN) {
		pool_put(&namei_pool, name);
		return (EACCES);
	}

	/*
	 * Control the UID used to write out.  The normal case uses
	 * the real UID.  If the sugid case is going to write into the
	 * controlled directory, we do so as root.
	 */
	if (incrash == 0) {
		cred = crdup(cred);
		cred->cr_uid = cred->cr_ruid;
		cred->cr_gid = cred->cr_rgid;
	} else {
		if (p->p_fd->fd_rdir) {
			vrele(p->p_fd->fd_rdir);
			p->p_fd->fd_rdir = NULL;
		}
		p->p_ucred = crdup(p->p_ucred);
		crfree(cred);
		cred = p->p_ucred;
		crhold(cred);
		cred->cr_uid = 0;
		cred->cr_gid = 0;
	}

	/* incrash should be 0 or KERNELPATH only */
	NDINIT(&nd, 0, incrash, UIO_SYSSPACE, name, p);

	error = vn_open(&nd, O_CREAT | FWRITE | O_NOFOLLOW | O_NONBLOCK,
	    S_IRUSR | S_IWUSR);

	if (error)
		goto out;

	/*
	 * Don't dump to non-regular files, files with links, or files
	 * owned by someone else.
	 */
	vp = nd.ni_vp;
	if ((error = VOP_GETATTR(vp, &vattr, cred, p)) != 0) {
		VOP_UNLOCK(vp);
		vn_close(vp, FWRITE, cred, p);
		goto out;
	}
	if (vp->v_type != VREG || vattr.va_nlink != 1 ||
	    vattr.va_mode & ((VREAD | VWRITE) >> 3 | (VREAD | VWRITE) >> 6) ||
	    vattr.va_uid != cred->cr_uid) {
		error = EACCES;
		VOP_UNLOCK(vp);
		vn_close(vp, FWRITE, cred, p);
		goto out;
	}
	VATTR_NULL(&vattr);
	vattr.va_size = 0;
	VOP_SETATTR(vp, &vattr, cred, p);
	pr->ps_acflag |= ACORE;

	io.io_proc = p;
	io.io_vp = vp;
	io.io_cred = cred;
	io.io_offset = 0;
	VOP_UNLOCK(vp);
	vref(vp);
	error = vn_close(vp, FWRITE, cred, p);
	if (error == 0)
		error = coredump_elf(p, &io);
	vrele(vp);
out:
	crfree(cred);
	pool_put(&namei_pool, name);
	return (error);
#endif
}

#ifndef SMALL_KERNEL
int
coredump_write(void *cookie, enum uio_seg segflg, const void *data, size_t len)
{
	struct coredump_iostate *io = cookie;
	off_t coffset = 0;
	size_t csize;
	int chunk, error;

	csize = len;
	do {
		if (sigmask(SIGKILL) &
		    (io->io_proc->p_siglist | io->io_proc->p_p->ps_siglist))
			return (EINTR);

		/* Rest of the loop sleeps with lock held, so... */
		yield();

		chunk = MIN(csize, MAXPHYS);
		error = vn_rdwr(UIO_WRITE, io->io_vp,
		    (caddr_t)data + coffset, chunk,
		    io->io_offset + coffset, segflg,
		    IO_UNIT, io->io_cred, NULL, io->io_proc);
		if (error) {
			struct process *pr = io->io_proc->p_p;

			if (error == ENOSPC)
				log(LOG_ERR,
				    "coredump of %s(%d) failed, filesystem full\n",
				    pr->ps_comm, pr->ps_pid);
			else
				log(LOG_ERR,
				    "coredump of %s(%d), write failed: errno %d\n",
				    pr->ps_comm, pr->ps_pid, error);
			return (error);
		}

		coffset += chunk;
		csize -= chunk;
	} while (csize > 0);

	io->io_offset += len;
	return (0);
}

void
coredump_unmap(void *cookie, vaddr_t start, vaddr_t end)
{
	struct coredump_iostate *io = cookie;

	uvm_unmap(&io->io_proc->p_vmspace->vm_map, start, end);
}

#endif /* !SMALL_KERNEL */
/*
 * Nonexistent system call: signal the process (it may want to handle it).
 * Flag error in case process won't see signal immediately (blocked or
 * ignored).
 */
int
sys_nosys(struct proc *p, void *v, register_t *retval)
{
	ptsignal(p, SIGSYS, STHREAD);
	return (ENOSYS);
}

int
sys___thrsigdivert(struct proc *p, void *v, register_t *retval)
{
	static int sigwaitsleep;
	struct sys___thrsigdivert_args /* {
		syscallarg(sigset_t) sigmask;
		syscallarg(siginfo_t *) info;
		syscallarg(const struct timespec *) timeout;
	} */ *uap = v;
	struct sigctx ctx;
	sigset_t mask = SCARG(uap, sigmask) &~ sigcantmask;
	siginfo_t si;
	uint64_t nsecs = INFSLP;
	int timeinvalid = 0;
	int error = 0;

	memset(&si, 0, sizeof(si));

	if (SCARG(uap, timeout) != NULL) {
		struct timespec ts;
		if ((error = copyin(SCARG(uap, timeout), &ts, sizeof(ts))) != 0)
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &ts);
#endif
		if (!timespecisvalid(&ts))
			timeinvalid = 1;
		else
			nsecs = TIMESPEC_TO_NSEC(&ts);
	}

	dosigsuspend(p, p->p_sigmask &~ mask);
	for (;;) {
		si.si_signo = cursig(p, &ctx);
		if (si.si_signo != 0) {
			sigset_t smask = sigmask(si.si_signo);
			if (smask & mask) {
				atomic_clearbits_int(&p->p_siglist, smask);
				error = 0;
				break;
			}
		}

		/* per-POSIX, delay this error until after the above */
		if (timeinvalid)
			error = EINVAL;
		/* per-POSIX, return immediately if timeout is zero-valued */
		if (nsecs == 0)
			error = EAGAIN;

		if (error != 0)
			break;

		error = tsleep_nsec(&sigwaitsleep, PPAUSE|PCATCH, "sigwait",
		    nsecs);
	}

	if (error == 0) {
		*retval = si.si_signo;
		if (SCARG(uap, info) != NULL)
			error = copyout(&si, SCARG(uap, info), sizeof(si));
	} else if (error == ERESTART && SCARG(uap, timeout) != NULL) {
		/*
		 * Restarting is wrong if there's a timeout, as it'll be
		 * for the same interval again
		 */
		error = EINTR;
	}

	return (error);
}

void
initsiginfo(siginfo_t *si, int sig, u_long trapno, int code, union sigval val)
{
	memset(si, 0, sizeof(*si));

	si->si_signo = sig;
	si->si_code = code;
	if (code == SI_USER) {
		si->si_value = val;
	} else {
		switch (sig) {
		case SIGSEGV:
		case SIGILL:
		case SIGBUS:
		case SIGFPE:
			si->si_addr = val.sival_ptr;
			si->si_trapno = trapno;
			break;
		case SIGXFSZ:
			break;
		}
	}
}

int
filt_sigattach(struct knote *kn)
{
	struct process *pr = curproc->p_p;
	int s;

	if (kn->kn_id >= NSIG)
		return EINVAL;

	kn->kn_ptr.p_process = pr;
	kn->kn_flags |= EV_CLEAR;	/* automatically set */

	s = splhigh();
	klist_insert_locked(&pr->ps_klist, kn);
	splx(s);

	return (0);
}

void
filt_sigdetach(struct knote *kn)
{
	struct process *pr = kn->kn_ptr.p_process;
	int s;

	s = splhigh();
	klist_remove_locked(&pr->ps_klist, kn);
	splx(s);
}
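
/*
 * sys___thrsigdivert() above is the kernel half of the sigwait(3) family;
 * the exact libc stub mapping is an assumption here, see libc for details.
 * Illustrative userland sketch, not part of this file, of synchronous
 * signal handling:
 *
 *	#include <signal.h>
 *
 *	int
 *	wait_for_term(void)
 *	{
 *		sigset_t set;
 *		int sig;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGTERM);
 *		sigprocmask(SIG_BLOCK, &set, NULL);	// keep it pending
 *		if (sigwait(&set, &sig) != 0)
 *			return -1;
 *		return sig;				// SIGTERM
 *	}
 */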
/*
 * signal knotes are shared with proc knotes, so we apply a mask to
 * the hint in order to differentiate them from process hints.  This
 * could be avoided by using a signal-specific knote list, but probably
 * isn't worth the trouble.
 */
int
filt_signal(struct knote *kn, long hint)
{

	if (hint & NOTE_SIGNAL) {
		hint &= ~NOTE_SIGNAL;

		if (kn->kn_id == hint)
			kn->kn_data++;
	}
	return (kn->kn_data != 0);
}

void
userret(struct proc *p)
{
	struct sigctx ctx;
	int signum;

	/* send SIGPROF or SIGVTALRM if their timers interrupted this thread */
	if (p->p_flag & P_PROFPEND) {
		atomic_clearbits_int(&p->p_flag, P_PROFPEND);
		KERNEL_LOCK();
		psignal(p, SIGPROF);
		KERNEL_UNLOCK();
	}
	if (p->p_flag & P_ALRMPEND) {
		atomic_clearbits_int(&p->p_flag, P_ALRMPEND);
		KERNEL_LOCK();
		psignal(p, SIGVTALRM);
		KERNEL_UNLOCK();
	}

	if (SIGPENDING(p) != 0) {
		KERNEL_LOCK();
		while ((signum = cursig(p, &ctx)) != 0)
			postsig(p, signum, &ctx);
		KERNEL_UNLOCK();
	}

	/*
	 * If P_SIGSUSPEND is still set here, then we still need to restore
	 * the original sigmask before returning to userspace.  Also, this
	 * might unmask some pending signals, so we need to check a second
	 * time for signals to post.
	 */
	if (p->p_flag & P_SIGSUSPEND) {
		atomic_clearbits_int(&p->p_flag, P_SIGSUSPEND);
		p->p_sigmask = p->p_oldmask;

		KERNEL_LOCK();
		while ((signum = cursig(p, &ctx)) != 0)
			postsig(p, signum, &ctx);
		KERNEL_UNLOCK();
	}

	if (p->p_flag & P_SUSPSINGLE)
		single_thread_check(p, 0);

	WITNESS_WARN(WARN_PANIC, NULL, "userret: returning");

	p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
}

int
single_thread_check_locked(struct proc *p, int deep, int s)
{
	struct process *pr = p->p_p;

	SCHED_ASSERT_LOCKED();

	if (pr->ps_single != NULL && pr->ps_single != p) {
		do {
			/* if we're in deep, we need to unwind to the edge */
			if (deep) {
				if (pr->ps_flags & PS_SINGLEUNWIND)
					return (ERESTART);
				if (pr->ps_flags & PS_SINGLEEXIT)
					return (EINTR);
			}

			if (pr->ps_single == NULL)
				continue;

			if (atomic_dec_int_nv(&pr->ps_singlecount) == 0)
				wakeup(&pr->ps_singlecount);

			if (pr->ps_flags & PS_SINGLEEXIT) {
				SCHED_UNLOCK(s);
				KERNEL_LOCK();
				exit1(p, 0, 0, EXIT_THREAD_NOCHECK);
				/* NOTREACHED */
			}

			/* not exiting and don't need to unwind, so suspend */
			p->p_stat = SSTOP;
			mi_switch();
		} while (pr->ps_single != NULL);
	}

	return (0);
}

int
single_thread_check(struct proc *p, int deep)
{
	int s, error;

	SCHED_LOCK(s);
	error = single_thread_check_locked(p, deep, s);
	SCHED_UNLOCK(s);

	return error;
}
/*
 * Stop other threads in the process.  The mode controls how and
 * where the other threads should stop:
 *  - SINGLE_SUSPEND: stop wherever they are, will later either be told to exit
 *    (by setting to SINGLE_EXIT) or be released (via single_thread_clear())
 *  - SINGLE_UNWIND: just unwind to kernel boundary, will be told to exit
 *    or released as with SINGLE_SUSPEND
 *  - SINGLE_EXIT: unwind to kernel boundary and exit
 */
int
single_thread_set(struct proc *p, enum single_thread_mode mode, int wait)
{
	struct process *pr = p->p_p;
	struct proc *q;
	int error, s;

	KASSERT(curproc == p);

	SCHED_LOCK(s);
	error = single_thread_check_locked(p, (mode == SINGLE_UNWIND), s);
	if (error) {
		SCHED_UNLOCK(s);
		return error;
	}

	switch (mode) {
	case SINGLE_SUSPEND:
		break;
	case SINGLE_UNWIND:
		atomic_setbits_int(&pr->ps_flags, PS_SINGLEUNWIND);
		break;
	case SINGLE_EXIT:
		atomic_setbits_int(&pr->ps_flags, PS_SINGLEEXIT);
		atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND);
		break;
#ifdef DIAGNOSTIC
	default:
		panic("single_thread_mode = %d", mode);
#endif
	}
	pr->ps_singlecount = 0;
	membar_producer();
	pr->ps_single = p;
	TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
		if (q == p)
			continue;
		if (q->p_flag & P_WEXIT) {
			if (mode == SINGLE_EXIT) {
				if (q->p_stat == SSTOP) {
					setrunnable(q);
					atomic_inc_int(&pr->ps_singlecount);
				}
			}
			continue;
		}
		atomic_setbits_int(&q->p_flag, P_SUSPSINGLE);
		switch (q->p_stat) {
		case SIDL:
		case SRUN:
			atomic_inc_int(&pr->ps_singlecount);
			break;
		case SSLEEP:
			/* if it's not interruptible, then just have to wait */
			if (q->p_flag & P_SINTR) {
				/* merely need to suspend?  just stop it */
				if (mode == SINGLE_SUSPEND) {
					q->p_stat = SSTOP;
					break;
				}
				/* need to unwind or exit, so wake it */
				setrunnable(q);
			}
			atomic_inc_int(&pr->ps_singlecount);
			break;
		case SSTOP:
			if (mode == SINGLE_EXIT) {
				setrunnable(q);
				atomic_inc_int(&pr->ps_singlecount);
			}
			break;
		case SDEAD:
			break;
		case SONPROC:
			atomic_inc_int(&pr->ps_singlecount);
			signotify(q);
			break;
		}
	}
	SCHED_UNLOCK(s);

	if (wait)
		single_thread_wait(pr, 1);

	return 0;
}

/*
 * Wait for other threads to stop.  If recheck is false then the function
 * returns non-zero if the caller needs to restart the check else 0 is
 * returned.  If recheck is true the return value is always 0.

/*
 * Wait for other threads to stop.  If recheck is false, return non-zero
 * if the caller needs to restart the check and 0 otherwise.  If recheck
 * is true the return value is always 0.
 */
int
single_thread_wait(struct process *pr, int recheck)
{
	struct sleep_state sls;
	int wait;

	/* wait until they're all suspended */
	wait = pr->ps_singlecount > 0;
	while (wait) {
		sleep_setup(&sls, &pr->ps_singlecount, PWAIT, "suspend", 0);
		wait = pr->ps_singlecount > 0;
		sleep_finish(&sls, wait);
		if (!recheck)
			break;
	}

	return wait;
}

void
single_thread_clear(struct proc *p, int flag)
{
	struct process *pr = p->p_p;
	struct proc *q;
	int s;

	KASSERT(pr->ps_single == p);
	KASSERT(curproc == p);

	SCHED_LOCK(s);
	pr->ps_single = NULL;
	atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND | PS_SINGLEEXIT);
	TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
		if (q == p || (q->p_flag & P_SUSPSINGLE) == 0)
			continue;
		atomic_clearbits_int(&q->p_flag, P_SUSPSINGLE);

		/*
		 * if the thread was only stopped for single threading
		 * then clearing that either makes it runnable or puts
		 * it back into some sleep queue
		 */
		if (q->p_stat == SSTOP && (q->p_flag & flag) == 0) {
			if (q->p_wchan == NULL)
				setrunnable(q);
			else
				q->p_stat = SSLEEP;
		}
	}
	SCHED_UNLOCK(s);
}

void
sigio_del(struct sigiolst *rmlist)
{
	struct sigio *sigio;

	while ((sigio = LIST_FIRST(rmlist)) != NULL) {
		LIST_REMOVE(sigio, sio_pgsigio);
		crfree(sigio->sio_ucred);
		free(sigio, M_SIGIO, sizeof(*sigio));
	}
}

void
sigio_unlink(struct sigio_ref *sir, struct sigiolst *rmlist)
{
	struct sigio *sigio;

	MUTEX_ASSERT_LOCKED(&sigio_lock);

	sigio = sir->sir_sigio;
	if (sigio != NULL) {
		KASSERT(sigio->sio_myref == sir);
		sir->sir_sigio = NULL;

		if (sigio->sio_pgid > 0)
			sigio->sio_proc = NULL;
		else
			sigio->sio_pgrp = NULL;
		LIST_REMOVE(sigio, sio_pgsigio);

		LIST_INSERT_HEAD(rmlist, sigio, sio_pgsigio);
	}
}

void
sigio_free(struct sigio_ref *sir)
{
	struct sigiolst rmlist;

	if (sir->sir_sigio == NULL)
		return;

	LIST_INIT(&rmlist);

	mtx_enter(&sigio_lock);
	sigio_unlink(sir, &rmlist);
	mtx_leave(&sigio_lock);

	sigio_del(&rmlist);
}

void
sigio_freelist(struct sigiolst *sigiolst)
{
	struct sigiolst rmlist;
	struct sigio *sigio;

	if (LIST_EMPTY(sigiolst))
		return;

	LIST_INIT(&rmlist);

	mtx_enter(&sigio_lock);
	while ((sigio = LIST_FIRST(sigiolst)) != NULL)
		sigio_unlink(sigio->sio_myref, &rmlist);
	mtx_leave(&sigio_lock);

	sigio_del(&rmlist);
}
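
/*
 * Illustrative note, not compiled: sigio_free() and sigio_freelist()
 * above both follow the same two-phase pattern.  Entries are unlinked
 * onto a local removal list while sigio_lock is held, and only freed
 * by sigio_del() after the mutex is released, so crfree() and free()
 * never run under the lock.  The sketch below restates that pattern in
 * isolation; example_drop() is hypothetical.
 */
#if 0
void
example_drop(struct sigio_ref *sir)
{
	struct sigiolst rmlist;

	LIST_INIT(&rmlist);

	mtx_enter(&sigio_lock);
	sigio_unlink(sir, &rmlist);	/* phase 1: unlink under the mutex */
	mtx_leave(&sigio_lock);

	sigio_del(&rmlist);		/* phase 2: free outside the mutex */
}
#endif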

int
sigio_setown(struct sigio_ref *sir, u_long cmd, caddr_t data)
{
	struct sigiolst rmlist;
	struct proc *p = curproc;
	struct pgrp *pgrp = NULL;
	struct process *pr = NULL;
	struct sigio *sigio;
	int error;
	pid_t pgid = *(int *)data;

	if (pgid == 0) {
		sigio_free(sir);
		return (0);
	}

	if (cmd == TIOCSPGRP) {
		if (pgid < 0)
			return (EINVAL);
		pgid = -pgid;
	}

	sigio = malloc(sizeof(*sigio), M_SIGIO, M_WAITOK);
	sigio->sio_pgid = pgid;
	sigio->sio_ucred = crhold(p->p_ucred);
	sigio->sio_myref = sir;

	LIST_INIT(&rmlist);

	/*
	 * Holding the kernel lock, and not sleeping between
	 * prfind()/pgfind() and the linking of the sigio, ensure that
	 * the process or process group does not disappear unexpectedly.
	 */
	KERNEL_LOCK();
	mtx_enter(&sigio_lock);

	if (pgid > 0) {
		pr = prfind(pgid);
		if (pr == NULL) {
			error = ESRCH;
			goto fail;
		}

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		if (pr->ps_session != p->p_p->ps_session) {
			error = EPERM;
			goto fail;
		}

		if ((pr->ps_flags & PS_EXITING) != 0) {
			error = ESRCH;
			goto fail;
		}
	} else /* if (pgid < 0) */ {
		pgrp = pgfind(-pgid);
		if (pgrp == NULL) {
			error = ESRCH;
			goto fail;
		}

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		if (pgrp->pg_session != p->p_p->ps_session) {
			error = EPERM;
			goto fail;
		}
	}

	if (pgid > 0) {
		sigio->sio_proc = pr;
		LIST_INSERT_HEAD(&pr->ps_sigiolst, sigio, sio_pgsigio);
	} else {
		sigio->sio_pgrp = pgrp;
		LIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
	}

	sigio_unlink(sir, &rmlist);
	sir->sir_sigio = sigio;

	mtx_leave(&sigio_lock);
	KERNEL_UNLOCK();

	sigio_del(&rmlist);

	return (0);

fail:
	mtx_leave(&sigio_lock);
	KERNEL_UNLOCK();

	crfree(sigio->sio_ucred);
	free(sigio, M_SIGIO, sizeof(*sigio));

	return (error);
}

void
sigio_getown(struct sigio_ref *sir, u_long cmd, caddr_t data)
{
	struct sigio *sigio;
	pid_t pgid = 0;

	mtx_enter(&sigio_lock);
	sigio = sir->sir_sigio;
	if (sigio != NULL)
		pgid = sigio->sio_pgid;
	mtx_leave(&sigio_lock);

	if (cmd == TIOCGPGRP)
		pgid = -pgid;

	*(int *)data = pgid;
}

void
sigio_copy(struct sigio_ref *dst, struct sigio_ref *src)
{
	struct sigiolst rmlist;
	struct sigio *newsigio, *sigio;

	sigio_free(dst);

	if (src->sir_sigio == NULL)
		return;

	newsigio = malloc(sizeof(*newsigio), M_SIGIO, M_WAITOK);
	LIST_INIT(&rmlist);

	mtx_enter(&sigio_lock);

	sigio = src->sir_sigio;
	if (sigio == NULL) {
		mtx_leave(&sigio_lock);
		free(newsigio, M_SIGIO, sizeof(*newsigio));
		return;
	}

	newsigio->sio_pgid = sigio->sio_pgid;
	newsigio->sio_ucred = crhold(sigio->sio_ucred);
	newsigio->sio_myref = dst;
	if (newsigio->sio_pgid > 0) {
		newsigio->sio_proc = sigio->sio_proc;
		LIST_INSERT_HEAD(&newsigio->sio_proc->ps_sigiolst, newsigio,
		    sio_pgsigio);
	} else {
		newsigio->sio_pgrp = sigio->sio_pgrp;
		LIST_INSERT_HEAD(&newsigio->sio_pgrp->pg_sigiolst, newsigio,
		    sio_pgsigio);
	}

	sigio_unlink(dst, &rmlist);
	dst->sir_sigio = newsigio;

	mtx_leave(&sigio_lock);

	sigio_del(&rmlist);
}
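
/*
 * Illustrative sketch, not compiled: how a driver typically wires up
 * the interfaces above.  The ioctl handler forwards the ownership
 * commands to sigio_setown()/sigio_getown(), the event path posts
 * SIGIO via pgsigio(), and detach drops the registration with
 * sigio_free().  struct example_softc, its sc_sigio member, and the
 * example_* functions are hypothetical.
 */
#if 0
struct example_softc {
	struct sigio_ref sc_sigio;	/* SIGIO recipient, if any */
};

int
exampleioctl(struct example_softc *sc, u_long cmd, caddr_t data)
{
	switch (cmd) {
	case FIOSETOWN:
		return (sigio_setown(&sc->sc_sigio, cmd, data));
	case FIOGETOWN:
		sigio_getown(&sc->sc_sigio, cmd, data);
		return (0);
	}
	return (ENOTTY);
}

void
example_event(struct example_softc *sc)
{
	/* notify the registered process or process group */
	pgsigio(&sc->sc_sigio, SIGIO, 0);
}

void
example_detach(struct example_softc *sc)
{
	sigio_free(&sc->sc_sigio);
}
#endif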