1 /* 2 * Copyright (c) 1982, 1986, 1989, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94 35 * $FreeBSD: src/sys/kern/kern_sig.c,v 1.72.2.17 2003/05/16 16:34:34 obrien Exp $ 36 */ 37 38 #include "opt_ktrace.h" 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/kernel.h> 43 #include <sys/sysmsg.h> 44 #include <sys/signalvar.h> 45 #include <sys/resourcevar.h> 46 #include <sys/vnode.h> 47 #include <sys/event.h> 48 #include <sys/proc.h> 49 #include <sys/nlookup.h> 50 #include <sys/pioctl.h> 51 #include <sys/acct.h> 52 #include <sys/fcntl.h> 53 #include <sys/lock.h> 54 #include <sys/wait.h> 55 #include <sys/ktrace.h> 56 #include <sys/syslog.h> 57 #include <sys/stat.h> 58 #include <sys/sysent.h> 59 #include <sys/sysctl.h> 60 #include <sys/malloc.h> 61 #include <sys/interrupt.h> 62 #include <sys/unistd.h> 63 #include <sys/kern_syscall.h> 64 #include <sys/vkernel.h> 65 66 #include <sys/signal2.h> 67 #include <sys/thread2.h> 68 #include <sys/spinlock2.h> 69 70 #include <machine/cpu.h> 71 #include <machine/smp.h> 72 73 static int coredump(struct lwp *, int); 74 static char *expand_name(const char *, uid_t, pid_t); 75 static int dokillpg(int sig, int pgid, int all); 76 static int sig_ffs(sigset_t *set); 77 static int sigprop(int sig); 78 static void lwp_signotify(struct lwp *lp); 79 static void lwp_signotify_remote(void *arg); 80 static int kern_sigtimedwait(sigset_t set, siginfo_t *info, 81 struct timespec *timeout); 82 static void proc_stopwait(struct proc *p); 83 84 static int filt_sigattach(struct knote *kn); 85 static void filt_sigdetach(struct knote *kn); 86 static int filt_signal(struct knote *kn, long hint); 87 88 struct filterops sig_filtops = 89 { FILTEROP_MPSAFE, filt_sigattach, filt_sigdetach, filt_signal }; 90 91 static int kern_logsigexit = 1; 92 SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW, 93 &kern_logsigexit, 0, 94 "Log processes quitting on abnormal signals to syslog(3)"); 95 96 /* 97 * Can process p send the signal sig to process q? Only processes within 98 * the current reaper or children of the current reaper can be signaled. 99 * Normally the reaper itself cannot be signalled, unless initok is set. 100 */ 101 #define CANSIGNAL(q, sig, initok) \ 102 ((!p_trespass(curproc->p_ucred, (q)->p_ucred) && \ 103 reaper_sigtest(curproc, p, initok)) || \ 104 ((sig) == SIGCONT && (q)->p_session == curproc->p_session)) 105 106 /* 107 * Policy -- Can real uid ruid with ucred uc send a signal to process q? 108 */ 109 #define CANSIGIO(ruid, uc, q) \ 110 ((uc)->cr_uid == 0 || \ 111 (ruid) == (q)->p_ucred->cr_ruid || \ 112 (uc)->cr_uid == (q)->p_ucred->cr_ruid || \ 113 (ruid) == (q)->p_ucred->cr_uid || \ 114 (uc)->cr_uid == (q)->p_ucred->cr_uid) 115 116 int sugid_coredump; 117 SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RW, 118 &sugid_coredump, 0, "Enable coredumping set user/group ID processes"); 119 120 static int do_coredump = 1; 121 SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW, 122 &do_coredump, 0, "Enable/Disable coredumps"); 123 124 /* 125 * Signal properties and actions. 126 * The array below categorizes the signals and their default actions 127 * according to the following properties: 128 */ 129 #define SA_KILL 0x01 /* terminates process by default */ 130 #define SA_CORE 0x02 /* ditto and coredumps */ 131 #define SA_STOP 0x04 /* suspend process */ 132 #define SA_TTYSTOP 0x08 /* ditto, from tty */ 133 #define SA_IGNORE 0x10 /* ignore by default */ 134 #define SA_CONT 0x20 /* continue if suspended */ 135 #define SA_CANTMASK 0x40 /* non-maskable, catchable */ 136 #define SA_CKPT 0x80 /* checkpoint process */ 137 138 139 static int sigproptbl[NSIG] = { 140 SA_KILL, /* SIGHUP */ 141 SA_KILL, /* SIGINT */ 142 SA_KILL|SA_CORE, /* SIGQUIT */ 143 SA_KILL|SA_CORE, /* SIGILL */ 144 SA_KILL|SA_CORE, /* SIGTRAP */ 145 SA_KILL|SA_CORE, /* SIGABRT */ 146 SA_KILL|SA_CORE, /* SIGEMT */ 147 SA_KILL|SA_CORE, /* SIGFPE */ 148 SA_KILL, /* SIGKILL */ 149 SA_KILL|SA_CORE, /* SIGBUS */ 150 SA_KILL|SA_CORE, /* SIGSEGV */ 151 SA_KILL|SA_CORE, /* SIGSYS */ 152 SA_KILL, /* SIGPIPE */ 153 SA_KILL, /* SIGALRM */ 154 SA_KILL, /* SIGTERM */ 155 SA_IGNORE, /* SIGURG */ 156 SA_STOP, /* SIGSTOP */ 157 SA_STOP|SA_TTYSTOP, /* SIGTSTP */ 158 SA_IGNORE|SA_CONT, /* SIGCONT */ 159 SA_IGNORE, /* SIGCHLD */ 160 SA_STOP|SA_TTYSTOP, /* SIGTTIN */ 161 SA_STOP|SA_TTYSTOP, /* SIGTTOU */ 162 SA_IGNORE, /* SIGIO */ 163 SA_KILL, /* SIGXCPU */ 164 SA_KILL, /* SIGXFSZ */ 165 SA_KILL, /* SIGVTALRM */ 166 SA_KILL, /* SIGPROF */ 167 SA_IGNORE, /* SIGWINCH */ 168 SA_IGNORE, /* SIGINFO */ 169 SA_KILL, /* SIGUSR1 */ 170 SA_KILL, /* SIGUSR2 */ 171 SA_IGNORE, /* SIGTHR */ 172 SA_CKPT, /* SIGCKPT */ 173 SA_KILL|SA_CKPT, /* SIGCKPTEXIT */ 174 SA_IGNORE, 175 SA_IGNORE, 176 SA_IGNORE, 177 SA_IGNORE, 178 SA_IGNORE, 179 SA_IGNORE, 180 SA_IGNORE, 181 SA_IGNORE, 182 SA_IGNORE, 183 SA_IGNORE, 184 SA_IGNORE, 185 SA_IGNORE, 186 SA_IGNORE, 187 SA_IGNORE, 188 SA_IGNORE, 189 SA_IGNORE, 190 SA_IGNORE, 191 SA_IGNORE, 192 SA_IGNORE, 193 SA_IGNORE, 194 SA_IGNORE, 195 SA_IGNORE, 196 SA_IGNORE, 197 SA_IGNORE, 198 SA_IGNORE, 199 SA_IGNORE, 200 SA_IGNORE, 201 SA_IGNORE, 202 SA_IGNORE, 203 SA_IGNORE, 204 }; 205 206 __read_mostly sigset_t sigcantmask_mask; 207 208 static __inline int 209 sigprop(int sig) 210 { 211 212 if (sig > 0 && sig < NSIG) 213 return (sigproptbl[_SIG_IDX(sig)]); 214 215 return (0); 216 } 217 218 static __inline int 219 sig_ffs(sigset_t *set) 220 { 221 int i; 222 223 for (i = 0; i < _SIG_WORDS; i++) 224 if (set->__bits[i]) 225 return (ffs(set->__bits[i]) + (i * 32)); 226 return (0); 227 } 228 229 /* 230 * Allows us to populate siginfo->si_pid and si_uid in the target process 231 * (p) from the originating thread (td). This function must work properly 232 * even if a kernel thread is sending the signal. 233 * 234 * NOTE: Signals are not queued, so if multiple signals are received the 235 * signal handler will only see the most recent pid and uid for any 236 * given signal number. 237 */ 238 static __inline void 239 sigsetfrompid(thread_t td, struct proc *p, int sig) 240 { 241 struct sigacts *sap; 242 243 if ((sap = p->p_sigacts) == NULL) 244 return; 245 if (td->td_proc) { 246 sap->ps_frominfo[sig].pid = td->td_proc->p_pid; 247 sap->ps_frominfo[sig].uid = td->td_ucred->cr_uid; 248 } else { 249 sap->ps_frominfo[sig].pid = 0; 250 sap->ps_frominfo[sig].uid = 0; 251 } 252 } 253 254 /* 255 * No requirements. 256 */ 257 int 258 kern_sigaction(int sig, struct sigaction *act, struct sigaction *oact) 259 { 260 struct thread *td = curthread; 261 struct proc *p = td->td_proc; 262 struct lwp *lp; 263 struct sigacts *ps = p->p_sigacts; 264 265 if (sig <= 0 || sig >= _SIG_MAXSIG) 266 return (EINVAL); 267 268 lwkt_gettoken(&p->p_token); 269 270 if (oact) { 271 oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)]; 272 oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)]; 273 oact->sa_flags = 0; 274 if (SIGISMEMBER(ps->ps_sigonstack, sig)) 275 oact->sa_flags |= SA_ONSTACK; 276 if (!SIGISMEMBER(ps->ps_sigintr, sig)) 277 oact->sa_flags |= SA_RESTART; 278 if (SIGISMEMBER(ps->ps_sigreset, sig)) 279 oact->sa_flags |= SA_RESETHAND; 280 if (SIGISMEMBER(ps->ps_signodefer, sig)) 281 oact->sa_flags |= SA_NODEFER; 282 if (SIGISMEMBER(ps->ps_siginfo, sig)) 283 oact->sa_flags |= SA_SIGINFO; 284 if (sig == SIGCHLD && p->p_sigacts->ps_flag & PS_NOCLDSTOP) 285 oact->sa_flags |= SA_NOCLDSTOP; 286 if (sig == SIGCHLD && p->p_sigacts->ps_flag & PS_NOCLDWAIT) 287 oact->sa_flags |= SA_NOCLDWAIT; 288 } 289 if (act) { 290 /* 291 * Check for invalid requests. KILL and STOP cannot be 292 * caught. 293 */ 294 if (sig == SIGKILL || sig == SIGSTOP) { 295 if (act->sa_handler != SIG_DFL) { 296 lwkt_reltoken(&p->p_token); 297 return (EINVAL); 298 } 299 } 300 301 /* 302 * Change setting atomically. 303 */ 304 ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask; 305 SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]); 306 if (act->sa_flags & SA_SIGINFO) { 307 ps->ps_sigact[_SIG_IDX(sig)] = 308 (__sighandler_t *)act->sa_sigaction; 309 SIGADDSET(ps->ps_siginfo, sig); 310 } else { 311 ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler; 312 SIGDELSET(ps->ps_siginfo, sig); 313 } 314 if (!(act->sa_flags & SA_RESTART)) 315 SIGADDSET(ps->ps_sigintr, sig); 316 else 317 SIGDELSET(ps->ps_sigintr, sig); 318 if (act->sa_flags & SA_ONSTACK) 319 SIGADDSET(ps->ps_sigonstack, sig); 320 else 321 SIGDELSET(ps->ps_sigonstack, sig); 322 if (act->sa_flags & SA_RESETHAND) 323 SIGADDSET(ps->ps_sigreset, sig); 324 else 325 SIGDELSET(ps->ps_sigreset, sig); 326 if (act->sa_flags & SA_NODEFER) 327 SIGADDSET(ps->ps_signodefer, sig); 328 else 329 SIGDELSET(ps->ps_signodefer, sig); 330 if (sig == SIGCHLD) { 331 if (act->sa_flags & SA_NOCLDSTOP) 332 p->p_sigacts->ps_flag |= PS_NOCLDSTOP; 333 else 334 p->p_sigacts->ps_flag &= ~PS_NOCLDSTOP; 335 if (act->sa_flags & SA_NOCLDWAIT) { 336 /* 337 * Paranoia: since SA_NOCLDWAIT is implemented 338 * by reparenting the dying child to PID 1 (and 339 * trust it to reap the zombie), PID 1 itself 340 * is forbidden to set SA_NOCLDWAIT. 341 */ 342 if (p->p_pid == 1) 343 p->p_sigacts->ps_flag &= ~PS_NOCLDWAIT; 344 else 345 p->p_sigacts->ps_flag |= PS_NOCLDWAIT; 346 } else { 347 p->p_sigacts->ps_flag &= ~PS_NOCLDWAIT; 348 } 349 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN) 350 ps->ps_flag |= PS_CLDSIGIGN; 351 else 352 ps->ps_flag &= ~PS_CLDSIGIGN; 353 } 354 /* 355 * Set bit in p_sigignore for signals that are set to SIG_IGN, 356 * and for signals set to SIG_DFL where the default is to 357 * ignore. However, don't put SIGCONT in p_sigignore, as we 358 * have to restart the process. 359 * 360 * Also remove the signal from the process and lwp signal 361 * list. 362 */ 363 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN || 364 (sigprop(sig) & SA_IGNORE && 365 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) { 366 SIGDELSET_ATOMIC(p->p_siglist, sig); 367 FOREACH_LWP_IN_PROC(lp, p) { 368 spin_lock(&lp->lwp_spin); 369 SIGDELSET(lp->lwp_siglist, sig); 370 spin_unlock(&lp->lwp_spin); 371 } 372 if (sig != SIGCONT) { 373 /* easier in ksignal */ 374 SIGADDSET(p->p_sigignore, sig); 375 } 376 SIGDELSET(p->p_sigcatch, sig); 377 } else { 378 SIGDELSET(p->p_sigignore, sig); 379 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL) 380 SIGDELSET(p->p_sigcatch, sig); 381 else 382 SIGADDSET(p->p_sigcatch, sig); 383 } 384 } 385 lwkt_reltoken(&p->p_token); 386 return (0); 387 } 388 389 int 390 sys_sigaction(struct sysmsg *sysmsg, const struct sigaction_args *uap) 391 { 392 struct sigaction act, oact; 393 struct sigaction *actp, *oactp; 394 int error; 395 396 actp = (uap->act != NULL) ? &act : NULL; 397 oactp = (uap->oact != NULL) ? &oact : NULL; 398 if (actp) { 399 error = copyin(uap->act, actp, sizeof(act)); 400 if (error) 401 return (error); 402 } 403 error = kern_sigaction(uap->sig, actp, oactp); 404 if (oactp && !error) { 405 error = copyout(oactp, uap->oact, sizeof(oact)); 406 } 407 return (error); 408 } 409 410 /* 411 * Initialize signal state for process 0; 412 * set to ignore signals that are ignored by default. 413 */ 414 void 415 siginit(struct proc *p) 416 { 417 int i; 418 419 for (i = 1; i <= NSIG; i++) { 420 if (sigprop(i) & SA_IGNORE && i != SIGCONT) 421 SIGADDSET(p->p_sigignore, i); 422 } 423 424 /* 425 * Also initialize signal-related global state. 426 */ 427 SIGSETOR_CANTMASK(sigcantmask_mask); 428 } 429 430 /* 431 * Reset signals for an exec of the specified process. 432 */ 433 void 434 execsigs(struct proc *p) 435 { 436 struct sigacts *ps = p->p_sigacts; 437 struct lwp *lp; 438 int sig; 439 440 lp = ONLY_LWP_IN_PROC(p); 441 442 /* 443 * Reset caught signals. Held signals remain held 444 * through p_sigmask (unless they were caught, 445 * and are now ignored by default). 446 */ 447 while (SIGNOTEMPTY(p->p_sigcatch)) { 448 sig = sig_ffs(&p->p_sigcatch); 449 SIGDELSET(p->p_sigcatch, sig); 450 if (sigprop(sig) & SA_IGNORE) { 451 if (sig != SIGCONT) 452 SIGADDSET(p->p_sigignore, sig); 453 SIGDELSET_ATOMIC(p->p_siglist, sig); 454 /* don't need spinlock */ 455 SIGDELSET(lp->lwp_siglist, sig); 456 } 457 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL; 458 } 459 460 /* 461 * Reset stack state to the user stack. 462 * Clear set of signals caught on the signal stack. 463 */ 464 lp->lwp_sigstk.ss_flags = SS_DISABLE; 465 lp->lwp_sigstk.ss_size = 0; 466 lp->lwp_sigstk.ss_sp = NULL; 467 lp->lwp_flags &= ~LWP_ALTSTACK; 468 /* 469 * Reset no zombies if child dies flag as Solaris does. 470 */ 471 p->p_sigacts->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN); 472 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN) 473 ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL; 474 } 475 476 /* 477 * kern_sigprocmask() - MP SAFE ONLY IF p == curproc 478 * 479 * Manipulate signal mask. This routine is MP SAFE *ONLY* if 480 * p == curproc. 481 */ 482 int 483 kern_sigprocmask(int how, sigset_t *set, sigset_t *oset) 484 { 485 struct thread *td = curthread; 486 struct lwp *lp = td->td_lwp; 487 struct proc *p = td->td_proc; 488 int error; 489 490 lwkt_gettoken(&p->p_token); 491 492 if (oset != NULL) 493 *oset = lp->lwp_sigmask; 494 495 error = 0; 496 if (set != NULL) { 497 switch (how) { 498 case SIG_BLOCK: 499 SIG_CANTMASK(*set); 500 SIGSETOR(lp->lwp_sigmask, *set); 501 break; 502 case SIG_UNBLOCK: 503 SIGSETNAND(lp->lwp_sigmask, *set); 504 break; 505 case SIG_SETMASK: 506 SIG_CANTMASK(*set); 507 lp->lwp_sigmask = *set; 508 sigirefs_wait(p); 509 break; 510 default: 511 error = EINVAL; 512 break; 513 } 514 } 515 516 lwkt_reltoken(&p->p_token); 517 518 return (error); 519 } 520 521 /* 522 * sigprocmask() 523 * 524 * MPSAFE 525 */ 526 int 527 sys_sigprocmask(struct sysmsg *sysmsg, const struct sigprocmask_args *uap) 528 { 529 sigset_t set, oset; 530 sigset_t *setp, *osetp; 531 int error; 532 533 setp = (uap->set != NULL) ? &set : NULL; 534 osetp = (uap->oset != NULL) ? &oset : NULL; 535 if (setp) { 536 error = copyin(uap->set, setp, sizeof(set)); 537 if (error) 538 return (error); 539 } 540 error = kern_sigprocmask(uap->how, setp, osetp); 541 if (osetp && !error) { 542 error = copyout(osetp, uap->oset, sizeof(oset)); 543 } 544 return (error); 545 } 546 547 /* 548 * MPSAFE 549 */ 550 int 551 kern_sigpending(sigset_t *set) 552 { 553 struct lwp *lp = curthread->td_lwp; 554 555 *set = lwp_sigpend(lp); 556 557 return (0); 558 } 559 560 /* 561 * MPSAFE 562 */ 563 int 564 sys_sigpending(struct sysmsg *sysmsg, const struct sigpending_args *uap) 565 { 566 sigset_t set; 567 int error; 568 569 error = kern_sigpending(&set); 570 571 if (error == 0) 572 error = copyout(&set, uap->set, sizeof(set)); 573 return (error); 574 } 575 576 /* 577 * Suspend process until signal, providing mask to be set 578 * in the meantime. 579 * 580 * MPSAFE 581 */ 582 int 583 kern_sigsuspend(sigset_t *set) 584 { 585 struct thread *td = curthread; 586 struct lwp *lp = td->td_lwp; 587 struct proc *p = td->td_proc; 588 struct sigacts *ps = p->p_sigacts; 589 590 /* 591 * When returning from sigsuspend, we want the old mask to be 592 * restored after the signal handler has finished. Thus, we 593 * save it here and mark the sigacts structure to indicate this. 594 * 595 * To interlock signal deliveries which may race this function, we 596 * must hold the LWP token, otherwise the signal may be made pending 597 * to the process rather than the lwp during execution of the tsleep() 598 * (which does not hold the process token to interlock that) and be 599 * missed by the tsleep(). 600 */ 601 lwkt_gettoken(&lp->lwp_token); 602 lp->lwp_oldsigmask = lp->lwp_sigmask; 603 lp->lwp_flags |= LWP_OLDMASK; 604 SIG_CANTMASK(*set); 605 lp->lwp_sigmask = *set; 606 lwkt_reltoken(&lp->lwp_token); 607 sigirefs_wait(p); 608 609 while (tsleep(ps, PCATCH, "pause", 0) == 0) 610 /* void */; 611 /* always return EINTR rather than ERESTART... */ 612 return (EINTR); 613 } 614 615 /* 616 * Note nonstandard calling convention: libc stub passes mask, not 617 * pointer, to save a copyin. 618 * 619 * MPSAFE 620 */ 621 int 622 sys_sigsuspend(struct sysmsg *sysmsg, const struct sigsuspend_args *uap) 623 { 624 sigset_t mask; 625 int error; 626 627 error = copyin(uap->sigmask, &mask, sizeof(mask)); 628 if (error) 629 return (error); 630 631 error = kern_sigsuspend(&mask); 632 633 return (error); 634 } 635 636 /* 637 * MPSAFE 638 */ 639 int 640 kern_sigaltstack(stack_t *ss, stack_t *oss) 641 { 642 struct thread *td = curthread; 643 struct lwp *lp = td->td_lwp; 644 struct proc *p = td->td_proc; 645 646 if ((lp->lwp_flags & LWP_ALTSTACK) == 0) 647 lp->lwp_sigstk.ss_flags |= SS_DISABLE; 648 649 if (oss) 650 *oss = lp->lwp_sigstk; 651 652 if (ss) { 653 if (ss->ss_flags & ~SS_DISABLE) 654 return (EINVAL); 655 if (ss->ss_flags & SS_DISABLE) { 656 if (lp->lwp_sigstk.ss_flags & SS_ONSTACK) 657 return (EPERM); 658 lp->lwp_flags &= ~LWP_ALTSTACK; 659 lp->lwp_sigstk.ss_flags = ss->ss_flags; 660 } else { 661 if (ss->ss_size < p->p_sysent->sv_minsigstksz) 662 return (ENOMEM); 663 lp->lwp_flags |= LWP_ALTSTACK; 664 lp->lwp_sigstk = *ss; 665 } 666 } 667 668 return (0); 669 } 670 671 /* 672 * MPSAFE 673 */ 674 int 675 sys_sigaltstack(struct sysmsg *sysmsg, const struct sigaltstack_args *uap) 676 { 677 stack_t ss, oss; 678 int error; 679 680 if (uap->ss) { 681 error = copyin(uap->ss, &ss, sizeof(ss)); 682 if (error) 683 return (error); 684 } 685 686 error = kern_sigaltstack(uap->ss ? &ss : NULL, uap->oss ? &oss : NULL); 687 688 if (error == 0 && uap->oss) 689 error = copyout(&oss, uap->oss, sizeof(*uap->oss)); 690 return (error); 691 } 692 693 /* 694 * Common code for kill process group/broadcast kill. 695 * cp is calling process. 696 */ 697 struct killpg_info { 698 int nfound; 699 int sig; 700 }; 701 702 static int killpg_all_callback(struct proc *p, void *data); 703 704 static int 705 dokillpg(int sig, int pgid, int all) 706 { 707 struct killpg_info info; 708 struct proc *cp = curproc; 709 struct proc *p; 710 struct pgrp *pgrp; 711 712 info.nfound = 0; 713 info.sig = sig; 714 715 if (all) { 716 /* 717 * broadcast 718 */ 719 allproc_scan(killpg_all_callback, &info, 0); 720 } else { 721 if (pgid == 0) { 722 /* 723 * zero pgid means send to my process group. 724 */ 725 pgrp = cp->p_pgrp; 726 pgref(pgrp); 727 } else { 728 pgrp = pgfind(pgid); 729 if (pgrp == NULL) 730 return (ESRCH); 731 } 732 733 /* 734 * Must interlock all signals against fork 735 */ 736 lockmgr(&pgrp->pg_lock, LK_EXCLUSIVE); 737 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) { 738 if (p->p_pid <= 1 || 739 p->p_stat == SZOMB || 740 (p->p_flags & P_SYSTEM) || 741 !CANSIGNAL(p, sig, 0)) { 742 continue; 743 } 744 ++info.nfound; 745 if (sig) 746 ksignal(p, sig); 747 } 748 lockmgr(&pgrp->pg_lock, LK_RELEASE); 749 pgrel(pgrp); 750 } 751 return (info.nfound ? 0 : ESRCH); 752 } 753 754 static int 755 killpg_all_callback(struct proc *p, void *data) 756 { 757 struct killpg_info *info = data; 758 759 if (p->p_pid <= 1 || (p->p_flags & P_SYSTEM) || 760 p == curproc || !CANSIGNAL(p, info->sig, 0)) { 761 return (0); 762 } 763 ++info->nfound; 764 if (info->sig) 765 ksignal(p, info->sig); 766 return(0); 767 } 768 769 /* 770 * Send a general signal to a process or LWPs within that process. 771 * 772 * Note that new signals cannot be sent if a process is exiting or already 773 * a zombie, but we return success anyway as userland is likely to not handle 774 * the race properly. 775 * 776 * No requirements. 777 */ 778 int 779 kern_kill(int sig, pid_t pid, lwpid_t tid) 780 { 781 int t; 782 783 if ((u_int)sig >= _SIG_MAXSIG) 784 return (EINVAL); 785 786 if (pid > 0) { 787 struct proc *p; 788 struct lwp *lp = NULL; 789 790 /* 791 * Sending a signal to pid 1 as root requires that we 792 * are not reboot-restricted. 793 */ 794 if (pid == 1 && caps_priv_check_self(SYSCAP_NOREBOOT)) 795 return EPERM; 796 797 /* 798 * Send a signal to a single process. If the kill() is 799 * racing an exiting process which has not yet been reaped 800 * act as though the signal was delivered successfully but 801 * don't actually try to deliver the signal. 802 */ 803 if ((p = pfind(pid)) == NULL) { 804 if ((p = zpfind(pid)) == NULL) 805 return (ESRCH); 806 PRELE(p); 807 return (0); 808 } 809 if (p != curproc) { 810 lwkt_gettoken_shared(&p->p_token); 811 if (!CANSIGNAL(p, sig, 1)) { 812 lwkt_reltoken(&p->p_token); 813 PRELE(p); 814 return (EPERM); 815 } 816 lwkt_reltoken(&p->p_token); 817 } 818 819 /* 820 * NOP if the process is exiting. Note that lwpsignal() is 821 * called directly with P_WEXIT set to kill individual LWPs 822 * during exit, which is allowed. 823 */ 824 if (p->p_flags & P_WEXIT) { 825 PRELE(p); 826 return (0); 827 } 828 if (tid != -1) { 829 lwkt_gettoken_shared(&p->p_token); 830 lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, tid); 831 if (lp == NULL) { 832 lwkt_reltoken(&p->p_token); 833 PRELE(p); 834 return (ESRCH); 835 } 836 LWPHOLD(lp); 837 lwkt_reltoken(&p->p_token); 838 } 839 if (sig) 840 lwpsignal(p, lp, sig); 841 if (lp) 842 LWPRELE(lp); 843 PRELE(p); 844 845 return (0); 846 } 847 848 /* 849 * If we come here, pid is a special broadcast pid. 850 * This doesn't mix with a tid. 851 */ 852 if (tid != -1) 853 return (EINVAL); 854 855 switch (pid) { 856 case -1: /* broadcast signal */ 857 t = (dokillpg(sig, 0, 1)); 858 break; 859 case 0: /* signal own process group */ 860 t = (dokillpg(sig, 0, 0)); 861 break; 862 default: /* negative explicit process group */ 863 t = (dokillpg(sig, -pid, 0)); 864 break; 865 } 866 return t; 867 } 868 869 int 870 sys_kill(struct sysmsg *sysmsg, const struct kill_args *uap) 871 { 872 int error; 873 874 error = kern_kill(uap->signum, uap->pid, -1); 875 return (error); 876 } 877 878 int 879 sys_lwp_kill(struct sysmsg *sysmsg, const struct lwp_kill_args *uap) 880 { 881 int error; 882 pid_t pid = uap->pid; 883 884 /* 885 * A tid is mandatory for lwp_kill(), otherwise 886 * you could simply use kill(). 887 */ 888 if (uap->tid == -1) 889 return (EINVAL); 890 891 /* 892 * To save on a getpid() function call for intra-process 893 * signals, pid == -1 means current process. 894 */ 895 if (pid == -1) 896 pid = curproc->p_pid; 897 898 error = kern_kill(uap->signum, pid, uap->tid); 899 return (error); 900 } 901 902 /* 903 * Send a signal to a process group. 904 */ 905 void 906 gsignal(int pgid, int sig) 907 { 908 struct pgrp *pgrp; 909 910 if (pgid && (pgrp = pgfind(pgid))) 911 pgsignal(pgrp, sig, 0); 912 } 913 914 /* 915 * Send a signal to a process group. If checktty is 1, 916 * limit to members which have a controlling terminal. 917 * 918 * pg_lock interlocks against a fork that might be in progress, to 919 * ensure that the new child process picks up the signal. 920 */ 921 void 922 pgsignal(struct pgrp *pgrp, int sig, int checkctty) 923 { 924 struct proc *p; 925 926 /* 927 * Must interlock all signals against fork 928 */ 929 if (pgrp) { 930 pgref(pgrp); 931 lockmgr(&pgrp->pg_lock, LK_EXCLUSIVE); 932 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) { 933 if (checkctty == 0 || p->p_flags & P_CONTROLT) 934 ksignal(p, sig); 935 } 936 lockmgr(&pgrp->pg_lock, LK_RELEASE); 937 pgrel(pgrp); 938 } 939 } 940 941 /* 942 * Send a signal caused by a trap to the current lwp. If it will be caught 943 * immediately, deliver it with correct code. Otherwise, post it normally. 944 * 945 * These signals may ONLY be delivered to the specified lwp and may never 946 * be delivered to the process generically. 947 * 948 * lpmap->blockallsigs is ignored. 949 */ 950 void 951 trapsignal(struct lwp *lp, int sig, u_long code) 952 { 953 struct proc *p = lp->lwp_proc; 954 struct sigacts *ps = p->p_sigacts; 955 956 /* 957 * If we are a virtual kernel running an emulated user process 958 * context, switch back to the virtual kernel context before 959 * trying to post the signal. 960 */ 961 if (lp->lwp_vkernel && lp->lwp_vkernel->ve) { 962 struct trapframe *tf = lp->lwp_md.md_regs; 963 tf->tf_trapno = 0; 964 vkernel_trap(lp, tf); 965 } 966 967 if ((p->p_flags & P_TRACED) == 0 && SIGISMEMBER(p->p_sigcatch, sig) && 968 !SIGISMEMBER(lp->lwp_sigmask, sig)) { 969 lp->lwp_ru.ru_nsignals++; 970 #ifdef KTRACE 971 if (KTRPOINT(lp->lwp_thread, KTR_PSIG)) 972 ktrpsig(lp, sig, ps->ps_sigact[_SIG_IDX(sig)], 973 &lp->lwp_sigmask, code); 974 #endif 975 (*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)], sig, 976 &lp->lwp_sigmask, code); 977 SIGSETOR(lp->lwp_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]); 978 if (!SIGISMEMBER(ps->ps_signodefer, sig)) 979 SIGADDSET(lp->lwp_sigmask, sig); 980 if (SIGISMEMBER(ps->ps_sigreset, sig)) { 981 /* 982 * See kern_sigaction() for origin of this code. 983 */ 984 SIGDELSET(p->p_sigcatch, sig); 985 if (sig != SIGCONT && 986 sigprop(sig) & SA_IGNORE) 987 SIGADDSET(p->p_sigignore, sig); 988 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL; 989 } 990 } else { 991 lp->lwp_code = code; /* XXX for core dump/debugger */ 992 lp->lwp_sig = sig; /* XXX to verify code */ 993 lwpsignal(p, lp, sig); 994 } 995 } 996 997 /* 998 * Find a suitable lwp to deliver the signal to. Returns NULL if all 999 * lwps hold the signal blocked. 1000 * 1001 * Caller must hold p->p_token. 1002 * 1003 * Returns a lp or NULL. If non-NULL the lp is held and its token is 1004 * acquired. 1005 */ 1006 static struct lwp * 1007 find_lwp_for_signal(struct proc *p, int sig) 1008 { 1009 struct lwp *lp; 1010 struct lwp *run, *sleep, *stop; 1011 1012 /* 1013 * If the running/preempted thread belongs to the proc to which 1014 * the signal is being delivered and this thread does not block 1015 * the signal, then we can avoid a context switch by delivering 1016 * the signal to this thread, because it will return to userland 1017 * soon anyways. 1018 */ 1019 lp = lwkt_preempted_proc(); 1020 if (lp != NULL && lp->lwp_proc == p) { 1021 LWPHOLD(lp); 1022 lwkt_gettoken(&lp->lwp_token); 1023 if (!SIGISMEMBER(lp->lwp_sigmask, sig)) { 1024 /* return w/ token held */ 1025 return (lp); 1026 } 1027 lwkt_reltoken(&lp->lwp_token); 1028 LWPRELE(lp); 1029 } 1030 1031 run = sleep = stop = NULL; 1032 FOREACH_LWP_IN_PROC(lp, p) { 1033 /* 1034 * If the signal is being blocked by the lwp, then this 1035 * lwp is not eligible for receiving the signal. 1036 */ 1037 LWPHOLD(lp); 1038 lwkt_gettoken(&lp->lwp_token); 1039 1040 if (SIGISMEMBER(lp->lwp_sigmask, sig)) { 1041 lwkt_reltoken(&lp->lwp_token); 1042 LWPRELE(lp); 1043 continue; 1044 } 1045 1046 switch (lp->lwp_stat) { 1047 case LSRUN: 1048 if (sleep) { 1049 lwkt_token_swap(); 1050 lwkt_reltoken(&sleep->lwp_token); 1051 LWPRELE(sleep); 1052 sleep = NULL; 1053 run = lp; 1054 } else if (stop) { 1055 lwkt_token_swap(); 1056 lwkt_reltoken(&stop->lwp_token); 1057 LWPRELE(stop); 1058 stop = NULL; 1059 run = lp; 1060 } else { 1061 run = lp; 1062 } 1063 break; 1064 case LSSLEEP: 1065 if (lp->lwp_flags & LWP_SINTR) { 1066 if (sleep) { 1067 lwkt_reltoken(&lp->lwp_token); 1068 LWPRELE(lp); 1069 } else if (stop) { 1070 lwkt_token_swap(); 1071 lwkt_reltoken(&stop->lwp_token); 1072 LWPRELE(stop); 1073 stop = NULL; 1074 sleep = lp; 1075 } else { 1076 sleep = lp; 1077 } 1078 } else { 1079 lwkt_reltoken(&lp->lwp_token); 1080 LWPRELE(lp); 1081 } 1082 break; 1083 case LSSTOP: 1084 if (sleep) { 1085 lwkt_reltoken(&lp->lwp_token); 1086 LWPRELE(lp); 1087 } else if (stop) { 1088 lwkt_reltoken(&lp->lwp_token); 1089 LWPRELE(lp); 1090 } else { 1091 stop = lp; 1092 } 1093 break; 1094 } 1095 if (run) 1096 break; 1097 } 1098 1099 if (run != NULL) 1100 return (run); 1101 else if (sleep != NULL) 1102 return (sleep); 1103 else 1104 return (stop); 1105 } 1106 1107 /* 1108 * Send the signal to the process. If the signal has an action, the action 1109 * is usually performed by the target process rather than the caller; we add 1110 * the signal to the set of pending signals for the process. 1111 * 1112 * Exceptions: 1113 * o When a stop signal is sent to a sleeping process that takes the 1114 * default action, the process is stopped without awakening it. 1115 * o SIGCONT restarts stopped processes (or puts them back to sleep) 1116 * regardless of the signal action (eg, blocked or ignored). 1117 * 1118 * Other ignored signals are discarded immediately. 1119 * 1120 * If the caller wishes to call this function from a hard code section the 1121 * caller must already hold p->p_token (see kern_clock.c). 1122 * 1123 * No requirements. 1124 */ 1125 void 1126 ksignal(struct proc *p, int sig) 1127 { 1128 lwpsignal(p, NULL, sig); 1129 } 1130 1131 /* 1132 * The core for ksignal. lp may be NULL, then a suitable thread 1133 * will be chosen. If not, lp MUST be a member of p. 1134 * 1135 * If the caller wishes to call this function from a hard code section the 1136 * caller must already hold p->p_token. 1137 * 1138 * No requirements. 1139 */ 1140 void 1141 lwpsignal(struct proc *p, struct lwp *lp, int sig) 1142 { 1143 struct proc *q; 1144 sig_t action; 1145 int prop; 1146 1147 if (sig >= _SIG_MAXSIG || sig <= 0) { 1148 kprintf("lwpsignal: signal %d\n", sig); 1149 panic("lwpsignal signal number"); 1150 } 1151 1152 KKASSERT(lp == NULL || lp->lwp_proc == p); 1153 1154 /* 1155 * We don't want to race... well, all sorts of things. Get appropriate 1156 * tokens. 1157 * 1158 * Don't try to deliver a generic signal to an exiting process, 1159 * the signal structures could be in flux. We check the LWP later 1160 * on. 1161 */ 1162 PHOLD(p); 1163 if (lp) { 1164 LWPHOLD(lp); 1165 lwkt_gettoken(&lp->lwp_token); 1166 } else { 1167 lwkt_gettoken(&p->p_token); 1168 if (p->p_flags & P_WEXIT) 1169 goto out; 1170 } 1171 1172 prop = sigprop(sig); 1173 1174 /* 1175 * If proc is traced, always give parent a chance; 1176 * if signal event is tracked by procfs, give *that* 1177 * a chance, as well. 1178 */ 1179 if ((p->p_flags & P_TRACED) || (p->p_stops & S_SIG)) { 1180 action = SIG_DFL; 1181 } else { 1182 /* 1183 * Do not try to deliver signals to an exiting lwp other 1184 * than SIGKILL. Note that we must still deliver the signal 1185 * if P_WEXIT is set in the process flags. 1186 */ 1187 if (lp && (lp->lwp_mpflags & LWP_MP_WEXIT) && sig != SIGKILL) { 1188 lwkt_reltoken(&lp->lwp_token); 1189 LWPRELE(lp); 1190 PRELE(p); 1191 return; 1192 } 1193 1194 /* 1195 * If the signal is being ignored, then we forget about 1196 * it immediately. NOTE: We don't set SIGCONT in p_sigignore, 1197 * and if it is set to SIG_IGN, action will be SIG_DFL here. 1198 */ 1199 if (SIGISMEMBER(p->p_sigignore, sig)) { 1200 /* 1201 * Even if a signal is set SIG_IGN, it may still be 1202 * lurking in a kqueue. 1203 */ 1204 KNOTE(&p->p_klist, NOTE_SIGNAL | sig); 1205 if (lp) { 1206 lwkt_reltoken(&lp->lwp_token); 1207 LWPRELE(lp); 1208 } else { 1209 lwkt_reltoken(&p->p_token); 1210 } 1211 PRELE(p); 1212 return; 1213 } 1214 if (SIGISMEMBER(p->p_sigcatch, sig)) 1215 action = SIG_CATCH; 1216 else 1217 action = SIG_DFL; 1218 } 1219 1220 /* 1221 * If continuing, clear any pending STOP signals for the whole 1222 * process. 1223 */ 1224 if (prop & SA_CONT) { 1225 lwkt_gettoken(&p->p_token); 1226 SIG_STOPSIGMASK_ATOMIC(p->p_siglist); 1227 lwkt_reltoken(&p->p_token); 1228 } 1229 1230 if (prop & SA_STOP) { 1231 /* 1232 * If sending a tty stop signal to a member of an orphaned 1233 * process group, discard the signal here if the action 1234 * is default; don't stop the process below if sleeping, 1235 * and don't clear any pending SIGCONT. 1236 */ 1237 if ((prop & SA_TTYSTOP) && p->p_pgrp->pg_jobc == 0 && 1238 action == SIG_DFL) { 1239 if (lp) { 1240 lwkt_reltoken(&lp->lwp_token); 1241 LWPRELE(lp); 1242 } else { 1243 lwkt_reltoken(&p->p_token); 1244 } 1245 PRELE(p); 1246 return; 1247 } 1248 lwkt_gettoken(&p->p_token); 1249 SIG_CONTSIGMASK_ATOMIC(p->p_siglist); 1250 p->p_flags &= ~P_CONTINUED; 1251 lwkt_reltoken(&p->p_token); 1252 } 1253 1254 if (p->p_stat == SSTOP) { 1255 /* 1256 * Nobody can handle this signal, add it to the lwp or 1257 * process pending list 1258 */ 1259 lwkt_gettoken(&p->p_token); 1260 if (p->p_stat != SSTOP) { 1261 lwkt_reltoken(&p->p_token); 1262 goto not_stopped; 1263 } 1264 sigsetfrompid(curthread, p, sig); 1265 if (lp) { 1266 spin_lock(&lp->lwp_spin); 1267 SIGADDSET(lp->lwp_siglist, sig); 1268 spin_unlock(&lp->lwp_spin); 1269 } else { 1270 SIGADDSET_ATOMIC(p->p_siglist, sig); 1271 } 1272 1273 /* 1274 * If the process is stopped and is being traced, then no 1275 * further action is necessary. 1276 */ 1277 if (p->p_flags & P_TRACED) { 1278 lwkt_reltoken(&p->p_token); 1279 goto out; 1280 } 1281 1282 /* 1283 * If the process is stopped and receives a KILL signal, 1284 * make the process runnable. 1285 */ 1286 if (sig == SIGKILL) { 1287 proc_unstop(p, SSTOP); 1288 lwkt_reltoken(&p->p_token); 1289 goto active_process; 1290 } 1291 1292 /* 1293 * If the process is stopped and receives a CONT signal, 1294 * then try to make the process runnable again. 1295 */ 1296 if (prop & SA_CONT) { 1297 /* 1298 * If SIGCONT is default (or ignored), we continue the 1299 * process but don't leave the signal in p_siglist, as 1300 * it has no further action. If SIGCONT is held, we 1301 * continue the process and leave the signal in 1302 * p_siglist. If the process catches SIGCONT, let it 1303 * handle the signal itself. 1304 * 1305 * XXX what if the signal is being held blocked? 1306 * 1307 * Token required to interlock kern_wait(). 1308 * Reparenting can also cause a race so we have to 1309 * hold (q). 1310 */ 1311 q = p->p_pptr; 1312 PHOLD(q); 1313 lwkt_gettoken(&q->p_token); 1314 p->p_flags |= P_CONTINUED; 1315 wakeup(q); 1316 if (action == SIG_DFL) 1317 SIGDELSET_ATOMIC(p->p_siglist, sig); 1318 proc_unstop(p, SSTOP); 1319 lwkt_reltoken(&q->p_token); 1320 PRELE(q); 1321 lwkt_reltoken(&p->p_token); 1322 if (action == SIG_CATCH) 1323 goto active_process; 1324 goto out; 1325 } 1326 1327 /* 1328 * If the process is stopped and receives another STOP 1329 * signal, we do not need to stop it again. If we did 1330 * the shell could get confused. 1331 * 1332 * However, if the current/preempted lwp is part of the 1333 * process receiving the signal, we need to keep it, 1334 * so that this lwp can stop in issignal() later, as 1335 * we don't want to wait until it reaches userret! 1336 */ 1337 if (prop & SA_STOP) { 1338 if (lwkt_preempted_proc() == NULL || 1339 lwkt_preempted_proc()->lwp_proc != p) { 1340 SIGDELSET_ATOMIC(p->p_siglist, sig); 1341 } 1342 } 1343 1344 /* 1345 * Otherwise the process is stopped and it received some 1346 * signal, which does not change its stopped state. When 1347 * the process is continued a wakeup(p) will be issued which 1348 * will wakeup any threads sleeping in tstop(). 1349 */ 1350 lwkt_reltoken(&p->p_token); 1351 goto out; 1352 /* NOTREACHED */ 1353 } 1354 not_stopped: 1355 ; 1356 /* else not stopped */ 1357 active_process: 1358 1359 /* 1360 * Never deliver a lwp-specific signal to a random lwp. 1361 * 1362 * When delivering an untargetted signal, use p_sigirefs to 1363 * inform lwps of potential collisions. 1364 */ 1365 if (lp == NULL) { 1366 /* NOTE: returns lp w/ token held */ 1367 sigirefs_hold(p); 1368 lp = find_lwp_for_signal(p, sig); 1369 if (lp) { 1370 if (SIGISMEMBER(lp->lwp_sigmask, sig)) { 1371 lwkt_reltoken(&lp->lwp_token); 1372 LWPRELE(lp); 1373 lp = NULL; 1374 /* maintain proc token */ 1375 /* maintain sigirefs */ 1376 } else { 1377 lwkt_token_swap(); 1378 lwkt_reltoken(&p->p_token); 1379 /* maintain lp token */ 1380 sigirefs_drop(p); 1381 } 1382 } 1383 } 1384 1385 /* 1386 * Deliver to the process generically if (1) the signal is being 1387 * sent to any thread or (2) we could not find a thread to deliver 1388 * it to. 1389 * 1390 * Drop p_sigirefs after the signal has been resolved to interlock 1391 * against sigsuspend/ppoll/pselect. 1392 */ 1393 if (lp == NULL) { 1394 sigsetfrompid(curthread, p, sig); 1395 KNOTE(&p->p_klist, NOTE_SIGNAL | sig); 1396 SIGADDSET_ATOMIC(p->p_siglist, sig); 1397 sigirefs_drop(p); 1398 goto out; 1399 } 1400 1401 /* 1402 * Deliver to a specific LWP whether it masks it or not. It will 1403 * not be dispatched if masked but we must still deliver it. 1404 */ 1405 if (p->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) && 1406 (p->p_flags & P_TRACED) == 0) { 1407 lwkt_gettoken(&p->p_token); 1408 p->p_nice = NZERO; 1409 lwkt_reltoken(&p->p_token); 1410 } 1411 1412 /* 1413 * If the process receives a STOP signal which indeed needs to 1414 * stop the process, do so. If the process chose to catch the 1415 * signal, it will be treated like any other signal. 1416 */ 1417 if ((prop & SA_STOP) && action == SIG_DFL) { 1418 /* 1419 * If a child holding parent blocked, stopping 1420 * could cause deadlock. Take no action at this 1421 * time. 1422 */ 1423 lwkt_gettoken(&p->p_token); 1424 if (p->p_flags & P_PPWAIT) { 1425 sigsetfrompid(curthread, p, sig); 1426 SIGADDSET_ATOMIC(p->p_siglist, sig); 1427 lwkt_reltoken(&p->p_token); 1428 goto out; 1429 } 1430 1431 /* 1432 * Do not actually try to manipulate the process, but simply 1433 * stop it. Lwps will stop as soon as they safely can. 1434 * 1435 * Ignore stop if the process is exiting. 1436 */ 1437 if ((p->p_flags & P_WEXIT) == 0) { 1438 p->p_xstat = sig; 1439 proc_stop(p, SSTOP); 1440 } 1441 lwkt_reltoken(&p->p_token); 1442 goto out; 1443 } 1444 1445 /* 1446 * If it is a CONT signal with default action, just ignore it. 1447 */ 1448 if ((prop & SA_CONT) && action == SIG_DFL) 1449 goto out; 1450 1451 /* 1452 * Mark signal pending at this specific thread. 1453 */ 1454 sigsetfrompid(curthread, p, sig); 1455 spin_lock(&lp->lwp_spin); 1456 SIGADDSET(lp->lwp_siglist, sig); 1457 spin_unlock(&lp->lwp_spin); 1458 1459 lwp_signotify(lp); 1460 1461 out: 1462 if (lp) { 1463 lwkt_reltoken(&lp->lwp_token); 1464 LWPRELE(lp); 1465 } else { 1466 lwkt_reltoken(&p->p_token); 1467 } 1468 PRELE(p); 1469 } 1470 1471 /* 1472 * Notify the LWP that a signal has arrived. The LWP does not have to be 1473 * sleeping on the current cpu. 1474 * 1475 * p->p_token and lp->lwp_token must be held on call. 1476 * 1477 * We can only safely schedule the thread on its current cpu and only if 1478 * one of the SINTR flags is set. If an SINTR flag is set AND we are on 1479 * the correct cpu we are properly interlocked, otherwise we could be 1480 * racing other thread transition states (or the lwp is on the user scheduler 1481 * runq but not scheduled) and must not do anything. 1482 * 1483 * Since we hold the lwp token we know the lwp cannot be ripped out from 1484 * under us so we can safely hold it to prevent it from being ripped out 1485 * from under us if we are forced to IPI another cpu to make the local 1486 * checks there. 1487 * 1488 * Adjustment of lp->lwp_stat can only occur when we hold the lwp_token, 1489 * which we won't in an IPI so any fixups have to be done here, effectively 1490 * replicating part of what setrunnable() does. 1491 */ 1492 static void 1493 lwp_signotify(struct lwp *lp) 1494 { 1495 thread_t dtd; 1496 1497 ASSERT_LWKT_TOKEN_HELD(&lp->lwp_token); 1498 dtd = lp->lwp_thread; 1499 1500 crit_enter(); 1501 if (lp == lwkt_preempted_proc()) { 1502 /* 1503 * lwp is on the current cpu AND it is currently running 1504 * (we preempted it). 1505 */ 1506 signotify(); 1507 } else if (lp->lwp_flags & LWP_SINTR) { 1508 /* 1509 * lwp is sitting in tsleep() with PCATCH set 1510 */ 1511 if (dtd->td_gd == mycpu) { 1512 setrunnable(lp); 1513 } else { 1514 /* 1515 * We can only adjust lwp_stat while we hold the 1516 * lwp_token, and we won't in the IPI function. 1517 */ 1518 LWPHOLD(lp); 1519 if (lp->lwp_stat == LSSTOP) 1520 lp->lwp_stat = LSSLEEP; 1521 lwkt_send_ipiq(dtd->td_gd, lwp_signotify_remote, lp); 1522 } 1523 } else if (dtd->td_flags & TDF_SINTR) { 1524 /* 1525 * lwp is sitting in lwkt_sleep() with PCATCH set. 1526 */ 1527 if (dtd->td_gd == mycpu) { 1528 setrunnable(lp); 1529 } else { 1530 /* 1531 * We can only adjust lwp_stat while we hold the 1532 * lwp_token, and we won't in the IPI function. 1533 */ 1534 LWPHOLD(lp); 1535 if (lp->lwp_stat == LSSTOP) 1536 lp->lwp_stat = LSSLEEP; 1537 lwkt_send_ipiq(dtd->td_gd, lwp_signotify_remote, lp); 1538 } 1539 } else { 1540 /* 1541 * Otherwise the lwp is either in some uninterruptible state 1542 * or it is on the userland scheduler's runqueue waiting to 1543 * be scheduled to a cpu, or it is running in userland. We 1544 * generally want to send an IPI so a running target gets the 1545 * signal ASAP, otherwise a scheduler-tick worth of latency 1546 * will occur. 1547 * 1548 * Issue an IPI to the remote cpu to knock it into the kernel, 1549 * remote cpu will issue the cpu-local signotify() if the IPI 1550 * preempts the desired thread. 1551 */ 1552 if (dtd->td_gd != mycpu) { 1553 LWPHOLD(lp); 1554 lwkt_send_ipiq(dtd->td_gd, lwp_signotify_remote, lp); 1555 } 1556 } 1557 crit_exit(); 1558 } 1559 1560 /* 1561 * This function is called via an IPI so we cannot call setrunnable() here 1562 * (because while we hold the lp we don't own its token, and can't get it 1563 * from an IPI). 1564 * 1565 * We are interlocked by virtue of being on the same cpu as the target. If 1566 * we still are and LWP_SINTR or TDF_SINTR is set we can safely schedule 1567 * the target thread. 1568 */ 1569 static void 1570 lwp_signotify_remote(void *arg) 1571 { 1572 struct lwp *lp = arg; 1573 thread_t td = lp->lwp_thread; 1574 1575 if (lp == lwkt_preempted_proc()) { 1576 signotify(); 1577 LWPRELE(lp); 1578 } else if (td->td_gd == mycpu) { 1579 if ((lp->lwp_flags & LWP_SINTR) || 1580 (td->td_flags & TDF_SINTR)) { 1581 lwkt_schedule(td); 1582 } 1583 LWPRELE(lp); 1584 } else { 1585 lwkt_send_ipiq(td->td_gd, lwp_signotify_remote, lp); 1586 /* LWPHOLD() is forwarded to the target cpu */ 1587 } 1588 } 1589 1590 /* 1591 * Caller must hold p->p_token 1592 */ 1593 void 1594 proc_stop(struct proc *p, int stat) 1595 { 1596 struct proc *q; 1597 struct lwp *lp; 1598 1599 ASSERT_LWKT_TOKEN_HELD(&p->p_token); 1600 1601 /* 1602 * If somebody raced us, be happy with it. SCORE overrides SSTOP. 1603 */ 1604 if (stat == SCORE) { 1605 if (p->p_stat == SCORE || p->p_stat == SZOMB) 1606 return; 1607 } else { 1608 if (p->p_stat == SSTOP || p->p_stat == SCORE || 1609 p->p_stat == SZOMB) { 1610 return; 1611 } 1612 } 1613 p->p_stat = stat; 1614 1615 FOREACH_LWP_IN_PROC(lp, p) { 1616 LWPHOLD(lp); 1617 lwkt_gettoken(&lp->lwp_token); 1618 1619 switch (lp->lwp_stat) { 1620 case LSSTOP: 1621 /* 1622 * Do nothing, we are already counted in 1623 * p_nstopped. 1624 */ 1625 break; 1626 1627 case LSSLEEP: 1628 /* 1629 * We're sleeping, but we will stop before 1630 * returning to userspace, so count us 1631 * as stopped as well. We set LWP_MP_WSTOP 1632 * to signal the lwp that it should not 1633 * increase p_nstopped when reaching tstop(). 1634 * 1635 * LWP_MP_WSTOP is protected by lp->lwp_token. 1636 */ 1637 if ((lp->lwp_mpflags & LWP_MP_WSTOP) == 0) { 1638 atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP); 1639 ++p->p_nstopped; 1640 } 1641 break; 1642 1643 case LSRUN: 1644 /* 1645 * We might notify ourself, but that's not 1646 * a problem. 1647 */ 1648 lwp_signotify(lp); 1649 break; 1650 } 1651 lwkt_reltoken(&lp->lwp_token); 1652 LWPRELE(lp); 1653 } 1654 1655 if (p->p_nstopped == p->p_nthreads) { 1656 /* 1657 * Token required to interlock kern_wait(). Reparenting can 1658 * also cause a race so we have to hold (q). 1659 */ 1660 q = p->p_pptr; 1661 PHOLD(q); 1662 lwkt_gettoken(&q->p_token); 1663 p->p_flags &= ~P_WAITED; 1664 wakeup(q); 1665 if ((q->p_sigacts->ps_flag & PS_NOCLDSTOP) == 0) 1666 ksignal(p->p_pptr, SIGCHLD); 1667 lwkt_reltoken(&q->p_token); 1668 PRELE(q); 1669 } 1670 } 1671 1672 /* 1673 * Caller must hold p_token 1674 */ 1675 void 1676 proc_unstop(struct proc *p, int stat) 1677 { 1678 struct lwp *lp; 1679 1680 ASSERT_LWKT_TOKEN_HELD(&p->p_token); 1681 1682 if (p->p_stat != stat) 1683 return; 1684 1685 p->p_stat = SACTIVE; 1686 1687 FOREACH_LWP_IN_PROC(lp, p) { 1688 LWPHOLD(lp); 1689 lwkt_gettoken(&lp->lwp_token); 1690 1691 switch (lp->lwp_stat) { 1692 case LSRUN: 1693 /* 1694 * Uh? Not stopped? Well, I guess that's okay. 1695 */ 1696 if (bootverbose) 1697 kprintf("proc_unstop: lwp %d/%d not sleeping\n", 1698 p->p_pid, lp->lwp_tid); 1699 break; 1700 1701 case LSSLEEP: 1702 /* 1703 * Still sleeping. Don't bother waking it up. 1704 * However, if this thread was counted as 1705 * stopped, undo this. 1706 * 1707 * Nevertheless we call setrunnable() so that it 1708 * will wake up in case a signal or timeout arrived 1709 * in the meantime. 1710 * 1711 * LWP_MP_WSTOP is protected by lp->lwp_token. 1712 */ 1713 if (lp->lwp_mpflags & LWP_MP_WSTOP) { 1714 atomic_clear_int(&lp->lwp_mpflags, 1715 LWP_MP_WSTOP); 1716 --p->p_nstopped; 1717 } else { 1718 if (bootverbose) 1719 kprintf("proc_unstop: lwp %d/%d sleeping, not stopped\n", 1720 p->p_pid, lp->lwp_tid); 1721 } 1722 /* FALLTHROUGH */ 1723 1724 case LSSTOP: 1725 /* 1726 * This handles any lwp's waiting in a tsleep with 1727 * SIGCATCH. 1728 */ 1729 lwp_signotify(lp); 1730 break; 1731 1732 } 1733 lwkt_reltoken(&lp->lwp_token); 1734 LWPRELE(lp); 1735 } 1736 1737 /* 1738 * This handles any lwp's waiting in tstop(). We have interlocked 1739 * the setting of p_stat by acquiring and releasing each lpw's 1740 * token. 1741 */ 1742 wakeup(p); 1743 } 1744 1745 /* 1746 * Wait for all threads except the current thread to stop. 1747 */ 1748 static void 1749 proc_stopwait(struct proc *p) 1750 { 1751 while ((p->p_stat == SSTOP || p->p_stat == SCORE) && 1752 p->p_nstopped < p->p_nthreads - 1) { 1753 tsleep_interlock(&p->p_nstopped, 0); 1754 if (p->p_nstopped < p->p_nthreads - 1) { 1755 tsleep(&p->p_nstopped, PINTERLOCKED, "stopwt", hz); 1756 } 1757 } 1758 } 1759 1760 /* 1761 * No requirements. 1762 */ 1763 static int 1764 kern_sigtimedwait(sigset_t waitset, siginfo_t *info, struct timespec *timeout) 1765 { 1766 sigset_t savedmask, set; 1767 struct proc *p = curproc; 1768 struct lwp *lp = curthread->td_lwp; 1769 int error, sig, hz, timevalid = 0; 1770 struct timespec rts, ets, ts; 1771 struct timeval tv; 1772 1773 error = 0; 1774 sig = 0; 1775 ets.tv_sec = 0; /* silence compiler warning */ 1776 ets.tv_nsec = 0; /* silence compiler warning */ 1777 SIG_CANTMASK(waitset); 1778 savedmask = lp->lwp_sigmask; 1779 1780 if (timeout) { 1781 if (timeout->tv_sec >= 0 && timeout->tv_nsec >= 0 && 1782 timeout->tv_nsec < 1000000000) { 1783 timevalid = 1; 1784 getnanouptime(&rts); 1785 timespecadd(&rts, timeout, &ets); 1786 } 1787 } 1788 1789 for (;;) { 1790 set = lwp_sigpend(lp); 1791 SIGSETAND(set, waitset); 1792 if ((sig = sig_ffs(&set)) != 0) { 1793 SIGFILLSET(lp->lwp_sigmask); 1794 SIGDELSET(lp->lwp_sigmask, sig); 1795 SIG_CANTMASK(lp->lwp_sigmask); 1796 sig = issignal(lp, 1, 0); 1797 /* 1798 * It may be a STOP signal, in the case, issignal 1799 * returns 0, because we may stop there, and new 1800 * signal can come in, we should restart if we got 1801 * nothing. 1802 */ 1803 if (sig == 0) 1804 continue; 1805 else 1806 break; 1807 } 1808 1809 /* 1810 * Previous checking got nothing, and we retried but still 1811 * got nothing, we should return the error status. 1812 */ 1813 if (error) 1814 break; 1815 1816 /* 1817 * POSIX says this must be checked after looking for pending 1818 * signals. 1819 */ 1820 if (timeout) { 1821 if (timevalid == 0) { 1822 error = EINVAL; 1823 break; 1824 } 1825 getnanouptime(&rts); 1826 if (timespeccmp(&rts, &ets, >=)) { 1827 error = EAGAIN; 1828 break; 1829 } 1830 timespecsub(&ets, &rts, &ts); 1831 TIMESPEC_TO_TIMEVAL(&tv, &ts); 1832 hz = tvtohz_high(&tv); 1833 } else { 1834 hz = 0; 1835 } 1836 1837 lp->lwp_sigmask = savedmask; 1838 SIGSETNAND(lp->lwp_sigmask, waitset); 1839 sigirefs_wait(p); 1840 1841 /* 1842 * We won't ever be woken up. Instead, our sleep will 1843 * be broken in lwpsignal(). 1844 */ 1845 error = tsleep(&p->p_sigacts, PCATCH, "sigwt", hz); 1846 if (timeout) { 1847 if (error == ERESTART) { 1848 /* can not restart a timeout wait. */ 1849 error = EINTR; 1850 } else if (error == EAGAIN) { 1851 /* will calculate timeout by ourself. */ 1852 error = 0; 1853 } 1854 } 1855 /* Retry ... */ 1856 } 1857 1858 lp->lwp_sigmask = savedmask; 1859 sigirefs_wait(p); 1860 1861 if (sig) { 1862 error = 0; 1863 bzero(info, sizeof(*info)); 1864 info->si_signo = sig; 1865 spin_lock(&lp->lwp_spin); 1866 lwp_delsig(lp, sig, 1); /* take the signal! */ 1867 spin_unlock(&lp->lwp_spin); 1868 1869 if (sig == SIGKILL) { 1870 sigexit(lp, sig); 1871 /* NOT REACHED */ 1872 } 1873 } 1874 1875 return (error); 1876 } 1877 1878 /* 1879 * MPALMOSTSAFE 1880 */ 1881 int 1882 sys_sigtimedwait(struct sysmsg *sysmsg, const struct sigtimedwait_args *uap) 1883 { 1884 struct timespec ts; 1885 struct timespec *timeout; 1886 sigset_t set; 1887 siginfo_t info; 1888 int error; 1889 1890 if (uap->timeout) { 1891 error = copyin(uap->timeout, &ts, sizeof(ts)); 1892 if (error) 1893 return (error); 1894 timeout = &ts; 1895 } else { 1896 timeout = NULL; 1897 } 1898 error = copyin(uap->set, &set, sizeof(set)); 1899 if (error) 1900 return (error); 1901 error = kern_sigtimedwait(set, &info, timeout); 1902 if (error) 1903 return (error); 1904 if (uap->info) 1905 error = copyout(&info, uap->info, sizeof(info)); 1906 /* Repost if we got an error. */ 1907 /* 1908 * XXX lwp 1909 * 1910 * This could transform a thread-specific signal to another 1911 * thread / process pending signal. 1912 */ 1913 if (error) { 1914 ksignal(curproc, info.si_signo); 1915 } else { 1916 sysmsg->sysmsg_result = info.si_signo; 1917 } 1918 return (error); 1919 } 1920 1921 /* 1922 * MPALMOSTSAFE 1923 */ 1924 int 1925 sys_sigwaitinfo(struct sysmsg *sysmsg, const struct sigwaitinfo_args *uap) 1926 { 1927 siginfo_t info; 1928 sigset_t set; 1929 int error; 1930 1931 error = copyin(uap->set, &set, sizeof(set)); 1932 if (error) 1933 return (error); 1934 error = kern_sigtimedwait(set, &info, NULL); 1935 if (error) 1936 return (error); 1937 if (uap->info) 1938 error = copyout(&info, uap->info, sizeof(info)); 1939 /* Repost if we got an error. */ 1940 /* 1941 * XXX lwp 1942 * 1943 * This could transform a thread-specific signal to another 1944 * thread / process pending signal. 1945 */ 1946 if (error) { 1947 ksignal(curproc, info.si_signo); 1948 } else { 1949 sysmsg->sysmsg_result = info.si_signo; 1950 } 1951 return (error); 1952 } 1953 1954 /* 1955 * If the current process has received a signal that would interrupt a 1956 * system call, return EINTR or ERESTART as appropriate. 1957 */ 1958 int 1959 iscaught(struct lwp *lp) 1960 { 1961 struct proc *p = lp->lwp_proc; 1962 int sig; 1963 1964 if (p) { 1965 if ((sig = CURSIG(lp)) != 0) { 1966 if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig)) 1967 return (EINTR); 1968 return (ERESTART); 1969 } 1970 } 1971 return(EWOULDBLOCK); 1972 } 1973 1974 /* 1975 * If the current lwp/proc has received a signal (should be caught or cause 1976 * termination, should interrupt current syscall), return the signal number. 1977 * Stop signals with default action are processed immediately, then cleared; 1978 * they aren't returned. This is checked after each entry to the system for 1979 * a syscall or trap (though this can usually be done without calling issignal 1980 * by checking the pending signal masks in the CURSIG macro). 1981 * 1982 * This routine is called via CURSIG/__cursig. We will acquire and release 1983 * p->p_token but if the caller needs to interlock the test the caller must 1984 * also hold p->p_token. 1985 * 1986 * while (sig = CURSIG(curproc)) 1987 * postsig(sig); 1988 */ 1989 int 1990 issignal(struct lwp *lp, int maytrace, int *ptokp) 1991 { 1992 struct proc *p = lp->lwp_proc; 1993 sigset_t mask; 1994 int sig, prop; 1995 int haveptok; 1996 1997 for (;;) { 1998 int traced = (p->p_flags & P_TRACED) || (p->p_stops & S_SIG); 1999 2000 haveptok = 0; 2001 2002 /* 2003 * NOTE: Do not tstop here. Issue the proc_stop() 2004 * so other parties see that we know we need 2005 * to stop, but don't block here. Locks might 2006 * be held. 2007 * 2008 * XXX If this process is supposed to stop, stop this thread. 2009 * removed. 2010 */ 2011 #if 0 2012 if (STOPLWP(p, lp)) { 2013 lwkt_gettoken(&p->p_token); 2014 tstop(); 2015 lwkt_reltoken(&p->p_token); 2016 } 2017 #endif 2018 2019 /* 2020 * Quick check without token 2021 */ 2022 mask = lwp_sigpend(lp); 2023 SIGSETNAND(mask, lp->lwp_sigmask); 2024 if (p->p_flags & P_PPWAIT) 2025 SIG_STOPSIGMASK(mask); 2026 SIG_CONDBLOCKALLSIGS(mask, lp); 2027 2028 if (SIGISEMPTY(mask)) /* no signal to send */ 2029 return (0); 2030 2031 /* 2032 * If the signal is a member of the process signal set 2033 * we need p_token (even if it is also a member of the 2034 * lwp signal set). 2035 */ 2036 sig = sig_ffs(&mask); 2037 if (SIGISMEMBER(p->p_siglist, sig)) { 2038 /* 2039 * Recheck with token 2040 */ 2041 haveptok = 1; 2042 lwkt_gettoken(&p->p_token); 2043 2044 mask = lwp_sigpend(lp); 2045 SIGSETNAND(mask, lp->lwp_sigmask); 2046 if (p->p_flags & P_PPWAIT) 2047 SIG_STOPSIGMASK(mask); 2048 if (SIGISEMPTY(mask)) { /* no signal to send */ 2049 /* haveptok is TRUE */ 2050 lwkt_reltoken(&p->p_token); 2051 return (0); 2052 } 2053 sig = sig_ffs(&mask); 2054 } 2055 2056 STOPEVENT(p, S_SIG, sig); 2057 2058 /* 2059 * We should see pending but ignored signals 2060 * only if P_TRACED was on when they were posted. 2061 */ 2062 if (SIGISMEMBER(p->p_sigignore, sig) && (traced == 0)) { 2063 spin_lock(&lp->lwp_spin); 2064 lwp_delsig(lp, sig, haveptok); 2065 spin_unlock(&lp->lwp_spin); 2066 if (haveptok) 2067 lwkt_reltoken(&p->p_token); 2068 continue; 2069 } 2070 if (maytrace && 2071 (p->p_flags & P_TRACED) && 2072 (p->p_flags & P_PPWAIT) == 0) { 2073 /* 2074 * If traced, always stop, and stay stopped until 2075 * released by the parent. 2076 * 2077 * NOTE: SSTOP may get cleared during the loop, but 2078 * we do not re-notify the parent if we have 2079 * to loop several times waiting for the parent 2080 * to let us continue. XXX not sure if this is 2081 * still true 2082 * 2083 * NOTE: Do not tstop here. Issue the proc_stop() 2084 * so other parties see that we know we need 2085 * to stop, but don't block here. Locks might 2086 * be held. 2087 */ 2088 if (haveptok == 0) { 2089 lwkt_gettoken(&p->p_token); 2090 haveptok = 1; 2091 } 2092 p->p_xstat = sig; 2093 proc_stop(p, SSTOP); 2094 2095 /* 2096 * Normally we don't stop until we return to userland, but 2097 * make an exception when tracing and 'maytrace' is asserted. 2098 */ 2099 if (p->p_flags & P_TRACED) 2100 tstop(); 2101 2102 /* 2103 * If parent wants us to take the signal, 2104 * then it will leave it in p->p_xstat; 2105 * otherwise we just look for signals again. 2106 */ 2107 spin_lock(&lp->lwp_spin); 2108 lwp_delsig(lp, sig, 1); /* clear old signal */ 2109 spin_unlock(&lp->lwp_spin); 2110 sig = p->p_xstat; 2111 if (sig == 0) { 2112 /* haveptok is TRUE */ 2113 lwkt_reltoken(&p->p_token); 2114 continue; 2115 } 2116 2117 /* 2118 * Put the new signal into p_siglist. If the 2119 * signal is being masked, look for other signals. 2120 * 2121 * XXX lwp might need a call to ksignal() 2122 */ 2123 SIGADDSET_ATOMIC(p->p_siglist, sig); 2124 if (SIGISMEMBER(lp->lwp_sigmask, sig)) { 2125 /* haveptok is TRUE */ 2126 lwkt_reltoken(&p->p_token); 2127 continue; 2128 } 2129 2130 /* 2131 * If the traced bit got turned off, go back up 2132 * to the top to rescan signals. This ensures 2133 * that p_sig* and ps_sigact are consistent. 2134 */ 2135 if ((p->p_flags & P_TRACED) == 0) { 2136 /* haveptok is TRUE */ 2137 lwkt_reltoken(&p->p_token); 2138 continue; 2139 } 2140 } 2141 2142 /* 2143 * p_token may be held here 2144 */ 2145 prop = sigprop(sig); 2146 2147 /* 2148 * Decide whether the signal should be returned. 2149 * Return the signal's number, or fall through 2150 * to clear it from the pending mask. 2151 */ 2152 switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) { 2153 case (intptr_t)SIG_DFL: 2154 /* 2155 * Don't take default actions on system processes. 2156 */ 2157 if (p->p_pid <= 1) { 2158 #ifdef DIAGNOSTIC 2159 /* 2160 * Are you sure you want to ignore SIGSEGV 2161 * in init? XXX 2162 */ 2163 kprintf("Process (pid %lu) got signal %d\n", 2164 (u_long)p->p_pid, sig); 2165 #endif 2166 break; /* == ignore */ 2167 } 2168 2169 /* 2170 * Handle the in-kernel checkpoint action 2171 */ 2172 if (prop & SA_CKPT) { 2173 if (haveptok == 0) { 2174 lwkt_gettoken(&p->p_token); 2175 haveptok = 1; 2176 } 2177 checkpoint_signal_handler(lp); 2178 break; 2179 } 2180 2181 /* 2182 * If there is a pending stop signal to process 2183 * with default action, stop here, 2184 * then clear the signal. However, 2185 * if process is member of an orphaned 2186 * process group, ignore tty stop signals. 2187 */ 2188 if (prop & SA_STOP) { 2189 if (haveptok == 0) { 2190 lwkt_gettoken(&p->p_token); 2191 haveptok = 1; 2192 } 2193 if (p->p_flags & P_TRACED || 2194 (p->p_pgrp->pg_jobc == 0 && 2195 prop & SA_TTYSTOP)) 2196 break; /* == ignore */ 2197 if ((p->p_flags & P_WEXIT) == 0) { 2198 /* 2199 * NOTE: We do not block here. Issue 2200 * stopthe stop so other parties 2201 * see that we know we need to 2202 * stop. Locks might be held. 2203 */ 2204 p->p_xstat = sig; 2205 proc_stop(p, SSTOP); 2206 2207 #if 0 2208 tstop(); 2209 #endif 2210 } 2211 break; 2212 } else if (prop & SA_IGNORE) { 2213 /* 2214 * Except for SIGCONT, shouldn't get here. 2215 * Default action is to ignore; drop it. 2216 */ 2217 break; /* == ignore */ 2218 } else { 2219 if (ptokp) 2220 *ptokp = haveptok; 2221 else if (haveptok) 2222 lwkt_reltoken(&p->p_token); 2223 return (sig); 2224 } 2225 2226 /*NOTREACHED*/ 2227 2228 case (intptr_t)SIG_IGN: 2229 /* 2230 * Masking above should prevent us ever trying 2231 * to take action on an ignored signal other 2232 * than SIGCONT, unless process is traced. 2233 */ 2234 if ((prop & SA_CONT) == 0 && 2235 (p->p_flags & P_TRACED) == 0) 2236 kprintf("issignal\n"); 2237 break; /* == ignore */ 2238 2239 default: 2240 /* 2241 * This signal has an action, let 2242 * postsig() process it. 2243 */ 2244 if (ptokp) 2245 *ptokp = haveptok; 2246 else if (haveptok) 2247 lwkt_reltoken(&p->p_token); 2248 return (sig); 2249 } 2250 spin_lock(&lp->lwp_spin); 2251 lwp_delsig(lp, sig, haveptok); /* take the signal! */ 2252 spin_unlock(&lp->lwp_spin); 2253 2254 if (haveptok) 2255 lwkt_reltoken(&p->p_token); 2256 } 2257 /* NOTREACHED */ 2258 } 2259 2260 /* 2261 * Take the action for the specified signal from the current set of 2262 * pending signals. 2263 * 2264 * haveptok indicates whether the caller is holding p->p_token. If the 2265 * caller is, we are responsible for releasing it. 2266 * 2267 * This routine can only be called from the top-level trap from usermode. 2268 * It is expecting to be able to modify the top-level stack frame. 2269 */ 2270 void 2271 postsig(int sig, int haveptok) 2272 { 2273 struct lwp *lp = curthread->td_lwp; 2274 struct proc *p = lp->lwp_proc; 2275 struct sigacts *ps = p->p_sigacts; 2276 sig_t action; 2277 sigset_t returnmask; 2278 int code; 2279 2280 KASSERT(sig != 0, ("postsig")); 2281 2282 /* 2283 * If we are a virtual kernel running an emulated user process 2284 * context, switch back to the virtual kernel context before 2285 * trying to post the signal. 2286 */ 2287 if (lp->lwp_vkernel && lp->lwp_vkernel->ve) { 2288 struct trapframe *tf = lp->lwp_md.md_regs; 2289 tf->tf_trapno = 0; 2290 vkernel_trap(lp, tf); 2291 } 2292 2293 KNOTE(&p->p_klist, NOTE_SIGNAL | sig); 2294 2295 spin_lock(&lp->lwp_spin); 2296 lwp_delsig(lp, sig, haveptok); 2297 spin_unlock(&lp->lwp_spin); 2298 action = ps->ps_sigact[_SIG_IDX(sig)]; 2299 #ifdef KTRACE 2300 if (KTRPOINT(lp->lwp_thread, KTR_PSIG)) 2301 ktrpsig(lp, sig, action, lp->lwp_flags & LWP_OLDMASK ? 2302 &lp->lwp_oldsigmask : &lp->lwp_sigmask, 0); 2303 #endif 2304 /* 2305 * We don't need p_token after this point. 2306 */ 2307 if (haveptok) 2308 lwkt_reltoken(&p->p_token); 2309 2310 STOPEVENT(p, S_SIG, sig); 2311 2312 if (action == SIG_DFL) { 2313 /* 2314 * Default action, where the default is to kill 2315 * the process. (Other cases were ignored above.) 2316 */ 2317 sigexit(lp, sig); 2318 /* NOTREACHED */ 2319 } else { 2320 /* 2321 * If we get here, the signal must be caught. 2322 */ 2323 KASSERT(action != SIG_IGN && !SIGISMEMBER(lp->lwp_sigmask, sig), 2324 ("postsig action")); 2325 2326 /* 2327 * Reset the signal handler if asked to 2328 */ 2329 if (SIGISMEMBER(ps->ps_sigreset, sig)) { 2330 /* 2331 * See kern_sigaction() for origin of this code. 2332 */ 2333 SIGDELSET(p->p_sigcatch, sig); 2334 if (sig != SIGCONT && 2335 sigprop(sig) & SA_IGNORE) 2336 SIGADDSET(p->p_sigignore, sig); 2337 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL; 2338 } 2339 2340 /* 2341 * Set the signal mask and calculate the mask to restore 2342 * when the signal function returns. 2343 * 2344 * Special case: user has done a sigsuspend. Here the 2345 * current mask is not of interest, but rather the 2346 * mask from before the sigsuspend is what we want 2347 * restored after the signal processing is completed. 2348 */ 2349 if (lp->lwp_flags & LWP_OLDMASK) { 2350 returnmask = lp->lwp_oldsigmask; 2351 lp->lwp_flags &= ~LWP_OLDMASK; 2352 } else { 2353 returnmask = lp->lwp_sigmask; 2354 } 2355 2356 SIGSETOR(lp->lwp_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]); 2357 if (!SIGISMEMBER(ps->ps_signodefer, sig)) 2358 SIGADDSET(lp->lwp_sigmask, sig); 2359 2360 lp->lwp_ru.ru_nsignals++; 2361 if (lp->lwp_sig != sig) { 2362 code = 0; 2363 } else { 2364 code = lp->lwp_code; 2365 lp->lwp_code = 0; 2366 lp->lwp_sig = 0; 2367 } 2368 (*p->p_sysent->sv_sendsig)(action, sig, &returnmask, code); 2369 } 2370 } 2371 2372 /* 2373 * Kill the current process for stated reason. 2374 */ 2375 void 2376 killproc(struct proc *p, char *why) 2377 { 2378 log(LOG_ERR, "pid %d (%s), uid %d, was killed: %s\n", 2379 p->p_pid, p->p_comm, 2380 p->p_ucred ? p->p_ucred->cr_uid : -1, why); 2381 ksignal(p, SIGKILL); 2382 } 2383 2384 /* 2385 * Force the current process to exit with the specified signal, dumping core 2386 * if appropriate. We bypass the normal tests for masked and caught signals, 2387 * allowing unrecoverable failures to terminate the process without changing 2388 * signal state. Mark the accounting record with the signal termination. 2389 * If dumping core, save the signal number for the debugger. Calls exit and 2390 * does not return. 2391 * 2392 * This routine does not return. 2393 */ 2394 void 2395 sigexit(struct lwp *lp, int sig) 2396 { 2397 struct proc *p = lp->lwp_proc; 2398 2399 lwkt_gettoken(&p->p_token); 2400 p->p_acflag |= AXSIG; 2401 if (sigprop(sig) & SA_CORE) { 2402 lp->lwp_sig = sig; 2403 2404 /* 2405 * All threads must be stopped before we can safely coredump. 2406 * Stop threads using SCORE, which cannot be overridden. 2407 */ 2408 if (p->p_stat != SCORE) { 2409 proc_stop(p, SCORE); 2410 proc_stopwait(p); 2411 2412 if (coredump(lp, sig) == 0) 2413 sig |= WCOREFLAG; 2414 p->p_stat = SSTOP; 2415 } 2416 2417 /* 2418 * Log signals which would cause core dumps 2419 * (Log as LOG_INFO to appease those who don't want 2420 * these messages.) 2421 * XXX : Todo, as well as euid, write out ruid too 2422 */ 2423 if (kern_logsigexit) { 2424 log(LOG_INFO, 2425 "pid %d (%s), uid %d: exited on signal %d%s\n", 2426 p->p_pid, p->p_comm, 2427 p->p_ucred ? p->p_ucred->cr_uid : -1, 2428 sig &~ WCOREFLAG, 2429 sig & WCOREFLAG ? " (core dumped)" : ""); 2430 if (kern_logsigexit > 1) 2431 kprintf("DEBUG - waiting on kern.logsigexit\n"); 2432 while (kern_logsigexit > 1) { 2433 tsleep(&kern_logsigexit, 0, "DEBUG", hz); 2434 } 2435 } 2436 } 2437 lwkt_reltoken(&p->p_token); 2438 exit1(W_EXITCODE(0, sig)); 2439 /* NOTREACHED */ 2440 } 2441 2442 static char corefilename[MAXPATHLEN+1] = {"%N.core"}; 2443 SYSCTL_STRING(_kern, OID_AUTO, corefile, CTLFLAG_RW, corefilename, 2444 sizeof(corefilename), "process corefile name format string"); 2445 2446 /* 2447 * expand_name(name, uid, pid) 2448 * Expand the name described in corefilename, using name, uid, and pid. 2449 * corefilename is a kprintf-like string, with three format specifiers: 2450 * %N name of process ("name") 2451 * %P process id (pid) 2452 * %U user id (uid) 2453 * For example, "%N.core" is the default; they can be disabled completely 2454 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P". 2455 * This is controlled by the sysctl variable kern.corefile (see above). 2456 */ 2457 2458 static char * 2459 expand_name(const char *name, uid_t uid, pid_t pid) 2460 { 2461 char *temp; 2462 char buf[11]; /* Buffer for pid/uid -- max 4B */ 2463 int i, n; 2464 char *format = corefilename; 2465 size_t namelen; 2466 2467 temp = kmalloc(MAXPATHLEN + 1, M_TEMP, M_NOWAIT); 2468 if (temp == NULL) 2469 return NULL; 2470 namelen = strlen(name); 2471 for (i = 0, n = 0; n < MAXPATHLEN && format[i]; i++) { 2472 int l; 2473 switch (format[i]) { 2474 case '%': /* Format character */ 2475 i++; 2476 switch (format[i]) { 2477 case '%': 2478 temp[n++] = '%'; 2479 break; 2480 case 'N': /* process name */ 2481 if ((n + namelen) > MAXPATHLEN) { 2482 log(LOG_ERR, "pid %d (%s), uid (%u): Path `%s%s' is too long\n", 2483 pid, name, uid, temp, name); 2484 kfree(temp, M_TEMP); 2485 return NULL; 2486 } 2487 memcpy(temp+n, name, namelen); 2488 n += namelen; 2489 break; 2490 case 'P': /* process id */ 2491 l = ksprintf(buf, "%u", pid); 2492 if ((n + l) > MAXPATHLEN) { 2493 log(LOG_ERR, "pid %d (%s), uid (%u): Path `%s%s' is too long\n", 2494 pid, name, uid, temp, name); 2495 kfree(temp, M_TEMP); 2496 return NULL; 2497 } 2498 memcpy(temp+n, buf, l); 2499 n += l; 2500 break; 2501 case 'U': /* user id */ 2502 l = ksprintf(buf, "%u", uid); 2503 if ((n + l) > MAXPATHLEN) { 2504 log(LOG_ERR, "pid %d (%s), uid (%u): Path `%s%s' is too long\n", 2505 pid, name, uid, temp, name); 2506 kfree(temp, M_TEMP); 2507 return NULL; 2508 } 2509 memcpy(temp+n, buf, l); 2510 n += l; 2511 break; 2512 default: 2513 log(LOG_ERR, "Unknown format character %c in `%s'\n", format[i], format); 2514 } 2515 break; 2516 default: 2517 temp[n++] = format[i]; 2518 } 2519 } 2520 temp[n] = '\0'; 2521 return temp; 2522 } 2523 2524 /* 2525 * Dump a process' core. The main routine does some 2526 * policy checking, and creates the name of the coredump; 2527 * then it passes on a vnode and a size limit to the process-specific 2528 * coredump routine if there is one; if there _is not_ one, it returns 2529 * ENOSYS; otherwise it returns the error from the process-specific routine. 2530 * 2531 * The parameter `lp' is the lwp which triggered the coredump. 2532 */ 2533 2534 static int 2535 coredump(struct lwp *lp, int sig) 2536 { 2537 struct proc *p = lp->lwp_proc; 2538 struct vnode *vp; 2539 struct ucred *cred = p->p_ucred; 2540 struct flock lf; 2541 struct nlookupdata nd; 2542 struct vattr vattr; 2543 int error, error1; 2544 char *name; /* name of corefile */ 2545 off_t limit; 2546 2547 STOPEVENT(p, S_CORE, 0); 2548 2549 if (((sugid_coredump == 0) && (p->p_flags & P_SUGID)) || 2550 do_coredump == 0) 2551 { 2552 return (EFAULT); 2553 } 2554 2555 /* 2556 * Note that the bulk of limit checking is done after 2557 * the corefile is created. The exception is if the limit 2558 * for corefiles is 0, in which case we don't bother 2559 * creating the corefile at all. This layout means that 2560 * a corefile is truncated instead of not being created, 2561 * if it is larger than the limit. 2562 */ 2563 limit = p->p_rlimit[RLIMIT_CORE].rlim_cur; 2564 if (limit == 0) 2565 return EFBIG; 2566 2567 name = expand_name(p->p_comm, p->p_ucred->cr_uid, p->p_pid); 2568 if (name == NULL) 2569 return (EINVAL); 2570 error = nlookup_init(&nd, name, UIO_SYSSPACE, NLC_LOCKVP); 2571 if (error == 0) 2572 error = vn_open(&nd, NULL, 2573 O_CREAT | FWRITE | O_NOFOLLOW, 2574 S_IRUSR | S_IWUSR); 2575 kfree(name, M_TEMP); 2576 if (error) { 2577 nlookup_done(&nd); 2578 return (error); 2579 } 2580 vp = nd.nl_open_vp; 2581 nd.nl_open_vp = NULL; 2582 nlookup_done(&nd); 2583 2584 vn_unlock(vp); 2585 lf.l_whence = SEEK_SET; 2586 lf.l_start = 0; 2587 lf.l_len = 0; 2588 lf.l_type = F_WRLCK; 2589 error = VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, 0); 2590 if (error) 2591 goto out2; 2592 2593 /* Don't dump to non-regular files or files with links. */ 2594 if (vp->v_type != VREG || 2595 VOP_GETATTR(vp, &vattr) || vattr.va_nlink != 1) { 2596 error = EFAULT; 2597 goto out1; 2598 } 2599 2600 /* Don't dump to files current user does not own */ 2601 if (vattr.va_uid != p->p_ucred->cr_uid) { 2602 error = EFAULT; 2603 goto out1; 2604 } 2605 2606 VATTR_NULL(&vattr); 2607 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2608 vattr.va_size = 0; 2609 VOP_SETATTR(vp, &vattr, cred); 2610 p->p_acflag |= ACORE; 2611 vn_unlock(vp); 2612 2613 error = p->p_sysent->sv_coredump ? 2614 p->p_sysent->sv_coredump(lp, sig, vp, limit) : ENOSYS; 2615 2616 out1: 2617 lf.l_type = F_UNLCK; 2618 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, 0); 2619 out2: 2620 error1 = vn_close(vp, FWRITE, NULL); 2621 if (error == 0) 2622 error = error1; 2623 return (error); 2624 } 2625 2626 /* 2627 * Nonexistent system call-- signal process (may want to handle it). 2628 * Flag error in case process won't see signal immediately (blocked or ignored). 2629 * 2630 * MPALMOSTSAFE 2631 */ 2632 /* ARGSUSED */ 2633 int 2634 sys_nosys(struct sysmsg *sysmsg, const struct nosys_args *args) 2635 { 2636 lwpsignal(curproc, curthread->td_lwp, SIGSYS); 2637 return (EINVAL); 2638 } 2639 2640 /* 2641 * Send a SIGIO or SIGURG signal to a process or process group using 2642 * stored credentials rather than those of the current process. 2643 */ 2644 void 2645 pgsigio(struct sigio *sigio, int sig, int checkctty) 2646 { 2647 if (sigio == NULL) 2648 return; 2649 2650 if (sigio->sio_pgid > 0) { 2651 if (CANSIGIO(sigio->sio_ruid, sigio->sio_ucred, 2652 sigio->sio_proc)) 2653 ksignal(sigio->sio_proc, sig); 2654 } else if (sigio->sio_pgid < 0) { 2655 struct proc *p; 2656 struct pgrp *pg = sigio->sio_pgrp; 2657 2658 /* 2659 * Must interlock all signals against fork 2660 */ 2661 pgref(pg); 2662 lockmgr(&pg->pg_lock, LK_EXCLUSIVE); 2663 LIST_FOREACH(p, &pg->pg_members, p_pglist) { 2664 if (CANSIGIO(sigio->sio_ruid, sigio->sio_ucred, p) && 2665 (checkctty == 0 || (p->p_flags & P_CONTROLT))) 2666 ksignal(p, sig); 2667 } 2668 lockmgr(&pg->pg_lock, LK_RELEASE); 2669 pgrel(pg); 2670 } 2671 } 2672 2673 static int 2674 filt_sigattach(struct knote *kn) 2675 { 2676 struct proc *p = curproc; 2677 2678 kn->kn_ptr.p_proc = p; 2679 kn->kn_flags |= EV_CLEAR; /* automatically set */ 2680 2681 /* XXX lock the proc here while adding to the list? */ 2682 knote_insert(&p->p_klist, kn); 2683 2684 return (0); 2685 } 2686 2687 static void 2688 filt_sigdetach(struct knote *kn) 2689 { 2690 struct proc *p = kn->kn_ptr.p_proc; 2691 2692 knote_remove(&p->p_klist, kn); 2693 } 2694 2695 /* 2696 * signal knotes are shared with proc knotes, so we apply a mask to 2697 * the hint in order to differentiate them from process hints. This 2698 * could be avoided by using a signal-specific knote list, but probably 2699 * isn't worth the trouble. 2700 */ 2701 static int 2702 filt_signal(struct knote *kn, long hint) 2703 { 2704 if (hint & NOTE_SIGNAL) { 2705 hint &= ~NOTE_SIGNAL; 2706 2707 if (kn->kn_id == hint) 2708 kn->kn_data++; 2709 } 2710 return (kn->kn_data != 0); 2711 } 2712