1 /* $NetBSD: kern_sig.c,v 1.184 2003/12/24 22:53:59 manu Exp $ */ 2 3 /* 4 * Copyright (c) 1982, 1986, 1989, 1991, 1993 5 * The Regents of the University of California. All rights reserved. 6 * (c) UNIX System Laboratories, Inc. 7 * All or some portions of this file are derived from material licensed 8 * to the University of California by American Telephone and Telegraph 9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 10 * the permission of UNIX System Laboratories, Inc. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * @(#)kern_sig.c 8.14 (Berkeley) 5/14/95 37 */ 38 39 #include <sys/cdefs.h> 40 __KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.184 2003/12/24 22:53:59 manu Exp $"); 41 42 #include "opt_ktrace.h" 43 #include "opt_compat_sunos.h" 44 #include "opt_compat_netbsd.h" 45 #include "opt_compat_netbsd32.h" 46 47 #define SIGPROP /* include signal properties table */ 48 #include <sys/param.h> 49 #include <sys/signalvar.h> 50 #include <sys/resourcevar.h> 51 #include <sys/namei.h> 52 #include <sys/vnode.h> 53 #include <sys/proc.h> 54 #include <sys/systm.h> 55 #include <sys/timeb.h> 56 #include <sys/times.h> 57 #include <sys/buf.h> 58 #include <sys/acct.h> 59 #include <sys/file.h> 60 #include <sys/kernel.h> 61 #include <sys/wait.h> 62 #include <sys/ktrace.h> 63 #include <sys/syslog.h> 64 #include <sys/stat.h> 65 #include <sys/core.h> 66 #include <sys/filedesc.h> 67 #include <sys/malloc.h> 68 #include <sys/pool.h> 69 #include <sys/ucontext.h> 70 #include <sys/sa.h> 71 #include <sys/savar.h> 72 #include <sys/exec.h> 73 74 #include <sys/mount.h> 75 #include <sys/syscallargs.h> 76 77 #include <machine/cpu.h> 78 79 #include <sys/user.h> /* for coredump */ 80 81 #include <uvm/uvm_extern.h> 82 83 static void child_psignal(struct proc *, int); 84 static int build_corename(struct proc *, char [MAXPATHLEN]); 85 static void ksiginfo_exithook(struct proc *, void *); 86 static void ksiginfo_put(struct proc *, const ksiginfo_t *); 87 static ksiginfo_t 
*ksiginfo_get(struct proc *, int);
static void	kpsignal2(struct proc *, const ksiginfo_t *, int);

/* Masks of all continue/stop/unmaskable signals, built once in siginit(). */
sigset_t	contsigmask, stopsigmask, sigcantmask;

struct pool	sigacts_pool;	/* memory pool for sigacts structures */
struct pool	siginfo_pool;	/* memory pool for siginfo structures */
struct pool	ksiginfo_pool;	/* memory pool for ksiginfo structures */

/*
 * Can process p, with pcred pc, send the signal signum to process q?
 *
 * Permission is granted when the sender is root, when any of the
 * sender's real/effective uids matches the target's real/saved or
 * effective uid, or (for SIGCONT only) when both processes belong to
 * the same session.
 */
#define	CANSIGNAL(p, pc, q, signum) \
	((pc)->pc_ucred->cr_uid == 0 || \
	    (pc)->p_ruid == (q)->p_cred->p_ruid || \
	    (pc)->pc_ucred->cr_uid == (q)->p_cred->p_ruid || \
	    (pc)->p_ruid == (q)->p_ucred->cr_uid || \
	    (pc)->pc_ucred->cr_uid == (q)->p_ucred->cr_uid || \
	    ((signum) == SIGCONT && (q)->p_session == (p)->p_session))

/*
 * Remove and return the first ksiginfo element that matches our requested
 * signal, or return NULL if one not found.
 *
 * Runs at splsoftclock() under ps_silock; the caller owns the returned
 * element and is responsible for returning it to ksiginfo_pool.
 */
static ksiginfo_t *
ksiginfo_get(struct proc *p, int signo)
{
	ksiginfo_t *ksi;
	int s;

	s = splsoftclock();
	simple_lock(&p->p_sigctx.ps_silock);
	CIRCLEQ_FOREACH(ksi, &p->p_sigctx.ps_siginfo, ksi_list) {
		if (ksi->ksi_signo == signo) {
			CIRCLEQ_REMOVE(&p->p_sigctx.ps_siginfo, ksi, ksi_list);
			goto out;
		}
	}
	ksi = NULL;
out:
	simple_unlock(&p->p_sigctx.ps_silock);
	splx(s);
	return ksi;
}

/*
 * Append a new ksiginfo element to the list of pending ksiginfo's, if
 * we need to (SA_SIGINFO was requested). We replace non RT signals if
 * they already existed in the queue and we add new entries for RT signals,
 * or for non RT signals with non-existing entries.
 */
static void
ksiginfo_put(struct proc *p, const ksiginfo_t *ksi)
{
	ksiginfo_t *kp;
	struct sigaction *sa = &SIGACTION_PS(p->p_sigacts, ksi->ksi_signo);
	int s;

	/* Nothing to record unless the handler asked for siginfo. */
	if ((sa->sa_flags & SA_SIGINFO) == 0)
		return;

	s = splsoftclock();
	simple_lock(&p->p_sigctx.ps_silock);
#ifdef notyet	/* XXX: QUEUING */
	if (ksi->ksi_signo < SIGRTMIN)
#endif
	{
		/* Non-RT signal: overwrite an already-queued entry in place. */
		CIRCLEQ_FOREACH(kp, &p->p_sigctx.ps_siginfo, ksi_list) {
			if (kp->ksi_signo == ksi->ksi_signo) {
				KSI_COPY(ksi, kp);
				goto out;
			}
		}
	}
	/*
	 * PR_NOWAIT: we may be called from interrupt context, so an
	 * allocation failure silently drops the siginfo (the signal
	 * itself is still pending in ps_siglist).
	 */
	kp = pool_get(&ksiginfo_pool, PR_NOWAIT);
	if (kp == NULL) {
#ifdef DIAGNOSTIC
		printf("Out of memory allocating siginfo for pid %d\n",
		    p->p_pid);
#endif
		goto out;
	}
	*kp = *ksi;
	CIRCLEQ_INSERT_TAIL(&p->p_sigctx.ps_siginfo, kp, ksi_list);
out:
	simple_unlock(&p->p_sigctx.ps_silock);
	splx(s);
}

/*
 * free all pending ksiginfo on exit
 * (registered below as both an exit hook and an exec hook)
 */
static void
ksiginfo_exithook(struct proc *p, void *v)
{
	int s;

	s = splsoftclock();
	simple_lock(&p->p_sigctx.ps_silock);
	while (!CIRCLEQ_EMPTY(&p->p_sigctx.ps_siginfo)) {
		ksiginfo_t *ksi = CIRCLEQ_FIRST(&p->p_sigctx.ps_siginfo);
		CIRCLEQ_REMOVE(&p->p_sigctx.ps_siginfo, ksi, ksi_list);
		pool_put(&ksiginfo_pool, ksi);
	}
	simple_unlock(&p->p_sigctx.ps_silock);
	splx(s);
}

/*
 * Initialize signal-related data structures.
 *
 * Note the ksiginfo pool uses the default (interrupt-safe) allocator,
 * unlike the other two, since ksiginfo_put() may run at interrupt time.
 */
void
signal_init(void)
{
	pool_init(&sigacts_pool, sizeof(struct sigacts), 0, 0, 0, "sigapl",
	    &pool_allocator_nointr);
	pool_init(&siginfo_pool, sizeof(siginfo_t), 0, 0, 0, "siginfo",
	    &pool_allocator_nointr);
	pool_init(&ksiginfo_pool, sizeof(ksiginfo_t), 0, 0, 0, "ksiginfo",
	    NULL);
	exithook_establish(ksiginfo_exithook, NULL);
	exechook_establish(ksiginfo_exithook, NULL);
}

/*
 * Create an initial sigctx structure, using the same signal state
 * as p.
 * If 'share' is set, share the sigctx_proc part, otherwise just
 * copy it from parent.
 */
void
sigactsinit(struct proc *np, struct proc *pp, int share)
{
	struct sigacts *ps;

	if (share) {
		/*
		 * Share the parent's sigacts by reference.
		 * NOTE(review): sa_refcnt is bumped without a lock here;
		 * presumably the caller serializes — confirm.
		 */
		np->p_sigacts = pp->p_sigacts;
		pp->p_sigacts->sa_refcnt++;
	} else {
		/* Private copy; a NULL parent yields a zeroed sigacts. */
		ps = pool_get(&sigacts_pool, PR_WAITOK);
		if (pp)
			memcpy(ps, pp->p_sigacts, sizeof(struct sigacts));
		else
			memset(ps, '\0', sizeof(struct sigacts));
		ps->sa_refcnt = 1;
		np->p_sigacts = ps;
	}
}

/*
 * Make this process not share its sigctx, maintaining all
 * signal state.
 */
void
sigactsunshare(struct proc *p)
{
	struct sigacts *oldps;

	/* Already unshared: nothing to do. */
	if (p->p_sigacts->sa_refcnt == 1)
		return;

	oldps = p->p_sigacts;
	/* Replace with a private copy of the current (shared) state. */
	sigactsinit(p, NULL, 0);

	if (--oldps->sa_refcnt == 0)
		pool_put(&sigacts_pool, oldps);
}

/*
 * Release a sigctx structure.
 */
void
sigactsfree(struct proc *p)
{
	struct sigacts *ps;

	ps = p->p_sigacts;
	if (--ps->sa_refcnt > 0)
		return;

	pool_put(&sigacts_pool, ps);
}

/*
 * Install a new sigaction for signal 'signum' on process 'p' and/or
 * fetch the old one.
 *
 * nsa/osa may each be NULL; tramp/vers describe the userland signal
 * trampoline (vers 0 == legacy kernel-provided trampoline).
 * Returns 0 or EINVAL on a bad signal number, flag set, or
 * trampoline/version combination.
 */
int
sigaction1(struct proc *p, int signum, const struct sigaction *nsa,
	struct sigaction *osa, const void *tramp, int vers)
{
	struct sigacts *ps;
	int prop;

	ps = p->p_sigacts;
	if (signum <= 0 || signum >= NSIG)
		return (EINVAL);

	/*
	 * Trampoline ABI version 0 is reserved for the legacy
	 * kernel-provided on-stack trampoline. Conversely, if we are
	 * using a non-0 ABI version, we must have a trampoline. Only
	 * validate the vers if a new sigaction was supplied. Emulations
	 * use legacy kernel trampolines with version 0, alternatively
	 * check for that too.
	 */
	if ((vers != 0 && tramp == NULL) ||
#ifdef SIGTRAMP_VALID
	    (nsa != NULL &&
	    ((vers == 0) ?
		(p->p_emul->e_sigcode == NULL) :
		!SIGTRAMP_VALID(vers))) ||
#endif
	    (vers == 0 && tramp != NULL))
		return (EINVAL);

	if (osa)
		*osa = SIGACTION_PS(ps, signum);

	if (nsa) {
		if (nsa->sa_flags & ~SA_ALLBITS)
			return (EINVAL);

#ifndef __HAVE_SIGINFO
		/* SA_SIGINFO unsupported on this architecture. */
		if (nsa->sa_flags & SA_SIGINFO)
			return (EINVAL);
#endif

		/* SIGKILL/SIGSTOP actions may not be changed. */
		prop = sigprop[signum];
		if (prop & SA_CANTMASK)
			return (EINVAL);

		(void) splsched();	/* XXXSMP */
		SIGACTION_PS(ps, signum) = *nsa;
		ps->sa_sigdesc[signum].sd_tramp = tramp;
		ps->sa_sigdesc[signum].sd_vers = vers;
		sigminusset(&sigcantmask, &SIGACTION_PS(ps, signum).sa_mask);
		if ((prop & SA_NORESET) != 0)
			SIGACTION_PS(ps, signum).sa_flags &= ~SA_RESETHAND;
		if (signum == SIGCHLD) {
			if (nsa->sa_flags & SA_NOCLDSTOP)
				p->p_flag |= P_NOCLDSTOP;
			else
				p->p_flag &= ~P_NOCLDSTOP;
			if (nsa->sa_flags & SA_NOCLDWAIT) {
				/*
				 * Paranoia: since SA_NOCLDWAIT is implemented
				 * by reparenting the dying child to PID 1 (and
				 * trust it to reap the zombie), PID 1 itself
				 * is forbidden to set SA_NOCLDWAIT.
				 */
				if (p->p_pid == 1)
					p->p_flag &= ~P_NOCLDWAIT;
				else
					p->p_flag |= P_NOCLDWAIT;
			} else
				p->p_flag &= ~P_NOCLDWAIT;
		}
		if ((nsa->sa_flags & SA_NODEFER) == 0)
			sigaddset(&SIGACTION_PS(ps, signum).sa_mask, signum);
		else
			sigdelset(&SIGACTION_PS(ps, signum).sa_mask, signum);
		/*
		 * Set bit in p_sigctx.ps_sigignore for signals that are set to
		 * SIG_IGN, and for signals set to SIG_DFL where the default is
		 * to ignore. However, don't put SIGCONT in
		 * p_sigctx.ps_sigignore, as we have to restart the process.
		 */
		if (nsa->sa_handler == SIG_IGN ||
		    (nsa->sa_handler == SIG_DFL && (prop & SA_IGNORE) != 0)) {
			/* never to be seen again */
			sigdelset(&p->p_sigctx.ps_siglist, signum);
			if (signum != SIGCONT) {
				/* easier in psignal */
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			}
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
		} else {
			sigdelset(&p->p_sigctx.ps_sigignore, signum);
			if (nsa->sa_handler == SIG_DFL)
				sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			else
				sigaddset(&p->p_sigctx.ps_sigcatch, signum);
		}
		(void) spl0();
	}

	return (0);
}

#ifdef COMPAT_16
/*
 * Old __sigaction14 syscall: same as sigaction1() with the legacy
 * kernel trampoline (tramp == NULL, vers == 0).
 */
/* ARGSUSED */
int
compat_16_sys___sigaction14(struct lwp *l, void *v, register_t *retval)
{
	struct compat_16_sys___sigaction14_args /* {
		syscallarg(int) signum;
		syscallarg(const struct sigaction *) nsa;
		syscallarg(struct sigaction *) osa;
	} */ *uap = v;
	struct proc *p;
	struct sigaction nsa, osa;
	int error;

	if (SCARG(uap, nsa)) {
		error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
		if (error)
			return (error);
	}
	p = l->l_proc;
	error = sigaction1(p, SCARG(uap, signum),
	    SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
	    NULL, 0);
	if (error)
		return (error);
	if (SCARG(uap, osa)) {
		error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
		if (error)
			return (error);
	}
	return (0);
}
#endif

/*
 * Modern sigaction syscall carrying an explicit userland trampoline
 * pointer and ABI version.
 */
/* ARGSUSED */
int
sys___sigaction_sigtramp(struct lwp *l, void *v, register_t *retval)
{
	struct sys___sigaction_sigtramp_args /* {
		syscallarg(int) signum;
		syscallarg(const struct sigaction *) nsa;
		syscallarg(struct sigaction *) osa;
		syscallarg(void *) tramp;
		syscallarg(int) vers;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct sigaction nsa, osa;
	int error;

	if (SCARG(uap, nsa)) {
		error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
		if (error)
			return (error);
	}
	error = sigaction1(p, SCARG(uap, signum),
	    SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
	    SCARG(uap, tramp), SCARG(uap, vers));
	if (error)
		return (error);
	if (SCARG(uap, osa)) {
		error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
		if (error)
			return (error);
	}
	return (0);
}

/*
 * Initialize signal state for process 0;
 * set to ignore signals that are ignored by default and disable the signal
 * stack.
443 */ 444 void 445 siginit(struct proc *p) 446 { 447 struct sigacts *ps; 448 int signum, prop; 449 450 ps = p->p_sigacts; 451 sigemptyset(&contsigmask); 452 sigemptyset(&stopsigmask); 453 sigemptyset(&sigcantmask); 454 for (signum = 1; signum < NSIG; signum++) { 455 prop = sigprop[signum]; 456 if (prop & SA_CONT) 457 sigaddset(&contsigmask, signum); 458 if (prop & SA_STOP) 459 sigaddset(&stopsigmask, signum); 460 if (prop & SA_CANTMASK) 461 sigaddset(&sigcantmask, signum); 462 if (prop & SA_IGNORE && signum != SIGCONT) 463 sigaddset(&p->p_sigctx.ps_sigignore, signum); 464 sigemptyset(&SIGACTION_PS(ps, signum).sa_mask); 465 SIGACTION_PS(ps, signum).sa_flags = SA_RESTART; 466 } 467 sigemptyset(&p->p_sigctx.ps_sigcatch); 468 p->p_sigctx.ps_sigwaited = NULL; 469 p->p_flag &= ~P_NOCLDSTOP; 470 471 /* 472 * Reset stack state to the user stack. 473 */ 474 p->p_sigctx.ps_sigstk.ss_flags = SS_DISABLE; 475 p->p_sigctx.ps_sigstk.ss_size = 0; 476 p->p_sigctx.ps_sigstk.ss_sp = 0; 477 478 /* One reference. */ 479 ps->sa_refcnt = 1; 480 } 481 482 /* 483 * Reset signals for an exec of the specified process. 484 */ 485 void 486 execsigs(struct proc *p) 487 { 488 struct sigacts *ps; 489 int signum, prop; 490 491 sigactsunshare(p); 492 493 ps = p->p_sigacts; 494 495 /* 496 * Reset caught signals. Held signals remain held 497 * through p_sigctx.ps_sigmask (unless they were caught, 498 * and are now ignored by default). 
499 */ 500 for (signum = 1; signum < NSIG; signum++) { 501 if (sigismember(&p->p_sigctx.ps_sigcatch, signum)) { 502 prop = sigprop[signum]; 503 if (prop & SA_IGNORE) { 504 if ((prop & SA_CONT) == 0) 505 sigaddset(&p->p_sigctx.ps_sigignore, 506 signum); 507 sigdelset(&p->p_sigctx.ps_siglist, signum); 508 } 509 SIGACTION_PS(ps, signum).sa_handler = SIG_DFL; 510 } 511 sigemptyset(&SIGACTION_PS(ps, signum).sa_mask); 512 SIGACTION_PS(ps, signum).sa_flags = SA_RESTART; 513 } 514 sigemptyset(&p->p_sigctx.ps_sigcatch); 515 p->p_sigctx.ps_sigwaited = NULL; 516 p->p_flag &= ~P_NOCLDSTOP; 517 518 /* 519 * Reset stack state to the user stack. 520 */ 521 p->p_sigctx.ps_sigstk.ss_flags = SS_DISABLE; 522 p->p_sigctx.ps_sigstk.ss_size = 0; 523 p->p_sigctx.ps_sigstk.ss_sp = 0; 524 } 525 526 int 527 sigprocmask1(struct proc *p, int how, const sigset_t *nss, sigset_t *oss) 528 { 529 530 if (oss) 531 *oss = p->p_sigctx.ps_sigmask; 532 533 if (nss) { 534 (void)splsched(); /* XXXSMP */ 535 switch (how) { 536 case SIG_BLOCK: 537 sigplusset(nss, &p->p_sigctx.ps_sigmask); 538 break; 539 case SIG_UNBLOCK: 540 sigminusset(nss, &p->p_sigctx.ps_sigmask); 541 CHECKSIGS(p); 542 break; 543 case SIG_SETMASK: 544 p->p_sigctx.ps_sigmask = *nss; 545 CHECKSIGS(p); 546 break; 547 default: 548 (void)spl0(); /* XXXSMP */ 549 return (EINVAL); 550 } 551 sigminusset(&sigcantmask, &p->p_sigctx.ps_sigmask); 552 (void)spl0(); /* XXXSMP */ 553 } 554 555 return (0); 556 } 557 558 /* 559 * Manipulate signal mask. 560 * Note that we receive new mask, not pointer, 561 * and return old mask as return value; 562 * the library stub does the rest. 
563 */ 564 int 565 sys___sigprocmask14(struct lwp *l, void *v, register_t *retval) 566 { 567 struct sys___sigprocmask14_args /* { 568 syscallarg(int) how; 569 syscallarg(const sigset_t *) set; 570 syscallarg(sigset_t *) oset; 571 } */ *uap = v; 572 struct proc *p; 573 sigset_t nss, oss; 574 int error; 575 576 if (SCARG(uap, set)) { 577 error = copyin(SCARG(uap, set), &nss, sizeof(nss)); 578 if (error) 579 return (error); 580 } 581 p = l->l_proc; 582 error = sigprocmask1(p, SCARG(uap, how), 583 SCARG(uap, set) ? &nss : 0, SCARG(uap, oset) ? &oss : 0); 584 if (error) 585 return (error); 586 if (SCARG(uap, oset)) { 587 error = copyout(&oss, SCARG(uap, oset), sizeof(oss)); 588 if (error) 589 return (error); 590 } 591 return (0); 592 } 593 594 void 595 sigpending1(struct proc *p, sigset_t *ss) 596 { 597 598 *ss = p->p_sigctx.ps_siglist; 599 sigminusset(&p->p_sigctx.ps_sigmask, ss); 600 } 601 602 /* ARGSUSED */ 603 int 604 sys___sigpending14(struct lwp *l, void *v, register_t *retval) 605 { 606 struct sys___sigpending14_args /* { 607 syscallarg(sigset_t *) set; 608 } */ *uap = v; 609 struct proc *p; 610 sigset_t ss; 611 612 p = l->l_proc; 613 sigpending1(p, &ss); 614 return (copyout(&ss, SCARG(uap, set), sizeof(ss))); 615 } 616 617 int 618 sigsuspend1(struct proc *p, const sigset_t *ss) 619 { 620 struct sigacts *ps; 621 622 ps = p->p_sigacts; 623 if (ss) { 624 /* 625 * When returning from sigpause, we want 626 * the old mask to be restored after the 627 * signal handler has finished. Thus, we 628 * save it here and mark the sigctx structure 629 * to indicate this. 
630 */ 631 p->p_sigctx.ps_oldmask = p->p_sigctx.ps_sigmask; 632 p->p_sigctx.ps_flags |= SAS_OLDMASK; 633 (void) splsched(); /* XXXSMP */ 634 p->p_sigctx.ps_sigmask = *ss; 635 CHECKSIGS(p); 636 sigminusset(&sigcantmask, &p->p_sigctx.ps_sigmask); 637 (void) spl0(); /* XXXSMP */ 638 } 639 640 while (tsleep((caddr_t) ps, PPAUSE|PCATCH, "pause", 0) == 0) 641 /* void */; 642 643 /* always return EINTR rather than ERESTART... */ 644 return (EINTR); 645 } 646 647 /* 648 * Suspend process until signal, providing mask to be set 649 * in the meantime. Note nonstandard calling convention: 650 * libc stub passes mask, not pointer, to save a copyin. 651 */ 652 /* ARGSUSED */ 653 int 654 sys___sigsuspend14(struct lwp *l, void *v, register_t *retval) 655 { 656 struct sys___sigsuspend14_args /* { 657 syscallarg(const sigset_t *) set; 658 } */ *uap = v; 659 struct proc *p; 660 sigset_t ss; 661 int error; 662 663 if (SCARG(uap, set)) { 664 error = copyin(SCARG(uap, set), &ss, sizeof(ss)); 665 if (error) 666 return (error); 667 } 668 669 p = l->l_proc; 670 return (sigsuspend1(p, SCARG(uap, set) ? 
&ss : 0)); 671 } 672 673 int 674 sigaltstack1(struct proc *p, const struct sigaltstack *nss, 675 struct sigaltstack *oss) 676 { 677 678 if (oss) 679 *oss = p->p_sigctx.ps_sigstk; 680 681 if (nss) { 682 if (nss->ss_flags & ~SS_ALLBITS) 683 return (EINVAL); 684 685 if (nss->ss_flags & SS_DISABLE) { 686 if (p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK) 687 return (EINVAL); 688 } else { 689 if (nss->ss_size < MINSIGSTKSZ) 690 return (ENOMEM); 691 } 692 p->p_sigctx.ps_sigstk = *nss; 693 } 694 695 return (0); 696 } 697 698 /* ARGSUSED */ 699 int 700 sys___sigaltstack14(struct lwp *l, void *v, register_t *retval) 701 { 702 struct sys___sigaltstack14_args /* { 703 syscallarg(const struct sigaltstack *) nss; 704 syscallarg(struct sigaltstack *) oss; 705 } */ *uap = v; 706 struct proc *p; 707 struct sigaltstack nss, oss; 708 int error; 709 710 if (SCARG(uap, nss)) { 711 error = copyin(SCARG(uap, nss), &nss, sizeof(nss)); 712 if (error) 713 return (error); 714 } 715 p = l->l_proc; 716 error = sigaltstack1(p, 717 SCARG(uap, nss) ? &nss : 0, SCARG(uap, oss) ? 
&oss : 0); 718 if (error) 719 return (error); 720 if (SCARG(uap, oss)) { 721 error = copyout(&oss, SCARG(uap, oss), sizeof(oss)); 722 if (error) 723 return (error); 724 } 725 return (0); 726 } 727 728 /* ARGSUSED */ 729 int 730 sys_kill(struct lwp *l, void *v, register_t *retval) 731 { 732 struct sys_kill_args /* { 733 syscallarg(int) pid; 734 syscallarg(int) signum; 735 } */ *uap = v; 736 struct proc *cp, *p; 737 struct pcred *pc; 738 ksiginfo_t ksi; 739 740 cp = l->l_proc; 741 pc = cp->p_cred; 742 if ((u_int)SCARG(uap, signum) >= NSIG) 743 return (EINVAL); 744 memset(&ksi, 0, sizeof(ksi)); 745 ksi.ksi_signo = SCARG(uap, signum); 746 ksi.ksi_code = SI_USER; 747 ksi.ksi_pid = cp->p_pid; 748 ksi.ksi_uid = cp->p_ucred->cr_uid; 749 if (SCARG(uap, pid) > 0) { 750 /* kill single process */ 751 if ((p = pfind(SCARG(uap, pid))) == NULL) 752 return (ESRCH); 753 if (!CANSIGNAL(cp, pc, p, SCARG(uap, signum))) 754 return (EPERM); 755 if (SCARG(uap, signum)) 756 kpsignal2(p, &ksi, 1); 757 return (0); 758 } 759 switch (SCARG(uap, pid)) { 760 case -1: /* broadcast signal */ 761 return (killpg1(cp, &ksi, 0, 1)); 762 case 0: /* signal own process group */ 763 return (killpg1(cp, &ksi, 0, 0)); 764 default: /* negative explicit process group */ 765 return (killpg1(cp, &ksi, -SCARG(uap, pid), 0)); 766 } 767 /* NOTREACHED */ 768 } 769 770 /* 771 * Common code for kill process group/broadcast kill. 772 * cp is calling process. 
773 */ 774 int 775 killpg1(struct proc *cp, ksiginfo_t *ksi, int pgid, int all) 776 { 777 struct proc *p; 778 struct pcred *pc; 779 struct pgrp *pgrp; 780 int nfound; 781 int signum = ksi->ksi_signo; 782 783 pc = cp->p_cred; 784 nfound = 0; 785 if (all) { 786 /* 787 * broadcast 788 */ 789 proclist_lock_read(); 790 LIST_FOREACH(p, &allproc, p_list) { 791 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM || 792 p == cp || !CANSIGNAL(cp, pc, p, signum)) 793 continue; 794 nfound++; 795 if (signum) 796 kpsignal2(p, ksi, 1); 797 } 798 proclist_unlock_read(); 799 } else { 800 if (pgid == 0) 801 /* 802 * zero pgid means send to my process group. 803 */ 804 pgrp = cp->p_pgrp; 805 else { 806 pgrp = pgfind(pgid); 807 if (pgrp == NULL) 808 return (ESRCH); 809 } 810 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) { 811 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM || 812 !CANSIGNAL(cp, pc, p, signum)) 813 continue; 814 nfound++; 815 if (signum && P_ZOMBIE(p) == 0) 816 kpsignal2(p, ksi, 1); 817 } 818 } 819 return (nfound ? 0 : ESRCH); 820 } 821 822 /* 823 * Send a signal to a process group. 824 */ 825 void 826 gsignal(int pgid, int signum) 827 { 828 ksiginfo_t ksi; 829 memset(&ksi, 0, sizeof(ksi)); 830 ksi.ksi_signo = signum; 831 kgsignal(pgid, &ksi, NULL); 832 } 833 834 void 835 kgsignal(int pgid, ksiginfo_t *ksi, void *data) 836 { 837 struct pgrp *pgrp; 838 839 if (pgid && (pgrp = pgfind(pgid))) 840 kpgsignal(pgrp, ksi, data, 0); 841 } 842 843 /* 844 * Send a signal to a process group. If checktty is 1, 845 * limit to members which have a controlling terminal. 
 */
void
pgsignal(struct pgrp *pgrp, int sig, int checkctty)
{
	ksiginfo_t ksi;
	memset(&ksi, 0, sizeof(ksi));
	ksi.ksi_signo = sig;
	kpgsignal(pgrp, &ksi, NULL, checkctty);
}

void
kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty)
{
	struct proc *p;

	/* NULL pgrp is tolerated and signals nobody. */
	if (pgrp)
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist)
			if (checkctty == 0 || p->p_flag & P_CONTROLT)
				kpsignal(p, ksi, data);
}

/*
 * Send a signal caused by a trap to the current process.
 * If it will be caught immediately, deliver it with correct code.
 * Otherwise, post it normally.
 */
#ifndef __HAVE_SIGINFO
/*
 * On pre-siginfo architectures, the exported trapsignal() takes the old
 * (signum, code) form; it builds a ksiginfo and forwards to the real
 * implementation, which the #define below renames to _trapsignal.
 */
void _trapsignal(struct lwp *, const ksiginfo_t *);
void
trapsignal(struct lwp *l, int signum, u_long code)
{
#define trapsignal _trapsignal
	ksiginfo_t ksi;

	KSI_INIT_TRAP(&ksi);
	ksi.ksi_signo = signum;
	ksi.ksi_trap = (int)code;
	trapsignal(l, &ksi);	/* expands to _trapsignal(l, &ksi) */
}
#endif

void
trapsignal(struct lwp *l, const ksiginfo_t *ksi)
{
	struct proc *p;
	struct sigacts *ps;
	int signum = ksi->ksi_signo;

	KASSERT(KSI_TRAP_P(ksi));

	p = l->l_proc;
	ps = p->p_sigacts;
	/*
	 * Fast path: not traced, caught, and not blocked -> deliver
	 * the handler directly from trap context.
	 */
	if ((p->p_flag & P_TRACED) == 0 &&
	    sigismember(&p->p_sigctx.ps_sigcatch, signum) &&
	    !sigismember(&p->p_sigctx.ps_sigmask, signum)) {
		p->p_stats->p_ru.ru_nsignals++;
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG))
			ktrpsig(p, signum, SIGACTION_PS(ps, signum).sa_handler,
			    &p->p_sigctx.ps_sigmask, ksi);
#endif
		kpsendsig(l, ksi, &p->p_sigctx.ps_sigmask);
		(void) splsched();	/* XXXSMP */
		/* Block the handler's sa_mask for the duration of delivery. */
		sigplusset(&SIGACTION_PS(ps, signum).sa_mask,
		    &p->p_sigctx.ps_sigmask);
		if (SIGACTION_PS(ps, signum).sa_flags & SA_RESETHAND) {
			/* SA_RESETHAND: revert to default after one delivery. */
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
		}
		(void) spl0();	/* XXXSMP */
	} else {
		/* Slow path: post the signal through kpsignal2(). */
		p->p_sigctx.ps_lwp = l->l_lid;
		/* XXX for core dump/debugger */
		p->p_sigctx.ps_signo = ksi->ksi_signo;
		p->p_sigctx.ps_code = ksi->ksi_trap;
		kpsignal2(p, ksi, 1);
	}
}

/*
 * Fill in signal information and signal the parent for a child status change.
 * 'dolock' is passed through to kpsignal2() (0 when the scheduler lock is
 * already held).
 */
static void
child_psignal(struct proc *p, int dolock)
{
	ksiginfo_t ksi;

	(void)memset(&ksi, 0, sizeof(ksi));
	ksi.ksi_signo = SIGCHLD;
	ksi.ksi_code = p->p_xstat == SIGCONT ? CLD_CONTINUED : CLD_STOPPED;
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = p->p_ucred->cr_uid;
	ksi.ksi_status = p->p_xstat;
	ksi.ksi_utime = p->p_stats->p_ru.ru_utime.tv_sec;
	ksi.ksi_stime = p->p_stats->p_ru.ru_stime.tv_sec;
	kpsignal2(p->p_pptr, &ksi, dolock);
}

/*
 * Send the signal to the process.  If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *   o When a stop signal is sent to a sleeping process that takes the
 *     default action, the process is stopped without awakening it.
 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 *     regardless of the signal action (eg, blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 *
 * XXXSMP: Invoked as psignal() or sched_psignal().
 */
void
psignal1(struct proc *p, int signum, int dolock)
{
	ksiginfo_t ksi;

	/* Classic psignal(): no siginfo payload beyond the signal number. */
	memset(&ksi, 0, sizeof(ksi));
	ksi.ksi_signo = signum;
	kpsignal2(p, &ksi, dolock);
}

/*
 * Like psignal1() but with a caller-supplied ksiginfo; if 'data'
 * identifies one of the target's open file objects, record its
 * descriptor number in ksi_fd (used e.g. for SIGIO).
 */
void
kpsignal1(struct proc *p, ksiginfo_t *ksi, void *data, int dolock)
{

	if ((p->p_flag & P_WEXIT) == 0 && data) {
		size_t fd;
		struct filedesc *fdp = p->p_fd;

		ksi->ksi_fd = -1;
		for (fd = 0; fd < fdp->fd_nfiles; fd++) {
			struct file *fp = fdp->fd_ofiles[fd];
			/* XXX: lock? */
			if (fp && fp->f_data == data) {
				ksi->ksi_fd = fd;
				break;
			}
		}
	}
	kpsignal2(p, ksi, dolock);
}

/*
 * Core signal-posting state machine (see the block comment above).
 * 'dolock' != 0 means we must take the scheduler lock ourselves;
 * 0 means the caller already holds it.
 */
static void
kpsignal2(struct proc *p, const ksiginfo_t *ksi, int dolock)
{
	struct lwp *l, *suspended = NULL;
	int s = 0, prop, allsusp;
	sig_t action;
	int signum = ksi->ksi_signo;

#ifdef DIAGNOSTIC
	if (signum <= 0 || signum >= NSIG)
		panic("psignal signal number %d", signum);

	/* XXXSMP: works, but icky */
	if (dolock)
		SCHED_ASSERT_UNLOCKED();
	else
		SCHED_ASSERT_LOCKED();
#endif

	/*
	 * Notify any interested parties in the signal.
	 */
	KNOTE(&p->p_klist, NOTE_SIGNAL | signum);

	prop = sigprop[signum];

	/*
	 * If proc is traced, always give parent a chance.
	 */
	if (p->p_flag & P_TRACED)
		action = SIG_DFL;
	else {
		/*
		 * If the signal is being ignored,
		 * then we forget about it immediately.
		 * (Note: we don't set SIGCONT in p_sigctx.ps_sigignore,
		 * and if it is set to SIG_IGN,
		 * action will be SIG_DFL here.)
		 */
		if (sigismember(&p->p_sigctx.ps_sigignore, signum))
			return;
		if (sigismember(&p->p_sigctx.ps_sigmask, signum))
			action = SIG_HOLD;
		else if (sigismember(&p->p_sigctx.ps_sigcatch, signum))
			action = SIG_CATCH;
		else {
			action = SIG_DFL;

			/* A fatal default action cancels any niceness. */
			if (prop & SA_KILL && p->p_nice > NZERO)
				p->p_nice = NZERO;

			/*
			 * If sending a tty stop signal to a member of an
			 * orphaned process group, discard the signal here if
			 * the action is default; don't stop the process below
			 * if sleeping, and don't clear any pending SIGCONT.
			 */
			if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0)
				return;
		}
	}

	/* Continue and stop signals cancel each other's pending set. */
	if (prop & SA_CONT)
		sigminusset(&stopsigmask, &p->p_sigctx.ps_siglist);

	if (prop & SA_STOP)
		sigminusset(&contsigmask, &p->p_sigctx.ps_siglist);

	/*
	 * If the signal doesn't have SA_CANTMASK (no override for SIGKILL,
	 * please!), check if anything waits on it. If yes, save the
	 * info into provided ps_sigwaited, and wake-up the waiter.
	 * The signal won't be processed further here.
	 */
	if ((prop & SA_CANTMASK) == 0
	    && p->p_sigctx.ps_sigwaited
	    && sigismember(p->p_sigctx.ps_sigwait, signum)
	    && p->p_stat != SSTOP) {
		p->p_sigctx.ps_sigwaited->ksi_info = ksi->ksi_info;
		p->p_sigctx.ps_sigwaited = NULL;
		if (dolock)
			wakeup_one(&p->p_sigctx.ps_sigwait);
		else
			sched_wakeup(&p->p_sigctx.ps_sigwait);
		return;
	}

	sigaddset(&p->p_sigctx.ps_siglist, signum);

	/* CHECKSIGS() is "inlined" here. */
	p->p_sigctx.ps_sigcheck = 1;

	/*
	 * Defer further processing for signals which are held,
	 * except that stopped processes must be continued by SIGCONT.
	 */
	if (action == SIG_HOLD &&
	    ((prop & SA_CONT) == 0 || p->p_stat != SSTOP)) {
		ksiginfo_put(p, ksi);
		return;
	}
	/* XXXSMP: works, but icky */
	if (dolock)
		SCHED_LOCK(s);

	/*
	 * Pick an LWP ('l') to deliver to.  Three cases: scheduler
	 * activations (P_SA), some LWP runnable, or all LWPs asleep or
	 * suspended.
	 */
	if (p->p_flag & P_SA) {
		l = p->p_sa->sa_vp;
		allsusp = 0;
		if (p->p_stat == SACTIVE) {
			KDASSERT(l != NULL);
			if (l->l_flag & L_SA_IDLE) {
				/* wakeup idle LWP */
			} else if (l->l_flag & L_SA_YIELD) {
				/* idle LWP is already waking up */
				goto out;
				/*NOTREACHED*/
			} else {
				if (l->l_stat == LSRUN ||
				    l->l_stat == LSONPROC) {
					signotify(p);
					goto out;
					/*NOTREACHED*/
				}
				if (l->l_stat == LSSLEEP &&
				    l->l_flag & L_SINTR) {
					/* ok to signal vp lwp */
				} else if (signum == SIGKILL) {
					/*
					 * get a suspended lwp from
					 * the cache to send KILL
					 * signal
					 * XXXcl add signal checks at resume points
					 */
					suspended = sa_getcachelwp(p);
					allsusp = 1;
				} else
					l = NULL;
			}
		} else if (p->p_stat == SSTOP) {
			if (l->l_stat != LSSLEEP || (l->l_flag & L_SINTR) == 0)
				l = NULL;
		}
	} else if (p->p_nrlwps > 0 && (p->p_stat != SSTOP)) {
		/*
		 * At least one LWP is running or on a run queue.
		 * The signal will be noticed when one of them returns
		 * to userspace.
		 */
		signotify(p);
		/*
		 * The signal will be noticed very soon.
		 */
		goto out;
		/*NOTREACHED*/
	} else {
		/*
		 * Find out if any of the sleeps are interruptable,
		 * and if all the live LWPs remaining are suspended.
		 */
		allsusp = 1;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l->l_stat == LSSLEEP &&
			    l->l_flag & L_SINTR)
				break;
			if (l->l_stat == LSSUSPENDED)
				suspended = l;
			else if ((l->l_stat != LSZOMB) &&
			    (l->l_stat != LSDEAD))
				allsusp = 0;
		}
	}

	if (p->p_stat == SACTIVE) {

		if (l != NULL && (p->p_flag & P_TRACED))
			goto run;

		/*
		 * If SIGCONT is default (or ignored) and process is
		 * asleep, we are finished; the process should not
		 * be awakened.
		 */
		if ((prop & SA_CONT) && action == SIG_DFL) {
			sigdelset(&p->p_sigctx.ps_siglist, signum);
			goto done;
		}

		/*
		 * When a sleeping process receives a stop
		 * signal, process immediately if possible.
		 */
		if ((prop & SA_STOP) && action == SIG_DFL) {
			/*
			 * If a child holding parent blocked,
			 * stopping could cause deadlock.
			 */
			if (p->p_flag & P_PPWAIT) {
				goto out;
			}
			sigdelset(&p->p_sigctx.ps_siglist, signum);
			p->p_xstat = signum;
			if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) {
				/*
				 * XXXSMP: recursive call; don't lock
				 * the second time around.
				 */
				child_psignal(p, 0);
			}
			proc_stop(p, 1);	/* XXXSMP: recurse? */
			goto done;
		}

		if (l == NULL) {
			/*
			 * Special case: SIGKILL of a process
			 * which is entirely composed of
			 * suspended LWPs should succeed. We
			 * make this happen by unsuspending one of
			 * them.
			 */
			if (allsusp && (signum == SIGKILL))
				lwp_continue(suspended);
			goto done;
		}
		/*
		 * All other (caught or default) signals
		 * cause the process to run.
		 */
		goto runfast;
		/*NOTREACHED*/
	} else if (p->p_stat == SSTOP) {
		/* Process is stopped */
		/*
		 * If traced process is already stopped,
		 * then no further action is necessary.
		 */
		if (p->p_flag & P_TRACED)
			goto done;

		/*
		 * Kill signal always sets processes running,
		 * if possible.
		 */
		if (signum == SIGKILL) {
			l = proc_unstop(p);
			if (l)
				goto runfast;
			goto done;
		}

		if (prop & SA_CONT) {
			/*
			 * If SIGCONT is default (or ignored),
			 * we continue the process but don't
			 * leave the signal in ps_siglist, as
			 * it has no further action. If
			 * SIGCONT is held, we continue the
			 * process and leave the signal in
			 * ps_siglist. If the process catches
			 * SIGCONT, let it handle the signal
			 * itself. If it isn't waiting on an
			 * event, then it goes back to run
			 * state. Otherwise, process goes
			 * back to sleep state.
			 */
			if (action == SIG_DFL)
				sigdelset(&p->p_sigctx.ps_siglist,
				    signum);
			l = proc_unstop(p);
			if (l && (action == SIG_CATCH))
				goto runfast;
			goto out;
		}

		if (prop & SA_STOP) {
			/*
			 * Already stopped, don't need to stop again.
			 * (If we did the shell could get confused.)
			 */
			sigdelset(&p->p_sigctx.ps_siglist, signum);
			goto done;
		}

		/*
		 * If a lwp is sleeping interruptibly, then
		 * wake it up; it will run until the kernel
		 * boundary, where it will stop in issignal(),
		 * since p->p_stat is still SSTOP. When the
		 * process is continued, it will be made
		 * runnable and can look at the signal.
		 */
		if (l)
			goto run;
		goto out;
	} else {
		/* Else what? */
		panic("psignal: Invalid process state %d.", p->p_stat);
	}
	/*NOTREACHED*/

 runfast:
	/* Caught signals carry their siginfo; held until delivery. */
	if (action == SIG_CATCH) {
		ksiginfo_put(p, ksi);
		action = SIG_HOLD;
	}
	/*
	 * Raise priority to at least PUSER.
	 */
	if (l->l_priority > PUSER)
		l->l_priority = PUSER;
 run:
	if (action == SIG_CATCH) {
		ksiginfo_put(p, ksi);
		action = SIG_HOLD;
	}

	setrunnable(l);		/* XXXSMP: recurse? */
 out:
	if (action == SIG_CATCH)
		ksiginfo_put(p, ksi);
 done:
	/* XXXSMP: works, but icky */
	if (dolock)
		SCHED_UNLOCK(s);
}

/*
 * Hand a signal to an LWP for delivery: via an SA upcall for
 * scheduler-activations processes, otherwise via the emulation's
 * sendsig entry point.
 */
void
kpsendsig(struct lwp *l, const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct proc *p = l->l_proc;
	struct lwp *le, *li;
	siginfo_t *si;
	int f;

	if (p->p_flag & P_SA) {

		/* XXXUPSXXX What if not on sa_vp ? */

		/* Temporarily clear L_SA around the upcall setup. */
		f = l->l_flag & L_SA;
		l->l_flag &= ~L_SA;
		si = pool_get(&siginfo_pool, PR_WAITOK);
		si->_info = ksi->ksi_info;
		/* Trap-originated signals report the faulting LWP ('le'). */
		le = li = NULL;
		if (KSI_TRAP_P(ksi))
			le = l;
		else
			li = l;

		sa_upcall(l, SA_UPCALL_SIGNAL | SA_UPCALL_DEFER, le, li,
		    sizeof(siginfo_t), si);
		l->l_flag |= f;
		return;
	}

#ifdef __HAVE_SIGINFO
	(*p->p_emul->e_sendsig)(ksi, mask);
#else
	(*p->p_emul->e_sendsig)(ksi->ksi_signo, mask, KSI_TRAPCODE(ksi));
#endif
}

static __inline int firstsig(const sigset_t *);

/*
 * Return the lowest-numbered signal present in *ss, or 0 if the set
 * is empty.  Scans the sigset one 32-bit word at a time with ffs().
 */
static __inline int
firstsig(const sigset_t *ss)
{
	int sig;

	sig = ffs(ss->__bits[0]);
	if (sig != 0)
		return (sig);
#if NSIG > 33
	sig = ffs(ss->__bits[1]);
	if (sig != 0)
		return (sig + 32);
#endif
#if NSIG > 65
	sig = ffs(ss->__bits[2]);
	if (sig != 0)
		return (sig + 64);
#endif
#if NSIG > 97
	sig = ffs(ss->__bits[3]);
	if (sig != 0)
		return (sig + 96);
#endif
	return (0);
}

/*
 * If the current process has received a signal (should be caught or cause
 * termination, should interrupt current syscall), return the signal number.
 * Stop signals with default action are processed immediately, then cleared;
 * they aren't returned.  This is checked after each entry to the system for
 * a syscall or trap (though this can usually be done without calling issignal
 * by checking the pending signal masks in the CURSIG macro.)  The normal call
 * sequence is
 *
 *	while (signum = CURSIG(curlwp))
 *		postsig(signum);
 */
int
issignal(struct lwp *l)
{
	struct proc *p = l->l_proc;
	int s = 0, signum, prop;
	/*
	 * NOTE(review): dolock is derived from L_SINTR; presumably the
	 * scheduler lock is already held when we are entered from an
	 * interruptible sleep -- confirm against the callers of CURSIG.
	 */
	int dolock = (l->l_flag & L_SINTR) == 0, locked = !dolock;
	sigset_t ss;

	if (l->l_flag & L_SA) {
		struct sadata *sa = p->p_sa;

		/* Bail out if we do not own the virtual processor */
		if (sa->sa_vp != l)
			return 0;
	}

	if (p->p_stat == SSTOP) {
		/*
		 * The process is stopped/stopping. Stop ourselves now that
		 * we're on the kernel/userspace boundary.
		 */
		if (dolock)
			SCHED_LOCK(s);
		l->l_stat = LSSTOP;
		p->p_nrlwps--;
		if (p->p_flag & P_TRACED)
			goto sigtraceswitch;
		else
			goto sigswitch;
	}
	for (;;) {
		sigpending1(p, &ss);
		/* While a vforked child holds the parent, ignore stops. */
		if (p->p_flag & P_PPWAIT)
			sigminusset(&stopsigmask, &ss);
		signum = firstsig(&ss);
		if (signum == 0) {		/* no signal to send */
			p->p_sigctx.ps_sigcheck = 0;
			if (locked && dolock)
				SCHED_LOCK(s);
			return (0);
		}
		/* take the signal! */
		sigdelset(&p->p_sigctx.ps_siglist, signum);

		/*
		 * We should see pending but ignored signals
		 * only if P_TRACED was on when they were posted.
		 */
		if (sigismember(&p->p_sigctx.ps_sigignore, signum) &&
		    (p->p_flag & P_TRACED) == 0)
			continue;

		if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
			/*
			 * If traced, always stop, and stay
			 * stopped until released by the debugger.
			 */
			p->p_xstat = signum;

			/* Emulation-specific handling of signal trace */
			if ((p->p_emul->e_tracesig != NULL) &&
			    ((*p->p_emul->e_tracesig)(p, signum) != 0))
				goto childresumed;

			if ((p->p_flag & P_FSTRACE) == 0)
				child_psignal(p, dolock);
			if (dolock)
				SCHED_LOCK(s);
			proc_stop(p, 1);
		sigtraceswitch:
			mi_switch(l, NULL);
			SCHED_ASSERT_UNLOCKED();
			if (dolock)
				splx(s);
			else
				dolock = 1;

		childresumed:
			/*
			 * If we are no longer being traced, or the parent
			 * didn't give us a signal, look for more signals.
			 */
			if ((p->p_flag & P_TRACED) == 0 || p->p_xstat == 0)
				continue;

			/*
			 * If the new signal is being masked, look for other
			 * signals.
			 */
			signum = p->p_xstat;
			p->p_xstat = 0;
			/*
			 * `p->p_sigctx.ps_siglist |= mask' is done
			 * in setrunnable().
			 */
			if (sigismember(&p->p_sigctx.ps_sigmask, signum))
				continue;
			/* take the signal! */
			sigdelset(&p->p_sigctx.ps_siglist, signum);
		}

		prop = sigprop[signum];

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */
		switch ((long)SIGACTION(p, signum).sa_handler) {

		case (long)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (p->p_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				printf("Process (pid %d) got signal %d\n",
				    p->p_pid, signum);
#endif
				break;		/* == ignore */
			}
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal.  However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				if (p->p_flag & P_TRACED ||
				    (p->p_pgrp->pg_jobc == 0 &&
				    prop & SA_TTYSTOP))
					break;	/* == ignore */
				p->p_xstat = signum;
				if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0)
					child_psignal(p, dolock);
				if (dolock)
					SCHED_LOCK(s);
				proc_stop(p, 1);
			sigswitch:
				mi_switch(l, NULL);
				SCHED_ASSERT_UNLOCKED();
				if (dolock)
					splx(s);
				else
					dolock = 1;
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else
				goto keep;
			/*NOTREACHED*/

		case (long)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
#ifdef DEBUG_ISSIGNAL
			if ((prop & SA_CONT) == 0 &&
			    (p->p_flag & P_TRACED) == 0)
				printf("issignal\n");
#endif
			break;		/* == ignore */

		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			goto keep;
		}
	}
	/* NOTREACHED */

 keep:
	/* leave the signal for later */
	sigaddset(&p->p_sigctx.ps_siglist, signum);
	CHECKSIGS(p);
	if (locked && dolock)
		SCHED_LOCK(s);
	return (signum);
}

/*
 * Put the argument process into the stopped state and notify the parent
 * via wakeup.  Signals are handled elsewhere.  The process must not be
 * on the run queue.
 */
void
proc_stop(struct proc *p, int wakeup)
{
	struct lwp *l;
	struct proc *parent;

	SCHED_ASSERT_LOCKED();

	/* XXX lock process LWP state */
	p->p_flag &= ~P_WAITED;
	p->p_stat = SSTOP;
	parent = p->p_pptr;
	parent->p_nstopchild++;

	if (p->p_flag & P_SA) {
		/*
		 * Only (try to) put the LWP on the VP in stopped
		 * state.
		 * All other LWPs will suspend in sa_setwoken()
		 * because the VP-LWP in stopped state cannot be
		 * repossessed.
		 */
		l = p->p_sa->sa_vp;
		if (l->l_stat == LSONPROC && l->l_cpu == curcpu()) {
			l->l_stat = LSSTOP;
			p->p_nrlwps--;
		} else if (l->l_stat == LSRUN) {
			/* Remove LWP from the run queue */
			remrunqueue(l);
			l->l_stat = LSSTOP;
			p->p_nrlwps--;
		} else if (l->l_stat == LSSLEEP &&
		    l->l_flag & L_SA_IDLE) {
			l->l_flag &= ~L_SA_IDLE;
			l->l_stat = LSSTOP;
		}
		goto out;
	}

	/*
	 * Put as many LWP's as possible in stopped state.
	 * Sleeping ones will notice the stopped state as they try to
	 * return to userspace.
	 */

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if (l->l_stat == LSONPROC) {
			/* XXX SMP this assumes that a LWP that is LSONPROC
			 * is curlwp and hence is about to be mi_switched
			 * away; the only callers of proc_stop() are:
			 * - psignal
			 * - issignal()
			 * For the former, proc_stop() is only called when
			 * no processes are running, so we don't worry.
			 * For the latter, proc_stop() is called right
			 * before mi_switch().
			 */
			l->l_stat = LSSTOP;
			p->p_nrlwps--;
		} else if (l->l_stat == LSRUN) {
			/* Remove LWP from the run queue */
			remrunqueue(l);
			l->l_stat = LSSTOP;
			p->p_nrlwps--;
		} else if ((l->l_stat == LSSLEEP) ||
		    (l->l_stat == LSSUSPENDED) ||
		    (l->l_stat == LSZOMB) ||
		    (l->l_stat == LSDEAD)) {
			/*
			 * Don't do anything; let sleeping LWPs
			 * discover the stopped state of the process
			 * on their way out of the kernel; otherwise,
			 * things like NFS threads that sleep with
			 * locks will block the rest of the system
			 * from getting any work done.
			 *
			 * Suspended/dead/zombie LWPs aren't going
			 * anywhere, so we don't need to touch them.
			 */
		}
#ifdef DIAGNOSTIC
		else {
			panic("proc_stop: process %d lwp %d "
			      "in unstoppable state %d.\n",
			    p->p_pid, l->l_lid, l->l_stat);
		}
#endif
	}

 out:
	/* XXX unlock process LWP state */

	if (wakeup)
		sched_wakeup((caddr_t)p->p_pptr);
}

/*
 * Given a process in state SSTOP, set the state back to SACTIVE and
 * move LSSTOP'd LWPs to LSSLEEP or make them runnable.
 *
 * If no LWPs ended up runnable (and therefore able to take a signal),
 * return a LWP that is sleeping interruptably.  The caller can wake
 * that LWP up to take a signal.
 */
struct lwp *
proc_unstop(struct proc *p)
{
	struct lwp *l, *lr = NULL;
	int cantake = 0;

	SCHED_ASSERT_LOCKED();

	/*
	 * Our caller wants to be informed if there are only sleeping
	 * and interruptable LWPs left after we have run so that it
	 * can invoke setrunnable() if required - return one of the
	 * interruptable LWPs if this is the case.
	 */

	if (!(p->p_flag & P_WAITED))
		p->p_pptr->p_nstopchild--;
	p->p_stat = SACTIVE;
	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if (l->l_stat == LSRUN) {
			/* Already runnable: nothing to wake, clear candidate. */
			lr = NULL;
			cantake = 1;
		}
		if (l->l_stat != LSSTOP)
			continue;

		if (l->l_wchan != NULL) {
			/* Stopped while sleeping: return it to sleep state. */
			l->l_stat = LSSLEEP;
			if ((cantake == 0) && (l->l_flag & L_SINTR)) {
				lr = l;
				cantake = 1;
			}
		} else {
			setrunnable(l);
			lr = NULL;
			cantake = 1;
		}
	}
	if (p->p_flag & P_SA) {
		/* Only consider returning the LWP on the VP. */
		lr = p->p_sa->sa_vp;
		if (lr->l_stat == LSSLEEP) {
			if (lr->l_flag & L_SA_YIELD)
				setrunnable(lr);
			else if (lr->l_flag & L_SINTR)
				return lr;
		}
		return NULL;
	}
	return lr;
}

/*
 * Take the action for the specified signal
 * from the current set of pending signals.
 */
void
postsig(int signum)
{
	struct lwp *l;
	struct proc *p;
	struct sigacts *ps;
	sig_t action;
	sigset_t *returnmask;

	l = curlwp;
	p = l->l_proc;
	ps = p->p_sigacts;
#ifdef DIAGNOSTIC
	if (signum == 0)
		panic("postsig");
#endif

	KERNEL_PROC_LOCK(l);

	/* Remove the signal from the pending set before acting on it. */
	sigdelset(&p->p_sigctx.ps_siglist, signum);
	action = SIGACTION_PS(ps, signum).sa_handler;
	if (action == SIG_DFL) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG))
			ktrpsig(p, signum, action,
			    p->p_sigctx.ps_flags & SAS_OLDMASK ?
			    &p->p_sigctx.ps_oldmask : &p->p_sigctx.ps_sigmask,
			    NULL);
#endif
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		sigexit(l, signum);
		/* NOTREACHED */
	} else {
		ksiginfo_t *ksi;
		/*
		 * If we get here, the signal must be caught.
		 */
#ifdef DIAGNOSTIC
		if (action == SIG_IGN ||
		    sigismember(&p->p_sigctx.ps_sigmask, signum))
			panic("postsig action");
#endif
		/*
		 * Set the new mask value and also defer further
		 * occurrences of this signal.
		 *
		 * Special case: user has done a sigpause.  Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigpause is what we want
		 * restored after the signal processing is completed.
		 */
		if (p->p_sigctx.ps_flags & SAS_OLDMASK) {
			returnmask = &p->p_sigctx.ps_oldmask;
			p->p_sigctx.ps_flags &= ~SAS_OLDMASK;
		} else
			returnmask = &p->p_sigctx.ps_sigmask;
		p->p_stats->p_ru.ru_nsignals++;
		ksi = ksiginfo_get(p, signum);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG))
			ktrpsig(p, signum, action,
			    p->p_sigctx.ps_flags & SAS_OLDMASK ?
			    &p->p_sigctx.ps_oldmask : &p->p_sigctx.ps_sigmask,
			    ksi);
#endif
		if (ksi == NULL) {
			ksiginfo_t ksi1;
			/*
			 * we did not save any siginfo for this, either
			 * because the signal was not caught, or because the
			 * user did not request SA_SIGINFO
			 */
			(void)memset(&ksi1, 0, sizeof(ksi1));
			ksi1.ksi_signo = signum;
			kpsendsig(l, &ksi1, returnmask);
		} else {
			kpsendsig(l, ksi, returnmask);
			/* ksiginfo_get() handed us a pool object; release it. */
			pool_put(&ksiginfo_pool, ksi);
		}
		p->p_sigctx.ps_lwp = 0;
		p->p_sigctx.ps_code = 0;
		p->p_sigctx.ps_signo = 0;
		(void) splsched();	/* XXXSMP */
		/* Block this signal (and sa_mask) for the handler's duration. */
		sigplusset(&SIGACTION_PS(ps, signum).sa_mask,
		    &p->p_sigctx.ps_sigmask);
		if (SIGACTION_PS(ps, signum).sa_flags & SA_RESETHAND) {
			/* SA_RESETHAND: revert to default disposition. */
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				sigaddset(&p->p_sigctx.ps_sigignore, signum);
			SIGACTION_PS(ps, signum).sa_handler = SIG_DFL;
		}
		(void) spl0();		/* XXXSMP */
	}

	KERNEL_PROC_UNLOCK(l);
}

/*
 * Kill the current process for stated reason.  Logs the reason both to
 * the system log and to the controlling terminal before posting SIGKILL.
 */
void
killproc(struct proc *p, const char *why)
{
	log(LOG_ERR, "pid %d was killed: %s\n", p->p_pid, why);
	uprintf("sorry, pid %d was killed: %s\n", p->p_pid, why);
	psignal(p, SIGKILL);
}

/*
 * Force the current process to exit with the specified signal, dumping core
 * if appropriate.  We bypass the normal tests for masked and caught signals,
 * allowing unrecoverable failures to terminate the process without changing
 * signal state.  Mark the accounting record with the signal termination.
 * If dumping core, save the signal number for the debugger.  Calls exit and
 * does not return.
 */

#if defined(DEBUG)
int	kern_logsigexit = 1;	/* not static to make public for sysctl */
#else
int	kern_logsigexit = 0;	/* not static to make public for sysctl */
#endif

static	const char logcoredump[] =
	"pid %d (%s), uid %d: exited on signal %d (core dumped)\n";
static	const char lognocoredump[] =
	"pid %d (%s), uid %d: exited on signal %d (core not dumped, err = %d)\n";

/* Wrapper function for use in p_userret */
static void
lwp_coredump_hook(struct lwp *l, void *arg)
{
	int s;

	/*
	 * Suspend ourselves, so that the kernel stack and therefore
	 * the userland registers saved in the trapframe are around
	 * for coredump() to write them out.
	 */
	KERNEL_PROC_LOCK(l);
	l->l_flag &= ~L_DETACHED;
	SCHED_LOCK(s);
	l->l_stat = LSSUSPENDED;
	l->l_proc->p_nrlwps--;
	/* XXX NJWLWP check if this makes sense here: */
	l->l_proc->p_stats->p_ru.ru_nvcsw++;
	mi_switch(l, NULL);
	SCHED_ASSERT_UNLOCKED();
	splx(s);

	/* We are resumed only after the dump; tear this LWP down. */
	lwp_exit(l);
}

void
sigexit(struct lwp *l, int signum)
{
	struct proc *p;
#if 0
	struct lwp *l2;
#endif
	int error, exitsig;

	p = l->l_proc;

	/*
	 * Don't permit coredump() or exit1() multiple times
	 * in the same process.
	 */
	if (p->p_flag & P_WEXIT) {
		KERNEL_PROC_UNLOCK(l);
		(*p->p_userret)(l, p->p_userret_arg);
	}
	p->p_flag |= P_WEXIT;
	/* We don't want to switch away from exiting. */
	/* XXX multiprocessor: stop LWPs on other processors. */
#if 0
	if (p->p_flag & P_SA) {
		LIST_FOREACH(l2, &p->p_lwps, l_sibling)
			l2->l_flag &= ~L_SA;
		p->p_flag &= ~P_SA;
	}
#endif

	/* Make other LWPs stick around long enough to be dumped */
	p->p_userret = lwp_coredump_hook;
	p->p_userret_arg = NULL;

	exitsig = signum;
	p->p_acflag |= AXSIG;
	if (sigprop[signum] & SA_CORE) {
		p->p_sigctx.ps_signo = signum;
		/* WCOREFLAG marks the wait(2) status as "dumped core". */
		if ((error = coredump(l)) == 0)
			exitsig |= WCOREFLAG;

		if (kern_logsigexit) {
			/* XXX What if we ever have really large UIDs? */
			int uid = p->p_cred && p->p_ucred ?
				(int) p->p_ucred->cr_uid : -1;

			if (error)
				log(LOG_INFO, lognocoredump, p->p_pid,
				    p->p_comm, uid, signum, error);
			else
				log(LOG_INFO, logcoredump, p->p_pid,
				    p->p_comm, uid, signum);
		}

	}

	exit1(l, W_EXITCODE(0, exitsig));
	/* NOTREACHED */
}

/*
 * Dump core, into a file named "progname.core" or "core" (depending on the
 * value of shortcorename), unless the process was setuid/setgid.
 */
int
coredump(struct lwp *l)
{
	struct vnode *vp;
	struct proc *p;
	struct vmspace *vm;
	struct ucred *cred;
	struct nameidata nd;
	struct vattr vattr;
	struct mount *mp;
	int error, error1;
	char name[MAXPATHLEN];

	p = l->l_proc;
	vm = p->p_vmspace;
	cred = p->p_cred->pc_ucred;

	/*
	 * Make sure the process has not set-id, to prevent data leaks.
	 */
	if (p->p_flag & P_SUGID)
		return (EPERM);

	/*
	 * Refuse to core if the data + stack + user size is larger than
	 * the core dump limit.  XXX THIS IS WRONG, because of mapped
	 * data.
	 */
	if (USPACE + ctob(vm->vm_dsize + vm->vm_ssize) >=
	    p->p_rlimit[RLIMIT_CORE].rlim_cur)
		return (EFBIG);		/* better error code? */

restart:
	/*
	 * The core dump will go in the current working directory.  Make
	 * sure that the directory is still there and that the mount flags
	 * allow us to write core dumps there.
	 */
	vp = p->p_cwdi->cwdi_cdir;
	if (vp->v_mount == NULL ||
	    (vp->v_mount->mnt_flag & MNT_NOCOREDUMP) != 0)
		return (EPERM);

	error = build_corename(p, name);
	if (error)
		return error;

	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, p);
	error = vn_open(&nd, O_CREAT | O_NOFOLLOW | FWRITE, S_IRUSR | S_IWUSR);
	if (error)
		return (error);
	vp = nd.ni_vp;

	/* Filesystem may be suspended; close, wait, and retry the open. */
	if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
		VOP_UNLOCK(vp, 0);
		if ((error = vn_close(vp, FWRITE, cred, p)) != 0)
			return (error);
		if ((error = vn_start_write(NULL, &mp,
		    V_WAIT | V_SLEEPONLY | V_PCATCH)) != 0)
			return (error);
		goto restart;
	}

	/* Don't dump to non-regular files or files with links. */
	if (vp->v_type != VREG ||
	    VOP_GETATTR(vp, &vattr, cred, p) || vattr.va_nlink != 1) {
		error = EINVAL;
		goto out;
	}
	/* Truncate any existing core file before writing. */
	VATTR_NULL(&vattr);
	vattr.va_size = 0;
	VOP_LEASE(vp, p, cred, LEASE_WRITE);
	VOP_SETATTR(vp, &vattr, cred, p);
	p->p_acflag |= ACORE;

	/* Now dump the actual core file. */
	error = (*p->p_execsw->es_coredump)(l, vp, cred);
 out:
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp, 0);
	error1 = vn_close(vp, FWRITE, cred, p);
	if (error == 0)
		error = error1;
	return (error);
}

/*
 * Nonexistent system call-- signal process (may want to handle it).
 * Flag error in case process won't see signal immediately (blocked or ignored).
 */
/* ARGSUSED */
int
sys_nosys(struct lwp *l, void *v, register_t *retval)
{
	struct proc *p;

	p = l->l_proc;
	psignal(p, SIGSYS);
	return (ENOSYS);
}

/*
 * Expand the per-process core filename template (p_limit->pl_corename)
 * into 'dst'.  Recognized escapes: %n = process name, %p = pid,
 * %u = login name of the session, %t = process start time (seconds).
 * Any other %-sequence is copied literally.  Returns 0 on success or
 * ENAMETOOLONG if the expansion would not fit in MAXPATHLEN bytes.
 */
static int
build_corename(struct proc *p, char dst[MAXPATHLEN])
{
	const char *s;
	char *d, *end;
	int i;

	for (s = p->p_limit->pl_corename, d = dst, end = d + MAXPATHLEN;
	    *s != '\0'; s++) {
		if (*s == '%') {
			switch (*(s + 1)) {
			case 'n':
				i = snprintf(d, end - d, "%s", p->p_comm);
				break;
			case 'p':
				i = snprintf(d, end - d, "%d", p->p_pid);
				break;
			case 'u':
				i = snprintf(d, end - d, "%.*s",
				    (int)sizeof p->p_pgrp->pg_session->s_login,
				    p->p_pgrp->pg_session->s_login);
				break;
			case 't':
				i = snprintf(d, end - d, "%ld",
				    p->p_stats->p_start.tv_sec);
				break;
			default:
				goto copy;
			}
			d += i;
			s++;
		} else {
 copy:			*d = *s;
			d++;
		}
		/* Overflow check covers both the copy and snprintf paths. */
		if (d >= end)
			return (ENAMETOOLONG);
	}
	*d = '\0';
	return 0;
}

/*
 * Fill in *ucp with the current user context of LWP 'l': link pointer,
 * signal mask, stack description and machine context, setting the
 * corresponding _UC_* flags as each part is filled in.
 */
void
getucontext(struct lwp *l, ucontext_t *ucp)
{
	struct proc *p;

	p = l->l_proc;

	ucp->uc_flags = 0;
	ucp->uc_link = l->l_ctxlink;

	(void)sigprocmask1(p, 0, NULL, &ucp->uc_sigmask);
	ucp->uc_flags |= _UC_SIGMASK;

	/*
	 * The (unsupplied) definition of the `current execution stack'
	 * in the System V Interface Definition appears to allow returning
	 * the main context stack.
	 */
	if ((p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK) == 0) {
		ucp->uc_stack.ss_sp = (void *)USRSTACK;
		ucp->uc_stack.ss_size = ctob(p->p_vmspace->vm_ssize);
		ucp->uc_stack.ss_flags = 0;	/* XXX, def. is Very Fishy */
	} else {
		/* Simply copy alternate signal execution stack. */
		ucp->uc_stack = p->p_sigctx.ps_sigstk;
	}
	ucp->uc_flags |= _UC_STACK;

	cpu_getmcontext(l, &ucp->uc_mcontext, &ucp->uc_flags);
}

/*
 * getcontext(2): copy the current user context out to userland.
 */
/* ARGSUSED */
int
sys_getcontext(struct lwp *l, void *v, register_t *retval)
{
	struct sys_getcontext_args /* {
		syscallarg(struct __ucontext *) ucp;
	} */ *uap = v;
	ucontext_t uc;

	getucontext(l, &uc);

	return (copyout(&uc, SCARG(uap, ucp), sizeof (*SCARG(uap, ucp))));
}

/*
 * Install the user context *ucp on LWP 'l': machine context first (it can
 * fail validation), then the context link and, if supplied, the signal
 * mask.  Returns 0 or an error from cpu_setmcontext().
 */
int
setucontext(struct lwp *l, const ucontext_t *ucp)
{
	struct proc *p;
	int error;

	p = l->l_proc;
	if ((error = cpu_setmcontext(l, &ucp->uc_mcontext, ucp->uc_flags)) != 0)
		return (error);
	l->l_ctxlink = ucp->uc_link;
	/*
	 * We might want to take care of the stack portion here but currently
	 * don't; see the comment in getucontext().
	 */
	if ((ucp->uc_flags & _UC_SIGMASK) != 0)
		sigprocmask1(p, SIG_SETMASK, &ucp->uc_sigmask, NULL);

	return 0;
}

/*
 * setcontext(2): a NULL ucp ends the uc_link chain and exits the process;
 * otherwise install the given context and return via EJUSTRETURN so the
 * restored registers are not clobbered by the normal syscall return path.
 */
/* ARGSUSED */
int
sys_setcontext(struct lwp *l, void *v, register_t *retval)
{
	struct sys_setcontext_args /* {
		syscallarg(const ucontext_t *) ucp;
	} */ *uap = v;
	ucontext_t uc;
	int error;

	if (SCARG(uap, ucp) == NULL)	/* i.e. end of uc_link chain */
		exit1(l, W_EXITCODE(0, 0));
	else if ((error = copyin(SCARG(uap, ucp), &uc, sizeof (uc))) != 0 ||
	    (error = setucontext(l, &uc)) != 0)
		return (error);

	return (EJUSTRETURN);
}

/*
 * sigtimedwait(2) system call, used also for implementation
 * of sigwaitinfo() and sigwait().
 *
 * This only handles single LWP in signal wait. libpthread provides
 * it's own sigtimedwait() wrapper to DTRT WRT individual threads.
2204 */ 2205 int 2206 sys___sigtimedwait(struct lwp *l, void *v, register_t *retval) 2207 { 2208 struct sys___sigtimedwait_args /* { 2209 syscallarg(const sigset_t *) set; 2210 syscallarg(siginfo_t *) info; 2211 syscallarg(struct timespec *) timeout; 2212 } */ *uap = v; 2213 sigset_t *waitset, twaitset; 2214 struct proc *p = l->l_proc; 2215 int error, signum, s; 2216 int timo = 0; 2217 struct timeval tvstart; 2218 struct timespec ts; 2219 ksiginfo_t *ksi; 2220 2221 MALLOC(waitset, sigset_t *, sizeof(sigset_t), M_TEMP, M_WAITOK); 2222 2223 if ((error = copyin(SCARG(uap, set), waitset, sizeof(sigset_t)))) { 2224 FREE(waitset, M_TEMP); 2225 return (error); 2226 } 2227 2228 /* 2229 * Silently ignore SA_CANTMASK signals. psignal1() would 2230 * ignore SA_CANTMASK signals in waitset, we do this 2231 * only for the below siglist check. 2232 */ 2233 sigminusset(&sigcantmask, waitset); 2234 2235 /* 2236 * First scan siglist and check if there is signal from 2237 * our waitset already pending. 2238 */ 2239 twaitset = *waitset; 2240 __sigandset(&p->p_sigctx.ps_siglist, &twaitset); 2241 if ((signum = firstsig(&twaitset))) { 2242 /* found pending signal */ 2243 sigdelset(&p->p_sigctx.ps_siglist, signum); 2244 ksi = ksiginfo_get(p, signum); 2245 if (!ksi) { 2246 /* No queued siginfo, manufacture one */ 2247 ksi = pool_get(&ksiginfo_pool, PR_WAITOK); 2248 KSI_INIT(ksi); 2249 ksi->ksi_info._signo = signum; 2250 ksi->ksi_info._code = SI_USER; 2251 } 2252 2253 goto sig; 2254 } 2255 2256 /* 2257 * Calculate timeout, if it was specified. 2258 */ 2259 if (SCARG(uap, timeout)) { 2260 uint64_t ms; 2261 2262 if ((error = copyin(SCARG(uap, timeout), &ts, sizeof(ts)))) 2263 return (error); 2264 2265 ms = (ts.tv_sec * 1000) + (ts.tv_nsec / 1000000); 2266 timo = mstohz(ms); 2267 if (timo == 0 && ts.tv_sec == 0 && ts.tv_nsec > 0) 2268 timo = 1; 2269 if (timo <= 0) 2270 return (EAGAIN); 2271 2272 /* 2273 * Remember current mono_time, it would be used in 2274 * ECANCELED/ERESTART case. 
2275 */ 2276 s = splclock(); 2277 tvstart = mono_time; 2278 splx(s); 2279 } 2280 2281 /* 2282 * Setup ps_sigwait list. Pass pointer to malloced memory 2283 * here; it's not possible to pass pointer to a structure 2284 * on current process's stack, the current process might 2285 * be swapped out at the time the signal would get delivered. 2286 */ 2287 ksi = pool_get(&ksiginfo_pool, PR_WAITOK); 2288 p->p_sigctx.ps_sigwaited = ksi; 2289 p->p_sigctx.ps_sigwait = waitset; 2290 2291 /* 2292 * Wait for signal to arrive. We can either be woken up or 2293 * time out. 2294 */ 2295 error = tsleep(&p->p_sigctx.ps_sigwait, PPAUSE|PCATCH, "sigwait", timo); 2296 2297 /* 2298 * Need to find out if we woke as a result of lwp_wakeup() 2299 * or a signal outside our wait set. 2300 */ 2301 if (error == EINTR && p->p_sigctx.ps_sigwaited 2302 && !firstsig(&p->p_sigctx.ps_siglist)) { 2303 /* wakeup via _lwp_wakeup() */ 2304 error = ECANCELED; 2305 } else if (!error && p->p_sigctx.ps_sigwaited) { 2306 /* spurious wakeup - arrange for syscall restart */ 2307 error = ERESTART; 2308 goto fail; 2309 } 2310 2311 /* 2312 * On error, clear sigwait indication. psignal1() clears it 2313 * in !error case. 2314 */ 2315 if (error) { 2316 p->p_sigctx.ps_sigwaited = NULL; 2317 2318 /* 2319 * If the sleep was interrupted (either by signal or wakeup), 2320 * update the timeout and copyout new value back. 2321 * It would be used when the syscall would be restarted 2322 * or called again. 
2323 */ 2324 if (timo && (error == ERESTART || error == ECANCELED)) { 2325 struct timeval tvnow, tvtimo; 2326 int err; 2327 2328 s = splclock(); 2329 tvnow = mono_time; 2330 splx(s); 2331 2332 TIMESPEC_TO_TIMEVAL(&tvtimo, &ts); 2333 2334 /* compute how much time has passed since start */ 2335 timersub(&tvnow, &tvstart, &tvnow); 2336 /* substract passed time from timeout */ 2337 timersub(&tvtimo, &tvnow, &tvtimo); 2338 2339 if (tvtimo.tv_sec < 0) { 2340 error = EAGAIN; 2341 goto fail; 2342 } 2343 2344 TIMEVAL_TO_TIMESPEC(&tvtimo, &ts); 2345 2346 /* copy updated timeout to userland */ 2347 if ((err = copyout(&ts, SCARG(uap, timeout), sizeof(ts)))) { 2348 error = err; 2349 goto fail; 2350 } 2351 } 2352 2353 goto fail; 2354 } 2355 2356 /* 2357 * If a signal from the wait set arrived, copy it to userland. 2358 * Copy only the used part of siginfo, the padding part is 2359 * left unchanged (userland is not supposed to touch it anyway). 2360 */ 2361 sig: 2362 error = copyout(&ksi->ksi_info, SCARG(uap, info), sizeof(ksi->ksi_info)); 2363 2364 fail: 2365 FREE(waitset, M_TEMP); 2366 pool_put(&ksiginfo_pool, ksi); 2367 p->p_sigctx.ps_sigwait = NULL; 2368 2369 return (error); 2370 } 2371 2372 /* 2373 * Returns true if signal is ignored or masked for passed process. 
2374 */ 2375 int 2376 sigismasked(struct proc *p, int sig) 2377 { 2378 2379 return (sigismember(&p->p_sigctx.ps_sigignore, sig) || 2380 sigismember(&p->p_sigctx.ps_sigmask, sig)); 2381 } 2382 2383 static int 2384 filt_sigattach(struct knote *kn) 2385 { 2386 struct proc *p = curproc; 2387 2388 kn->kn_ptr.p_proc = p; 2389 kn->kn_flags |= EV_CLEAR; /* automatically set */ 2390 2391 SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext); 2392 2393 return (0); 2394 } 2395 2396 static void 2397 filt_sigdetach(struct knote *kn) 2398 { 2399 struct proc *p = kn->kn_ptr.p_proc; 2400 2401 SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext); 2402 } 2403 2404 /* 2405 * signal knotes are shared with proc knotes, so we apply a mask to 2406 * the hint in order to differentiate them from process hints. This 2407 * could be avoided by using a signal-specific knote list, but probably 2408 * isn't worth the trouble. 2409 */ 2410 static int 2411 filt_signal(struct knote *kn, long hint) 2412 { 2413 2414 if (hint & NOTE_SIGNAL) { 2415 hint &= ~NOTE_SIGNAL; 2416 2417 if (kn->kn_id == hint) 2418 kn->kn_data++; 2419 } 2420 return (kn->kn_data != 0); 2421 } 2422 2423 const struct filterops sig_filtops = { 2424 0, filt_sigattach, filt_sigdetach, filt_signal 2425 }; 2426