/*	$OpenBSD: kern_sig.c,v 1.251 2020/02/21 11:10:23 claudio Exp $	*/
/*	$NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $	*/

/*
 * Copyright (c) 1997 Theo de Raadt. All rights reserved.
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sig.c	8.7 (Berkeley) 4/18/94
 */

#define	SIGPROP		/* include signal properties table */
#include <sys/param.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/queue.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/acct.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/wait.h>
#include <sys/ktrace.h>
#include <sys/stat.h>
#include <sys/core.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/ptrace.h>
#include <sys/sched.h>
#include <sys/user.h>
#include <sys/syslog.h>
#include <sys/ttycom.h>
#include <sys/pledge.h>
#include <sys/witness.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>
#include <machine/tcb.h>

int	filt_sigattach(struct knote *kn);
void	filt_sigdetach(struct knote *kn);
int	filt_signal(struct knote *kn, long hint);

const struct filterops sig_filtops = {
	.f_flags	= 0,
	.f_attach	= filt_sigattach,
	.f_detach	= filt_sigdetach,
	.f_event	= filt_signal,
};

void proc_stop(struct proc *p, int);
void proc_stop_sweep(void *);
void *proc_stop_si;

void postsig(struct proc *, int);
int cansignal(struct proc *, struct process *, int);

struct pool sigacts_pool;	/* memory pool for sigacts structures */

void sigio_del(struct sigiolst *);
void sigio_unlink(struct sigio_ref *, struct sigiolst *);
struct mutex sigio_lock = MUTEX_INITIALIZER(IPL_HIGH);

/*
 * Can thread p send the signal signum to process qr?
 */
int
cansignal(struct proc *p, struct process *qr, int signum)
{
	struct process *pr = p->p_p;
	struct ucred *uc = p->p_ucred;
	struct ucred *quc = qr->ps_ucred;

	if (uc->cr_uid == 0)
		return (1);		/* root can always signal */

	if (pr == qr)
		return (1);		/* process can always signal itself */

	/* optimization: if the same creds then the tests below will pass */
	if (uc == quc)
		return (1);

	if (signum == SIGCONT && qr->ps_session == pr->ps_session)
		return (1);		/* SIGCONT in session */

	/*
	 * Using kill(), only certain signals can be sent to setugid
	 * child processes
	 */
	if (qr->ps_flags & PS_SUGID) {
		switch (signum) {
		case 0:
		case SIGKILL:
		case SIGINT:
		case SIGTERM:
		case SIGALRM:
		case SIGSTOP:
		case SIGTTIN:
		case SIGTTOU:
		case SIGTSTP:
		case SIGHUP:
		case SIGUSR1:
		case SIGUSR2:
			if (uc->cr_ruid == quc->cr_ruid ||
			    uc->cr_uid == quc->cr_ruid)
				return (1);
		}
		return (0);
	}

	if (uc->cr_ruid == quc->cr_ruid ||
	    uc->cr_ruid == quc->cr_svuid ||
	    uc->cr_uid == quc->cr_ruid ||
	    uc->cr_uid == quc->cr_svuid)
		return (1);
	return (0);
}

/*
 * Initialize signal-related data structures.
 */
void
signal_init(void)
{
	proc_stop_si = softintr_establish(IPL_SOFTCLOCK, proc_stop_sweep,
	    NULL);
	if (proc_stop_si == NULL)
		panic("signal_init failed to register softintr");

	pool_init(&sigacts_pool, sizeof(struct sigacts), 0, IPL_NONE,
	    PR_WAITOK, "sigapl", NULL);
}
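
/*
 * Representation note (informational): a sigset_t here is a plain
 * 32-bit mask with one bit per signal, so sigmask(signum) is roughly
 * (1 << (signum - 1)) and all set operations are bitwise.  A minimal
 * sketch of the idiom used throughout this file:
 *
 *	int mask = sigmask(SIGINT) | sigmask(SIGTERM);
 *	if (p->p_sigmask & mask)
 *		... at least one of the two is blocked ...
 */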

/*
 * Create an initial sigacts structure, using the same signal state
 * as pr.
 */
struct sigacts *
sigactsinit(struct process *pr)
{
	struct sigacts *ps;

	ps = pool_get(&sigacts_pool, PR_WAITOK);
	memcpy(ps, pr->ps_sigacts, sizeof(struct sigacts));
	return (ps);
}

/*
 * Initialize a new sigaltstack structure.
 */
void
sigstkinit(struct sigaltstack *ss)
{
	ss->ss_flags = SS_DISABLE;
	ss->ss_size = 0;
	ss->ss_sp = 0;
}

/*
 * Release a sigacts structure.
 */
void
sigactsfree(struct process *pr)
{
	struct sigacts *ps = pr->ps_sigacts;

	pr->ps_sigacts = NULL;

	pool_put(&sigacts_pool, ps);
}

int
sys_sigaction(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigaction_args /* {
		syscallarg(int) signum;
		syscallarg(const struct sigaction *) nsa;
		syscallarg(struct sigaction *) osa;
	} */ *uap = v;
	struct sigaction vec;
#ifdef KTRACE
	struct sigaction ovec;
#endif
	struct sigaction *sa;
	const struct sigaction *nsa;
	struct sigaction *osa;
	struct sigacts *ps = p->p_p->ps_sigacts;
	int signum;
	int bit, error;

	signum = SCARG(uap, signum);
	nsa = SCARG(uap, nsa);
	osa = SCARG(uap, osa);

	if (signum <= 0 || signum >= NSIG ||
	    (nsa && (signum == SIGKILL || signum == SIGSTOP)))
		return (EINVAL);
	sa = &vec;
	if (osa) {
		sa->sa_handler = ps->ps_sigact[signum];
		sa->sa_mask = ps->ps_catchmask[signum];
		bit = sigmask(signum);
		sa->sa_flags = 0;
		if ((ps->ps_sigonstack & bit) != 0)
			sa->sa_flags |= SA_ONSTACK;
		if ((ps->ps_sigintr & bit) == 0)
			sa->sa_flags |= SA_RESTART;
		if ((ps->ps_sigreset & bit) != 0)
			sa->sa_flags |= SA_RESETHAND;
		if ((ps->ps_siginfo & bit) != 0)
			sa->sa_flags |= SA_SIGINFO;
		if (signum == SIGCHLD) {
			if ((ps->ps_flags & SAS_NOCLDSTOP) != 0)
				sa->sa_flags |= SA_NOCLDSTOP;
			if ((ps->ps_flags & SAS_NOCLDWAIT) != 0)
				sa->sa_flags |= SA_NOCLDWAIT;
		}
		if ((sa->sa_mask & bit) == 0)
			sa->sa_flags |= SA_NODEFER;
		sa->sa_mask &= ~bit;
		error = copyout(sa, osa, sizeof (vec));
		if (error)
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ovec = vec;
#endif
	}
	if (nsa) {
		error = copyin(nsa, sa, sizeof (vec));
		if (error)
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrsigaction(p, sa);
#endif
		setsigvec(p, signum, sa);
	}
#ifdef KTRACE
	if (osa && KTRPOINT(p, KTR_STRUCT))
		ktrsigaction(p, &ovec);
#endif
	return (0);
}
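
/*
 * Illustrative userland view of sys_sigaction() above (not kernel
 * code; handler name hypothetical).  The libc sigaction(2) stub passes
 * the pointers through unchanged, so
 *
 *	struct sigaction sa;
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_handler = handler;
 *	sa.sa_flags = SA_RESTART;
 *	sigaction(SIGINT, &sa, NULL);
 *
 * arrives with nsa != NULL and osa == NULL and takes the setsigvec()
 * path below.
 */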

void
setsigvec(struct proc *p, int signum, struct sigaction *sa)
{
	struct sigacts *ps = p->p_p->ps_sigacts;
	int bit;
	int s;

	bit = sigmask(signum);
	/*
	 * Change setting atomically.
	 */
	s = splhigh();
	ps->ps_sigact[signum] = sa->sa_handler;
	if ((sa->sa_flags & SA_NODEFER) == 0)
		sa->sa_mask |= sigmask(signum);
	ps->ps_catchmask[signum] = sa->sa_mask &~ sigcantmask;
	if (signum == SIGCHLD) {
		if (sa->sa_flags & SA_NOCLDSTOP)
			atomic_setbits_int(&ps->ps_flags, SAS_NOCLDSTOP);
		else
			atomic_clearbits_int(&ps->ps_flags, SAS_NOCLDSTOP);
		/*
		 * If the SA_NOCLDWAIT flag is set or the handler
		 * is SIG_IGN we reparent the dying child to PID 1
		 * (init) which will reap the zombie.  Because we use
		 * init to do our dirty work we never set SAS_NOCLDWAIT
		 * for PID 1.
		 * XXX exit1 rework means this is unnecessary?
		 */
		if (initprocess->ps_sigacts != ps &&
		    ((sa->sa_flags & SA_NOCLDWAIT) ||
		    sa->sa_handler == SIG_IGN))
			atomic_setbits_int(&ps->ps_flags, SAS_NOCLDWAIT);
		else
			atomic_clearbits_int(&ps->ps_flags, SAS_NOCLDWAIT);
	}
	if ((sa->sa_flags & SA_RESETHAND) != 0)
		ps->ps_sigreset |= bit;
	else
		ps->ps_sigreset &= ~bit;
	if ((sa->sa_flags & SA_SIGINFO) != 0)
		ps->ps_siginfo |= bit;
	else
		ps->ps_siginfo &= ~bit;
	if ((sa->sa_flags & SA_RESTART) == 0)
		ps->ps_sigintr |= bit;
	else
		ps->ps_sigintr &= ~bit;
	if ((sa->sa_flags & SA_ONSTACK) != 0)
		ps->ps_sigonstack |= bit;
	else
		ps->ps_sigonstack &= ~bit;
	/*
	 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
	 * and for signals set to SIG_DFL where the default is to ignore.
	 * However, don't put SIGCONT in ps_sigignore,
	 * as we have to restart the process.
	 */
	if (sa->sa_handler == SIG_IGN ||
	    (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) {
		atomic_clearbits_int(&p->p_siglist, bit);
		atomic_clearbits_int(&p->p_p->ps_siglist, bit);
		if (signum != SIGCONT)
			ps->ps_sigignore |= bit;	/* easier in psignal */
		ps->ps_sigcatch &= ~bit;
	} else {
		ps->ps_sigignore &= ~bit;
		if (sa->sa_handler == SIG_DFL)
			ps->ps_sigcatch &= ~bit;
		else
			ps->ps_sigcatch |= bit;
	}
	splx(s);
}

/*
 * Initialize signal state for process 0;
 * set to ignore signals that are ignored by default.
 */
void
siginit(struct process *pr)
{
	struct sigacts *ps = pr->ps_sigacts;
	int i;

	for (i = 0; i < NSIG; i++)
		if (sigprop[i] & SA_IGNORE && i != SIGCONT)
			ps->ps_sigignore |= sigmask(i);
	ps->ps_flags = SAS_NOCLDWAIT | SAS_NOCLDSTOP;
}

/*
 * Reset signals for an exec by the specified thread.
 */
void
execsigs(struct proc *p)
{
	struct sigacts *ps;
	int nc, mask;

	ps = p->p_p->ps_sigacts;

	/*
	 * Reset caught signals.  Held signals remain held
	 * through p_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	while (ps->ps_sigcatch) {
		nc = ffs((long)ps->ps_sigcatch);
		mask = sigmask(nc);
		ps->ps_sigcatch &= ~mask;
		if (sigprop[nc] & SA_IGNORE) {
			if (nc != SIGCONT)
				ps->ps_sigignore |= mask;
			atomic_clearbits_int(&p->p_siglist, mask);
			atomic_clearbits_int(&p->p_p->ps_siglist, mask);
		}
		ps->ps_sigact[nc] = SIG_DFL;
	}
	/*
	 * Reset stack state to the user stack.
	 * Clear set of signals caught on the signal stack.
	 */
	sigstkinit(&p->p_sigstk);
	atomic_clearbits_int(&ps->ps_flags, SAS_NOCLDWAIT);
	if (ps->ps_sigact[SIGCHLD] == SIG_IGN)
		ps->ps_sigact[SIGCHLD] = SIG_DFL;
}

/*
 * Manipulate signal mask.
 * Note that we receive new mask, not pointer,
 * and return old mask as return value;
 * the library stub does the rest.
 */
int
sys_sigprocmask(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigprocmask_args /* {
		syscallarg(int) how;
		syscallarg(sigset_t) mask;
	} */ *uap = v;
	int error = 0;
	sigset_t mask;

	*retval = p->p_sigmask;
	mask = SCARG(uap, mask) &~ sigcantmask;

	switch (SCARG(uap, how)) {
	case SIG_BLOCK:
		atomic_setbits_int(&p->p_sigmask, mask);
		break;
	case SIG_UNBLOCK:
		atomic_clearbits_int(&p->p_sigmask, mask);
		break;
	case SIG_SETMASK:
		p->p_sigmask = mask;
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

int
sys_sigpending(struct proc *p, void *v, register_t *retval)
{

	*retval = p->p_siglist | p->p_p->ps_siglist;
	return (0);
}

/*
 * Temporarily replace calling proc's signal mask for the duration of a
 * system call.  Original signal mask will be restored by userret().
 */
void
dosigsuspend(struct proc *p, sigset_t newmask)
{
	KASSERT(p == curproc);

	p->p_oldmask = p->p_sigmask;
	atomic_setbits_int(&p->p_flag, P_SIGSUSPEND);
	p->p_sigmask = newmask;
}

/*
 * Suspend process until signal, providing mask to be set
 * in the meantime.  Note nonstandard calling convention:
 * libc stub passes mask, not pointer, to save a copyin.
 */
int
sys_sigsuspend(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigsuspend_args /* {
		syscallarg(int) mask;
	} */ *uap = v;
	struct process *pr = p->p_p;
	struct sigacts *ps = pr->ps_sigacts;

	dosigsuspend(p, SCARG(uap, mask) &~ sigcantmask);
	while (tsleep_nsec(ps, PPAUSE|PCATCH, "pause", INFSLP) == 0)
		/* void */;
	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}
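
/*
 * Illustrative userland usage of sigsuspend(2) above (not kernel code;
 * names hypothetical): the classic race-free wait blocks the signal,
 * tests state, then atomically unblocks and sleeps:
 *
 *	sigprocmask(SIG_BLOCK, &set, &oset);
 *	while (!flag_set_by_handler)
 *		sigsuspend(&oset);	(returns -1, errno == EINTR)
 *	sigprocmask(SIG_SETMASK, &oset, NULL);
 *
 * The unconditional EINTR return above is what lets the loop re-test
 * its condition after each handler run.
 */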

int
sigonstack(size_t stack)
{
	const struct sigaltstack *ss = &curproc->p_sigstk;

	return (ss->ss_flags & SS_DISABLE ? 0 :
	    (stack - (size_t)ss->ss_sp < ss->ss_size));
}

int
sys_sigaltstack(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigaltstack_args /* {
		syscallarg(const struct sigaltstack *) nss;
		syscallarg(struct sigaltstack *) oss;
	} */ *uap = v;
	struct sigaltstack ss;
	const struct sigaltstack *nss;
	struct sigaltstack *oss;
	int onstack = sigonstack(PROC_STACK(p));
	int error;

	nss = SCARG(uap, nss);
	oss = SCARG(uap, oss);

	if (oss != NULL) {
		ss = p->p_sigstk;
		if (onstack)
			ss.ss_flags |= SS_ONSTACK;
		if ((error = copyout(&ss, oss, sizeof(ss))))
			return (error);
	}
	if (nss == NULL)
		return (0);
	error = copyin(nss, &ss, sizeof(ss));
	if (error)
		return (error);
	if (onstack)
		return (EPERM);
	if (ss.ss_flags & ~SS_DISABLE)
		return (EINVAL);
	if (ss.ss_flags & SS_DISABLE) {
		p->p_sigstk.ss_flags = ss.ss_flags;
		return (0);
	}
	if (ss.ss_size < MINSIGSTKSZ)
		return (ENOMEM);

	error = uvm_map_remap_as_stack(p, (vaddr_t)ss.ss_sp, ss.ss_size);
	if (error)
		return (error);

	p->p_sigstk = ss;
	return (0);
}

int
sys_kill(struct proc *cp, void *v, register_t *retval)
{
	struct sys_kill_args /* {
		syscallarg(int) pid;
		syscallarg(int) signum;
	} */ *uap = v;
	struct process *pr;
	int pid = SCARG(uap, pid);
	int signum = SCARG(uap, signum);
	int error;
	int zombie = 0;

	if ((error = pledge_kill(cp, pid)) != 0)
		return (error);
	if (((u_int)signum) >= NSIG)
		return (EINVAL);
	if (pid > 0) {
		if ((pr = prfind(pid)) == NULL) {
			if ((pr = zombiefind(pid)) == NULL)
				return (ESRCH);
			else
				zombie = 1;
		}
		if (!cansignal(cp, pr, signum))
			return (EPERM);

		/* kill single process */
		if (signum && !zombie)
			prsignal(pr, signum);
		return (0);
	}
	switch (pid) {
	case -1:		/* broadcast signal */
		return (killpg1(cp, signum, 0, 1));
	case 0:			/* signal own process group */
		return (killpg1(cp, signum, 0, 0));
	default:		/* negative explicit process group */
		return (killpg1(cp, signum, -pid, 0));
	}
}

int
sys_thrkill(struct proc *cp, void *v, register_t *retval)
{
	struct sys_thrkill_args /* {
		syscallarg(pid_t) tid;
		syscallarg(int) signum;
		syscallarg(void *) tcb;
	} */ *uap = v;
	struct proc *p;
	int tid = SCARG(uap, tid);
	int signum = SCARG(uap, signum);
	void *tcb;

	if (((u_int)signum) >= NSIG)
		return (EINVAL);
	if (tid > THREAD_PID_OFFSET) {
		if ((p = tfind(tid - THREAD_PID_OFFSET)) == NULL)
			return (ESRCH);

		/* can only kill threads in the same process */
		if (p->p_p != cp->p_p)
			return (ESRCH);
	} else if (tid == 0)
		p = cp;
	else
		return (EINVAL);

	/* optionally require the target thread to have the given tcb addr */
	tcb = SCARG(uap, tcb);
	if (tcb != NULL && tcb != TCB_GET(p))
		return (ESRCH);

	if (signum)
		ptsignal(p, signum, STHREAD);
	return (0);
}
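
/*
 * Recap of the pid encodings handled by sys_kill() above, mirroring
 * kill(2):
 *	pid > 0		signal the process with that pid
 *	pid == 0	signal the caller's own process group
 *	pid == -1	broadcast to every signalable process
 *	pid < -1	signal the process group with pgid == -pid
 * The last three cases all funnel into killpg1() below.
 */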

/*
 * Common code for kill process group/broadcast kill.
 * cp is calling process.
 */
int
killpg1(struct proc *cp, int signum, int pgid, int all)
{
	struct process *pr;
	struct pgrp *pgrp;
	int nfound = 0;

	if (all) {
		/*
		 * broadcast
		 */
		LIST_FOREACH(pr, &allprocess, ps_list) {
			if (pr->ps_pid <= 1 ||
			    pr->ps_flags & (PS_SYSTEM | PS_NOBROADCASTKILL) ||
			    pr == cp->p_p || !cansignal(cp, pr, signum))
				continue;
			nfound++;
			if (signum)
				prsignal(pr, signum);
		}
	} else {
		if (pgid == 0)
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = cp->p_p->ps_pgrp;
		else {
			pgrp = pgfind(pgid);
			if (pgrp == NULL)
				return (ESRCH);
		}
		LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist) {
			if (pr->ps_pid <= 1 || pr->ps_flags & PS_SYSTEM ||
			    !cansignal(cp, pr, signum))
				continue;
			nfound++;
			if (signum)
				prsignal(pr, signum);
		}
	}
	return (nfound ? 0 : ESRCH);
}

#define CANDELIVER(uid, euid, pr) \
	(euid == 0 || \
	(uid) == (pr)->ps_ucred->cr_ruid || \
	(uid) == (pr)->ps_ucred->cr_svuid || \
	(uid) == (pr)->ps_ucred->cr_uid || \
	(euid) == (pr)->ps_ucred->cr_ruid || \
	(euid) == (pr)->ps_ucred->cr_svuid || \
	(euid) == (pr)->ps_ucred->cr_uid)

#define CANSIGIO(cr, pr) \
	CANDELIVER((cr)->cr_ruid, (cr)->cr_uid, (pr))

/*
 * Send a signal to a process group.  If checkctty is 1,
 * limit to members which have a controlling terminal.
 */
void
pgsignal(struct pgrp *pgrp, int signum, int checkctty)
{
	struct process *pr;

	if (pgrp)
		LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist)
			if (checkctty == 0 || pr->ps_flags & PS_CONTROLT)
				prsignal(pr, signum);
}

/*
 * Send a SIGIO or SIGURG signal to a process or process group using stored
 * credentials rather than those of the current process.
 */
void
pgsigio(struct sigio_ref *sir, int sig, int checkctty)
{
	struct process *pr;
	struct sigio *sigio;

	if (sir->sir_sigio == NULL)
		return;

	KERNEL_LOCK();
	mtx_enter(&sigio_lock);
	sigio = sir->sir_sigio;
	if (sigio == NULL)
		goto out;
	if (sigio->sio_pgid > 0) {
		if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc))
			prsignal(sigio->sio_proc, sig);
	} else if (sigio->sio_pgid < 0) {
		LIST_FOREACH(pr, &sigio->sio_pgrp->pg_members, ps_pglist) {
			if (CANSIGIO(sigio->sio_ucred, pr) &&
			    (checkctty == 0 || (pr->ps_flags & PS_CONTROLT)))
				prsignal(pr, sig);
		}
	}
out:
	mtx_leave(&sigio_lock);
	KERNEL_UNLOCK();
}

/*
 * Recalculate the signal mask and reset the signal disposition after
 * usermode frame for delivery is formed.
 */
void
postsig_done(struct proc *p, int signum, struct sigacts *ps)
{
	int mask = sigmask(signum);

	KERNEL_ASSERT_LOCKED();

	p->p_ru.ru_nsignals++;
	atomic_setbits_int(&p->p_sigmask, ps->ps_catchmask[signum]);
	if ((ps->ps_sigreset & mask) != 0) {
		ps->ps_sigcatch &= ~mask;
		if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
			ps->ps_sigignore |= mask;
		ps->ps_sigact[signum] = SIG_DFL;
	}
}
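
/*
 * Delivery protocol note (informational): both delivery paths, namely
 * trapsignal() below for synchronous traps and postsig() further down
 * for everything else, pair sendsig(), which builds the handler frame
 * in userspace, with postsig_done() above, which then applies
 * ps_catchmask and SA_RESETHAND.
 */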

/*
 * Send a signal caused by a trap to the current thread.
 * If it will be caught immediately, deliver it with correct code.
 * Otherwise, post it normally.
 */
void
trapsignal(struct proc *p, int signum, u_long trapno, int code,
    union sigval sigval)
{
	struct process *pr = p->p_p;
	struct sigacts *ps = pr->ps_sigacts;
	int mask;

	switch (signum) {
	case SIGILL:
	case SIGBUS:
	case SIGSEGV:
		pr->ps_acflag |= ATRAP;
		break;
	}

	mask = sigmask(signum);
	if ((pr->ps_flags & PS_TRACED) == 0 &&
	    (ps->ps_sigcatch & mask) != 0 &&
	    (p->p_sigmask & mask) == 0) {
		siginfo_t si;
		initsiginfo(&si, signum, trapno, code, sigval);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG)) {
			ktrpsig(p, signum, ps->ps_sigact[signum],
			    p->p_sigmask, code, &si);
		}
#endif
		sendsig(ps->ps_sigact[signum], signum, p->p_sigmask, &si);
		postsig_done(p, signum, ps);
	} else {
		p->p_sisig = signum;
		p->p_sitrapno = trapno;	/* XXX for core dump/debugger */
		p->p_sicode = code;
		p->p_sigval = sigval;

		/*
		 * Signals like SIGBUS and SIGSEGV should not, when
		 * generated by the kernel, be ignorable or blockable.
		 * If it is and we're not being traced, then just kill
		 * the process.
		 */
		if ((pr->ps_flags & PS_TRACED) == 0 &&
		    (sigprop[signum] & SA_KILL) &&
		    ((p->p_sigmask & mask) || (ps->ps_sigignore & mask)))
			sigexit(p, signum);
		ptsignal(p, signum, STHREAD);
	}
}

/*
 * Send the signal to the process.  If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *   o When a stop signal is sent to a sleeping process that takes the
 *     default action, the process is stopped without awakening it.
 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 *     regardless of the signal action (eg, blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 */
void
psignal(struct proc *p, int signum)
{
	ptsignal(p, signum, SPROCESS);
}

/*
 * type = SPROCESS	process signal, can be diverted (sigwait())
 * type = STHREAD	thread signal, but should be propagated if unhandled
 * type = SPROPAGATED	propagated to this thread, so don't propagate again
 */
void
ptsignal(struct proc *p, int signum, enum signal_type type)
{
	int s, prop;
	sig_t action;
	int mask;
	int *siglist;
	struct process *pr = p->p_p;
	struct proc *q;
	int wakeparent = 0;

	KERNEL_ASSERT_LOCKED();

#ifdef DIAGNOSTIC
	if ((u_int)signum >= NSIG || signum == 0)
		panic("psignal signal number");
#endif

	/* Ignore signal if the target process is exiting */
	if (pr->ps_flags & PS_EXITING)
		return;

	mask = sigmask(signum);

	if (type == SPROCESS) {
		/* Accept SIGKILL to coredumping processes */
		if (pr->ps_flags & PS_COREDUMP && signum == SIGKILL) {
			atomic_setbits_int(&pr->ps_siglist, mask);
			return;
		}

		/*
		 * If the current thread can process the signal
		 * immediately (it's unblocked) then have it take it.
		 */
		q = curproc;
		if (q != NULL && q->p_p == pr && (q->p_flag & P_WEXIT) == 0 &&
		    (q->p_sigmask & mask) == 0)
			p = q;
		else {
			/*
			 * A process-wide signal can be diverted to a
			 * different thread that's in sigwait() for this
			 * signal.  If there isn't such a thread, then
			 * pick a thread that doesn't have it blocked so
			 * that the stop/kill consideration isn't delayed.
			 * Otherwise, mark it pending on the main thread.
			 */
			TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
				/* ignore exiting threads */
				if (q->p_flag & P_WEXIT)
					continue;

				/* skip threads that have the signal blocked */
				if ((q->p_sigmask & mask) != 0)
					continue;

				/* okay, could send to this thread */
				p = q;

				/*
				 * sigsuspend, sigwait, ppoll/pselect, etc?
				 * Definitely go to this thread, as it's
				 * already blocked in the kernel.
				 */
				if (q->p_flag & P_SIGSUSPEND)
					break;
			}
		}
	}

	if (type != SPROPAGATED)
		KNOTE(&pr->ps_klist, NOTE_SIGNAL | signum);

	prop = sigprop[signum];

	/*
	 * If proc is traced, always give parent a chance.
	 */
	if (pr->ps_flags & PS_TRACED) {
		action = SIG_DFL;
	} else {
		/*
		 * If the signal is being ignored,
		 * then we forget about it immediately.
		 * (Note: we don't set SIGCONT in ps_sigignore,
		 * and if it is set to SIG_IGN,
		 * action will be SIG_DFL here.)
		 */
		if (pr->ps_sigacts->ps_sigignore & mask)
			return;
		if (p->p_sigmask & mask) {
			action = SIG_HOLD;
		} else if (pr->ps_sigacts->ps_sigcatch & mask) {
			action = SIG_CATCH;
		} else {
			action = SIG_DFL;

			if (prop & SA_KILL && pr->ps_nice > NZERO)
				pr->ps_nice = NZERO;

			/*
			 * If sending a tty stop signal to a member of an
			 * orphaned process group, discard the signal here if
			 * the action is default; don't stop the process below
			 * if sleeping, and don't clear any pending SIGCONT.
			 */
			if (prop & SA_TTYSTOP && pr->ps_pgrp->pg_jobc == 0)
				return;
		}
	}
	/*
	 * If delivered to process, mark as pending there.  Continue and stop
	 * signals will be propagated to all threads.  So they are always
	 * marked at thread level.
	 */
	siglist = (type == SPROCESS) ? &pr->ps_siglist : &p->p_siglist;
	if (prop & SA_CONT) {
		siglist = &p->p_siglist;
		atomic_clearbits_int(siglist, stopsigmask);
	}
	if (prop & SA_STOP) {
		siglist = &p->p_siglist;
		atomic_clearbits_int(siglist, contsigmask);
		atomic_clearbits_int(&p->p_flag, P_CONTINUED);
	}
	atomic_setbits_int(siglist, mask);

	/*
	 * XXX delay processing of SA_STOP signals unless action == SIG_DFL?
	 */
	if (prop & (SA_CONT | SA_STOP) && type != SPROPAGATED)
		TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link)
			if (q != p)
				ptsignal(q, signum, SPROPAGATED);

	/*
	 * Defer further processing for signals which are held,
	 * except that stopped processes must be continued by SIGCONT.
	 */
	if (action == SIG_HOLD && ((prop & SA_CONT) == 0 || p->p_stat != SSTOP))
		return;

	SCHED_LOCK(s);

	switch (p->p_stat) {

	case SSLEEP:
		/*
		 * If process is sleeping uninterruptibly
		 * we can't interrupt the sleep... the signal will
		 * be noticed when the process returns through
		 * trap() or syscall().
		 */
		if ((p->p_flag & P_SINTR) == 0)
			goto out;
		/*
		 * Process is sleeping and traced... make it runnable
		 * so it can discover the signal in issignal() and stop
		 * for the parent.
		 */
		if (pr->ps_flags & PS_TRACED)
			goto run;
		/*
		 * If SIGCONT is default (or ignored) and process is
		 * asleep, we are finished; the process should not
		 * be awakened.
		 */
		if ((prop & SA_CONT) && action == SIG_DFL) {
			atomic_clearbits_int(siglist, mask);
			goto out;
		}
		/*
		 * When a sleeping process receives a stop
		 * signal, process immediately if possible.
		 */
		if ((prop & SA_STOP) && action == SIG_DFL) {
			/*
			 * If a child holding parent blocked,
			 * stopping could cause deadlock.
			 */
			if (pr->ps_flags & PS_PPWAIT)
				goto out;
			atomic_clearbits_int(siglist, mask);
			pr->ps_xsig = signum;
			proc_stop(p, 0);
			goto out;
		}
		/*
		 * All other (caught or default) signals
		 * cause the process to run.
		 */
		goto runfast;
		/*NOTREACHED*/

	case SSTOP:
		/*
		 * If traced process is already stopped,
		 * then no further action is necessary.
		 */
		if (pr->ps_flags & PS_TRACED)
			goto out;

		/*
		 * Kill signal always sets processes running.
		 */
		if (signum == SIGKILL) {
			atomic_clearbits_int(&p->p_flag, P_SUSPSIG);
			goto runfast;
		}

		if (prop & SA_CONT) {
			/*
			 * If SIGCONT is default (or ignored), we continue the
			 * process but don't leave the signal in p_siglist, as
			 * it has no further action.  If SIGCONT is held, we
			 * continue the process and leave the signal in
			 * p_siglist.  If the process catches SIGCONT, let it
			 * handle the signal itself.  If it isn't waiting on
			 * an event, then it goes back to run state.
			 * Otherwise, process goes back to sleep state.
			 */
			atomic_setbits_int(&p->p_flag, P_CONTINUED);
			atomic_clearbits_int(&p->p_flag, P_SUSPSIG);
			wakeparent = 1;
			if (action == SIG_DFL)
				atomic_clearbits_int(siglist, mask);
			if (action == SIG_CATCH)
				goto runfast;
			if (p->p_wchan == 0)
				goto run;
			p->p_stat = SSLEEP;
			goto out;
		}

		if (prop & SA_STOP) {
			/*
			 * Already stopped, don't need to stop again.
			 * (If we did the shell could get confused.)
			 */
			atomic_clearbits_int(siglist, mask);
			goto out;
		}

		/*
		 * If process is sleeping interruptibly, then simulate a
		 * wakeup so that when it is continued, it will be made
		 * runnable and can look at the signal.  But don't make
		 * the process runnable, leave it stopped.
		 */
		if (p->p_flag & P_SINTR)
			unsleep(p);
		goto out;

	case SONPROC:
		signotify(p);
		/* FALLTHROUGH */
	default:
		/*
		 * SRUN, SIDL, SDEAD do nothing with the signal,
		 * other than kicking ourselves if we are running.
		 * It will either never be noticed, or noticed very soon.
		 */
		goto out;
	}
	/*NOTREACHED*/

runfast:
	/*
	 * Raise priority to at least PUSER.
	 */
	if (p->p_usrpri > PUSER)
		p->p_usrpri = PUSER;
run:
	setrunnable(p);
out:
	SCHED_UNLOCK(s);
	if (wakeparent)
		wakeup(pr->ps_pptr);
}
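
/*
 * Informational summary of ptsignal() above: an interruptibly sleeping
 * thread is made runnable (or stopped in place for a default SA_STOP
 * signal), a stopped thread is resumed by SIGKILL or SIGCONT, and an
 * onproc thread is merely flagged via signotify(); in all remaining
 * states the signal waits to be noticed at the next trap or syscall
 * boundary.
 */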

/*
 * If the current process has received a signal (should be caught or cause
 * termination, should interrupt current syscall), return the signal number.
 * Stop signals with default action are processed immediately, then cleared;
 * they aren't returned.  This is checked after each entry to the system for
 * a syscall or trap (though this can usually be done without calling issignal
 * by checking the pending signal masks in the CURSIG macro.)  The normal call
 * sequence is
 *
 *	while (signum = CURSIG(curproc))
 *		postsig(signum);
 *
 * Assumes that if the P_SINTR flag is set, we're holding both the
 * kernel and scheduler locks.
 */
int
issignal(struct proc *p)
{
	struct process *pr = p->p_p;
	int signum, mask, prop;
	int dolock = (p->p_flag & P_SINTR) == 0;
	int s;

	for (;;) {
		mask = SIGPENDING(p);
		if (pr->ps_flags & PS_PPWAIT)
			mask &= ~stopsigmask;
		if (mask == 0)		/* no signal to send */
			return (0);
		signum = ffs((long)mask);
		mask = sigmask(signum);
		atomic_clearbits_int(&p->p_siglist, mask);
		atomic_clearbits_int(&pr->ps_siglist, mask);

		/*
		 * We should see pending but ignored signals
		 * only if PS_TRACED was on when they were posted.
		 */
		if (mask & pr->ps_sigacts->ps_sigignore &&
		    (pr->ps_flags & PS_TRACED) == 0)
			continue;

		/*
		 * If traced, always stop, and stay stopped until released
		 * by the debugger.  If our parent process is waiting for
		 * us, don't hang as we could deadlock.
		 */
		if (((pr->ps_flags & (PS_TRACED | PS_PPWAIT)) == PS_TRACED) &&
		    signum != SIGKILL) {
			pr->ps_xsig = signum;

			if (dolock)
				KERNEL_LOCK();
			single_thread_set(p, SINGLE_PTRACE, 0);
			if (dolock)
				KERNEL_UNLOCK();

			if (dolock)
				SCHED_LOCK(s);
			proc_stop(p, 1);
			if (dolock)
				SCHED_UNLOCK(s);

			if (dolock)
				KERNEL_LOCK();
			single_thread_clear(p, 0);
			if (dolock)
				KERNEL_UNLOCK();

			/*
			 * If we are no longer being traced, or the parent
			 * didn't give us a signal, look for more signals.
			 */
			if ((pr->ps_flags & PS_TRACED) == 0 ||
			    pr->ps_xsig == 0)
				continue;

			/*
			 * If the new signal is being masked, look for other
			 * signals.
			 */
			signum = pr->ps_xsig;
			mask = sigmask(signum);
			if ((p->p_sigmask & mask) != 0)
				continue;

			/* take the signal! */
			atomic_clearbits_int(&p->p_siglist, mask);
			atomic_clearbits_int(&pr->ps_siglist, mask);
		}

		prop = sigprop[signum];

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */
		switch ((long)pr->ps_sigacts->ps_sigact[signum]) {
		case (long)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (pr->ps_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				printf("Process (pid %d) got signal"
				    " %d\n", pr->ps_pid, signum);
#endif
				break;		/* == ignore */
			}
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal.  However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				if (pr->ps_flags & PS_TRACED ||
				    (pr->ps_pgrp->pg_jobc == 0 &&
				    prop & SA_TTYSTOP))
					break;	/* == ignore */
				pr->ps_xsig = signum;
				if (dolock)
					SCHED_LOCK(s);
				proc_stop(p, 1);
				if (dolock)
					SCHED_UNLOCK(s);
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else
				goto keep;
			/*NOTREACHED*/
		case (long)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
			if ((prop & SA_CONT) == 0 &&
			    (pr->ps_flags & PS_TRACED) == 0)
				printf("issignal\n");
			break;		/* == ignore */
		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			goto keep;
		}
	}
	/* NOTREACHED */

keep:
	atomic_setbits_int(&p->p_siglist, mask);	/* leave the signal for later */
	return (signum);
}

/*
 * Put the argument process into the stopped state and notify the parent
 * via wakeup.  Signals are handled elsewhere.  The process must not be
 * on the run queue.
 */
void
proc_stop(struct proc *p, int sw)
{
	struct process *pr = p->p_p;

#ifdef MULTIPROCESSOR
	SCHED_ASSERT_LOCKED();
#endif

	p->p_stat = SSTOP;
	atomic_clearbits_int(&pr->ps_flags, PS_WAITED);
	atomic_setbits_int(&pr->ps_flags, PS_STOPPED);
	atomic_setbits_int(&p->p_flag, P_SUSPSIG);
	/*
	 * We need this soft interrupt to be handled fast.
	 * Extra calls to softclock don't hurt.
	 */
	softintr_schedule(proc_stop_si);
	if (sw)
		mi_switch();
}

/*
 * Called from a soft interrupt to send signals to the parents of stopped
 * processes.
 * We can't do this in proc_stop because it's called with nasty locks held
 * and we would need recursive scheduler lock to deal with that.
 */
void
proc_stop_sweep(void *v)
{
	struct process *pr;

	LIST_FOREACH(pr, &allprocess, ps_list) {
		if ((pr->ps_flags & PS_STOPPED) == 0)
			continue;
		atomic_clearbits_int(&pr->ps_flags, PS_STOPPED);

		if ((pr->ps_pptr->ps_sigacts->ps_flags & SAS_NOCLDSTOP) == 0)
			prsignal(pr->ps_pptr, SIGCHLD);
		wakeup(pr->ps_pptr);
	}
}
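
/*
 * A minimal sketch of how postsig() below is driven (see userret()
 * later in this file for the real caller):
 *
 *	while ((signum = CURSIG(p)) != 0)
 *		postsig(p, signum);
 */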

/*
 * Take the action for the specified signal
 * from the current set of pending signals.
 */
void
postsig(struct proc *p, int signum)
{
	struct process *pr = p->p_p;
	struct sigacts *ps = pr->ps_sigacts;
	sig_t action;
	u_long trapno;
	int mask, returnmask;
	siginfo_t si;
	union sigval sigval;
	int s, code;

	KASSERT(signum != 0);
	KERNEL_ASSERT_LOCKED();

	mask = sigmask(signum);
	atomic_clearbits_int(&p->p_siglist, mask);
	action = ps->ps_sigact[signum];
	sigval.sival_ptr = 0;

	if (p->p_sisig != signum) {
		trapno = 0;
		code = SI_USER;
		sigval.sival_ptr = 0;
	} else {
		trapno = p->p_sitrapno;
		code = p->p_sicode;
		sigval = p->p_sigval;
	}
	initsiginfo(&si, signum, trapno, code, sigval);

#ifdef KTRACE
	if (KTRPOINT(p, KTR_PSIG)) {
		ktrpsig(p, signum, action, p->p_flag & P_SIGSUSPEND ?
		    p->p_oldmask : p->p_sigmask, code, &si);
	}
#endif
	if (action == SIG_DFL) {
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		sigexit(p, signum);
		/* NOTREACHED */
	} else {
		/*
		 * If we get here, the signal must be caught.
		 */
#ifdef DIAGNOSTIC
		if (action == SIG_IGN || (p->p_sigmask & mask))
			panic("postsig action");
#endif
		/*
		 * Set the new mask value and also defer further
		 * occurrences of this signal.
		 *
		 * Special case: user has done a sigpause.  Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigpause is what we want
		 * restored after the signal processing is completed.
		 */
#ifdef MULTIPROCESSOR
		s = splsched();
#else
		s = splhigh();
#endif
		if (p->p_flag & P_SIGSUSPEND) {
			atomic_clearbits_int(&p->p_flag, P_SIGSUSPEND);
			returnmask = p->p_oldmask;
		} else {
			returnmask = p->p_sigmask;
		}
		if (p->p_sisig == signum) {
			p->p_sisig = 0;
			p->p_sitrapno = 0;
			p->p_sicode = SI_USER;
			p->p_sigval.sival_ptr = NULL;
		}

		sendsig(action, signum, returnmask, &si);
		postsig_done(p, signum, ps);
		splx(s);
	}
}

/*
 * Force the current process to exit with the specified signal, dumping core
 * if appropriate.  We bypass the normal tests for masked and caught signals,
 * allowing unrecoverable failures to terminate the process without changing
 * signal state.  Mark the accounting record with the signal termination.
 * If dumping core, save the signal number for the debugger.  Calls exit and
 * does not return.
 */
void
sigexit(struct proc *p, int signum)
{
	/* Mark process as going away */
	atomic_setbits_int(&p->p_flag, P_WEXIT);

	p->p_p->ps_acflag |= AXSIG;
	if (sigprop[signum] & SA_CORE) {
		p->p_sisig = signum;

		/* if there are other threads, pause them */
		if (P_HASSIBLING(p))
			single_thread_set(p, SINGLE_SUSPEND, 0);

		if (coredump(p) == 0)
			signum |= WCOREFLAG;
	}
	exit1(p, 0, signum, EXIT_NORMAL);
	/* NOTREACHED */
}

int nosuidcoredump = 1;

struct coredump_iostate {
	struct proc *io_proc;
	struct vnode *io_vp;
	struct ucred *io_cred;
	off_t io_offset;
};
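
/*
 * Policy values honoured by coredump() below for set-id processes
 * (informational; nosuidcoredump is also adjustable via sysctl):
 *	0	dump in the working directory, like any other process
 *	1	never dump (the default)
 *	2	dump to /var/crash/<progname>.core
 *	3	dump to /var/crash/<progname>/<pid>.core
 */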

/*
 * Dump core, into a file named "progname.core", unless the process was
 * setuid/setgid.
 */
int
coredump(struct proc *p)
{
#ifdef SMALL_KERNEL
	return EPERM;
#else
	struct process *pr = p->p_p;
	struct vnode *vp;
	struct ucred *cred = p->p_ucred;
	struct vmspace *vm = p->p_vmspace;
	struct nameidata nd;
	struct vattr vattr;
	struct coredump_iostate io;
	int error, len, incrash = 0;
	char *name;
	const char *dir = "/var/crash";

	if (pr->ps_emul->e_coredump == NULL)
		return (EINVAL);

	atomic_setbits_int(&pr->ps_flags, PS_COREDUMP);

	/* Don't dump if will exceed file size limit. */
	if (USPACE + ptoa(vm->vm_dsize + vm->vm_ssize) >= lim_cur(RLIMIT_CORE))
		return (EFBIG);

	name = pool_get(&namei_pool, PR_WAITOK);

	/*
	 * If the process has inconsistent uids, nosuidcoredump
	 * determines coredump placement policy.
	 */
	if (((pr->ps_flags & PS_SUGID) && (error = suser(p))) ||
	    ((pr->ps_flags & PS_SUGID) && nosuidcoredump)) {
		if (nosuidcoredump == 3) {
			/*
			 * If the program directory does not exist, dumps of
			 * that core will silently fail.
			 */
			len = snprintf(name, MAXPATHLEN, "%s/%s/%u.core",
			    dir, pr->ps_comm, pr->ps_pid);
			incrash = KERNELPATH;
		} else if (nosuidcoredump == 2) {
			len = snprintf(name, MAXPATHLEN, "%s/%s.core",
			    dir, pr->ps_comm);
			incrash = KERNELPATH;
		} else {
			pool_put(&namei_pool, name);
			return (EPERM);
		}
	} else
		len = snprintf(name, MAXPATHLEN, "%s.core", pr->ps_comm);

	if (len >= MAXPATHLEN) {
		pool_put(&namei_pool, name);
		return (EACCES);
	}

	/*
	 * Control the UID used to write out.  The normal case uses
	 * the real UID.  If the sugid case is going to write into the
	 * controlled directory, we do so as root.
	 */
	if (incrash == 0) {
		cred = crdup(cred);
		cred->cr_uid = cred->cr_ruid;
		cred->cr_gid = cred->cr_rgid;
	} else {
		if (p->p_fd->fd_rdir) {
			vrele(p->p_fd->fd_rdir);
			p->p_fd->fd_rdir = NULL;
		}
		p->p_ucred = crdup(p->p_ucred);
		crfree(cred);
		cred = p->p_ucred;
		crhold(cred);
		cred->cr_uid = 0;
		cred->cr_gid = 0;
	}

	/* incrash should be 0 or KERNELPATH only */
	NDINIT(&nd, 0, incrash, UIO_SYSSPACE, name, p);

	error = vn_open(&nd, O_CREAT | FWRITE | O_NOFOLLOW | O_NONBLOCK,
	    S_IRUSR | S_IWUSR);

	if (error)
		goto out;

	/*
	 * Don't dump to non-regular files, files with links, or files
	 * owned by someone else.
	 */
	vp = nd.ni_vp;
	if ((error = VOP_GETATTR(vp, &vattr, cred, p)) != 0) {
		VOP_UNLOCK(vp);
		vn_close(vp, FWRITE, cred, p);
		goto out;
	}
	if (vp->v_type != VREG || vattr.va_nlink != 1 ||
	    vattr.va_mode & ((VREAD | VWRITE) >> 3 | (VREAD | VWRITE) >> 6) ||
	    vattr.va_uid != cred->cr_uid) {
		error = EACCES;
		VOP_UNLOCK(vp);
		vn_close(vp, FWRITE, cred, p);
		goto out;
	}
	VATTR_NULL(&vattr);
	vattr.va_size = 0;
	VOP_SETATTR(vp, &vattr, cred, p);
	pr->ps_acflag |= ACORE;

	io.io_proc = p;
	io.io_vp = vp;
	io.io_cred = cred;
	io.io_offset = 0;
	VOP_UNLOCK(vp);
	vref(vp);
	error = vn_close(vp, FWRITE, cred, p);
	if (error == 0)
		error = (*pr->ps_emul->e_coredump)(p, &io);
	vrele(vp);
out:
	crfree(cred);
	pool_put(&namei_pool, name);
	return (error);
#endif
}
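
/*
 * Note on the vref()/vn_close() sequence in coredump() above: the file
 * is closed before the dump is written, with the vref() reference
 * keeping the vnode alive for the writes issued by (*e_coredump)()
 * until the final vrele().
 */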

#ifndef SMALL_KERNEL
int
coredump_write(void *cookie, enum uio_seg segflg, const void *data, size_t len)
{
	struct coredump_iostate *io = cookie;
	off_t coffset = 0;
	size_t csize;
	int chunk, error;

	csize = len;
	do {
		if (sigmask(SIGKILL) &
		    (io->io_proc->p_siglist | io->io_proc->p_p->ps_siglist))
			return (EINTR);

		/* Rest of the loop sleeps with lock held, so... */
		yield();

		chunk = MIN(csize, MAXPHYS);
		error = vn_rdwr(UIO_WRITE, io->io_vp,
		    (caddr_t)data + coffset, chunk,
		    io->io_offset + coffset, segflg,
		    IO_UNIT, io->io_cred, NULL, io->io_proc);
		if (error) {
			struct process *pr = io->io_proc->p_p;

			if (error == ENOSPC)
				log(LOG_ERR,
				    "coredump of %s(%d) failed, filesystem full\n",
				    pr->ps_comm, pr->ps_pid);
			else
				log(LOG_ERR,
				    "coredump of %s(%d), write failed: errno %d\n",
				    pr->ps_comm, pr->ps_pid, error);
			return (error);
		}

		coffset += chunk;
		csize -= chunk;
	} while (csize > 0);

	io->io_offset += len;
	return (0);
}

void
coredump_unmap(void *cookie, vaddr_t start, vaddr_t end)
{
	struct coredump_iostate *io = cookie;

	uvm_unmap(&io->io_proc->p_vmspace->vm_map, start, end);
}

#endif	/* !SMALL_KERNEL */

/*
 * Nonexistent system call-- signal process (may want to handle it).
 * Flag error in case process won't see signal immediately (blocked or ignored).
 */
int
sys_nosys(struct proc *p, void *v, register_t *retval)
{

	ptsignal(p, SIGSYS, STHREAD);
	return (ENOSYS);
}

int
sys___thrsigdivert(struct proc *p, void *v, register_t *retval)
{
	static int sigwaitsleep;
	struct sys___thrsigdivert_args /* {
		syscallarg(sigset_t) sigmask;
		syscallarg(siginfo_t *) info;
		syscallarg(const struct timespec *) timeout;
	} */ *uap = v;
	struct process *pr = p->p_p;
	sigset_t *m;
	sigset_t mask = SCARG(uap, sigmask) &~ sigcantmask;
	siginfo_t si;
	uint64_t nsecs = INFSLP;
	int timeinvalid = 0;
	int error = 0;

	memset(&si, 0, sizeof(si));

	if (SCARG(uap, timeout) != NULL) {
		struct timespec ts;
		if ((error = copyin(SCARG(uap, timeout), &ts, sizeof(ts))) != 0)
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &ts);
#endif
		if (!timespecisvalid(&ts))
			timeinvalid = 1;
		else
			nsecs = TIMESPEC_TO_NSEC(&ts);
	}

	dosigsuspend(p, p->p_sigmask &~ mask);
	for (;;) {
		si.si_signo = CURSIG(p);
		if (si.si_signo != 0) {
			sigset_t smask = sigmask(si.si_signo);
			if (smask & mask) {
				if (p->p_siglist & smask)
					m = &p->p_siglist;
				else if (pr->ps_siglist & smask)
					m = &pr->ps_siglist;
				else {
					/* signal got eaten by someone else? */
					continue;
				}
				atomic_clearbits_int(m, smask);
				error = 0;
				break;
			}
		}

		/* per-POSIX, delay this error until after the above */
		if (timeinvalid)
			error = EINVAL;

		if (SCARG(uap, timeout) != NULL && nsecs == INFSLP)
			error = EAGAIN;

		if (error != 0)
			break;

		error = tsleep_nsec(&sigwaitsleep, PPAUSE|PCATCH, "sigwait",
		    nsecs);
	}

	if (error == 0) {
		*retval = si.si_signo;
		if (SCARG(uap, info) != NULL)
			error = copyout(&si, SCARG(uap, info), sizeof(si));
	} else if (error == ERESTART && SCARG(uap, timeout) != NULL) {
		/*
		 * Restarting is wrong if there's a timeout, as it'll be
		 * for the same interval again
		 */
		error = EINTR;
	}

	return (error);
}

void
initsiginfo(siginfo_t *si, int sig, u_long trapno, int code, union sigval val)
{
	memset(si, 0, sizeof(*si));

	si->si_signo = sig;
	si->si_code = code;
	if (code == SI_USER) {
		si->si_value = val;
	} else {
		switch (sig) {
		case SIGSEGV:
		case SIGILL:
		case SIGBUS:
		case SIGFPE:
			si->si_addr = val.sival_ptr;
			si->si_trapno = trapno;
			break;
		case SIGXFSZ:
			break;
		}
	}
}

int
filt_sigattach(struct knote *kn)
{
	struct process *pr = curproc->p_p;

	if (kn->kn_id >= NSIG)
		return EINVAL;

	kn->kn_ptr.p_process = pr;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/* XXX lock the proc here while adding to the list? */
	SLIST_INSERT_HEAD(&pr->ps_klist, kn, kn_selnext);

	return (0);
}

void
filt_sigdetach(struct knote *kn)
{
	struct process *pr = kn->kn_ptr.p_process;

	SLIST_REMOVE(&pr->ps_klist, kn, knote, kn_selnext);
}

/*
 * signal knotes are shared with proc knotes, so we apply a mask to
 * the hint in order to differentiate them from process hints.  This
 * could be avoided by using a signal-specific knote list, but probably
 * isn't worth the trouble.
 */
int
filt_signal(struct knote *kn, long hint)
{

	if (hint & NOTE_SIGNAL) {
		hint &= ~NOTE_SIGNAL;

		if (kn->kn_id == hint)
			kn->kn_data++;
	}
	return (kn->kn_data != 0);
}
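
/*
 * Illustrative userland use of the EVFILT_SIGNAL filter above (not
 * kernel code; ordinary kqueue API):
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *	signal(SIGUSR1, SIG_IGN);
 *	EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * kn_data then counts deliveries since the last read, matching the
 * EV_CLEAR behaviour set in filt_sigattach().
 */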

void
userret(struct proc *p)
{
	int signum;

	/* send SIGPROF or SIGVTALRM if their timers interrupted this thread */
	if (p->p_flag & P_PROFPEND) {
		atomic_clearbits_int(&p->p_flag, P_PROFPEND);
		KERNEL_LOCK();
		psignal(p, SIGPROF);
		KERNEL_UNLOCK();
	}
	if (p->p_flag & P_ALRMPEND) {
		atomic_clearbits_int(&p->p_flag, P_ALRMPEND);
		KERNEL_LOCK();
		psignal(p, SIGVTALRM);
		KERNEL_UNLOCK();
	}

	if (SIGPENDING(p) != 0) {
		KERNEL_LOCK();
		while ((signum = CURSIG(p)) != 0)
			postsig(p, signum);
		KERNEL_UNLOCK();
	}

	/*
	 * If P_SIGSUSPEND is still set here, then we still need to restore
	 * the original sigmask before returning to userspace.  Also, this
	 * might unmask some pending signals, so we need to check a second
	 * time for signals to post.
	 */
	if (p->p_flag & P_SIGSUSPEND) {
		atomic_clearbits_int(&p->p_flag, P_SIGSUSPEND);
		p->p_sigmask = p->p_oldmask;

		KERNEL_LOCK();
		while ((signum = CURSIG(p)) != 0)
			postsig(p, signum);
		KERNEL_UNLOCK();
	}

	if (p->p_flag & P_SUSPSINGLE) {
		KERNEL_LOCK();
		single_thread_check(p, 0);
		KERNEL_UNLOCK();
	}

	WITNESS_WARN(WARN_PANIC, NULL, "userret: returning");

	p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
}

int
single_thread_check(struct proc *p, int deep)
{
	struct process *pr = p->p_p;

	if (pr->ps_single != NULL && pr->ps_single != p) {
		do {
			int s;

			/* if we're in deep, we need to unwind to the edge */
			if (deep) {
				if (pr->ps_flags & PS_SINGLEUNWIND)
					return (ERESTART);
				if (pr->ps_flags & PS_SINGLEEXIT)
					return (EINTR);
			}

			if (--pr->ps_singlecount == 0)
				wakeup(&pr->ps_singlecount);
			if (pr->ps_flags & PS_SINGLEEXIT)
				exit1(p, 0, 0, EXIT_THREAD_NOCHECK);

			/* not exiting and don't need to unwind, so suspend */
			SCHED_LOCK(s);
			p->p_stat = SSTOP;
			mi_switch();
			SCHED_UNLOCK(s);
		} while (pr->ps_single != NULL);
	}

	return (0);
}

/*
 * Stop other threads in the process.  The mode controls how and
 * where the other threads should stop:
 *  - SINGLE_SUSPEND: stop wherever they are, will later either be told to exit
 *    (by setting to SINGLE_EXIT) or be released (via single_thread_clear())
 *  - SINGLE_PTRACE: stop wherever they are, will wait for them to stop
 *    later (via single_thread_wait()) and released as with SINGLE_SUSPEND
 *  - SINGLE_UNWIND: just unwind to kernel boundary, will be told to exit
 *    or released as with SINGLE_SUSPEND
 *  - SINGLE_EXIT: unwind to kernel boundary and exit
 */
int
single_thread_set(struct proc *p, enum single_thread_mode mode, int deep)
{
	struct process *pr = p->p_p;
	struct proc *q;
	int error;

	KERNEL_ASSERT_LOCKED();

	if ((error = single_thread_check(p, deep)))
		return error;

	switch (mode) {
	case SINGLE_SUSPEND:
	case SINGLE_PTRACE:
		break;
	case SINGLE_UNWIND:
		atomic_setbits_int(&pr->ps_flags, PS_SINGLEUNWIND);
		break;
	case SINGLE_EXIT:
		atomic_setbits_int(&pr->ps_flags, PS_SINGLEEXIT);
		atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND);
		break;
#ifdef DIAGNOSTIC
	default:
		panic("single_thread_mode = %d", mode);
#endif
	}
	pr->ps_single = p;
	pr->ps_singlecount = 0;
	TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
		int s;

		if (q == p)
			continue;
		if (q->p_flag & P_WEXIT) {
			if (mode == SINGLE_EXIT) {
				SCHED_LOCK(s);
				if (q->p_stat == SSTOP) {
					setrunnable(q);
					pr->ps_singlecount++;
				}
				SCHED_UNLOCK(s);
			}
			continue;
		}
		SCHED_LOCK(s);
		atomic_setbits_int(&q->p_flag, P_SUSPSINGLE);
		switch (q->p_stat) {
		case SIDL:
		case SRUN:
			pr->ps_singlecount++;
			break;
		case SSLEEP:
			/* if it's not interruptible, then just have to wait */
			if (q->p_flag & P_SINTR) {
				/* merely need to suspend?  just stop it */
				if (mode == SINGLE_SUSPEND ||
				    mode == SINGLE_PTRACE) {
					q->p_stat = SSTOP;
					break;
				}
				/* need to unwind or exit, so wake it */
				setrunnable(q);
			}
			pr->ps_singlecount++;
			break;
		case SSTOP:
			if (mode == SINGLE_EXIT) {
				setrunnable(q);
				pr->ps_singlecount++;
			}
			break;
		case SDEAD:
			break;
		case SONPROC:
			pr->ps_singlecount++;
			signotify(q);
			break;
		}
		SCHED_UNLOCK(s);
	}

	if (mode != SINGLE_PTRACE)
		single_thread_wait(pr);

	return 0;
}

void
single_thread_wait(struct process *pr)
{
	/* wait until they're all suspended */
	while (pr->ps_singlecount > 0)
		tsleep_nsec(&pr->ps_singlecount, PWAIT, "suspend", INFSLP);
}

void
single_thread_clear(struct proc *p, int flag)
{
	struct process *pr = p->p_p;
	struct proc *q;

	KASSERT(pr->ps_single == p);
	KERNEL_ASSERT_LOCKED();

	pr->ps_single = NULL;
	atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND | PS_SINGLEEXIT);
	TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
		int s;

		if (q == p || (q->p_flag & P_SUSPSINGLE) == 0)
			continue;
		atomic_clearbits_int(&q->p_flag, P_SUSPSINGLE);

		/*
		 * if the thread was only stopped for single threading
		 * then clearing that either makes it runnable or puts
		 * it back into some sleep queue
		 */
		SCHED_LOCK(s);
		if (q->p_stat == SSTOP && (q->p_flag & flag) == 0) {
			if (q->p_wchan == 0)
				setrunnable(q);
			else
				q->p_stat = SSLEEP;
		}
		SCHED_UNLOCK(s);
	}
}

void
sigio_del(struct sigiolst *rmlist)
{
	struct sigio *sigio;

	while ((sigio = LIST_FIRST(rmlist)) != NULL) {
		LIST_REMOVE(sigio, sio_pgsigio);
		crfree(sigio->sio_ucred);
		free(sigio, M_SIGIO, sizeof(*sigio));
	}
}

void
sigio_unlink(struct sigio_ref *sir, struct sigiolst *rmlist)
{
	struct sigio *sigio;

	MUTEX_ASSERT_LOCKED(&sigio_lock);

	sigio = sir->sir_sigio;
	if (sigio != NULL) {
		KASSERT(sigio->sio_myref == sir);
		sir->sir_sigio = NULL;

		if (sigio->sio_pgid > 0)
			sigio->sio_proc = NULL;
		else
			sigio->sio_pgrp = NULL;
		LIST_REMOVE(sigio, sio_pgsigio);

		LIST_INSERT_HEAD(rmlist, sigio, sio_pgsigio);
	}
}

void
sigio_free(struct sigio_ref *sir)
{
	struct sigiolst rmlist;

	if (sir->sir_sigio == NULL)
		return;

	LIST_INIT(&rmlist);

	mtx_enter(&sigio_lock);
	sigio_unlink(sir, &rmlist);
	mtx_leave(&sigio_lock);

	sigio_del(&rmlist);
}

void
sigio_freelist(struct sigiolst *sigiolst)
{
	struct sigiolst rmlist;
	struct sigio *sigio;

	if (LIST_EMPTY(sigiolst))
		return;

	LIST_INIT(&rmlist);

	mtx_enter(&sigio_lock);
	while ((sigio = LIST_FIRST(sigiolst)) != NULL)
		sigio_unlink(sigio->sio_myref, &rmlist);
	mtx_leave(&sigio_lock);

	sigio_del(&rmlist);
}
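
/*
 * Ownership encoding (informational): sigio_setown() below stores the
 * fcntl(2) F_SETOWN convention, sio_pgid > 0 naming a process and
 * sio_pgid < 0 a process group.  TIOCSPGRP callers pass a positive
 * process group id instead, so it is negated on entry to match.
 */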

int
sigio_setown(struct sigio_ref *sir, u_long cmd, caddr_t data)
{
	struct sigiolst rmlist;
	struct proc *p = curproc;
	struct pgrp *pgrp = NULL;
	struct process *pr = NULL;
	struct sigio *sigio;
	int error;
	pid_t pgid = *(int *)data;

	if (pgid == 0) {
		sigio_free(sir);
		return (0);
	}

	if (cmd == TIOCSPGRP) {
		if (pgid < 0)
			return (EINVAL);
		pgid = -pgid;
	}

	sigio = malloc(sizeof(*sigio), M_SIGIO, M_WAITOK);
	sigio->sio_pgid = pgid;
	sigio->sio_ucred = crhold(p->p_ucred);
	sigio->sio_myref = sir;

	LIST_INIT(&rmlist);

	/*
	 * The kernel lock, and not sleeping between prfind()/pgfind() and
	 * linking of the sigio ensure that the process or process group does
	 * not disappear unexpectedly.
	 */
	KERNEL_LOCK();
	mtx_enter(&sigio_lock);

	if (pgid > 0) {
		pr = prfind(pgid);
		if (pr == NULL) {
			error = ESRCH;
			goto fail;
		}

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		if (pr->ps_session != p->p_p->ps_session) {
			error = EPERM;
			goto fail;
		}

		if ((pr->ps_flags & PS_EXITING) != 0) {
			error = ESRCH;
			goto fail;
		}
	} else /* if (pgid < 0) */ {
		pgrp = pgfind(-pgid);
		if (pgrp == NULL) {
			error = ESRCH;
			goto fail;
		}

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		if (pgrp->pg_session != p->p_p->ps_session) {
			error = EPERM;
			goto fail;
		}
	}

	if (pgid > 0) {
		sigio->sio_proc = pr;
		LIST_INSERT_HEAD(&pr->ps_sigiolst, sigio, sio_pgsigio);
	} else {
		sigio->sio_pgrp = pgrp;
		LIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
	}

	sigio_unlink(sir, &rmlist);
	sir->sir_sigio = sigio;

	mtx_leave(&sigio_lock);
	KERNEL_UNLOCK();

	sigio_del(&rmlist);

	return (0);

fail:
	mtx_leave(&sigio_lock);
	KERNEL_UNLOCK();

	crfree(sigio->sio_ucred);
	free(sigio, M_SIGIO, sizeof(*sigio));

	return (error);
}

void
sigio_getown(struct sigio_ref *sir, u_long cmd, caddr_t data)
{
	struct sigio *sigio;
	pid_t pgid = 0;

	mtx_enter(&sigio_lock);
	sigio = sir->sir_sigio;
	if (sigio != NULL)
		pgid = sigio->sio_pgid;
	mtx_leave(&sigio_lock);

	if (cmd == TIOCGPGRP)
		pgid = -pgid;

	*(int *)data = pgid;
}

void
sigio_copy(struct sigio_ref *dst, struct sigio_ref *src)
{
	struct sigiolst rmlist;
	struct sigio *newsigio, *sigio;

	sigio_free(dst);

	if (src->sir_sigio == NULL)
		return;

	newsigio = malloc(sizeof(*newsigio), M_SIGIO, M_WAITOK);
	LIST_INIT(&rmlist);

	mtx_enter(&sigio_lock);

	sigio = src->sir_sigio;
	if (sigio == NULL) {
		mtx_leave(&sigio_lock);
		free(newsigio, M_SIGIO, sizeof(*newsigio));
		return;
	}

	newsigio->sio_pgid = sigio->sio_pgid;
	newsigio->sio_ucred = crhold(sigio->sio_ucred);
	newsigio->sio_myref = dst;
	if (newsigio->sio_pgid > 0) {
		newsigio->sio_proc = sigio->sio_proc;
		LIST_INSERT_HEAD(&newsigio->sio_proc->ps_sigiolst, newsigio,
		    sio_pgsigio);
	} else {
		newsigio->sio_pgrp = sigio->sio_pgrp;
		LIST_INSERT_HEAD(&newsigio->sio_pgrp->pg_sigiolst, newsigio,
		    sio_pgsigio);
	}

	sigio_unlink(dst, &rmlist);
	dst->sir_sigio = newsigio;

	mtx_leave(&sigio_lock);

	sigio_del(&rmlist);
}