xref: /openbsd-src/sys/kern/kern_sig.c (revision 91f110e064cd7c194e59e019b83bb7496c1c84d4)
1 /*	$OpenBSD: kern_sig.c,v 1.160 2014/03/24 03:48:00 guenther Exp $	*/
2 /*	$NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $	*/
3 
4 /*
5  * Copyright (c) 1997 Theo de Raadt. All rights reserved.
6  * Copyright (c) 1982, 1986, 1989, 1991, 1993
7  *	The Regents of the University of California.  All rights reserved.
8  * (c) UNIX System Laboratories, Inc.
9  * All or some portions of this file are derived from material licensed
10  * to the University of California by American Telephone and Telegraph
11  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
12  * the permission of UNIX System Laboratories, Inc.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  * 3. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)kern_sig.c	8.7 (Berkeley) 4/18/94
39  */
40 
41 #define	SIGPROP		/* include signal properties table */
42 #include <sys/param.h>
43 #include <sys/signalvar.h>
44 #include <sys/resourcevar.h>
45 #include <sys/queue.h>
46 #include <sys/namei.h>
47 #include <sys/vnode.h>
48 #include <sys/event.h>
49 #include <sys/proc.h>
50 #include <sys/systm.h>
51 #include <sys/buf.h>
52 #include <sys/acct.h>
53 #include <sys/file.h>
54 #include <sys/kernel.h>
55 #include <sys/wait.h>
56 #include <sys/ktrace.h>
57 #include <sys/stat.h>
58 #include <sys/core.h>
59 #include <sys/malloc.h>
60 #include <sys/pool.h>
61 #include <sys/ptrace.h>
62 #include <sys/sched.h>
63 #include <sys/user.h>
64 
65 #include <sys/mount.h>
66 #include <sys/syscallargs.h>
67 
68 
69 #include <uvm/uvm_extern.h>
70 
71 int	filt_sigattach(struct knote *kn);
72 void	filt_sigdetach(struct knote *kn);
73 int	filt_signal(struct knote *kn, long hint);
74 
75 struct filterops sig_filtops =
76 	{ 0, filt_sigattach, filt_sigdetach, filt_signal };
77 
78 void proc_stop(struct proc *p, int);
79 void proc_stop_sweep(void *);
80 struct timeout proc_stop_to;
81 
82 int cansignal(struct proc *, struct pcred *, struct proc *, int);
83 
84 struct pool sigacts_pool;	/* memory pool for sigacts structures */
85 
/*
 * Can process p, with pcred pc, send the signal signum to process q?
 * Returns 1 if permitted, 0 if not.  signum 0 means "permission probe
 * only" (kill(2) with sig 0).
 */
int
cansignal(struct proc *p, struct pcred *pc, struct proc *q, int signum)
{
	if (pc->pc_ucred->cr_uid == 0)
		return (1);		/* root can always signal */

	if (p == q)
		return (1);		/* process can always signal itself */

	/* POSIX: SIGCONT may be sent to any process in the same session */
	if (signum == SIGCONT && q->p_p->ps_session == p->p_p->ps_session)
		return (1);		/* SIGCONT in session */

	/*
	 * Using kill(), only certain signals can be sent to setugid
	 * child processes
	 */
	if (q->p_p->ps_flags & PS_SUGID) {
		switch (signum) {
		case 0:
		case SIGKILL:
		case SIGINT:
		case SIGTERM:
		case SIGALRM:
		case SIGSTOP:
		case SIGTTIN:
		case SIGTTOU:
		case SIGTSTP:
		case SIGHUP:
		case SIGUSR1:
		case SIGUSR2:
			/* allowed only with a matching real/effective uid */
			if (pc->p_ruid == q->p_cred->p_ruid ||
			    pc->pc_ucred->cr_uid == q->p_cred->p_ruid)
				return (1);
		}
		return (0);
	}

	/* normal case: any real/effective vs real/saved uid match will do */
	if (pc->p_ruid == q->p_cred->p_ruid ||
	    pc->p_ruid == q->p_cred->p_svuid ||
	    pc->pc_ucred->cr_uid == q->p_cred->p_ruid ||
	    pc->pc_ucred->cr_uid == q->p_cred->p_svuid)
		return (1);
	return (0);
}
133 
134 /*
135  * Initialize signal-related data structures.
136  */
137 void
138 signal_init(void)
139 {
140 	timeout_set(&proc_stop_to, proc_stop_sweep, NULL);
141 
142 	pool_init(&sigacts_pool, sizeof(struct sigacts), 0, 0, 0, "sigapl",
143 	    &pool_allocator_nointr);
144 }
145 
146 /*
147  * Create an initial sigacts structure, using the same signal state
148  * as p.
149  */
150 struct sigacts *
151 sigactsinit(struct process *pr)
152 {
153 	struct sigacts *ps;
154 
155 	ps = pool_get(&sigacts_pool, PR_WAITOK);
156 	memcpy(ps, pr->ps_sigacts, sizeof(struct sigacts));
157 	ps->ps_refcnt = 1;
158 	return (ps);
159 }
160 
161 /*
162  * Share a sigacts structure.
163  */
164 struct sigacts *
165 sigactsshare(struct process *pr)
166 {
167 	struct sigacts *ps = pr->ps_sigacts;
168 
169 	ps->ps_refcnt++;
170 	return ps;
171 }
172 
173 /*
174  * Initialize a new sigaltstack structure.
175  */
176 void
177 sigstkinit(struct sigaltstack *ss)
178 {
179 	ss->ss_flags = SS_DISABLE;
180 	ss->ss_size = 0;
181 	ss->ss_sp = 0;
182 }
183 
184 /*
185  * Make this process not share its sigacts, maintaining all
186  * signal state.
187  */
188 void
189 sigactsunshare(struct process *pr)
190 {
191 	struct sigacts *newps;
192 
193 	if (pr->ps_sigacts->ps_refcnt == 1)
194 		return;
195 
196 	newps = sigactsinit(pr);
197 	sigactsfree(pr);
198 	pr->ps_sigacts = newps;
199 }
200 
201 /*
202  * Release a sigacts structure.
203  */
204 void
205 sigactsfree(struct process *pr)
206 {
207 	struct sigacts *ps = pr->ps_sigacts;
208 
209 	if (--ps->ps_refcnt > 0)
210 		return;
211 
212 	pr->ps_sigacts = NULL;
213 
214 	pool_put(&sigacts_pool, ps);
215 }
216 
/*
 * sigaction(2): examine and/or change the action for a signal.
 * The old action, if requested, is reconstructed from the bitmask
 * state kept in the shared sigacts structure.
 */
/* ARGSUSED */
int
sys_sigaction(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigaction_args /* {
		syscallarg(int) signum;
		syscallarg(const struct sigaction *) nsa;
		syscallarg(struct sigaction *) osa;
	} */ *uap = v;
	struct sigaction vec;
#ifdef KTRACE
	struct sigaction ovec;
#endif
	struct sigaction *sa;
	const struct sigaction *nsa;
	struct sigaction *osa;
	struct sigacts *ps = p->p_p->ps_sigacts;
	int signum;
	int bit, error;

	signum = SCARG(uap, signum);
	nsa = SCARG(uap, nsa);
	osa = SCARG(uap, osa);

	/* SIGKILL and SIGSTOP may be queried but their action never changed */
	if (signum <= 0 || signum >= NSIG ||
	    (nsa && (signum == SIGKILL || signum == SIGSTOP)))
		return (EINVAL);
	sa = &vec;
	if (osa) {
		/* rebuild the old struct sigaction from the sigacts bitmasks */
		sa->sa_handler = ps->ps_sigact[signum];
		sa->sa_mask = ps->ps_catchmask[signum];
		bit = sigmask(signum);
		sa->sa_flags = 0;
		if ((ps->ps_sigonstack & bit) != 0)
			sa->sa_flags |= SA_ONSTACK;
		if ((ps->ps_sigintr & bit) == 0)
			sa->sa_flags |= SA_RESTART;
		if ((ps->ps_sigreset & bit) != 0)
			sa->sa_flags |= SA_RESETHAND;
		if ((ps->ps_siginfo & bit) != 0)
			sa->sa_flags |= SA_SIGINFO;
		if (signum == SIGCHLD) {
			if ((ps->ps_flags & SAS_NOCLDSTOP) != 0)
				sa->sa_flags |= SA_NOCLDSTOP;
			if ((ps->ps_flags & SAS_NOCLDWAIT) != 0)
				sa->sa_flags |= SA_NOCLDWAIT;
		}
		if ((sa->sa_mask & bit) == 0)
			sa->sa_flags |= SA_NODEFER;
		sa->sa_mask &= ~bit;
		error = copyout(sa, osa, sizeof (vec));
		if (error)
			return (error);
#ifdef KTRACE
		/* stash a copy: vec is reused for the new action below */
		if (KTRPOINT(p, KTR_STRUCT))
			ovec = vec;
#endif
	}
	if (nsa) {
		error = copyin(nsa, sa, sizeof (vec));
		if (error)
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrsigaction(p, sa);
#endif
		setsigvec(p, signum, sa);
	}
#ifdef KTRACE
	if (osa && KTRPOINT(p, KTR_STRUCT))
		ktrsigaction(p, &ovec);
#endif
	return (0);
}
291 
/*
 * Install the signal action described by sa for signal signum,
 * updating all of the per-signal bitmasks in the shared sigacts.
 * The whole update runs at splhigh so the state changes atomically
 * with respect to signal delivery.
 */
void
setsigvec(struct proc *p, int signum, struct sigaction *sa)
{
	struct sigacts *ps = p->p_p->ps_sigacts;
	int bit;
	int s;

	bit = sigmask(signum);
	/*
	 * Change setting atomically.
	 */
	s = splhigh();
	ps->ps_sigact[signum] = sa->sa_handler;
	/* unless SA_NODEFER, the signal masks itself while handled */
	if ((sa->sa_flags & SA_NODEFER) == 0)
		sa->sa_mask |= sigmask(signum);
	ps->ps_catchmask[signum] = sa->sa_mask &~ sigcantmask;
	if (signum == SIGCHLD) {
		if (sa->sa_flags & SA_NOCLDSTOP)
			atomic_setbits_int(&ps->ps_flags, SAS_NOCLDSTOP);
		else
			atomic_clearbits_int(&ps->ps_flags, SAS_NOCLDSTOP);
		/*
		 * If the SA_NOCLDWAIT flag is set or the handler
		 * is SIG_IGN we reparent the dying child to PID 1
		 * (init) which will reap the zombie.  Because we use
		 * init to do our dirty work we never set SAS_NOCLDWAIT
		 * for PID 1.
		 * XXX exit1 rework means this is unnecessary?
		 */
		if (initproc->p_p->ps_sigacts != ps &&
		    ((sa->sa_flags & SA_NOCLDWAIT) ||
		    sa->sa_handler == SIG_IGN))
			atomic_setbits_int(&ps->ps_flags, SAS_NOCLDWAIT);
		else
			atomic_clearbits_int(&ps->ps_flags, SAS_NOCLDWAIT);
	}
	if ((sa->sa_flags & SA_RESETHAND) != 0)
		ps->ps_sigreset |= bit;
	else
		ps->ps_sigreset &= ~bit;
	if ((sa->sa_flags & SA_SIGINFO) != 0)
		ps->ps_siginfo |= bit;
	else
		ps->ps_siginfo &= ~bit;
	/* note inversion: ps_sigintr records the absence of SA_RESTART */
	if ((sa->sa_flags & SA_RESTART) == 0)
		ps->ps_sigintr |= bit;
	else
		ps->ps_sigintr &= ~bit;
	if ((sa->sa_flags & SA_ONSTACK) != 0)
		ps->ps_sigonstack |= bit;
	else
		ps->ps_sigonstack &= ~bit;
	/*
	 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
	 * and for signals set to SIG_DFL where the default is to ignore.
	 * However, don't put SIGCONT in ps_sigignore,
	 * as we have to restart the process.
	 */
	if (sa->sa_handler == SIG_IGN ||
	    (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) {
		atomic_clearbits_int(&p->p_siglist, bit);
		if (signum != SIGCONT)
			ps->ps_sigignore |= bit;	/* easier in psignal */
		ps->ps_sigcatch &= ~bit;
	} else {
		ps->ps_sigignore &= ~bit;
		if (sa->sa_handler == SIG_DFL)
			ps->ps_sigcatch &= ~bit;
		else
			ps->ps_sigcatch |= bit;
	}
	splx(s);
}
365 
366 /*
367  * Initialize signal state for process 0;
368  * set to ignore signals that are ignored by default.
369  */
370 void
371 siginit(struct process *pr)
372 {
373 	struct sigacts *ps = pr->ps_sigacts;
374 	int i;
375 
376 	for (i = 0; i < NSIG; i++)
377 		if (sigprop[i] & SA_IGNORE && i != SIGCONT)
378 			ps->ps_sigignore |= sigmask(i);
379 	ps->ps_flags = SAS_NOCLDWAIT | SAS_NOCLDSTOP;
380 }
381 
/*
 * Reset signals for an exec by the specified thread.
 */
void
execsigs(struct proc *p)
{
	struct sigacts *ps;
	int nc, mask;

	/* get a private sigacts before scribbling on it */
	sigactsunshare(p->p_p);
	ps = p->p_p->ps_sigacts;

	/*
	 * Reset caught signals.  Held signals remain held
	 * through p_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	while (ps->ps_sigcatch) {
		/* peel off the lowest-numbered caught signal each pass */
		nc = ffs((long)ps->ps_sigcatch);
		mask = sigmask(nc);
		ps->ps_sigcatch &= ~mask;
		if (sigprop[nc] & SA_IGNORE) {
			if (nc != SIGCONT)
				ps->ps_sigignore |= mask;
			atomic_clearbits_int(&p->p_siglist, mask);
		}
		ps->ps_sigact[nc] = SIG_DFL;
	}
	/*
	 * Reset stack state to the user stack.
	 * Clear set of signals caught on the signal stack.
	 */
	sigstkinit(&p->p_sigstk);
	ps->ps_flags &= ~SAS_NOCLDWAIT;
	if (ps->ps_sigact[SIGCHLD] == SIG_IGN)
		ps->ps_sigact[SIGCHLD] = SIG_DFL;
}
419 
420 /*
421  * Manipulate signal mask.
422  * Note that we receive new mask, not pointer,
423  * and return old mask as return value;
424  * the library stub does the rest.
425  */
426 int
427 sys_sigprocmask(struct proc *p, void *v, register_t *retval)
428 {
429 	struct sys_sigprocmask_args /* {
430 		syscallarg(int) how;
431 		syscallarg(sigset_t) mask;
432 	} */ *uap = v;
433 	int error = 0;
434 	int s;
435 	sigset_t mask;
436 
437 	*retval = p->p_sigmask;
438 	mask = SCARG(uap, mask);
439 	s = splhigh();
440 
441 	switch (SCARG(uap, how)) {
442 	case SIG_BLOCK:
443 		p->p_sigmask |= mask &~ sigcantmask;
444 		break;
445 	case SIG_UNBLOCK:
446 		p->p_sigmask &= ~mask;
447 		break;
448 	case SIG_SETMASK:
449 		p->p_sigmask = mask &~ sigcantmask;
450 		break;
451 	default:
452 		error = EINVAL;
453 		break;
454 	}
455 	splx(s);
456 	return (error);
457 }
458 
/* ARGSUSED */
/* sigpending(2): return the set of signals pending on this thread. */
int
sys_sigpending(struct proc *p, void *v, register_t *retval)
{

	*retval = p->p_siglist;
	return (0);
}
467 
/*
 * Suspend process until signal, providing mask to be set
 * in the meantime.  Note nonstandard calling convention:
 * libc stub passes mask, not pointer, to save a copyin.
 */
/* ARGSUSED */
int
sys_sigsuspend(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigsuspend_args /* {
		syscallarg(int) mask;
	} */ *uap = v;
	struct process *pr = p->p_p;
	struct sigacts *ps = pr->ps_sigacts;

	/*
	 * When returning from sigpause, we want
	 * the old mask to be restored after the
	 * signal handler has finished.  Thus, we
	 * save it here and mark the sigacts structure
	 * to indicate this.
	 */
	p->p_oldmask = p->p_sigmask;
	atomic_setbits_int(&p->p_flag, P_SIGSUSPEND);
	p->p_sigmask = SCARG(uap, mask) &~ sigcantmask;
	/* sleep until a signal terminates the PCATCH tsleep */
	while (tsleep(ps, PPAUSE|PCATCH, "pause", 0) == 0)
		/* void */;
	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}
498 
499 int
500 sigonstack(size_t stack)
501 {
502 	const struct sigaltstack *ss = &curproc->p_sigstk;
503 
504 	return (ss->ss_flags & SS_DISABLE ? 0 :
505 	    (stack - (size_t)ss->ss_sp < ss->ss_size));
506 }
507 
/*
 * sigaltstack(2): examine and/or change the per-thread alternate
 * signal stack.
 */
int
sys_sigaltstack(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigaltstack_args /* {
		syscallarg(const struct sigaltstack *) nss;
		syscallarg(struct sigaltstack *) oss;
	} */ *uap = v;
	struct sigaltstack ss;
	const struct sigaltstack *nss;
	struct sigaltstack *oss;
	int onstack = sigonstack(PROC_STACK(p));
	int error;

	nss = SCARG(uap, nss);
	oss = SCARG(uap, oss);

	if (oss != NULL) {
		/* report current settings, flagging if currently in use */
		ss = p->p_sigstk;
		if (onstack)
			ss.ss_flags |= SS_ONSTACK;
		if ((error = copyout(&ss, oss, sizeof(ss))))
			return (error);
	}
	if (nss == NULL)
		return (0);
	error = copyin(nss, &ss, sizeof(ss));
	if (error)
		return (error);
	/* can't change the alternate stack while executing on it */
	if (onstack)
		return (EPERM);
	if (ss.ss_flags & ~SS_DISABLE)
		return (EINVAL);
	if (ss.ss_flags & SS_DISABLE) {
		p->p_sigstk.ss_flags = ss.ss_flags;
		return (0);
	}
	if (ss.ss_size < MINSIGSTKSZ)
		return (ENOMEM);
	p->p_sigstk = ss;
	return (0);
}
549 
/* ARGSUSED */
/*
 * kill(2): send a signal to a process, a thread in this process
 * (pid > THREAD_PID_OFFSET), or a process group (pid <= 0).
 */
int
sys_kill(struct proc *cp, void *v, register_t *retval)
{
	struct sys_kill_args /* {
		syscallarg(int) pid;
		syscallarg(int) signum;
	} */ *uap = v;
	struct proc *p;
	struct pcred *pc = cp->p_cred;
	int pid = SCARG(uap, pid);
	int signum = SCARG(uap, signum);

	if (((u_int)signum) >= NSIG)
		return (EINVAL);
	if (pid > 0) {
		enum signal_type type = SPROCESS;

		/*
		 * If the target pid is > THREAD_PID_OFFSET then this
		 * must be a kill of another thread in the same process.
		 * Otherwise, this is a process kill and the target must
		 * be a main thread.
		 */
		if (pid > THREAD_PID_OFFSET) {
			if ((p = pfind(pid - THREAD_PID_OFFSET)) == NULL)
				return (ESRCH);
			/* only threads of our own process; no cansignal()
			 * needed since it's always our own process */
			if (p->p_p != cp->p_p)
				return (ESRCH);
			type = STHREAD;
		} else {
			if ((p = pfind(pid)) == NULL)
				return (ESRCH);
			if (p->p_flag & P_THREAD)
				return (ESRCH);
			if (!cansignal(cp, pc, p, signum))
				return (EPERM);
		}

		/* kill single process or thread */
		if (signum)
			ptsignal(p, signum, type);
		return (0);
	}
	switch (pid) {
	case -1:		/* broadcast signal */
		return (killpg1(cp, signum, 0, 1));
	case 0:			/* signal own process group */
		return (killpg1(cp, signum, 0, 0));
	default:		/* negative explicit process group */
		return (killpg1(cp, signum, -pid, 0));
	}
	/* NOTREACHED */
}
604 
/*
 * Common code for kill process group/broadcast kill.
 * cp is calling process.  Returns 0 if at least one eligible target
 * was found (even with signum 0), ESRCH otherwise.
 */
int
killpg1(struct proc *cp, int signum, int pgid, int all)
{
	struct proc *p;
	struct process *pr;
	struct pcred *pc = cp->p_cred;
	struct pgrp *pgrp;
	int nfound = 0;

	if (all)
		/*
		 * broadcast
		 */
		LIST_FOREACH(pr, &allprocess, ps_list) {
			p = pr->ps_mainproc;
			/* skip init, system procs, ourselves, and denials */
			if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
			    p == cp || !cansignal(cp, pc, p, signum))
				continue;
			nfound++;
			if (signum)
				psignal(p, signum);
		}
	else {
		if (pgid == 0)
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = cp->p_p->ps_pgrp;
		else {
			pgrp = pgfind(pgid);
			if (pgrp == NULL)
				return (ESRCH);
		}
		LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist) {
			p = pr->ps_mainproc;
			if (p->p_pid <= 1 || p->p_flag & (P_SYSTEM|P_THREAD) ||
			    !cansignal(cp, pc, p, signum))
				continue;
			nfound++;
			if (signum && P_ZOMBIE(p) == 0)
				psignal(p, signum);
		}
	}
	return (nfound ? 0 : ESRCH);
}
654 
655 #define CANDELIVER(uid, euid, pr) \
656 	(euid == 0 || \
657 	(uid) == (pr)->ps_cred->p_ruid || \
658 	(uid) == (pr)->ps_cred->p_svuid || \
659 	(uid) == (pr)->ps_cred->pc_ucred->cr_uid || \
660 	(euid) == (pr)->ps_cred->p_ruid || \
661 	(euid) == (pr)->ps_cred->p_svuid || \
662 	(euid) == (pr)->ps_cred->pc_ucred->cr_uid)
663 
664 /*
665  * Deliver signum to pgid, but first check uid/euid against each
666  * process and see if it is permitted.
667  */
668 void
669 csignal(pid_t pgid, int signum, uid_t uid, uid_t euid)
670 {
671 	struct pgrp *pgrp;
672 	struct process *pr;
673 
674 	if (pgid == 0)
675 		return;
676 	if (pgid < 0) {
677 		pgid = -pgid;
678 		if ((pgrp = pgfind(pgid)) == NULL)
679 			return;
680 		LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist)
681 			if (CANDELIVER(uid, euid, pr))
682 				prsignal(pr, signum);
683 	} else {
684 		if ((pr = prfind(pgid)) == NULL)
685 			return;
686 		if (CANDELIVER(uid, euid, pr))
687 			prsignal(pr, signum);
688 	}
689 }
690 
691 /*
692  * Send a signal to a process group.
693  */
694 void
695 gsignal(int pgid, int signum)
696 {
697 	struct pgrp *pgrp;
698 
699 	if (pgid && (pgrp = pgfind(pgid)))
700 		pgsignal(pgrp, signum, 0);
701 }
702 
703 /*
704  * Send a signal to a process group.  If checktty is 1,
705  * limit to members which have a controlling terminal.
706  */
707 void
708 pgsignal(struct pgrp *pgrp, int signum, int checkctty)
709 {
710 	struct process *pr;
711 
712 	if (pgrp)
713 		LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist)
714 			if (checkctty == 0 || pr->ps_flags & PS_CONTROLT)
715 				prsignal(pr, signum);
716 }
717 
/*
 * Send a signal caused by a trap to the current process.
 * If it will be caught immediately, deliver it with correct code.
 * Otherwise, post it normally.
 */
void
trapsignal(struct proc *p, int signum, u_long trapno, int code,
    union sigval sigval)
{
	struct process *pr = p->p_p;
	struct sigacts *ps = pr->ps_sigacts;
	int mask;

	mask = sigmask(signum);
	/* deliverable right now: not traced, caught, and not blocked */
	if ((pr->ps_flags & PS_TRACED) == 0 &&
	    (ps->ps_sigcatch & mask) != 0 &&
	    (p->p_sigmask & mask) == 0) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG)) {
			siginfo_t si;

			initsiginfo(&si, signum, trapno, code, sigval);
			ktrpsig(p, signum, ps->ps_sigact[signum],
			    p->p_sigmask, code, &si);
		}
#endif
		p->p_ru.ru_nsignals++;
		/* push the handler frame via the emulation's sendsig hook */
		(*p->p_emul->e_sendsig)(ps->ps_sigact[signum], signum,
		    p->p_sigmask, trapno, code, sigval);
		p->p_sigmask |= ps->ps_catchmask[signum];
		/* SA_RESETHAND: revert to default after one delivery */
		if ((ps->ps_sigreset & mask) != 0) {
			ps->ps_sigcatch &= ~mask;
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				ps->ps_sigignore |= mask;
			ps->ps_sigact[signum] = SIG_DFL;
		}
	} else {
		/* record trap details for core dump/debugger, then post */
		p->p_sisig = signum;
		p->p_sitrapno = trapno;	/* XXX for core dump/debugger */
		p->p_sicode = code;
		p->p_sigval = sigval;
		ptsignal(p, signum, STHREAD);
	}
}
762 
/*
 * Send the signal to the process.  If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *   o When a stop signal is sent to a sleeping process that takes the
 *     default action, the process is stopped without awakening it.
 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 *     regardless of the signal action (eg, blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 */
void
psignal(struct proc *p, int signum)
{
	/* thin wrapper: a process-level signal delivered via ptsignal() */
	ptsignal(p, signum, SPROCESS);
}
781 
/*
 * type = SPROCESS	process signal, can be diverted (sigwait())
 *	XXX if blocked in all threads, mark as pending in struct process
 * type = STHREAD	thread signal, but should be propagated if unhandled
 * type = SPROPAGATED	propagated to this thread, so don't propagate again
 */
void
ptsignal(struct proc *p, int signum, enum signal_type type)
{
	int s, prop;
	sig_t action;
	int mask;
	struct process *pr = p->p_p;
	struct proc *q;
	int wakeparent = 0;

#ifdef DIAGNOSTIC
	if ((u_int)signum >= NSIG || signum == 0)
		panic("psignal signal number");
#endif

	/* Ignore signal if we are exiting */
	if (pr->ps_flags & PS_EXITING)
		return;

	mask = sigmask(signum);

	/* for a process signal, first pick which thread receives it */
	if (type == SPROCESS) {
		/* Accept SIGKILL to coredumping processes */
		if (pr->ps_flags & PS_COREDUMP && signum == SIGKILL) {
			if (pr->ps_single != NULL)
				p = pr->ps_single;
			atomic_setbits_int(&p->p_siglist, mask);
			return;
		}

		/*
		 * If the current thread can process the signal
		 * immediately, either because it's sigwait()ing
		 * on it or has it unblocked, then have it take it.
		 */
		q = curproc;
		if (q != NULL && q->p_p == pr && (q->p_flag & P_WEXIT) == 0 &&
		    ((q->p_sigdivert & mask) || (q->p_sigmask & mask) == 0))
			p = q;
		else {
			/*
			 * A process-wide signal can be diverted to a
			 * different thread that's in sigwait() for this
			 * signal.  If there isn't such a thread, then
			 * pick a thread that doesn't have it blocked so
			 * that the stop/kill consideration isn't
			 * delayed.  Otherwise, mark it pending on the
			 * main thread.
			 */
			TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
				/* ignore exiting threads */
				if (q->p_flag & P_WEXIT)
					continue;

				/* sigwait: definitely go to this thread */
				if (q->p_sigdivert & mask) {
					p = q;
					break;
				}

				/* unblocked: possibly go to this thread */
				if ((q->p_sigmask & mask) == 0)
					p = q;
			}
		}
	}

	if (type != SPROPAGATED)
		KNOTE(&pr->ps_klist, NOTE_SIGNAL | signum);

	prop = sigprop[signum];

	/*
	 * If proc is traced, always give parent a chance.
	 * XXX give sigwait() priority until it's fixed to do this
	 * XXX from issignal/postsig
	 */
	if (p->p_sigdivert & mask) {
		/* sigwait(): hand the signal over and wake the waiter */
		p->p_sigwait = signum;
		atomic_clearbits_int(&p->p_sigdivert, ~0);
		action = SIG_CATCH;
		wakeup(&p->p_sigdivert);
	} else if (pr->ps_flags & PS_TRACED) {
		action = SIG_DFL;
		atomic_setbits_int(&p->p_siglist, mask);
	} else {
		/*
		 * If the signal is being ignored,
		 * then we forget about it immediately.
		 * (Note: we don't set SIGCONT in ps_sigignore,
		 * and if it is set to SIG_IGN,
		 * action will be SIG_DFL here.)
		 */
		if (pr->ps_sigacts->ps_sigignore & mask)
			return;
		if (p->p_sigmask & mask) {
			action = SIG_HOLD;
		} else if (pr->ps_sigacts->ps_sigcatch & mask) {
			action = SIG_CATCH;
		} else {
			action = SIG_DFL;

			/* killing a niced process restores normal priority */
			if (prop & SA_KILL &&  pr->ps_nice > NZERO)
				 pr->ps_nice = NZERO;

			/*
			 * If sending a tty stop signal to a member of an
			 * orphaned process group, discard the signal here if
			 * the action is default; don't stop the process below
			 * if sleeping, and don't clear any pending SIGCONT.
			 */
			if (prop & SA_TTYSTOP && pr->ps_pgrp->pg_jobc == 0)
				return;
		}

		atomic_setbits_int(&p->p_siglist, mask);
	}

	/* stop and continue signals cancel each other's pending state */
	if (prop & SA_CONT)
		atomic_clearbits_int(&p->p_siglist, stopsigmask);

	if (prop & SA_STOP) {
		atomic_clearbits_int(&p->p_siglist, contsigmask);
		atomic_clearbits_int(&p->p_flag, P_CONTINUED);
	}

	/*
	 * XXX delay processing of SA_STOP signals unless action == SIG_DFL?
	 */
	if (prop & (SA_CONT | SA_STOP) && type != SPROPAGATED)
		TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link)
			if (q != p)
				ptsignal(q, signum, SPROPAGATED);

	/*
	 * Defer further processing for signals which are held,
	 * except that stopped processes must be continued by SIGCONT.
	 */
	if (action == SIG_HOLD && ((prop & SA_CONT) == 0 || p->p_stat != SSTOP))
		return;

	SCHED_LOCK(s);

	switch (p->p_stat) {

	case SSLEEP:
		/*
		 * If process is sleeping uninterruptibly
		 * we can't interrupt the sleep... the signal will
		 * be noticed when the process returns through
		 * trap() or syscall().
		 */
		if ((p->p_flag & P_SINTR) == 0)
			goto out;
		/*
		 * Process is sleeping and traced... make it runnable
		 * so it can discover the signal in issignal() and stop
		 * for the parent.
		 */
		if (pr->ps_flags & PS_TRACED)
			goto run;
		/*
		 * If SIGCONT is default (or ignored) and process is
		 * asleep, we are finished; the process should not
		 * be awakened.
		 */
		if ((prop & SA_CONT) && action == SIG_DFL) {
			atomic_clearbits_int(&p->p_siglist, mask);
			goto out;
		}
		/*
		 * When a sleeping process receives a stop
		 * signal, process immediately if possible.
		 */
		if ((prop & SA_STOP) && action == SIG_DFL) {
			/*
			 * If a child holding parent blocked,
			 * stopping could cause deadlock.
			 */
			if (pr->ps_flags & PS_PPWAIT)
				goto out;
			atomic_clearbits_int(&p->p_siglist, mask);
			p->p_xstat = signum;
			proc_stop(p, 0);
			goto out;
		}
		/*
		 * All other (caught or default) signals
		 * cause the process to run.
		 */
		goto runfast;
		/*NOTREACHED*/

	case SSTOP:
		/*
		 * If traced process is already stopped,
		 * then no further action is necessary.
		 */
		if (pr->ps_flags & PS_TRACED)
			goto out;

		/*
		 * Kill signal always sets processes running.
		 */
		if (signum == SIGKILL) {
			atomic_clearbits_int(&p->p_flag, P_SUSPSIG);
			goto runfast;
		}

		if (prop & SA_CONT) {
			/*
			 * If SIGCONT is default (or ignored), we continue the
			 * process but don't leave the signal in p_siglist, as
			 * it has no further action.  If SIGCONT is held, we
			 * continue the process and leave the signal in
			 * p_siglist.  If the process catches SIGCONT, let it
			 * handle the signal itself.  If it isn't waiting on
			 * an event, then it goes back to run state.
			 * Otherwise, process goes back to sleep state.
			 */
			atomic_setbits_int(&p->p_flag, P_CONTINUED);
			atomic_clearbits_int(&p->p_flag, P_SUSPSIG);
			wakeparent = 1;
			if (action == SIG_DFL)
				atomic_clearbits_int(&p->p_siglist, mask);
			if (action == SIG_CATCH)
				goto runfast;
			if (p->p_wchan == 0)
				goto run;
			p->p_stat = SSLEEP;
			goto out;
		}

		if (prop & SA_STOP) {
			/*
			 * Already stopped, don't need to stop again.
			 * (If we did the shell could get confused.)
			 */
			atomic_clearbits_int(&p->p_siglist, mask);
			goto out;
		}

		/*
		 * If process is sleeping interruptibly, then simulate a
		 * wakeup so that when it is continued, it will be made
		 * runnable and can look at the signal.  But don't make
		 * the process runnable, leave it stopped.
		 */
		if (p->p_wchan && p->p_flag & P_SINTR)
			unsleep(p);
		goto out;

	case SONPROC:
		signotify(p);
		/* FALLTHROUGH */
	default:
		/*
		 * SRUN, SIDL, SZOMB do nothing with the signal,
		 * other than kicking ourselves if we are running.
		 * It will either never be noticed, or noticed very soon.
		 */
		goto out;
	}
	/*NOTREACHED*/

runfast:
	/*
	 * Raise priority to at least PUSER.
	 */
	if (p->p_priority > PUSER)
		p->p_priority = PUSER;
run:
	setrunnable(p);
out:
	SCHED_UNLOCK(s);
	if (wakeparent)
		wakeup(pr->ps_pptr);
}
1066 
1067 /*
1068  * If the current process has received a signal (should be caught or cause
1069  * termination, should interrupt current syscall), return the signal number.
1070  * Stop signals with default action are processed immediately, then cleared;
1071  * they aren't returned.  This is checked after each entry to the system for
1072  * a syscall or trap (though this can usually be done without calling issignal
1073  * by checking the pending signal masks in the CURSIG macro.) The normal call
1074  * sequence is
1075  *
1076  *	while (signum = CURSIG(curproc))
1077  *		postsig(signum);
1078  *
1079  * Assumes that if the P_SINTR flag is set, we're holding both the
1080  * kernel and scheduler locks.
1081  */
1082 int
1083 issignal(struct proc *p)
1084 {
1085 	struct process *pr = p->p_p;
1086 	int signum, mask, prop;
1087 	int dolock = (p->p_flag & P_SINTR) == 0;
1088 	int s;
1089 
1090 	for (;;) {
1091 		mask = p->p_siglist & ~p->p_sigmask;
1092 		if (pr->ps_flags & PS_PPWAIT)
1093 			mask &= ~stopsigmask;
1094 		if (mask == 0)	 	/* no signal to send */
1095 			return (0);
1096 		signum = ffs((long)mask);
1097 		mask = sigmask(signum);
1098 		atomic_clearbits_int(&p->p_siglist, mask);
1099 
1100 		/*
1101 		 * We should see pending but ignored signals
1102 		 * only if PS_TRACED was on when they were posted.
1103 		 */
1104 		if (mask & pr->ps_sigacts->ps_sigignore &&
1105 		    (pr->ps_flags & PS_TRACED) == 0)
1106 			continue;
1107 
1108 		if ((pr->ps_flags & (PS_TRACED | PS_PPWAIT)) == PS_TRACED) {
1109 			/*
1110 			 * If traced, always stop, and stay
1111 			 * stopped until released by the debugger.
1112 			 */
1113 			p->p_xstat = signum;
1114 
1115 			if (dolock)
1116 				KERNEL_LOCK();
1117 			single_thread_set(p, SINGLE_PTRACE, 0);
1118 			if (dolock)
1119 				KERNEL_UNLOCK();
1120 
1121 			if (dolock)
1122 				SCHED_LOCK(s);
1123 			proc_stop(p, 1);
1124 			if (dolock)
1125 				SCHED_UNLOCK(s);
1126 
1127 			if (dolock)
1128 				KERNEL_LOCK();
1129 			single_thread_clear(p, 0);
1130 			if (dolock)
1131 				KERNEL_UNLOCK();
1132 
1133 			/*
1134 			 * If we are no longer being traced, or the parent
1135 			 * didn't give us a signal, look for more signals.
1136 			 */
1137 			if ((pr->ps_flags & PS_TRACED) == 0 || p->p_xstat == 0)
1138 				continue;
1139 
1140 			/*
1141 			 * If the new signal is being masked, look for other
1142 			 * signals.
1143 			 */
1144 			signum = p->p_xstat;
1145 			mask = sigmask(signum);
1146 			if ((p->p_sigmask & mask) != 0)
1147 				continue;
1148 
1149 			/* take the signal! */
1150 			atomic_clearbits_int(&p->p_siglist, mask);
1151 		}
1152 
1153 		prop = sigprop[signum];
1154 
1155 		/*
1156 		 * Decide whether the signal should be returned.
1157 		 * Return the signal's number, or fall through
1158 		 * to clear it from the pending mask.
1159 		 */
1160 		switch ((long)pr->ps_sigacts->ps_sigact[signum]) {
1161 		case (long)SIG_DFL:
1162 			/*
1163 			 * Don't take default actions on system processes.
1164 			 */
1165 			if (p->p_pid <= 1) {
1166 #ifdef DIAGNOSTIC
1167 				/*
1168 				 * Are you sure you want to ignore SIGSEGV
1169 				 * in init? XXX
1170 				 */
1171 				printf("Process (pid %d) got signal %d\n",
1172 				    p->p_pid, signum);
1173 #endif
1174 				break;		/* == ignore */
1175 			}
1176 			/*
1177 			 * If there is a pending stop signal to process
1178 			 * with default action, stop here,
1179 			 * then clear the signal.  However,
1180 			 * if process is member of an orphaned
1181 			 * process group, ignore tty stop signals.
1182 			 */
1183 			if (prop & SA_STOP) {
1184 				if (pr->ps_flags & PS_TRACED ||
1185 		    		    (pr->ps_pgrp->pg_jobc == 0 &&
1186 				    prop & SA_TTYSTOP))
1187 					break;	/* == ignore */
1188 				p->p_xstat = signum;
1189 				if (dolock)
1190 					SCHED_LOCK(s);
1191 				proc_stop(p, 1);
1192 				if (dolock)
1193 					SCHED_UNLOCK(s);
1194 				break;
1195 			} else if (prop & SA_IGNORE) {
1196 				/*
1197 				 * Except for SIGCONT, shouldn't get here.
1198 				 * Default action is to ignore; drop it.
1199 				 */
1200 				break;		/* == ignore */
1201 			} else
1202 				goto keep;
1203 			/*NOTREACHED*/
1204 		case (long)SIG_IGN:
1205 			/*
1206 			 * Masking above should prevent us ever trying
1207 			 * to take action on an ignored signal other
1208 			 * than SIGCONT, unless process is traced.
1209 			 */
1210 			if ((prop & SA_CONT) == 0 &&
1211 			    (pr->ps_flags & PS_TRACED) == 0)
1212 				printf("issignal\n");
1213 			break;		/* == ignore */
1214 		default:
1215 			/*
1216 			 * This signal has an action, let
1217 			 * postsig() process it.
1218 			 */
1219 			goto keep;
1220 		}
1221 	}
1222 	/* NOTREACHED */
1223 
1224 keep:
1225 	atomic_setbits_int(&p->p_siglist, mask); /*leave the signal for later */
1226 	return (signum);
1227 }
1228 
1229 /*
1230  * Put the argument process into the stopped state and notify the parent
1231  * via wakeup.  Signals are handled elsewhere.  The process must not be
1232  * on the run queue.
1233  */
1234 void
1235 proc_stop(struct proc *p, int sw)
1236 {
1237 	struct process *pr = p->p_p;
1238 	extern void *softclock_si;
1239 
1240 #ifdef MULTIPROCESSOR
1241 	SCHED_ASSERT_LOCKED();
1242 #endif
1243 
1244 	p->p_stat = SSTOP;
1245 	atomic_clearbits_int(&pr->ps_flags, PS_WAITED);
1246 	atomic_setbits_int(&pr->ps_flags, PS_STOPPED);
1247 	atomic_setbits_int(&p->p_flag, P_SUSPSIG);
1248 	if (!timeout_pending(&proc_stop_to)) {
1249 		timeout_add(&proc_stop_to, 0);
1250 		/*
1251 		 * We need this soft interrupt to be handled fast.
1252 		 * Extra calls to softclock don't hurt.
1253 		 */
1254                 softintr_schedule(softclock_si);
1255 	}
1256 	if (sw)
1257 		mi_switch();
1258 }
1259 
1260 /*
1261  * Called from a timeout to send signals to the parents of stopped processes.
1262  * We can't do this in proc_stop because it's called with nasty locks held
1263  * and we would need recursive scheduler lock to deal with that.
1264  */
1265 void
1266 proc_stop_sweep(void *v)
1267 {
1268 	struct process *pr;
1269 
1270 	LIST_FOREACH(pr, &allprocess, ps_list) {
1271 		if ((pr->ps_flags & PS_STOPPED) == 0)
1272 			continue;
1273 		atomic_clearbits_int(&pr->ps_flags, PS_STOPPED);
1274 
1275 		if ((pr->ps_pptr->ps_sigacts->ps_flags & SAS_NOCLDSTOP) == 0)
1276 			prsignal(pr->ps_pptr, SIGCHLD);
1277 		wakeup(pr->ps_pptr);
1278 	}
1279 }
1280 
1281 /*
1282  * Take the action for the specified signal
1283  * from the current set of pending signals.
1284  */
void
postsig(int signum)
{
	struct proc *p = curproc;
	struct sigacts *ps = p->p_p->ps_sigacts;
	sig_t action;
	u_long trapno;
	int mask, returnmask;
	union sigval sigval;
	int s, code;

#ifdef DIAGNOSTIC
	if (signum == 0)
		panic("postsig");
#endif

	KERNEL_LOCK();

	/* Remove the signal from the pending set before acting on it. */
	mask = sigmask(signum);
	atomic_clearbits_int(&p->p_siglist, mask);
	action = ps->ps_sigact[signum];
	sigval.sival_ptr = 0;

	/*
	 * Use the saved trap details only if they belong to this signal;
	 * otherwise report a plain user-generated signal.
	 */
	if (p->p_sisig != signum) {
		trapno = 0;
		code = SI_USER;
		sigval.sival_ptr = 0;
	} else {
		trapno = p->p_sitrapno;
		code = p->p_sicode;
		sigval = p->p_sigval;
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_PSIG)) {
		siginfo_t si;

		initsiginfo(&si, signum, trapno, code, sigval);
		ktrpsig(p, signum, action, p->p_flag & P_SIGSUSPEND ?
		    p->p_oldmask : p->p_sigmask, code, &si);
	}
#endif
	if (action == SIG_DFL) {
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		sigexit(p, signum);
		/* NOTREACHED */
	} else {
		/*
		 * If we get here, the signal must be caught.
		 */
#ifdef DIAGNOSTIC
		if (action == SIG_IGN || (p->p_sigmask & mask))
			panic("postsig action");
#endif
		/*
		 * Set the new mask value and also defer further
		 * occurrences of this signal.
		 *
		 * Special case: user has done a sigpause.  Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigpause is what we want
		 * restored after the signal processing is completed.
		 */
#ifdef MULTIPROCESSOR
		s = splsched();
#else
		s = splhigh();
#endif
		if (p->p_flag & P_SIGSUSPEND) {
			atomic_clearbits_int(&p->p_flag, P_SIGSUSPEND);
			returnmask = p->p_oldmask;
		} else {
			returnmask = p->p_sigmask;
		}
		p->p_sigmask |= ps->ps_catchmask[signum];
		/* SA_RESETHAND: restore the default disposition after one delivery. */
		if ((ps->ps_sigreset & mask) != 0) {
			ps->ps_sigcatch &= ~mask;
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				ps->ps_sigignore |= mask;
			ps->ps_sigact[signum] = SIG_DFL;
		}
		splx(s);
		p->p_ru.ru_nsignals++;
		/* The saved trap details are consumed; reset them. */
		if (p->p_sisig == signum) {
			p->p_sisig = 0;
			p->p_sitrapno = 0;
			p->p_sicode = SI_USER;
			p->p_sigval.sival_ptr = NULL;
		}

		/* Hand off to the emulation's signal trampoline setup. */
		(*p->p_emul->e_sendsig)(action, signum, returnmask, trapno,
		    code, sigval);
	}

	KERNEL_UNLOCK();
}
1384 
1385 /*
1386  * Force the current process to exit with the specified signal, dumping core
1387  * if appropriate.  We bypass the normal tests for masked and caught signals,
1388  * allowing unrecoverable failures to terminate the process without changing
1389  * signal state.  Mark the accounting record with the signal termination.
1390  * If dumping core, save the signal number for the debugger.  Calls exit and
1391  * does not return.
1392  */
void
sigexit(struct proc *p, int signum)
{
	/* Mark process as going away */
	atomic_setbits_int(&p->p_flag, P_WEXIT);

	/* Note the signal termination in the accounting record. */
	p->p_p->ps_acflag |= AXSIG;
	if (sigprop[signum] & SA_CORE) {
		/* Save the signal number for the debugger. */
		p->p_sisig = signum;

		/* if there are other threads, pause them */
		if (TAILQ_FIRST(&p->p_p->ps_threads) != p ||
		    TAILQ_NEXT(p, p_thr_link) != NULL)
			single_thread_set(p, SINGLE_SUSPEND, 0);

		/* On a successful dump, report "core dumped" to wait(2). */
		if (coredump(p) == 0)
			signum |= WCOREFLAG;
	}
	exit1(p, W_EXITCODE(0, signum), EXIT_NORMAL);
	/* NOTREACHED */
}
1414 
/*
 * When nonzero, processes that have used set-id privileges (PS_SUGID)
 * are not allowed to dump core unless root; a value of 2 instead
 * redirects their dumps into /var/crash/ (see coredump() below).
 */
int nosuidcoredump = 1;

/* State passed from coredump() to the coredump write routines below. */
struct coredump_iostate {
	struct proc *io_proc;	/* process being dumped */
	struct vnode *io_vp;	/* vnode of the core file */
	struct ucred *io_cred;	/* credentials used for the writes */
	off_t io_offset;	/* current offset within the core file */
};
1423 
1424 /*
1425  * Dump core, into a file named "progname.core", unless the process was
1426  * setuid/setgid.
1427  */
int
coredump(struct proc *p)
{
#ifdef SMALL_KERNEL
	return EPERM;
#else
	struct vnode *vp;
	struct ucred *cred = p->p_ucred;
	struct vmspace *vm = p->p_vmspace;
	struct nameidata nd;
	struct vattr vattr;
	struct coredump_iostate	io;
	int error, len;
	char name[sizeof("/var/crash/") + MAXCOMLEN + sizeof(".core")];
	char *dir = "";

	p->p_p->ps_flags |= PS_COREDUMP;

	/*
	 * Don't dump if not root and the process has used set user or
	 * group privileges, unless the nosuidcoredump sysctl is set to 2,
	 * in which case dumps are put into /var/crash/.
	 */
	if (((p->p_p->ps_flags & PS_SUGID) && (error = suser(p, 0))) ||
	   ((p->p_p->ps_flags & PS_SUGID) && nosuidcoredump)) {
		if (nosuidcoredump == 2)
			dir = "/var/crash/";
		else
			return (EPERM);
	}

	/* Don't dump if will exceed file size limit. */
	if (USPACE + ptoa(vm->vm_dsize + vm->vm_ssize) >=
	    p->p_rlimit[RLIMIT_CORE].rlim_cur)
		return (EFBIG);

	/* Build "progname.core", optionally prefixed with /var/crash/. */
	len = snprintf(name, sizeof(name), "%s%s.core", dir, p->p_comm);
	if (len >= sizeof(name))
		return (EACCES);

	/*
	 * ... but actually write it as UID
	 */
	cred = crdup(cred);
	cred->cr_uid = p->p_cred->p_ruid;
	cred->cr_gid = p->p_cred->p_rgid;

	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, p);

	error = vn_open(&nd, O_CREAT | FWRITE | O_NOFOLLOW, S_IRUSR | S_IWUSR);

	if (error)
		goto out;

	/*
	 * Don't dump to non-regular files, files with links, or files
	 * owned by someone else.
	 */
	vp = nd.ni_vp;
	if ((error = VOP_GETATTR(vp, &vattr, cred, p)) != 0) {
		VOP_UNLOCK(vp, 0, p);
		vn_close(vp, FWRITE, cred, p);
		goto out;
	}
	/* Also refuse group/other-accessible files. */
	if (vp->v_type != VREG || vattr.va_nlink != 1 ||
	    vattr.va_mode & ((VREAD | VWRITE) >> 3 | (VREAD | VWRITE) >> 6) ||
	    vattr.va_uid != cred->cr_uid) {
		error = EACCES;
		VOP_UNLOCK(vp, 0, p);
		vn_close(vp, FWRITE, cred, p);
		goto out;
	}
	/* Truncate any pre-existing core file before writing. */
	VATTR_NULL(&vattr);
	vattr.va_size = 0;
	VOP_SETATTR(vp, &vattr, cred, p);
	p->p_p->ps_acflag |= ACORE;

	io.io_proc = p;
	io.io_vp = vp;
	io.io_cred = cred;
	io.io_offset = 0;
	/* Drop the vnode lock but keep a reference across the dump. */
	VOP_UNLOCK(vp, 0, p);
	vref(vp);
	error = vn_close(vp, FWRITE, cred, p);
	if (error == 0)
		error = (*p->p_emul->e_coredump)(p, &io);
	vrele(vp);
out:
	crfree(cred);
	return (error);
#endif
}
1520 
/* Traditional core dump: MD header + segments + core header. */
int
coredump_trad(struct proc *p, void *cookie)
{
#ifdef SMALL_KERNEL
	return EPERM;
#else
	struct coredump_iostate *io = cookie;
	struct vmspace *vm = io->io_proc->p_vmspace;
	struct vnode *vp = io->io_vp;
	struct ucred *cred = io->io_cred;
	struct core core;
	int error;

	/* Fill in the machine-independent parts of the core header. */
	core.c_midmag = 0;
	strlcpy(core.c_name, p->p_comm, sizeof(core.c_name));
	core.c_nseg = 0;
	core.c_signo = p->p_sisig;
	core.c_ucode = p->p_sitrapno;
	core.c_cpusize = 0;
	core.c_tsize = (u_long)ptoa(vm->vm_tsize);
	core.c_dsize = (u_long)ptoa(vm->vm_dsize);
	core.c_ssize = (u_long)round_page(ptoa(vm->vm_ssize));
	/*
	 * NOTE(review): c_hdrsize (used for the final write below) is
	 * presumably filled in by cpu_coredump()/uvm_coredump() — confirm.
	 */
	error = cpu_coredump(p, vp, cred, &core);
	if (error)
		return (error);
	/*
	 * uvm_coredump() spits out all appropriate segments.
	 * All that's left to do is to write the core header.
	 */
	error = uvm_coredump(p, vp, cred, &core);
	if (error)
		return (error);
	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&core,
	    (int)core.c_hdrsize, (off_t)0,
	    UIO_SYSSPACE, IO_UNIT, cred, NULL, p);
	return (error);
#endif
}
1559 
1560 #ifndef SMALL_KERNEL
/*
 * Write `len' bytes of core data at the current file offset,
 * in MAXPHYS-sized chunks, yielding between chunks.
 */
int
coredump_write(void *cookie, enum uio_seg segflg, const void *data, size_t len)
{
	struct coredump_iostate *io = cookie;
	off_t coffset = 0;
	size_t csize;
	int chunk, error;

	csize = len;
	do {
		/* Abort the dump if the process was sent SIGKILL. */
		if (io->io_proc->p_siglist & sigmask(SIGKILL))
			return (EINTR);

		/* Rest of the loop sleeps with lock held, so... */
		yield();

		chunk = MIN(csize, MAXPHYS);
		error = vn_rdwr(UIO_WRITE, io->io_vp,
		    (caddr_t)data + coffset, chunk,
		    io->io_offset + coffset, segflg,
		    IO_UNIT, io->io_cred, NULL, io->io_proc);
		if (error) {
			printf("pid %d (%s): %s write of %lu@%p"
			    " at %lld failed: %d\n",
			    io->io_proc->p_pid, io->io_proc->p_comm,
			    segflg == UIO_USERSPACE ? "user" : "system",
			    len, data, (long long)io->io_offset, error);
			return (error);
		}

		coffset += chunk;
		csize -= chunk;
	} while (csize > 0);

	/* Advance the file offset for the next call. */
	io->io_offset += len;
	return (0);
}
1598 
1599 void
1600 coredump_unmap(void *cookie, vaddr_t start, vaddr_t end)
1601 {
1602 	struct coredump_iostate *io = cookie;
1603 
1604 	uvm_unmap(&io->io_proc->p_vmspace->vm_map, start, end);
1605 }
1606 
1607 #endif	/* !SMALL_KERNEL */
1608 
1609 /*
1610  * Nonexistent system call-- signal process (may want to handle it).
1611  * Flag error in case process won't see signal immediately (blocked or ignored).
1612  */
1613 /* ARGSUSED */
1614 int
1615 sys_nosys(struct proc *p, void *v, register_t *retval)
1616 {
1617 
1618 	ptsignal(p, SIGSYS, STHREAD);
1619 	return (ENOSYS);
1620 }
1621 
1622 int
1623 sys___thrsigdivert(struct proc *p, void *v, register_t *retval)
1624 {
1625 	struct sys___thrsigdivert_args /* {
1626 		syscallarg(sigset_t) sigmask;
1627 		syscallarg(siginfo_t *) info;
1628 		syscallarg(const struct timespec *) timeout;
1629 	} */ *uap = v;
1630 	sigset_t mask;
1631 	sigset_t *m;
1632 	long long to_ticks = 0;
1633 	int error;
1634 
1635 	m = NULL;
1636 	mask = SCARG(uap, sigmask) &~ sigcantmask;
1637 
1638 	/* pending signal for this thread? */
1639 	if (p->p_siglist & mask)
1640 		m = &p->p_siglist;
1641 	else if (p->p_p->ps_mainproc->p_siglist & mask)
1642 		m = &p->p_p->ps_mainproc->p_siglist;
1643 	if (m != NULL) {
1644 		int sig = ffs((long)(*m & mask));
1645 		atomic_clearbits_int(m, sigmask(sig));
1646 		*retval = sig;
1647 		return (0);
1648 	}
1649 
1650 	if (SCARG(uap, timeout) != NULL) {
1651 		struct timespec ts;
1652 		if ((error = copyin(SCARG(uap, timeout), &ts, sizeof(ts))) != 0)
1653 			return (error);
1654 #ifdef KTRACE
1655 		if (KTRPOINT(p, KTR_STRUCT))
1656 			ktrreltimespec(p, &ts);
1657 #endif
1658 		to_ticks = (long long)hz * ts.tv_sec +
1659 		    ts.tv_nsec / (tick * 1000);
1660 		if (to_ticks > INT_MAX)
1661 			to_ticks = INT_MAX;
1662 	}
1663 
1664 	p->p_sigwait = 0;
1665 	atomic_setbits_int(&p->p_sigdivert, mask);
1666 	error = tsleep(&p->p_sigdivert, PPAUSE|PCATCH, "sigwait",
1667 	    (int)to_ticks);
1668 	if (p->p_sigdivert) {
1669 		/* interrupted */
1670 		KASSERT(error != 0);
1671 		atomic_clearbits_int(&p->p_sigdivert, ~0);
1672 		if (error == EINTR)
1673 			error = ERESTART;
1674 		else if (error == ETIMEDOUT)
1675 			error = EAGAIN;
1676 		return (error);
1677 
1678 	}
1679 	KASSERT(p->p_sigwait != 0);
1680 	*retval = p->p_sigwait;
1681 
1682 	if (SCARG(uap, info) == NULL) {
1683 		error = 0;
1684 	} else {
1685 		siginfo_t si;
1686 
1687 		memset(&si, 0, sizeof(si));
1688 		si.si_signo = p->p_sigwait;
1689 		error = copyout(&si, SCARG(uap, info), sizeof(si));
1690 	}
1691 	return (error);
1692 }
1693 
1694 void
1695 initsiginfo(siginfo_t *si, int sig, u_long trapno, int code, union sigval val)
1696 {
1697 	memset(si, 0, sizeof(*si));
1698 
1699 	si->si_signo = sig;
1700 	si->si_code = code;
1701 	if (code == SI_USER) {
1702 		si->si_value = val;
1703 	} else {
1704 		switch (sig) {
1705 		case SIGSEGV:
1706 		case SIGILL:
1707 		case SIGBUS:
1708 		case SIGFPE:
1709 			si->si_addr = val.sival_ptr;
1710 			si->si_trapno = trapno;
1711 			break;
1712 		case SIGXFSZ:
1713 			break;
1714 		}
1715 	}
1716 }
1717 
1718 int
1719 filt_sigattach(struct knote *kn)
1720 {
1721 	struct process *pr = curproc->p_p;
1722 
1723 	kn->kn_ptr.p_process = pr;
1724 	kn->kn_flags |= EV_CLEAR;		/* automatically set */
1725 
1726 	/* XXX lock the proc here while adding to the list? */
1727 	SLIST_INSERT_HEAD(&pr->ps_klist, kn, kn_selnext);
1728 
1729 	return (0);
1730 }
1731 
1732 void
1733 filt_sigdetach(struct knote *kn)
1734 {
1735 	struct process *pr = kn->kn_ptr.p_process;
1736 
1737 	SLIST_REMOVE(&pr->ps_klist, kn, knote, kn_selnext);
1738 }
1739 
1740 /*
1741  * signal knotes are shared with proc knotes, so we apply a mask to
1742  * the hint in order to differentiate them from process hints.  This
1743  * could be avoided by using a signal-specific knote list, but probably
1744  * isn't worth the trouble.
1745  */
1746 int
1747 filt_signal(struct knote *kn, long hint)
1748 {
1749 
1750 	if (hint & NOTE_SIGNAL) {
1751 		hint &= ~NOTE_SIGNAL;
1752 
1753 		if (kn->kn_id == hint)
1754 			kn->kn_data++;
1755 	}
1756 	return (kn->kn_data != 0);
1757 }
1758 
/*
 * Per-thread work done on the way back to userspace: post deferred
 * timer signals, deliver pending catchable signals, finish sigsuspend
 * handling, honor single-thread requests, and reset priority.
 */
void
userret(struct proc *p)
{
	int sig;

	/* send SIGPROF or SIGVTALRM if their timers interrupted this thread */
	if (p->p_flag & P_PROFPEND) {
		atomic_clearbits_int(&p->p_flag, P_PROFPEND);
		KERNEL_LOCK();
		psignal(p, SIGPROF);
		KERNEL_UNLOCK();
	}
	if (p->p_flag & P_ALRMPEND) {
		atomic_clearbits_int(&p->p_flag, P_ALRMPEND);
		KERNEL_LOCK();
		psignal(p, SIGVTALRM);
		KERNEL_UNLOCK();
	}

	/* Take the action for each unmasked pending signal. */
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);

	/*
	 * If P_SIGSUSPEND is still set here, then we still need to restore
	 * the original sigmask before returning to userspace.  Also, this
	 * might unmask some pending signals, so we need to check a second
	 * time for signals to post.
	 */
	if (p->p_flag & P_SIGSUSPEND) {
		atomic_clearbits_int(&p->p_flag, P_SIGSUSPEND);
		p->p_sigmask = p->p_oldmask;

		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
	}

	/* Suspend here if another thread requested single-threading. */
	if (p->p_flag & P_SUSPSINGLE) {
		KERNEL_LOCK();
		single_thread_check(p, 0);
		KERNEL_UNLOCK();
	}

	p->p_cpu->ci_schedstate.spc_curpriority = p->p_priority = p->p_usrpri;
}
1803 
/*
 * Cooperate with a pending single-thread request from another thread
 * in this process: check in (decrement ps_singlecount, waking the
 * requestor when we're the last), then exit, unwind, or suspend as
 * the request demands.  Returns 0 when no request is pending, or
 * ERESTART/EINTR when a "deep" caller must first unwind to the
 * kernel boundary.
 */
int
single_thread_check(struct proc *p, int deep)
{
	struct process *pr = p->p_p;

	if (pr->ps_single != NULL && pr->ps_single != p) {
		do {
			int s;

			/* if we're in deep, we need to unwind to the edge */
			if (deep) {
				if (pr->ps_flags & PS_SINGLEUNWIND)
					return (ERESTART);
				if (pr->ps_flags & PS_SINGLEEXIT)
					return (EINTR);
			}

			/* Last thread to check in wakes the requestor. */
			if (--pr->ps_singlecount == 0)
				wakeup(&pr->ps_singlecount);
			if (pr->ps_flags & PS_SINGLEEXIT)
				exit1(p, 0, EXIT_THREAD_NOCHECK);

			/* not exiting and don't need to unwind, so suspend */
			SCHED_LOCK(s);
			p->p_stat = SSTOP;
			mi_switch();
			SCHED_UNLOCK(s);
		} while (pr->ps_single != NULL);
	}

	return (0);
}
1836 
1837 /*
1838  * Stop other threads in the process.  The mode controls how and
1839  * where the other threads should stop:
1840  *  - SINGLE_SUSPEND: stop wherever they are, will later either be told to exit
1841  *    (by setting to SINGLE_EXIT) or be released (via single_thread_clear())
1842  *  - SINGLE_PTRACE: stop wherever they are, will wait for them to stop
1843  *    later (via single_thread_wait()) and released as with SINGLE_SUSPEND
1844  *  - SINGLE_UNWIND: just unwind to kernel boundary, will be told to exit
1845  *    or released as with SINGLE_SUSPEND
1846  *  - SINGLE_EXIT: unwind to kernel boundary and exit
1847  */
int
single_thread_set(struct proc *p, enum single_thread_mode mode, int deep)
{
	struct process *pr = p->p_p;
	struct proc *q;
	int error;

#ifdef MULTIPROCESSOR
	KASSERT(__mp_lock_held(&kernel_lock));
#endif

	/* Yield to a request already pending from another thread. */
	if ((error = single_thread_check(p, deep)))
		return error;

	switch (mode) {
	case SINGLE_SUSPEND:
	case SINGLE_PTRACE:
		break;
	case SINGLE_UNWIND:
		atomic_setbits_int(&pr->ps_flags, PS_SINGLEUNWIND);
		break;
	case SINGLE_EXIT:
		/* SINGLE_EXIT supersedes any pending unwind request. */
		atomic_setbits_int(&pr->ps_flags, PS_SINGLEEXIT);
		atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND);
		break;
#ifdef DIAGNOSTIC
	default:
		panic("single_thread_mode = %d", mode);
#endif
	}
	/* We are the single-threading thread; count threads to wait for. */
	pr->ps_single = p;
	pr->ps_singlecount = 0;
	TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
		int s;

		if (q == p)
			continue;
		if (q->p_flag & P_WEXIT) {
			/* Already exiting; for SINGLE_EXIT just unpark it. */
			if (mode == SINGLE_EXIT) {
				SCHED_LOCK(s);
				if (q->p_stat == SSTOP) {
					setrunnable(q);
					pr->ps_singlecount++;
				}
				SCHED_UNLOCK(s);
			}
			continue;
		}
		SCHED_LOCK(s);
		atomic_setbits_int(&q->p_flag, P_SUSPSINGLE);
		switch (q->p_stat) {
		case SIDL:
		case SRUN:
			/* Will notice P_SUSPSINGLE when it runs. */
			pr->ps_singlecount++;
			break;
		case SSLEEP:
			/* if it's not interruptible, then just have to wait */
			if (q->p_flag & P_SINTR) {
				/* merely need to suspend?  just stop it */
				if (mode == SINGLE_SUSPEND ||
				    mode == SINGLE_PTRACE) {
					q->p_stat = SSTOP;
					break;
				}
				/* need to unwind or exit, so wake it */
				setrunnable(q);
			}
			pr->ps_singlecount++;
			break;
		case SSTOP:
			/* Already stopped; only wake it if it must exit. */
			if (mode == SINGLE_EXIT) {
				setrunnable(q);
				pr->ps_singlecount++;
			}
			break;
		case SZOMB:
		case SDEAD:
			break;
		case SONPROC:
			/* Running on a CPU; prod it to trap out and check. */
			pr->ps_singlecount++;
			signotify(q);
			break;
		}
		SCHED_UNLOCK(s);
	}

	/* SINGLE_PTRACE callers do single_thread_wait() themselves later. */
	if (mode != SINGLE_PTRACE)
		single_thread_wait(pr);

	return 0;
}
1939 
1940 void
1941 single_thread_wait(struct process *pr)
1942 {
1943 	/* wait until they're all suspended */
1944 	while (pr->ps_singlecount > 0)
1945 		tsleep(&pr->ps_singlecount, PUSER, "suspend", 0);
1946 }
1947 
/*
 * Release the threads suspended by single_thread_set().  `flag' is a
 * p_flag bit that, when still set on a thread, keeps it stopped for
 * some other reason (proc_stop() sets P_SUSPSIG, for example).
 */
void
single_thread_clear(struct proc *p, int flag)
{
	struct process *pr = p->p_p;
	struct proc *q;

	KASSERT(pr->ps_single == p);
#ifdef MULTIPROCESSOR
	KASSERT(__mp_lock_held(&kernel_lock));
#endif

	pr->ps_single = NULL;
	atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND | PS_SINGLEEXIT);
	TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
		int s;

		if (q == p || (q->p_flag & P_SUSPSINGLE) == 0)
			continue;
		atomic_clearbits_int(&q->p_flag, P_SUSPSINGLE);

		/*
		 * if the thread was only stopped for single threading
		 * then clearing that either makes it runnable or puts
		 * it back into some sleep queue
		 */
		SCHED_LOCK(s);
		if (q->p_stat == SSTOP && (q->p_flag & flag) == 0) {
			if (q->p_wchan == 0)
				setrunnable(q);
			else
				q->p_stat = SSLEEP;
		}
		SCHED_UNLOCK(s);
	}
}
1983