xref: /openbsd-src/sys/kern/kern_sig.c (revision 48950c12d106c85f315112191a0228d7b83b9510)
1 /*	$OpenBSD: kern_sig.c,v 1.148 2013/02/08 04:30:37 guenther Exp $	*/
2 /*	$NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $	*/
3 
4 /*
5  * Copyright (c) 1997 Theo de Raadt. All rights reserved.
6  * Copyright (c) 1982, 1986, 1989, 1991, 1993
7  *	The Regents of the University of California.  All rights reserved.
8  * (c) UNIX System Laboratories, Inc.
9  * All or some portions of this file are derived from material licensed
10  * to the University of California by American Telephone and Telegraph
11  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
12  * the permission of UNIX System Laboratories, Inc.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  * 3. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)kern_sig.c	8.7 (Berkeley) 4/18/94
39  */
40 
41 #define	SIGPROP		/* include signal properties table */
42 #include <sys/param.h>
43 #include <sys/signalvar.h>
44 #include <sys/resourcevar.h>
45 #include <sys/queue.h>
46 #include <sys/namei.h>
47 #include <sys/vnode.h>
48 #include <sys/event.h>
49 #include <sys/proc.h>
50 #include <sys/systm.h>
51 #include <sys/times.h>
52 #include <sys/buf.h>
53 #include <sys/acct.h>
54 #include <sys/file.h>
55 #include <sys/kernel.h>
56 #include <sys/wait.h>
57 #include <sys/ktrace.h>
58 #include <sys/stat.h>
59 #include <sys/core.h>
60 #include <sys/malloc.h>
61 #include <sys/pool.h>
62 #include <sys/ptrace.h>
63 #include <sys/sched.h>
64 #include <sys/user.h>
65 
66 #include <sys/mount.h>
67 #include <sys/syscallargs.h>
68 
69 #include <machine/cpu.h>
70 
71 #include <uvm/uvm_extern.h>
72 
73 int	filt_sigattach(struct knote *kn);
74 void	filt_sigdetach(struct knote *kn);
75 int	filt_signal(struct knote *kn, long hint);
76 
77 struct filterops sig_filtops =
78 	{ 0, filt_sigattach, filt_sigdetach, filt_signal };
79 
80 void proc_stop(struct proc *p, int);
81 void proc_stop_sweep(void *);
82 struct timeout proc_stop_to;
83 
84 int cansignal(struct proc *, struct pcred *, struct proc *, int);
85 
86 struct pool sigacts_pool;	/* memory pool for sigacts structures */
87 
88 /*
89  * Can process p, with pcred pc, send the signal signum to process q?
90  */
91 int
92 cansignal(struct proc *p, struct pcred *pc, struct proc *q, int signum)
93 {
	/*
	 * Returns 1 if permitted, 0 if not.  signum 0 (kill(2)'s
	 * existence/permission probe) is explicitly allowed through
	 * the setugid case list below.
	 */
94 	if (pc->pc_ucred->cr_uid == 0)
95 		return (1);		/* root can always signal */
96 
97 	if (p == q)
98 		return (1);		/* process can always signal itself */
99 
100 	if (signum == SIGCONT && q->p_p->ps_session == p->p_p->ps_session)
101 		return (1);		/* SIGCONT in session */
102 
103 	/*
104 	 * Using kill(), only certain signals can be sent to setugid
105 	 * child processes
106 	 */
107 	if (q->p_p->ps_flags & PS_SUGID) {
108 		switch (signum) {
109 		case 0:
110 		case SIGKILL:
111 		case SIGINT:
112 		case SIGTERM:
113 		case SIGALRM:
114 		case SIGSTOP:
115 		case SIGTTIN:
116 		case SIGTTOU:
117 		case SIGTSTP:
118 		case SIGHUP:
119 		case SIGUSR1:
120 		case SIGUSR2:
			/*
			 * Listed signals are allowed only when the sender's
			 * real or effective uid matches the target's real uid.
			 */
121 			if (pc->p_ruid == q->p_cred->p_ruid ||
122 			    pc->pc_ucred->cr_uid == q->p_cred->p_ruid)
123 				return (1);
124 		}
		/* any other signal to a setugid process is refused */
125 		return (0);
126 	}
127 
	/* normal case: real/effective uid vs. target's real/saved uid */
128 	if (pc->p_ruid == q->p_cred->p_ruid ||
129 	    pc->p_ruid == q->p_cred->p_svuid ||
130 	    pc->pc_ucred->cr_uid == q->p_cred->p_ruid ||
131 	    pc->pc_ucred->cr_uid == q->p_cred->p_svuid)
132 		return (1);
133 	return (0);
134 }
135 
136 /*
137  * Initialize signal-related data structures.
138  */
139 void
140 signal_init(void)
141 {
	/*
	 * Set up the proc_stop_sweep timeout (presumably armed later by
	 * proc_stop() -- confirm against that code, not visible here).
	 */
142 	timeout_set(&proc_stop_to, proc_stop_sweep, NULL);
143 
	/* sigacts allocations never happen from interrupt context */
144 	pool_init(&sigacts_pool, sizeof(struct sigacts), 0, 0, 0, "sigapl",
145 	    &pool_allocator_nointr);
146 }
147 
148 /*
149  * Create an initial sigacts structure, using the same signal state
150  * as p.
151  */
152 struct sigacts *
153 sigactsinit(struct proc *p)
154 {
155 	struct sigacts *ps;
156 
	/* PR_WAITOK: may sleep, never returns NULL */
157 	ps = pool_get(&sigacts_pool, PR_WAITOK);
	/* start as an exact copy of p's signal state, with our own refcount */
158 	memcpy(ps, p->p_sigacts, sizeof(struct sigacts));
159 	ps->ps_refcnt = 1;
160 	return (ps);
161 }
162 
163 /*
164  * Share a sigacts structure.
165  */
166 struct sigacts *
167 sigactsshare(struct proc *p)
168 {
	/*
	 * Plain (non-atomic) increment; assumes callers are serialized,
	 * presumably by the kernel lock -- confirm.
	 */
169 	p->p_sigacts->ps_refcnt++;
170 	return p->p_sigacts;
171 }
172 
173 /*
174  * Initialize a new sigaltstack structure.
175  */
176 void
177 sigstkinit(struct sigaltstack *ss)
178 {
	/* no alternate signal stack configured initially */
179 	ss->ss_flags = SS_DISABLE;
180 	ss->ss_size = 0;
181 	ss->ss_sp = 0;
182 }
183 
184 /*
185  * Make this process not share its sigacts, maintaining all
186  * signal state.
187  */
188 void
189 sigactsunshare(struct proc *p)
190 {
191 	struct sigacts *newps;
192 
	/* sole owner already: nothing to do */
193 	if (p->p_sigacts->ps_refcnt == 1)
194 		return;
195 
	/* make a private copy first, then drop our ref on the shared one */
196 	newps = sigactsinit(p);
197 	sigactsfree(p);
198 	p->p_sigacts = newps;
199 }
200 
201 /*
202  * Release a sigacts structure.
203  */
204 void
205 sigactsfree(struct proc *p)
206 {
207 	struct sigacts *ps = p->p_sigacts;
208 
	/* other sharers remain; just drop our reference */
209 	if (--ps->ps_refcnt > 0)
210 		return;
211 
	/* clear the pointer before freeing to catch stray use */
212 	p->p_sigacts = NULL;
213 
214 	pool_put(&sigacts_pool, ps);
215 }
216 
217 /* ARGSUSED */
218 int
219 sys_sigaction(struct proc *p, void *v, register_t *retval)
220 {
221 	struct sys_sigaction_args /* {
222 		syscallarg(int) signum;
223 		syscallarg(const struct sigaction *) nsa;
224 		syscallarg(struct sigaction *) osa;
225 	} */ *uap = v;
226 	struct sigaction vec;
227 #ifdef KTRACE
228 	struct sigaction ovec;
229 #endif
230 	struct sigaction *sa;
231 	const struct sigaction *nsa;
232 	struct sigaction *osa;
233 	struct sigacts *ps = p->p_sigacts;
234 	int signum;
235 	int bit, error;
236 
237 	signum = SCARG(uap, signum);
238 	nsa = SCARG(uap, nsa);
239 	osa = SCARG(uap, osa);
240 
	/* SIGKILL/SIGSTOP dispositions may be read but never changed */
241 	if (signum <= 0 || signum >= NSIG ||
242 	    (nsa && (signum == SIGKILL || signum == SIGSTOP)))
243 		return (EINVAL);
244 	sa = &vec;
	/* report the current disposition first, rebuilt from the sigacts bits */
245 	if (osa) {
246 		sa->sa_handler = ps->ps_sigact[signum];
247 		sa->sa_mask = ps->ps_catchmask[signum];
248 		bit = sigmask(signum);
249 		sa->sa_flags = 0;
250 		if ((ps->ps_sigonstack & bit) != 0)
251 			sa->sa_flags |= SA_ONSTACK;
		/* ps_sigintr stores the inverse of SA_RESTART */
252 		if ((ps->ps_sigintr & bit) == 0)
253 			sa->sa_flags |= SA_RESTART;
254 		if ((ps->ps_sigreset & bit) != 0)
255 			sa->sa_flags |= SA_RESETHAND;
256 		if ((ps->ps_siginfo & bit) != 0)
257 			sa->sa_flags |= SA_SIGINFO;
258 		if (signum == SIGCHLD) {
259 			if ((ps->ps_flags & SAS_NOCLDSTOP) != 0)
260 				sa->sa_flags |= SA_NOCLDSTOP;
261 			if ((ps->ps_flags & SAS_NOCLDWAIT) != 0)
262 				sa->sa_flags |= SA_NOCLDWAIT;
263 		}
264 		if ((sa->sa_mask & bit) == 0)
265 			sa->sa_flags |= SA_NODEFER;
266 		sa->sa_mask &= ~bit;
267 		error = copyout(sa, osa, sizeof (vec));
268 		if (error)
269 			return (error);
270 #ifdef KTRACE
		/* save a copy: vec is overwritten by the copyin() below */
271 		if (KTRPOINT(p, KTR_STRUCT))
272 			ovec = vec;
273 #endif
274 	}
	/* then install the new disposition, if one was supplied */
275 	if (nsa) {
276 		error = copyin(nsa, sa, sizeof (vec));
277 		if (error)
278 			return (error);
279 #ifdef KTRACE
280 		if (KTRPOINT(p, KTR_STRUCT))
281 			ktrsigaction(p, sa);
282 #endif
283 		setsigvec(p, signum, sa);
284 	}
285 #ifdef KTRACE
286 	if (osa && KTRPOINT(p, KTR_STRUCT))
287 		ktrsigaction(p, &ovec);
288 #endif
289 	return (0);
290 }
291 
292 void
293 setsigvec(struct proc *p, int signum, struct sigaction *sa)
294 {
295 	struct sigacts *ps = p->p_sigacts;
296 	int bit;
297 	int s;
298 
299 	bit = sigmask(signum);
300 	/*
301 	 * Change setting atomically.
302 	 */
	/* splhigh() blocks interrupts while the sigacts bits are updated */
303 	s = splhigh();
304 	ps->ps_sigact[signum] = sa->sa_handler;
	/* without SA_NODEFER, the signal itself is blocked in its handler */
305 	if ((sa->sa_flags & SA_NODEFER) == 0)
306 		sa->sa_mask |= sigmask(signum);
	/* sigcantmask bits (unblockable signals) are silently dropped */
307 	ps->ps_catchmask[signum] = sa->sa_mask &~ sigcantmask;
308 	if (signum == SIGCHLD) {
309 		if (sa->sa_flags & SA_NOCLDSTOP)
310 			atomic_setbits_int(&ps->ps_flags, SAS_NOCLDSTOP);
311 		else
312 			atomic_clearbits_int(&ps->ps_flags, SAS_NOCLDSTOP);
313 		/*
314 		 * If the SA_NOCLDWAIT flag is set or the handler
315 		 * is SIG_IGN we reparent the dying child to PID 1
316 		 * (init) which will reap the zombie.  Because we use
317 		 * init to do our dirty work we never set SAS_NOCLDWAIT
318 		 * for PID 1.
319 		 */
320 		if (initproc->p_sigacts != ps &&
321 		    ((sa->sa_flags & SA_NOCLDWAIT) ||
322 		    sa->sa_handler == SIG_IGN))
323 			atomic_setbits_int(&ps->ps_flags, SAS_NOCLDWAIT);
324 		else
325 			atomic_clearbits_int(&ps->ps_flags, SAS_NOCLDWAIT);
326 	}
327 	if ((sa->sa_flags & SA_RESETHAND) != 0)
328 		ps->ps_sigreset |= bit;
329 	else
330 		ps->ps_sigreset &= ~bit;
331 	if ((sa->sa_flags & SA_SIGINFO) != 0)
332 		ps->ps_siginfo |= bit;
333 	else
334 		ps->ps_siginfo &= ~bit;
	/* note inverted sense: ps_sigintr set means "do NOT restart" */
335 	if ((sa->sa_flags & SA_RESTART) == 0)
336 		ps->ps_sigintr |= bit;
337 	else
338 		ps->ps_sigintr &= ~bit;
339 	if ((sa->sa_flags & SA_ONSTACK) != 0)
340 		ps->ps_sigonstack |= bit;
341 	else
342 		ps->ps_sigonstack &= ~bit;
343 	/*
344 	 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
345 	 * and for signals set to SIG_DFL where the default is to ignore.
346 	 * However, don't put SIGCONT in ps_sigignore,
347 	 * as we have to restart the process.
348 	 */
349 	if (sa->sa_handler == SIG_IGN ||
350 	    (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) {
		/* discard any pending instance of a now-ignored signal */
351 		atomic_clearbits_int(&p->p_siglist, bit);
352 		if (signum != SIGCONT)
353 			ps->ps_sigignore |= bit;	/* easier in psignal */
354 		ps->ps_sigcatch &= ~bit;
355 	} else {
356 		ps->ps_sigignore &= ~bit;
357 		if (sa->sa_handler == SIG_DFL)
358 			ps->ps_sigcatch &= ~bit;
359 		else
360 			ps->ps_sigcatch |= bit;
361 	}
362 	splx(s);
363 }
364 
365 /*
366  * Initialize signal state for process 0;
367  * set to ignore signals that are ignored by default.
368  */
369 void
370 siginit(struct proc *p)
371 {
372 	struct sigacts *ps = p->p_sigacts;
373 	int i;
374 
	/* SIGCONT is excluded: it must always be able to resume the process */
375 	for (i = 0; i < NSIG; i++)
376 		if (sigprop[i] & SA_IGNORE && i != SIGCONT)
377 			ps->ps_sigignore |= sigmask(i);
378 	ps->ps_flags = SAS_NOCLDWAIT | SAS_NOCLDSTOP;
379 }
380 
381 /*
382  * Reset signals for an exec of the specified process.
383  */
384 void
385 execsigs(struct proc *p)
386 {
387 	struct sigacts *ps;
388 	int nc, mask;
389 
	/* get a private sigacts before modifying it */
390 	sigactsunshare(p);
391 	ps = p->p_sigacts;
392 
393 	/*
394 	 * Reset caught signals.  Held signals remain held
395 	 * through p_sigmask (unless they were caught,
396 	 * and are now ignored by default).
397 	 */
398 	while (ps->ps_sigcatch) {
		/* ffs() yields lowest-numbered caught signal (1-based) */
399 		nc = ffs((long)ps->ps_sigcatch);
400 		mask = sigmask(nc);
401 		ps->ps_sigcatch &= ~mask;
402 		if (sigprop[nc] & SA_IGNORE) {
403 			if (nc != SIGCONT)
404 				ps->ps_sigignore |= mask;
405 			atomic_clearbits_int(&p->p_siglist, mask);
406 		}
407 		ps->ps_sigact[nc] = SIG_DFL;
408 	}
409 	/*
410 	 * Reset stack state to the user stack.
411 	 * Clear set of signals caught on the signal stack.
412 	 */
413 	sigstkinit(&p->p_sigstk);
414 	ps->ps_flags &= ~SAS_NOCLDWAIT;
415 	if (ps->ps_sigact[SIGCHLD] == SIG_IGN)
416 		ps->ps_sigact[SIGCHLD] = SIG_DFL;
417 }
418 
419 /*
420  * Manipulate signal mask.
421  * Note that we receive new mask, not pointer,
422  * and return old mask as return value;
423  * the library stub does the rest.
424  */
425 int
426 sys_sigprocmask(struct proc *p, void *v, register_t *retval)
427 {
428 	struct sys_sigprocmask_args /* {
429 		syscallarg(int) how;
430 		syscallarg(sigset_t) mask;
431 	} */ *uap = v;
432 	int error = 0;
433 	int s;
434 	sigset_t mask;
435 
	/* old mask is returned even if `how' turns out to be invalid */
436 	*retval = p->p_sigmask;
437 	mask = SCARG(uap, mask);
438 	s = splhigh();
439 
440 	switch (SCARG(uap, how)) {
441 	case SIG_BLOCK:
		/* never allow the unblockable signals (sigcantmask) in */
442 		p->p_sigmask |= mask &~ sigcantmask;
443 		break;
444 	case SIG_UNBLOCK:
445 		p->p_sigmask &= ~mask;
446 		break;
447 	case SIG_SETMASK:
448 		p->p_sigmask = mask &~ sigcantmask;
449 		break;
450 	default:
451 		error = EINVAL;
452 		break;
453 	}
454 	splx(s);
455 	return (error);
456 }
457 
458 /* ARGSUSED */
459 int
460 sys_sigpending(struct proc *p, void *v, register_t *retval)
461 {
462 
	/* return the set of pending signals for this thread */
463 	*retval = p->p_siglist;
464 	return (0);
465 }
466 
467 /*
468  * Suspend process until signal, providing mask to be set
469  * in the meantime.  Note nonstandard calling convention:
470  * libc stub passes mask, not pointer, to save a copyin.
471  */
472 /* ARGSUSED */
473 int
474 sys_sigsuspend(struct proc *p, void *v, register_t *retval)
475 {
476 	struct sys_sigsuspend_args /* {
477 		syscallarg(int) mask;
478 	} */ *uap = v;
479 	struct sigacts *ps = p->p_sigacts;
480 
481 	/*
482 	 * When returning from sigpause, we want
483 	 * the old mask to be restored after the
484 	 * signal handler has finished.  Thus, we
485 	 * save it here and mark the sigacts structure
486 	 * to indicate this.
487 	 */
488 	p->p_oldmask = p->p_sigmask;
	/* P_SIGSUSPEND tells the delivery path to restore p_oldmask later */
489 	atomic_setbits_int(&p->p_flag, P_SIGSUSPEND);
490 	p->p_sigmask = SCARG(uap, mask) &~ sigcantmask;
	/*
	 * Sleep until a signal interrupts us (PCATCH); nothing ever does
	 * a wakeup(ps), so tsleep() only returns non-zero, via EINTR.
	 */
491 	while (tsleep(ps, PPAUSE|PCATCH, "pause", 0) == 0)
492 		/* void */;
493 	/* always return EINTR rather than ERESTART... */
494 	return (EINTR);
495 }
496 
497 int
498 sigonstack(size_t stack)
499 {
500 	const struct sigaltstack *ss = &curproc->p_sigstk;
501 
	/*
	 * Unsigned arithmetic: if `stack' is below ss_sp the subtraction
	 * wraps to a huge value and the range test correctly fails.
	 */
502 	return (ss->ss_flags & SS_DISABLE ? 0 :
503 	    (stack - (size_t)ss->ss_sp < ss->ss_size));
504 }
505 
506 int
507 sys_sigaltstack(struct proc *p, void *v, register_t *retval)
508 {
509 	struct sys_sigaltstack_args /* {
510 		syscallarg(const struct sigaltstack *) nss;
511 		syscallarg(struct sigaltstack *) oss;
512 	} */ *uap = v;
513 	struct sigaltstack ss;
514 	const struct sigaltstack *nss;
515 	struct sigaltstack *oss;
	/* are we currently executing on the alternate stack? */
516 	int onstack = sigonstack(PROC_STACK(p));
517 	int error;
518 
519 	nss = SCARG(uap, nss);
520 	oss = SCARG(uap, oss);
521 
	/* report the current configuration first */
522 	if (oss != NULL) {
523 		ss = p->p_sigstk;
524 		if (onstack)
525 			ss.ss_flags |= SS_ONSTACK;
526 		if ((error = copyout(&ss, oss, sizeof(ss))))
527 			return (error);
528 	}
529 	if (nss == NULL)
530 		return (0);
531 	error = copyin(nss, &ss, sizeof(ss));
532 	if (error)
533 		return (error);
	/* can't change the alternate stack while running on it */
534 	if (onstack)
535 		return (EPERM);
	/* SS_DISABLE is the only flag callers may set */
536 	if (ss.ss_flags & ~SS_DISABLE)
537 		return (EINVAL);
538 	if (ss.ss_flags & SS_DISABLE) {
539 		p->p_sigstk.ss_flags = ss.ss_flags;
540 		return (0);
541 	}
542 	if (ss.ss_size < MINSIGSTKSZ)
543 		return (ENOMEM);
544 	p->p_sigstk = ss;
545 	return (0);
546 }
547 
548 /* ARGSUSED */
549 int
550 sys_kill(struct proc *cp, void *v, register_t *retval)
551 {
552 	struct sys_kill_args /* {
553 		syscallarg(int) pid;
554 		syscallarg(int) signum;
555 	} */ *uap = v;
556 	struct proc *p;
557 	struct pcred *pc = cp->p_cred;
558 	int pid = SCARG(uap, pid);
559 	int signum = SCARG(uap, signum);
560 
561 	if (((u_int)signum) >= NSIG)
562 		return (EINVAL);
563 	if (pid > 0) {
564 		enum signal_type type = SPROCESS;
565 
566 		/*
567 		 * If the target pid is > THREAD_PID_OFFSET then this
568 		 * must be a kill of another thread in the same process.
569 		 * Otherwise, this is a process kill and the target must
570 		 * be a main thread.
571 		 */
572 		if (pid > THREAD_PID_OFFSET) {
573 			if ((p = pfind(pid - THREAD_PID_OFFSET)) == NULL)
574 				return (ESRCH);
			/*
			 * Same-process requirement makes a cansignal()
			 * check unnecessary for the thread case.
			 */
575 			if (p->p_p != cp->p_p)
576 				return (ESRCH);
577 			type = STHREAD;
578 		} else {
579 			if ((p = pfind(pid)) == NULL)
580 				return (ESRCH);
581 			if (p->p_flag & P_THREAD)
582 				return (ESRCH);
583 			if (!cansignal(cp, pc, p, signum))
584 				return (EPERM);
585 		}
586 
587 		/* kill single process or thread */
		/* signum 0: permission/existence check only, nothing sent */
588 		if (signum)
589 			ptsignal(p, signum, type);
590 		return (0);
591 	}
592 	switch (pid) {
593 	case -1:		/* broadcast signal */
594 		return (killpg1(cp, signum, 0, 1));
595 	case 0:			/* signal own process group */
596 		return (killpg1(cp, signum, 0, 0));
597 	default:		/* negative explicit process group */
598 		return (killpg1(cp, signum, -pid, 0));
599 	}
600 	/* NOTREACHED */
601 }
602 
603 /*
604  * Common code for kill process group/broadcast kill.
605  * cp is calling process.
606  */
607 int
608 killpg1(struct proc *cp, int signum, int pgid, int all)
609 {
610 	struct proc *p;
611 	struct process *pr;
612 	struct pcred *pc = cp->p_cred;
613 	struct pgrp *pgrp;
614 	int nfound = 0;
615 
616 	if (all)
617 		/*
618 		 * broadcast
619 		 */
620 		LIST_FOREACH(p, &allproc, p_list) {
			/* skip init/swapper, system procs, threads, and self */
621 			if (p->p_pid <= 1 || p->p_flag & (P_SYSTEM|P_THREAD) ||
622 			    p == cp || !cansignal(cp, pc, p, signum))
623 				continue;
624 			nfound++;
625 			if (signum)
626 				psignal(p, signum);
627 		}
628 	else {
629 		if (pgid == 0)
630 			/*
631 			 * zero pgid means send to my process group.
632 			 */
633 			pgrp = cp->p_p->ps_pgrp;
634 		else {
635 			pgrp = pgfind(pgid);
636 			if (pgrp == NULL)
637 				return (ESRCH);
638 		}
639 		LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist) {
640 			p = pr->ps_mainproc;
641 			if (p->p_pid <= 1 || p->p_flag & (P_SYSTEM|P_THREAD) ||
642 			    !cansignal(cp, pc, p, signum))
643 				continue;
644 			nfound++;
645 			if (signum && P_ZOMBIE(p) == 0)
646 				psignal(p, signum);
647 		}
648 	}
	/* POSIX: success if at least one process could be signalled */
649 	return (nfound ? 0 : ESRCH);
650 }
651 
652 #define CANDELIVER(uid, euid, pr) \
653 	(euid == 0 || \
654 	(uid) == (pr)->ps_cred->p_ruid || \
655 	(uid) == (pr)->ps_cred->p_svuid || \
656 	(uid) == (pr)->ps_cred->pc_ucred->cr_uid || \
657 	(euid) == (pr)->ps_cred->p_ruid || \
658 	(euid) == (pr)->ps_cred->p_svuid || \
659 	(euid) == (pr)->ps_cred->pc_ucred->cr_uid)
660 
661 /*
662  * Deliver signum to pgid, but first check uid/euid against each
663  * process and see if it is permitted.
664  */
665 void
666 csignal(pid_t pgid, int signum, uid_t uid, uid_t euid)
667 {
668 	struct pgrp *pgrp;
669 	struct process *pr;
670 
671 	if (pgid == 0)
672 		return;
	/* negative pgid: signal every eligible member of the group */
673 	if (pgid < 0) {
674 		pgid = -pgid;
675 		if ((pgrp = pgfind(pgid)) == NULL)
676 			return;
677 		LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist)
678 			if (CANDELIVER(uid, euid, pr))
679 				prsignal(pr, signum);
680 	} else {
		/* positive: a single process */
681 		if ((pr = prfind(pgid)) == NULL)
682 			return;
683 		if (CANDELIVER(uid, euid, pr))
684 			prsignal(pr, signum);
685 	}
686 }
687 
688 /*
689  * Send a signal to a process group.
690  */
691 void
692 gsignal(int pgid, int signum)
693 {
694 	struct pgrp *pgrp;
695 
	/* silently does nothing if pgid is 0 or the group doesn't exist */
696 	if (pgid && (pgrp = pgfind(pgid)))
697 		pgsignal(pgrp, signum, 0);
698 }
699 
700 /*
701  * Send a signal to a process group.  If checktty is 1,
702  * limit to members which have a controlling terminal.
703  */
704 void
705 pgsignal(struct pgrp *pgrp, int signum, int checkctty)
706 {
707 	struct process *pr;
708 
	/* NULL pgrp is tolerated: no-op */
709 	if (pgrp)
710 		LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist)
711 			if (checkctty == 0 || pr->ps_flags & PS_CONTROLT)
712 				prsignal(pr, signum);
713 }
714 
715 /*
716  * Send a signal caused by a trap to the current process.
717  * If it will be caught immediately, deliver it with correct code.
718  * Otherwise, post it normally.
719  */
720 void
721 trapsignal(struct proc *p, int signum, u_long trapno, int code,
722     union sigval sigval)
723 {
724 	struct sigacts *ps = p->p_sigacts;
725 	int mask;
726 
727 	mask = sigmask(signum);
	/*
	 * Fast path: deliver straight to the handler when the signal is
	 * caught, unblocked, and the process isn't being traced.
	 */
728 	if ((p->p_p->ps_flags & PS_TRACED) == 0 &&
729 	    (ps->ps_sigcatch & mask) != 0 &&
730 	    (p->p_sigmask & mask) == 0) {
731 #ifdef KTRACE
732 		if (KTRPOINT(p, KTR_PSIG)) {
733 			siginfo_t si;
734 
735 			initsiginfo(&si, signum, trapno, code, sigval);
736 			ktrpsig(p, signum, ps->ps_sigact[signum],
737 			    p->p_sigmask, code, &si);
738 		}
739 #endif
740 		p->p_ru.ru_nsignals++;
741 		(*p->p_emul->e_sendsig)(ps->ps_sigact[signum], signum,
742 		    p->p_sigmask, trapno, code, sigval);
		/* block the catchmask for the duration of the handler */
743 		p->p_sigmask |= ps->ps_catchmask[signum];
		/* SA_RESETHAND: revert to SIG_DFL after one delivery */
744 		if ((ps->ps_sigreset & mask) != 0) {
745 			ps->ps_sigcatch &= ~mask;
746 			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
747 				ps->ps_sigignore |= mask;
748 			ps->ps_sigact[signum] = SIG_DFL;
749 		}
750 	} else {
		/* slow path: record trap details, then post normally */
751 		p->p_sisig = signum;
752 		p->p_sitrapno = trapno;	/* XXX for core dump/debugger */
753 		p->p_sicode = code;
754 		p->p_sigval = sigval;
755 		ptsignal(p, signum, STHREAD);
756 	}
757 }
758 
759 /*
760  * Send the signal to the process.  If the signal has an action, the action
761  * is usually performed by the target process rather than the caller; we add
762  * the signal to the set of pending signals for the process.
763  *
764  * Exceptions:
765  *   o When a stop signal is sent to a sleeping process that takes the
766  *     default action, the process is stopped without awakening it.
767  *   o SIGCONT restarts stopped processes (or puts them back to sleep)
768  *     regardless of the signal action (eg, blocked or ignored).
769  *
770  * Other ignored signals are discarded immediately.
771  */
772 void
773 psignal(struct proc *p, int signum)
774 {
	/* process-wide signal; ptsignal() may divert it to another thread */
775 	ptsignal(p, signum, SPROCESS);
776 }
777 
778 /*
779  * type = SPROCESS	process signal, can be diverted (sigwait())
780  *	XXX if blocked in all threads, mark as pending in struct process
781  * type = STHREAD	thread signal, but should be propagated if unhandled
782  * type = SPROPAGATED	propagated to this thread, so don't propagate again
783  */
784 void
785 ptsignal(struct proc *p, int signum, enum signal_type type)
786 {
787 	int s, prop;
788 	sig_t action;
789 	int mask;
790 	struct process *pr = p->p_p;
791 	struct proc *q;
792 	int wakeparent = 0;
793 
794 #ifdef DIAGNOSTIC
795 	if ((u_int)signum >= NSIG || signum == 0)
796 		panic("psignal signal number");
797 #endif
798 
799 	/* Ignore signal if we are exiting */
800 	if (pr->ps_flags & PS_EXITING)
801 		return;
802 
803 	mask = sigmask(signum);
804 
	/*
	 * NOTE: for SPROCESS, `p' may be re-pointed below at a different
	 * thread of `pr' -- the sigwait()er, an unblocked thread, or
	 * the single-threading thread.
	 */
805 	if (type == SPROCESS) {
806 		/* Accept SIGKILL to coredumping processes */
807 		if (pr->ps_flags & PS_COREDUMP && signum == SIGKILL) {
808 			if (pr->ps_single != NULL)
809 				p = pr->ps_single;
810 			atomic_setbits_int(&p->p_siglist, mask);
811 			return;
812 		}
813 
814 		/*
815 		 * A process-wide signal can be diverted to a different
816 		 * thread that's in sigwait() for this signal.  If there
817 		 * isn't such a thread, then pick a thread that doesn't
818 		 * have it blocked so that the stop/kill consideration
819 		 * isn't delayed.  Otherwise, mark it pending on the
820 		 * main thread.
821 		 */
822 		TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
823 			/* ignore exiting threads */
824 			if (q->p_flag & P_WEXIT)
825 				continue;
826 
827 			/* sigwait: definitely go to this thread */
828 			if (q->p_sigdivert & mask) {
829 				p = q;
830 				break;
831 			}
832 
833 			/* unblocked: possibly go to this thread */
834 			if ((q->p_sigmask & mask) == 0)
835 				p = q;
836 		}
837 	}
838 
	/* notify kqueue EVFILT_SIGNAL listeners (once, not per thread) */
839 	if (type != SPROPAGATED)
840 		KNOTE(&pr->ps_klist, NOTE_SIGNAL | signum);
841 
842 	prop = sigprop[signum];
843 
844 	/*
845 	 * If proc is traced, always give parent a chance.
846 	 * XXX give sigwait() priority until it's fixed to do this
847 	 * XXX from issignal/postsig
848 	 */
849 	if (p->p_sigdivert & mask) {
		/* hand the signal number to the sigwait()ing thread */
850 		p->p_sigwait = signum;
851 		atomic_clearbits_int(&p->p_sigdivert, ~0);
852 		action = SIG_CATCH;
853 		wakeup(&p->p_sigdivert);
854 	} else if (pr->ps_flags & PS_TRACED) {
855 		action = SIG_DFL;
856 		atomic_setbits_int(&p->p_siglist, mask);
857 	} else {
858 		/*
859 		 * If the signal is being ignored,
860 		 * then we forget about it immediately.
861 		 * (Note: we don't set SIGCONT in ps_sigignore,
862 		 * and if it is set to SIG_IGN,
863 		 * action will be SIG_DFL here.)
864 		 */
865 		if (p->p_sigacts->ps_sigignore & mask)
866 			return;
867 		if (p->p_sigmask & mask)
868 			action = SIG_HOLD;
869 		else if (p->p_sigacts->ps_sigcatch & mask)
870 			action = SIG_CATCH;
871 		else {
872 			action = SIG_DFL;
873 
			/* fatal signal cancels any positive nice value */
874 			if (prop & SA_KILL &&  pr->ps_nice > NZERO)
875 				 pr->ps_nice = NZERO;
876 
877 			/*
878 			 * If sending a tty stop signal to a member of an
879 			 * orphaned process group, discard the signal here if
880 			 * the action is default; don't stop the process below
881 			 * if sleeping, and don't clear any pending SIGCONT.
882 			 */
883 			if (prop & SA_TTYSTOP && pr->ps_pgrp->pg_jobc == 0)
884 				return;
885 		}
886 
887 		atomic_setbits_int(&p->p_siglist, mask);
888 	}
889 
	/* SIGCONT and stop signals cancel each other's pending instances */
890 	if (prop & SA_CONT) {
891 		atomic_clearbits_int(&p->p_siglist, stopsigmask);
892 	}
893 
894 	if (prop & SA_STOP) {
895 		atomic_clearbits_int(&p->p_siglist, contsigmask);
896 		atomic_clearbits_int(&p->p_flag, P_CONTINUED);
897 	}
898 
899 	/*
900 	 * XXX delay processing of SA_STOP signals unless action == SIG_DFL?
901 	 */
	/* stop/continue affects every thread; SPROPAGATED prevents recursion */
902 	if (prop & (SA_CONT | SA_STOP) && type != SPROPAGATED) {
903 		TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
904 			if (q != p)
905 				ptsignal(q, signum, SPROPAGATED);
906 		}
907 	}
908 
909 	/*
910 	 * Defer further processing for signals which are held,
911 	 * except that stopped processes must be continued by SIGCONT.
912 	 */
913 	if (action == SIG_HOLD && ((prop & SA_CONT) == 0 || p->p_stat != SSTOP))
914 		return;
915 
	/* scheduler lock held for all p_stat manipulation below */
916 	SCHED_LOCK(s);
917 
918 	switch (p->p_stat) {
919 
920 	case SSLEEP:
921 		/*
922 		 * If process is sleeping uninterruptibly
923 		 * we can't interrupt the sleep... the signal will
924 		 * be noticed when the process returns through
925 		 * trap() or syscall().
926 		 */
927 		if ((p->p_flag & P_SINTR) == 0)
928 			goto out;
929 		/*
930 		 * Process is sleeping and traced... make it runnable
931 		 * so it can discover the signal in issignal() and stop
932 		 * for the parent.
933 		 */
934 		if (pr->ps_flags & PS_TRACED)
935 			goto run;
936 		/*
937 		 * If SIGCONT is default (or ignored) and process is
938 		 * asleep, we are finished; the process should not
939 		 * be awakened.
940 		 */
941 		if ((prop & SA_CONT) && action == SIG_DFL) {
942 			atomic_clearbits_int(&p->p_siglist, mask);
943 			goto out;
944 		}
945 		/*
946 		 * When a sleeping process receives a stop
947 		 * signal, process immediately if possible.
948 		 */
949 		if ((prop & SA_STOP) && action == SIG_DFL) {
950 			/*
951 			 * If a child holding parent blocked,
952 			 * stopping could cause deadlock.
953 			 */
954 			if (pr->ps_flags & PS_PPWAIT)
955 				goto out;
956 			atomic_clearbits_int(&p->p_siglist, mask);
957 			p->p_xstat = signum;
958 			proc_stop(p, 0);
959 			goto out;
960 		}
961 		/*
962 		 * All other (caught or default) signals
963 		 * cause the process to run.
964 		 */
965 		goto runfast;
966 		/*NOTREACHED*/
967 
968 	case SSTOP:
969 		/*
970 		 * If traced process is already stopped,
971 		 * then no further action is necessary.
972 		 */
973 		if (pr->ps_flags & PS_TRACED)
974 			goto out;
975 
976 		/*
977 		 * Kill signal always sets processes running.
978 		 */
979 		if (signum == SIGKILL) {
980 			atomic_clearbits_int(&p->p_flag, P_SUSPSIG);
981 			goto runfast;
982 		}
983 
984 		if (prop & SA_CONT) {
985 			/*
986 			 * If SIGCONT is default (or ignored), we continue the
987 			 * process but don't leave the signal in p_siglist, as
988 			 * it has no further action.  If SIGCONT is held, we
989 			 * continue the process and leave the signal in
990 			 * p_siglist.  If the process catches SIGCONT, let it
991 			 * handle the signal itself.  If it isn't waiting on
992 			 * an event, then it goes back to run state.
993 			 * Otherwise, process goes back to sleep state.
994 			 */
995 			atomic_setbits_int(&p->p_flag, P_CONTINUED);
996 			atomic_clearbits_int(&p->p_flag, P_SUSPSIG);
			/* parent may be in wait(2) watching for the continue */
997 			wakeparent = 1;
998 			if (action == SIG_DFL)
999 				atomic_clearbits_int(&p->p_siglist, mask);
1000 			if (action == SIG_CATCH)
1001 				goto runfast;
1002 			if (p->p_wchan == 0)
1003 				goto run;
1004 			p->p_stat = SSLEEP;
1005 			goto out;
1006 		}
1007 
1008 		if (prop & SA_STOP) {
1009 			/*
1010 			 * Already stopped, don't need to stop again.
1011 			 * (If we did the shell could get confused.)
1012 			 */
1013 			atomic_clearbits_int(&p->p_siglist, mask);
1014 			goto out;
1015 		}
1016 
1017 		/*
1018 		 * If process is sleeping interruptibly, then simulate a
1019 		 * wakeup so that when it is continued, it will be made
1020 		 * runnable and can look at the signal.  But don't make
1021 		 * the process runnable, leave it stopped.
1022 		 */
1023 		if (p->p_wchan && p->p_flag & P_SINTR)
1024 			unsleep(p);
1025 		goto out;
1026 
1027 	case SONPROC:
		/* running on a CPU now: kick it so it notices the signal */
1028 		signotify(p);
1029 		/* FALLTHROUGH */
1030 	default:
1031 		/*
1032 		 * SRUN, SIDL, SZOMB do nothing with the signal,
1033 		 * other than kicking ourselves if we are running.
1034 		 * It will either never be noticed, or noticed very soon.
1035 		 */
1036 		goto out;
1037 	}
1038 	/*NOTREACHED*/
1039 
1040 runfast:
1041 	/*
1042 	 * Raise priority to at least PUSER.
1043 	 */
1044 	if (p->p_priority > PUSER)
1045 		p->p_priority = PUSER;
1046 run:
1047 	setrunnable(p);
1048 out:
1049 	SCHED_UNLOCK(s);
1050 	if (wakeparent)
1051 		wakeup(pr->ps_pptr);
1052 }
1053 
1054 /*
1055  * If the current process has received a signal (should be caught or cause
1056  * termination, should interrupt current syscall), return the signal number.
1057  * Stop signals with default action are processed immediately, then cleared;
1058  * they aren't returned.  This is checked after each entry to the system for
1059  * a syscall or trap (though this can usually be done without calling issignal
1060  * by checking the pending signal masks in the CURSIG macro.) The normal call
1061  * sequence is
1062  *
1063  *	while (signum = CURSIG(curproc))
1064  *		postsig(signum);
1065  */
int
issignal(struct proc *p)
{
	struct process *pr = p->p_p;
	int signum, mask, prop;
	/* only take the scheduler lock below if the caller doesn't hold it */
	int dolock = (p->p_flag & P_SINTR) == 0;
	int s;

	for (;;) {
		/* signals pending for this thread and not blocked by it */
		mask = p->p_siglist & ~p->p_sigmask;
		/* defer job-control stops while the parent waits in vfork() */
		if (pr->ps_flags & PS_PPWAIT)
			mask &= ~stopsigmask;
		if (mask == 0)	 	/* no signal to send */
			return (0);
		/* take the lowest-numbered pending signal first */
		signum = ffs((long)mask);
		mask = sigmask(signum);
		atomic_clearbits_int(&p->p_siglist, mask);

		/*
		 * We should see pending but ignored signals
		 * only if P_TRACED was on when they were posted.
		 */
		if (mask & p->p_sigacts->ps_sigignore &&
		    (pr->ps_flags & PS_TRACED) == 0)
			continue;

		if ((pr->ps_flags & (PS_TRACED | PS_PPWAIT)) == PS_TRACED) {
			/*
			 * If traced, always stop, and stay
			 * stopped until released by the debugger.
			 */
			p->p_xstat = signum;

			/* park sibling threads while we stop for the debugger */
			KERNEL_LOCK();
			single_thread_set(p, SINGLE_SUSPEND, 0);
			KERNEL_UNLOCK();

			if (dolock)
				SCHED_LOCK(s);
			proc_stop(p, 1);
			if (dolock)
				SCHED_UNLOCK(s);

			/* resumed by the debugger: release the siblings */
			KERNEL_LOCK();
			single_thread_clear(p, 0);
			KERNEL_UNLOCK();

			/*
			 * If we are no longer being traced, or the parent
			 * didn't give us a signal, look for more signals.
			 */
			if ((pr->ps_flags & PS_TRACED) == 0 || p->p_xstat == 0)
				continue;

			/*
			 * If the new signal is being masked, look for other
			 * signals.
			 */
			signum = p->p_xstat;
			mask = sigmask(signum);
			if ((p->p_sigmask & mask) != 0)
				continue;

			/* take the signal! */
			atomic_clearbits_int(&p->p_siglist, mask);
		}

		prop = sigprop[signum];

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */
		switch ((long)p->p_sigacts->ps_sigact[signum]) {

		case (long)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (p->p_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				printf("Process (pid %d) got signal %d\n",
				    p->p_pid, signum);
#endif
				break;		/* == ignore */
			}
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal.  However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				if (pr->ps_flags & PS_TRACED ||
		    		    (pr->ps_pgrp->pg_jobc == 0 &&
				    prop & SA_TTYSTOP))
					break;	/* == ignore */
				p->p_xstat = signum;
				if (dolock)
					SCHED_LOCK(s);
				proc_stop(p, 1);
				if (dolock)
					SCHED_UNLOCK(s);
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else
				goto keep;
			/*NOTREACHED*/

		case (long)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
			if ((prop & SA_CONT) == 0 &&
			    (pr->ps_flags & PS_TRACED) == 0)
				printf("issignal\n");
			break;		/* == ignore */

		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			goto keep;
		}
	}
	/* NOTREACHED */

keep:
	/* re-post: postsig() will clear it again when it delivers */
	atomic_setbits_int(&p->p_siglist, mask); /*leave the signal for later */
	return (signum);
}
1211 
1212 /*
1213  * Put the argument process into the stopped state and notify the parent
1214  * via wakeup.  Signals are handled elsewhere.  The process must not be
1215  * on the run queue.
1216  */
void
proc_stop(struct proc *p, int sw)
{
	extern void *softclock_si;

#ifdef MULTIPROCESSOR
	SCHED_ASSERT_LOCKED();
#endif

	p->p_stat = SSTOP;
	atomic_clearbits_int(&p->p_p->ps_flags, PS_WAITED);
	/* P_STOPPED marks this proc for proc_stop_sweep() below */
	atomic_setbits_int(&p->p_flag, P_STOPPED|P_SUSPSIG);
	if (!timeout_pending(&proc_stop_to)) {
		/* parent notification is deferred to proc_stop_sweep() */
		timeout_add(&proc_stop_to, 0);
		/*
		 * We need this soft interrupt to be handled fast.
		 * Extra calls to softclock don't hurt.
		 */
                softintr_schedule(softclock_si);
	}
	if (sw)
		mi_switch();	/* give up the CPU now that we're stopped */
}
1240 
1241 /*
1242  * Called from a timeout to send signals to the parents of stopped processes.
1243  * We can't do this in proc_stop because it's called with nasty locks held
1244  * and we would need recursive scheduler lock to deal with that.
1245  */
void
proc_stop_sweep(void *v)
{
	struct proc *p;

	LIST_FOREACH(p, &allproc, p_list) {
		/* only procs freshly stopped by proc_stop() are of interest */
		if ((p->p_flag & P_STOPPED) == 0)
			continue;
		atomic_clearbits_int(&p->p_flag, P_STOPPED);

		/* skip SIGCHLD if the parent opted out via SA_NOCLDSTOP */
		if ((p->p_p->ps_pptr->ps_mainproc->p_sigacts->ps_flags &
		    SAS_NOCLDSTOP) == 0)
			prsignal(p->p_p->ps_pptr, SIGCHLD);
		/* wake a parent sleeping in wait(2) */
		wakeup(p->p_p->ps_pptr);
	}
}
1262 
1263 /*
1264  * Take the action for the specified signal
1265  * from the current set of pending signals.
1266  */
void
postsig(int signum)
{
	struct proc *p = curproc;
	struct sigacts *ps = p->p_sigacts;
	sig_t action;
	u_long trapno;
	int mask, returnmask;
	union sigval sigval;
	int s, code;

#ifdef DIAGNOSTIC
	if (signum == 0)
		panic("postsig");
#endif

	KERNEL_LOCK();

	mask = sigmask(signum);
	atomic_clearbits_int(&p->p_siglist, mask);
	action = ps->ps_sigact[signum];
	sigval.sival_ptr = 0;

	/*
	 * Use the saved siginfo details (trap number, code, value) only
	 * if they were recorded for this signal; otherwise report a
	 * plain user-generated signal.
	 */
	if (p->p_sisig != signum) {
		trapno = 0;
		code = SI_USER;
		sigval.sival_ptr = 0;
	} else {
		trapno = p->p_sitrapno;
		code = p->p_sicode;
		sigval = p->p_sigval;
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_PSIG)) {
		siginfo_t si;

		initsiginfo(&si, signum, trapno, code, sigval);
		/* trace with the mask that will be restored, not the current one */
		ktrpsig(p, signum, action, p->p_flag & P_SIGSUSPEND ?
		    p->p_oldmask : p->p_sigmask, code, &si);
	}
#endif
	if (action == SIG_DFL) {
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		sigexit(p, signum);
		/* NOTREACHED */
	} else {
		/*
		 * If we get here, the signal must be caught.
		 */
#ifdef DIAGNOSTIC
		if (action == SIG_IGN || (p->p_sigmask & mask))
			panic("postsig action");
#endif
		/*
		 * Set the new mask value and also defer further
		 * occurrences of this signal.
		 *
		 * Special case: user has done a sigpause.  Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigpause is what we want
		 * restored after the signal processing is completed.
		 */
#ifdef MULTIPROCESSOR
		s = splsched();
#else
		s = splhigh();
#endif
		if (p->p_flag & P_SIGSUSPEND) {
			atomic_clearbits_int(&p->p_flag, P_SIGSUSPEND);
			returnmask = p->p_oldmask;
		} else
			returnmask = p->p_sigmask;
		p->p_sigmask |= ps->ps_catchmask[signum];
		/* SA_RESETHAND: restore the default disposition on delivery */
		if ((ps->ps_sigreset & mask) != 0) {
			ps->ps_sigcatch &= ~mask;
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				ps->ps_sigignore |= mask;
			ps->ps_sigact[signum] = SIG_DFL;
		}
		splx(s);
		p->p_ru.ru_nsignals++;
		/* the saved siginfo has been consumed by this delivery */
		if (p->p_sisig == signum) {
			p->p_sisig = 0;
			p->p_sitrapno = 0;
			p->p_sicode = SI_USER;
			p->p_sigval.sival_ptr = NULL;
		}

		/* hand off to the emulation-specific trampoline setup */
		(*p->p_emul->e_sendsig)(action, signum, returnmask, trapno,
		    code, sigval);
	}

	KERNEL_UNLOCK();
}
1365 
1366 /*
1367  * Force the current process to exit with the specified signal, dumping core
1368  * if appropriate.  We bypass the normal tests for masked and caught signals,
1369  * allowing unrecoverable failures to terminate the process without changing
1370  * signal state.  Mark the accounting record with the signal termination.
1371  * If dumping core, save the signal number for the debugger.  Calls exit and
1372  * does not return.
1373  */
void
sigexit(struct proc *p, int signum)
{
	/* Mark process as going away */
	atomic_setbits_int(&p->p_flag, P_WEXIT);

	p->p_p->ps_acflag |= AXSIG;
	if (sigprop[signum] & SA_CORE) {
		/* save the signal number for the debugger */
		p->p_sisig = signum;

		/* if there are other threads, pause them */
		if (TAILQ_FIRST(&p->p_p->ps_threads) != p ||
		    TAILQ_NEXT(p, p_thr_link) != NULL)
			single_thread_set(p, SINGLE_SUSPEND, 0);

		/* record a successful dump in the wait(2) status */
		if (coredump(p) == 0)
			signum |= WCOREFLAG;
	}
	exit1(p, W_EXITCODE(0, signum), EXIT_NORMAL);
	/* NOTREACHED */
}
1395 
/*
 * Policy for core dumps of set-id processes: 1 (default) suppresses
 * them, 2 redirects them into /var/crash/ — see coredump() below.
 */
int nosuidcoredump = 1;
1397 
/* State shared by coredump() and the write/unmap callbacks below. */
struct coredump_iostate {
	struct proc *io_proc;	/* process being dumped */
	struct vnode *io_vp;	/* core file vnode */
	struct ucred *io_cred;	/* credentials used for the writes */
	off_t io_offset;	/* current write offset in the file */
};
1404 
1405 /*
1406  * Dump core, into a file named "progname.core", unless the process was
1407  * setuid/setgid.
1408  */
int
coredump(struct proc *p)
{
#ifdef SMALL_KERNEL
	return EPERM;
#else
	struct vnode *vp;
	struct ucred *cred = p->p_ucred;
	struct vmspace *vm = p->p_vmspace;
	struct nameidata nd;
	struct vattr vattr;
	struct coredump_iostate	io;
	int error, len;
	char name[sizeof("/var/crash/") + MAXCOMLEN + sizeof(".core")];
	char *dir = "";

	/* note that this process is dumping core */
	p->p_p->ps_flags |= PS_COREDUMP;

	/*
	 * Don't dump if not root and the process has used set user or
	 * group privileges, unless the nosuidcoredump sysctl is set to 2,
	 * in which case dumps are put into /var/crash/.
	 */
	if (((p->p_p->ps_flags & PS_SUGID) && (error = suser(p, 0))) ||
	   ((p->p_p->ps_flags & PS_SUGID) && nosuidcoredump)) {
		if (nosuidcoredump == 2)
			dir = "/var/crash/";
		else
			return (EPERM);
	}

	/* Don't dump if will exceed file size limit. */
	if (USPACE + ptoa(vm->vm_dsize + vm->vm_ssize) >=
	    p->p_rlimit[RLIMIT_CORE].rlim_cur)
		return (EFBIG);

	len = snprintf(name, sizeof(name), "%s%s.core", dir, p->p_comm);
	if (len >= sizeof(name))
		return (EACCES);	/* name would be truncated */

	/*
	 * ... but actually write it as UID
	 */
	cred = crdup(cred);	/* private copy; freed at "out" */
	cred->cr_uid = p->p_cred->p_ruid;
	cred->cr_gid = p->p_cred->p_rgid;

	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, p);

	error = vn_open(&nd, O_CREAT | FWRITE | O_NOFOLLOW, S_IRUSR | S_IWUSR);

	if (error)
		goto out;

	/*
	 * Don't dump to non-regular files, files with links, or files
	 * owned by someone else.
	 */
	vp = nd.ni_vp;
	if ((error = VOP_GETATTR(vp, &vattr, cred, p)) != 0) {
		VOP_UNLOCK(vp, 0, p);
		vn_close(vp, FWRITE, cred, p);
		goto out;
	}
	/* also refuse group/other-accessible files */
	if (vp->v_type != VREG || vattr.va_nlink != 1 ||
	    vattr.va_mode & ((VREAD | VWRITE) >> 3 | (VREAD | VWRITE) >> 6) ||
	    vattr.va_uid != cred->cr_uid) {
		error = EACCES;
		VOP_UNLOCK(vp, 0, p);
		vn_close(vp, FWRITE, cred, p);
		goto out;
	}
	/* truncate any existing contents */
	VATTR_NULL(&vattr);
	vattr.va_size = 0;
	VOP_SETATTR(vp, &vattr, cred, p);
	p->p_p->ps_acflag |= ACORE;

	io.io_proc = p;
	io.io_vp = vp;
	io.io_cred = cred;
	io.io_offset = 0;
	/*
	 * Drop the vnode lock and the open reference, but keep our own
	 * vref across the emulation-specific dump so the vnode stays valid.
	 */
	VOP_UNLOCK(vp, 0, p);
	vref(vp);
	error = vn_close(vp, FWRITE, cred, p);
	if (error == 0)
		error = (*p->p_emul->e_coredump)(p, &io);
	vrele(vp);
out:
	crfree(cred);
	return (error);
#endif
}
1501 
/*
 * Traditional core dump: fill in a struct core header, let
 * cpu_coredump() and uvm_coredump() emit the machine state and memory
 * segments, then write the header itself at offset 0.
 */
int
coredump_trad(struct proc *p, void *cookie)
{
#ifdef SMALL_KERNEL
	return EPERM;
#else
	struct coredump_iostate *io = cookie;
	struct vmspace *vm = io->io_proc->p_vmspace;
	struct vnode *vp = io->io_vp;
	struct ucred *cred = io->io_cred;
	struct core core;
	int error;

	core.c_midmag = 0;
	strlcpy(core.c_name, p->p_comm, sizeof(core.c_name));
	core.c_nseg = 0;
	core.c_signo = p->p_sisig;
	core.c_ucode = p->p_sitrapno;
	core.c_cpusize = 0;
	core.c_tsize = (u_long)ptoa(vm->vm_tsize);
	core.c_dsize = (u_long)ptoa(vm->vm_dsize);
	core.c_ssize = (u_long)round_page(ptoa(vm->vm_ssize));
	/* machine-dependent registers etc.; may also update "core" fields */
	error = cpu_coredump(p, vp, cred, &core);
	if (error)
		return (error);
	/*
	 * uvm_coredump() spits out all appropriate segments.
	 * All that's left to do is to write the core header.
	 */
	error = uvm_coredump(p, vp, cred, &core);
	if (error)
		return (error);
	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&core,
	    (int)core.c_hdrsize, (off_t)0,
	    UIO_SYSSPACE, IO_UNIT, cred, NULL, p);
	return (error);
#endif
}
1540 
1541 #ifndef SMALL_KERNEL
/*
 * Write len bytes of core file data at the current file offset,
 * splitting the transfer into MAXPHYS-sized chunks and yielding the
 * CPU between them.  Aborts with EINTR if the dumping process has
 * SIGKILL pending, so a wedged dump can still be killed.
 */
int
coredump_write(void *cookie, enum uio_seg segflg, const void *data, size_t len)
{
	struct coredump_iostate *io = cookie;
	off_t coffset = 0;	/* progress within this call */
	size_t csize;		/* bytes still to write */
	int chunk, error;

	csize = len;
	do {
		if (io->io_proc->p_siglist & sigmask(SIGKILL))
			return (EINTR);

		/* Rest of the loop sleeps with lock held, so... */
		yield();

		chunk = MIN(csize, MAXPHYS);
		error = vn_rdwr(UIO_WRITE, io->io_vp,
		    (caddr_t)data + coffset, chunk,
		    io->io_offset + coffset, segflg,
		    IO_UNIT, io->io_cred, NULL, io->io_proc);
		if (error) {
			printf("pid %d (%s): %s write of %lu@%p"
			    " at %lld failed: %d\n",
			    io->io_proc->p_pid, io->io_proc->p_comm,
			    segflg == UIO_USERSPACE ? "user" : "system",
			    len, data, (long long) io->io_offset, error);
			return (error);
		}

		coffset += chunk;
		csize -= chunk;
	} while (csize > 0);

	/* advance the shared offset only once the whole span is written */
	io->io_offset += len;
	return (0);
}
1579 
1580 void
1581 coredump_unmap(void *cookie, vaddr_t start, vaddr_t end)
1582 {
1583 	struct coredump_iostate *io = cookie;
1584 
1585 	uvm_unmap(&io->io_proc->p_vmspace->vm_map, start, end);
1586 }
1587 
1588 #endif	/* !SMALL_KERNEL */
1589 
1590 /*
1591  * Nonexistent system call-- signal process (may want to handle it).
1592  * Flag error in case process won't see signal immediately (blocked or ignored).
1593  */
1594 /* ARGSUSED */
1595 int
1596 sys_nosys(struct proc *p, void *v, register_t *retval)
1597 {
1598 
1599 	ptsignal(p, SIGSYS, STHREAD);
1600 	return (ENOSYS);
1601 }
1602 
/*
 * sigwait(2)-style syscall: wait for one of the signals in "sigmask"
 * to arrive, optionally bounded by "timeout", and return its number.
 */
int
sys___thrsigdivert(struct proc *p, void *v, register_t *retval)
{
	struct sys___thrsigdivert_args /* {
		syscallarg(sigset_t) sigmask;
		syscallarg(siginfo_t *) info;
		syscallarg(const struct timespec *) timeout;
	} */ *uap = v;
	sigset_t mask;
	sigset_t *m;
	long long to_ticks = 0;
	int error;

	if (!rthreads_enabled)
		return (ENOTSUP);

	m = NULL;
	/* SIGKILL/SIGSTOP etc. cannot be waited for */
	mask = SCARG(uap, sigmask) &~ sigcantmask;

	/* pending signal for this thread? */
	if (p->p_siglist & mask)
		m = &p->p_siglist;
	else if (p->p_p->ps_mainproc->p_siglist & mask)
		m = &p->p_p->ps_mainproc->p_siglist;
	if (m != NULL) {
		/* already pending: consume the lowest-numbered one and return */
		int sig = ffs((long)(*m & mask));
		atomic_clearbits_int(m, sigmask(sig));
		*retval = sig;
		return (0);
	}

	if (SCARG(uap, timeout) != NULL) {
		struct timespec ts;
		if ((error = copyin(SCARG(uap, timeout), &ts, sizeof(ts))) != 0)
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &ts);
#endif
		/* convert the timespec to clock ticks, clamped to INT_MAX */
		to_ticks = (long long)hz * ts.tv_sec +
		    ts.tv_nsec / (tick * 1000);
		if (to_ticks > INT_MAX)
			to_ticks = INT_MAX;
	}

	/*
	 * Advertise the wanted set in p_sigdivert and sleep; the signal
	 * delivery path presumably clears p_sigdivert and fills in
	 * p_sigwait before waking us (not visible in this file chunk).
	 */
	p->p_sigwait = 0;
	atomic_setbits_int(&p->p_sigdivert, mask);
	error = tsleep(&p->p_sigdivert, PPAUSE|PCATCH, "sigwait",
	    (int)to_ticks);
	if (p->p_sigdivert) {
		/* interrupted */
		KASSERT(error != 0);
		atomic_clearbits_int(&p->p_sigdivert, ~0);
		if (error == EINTR)
			error = ERESTART;
		else if (error == ETIMEDOUT)
			error = EAGAIN;
		return (error);

	}
	KASSERT(p->p_sigwait != 0);
	*retval = p->p_sigwait;

	if (SCARG(uap, info) == NULL) {
		error = 0;
	} else {
		/* caller asked for siginfo: only the signal number is known */
		siginfo_t si;

		bzero(&si, sizeof si);
		si.si_signo = p->p_sigwait;
		error = copyout(&si, SCARG(uap, info), sizeof(si));
	}
	return (error);
}
1677 
1678 void
1679 initsiginfo(siginfo_t *si, int sig, u_long trapno, int code, union sigval val)
1680 {
1681 	bzero(si, sizeof *si);
1682 
1683 	si->si_signo = sig;
1684 	si->si_code = code;
1685 	if (code == SI_USER) {
1686 		si->si_value = val;
1687 	} else {
1688 		switch (sig) {
1689 		case SIGSEGV:
1690 		case SIGILL:
1691 		case SIGBUS:
1692 		case SIGFPE:
1693 			si->si_addr = val.sival_ptr;
1694 			si->si_trapno = trapno;
1695 			break;
1696 		case SIGXFSZ:
1697 			break;
1698 		}
1699 	}
1700 }
1701 
1702 int
1703 filt_sigattach(struct knote *kn)
1704 {
1705 	struct process *pr = curproc->p_p;
1706 
1707 	kn->kn_ptr.p_process = pr;
1708 	kn->kn_flags |= EV_CLEAR;		/* automatically set */
1709 
1710 	/* XXX lock the proc here while adding to the list? */
1711 	SLIST_INSERT_HEAD(&pr->ps_klist, kn, kn_selnext);
1712 
1713 	return (0);
1714 }
1715 
/* Detach an EVFILT_SIGNAL knote from its process's knote list. */
void
filt_sigdetach(struct knote *kn)
{
	struct process *pr = kn->kn_ptr.p_process;

	SLIST_REMOVE(&pr->ps_klist, kn, knote, kn_selnext);
}
1723 
1724 /*
1725  * signal knotes are shared with proc knotes, so we apply a mask to
1726  * the hint in order to differentiate them from process hints.  This
1727  * could be avoided by using a signal-specific knote list, but probably
1728  * isn't worth the trouble.
1729  */
1730 int
1731 filt_signal(struct knote *kn, long hint)
1732 {
1733 
1734 	if (hint & NOTE_SIGNAL) {
1735 		hint &= ~NOTE_SIGNAL;
1736 
1737 		if (kn->kn_id == hint)
1738 			kn->kn_data++;
1739 	}
1740 	return (kn->kn_data != 0);
1741 }
1742 
/*
 * Common work on return to userspace: deliver pending signals, honor a
 * single-thread suspension request, and refresh the scheduling priority.
 */
void
userret(struct proc *p)
{
	int sig;

	/* deliver any pending, unmasked signals */
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);

	/* another thread is single-threading the process: check in */
	if (p->p_flag & P_SUSPSINGLE) {
		KERNEL_LOCK();
		single_thread_check(p, 0);
		KERNEL_UNLOCK();
	}

	p->p_cpu->ci_schedstate.spc_curpriority = p->p_priority = p->p_usrpri;
}
1759 
/*
 * Check whether another thread has single-threaded the process.  If so,
 * either return an error so a "deep" caller can unwind, exit outright
 * (PS_SINGLEEXIT), or suspend here until the process is released.
 */
int
single_thread_check(struct proc *p, int deep)
{
	struct process *pr = p->p_p;

	if (pr->ps_single != NULL && pr->ps_single != p) {
		do {
			int s;

			/* if we're in deep, we need to unwind to the edge */
			if (deep) {
				if (pr->ps_flags & PS_SINGLEUNWIND)
					return (ERESTART);
				if (pr->ps_flags & PS_SINGLEEXIT)
					return (EINTR);
			}

			/* last thread to check in wakes the single-threader */
			if (--pr->ps_singlecount == 0)
				wakeup(&pr->ps_singlecount);
			if (pr->ps_flags & PS_SINGLEEXIT)
				exit1(p, 0, EXIT_THREAD_NOCHECK);

			/* not exiting and don't need to unwind, so suspend */
			SCHED_LOCK(s);
			p->p_stat = SSTOP;
			mi_switch();
			SCHED_UNLOCK(s);
		} while (pr->ps_single != NULL);
	}

	return (0);
}
1792 
1793 /*
1794  * Stop other threads in the process.  The mode controls how and
1795  * where the other threads should stop:
1796  *  - SINGLE_SUSPEND: stop wherever they are, will later either be told to exit
1797  *    (by setting to SINGLE_EXIT) or be released (via single_thread_clear())
1798  *  - SINGLE_UNWIND: just unwind to kernel boundary, will be told to exit
1799  *    or released as with SINGLE_SUSPEND
1800  *  - SINGLE_EXIT: unwind to kernel boundary and exit
1801  */
int
single_thread_set(struct proc *p, enum single_thread_mode mode, int deep)
{
	struct process *pr = p->p_p;
	struct proc *q;
	int error;

	/* lose the race gracefully if someone else single-threaded first */
	if ((error = single_thread_check(p, deep)))
		return error;

	switch (mode) {
	case SINGLE_SUSPEND:
		break;
	case SINGLE_UNWIND:
		atomic_setbits_int(&pr->ps_flags, PS_SINGLEUNWIND);
		break;
	case SINGLE_EXIT:
		atomic_setbits_int(&pr->ps_flags, PS_SINGLEEXIT);
		atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND);
		break;
#ifdef DIAGNOSTIC
	default:
		panic("single_thread_mode = %d", mode);
#endif
	}
	pr->ps_single = p;
	/* ps_singlecount counts threads that still have to check in */
	pr->ps_singlecount = 0;
	TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
		int s;

		if (q == p)
			continue;
		/* exiting threads only need attention when forcing an exit */
		if (q->p_flag & P_WEXIT) {
			if (mode == SINGLE_EXIT) {
				SCHED_LOCK(s);
				if (q->p_stat == SSTOP) {
					setrunnable(q);
					pr->ps_singlecount++;
				}
				SCHED_UNLOCK(s);
			}
			continue;
		}
		SCHED_LOCK(s);
		atomic_setbits_int(&q->p_flag, P_SUSPSINGLE);
		switch (q->p_stat) {
		case SIDL:
		case SRUN:
			/* will notice P_SUSPSINGLE on its way to userret() */
			pr->ps_singlecount++;
			break;
		case SSLEEP:
			/* if it's not interruptible, then just have to wait */
			if (q->p_flag & P_SINTR) {
				/* merely need to suspend?  just stop it */
				if (mode == SINGLE_SUSPEND) {
					q->p_stat = SSTOP;
					break;
				}
				/* need to unwind or exit, so wake it */
				setrunnable(q);
			}
			pr->ps_singlecount++;
			break;
		case SSTOP:
			/* already stopped; only rouse it to make it exit */
			if (mode == SINGLE_EXIT) {
				setrunnable(q);
				pr->ps_singlecount++;
			}
			break;
		case SZOMB:
		case SDEAD:
			break;
		case SONPROC:
			/* running on another CPU: nudge it to notice */
			pr->ps_singlecount++;
			signotify(q);
			break;
		}
		SCHED_UNLOCK(s);
	}

	/* wait until they're all suspended */
	while (pr->ps_singlecount > 0)
		tsleep(&pr->ps_singlecount, PUSER, "suspend", 0);
	return 0;
}
1887 
/*
 * End single-threading and release the suspended sibling threads.
 * "flag" names a p_flag bit (e.g. P_SUSPSIG) that, if still set on a
 * thread, keeps it stopped for that other reason.
 */
void
single_thread_clear(struct proc *p, int flag)
{
	struct process *pr = p->p_p;
	struct proc *q;

	KASSERT(pr->ps_single == p);

	pr->ps_single = NULL;
	atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND | PS_SINGLEEXIT);
	TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
		int s;

		if (q == p || (q->p_flag & P_SUSPSINGLE) == 0)
			continue;
		atomic_clearbits_int(&q->p_flag, P_SUSPSINGLE);

		/*
		 * if the thread was only stopped for single threading
		 * then clearing that either makes it runnable or puts
		 * it back into some sleep queue
		 */
		SCHED_LOCK(s);
		if (q->p_stat == SSTOP && (q->p_flag & flag) == 0) {
			if (q->p_wchan == 0)
				setrunnable(q);
			else
				q->p_stat = SSLEEP;
		}
		SCHED_UNLOCK(s);
	}
}
1920