xref: /netbsd-src/sys/kern/sys_sig.c (revision 75996a401adf173278b026a036cd3ac0756959c7)
1 /*	$NetBSD: sys_sig.c,v 1.58 2024/07/14 05:10:40 kre Exp $	*/
2 
3 /*-
4  * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Andrew Doran.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Copyright (c) 1982, 1986, 1989, 1991, 1993
34  *	The Regents of the University of California.  All rights reserved.
35  * (c) UNIX System Laboratories, Inc.
36  * All or some portions of this file are derived from material licensed
37  * to the University of California by American Telephone and Telegraph
38  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
39  * the permission of UNIX System Laboratories, Inc.
40  *
41  * Redistribution and use in source and binary forms, with or without
42  * modification, are permitted provided that the following conditions
43  * are met:
44  * 1. Redistributions of source code must retain the above copyright
45  *    notice, this list of conditions and the following disclaimer.
46  * 2. Redistributions in binary form must reproduce the above copyright
47  *    notice, this list of conditions and the following disclaimer in the
48  *    documentation and/or other materials provided with the distribution.
49  * 3. Neither the name of the University nor the names of its contributors
50  *    may be used to endorse or promote products derived from this software
51  *    without specific prior written permission.
52  *
53  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63  * SUCH DAMAGE.
64  *
65  *	@(#)kern_sig.c	8.14 (Berkeley) 5/14/95
66  */
67 
68 #include <sys/cdefs.h>
69 __KERNEL_RCSID(0, "$NetBSD: sys_sig.c,v 1.58 2024/07/14 05:10:40 kre Exp $");
70 
71 #include "opt_dtrace.h"
72 
73 #include <sys/param.h>
74 #include <sys/kernel.h>
75 #include <sys/signalvar.h>
76 #include <sys/proc.h>
77 #include <sys/pool.h>
78 #include <sys/syscallargs.h>
79 #include <sys/kauth.h>
80 #include <sys/wait.h>
81 #include <sys/kmem.h>
82 #include <sys/module.h>
83 #include <sys/sdt.h>
84 #include <sys/compat_stub.h>
85 
86 SDT_PROVIDER_DECLARE(proc);
87 SDT_PROBE_DEFINE2(proc, kernel, , signal__clear,
88     "int", 		/* signal */
89     "ksiginfo_t *");	/* signal-info */
90 
91 int
92 sys___sigaction_sigtramp(struct lwp *l,
93     const struct sys___sigaction_sigtramp_args *uap, register_t *retval)
94 {
95 	/* {
96 		syscallarg(int)				signum;
97 		syscallarg(const struct sigaction *)	nsa;
98 		syscallarg(struct sigaction *)		osa;
99 		syscallarg(void *)			tramp;
100 		syscallarg(int)				vers;
101 	} */
102 	struct sigaction nsa, osa;
103 	int error;
104 
105 	if (SCARG(uap, nsa)) {
106 		error = copyin(SCARG(uap, nsa), &nsa, sizeof(nsa));
107 		if (error)
108 			return (error);
109 	}
110 	error = sigaction1(l, SCARG(uap, signum),
111 	    SCARG(uap, nsa) ? &nsa : 0, SCARG(uap, osa) ? &osa : 0,
112 	    SCARG(uap, tramp), SCARG(uap, vers));
113 	if (error)
114 		return (error);
115 	if (SCARG(uap, osa)) {
116 		error = copyout(&osa, SCARG(uap, osa), sizeof(osa));
117 		if (error)
118 			return (error);
119 	}
120 	return 0;
121 }
122 
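/*
 * Illustrative userland sketch (example only, not part of the kernel build):
 * a program reaches the syscall above through libc's sigaction(2) wrapper,
 * which supplies the trampoline and version arguments itself.  Names such
 * as on_sigint() are hypothetical.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t got_sigint;

static void
on_sigint(int signo)
{
	got_sigint = 1;		/* async-signal-safe: only set a flag */
}

int
main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_sigint;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_RESTART;
	if (sigaction(SIGINT, &sa, NULL) == -1)
		return 1;
	while (!got_sigint)
		pause();		/* returns -1/EINTR when a handler runs */
	printf("caught SIGINT\n");
	return 0;
}
#endif
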
123 /*
124  * Manipulate signal mask.  The new mask is copied in from userspace and
125  * the previous mask is copied out if requested; sigprocmask1() does the work.
126  */
127 int
128 sys___sigprocmask14(struct lwp *l, const struct sys___sigprocmask14_args *uap,
129     register_t *retval)
130 {
131 	/* {
132 		syscallarg(int)			how;
133 		syscallarg(const sigset_t *)	set;
134 		syscallarg(sigset_t *)		oset;
135 	} */
136 	struct proc	*p = l->l_proc;
137 	sigset_t	nss, oss;
138 	int		error;
139 
140 	if (SCARG(uap, set)) {
141 		error = copyin(SCARG(uap, set), &nss, sizeof(nss));
142 		if (error)
143 			return error;
144 	}
145 	mutex_enter(p->p_lock);
146 	error = sigprocmask1(l, SCARG(uap, how),
147 	    SCARG(uap, set) ? &nss : 0, SCARG(uap, oset) ? &oss : 0);
148 	mutex_exit(p->p_lock);
149 	if (error)
150 		return error;
151 	if (SCARG(uap, oset)) {
152 		error = copyout(&oss, SCARG(uap, oset), sizeof(oss));
153 		if (error)
154 			return error;
155 	}
156 	return 0;
157 }
158 
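/*
 * Illustrative userland sketch (example only, not part of the kernel build):
 * the usual block/restore pattern served by the syscall above, assuming the
 * standard POSIX sigprocmask(2) wrapper.  block_sigint() is a hypothetical
 * helper name.
 */
#if 0
#include <signal.h>

int
block_sigint(void)
{
	sigset_t block, omask;

	sigemptyset(&block);
	sigaddset(&block, SIGINT);
	if (sigprocmask(SIG_BLOCK, &block, &omask) == -1)
		return -1;

	/* ... critical section: SIGINT stays pending ... */

	return sigprocmask(SIG_SETMASK, &omask, NULL);	/* restore old mask */
}
#endif
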
159 int
160 sys___sigpending14(struct lwp *l, const struct sys___sigpending14_args *uap,
161     register_t *retval)
162 {
163 	/* {
164 		syscallarg(sigset_t *)	set;
165 	} */
166 	sigset_t ss;
167 
168 	sigpending1(l, &ss);
169 	return copyout(&ss, SCARG(uap, set), sizeof(ss));
170 }
171 
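/*
 * Illustrative userland sketch (example only, not part of the kernel build):
 * sigpending(2) reports the union of process- and LWP-pending signals
 * assembled by sigpending1() below.  check_pending_sighup() is hypothetical.
 */
#if 0
#include <signal.h>
#include <stdio.h>

void
check_pending_sighup(void)
{
	sigset_t pending;

	if (sigpending(&pending) == 0 && sigismember(&pending, SIGHUP))
		printf("SIGHUP arrived while blocked\n");
}
#endif
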
172 /*
173  * Suspend the process until a signal arrives, installing the supplied
174  * mask in the meantime.  The mask is copied in from userspace; the
175  * previous mask is restored once the signal handler has finished.
176  */
177 int
178 sys___sigsuspend14(struct lwp *l, const struct sys___sigsuspend14_args *uap,
179     register_t *retval)
180 {
181 	/* {
182 		syscallarg(const sigset_t *)	set;
183 	} */
184 	sigset_t	ss;
185 	int		error;
186 
187 	if (SCARG(uap, set)) {
188 		error = copyin(SCARG(uap, set), &ss, sizeof(ss));
189 		if (error)
190 			return error;
191 	}
192 	return sigsuspend1(l, SCARG(uap, set) ? &ss : 0);
193 }
194 
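/*
 * Illustrative userland sketch (example only, not part of the kernel build):
 * the classic race-free wait built on sigsuspend(2).  The signal is blocked
 * first, the flag is tested, and sigsuspend() atomically installs the wait
 * mask and sleeps; it always returns -1 with errno set to EINTR.  Names such
 * as wait_for_sigusr1() are hypothetical.
 */
#if 0
#include <signal.h>

static volatile sig_atomic_t got_usr1;

static void
usr1_handler(int signo)
{
	got_usr1 = 1;
}

void
wait_for_sigusr1(void)
{
	sigset_t block, waitmask;

	signal(SIGUSR1, usr1_handler);
	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &waitmask);
	sigdelset(&waitmask, SIGUSR1);	/* wait with SIGUSR1 unblocked */
	while (!got_usr1)
		sigsuspend(&waitmask);
	sigprocmask(SIG_UNBLOCK, &block, NULL);
}
#endif
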
195 int
196 sys___sigaltstack14(struct lwp *l, const struct sys___sigaltstack14_args *uap,
197     register_t *retval)
198 {
199 	/* {
200 		syscallarg(const struct sigaltstack *)	nss;
201 		syscallarg(struct sigaltstack *)	oss;
202 	} */
203 	stack_t	nss, oss;
204 	int	error;
205 
206 	if (SCARG(uap, nss)) {
207 		error = copyin(SCARG(uap, nss), &nss, sizeof(nss));
208 		if (error)
209 			return error;
210 	}
211 	error = sigaltstack1(l,
212 	    SCARG(uap, nss) ? &nss : 0, SCARG(uap, oss) ? &oss : 0);
213 	if (error)
214 		return error;
215 	if (SCARG(uap, oss)) {
216 		error = copyout(&oss, SCARG(uap, oss), sizeof(oss));
217 		if (error)
218 			return error;
219 	}
220 	return 0;
221 }
222 
223 int
224 kill1(struct lwp *l, pid_t pid, ksiginfo_t *ksi, register_t *retval)
225 {
226 	int error;
227 	struct proc *p;
228 
229 	if ((u_int)ksi->ksi_signo >= NSIG)
230 		return EINVAL;
231 
232 	if (pid != l->l_proc->p_pid) {
233 		if (ksi->ksi_pid != l->l_proc->p_pid)
234 			return EPERM;
235 
236 		if (ksi->ksi_uid != kauth_cred_geteuid(l->l_cred))
237 			return EPERM;
238 
239 		switch (ksi->ksi_code) {
240 		case SI_USER:
241 		case SI_QUEUE:
242 			break;
243 		default:
244 			return EPERM;
245 		}
246 	}
247 
248 	if (pid > 0) {
249 		/* kill single process */
250 		mutex_enter(&proc_lock);
251 		p = proc_find_raw(pid);
252 		if (p == NULL || (p->p_stat != SACTIVE && p->p_stat != SSTOP)) {
253 			mutex_exit(&proc_lock);
254 			/* IEEE Std 1003.1-2001: return success for zombies */
255 			return p ? 0 : ESRCH;
256 		}
257 		mutex_enter(p->p_lock);
258 		error = kauth_authorize_process(l->l_cred,
259 		    KAUTH_PROCESS_SIGNAL, p, KAUTH_ARG(ksi->ksi_signo),
260 		    NULL, NULL);
261 		if (!error && ksi->ksi_signo) {
262 			error = kpsignal2(p, ksi);
263 		}
264 		mutex_exit(p->p_lock);
265 		mutex_exit(&proc_lock);
266 		return error;
267 	}
268 
269 	switch (pid) {
270 	case -1:		/* broadcast signal */
271 		return killpg1(l, ksi, 0, 1);
272 	case 0:			/* signal own process group */
273 		return killpg1(l, ksi, 0, 0);
274 	default:		/* negative explicit process group */
275 		if (pid <= INT_MIN)
276 			return ESRCH;
277 		return killpg1(l, ksi, -pid, 0);
278 	}
279 	/* NOTREACHED */
280 }
281 
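/*
 * Illustrative userland sketch (example only, not part of the kernel build):
 * the pid conventions dispatched by kill1() above, via the standard POSIX
 * kill(2) wrapper.  kill_examples() and its arguments are hypothetical.
 */
#if 0
#include <signal.h>
#include <unistd.h>

void
kill_examples(pid_t child, pid_t pgrp)
{
	kill(child, SIGTERM);	/* pid > 0: signal a single process */
	kill(0, SIGHUP);	/* pid == 0: caller's own process group */
	kill(-pgrp, SIGTERM);	/* pid < -1: the process group |pid| */
	kill(-1, SIGUSR1);	/* pid == -1: broadcast, subject to permission */
	kill(child, 0);		/* signo 0: existence/permission check only */
}
#endif
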
282 int
283 sys_sigqueueinfo(struct lwp *l, const struct sys_sigqueueinfo_args *uap,
284     register_t *retval)
285 {
286 	/* {
287 		syscallarg(pid_t)		pid;
288 		syscallarg(const siginfo_t *)	info;
289 	} */
290 	ksiginfo_t	ksi;
291 	int error;
292 
293 	KSI_INIT(&ksi);
294 
295 	if ((error = copyin(&SCARG(uap, info)->_info, &ksi.ksi_info,
296 	    sizeof(ksi.ksi_info))) != 0)
297 		return error;
298 
299 	return kill1(l, SCARG(uap, pid), &ksi, retval);
300 }
301 
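/*
 * Illustrative userland sketch (example only, not part of the kernel build):
 * queueing a signal with an accompanying value through the POSIX sigqueue(3)
 * interface, which is expected to build the siginfo_t consumed by the
 * syscall above.  notify_peer() is a hypothetical helper.
 */
#if 0
#include <signal.h>

int
notify_peer(pid_t pid)
{
	union sigval sv;

	sv.sival_int = 42;	/* delivered as si_value to the receiver */
	return sigqueue(pid, SIGUSR1, sv);
}
#endif
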
302 int
303 sys_kill(struct lwp *l, const struct sys_kill_args *uap, register_t *retval)
304 {
305 	/* {
306 		syscallarg(pid_t)	pid;
307 		syscallarg(int)	signum;
308 	} */
309 	ksiginfo_t	ksi;
310 
311 	KSI_INIT(&ksi);
312 
313 	ksi.ksi_signo = SCARG(uap, signum);
314 	ksi.ksi_code = SI_USER;
315 	ksi.ksi_pid = l->l_proc->p_pid;
316 	ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
317 
318 	return kill1(l, SCARG(uap, pid), &ksi, retval);
319 }
320 
321 int
322 sys_getcontext(struct lwp *l, const struct sys_getcontext_args *uap,
323     register_t *retval)
324 {
325 	/* {
326 		syscallarg(struct __ucontext *) ucp;
327 	} */
328 	struct proc *p = l->l_proc;
329 	ucontext_t uc;
330 
331 	memset(&uc, 0, sizeof(uc));
332 
333 	mutex_enter(p->p_lock);
334 	getucontext(l, &uc);
335 	mutex_exit(p->p_lock);
336 
337 	return copyout(&uc, SCARG(uap, ucp), sizeof (*SCARG(uap, ucp)));
338 }
339 
340 int
341 sys_setcontext(struct lwp *l, const struct sys_setcontext_args *uap,
342     register_t *retval)
343 {
344 	/* {
345 		syscallarg(const ucontext_t *) ucp;
346 	} */
347 	struct proc *p = l->l_proc;
348 	ucontext_t uc;
349 	int error;
350 
351 	error = copyin(SCARG(uap, ucp), &uc, sizeof (uc));
352 	if (error)
353 		return error;
354 	if ((uc.uc_flags & _UC_CPU) == 0)
355 		return EINVAL;
356 	mutex_enter(p->p_lock);
357 	error = setucontext(l, &uc);
358 	mutex_exit(p->p_lock);
359 	if (error)
360  		return error;
361 
362 	return EJUSTRETURN;
363 }
364 
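/*
 * Illustrative userland sketch (example only, not part of the kernel build):
 * getcontext() returns once when called and again every time setcontext()
 * resumes the saved context, which is why sys_setcontext() above ends with
 * EJUSTRETURN (the restored register state is already what userland sees).
 */
#if 0
#include <stdio.h>
#include <ucontext.h>

int
main(void)
{
	ucontext_t uc;
	volatile int resumed = 0;	/* volatile: survives the jump back */

	getcontext(&uc);
	printf("at getcontext, resumed=%d\n", resumed);
	if (!resumed) {
		resumed = 1;
		setcontext(&uc);	/* jumps back to getcontext()'s return */
	}
	return 0;
}
#endif
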
365 /*
366  * sigtimedwait(2) system call, also used to implement
367  * sigwaitinfo() and sigwait().
368  *
369  * This only handles a single LWP in signal wait.  libpthread provides
370  * its own sigtimedwait() wrapper to DTRT WRT individual threads.
371  */
372 int
373 sys_____sigtimedwait50(struct lwp *l,
374     const struct sys_____sigtimedwait50_args *uap, register_t *retval)
375 {
376 
377 	return sigtimedwait1(l, uap, retval, copyin, copyout, copyin, copyout);
378 }
379 
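/*
 * Illustrative userland sketch (example only, not part of the kernel build):
 * waiting synchronously for SIGUSR1 with sigtimedwait(2).  The signal must
 * be blocked first so it stays pending rather than running a handler.  A
 * NULL timeout blocks forever; an all-zero timespec only polls (EAGAIN if
 * nothing is pending).  wait_usr1() is a hypothetical helper.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <time.h>

int
wait_usr1(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	if (sigtimedwait(&set, &info, &ts) == -1)
		return -1;	/* EAGAIN on timeout, EINTR if interrupted */
	printf("signal %d from pid %ld\n", info.si_signo, (long)info.si_pid);
	return 0;
}
#endif
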
380 int
381 sigaction1(struct lwp *l, int signum, const struct sigaction *nsa,
382 	struct sigaction *osa, const void *tramp, int vers)
383 {
384 	struct proc *p;
385 	struct sigacts *ps;
386 	sigset_t tset;
387 	int prop, error;
388 	ksiginfoq_t kq;
389 	static bool v0v1valid;
390 
391 	if (signum <= 0 || signum >= NSIG)
392 		return EINVAL;
393 
394 	p = l->l_proc;
395 	error = 0;
396 	ksiginfo_queue_init(&kq);
397 
398 	/*
399 	 * Trampoline ABI version __SIGTRAMP_SIGCODE_VERSION (0) is reserved
400 	 * for the legacy kernel provided on-stack trampoline.  Conversely,
401 	 * if we are using a non-0 ABI version, we must have a trampoline.
402 	 * Only validate the vers if a new sigaction was supplied and there
403 	 * was an actual handler specified (not SIG_IGN or SIG_DFL), which
404 	 * don't require a trampoline. Emulations use legacy kernel
405  * trampolines with version 0, so check for that as well.
406 	 *
407 	 * If version < __SIGTRAMP_SIGINFO_VERSION_MIN (usually 2), we try
408 	 * to autoload the compat module.  Note that we interlock with the
409 	 * unload check in compat_modcmd() using kernconfig_lock.  If the
410 	 * autoload fails, we don't try it again for this process.
411 	 */
412 	if (nsa != NULL && nsa->sa_handler != SIG_IGN
413 	    && nsa->sa_handler != SIG_DFL) {
414 		if (__predict_false(vers < __SIGTRAMP_SIGINFO_VERSION_MIN)) {
415 			if (vers == __SIGTRAMP_SIGCODE_VERSION &&
416 			    p->p_sigctx.ps_sigcode != NULL) {
417 				/*
418 				 * if sigcode is used for this emulation,
419 				 * version 0 is allowed.
420 				 */
421 			}
422 #ifdef __HAVE_STRUCT_SIGCONTEXT
423 			else if (p->p_flag & PK_32) {
424 				/*
425 				 * The 32-bit compat module will have
426 				 * pre-validated this for us.
427 				 */
428 				v0v1valid = true;
429 			} else if ((p->p_lflag & PL_SIGCOMPAT) == 0) {
430 				kernconfig_lock();
431 				(void)module_autoload("compat_16",
432 				    MODULE_CLASS_ANY);
433 				if (sendsig_sigcontext_16_hook.hooked) {
434 					/*
435 					 * We need to remember if the
436 					 * sigcontext method may be useable,
437 					 * because libc may use it even
438 					 * if siginfo is available.
439 					 */
440 					v0v1valid = true;
441 				}
442 				mutex_enter(&proc_lock);
443 				/*
444 				 * Prevent unload of compat module while
445 				 * this process remains.
446 				 */
447 				p->p_lflag |= PL_SIGCOMPAT;
448 				mutex_exit(&proc_lock);
449 				kernconfig_unlock();
450 			}
451 #endif /* __HAVE_STRUCT_SIGCONTEXT */
452 		}
453 
454 		switch (vers) {
455 		case __SIGTRAMP_SIGCODE_VERSION:
456 			/* kernel supplied trampoline. */
457 			if (tramp != NULL ||
458 			    (p->p_sigctx.ps_sigcode == NULL && !v0v1valid)) {
459 				return EINVAL;
460 			}
461 			break;
462 #ifdef __HAVE_STRUCT_SIGCONTEXT
463 		case __SIGTRAMP_SIGCONTEXT_VERSION_MIN ...
464 		     __SIGTRAMP_SIGCONTEXT_VERSION_MAX:
465 			/* sigcontext, user supplied trampoline. */
466 			if (tramp == NULL || !v0v1valid) {
467 				return EINVAL;
468 			}
469 			break;
470 #endif /* __HAVE_STRUCT_SIGCONTEXT */
471 		case __SIGTRAMP_SIGINFO_VERSION_MIN ...
472 		     __SIGTRAMP_SIGINFO_VERSION_MAX:
473 			/* siginfo, user supplied trampoline. */
474 			if (tramp == NULL) {
475 				return EINVAL;
476 			}
477 			break;
478 		default:
479 			/* Invalid trampoline version. */
480 			return EINVAL;
481 		}
482 	}
483 
484 	mutex_enter(p->p_lock);
485 
486 	ps = p->p_sigacts;
487 	if (osa)
488 		sigaction_copy(osa, &SIGACTION_PS(ps, signum));
489 	if (!nsa)
490 		goto out;
491 
492 	prop = sigprop[signum];
493 	if ((nsa->sa_flags & ~SA_ALLBITS) || (prop & SA_CANTMASK)) {
494 		error = EINVAL;
495 		goto out;
496 	}
497 
498 	sigaction_copy(&SIGACTION_PS(ps, signum), nsa);
499 	ps->sa_sigdesc[signum].sd_tramp = tramp;
500 	ps->sa_sigdesc[signum].sd_vers = vers;
501 	sigminusset(&sigcantmask, &SIGACTION_PS(ps, signum).sa_mask);
502 
503 	if ((prop & SA_NORESET) != 0)
504 		SIGACTION_PS(ps, signum).sa_flags &= ~SA_RESETHAND;
505 
506 	if (signum == SIGCHLD) {
507 		if (nsa->sa_flags & SA_NOCLDSTOP)
508 			p->p_sflag |= PS_NOCLDSTOP;
509 		else
510 			p->p_sflag &= ~PS_NOCLDSTOP;
511 		if (nsa->sa_flags & SA_NOCLDWAIT) {
512 			/*
513 			 * Paranoia: since SA_NOCLDWAIT is implemented by
514 			 * reparenting the dying child to PID 1 (and trusting
515 			 * it to reap the zombie), PID 1 itself is forbidden
516 			 * to set SA_NOCLDWAIT.
517 			 */
518 			if (p->p_pid == 1)
519 				p->p_flag &= ~PK_NOCLDWAIT;
520 			else
521 				p->p_flag |= PK_NOCLDWAIT;
522 		} else
523 			p->p_flag &= ~PK_NOCLDWAIT;
524 
525 		if (nsa->sa_handler == SIG_IGN) {
526 			/*
527 			 * Paranoia: same as above.
528 			 */
529 			if (p->p_pid == 1)
530 				p->p_flag &= ~PK_CLDSIGIGN;
531 			else
532 				p->p_flag |= PK_CLDSIGIGN;
533 		} else
534 			p->p_flag &= ~PK_CLDSIGIGN;
535 	}
536 
537 	if ((nsa->sa_flags & SA_NODEFER) == 0)
538 		sigaddset(&SIGACTION_PS(ps, signum).sa_mask, signum);
539 	else
540 		sigdelset(&SIGACTION_PS(ps, signum).sa_mask, signum);
541 
542 	/*
543 	 * Set bit in p_sigctx.ps_sigignore for signals that are set to
544 	 * SIG_IGN, and for signals set to SIG_DFL where the default is to
545 	 * ignore. However, don't put SIGCONT in p_sigctx.ps_sigignore, as
546 	 * we have to restart the process.
547 	 */
548 	if (nsa->sa_handler == SIG_IGN ||
549 	    (nsa->sa_handler == SIG_DFL && (prop & SA_IGNORE) != 0)) {
550 		/* Never to be seen again. */
551 		sigemptyset(&tset);
552 		sigaddset(&tset, signum);
553 		sigclearall(p, &tset, &kq);
554 		if (signum != SIGCONT) {
555 			/* Easier in psignal */
556 			sigaddset(&p->p_sigctx.ps_sigignore, signum);
557 		}
558 		sigdelset(&p->p_sigctx.ps_sigcatch, signum);
559 	} else {
560 		sigdelset(&p->p_sigctx.ps_sigignore, signum);
561 		if (nsa->sa_handler == SIG_DFL)
562 			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
563 		else
564 			sigaddset(&p->p_sigctx.ps_sigcatch, signum);
565 	}
566 
567 	/*
568 	 * Previously held signals may now have become visible.  Ensure that
569 	 * we check for them before returning to userspace.
570 	 */
571 	if (sigispending(l, 0)) {
572 		lwp_lock(l);
573 		l->l_flag |= LW_PENDSIG;
574 		lwp_need_userret(l);
575 		lwp_unlock(l);
576 	}
577 out:
578 	mutex_exit(p->p_lock);
579 	ksiginfo_queue_drain(&kq);
580 
581 	return error;
582 }
583 
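/*
 * Illustrative userland sketch (example only, not part of the kernel build):
 * the SIGCHLD special cases handled in sigaction1() above.  Setting SIGCHLD
 * to SIG_IGN (or using SA_NOCLDWAIT) asks the kernel to reap children
 * automatically, so wait() finds no zombies and fails with ECHILD.
 */
#if 0
#include <sys/wait.h>
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	signal(SIGCHLD, SIG_IGN);	/* children will not become zombies */

	if (fork() == 0)
		_exit(0);		/* child: reaped by the kernel */

	if (wait(NULL) == -1 && errno == ECHILD)
		printf("child was reaped automatically\n");
	return 0;
}
#endif
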
584 int
585 sigprocmask1(struct lwp *l, int how, const sigset_t *nss, sigset_t *oss)
586 {
587 	sigset_t *mask = &l->l_sigmask;
588 	bool more;
589 
590 	KASSERT(mutex_owned(l->l_proc->p_lock));
591 
592 	if (oss) {
593 		*oss = *mask;
594 	}
595 
596 	if (nss == NULL) {
597 		return 0;
598 	}
599 
600 	switch (how) {
601 	case SIG_BLOCK:
602 		sigplusset(nss, mask);
603 		more = false;
604 		break;
605 	case SIG_UNBLOCK:
606 		sigminusset(nss, mask);
607 		more = true;
608 		break;
609 	case SIG_SETMASK:
610 		*mask = *nss;
611 		more = true;
612 		break;
613 	default:
614 		return EINVAL;
615 	}
616 	sigminusset(&sigcantmask, mask);
617 	if (more && sigispending(l, 0)) {
618 		/*
619 		 * Check for pending signals on return to user.
620 		 */
621 		lwp_lock(l);
622 		l->l_flag |= LW_PENDSIG;
623 		lwp_need_userret(l);
624 		lwp_unlock(l);
625 	}
626 	return 0;
627 }
628 
629 void
630 sigpending1(struct lwp *l, sigset_t *ss)
631 {
632 	struct proc *p = l->l_proc;
633 
634 	mutex_enter(p->p_lock);
635 	*ss = l->l_sigpend.sp_set;
636 	sigplusset(&p->p_sigpend.sp_set, ss);
637 	mutex_exit(p->p_lock);
638 }
639 
640 void
641 sigsuspendsetup(struct lwp *l, const sigset_t *ss)
642 {
643 	struct proc *p = l->l_proc;
644 
645 	/*
646 	 * When returning from sigsuspend/pselect/pollts, we want
647 	 * the old mask to be restored after the
648 	 * signal handler has finished.  Thus, we
649 	 * save it here and mark the sigctx structure
650 	 * to indicate this.
651 	 */
652 	mutex_enter(p->p_lock);
653 	l->l_sigrestore = 1;
654 	l->l_sigoldmask = l->l_sigmask;
655 	l->l_sigmask = *ss;
656 	sigminusset(&sigcantmask, &l->l_sigmask);
657 
658 	/* Check for pending signals when sleeping. */
659 	if (sigispending(l, 0)) {
660 		lwp_lock(l);
661 		l->l_flag |= LW_PENDSIG;
662 		lwp_need_userret(l);
663 		lwp_unlock(l);
664 	}
665 	mutex_exit(p->p_lock);
666 }
667 
668 void
669 sigsuspendteardown(struct lwp *l)
670 {
671 	struct proc *p = l->l_proc;
672 
673 	mutex_enter(p->p_lock);
674 	/* Check for pending signals when sleeping. */
675 	if (l->l_sigrestore) {
676 		if (sigispending(l, 0)) {
677 			lwp_lock(l);
678 			l->l_flag |= LW_PENDSIG;
679 			lwp_need_userret(l);
680 			lwp_unlock(l);
681 		} else {
682 			l->l_sigrestore = 0;
683 			l->l_sigmask = l->l_sigoldmask;
684 		}
685 	}
686 	mutex_exit(p->p_lock);
687 }
688 
689 int
690 sigsuspend1(struct lwp *l, const sigset_t *ss)
691 {
692 
693 	if (ss)
694 		sigsuspendsetup(l, ss);
695 
696 	while (kpause("pause", true, 0, NULL) == 0)
697 		;
698 
699 	/* always return EINTR rather than ERESTART... */
700 	return EINTR;
701 }
702 
703 int
704 sigaltstack1(struct lwp *l, const stack_t *nss, stack_t *oss)
705 {
706 	struct proc *p = l->l_proc;
707 	int error = 0;
708 
709 	mutex_enter(p->p_lock);
710 
711 	if (oss)
712 		*oss = l->l_sigstk;
713 
714 	if (nss) {
715 		if (nss->ss_flags & ~SS_ALLBITS)
716 			error = EINVAL;
717 		else if (nss->ss_flags & SS_DISABLE) {
718 			if (l->l_sigstk.ss_flags & SS_ONSTACK)
719 				error = EINVAL;
720 		} else if (nss->ss_size < MINSIGSTKSZ)
721 			error = ENOMEM;
722 
723 		if (!error)
724 			l->l_sigstk = *nss;
725 	}
726 
727 	mutex_exit(p->p_lock);
728 
729 	return error;
730 }
731 
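/*
 * Illustrative userland sketch (example only, not part of the kernel build):
 * the userland side of the checks in sigaltstack1() above.  The alternate
 * stack must be at least MINSIGSTKSZ bytes (ENOMEM otherwise), and a handler
 * only runs on it when installed with SA_ONSTACK.  install_segv_handler()
 * is a hypothetical helper.
 */
#if 0
#include <signal.h>
#include <stdlib.h>
#include <string.h>

int
install_segv_handler(void (*handler)(int))
{
	stack_t ss;
	struct sigaction sa;

	ss.ss_sp = malloc(SIGSTKSZ);
	if (ss.ss_sp == NULL)
		return -1;
	ss.ss_size = SIGSTKSZ;		/* >= MINSIGSTKSZ, or ENOMEM */
	ss.ss_flags = 0;
	if (sigaltstack(&ss, NULL) == -1)
		return -1;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = handler;
	sa.sa_flags = SA_ONSTACK;	/* deliver SIGSEGV on the alt stack */
	sigemptyset(&sa.sa_mask);
	return sigaction(SIGSEGV, &sa, NULL);
}
#endif
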
732 int
733 sigtimedwait1(struct lwp *l, const struct sys_____sigtimedwait50_args *uap,
734     register_t *retval, copyin_t fetchss, copyout_t storeinf, copyin_t fetchts,
735     copyout_t storets)
736 {
737 	/* {
738 		syscallarg(const sigset_t *) set;
739 		syscallarg(siginfo_t *) info;
740 		syscallarg(struct timespec *) timeout;
741 	} */
742 	struct proc *p = l->l_proc;
743 	int error, signum, timo;
744 	struct timespec ts, tsstart, tsnow;
745 	ksiginfo_t ksi;
746 
747 	/*
748 	 * Calculate timeout, if it was specified.
749 	 *
750 	 * NULL pointer means an infinite timeout.
751 	 * {.tv_sec = 0, .tv_nsec = 0} means do not block.
752 	 */
753 	if (SCARG(uap, timeout)) {
754 		error = (*fetchts)(SCARG(uap, timeout), &ts, sizeof(ts));
755 		if (error)
756 			return error;
757 
758 		if ((error = itimespecfix(&ts)) != 0)
759 			return error;
760 
761 		timo = tstohz(&ts);
762 		if (timo == 0) {
763 			if (ts.tv_sec == 0 && ts.tv_nsec == 0)
764 				timo = -1; /* do not block */
765 			else
766 				timo = 1; /* the shortest possible timeout */
767 		}
768 
769 		/*
770 		 * Remember the current uptime; it is used to update the
771 		 * timeout in the ECANCELED/ERESTART case.
772 		 */
773 		getnanouptime(&tsstart);
774 	} else {
775 		memset(&tsstart, 0, sizeof(tsstart)); /* XXXgcc */
776 		timo = 0; /* infinite timeout */
777 	}
778 
779 	error = (*fetchss)(SCARG(uap, set), &l->l_sigwaitset,
780 	    sizeof(l->l_sigwaitset));
781 	if (error)
782 		return error;
783 
784 	/*
785 	 * Silently ignore SA_CANTMASK signals.  psignal1() would ignore
786 	 * SA_CANTMASK signals in the wait set anyway; we strip them here
787 	 * only for the pending-signal checks below.
788 	 */
789 	sigminusset(&sigcantmask, &l->l_sigwaitset);
790 
791 	memset(&ksi.ksi_info, 0, sizeof(ksi.ksi_info));
792 
793 	mutex_enter(p->p_lock);
794 
795 	/* Check for pending signals in the process first, then in the LWP. */
796 	if ((signum = sigget(&p->p_sigpend, &ksi, 0, &l->l_sigwaitset)) == 0)
797 		signum = sigget(&l->l_sigpend, &ksi, 0, &l->l_sigwaitset);
798 
799 	if (signum != 0) {
800 		/* Found a pending signal; just copy it out to the user. */
801 		mutex_exit(p->p_lock);
802 		goto out;
803 	}
804 
805 	if (timo < 0) {
806 		/* If not allowed to block, return an error */
807 		mutex_exit(p->p_lock);
808 		return EAGAIN;
809 	}
810 
811 	/*
812 	 * Set up the sigwait list and wait for signal to arrive.
813 	 * We can either be woken up or time out.
814 	 */
815 	l->l_sigwaited = &ksi;
816 	LIST_INSERT_HEAD(&p->p_sigwaiters, l, l_sigwaiter);
817 	error = cv_timedwait_sig(&l->l_sigcv, p->p_lock, timo);
818 
819 	/*
820 	 * Need to find out if we woke as a result of _lwp_wakeup() or a
821 	 * signal outside our wait set.
822 	 */
823 	if (l->l_sigwaited != NULL) {
824 		if (error == EINTR) {
825 			/* Wakeup via _lwp_wakeup(). */
826 			error = ECANCELED;
827 		} else if (!error) {
828 			/* Spurious wakeup - arrange for syscall restart. */
829 			error = ERESTART;
830 		}
831 		l->l_sigwaited = NULL;
832 		LIST_REMOVE(l, l_sigwaiter);
833 	}
834 	mutex_exit(p->p_lock);
835 
836 	/*
837 	 * If the sleep was interrupted (either by signal or wakeup), update
838 	 * the timeout and copy the updated value back out.  It will be used
839 	 * when the syscall is restarted or called again.
840 	 */
841 	if (timo && (error == ERESTART || error == ECANCELED)) {
842 		getnanouptime(&tsnow);
843 
844 		/* Compute how much time has passed since start. */
845 		timespecsub(&tsnow, &tsstart, &tsnow);
846 
847 		/* Subtract passed time from timeout. */
848 		timespecsub(&ts, &tsnow, &ts);
849 
850 		if (ts.tv_sec < 0)
851 			error = EAGAIN;
852 		else {
853 			/* Copy updated timeout to userland. */
854 			error = (*storets)(&ts, SCARG(uap, timeout),
855 			    sizeof(ts));
856 		}
857 	}
858 out:
859 	/*
860 	 * If a signal from the wait set arrived, copy it to userland.
861 	 * Copy only the used part of siginfo; the padding part is
862 	 * left unchanged (userland is not supposed to touch it anyway).
863 	 */
864 	if (error == 0 && SCARG(uap, info)) {
865 		error = (*storeinf)(&ksi.ksi_info, SCARG(uap, info),
866 		    sizeof(ksi.ksi_info));
867 	}
868 	if (error == 0) {
869 		*retval = ksi.ksi_info._signo;
870 		SDT_PROBE(proc, kernel, , signal__clear, *retval,
871 		    &ksi, 0, 0, 0);
872 	}
873 	return error;
874 }
875