/*	$OpenBSD: kern_exit.c,v 1.211 2023/04/25 18:14:06 claudio Exp $	*/
/*	$NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/wait.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/ptrace.h>
#include <sys/acct.h>
#include <sys/filedesc.h>
#include <sys/signalvar.h>
#include <sys/sched.h>
#include <sys/ktrace.h>
#include <sys/pool.h>
#include <sys/mutex.h>
#ifdef SYSVSEM
#include <sys/sem.h>
#endif
#include <sys/witness.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

#include "kcov.h"
#if NKCOV > 0
#include <sys/kcov.h>
#endif

void	proc_finish_wait(struct proc *, struct proc *);
void	process_clear_orphan(struct process *);
void	process_zap(struct process *);
void	proc_free(struct proc *);
void	unveil_destroy(struct process *ps);

/*
 * exit --
 *	Death of process.
 */
int
sys_exit(struct proc *p, void *v, register_t *retval)
{
	struct sys_exit_args /* {
		syscallarg(int) rval;
	} */ *uap = v;

	exit1(p, SCARG(uap, rval), 0, EXIT_NORMAL);
	/* NOTREACHED */
	return (0);
}

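/*
 * __threxit -- Exit of a single thread.  If the caller supplied a
 * "not dead" pointer, clear the word it points to (posting SIGSEGV if
 * the address is bad), then tear down only the calling thread.
 */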
int
sys___threxit(struct proc *p, void *v, register_t *retval)
{
	struct sys___threxit_args /* {
		syscallarg(pid_t *) notdead;
	} */ *uap = v;

	if (SCARG(uap, notdead) != NULL) {
		pid_t zero = 0;
		if (copyout(&zero, SCARG(uap, notdead), sizeof(zero)))
			psignal(p, SIGSEGV);
	}
	exit1(p, 0, 0, EXIT_THREAD);

	return (0);
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(struct proc *p, int xexit, int xsig, int flags)
{
	struct process *pr, *qr, *nqr;
	struct rusage *rup;
	int s;

	atomic_setbits_int(&p->p_flag, P_WEXIT);

	pr = p->p_p;

	/* single-threaded? */
	if (!P_HASSIBLING(p)) {
		flags = EXIT_NORMAL;
	} else {
		/* nope, multi-threaded */
		if (flags == EXIT_NORMAL)
			single_thread_set(p, SINGLE_EXIT, 1);
		else if (flags == EXIT_THREAD)
			single_thread_check(p, 0);
	}

	if (flags == EXIT_NORMAL && !(pr->ps_flags & PS_EXITING)) {
		if (pr->ps_pid == 1)
			panic("init died (signal %d, exit %d)", xsig, xexit);

		atomic_setbits_int(&pr->ps_flags, PS_EXITING);
		pr->ps_xexit = xexit;
		pr->ps_xsig  = xsig;

		/*
		 * If parent is waiting for us to exit or exec, PS_PPWAIT
		 * is set; we wake up the parent early to avoid deadlock.
		 */
		if (pr->ps_flags & PS_PPWAIT) {
			atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
			atomic_clearbits_int(&pr->ps_pptr->ps_flags,
			    PS_ISPWAIT);
			wakeup(pr->ps_pptr);
		}
	}

	/* unlink ourselves from the active threads */
	SCHED_LOCK(s);
	TAILQ_REMOVE(&pr->ps_threads, p, p_thr_link);
	SCHED_UNLOCK(s);

	if ((p->p_flag & P_THREAD) == 0) {
		/* main thread gotta wait because it has the pid, et al */
		while (pr->ps_threadcnt > 1)
			tsleep_nsec(&pr->ps_threads, PWAIT, "thrdeath", INFSLP);
		if (pr->ps_flags & PS_PROFIL)
			stopprofclock(pr);
	}

	rup = pr->ps_ru;
	if (rup == NULL) {
		rup = pool_get(&rusage_pool, PR_WAITOK | PR_ZERO);
		if (pr->ps_ru == NULL) {
			pr->ps_ru = rup;
		} else {
			pool_put(&rusage_pool, rup);
			rup = pr->ps_ru;
		}
	}
	p->p_siglist = 0;
	if ((p->p_flag & P_THREAD) == 0)
		pr->ps_siglist = 0;

	kqpoll_exit();

#if NKCOV > 0
	kcov_exit(p);
#endif

	if ((p->p_flag & P_THREAD) == 0) {
		sigio_freelist(&pr->ps_sigiolst);

		/* close open files and release open-file table */
		fdfree(p);

		cancel_all_itimers();

		timeout_del(&pr->ps_rucheck_to);
#ifdef SYSVSEM
		semexit(pr);
#endif
		killjobc(pr);
#ifdef ACCOUNTING
		acct_process(p);
#endif

#ifdef KTRACE
		/* release trace file */
		if (pr->ps_tracevp)
			ktrcleartrace(pr);
#endif

		unveil_destroy(pr);

		/*
		 * If parent has the SAS_NOCLDWAIT flag set, we're not
		 * going to become a zombie.
		 */
		if (pr->ps_pptr->ps_sigacts->ps_sigflags & SAS_NOCLDWAIT)
			atomic_setbits_int(&pr->ps_flags, PS_NOZOMBIE);
	}

	p->p_fd = NULL;		/* zap the thread's copy */

	/*
	 * Remove proc from pidhash chain and allproc so looking
	 * it up won't work.  We will put the proc on the
	 * deadproc list later (using the p_hash member), and
	 * wake up the reaper when we do.  If this is the last
	 * thread of a process that isn't PS_NOZOMBIE, we'll put
	 * the process on the zombprocess list below.
	 */
	/*
	 * NOTE: WE ARE NO LONGER ALLOWED TO SLEEP!
	 */
	p->p_stat = SDEAD;

	LIST_REMOVE(p, p_hash);
	LIST_REMOVE(p, p_list);

	if ((p->p_flag & P_THREAD) == 0) {
		LIST_REMOVE(pr, ps_hash);
		LIST_REMOVE(pr, ps_list);

		if ((pr->ps_flags & PS_NOZOMBIE) == 0)
			LIST_INSERT_HEAD(&zombprocess, pr, ps_list);
		else {
			/*
			 * Not going to be a zombie, so it's now off all
			 * the lists scanned by ispidtaken(), so block
			 * fast reuse of the pid now.
			 */
			freepid(pr->ps_pid);
		}

		/*
		 * Reparent children to their original parent, in case
		 * they were being traced, or to init(8).
		 */
		qr = LIST_FIRST(&pr->ps_children);
		if (qr)		/* only need this if any child is S_ZOMB */
			wakeup(initprocess);
		for (; qr != NULL; qr = nqr) {
			nqr = LIST_NEXT(qr, ps_sibling);
			/*
			 * Traced processes are killed since their
			 * existence means someone is screwing up.
			 */
			if (qr->ps_flags & PS_TRACED &&
			    !(qr->ps_flags & PS_EXITING)) {
				process_untrace(qr);

				/*
				 * If single threading is active,
				 * direct the signal to the active
				 * thread to avoid deadlock.
				 */
				if (qr->ps_single)
					ptsignal(qr->ps_single, SIGKILL,
					    STHREAD);
				else
					prsignal(qr, SIGKILL);
			} else {
				process_reparent(qr, initprocess);
			}
		}

		/*
		 * Make sure orphans won't remember the exiting process.
		 */
		while ((qr = LIST_FIRST(&pr->ps_orphans)) != NULL) {
			KASSERT(qr->ps_oppid == pr->ps_pid);
			qr->ps_oppid = 0;
			process_clear_orphan(qr);
		}
	}

	/* add thread's accumulated rusage into the process's total */
	ruadd(rup, &p->p_ru);
	tuagg(pr, p);

	/*
	 * clear %cpu usage during swap
	 */
	p->p_pctcpu = 0;

	if ((p->p_flag & P_THREAD) == 0) {
		/*
		 * Final thread has died, so add on our children's rusage
		 * and calculate the total times
		 */
		calcru(&pr->ps_tu, &rup->ru_utime, &rup->ru_stime, NULL);
		ruadd(rup, &pr->ps_cru);

		/*
		 * Notify parent that we're gone.  If we're not going to
		 * become a zombie, reparent to process 1 (init) so that
		 * we can wake our original parent to possibly unblock
		 * wait4() to return ECHILD.
		 */
		if (pr->ps_flags & PS_NOZOMBIE) {
			struct process *ppr = pr->ps_pptr;
			process_reparent(pr, initprocess);
			wakeup(ppr);
		}
	}

	/* just a thread? detach it from its process */
	if (p->p_flag & P_THREAD) {
		/* scheduler_wait_hook(pr->ps_mainproc, p); XXX */
		if (--pr->ps_threadcnt == 1)
			wakeup(&pr->ps_threads);
		KASSERT(pr->ps_threadcnt > 0);
	}

	/* Release the thread's read reference of resource limit structure. */
	if (p->p_limit != NULL) {
		struct plimit *limit;

		limit = p->p_limit;
		p->p_limit = NULL;
		lim_free(limit);
	}

	/*
	 * Other substructures are freed from reaper and wait().
	 */

	/*
	 * Finally, call machine-dependent code to switch to a new
	 * context (possibly the idle context).  Once we are no longer
	 * using the dead process's vmspace and stack, exit2() will be
	 * called to schedule those resources to be released by the
	 * reaper thread.
	 *
	 * Note that cpu_exit() will end with a call equivalent to
	 * cpu_switch(), finishing our execution (pun intended).
	 */
	uvmexp.swtch++;
	cpu_exit(p);
	panic("cpu_exit returned");
}

/*
 * Locking of this proclist is special; it's accessed in a
 * critical section of process exit, and thus locking it can't
 * modify interrupt state.  We use a simple spin lock for this
 * proclist.  We use the p_hash member to link up to deadproc.
 */
struct mutex deadproc_mutex =
    MUTEX_INITIALIZER_FLAGS(IPL_NONE, "deadproc", MTX_NOWITNESS);
struct proclist deadproc = LIST_HEAD_INITIALIZER(deadproc);

/*
 * We are called from cpu_exit() once it is safe to schedule the
 * dead process's resources to be freed.
 *
 * NOTE: One must be careful with locking in this routine.  It's
 * called from a critical section in machine-dependent code, so
 * we should refrain from changing any interrupt state.
 *
 * We lock the deadproc list, place the proc on that list (using
 * the p_hash member), and wake up the reaper.
 */
void
exit2(struct proc *p)
{
	mtx_enter(&deadproc_mutex);
	LIST_INSERT_HEAD(&deadproc, p, p_hash);
	mtx_leave(&deadproc_mutex);

	wakeup(&deadproc);
}

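/*
 * Release the per-thread resources still held at this point: drop the
 * thread's ucred reference, return the proc structure to its pool, and
 * decrement the global thread count.
 */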
void
proc_free(struct proc *p)
{
	crfree(p->p_ucred);
	pool_put(&proc_pool, p);
	nthreads--;
}

/*
 * Process reaper.  This is run by a kernel thread to free the resources
 * of a dead process.  Once the resources are free, the process becomes
 * a zombie, and the parent is allowed to read the undead's status.
 */
void
reaper(void *arg)
{
	struct proc *p;

	KERNEL_UNLOCK();

	SCHED_ASSERT_UNLOCKED();

	for (;;) {
		mtx_enter(&deadproc_mutex);
		while ((p = LIST_FIRST(&deadproc)) == NULL)
			msleep_nsec(&deadproc, &deadproc_mutex, PVM, "reaper",
			    INFSLP);

		/* Remove us from the deadproc list. */
		LIST_REMOVE(p, p_hash);
		mtx_leave(&deadproc_mutex);

		WITNESS_THREAD_EXIT(p);

		KERNEL_LOCK();

		/*
		 * Free the VM resources we're still holding on to.
		 * We must do this from a valid thread because doing
		 * so may block.
		 */
		uvm_uarea_free(p);
		p->p_vmspace = NULL;		/* zap the thread's copy */

		if (p->p_flag & P_THREAD) {
			/* Just a thread */
			proc_free(p);
		} else {
			struct process *pr = p->p_p;

			/* Release the rest of the process's vmspace */
			uvm_exit(pr);

			if ((pr->ps_flags & PS_NOZOMBIE) == 0) {
				/* Process is now a true zombie. */
				atomic_setbits_int(&pr->ps_flags, PS_ZOMBIE);
			}

			/* Notify listeners of our demise and clean up. */
			knote_processexit(pr);

			if (pr->ps_flags & PS_ZOMBIE) {
				/* Post SIGCHLD and wake up parent. */
				prsignal(pr->ps_pptr, SIGCHLD);
				wakeup(pr->ps_pptr);
			} else {
				/* No one will wait for us, just zap it. */
				process_zap(pr);
			}
		}

		KERNEL_UNLOCK();
	}
}

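/*
 * Common back end for wait4(2) and waitid(2): scan the calling process's
 * children (and, failing that, its orphans) for one matching idtype/id,
 * report an exit, trap, stop, or continue event as permitted by "options",
 * and either reap the child or leave it for a later wait (WNOWAIT).
 * Unless WNOHANG is given, sleep until a matching event occurs.
 */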
int
dowait6(struct proc *q, idtype_t idtype, id_t id, int *statusp, int options,
    struct rusage *rusage, siginfo_t *info, register_t *retval)
{
	int nfound;
	struct process *pr;
	struct proc *p;
	int error;

	if (info != NULL)
		memset(info, 0, sizeof(*info));

loop:
	nfound = 0;
	LIST_FOREACH(pr, &q->p_p->ps_children, ps_sibling) {
		if ((pr->ps_flags & PS_NOZOMBIE) ||
		    (idtype == P_PID && id != pr->ps_pid) ||
		    (idtype == P_PGID && id != pr->ps_pgid))
			continue;

		p = pr->ps_mainproc;

		nfound++;
		if ((options & WEXITED) && (pr->ps_flags & PS_ZOMBIE)) {
			*retval = pr->ps_pid;
			if (info != NULL) {
				info->si_pid = pr->ps_pid;
				info->si_uid = pr->ps_ucred->cr_uid;
				info->si_signo = SIGCHLD;
				if (pr->ps_xsig == 0) {
					info->si_code = CLD_EXITED;
					info->si_status = pr->ps_xexit;
				} else if (WCOREDUMP(pr->ps_xsig)) {
					info->si_code = CLD_DUMPED;
					info->si_status = _WSTATUS(pr->ps_xsig);
				} else {
					info->si_code = CLD_KILLED;
					info->si_status = _WSTATUS(pr->ps_xsig);
				}
			}

			if (statusp != NULL)
				*statusp = W_EXITCODE(pr->ps_xexit,
				    pr->ps_xsig);
			if (rusage != NULL)
				memcpy(rusage, pr->ps_ru, sizeof(*rusage));
			if ((options & WNOWAIT) == 0)
				proc_finish_wait(q, p);
			return (0);
		}
		if ((options & WTRAPPED) &&
		    pr->ps_flags & PS_TRACED &&
		    (pr->ps_flags & PS_WAITED) == 0 && pr->ps_single &&
		    pr->ps_single->p_stat == SSTOP &&
		    (pr->ps_single->p_flag & P_SUSPSINGLE) == 0) {
			if (single_thread_wait(pr, 0))
				goto loop;

			if ((options & WNOWAIT) == 0)
				atomic_setbits_int(&pr->ps_flags, PS_WAITED);

			*retval = pr->ps_pid;
			if (info != NULL) {
				info->si_pid = pr->ps_pid;
				info->si_uid = pr->ps_ucred->cr_uid;
				info->si_signo = SIGCHLD;
				info->si_code = CLD_TRAPPED;
				info->si_status = pr->ps_xsig;
			}

			if (statusp != NULL)
				*statusp = W_STOPCODE(pr->ps_xsig);
			if (rusage != NULL)
				memset(rusage, 0, sizeof(*rusage));
			return (0);
		}
		if (p->p_stat == SSTOP &&
		    (pr->ps_flags & PS_WAITED) == 0 &&
		    (p->p_flag & P_SUSPSINGLE) == 0 &&
		    (pr->ps_flags & PS_TRACED ||
		    options & WUNTRACED)) {
			if ((options & WNOWAIT) == 0)
				atomic_setbits_int(&pr->ps_flags, PS_WAITED);

			*retval = pr->ps_pid;
			if (info != NULL) {
				info->si_pid = pr->ps_pid;
				info->si_uid = pr->ps_ucred->cr_uid;
				info->si_signo = SIGCHLD;
				info->si_code = CLD_STOPPED;
				info->si_status = pr->ps_xsig;
			}

			if (statusp != NULL)
				*statusp = W_STOPCODE(pr->ps_xsig);
			if (rusage != NULL)
				memset(rusage, 0, sizeof(*rusage));
			return (0);
		}
		if ((options & WCONTINUED) && (p->p_flag & P_CONTINUED)) {
			if ((options & WNOWAIT) == 0)
				atomic_clearbits_int(&p->p_flag, P_CONTINUED);

			*retval = pr->ps_pid;
			if (info != NULL) {
				info->si_pid = pr->ps_pid;
				info->si_uid = pr->ps_ucred->cr_uid;
				info->si_signo = SIGCHLD;
				info->si_code = CLD_CONTINUED;
				info->si_status = SIGCONT;
			}

			if (statusp != NULL)
				*statusp = _WCONTINUED;
			if (rusage != NULL)
				memset(rusage, 0, sizeof(*rusage));
			return (0);
		}
	}
	/*
	 * Look in the orphans list too, to allow the parent to
	 * collect its child's exit status even if the child is being
	 * debugged.
	 *
	 * The debugger detaches from the parent upon a successful
	 * switch-over from parent to child.  Because of that
	 * re-parenting, the parent loses the child to the debugger and
	 * a wait4(2) call would report that it has no children to wait
	 * for.  By maintaining a list of orphans we allow the parent
	 * to successfully wait until the child becomes a zombie.
	 */
	if (nfound == 0) {
		LIST_FOREACH(pr, &q->p_p->ps_orphans, ps_orphan) {
			if ((pr->ps_flags & PS_NOZOMBIE) ||
			    (idtype == P_PID && id != pr->ps_pid) ||
			    (idtype == P_PGID && id != pr->ps_pgid))
				continue;
			nfound++;
			break;
		}
	}
	if (nfound == 0)
		return (ECHILD);
	if (options & WNOHANG) {
		*retval = 0;
		return (0);
	}
	if ((error = tsleep_nsec(q->p_p, PWAIT | PCATCH, "wait", INFSLP)) != 0)
		return (error);
	goto loop;
}

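/*
 * wait4(2): map the traditional pid argument onto the (idtype, id) pair
 * understood by dowait6() -- pid > 0 waits for that specific process,
 * 0 for any member of the caller's process group, -1 for any child, and
 * pid < -1 for any member of process group -pid -- then copy the status
 * word and rusage back out to userland.
 */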
int
sys_wait4(struct proc *q, void *v, register_t *retval)
{
	struct sys_wait4_args /* {
		syscallarg(pid_t) pid;
		syscallarg(int *) status;
		syscallarg(int) options;
		syscallarg(struct rusage *) rusage;
	} */ *uap = v;
	struct rusage ru;
	pid_t pid = SCARG(uap, pid);
	int options = SCARG(uap, options);
	int status, error;
	idtype_t idtype;
	id_t id;

	if (SCARG(uap, options) &~ (WUNTRACED|WNOHANG|WCONTINUED))
		return (EINVAL);
	options |= WEXITED | WTRAPPED;

	if (SCARG(uap, pid) == WAIT_MYPGRP) {
		idtype = P_PGID;
		id = q->p_p->ps_pgid;
	} else if (SCARG(uap, pid) == WAIT_ANY) {
		idtype = P_ALL;
		id = 0;
	} else if (pid < 0) {
		idtype = P_PGID;
		id = -pid;
	} else {
		idtype = P_PID;
		id = pid;
	}

	error = dowait6(q, idtype, id,
	    SCARG(uap, status) ? &status : NULL, options,
	    SCARG(uap, rusage) ? &ru : NULL, NULL, retval);
	if (error == 0 && *retval > 0 && SCARG(uap, status)) {
		error = copyout(&status, SCARG(uap, status), sizeof(status));
	}
	if (error == 0 && *retval > 0 && SCARG(uap, rusage)) {
		error = copyout(&ru, SCARG(uap, rusage), sizeof(ru));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(q, KTR_STRUCT))
			ktrrusage(q, &ru);
#endif
	}
	return (error);
}

int
sys_waitid(struct proc *q, void *v, register_t *retval)
{
	struct sys_waitid_args /* {
		syscallarg(idtype_t) idtype;
		syscallarg(id_t) id;
		syscallarg(siginfo_t *) info;
		syscallarg(int) options;
	} */ *uap = v;
	siginfo_t info;
	idtype_t idtype = SCARG(uap, idtype);
	int options = SCARG(uap, options);
	int error;

	if (options &~ (WSTOPPED|WCONTINUED|WEXITED|WTRAPPED|WNOHANG|WNOWAIT))
		return (EINVAL);
	if ((options & (WSTOPPED|WCONTINUED|WEXITED|WTRAPPED)) == 0)
		return (EINVAL);
	if (idtype != P_ALL && idtype != P_PID && idtype != P_PGID)
		return (EINVAL);

	error = dowait6(q, idtype, SCARG(uap, id), NULL,
	    options, NULL, &info, retval);
	if (error == 0) {
		error = copyout(&info, SCARG(uap, info), sizeof(info));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(q, KTR_STRUCT))
			ktrsiginfo(q, &info);
#endif
	}
	if (error == 0)
		*retval = 0;
	return (error);
}
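/*
 * Illustrative userland sketch (shown only for exposition, not part of
 * this file): how the status word built by dowait6() is consumed.
 *
 *	pid_t pid = fork();
 *	if (pid == 0)
 *		_exit(7);
 *
 *	int status;
 *	if (waitpid(pid, &status, 0) == pid && WIFEXITED(status))
 *		printf("child exited with %d\n", WEXITSTATUS(status));
 *
 * A waitpid(3) call like this ends up in sys_wait4() above with idtype
 * P_PID; waitid(2) instead passes the (idtype, id) pair directly and
 * returns a siginfo_t rather than a status word.
 */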
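/*
 * Finish a successful wait: if the child was picked up via a ptrace(2)
 * attach, hand it back to its original parent and notify that parent;
 * otherwise fold the child's rusage into the waiter's child rusage and
 * destroy what is left of the zombie.
 */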
void
proc_finish_wait(struct proc *waiter, struct proc *p)
{
	struct process *pr, *tr;
	struct rusage *rup;

	/*
	 * If we got the child via a ptrace 'attach',
	 * we need to give it back to the old parent.
	 */
	pr = p->p_p;
	if (pr->ps_oppid != 0 && (pr->ps_oppid != pr->ps_pptr->ps_pid) &&
	   (tr = prfind(pr->ps_oppid))) {
		pr->ps_oppid = 0;
		atomic_clearbits_int(&pr->ps_flags, PS_TRACED);
		process_reparent(pr, tr);
		prsignal(tr, SIGCHLD);
		wakeup(tr);
	} else {
		scheduler_wait_hook(waiter, p);
		rup = &waiter->p_p->ps_cru;
		ruadd(rup, pr->ps_ru);
		LIST_REMOVE(pr, ps_list);	/* off zombprocess */
		freepid(pr->ps_pid);
		process_zap(pr);
	}
}

/*
 * give process back to original parent or init(8)
 */
void
process_untrace(struct process *pr)
{
	struct process *ppr = NULL;

	KASSERT(pr->ps_flags & PS_TRACED);

	if (pr->ps_oppid != 0 &&
	    (pr->ps_oppid != pr->ps_pptr->ps_pid))
		ppr = prfind(pr->ps_oppid);

	/* not being traced any more */
	pr->ps_oppid = 0;
	atomic_clearbits_int(&pr->ps_flags, PS_TRACED);
	process_reparent(pr, ppr ? ppr : initprocess);
}

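/*
 * Remove the process from its parent's orphan list, if it is on one.
 */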
void
process_clear_orphan(struct process *pr)
{
	if (pr->ps_flags & PS_ORPHAN) {
		LIST_REMOVE(pr, ps_orphan);
		atomic_clearbits_int(&pr->ps_flags, PS_ORPHAN);
	}
}

/*
 * make process 'parent' the new parent of process 'child'.
 */
void
process_reparent(struct process *child, struct process *parent)
{

	if (child->ps_pptr == parent)
		return;

	KASSERT(child->ps_oppid == 0 ||
		child->ps_oppid == child->ps_pptr->ps_pid);

	LIST_REMOVE(child, ps_sibling);
	LIST_INSERT_HEAD(&parent->ps_children, child, ps_sibling);

	process_clear_orphan(child);
	if (child->ps_flags & PS_TRACED) {
		atomic_setbits_int(&child->ps_flags, PS_ORPHAN);
		LIST_INSERT_HEAD(&child->ps_pptr->ps_orphans, child, ps_orphan);
	}

	child->ps_pptr = parent;
	child->ps_ppid = parent->ps_pid;
}

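/*
 * Final teardown of a process: detach it from its process group, sibling
 * and orphan lists, drop its remaining references (text vnode, credentials,
 * resource limits, sigacts, rusage), return the process structure to its
 * pool, and free its main thread.
 */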
void
process_zap(struct process *pr)
{
	struct vnode *otvp;
	struct proc *p = pr->ps_mainproc;

	/*
	 * Finally finished with old proc entry.
	 * Unlink it from its process group and free it.
	 */
	leavepgrp(pr);
	LIST_REMOVE(pr, ps_sibling);
	process_clear_orphan(pr);

	/*
	 * Decrement the count of procs running with this uid.
	 */
	(void)chgproccnt(pr->ps_ucred->cr_ruid, -1);

	/*
	 * Release reference to text vnode
	 */
	otvp = pr->ps_textvp;
	pr->ps_textvp = NULL;
	if (otvp)
		vrele(otvp);

	KASSERT(pr->ps_threadcnt == 1);
	if (pr->ps_ptstat != NULL)
		free(pr->ps_ptstat, M_SUBPROC, sizeof(*pr->ps_ptstat));
	pool_put(&rusage_pool, pr->ps_ru);
	KASSERT(TAILQ_EMPTY(&pr->ps_threads));
	sigactsfree(pr->ps_sigacts);
	lim_free(pr->ps_limit);
	crfree(pr->ps_ucred);
	pool_put(&process_pool, pr);
	nprocesses--;

	proc_free(p);
}
831