xref: /openbsd-src/sys/kern/kern_exit.c (revision 4260963390db52f81c998a36b1c71b872da4968f)
1 /*	$OpenBSD: kern_exit.c,v 1.213 2023/09/04 13:18:41 claudio Exp $	*/
2 /*	$NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $	*/
3 
4 /*
5  * Copyright (c) 1982, 1986, 1989, 1991, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  * (c) UNIX System Laboratories, Inc.
8  * All or some portions of this file are derived from material licensed
9  * to the University of California by American Telephone and Telegraph
10  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
11  * the permission of UNIX System Laboratories, Inc.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
38  */
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/proc.h>
43 #include <sys/time.h>
44 #include <sys/resource.h>
45 #include <sys/wait.h>
46 #include <sys/vnode.h>
47 #include <sys/malloc.h>
48 #include <sys/resourcevar.h>
49 #include <sys/ptrace.h>
50 #include <sys/acct.h>
51 #include <sys/filedesc.h>
52 #include <sys/signalvar.h>
53 #include <sys/sched.h>
54 #include <sys/ktrace.h>
55 #include <sys/pool.h>
56 #include <sys/mutex.h>
57 #ifdef SYSVSEM
58 #include <sys/sem.h>
59 #endif
60 #include <sys/witness.h>
61 
62 #include <sys/mount.h>
63 #include <sys/syscallargs.h>
64 
65 #include <uvm/uvm_extern.h>
66 
67 #include "kcov.h"
68 #if NKCOV > 0
69 #include <sys/kcov.h>
70 #endif
71 
72 void	proc_finish_wait(struct proc *, struct proc *);
73 void	process_clear_orphan(struct process *);
74 void	process_zap(struct process *);
75 void	proc_free(struct proc *);
76 void	unveil_destroy(struct process *ps);
77 
78 /*
79  * exit --
80  *	Death of process.
81  */
82 int
83 sys_exit(struct proc *p, void *v, register_t *retval)
84 {
85 	struct sys_exit_args /* {
86 		syscallarg(int) rval;
87 	} */ *uap = v;
88 
89 	exit1(p, SCARG(uap, rval), 0, EXIT_NORMAL);
90 	/* NOTREACHED */
91 	return (0);
92 }
93 
94 int
95 sys___threxit(struct proc *p, void *v, register_t *retval)
96 {
97 	struct sys___threxit_args /* {
98 		syscallarg(pid_t *) notdead;
99 	} */ *uap = v;
100 
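	/*
	 * If the caller passed a "notdead" pointer, zero it so userland
	 * (typically the thread library) can observe that this thread is
	 * exiting; a faulting address earns the caller a SIGSEGV rather
	 * than an error return.
	 */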
101 	if (SCARG(uap, notdead) != NULL) {
102 		pid_t zero = 0;
103 		if (copyout(&zero, SCARG(uap, notdead), sizeof(zero)))
104 			psignal(p, SIGSEGV);
105 	}
106 	exit1(p, 0, 0, EXIT_THREAD);
107 
108 	return (0);
109 }
110 
111 /*
112  * Exit: deallocate address space and other resources, change proc state
113  * to zombie, and unlink proc from allproc and parent's lists.  Save exit
114  * status and rusage for wait().  Check for child processes and orphan them.
115  */
116 void
117 exit1(struct proc *p, int xexit, int xsig, int flags)
118 {
119 	struct process *pr, *qr, *nqr;
120 	struct rusage *rup;
121 	struct timespec ts;
122 	int s, wake;
123 
124 	atomic_setbits_int(&p->p_flag, P_WEXIT);
125 
126 	pr = p->p_p;
127 
128 	/* single-threaded? */
129 	if (!P_HASSIBLING(p)) {
130 		flags = EXIT_NORMAL;
131 	} else {
132 		/* nope, multi-threaded */
133 		if (flags == EXIT_NORMAL)
134 			single_thread_set(p, SINGLE_EXIT, 1);
135 		else if (flags == EXIT_THREAD)
136 			single_thread_check(p, 0);
137 	}
138 
139 	if (flags == EXIT_NORMAL && !(pr->ps_flags & PS_EXITING)) {
140 		if (pr->ps_pid == 1)
141 			panic("init died (signal %d, exit %d)", xsig, xexit);
142 
143 		atomic_setbits_int(&pr->ps_flags, PS_EXITING);
144 		pr->ps_xexit = xexit;
145 		pr->ps_xsig  = xsig;
146 
147 		/*
148 		 * If parent is waiting for us to exit or exec, PS_PPWAIT
149 		 * is set; we wake up the parent early to avoid deadlock.
150 		 */
151 		if (pr->ps_flags & PS_PPWAIT) {
152 			atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
153 			atomic_clearbits_int(&pr->ps_pptr->ps_flags,
154 			    PS_ISPWAIT);
155 			wakeup(pr->ps_pptr);
156 		}
157 	}
158 
159 	/* unlink ourselves from the active threads */
160 	SCHED_LOCK(s);
161 	TAILQ_REMOVE(&pr->ps_threads, p, p_thr_link);
162 	SCHED_UNLOCK(s);
163 
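	/*
	 * Drop this thread from the thread count.  If a single-thread
	 * operation is in progress and every remaining thread is now
	 * accounted for, wake the thread waiting in single_thread_wait().
	 */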
164 	mtx_enter(&pr->ps_mtx);
165 	pr->ps_threadcnt--;
166 	wake = (pr->ps_single && pr->ps_singlecnt == pr->ps_threadcnt);
167 	mtx_leave(&pr->ps_mtx);
168 	if (wake)
169 		wakeup(&pr->ps_singlecnt);
170 
171 	if ((p->p_flag & P_THREAD) == 0) {
172 		/* the main thread must wait because it has the pid, etc. */
173 		/* XXX locking depends on kernel lock here. */
174 		while (pr->ps_threadcnt > 0)
175 			tsleep_nsec(&pr->ps_threads, PWAIT, "thrdeath", INFSLP);
176 		if (pr->ps_flags & PS_PROFIL)
177 			stopprofclock(pr);
178 	}
179 
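	/*
	 * Make sure the process has an rusage accumulator.  pool_get()
	 * may sleep, so re-check whether another thread installed one
	 * in the meantime and discard ours if it did.
	 */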
180 	rup = pr->ps_ru;
181 	if (rup == NULL) {
182 		rup = pool_get(&rusage_pool, PR_WAITOK | PR_ZERO);
183 		if (pr->ps_ru == NULL) {
184 			pr->ps_ru = rup;
185 		} else {
186 			pool_put(&rusage_pool, rup);
187 			rup = pr->ps_ru;
188 		}
189 	}
190 	p->p_siglist = 0;
191 	if ((p->p_flag & P_THREAD) == 0)
192 		pr->ps_siglist = 0;
193 
194 	kqpoll_exit();
195 
196 #if NKCOV > 0
197 	kcov_exit(p);
198 #endif
199 
200 	if ((p->p_flag & P_THREAD) == 0) {
201 		sigio_freelist(&pr->ps_sigiolst);
202 
203 		/* close open files and release open-file table */
204 		fdfree(p);
205 
206 		cancel_all_itimers();
207 
208 		timeout_del(&pr->ps_rucheck_to);
209 #ifdef SYSVSEM
210 		semexit(pr);
211 #endif
212 		killjobc(pr);
213 #ifdef ACCOUNTING
214 		acct_process(p);
215 #endif
216 
217 #ifdef KTRACE
218 		/* release trace file */
219 		if (pr->ps_tracevp)
220 			ktrcleartrace(pr);
221 #endif
222 
223 		unveil_destroy(pr);
224 
225 		/*
226 		 * If parent has the SAS_NOCLDWAIT flag set, we're not
227 		 * going to become a zombie.
228 		 */
229 		if (pr->ps_pptr->ps_sigacts->ps_sigflags & SAS_NOCLDWAIT)
230 			atomic_setbits_int(&pr->ps_flags, PS_NOZOMBIE);
231 	}
232 
233 	p->p_fd = NULL;		/* zap the thread's copy */
234 
235 	/*
236 	 * Remove proc from pidhash chain and allproc so looking
237 	 * it up won't work.  We will put the proc on the
238 	 * deadproc list later (using the p_hash member), and
239 	 * wake up the reaper when we do.  If this is the last
240 	 * thread of a process that isn't PS_NOZOMBIE, we'll put
241 	 * the process on the zombprocess list below.
242 	 */
243 	/*
244 	 * NOTE: WE ARE NO LONGER ALLOWED TO SLEEP!
245 	 */
246 	p->p_stat = SDEAD;
247 
248 	LIST_REMOVE(p, p_hash);
249 	LIST_REMOVE(p, p_list);
250 
251 	if ((p->p_flag & P_THREAD) == 0) {
252 		LIST_REMOVE(pr, ps_hash);
253 		LIST_REMOVE(pr, ps_list);
254 
255 		if ((pr->ps_flags & PS_NOZOMBIE) == 0)
256 			LIST_INSERT_HEAD(&zombprocess, pr, ps_list);
257 		else {
258 			/*
259 			 * Not going to be a zombie, so it's now off all
260 			 * the lists scanned by ispidtaken(), so block
261 			 * fast reuse of the pid now.
262 			 */
263 			freepid(pr->ps_pid);
264 		}
265 
266 		/*
267 		 * Reparent children to their original parent, in case
268 		 * they were being traced, or to init(8).
269 		 */
270 		qr = LIST_FIRST(&pr->ps_children);
271 		if (qr)		/* only need this if any child is S_ZOMB */
272 			wakeup(initprocess);
273 		for (; qr != NULL; qr = nqr) {
274 			nqr = LIST_NEXT(qr, ps_sibling);
275 			/*
276 			 * Traced processes are killed since their
277 			 * existence means someone is screwing up.
278 			 */
279 			if (qr->ps_flags & PS_TRACED &&
280 			    !(qr->ps_flags & PS_EXITING)) {
281 				process_untrace(qr);
282 
283 				/*
284 				 * If single threading is active,
285 				 * direct the signal to the active
286 				 * thread to avoid deadlock.
287 				 */
288 				if (qr->ps_single)
289 					ptsignal(qr->ps_single, SIGKILL,
290 					    STHREAD);
291 				else
292 					prsignal(qr, SIGKILL);
293 			} else {
294 				process_reparent(qr, initprocess);
295 			}
296 		}
297 
298 		/*
299 		 * Make sure orphans won't remember the exiting process.
300 		 */
301 		while ((qr = LIST_FIRST(&pr->ps_orphans)) != NULL) {
302 			KASSERT(qr->ps_oppid == pr->ps_pid);
303 			qr->ps_oppid = 0;
304 			process_clear_orphan(qr);
305 		}
306 	}
307 
308 	/* add thread's accumulated rusage into the process's total */
309 	ruadd(rup, &p->p_ru);
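	/*
	 * Charge the process for the CPU time consumed since this thread
	 * was last switched in; clamp to zero if the uptime clock appears
	 * to have gone backwards.
	 */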
310 	nanouptime(&ts);
311 	if (timespeccmp(&ts, &curcpu()->ci_schedstate.spc_runtime, <))
312 		timespecclear(&ts);
313 	else
314 		timespecsub(&ts, &curcpu()->ci_schedstate.spc_runtime, &ts);
315 	SCHED_LOCK(s);
316 	tuagg_locked(pr, p, &ts);
317 	SCHED_UNLOCK(s);
318 
319 	/*
320 	 * clear %cpu usage during swap
321 	 */
322 	p->p_pctcpu = 0;
323 
324 	if ((p->p_flag & P_THREAD) == 0) {
325 		/*
326 		 * Final thread has died, so add on our children's rusage
327 		 * and calculate the total times
328 		 */
329 		calcru(&pr->ps_tu, &rup->ru_utime, &rup->ru_stime, NULL);
330 		ruadd(rup, &pr->ps_cru);
331 
332 		/*
333 		 * Notify parent that we're gone.  If we're not going to
334 		 * become a zombie, reparent to process 1 (init) so that
335 		 * we can wake our original parent to possibly unblock
336 		 * wait4() to return ECHILD.
337 		 */
338 		if (pr->ps_flags & PS_NOZOMBIE) {
339 			struct process *ppr = pr->ps_pptr;
340 			process_reparent(pr, initprocess);
341 			wakeup(ppr);
342 		}
343 	}
344 
345 	/* just a thread? detach it from its process */
346 	if (p->p_flag & P_THREAD) {
347 		/* scheduler_wait_hook(pr->ps_mainproc, p); XXX */
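		/*
		 * The last sibling to exit wakes the main thread, which
		 * is sleeping in the "thrdeath" loop above.
		 */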
348 		if (pr->ps_threadcnt == 0)
349 			wakeup(&pr->ps_threads);
350 	}
351 
352 	/* Release the thread's read reference of resource limit structure. */
353 	if (p->p_limit != NULL) {
354 		struct plimit *limit;
355 
356 		limit = p->p_limit;
357 		p->p_limit = NULL;
358 		lim_free(limit);
359 	}
360 
361 	/*
362 	 * Other substructures are freed from reaper and wait().
363 	 */
364 
365 	/*
366 	 * Finally, call machine-dependent code to switch to a new
367 	 * context (possibly the idle context).  Once we are no longer
368 	 * using the dead process's vmspace and stack, exit2() will be
369 	 * called to schedule those resources to be released by the
370 	 * reaper thread.
371 	 *
372 	 * Note that cpu_exit() will end with a call equivalent to
373 	 * cpu_switch(), finishing our execution (pun intended).
374 	 */
375 	uvmexp.swtch++;
376 	cpu_exit(p);
377 	panic("cpu_exit returned");
378 }
379 
380 /*
381  * Locking of this proclist is special; it's accessed in a
382  * critical section of process exit, and thus locking it can't
383  * modify interrupt state.  We use a simple spin lock for this
384  * proclist.  We use the p_hash member to linkup to deadproc.
385  */
386 struct mutex deadproc_mutex =
387     MUTEX_INITIALIZER_FLAGS(IPL_NONE, "deadproc", MTX_NOWITNESS);
388 struct proclist deadproc = LIST_HEAD_INITIALIZER(deadproc);
389 
390 /*
391  * We are called from cpu_exit() once it is safe to schedule the
392  * dead process's resources to be freed.
393  *
394  * NOTE: One must be careful with locking in this routine.  It's
395  * called from a critical section in machine-dependent code, so
396  * we should refrain from changing any interrupt state.
397  *
398  * We lock the deadproc list, place the proc on that list (using
399  * the p_hash member), and wake up the reaper.
400  */
401 void
402 exit2(struct proc *p)
403 {
404 	mtx_enter(&deadproc_mutex);
405 	LIST_INSERT_HEAD(&deadproc, p, p_hash);
406 	mtx_leave(&deadproc_mutex);
407 
408 	wakeup(&deadproc);
409 }
410 
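/*
 * Release a thread's remaining per-thread resources: drop its ucred
 * reference, return the proc structure to its pool and decrement the
 * global thread count.
 */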
411 void
412 proc_free(struct proc *p)
413 {
414 	crfree(p->p_ucred);
415 	pool_put(&proc_pool, p);
416 	nthreads--;
417 }
418 
419 /*
420  * Process reaper.  This is run by a kernel thread to free the resources
421  * of a dead process.  Once the resources are free, the process becomes
422  * a zombie, and the parent is allowed to read the undead's status.
423  */
424 void
425 reaper(void *arg)
426 {
427 	struct proc *p;
428 
429 	KERNEL_UNLOCK();
430 
431 	SCHED_ASSERT_UNLOCKED();
432 
433 	for (;;) {
434 		mtx_enter(&deadproc_mutex);
435 		while ((p = LIST_FIRST(&deadproc)) == NULL)
436 			msleep_nsec(&deadproc, &deadproc_mutex, PVM, "reaper",
437 			    INFSLP);
438 
439 		/* Remove us from the deadproc list. */
440 		LIST_REMOVE(p, p_hash);
441 		mtx_leave(&deadproc_mutex);
442 
443 		WITNESS_THREAD_EXIT(p);
444 
445 		KERNEL_LOCK();
446 
447 		/*
448 		 * Free the VM resources we're still holding on to.
449 		 * We must do this from a valid thread because doing
450 		 * so may block.
451 		 */
452 		uvm_uarea_free(p);
453 		p->p_vmspace = NULL;		/* zap the thread's copy */
454 
455 		if (p->p_flag & P_THREAD) {
456 			/* Just a thread */
457 			proc_free(p);
458 		} else {
459 			struct process *pr = p->p_p;
460 
461 			/* Release the rest of the process's vmspace */
462 			uvm_exit(pr);
463 
464 			if ((pr->ps_flags & PS_NOZOMBIE) == 0) {
465 				/* Process is now a true zombie. */
466 				atomic_setbits_int(&pr->ps_flags, PS_ZOMBIE);
467 			}
468 
469 			/* Notify listeners of our demise and clean up. */
470 			knote_processexit(pr);
471 
472 			if (pr->ps_flags & PS_ZOMBIE) {
473 				/* Post SIGCHLD and wake up parent. */
474 				prsignal(pr->ps_pptr, SIGCHLD);
475 				wakeup(pr->ps_pptr);
476 			} else {
477 				/* No one will wait for us, just zap it. */
478 				process_zap(pr);
479 			}
480 		}
481 
482 		KERNEL_UNLOCK();
483 	}
484 }
485 
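/*
 * Common backend for wait4(2) and waitid(2): scan our children (and,
 * if none match, our orphans) for a process selected by idtype/id whose
 * state matches the requested options; report it, block until a child
 * changes state, or fail with ECHILD.
 */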
486 int
487 dowait6(struct proc *q, idtype_t idtype, id_t id, int *statusp, int options,
488     struct rusage *rusage, siginfo_t *info, register_t *retval)
489 {
490 	int nfound;
491 	struct process *pr;
492 	struct proc *p;
493 	int error;
494 
495 	if (info != NULL)
496 		memset(info, 0, sizeof(*info));
497 
498 loop:
499 	nfound = 0;
500 	LIST_FOREACH(pr, &q->p_p->ps_children, ps_sibling) {
501 		if ((pr->ps_flags & PS_NOZOMBIE) ||
502 		    (idtype == P_PID && id != pr->ps_pid) ||
503 		    (idtype == P_PGID && id != pr->ps_pgid))
504 			continue;
505 
506 		p = pr->ps_mainproc;
507 
508 		nfound++;
509 		if ((options & WEXITED) && (pr->ps_flags & PS_ZOMBIE)) {
510 			*retval = pr->ps_pid;
511 			if (info != NULL) {
512 				info->si_pid = pr->ps_pid;
513 				info->si_uid = pr->ps_ucred->cr_uid;
514 				info->si_signo = SIGCHLD;
515 				if (pr->ps_xsig == 0) {
516 					info->si_code = CLD_EXITED;
517 					info->si_status = pr->ps_xexit;
518 				} else if (WCOREDUMP(pr->ps_xsig)) {
519 					info->si_code = CLD_DUMPED;
520 					info->si_status = _WSTATUS(pr->ps_xsig);
521 				} else {
522 					info->si_code = CLD_KILLED;
523 					info->si_status = _WSTATUS(pr->ps_xsig);
524 				}
525 			}
526 
527 			if (statusp != NULL)
528 				*statusp = W_EXITCODE(pr->ps_xexit,
529 				    pr->ps_xsig);
530 			if (rusage != NULL)
531 				memcpy(rusage, pr->ps_ru, sizeof(*rusage));
532 			if ((options & WNOWAIT) == 0)
533 				proc_finish_wait(q, p);
534 			return (0);
535 		}
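		/*
		 * A traced child sitting in a ptrace stop that has not
		 * been reported yet; only selected when WTRAPPED is set.
		 */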
536 		if ((options & WTRAPPED) &&
537 		    pr->ps_flags & PS_TRACED &&
538 		    (pr->ps_flags & PS_WAITED) == 0 && pr->ps_single &&
539 		    pr->ps_single->p_stat == SSTOP &&
540 		    (pr->ps_single->p_flag & P_SUSPSINGLE) == 0) {
541 			if (single_thread_wait(pr, 0))
542 				goto loop;
543 
544 			if ((options & WNOWAIT) == 0)
545 				atomic_setbits_int(&pr->ps_flags, PS_WAITED);
546 
547 			*retval = pr->ps_pid;
548 			if (info != NULL) {
549 				info->si_pid = pr->ps_pid;
550 				info->si_uid = pr->ps_ucred->cr_uid;
551 				info->si_signo = SIGCHLD;
552 				info->si_code = CLD_TRAPPED;
553 				info->si_status = pr->ps_xsig;
554 			}
555 
556 			if (statusp != NULL)
557 				*statusp = W_STOPCODE(pr->ps_xsig);
558 			if (rusage != NULL)
559 				memset(rusage, 0, sizeof(*rusage));
560 			return (0);
561 		}
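		/*
		 * A stopped child: reportable if it is traced or if
		 * WUNTRACED was given.
		 */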
562 		if (p->p_stat == SSTOP &&
563 		    (pr->ps_flags & PS_WAITED) == 0 &&
564 		    (p->p_flag & P_SUSPSINGLE) == 0 &&
565 		    (pr->ps_flags & PS_TRACED ||
566 		    options & WUNTRACED)) {
567 			if ((options & WNOWAIT) == 0)
568 				atomic_setbits_int(&pr->ps_flags, PS_WAITED);
569 
570 			*retval = pr->ps_pid;
571 			if (info != NULL) {
572 				info->si_pid = pr->ps_pid;
573 				info->si_uid = pr->ps_ucred->cr_uid;
574 				info->si_signo = SIGCHLD;
575 				info->si_code = CLD_STOPPED;
576 				info->si_status = pr->ps_xsig;
577 			}
578 
579 			if (statusp != NULL)
580 				*statusp = W_STOPCODE(pr->ps_xsig);
581 			if (rusage != NULL)
582 				memset(rusage, 0, sizeof(*rusage));
583 			return (0);
584 		}
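		/* A child continued by SIGCONT, reportable with WCONTINUED. */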
585 		if ((options & WCONTINUED) && (p->p_flag & P_CONTINUED)) {
586 			if ((options & WNOWAIT) == 0)
587 				atomic_clearbits_int(&p->p_flag, P_CONTINUED);
588 
589 			*retval = pr->ps_pid;
590 			if (info != NULL) {
591 				info->si_pid = pr->ps_pid;
592 				info->si_uid = pr->ps_ucred->cr_uid;
593 				info->si_signo = SIGCHLD;
594 				info->si_code = CLD_CONTINUED;
595 				info->si_status = SIGCONT;
596 			}
597 
598 			if (statusp != NULL)
599 				*statusp = _WCONTINUED;
600 			if (rusage != NULL)
601 				memset(rusage, 0, sizeof(*rusage));
602 			return (0);
603 		}
604 	}
605 	/*
606 	 * Look in the orphans list too, to allow the parent to
607 	 * collect its child's exit status even if the child is being
608 	 * debugged.
609 	 *
610 	 * The debugger detaches from the parent upon a successful
611 	 * switch-over from parent to child.  At that point, due to the
612 	 * re-parenting, the parent loses the child to the debugger and
613 	 * a wait4(2) call would report that it has no children to wait
614 	 * for.  By maintaining a list of orphans we allow the parent
615 	 * to successfully wait until the child becomes a zombie.
616 	 */
617 	if (nfound == 0) {
618 		LIST_FOREACH(pr, &q->p_p->ps_orphans, ps_orphan) {
619 			if ((pr->ps_flags & PS_NOZOMBIE) ||
620 			    (idtype == P_PID && id != pr->ps_pid) ||
621 			    (idtype == P_PGID && id != pr->ps_pgid))
622 				continue;
623 			nfound++;
624 			break;
625 		}
626 	}
627 	if (nfound == 0)
628 		return (ECHILD);
629 	if (options & WNOHANG) {
630 		*retval = 0;
631 		return (0);
632 	}
633 	if ((error = tsleep_nsec(q->p_p, PWAIT | PCATCH, "wait", INFSLP)) != 0)
634 		return (error);
635 	goto loop;
636 }
637 
638 int
639 sys_wait4(struct proc *q, void *v, register_t *retval)
640 {
641 	struct sys_wait4_args /* {
642 		syscallarg(pid_t) pid;
643 		syscallarg(int *) status;
644 		syscallarg(int) options;
645 		syscallarg(struct rusage *) rusage;
646 	} */ *uap = v;
647 	struct rusage ru;
648 	pid_t pid = SCARG(uap, pid);
649 	int options = SCARG(uap, options);
650 	int status, error;
651 	idtype_t idtype;
652 	id_t id;
653 
654 	if (SCARG(uap, options) &~ (WUNTRACED|WNOHANG|WCONTINUED))
655 		return (EINVAL);
656 	options |= WEXITED | WTRAPPED;
657 
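	/*
	 * Map the wait4() pid argument onto an idtype/id pair:
	 * WAIT_MYPGRP (0) selects our own process group, WAIT_ANY (-1)
	 * any child, other negative values the process group -pid, and
	 * positive values that specific pid.
	 */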
658 	if (SCARG(uap, pid) == WAIT_MYPGRP) {
659 		idtype = P_PGID;
660 		id = q->p_p->ps_pgid;
661 	} else if (SCARG(uap, pid) == WAIT_ANY) {
662 		idtype = P_ALL;
663 		id = 0;
664 	} else if (pid < 0) {
665 		idtype = P_PGID;
666 		id = -pid;
667 	} else {
668 		idtype = P_PID;
669 		id = pid;
670 	}
671 
672 	error = dowait6(q, idtype, id,
673 	    SCARG(uap, status) ? &status : NULL, options,
674 	    SCARG(uap, rusage) ? &ru : NULL, NULL, retval);
675 	if (error == 0 && *retval > 0 && SCARG(uap, status)) {
676 		error = copyout(&status, SCARG(uap, status), sizeof(status));
677 	}
678 	if (error == 0 && *retval > 0 && SCARG(uap, rusage)) {
679 		error = copyout(&ru, SCARG(uap, rusage), sizeof(ru));
680 #ifdef KTRACE
681 		if (error == 0 && KTRPOINT(q, KTR_STRUCT))
682 			ktrrusage(q, &ru);
683 #endif
684 	}
685 	return (error);
686 }
687 
688 int
689 sys_waitid(struct proc *q, void *v, register_t *retval)
690 {
691 	struct sys_waitid_args /* {
692 		syscallarg(idtype_t) idtype;
693 		syscallarg(id_t) id;
694 		syscallarg(siginfo_t) info;
695 		syscallarg(int) options;
696 	} */ *uap = v;
697 	siginfo_t info;
698 	idtype_t idtype = SCARG(uap, idtype);
699 	int options = SCARG(uap, options);
700 	int error;
701 
702 	if (options &~ (WSTOPPED|WCONTINUED|WEXITED|WTRAPPED|WNOHANG|WNOWAIT))
703 		return (EINVAL);
704 	if ((options & (WSTOPPED|WCONTINUED|WEXITED|WTRAPPED)) == 0)
705 		return (EINVAL);
706 	if (idtype != P_ALL && idtype != P_PID && idtype != P_PGID)
707 		return (EINVAL);
708 
709 	error = dowait6(q, idtype, SCARG(uap, id), NULL,
710 	    options, NULL, &info, retval);
711 	if (error == 0) {
712 		error = copyout(&info, SCARG(uap, info), sizeof(info));
713 #ifdef KTRACE
714 		if (error == 0 && KTRPOINT(q, KTR_STRUCT))
715 			ktrsiginfo(q, &info);
716 #endif
717 	}
718 	if (error == 0)
719 		*retval = 0;
720 	return (error);
721 }
722 
723 void
724 proc_finish_wait(struct proc *waiter, struct proc *p)
725 {
726 	struct process *pr, *tr;
727 	struct rusage *rup;
728 
729 	/*
730 	 * If we got the child via a ptrace 'attach',
731 	 * we need to give it back to the old parent.
732 	 */
733 	pr = p->p_p;
734 	if (pr->ps_oppid != 0 && (pr->ps_oppid != pr->ps_pptr->ps_pid) &&
735 	   (tr = prfind(pr->ps_oppid))) {
736 		pr->ps_oppid = 0;
737 		atomic_clearbits_int(&pr->ps_flags, PS_TRACED);
738 		process_reparent(pr, tr);
739 		prsignal(tr, SIGCHLD);
740 		wakeup(tr);
741 	} else {
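		/*
		 * Normal case: fold the child's rusage into our
		 * children's total, take it off the zombie list, free
		 * its pid and tear the process down.
		 */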
742 		scheduler_wait_hook(waiter, p);
743 		rup = &waiter->p_p->ps_cru;
744 		ruadd(rup, pr->ps_ru);
745 		LIST_REMOVE(pr, ps_list);	/* off zombprocess */
746 		freepid(pr->ps_pid);
747 		process_zap(pr);
748 	}
749 }
750 
751 /*
752  * give process back to original parent or init(8)
753  */
754 void
755 process_untrace(struct process *pr)
756 {
757 	struct process *ppr = NULL;
758 
759 	KASSERT(pr->ps_flags & PS_TRACED);
760 
761 	if (pr->ps_oppid != 0 &&
762 	    (pr->ps_oppid != pr->ps_pptr->ps_pid))
763 		ppr = prfind(pr->ps_oppid);
764 
765 	/* not being traced any more */
766 	pr->ps_oppid = 0;
767 	atomic_clearbits_int(&pr->ps_flags, PS_TRACED);
768 	process_reparent(pr, ppr ? ppr : initprocess);
769 }
770 
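/*
 * Take a process off its parent's orphan list, if it is on one.
 */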
771 void
772 process_clear_orphan(struct process *pr)
773 {
774 	if (pr->ps_flags & PS_ORPHAN) {
775 		LIST_REMOVE(pr, ps_orphan);
776 		atomic_clearbits_int(&pr->ps_flags, PS_ORPHAN);
777 	}
778 }
779 
780 /*
781  * make process 'parent' the new parent of process 'child'.
782  */
783 void
784 process_reparent(struct process *child, struct process *parent)
785 {
786 
787 	if (child->ps_pptr == parent)
788 		return;
789 
790 	KASSERT(child->ps_oppid == 0 ||
791 		child->ps_oppid == child->ps_pptr->ps_pid);
792 
793 	LIST_REMOVE(child, ps_sibling);
794 	LIST_INSERT_HEAD(&parent->ps_children, child, ps_sibling);
795 
796 	process_clear_orphan(child);
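	/*
	 * A traced child stays on its old parent's orphan list so the
	 * original parent can still wait for it (see dowait6()).
	 */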
797 	if (child->ps_flags & PS_TRACED) {
798 		atomic_setbits_int(&child->ps_flags, PS_ORPHAN);
799 		LIST_INSERT_HEAD(&child->ps_pptr->ps_orphans, child, ps_orphan);
800 	}
801 
802 	child->ps_pptr = parent;
803 	child->ps_ppid = parent->ps_pid;
804 }
805 
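/*
 * Final teardown of a process that no one will wait for, or that has
 * already been waited for: leave the process group, unlink it from its
 * parent and drop its remaining references and allocations.
 */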
806 void
807 process_zap(struct process *pr)
808 {
809 	struct vnode *otvp;
810 	struct proc *p = pr->ps_mainproc;
811 
812 	/*
813 	 * Finally finished with old proc entry.
814 	 * Unlink it from its process group and free it.
815 	 */
816 	leavepgrp(pr);
817 	LIST_REMOVE(pr, ps_sibling);
818 	process_clear_orphan(pr);
819 
820 	/*
821 	 * Decrement the count of procs running with this uid.
822 	 */
823 	(void)chgproccnt(pr->ps_ucred->cr_ruid, -1);
824 
825 	/*
826 	 * Release reference to text vnode
827 	 */
828 	otvp = pr->ps_textvp;
829 	pr->ps_textvp = NULL;
830 	if (otvp)
831 		vrele(otvp);
832 
833 	KASSERT(pr->ps_threadcnt == 0);
834 	if (pr->ps_ptstat != NULL)
835 		free(pr->ps_ptstat, M_SUBPROC, sizeof(*pr->ps_ptstat));
836 	pool_put(&rusage_pool, pr->ps_ru);
837 	KASSERT(TAILQ_EMPTY(&pr->ps_threads));
838 	sigactsfree(pr->ps_sigacts);
839 	lim_free(pr->ps_limit);
840 	crfree(pr->ps_ucred);
841 	pool_put(&process_pool, pr);
842 	nprocesses--;
843 
844 	proc_free(p);
845 }
846