/*	$OpenBSD: kern_exit.c,v 1.157 2016/04/25 20:00:33 tedu Exp $	*/
/*	$NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/proc.h>
#include <sys/tty.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/wait.h>
#include <sys/file.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/ptrace.h>
#include <sys/acct.h>
#include <sys/filedesc.h>
#include <sys/signalvar.h>
#include <sys/sched.h>
#include <sys/ktrace.h>
#include <sys/pool.h>
#include <sys/mutex.h>
#include <sys/pledge.h>
#ifdef SYSVSEM
#include <sys/sem.h>
#endif

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

void	proc_finish_wait(struct proc *, struct proc *);
void	process_zap(struct process *);
void	proc_free(struct proc *);

/*
 * exit --
 *	Death of process.
 */
int
sys_exit(struct proc *p, void *v, register_t *retval)
{
	struct sys_exit_args /* {
		syscallarg(int) rval;
	} */ *uap = v;

	exit1(p, W_EXITCODE(SCARG(uap, rval), 0), EXIT_NORMAL);
	/* NOTREACHED */
	return (0);
}
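
/*
 * Side note on the status encoding (per <sys/wait.h>, restated here for
 * the reader's convenience): W_EXITCODE(ret, sig) packs the exit value
 * into the high byte and the terminating signal into the low byte,
 * roughly ((ret) << 8 | (sig)), so the parent can later take it apart
 * with WEXITSTATUS() and WTERMSIG().
 */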

int
sys___threxit(struct proc *p, void *v, register_t *retval)
{
	struct sys___threxit_args /* {
		syscallarg(pid_t *) notdead;
	} */ *uap = v;

	if (SCARG(uap, notdead) != NULL) {
		pid_t zero = 0;
		if (copyout(&zero, SCARG(uap, notdead), sizeof(zero)))
			psignal(p, SIGSEGV);
	}
	exit1(p, 0, EXIT_THREAD);

	return (0);
}
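
/*
 * Note on the notdead pointer (a sketch of the intended use, not
 * something this file defines): when userland passes a non-NULL
 * address, the kernel writes a zero there before tearing the thread
 * down, so a threading library can watch that word to learn that the
 * thread has entered the kernel's exit path.
 */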

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(struct proc *p, int rv, int flags)
{
	struct process *pr, *qr, *nqr;
	struct rusage *rup;
	struct vnode *ovp;

	atomic_setbits_int(&p->p_flag, P_WEXIT);

	pr = p->p_p;

	/* single-threaded? */
	if (!P_HASSIBLING(p)) {
		flags = EXIT_NORMAL;
	} else {
		/* nope, multi-threaded */
		if (flags == EXIT_NORMAL)
			single_thread_set(p, SINGLE_EXIT, 0);
		else if (flags == EXIT_THREAD)
			single_thread_check(p, 0);
	}

	if (flags == EXIT_NORMAL) {
		if (pr->ps_pid == 1)
			panic("init died (signal %d, exit %d)",
			    WTERMSIG(rv), WEXITSTATUS(rv));

		atomic_setbits_int(&pr->ps_flags, PS_EXITING);
		pr->ps_mainproc->p_xstat = rv;

		/*
		 * If parent is waiting for us to exit or exec, PS_PPWAIT
		 * is set; we wake up the parent early to avoid deadlock.
		 */
		if (pr->ps_flags & PS_PPWAIT) {
			atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
			atomic_clearbits_int(&pr->ps_pptr->ps_flags,
			    PS_ISPWAIT);
			wakeup(pr->ps_pptr);
		}
	}

	/* unlink ourselves from the active threads */
	TAILQ_REMOVE(&pr->ps_threads, p, p_thr_link);
	if ((p->p_flag & P_THREAD) == 0) {
		/* main thread gotta wait because it has the pid, et al */
		while (pr->ps_refcnt > 1)
			tsleep(&pr->ps_threads, PUSER, "thrdeath", 0);
		if (pr->ps_flags & PS_PROFIL)
			stopprofclock(pr);
	}

	rup = pr->ps_ru;
	if (rup == NULL) {
		rup = pool_get(&rusage_pool, PR_WAITOK | PR_ZERO);
		if (pr->ps_ru == NULL) {
			pr->ps_ru = rup;
		} else {
			pool_put(&rusage_pool, rup);
			rup = pr->ps_ru;
		}
	}
	p->p_siglist = 0;

	if ((p->p_flag & P_THREAD) == 0) {
		/* close open files and release open-file table */
		fdfree(p);

		timeout_del(&pr->ps_realit_to);
#ifdef SYSVSEM
		semexit(pr);
#endif
		if (SESS_LEADER(pr)) {
			struct session *sp = pr->ps_session;

			if (sp->s_ttyvp) {
				/*
				 * Controlling process.
				 * Signal foreground pgrp,
				 * drain controlling terminal
				 * and revoke access to controlling terminal.
				 */
				if (sp->s_ttyp->t_session == sp) {
					if (sp->s_ttyp->t_pgrp)
						pgsignal(sp->s_ttyp->t_pgrp,
						    SIGHUP, 1);
					ttywait(sp->s_ttyp);
					/*
					 * The tty could have been revoked
					 * if we blocked.
					 */
					if (sp->s_ttyvp)
						VOP_REVOKE(sp->s_ttyvp,
						    REVOKEALL);
				}
				ovp = sp->s_ttyvp;
				sp->s_ttyvp = NULL;
				if (ovp)
					vrele(ovp);
				/*
				 * s_ttyp is not zero'd; we use this to
				 * indicate that the session once had a
				 * controlling terminal.  (for logging and
				 * informational purposes)
				 */
			}
			sp->s_leader = NULL;
		}
		fixjobc(pr, pr->ps_pgrp, 0);

#ifdef ACCOUNTING
		acct_process(p);
#endif

#ifdef KTRACE
		/* release trace file */
		if (pr->ps_tracevp)
			ktrcleartrace(pr);
#endif

		/*
		 * If parent has the SAS_NOCLDWAIT flag set, we're not
		 * going to become a zombie.
		 */
		if (pr->ps_pptr->ps_sigacts->ps_flags & SAS_NOCLDWAIT)
			atomic_setbits_int(&pr->ps_flags, PS_NOZOMBIE);
	}

	p->p_fd = NULL;		/* zap the thread's copy */

	/*
	 * If emulation has thread exit hook, call it now.
	 */
	if (pr->ps_emul->e_proc_exit)
		(*pr->ps_emul->e_proc_exit)(p);

	/*
	 * Remove proc from pidhash chain and allproc so looking
	 * it up won't work.  We will put the proc on the
	 * deadproc list later (using the p_hash member), and
	 * wake up the reaper when we do.  If this is the last
	 * thread of a process that isn't PS_NOZOMBIE, we'll put
	 * the process on the zombprocess list below.
	 */
	/*
	 * NOTE: WE ARE NO LONGER ALLOWED TO SLEEP!
	 */
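	/*
	 * (The thread is about to be marked SDEAD and unlinked from the
	 * pid hash and allproc, so it can no longer safely block and be
	 * rescheduled.)
	 */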
	p->p_stat = SDEAD;

	LIST_REMOVE(p, p_hash);
	LIST_REMOVE(p, p_list);

	if ((p->p_flag & P_THREAD) == 0) {
		LIST_REMOVE(pr, ps_list);

		if ((pr->ps_flags & PS_NOZOMBIE) == 0)
			LIST_INSERT_HEAD(&zombprocess, pr, ps_list);
		else {
			/*
			 * Not going to be a zombie, so it's now off all
			 * the lists scanned by ispidtaken(), so block
			 * fast reuse of the pid now.
			 */
			freepid(p->p_pid);
		}

		/*
		 * Give orphaned children to init(8).
		 */
		qr = LIST_FIRST(&pr->ps_children);
		if (qr)		/* only need this if any child is S_ZOMB */
			wakeup(initprocess);
		for (; qr != 0; qr = nqr) {
			nqr = LIST_NEXT(qr, ps_sibling);
			proc_reparent(qr, initprocess);
			/*
			 * Traced processes are killed since their
			 * existence means someone is screwing up.
			 */
			if (qr->ps_flags & PS_TRACED &&
			    !(qr->ps_flags & PS_EXITING)) {
				atomic_clearbits_int(&qr->ps_flags, PS_TRACED);
				/*
				 * If single threading is active,
				 * direct the signal to the active
				 * thread to avoid deadlock.
				 */
				if (qr->ps_single)
					ptsignal(qr->ps_single, SIGKILL,
					    STHREAD);
				else
					prsignal(qr, SIGKILL);
			}
		}
	}

	/* add thread's accumulated rusage into the process's total */
	ruadd(rup, &p->p_ru);
	tuagg(pr, p);

	/*
	 * clear %cpu usage during swap
	 */
	p->p_pctcpu = 0;

	if ((p->p_flag & P_THREAD) == 0) {
		/*
		 * Final thread has died, so add on our children's rusage
		 * and calculate the total times
		 */
		calcru(&pr->ps_tu, &rup->ru_utime, &rup->ru_stime, NULL);
		ruadd(rup, &pr->ps_cru);

		/* notify interested parties of our demise and clean up */
		knote_processexit(p);

		/*
		 * Notify parent that we're gone.  If we're not going to
		 * become a zombie, reparent to process 1 (init) so that
		 * we can wake our original parent to possibly unblock
		 * wait4() to return ECHILD.
		 */
		if (pr->ps_flags & PS_NOZOMBIE) {
			struct process *ppr = pr->ps_pptr;
			proc_reparent(pr, initprocess);
			wakeup(ppr);
		}

		/*
		 * Release the process's signal state.
		 */
		sigactsfree(pr);
	}

	/* just a thread? detach it from its process */
	if (p->p_flag & P_THREAD) {
		/* scheduler_wait_hook(pr->ps_mainproc, p); XXX */
		if (--pr->ps_refcnt == 1)
			wakeup(&pr->ps_threads);
		KASSERT(pr->ps_refcnt > 0);
	}

	/*
	 * Other substructures are freed from reaper and wait().
	 */

	/*
	 * Finally, call machine-dependent code to switch to a new
	 * context (possibly the idle context).  Once we are no longer
	 * using the dead process's vmspace and stack, exit2() will be
	 * called to schedule those resources to be released by the
	 * reaper thread.
	 *
	 * Note that cpu_exit() will end with a call equivalent to
	 * cpu_switch(), finishing our execution (pun intended).
	 */
	uvmexp.swtch++;
	cpu_exit(p);
	panic("cpu_exit returned");
}

/*
 * Locking of this proclist is special; it's accessed in a
 * critical section of process exit, and thus locking it can't
 * modify interrupt state.  We use a simple spin lock for this
 * proclist.  We use the p_hash member to linkup to deadproc.
 */
struct mutex deadproc_mutex = MUTEX_INITIALIZER(IPL_NONE);
struct proclist deadproc = LIST_HEAD_INITIALIZER(deadproc);

/*
 * We are called from cpu_exit() once it is safe to schedule the
 * dead process's resources to be freed.
 *
 * NOTE: One must be careful with locking in this routine.  It's
 * called from a critical section in machine-dependent code, so
 * we should refrain from changing any interrupt state.
 *
 * We lock the deadproc list, place the proc on that list (using
 * the p_hash member), and wake up the reaper.
 */
void
exit2(struct proc *p)
{
	mtx_enter(&deadproc_mutex);
	LIST_INSERT_HEAD(&deadproc, p, p_hash);
	mtx_leave(&deadproc_mutex);

	wakeup(&deadproc);
}
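
/*
 * A note on the hand-off above (the usual sleep/wakeup pattern): doing
 * the wakeup() after deadproc_mutex is dropped is fine because the
 * reaper re-checks the deadproc list while holding the mutex before it
 * sleeps again, so the notification cannot be lost.
 */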

void
proc_free(struct proc *p)
{
	crfree(p->p_ucred);
	pool_put(&proc_pool, p);
	nthreads--;
}

/*
 * Process reaper.  This is run by a kernel thread to free the resources
 * of a dead process.  Once the resources are free, the process becomes
 * a zombie, and the parent is allowed to read the undead's status.
 */
void
reaper(void)
{
	struct proc *p;

	KERNEL_UNLOCK();

	SCHED_ASSERT_UNLOCKED();

	for (;;) {
		mtx_enter(&deadproc_mutex);
		while ((p = LIST_FIRST(&deadproc)) == NULL)
			msleep(&deadproc, &deadproc_mutex, PVM, "reaper", 0);

		/* Remove us from the deadproc list. */
		LIST_REMOVE(p, p_hash);
		mtx_leave(&deadproc_mutex);

		KERNEL_LOCK();

		/*
		 * Free the VM resources we're still holding on to.
		 * We must do this from a valid thread because doing
		 * so may block.
		 */
		uvm_uarea_free(p);
		p->p_vmspace = NULL;		/* zap the thread's copy */

		if (p->p_flag & P_THREAD) {
			/* Just a thread */
			proc_free(p);
		} else {
			struct process *pr = p->p_p;

			/* Release the rest of the process's vmspace */
			uvm_exit(pr);

			if ((pr->ps_flags & PS_NOZOMBIE) == 0) {
				/* Process is now a true zombie. */
				atomic_setbits_int(&pr->ps_flags, PS_ZOMBIE);
				prsignal(pr->ps_pptr, SIGCHLD);

				/* Wake up the parent so it can get exit status. */
				wakeup(pr->ps_pptr);
			} else {
				/* No one will wait for us. Just zap the process now */
				process_zap(pr);
			}
		}

		KERNEL_UNLOCK();
	}
}

int
sys_wait4(struct proc *q, void *v, register_t *retval)
{
	struct sys_wait4_args /* {
		syscallarg(pid_t) pid;
		syscallarg(int *) status;
		syscallarg(int) options;
		syscallarg(struct rusage *) rusage;
	} */ *uap = v;
	struct rusage ru;
	int status, error;

	error = dowait4(q, SCARG(uap, pid),
	    SCARG(uap, status) ? &status : NULL,
	    SCARG(uap, options), SCARG(uap, rusage) ? &ru : NULL, retval);
	if (error == 0 && retval[0] > 0 && SCARG(uap, status)) {
		error = copyout(&status, SCARG(uap, status), sizeof(status));
	}
	if (error == 0 && retval[0] > 0 && SCARG(uap, rusage)) {
		error = copyout(&ru, SCARG(uap, rusage), sizeof(ru));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(q, KTR_STRUCT))
			ktrrusage(q, &ru);
#endif
	}
	return (error);
}

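/*
 * Common back end for wait4(2).  Userland view, as an illustrative
 * sketch only (not part of this file):
 *
 *	int status;
 *	struct rusage ru;
 *	pid_t pid = wait4(WAIT_ANY, &status, WNOHANG, &ru);
 *	if (pid > 0 && WIFEXITED(status))
 *		... child exited with WEXITSTATUS(status) ...
 *
 * The pid argument follows the usual wait semantics: a value > 0
 * selects that specific child, 0 selects any child in the caller's
 * process group (converted to -pgid below), WAIT_ANY (-1) selects any
 * child, and a value < -1 selects any child in process group |pid|.
 * With WNOHANG, retval[0] == 0 means no child had status to report
 * yet; otherwise the caller sleeps on its own struct process until a
 * child changes state.
 */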
int
dowait4(struct proc *q, pid_t pid, int *statusp, int options,
    struct rusage *rusage, register_t *retval)
{
	int nfound;
	struct process *pr;
	struct proc *p;
	int error;

	if (pid == 0)
		pid = -q->p_p->ps_pgid;
	if (options &~ (WUNTRACED|WNOHANG|WCONTINUED))
		return (EINVAL);

loop:
	nfound = 0;
	LIST_FOREACH(pr, &q->p_p->ps_children, ps_sibling) {
		p = pr->ps_mainproc;
		if ((pr->ps_flags & PS_NOZOMBIE) ||
		    (pid != WAIT_ANY &&
		    p->p_pid != pid &&
		    pr->ps_pgid != -pid))
			continue;

		nfound++;
		if (pr->ps_flags & PS_ZOMBIE) {
			retval[0] = p->p_pid;

			if (statusp != NULL)
				*statusp = p->p_xstat;	/* convert to int */
			if (rusage != NULL)
				memcpy(rusage, pr->ps_ru, sizeof(*rusage));
			proc_finish_wait(q, p);
			return (0);
		}
		if (pr->ps_flags & PS_TRACED &&
		    (pr->ps_flags & PS_WAITED) == 0 && pr->ps_single &&
		    pr->ps_single->p_stat == SSTOP &&
		    (pr->ps_single->p_flag & P_SUSPSINGLE) == 0) {
			single_thread_wait(pr);

			atomic_setbits_int(&pr->ps_flags, PS_WAITED);
			retval[0] = p->p_pid;

			if (statusp != NULL)
				*statusp = W_STOPCODE(pr->ps_single->p_xstat);
			if (rusage != NULL)
				memset(rusage, 0, sizeof(*rusage));
			return (0);
		}
		if (p->p_stat == SSTOP &&
		    (pr->ps_flags & PS_WAITED) == 0 &&
		    (p->p_flag & P_SUSPSINGLE) == 0 &&
		    (pr->ps_flags & PS_TRACED ||
		    options & WUNTRACED)) {
			atomic_setbits_int(&pr->ps_flags, PS_WAITED);
			retval[0] = p->p_pid;

			if (statusp != NULL)
				*statusp = W_STOPCODE(p->p_xstat);
			if (rusage != NULL)
				memset(rusage, 0, sizeof(*rusage));
			return (0);
		}
		if ((options & WCONTINUED) && (p->p_flag & P_CONTINUED)) {
			atomic_clearbits_int(&p->p_flag, P_CONTINUED);
			retval[0] = p->p_pid;

			if (statusp != NULL)
				*statusp = _WCONTINUED;
			if (rusage != NULL)
				memset(rusage, 0, sizeof(*rusage));
			return (0);
		}
	}
	if (nfound == 0)
		return (ECHILD);
	if (options & WNOHANG) {
		retval[0] = 0;
		return (0);
	}
	if ((error = tsleep(q->p_p, PWAIT | PCATCH, "wait", 0)) != 0)
		return (error);
	goto loop;
}

void
proc_finish_wait(struct proc *waiter, struct proc *p)
{
	struct process *pr, *tr;
	struct rusage *rup;

	/*
	 * If we got the child via a ptrace 'attach',
	 * we need to give it back to the old parent.
	 */
	pr = p->p_p;
	if (pr->ps_oppid && (tr = prfind(pr->ps_oppid))) {
		atomic_clearbits_int(&pr->ps_flags, PS_TRACED);
		pr->ps_oppid = 0;
		proc_reparent(pr, tr);
		prsignal(tr, SIGCHLD);
		wakeup(tr);
	} else {
		scheduler_wait_hook(waiter, p);
		p->p_xstat = 0;
		rup = &waiter->p_p->ps_cru;
		ruadd(rup, pr->ps_ru);
		LIST_REMOVE(pr, ps_list);	/* off zombprocess */
		freepid(p->p_pid);
		process_zap(pr);
	}
}

/*
 * make process 'parent' the new parent of process 'child'.
 */
void
proc_reparent(struct process *child, struct process *parent)
{

	if (child->ps_pptr == parent)
		return;

	LIST_REMOVE(child, ps_sibling);
	LIST_INSERT_HEAD(&parent->ps_children, child, ps_sibling);
	child->ps_pptr = parent;
}

void
process_zap(struct process *pr)
{
	struct vnode *otvp;
	struct proc *p = pr->ps_mainproc;

	/*
	 * Finally finished with old proc entry.
	 * Unlink it from its process group and free it.
	 */
	leavepgrp(pr);
	LIST_REMOVE(pr, ps_sibling);

	/*
	 * Decrement the count of procs running with this uid.
	 */
	(void)chgproccnt(pr->ps_ucred->cr_ruid, -1);

	pledge_dropwpaths(pr);

	/*
	 * Release reference to text vnode
	 */
	otvp = pr->ps_textvp;
	pr->ps_textvp = NULL;
	if (otvp)
		vrele(otvp);

	KASSERT(pr->ps_refcnt == 1);
	if (pr->ps_ptstat != NULL)
		free(pr->ps_ptstat, M_SUBPROC, sizeof(*pr->ps_ptstat));
	pool_put(&rusage_pool, pr->ps_ru);
	KASSERT(TAILQ_EMPTY(&pr->ps_threads));
	limfree(pr->ps_limit);
	crfree(pr->ps_ucred);
	pool_put(&process_pool, pr);
	nprocesses--;

	proc_free(p);
}
670