1 /*	$OpenBSD: kern_exit.c,v 1.161 2017/08/29 02:51:27 deraadt Exp $	*/
2 /*	$NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $	*/
3 
4 /*
5  * Copyright (c) 1982, 1986, 1989, 1991, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  * (c) UNIX System Laboratories, Inc.
8  * All or some portions of this file are derived from material licensed
9  * to the University of California by American Telephone and Telegraph
10  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
11  * the permission of UNIX System Laboratories, Inc.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
38  */
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/ioctl.h>
43 #include <sys/proc.h>
44 #include <sys/tty.h>
45 #include <sys/time.h>
46 #include <sys/resource.h>
47 #include <sys/kernel.h>
48 #include <sys/sysctl.h>
49 #include <sys/wait.h>
50 #include <sys/file.h>
51 #include <sys/vnode.h>
52 #include <sys/syslog.h>
53 #include <sys/malloc.h>
54 #include <sys/resourcevar.h>
55 #include <sys/ptrace.h>
56 #include <sys/acct.h>
57 #include <sys/filedesc.h>
58 #include <sys/signalvar.h>
59 #include <sys/sched.h>
60 #include <sys/ktrace.h>
61 #include <sys/pool.h>
62 #include <sys/mutex.h>
63 #include <sys/pledge.h>
64 #ifdef SYSVSEM
65 #include <sys/sem.h>
66 #endif
67 #include <sys/witness.h>
68 
69 #include <sys/mount.h>
70 #include <sys/syscallargs.h>
71 
72 #include <uvm/uvm_extern.h>
73 
74 void	proc_finish_wait(struct proc *, struct proc *);
75 void	process_zap(struct process *);
76 void	proc_free(struct proc *);
77 
78 /*
79  * exit --
80  *	Death of process.
81  */
82 int
83 sys_exit(struct proc *p, void *v, register_t *retval)
84 {
85 	struct sys_exit_args /* {
86 		syscallarg(int) rval;
87 	} */ *uap = v;
88 
89 	exit1(p, W_EXITCODE(SCARG(uap, rval), 0), EXIT_NORMAL);
90 	/* NOTREACHED */
91 	return (0);
92 }
93 
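/*
 * Thread-exit syscall, used by the userland thread library: if a
 * "notdead" pointer was passed in, zero it so other threads can see
 * that this thread is gone, then terminate only the calling thread.
 */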
94 int
95 sys___threxit(struct proc *p, void *v, register_t *retval)
96 {
97 	struct sys___threxit_args /* {
98 		syscallarg(pid_t *) notdead;
99 	} */ *uap = v;
100 
101 	if (SCARG(uap, notdead) != NULL) {
102 		pid_t zero = 0;
103 		if (copyout(&zero, SCARG(uap, notdead), sizeof(zero)))
104 			psignal(p, SIGSEGV);
105 	}
106 	exit1(p, 0, EXIT_THREAD);
107 
108 	return (0);
109 }
110 
111 /*
112  * Exit: deallocate address space and other resources, change proc state
113  * to zombie, and unlink proc from allproc and parent's lists.  Save exit
114  * status and rusage for wait().  Check for child processes and orphan them.
115  */
116 void
117 exit1(struct proc *p, int rv, int flags)
118 {
119 	struct process *pr, *qr, *nqr;
120 	struct rusage *rup;
121 	struct vnode *ovp;
122 
123 	atomic_setbits_int(&p->p_flag, P_WEXIT);
124 
125 	pr = p->p_p;
126 
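	/*
	 * Decide how much of the process dies.  Without sibling threads
	 * this is always a full process exit; otherwise EXIT_NORMAL
	 * forces every other thread to exit via single_thread_set(),
	 * while EXIT_THREAD defers to any single-threading already in
	 * progress.
	 */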
127 	/* single-threaded? */
128 	if (!P_HASSIBLING(p)) {
129 		flags = EXIT_NORMAL;
130 	} else {
131 		/* nope, multi-threaded */
132 		if (flags == EXIT_NORMAL)
133 			single_thread_set(p, SINGLE_EXIT, 0);
134 		else if (flags == EXIT_THREAD)
135 			single_thread_check(p, 0);
136 	}
137 
138 	if (flags == EXIT_NORMAL) {
139 		if (pr->ps_pid == 1)
140 			panic("init died (signal %d, exit %d)",
141 			    WTERMSIG(rv), WEXITSTATUS(rv));
142 
143 		atomic_setbits_int(&pr->ps_flags, PS_EXITING);
144 		pr->ps_mainproc->p_xstat = rv;
145 
146 		/*
147 		 * If parent is waiting for us to exit or exec, PS_PPWAIT
148 		 * is set; we wake up the parent early to avoid deadlock.
149 		 */
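		/* (PS_PPWAIT is normally the vfork(2) case.) */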
150 		if (pr->ps_flags & PS_PPWAIT) {
151 			atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
152 			atomic_clearbits_int(&pr->ps_pptr->ps_flags,
153 			    PS_ISPWAIT);
154 			wakeup(pr->ps_pptr);
155 		}
156 	}
157 
158 	/* unlink ourselves from the active threads */
159 	TAILQ_REMOVE(&pr->ps_threads, p, p_thr_link);
160 	if ((p->p_flag & P_THREAD) == 0) {
161 		/* the main thread must wait since it holds the pid, etc. */
162 		while (pr->ps_refcnt > 1)
163 			tsleep(&pr->ps_threads, PUSER, "thrdeath", 0);
164 		if (pr->ps_flags & PS_PROFIL)
165 			stopprofclock(pr);
166 	}
167 
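	/*
	 * Make sure the process has an rusage structure.  pool_get()
	 * with PR_WAITOK may sleep, so re-check ps_ru afterwards and
	 * discard our allocation if another exiting thread installed
	 * one in the meantime.
	 */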
168 	rup = pr->ps_ru;
169 	if (rup == NULL) {
170 		rup = pool_get(&rusage_pool, PR_WAITOK | PR_ZERO);
171 		if (pr->ps_ru == NULL) {
172 			pr->ps_ru = rup;
173 		} else {
174 			pool_put(&rusage_pool, rup);
175 			rup = pr->ps_ru;
176 		}
177 	}
178 	p->p_siglist = 0;
179 
180 	if ((p->p_flag & P_THREAD) == 0) {
181 		/* close open files and release open-file table */
182 		fdfree(p);
183 
184 		timeout_del(&pr->ps_realit_to);
185 #ifdef SYSVSEM
186 		semexit(pr);
187 #endif
188 		if (SESS_LEADER(pr)) {
189 			struct session *sp = pr->ps_session;
190 
191 			if (sp->s_ttyvp) {
192 				/*
193 				 * Controlling process.
194 				 * Signal foreground pgrp,
195 				 * drain controlling terminal
196 				 * and revoke access to controlling terminal.
197 				 */
198 				if (sp->s_ttyp->t_session == sp) {
199 					if (sp->s_ttyp->t_pgrp)
200 						pgsignal(sp->s_ttyp->t_pgrp,
201 						    SIGHUP, 1);
202 					ttywait(sp->s_ttyp);
203 					/*
204 					 * The tty could have been revoked
205 					 * if we blocked.
206 					 */
207 					if (sp->s_ttyvp)
208 						VOP_REVOKE(sp->s_ttyvp,
209 						    REVOKEALL);
210 				}
211 				ovp = sp->s_ttyvp;
212 				sp->s_ttyvp = NULL;
213 				if (ovp)
214 					vrele(ovp);
215 				/*
216 				 * s_ttyp is not zero'd; we use this to
217 				 * indicate that the session once had a
218 				 * controlling terminal.  (for logging and
219 				 * informational purposes)
220 				 */
221 			}
222 			sp->s_leader = NULL;
223 		}
224 		fixjobc(pr, pr->ps_pgrp, 0);
225 
226 #ifdef ACCOUNTING
227 		acct_process(p);
228 #endif
229 
230 #ifdef KTRACE
231 		/* release trace file */
232 		if (pr->ps_tracevp)
233 			ktrcleartrace(pr);
234 #endif
235 
236 		/*
237 		 * If parent has the SAS_NOCLDWAIT flag set, we're not
238 		 * going to become a zombie.
239 		 */
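		/* (Normally set via sigaction(2)'s SA_NOCLDWAIT.) */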
240 		if (pr->ps_pptr->ps_sigacts->ps_flags & SAS_NOCLDWAIT)
241 			atomic_setbits_int(&pr->ps_flags, PS_NOZOMBIE);
242 	}
243 
244 	p->p_fd = NULL;		/* zap the thread's copy */
245 
246 	/*
247 	 * Remove proc from pidhash chain and allproc so looking
248 	 * it up won't work.  We will put the proc on the
249 	 * deadproc list later (using the p_hash member), and
250 	 * wake up the reaper when we do.  If this is the last
251 	 * thread of a process that isn't PS_NOZOMBIE, we'll put
252 	 * the process on the zombprocess list below.
253 	 */
254 	/*
255 	 * NOTE: WE ARE NO LONGER ALLOWED TO SLEEP!
256 	 */
257 	p->p_stat = SDEAD;
258 
259 	LIST_REMOVE(p, p_hash);
260 	LIST_REMOVE(p, p_list);
261 
262 	if ((p->p_flag & P_THREAD) == 0) {
263 		LIST_REMOVE(pr, ps_hash);
264 		LIST_REMOVE(pr, ps_list);
265 
266 		if ((pr->ps_flags & PS_NOZOMBIE) == 0)
267 			LIST_INSERT_HEAD(&zombprocess, pr, ps_list);
268 		else {
269 			/*
270 			 * Not going to be a zombie, so it's now off all
271 			 * the lists scanned by ispidtaken(), so block
272 			 * fast reuse of the pid now.
273 			 */
274 			freepid(pr->ps_pid);
275 		}
276 
277 		/*
278 		 * Give orphaned children to init(8).
279 		 */
280 		qr = LIST_FIRST(&pr->ps_children);
281 		if (qr)		/* only need this if any child is S_ZOMB */
282 			wakeup(initprocess);
283 		for (; qr != NULL; qr = nqr) {
284 			nqr = LIST_NEXT(qr, ps_sibling);
285 			proc_reparent(qr, initprocess);
286 			/*
287 			 * Traced processes are killed since their
288 			 * existence means someone is screwing up.
289 			 */
290 			if (qr->ps_flags & PS_TRACED &&
291 			    !(qr->ps_flags & PS_EXITING)) {
292 				atomic_clearbits_int(&qr->ps_flags, PS_TRACED);
293 				/*
294 				 * If single threading is active,
295 				 * direct the signal to the active
296 				 * thread to avoid deadlock.
297 				 */
298 				if (qr->ps_single)
299 					ptsignal(qr->ps_single, SIGKILL,
300 					    STHREAD);
301 				else
302 					prsignal(qr, SIGKILL);
303 			}
304 		}
305 	}
306 
307 	/* fold this thread's rusage and time usage into the process totals */
308 	ruadd(rup, &p->p_ru);
309 	tuagg(pr, p);
310 
311 	/*
312 	 * clear %cpu usage during swap
313 	 */
314 	p->p_pctcpu = 0;
315 
316 	if ((p->p_flag & P_THREAD) == 0) {
317 		/*
318 		 * Final thread has died, so add on our children's rusage
319 		 * and calculate the total times
320 		 */
321 		calcru(&pr->ps_tu, &rup->ru_utime, &rup->ru_stime, NULL);
322 		ruadd(rup, &pr->ps_cru);
323 
324 		/* notify interested parties of our demise and clean up */
325 		knote_processexit(p);
326 
327 		/*
328 		 * Notify parent that we're gone.  If we're not going to
329 		 * become a zombie, reparent to process 1 (init) so that
330 		 * we can wake our original parent to possibly unblock
331 		 * wait4() to return ECHILD.
332 		 */
333 		if (pr->ps_flags & PS_NOZOMBIE) {
334 			struct process *ppr = pr->ps_pptr;
335 			proc_reparent(pr, initprocess);
336 			wakeup(ppr);
337 		}
338 
339 		/*
340 		 * Release the process's signal state.
341 		 */
342 		sigactsfree(pr);
343 	}
344 
345 	/* just a thread? detach it from its process */
346 	if (p->p_flag & P_THREAD) {
347 		/* scheduler_wait_hook(pr->ps_mainproc, p); XXX */
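		/*
		 * Drop this thread's reference on the process; the main
		 * thread sleeps in exit1() above until only its own
		 * reference remains.
		 */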
348 		if (--pr->ps_refcnt == 1)
349 			wakeup(&pr->ps_threads);
350 		KASSERT(pr->ps_refcnt > 0);
351 	}
352 
353 	/*
354 	 * Other substructures are freed from reaper and wait().
355 	 */
356 
357 	/*
358 	 * Finally, call machine-dependent code to switch to a new
359 	 * context (possibly the idle context).  Once we are no longer
360 	 * using the dead process's vmspace and stack, exit2() will be
361 	 * called to schedule those resources to be released by the
362 	 * reaper thread.
363 	 *
364 	 * Note that cpu_exit() will end with a call equivalent to
365 	 * cpu_switch(), finishing our execution (pun intended).
366 	 */
367 	uvmexp.swtch++;
368 	cpu_exit(p);
369 	panic("cpu_exit returned");
370 }
371 
372 /*
373  * Locking of this proclist is special; it's accessed in a
374  * critical section of process exit, so taking the lock must not
375  * modify interrupt state.  We use a simple spin lock for this
376  * proclist, and the p_hash member to link each proc onto deadproc.
377  */
378 struct mutex deadproc_mutex = MUTEX_INITIALIZER(IPL_NONE);
379 struct proclist deadproc = LIST_HEAD_INITIALIZER(deadproc);
380 
381 /*
382  * We are called from cpu_exit() once it is safe to schedule the
383  * dead process's resources to be freed.
384  *
385  * NOTE: One must be careful with locking in this routine.  It's
386  * called from a critical section in machine-dependent code, so
387  * we should refrain from changing any interrupt state.
388  *
389  * We lock the deadproc list, place the proc on that list (using
390  * the p_hash member), and wake up the reaper.
391  */
392 void
393 exit2(struct proc *p)
394 {
395 	mtx_enter(&deadproc_mutex);
396 	LIST_INSERT_HEAD(&deadproc, p, p_hash);
397 	mtx_leave(&deadproc_mutex);
398 
399 	wakeup(&deadproc);
400 }
401 
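/*
 * Free the per-thread pieces: drop the thread's credential reference,
 * return the proc structure to its pool and update the thread count.
 */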
402 void
403 proc_free(struct proc *p)
404 {
405 	crfree(p->p_ucred);
406 	pool_put(&proc_pool, p);
407 	nthreads--;
408 }
409 
410 /*
411  * Process reaper.  This is run by a kernel thread to free the resources
412  * of a dead process.  Once the resources are free, the process becomes
413  * a zombie, and the parent is allowed to read the undead's status.
414  */
415 void
416 reaper(void)
417 {
418 	struct proc *p;
419 
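	/*
	 * Sleep on the deadproc list without the kernel lock; it is
	 * taken only around the actual freeing below and dropped
	 * again at the end of every iteration.
	 */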
420 	KERNEL_UNLOCK();
421 
422 	SCHED_ASSERT_UNLOCKED();
423 
424 	for (;;) {
425 		mtx_enter(&deadproc_mutex);
426 		while ((p = LIST_FIRST(&deadproc)) == NULL)
427 			msleep(&deadproc, &deadproc_mutex, PVM, "reaper", 0);
428 
429 		/* Remove us from the deadproc list. */
430 		LIST_REMOVE(p, p_hash);
431 		mtx_leave(&deadproc_mutex);
432 
433 		WITNESS_THREAD_EXIT(p);
434 
435 		KERNEL_LOCK();
436 
437 		/*
438 		 * Free the VM resources we're still holding on to.
439 		 * We must do this from a valid thread because doing
440 		 * so may block.
441 		 */
442 		uvm_uarea_free(p);
443 		p->p_vmspace = NULL;		/* zap the thread's copy */
444 
445 		if (p->p_flag & P_THREAD) {
446 			/* Just a thread */
447 			proc_free(p);
448 		} else {
449 			struct process *pr = p->p_p;
450 
451 			/* Release the rest of the process's vmspace */
452 			uvm_exit(pr);
453 
454 			if ((pr->ps_flags & PS_NOZOMBIE) == 0) {
455 				/* Process is now a true zombie. */
456 				atomic_setbits_int(&pr->ps_flags, PS_ZOMBIE);
457 				prsignal(pr->ps_pptr, SIGCHLD);
458 
459 				/* Wake up the parent so it can get exit status. */
460 				wakeup(pr->ps_pptr);
461 			} else {
462 				/* No one will wait for us. Just zap the process now */
463 				process_zap(pr);
464 			}
465 		}
466 
467 		KERNEL_UNLOCK();
468 	}
469 }
470 
471 int
472 sys_wait4(struct proc *q, void *v, register_t *retval)
473 {
474 	struct sys_wait4_args /* {
475 		syscallarg(pid_t) pid;
476 		syscallarg(int *) status;
477 		syscallarg(int) options;
478 		syscallarg(struct rusage *) rusage;
479 	} */ *uap = v;
480 	struct rusage ru;
481 	int status, error;
482 
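	/*
	 * dowait4() does the real work; copy status and rusage out to
	 * userland only if a child was actually reported.
	 */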
483 	error = dowait4(q, SCARG(uap, pid),
484 	    SCARG(uap, status) ? &status : NULL,
485 	    SCARG(uap, options), SCARG(uap, rusage) ? &ru : NULL, retval);
486 	if (error == 0 && retval[0] > 0 && SCARG(uap, status)) {
487 		error = copyout(&status, SCARG(uap, status), sizeof(status));
488 	}
489 	if (error == 0 && retval[0] > 0 && SCARG(uap, rusage)) {
490 		error = copyout(&ru, SCARG(uap, rusage), sizeof(ru));
491 #ifdef KTRACE
492 		if (error == 0 && KTRPOINT(q, KTR_STRUCT))
493 			ktrrusage(q, &ru);
494 #endif
495 	}
496 	return (error);
497 }
498 
499 int
500 dowait4(struct proc *q, pid_t pid, int *statusp, int options,
501     struct rusage *rusage, register_t *retval)
502 {
503 	int nfound;
504 	struct process *pr;
505 	struct proc *p;
506 	int error;
507 
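	/*
	 * pid > 0 selects one child, pid == 0 any child in the caller's
	 * process group, pid < -1 any child in process group -pid, and
	 * WAIT_ANY (-1) any child at all.
	 */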
508 	if (pid == 0)
509 		pid = -q->p_p->ps_pgid;
510 	if (options &~ (WUNTRACED|WNOHANG|WCONTINUED))
511 		return (EINVAL);
512 
513 loop:
514 	nfound = 0;
515 	LIST_FOREACH(pr, &q->p_p->ps_children, ps_sibling) {
516 		if ((pr->ps_flags & PS_NOZOMBIE) ||
517 		    (pid != WAIT_ANY &&
518 		    pr->ps_pid != pid &&
519 		    pr->ps_pgid != -pid))
520 			continue;
521 
522 		p = pr->ps_mainproc;
523 
524 		nfound++;
525 		if (pr->ps_flags & PS_ZOMBIE) {
526 			retval[0] = pr->ps_pid;
527 
528 			if (statusp != NULL)
529 				*statusp = p->p_xstat;	/* convert to int */
530 			if (rusage != NULL)
531 				memcpy(rusage, pr->ps_ru, sizeof(*rusage));
532 			proc_finish_wait(q, p);
533 			return (0);
534 		}
535 		if (pr->ps_flags & PS_TRACED &&
536 		    (pr->ps_flags & PS_WAITED) == 0 && pr->ps_single &&
537 		    pr->ps_single->p_stat == SSTOP &&
538 		    (pr->ps_single->p_flag & P_SUSPSINGLE) == 0) {
539 			single_thread_wait(pr);
540 
541 			atomic_setbits_int(&pr->ps_flags, PS_WAITED);
542 			retval[0] = pr->ps_pid;
543 
544 			if (statusp != NULL)
545 				*statusp = W_STOPCODE(pr->ps_single->p_xstat);
546 			if (rusage != NULL)
547 				memset(rusage, 0, sizeof(*rusage));
548 			return (0);
549 		}
550 		if (p->p_stat == SSTOP &&
551 		    (pr->ps_flags & PS_WAITED) == 0 &&
552 		    (p->p_flag & P_SUSPSINGLE) == 0 &&
553 		    (pr->ps_flags & PS_TRACED ||
554 		    options & WUNTRACED)) {
555 			atomic_setbits_int(&pr->ps_flags, PS_WAITED);
556 			retval[0] = pr->ps_pid;
557 
558 			if (statusp != NULL)
559 				*statusp = W_STOPCODE(p->p_xstat);
560 			if (rusage != NULL)
561 				memset(rusage, 0, sizeof(*rusage));
562 			return (0);
563 		}
564 		if ((options & WCONTINUED) && (p->p_flag & P_CONTINUED)) {
565 			atomic_clearbits_int(&p->p_flag, P_CONTINUED);
566 			retval[0] = pr->ps_pid;
567 
568 			if (statusp != NULL)
569 				*statusp = _WCONTINUED;
570 			if (rusage != NULL)
571 				memset(rusage, 0, sizeof(*rusage));
572 			return (0);
573 		}
574 	}
575 	if (nfound == 0)
576 		return (ECHILD);
577 	if (options & WNOHANG) {
578 		retval[0] = 0;
579 		return (0);
580 	}
581 	if ((error = tsleep(q->p_p, PWAIT | PCATCH, "wait", 0)) != 0)
582 		return (error);
583 	goto loop;
584 }
585 
586 void
587 proc_finish_wait(struct proc *waiter, struct proc *p)
588 {
589 	struct process *pr, *tr;
590 	struct rusage *rup;
591 
592 	/*
593 	 * If we got the child via a ptrace 'attach',
594 	 * we need to give it back to the old parent.
595 	 */
596 	pr = p->p_p;
597 	if (pr->ps_oppid && (tr = prfind(pr->ps_oppid))) {
598 		atomic_clearbits_int(&pr->ps_flags, PS_TRACED);
599 		pr->ps_oppid = 0;
600 		proc_reparent(pr, tr);
601 		prsignal(tr, SIGCHLD);
602 		wakeup(tr);
603 	} else {
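		/*
		 * Normal case: fold the child's rusage into our
		 * accumulated child rusage, release the pid and tear
		 * the process down.
		 */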
604 		scheduler_wait_hook(waiter, p);
605 		p->p_xstat = 0;
606 		rup = &waiter->p_p->ps_cru;
607 		ruadd(rup, pr->ps_ru);
608 		LIST_REMOVE(pr, ps_list);	/* off zombprocess */
609 		freepid(pr->ps_pid);
610 		process_zap(pr);
611 	}
612 }
613 
614 /*
615  * make process 'parent' the new parent of process 'child'.
616  */
617 void
618 proc_reparent(struct process *child, struct process *parent)
619 {
620 
621 	if (child->ps_pptr == parent)
622 		return;
623 
624 	LIST_REMOVE(child, ps_sibling);
625 	LIST_INSERT_HEAD(&parent->ps_children, child, ps_sibling);
626 	child->ps_pptr = parent;
627 }
628 
629 void
630 process_zap(struct process *pr)
631 {
632 	struct vnode *otvp;
633 	struct proc *p = pr->ps_mainproc;
634 
635 	/*
636 	 * Finally finished with old proc entry.
637 	 * Unlink it from its process group and free it.
638 	 */
639 	leavepgrp(pr);
640 	LIST_REMOVE(pr, ps_sibling);
641 
642 	/*
643 	 * Decrement the count of procs running with this uid.
644 	 */
645 	(void)chgproccnt(pr->ps_ucred->cr_ruid, -1);
646 
647 	/*
648 	 * Release reference to text vnode
649 	 */
650 	otvp = pr->ps_textvp;
651 	pr->ps_textvp = NULL;
652 	if (otvp)
653 		vrele(otvp);
654 
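	/*
	 * Only the main thread's reference remains: free the rest of
	 * the per-process substructures, the process itself, and
	 * finally the main thread's proc.
	 */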
655 	KASSERT(pr->ps_refcnt == 1);
656 	if (pr->ps_ptstat != NULL)
657 		free(pr->ps_ptstat, M_SUBPROC, sizeof(*pr->ps_ptstat));
658 	pool_put(&rusage_pool, pr->ps_ru);
659 	KASSERT(TAILQ_EMPTY(&pr->ps_threads));
660 	limfree(pr->ps_limit);
661 	crfree(pr->ps_ucred);
662 	pool_put(&process_pool, pr);
663 	nprocesses--;
664 
665 	proc_free(p);
666 }
667