/*	$OpenBSD: kern_exit.c,v 1.147 2014/07/12 18:43:32 tedu Exp $	*/
/*	$NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/proc.h>
#include <sys/tty.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/buf.h>
#include <sys/wait.h>
#include <sys/file.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/ptrace.h>
#include <sys/acct.h>
#include <sys/filedesc.h>
#include <sys/signalvar.h>
#include <sys/sched.h>
#include <sys/ktrace.h>
#include <sys/pool.h>
#include <sys/mutex.h>
#ifdef SYSVSEM
#include <sys/sem.h>
#endif

#include "systrace.h"
#include <dev/systrace.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

/*
 * exit --
 *	Death of process.
 */
int
sys_exit(struct proc *p, void *v, register_t *retval)
{
	struct sys_exit_args /* {
		syscallarg(int) rval;
	} */ *uap = v;

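	/*
	 * W_EXITCODE() packs the user-supplied exit code into the
	 * wait(2) status format (exit code in bits 8-15, termination
	 * signal 0).
	 */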
	exit1(p, W_EXITCODE(SCARG(uap, rval), 0), EXIT_NORMAL);
	/* NOTREACHED */
	return (0);
}

int
sys___threxit(struct proc *p, void *v, register_t *retval)
{
	struct sys___threxit_args /* {
		syscallarg(pid_t *) notdead;
	} */ *uap = v;

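	/*
	 * If the thread library passed a ``not dead'' pointer, clear it
	 * in userspace so other threads can observe that this thread is
	 * on its way out; an unwritable pointer earns the caller a
	 * SIGSEGV.
	 */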
	if (SCARG(uap, notdead) != NULL) {
		pid_t zero = 0;
		if (copyout(&zero, SCARG(uap, notdead), sizeof(zero)))
			psignal(p, SIGSEGV);
	}
	exit1(p, 0, EXIT_THREAD);

	return (0);
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(struct proc *p, int rv, int flags)
{
	struct process *pr, *qr, *nqr;
	struct rusage *rup;
	struct vnode *ovp;

	atomic_setbits_int(&p->p_flag, P_WEXIT);

	pr = p->p_p;

	/* single-threaded? */
	if (TAILQ_FIRST(&pr->ps_threads) == p &&
	    TAILQ_NEXT(p, p_thr_link) == NULL) {
		flags = EXIT_NORMAL;
	} else {
		/* nope, multi-threaded */
		if (flags == EXIT_NORMAL)
			single_thread_set(p, SINGLE_EXIT, 0);
		else if (flags == EXIT_THREAD)
			single_thread_check(p, 0);
	}

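	/*
	 * The whole process is exiting: init must never die, and the
	 * exit status is recorded where dowait4() will look for it.
	 */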
	if (flags == EXIT_NORMAL) {
		if (pr->ps_pid == 1)
			panic("init died (signal %d, exit %d)",
			    WTERMSIG(rv), WEXITSTATUS(rv));

		atomic_setbits_int(&pr->ps_flags, PS_EXITING);
		pr->ps_mainproc->p_xstat = rv;

		/*
		 * If parent is waiting for us to exit or exec, PS_PPWAIT
		 * is set; we wake up the parent early to avoid deadlock.
		 */
		if (pr->ps_flags & PS_PPWAIT) {
			atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
			atomic_clearbits_int(&pr->ps_pptr->ps_flags,
			    PS_ISPWAIT);
			wakeup(pr->ps_pptr);
		}
	}

	/* unlink ourselves from the active threads */
	TAILQ_REMOVE(&pr->ps_threads, p, p_thr_link);
	if ((p->p_flag & P_THREAD) == 0) {
		/* the main thread has to wait because it holds the pid, etc. */
		while (pr->ps_refcnt > 1)
			tsleep(&pr->ps_threads, PUSER, "thrdeath", 0);
		if (pr->ps_flags & PS_PROFIL)
			stopprofclock(pr);
	}

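	/*
	 * Make sure the process has an rusage accumulator.  pool_get()
	 * may sleep, so another exiting thread can beat us to it; if it
	 * did, give the fresh allocation back to the pool.
	 */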
	rup = pr->ps_ru;
	if (rup == NULL) {
		rup = pool_get(&rusage_pool, PR_WAITOK | PR_ZERO);
		if (pr->ps_ru == NULL) {
			pr->ps_ru = rup;
		} else {
			pool_put(&rusage_pool, rup);
			rup = pr->ps_ru;
		}
	}
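	/* signals still queued for this thread are simply discarded */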
	p->p_siglist = 0;

	if ((p->p_flag & P_THREAD) == 0) {
		/* close open files and release open-file table */
		fdfree(p);

		timeout_del(&pr->ps_realit_to);
#ifdef SYSVSEM
		semexit(pr);
#endif
		if (SESS_LEADER(pr)) {
			struct session *sp = pr->ps_session;

			if (sp->s_ttyvp) {
				/*
				 * Controlling process.
				 * Signal foreground pgrp,
				 * drain controlling terminal
				 * and revoke access to controlling terminal.
				 */
				if (sp->s_ttyp->t_session == sp) {
					if (sp->s_ttyp->t_pgrp)
						pgsignal(sp->s_ttyp->t_pgrp,
						    SIGHUP, 1);
					ttywait(sp->s_ttyp);
					/*
					 * The tty could have been revoked
					 * if we blocked.
					 */
					if (sp->s_ttyvp)
						VOP_REVOKE(sp->s_ttyvp,
						    REVOKEALL);
				}
				ovp = sp->s_ttyvp;
				sp->s_ttyvp = NULL;
				if (ovp)
					vrele(ovp);
				/*
				 * s_ttyp is not zero'd; we use this to
				 * indicate that the session once had a
				 * controlling terminal.  (for logging and
				 * informational purposes)
				 */
			}
			sp->s_leader = NULL;
		}
		fixjobc(pr, pr->ps_pgrp, 0);

#ifdef ACCOUNTING
		acct_process(p);
#endif

#ifdef KTRACE
		/* release trace file */
		if (pr->ps_tracevp)
			ktrcleartrace(pr);
#endif

		/*
		 * If parent has the SAS_NOCLDWAIT flag set, we're not
		 * going to become a zombie.
		 */
		if (pr->ps_pptr->ps_sigacts->ps_flags & SAS_NOCLDWAIT)
			atomic_setbits_int(&pr->ps_flags, PS_NOZOMBIE);
	}

	p->p_fd = NULL;		/* zap the thread's copy */

#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE))
		systrace_exit(p);
#endif

	/*
	 * If emulation has thread exit hook, call it now.
	 */
	if (pr->ps_emul->e_proc_exit)
		(*pr->ps_emul->e_proc_exit)(p);

	/*
	 * Remove proc from pidhash chain and allproc so looking
	 * it up won't work.  We will put the proc on the
	 * deadproc list later (using the p_hash member), and
	 * wake up the reaper when we do.  If this is the last
	 * thread of a process that isn't PS_NOZOMBIE, we'll put
	 * the process on the zombprocess list below.
	 */
	/*
	 * NOTE: WE ARE NO LONGER ALLOWED TO SLEEP!
	 */
	p->p_stat = SDEAD;

	LIST_REMOVE(p, p_hash);
	LIST_REMOVE(p, p_list);

	if ((p->p_flag & P_THREAD) == 0) {
		LIST_REMOVE(pr, ps_list);

		if ((pr->ps_flags & PS_NOZOMBIE) == 0)
			LIST_INSERT_HEAD(&zombprocess, pr, ps_list);
		else {
			/*
			 * Not going to be a zombie, so it's now off all
			 * the lists scanned by ispidtaken(), so block
			 * fast reuse of the pid now.
			 */
			freepid(p->p_pid);
		}

		/*
		 * Give orphaned children to init(8).
		 */
		qr = LIST_FIRST(&pr->ps_children);
		if (qr)		/* only need this if any child is S_ZOMB */
			wakeup(initprocess);
		for (; qr != 0; qr = nqr) {
			nqr = LIST_NEXT(qr, ps_sibling);
			proc_reparent(qr, initprocess);
			/*
			 * Traced processes are killed since their
			 * existence means someone is screwing up.
			 */
			if (qr->ps_flags & PS_TRACED &&
			    !(qr->ps_flags & PS_EXITING)) {
				atomic_clearbits_int(&qr->ps_flags, PS_TRACED);
				/*
				 * If single threading is active,
				 * direct the signal to the active
				 * thread to avoid deadlock.
				 */
				if (qr->ps_single)
					ptsignal(qr->ps_single, SIGKILL,
					    STHREAD);
				else
					prsignal(qr, SIGKILL);
			}
		}
	}

	/* add thread's accumulated rusage into the process's total */
	ruadd(rup, &p->p_ru);
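	/* ...and fold its tick counts and runtime into the process totals */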
	tuagg(pr, p);

	/*
	 * clear %cpu usage during swap
	 */
	p->p_pctcpu = 0;

	if ((p->p_flag & P_THREAD) == 0) {
		/*
		 * Final thread has died, so add on our children's rusage
		 * and calculate the total times
		 */
		calcru(&pr->ps_tu, &rup->ru_utime, &rup->ru_stime, NULL);
		ruadd(rup, &pr->ps_cru);

		/* notify interested parties of our demise and clean up */
		knote_processexit(p);

		/*
		 * Notify parent that we're gone.  If we're not going to
		 * become a zombie, reparent to process 1 (init) so that
		 * we can wake our original parent to possibly unblock
		 * wait4() to return ECHILD.
		 */
		if (pr->ps_flags & PS_NOZOMBIE) {
			struct process *ppr = pr->ps_pptr;
			proc_reparent(pr, initprocess);
			wakeup(ppr);
		}

		/*
		 * Release the process's signal state.
		 */
		sigactsfree(pr);
	}

	/* just a thread? detach it from its process */
	if (p->p_flag & P_THREAD) {
		/* scheduler_wait_hook(pr->ps_mainproc, p); XXX */
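		/*
		 * The main thread may be asleep in exit1() above waiting
		 * for ps_refcnt to drop to 1; wake it once we are the
		 * last thread besides it.
		 */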
		if (--pr->ps_refcnt == 1)
			wakeup(&pr->ps_threads);
		KASSERT(pr->ps_refcnt > 0);
	}

	/*
	 * Other substructures are freed from reaper and wait().
	 */

	/*
	 * Finally, call machine-dependent code to switch to a new
	 * context (possibly the idle context).  Once we are no longer
	 * using the dead process's vmspace and stack, exit2() will be
	 * called to schedule those resources to be released by the
	 * reaper thread.
	 *
	 * Note that cpu_exit() will end with a call equivalent to
	 * cpu_switch(), finishing our execution (pun intended).
	 */
	uvmexp.swtch++;
	cpu_exit(p);
	panic("cpu_exit returned");
}

/*
 * Locking of this proclist is special; it's accessed in a
 * critical section of process exit, and thus locking it can't
 * modify interrupt state.  We use a simple spin lock for this
 * proclist.  We use the p_hash member to linkup to deadproc.
 */
struct mutex deadproc_mutex = MUTEX_INITIALIZER(IPL_NONE);
struct proclist deadproc = LIST_HEAD_INITIALIZER(deadproc);

/*
 * We are called from cpu_exit() once it is safe to schedule the
 * dead process's resources to be freed.
 *
 * NOTE: One must be careful with locking in this routine.  It's
 * called from a critical section in machine-dependent code, so
 * we should refrain from changing any interrupt state.
 *
 * We lock the deadproc list, place the proc on that list (using
 * the p_hash member), and wake up the reaper.
 */
void
exit2(struct proc *p)
{
	mtx_enter(&deadproc_mutex);
	LIST_INSERT_HEAD(&deadproc, p, p_hash);
	mtx_leave(&deadproc_mutex);

	wakeup(&deadproc);
}

void
proc_free(struct proc *p)
{
	crfree(p->p_ucred);
	pool_put(&proc_pool, p);
	nthreads--;
}

/*
 * Process reaper.  This is run by a kernel thread to free the resources
 * of a dead process.  Once the resources are free, the process becomes
 * a zombie, and the parent is allowed to read the undead's status.
 */
void
reaper(void)
{
	struct proc *p;

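	/*
	 * The reaper idles without the kernel lock; it is retaken
	 * below only while actually freeing a dead thread's resources.
	 */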
	KERNEL_UNLOCK();

	SCHED_ASSERT_UNLOCKED();

	for (;;) {
		mtx_enter(&deadproc_mutex);
		while ((p = LIST_FIRST(&deadproc)) == NULL)
			msleep(&deadproc, &deadproc_mutex, PVM, "reaper", 0);

		/* Remove us from the deadproc list. */
		LIST_REMOVE(p, p_hash);
		mtx_leave(&deadproc_mutex);

		KERNEL_LOCK();

		/*
		 * Free the VM resources we're still holding on to.
		 * We must do this from a valid thread because doing
		 * so may block.
		 */
		uvm_uarea_free(p);
		p->p_vmspace = NULL;		/* zap the thread's copy */

		if (p->p_flag & P_THREAD) {
			/* Just a thread */
			proc_free(p);
		} else {
			struct process *pr = p->p_p;

			/* Release the rest of the process's vmspace */
			uvm_exit(pr);

			if ((pr->ps_flags & PS_NOZOMBIE) == 0) {
				/* Process is now a true zombie. */
				atomic_setbits_int(&pr->ps_flags, PS_ZOMBIE);
				prsignal(pr->ps_pptr, SIGCHLD);

				/* Wake up the parent so it can get exit status. */
				wakeup(pr->ps_pptr);
			} else {
				/* No one will wait for us. Just zap the process now */
				process_zap(pr);
			}
		}

		KERNEL_UNLOCK();
	}
}

int
sys_wait4(struct proc *q, void *v, register_t *retval)
{
	struct sys_wait4_args /* {
		syscallarg(pid_t) pid;
		syscallarg(int *) status;
		syscallarg(int) options;
		syscallarg(struct rusage *) rusage;
	} */ *uap = v;
	struct rusage ru;
	int status, error;

	error = dowait4(q, SCARG(uap, pid),
	    SCARG(uap, status) ? &status : NULL,
	    SCARG(uap, options), SCARG(uap, rusage) ? &ru : NULL, retval);
	if (error == 0 && retval[0] > 0 && SCARG(uap, status)) {
		error = copyout(&status, SCARG(uap, status), sizeof(status));
	}
	if (error == 0 && retval[0] > 0 && SCARG(uap, rusage)) {
		error = copyout(&ru, SCARG(uap, rusage), sizeof(ru));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(q, KTR_STRUCT))
			ktrrusage(q, &ru);
#endif
	}
	return (error);
}
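
/*
 * A minimal userland usage sketch (not part of this file), showing how
 * the arguments handled above are typically supplied via wait4(2):
 *
 *	int status;
 *	struct rusage ru;
 *	pid_t child = wait4(WAIT_ANY, &status, WUNTRACED, &ru);
 *	if (child > 0 && WIFEXITED(status))
 *		printf("exit code %d\n", WEXITSTATUS(status));
 */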

int
dowait4(struct proc *q, pid_t pid, int *statusp, int options,
    struct rusage *rusage, register_t *retval)
{
	int nfound;
	struct process *pr;
	struct proc *p;
	int error;

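	/*
	 * wait4(2) pid encoding: WAIT_ANY (-1) matches any child, 0
	 * means the caller's own process group, a positive value names
	 * a single child, and other negative values name a process
	 * group.  Only the pid == 0 case needs normalizing here; the
	 * rest is handled by the match test in the loop below.
	 */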
	if (pid == 0)
		pid = -q->p_p->ps_pgid;
	if (options &~ (WUNTRACED|WNOHANG|WCONTINUED))
		return (EINVAL);

loop:
	nfound = 0;
	LIST_FOREACH(pr, &q->p_p->ps_children, ps_sibling) {
		p = pr->ps_mainproc;
		if ((pr->ps_flags & PS_NOZOMBIE) ||
		    (pid != WAIT_ANY &&
		    p->p_pid != pid &&
		    pr->ps_pgid != -pid))
			continue;

		nfound++;
		if (pr->ps_flags & PS_ZOMBIE) {
			retval[0] = p->p_pid;

			if (statusp != NULL)
				*statusp = p->p_xstat;	/* convert to int */
			if (rusage != NULL)
				memcpy(rusage, pr->ps_ru, sizeof(*rusage));
			proc_finish_wait(q, p);
			return (0);
		}
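		/*
		 * A traced child that has been single-threaded and is
		 * stopped is reported to the waiting debugger as a stop.
		 */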
		if (pr->ps_flags & PS_TRACED &&
		    (pr->ps_flags & PS_WAITED) == 0 && pr->ps_single &&
		    pr->ps_single->p_stat == SSTOP &&
		    (pr->ps_single->p_flag & P_SUSPSINGLE) == 0) {
			single_thread_wait(pr);

			atomic_setbits_int(&pr->ps_flags, PS_WAITED);
			retval[0] = p->p_pid;

			if (statusp != NULL)
				*statusp = W_STOPCODE(pr->ps_single->p_xstat);
			if (rusage != NULL)
				memset(rusage, 0, sizeof(*rusage));
			return (0);
		}
		if (p->p_stat == SSTOP &&
		    (pr->ps_flags & PS_WAITED) == 0 &&
		    (p->p_flag & P_SUSPSINGLE) == 0 &&
		    (pr->ps_flags & PS_TRACED ||
		    options & WUNTRACED)) {
			atomic_setbits_int(&pr->ps_flags, PS_WAITED);
			retval[0] = p->p_pid;

			if (statusp != NULL)
				*statusp = W_STOPCODE(p->p_xstat);
			if (rusage != NULL)
				memset(rusage, 0, sizeof(*rusage));
			return (0);
		}
		if ((options & WCONTINUED) && (p->p_flag & P_CONTINUED)) {
			atomic_clearbits_int(&p->p_flag, P_CONTINUED);
			retval[0] = p->p_pid;

			if (statusp != NULL)
				*statusp = _WCONTINUED;
			if (rusage != NULL)
				memset(rusage, 0, sizeof(*rusage));
			return (0);
		}
	}
	if (nfound == 0)
		return (ECHILD);
	if (options & WNOHANG) {
		retval[0] = 0;
		return (0);
	}
	if ((error = tsleep(q->p_p, PWAIT | PCATCH, "wait", 0)) != 0)
		return (error);
	goto loop;
}

void
proc_finish_wait(struct proc *waiter, struct proc *p)
{
	struct process *pr, *tr;
	struct rusage *rup;

	/*
	 * If we got the child via a ptrace 'attach',
	 * we need to give it back to the old parent.
	 */
	pr = p->p_p;
	if (pr->ps_oppid && (tr = prfind(pr->ps_oppid))) {
		atomic_clearbits_int(&pr->ps_flags, PS_TRACED);
		pr->ps_oppid = 0;
		proc_reparent(pr, tr);
		prsignal(tr, SIGCHLD);
		wakeup(tr);
	} else {
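		/*
		 * Normal case: fold the child's rusage into the
		 * parent's child-rusage totals, then discard the zombie.
		 */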
		scheduler_wait_hook(waiter, p);
		p->p_xstat = 0;
		rup = &waiter->p_p->ps_cru;
		ruadd(rup, pr->ps_ru);
		LIST_REMOVE(pr, ps_list);	/* off zombprocess */
		freepid(p->p_pid);
		process_zap(pr);
	}
}

/*
 * make process 'parent' the new parent of process 'child'.
 */
void
proc_reparent(struct process *child, struct process *parent)
{

	if (child->ps_pptr == parent)
		return;

	LIST_REMOVE(child, ps_sibling);
	LIST_INSERT_HEAD(&parent->ps_children, child, ps_sibling);
	child->ps_pptr = parent;
}

void
process_zap(struct process *pr)
{
	struct vnode *otvp;
	struct proc *p = pr->ps_mainproc;

	/*
	 * Finally finished with old proc entry.
	 * Unlink it from its process group and free it.
	 */
	leavepgrp(pr);
	LIST_REMOVE(pr, ps_sibling);

	/*
	 * Decrement the count of procs running with this uid.
	 */
	(void)chgproccnt(pr->ps_ucred->cr_ruid, -1);

	/*
	 * Release reference to text vnode
	 */
	otvp = pr->ps_textvp;
	pr->ps_textvp = NULL;
	if (otvp)
		vrele(otvp);

	KASSERT(pr->ps_refcnt == 1);
	if (pr->ps_ptstat != NULL)
		free(pr->ps_ptstat, M_SUBPROC, 0);
	pool_put(&rusage_pool, pr->ps_ru);
	KASSERT(TAILQ_EMPTY(&pr->ps_threads));
	limfree(pr->ps_limit);
	crfree(pr->ps_ucred);
	pool_put(&process_pool, pr);
	nprocesses--;

	proc_free(p);
}