/*	$OpenBSD: kern_exit.c,v 1.137 2014/03/26 05:23:42 guenther Exp $	*/
/*	$NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/proc.h>
#include <sys/tty.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/wait.h>
#include <sys/file.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/ptrace.h>
#include <sys/acct.h>
#include <sys/filedesc.h>
#include <sys/signalvar.h>
#include <sys/sched.h>
#include <sys/ktrace.h>
#include <sys/pool.h>
#include <sys/mutex.h>
#ifdef SYSVSEM
#include <sys/sem.h>
#endif

#include "systrace.h"
#include <dev/systrace.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>


#include <uvm/uvm_extern.h>

/*
 * exit --
 *	Death of process.
 */
int
sys_exit(struct proc *p, void *v, register_t *retval)
{
	struct sys_exit_args /* {
		syscallarg(int) rval;
	} */ *uap = v;

	exit1(p, W_EXITCODE(SCARG(uap, rval), 0), EXIT_NORMAL);
	/* NOTREACHED */
	return (0);
}

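/*
 * Exit of a single thread: if the caller supplied a "notdead" pointer,
 * zero it so userland can observe that this thread has exited, then
 * call exit1() in thread mode.
 */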
int
sys___threxit(struct proc *p, void *v, register_t *retval)
{
	struct sys___threxit_args /* {
		syscallarg(pid_t *) notdead;
	} */ *uap = v;

	if (SCARG(uap, notdead) != NULL) {
		pid_t zero = 0;
		if (copyout(&zero, SCARG(uap, notdead), sizeof(zero)))
			psignal(p, SIGSEGV);
	}
	exit1(p, 0, EXIT_THREAD);

	return (0);
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(struct proc *p, int rv, int flags)
{
	struct process *pr, *qr, *nqr;
	struct rusage *rup;
	struct vnode *ovp;

	if (p->p_pid == 1)
		panic("init died (signal %d, exit %d)",
		    WTERMSIG(rv), WEXITSTATUS(rv));

	atomic_setbits_int(&p->p_flag, P_WEXIT);

	pr = p->p_p;

	/* single-threaded? */
	if (TAILQ_FIRST(&pr->ps_threads) == p &&
	    TAILQ_NEXT(p, p_thr_link) == NULL) {
		flags = EXIT_NORMAL;
	} else {
		/* nope, multi-threaded */
		if (flags == EXIT_NORMAL)
			single_thread_set(p, SINGLE_EXIT, 0);
		else if (flags == EXIT_THREAD)
			single_thread_check(p, 0);
	}

	if (flags == EXIT_NORMAL) {
		atomic_setbits_int(&pr->ps_flags, PS_EXITING);
		pr->ps_mainproc->p_xstat = rv;

		/*
		 * If parent is waiting for us to exit or exec, PS_PPWAIT
		 * is set; we wake up the parent early to avoid deadlock.
		 */
		if (pr->ps_flags & PS_PPWAIT) {
			atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
			atomic_clearbits_int(&pr->ps_pptr->ps_flags,
			    PS_ISPWAIT);
			wakeup(pr->ps_pptr);
		}
	}

	/* unlink ourselves from the active threads */
	TAILQ_REMOVE(&pr->ps_threads, p, p_thr_link);
	if ((p->p_flag & P_THREAD) == 0) {
		/* main thread must wait because it owns the pid, etc. */
		while (!TAILQ_EMPTY(&pr->ps_threads))
			tsleep(&pr->ps_threads, PUSER, "thrdeath", 0);
		if (pr->ps_flags & PS_PROFIL)
			stopprofclock(pr);
	} else if (TAILQ_EMPTY(&pr->ps_threads)) {
		wakeup(&pr->ps_threads);
	}

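	/*
	 * Make sure the process has an rusage structure to accumulate
	 * exit-time statistics into.  pool_get() may sleep, so another
	 * thread could have installed ps_ru in the meantime; if so,
	 * give ours back and use the existing one.
	 */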
	rup = pr->ps_ru;
	if (rup == NULL) {
		rup = pool_get(&rusage_pool, PR_WAITOK | PR_ZERO);
		if (pr->ps_ru == NULL) {
			pr->ps_ru = rup;
		} else {
			pool_put(&rusage_pool, rup);
			rup = pr->ps_ru;
		}
	}
	p->p_siglist = 0;

	/*
	 * Close open files and release open-file table.
	 */
	fdfree(p);

	if ((p->p_flag & P_THREAD) == 0) {
		timeout_del(&pr->ps_realit_to);
#ifdef SYSVSEM
		semexit(pr);
#endif
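		/*
		 * If we were the session leader, tidy up the session:
		 * hang up and revoke the controlling terminal, if any,
		 * and clear the session's leader pointer.
		 */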
		if (SESS_LEADER(pr)) {
			struct session *sp = pr->ps_session;

			if (sp->s_ttyvp) {
				/*
				 * Controlling process.
				 * Signal foreground pgrp,
				 * drain controlling terminal
				 * and revoke access to controlling terminal.
				 */
				if (sp->s_ttyp->t_session == sp) {
					if (sp->s_ttyp->t_pgrp)
						pgsignal(sp->s_ttyp->t_pgrp,
						    SIGHUP, 1);
					ttywait(sp->s_ttyp);
					/*
					 * The tty could have been revoked
					 * if we blocked.
					 */
					if (sp->s_ttyvp)
						VOP_REVOKE(sp->s_ttyvp,
						    REVOKEALL);
				}
				ovp = sp->s_ttyvp;
				sp->s_ttyvp = NULL;
				if (ovp)
					vrele(ovp);
				/*
				 * s_ttyp is not zero'd; we use this to
				 * indicate that the session once had a
				 * controlling terminal.  (for logging and
				 * informational purposes)
				 */
			}
			sp->s_leader = NULL;
		}
		fixjobc(pr, pr->ps_pgrp, 0);

#ifdef ACCOUNTING
		acct_process(p);
#endif

#ifdef KTRACE
		/* release trace file */
		if (pr->ps_tracevp)
			ktrcleartrace(pr);
#endif

		/*
		 * If parent has the SAS_NOCLDWAIT flag set, we're not
		 * going to become a zombie.
		 */
		if (pr->ps_pptr->ps_sigacts->ps_flags & SAS_NOCLDWAIT)
			atomic_setbits_int(&pr->ps_flags, PS_NOZOMBIE);
	}

#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE))
		systrace_exit(p);
#endif

	/*
	 * If the emulation has a process exit hook, call it now.
	 */
	if (pr->ps_emul->e_proc_exit)
		(*pr->ps_emul->e_proc_exit)(p);

	/*
	 * Remove proc from pidhash chain and allproc so looking
	 * it up won't work.  We will put the proc on the
	 * deadproc list later (using the p_hash member), and
	 * wake up the reaper when we do.  If this is the last
	 * thread of a process that isn't PS_NOZOMBIE, we'll put
	 * the process on the zombprocess list below.
	 */
	/*
	 * NOTE: WE ARE NO LONGER ALLOWED TO SLEEP!
	 */
	p->p_stat = SDEAD;

	LIST_REMOVE(p, p_hash);
	LIST_REMOVE(p, p_list);

	if ((p->p_flag & P_THREAD) == 0) {
		LIST_REMOVE(pr, ps_list);

		if ((pr->ps_flags & PS_NOZOMBIE) == 0)
			LIST_INSERT_HEAD(&zombprocess, pr, ps_list);
		else {
			/*
			 * Not going to be a zombie, so it's now off all
			 * the lists scanned by ispidtaken(), so block
			 * fast reuse of the pid now.
			 */
			freepid(p->p_pid);
		}

		/*
		 * Give orphaned children to init(8).
		 */
		qr = LIST_FIRST(&pr->ps_children);
		if (qr)		/* only need this if any child is S_ZOMB */
			wakeup(initproc->p_p);
		for (; qr != 0; qr = nqr) {
			nqr = LIST_NEXT(qr, ps_sibling);
			proc_reparent(qr, initproc->p_p);
			/*
			 * Traced processes are killed since their
			 * existence means someone is screwing up.
			 */
			if (qr->ps_flags & PS_TRACED &&
			    !(qr->ps_flags & PS_EXITING)) {
				atomic_clearbits_int(&qr->ps_flags, PS_TRACED);
				/*
				 * If single threading is active,
				 * direct the signal to the active
				 * thread to avoid deadlock.
				 */
				if (qr->ps_single)
					ptsignal(qr->ps_single, SIGKILL,
					    STHREAD);
				else
					prsignal(qr, SIGKILL);
			}
		}
	}

	/* add thread's accumulated rusage into the process's total */
	ruadd(rup, &p->p_ru);
	tuagg(pr, p);

	/*
	 * clear %cpu usage during swap
	 */
	p->p_pctcpu = 0;

	if ((p->p_flag & P_THREAD) == 0) {
		/*
		 * Final thread has died, so add on our children's rusage
		 * and calculate the total times
		 */
		calcru(&pr->ps_tu, &rup->ru_utime, &rup->ru_stime, NULL);
		ruadd(rup, &pr->ps_cru);

		/* notify interested parties of our demise and clean up */
		knote_processexit(pr);

		/*
		 * Notify parent that we're gone.  If we're not going to
		 * become a zombie, reparent to process 1 (init) so that
		 * we can wake our original parent to possibly unblock
		 * wait4() to return ECHILD.
		 */
		if (pr->ps_flags & PS_NOZOMBIE) {
			struct process *ppr = pr->ps_pptr;
			proc_reparent(pr, initproc->p_p);
			wakeup(ppr);
		}

		/*
		 * Release the process's signal state.
		 */
		sigactsfree(pr);
	}

	/* just a thread? detach it from its process */
	if (p->p_flag & P_THREAD) {
		/* scheduler_wait_hook(pr->ps_mainproc, p); XXX */
		--pr->ps_refcnt;
		KASSERT(pr->ps_refcnt > 0);
	}

	/*
	 * Other substructures are freed from reaper and wait().
	 */

	/*
	 * Finally, call machine-dependent code to switch to a new
	 * context (possibly the idle context).  Once we are no longer
	 * using the dead process's vmspace and stack, exit2() will be
	 * called to schedule those resources to be released by the
	 * reaper thread.
	 *
	 * Note that cpu_exit() will end with a call equivalent to
	 * cpu_switch(), finishing our execution (pun intended).
	 */
	uvmexp.swtch++;
	cpu_exit(p);
	panic("cpu_exit returned");
}

/*
 * Locking of this proclist is special; it's accessed in a
 * critical section of process exit, and thus locking it can't
 * modify interrupt state.  We use a simple spin lock for this
 * proclist.  We use the p_hash member to link up to deadproc.
 */
struct mutex deadproc_mutex = MUTEX_INITIALIZER(IPL_NONE);
struct proclist deadproc = LIST_HEAD_INITIALIZER(deadproc);

/*
 * We are called from cpu_exit() once it is safe to schedule the
 * dead process's resources to be freed.
 *
 * NOTE: One must be careful with locking in this routine.  It's
 * called from a critical section in machine-dependent code, so
 * we should refrain from changing any interrupt state.
 *
 * We lock the deadproc list, place the proc on that list (using
 * the p_hash member), and wake up the reaper.
 */
void
exit2(struct proc *p)
{
	mtx_enter(&deadproc_mutex);
	LIST_INSERT_HEAD(&deadproc, p, p_hash);
	mtx_leave(&deadproc_mutex);

	wakeup(&deadproc);
}

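/*
 * Release the per-thread proc structure and drop the global
 * thread count.
 */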
void
proc_free(struct proc *p)
{
	pool_put(&proc_pool, p);
	nthreads--;
}

/*
 * Process reaper.  This is run by a kernel thread to free the resources
 * of a dead process.  Once the resources are free, the process becomes
 * a zombie, and the parent is allowed to read the undead's status.
 */
void
reaper(void)
{
	struct proc *p;

	KERNEL_UNLOCK();

	SCHED_ASSERT_UNLOCKED();

	for (;;) {
		mtx_enter(&deadproc_mutex);
		while ((p = LIST_FIRST(&deadproc)) == NULL)
			msleep(&deadproc, &deadproc_mutex, PVM, "reaper", 0);

		/* Remove us from the deadproc list. */
		LIST_REMOVE(p, p_hash);
		mtx_leave(&deadproc_mutex);

		KERNEL_LOCK();

		/*
		 * Free the VM resources we're still holding on to.
		 * We must do this from a valid thread because doing
		 * so may block.
		 */
		uvm_exit(p);

		if (p->p_flag & P_THREAD) {
			/* Just a thread */
			proc_free(p);
		} else {
			struct process *pr = p->p_p;

			if ((pr->ps_flags & PS_NOZOMBIE) == 0) {
				/* Process is now a true zombie. */
				p->p_stat = SZOMB;
				prsignal(pr->ps_pptr, SIGCHLD);

				/* Wake up the parent so it can get exit status. */
				wakeup(pr->ps_pptr);
			} else {
				/* No one will wait for us. Just zap the process now */
				process_zap(pr);
			}
		}

		KERNEL_UNLOCK();
	}
}

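/*
 * wait4(2): collect status from dowait4() and copy the status and
 * rusage out to the caller-supplied pointers, if any.
 */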
int
sys_wait4(struct proc *q, void *v, register_t *retval)
{
	struct sys_wait4_args /* {
		syscallarg(pid_t) pid;
		syscallarg(int *) status;
		syscallarg(int) options;
		syscallarg(struct rusage *) rusage;
	} */ *uap = v;
	struct rusage ru;
	int status, error;

	error = dowait4(q, SCARG(uap, pid),
	    SCARG(uap, status) ? &status : NULL,
	    SCARG(uap, options), SCARG(uap, rusage) ? &ru : NULL, retval);
	if (error == 0 && SCARG(uap, status)) {
		error = copyout(&status, SCARG(uap, status), sizeof(status));
	}
	if (error == 0 && SCARG(uap, rusage)) {
		error = copyout(&ru, SCARG(uap, rusage), sizeof(ru));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(q, KTR_STRUCT))
			ktrrusage(q, &ru);
#endif
	}
	return (error);
}

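/*
 * Common code for wait4(): scan our children for one matching the
 * requested pid that is a zombie, stopped, or continued; sleep and
 * rescan if no child is ready yet and WNOHANG wasn't given.
 */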
int
dowait4(struct proc *q, pid_t pid, int *statusp, int options,
    struct rusage *rusage, register_t *retval)
{
	int nfound;
	struct process *pr;
	struct proc *p;
	int error;

	if (pid == 0)
		pid = -q->p_p->ps_pgid;
	if (options &~ (WUNTRACED|WNOHANG|WCONTINUED))
		return (EINVAL);

loop:
	nfound = 0;
	LIST_FOREACH(pr, &q->p_p->ps_children, ps_sibling) {
		p = pr->ps_mainproc;
		if ((pr->ps_flags & PS_NOZOMBIE) ||
		    (pid != WAIT_ANY &&
		    p->p_pid != pid &&
		    pr->ps_pgid != -pid))
			continue;

		nfound++;
		if (p->p_stat == SZOMB) {
			retval[0] = p->p_pid;

			if (statusp != NULL)
				*statusp = p->p_xstat;	/* convert to int */
			if (rusage != NULL)
				memcpy(rusage, pr->ps_ru, sizeof(*rusage));
			proc_finish_wait(q, p);
			return (0);
		}
		if (pr->ps_flags & PS_TRACED &&
		    (pr->ps_flags & PS_WAITED) == 0 && pr->ps_single &&
		    pr->ps_single->p_stat == SSTOP &&
		    (pr->ps_single->p_flag & P_SUSPSINGLE) == 0) {
			single_thread_wait(pr);

			atomic_setbits_int(&pr->ps_flags, PS_WAITED);
			retval[0] = p->p_pid;

			if (statusp != NULL)
				*statusp = W_STOPCODE(pr->ps_single->p_xstat);
			return (0);
		}
		if (p->p_stat == SSTOP &&
		    (pr->ps_flags & PS_WAITED) == 0 &&
		    (p->p_flag & P_SUSPSINGLE) == 0 &&
		    (pr->ps_flags & PS_TRACED ||
		    options & WUNTRACED)) {
			atomic_setbits_int(&pr->ps_flags, PS_WAITED);
			retval[0] = p->p_pid;

			if (statusp != NULL)
				*statusp = W_STOPCODE(p->p_xstat);
			return (0);
		}
		if ((options & WCONTINUED) && (p->p_flag & P_CONTINUED)) {
			atomic_clearbits_int(&p->p_flag, P_CONTINUED);
			retval[0] = p->p_pid;

			if (statusp != NULL)
				*statusp = _WCONTINUED;
			return (0);
		}
	}
	if (nfound == 0)
		return (ECHILD);
	if (options & WNOHANG) {
		retval[0] = 0;
		return (0);
	}
	if ((error = tsleep(q->p_p, PWAIT | PCATCH, "wait", 0)) != 0)
		return (error);
	goto loop;
}

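/*
 * A zombie child's status has been collected: either hand a ptraced
 * child back to its original parent, or fold its rusage into ours and
 * zap the process for good.
 */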
void
proc_finish_wait(struct proc *waiter, struct proc *p)
{
	struct process *pr, *tr;
	struct rusage *rup;

	/*
	 * If we got the child via a ptrace 'attach',
	 * we need to give it back to the old parent.
	 */
	pr = p->p_p;
	if (pr->ps_oppid && (tr = prfind(pr->ps_oppid))) {
		atomic_clearbits_int(&pr->ps_flags, PS_TRACED);
		pr->ps_oppid = 0;
		proc_reparent(pr, tr);
		prsignal(tr, SIGCHLD);
		wakeup(tr);
	} else {
		scheduler_wait_hook(waiter, p);
		p->p_xstat = 0;
		rup = &waiter->p_p->ps_cru;
		ruadd(rup, pr->ps_ru);
		LIST_REMOVE(pr, ps_list);	/* off zombprocess */
		freepid(p->p_pid);
		process_zap(pr);
	}
}

/*
 * make process 'parent' the new parent of process 'child'.
 */
void
proc_reparent(struct process *child, struct process *parent)
{

	if (child->ps_pptr == parent)
		return;

	LIST_REMOVE(child, ps_sibling);
	LIST_INSERT_HEAD(&parent->ps_children, child, ps_sibling);
	child->ps_pptr = parent;
}

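/*
 * Final disposal of an exited process: detach it from its process
 * group and parent, release its remaining resources, and free the
 * process and main-thread structures.
 */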
void
process_zap(struct process *pr)
{
	struct vnode *otvp;
	struct proc *p = pr->ps_mainproc;

	/*
	 * Finally finished with old proc entry.
	 * Unlink it from its process group and free it.
	 */
	leavepgrp(pr);
	LIST_REMOVE(pr, ps_sibling);

	/*
	 * Decrement the count of procs running with this uid.
	 */
	(void)chgproccnt(pr->ps_cred->p_ruid, -1);

	/*
	 * Release reference to text vnode
	 */
	otvp = pr->ps_textvp;
	pr->ps_textvp = NULL;
	if (otvp)
		vrele(otvp);

	KASSERT(pr->ps_refcnt == 1);
	if (pr->ps_ptstat != NULL)
		free(pr->ps_ptstat, M_SUBPROC);
	pool_put(&rusage_pool, pr->ps_ru);
	KASSERT(TAILQ_EMPTY(&pr->ps_threads));
	limfree(pr->ps_limit);
	crfree(pr->ps_cred->pc_ucred);
	pool_put(&pcred_pool, pr->ps_cred);
	pool_put(&process_pool, pr);
	nprocesses--;

	proc_free(p);
}
664