xref: /openbsd-src/sys/kern/kern_exit.c (revision ce0272ab83a2aa56bfa8d51be526ac59843bef3f)
1 /*	$OpenBSD: kern_exit.c,v 1.91 2010/05/18 22:26:10 tedu Exp $	*/
2 /*	$NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $	*/
3 
4 /*
5  * Copyright (c) 1982, 1986, 1989, 1991, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  * (c) UNIX System Laboratories, Inc.
8  * All or some portions of this file are derived from material licensed
9  * to the University of California by American Telephone and Telegraph
10  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
11  * the permission of UNIX System Laboratories, Inc.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
38  */
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/ioctl.h>
43 #include <sys/proc.h>
44 #include <sys/tty.h>
45 #include <sys/time.h>
46 #include <sys/resource.h>
47 #include <sys/kernel.h>
48 #include <sys/buf.h>
49 #include <sys/wait.h>
50 #include <sys/file.h>
51 #include <sys/vnode.h>
52 #include <sys/syslog.h>
53 #include <sys/malloc.h>
54 #include <sys/resourcevar.h>
55 #include <sys/ptrace.h>
56 #include <sys/acct.h>
57 #include <sys/filedesc.h>
58 #include <sys/signalvar.h>
59 #include <sys/sched.h>
60 #include <sys/ktrace.h>
61 #include <sys/pool.h>
62 #include <sys/mutex.h>
63 #ifdef SYSVSEM
64 #include <sys/sem.h>
65 #endif
66 
67 #include "systrace.h"
68 #include <dev/systrace.h>
69 
70 #include <sys/mount.h>
71 #include <sys/syscallargs.h>
72 
73 #include <machine/cpu.h>
74 
75 #include <uvm/uvm_extern.h>
76 
77 /*
78  * exit --
79  *	Death of process.
80  */
81 int
82 sys_exit(struct proc *p, void *v, register_t *retval)
83 {
84 	struct sys_exit_args /* {
85 		syscallarg(int) rval;
86 	} */ *uap = v;
87 
88 	exit1(p, W_EXITCODE(SCARG(uap, rval), 0), EXIT_NORMAL);
89 	/* NOTREACHED */
90 	return (0);
91 }
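
/*
 * Illustrative note (added; not in the original source): W_EXITCODE(rval, 0)
 * packs the exit value into a wait(2) status word, on BSD roughly
 * (rval << 8) | 0, so the parent recovers it with the usual macros, e.g.
 * for a child that called exit(3):
 *
 *	int status;
 *	pid_t pid = wait(&status);
 *	if (pid > 0 && WIFEXITED(status))
 *		printf("exit status %d\n", WEXITSTATUS(status));
 */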
92 
93 #ifdef RTHREADS
94 int
95 sys_threxit(struct proc *p, void *v, register_t *retval)
96 {
97 	struct sys_threxit_args /* {
98 		syscallarg(pid_t *) notdead;
99 	} */ *uap = v;
100 
101 	if (SCARG(uap, notdead) != NULL) {
102 		pid_t zero = 0;
103 		if (copyout(&zero, SCARG(uap, notdead), sizeof(zero))) {
104 			psignal(p, SIGSEGV);
105 		}
106 	}
107 	exit1(p, 0, EXIT_THREAD);
108 
109 	return (0);
110 }
111 #endif
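
/*
 * Sketch of intended use (assumption; not part of this file): the userland
 * thread library presumably passes in "notdead" the address of a per-thread
 * word (e.g. the thread's tid), which the kernel zeroes just before the
 * thread exits, so that a joining thread can notice the death, roughly:
 *
 *	while (tid != 0)
 *		... sleep or spin briefly ...
 */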
112 
113 /*
114  * Exit: deallocate address space and other resources, change proc state
115  * to zombie, and unlink proc from allproc and parent's lists.  Save exit
116  * status and rusage for wait().  Check for child processes and orphan them.
117  */
118 void
119 exit1(struct proc *p, int rv, int flags)
120 {
121 	struct proc *q, *nq;
122 
123 	if (p->p_pid == 1)
124 		panic("init died (signal %d, exit %d)",
125 		    WTERMSIG(rv), WEXITSTATUS(rv));
126 
127 	/* unlink ourselves from the active threads */
128 	TAILQ_REMOVE(&p->p_p->ps_threads, p, p_thr_link);
129 #ifdef RTHREADS
130 	if (TAILQ_EMPTY(&p->p_p->ps_threads))
131 		wakeup(&p->p_p->ps_threads);
132 	/*
133 	 * If one thread calls exit, we take down everybody.
134 	 * We have to be careful not to get recursively caught;
135 	 * this is kinda sick.
136 	 */
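	/*
	 * Commentary added for clarity (derived from the code below): a
	 * non-main thread marks the main proc P_IGNEXITRV, hands it our
	 * exit status, SIGKILLs it and sleeps on the process; the main
	 * thread takes the else branch, SIGKILLs its siblings, wakes the
	 * sleepers and then waits on ps_threads until every thread has
	 * unlinked itself above.
	 */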
137 	if (flags == EXIT_NORMAL && (p->p_flag & P_THREAD) &&
138 	    (p->p_p->ps_mainproc->p_flag & P_WEXIT) == 0) {
139 		/*
140 		 * we are one of the threads.  we SIGKILL the parent,
141 		 * it will wake us up again, then we proceed.
142 		 */
143 		atomic_setbits_int(&p->p_p->ps_mainproc->p_flag, P_IGNEXITRV);
144 		p->p_p->ps_mainproc->p_xstat = rv;
145 		ptsignal(p->p_p->ps_mainproc, SIGKILL, SPROPAGATED);
146 		tsleep(p->p_p, PUSER, "thrdying", 0);
147 	} else if ((p->p_flag & P_THREAD) == 0) {
148 		atomic_setbits_int(&p->p_flag, P_WEXIT);
149 		if (flags == EXIT_NORMAL) {
150 			q = TAILQ_FIRST(&p->p_p->ps_threads);
151 			for (; q != NULL; q = nq) {
152 				nq = TAILQ_NEXT(q, p_thr_link);
153 				atomic_setbits_int(&q->p_flag, P_IGNEXITRV);
154 				q->p_xstat = rv;
155 				ptsignal(q, SIGKILL, SPROPAGATED);
156 			}
157 		}
158 		wakeup(p->p_p);
159 		while (!TAILQ_EMPTY(&p->p_p->ps_threads))
160 			tsleep(&p->p_p->ps_threads, PUSER, "thrdeath", 0);
161 	}
162 #endif
163 
164 	if (p->p_flag & P_PROFIL)
165 		stopprofclock(p);
166 	p->p_ru = pool_get(&rusage_pool, PR_WAITOK);
167 	/*
168 	 * If parent is waiting for us to exit or exec, P_PPWAIT is set; we
169 	 * wake up the parent early to avoid deadlock.
170 	 */
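	/*
	 * (Note, based on the flag's usual meaning rather than this file:
	 * P_PPWAIT is typically set for a vfork(2)ed child, whose parent
	 * sleeps until the child execs or exits; waking it here avoids
	 * that deadlock.)
	 */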
171 	atomic_setbits_int(&p->p_flag, P_WEXIT);
172 	if (p->p_flag & P_PPWAIT) {
173 		atomic_clearbits_int(&p->p_flag, P_PPWAIT);
174 		wakeup(p->p_pptr);
175 	}
176 	p->p_sigignore = ~0;
177 	p->p_siglist = 0;
178 	timeout_del(&p->p_realit_to);
179 	timeout_del(&p->p_stats->p_virt_to);
180 	timeout_del(&p->p_stats->p_prof_to);
181 
182 	/*
183 	 * Close open files and release open-file table.
184 	 */
185 	fdfree(p);
186 
187 #ifdef SYSVSEM
188 	if ((p->p_flag & P_THREAD) == 0)
189 		semexit(p->p_p);
190 #endif
191 	if (SESS_LEADER(p)) {
192 		struct session *sp = p->p_session;
193 
194 		if (sp->s_ttyvp) {
195 			/*
196 			 * Controlling process.
197 			 * Signal foreground pgrp,
198 			 * drain controlling terminal
199 			 * and revoke access to controlling terminal.
200 			 */
201 			if (sp->s_ttyp->t_session == sp) {
202 				if (sp->s_ttyp->t_pgrp)
203 					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
204 				(void) ttywait(sp->s_ttyp);
205 				/*
206 				 * The tty could have been revoked
207 				 * if we blocked.
208 				 */
209 				if (sp->s_ttyvp)
210 					VOP_REVOKE(sp->s_ttyvp, REVOKEALL);
211 			}
212 			if (sp->s_ttyvp)
213 				vrele(sp->s_ttyvp);
214 			sp->s_ttyvp = NULL;
215 			/*
216 			 * s_ttyp is not zero'd; we use this to indicate
217 			 * that the session once had a controlling terminal.
218 			 * (for logging and informational purposes)
219 			 */
220 		}
221 		sp->s_leader = NULL;
222 	}
223 	fixjobc(p, p->p_pgrp, 0);
224 #ifdef ACCOUNTING
225 	(void)acct_process(p);
226 #endif
227 #ifdef KTRACE
228 	/*
229 	 * release trace file
230 	 */
231 	p->p_traceflag = 0;	/* don't trace the vrele() */
232 	if (p->p_tracep)
233 		ktrsettracevnode(p, NULL);
234 #endif
235 #if NSYSTRACE > 0
236 	if (ISSET(p->p_flag, P_SYSTRACE))
237 		systrace_exit(p);
238 #endif
239 	/*
240 	 * NOTE: WE ARE NO LONGER ALLOWED TO SLEEP!
241 	 */
242 	p->p_stat = SDEAD;
243 
244 	/*
245 	 * Remove proc from pidhash chain so looking it up won't
246 	 * work.  Move it from allproc to zombproc, but do not yet
247 	 * wake up the reaper.  We will put the proc on the
248 	 * deadproc list later (using the p_hash member), and
249 	 * wake up the reaper when we do.
250 	 */
251 	rw_enter_write(&allproclk);
252 	LIST_REMOVE(p, p_hash);
253 	LIST_REMOVE(p, p_list);
254 	LIST_INSERT_HEAD(&zombproc, p, p_list);
255 	rw_exit_write(&allproclk);
256 
257 	/*
258 	 * Give orphaned children to init(8).
259 	 */
260 	q = LIST_FIRST(&p->p_children);
261 	if (q)		/* only need this if any child is SZOMB */
262 		wakeup(initproc);
263 	for (; q != NULL; q = nq) {
264 		nq = LIST_NEXT(q, p_sibling);
265 		proc_reparent(q, initproc);
266 		/*
267 		 * Traced processes are killed
268 		 * since their existence means someone is screwing up.
269 		 */
270 		if (q->p_flag & P_TRACED) {
271 			atomic_clearbits_int(&q->p_flag, P_TRACED);
272 			psignal(q, SIGKILL);
273 		}
274 	}
275 
276 
277 	/*
278 	 * Save exit status and final rusage info, adding in child rusage
279 	 * info and self times.
280 	 */
281 	if (!(p->p_flag & P_IGNEXITRV))
282 		p->p_xstat = rv;
283 	*p->p_ru = p->p_stats->p_ru;
284 	calcru(p, &p->p_ru->ru_utime, &p->p_ru->ru_stime, NULL);
285 	ruadd(p->p_ru, &p->p_stats->p_cru);
286 
287 	/*
288 	 * clear %cpu usage during swap
289 	 */
290 	p->p_pctcpu = 0;
291 
292 	/*
293 	 * notify interested parties of our demise.
294 	 */
295 	if (p == p->p_p->ps_mainproc)
296 		KNOTE(&p->p_p->ps_klist, NOTE_EXIT);
297 
298 	/*
299 	 * Notify parent that we're gone.  If we have P_NOZOMBIE or parent has
300 	 * the P_NOCLDWAIT flag set, notify process 1 instead (and hope it
301 	 * will handle this situation).
302 	 */
303 	if ((p->p_flag & P_NOZOMBIE) || (p->p_pptr->p_flag & P_NOCLDWAIT)) {
304 		struct proc *pp = p->p_pptr;
305 		proc_reparent(p, initproc);
306 		/*
307 		 * If this was the last child of our parent, notify
308 		 * the parent, so that if it was wait(2)ing it will
309 		 * continue.
310 		 */
311 		if (LIST_EMPTY(&pp->p_children))
312 			wakeup(pp);
313 	}
314 
315 	/*
316 	 * Release the process's signal state.
317 	 */
318 	sigactsfree(p);
319 
320 	/*
321 	 * Other substructures are freed from reaper and wait().
322 	 */
323 
324 	/*
325 	 * If emulation has process exit hook, call it now.
326 	 */
327 	if (p->p_emul->e_proc_exit)
328 		(*p->p_emul->e_proc_exit)(p);
329 
330 	/*
331 	 * Finally, call machine-dependent code to switch to a new
332 	 * context (possibly the idle context).  Once we are no longer
333 	 * using the dead process's vmspace and stack, exit2() will be
334 	 * called to schedule those resources to be released by the
335 	 * reaper thread.
336 	 *
337 	 * Note that cpu_exit() will end with a call equivalent to
338 	 * cpu_switch(), finishing our execution (pun intended).
339 	 */
340 	uvmexp.swtch++;
341 	cpu_exit(p);
342 	panic("cpu_exit returned");
343 }
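
/*
 * Overview added for clarity, derived from the functions in this file:
 * exit1() tears down per-process state and finishes in cpu_exit(), which
 * switches away for the last time and arranges for exit2() to be called;
 * exit2() queues the proc on the deadproc list for the reaper() kernel
 * thread, which releases the remaining VM resources and turns the proc
 * into a zombie; the zombie is finally collected by the parent in
 * sys_wait4() via proc_finish_wait()/proc_zap().
 */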
344 
345 /*
346  * Locking of this proclist is special; it's accessed in a
347  * critical section of process exit, and thus locking it can't
348  * modify interrupt state.  We use a simple spin lock for this
349  * proclist.  Processes on this proclist are also on zombproc;
350  * we use the p_hash member to link them onto deadproc.
351  */
352 struct mutex deadproc_mutex = MUTEX_INITIALIZER(IPL_NONE);
353 struct proclist deadproc = LIST_HEAD_INITIALIZER(deadproc);
354 
355 /*
356  * We are called from cpu_exit() once it is safe to schedule the
357  * dead process's resources to be freed.
358  *
359  * NOTE: One must be careful with locking in this routine.  It's
360  * called from a critical section in machine-dependent code, so
361  * we should refrain from changing any interrupt state.
362  *
363  * We lock the deadproc list, place the proc on that list (using
364  * the p_hash member), and wake up the reaper.
365  */
366 void
367 exit2(struct proc *p)
368 {
369 	mtx_enter(&deadproc_mutex);
370 	LIST_INSERT_HEAD(&deadproc, p, p_hash);
371 	mtx_leave(&deadproc_mutex);
372 
373 	wakeup(&deadproc);
374 }
375 
376 /*
377  * Process reaper.  This is run by a kernel thread to free the resources
378  * of a dead process.  Once the resources are free, the process becomes
379  * a zombie, and the parent is allowed to read the undead's status.
380  */
381 void
382 reaper(void)
383 {
384 	struct proc *p;
385 
386 	KERNEL_PROC_UNLOCK(curproc);
387 
388 	SCHED_ASSERT_UNLOCKED();
389 
390 	for (;;) {
391 		mtx_enter(&deadproc_mutex);
392 		while ((p = LIST_FIRST(&deadproc)) == NULL)
393 			msleep(&deadproc, &deadproc_mutex, PVM, "reaper", 0);
394 
395 		/* Remove us from the deadproc list. */
396 		LIST_REMOVE(p, p_hash);
397 		mtx_leave(&deadproc_mutex);
398 
399 		KERNEL_PROC_LOCK(curproc);
400 
401 		/*
402 		 * Free the VM resources we're still holding on to.
403 		 * We must do this from a valid thread because doing
404 		 * so may block.
405 		 */
406 		uvm_exit(p);
407 
408 		/* Process is now a true zombie. */
409 		if ((p->p_flag & P_NOZOMBIE) == 0) {
410 			p->p_stat = SZOMB;
411 
412 			if (P_EXITSIG(p) != 0)
413 				psignal(p->p_pptr, P_EXITSIG(p));
414 			/* Wake up the parent so it can get exit status. */
415 			wakeup(p->p_pptr);
416 		} else {
417 			/* No one will wait for us; just zap the process now. */
418 			proc_zap(p);
419 		}
420 
421 		KERNEL_PROC_UNLOCK(curproc);
422 	}
423 }
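
/*
 * (Assumption about code outside this file: reaper() is expected to run
 * as a dedicated kernel thread created early in boot, e.g. with
 * kthread_create(9); it loops forever and never returns.)
 */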
424 
425 pid_t
426 sys_wait4(struct proc *q, void *v, register_t *retval)
427 {
428 	struct sys_wait4_args /* {
429 		syscallarg(pid_t) pid;
430 		syscallarg(int *) status;
431 		syscallarg(int) options;
432 		syscallarg(struct rusage *) rusage;
433 	} */ *uap = v;
434 	int nfound;
435 	struct proc *p;
436 	int status, error;
437 
438 	if (SCARG(uap, pid) == 0)
439 		SCARG(uap, pid) = -q->p_pgid;
440 	if (SCARG(uap, options) &~ (WUNTRACED|WNOHANG|WALTSIG|WCONTINUED))
441 		return (EINVAL);
442 
443 loop:
444 	nfound = 0;
445 	LIST_FOREACH(p, &q->p_children, p_sibling) {
446 		if ((p->p_flag & P_NOZOMBIE) ||
447 		    (SCARG(uap, pid) != WAIT_ANY &&
448 		    p->p_pid != SCARG(uap, pid) &&
449 		    p->p_pgid != -SCARG(uap, pid)))
450 			continue;
451 
452 		/*
453 		 * Wait only for processes with p_exitsig != SIGCHLD if
454 		 * WALTSIG is set; wait only for processes with p_exitsig ==
455 		 * SIGCHLD if WALTSIG is clear.
456 		 */
457 		if ((SCARG(uap, options) & WALTSIG) ?
458 		    (p->p_exitsig == SIGCHLD) : (P_EXITSIG(p) != SIGCHLD))
459 			continue;
460 
461 		nfound++;
462 		if (p->p_stat == SZOMB) {
463 			retval[0] = p->p_pid;
464 
465 			if (SCARG(uap, status)) {
466 				status = p->p_xstat;	/* convert to int */
467 				error = copyout(&status,
468 				    SCARG(uap, status), sizeof(status));
469 				if (error)
470 					return (error);
471 			}
472 			if (SCARG(uap, rusage) &&
473 			    (error = copyout(p->p_ru,
474 			    SCARG(uap, rusage), sizeof(struct rusage))))
475 				return (error);
476 			proc_finish_wait(q, p);
477 			return (0);
478 		}
479 		if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 &&
480 		    (p->p_flag & P_TRACED || SCARG(uap, options) & WUNTRACED)) {
481 			atomic_setbits_int(&p->p_flag, P_WAITED);
482 			retval[0] = p->p_pid;
483 
484 			if (SCARG(uap, status)) {
485 				status = W_STOPCODE(p->p_xstat);
486 				error = copyout(&status, SCARG(uap, status),
487 				    sizeof(status));
488 			} else
489 				error = 0;
490 			return (error);
491 		}
492 		if ((SCARG(uap, options) & WCONTINUED) && (p->p_flag & P_CONTINUED)) {
493 			atomic_clearbits_int(&p->p_flag, P_CONTINUED);
494 			retval[0] = p->p_pid;
495 
496 			if (SCARG(uap, status)) {
497 				status = _WCONTINUED;
498 				error = copyout(&status, SCARG(uap, status),
499 				    sizeof(status));
500 			} else
501 				error = 0;
502 			return (error);
503 		}
504 	}
505 	if (nfound == 0)
506 		return (ECHILD);
507 	if (SCARG(uap, options) & WNOHANG) {
508 		retval[0] = 0;
509 		return (0);
510 	}
511 	if ((error = tsleep(q, PWAIT | PCATCH, "wait", 0)) != 0)
512 		return (error);
513 	goto loop;
514 }
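
/*
 * Illustrative userland usage (example added for clarity; not kernel code):
 *
 *	int status;
 *	pid_t pid = wait4(-1, &status, WNOHANG, NULL);
 *	if (pid > 0 && WIFEXITED(status))
 *		printf("child %d exited with %d\n", pid, WEXITSTATUS(status));
 *
 * With WNOHANG the call returns 0 (retval[0] above) when no child has
 * changed state yet, instead of sleeping at the "loop:" label.
 */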
515 
516 void
517 proc_finish_wait(struct proc *waiter, struct proc *p)
518 {
519 	struct proc *t;
520 
521 	/*
522 	 * If we got the child via a ptrace 'attach',
523 	 * we need to give it back to the old parent.
524 	 */
525 	if (p->p_oppid && (t = pfind(p->p_oppid))) {
526 		atomic_clearbits_int(&p->p_flag, P_TRACED);
527 		p->p_oppid = 0;
528 		proc_reparent(p, t);
529 		if (p->p_exitsig != 0)
530 			psignal(t, p->p_exitsig);
531 		wakeup(t);
532 	} else {
533 		scheduler_wait_hook(waiter, p);
534 		p->p_xstat = 0;
535 		ruadd(&waiter->p_stats->p_cru, p->p_ru);
536 		proc_zap(p);
537 	}
538 }
539 
540 /*
541  * make process 'parent' the new parent of process 'child'.
542  */
543 void
544 proc_reparent(struct proc *child, struct proc *parent)
545 {
546 
547 	if (child->p_pptr == parent)
548 		return;
549 
550 	if (parent == initproc)
551 		child->p_exitsig = SIGCHLD;
552 
553 	LIST_REMOVE(child, p_sibling);
554 	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
555 	child->p_pptr = parent;
556 }
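
/*
 * Note added for clarity: within this file proc_reparent() is used both to
 * hand orphans and P_NOZOMBIE/P_NOCLDWAIT children to init (exit1()) and to
 * give a ptrace(2)-attached child back to its original parent
 * (proc_finish_wait()); p_exitsig is reset to SIGCHLD when the new parent
 * is init, presumably so that init sees standard SIGCHLD semantics.
 */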
557 
558 void
559 proc_zap(struct proc *p)
560 {
561 	pool_put(&rusage_pool, p->p_ru);
562 	if (p->p_ptstat)
563 		free(p->p_ptstat, M_SUBPROC);
564 
565 	/*
566 	 * Finally finished with old proc entry.
567 	 * Unlink it from its process group and free it.
568 	 */
569 	leavepgrp(p);
570 	rw_enter_write(&allproclk);
571 	LIST_REMOVE(p, p_list);	/* off zombproc */
572 	rw_exit_write(&allproclk);
573 	LIST_REMOVE(p, p_sibling);
574 
575 	/*
576 	 * Decrement the count of procs running with this uid.
577 	 */
578 	(void)chgproccnt(p->p_cred->p_ruid, -1);
579 
580 	/*
581 	 * Release reference to text vnode
582 	 */
583 	if (p->p_textvp)
584 		vrele(p->p_textvp);
585 
586 	/*
587 	 * Remove us from our process list, possibly killing the process
588 	 * in the process (pun intended).
589 	 */
590 	if (--p->p_p->ps_refcnt == 0) {
591 		KASSERT(TAILQ_EMPTY(&p->p_p->ps_threads));
592 		limfree(p->p_p->ps_limit);
593 		if (--p->p_p->ps_cred->p_refcnt == 0) {
594 			crfree(p->p_p->ps_cred->pc_ucred);
595 			pool_put(&pcred_pool, p->p_p->ps_cred);
596 		}
597 		pool_put(&process_pool, p->p_p);
598 	}
599 
600 	pool_put(&proc_pool, p);
601 	nprocs--;
602 }
603 
604