xref: /openbsd-src/sys/kern/kern_exit.c (revision a2e04e14c7bba4f83f5db2cc788417da45b260c4)
/*	$OpenBSD: kern_exit.c,v 1.105 2011/12/14 07:32:16 guenther Exp $	*/
/*	$NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/proc.h>
#include <sys/tty.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/wait.h>
#include <sys/file.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/ptrace.h>
#include <sys/acct.h>
#include <sys/filedesc.h>
#include <sys/signalvar.h>
#include <sys/sched.h>
#include <sys/ktrace.h>
#include <sys/pool.h>
#include <sys/mutex.h>
#ifdef SYSVSEM
#include <sys/sem.h>
#endif

#include "systrace.h"
#include <dev/systrace.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <machine/cpu.h>

#include <uvm/uvm_extern.h>

/*
 * exit --
 *	Death of process.
 */
int
sys_exit(struct proc *p, void *v, register_t *retval)
{
	struct sys_exit_args /* {
		syscallarg(int) rval;
	} */ *uap = v;

	exit1(p, W_EXITCODE(SCARG(uap, rval), 0), EXIT_NORMAL);
	/* NOTREACHED */
	return (0);
}

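/*
 * Illustrative note (not part of the original file): the status word
 * built with W_EXITCODE() above is what the parent later unpacks with
 * the standard <sys/wait.h> macros, e.g. in userland:
 *
 *	int status;
 *	pid_t pid = wait(&status);
 *	if (pid > 0 && WIFEXITED(status))
 *		printf("exit code %d\n", WEXITSTATUS(status));
 *	else if (pid > 0 && WIFSIGNALED(status))
 *		printf("killed by signal %d\n", WTERMSIG(status));
 */
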
int
sys_threxit(struct proc *p, void *v, register_t *retval)
{
	struct sys_threxit_args /* {
		syscallarg(pid_t *) notdead;
	} */ *uap = v;

	if (!rthreads_enabled)
		return (EINVAL);

	if (SCARG(uap, notdead) != NULL) {
		pid_t zero = 0;
		if (copyout(&zero, SCARG(uap, notdead), sizeof(zero))) {
			psignal(p, SIGSEGV);
		}
	}
	exit1(p, 0, EXIT_THREAD);

	return (0);
}

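/*
 * Sketch of intended use (an assumption, not taken from this file): a
 * userland thread library can pass the address of a per-thread
 * "still alive" word as the notdead argument; the copyout() above
 * zeroes that word just before the thread exits, so a joining thread
 * spinning or sleeping on it can tell when the exit has happened.
 * Roughly, with hypothetical names:
 *
 *	volatile pid_t	t_alive = 1;	// owned by the exiting thread
 *
 *	// exiting side: the thread-exit syscall, with &t_alive as notdead
 *	// joining side:
 *	while (t_alive != 0)
 *		sched_yield();
 */
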
/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(struct proc *p, int rv, int flags)
{
	struct process *pr, *qr, *nqr;

	if (p->p_pid == 1)
		panic("init died (signal %d, exit %d)",
		    WTERMSIG(rv), WEXITSTATUS(rv));

	atomic_setbits_int(&p->p_flag, P_WEXIT);

	pr = p->p_p;

	/* single-threaded? */
	if (TAILQ_FIRST(&pr->ps_threads) == p &&
	    TAILQ_NEXT(p, p_thr_link) == NULL)
		flags = EXIT_NORMAL;
	else {
		/* nope, multi-threaded */
		if (flags == EXIT_NORMAL)
			single_thread_set(p, SINGLE_EXIT, 0);
	}

	if (flags == EXIT_NORMAL) {
		pr->ps_mainproc->p_xstat = rv;

		/*
		 * If parent is waiting for us to exit or exec, PS_PPWAIT
		 * is set; we wake up the parent early to avoid deadlock.
		 */
		if (pr->ps_flags & PS_PPWAIT) {
			atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
			atomic_clearbits_int(&pr->ps_pptr->ps_flags,
			    PS_ISPWAIT);
			wakeup(pr->ps_pptr);
		}
	}

	/* unlink ourselves from the active threads */
	TAILQ_REMOVE(&pr->ps_threads, p, p_thr_link);
	if ((p->p_flag & P_THREAD) == 0) {
		/* the main thread must wait because it owns the pid, etc. */
		while (! TAILQ_EMPTY(&pr->ps_threads))
			tsleep(&pr->ps_threads, PUSER, "thrdeath", 0);
	} else if (TAILQ_EMPTY(&pr->ps_threads))
		wakeup(&pr->ps_threads);

	if (p->p_flag & P_PROFIL)
		stopprofclock(p);
	p->p_ru = pool_get(&rusage_pool, PR_WAITOK);
	p->p_siglist = 0;
	timeout_del(&p->p_realit_to);
	timeout_del(&p->p_stats->p_virt_to);
	timeout_del(&p->p_stats->p_prof_to);

	/*
	 * Close open files and release open-file table.
	 */
	fdfree(p);

	if ((p->p_flag & P_THREAD) == 0) {
#ifdef SYSVSEM
		semexit(pr);
#endif
		if (SESS_LEADER(pr)) {
			struct session *sp = pr->ps_session;

			if (sp->s_ttyvp) {
				/*
				 * Controlling process.
				 * Signal foreground pgrp,
				 * drain controlling terminal
				 * and revoke access to controlling terminal.
				 */
				if (sp->s_ttyp->t_session == sp) {
					if (sp->s_ttyp->t_pgrp)
						pgsignal(sp->s_ttyp->t_pgrp,
						    SIGHUP, 1);
					(void) ttywait(sp->s_ttyp);
					/*
					 * The tty could have been revoked
					 * if we blocked.
					 */
					if (sp->s_ttyvp)
						VOP_REVOKE(sp->s_ttyvp,
						    REVOKEALL);
				}
				if (sp->s_ttyvp)
					vrele(sp->s_ttyvp);
				sp->s_ttyvp = NULL;
				/*
				 * s_ttyp is not zero'd; we use this to
				 * indicate that the session once had a
				 * controlling terminal.  (for logging and
				 * informational purposes)
				 */
			}
			sp->s_leader = NULL;
		}
		fixjobc(pr, pr->ps_pgrp, 0);

#ifdef ACCOUNTING
		(void)acct_process(p);
#endif

#ifdef KTRACE
		/* release trace file */
		if (pr->ps_tracevp)
			ktrcleartrace(pr);
#endif
	}

#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE))
		systrace_exit(p);
#endif
	/*
	 * Remove proc from pidhash chain so looking it up won't
	 * work.  Move it from allproc to zombproc, but do not yet
	 * wake up the reaper.  We will put the proc on the
	 * deadproc list later (using the p_hash member), and
	 * wake up the reaper when we do.
	 */
	/*
	 * NOTE: WE ARE NO LONGER ALLOWED TO SLEEP!
	 */
	p->p_stat = SDEAD;

	LIST_REMOVE(p, p_hash);
	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);

	/*
	 * Give orphaned children to init(8).
	 */
	if ((p->p_flag & P_THREAD) == 0) {
		qr = LIST_FIRST(&pr->ps_children);
		if (qr)		/* only need this if any child is SZOMB */
			wakeup(initproc->p_p);
		for (; qr != 0; qr = nqr) {
			nqr = LIST_NEXT(qr, ps_sibling);
			proc_reparent(qr, initproc->p_p);
			/*
			 * Traced processes are killed
			 * since their existence means someone is screwing up.
			 */
			if (qr->ps_mainproc->p_flag & P_TRACED) {
				atomic_clearbits_int(&qr->ps_mainproc->p_flag,
				    P_TRACED);
				prsignal(qr, SIGKILL);
			}
		}
	}


	/*
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.
	 */
	*p->p_ru = p->p_stats->p_ru;
	calcru(p, &p->p_ru->ru_utime, &p->p_ru->ru_stime, NULL);
	ruadd(p->p_ru, &p->p_stats->p_cru);

	/*
	 * clear %cpu usage during swap
	 */
	p->p_pctcpu = 0;

	if ((p->p_flag & P_THREAD) == 0) {
		/* notify interested parties of our demise and clean up */
		knote_processexit(pr);

		/*
		 * Notify parent that we're gone.  If we have P_NOZOMBIE
		 * or parent has the SAS_NOCLDWAIT flag set, notify process 1
		 * instead (and hope it will handle this situation).
		 */
		if ((p->p_flag & P_NOZOMBIE) ||
		    (pr->ps_pptr->ps_mainproc->p_sigacts->ps_flags &
		    SAS_NOCLDWAIT)) {
			struct process *ppr = pr->ps_pptr;
			proc_reparent(pr, initproc->p_p);
			/*
			 * If this was the last child of our parent, notify
			 * the parent so that, if it is blocked in wait(2),
			 * it can continue.
			 */
			if (LIST_EMPTY(&ppr->ps_children))
				wakeup(ppr);
		}
	}

	/*
	 * Release the process's signal state.
	 */
	sigactsfree(p);

	/*
	 * Other substructures are freed from reaper and wait().
	 */

	/*
	 * If emulation has process exit hook, call it now.
	 */
	if (p->p_emul->e_proc_exit)
		(*p->p_emul->e_proc_exit)(p);

	/*
	 * Finally, call machine-dependent code to switch to a new
	 * context (possibly the idle context).  Once we are no longer
	 * using the dead process's vmspace and stack, exit2() will be
	 * called to schedule those resources to be released by the
	 * reaper thread.
	 *
	 * Note that cpu_exit() will end with a call equivalent to
	 * cpu_switch(), finishing our execution (pun intended).
	 */
	uvmexp.swtch++;
	cpu_exit(p);
	panic("cpu_exit returned");
}

/*
 * Locking of this proclist is special; it's accessed in a
 * critical section of process exit, and thus locking it can't
 * modify interrupt state.  We use a simple spin lock for this
 * proclist.  Processes on this proclist are also on zombproc;
 * we use the p_hash member to link up to deadproc.
 */
struct mutex deadproc_mutex = MUTEX_INITIALIZER(IPL_NONE);
struct proclist deadproc = LIST_HEAD_INITIALIZER(deadproc);
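
/*
 * Linkage summary for the code below (descriptive note): exit1() has
 * already unhooked the proc from the pidhash chain, freeing p_hash for
 * reuse, and moved it from allproc to zombproc via p_list.  exit2()
 * then threads the proc onto deadproc through p_hash; reaper() takes
 * it back off, and proc_zap() finally removes the zombproc entry.
 */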

/*
 * We are called from cpu_exit() once it is safe to schedule the
 * dead process's resources to be freed.
 *
 * NOTE: One must be careful with locking in this routine.  It's
 * called from a critical section in machine-dependent code, so
 * we should refrain from changing any interrupt state.
 *
 * We lock the deadproc list, place the proc on that list (using
 * the p_hash member), and wake up the reaper.
 */
void
exit2(struct proc *p)
{
	mtx_enter(&deadproc_mutex);
	LIST_INSERT_HEAD(&deadproc, p, p_hash);
	mtx_leave(&deadproc_mutex);

	wakeup(&deadproc);
}

/*
 * Process reaper.  This is run by a kernel thread to free the resources
 * of a dead process.  Once the resources are free, the process becomes
 * a zombie, and the parent is allowed to read the undead's status.
 */
void
reaper(void)
{
	struct proc *p;

	KERNEL_UNLOCK();

	SCHED_ASSERT_UNLOCKED();

	for (;;) {
		mtx_enter(&deadproc_mutex);
		while ((p = LIST_FIRST(&deadproc)) == NULL)
			msleep(&deadproc, &deadproc_mutex, PVM, "reaper", 0);

		/* Remove us from the deadproc list. */
		LIST_REMOVE(p, p_hash);
		mtx_leave(&deadproc_mutex);

		KERNEL_LOCK();

		/*
		 * Free the VM resources we're still holding on to.
		 * We must do this from a valid thread because doing
		 * so may block.
		 */
		uvm_exit(p);

		/* Process is now a true zombie. */
		if ((p->p_flag & P_NOZOMBIE) == 0) {
			p->p_stat = SZOMB;

			if (P_EXITSIG(p) != 0)
				prsignal(p->p_p->ps_pptr, P_EXITSIG(p));
			/* Wake up the parent so it can get exit status. */
			wakeup(p->p_p->ps_pptr);
		} else {
			/* No one will wait for us.  Just zap the process now. */
			proc_zap(p);
		}

		KERNEL_UNLOCK();
	}
}

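/*
 * wait4(2) implementation.  Illustrative userland usage (not part of
 * this file): a parent that polls for a dead or stopped child without
 * blocking might do
 *
 *	int status;
 *	struct rusage ru;
 *	pid_t pid = wait4(WAIT_ANY, &status, WNOHANG, &ru);
 *
 * where a return value of 0 means children exist but none has changed
 * state yet, matching the WNOHANG case at the end of the loop below.
 */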
int
sys_wait4(struct proc *q, void *v, register_t *retval)
{
	struct sys_wait4_args /* {
		syscallarg(pid_t) pid;
		syscallarg(int *) status;
		syscallarg(int) options;
		syscallarg(struct rusage *) rusage;
	} */ *uap = v;
	int nfound;
	struct process *pr;
	struct proc *p;
	int status, error;

	if (SCARG(uap, pid) == 0)
		SCARG(uap, pid) = -q->p_p->ps_pgid;
	if (SCARG(uap, options) &~ (WUNTRACED|WNOHANG|WALTSIG|WCONTINUED))
		return (EINVAL);

loop:
	nfound = 0;
	LIST_FOREACH(pr, &q->p_p->ps_children, ps_sibling) {
		p = pr->ps_mainproc;
		if ((p->p_flag & P_NOZOMBIE) ||
		    (SCARG(uap, pid) != WAIT_ANY &&
		    p->p_pid != SCARG(uap, pid) &&
		    pr->ps_pgid != -SCARG(uap, pid)))
			continue;

		/*
		 * Wait for processes with p_exitsig != SIGCHLD only if
		 * WALTSIG is set; wait for processes with p_exitsig ==
		 * SIGCHLD only if WALTSIG is clear.
		 */
		if ((SCARG(uap, options) & WALTSIG) ?
		    (p->p_exitsig == SIGCHLD) : (P_EXITSIG(p) != SIGCHLD))
			continue;

		nfound++;
		if (p->p_stat == SZOMB) {
			retval[0] = p->p_pid;

			if (SCARG(uap, status)) {
				status = p->p_xstat;	/* convert to int */
				error = copyout(&status,
				    SCARG(uap, status), sizeof(status));
				if (error)
					return (error);
			}
			if (SCARG(uap, rusage) &&
			    (error = copyout(p->p_ru,
			    SCARG(uap, rusage), sizeof(struct rusage))))
				return (error);
			proc_finish_wait(q, p);
			return (0);
		}
		if (p->p_stat == SSTOP &&
		    (p->p_flag & (P_WAITED|P_SUSPSINGLE)) == 0 &&
		    (p->p_flag & P_TRACED || SCARG(uap, options) & WUNTRACED)) {
			atomic_setbits_int(&p->p_flag, P_WAITED);
			retval[0] = p->p_pid;

			if (SCARG(uap, status)) {
				status = W_STOPCODE(p->p_xstat);
				error = copyout(&status, SCARG(uap, status),
				    sizeof(status));
			} else
				error = 0;
			return (error);
		}
		if ((SCARG(uap, options) & WCONTINUED) && (p->p_flag & P_CONTINUED)) {
			atomic_clearbits_int(&p->p_flag, P_CONTINUED);
			retval[0] = p->p_pid;

			if (SCARG(uap, status)) {
				status = _WCONTINUED;
				error = copyout(&status, SCARG(uap, status),
				    sizeof(status));
			} else
				error = 0;
			return (error);
		}
	}
	if (nfound == 0)
		return (ECHILD);
	if (SCARG(uap, options) & WNOHANG) {
		retval[0] = 0;
		return (0);
	}
	if ((error = tsleep(q->p_p, PWAIT | PCATCH, "wait", 0)) != 0)
		return (error);
	goto loop;
}

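/*
 * proc_finish_wait() is the tail end of a successful wait: either hand
 * a ptrace-attached child back to its original parent and notify it,
 * or fold the child's rusage into the waiter's child totals and free
 * the zombie via proc_zap().
 */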
void
proc_finish_wait(struct proc *waiter, struct proc *p)
{
	struct process *tr;

	/*
	 * If we got the child via a ptrace 'attach',
	 * we need to give it back to the old parent.
	 */
	if (p->p_oppid && (tr = prfind(p->p_oppid))) {
		atomic_clearbits_int(&p->p_flag, P_TRACED);
		p->p_oppid = 0;
		proc_reparent(p->p_p, tr);
		if (p->p_exitsig != 0)
			prsignal(tr, p->p_exitsig);
		wakeup(tr);
	} else {
		scheduler_wait_hook(waiter, p);
		p->p_xstat = 0;
		ruadd(&waiter->p_stats->p_cru, p->p_ru);
		proc_zap(p);
	}
}

/*
 * make process 'parent' the new parent of process 'child'.
 */
void
proc_reparent(struct process *child, struct process *parent)
{

	if (child->ps_pptr == parent)
		return;

	if (parent == initproc->p_p)
		child->ps_mainproc->p_exitsig = SIGCHLD;

	LIST_REMOVE(child, ps_sibling);
	LIST_INSERT_HEAD(&parent->ps_children, child, ps_sibling);
	child->ps_pptr = parent;
}

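/*
 * Final teardown of a waited-for (or P_NOZOMBIE) proc: free its rusage
 * and ptrace state, drop it from zombproc and the process group, adjust
 * the per-uid proc count, and release the process structure itself once
 * the last thread reference goes away.
 */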
void
proc_zap(struct proc *p)
{
	struct process *pr = p->p_p;

	pool_put(&rusage_pool, p->p_ru);
	if (p->p_ptstat)
		free(p->p_ptstat, M_SUBPROC);

	/*
	 * Finally finished with old proc entry.
	 * Unlink it from its process group and free it.
	 */
	if ((p->p_flag & P_THREAD) == 0)
		leavepgrp(pr);
	LIST_REMOVE(p, p_list);	/* off zombproc */
	if ((p->p_flag & P_THREAD) == 0)
		LIST_REMOVE(pr, ps_sibling);

	/*
	 * Decrement the count of procs running with this uid.
	 */
	(void)chgproccnt(p->p_cred->p_ruid, -1);

	/*
	 * Release reference to text vnode
	 */
	if (p->p_textvp)
		vrele(p->p_textvp);

	/*
	 * Remove us from our process list, possibly killing the process
	 * in the process (pun intended).
	 */
	if (--pr->ps_refcnt == 0) {
		KASSERT(TAILQ_EMPTY(&pr->ps_threads));
		limfree(pr->ps_limit);
		crfree(pr->ps_cred->pc_ucred);
		pool_put(&pcred_pool, pr->ps_cred);
		pool_put(&process_pool, pr);
	}

	pool_put(&proc_pool, p);
	nprocs--;
}
602