/*	$OpenBSD: kern_exit.c,v 1.64 2007/04/03 08:05:43 art Exp $	*/
/*	$NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/proc.h>
#include <sys/tty.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/wait.h>
#include <sys/file.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/ptrace.h>
#include <sys/acct.h>
#include <sys/filedesc.h>
#include <sys/signalvar.h>
#include <sys/sched.h>
#include <sys/ktrace.h>
#include <sys/pool.h>
#include <sys/mutex.h>
#ifdef SYSVSHM
#include <sys/shm.h>
#endif
#ifdef SYSVSEM
#include <sys/sem.h>
#endif

#include "systrace.h"
#include <dev/systrace.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <machine/cpu.h>

#include <uvm/uvm_extern.h>

/*
 * exit --
 *	Death of process.
 */
int
sys_exit(struct proc *p, void *v, register_t *retval)
{
	struct sys_exit_args /* {
		syscallarg(int) rval;
	} */ *uap = v;

	exit1(p, W_EXITCODE(SCARG(uap, rval), 0), EXIT_NORMAL);
	/* NOTREACHED */
	return (0);
}

#ifdef RTHREADS
int
sys_threxit(struct proc *p, void *v, register_t *retval)
{
	struct sys_threxit_args *uap = v;

	exit1(p, W_EXITCODE(SCARG(uap, rval), 0), EXIT_THREAD);

	return (0);
}
#endif

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(struct proc *p, int rv, int flags)
{
	struct proc *q, *nq;

	if (p->p_pid == 1)
		panic("init died (signal %d, exit %d)",
		    WTERMSIG(rv), WEXITSTATUS(rv));

#ifdef RTHREADS
	/*
	 * if one thread calls exit, we take down everybody.
	 * we have to be careful not to get recursively caught.
	 * this is kinda sick.
	 */
	if (flags == EXIT_NORMAL && p->p_p->ps_mainproc != p &&
	    (p->p_p->ps_mainproc->p_flag & P_WEXIT) == 0) {
		/*
		 * we are one of the threads.  we SIGKILL the parent,
		 * it will wake us up again, then we proceed.
		 */
		atomic_setbits_int(&p->p_thrparent->p_flag, P_IGNEXITRV);
		p->p_p->ps_mainproc->p_xstat = rv;
		psignal(p->p_p->ps_mainproc, SIGKILL);
		tsleep(p->p_p, PUSER, "thrdying", 0);
	} else if (p == p->p_p->ps_mainproc) {
		atomic_setbits_int(&p->p_flag, P_WEXIT);
		if (flags == EXIT_NORMAL) {
			q = TAILQ_FIRST(&p->p_p->ps_threads);
			for (; q != NULL; q = nq) {
				nq = TAILQ_NEXT(q, p_thr_link);

				/*
				 * Don't shoot ourselves again.
				 */
				if (q == p)
					continue;
				atomic_setbits_int(&q->p_flag, P_IGNEXITRV);
				q->p_xstat = rv;
				psignal(q, SIGKILL);
			}
		}
		wakeup(p->p_p);
		while (!TAILQ_EMPTY(&p->p_p->ps_threads))
			tsleep(&p->p_p->ps_threads, PUSER, "thrdeath", 0);
	}
#endif

	if (p->p_flag & P_PROFIL)
		stopprofclock(p);
	p->p_ru = pool_get(&rusage_pool, PR_WAITOK);
	/*
	 * If parent is waiting for us to exit or exec, P_PPWAIT is set; we
	 * wake up the parent early to avoid deadlock.
	 */
	atomic_setbits_int(&p->p_flag, P_WEXIT);
	atomic_clearbits_int(&p->p_flag, P_TRACED);
	if (p->p_flag & P_PPWAIT) {
		atomic_clearbits_int(&p->p_flag, P_PPWAIT);
		wakeup(p->p_pptr);
	}
	p->p_sigignore = ~0;
	p->p_siglist = 0;
	timeout_del(&p->p_realit_to);
	timeout_del(&p->p_stats->p_virt_to);
	timeout_del(&p->p_stats->p_prof_to);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(p);

#ifdef SYSVSEM
	semexit(p);
#endif
	if (SESS_LEADER(p)) {
		struct session *sp = p->p_session;

		if (sp->s_ttyvp) {
			/*
			 * Controlling process.
			 * Signal foreground pgrp,
			 * drain controlling terminal
			 * and revoke access to controlling terminal.
			 */
			if (sp->s_ttyp->t_session == sp) {
				if (sp->s_ttyp->t_pgrp)
					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
				(void) ttywait(sp->s_ttyp);
				/*
				 * The tty could have been revoked
				 * if we blocked.
				 */
				if (sp->s_ttyvp)
					VOP_REVOKE(sp->s_ttyvp, REVOKEALL);
			}
			if (sp->s_ttyvp)
				vrele(sp->s_ttyvp);
			sp->s_ttyvp = NULL;
			/*
			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)
			 */
		}
		sp->s_leader = NULL;
	}
	fixjobc(p, p->p_pgrp, 0);
#ifdef ACCOUNTING
	(void)acct_process(p);
#endif
#ifdef KTRACE
	/*
	 * release trace file
	 */
	p->p_traceflag = 0;	/* don't trace the vrele() */
	if (p->p_tracep)
		ktrsettracevnode(p, NULL);
#endif
#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE))
		systrace_exit(p);
#endif
	/*
	 * NOTE: WE ARE NO LONGER ALLOWED TO SLEEP!
	 */
	p->p_stat = SDEAD;

	/*
	 * Remove proc from pidhash chain so looking it up won't
	 * work.  Move it from allproc to zombproc, but do not yet
	 * wake up the reaper.  We will put the proc on the
	 * deadproc list later (using the p_hash member), and
	 * wake up the reaper when we do.
	 */
	LIST_REMOVE(p, p_hash);
	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);

	/*
	 * Give orphaned children to init(8).
	 */
	q = LIST_FIRST(&p->p_children);
	if (q)		/* only need this if any child is S_ZOMB */
		wakeup(initproc);
	for (; q != 0; q = nq) {
		nq = LIST_NEXT(q, p_sibling);
		proc_reparent(q, initproc);
		/*
		 * Traced processes are killed
		 * since their existence means someone is screwing up.
		 */
		if (q->p_flag & P_TRACED) {
			atomic_clearbits_int(&q->p_flag, P_TRACED);
			psignal(q, SIGKILL);
		}
	}

	/* unlink ourselves from the active threads */
	TAILQ_REMOVE(&p->p_p->ps_threads, p, p_thr_link);
#ifdef RTHREADS
	if (TAILQ_EMPTY(&p->p_p->ps_threads))
		wakeup(&p->p_p->ps_threads);
#endif

	/*
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.
	 */
	if (!(p->p_flag & P_IGNEXITRV))
		p->p_xstat = rv;
	*p->p_ru = p->p_stats->p_ru;
	calcru(p, &p->p_ru->ru_utime, &p->p_ru->ru_stime, NULL);
	ruadd(p->p_ru, &p->p_stats->p_cru);

	/*
	 * clear %cpu usage during swap
	 */
	p->p_pctcpu = 0;

	/*
	 * notify interested parties of our demise.
	 */
	KNOTE(&p->p_klist, NOTE_EXIT);

	/*
	 * Notify parent that we're gone.  If we have P_NOZOMBIE or parent has
	 * the P_NOCLDWAIT flag set, notify process 1 instead (and hope it
	 * will handle this situation).
	 */
	if ((p->p_flag & P_NOZOMBIE) || (p->p_pptr->p_flag & P_NOCLDWAIT)) {
		struct proc *pp = p->p_pptr;
		proc_reparent(p, initproc);
		/*
		 * If this was the last child of our parent, notify
		 * parent, so in case he was wait(2)ing, he will
		 * continue.
		 */
		if (LIST_EMPTY(&pp->p_children))
			wakeup(pp);
	}

	if (p->p_exitsig != 0)
		psignal(p->p_pptr, P_EXITSIG(p));
	wakeup(p->p_pptr);

	/*
	 * Release the process's signal state.
	 */
	sigactsfree(p);

	/*
	 * Clear curproc after we've done all operations
	 * that could block, and before tearing down the rest
	 * of the process state that might be used from clock, etc.
	 * Also, can't clear curproc while we're still runnable,
	 * as we're not on a run queue (we are current, just not
	 * a proper proc any longer!).
	 *
	 * Other substructures are freed from wait().
	 */
	curproc = NULL;
	limfree(p->p_limit);
	p->p_limit = NULL;

	/*
	 * If emulation has process exit hook, call it now.
	 */
	if (p->p_emul->e_proc_exit)
		(*p->p_emul->e_proc_exit)(p);

	/* This process no longer needs to hold the kernel lock. */
	KERNEL_PROC_UNLOCK(p);

	/*
	 * Finally, call machine-dependent code to switch to a new
	 * context (possibly the idle context).  Once we are no longer
	 * using the dead process's vmspace and stack, exit2() will be
	 * called to schedule those resources to be released by the
	 * reaper thread.
	 *
	 * Note that cpu_exit() will end with a call equivalent to
	 * cpu_switch(), finishing our execution (pun intended).
	 */
	uvmexp.swtch++;
	cpu_exit(p);
}

/*
 * Locking of this proclist is special; it's accessed in a
 * critical section of process exit, and thus locking it can't
 * modify interrupt state.  We use a simple spin lock for this
 * proclist.  Processes on this proclist are also on zombproc;
 * we use the p_hash member to link up to deadproc.
 */
struct mutex deadproc_mutex = MUTEX_INITIALIZER(IPL_NONE);
struct proclist deadproc = LIST_HEAD_INITIALIZER(deadproc);

/*
 * We are called from cpu_exit() once it is safe to schedule the
 * dead process's resources to be freed.
 *
 * NOTE: One must be careful with locking in this routine.  It's
 * called from a critical section in machine-dependent code, so
 * we should refrain from changing any interrupt state.
 *
 * We lock the deadproc list, place the proc on that list (using
 * the p_hash member), and wake up the reaper.
 */
void
exit2(struct proc *p)
{
	int s;

	mtx_enter(&deadproc_mutex);
	LIST_INSERT_HEAD(&deadproc, p, p_hash);
	mtx_leave(&deadproc_mutex);

	wakeup(&deadproc);

	SCHED_LOCK(s);
}

/*
 * Process reaper.  This is run by a kernel thread to free the resources
 * of a dead process.  Once the resources are free, the process becomes
 * a zombie, and the parent is allowed to read the undead's status.
 */
void
reaper(void)
{
	struct proc *p;

	KERNEL_PROC_UNLOCK(curproc);

	for (;;) {
		mtx_enter(&deadproc_mutex);
		p = LIST_FIRST(&deadproc);
		if (p == NULL) {
			/* No work for us; go to sleep until someone exits. */
			mtx_leave(&deadproc_mutex);
			(void) tsleep(&deadproc, PVM, "reaper", 0);
			continue;
		}

		/* Remove us from the deadproc list. */
		LIST_REMOVE(p, p_hash);
		mtx_leave(&deadproc_mutex);
		KERNEL_PROC_LOCK(curproc);

		/*
		 * Give machine-dependent code a chance to free any
		 * resources it couldn't free while still running on
		 * that process's context.  This must be done before
		 * uvm_exit(), in case these resources are in the PCB.
		 */
		cpu_wait(p);

		/*
		 * Free the VM resources we're still holding on to.
		 * We must do this from a valid thread because doing
		 * so may block.
		 */
		uvm_exit(p);

		/* Process is now a true zombie. */
		if ((p->p_flag & P_NOZOMBIE) == 0) {
			p->p_stat = SZOMB;

			/* Wake up the parent so it can get exit status. */
			psignal(p->p_pptr, SIGCHLD);
			wakeup(p->p_pptr);
		} else {
			/* No one will wait for us.  Just zap the process now. */
			proc_zap(p);
		}

		KERNEL_PROC_UNLOCK(curproc);
	}
}

pid_t
sys_wait4(struct proc *q, void *v, register_t *retval)
{
	struct sys_wait4_args /* {
		syscallarg(pid_t) pid;
		syscallarg(int *) status;
		syscallarg(int) options;
		syscallarg(struct rusage *) rusage;
	} */ *uap = v;
	int nfound;
	struct proc *p, *t;
	int status, error;

	if (SCARG(uap, pid) == 0)
		SCARG(uap, pid) = -q->p_pgid;
	if (SCARG(uap, options) &~ (WUNTRACED|WNOHANG|WALTSIG|WCONTINUED))
		return (EINVAL);

loop:
	nfound = 0;
	LIST_FOREACH(p, &q->p_children, p_sibling) {
		if ((p->p_flag & P_NOZOMBIE) ||
		    (SCARG(uap, pid) != WAIT_ANY &&
		    p->p_pid != SCARG(uap, pid) &&
		    p->p_pgid != -SCARG(uap, pid)))
			continue;

		/*
		 * Wait for processes with p_exitsig != SIGCHLD only if
		 * WALTSIG is set; wait for processes with p_exitsig ==
		 * SIGCHLD only if WALTSIG is clear.
		 */
		if ((SCARG(uap, options) & WALTSIG) ?
		    (p->p_exitsig == SIGCHLD) : (P_EXITSIG(p) != SIGCHLD))
			continue;

		nfound++;
		if (p->p_stat == SZOMB) {
			retval[0] = p->p_pid;

			if (SCARG(uap, status)) {
				status = p->p_xstat;	/* convert to int */
				error = copyout(&status,
				    SCARG(uap, status), sizeof(status));
				if (error)
					return (error);
			}
			if (SCARG(uap, rusage) &&
			    (error = copyout(p->p_ru,
			    SCARG(uap, rusage), sizeof(struct rusage))))
				return (error);

			/*
			 * If we got the child via a ptrace 'attach',
			 * we need to give it back to the old parent.
			 */
			if (p->p_oppid && (t = pfind(p->p_oppid))) {
				p->p_oppid = 0;
				proc_reparent(p, t);
				if (p->p_exitsig != 0)
					psignal(t, P_EXITSIG(p));
				wakeup(t);
				return (0);
			}

			scheduler_wait_hook(q, p);
			p->p_xstat = 0;
			ruadd(&q->p_stats->p_cru, p->p_ru);

			proc_zap(p);

			return (0);
		}
		if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 &&
		    (p->p_flag & P_TRACED || SCARG(uap, options) & WUNTRACED)) {
			atomic_setbits_int(&p->p_flag, P_WAITED);
			retval[0] = p->p_pid;

			if (SCARG(uap, status)) {
				status = W_STOPCODE(p->p_xstat);
				error = copyout(&status, SCARG(uap, status),
				    sizeof(status));
			} else
				error = 0;
			return (error);
		}
		if ((SCARG(uap, options) & WCONTINUED) &&
		    (p->p_flag & P_CONTINUED)) {
			atomic_clearbits_int(&p->p_flag, P_CONTINUED);
			retval[0] = p->p_pid;

			if (SCARG(uap, status)) {
				status = _WCONTINUED;
				error = copyout(&status, SCARG(uap, status),
				    sizeof(status));
			} else
				error = 0;
			return (error);
		}
	}
	if (nfound == 0)
		return (ECHILD);
	if (SCARG(uap, options) & WNOHANG) {
		retval[0] = 0;
		return (0);
	}
	if ((error = tsleep(q, PWAIT | PCATCH, "wait", 0)) != 0)
		return (error);
	goto loop;
}

/*
 * make process 'parent' the new parent of process 'child'.
 */
void
proc_reparent(struct proc *child, struct proc *parent)
{

	if (child->p_pptr == parent)
		return;

	if (parent == initproc)
		child->p_exitsig = SIGCHLD;

	LIST_REMOVE(child, p_sibling);
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
	child->p_pptr = parent;
}

void
proc_zap(struct proc *p)
{
	pool_put(&rusage_pool, p->p_ru);
	if (p->p_ptstat)
		free(p->p_ptstat, M_SUBPROC);

	/*
	 * Finally finished with old proc entry.
	 * Unlink it from its process group and free it.
	 */
	leavepgrp(p);
	LIST_REMOVE(p, p_list);	/* off zombproc */
	LIST_REMOVE(p, p_sibling);

	/*
	 * Decrement the count of procs running with this uid.
	 */
	(void)chgproccnt(p->p_cred->p_ruid, -1);

	/*
	 * Free up credentials.
	 */
	if (--p->p_cred->p_refcnt == 0) {
		crfree(p->p_cred->pc_ucred);
		pool_put(&pcred_pool, p->p_cred);
	}

	/*
	 * Release reference to text vnode
	 */
	if (p->p_textvp)
		vrele(p->p_textvp);

	/*
	 * Remove us from our process list, possibly killing the process
	 * in the process (pun intended).
	 */
	TAILQ_REMOVE(&p->p_p->ps_threads, p, p_thr_link);
	if (TAILQ_EMPTY(&p->p_p->ps_threads))
		pool_put(&process_pool, p->p_p);

	pool_put(&proc_pool, p);
	nprocs--;
}