/*	$OpenBSD: kern_exit.c,v 1.121 2013/03/30 06:32:25 tedu Exp $	*/
/*	$NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/proc.h>
#include <sys/tty.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/wait.h>
#include <sys/file.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/ptrace.h>
#include <sys/acct.h>
#include <sys/filedesc.h>
#include <sys/signalvar.h>
#include <sys/sched.h>
#include <sys/ktrace.h>
#include <sys/pool.h>
#include <sys/mutex.h>
#ifdef SYSVSEM
#include <sys/sem.h>
#endif

#include "systrace.h"
#include <dev/systrace.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

/*
 * exit --
 *	Death of process.
 */
int
sys_exit(struct proc *p, void *v, register_t *retval)
{
	struct sys_exit_args /* {
		syscallarg(int) rval;
	} */ *uap = v;

	exit1(p, W_EXITCODE(SCARG(uap, rval), 0), EXIT_NORMAL);
	/* NOTREACHED */
	return (0);
}

int
sys___threxit(struct proc *p, void *v, register_t *retval)
{
	struct sys___threxit_args /* {
		syscallarg(pid_t *) notdead;
	} */ *uap = v;

	if (!rthreads_enabled)
		return (EINVAL);

	if (SCARG(uap, notdead) != NULL) {
		pid_t zero = 0;
		if (copyout(&zero, SCARG(uap, notdead), sizeof(zero))) {
			psignal(p, SIGSEGV);
		}
	}
	exit1(p, 0, EXIT_THREAD);

	return (0);
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(struct proc *p, int rv, int flags)
{
	struct process *pr, *qr, *nqr;
	struct rusage *rup;
	struct vnode *ovp;

	if (p->p_pid == 1)
		panic("init died (signal %d, exit %d)",
		    WTERMSIG(rv), WEXITSTATUS(rv));

	atomic_setbits_int(&p->p_flag, P_WEXIT);

	pr = p->p_p;

	/* single-threaded? */
	if (TAILQ_FIRST(&pr->ps_threads) == p &&
	    TAILQ_NEXT(p, p_thr_link) == NULL)
		flags = EXIT_NORMAL;
	else {
		/* nope, multi-threaded */
		if (flags == EXIT_NORMAL)
			single_thread_set(p, SINGLE_EXIT, 0);
		else if (flags == EXIT_THREAD)
			single_thread_check(p, 0);
	}

	if (flags == EXIT_NORMAL) {
		atomic_setbits_int(&pr->ps_flags, PS_EXITING);
		pr->ps_mainproc->p_xstat = rv;

		/*
		 * If parent is waiting for us to exit or exec, PS_PPWAIT
		 * is set; we wake up the parent early to avoid deadlock.
		 */
		if (pr->ps_flags & PS_PPWAIT) {
			atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
			atomic_clearbits_int(&pr->ps_pptr->ps_flags,
			    PS_ISPWAIT);
			wakeup(pr->ps_pptr);
		}
	}

	/* unlink ourselves from the active threads */
	TAILQ_REMOVE(&pr->ps_threads, p, p_thr_link);
	if ((p->p_flag & P_THREAD) == 0) {
		/* main thread gotta wait because it has the pid, et al */
		while (! TAILQ_EMPTY(&pr->ps_threads))
			tsleep(&pr->ps_threads, PUSER, "thrdeath", 0);
		if (pr->ps_flags & PS_PROFIL)
			stopprofclock(pr);
	} else if (TAILQ_EMPTY(&pr->ps_threads))
		wakeup(&pr->ps_threads);

	rup = pr->ps_ru;
	if (rup == NULL) {
		rup = pool_get(&rusage_pool, PR_WAITOK | PR_ZERO);

		if (pr->ps_ru == NULL)
			pr->ps_ru = rup;
		else {
			pool_put(&rusage_pool, rup);
			rup = pr->ps_ru;
		}
	}
	p->p_siglist = 0;

	/*
	 * Close open files and release open-file table.
	 */
	fdfree(p);

	if ((p->p_flag & P_THREAD) == 0) {
		timeout_del(&pr->ps_realit_to);
		timeout_del(&pr->ps_virt_to);
		timeout_del(&pr->ps_prof_to);
#ifdef SYSVSEM
		semexit(pr);
#endif
		if (SESS_LEADER(pr)) {
			struct session *sp = pr->ps_session;

			if (sp->s_ttyvp) {
				/*
				 * Controlling process.
				 * Signal foreground pgrp,
				 * drain controlling terminal
				 * and revoke access to controlling terminal.
				 */
				if (sp->s_ttyp->t_session == sp) {
					if (sp->s_ttyp->t_pgrp)
						pgsignal(sp->s_ttyp->t_pgrp,
						    SIGHUP, 1);
					(void) ttywait(sp->s_ttyp);
					/*
					 * The tty could have been revoked
					 * if we blocked.
					 */
					if (sp->s_ttyvp)
						VOP_REVOKE(sp->s_ttyvp,
						    REVOKEALL);
				}
				ovp = sp->s_ttyvp;
				sp->s_ttyvp = NULL;
				if (ovp)
					vrele(ovp);
				/*
				 * s_ttyp is not zero'd; we use this to
				 * indicate that the session once had a
				 * controlling terminal.  (for logging and
				 * informational purposes)
				 */
			}
			sp->s_leader = NULL;
		}
		fixjobc(pr, pr->ps_pgrp, 0);

#ifdef ACCOUNTING
		(void)acct_process(p);
#endif

#ifdef KTRACE
		/* release trace file */
		if (pr->ps_tracevp)
			ktrcleartrace(pr);
#endif
	}

#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE))
		systrace_exit(p);
#endif

	/*
	 * If emulation has process exit hook, call it now.
	 */
	if (p->p_emul->e_proc_exit)
		(*p->p_emul->e_proc_exit)(p);

	/*
	 * Remove proc from pidhash chain so looking it up won't
	 * work.  Move it from allproc to zombproc, but do not yet
	 * wake up the reaper.  We will put the proc on the
	 * deadproc list later (using the p_hash member), and
	 * wake up the reaper when we do.
	 */
	/*
	 * NOTE: WE ARE NO LONGER ALLOWED TO SLEEP!
	 */
	p->p_stat = SDEAD;

	LIST_REMOVE(p, p_hash);
	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);

	/*
	 * Give orphaned children to init(8).
	 */
	if ((p->p_flag & P_THREAD) == 0) {
		qr = LIST_FIRST(&pr->ps_children);
		if (qr)		/* only need this if any child is S_ZOMB */
			wakeup(initproc->p_p);
		for (; qr != 0; qr = nqr) {
			nqr = LIST_NEXT(qr, ps_sibling);
			proc_reparent(qr, initproc->p_p);
			/*
			 * Traced processes are killed since their
			 * existence means someone is screwing up.
			 */
			if (qr->ps_flags & PS_TRACED &&
			    !(qr->ps_flags & PS_EXITING)) {
				atomic_clearbits_int(&qr->ps_flags, PS_TRACED);
				/*
				 * If single threading is active,
				 * direct the signal to the active
				 * thread to avoid deadlock.
				 */
				if (qr->ps_single)
					ptsignal(qr->ps_single, SIGKILL,
					    STHREAD);
				else
					prsignal(qr, SIGKILL);
			}
		}
	}

	/* add thread's accumulated rusage into the process's total */
	ruadd(rup, &p->p_ru);

	/*
	 * clear %cpu usage during swap
	 */
	p->p_pctcpu = 0;

	if ((p->p_flag & P_THREAD) == 0) {
		/*
		 * Final thread has died, so add on our children's rusage
		 * and calculate the total times
		 */
		calcru(&pr->ps_tu, &rup->ru_utime, &rup->ru_stime, NULL);
		ruadd(rup, &pr->ps_cru);

		/* notify interested parties of our demise and clean up */
		knote_processexit(pr);

		/*
		 * Notify parent that we're gone.  If we have P_NOZOMBIE
		 * or parent has the SAS_NOCLDWAIT flag set, notify process 1
		 * instead (and hope it will handle this situation).
		 */
		if ((p->p_flag & P_NOZOMBIE) ||
		    (pr->ps_pptr->ps_mainproc->p_sigacts->ps_flags &
		    SAS_NOCLDWAIT)) {
			struct process *ppr = pr->ps_pptr;
			proc_reparent(pr, initproc->p_p);
			/*
			 * If this was the last child of our parent, notify
			 * parent, so in case he was wait(2)ing, he will
			 * continue.
			 */
			if (LIST_EMPTY(&ppr->ps_children))
				wakeup(ppr);
		}
	}

	/*
	 * Release the process's signal state.
	 */
	sigactsfree(p);

	/*
	 * Other substructures are freed from reaper and wait().
	 */

	/*
	 * Finally, call machine-dependent code to switch to a new
	 * context (possibly the idle context).
	 * Once we are no longer using the dead process's vmspace and
	 * stack, exit2() will be called to schedule those resources to
	 * be released by the reaper thread.
	 *
	 * Note that cpu_exit() will end with a call equivalent to
	 * cpu_switch(), finishing our execution (pun intended).
	 */
	uvmexp.swtch++;
	cpu_exit(p);
	panic("cpu_exit returned");
}

/*
 * Locking of this proclist is special; it's accessed in a
 * critical section of process exit, and thus locking it can't
 * modify interrupt state.  We use a simple spin lock for this
 * proclist.  Processes on this proclist are also on zombproc;
 * we use the p_hash member to link up to deadproc.
 */
struct mutex deadproc_mutex = MUTEX_INITIALIZER(IPL_NONE);
struct proclist deadproc = LIST_HEAD_INITIALIZER(deadproc);

/*
 * We are called from cpu_exit() once it is safe to schedule the
 * dead process's resources to be freed.
 *
 * NOTE: One must be careful with locking in this routine.  It's
 * called from a critical section in machine-dependent code, so
 * we should refrain from changing any interrupt state.
 *
 * We lock the deadproc list, place the proc on that list (using
 * the p_hash member), and wake up the reaper.
 */
void
exit2(struct proc *p)
{
	mtx_enter(&deadproc_mutex);
	LIST_INSERT_HEAD(&deadproc, p, p_hash);
	mtx_leave(&deadproc_mutex);

	wakeup(&deadproc);
}

/*
 * Process reaper.  This is run by a kernel thread to free the resources
 * of a dead process.  Once the resources are free, the process becomes
 * a zombie, and the parent is allowed to read the undead's status.
 */
void
reaper(void)
{
	struct proc *p;

	KERNEL_UNLOCK();

	SCHED_ASSERT_UNLOCKED();

	for (;;) {
		mtx_enter(&deadproc_mutex);
		while ((p = LIST_FIRST(&deadproc)) == NULL)
			msleep(&deadproc, &deadproc_mutex, PVM, "reaper", 0);

		/* Remove us from the deadproc list. */
		LIST_REMOVE(p, p_hash);
		mtx_leave(&deadproc_mutex);

		KERNEL_LOCK();

		/*
		 * Free the VM resources we're still holding on to.
		 * We must do this from a valid thread because doing
		 * so may block.
		 */
		uvm_exit(p);

		/* Process is now a true zombie. */
		if ((p->p_flag & P_NOZOMBIE) == 0) {
			p->p_stat = SZOMB;

			if (P_EXITSIG(p) != 0)
				prsignal(p->p_p->ps_pptr, P_EXITSIG(p));
			/* Wake up the parent so it can get exit status. */
			wakeup(p->p_p->ps_pptr);
		} else {
			/* No one will wait for us.  Just zap the process now. */
			proc_zap(p);
		}

		KERNEL_UNLOCK();
	}
}

int
sys_wait4(struct proc *q, void *v, register_t *retval)
{
	struct sys_wait4_args /* {
		syscallarg(pid_t) pid;
		syscallarg(int *) status;
		syscallarg(int) options;
		syscallarg(struct rusage *) rusage;
	} */ *uap = v;
	int nfound;
	struct process *pr;
	struct proc *p;
	int status, error;

	if (SCARG(uap, pid) == 0)
		SCARG(uap, pid) = -q->p_p->ps_pgid;
	if (SCARG(uap, options) &~ (WUNTRACED|WNOHANG|WALTSIG|WCONTINUED))
		return (EINVAL);

loop:
	nfound = 0;
	LIST_FOREACH(pr, &q->p_p->ps_children, ps_sibling) {
		p = pr->ps_mainproc;
		if ((p->p_flag & P_NOZOMBIE) ||
		    (SCARG(uap, pid) != WAIT_ANY &&
		    p->p_pid != SCARG(uap, pid) &&
		    pr->ps_pgid != -SCARG(uap, pid)))
			continue;

		/*
		 * Wait only for processes with p_exitsig != SIGCHLD if
		 * WALTSIG is set; wait for processes with p_exitsig ==
		 * SIGCHLD only if WALTSIG is clear.
		 */
		if ((SCARG(uap, options) & WALTSIG) ?
		    (p->p_exitsig == SIGCHLD) : (P_EXITSIG(p) != SIGCHLD))
			continue;

		nfound++;
		if (p->p_stat == SZOMB) {
			retval[0] = p->p_pid;

			if (SCARG(uap, status)) {
				status = p->p_xstat;	/* convert to int */
				error = copyout(&status,
				    SCARG(uap, status), sizeof(status));
				if (error)
					return (error);
			}
			if (SCARG(uap, rusage) &&
			    (error = copyout(pr->ps_ru,
			    SCARG(uap, rusage), sizeof(struct rusage))))
				return (error);
			proc_finish_wait(q, p);
			return (0);
		}
		if (pr->ps_flags & PS_TRACED &&
		    (pr->ps_flags & PS_WAITED) == 0 && pr->ps_single &&
		    pr->ps_single->p_stat == SSTOP &&
		    (pr->ps_single->p_flag & P_SUSPSINGLE) == 0) {
			atomic_setbits_int(&pr->ps_flags, PS_WAITED);
			retval[0] = p->p_pid;

			if (SCARG(uap, status)) {
				status = W_STOPCODE(pr->ps_single->p_xstat);
				error = copyout(&status, SCARG(uap, status),
				    sizeof(status));
			} else
				error = 0;
			return (error);
		}
		if (p->p_stat == SSTOP &&
		    (pr->ps_flags & PS_WAITED) == 0 &&
		    (p->p_flag & P_SUSPSINGLE) == 0 &&
		    (pr->ps_flags & PS_TRACED ||
		    SCARG(uap, options) & WUNTRACED)) {
			atomic_setbits_int(&pr->ps_flags, PS_WAITED);
			retval[0] = p->p_pid;

			if (SCARG(uap, status)) {
				status = W_STOPCODE(p->p_xstat);
				error = copyout(&status, SCARG(uap, status),
				    sizeof(status));
			} else
				error = 0;
			return (error);
		}
		if ((SCARG(uap, options) & WCONTINUED) &&
		    (p->p_flag & P_CONTINUED)) {
			atomic_clearbits_int(&p->p_flag, P_CONTINUED);
			retval[0] = p->p_pid;

			if (SCARG(uap, status)) {
				status = _WCONTINUED;
				error = copyout(&status, SCARG(uap, status),
				    sizeof(status));
			} else
				error = 0;
			return (error);
		}
	}
	if (nfound == 0)
		return (ECHILD);
	if (SCARG(uap, options) & WNOHANG) {
		retval[0] = 0;
		return (0);
	}
	if ((error = tsleep(q->p_p, PWAIT | PCATCH, "wait", 0)) != 0)
		return (error);
	goto loop;
}

void
proc_finish_wait(struct proc *waiter, struct proc *p)
{
	struct process *pr, *tr;
	struct rusage *rup;

	/*
	 * If we got the child via a ptrace 'attach',
	 * we need to give it back to the old parent.
	 */
	pr = p->p_p;
	if ((p->p_flag & P_THREAD) == 0 && pr->ps_oppid &&
	    (tr = prfind(pr->ps_oppid))) {
		atomic_clearbits_int(&pr->ps_flags, PS_TRACED);
		pr->ps_oppid = 0;
		proc_reparent(pr, tr);
		if (p->p_exitsig != 0)
			prsignal(tr, p->p_exitsig);
		wakeup(tr);
	} else {
		scheduler_wait_hook(waiter, p);
		p->p_xstat = 0;
		rup = &waiter->p_p->ps_cru;
		ruadd(rup, pr->ps_ru);
		proc_zap(p);
	}
}

/*
 * make process 'parent' the new parent of process 'child'.
 */
void
proc_reparent(struct process *child, struct process *parent)
{

	if (child->ps_pptr == parent)
		return;

	if (parent == initproc->p_p)
		child->ps_mainproc->p_exitsig = SIGCHLD;

	LIST_REMOVE(child, ps_sibling);
	LIST_INSERT_HEAD(&parent->ps_children, child, ps_sibling);
	child->ps_pptr = parent;
}

void
proc_zap(struct proc *p)
{
	struct process *pr = p->p_p;
	struct vnode *otvp;

	/*
	 * Finally finished with old proc entry.
	 * Unlink it from its process group and free it.
	 */
	if ((p->p_flag & P_THREAD) == 0)
		leavepgrp(pr);
	LIST_REMOVE(p, p_list);	/* off zombproc */
	if ((p->p_flag & P_THREAD) == 0) {
		LIST_REMOVE(pr, ps_sibling);

		/*
		 * Decrement the count of procs running with this uid.
		 */
		(void)chgproccnt(p->p_cred->p_ruid, -1);
	}

	/*
	 * Release reference to text vnode
	 */
	otvp = p->p_textvp;
	p->p_textvp = NULL;
	if (otvp)
		vrele(otvp);

	/*
	 * Remove us from our process list, possibly killing the process
	 * in the process (pun intended).
	 */
	if (--pr->ps_refcnt == 0) {
		if (pr->ps_ptstat != NULL)
			free(pr->ps_ptstat, M_SUBPROC);
		pool_put(&rusage_pool, pr->ps_ru);
		KASSERT(TAILQ_EMPTY(&pr->ps_threads));
		limfree(pr->ps_limit);
		crfree(pr->ps_cred->pc_ucred);
		pool_put(&pcred_pool, pr->ps_cred);
		pool_put(&process_pool, pr);
		nprocesses--;
	}

	pool_put(&proc_pool, p);
	nthreads--;
}