/*	$OpenBSD: kern_exit.c,v 1.113 2012/04/13 16:37:51 kettenis Exp $	*/
/*	$NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/proc.h>
#include <sys/tty.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/wait.h>
#include <sys/file.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/ptrace.h>
#include <sys/acct.h>
#include <sys/filedesc.h>
#include <sys/signalvar.h>
#include <sys/sched.h>
#include <sys/ktrace.h>
#include <sys/pool.h>
#include <sys/mutex.h>
#ifdef SYSVSEM
#include <sys/sem.h>
#endif

#include "systrace.h"
#include <dev/systrace.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <machine/cpu.h>

#include <uvm/uvm_extern.h>

/*
 * exit --
 *	Death of process.
 */
int
sys_exit(struct proc *p, void *v, register_t *retval)
{
	struct sys_exit_args /* {
		syscallarg(int) rval;
	} */ *uap = v;

	exit1(p, W_EXITCODE(SCARG(uap, rval), 0), EXIT_NORMAL);
	/* NOTREACHED */
	return (0);
}

int
sys___threxit(struct proc *p, void *v, register_t *retval)
{
	struct sys___threxit_args /* {
		syscallarg(pid_t *) notdead;
	} */ *uap = v;

	if (!rthreads_enabled)
		return (EINVAL);

	if (SCARG(uap, notdead) != NULL) {
		pid_t zero = 0;
		if (copyout(&zero, SCARG(uap, notdead), sizeof(zero))) {
			psignal(p, SIGSEGV);
		}
	}
	exit1(p, 0, EXIT_THREAD);

	return (0);
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(struct proc *p, int rv, int flags)
{
	struct process *pr, *qr, *nqr;
	struct rusage *rup;

	if (p->p_pid == 1)
		panic("init died (signal %d, exit %d)",
		    WTERMSIG(rv), WEXITSTATUS(rv));

	atomic_setbits_int(&p->p_flag, P_WEXIT);

	pr = p->p_p;

	/* single-threaded? */
	if (TAILQ_FIRST(&pr->ps_threads) == p &&
	    TAILQ_NEXT(p, p_thr_link) == NULL)
		flags = EXIT_NORMAL;
	else {
		/* nope, multi-threaded */
		if (flags == EXIT_NORMAL)
			single_thread_set(p, SINGLE_EXIT, 0);
	}

	if (flags == EXIT_NORMAL) {
		atomic_setbits_int(&pr->ps_flags, PS_EXITING);
		pr->ps_mainproc->p_xstat = rv;

		/*
		 * If parent is waiting for us to exit or exec, PS_PPWAIT
		 * is set; we wake up the parent early to avoid deadlock.
		 */
		if (pr->ps_flags & PS_PPWAIT) {
			atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
			atomic_clearbits_int(&pr->ps_pptr->ps_flags,
			    PS_ISPWAIT);
			wakeup(pr->ps_pptr);
		}
	}

	/* unlink ourselves from the active threads */
	TAILQ_REMOVE(&pr->ps_threads, p, p_thr_link);
	if (ISSET(p->p_flag, P_SUSPSINGLE)) {
		if (--pr->ps_singlecount == 0)
			wakeup(&pr->ps_singlecount);
	}
	if ((p->p_flag & P_THREAD) == 0) {
		/* main thread gotta wait because it has the pid, et al */
		while (! TAILQ_EMPTY(&pr->ps_threads))
			tsleep(&pr->ps_threads, PUSER, "thrdeath", 0);
	} else if (TAILQ_EMPTY(&pr->ps_threads))
		wakeup(&pr->ps_threads);

	if (p->p_flag & P_PROFIL)
		stopprofclock(p);
	rup = pr->ps_ru;
	if (rup == NULL) {
		rup = pool_get(&rusage_pool, PR_WAITOK | PR_ZERO);

		if (pr->ps_ru == NULL)
			pr->ps_ru = rup;
		else {
			pool_put(&rusage_pool, rup);
			rup = pr->ps_ru;
		}
	}
	p->p_siglist = 0;

	/*
	 * Close open files and release open-file table.
	 */
	fdfree(p);

	if ((p->p_flag & P_THREAD) == 0) {
		timeout_del(&pr->ps_realit_to);
		timeout_del(&pr->ps_virt_to);
		timeout_del(&pr->ps_prof_to);
#ifdef SYSVSEM
		semexit(pr);
#endif
		if (SESS_LEADER(pr)) {
			struct session *sp = pr->ps_session;

			if (sp->s_ttyvp) {
				/*
				 * Controlling process.
				 * Signal foreground pgrp,
				 * drain controlling terminal
				 * and revoke access to controlling terminal.
				 */
				if (sp->s_ttyp->t_session == sp) {
					if (sp->s_ttyp->t_pgrp)
						pgsignal(sp->s_ttyp->t_pgrp,
						    SIGHUP, 1);
					(void) ttywait(sp->s_ttyp);
					/*
					 * The tty could have been revoked
					 * if we blocked.
					 */
					if (sp->s_ttyvp)
						VOP_REVOKE(sp->s_ttyvp,
						    REVOKEALL);
				}
				if (sp->s_ttyvp)
					vrele(sp->s_ttyvp);
				sp->s_ttyvp = NULL;
				/*
				 * s_ttyp is not zero'd; we use this to
				 * indicate that the session once had a
				 * controlling terminal. (for logging and
				 * informational purposes)
				 */
			}
			sp->s_leader = NULL;
		}
		fixjobc(pr, pr->ps_pgrp, 0);

#ifdef ACCOUNTING
		(void)acct_process(p);
#endif

#ifdef KTRACE
		/* release trace file */
		if (pr->ps_tracevp)
			ktrcleartrace(pr);
#endif
	}

#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE))
		systrace_exit(p);
#endif
	/*
	 * Remove proc from pidhash chain so looking it up won't
	 * work.  Move it from allproc to zombproc, but do not yet
	 * wake up the reaper.  We will put the proc on the
	 * deadproc list later (using the p_hash member), and
	 * wake up the reaper when we do.
	 */
	/*
	 * NOTE: WE ARE NO LONGER ALLOWED TO SLEEP!
	 */
	p->p_stat = SDEAD;

	LIST_REMOVE(p, p_hash);
	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);

	/*
	 * Give orphaned children to init(8).
	 */
	if ((p->p_flag & P_THREAD) == 0) {
		qr = LIST_FIRST(&pr->ps_children);
		if (qr)		/* only need this if any child is S_ZOMB */
			wakeup(initproc->p_p);
		for (; qr != 0; qr = nqr) {
			nqr = LIST_NEXT(qr, ps_sibling);
			proc_reparent(qr, initproc->p_p);
			/*
			 * Traced processes are killed
			 * since their existence means someone is screwing up.
			 */
			if (qr->ps_flags & PS_TRACED) {
				atomic_clearbits_int(&qr->ps_flags, PS_TRACED);
				prsignal(qr, SIGKILL);
			}
		}
	}


	/* add thread's accumulated rusage into the process's total */
	ruadd(rup, &p->p_ru);

	/*
	 * clear %cpu usage during swap
	 */
	p->p_pctcpu = 0;

	if ((p->p_flag & P_THREAD) == 0) {
		/*
		 * Final thread has died, so add on our children's rusage
		 * and calculate the total times
		 */
		calcru(&pr->ps_tu, &rup->ru_utime, &rup->ru_stime, NULL);
		ruadd(rup, &pr->ps_cru);

		/* notify interested parties of our demise and clean up */
		knote_processexit(pr);

		/*
		 * Notify parent that we're gone.  If we have P_NOZOMBIE
		 * or parent has the SAS_NOCLDWAIT flag set, notify process 1
		 * instead (and hope it will handle this situation).
		 */
		if ((p->p_flag & P_NOZOMBIE) ||
		    (pr->ps_pptr->ps_mainproc->p_sigacts->ps_flags &
		    SAS_NOCLDWAIT)) {
			struct process *ppr = pr->ps_pptr;
			proc_reparent(pr, initproc->p_p);
			/*
			 * If this was the last child of our parent, notify
			 * parent, so in case he was wait(2)ing, he will
			 * continue.
			 */
			if (LIST_EMPTY(&ppr->ps_children))
				wakeup(ppr);
		}
	}

	/*
	 * Release the process's signal state.
	 */
	sigactsfree(p);

	/*
	 * Other substructures are freed from reaper and wait().
	 */

	/*
	 * If emulation has process exit hook, call it now.
	 */
	if (p->p_emul->e_proc_exit)
		(*p->p_emul->e_proc_exit)(p);

	/*
	 * Finally, call machine-dependent code to switch to a new
	 * context (possibly the idle context).  Once we are no longer
	 * using the dead process's vmspace and stack, exit2() will be
	 * called to schedule those resources to be released by the
	 * reaper thread.
	 *
	 * Note that cpu_exit() will end with a call equivalent to
	 * cpu_switch(), finishing our execution (pun intended).
	 */
	uvmexp.swtch++;
	cpu_exit(p);
	panic("cpu_exit returned");
}

/*
 * Locking of this proclist is special; it's accessed in a
 * critical section of process exit, and thus locking it can't
 * modify interrupt state.  We use a simple spin lock for this
 * proclist.  Processes on this proclist are also on zombproc;
 * we use the p_hash member to link up to deadproc.
 */
struct mutex deadproc_mutex = MUTEX_INITIALIZER(IPL_NONE);
struct proclist deadproc = LIST_HEAD_INITIALIZER(deadproc);

/*
 * We are called from cpu_exit() once it is safe to schedule the
 * dead process's resources to be freed.
 *
 * NOTE: One must be careful with locking in this routine.  It's
 * called from a critical section in machine-dependent code, so
 * we should refrain from changing any interrupt state.
 *
 * We lock the deadproc list, place the proc on that list (using
 * the p_hash member), and wake up the reaper.
 */
void
exit2(struct proc *p)
{
	mtx_enter(&deadproc_mutex);
	LIST_INSERT_HEAD(&deadproc, p, p_hash);
	mtx_leave(&deadproc_mutex);

	wakeup(&deadproc);
}

/*
 * Process reaper.  This is run by a kernel thread to free the resources
 * of a dead process.  Once the resources are free, the process becomes
 * a zombie, and the parent is allowed to read the undead's status.
 */
void
reaper(void)
{
	struct proc *p;

	KERNEL_UNLOCK();

	SCHED_ASSERT_UNLOCKED();

	for (;;) {
		mtx_enter(&deadproc_mutex);
		while ((p = LIST_FIRST(&deadproc)) == NULL)
			msleep(&deadproc, &deadproc_mutex, PVM, "reaper", 0);

		/* Remove us from the deadproc list. */
		LIST_REMOVE(p, p_hash);
		mtx_leave(&deadproc_mutex);

		KERNEL_LOCK();

		/*
		 * Free the VM resources we're still holding on to.
		 * We must do this from a valid thread because doing
		 * so may block.
		 */
		uvm_exit(p);

		/* Process is now a true zombie. */
		if ((p->p_flag & P_NOZOMBIE) == 0) {
			p->p_stat = SZOMB;

			if (P_EXITSIG(p) != 0)
				prsignal(p->p_p->ps_pptr, P_EXITSIG(p));
			/* Wake up the parent so it can get exit status. */
			wakeup(p->p_p->ps_pptr);
		} else {
			/* No one will wait for us.  Just zap the process now. */
			proc_zap(p);
		}

		KERNEL_UNLOCK();
	}
}

int
sys_wait4(struct proc *q, void *v, register_t *retval)
{
	struct sys_wait4_args /* {
		syscallarg(pid_t) pid;
		syscallarg(int *) status;
		syscallarg(int) options;
		syscallarg(struct rusage *) rusage;
	} */ *uap = v;
	int nfound;
	struct process *pr;
	struct proc *p;
	int status, error;

	if (SCARG(uap, pid) == 0)
		SCARG(uap, pid) = -q->p_p->ps_pgid;
	if (SCARG(uap, options) &~ (WUNTRACED|WNOHANG|WALTSIG|WCONTINUED))
		return (EINVAL);

loop:
	nfound = 0;
	LIST_FOREACH(pr, &q->p_p->ps_children, ps_sibling) {
		p = pr->ps_mainproc;
		if ((p->p_flag & P_NOZOMBIE) ||
		    (SCARG(uap, pid) != WAIT_ANY &&
		    p->p_pid != SCARG(uap, pid) &&
		    pr->ps_pgid != -SCARG(uap, pid)))
			continue;

		/*
		 * Wait for processes with p_exitsig != SIGCHLD only
		 * if WALTSIG is set; wait for processes with p_exitsig ==
		 * SIGCHLD only if WALTSIG is clear.
		 */
		if ((SCARG(uap, options) & WALTSIG) ?
		    (p->p_exitsig == SIGCHLD) : (P_EXITSIG(p) != SIGCHLD))
			continue;

		nfound++;
		if (p->p_stat == SZOMB) {
			retval[0] = p->p_pid;

			if (SCARG(uap, status)) {
				status = p->p_xstat;	/* convert to int */
				error = copyout(&status,
				    SCARG(uap, status), sizeof(status));
				if (error)
					return (error);
			}
			if (SCARG(uap, rusage) &&
			    (error = copyout(pr->ps_ru,
			    SCARG(uap, rusage), sizeof(struct rusage))))
				return (error);
			proc_finish_wait(q, p);
			return (0);
		}
		if (pr->ps_flags & PS_TRACED &&
		    (pr->ps_flags & PS_WAITED) == 0 && pr->ps_single &&
		    pr->ps_single->p_stat == SSTOP &&
		    (pr->ps_single->p_flag & P_SUSPSINGLE) == 0) {
			atomic_setbits_int(&pr->ps_flags, PS_WAITED);
			retval[0] = p->p_pid;

			if (SCARG(uap, status)) {
				status = W_STOPCODE(pr->ps_single->p_xstat);
				error = copyout(&status, SCARG(uap, status),
				    sizeof(status));
			} else
				error = 0;
			return (error);
		}
		if (p->p_stat == SSTOP &&
		    (pr->ps_flags & PS_WAITED) == 0 &&
		    (p->p_flag & P_SUSPSINGLE) == 0 &&
		    (pr->ps_flags & PS_TRACED ||
		    SCARG(uap, options) & WUNTRACED)) {
			atomic_setbits_int(&pr->ps_flags, PS_WAITED);
			retval[0] = p->p_pid;

			if (SCARG(uap, status)) {
				status = W_STOPCODE(p->p_xstat);
				error = copyout(&status, SCARG(uap, status),
				    sizeof(status));
			} else
				error = 0;
			return (error);
		}
		if ((SCARG(uap, options) & WCONTINUED) && (p->p_flag & P_CONTINUED)) {
			atomic_clearbits_int(&p->p_flag, P_CONTINUED);
			retval[0] = p->p_pid;

			if (SCARG(uap, status)) {
				status = _WCONTINUED;
				error = copyout(&status, SCARG(uap, status),
				    sizeof(status));
			} else
				error = 0;
			return (error);
		}
	}
	if (nfound == 0)
		return (ECHILD);
	if (SCARG(uap, options) & WNOHANG) {
		retval[0] = 0;
		return (0);
	}
	if ((error = tsleep(q->p_p, PWAIT | PCATCH, "wait", 0)) != 0)
		return (error);
	goto loop;
}

void
proc_finish_wait(struct proc *waiter, struct proc *p)
{
	struct process *pr, *tr;
	struct rusage *rup;

	/*
	 * If we got the child via a ptrace 'attach',
	 * we need to give it back to the old parent.
	 */
	pr = p->p_p;
	if ((p->p_flag & P_THREAD) == 0 && pr->ps_oppid &&
	    (tr = prfind(pr->ps_oppid))) {
		atomic_clearbits_int(&pr->ps_flags, PS_TRACED);
		pr->ps_oppid = 0;
		proc_reparent(pr, tr);
		if (p->p_exitsig != 0)
			prsignal(tr, p->p_exitsig);
		wakeup(tr);
	} else {
		scheduler_wait_hook(waiter, p);
		p->p_xstat = 0;
		rup = &waiter->p_p->ps_cru;
		ruadd(rup, pr->ps_ru);
		proc_zap(p);
	}
}

/*
 * make process 'parent' the new parent of process 'child'.
 */
void
proc_reparent(struct process *child, struct process *parent)
{

	if (child->ps_pptr == parent)
		return;

	if (parent == initproc->p_p)
		child->ps_mainproc->p_exitsig = SIGCHLD;

	LIST_REMOVE(child, ps_sibling);
	LIST_INSERT_HEAD(&parent->ps_children, child, ps_sibling);
	child->ps_pptr = parent;
}

void
proc_zap(struct proc *p)
{
	struct process *pr = p->p_p;

	/*
	 * Finally finished with old proc entry.
	 * Unlink it from its process group and free it.
	 */
	if ((p->p_flag & P_THREAD) == 0)
		leavepgrp(pr);
	LIST_REMOVE(p, p_list);	/* off zombproc */
	if ((p->p_flag & P_THREAD) == 0) {
		LIST_REMOVE(pr, ps_sibling);

		/*
		 * Decrement the count of procs running with this uid.
		 */
		(void)chgproccnt(p->p_cred->p_ruid, -1);
	}

	/*
	 * Release reference to text vnode
	 */
	if (p->p_textvp)
		vrele(p->p_textvp);

	/*
	 * Remove us from our process list, possibly killing the process
	 * in the process (pun intended).
	 */
	if (--pr->ps_refcnt == 0) {
		if (pr->ps_ptstat != NULL)
			free(pr->ps_ptstat, M_SUBPROC);
		pool_put(&rusage_pool, pr->ps_ru);
		KASSERT(TAILQ_EMPTY(&pr->ps_threads));
		limfree(pr->ps_limit);
		crfree(pr->ps_cred->pc_ucred);
		pool_put(&pcred_pool, pr->ps_cred);
		pool_put(&process_pool, pr);
		nprocesses--;
	}

	pool_put(&proc_pool, p);
	nthreads--;
}