/*	$OpenBSD: kern_exit.c,v 1.124 2013/06/01 04:05:26 tedu Exp $	*/
/*	$NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/proc.h>
#include <sys/tty.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/wait.h>
#include <sys/file.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/ptrace.h>
#include <sys/acct.h>
#include <sys/filedesc.h>
#include <sys/signalvar.h>
#include <sys/sched.h>
#include <sys/ktrace.h>
#include <sys/pool.h>
#include <sys/mutex.h>
#ifdef SYSVSEM
#include <sys/sem.h>
#endif

#include "systrace.h"
#include <dev/systrace.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

/*
 * exit --
 *	Death of process.
 */
int
sys_exit(struct proc *p, void *v, register_t *retval)
{
	struct sys_exit_args /* {
		syscallarg(int) rval;
	} */ *uap = v;

	exit1(p, W_EXITCODE(SCARG(uap, rval), 0), EXIT_NORMAL);
	/* NOTREACHED */
	return (0);
}

int
sys___threxit(struct proc *p, void *v, register_t *retval)
{
	struct sys___threxit_args /* {
		syscallarg(pid_t *) notdead;
	} */ *uap = v;

	if (SCARG(uap, notdead) != NULL) {
		pid_t zero = 0;
		if (copyout(&zero, SCARG(uap, notdead), sizeof(zero)))
			psignal(p, SIGSEGV);
	}
	exit1(p, 0, EXIT_THREAD);

	return (0);
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(struct proc *p, int rv, int flags)
{
	struct process *pr, *qr, *nqr;
	struct rusage *rup;
	struct vnode *ovp;

	if (p->p_pid == 1)
		panic("init died (signal %d, exit %d)",
		    WTERMSIG(rv), WEXITSTATUS(rv));

	atomic_setbits_int(&p->p_flag, P_WEXIT);

	pr = p->p_p;

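	/*
	 * EXIT_NORMAL (exit(2) or a fatal signal) must take down every
	 * thread in the process; EXIT_THREAD (__threxit(2)) ends only the
	 * calling thread.  If we are the lone remaining thread, any exit
	 * becomes a full process exit; otherwise a normal exit first asks
	 * the other threads to exit via single_thread_set(SINGLE_EXIT),
	 * and a thread exit defers to any process-wide exit already in
	 * progress via single_thread_check().
	 */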
	/* single-threaded? */
	if (TAILQ_FIRST(&pr->ps_threads) == p &&
	    TAILQ_NEXT(p, p_thr_link) == NULL) {
		flags = EXIT_NORMAL;
	} else {
		/* nope, multi-threaded */
		if (flags == EXIT_NORMAL)
			single_thread_set(p, SINGLE_EXIT, 0);
		else if (flags == EXIT_THREAD)
			single_thread_check(p, 0);
	}

	if (flags == EXIT_NORMAL) {
		atomic_setbits_int(&pr->ps_flags, PS_EXITING);
		pr->ps_mainproc->p_xstat = rv;

		/*
		 * If parent is waiting for us to exit or exec, PS_PPWAIT
		 * is set; we wake up the parent early to avoid deadlock.
		 */
		if (pr->ps_flags & PS_PPWAIT) {
			atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
			atomic_clearbits_int(&pr->ps_pptr->ps_flags,
			    PS_ISPWAIT);
			wakeup(pr->ps_pptr);
		}
	}

	/* unlink ourselves from the active threads */
	TAILQ_REMOVE(&pr->ps_threads, p, p_thr_link);
	if ((p->p_flag & P_THREAD) == 0) {
		/* main thread gotta wait because it has the pid, et al */
		while (!TAILQ_EMPTY(&pr->ps_threads))
			tsleep(&pr->ps_threads, PUSER, "thrdeath", 0);
		if (pr->ps_flags & PS_PROFIL)
			stopprofclock(pr);
	} else if (TAILQ_EMPTY(&pr->ps_threads)) {
		wakeup(&pr->ps_threads);
	}

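	/*
	 * Make sure the process-wide rusage accumulator exists.  pool_get()
	 * with PR_WAITOK may sleep, so another exiting thread can win the
	 * race and install ps_ru first; in that case give ours back and
	 * use the existing one.
	 */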
	rup = pr->ps_ru;
	if (rup == NULL) {
		rup = pool_get(&rusage_pool, PR_WAITOK | PR_ZERO);
		if (pr->ps_ru == NULL) {
			pr->ps_ru = rup;
		} else {
			pool_put(&rusage_pool, rup);
			rup = pr->ps_ru;
		}
	}
	p->p_siglist = 0;

	/*
	 * Close open files and release open-file table.
	 */
	fdfree(p);

	if ((p->p_flag & P_THREAD) == 0) {
		timeout_del(&pr->ps_realit_to);
		timeout_del(&pr->ps_virt_to);
		timeout_del(&pr->ps_prof_to);
#ifdef SYSVSEM
		semexit(pr);
#endif
		if (SESS_LEADER(pr)) {
			struct session *sp = pr->ps_session;

			if (sp->s_ttyvp) {
				/*
				 * Controlling process.
				 * Signal foreground pgrp,
				 * drain controlling terminal
				 * and revoke access to controlling terminal.
				 */
				if (sp->s_ttyp->t_session == sp) {
					if (sp->s_ttyp->t_pgrp)
						pgsignal(sp->s_ttyp->t_pgrp,
						    SIGHUP, 1);
					ttywait(sp->s_ttyp);
					/*
					 * The tty could have been revoked
					 * if we blocked.
					 */
					if (sp->s_ttyvp)
						VOP_REVOKE(sp->s_ttyvp,
						    REVOKEALL);
				}
				ovp = sp->s_ttyvp;
				sp->s_ttyvp = NULL;
				if (ovp)
					vrele(ovp);
				/*
				 * s_ttyp is not zero'd; we use this to
				 * indicate that the session once had a
				 * controlling terminal.  (for logging and
				 * informational purposes)
				 */
			}
			sp->s_leader = NULL;
		}
		fixjobc(pr, pr->ps_pgrp, 0);

#ifdef ACCOUNTING
		acct_process(p);
#endif

#ifdef KTRACE
		/* release trace file */
		if (pr->ps_tracevp)
			ktrcleartrace(pr);
#endif
	}

#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE))
		systrace_exit(p);
#endif

	/*
	 * If emulation has process exit hook, call it now.
	 */
	if (p->p_emul->e_proc_exit)
		(*p->p_emul->e_proc_exit)(p);

	/*
	 * Remove proc from pidhash chain so looking it up won't
	 * work.  Move it from allproc to zombproc, but do not yet
	 * wake up the reaper.  We will put the proc on the
	 * deadproc list later (using the p_hash member), and
	 * wake up the reaper when we do.
	 */
	/*
	 * NOTE: WE ARE NO LONGER ALLOWED TO SLEEP!
	 */
	p->p_stat = SDEAD;

	LIST_REMOVE(p, p_hash);
	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);

	/*
	 * Give orphaned children to init(8).
	 */
	if ((p->p_flag & P_THREAD) == 0) {
		qr = LIST_FIRST(&pr->ps_children);
		if (qr)		/* only need this if any child is S_ZOMB */
			wakeup(initproc->p_p);
		for (; qr != 0; qr = nqr) {
			nqr = LIST_NEXT(qr, ps_sibling);
			proc_reparent(qr, initproc->p_p);
			/*
			 * Traced processes are killed since their
			 * existence means someone is screwing up.
			 */
			if (qr->ps_flags & PS_TRACED &&
			    !(qr->ps_flags & PS_EXITING)) {
				atomic_clearbits_int(&qr->ps_flags, PS_TRACED);
				/*
				 * If single threading is active,
				 * direct the signal to the active
				 * thread to avoid deadlock.
				 */
				if (qr->ps_single)
					ptsignal(qr->ps_single, SIGKILL,
					    STHREAD);
				else
					prsignal(qr, SIGKILL);
			}
		}
	}

	/* add thread's accumulated rusage into the process's total */
	ruadd(rup, &p->p_ru);

	/*
	 * clear %cpu usage during swap
	 */
	p->p_pctcpu = 0;

	if ((p->p_flag & P_THREAD) == 0) {
		/*
		 * Final thread has died, so add on our children's rusage
		 * and calculate the total times
		 */
		calcru(&pr->ps_tu, &rup->ru_utime, &rup->ru_stime, NULL);
		ruadd(rup, &pr->ps_cru);

		/* notify interested parties of our demise and clean up */
		knote_processexit(pr);

		/*
		 * Notify parent that we're gone.  If we have P_NOZOMBIE
		 * or parent has the SAS_NOCLDWAIT flag set, notify process 1
		 * instead (and hope it will handle this situation).
		 */
		if ((p->p_flag & P_NOZOMBIE) ||
		    (pr->ps_pptr->ps_mainproc->p_sigacts->ps_flags &
		    SAS_NOCLDWAIT)) {
			struct process *ppr = pr->ps_pptr;
			proc_reparent(pr, initproc->p_p);

			/*
			 * Notify parent, so in case he was wait(2)ing or
			 * executing waitpid(2) with our pid, he will
			 * continue.
			 */
			wakeup(ppr);
		}
	}

	/*
	 * Release the process's signal state.
	 */
	sigactsfree(p);

	/*
	 * Other substructures are freed from reaper and wait().
	 */

	/*
	 * Finally, call machine-dependent code to switch to a new
	 * context (possibly the idle context).  Once we are no longer
	 * using the dead process's vmspace and stack, exit2() will be
	 * called to schedule those resources to be released by the
	 * reaper thread.
	 *
	 * Note that cpu_exit() will end with a call equivalent to
	 * cpu_switch(), finishing our execution (pun intended).
	 */
	uvmexp.swtch++;
	cpu_exit(p);
	panic("cpu_exit returned");
}

/*
 * Locking of this proclist is special; it's accessed in a
 * critical section of process exit, and thus locking it can't
 * modify interrupt state.  We use a simple spin lock for this
 * proclist.  Processes on this proclist are also on zombproc;
 * we use the p_hash member to linkup to deadproc.
 */
struct mutex deadproc_mutex = MUTEX_INITIALIZER(IPL_NONE);
struct proclist deadproc = LIST_HEAD_INITIALIZER(deadproc);

/*
 * We are called from cpu_exit() once it is safe to schedule the
 * dead process's resources to be freed.
 *
 * NOTE: One must be careful with locking in this routine.  It's
 * called from a critical section in machine-dependent code, so
 * we should refrain from changing any interrupt state.
 *
 * We lock the deadproc list, place the proc on that list (using
 * the p_hash member), and wake up the reaper.
 */
void
exit2(struct proc *p)
{
	mtx_enter(&deadproc_mutex);
	LIST_INSERT_HEAD(&deadproc, p, p_hash);
	mtx_leave(&deadproc_mutex);

	wakeup(&deadproc);
}

/*
 * Process reaper.  This is run by a kernel thread to free the resources
 * of a dead process.  Once the resources are free, the process becomes
 * a zombie, and the parent is allowed to read the undead's status.
 */
void
reaper(void)
{
	struct proc *p;

	KERNEL_UNLOCK();

	SCHED_ASSERT_UNLOCKED();

	for (;;) {
		mtx_enter(&deadproc_mutex);
		while ((p = LIST_FIRST(&deadproc)) == NULL)
			msleep(&deadproc, &deadproc_mutex, PVM, "reaper", 0);

		/* Remove us from the deadproc list. */
		LIST_REMOVE(p, p_hash);
		mtx_leave(&deadproc_mutex);

		KERNEL_LOCK();

		/*
		 * Free the VM resources we're still holding on to.
		 * We must do this from a valid thread because doing
		 * so may block.
		 */
		uvm_exit(p);

		/* Process is now a true zombie. */
		if ((p->p_flag & P_NOZOMBIE) == 0) {
			p->p_stat = SZOMB;

			if (P_EXITSIG(p) != 0)
				prsignal(p->p_p->ps_pptr, P_EXITSIG(p));
			/* Wake up the parent so it can get exit status. */
			wakeup(p->p_p->ps_pptr);
		} else {
			/* No one will wait for us.  Just zap the process now */
			proc_zap(p);
		}

		KERNEL_UNLOCK();
	}
}

int
sys_wait4(struct proc *q, void *v, register_t *retval)
{
	struct sys_wait4_args /* {
		syscallarg(pid_t) pid;
		syscallarg(int *) status;
		syscallarg(int) options;
		syscallarg(struct rusage *) rusage;
	} */ *uap = v;
	int nfound;
	struct process *pr;
	struct proc *p;
	int status, error;

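	/*
	 * Per wait4(2), a pid of 0 means "any child in the caller's
	 * process group"; fold it into the negative-pid form so the
	 * matching below only has to compare pgids against -pid.
	 */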
	if (SCARG(uap, pid) == 0)
		SCARG(uap, pid) = -q->p_p->ps_pgid;
	if (SCARG(uap, options) &~ (WUNTRACED|WNOHANG|WALTSIG|WCONTINUED))
		return (EINVAL);

loop:
	nfound = 0;
	LIST_FOREACH(pr, &q->p_p->ps_children, ps_sibling) {
		p = pr->ps_mainproc;
		if ((p->p_flag & P_NOZOMBIE) ||
		    (SCARG(uap, pid) != WAIT_ANY &&
		    p->p_pid != SCARG(uap, pid) &&
		    pr->ps_pgid != -SCARG(uap, pid)))
			continue;

		/*
		 * Wait for processes with p_exitsig != SIGCHLD only if
		 * WALTSIG is set; wait for processes with p_exitsig ==
		 * SIGCHLD only if WALTSIG is clear.
		 */
		if ((SCARG(uap, options) & WALTSIG) ?
		    (p->p_exitsig == SIGCHLD) : (P_EXITSIG(p) != SIGCHLD))
			continue;

		nfound++;
		if (p->p_stat == SZOMB) {
			retval[0] = p->p_pid;

			if (SCARG(uap, status)) {
				status = p->p_xstat;	/* convert to int */
				error = copyout(&status,
				    SCARG(uap, status), sizeof(status));
				if (error)
					return (error);
			}
			if (SCARG(uap, rusage) &&
			    (error = copyout(pr->ps_ru,
			    SCARG(uap, rusage), sizeof(struct rusage))))
				return (error);
			proc_finish_wait(q, p);
			return (0);
		}
		if (pr->ps_flags & PS_TRACED &&
		    (pr->ps_flags & PS_WAITED) == 0 && pr->ps_single &&
		    pr->ps_single->p_stat == SSTOP &&
		    (pr->ps_single->p_flag & P_SUSPSINGLE) == 0) {
			atomic_setbits_int(&pr->ps_flags, PS_WAITED);
			retval[0] = p->p_pid;

			if (SCARG(uap, status)) {
				status = W_STOPCODE(pr->ps_single->p_xstat);
				error = copyout(&status, SCARG(uap, status),
				    sizeof(status));
			} else
				error = 0;
			return (error);
		}
		if (p->p_stat == SSTOP &&
		    (pr->ps_flags & PS_WAITED) == 0 &&
		    (p->p_flag & P_SUSPSINGLE) == 0 &&
		    (pr->ps_flags & PS_TRACED ||
		    SCARG(uap, options) & WUNTRACED)) {
			atomic_setbits_int(&pr->ps_flags, PS_WAITED);
			retval[0] = p->p_pid;

			if (SCARG(uap, status)) {
				status = W_STOPCODE(p->p_xstat);
				error = copyout(&status, SCARG(uap, status),
				    sizeof(status));
			} else
				error = 0;
			return (error);
		}
		if ((SCARG(uap, options) & WCONTINUED) &&
		    (p->p_flag & P_CONTINUED)) {
			atomic_clearbits_int(&p->p_flag, P_CONTINUED);
			retval[0] = p->p_pid;

			if (SCARG(uap, status)) {
				status = _WCONTINUED;
				error = copyout(&status, SCARG(uap, status),
				    sizeof(status));
			} else
				error = 0;
			return (error);
		}
	}
	if (nfound == 0)
		return (ECHILD);
	if (SCARG(uap, options) & WNOHANG) {
		retval[0] = 0;
		return (0);
	}
	if ((error = tsleep(q->p_p, PWAIT | PCATCH, "wait", 0)) != 0)
		return (error);
	goto loop;
}

void
proc_finish_wait(struct proc *waiter, struct proc *p)
{
	struct process *pr, *tr;
	struct rusage *rup;

	/*
	 * If we got the child via a ptrace 'attach',
	 * we need to give it back to the old parent.
	 */
	pr = p->p_p;
	if ((p->p_flag & P_THREAD) == 0 && pr->ps_oppid &&
	    (tr = prfind(pr->ps_oppid))) {
		atomic_clearbits_int(&pr->ps_flags, PS_TRACED);
		pr->ps_oppid = 0;
		proc_reparent(pr, tr);
		if (p->p_exitsig != 0)
			prsignal(tr, p->p_exitsig);
		wakeup(tr);
	} else {
		scheduler_wait_hook(waiter, p);
		p->p_xstat = 0;
		rup = &waiter->p_p->ps_cru;
		ruadd(rup, pr->ps_ru);
		proc_zap(p);
	}
}

/*
 * make process 'parent' the new parent of process 'child'.
 */
void
proc_reparent(struct process *child, struct process *parent)
{

	if (child->ps_pptr == parent)
		return;

	if (parent == initproc->p_p)
		child->ps_mainproc->p_exitsig = SIGCHLD;

	LIST_REMOVE(child, ps_sibling);
	LIST_INSERT_HEAD(&parent->ps_children, child, ps_sibling);
	child->ps_pptr = parent;
}

void
proc_zap(struct proc *p)
{
	struct process *pr = p->p_p;
	struct vnode *otvp;

	/*
	 * Finally finished with old proc entry.
	 * Unlink it from its process group and free it.
	 */
	if ((p->p_flag & P_THREAD) == 0)
		leavepgrp(pr);
	LIST_REMOVE(p, p_list);	/* off zombproc */
	if ((p->p_flag & P_THREAD) == 0) {
		LIST_REMOVE(pr, ps_sibling);

		/*
		 * Decrement the count of procs running with this uid.
		 */
		(void)chgproccnt(p->p_cred->p_ruid, -1);
	}

	/*
	 * Release reference to text vnode
	 */
	otvp = p->p_textvp;
	p->p_textvp = NULL;
	if (otvp)
		vrele(otvp);

	/*
	 * Remove us from our process list, possibly killing the process
	 * in the process (pun intended).
	 */
	if (--pr->ps_refcnt == 0) {
		if (pr->ps_ptstat != NULL)
			free(pr->ps_ptstat, M_SUBPROC);
		pool_put(&rusage_pool, pr->ps_ru);
		KASSERT(TAILQ_EMPTY(&pr->ps_threads));
		limfree(pr->ps_limit);
		crfree(pr->ps_cred->pc_ucred);
		pool_put(&pcred_pool, pr->ps_cred);
		pool_put(&process_pool, pr);
		nprocesses--;
	}

	pool_put(&proc_pool, p);
	nthreads--;
}