/*	$OpenBSD: kern_exit.c,v 1.156 2016/03/29 08:46:08 mpi Exp $	*/
/*	$NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/proc.h>
#include <sys/tty.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/wait.h>
#include <sys/file.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/ptrace.h>
#include <sys/acct.h>
#include <sys/filedesc.h>
#include <sys/signalvar.h>
#include <sys/sched.h>
#include <sys/ktrace.h>
#include <sys/pool.h>
#include <sys/mutex.h>
#include <sys/pledge.h>
#ifdef SYSVSEM
#include <sys/sem.h>
#endif

#include "systrace.h"
#include <dev/systrace.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

void	proc_finish_wait(struct proc *, struct proc *);
void	process_zap(struct process *);
void	proc_free(struct proc *);

/*
 * exit --
 *	Death of process.
 */
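/*
 * sys_exit() hands exit1() a wait(2)-style status: W_EXITCODE(rval, 0)
 * packs the user-supplied exit code where WEXITSTATUS() will extract it,
 * with signal number 0 meaning a normal (non-signalled) exit.
 */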
int
sys_exit(struct proc *p, void *v, register_t *retval)
{
	struct sys_exit_args /* {
		syscallarg(int) rval;
	} */ *uap = v;

	exit1(p, W_EXITCODE(SCARG(uap, rval), 0), EXIT_NORMAL);
	/* NOTREACHED */
	return (0);
}

int
sys___threxit(struct proc *p, void *v, register_t *retval)
{
	struct sys___threxit_args /* {
		syscallarg(pid_t *) notdead;
	} */ *uap = v;

	if (SCARG(uap, notdead) != NULL) {
		pid_t zero = 0;
		if (copyout(&zero, SCARG(uap, notdead), sizeof(zero)))
			psignal(p, SIGSEGV);
	}
	exit1(p, 0, EXIT_THREAD);

	return (0);
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(struct proc *p, int rv, int flags)
{
	struct process *pr, *qr, *nqr;
	struct rusage *rup;
	struct vnode *ovp;

	atomic_setbits_int(&p->p_flag, P_WEXIT);

	pr = p->p_p;

	/* single-threaded? */
	if (!P_HASSIBLING(p)) {
		flags = EXIT_NORMAL;
	} else {
		/* nope, multi-threaded */
		if (flags == EXIT_NORMAL)
			single_thread_set(p, SINGLE_EXIT, 0);
		else if (flags == EXIT_THREAD)
			single_thread_check(p, 0);
	}

	if (flags == EXIT_NORMAL) {
		if (pr->ps_pid == 1)
			panic("init died (signal %d, exit %d)",
			    WTERMSIG(rv), WEXITSTATUS(rv));

		atomic_setbits_int(&pr->ps_flags, PS_EXITING);
		pr->ps_mainproc->p_xstat = rv;

		/*
		 * If parent is waiting for us to exit or exec, PS_PPWAIT
		 * is set; we wake up the parent early to avoid deadlock.
		 */
		if (pr->ps_flags & PS_PPWAIT) {
			atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
			atomic_clearbits_int(&pr->ps_pptr->ps_flags,
			    PS_ISPWAIT);
			wakeup(pr->ps_pptr);
		}
	}

	/* unlink ourselves from the active threads */
	TAILQ_REMOVE(&pr->ps_threads, p, p_thr_link);
	if ((p->p_flag & P_THREAD) == 0) {
		/* main thread gotta wait because it has the pid, et al */
		while (pr->ps_refcnt > 1)
			tsleep(&pr->ps_threads, PUSER, "thrdeath", 0);
		if (pr->ps_flags & PS_PROFIL)
			stopprofclock(pr);
	}

	rup = pr->ps_ru;
	if (rup == NULL) {
		rup = pool_get(&rusage_pool, PR_WAITOK | PR_ZERO);
		if (pr->ps_ru == NULL) {
			pr->ps_ru = rup;
		} else {
			pool_put(&rusage_pool, rup);
			rup = pr->ps_ru;
		}
	}
	p->p_siglist = 0;

	if ((p->p_flag & P_THREAD) == 0) {
		/* close open files and release open-file table */
		fdfree(p);

		timeout_del(&pr->ps_realit_to);
#ifdef SYSVSEM
		semexit(pr);
#endif
		if (SESS_LEADER(pr)) {
			struct session *sp = pr->ps_session;

			if (sp->s_ttyvp) {
				/*
				 * Controlling process.
				 * Signal foreground pgrp,
				 * drain controlling terminal
				 * and revoke access to controlling terminal.
				 */
				if (sp->s_ttyp->t_session == sp) {
					if (sp->s_ttyp->t_pgrp)
						pgsignal(sp->s_ttyp->t_pgrp,
						    SIGHUP, 1);
					ttywait(sp->s_ttyp);
					/*
					 * The tty could have been revoked
					 * if we blocked.
					 */
					if (sp->s_ttyvp)
						VOP_REVOKE(sp->s_ttyvp,
						    REVOKEALL);
				}
				ovp = sp->s_ttyvp;
				sp->s_ttyvp = NULL;
				if (ovp)
					vrele(ovp);
				/*
				 * s_ttyp is not zero'd; we use this to
				 * indicate that the session once had a
				 * controlling terminal.  (for logging and
				 * informational purposes)
				 */
			}
			sp->s_leader = NULL;
		}
		fixjobc(pr, pr->ps_pgrp, 0);

#ifdef ACCOUNTING
		acct_process(p);
#endif

#ifdef KTRACE
		/* release trace file */
		if (pr->ps_tracevp)
			ktrcleartrace(pr);
#endif

		/*
		 * If parent has the SAS_NOCLDWAIT flag set, we're not
		 * going to become a zombie.
		 */
		if (pr->ps_pptr->ps_sigacts->ps_flags & SAS_NOCLDWAIT)
			atomic_setbits_int(&pr->ps_flags, PS_NOZOMBIE);
	}

	p->p_fd = NULL;		/* zap the thread's copy */

#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE))
		systrace_exit(p);
#endif

	/*
	 * If emulation has thread exit hook, call it now.
	 */
	if (pr->ps_emul->e_proc_exit)
		(*pr->ps_emul->e_proc_exit)(p);

	/*
	 * Remove proc from pidhash chain and allproc so looking
	 * it up won't work.  We will put the proc on the
	 * deadproc list later (using the p_hash member), and
	 * wake up the reaper when we do.  If this is the last
	 * thread of a process that isn't PS_NOZOMBIE, we'll put
	 * the process on the zombprocess list below.
	 */
	/*
	 * NOTE: WE ARE NO LONGER ALLOWED TO SLEEP!
	 */
	p->p_stat = SDEAD;

	LIST_REMOVE(p, p_hash);
	LIST_REMOVE(p, p_list);

	if ((p->p_flag & P_THREAD) == 0) {
		LIST_REMOVE(pr, ps_list);

		if ((pr->ps_flags & PS_NOZOMBIE) == 0)
			LIST_INSERT_HEAD(&zombprocess, pr, ps_list);
		else {
			/*
			 * Not going to be a zombie, so it's now off all
			 * the lists scanned by ispidtaken(), so block
			 * fast reuse of the pid now.
			 */
			freepid(p->p_pid);
		}

		/*
		 * Give orphaned children to init(8).
		 */
		qr = LIST_FIRST(&pr->ps_children);
		if (qr)		/* only need this if any child is S_ZOMB */
			wakeup(initprocess);
		for (; qr != 0; qr = nqr) {
			nqr = LIST_NEXT(qr, ps_sibling);
			proc_reparent(qr, initprocess);
			/*
			 * Traced processes are killed since their
			 * existence means someone is screwing up.
			 */
			if (qr->ps_flags & PS_TRACED &&
			    !(qr->ps_flags & PS_EXITING)) {
				atomic_clearbits_int(&qr->ps_flags, PS_TRACED);
				/*
				 * If single threading is active,
				 * direct the signal to the active
				 * thread to avoid deadlock.
				 */
				if (qr->ps_single)
					ptsignal(qr->ps_single, SIGKILL,
					    STHREAD);
				else
					prsignal(qr, SIGKILL);
			}
		}
	}

	/* add thread's accumulated rusage into the process's total */
	ruadd(rup, &p->p_ru);
	tuagg(pr, p);

	/*
	 * clear %cpu usage during swap
	 */
	p->p_pctcpu = 0;

	if ((p->p_flag & P_THREAD) == 0) {
		/*
		 * Final thread has died, so add on our children's rusage
		 * and calculate the total times
		 */
		calcru(&pr->ps_tu, &rup->ru_utime, &rup->ru_stime, NULL);
		ruadd(rup, &pr->ps_cru);

		/* notify interested parties of our demise and clean up */
		knote_processexit(p);

		/*
		 * Notify parent that we're gone.  If we're not going to
		 * become a zombie, reparent to process 1 (init) so that
		 * we can wake our original parent to possibly unblock
		 * wait4() to return ECHILD.
		 */
		if (pr->ps_flags & PS_NOZOMBIE) {
			struct process *ppr = pr->ps_pptr;
			proc_reparent(pr, initprocess);
			wakeup(ppr);
		}

		/*
		 * Release the process's signal state.
		 */
		sigactsfree(pr);
	}

	/* just a thread? detach it from its process */
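	/*
	 * Dropping this thread's reference may leave only the main
	 * thread's; wake it in case it is waiting in the "thrdeath"
	 * sleep above so it can finish tearing down the process.
	 */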
	if (p->p_flag & P_THREAD) {
		/* scheduler_wait_hook(pr->ps_mainproc, p); XXX */
		if (--pr->ps_refcnt == 1)
			wakeup(&pr->ps_threads);
		KASSERT(pr->ps_refcnt > 0);
	}

	/*
	 * Other substructures are freed from reaper and wait().
	 */

	/*
	 * Finally, call machine-dependent code to switch to a new
	 * context (possibly the idle context).  Once we are no longer
	 * using the dead process's vmspace and stack, exit2() will be
	 * called to schedule those resources to be released by the
	 * reaper thread.
	 *
	 * Note that cpu_exit() will end with a call equivalent to
	 * cpu_switch(), finishing our execution (pun intended).
	 */
	uvmexp.swtch++;
	cpu_exit(p);
	panic("cpu_exit returned");
}

/*
 * Locking of this proclist is special; it's accessed in a
 * critical section of process exit, and thus locking it can't
 * modify interrupt state.  We use a simple spin lock for this
 * proclist.  We use the p_hash member to linkup to deadproc.
 */
struct mutex deadproc_mutex = MUTEX_INITIALIZER(IPL_NONE);
struct proclist deadproc = LIST_HEAD_INITIALIZER(deadproc);

/*
 * We are called from cpu_exit() once it is safe to schedule the
 * dead process's resources to be freed.
 *
 * NOTE: One must be careful with locking in this routine.  It's
 * called from a critical section in machine-dependent code, so
 * we should refrain from changing any interrupt state.
 *
 * We lock the deadproc list, place the proc on that list (using
 * the p_hash member), and wake up the reaper.
 */
void
exit2(struct proc *p)
{
	mtx_enter(&deadproc_mutex);
	LIST_INSERT_HEAD(&deadproc, p, p_hash);
	mtx_leave(&deadproc_mutex);

	wakeup(&deadproc);
}

void
proc_free(struct proc *p)
{
	crfree(p->p_ucred);
	pool_put(&proc_pool, p);
	nthreads--;
}

/*
 * Process reaper.  This is run by a kernel thread to free the resources
 * of a dead process.  Once the resources are free, the process becomes
 * a zombie, and the parent is allowed to read the undead's status.
 */
void
reaper(void)
{
	struct proc *p;

	KERNEL_UNLOCK();

	SCHED_ASSERT_UNLOCKED();

	for (;;) {
		mtx_enter(&deadproc_mutex);
		while ((p = LIST_FIRST(&deadproc)) == NULL)
			msleep(&deadproc, &deadproc_mutex, PVM, "reaper", 0);

		/* Remove us from the deadproc list. */
		LIST_REMOVE(p, p_hash);
		mtx_leave(&deadproc_mutex);

		KERNEL_LOCK();

		/*
		 * Free the VM resources we're still holding on to.
		 * We must do this from a valid thread because doing
		 * so may block.
		 */
		uvm_uarea_free(p);
		p->p_vmspace = NULL;		/* zap the thread's copy */

		if (p->p_flag & P_THREAD) {
			/* Just a thread */
			proc_free(p);
		} else {
			struct process *pr = p->p_p;

			/* Release the rest of the process's vmspace */
			uvm_exit(pr);

			if ((pr->ps_flags & PS_NOZOMBIE) == 0) {
				/* Process is now a true zombie. */
				atomic_setbits_int(&pr->ps_flags, PS_ZOMBIE);
				prsignal(pr->ps_pptr, SIGCHLD);

				/* Wake up the parent so it can get exit status. */
				wakeup(pr->ps_pptr);
			} else {
				/* No one will wait for us.  Just zap the process now */
				process_zap(pr);
			}
		}

		KERNEL_UNLOCK();
	}
}

int
sys_wait4(struct proc *q, void *v, register_t *retval)
{
	struct sys_wait4_args /* {
		syscallarg(pid_t) pid;
		syscallarg(int *) status;
		syscallarg(int) options;
		syscallarg(struct rusage *) rusage;
	} */ *uap = v;
	struct rusage ru;
	int status, error;

	error = dowait4(q, SCARG(uap, pid),
	    SCARG(uap, status) ? &status : NULL,
	    SCARG(uap, options), SCARG(uap, rusage) ? &ru : NULL, retval);
	if (error == 0 && retval[0] > 0 && SCARG(uap, status)) {
		error = copyout(&status, SCARG(uap, status), sizeof(status));
	}
	if (error == 0 && retval[0] > 0 && SCARG(uap, rusage)) {
		error = copyout(&ru, SCARG(uap, rusage), sizeof(ru));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(q, KTR_STRUCT))
			ktrrusage(q, &ru);
#endif
	}
	return (error);
}

int
dowait4(struct proc *q, pid_t pid, int *statusp, int options,
    struct rusage *rusage, register_t *retval)
{
	int nfound;
	struct process *pr;
	struct proc *p;
	int error;

	if (pid == 0)
		pid = -q->p_p->ps_pgid;
	if (options &~ (WUNTRACED|WNOHANG|WCONTINUED))
		return (EINVAL);

loop:
	nfound = 0;
	LIST_FOREACH(pr, &q->p_p->ps_children, ps_sibling) {
		p = pr->ps_mainproc;
		if ((pr->ps_flags & PS_NOZOMBIE) ||
		    (pid != WAIT_ANY &&
		    p->p_pid != pid &&
		    pr->ps_pgid != -pid))
			continue;

		nfound++;
		if (pr->ps_flags & PS_ZOMBIE) {
			retval[0] = p->p_pid;

			if (statusp != NULL)
				*statusp = p->p_xstat;	/* convert to int */
			if (rusage != NULL)
				memcpy(rusage, pr->ps_ru, sizeof(*rusage));
			proc_finish_wait(q, p);
			return (0);
		}
		if (pr->ps_flags & PS_TRACED &&
		    (pr->ps_flags & PS_WAITED) == 0 && pr->ps_single &&
		    pr->ps_single->p_stat == SSTOP &&
		    (pr->ps_single->p_flag & P_SUSPSINGLE) == 0) {
			single_thread_wait(pr);

			atomic_setbits_int(&pr->ps_flags, PS_WAITED);
			retval[0] = p->p_pid;

			if (statusp != NULL)
				*statusp = W_STOPCODE(pr->ps_single->p_xstat);
			if (rusage != NULL)
				memset(rusage, 0, sizeof(*rusage));
			return (0);
		}
		if (p->p_stat == SSTOP &&
		    (pr->ps_flags & PS_WAITED) == 0 &&
		    (p->p_flag & P_SUSPSINGLE) == 0 &&
		    (pr->ps_flags & PS_TRACED ||
		    options & WUNTRACED)) {
			atomic_setbits_int(&pr->ps_flags, PS_WAITED);
			retval[0] = p->p_pid;

			if (statusp != NULL)
				*statusp = W_STOPCODE(p->p_xstat);
			if (rusage != NULL)
				memset(rusage, 0, sizeof(*rusage));
			return (0);
		}
		if ((options & WCONTINUED) && (p->p_flag & P_CONTINUED)) {
			atomic_clearbits_int(&p->p_flag, P_CONTINUED);
			retval[0] = p->p_pid;

			if (statusp != NULL)
				*statusp = _WCONTINUED;
			if (rusage != NULL)
				memset(rusage, 0, sizeof(*rusage));
			return (0);
		}
	}
	if (nfound == 0)
		return (ECHILD);
	if (options & WNOHANG) {
		retval[0] = 0;
		return (0);
	}
	if ((error = tsleep(q->p_p, PWAIT | PCATCH, "wait", 0)) != 0)
		return (error);
	goto loop;
}

void
proc_finish_wait(struct proc *waiter, struct proc *p)
{
	struct process *pr, *tr;
	struct rusage *rup;

	/*
	 * If we got the child via a ptrace 'attach',
	 * we need to give it back to the old parent.
	 */
	pr = p->p_p;
	if (pr->ps_oppid && (tr = prfind(pr->ps_oppid))) {
		atomic_clearbits_int(&pr->ps_flags, PS_TRACED);
		pr->ps_oppid = 0;
		proc_reparent(pr, tr);
		prsignal(tr, SIGCHLD);
		wakeup(tr);
	} else {
		scheduler_wait_hook(waiter, p);
		p->p_xstat = 0;
		rup = &waiter->p_p->ps_cru;
		ruadd(rup, pr->ps_ru);
		LIST_REMOVE(pr, ps_list);	/* off zombprocess */
		freepid(p->p_pid);
		process_zap(pr);
	}
}

/*
 * make process 'parent' the new parent of process 'child'.
 */
void
proc_reparent(struct process *child, struct process *parent)
{

	if (child->ps_pptr == parent)
		return;

	LIST_REMOVE(child, ps_sibling);
	LIST_INSERT_HEAD(&parent->ps_children, child, ps_sibling);
	child->ps_pptr = parent;
}

void
process_zap(struct process *pr)
{
	struct vnode *otvp;
	struct proc *p = pr->ps_mainproc;

	/*
	 * Finally finished with old proc entry.
	 * Unlink it from its process group and free it.
	 */
	leavepgrp(pr);
	LIST_REMOVE(pr, ps_sibling);

	/*
	 * Decrement the count of procs running with this uid.
	 */
	(void)chgproccnt(pr->ps_ucred->cr_ruid, -1);

	pledge_dropwpaths(pr);

	/*
	 * Release reference to text vnode
	 */
	otvp = pr->ps_textvp;
	pr->ps_textvp = NULL;
	if (otvp)
		vrele(otvp);

	KASSERT(pr->ps_refcnt == 1);
	if (pr->ps_ptstat != NULL)
		free(pr->ps_ptstat, M_SUBPROC, sizeof(*pr->ps_ptstat));
	pool_put(&rusage_pool, pr->ps_ru);
	KASSERT(TAILQ_EMPTY(&pr->ps_threads));
	limfree(pr->ps_limit);
	crfree(pr->ps_ucred);
	pool_put(&process_pool, pr);
	nprocesses--;

	proc_free(p);
}