/*	$NetBSD: kern_exit.c,v 1.157 2006/07/19 21:11:37 ad Exp $	*/

/*-
 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.10 (Berkeley) 2/23/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_exit.c,v 1.157 2006/07/19 21:11:37 ad Exp $");

#include "opt_ktrace.h"
#include "opt_perfctrs.h"
#include "opt_systrace.h"
#include "opt_sysv.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/proc.h>
#include <sys/tty.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/kernel.h>
#include <sys/ktrace.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/wait.h>
#include <sys/file.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/resourcevar.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/ptrace.h>
#include <sys/acct.h>
#include <sys/filedesc.h>
#include <sys/ras.h>
#include <sys/signalvar.h>
#include <sys/sched.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/systrace.h>
#include <sys/kauth.h>

#include <machine/cpu.h>

#include <uvm/uvm_extern.h>

#define DEBUG_EXIT

#ifdef DEBUG_EXIT
int debug_exit = 0;
#define DPRINTF(x) if (debug_exit) printf x
#else
#define DPRINTF(x)
#endif

static void lwp_exit_hook(struct lwp *, void *);

/*
 * Fill in the appropriate signal information, and signal the parent.
 */
static void
exit_psignal(struct proc *p, struct proc *pp, ksiginfo_t *ksi)
{

        (void)memset(ksi, 0, sizeof(ksiginfo_t));
        if ((ksi->ksi_signo = P_EXITSIG(p)) == SIGCHLD) {
                if (WIFSIGNALED(p->p_xstat)) {
                        if (WCOREDUMP(p->p_xstat))
                                ksi->ksi_code = CLD_DUMPED;
                        else
                                ksi->ksi_code = CLD_KILLED;
                } else {
                        ksi->ksi_code = CLD_EXITED;
                }
        }
        /*
         * We fill those in, even for non-SIGCHLD.
         */
        ksi->ksi_pid = p->p_pid;
        ksi->ksi_uid = kauth_cred_geteuid(p->p_cred);
        ksi->ksi_status = p->p_xstat;
        /* XXX: is this still valid? */
        ksi->ksi_utime = p->p_ru->ru_utime.tv_sec;
        ksi->ksi_stime = p->p_ru->ru_stime.tv_sec;
}

/*
 * exit --
 *	Death of process.
 */
int
sys_exit(struct lwp *l, void *v, register_t *retval)
{
        struct sys_exit_args /* {
                syscallarg(int)	rval;
        } */ *uap = v;

        /* Don't call exit1() multiple times in the same process. */
        if (l->l_proc->p_flag & P_WEXIT)
                lwp_exit(l);

        exit1(l, W_EXITCODE(SCARG(uap, rval), 0));
        /* NOTREACHED */
        return (0);
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(struct lwp *l, int rv)
{
        struct proc *p, *q, *nq;
        int s, sa;
        struct plimit *plim;
        struct pstats *pstats;
        struct sigacts *ps;
        ksiginfo_t ksi;
        int do_psignal = 0;

        p = l->l_proc;

        if (__predict_false(p == initproc))
                panic("init died (signal %d, exit %d)",
                    WTERMSIG(rv), WEXITSTATUS(rv));

        p->p_flag |= P_WEXIT;
        if (p->p_flag & P_STOPEXIT) {
                sigminusset(&contsigmask, &p->p_sigctx.ps_siglist);
                SCHED_LOCK(s);
                p->p_stat = SSTOP;
                l->l_stat = LSSTOP;
                p->p_nrlwps--;
                mi_switch(l, NULL);
                SCHED_ASSERT_UNLOCKED();
                splx(s);
        }

        DPRINTF(("exit1: %d.%d exiting.\n", p->p_pid, l->l_lid));
        /*
         * Disable scheduler activation upcalls.
         * We're trying to get out of here.
         */
        sa = 0;
        if (p->p_sa != NULL) {
                l->l_flag &= ~L_SA;
#if 0
                p->p_flag &= ~P_SA;
#endif
                sa = 1;
        }

#ifdef PGINPROF
        vmsizmon();
#endif
        if (p->p_flag & P_PROFIL)
                stopprofclock(p);
        p->p_ru = pool_get(&rusage_pool, PR_WAITOK);
        /*
         * If parent is waiting for us to exit or exec, P_PPWAIT is set; we
         * wake up the parent early to avoid deadlock.
         */
        if (p->p_flag & P_PPWAIT) {
                p->p_flag &= ~P_PPWAIT;
                wakeup(p->p_pptr);
        }
        sigfillset(&p->p_sigctx.ps_sigignore);
        sigemptyset(&p->p_sigctx.ps_siglist);
        p->p_sigctx.ps_sigcheck = 0;
        timers_free(p, TIMERS_ALL);

        if (sa || (p->p_nlwps > 1)) {
                exit_lwps(l);

                /*
                 * Collect thread u-areas.
                 */
                uvm_uarea_drain(FALSE);
        }

#if defined(__HAVE_RAS)
        ras_purgeall(p);
#endif

        /*
         * Close open files and release open-file table.
         * This may block!
         */
        fdfree(l);
        cwdfree(p->p_cwdi);
        p->p_cwdi = 0;

        doexithooks(p);

        if (SESS_LEADER(p)) {
                struct session *sp = p->p_session;
                struct tty *tp;

                if (sp->s_ttyvp) {
                        /*
                         * Controlling process.
                         * Signal foreground pgrp,
                         * drain controlling terminal
                         * and revoke access to controlling terminal.
                         */
                        tp = sp->s_ttyp;
                        s = spltty();
                        TTY_LOCK(tp);
                        if (tp->t_session == sp) {
                                if (tp->t_pgrp)
                                        pgsignal(tp->t_pgrp, SIGHUP, 1);
                                /* we can't guarantee the revoke will do this */
                                tp->t_pgrp = NULL;
                                tp->t_session = NULL;
                                TTY_UNLOCK(tp);
                                splx(s);
                                SESSRELE(sp);
                                (void) ttywait(tp);
                                /*
                                 * The tty could have been revoked
                                 * if we blocked.
                                 */
                                if (sp->s_ttyvp)
                                        VOP_REVOKE(sp->s_ttyvp, REVOKEALL);
                        } else {
                                TTY_UNLOCK(tp);
                                splx(s);
                        }
                        if (sp->s_ttyvp)
                                vrele(sp->s_ttyvp);
                        sp->s_ttyvp = NULL;
                        /*
                         * s_ttyp is not zero'd; we use this to indicate
                         * that the session once had a controlling terminal.
                         * (for logging and informational purposes)
                         */
                }
                sp->s_leader = NULL;
        }
        fixjobc(p, p->p_pgrp, 0);

        /*
         * Collect accounting flags from the last remaining LWP (this one),
         * and write out accounting data.
         */
        p->p_acflag |= l->l_acflag;
        (void)acct_process(l);

#ifdef KTRACE
        /*
         * Release trace file.
         */
        ktrderef(p);
#endif
#ifdef SYSTRACE
        systrace_sys_exit(p);
#endif

        /*
         * If emulation has process exit hook, call it now.
         */
        if (p->p_emul->e_proc_exit)
                (*p->p_emul->e_proc_exit)(p);

        /*
         * Free the VM resources we're still holding on to.
         * We must do this from a valid thread because doing
         * so may block.
         * This frees vmspace, which we don't
         * need anymore.  The only remaining lwp is the one
         * we run at this moment, nothing runs in userland
         * anymore.
         */
        uvm_proc_exit(p);

        /*
         * Give machine-dependent code a chance to free any
         * MD LWP resources while we can still block.  This must be done
         * before uvm_lwp_exit(), in case these resources are in the
         * PCB.
         * THIS IS LAST BLOCKING OPERATION.
         */
#ifndef __NO_CPU_LWP_FREE
        cpu_lwp_free(l, 1);
#endif

        pmap_deactivate(l);

        /*
         * NOTE: WE ARE NO LONGER ALLOWED TO SLEEP!
         */

        /*
         * Save exit status and final rusage info, adding in child rusage
         * info and self times.
         * In order to pick up the time for the current execution, we must
         * do this before unlinking the lwp from l_list.
         */
        p->p_xstat = rv;
        *p->p_ru = p->p_stats->p_ru;
        calcru(p, &p->p_ru->ru_utime, &p->p_ru->ru_stime, NULL);
        ruadd(p->p_ru, &p->p_stats->p_cru);

        /*
         * Notify interested parties of our demise.
         */
        KNOTE(&p->p_klist, NOTE_EXIT);

#if PERFCTRS
        /*
         * Save final PMC information in parent process & clean up.
         */
        if (PMC_ENABLED(p)) {
                pmc_save_context(p);
                pmc_accumulate(p->p_pptr, p);
                pmc_process_exit(p);
        }
#endif

        s = proclist_lock_write();
        /*
         * Reset p_opptr pointer of all former children which got
         * traced by another process and were reparented.  We reset
         * it to NULL here; the trace detach code then reparents
         * the child to initproc.  We only check allproc list, since
         * eventual former children on zombproc list won't reference
         * p_opptr anymore.
         */
        if (p->p_flag & P_CHTRACED) {
                PROCLIST_FOREACH(q, &allproc) {
                        if (q->p_opptr == p)
                                q->p_opptr = NULL;
                }
        }

        /*
         * Give orphaned children to init(8).
         */
        q = LIST_FIRST(&p->p_children);
        if (q)		/* only need this if any child is SZOMB */
                wakeup(initproc);
        for (; q != NULL; q = nq) {
                nq = LIST_NEXT(q, p_sibling);

                /*
                 * Traced processes are killed since their existence
                 * means someone is screwing up.  Since we reset the
                 * trace flags, the logic in sys_wait4() would not be
                 * triggered to reparent the process to its
                 * original parent, so we must do this here.
                 */
                if (q->p_flag & P_TRACED) {
                        if (q->p_opptr != q->p_pptr) {
                                struct proc *t = q->p_opptr;
                                proc_reparent(q, t ? t : initproc);
                                q->p_opptr = NULL;
                        } else
                                proc_reparent(q, initproc);
                        q->p_flag &= ~(P_TRACED|P_WAITED|P_FSTRACE|P_SYSCALL);
                        killproc(q, "orphaned traced process");
                } else {
                        proc_reparent(q, initproc);
                }
        }

        /*
         * Move proc from allproc to zombproc, it's now ready
         * to be collected by parent.  Remaining lwp resources
         * will be freed in lwp_exit2() once we've switched to idle
         * context.
         * Changing the state to SZOMB stops it being found by pfind().
         */
        LIST_REMOVE(p, p_list);
        LIST_INSERT_HEAD(&zombproc, p, p_list);
        p->p_stat = SZOMB;

        LIST_REMOVE(l, l_list);
        LIST_REMOVE(l, l_sibling);
        l->l_flag |= L_DETACHED;	/* detached from proc too */
        l->l_stat = LSDEAD;

        KASSERT(p->p_nrlwps == 1);
        KASSERT(p->p_nlwps == 1);
        p->p_nrlwps--;
        p->p_nlwps--;

        /* Put in front of parent's sibling list for parent to collect it */
        q = p->p_pptr;
        q->p_nstopchild++;
        if (LIST_FIRST(&q->p_children) != p) {
                /* Put child where it can be found quickly */
                LIST_REMOVE(p, p_sibling);
                LIST_INSERT_HEAD(&q->p_children, p, p_sibling);
        }

        /*
         * Notify parent that we're gone.  If parent has the P_NOCLDWAIT
         * flag set, notify init instead (and hope it will handle
         * this situation).
         */
        if (q->p_flag & (P_NOCLDWAIT|P_CLDSIGIGN)) {
                proc_reparent(p, initproc);

                /*
                 * If this was the last child of our parent, notify
                 * parent, so in case he was wait(2)ing, he will
                 * continue.
                 */
                if (LIST_FIRST(&q->p_children) == NULL)
                        wakeup(q);
        }

        /*
         * Clear curlwp after we've done all operations
         * that could block, and before tearing down the rest
         * of the process state that might be used from clock, etc.
         * Also, can't clear curlwp while we're still runnable,
         * as we're not on a run queue (we are current, just not
         * a proper proc any longer!).
         *
         * Other substructures are freed from wait().
         */
        curlwp = NULL;

        /* Delay release until after dropping the proclist lock */
        plim = p->p_limit;
        pstats = p->p_stats;
        ps = p->p_sigacts;

        p->p_limit = NULL;
        p->p_stats = NULL;
        p->p_sigacts = NULL;

        /* Reload parent pointer, since p may have been reparented above */
        q = p->p_pptr;

        if ((p->p_flag & P_FSTRACE) == 0 && p->p_exitsig != 0) {
                exit_psignal(p, q, &ksi);
                do_psignal = 1;
        }

        /*
         * Once we release the proclist lock, we shouldn't touch the
         * process structure anymore, since it's now on the zombie
         * list and available for collection by the parent.
         */
        proclist_unlock_write(s);

        if (do_psignal)
                kpsignal(q, &ksi, NULL);

        /* Wake up the parent so it can get exit status. */
        wakeup(q);

        /* Release substructures */
        sigactsfree(ps);
        limfree(plim);
        pstatsfree(pstats);

        /* Release cached credentials. */
        kauth_cred_free(l->l_cred);

#ifdef DEBUG
        /* Nothing should use the process link anymore */
        l->l_proc = NULL;
#endif

        /* This process no longer needs to hold the kernel lock. */
        KERNEL_PROC_UNLOCK(l);

        /*
         * Finally, call machine-dependent code to switch to a new
         * context (possibly the idle context).  Once we are no longer
         * using the dead lwp's stack, lwp_exit2() will be called
         * to arrange for the resources to be released.
         *
         * Note that cpu_exit() will end with a call equivalent to
         * cpu_switch(), finishing our execution (pun intended).
         */

        uvmexp.swtch++;
        cpu_exit(l);
}

void
exit_lwps(struct lwp *l)
{
        struct proc *p;
        struct lwp *l2;
        struct sadata_vp *vp;
        int s, error;
        lwpid_t waited;

        p = l->l_proc;

        /* XXX SMP
         * This would be the right place to IPI any LWPs running on
         * other processors so that they can notice the userret exit hook.
         */
        p->p_userret = lwp_exit_hook;
        p->p_userret_arg = NULL;

        if (p->p_sa) {
                SLIST_FOREACH(vp, &p->p_sa->sa_vps, savp_next) {
                        /*
                         * Make SA-cached LWPs normal process runnable
                         * LWPs so that they'll also self-destruct.
                         */
                        DPRINTF(("exit_lwps: Making cached LWPs of %d on VP %d runnable: ",
                            p->p_pid, vp->savp_id));
                        SCHED_LOCK(s);
                        while ((l2 = sa_getcachelwp(vp)) != 0) {
                                l2->l_priority = l2->l_usrpri;
                                setrunnable(l2);
                                DPRINTF(("%d ", l2->l_lid));
                        }
                        SCHED_UNLOCK(s);
                        DPRINTF(("\n"));

                        /*
                         * Clear wokenq, the LWPs on the queue will
                         * run below.
                         */
                        vp->savp_wokenq_head = NULL;
                }
        }

 retry:
        /*
         * Interrupt LWPs in interruptible sleep, unsuspend suspended
         * LWPs, make detached LWPs undetached (so we can wait for
         * them) and then wait for everyone else to finish.
         */
        LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
                l2->l_flag &= ~(L_DETACHED|L_SA);

                SCHED_LOCK(s);
                if ((l2->l_stat == LSSLEEP && (l2->l_flag & L_SINTR)) ||
                    l2->l_stat == LSSUSPENDED || l2->l_stat == LSSTOP) {
                        setrunnable(l2);
                        DPRINTF(("exit_lwps: Made %d.%d runnable\n",
                            p->p_pid, l2->l_lid));
                }
                SCHED_UNLOCK(s);
        }

        while (p->p_nlwps > 1) {
                DPRINTF(("exit_lwps: waiting for %d LWPs (%d runnable, %d zombies)\n",
                    p->p_nlwps, p->p_nrlwps, p->p_nzlwps));
                error = lwp_wait1(l, 0, &waited, LWPWAIT_EXITCONTROL);
                if (error == EDEADLK) {
                        /*
                         * LWPs can get suspended/slept behind us.
                         * (eg. sa_setwoken)
                         * kick them again and retry.
                         */
                        goto retry;
                }
                if (error)
                        panic("exit_lwps: lwp_wait1 failed with error %d",
                            error);
                DPRINTF(("exit_lwps: Got LWP %d from lwp_wait1()\n", waited));
        }

        p->p_userret = NULL;
}

/* Wrapper function for use in p_userret */
static void
lwp_exit_hook(struct lwp *l, void *arg)
{
        KERNEL_PROC_LOCK(l);
        lwp_exit(l);
}

int
sys_wait4(struct lwp *l, void *v, register_t *retval)
{
        struct sys_wait4_args /* {
                syscallarg(int)			pid;
                syscallarg(int *)		status;
                syscallarg(int)			options;
                syscallarg(struct rusage *)	rusage;
        } */ *uap = v;
        struct proc *child, *parent;
        int status, error;

        parent = l->l_proc;

        if (SCARG(uap, pid) == 0)
                SCARG(uap, pid) = -parent->p_pgid;
        if (SCARG(uap, options) & ~(WUNTRACED|WNOHANG|WALTSIG|WALLSIG))
                return (EINVAL);

        error = find_stopped_child(parent, SCARG(uap, pid), SCARG(uap, options),
            &child);
        if (error != 0)
                return error;
        if (child == NULL) {
                *retval = 0;
                return 0;
        }

        /*
         * Collect child u-areas.
         */
        uvm_uarea_drain(FALSE);

        retval[0] = child->p_pid;

        if (P_ZOMBIE(child)) {
                if (SCARG(uap, status)) {
                        status = child->p_xstat;	/* convert to int */
                        error = copyout(&status, SCARG(uap, status),
                            sizeof(status));
                        if (error)
                                return (error);
                }
                if (SCARG(uap, rusage)) {
                        error = copyout(child->p_ru, SCARG(uap, rusage),
                            sizeof(struct rusage));
                        if (error)
                                return (error);
                }

                proc_free(child);
                return 0;
        }

        /* child state must be SSTOP */
        if (SCARG(uap, status)) {
                status = W_STOPCODE(child->p_xstat);
                return copyout(&status, SCARG(uap, status), sizeof(status));
        }
        return 0;
}

/*
 * Scan list of child processes for a child process that has stopped or
 * exited.  Used by sys_wait4 and 'compat' equivalents.
 */
int
find_stopped_child(struct proc *parent, pid_t pid, int options,
    struct proc **child_p)
{
        struct proc *child;
        int error;

        for (;;) {
                proclist_lock_read();
                error = ECHILD;
                LIST_FOREACH(child, &parent->p_children, p_sibling) {
                        if (pid >= 0) {
                                if (child->p_pid != pid) {
                                        child = p_find(pid, PFIND_ZOMBIE |
                                            PFIND_LOCKED);
                                        if (child == NULL
                                            || child->p_pptr != parent) {
                                                child = NULL;
                                                break;
                                        }
                                }
                        } else
                                if (pid != WAIT_ANY && child->p_pgid != -pid)
                                        /* child not in correct pgrp */
                                        continue;
                        /*
                         * Wait for processes with p_exitsig != SIGCHLD
                         * only if WALTSIG is set; wait for processes
                         * with p_exitsig == SIGCHLD only if WALTSIG
                         * is clear.
                         */
                        if (((options & WALLSIG) == 0) &&
                            (options & WALTSIG ? child->p_exitsig == SIGCHLD
                            : P_EXITSIG(child) != SIGCHLD)) {
                                if (child->p_pid == pid) {
                                        child = NULL;
                                        break;
                                }
                                continue;
                        }

                        error = 0;
                        if (child->p_stat == SZOMB && !(options & WNOZOMBIE))
                                break;

                        if (child->p_stat == SSTOP &&
                            (child->p_flag & P_WAITED) == 0 &&
                            (child->p_flag & P_TRACED || options & WUNTRACED)) {
                                if ((options & WNOWAIT) == 0) {
                                        child->p_flag |= P_WAITED;
                                        parent->p_nstopchild--;
                                }
                                break;
                        }
                        if (parent->p_nstopchild == 0 || child->p_pid == pid) {
                                child = NULL;
                                break;
                        }
                }
                proclist_unlock_read();
                if (child != NULL || error != 0 || options & WNOHANG) {
                        *child_p = child;
                        return error;
                }
                error = tsleep(parent, PWAIT | PCATCH, "wait", 0);
                if (error != 0)
                        return error;
        }
}

/*
 * Free a process after parent has taken all the state info.
 */
void
proc_free(struct proc *p)
{
        struct proc *parent = p->p_pptr;
        ksiginfo_t ksi;
        int s;

        KASSERT(p->p_nlwps == 0);
        KASSERT(p->p_nzlwps == 0);
        KASSERT(p->p_nrlwps == 0);
        KASSERT(LIST_EMPTY(&p->p_lwps));

        /*
         * If we got the child via ptrace(2) or procfs, and
         * the parent is different (meaning the process was
         * attached, rather than run as a child), then we need
         * to give it back to the old parent, and send the
         * parent the exit signal.  The rest of the cleanup
         * will be done when the old parent waits on the child.
         */
        if ((p->p_flag & P_TRACED) && p->p_opptr != parent) {
                parent = p->p_opptr;
                if (parent == NULL)
                        parent = initproc;
                proc_reparent(p, parent);
                p->p_opptr = NULL;
                p->p_flag &= ~(P_TRACED|P_WAITED|P_FSTRACE|P_SYSCALL);
                if (p->p_exitsig != 0) {
                        exit_psignal(p, parent, &ksi);
                        kpsignal(parent, &ksi, NULL);
                }
                wakeup(parent);
                return;
        }

        scheduler_wait_hook(parent, p);
        p->p_xstat = 0;

        ruadd(&parent->p_stats->p_cru, p->p_ru);

        /*
         * At this point we are going to start freeing the final resources.
         * If anyone tries to access the proc structure after here they
         * will get a shock - bits are missing.
         * Attempt to make it hard!
         */

        p->p_stat = SIDL;		/* not even a zombie any more */

        pool_put(&rusage_pool, p->p_ru);

        /*
         * Finally finished with old proc entry.
         * Unlink it from its process group and free it.
         */
        leavepgrp(p);

        s = proclist_lock_write();
        LIST_REMOVE(p, p_list);	/* off zombproc */
        p->p_pptr->p_nstopchild--;
        LIST_REMOVE(p, p_sibling);
        proclist_unlock_write(s);

        /*
         * Decrement the count of procs running with this uid.
         */
        (void)chgproccnt(kauth_cred_getuid(p->p_cred), -1);

        /*
         * Free up credentials.
         */
        kauth_cred_free(p->p_cred);

        /*
         * Release reference to text vnode.
         */
        if (p->p_textvp)
                vrele(p->p_textvp);

        /* Release any SA state. */
        if (p->p_sa)
                sa_release(p);

        /* Free proc structure and let pid be reallocated */
        proc_free_mem(p);
}

/*
 * make process 'parent' the new parent of process 'child'.
 *
 * Must be called with proclist_lock_write() held.
 */
void
proc_reparent(struct proc *child, struct proc *parent)
{

        if (child->p_pptr == parent)
                return;

        if (child->p_stat == SZOMB
            || (child->p_stat == SSTOP && !(child->p_flag & P_WAITED))) {
                child->p_pptr->p_nstopchild--;
                parent->p_nstopchild++;
        }
        if (parent == initproc)
                child->p_exitsig = SIGCHLD;

        LIST_REMOVE(child, p_sibling);
        LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
        child->p_pptr = parent;
}