1 /* $OpenBSD: kern_exit.c,v 1.193 2020/12/09 18:58:19 mpi Exp $ */ 2 /* $NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $ */ 3 4 /* 5 * Copyright (c) 1982, 1986, 1989, 1991, 1993 6 * The Regents of the University of California. All rights reserved. 7 * (c) UNIX System Laboratories, Inc. 8 * All or some portions of this file are derived from material licensed 9 * to the University of California by American Telephone and Telegraph 10 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 11 * the permission of UNIX System Laboratories, Inc. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. Neither the name of the University nor the names of its contributors 22 * may be used to endorse or promote products derived from this software 23 * without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 36 * 37 * @(#)kern_exit.c 8.7 (Berkeley) 2/12/94 38 */ 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/ioctl.h> 43 #include <sys/proc.h> 44 #include <sys/tty.h> 45 #include <sys/time.h> 46 #include <sys/resource.h> 47 #include <sys/kernel.h> 48 #include <sys/sysctl.h> 49 #include <sys/wait.h> 50 #include <sys/vnode.h> 51 #include <sys/syslog.h> 52 #include <sys/malloc.h> 53 #include <sys/resourcevar.h> 54 #include <sys/ptrace.h> 55 #include <sys/acct.h> 56 #include <sys/filedesc.h> 57 #include <sys/signalvar.h> 58 #include <sys/sched.h> 59 #include <sys/ktrace.h> 60 #include <sys/pool.h> 61 #include <sys/mutex.h> 62 #include <sys/pledge.h> 63 #ifdef SYSVSEM 64 #include <sys/sem.h> 65 #endif 66 #include <sys/smr.h> 67 #include <sys/witness.h> 68 69 #include <sys/mount.h> 70 #include <sys/syscallargs.h> 71 72 #include <uvm/uvm_extern.h> 73 74 #include "kcov.h" 75 #if NKCOV > 0 76 #include <sys/kcov.h> 77 #endif 78 79 void proc_finish_wait(struct proc *, struct proc *); 80 void process_clear_orphan(struct process *); 81 void process_zap(struct process *); 82 void proc_free(struct proc *); 83 void unveil_destroy(struct process *ps); 84 85 /* 86 * exit -- 87 * Death of process. 
 */
int
sys_exit(struct proc *p, void *v, register_t *retval)
{
	struct sys_exit_args /* {
		syscallarg(int) rval;
	} */ *uap = v;

	exit1(p, SCARG(uap, rval), 0, EXIT_NORMAL);
	/* NOTREACHED */
	return (0);
}

/*
 * __threxit(2): terminate just this thread.  If the caller supplied a
 * "notdead" pointer, zero it in userspace first (used by thread
 * libraries to observe thread death); a failed copyout raises SIGSEGV.
 */
int
sys___threxit(struct proc *p, void *v, register_t *retval)
{
	struct sys___threxit_args /* {
		syscallarg(pid_t *) notdead;
	} */ *uap = v;

	if (SCARG(uap, notdead) != NULL) {
		pid_t zero = 0;
		if (copyout(&zero, SCARG(uap, notdead), sizeof(zero)))
			psignal(p, SIGSEGV);
	}
	exit1(p, 0, 0, EXIT_THREAD);

	return (0);
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 *
 * p:	 the exiting thread (curproc).
 * xexit: exit status passed to _exit()/exit().
 * xsig:  terminating signal number, or 0 for a normal exit.
 * flags: EXIT_NORMAL to tear down the whole process, EXIT_THREAD to
 *	  exit only this thread.  Forced to EXIT_NORMAL when this is the
 *	  only thread left.
 *
 * Does not return: ends in cpu_exit().
 */
void
exit1(struct proc *p, int xexit, int xsig, int flags)
{
	struct process *pr, *qr, *nqr;
	struct rusage *rup;

	atomic_setbits_int(&p->p_flag, P_WEXIT);

	pr = p->p_p;

	/* single-threaded? */
	if (!P_HASSIBLING(p)) {
		flags = EXIT_NORMAL;
	} else {
		/* nope, multi-threaded */
		if (flags == EXIT_NORMAL)
			single_thread_set(p, SINGLE_EXIT, 0);
		else if (flags == EXIT_THREAD)
			single_thread_check(p, 0);
	}

	/* First thread to start a whole-process exit records the status. */
	if (flags == EXIT_NORMAL && !(pr->ps_flags & PS_EXITING)) {
		if (pr->ps_pid == 1)
			panic("init died (signal %d, exit %d)", xsig, xexit);

		atomic_setbits_int(&pr->ps_flags, PS_EXITING);
		pr->ps_xexit = xexit;
		pr->ps_xsig = xsig;

		/*
		 * If parent is waiting for us to exit or exec, PS_PPWAIT
		 * is set; we wake up the parent early to avoid deadlock.
		 */
		if (pr->ps_flags & PS_PPWAIT) {
			atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
			atomic_clearbits_int(&pr->ps_pptr->ps_flags,
			    PS_ISPWAIT);
			wakeup(pr->ps_pptr);
		}
	}

	/* unlink ourselves from the active threads */
	SMR_TAILQ_REMOVE_LOCKED(&pr->ps_threads, p, p_thr_link);
	if ((p->p_flag & P_THREAD) == 0) {
		/* main thread gotta wait because it has the pid, et al */
		while (pr->ps_refcnt > 1)
			tsleep_nsec(&pr->ps_threads, PWAIT, "thrdeath", INFSLP);
		if (pr->ps_flags & PS_PROFIL)
			stopprofclock(pr);
	}

	/*
	 * Make sure the process has an rusage to accumulate into; the
	 * re-check after the (sleeping) pool_get handles the race where
	 * another thread installed one meanwhile.
	 */
	rup = pr->ps_ru;
	if (rup == NULL) {
		rup = pool_get(&rusage_pool, PR_WAITOK | PR_ZERO);
		if (pr->ps_ru == NULL) {
			pr->ps_ru = rup;
		} else {
			pool_put(&rusage_pool, rup);
			rup = pr->ps_ru;
		}
	}
	p->p_siglist = 0;
	if ((p->p_flag & P_THREAD) == 0)
		pr->ps_siglist = 0;

	kqpoll_exit();

#if NKCOV > 0
	kcov_exit(p);
#endif

	/* Last thread standing: release the process-wide resources. */
	if ((p->p_flag & P_THREAD) == 0) {
		sigio_freelist(&pr->ps_sigiolst);

		/* close open files and release open-file table */
		fdfree(p);

		cancel_all_itimers();

		timeout_del(&pr->ps_rucheck_to);
#ifdef SYSVSEM
		semexit(pr);
#endif
		killjobc(pr);
#ifdef ACCOUNTING
		acct_process(p);
#endif

#ifdef KTRACE
		/* release trace file */
		if (pr->ps_tracevp)
			ktrcleartrace(pr);
#endif

		unveil_destroy(pr);

		/*
		 * If parent has the SAS_NOCLDWAIT flag set, we're not
		 * going to become a zombie.
		 */
		if (pr->ps_pptr->ps_sigacts->ps_sigflags & SAS_NOCLDWAIT)
			atomic_setbits_int(&pr->ps_flags, PS_NOZOMBIE);
	}

	p->p_fd = NULL;		/* zap the thread's copy */

	/*
	 * Remove proc from pidhash chain and allproc so looking
	 * it up won't work.  We will put the proc on the
	 * deadproc list later (using the p_hash member), and
	 * wake up the reaper when we do.  If this is the last
	 * thread of a process that isn't PS_NOZOMBIE, we'll put
	 * the process on the zombprocess list below.
	 */
	/*
	 * NOTE: WE ARE NO LONGER ALLOWED TO SLEEP!
	 */
	p->p_stat = SDEAD;

	LIST_REMOVE(p, p_hash);
	LIST_REMOVE(p, p_list);

	if ((p->p_flag & P_THREAD) == 0) {
		LIST_REMOVE(pr, ps_hash);
		LIST_REMOVE(pr, ps_list);

		if ((pr->ps_flags & PS_NOZOMBIE) == 0)
			LIST_INSERT_HEAD(&zombprocess, pr, ps_list);
		else {
			/*
			 * Not going to be a zombie, so it's now off all
			 * the lists scanned by ispidtaken(), so block
			 * fast reuse of the pid now.
			 */
			freepid(pr->ps_pid);
		}

		/*
		 * Reparent children to their original parent, in case
		 * they were being traced, or to init(8).
		 */
		qr = LIST_FIRST(&pr->ps_children);
		if (qr)		/* only need this if any child is S_ZOMB */
			wakeup(initprocess);
		for (; qr != 0; qr = nqr) {
			nqr = LIST_NEXT(qr, ps_sibling);
			/*
			 * Traced processes are killed since their
			 * existence means someone is screwing up.
			 */
			if (qr->ps_flags & PS_TRACED &&
			    !(qr->ps_flags & PS_EXITING)) {
				process_untrace(qr);

				/*
				 * If single threading is active,
				 * direct the signal to the active
				 * thread to avoid deadlock.
				 */
				if (qr->ps_single)
					ptsignal(qr->ps_single, SIGKILL,
					    STHREAD);
				else
					prsignal(qr, SIGKILL);
			} else {
				process_reparent(qr, initprocess);
			}
		}

		/*
		 * Make sure orphans won't remember the exiting process.
		 */
		while ((qr = LIST_FIRST(&pr->ps_orphans)) != NULL) {
			KASSERT(qr->ps_oppid == pr->ps_pid);
			qr->ps_oppid = 0;
			process_clear_orphan(qr);
		}
	}

	/* add thread's accumulated rusage into the process's total */
	ruadd(rup, &p->p_ru);
	tuagg(pr, p);

	/*
	 * clear %cpu usage during swap
	 */
	p->p_pctcpu = 0;

	if ((p->p_flag & P_THREAD) == 0) {
		/*
		 * Final thread has died, so add on our children's rusage
		 * and calculate the total times
		 */
		calcru(&pr->ps_tu, &rup->ru_utime, &rup->ru_stime, NULL);
		ruadd(rup, &pr->ps_cru);

		/* notify interested parties of our demise and clean up */
		knote_processexit(p);

		/*
		 * Notify parent that we're gone.  If we're not going to
		 * become a zombie, reparent to process 1 (init) so that
		 * we can wake our original parent to possibly unblock
		 * wait4() to return ECHILD.
		 */
		if (pr->ps_flags & PS_NOZOMBIE) {
			struct process *ppr = pr->ps_pptr;
			process_reparent(pr, initprocess);
			wakeup(ppr);
		}

		/*
		 * Release the process's signal state.
		 */
		sigactsfree(pr);
	}

	/* just a thread? detach it from its process */
	if (p->p_flag & P_THREAD) {
		/* scheduler_wait_hook(pr->ps_mainproc, p); XXX */
		/* dropping to a single remaining thread unblocks the
		 * main thread sleeping on ps_threads above */
		if (--pr->ps_refcnt == 1)
			wakeup(&pr->ps_threads);
		KASSERT(pr->ps_refcnt > 0);
	}

	/* Release the thread's read reference of resource limit structure. */
	if (p->p_limit != NULL) {
		struct plimit *limit;

		limit = p->p_limit;
		p->p_limit = NULL;
		lim_free(limit);
	}

	/*
	 * Other substructures are freed from reaper and wait().
	 */

	/*
	 * Finally, call machine-dependent code to switch to a new
	 * context (possibly the idle context).  Once we are no longer
	 * using the dead process's vmspace and stack, exit2() will be
	 * called to schedule those resources to be released by the
	 * reaper thread.
	 *
	 * Note that cpu_exit() will end with a call equivalent to
	 * cpu_switch(), finishing our execution (pun intended).
	 */
	uvmexp.swtch++;
	cpu_exit(p);
	panic("cpu_exit returned");
}

/*
 * Locking of this proclist is special; it's accessed in a
 * critical section of process exit, and thus locking it can't
 * modify interrupt state.  We use a simple spin lock for this
 * proclist.  We use the p_hash member to linkup to deadproc.
 */
struct mutex deadproc_mutex =
    MUTEX_INITIALIZER_FLAGS(IPL_NONE, "deadproc", MTX_NOWITNESS);
struct proclist deadproc = LIST_HEAD_INITIALIZER(deadproc);

/*
 * We are called from cpu_exit() once it is safe to schedule the
 * dead process's resources to be freed.
 *
 * NOTE: One must be careful with locking in this routine.  It's
 * called from a critical section in machine-dependent code, so
 * we should refrain from changing any interrupt state.
 *
 * We lock the deadproc list, place the proc on that list (using
 * the p_hash member), and wake up the reaper.
 */
void
exit2(struct proc *p)
{
	mtx_enter(&deadproc_mutex);
	LIST_INSERT_HEAD(&deadproc, p, p_hash);
	mtx_leave(&deadproc_mutex);

	wakeup(&deadproc);
}

/*
 * Release the final per-thread resources: credential reference and
 * the proc structure itself.
 */
void
proc_free(struct proc *p)
{
	crfree(p->p_ucred);
	pool_put(&proc_pool, p);
	nthreads--;
}

/*
 * Process reaper.  This is run by a kernel thread to free the resources
 * of a dead process.  Once the resources are free, the process becomes
 * a zombie, and the parent is allowed to read the undead's status.
 */
void
reaper(void *arg)
{
	struct proc *p;

	KERNEL_UNLOCK();

	SCHED_ASSERT_UNLOCKED();

	for (;;) {
		mtx_enter(&deadproc_mutex);
		while ((p = LIST_FIRST(&deadproc)) == NULL)
			msleep_nsec(&deadproc, &deadproc_mutex, PVM, "reaper",
			    INFSLP);

		/* Remove us from the deadproc list. */
		LIST_REMOVE(p, p_hash);
		mtx_leave(&deadproc_mutex);

		WITNESS_THREAD_EXIT(p);

		KERNEL_LOCK();

		/*
		 * Free the VM resources we're still holding on to.
		 * We must do this from a valid thread because doing
		 * so may block.
		 */
		uvm_uarea_free(p);
		p->p_vmspace = NULL;		/* zap the thread's copy */

		if (p->p_flag & P_THREAD) {
			/* Just a thread */
			proc_free(p);
		} else {
			struct process *pr = p->p_p;

			/* Release the rest of the process's vmspace */
			uvm_exit(pr);

			if ((pr->ps_flags & PS_NOZOMBIE) == 0) {
				/* Process is now a true zombie. */
				atomic_setbits_int(&pr->ps_flags, PS_ZOMBIE);
				prsignal(pr->ps_pptr, SIGCHLD);

				/* Wake up the parent so it can get exit status. */
				wakeup(pr->ps_pptr);
			} else {
				/* No one will wait for us.  Just zap the process now */
				process_zap(pr);
			}
		}

		KERNEL_UNLOCK();
	}
}

/*
 * wait4(2): copy the status and rusage collected by dowait4() back
 * out to userspace; retval[0] > 0 means a child was actually reaped
 * or reported.
 */
int
sys_wait4(struct proc *q, void *v, register_t *retval)
{
	struct sys_wait4_args /* {
		syscallarg(pid_t) pid;
		syscallarg(int *) status;
		syscallarg(int) options;
		syscallarg(struct rusage *) rusage;
	} */ *uap = v;
	struct rusage ru;
	int status, error;

	error = dowait4(q, SCARG(uap, pid),
	    SCARG(uap, status) ? &status : NULL,
	    SCARG(uap, options), SCARG(uap, rusage) ? &ru : NULL, retval);
	if (error == 0 && retval[0] > 0 && SCARG(uap, status)) {
		error = copyout(&status, SCARG(uap, status), sizeof(status));
	}
	if (error == 0 && retval[0] > 0 && SCARG(uap, rusage)) {
		error = copyout(&ru, SCARG(uap, rusage), sizeof(ru));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(q, KTR_STRUCT))
			ktrrusage(q, &ru);
#endif
	}
	return (error);
}

/*
 * Backend for wait4(2): scan q's children (and orphans) for one
 * matching "pid" (WAIT_ANY, a pid, 0 for own pgrp, or -pgid) that is a
 * zombie, stopped, or continued; fill in *statusp/*rusage when the
 * caller asked for them and set retval[0] to the child's pid.  Sleeps
 * until a match changes state unless WNOHANG is given.
 */
int
dowait4(struct proc *q, pid_t pid, int *statusp, int options,
    struct rusage *rusage, register_t *retval)
{
	int nfound;
	struct process *pr;
	struct proc *p;
	int error;

	if (pid == 0)
		pid = -q->p_p->ps_pgid;	/* 0 means "my process group" */
	if (options &~ (WUNTRACED|WNOHANG|WCONTINUED))
		return (EINVAL);

loop:
	nfound = 0;
	LIST_FOREACH(pr, &q->p_p->ps_children, ps_sibling) {
		if ((pr->ps_flags & PS_NOZOMBIE) ||
		    (pid != WAIT_ANY &&
		    pr->ps_pid != pid &&
		    pr->ps_pgid != -pid))
			continue;

		p = pr->ps_mainproc;

		nfound++;
		if (pr->ps_flags & PS_ZOMBIE) {
			retval[0] = pr->ps_pid;

			if (statusp != NULL)
				*statusp = W_EXITCODE(pr->ps_xexit,
				    pr->ps_xsig);
			if (rusage != NULL)
				memcpy(rusage, pr->ps_ru, sizeof(*rusage));
			proc_finish_wait(q, p);
			return (0);
		}
		/* Traced child stopped in single-thread mode. */
		if (pr->ps_flags & PS_TRACED &&
		    (pr->ps_flags & PS_WAITED) == 0 && pr->ps_single &&
		    pr->ps_single->p_stat == SSTOP &&
		    (pr->ps_single->p_flag & P_SUSPSINGLE) == 0) {
			if (single_thread_wait(pr, 0))
				goto loop;

			atomic_setbits_int(&pr->ps_flags, PS_WAITED);
			retval[0] = pr->ps_pid;

			if (statusp != NULL)
				*statusp = W_STOPCODE(pr->ps_xsig);
			if (rusage != NULL)
				memset(rusage, 0, sizeof(*rusage));
			return (0);
		}
		/* Stopped child: reported when traced or WUNTRACED. */
		if (p->p_stat == SSTOP &&
		    (pr->ps_flags & PS_WAITED) == 0 &&
		    (p->p_flag & P_SUSPSINGLE) == 0 &&
		    (pr->ps_flags & PS_TRACED ||
		    options & WUNTRACED)) {
			atomic_setbits_int(&pr->ps_flags, PS_WAITED);
			retval[0] = pr->ps_pid;

			if (statusp != NULL)
				*statusp = W_STOPCODE(pr->ps_xsig);
			if (rusage != NULL)
				memset(rusage, 0, sizeof(*rusage));
			return (0);
		}
		if ((options & WCONTINUED) && (p->p_flag & P_CONTINUED)) {
			atomic_clearbits_int(&p->p_flag, P_CONTINUED);
			retval[0] = pr->ps_pid;

			if (statusp != NULL)
				*statusp = _WCONTINUED;
			if (rusage != NULL)
				memset(rusage, 0, sizeof(*rusage));
			return (0);
		}
	}
	/*
	 * Look in the orphans list too, to allow the parent to
	 * collect it's child exit status even if child is being
	 * debugged.
	 *
	 * Debugger detaches from the parent upon successful
	 * switch-over from parent to child.  At this point due to
	 * re-parenting the parent loses the child to debugger and a
	 * wait4(2) call would report that it has no children to wait
	 * for.  By maintaining a list of orphans we allow the parent
	 * to successfully wait until the child becomes a zombie.
	 */
	if (nfound == 0) {
		LIST_FOREACH(pr, &q->p_p->ps_orphans, ps_orphan) {
			if ((pr->ps_flags & PS_NOZOMBIE) ||
			    (pid != WAIT_ANY &&
			    pr->ps_pid != pid &&
			    pr->ps_pgid != -pid))
				continue;
			nfound++;
			break;
		}
	}
	if (nfound == 0)
		return (ECHILD);
	if (options & WNOHANG) {
		retval[0] = 0;
		return (0);
	}
	if ((error = tsleep_nsec(q->p_p, PWAIT | PCATCH, "wait", INFSLP)) != 0)
		return (error);
	goto loop;
}

/*
 * Finish reaping a zombie child: either hand it back to the original
 * parent it was ptrace-attached from, or fold its rusage into the
 * waiter's child totals and destroy it.
 */
void
proc_finish_wait(struct proc *waiter, struct proc *p)
{
	struct process *pr, *tr;
	struct rusage *rup;

	/*
	 * If we got the child via a ptrace 'attach',
	 * we need to give it back to the old parent.
	 */
	pr = p->p_p;
	if (pr->ps_oppid != 0 && (pr->ps_oppid != pr->ps_pptr->ps_pid) &&
	    (tr = prfind(pr->ps_oppid))) {
		pr->ps_oppid = 0;
		atomic_clearbits_int(&pr->ps_flags, PS_TRACED);
		process_reparent(pr, tr);
		prsignal(tr, SIGCHLD);
		wakeup(tr);
	} else {
		scheduler_wait_hook(waiter, p);
		rup = &waiter->p_p->ps_cru;
		ruadd(rup, pr->ps_ru);
		LIST_REMOVE(pr, ps_list);	/* off zombprocess */
		freepid(pr->ps_pid);
		process_zap(pr);
	}
}

/*
 * give process back to original parent or init(8)
 */
void
process_untrace(struct process *pr)
{
	struct process *ppr = NULL;

	KASSERT(pr->ps_flags & PS_TRACED);

	if (pr->ps_oppid != 0 &&
	    (pr->ps_oppid != pr->ps_pptr->ps_pid))
		ppr = prfind(pr->ps_oppid);

	/* not being traced any more */
	pr->ps_oppid = 0;
	atomic_clearbits_int(&pr->ps_flags, PS_TRACED);
	process_reparent(pr, ppr ? ppr : initprocess);
}

/*
 * Remove pr from its original parent's orphan list, if it is on one.
 */
void
process_clear_orphan(struct process *pr)
{
	if (pr->ps_flags & PS_ORPHAN) {
		LIST_REMOVE(pr, ps_orphan);
		atomic_clearbits_int(&pr->ps_flags, PS_ORPHAN);
	}
}

/*
 * make process 'parent' the new parent of process 'child'.
 */
void
process_reparent(struct process *child, struct process *parent)
{

	if (child->ps_pptr == parent)
		return;

	KASSERT(child->ps_oppid == 0 ||
	    child->ps_oppid == child->ps_pptr->ps_pid);

	LIST_REMOVE(child, ps_sibling);
	LIST_INSERT_HEAD(&parent->ps_children, child, ps_sibling);

	/* a traced child stays on its old parent's orphan list */
	process_clear_orphan(child);
	if (child->ps_flags & PS_TRACED) {
		atomic_setbits_int(&child->ps_flags, PS_ORPHAN);
		LIST_INSERT_HEAD(&child->ps_pptr->ps_orphans, child, ps_orphan);
	}

	child->ps_pptr = parent;
}

/*
 * Final destruction of a process: unlink from process group and
 * sibling/orphan lists, drop per-uid proc count, release the text
 * vnode, free remaining substructures, and free the main proc.
 */
void
process_zap(struct process *pr)
{
	struct vnode *otvp;
	struct proc *p = pr->ps_mainproc;

	/*
	 * Finally finished with old proc entry.
	 * Unlink it from its process group and free it.
	 */
	leavepgrp(pr);
	LIST_REMOVE(pr, ps_sibling);
	process_clear_orphan(pr);

	/*
	 * Decrement the count of procs running with this uid.
	 */
	(void)chgproccnt(pr->ps_ucred->cr_ruid, -1);

	/*
	 * Release reference to text vnode
	 */
	otvp = pr->ps_textvp;
	pr->ps_textvp = NULL;
	if (otvp)
		vrele(otvp);

	KASSERT(pr->ps_refcnt == 1);
	if (pr->ps_ptstat != NULL)
		free(pr->ps_ptstat, M_SUBPROC, sizeof(*pr->ps_ptstat));
	pool_put(&rusage_pool, pr->ps_ru);
	KASSERT(SMR_TAILQ_EMPTY_LOCKED(&pr->ps_threads));
	lim_free(pr->ps_limit);
	crfree(pr->ps_ucred);
	pool_put(&process_pool, pr);
	nprocesses--;

	proc_free(p);
}