1 /* $OpenBSD: kern_exit.c,v 1.218 2024/01/15 15:47:37 mvs Exp $ */ 2 /* $NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $ */ 3 4 /* 5 * Copyright (c) 1982, 1986, 1989, 1991, 1993 6 * The Regents of the University of California. All rights reserved. 7 * (c) UNIX System Laboratories, Inc. 8 * All or some portions of this file are derived from material licensed 9 * to the University of California by American Telephone and Telegraph 10 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 11 * the permission of UNIX System Laboratories, Inc. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. Neither the name of the University nor the names of its contributors 22 * may be used to endorse or promote products derived from this software 23 * without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 36 * 37 * @(#)kern_exit.c 8.7 (Berkeley) 2/12/94 38 */ 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/proc.h> 43 #include <sys/time.h> 44 #include <sys/resource.h> 45 #include <sys/wait.h> 46 #include <sys/vnode.h> 47 #include <sys/malloc.h> 48 #include <sys/resourcevar.h> 49 #include <sys/ptrace.h> 50 #include <sys/acct.h> 51 #include <sys/filedesc.h> 52 #include <sys/signalvar.h> 53 #include <sys/sched.h> 54 #include <sys/ktrace.h> 55 #include <sys/pool.h> 56 #include <sys/mutex.h> 57 #ifdef SYSVSEM 58 #include <sys/sem.h> 59 #endif 60 #include <sys/witness.h> 61 62 #include <sys/mount.h> 63 #include <sys/syscallargs.h> 64 65 #include <uvm/uvm_extern.h> 66 67 #include "kcov.h" 68 #if NKCOV > 0 69 #include <sys/kcov.h> 70 #endif 71 72 void proc_finish_wait(struct proc *, struct proc *); 73 void process_clear_orphan(struct process *); 74 void process_zap(struct process *); 75 void proc_free(struct proc *); 76 void unveil_destroy(struct process *ps); 77 78 /* 79 * exit -- 80 * Death of process. 
 */
int
sys_exit(struct proc *p, void *v, register_t *retval)
{
	struct sys_exit_args /* {
		syscallarg(int) rval;
	} */ *uap = v;

	/* Whole-process exit; exit1() never returns to us. */
	exit1(p, SCARG(uap, rval), 0, EXIT_NORMAL);
	/* NOTREACHED */
	return (0);
}

/*
 * __threxit(2): terminate only the calling thread.  If the caller
 * supplied a 'notdead' pointer, store zero through it first so
 * userland can observe that the thread is gone; a failing copyout
 * posts SIGSEGV to the thread before it exits.
 */
int
sys___threxit(struct proc *p, void *v, register_t *retval)
{
	struct sys___threxit_args /* {
		syscallarg(pid_t *) notdead;
	} */ *uap = v;

	if (SCARG(uap, notdead) != NULL) {
		pid_t zero = 0;

		if (copyout(&zero, SCARG(uap, notdead), sizeof(zero)))
			psignal(p, SIGSEGV);
	}
	exit1(p, 0, 0, EXIT_THREAD);

	return (0);
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 *
 * xexit is the exit code saved in ps_xexit, xsig the terminating signal
 * saved in ps_xsig; flags is EXIT_NORMAL (whole process) or EXIT_THREAD
 * (just this thread).  Never returns: ends in cpu_exit().
 */
void
exit1(struct proc *p, int xexit, int xsig, int flags)
{
	struct process *pr, *qr, *nqr;
	struct rusage *rup;
	struct timespec ts;
	int s;

	atomic_setbits_int(&p->p_flag, P_WEXIT);

	pr = p->p_p;

	/* single-threaded? */
	if (!P_HASSIBLING(p)) {
		/* a lone thread's EXIT_THREAD is really a process exit */
		flags = EXIT_NORMAL;
	} else {
		/* nope, multi-threaded */
		if (flags == EXIT_NORMAL)
			single_thread_set(p, SINGLE_EXIT);
		else if (flags == EXIT_THREAD)
			single_thread_check(p, 0);
	}

	/* first thread to drive a process exit records the status */
	if (flags == EXIT_NORMAL && !(pr->ps_flags & PS_EXITING)) {
		if (pr->ps_pid == 1)
			panic("init died (signal %d, exit %d)", xsig, xexit);

		atomic_setbits_int(&pr->ps_flags, PS_EXITING);
		pr->ps_xexit = xexit;
		pr->ps_xsig = xsig;

		/*
		 * If parent is waiting for us to exit or exec, PS_PPWAIT
		 * is set; we wake up the parent early to avoid deadlock.
		 */
		if (pr->ps_flags & PS_PPWAIT) {
			atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
			atomic_clearbits_int(&pr->ps_pptr->ps_flags,
			    PS_ISPWAIT);
			wakeup(pr->ps_pptr);
		}
	}

	/* unlink ourselves from the active threads */
	SCHED_LOCK(s);
	TAILQ_REMOVE(&pr->ps_threads, p, p_thr_link);
	SCHED_UNLOCK(s);

	if ((p->p_flag & P_THREAD) == 0) {
		/* main thread gotta wait because it has the pid, et al */
		while (pr->ps_threadcnt > 1)
			tsleep_nsec(&pr->ps_threads, PWAIT, "thrdeath",
			    INFSLP);

		LIST_REMOVE(pr, ps_list);
		refcnt_finalize(&pr->ps_refcnt, "psdtor");
	}

	/*
	 * Make sure the process has an rusage to accumulate into; if
	 * another thread raced us and installed one, free ours and use
	 * the winner's.
	 */
	rup = pr->ps_ru;
	if (rup == NULL) {
		rup = pool_get(&rusage_pool, PR_WAITOK | PR_ZERO);
		if (pr->ps_ru == NULL) {
			pr->ps_ru = rup;
		} else {
			pool_put(&rusage_pool, rup);
			rup = pr->ps_ru;
		}
	}
	p->p_siglist = 0;
	if ((p->p_flag & P_THREAD) == 0)
		pr->ps_siglist = 0;

	kqpoll_exit();

#if NKCOV > 0
	kcov_exit(p);
#endif

	/* per-process (as opposed to per-thread) cleanup */
	if ((p->p_flag & P_THREAD) == 0) {
		if (pr->ps_flags & PS_PROFIL)
			stopprofclock(pr);

		sigio_freelist(&pr->ps_sigiolst);

		/* close open files and release open-file table */
		fdfree(p);

		cancel_all_itimers();

		timeout_del(&pr->ps_rucheck_to);
#ifdef SYSVSEM
		semexit(pr);
#endif
		killjobc(pr);
#ifdef ACCOUNTING
		acct_process(p);
#endif

#ifdef KTRACE
		/* release trace file */
		if (pr->ps_tracevp)
			ktrcleartrace(pr);
#endif

		unveil_destroy(pr);

		/*
		 * If parent has the SAS_NOCLDWAIT flag set, we're not
		 * going to become a zombie.
		 */
		if (pr->ps_pptr->ps_sigacts->ps_sigflags & SAS_NOCLDWAIT)
			atomic_setbits_int(&pr->ps_flags, PS_NOZOMBIE);
	}

	p->p_fd = NULL;		/* zap the thread's copy */

	/* Release the thread's read reference of resource limit structure. */
	if (p->p_limit != NULL) {
		struct plimit *limit;

		limit = p->p_limit;
		p->p_limit = NULL;
		lim_free(limit);
	}

	/*
	 * Remove proc from pidhash chain and allproc so looking
	 * it up won't work.  We will put the proc on the
	 * deadproc list later (using the p_hash member), and
	 * wake up the reaper when we do.  If this is the last
	 * thread of a process that isn't PS_NOZOMBIE, we'll put
	 * the process on the zombprocess list below.
	 */
	/*
	 * NOTE: WE ARE NO LONGER ALLOWED TO SLEEP!
	 */
	p->p_stat = SDEAD;

	LIST_REMOVE(p, p_hash);
	LIST_REMOVE(p, p_list);

	if ((p->p_flag & P_THREAD) == 0) {
		LIST_REMOVE(pr, ps_hash);

		if ((pr->ps_flags & PS_NOZOMBIE) == 0)
			LIST_INSERT_HEAD(&zombprocess, pr, ps_list);
		else {
			/*
			 * Not going to be a zombie, so it's now off all
			 * the lists scanned by ispidtaken(), so block
			 * fast reuse of the pid now.
			 */
			freepid(pr->ps_pid);
		}

		/*
		 * Reparent children to their original parent, in case
		 * they were being traced, or to init(8).
		 */
		qr = LIST_FIRST(&pr->ps_children);
		if (qr)		/* only need this if any child is S_ZOMB */
			wakeup(initprocess);
		for (; qr != NULL; qr = nqr) {
			nqr = LIST_NEXT(qr, ps_sibling);
			/*
			 * Traced processes are killed since their
			 * existence means someone is screwing up.
			 */
			if (qr->ps_flags & PS_TRACED &&
			    !(qr->ps_flags & PS_EXITING)) {
				process_untrace(qr);

				/*
				 * If single threading is active,
				 * direct the signal to the active
				 * thread to avoid deadlock.
				 */
				if (qr->ps_single)
					ptsignal(qr->ps_single, SIGKILL,
					    STHREAD);
				else
					prsignal(qr, SIGKILL);
			} else {
				process_reparent(qr, initprocess);
			}
		}

		/*
		 * Make sure orphans won't remember the exiting process.
		 */
		while ((qr = LIST_FIRST(&pr->ps_orphans)) != NULL) {
			KASSERT(qr->ps_oppid == pr->ps_pid);
			qr->ps_oppid = 0;
			process_clear_orphan(qr);
		}
	}

	/* add thread's accumulated rusage into the process's total */
	ruadd(rup, &p->p_ru);
	nanouptime(&ts);
	if (timespeccmp(&ts, &curcpu()->ci_schedstate.spc_runtime, <))
		timespecclear(&ts);
	else
		timespecsub(&ts, &curcpu()->ci_schedstate.spc_runtime, &ts);
	SCHED_LOCK(s);
	tuagg_locked(pr, p, &ts);
	SCHED_UNLOCK(s);

	/*
	 * clear %cpu usage during swap
	 */
	p->p_pctcpu = 0;

	if ((p->p_flag & P_THREAD) == 0) {
		/*
		 * Final thread has died, so add on our children's rusage
		 * and calculate the total times
		 */
		calcru(&pr->ps_tu, &rup->ru_utime, &rup->ru_stime, NULL);
		ruadd(rup, &pr->ps_cru);

		/*
		 * Notify parent that we're gone.  If we're not going to
		 * become a zombie, reparent to process 1 (init) so that
		 * we can wake our original parent to possibly unblock
		 * wait4() to return ECHILD.
		 */
		if (pr->ps_flags & PS_NOZOMBIE) {
			struct process *ppr = pr->ps_pptr;
			process_reparent(pr, initprocess);
			wakeup(ppr);
		}
	}

	/* just a thread? detach it from its process */
	if (p->p_flag & P_THREAD) {
		/* scheduler_wait_hook(pr->ps_mainproc, p); XXX */
		/*
		 * Wake the main thread's "thrdeath" sleep when we are
		 * the second-to-last; the KASSERT documents that the
		 * main thread's count is still included here.
		 */
		if (--pr->ps_threadcnt == 1)
			wakeup(&pr->ps_threads);
		KASSERT(pr->ps_threadcnt > 0);
	}

	/*
	 * Other substructures are freed from reaper and wait().
	 */

	/*
	 * Finally, call machine-dependent code to switch to a new
	 * context (possibly the idle context).  Once we are no longer
	 * using the dead process's vmspace and stack, exit2() will be
	 * called to schedule those resources to be released by the
	 * reaper thread.
	 *
	 * Note that cpu_exit() will end with a call equivalent to
	 * cpu_switch(), finishing our execution (pun intended).
	 */
	uvmexp.swtch++;
	cpu_exit(p);
	panic("cpu_exit returned");
}

/*
 * Locking of this proclist is special; it's accessed in a
 * critical section of process exit, and thus locking it can't
 * modify interrupt state.  We use a simple spin lock for this
 * proclist.  We use the p_hash member to linkup to deadproc.
 */
struct mutex deadproc_mutex =
    MUTEX_INITIALIZER_FLAGS(IPL_NONE, "deadproc", MTX_NOWITNESS);
struct proclist deadproc = LIST_HEAD_INITIALIZER(deadproc);

/*
 * We are called from cpu_exit() once it is safe to schedule the
 * dead process's resources to be freed.
 *
 * NOTE: One must be careful with locking in this routine.  It's
 * called from a critical section in machine-dependent code, so
 * we should refrain from changing any interrupt state.
 *
 * We lock the deadproc list, place the proc on that list (using
 * the p_hash member), and wake up the reaper.
 */
void
exit2(struct proc *p)
{
	mtx_enter(&deadproc_mutex);
	LIST_INSERT_HEAD(&deadproc, p, p_hash);
	mtx_leave(&deadproc_mutex);

	wakeup(&deadproc);
}

/*
 * Free the last per-thread resources: drop the credential reference,
 * return the proc structure to its pool and decrement the global
 * thread count.
 */
void
proc_free(struct proc *p)
{
	crfree(p->p_ucred);
	pool_put(&proc_pool, p);
	nthreads--;
}

/*
 * Process reaper.  This is run by a kernel thread to free the resources
 * of a dead process.  Once the resources are free, the process becomes
 * a zombie, and the parent is allowed to read the undead's status.
 */
void
reaper(void *arg)
{
	struct proc *p;

	KERNEL_UNLOCK();

	SCHED_ASSERT_UNLOCKED();

	for (;;) {
		/* sleep until exit2() queues a dead thread for us */
		mtx_enter(&deadproc_mutex);
		while ((p = LIST_FIRST(&deadproc)) == NULL)
			msleep_nsec(&deadproc, &deadproc_mutex, PVM, "reaper",
			    INFSLP);

		/* Remove us from the deadproc list. */
		LIST_REMOVE(p, p_hash);
		mtx_leave(&deadproc_mutex);

		WITNESS_THREAD_EXIT(p);

		KERNEL_LOCK();

		/*
		 * Free the VM resources we're still holding on to.
		 * We must do this from a valid thread because doing
		 * so may block.
		 */
		uvm_uarea_free(p);
		p->p_vmspace = NULL;		/* zap the thread's copy */

		if (p->p_flag & P_THREAD) {
			/* Just a thread */
			proc_free(p);
		} else {
			struct process *pr = p->p_p;

			/* Release the rest of the process's vmspace */
			uvm_exit(pr);

			if ((pr->ps_flags & PS_NOZOMBIE) == 0) {
				/* Process is now a true zombie. */
				atomic_setbits_int(&pr->ps_flags, PS_ZOMBIE);
			}

			/* Notify listeners of our demise and clean up. */
			knote_processexit(pr);

			if (pr->ps_flags & PS_ZOMBIE) {
				/* Post SIGCHLD and wake up parent. */
				prsignal(pr->ps_pptr, SIGCHLD);
				wakeup(pr->ps_pptr);
			} else {
				/* No one will wait for us, just zap it. */
				process_zap(pr);
			}
		}

		KERNEL_UNLOCK();
	}
}

/*
 * Common back end for wait4(2) and waitid(2): scan q's children (and,
 * if none match, its orphans) for a process selected by idtype/id that
 * satisfies 'options' (WEXITED/WTRAPPED/WUNTRACED/WCONTINUED/...).
 * On success the child's pid is stored in *retval and, when the
 * corresponding pointers are non-NULL, its status, rusage and siginfo
 * are filled in.  Blocks unless WNOHANG is given.
 */
int
dowait6(struct proc *q, idtype_t idtype, id_t id, int *statusp, int options,
    struct rusage *rusage, siginfo_t *info, register_t *retval)
{
	int nfound;
	struct process *pr;
	struct proc *p;
	int error;

	if (info != NULL)
		memset(info, 0, sizeof(*info));

loop:
	nfound = 0;
	LIST_FOREACH(pr, &q->p_p->ps_children, ps_sibling) {
		if ((pr->ps_flags & PS_NOZOMBIE) ||
		    (idtype == P_PID && id != pr->ps_pid) ||
		    (idtype == P_PGID && id != pr->ps_pgid))
			continue;

		p = pr->ps_mainproc;

		nfound++;
		/* child already a zombie: collect its exit status */
		if ((options & WEXITED) && (pr->ps_flags & PS_ZOMBIE)) {
			*retval = pr->ps_pid;
			if (info != NULL) {
				info->si_pid = pr->ps_pid;
				info->si_uid = pr->ps_ucred->cr_uid;
				info->si_signo = SIGCHLD;
				if (pr->ps_xsig == 0) {
					info->si_code = CLD_EXITED;
					info->si_status = pr->ps_xexit;
				} else if (WCOREDUMP(pr->ps_xsig)) {
					info->si_code = CLD_DUMPED;
					info->si_status = _WSTATUS(pr->ps_xsig);
				} else {
					info->si_code = CLD_KILLED;
					info->si_status = _WSTATUS(pr->ps_xsig);
				}
			}

			if (statusp != NULL)
				*statusp = W_EXITCODE(pr->ps_xexit,
				    pr->ps_xsig);
			if (rusage != NULL)
				memcpy(rusage, pr->ps_ru, sizeof(*rusage));
			if ((options & WNOWAIT) == 0)
				proc_finish_wait(q, p);
			return (0);
		}
		/* traced child stopped in single-thread mode */
		if ((options & WTRAPPED) &&
		    pr->ps_flags & PS_TRACED &&
		    (pr->ps_flags & PS_WAITED) == 0 && pr->ps_single &&
		    pr->ps_single->p_stat == SSTOP &&
		    (pr->ps_single->p_flag & P_SUSPSINGLE) == 0) {
			if (single_thread_wait(pr, 0))
				goto loop;

			if ((options & WNOWAIT) == 0)
				atomic_setbits_int(&pr->ps_flags, PS_WAITED);

			*retval = pr->ps_pid;
			if (info != NULL) {
				info->si_pid = pr->ps_pid;
				info->si_uid = pr->ps_ucred->cr_uid;
				info->si_signo = SIGCHLD;
				info->si_code = CLD_TRAPPED;
				info->si_status = pr->ps_xsig;
			}

			if (statusp != NULL)
				*statusp = W_STOPCODE(pr->ps_xsig);
			if (rusage != NULL)
				memset(rusage, 0, sizeof(*rusage));
			return (0);
		}
		/* child stopped by job control (or traced) */
		if (p->p_stat == SSTOP &&
		    (pr->ps_flags & PS_WAITED) == 0 &&
		    (p->p_flag & P_SUSPSINGLE) == 0 &&
		    (pr->ps_flags & PS_TRACED ||
		    options & WUNTRACED)) {
			if ((options & WNOWAIT) == 0)
				atomic_setbits_int(&pr->ps_flags, PS_WAITED);

			*retval = pr->ps_pid;
			if (info != 0) {
				info->si_pid = pr->ps_pid;
				info->si_uid = pr->ps_ucred->cr_uid;
				info->si_signo = SIGCHLD;
				info->si_code = CLD_STOPPED;
				info->si_status = pr->ps_xsig;
			}

			if (statusp != NULL)
				*statusp = W_STOPCODE(pr->ps_xsig);
			if (rusage != NULL)
				memset(rusage, 0, sizeof(*rusage));
			return (0);
		}
		/* child continued after a stop */
		if ((options & WCONTINUED) && (p->p_flag & P_CONTINUED)) {
			if ((options & WNOWAIT) == 0)
				atomic_clearbits_int(&p->p_flag, P_CONTINUED);

			*retval = pr->ps_pid;
			if (info != NULL) {
				info->si_pid = pr->ps_pid;
				info->si_uid = pr->ps_ucred->cr_uid;
				info->si_signo = SIGCHLD;
				info->si_code = CLD_CONTINUED;
				info->si_status = SIGCONT;
			}

			if (statusp != NULL)
				*statusp = _WCONTINUED;
			if (rusage != NULL)
				memset(rusage, 0, sizeof(*rusage));
			return (0);
		}
	}
	/*
	 * Look in the orphans list too, to allow the parent to
	 * collect its child's exit status even if child is being
	 * debugged.
	 *
	 * Debugger detaches from the parent upon successful
	 * switch-over from parent to child.  At this point due to
	 * re-parenting the parent loses the child to debugger and a
	 * wait4(2) call would report that it has no children to wait
	 * for.  By maintaining a list of orphans we allow the parent
	 * to successfully wait until the child becomes a zombie.
	 */
	if (nfound == 0) {
		LIST_FOREACH(pr, &q->p_p->ps_orphans, ps_orphan) {
			if ((pr->ps_flags & PS_NOZOMBIE) ||
			    (idtype == P_PID && id != pr->ps_pid) ||
			    (idtype == P_PGID && id != pr->ps_pgid))
				continue;
			nfound++;
			break;
		}
	}
	if (nfound == 0)
		return (ECHILD);
	if (options & WNOHANG) {
		*retval = 0;
		return (0);
	}
	/* wait for a state change; proc_finish_wait()/exit1() wake us */
	if ((error = tsleep_nsec(q->p_p, PWAIT | PCATCH, "wait", INFSLP)) != 0)
		return (error);
	goto loop;
}

/*
 * wait4(2): translate the pid argument into an (idtype, id) pair,
 * call dowait6() and copy status/rusage out to userland.
 */
int
sys_wait4(struct proc *q, void *v, register_t *retval)
{
	struct sys_wait4_args /* {
		syscallarg(pid_t) pid;
		syscallarg(int *) status;
		syscallarg(int) options;
		syscallarg(struct rusage *) rusage;
	} */ *uap = v;
	struct rusage ru;
	pid_t pid = SCARG(uap, pid);
	int options = SCARG(uap, options);
	int status, error;
	idtype_t idtype;
	id_t id;

	if (SCARG(uap, options) &~ (WUNTRACED|WNOHANG|WCONTINUED))
		return (EINVAL);
	/* wait4() always reports exits and (for traced children) traps */
	options |= WEXITED | WTRAPPED;

	if (SCARG(uap, pid) == WAIT_MYPGRP) {
		idtype = P_PGID;
		id = q->p_p->ps_pgid;
	} else if (SCARG(uap, pid) == WAIT_ANY) {
		idtype = P_ALL;
		id = 0;
	} else if (pid < 0) {
		idtype = P_PGID;
		id = -pid;
	} else {
		idtype = P_PID;
		id = pid;
	}

	error = dowait6(q, idtype, id,
	    SCARG(uap, status) ? &status : NULL, options,
	    SCARG(uap, rusage) ? &ru : NULL, NULL, retval);
	if (error == 0 && *retval > 0 && SCARG(uap, status)) {
		error = copyout(&status, SCARG(uap, status), sizeof(status));
	}
	if (error == 0 && *retval > 0 && SCARG(uap, rusage)) {
		error = copyout(&ru, SCARG(uap, rusage), sizeof(ru));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(q, KTR_STRUCT))
			ktrrusage(q, &ru);
#endif
	}
	return (error);
}

/*
 * waitid(2): validate idtype/options, call dowait6() and copy the
 * siginfo out to userland.  Always returns 0 in *retval on success.
 */
int
sys_waitid(struct proc *q, void *v, register_t *retval)
{
	struct sys_waitid_args /* {
		syscallarg(idtype_t) idtype;
		syscallarg(id_t) id;
		syscallarg(siginfo_t) info;
		syscallarg(int) options;
	} */ *uap = v;
	siginfo_t info;
	idtype_t idtype = SCARG(uap, idtype);
	int options = SCARG(uap, options);
	int error;

	if (options &~ (WSTOPPED|WCONTINUED|WEXITED|WTRAPPED|WNOHANG|WNOWAIT))
		return (EINVAL);
	if ((options & (WSTOPPED|WCONTINUED|WEXITED|WTRAPPED)) == 0)
		return (EINVAL);
	if (idtype != P_ALL && idtype != P_PID && idtype != P_PGID)
		return (EINVAL);

	error = dowait6(q, idtype, SCARG(uap, id), NULL,
	    options, NULL, &info, retval);
	if (error == 0) {
		error = copyout(&info, SCARG(uap, info), sizeof(info));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(q, KTR_STRUCT))
			ktrsiginfo(q, &info);
#endif
	}
	if (error == 0)
		*retval = 0;
	return (error);
}

/*
 * Finish collecting a zombie child for 'waiter': either hand a
 * ptrace-attached child back to its original parent, or fold the
 * child's rusage into ours and destroy the process for good.
 */
void
proc_finish_wait(struct proc *waiter, struct proc *p)
{
	struct process *pr, *tr;
	struct rusage *rup;

	/*
	 * If we got the child via a ptrace 'attach',
	 * we need to give it back to the old parent.
	 */
	pr = p->p_p;
	if (pr->ps_oppid != 0 && (pr->ps_oppid != pr->ps_pptr->ps_pid) &&
	    (tr = prfind(pr->ps_oppid))) {
		pr->ps_oppid = 0;
		atomic_clearbits_int(&pr->ps_flags, PS_TRACED);
		process_reparent(pr, tr);
		prsignal(tr, SIGCHLD);
		wakeup(tr);
	} else {
		scheduler_wait_hook(waiter, p);
		rup = &waiter->p_p->ps_cru;
		ruadd(rup, pr->ps_ru);
		LIST_REMOVE(pr, ps_list);	/* off zombprocess */
		freepid(pr->ps_pid);
		process_zap(pr);
	}
}

/*
 * give process back to original parent or init(8)
 */
void
process_untrace(struct process *pr)
{
	struct process *ppr = NULL;

	KASSERT(pr->ps_flags & PS_TRACED);

	if (pr->ps_oppid != 0 &&
	    (pr->ps_oppid != pr->ps_pptr->ps_pid))
		ppr = prfind(pr->ps_oppid);

	/* not being traced any more */
	pr->ps_oppid = 0;
	atomic_clearbits_int(&pr->ps_flags, PS_TRACED);
	process_reparent(pr, ppr ? ppr : initprocess);
}

/*
 * Remove pr from its parent's orphan list, if it is on one.
 */
void
process_clear_orphan(struct process *pr)
{
	if (pr->ps_flags & PS_ORPHAN) {
		LIST_REMOVE(pr, ps_orphan);
		atomic_clearbits_int(&pr->ps_flags, PS_ORPHAN);
	}
}

/*
 * make process 'parent' the new parent of process 'child'.
 */
void
process_reparent(struct process *child, struct process *parent)
{

	if (child->ps_pptr == parent)
		return;

	KASSERT(child->ps_oppid == 0 ||
		child->ps_oppid == child->ps_pptr->ps_pid);

	LIST_REMOVE(child, ps_sibling);
	LIST_INSERT_HEAD(&parent->ps_children, child, ps_sibling);

	/*
	 * A traced child is recorded as an orphan of its current
	 * (pre-reparent) parent so that parent can still wait for it.
	 */
	process_clear_orphan(child);
	if (child->ps_flags & PS_TRACED) {
		atomic_setbits_int(&child->ps_flags, PS_ORPHAN);
		LIST_INSERT_HEAD(&child->ps_pptr->ps_orphans, child, ps_orphan);
	}

	child->ps_pptr = parent;
	child->ps_ppid = parent->ps_pid;
}

/*
 * Final destruction of a process: unlink it from its process group,
 * sibling and orphan lists, release its remaining references and
 * return its structures to their pools.  Called once the last thread
 * is gone (ps_threadcnt == 1, the main thread) and no one will wait
 * for the process any more.
 */
void
process_zap(struct process *pr)
{
	struct vnode *otvp;
	struct proc *p = pr->ps_mainproc;

	/*
	 * Finally finished with old proc entry.
	 * Unlink it from its process group and free it.
	 */
	leavepgrp(pr);
	LIST_REMOVE(pr, ps_sibling);
	process_clear_orphan(pr);

	/*
	 * Decrement the count of procs running with this uid.
	 */
	(void)chgproccnt(pr->ps_ucred->cr_ruid, -1);

	/*
	 * Release reference to text vnode
	 */
	otvp = pr->ps_textvp;
	pr->ps_textvp = NULL;
	if (otvp)
		vrele(otvp);

	KASSERT(pr->ps_threadcnt == 1);
	if (pr->ps_ptstat != NULL)
		free(pr->ps_ptstat, M_SUBPROC, sizeof(*pr->ps_ptstat));
	pool_put(&rusage_pool, pr->ps_ru);
	KASSERT(TAILQ_EMPTY(&pr->ps_threads));
	sigactsfree(pr->ps_sigacts);
	lim_free(pr->ps_limit);
	crfree(pr->ps_ucred);
	pool_put(&process_pool, pr);
	nprocesses--;

	/* finally free the main thread itself */
	proc_free(p);
}