1 /* $OpenBSD: kern_exit.c,v 1.240 2024/12/17 14:45:00 claudio Exp $ */ 2 /* $NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $ */ 3 4 /* 5 * Copyright (c) 1982, 1986, 1989, 1991, 1993 6 * The Regents of the University of California. All rights reserved. 7 * (c) UNIX System Laboratories, Inc. 8 * All or some portions of this file are derived from material licensed 9 * to the University of California by American Telephone and Telegraph 10 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 11 * the permission of UNIX System Laboratories, Inc. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. Neither the name of the University nor the names of its contributors 22 * may be used to endorse or promote products derived from this software 23 * without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 36 * 37 * @(#)kern_exit.c 8.7 (Berkeley) 2/12/94 38 */ 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/proc.h> 43 #include <sys/time.h> 44 #include <sys/resource.h> 45 #include <sys/wait.h> 46 #include <sys/vnode.h> 47 #include <sys/malloc.h> 48 #include <sys/resourcevar.h> 49 #include <sys/ptrace.h> 50 #include <sys/acct.h> 51 #include <sys/filedesc.h> 52 #include <sys/signalvar.h> 53 #include <sys/sched.h> 54 #include <sys/ktrace.h> 55 #include <sys/pool.h> 56 #include <sys/mutex.h> 57 #ifdef SYSVSEM 58 #include <sys/sem.h> 59 #endif 60 #include <sys/witness.h> 61 62 #include <sys/mount.h> 63 #include <sys/syscallargs.h> 64 65 #include <uvm/uvm_extern.h> 66 67 #include "kcov.h" 68 #if NKCOV > 0 69 #include <sys/kcov.h> 70 #endif 71 72 void proc_finish_wait(struct proc *, struct process *); 73 void process_clear_orphan(struct process *); 74 void process_zap(struct process *); 75 void proc_free(struct proc *); 76 void unveil_destroy(struct process *ps); 77 78 /* 79 * exit -- 80 * Death of process. 
 */
int
sys_exit(struct proc *p, void *v, register_t *retval)
{
	struct sys_exit_args /* {
		syscallarg(int) rval;
	} */ *uap = v;

	exit1(p, SCARG(uap, rval), 0, EXIT_NORMAL);
	/* NOTREACHED */
	return (0);
}

int
sys___threxit(struct proc *p, void *v, register_t *retval)
{
	struct sys___threxit_args /* {
		syscallarg(pid_t *) notdead;
	} */ *uap = v;

	/* Let other threads know this one is dead before exiting. */
	if (SCARG(uap, notdead) != NULL) {
		pid_t zero = 0;
		if (copyout(&zero, SCARG(uap, notdead), sizeof(zero)))
			psignal(p, SIGSEGV);
	}
	exit1(p, 0, 0, EXIT_THREAD);

	return (0);
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(struct proc *p, int xexit, int xsig, int flags)
{
	struct process *pr, *qr, *nqr;
	struct rusage *rup;

	atomic_setbits_int(&p->p_flag, P_WEXIT);

	pr = p->p_p;

	/* single-threaded? */
	if (!P_HASSIBLING(p)) {
		flags = EXIT_NORMAL;
	} else {
		/* nope, multi-threaded */
		if (flags == EXIT_NORMAL)
			single_thread_set(p, SINGLE_EXIT);
	}

	if (flags == EXIT_NORMAL && !(pr->ps_flags & PS_EXITING)) {
		if (pr->ps_pid == 1)
			panic("init died (signal %d, exit %d)", xsig, xexit);

		atomic_setbits_int(&pr->ps_flags, PS_EXITING);
		pr->ps_xexit = xexit;
		pr->ps_xsig = xsig;

		/*
		 * If parent is waiting for us to exit or exec, PS_PPWAIT
		 * is set; we wake up the parent early to avoid deadlock.
		 */
		if (pr->ps_flags & PS_PPWAIT) {
			atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
			atomic_clearbits_int(&pr->ps_pptr->ps_flags,
			    PS_ISPWAIT);
			wakeup(pr->ps_pptr);
		}

		/* Wait for concurrent `allprocess' loops */
		refcnt_finalize(&pr->ps_refcnt, "psdtor");
	}

	/* unlink ourselves from the active threads */
	mtx_enter(&pr->ps_mtx);
	TAILQ_REMOVE(&pr->ps_threads, p, p_thr_link);
	pr->ps_threadcnt--;
	pr->ps_exitcnt++;

	/*
	 * if somebody else wants to take us to single threaded mode,
	 * count ourselves out.
	 */
	if (pr->ps_single) {
		if (--pr->ps_singlecnt == 0)
			wakeup(&pr->ps_singlecnt);
	}

	/* proc is off ps_threads list so update accounting of process now */
	tuagg_add_runtime();
	tuagg_add_process(pr, p);

	if ((p->p_flag & P_THREAD) == 0) {
		/* main thread gotta wait because it has the pid, et al */
		while (pr->ps_threadcnt + pr->ps_exitcnt > 1)
			msleep_nsec(&pr->ps_threads, &pr->ps_mtx, PWAIT,
			    "thrdeath", INFSLP);
	}
	mtx_leave(&pr->ps_mtx);

	/*
	 * Allocate the process-wide rusage exactly once; a concurrently
	 * exiting sibling thread may have installed it already, in which
	 * case our fresh allocation is returned to the pool.
	 */
	rup = pr->ps_ru;
	if (rup == NULL) {
		rup = pool_get(&rusage_pool, PR_WAITOK | PR_ZERO);
		if (pr->ps_ru == NULL) {
			pr->ps_ru = rup;
		} else {
			pool_put(&rusage_pool, rup);
			rup = pr->ps_ru;
		}
	}
	p->p_siglist = 0;
	if ((p->p_flag & P_THREAD) == 0)
		pr->ps_siglist = 0;

	kqpoll_exit();

#if NKCOV > 0
	kcov_exit(p);
#endif

	if ((p->p_flag & P_THREAD) == 0) {
		/* process-wide teardown, done only by the main thread */
		if (pr->ps_flags & PS_PROFIL)
			stopprofclock(pr);

		sigio_freelist(&pr->ps_sigiolst);

		/* close open files and release open-file table */
		fdfree(p);

		cancel_all_itimers();

		timeout_del(&pr->ps_rucheck_to);
#ifdef SYSVSEM
		semexit(pr);
#endif
		killjobc(pr);
#ifdef ACCOUNTING
		acct_process(p);
#endif

#ifdef KTRACE
		/* release trace file */
		if (pr->ps_tracevp)
			ktrcleartrace(pr);
#endif

		unveil_destroy(pr);

		free(pr->ps_pin.pn_pins, M_PINSYSCALL,
		    pr->ps_pin.pn_npins * sizeof(u_int));
		free(pr->ps_libcpin.pn_pins, M_PINSYSCALL,
		    pr->ps_libcpin.pn_npins * sizeof(u_int));

		/*
		 * If parent has the SAS_NOCLDWAIT flag set, we're not
		 * going to become a zombie.
		 */
		if (pr->ps_pptr->ps_sigacts->ps_sigflags & SAS_NOCLDWAIT)
			atomic_setbits_int(&pr->ps_flags, PS_NOZOMBIE);
	}

	p->p_fd = NULL;		/* zap the thread's copy */

	/* Release the thread's read reference of resource limit structure. */
	if (p->p_limit != NULL) {
		struct plimit *limit;

		limit = p->p_limit;
		p->p_limit = NULL;
		lim_free(limit);
	}

	/*
	 * Remove proc from pidhash chain and allproc so looking
	 * it up won't work.  We will put the proc on the
	 * deadproc list later (using the p_hash member), and
	 * wake up the reaper when we do.  If this is the last
	 * thread of a process that isn't PS_NOZOMBIE, we'll put
	 * the process on the zombprocess list below.
	 */
	/*
	 * NOTE: WE ARE NO LONGER ALLOWED TO SLEEP!
	 */
	p->p_stat = SDEAD;

	LIST_REMOVE(p, p_hash);
	LIST_REMOVE(p, p_list);

	if ((p->p_flag & P_THREAD) == 0) {
		LIST_REMOVE(pr, ps_hash);
		LIST_REMOVE(pr, ps_list);

		if ((pr->ps_flags & PS_NOZOMBIE) == 0)
			LIST_INSERT_HEAD(&zombprocess, pr, ps_list);
		else {
			/*
			 * Not going to be a zombie, so it's now off all
			 * the lists scanned by ispidtaken(), so block
			 * fast reuse of the pid now.
			 */
			freepid(pr->ps_pid);
		}

		/*
		 * Reparent children to their original parent, in case
		 * they were being traced, or to init(8).
		 */
		qr = LIST_FIRST(&pr->ps_children);
		if (qr)		/* only need this if any child is S_ZOMB */
			wakeup(initprocess);
		for (; qr != NULL; qr = nqr) {
			nqr = LIST_NEXT(qr, ps_sibling);
			/*
			 * Traced processes are killed since their
			 * existence means someone is screwing up.
			 */
			mtx_enter(&qr->ps_mtx);
			if (qr->ps_flags & PS_TRACED &&
			    !(qr->ps_flags & PS_EXITING)) {
				process_untrace(qr);
				mtx_leave(&qr->ps_mtx);

				/*
				 * If single threading is active,
				 * direct the signal to the active
				 * thread to avoid deadlock.
				 */
				if (qr->ps_single)
					ptsignal(qr->ps_single, SIGKILL,
					    STHREAD);
				else
					prsignal(qr, SIGKILL);
			} else {
				process_reparent(qr, initprocess);
				mtx_leave(&qr->ps_mtx);
			}
		}

		/*
		 * Make sure orphans won't remember the exiting process.
		 */
		while ((qr = LIST_FIRST(&pr->ps_orphans)) != NULL) {
			mtx_enter(&qr->ps_mtx);
			KASSERT(qr->ps_opptr == pr);
			qr->ps_opptr = NULL;
			process_clear_orphan(qr);
			mtx_leave(&qr->ps_mtx);
		}
	}

	/* add thread's accumulated rusage into the process's total */
	ruadd(rup, &p->p_ru);

	/*
	 * clear %cpu usage during swap
	 */
	p->p_pctcpu = 0;

	if ((p->p_flag & P_THREAD) == 0) {
		/*
		 * Final thread has died, so add on our children's rusage
		 * and calculate the total times.
		 */
		calcru(&pr->ps_tu, &rup->ru_utime, &rup->ru_stime, NULL);
		ruadd(rup, &pr->ps_cru);

		/*
		 * Notify parent that we're gone.  If we're not going to
		 * become a zombie, reparent to process 1 (init) so that
		 * we can wake our original parent to possibly unblock
		 * wait4() to return ECHILD.
		 */
		mtx_enter(&pr->ps_mtx);
		if (pr->ps_flags & PS_NOZOMBIE) {
			struct process *ppr = pr->ps_pptr;
			process_reparent(pr, initprocess);
			wakeup(ppr);
		}
		mtx_leave(&pr->ps_mtx);
	}

	/* just a thread? check if last one standing. */
	if (p->p_flag & P_THREAD) {
		/* scheduler_wait_hook(pr->ps_mainproc, p); XXX */
		mtx_enter(&pr->ps_mtx);
		pr->ps_exitcnt--;
		if (pr->ps_threadcnt + pr->ps_exitcnt == 1)
			wakeup(&pr->ps_threads);
		mtx_leave(&pr->ps_mtx);
	}

	/*
	 * Other substructures are freed from reaper and wait().
	 */

	/*
	 * Finally, call machine-dependent code to switch to a new
	 * context (possibly the idle context).  Once we are no longer
	 * using the dead process's vmspace and stack, exit2() will be
	 * called to schedule those resources to be released by the
	 * reaper thread.
	 *
	 * Note that cpu_exit() will end with a call equivalent to
	 * cpu_switch(), finishing our execution (pun intended).
	 */
	cpu_exit(p);
	panic("cpu_exit returned");
}

/*
 * Locking of this proclist is special; it's accessed in a
 * critical section of process exit, and thus locking it can't
 * modify interrupt state.  We use a simple spin lock for this
 * proclist.  We use the p_hash member to linkup to deadproc.
 */
struct mutex deadproc_mutex =
    MUTEX_INITIALIZER_FLAGS(IPL_NONE, "deadproc", MTX_NOWITNESS);
struct proclist deadproc = LIST_HEAD_INITIALIZER(deadproc);

/*
 * We are called from sched_idle() once it is safe to schedule the
 * dead process's resources to be freed. So this is not allowed to sleep.
 *
 * We lock the deadproc list, place the proc on that list (using
 * the p_hash member), and wake up the reaper.
 */
void
exit2(struct proc *p)
{
	/* account the remainder of time spent in exit1() */
	mtx_enter(&p->p_p->ps_mtx);
	tuagg_add_process(p->p_p, p);
	mtx_leave(&p->p_p->ps_mtx);

	mtx_enter(&deadproc_mutex);
	LIST_INSERT_HEAD(&deadproc, p, p_hash);
	mtx_leave(&deadproc_mutex);

	wakeup(&deadproc);
}

/*
 * Release a thread's last per-thread resources: its credential
 * reference, the proc structure itself, and the global thread count.
 */
void
proc_free(struct proc *p)
{
	crfree(p->p_ucred);
	pool_put(&proc_pool, p);
	atomic_dec_int(&nthreads);
}

/*
 * Process reaper.  This is run by a kernel thread to free the resources
 * of a dead process.  Once the resources are free, the process becomes
 * a zombie, and the parent is allowed to read the undead's status.
438 */ 439 void 440 reaper(void *arg) 441 { 442 struct proc *p; 443 444 KERNEL_UNLOCK(); 445 446 SCHED_ASSERT_UNLOCKED(); 447 448 for (;;) { 449 mtx_enter(&deadproc_mutex); 450 while ((p = LIST_FIRST(&deadproc)) == NULL) 451 msleep_nsec(&deadproc, &deadproc_mutex, PVM, "reaper", 452 INFSLP); 453 454 /* Remove us from the deadproc list. */ 455 LIST_REMOVE(p, p_hash); 456 mtx_leave(&deadproc_mutex); 457 458 WITNESS_THREAD_EXIT(p); 459 460 /* 461 * Free the VM resources we're still holding on to. 462 * We must do this from a valid thread because doing 463 * so may block. 464 */ 465 uvm_uarea_free(p); 466 p->p_vmspace = NULL; /* zap the thread's copy */ 467 468 if (p->p_flag & P_THREAD) { 469 /* Just a thread */ 470 proc_free(p); 471 } else { 472 struct process *pr = p->p_p; 473 474 /* Release the rest of the process's vmspace */ 475 uvm_exit(pr); 476 477 KERNEL_LOCK(); 478 if ((pr->ps_flags & PS_NOZOMBIE) == 0) { 479 /* Process is now a true zombie. */ 480 atomic_setbits_int(&pr->ps_flags, PS_ZOMBIE); 481 } 482 483 /* Notify listeners of our demise and clean up. */ 484 knote_processexit(pr); 485 486 if (pr->ps_flags & PS_ZOMBIE) { 487 /* Post SIGCHLD and wake up parent. */ 488 prsignal(pr->ps_pptr, SIGCHLD); 489 wakeup(pr->ps_pptr); 490 } else { 491 /* No one will wait for us, just zap it. 
*/ 492 process_zap(pr); 493 } 494 KERNEL_UNLOCK(); 495 } 496 } 497 } 498 499 int 500 dowait6(struct proc *q, idtype_t idtype, id_t id, int *statusp, int options, 501 struct rusage *rusage, siginfo_t *info, register_t *retval) 502 { 503 int nfound; 504 struct process *pr; 505 int error; 506 507 if (info != NULL) 508 memset(info, 0, sizeof(*info)); 509 510 loop: 511 nfound = 0; 512 LIST_FOREACH(pr, &q->p_p->ps_children, ps_sibling) { 513 if ((pr->ps_flags & PS_NOZOMBIE) || 514 (idtype == P_PID && id != pr->ps_pid) || 515 (idtype == P_PGID && id != pr->ps_pgid)) 516 continue; 517 518 nfound++; 519 if ((options & WEXITED) && (pr->ps_flags & PS_ZOMBIE)) { 520 *retval = pr->ps_pid; 521 if (info != NULL) { 522 info->si_pid = pr->ps_pid; 523 info->si_uid = pr->ps_ucred->cr_uid; 524 info->si_signo = SIGCHLD; 525 if (pr->ps_xsig == 0) { 526 info->si_code = CLD_EXITED; 527 info->si_status = pr->ps_xexit; 528 } else if (WCOREDUMP(pr->ps_xsig)) { 529 info->si_code = CLD_DUMPED; 530 info->si_status = _WSTATUS(pr->ps_xsig); 531 } else { 532 info->si_code = CLD_KILLED; 533 info->si_status = _WSTATUS(pr->ps_xsig); 534 } 535 } 536 537 if (statusp != NULL) 538 *statusp = W_EXITCODE(pr->ps_xexit, 539 pr->ps_xsig); 540 if (rusage != NULL) 541 memcpy(rusage, pr->ps_ru, sizeof(*rusage)); 542 if ((options & WNOWAIT) == 0) 543 proc_finish_wait(q, pr); 544 return (0); 545 } 546 if ((options & WTRAPPED) && (pr->ps_flags & PS_TRACED) && 547 (pr->ps_flags & PS_WAITED) == 0 && 548 (pr->ps_flags & PS_TRAPPED)) { 549 if (single_thread_wait(pr, 0)) 550 goto loop; 551 552 if ((options & WNOWAIT) == 0) 553 atomic_setbits_int(&pr->ps_flags, PS_WAITED); 554 555 *retval = pr->ps_pid; 556 if (info != NULL) { 557 info->si_pid = pr->ps_pid; 558 info->si_uid = pr->ps_ucred->cr_uid; 559 info->si_signo = SIGCHLD; 560 info->si_code = CLD_TRAPPED; 561 info->si_status = pr->ps_xsig; 562 } 563 564 if (statusp != NULL) 565 *statusp = W_STOPCODE(pr->ps_xsig); 566 if (rusage != NULL) 567 memset(rusage, 0, 
sizeof(*rusage)); 568 return (0); 569 } 570 if (((pr->ps_flags & PS_TRACED) || (options & WUNTRACED)) && 571 (pr->ps_flags & PS_WAITED) == 0 && 572 (pr->ps_flags & PS_STOPPED) && 573 (pr->ps_flags & PS_TRAPPED) == 0) { 574 if ((options & WNOWAIT) == 0) 575 atomic_setbits_int(&pr->ps_flags, PS_WAITED); 576 577 *retval = pr->ps_pid; 578 if (info != 0) { 579 info->si_pid = pr->ps_pid; 580 info->si_uid = pr->ps_ucred->cr_uid; 581 info->si_signo = SIGCHLD; 582 info->si_code = CLD_STOPPED; 583 info->si_status = pr->ps_xsig; 584 } 585 586 if (statusp != NULL) 587 *statusp = W_STOPCODE(pr->ps_xsig); 588 if (rusage != NULL) 589 memset(rusage, 0, sizeof(*rusage)); 590 return (0); 591 } 592 if ((options & WCONTINUED) && (pr->ps_flags & PS_CONTINUED)) { 593 if ((options & WNOWAIT) == 0) 594 atomic_clearbits_int(&pr->ps_flags, 595 PS_CONTINUED); 596 597 *retval = pr->ps_pid; 598 if (info != NULL) { 599 info->si_pid = pr->ps_pid; 600 info->si_uid = pr->ps_ucred->cr_uid; 601 info->si_signo = SIGCHLD; 602 info->si_code = CLD_CONTINUED; 603 info->si_status = SIGCONT; 604 } 605 606 if (statusp != NULL) 607 *statusp = _WCONTINUED; 608 if (rusage != NULL) 609 memset(rusage, 0, sizeof(*rusage)); 610 return (0); 611 } 612 } 613 /* 614 * Look in the orphans list too, to allow the parent to 615 * collect its child's exit status even if child is being 616 * debugged. 617 * 618 * Debugger detaches from the parent upon successful 619 * switch-over from parent to child. At this point due to 620 * re-parenting the parent loses the child to debugger and a 621 * wait4(2) call would report that it has no children to wait 622 * for. By maintaining a list of orphans we allow the parent 623 * to successfully wait until the child becomes a zombie. 
624 */ 625 if (nfound == 0) { 626 LIST_FOREACH(pr, &q->p_p->ps_orphans, ps_orphan) { 627 if ((pr->ps_flags & PS_NOZOMBIE) || 628 (idtype == P_PID && id != pr->ps_pid) || 629 (idtype == P_PGID && id != pr->ps_pgid)) 630 continue; 631 nfound++; 632 break; 633 } 634 } 635 if (nfound == 0) 636 return (ECHILD); 637 if (options & WNOHANG) { 638 *retval = 0; 639 return (0); 640 } 641 if ((error = tsleep_nsec(q->p_p, PWAIT | PCATCH, "wait", INFSLP)) != 0) 642 return (error); 643 goto loop; 644 } 645 646 int 647 sys_wait4(struct proc *q, void *v, register_t *retval) 648 { 649 struct sys_wait4_args /* { 650 syscallarg(pid_t) pid; 651 syscallarg(int *) status; 652 syscallarg(int) options; 653 syscallarg(struct rusage *) rusage; 654 } */ *uap = v; 655 struct rusage ru; 656 pid_t pid = SCARG(uap, pid); 657 int options = SCARG(uap, options); 658 int status, error; 659 idtype_t idtype; 660 id_t id; 661 662 if (SCARG(uap, options) &~ (WUNTRACED|WNOHANG|WCONTINUED)) 663 return (EINVAL); 664 options |= WEXITED | WTRAPPED; 665 666 if (SCARG(uap, pid) == WAIT_MYPGRP) { 667 idtype = P_PGID; 668 id = q->p_p->ps_pgid; 669 } else if (SCARG(uap, pid) == WAIT_ANY) { 670 idtype = P_ALL; 671 id = 0; 672 } else if (pid < 0) { 673 idtype = P_PGID; 674 id = -pid; 675 } else { 676 idtype = P_PID; 677 id = pid; 678 } 679 680 error = dowait6(q, idtype, id, 681 SCARG(uap, status) ? &status : NULL, options, 682 SCARG(uap, rusage) ? 
&ru : NULL, NULL, retval); 683 if (error == 0 && *retval > 0 && SCARG(uap, status)) { 684 error = copyout(&status, SCARG(uap, status), sizeof(status)); 685 } 686 if (error == 0 && *retval > 0 && SCARG(uap, rusage)) { 687 error = copyout(&ru, SCARG(uap, rusage), sizeof(ru)); 688 #ifdef KTRACE 689 if (error == 0 && KTRPOINT(q, KTR_STRUCT)) 690 ktrrusage(q, &ru); 691 #endif 692 } 693 return (error); 694 } 695 696 int 697 sys_waitid(struct proc *q, void *v, register_t *retval) 698 { 699 struct sys_waitid_args /* { 700 syscallarg(idtype_t) idtype; 701 syscallarg(id_t) id; 702 syscallarg(siginfo_t) info; 703 syscallarg(int) options; 704 } */ *uap = v; 705 siginfo_t info; 706 idtype_t idtype = SCARG(uap, idtype); 707 int options = SCARG(uap, options); 708 int error; 709 710 if (options &~ (WSTOPPED|WCONTINUED|WEXITED|WTRAPPED|WNOHANG|WNOWAIT)) 711 return (EINVAL); 712 if ((options & (WSTOPPED|WCONTINUED|WEXITED|WTRAPPED)) == 0) 713 return (EINVAL); 714 if (idtype != P_ALL && idtype != P_PID && idtype != P_PGID) 715 return (EINVAL); 716 717 error = dowait6(q, idtype, SCARG(uap, id), NULL, 718 options, NULL, &info, retval); 719 if (error == 0) { 720 error = copyout(&info, SCARG(uap, info), sizeof(info)); 721 #ifdef KTRACE 722 if (error == 0 && KTRPOINT(q, KTR_STRUCT)) 723 ktrsiginfo(q, &info); 724 #endif 725 } 726 if (error == 0) 727 *retval = 0; 728 return (error); 729 } 730 731 void 732 proc_finish_wait(struct proc *waiter, struct process *pr) 733 { 734 struct process *tr; 735 struct rusage *rup; 736 737 /* 738 * If we got the child via a ptrace 'attach', 739 * we need to give it back to the old parent. 
740 */ 741 mtx_enter(&pr->ps_mtx); 742 if (pr->ps_opptr != NULL && (pr->ps_opptr != pr->ps_pptr)) { 743 tr = pr->ps_opptr; 744 pr->ps_opptr = NULL; 745 atomic_clearbits_int(&pr->ps_flags, PS_TRACED); 746 process_reparent(pr, tr); 747 mtx_leave(&pr->ps_mtx); 748 prsignal(tr, SIGCHLD); 749 wakeup(tr); 750 } else { 751 mtx_leave(&pr->ps_mtx); 752 scheduler_wait_hook(waiter, pr->ps_mainproc); 753 rup = &waiter->p_p->ps_cru; 754 ruadd(rup, pr->ps_ru); 755 LIST_REMOVE(pr, ps_list); /* off zombprocess */ 756 freepid(pr->ps_pid); 757 process_zap(pr); 758 } 759 } 760 761 /* 762 * give process back to original parent or init(8) 763 */ 764 void 765 process_untrace(struct process *pr) 766 { 767 struct process *ppr = NULL; 768 769 KASSERT(pr->ps_flags & PS_TRACED); 770 MUTEX_ASSERT_LOCKED(&pr->ps_mtx); 771 772 if (pr->ps_opptr != NULL && 773 (pr->ps_opptr != pr->ps_pptr)) 774 ppr = pr->ps_opptr; 775 776 /* not being traced any more */ 777 pr->ps_opptr = NULL; 778 atomic_clearbits_int(&pr->ps_flags, PS_TRACED); 779 process_reparent(pr, ppr ? ppr : initprocess); 780 } 781 782 void 783 process_clear_orphan(struct process *pr) 784 { 785 if (pr->ps_flags & PS_ORPHAN) { 786 LIST_REMOVE(pr, ps_orphan); 787 atomic_clearbits_int(&pr->ps_flags, PS_ORPHAN); 788 } 789 } 790 791 /* 792 * make process 'parent' the new parent of process 'child'. 
 */
void
process_reparent(struct process *child, struct process *parent)
{

	if (child->ps_pptr == parent)
		return;

	KASSERT(child->ps_opptr == NULL ||
	    child->ps_opptr == child->ps_pptr);

	LIST_REMOVE(child, ps_sibling);
	LIST_INSERT_HEAD(&parent->ps_children, child, ps_sibling);

	/*
	 * A traced child is also kept on its (pre-reparent) parent's
	 * orphan list so the real parent can still wait for it.
	 */
	process_clear_orphan(child);
	if (child->ps_flags & PS_TRACED) {
		atomic_setbits_int(&child->ps_flags, PS_ORPHAN);
		LIST_INSERT_HEAD(&child->ps_pptr->ps_orphans, child, ps_orphan);
	}

	MUTEX_ASSERT_LOCKED(&child->ps_mtx);
	child->ps_pptr = parent;
	child->ps_ppid = parent->ps_pid;
}

/*
 * Free the last remnants of a dead process: unlink it from every list,
 * drop its remaining references, and return its memory to the pools.
 */
void
process_zap(struct process *pr)
{
	struct vnode *otvp;
	struct proc *p = pr->ps_mainproc;

	/*
	 * Finally finished with old proc entry.
	 * Unlink it from its process group and free it.
	 */
	leavepgrp(pr);
	LIST_REMOVE(pr, ps_sibling);
	process_clear_orphan(pr);

	/*
	 * Decrement the count of procs running with this uid.
	 */
	(void)chgproccnt(pr->ps_ucred->cr_ruid, -1);

	/*
	 * Release reference to text vnode
	 */
	otvp = pr->ps_textvp;
	pr->ps_textvp = NULL;
	if (otvp)
		vrele(otvp);

	KASSERT(pr->ps_threadcnt == 0);
	KASSERT(pr->ps_exitcnt == 1);
	if (pr->ps_ptstat != NULL)
		free(pr->ps_ptstat, M_SUBPROC, sizeof(*pr->ps_ptstat));
	pool_put(&rusage_pool, pr->ps_ru);
	KASSERT(TAILQ_EMPTY(&pr->ps_threads));
	sigactsfree(pr->ps_sigacts);
	lim_free(pr->ps_limit);
	crfree(pr->ps_ucred);
	pool_put(&process_pool, pr);
	nprocesses--;

	proc_free(p);
}