/*	$OpenBSD: kern_exit.c,v 1.235 2024/10/08 09:05:40 claudio Exp $	*/
/*	$NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/wait.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/ptrace.h>
#include <sys/acct.h>
#include <sys/filedesc.h>
#include <sys/signalvar.h>
#include <sys/sched.h>
#include <sys/ktrace.h>
#include <sys/pool.h>
#include <sys/mutex.h>
#ifdef SYSVSEM
#include <sys/sem.h>
#endif
#include <sys/witness.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

#include "kcov.h"
#if NKCOV > 0
#include <sys/kcov.h>
#endif

void	proc_finish_wait(struct proc *, struct process *);
void	process_clear_orphan(struct process *);
void	process_zap(struct process *);
void	proc_free(struct proc *);
void	unveil_destroy(struct process *ps);

/*
 * exit --
 *	Death of process.
 */
int
sys_exit(struct proc *p, void *v, register_t *retval)
{
	struct sys_exit_args /* {
		syscallarg(int) rval;
	} */ *uap = v;

	exit1(p, SCARG(uap, rval), 0, EXIT_NORMAL);
	/* NOTREACHED */
	return (0);
}

int
sys___threxit(struct proc *p, void *v, register_t *retval)
{
	struct sys___threxit_args /* {
		syscallarg(pid_t *) notdead;
	} */ *uap = v;

	if (SCARG(uap, notdead) != NULL) {
		pid_t zero = 0;

		if (copyout(&zero, SCARG(uap, notdead), sizeof(zero)))
			psignal(p, SIGSEGV);
	}
	exit1(p, 0, 0, EXIT_THREAD);

	return (0);
}
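
/*
 * Illustrative sketch (not part of the original file): a userland
 * thread library can pass a `notdead' pointer so that a joining
 * thread sleeping or spinning on that word observes the kernel
 * store 0 through it on the way out, as done by the copyout above.
 * The names below are hypothetical.
 *
 *	void
 *	my_thread_exit(struct my_thread *t)
 *	{
 *		... run cleanup handlers, unlink t from the library ...
 *		__threxit(&t->tid);	(never returns; t->tid becomes 0)
 *	}
 */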

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(struct proc *p, int xexit, int xsig, int flags)
{
	struct process *pr, *qr, *nqr;
	struct rusage *rup;
	struct timespec ts, pts;

	atomic_setbits_int(&p->p_flag, P_WEXIT);

	pr = p->p_p;

	/* single-threaded? */
	if (!P_HASSIBLING(p)) {
		flags = EXIT_NORMAL;
	} else {
		/* nope, multi-threaded */
		if (flags == EXIT_NORMAL)
			single_thread_set(p, SINGLE_EXIT);
	}

	if (flags == EXIT_NORMAL && !(pr->ps_flags & PS_EXITING)) {
		if (pr->ps_pid == 1)
			panic("init died (signal %d, exit %d)", xsig, xexit);

		atomic_setbits_int(&pr->ps_flags, PS_EXITING);
		pr->ps_xexit = xexit;
		pr->ps_xsig = xsig;

		/*
		 * If parent is waiting for us to exit or exec, PS_PPWAIT
		 * is set; we wake up the parent early to avoid deadlock.
		 */
		if (pr->ps_flags & PS_PPWAIT) {
			atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
			atomic_clearbits_int(&pr->ps_pptr->ps_flags,
			    PS_ISPWAIT);
			wakeup(pr->ps_pptr);
		}

		/* Wait for concurrent `allprocess' loops */
		refcnt_finalize(&pr->ps_refcnt, "psdtor");
	}

	/* unlink ourselves from the active threads */
	mtx_enter(&pr->ps_mtx);
	TAILQ_REMOVE(&pr->ps_threads, p, p_thr_link);
	pr->ps_threadcnt--;
	pr->ps_exitcnt++;

	/*
	 * if somebody else wants to take us to single threaded mode,
	 * count ourselves out.
	 */
	if (pr->ps_single) {
		if (--pr->ps_singlecnt == 0)
			wakeup(&pr->ps_singlecnt);
	}

	/* proc is off ps_threads list so update accounting of process now */
	nanouptime(&ts);
	if (timespeccmp(&ts, &curcpu()->ci_schedstate.spc_runtime, <))
		timespecclear(&pts);
	else
		timespecsub(&ts, &curcpu()->ci_schedstate.spc_runtime, &pts);
	tu_enter(&p->p_tu);
	timespecadd(&p->p_tu.tu_runtime, &pts, &p->p_tu.tu_runtime);
	tu_leave(&p->p_tu);
	/* adjust spc_runtime to not double account the runtime from above */
	curcpu()->ci_schedstate.spc_runtime = ts;
	tuagg_add_process(p->p_p, p);
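	/*
	 * Explanatory note on the accounting above: pts is the tail of
	 * the current run slice (uptime minus this CPU's spc_runtime,
	 * clamped to zero).  It is folded into the thread's p_tu, and
	 * spc_runtime is then advanced to "now" so that the same
	 * interval cannot be charged a second time at the final
	 * context switch.
	 */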

	if ((p->p_flag & P_THREAD) == 0) {
		/* main thread gotta wait because it has the pid, et al */
		while (pr->ps_threadcnt + pr->ps_exitcnt > 1)
			msleep_nsec(&pr->ps_threads, &pr->ps_mtx, PWAIT,
			    "thrdeath", INFSLP);
	}
	mtx_leave(&pr->ps_mtx);

	rup = pr->ps_ru;
	if (rup == NULL) {
		rup = pool_get(&rusage_pool, PR_WAITOK | PR_ZERO);
		if (pr->ps_ru == NULL) {
			pr->ps_ru = rup;
		} else {
			pool_put(&rusage_pool, rup);
			rup = pr->ps_ru;
		}
	}
	p->p_siglist = 0;
	if ((p->p_flag & P_THREAD) == 0)
		pr->ps_siglist = 0;

	kqpoll_exit();

#if NKCOV > 0
	kcov_exit(p);
#endif

	if ((p->p_flag & P_THREAD) == 0) {
		if (pr->ps_flags & PS_PROFIL)
			stopprofclock(pr);

		sigio_freelist(&pr->ps_sigiolst);

		/* close open files and release open-file table */
		fdfree(p);

		cancel_all_itimers();

		timeout_del(&pr->ps_rucheck_to);
#ifdef SYSVSEM
		semexit(pr);
#endif
		killjobc(pr);
#ifdef ACCOUNTING
		acct_process(p);
#endif

#ifdef KTRACE
		/* release trace file */
		if (pr->ps_tracevp)
			ktrcleartrace(pr);
#endif

		unveil_destroy(pr);

		free(pr->ps_pin.pn_pins, M_PINSYSCALL,
		    pr->ps_pin.pn_npins * sizeof(u_int));
		free(pr->ps_libcpin.pn_pins, M_PINSYSCALL,
		    pr->ps_libcpin.pn_npins * sizeof(u_int));

		/*
		 * If parent has the SAS_NOCLDWAIT flag set, we're not
		 * going to become a zombie.
		 */
		if (pr->ps_pptr->ps_sigacts->ps_sigflags & SAS_NOCLDWAIT)
			atomic_setbits_int(&pr->ps_flags, PS_NOZOMBIE);
	}

	p->p_fd = NULL;		/* zap the thread's copy */

	/* Release the thread's read reference of resource limit structure. */
	if (p->p_limit != NULL) {
		struct plimit *limit;

		limit = p->p_limit;
		p->p_limit = NULL;
		lim_free(limit);
	}

	/*
	 * Remove proc from pidhash chain and allproc so looking
	 * it up won't work.  We will put the proc on the
	 * deadproc list later (using the p_hash member), and
	 * wake up the reaper when we do.  If this is the last
	 * thread of a process that isn't PS_NOZOMBIE, we'll put
	 * the process on the zombprocess list below.
	 */
	/*
	 * NOTE: WE ARE NO LONGER ALLOWED TO SLEEP!
	 */
	p->p_stat = SDEAD;

	LIST_REMOVE(p, p_hash);
	LIST_REMOVE(p, p_list);

	if ((p->p_flag & P_THREAD) == 0) {
		LIST_REMOVE(pr, ps_hash);
		LIST_REMOVE(pr, ps_list);

		if ((pr->ps_flags & PS_NOZOMBIE) == 0)
			LIST_INSERT_HEAD(&zombprocess, pr, ps_list);
		else {
			/*
			 * Not going to be a zombie, so it's now off all
			 * the lists scanned by ispidtaken(), so block
			 * fast reuse of the pid now.
			 */
			freepid(pr->ps_pid);
		}

		/*
		 * Reparent children to their original parent, in case
		 * they were being traced, or to init(8).
		 */
		qr = LIST_FIRST(&pr->ps_children);
		if (qr)		/* only need this if any child is S_ZOMB */
			wakeup(initprocess);
		for (; qr != NULL; qr = nqr) {
			nqr = LIST_NEXT(qr, ps_sibling);
			/*
			 * Traced processes are killed since their
			 * existence means someone is screwing up.
			 */
			mtx_enter(&qr->ps_mtx);
			if (qr->ps_flags & PS_TRACED &&
			    !(qr->ps_flags & PS_EXITING)) {
				process_untrace(qr);
				mtx_leave(&qr->ps_mtx);

				/*
				 * If single threading is active,
				 * direct the signal to the active
				 * thread to avoid deadlock.
				 */
				if (qr->ps_single)
					ptsignal(qr->ps_single, SIGKILL,
					    STHREAD);
				else
					prsignal(qr, SIGKILL);
			} else {
				process_reparent(qr, initprocess);
				mtx_leave(&qr->ps_mtx);
			}
		}

		/*
		 * Make sure orphans won't remember the exiting process.
		 */
		while ((qr = LIST_FIRST(&pr->ps_orphans)) != NULL) {
			mtx_enter(&qr->ps_mtx);
			KASSERT(qr->ps_oppid == pr->ps_pid);
			qr->ps_oppid = 0;
			process_clear_orphan(qr);
			mtx_leave(&qr->ps_mtx);
		}
	}

	/* add thread's accumulated rusage into the process's total */
	ruadd(rup, &p->p_ru);

	/*
	 * clear %cpu usage during swap
	 */
	p->p_pctcpu = 0;

	if ((p->p_flag & P_THREAD) == 0) {
		/*
		 * Final thread has died, so add on our children's rusage
		 * and calculate the total times.
		 */
		calcru(&pr->ps_tu, &rup->ru_utime, &rup->ru_stime, NULL);
		ruadd(rup, &pr->ps_cru);

		/*
		 * Notify parent that we're gone.  If we're not going to
		 * become a zombie, reparent to process 1 (init) so that
		 * we can wake our original parent to possibly unblock
		 * wait4() to return ECHILD.
		 */
		mtx_enter(&pr->ps_mtx);
		if (pr->ps_flags & PS_NOZOMBIE) {
			struct process *ppr = pr->ps_pptr;
			process_reparent(pr, initprocess);
			wakeup(ppr);
		}
		mtx_leave(&pr->ps_mtx);
	}

	/* just a thread? check if last one standing. */
	if (p->p_flag & P_THREAD) {
		/* scheduler_wait_hook(pr->ps_mainproc, p); XXX */
		mtx_enter(&pr->ps_mtx);
		pr->ps_exitcnt--;
		if (pr->ps_threadcnt + pr->ps_exitcnt == 1)
			wakeup(&pr->ps_threads);
		mtx_leave(&pr->ps_mtx);
	}

	/*
	 * Other substructures are freed from reaper and wait().
	 */

	/*
	 * Finally, call machine-dependent code to switch to a new
	 * context (possibly the idle context).  Once we are no longer
	 * using the dead process's vmspace and stack, exit2() will be
	 * called to schedule those resources to be released by the
	 * reaper thread.
	 *
	 * Note that cpu_exit() will end with a call equivalent to
	 * cpu_switch(), finishing our execution (pun intended).
	 */
	cpu_exit(p);
	panic("cpu_exit returned");
}

/*
 * Locking of this proclist is special; it's accessed in a
 * critical section of process exit, and thus locking it can't
 * modify interrupt state.  We use a simple spin lock for this
 * proclist.  We use the p_hash member to link up to deadproc.
 */
struct mutex deadproc_mutex =
    MUTEX_INITIALIZER_FLAGS(IPL_NONE, "deadproc", MTX_NOWITNESS);
struct proclist deadproc = LIST_HEAD_INITIALIZER(deadproc);

/*
 * We are called from sched_idle() once it is safe to schedule the
 * dead process's resources to be freed.  So this is not allowed to sleep.
 *
 * We lock the deadproc list, place the proc on that list (using
 * the p_hash member), and wake up the reaper.
 */
void
exit2(struct proc *p)
{
	/* account the remainder of time spent in exit1() */
	mtx_enter(&p->p_p->ps_mtx);
	tuagg_add_process(p->p_p, p);
	mtx_leave(&p->p_p->ps_mtx);

	mtx_enter(&deadproc_mutex);
	LIST_INSERT_HEAD(&deadproc, p, p_hash);
	mtx_leave(&deadproc_mutex);

	wakeup(&deadproc);
}

void
proc_free(struct proc *p)
{
	crfree(p->p_ucred);
	pool_put(&proc_pool, p);
	atomic_dec_int(&nthreads);
}

/*
 * Process reaper.  This is run by a kernel thread to free the resources
 * of a dead process.  Once the resources are free, the process becomes
 * a zombie, and the parent is allowed to read the undead's status.
 */
void
reaper(void *arg)
{
	struct proc *p;

	KERNEL_UNLOCK();

	SCHED_ASSERT_UNLOCKED();

	for (;;) {
		mtx_enter(&deadproc_mutex);
		while ((p = LIST_FIRST(&deadproc)) == NULL)
			msleep_nsec(&deadproc, &deadproc_mutex, PVM, "reaper",
			    INFSLP);

		/* Remove us from the deadproc list. */
		LIST_REMOVE(p, p_hash);
		mtx_leave(&deadproc_mutex);

		WITNESS_THREAD_EXIT(p);

		/*
		 * Free the VM resources we're still holding on to.
		 * We must do this from a valid thread because doing
		 * so may block.
		 */
		uvm_uarea_free(p);
		p->p_vmspace = NULL;	/* zap the thread's copy */

		if (p->p_flag & P_THREAD) {
			/* Just a thread */
			proc_free(p);
		} else {
			struct process *pr = p->p_p;

			/* Release the rest of the process's vmspace */
			uvm_exit(pr);

			KERNEL_LOCK();
			if ((pr->ps_flags & PS_NOZOMBIE) == 0) {
				/* Process is now a true zombie. */
				atomic_setbits_int(&pr->ps_flags, PS_ZOMBIE);
			}

			/* Notify listeners of our demise and clean up. */
			knote_processexit(pr);

			if (pr->ps_flags & PS_ZOMBIE) {
				/* Post SIGCHLD and wake up parent. */
				prsignal(pr->ps_pptr, SIGCHLD);
				wakeup(pr->ps_pptr);
			} else {
				/* No one will wait for us, just zap it. */
				process_zap(pr);
			}
			KERNEL_UNLOCK();
		}
	}
}
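
/*
 * Big picture (illustrative summary of this file): an exiting thread
 * runs exit1() and switches away via cpu_exit(); once its stack and
 * vmspace are no longer in use, exit2() queues the proc on `deadproc'
 * and wakes the reaper.  reaper() frees the u-area and vmspace,
 * turning the process into a PS_ZOMBIE (unless PS_NOZOMBIE).  The
 * parent's wait4()/waitid() then reaches dowait6(), which hands off
 * to proc_finish_wait() and finally process_zap() to free the rest.
 */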

int
dowait6(struct proc *q, idtype_t idtype, id_t id, int *statusp, int options,
    struct rusage *rusage, siginfo_t *info, register_t *retval)
{
	int nfound;
	struct process *pr;
	struct proc *p;
	int error;

	if (info != NULL)
		memset(info, 0, sizeof(*info));

loop:
	nfound = 0;
	LIST_FOREACH(pr, &q->p_p->ps_children, ps_sibling) {
		if ((pr->ps_flags & PS_NOZOMBIE) ||
		    (idtype == P_PID && id != pr->ps_pid) ||
		    (idtype == P_PGID && id != pr->ps_pgid))
			continue;

		p = pr->ps_mainproc;

		nfound++;
		if ((options & WEXITED) && (pr->ps_flags & PS_ZOMBIE)) {
			*retval = pr->ps_pid;
			if (info != NULL) {
				info->si_pid = pr->ps_pid;
				info->si_uid = pr->ps_ucred->cr_uid;
				info->si_signo = SIGCHLD;
				if (pr->ps_xsig == 0) {
					info->si_code = CLD_EXITED;
					info->si_status = pr->ps_xexit;
				} else if (WCOREDUMP(pr->ps_xsig)) {
					info->si_code = CLD_DUMPED;
					info->si_status = _WSTATUS(pr->ps_xsig);
				} else {
					info->si_code = CLD_KILLED;
					info->si_status = _WSTATUS(pr->ps_xsig);
				}
			}

			if (statusp != NULL)
				*statusp = W_EXITCODE(pr->ps_xexit,
				    pr->ps_xsig);
			if (rusage != NULL)
				memcpy(rusage, pr->ps_ru, sizeof(*rusage));
			if ((options & WNOWAIT) == 0)
				proc_finish_wait(q, pr);
			return (0);
		}
		if ((options & WTRAPPED) &&
		    (pr->ps_flags & PS_TRACED) &&
		    (pr->ps_flags & PS_WAITED) == 0 && pr->ps_single &&
		    pr->ps_single->p_stat == SSTOP) {
			if (single_thread_wait(pr, 0))
				goto loop;

			if ((options & WNOWAIT) == 0)
				atomic_setbits_int(&pr->ps_flags, PS_WAITED);

			*retval = pr->ps_pid;
			if (info != NULL) {
				info->si_pid = pr->ps_pid;
				info->si_uid = pr->ps_ucred->cr_uid;
				info->si_signo = SIGCHLD;
				info->si_code = CLD_TRAPPED;
				info->si_status = pr->ps_xsig;
			}

			if (statusp != NULL)
				*statusp = W_STOPCODE(pr->ps_xsig);
			if (rusage != NULL)
				memset(rusage, 0, sizeof(*rusage));
			return (0);
		}
		if (p->p_stat == SSTOP &&
		    (pr->ps_flags & PS_WAITED) == 0 &&
		    (p->p_flag & P_SUSPSINGLE) == 0 &&
		    ((pr->ps_flags & PS_TRACED) ||
		    (options & WUNTRACED))) {
			if ((options & WNOWAIT) == 0)
				atomic_setbits_int(&pr->ps_flags, PS_WAITED);

			*retval = pr->ps_pid;
			if (info != NULL) {
				info->si_pid = pr->ps_pid;
				info->si_uid = pr->ps_ucred->cr_uid;
				info->si_signo = SIGCHLD;
				info->si_code = CLD_STOPPED;
				info->si_status = pr->ps_xsig;
			}

			if (statusp != NULL)
				*statusp = W_STOPCODE(pr->ps_xsig);
			if (rusage != NULL)
				memset(rusage, 0, sizeof(*rusage));
			return (0);
		}
		if ((options & WCONTINUED) && (pr->ps_flags & PS_CONTINUED)) {
			if ((options & WNOWAIT) == 0)
				atomic_clearbits_int(&pr->ps_flags,
				    PS_CONTINUED);

			*retval = pr->ps_pid;
			if (info != NULL) {
				info->si_pid = pr->ps_pid;
				info->si_uid = pr->ps_ucred->cr_uid;
				info->si_signo = SIGCHLD;
				info->si_code = CLD_CONTINUED;
				info->si_status = SIGCONT;
			}

			if (statusp != NULL)
				*statusp = _WCONTINUED;
			if (rusage != NULL)
				memset(rusage, 0, sizeof(*rusage));
			return (0);
		}
	}
	/*
	 * Look in the orphans list too, to allow the parent to
	 * collect its child's exit status even if the child is
	 * being debugged.
	 *
	 * The debugger detaches from the parent upon a successful
	 * switch-over from parent to child.  At that point, due to
	 * the re-parenting, the parent loses the child to the
	 * debugger and a wait4(2) call would report that it has no
	 * children to wait for.  By maintaining a list of orphans
	 * we allow the parent to successfully wait until the child
	 * becomes a zombie.
	 */
	if (nfound == 0) {
		LIST_FOREACH(pr, &q->p_p->ps_orphans, ps_orphan) {
			if ((pr->ps_flags & PS_NOZOMBIE) ||
			    (idtype == P_PID && id != pr->ps_pid) ||
			    (idtype == P_PGID && id != pr->ps_pgid))
				continue;
			nfound++;
			break;
		}
	}
	if (nfound == 0)
		return (ECHILD);
	if (options & WNOHANG) {
		*retval = 0;
		return (0);
	}
	if ((error = tsleep_nsec(q->p_p, PWAIT | PCATCH, "wait", INFSLP)) != 0)
		return (error);
	goto loop;
}
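
/*
 * Worked example of the status encodings used above, following the
 * BSD <sys/wait.h> conventions where W_EXITCODE(ret, sig) is
 * (ret << 8) | sig and W_STOPCODE(sig) is (sig << 8) | 0177:
 *
 *	child called _exit(3)		-> status 0x0300:
 *					   WIFEXITED, WEXITSTATUS == 3
 *	child killed by SIGTERM (15)	-> status 0x000f:
 *					   WIFSIGNALED, WTERMSIG == 15
 *	child stopped by SIGSTOP (17)	-> status 0x117f:
 *					   WIFSTOPPED, WSTOPSIG == 17
 */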

int
sys_wait4(struct proc *q, void *v, register_t *retval)
{
	struct sys_wait4_args /* {
		syscallarg(pid_t) pid;
		syscallarg(int *) status;
		syscallarg(int) options;
		syscallarg(struct rusage *) rusage;
	} */ *uap = v;
	struct rusage ru;
	pid_t pid = SCARG(uap, pid);
	int options = SCARG(uap, options);
	int status, error;
	idtype_t idtype;
	id_t id;

	if (SCARG(uap, options) &~ (WUNTRACED|WNOHANG|WCONTINUED))
		return (EINVAL);
	options |= WEXITED | WTRAPPED;

	if (SCARG(uap, pid) == WAIT_MYPGRP) {
		idtype = P_PGID;
		id = q->p_p->ps_pgid;
	} else if (SCARG(uap, pid) == WAIT_ANY) {
		idtype = P_ALL;
		id = 0;
	} else if (pid < 0) {
		idtype = P_PGID;
		id = -pid;
	} else {
		idtype = P_PID;
		id = pid;
	}

	error = dowait6(q, idtype, id,
	    SCARG(uap, status) ? &status : NULL, options,
	    SCARG(uap, rusage) ? &ru : NULL, NULL, retval);
	if (error == 0 && *retval > 0 && SCARG(uap, status)) {
		error = copyout(&status, SCARG(uap, status), sizeof(status));
	}
	if (error == 0 && *retval > 0 && SCARG(uap, rusage)) {
		error = copyout(&ru, SCARG(uap, rusage), sizeof(ru));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(q, KTR_STRUCT))
			ktrrusage(q, &ru);
#endif
	}
	return (error);
}

int
sys_waitid(struct proc *q, void *v, register_t *retval)
{
	struct sys_waitid_args /* {
		syscallarg(idtype_t) idtype;
		syscallarg(id_t) id;
		syscallarg(siginfo_t *) info;
		syscallarg(int) options;
	} */ *uap = v;
	siginfo_t info;
	idtype_t idtype = SCARG(uap, idtype);
	int options = SCARG(uap, options);
	int error;

	if (options &~ (WSTOPPED|WCONTINUED|WEXITED|WTRAPPED|WNOHANG|WNOWAIT))
		return (EINVAL);
	if ((options & (WSTOPPED|WCONTINUED|WEXITED|WTRAPPED)) == 0)
		return (EINVAL);
	if (idtype != P_ALL && idtype != P_PID && idtype != P_PGID)
		return (EINVAL);

	error = dowait6(q, idtype, SCARG(uap, id), NULL,
	    options, NULL, &info, retval);
	if (error == 0) {
		error = copyout(&info, SCARG(uap, info), sizeof(info));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(q, KTR_STRUCT))
			ktrsiginfo(q, &info);
#endif
	}
	if (error == 0)
		*retval = 0;
	return (error);
}
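
/*
 * Illustrative userland view of the two front ends above (a sketch,
 * not part of this file):
 *
 *	int status;
 *	struct rusage ru;
 *	pid_t pid = wait4(-1, &status, WNOHANG, &ru);
 *
 * pid == 0 means children exist but none was reportable (WNOHANG);
 * pid > 0 means status and ru describe that child.  With waitid(2):
 *
 *	siginfo_t si;
 *	waitid(P_ALL, 0, &si, WEXITED | WNOWAIT);
 *
 * WNOWAIT leaves the child reapable by a later call; si.si_code is
 * CLD_EXITED, CLD_KILLED or CLD_DUMPED as filled in by dowait6().
 */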

void
proc_finish_wait(struct proc *waiter, struct process *pr)
{
	struct process *tr;
	struct rusage *rup;

	/*
	 * If we got the child via a ptrace 'attach',
	 * we need to give it back to the old parent.
	 */
	mtx_enter(&pr->ps_mtx);
	if (pr->ps_oppid != 0 && (pr->ps_oppid != pr->ps_ppid) &&
	    (tr = prfind(pr->ps_oppid))) {
		pr->ps_oppid = 0;
		atomic_clearbits_int(&pr->ps_flags, PS_TRACED);
		process_reparent(pr, tr);
		mtx_leave(&pr->ps_mtx);
		prsignal(tr, SIGCHLD);
		wakeup(tr);
	} else {
		mtx_leave(&pr->ps_mtx);
		scheduler_wait_hook(waiter, pr->ps_mainproc);
		rup = &waiter->p_p->ps_cru;
		ruadd(rup, pr->ps_ru);
		LIST_REMOVE(pr, ps_list);	/* off zombprocess */
		freepid(pr->ps_pid);
		process_zap(pr);
	}
}

/*
 * give process back to original parent or init(8)
 */
void
process_untrace(struct process *pr)
{
	struct process *ppr = NULL;

	KASSERT(pr->ps_flags & PS_TRACED);
	MUTEX_ASSERT_LOCKED(&pr->ps_mtx);

	if (pr->ps_oppid != 0 &&
	    (pr->ps_oppid != pr->ps_ppid))
		ppr = prfind(pr->ps_oppid);

	/* not being traced any more */
	pr->ps_oppid = 0;
	atomic_clearbits_int(&pr->ps_flags, PS_TRACED);
	process_reparent(pr, ppr ? ppr : initprocess);
}

void
process_clear_orphan(struct process *pr)
{
	if (pr->ps_flags & PS_ORPHAN) {
		LIST_REMOVE(pr, ps_orphan);
		atomic_clearbits_int(&pr->ps_flags, PS_ORPHAN);
	}
}

/*
 * make process 'parent' the new parent of process 'child'.
 */
void
process_reparent(struct process *child, struct process *parent)
{

	if (child->ps_pptr == parent)
		return;

	KASSERT(child->ps_oppid == 0 ||
	    child->ps_oppid == child->ps_ppid);

	LIST_REMOVE(child, ps_sibling);
	LIST_INSERT_HEAD(&parent->ps_children, child, ps_sibling);

	process_clear_orphan(child);
	if (child->ps_flags & PS_TRACED) {
		atomic_setbits_int(&child->ps_flags, PS_ORPHAN);
		LIST_INSERT_HEAD(&child->ps_pptr->ps_orphans, child, ps_orphan);
	}

	MUTEX_ASSERT_LOCKED(&child->ps_mtx);
	child->ps_pptr = parent;
	child->ps_ppid = parent->ps_pid;
}

void
process_zap(struct process *pr)
{
	struct vnode *otvp;
	struct proc *p = pr->ps_mainproc;

	/*
	 * Finally finished with old proc entry.
	 * Unlink it from its process group and free it.
	 */
	leavepgrp(pr);
	LIST_REMOVE(pr, ps_sibling);
	process_clear_orphan(pr);

	/*
	 * Decrement the count of procs running with this uid.
	 */
	(void)chgproccnt(pr->ps_ucred->cr_ruid, -1);

	/*
	 * Release reference to text vnode
	 */
	otvp = pr->ps_textvp;
	pr->ps_textvp = NULL;
	if (otvp)
		vrele(otvp);

	KASSERT(pr->ps_threadcnt == 0);
	KASSERT(pr->ps_exitcnt == 1);
	if (pr->ps_ptstat != NULL)
		free(pr->ps_ptstat, M_SUBPROC, sizeof(*pr->ps_ptstat));
	pool_put(&rusage_pool, pr->ps_ru);
	KASSERT(TAILQ_EMPTY(&pr->ps_threads));
	sigactsfree(pr->ps_sigacts);
	lim_free(pr->ps_limit);
	crfree(pr->ps_ucred);
	pool_put(&process_pool, pr);
	nprocesses--;

	proc_free(p);
}