/*	$OpenBSD: kern_exit.c,v 1.222 2024/06/03 12:48:25 claudio Exp $	*/
/*	$NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/wait.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/ptrace.h>
#include <sys/acct.h>
#include <sys/filedesc.h>
#include <sys/signalvar.h>
#include <sys/sched.h>
#include <sys/ktrace.h>
#include <sys/pool.h>
#include <sys/mutex.h>
#ifdef SYSVSEM
#include <sys/sem.h>
#endif
#include <sys/witness.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

#include "kcov.h"
#if NKCOV > 0
#include <sys/kcov.h>
#endif

void	proc_finish_wait(struct proc *, struct proc *);
void	process_clear_orphan(struct process *);
void	process_zap(struct process *);
void	proc_free(struct proc *);
void	unveil_destroy(struct process *ps);
/*
 * exit --
 *	Death of process.
 */
int
sys_exit(struct proc *p, void *v, register_t *retval)
{
	struct sys_exit_args /* {
		syscallarg(int) rval;
	} */ *uap = v;

	exit1(p, SCARG(uap, rval), 0, EXIT_NORMAL);
	/* NOTREACHED */
	return (0);
}

int
sys___threxit(struct proc *p, void *v, register_t *retval)
{
	struct sys___threxit_args /* {
		syscallarg(pid_t *) notdead;
	} */ *uap = v;

	if (SCARG(uap, notdead) != NULL) {
		pid_t zero = 0;
		if (copyout(&zero, SCARG(uap, notdead), sizeof(zero)))
			psignal(p, SIGSEGV);
	}
	exit1(p, 0, 0, EXIT_THREAD);

	return (0);
}
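/*
 * Illustrative sketch (not kernel code): a userland thread library can
 * hand the kernel the address of a per-thread "tid" word via the
 * notdead argument; the copyout above zeroes that word just before the
 * thread exits, so a joining thread can wait until it clears.  The
 * variable and spin loop below are hypothetical; a real implementation
 * would block on a futex rather than yield in a loop.
 *
 *	volatile pid_t joiner_tid;	// nonzero while the thread lives
 *
 *	// exiting thread:
 *	__threxit(&joiner_tid);
 *
 *	// joining thread:
 *	while (joiner_tid != 0)
 *		sched_yield();
 */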
/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(struct proc *p, int xexit, int xsig, int flags)
{
	struct process *pr, *qr, *nqr;
	struct rusage *rup;
	struct timespec ts;

	atomic_setbits_int(&p->p_flag, P_WEXIT);

	pr = p->p_p;

	/* single-threaded? */
	if (!P_HASSIBLING(p)) {
		flags = EXIT_NORMAL;
	} else {
		/* nope, multi-threaded */
		if (flags == EXIT_NORMAL)
			single_thread_set(p, SINGLE_EXIT);
	}

	if (flags == EXIT_NORMAL && !(pr->ps_flags & PS_EXITING)) {
		if (pr->ps_pid == 1)
			panic("init died (signal %d, exit %d)", xsig, xexit);

		atomic_setbits_int(&pr->ps_flags, PS_EXITING);
		pr->ps_xexit = xexit;
		pr->ps_xsig = xsig;

		/*
		 * If parent is waiting for us to exit or exec, PS_PPWAIT
		 * is set; we wake up the parent early to avoid deadlock.
		 */
		if (pr->ps_flags & PS_PPWAIT) {
			atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
			atomic_clearbits_int(&pr->ps_pptr->ps_flags,
			    PS_ISPWAIT);
			wakeup(pr->ps_pptr);
		}
	}

	/* unlink ourselves from the active threads */
	mtx_enter(&pr->ps_mtx);
	TAILQ_REMOVE(&pr->ps_threads, p, p_thr_link);
	pr->ps_threadcnt--;
	pr->ps_exitcnt++;

	/*
	 * if somebody else wants to take us to single threaded mode,
	 * count ourselves out.
	 */
	if (pr->ps_single) {
		if (--pr->ps_singlecnt == 0)
			wakeup(&pr->ps_singlecnt);
	}

	if ((p->p_flag & P_THREAD) == 0) {
		/* main thread gotta wait because it has the pid, et al */
		while (pr->ps_threadcnt + pr->ps_exitcnt > 1)
			msleep_nsec(&pr->ps_threads, &pr->ps_mtx, PWAIT,
			    "thrdeath", INFSLP);
	}
	mtx_leave(&pr->ps_mtx);

	rup = pr->ps_ru;
	if (rup == NULL) {
		rup = pool_get(&rusage_pool, PR_WAITOK | PR_ZERO);
		if (pr->ps_ru == NULL) {
			pr->ps_ru = rup;
		} else {
			pool_put(&rusage_pool, rup);
			rup = pr->ps_ru;
		}
	}
	p->p_siglist = 0;
	if ((p->p_flag & P_THREAD) == 0)
		pr->ps_siglist = 0;

	kqpoll_exit();

#if NKCOV > 0
	kcov_exit(p);
#endif

	if ((p->p_flag & P_THREAD) == 0) {
		if (pr->ps_flags & PS_PROFIL)
			stopprofclock(pr);

		sigio_freelist(&pr->ps_sigiolst);

		/* close open files and release open-file table */
		fdfree(p);

		cancel_all_itimers();

		timeout_del(&pr->ps_rucheck_to);
#ifdef SYSVSEM
		semexit(pr);
#endif
		killjobc(pr);
#ifdef ACCOUNTING
		acct_process(p);
#endif

#ifdef KTRACE
		/* release trace file */
		if (pr->ps_tracevp)
			ktrcleartrace(pr);
#endif

		unveil_destroy(pr);

		free(pr->ps_pin.pn_pins, M_PINSYSCALL,
		    pr->ps_pin.pn_npins * sizeof(u_int));
		free(pr->ps_libcpin.pn_pins, M_PINSYSCALL,
		    pr->ps_libcpin.pn_npins * sizeof(u_int));

		/*
		 * If parent has the SAS_NOCLDWAIT flag set, we're not
		 * going to become a zombie.
		 */
		if (pr->ps_pptr->ps_sigacts->ps_sigflags & SAS_NOCLDWAIT)
			atomic_setbits_int(&pr->ps_flags, PS_NOZOMBIE);
	}

	p->p_fd = NULL;		/* zap the thread's copy */

	/* Release the thread's read reference of resource limit structure. */
	if (p->p_limit != NULL) {
		struct plimit *limit;

		limit = p->p_limit;
		p->p_limit = NULL;
		lim_free(limit);
	}

	/*
	 * Remove proc from pidhash chain and allproc so looking
	 * it up won't work.  We will put the proc on the
	 * deadproc list later (using the p_hash member), and
	 * wake up the reaper when we do.  If this is the last
	 * thread of a process that isn't PS_NOZOMBIE, we'll put
	 * the process on the zombprocess list below.
	 */
	/*
	 * NOTE: WE ARE NO LONGER ALLOWED TO SLEEP!
	 */
	p->p_stat = SDEAD;

	LIST_REMOVE(p, p_hash);
	LIST_REMOVE(p, p_list);

	if ((p->p_flag & P_THREAD) == 0) {
		LIST_REMOVE(pr, ps_hash);
		LIST_REMOVE(pr, ps_list);

		if ((pr->ps_flags & PS_NOZOMBIE) == 0)
			LIST_INSERT_HEAD(&zombprocess, pr, ps_list);
		else {
			/*
			 * Not going to be a zombie, so it's now off all
			 * the lists scanned by ispidtaken(), so block
			 * fast reuse of the pid now.
			 */
			freepid(pr->ps_pid);
		}

		/*
		 * Reparent children to their original parent, in case
		 * they were being traced, or to init(8).
		 */
		qr = LIST_FIRST(&pr->ps_children);
		if (qr)		/* only need this if any child is S_ZOMB */
			wakeup(initprocess);
		for (; qr != NULL; qr = nqr) {
			nqr = LIST_NEXT(qr, ps_sibling);
			/*
			 * Traced processes are killed since their
			 * existence means someone is screwing up.
			 */
			if (qr->ps_flags & PS_TRACED &&
			    !(qr->ps_flags & PS_EXITING)) {
				process_untrace(qr);

				/*
				 * If single threading is active,
				 * direct the signal to the active
				 * thread to avoid deadlock.
				 */
				if (qr->ps_single)
					ptsignal(qr->ps_single, SIGKILL,
					    STHREAD);
				else
					prsignal(qr, SIGKILL);
			} else {
				process_reparent(qr, initprocess);
			}
		}

		/*
		 * Make sure orphans won't remember the exiting process.
		 */
		while ((qr = LIST_FIRST(&pr->ps_orphans)) != NULL) {
			KASSERT(qr->ps_oppid == pr->ps_pid);
			qr->ps_oppid = 0;
			process_clear_orphan(qr);
		}
	}

	/* add thread's accumulated rusage into the process's total */
	ruadd(rup, &p->p_ru);
	nanouptime(&ts);
	if (timespeccmp(&ts, &curcpu()->ci_schedstate.spc_runtime, <))
		timespecclear(&ts);
	else
		timespecsub(&ts, &curcpu()->ci_schedstate.spc_runtime, &ts);
	SCHED_LOCK();
	tuagg_locked(pr, p, &ts);
	SCHED_UNLOCK();

	/*
	 * clear %cpu usage during swap
	 */
	p->p_pctcpu = 0;

	if ((p->p_flag & P_THREAD) == 0) {
		/*
		 * Final thread has died, so add on our children's rusage
		 * and calculate the total times.
		 */
		calcru(&pr->ps_tu, &rup->ru_utime, &rup->ru_stime, NULL);
		ruadd(rup, &pr->ps_cru);

		/*
		 * Notify parent that we're gone.  If we're not going to
		 * become a zombie, reparent to process 1 (init) so that
		 * we can wake our original parent to possibly unblock
		 * wait4() to return ECHILD.
		 */
		if (pr->ps_flags & PS_NOZOMBIE) {
			struct process *ppr = pr->ps_pptr;
			process_reparent(pr, initprocess);
			wakeup(ppr);
		}
	}

	/* just a thread? detach it from its process */
	if (p->p_flag & P_THREAD) {
		/* scheduler_wait_hook(pr->ps_mainproc, p); XXX */
		mtx_enter(&pr->ps_mtx);
		pr->ps_exitcnt--;
		if (pr->ps_threadcnt + pr->ps_exitcnt == 1)
			wakeup(&pr->ps_threads);
		mtx_leave(&pr->ps_mtx);
	}

	/*
	 * Other substructures are freed from reaper and wait().
	 */

	/*
	 * Finally, call machine-dependent code to switch to a new
	 * context (possibly the idle context).  Once we are no longer
	 * using the dead process's vmspace and stack, exit2() will be
	 * called to schedule those resources to be released by the
	 * reaper thread.
	 *
	 * Note that cpu_exit() will end with a call equivalent to
	 * cpu_switch(), finishing our execution (pun intended).
	 */
	uvmexp.swtch++;
	cpu_exit(p);
	panic("cpu_exit returned");
}

/*
 * Locking of this proclist is special; it's accessed in a
 * critical section of process exit, and thus locking it can't
 * modify interrupt state.  We use a simple spin lock for this
 * proclist.  We use the p_hash member to link up to deadproc.
 */
struct mutex deadproc_mutex =
    MUTEX_INITIALIZER_FLAGS(IPL_NONE, "deadproc", MTX_NOWITNESS);
struct proclist deadproc = LIST_HEAD_INITIALIZER(deadproc);
/*
 * We are called from cpu_exit() once it is safe to schedule the
 * dead process's resources to be freed.
 *
 * NOTE: One must be careful with locking in this routine.  It's
 * called from a critical section in machine-dependent code, so
 * we should refrain from changing any interrupt state.
 *
 * We lock the deadproc list, place the proc on that list (using
 * the p_hash member), and wake up the reaper.
 */
void
exit2(struct proc *p)
{
	mtx_enter(&deadproc_mutex);
	LIST_INSERT_HEAD(&deadproc, p, p_hash);
	mtx_leave(&deadproc_mutex);

	wakeup(&deadproc);
}

void
proc_free(struct proc *p)
{
	crfree(p->p_ucred);
	pool_put(&proc_pool, p);
	nthreads--;
}

/*
 * Process reaper.  This is run by a kernel thread to free the resources
 * of a dead process.  Once the resources are free, the process becomes
 * a zombie, and the parent is allowed to read the undead's status.
 */
void
reaper(void *arg)
{
	struct proc *p;

	KERNEL_UNLOCK();

	SCHED_ASSERT_UNLOCKED();

	for (;;) {
		mtx_enter(&deadproc_mutex);
		while ((p = LIST_FIRST(&deadproc)) == NULL)
			msleep_nsec(&deadproc, &deadproc_mutex, PVM, "reaper",
			    INFSLP);

		/* Remove us from the deadproc list. */
		LIST_REMOVE(p, p_hash);
		mtx_leave(&deadproc_mutex);

		WITNESS_THREAD_EXIT(p);

		KERNEL_LOCK();

		/*
		 * Free the VM resources we're still holding on to.
		 * We must do this from a valid thread because doing
		 * so may block.
		 */
		uvm_uarea_free(p);
		p->p_vmspace = NULL;		/* zap the thread's copy */

		if (p->p_flag & P_THREAD) {
			/* Just a thread */
			proc_free(p);
		} else {
			struct process *pr = p->p_p;

			/* Release the rest of the process's vmspace */
			uvm_exit(pr);

			if ((pr->ps_flags & PS_NOZOMBIE) == 0) {
				/* Process is now a true zombie. */
				atomic_setbits_int(&pr->ps_flags, PS_ZOMBIE);
			}

			/* Notify listeners of our demise and clean up. */
			knote_processexit(pr);

			if (pr->ps_flags & PS_ZOMBIE) {
				/* Post SIGCHLD and wake up parent. */
				prsignal(pr->ps_pptr, SIGCHLD);
				wakeup(pr->ps_pptr);
			} else {
				/* No one will wait for us, just zap it. */
				process_zap(pr);
			}
		}

		KERNEL_UNLOCK();
	}
}
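/*
 * Note (informational): the reaper runs as a dedicated kernel thread
 * that is created early in boot, outside this file, along the lines of
 *
 *	if (kthread_create(reaper, NULL, NULL, "reaper"))
 *		panic("fork reaper");
 *
 * (see init_main.c for the actual call site).
 */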
int
dowait6(struct proc *q, idtype_t idtype, id_t id, int *statusp, int options,
    struct rusage *rusage, siginfo_t *info, register_t *retval)
{
	int nfound;
	struct process *pr;
	struct proc *p;
	int error;

	if (info != NULL)
		memset(info, 0, sizeof(*info));

loop:
	nfound = 0;
	LIST_FOREACH(pr, &q->p_p->ps_children, ps_sibling) {
		if ((pr->ps_flags & PS_NOZOMBIE) ||
		    (idtype == P_PID && id != pr->ps_pid) ||
		    (idtype == P_PGID && id != pr->ps_pgid))
			continue;

		p = pr->ps_mainproc;

		nfound++;
		if ((options & WEXITED) && (pr->ps_flags & PS_ZOMBIE)) {
			*retval = pr->ps_pid;
			if (info != NULL) {
				info->si_pid = pr->ps_pid;
				info->si_uid = pr->ps_ucred->cr_uid;
				info->si_signo = SIGCHLD;
				if (pr->ps_xsig == 0) {
					info->si_code = CLD_EXITED;
					info->si_status = pr->ps_xexit;
				} else if (WCOREDUMP(pr->ps_xsig)) {
					info->si_code = CLD_DUMPED;
					info->si_status = _WSTATUS(pr->ps_xsig);
				} else {
					info->si_code = CLD_KILLED;
					info->si_status = _WSTATUS(pr->ps_xsig);
				}
			}

			if (statusp != NULL)
				*statusp = W_EXITCODE(pr->ps_xexit,
				    pr->ps_xsig);
			if (rusage != NULL)
				memcpy(rusage, pr->ps_ru, sizeof(*rusage));
			if ((options & WNOWAIT) == 0)
				proc_finish_wait(q, p);
			return (0);
		}
		if ((options & WTRAPPED) &&
		    pr->ps_flags & PS_TRACED &&
		    (pr->ps_flags & PS_WAITED) == 0 && pr->ps_single &&
		    pr->ps_single->p_stat == SSTOP &&
		    (pr->ps_single->p_flag & P_SUSPSINGLE) == 0) {
			if (single_thread_wait(pr, 0))
				goto loop;

			if ((options & WNOWAIT) == 0)
				atomic_setbits_int(&pr->ps_flags, PS_WAITED);

			*retval = pr->ps_pid;
			if (info != NULL) {
				info->si_pid = pr->ps_pid;
				info->si_uid = pr->ps_ucred->cr_uid;
				info->si_signo = SIGCHLD;
				info->si_code = CLD_TRAPPED;
				info->si_status = pr->ps_xsig;
			}

			if (statusp != NULL)
				*statusp = W_STOPCODE(pr->ps_xsig);
			if (rusage != NULL)
				memset(rusage, 0, sizeof(*rusage));
			return (0);
		}
		if (p->p_stat == SSTOP &&
		    (pr->ps_flags & PS_WAITED) == 0 &&
		    (p->p_flag & P_SUSPSINGLE) == 0 &&
		    (pr->ps_flags & PS_TRACED ||
		    options & WUNTRACED)) {
			if ((options & WNOWAIT) == 0)
				atomic_setbits_int(&pr->ps_flags, PS_WAITED);

			*retval = pr->ps_pid;
			if (info != NULL) {
				info->si_pid = pr->ps_pid;
				info->si_uid = pr->ps_ucred->cr_uid;
				info->si_signo = SIGCHLD;
				info->si_code = CLD_STOPPED;
				info->si_status = pr->ps_xsig;
			}

			if (statusp != NULL)
				*statusp = W_STOPCODE(pr->ps_xsig);
			if (rusage != NULL)
				memset(rusage, 0, sizeof(*rusage));
			return (0);
		}
		if ((options & WCONTINUED) && (p->p_flag & P_CONTINUED)) {
			if ((options & WNOWAIT) == 0)
				atomic_clearbits_int(&p->p_flag, P_CONTINUED);

			*retval = pr->ps_pid;
			if (info != NULL) {
				info->si_pid = pr->ps_pid;
				info->si_uid = pr->ps_ucred->cr_uid;
				info->si_signo = SIGCHLD;
				info->si_code = CLD_CONTINUED;
				info->si_status = SIGCONT;
			}

			if (statusp != NULL)
				*statusp = _WCONTINUED;
			if (rusage != NULL)
				memset(rusage, 0, sizeof(*rusage));
			return (0);
		}
	}
	/*
	 * Look in the orphans list too, to allow the parent to
	 * collect its child's exit status even if the child is being
	 * debugged.
	 *
	 * The debugger detaches from the parent upon successful
	 * switch-over from parent to child.  At this point, due to
	 * the re-parenting, the parent loses the child to the debugger
	 * and a wait4(2) call would report that it has no children to
	 * wait for.  By maintaining a list of orphans we allow the
	 * parent to successfully wait until the child becomes a zombie.
	 */
	if (nfound == 0) {
		LIST_FOREACH(pr, &q->p_p->ps_orphans, ps_orphan) {
			if ((pr->ps_flags & PS_NOZOMBIE) ||
			    (idtype == P_PID && id != pr->ps_pid) ||
			    (idtype == P_PGID && id != pr->ps_pgid))
				continue;
			nfound++;
			break;
		}
	}
	if (nfound == 0)
		return (ECHILD);
	if (options & WNOHANG) {
		*retval = 0;
		return (0);
	}
	if ((error = tsleep_nsec(q->p_p, PWAIT | PCATCH, "wait", INFSLP)) != 0)
		return (error);
	goto loop;
}
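/*
 * For reference, the pid argument of wait4(2) below selects the
 * (idtype, id) pair handed to dowait6().  With WAIT_ANY == -1 and
 * WAIT_MYPGRP == 0 from <sys/wait.h>, the mapping is:
 *
 *	wait4(-1, ...)			idtype = P_ALL   (any child)
 *	wait4(0, ...)			idtype = P_PGID, id = caller's pgid
 *	wait4(-pgid, ...)		idtype = P_PGID, id = pgid
 *	wait4(pid, ...) (pid > 0)	idtype = P_PID,  id = pid
 */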
int
sys_wait4(struct proc *q, void *v, register_t *retval)
{
	struct sys_wait4_args /* {
		syscallarg(pid_t) pid;
		syscallarg(int *) status;
		syscallarg(int) options;
		syscallarg(struct rusage *) rusage;
	} */ *uap = v;
	struct rusage ru;
	pid_t pid = SCARG(uap, pid);
	int options = SCARG(uap, options);
	int status, error;
	idtype_t idtype;
	id_t id;

	if (SCARG(uap, options) &~ (WUNTRACED|WNOHANG|WCONTINUED))
		return (EINVAL);
	options |= WEXITED | WTRAPPED;

	if (SCARG(uap, pid) == WAIT_MYPGRP) {
		idtype = P_PGID;
		id = q->p_p->ps_pgid;
	} else if (SCARG(uap, pid) == WAIT_ANY) {
		idtype = P_ALL;
		id = 0;
	} else if (pid < 0) {
		idtype = P_PGID;
		id = -pid;
	} else {
		idtype = P_PID;
		id = pid;
	}

	error = dowait6(q, idtype, id,
	    SCARG(uap, status) ? &status : NULL, options,
	    SCARG(uap, rusage) ? &ru : NULL, NULL, retval);
	if (error == 0 && *retval > 0 && SCARG(uap, status)) {
		error = copyout(&status, SCARG(uap, status), sizeof(status));
	}
	if (error == 0 && *retval > 0 && SCARG(uap, rusage)) {
		error = copyout(&ru, SCARG(uap, rusage), sizeof(ru));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(q, KTR_STRUCT))
			ktrrusage(q, &ru);
#endif
	}
	return (error);
}

int
sys_waitid(struct proc *q, void *v, register_t *retval)
{
	struct sys_waitid_args /* {
		syscallarg(idtype_t) idtype;
		syscallarg(id_t) id;
		syscallarg(siginfo_t *) info;
		syscallarg(int) options;
	} */ *uap = v;
	siginfo_t info;
	idtype_t idtype = SCARG(uap, idtype);
	int options = SCARG(uap, options);
	int error;

	if (options &~ (WSTOPPED|WCONTINUED|WEXITED|WTRAPPED|WNOHANG|WNOWAIT))
		return (EINVAL);
	if ((options & (WSTOPPED|WCONTINUED|WEXITED|WTRAPPED)) == 0)
		return (EINVAL);
	if (idtype != P_ALL && idtype != P_PID && idtype != P_PGID)
		return (EINVAL);

	error = dowait6(q, idtype, SCARG(uap, id), NULL,
	    options, NULL, &info, retval);
	if (error == 0) {
		error = copyout(&info, SCARG(uap, info), sizeof(info));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(q, KTR_STRUCT))
			ktrsiginfo(q, &info);
#endif
	}
	if (error == 0)
		*retval = 0;
	return (error);
}
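/*
 * Userland view (illustrative only): the status word assembled by
 * dowait6() with W_EXITCODE()/W_STOPCODE() is unpacked with the
 * standard <sys/wait.h> macros.  A minimal sketch of a caller:
 *
 *	int status;
 *	pid_t pid = waitpid(-1, &status, 0);
 *
 *	if (pid > 0) {
 *		if (WIFEXITED(status))
 *			printf("exit %d\n", WEXITSTATUS(status));
 *		else if (WIFSIGNALED(status))
 *			printf("signal %d%s\n", WTERMSIG(status),
 *			    WCOREDUMP(status) ? " (core dumped)" : "");
 *		else if (WIFSTOPPED(status))
 *			printf("stopped by signal %d\n", WSTOPSIG(status));
 *	}
 */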
void
proc_finish_wait(struct proc *waiter, struct proc *p)
{
	struct process *pr, *tr;
	struct rusage *rup;

	/*
	 * If we got the child via a ptrace 'attach',
	 * we need to give it back to the old parent.
	 */
	pr = p->p_p;
	if (pr->ps_oppid != 0 && (pr->ps_oppid != pr->ps_pptr->ps_pid) &&
	    (tr = prfind(pr->ps_oppid))) {
		pr->ps_oppid = 0;
		atomic_clearbits_int(&pr->ps_flags, PS_TRACED);
		process_reparent(pr, tr);
		prsignal(tr, SIGCHLD);
		wakeup(tr);
	} else {
		scheduler_wait_hook(waiter, p);
		rup = &waiter->p_p->ps_cru;
		ruadd(rup, pr->ps_ru);
		LIST_REMOVE(pr, ps_list);	/* off zombprocess */
		freepid(pr->ps_pid);
		process_zap(pr);
	}
}

/*
 * give process back to original parent or init(8)
 */
void
process_untrace(struct process *pr)
{
	struct process *ppr = NULL;

	KASSERT(pr->ps_flags & PS_TRACED);

	if (pr->ps_oppid != 0 &&
	    (pr->ps_oppid != pr->ps_pptr->ps_pid))
		ppr = prfind(pr->ps_oppid);

	/* not being traced any more */
	pr->ps_oppid = 0;
	atomic_clearbits_int(&pr->ps_flags, PS_TRACED);
	process_reparent(pr, ppr ? ppr : initprocess);
}

void
process_clear_orphan(struct process *pr)
{
	if (pr->ps_flags & PS_ORPHAN) {
		LIST_REMOVE(pr, ps_orphan);
		atomic_clearbits_int(&pr->ps_flags, PS_ORPHAN);
	}
}

/*
 * make process 'parent' the new parent of process 'child'.
 */
void
process_reparent(struct process *child, struct process *parent)
{

	if (child->ps_pptr == parent)
		return;

	KASSERT(child->ps_oppid == 0 ||
	    child->ps_oppid == child->ps_pptr->ps_pid);

	LIST_REMOVE(child, ps_sibling);
	LIST_INSERT_HEAD(&parent->ps_children, child, ps_sibling);

	process_clear_orphan(child);
	if (child->ps_flags & PS_TRACED) {
		atomic_setbits_int(&child->ps_flags, PS_ORPHAN);
		LIST_INSERT_HEAD(&child->ps_pptr->ps_orphans, child, ps_orphan);
	}

	child->ps_pptr = parent;
	child->ps_ppid = parent->ps_pid;
}

void
process_zap(struct process *pr)
{
	struct vnode *otvp;
	struct proc *p = pr->ps_mainproc;

	/*
	 * Finally finished with old proc entry.
	 * Unlink it from its process group and free it.
	 */
	leavepgrp(pr);
	LIST_REMOVE(pr, ps_sibling);
	process_clear_orphan(pr);

	/*
	 * Decrement the count of procs running with this uid.
	 */
	(void)chgproccnt(pr->ps_ucred->cr_ruid, -1);

	/*
	 * Release reference to text vnode.
	 */
	otvp = pr->ps_textvp;
	pr->ps_textvp = NULL;
	if (otvp)
		vrele(otvp);

	KASSERT(pr->ps_threadcnt == 0);
	KASSERT(pr->ps_exitcnt == 1);
	if (pr->ps_ptstat != NULL)
		free(pr->ps_ptstat, M_SUBPROC, sizeof(*pr->ps_ptstat));
	pool_put(&rusage_pool, pr->ps_ru);
	KASSERT(TAILQ_EMPTY(&pr->ps_threads));
	sigactsfree(pr->ps_sigacts);
	lim_free(pr->ps_limit);
	crfree(pr->ps_ucred);
	pool_put(&process_pool, pr);
	nprocesses--;

	proc_free(p);
}