/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 * $FreeBSD: src/sys/kern/kern_exit.c,v 1.92.2.11 2003/01/13 22:51:16 dillon Exp $
 * $DragonFly: src/sys/kern/kern_exit.c,v 1.91 2008/05/18 20:02:02 nth Exp $
 */

#include "opt_compat.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/pioctl.h>
#include <sys/tty.h>
#include <sys/wait.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/taskqueue.h>
#include <sys/ptrace.h>
#include <sys/acct.h>		/* for acct_process() function prototype */
#include <sys/filedesc.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/aio.h>
#include <sys/jail.h>
#include <sys/kern_syscall.h>
#include <sys/upcall.h>
#include <sys/caps.h>
#include <sys/unistd.h>
#include <sys/eventhandler.h>
#include <sys/dsched.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <sys/user.h>

#include <sys/thread2.h>
#include <sys/sysref2.h>
#include <sys/mplock2.h>

static void reaplwps(void *context, int dummy);
static void reaplwp(struct lwp *lp);
static void killlwps(struct lwp *lp);

static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback");
static MALLOC_DEFINE(M_ZOMBIE, "zombie", "zombie proc status");

/*
 * callout list for things to do at exit time
 */
struct exitlist {
	exitlist_fn function;
	TAILQ_ENTRY(exitlist) next;
};

TAILQ_HEAD(exit_list_head, exitlist);
static struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list);

/*
 * LWP reaper data
 */
struct task *deadlwp_task[MAXCPU];
struct lwplist deadlwp_list[MAXCPU];

/*
 * exit --
 *	Death of process.
 *
 * SYS_EXIT_ARGS(int rval)
 *
 * MPALMOSTSAFE
 */
int
sys_exit(struct exit_args *uap)
{
	exit1(W_EXITCODE(uap->rval, 0));
	/* NOTREACHED */
}

/*
 * Extended exit --
 *	Death of a lwp or process with optional bells and whistles.
 *
 * MPALMOSTSAFE
 */
int
sys_extexit(struct extexit_args *uap)
{
	int action, who;
	int error;

	action = EXTEXIT_ACTION(uap->how);
	who = EXTEXIT_WHO(uap->how);

	/* Check parameters before we might perform some action */
	switch (who) {
	case EXTEXIT_PROC:
	case EXTEXIT_LWP:
		break;
	default:
		return (EINVAL);
	}

	switch (action) {
	case EXTEXIT_SIMPLE:
		break;
	case EXTEXIT_SETINT:
		error = copyout(&uap->status, uap->addr, sizeof(uap->status));
		if (error)
			return (error);
		break;
	default:
		return (EINVAL);
	}

	get_mplock();

	switch (who) {
	case EXTEXIT_LWP:
		/*
		 * Be sure only to perform a simple lwp exit if there is at
		 * least one more lwp in the proc, which will call exit1()
		 * later, otherwise the proc will be an UNDEAD and not even a
		 * SZOMB!
		 */
		if (curproc->p_nthreads > 1) {
			lwp_exit(0);
			/* NOT REACHED */
		}
		/* else last lwp in proc:  do the real thing */
		/* FALLTHROUGH */
	default:	/* to help gcc */
	case EXTEXIT_PROC:
		exit1(W_EXITCODE(uap->status, 0));
		/* NOTREACHED */
	}

	/* NOTREACHED */
	rel_mplock();	/* safety */
}

/*
 * Kill all lwps associated with the current process except the
 * current lwp.  Return an error if we race another thread trying to
 * do the same thing and lose the race.
 *
 * If forexec is non-zero the current thread and process flags are
 * cleaned up so they can be reused.
 */
int
killalllwps(int forexec)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;

	/*
	 * Interlock against P_WEXIT.  Only one of the process's threads
	 * is allowed to do the master exit.
	 */
	if (p->p_flag & P_WEXIT)
		return (EALREADY);
	p->p_flag |= P_WEXIT;

	/*
	 * Interlock with LWP_WEXIT and kill any remaining LWPs
	 */
	lp->lwp_flag |= LWP_WEXIT;
	if (p->p_nthreads > 1)
		killlwps(lp);

	/*
	 * If doing this for an exec, clean up the remaining thread
	 * (us) for continuing operation after all the other threads
	 * have been killed.
	 */
	if (forexec) {
		lp->lwp_flag &= ~LWP_WEXIT;
		p->p_flag &= ~P_WEXIT;
	}
	return(0);
}

/*
 * Kill all LWPs except the current one.  Do not try to signal
 * LWPs which have exited on their own or have already been
 * signaled.
 */
static void
killlwps(struct lwp *lp)
{
	struct proc *p = lp->lwp_proc;
	struct lwp *tlp;

	/*
	 * Kill the remaining LWPs.  We must send the signal before setting
	 * LWP_WEXIT.  The setting of WEXIT is optional but helps reduce
	 * races.  tlp must be held across the call as it might block and
	 * allow the target lwp to rip itself out from under our loop.
	 */
	FOREACH_LWP_IN_PROC(tlp, p) {
		LWPHOLD(tlp);
		if ((tlp->lwp_flag & LWP_WEXIT) == 0) {
			lwpsignal(p, tlp, SIGKILL);
			tlp->lwp_flag |= LWP_WEXIT;
		}
		LWPRELE(tlp);
	}

	/*
	 * Wait for everything to clear out.
	 */
	while (p->p_nthreads > 1) {
		tsleep(&p->p_nthreads, 0, "killlwps", 0);
	}
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(int rv)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	struct proc *q, *nq;
	struct vmspace *vm;
	struct vnode *vtmp;
	struct exitlist *ep;
	int error;

	if (p->p_pid == 1) {
		kprintf("init died (signal %d, exit %d)\n",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("Going nowhere without my init!");
	}

	get_mplock();

	varsymset_clean(&p->p_varsymset);
	lockuninit(&p->p_varsymset.vx_lock);

	/*
	 * Kill all lwps associated with the current process, return an
	 * error if we race another thread trying to do the same thing
	 * and lose the race.
	 */
	error = killalllwps(0);
	if (error) {
		lwp_exit(0);
		/* NOT REACHED */
	}

	caps_exit(lp->lwp_thread);
	aio_proc_rundown(p);

	/* are we a task leader? */
	if (p == p->p_leader) {
		struct kill_args killArgs;

		killArgs.signum = SIGKILL;
		q = p->p_peers;
		while(q) {
			killArgs.pid = q->p_pid;
			/*
			 * The interface for kill is better
			 * than the internal signal
			 */
			sys_kill(&killArgs);
			nq = q;
			q = q->p_peers;
		}
		while (p->p_peers)
			tsleep((caddr_t)p, 0, "exit1", 0);
	}

#ifdef PGINPROF
	vmsizmon();
#endif
	STOPEVENT(p, S_EXIT, rv);
	wakeup(&p->p_stype);	/* Wakeup anyone in procfs' PIOCWAIT */

	/*
	 * Check if any loadable modules need anything done at process exit.
	 * e.g. SYSV IPC stuff
	 * XXX what if one of these generates an error?
	 */
	p->p_xstat = rv;
	EVENTHANDLER_INVOKE(process_exit, p);

	/*
	 * XXX: imho, the eventhandler stuff is much cleaner than this.
	 *	Maybe we should move everything to use eventhandler.
	 */
	TAILQ_FOREACH(ep, &exit_list, next)
		(*ep->function)(td);

	if (p->p_flag & P_PROFIL)
		stopprofclock(p);
	/*
	 * If parent is waiting for us to exit or exec,
	 * P_PPWAIT is set; we will wakeup the parent below.
	 */
	p->p_flag &= ~(P_TRACED | P_PPWAIT);
	SIGEMPTYSET(p->p_siglist);
	SIGEMPTYSET(lp->lwp_siglist);
	if (timevalisset(&p->p_realtimer.it_value))
		callout_stop(&p->p_ithandle);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pid.
	 */
	funsetownlst(&p->p_sigiolst);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(p, NULL);

	if(p->p_leader->p_peers) {
		q = p->p_leader;
		while(q->p_peers != p)
			q = q->p_peers;
		q->p_peers = p->p_peers;
		wakeup((caddr_t)p->p_leader);
	}

	/*
	 * XXX Shutdown SYSV semaphores
	 */
	semexit(p);

	KKASSERT(p->p_numposixlocks == 0);

	/* The next two chunks should probably be moved to vmspace_exit. */
	vm = p->p_vmspace;

	/*
	 * Release upcalls associated with this process
	 */
	if (vm->vm_upcalls)
		upc_release(vm, lp);

	/*
	 * Clean up data related to virtual kernel operation.  Clean up
	 * any vkernel context related to the current lwp now so we can
	 * destroy p_vkernel.
	 */
	if (p->p_vkernel) {
		vkernel_lwp_exit(lp);
		vkernel_exit(p);
	}

	/*
	 * Release user portion of address space.
	 * This releases references to vnodes,
	 * which could cause I/O if the file has been unlinked.
	 * Need to do this early enough that we can still sleep.
	 * Can't free the entire vmspace as the kernel stack
	 * may be mapped within that space also.
	 *
	 * Processes sharing the same vmspace may exit in one order, and
	 * get cleaned up by vmspace_exit() in a different order.  The
	 * last exiting process to reach this point releases as much of
	 * the environment as it can, and the last process cleaned up
	 * by vmspace_exit() (which decrements exitingcnt) cleans up the
	 * remainder.
	 */
	vmspace_exitbump(vm);
	sysref_put(&vm->vm_sysref);

	if (SESS_LEADER(p)) {
		struct session *sp = p->p_session;

		if (sp->s_ttyvp) {
			/*
			 * We are the controlling process.  Signal the
			 * foreground process group, drain the controlling
			 * terminal, and revoke access to the controlling
			 * terminal.
428 * 429 * NOTE: while waiting for the process group to exit 430 * it is possible that one of the processes in the 431 * group will revoke the tty, so the ttyclosesession() 432 * function will re-check sp->s_ttyvp. 433 */ 434 if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) { 435 if (sp->s_ttyp->t_pgrp) 436 pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1); 437 ttywait(sp->s_ttyp); 438 ttyclosesession(sp, 1); /* also revoke */ 439 } 440 /* 441 * Release the tty. If someone has it open via 442 * /dev/tty then close it (since they no longer can 443 * once we've NULL'd it out). 444 */ 445 ttyclosesession(sp, 0); 446 447 /* 448 * s_ttyp is not zero'd; we use this to indicate 449 * that the session once had a controlling terminal. 450 * (for logging and informational purposes) 451 */ 452 } 453 sp->s_leader = NULL; 454 } 455 fixjobc(p, p->p_pgrp, 0); 456 (void)acct_process(p); 457 #ifdef KTRACE 458 /* 459 * release trace file 460 */ 461 if (p->p_tracenode) 462 ktrdestroy(&p->p_tracenode); 463 p->p_traceflag = 0; 464 #endif 465 /* 466 * Release reference to text vnode 467 */ 468 if ((vtmp = p->p_textvp) != NULL) { 469 p->p_textvp = NULL; 470 vrele(vtmp); 471 } 472 473 /* Release namecache handle to text file */ 474 if (p->p_textnch.ncp) 475 cache_drop(&p->p_textnch); 476 477 /* 478 * Move the process to the zombie list. This will block 479 * until the process p_lock count reaches 0. The process will 480 * not be reaped until TDF_EXITING is set by cpu_thread_exit(), 481 * which is called from cpu_proc_exit(). 482 */ 483 proc_move_allproc_zombie(p); 484 485 q = LIST_FIRST(&p->p_children); 486 if (q) /* only need this if any child is S_ZOMB */ 487 wakeup((caddr_t) initproc); 488 for (; q != 0; q = nq) { 489 nq = LIST_NEXT(q, p_sibling); 490 LIST_REMOVE(q, p_sibling); 491 LIST_INSERT_HEAD(&initproc->p_children, q, p_sibling); 492 q->p_pptr = initproc; 493 q->p_sigparent = SIGCHLD; 494 /* 495 * Traced processes are killed 496 * since their existence means someone is screwing up. 497 */ 498 if (q->p_flag & P_TRACED) { 499 q->p_flag &= ~P_TRACED; 500 ksignal(q, SIGKILL); 501 } 502 } 503 504 /* 505 * Save exit status and final rusage info, adding in child rusage 506 * info and self times. 507 */ 508 calcru_proc(p, &p->p_ru); 509 ruadd(&p->p_ru, &p->p_cru); 510 511 /* 512 * notify interested parties of our demise. 513 */ 514 KNOTE(&p->p_klist, NOTE_EXIT); 515 516 /* 517 * Notify parent that we're gone. If parent has the PS_NOCLDWAIT 518 * flag set, notify process 1 instead (and hope it will handle 519 * this situation). 520 */ 521 if (p->p_pptr->p_sigacts->ps_flag & PS_NOCLDWAIT) { 522 struct proc *pp = p->p_pptr; 523 proc_reparent(p, initproc); 524 /* 525 * If this was the last child of our parent, notify 526 * parent, so in case he was wait(2)ing, he will 527 * continue. 528 */ 529 if (LIST_EMPTY(&pp->p_children)) 530 wakeup((caddr_t)pp); 531 } 532 533 /* lwkt_gettoken(&proc_token); */ 534 q = p->p_pptr; 535 if (p->p_sigparent && q != initproc) { 536 PHOLD(q); 537 ksignal(q, p->p_sigparent); 538 PRELE(q); 539 } else { 540 ksignal(q, SIGCHLD); 541 } 542 /* lwkt_reltoken(&proc_token); */ 543 /* NOTE: p->p_pptr can get ripped out */ 544 545 wakeup(p->p_pptr); 546 /* 547 * cpu_exit is responsible for clearing curproc, since 548 * it is heavily integrated with the thread/switching sequence. 549 * 550 * Other substructures are freed from wait(). 551 */ 552 plimit_free(p); 553 554 /* 555 * Release the current user process designation on the process so 556 * the userland scheduler can work in someone else. 
557 */ 558 p->p_usched->release_curproc(lp); 559 560 /* 561 * Finally, call machine-dependent code to release as many of the 562 * lwp's resources as we can and halt execution of this thread. 563 */ 564 lwp_exit(1); 565 } 566 567 /* 568 * Eventually called by every exiting LWP 569 */ 570 void 571 lwp_exit(int masterexit) 572 { 573 struct thread *td = curthread; 574 struct lwp *lp = td->td_lwp; 575 struct proc *p = lp->lwp_proc; 576 577 /* 578 * lwp_exit() may be called without setting LWP_WEXIT, so 579 * make sure it is set here. 580 */ 581 lp->lwp_flag |= LWP_WEXIT; 582 583 /* 584 * Clean up any virtualization 585 */ 586 if (lp->lwp_vkernel) 587 vkernel_lwp_exit(lp); 588 589 /* 590 * Clean up select/poll support 591 */ 592 kqueue_terminate(&lp->lwp_kqueue); 593 594 /* 595 * Clean up any syscall-cached ucred 596 */ 597 if (td->td_ucred) { 598 crfree(td->td_ucred); 599 td->td_ucred = NULL; 600 } 601 602 /* 603 * Nobody actually wakes us when the lock 604 * count reaches zero, so just wait one tick. 605 */ 606 while (lp->lwp_lock > 0) 607 tsleep(lp, 0, "lwpexit", 1); 608 609 /* Hand down resource usage to our proc */ 610 ruadd(&p->p_ru, &lp->lwp_ru); 611 612 /* 613 * If we don't hold the process until the LWP is reaped wait*() 614 * may try to dispose of its vmspace before all the LWPs have 615 * actually terminated. 616 */ 617 PHOLD(p); 618 619 /* 620 * Do any remaining work that might block on us. We should be 621 * coded such that further blocking is ok after decrementing 622 * p_nthreads but don't take the chance. 623 */ 624 dsched_exit_thread(td); 625 biosched_done(curthread); 626 627 /* 628 * We have to use the reaper for all the LWPs except the one doing 629 * the master exit. The LWP doing the master exit can just be 630 * left on p_lwps and the process reaper will deal with it 631 * synchronously, which is much faster. 632 * 633 * Wakeup anyone waiting on p_nthreads to drop to 1 or 0. 634 */ 635 if (masterexit == 0) { 636 lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp); 637 --p->p_nthreads; 638 if (p->p_nthreads <= 1) 639 wakeup(&p->p_nthreads); 640 LIST_INSERT_HEAD(&deadlwp_list[mycpuid], lp, u.lwp_reap_entry); 641 taskqueue_enqueue(taskqueue_thread[mycpuid], 642 deadlwp_task[mycpuid]); 643 } else { 644 --p->p_nthreads; 645 if (p->p_nthreads <= 1) 646 wakeup(&p->p_nthreads); 647 } 648 cpu_lwp_exit(); 649 } 650 651 /* 652 * Wait until a lwp is completely dead. 653 * 654 * If the thread is still executing, which can't be waited upon, 655 * return failure. The caller is responsible of waiting a little 656 * bit and checking again. 657 * 658 * Suggested use: 659 * while (!lwp_wait(lp)) 660 * tsleep(lp, 0, "lwpwait", 1); 661 */ 662 static int 663 lwp_wait(struct lwp *lp) 664 { 665 struct thread *td = lp->lwp_thread;; 666 667 KKASSERT(lwkt_preempted_proc() != lp); 668 669 while (lp->lwp_lock > 0) 670 tsleep(lp, 0, "lwpwait1", 1); 671 672 lwkt_wait_free(td); 673 674 /* 675 * The lwp's thread may still be in the middle 676 * of switching away, we can't rip its stack out from 677 * under it until TDF_EXITING is set and both 678 * TDF_RUNNING and TDF_PREEMPT_LOCK are clear. 679 * TDF_PREEMPT_LOCK must be checked because TDF_RUNNING 680 * will be cleared temporarily if a thread gets 681 * preempted. 682 * 683 * YYY no wakeup occurs, so we simply return failure 684 * and let the caller deal with sleeping and calling 685 * us again. 
686 */ 687 if ((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK| 688 TDF_EXITING|TDF_RUNQ)) != TDF_EXITING) { 689 return (0); 690 } 691 KASSERT((td->td_flags & TDF_TSLEEPQ) == 0, 692 ("lwp_wait: td %p (%s) still on sleep queue", td, td->td_comm)); 693 return (1); 694 } 695 696 /* 697 * Release the resources associated with a lwp. 698 * The lwp must be completely dead. 699 */ 700 void 701 lwp_dispose(struct lwp *lp) 702 { 703 struct thread *td = lp->lwp_thread;; 704 705 KKASSERT(lwkt_preempted_proc() != lp); 706 KKASSERT(td->td_refs == 0); 707 KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|TDF_EXITING)) == 708 TDF_EXITING); 709 710 PRELE(lp->lwp_proc); 711 lp->lwp_proc = NULL; 712 if (td != NULL) { 713 td->td_proc = NULL; 714 td->td_lwp = NULL; 715 lp->lwp_thread = NULL; 716 lwkt_free_thread(td); 717 } 718 kfree(lp, M_LWP); 719 } 720 721 /* 722 * MPSAFE 723 */ 724 int 725 sys_wait4(struct wait_args *uap) 726 { 727 struct rusage rusage; 728 int error, status; 729 730 error = kern_wait(uap->pid, (uap->status ? &status : NULL), 731 uap->options, (uap->rusage ? &rusage : NULL), 732 &uap->sysmsg_result); 733 734 if (error == 0 && uap->status) 735 error = copyout(&status, uap->status, sizeof(*uap->status)); 736 if (error == 0 && uap->rusage) 737 error = copyout(&rusage, uap->rusage, sizeof(*uap->rusage)); 738 return (error); 739 } 740 741 /* 742 * wait1() 743 * 744 * wait_args(int pid, int *status, int options, struct rusage *rusage) 745 * 746 * MPALMOSTSAFE 747 */ 748 int 749 kern_wait(pid_t pid, int *status, int options, struct rusage *rusage, int *res) 750 { 751 struct thread *td = curthread; 752 struct lwp *lp; 753 struct proc *q = td->td_proc; 754 struct proc *p, *t; 755 int nfound, error; 756 757 if (pid == 0) 758 pid = -q->p_pgid; 759 if (options &~ (WUNTRACED|WNOHANG|WCONTINUED|WLINUXCLONE)) 760 return (EINVAL); 761 get_mplock(); 762 loop: 763 /* 764 * Hack for backwards compatibility with badly written user code. 765 * Or perhaps we have to do this anyway, it is unclear. XXX 766 * 767 * The problem is that if a process group is stopped and the parent 768 * is doing a wait*(..., WUNTRACED, ...), it will see the STOP 769 * of the child and then stop itself when it tries to return from the 770 * system call. When the process group is resumed the parent will 771 * then get the STOP status even though the child has now resumed 772 * (a followup wait*() will get the CONT status). 773 * 774 * Previously the CONT would overwrite the STOP because the tstop 775 * was handled within tsleep(), and the parent would only see 776 * the CONT when both are stopped and continued together. This litte 777 * two-line hack restores this effect. 778 */ 779 while (q->p_stat == SSTOP) 780 tstop(); 781 782 nfound = 0; 783 LIST_FOREACH(p, &q->p_children, p_sibling) { 784 if (pid != WAIT_ANY && 785 p->p_pid != pid && p->p_pgid != -pid) 786 continue; 787 788 /* This special case handles a kthread spawned by linux_clone 789 * (see linux_misc.c). The linux_wait4 and linux_waitpid 790 * functions need to be able to distinguish between waiting 791 * on a process and waiting on a thread. It is a thread if 792 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option 793 * signifies we want to wait for threads and not processes. 794 */ 795 if ((p->p_sigparent != SIGCHLD) ^ 796 ((options & WLINUXCLONE) != 0)) { 797 continue; 798 } 799 800 nfound++; 801 if (p->p_stat == SZOMB) { 802 /* 803 * We may go into SZOMB with threads still present. 
			 * We must wait for them to exit before we can reap
			 * the master thread, otherwise we may race reaping
			 * non-master threads.
			 */
			while (p->p_nthreads > 0) {
				tsleep(&p->p_nthreads, 0, "lwpzomb", hz);
			}

			/*
			 * Reap any LWPs left in p->p_lwps.  This is usually
			 * just the last LWP.  This must be done before
			 * we loop on p_lock since the lwps hold a ref on
			 * it as a vmspace interlock.
			 *
			 * Once that is accomplished p_nthreads had better
			 * be zero.
			 */
			while ((lp = RB_ROOT(&p->p_lwp_tree)) != NULL) {
				lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
				reaplwp(lp);
			}
			KKASSERT(p->p_nthreads == 0);

			/*
			 * Don't do anything really bad until all references
			 * to the process go away.  This may include other
			 * LWPs which are still in the process of being
			 * reaped.  We can't just pull the rug out from under
			 * them because they may still be using the VM space.
			 *
			 * Certain kernel facilities such as /proc will also
			 * put a hold on the process for short periods of
			 * time.
			 */
			while (p->p_lock)
				tsleep(p, 0, "reap3", hz);

			/* scheduling hook for heuristic */
			/* XXX no lwp available, we need a different heuristic */
			/*
			p->p_usched->heuristic_exiting(td->td_lwp, deadlp);
			*/

			/* Take care of our return values. */
			*res = p->p_pid;
			if (status)
				*status = p->p_xstat;
			if (rusage)
				*rusage = p->p_ru;
			/*
			 * If we got the child via a ptrace 'attach',
			 * we need to give it back to the old parent.
			 */
			if (p->p_oppid && (t = pfind(p->p_oppid))) {
				p->p_oppid = 0;
				proc_reparent(p, t);
				ksignal(t, SIGCHLD);
				wakeup((caddr_t)t);
				error = 0;
				goto done;
			}

			/*
			 * Unlink the proc from its process group so that
			 * the following operations won't lead to an
			 * inconsistent state for processes running down
			 * the zombie list.
			 */
			KKASSERT(p->p_lock == 0);
			proc_remove_zombie(p);
			leavepgrp(p);

			p->p_xstat = 0;
			ruadd(&q->p_cru, &p->p_ru);

			/*
			 * Decrement the count of procs running with this uid.
			 */
			chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);

			/*
			 * Free up credentials.
			 */
			crfree(p->p_ucred);
			p->p_ucred = NULL;

			/*
			 * Remove unused arguments
			 */
			if (p->p_args && --p->p_args->ar_ref == 0)
				kfree(p->p_args, M_PARGS);

			if (--p->p_sigacts->ps_refcnt == 0) {
				kfree(p->p_sigacts, M_SUBPROC);
				p->p_sigacts = NULL;
			}

			vm_waitproc(p);
			kfree(p, M_PROC);
			nprocs--;
			error = 0;
			goto done;
		}
		if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 &&
		    (p->p_flag & P_TRACED || options & WUNTRACED)) {
			p->p_flag |= P_WAITED;

			*res = p->p_pid;
			if (status)
				*status = W_STOPCODE(p->p_xstat);
			/* Zero rusage so we get something consistent. */
			if (rusage)
				bzero(rusage, sizeof(*rusage));
			error = 0;
			goto done;
		}
		if (options & WCONTINUED && (p->p_flag & P_CONTINUED)) {
			*res = p->p_pid;
			p->p_flag &= ~P_CONTINUED;

			if (status)
				*status = SIGCONT;
			error = 0;
			goto done;
		}
	}
	if (nfound == 0) {
		error = ECHILD;
		goto done;
	}
	if (options & WNOHANG) {
		*res = 0;
		error = 0;
		goto done;
	}
	error = tsleep((caddr_t)q, PCATCH, "wait", 0);
	if (error) {
done:
		rel_mplock();
		return (error);
	}
	goto loop;
}
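
/*
 * Editorial note, not part of the original source: the status word that
 * kern_wait() stores through *status is the raw value built above from
 * p_xstat / W_STOPCODE(), so userland is expected to decode it with the
 * standard <sys/wait.h> macros.  A minimal userland sketch of the usual
 * pattern:
 *
 *	int status;
 *	pid_t pid = waitpid(-1, &status, WUNTRACED);
 *	if (pid > 0) {
 *		if (WIFEXITED(status))
 *			printf("exit %d\n", WEXITSTATUS(status));
 *		else if (WIFSIGNALED(status))
 *			printf("signal %d\n", WTERMSIG(status));
 *		else if (WIFSTOPPED(status))
 *			printf("stopped by %d\n", WSTOPSIG(status));
 *	}
 */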
950 */ 951 void 952 proc_reparent(struct proc *child, struct proc *parent) 953 { 954 955 if (child->p_pptr == parent) 956 return; 957 958 LIST_REMOVE(child, p_sibling); 959 LIST_INSERT_HEAD(&parent->p_children, child, p_sibling); 960 child->p_pptr = parent; 961 } 962 963 /* 964 * The next two functions are to handle adding/deleting items on the 965 * exit callout list 966 * 967 * at_exit(): 968 * Take the arguments given and put them onto the exit callout list, 969 * However first make sure that it's not already there. 970 * returns 0 on success. 971 */ 972 973 int 974 at_exit(exitlist_fn function) 975 { 976 struct exitlist *ep; 977 978 #ifdef INVARIANTS 979 /* Be noisy if the programmer has lost track of things */ 980 if (rm_at_exit(function)) 981 kprintf("WARNING: exit callout entry (%p) already present\n", 982 function); 983 #endif 984 ep = kmalloc(sizeof(*ep), M_ATEXIT, M_NOWAIT); 985 if (ep == NULL) 986 return (ENOMEM); 987 ep->function = function; 988 TAILQ_INSERT_TAIL(&exit_list, ep, next); 989 return (0); 990 } 991 992 /* 993 * Scan the exit callout list for the given item and remove it. 994 * Returns the number of items removed (0 or 1) 995 */ 996 int 997 rm_at_exit(exitlist_fn function) 998 { 999 struct exitlist *ep; 1000 1001 TAILQ_FOREACH(ep, &exit_list, next) { 1002 if (ep->function == function) { 1003 TAILQ_REMOVE(&exit_list, ep, next); 1004 kfree(ep, M_ATEXIT); 1005 return(1); 1006 } 1007 } 1008 return (0); 1009 } 1010 1011 /* 1012 * LWP reaper related code. 1013 */ 1014 static void 1015 reaplwps(void *context, int dummy) 1016 { 1017 struct lwplist *lwplist = context; 1018 struct lwp *lp; 1019 1020 get_mplock(); 1021 while ((lp = LIST_FIRST(lwplist))) { 1022 LIST_REMOVE(lp, u.lwp_reap_entry); 1023 reaplwp(lp); 1024 } 1025 rel_mplock(); 1026 } 1027 1028 static void 1029 reaplwp(struct lwp *lp) 1030 { 1031 while (lwp_wait(lp) == 0) 1032 tsleep(lp, 0, "lwpreap", 1); 1033 lwp_dispose(lp); 1034 } 1035 1036 static void 1037 deadlwp_init(void) 1038 { 1039 int cpu; 1040 1041 for (cpu = 0; cpu < ncpus; cpu++) { 1042 LIST_INIT(&deadlwp_list[cpu]); 1043 deadlwp_task[cpu] = kmalloc(sizeof(*deadlwp_task[cpu]), M_DEVBUF, M_WAITOK); 1044 TASK_INIT(deadlwp_task[cpu], 0, reaplwps, &deadlwp_list[cpu]); 1045 } 1046 } 1047 1048 SYSINIT(deadlwpinit, SI_SUB_CONFIGURE, SI_ORDER_ANY, deadlwp_init, NULL); 1049