/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 * $FreeBSD: src/sys/kern/kern_exit.c,v 1.92.2.11 2003/01/13 22:51:16 dillon Exp $
 */

#include "opt_compat.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/pioctl.h>
#include <sys/tty.h>
#include <sys/wait.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/taskqueue.h>
#include <sys/ptrace.h>
#include <sys/acct.h>		/* for acct_process() function prototype */
#include <sys/filedesc.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/jail.h>
#include <sys/kern_syscall.h>
#include <sys/unistd.h>
#include <sys/eventhandler.h>
#include <sys/dsched.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <sys/user.h>

#include <sys/refcount.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>
#include <sys/mplock2.h>

#include <machine/vmm.h>

static void reaplwps(void *context, int dummy);
static void reaplwp(struct lwp *lp);
static void killlwps(struct lwp *lp);

static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback");

/*
 * callout list for things to do at exit time
 */
struct exitlist {
	exitlist_fn function;
	TAILQ_ENTRY(exitlist) next;
};

TAILQ_HEAD(exit_list_head, exitlist);
static struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list);

/*
 * LWP reaper data
 */
static struct task *deadlwp_task[MAXCPU];
static struct lwplist deadlwp_list[MAXCPU];
static struct lwkt_token deadlwp_token[MAXCPU];

/*
 * exit --
 *	Death of process.
 *
 * SYS_EXIT_ARGS(int rval)
 */
int
sys_exit(struct exit_args *uap)
{
	exit1(W_EXITCODE(uap->rval, 0));
	/* NOTREACHED */
}
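
/*
 * Illustrative sketch (comment only, not compiled): W_EXITCODE() packs
 * the exit code and termination signal into the single status word
 * saved in p_xstat below and later decoded by the parent with the
 * standard wait(2) macros from <sys/wait.h>:
 *
 *	int status = W_EXITCODE(2, 0);	(exit code 2, no signal)
 *	WIFEXITED(status)	-> true
 *	WEXITSTATUS(status)	-> 2
 *	WTERMSIG(status)	-> 0
 */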

/*
 * Extended exit --
 *	Death of a lwp or process with optional bells and whistles.
 *
 * MPALMOSTSAFE
 */
int
sys_extexit(struct extexit_args *uap)
{
	struct proc *p = curproc;
	int action, who;
	int error;

	action = EXTEXIT_ACTION(uap->how);
	who = EXTEXIT_WHO(uap->how);

	/* Check parameters before we might perform some action */
	switch (who) {
	case EXTEXIT_PROC:
	case EXTEXIT_LWP:
		break;
	default:
		return (EINVAL);
	}

	switch (action) {
	case EXTEXIT_SIMPLE:
		break;
	case EXTEXIT_SETINT:
		error = copyout(&uap->status, uap->addr, sizeof(uap->status));
		if (error)
			return (error);
		break;
	default:
		return (EINVAL);
	}

	lwkt_gettoken(&p->p_token);

	switch (who) {
	case EXTEXIT_LWP:
		/*
		 * Be sure only to perform a simple lwp exit if there is at
		 * least one more lwp in the proc, which will call exit1()
		 * later, otherwise the proc will be an UNDEAD and not even a
		 * SZOMB!
		 */
		if (p->p_nthreads > 1) {
			lwp_exit(0);	/* called w/ p_token held */
			/* NOT REACHED */
		}
		/* else last lwp in proc:  do the real thing */
		/* FALLTHROUGH */
	default:	/* to help gcc */
	case EXTEXIT_PROC:
		lwkt_reltoken(&p->p_token);
		exit1(W_EXITCODE(uap->status, 0));
		/* NOTREACHED */
	}

	/* NOTREACHED */
	lwkt_reltoken(&p->p_token);	/* safety */
}
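
/*
 * Usage sketch (userland, illustrative only; assumes the extexit(2)
 * stub takes (how, status, addr) in the order the uap layout above
 * suggests, and that the action and who bits of `how' combine with
 * bitwise or as the EXTEXIT_ACTION()/EXTEXIT_WHO() decoding implies):
 * terminate only the calling lwp, first publishing the status word to
 * a location a peer thread can inspect:
 *
 *	static int lwp_status;
 *	...
 *	extexit(EXTEXIT_LWP | EXTEXIT_SETINT, code, &lwp_status);
 */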

/*
 * Kill all lwps associated with the current process except the
 * current lwp.   Return an error if we race another thread trying to
 * do the same thing and lose the race.
 *
 * If forexec is non-zero the current thread and process flags are
 * cleaned up so they can be reused.
 *
 * Caller must hold curproc->p_token
 */
int
killalllwps(int forexec)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;

	/*
	 * Interlock against P_WEXIT.  Only one of the process's threads
	 * is allowed to do the master exit.
	 */
	if (p->p_flags & P_WEXIT)
		return (EALREADY);
	p->p_flags |= P_WEXIT;

	/*
	 * Interlock with LWP_MP_WEXIT and kill any remaining LWPs
	 */
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
	if (p->p_nthreads > 1)
		killlwps(lp);

	/*
	 * If doing this for an exec, clean up the remaining thread
	 * (us) for continuing operation after all the other threads
	 * have been killed.
	 */
	if (forexec) {
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
		p->p_flags &= ~P_WEXIT;
	}
	return(0);
}

/*
 * Kill all LWPs except the current one.  Do not try to signal
 * LWPs which have exited on their own or have already been
 * signaled.
 */
static void
killlwps(struct lwp *lp)
{
	struct proc *p = lp->lwp_proc;
	struct lwp *tlp;

	/*
	 * Kill the remaining LWPs.  We must send the signal before setting
	 * LWP_MP_WEXIT.  The setting of WEXIT is optional but helps reduce
	 * races.  tlp must be held across the call as it might block and
	 * allow the target lwp to rip itself out from under our loop.
	 */
	FOREACH_LWP_IN_PROC(tlp, p) {
		LWPHOLD(tlp);
		lwkt_gettoken(&tlp->lwp_token);
		if ((tlp->lwp_mpflags & LWP_MP_WEXIT) == 0) {
			lwpsignal(p, tlp, SIGKILL);
			atomic_set_int(&tlp->lwp_mpflags, LWP_MP_WEXIT);
		}
		lwkt_reltoken(&tlp->lwp_token);
		LWPRELE(tlp);
	}

	/*
	 * Wait for everything to clear out.
	 */
	while (p->p_nthreads > 1) {
		tsleep(&p->p_nthreads, 0, "killlwps", 0);
	}
}
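
/*
 * Call-pattern sketch (the exec caller lives outside this file, e.g.
 * in the kern_exec path; shown only for illustration): an exec of a
 * multi-threaded process must first become single-threaded, but the
 * surviving thread keeps running afterwards, so the exit interlocks
 * are cleared again on its behalf:
 *
 *	error = killalllwps(1);		(single-thread for exec)
 *	if (error)
 *		...abort the exec...
 *
 * exit1() below makes the non-exec call, killalllwps(0), leaving the
 * flags set because the last thread really is exiting.
 */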

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(int rv)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	struct proc *q;
	struct vmspace *vm;
	struct vnode *vtmp;
	struct exitlist *ep;
	int error;

	lwkt_gettoken(&p->p_token);

	if (p->p_pid == 1) {
		kprintf("init died (signal %d, exit %d)\n",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("Going nowhere without my init!");
	}
	varsymset_clean(&p->p_varsymset);
	lockuninit(&p->p_varsymset.vx_lock);

	/*
	 * Kill all lwps associated with the current process, return an
	 * error if we race another thread trying to do the same thing
	 * and lose the race.
	 */
	error = killalllwps(0);
	if (error) {
		lwp_exit(0);
		/* NOT REACHED */
	}

	/* are we a task leader? */
	if (p == p->p_leader) {
		struct kill_args killArgs;

		killArgs.signum = SIGKILL;
		q = p->p_peers;
		while (q) {
			killArgs.pid = q->p_pid;
			/*
			 * The interface for kill is better
			 * than the internal signal
			 */
			sys_kill(&killArgs);
			q = q->p_peers;
		}
		while (p->p_peers)
			tsleep((caddr_t)p, 0, "exit1", 0);
	}

#ifdef PGINPROF
	vmsizmon();
#endif
	STOPEVENT(p, S_EXIT, rv);
	p->p_flags |= P_POSTEXIT;	/* stop procfs stepping */

	/*
	 * Check if any loadable modules need anything done at process exit.
	 * e.g. SYSV IPC stuff
	 * XXX what if one of these generates an error?
	 */
	p->p_xstat = rv;
	EVENTHANDLER_INVOKE(process_exit, p);

	/*
	 * XXX: imho, the eventhandler stuff is much cleaner than this.
	 *	Maybe we should move everything to use eventhandler.
	 */
	TAILQ_FOREACH(ep, &exit_list, next)
		(*ep->function)(td);

	if (p->p_flags & P_PROFIL)
		stopprofclock(p);

	SIGEMPTYSET(p->p_siglist);
	SIGEMPTYSET(lp->lwp_siglist);
	if (timevalisset(&p->p_realtimer.it_value))
		callout_stop_sync(&p->p_ithandle);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pid.
	 */
	funsetownlst(&p->p_sigiolst);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(p, NULL);

	if (p->p_leader->p_peers) {
		q = p->p_leader;
		while (q->p_peers != p)
			q = q->p_peers;
		q->p_peers = p->p_peers;
		wakeup((caddr_t)p->p_leader);
	}

	/*
	 * XXX Shutdown SYSV semaphores
	 */
	semexit(p);

	KKASSERT(p->p_numposixlocks == 0);

	/* The next two chunks should probably be moved to vmspace_exit. */
	vm = p->p_vmspace;

	/*
	 * Clean up data related to virtual kernel operation.  Clean up
	 * any vkernel context related to the current lwp now so we can
	 * destroy p_vkernel.
	 */
	if (p->p_vkernel) {
		vkernel_lwp_exit(lp);
		vkernel_exit(p);
	}

	/*
	 * Release the user portion of address space.
	 * This releases references to vnodes,
	 * which could cause I/O if the file has been unlinked.
	 * Need to do this early enough that we can still sleep.
	 * Can't free the entire vmspace as the kernel stack
	 * may be mapped within that space also.
	 *
	 * Processes sharing the same vmspace may exit in one order, and
	 * get cleaned up by vmspace_exit() in a different order.  The
	 * last exiting process to reach this point releases as much of
	 * the environment as it can, and the last process cleaned up
	 * by vmspace_exit() (which decrements exitingcnt) cleans up the
	 * remainder.
	 */
	vmspace_exitbump(vm);
	sysref_put(&vm->vm_sysref);

	if (SESS_LEADER(p)) {
		struct session *sp = p->p_session;

		if (sp->s_ttyvp) {
			/*
			 * We are the controlling process.  Signal the
			 * foreground process group, drain the controlling
			 * terminal, and revoke access to the controlling
			 * terminal.
			 *
			 * NOTE: while waiting for the process group to exit
			 * it is possible that one of the processes in the
			 * group will revoke the tty, so the ttyclosesession()
			 * function will re-check sp->s_ttyvp.
			 */
			if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) {
				if (sp->s_ttyp->t_pgrp)
					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
				ttywait(sp->s_ttyp);
				ttyclosesession(sp, 1); /* also revoke */
			}

			/*
			 * Release the tty.  If someone has it open via
			 * /dev/tty then close it (since they no longer can
			 * once we've NULL'd it out).
			 */
			ttyclosesession(sp, 0);

			/*
			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)
			 */
		}
		sp->s_leader = NULL;
	}
	fixjobc(p, p->p_pgrp, 0);
	(void)acct_process(p);
#ifdef KTRACE
	/*
	 * release trace file
	 */
	if (p->p_tracenode)
		ktrdestroy(&p->p_tracenode);
	p->p_traceflag = 0;
#endif
	/*
	 * Release reference to text vnode
	 */
	if ((vtmp = p->p_textvp) != NULL) {
		p->p_textvp = NULL;
		vrele(vtmp);
	}

	/* Release namecache handle to text file */
	if (p->p_textnch.ncp)
		cache_drop(&p->p_textnch);

	/*
	 * We have to handle PPWAIT here or proc_move_allproc_zombie()
	 * will block on the PHOLD() the parent is doing.
	 */
	if (p->p_flags & P_PPWAIT) {
		p->p_flags &= ~P_PPWAIT;
		wakeup(p->p_pptr);
	}

	/*
	 * Move the process to the zombie list.  This will block
	 * until the process p_lock count reaches 0.  The process will
	 * not be reaped until TDF_EXITING is set by cpu_thread_exit(),
	 * which is called from cpu_proc_exit().
	 */
	proc_move_allproc_zombie(p);

	/*
	 * Reparent all of this process's children to the init process.
	 * We must hold initproc->p_token in order to mess with
	 * initproc->p_children.  We already hold p->p_token (to remove
	 * the children from our list).
	 */
	q = LIST_FIRST(&p->p_children);
	if (q) {
		lwkt_gettoken(&initproc->p_token);
		while ((q = LIST_FIRST(&p->p_children)) != NULL) {
			PHOLD(q);
			lwkt_gettoken(&q->p_token);
			if (q != LIST_FIRST(&p->p_children)) {
				lwkt_reltoken(&q->p_token);
				PRELE(q);
				continue;
			}
			LIST_REMOVE(q, p_sibling);
			LIST_INSERT_HEAD(&initproc->p_children, q, p_sibling);
			q->p_pptr = initproc;
			q->p_sigparent = SIGCHLD;

			/*
			 * Traced processes are killed
			 * since their existence means someone is screwing up.
			 */
			if (q->p_flags & P_TRACED) {
				q->p_flags &= ~P_TRACED;
				ksignal(q, SIGKILL);
			}
			lwkt_reltoken(&q->p_token);
			PRELE(q);
		}
		lwkt_reltoken(&initproc->p_token);
		wakeup(initproc);
	}

	/*
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.
	 */
	calcru_proc(p, &p->p_ru);
	ruadd(&p->p_ru, &p->p_cru);

	/*
	 * Notify interested parties of our demise.
	 */
	KNOTE(&p->p_klist, NOTE_EXIT);

	/*
	 * Notify parent that we're gone.  If parent has the PS_NOCLDWAIT
	 * flag set, or if the handler is set to SIG_IGN, notify process 1
	 * instead (and hope it will handle this situation).
	 */
	if (p->p_pptr->p_sigacts->ps_flag & (PS_NOCLDWAIT | PS_CLDSIGIGN)) {
		proc_reparent(p, initproc);
	}

	/* lwkt_gettoken(&proc_token); */
	q = p->p_pptr;
	PHOLD(q);
	if (p->p_sigparent && q != initproc) {
		ksignal(q, p->p_sigparent);
	} else {
		ksignal(q, SIGCHLD);
	}

	p->p_flags &= ~P_TRACED;
	wakeup(p->p_pptr);

	PRELE(q);
	/* lwkt_reltoken(&proc_token); */
	/* NOTE: p->p_pptr can get ripped out */
	/*
	 * cpu_exit is responsible for clearing curproc, since
	 * it is heavily integrated with the thread/switching sequence.
	 *
	 * Other substructures are freed from wait().
	 */
	plimit_free(p);

	/*
	 * Finally, call machine-dependent code to release as many of the
	 * lwp's resources as we can and halt execution of this thread.
	 */
	lwp_exit(1);
}

/*
 * Eventually called by every exiting LWP
 *
 * p->p_token must be held.  mplock may be held and will be released.
 */
void
lwp_exit(int masterexit)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = lp->lwp_proc;
	int dowake = 0;

	/*
	 * Release the current user process designation on the process so
	 * the userland scheduler can schedule someone else in.
	 */
	p->p_usched->release_curproc(lp);

	/*
	 * lwp_exit() may be called without setting LWP_MP_WEXIT, so
	 * make sure it is set here.
	 */
	ASSERT_LWKT_TOKEN_HELD(&p->p_token);
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);

	/*
	 * Clean up any virtualization
	 */
	if (lp->lwp_vkernel)
		vkernel_lwp_exit(lp);

	if (td->td_vmm)
		vmm_vmdestroy();

	/*
	 * Clean up select/poll support
	 */
	kqueue_terminate(&lp->lwp_kqueue);

	/*
	 * Clean up any syscall-cached ucred
	 */
	if (td->td_ucred) {
		crfree(td->td_ucred);
		td->td_ucred = NULL;
	}

	/*
	 * Nobody actually wakes us when the lock
	 * count reaches zero, so just wait one tick.
	 */
	while (lp->lwp_lock > 0)
		tsleep(lp, 0, "lwpexit", 1);

	/* Hand down resource usage to our proc */
	ruadd(&p->p_ru, &lp->lwp_ru);

	/*
	 * If we don't hold the process until the LWP is reaped wait*()
	 * may try to dispose of its vmspace before all the LWPs have
	 * actually terminated.
	 */
	PHOLD(p);

	/*
	 * Do any remaining work that might block on us.  We should be
	 * coded such that further blocking is ok after decrementing
	 * p_nthreads but don't take the chance.
	 */
	dsched_exit_thread(td);
	biosched_done(curthread);

	/*
	 * We have to use the reaper for all the LWPs except the one doing
	 * the master exit.  The LWP doing the master exit can just be
	 * left on p_lwps and the process reaper will deal with it
	 * synchronously, which is much faster.
	 *
	 * Wakeup anyone waiting on p_nthreads to drop to 1 or 0.
	 *
	 * The process is left held until the reaper calls lwp_dispose() on
	 * the lp (after calling lwp_wait()).
	 */
	if (masterexit == 0) {
		int cpu = mycpuid;

		lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
		--p->p_nthreads;
		if (p->p_nthreads <= 1)
			dowake = 1;
		lwkt_gettoken(&deadlwp_token[cpu]);
		LIST_INSERT_HEAD(&deadlwp_list[cpu], lp, u.lwp_reap_entry);
		taskqueue_enqueue(taskqueue_thread[cpu], deadlwp_task[cpu]);
		lwkt_reltoken(&deadlwp_token[cpu]);
	} else {
		--p->p_nthreads;
		if (p->p_nthreads <= 1)
			dowake = 1;
	}

	/*
	 * Release p_token.  Issue the wakeup() on p_nthreads if necessary,
	 * as late as possible to give us a chance to actually deschedule and
	 * switch away before another cpu core hits reaplwp().
	 */
	lwkt_reltoken(&p->p_token);
	if (dowake)
		wakeup(&p->p_nthreads);

	/*
	 * Tell the userland scheduler that we are going away
	 */
	p->p_usched->heuristic_exiting(lp, p);

	cpu_lwp_exit();
}
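
/*
 * Call-pattern sketch (both callers appear in this file): the thread
 * performing the master exit runs exit1() -> lwp_exit(1) and stays on
 * p_lwps so kern_wait() can reap it synchronously, while a thread
 * exiting on its own, e.g. via sys_extexit(), runs lwp_exit(0) and is
 * queued on the per-cpu deadlwp list for the taskqueue reaper
 * (reaplwps() below):
 *
 *	exit1(rv)     -> lwp_exit(1)	(last/master lwp)
 *	sys_extexit() -> lwp_exit(0)	(one lwp of several)
 */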

/*
 * Wait until a lwp is completely dead.  The final interlock in this drama
 * is when TDF_EXITING is set in cpu_thread_exit() just before the final
 * switchout.
 *
 * At the point TDF_EXITING is set a complete exit is accomplished when
 * TDF_RUNNING and TDF_PREEMPT_LOCK are both clear.  td_mpflags has two
 * post-switch interlock flags that can be used to wait for the TDF_
 * flags to clear.
 *
 * Returns non-zero on success, and zero if the caller needs to retry
 * the lwp_wait().
 */
static int
lwp_wait(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;
	u_int mpflags;

	KKASSERT(lwkt_preempted_proc() != lp);

	/*
	 * This bit of code uses the thread destruction interlock
	 * managed by lwkt_switch_return() to wait for the lwp's
	 * thread to completely disengage.
	 *
	 * It is possible for us to race another cpu core so we
	 * have to do this correctly.
	 */
	for (;;) {
		mpflags = td->td_mpflags;
		cpu_ccfence();
		if (mpflags & TDF_MP_EXITSIG)
			break;
		tsleep_interlock(td, 0);
		if (atomic_cmpset_int(&td->td_mpflags, mpflags,
				      mpflags | TDF_MP_EXITWAIT)) {
			tsleep(td, PINTERLOCKED, "lwpxt", 0);
		}
	}

	/*
	 * We've already waited for the core exit but there can still
	 * be other refs from e.g. process scans and such.
	 */
	if (lp->lwp_lock > 0) {
		tsleep(lp, 0, "lwpwait1", 1);
		return(0);
	}
	if (td->td_refs) {
		tsleep(td, 0, "lwpwait2", 1);
		return(0);
	}

	/*
	 * Now that we have the thread destruction interlock these flags
	 * really should already be cleaned up, keep a check for safety.
	 *
	 * We can't rip its stack out from under it until TDF_EXITING is
	 * set and both TDF_RUNNING and TDF_PREEMPT_LOCK are clear.
	 * TDF_PREEMPT_LOCK must be checked because TDF_RUNNING
	 * will be cleared temporarily if a thread gets preempted.
	 */
	while ((td->td_flags & (TDF_RUNNING |
				TDF_PREEMPT_LOCK |
				TDF_EXITING)) != TDF_EXITING) {
		tsleep(lp, 0, "lwpwait3", 1);
		return (0);
	}

	KASSERT((td->td_flags & (TDF_RUNQ|TDF_TSLEEPQ)) == 0,
		("lwp_wait: td %p (%s) still on run or sleep queue",
		 td, td->td_comm));
	return (1);
}
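
/*
 * Pattern sketch: the flag loop above is the standard DragonFly
 * interlocked-sleep sequence.  The sleep is registered with
 * tsleep_interlock() before the waiter flag is published (here via
 * the atomic cmpset that sets TDF_MP_EXITWAIT), so a wakeup() racing
 * the re-test cannot be lost.  Generically, with `ident',
 * `still_waiting' and "wmesg" as placeholders:
 *
 *	tsleep_interlock(ident, 0);
 *	if (still_waiting)
 *		tsleep(ident, PINTERLOCKED, "wmesg", 0);
 */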

/*
 * Release the resources associated with a lwp.
 * The lwp must be completely dead.
 */
void
lwp_dispose(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;

	KKASSERT(lwkt_preempted_proc() != lp);
	KKASSERT(td->td_refs == 0);
	KKASSERT((td->td_flags & (TDF_RUNNING |
				  TDF_PREEMPT_LOCK |
				  TDF_EXITING)) == TDF_EXITING);

	PRELE(lp->lwp_proc);
	lp->lwp_proc = NULL;
	if (td != NULL) {
		td->td_proc = NULL;
		td->td_lwp = NULL;
		lp->lwp_thread = NULL;
		lwkt_free_thread(td);
	}
	kfree(lp, M_LWP);
}

/*
 * MPSAFE
 */
int
sys_wait4(struct wait_args *uap)
{
	struct rusage rusage;
	int error, status;

	error = kern_wait(uap->pid, (uap->status ? &status : NULL),
			  uap->options, (uap->rusage ? &rusage : NULL),
			  &uap->sysmsg_result);

	if (error == 0 && uap->status)
		error = copyout(&status, uap->status, sizeof(*uap->status));
	if (error == 0 && uap->rusage)
		error = copyout(&rusage, uap->rusage, sizeof(*uap->rusage));
	return (error);
}
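
/*
 * Usage sketch (userland, standard wait(2) API): a parent decodes the
 * status word that kern_wait() below copies out:
 *
 *	int status;
 *	pid_t pid = wait4(-1, &status, WUNTRACED, NULL);
 *
 *	if (WIFEXITED(status))
 *		printf("pid %d exited %d\n", pid, WEXITSTATUS(status));
 *	else if (WIFSTOPPED(status))
 *		printf("pid %d stopped by %d\n", pid, WSTOPSIG(status));
 */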

/*
 * kern_wait()
 *
 * wait_args(int pid, int *status, int options, struct rusage *rusage)
 *
 * MPALMOSTSAFE
 */
int
kern_wait(pid_t pid, int *status, int options, struct rusage *rusage, int *res)
{
	struct thread *td = curthread;
	struct lwp *lp;
	struct proc *q = td->td_proc;
	struct proc *p, *t;
	struct pargs *pa;
	struct sigacts *ps;
	int nfound, error;

	if (pid == 0)
		pid = -q->p_pgid;
	if (options &~ (WUNTRACED|WNOHANG|WCONTINUED|WLINUXCLONE))
		return (EINVAL);

	lwkt_gettoken(&q->p_token);
loop:
	/*
	 * All sorts of things can change due to blocking so we have to loop
	 * all the way back up here.
	 *
	 * The problem is that if a process group is stopped and the parent
	 * is doing a wait*(..., WUNTRACED, ...), it will see the STOP
	 * of the child and then stop itself when it tries to return from the
	 * system call.  When the process group is resumed the parent will
	 * then get the STOP status even though the child has now resumed
	 * (a followup wait*() will get the CONT status).
	 *
	 * Previously the CONT would overwrite the STOP because the tstop
	 * was handled within tsleep(), and the parent would only see
	 * the CONT when both are stopped and continued together.  This little
	 * two-line hack restores this effect.
	 */
	while (q->p_stat == SSTOP)
		tstop();

	nfound = 0;

	/*
	 * Loop on children.
	 *
	 * NOTE: We don't want to break q's p_token in the loop for the
	 *	 case where no children are found or we risk breaking the
	 *	 interlock between child and parent.
	 */
	LIST_FOREACH(p, &q->p_children, p_sibling) {
		if (pid != WAIT_ANY &&
		    p->p_pid != pid && p->p_pgid != -pid) {
			continue;
		}

		/*
		 * This special case handles a kthread spawned by linux_clone
		 * (see linux_misc.c).  The linux_wait4 and linux_waitpid
		 * functions need to be able to distinguish between waiting
		 * on a process and waiting on a thread.  It is a thread if
		 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
		 * signifies we want to wait for threads and not processes.
		 */
		if ((p->p_sigparent != SIGCHLD) ^
		    ((options & WLINUXCLONE) != 0)) {
			continue;
		}

		nfound++;
		if (p->p_stat == SZOMB) {
			/*
			 * We may go into SZOMB with threads still present.
			 * We must wait for them to exit before we can reap
			 * the master thread, otherwise we may race reaping
			 * non-master threads.
			 *
			 * Only this routine can remove a process from
			 * the zombie list and destroy it, use PHOLDZOMB()
			 * to serialize us and loop if it blocks (interlocked
			 * by the parent's q->p_token).
			 *
			 * WARNING!  (p) can be invalid when PHOLDZOMB(p)
			 *	     returns non-zero.  Be sure not to
			 *	     mess with it.
			 */
			if (PHOLDZOMB(p))
				goto loop;
			lwkt_gettoken(&p->p_token);
			if (p->p_pptr != q) {
				lwkt_reltoken(&p->p_token);
				PRELEZOMB(p);
				goto loop;
			}
			while (p->p_nthreads > 0) {
				tsleep(&p->p_nthreads, 0, "lwpzomb", hz);
			}

			/*
			 * Reap any LWPs left in p->p_lwps.  This is usually
			 * just the last LWP.  This must be done before
			 * we loop on p_lock since the lwps hold a ref on
			 * it as a vmspace interlock.
			 *
			 * Once that is accomplished p_nthreads had better
			 * be zero.
			 */
			while ((lp = RB_ROOT(&p->p_lwp_tree)) != NULL) {
				lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
				reaplwp(lp);
			}
			KKASSERT(p->p_nthreads == 0);

			/*
			 * Don't do anything really bad until all references
			 * to the process go away.  This may include other
			 * LWPs which are still in the process of being
			 * reaped.  We can't just pull the rug out from under
			 * them because they may still be using the VM space.
			 *
			 * Certain kernel facilities such as /proc will also
			 * put a hold on the process for short periods of
			 * time.
			 */
			PRELE(p);
			PSTALL(p, "reap3", 0);

			/* Take care of our return values. */
			*res = p->p_pid;

			if (status)
				*status = p->p_xstat;
			if (rusage)
				*rusage = p->p_ru;

			/*
			 * If we got the child via a ptrace 'attach',
			 * we need to give it back to the old parent.
			 */
			if (p->p_oppid && (t = pfind(p->p_oppid)) != NULL) {
				PHOLD(p);
				p->p_oppid = 0;
				proc_reparent(p, t);
				ksignal(t, SIGCHLD);
				wakeup((caddr_t)t);
				error = 0;
				PRELE(t);
				lwkt_reltoken(&p->p_token);
				PRELEZOMB(p);
				goto done;
			}

			/*
			 * Unlink the proc from its process group so that
			 * the following operations won't lead to an
			 * inconsistent state for processes running down
			 * the zombie list.
			 */
			proc_remove_zombie(p);
			lwkt_reltoken(&p->p_token);
			leavepgrp(p);

			p->p_xstat = 0;
			ruadd(&q->p_cru, &p->p_ru);

			/*
			 * Decrement the count of procs running with this uid.
			 */
			chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);

			/*
			 * Free up credentials.
			 */
			crfree(p->p_ucred);
			p->p_ucred = NULL;

			/*
			 * Remove unused arguments
			 */
			pa = p->p_args;
			p->p_args = NULL;
			if (pa && refcount_release(&pa->ar_ref)) {
				kfree(pa, M_PARGS);
				pa = NULL;
			}

			ps = p->p_sigacts;
			p->p_sigacts = NULL;
			if (ps && refcount_release(&ps->ps_refcnt)) {
				kfree(ps, M_SUBPROC);
				ps = NULL;
			}

			/*
			 * Our exitingcount was incremented when the process
			 * became a zombie, now that the process has been
			 * removed from (almost) all lists we should be able
			 * to safely destroy its vmspace.  Wait for any current
			 * holders to go away (so the vmspace remains stable),
			 * then scrap it.
			 */
			PSTALL(p, "reap4", 0);
			vmspace_exitfree(p);
			PSTALL(p, "reap5", 0);

			/*
			 * NOTE: We have to officially release ZOMB in order
			 *	 to ensure that a racing thread in kern_wait()
			 *	 which blocked on ZOMB is woken up.
			 */
			PHOLD(p);
			PRELEZOMB(p);
			kfree(p, M_PROC);
			atomic_add_int(&nprocs, -1);
			error = 0;
			goto done;
		}
		if (p->p_stat == SSTOP && (p->p_flags & P_WAITED) == 0 &&
		    ((p->p_flags & P_TRACED) || (options & WUNTRACED))) {
			PHOLD(p);
			lwkt_gettoken(&p->p_token);
			if (p->p_pptr != q) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}
			if (p->p_stat != SSTOP ||
			    (p->p_flags & P_WAITED) != 0 ||
			    ((p->p_flags & P_TRACED) == 0 &&
			     (options & WUNTRACED) == 0)) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}

			p->p_flags |= P_WAITED;

			*res = p->p_pid;
			if (status)
				*status = W_STOPCODE(p->p_xstat);
			/* Zero rusage so we get something consistent. */
			if (rusage)
				bzero(rusage, sizeof(*rusage));
			error = 0;
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			goto done;
		}
		if ((options & WCONTINUED) && (p->p_flags & P_CONTINUED)) {
			PHOLD(p);
			lwkt_gettoken(&p->p_token);
			if (p->p_pptr != q) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}
			if ((p->p_flags & P_CONTINUED) == 0) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}

			*res = p->p_pid;
			p->p_flags &= ~P_CONTINUED;

			if (status)
				*status = SIGCONT;
			error = 0;
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			goto done;
		}
	}
	if (nfound == 0) {
		error = ECHILD;
		goto done;
	}
	if (options & WNOHANG) {
		*res = 0;
		error = 0;
		goto done;
	}

	/*
	 * Wait for signal - interlocked using q->p_token.
	 */
	error = tsleep(q, PCATCH, "wait", 0);
	if (error) {
done:
		lwkt_reltoken(&q->p_token);
		return (error);
	}
	goto loop;
}

/*
 * Change child's parent process to parent.
 *
 * p_children/p_sibling requires the parent's token, and
 * changing pptr requires the child's token, so we have to
 * get three tokens to do this operation.  We also need to
 * hold pointers that might get ripped out from under us to
 * preserve structural integrity.
 *
 * It is possible to race another reparent or disconnect or other
 * similar operation.  We must retry when this situation occurs.
 * Once we successfully reparent the process we no longer care
 * about any races.
 */
void
proc_reparent(struct proc *child, struct proc *parent)
{
	struct proc *opp;

	PHOLD(parent);
	while ((opp = child->p_pptr) != parent) {
		PHOLD(opp);
		lwkt_gettoken(&opp->p_token);
		lwkt_gettoken(&child->p_token);
		lwkt_gettoken(&parent->p_token);
		if (child->p_pptr != opp) {
			lwkt_reltoken(&parent->p_token);
			lwkt_reltoken(&child->p_token);
			lwkt_reltoken(&opp->p_token);
			PRELE(opp);
			continue;
		}
		LIST_REMOVE(child, p_sibling);
		LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
		child->p_pptr = parent;
		lwkt_reltoken(&parent->p_token);
		lwkt_reltoken(&child->p_token);
		lwkt_reltoken(&opp->p_token);
		if (LIST_EMPTY(&opp->p_children))
			wakeup(opp);
		PRELE(opp);
		break;
	}
	PRELE(parent);
}

/*
 * The next two functions are to handle adding/deleting items on the
 * exit callout list.
 *
 * at_exit():
 * Take the arguments given and put them onto the exit callout list,
 * but first make sure that it's not already there.
 * Returns 0 on success.
 */
int
at_exit(exitlist_fn function)
{
	struct exitlist *ep;

#ifdef INVARIANTS
	/* Be noisy if the programmer has lost track of things */
	if (rm_at_exit(function))
		kprintf("WARNING: exit callout entry (%p) already present\n",
			function);
#endif
	ep = kmalloc(sizeof(*ep), M_ATEXIT, M_NOWAIT);
	if (ep == NULL)
		return (ENOMEM);
	ep->function = function;
	TAILQ_INSERT_TAIL(&exit_list, ep, next);
	return (0);
}

/*
 * Scan the exit callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1)
 */
int
rm_at_exit(exitlist_fn function)
{
	struct exitlist *ep;

	TAILQ_FOREACH(ep, &exit_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&exit_list, ep, next);
			kfree(ep, M_ATEXIT);
			return(1);
		}
	}
	return (0);
}
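
/*
 * Usage sketch (the callback name is hypothetical): a subsystem
 * registers a callout that exit1() runs for every exiting process and
 * removes it again at unload time.  Per the invocation in exit1()
 * above, the callback receives the exiting thread:
 *
 *	static void
 *	mymod_exit_hook(struct thread *td)
 *	{
 *		...per-process cleanup...
 *	}
 *
 *	at_exit(mymod_exit_hook);	(module init)
 *	rm_at_exit(mymod_exit_hook);	(module unload)
 */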

/*
 * LWP reaper related code.
 */
static void
reaplwps(void *context, int dummy)
{
	struct lwplist *lwplist = context;
	struct lwp *lp;
	int cpu = mycpuid;

	lwkt_gettoken(&deadlwp_token[cpu]);
	while ((lp = LIST_FIRST(lwplist))) {
		LIST_REMOVE(lp, u.lwp_reap_entry);
		reaplwp(lp);
	}
	lwkt_reltoken(&deadlwp_token[cpu]);
}

static void
reaplwp(struct lwp *lp)
{
	while (lwp_wait(lp) == 0)
		;
	lwp_dispose(lp);
}

static void
deadlwp_init(void)
{
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		lwkt_token_init(&deadlwp_token[cpu], "deadlwpl");
		LIST_INIT(&deadlwp_list[cpu]);
		deadlwp_task[cpu] = kmalloc(sizeof(*deadlwp_task[cpu]),
					    M_DEVBUF, M_WAITOK);
		TASK_INIT(deadlwp_task[cpu], 0, reaplwps, &deadlwp_list[cpu]);
	}
}

SYSINIT(deadlwpinit, SI_SUB_CONFIGURE, SI_ORDER_ANY, deadlwp_init, NULL);