/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 * $FreeBSD: src/sys/kern/kern_exit.c,v 1.92.2.11 2003/01/13 22:51:16 dillon Exp $
 */

#include "opt_compat.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/pioctl.h>
#include <sys/tty.h>
#include <sys/wait.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/taskqueue.h>
#include <sys/ptrace.h>
#include <sys/acct.h>		/* for acct_process() function prototype */
#include <sys/filedesc.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/jail.h>
#include <sys/kern_syscall.h>
#include <sys/unistd.h>
#include <sys/eventhandler.h>
#include <sys/dsched.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <sys/user.h>

#include <sys/refcount.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>
#include <sys/mplock2.h>

#include <machine/vmm.h>

static void reaplwps(void *context, int dummy);
static void reaplwp(struct lwp *lp);
static void killlwps(struct lwp *lp);

static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback");

/*
 * callout list for things to do at exit time
 */
struct exitlist {
	exitlist_fn function;
	TAILQ_ENTRY(exitlist) next;
};

TAILQ_HEAD(exit_list_head, exitlist);
static struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list);

/*
 * LWP reaper data
 */
static struct task *deadlwp_task[MAXCPU];
static struct lwplist deadlwp_list[MAXCPU];
static struct lwkt_token deadlwp_token[MAXCPU];

/*
 * exit --
 *	Death of process.
 *
 * SYS_EXIT_ARGS(int rval)
 */
int
sys_exit(struct exit_args *uap)
{
	exit1(W_EXITCODE(uap->rval, 0));
	/* NOTREACHED */
}

/*
 * Extended exit --
 *	Death of a lwp or process with optional bells and whistles.
 */
int
sys_extexit(struct extexit_args *uap)
{
	struct proc *p = curproc;
	int action, who;
	int error;

	action = EXTEXIT_ACTION(uap->how);
	who = EXTEXIT_WHO(uap->how);

	/* Check parameters before we might perform some action */
	switch (who) {
	case EXTEXIT_PROC:
	case EXTEXIT_LWP:
		break;
	default:
		return (EINVAL);
	}

	switch (action) {
	case EXTEXIT_SIMPLE:
		break;
	case EXTEXIT_SETINT:
		error = copyout(&uap->status, uap->addr, sizeof(uap->status));
		if (error)
			return (error);
		break;
	default:
		return (EINVAL);
	}

	lwkt_gettoken(&p->p_token);

	switch (who) {
	case EXTEXIT_LWP:
		/*
		 * Be sure only to perform a simple lwp exit if there is at
		 * least one more lwp in the proc, which will call exit1()
		 * later, otherwise the proc will be an UNDEAD and not even a
		 * SZOMB!
		 */
		if (p->p_nthreads > 1) {
			lwp_exit(0, NULL);	/* called w/ p_token held */
			/* NOT REACHED */
		}
		/* else last lwp in proc: do the real thing */
		/* FALLTHROUGH */
	default:	/* to help gcc */
	case EXTEXIT_PROC:
		lwkt_reltoken(&p->p_token);
		exit1(W_EXITCODE(uap->status, 0));
		/* NOTREACHED */
	}

	/* NOTREACHED */
	lwkt_reltoken(&p->p_token);	/* safety */
}

/*
 * Kill all lwps associated with the current process except the
 * current lwp.  Return an error if we race another thread trying to
 * do the same thing and lose the race.
 *
 * If forexec is non-zero the current thread and process flags are
 * cleaned up so they can be reused.
 *
 * Caller must hold curproc->p_token
 */
int
killalllwps(int forexec)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;

	/*
	 * Interlock against P_WEXIT.  Only one of the process's threads
	 * is allowed to do the master exit.
	 */
	if (p->p_flags & P_WEXIT)
		return (EALREADY);
	p->p_flags |= P_WEXIT;

	/*
	 * Interlock with LWP_MP_WEXIT and kill any remaining LWPs
	 */
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
	if (p->p_nthreads > 1)
		killlwps(lp);

	/*
	 * If doing this for an exec, clean up the remaining thread
	 * (us) for continuing operation after all the other threads
	 * have been killed.
	 */
	if (forexec) {
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
		p->p_flags &= ~P_WEXIT;
	}
	return(0);
}

/*
 * Kill all LWPs except the current one.  Do not try to signal
 * LWPs which have exited on their own or have already been
 * signaled.
 */
static void
killlwps(struct lwp *lp)
{
	struct proc *p = lp->lwp_proc;
	struct lwp *tlp;

	/*
	 * Kill the remaining LWPs.  We must send the signal before setting
	 * LWP_MP_WEXIT.  The setting of WEXIT is optional but helps reduce
	 * races.  tlp must be held across the call as it might block and
	 * allow the target lwp to rip itself out from under our loop.
	 */
	FOREACH_LWP_IN_PROC(tlp, p) {
		LWPHOLD(tlp);
		lwkt_gettoken(&tlp->lwp_token);
		if ((tlp->lwp_mpflags & LWP_MP_WEXIT) == 0) {
			lwpsignal(p, tlp, SIGKILL);
			atomic_set_int(&tlp->lwp_mpflags, LWP_MP_WEXIT);
		}
		lwkt_reltoken(&tlp->lwp_token);
		LWPRELE(tlp);
	}

	/*
	 * Wait for everything to clear out.
	 */
	while (p->p_nthreads > 1)
		tsleep(&p->p_nthreads, 0, "killlwps", 0);
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(int rv)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	struct proc *q;
	struct proc *pp;
	struct vmspace *vm;
	struct vnode *vtmp;
	struct exitlist *ep;
	int error;

	lwkt_gettoken(&p->p_token);

	if (p->p_pid == 1) {
		kprintf("init died (signal %d, exit %d)\n",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("Going nowhere without my init!");
	}
	varsymset_clean(&p->p_varsymset);
	lockuninit(&p->p_varsymset.vx_lock);

	/*
	 * Kill all lwps associated with the current process, return an
	 * error if we race another thread trying to do the same thing
	 * and lose the race.
	 */
	error = killalllwps(0);
	if (error) {
		lwp_exit(0, NULL);
		/* NOT REACHED */
	}

	/* are we a task leader? */
	if (p == p->p_leader) {
		struct kill_args killArgs;
		killArgs.signum = SIGKILL;
		q = p->p_peers;
		while(q) {
			killArgs.pid = q->p_pid;
			/*
			 * The interface for kill is better
			 * than the internal signal
			 */
			sys_kill(&killArgs);
			q = q->p_peers;
		}
		while (p->p_peers)
			tsleep((caddr_t)p, 0, "exit1", 0);
	}

#ifdef PGINPROF
	vmsizmon();
#endif
	STOPEVENT(p, S_EXIT, rv);
	p->p_flags |= P_POSTEXIT;	/* stop procfs stepping */

	/*
	 * Check if any loadable modules need anything done at process exit.
	 * e.g. SYSV IPC stuff
	 * XXX what if one of these generates an error?
	 */
	p->p_xstat = rv;
	EVENTHANDLER_INVOKE(process_exit, p);

	/*
	 * XXX: imho, the eventhandler stuff is much cleaner than this.
	 *	Maybe we should move everything to use eventhandler.
	 */
	TAILQ_FOREACH(ep, &exit_list, next)
		(*ep->function)(td);

	if (p->p_flags & P_PROFIL)
		stopprofclock(p);

	SIGEMPTYSET(p->p_siglist);
	SIGEMPTYSET(lp->lwp_siglist);
	if (timevalisset(&p->p_realtimer.it_value))
		callout_stop_sync(&p->p_ithandle);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pid.
	 */
	funsetownlst(&p->p_sigiolst);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(p, NULL);

	if (p->p_leader->p_peers) {
		q = p->p_leader;
		while(q->p_peers != p)
			q = q->p_peers;
		q->p_peers = p->p_peers;
		wakeup((caddr_t)p->p_leader);
	}

	/*
	 * XXX Shutdown SYSV semaphores
	 */
	semexit(p);

	KKASSERT(p->p_numposixlocks == 0);

	/* The next two chunks should probably be moved to vmspace_exit. */
	vm = p->p_vmspace;

	/*
	 * Clean up data related to virtual kernel operation.  Clean up
	 * any vkernel context related to the current lwp now so we can
	 * destroy p_vkernel.
	 */
	if (p->p_vkernel) {
		vkernel_lwp_exit(lp);
		vkernel_exit(p);
	}

	/*
	 * Release the user portion of address space.  The exitbump prevents
	 * the vmspace from being completely eradicated (using holdcnt).
	 * This releases references to vnodes, which could cause I/O if the
	 * file has been unlinked.  We need to do this early enough that
	 * we can still sleep.
	 *
	 * We can't free the entire vmspace as the kernel stack may be mapped
	 * within that space also.
	 *
	 * Processes sharing the same vmspace may exit in one order, and
	 * get cleaned up by vmspace_exit() in a different order.  The
	 * last exiting process to reach this point releases as much of
	 * the environment as it can, and the last process cleaned up
	 * by vmspace_exit() (which decrements exitingcnt) cleans up the
	 * remainder.
	 */
	vmspace_relexit(vm);

	if (SESS_LEADER(p)) {
		struct session *sp = p->p_session;

		if (sp->s_ttyvp) {
			/*
			 * We are the controlling process.  Signal the
			 * foreground process group, drain the controlling
			 * terminal, and revoke access to the controlling
			 * terminal.
			 *
			 * NOTE: while waiting for the process group to exit
			 * it is possible that one of the processes in the
			 * group will revoke the tty, so the ttyclosesession()
			 * function will re-check sp->s_ttyvp.
			 */
			if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) {
				if (sp->s_ttyp->t_pgrp)
					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
				ttywait(sp->s_ttyp);
				ttyclosesession(sp, 1); /* also revoke */
			}
			/*
			 * Release the tty.  If someone has it open via
			 * /dev/tty then close it (since they no longer can
			 * once we've NULL'd it out).
			 */
			ttyclosesession(sp, 0);

			/*
			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)
			 */
		}
		sp->s_leader = NULL;
	}
	fixjobc(p, p->p_pgrp, 0);
	(void)acct_process(p);
#ifdef KTRACE
	/*
	 * release trace file
	 */
	if (p->p_tracenode)
		ktrdestroy(&p->p_tracenode);
	p->p_traceflag = 0;
#endif
	/*
	 * Release reference to text vnode
	 */
	if ((vtmp = p->p_textvp) != NULL) {
		p->p_textvp = NULL;
		vrele(vtmp);
	}

	/* Release namecache handle to text file */
	if (p->p_textnch.ncp)
		cache_drop(&p->p_textnch);

	/*
	 * We have to handle PPWAIT here or proc_move_allproc_zombie()
	 * will block on the PHOLD() the parent is doing.
	 *
	 * We are using the flag as an interlock so an atomic op is
	 * necessary to synchronize with the parent's cpu.
	 */
	if (p->p_flags & P_PPWAIT) {
		if (p->p_pptr && p->p_pptr->p_upmap)
			p->p_pptr->p_upmap->invfork = 0;
		atomic_clear_int(&p->p_flags, P_PPWAIT);
		wakeup(p->p_pptr);
	}

	/*
	 * Move the process to the zombie list.  This will block
	 * until the process p_lock count reaches 0.  The process will
	 * not be reaped until TDF_EXITING is set by cpu_thread_exit(),
	 * which is called from cpu_proc_exit().
	 *
	 * Interlock against waiters using p_waitgen.  We increment
	 * p_waitgen after completing the move of our process to the
	 * zombie list.
	 *
	 * WARNING: pp becomes stale when we block, clear it now as a
	 *	    reminder.
	 */
	proc_move_allproc_zombie(p);
	pp = p->p_pptr;
	atomic_add_long(&pp->p_waitgen, 1);
	pp = NULL;

	/*
	 * Reparent all of this process's children to the init process.
	 * We must hold initproc->p_token in order to mess with
	 * initproc->p_children.  We already hold p->p_token (to remove
	 * the children from our list).
	 */
	q = LIST_FIRST(&p->p_children);
	if (q) {
		lwkt_gettoken(&initproc->p_token);
		while ((q = LIST_FIRST(&p->p_children)) != NULL) {
			PHOLD(q);
			lwkt_gettoken(&q->p_token);
			if (q != LIST_FIRST(&p->p_children)) {
				lwkt_reltoken(&q->p_token);
				PRELE(q);
				continue;
			}
			LIST_REMOVE(q, p_sibling);
			LIST_INSERT_HEAD(&initproc->p_children, q, p_sibling);
			q->p_pptr = initproc;
			q->p_sigparent = SIGCHLD;

			/*
			 * Traced processes are killed
			 * since their existence means someone is screwing up.
			 */
			if (q->p_flags & P_TRACED) {
				q->p_flags &= ~P_TRACED;
				ksignal(q, SIGKILL);
			}
			lwkt_reltoken(&q->p_token);
			PRELE(q);
		}
		lwkt_reltoken(&initproc->p_token);
		wakeup(initproc);
	}

	/*
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.
	 */
	calcru_proc(p, &p->p_ru);
	ruadd(&p->p_ru, &p->p_cru);

	/*
	 * notify interested parties of our demise.
	 */
	KNOTE(&p->p_klist, NOTE_EXIT);

	/*
	 * Notify parent that we're gone.  If parent has the PS_NOCLDWAIT
	 * flag set, or if the handler is set to SIG_IGN, notify process 1
	 * instead (and hope it will handle this situation).
	 *
	 * (must reload pp)
	 */
	if (p->p_pptr->p_sigacts->ps_flag & (PS_NOCLDWAIT | PS_CLDSIGIGN)) {
		proc_reparent(p, initproc);
	}

	pp = p->p_pptr;
	PHOLD(pp);
	if (p->p_sigparent && pp != initproc) {
		ksignal(pp, p->p_sigparent);
	} else {
		ksignal(pp, SIGCHLD);
	}
	p->p_flags &= ~P_TRACED;
	PRELE(pp);

	/*
	 * cpu_exit is responsible for clearing curproc, since
	 * it is heavily integrated with the thread/switching sequence.
	 *
	 * Other substructures are freed from wait().
	 */
	plimit_free(p);

	/*
	 * Finally, call machine-dependent code to release as many of the
	 * lwp's resources as we can and halt execution of this thread.
	 *
	 * pp is a wild pointer now but still the correct wakeup() target.
	 * lwp_exit() only uses it to send the wakeup() signal to the likely
	 * parent.  Any reparenting race that occurs will get a signal
	 * automatically and not be an issue.
	 */
	lwp_exit(1, pp);
}

/*
 * Eventually called by every exiting LWP
 *
 * p->p_token must be held.  mplock may be held and will be released.
 */
void
lwp_exit(int masterexit, void *waddr)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = lp->lwp_proc;
	int dowake = 0;

	/*
	 * Release the current user process designation on the process so
	 * the userland scheduler can work in someone else.
	 */
	p->p_usched->release_curproc(lp);

	/*
	 * lwp_exit() may be called without setting LWP_MP_WEXIT, so
	 * make sure it is set here.
	 */
	ASSERT_LWKT_TOKEN_HELD(&p->p_token);
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);

	/*
	 * Clean up any virtualization
	 */
	if (lp->lwp_vkernel)
		vkernel_lwp_exit(lp);

	if (td->td_vmm)
		vmm_vmdestroy();

	/*
	 * Clean up select/poll support
	 */
	kqueue_terminate(&lp->lwp_kqueue);

	/*
	 * Clean up any syscall-cached ucred
	 */
	if (td->td_ucred) {
		crfree(td->td_ucred);
		td->td_ucred = NULL;
	}

	/*
	 * Nobody actually wakes us when the lock
	 * count reaches zero, so just wait one tick.
	 */
	while (lp->lwp_lock > 0)
		tsleep(lp, 0, "lwpexit", 1);

	/* Hand down resource usage to our proc */
	ruadd(&p->p_ru, &lp->lwp_ru);

	/*
	 * If we don't hold the process until the LWP is reaped wait*()
	 * may try to dispose of its vmspace before all the LWPs have
	 * actually terminated.
	 */
	PHOLD(p);

	/*
	 * Do any remaining work that might block on us.  We should be
	 * coded such that further blocking is ok after decrementing
	 * p_nthreads but don't take the chance.
	 */
	dsched_exit_thread(td);
	biosched_done(curthread);

	/*
	 * We have to use the reaper for all the LWPs except the one doing
	 * the master exit.  The LWP doing the master exit can just be
	 * left on p_lwps and the process reaper will deal with it
	 * synchronously, which is much faster.
	 *
	 * Wakeup anyone waiting on p_nthreads to drop to 1 or 0.
	 *
	 * The process is left held until the reaper calls lwp_dispose() on
	 * the lp (after calling lwp_wait()).
	 */
	if (masterexit == 0) {
		int cpu = mycpuid;

		lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
		--p->p_nthreads;
		if ((p->p_flags & P_MAYBETHREADED) && p->p_nthreads <= 1)
			dowake = 1;
		lwkt_gettoken(&deadlwp_token[cpu]);
		LIST_INSERT_HEAD(&deadlwp_list[cpu], lp, u.lwp_reap_entry);
		taskqueue_enqueue(taskqueue_thread[cpu], deadlwp_task[cpu]);
		lwkt_reltoken(&deadlwp_token[cpu]);
	} else {
		--p->p_nthreads;
		if ((p->p_flags & P_MAYBETHREADED) && p->p_nthreads <= 1)
			dowake = 1;
	}

	/*
	 * We no longer need p_token.
	 *
	 * Tell the userland scheduler that we are going away
	 */
	lwkt_reltoken(&p->p_token);
	p->p_usched->heuristic_exiting(lp, p);

	/*
	 * Issue late wakeups after releasing our token to give us a chance
	 * to deschedule and switch away before another cpu in a wait*()
	 * reaps us.  This is done as late as possible to reduce contention.
	 */
	if (dowake)
		wakeup(&p->p_nthreads);
	if (waddr)
		wakeup(waddr);

	cpu_lwp_exit();
}

/*
 * Wait until a lwp is completely dead.  The final interlock in this drama
 * is when TDF_EXITING is set in cpu_thread_exit() just before the final
 * switchout.
 *
 * At the point TDF_EXITING is set a complete exit is accomplished when
 * TDF_RUNNING and TDF_PREEMPT_LOCK are both clear.  td_mpflags has two
 * post-switch interlock flags that can be used to wait for the TDF_
 * flags to clear.
 *
 * Returns non-zero on success, and zero if the caller needs to retry
 * the lwp_wait().
 */
static int
lwp_wait(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;
	u_int mpflags;

	KKASSERT(lwkt_preempted_proc() != lp);

	/*
	 * This bit of code uses the thread destruction interlock
	 * managed by lwkt_switch_return() to wait for the lwp's
	 * thread to completely disengage.
	 *
	 * It is possible for us to race another cpu core so we
	 * have to do this correctly.
	 */
	for (;;) {
		mpflags = td->td_mpflags;
		cpu_ccfence();
		if (mpflags & TDF_MP_EXITSIG)
			break;
		tsleep_interlock(td, 0);
		if (atomic_cmpset_int(&td->td_mpflags, mpflags,
				      mpflags | TDF_MP_EXITWAIT)) {
			tsleep(td, PINTERLOCKED, "lwpxt", 0);
		}
	}

	/*
	 * We've already waited for the core exit but there can still
	 * be other refs from e.g. process scans and such.
	 */
	if (lp->lwp_lock > 0) {
		tsleep(lp, 0, "lwpwait1", 1);
		return(0);
	}
	if (td->td_refs) {
		tsleep(td, 0, "lwpwait2", 1);
		return(0);
	}

	/*
	 * Now that we have the thread destruction interlock these flags
	 * really should already be cleaned up, keep a check for safety.
	 *
	 * We can't rip its stack out from under it until TDF_EXITING is
	 * set and both TDF_RUNNING and TDF_PREEMPT_LOCK are clear.
	 * TDF_PREEMPT_LOCK must be checked because TDF_RUNNING
	 * will be cleared temporarily if a thread gets preempted.
	 */
	while ((td->td_flags & (TDF_RUNNING |
				TDF_RUNQ |
				TDF_PREEMPT_LOCK |
				TDF_EXITING)) != TDF_EXITING) {
		tsleep(lp, 0, "lwpwait3", 1);
		return (0);
	}

	KASSERT((td->td_flags & (TDF_RUNQ|TDF_TSLEEPQ)) == 0,
		("lwp_wait: td %p (%s) still on run or sleep queue",
		 td, td->td_comm));
	return (1);
}

/*
 * Release the resources associated with a lwp.
 * The lwp must be completely dead.
 */
void
lwp_dispose(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;

	KKASSERT(lwkt_preempted_proc() != lp);
	KKASSERT(td->td_refs == 0);
	KKASSERT((td->td_flags & (TDF_RUNNING |
				  TDF_RUNQ |
				  TDF_PREEMPT_LOCK |
				  TDF_EXITING)) == TDF_EXITING);

	PRELE(lp->lwp_proc);
	lp->lwp_proc = NULL;
	if (td != NULL) {
		td->td_proc = NULL;
		td->td_lwp = NULL;
		lp->lwp_thread = NULL;
		lwkt_free_thread(td);
	}
	kfree(lp, M_LWP);
}

int
sys_wait4(struct wait_args *uap)
{
	struct rusage rusage;
	int error, status;

	error = kern_wait(uap->pid, (uap->status ? &status : NULL),
			  uap->options, (uap->rusage ? &rusage : NULL),
			  &uap->sysmsg_result);

	if (error == 0 && uap->status)
		error = copyout(&status, uap->status, sizeof(*uap->status));
	if (error == 0 && uap->rusage)
		error = copyout(&rusage, uap->rusage, sizeof(*uap->rusage));
	return (error);
}

/*
 * kern_wait()
 *
 * wait_args(int pid, int *status, int options, struct rusage *rusage)
 */
int
kern_wait(pid_t pid, int *status, int options, struct rusage *rusage, int *res)
{
	struct thread *td = curthread;
	struct lwp *lp;
	struct proc *q = td->td_proc;
	struct proc *p, *t;
	struct pargs *pa;
	struct sigacts *ps;
	int nfound, error;
	long waitgen;

	if (pid == 0)
		pid = -q->p_pgid;
	if (options &~ (WUNTRACED|WNOHANG|WCONTINUED|WLINUXCLONE))
		return (EINVAL);

	/*
	 * Protect the q->p_children list
	 */
	lwkt_gettoken(&q->p_token);
loop:
	/*
	 * All sorts of things can change due to blocking so we have to loop
	 * all the way back up here.
	 *
	 * The problem is that if a process group is stopped and the parent
	 * is doing a wait*(..., WUNTRACED, ...), it will see the STOP
	 * of the child and then stop itself when it tries to return from the
	 * system call.  When the process group is resumed the parent will
	 * then get the STOP status even though the child has now resumed
	 * (a followup wait*() will get the CONT status).
	 *
	 * Previously the CONT would overwrite the STOP because the tstop
	 * was handled within tsleep(), and the parent would only see
	 * the CONT when both are stopped and continued together.  This little
	 * two-line hack restores this effect.
	 */
	while (q->p_stat == SSTOP)
		tstop();

	nfound = 0;

	/*
	 * Loop on children.
	 *
	 * NOTE: We don't want to break q's p_token in the loop for the
	 *	 case where no children are found or we risk breaking the
	 *	 interlock between child and parent.
	 */
	waitgen = atomic_fetchadd_long(&q->p_waitgen, 0x80000000);
	LIST_FOREACH(p, &q->p_children, p_sibling) {
		if (pid != WAIT_ANY &&
		    p->p_pid != pid && p->p_pgid != -pid) {
			continue;
		}

		/*
		 * This special case handles a kthread spawned by linux_clone
		 * (see linux_misc.c).  The linux_wait4 and linux_waitpid
		 * functions need to be able to distinguish between waiting
		 * on a process and waiting on a thread.  It is a thread if
		 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
		 * signifies we want to wait for threads and not processes.
		 */
		if ((p->p_sigparent != SIGCHLD) ^
		    ((options & WLINUXCLONE) != 0)) {
			continue;
		}

		nfound++;
		if (p->p_stat == SZOMB) {
			/*
			 * We may go into SZOMB with threads still present.
			 * We must wait for them to exit before we can reap
			 * the master thread, otherwise we may race reaping
			 * non-master threads.
			 *
			 * Only this routine can remove a process from
			 * the zombie list and destroy it, use PACQUIREZOMB()
			 * to serialize us and loop if it blocks (interlocked
			 * by the parent's q->p_token).
			 *
			 * WARNING! (p) can be invalid when PHOLDZOMB(p)
			 *	    returns non-zero.  Be sure not to
			 *	    mess with it.
			 */
			if (PHOLDZOMB(p))
				goto loop;
			lwkt_gettoken(&p->p_token);
			if (p->p_pptr != q) {
				lwkt_reltoken(&p->p_token);
				PRELEZOMB(p);
				goto loop;
			}
			while (p->p_nthreads > 0) {
				tsleep(&p->p_nthreads, 0, "lwpzomb", hz);
			}

			/*
			 * Reap any LWPs left in p->p_lwps.  This is usually
			 * just the last LWP.  This must be done before
			 * we loop on p_lock since the lwps hold a ref on
			 * it as a vmspace interlock.
			 *
			 * Once that is accomplished p_nthreads had better
			 * be zero.
			 */
			while ((lp = RB_ROOT(&p->p_lwp_tree)) != NULL) {
				lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
				reaplwp(lp);
			}
			KKASSERT(p->p_nthreads == 0);

			/*
			 * Don't do anything really bad until all references
			 * to the process go away.  This may include other
			 * LWPs which are still in the process of being
			 * reaped.  We can't just pull the rug out from under
			 * them because they may still be using the VM space.
			 *
			 * Certain kernel facilities such as /proc will also
			 * put a hold on the process for short periods of
			 * time.
			 */
			PRELE(p);
			PSTALL(p, "reap3", 0);

			/* Take care of our return values. */
			*res = p->p_pid;

			if (status)
				*status = p->p_xstat;
			if (rusage)
				*rusage = p->p_ru;

			/*
			 * If we got the child via a ptrace 'attach',
			 * we need to give it back to the old parent.
			 */
			if (p->p_oppid && (t = pfind(p->p_oppid)) != NULL) {
				PHOLD(p);
				p->p_oppid = 0;
				proc_reparent(p, t);
				ksignal(t, SIGCHLD);
				wakeup((caddr_t)t);
				error = 0;
				PRELE(t);
				lwkt_reltoken(&p->p_token);
				PRELEZOMB(p);
				goto done;
			}

			/*
			 * Unlink the proc from its process group so that
			 * the following operations won't lead to an
			 * inconsistent state for processes running down
			 * the zombie list.
			 */
			proc_remove_zombie(p);
			proc_userunmap(p);
			lwkt_reltoken(&p->p_token);
			leavepgrp(p);

			p->p_xstat = 0;
			ruadd(&q->p_cru, &p->p_ru);

			/*
			 * Decrement the count of procs running with this uid.
			 */
			chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);

			/*
			 * Free up credentials.
			 */
			crfree(p->p_ucred);
			p->p_ucred = NULL;

			/*
			 * Remove unused arguments
			 */
			pa = p->p_args;
			p->p_args = NULL;
			if (pa && refcount_release(&pa->ar_ref)) {
				kfree(pa, M_PARGS);
				pa = NULL;
			}

			ps = p->p_sigacts;
			p->p_sigacts = NULL;
			if (ps && refcount_release(&ps->ps_refcnt)) {
				kfree(ps, M_SUBPROC);
				ps = NULL;
			}

			/*
			 * Our exitingcount was incremented when the process
			 * became a zombie, now that the process has been
			 * removed from (almost) all lists we should be able
			 * to safely destroy its vmspace.  Wait for any current
			 * holders to go away (so the vmspace remains stable),
			 * then scrap it.
			 */
			PSTALL(p, "reap4", 0);
			vmspace_exitfree(p);
			PSTALL(p, "reap5", 0);

			/*
			 * NOTE: We have to officially release ZOMB in order
			 *	 to ensure that a racing thread in kern_wait()
			 *	 which blocked on ZOMB is woken up.
			 */
			PHOLD(p);
			PRELEZOMB(p);
			kfree(p, M_PROC);
			atomic_add_int(&nprocs, -1);
			error = 0;
			goto done;
		}
		if (p->p_stat == SSTOP && (p->p_flags & P_WAITED) == 0 &&
		    ((p->p_flags & P_TRACED) || (options & WUNTRACED))) {
			PHOLD(p);
			lwkt_gettoken(&p->p_token);
			if (p->p_pptr != q) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}
			if (p->p_stat != SSTOP ||
			    (p->p_flags & P_WAITED) != 0 ||
			    ((p->p_flags & P_TRACED) == 0 &&
			     (options & WUNTRACED) == 0)) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}

			p->p_flags |= P_WAITED;

			*res = p->p_pid;
			if (status)
				*status = W_STOPCODE(p->p_xstat);
			/* Zero rusage so we get something consistent. */
			if (rusage)
				bzero(rusage, sizeof(*rusage));
			error = 0;
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			goto done;
		}
		if ((options & WCONTINUED) && (p->p_flags & P_CONTINUED)) {
			PHOLD(p);
			lwkt_gettoken(&p->p_token);
			if (p->p_pptr != q) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}
			if ((p->p_flags & P_CONTINUED) == 0) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}

			*res = p->p_pid;
			p->p_flags &= ~P_CONTINUED;

			if (status)
				*status = SIGCONT;
			error = 0;
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			goto done;
		}
	}
	if (nfound == 0) {
		error = ECHILD;
		goto done;
	}
	if (options & WNOHANG) {
		*res = 0;
		error = 0;
		goto done;
	}

	/*
	 * Wait for signal - interlocked using q->p_waitgen.
	 */
	error = 0;
	while ((waitgen & 0x7FFFFFFF) == (q->p_waitgen & 0x7FFFFFFF)) {
		tsleep_interlock(q, PCATCH);
		waitgen = atomic_fetchadd_long(&q->p_waitgen, 0x80000000);
		if ((waitgen & 0x7FFFFFFF) == (q->p_waitgen & 0x7FFFFFFF)) {
			error = tsleep(q, PCATCH | PINTERLOCKED, "wait", 0);
			break;
		}
	}
	if (error) {
done:
		lwkt_reltoken(&q->p_token);
		return (error);
	}
	goto loop;
}

/*
 * Change child's parent process to parent.
 *
 * p_children/p_sibling requires the parent's token, and
 * changing pptr requires the child's token, so we have to
 * get three tokens to do this operation.  We also need to
 * hold pointers that might get ripped out from under us to
 * preserve structural integrity.
 *
 * It is possible to race another reparent or disconnect or other
 * similar operation.  We must retry when this situation occurs.
 * Once we successfully reparent the process we no longer care
 * about any races.
 */
void
proc_reparent(struct proc *child, struct proc *parent)
{
	struct proc *opp;

	PHOLD(parent);
	while ((opp = child->p_pptr) != parent) {
		PHOLD(opp);
		lwkt_gettoken(&opp->p_token);
		lwkt_gettoken(&child->p_token);
		lwkt_gettoken(&parent->p_token);
		if (child->p_pptr != opp) {
			lwkt_reltoken(&parent->p_token);
			lwkt_reltoken(&child->p_token);
			lwkt_reltoken(&opp->p_token);
			PRELE(opp);
			continue;
		}
		LIST_REMOVE(child, p_sibling);
		LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
		child->p_pptr = parent;
		lwkt_reltoken(&parent->p_token);
		lwkt_reltoken(&child->p_token);
		lwkt_reltoken(&opp->p_token);
		if (LIST_EMPTY(&opp->p_children))
			wakeup(opp);
		PRELE(opp);
		break;
	}
	PRELE(parent);
}

/*
 * The next two functions handle adding/deleting items on the
 * exit callout list.
 *
 * at_exit():
 *	Take the function given and put it onto the exit callout list,
 *	but first make sure that it's not already there.
 *	Returns 0 on success.
 */
int
at_exit(exitlist_fn function)
{
	struct exitlist *ep;

#ifdef INVARIANTS
	/* Be noisy if the programmer has lost track of things */
	if (rm_at_exit(function))
		kprintf("WARNING: exit callout entry (%p) already present\n",
			function);
#endif
	ep = kmalloc(sizeof(*ep), M_ATEXIT, M_NOWAIT);
	if (ep == NULL)
		return (ENOMEM);
	ep->function = function;
	TAILQ_INSERT_TAIL(&exit_list, ep, next);
	return (0);
}

/*
 * Scan the exit callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1)
 */
int
rm_at_exit(exitlist_fn function)
{
	struct exitlist *ep;

	TAILQ_FOREACH(ep, &exit_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&exit_list, ep, next);
			kfree(ep, M_ATEXIT);
			return(1);
		}
	}
	return (0);
}

/*
 * LWP reaper related code.
 */
static void
reaplwps(void *context, int dummy)
{
	struct lwplist *lwplist = context;
	struct lwp *lp;
	int cpu = mycpuid;

	lwkt_gettoken(&deadlwp_token[cpu]);
	while ((lp = LIST_FIRST(lwplist))) {
		LIST_REMOVE(lp, u.lwp_reap_entry);
		reaplwp(lp);
	}
	lwkt_reltoken(&deadlwp_token[cpu]);
}

static void
reaplwp(struct lwp *lp)
{
	while (lwp_wait(lp) == 0)
		;
	lwp_dispose(lp);
}

static void
deadlwp_init(void)
{
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		lwkt_token_init(&deadlwp_token[cpu], "deadlwpl");
		LIST_INIT(&deadlwp_list[cpu]);
		deadlwp_task[cpu] = kmalloc(sizeof(*deadlwp_task[cpu]),
					    M_DEVBUF, M_WAITOK);
		TASK_INIT(deadlwp_task[cpu], 0, reaplwps, &deadlwp_list[cpu]);
	}
}

SYSINIT(deadlwpinit, SI_SUB_CONFIGURE, SI_ORDER_ANY, deadlwp_init, NULL);