/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 * $FreeBSD: src/sys/kern/kern_exit.c,v 1.92.2.11 2003/01/13 22:51:16 dillon Exp $
 */
#include "opt_compat.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/pioctl.h>
#include <sys/tty.h>
#include <sys/wait.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/taskqueue.h>
#include <sys/ptrace.h>
#include <sys/acct.h>		/* for acct_process() function prototype */
#include <sys/filedesc.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/jail.h>
#include <sys/kern_syscall.h>
#include <sys/upcall.h>
#include <sys/unistd.h>
#include <sys/eventhandler.h>
#include <sys/dsched.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <sys/user.h>

#include <sys/refcount.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>
#include <sys/mplock2.h>

static void reaplwps(void *context, int dummy);
static void reaplwp(struct lwp *lp);
static void killlwps(struct lwp *lp);

static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback");
static MALLOC_DEFINE(M_ZOMBIE, "zombie", "zombie proc status");

static struct lwkt_token deadlwp_token = LWKT_TOKEN_INITIALIZER(deadlwp_token);

/*
 * callout list for things to do at exit time
 */
struct exitlist {
        exitlist_fn function;
        TAILQ_ENTRY(exitlist) next;
};

TAILQ_HEAD(exit_list_head, exitlist);
static struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list);

/*
 * LWP reaper data
 */
struct task *deadlwp_task[MAXCPU];
struct lwplist deadlwp_list[MAXCPU];

/*
 * exit --
 *	Death of process.
 *
 * SYS_EXIT_ARGS(int rval)
 */
int
sys_exit(struct exit_args *uap)
{
        exit1(W_EXITCODE(uap->rval, 0));
        /* NOTREACHED */
}

/*
 * Extended exit --
 *	Death of a lwp or process with optional bells and whistles.
 *
 * MPALMOSTSAFE
 */
int
sys_extexit(struct extexit_args *uap)
{
        struct proc *p = curproc;
        int action, who;
        int error;

        action = EXTEXIT_ACTION(uap->how);
        who = EXTEXIT_WHO(uap->how);

        /* Check parameters before we might perform some action */
        switch (who) {
        case EXTEXIT_PROC:
        case EXTEXIT_LWP:
                break;
        default:
                return (EINVAL);
        }

        switch (action) {
        case EXTEXIT_SIMPLE:
                break;
        case EXTEXIT_SETINT:
                error = copyout(&uap->status, uap->addr, sizeof(uap->status));
                if (error)
                        return (error);
                break;
        default:
                return (EINVAL);
        }

        lwkt_gettoken(&p->p_token);

        switch (who) {
        case EXTEXIT_LWP:
                /*
                 * Be sure only to perform a simple lwp exit if there is at
                 * least one more lwp in the proc, which will call exit1()
                 * later, otherwise the proc will be an UNDEAD and not even a
                 * SZOMB!
                 */
                if (p->p_nthreads > 1) {
                        lwp_exit(0);	/* called w/ p_token held */
                        /* NOT REACHED */
                }
                /* else last lwp in proc:  do the real thing */
                /* FALLTHROUGH */
        default:	/* to help gcc */
        case EXTEXIT_PROC:
                lwkt_reltoken(&p->p_token);
                exit1(W_EXITCODE(uap->status, 0));
                /* NOTREACHED */
        }

        /* NOTREACHED */
        lwkt_reltoken(&p->p_token);	/* safety */
}
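
/*
 * Illustrative sketch (hypothetical userland code, not part of this
 * file): a thread library retiring one of several lwps and letting a
 * joiner observe the death.  The exact bit packing of the 'how'
 * argument is defined by the EXTEXIT_* macros in <sys/unistd.h>;
 * treating it as a simple OR of an action and a scope is an
 * assumption here.
 *
 *	volatile int lwp_dead;		// hypothetical flag a joiner polls
 *	...
 *	// store 1 into lwp_dead, then terminate only this lwp
 *	extexit(EXTEXIT_LWP | EXTEXIT_SETINT, 1, (void *)&lwp_dead);
 */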

/*
 * Kill all lwps associated with the current process except the
 * current lwp.  Return an error if we race another thread trying to
 * do the same thing and lose the race.
 *
 * If forexec is non-zero the current thread and process flags are
 * cleaned up so they can be reused.
 *
 * Caller must hold curproc->p_token
 */
int
killalllwps(int forexec)
{
        struct lwp *lp = curthread->td_lwp;
        struct proc *p = lp->lwp_proc;

        /*
         * Interlock against P_WEXIT.  Only one of the process's threads
         * is allowed to do the master exit.
         */
        if (p->p_flags & P_WEXIT)
                return (EALREADY);
        p->p_flags |= P_WEXIT;

        /*
         * Interlock with LWP_MP_WEXIT and kill any remaining LWPs
         */
        atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
        if (p->p_nthreads > 1)
                killlwps(lp);

        /*
         * If doing this for an exec, clean up the remaining thread
         * (us) for continuing operation after all the other threads
         * have been killed.
         */
        if (forexec) {
                atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
                p->p_flags &= ~P_WEXIT;
        }
        return(0);
}

/*
 * Kill all LWPs except the current one.  Do not try to signal
 * LWPs which have exited on their own or have already been
 * signaled.
 */
static void
killlwps(struct lwp *lp)
{
        struct proc *p = lp->lwp_proc;
        struct lwp *tlp;

        /*
         * Kill the remaining LWPs.  We must send the signal before setting
         * LWP_MP_WEXIT.  The setting of WEXIT is optional but helps reduce
         * races.  tlp must be held across the call as it might block and
         * allow the target lwp to rip itself out from under our loop.
         */
        FOREACH_LWP_IN_PROC(tlp, p) {
                LWPHOLD(tlp);
                lwkt_gettoken(&tlp->lwp_token);
                if ((tlp->lwp_mpflags & LWP_MP_WEXIT) == 0) {
                        lwpsignal(p, tlp, SIGKILL);
                        atomic_set_int(&tlp->lwp_mpflags, LWP_MP_WEXIT);
                }
                lwkt_reltoken(&tlp->lwp_token);
                LWPRELE(tlp);
        }

        /*
         * Wait for everything to clear out.
         */
        while (p->p_nthreads > 1) {
                tsleep(&p->p_nthreads, 0, "killlwps", 0);
        }
}
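
/*
 * Usage sketch (hedged): exit1() below is the forexec == 0 caller.
 * The forexec != 0 case is expected to come from the exec path, which
 * must become single-threaded before replacing the vmspace, roughly:
 *
 *	if ((error = killalllwps(1)) != 0)
 *		return (error);	// lost the race to an exiting thread
 *
 * The exact exec-side call site is an assumption; see kern_exec.c.
 */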

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(int rv)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct lwp *lp = td->td_lwp;
        struct proc *q, *nq;
        struct vmspace *vm;
        struct vnode *vtmp;
        struct exitlist *ep;
        int error;

        lwkt_gettoken(&p->p_token);

        if (p->p_pid == 1) {
                kprintf("init died (signal %d, exit %d)\n",
                    WTERMSIG(rv), WEXITSTATUS(rv));
                panic("Going nowhere without my init!");
        }
        varsymset_clean(&p->p_varsymset);
        lockuninit(&p->p_varsymset.vx_lock);

        /*
         * Kill all lwps associated with the current process, return an
         * error if we race another thread trying to do the same thing
         * and lose the race.
         */
        error = killalllwps(0);
        if (error) {
                lwp_exit(0);
                /* NOT REACHED */
        }

        /* are we a task leader? */
        if (p == p->p_leader) {
                struct kill_args killArgs;

                killArgs.signum = SIGKILL;
                q = p->p_peers;
                while (q) {
                        killArgs.pid = q->p_pid;
                        /*
                         * The interface for kill is better
                         * than the internal signal
                         */
                        sys_kill(&killArgs);
                        nq = q;
                        q = q->p_peers;
                }
                while (p->p_peers)
                        tsleep((caddr_t)p, 0, "exit1", 0);
        }

#ifdef PGINPROF
        vmsizmon();
#endif
        STOPEVENT(p, S_EXIT, rv);
        p->p_flags |= P_POSTEXIT;	/* stop procfs stepping */

        /*
         * Check if any loadable modules need anything done at process exit.
         * e.g. SYSV IPC stuff
         * XXX what if one of these generates an error?
         */
        p->p_xstat = rv;
        EVENTHANDLER_INVOKE(process_exit, p);

        /*
         * XXX: imho, the eventhandler stuff is much cleaner than this.
         *	Maybe we should move everything to use eventhandler.
         */
        TAILQ_FOREACH(ep, &exit_list, next)
                (*ep->function)(td);

        if (p->p_flags & P_PROFIL)
                stopprofclock(p);

        SIGEMPTYSET(p->p_siglist);
        SIGEMPTYSET(lp->lwp_siglist);
        if (timevalisset(&p->p_realtimer.it_value))
                callout_stop_sync(&p->p_ithandle);

        /*
         * Reset any sigio structures pointing to us as a result of
         * F_SETOWN with our pid.
         */
        funsetownlst(&p->p_sigiolst);

        /*
         * Close open files and release open-file table.
         * This may block!
         */
        fdfree(p, NULL);

        if (p->p_leader->p_peers) {
                q = p->p_leader;
                while (q->p_peers != p)
                        q = q->p_peers;
                q->p_peers = p->p_peers;
                wakeup((caddr_t)p->p_leader);
        }

        /*
         * XXX Shutdown SYSV semaphores
         */
        semexit(p);

        KKASSERT(p->p_numposixlocks == 0);

        /* The next two chunks should probably be moved to vmspace_exit. */
        vm = p->p_vmspace;

        /*
         * Release upcalls associated with this process
         */
        if (vm->vm_upcalls)
                upc_release(vm, lp);

        /*
         * Clean up data related to virtual kernel operation.  Clean up
         * any vkernel context related to the current lwp now so we can
         * destroy p_vkernel.
         */
        if (p->p_vkernel) {
                vkernel_lwp_exit(lp);
                vkernel_exit(p);
        }

        /*
         * Release user portion of address space.
         * This releases references to vnodes,
         * which could cause I/O if the file has been unlinked.
         * Need to do this early enough that we can still sleep.
         * Can't free the entire vmspace as the kernel stack
         * may be mapped within that space also.
         *
         * Processes sharing the same vmspace may exit in one order, and
         * get cleaned up by vmspace_exit() in a different order.  The
         * last exiting process to reach this point releases as much of
         * the environment as it can, and the last process cleaned up
         * by vmspace_exit() (which decrements exitingcnt) cleans up the
         * remainder.
         */
        vmspace_exitbump(vm);
        sysref_put(&vm->vm_sysref);

        if (SESS_LEADER(p)) {
                struct session *sp = p->p_session;

                if (sp->s_ttyvp) {
                        /*
                         * We are the controlling process.  Signal the
                         * foreground process group, drain the controlling
                         * terminal, and revoke access to the controlling
                         * terminal.
                         *
                         * NOTE: while waiting for the process group to exit
                         * it is possible that one of the processes in the
                         * group will revoke the tty, so the ttyclosesession()
                         * function will re-check sp->s_ttyvp.
                         */
                        if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) {
                                if (sp->s_ttyp->t_pgrp)
                                        pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
                                ttywait(sp->s_ttyp);
                                ttyclosesession(sp, 1); /* also revoke */
                        }
                        /*
                         * Release the tty.  If someone has it open via
                         * /dev/tty then close it (since they no longer can
                         * once we've NULL'd it out).
                         */
                        ttyclosesession(sp, 0);

                        /*
                         * s_ttyp is not zero'd; we use this to indicate
                         * that the session once had a controlling terminal.
                         * (for logging and informational purposes)
                         */
                }
                sp->s_leader = NULL;
        }
        fixjobc(p, p->p_pgrp, 0);
        (void)acct_process(p);
#ifdef KTRACE
        /*
         * release trace file
         */
        if (p->p_tracenode)
                ktrdestroy(&p->p_tracenode);
        p->p_traceflag = 0;
#endif
        /*
         * Release reference to text vnode
         */
        if ((vtmp = p->p_textvp) != NULL) {
                p->p_textvp = NULL;
                vrele(vtmp);
        }

        /* Release namecache handle to text file */
        if (p->p_textnch.ncp)
                cache_drop(&p->p_textnch);

        /*
         * We have to handle PPWAIT here or proc_move_allproc_zombie()
         * will block on the PHOLD() the parent is doing.
         */
        if (p->p_flags & P_PPWAIT) {
                p->p_flags &= ~P_PPWAIT;
                wakeup(p->p_pptr);
        }

        /*
         * Move the process to the zombie list.  This will block
         * until the process p_lock count reaches 0.  The process will
         * not be reaped until TDF_EXITING is set by cpu_thread_exit(),
         * which is called from cpu_proc_exit().
         */
        proc_move_allproc_zombie(p);

        /*
         * Reparent all of this process's children to the init process.
         * We must hold initproc->p_token in order to mess with
         * initproc->p_children.  We already hold p->p_token (to remove
         * the children from our list).
         */
        q = LIST_FIRST(&p->p_children);
        if (q) {
                lwkt_gettoken(&initproc->p_token);
                while ((q = LIST_FIRST(&p->p_children)) != NULL) {
                        PHOLD(q);
                        lwkt_gettoken(&q->p_token);
                        if (q != LIST_FIRST(&p->p_children)) {
                                lwkt_reltoken(&q->p_token);
                                PRELE(q);
                                continue;
                        }
                        LIST_REMOVE(q, p_sibling);
                        LIST_INSERT_HEAD(&initproc->p_children, q, p_sibling);
                        q->p_pptr = initproc;
                        q->p_sigparent = SIGCHLD;

                        /*
                         * Traced processes are killed
                         * since their existence means someone is screwing up.
                         */
                        if (q->p_flags & P_TRACED) {
                                q->p_flags &= ~P_TRACED;
                                ksignal(q, SIGKILL);
                        }
                        lwkt_reltoken(&q->p_token);
                        PRELE(q);
                }
                lwkt_reltoken(&initproc->p_token);
                wakeup(initproc);
        }

        /*
         * Save exit status and final rusage info, adding in child rusage
         * info and self times.
         */
        calcru_proc(p, &p->p_ru);
        ruadd(&p->p_ru, &p->p_cru);

        /*
         * notify interested parties of our demise.
         */
        KNOTE(&p->p_klist, NOTE_EXIT);
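
        /*
         * Illustrative sketch (hypothetical userland code, not part of
         * this file): the KNOTE above is what fires an EVFILT_PROC
         * kevent registered with NOTE_EXIT, e.g.:
         *
         *	struct kevent kev;
         *
         *	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, NULL);
         *	kevent(kq, &kev, 1, NULL, 0, NULL);	// kq from kqueue()
         *
         * A later kevent() call then returns an event for this process,
         * with the exit status expected in kev.data.
         */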

        /*
         * Notify parent that we're gone.  If parent has the PS_NOCLDWAIT
         * flag set, or if the handler is set to SIG_IGN, notify process 1
         * instead (and hope it will handle this situation).
         */
        if (p->p_pptr->p_sigacts->ps_flag & (PS_NOCLDWAIT | PS_CLDSIGIGN)) {
                struct proc *pp = p->p_pptr;

                PHOLD(pp);
                proc_reparent(p, initproc);

                /*
                 * If this was the last child of our parent, notify
                 * parent, so in case he was wait(2)ing, he will
                 * continue.  This function interlocks with pptr->p_token.
                 */
                if (LIST_EMPTY(&pp->p_children))
                        wakeup((caddr_t)pp);
                PRELE(pp);
        }

        /* lwkt_gettoken(&proc_token); */
        q = p->p_pptr;
        PHOLD(q);
        if (p->p_sigparent && q != initproc) {
                ksignal(q, p->p_sigparent);
        } else {
                ksignal(q, SIGCHLD);
        }

        p->p_flags &= ~P_TRACED;
        wakeup(p->p_pptr);

        PRELE(q);
        /* lwkt_reltoken(&proc_token); */
        /* NOTE: p->p_pptr can get ripped out */
        /*
         * cpu_exit is responsible for clearing curproc, since
         * it is heavily integrated with the thread/switching sequence.
         *
         * Other substructures are freed from wait().
         */
        plimit_free(p);

        /*
         * Release the current user process designation on the process so
         * the userland scheduler can work in someone else.
         */
        p->p_usched->release_curproc(lp);

        /*
         * Finally, call machine-dependent code to release as many of the
         * lwp's resources as we can and halt execution of this thread.
         */
        lwp_exit(1);
}
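
/*
 * Worked example (hedged, based on the historical BSD status-word
 * encoding in <sys/wait.h>): the rv passed to exit1() and stored in
 * p_xstat above packs the exit code and terminating signal as
 *
 *	W_EXITCODE(ret, sig) == (ret << 8) | sig
 *
 * so exit1(W_EXITCODE(2, 0)) stores 0x0200, WEXITSTATUS() recovers 2,
 * and WTERMSIG() recovers 0.  W_STOPCODE(sig), used by kern_wait()
 * below for stopped children, packs (sig << 8) | 0177 instead.
 */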

/*
 * Eventually called by every exiting LWP
 *
 * p->p_token must be held.  mplock may be held and will be released.
 */
void
lwp_exit(int masterexit)
{
        struct thread *td = curthread;
        struct lwp *lp = td->td_lwp;
        struct proc *p = lp->lwp_proc;
        int dowake = 0;

        /*
         * lwp_exit() may be called without setting LWP_MP_WEXIT, so
         * make sure it is set here.
         */
        ASSERT_LWKT_TOKEN_HELD(&p->p_token);
        atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);

        /*
         * Clean up any virtualization
         */
        if (lp->lwp_vkernel)
                vkernel_lwp_exit(lp);

        /*
         * Clean up select/poll support
         */
        kqueue_terminate(&lp->lwp_kqueue);

        /*
         * Clean up any syscall-cached ucred
         */
        if (td->td_ucred) {
                crfree(td->td_ucred);
                td->td_ucred = NULL;
        }

        /*
         * Nobody actually wakes us when the lock
         * count reaches zero, so just wait one tick.
         */
        while (lp->lwp_lock > 0)
                tsleep(lp, 0, "lwpexit", 1);

        /* Hand down resource usage to our proc */
        ruadd(&p->p_ru, &lp->lwp_ru);

        /*
         * If we don't hold the process until the LWP is reaped wait*()
         * may try to dispose of its vmspace before all the LWPs have
         * actually terminated.
         */
        PHOLD(p);

        /*
         * Do any remaining work that might block on us.  We should be
         * coded such that further blocking is ok after decrementing
         * p_nthreads but don't take the chance.
         */
        dsched_exit_thread(td);
        biosched_done(curthread);

        /*
         * We have to use the reaper for all the LWPs except the one doing
         * the master exit.  The LWP doing the master exit can just be
         * left on p_lwps and the process reaper will deal with it
         * synchronously, which is much faster.
         *
         * Wakeup anyone waiting on p_nthreads to drop to 1 or 0.
         *
         * The process is left held until the reaper calls lwp_dispose() on
         * the lp (after calling lwp_wait()).
         */
        if (masterexit == 0) {
                lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
                --p->p_nthreads;
                if (p->p_nthreads <= 1)
                        dowake = 1;
                lwkt_gettoken(&deadlwp_token);
                LIST_INSERT_HEAD(&deadlwp_list[mycpuid], lp, u.lwp_reap_entry);
                taskqueue_enqueue(taskqueue_thread[mycpuid],
                                  deadlwp_task[mycpuid]);
                lwkt_reltoken(&deadlwp_token);
        } else {
                --p->p_nthreads;
                if (p->p_nthreads <= 1)
                        dowake = 1;
        }

        /*
         * Release p_token.  Issue the wakeup() on p_nthreads if necessary,
         * as late as possible to give us a chance to actually deschedule and
         * switch away before another cpu core hits reaplwp().
         */
        lwkt_reltoken(&p->p_token);
        if (dowake)
                wakeup(&p->p_nthreads);

        /*
         * Tell the userland scheduler that we are going away
         */
        p->p_usched->heuristic_exiting(lp, p);

        cpu_lwp_exit();
}

/*
 * Wait until a lwp is completely dead.  The final interlock in this drama
 * is when TDF_EXITING is set in cpu_thread_exit() just before the final
 * switchout.
 *
 * At the point TDF_EXITING is set a complete exit is accomplished when
 * TDF_RUNNING and TDF_PREEMPT_LOCK are both clear.  td_mpflags has two
 * post-switch interlock flags that can be used to wait for the TDF_
 * flags to clear.
 *
 * Returns non-zero on success, and zero if the caller needs to retry
 * the lwp_wait().
 */
static int
lwp_wait(struct lwp *lp)
{
        struct thread *td = lp->lwp_thread;
        u_int mpflags;

        KKASSERT(lwkt_preempted_proc() != lp);

        /*
         * This bit of code uses the thread destruction interlock
         * managed by lwkt_switch_return() to wait for the lwp's
         * thread to completely disengage.
         *
         * It is possible for us to race another cpu core so we
         * have to do this correctly.
         */
        for (;;) {
                mpflags = td->td_mpflags;
                cpu_ccfence();
                if (mpflags & TDF_MP_EXITSIG)
                        break;
                tsleep_interlock(td, 0);
                if (atomic_cmpset_int(&td->td_mpflags, mpflags,
                                      mpflags | TDF_MP_EXITWAIT)) {
                        tsleep(td, PINTERLOCKED, "lwpxt", 0);
                }
        }

        /*
         * We've already waited for the core exit but there can still
         * be other refs from e.g. process scans and such.
         */
        if (lp->lwp_lock > 0) {
                tsleep(lp, 0, "lwpwait1", 1);
                return(0);
        }
        if (td->td_refs) {
                tsleep(td, 0, "lwpwait2", 1);
                return(0);
        }

        /*
         * Now that we have the thread destruction interlock these flags
         * really should already be cleaned up, keep a check for safety.
         *
         * We can't rip its stack out from under it until TDF_EXITING is
         * set and both TDF_RUNNING and TDF_PREEMPT_LOCK are clear.
         * TDF_PREEMPT_LOCK must be checked because TDF_RUNNING
         * will be cleared temporarily if a thread gets preempted.
         */
        while ((td->td_flags & (TDF_RUNNING |
                                TDF_PREEMPT_LOCK |
                                TDF_EXITING)) != TDF_EXITING) {
                tsleep(lp, 0, "lwpwait3", 1);
                return (0);
        }

        KASSERT((td->td_flags & (TDF_RUNQ|TDF_TSLEEPQ)) == 0,
                ("lwp_wait: td %p (%s) still on run or sleep queue",
                 td, td->td_comm));
        return (1);
}

/*
 * Release the resources associated with a lwp.
 * The lwp must be completely dead.
 */
void
lwp_dispose(struct lwp *lp)
{
        struct thread *td = lp->lwp_thread;

        KKASSERT(lwkt_preempted_proc() != lp);
        KKASSERT(td->td_refs == 0);
        KKASSERT((td->td_flags & (TDF_RUNNING |
                                  TDF_PREEMPT_LOCK |
                                  TDF_EXITING)) == TDF_EXITING);

        PRELE(lp->lwp_proc);
        lp->lwp_proc = NULL;
        if (td != NULL) {
                td->td_proc = NULL;
                td->td_lwp = NULL;
                lp->lwp_thread = NULL;
                lwkt_free_thread(td);
        }
        kfree(lp, M_LWP);
}

/*
 * MPSAFE
 */
int
sys_wait4(struct wait_args *uap)
{
        struct rusage rusage;
        int error, status;

        error = kern_wait(uap->pid, (uap->status ? &status : NULL),
                          uap->options, (uap->rusage ? &rusage : NULL),
                          &uap->sysmsg_result);

        if (error == 0 && uap->status)
                error = copyout(&status, uap->status, sizeof(*uap->status));
        if (error == 0 && uap->rusage)
                error = copyout(&rusage, uap->rusage, sizeof(*uap->rusage));
        return (error);
}
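
/*
 * Illustrative sketch (hypothetical userland code, not part of this
 * file): the other side of this syscall, reaping a child and decoding
 * the status word that kern_wait() fills in below.
 *
 *	int status;
 *	struct rusage ru;
 *	pid_t pid = wait4(-1, &status, WUNTRACED, &ru);
 *
 *	if (pid > 0 && WIFEXITED(status))
 *		printf("%d exited with %d\n", pid, WEXITSTATUS(status));
 *	else if (pid > 0 && WIFSTOPPED(status))
 *		printf("%d stopped on signal %d\n", pid, WSTOPSIG(status));
 */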

/*
 * kern_wait()
 *
 * wait_args(int pid, int *status, int options, struct rusage *rusage)
 *
 * MPALMOSTSAFE
 */
int
kern_wait(pid_t pid, int *status, int options, struct rusage *rusage, int *res)
{
        struct thread *td = curthread;
        struct lwp *lp;
        struct proc *q = td->td_proc;
        struct proc *p, *t;
        struct pargs *pa;
        struct sigacts *ps;
        int nfound, error;

        if (pid == 0)
                pid = -q->p_pgid;
        if (options &~ (WUNTRACED|WNOHANG|WCONTINUED|WLINUXCLONE))
                return (EINVAL);

        lwkt_gettoken(&q->p_token);
loop:
        /*
         * All sorts of things can change due to blocking so we have to loop
         * all the way back up here.
         *
         * The problem is that if a process group is stopped and the parent
         * is doing a wait*(..., WUNTRACED, ...), it will see the STOP
         * of the child and then stop itself when it tries to return from the
         * system call.  When the process group is resumed the parent will
         * then get the STOP status even though the child has now resumed
         * (a followup wait*() will get the CONT status).
         *
         * Previously the CONT would overwrite the STOP because the tstop
         * was handled within tsleep(), and the parent would only see
         * the CONT when both are stopped and continued together.  This little
         * two-line hack restores this effect.
         */
        while (q->p_stat == SSTOP)
                tstop();

        nfound = 0;

        /*
         * Loop on children.
         *
         * NOTE: We don't want to break q's p_token in the loop for the
         *	 case where no children are found or we risk breaking the
         *	 interlock between child and parent.
         */
        LIST_FOREACH(p, &q->p_children, p_sibling) {
                if (pid != WAIT_ANY &&
                    p->p_pid != pid && p->p_pgid != -pid) {
                        continue;
                }

                /*
                 * This special case handles a kthread spawned by linux_clone
                 * (see linux_misc.c).  The linux_wait4 and linux_waitpid
                 * functions need to be able to distinguish between waiting
                 * on a process and waiting on a thread.  It is a thread if
                 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
                 * signifies we want to wait for threads and not processes.
                 */
                if ((p->p_sigparent != SIGCHLD) ^
                    ((options & WLINUXCLONE) != 0)) {
                        continue;
                }

                nfound++;
                if (p->p_stat == SZOMB) {
                        /*
                         * We may go into SZOMB with threads still present.
                         * We must wait for them to exit before we can reap
                         * the master thread, otherwise we may race reaping
                         * non-master threads.
                         *
                         * Only this routine can remove a process from
                         * the zombie list and destroy it, use PACQUIREZOMB()
                         * to serialize us and loop if it blocks (interlocked
                         * by the parent's q->p_token).
                         *
                         * WARNING!  (p) can be invalid when PHOLDZOMB(p)
                         *	     returns non-zero.  Be sure not to
                         *	     mess with it.
                         */
                        if (PHOLDZOMB(p))
                                goto loop;
                        lwkt_gettoken(&p->p_token);
                        if (p->p_pptr != q) {
                                lwkt_reltoken(&p->p_token);
                                PRELEZOMB(p);
                                goto loop;
                        }
                        while (p->p_nthreads > 0) {
                                tsleep(&p->p_nthreads, 0, "lwpzomb", hz);
                        }

                        /*
                         * Reap any LWPs left in p->p_lwps.  This is usually
                         * just the last LWP.  This must be done before
                         * we loop on p_lock since the lwps hold a ref on
                         * it as a vmspace interlock.
                         *
                         * Once that is accomplished p_nthreads had better
                         * be zero.
                         */
                        while ((lp = RB_ROOT(&p->p_lwp_tree)) != NULL) {
                                lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
                                reaplwp(lp);
                        }
                        KKASSERT(p->p_nthreads == 0);

                        /*
                         * Don't do anything really bad until all references
                         * to the process go away.  This may include other
                         * LWPs which are still in the process of being
                         * reaped.  We can't just pull the rug out from under
                         * them because they may still be using the VM space.
                         *
                         * Certain kernel facilities such as /proc will also
                         * put a hold on the process for short periods of
                         * time.
                         */
                        PRELE(p);
                        PSTALL(p, "reap3", 0);

                        /* Take care of our return values. */
                        *res = p->p_pid;

                        if (status)
                                *status = p->p_xstat;
                        if (rusage)
                                *rusage = p->p_ru;

                        /*
                         * If we got the child via a ptrace 'attach',
                         * we need to give it back to the old parent.
                         */
                        if (p->p_oppid && (t = pfind(p->p_oppid)) != NULL) {
                                PHOLD(p);
                                p->p_oppid = 0;
                                proc_reparent(p, t);
                                ksignal(t, SIGCHLD);
                                wakeup((caddr_t)t);
                                error = 0;
                                PRELE(t);
                                lwkt_reltoken(&p->p_token);
                                PRELEZOMB(p);
                                goto done;
                        }

                        /*
                         * Unlink the proc from its process group so that
                         * the following operations won't lead to an
                         * inconsistent state for processes running down
                         * the zombie list.
                         */
                        proc_remove_zombie(p);
                        lwkt_reltoken(&p->p_token);
                        leavepgrp(p);

                        p->p_xstat = 0;
                        ruadd(&q->p_cru, &p->p_ru);

                        /*
                         * Decrement the count of procs running with this uid.
                         */
                        chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);

                        /*
                         * Free up credentials.
                         */
                        crfree(p->p_ucred);
                        p->p_ucred = NULL;

                        /*
                         * Remove unused arguments
                         */
                        pa = p->p_args;
                        p->p_args = NULL;
                        if (pa && refcount_release(&pa->ar_ref)) {
                                kfree(pa, M_PARGS);
                                pa = NULL;
                        }

                        ps = p->p_sigacts;
                        p->p_sigacts = NULL;
                        if (ps && refcount_release(&ps->ps_refcnt)) {
                                kfree(ps, M_SUBPROC);
                                ps = NULL;
                        }

                        /*
                         * Our exitingcount was incremented when the process
                         * became a zombie, now that the process has been
                         * removed from (almost) all lists we should be able
                         * to safely destroy its vmspace.  Wait for any current
                         * holders to go away (so the vmspace remains stable),
                         * then scrap it.
                         */
                        PSTALL(p, "reap4", 0);
                        vmspace_exitfree(p);
                        PSTALL(p, "reap5", 0);

                        /*
                         * NOTE: We have to officially release ZOMB in order
                         *	 to ensure that a racing thread in kern_wait()
                         *	 which blocked on ZOMB is woken up.
                         */
                        PHOLD(p);
                        PRELEZOMB(p);
                        kfree(p, M_PROC);
                        atomic_add_int(&nprocs, -1);
                        error = 0;
                        goto done;
                }
                if (p->p_stat == SSTOP && (p->p_flags & P_WAITED) == 0 &&
                    ((p->p_flags & P_TRACED) || (options & WUNTRACED))) {
                        PHOLD(p);
                        lwkt_gettoken(&p->p_token);
                        if (p->p_pptr != q) {
                                lwkt_reltoken(&p->p_token);
                                PRELE(p);
                                goto loop;
                        }
                        if (p->p_stat != SSTOP ||
                            (p->p_flags & P_WAITED) != 0 ||
                            ((p->p_flags & P_TRACED) == 0 &&
                             (options & WUNTRACED) == 0)) {
                                lwkt_reltoken(&p->p_token);
                                PRELE(p);
                                goto loop;
                        }

                        p->p_flags |= P_WAITED;

                        *res = p->p_pid;
                        if (status)
                                *status = W_STOPCODE(p->p_xstat);
                        /* Zero rusage so we get something consistent. */
                        if (rusage)
                                bzero(rusage, sizeof(*rusage));
                        error = 0;
                        lwkt_reltoken(&p->p_token);
                        PRELE(p);
                        goto done;
                }
                if ((options & WCONTINUED) && (p->p_flags & P_CONTINUED)) {
                        PHOLD(p);
                        lwkt_gettoken(&p->p_token);
                        if (p->p_pptr != q) {
                                lwkt_reltoken(&p->p_token);
                                PRELE(p);
                                goto loop;
                        }
                        if ((p->p_flags & P_CONTINUED) == 0) {
                                lwkt_reltoken(&p->p_token);
                                PRELE(p);
                                goto loop;
                        }

                        *res = p->p_pid;
                        p->p_flags &= ~P_CONTINUED;

                        if (status)
                                *status = SIGCONT;
                        error = 0;
                        lwkt_reltoken(&p->p_token);
                        PRELE(p);
                        goto done;
                }
        }
        if (nfound == 0) {
                error = ECHILD;
                goto done;
        }
        if (options & WNOHANG) {
                *res = 0;
                error = 0;
                goto done;
        }

        /*
         * Wait for signal - interlocked using q->p_token.
         */
        error = tsleep(q, PCATCH, "wait", 0);
        if (error) {
done:
                lwkt_reltoken(&q->p_token);
                return (error);
        }
        goto loop;
}

/*
 * Make process 'parent' the new parent of process 'child'.
 *
 * p_children/p_sibling requires the parent's token, and
 * changing pptr requires the child's token, so we have to
 * get three tokens to do this operation.
 */
void
proc_reparent(struct proc *child, struct proc *parent)
{
        struct proc *opp = child->p_pptr;

        if (opp == parent)
                return;
        PHOLD(opp);
        PHOLD(parent);
        lwkt_gettoken(&opp->p_token);
        lwkt_gettoken(&child->p_token);
        lwkt_gettoken(&parent->p_token);
        KKASSERT(child->p_pptr == opp);
        LIST_REMOVE(child, p_sibling);
        LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
        child->p_pptr = parent;
        lwkt_reltoken(&parent->p_token);
        lwkt_reltoken(&child->p_token);
        lwkt_reltoken(&opp->p_token);
        PRELE(parent);
        PRELE(opp);
}

/*
 * The next two functions handle adding/deleting items on the
 * exit callout list.
 *
 * at_exit():
 * Take the arguments given and put them onto the exit callout list.
 * However, first make sure that it's not already there.
 * Returns 0 on success.
 */
int
at_exit(exitlist_fn function)
{
        struct exitlist *ep;

#ifdef INVARIANTS
        /* Be noisy if the programmer has lost track of things */
        if (rm_at_exit(function))
                kprintf("WARNING: exit callout entry (%p) already present\n",
                        function);
#endif
        ep = kmalloc(sizeof(*ep), M_ATEXIT, M_NOWAIT);
        if (ep == NULL)
                return (ENOMEM);
        ep->function = function;
        TAILQ_INSERT_TAIL(&exit_list, ep, next);
        return (0);
}

/*
 * Scan the exit callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1)
 */
int
rm_at_exit(exitlist_fn function)
{
        struct exitlist *ep;

        TAILQ_FOREACH(ep, &exit_list, next) {
                if (ep->function == function) {
                        TAILQ_REMOVE(&exit_list, ep, next);
                        kfree(ep, M_ATEXIT);
                        return(1);
                }
        }
        return (0);
}
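
/*
 * Illustrative sketch (hypothetical module code, not part of this
 * file): registering and unregistering an exit callout.  exit1()
 * invokes each registered function for every exiting process, passing
 * the exiting thread; the hook name below is made up.
 *
 *	static void
 *	mymod_exit_hook(struct thread *td)
 *	{
 *		// inspect td->td_proc here; keep the work short
 *	}
 *
 *	at_exit(mymod_exit_hook);	// e.g. at MOD_LOAD time
 *	...
 *	rm_at_exit(mymod_exit_hook);	// e.g. at MOD_UNLOAD time
 */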

/*
 * LWP reaper related code.
 */
static void
reaplwps(void *context, int dummy)
{
        struct lwplist *lwplist = context;
        struct lwp *lp;

        lwkt_gettoken(&deadlwp_token);
        while ((lp = LIST_FIRST(lwplist))) {
                LIST_REMOVE(lp, u.lwp_reap_entry);
                reaplwp(lp);
        }
        lwkt_reltoken(&deadlwp_token);
}

static void
reaplwp(struct lwp *lp)
{
        while (lwp_wait(lp) == 0)
                ;
        lwp_dispose(lp);
}

static void
deadlwp_init(void)
{
        int cpu;

        for (cpu = 0; cpu < ncpus; cpu++) {
                LIST_INIT(&deadlwp_list[cpu]);
                deadlwp_task[cpu] = kmalloc(sizeof(*deadlwp_task[cpu]),
                                            M_DEVBUF, M_WAITOK);
                TASK_INIT(deadlwp_task[cpu], 0, reaplwps, &deadlwp_list[cpu]);
        }
}

SYSINIT(deadlwpinit, SI_SUB_CONFIGURE, SI_ORDER_ANY, deadlwp_init, NULL);