/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 * $FreeBSD: src/sys/kern/kern_exit.c,v 1.92.2.11 2003/01/13 22:51:16 dillon Exp $
 * $DragonFly: src/sys/kern/kern_exit.c,v 1.30 2004/01/18 12:29:49 dillon Exp $
 */

#include "opt_compat.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/tty.h>
#include <sys/wait.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/ptrace.h>
#include <sys/acct.h>		/* for acct_process() function prototype */
#include <sys/filedesc.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/aio.h>
#include <sys/jail.h>
#include <sys/kern_syscall.h>
#include <sys/upcall.h>
#include <sys/caps.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_zone.h>
#include <vm/vm_extern.h>
#include <sys/user.h>

/* Required to be non-static for SysVR4 emulator */
MALLOC_DEFINE(M_ZOMBIE, "zombie", "zombie proc status");

static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback");

/*
 * callout list for things to do at exit time
 */
struct exitlist {
	exitlist_fn function;
	TAILQ_ENTRY(exitlist) next;
};

TAILQ_HEAD(exit_list_head, exitlist);
static struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list);

/*
 * exit --
 *	Death of process.
 *
 * SYS_EXIT_ARGS(int rval)
 */
void
sys_exit(struct sys_exit_args *uap)
{
	exit1(W_EXITCODE(uap->rval, 0));
	/* NOTREACHED */
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(int rv)
{
	struct proc *p = curproc;
	struct proc *q, *nq;
	struct vmspace *vm;
	struct vnode *vtmp;
	struct exitlist *ep;

	if (p->p_pid == 1) {
		printf("init died (signal %d, exit %d)\n",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("Going nowhere without my init!");
	}

	caps_exit(p->p_thread);

	aio_proc_rundown(p);

	/* are we a task leader? */
	if (p == p->p_leader) {
		struct kill_args killArgs;
		killArgs.signum = SIGKILL;
		q = p->p_peers;
		while (q) {
			killArgs.pid = q->p_pid;
			/*
			 * The interface for kill is better
			 * than the internal signal
			 */
			kill(&killArgs);
			nq = q;
			q = q->p_peers;
		}
		while (p->p_peers)
			tsleep((caddr_t)p, 0, "exit1", 0);
	}

#ifdef PGINPROF
	vmsizmon();
#endif
	STOPEVENT(p, S_EXIT, rv);
	wakeup(&p->p_stype);	/* Wakeup anyone in procfs' PIOCWAIT */

	/*
	 * Check if any loadable modules need anything done at process exit.
	 * e.g. SYSV IPC stuff
	 * XXX what if one of these generates an error?
	 */
	TAILQ_FOREACH(ep, &exit_list, next)
		(*ep->function)(p->p_thread);

	if (p->p_flag & P_PROFIL)
		stopprofclock(p);
	MALLOC(p->p_ru, struct rusage *, sizeof(struct rusage),
	    M_ZOMBIE, M_WAITOK);
	/*
	 * If parent is waiting for us to exit or exec,
	 * P_PPWAIT is set; we will wakeup the parent below.
	 */
	p->p_flag &= ~(P_TRACED | P_PPWAIT);
	p->p_flag |= P_WEXIT;
	SIGEMPTYSET(p->p_siglist);
	if (timevalisset(&p->p_realtimer.it_value))
		untimeout(realitexpire, (caddr_t)p, p->p_ithandle);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pid.
	 */
	funsetownlst(&p->p_sigiolst);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(p);

	if (p->p_leader->p_peers) {
		q = p->p_leader;
		while (q->p_peers != p)
			q = q->p_peers;
		q->p_peers = p->p_peers;
		wakeup((caddr_t)p->p_leader);
	}

	/*
	 * XXX Shutdown SYSV semaphores
	 */
	semexit(p);

	/* The next two chunks should probably be moved to vmspace_exit. */
	vm = p->p_vmspace;

	/*
	 * Release upcalls associated with this process
	 */
	if (vm->vm_upcalls)
		upc_release(vm, p);

	/*
	 * Release user portion of address space.
	 * This releases references to vnodes,
	 * which could cause I/O if the file has been unlinked.
	 * Need to do this early enough that we can still sleep.
	 * Can't free the entire vmspace as the kernel stack
	 * may be mapped within that space also.
	 *
	 * Processes sharing the same vmspace may exit in one order, and
	 * get cleaned up by vmspace_exit() in a different order.  The
	 * last exiting process to reach this point releases as much of
	 * the environment as it can, and the last process cleaned up
	 * by vmspace_exit() (which decrements exitingcnt) cleans up the
	 * remainder.
	 */
	++vm->vm_exitingcnt;
	if (--vm->vm_refcnt == 0) {
		shmexit(vm);
		pmap_remove_pages(vmspace_pmap(vm), VM_MIN_ADDRESS,
		    VM_MAXUSER_ADDRESS);
		(void) vm_map_remove(&vm->vm_map, VM_MIN_ADDRESS,
		    VM_MAXUSER_ADDRESS);
	}

	if (SESS_LEADER(p)) {
		struct session *sp = p->p_session;

		if (sp->s_ttyvp) {
			/*
			 * Controlling process.
			 * Signal foreground pgrp,
			 * drain controlling terminal
			 * and revoke access to controlling terminal.
			 */
			if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) {
				if (sp->s_ttyp->t_pgrp)
					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
				(void) ttywait(sp->s_ttyp);
				/*
				 * The tty could have been revoked
				 * if we blocked.
				 */
				if (sp->s_ttyvp)
					VOP_REVOKE(sp->s_ttyvp, REVOKEALL);
			}
			if (sp->s_ttyvp)
				vrele(sp->s_ttyvp);
			sp->s_ttyvp = NULL;
			/*
			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)
			 */
		}
		sp->s_leader = NULL;
	}
	fixjobc(p, p->p_pgrp, 0);
	(void)acct_process(p);
#ifdef KTRACE
	/*
	 * release trace file
	 */
	p->p_traceflag = 0;	/* don't trace the vrele() */
	if ((vtmp = p->p_tracep) != NULL) {
		p->p_tracep = NULL;
		vrele(vtmp);
	}
#endif
	/*
	 * Release reference to text vnode
	 */
	if ((vtmp = p->p_textvp) != NULL) {
		p->p_textvp = NULL;
		vrele(vtmp);
	}

	/*
	 * Once we set SZOMB the process can get reaped.  The wait1 code
	 * will also wait for TDF_RUNNING to be cleared in the thread's flags,
	 * indicating that it has been completely switched out.
	 */

	/*
	 * Remove proc from allproc queue and pidhash chain.
	 * Place onto zombproc.  Unlink from parent's child list.
	 */
	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);
	p->p_stat = SZOMB;

	LIST_REMOVE(p, p_hash);

	q = LIST_FIRST(&p->p_children);
	if (q)		/* only need this if any child is SZOMB */
		wakeup((caddr_t) initproc);
	for (; q != 0; q = nq) {
		nq = LIST_NEXT(q, p_sibling);
		LIST_REMOVE(q, p_sibling);
		LIST_INSERT_HEAD(&initproc->p_children, q, p_sibling);
		q->p_pptr = initproc;
		q->p_sigparent = SIGCHLD;
		/*
		 * Traced processes are killed
		 * since their existence means someone is screwing up.
		 */
		if (q->p_flag & P_TRACED) {
			q->p_flag &= ~P_TRACED;
			psignal(q, SIGKILL);
		}
	}

	/*
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.
	 */
	p->p_xstat = rv;
	*p->p_ru = p->p_stats->p_ru;
	calcru(p, &p->p_ru->ru_utime, &p->p_ru->ru_stime, NULL);
	ruadd(p->p_ru, &p->p_stats->p_cru);

	/*
	 * notify interested parties of our demise.
	 */
	KNOTE(&p->p_klist, NOTE_EXIT);

	/*
	 * Notify parent that we're gone.  If parent has the PS_NOCLDWAIT
	 * flag set, notify process 1 instead (and hope it will handle
	 * this situation).
	 */
	if (p->p_pptr->p_procsig->ps_flag & PS_NOCLDWAIT) {
		struct proc *pp = p->p_pptr;
		proc_reparent(p, initproc);
		/*
		 * If this was the last child of our parent, notify
		 * parent, so in case he was wait(2)ing, he will
		 * continue.
		 */
		if (LIST_EMPTY(&pp->p_children))
			wakeup((caddr_t)pp);
	}

	if (p->p_sigparent && p->p_pptr != initproc) {
		psignal(p->p_pptr, p->p_sigparent);
	} else {
		psignal(p->p_pptr, SIGCHLD);
	}

	wakeup((caddr_t)p->p_pptr);
#if defined(tahoe)
	/* move this to cpu_exit */
	p->p_thread->td_pcb->pcb_saveacc.faddr = (float *)NULL;
#endif
	/*
	 * cpu_exit is responsible for clearing curproc, since
	 * it is heavily integrated with the thread/switching sequence.
	 *
	 * Other substructures are freed from wait().
	 */
	if (--p->p_limit->p_refcnt == 0) {
		FREE(p->p_limit, M_SUBPROC);
		p->p_limit = NULL;
	}

	/*
	 * Release the P_CURPROC designation on the process so the userland
	 * scheduler can work in someone else.
	 */
	release_curproc(p);

	/*
	 * Finally, call machine-dependent code to release the remaining
	 * resources including address space, the kernel stack and pcb.
	 * The address space is released by "vmspace_free(p->p_vmspace)";
	 * This is machine-dependent, as we may have to change stacks
	 * or ensure that the current one isn't reallocated before we
	 * finish.  cpu_exit will end with a call to cpu_switch(), finishing
	 * our execution (pun intended).
	 */
	cpu_proc_exit();
}

int
wait4(struct wait_args *uap)
{
	struct rusage rusage;
	int error, status;

	error = kern_wait(uap->pid, uap->status ? &status : NULL,
	    uap->options, uap->rusage ? &rusage : NULL, &uap->sysmsg_fds[0]);

	if (error == 0 && uap->status)
		error = copyout(&status, uap->status, sizeof(*uap->status));
	if (error == 0 && uap->rusage)
		error = copyout(&rusage, uap->rusage, sizeof(*uap->rusage));
	return (error);
}

/*
 * kern_wait()
 *
 * kern_wait(pid_t pid, int *status, int options, struct rusage *rusage,
 *	     int *res)
 *
 * In-kernel backend for wait4(); status, rusage and res point to
 * kernel-space storage.
 */
int
kern_wait(pid_t pid, int *status, int options, struct rusage *rusage, int *res)
{
	struct thread *td = curthread;
	struct proc *q = td->td_proc;
	struct proc *p, *t;
	int nfound, error;

	if (pid == 0)
		pid = -q->p_pgid;
	if (options &~ (WUNTRACED|WNOHANG|WLINUXCLONE))
		return (EINVAL);
loop:
	nfound = 0;
	LIST_FOREACH(p, &q->p_children, p_sibling) {
		if (pid != WAIT_ANY &&
		    p->p_pid != pid && p->p_pgid != -pid)
			continue;

		/*
		 * This special case handles a kthread spawned by linux_clone
		 * (see linux_misc.c).  The linux_wait4 and linux_waitpid
		 * functions need to be able to distinguish between waiting
		 * on a process and waiting on a thread.  It is a thread if
		 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
		 * signifies we want to wait for threads and not processes.
		 */
		if ((p->p_sigparent != SIGCHLD) ^ ((options & WLINUXCLONE) != 0))
			continue;

		nfound++;
		if (p->p_stat == SZOMB) {
			/*
			 * The process's thread may still be in the middle
			 * of switching away, we can't rip its stack out from
			 * under it until TDF_RUNNING clears!
			 *
			 * YYY no wakeup occurs so we depend on the timeout.
			 */
			if ((p->p_thread->td_flags & TDF_RUNNING) != 0) {
				tsleep(p->p_thread, 0, "reap", 1);
				goto loop;
			}

			/*
			 * Other kernel threads may be in the middle of
			 * accessing the proc.  For example, kern/kern_proc.c
			 * could be blocked writing proc data to a sysctl.
			 * At the moment, if this occurs, we are not woken
			 * up and rely on a one-second retry.
			 */
			if (p->p_lock) {
				while (p->p_lock)
					tsleep(p, 0, "reap2", hz);
			}
			lwkt_wait_free(p->p_thread);

			/* charge child's scheduling cpu usage to parent */
			if (curproc->p_pid != 1) {
				curproc->p_estcpu =
				    ESTCPULIM(curproc->p_estcpu + p->p_estcpu);
			}

			/* Take care of our return values. */
			*res = p->p_pid;
			if (status)
				*status = p->p_xstat;
			if (rusage)
				*rusage = *p->p_ru;
			/*
			 * If we got the child via a ptrace 'attach',
			 * we need to give it back to the old parent.
			 */
			if (p->p_oppid && (t = pfind(p->p_oppid))) {
				p->p_oppid = 0;
				proc_reparent(p, t);
				psignal(t, SIGCHLD);
				wakeup((caddr_t)t);
				return (0);
			}
			p->p_xstat = 0;
			ruadd(&q->p_stats->p_cru, p->p_ru);
			FREE(p->p_ru, M_ZOMBIE);
			p->p_ru = NULL;

			/*
			 * Decrement the count of procs running with this uid.
			 */
			chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);

			/*
			 * Free up credentials.
			 */
			crfree(p->p_ucred);
			p->p_ucred = NULL;

			/*
			 * Remove unused arguments
			 */
			if (p->p_args && --p->p_args->ar_ref == 0)
				FREE(p->p_args, M_PARGS);

			/*
			 * Finally finished with old proc entry.
			 * Unlink it from its process group and free it.
			 */
			leavepgrp(p);
			LIST_REMOVE(p, p_list);	/* off zombproc */
			LIST_REMOVE(p, p_sibling);

			if (--p->p_procsig->ps_refcnt == 0) {
				if (p->p_sigacts != &p->p_addr->u_sigacts)
					FREE(p->p_sigacts, M_SUBPROC);
				FREE(p->p_procsig, M_SUBPROC);
				p->p_procsig = NULL;
			}

			vm_waitproc(p);
			zfree(proc_zone, p);
			nprocs--;
			return (0);
		}
		if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 &&
		    (p->p_flag & P_TRACED || options & WUNTRACED)) {
			p->p_flag |= P_WAITED;

			*res = p->p_pid;
			if (status)
				*status = W_STOPCODE(p->p_xstat);
			/* Zero rusage so we get something consistent. */
			if (rusage)
				bzero(rusage, sizeof(*rusage));
			return (0);
		}
	}
	if (nfound == 0)
		return (ECHILD);
	if (options & WNOHANG) {
		*res = 0;
		return (0);
	}
	error = tsleep((caddr_t)q, PCATCH, "wait", 0);
	if (error)
		return (error);
	goto loop;
}

/*
 * make process 'parent' the new parent of process 'child'.
 */
void
proc_reparent(struct proc *child, struct proc *parent)
{
	if (child->p_pptr == parent)
		return;

	LIST_REMOVE(child, p_sibling);
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
	child->p_pptr = parent;
}

/*
 * The next two functions are to handle adding/deleting items on the
 * exit callout list.
 *
 * at_exit():
 *	Take the arguments given and put them onto the exit callout list;
 *	however, first make sure that it's not already there.
 *	Returns 0 on success.
 */
int
at_exit(exitlist_fn function)
{
	struct exitlist *ep;

#ifdef INVARIANTS
	/* Be noisy if the programmer has lost track of things */
	if (rm_at_exit(function))
		printf("WARNING: exit callout entry (%p) already present\n",
		    function);
#endif
	ep = malloc(sizeof(*ep), M_ATEXIT, M_NOWAIT);
	if (ep == NULL)
		return (ENOMEM);
	ep->function = function;
	TAILQ_INSERT_TAIL(&exit_list, ep, next);
	return (0);
}

/*
 * Scan the exit callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1).
 */
int
rm_at_exit(exitlist_fn function)
{
	struct exitlist *ep;

	TAILQ_FOREACH(ep, &exit_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&exit_list, ep, next);
			free(ep, M_ATEXIT);
			return (1);
		}
	}
	return (0);
}

void
check_sigacts(void)
{
	struct proc *p = curproc;
	struct sigacts *pss;
	int s;

	if (p->p_procsig->ps_refcnt == 1 &&
	    p->p_sigacts != &p->p_addr->u_sigacts) {
		pss = p->p_sigacts;
		s = splhigh();
		p->p_addr->u_sigacts = *pss;
		p->p_sigacts = &p->p_addr->u_sigacts;
		splx(s);
		FREE(pss, M_SUBPROC);
	}
}
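
/*
 * Usage sketch for the exit callout list declared near the top of this
 * file (illustrative only, not compiled here): a hypothetical subsystem
 * registers a hook with at_exit() when it initializes and removes it with
 * rm_at_exit() when it unloads.  The hook receives the exiting process's
 * thread, matching the (*ep->function)(p->p_thread) invocation in exit1().
 * The names mysubsys_exit_hook/mysubsys_init/mysubsys_uninit are made up
 * for the example.
 *
 *	static void
 *	mysubsys_exit_hook(struct thread *td)
 *	{
 *		... release any per-process state held by the subsystem ...
 *	}
 *
 *	static void
 *	mysubsys_init(void)
 *	{
 *		if (at_exit(mysubsys_exit_hook) != 0)
 *			printf("mysubsys: could not register exit hook\n");
 *	}
 *
 *	static void
 *	mysubsys_uninit(void)
 *	{
 *		rm_at_exit(mysubsys_exit_hook);
 *	}
 */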