/*	$OpenBSD: kern_fork.c,v 1.142 2012/08/02 03:18:48 guenther Exp $	*/
/*	$NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/exec.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/sched.h>
#include <dev/rndvar.h>
#include <sys/pool.h>
#include <sys/mman.h>
#include <sys/ptrace.h>

#include <sys/syscallargs.h>

#include "systrace.h"
#include <dev/systrace.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_map.h>

#ifdef __HAVE_MD_TCB
# include <machine/tcb.h>
#endif

int	nprocesses = 1;		/* process 0 */
int	nthreads = 1;		/* proc 0 */
int	randompid;		/* when set to 1, pid's go random */
pid_t	lastpid;
struct	forkstat forkstat;

void fork_return(void *);
void tfork_child_return(void *);
int pidtaken(pid_t);

void process_new(struct proc *, struct process *);

void
fork_return(void *arg)
{
	struct proc *p = (struct proc *)arg;

	if (p->p_p->ps_flags & PS_TRACED)
		psignal(p, SIGTRAP);

	child_return(p);
}

/*ARGSUSED*/
int
sys_fork(struct proc *p, void *v, register_t *retval)
{
	int flags;

	flags = FORK_FORK;
	if (p->p_p->ps_ptmask & PTRACE_FORK)
		flags |= FORK_PTRACE;
	return (fork1(p, SIGCHLD, flags, NULL, 0,
	    fork_return, NULL, retval, NULL));
}

/*ARGSUSED*/
int
sys_vfork(struct proc *p, void *v, register_t *retval)
{
	return (fork1(p, SIGCHLD, FORK_VFORK|FORK_PPWAIT, NULL, 0, NULL,
	    NULL, retval, NULL));
}

int
sys___tfork(struct proc *p, void *v, register_t *retval)
{
	struct sys___tfork_args /* {
		syscallarg(const struct __tfork) *param;
		syscallarg(size_t) psize;
	} */ *uap = v;
	size_t psize = SCARG(uap, psize);
	struct __tfork param = { 0 };
	int flags;
	int error;

	if (psize == 0 || psize > sizeof(param))
		return (EINVAL);
	if ((error = copyin(SCARG(uap, param), &param, psize)))
		return (error);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT))
		ktrstruct(p, "tfork", &param, sizeof(param));
#endif

	flags = FORK_TFORK | FORK_THREAD | FORK_SIGHAND | FORK_SHAREVM
	    | FORK_NOZOMBIE | FORK_SHAREFILES;

	return (fork1(p, 0, flags, param.tf_stack, param.tf_tid,
	    tfork_child_return, param.tf_tcb, retval, NULL));
}

#ifdef COMPAT_O51
int
compat_o51_sys___tfork(struct proc *p, void *v, register_t *retval)
{
	struct compat_o51_sys___tfork_args /* {
		syscallarg(struct __tfork51) *param;
	} */ *uap = v;
	struct __tfork51 param;
	int flags;
	int error;

	if ((error = copyin(SCARG(uap, param), &param, sizeof(param))))
		return (error);

	if (param.tf_flags != 0)
		return (EINVAL);

	flags = FORK_TFORK | FORK_THREAD | FORK_SIGHAND | FORK_SHAREVM
	    | FORK_NOZOMBIE | FORK_SHAREFILES;

	return (fork1(p, 0, flags, NULL, param.tf_tid, tfork_child_return,
	    param.tf_tcb, retval, NULL));
}
#endif

void
tfork_child_return(void *arg)
{
	struct proc *p = curproc;

	TCB_SET(p, arg);
	child_return(p);
}

/*
 * Allocate and initialize a new process.
 */
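/*
 * Only called when creating a new "main" thread: threads created with
 * FORK_THREAD attach to the parent's existing struct process instead
 * (see fork1() below).  The ps_startzero/ps_startcopy markers bound
 * the members that are zero-initialized versus inherited verbatim
 * from the parent, the same scheme used for p_startzero/p_startcopy
 * in struct proc.
 */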
void
process_new(struct proc *p, struct process *parent)
{
	struct process *pr;

	pr = pool_get(&process_pool, PR_WAITOK);
	pr->ps_mainproc = p;

	TAILQ_INIT(&pr->ps_threads);
	TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);
	pr->ps_pptr = parent;
	LIST_INIT(&pr->ps_children);
	pr->ps_refcnt = 1;

	/*
	 * Make a process structure for the new process.
	 * Start by zeroing the section of the process structure that is
	 * zero-initialized, then copy the section that is copied
	 * directly from the parent.
	 */
	bzero(&pr->ps_startzero,
	    (unsigned) ((caddr_t)&pr->ps_endzero - (caddr_t)&pr->ps_startzero));
	bcopy(&parent->ps_startcopy, &pr->ps_startcopy,
	    (unsigned) ((caddr_t)&pr->ps_endcopy - (caddr_t)&pr->ps_startcopy));

	/* post-copy fixups */
	pr->ps_cred = pool_get(&pcred_pool, PR_WAITOK);
	bcopy(parent->ps_cred, pr->ps_cred, sizeof(*pr->ps_cred));
	crhold(parent->ps_cred->pc_ucred);
	pr->ps_limit->p_refcnt++;

	timeout_set(&pr->ps_realit_to, realitexpire, pr);
	timeout_set(&pr->ps_virt_to, virttimer_trampoline, pr);
	timeout_set(&pr->ps_prof_to, proftimer_trampoline, pr);

	pr->ps_flags = parent->ps_flags & (PS_SUGID | PS_SUGIDEXEC);
	if (parent->ps_session->s_ttyvp != NULL &&
	    parent->ps_flags & PS_CONTROLT)
		atomic_setbits_int(&pr->ps_flags, PS_CONTROLT);

	p->p_p = pr;
}

/* print the 'table full' message once per 10 seconds */
struct timeval fork_tfmrate = { 10, 0 };
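
/*
 * fork1 is the common back end of fork(2), vfork(2), __tfork(2), and
 * kernel thread creation:
 *
 *	curp	parent thread
 *	exitsig	signal the child delivers to its parent on exit
 *	flags	FORK_* flags selecting what is shared or inherited
 *	stack	initial stack pointer for the child, or NULL to use
 *		the parent's
 *	tidptr	if not NULL, the child's tid is copied out to it
 *	func	kernel entry point for the child, called with arg;
 *		defaults to child_return() on the new proc
 *	retval	if not NULL, retval[0] gets the child's pid/tid and
 *		retval[1] is zeroed to mark the caller as the parent
 *	rnewprocp if not NULL, receives a pointer to the new proc
 */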
int
fork1(struct proc *curp, int exitsig, int flags, void *stack, pid_t *tidptr,
    void (*func)(void *), void *arg, register_t *retval,
    struct proc **rnewprocp)
{
	struct process *curpr = curp->p_p;
	struct process *pr;
	struct proc *p;
	uid_t uid;
	struct vmspace *vm;
	int count;
	vaddr_t uaddr;
	int s;
	struct ptrace_state *newptstat = NULL;
#if NSYSTRACE > 0
	void *newstrp = NULL;
#endif

	/* sanity check some flag combinations */
	if (flags & FORK_THREAD) {
		if (!rthreads_enabled)
			return (ENOTSUP);
		if ((flags & (FORK_SIGHAND | FORK_NOZOMBIE)) !=
		    (FORK_SIGHAND | FORK_NOZOMBIE))
			return (EINVAL);
	}
	if (flags & FORK_SIGHAND && (flags & FORK_SHAREVM) == 0)
		return (EINVAL);

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  We reserve
	 * the last 5 processes to root.  The variable nprocesses is the
	 * current number of processes, maxprocess is the limit.  Similar
	 * rules for threads (struct proc): we reserve the last 5 to root;
	 * the variable nthreads is the current number of procs, maxthread is
	 * the limit.
	 */
	uid = curp->p_cred->p_ruid;
	if ((nthreads >= maxthread - 5 && uid != 0) || nthreads >= maxthread) {
		static struct timeval lasttfm;

		if (ratecheck(&lasttfm, &fork_tfmrate))
			tablefull("proc");
		return (EAGAIN);
	}
	nthreads++;

	if ((flags & FORK_THREAD) == 0) {
		if ((nprocesses >= maxprocess - 5 && uid != 0) ||
		    nprocesses >= maxprocess) {
			static struct timeval lasttfm;

			if (ratecheck(&lasttfm, &fork_tfmrate))
				tablefull("process");
			nthreads--;
			return (EAGAIN);
		}
		nprocesses++;

		/*
		 * Increment the count of processes running with
		 * this uid.  Don't allow a nonprivileged user to
		 * exceed their current limit.
		 */
		count = chgproccnt(uid, 1);
		if (uid != 0 && count > curp->p_rlimit[RLIMIT_NPROC].rlim_cur) {
			(void)chgproccnt(uid, -1);
			nprocesses--;
			nthreads--;
			return (EAGAIN);
		}
	}

	uaddr = uvm_km_kmemalloc_pla(kernel_map, uvm.kernel_object, USPACE,
	    USPACE_ALIGN, UVM_KMF_ZERO,
	    no_constraint.ucr_low, no_constraint.ucr_high,
	    0, 0, USPACE/PAGE_SIZE);
	if (uaddr == 0) {
		/* unwind only the accounting that was done above */
		if ((flags & FORK_THREAD) == 0) {
			(void)chgproccnt(uid, -1);
			nprocesses--;
		}
		nthreads--;
		return (ENOMEM);
	}

	/*
	 * From now on, we're committed to the fork and cannot fail.
	 */

	/* Allocate new proc. */
	p = pool_get(&proc_pool, PR_WAITOK);

	p->p_stat = SIDL;			/* protect against others */
	p->p_exitsig = exitsig;
	p->p_flag = 0;
	p->p_xstat = 0;

	if (flags & FORK_THREAD) {
		atomic_setbits_int(&p->p_flag, P_THREAD);
		p->p_p = pr = curpr;
		TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);
		pr->ps_refcnt++;
	} else {
		process_new(p, curpr);
		pr = p->p_p;
	}

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	bzero(&p->p_startzero,
	    (unsigned) ((caddr_t)&p->p_endzero - (caddr_t)&p->p_startzero));
	bcopy(&curp->p_startcopy, &p->p_startcopy,
	    (unsigned) ((caddr_t)&p->p_endcopy - (caddr_t)&p->p_startcopy));

	/*
	 * Initialize the timeouts.
	 */
	timeout_set(&p->p_sleep_to, endtsleep, p);

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	if ((flags & FORK_THREAD) == 0) {
		if (curpr->ps_flags & PS_PROFIL)
			startprofclock(pr);
		if ((flags & FORK_PTRACE) && (curpr->ps_flags & PS_TRACED))
			atomic_setbits_int(&pr->ps_flags, PS_TRACED);
	}

	/* bump references to the text vnode (for procfs) */
	p->p_textvp = curp->p_textvp;
	if (p->p_textvp)
		vref(p->p_textvp);

	if (flags & FORK_SHAREFILES)
		p->p_fd = fdshare(curp);
	else
		p->p_fd = fdcopy(curp);

	if (flags & FORK_PPWAIT) {
		atomic_setbits_int(&pr->ps_flags, PS_PPWAIT);
		atomic_setbits_int(&curpr->ps_flags, PS_ISPWAIT);
	}
	if (flags & FORK_NOZOMBIE)
		atomic_setbits_int(&p->p_flag, P_NOZOMBIE);

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 * If not inherited, these were zeroed above.
	 */
	if ((flags & FORK_THREAD) == 0 && curpr->ps_traceflag & KTRFAC_INHERIT)
		ktrsettrace(pr, curpr->ps_traceflag, curpr->ps_tracevp,
		    curpr->ps_tracecred);
#endif

	/*
	 * set priority of child to be that of parent
	 * XXX should move p_estcpu into the region of struct proc which gets
	 * copied.
	 */
	scheduler_fork_hook(curp, p);

	/*
	 * Create signal actions for the child process.
	 */
	if (flags & FORK_SIGHAND)
		p->p_sigacts = sigactsshare(curp);
	else
		p->p_sigacts = sigactsinit(curp);
	if (flags & FORK_THREAD)
		sigstkinit(&p->p_sigstk);

	/*
	 * If emulation has process fork hook, call it now.
	 */
	if (p->p_emul->e_proc_fork)
		(*p->p_emul->e_proc_fork)(p, curp);

	p->p_addr = (struct user *)uaddr;

	/*
	 * Finish creating the child process.  It will return through a
	 * different path later.
	 */
	uvm_fork(curp, p, ((flags & FORK_SHAREVM) ? TRUE : FALSE), stack,
	    0, func ? func : child_return, arg ? arg : p);
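
	/*
	 * The child does not return through fork1(): uvm_fork() has
	 * the MD cpu_fork() set up the child's kernel stack so that,
	 * once scheduled, it enters func(arg) -- by default
	 * child_return(), which completes the syscall with a return
	 * value of 0 in the child.
	 */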

	vm = p->p_vmspace;

	if (flags & FORK_FORK) {
		forkstat.cntfork++;
		forkstat.sizfork += vm->vm_dsize + vm->vm_ssize;
	} else if (flags & FORK_VFORK) {
		forkstat.cntvfork++;
		forkstat.sizvfork += vm->vm_dsize + vm->vm_ssize;
	} else if (flags & FORK_TFORK) {
		forkstat.cnttfork++;
	} else {
		forkstat.cntkthread++;
		forkstat.sizkthread += vm->vm_dsize + vm->vm_ssize;
	}

	if (pr->ps_flags & PS_TRACED && flags & FORK_FORK)
		newptstat = malloc(sizeof(*newptstat), M_SUBPROC, M_WAITOK);
#if NSYSTRACE > 0
	if (ISSET(curp->p_flag, P_SYSTRACE))
		newstrp = systrace_getproc();
#endif

	/* Find an unused pid satisfying 1 <= lastpid <= PID_MAX */
	do {
		lastpid = 1 + (randompid ? arc4random() : lastpid) % PID_MAX;
	} while (pidtaken(lastpid));
	p->p_pid = lastpid;

	LIST_INSERT_HEAD(&allproc, p, p_list);
	LIST_INSERT_HEAD(PIDHASH(p->p_pid), p, p_hash);
	if ((flags & FORK_THREAD) == 0) {
		LIST_INSERT_AFTER(curpr, pr, ps_pglist);
		LIST_INSERT_HEAD(&curpr->ps_children, pr, ps_sibling);

		if (pr->ps_flags & PS_TRACED) {
			pr->ps_oppid = curpr->ps_pid;
			if (pr->ps_pptr != curpr->ps_pptr)
				proc_reparent(pr, curpr->ps_pptr);

			/*
			 * Set ptrace status.
			 */
			if (flags & FORK_FORK) {
				pr->ps_ptstat = newptstat;
				newptstat = NULL;
				curpr->ps_ptstat->pe_report_event = PTRACE_FORK;
				pr->ps_ptstat->pe_report_event = PTRACE_FORK;
				curpr->ps_ptstat->pe_other_pid = pr->ps_pid;
				pr->ps_ptstat->pe_other_pid = curpr->ps_pid;
			}
		}
	}

#if NSYSTRACE > 0
	if (newstrp)
		systrace_fork(curp, p, newstrp);
#endif

	if (tidptr != NULL) {
		pid_t	pid = p->p_pid + THREAD_PID_OFFSET;

		if (copyout(&pid, tidptr, sizeof(pid)))
			psignal(curp, SIGSEGV);
	}

	/*
	 * For new processes, set accounting bits
	 */
	if ((flags & FORK_THREAD) == 0) {
		getmicrotime(&pr->ps_start);
		pr->ps_acflag = AFORK;
	}

	/*
	 * Make child runnable and add to run queue.
	 */
	SCHED_LOCK(s);
	p->p_stat = SRUN;
	p->p_cpu = sched_choosecpu_fork(curp, flags);
	setrunqueue(p);
	SCHED_UNLOCK(s);

	if (newptstat)
		free(newptstat, M_SUBPROC);

	/*
	 * Notify any interested parties about the new process.
	 */
	if ((flags & FORK_THREAD) == 0)
		KNOTE(&curpr->ps_klist, NOTE_FORK | p->p_pid);

	/*
	 * Update stats now that we know the fork was successful.
	 */
	uvmexp.forks++;
	if (flags & FORK_PPWAIT)
		uvmexp.forks_ppwait++;
	if (flags & FORK_SHAREVM)
		uvmexp.forks_sharevm++;

	/*
	 * Pass a pointer to the new process to the caller.
	 */
	if (rnewprocp != NULL)
		*rnewprocp = p;

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set PS_PPWAIT on child and PS_ISPWAIT
	 * on ourselves, and sleep on our process for the latter flag
	 * to go away.
	 * XXX Need to stop other rthreads in the parent
	 */
	if (flags & FORK_PPWAIT)
		while (curpr->ps_flags & PS_ISPWAIT)
			tsleep(curpr, PWAIT, "ppwait", 0);

	/*
	 * If we're tracing the child, alert the parent too.
	 */
	if ((flags & FORK_PTRACE) && (curpr->ps_flags & PS_TRACED))
		psignal(curp, SIGTRAP);

	/*
	 * Return child pid to parent process,
	 * marking us as parent via retval[1].
	 */
	if (retval != NULL) {
		retval[0] = p->p_pid +
		    (flags & FORK_THREAD ? THREAD_PID_OFFSET : 0);
		retval[1] = 0;
	}
	return (0);
}

/*
 * Checks for current use of a pid, either as a pid or pgid.
 */
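/*
 * A pid remains in use until the zombie holding it is reaped, and a
 * process group can outlive its leader, so the zombie list and pgids
 * must be checked in addition to the live pid hash.
 */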
int
pidtaken(pid_t pid)
{
	struct proc *p;

	if (pfind(pid) != NULL)
		return (1);
	if (pgfind(pid) != NULL)
		return (1);
	LIST_FOREACH(p, &zombproc, p_list) {
		if (p->p_pid == pid ||
		    (p->p_p->ps_pgrp && p->p_p->ps_pgrp->pg_id == pid))
			return (1);
	}
	return (0);
}

#if defined(MULTIPROCESSOR)
/*
 * XXX This is a slight hack to get newly-formed processes to
 * XXX acquire the kernel lock as soon as they run.
 */
void
proc_trampoline_mp(void)
{
	struct proc *p;

	p = curproc;

	SCHED_ASSERT_LOCKED();
	__mp_unlock(&sched_lock);
	spl0();
	SCHED_ASSERT_UNLOCKED();
	KASSERT(__mp_lock_held(&kernel_lock) == 0);

	KERNEL_LOCK();
}
#endif