/*	$OpenBSD: kern_fork.c,v 1.149 2013/06/03 22:35:15 guenther Exp $	*/
/*	$NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/exec.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/sched.h>
#include <dev/rndvar.h>
#include <sys/pool.h>
#include <sys/mman.h>
#include <sys/ptrace.h>

#include <sys/syscallargs.h>

#include "systrace.h"
#include <dev/systrace.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_map.h>

#ifdef __HAVE_MD_TCB
# include <machine/tcb.h>
#endif

int	nprocesses = 1;		/* process 0 */
int	nthreads = 1;		/* proc 0 */
int	randompid;		/* when set to 1, pid's go random */
pid_t	lastpid;
struct	forkstat forkstat;

void fork_return(void *);
void tfork_child_return(void *);
int pidtaken(pid_t);

void process_new(struct proc *, struct process *);

void
fork_return(void *arg)
{
	struct proc *p = (struct proc *)arg;

	if (p->p_p->ps_flags & PS_TRACED)
		psignal(p, SIGTRAP);

	child_return(p);
}

/*ARGSUSED*/
int
sys_fork(struct proc *p, void *v, register_t *retval)
{
	int flags;

	flags = FORK_FORK;
	if (p->p_p->ps_ptmask & PTRACE_FORK)
		flags |= FORK_PTRACE;
	return (fork1(p, SIGCHLD, flags, NULL, 0,
	    fork_return, NULL, retval, NULL));
}

/*ARGSUSED*/
int
sys_vfork(struct proc *p, void *v, register_t *retval)
{
	return (fork1(p, SIGCHLD, FORK_VFORK|FORK_PPWAIT, NULL, 0, NULL,
	    NULL, retval, NULL));
}

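/*
 * __tfork(2): create a new thread in the calling process.  The flags
 * chosen below make the new thread share the caller's address space,
 * file descriptors, and signal handlers, and exempt it from leaving a
 * zombie.  It starts on the stack named in tf_stack, has its TCB set
 * to tf_tcb in tfork_child_return(), and its thread ID is written to
 * the user address in tf_tid (see the tidptr handling in fork1()).
 */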
int
sys___tfork(struct proc *p, void *v, register_t *retval)
{
	struct sys___tfork_args /* {
		syscallarg(const struct __tfork) *param;
		syscallarg(size_t) psize;
	} */ *uap = v;
	size_t psize = SCARG(uap, psize);
	struct __tfork param = { 0 };
	int flags;
	int error;

	if (psize == 0 || psize > sizeof(param))
		return (EINVAL);
	if ((error = copyin(SCARG(uap, param), &param, psize)))
		return (error);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT))
		ktrstruct(p, "tfork", &param, sizeof(param));
#endif

	flags = FORK_TFORK | FORK_THREAD | FORK_SIGHAND | FORK_SHAREVM
	    | FORK_NOZOMBIE | FORK_SHAREFILES;

	return (fork1(p, 0, flags, param.tf_stack, param.tf_tid,
	    tfork_child_return, param.tf_tcb, retval, NULL));
}

#ifdef COMPAT_O51
int
compat_o51_sys___tfork(struct proc *p, void *v, register_t *retval)
{
	struct compat_o51_sys___tfork_args /* {
		syscallarg(struct __tfork51) *param;
	} */ *uap = v;
	struct __tfork51 param;
	int flags;
	int error;

	if ((error = copyin(SCARG(uap, param), &param, sizeof(param))))
		return (error);

	if (param.tf_flags != 0)
		return (EINVAL);

	flags = FORK_TFORK | FORK_THREAD | FORK_SIGHAND | FORK_SHAREVM
	    | FORK_NOZOMBIE | FORK_SHAREFILES;

	return (fork1(p, 0, flags, NULL, param.tf_tid, tfork_child_return,
	    param.tf_tcb, retval, NULL));
}
#endif

void
tfork_child_return(void *arg)
{
	struct proc *p = curproc;

	TCB_SET(p, arg);
	child_return(p);
}

/*
 * Allocate and initialize a new process.
 */
void
process_new(struct proc *p, struct process *parent)
{
	struct process *pr;

	pr = pool_get(&process_pool, PR_WAITOK);
	pr->ps_mainproc = p;

	TAILQ_INIT(&pr->ps_threads);
	TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);
	pr->ps_pptr = parent;
	LIST_INIT(&pr->ps_children);
	pr->ps_refcnt = 1;

	/*
	 * Make a process structure for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	bzero(&pr->ps_startzero,
	    (unsigned) ((caddr_t)&pr->ps_endzero - (caddr_t)&pr->ps_startzero));
	bcopy(&parent->ps_startcopy, &pr->ps_startcopy,
	    (unsigned) ((caddr_t)&pr->ps_endcopy - (caddr_t)&pr->ps_startcopy));

	/* post-copy fixups */
	pr->ps_cred = pool_get(&pcred_pool, PR_WAITOK);
	bcopy(parent->ps_cred, pr->ps_cred, sizeof(*pr->ps_cred));
	crhold(parent->ps_cred->pc_ucred);
	pr->ps_limit->p_refcnt++;

	timeout_set(&pr->ps_realit_to, realitexpire, pr);
	timeout_set(&pr->ps_virt_to, virttimer_trampoline, pr);
	timeout_set(&pr->ps_prof_to, proftimer_trampoline, pr);

	pr->ps_flags = parent->ps_flags & (PS_SUGID | PS_SUGIDEXEC);
	if (parent->ps_session->s_ttyvp != NULL &&
	    parent->ps_flags & PS_CONTROLT)
		atomic_setbits_int(&pr->ps_flags, PS_CONTROLT);

	p->p_p = pr;
}

/* print the 'table full' message once per 10 seconds */
struct timeval fork_tfmrate = { 10, 0 };

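/*
 * fork1: common code for fork(2), vfork(2), __tfork(2) and kernel
 * thread creation.  curp is the forking thread; exitsig is the signal
 * the child delivers when it exits (SIGCHLD for processes, 0 for
 * threads); flags is a mask of FORK_* bits selecting what is shared
 * and how the child behaves; stack, if not NULL, is the initial user
 * stack pointer for the child; tidptr, if not NULL, is a user address
 * to which the new thread's ID is copied out; func and arg name the
 * kernel entry point the child runs first (child_return() on the new
 * proc when left NULL); retval receives the usual "pid in retval[0],
 * 0 in retval[1]" pair for the parent; and *rnewprocp, if the pointer
 * is not NULL, is set to the new proc.
 */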
int
fork1(struct proc *curp, int exitsig, int flags, void *stack, pid_t *tidptr,
    void (*func)(void *), void *arg, register_t *retval,
    struct proc **rnewprocp)
{
	struct process *curpr = curp->p_p;
	struct process *pr;
	struct proc *p;
	uid_t uid;
	struct vmspace *vm;
	int count;
	vaddr_t uaddr;
	int s;
	struct ptrace_state *newptstat = NULL;
#if NSYSTRACE > 0
	void *newstrp = NULL;
#endif

	/* sanity check some flag combinations */
	if (flags & FORK_THREAD) {
		if ((flags & (FORK_SIGHAND | FORK_NOZOMBIE)) !=
		    (FORK_SIGHAND | FORK_NOZOMBIE))
			return (EINVAL);
	}
	if (flags & FORK_SIGHAND && (flags & FORK_SHAREVM) == 0)
		return (EINVAL);

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  We reserve
	 * the last 5 processes to root.  The variable nprocesses is the
	 * current number of processes, maxprocess is the limit.  Similar
	 * rules for threads (struct proc): we reserve the last 5 to root;
	 * the variable nthreads is the current number of procs, maxthread is
	 * the limit.
	 */
	uid = curp->p_cred->p_ruid;
	if ((nthreads >= maxthread - 5 && uid != 0) || nthreads >= maxthread) {
		static struct timeval lasttfm;

		if (ratecheck(&lasttfm, &fork_tfmrate))
			tablefull("proc");
		return (EAGAIN);
	}
	nthreads++;

	if ((flags & FORK_THREAD) == 0) {
		if ((nprocesses >= maxprocess - 5 && uid != 0) ||
		    nprocesses >= maxprocess) {
			static struct timeval lasttfm;

			if (ratecheck(&lasttfm, &fork_tfmrate))
				tablefull("process");
			nthreads--;
			return (EAGAIN);
		}
		nprocesses++;

		/*
		 * Increment the count of processes running with
		 * this uid.  Don't allow a nonprivileged user to
		 * exceed their current limit.
		 */
		count = chgproccnt(uid, 1);
		if (uid != 0 &&
		    count > curp->p_rlimit[RLIMIT_NPROC].rlim_cur) {
			(void)chgproccnt(uid, -1);
			nprocesses--;
			nthreads--;
			return (EAGAIN);
		}
	}

	uaddr = uvm_km_kmemalloc_pla(kernel_map, uvm.kernel_object, USPACE,
	    USPACE_ALIGN, UVM_KMF_ZERO,
	    no_constraint.ucr_low, no_constraint.ucr_high,
	    0, 0, USPACE/PAGE_SIZE);
	if (uaddr == 0) {
		if ((flags & FORK_THREAD) == 0) {
			(void)chgproccnt(uid, -1);
			nprocesses--;
		}
		nthreads--;
		return (ENOMEM);
	}

	/*
	 * From now on, we're committed to the fork and cannot fail.
	 */

	/* Allocate new proc. */
	p = pool_get(&proc_pool, PR_WAITOK);

	p->p_stat = SIDL;			/* protect against others */
	p->p_exitsig = exitsig;
	p->p_flag = 0;
	p->p_xstat = 0;

	if (flags & FORK_THREAD) {
		atomic_setbits_int(&p->p_flag, P_THREAD);
		p->p_p = pr = curpr;
		pr->ps_refcnt++;
	} else {
		process_new(p, curpr);
		pr = p->p_p;
	}

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	bzero(&p->p_startzero,
	    (unsigned) ((caddr_t)&p->p_endzero - (caddr_t)&p->p_startzero));
	bcopy(&curp->p_startcopy, &p->p_startcopy,
	    (unsigned) ((caddr_t)&p->p_endcopy - (caddr_t)&p->p_startcopy));

	/*
	 * Initialize the timeouts.
	 */
	timeout_set(&p->p_sleep_to, endtsleep, p);

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	if ((flags & FORK_THREAD) == 0) {
		if (curpr->ps_flags & PS_PROFIL)
			startprofclock(pr);
		if ((flags & FORK_PTRACE) && (curpr->ps_flags & PS_TRACED))
			atomic_setbits_int(&pr->ps_flags, PS_TRACED);
	}

	/* bump references to the text vnode (for procfs) */
	p->p_textvp = curp->p_textvp;
	if (p->p_textvp)
		vref(p->p_textvp);

	if (flags & FORK_SHAREFILES)
		p->p_fd = fdshare(curp);
	else
		p->p_fd = fdcopy(curp);

	if (flags & FORK_PPWAIT) {
		atomic_setbits_int(&pr->ps_flags, PS_PPWAIT);
		atomic_setbits_int(&curpr->ps_flags, PS_ISPWAIT);
	}
	if (flags & FORK_NOZOMBIE)
		atomic_setbits_int(&p->p_flag, P_NOZOMBIE);

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 * If not inherited, these were zeroed above.
	 */
	if ((flags & FORK_THREAD) == 0 && curpr->ps_traceflag & KTRFAC_INHERIT)
		ktrsettrace(pr, curpr->ps_traceflag, curpr->ps_tracevp,
		    curpr->ps_tracecred);
#endif

	/*
	 * set priority of child to be that of parent
	 * XXX should move p_estcpu into the region of struct proc which gets
	 * copied.
	 */
	scheduler_fork_hook(curp, p);

	/*
	 * Create signal actions for the child process.
	 */
	if (flags & FORK_SIGHAND)
		p->p_sigacts = sigactsshare(curp);
	else
		p->p_sigacts = sigactsinit(curp);
	if (flags & FORK_THREAD)
		sigstkinit(&p->p_sigstk);

	/*
	 * If emulation has process fork hook, call it now.
	 */
	if (p->p_emul->e_proc_fork)
		(*p->p_emul->e_proc_fork)(p, curp);

	p->p_addr = (struct user *)uaddr;

	/*
	 * Finish creating the child process.  It will return through a
	 * different path later.
	 */
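	/*
	 * uvm_fork() gives the child either a shared reference to the
	 * parent's vmspace (FORK_SHAREVM) or a copy-on-write copy of it,
	 * and arranges for the child to resume in the kernel at func
	 * (child_return() unless the caller supplied its own entry
	 * point), with arg as its sole argument.
	 */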
	uvm_fork(curp, p, ((flags & FORK_SHAREVM) ? TRUE : FALSE), stack,
	    0, func ? func : child_return, arg ? arg : p);

	vm = p->p_vmspace;

	if (flags & FORK_FORK) {
		forkstat.cntfork++;
		forkstat.sizfork += vm->vm_dsize + vm->vm_ssize;
	} else if (flags & FORK_VFORK) {
		forkstat.cntvfork++;
		forkstat.sizvfork += vm->vm_dsize + vm->vm_ssize;
	} else if (flags & FORK_TFORK) {
		forkstat.cnttfork++;
	} else {
		forkstat.cntkthread++;
		forkstat.sizkthread += vm->vm_dsize + vm->vm_ssize;
	}

	if (pr->ps_flags & PS_TRACED && flags & FORK_FORK)
		newptstat = malloc(sizeof(*newptstat), M_SUBPROC, M_WAITOK);
#if NSYSTRACE > 0
	if (ISSET(curp->p_flag, P_SYSTRACE))
		newstrp = systrace_getproc();
#endif

	/* Find an unused pid satisfying 1 <= lastpid <= PID_MAX */
	do {
		lastpid = 1 + (randompid ? arc4random() : lastpid) % PID_MAX;
	} while (pidtaken(lastpid));
	p->p_pid = lastpid;

	LIST_INSERT_HEAD(&allproc, p, p_list);
	LIST_INSERT_HEAD(PIDHASH(p->p_pid), p, p_hash);
	if ((flags & FORK_THREAD) == 0) {
		LIST_INSERT_AFTER(curpr, pr, ps_pglist);
		LIST_INSERT_HEAD(&curpr->ps_children, pr, ps_sibling);

		if (pr->ps_flags & PS_TRACED) {
			pr->ps_oppid = curpr->ps_pid;
			if (pr->ps_pptr != curpr->ps_pptr)
				proc_reparent(pr, curpr->ps_pptr);

			/*
			 * Set ptrace status.
			 */
			if (flags & FORK_FORK) {
				pr->ps_ptstat = newptstat;
				newptstat = NULL;
				curpr->ps_ptstat->pe_report_event = PTRACE_FORK;
				pr->ps_ptstat->pe_report_event = PTRACE_FORK;
				curpr->ps_ptstat->pe_other_pid = pr->ps_pid;
				pr->ps_ptstat->pe_other_pid = curpr->ps_pid;
			}
		}
	} else {
		TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);
		/*
		 * if somebody else wants to take us to single threaded mode,
		 * count ourselves in.
		 */
		if (pr->ps_single) {
			curpr->ps_singlecount++;
			atomic_setbits_int(&p->p_flag, P_SUSPSINGLE);
		}
	}

#if NSYSTRACE > 0
	if (newstrp)
		systrace_fork(curp, p, newstrp);
#endif

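	/*
	 * Report the new thread's ID to userland if requested.  Thread
	 * IDs handed out to userland are the kernel pid plus
	 * THREAD_PID_OFFSET, which keeps them from colliding with
	 * ordinary process IDs.
	 */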
	if (tidptr != NULL) {
		pid_t	pid = p->p_pid + THREAD_PID_OFFSET;

		if (copyout(&pid, tidptr, sizeof(pid)))
			psignal(curp, SIGSEGV);
	}

	/*
	 * For new processes, set accounting bits
	 */
	if ((flags & FORK_THREAD) == 0) {
		getnanotime(&pr->ps_start);
		pr->ps_acflag = AFORK;
	}

	/*
	 * Make child runnable and add to run queue.
	 */
	SCHED_LOCK(s);
	p->p_stat = SRUN;
	p->p_cpu = sched_choosecpu_fork(curp, flags);
	setrunqueue(p);
	SCHED_UNLOCK(s);

	if (newptstat)
		free(newptstat, M_SUBPROC);

	/*
	 * Notify any interested parties about the new process.
	 */
	if ((flags & FORK_THREAD) == 0)
		KNOTE(&curpr->ps_klist, NOTE_FORK | p->p_pid);

	/*
	 * Update stats now that we know the fork was successful.
	 */
	uvmexp.forks++;
	if (flags & FORK_PPWAIT)
		uvmexp.forks_ppwait++;
	if (flags & FORK_SHAREVM)
		uvmexp.forks_sharevm++;

	/*
	 * Pass a pointer to the new process to the caller.
	 */
	if (rnewprocp != NULL)
		*rnewprocp = p;

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set PS_PPWAIT on child and PS_ISPWAIT
	 * on ourselves, and sleep on our process for the latter flag
	 * to go away.
	 * XXX Need to stop other rthreads in the parent
	 */
	if (flags & FORK_PPWAIT)
		while (curpr->ps_flags & PS_ISPWAIT)
			tsleep(curpr, PWAIT, "ppwait", 0);

	/*
	 * If we're tracing the child, alert the parent too.
	 */
	if ((flags & FORK_PTRACE) && (curpr->ps_flags & PS_TRACED))
		psignal(curp, SIGTRAP);

	/*
	 * Return child pid to parent process,
	 * marking us as parent via retval[1].
	 */
	if (retval != NULL) {
		retval[0] = p->p_pid +
		    (flags & FORK_THREAD ? THREAD_PID_OFFSET : 0);
		retval[1] = 0;
	}
	return (0);
}

/*
 * Checks for current use of a pid, either as a pid or pgid.
 */
int
pidtaken(pid_t pid)
{
	struct proc *p;

	if (pfind(pid) != NULL)
		return (1);
	if (pgfind(pid) != NULL)
		return (1);
	LIST_FOREACH(p, &zombproc, p_list) {
		if (p->p_pid == pid ||
		    (p->p_p->ps_pgrp && p->p_p->ps_pgrp->pg_id == pid))
			return (1);
	}
	return (0);
}

#if defined(MULTIPROCESSOR)
/*
 * XXX This is a slight hack to get newly-formed processes to
 * XXX acquire the kernel lock as soon as they run.
 */
void
proc_trampoline_mp(void)
{
	struct proc *p;

	p = curproc;

	SCHED_ASSERT_LOCKED();
	__mp_unlock(&sched_lock);
	spl0();
	SCHED_ASSERT_UNLOCKED();
	KASSERT(__mp_lock_held(&kernel_lock) == 0);

	KERNEL_LOCK();
}
#endif