/*	$OpenBSD: kern_fork.c,v 1.159 2014/03/22 06:05:45 guenther Exp $	*/
/*	$NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/exec.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/sched.h>
#include <dev/rndvar.h>
#include <sys/pool.h>
#include <sys/mman.h>
#include <sys/ptrace.h>

#include <sys/syscallargs.h>

#include "systrace.h"
#include <dev/systrace.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_map.h>

#ifdef __HAVE_MD_TCB
# include <machine/tcb.h>
#endif

int	nprocesses = 1;		/* process 0 */
int	nthreads = 1;		/* proc 0 */
int	randompid;		/* when set to 1, pid's go random */
struct	forkstat forkstat;

void fork_return(void *);
void tfork_child_return(void *);
int pidtaken(pid_t);

void process_new(struct proc *, struct process *, int);

void
fork_return(void *arg)
{
	struct proc *p = (struct proc *)arg;

	if (p->p_p->ps_flags & PS_TRACED)
		psignal(p, SIGTRAP);

	child_return(p);
}

/*ARGSUSED*/
int
sys_fork(struct proc *p, void *v, register_t *retval)
{
	int flags;

	flags = FORK_FORK;
	if (p->p_p->ps_ptmask & PTRACE_FORK)
		flags |= FORK_PTRACE;
	return (fork1(p, flags, NULL, 0, fork_return, NULL, retval, NULL));
}

/*ARGSUSED*/
int
sys_vfork(struct proc *p, void *v, register_t *retval)
{
	return (fork1(p, FORK_VFORK|FORK_PPWAIT, NULL, 0, NULL,
	    NULL, retval, NULL));
}

int
sys___tfork(struct proc *p, void *v, register_t *retval)
{
	struct sys___tfork_args /* {
		syscallarg(const struct __tfork) *param;
		syscallarg(size_t) psize;
	} */ *uap = v;
	size_t psize = SCARG(uap, psize);
	struct __tfork param = { 0 };
	int flags;
	int error;

	if (psize == 0 || psize > sizeof(param))
		return (EINVAL);
	if ((error = copyin(SCARG(uap, param), &param, psize)))
		return (error);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT))
		ktrstruct(p, "tfork", &param, sizeof(param));
#endif

	flags = FORK_TFORK | FORK_THREAD | FORK_SIGHAND | FORK_SHAREVM
	    | FORK_SHAREFILES;

	return (fork1(p, flags, param.tf_stack, param.tf_tid,
	    tfork_child_return, param.tf_tcb, retval, NULL));
}

void
tfork_child_return(void *arg)
{
	struct proc *p = curproc;

	TCB_SET(p, arg);
	child_return(p);
}

/*
 * Allocate and initialize a new process.
 */
void
process_new(struct proc *p, struct process *parent, int flags)
{
	struct process *pr;

	pr = pool_get(&process_pool, PR_WAITOK);
	pr->ps_mainproc = p;

	TAILQ_INIT(&pr->ps_threads);
	TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);
	pr->ps_pptr = parent;
	LIST_INIT(&pr->ps_children);
	pr->ps_refcnt = 1;

	/*
	 * Make a process structure for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	memset(&pr->ps_startzero, 0,
	    (caddr_t)&pr->ps_endzero - (caddr_t)&pr->ps_startzero);
	memcpy(&pr->ps_startcopy, &parent->ps_startcopy,
	    (caddr_t)&pr->ps_endcopy - (caddr_t)&pr->ps_startcopy);

	/* post-copy fixups */
	pr->ps_cred = pool_get(&pcred_pool, PR_WAITOK);
	memcpy(pr->ps_cred, parent->ps_cred, sizeof(*pr->ps_cred));
	crhold(parent->ps_cred->pc_ucred);
	pr->ps_limit->p_refcnt++;

	/* bump references to the text vnode (for procfs) */
	pr->ps_textvp = parent->ps_textvp;
	if (pr->ps_textvp)
		vref(pr->ps_textvp);

	timeout_set(&pr->ps_realit_to, realitexpire, pr);

	pr->ps_flags = parent->ps_flags & (PS_SUGID | PS_SUGIDEXEC);
	if (parent->ps_session->s_ttyvp != NULL &&
	    parent->ps_flags & PS_CONTROLT)
		atomic_setbits_int(&pr->ps_flags, PS_CONTROLT);

	p->p_p = pr;

	/*
	 * Create signal actions for the child process.
	 */
	if (flags & FORK_SIGHAND)
		pr->ps_sigacts = sigactsshare(parent);
	else
		pr->ps_sigacts = sigactsinit(parent);

	if (parent->ps_flags & PS_PROFIL)
		startprofclock(pr);
	if ((flags & FORK_PTRACE) && (parent->ps_flags & PS_TRACED))
		atomic_setbits_int(&pr->ps_flags, PS_TRACED);
	if (flags & FORK_NOZOMBIE)
		atomic_setbits_int(&pr->ps_flags, PS_NOZOMBIE);

	/* it's sufficiently inited to be globally visible */
	LIST_INSERT_HEAD(&allprocess, pr, ps_list);
}

/* print the 'table full' message once per 10 seconds */
struct timeval fork_tfmrate = { 10, 0 };

int
fork1(struct proc *curp, int flags, void *stack, pid_t *tidptr,
    void (*func)(void *), void *arg, register_t *retval,
    struct proc **rnewprocp)
{
	struct process *curpr = curp->p_p;
	struct process *pr;
	struct proc *p;
	uid_t uid;
	struct vmspace *vm;
	int count;
	vaddr_t uaddr;
	int s;
	struct ptrace_state *newptstat = NULL;
#if NSYSTRACE > 0
	void *newstrp = NULL;
#endif

	/* sanity check some flag combinations */
	if (flags & FORK_THREAD) {
		if ((flags & FORK_SIGHAND) == 0)
			return (EINVAL);
	}
	if (flags & FORK_SIGHAND && (flags & FORK_SHAREVM) == 0)
		return (EINVAL);

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  We reserve
	 * the last 5 processes to root.  The variable nprocesses is the
	 * current number of processes, maxprocess is the limit.  Similar
	 * rules for threads (struct proc): we reserve the last 5 to root;
	 * the variable nthreads is the current number of procs, maxthread is
	 * the limit.
	 */
	uid = curp->p_cred->p_ruid;
	if ((nthreads >= maxthread - 5 && uid != 0) || nthreads >= maxthread) {
		static struct timeval lasttfm;

		if (ratecheck(&lasttfm, &fork_tfmrate))
			tablefull("proc");
		return (EAGAIN);
	}
	nthreads++;

	if ((flags & FORK_THREAD) == 0) {
		if ((nprocesses >= maxprocess - 5 && uid != 0) ||
		    nprocesses >= maxprocess) {
			static struct timeval lasttfm;

			if (ratecheck(&lasttfm, &fork_tfmrate))
				tablefull("process");
			nthreads--;
			return (EAGAIN);
		}
		nprocesses++;

		/*
		 * Increment the count of processes running with
		 * this uid.  Don't allow a nonprivileged user to
		 * exceed their current limit.
		 */
		count = chgproccnt(uid, 1);
		if (uid != 0 && count > curp->p_rlimit[RLIMIT_NPROC].rlim_cur) {
			(void)chgproccnt(uid, -1);
			nprocesses--;
			nthreads--;
			return (EAGAIN);
		}
	}

	uaddr = uvm_km_kmemalloc_pla(kernel_map, uvm.kernel_object, USPACE,
	    USPACE_ALIGN, UVM_KMF_ZERO,
	    no_constraint.ucr_low, no_constraint.ucr_high,
	    0, 0, USPACE/PAGE_SIZE);
	if (uaddr == 0) {
		if ((flags & FORK_THREAD) == 0) {
			(void)chgproccnt(uid, -1);
			nprocesses--;
		}
		nthreads--;
		return (ENOMEM);
	}

	/*
	 * From now on, we're committed to the fork and cannot fail.
	 */

	/* Allocate new proc. */
	p = pool_get(&proc_pool, PR_WAITOK);

	p->p_stat = SIDL;			/* protect against others */
	p->p_flag = 0;

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	memset(&p->p_startzero, 0,
	    (caddr_t)&p->p_endzero - (caddr_t)&p->p_startzero);
	memcpy(&p->p_startcopy, &curp->p_startcopy,
	    (caddr_t)&p->p_endcopy - (caddr_t)&p->p_startcopy);

	/*
	 * Initialize the timeouts.
	 */
	timeout_set(&p->p_sleep_to, endtsleep, p);

	if (flags & FORK_THREAD) {
		atomic_setbits_int(&p->p_flag, P_THREAD);
		p->p_p = pr = curpr;
		pr->ps_refcnt++;
	} else {
		process_new(p, curpr, flags);
		pr = p->p_p;
	}

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	if (flags & FORK_SHAREFILES)
		p->p_fd = fdshare(curp);
	else
		p->p_fd = fdcopy(curp);

	if (flags & FORK_PPWAIT) {
		atomic_setbits_int(&pr->ps_flags, PS_PPWAIT);
		atomic_setbits_int(&curpr->ps_flags, PS_ISPWAIT);
	}

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 * If not inherited, these were zeroed above.
	 */
	if ((flags & FORK_THREAD) == 0 && curpr->ps_traceflag & KTRFAC_INHERIT)
		ktrsettrace(pr, curpr->ps_traceflag, curpr->ps_tracevp,
		    curpr->ps_tracecred);
#endif

	/*
	 * set priority of child to be that of parent
	 * XXX should move p_estcpu into the region of struct proc which gets
	 * copied.
	 */
	scheduler_fork_hook(curp, p);

	if (flags & FORK_THREAD)
		sigstkinit(&p->p_sigstk);

	/*
	 * If emulation has process fork hook, call it now.
	 */
	if (p->p_emul->e_proc_fork)
		(*p->p_emul->e_proc_fork)(p, curp);

	p->p_addr = (struct user *)uaddr;

	/*
	 * Finish creating the child process.  It will return through a
	 * different path later.
	 */
	uvm_fork(curp, p, ((flags & FORK_SHAREVM) ? TRUE : FALSE), stack,
	    0, func ? func : child_return, arg ? arg : p);

	vm = p->p_vmspace;

	if (flags & FORK_FORK) {
		forkstat.cntfork++;
		forkstat.sizfork += vm->vm_dsize + vm->vm_ssize;
	} else if (flags & FORK_VFORK) {
		forkstat.cntvfork++;
		forkstat.sizvfork += vm->vm_dsize + vm->vm_ssize;
	} else if (flags & FORK_TFORK) {
		forkstat.cnttfork++;
	} else {
		forkstat.cntkthread++;
		forkstat.sizkthread += vm->vm_dsize + vm->vm_ssize;
	}

	if (pr->ps_flags & PS_TRACED && flags & FORK_FORK)
		newptstat = malloc(sizeof(*newptstat), M_SUBPROC, M_WAITOK);
#if NSYSTRACE > 0
	if (ISSET(curp->p_flag, P_SYSTRACE))
		newstrp = systrace_getproc();
#endif

	p->p_pid = allocpid();

	LIST_INSERT_HEAD(&allproc, p, p_list);
	LIST_INSERT_HEAD(PIDHASH(p->p_pid), p, p_hash);
	if ((flags & FORK_THREAD) == 0) {
		LIST_INSERT_AFTER(curpr, pr, ps_pglist);
		LIST_INSERT_HEAD(&curpr->ps_children, pr, ps_sibling);

		if (pr->ps_flags & PS_TRACED) {
			pr->ps_oppid = curpr->ps_pid;
			if (pr->ps_pptr != curpr->ps_pptr)
				proc_reparent(pr, curpr->ps_pptr);

			/*
			 * Set ptrace status.
			 */
			if (flags & FORK_FORK) {
				pr->ps_ptstat = newptstat;
				newptstat = NULL;
				curpr->ps_ptstat->pe_report_event = PTRACE_FORK;
				pr->ps_ptstat->pe_report_event = PTRACE_FORK;
				curpr->ps_ptstat->pe_other_pid = pr->ps_pid;
				pr->ps_ptstat->pe_other_pid = curpr->ps_pid;
			}
		}
	} else {
		TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);
		/*
		 * if somebody else wants to take us to single threaded mode,
		 * count ourselves in.
		 */
		if (pr->ps_single) {
			curpr->ps_singlecount++;
			atomic_setbits_int(&p->p_flag, P_SUSPSINGLE);
		}
	}

#if NSYSTRACE > 0
	if (newstrp)
		systrace_fork(curp, p, newstrp);
#endif

	if (tidptr != NULL) {
		pid_t	pid = p->p_pid + THREAD_PID_OFFSET;

		if (copyout(&pid, tidptr, sizeof(pid)))
			psignal(curp, SIGSEGV);
	}

	/*
	 * For new processes, set accounting bits
	 */
	if ((flags & FORK_THREAD) == 0) {
		getnanotime(&pr->ps_start);
		pr->ps_acflag = AFORK;
	}

	/*
	 * Make child runnable and add to run queue.
	 */
	if ((flags & FORK_IDLE) == 0) {
		SCHED_LOCK(s);
		p->p_stat = SRUN;
		p->p_cpu = sched_choosecpu_fork(curp, flags);
		setrunqueue(p);
		SCHED_UNLOCK(s);
	} else
		p->p_cpu = arg;

	if (newptstat)
		free(newptstat, M_SUBPROC);

	/*
	 * Notify any interested parties about the new process.
	 */
	if ((flags & FORK_THREAD) == 0)
		KNOTE(&curpr->ps_klist, NOTE_FORK | p->p_pid);

	/*
	 * Update stats now that we know the fork was successful.
	 */
	uvmexp.forks++;
	if (flags & FORK_PPWAIT)
		uvmexp.forks_ppwait++;
	if (flags & FORK_SHAREVM)
		uvmexp.forks_sharevm++;

	/*
	 * Pass a pointer to the new process to the caller.
	 */
	if (rnewprocp != NULL)
		*rnewprocp = p;

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set PS_PPWAIT on child and PS_ISPWAIT
	 * on ourselves, and sleep on our process for the latter flag
	 * to go away.
	 * XXX Need to stop other rthreads in the parent
	 */
	if (flags & FORK_PPWAIT)
		while (curpr->ps_flags & PS_ISPWAIT)
			tsleep(curpr, PWAIT, "ppwait", 0);

	/*
	 * If we're tracing the child, alert the parent too.
	 */
	if ((flags & FORK_PTRACE) && (curpr->ps_flags & PS_TRACED))
		psignal(curp, SIGTRAP);

	/*
	 * Return child pid to parent process,
	 * marking us as parent via retval[1].
	 */
	if (retval != NULL) {
		retval[0] = p->p_pid +
		    (flags & FORK_THREAD ? THREAD_PID_OFFSET : 0);
		retval[1] = 0;
	}
	return (0);
}

/*
 * Checks for current use of a pid, either as a pid or pgid.
 */
pid_t oldpids[100];
int
ispidtaken(pid_t pid)
{
	uint32_t i;
	struct process *pr;

	for (i = 0; i < nitems(oldpids); i++)
		if (pid == oldpids[i])
			return (1);

	if (pfind(pid) != NULL)
		return (1);
	if (pgfind(pid) != NULL)
		return (1);
	LIST_FOREACH(pr, &zombprocess, ps_list) {
		if (pr->ps_pid == pid ||
		    (pr->ps_pgrp && pr->ps_pgrp->pg_id == pid))
			return (1);
	}
	return (0);
}

/* Find an unused pid satisfying 1 <= lastpid <= PID_MAX */
pid_t
allocpid(void)
{
	static pid_t lastpid;
	pid_t pid;

	if (!randompid) {
		/* only used early on for system processes */
		pid = ++lastpid;
	} else {
		do {
			pid = 1 + arc4random_uniform(PID_MAX);
		} while (ispidtaken(pid));
	}

	return pid;
}

/*
 * Remember a recently freed pid in the oldpids ring so that
 * ispidtaken() keeps allocpid() from reusing it right away.
 */
void
freepid(pid_t pid)
{
	static uint32_t idx;

	oldpids[idx++ % nitems(oldpids)] = pid;
}

#if defined(MULTIPROCESSOR)
/*
 * XXX This is a slight hack to get newly-formed processes to
 * XXX acquire the kernel lock as soon as they run.
 */
void
proc_trampoline_mp(void)
{
	struct proc *p;

	p = curproc;

	SCHED_ASSERT_LOCKED();
	__mp_unlock(&sched_lock);
	spl0();
	SCHED_ASSERT_UNLOCKED();
	KASSERT(__mp_lock_held(&kernel_lock) == 0);

	KERNEL_LOCK();
}
#endif