/*	$OpenBSD: kern_fork.c,v 1.202 2017/12/30 20:47:00 guenther Exp $	*/
/*	$NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/exec.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/pool.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/atomic.h>
#include <sys/pledge.h>
#include <sys/unistd.h>

#include <sys/syscallargs.h>

#include <uvm/uvm.h>
#include <machine/tcb.h>

int	nprocesses = 1;		/* process 0 */
int	nthreads = 1;		/* proc 0 */
int	randompid;		/* when set to 1, pid's go random */
struct	forkstat forkstat;

void fork_return(void *);
pid_t alloctid(void);
pid_t allocpid(void);
int ispidtaken(pid_t);

struct proc *thread_new(struct proc *_parent, vaddr_t _uaddr);
struct process *process_new(struct proc *, struct process *, int);
int fork_check_maxthread(uid_t _uid);

void
fork_return(void *arg)
{
	struct proc *p = (struct proc *)arg;

	if (p->p_p->ps_flags & PS_TRACED)
		psignal(p, SIGTRAP);

	child_return(p);
}

int
sys_fork(struct proc *p, void *v, register_t *retval)
{
	int flags;

	flags = FORK_FORK;
	if (p->p_p->ps_ptmask & PTRACE_FORK)
		flags |= FORK_PTRACE;
	return fork1(p, flags, fork_return, NULL, retval, NULL);
}

int
sys_vfork(struct proc *p, void *v, register_t *retval)
{
	return fork1(p, FORK_VFORK|FORK_PPWAIT, child_return, NULL,
	    retval, NULL);
}

int
sys___tfork(struct proc *p, void *v, register_t *retval)
{
	struct sys___tfork_args /* {
		syscallarg(const struct __tfork *) param;
		syscallarg(size_t) psize;
	} */ *uap = v;
	size_t psize = SCARG(uap, psize);
	struct __tfork param = { 0 };
	int error;

	if (psize == 0 || psize > sizeof(param))
		return EINVAL;
	if ((error = copyin(SCARG(uap, param), &param, psize)))
		return error;
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT))
		ktrstruct(p, "tfork", &param, sizeof(param));
#endif
#ifdef TCB_INVALID
	if (TCB_INVALID(param.tf_tcb))
		return EINVAL;
#endif /* TCB_INVALID */

	return thread_fork(p, param.tf_stack, param.tf_tcb, param.tf_tid,
	    retval);
}
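
/*
 * Illustrative sketch, not part of this file: sys___tfork() is normally
 * reached through a userland thread library rather than being called
 * directly.  The struct __tfork field names below come from the checks
 * above; the wrapper name and the origin of the stack and TCB are
 * simplified assumptions, so treat this only as an approximation of
 * what something along the lines of librthread does:
 *
 *	struct __tfork param;
 *
 *	param.tf_tcb = tcb;		// new thread's TCB pointer
 *	param.tf_tid = &thr->tid;	// kernel copies the new tid out here
 *	param.tf_stack = stack_top;	// initial stack pointer for the child
 *	__tfork_thread(&param, sizeof(param), entry, arg);
 *
 * where the wrapper arranges for the child to call entry(arg) on the
 * freshly provided stack.
 */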

/*
 * Allocate and initialize a thread (proc) structure, given the parent thread.
 */
struct proc *
thread_new(struct proc *parent, vaddr_t uaddr)
{
	struct proc *p;

	p = pool_get(&proc_pool, PR_WAITOK);
	p->p_stat = SIDL;			/* protect against others */
	p->p_flag = 0;

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	memset(&p->p_startzero, 0,
	    (caddr_t)&p->p_endzero - (caddr_t)&p->p_startzero);
	memcpy(&p->p_startcopy, &parent->p_startcopy,
	    (caddr_t)&p->p_endcopy - (caddr_t)&p->p_startcopy);
	crhold(p->p_ucred);
	p->p_addr = (struct user *)uaddr;

	/*
	 * Initialize the timeouts.
	 */
	timeout_set(&p->p_sleep_to, endtsleep, p);

	/*
	 * set priority of child to be that of parent
	 * XXX should move p_estcpu into the region of struct proc which gets
	 * copied.
	 */
	scheduler_fork_hook(parent, p);

#ifdef WITNESS
	p->p_sleeplocks = NULL;
#endif

	return p;
}

/*
 * Initialize common bits of a process structure, given the initial thread.
 */
void
process_initialize(struct process *pr, struct proc *p)
{
	/* initialize the thread links */
	pr->ps_mainproc = p;
	TAILQ_INIT(&pr->ps_threads);
	TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);
	pr->ps_refcnt = 1;
	p->p_p = pr;

	/* give the process the same creds as the initial thread */
	pr->ps_ucred = p->p_ucred;
	crhold(pr->ps_ucred);
	KASSERT(p->p_ucred->cr_ref >= 2);	/* new thread and new process */

	LIST_INIT(&pr->ps_children);

	timeout_set(&pr->ps_realit_to, realitexpire, pr);
}


/*
 * Allocate and initialize a new process.
 */
struct process *
process_new(struct proc *p, struct process *parent, int flags)
{
	struct process *pr;

	pr = pool_get(&process_pool, PR_WAITOK);

	/*
	 * Make a process structure for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	memset(&pr->ps_startzero, 0,
	    (caddr_t)&pr->ps_endzero - (caddr_t)&pr->ps_startzero);
	memcpy(&pr->ps_startcopy, &parent->ps_startcopy,
	    (caddr_t)&pr->ps_endcopy - (caddr_t)&pr->ps_startcopy);

	process_initialize(pr, p);
	pr->ps_pid = allocpid();

	/* post-copy fixups */
	pr->ps_pptr = parent;
	pr->ps_limit->p_refcnt++;

	/* bump references to the text vnode (for sysctl) */
	pr->ps_textvp = parent->ps_textvp;
	if (pr->ps_textvp)
		vref(pr->ps_textvp);

	pr->ps_flags = parent->ps_flags &
	    (PS_SUGID | PS_SUGIDEXEC | PS_PLEDGE | PS_EXECPLEDGE | PS_WXNEEDED);
	if (parent->ps_session->s_ttyvp != NULL)
		pr->ps_flags |= parent->ps_flags & PS_CONTROLT;

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	if (flags & FORK_SHAREFILES)
		pr->ps_fd = fdshare(parent);
	else
		pr->ps_fd = fdcopy(parent);
	if (flags & FORK_SIGHAND)
		pr->ps_sigacts = sigactsshare(parent);
	else
		pr->ps_sigacts = sigactsinit(parent);
	if (flags & FORK_SHAREVM)
		pr->ps_vmspace = uvmspace_share(parent);
	else
		pr->ps_vmspace = uvmspace_fork(parent);

	if (parent->ps_flags & PS_PROFIL)
		startprofclock(pr);
	if (flags & FORK_PTRACE)
		pr->ps_flags |= parent->ps_flags & PS_TRACED;
	if (flags & FORK_NOZOMBIE)
		pr->ps_flags |= PS_NOZOMBIE;
	if (flags & FORK_SYSTEM)
		pr->ps_flags |= PS_SYSTEM;

	/* mark as embryo to protect against others */
	pr->ps_flags |= PS_EMBRYO;

	/* Force visibility of all of the above changes */
	membar_producer();

	/* it's sufficiently inited to be globally visible */
	LIST_INSERT_HEAD(&allprocess, pr, ps_list);

	return pr;
}

/* print the 'table full' message once per 10 seconds */
struct timeval fork_tfmrate = { 10, 0 };

int
fork_check_maxthread(uid_t uid)
{
	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  We reserve
	 * the last 5 processes to root.  The variable nprocesses is the
	 * current number of processes, maxprocess is the limit.  Similar
	 * rules for threads (struct proc): we reserve the last 5 to root;
	 * the variable nthreads is the current number of procs, maxthread is
	 * the limit.
	 */
	if ((nthreads >= maxthread - 5 && uid != 0) || nthreads >= maxthread) {
		static struct timeval lasttfm;

		if (ratecheck(&lasttfm, &fork_tfmrate))
			tablefull("proc");
		return EAGAIN;
	}
	nthreads++;

	return 0;
}

static inline void
fork_thread_start(struct proc *p, struct proc *parent, int flags)
{
	int s;

	SCHED_LOCK(s);
	p->p_stat = SRUN;
	p->p_cpu = sched_choosecpu_fork(parent, flags);
	setrunqueue(p);
	SCHED_UNLOCK(s);
}

int
fork1(struct proc *curp, int flags, void (*func)(void *), void *arg,
    register_t *retval, struct proc **rnewprocp)
{
	struct process *curpr = curp->p_p;
	struct process *pr;
	struct proc *p;
	uid_t uid = curp->p_ucred->cr_ruid;
	struct vmspace *vm;
	int count;
	vaddr_t uaddr;
	int error;
	struct ptrace_state *newptstat = NULL;

	KASSERT((flags & ~(FORK_FORK | FORK_VFORK | FORK_PPWAIT | FORK_PTRACE
	    | FORK_IDLE | FORK_SHAREVM | FORK_SHAREFILES | FORK_NOZOMBIE
	    | FORK_SYSTEM | FORK_SIGHAND)) == 0);
	KASSERT((flags & FORK_SIGHAND) == 0 || (flags & FORK_SHAREVM));
	KASSERT(func != NULL);

	if ((error = fork_check_maxthread(uid)))
		return error;

	if ((nprocesses >= maxprocess - 5 && uid != 0) ||
	    nprocesses >= maxprocess) {
		static struct timeval lasttfm;

		if (ratecheck(&lasttfm, &fork_tfmrate))
			tablefull("process");
		nthreads--;
		return EAGAIN;
	}
	nprocesses++;

	/*
	 * Increment the count of processes running with this uid.
	 * Don't allow a nonprivileged user to exceed their current limit.
	 */
	count = chgproccnt(uid, 1);
	if (uid != 0 && count > curp->p_rlimit[RLIMIT_NPROC].rlim_cur) {
		(void)chgproccnt(uid, -1);
		nprocesses--;
		nthreads--;
		return EAGAIN;
	}

	uaddr = uvm_uarea_alloc();
	if (uaddr == 0) {
		(void)chgproccnt(uid, -1);
		nprocesses--;
		nthreads--;
		return (ENOMEM);
	}

	/*
	 * From now on, we're committed to the fork and cannot fail.
	 */
	p = thread_new(curp, uaddr);
	pr = process_new(p, curpr, flags);

	p->p_fd = pr->ps_fd;
	p->p_vmspace = pr->ps_vmspace;
	if (pr->ps_flags & PS_SYSTEM)
		atomic_setbits_int(&p->p_flag, P_SYSTEM);

	if (flags & FORK_PPWAIT) {
		atomic_setbits_int(&pr->ps_flags, PS_PPWAIT);
		atomic_setbits_int(&curpr->ps_flags, PS_ISPWAIT);
	}

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 * If not inherited, these were zeroed above.
	 */
	if (curpr->ps_traceflag & KTRFAC_INHERIT)
		ktrsettrace(pr, curpr->ps_traceflag, curpr->ps_tracevp,
		    curpr->ps_tracecred);
#endif

	/*
	 * Finish creating the child thread.  cpu_fork() will copy
	 * and update the pcb and make the child ready to run.  If
	 * this is a normal user fork, the child will exit directly
	 * to user mode via child_return() on its first time slice
	 * and will not return here.  If this is a kernel thread,
	 * the specified entry point will be executed.
	 */
	cpu_fork(curp, p, NULL, NULL, func, arg ? arg : p);

	vm = pr->ps_vmspace;

	if (flags & FORK_FORK) {
		forkstat.cntfork++;
		forkstat.sizfork += vm->vm_dsize + vm->vm_ssize;
	} else if (flags & FORK_VFORK) {
		forkstat.cntvfork++;
		forkstat.sizvfork += vm->vm_dsize + vm->vm_ssize;
	} else {
		forkstat.cntkthread++;
	}

	if (pr->ps_flags & PS_TRACED && flags & FORK_FORK)
		newptstat = malloc(sizeof(*newptstat), M_SUBPROC, M_WAITOK);

	p->p_tid = alloctid();

	LIST_INSERT_HEAD(&allproc, p, p_list);
	LIST_INSERT_HEAD(TIDHASH(p->p_tid), p, p_hash);
	LIST_INSERT_HEAD(PIDHASH(pr->ps_pid), pr, ps_hash);
	LIST_INSERT_AFTER(curpr, pr, ps_pglist);
	LIST_INSERT_HEAD(&curpr->ps_children, pr, ps_sibling);

	if (pr->ps_flags & PS_TRACED) {
		pr->ps_oppid = curpr->ps_pid;
		if (pr->ps_pptr != curpr->ps_pptr)
			proc_reparent(pr, curpr->ps_pptr);

		/*
		 * Set ptrace status.
		 */
		if (newptstat != NULL) {
			pr->ps_ptstat = newptstat;
			newptstat = NULL;
			curpr->ps_ptstat->pe_report_event = PTRACE_FORK;
			pr->ps_ptstat->pe_report_event = PTRACE_FORK;
			curpr->ps_ptstat->pe_other_pid = pr->ps_pid;
			pr->ps_ptstat->pe_other_pid = curpr->ps_pid;
		}
	}

	/*
	 * For new processes, set accounting bits and mark as complete.
	 */
	getnanotime(&pr->ps_start);
	pr->ps_acflag = AFORK;
	atomic_clearbits_int(&pr->ps_flags, PS_EMBRYO);

	if ((flags & FORK_IDLE) == 0)
		fork_thread_start(p, curp, flags);
	else
		p->p_cpu = arg;

	free(newptstat, M_SUBPROC, sizeof(*newptstat));

	/*
	 * Notify any interested parties about the new process.
	 */
	KNOTE(&curpr->ps_klist, NOTE_FORK | pr->ps_pid);

	/*
	 * Update stats now that we know the fork was successful.
	 */
	uvmexp.forks++;
	if (flags & FORK_PPWAIT)
		uvmexp.forks_ppwait++;
	if (flags & FORK_SHAREVM)
		uvmexp.forks_sharevm++;

	/*
	 * Pass a pointer to the new process to the caller.
	 */
	if (rnewprocp != NULL)
		*rnewprocp = p;

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set PS_PPWAIT on child and PS_ISPWAIT
	 * on ourselves, and sleep on our process for the latter flag
	 * to go away.
	 * XXX Need to stop other rthreads in the parent
	 */
	if (flags & FORK_PPWAIT)
		while (curpr->ps_flags & PS_ISPWAIT)
			tsleep(curpr, PWAIT, "ppwait", 0);

	/*
	 * If we're tracing the child, alert the parent too.
	 */
	if ((flags & FORK_PTRACE) && (curpr->ps_flags & PS_TRACED))
		psignal(curp, SIGTRAP);

	/*
	 * Return child pid to parent process
	 */
	if (retval != NULL) {
		retval[0] = pr->ps_pid;
		retval[1] = 0;
	}
	return (0);
}
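
/*
 * Illustrative sketch, not part of this file: fork1() is also the entry
 * point for kernel threads.  A creator along the lines of kthread_create()
 * would call it roughly as
 *
 *	fork1(&proc0, FORK_SHAREVM | FORK_SYSTEM | FORK_NOZOMBIE |
 *	    FORK_SIGHAND, func, arg, NULL, &p);
 *
 * so the child shares the kernel's vmspace, is marked PS_SYSTEM and never
 * becomes a zombie.  The exact flag combination lives in kern_kthread.c;
 * treat the call above as an approximation.
 */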

int
thread_fork(struct proc *curp, void *stack, void *tcb, pid_t *tidptr,
    register_t *retval)
{
	struct process *pr = curp->p_p;
	struct proc *p;
	pid_t tid;
	vaddr_t uaddr;
	int error;

	if (stack == NULL)
		return EINVAL;

	if ((error = fork_check_maxthread(curp->p_ucred->cr_ruid)))
		return error;

	uaddr = uvm_uarea_alloc();
	if (uaddr == 0) {
		nthreads--;
		return ENOMEM;
	}

	/*
	 * From now on, we're committed to the fork and cannot fail.
	 */
	p = thread_new(curp, uaddr);
	atomic_setbits_int(&p->p_flag, P_THREAD);
	sigstkinit(&p->p_sigstk);

	/* other links */
	p->p_p = pr;
	pr->ps_refcnt++;

	/* local copies */
	p->p_fd = pr->ps_fd;
	p->p_vmspace = pr->ps_vmspace;

	/*
	 * Finish creating the child thread.  cpu_fork() will copy
	 * and update the pcb and make the child ready to run.  The
	 * child will exit directly to user mode via child_return()
	 * on its first time slice and will not return here.
	 */
	cpu_fork(curp, p, stack, tcb, child_return, p);

	p->p_tid = alloctid();

	LIST_INSERT_HEAD(&allproc, p, p_list);
	LIST_INSERT_HEAD(TIDHASH(p->p_tid), p, p_hash);
	TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);

	/*
	 * if somebody else wants to take us to single threaded mode,
	 * count ourselves in.
	 */
	if (pr->ps_single) {
		pr->ps_singlecount++;
		atomic_setbits_int(&p->p_flag, P_SUSPSINGLE);
	}

	/*
	 * Return tid to parent thread and copy it out to userspace
	 */
	retval[0] = tid = p->p_tid + THREAD_PID_OFFSET;
	retval[1] = 0;
	if (tidptr != NULL) {
		if (copyout(&tid, tidptr, sizeof(tid)))
			psignal(curp, SIGSEGV);
	}

	fork_thread_start(p, curp, 0);

	/*
	 * Update stats now that we know the fork was successful.
	 */
	forkstat.cnttfork++;
	uvmexp.forks++;
	uvmexp.forks_sharevm++;

	return 0;
}


/* Find an unused tid */
pid_t
alloctid(void)
{
	pid_t tid;

	do {
		/* (0 .. TID_MASK+1] */
		tid = 1 + (arc4random() & TID_MASK);
	} while (tfind(tid) != NULL);

	return (tid);
}

/*
 * Checks for current use of a pid, either as a pid or pgid.
 */
pid_t oldpids[128];
int
ispidtaken(pid_t pid)
{
	uint32_t i;

	for (i = 0; i < nitems(oldpids); i++)
		if (pid == oldpids[i])
			return (1);

	if (prfind(pid) != NULL)
		return (1);
	if (pgfind(pid) != NULL)
		return (1);
	if (zombiefind(pid) != NULL)
		return (1);
	return (0);
}

/* Find an unused pid */
pid_t
allocpid(void)
{
	static pid_t lastpid;
	pid_t pid;

	if (!randompid) {
		/* only used early on for system processes */
		pid = ++lastpid;
	} else {
		/* Find an unused pid satisfying lastpid < pid <= PID_MAX */
		do {
			pid = arc4random_uniform(PID_MAX - lastpid) + 1 +
			    lastpid;
		} while (ispidtaken(pid));
	}

	return pid;
}

void
freepid(pid_t pid)
{
	static uint32_t idx;

	oldpids[idx++ % nitems(oldpids)] = pid;
}

#if defined(MULTIPROCESSOR)
/*
 * XXX This is a slight hack to get newly-formed processes to
 * XXX acquire the kernel lock as soon as they run.
 */
void
proc_trampoline_mp(void)
{
	SCHED_ASSERT_LOCKED();
	__mp_unlock(&sched_lock);
	spl0();
	SCHED_ASSERT_UNLOCKED();
	KERNEL_ASSERT_UNLOCKED();

	KERNEL_LOCK();
}
#endif