/*	$OpenBSD: kern_fork.c,v 1.216 2019/10/22 21:19:22 cheloha Exp $	*/
/*	$NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/exec.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/pool.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/atomic.h>
#include <sys/pledge.h>
#include <sys/unistd.h>

#include <sys/syscallargs.h>

#include <uvm/uvm.h>
#include <machine/tcb.h>

#include "kcov.h"

int	nprocesses = 1;		/* process 0 */
int	nthreads = 1;		/* proc 0 */
int	randompid;		/* when set to 1, pid's go random */
struct	forkstat forkstat;

void fork_return(void *);
pid_t alloctid(void);
pid_t allocpid(void);
int ispidtaken(pid_t);

void unveil_copy(struct process *parent, struct process *child);

struct proc *thread_new(struct proc *_parent, vaddr_t _uaddr);
struct process *process_new(struct proc *, struct process *, int);
int fork_check_maxthread(uid_t _uid);

void
fork_return(void *arg)
{
	struct proc *p = (struct proc *)arg;

	if (p->p_p->ps_flags & PS_TRACED)
		psignal(p, SIGTRAP);

	child_return(p);
}

int
sys_fork(struct proc *p, void *v, register_t *retval)
{
	int flags;

	flags = FORK_FORK;
	if (p->p_p->ps_ptmask & PTRACE_FORK)
		flags |= FORK_PTRACE;
	return fork1(p, flags, fork_return, NULL, retval, NULL);
}

int
sys_vfork(struct proc *p, void *v, register_t *retval)
{
	return fork1(p, FORK_VFORK|FORK_PPWAIT, child_return, NULL,
	    retval, NULL);
}

int
sys___tfork(struct proc *p, void *v, register_t *retval)
{
	struct sys___tfork_args /* {
		syscallarg(const struct __tfork) *param;
		syscallarg(size_t) psize;
	} */ *uap = v;
	size_t psize = SCARG(uap, psize);
	struct __tfork param = { 0 };
	int error;

	if (psize == 0 || psize > sizeof(param))
		return EINVAL;
	if ((error = copyin(SCARG(uap, param), &param, psize)))
		return error;
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT))
		ktrstruct(p, "tfork", &param, sizeof(param));
#endif
#ifdef TCB_INVALID
	if (TCB_INVALID(param.tf_tcb))
		return EINVAL;
#endif /* TCB_INVALID */

	return thread_fork(p, param.tf_stack, param.tf_tcb, param.tf_tid,
	    retval);
}
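
/*
 * For reference, the parameter block copied in above has this shape in
 * userspace (field names taken from the uses of "param" here; see the
 * installed <sys/unistd.h> for the authoritative definition):
 *
 *	struct __tfork {
 *		void	*tf_tcb;	tcb pointer for the new thread
 *		pid_t	*tf_tid;	where to store the tid; may be NULL
 *		void	*tf_stack;	initial stack; must not be NULL
 *	};
 *
 * Accepting psize < sizeof(param) lets binaries built against a shorter
 * version of the struct keep working: param is zero-initialized, so any
 * trailing fields they do not supply stay zero.
 */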

/*
 * Allocate and initialize a thread (proc) structure, given the parent thread.
 */
struct proc *
thread_new(struct proc *parent, vaddr_t uaddr)
{
	struct proc *p;

	p = pool_get(&proc_pool, PR_WAITOK);
	p->p_stat = SIDL;			/* protect against others */
	p->p_flag = 0;
	p->p_limit = NULL;

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	memset(&p->p_startzero, 0,
	    (caddr_t)&p->p_endzero - (caddr_t)&p->p_startzero);
	memcpy(&p->p_startcopy, &parent->p_startcopy,
	    (caddr_t)&p->p_endcopy - (caddr_t)&p->p_startcopy);
	crhold(p->p_ucred);
	p->p_addr = (struct user *)uaddr;

	/*
	 * Initialize the timeouts.
	 */
	timeout_set(&p->p_sleep_to, endtsleep, p);

#ifdef WITNESS
	p->p_sleeplocks = NULL;
#endif

#if NKCOV > 0
	p->p_kd = NULL;
#endif

	return p;
}
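
/*
 * A note on the memset/memcpy idiom above (and in process_new() below):
 * struct proc and struct process carry marker members (p_startzero/
 * p_endzero and p_startcopy/p_endcopy, plus the ps_* equivalents) that
 * delimit the region cleared to zero and the region inherited verbatim
 * from the parent.  A member that sits outside both regions must be set
 * up by hand, as p_stat, p_flag, p_limit and p_addr are above.
 */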

/*
 * Initialize common bits of a process structure, given the initial thread.
 */
void
process_initialize(struct process *pr, struct proc *p)
{
	/* initialize the thread links */
	pr->ps_mainproc = p;
	TAILQ_INIT(&pr->ps_threads);
	TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);
	pr->ps_refcnt = 1;
	p->p_p = pr;

	/* give the process the same creds as the initial thread */
	pr->ps_ucred = p->p_ucred;
	crhold(pr->ps_ucred);
	KASSERT(p->p_ucred->cr_ref >= 2);	/* new thread and new process */

	LIST_INIT(&pr->ps_children);
	LIST_INIT(&pr->ps_ftlist);
	LIST_INIT(&pr->ps_kqlist);
	LIST_INIT(&pr->ps_sigiolst);

	mtx_init(&pr->ps_mtx, IPL_MPFLOOR);

	timeout_set(&pr->ps_realit_to, realitexpire, pr);
	timeout_set(&pr->ps_rucheck_to, rucheck, pr);
}


/*
 * Allocate and initialize a new process.
 */
struct process *
process_new(struct proc *p, struct process *parent, int flags)
{
	struct process *pr;

	pr = pool_get(&process_pool, PR_WAITOK);

	/*
	 * Make a process structure for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	memset(&pr->ps_startzero, 0,
	    (caddr_t)&pr->ps_endzero - (caddr_t)&pr->ps_startzero);
	memcpy(&pr->ps_startcopy, &parent->ps_startcopy,
	    (caddr_t)&pr->ps_endcopy - (caddr_t)&pr->ps_startcopy);

	process_initialize(pr, p);
	pr->ps_pid = allocpid();
	lim_fork(parent, pr);

	/* post-copy fixups */
	pr->ps_pptr = parent;

	/* bump references to the text vnode (for sysctl) */
	pr->ps_textvp = parent->ps_textvp;
	if (pr->ps_textvp)
		vref(pr->ps_textvp);

	/* copy unveil if unveil is active */
	unveil_copy(parent, pr);

	pr->ps_flags = parent->ps_flags &
	    (PS_SUGID | PS_SUGIDEXEC | PS_PLEDGE | PS_EXECPLEDGE | PS_WXNEEDED);
	if (parent->ps_session->s_ttyvp != NULL)
		pr->ps_flags |= parent->ps_flags & PS_CONTROLT;

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	if (flags & FORK_SHAREFILES)
		pr->ps_fd = fdshare(parent);
	else
		pr->ps_fd = fdcopy(parent);
	if (flags & FORK_SIGHAND)
		pr->ps_sigacts = sigactsshare(parent);
	else
		pr->ps_sigacts = sigactsinit(parent);
	if (flags & FORK_SHAREVM)
		pr->ps_vmspace = uvmspace_share(parent);
	else
		pr->ps_vmspace = uvmspace_fork(parent);

	if (parent->ps_flags & PS_PROFIL)
		startprofclock(pr);
	if (flags & FORK_PTRACE)
		pr->ps_flags |= parent->ps_flags & PS_TRACED;
	if (flags & FORK_NOZOMBIE)
		pr->ps_flags |= PS_NOZOMBIE;
	if (flags & FORK_SYSTEM)
		pr->ps_flags |= PS_SYSTEM;

	/* mark as embryo to protect against others */
	pr->ps_flags |= PS_EMBRYO;

	/* Force visibility of all of the above changes */
	membar_producer();

	/* it's sufficiently inited to be globally visible */
	LIST_INSERT_HEAD(&allprocess, pr, ps_list);

	return pr;
}

/* print the 'table full' message once per 10 seconds */
struct timeval fork_tfmrate = { 10, 0 };

int
fork_check_maxthread(uid_t uid)
{
	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  We reserve
	 * the last 5 processes to root.  The variable nprocesses is the
	 * current number of processes, maxprocess is the limit.  Similar
	 * rules for threads (struct proc): we reserve the last 5 to root;
	 * the variable nthreads is the current number of procs, maxthread is
	 * the limit.
	 */
	if ((nthreads >= maxthread - 5 && uid != 0) || nthreads >= maxthread) {
		static struct timeval lasttfm;

		if (ratecheck(&lasttfm, &fork_tfmrate))
			tablefull("proc");
		return EAGAIN;
	}
	nthreads++;

	return 0;
}

static inline void
fork_thread_start(struct proc *p, struct proc *parent, int flags)
{
	struct cpu_info *ci;
	int s;

	SCHED_LOCK(s);
	ci = sched_choosecpu_fork(parent, flags);
	setrunqueue(ci, p, p->p_priority);
	SCHED_UNLOCK(s);
}
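
/*
 * fork1() below is the common back end for fork(2), vfork(2) and kernel
 * thread creation.  Once the table and rlimit checks and the u-area
 * allocation succeed it is committed and cannot fail: it allocates the
 * thread and process structures, lets cpu_fork() build the child's pcb
 * so the child resumes in "func" (fork_return or child_return for user
 * forks), links the new process into allproc, the pid/tid hashes and
 * the parent's child list, and finally either puts the child on a run
 * queue or, for FORK_IDLE, records the caller-supplied cpu_info in
 * p_cpu without enqueueing it.
 */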
int
fork1(struct proc *curp, int flags, void (*func)(void *), void *arg,
    register_t *retval, struct proc **rnewprocp)
{
	struct process *curpr = curp->p_p;
	struct process *pr;
	struct proc *p;
	uid_t uid = curp->p_ucred->cr_ruid;
	struct vmspace *vm;
	int count;
	vaddr_t uaddr;
	int error;
	struct ptrace_state *newptstat = NULL;

	KASSERT((flags & ~(FORK_FORK | FORK_VFORK | FORK_PPWAIT | FORK_PTRACE
	    | FORK_IDLE | FORK_SHAREVM | FORK_SHAREFILES | FORK_NOZOMBIE
	    | FORK_SYSTEM | FORK_SIGHAND)) == 0);
	KASSERT((flags & FORK_SIGHAND) == 0 || (flags & FORK_SHAREVM));
	KASSERT(func != NULL);

	if ((error = fork_check_maxthread(uid)))
		return error;

	if ((nprocesses >= maxprocess - 5 && uid != 0) ||
	    nprocesses >= maxprocess) {
		static struct timeval lasttfm;

		if (ratecheck(&lasttfm, &fork_tfmrate))
			tablefull("process");
		nthreads--;
		return EAGAIN;
	}
	nprocesses++;

	/*
	 * Increment the count of processes running with this uid.
	 * Don't allow a nonprivileged user to exceed their current limit.
	 */
	count = chgproccnt(uid, 1);
	if (uid != 0 && count > lim_cur(RLIMIT_NPROC)) {
		(void)chgproccnt(uid, -1);
		nprocesses--;
		nthreads--;
		return EAGAIN;
	}

	uaddr = uvm_uarea_alloc();
	if (uaddr == 0) {
		(void)chgproccnt(uid, -1);
		nprocesses--;
		nthreads--;
		return (ENOMEM);
	}

	/*
	 * From now on, we're committed to the fork and cannot fail.
	 */
	p = thread_new(curp, uaddr);
	pr = process_new(p, curpr, flags);

	p->p_fd = pr->ps_fd;
	p->p_vmspace = pr->ps_vmspace;
	if (pr->ps_flags & PS_SYSTEM)
		atomic_setbits_int(&p->p_flag, P_SYSTEM);

	if (flags & FORK_PPWAIT) {
		atomic_setbits_int(&pr->ps_flags, PS_PPWAIT);
		atomic_setbits_int(&curpr->ps_flags, PS_ISPWAIT);
	}

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 * If not inherited, these were zeroed above.
	 */
	if (curpr->ps_traceflag & KTRFAC_INHERIT)
		ktrsettrace(pr, curpr->ps_traceflag, curpr->ps_tracevp,
		    curpr->ps_tracecred);
#endif

	/*
	 * Finish creating the child thread.  cpu_fork() will copy
	 * and update the pcb and make the child ready to run.  If
	 * this is a normal user fork, the child will exit directly
	 * to user mode via child_return() on its first time slice
	 * and will not return here.  If this is a kernel thread,
	 * the specified entry point will be executed.
	 */
	cpu_fork(curp, p, NULL, NULL, func, arg ? arg : p);

	vm = pr->ps_vmspace;

	if (flags & FORK_FORK) {
		forkstat.cntfork++;
		forkstat.sizfork += vm->vm_dsize + vm->vm_ssize;
	} else if (flags & FORK_VFORK) {
		forkstat.cntvfork++;
		forkstat.sizvfork += vm->vm_dsize + vm->vm_ssize;
	} else {
		forkstat.cntkthread++;
	}

	if (pr->ps_flags & PS_TRACED && flags & FORK_FORK)
		newptstat = malloc(sizeof(*newptstat), M_SUBPROC, M_WAITOK);

	p->p_tid = alloctid();

	LIST_INSERT_HEAD(&allproc, p, p_list);
	LIST_INSERT_HEAD(TIDHASH(p->p_tid), p, p_hash);
	LIST_INSERT_HEAD(PIDHASH(pr->ps_pid), pr, ps_hash);
	LIST_INSERT_AFTER(curpr, pr, ps_pglist);
	LIST_INSERT_HEAD(&curpr->ps_children, pr, ps_sibling);

	if (pr->ps_flags & PS_TRACED) {
		pr->ps_oppid = curpr->ps_pid;
		if (pr->ps_pptr != curpr->ps_pptr)
			proc_reparent(pr, curpr->ps_pptr);

		/*
		 * Set ptrace status.
		 */
		if (newptstat != NULL) {
			pr->ps_ptstat = newptstat;
			newptstat = NULL;
			curpr->ps_ptstat->pe_report_event = PTRACE_FORK;
			pr->ps_ptstat->pe_report_event = PTRACE_FORK;
			curpr->ps_ptstat->pe_other_pid = pr->ps_pid;
			pr->ps_ptstat->pe_other_pid = curpr->ps_pid;
		}
	}

	/*
	 * For new processes, set accounting bits and mark as complete.
	 */
	nanouptime(&pr->ps_start);
	pr->ps_acflag = AFORK;
	atomic_clearbits_int(&pr->ps_flags, PS_EMBRYO);

	if ((flags & FORK_IDLE) == 0)
		fork_thread_start(p, curp, flags);
	else
		p->p_cpu = arg;

	free(newptstat, M_SUBPROC, sizeof(*newptstat));

	/*
	 * Notify any interested parties about the new process.
	 */
	KNOTE(&curpr->ps_klist, NOTE_FORK | pr->ps_pid);

	/*
	 * Update stats now that we know the fork was successful.
	 */
	uvmexp.forks++;
	if (flags & FORK_PPWAIT)
		uvmexp.forks_ppwait++;
	if (flags & FORK_SHAREVM)
		uvmexp.forks_sharevm++;

	/*
	 * Pass a pointer to the new process to the caller.
	 */
	if (rnewprocp != NULL)
		*rnewprocp = p;

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set PS_PPWAIT on child and PS_ISPWAIT
	 * on ourselves, and sleep on our process for the latter flag
	 * to go away.
	 * XXX Need to stop other rthreads in the parent
	 */
	if (flags & FORK_PPWAIT)
		while (curpr->ps_flags & PS_ISPWAIT)
			tsleep(curpr, PWAIT, "ppwait", 0);

	/*
	 * If we're tracing the child, alert the parent too.
	 */
	if ((flags & FORK_PTRACE) && (curpr->ps_flags & PS_TRACED))
		psignal(curp, SIGTRAP);

	/*
	 * Return child pid to parent process
	 */
	if (retval != NULL) {
		retval[0] = pr->ps_pid;
		retval[1] = 0;
	}
	return (0);
}
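
/*
 * For a sketch of the kernel-thread case: kthread_create() in
 * kern_kthread.c is the typical non-fork caller, invoking roughly
 *
 *	fork1(&proc0, FORK_SHAREVM | FORK_SHAREFILES | FORK_NOZOMBIE |
 *	    FORK_SYSTEM, func, arg, NULL, &p);
 *
 * so the child shares proc0's address space and file table, never
 * turns into a zombie, and gets PS_SYSTEM (and thus P_SYSTEM on its
 * thread, as set above).
 */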
int
thread_fork(struct proc *curp, void *stack, void *tcb, pid_t *tidptr,
    register_t *retval)
{
	struct process *pr = curp->p_p;
	struct proc *p;
	pid_t tid;
	vaddr_t uaddr;
	int error;

	if (stack == NULL)
		return EINVAL;

	if ((error = fork_check_maxthread(curp->p_ucred->cr_ruid)))
		return error;

	uaddr = uvm_uarea_alloc();
	if (uaddr == 0) {
		nthreads--;
		return ENOMEM;
	}

	/*
	 * From now on, we're committed to the fork and cannot fail.
	 */
	p = thread_new(curp, uaddr);
	atomic_setbits_int(&p->p_flag, P_THREAD);
	sigstkinit(&p->p_sigstk);

	/* other links */
	p->p_p = pr;
	pr->ps_refcnt++;

	/* local copies */
	p->p_fd = pr->ps_fd;
	p->p_vmspace = pr->ps_vmspace;

	/*
	 * Finish creating the child thread.  cpu_fork() will copy
	 * and update the pcb and make the child ready to run.  The
	 * child will exit directly to user mode via child_return()
	 * on its first time slice and will not return here.
	 */
	cpu_fork(curp, p, stack, tcb, child_return, p);

	p->p_tid = alloctid();

	LIST_INSERT_HEAD(&allproc, p, p_list);
	LIST_INSERT_HEAD(TIDHASH(p->p_tid), p, p_hash);
	TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);

	/*
	 * if somebody else wants to take us to single threaded mode,
	 * count ourselves in.
	 */
	if (pr->ps_single) {
		pr->ps_singlecount++;
		atomic_setbits_int(&p->p_flag, P_SUSPSINGLE);
	}

	/*
	 * Return tid to parent thread and copy it out to userspace
	 */
	retval[0] = tid = p->p_tid + THREAD_PID_OFFSET;
	retval[1] = 0;
	if (tidptr != NULL) {
		if (copyout(&tid, tidptr, sizeof(tid)))
			psignal(curp, SIGSEGV);
	}

	fork_thread_start(p, curp, 0);

	/*
	 * Update stats now that we know the fork was successful.
	 */
	forkstat.cnttfork++;
	uvmexp.forks++;
	uvmexp.forks_sharevm++;

	return 0;
}


/* Find an unused tid */
pid_t
alloctid(void)
{
	pid_t tid;

	do {
		/* (0 .. TID_MASK+1] */
		tid = 1 + (arc4random() & TID_MASK);
	} while (tfind(tid) != NULL);

	return (tid);
}
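
/*
 * Tids are drawn at random from (0, TID_MASK+1] and only become
 * userspace-visible after thread_fork() adds THREAD_PID_OFFSET, which
 * places them outside the pid range (pids from allocpid() below are
 * capped at PID_MAX), so the thread and process ids a program observes
 * stay disjoint.
 */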

/*
 * Checks for current use of a pid, either as a pid or pgid.
 */
pid_t oldpids[128];
int
ispidtaken(pid_t pid)
{
	uint32_t i;

	for (i = 0; i < nitems(oldpids); i++)
		if (pid == oldpids[i])
			return (1);

	if (prfind(pid) != NULL)
		return (1);
	if (pgfind(pid) != NULL)
		return (1);
	if (zombiefind(pid) != NULL)
		return (1);
	return (0);
}

/* Find an unused pid */
pid_t
allocpid(void)
{
	static pid_t lastpid;
	pid_t pid;

	if (!randompid) {
		/* only used early on for system processes */
		pid = ++lastpid;
	} else {
		/* Find an unused pid satisfying lastpid < pid <= PID_MAX */
		do {
			pid = arc4random_uniform(PID_MAX - lastpid) + 1 +
			    lastpid;
		} while (ispidtaken(pid));
	}

	return pid;
}

void
freepid(pid_t pid)
{
	static uint32_t idx;

	oldpids[idx++ % nitems(oldpids)] = pid;
}

#if defined(MULTIPROCESSOR)
/*
 * XXX This is a slight hack to get newly-formed processes to
 * XXX acquire the kernel lock as soon as they run.
 */
void
proc_trampoline_mp(void)
{
	SCHED_ASSERT_LOCKED();
	__mp_unlock(&sched_lock);
	spl0();
	SCHED_ASSERT_UNLOCKED();
	KERNEL_ASSERT_UNLOCKED();

	KERNEL_LOCK();
}
#endif