/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 * $FreeBSD: src/sys/kern/kern_fork.c,v 1.72.2.14 2003/06/26 04:15:10 silby Exp $
 * $DragonFly: src/sys/kern/kern_fork.c,v 1.18 2004/02/10 15:31:47 hmp Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/jail.h>

#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/vmmeter.h>
#include <sys/user.h>

static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback");

/*
 * These are the structures used to create a callout list for things to do
 * when forking a process.
 */
struct forklist {
	forklist_fn function;
	TAILQ_ENTRY(forklist) next;
};

TAILQ_HEAD(forklist_head, forklist);
static struct forklist_head fork_list = TAILQ_HEAD_INITIALIZER(fork_list);

int forksleep;			/* place for fork1() to sleep on */
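
/*
 * Return convention (informational note, not in the original sources):
 * the fork-style syscalls below hand the child's pid back to the parent
 * in sysmsg_fds[0] and place 0 in sysmsg_fds[1].  The child does not
 * return through these functions at all; it resumes execution in the
 * fork trampoline arranged by vm_fork() once start_forked_proc() has
 * scheduled it.
 */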

/* ARGSUSED */
int
fork(struct fork_args *uap)
{
	struct proc *p = curproc;
	struct proc *p2;
	int error;

	error = fork1(p, RFFDG | RFPROC, &p2);
	if (error == 0) {
		start_forked_proc(p, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
	}
	return error;
}

/* ARGSUSED */
int
vfork(struct vfork_args *uap)
{
	struct proc *p = curproc;
	struct proc *p2;
	int error;

	error = fork1(p, RFFDG | RFPROC | RFPPWAIT | RFMEM, &p2);
	if (error == 0) {
		start_forked_proc(p, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
	}
	return error;
}

int
rfork(struct rfork_args *uap)
{
	struct proc *p = curproc;
	struct proc *p2;
	int error;

	/* Don't allow kernel-only flags. */
	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);

	error = fork1(p, uap->flags, &p2);
	if (error == 0) {
		start_forked_proc(p, p2);
		uap->sysmsg_fds[0] = p2 ? p2->p_pid : 0;
		uap->sysmsg_fds[1] = 0;
	}
	return error;
}

int nprocs = 1;		/* process 0 */
static int nextpid = 0;

/*
 * Random component to nextpid generation.  We mix in a random factor to
 * make it a little harder to predict.  We sanity-check the modulus value
 * here so we don't have to do it in critical paths.  Don't let it be too
 * small or we pointlessly waste entropy, and don't let it be impossibly
 * large: a modulus that is too big causes many more process table scans
 * and slows down fork processing because the pidchecked caching is
 * defeated.
 */
static int randompid = 0;

static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error || !req->newptr)
		return (error);
	if (pid < 0 || pid > PID_MAX - 100)	/* out of range */
		pid = PID_MAX - 100;
	else if (pid < 2)			/* NOP */
		pid = 0;
	else if (pid < 100)			/* make it reasonable */
		pid = 100;
	randompid = pid;
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
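
/*
 * Illustrative example (not in the original sources): with the sysctl
 * kern.randompid set to 1000, each pid allocation in fork1() advances
 * nextpid by 1 + (arc4random() % 1000), so successive pids are scattered
 * across a window of up to ~1000 ids instead of being strictly
 * sequential.
 */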

int
fork1(struct proc *p1, int flags, struct proc **procp)
{
	struct proc *p2, *pptr;
	uid_t uid;
	struct proc *newproc;
	int ok;
	static int curfail = 0, pidchecked = 0;
	static struct timeval lastfail;
	struct forklist *ep;
	struct filedesc_to_leader *fdtol;

	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		vm_fork(p1, 0, flags);

		/*
		 * Close all file descriptors.
		 */
		if (flags & RFCFDG) {
			struct filedesc *fdtmp;
			fdtmp = fdinit(p1);
			fdfree(p1);
			p1->p_fd = fdtmp;
		}

		/*
		 * Unshare file descriptors (from parent).
		 */
		if (flags & RFFDG) {
			if (p1->p_fd->fd_refcnt > 1) {
				struct filedesc *newfd;
				newfd = fdcopy(p1);
				fdfree(p1);
				p1->p_fd = newfd;
			}
		}
		*procp = NULL;
		return (0);
	}

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last ten processes; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	uid = p1->p_ucred->cr_ruid;
	if ((nprocs >= maxproc - 10 && uid != 0) || nprocs >= maxproc) {
		if (ppsratecheck(&lastfail, &curfail, 1))
			printf("maxproc limit exceeded by uid %d, please "
			    "see tuning(7) and login.conf(5).\n", uid);
		tsleep(&forksleep, 0, "fork", hz / 2);
		return (EAGAIN);
	}

	/*
	 * Increment the nprocs resource before blocking can occur.  There
	 * are hard limits as to the number of processes that can run.
	 */
	nprocs++;

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	ok = chgproccnt(p1->p_ucred->cr_ruidinfo, 1,
		(uid != 0) ? p1->p_rlimit[RLIMIT_NPROC].rlim_cur : 0);
	if (!ok) {
		/*
		 * Back out the process count.
		 */
		nprocs--;
		if (ppsratecheck(&lastfail, &curfail, 1))
			printf("maxproc limit exceeded by uid %d, please "
			    "see tuning(7) and login.conf(5).\n", uid);
		tsleep(&forksleep, 0, "fork", hz / 2);
		return (EAGAIN);
	}

	/* Allocate new proc. */
	newproc = zalloc(proc_zone);

	/*
	 * Set up linkage for kernel-based threading.
	 */
	if ((flags & RFTHREAD) != 0) {
		newproc->p_peers = p1->p_peers;
		p1->p_peers = newproc;
		newproc->p_leader = p1->p_leader;
	} else {
		newproc->p_peers = 0;
		newproc->p_leader = newproc;
	}

	newproc->p_wakeup = 0;
	newproc->p_vmspace = NULL;

	/*
	 * Find an unused process ID.  We remember a range of unused IDs
	 * ready to use (from nextpid+1 through pidchecked-1).
	 */
	nextpid++;
	if (randompid)
		nextpid += arc4random() % randompid;
retry:
	/*
	 * If the process ID prototype has wrapped around,
	 * restart somewhat above 0, as the low-numbered procs
	 * tend to include daemons that don't exit.
	 */
	if (nextpid >= PID_MAX) {
		nextpid = nextpid % PID_MAX;
		if (nextpid < 100)
			nextpid += 100;
		pidchecked = 0;
	}
	if (nextpid >= pidchecked) {
		int doingzomb = 0;

		pidchecked = PID_MAX;
		/*
		 * Scan the active and zombie procs to check whether this pid
		 * is in use.  Remember the lowest pid that's greater
		 * than nextpid, so we can avoid checking for a while.
		 */
		p2 = LIST_FIRST(&allproc);
again:
		for (; p2 != 0; p2 = LIST_NEXT(p2, p_list)) {
			while (p2->p_pid == nextpid ||
			    p2->p_pgrp->pg_id == nextpid ||
			    p2->p_session->s_sid == nextpid) {
				nextpid++;
				if (nextpid >= pidchecked)
					goto retry;
			}
			if (p2->p_pid > nextpid && pidchecked > p2->p_pid)
				pidchecked = p2->p_pid;
			if (p2->p_pgrp->pg_id > nextpid &&
			    pidchecked > p2->p_pgrp->pg_id)
				pidchecked = p2->p_pgrp->pg_id;
			if (p2->p_session->s_sid > nextpid &&
			    pidchecked > p2->p_session->s_sid)
				pidchecked = p2->p_session->s_sid;
		}
		if (!doingzomb) {
			doingzomb = 1;
			p2 = LIST_FIRST(&zombproc);
			goto again;
		}
	}

	p2 = newproc;
	p2->p_stat = SIDL;			/* protect against others */
	p2->p_pid = nextpid;
	LIST_INSERT_HEAD(&allproc, p2, p_list);
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
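
	/*
	 * Informational note (not in the original sources):
	 * p_startzero/p_endzero and p_startcopy/p_endcopy are dummy marker
	 * fields in struct proc that bracket the members to be zeroed and
	 * the members to be inherited verbatim, so the bzero()/bcopy()
	 * below automatically track the structure layout as fields are
	 * added or removed.
	 */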

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	bzero(&p2->p_startzero,
	    (unsigned) ((caddr_t)&p2->p_endzero - (caddr_t)&p2->p_startzero));
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));

	p2->p_aioinfo = NULL;

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 * The p_stats and p_sigacts substructs are set in vm_fork.
	 *
	 * P_CP_RELEASED indicates that the process is starting out in
	 * the kernel (in the fork trampoline).  The flag will be converted
	 * to P_CURPROC when the new process calls userret() and attempts
	 * to return to userland.
	 */
	p2->p_flag = P_INMEM | P_CP_RELEASED;
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);
	p2->p_ucred = crhold(p1->p_ucred);

	if (p2->p_ucred->cr_prison) {
		p2->p_ucred->cr_prison->pr_ref++;
		p2->p_flag |= P_JAILED;
	}

	if (p2->p_args)
		p2->p_args->ar_ref++;

	if (flags & RFSIGSHARE) {
		p2->p_procsig = p1->p_procsig;
		p2->p_procsig->ps_refcnt++;
		if (p1->p_sigacts == &p1->p_addr->u_sigacts) {
			struct sigacts *newsigacts;
			int s;

			/* Create the shared sigacts structure. */
			MALLOC(newsigacts, struct sigacts *,
			    sizeof(struct sigacts), M_SUBPROC, M_WAITOK);
			s = splhigh();
			/*
			 * Set p_sigacts to the new shared structure.
			 * Note that this is updating p1->p_sigacts at the
			 * same time, since p_sigacts is just a pointer to
			 * the shared p_procsig->ps_sigacts.
			 */
			p2->p_sigacts = newsigacts;
			*p2->p_sigacts = p1->p_addr->u_sigacts;
			splx(s);
		}
	} else {
		MALLOC(p2->p_procsig, struct procsig *, sizeof(struct procsig),
		    M_SUBPROC, M_WAITOK);
		bcopy(p1->p_procsig, p2->p_procsig, sizeof(*p2->p_procsig));
		p2->p_procsig->ps_refcnt = 1;
		p2->p_sigacts = NULL;	/* finished in vm_fork() */
	}
	if (flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;

	/* Bump references to the text vnode (for procfs). */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		VREF(p2->p_textvp);

	if (flags & RFCFDG) {
		p2->p_fd = fdinit(p1);
		fdtol = NULL;
	} else if (flags & RFFDG) {
		p2->p_fd = fdcopy(p1);
		fdtol = NULL;
	} else {
		p2->p_fd = fdshare(p1);
		if (p1->p_fdtol == NULL)
			p1->p_fdtol =
				filedesc_to_leader_alloc(NULL, p1->p_leader);
		if ((flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table and
			 * shared process leaders.
			 */
			fdtol = p1->p_fdtol;
			fdtol->fdl_refcount++;
		} else {
			/*
			 * Shared file descriptor table, and
			 * different process leaders.
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol, p2);
		}
	}
	p2->p_fdtol = fdtol;

	/*
	 * If p_limit is still copy-on-write, bump refcnt,
	 * otherwise get a copy that won't be modified.
	 * (If PL_SHAREMOD is clear, the structure is shared
	 * copy-on-write.)
	 */
	if (p1->p_limit->p_lflags & PL_SHAREMOD)
		p2->p_limit = limcopy(p1->p_limit);
	else {
		p2->p_limit = p1->p_limit;
		p2->p_limit->p_refcnt++;
	}

	/*
	 * Preserve some more flags in the subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flag |= p1->p_flag & (P_SUGID | P_ALTSTACK);
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		p2->p_flag |= P_CONTROLT;
	if (flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;

	LIST_INSERT_AFTER(p1, p2, p_pglist);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if (flags & RFNOWAIT)
		pptr = initproc;
	else
		pptr = p1;
	p2->p_pptr = pptr;
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	LIST_INIT(&p2->p_children);
	varsymset_init(&p2->p_varsymset, &p1->p_varsymset);

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.  If not inherited,
	 * these were zeroed above but we still could have a trace race
	 * so make sure p2's p_tracep is NULL.
	 */
	if ((p1->p_traceflag & KTRFAC_INHERIT) && p2->p_tracep == NULL) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracep = p1->p_tracep) != NULL)
			VREF(p2->p_tracep);
	}
#endif

	/*
	 * Set the priority of the child to be that of the parent.
	 */
	p2->p_estcpu = p1->p_estcpu;

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 */
	PHOLD(p1);

	/*
	 * Finish creating the child process.  It will return via a
	 * different execution path later (ie: directly into user mode).
	 */
	vm_fork(p1, p2, flags);
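
	/*
	 * Statistics note (informational): the classification below relies
	 * on exact flag matches.  Only the precise combinations passed by
	 * fork() and vfork() above are counted as forks and vforks; any
	 * other user-supplied rfork() combination falls into the v_rforks
	 * bucket, and kernel threads are recognized by p1 being proc0.
	 */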

	if (flags == (RFFDG | RFPROC)) {
		mycpu->gd_cnt.v_forks++;
		mycpu->gd_cnt.v_forkpages += p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize;
	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
		mycpu->gd_cnt.v_vforks++;
		mycpu->gd_cnt.v_vforkpages += p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize;
	} else if (p1 == &proc0) {
		mycpu->gd_cnt.v_kthreads++;
		mycpu->gd_cnt.v_kthreadpages += p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize;
	} else {
		mycpu->gd_cnt.v_rforks++;
		mycpu->gd_cnt.v_rforkpages += p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize;
	}

	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 *   What if they have an error? XXX
	 */
	TAILQ_FOREACH(ep, &fork_list, next) {
		(*ep->function)(p1, p2, flags);
	}

	/*
	 * Set the start time.  Note that the child is not yet runnable;
	 * the caller makes it so via start_forked_proc().
	 */
	microtime(&(p2->p_stats->p_start));
	p2->p_acflag = AFORK;

	/*
	 * Tell any interested parties about the new process.
	 */
	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

	/*
	 * Return child proc pointer to parent.
	 */
	*procp = p2;
	return (0);
}

/*
 * The next two functions are general routines to handle adding and
 * deleting items on the fork callout list.
 *
 * at_fork():
 * Take the arguments given and put them onto the fork callout list,
 * but first make sure that it's not already there.
 * Returns 0 on success or a standard error number.
 */
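
/*
 * Usage sketch (illustrative only; "my_fork_hook" is a hypothetical name,
 * not part of this file):
 *
 *	static void
 *	my_fork_hook(struct proc *p1, struct proc *p2, int flags)
 *	{
 *		// inspect or adjust the parent/child pair here
 *	}
 *
 *	at_fork(my_fork_hook);		// e.g. from a module's load handler
 *	...
 *	rm_at_fork(my_fork_hook);	// and from its unload handler
 */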

int
at_fork(forklist_fn function)
{
	struct forklist *ep;

#ifdef INVARIANTS
	/* Let the programmer know if he's been stupid. */
	if (rm_at_fork(function))
		printf("WARNING: fork callout entry (%p) already present\n",
		    function);
#endif
	ep = malloc(sizeof(*ep), M_ATFORK, M_NOWAIT);
	if (ep == NULL)
		return (ENOMEM);
	ep->function = function;
	TAILQ_INSERT_TAIL(&fork_list, ep, next);
	return (0);
}

/*
 * Scan the fork callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1).
 */
int
rm_at_fork(forklist_fn function)
{
	struct forklist *ep;

	TAILQ_FOREACH(ep, &fork_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&fork_list, ep, next);
			free(ep, M_ATFORK);
			return (1);
		}
	}
	return (0);
}

/*
 * Add a forked process to the run queue after any remaining setup, such
 * as setting the fork handler, has been completed.
 */
void
start_forked_proc(struct proc *p1, struct proc *p2)
{
	/*
	 * Move from SIDL to RUN queue, and activate the process's thread.
	 * Activation of the thread effectively makes the process "a"
	 * current process, so we do not setrunqueue().
	 */
	KASSERT(p2->p_stat == SIDL,
	    ("cannot start forked process, bad status: %p", p2));
	(void) splhigh();
	p2->p_stat = SRUN;
	setrunqueue(p2);
	(void) spl0();

	/*
	 * The parent can now be swapped again.
	 */
	PRELE(p1);

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set P_PPWAIT on child, and sleep on our
	 * proc (in case of exit).
	 */
	while (p2->p_flag & P_PPWAIT)
		tsleep(p1, 0, "ppwait", 0);
}
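
/*
 * vfork() synchronization, in outline (informational note): fork1() sets
 * P_PPWAIT on the child when RFPPWAIT is requested, and the loop above
 * then sleeps on the parent proc until the flag clears.  The flag is
 * cleared, and the parent woken, when the child execs or exits.
 */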