/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_fork.c      8.6 (Berkeley) 4/8/94
 * $FreeBSD: src/sys/kern/kern_fork.c,v 1.72.2.14 2003/06/26 04:15:10 silby Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/jail.h>
#include <sys/lwp.h>

#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/vmmeter.h>
#include <sys/refcount.h>
#include <sys/thread2.h>
#include <sys/signal2.h>
#include <sys/spinlock2.h>

#include <sys/dsched.h>

static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback");
static MALLOC_DEFINE(M_REAPER, "reaper", "process reapers");

/*
 * These are the structures used to create a callout list for things to do
 * when forking a process.
 */
struct forklist {
        forklist_fn function;
        TAILQ_ENTRY(forklist) next;
};

TAILQ_HEAD(forklist_head, forklist);
static struct forklist_head fork_list = TAILQ_HEAD_INITIALIZER(fork_list);

static struct lwp *lwp_fork(struct lwp *, struct proc *, int flags,
                        const cpumask_t *mask);
static int lwp_create1(struct lwp_params *params,
                        const cpumask_t *mask);
static struct lock reaper_lock = LOCK_INITIALIZER("reapgl", 0, 0);

int forksleep;          /* Place for fork1() to sleep on. */

/*
 * Red-Black tree support for LWPs
 */

static int
rb_lwp_compare(struct lwp *lp1, struct lwp *lp2)
{
        if (lp1->lwp_tid < lp2->lwp_tid)
                return(-1);
        if (lp1->lwp_tid > lp2->lwp_tid)
                return(1);
        return(0);
}

RB_GENERATE2(lwp_rb_tree, lwp, u.lwp_rbnode, rb_lwp_compare, lwpid_t, lwp_tid);

/*
 * When forking, memory underpinning umtx-supported mutexes may be set
 * COW, causing the physical address to change.  We must wake up any threads
 * blocked on the physical address to allow them to re-resolve their VM.
 *
 * (caller is holding p->p_token)
 */
static void
wake_umtx_threads(struct proc *p1)
{
        struct lwp *lp;
        struct thread *td;

        RB_FOREACH(lp, lwp_rb_tree, &p1->p_lwp_tree) {
                td = lp->lwp_thread;
                if (td && (td->td_flags & TDF_TSLEEPQ) &&
                    (td->td_wdomain & PDOMAIN_MASK) == PDOMAIN_UMTX) {
                        wakeup_domain(td->td_wchan, PDOMAIN_UMTX);
                }
        }
}

/*
 * fork() system call
 */
int
sys_fork(struct fork_args *uap)
{
        struct lwp *lp = curthread->td_lwp;
        struct proc *p2;
        int error;

        error = fork1(lp, RFFDG | RFPROC | RFPGLOCK, &p2);
        if (error == 0) {
                PHOLD(p2);
                start_forked_proc(lp, p2);
                uap->sysmsg_fds[0] = p2->p_pid;
                uap->sysmsg_fds[1] = 0;
                PRELE(p2);
        }
        return error;
}

/*
 * vfork() system call
 */
int
sys_vfork(struct vfork_args *uap)
{
        struct lwp *lp = curthread->td_lwp;
        struct proc *p2;
        int error;

        error = fork1(lp, RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK, &p2);
        if (error == 0) {
                PHOLD(p2);
                start_forked_proc(lp, p2);
                uap->sysmsg_fds[0] = p2->p_pid;
                uap->sysmsg_fds[1] = 0;
                PRELE(p2);
        }
        return error;
}

/*
 * Handle rforks.
 * An rfork may (1) operate on the current process without creating a new
 * process, (2) create a new process that shares the current process's
 * vmspace, signals, and/or descriptors, or (3) create a new process that
 * does not share these things (normal fork).
 *
 * Note that we only call start_forked_proc() if a new process is actually
 * created.
 *
 * rfork { int flags }
 */
int
sys_rfork(struct rfork_args *uap)
{
        struct lwp *lp = curthread->td_lwp;
        struct proc *p2;
        int error;

        if ((uap->flags & RFKERNELONLY) != 0)
                return (EINVAL);

        error = fork1(lp, uap->flags | RFPGLOCK, &p2);
        if (error == 0) {
                if (p2) {
                        PHOLD(p2);
                        start_forked_proc(lp, p2);
                        uap->sysmsg_fds[0] = p2->p_pid;
                        uap->sysmsg_fds[1] = 0;
                        PRELE(p2);
                } else {
                        uap->sysmsg_fds[0] = 0;
                        uap->sysmsg_fds[1] = 0;
                }
        }
        return error;
}

static int
lwp_create1(struct lwp_params *uprm, const cpumask_t *umask)
{
        struct proc *p = curproc;
        struct lwp *lp;
        struct lwp_params params;
        cpumask_t *mask = NULL, mask0;
        int error;

        error = copyin(uprm, &params, sizeof(params));
        if (error)
                goto fail2;

        if (umask != NULL) {
                error = copyin(umask, &mask0, sizeof(mask0));
                if (error)
                        goto fail2;
                CPUMASK_ANDMASK(mask0, smp_active_mask);
                if (CPUMASK_TESTNZERO(mask0))
                        mask = &mask0;
        }

        lwkt_gettoken(&p->p_token);
        plimit_lwp_fork(p);     /* force exclusive access */
        lp = lwp_fork(curthread->td_lwp, p, RFPROC | RFMEM, mask);
        error = cpu_prepare_lwp(lp, &params);
        if (error)
                goto fail;
        if (params.lwp_tid1 != NULL &&
            (error = copyout(&lp->lwp_tid, params.lwp_tid1, sizeof(lp->lwp_tid))))
                goto fail;
        if (params.lwp_tid2 != NULL &&
            (error = copyout(&lp->lwp_tid, params.lwp_tid2, sizeof(lp->lwp_tid))))
                goto fail;

        /*
         * Now schedule the new lwp.
         */
        p->p_usched->resetpriority(lp);
        crit_enter();
        lp->lwp_stat = LSRUN;
        p->p_usched->setrunqueue(lp);
        crit_exit();
        lwkt_reltoken(&p->p_token);

        return (0);

fail:
        /*
         * Make sure no one is using this lwp before it is removed from
         * the tree.  If we didn't wait here, lwp tree iteration with a
         * blocking operation would be broken.
         */
        while (lp->lwp_lock > 0)
                tsleep(lp, 0, "lwpfail", 1);
        lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
        --p->p_nthreads;
        /* lwp_dispose expects an exited lwp, and a held proc */
        atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
        lp->lwp_thread->td_flags |= TDF_EXITING;
        lwkt_remove_tdallq(lp->lwp_thread);
        PHOLD(p);
        biosched_done(lp->lwp_thread);
        dsched_exit_thread(lp->lwp_thread);
        lwp_dispose(lp);
        lwkt_reltoken(&p->p_token);
fail2:
        return (error);
}

/*
 * Low level thread create used by pthreads.
 */
int
sys_lwp_create(struct lwp_create_args *uap)
{

        return (lwp_create1(uap->params, NULL));
}

int
sys_lwp_create2(struct lwp_create2_args *uap)
{

        return (lwp_create1(uap->params, uap->mask));
}

int nprocs = 1;         /* process 0 */

int
fork1(struct lwp *lp1, int flags, struct proc **procp)
{
        struct proc *p1 = lp1->lwp_proc;
        struct proc *p2;
        struct proc *pptr;
        struct pgrp *p1grp;
        struct pgrp *plkgrp;
        struct sysreaper *reap;
        uid_t uid;
        int ok, error;
        static int curfail = 0;
        static struct timeval lastfail;
        struct forklist *ep;
        struct filedesc_to_leader *fdtol;

        if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
                return (EINVAL);

        lwkt_gettoken(&p1->p_token);
        plkgrp = NULL;
        p2 = NULL;

        /*
         * Here we don't create a new process, but we divorce
         * certain parts of a process from itself.
         */
        if ((flags & RFPROC) == 0) {
                /*
                 * This kind of stunt does not work anymore if
                 * there are native threads (lwps) running
                 */
                if (p1->p_nthreads != 1) {
                        error = EINVAL;
                        goto done;
                }

                vm_fork(p1, 0, flags);
                if ((flags & RFMEM) == 0)
                        wake_umtx_threads(p1);

                /*
                 * Close all file descriptors.
                 */
                if (flags & RFCFDG) {
                        struct filedesc *fdtmp;
                        fdtmp = fdinit(p1);
                        fdfree(p1, fdtmp);
                }

                /*
                 * Unshare file descriptors (from parent.)
                 */
                if (flags & RFFDG) {
                        if (p1->p_fd->fd_refcnt > 1) {
                                struct filedesc *newfd;
                                error = fdcopy(p1, &newfd);
                                if (error != 0) {
                                        error = ENOMEM;
                                        goto done;
                                }
                                fdfree(p1, newfd);
                        }
                }
                *procp = NULL;
                error = 0;
                goto done;
        }

        /*
         * Interlock against process group signal delivery.  If signals
         * are pending after the interlock is obtained we have to restart
         * the system call to process the signals.  If we don't the child
         * can miss a pgsignal (such as ^C) sent during the fork.
         *
         * We can't use CURSIG() here because it will process any STOPs
         * and cause the process group lock to be held indefinitely.  If
         * a STOP occurs, the fork will be restarted after the CONT.
         */
        p1grp = p1->p_pgrp;
        if ((flags & RFPGLOCK) && (plkgrp = p1->p_pgrp) != NULL) {
                pgref(plkgrp);
                lockmgr(&plkgrp->pg_lock, LK_SHARED);
                if (CURSIG_NOBLOCK(lp1)) {
                        error = ERESTART;
                        goto done;
                }
        }

        /*
         * Although process entries are dynamically created, we still keep
         * a global limit on the maximum number we will create.  Don't allow
         * a nonprivileged user to use the last ten processes; don't let root
         * exceed the limit.  The variable nprocs is the current number of
         * processes, maxproc is the limit.
         */
        uid = lp1->lwp_thread->td_ucred->cr_ruid;
        if ((nprocs >= maxproc - 10 && uid != 0) || nprocs >= maxproc) {
                if (ppsratecheck(&lastfail, &curfail, 1))
                        kprintf("maxproc limit exceeded by uid %d, please "
                                "see tuning(7) and login.conf(5).\n", uid);
                tsleep(&forksleep, 0, "fork", hz / 2);
                error = EAGAIN;
                goto done;
        }

        /*
         * Increment the nprocs resource before blocking can occur.  There
         * are hard-limits as to the number of processes that can run.
         */
        atomic_add_int(&nprocs, 1);

        /*
         * Increment the count of procs running with this uid.  This also
         * applies to root.
         */
        ok = chgproccnt(lp1->lwp_thread->td_ucred->cr_ruidinfo, 1,
                        plimit_getadjvalue(RLIMIT_NPROC));
        if (!ok) {
                /*
                 * Back out the process count
                 */
                atomic_add_int(&nprocs, -1);
                if (ppsratecheck(&lastfail, &curfail, 1)) {
                        kprintf("maxproc limit of %jd "
                                "exceeded by \"%s\" uid %d, "
                                "please see tuning(7) and login.conf(5).\n",
                                plimit_getadjvalue(RLIMIT_NPROC),
                                p1->p_comm,
                                uid);
                }
                tsleep(&forksleep, 0, "fork", hz / 2);
                error = EAGAIN;
                goto done;
        }

        /*
         * Allocate a new process, don't get fancy: zero the structure.
         */
        p2 = kmalloc(sizeof(struct proc), M_PROC, M_WAITOK|M_ZERO);

        /*
         * Core initialization.  SIDL is a safety state that protects the
         * partially initialized process once it starts getting hooked
         * into system structures and becomes addressable.
         *
         * We must be sure to acquire p2->p_token as well, we must hold it
         * once the process is on the allproc list to avoid things such
         * as competing modifications to p_flags.
         */
        mycpu->gd_forkid += ncpus;
        p2->p_forkid = mycpu->gd_forkid + mycpu->gd_cpuid;
        p2->p_lasttid = 0;      /* first tid will be 1 */
        p2->p_stat = SIDL;

        /*
         * NOTE: Process 0 will not have a reaper, but process 1 (init) and
         *       all other processes always will.
         */
        if ((reap = p1->p_reaper) != NULL) {
                reaper_hold(reap);
                p2->p_reaper = reap;
        } else {
                p2->p_reaper = NULL;
        }

        RB_INIT(&p2->p_lwp_tree);
        spin_init(&p2->p_spin, "procfork1");
        lwkt_token_init(&p2->p_token, "proc");
        lwkt_gettoken(&p2->p_token);
        p2->p_uidpcpu = kmalloc(sizeof(*p2->p_uidpcpu) * ncpus,
                                M_SUBPROC, M_WAITOK | M_ZERO);

        /*
         * Setup linkage for kernel based threading XXX lwp.  Also add the
         * process to the allproclist.
         *
         * The process structure is addressable after this point.
         */
        if (flags & RFTHREAD) {
                p2->p_peers = p1->p_peers;
                p1->p_peers = p2;
                p2->p_leader = p1->p_leader;
        } else {
                p2->p_leader = p2;
        }
        proc_add_allproc(p2);

        /*
         * Initialize the section which is copied verbatim from the parent.
         */
        bcopy(&p1->p_startcopy, &p2->p_startcopy,
              ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));

        /*
         * Duplicate sub-structures as needed.  Increase reference counts
         * on shared objects.
         *
         * NOTE: because we are now on the allproc list it is possible for
         *       other consumers to gain temporary references to p2
         *       (p2->p_lock can change).
         */
        if (p1->p_flags & P_PROFIL)
                startprofclock(p2);
        p2->p_ucred = crhold(lp1->lwp_thread->td_ucred);

        if (jailed(p2->p_ucred))
                p2->p_flags |= P_JAILED;

        if (p2->p_args)
                refcount_acquire(&p2->p_args->ar_ref);

        p2->p_usched = p1->p_usched;
        /* XXX: verify copy of the secondary iosched stuff */
        dsched_enter_proc(p2);

        if (flags & RFSIGSHARE) {
                p2->p_sigacts = p1->p_sigacts;
                refcount_acquire(&p2->p_sigacts->ps_refcnt);
        } else {
                p2->p_sigacts = kmalloc(sizeof(*p2->p_sigacts),
                                        M_SUBPROC, M_WAITOK);
                bcopy(p1->p_sigacts, p2->p_sigacts, sizeof(*p2->p_sigacts));
                refcount_init(&p2->p_sigacts->ps_refcnt, 1);
        }
        if (flags & RFLINUXTHPN)
                p2->p_sigparent = SIGUSR1;
        else
                p2->p_sigparent = SIGCHLD;

        /* bump references to the text vnode (for procfs) */
        p2->p_textvp = p1->p_textvp;
        if (p2->p_textvp)
                vref(p2->p_textvp);

        /* copy namecache handle to the text file */
        if (p1->p_textnch.mount)
                cache_copy(&p1->p_textnch, &p2->p_textnch);

        /*
         * Handle file descriptors
         */
        if (flags & RFCFDG) {
                p2->p_fd = fdinit(p1);
                fdtol = NULL;
        } else if (flags & RFFDG) {
                error = fdcopy(p1, &p2->p_fd);
                if (error != 0) {
                        error = ENOMEM;
                        goto done;
                }
                fdtol = NULL;
        } else {
                p2->p_fd = fdshare(p1);
                if (p1->p_fdtol == NULL) {
                        p1->p_fdtol = filedesc_to_leader_alloc(NULL,
                                                               p1->p_leader);
                }
                if ((flags & RFTHREAD) != 0) {
                        /*
                         * Shared file descriptor table and
                         * shared process leaders.
                         */
                        fdtol = p1->p_fdtol;
                        fdtol->fdl_refcount++;
                } else {
                        /*
                         * Shared file descriptor table, and
                         * different process leaders
                         */
                        fdtol = filedesc_to_leader_alloc(p1->p_fdtol, p2);
                }
        }
        p2->p_fdtol = fdtol;
        p2->p_limit = plimit_fork(p1);

        /*
         * Adjust depth for resource downscaling
         */
        if ((p2->p_depth & 31) != 31)
                ++p2->p_depth;

        /*
         * Preserve some more flags in subprocess.  P_PROFIL has already
         * been preserved.
         */
        p2->p_flags |= p1->p_flags & P_SUGID;
        if (p1->p_session->s_ttyvp != NULL && (p1->p_flags & P_CONTROLT))
                p2->p_flags |= P_CONTROLT;
        if (flags & RFPPWAIT) {
                p2->p_flags |= P_PPWAIT;
                if (p1->p_upmap)
                        atomic_add_int(&p1->p_upmap->invfork, 1);
        }

        /*
         * Inherit the virtual kernel structure (allows a virtual kernel
         * to fork to simulate multiple cpus).
         */
        if (p1->p_vkernel)
                vkernel_inherit(p1, p2);

        /*
         * Once we are on a pglist we may receive signals.  XXX we might
         * race a ^C being sent to the process group by not receiving it
         * at all prior to this line.
         */
        pgref(p1grp);
        lwkt_gettoken(&p1grp->pg_token);
        LIST_INSERT_AFTER(p1, p2, p_pglist);
        lwkt_reltoken(&p1grp->pg_token);

        /*
         * Attach the new process to its parent.
         *
         * If RFNOWAIT is set, the newly created process becomes a child
         * of the reaper (typically init).  This effectively disassociates
         * the child from the parent.
         *
         * Temporarily hold pptr for the RFNOWAIT case to avoid ripouts.
         */
        if (flags & RFNOWAIT) {
                pptr = reaper_get(reap);
                if (pptr == NULL) {
                        pptr = initproc;
                        PHOLD(pptr);
                }
        } else {
                pptr = p1;
        }
        p2->p_pptr = pptr;
        p2->p_ppid = pptr->p_pid;
        LIST_INIT(&p2->p_children);

        lwkt_gettoken(&pptr->p_token);
        LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
        lwkt_reltoken(&pptr->p_token);

        if (flags & RFNOWAIT)
                PRELE(pptr);

        varsymset_init(&p2->p_varsymset, &p1->p_varsymset);
        callout_init_mp(&p2->p_ithandle);

#ifdef KTRACE
        /*
         * Copy traceflag and tracefile if enabled.  If not inherited,
         * these were zeroed above but we still could have a trace race
         * so make sure p2's p_tracenode is NULL.
         */
        if ((p1->p_traceflag & KTRFAC_INHERIT) && p2->p_tracenode == NULL) {
                p2->p_traceflag = p1->p_traceflag;
                p2->p_tracenode = ktrinherit(p1->p_tracenode);
        }
#endif

        /*
         * This begins the section where we must prevent the parent
         * from being swapped.
         *
         * Gets PRELE'd in the caller in start_forked_proc().
         */
        PHOLD(p1);

        vm_fork(p1, p2, flags);
        if ((flags & RFMEM) == 0)
                wake_umtx_threads(p1);

        /*
         * Create the first lwp associated with the new proc.  It will
         * return via a different execution path later, directly into
         * userland, after it was put on the runq by start_forked_proc().
         */
        lwp_fork(lp1, p2, flags, NULL);

        if (flags == (RFFDG | RFPROC | RFPGLOCK)) {
                mycpu->gd_cnt.v_forks++;
                mycpu->gd_cnt.v_forkpages += btoc(p2->p_vmspace->vm_dsize) +
                                             btoc(p2->p_vmspace->vm_ssize);
        } else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK)) {
                mycpu->gd_cnt.v_vforks++;
                mycpu->gd_cnt.v_vforkpages += btoc(p2->p_vmspace->vm_dsize) +
                                              btoc(p2->p_vmspace->vm_ssize);
        } else if (p1 == &proc0) {
                mycpu->gd_cnt.v_kthreads++;
                mycpu->gd_cnt.v_kthreadpages += btoc(p2->p_vmspace->vm_dsize) +
                                                btoc(p2->p_vmspace->vm_ssize);
        } else {
                mycpu->gd_cnt.v_rforks++;
                mycpu->gd_cnt.v_rforkpages += btoc(p2->p_vmspace->vm_dsize) +
                                              btoc(p2->p_vmspace->vm_ssize);
        }

        /*
         * Both processes are set up, now check if any loadable modules want
         * to adjust anything.
         *   What if they have an error? XXX
         */
        TAILQ_FOREACH(ep, &fork_list, next) {
                (*ep->function)(p1, p2, flags);
        }

        /*
         * Set the start time.  Note that the process is not runnable.  The
         * caller is responsible for making it runnable.
         */
        microtime(&p2->p_start);
        p2->p_acflag = AFORK;

        /*
         * tell any interested parties about the new process
         */
        KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

        /*
         * Return child proc pointer to parent.
         */
        *procp = p2;
        error = 0;
done:
        if (p2)
                lwkt_reltoken(&p2->p_token);
        lwkt_reltoken(&p1->p_token);
        if (plkgrp) {
                lockmgr(&plkgrp->pg_lock, LK_RELEASE);
                pgrel(plkgrp);
        }
        return (error);
}

static struct lwp *
lwp_fork(struct lwp *origlp, struct proc *destproc, int flags,
         const cpumask_t *mask)
{
        globaldata_t gd = mycpu;
        struct lwp *lp;
        struct thread *td;

        lp = kmalloc(sizeof(struct lwp), M_LWP, M_WAITOK|M_ZERO);

        lp->lwp_proc = destproc;
        lp->lwp_vmspace = destproc->p_vmspace;
        lp->lwp_stat = LSRUN;
        bcopy(&origlp->lwp_startcopy, &lp->lwp_startcopy,
              (unsigned) ((caddr_t)&lp->lwp_endcopy -
                          (caddr_t)&lp->lwp_startcopy));
        if (mask != NULL)
                lp->lwp_cpumask = *mask;

        /*
         * Reset the sigaltstack if memory is shared, otherwise inherit
         * it.
         */
        if (flags & RFMEM) {
                lp->lwp_sigstk.ss_flags = SS_DISABLE;
                lp->lwp_sigstk.ss_size = 0;
                lp->lwp_sigstk.ss_sp = NULL;
                lp->lwp_flags &= ~LWP_ALTSTACK;
        } else {
                lp->lwp_flags |= origlp->lwp_flags & LWP_ALTSTACK;
        }

        /*
         * Set cpbase to the last timeout that occurred (not the upcoming
         * timeout).
         *
         * A critical section is required since a timer IPI can update
         * scheduler specific data.
         */
        crit_enter();
        lp->lwp_cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
        destproc->p_usched->heuristic_forking(origlp, lp);
        crit_exit();
        CPUMASK_ANDMASK(lp->lwp_cpumask, usched_mastermask);
        lwkt_token_init(&lp->lwp_token, "lwp_token");
        TAILQ_INIT(&lp->lwp_lpmap_backing_list);
        spin_init(&lp->lwp_spin, "lwptoken");

        /*
         * Assign the thread to the current cpu to begin with so we
         * can manipulate it.
         */
        td = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, gd->gd_cpuid, 0);
        lp->lwp_thread = td;
        td->td_wakefromcpu = gd->gd_cpuid;
        td->td_ucred = crhold(destproc->p_ucred);
        td->td_proc = destproc;
        td->td_lwp = lp;
        td->td_switch = cpu_heavy_switch;
#ifdef NO_LWKT_SPLIT_USERPRI
        lwkt_setpri(td, TDPRI_USER_NORM);
#else
        lwkt_setpri(td, TDPRI_KERN_USER);
#endif
        lwkt_set_comm(td, "%s", destproc->p_comm);

        /*
         * cpu_fork will copy and update the pcb, set up the kernel stack,
         * and make the child ready to run.
         */
        cpu_fork(origlp, lp, flags);
        kqueue_init(&lp->lwp_kqueue, destproc->p_fd);

        /*
         * Use the same TID for the first thread in the new process after
         * a fork or vfork.  This is needed to keep pthreads and /dev/lpmap
         * sane.  In particular a consequence of implementing the per-thread
         * /dev/lpmap map code makes this mandatory.
         *
         * NOTE: exec*() will reset the TID to 1 to keep things sane in that
         *       department too.
         */
        lp->lwp_tid = origlp->lwp_tid - 1;

        /*
         * Leave 2 bits open so the pthreads library can optimize locks
         * by combining the TID with a few lock-related flags.
         */
        do {
                if (lp->lwp_tid == 0 || lp->lwp_tid == 0x3FFFFFFF)
                        lp->lwp_tid = 1;
                else
                        ++lp->lwp_tid;
        } while (lwp_rb_tree_RB_INSERT(&destproc->p_lwp_tree, lp) != NULL);

        destproc->p_lasttid = lp->lwp_tid;
        destproc->p_nthreads++;

        /*
         * This flag is set and never cleared.  It means that the process
         * was threaded at some point.  Used to improve exit performance.
         */
        pmap_maybethreaded(&destproc->p_vmspace->vm_pmap);
        destproc->p_flags |= P_MAYBETHREADED;

        /*
         * If the original lp had a lpmap and a non-zero blockallsigs
         * count, give the lp for the forked process the same count.
         *
         * This makes the user code and expectations less confusing
         * in terms of unwinding locks and also allows userland to start
         * the forked process with signals blocked via the blockallsigs()
         * mechanism if desired.
         *
         * XXX future - also inherit the lwp-specific process title ?
         */
        if (origlp->lwp_lpmap &&
            (origlp->lwp_lpmap->blockallsigs & 0x7FFFFFFF)) {
                lwp_usermap(lp, 0);
                if (lp->lwp_lpmap) {
                        lp->lwp_lpmap->blockallsigs =
                                origlp->lwp_lpmap->blockallsigs;
                }
        }

        return (lp);
}

/*
 * The next two functions are general routines to handle adding/deleting
 * items on the fork callout list.
 *
 * at_fork():
 * Take the arguments given and put them onto the fork callout list,
 * however first make sure that it's not already there.
 * Returns 0 on success or a standard error number.
 */
int
at_fork(forklist_fn function)
{
        struct forklist *ep;

#ifdef INVARIANTS
        /* let the programmer know if he's been stupid */
        if (rm_at_fork(function)) {
                kprintf("WARNING: fork callout entry (%p) already present\n",
                        function);
        }
#endif
        ep = kmalloc(sizeof(*ep), M_ATFORK, M_WAITOK|M_ZERO);
        ep->function = function;
        TAILQ_INSERT_TAIL(&fork_list, ep, next);
        return (0);
}

/*
 * Scan the fork callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1).
 */
int
rm_at_fork(forklist_fn function)
{
        struct forklist *ep;

        TAILQ_FOREACH(ep, &fork_list, next) {
                if (ep->function == function) {
                        TAILQ_REMOVE(&fork_list, ep, next);
                        kfree(ep, M_ATFORK);
                        return(1);
                }
        }
        return (0);
}

/*
 * Add a forked process to the run queue after any remaining setup, such
 * as setting the fork handler, has been completed.
 *
 * p2 is held by the caller.
 */
void
start_forked_proc(struct lwp *lp1, struct proc *p2)
{
        struct lwp *lp2 = ONLY_LWP_IN_PROC(p2);
        int pflags;

        /*
         * Move from SIDL to RUN queue, and activate the process's thread.
         * Activation of the thread effectively makes the process "a"
         * current process, so we do not setrunqueue().
         *
         * YYY setrunqueue works here but we should clean up the trampoline
         * code so we just schedule the LWKT thread and let the trampoline
         * deal with the userland scheduler on return to userland.
         */
        KASSERT(p2->p_stat == SIDL,
                ("cannot start forked process, bad status: %p", p2));
        p2->p_usched->resetpriority(lp2);
        crit_enter();
        p2->p_stat = SACTIVE;
        lp2->lwp_stat = LSRUN;
        p2->p_usched->setrunqueue(lp2);
        crit_exit();

        /*
         * Now can be swapped.
         */
        PRELE(lp1->lwp_proc);

        /*
         * Preserve synchronization semantics of vfork.  P_PPWAIT is set in
         * the child until it has retired the parent's resources.  The parent
         * must wait for the flag to be cleared by the child.
         *
         * Interlock the flag/tsleep with atomic ops to avoid unnecessary
         * p_token conflicts.
         *
         * XXX Is this use of an atomic op on a field that is not normally
         *     manipulated with atomic ops ok?
         */
        while ((pflags = p2->p_flags) & P_PPWAIT) {
                cpu_ccfence();
                tsleep_interlock(lp1->lwp_proc, 0);
                if (atomic_cmpset_int(&p2->p_flags, pflags, pflags))
                        tsleep(lp1->lwp_proc, PINTERLOCKED, "ppwait", 0);
        }
}

/*
 * procctl (idtype_t idtype, id_t id, int cmd, void *arg)
 */
int
sys_procctl(struct procctl_args *uap)
{
        struct proc *p = curproc;
        struct proc *p2;
        struct sysreaper *reap;
        union reaper_info udata;
        int error;

        if (uap->idtype != P_PID || uap->id != (id_t)p->p_pid)
                return EINVAL;

        switch(uap->cmd) {
        case PROC_REAP_ACQUIRE:
                lwkt_gettoken(&p->p_token);
                reap = kmalloc(sizeof(*reap), M_REAPER, M_WAITOK|M_ZERO);
                if (p->p_reaper == NULL || p->p_reaper->p != p) {
                        reaper_init(p, reap);
                        error = 0;
                } else {
                        kfree(reap, M_REAPER);
                        error = EALREADY;
                }
                lwkt_reltoken(&p->p_token);
                break;
        case PROC_REAP_RELEASE:
                lwkt_gettoken(&p->p_token);
release_again:
                reap = p->p_reaper;
                KKASSERT(reap != NULL);
                if (reap->p == p) {
                        reaper_hold(reap);      /* in case of thread race */
                        lockmgr(&reap->lock, LK_EXCLUSIVE);
                        if (reap->p != p) {
                                lockmgr(&reap->lock, LK_RELEASE);
                                reaper_drop(reap);
                                goto release_again;
                        }
                        reap->p = NULL;
                        p->p_reaper = reap->parent;
                        if (p->p_reaper)
                                reaper_hold(p->p_reaper);
                        lockmgr(&reap->lock, LK_RELEASE);
                        reaper_drop(reap);      /* our ref */
                        reaper_drop(reap);      /* old p_reaper ref */
                        error = 0;
                } else {
                        error = ENOTCONN;
                }
                lwkt_reltoken(&p->p_token);
                break;
        case PROC_REAP_STATUS:
                bzero(&udata, sizeof(udata));
                lwkt_gettoken_shared(&p->p_token);
                if ((reap = p->p_reaper) != NULL && reap->p == p) {
                        udata.status.flags = reap->flags;
                        udata.status.refs = reap->refs - 1; /* minus ours */
                }
                p2 = LIST_FIRST(&p->p_children);
                udata.status.pid_head = p2 ? p2->p_pid : -1;
                lwkt_reltoken(&p->p_token);

                if (uap->data) {
                        error = copyout(&udata, uap->data,
                                        sizeof(udata.status));
                } else {
                        error = 0;
                }
                break;
        default:
                error = EINVAL;
                break;
        }
        return error;
}

/*
 * Bump ref on reaper, preventing destruction
 */
void
reaper_hold(struct sysreaper *reap)
{
        KKASSERT(reap->refs > 0);
        refcount_acquire(&reap->refs);
}

/*
 * Drop ref on reaper, destroy the structure on the 1->0
 * transition and loop on the parent.
 */
void
reaper_drop(struct sysreaper *next)
{
        struct sysreaper *reap;

        while ((reap = next) != NULL) {
                if (refcount_release(&reap->refs)) {
                        next = reap->parent;
                        KKASSERT(reap->p == NULL);
                        lockmgr(&reaper_lock, LK_EXCLUSIVE);
                        reap->parent = NULL;
                        kfree(reap, M_REAPER);
                        lockmgr(&reaper_lock, LK_RELEASE);
                } else {
                        next = NULL;
                }
        }
}

/*
 * Initialize a static or newly allocated reaper structure
 */
void
reaper_init(struct proc *p, struct sysreaper *reap)
{
        reap->parent = p->p_reaper;
        reap->p = p;
        if (p == initproc) {
                reap->flags = REAPER_STAT_OWNED | REAPER_STAT_REALINIT;
                reap->refs = 2;
        } else {
                reap->flags = REAPER_STAT_OWNED;
                reap->refs = 1;
        }
        lockinit(&reap->lock, "subrp", 0, 0);
        cpu_sfence();
        p->p_reaper = reap;
}

/*
 * Called with p->p_token held during exit.
 *
 * This is a bit simpler than RELEASE because there are no threads remaining
 * to race.  We only release if we own the reaper, the exit code will handle
 * the final p_reaper release.
 */
struct sysreaper *
reaper_exit(struct proc *p)
{
        struct sysreaper *reap;

        /*
         * Release acquired reaper
         */
        if ((reap = p->p_reaper) != NULL && reap->p == p) {
                lockmgr(&reap->lock, LK_EXCLUSIVE);
                p->p_reaper = reap->parent;
                if (p->p_reaper)
                        reaper_hold(p->p_reaper);
                reap->p = NULL;
                lockmgr(&reap->lock, LK_RELEASE);
                reaper_drop(reap);
        }

        /*
         * Return and clear reaper (caller is holding p_token for us)
         * (reap->p does not equal p).  Caller must drop it.
         */
        if ((reap = p->p_reaper) != NULL) {
                p->p_reaper = NULL;
        }
        return reap;
}

/*
 * Return a held (PHOLD) process representing the reaper for process (p).
 * NULL should not normally be returned.  Caller should PRELE() the returned
 * reaper process when finished.
 *
 * Remove dead internal nodes while we are at it.
 *
 * Process (p)'s token must be held on call.
 * The returned process's token is NOT acquired by this routine.
 */
struct proc *
reaper_get(struct sysreaper *reap)
{
        struct sysreaper *next;
        struct proc *reproc;

        if (reap == NULL)
                return NULL;

        /*
         * Extra hold for loop
         */
        reaper_hold(reap);

        while (reap) {
                lockmgr(&reap->lock, LK_SHARED);
                if (reap->p) {
                        /*
                         * Probable reaper
                         */
                        if (reap->p) {
                                reproc = reap->p;
                                PHOLD(reproc);
                                lockmgr(&reap->lock, LK_RELEASE);
                                reaper_drop(reap);
                                return reproc;
                        }

                        /*
                         * Raced, try again
                         */
                        lockmgr(&reap->lock, LK_RELEASE);
                        continue;
                }

                /*
                 * Traverse upwards in the reaper topology, destroy
                 * dead internal nodes when possible.
                 *
                 * NOTE: Our ref on next means that a dead node should
                 *       have 2 (ours and reap->parent's).
                 */
                next = reap->parent;
                while (next) {
                        reaper_hold(next);
                        if (next->refs == 2 && next->p == NULL) {
                                lockmgr(&reap->lock, LK_RELEASE);
                                lockmgr(&reap->lock, LK_EXCLUSIVE);
                                if (next->refs == 2 &&
                                    reap->parent == next &&
                                    next->p == NULL) {
                                        /*
                                         * reap->parent inherits ref from next.
                                         */
                                        reap->parent = next->parent;
                                        next->parent = NULL;
                                        reaper_drop(next);      /* ours */
                                        reaper_drop(next);      /* old parent */
                                        next = reap->parent;
                                        continue;       /* possible chain */
                                }
                        }
                        break;
                }
                lockmgr(&reap->lock, LK_RELEASE);
                reaper_drop(reap);
                reap = next;
        }
        return NULL;
}

/*
 * Test that the sender is allowed to send a signal to the target.
 * The sender process is assumed to have a stable reaper.  The
 * target can be e.g. from a scan callback.
 *
 * Target cannot be the reaper process itself unless reaper_ok is specified,
 * or sender == target.
 */
int
reaper_sigtest(struct proc *sender, struct proc *target, int reaper_ok)
{
        struct sysreaper *sreap;
        struct sysreaper *reap;
        int r;

        sreap = sender->p_reaper;
        if (sreap == NULL)
                return 1;

        if (sreap == target->p_reaper) {
                if (sreap->p == target && sreap->p != sender && reaper_ok == 0)
                        return 0;
                return 1;
        }
        lockmgr(&reaper_lock, LK_SHARED);
        r = 0;
        for (reap = target->p_reaper; reap; reap = reap->parent) {
                if (sreap == reap) {
                        if (sreap->p != target || reaper_ok)
                                r = 1;
                        break;
                }
        }
        lockmgr(&reaper_lock, LK_RELEASE);

        return r;
}
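
/*
 * Illustrative userland sketch (kept inside a comment; not part of this
 * file's build) of the reaper interface implemented by sys_procctl()
 * above.  The command names, P_PID idtype check, and the reaper_info
 * status fields come from the handler code above; the header names and
 * the exact procctl(2) prototype are assumptions, consult procctl(2) on
 * the target system.
 *
 *      #include <sys/types.h>
 *      #include <sys/procctl.h>        // assumed home of PROC_REAP_*
 *      #include <sys/wait.h>
 *      #include <err.h>
 *      #include <stdio.h>
 *      #include <unistd.h>
 *
 *      int
 *      main(void)
 *      {
 *              union reaper_info info;
 *
 *              // Become the reaper for future descendants
 *              // (the PROC_REAP_ACQUIRE case above).
 *              if (procctl(P_PID, getpid(), PROC_REAP_ACQUIRE, NULL) < 0)
 *                      err(1, "PROC_REAP_ACQUIRE");
 *
 *              // Query status: flags, ref count, and first-child pid,
 *              // as filled in by the PROC_REAP_STATUS case above.
 *              if (procctl(P_PID, getpid(), PROC_REAP_STATUS, &info) == 0) {
 *                      printf("flags %08x refs %u pid_head %d\n",
 *                          (unsigned)info.status.flags,
 *                          (unsigned)info.status.refs,
 *                          (int)info.status.pid_head);
 *              }
 *
 *              // Drop the reaper role again (PROC_REAP_RELEASE above).
 *              procctl(P_PID, getpid(), PROC_REAP_RELEASE, NULL);
 *              return 0;
 *      }
 */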