/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 * $FreeBSD: src/sys/kern/kern_fork.c,v 1.72.2.14 2003/06/26 04:15:10 silby Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/jail.h>
#include <sys/lwp.h>

#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/vmmeter.h>
#include <sys/refcount.h>
#include <sys/thread2.h>
#include <sys/signal2.h>
#include <sys/spinlock2.h>

#include <sys/dsched.h>

static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback");
static MALLOC_DEFINE(M_REAPER, "reaper", "process reapers");

/*
 * These are the structures used to create a callout list for things to do
 * when forking a process.
 */
struct forklist {
	forklist_fn function;
	TAILQ_ENTRY(forklist) next;
};

TAILQ_HEAD(forklist_head, forklist);
static struct forklist_head fork_list = TAILQ_HEAD_INITIALIZER(fork_list);

static struct lwp *lwp_fork(struct lwp *, struct proc *, int flags,
			    const cpumask_t *mask);
static int lwp_create1(struct lwp_params *params,
			    const cpumask_t *mask);
static struct lock reaper_lock = LOCK_INITIALIZER("reapgl", 0, 0);

int forksleep; /* Place for fork1() to sleep on. */

/*
 * Red-Black tree support for LWPs
 */

static int
rb_lwp_compare(struct lwp *lp1, struct lwp *lp2)
{
	if (lp1->lwp_tid < lp2->lwp_tid)
		return(-1);
	if (lp1->lwp_tid > lp2->lwp_tid)
		return(1);
	return(0);
}

RB_GENERATE2(lwp_rb_tree, lwp, u.lwp_rbnode, rb_lwp_compare, lwpid_t, lwp_tid);

/*
 * fork() system call
 */
int
sys_fork(struct fork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	error = fork1(lp, RFFDG | RFPROC | RFPGLOCK, &p2);
	if (error == 0) {
		PHOLD(p2);
		start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
		PRELE(p2);
	}
	return error;
}

/*
 * vfork() system call
 */
int
sys_vfork(struct vfork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	error = fork1(lp, RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK, &p2);
	if (error == 0) {
		PHOLD(p2);
		start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
		PRELE(p2);
	}
	return error;
}

/*
 * Handle rforks.  An rfork may (1) operate on the current process without
 * creating a new one, (2) create a new process that shares the current
 * process's vmspace, signals, and/or descriptors, or (3) create a new
 * process that does not share these things (normal fork).
 *
 * Note that we only call start_forked_proc() if a new process is actually
 * created.
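 *
 * For example (all derivable from fork1() below): rfork(RFPROC | RFFDG)
 * behaves like fork(), rfork(RFPROC | RFMEM) creates a child sharing the
 * parent's vmspace, and rfork(RFFDG) with RFPROC clear unshares the
 * current process's descriptor table without creating a child at all.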
 *
 * rfork { int flags }
 */
int
sys_rfork(struct rfork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);

	error = fork1(lp, uap->flags | RFPGLOCK, &p2);
	if (error == 0) {
		if (p2) {
			PHOLD(p2);
			start_forked_proc(lp, p2);
			uap->sysmsg_fds[0] = p2->p_pid;
			uap->sysmsg_fds[1] = 0;
			PRELE(p2);
		} else {
			uap->sysmsg_fds[0] = 0;
			uap->sysmsg_fds[1] = 0;
		}
	}
	return error;
}

static int
lwp_create1(struct lwp_params *uprm, const cpumask_t *umask)
{
	struct proc *p = curproc;
	struct lwp *lp;
	struct lwp_params params;
	cpumask_t *mask = NULL, mask0;
	int error;

	error = copyin(uprm, &params, sizeof(params));
	if (error)
		goto fail2;

	if (umask != NULL) {
		error = copyin(umask, &mask0, sizeof(mask0));
		if (error)
			goto fail2;
		CPUMASK_ANDMASK(mask0, smp_active_mask);
		if (CPUMASK_TESTNZERO(mask0))
			mask = &mask0;
	}

	lwkt_gettoken(&p->p_token);
	plimit_lwp_fork(p);	/* force exclusive access */
	lp = lwp_fork(curthread->td_lwp, p, RFPROC | RFMEM, mask);
	error = cpu_prepare_lwp(lp, &params);
	if (error)
		goto fail;
	if (params.lwp_tid1 != NULL &&
	    (error = copyout(&lp->lwp_tid, params.lwp_tid1, sizeof(lp->lwp_tid))))
		goto fail;
	if (params.lwp_tid2 != NULL &&
	    (error = copyout(&lp->lwp_tid, params.lwp_tid2, sizeof(lp->lwp_tid))))
		goto fail;

	/*
	 * Now schedule the new lwp.
	 */
	p->p_usched->resetpriority(lp);
	crit_enter();
	lp->lwp_stat = LSRUN;
	p->p_usched->setrunqueue(lp);
	crit_exit();
	lwkt_reltoken(&p->p_token);

	return (0);

fail:
	/*
	 * Make sure no one is using this lwp, before it is removed from
	 * the tree.  If we didn't wait here, lwp tree iteration with
	 * blocking operations would be broken.
	 */
	while (lp->lwp_lock > 0)
		tsleep(lp, 0, "lwpfail", 1);
	lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
	--p->p_nthreads;
	/* lwp_dispose expects an exited lwp, and a held proc */
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
	lp->lwp_thread->td_flags |= TDF_EXITING;
	lwkt_remove_tdallq(lp->lwp_thread);
	PHOLD(p);
	biosched_done(lp->lwp_thread);
	dsched_exit_thread(lp->lwp_thread);
	lwp_dispose(lp);
	lwkt_reltoken(&p->p_token);
fail2:
	return (error);
}

/*
 * Low level thread create used by pthreads.
 */
int
sys_lwp_create(struct lwp_create_args *uap)
{

	return (lwp_create1(uap->params, NULL));
}

int
sys_lwp_create2(struct lwp_create2_args *uap)
{

	return (lwp_create1(uap->params, uap->mask));
}

int	nprocs = 1;		/* process 0 */

int
fork1(struct lwp *lp1, int flags, struct proc **procp)
{
	struct proc *p1 = lp1->lwp_proc;
	struct proc *p2;
	struct proc *pptr;
	struct pgrp *p1grp;
	struct pgrp *plkgrp;
	struct sysreaper *reap;
	uid_t uid;
	int ok, error;
	static int curfail = 0;
	static struct timeval lastfail;
	struct forklist *ep;
	struct filedesc_to_leader *fdtol;

	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	lwkt_gettoken(&p1->p_token);
	plkgrp = NULL;
	p2 = NULL;

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
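	 * ("divorce" here meaning, per the code below, closing or
	 * unsharing the file descriptor table via RFCFDG or RFFDG).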
	 */
	if ((flags & RFPROC) == 0) {
		/*
		 * This kind of stunt does not work anymore if
		 * there are native threads (lwps) running
		 */
		if (p1->p_nthreads != 1) {
			error = EINVAL;
			goto done;
		}

		vm_fork(p1, 0, flags);

		/*
		 * Close all file descriptors.
		 */
		if (flags & RFCFDG) {
			struct filedesc *fdtmp;
			fdtmp = fdinit(p1);
			fdfree(p1, fdtmp);
		}

		/*
		 * Unshare file descriptors (from parent).
		 */
		if (flags & RFFDG) {
			if (p1->p_fd->fd_refcnt > 1) {
				struct filedesc *newfd;
				error = fdcopy(p1, &newfd);
				if (error != 0) {
					error = ENOMEM;
					goto done;
				}
				fdfree(p1, newfd);
			}
		}
		*procp = NULL;
		error = 0;
		goto done;
	}

	/*
	 * Interlock against process group signal delivery.  If signals
	 * are pending after the interlock is obtained we have to restart
	 * the system call to process the signals.  If we don't the child
	 * can miss a pgsignal (such as ^C) sent during the fork.
	 *
	 * We can't use CURSIG() here because it will process any STOPs
	 * and cause the process group lock to be held indefinitely.  If
	 * a STOP occurs, the fork will be restarted after the CONT.
	 */
	p1grp = p1->p_pgrp;
	if ((flags & RFPGLOCK) && (plkgrp = p1->p_pgrp) != NULL) {
		pgref(plkgrp);
		lockmgr(&plkgrp->pg_lock, LK_SHARED);
		if (CURSIG_NOBLOCK(lp1)) {
			error = ERESTART;
			goto done;
		}
	}

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last ten processes; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	uid = lp1->lwp_thread->td_ucred->cr_ruid;
	if ((nprocs >= maxproc - 10 && uid != 0) || nprocs >= maxproc) {
		if (ppsratecheck(&lastfail, &curfail, 1))
			kprintf("maxproc limit exceeded by uid %d, please "
				"see tuning(7) and login.conf(5).\n", uid);
		tsleep(&forksleep, 0, "fork", hz / 2);
		error = EAGAIN;
		goto done;
	}

	/*
	 * Increment the nprocs resource before blocking can occur.  There
	 * are hard-limits as to the number of processes that can run.
	 */
	atomic_add_int(&nprocs, 1);

	/*
	 * Increment the count of procs running with this uid.  This also
	 * applies to root.
	 */
	ok = chgproccnt(lp1->lwp_thread->td_ucred->cr_ruidinfo, 1,
			plimit_getadjvalue(RLIMIT_NPROC));
	if (!ok) {
		/*
		 * Back out the process count
		 */
		atomic_add_int(&nprocs, -1);
		if (ppsratecheck(&lastfail, &curfail, 1)) {
			kprintf("maxproc limit of %jd "
				"exceeded by \"%s\" uid %d, "
				"please see tuning(7) and login.conf(5).\n",
				plimit_getadjvalue(RLIMIT_NPROC),
				p1->p_comm,
				uid);
		}
		tsleep(&forksleep, 0, "fork", hz / 2);
		error = EAGAIN;
		goto done;
	}

	/*
	 * Allocate a new process, don't get fancy: zero the structure.
	 */
	p2 = kmalloc(sizeof(struct proc), M_PROC, M_WAITOK|M_ZERO);

	/*
	 * Core initialization.  SIDL is a safety state that protects the
	 * partially initialized process once it starts getting hooked
	 * into system structures and becomes addressable.
	 *
	 * We must be sure to acquire p2->p_token as well, we must hold it
	 * once the process is on the allproc list to avoid things such
	 * as competing modifications to p_flags.
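	 * It is acquired below and released at the 'done' label at the
	 * bottom of this function.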
	 */
	mycpu->gd_forkid += ncpus;
	p2->p_forkid = mycpu->gd_forkid + mycpu->gd_cpuid;
	p2->p_lasttid = 0;	/* first tid will be 1 */
	p2->p_stat = SIDL;

	/*
	 * NOTE: Process 0 will not have a reaper, but process 1 (init) and
	 *	 all other processes always will.
	 */
	if ((reap = p1->p_reaper) != NULL) {
		reaper_hold(reap);
		p2->p_reaper = reap;
	} else {
		p2->p_reaper = NULL;
	}

	RB_INIT(&p2->p_lwp_tree);
	spin_init(&p2->p_spin, "procfork1");
	lwkt_token_init(&p2->p_token, "proc");
	lwkt_gettoken(&p2->p_token);

	/*
	 * Setup linkage for kernel based threading XXX lwp.  Also add the
	 * process to the allproclist.
	 *
	 * The process structure is addressable after this point.
	 */
	if (flags & RFTHREAD) {
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
	} else {
		p2->p_leader = p2;
	}
	proc_add_allproc(p2);

	/*
	 * Initialize the section which is copied verbatim from the parent.
	 */
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	      ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));

	/*
	 * Duplicate sub-structures as needed.  Increase reference counts
	 * on shared objects.
	 *
	 * NOTE: because we are now on the allproc list it is possible for
	 *	 other consumers to gain temporary references to p2
	 *	 (p2->p_lock can change).
	 */
	if (p1->p_flags & P_PROFIL)
		startprofclock(p2);
	p2->p_ucred = crhold(lp1->lwp_thread->td_ucred);

	if (jailed(p2->p_ucred))
		p2->p_flags |= P_JAILED;

	if (p2->p_args)
		refcount_acquire(&p2->p_args->ar_ref);

	p2->p_usched = p1->p_usched;
	/* XXX: verify copy of the secondary iosched stuff */
	dsched_enter_proc(p2);

	if (flags & RFSIGSHARE) {
		p2->p_sigacts = p1->p_sigacts;
		refcount_acquire(&p2->p_sigacts->ps_refcnt);
	} else {
		p2->p_sigacts = kmalloc(sizeof(*p2->p_sigacts),
					M_SUBPROC, M_WAITOK);
		bcopy(p1->p_sigacts, p2->p_sigacts, sizeof(*p2->p_sigacts));
		refcount_init(&p2->p_sigacts->ps_refcnt, 1);
	}
	if (flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;

	/* bump references to the text vnode (for procfs) */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		vref(p2->p_textvp);

	/* copy namecache handle to the text file */
	if (p1->p_textnch.mount)
		cache_copy(&p1->p_textnch, &p2->p_textnch);

	/*
	 * Handle file descriptors
	 */
	if (flags & RFCFDG) {
		p2->p_fd = fdinit(p1);
		fdtol = NULL;
	} else if (flags & RFFDG) {
		error = fdcopy(p1, &p2->p_fd);
		if (error != 0) {
			error = ENOMEM;
			goto done;
		}
		fdtol = NULL;
	} else {
		p2->p_fd = fdshare(p1);
		if (p1->p_fdtol == NULL) {
			p1->p_fdtol = filedesc_to_leader_alloc(NULL,
							       p1->p_leader);
		}
		if ((flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table and
			 * shared process leaders.
			 */
			fdtol = p1->p_fdtol;
			fdtol->fdl_refcount++;
		} else {
			/*
			 * Shared file descriptor table, and
			 * different process leaders
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol, p2);
		}
	}
	p2->p_fdtol = fdtol;
	p2->p_limit = plimit_fork(p1);

	/*
	 * Adjust depth for resource downscaling
	 */
	if ((p2->p_depth & 31) != 31)
		++p2->p_depth;

	/*
	 * Preserve some more flags in subprocess.  P_PROFIL has already
	 * been preserved.
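	 * (P_SUGID is copied below; P_CONTROLT is copied only if the
	 * parent's session still has a controlling terminal.)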
	 */
	p2->p_flags |= p1->p_flags & P_SUGID;
	if (p1->p_session->s_ttyvp != NULL && (p1->p_flags & P_CONTROLT))
		p2->p_flags |= P_CONTROLT;
	if (flags & RFPPWAIT) {
		p2->p_flags |= P_PPWAIT;
		if (p1->p_upmap)
			atomic_add_int(&p1->p_upmap->invfork, 1);
	}

	/*
	 * Inherit the virtual kernel structure (allows a virtual kernel
	 * to fork to simulate multiple cpus).
	 */
	if (p1->p_vkernel)
		vkernel_inherit(p1, p2);

	/*
	 * Once we are on a pglist we may receive signals.  XXX we might
	 * race a ^C being sent to the process group by not receiving it
	 * at all prior to this line.
	 */
	pgref(p1grp);
	lwkt_gettoken(&p1grp->pg_token);
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	lwkt_reltoken(&p1grp->pg_token);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of the reaper (typically init).  This effectively disassociates
	 * the child from the parent.
	 *
	 * Temporarily hold pptr for the RFNOWAIT case to avoid ripouts.
	 */
	if (flags & RFNOWAIT) {
		pptr = reaper_get(reap);
		if (pptr == NULL) {
			pptr = initproc;
			PHOLD(pptr);
		}
	} else {
		pptr = p1;
	}
	p2->p_pptr = pptr;
	LIST_INIT(&p2->p_children);

	lwkt_gettoken(&pptr->p_token);
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	lwkt_reltoken(&pptr->p_token);

	if (flags & RFNOWAIT)
		PRELE(pptr);

	varsymset_init(&p2->p_varsymset, &p1->p_varsymset);
	callout_init_mp(&p2->p_ithandle);

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.  If not inherited,
	 * these were zeroed above but we still could have a trace race
	 * so make sure p2's p_tracenode is NULL.
	 */
	if ((p1->p_traceflag & KTRFAC_INHERIT) && p2->p_tracenode == NULL) {
		p2->p_traceflag = p1->p_traceflag;
		p2->p_tracenode = ktrinherit(p1->p_tracenode);
	}
#endif

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 *
	 * Gets PRELE'd in the caller in start_forked_proc().
	 */
	PHOLD(p1);

	vm_fork(p1, p2, flags);

	/*
	 * Create the first lwp associated with the new proc.
	 * It will return via a different execution path later, directly
	 * into userland, after it was put on the runq by
	 * start_forked_proc().
	 */
	lwp_fork(lp1, p2, flags, NULL);

	if (flags == (RFFDG | RFPROC | RFPGLOCK)) {
		mycpu->gd_cnt.v_forks++;
		mycpu->gd_cnt.v_forkpages += p2->p_vmspace->vm_dsize +
					     p2->p_vmspace->vm_ssize;
	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK)) {
		mycpu->gd_cnt.v_vforks++;
		mycpu->gd_cnt.v_vforkpages += p2->p_vmspace->vm_dsize +
					      p2->p_vmspace->vm_ssize;
	} else if (p1 == &proc0) {
		mycpu->gd_cnt.v_kthreads++;
		mycpu->gd_cnt.v_kthreadpages += p2->p_vmspace->vm_dsize +
						p2->p_vmspace->vm_ssize;
	} else {
		mycpu->gd_cnt.v_rforks++;
		mycpu->gd_cnt.v_rforkpages += p2->p_vmspace->vm_dsize +
					      p2->p_vmspace->vm_ssize;
	}

	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 *   What if they have an error? XXX
	 */
	TAILQ_FOREACH(ep, &fork_list, next) {
		(*ep->function)(p1, p2, flags);
	}

	/*
	 * Set the start time.  Note that the process is not runnable.  The
	 * caller is responsible for making it runnable.
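	 * (normally via start_forked_proc(), which moves the new process
	 * from SIDL to SACTIVE and puts its lwp on the run queue).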
	 */
	microtime(&p2->p_start);
	p2->p_acflag = AFORK;

	/*
	 * tell any interested parties about the new process
	 */
	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

	/*
	 * Return child proc pointer to parent.
	 */
	*procp = p2;
	error = 0;
done:
	if (p2)
		lwkt_reltoken(&p2->p_token);
	lwkt_reltoken(&p1->p_token);
	if (plkgrp) {
		lockmgr(&plkgrp->pg_lock, LK_RELEASE);
		pgrel(plkgrp);
	}
	return (error);
}

static struct lwp *
lwp_fork(struct lwp *origlp, struct proc *destproc, int flags,
	 const cpumask_t *mask)
{
	globaldata_t gd = mycpu;
	struct lwp *lp;
	struct thread *td;

	lp = kmalloc(sizeof(struct lwp), M_LWP, M_WAITOK|M_ZERO);

	lp->lwp_proc = destproc;
	lp->lwp_vmspace = destproc->p_vmspace;
	lp->lwp_stat = LSRUN;
	bcopy(&origlp->lwp_startcopy, &lp->lwp_startcopy,
	      (unsigned) ((caddr_t)&lp->lwp_endcopy -
			  (caddr_t)&lp->lwp_startcopy));
	if (mask != NULL)
		lp->lwp_cpumask = *mask;

	/*
	 * Reset the sigaltstack if memory is shared, otherwise inherit
	 * it.
	 */
	if (flags & RFMEM) {
		lp->lwp_sigstk.ss_flags = SS_DISABLE;
		lp->lwp_sigstk.ss_size = 0;
		lp->lwp_sigstk.ss_sp = NULL;
		lp->lwp_flags &= ~LWP_ALTSTACK;
	} else {
		lp->lwp_flags |= origlp->lwp_flags & LWP_ALTSTACK;
	}

	/*
	 * Set cpbase to the last timeout that occurred (not the upcoming
	 * timeout).
	 *
	 * A critical section is required since a timer IPI can update
	 * scheduler specific data.
	 */
	crit_enter();
	lp->lwp_cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
	destproc->p_usched->heuristic_forking(origlp, lp);
	crit_exit();
	CPUMASK_ANDMASK(lp->lwp_cpumask, usched_mastermask);
	lwkt_token_init(&lp->lwp_token, "lwp_token");
	spin_init(&lp->lwp_spin, "lwptoken");

	/*
	 * Assign the thread to the current cpu to begin with so we
	 * can manipulate it.
	 */
	td = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, gd->gd_cpuid, 0);
	lp->lwp_thread = td;
	td->td_ucred = crhold(destproc->p_ucred);
	td->td_proc = destproc;
	td->td_lwp = lp;
	td->td_switch = cpu_heavy_switch;
#ifdef NO_LWKT_SPLIT_USERPRI
	lwkt_setpri(td, TDPRI_USER_NORM);
#else
	lwkt_setpri(td, TDPRI_KERN_USER);
#endif
	lwkt_set_comm(td, "%s", destproc->p_comm);

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(origlp, lp, flags);
	kqueue_init(&lp->lwp_kqueue, destproc->p_fd);

	/*
	 * Assign a TID to the lp.  Loop until the insert succeeds (returns
	 * NULL).
	 *
	 * If we are in a vfork assign the same TID as the lwp that did the
	 * vfork().  This way if the user program messes around with
	 * pthread calls inside the vfork(), it will operate like an
	 * extension of the (blocked) parent.  Also note that since the
	 * address space is being shared, insofar as pthreads is concerned,
	 * the code running in the vfork() is part of the original process.
	 */
	if (flags & RFPPWAIT) {
		lp->lwp_tid = origlp->lwp_tid - 1;
	} else {
		lp->lwp_tid = destproc->p_lasttid;
	}

	do {
		if (++lp->lwp_tid <= 0)
			lp->lwp_tid = 1;
	} while (lwp_rb_tree_RB_INSERT(&destproc->p_lwp_tree, lp) != NULL);

	destproc->p_lasttid = lp->lwp_tid;
	destproc->p_nthreads++;

	/*
	 * This flag is set and never cleared.  It means that the process
	 * was threaded at some point.  Used to improve exit performance.
	 */
	destproc->p_flags |= P_MAYBETHREADED;

	return (lp);
}

/*
 * The next two functions are general routines to handle adding/deleting
 * items on the fork callout list.
 *
 * at_fork():
 * Take the arguments given and put them onto the fork callout list.
 * However, first make sure that it's not already there.
 * Returns 0 on success or a standard error number.
 */
int
at_fork(forklist_fn function)
{
	struct forklist *ep;

#ifdef INVARIANTS
	/* let the programmer know if he's been stupid */
	if (rm_at_fork(function)) {
		kprintf("WARNING: fork callout entry (%p) already present\n",
			function);
	}
#endif
	ep = kmalloc(sizeof(*ep), M_ATFORK, M_WAITOK|M_ZERO);
	ep->function = function;
	TAILQ_INSERT_TAIL(&fork_list, ep, next);
	return (0);
}

/*
 * Scan the fork callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1).
 */
int
rm_at_fork(forklist_fn function)
{
	struct forklist *ep;

	TAILQ_FOREACH(ep, &fork_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&fork_list, ep, next);
			kfree(ep, M_ATFORK);
			return(1);
		}
	}
	return (0);
}

/*
 * Add a forked process to the run queue after any remaining setup, such
 * as setting the fork handler, has been completed.
 *
 * p2 is held by the caller.
 */
void
start_forked_proc(struct lwp *lp1, struct proc *p2)
{
	struct lwp *lp2 = ONLY_LWP_IN_PROC(p2);
	int pflags;

	/*
	 * Move from SIDL to RUN queue, and activate the process's thread.
	 * Activation of the thread effectively makes the process "a"
	 * current process, so we do not setrunqueue().
	 *
	 * YYY setrunqueue works here but we should clean up the trampoline
	 * code so we just schedule the LWKT thread and let the trampoline
	 * deal with the userland scheduler on return to userland.
	 */
	KASSERT(p2->p_stat == SIDL,
	    ("cannot start forked process, bad status: %p", p2));
	p2->p_usched->resetpriority(lp2);
	crit_enter();
	p2->p_stat = SACTIVE;
	lp2->lwp_stat = LSRUN;
	p2->p_usched->setrunqueue(lp2);
	crit_exit();

	/*
	 * Now can be swapped.
	 */
	PRELE(lp1->lwp_proc);

	/*
	 * Preserve synchronization semantics of vfork.  P_PPWAIT is set in
	 * the child until it has retired the parent's resources.  The parent
	 * must wait for the flag to be cleared by the child.
	 *
	 * Interlock the flag/tsleep with atomic ops to avoid unnecessary
	 * p_token conflicts.
	 *
	 * XXX Is this use of an atomic op on a field that is not normally
	 *     manipulated with atomic ops ok?
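	 *
	 * The loop below re-reads p_flags, registers the tsleep interlock,
	 * and only sleeps if p_flags is still unchanged, so the wakeup
	 * issued when the child clears P_PPWAIT cannot be lost.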
	 */
	while ((pflags = p2->p_flags) & P_PPWAIT) {
		cpu_ccfence();
		tsleep_interlock(lp1->lwp_proc, 0);
		if (atomic_cmpset_int(&p2->p_flags, pflags, pflags))
			tsleep(lp1->lwp_proc, PINTERLOCKED, "ppwait", 0);
	}
}

/*
 * procctl (idtype_t idtype, id_t id, int cmd, void *arg)
 */
int
sys_procctl(struct procctl_args *uap)
{
	struct proc *p = curproc;
	struct proc *p2;
	struct sysreaper *reap;
	union reaper_info udata;
	int error;

	if (uap->idtype != P_PID || uap->id != (id_t)p->p_pid)
		return EINVAL;

	switch(uap->cmd) {
	case PROC_REAP_ACQUIRE:
		lwkt_gettoken(&p->p_token);
		reap = kmalloc(sizeof(*reap), M_REAPER, M_WAITOK|M_ZERO);
		if (p->p_reaper == NULL || p->p_reaper->p != p) {
			reaper_init(p, reap);
			error = 0;
		} else {
			kfree(reap, M_REAPER);
			error = EALREADY;
		}
		lwkt_reltoken(&p->p_token);
		break;
	case PROC_REAP_RELEASE:
		lwkt_gettoken(&p->p_token);
release_again:
		reap = p->p_reaper;
		KKASSERT(reap != NULL);
		if (reap->p == p) {
			reaper_hold(reap);	/* in case of thread race */
			lockmgr(&reap->lock, LK_EXCLUSIVE);
			if (reap->p != p) {
				lockmgr(&reap->lock, LK_RELEASE);
				reaper_drop(reap);
				goto release_again;
			}
			reap->p = NULL;
			p->p_reaper = reap->parent;
			if (p->p_reaper)
				reaper_hold(p->p_reaper);
			lockmgr(&reap->lock, LK_RELEASE);
			reaper_drop(reap);	/* our ref */
			reaper_drop(reap);	/* old p_reaper ref */
			error = 0;
		} else {
			error = ENOTCONN;
		}
		lwkt_reltoken(&p->p_token);
		break;
	case PROC_REAP_STATUS:
		bzero(&udata, sizeof(udata));
		lwkt_gettoken_shared(&p->p_token);
		if ((reap = p->p_reaper) != NULL && reap->p == p) {
			udata.status.flags = reap->flags;
			udata.status.refs = reap->refs - 1; /* minus ours */
		}
		p2 = LIST_FIRST(&p->p_children);
		udata.status.pid_head = p2 ? p2->p_pid : -1;
		lwkt_reltoken(&p->p_token);

		if (uap->data) {
			error = copyout(&udata, uap->data,
					sizeof(udata.status));
		} else {
			error = 0;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}

/*
 * Bump ref on reaper, preventing destruction
 */
void
reaper_hold(struct sysreaper *reap)
{
	KKASSERT(reap->refs > 0);
	refcount_acquire(&reap->refs);
}

/*
 * Drop ref on reaper, destroy the structure on the 1->0
 * transition and loop on the parent.
 */
void
reaper_drop(struct sysreaper *next)
{
	struct sysreaper *reap;

	while ((reap = next) != NULL) {
		if (refcount_release(&reap->refs)) {
			next = reap->parent;
			KKASSERT(reap->p == NULL);
			lockmgr(&reaper_lock, LK_EXCLUSIVE);
			reap->parent = NULL;
			kfree(reap, M_REAPER);
			lockmgr(&reaper_lock, LK_RELEASE);
		} else {
			next = NULL;
		}
	}
}

/*
 * Initialize a static or newly allocated reaper structure
 */
void
reaper_init(struct proc *p, struct sysreaper *reap)
{
	reap->parent = p->p_reaper;
	reap->p = p;
	if (p == initproc) {
		reap->flags = REAPER_STAT_OWNED | REAPER_STAT_REALINIT;
		reap->refs = 2;
	} else {
		reap->flags = REAPER_STAT_OWNED;
		reap->refs = 1;
	}
	lockinit(&reap->lock, "subrp", 0, 0);
	cpu_sfence();
	p->p_reaper = reap;
}

/*
 * Called with p->p_token held during exit.
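 * Releases the reaper owned by the exiting process (if any) and returns
 * the remaining p_reaper association, which the caller must drop.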
 *
 * This is a bit simpler than RELEASE because there are no threads remaining
 * to race.  We only release if we own the reaper, the exit code will handle
 * the final p_reaper release.
 */
struct sysreaper *
reaper_exit(struct proc *p)
{
	struct sysreaper *reap;

	/*
	 * Release acquired reaper
	 */
	if ((reap = p->p_reaper) != NULL && reap->p == p) {
		lockmgr(&reap->lock, LK_EXCLUSIVE);
		p->p_reaper = reap->parent;
		if (p->p_reaper)
			reaper_hold(p->p_reaper);
		reap->p = NULL;
		lockmgr(&reap->lock, LK_RELEASE);
		reaper_drop(reap);
	}

	/*
	 * Return and clear reaper (caller is holding p_token for us)
	 * (reap->p does not equal p).  Caller must drop it.
	 */
	if ((reap = p->p_reaper) != NULL) {
		p->p_reaper = NULL;
	}
	return reap;
}

/*
 * Return a held (PHOLD) process representing the reaper for process (p).
 * NULL should not normally be returned.  Caller should PRELE() the returned
 * reaper process when finished.
 *
 * Remove dead internal nodes while we are at it.
 *
 * Process (p)'s token must be held on call.
 * The returned process's token is NOT acquired by this routine.
 */
struct proc *
reaper_get(struct sysreaper *reap)
{
	struct sysreaper *next;
	struct proc *reproc;

	if (reap == NULL)
		return NULL;

	/*
	 * Extra hold for loop
	 */
	reaper_hold(reap);

	while (reap) {
		lockmgr(&reap->lock, LK_SHARED);
		if (reap->p) {
			/*
			 * Probable reaper
			 */
			if (reap->p) {
				reproc = reap->p;
				PHOLD(reproc);
				lockmgr(&reap->lock, LK_RELEASE);
				reaper_drop(reap);
				return reproc;
			}

			/*
			 * Raced, try again
			 */
			lockmgr(&reap->lock, LK_RELEASE);
			continue;
		}

		/*
		 * Traverse upwards in the reaper topology, destroy
		 * dead internal nodes when possible.
		 *
		 * NOTE: Our ref on next means that a dead node should
		 *	 have 2 (ours and reap->parent's).
		 */
		next = reap->parent;
		while (next) {
			reaper_hold(next);
			if (next->refs == 2 && next->p == NULL) {
				lockmgr(&reap->lock, LK_RELEASE);
				lockmgr(&reap->lock, LK_EXCLUSIVE);
				if (next->refs == 2 &&
				    reap->parent == next &&
				    next->p == NULL) {
					/*
					 * reap->parent inherits ref from next.
					 */
					reap->parent = next->parent;
					next->parent = NULL;
					reaper_drop(next);	/* ours */
					reaper_drop(next);	/* old parent */
					next = reap->parent;
					continue;	/* possible chain */
				}
			}
			break;
		}
		lockmgr(&reap->lock, LK_RELEASE);
		reaper_drop(reap);
		reap = next;
	}
	return NULL;
}

/*
 * Test that the sender is allowed to send a signal to the target.
 * The sender process is assumed to have a stable reaper.  The
 * target can be e.g. from a scan callback.
 *
 * Target cannot be the reaper process itself unless reaper_ok is specified,
 * or sender == target.
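 *
 * Returns 1 if the signal is permitted, 0 if it is not.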
 */
int
reaper_sigtest(struct proc *sender, struct proc *target, int reaper_ok)
{
	struct sysreaper *sreap;
	struct sysreaper *reap;
	int r;

	sreap = sender->p_reaper;
	if (sreap == NULL)
		return 1;

	if (sreap == target->p_reaper) {
		if (sreap->p == target && sreap->p != sender && reaper_ok == 0)
			return 0;
		return 1;
	}
	lockmgr(&reaper_lock, LK_SHARED);
	r = 0;
	for (reap = target->p_reaper; reap; reap = reap->parent) {
		if (sreap == reap) {
			if (sreap->p != target || reaper_ok)
				r = 1;
			break;
		}
	}
	lockmgr(&reaper_lock, LK_RELEASE);

	return r;
}