/*	$OpenBSD: kern_fork.c,v 1.96 2008/10/09 06:31:53 guenther Exp $	*/
/*	$NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/exec.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/sched.h>
#include <dev/rndvar.h>
#include <sys/pool.h>
#include <sys/mman.h>
#include <sys/ptrace.h>

#include <sys/syscallargs.h>

#include "systrace.h"
#include <dev/systrace.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_map.h>

int	nprocs = 1;		/* process 0 */
int	randompid;		/* when set to 1, PIDs are chosen at random */
pid_t	lastpid;
struct	forkstat forkstat;

void fork_return(void *);
int pidtaken(pid_t);

void process_new(struct proc *, struct proc *);

void
fork_return(void *arg)
{
	struct proc *p = (struct proc *)arg;

	if (p->p_flag & P_TRACED)
		psignal(p, SIGTRAP);

	child_return(p);
}

/*ARGSUSED*/
int
sys_fork(struct proc *p, void *v, register_t *retval)
{
	int flags;

	flags = FORK_FORK;
	if (p->p_ptmask & PTRACE_FORK)
		flags |= FORK_PTRACE;
	return (fork1(p, SIGCHLD, flags, NULL, 0,
	    fork_return, NULL, retval, NULL));
}

/*ARGSUSED*/
int
sys_vfork(struct proc *p, void *v, register_t *retval)
{
	return (fork1(p, SIGCHLD, FORK_VFORK|FORK_PPWAIT, NULL, 0, NULL,
	    NULL, retval, NULL));
}

int
sys_rfork(struct proc *p, void *v, register_t *retval)
{
	struct sys_rfork_args /* {
		syscallarg(int) flags;
	} */ *uap = v;

	int rforkflags;
	int flags;

	flags = FORK_RFORK;
	rforkflags = SCARG(uap, flags);

	if ((rforkflags & RFPROC) == 0)
		return (EINVAL);

	switch (rforkflags & (RFFDG|RFCFDG)) {
	case (RFFDG|RFCFDG):
		return (EINVAL);
	case RFCFDG:
		flags |= FORK_CLEANFILES;
		break;
	case RFFDG:
		break;
	default:
		flags |= FORK_SHAREFILES;
		break;
	}

	if (rforkflags & RFNOWAIT)
		flags |= FORK_NOZOMBIE;

	if (rforkflags & RFMEM)
		flags |= FORK_SHAREVM;
#ifdef RTHREADS
	if (rforkflags & RFTHREAD)
		flags |= FORK_THREAD | FORK_SIGHAND;
#endif

	return (fork1(p, SIGCHLD, flags, NULL, 0, NULL, NULL, retval, NULL));
}
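
/*
 * An illustrative (not compiled) userland sketch of how the rfork(2)
 * flags above combine: RFPROC is mandatory, RFFDG and RFCFDG are
 * mutually exclusive, and leaving both out shares the descriptor table:
 *
 *	rfork(RFPROC | RFFDG);			copy fd table, like fork()
 *	rfork(RFPROC | RFCFDG | RFNOWAIT);	fresh fd table, no zombie
 *	rfork(RFPROC | RFMEM);			share the address space
 */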

/*
 * Allocate and initialize a new process.
 */
void
process_new(struct proc *newproc, struct proc *parent)
{
	struct process *pr;

	pr = pool_get(&process_pool, PR_WAITOK);
	pr->ps_mainproc = newproc;
	TAILQ_INIT(&pr->ps_threads);
	TAILQ_INSERT_TAIL(&pr->ps_threads, newproc, p_thr_link);
	pr->ps_refcnt = 1;
	newproc->p_p = pr;
}

/* print the 'table full' message at most once every 10 seconds */
struct timeval fork_tfmrate = { 10, 0 };

int
fork1(struct proc *p1, int exitsig, int flags, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, register_t *retval,
    struct proc **rnewprocp)
{
	struct proc *p2;
	uid_t uid;
	struct vmspace *vm;
	int count;
	vaddr_t uaddr;
	int s;
	extern void endtsleep(void *);
	extern void realitexpire(void *);

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  We reserve
	 * the last 5 processes for root.  The variable nprocs is the
	 * current number of processes, maxproc is the limit.
	 */
	uid = p1->p_cred->p_ruid;
	if ((nprocs >= maxproc - 5 && uid != 0) || nprocs >= maxproc) {
		static struct timeval lasttfm;

		if (ratecheck(&lasttfm, &fork_tfmrate))
			tablefull("proc");
		return (EAGAIN);
	}
	nprocs++;

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	count = chgproccnt(uid, 1);
	if (uid != 0 && count > p1->p_rlimit[RLIMIT_NPROC].rlim_cur) {
		(void)chgproccnt(uid, -1);
		nprocs--;
		return (EAGAIN);
	}

	uaddr = uvm_km_alloc1(kernel_map, USPACE, USPACE_ALIGN, 1);
	if (uaddr == 0) {
		chgproccnt(uid, -1);
		nprocs--;
		return (ENOMEM);
	}

	/*
	 * From now on, we're committed to the fork and cannot fail.
	 */

	/* Allocate new proc. */
	p2 = pool_get(&proc_pool, PR_WAITOK);

	p2->p_stat = SIDL;			/* protect against others */
	p2->p_exitsig = exitsig;
	p2->p_flag = 0;

#ifdef RTHREADS
	if (flags & FORK_THREAD) {
		atomic_setbits_int(&p2->p_flag, P_THREAD);
		p2->p_p = p1->p_p;
		TAILQ_INSERT_TAIL(&p2->p_p->ps_threads, p2, p_thr_link);
		p2->p_p->ps_refcnt++;
	} else {
		process_new(p2, p1);
	}
#else
	process_new(p2, p1);
#endif

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	bzero(&p2->p_startzero,
	    (unsigned) ((caddr_t)&p2->p_endzero - (caddr_t)&p2->p_startzero));
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));

	/*
	 * Initialize the timeouts.
	 */
	timeout_set(&p2->p_sleep_to, endtsleep, p2);
	timeout_set(&p2->p_realit_to, realitexpire, p2);

	p2->p_cpu = p1->p_cpu;

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 * The p_stats and p_sigacts substructs are set in vm_fork.
	 */
	p2->p_emul = p1->p_emul;
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);
	atomic_setbits_int(&p2->p_flag, p1->p_flag & (P_SUGID | P_SUGIDEXEC));
	if (flags & FORK_PTRACE)
		atomic_setbits_int(&p2->p_flag, p1->p_flag & P_TRACED);
#ifdef RTHREADS
	if (flags & FORK_THREAD) {
		/* nothing */
	} else
#endif
	{
		p2->p_p->ps_cred = pool_get(&pcred_pool, PR_WAITOK);
		bcopy(p1->p_p->ps_cred, p2->p_p->ps_cred,
		    sizeof(*p2->p_p->ps_cred));
		p2->p_p->ps_cred->p_refcnt = 1;
		crhold(p1->p_ucred);
	}

	/* bump references to the text vnode (for procfs) */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		VREF(p2->p_textvp);

	if (flags & FORK_CLEANFILES)
		p2->p_fd = fdinit(p1);
	else if (flags & FORK_SHAREFILES)
		p2->p_fd = fdshare(p1);
	else
		p2->p_fd = fdcopy(p1);
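
	/*
	 * Descriptor-table inheritance above, roughly (see kern_descrip.c):
	 * fdinit() builds a fresh, mostly empty table; fdshare() bumps a
	 * reference count so parent and child operate on the same table;
	 * fdcopy() duplicates the parent's table so later changes stay
	 * private to each process.
	 */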

	/*
	 * If ps_limit is still copy-on-write, bump refcnt,
	 * otherwise get a copy that won't be modified.
	 * (If PL_SHAREMOD is clear, the structure is shared
	 * copy-on-write.)
	 */
#ifdef RTHREADS
	if (flags & FORK_THREAD) {
		/* nothing */
	} else
#endif
	{
		if (p1->p_p->ps_limit->p_lflags & PL_SHAREMOD)
			p2->p_p->ps_limit = limcopy(p1->p_p->ps_limit);
		else {
			p2->p_p->ps_limit = p1->p_p->ps_limit;
			p2->p_p->ps_limit->p_refcnt++;
		}
	}

	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		atomic_setbits_int(&p2->p_flag, P_CONTROLT);
	if (flags & FORK_PPWAIT)
		atomic_setbits_int(&p2->p_flag, P_PPWAIT);
	p2->p_pptr = p1;
	if (flags & FORK_NOZOMBIE)
		atomic_setbits_int(&p2->p_flag, P_NOZOMBIE);
	LIST_INIT(&p2->p_children);

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 * If not inherited, these were zeroed above.
	 */
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracep = p1->p_tracep) != NULL)
			VREF(p2->p_tracep);
	}
#endif

	/*
	 * Set the priority of the child to be that of the parent.
	 * XXX should move p_estcpu into the region of struct proc which gets
	 * copied.
	 */
	scheduler_fork_hook(p1, p2);

	/*
	 * Create signal actions for the child process.
	 */
	if (flags & FORK_SIGHAND)
		sigactsshare(p1, p2);
	else
		p2->p_sigacts = sigactsinit(p1);

	/*
	 * If the emulation has a process fork hook, call it now.
	 */
	if (p2->p_emul->e_proc_fork)
		(*p2->p_emul->e_proc_fork)(p2, p1);

	p2->p_addr = (struct user *)uaddr;

	/*
	 * Finish creating the child process.  It will return through a
	 * different path later.
	 */
	uvm_fork(p1, p2, ((flags & FORK_SHAREVM) ? TRUE : FALSE), stack,
	    stacksize, func ? func : child_return, arg ? arg : p2);

	timeout_set(&p2->p_stats->p_virt_to, virttimer_trampoline, p2);
	timeout_set(&p2->p_stats->p_prof_to, proftimer_trampoline, p2);

	vm = p2->p_vmspace;

	if (flags & FORK_FORK) {
		forkstat.cntfork++;
		forkstat.sizfork += vm->vm_dsize + vm->vm_ssize;
	} else if (flags & FORK_VFORK) {
		forkstat.cntvfork++;
		forkstat.sizvfork += vm->vm_dsize + vm->vm_ssize;
	} else if (flags & FORK_RFORK) {
		forkstat.cntrfork++;
		forkstat.sizrfork += vm->vm_dsize + vm->vm_ssize;
	} else {
		forkstat.cntkthread++;
		forkstat.sizkthread += vm->vm_dsize + vm->vm_ssize;
	}
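
	/*
	 * The forkstat counters above are exported to userland via sysctl.
	 * A minimal sketch of reading one (assuming the KERN_FORKSTAT mib
	 * names from <sys/sysctl.h>; illustrative only, not part of this
	 * file):
	 *
	 *	int mib[3] = { CTL_KERN, KERN_FORKSTAT, KERN_FORKSTAT_FORK };
	 *	int cnt;
	 *	size_t len = sizeof(cnt);
	 *
	 *	if (sysctl(mib, 3, &cnt, &len, NULL, 0) == 0)
	 *		printf("forks: %d\n", cnt);
	 */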

	/* Find an unused pid satisfying 1 <= lastpid <= PID_MAX */
	do {
		lastpid = 1 + (randompid ? arc4random() : lastpid) % PID_MAX;
	} while (pidtaken(lastpid));
	p2->p_pid = lastpid;

	LIST_INSERT_HEAD(&allproc, p2, p_list);
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
	LIST_INSERT_HEAD(&p1->p_children, p2, p_sibling);
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	if (p2->p_flag & P_TRACED) {
		p2->p_oppid = p1->p_pid;
		if (p2->p_pptr != p1->p_pptr)
			proc_reparent(p2, p1->p_pptr);

		/*
		 * Set ptrace status.
		 */
		if (flags & FORK_FORK) {
			p2->p_ptstat = malloc(sizeof(*p2->p_ptstat),
			    M_SUBPROC, M_WAITOK);
			p1->p_ptstat->pe_report_event = PTRACE_FORK;
			p2->p_ptstat->pe_report_event = PTRACE_FORK;
			p1->p_ptstat->pe_other_pid = p2->p_pid;
			p2->p_ptstat->pe_other_pid = p1->p_pid;
		}
	}

#if NSYSTRACE > 0
	if (ISSET(p1->p_flag, P_SYSTRACE))
		systrace_fork(p1, p2);
#endif

	/*
	 * Make child runnable, set start time, and add to run queue.
	 */
	SCHED_LOCK(s);
	getmicrotime(&p2->p_stats->p_start);
	p2->p_acflag = AFORK;
	p2->p_stat = SRUN;
	setrunqueue(p2);
	SCHED_UNLOCK(s);

	/*
	 * Notify any interested parties about the new process.
	 */
	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

	/*
	 * Update stats now that we know the fork was successful.
	 */
	uvmexp.forks++;
	if (flags & FORK_PPWAIT)
		uvmexp.forks_ppwait++;
	if (flags & FORK_SHAREVM)
		uvmexp.forks_sharevm++;

	/*
	 * Pass a pointer to the new process to the caller.
	 */
	if (rnewprocp != NULL)
		*rnewprocp = p2;

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * the child to exec or exit, set P_PPWAIT on the child and sleep
	 * on our proc (in case of exit).
	 */
	if (flags & FORK_PPWAIT)
		while (p2->p_flag & P_PPWAIT)
			tsleep(p1, PWAIT, "ppwait", 0);

	/*
	 * If we're tracing the child, alert the parent too.
	 */
	if ((flags & FORK_PTRACE) && (p1->p_flag & P_TRACED))
		psignal(p1, SIGTRAP);

	/*
	 * Return the child pid to the parent process,
	 * marking us as the parent via retval[1].
	 */
	if (retval != NULL) {
		retval[0] = p2->p_pid;
		retval[1] = 0;
	}
	return (0);
}

/*
 * Check whether a pid is currently in use, either as a pid or as a pgid.
 */
int
pidtaken(pid_t pid)
{
	struct proc *p;

	if (pfind(pid) != NULL)
		return (1);
	if (pgfind(pid) != NULL)
		return (1);
	LIST_FOREACH(p, &zombproc, p_list)
		if (p->p_pid == pid || p->p_pgid == pid)
			return (1);
	return (0);
}

#if defined(MULTIPROCESSOR)
/*
 * XXX This is a slight hack to get newly-formed processes to
 * XXX acquire the kernel lock as soon as they run.
 */
void
proc_trampoline_mp(void)
{
	struct proc *p;

	p = curproc;

	SCHED_ASSERT_LOCKED();
	__mp_unlock(&sched_lock);
	spl0();
	SCHED_ASSERT_UNLOCKED();
	KASSERT(__mp_lock_held(&kernel_lock) == 0);

	KERNEL_PROC_LOCK(p);
}
#endif
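
/*
 * For reference, a sketch of how a kernel-thread creator drives fork1()
 * directly (modeled on kthread_create() in kern_kthread.c; the exact
 * flag combination is an assumption here, so treat it as illustrative):
 *
 *	struct proc *p2;
 *	int error;
 *
 *	error = fork1(&proc0, 0, FORK_SHAREVM|FORK_NOZOMBIE|FORK_SIGHAND,
 *	    NULL, 0, func, arg, NULL, &p2);
 *
 * Kernel threads share proc0's address space (FORK_SHAREVM), never leave
 * zombies (FORK_NOZOMBIE), and start in func rather than child_return.
 */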