/*	$OpenBSD: kern_fork.c,v 1.99 2008/11/03 03:03:35 deraadt Exp $	*/
/*	$NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/exec.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/sched.h>
#include <dev/rndvar.h>
#include <sys/pool.h>
#include <sys/mman.h>
#include <sys/ptrace.h>

#include <sys/syscallargs.h>

#include "systrace.h"
#include <dev/systrace.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_map.h>

int	nprocs = 1;		/* process 0 */
int	randompid;		/* when set to 1, pid's go random */
pid_t	lastpid;
struct	forkstat forkstat;

void fork_return(void *);
int pidtaken(pid_t);

void process_new(struct proc *, struct proc *);

void
fork_return(void *arg)
{
	struct proc *p = (struct proc *)arg;

	if (p->p_flag & P_TRACED)
		psignal(p, SIGTRAP);

	child_return(p);
}

/*ARGSUSED*/
int
sys_fork(struct proc *p, void *v, register_t *retval)
{
	int flags;

	flags = FORK_FORK;
	if (p->p_ptmask & PTRACE_FORK)
		flags |= FORK_PTRACE;
	return (fork1(p, SIGCHLD, flags, NULL, 0,
	    fork_return, NULL, retval, NULL));
}

/*ARGSUSED*/
int
sys_vfork(struct proc *p, void *v, register_t *retval)
{
	return (fork1(p, SIGCHLD, FORK_VFORK|FORK_PPWAIT, NULL, 0, NULL,
	    NULL, retval, NULL));
}

int
sys_rfork(struct proc *p, void *v, register_t *retval)
{
	struct sys_rfork_args /* {
		syscallarg(int) flags;
	} */ *uap = v;

	int rforkflags;
	int flags;

	flags = FORK_RFORK;
	rforkflags = SCARG(uap, flags);

	if ((rforkflags & RFPROC) == 0)
		return (EINVAL);

	switch (rforkflags & (RFFDG|RFCFDG)) {
	case (RFFDG|RFCFDG):
		return (EINVAL);
	case RFCFDG:
		flags |= FORK_CLEANFILES;
		break;
	case RFFDG:
		break;
	default:
		flags |= FORK_SHAREFILES;
		break;
	}

	if (rforkflags & RFNOWAIT)
		flags |= FORK_NOZOMBIE;

	if (rforkflags & RFMEM)
		flags |= FORK_SHAREVM;
#ifdef RTHREADS
	if (rforkflags & RFTHREAD)
		flags |= FORK_THREAD | FORK_SIGHAND;
#endif

	return (fork1(p, SIGCHLD, flags, NULL, 0, NULL, NULL, retval, NULL));
}

/*
 * Allocate and initialize a new process.
 */
void
process_new(struct proc *newproc, struct proc *parent)
{
	struct process *pr;

	pr = pool_get(&process_pool, PR_WAITOK);
	pr->ps_mainproc = newproc;
	TAILQ_INIT(&pr->ps_threads);
	TAILQ_INSERT_TAIL(&pr->ps_threads, newproc, p_thr_link);
	pr->ps_refcnt = 1;
	newproc->p_p = pr;
}

/* print the 'table full' message once per 10 seconds */
struct timeval fork_tfmrate = { 10, 0 };
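/*
 * fork1() below is the common back end for sys_fork(), sys_vfork(),
 * sys_rfork() and kernel thread creation (the forkstat accounting at
 * the end of fork1() covers all four paths).  A minimal sketch of a
 * kernel-thread style caller, assuming the usual flag combination
 * rather than quoting kern_kthread.c verbatim:
 *
 *	struct proc *p2;
 *	int error;
 *
 *	error = fork1(&proc0, SIGCHLD,
 *	    FORK_SHAREVM | FORK_SHAREFILES | FORK_NOZOMBIE,
 *	    NULL, 0, func, arg, NULL, &p2);
 *
 * Since none of FORK_FORK, FORK_VFORK or FORK_RFORK is set, such a
 * fork is counted under forkstat.cntkthread.
 */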
int
fork1(struct proc *p1, int exitsig, int flags, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, register_t *retval,
    struct proc **rnewprocp)
{
	struct proc *p2;
	uid_t uid;
	struct vmspace *vm;
	int count;
	vaddr_t uaddr;
	int s;
	extern void endtsleep(void *);
	extern void realitexpire(void *);
	struct ptrace_state *newptstat;

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  We reserve
	 * the last 5 processes to root.  The variable nprocs is the current
	 * number of processes, maxproc is the limit.
	 */
	uid = p1->p_cred->p_ruid;
	if ((nprocs >= maxproc - 5 && uid != 0) || nprocs >= maxproc) {
		static struct timeval lasttfm;

		if (ratecheck(&lasttfm, &fork_tfmrate))
			tablefull("proc");
		return (EAGAIN);
	}
	nprocs++;

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	count = chgproccnt(uid, 1);
	if (uid != 0 && count > p1->p_rlimit[RLIMIT_NPROC].rlim_cur) {
		(void)chgproccnt(uid, -1);
		nprocs--;
		return (EAGAIN);
	}

	uaddr = uvm_km_alloc1(kernel_map, USPACE, USPACE_ALIGN, 1);
	if (uaddr == 0) {
		chgproccnt(uid, -1);
		nprocs--;
		return (ENOMEM);
	}

	/*
	 * From now on, we're committed to the fork and cannot fail.
	 */

	/* Allocate new proc. */
	p2 = pool_get(&proc_pool, PR_WAITOK);

	p2->p_stat = SIDL;			/* protect against others */
	p2->p_exitsig = exitsig;
	p2->p_flag = 0;

#ifdef RTHREADS
	if (flags & FORK_THREAD) {
		atomic_setbits_int(&p2->p_flag, P_THREAD);
		p2->p_p = p1->p_p;
		TAILQ_INSERT_TAIL(&p2->p_p->ps_threads, p2, p_thr_link);
		p2->p_p->ps_refcnt++;
	} else {
		process_new(p2, p1);
	}
#else
	process_new(p2, p1);
#endif

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	bzero(&p2->p_startzero,
	    (unsigned) ((caddr_t)&p2->p_endzero - (caddr_t)&p2->p_startzero));
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));

	/*
	 * Initialize the timeouts.
	 */
	timeout_set(&p2->p_sleep_to, endtsleep, p2);
	timeout_set(&p2->p_realit_to, realitexpire, p2);

	p2->p_cpu = p1->p_cpu;

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 * The p_stats and p_sigacts substructs are set in vm_fork.
	 */
	p2->p_emul = p1->p_emul;
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);
	atomic_setbits_int(&p2->p_flag, p1->p_flag & (P_SUGID | P_SUGIDEXEC));
	if (flags & FORK_PTRACE)
		atomic_setbits_int(&p2->p_flag, p1->p_flag & P_TRACED);
#ifdef RTHREADS
	if (flags & FORK_THREAD) {
		/* nothing */
	} else
#endif
	{
		p2->p_p->ps_cred = pool_get(&pcred_pool, PR_WAITOK);
		bcopy(p1->p_p->ps_cred, p2->p_p->ps_cred,
		    sizeof(*p2->p_p->ps_cred));
		p2->p_p->ps_cred->p_refcnt = 1;
		crhold(p1->p_ucred);
	}

	/* bump references to the text vnode (for procfs) */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		VREF(p2->p_textvp);

	if (flags & FORK_CLEANFILES)
		p2->p_fd = fdinit(p1);
	else if (flags & FORK_SHAREFILES)
		p2->p_fd = fdshare(p1);
	else
		p2->p_fd = fdcopy(p1);
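	/*
	 * The descriptor-table choice above mirrors the rfork(2) flags:
	 * FORK_CLEANFILES (RFCFDG) starts from a fresh table,
	 * FORK_SHAREFILES (neither RFFDG nor RFCFDG) shares the parent's
	 * table by reference, and the default is a full copy, which is
	 * what plain fork(2) and vfork(2) use.
	 */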
	/*
	 * If ps_limit is still copy-on-write, bump refcnt,
	 * otherwise get a copy that won't be modified.
	 * (If PL_SHAREMOD is clear, the structure is shared
	 * copy-on-write.)
	 */
#ifdef RTHREADS
	if (flags & FORK_THREAD) {
		/* nothing */
	} else
#endif
	{
		if (p1->p_p->ps_limit->p_lflags & PL_SHAREMOD)
			p2->p_p->ps_limit = limcopy(p1->p_p->ps_limit);
		else {
			p2->p_p->ps_limit = p1->p_p->ps_limit;
			p2->p_p->ps_limit->p_refcnt++;
		}
	}

	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		atomic_setbits_int(&p2->p_flag, P_CONTROLT);
	if (flags & FORK_PPWAIT)
		atomic_setbits_int(&p2->p_flag, P_PPWAIT);
	p2->p_pptr = p1;
	if (flags & FORK_NOZOMBIE)
		atomic_setbits_int(&p2->p_flag, P_NOZOMBIE);
	LIST_INIT(&p2->p_children);

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 * If not inherited, these were zeroed above.
	 */
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracep = p1->p_tracep) != NULL)
			VREF(p2->p_tracep);
	}
#endif

	/*
	 * set priority of child to be that of parent
	 * XXX should move p_estcpu into the region of struct proc which gets
	 * copied.
	 */
	scheduler_fork_hook(p1, p2);

	/*
	 * Create signal actions for the child process.
	 */
	if (flags & FORK_SIGHAND)
		sigactsshare(p1, p2);
	else
		p2->p_sigacts = sigactsinit(p1);

	/*
	 * If emulation has process fork hook, call it now.
	 */
	if (p2->p_emul->e_proc_fork)
		(*p2->p_emul->e_proc_fork)(p2, p1);

	p2->p_addr = (struct user *)uaddr;

	/*
	 * Finish creating the child process.  It will return through a
	 * different path later.
	 */
	uvm_fork(p1, p2, ((flags & FORK_SHAREVM) ? TRUE : FALSE), stack,
	    stacksize, func ? func : child_return, arg ? arg : p2);

	timeout_set(&p2->p_stats->p_virt_to, virttimer_trampoline, p2);
	timeout_set(&p2->p_stats->p_prof_to, proftimer_trampoline, p2);

	vm = p2->p_vmspace;

	if (flags & FORK_FORK) {
		forkstat.cntfork++;
		forkstat.sizfork += vm->vm_dsize + vm->vm_ssize;
	} else if (flags & FORK_VFORK) {
		forkstat.cntvfork++;
		forkstat.sizvfork += vm->vm_dsize + vm->vm_ssize;
	} else if (flags & FORK_RFORK) {
		forkstat.cntrfork++;
		forkstat.sizrfork += vm->vm_dsize + vm->vm_ssize;
	} else {
		forkstat.cntkthread++;
		forkstat.sizkthread += vm->vm_dsize + vm->vm_ssize;
	}

	newptstat = malloc(sizeof(struct ptrace_state), M_SUBPROC, M_WAITOK);

	/* Find an unused pid satisfying 1 <= lastpid <= PID_MAX */
	do {
		lastpid = 1 + (randompid ? arc4random() : lastpid) % PID_MAX;
	} while (pidtaken(lastpid));
	p2->p_pid = lastpid;

	LIST_INSERT_HEAD(&allproc, p2, p_list);
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
	LIST_INSERT_HEAD(&p1->p_children, p2, p_sibling);
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	if (p2->p_flag & P_TRACED) {
		p2->p_oppid = p1->p_pid;
		if (p2->p_pptr != p1->p_pptr)
			proc_reparent(p2, p1->p_pptr);

		/*
		 * Set ptrace status.
		 */
		if (flags & FORK_FORK) {
			p2->p_ptstat = newptstat;
			newptstat = NULL;
			p1->p_ptstat->pe_report_event = PTRACE_FORK;
			p2->p_ptstat->pe_report_event = PTRACE_FORK;
			p1->p_ptstat->pe_other_pid = p2->p_pid;
			p2->p_ptstat->pe_other_pid = p1->p_pid;
		}
	}

#if NSYSTRACE > 0
	if (ISSET(p1->p_flag, P_SYSTRACE))
		systrace_fork(p1, p2);
#endif
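	/*
	 * newptstat was allocated with M_WAITOK before the pid was
	 * chosen, so no sleeping allocation is needed once the child
	 * is visible; if the traced-fork case above did not consume
	 * it, it is freed again right after the scheduler lock is
	 * dropped below.
	 */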
	/*
	 * Make child runnable, set start time, and add to run queue.
	 */
	SCHED_LOCK(s);
	getmicrotime(&p2->p_stats->p_start);
	p2->p_acflag = AFORK;
	p2->p_stat = SRUN;
	setrunqueue(p2);
	SCHED_UNLOCK(s);

	if (newptstat)
		free(newptstat, M_SUBPROC);

	/*
	 * Notify any interested parties about the new process.
	 */
	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

	/*
	 * Update stats now that we know the fork was successful.
	 */
	uvmexp.forks++;
	if (flags & FORK_PPWAIT)
		uvmexp.forks_ppwait++;
	if (flags & FORK_SHAREVM)
		uvmexp.forks_sharevm++;

	/*
	 * Pass a pointer to the new process to the caller.
	 */
	if (rnewprocp != NULL)
		*rnewprocp = p2;

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set P_PPWAIT on child, and sleep on our
	 * proc (in case of exit).
	 */
	if (flags & FORK_PPWAIT)
		while (p2->p_flag & P_PPWAIT)
			tsleep(p1, PWAIT, "ppwait", 0);

	/*
	 * If we're tracing the child, alert the parent too.
	 */
	if ((flags & FORK_PTRACE) && (p1->p_flag & P_TRACED))
		psignal(p1, SIGTRAP);

	/*
	 * Return child pid to parent process,
	 * marking us as parent via retval[1].
	 */
	if (retval != NULL) {
		retval[0] = p2->p_pid;
		retval[1] = 0;
	}
	return (0);
}

/*
 * Checks for current use of a pid, either as a pid or pgid.
 */
int
pidtaken(pid_t pid)
{
	struct proc *p;

	if (pfind(pid) != NULL)
		return (1);
	if (pgfind(pid) != NULL)
		return (1);
	LIST_FOREACH(p, &zombproc, p_list)
		if (p->p_pid == pid || p->p_pgid == pid)
			return (1);
	return (0);
}

#if defined(MULTIPROCESSOR)
/*
 * XXX This is a slight hack to get newly-formed processes to
 * XXX acquire the kernel lock as soon as they run.
 */
void
proc_trampoline_mp(void)
{
	struct proc *p;

	p = curproc;

	SCHED_ASSERT_LOCKED();
	__mp_unlock(&sched_lock);
	spl0();
	SCHED_ASSERT_UNLOCKED();
	KASSERT(__mp_lock_held(&kernel_lock) == 0);

	KERNEL_PROC_LOCK(p);
}
#endif
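/*
 * Note: on MULTIPROCESSOR kernels the machine-dependent proc_trampoline
 * glue is expected to call proc_trampoline_mp() in the child before the
 * function passed to fork1() runs (an assumption based on the comment
 * above; the MD side is not part of this file), so the child releases
 * sched_lock and acquires the kernel lock before executing, e.g.,
 * fork_return() or child_return().
 */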