/*	$OpenBSD: kern_fork.c,v 1.90 2007/04/12 22:14:15 tedu Exp $	*/
/*	$NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/exec.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/sched.h>
#include <dev/rndvar.h>
#include <sys/pool.h>
#include <sys/mman.h>
#include <sys/ptrace.h>

#include <sys/syscallargs.h>

#include "systrace.h"
#include <dev/systrace.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_map.h>

int	nprocs = 1;		/* process 0 */
int	randompid;		/* when set to 1, PIDs are chosen at random */
pid_t	lastpid;
struct	forkstat forkstat;

void fork_return(void *);
int pidtaken(pid_t);

void process_new(struct proc *, struct proc *);

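/*
 * First function run in a child created through sys_fork().  If the
 * child is being traced, raise SIGTRAP so the debugger can catch it
 * before it returns to user mode for the first time.
 */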
void
fork_return(void *arg)
{
	struct proc *p = (struct proc *)arg;

	if (p->p_flag & P_TRACED)
		psignal(p, SIGTRAP);

	child_return(p);
}

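/*
 * fork(2).  If the parent has asked to be notified of forks (the
 * PTRACE_FORK bit in p_ptmask, set via ptrace(2)), FORK_PTRACE is
 * added so both parent and child report the event.
 */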
/*ARGSUSED*/
int
sys_fork(struct proc *p, void *v, register_t *retval)
{
	int flags;

	flags = FORK_FORK;
	if (p->p_ptmask & PTRACE_FORK)
		flags |= FORK_PTRACE;
	return (fork1(p, SIGCHLD, flags, NULL, 0,
	    fork_return, NULL, retval, NULL));
}

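/*
 * vfork(2).  FORK_PPWAIT makes the parent sleep at the end of fork1()
 * until the child has exec'd or exited.  Note that FORK_SHAREVM is
 * not set: the child gets its own copy-on-write address space, so
 * this vfork does not let the child scribble on the parent's memory.
 */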
/*ARGSUSED*/
int
sys_vfork(struct proc *p, void *v, register_t *retval)
{
	return (fork1(p, SIGCHLD, FORK_VFORK|FORK_PPWAIT, NULL, 0, NULL,
	    NULL, retval, NULL));
}

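/*
 * rfork(2), after Plan 9.  RFPROC is required.  RFFDG gives the child
 * a copy of the parent's file descriptor table, RFCFDG a fresh one;
 * with neither flag the table is shared.  RFNOWAIT leaves no zombie
 * for the parent to reap, RFMEM shares the address space, and (with
 * rthreads compiled in) RFTHREAD makes the child a thread of the
 * parent's process.  A typical call, for instance, might be
 * rfork(RFPROC | RFMEM | RFNOWAIT).
 */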
int
sys_rfork(struct proc *p, void *v, register_t *retval)
{
	struct sys_rfork_args /* {
		syscallarg(int) flags;
	} */ *uap = v;

	int rforkflags;
	int flags;

	flags = FORK_RFORK;
	rforkflags = SCARG(uap, flags);

	if ((rforkflags & RFPROC) == 0)
		return (EINVAL);

	switch (rforkflags & (RFFDG|RFCFDG)) {
	case (RFFDG|RFCFDG):
		return (EINVAL);
	case RFCFDG:
		flags |= FORK_CLEANFILES;
		break;
	case RFFDG:
		break;
	default:
		flags |= FORK_SHAREFILES;
		break;
	}

	if (rforkflags & RFNOWAIT)
		flags |= FORK_NOZOMBIE;

	if (rforkflags & RFMEM)
		flags |= FORK_SHAREVM;
#ifdef RTHREADS
	if (rforkflags & RFTHREAD)
		flags |= FORK_THREAD;
#endif

	return (fork1(p, SIGCHLD, flags, NULL, 0, NULL, NULL, retval, NULL));
}

/*
 * Allocate and initialize a new process structure.  A process groups
 * its main proc together with any additional threads created later
 * (with rthreads), all linked on the ps_threads list.
 */
void
process_new(struct proc *newproc, struct proc *parent)
{
	struct process *pr;

	pr = pool_get(&process_pool, PR_WAITOK);
	pr->ps_mainproc = newproc;
	TAILQ_INIT(&pr->ps_threads);
	TAILQ_INSERT_TAIL(&pr->ps_threads, newproc, p_thr_link);
	newproc->p_p = pr;
}

/* print the 'table full' message at most once every 10 seconds */
struct timeval fork_tfmrate = { 10, 0 };

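/*
 * fork1 is the common engine behind fork, vfork, rfork and kernel
 * thread creation.  "exitsig" is the signal the child will send its
 * parent when it exits; "flags" is a mask of FORK_* bits selecting
 * the variant; "stack" and "stacksize", when given, provide the child
 * with an alternate user stack.  The child begins execution in "func"
 * with "arg" as its argument, defaulting to child_return(child).  On
 * success the child's pid is returned through "retval" and, if
 * "rnewprocp" is non-NULL, a pointer to the new proc is stored there.
 */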
int
fork1(struct proc *p1, int exitsig, int flags, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, register_t *retval,
    struct proc **rnewprocp)
{
	struct proc *p2;
	uid_t uid;
	struct vmspace *vm;
	int count;
	vaddr_t uaddr;
	int s;
	extern void endtsleep(void *);
	extern void realitexpire(void *);

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create. We reserve
	 * the last 5 processes for root. The variable nprocs is the current
	 * number of processes, maxproc is the limit.
	 */
	uid = p1->p_cred->p_ruid;
	if ((nprocs >= maxproc - 5 && uid != 0) || nprocs >= maxproc) {
		static struct timeval lasttfm;

		if (ratecheck(&lasttfm, &fork_tfmrate))
			tablefull("proc");
		return (EAGAIN);
	}
	nprocs++;

	/*
	 * Increment the count of procs running with this uid. Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	count = chgproccnt(uid, 1);
	if (uid != 0 && count > p1->p_rlimit[RLIMIT_NPROC].rlim_cur) {
		(void)chgproccnt(uid, -1);
		nprocs--;
		return (EAGAIN);
	}

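	/*
	 * Allocate the child's U-area: USPACE bytes of kernel memory
	 * holding the struct user (including p_stats and p_sigacts)
	 * and the kernel stack.  This is the last allocation that is
	 * allowed to fail, so it is done before committing to the fork.
	 */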
	uaddr = uvm_km_alloc1(kernel_map, USPACE, USPACE_ALIGN, 1);
	if (uaddr == 0) {
		chgproccnt(uid, -1);
		nprocs--;
		return (ENOMEM);
	}

	/*
	 * From now on, we're committed to the fork and cannot fail.
	 */

	/* Allocate new proc. */
	p2 = pool_get(&proc_pool, PR_WAITOK);

	p2->p_stat = SIDL;			/* protect against others */
	p2->p_exitsig = exitsig;
	p2->p_forw = p2->p_back = NULL;

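	/*
	 * With rthreads, FORK_THREAD means the new proc becomes a
	 * thread in the parent's process: it shares p1's struct process
	 * and is linked onto its thread list.  Otherwise the child gets
	 * a fresh struct process of its own via process_new().
	 */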
#ifdef RTHREADS
	if (flags & FORK_THREAD) {
		atomic_setbits_int(&p2->p_flag, P_THREAD);
		p2->p_p = p1->p_p;
		TAILQ_INSERT_TAIL(&p2->p_p->ps_threads, p2, p_thr_link);
	} else {
		process_new(p2, p1);
	}
#else
	process_new(p2, p1);
#endif

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	bzero(&p2->p_startzero,
	    (unsigned) ((caddr_t)&p2->p_endzero - (caddr_t)&p2->p_startzero));
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));

	/*
	 * Initialize the timeouts.
	 */
	timeout_set(&p2->p_sleep_to, endtsleep, p2);
	timeout_set(&p2->p_realit_to, realitexpire, p2);

#if defined(__HAVE_CPUINFO)
	p2->p_cpu = p1->p_cpu;
#endif

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 * The p_stats and p_sigacts substructs are set in uvm_fork().
	 */
	p2->p_flag = 0;
	p2->p_emul = p1->p_emul;
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);
	atomic_setbits_int(&p2->p_flag, p1->p_flag & (P_SUGID | P_SUGIDEXEC));
	if (flags & FORK_PTRACE)
		atomic_setbits_int(&p2->p_flag, p1->p_flag & P_TRACED);
#ifdef RTHREADS
	if (flags & FORK_THREAD) {
		/* nothing */
	} else
#endif
	{
		p2->p_p->ps_cred = pool_get(&pcred_pool, PR_WAITOK);
		bcopy(p1->p_p->ps_cred, p2->p_p->ps_cred,
		    sizeof(*p2->p_p->ps_cred));
		p2->p_p->ps_cred->p_refcnt = 1;
		crhold(p1->p_ucred);
	}

	TAILQ_INIT(&p2->p_selects);

	/* bump references to the text vnode (for procfs) */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		VREF(p2->p_textvp);

	if (flags & FORK_CLEANFILES)
		p2->p_fd = fdinit(p1);
	else if (flags & FORK_SHAREFILES)
		p2->p_fd = fdshare(p1);
	else
		p2->p_fd = fdcopy(p1);

	/*
	 * If ps_limit is still copy-on-write, bump refcnt,
	 * otherwise get a copy that won't be modified.
	 * (If PL_SHAREMOD is clear, the structure is shared
	 * copy-on-write.)
	 */
#ifdef RTHREADS
	if (flags & FORK_THREAD) {
		/* nothing */
	} else
#endif
	{
		if (p1->p_p->ps_limit->p_lflags & PL_SHAREMOD)
			p2->p_p->ps_limit = limcopy(p1->p_p->ps_limit);
		else {
			p2->p_p->ps_limit = p1->p_p->ps_limit;
			p2->p_p->ps_limit->p_refcnt++;
		}
	}

	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		atomic_setbits_int(&p2->p_flag, P_CONTROLT);
	if (flags & FORK_PPWAIT)
		atomic_setbits_int(&p2->p_flag, P_PPWAIT);
	p2->p_pptr = p1;
	if (flags & FORK_NOZOMBIE)
		atomic_setbits_int(&p2->p_flag, P_NOZOMBIE);
	LIST_INIT(&p2->p_children);

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 * If not inherited, these were zeroed above.
	 */
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracep = p1->p_tracep) != NULL)
			VREF(p2->p_tracep);
	}
#endif

	/*
	 * Set the priority of the child to be that of the parent.
	 * XXX should move p_estcpu into the region of struct proc which gets
	 * copied.
	 */
	scheduler_fork_hook(p1, p2);

	/*
	 * Create signal actions for the child process.
	 */
	if (flags & FORK_SIGHAND)
		sigactsshare(p1, p2);
	else
		p2->p_sigacts = sigactsinit(p1);

	/*
	 * If the emulation has a process fork hook, call it now.
	 */
	if (p2->p_emul->e_proc_fork)
		(*p2->p_emul->e_proc_fork)(p2, p1);

	p2->p_addr = (struct user *)uaddr;

	/*
	 * Finish creating the child process.  It will return through a
	 * different path later.
	 */
	uvm_fork(p1, p2, ((flags & FORK_SHAREVM) ? TRUE : FALSE), stack,
	    stacksize, func ? func : child_return, arg ? arg : p2);

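	/*
	 * The child's p_stats lives in its freshly built U-area, so
	 * these interval timer timeouts could not be set up any earlier
	 * than this.
	 */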
	timeout_set(&p2->p_stats->p_virt_to, virttimer_trampoline, p2);
	timeout_set(&p2->p_stats->p_prof_to, proftimer_trampoline, p2);

	vm = p2->p_vmspace;

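	/*
	 * Account for the new process in the fork statistics.  Kernel
	 * threads are created through fork1() too, with none of
	 * FORK_FORK, FORK_VFORK or FORK_RFORK set, which is what the
	 * final else branch counts.
	 */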
	if (flags & FORK_FORK) {
		forkstat.cntfork++;
		forkstat.sizfork += vm->vm_dsize + vm->vm_ssize;
	} else if (flags & FORK_VFORK) {
		forkstat.cntvfork++;
		forkstat.sizvfork += vm->vm_dsize + vm->vm_ssize;
	} else if (flags & FORK_RFORK) {
		forkstat.cntrfork++;
		forkstat.sizrfork += vm->vm_dsize + vm->vm_ssize;
	} else {
		forkstat.cntkthread++;
		forkstat.sizkthread += vm->vm_dsize + vm->vm_ssize;
	}

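	/*
	 * When randompid is set (historically toggled with the
	 * kern.randompid sysctl), candidate pids are drawn from
	 * arc4random() rather than sequentially; either way pidtaken()
	 * rejects any candidate still visible as a pid, a process
	 * group id, or a zombie.
	 */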
	/* Find an unused pid satisfying 1 <= lastpid <= PID_MAX */
	do {
		lastpid = 1 + (randompid ? arc4random() : lastpid) % PID_MAX;
	} while (pidtaken(lastpid));
	p2->p_pid = lastpid;

	LIST_INSERT_HEAD(&allproc, p2, p_list);
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
	LIST_INSERT_HEAD(&p1->p_children, p2, p_sibling);
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	if (p2->p_flag & P_TRACED) {
		p2->p_oppid = p1->p_pid;
		if (p2->p_pptr != p1->p_pptr)
			proc_reparent(p2, p1->p_pptr);

		/*
		 * Set ptrace status.
		 */
		if (flags & FORK_FORK) {
			p2->p_ptstat = malloc(sizeof(*p2->p_ptstat),
			    M_SUBPROC, M_WAITOK);
			p1->p_ptstat->pe_report_event = PTRACE_FORK;
			p2->p_ptstat->pe_report_event = PTRACE_FORK;
			p1->p_ptstat->pe_other_pid = p2->p_pid;
			p2->p_ptstat->pe_other_pid = p1->p_pid;
		}
	}

#if NSYSTRACE > 0
	if (ISSET(p1->p_flag, P_SYSTRACE))
		systrace_fork(p1, p2);
#endif

	/*
	 * Make child runnable, set start time, and add to run queue.
	 */
	SCHED_LOCK(s);
	getmicrotime(&p2->p_stats->p_start);
	p2->p_acflag = AFORK;
	p2->p_stat = SRUN;
	setrunqueue(p2);
	SCHED_UNLOCK(s);

	/*
	 * Notify any interested parties about the new process.
	 */
	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

	/*
	 * Update stats now that we know the fork was successful.
	 */
	uvmexp.forks++;
	if (flags & FORK_PPWAIT)
		uvmexp.forks_ppwait++;
	if (flags & FORK_SHAREVM)
		uvmexp.forks_sharevm++;

	/*
	 * Pass a pointer to the new process to the caller.
	 */
	if (rnewprocp != NULL)
		*rnewprocp = p2;

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * the child to exec or exit, set P_PPWAIT on the child and
	 * sleep on our proc; the child clears P_PPWAIT and wakes us
	 * when it execs or exits.
	 */
	if (flags & FORK_PPWAIT)
		while (p2->p_flag & P_PPWAIT)
			tsleep(p1, PWAIT, "ppwait", 0);

	/*
	 * If we're tracing the child, alert the parent too.
	 */
	if ((flags & FORK_PTRACE) && (p1->p_flag & P_TRACED))
		psignal(p1, SIGTRAP);

	/*
	 * Return the child's pid to the parent process,
	 * marking us as the parent via retval[1].
	 */
	if (retval != NULL) {
		retval[0] = p2->p_pid;
		retval[1] = 0;
	}
	return (0);
}

/*
 * Checks whether a pid is currently in use, either as a pid or a
 * process group id, including pids still held by zombies.
 */
int
pidtaken(pid_t pid)
{
	struct proc *p;

	if (pfind(pid) != NULL)
		return (1);
	if (pgfind(pid) != NULL)
		return (1);
	LIST_FOREACH(p, &zombproc, p_list)
		if (p->p_pid == pid || p->p_pgid == pid)
			return (1);
	return (0);
}

#if defined(MULTIPROCESSOR)
/*
 * XXX This is a slight hack to get newly-formed processes to
 * XXX acquire the kernel lock as soon as they run.
 */
void
proc_trampoline_mp(void)
{
	struct proc *p;

	p = curproc;

	SCHED_ASSERT_UNLOCKED();
	KERNEL_PROC_LOCK(p);
}
#endif