/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 * $FreeBSD: src/sys/kern/kern_fork.c,v 1.72.2.14 2003/06/26 04:15:10 silby Exp $
 * $DragonFly: src/sys/kern/kern_fork.c,v 1.77 2008/05/18 20:02:02 nth Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/jail.h>
#include <sys/caps.h>

#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/vmmeter.h>
#include <sys/thread2.h>
#include <sys/signal2.h>
#include <sys/spinlock2.h>

#include <sys/dsched.h>

static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback");

/*
 * These are the structures used to create a callout list of things to do
 * when forking a process.
 */
struct forklist {
	forklist_fn function;
	TAILQ_ENTRY(forklist) next;
};

TAILQ_HEAD(forklist_head, forklist);
static struct forklist_head fork_list = TAILQ_HEAD_INITIALIZER(fork_list);

static struct lwp *lwp_fork(struct lwp *, struct proc *, int flags);

int forksleep; /* Place for fork1() to sleep on. */

/*
 * Red-Black tree support for LWPs
 */

static int
rb_lwp_compare(struct lwp *lp1, struct lwp *lp2)
{
	if (lp1->lwp_tid < lp2->lwp_tid)
		return(-1);
	if (lp1->lwp_tid > lp2->lwp_tid)
		return(1);
	return(0);
}

RB_GENERATE2(lwp_rb_tree, lwp, u.lwp_rbnode, rb_lwp_compare, lwpid_t, lwp_tid);

/*
 * Fork system call
 *
 * MPALMOSTSAFE
 */
int
sys_fork(struct fork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

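	/*
	 * RFFDG copies the parent's descriptor table, RFPROC creates a new
	 * process, and RFPGLOCK interlocks against process group signal
	 * delivery during the fork (see fork1() below).
	 */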
	error = fork1(lp, RFFDG | RFPROC | RFPGLOCK, &p2);
	if (error == 0) {
		start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
	}
	return error;
}

/*
 * MPALMOSTSAFE
 */
int
sys_vfork(struct vfork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

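	/*
	 * vfork differs from fork in that RFMEM shares the parent's address
	 * space with the child and RFPPWAIT makes the parent sleep until the
	 * child execs or exits (see the P_PPWAIT handling in
	 * start_forked_proc()).
	 */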
	error = fork1(lp, RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK, &p2);
	if (error == 0) {
		start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
	}
	return error;
}

/*
 * Handle rforks.  An rfork may (1) operate on the current process without
 * creating a new process, (2) create a new process that shares the current
 * process's vmspace, signals, and/or descriptors, or (3) create a new
 * process that does not share these things (normal fork).
 *
 * Note that we only call start_forked_proc() if a new process is actually
 * created.
 *
 * rfork { int flags }
 *
 * MPALMOSTSAFE
 */
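/*
 * For example (illustrative only): rfork(RFPROC | RFMEM) creates a child
 * that shares the caller's address space, while adding RFNOWAIT reparents
 * the child to init so the caller never waits for it.
 */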
int
sys_rfork(struct rfork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);

	error = fork1(lp, uap->flags | RFPGLOCK, &p2);
	if (error == 0) {
		if (p2)
			start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2 ? p2->p_pid : 0;
		uap->sysmsg_fds[1] = 0;
	}
	return error;
}

/*
 * MPALMOSTSAFE
 */
int
sys_lwp_create(struct lwp_create_args *uap)
{
	struct proc *p = curproc;
	struct lwp *lp;
	struct lwp_params params;
	int error;

	error = copyin(uap->params, &params, sizeof(params));
	if (error)
		goto fail2;

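	/*
	 * The copied-in lwp_params describe the new lwp; cpu_prepare_lwp()
	 * below uses them to set up the lwp's initial user context, and the
	 * optional tid1/tid2 pointers, when non-NULL, receive the new lwp's
	 * tid via copyout.
	 */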
	lwkt_gettoken(&p->p_token);
	plimit_lwp_fork(p);	/* force exclusive access */
	lp = lwp_fork(curthread->td_lwp, p, RFPROC);
	error = cpu_prepare_lwp(lp, &params);
	if (params.tid1 != NULL &&
	    (error = copyout(&lp->lwp_tid, params.tid1, sizeof(lp->lwp_tid))))
		goto fail;
	if (params.tid2 != NULL &&
	    (error = copyout(&lp->lwp_tid, params.tid2, sizeof(lp->lwp_tid))))
		goto fail;

	/*
	 * Now schedule the new lwp.
	 */
	p->p_usched->resetpriority(lp);
	crit_enter();
	lp->lwp_stat = LSRUN;
	p->p_usched->setrunqueue(lp);
	crit_exit();
	lwkt_reltoken(&p->p_token);

	return (0);

fail:
	lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
	--p->p_nthreads;
	/* lwp_dispose expects an exited lwp, and a held proc */
	lp->lwp_flag |= LWP_WEXIT;
	lp->lwp_thread->td_flags |= TDF_EXITING;
	PHOLD(p);
	lwp_dispose(lp);
	lwkt_reltoken(&p->p_token);
fail2:
	return (error);
}

int	nprocs = 1;		/* process 0 */

int
fork1(struct lwp *lp1, int flags, struct proc **procp)
{
	struct proc *p1 = lp1->lwp_proc;
	struct proc *p2, *pptr;
	struct pgrp *p1grp;
	struct pgrp *plkgrp;
	uid_t uid;
	int ok, error;
	static int curfail = 0;
	static struct timeval lastfail;
	struct forklist *ep;
	struct filedesc_to_leader *fdtol;

	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	lwkt_gettoken(&p1->p_token);
	plkgrp = NULL;

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
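	/*
	 * For example (illustrative only), rfork(RFCFDG) with RFPROC clear
	 * ends up here and simply replaces the caller's descriptor table
	 * with a fresh one.
	 */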
	if ((flags & RFPROC) == 0) {
		/*
		 * This kind of stunt does not work anymore if
		 * there are native threads (lwps) running
		 */
		if (p1->p_nthreads != 1) {
			error = EINVAL;
			goto done;
		}

		vm_fork(p1, 0, flags);

		/*
		 * Close all file descriptors.
		 */
		if (flags & RFCFDG) {
			struct filedesc *fdtmp;
			fdtmp = fdinit(p1);
			fdfree(p1, fdtmp);
		}

		/*
		 * Unshare file descriptors (from parent.)
		 */
		if (flags & RFFDG) {
			if (p1->p_fd->fd_refcnt > 1) {
				struct filedesc *newfd;
				newfd = fdcopy(p1);
				fdfree(p1, newfd);
			}
		}
		*procp = NULL;
		error = 0;
		goto done;
	}

	/*
	 * Interlock against process group signal delivery.  If signals
	 * are pending after the interlock is obtained we have to restart
	 * the system call to process the signals.  If we don't the child
	 * can miss a pgsignal (such as ^C) sent during the fork.
	 *
	 * We can't use CURSIG() here because it will process any STOPs
	 * and cause the process group lock to be held indefinitely.  If
	 * a STOP occurs, the fork will be restarted after the CONT.
	 */
	p1grp = p1->p_pgrp;
	if ((flags & RFPGLOCK) && (plkgrp = p1->p_pgrp) != NULL) {
		pgref(plkgrp);
		lockmgr(&plkgrp->pg_lock, LK_SHARED);
		if (CURSIG_NOBLOCK(lp1)) {
			error = ERESTART;
			goto done;
		}
	}

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last ten processes; don't let root
	 * exceed the limit. The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	uid = lp1->lwp_thread->td_ucred->cr_ruid;
	if ((nprocs >= maxproc - 10 && uid != 0) || nprocs >= maxproc) {
		if (ppsratecheck(&lastfail, &curfail, 1))
			kprintf("maxproc limit exceeded by uid %d, please "
			       "see tuning(7) and login.conf(5).\n", uid);
		tsleep(&forksleep, 0, "fork", hz / 2);
		error = EAGAIN;
		goto done;
	}
	/*
	 * Increment the nprocs resource before blocking can occur.  There
	 * are hard-limits as to the number of processes that can run.
	 */
	nprocs++;

	/*
	 * Increment the count of procs running with this uid. Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	ok = chgproccnt(lp1->lwp_thread->td_ucred->cr_ruidinfo, 1,
		(uid != 0) ? p1->p_rlimit[RLIMIT_NPROC].rlim_cur : 0);
	if (!ok) {
		/*
		 * Back out the process count
		 */
		nprocs--;
		if (ppsratecheck(&lastfail, &curfail, 1))
			kprintf("maxproc limit exceeded by uid %d, please "
			       "see tuning(7) and login.conf(5).\n", uid);
		tsleep(&forksleep, 0, "fork", hz / 2);
		error = EAGAIN;
		goto done;
	}

	/* Allocate new proc. */
	p2 = kmalloc(sizeof(struct proc), M_PROC, M_WAITOK|M_ZERO);

	/*
	 * Setup linkage for kernel based threading XXX lwp
	 */
	if (flags & RFTHREAD) {
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
	} else {
		p2->p_leader = p2;
	}

	RB_INIT(&p2->p_lwp_tree);
	spin_init(&p2->p_spin);
	lwkt_token_init(&p2->p_token, "iproc");
	p2->p_lasttid = -1;	/* first tid will be 0 */

	/*
	 * Setting the state to SIDL protects the partially initialized
	 * process once it starts getting hooked into the rest of the system.
	 */
	p2->p_stat = SIDL;
	proc_add_allproc(p2);

	/*
	 * Make a proc table entry for the new process.
	 * The whole structure was zeroed above, so copy the section that is
	 * copied directly from the parent.
	 */
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));

	/*
	 * Duplicate sub-structures as needed.  Increase reference counts
	 * on shared objects.
	 *
	 * NOTE: because we are now on the allproc list it is possible for
	 *	 other consumers to gain temporary references to p2
	 *	 (p2->p_lock can change).
	 */
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);
	p2->p_ucred = crhold(lp1->lwp_thread->td_ucred);

	if (jailed(p2->p_ucred))
		p2->p_flag |= P_JAILED;

	if (p2->p_args)
		p2->p_args->ar_ref++;

	p2->p_usched = p1->p_usched;
	/* XXX: verify copy of the secondary iosched stuff */
	dsched_new_proc(p2);

	if (flags & RFSIGSHARE) {
		p2->p_sigacts = p1->p_sigacts;
		p2->p_sigacts->ps_refcnt++;
	} else {
		p2->p_sigacts = (struct sigacts *)kmalloc(sizeof(*p2->p_sigacts),
		    M_SUBPROC, M_WAITOK);
		bcopy(p1->p_sigacts, p2->p_sigacts, sizeof(*p2->p_sigacts));
		p2->p_sigacts->ps_refcnt = 1;
	}
	if (flags & RFLINUXTHPN)
	        p2->p_sigparent = SIGUSR1;
	else
	        p2->p_sigparent = SIGCHLD;

	/* bump references to the text vnode (for procfs) */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		vref(p2->p_textvp);

	/* copy namecache handle to the text file */
	if (p1->p_textnch.mount)
		cache_copy(&p1->p_textnch, &p2->p_textnch);

	/*
	 * Handle file descriptors
	 */
	if (flags & RFCFDG) {
		p2->p_fd = fdinit(p1);
		fdtol = NULL;
	} else if (flags & RFFDG) {
		p2->p_fd = fdcopy(p1);
		fdtol = NULL;
	} else {
		p2->p_fd = fdshare(p1);
		if (p1->p_fdtol == NULL) {
			lwkt_gettoken(&p1->p_token);
			p1->p_fdtol =
				filedesc_to_leader_alloc(NULL,
							 p1->p_leader);
			lwkt_reltoken(&p1->p_token);
		}
		if ((flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table and
			 * shared process leaders.
			 */
			fdtol = p1->p_fdtol;
			fdtol->fdl_refcount++;
		} else {
			/*
			 * Shared file descriptor table, and
			 * different process leaders
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol, p2);
		}
	}
	p2->p_fdtol = fdtol;
	p2->p_limit = plimit_fork(p1);

	/*
	 * Preserve some more flags in subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flag |= p1->p_flag & P_SUGID;
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		p2->p_flag |= P_CONTROLT;
	if (flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;

	/*
	 * Inherit the virtual kernel structure (allows a virtual kernel
	 * to fork to simulate multiple cpus).
	 */
	if (p1->p_vkernel)
		vkernel_inherit(p1, p2);

	/*
	 * Once we are on a pglist we may receive signals.  XXX we might
	 * race a ^C being sent to the process group by not receiving it
	 * at all prior to this line.
	 */
	pgref(p1grp);
	lwkt_gettoken(&p1grp->pg_token);
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	lwkt_reltoken(&p1grp->pg_token);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if (flags & RFNOWAIT)
		pptr = initproc;
	else
		pptr = p1;
	p2->p_pptr = pptr;
	LIST_INIT(&p2->p_children);

	lwkt_gettoken(&pptr->p_token);
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	lwkt_reltoken(&pptr->p_token);

	varsymset_init(&p2->p_varsymset, &p1->p_varsymset);
	callout_init(&p2->p_ithandle);

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.  If not inherited,
	 * these were zeroed above but we still could have a trace race
	 * so make sure p2's p_tracenode is NULL.
	 */
	if ((p1->p_traceflag & KTRFAC_INHERIT) && p2->p_tracenode == NULL) {
		p2->p_traceflag = p1->p_traceflag;
		p2->p_tracenode = ktrinherit(p1->p_tracenode);
	}
#endif

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 *
	 * Gets PRELE'd in the caller in start_forked_proc().
	 */
	PHOLD(p1);

	vm_fork(p1, p2, flags);

	/*
	 * Create the first lwp associated with the new proc.
	 * It will return via a different execution path later, directly
	 * into userland, after it was put on the runq by
	 * start_forked_proc().
	 */
	lwp_fork(lp1, p2, flags);

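	/*
	 * Classify this fork for the vm statistics counters by matching the
	 * exact flag combinations passed in by sys_fork() and sys_vfork()
	 * above; everything else counts as a kthread or rfork.
	 */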
	if (flags == (RFFDG | RFPROC | RFPGLOCK)) {
		mycpu->gd_cnt.v_forks++;
		mycpu->gd_cnt.v_forkpages += p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize;
	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK)) {
		mycpu->gd_cnt.v_vforks++;
		mycpu->gd_cnt.v_vforkpages += p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize;
	} else if (p1 == &proc0) {
		mycpu->gd_cnt.v_kthreads++;
		mycpu->gd_cnt.v_kthreadpages += p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize;
	} else {
		mycpu->gd_cnt.v_rforks++;
		mycpu->gd_cnt.v_rforkpages += p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize;
	}

	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 *   What if they have an error? XXX
	 */
	TAILQ_FOREACH(ep, &fork_list, next) {
		(*ep->function)(p1, p2, flags);
	}

	/*
	 * Set the start time.  Note that the process is not runnable.  The
	 * caller is responsible for making it runnable.
	 */
	microtime(&p2->p_start);
	p2->p_acflag = AFORK;

	/*
	 * tell any interested parties about the new process
	 */
	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

	/*
	 * Return child proc pointer to parent.
	 */
	*procp = p2;
	error = 0;
done:
	lwkt_reltoken(&p1->p_token);
	if (plkgrp) {
		lockmgr(&plkgrp->pg_lock, LK_RELEASE);
		pgrel(plkgrp);
	}
	return (error);
}

static struct lwp *
lwp_fork(struct lwp *origlp, struct proc *destproc, int flags)
{
	struct lwp *lp;
	struct thread *td;

	lp = kmalloc(sizeof(struct lwp), M_LWP, M_WAITOK|M_ZERO);

	lp->lwp_proc = destproc;
	lp->lwp_vmspace = destproc->p_vmspace;
	lp->lwp_stat = LSRUN;
	bcopy(&origlp->lwp_startcopy, &lp->lwp_startcopy,
	    (unsigned) ((caddr_t)&lp->lwp_endcopy -
			(caddr_t)&lp->lwp_startcopy));
	lp->lwp_flag |= origlp->lwp_flag & LWP_ALTSTACK;
	/*
	 * Set cpbase to the last timeout that occurred (not the upcoming
	 * timeout).
	 *
	 * A critical section is required since a timer IPI can update
	 * scheduler specific data.
	 */
	crit_enter();
	lp->lwp_cpbase = mycpu->gd_schedclock.time -
			mycpu->gd_schedclock.periodic;
	destproc->p_usched->heuristic_forking(origlp, lp);
	crit_exit();
	lp->lwp_cpumask &= usched_mastermask;

	/*
	 * Assign a TID to the lp.  Loop until the insert succeeds (returns
	 * NULL).
	 */
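	/*
	 * The tid wraps to 1 (not 0) on overflow; a non-NULL return from
	 * RB_INSERT means the tid is still in use by an existing lwp, so
	 * keep searching.
	 */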
	lp->lwp_tid = destproc->p_lasttid;
	do {
		if (++lp->lwp_tid < 0)
			lp->lwp_tid = 1;
	} while (lwp_rb_tree_RB_INSERT(&destproc->p_lwp_tree, lp) != NULL);
	destproc->p_lasttid = lp->lwp_tid;
	destproc->p_nthreads++;

	td = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, -1, 0);
	lp->lwp_thread = td;
	td->td_proc = destproc;
	td->td_lwp = lp;
	td->td_switch = cpu_heavy_switch;
	lwkt_setpri(td, TDPRI_KERN_USER);
	lwkt_set_comm(td, "%s", destproc->p_comm);

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(origlp, lp, flags);
	caps_fork(origlp->lwp_thread, lp->lwp_thread);
	kqueue_init(&lp->lwp_kqueue, destproc->p_fd);

	return (lp);
}

/*
 * The next two functions are general routines to handle adding/deleting
 * items on the fork callout list.
 *
 * at_fork():
 * Take the arguments given and put them onto the fork callout list;
 * however, first make sure that the entry is not already there.
 * Returns 0 on success or a standard error number.
 */
int
at_fork(forklist_fn function)
{
	struct forklist *ep;

#ifdef INVARIANTS
	/* let the programmer know if he's been stupid */
	if (rm_at_fork(function)) {
		kprintf("WARNING: fork callout entry (%p) already present\n",
		    function);
	}
#endif
	ep = kmalloc(sizeof(*ep), M_ATFORK, M_WAITOK|M_ZERO);
	ep->function = function;
	TAILQ_INSERT_TAIL(&fork_list, ep, next);
	return (0);
}

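/*
 * Example (illustrative only): a loadable module could register a
 * hypothetical hook with at_fork(my_fork_hook) and remove it again with
 * rm_at_fork(my_fork_hook); the hook is called as (*hook)(p1, p2, flags)
 * for every successful fork1().
 */
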
/*
 * Scan the fork callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1).
 */
int
rm_at_fork(forklist_fn function)
{
	struct forklist *ep;

	TAILQ_FOREACH(ep, &fork_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&fork_list, ep, next);
			kfree(ep, M_ATFORK);
			return(1);
		}
	}
	return (0);
}

/*
 * Add a forked process to the run queue after any remaining setup, such
 * as setting the fork handler, has been completed.
 */
void
start_forked_proc(struct lwp *lp1, struct proc *p2)
{
	struct lwp *lp2 = ONLY_LWP_IN_PROC(p2);

	/*
	 * Move from SIDL to RUN queue, and activate the process's thread.
	 * Activation of the thread effectively makes the process "a"
	 * current process, so we do not setrunqueue().
	 *
	 * YYY setrunqueue works here but we should clean up the trampoline
	 * code so we just schedule the LWKT thread and let the trampoline
	 * deal with the userland scheduler on return to userland.
	 */
	KASSERT(p2->p_stat == SIDL,
	    ("cannot start forked process, bad status: %p", p2));
	p2->p_usched->resetpriority(lp2);
	crit_enter();
	p2->p_stat = SACTIVE;
	lp2->lwp_stat = LSRUN;
	p2->p_usched->setrunqueue(lp2);
	crit_exit();

	/*
	 * Now can be swapped.
	 */
	PRELE(lp1->lwp_proc);

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set P_PPWAIT on child, and sleep on our
	 * proc (in case of exit).
	 */
	while (p2->p_flag & P_PPWAIT)
		tsleep(lp1->lwp_proc, 0, "ppwait", 0);
}
743