/*	$OpenBSD: kern_fork.c,v 1.212 2019/06/01 14:11:17 mpi Exp $	*/
/*	$NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/exec.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/pool.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/atomic.h>
#include <sys/pledge.h>
#include <sys/unistd.h>

#include <sys/syscallargs.h>

#include <uvm/uvm.h>
#include <machine/tcb.h>

#include "kcov.h"

int	nprocesses = 1;		/* process 0 */
int	nthreads = 1;		/* proc 0 */
int	randompid;		/* when set to 1, pid's go random */
struct	forkstat forkstat;

void fork_return(void *);
pid_t alloctid(void);
pid_t allocpid(void);
int ispidtaken(pid_t);

void unveil_copy(struct process *parent, struct process *child);

struct proc *thread_new(struct proc *_parent, vaddr_t _uaddr);
struct process *process_new(struct proc *, struct process *, int);
int fork_check_maxthread(uid_t _uid);

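/*
 * Entry point for the child of a plain fork(2); sys_fork() below passes
 * this as the 'func' callback to fork1().  If the process is being
 * traced, raise SIGTRAP so the debugger can observe the new child, then
 * finish the return to user mode via child_return().
 */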
void
fork_return(void *arg)
{
	struct proc *p = (struct proc *)arg;

	if (p->p_p->ps_flags & PS_TRACED)
		psignal(p, SIGTRAP);

	child_return(p);
}

int
sys_fork(struct proc *p, void *v, register_t *retval)
{
	int flags;

	flags = FORK_FORK;
	if (p->p_p->ps_ptmask & PTRACE_FORK)
		flags |= FORK_PTRACE;
	return fork1(p, flags, fork_return, NULL, retval, NULL);
}

int
sys_vfork(struct proc *p, void *v, register_t *retval)
{
	return fork1(p, FORK_VFORK|FORK_PPWAIT, child_return, NULL,
	    retval, NULL);
}

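/*
 * __tfork(2): create a new thread in the calling process.  Copy in the
 * userland-supplied struct __tfork describing the new thread's stack,
 * TCB and TID pointer, validate it, and hand off to thread_fork() below.
 */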
int
sys___tfork(struct proc *p, void *v, register_t *retval)
{
	struct sys___tfork_args /* {
		syscallarg(const struct __tfork) *param;
		syscallarg(size_t) psize;
	} */ *uap = v;
	size_t psize = SCARG(uap, psize);
	struct __tfork param = { 0 };
	int error;

	if (psize == 0 || psize > sizeof(param))
		return EINVAL;
	if ((error = copyin(SCARG(uap, param), &param, psize)))
		return error;
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT))
		ktrstruct(p, "tfork", &param, sizeof(param));
#endif
#ifdef TCB_INVALID
	if (TCB_INVALID(param.tf_tcb))
		return EINVAL;
#endif /* TCB_INVALID */

	return thread_fork(p, param.tf_stack, param.tf_tcb, param.tf_tid,
	    retval);
}
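
/*
 * Rough sketch of the userland side of this interface (illustrative
 * only; the wrapper name and call shown here are assumptions, not part
 * of this file).  The new thread resumes on the stack supplied in
 * tf_stack, so the thread library wraps the raw syscall in a small
 * assembly helper that makes the child jump to its own entry function
 * rather than returning here:
 *
 *	struct __tfork param = {
 *		.tf_tcb   = tcb,	-- thread control block for the new thread
 *		.tf_tid   = &tid,	-- kernel stores the new TID here
 *		.tf_stack = stack_top,	-- initial stack pointer
 *	};
 *	__tfork_thread(&param, sizeof(param), thread_entry, arg);
 */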

/*
 * Allocate and initialize a thread (proc) structure, given the parent thread.
 */
struct proc *
thread_new(struct proc *parent, vaddr_t uaddr)
{
	struct proc *p;

	p = pool_get(&proc_pool, PR_WAITOK);
	p->p_stat = SIDL;			/* protect against others */
	p->p_flag = 0;

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	memset(&p->p_startzero, 0,
	    (caddr_t)&p->p_endzero - (caddr_t)&p->p_startzero);
	memcpy(&p->p_startcopy, &parent->p_startcopy,
	    (caddr_t)&p->p_endcopy - (caddr_t)&p->p_startcopy);
	crhold(p->p_ucred);
	p->p_addr = (struct user *)uaddr;

	/*
	 * Initialize the timeouts.
	 */
	timeout_set(&p->p_sleep_to, endtsleep, p);

	/*
	 * set priority of child to be that of parent
	 * XXX should move p_estcpu into the region of struct proc which gets
	 * copied.
	 */
	scheduler_fork_hook(parent, p);

#ifdef WITNESS
	p->p_sleeplocks = NULL;
#endif

#if NKCOV > 0
	p->p_kd = NULL;
#endif

	return p;
}

/*
 * Initialize common bits of a process structure, given the initial thread.
 */
void
process_initialize(struct process *pr, struct proc *p)
{
	/* initialize the thread links */
	pr->ps_mainproc = p;
	TAILQ_INIT(&pr->ps_threads);
	TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);
	pr->ps_refcnt = 1;
	p->p_p = pr;

	/* give the process the same creds as the initial thread */
	pr->ps_ucred = p->p_ucred;
	crhold(pr->ps_ucred);
	KASSERT(p->p_ucred->cr_ref >= 2);	/* new thread and new process */

	LIST_INIT(&pr->ps_children);
	LIST_INIT(&pr->ps_ftlist);
	LIST_INIT(&pr->ps_kqlist);
	LIST_INIT(&pr->ps_sigiolst);

	timeout_set(&pr->ps_realit_to, realitexpire, pr);
	timeout_set(&pr->ps_rucheck_to, rucheck, pr);
}


/*
 * Allocate and initialize a new process.
 */
struct process *
process_new(struct proc *p, struct process *parent, int flags)
{
	struct process *pr;

	pr = pool_get(&process_pool, PR_WAITOK);

	/*
	 * Make a process structure for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	memset(&pr->ps_startzero, 0,
	    (caddr_t)&pr->ps_endzero - (caddr_t)&pr->ps_startzero);
	memcpy(&pr->ps_startcopy, &parent->ps_startcopy,
	    (caddr_t)&pr->ps_endcopy - (caddr_t)&pr->ps_startcopy);

	process_initialize(pr, p);
	pr->ps_pid = allocpid();

	/* post-copy fixups */
	pr->ps_pptr = parent;
	pr->ps_limit->pl_refcnt++;
	if (pr->ps_limit->pl_rlimit[RLIMIT_CPU].rlim_cur != RLIM_INFINITY)
		timeout_add_msec(&pr->ps_rucheck_to, RUCHECK_INTERVAL);

	/* bump references to the text vnode (for sysctl) */
	pr->ps_textvp = parent->ps_textvp;
	if (pr->ps_textvp)
		vref(pr->ps_textvp);

	/* copy unveil if unveil is active */
	unveil_copy(parent, pr);

	pr->ps_flags = parent->ps_flags &
	    (PS_SUGID | PS_SUGIDEXEC | PS_PLEDGE | PS_EXECPLEDGE | PS_WXNEEDED);
	if (parent->ps_session->s_ttyvp != NULL)
		pr->ps_flags |= parent->ps_flags & PS_CONTROLT;

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	if (flags & FORK_SHAREFILES)
		pr->ps_fd = fdshare(parent);
	else
		pr->ps_fd = fdcopy(parent);
	if (flags & FORK_SIGHAND)
		pr->ps_sigacts = sigactsshare(parent);
	else
		pr->ps_sigacts = sigactsinit(parent);
	if (flags & FORK_SHAREVM)
		pr->ps_vmspace = uvmspace_share(parent);
	else
		pr->ps_vmspace = uvmspace_fork(parent);

	if (parent->ps_flags & PS_PROFIL)
		startprofclock(pr);
	if (flags & FORK_PTRACE)
		pr->ps_flags |= parent->ps_flags & PS_TRACED;
	if (flags & FORK_NOZOMBIE)
		pr->ps_flags |= PS_NOZOMBIE;
	if (flags & FORK_SYSTEM)
		pr->ps_flags |= PS_SYSTEM;

	/* mark as embryo to protect against others */
	pr->ps_flags |= PS_EMBRYO;

	/* Force visibility of all of the above changes */
	membar_producer();

	/* it's sufficiently inited to be globally visible */
	LIST_INSERT_HEAD(&allprocess, pr, ps_list);

	return pr;
}

/* print the 'table full' message once per 10 seconds */
struct timeval fork_tfmrate = { 10, 0 };

int
fork_check_maxthread(uid_t uid)
{
	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create. We reserve
	 * the last 5 processes to root. The variable nprocesses is the
	 * current number of processes, maxprocess is the limit.  Similar
	 * rules for threads (struct proc): we reserve the last 5 to root;
	 * the variable nthreads is the current number of procs, maxthread is
	 * the limit.
	 */
	if ((nthreads >= maxthread - 5 && uid != 0) || nthreads >= maxthread) {
		static struct timeval lasttfm;

		if (ratecheck(&lasttfm, &fork_tfmrate))
			tablefull("proc");
		return EAGAIN;
	}
	nthreads++;

	return 0;
}

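/*
 * Make a newly created thread runnable: choose a CPU for it based on the
 * parent and the fork flags, then place it on a run queue under the
 * scheduler lock.
 */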
static inline void
fork_thread_start(struct proc *p, struct proc *parent, int flags)
{
	int s;

	SCHED_LOCK(s);
	p->p_stat = SRUN;
	p->p_cpu = sched_choosecpu_fork(parent, flags);
	setrunqueue(p);
	SCHED_UNLOCK(s);
}

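/*
 * fork1: common code for fork(2), vfork(2) and kernel thread creation.
 * Check the process and thread limits, allocate and initialize the new
 * thread and process structures, wire them into the global lists and
 * hash tables, and start the child (unless FORK_IDLE, in which case the
 * caller finishes the job).  'func' and 'arg' name the kernel entry
 * point the child runs via cpu_fork(); on success the child's pid is
 * returned to the parent through 'retval' and, if 'rnewprocp' is
 * non-NULL, a pointer to the new thread is stored there.
 */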
int
fork1(struct proc *curp, int flags, void (*func)(void *), void *arg,
    register_t *retval, struct proc **rnewprocp)
{
	struct process *curpr = curp->p_p;
	struct process *pr;
	struct proc *p;
	uid_t uid = curp->p_ucred->cr_ruid;
	struct vmspace *vm;
	int count;
	vaddr_t uaddr;
	int error;
	struct  ptrace_state *newptstat = NULL;

	KASSERT((flags & ~(FORK_FORK | FORK_VFORK | FORK_PPWAIT | FORK_PTRACE
	    | FORK_IDLE | FORK_SHAREVM | FORK_SHAREFILES | FORK_NOZOMBIE
	    | FORK_SYSTEM | FORK_SIGHAND)) == 0);
	KASSERT((flags & FORK_SIGHAND) == 0 || (flags & FORK_SHAREVM));
	KASSERT(func != NULL);

	if ((error = fork_check_maxthread(uid)))
		return error;

	if ((nprocesses >= maxprocess - 5 && uid != 0) ||
	    nprocesses >= maxprocess) {
		static struct timeval lasttfm;

		if (ratecheck(&lasttfm, &fork_tfmrate))
			tablefull("process");
		nthreads--;
		return EAGAIN;
	}
	nprocesses++;

	/*
	 * Increment the count of processes running with this uid.
	 * Don't allow a nonprivileged user to exceed their current limit.
	 */
	count = chgproccnt(uid, 1);
	if (uid != 0 && count > curp->p_rlimit[RLIMIT_NPROC].rlim_cur) {
		(void)chgproccnt(uid, -1);
		nprocesses--;
		nthreads--;
		return EAGAIN;
	}

	uaddr = uvm_uarea_alloc();
	if (uaddr == 0) {
		(void)chgproccnt(uid, -1);
		nprocesses--;
		nthreads--;
		return (ENOMEM);
	}

	/*
	 * From now on, we're committed to the fork and cannot fail.
	 */
	p = thread_new(curp, uaddr);
	pr = process_new(p, curpr, flags);

	p->p_fd		= pr->ps_fd;
	p->p_vmspace	= pr->ps_vmspace;
	if (pr->ps_flags & PS_SYSTEM)
		atomic_setbits_int(&p->p_flag, P_SYSTEM);

	if (flags & FORK_PPWAIT) {
		atomic_setbits_int(&pr->ps_flags, PS_PPWAIT);
		atomic_setbits_int(&curpr->ps_flags, PS_ISPWAIT);
	}

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 * If not inherited, these were zeroed above.
	 */
	if (curpr->ps_traceflag & KTRFAC_INHERIT)
		ktrsettrace(pr, curpr->ps_traceflag, curpr->ps_tracevp,
		    curpr->ps_tracecred);
#endif

	/*
	 * Finish creating the child thread.  cpu_fork() will copy
	 * and update the pcb and make the child ready to run.  If
	 * this is a normal user fork, the child will exit directly
	 * to user mode via child_return() on its first time slice
	 * and will not return here.  If this is a kernel thread,
	 * the specified entry point will be executed.
	 */
	cpu_fork(curp, p, NULL, NULL, func, arg ? arg : p);

	vm = pr->ps_vmspace;

	if (flags & FORK_FORK) {
		forkstat.cntfork++;
		forkstat.sizfork += vm->vm_dsize + vm->vm_ssize;
	} else if (flags & FORK_VFORK) {
		forkstat.cntvfork++;
		forkstat.sizvfork += vm->vm_dsize + vm->vm_ssize;
	} else {
		forkstat.cntkthread++;
	}

	if (pr->ps_flags & PS_TRACED && flags & FORK_FORK)
		newptstat = malloc(sizeof(*newptstat), M_SUBPROC, M_WAITOK);

	p->p_tid = alloctid();

	LIST_INSERT_HEAD(&allproc, p, p_list);
	LIST_INSERT_HEAD(TIDHASH(p->p_tid), p, p_hash);
	LIST_INSERT_HEAD(PIDHASH(pr->ps_pid), pr, ps_hash);
	LIST_INSERT_AFTER(curpr, pr, ps_pglist);
	LIST_INSERT_HEAD(&curpr->ps_children, pr, ps_sibling);

	if (pr->ps_flags & PS_TRACED) {
		pr->ps_oppid = curpr->ps_pid;
		if (pr->ps_pptr != curpr->ps_pptr)
			proc_reparent(pr, curpr->ps_pptr);

		/*
		 * Set ptrace status.
		 */
		if (newptstat != NULL) {
			pr->ps_ptstat = newptstat;
			newptstat = NULL;
			curpr->ps_ptstat->pe_report_event = PTRACE_FORK;
			pr->ps_ptstat->pe_report_event = PTRACE_FORK;
			curpr->ps_ptstat->pe_other_pid = pr->ps_pid;
			pr->ps_ptstat->pe_other_pid = curpr->ps_pid;
		}
	}

	/*
	 * For new processes, set accounting bits and mark as complete.
	 */
	getnanotime(&pr->ps_start);
	pr->ps_acflag = AFORK;
	atomic_clearbits_int(&pr->ps_flags, PS_EMBRYO);

	if ((flags & FORK_IDLE) == 0)
		fork_thread_start(p, curp, flags);
	else
		p->p_cpu = arg;

	free(newptstat, M_SUBPROC, sizeof(*newptstat));

	/*
	 * Notify any interested parties about the new process.
	 */
	KNOTE(&curpr->ps_klist, NOTE_FORK | pr->ps_pid);

	/*
	 * Update stats now that we know the fork was successful.
	 */
	uvmexp.forks++;
	if (flags & FORK_PPWAIT)
		uvmexp.forks_ppwait++;
	if (flags & FORK_SHAREVM)
		uvmexp.forks_sharevm++;

	/*
	 * Pass a pointer to the new process to the caller.
	 */
	if (rnewprocp != NULL)
		*rnewprocp = p;

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set PS_PPWAIT on child and PS_ISPWAIT
	 * on ourselves, and sleep on our process for the latter flag
	 * to go away.
	 * XXX Need to stop other rthreads in the parent
	 */
	if (flags & FORK_PPWAIT)
		while (curpr->ps_flags & PS_ISPWAIT)
			tsleep(curpr, PWAIT, "ppwait", 0);

	/*
	 * If we're tracing the child, alert the parent too.
	 */
	if ((flags & FORK_PTRACE) && (curpr->ps_flags & PS_TRACED))
		psignal(curp, SIGTRAP);

	/*
	 * Return child pid to parent process
	 */
	if (retval != NULL) {
		retval[0] = pr->ps_pid;
		retval[1] = 0;
	}
	return (0);
}

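/*
 * thread_fork: create a new thread (rthread) within the calling process,
 * sharing its address space and file descriptor table.  'stack' and 'tcb'
 * become the new thread's user stack pointer and thread control block;
 * the new TID is returned to the caller in 'retval' and, if 'tidptr' is
 * non-NULL, also copied out to userspace.
 */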
int
thread_fork(struct proc *curp, void *stack, void *tcb, pid_t *tidptr,
    register_t *retval)
{
	struct process *pr = curp->p_p;
	struct proc *p;
	pid_t tid;
	vaddr_t uaddr;
	int error;

	if (stack == NULL)
		return EINVAL;

	if ((error = fork_check_maxthread(curp->p_ucred->cr_ruid)))
		return error;

	uaddr = uvm_uarea_alloc();
	if (uaddr == 0) {
		nthreads--;
		return ENOMEM;
	}

	/*
	 * From now on, we're committed to the fork and cannot fail.
	 */
	p = thread_new(curp, uaddr);
	atomic_setbits_int(&p->p_flag, P_THREAD);
	sigstkinit(&p->p_sigstk);

	/* other links */
	p->p_p = pr;
	pr->ps_refcnt++;

	/* local copies */
	p->p_fd		= pr->ps_fd;
	p->p_vmspace	= pr->ps_vmspace;

	/*
	 * Finish creating the child thread.  cpu_fork() will copy
	 * and update the pcb and make the child ready to run.  The
	 * child will exit directly to user mode via child_return()
	 * on its first time slice and will not return here.
	 */
	cpu_fork(curp, p, stack, tcb, child_return, p);

	p->p_tid = alloctid();

	LIST_INSERT_HEAD(&allproc, p, p_list);
	LIST_INSERT_HEAD(TIDHASH(p->p_tid), p, p_hash);
	TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);

	/*
	 * if somebody else wants to take us to single threaded mode,
	 * count ourselves in.
	 */
	if (pr->ps_single) {
		pr->ps_singlecount++;
		atomic_setbits_int(&p->p_flag, P_SUSPSINGLE);
	}

	/*
	 * Return tid to parent thread and copy it out to userspace
	 */
	retval[0] = tid = p->p_tid + THREAD_PID_OFFSET;
	retval[1] = 0;
	if (tidptr != NULL) {
		if (copyout(&tid, tidptr, sizeof(tid)))
			psignal(curp, SIGSEGV);
	}

	fork_thread_start(p, curp, 0);

	/*
	 * Update stats now that we know the fork was successful.
	 */
	forkstat.cnttfork++;
	uvmexp.forks++;
	uvmexp.forks_sharevm++;

	return 0;
}


/* Find an unused tid */
pid_t
alloctid(void)
{
	pid_t tid;

	do {
		/* (0 .. TID_MASK+1] */
		tid = 1 + (arc4random() & TID_MASK);
	} while (tfind(tid) != NULL);

	return (tid);
}

/*
 * Checks for current use of a pid, either as a pid or pgid.
 */
pid_t oldpids[128];
int
ispidtaken(pid_t pid)
{
	uint32_t i;

	for (i = 0; i < nitems(oldpids); i++)
		if (pid == oldpids[i])
			return (1);

	if (prfind(pid) != NULL)
		return (1);
	if (pgfind(pid) != NULL)
		return (1);
	if (zombiefind(pid) != NULL)
		return (1);
	return (0);
}

/* Find an unused pid */
pid_t
allocpid(void)
{
	static pid_t lastpid;
	pid_t pid;

	if (!randompid) {
		/* only used early on for system processes */
		pid = ++lastpid;
	} else {
		/* Find an unused pid satisfying lastpid < pid <= PID_MAX */
		do {
			pid = arc4random_uniform(PID_MAX - lastpid) + 1 +
			    lastpid;
		} while (ispidtaken(pid));
	}

	return pid;
}

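/*
 * Remember a pid that has just been released.  oldpids[] is a small ring
 * buffer consulted by ispidtaken() above, so recently freed pids are not
 * handed out again right away.
 */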
void
freepid(pid_t pid)
{
	static uint32_t idx;

	oldpids[idx++ % nitems(oldpids)] = pid;
}

#if defined(MULTIPROCESSOR)
/*
 * XXX This is a slight hack to get newly-formed processes to
 * XXX acquire the kernel lock as soon as they run.
 */
void
proc_trampoline_mp(void)
{
	SCHED_ASSERT_LOCKED();
	__mp_unlock(&sched_lock);
	spl0();
	SCHED_ASSERT_UNLOCKED();
	KERNEL_ASSERT_UNLOCKED();

	KERNEL_LOCK();
}
#endif