xref: /openbsd-src/sys/kern/kern_fork.c (revision 8b23add8c74b86d0da67de43302cf21b97b028be)
1 /*	$OpenBSD: kern_fork.c,v 1.204 2018/07/13 09:25:23 beck Exp $	*/
2 /*	$NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $	*/
3 
4 /*
5  * Copyright (c) 1982, 1986, 1989, 1991, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  * (c) UNIX System Laboratories, Inc.
8  * All or some portions of this file are derived from material licensed
9  * to the University of California by American Telephone and Telegraph
10  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
11  * the permission of UNIX System Laboratories, Inc.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
38  */
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/filedesc.h>
43 #include <sys/kernel.h>
44 #include <sys/malloc.h>
45 #include <sys/mount.h>
46 #include <sys/proc.h>
47 #include <sys/exec.h>
48 #include <sys/resourcevar.h>
49 #include <sys/signalvar.h>
50 #include <sys/vnode.h>
51 #include <sys/vmmeter.h>
52 #include <sys/acct.h>
53 #include <sys/ktrace.h>
54 #include <sys/sched.h>
55 #include <sys/sysctl.h>
56 #include <sys/pool.h>
57 #include <sys/mman.h>
58 #include <sys/ptrace.h>
59 #include <sys/atomic.h>
60 #include <sys/pledge.h>
61 #include <sys/unistd.h>
62 
63 #include <sys/syscallargs.h>
64 
65 #include <uvm/uvm.h>
66 #include <machine/tcb.h>
67 
68 int	nprocesses = 1;		/* process 0 */
69 int	nthreads = 1;		/* proc 0 */
70 int	randompid;		/* when set to 1, pids go random */
71 struct	forkstat forkstat;
72 
73 void fork_return(void *);
74 pid_t alloctid(void);
75 pid_t allocpid(void);
76 int ispidtaken(pid_t);
77 
78 struct unveil *unveil_copy(struct process *s, size_t *count);
79 
80 struct proc *thread_new(struct proc *_parent, vaddr_t _uaddr);
81 struct process *process_new(struct proc *, struct process *, int);
82 int fork_check_maxthread(uid_t _uid);
83 
84 void
85 fork_return(void *arg)
86 {
87 	struct proc *p = (struct proc *)arg;
88 
89 	if (p->p_p->ps_flags & PS_TRACED)
90 		psignal(p, SIGTRAP);
91 
92 	child_return(p);
93 }
94 
95 int
96 sys_fork(struct proc *p, void *v, register_t *retval)
97 {
98 	int flags;
99 
100 	flags = FORK_FORK;
101 	if (p->p_p->ps_ptmask & PTRACE_FORK)
102 		flags |= FORK_PTRACE;
103 	return fork1(p, flags, fork_return, NULL, retval, NULL);
104 }
105 
106 int
107 sys_vfork(struct proc *p, void *v, register_t *retval)
108 {
109 	return fork1(p, FORK_VFORK|FORK_PPWAIT, child_return, NULL,
110 	    retval, NULL);
111 }
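/*
 * Illustrative userland sketch (not kernel code, not compiled here):
 * as implemented above, vfork(2) is FORK_VFORK|FORK_PPWAIT, so the
 * parent is put to sleep until the child calls execve(2) or _exit(2);
 * the address space itself is still copied (no FORK_SHAREVM).  The
 * helper name spawn_ls() is invented for illustration.
 */
#if 0
#include <sys/wait.h>
#include <err.h>
#include <unistd.h>

static void
spawn_ls(void)
{
	pid_t pid;

	switch ((pid = vfork())) {
	case -1:
		err(1, "vfork");
	case 0:
		/* child: the parent sleeps until we exec or exit */
		execl("/bin/ls", "ls", (char *)NULL);
		_exit(127);		/* exec failed */
	default:
		/* parent: runs again once the child has exec'd or exited */
		waitpid(pid, NULL, 0);
	}
}
#endif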
112 
113 int
114 sys___tfork(struct proc *p, void *v, register_t *retval)
115 {
116 	struct sys___tfork_args /* {
117 		syscallarg(const struct __tfork) *param;
118 		syscallarg(size_t) psize;
119 	} */ *uap = v;
120 	size_t psize = SCARG(uap, psize);
121 	struct __tfork param = { 0 };
122 	int error;
123 
124 	if (psize == 0 || psize > sizeof(param))
125 		return EINVAL;
126 	if ((error = copyin(SCARG(uap, param), &param, psize)))
127 		return error;
128 #ifdef KTRACE
129 	if (KTRPOINT(p, KTR_STRUCT))
130 		ktrstruct(p, "tfork", &param, sizeof(param));
131 #endif
132 #ifdef TCB_INVALID
133 	if (TCB_INVALID(param.tf_tcb))
134 		return EINVAL;
135 #endif /* TCB_INVALID */
136 
137 	return thread_fork(p, param.tf_stack, param.tf_tcb, param.tf_tid,
138 	    retval);
139 }
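/*
 * Illustrative userland sketch (not compiled here) of the __tfork
 * parameters consumed above.  Only the three fields referenced in
 * sys___tfork() are shown; the raw-syscall wrapper name
 * __tfork_thread(), the placeholder variables and start_thread() are
 * assumptions, loosely following what a thread library might do.
 */
#if 0
extern void	*new_tcb;	/* TCB prepared for the new thread */
extern void	*stack_top;	/* initial stack pointer for the new thread */
extern void	 thread_entry(void *);

static pid_t
start_thread(void *arg)
{
	struct __tfork param;
	pid_t tid;

	param.tf_tcb = new_tcb;
	param.tf_tid = &tid;		/* kernel writes the new tid here */
	param.tf_stack = stack_top;

	if (__tfork_thread(&param, sizeof(param), thread_entry, arg) == -1)
		return -1;
	return tid;
}
#endif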
140 
141 /*
142  * Allocate and initialize a thread (proc) structure, given the parent thread.
143  */
144 struct proc *
145 thread_new(struct proc *parent, vaddr_t uaddr)
146 {
147 	struct proc *p;
148 
149 	p = pool_get(&proc_pool, PR_WAITOK);
150 	p->p_stat = SIDL;			/* protect against others */
151 	p->p_flag = 0;
152 
153 	/*
154 	 * Make a proc table entry for the new thread.
155 	 * Start by zeroing the section of proc that is zero-initialized,
156 	 * then copy the section that is copied directly from the parent.
157 	 */
158 	memset(&p->p_startzero, 0,
159 	    (caddr_t)&p->p_endzero - (caddr_t)&p->p_startzero);
160 	memcpy(&p->p_startcopy, &parent->p_startcopy,
161 	    (caddr_t)&p->p_endcopy - (caddr_t)&p->p_startcopy);
162 	crhold(p->p_ucred);
163 	p->p_addr = (struct user *)uaddr;
164 
165 	/*
166 	 * Initialize the timeouts.
167 	 */
168 	timeout_set(&p->p_sleep_to, endtsleep, p);
169 
170 	/*
171 	 * set priority of child to be that of parent
172 	 * XXX should move p_estcpu into the region of struct proc which gets
173 	 * copied.
174 	 */
175 	scheduler_fork_hook(parent, p);
176 
177 #ifdef WITNESS
178 	p->p_sleeplocks = NULL;
179 #endif
180 
181 	return p;
182 }
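/*
 * Minimal sketch of the startzero/startcopy marker idiom used by
 * thread_new() above and process_new() below.  The struct, its fields
 * and widget_init() are invented for illustration; the real markers
 * for struct proc and struct process live in sys/proc.h.
 */
#if 0
struct widget {
	int	w_refcnt;	/* untouched: neither zeroed nor copied */

	int	w_startzero;	/* first member zeroed for a new widget */
	int	w_flags;
	int	w_endzero;	/* end of the zeroed range (exclusive) */

	int	w_startcopy;	/* first member inherited from the parent */
	int	w_nice;
	int	w_endcopy;	/* end of the copied range (exclusive) */
};

void
widget_init(struct widget *w, const struct widget *parent)
{
	memset(&w->w_startzero, 0,
	    (caddr_t)&w->w_endzero - (caddr_t)&w->w_startzero);
	memcpy(&w->w_startcopy, &parent->w_startcopy,
	    (caddr_t)&w->w_endcopy - (caddr_t)&w->w_startcopy);
}
#endif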
183 
184 /*
185  * Initialize common bits of a process structure, given the initial thread.
186  */
187 void
188 process_initialize(struct process *pr, struct proc *p)
189 {
190 	/* initialize the thread links */
191 	pr->ps_mainproc = p;
192 	TAILQ_INIT(&pr->ps_threads);
193 	TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);
194 	pr->ps_refcnt = 1;
195 	p->p_p = pr;
196 
197 	/* give the process the same creds as the initial thread */
198 	pr->ps_ucred = p->p_ucred;
199 	crhold(pr->ps_ucred);
200 	KASSERT(p->p_ucred->cr_ref >= 2);	/* new thread and new process */
201 
202 	LIST_INIT(&pr->ps_children);
203 	LIST_INIT(&pr->ps_kqlist);
204 
205 	timeout_set(&pr->ps_realit_to, realitexpire, pr);
206 }
207 
208 
209 /*
210  * Allocate and initialize a new process.
211  */
212 struct process *
213 process_new(struct proc *p, struct process *parent, int flags)
214 {
215 	struct process *pr;
216 
217 	pr = pool_get(&process_pool, PR_WAITOK);
218 
219 	/*
220 	 * Make a process structure for the new process.
221 	 * Start by zeroing the section of proc that is zero-initialized,
222 	 * then copy the section that is copied directly from the parent.
223 	 */
224 	memset(&pr->ps_startzero, 0,
225 	    (caddr_t)&pr->ps_endzero - (caddr_t)&pr->ps_startzero);
226 	memcpy(&pr->ps_startcopy, &parent->ps_startcopy,
227 	    (caddr_t)&pr->ps_endcopy - (caddr_t)&pr->ps_startcopy);
228 
229 	process_initialize(pr, p);
230 	pr->ps_pid = allocpid();
231 
232 	/* post-copy fixups */
233 	pr->ps_pptr = parent;
234 	pr->ps_limit->p_refcnt++;
235 
236 	/* bump references to the text vnode (for sysctl) */
237 	pr->ps_textvp = parent->ps_textvp;
238 	if (pr->ps_textvp)
239 		vref(pr->ps_textvp);
240 #if 0  /* XXX Fix this */
241 	/* copy unveil if unveil is active */
242 	if (parent->ps_uvvcount) {
243 		pr->ps_uvpaths = unveil_copy(parent, &pr->ps_uvncount);
244 		if (parent->ps_uvpcwd)
245 			pr->ps_uvpcwd = pr->ps_uvpaths +
246 			    (parent->ps_uvpcwd - parent->ps_uvpaths);
247 		pr->ps_uvpcwdgone = parent->ps_uvpcwdgone;
248 		pr->ps_uvdone = parent->ps_uvdone;
249 		pr->ps_uvshrink = 1;
250 	}
251 #endif
252 
253 	pr->ps_flags = parent->ps_flags &
254 	    (PS_SUGID | PS_SUGIDEXEC | PS_PLEDGE | PS_EXECPLEDGE | PS_WXNEEDED);
255 	if (parent->ps_session->s_ttyvp != NULL)
256 		pr->ps_flags |= parent->ps_flags & PS_CONTROLT;
257 
258 	/*
259 	 * Duplicate sub-structures as needed.
260 	 * Increase reference counts on shared objects.
261 	 */
262 	if (flags & FORK_SHAREFILES)
263 		pr->ps_fd = fdshare(parent);
264 	else
265 		pr->ps_fd = fdcopy(parent);
266 	if (flags & FORK_SIGHAND)
267 		pr->ps_sigacts = sigactsshare(parent);
268 	else
269 		pr->ps_sigacts = sigactsinit(parent);
270 	if (flags & FORK_SHAREVM)
271 		pr->ps_vmspace = uvmspace_share(parent);
272 	else
273 		pr->ps_vmspace = uvmspace_fork(parent);
274 
275 	if (parent->ps_flags & PS_PROFIL)
276 		startprofclock(pr);
277 	if (flags & FORK_PTRACE)
278 		pr->ps_flags |= parent->ps_flags & PS_TRACED;
279 	if (flags & FORK_NOZOMBIE)
280 		pr->ps_flags |= PS_NOZOMBIE;
281 	if (flags & FORK_SYSTEM)
282 		pr->ps_flags |= PS_SYSTEM;
283 
284 	/* mark as embryo to protect against others */
285 	pr->ps_flags |= PS_EMBRYO;
286 
287 	/* Force visibility of all of the above changes */
288 	membar_producer();
289 
290 	/* it's sufficiently inited to be globally visible */
291 	LIST_INSERT_HEAD(&allprocess, pr, ps_list);
292 
293 	return pr;
294 }
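/*
 * Summary of the share-vs-copy decisions process_new() makes above,
 * driven by the fork1() flags:
 *
 *	FORK_SHAREFILES		fdshare(parent)		else fdcopy(parent)
 *	FORK_SIGHAND		sigactsshare(parent)	else sigactsinit(parent)
 *	FORK_SHAREVM		uvmspace_share(parent)	else uvmspace_fork(parent)
 *
 * FORK_SIGHAND is only legal together with FORK_SHAREVM; fork1()
 * asserts that combination below.
 */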
295 
296 /* print the 'table full' message once per 10 seconds */
297 struct timeval fork_tfmrate = { 10, 0 };
298 
299 int
300 fork_check_maxthread(uid_t uid)
301 {
302 	/*
303 	 * Although process entries are dynamically created, we still keep
304 	 * a global limit on the maximum number we will create. We reserve
305 	 * the last 5 processes to root. The variable nprocesses is the
306 	 * current number of processes, maxprocess is the limit.  Similar
307 	 * rules for threads (struct proc): we reserve the last 5 to root;
308 	 * the variable nthreads is the current number of procs, maxthread is
309 	 * the limit.
310 	 */
311 	if ((nthreads >= maxthread - 5 && uid != 0) || nthreads >= maxthread) {
312 		static struct timeval lasttfm;
313 
314 		if (ratecheck(&lasttfm, &fork_tfmrate))
315 			tablefull("proc");
316 		return EAGAIN;
317 	}
318 	nthreads++;
319 
320 	return 0;
321 }
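/*
 * Worked example of the check above: with a hypothetical maxthread of
 * 1000, a non-root caller starts getting EAGAIN once nthreads reaches
 * 995, leaving the last five thread slots for root; root itself is
 * refused only when nthreads reaches 1000.
 */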
322 
323 static inline void
324 fork_thread_start(struct proc *p, struct proc *parent, int flags)
325 {
326 	int s;
327 
328 	SCHED_LOCK(s);
329 	p->p_stat = SRUN;
330 	p->p_cpu = sched_choosecpu_fork(parent, flags);
331 	setrunqueue(p);
332 	SCHED_UNLOCK(s);
333 }
334 
335 int
336 fork1(struct proc *curp, int flags, void (*func)(void *), void *arg,
337     register_t *retval, struct proc **rnewprocp)
338 {
339 	struct process *curpr = curp->p_p;
340 	struct process *pr;
341 	struct proc *p;
342 	uid_t uid = curp->p_ucred->cr_ruid;
343 	struct vmspace *vm;
344 	int count;
345 	vaddr_t uaddr;
346 	int error;
347 	struct  ptrace_state *newptstat = NULL;
348 
349 	KASSERT((flags & ~(FORK_FORK | FORK_VFORK | FORK_PPWAIT | FORK_PTRACE
350 	    | FORK_IDLE | FORK_SHAREVM | FORK_SHAREFILES | FORK_NOZOMBIE
351 	    | FORK_SYSTEM | FORK_SIGHAND)) == 0);
352 	KASSERT((flags & FORK_SIGHAND) == 0 || (flags & FORK_SHAREVM));
353 	KASSERT(func != NULL);
354 
355 	if ((error = fork_check_maxthread(uid)))
356 		return error;
357 
358 	if ((nprocesses >= maxprocess - 5 && uid != 0) ||
359 	    nprocesses >= maxprocess) {
360 		static struct timeval lasttfm;
361 
362 		if (ratecheck(&lasttfm, &fork_tfmrate))
363 			tablefull("process");
364 		nthreads--;
365 		return EAGAIN;
366 	}
367 	nprocesses++;
368 
369 	/*
370 	 * Increment the count of processes running with this uid.
371 	 * Don't allow a nonprivileged user to exceed their current limit.
372 	 */
373 	count = chgproccnt(uid, 1);
374 	if (uid != 0 && count > curp->p_rlimit[RLIMIT_NPROC].rlim_cur) {
375 		(void)chgproccnt(uid, -1);
376 		nprocesses--;
377 		nthreads--;
378 		return EAGAIN;
379 	}
380 
381 	uaddr = uvm_uarea_alloc();
382 	if (uaddr == 0) {
383 		(void)chgproccnt(uid, -1);
384 		nprocesses--;
385 		nthreads--;
386 		return (ENOMEM);
387 	}
388 
389 	/*
390 	 * From now on, we're committed to the fork and cannot fail.
391 	 */
392 	p = thread_new(curp, uaddr);
393 	pr = process_new(p, curpr, flags);
394 
395 	p->p_fd		= pr->ps_fd;
396 	p->p_vmspace	= pr->ps_vmspace;
397 	if (pr->ps_flags & PS_SYSTEM)
398 		atomic_setbits_int(&p->p_flag, P_SYSTEM);
399 
400 	if (flags & FORK_PPWAIT) {
401 		atomic_setbits_int(&pr->ps_flags, PS_PPWAIT);
402 		atomic_setbits_int(&curpr->ps_flags, PS_ISPWAIT);
403 	}
404 
405 #ifdef KTRACE
406 	/*
407 	 * Copy traceflag and tracefile if enabled.
408 	 * If not inherited, these were zeroed above.
409 	 */
410 	if (curpr->ps_traceflag & KTRFAC_INHERIT)
411 		ktrsettrace(pr, curpr->ps_traceflag, curpr->ps_tracevp,
412 		    curpr->ps_tracecred);
413 #endif
414 
415 	/*
416 	 * Finish creating the child thread.  cpu_fork() will copy
417 	 * and update the pcb and make the child ready to run.  If
418 	 * this is a normal user fork, the child will exit directly
419 	 * to user mode via child_return() on its first time slice
420 	 * and will not return here.  If this is a kernel thread,
421 	 * the specified entry point will be executed.
422 	 */
423 	cpu_fork(curp, p, NULL, NULL, func, arg ? arg : p);
424 
425 	vm = pr->ps_vmspace;
426 
427 	if (flags & FORK_FORK) {
428 		forkstat.cntfork++;
429 		forkstat.sizfork += vm->vm_dsize + vm->vm_ssize;
430 	} else if (flags & FORK_VFORK) {
431 		forkstat.cntvfork++;
432 		forkstat.sizvfork += vm->vm_dsize + vm->vm_ssize;
433 	} else {
434 		forkstat.cntkthread++;
435 	}
436 
437 	if (pr->ps_flags & PS_TRACED && flags & FORK_FORK)
438 		newptstat = malloc(sizeof(*newptstat), M_SUBPROC, M_WAITOK);
439 
440 	p->p_tid = alloctid();
441 
442 	LIST_INSERT_HEAD(&allproc, p, p_list);
443 	LIST_INSERT_HEAD(TIDHASH(p->p_tid), p, p_hash);
444 	LIST_INSERT_HEAD(PIDHASH(pr->ps_pid), pr, ps_hash);
445 	LIST_INSERT_AFTER(curpr, pr, ps_pglist);
446 	LIST_INSERT_HEAD(&curpr->ps_children, pr, ps_sibling);
447 
448 	if (pr->ps_flags & PS_TRACED) {
449 		pr->ps_oppid = curpr->ps_pid;
450 		if (pr->ps_pptr != curpr->ps_pptr)
451 			proc_reparent(pr, curpr->ps_pptr);
452 
453 		/*
454 		 * Set ptrace status.
455 		 */
456 		if (newptstat != NULL) {
457 			pr->ps_ptstat = newptstat;
458 			newptstat = NULL;
459 			curpr->ps_ptstat->pe_report_event = PTRACE_FORK;
460 			pr->ps_ptstat->pe_report_event = PTRACE_FORK;
461 			curpr->ps_ptstat->pe_other_pid = pr->ps_pid;
462 			pr->ps_ptstat->pe_other_pid = curpr->ps_pid;
463 		}
464 	}
465 
466 	/*
467 	 * For new processes, set accounting bits and mark as complete.
468 	 */
469 	getnanotime(&pr->ps_start);
470 	pr->ps_acflag = AFORK;
471 	atomic_clearbits_int(&pr->ps_flags, PS_EMBRYO);
472 
473 	if ((flags & FORK_IDLE) == 0)
474 		fork_thread_start(p, curp, flags);
475 	else
476 		p->p_cpu = arg;
477 
478 	free(newptstat, M_SUBPROC, sizeof(*newptstat));
479 
480 	/*
481 	 * Notify any interested parties about the new process.
482 	 */
483 	KNOTE(&curpr->ps_klist, NOTE_FORK | pr->ps_pid);
484 
485 	/*
486 	 * Update stats now that we know the fork was successful.
487 	 */
488 	uvmexp.forks++;
489 	if (flags & FORK_PPWAIT)
490 		uvmexp.forks_ppwait++;
491 	if (flags & FORK_SHAREVM)
492 		uvmexp.forks_sharevm++;
493 
494 	/*
495 	 * Pass a pointer to the new process to the caller.
496 	 */
497 	if (rnewprocp != NULL)
498 		*rnewprocp = p;
499 
500 	/*
501 	 * Preserve synchronization semantics of vfork.  If waiting for
502 	 * child to exec or exit, set PS_PPWAIT on child and PS_ISPWAIT
503 	 * on ourselves, and sleep on our process for the latter flag
504 	 * to go away.
505 	 * XXX Need to stop other rthreads in the parent
506 	 */
507 	if (flags & FORK_PPWAIT)
508 		while (curpr->ps_flags & PS_ISPWAIT)
509 			tsleep(curpr, PWAIT, "ppwait", 0);
510 
511 	/*
512 	 * If we're tracing the child, alert the parent too.
513 	 */
514 	if ((flags & FORK_PTRACE) && (curpr->ps_flags & PS_TRACED))
515 		psignal(curp, SIGTRAP);
516 
517 	/*
518 	 * Return child pid to parent process
519 	 */
520 	if (retval != NULL) {
521 		retval[0] = pr->ps_pid;
522 		retval[1] = 0;
523 	}
524 	return (0);
525 }
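/*
 * Sketch of how a kernel thread could be created on top of fork1(),
 * loosely modeled on kthread_create(9).  The exact flag combination,
 * the use of proc0 as the forking thread and the helper name are
 * assumptions for illustration, not a copy of the real implementation.
 */
#if 0
static int
example_start_kthread(void (*func)(void *), void *arg, struct proc **newpp)
{
	return fork1(&proc0,
	    FORK_SHAREVM | FORK_SHAREFILES | FORK_NOZOMBIE | FORK_SYSTEM,
	    func, arg, NULL, newpp);
}
#endif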
526 
527 int
528 thread_fork(struct proc *curp, void *stack, void *tcb, pid_t *tidptr,
529     register_t *retval)
530 {
531 	struct process *pr = curp->p_p;
532 	struct proc *p;
533 	pid_t tid;
534 	vaddr_t uaddr;
535 	int error;
536 
537 	if (stack == NULL)
538 		return EINVAL;
539 
540 	if ((error = fork_check_maxthread(curp->p_ucred->cr_ruid)))
541 		return error;
542 
543 	uaddr = uvm_uarea_alloc();
544 	if (uaddr == 0) {
545 		nthreads--;
546 		return ENOMEM;
547 	}
548 
549 	/*
550 	 * From now on, we're committed to the fork and cannot fail.
551 	 */
552 	p = thread_new(curp, uaddr);
553 	atomic_setbits_int(&p->p_flag, P_THREAD);
554 	sigstkinit(&p->p_sigstk);
555 
556 	/* other links */
557 	p->p_p = pr;
558 	pr->ps_refcnt++;
559 
560 	/* local copies */
561 	p->p_fd		= pr->ps_fd;
562 	p->p_vmspace	= pr->ps_vmspace;
563 
564 	/*
565 	 * Finish creating the child thread.  cpu_fork() will copy
566 	 * and update the pcb and make the child ready to run.  The
567 	 * child will exit directly to user mode via child_return()
568 	 * on its first time slice and will not return here.
569 	 */
570 	cpu_fork(curp, p, stack, tcb, child_return, p);
571 
572 	p->p_tid = alloctid();
573 
574 	LIST_INSERT_HEAD(&allproc, p, p_list);
575 	LIST_INSERT_HEAD(TIDHASH(p->p_tid), p, p_hash);
576 	TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);
577 
578 	/*
579 	 * if somebody else wants to take us to single threaded mode,
580 	 * count ourselves in.
581 	 */
582 	if (pr->ps_single) {
583 		pr->ps_singlecount++;
584 		atomic_setbits_int(&p->p_flag, P_SUSPSINGLE);
585 	}
586 
587 	/*
588 	 * Return tid to parent thread and copy it out to userspace
589 	 */
590 	retval[0] = tid = p->p_tid + THREAD_PID_OFFSET;
591 	retval[1] = 0;
592 	if (tidptr != NULL) {
593 		if (copyout(&tid, tidptr, sizeof(tid)))
594 			psignal(curp, SIGSEGV);
595 	}
596 
597 	fork_thread_start(p, curp, 0);
598 
599 	/*
600 	 * Update stats now that we know the fork was successful.
601 	 */
602 	forkstat.cnttfork++;
603 	uvmexp.forks++;
604 	uvmexp.forks_sharevm++;
605 
606 	return 0;
607 }
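/*
 * Note the contrast with fork1() above: thread_fork() never creates a
 * new struct process.  The new proc joins the existing pr->ps_threads
 * list, bumps ps_refcnt, and points p_fd/p_vmspace at the structures
 * already owned by the process, so the shared-VM accounting
 * (uvmexp.forks_sharevm) is incremented unconditionally.  The value
 * returned to userland is p_tid + THREAD_PID_OFFSET, and the same tid
 * is copied out through the caller-supplied tidptr.
 */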
608 
609 
610 /* Find an unused tid */
611 pid_t
612 alloctid(void)
613 {
614 	pid_t tid;
615 
616 	do {
617 		/* (0 .. TID_MASK+1] */
618 		tid = 1 + (arc4random() & TID_MASK);
619 	} while (tfind(tid) != NULL);
620 
621 	return (tid);
622 }
623 
624 /*
625  * Checks for current use of a pid, either as a pid or pgid.
626  */
627 pid_t oldpids[128];
628 int
629 ispidtaken(pid_t pid)
630 {
631 	uint32_t i;
632 
633 	for (i = 0; i < nitems(oldpids); i++)
634 		if (pid == oldpids[i])
635 			return (1);
636 
637 	if (prfind(pid) != NULL)
638 		return (1);
639 	if (pgfind(pid) != NULL)
640 		return (1);
641 	if (zombiefind(pid) != NULL)
642 		return (1);
643 	return (0);
644 }
645 
646 /* Find an unused pid */
647 pid_t
648 allocpid(void)
649 {
650 	static pid_t lastpid;
651 	pid_t pid;
652 
653 	if (!randompid) {
654 		/* only used early on for system processes */
655 		pid = ++lastpid;
656 	} else {
657 		/* Find an unused pid satisfying lastpid < pid <= PID_MAX */
658 		do {
659 			pid = arc4random_uniform(PID_MAX - lastpid) + 1 +
660 			    lastpid;
661 		} while (ispidtaken(pid));
662 	}
663 
664 	return pid;
665 }
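/*
 * Example: if lastpid is 10 when randompid is set, each call draws a
 * candidate uniformly from 11..PID_MAX and retries until ispidtaken()
 * finds no live process, process group, zombie, or recently freed pid
 * (remembered in oldpids[] by freepid() below) with that value.
 */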
666 
667 void
668 freepid(pid_t pid)
669 {
670 	static uint32_t idx;
671 
672 	oldpids[idx++ % nitems(oldpids)] = pid;
673 }
674 
675 #if defined(MULTIPROCESSOR)
676 /*
677  * XXX This is a slight hack to get newly-formed processes to
678  * XXX acquire the kernel lock as soon as they run.
679  */
680 void
681 proc_trampoline_mp(void)
682 {
683 	SCHED_ASSERT_LOCKED();
684 	__mp_unlock(&sched_lock);
685 	spl0();
686 	SCHED_ASSERT_UNLOCKED();
687 	KERNEL_ASSERT_UNLOCKED();
688 
689 	KERNEL_LOCK();
690 }
691 #endif
692