/*	$OpenBSD: kern_fork.c,v 1.207 2018/08/30 03:30:25 visa Exp $	*/
/*	$NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/exec.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/pool.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/atomic.h>
#include <sys/pledge.h>
#include <sys/unistd.h>

#include <sys/syscallargs.h>

#include <uvm/uvm.h>
#include <machine/tcb.h>

#include "kcov.h"

int	nprocesses = 1;		/* process 0 */
int	nthreads = 1;		/* proc 0 */
int	randompid;		/* when set to 1, pids are chosen at random */
struct	forkstat forkstat;

void fork_return(void *);
pid_t alloctid(void);
pid_t allocpid(void);
int ispidtaken(pid_t);

void unveil_copy(struct process *parent, struct process *child);

struct proc *thread_new(struct proc *_parent, vaddr_t _uaddr);
struct process *process_new(struct proc *, struct process *, int);
int fork_check_maxthread(uid_t _uid);

void
fork_return(void *arg)
{
	struct proc *p = (struct proc *)arg;

	if (p->p_p->ps_flags & PS_TRACED)
		psignal(p, SIGTRAP);

	child_return(p);
}

int
sys_fork(struct proc *p, void *v, register_t *retval)
{
	int flags;

	flags = FORK_FORK;
	if (p->p_p->ps_flags & PS_TRACED)
		flags |= FORK_PTRACE;
	return fork1(p, flags, fork_return, NULL, retval, NULL);
}
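
/*
 * Userland view of the above (an illustrative sketch, not kernel
 * code): fork(2) returns twice.  The parent receives the child's pid
 * through retval[0] in fork1(); the child enters fork_return() and
 * child_return(), where its return value is set to 0.
 *
 *	pid_t pid = fork();
 *	if (pid == -1)
 *		err(1, "fork");
 *	if (pid == 0)
 *		_exit(child_work());	(child: fork returned 0)
 *	(parent continues; pid names the child)
 */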

int
sys_vfork(struct proc *p, void *v, register_t *retval)
{
	return fork1(p, FORK_VFORK|FORK_PPWAIT, child_return, NULL,
	    retval, NULL);
}

int
sys___tfork(struct proc *p, void *v, register_t *retval)
{
	struct sys___tfork_args /* {
		syscallarg(const struct __tfork *) param;
		syscallarg(size_t) psize;
	} */ *uap = v;
	size_t psize = SCARG(uap, psize);
	struct __tfork param = { 0 };
	int error;

	if (psize == 0 || psize > sizeof(param))
		return EINVAL;
	if ((error = copyin(SCARG(uap, param), &param, psize)))
		return error;
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT))
		ktrstruct(p, "tfork", &param, sizeof(param));
#endif
#ifdef TCB_INVALID
	if (TCB_INVALID(param.tf_tcb))
		return EINVAL;
#endif /* TCB_INVALID */

	return thread_fork(p, param.tf_stack, param.tf_tcb, param.tf_tid,
	    retval);
}
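
/*
 * Illustrative userland setup for __tfork(2), a sketch only: in
 * practice librthread drives this through its __tfork_thread()
 * wrapper, and the stack handling below is simplified.  The field
 * names are the ones consumed above (tf_stack, tf_tcb, tf_tid);
 * new_tcb and stack_top are hypothetical values the caller prepares.
 *
 *	pid_t newtid;
 *	struct __tfork param = {
 *		.tf_tcb = new_tcb,	(thread control block, machine dep.)
 *		.tf_tid = &newtid,	(kernel copies the new tid out here)
 *		.tf_stack = stack_top,	(initial stack pointer)
 *	};
 *	__tfork(&param, sizeof(param));
 */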

/*
 * Allocate and initialize a thread (proc) structure, given the parent thread.
 */
struct proc *
thread_new(struct proc *parent, vaddr_t uaddr)
{
	struct proc *p;

	p = pool_get(&proc_pool, PR_WAITOK);
	p->p_stat = SIDL;			/* protect against others */
	p->p_flag = 0;

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	memset(&p->p_startzero, 0,
	    (caddr_t)&p->p_endzero - (caddr_t)&p->p_startzero);
	memcpy(&p->p_startcopy, &parent->p_startcopy,
	    (caddr_t)&p->p_endcopy - (caddr_t)&p->p_startcopy);
	crhold(p->p_ucred);
	p->p_addr = (struct user *)uaddr;

	/*
	 * Initialize the timeouts.
	 */
	timeout_set(&p->p_sleep_to, endtsleep, p);

	/*
	 * set priority of child to be that of parent
	 * XXX should move p_estcpu into the region of struct proc which gets
	 * copied.
	 */
	scheduler_fork_hook(parent, p);

#ifdef WITNESS
	p->p_sleeplocks = NULL;
#endif

#if NKCOV > 0
	p->p_kd = NULL;
#endif

	return p;
}
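
/*
 * The p_startzero/p_endzero and p_startcopy/p_endcopy markers used
 * above delimit member ranges inside struct proc (see sys/proc.h).
 * A hypothetical sketch of the layout pattern, not the real struct:
 *
 *	struct proc {
 *		...			(set up individually at fork)
 *		p_startzero marker	(start of fields zeroed at fork)
 *		...
 *		p_endzero/p_startcopy	(fields below are inherited
 *		...			 verbatim from the parent)
 *		p_endcopy marker	(end of the copied range)
 *		...			(set up individually at fork)
 *	};
 *
 * A member added to struct proc must land in the right range, or it
 * starts life as stale pool memory.
 */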

/*
 * Initialize common bits of a process structure, given the initial thread.
 */
void
process_initialize(struct process *pr, struct proc *p)
{
	/* initialize the thread links */
	pr->ps_mainproc = p;
	TAILQ_INIT(&pr->ps_threads);
	TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);
	pr->ps_refcnt = 1;
	p->p_p = pr;

	/* give the process the same creds as the initial thread */
	pr->ps_ucred = p->p_ucred;
	crhold(pr->ps_ucred);
	KASSERT(p->p_ucred->cr_ref >= 2);	/* new thread and new process */

	LIST_INIT(&pr->ps_children);
	LIST_INIT(&pr->ps_ftlist);
	LIST_INIT(&pr->ps_kqlist);

	timeout_set(&pr->ps_realit_to, realitexpire, pr);
}


/*
 * Allocate and initialize a new process.
 */
struct process *
process_new(struct proc *p, struct process *parent, int flags)
{
	struct process *pr;

	pr = pool_get(&process_pool, PR_WAITOK);

	/*
	 * Make a process structure for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	memset(&pr->ps_startzero, 0,
	    (caddr_t)&pr->ps_endzero - (caddr_t)&pr->ps_startzero);
	memcpy(&pr->ps_startcopy, &parent->ps_startcopy,
	    (caddr_t)&pr->ps_endcopy - (caddr_t)&pr->ps_startcopy);

	process_initialize(pr, p);
	pr->ps_pid = allocpid();

	/* post-copy fixups */
	pr->ps_pptr = parent;
	pr->ps_limit->p_refcnt++;

	/* bump references to the text vnode (for sysctl) */
	pr->ps_textvp = parent->ps_textvp;
	if (pr->ps_textvp)
		vref(pr->ps_textvp);

	/* copy unveil if unveil is active */
	unveil_copy(parent, pr);

	pr->ps_flags = parent->ps_flags &
	    (PS_SUGID | PS_SUGIDEXEC | PS_PLEDGE | PS_EXECPLEDGE | PS_WXNEEDED);
	if (parent->ps_session->s_ttyvp != NULL)
		pr->ps_flags |= parent->ps_flags & PS_CONTROLT;

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	if (flags & FORK_SHAREFILES)
		pr->ps_fd = fdshare(parent);
	else
		pr->ps_fd = fdcopy(parent);
	if (flags & FORK_SIGHAND)
		pr->ps_sigacts = sigactsshare(parent);
	else
		pr->ps_sigacts = sigactsinit(parent);
	if (flags & FORK_SHAREVM)
		pr->ps_vmspace = uvmspace_share(parent);
	else
		pr->ps_vmspace = uvmspace_fork(parent);

	if (parent->ps_flags & PS_PROFIL)
		startprofclock(pr);
	if (flags & FORK_PTRACE)
		pr->ps_flags |= parent->ps_flags & PS_TRACED;
	if (flags & FORK_NOZOMBIE)
		pr->ps_flags |= PS_NOZOMBIE;
	if (flags & FORK_SYSTEM)
		pr->ps_flags |= PS_SYSTEM;

	/* mark as embryo to protect against others */
	pr->ps_flags |= PS_EMBRYO;

	/* Force visibility of all of the above changes */
	membar_producer();

	/* it's sufficiently inited to be globally visible */
	LIST_INSERT_HEAD(&allprocess, pr, ps_list);

	return pr;
}
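
/*
 * The membar_producer() above orders the stores initializing *pr
 * before the store that links it into allprocess.  Presumably a
 * lock-free reader of allprocess would need a matching
 * membar_consumer() between loading the list head and dereferencing
 * the entry; readers that take the appropriate lock get that ordering
 * from the lock itself.
 */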

/* print the 'table full' message once per 10 seconds */
struct timeval fork_tfmrate = { 10, 0 };

int
fork_check_maxthread(uid_t uid)
{
	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create. We reserve
	 * the last 5 processes to root. The variable nprocesses is the
	 * current number of processes, maxprocess is the limit.  Similar
	 * rules for threads (struct proc): we reserve the last 5 to root;
	 * the variable nthreads is the current number of procs, maxthread is
	 * the limit.
	 */
	if ((nthreads >= maxthread - 5 && uid != 0) || nthreads >= maxthread) {
		static struct timeval lasttfm;

		if (ratecheck(&lasttfm, &fork_tfmrate))
			tablefull("proc");
		return EAGAIN;
	}
	nthreads++;

	return 0;
}
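
/*
 * Worked example of the reservation arithmetic: with maxthread set to
 * 1000, a non-root caller starts failing once nthreads reaches 995
 * (the last 5 slots are reserved for root), and root itself fails at
 * 1000.  fork1() applies the same rule to nprocesses/maxprocess.
 */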

static inline void
fork_thread_start(struct proc *p, struct proc *parent, int flags)
{
	int s;

	SCHED_LOCK(s);
	p->p_stat = SRUN;
	p->p_cpu = sched_choosecpu_fork(parent, flags);
	setrunqueue(p);
	SCHED_UNLOCK(s);
}

int
fork1(struct proc *curp, int flags, void (*func)(void *), void *arg,
    register_t *retval, struct proc **rnewprocp)
{
	struct process *curpr = curp->p_p;
	struct process *pr;
	struct proc *p;
	uid_t uid = curp->p_ucred->cr_ruid;
	struct vmspace *vm;
	int count;
	vaddr_t uaddr;
	int error;
	struct ptrace_state *newptstat = NULL;

	KASSERT((flags & ~(FORK_FORK | FORK_VFORK | FORK_PPWAIT | FORK_PTRACE
	    | FORK_IDLE | FORK_SHAREVM | FORK_SHAREFILES | FORK_NOZOMBIE
	    | FORK_SYSTEM | FORK_SIGHAND)) == 0);
	KASSERT((flags & FORK_SIGHAND) == 0 || (flags & FORK_SHAREVM));
	KASSERT(func != NULL);

	if ((error = fork_check_maxthread(uid)))
		return error;

	if ((nprocesses >= maxprocess - 5 && uid != 0) ||
	    nprocesses >= maxprocess) {
		static struct timeval lasttfm;

		if (ratecheck(&lasttfm, &fork_tfmrate))
			tablefull("process");
		nthreads--;
		return EAGAIN;
	}
	nprocesses++;

	/*
	 * Increment the count of processes running with this uid.
	 * Don't allow a nonprivileged user to exceed their current limit.
	 */
	count = chgproccnt(uid, 1);
	if (uid != 0 && count > curp->p_rlimit[RLIMIT_NPROC].rlim_cur) {
		(void)chgproccnt(uid, -1);
		nprocesses--;
		nthreads--;
		return EAGAIN;
	}

	uaddr = uvm_uarea_alloc();
	if (uaddr == 0) {
		(void)chgproccnt(uid, -1);
		nprocesses--;
		nthreads--;
		return (ENOMEM);
	}

	/*
	 * From now on, we're committed to the fork and cannot fail.
	 */
	p = thread_new(curp, uaddr);
	pr = process_new(p, curpr, flags);

	p->p_fd		= pr->ps_fd;
	p->p_vmspace	= pr->ps_vmspace;
	if (pr->ps_flags & PS_SYSTEM)
		atomic_setbits_int(&p->p_flag, P_SYSTEM);

	if (flags & FORK_PPWAIT) {
		atomic_setbits_int(&pr->ps_flags, PS_PPWAIT);
		atomic_setbits_int(&curpr->ps_flags, PS_ISPWAIT);
	}

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 * If not inherited, these were zeroed above.
	 */
	if (curpr->ps_traceflag & KTRFAC_INHERIT)
		ktrsettrace(pr, curpr->ps_traceflag, curpr->ps_tracevp,
		    curpr->ps_tracecred);
#endif

	/*
	 * Finish creating the child thread.  cpu_fork() will copy
	 * and update the pcb and make the child ready to run.  If
	 * this is a normal user fork, the child will exit directly
	 * to user mode via child_return() on its first time slice
	 * and will not return here.  If this is a kernel thread,
	 * the specified entry point will be executed.
	 */
	cpu_fork(curp, p, NULL, NULL, func, arg ? arg : p);

	vm = pr->ps_vmspace;

	if (flags & FORK_FORK) {
		forkstat.cntfork++;
		forkstat.sizfork += vm->vm_dsize + vm->vm_ssize;
	} else if (flags & FORK_VFORK) {
		forkstat.cntvfork++;
		forkstat.sizvfork += vm->vm_dsize + vm->vm_ssize;
	} else {
		forkstat.cntkthread++;
	}

	if (pr->ps_flags & PS_TRACED && flags & FORK_FORK)
		newptstat = malloc(sizeof(*newptstat), M_SUBPROC, M_WAITOK);

	p->p_tid = alloctid();

	LIST_INSERT_HEAD(&allproc, p, p_list);
	LIST_INSERT_HEAD(TIDHASH(p->p_tid), p, p_hash);
	LIST_INSERT_HEAD(PIDHASH(pr->ps_pid), pr, ps_hash);
	LIST_INSERT_AFTER(curpr, pr, ps_pglist);
	LIST_INSERT_HEAD(&curpr->ps_children, pr, ps_sibling);

	if (pr->ps_flags & PS_TRACED) {
		pr->ps_oppid = curpr->ps_pid;
		if (pr->ps_pptr != curpr->ps_pptr)
			proc_reparent(pr, curpr->ps_pptr);

		/*
		 * Set ptrace status.
		 */
		if (newptstat != NULL) {
			pr->ps_ptstat = newptstat;
			newptstat = NULL;
			curpr->ps_ptstat->pe_report_event = PTRACE_FORK;
			pr->ps_ptstat->pe_report_event = PTRACE_FORK;
			curpr->ps_ptstat->pe_other_pid = pr->ps_pid;
			pr->ps_ptstat->pe_other_pid = curpr->ps_pid;
		}
	}

	/*
	 * For new processes, set accounting bits and mark as complete.
	 */
	getnanotime(&pr->ps_start);
	pr->ps_acflag = AFORK;
	atomic_clearbits_int(&pr->ps_flags, PS_EMBRYO);

	if ((flags & FORK_IDLE) == 0)
		fork_thread_start(p, curp, flags);
	else
		p->p_cpu = arg;

	free(newptstat, M_SUBPROC, sizeof(*newptstat));

	/*
	 * Notify any interested parties about the new process.
	 */
	KNOTE(&curpr->ps_klist, NOTE_FORK | pr->ps_pid);

	/*
	 * Update stats now that we know the fork was successful.
	 */
	uvmexp.forks++;
	if (flags & FORK_PPWAIT)
		uvmexp.forks_ppwait++;
	if (flags & FORK_SHAREVM)
		uvmexp.forks_sharevm++;

	/*
	 * Pass a pointer to the new process to the caller.
	 */
	if (rnewprocp != NULL)
		*rnewprocp = p;

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set PS_PPWAIT on child and PS_ISPWAIT
	 * on ourselves, and sleep on our process for the latter flag
	 * to go away.
	 * XXX Need to stop other rthreads in the parent
	 */
	if (flags & FORK_PPWAIT)
		while (curpr->ps_flags & PS_ISPWAIT)
			tsleep(curpr, PWAIT, "ppwait", 0);

	/*
	 * If we're tracing the child, alert the parent too.
	 */
	if ((flags & FORK_PTRACE) && (curpr->ps_flags & PS_TRACED))
		psignal(curp, SIGTRAP);

	/*
	 * Return child pid to parent process
	 */
	if (retval != NULL) {
		retval[0] = pr->ps_pid;
		retval[1] = 0;
	}
	return (0);
}
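
/*
 * Kernel threads are also built on fork1(); kthread_create(9) is the
 * real wrapper.  A hedged sketch of such a call (the exact flag
 * combination and the entry function are illustrative, not verbatim):
 *
 *	struct proc *newp;
 *	error = fork1(curproc, FORK_SHAREVM | FORK_SHAREFILES |
 *	    FORK_NOZOMBIE | FORK_SYSTEM, mythread_entry, arg,
 *	    NULL, &newp);
 *
 * The rnewprocp argument is how such callers obtain the new thread.
 */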

int
thread_fork(struct proc *curp, void *stack, void *tcb, pid_t *tidptr,
    register_t *retval)
{
	struct process *pr = curp->p_p;
	struct proc *p;
	pid_t tid;
	vaddr_t uaddr;
	int error;

	if (stack == NULL)
		return EINVAL;

	if ((error = fork_check_maxthread(curp->p_ucred->cr_ruid)))
		return error;

	uaddr = uvm_uarea_alloc();
	if (uaddr == 0) {
		nthreads--;
		return ENOMEM;
	}

	/*
	 * From now on, we're committed to the fork and cannot fail.
	 */
	p = thread_new(curp, uaddr);
	atomic_setbits_int(&p->p_flag, P_THREAD);
	sigstkinit(&p->p_sigstk);

	/* other links */
	p->p_p = pr;
	pr->ps_refcnt++;

	/* local copies */
	p->p_fd		= pr->ps_fd;
	p->p_vmspace	= pr->ps_vmspace;

	/*
	 * Finish creating the child thread.  cpu_fork() will copy
	 * and update the pcb and make the child ready to run.  The
	 * child will exit directly to user mode via child_return()
	 * on its first time slice and will not return here.
	 */
	cpu_fork(curp, p, stack, tcb, child_return, p);

	p->p_tid = alloctid();

	LIST_INSERT_HEAD(&allproc, p, p_list);
	LIST_INSERT_HEAD(TIDHASH(p->p_tid), p, p_hash);
	TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);

	/*
	 * if somebody else wants to take us to single threaded mode,
	 * count ourselves in.
	 */
	if (pr->ps_single) {
		pr->ps_singlecount++;
		atomic_setbits_int(&p->p_flag, P_SUSPSINGLE);
	}

	/*
	 * Return tid to parent thread and copy it out to userspace
	 */
	retval[0] = tid = p->p_tid + THREAD_PID_OFFSET;
	retval[1] = 0;
	if (tidptr != NULL) {
		if (copyout(&tid, tidptr, sizeof(tid)))
			psignal(curp, SIGSEGV);
	}

	fork_thread_start(p, curp, 0);

	/*
	 * Update stats now that we know the fork was successful.
	 */
	forkstat.cnttfork++;
	uvmexp.forks++;
	uvmexp.forks_sharevm++;

	return 0;
}
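
/*
 * Note that the tid handed to userland above is biased by
 * THREAD_PID_OFFSET (defined in sys/proc.h), keeping thread ids
 * visibly distinct from pids.  A sketch of the mapping:
 *
 *	userland tid = p->p_tid + THREAD_PID_OFFSET
 *	kernel tid   = userland tid - THREAD_PID_OFFSET
 */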


/* Find an unused tid */
pid_t
alloctid(void)
{
	pid_t tid;

	do {
		/* (0 .. TID_MASK+1] */
		tid = 1 + (arc4random() & TID_MASK);
	} while (tfind(tid) != NULL);

	return (tid);
}

/*
 * Checks for current use of a pid, either as a pid or pgid.
 */
pid_t oldpids[128];
int
ispidtaken(pid_t pid)
{
	uint32_t i;

	for (i = 0; i < nitems(oldpids); i++)
		if (pid == oldpids[i])
			return (1);

	if (prfind(pid) != NULL)
		return (1);
	if (pgfind(pid) != NULL)
		return (1);
	if (zombiefind(pid) != NULL)
		return (1);
	return (0);
}

/* Find an unused pid */
pid_t
allocpid(void)
{
	static pid_t lastpid;
	pid_t pid;

	if (!randompid) {
		/* only used early on for system processes */
		pid = ++lastpid;
	} else {
		/* Find an unused pid satisfying lastpid < pid <= PID_MAX */
		do {
			pid = arc4random_uniform(PID_MAX - lastpid) + 1 +
			    lastpid;
		} while (ispidtaken(pid));
	}

	return pid;
}
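
/*
 * Range check for the random case above: arc4random_uniform(PID_MAX -
 * lastpid) yields a value in [0, PID_MAX - lastpid), so adding
 * 1 + lastpid puts the candidate in [lastpid + 1, PID_MAX], matching
 * the stated invariant lastpid < pid <= PID_MAX.
 */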

void
freepid(pid_t pid)
{
	static uint32_t idx;

	oldpids[idx++ % nitems(oldpids)] = pid;
}
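
/*
 * freepid() remembers the last nitems(oldpids) (128) released pids in
 * a ring that ispidtaken() consults, so a just-freed pid cannot be
 * handed out again immediately.  Since 128 is a power of two, the
 * eventual uint32_t wraparound of idx keeps the modulo sequence
 * consistent.
 */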

#if defined(MULTIPROCESSOR)
/*
 * XXX This is a slight hack to get newly-formed processes to
 * XXX acquire the kernel lock as soon as they run.
 */
void
proc_trampoline_mp(void)
{
	SCHED_ASSERT_LOCKED();
	__mp_unlock(&sched_lock);
	spl0();
	SCHED_ASSERT_UNLOCKED();
	KERNEL_ASSERT_UNLOCKED();

	KERNEL_LOCK();
}
#endif