/*	$OpenBSD: kern_fork.c,v 1.224 2020/03/16 11:58:46 mpi Exp $	*/
/*	$NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/exec.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/pool.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/atomic.h>
#include <sys/pledge.h>
#include <sys/unistd.h>

#include <sys/syscallargs.h>

#include <uvm/uvm.h>
#include <machine/tcb.h>

int	nprocesses = 1;		/* process 0 */
int	nthreads = 1;		/* proc 0 */
int	randompid;		/* when set to 1, pids are chosen at random */
struct	forkstat forkstat;

void fork_return(void *);
pid_t alloctid(void);
pid_t allocpid(void);
int ispidtaken(pid_t);

void unveil_copy(struct process *parent, struct process *child);

struct proc *thread_new(struct proc *_parent, vaddr_t _uaddr);
struct process *process_new(struct proc *, struct process *, int);
int	fork_check_maxthread(uid_t _uid);

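/*
 * Entry point for new user processes created via fork(2): deliver the
 * initial SIGTRAP if the process is being traced, then return to user
 * mode through child_return().
 */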
void
fork_return(void *arg)
{
	struct proc *p = (struct proc *)arg;

	if (p->p_p->ps_flags & PS_TRACED)
		psignal(p, SIGTRAP);

	child_return(p);
}

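/*
 * fork(2): create a new process that duplicates the calling process,
 * requesting a ptrace stop in the child when PTRACE_FORK event
 * reporting is enabled on the parent.
 */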
int
sys_fork(struct proc *p, void *v, register_t *retval)
{
	int flags;

	flags = FORK_FORK;
	if (p->p_p->ps_ptmask & PTRACE_FORK)
		flags |= FORK_PTRACE;
	return fork1(p, flags, fork_return, NULL, retval, NULL);
}

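/*
 * vfork(2): like fork(2), but FORK_PPWAIT suspends the parent until
 * the child execs or exits.
 */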
int
sys_vfork(struct proc *p, void *v, register_t *retval)
{
	return fork1(p, FORK_VFORK|FORK_PPWAIT, child_return, NULL,
	    retval, NULL);
}

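/*
 * __tfork(2): create a new thread in the calling process.  The caller
 * passes a struct __tfork holding the new thread's TCB, its initial
 * stack pointer, and the address where the new tid should be stored.
 * A rough userspace sketch (the real setup lives in libc's thread
 * support; this is illustrative only):
 *
 *	struct __tfork param = {
 *		.tf_tcb = tcb,		// thread control block
 *		.tf_tid = &tid,		// written with the new thread's tid
 *		.tf_stack = sp,		// initial stack pointer
 *	};
 *	__tfork(&param, sizeof(param));
 */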
int
sys___tfork(struct proc *p, void *v, register_t *retval)
{
	struct sys___tfork_args /* {
		syscallarg(const struct __tfork) *param;
		syscallarg(size_t) psize;
	} */ *uap = v;
	size_t psize = SCARG(uap, psize);
	struct __tfork param = { 0 };
	int error;

	if (psize == 0 || psize > sizeof(param))
		return EINVAL;
	if ((error = copyin(SCARG(uap, param), &param, psize)))
		return error;
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT))
		ktrstruct(p, "tfork", &param, sizeof(param));
#endif
#ifdef TCB_INVALID
	if (TCB_INVALID(param.tf_tcb))
		return EINVAL;
#endif /* TCB_INVALID */

	return thread_fork(p, param.tf_stack, param.tf_tcb, param.tf_tid,
	    retval);
}

/*
 * Allocate and initialize a thread (proc) structure, given the parent thread.
 */
struct proc *
thread_new(struct proc *parent, vaddr_t uaddr)
{
	struct proc *p;

	p = pool_get(&proc_pool, PR_WAITOK);
	p->p_stat = SIDL;			/* protect against others */
	p->p_runpri = 0;
	p->p_flag = 0;

	/*
	 * Make a proc table entry for the new thread.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	memset(&p->p_startzero, 0,
	    (caddr_t)&p->p_endzero - (caddr_t)&p->p_startzero);
	memcpy(&p->p_startcopy, &parent->p_startcopy,
	    (caddr_t)&p->p_endcopy - (caddr_t)&p->p_startcopy);
	crhold(p->p_ucred);
	p->p_addr = (struct user *)uaddr;

	/*
	 * Initialize the timeouts.
	 */
	timeout_set(&p->p_sleep_to, endtsleep, p);

	return p;
}

/*
 * Initialize common bits of a process structure, given the initial thread.
 */
void
process_initialize(struct process *pr, struct proc *p)
{
	/* initialize the thread links */
	pr->ps_mainproc = p;
	TAILQ_INIT(&pr->ps_threads);
	TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);
	pr->ps_refcnt = 1;
	p->p_p = pr;

	/* give the process the same creds as the initial thread */
	pr->ps_ucred = p->p_ucred;
	crhold(pr->ps_ucred);
	KASSERT(p->p_ucred->cr_ref >= 2);	/* new thread and new process */

	LIST_INIT(&pr->ps_children);
	LIST_INIT(&pr->ps_orphans);
	LIST_INIT(&pr->ps_ftlist);
	LIST_INIT(&pr->ps_sigiolst);
	TAILQ_INIT(&pr->ps_tslpqueue);

	rw_init(&pr->ps_lock, "pslock");
	mtx_init(&pr->ps_mtx, IPL_MPFLOOR);

	timeout_set(&pr->ps_realit_to, realitexpire, pr);
	timeout_set(&pr->ps_rucheck_to, rucheck, pr);
}

/*
 * Allocate and initialize a new process.
 */
struct process *
process_new(struct proc *p, struct process *parent, int flags)
{
	struct process *pr;

	pr = pool_get(&process_pool, PR_WAITOK);

	/*
	 * Make a process structure for the new process.
	 * Start by zeroing the section that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	memset(&pr->ps_startzero, 0,
	    (caddr_t)&pr->ps_endzero - (caddr_t)&pr->ps_startzero);
	memcpy(&pr->ps_startcopy, &parent->ps_startcopy,
	    (caddr_t)&pr->ps_endcopy - (caddr_t)&pr->ps_startcopy);

	process_initialize(pr, p);
	pr->ps_pid = allocpid();
	lim_fork(parent, pr);

	/* post-copy fixups */
	pr->ps_pptr = parent;

	/* bump references to the text vnode (for sysctl) */
	pr->ps_textvp = parent->ps_textvp;
	if (pr->ps_textvp)
		vref(pr->ps_textvp);

	/* copy unveil if unveil is active */
	unveil_copy(parent, pr);

	pr->ps_flags = parent->ps_flags &
	    (PS_SUGID | PS_SUGIDEXEC | PS_PLEDGE | PS_EXECPLEDGE | PS_WXNEEDED);
	if (parent->ps_session->s_ttyvp != NULL)
		pr->ps_flags |= parent->ps_flags & PS_CONTROLT;

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	if (flags & FORK_SHAREFILES)
		pr->ps_fd = fdshare(parent);
	else
		pr->ps_fd = fdcopy(parent);
	pr->ps_sigacts = sigactsinit(parent);
	if (flags & FORK_SHAREVM)
		pr->ps_vmspace = uvmspace_share(parent);
	else
		pr->ps_vmspace = uvmspace_fork(parent);

	if (parent->ps_flags & PS_PROFIL)
		startprofclock(pr);
	if (flags & FORK_PTRACE)
		pr->ps_flags |= parent->ps_flags & PS_TRACED;
	if (flags & FORK_NOZOMBIE)
		pr->ps_flags |= PS_NOZOMBIE;
	if (flags & FORK_SYSTEM)
		pr->ps_flags |= PS_SYSTEM;

	/* mark as embryo to protect against others */
	pr->ps_flags |= PS_EMBRYO;

	/* Force visibility of all of the above changes */
	membar_producer();

	/* it's sufficiently inited to be globally visible */
	LIST_INSERT_HEAD(&allprocess, pr, ps_list);

	return pr;
}

/* print the 'table full' message at most once every 10 seconds */
struct timeval fork_tfmrate = { 10, 0 };

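/*
 * Check the global thread limit and reserve a slot for a new thread,
 * incrementing nthreads on success.
 */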
int
fork_check_maxthread(uid_t uid)
{
	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create. We reserve
	 * the last 5 processes to root. The variable nprocesses is the
	 * current number of processes, maxprocess is the limit.  Similar
	 * rules for threads (struct proc): we reserve the last 5 to root;
	 * the variable nthreads is the current number of procs, maxthread is
	 * the limit.
	 */
	if ((nthreads >= maxthread - 5 && uid != 0) || nthreads >= maxthread) {
		static struct timeval lasttfm;

		if (ratecheck(&lasttfm, &fork_tfmrate))
			tablefull("proc");
		return EAGAIN;
	}
	nthreads++;

	return 0;
}

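/*
 * Pick a CPU for the new thread and place it on that CPU's run queue.
 */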
static inline void
fork_thread_start(struct proc *p, struct proc *parent, int flags)
{
	struct cpu_info *ci;
	int s;

	SCHED_LOCK(s);
	ci = sched_choosecpu_fork(parent, flags);
	setrunqueue(ci, p, p->p_usrpri);
	SCHED_UNLOCK(s);
}

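/*
 * fork1: common code for creating a new process.  curp is the forking
 * thread, flags select the fork variant (FORK_* flags), and func/arg
 * are the child's kernel entry point and its argument.  On success the
 * child's pid is stored in retval and, if rnewprocp is non-NULL, a
 * pointer to the child's initial thread is returned through it.
 */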
int
fork1(struct proc *curp, int flags, void (*func)(void *), void *arg,
    register_t *retval, struct proc **rnewprocp)
{
	struct process *curpr = curp->p_p;
	struct process *pr;
	struct proc *p;
	uid_t uid = curp->p_ucred->cr_ruid;
	struct vmspace *vm;
	int count;
	vaddr_t uaddr;
	int error;
	struct  ptrace_state *newptstat = NULL;

	KASSERT((flags & ~(FORK_FORK | FORK_VFORK | FORK_PPWAIT | FORK_PTRACE
	    | FORK_IDLE | FORK_SHAREVM | FORK_SHAREFILES | FORK_NOZOMBIE
	    | FORK_SYSTEM)) == 0);
	KASSERT(func != NULL);

	if ((error = fork_check_maxthread(uid)))
		return error;

	if ((nprocesses >= maxprocess - 5 && uid != 0) ||
	    nprocesses >= maxprocess) {
		static struct timeval lasttfm;

		if (ratecheck(&lasttfm, &fork_tfmrate))
			tablefull("process");
		nthreads--;
		return EAGAIN;
	}
	nprocesses++;

	/*
	 * Increment the count of processes running with this uid.
	 * Don't allow a nonprivileged user to exceed their current limit.
	 */
	count = chgproccnt(uid, 1);
	if (uid != 0 && count > lim_cur(RLIMIT_NPROC)) {
		(void)chgproccnt(uid, -1);
		nprocesses--;
		nthreads--;
		return EAGAIN;
	}

	uaddr = uvm_uarea_alloc();
	if (uaddr == 0) {
		(void)chgproccnt(uid, -1);
		nprocesses--;
		nthreads--;
		return (ENOMEM);
	}

	/*
	 * From now on, we're committed to the fork and cannot fail.
	 */
	p = thread_new(curp, uaddr);
	pr = process_new(p, curpr, flags);

	p->p_fd		= pr->ps_fd;
	p->p_vmspace	= pr->ps_vmspace;
	if (pr->ps_flags & PS_SYSTEM)
		atomic_setbits_int(&p->p_flag, P_SYSTEM);

	if (flags & FORK_PPWAIT) {
		atomic_setbits_int(&pr->ps_flags, PS_PPWAIT);
		atomic_setbits_int(&curpr->ps_flags, PS_ISPWAIT);
	}

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 * If not inherited, these were zeroed above.
	 */
	if (curpr->ps_traceflag & KTRFAC_INHERIT)
		ktrsettrace(pr, curpr->ps_traceflag, curpr->ps_tracevp,
		    curpr->ps_tracecred);
#endif

	/*
	 * Finish creating the child thread.  cpu_fork() will copy
	 * and update the pcb and make the child ready to run.  If
	 * this is a normal user fork, the child will exit directly
	 * to user mode via child_return() on its first time slice
	 * and will not return here.  If this is a kernel thread,
	 * the specified entry point will be executed.
	 */
	cpu_fork(curp, p, NULL, NULL, func, arg ? arg : p);

	vm = pr->ps_vmspace;

	if (flags & FORK_FORK) {
		forkstat.cntfork++;
		forkstat.sizfork += vm->vm_dsize + vm->vm_ssize;
	} else if (flags & FORK_VFORK) {
		forkstat.cntvfork++;
		forkstat.sizvfork += vm->vm_dsize + vm->vm_ssize;
	} else {
		forkstat.cntkthread++;
	}

	if (pr->ps_flags & PS_TRACED && flags & FORK_FORK)
		newptstat = malloc(sizeof(*newptstat), M_SUBPROC, M_WAITOK);

	p->p_tid = alloctid();

	LIST_INSERT_HEAD(&allproc, p, p_list);
	LIST_INSERT_HEAD(TIDHASH(p->p_tid), p, p_hash);
	LIST_INSERT_HEAD(PIDHASH(pr->ps_pid), pr, ps_hash);
	LIST_INSERT_AFTER(curpr, pr, ps_pglist);
	LIST_INSERT_HEAD(&curpr->ps_children, pr, ps_sibling);

	if (pr->ps_flags & PS_TRACED) {
		pr->ps_oppid = curpr->ps_pid;
		process_reparent(pr, curpr->ps_pptr);

		/*
		 * Set ptrace status.
		 */
		if (newptstat != NULL) {
			pr->ps_ptstat = newptstat;
			newptstat = NULL;
			curpr->ps_ptstat->pe_report_event = PTRACE_FORK;
			pr->ps_ptstat->pe_report_event = PTRACE_FORK;
			curpr->ps_ptstat->pe_other_pid = pr->ps_pid;
			pr->ps_ptstat->pe_other_pid = curpr->ps_pid;
		}
	}

	/*
	 * For new processes, set accounting bits and mark as complete.
	 */
	nanouptime(&pr->ps_start);
	pr->ps_acflag = AFORK;
	atomic_clearbits_int(&pr->ps_flags, PS_EMBRYO);

	if ((flags & FORK_IDLE) == 0)
		fork_thread_start(p, curp, flags);
	else
		p->p_cpu = arg;

	free(newptstat, M_SUBPROC, sizeof(*newptstat));

	/*
	 * Notify any interested parties about the new process.
	 */
	KNOTE(&curpr->ps_klist, NOTE_FORK | pr->ps_pid);

	/*
	 * Update stats now that we know the fork was successful.
	 */
	uvmexp.forks++;
	if (flags & FORK_PPWAIT)
		uvmexp.forks_ppwait++;
	if (flags & FORK_SHAREVM)
		uvmexp.forks_sharevm++;

	/*
	 * Pass a pointer to the new thread to the caller.
	 */
	if (rnewprocp != NULL)
		*rnewprocp = p;

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set PS_PPWAIT on child and PS_ISPWAIT
	 * on ourselves, and sleep on our process for the latter flag
	 * to go away.
	 * XXX Need to stop other rthreads in the parent
	 */
	if (flags & FORK_PPWAIT)
		while (curpr->ps_flags & PS_ISPWAIT)
			tsleep_nsec(curpr, PWAIT, "ppwait", INFSLP);

	/*
	 * If we're tracing the child, alert the parent too.
	 */
	if ((flags & FORK_PTRACE) && (curpr->ps_flags & PS_TRACED))
		psignal(curp, SIGTRAP);

	/*
	 * Return the child's pid to the parent process.
	 */
	if (retval != NULL) {
		retval[0] = pr->ps_pid;
		retval[1] = 0;
	}
	return (0);
}

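/*
 * thread_fork: create a new thread in the process of curp, with the
 * given user stack pointer and TCB.  The new tid is copied out to
 * tidptr and also returned to the caller in retval.
 */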
int
thread_fork(struct proc *curp, void *stack, void *tcb, pid_t *tidptr,
    register_t *retval)
{
	struct process *pr = curp->p_p;
	struct proc *p;
	pid_t tid;
	vaddr_t uaddr;
	int error;

	if (stack == NULL)
		return EINVAL;

	if ((error = fork_check_maxthread(curp->p_ucred->cr_ruid)))
		return error;

	uaddr = uvm_uarea_alloc();
	if (uaddr == 0) {
		nthreads--;
		return ENOMEM;
	}

	/*
	 * From now on, we're committed to the fork and cannot fail.
	 */
	p = thread_new(curp, uaddr);
	atomic_setbits_int(&p->p_flag, P_THREAD);
	sigstkinit(&p->p_sigstk);

	/* other links */
	p->p_p = pr;
	pr->ps_refcnt++;

	/* local copies */
	p->p_fd		= pr->ps_fd;
	p->p_vmspace	= pr->ps_vmspace;

	/*
	 * Finish creating the child thread.  cpu_fork() will copy
	 * and update the pcb and make the child ready to run.  The
	 * child will exit directly to user mode via child_return()
	 * on its first time slice and will not return here.
	 */
	cpu_fork(curp, p, stack, tcb, child_return, p);

	p->p_tid = alloctid();

	LIST_INSERT_HEAD(&allproc, p, p_list);
	LIST_INSERT_HEAD(TIDHASH(p->p_tid), p, p_hash);
	TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);

	/*
	 * if somebody else wants to take us to single threaded mode,
	 * count ourselves in.
	 */
	if (pr->ps_single) {
		pr->ps_singlecount++;
		atomic_setbits_int(&p->p_flag, P_SUSPSINGLE);
	}

	/*
	 * Return tid to parent thread and copy it out to userspace
	 */
	retval[0] = tid = p->p_tid + THREAD_PID_OFFSET;
	retval[1] = 0;
	if (tidptr != NULL) {
		if (copyout(&tid, tidptr, sizeof(tid)))
			psignal(curp, SIGSEGV);
	}

	fork_thread_start(p, curp, 0);

	/*
	 * Update stats now that we know the fork was successful.
	 */
	forkstat.cnttfork++;
	uvmexp.forks++;
	uvmexp.forks_sharevm++;

	return 0;
}

/* Find an unused tid */
pid_t
alloctid(void)
{
	pid_t tid;

	do {
		/* (0 .. TID_MASK+1] */
		tid = 1 + (arc4random() & TID_MASK);
	} while (tfind(tid) != NULL);

	return (tid);
}

/*
 * Check whether a pid is in use: as a pid, a pgid, a not-yet-reaped
 * zombie's pid, or an entry in the cache of recently freed pids.
 */
pid_t oldpids[128];
int
ispidtaken(pid_t pid)
{
	uint32_t i;

	for (i = 0; i < nitems(oldpids); i++)
		if (pid == oldpids[i])
			return (1);

	if (prfind(pid) != NULL)
		return (1);
	if (pgfind(pid) != NULL)
		return (1);
	if (zombiefind(pid) != NULL)
		return (1);
	return (0);
}

/* Find an unused pid */
pid_t
allocpid(void)
{
	static pid_t lastpid;
	pid_t pid;

	if (!randompid) {
		/* only used early on for system processes */
		pid = ++lastpid;
	} else {
		/* Find an unused pid satisfying lastpid < pid <= PID_MAX */
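		/*
		 * arc4random_uniform(n) returns a value in [0, n-1], so
		 * the expression below lands in [lastpid + 1, PID_MAX].
		 */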
		do {
			pid = arc4random_uniform(PID_MAX - lastpid) + 1 +
			    lastpid;
		} while (ispidtaken(pid));
	}

	return pid;
}

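/*
 * Remember a freed pid in a small ring buffer so that ispidtaken()
 * keeps it out of circulation for a while before it can be reused.
 */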
void
freepid(pid_t pid)
{
	static uint32_t idx;

	oldpids[idx++ % nitems(oldpids)] = pid;
}

#if defined(MULTIPROCESSOR)
/*
 * XXX This is a slight hack to get newly-formed processes to
 * XXX acquire the kernel lock as soon as they run.
 */
void
proc_trampoline_mp(void)
{
	SCHED_ASSERT_LOCKED();
	__mp_unlock(&sched_lock);
	spl0();
	SCHED_ASSERT_UNLOCKED();
	KERNEL_ASSERT_UNLOCKED();

	KERNEL_LOCK();
}
#endif