xref: /openbsd-src/sys/kern/kern_fork.c (revision f5d56f6d1abf3988ecad6e5bcc0d9e60de25e475)
/*	$OpenBSD: kern_fork.c,v 1.196 2017/04/13 03:52:25 guenther Exp $	*/
/*	$NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/exec.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/file.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/pool.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/atomic.h>
#include <sys/pledge.h>
#include <sys/unistd.h>

#include <sys/syscallargs.h>

#include <uvm/uvm.h>
#include <machine/tcb.h>

int	nprocesses = 1;		/* process 0 */
int	nthreads = 1;		/* proc 0 */
int	randompid;		/* when set to 1, pids are chosen at random */
struct	forkstat forkstat;

void fork_return(void *);
pid_t alloctid(void);
pid_t allocpid(void);
int ispidtaken(pid_t);

struct proc *thread_new(struct proc *_parent, vaddr_t _uaddr);
struct process *process_new(struct proc *, struct process *, int);
int fork_check_maxthread(uid_t _uid);

void
fork_return(void *arg)
{
	struct proc *p = (struct proc *)arg;

	if (p->p_p->ps_flags & PS_TRACED)
		psignal(p, SIGTRAP);

	child_return(p);
}

int
sys_fork(struct proc *p, void *v, register_t *retval)
{
	int flags;

	flags = FORK_FORK;
	if (p->p_p->ps_ptmask & PTRACE_FORK)
		flags |= FORK_PTRACE;
	return fork1(p, flags, fork_return, NULL, retval, NULL);
}

int
sys_vfork(struct proc *p, void *v, register_t *retval)
{
	return fork1(p, FORK_VFORK|FORK_PPWAIT, child_return, NULL,
	    retval, NULL);
}

int
sys___tfork(struct proc *p, void *v, register_t *retval)
{
	struct sys___tfork_args /* {
		syscallarg(const struct __tfork) *param;
		syscallarg(size_t) psize;
	} */ *uap = v;
	size_t psize = SCARG(uap, psize);
	struct __tfork param = { 0 };
	int error;

	if (psize == 0 || psize > sizeof(param))
		return EINVAL;
	if ((error = copyin(SCARG(uap, param), &param, psize)))
		return error;
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT))
		ktrstruct(p, "tfork", &param, sizeof(param));
#endif

	return thread_fork(p, param.tf_stack, param.tf_tcb, param.tf_tid,
	    retval);
}
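
/*
 * Illustrative sketch of the userland side (simplified; the real entry
 * point is __tfork_thread() in librthread, which also switches to the
 * new stack before calling the thread function).  The caller fills in a
 * struct __tfork and passes its size, which is why the copyin() above
 * is bounded by psize; tcb, tid and stacktop below are placeholders.
 *
 *	struct __tfork param = {
 *		.tf_tcb = tcb,		// TLS/TCB pointer for the new thread
 *		.tf_tid = &tid,		// kernel stores the new tid here
 *		.tf_stack = stacktop,	// initial stack pointer
 *	};
 *	__tfork(&param, sizeof(param));
 */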

/*
 * Allocate and initialize a thread (proc) structure, given the parent thread.
 */
struct proc *
thread_new(struct proc *parent, vaddr_t uaddr)
{
	struct proc *p;

	p = pool_get(&proc_pool, PR_WAITOK);
	p->p_stat = SIDL;			/* protect against others */
	p->p_flag = 0;

	/*
	 * Make a proc table entry for the new thread.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	memset(&p->p_startzero, 0,
	    (caddr_t)&p->p_endzero - (caddr_t)&p->p_startzero);
	memcpy(&p->p_startcopy, &parent->p_startcopy,
	    (caddr_t)&p->p_endcopy - (caddr_t)&p->p_startcopy);
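	/*
	 * Note: p_startzero/p_endzero and p_startcopy/p_endcopy are marker
	 * names from sys/proc.h that bracket two regions of struct proc,
	 * so a new thread can be set up with one memset() and one memcpy()
	 * instead of touching every field by hand.  Fields outside both
	 * regions must be initialized explicitly, as p_addr and the
	 * timeouts are below.
	 */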
	crhold(p->p_ucred);
	p->p_addr = (struct user *)uaddr;

	/*
	 * Initialize the timeouts.
	 */
	timeout_set(&p->p_sleep_to, endtsleep, p);

	/*
	 * set priority of child to be that of parent
	 * XXX should move p_estcpu into the region of struct proc which gets
	 * copied.
	 */
	scheduler_fork_hook(parent, p);

	return p;
}

/*
 * Initialize common bits of a process structure, given the initial thread.
 */
void
process_initialize(struct process *pr, struct proc *p)
{
	/* initialize the thread links */
	pr->ps_mainproc = p;
	TAILQ_INIT(&pr->ps_threads);
	TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);
	pr->ps_refcnt = 1;
	p->p_p = pr;

	/* give the process the same creds as the initial thread */
	pr->ps_ucred = p->p_ucred;
	crhold(pr->ps_ucred);
	KASSERT(p->p_ucred->cr_ref >= 2);	/* new thread and new process */

	LIST_INIT(&pr->ps_children);

	timeout_set(&pr->ps_realit_to, realitexpire, pr);
}


/*
 * Allocate and initialize a new process.
 */
struct process *
process_new(struct proc *p, struct process *parent, int flags)
{
	struct process *pr;

	pr = pool_get(&process_pool, PR_WAITOK);

	/*
	 * Make a process structure for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	memset(&pr->ps_startzero, 0,
	    (caddr_t)&pr->ps_endzero - (caddr_t)&pr->ps_startzero);
	memcpy(&pr->ps_startcopy, &parent->ps_startcopy,
	    (caddr_t)&pr->ps_endcopy - (caddr_t)&pr->ps_startcopy);

	process_initialize(pr, p);
	pr->ps_pid = allocpid();

	/* post-copy fixups */
	pr->ps_pptr = parent;
	pr->ps_limit->p_refcnt++;

	/* bump references to the text vnode (for sysctl) */
	pr->ps_textvp = parent->ps_textvp;
	if (pr->ps_textvp)
		vref(pr->ps_textvp);

	pr->ps_flags = parent->ps_flags &
	    (PS_SUGID | PS_SUGIDEXEC | PS_PLEDGE | PS_WXNEEDED);
	if (parent->ps_session->s_ttyvp != NULL)
		pr->ps_flags |= parent->ps_flags & PS_CONTROLT;

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	if (flags & FORK_SHAREFILES)
		pr->ps_fd = fdshare(parent);
	else
		pr->ps_fd = fdcopy(parent);
	if (flags & FORK_SIGHAND)
		pr->ps_sigacts = sigactsshare(parent);
	else
		pr->ps_sigacts = sigactsinit(parent);
	if (flags & FORK_SHAREVM)
		pr->ps_vmspace = uvmspace_share(parent);
	else
		pr->ps_vmspace = uvmspace_fork(parent);

	if (pr->ps_pledgepaths)
		pr->ps_pledgepaths->wl_ref++;

	if (parent->ps_flags & PS_PROFIL)
		startprofclock(pr);
	if (flags & FORK_PTRACE)
		pr->ps_flags |= parent->ps_flags & PS_TRACED;
	if (flags & FORK_NOZOMBIE)
		pr->ps_flags |= PS_NOZOMBIE;
	if (flags & FORK_SYSTEM)
		pr->ps_flags |= PS_SYSTEM;

	/* mark as embryo to protect against others */
	pr->ps_flags |= PS_EMBRYO;

	/* Force visibility of all of the above changes */
	membar_producer();

	/* it's sufficiently inited to be globally visible */
	LIST_INSERT_HEAD(&allprocess, pr, ps_list);

	return pr;
}
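
/*
 * Summary of how process_new() treats the major sub-structures, as
 * selected by its flags argument:
 *
 *	resource	shared when		otherwise
 *	--------	-----------		---------
 *	file table	FORK_SHAREFILES		fdcopy() makes a private copy
 *	signal actions	FORK_SIGHAND		sigactsinit() makes a copy
 *	address space	FORK_SHAREVM		uvmspace_fork() copies it (COW)
 *
 * fork(2) and vfork(2) pass none of these flags, so a normal child gets
 * private copies of all three; __tfork(2) threads share everything simply
 * by staying in the parent's struct process instead of getting a new one.
 */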

/* print the 'table full' message once per 10 seconds */
struct timeval fork_tfmrate = { 10, 0 };

int
fork_check_maxthread(uid_t uid)
{
	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create. We reserve
	 * the last 5 processes to root. The variable nprocesses is the
	 * current number of processes, maxprocess is the limit.  Similar
	 * rules for threads (struct proc): we reserve the last 5 to root;
	 * the variable nthreads is the current number of procs, maxthread is
	 * the limit.
	 */
	if ((nthreads >= maxthread - 5 && uid != 0) || nthreads >= maxthread) {
		static struct timeval lasttfm;

		if (ratecheck(&lasttfm, &fork_tfmrate))
			tablefull("proc");
		return EAGAIN;
	}
	nthreads++;

	return 0;
}
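
/*
 * Worked example of the check above, with a made-up limit: if maxthread
 * were 1000, an unprivileged caller starts getting EAGAIN once nthreads
 * reaches 995, while root can continue until nthreads reaches 1000.  The
 * same 5-entry reserve is applied to maxprocess in fork1() below.
 */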

static inline void
fork_thread_start(struct proc *p, struct proc *parent, int flags)
{
	int s;

	SCHED_LOCK(s);
	p->p_stat = SRUN;
	p->p_cpu = sched_choosecpu_fork(parent, flags);
	setrunqueue(p);
	SCHED_UNLOCK(s);
}

int
fork1(struct proc *curp, int flags, void (*func)(void *), void *arg,
    register_t *retval, struct proc **rnewprocp)
{
	struct process *curpr = curp->p_p;
	struct process *pr;
	struct proc *p;
	uid_t uid = curp->p_ucred->cr_ruid;
	struct vmspace *vm;
	int count;
	vaddr_t uaddr;
	int error;
	struct  ptrace_state *newptstat = NULL;

	KASSERT((flags & ~(FORK_FORK | FORK_VFORK | FORK_PPWAIT | FORK_PTRACE
	    | FORK_IDLE | FORK_SHAREVM | FORK_SHAREFILES | FORK_NOZOMBIE
	    | FORK_SYSTEM | FORK_SIGHAND)) == 0);
	KASSERT((flags & FORK_SIGHAND) == 0 || (flags & FORK_SHAREVM));
	KASSERT(func != NULL);

	if ((error = fork_check_maxthread(uid)))
		return error;

	if ((nprocesses >= maxprocess - 5 && uid != 0) ||
	    nprocesses >= maxprocess) {
		static struct timeval lasttfm;

		if (ratecheck(&lasttfm, &fork_tfmrate))
			tablefull("process");
		nthreads--;
		return EAGAIN;
	}
	nprocesses++;

	/*
	 * Increment the count of processes running with this uid.
	 * Don't allow a nonprivileged user to exceed their current limit.
	 */
	count = chgproccnt(uid, 1);
	if (uid != 0 && count > curp->p_rlimit[RLIMIT_NPROC].rlim_cur) {
		(void)chgproccnt(uid, -1);
		nprocesses--;
		nthreads--;
		return EAGAIN;
	}

	uaddr = uvm_uarea_alloc();
	if (uaddr == 0) {
		(void)chgproccnt(uid, -1);
		nprocesses--;
		nthreads--;
		return (ENOMEM);
	}

	/*
	 * From now on, we're committed to the fork and cannot fail.
	 */
	p = thread_new(curp, uaddr);
	pr = process_new(p, curpr, flags);

	p->p_fd		= pr->ps_fd;
	p->p_vmspace	= pr->ps_vmspace;
	if (pr->ps_flags & PS_SYSTEM)
		atomic_setbits_int(&p->p_flag, P_SYSTEM);

	if (flags & FORK_PPWAIT) {
		atomic_setbits_int(&pr->ps_flags, PS_PPWAIT);
		atomic_setbits_int(&curpr->ps_flags, PS_ISPWAIT);
	}

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 * If not inherited, these were zeroed above.
	 */
	if (curpr->ps_traceflag & KTRFAC_INHERIT)
		ktrsettrace(pr, curpr->ps_traceflag, curpr->ps_tracevp,
		    curpr->ps_tracecred);
#endif

	/*
	 * Finish creating the child thread.  cpu_fork() will copy
	 * and update the pcb and make the child ready to run.  If
	 * this is a normal user fork, the child will exit directly
	 * to user mode via child_return() on its first time slice
	 * and will not return here.  If this is a kernel thread,
	 * the specified entry point will be executed.
	 */
	cpu_fork(curp, p, NULL, NULL, func, arg ? arg : p);
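	/*
	 * The func/arg pair matters mostly for kernel threads, which never
	 * return to userland.  kthread_create(9) is the usual consumer; it
	 * calls fork1() roughly along these lines (flags abridged, see
	 * kern_kthread.c for the real call):
	 *
	 *	fork1(&proc0, FORK_SHAREVM | FORK_NOZOMBIE | FORK_SYSTEM,
	 *	    func, arg, NULL, &p);
	 *
	 * For fork(2) and vfork(2), func is fork_return()/child_return()
	 * and the child simply resumes in userland.
	 */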

	vm = pr->ps_vmspace;

	if (flags & FORK_FORK) {
		forkstat.cntfork++;
		forkstat.sizfork += vm->vm_dsize + vm->vm_ssize;
	} else if (flags & FORK_VFORK) {
		forkstat.cntvfork++;
		forkstat.sizvfork += vm->vm_dsize + vm->vm_ssize;
	} else {
		forkstat.cntkthread++;
	}

	if (pr->ps_flags & PS_TRACED && flags & FORK_FORK)
		newptstat = malloc(sizeof(*newptstat), M_SUBPROC, M_WAITOK);

	p->p_tid = alloctid();

	LIST_INSERT_HEAD(&allproc, p, p_list);
	LIST_INSERT_HEAD(TIDHASH(p->p_tid), p, p_hash);
	LIST_INSERT_HEAD(PIDHASH(pr->ps_pid), pr, ps_hash);
	LIST_INSERT_AFTER(curpr, pr, ps_pglist);
	LIST_INSERT_HEAD(&curpr->ps_children, pr, ps_sibling);

	if (pr->ps_flags & PS_TRACED) {
		pr->ps_oppid = curpr->ps_pid;
		if (pr->ps_pptr != curpr->ps_pptr)
			proc_reparent(pr, curpr->ps_pptr);

		/*
		 * Set ptrace status.
		 */
		if (newptstat != NULL) {
			pr->ps_ptstat = newptstat;
			newptstat = NULL;
			curpr->ps_ptstat->pe_report_event = PTRACE_FORK;
			pr->ps_ptstat->pe_report_event = PTRACE_FORK;
			curpr->ps_ptstat->pe_other_pid = pr->ps_pid;
			pr->ps_ptstat->pe_other_pid = curpr->ps_pid;
		}
	}

	/*
	 * For new processes, set accounting bits and mark as complete.
	 */
	getnanotime(&pr->ps_start);
	pr->ps_acflag = AFORK;
	atomic_clearbits_int(&pr->ps_flags, PS_EMBRYO);

	if ((flags & FORK_IDLE) == 0)
		fork_thread_start(p, curp, flags);
	else
		p->p_cpu = arg;

	free(newptstat, M_SUBPROC, sizeof(*newptstat));

	/*
	 * Notify any interested parties about the new process.
	 */
	KNOTE(&curpr->ps_klist, NOTE_FORK | pr->ps_pid);

	/*
	 * Update stats now that we know the fork was successful.
	 */
	uvmexp.forks++;
	if (flags & FORK_PPWAIT)
		uvmexp.forks_ppwait++;
	if (flags & FORK_SHAREVM)
		uvmexp.forks_sharevm++;

	/*
	 * Pass a pointer to the new process to the caller.
	 */
	if (rnewprocp != NULL)
		*rnewprocp = p;

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set PS_PPWAIT on child and PS_ISPWAIT
	 * on ourselves, and sleep on our process for the latter flag
	 * to go away.
	 * XXX Need to stop other rthreads in the parent
	 */
	if (flags & FORK_PPWAIT)
		while (curpr->ps_flags & PS_ISPWAIT)
			tsleep(curpr, PWAIT, "ppwait", 0);
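	/*
	 * This sleep is what gives vfork(2) its documented behaviour that
	 * the parent blocks until the child execs or exits; the exec and
	 * exit paths clear PS_ISPWAIT and wake the parent.  A simplified
	 * userland view:
	 *
	 *	pid_t pid = vfork();
	 *	if (pid == 0) {
	 *		execve(path, argv, envp);	// on success the parent resumes
	 *		_exit(127);			// exec failed
	 *	}
	 *	// the parent only gets here after the child's exec or _exit
	 */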

	/*
	 * If we're tracing the child, alert the parent too.
	 */
	if ((flags & FORK_PTRACE) && (curpr->ps_flags & PS_TRACED))
		psignal(curp, SIGTRAP);

	/*
	 * Return child pid to parent process
	 */
	if (retval != NULL) {
		retval[0] = pr->ps_pid;
		retval[1] = 0;
	}
	return (0);
}

int
thread_fork(struct proc *curp, void *stack, void *tcb, pid_t *tidptr,
    register_t *retval)
{
	struct process *pr = curp->p_p;
	struct proc *p;
	pid_t tid;
	vaddr_t uaddr;
	int error;

	if (stack == NULL)
		return EINVAL;

	if ((error = fork_check_maxthread(curp->p_ucred->cr_ruid)))
		return error;

	uaddr = uvm_uarea_alloc();
	if (uaddr == 0) {
		nthreads--;
		return ENOMEM;
	}

	/*
	 * From now on, we're committed to the fork and cannot fail.
	 */
	p = thread_new(curp, uaddr);
	atomic_setbits_int(&p->p_flag, P_THREAD);
	sigstkinit(&p->p_sigstk);

	/* other links */
	p->p_p = pr;
	pr->ps_refcnt++;

	/* local copies */
	p->p_fd		= pr->ps_fd;
	p->p_vmspace	= pr->ps_vmspace;

	/*
	 * Finish creating the child thread.  cpu_fork() will copy
	 * and update the pcb and make the child ready to run.  The
	 * child will exit directly to user mode via child_return()
	 * on its first time slice and will not return here.
	 */
	cpu_fork(curp, p, stack, tcb, child_return, p);

	p->p_tid = alloctid();

	LIST_INSERT_HEAD(&allproc, p, p_list);
	LIST_INSERT_HEAD(TIDHASH(p->p_tid), p, p_hash);
	TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);

	/*
	 * if somebody else wants to take us to single threaded mode,
	 * count ourselves in.
	 */
	if (pr->ps_single) {
		pr->ps_singlecount++;
		atomic_setbits_int(&p->p_flag, P_SUSPSINGLE);
	}

	/*
	 * Return tid to parent thread and copy it out to userspace
	 */
	retval[0] = tid = p->p_tid + THREAD_PID_OFFSET;
	retval[1] = 0;
	if (tidptr != NULL) {
		if (copyout(&tid, tidptr, sizeof(tid)))
			psignal(curp, SIGSEGV);
	}
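	/*
	 * Note: the tid handed to userland is offset by THREAD_PID_OFFSET
	 * so thread ids and process ids occupy disjoint ranges (getthrid(2)
	 * reports the same offset value), while the raw p_tid is what was
	 * inserted into the kernel's TIDHASH table above.
	 */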

	fork_thread_start(p, curp, 0);

	/*
	 * Update stats now that we know the fork was successful.
	 */
	forkstat.cnttfork++;
	uvmexp.forks++;
	uvmexp.forks_sharevm++;

	return 0;
}


/* Find an unused tid */
pid_t
alloctid(void)
{
	pid_t tid;

	do {
		/* (0 .. TID_MASK+1] */
		tid = 1 + (arc4random() & TID_MASK);
	} while (tfind(tid) != NULL);

	return (tid);
}
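
/*
 * Unlike pids, tids are random from the start: the loop above is plain
 * rejection sampling, retrying while tfind() says the candidate is still
 * in use.  The "1 +" keeps 0 out of the range, presumably so a zero tid
 * can serve as a "no thread" value.
 */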

/*
 * Checks for current use of a pid, either as a pid or pgid.
 */
pid_t oldpids[128];
int
ispidtaken(pid_t pid)
{
	uint32_t i;

	for (i = 0; i < nitems(oldpids); i++)
		if (pid == oldpids[i])
			return (1);

	if (prfind(pid) != NULL)
		return (1);
	if (pgfind(pid) != NULL)
		return (1);
	if (zombiefind(pid) != NULL)
		return (1);
	return (0);
}

/* Find an unused pid */
pid_t
allocpid(void)
{
	static pid_t lastpid;
	pid_t pid;

	if (!randompid) {
		/* only used early on for system processes */
		pid = ++lastpid;
	} else {
		/* Find an unused pid satisfying lastpid < pid <= PID_MAX */
		do {
			pid = arc4random_uniform(PID_MAX - lastpid) + 1 +
			    lastpid;
		} while (ispidtaken(pid));
	}

	return pid;
}
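
/*
 * Worked example for the random branch above: arc4random_uniform(n)
 * returns a value in [0, n), so with, say, lastpid = 10 and PID_MAX =
 * 99999 the result is uniform over [11, 99999], i.e. always above the
 * last sequentially assigned pid and never above PID_MAX.
 */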

void
freepid(pid_t pid)
{
	static uint32_t idx;

	oldpids[idx++ % nitems(oldpids)] = pid;
}
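
/*
 * oldpids[] acts as a small ring buffer of recently released pids:
 * freepid() records a pid here when it is given back, and ispidtaken()
 * above refuses to reuse it until 128 further pids have been freed,
 * which keeps just-exited pids from being recycled immediately.
 */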

#if defined(MULTIPROCESSOR)
/*
 * XXX This is a slight hack to get newly-formed processes to
 * XXX acquire the kernel lock as soon as they run.
 */
void
proc_trampoline_mp(void)
{
	SCHED_ASSERT_LOCKED();
	__mp_unlock(&sched_lock);
	spl0();
	SCHED_ASSERT_UNLOCKED();
	KERNEL_ASSERT_UNLOCKED();

	KERNEL_LOCK();
}
#endif