/*
 * Copyright (c) 1995 Terrence R. Lambert
 * All rights reserved.
 *
 * Copyright (c) 1982, 1986, 1989, 1991, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)init_main.c	8.9 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/init_main.c,v 1.134.2.8 2003/06/06 20:21:32 tegge Exp $
 * $DragonFly: src/sys/kern/init_main.c,v 1.87 2008/06/07 11:37:23 mneumann Exp $
 */

#include "opt_init_path.h"

#include <sys/param.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/sysent.h>
#include <sys/reboot.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#include <sys/unistd.h>
#include <sys/malloc.h>
#include <sys/machintr.h>

#include <sys/file2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#include <machine/cpu.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <sys/user.h>
#include <sys/copyright.h>

int vfs_mountroot_devfs(void);

/* Components of the first process -- never freed. */
static struct session session0;
static struct pgrp pgrp0;
static struct sigacts sigacts0;
static struct filedesc filedesc0;
static struct plimit limit0;
static struct vmspace vmspace0;
struct proc *initproc;
struct proc proc0;
struct lwp lwp0;
struct thread thread0;

int cmask = CMASK;
u_int cpu_mi_feature;
cpumask_t usched_global_cpumask;
extern	struct user *proc0paddr;
extern int fallback_elf_brand;

int	boothowto = 0;		/* initialized so that it can be patched */
SYSCTL_INT(_debug, OID_AUTO, boothowto, CTLFLAG_RD, &boothowto, 0,
    "Reboot flags, from console subsystem");
SYSCTL_ULONG(_kern, OID_AUTO, usched_global_cpumask, CTLFLAG_RW,
    &usched_global_cpumask, 0, "global user scheduler cpumask");

/*
 * This ensures that there is at least one entry so that the sysinit_set
 * symbol is not undefined.  Entries with a subsystem ID of SI_SPECIAL_DUMMY
 * are never executed.
 */
SYSINIT(placeholder, SI_SPECIAL_DUMMY, SI_ORDER_ANY, NULL, NULL)

/*
 * The sysinit table itself.  Items are checked off as they are run.
 * If we want to register new sysinit types, add them to newsysinit.
 */
SET_DECLARE(sysinit_set, struct sysinit);
struct sysinit **sysinit, **sysinit_end;
struct sysinit **newsysinit, **newsysinit_end;


/*
 * Merge a new sysinit set into the current set, reallocating it if
 * necessary.  This can only be called after malloc is running.
 */
void
sysinit_add(struct sysinit **set, struct sysinit **set_end)
{
	struct sysinit **newset;
	struct sysinit **sipp;
	struct sysinit **xipp;
	int count;

	count = set_end - set;
	if (newsysinit)
		count += newsysinit_end - newsysinit;
	else
		count += sysinit_end - sysinit;
	newset = kmalloc(count * sizeof(*sipp), M_TEMP, M_WAITOK);
	xipp = newset;
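	/*
	 * Copy whichever list is currently active (a previously installed
	 * newsysinit set if one exists, otherwise the base linker set),
	 * then append the incoming entries.
	 */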
	if (newsysinit) {
		for (sipp = newsysinit; sipp < newsysinit_end; sipp++)
			*xipp++ = *sipp;
	} else {
		for (sipp = sysinit; sipp < sysinit_end; sipp++)
			*xipp++ = *sipp;
	}
	for (sipp = set; sipp < set_end; sipp++)
		*xipp++ = *sipp;
	if (newsysinit)
		kfree(newsysinit, M_TEMP);
	newsysinit = newset;
	newsysinit_end = newset + count;
}

/*
 * Callbacks from machine-dependent startup code (e.g. init386) to set
 * up low level entities related to cpu #0's globaldata.
 *
 * Called from very low level boot code.
 */
void
mi_proc0init(struct globaldata *gd, struct user *proc0paddr)
{
	lwkt_init_thread(&thread0, proc0paddr, LWKT_THREAD_STACK, 0, gd);
	lwkt_set_comm(&thread0, "thread0");
	RB_INIT(&proc0.p_lwp_tree);
	spin_init(&proc0.p_spin);
	proc0.p_lasttid = 0;	/* +1 = next TID */
	lwp_rb_tree_RB_INSERT(&proc0.p_lwp_tree, &lwp0);
	lwp0.lwp_thread = &thread0;
	lwp0.lwp_proc = &proc0;
	proc0.p_usched = usched_init();
	lwp0.lwp_cpumask = (cpumask_t)-1;
	varsymset_init(&proc0.p_varsymset, NULL);
	thread0.td_flags |= TDF_RUNNING;
	thread0.td_proc = &proc0;
	thread0.td_lwp = &lwp0;
	thread0.td_switch = cpu_lwkt_switch;
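	/*
	 * thread0 is the thread we are already running on; mark it
	 * scheduled so the LWKT scheduler knows about it.
	 */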
	lwkt_schedule_self(curthread);
}

/*
 * System startup; initialize the world, create process 0, mount root
 * filesystem, and fork to create init and pagedaemon.  Most of the
 * hard work is done in the lower-level initialization routines including
 * startup(), which does memory initialization and autoconfiguration.
 *
 * This allows simple addition of new kernel subsystems that require
 * boot time initialization.  It also allows substitution of a subsystem
 * (for instance, a scheduler, kernel profiler, or VM system) by an object
 * module.  Finally, it allows for optional "kernel threads".
 */
void
mi_startup(void)
{
	struct sysinit *sip;		/* system initialization*/
	struct sysinit **sipp;		/* system initialization*/
	struct sysinit **xipp;		/* interior loop of sort*/
	struct sysinit *save;		/* bubble*/

	if (sysinit == NULL) {
		sysinit = SET_BEGIN(sysinit_set);
#if defined(__amd64__) && defined(_KERNEL_VIRTUAL)
		/*
		 * XXX For whatever reason, on 64-bit vkernels
		 * the value of sysinit obtained from the
		 * linker set is wrong.
		 */
		if ((long)sysinit % 8 != 0) {
			kprintf("Fixing sysinit value...\n");
			sysinit = (struct sysinit **)((long)sysinit + 4);
		}
#endif
		sysinit_end = SET_LIMIT(sysinit_set);
	}
#if defined(__amd64__) && defined(_KERNEL_VIRTUAL)
	KKASSERT((long)sysinit % 8 == 0);
#endif

restart:
	/*
	 * Perform a bubble sort of the system initialization objects by
	 * their subsystem (primary key) and order (secondary key).
	 */
	for (sipp = sysinit; sipp < sysinit_end; sipp++) {
		for (xipp = sipp + 1; xipp < sysinit_end; xipp++) {
			if ((*sipp)->subsystem < (*xipp)->subsystem ||
			     ((*sipp)->subsystem == (*xipp)->subsystem &&
			      (*sipp)->order <= (*xipp)->order))
				continue;	/* skip*/
			save = *sipp;
			*sipp = *xipp;
			*xipp = save;
		}
	}

	/*
	 * Traverse the (now) ordered list of system initialization tasks.
	 * Perform each task, and continue on to the next task.
	 *
	 * The last item on the list is expected to be the scheduler,
	 * which will not return.
	 */
	for (sipp = sysinit; sipp < sysinit_end; sipp++) {
		sip = *sipp;
		if (sip->subsystem == SI_SPECIAL_DUMMY)
			continue;	/* skip dummy task(s)*/

		if (sip->subsystem == SI_SPECIAL_DONE)
			continue;

		/* Call function */
		(*(sip->func))(sip->udata);

		/* Check off the one we just ran */
		sip->subsystem = SI_SPECIAL_DONE;

		/* Check if we've installed more sysinit items via KLD */
		if (newsysinit != NULL) {
			if (sysinit != SET_BEGIN(sysinit_set))
				kfree(sysinit, M_TEMP);
			sysinit = newsysinit;
			sysinit_end = newsysinit_end;
			newsysinit = NULL;
			newsysinit_end = NULL;
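			/*
			 * Rescan from the top: the merged list is re-sorted
			 * and entries that have already run are skipped
			 * because they were marked SI_SPECIAL_DONE above.
			 */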
			goto restart;
		}
	}

	panic("Shouldn't get here!");
	/* NOTREACHED*/
}


/*
 ***************************************************************************
 ****
 **** The following SYSINIT's belong elsewhere, but have not yet
 **** been moved.
 ****
 ***************************************************************************
 */
static void
print_caddr_t(void *data __unused)
{
	kprintf("%s", (char *)data);
}
SYSINIT(announce, SI_BOOT1_COPYRIGHT, SI_ORDER_FIRST, print_caddr_t, copyright)

/*
 * Leave the critical section that protected us from spurious interrupts
 * so device probes work.
 */
static void
leavecrit(void *dummy __unused)
{
	MachIntrABI.stabilize();
	cpu_enable_intr();
	MachIntrABI.cleanup();
	crit_exit();
	KKASSERT(!IN_CRITICAL_SECT(curthread));

	if (bootverbose)
		kprintf("Leaving critical section, allowing interrupts\n");
}
SYSINIT(leavecrit, SI_BOOT2_LEAVE_CRIT, SI_ORDER_ANY, leavecrit, NULL)

/*
 * This is called after the threading system is up and running,
 * including the softclock, clock interrupts, and SMP.
 */
static void
tsleepworks(void *dummy __unused)
{
	tsleep_now_works = 1;
}
SYSINIT(tsleepworks, SI_BOOT2_FINISH_SMP, SI_ORDER_SECOND, tsleepworks, NULL)

/*
 * This is called after devices have been configured.  Tell the kernel we
 * are no longer in cold boot.
 */
static void
endofcoldboot(void *dummy __unused)
{
	cold = 0;
}
SYSINIT(endofcoldboot, SI_SUB_ISWARM, SI_ORDER_ANY, endofcoldboot, NULL)

/*
 ***************************************************************************
 ****
 **** The two following SYSINIT's are proc0 specific glue code.  I am not
 **** convinced that they cannot be safely combined, but their order of
 **** operation has been kept the same as in the original init_main.c
 **** for right now.
 ****
 **** These probably belong in init_proc.c or kern_proc.c, since they
 **** deal with proc0 (the fork template process).
 ****
 ***************************************************************************
 */
/* ARGSUSED*/
static void
proc0_init(void *dummy __unused)
{
	struct proc *p;
	struct lwp *lp;

	p = &proc0;
	lp = &lwp0;

	/*
	 * Initialize process and pgrp structures.
	 */
	procinit();

	/*
	 * additional VM structures
	 */
	vm_init2();

	/*
	 * Create process 0 (the swapper).
	 */
	LIST_INSERT_HEAD(&allproc, p, p_list);
	p->p_pgrp = &pgrp0;
	LIST_INSERT_HEAD(PGRPHASH(0), &pgrp0, pg_hash);
	LIST_INIT(&pgrp0.pg_members);
	LIST_INSERT_HEAD(&pgrp0.pg_members, p, p_pglist);

	pgrp0.pg_session = &session0;
	session0.s_count = 1;
	session0.s_leader = p;

	p->p_sysent = &aout_sysvec;

	p->p_flag = P_SYSTEM;
	p->p_stat = SACTIVE;
	lp->lwp_stat = LSRUN;
	p->p_nice = NZERO;
	p->p_rtprio.type = RTP_PRIO_NORMAL;
	p->p_rtprio.prio = 0;
	lp->lwp_rtprio = p->p_rtprio;

	p->p_peers = 0;
	p->p_leader = p;

	bcopy("swapper", p->p_comm, sizeof ("swapper"));
	bcopy("swapper", thread0.td_comm, sizeof ("swapper"));

	/* Create credentials. */
	p->p_ucred = crget();
	p->p_ucred->cr_ruidinfo = uifind(0);
	p->p_ucred->cr_ngroups = 1;	/* group 0 */
	p->p_ucred->cr_uidinfo = uifind(0);
	thread0.td_ucred = crhold(p->p_ucred);	/* bootstrap fork1() */

	/* Don't jail it */
	p->p_ucred->cr_prison = NULL;

	/* Create sigacts. */
	p->p_sigacts = &sigacts0;
	p->p_sigacts->ps_refcnt = 1;

	/* Initialize signal state for process 0. */
	siginit(p);

	/* Create the file descriptor table. */
	fdinit_bootstrap(p, &filedesc0, cmask);

	/* Create the limits structures. */
	plimit_init0(&limit0);
	p->p_limit = &limit0;

	/* Allocate a prototype map so we have something to fork. */
	pmap_pinit0(vmspace_pmap(&vmspace0));
	p->p_vmspace = &vmspace0;
	lp->lwp_vmspace = p->p_vmspace;
	sysref_init(&vmspace0.vm_sysref, &vmspace_sysref_class);
	vm_map_init(&vmspace0.vm_map,
		    round_page(VM_MIN_USER_ADDRESS),
		    trunc_page(VM_MAX_USER_ADDRESS),
		    vmspace_pmap(&vmspace0));
	sysref_activate(&vmspace0.vm_sysref);
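	/*
	 * vmspace0 now spans the entire user address range; fork1() gives
	 * children of proc0, such as init, a copy of this prototype map.
	 */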

	kqueue_init(&lwp0.lwp_kqueue, &filedesc0);

	/*
	 * Charge root for one process.
	 */
	(void)chgproccnt(p->p_ucred->cr_uidinfo, 1, 0);
	vm_init_limits(p);
}
SYSINIT(p0init, SI_BOOT2_PROC0, SI_ORDER_FIRST, proc0_init, NULL)

static int proc0_post_callback(struct proc *p, void *data __unused);

/* ARGSUSED*/
static void
proc0_post(void *dummy __unused)
{
	struct timespec ts;

	/*
	 * Now we can look at the time, having had a chance to verify the
	 * time from the file system.  Pretend that proc0 started now.
	 */
	allproc_scan(proc0_post_callback, NULL);

	/*
	 * Give the ``random'' number generator a thump.
	 * XXX: Does read_random() contain enough bits to be used here?
	 */
	nanotime(&ts);
	skrandom(ts.tv_sec ^ ts.tv_nsec);
}

static int
proc0_post_callback(struct proc *p, void *data __unused)
{
	microtime(&p->p_start);
	return(0);
}

SYSINIT(p0post, SI_SUB_PROC0_POST, SI_ORDER_FIRST, proc0_post, NULL)

/*
 ***************************************************************************
 ****
 **** The following SYSINIT's and glue code should be moved to the
 **** respective files on a per subsystem basis.
 ****
 ***************************************************************************
 */


/*
 ***************************************************************************
 ****
 **** The following code probably belongs in another file, like
 **** kern/init_init.c.
 ****
 ***************************************************************************
 */

/*
 * List of paths to try when searching for "init".
 */
static char init_path[MAXPATHLEN] =
#ifdef	INIT_PATH
    __XSTRING(INIT_PATH);
#else
    "/sbin/init:/sbin/oinit:/sbin/init.bak";
#endif
SYSCTL_STRING(_kern, OID_AUTO, init_path, CTLFLAG_RD, init_path, 0, "");

/*
 * Start the initial user process; try exec'ing each pathname in init_path.
 * The program is invoked with one argument containing the boot flags.
 */
static void
start_init(void *dummy, struct trapframe *frame)
{
	vm_offset_t addr;
	struct execve_args args;
	int options, error;
	char *var, *path, *next, *s;
	char *ucp, **uap, *arg0, *arg1;
	struct proc *p;
	struct lwp *lp;
	struct mount *mp;
	struct vnode *vp;
	char *env;

	/*
	 * This is passed in by the bootloader
	 */
	env = kgetenv("kernelname");
	if (env != NULL)
		strlcpy(kernelname, env, sizeof(kernelname));

	/*
	 * The MP lock is not held on entry.  We release it before
	 * returning to userland.
	 */
	get_mplock();
	p = curproc;

	lp = ONLY_LWP_IN_PROC(p);

	/* Get the vnode for '/'.  Set p->p_fd->fd_cdir to reference it. */
	mp = mountlist_boot_getfirst();
	if (VFS_ROOT(mp, &vp))
		panic("cannot find root vnode");
	if (mp->mnt_ncmountpt.ncp == NULL) {
		cache_allocroot(&mp->mnt_ncmountpt, mp, vp);
		cache_unlock(&mp->mnt_ncmountpt);	/* leave ref intact */
	}
	p->p_fd->fd_cdir = vp;
	vref(p->p_fd->fd_cdir);
	p->p_fd->fd_rdir = vp;
	vref(p->p_fd->fd_rdir);
	vfs_cache_setroot(vp, cache_hold(&mp->mnt_ncmountpt));
	vn_unlock(vp);			/* leave ref intact */
	cache_copy(&mp->mnt_ncmountpt, &p->p_fd->fd_ncdir);
	cache_copy(&mp->mnt_ncmountpt, &p->p_fd->fd_nrdir);
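	/*
	 * The init process now has '/' as both its current and root
	 * directory, in both vnode and namecache form.
	 */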

	kprintf("Mounting devfs\n");
	vfs_mountroot_devfs();

	/*
	 * Need just enough stack to hold the faked-up "execve()" arguments.
	 */
	addr = trunc_page(USRSTACK - PAGE_SIZE);
	error = vm_map_find(&p->p_vmspace->vm_map, NULL, 0, &addr,
			    PAGE_SIZE, PAGE_SIZE,
			    FALSE, VM_MAPTYPE_NORMAL,
			    VM_PROT_ALL, VM_PROT_ALL,
			    0);
	if (error)
		panic("init: couldn't allocate argument space");
	p->p_vmspace->vm_maxsaddr = (caddr_t)addr;
	p->p_vmspace->vm_ssize = 1;

	if ((var = kgetenv("init_path")) != NULL) {
		strncpy(init_path, var, sizeof init_path);
		init_path[sizeof init_path - 1] = 0;
	}
	if ((var = kgetenv("kern.fallback_elf_brand")) != NULL)
		fallback_elf_brand = strtol(var, NULL, 0);

	for (path = init_path; *path != '\0'; path = next) {
		while (*path == ':')
			path++;
		if (*path == '\0')
			break;
		for (next = path; *next != '\0' && *next != ':'; next++)
			/* nothing */ ;
		if (bootverbose)
			kprintf("start_init: trying %.*s\n", (int)(next - path),
			    path);

		/*
		 * Move out the boot flag argument.
		 */
		options = 0;
		ucp = (char *)USRSTACK;
		(void)subyte(--ucp, 0);		/* trailing zero */
		if (boothowto & RB_SINGLE) {
			(void)subyte(--ucp, 's');
			options = 1;
		}
#ifdef notyet
		if (boothowto & RB_FASTBOOT) {
			(void)subyte(--ucp, 'f');
			options = 1;
		}
#endif

#ifdef BOOTCDROM
		(void)subyte(--ucp, 'C');
		options = 1;
#endif
		if (options == 0)
			(void)subyte(--ucp, '-');
		(void)subyte(--ucp, '-');		/* leading hyphen */
		arg1 = ucp;

		/*
		 * Move out the file name (also arg 0).
		 */
		(void)subyte(--ucp, 0);
		for (s = next - 1; s >= path; s--)
			(void)subyte(--ucp, *s);
		arg0 = ucp;

		/*
		 * Move out the arg pointers.
		 */
		uap = (char **)((intptr_t)ucp & ~(sizeof(intptr_t)-1));
		(void)suword((caddr_t)--uap, (long)0);	/* terminator */
		(void)suword((caddr_t)--uap, (long)(intptr_t)arg1);
		(void)suword((caddr_t)--uap, (long)(intptr_t)arg0);
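
		/*
		 * At this point the top of the user stack looks like
		 * (addresses decreasing from USRSTACK):
		 *
		 *	option string ("--" or "-s", etc.)	<- arg1
		 *	program pathname			<- arg0
		 *	NULL
		 *	arg1
		 *	arg0					<- uap
		 */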

		/*
		 * Point at the arguments.
		 */
		args.fname = arg0;
		args.argv = uap;
		args.envv = NULL;

		/*
		 * Now try to exec the program.  If we can't for any reason
		 * other than it doesn't exist, complain.
		 *
		 * Otherwise, return via fork_trampoline() all the way
		 * to user mode as init!
		 *
		 * WARNING!  We may have been moved to another cpu after
		 * acquiring the current user process designation.  The
		 * MP lock will migrate with us though so we still have to
		 * release it.
		 */
		if ((error = sys_execve(&args)) == 0) {
			rel_mplock();
			lp->lwp_proc->p_usched->acquire_curproc(lp);
			return;
		}
		if (error != ENOENT)
			kprintf("exec %.*s: error %d\n", (int)(next - path),
			    path, error);
	}
	kprintf("init: not found in path %s\n", init_path);
	panic("no init");
}

/*
 * Like kthread_create(), but runs in its own address space.
 * We do this early to reserve pid 1.
 *
 * Note special case - do not make it runnable yet.  Other work
 * in progress will change this more.
 */
static void
create_init(const void *udata __unused)
{
	int error;
	struct lwp *lp;

	crit_enter();
	error = fork1(&lwp0, RFFDG | RFPROC, &initproc);
	if (error)
		panic("cannot fork init: %d", error);
	initproc->p_flag |= P_SYSTEM;
	lp = ONLY_LWP_IN_PROC(initproc);
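	/*
	 * Have the new lwp enter start_init() via the fork trampoline
	 * instead of returning to user mode when it first runs.
	 */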
	cpu_set_fork_handler(lp, start_init, NULL);
	crit_exit();
}
SYSINIT(init, SI_SUB_CREATE_INIT, SI_ORDER_FIRST, create_init, NULL)

/*
 * Make it runnable now.
 */
static void
kick_init(const void *udata __unused)
{
	start_forked_proc(&lwp0, initproc);
}
SYSINIT(kickinit, SI_SUB_KTHREAD_INIT, SI_ORDER_FIRST, kick_init, NULL)

/*
 * Machine-independent globaldata initialization
 *
 * WARNING!  Called from early boot, 'mycpu' may not work yet.
 */
void
mi_gdinit(struct globaldata *gd, int cpuid)
{
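	/*
	 * Per-cpu setup: systimer queue, sysid/cpuid/cpumask, LWKT run
	 * queues, the vm_map_entry reserve cache and the tsleep queues.
	 * Each cpu also adds itself to the global user scheduler mask.
	 */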
	TAILQ_INIT(&gd->gd_systimerq);
	gd->gd_sysid_alloc = cpuid;	/* prime low bits for cpu lookup */
	gd->gd_cpuid = cpuid;
	gd->gd_cpumask = CPUMASK(cpuid);
	lwkt_gdinit(gd);
	vm_map_entry_reserve_cpu_init(gd);
	sleep_gdinit(gd);
	usched_global_cpumask |= CPUMASK(cpuid);
}
