xref: /csrg-svn/sys/kern/init_main.c (revision 30531)
1 /*
2  * Copyright (c) 1982, 1986 Regents of the University of California.
3  * All rights reserved.  The Berkeley software License Agreement
4  * specifies the terms and conditions for redistribution.
5  *
6  *	@(#)init_main.c	7.4 (Berkeley) 02/19/87
7  */
8 
9 #include "../machine/pte.h"
10 
11 #include "param.h"
12 #include "systm.h"
13 #include "dir.h"
14 #include "user.h"
15 #include "kernel.h"
16 #include "fs.h"
17 #include "mount.h"
18 #include "map.h"
19 #include "proc.h"
20 #include "inode.h"
21 #include "seg.h"
22 #include "conf.h"
23 #include "buf.h"
24 #include "vm.h"
25 #include "cmap.h"
26 #include "text.h"
27 #include "clist.h"
28 #include "protosw.h"
29 #include "quota.h"
30 #include "../machine/reg.h"
31 #include "../machine/cpu.h"
32 
33 int	cmask = CMASK;		/* initial file-creation mask; installed as u.u_cmask for process 0 below */
34 /*
35  * Initialization code.
36  * Called from cold start routine as
37  * soon as a stack and segmentation
38  * have been established.
39  * Functions:
40  *	clear and free user core
41  *	turn on clock
42  *	hand craft 0th process
43  *	call all initialization routines
44  *	fork - process 0 to schedule
45  *	     - process 1 execute bootstrap
46  *	     - process 2 to page out
47  */
48 main(firstaddr)
49 	int firstaddr;
50 {
51 	register int i;
52 	register struct proc *p;
53 	struct fs *fs;
54 	int s;
55 
	/*
	 * NOTE(review): rqinit presumably readies the scheduler run
	 * queues — done before any process exists; confirm in kern_synch.
	 */
56 	rqinit();
	/*
	 * Config-generated count headers are included mid-function in
	 * this era; loop.h supplies NLOOP, tested further down.
	 */
57 #include "loop.h"
58 	startup(firstaddr);
59 
60 	/*
61 	 * set up system process 0 (swapper)
62 	 */
63 	p = &proc[0];
64 #if defined(tahoe)
65 #ifndef lint
	/*
	 * The empty comment between tokens in the macro below is the
	 * pre-ANSI token-pasting trick: "which"+"_cache" glue into e.g.
	 * ckey_cache when the macro is expanded.
	 */
66 #define	initkey(which, p, index) \
67     which/**/_cache[index] = 1, which/**/_cnt[index] = 1; \
68     p->p_/**/which = index;
69 	initkey(ckey, p, MAXCKEY);
70 	initkey(dkey, p, MAXDKEY);
71 #endif
72 #endif
73 	p->p_p0br = u.u_pcb.pcb_p0br;
74 	p->p_szpt = 1;		/* one page-table page for now; grown before each fork below */
	/* NOTE(review): uaddr() apparently yields the u.-area address for p — machine-dependent; confirm */
75 	p->p_addr = uaddr(p);
76 	p->p_stat = SRUN;
77 	p->p_flag |= SLOAD|SSYS;
78 	p->p_nice = NZERO;
	/* NOTE(review): presumably places a guard (red zone) at the end of the u. area — machine-dependent */
79 	setredzone(p->p_addr, (caddr_t)&u);
80 	u.u_procp = p;
81 	/*
82 	 * These assume that the u. area is always mapped
83 	 * to the same virtual address. Otherwise must be
84 	 * handled when copying the u. area in newproc().
85 	 */
86 	u.u_nd.ni_iov = &u.u_nd.ni_iovec;
87 	u.u_ap = u.u_arg;
88 	u.u_nd.ni_iovcnt = 1;
89 
90 	u.u_cmask = cmask;
91 	u.u_lastfile = -1;
	/* Fill every group slot but the first with NOGROUP; slot 0 is left untouched. */
92 	for (i = 1; i < NGROUPS; i++)
93 		u.u_groups[i] = NOGROUP;
	/* Process 0 starts with every resource limit infinite; vminit() refines the vm ones. */
94 	for (i = 0; i < sizeof(u.u_rlimit)/sizeof(u.u_rlimit[0]); i++)
95 		u.u_rlimit[i].rlim_cur = u.u_rlimit[i].rlim_max =
96 		    RLIM_INFINITY;
97 	/*
98 	 * configure virtual memory system,
99 	 * set vm rlimits
100 	 */
101 	vminit();
102 
103 #if defined(QUOTA)
104 	qtinit();
105 	p->p_quota = u.u_quota = getquota(0, 0, Q_NDQ);
106 #endif
107 	startrtclock();
108 #if defined(vax)
109 #include "kg.h"
110 #if NKG > 0
111 	startkgclock();
112 #endif
113 #endif
114 
115 	/*
116 	 * Initialize tables, protocols, and set up well-known inodes.
117 	 */
118 	mbinit();
119 	cinit();
120 #include "sl.h"
121 #if NSL > 0
122 	slattach();			/* XXX */
123 #endif
124 #if NLOOP > 0
125 	loattach();			/* XXX */
126 #endif
127 	/*
128 	 * Block reception of incoming packets
129 	 * until protocols have been initialized.
130 	 */
131 	s = splimp();
132 	ifinit();
133 	domaininit();
134 	splx(s);
135 	pqinit();
136 	xinit();
137 	ihinit();
138 	swapinit();
139 	nchinit();
140 #ifdef GPROF
141 	kmstartup();
142 #endif
143 
	/* Mount the root file system; failure this early is fatal. */
144 	fs = mountfs(rootdev, 0, (struct inode *)0);
145 	if (fs == 0)
146 		panic("iinit");
	/* Record the mount point name: "/" plus its NUL terminator (2 bytes). */
147 	bcopy("/", fs->fs_fsmnt, 2);
148 
	/* Seed the time of day from the root filesystem's timestamp. */
149 	inittodr(fs->fs_time);
150 	boottime = time;
151 
152 /* kick off timeout driven events by calling first time */
153 	roundrobin();
154 	schedcpu();
155 	schedpaging();
156 
157 /* set up the root file system */
	/*
	 * The global root and process 0's current directory each get
	 * their own reference to the root inode; iget hands the inode
	 * back locked, so each is unlocked before proceeding.
	 */
158 	rootdir = iget(rootdev, fs, (ino_t)ROOTINO);
159 	iunlock(rootdir);
160 	u.u_cdir = iget(rootdev, fs, (ino_t)ROOTINO);
161 	iunlock(u.u_cdir);
162 	u.u_rdir = NULL;	/* no alternate (chroot) root */
163 
164 	u.u_dmap = zdmap;
165 	u.u_smap = zdmap;
166 
167 	enablertclock();		/* enable realtime clock interrupts */
168 #if defined(tahoe)
169 	clk_enable = 1;			/* enable clock interrupt */
170 #endif
171 	/*
172 	 * make init process
173 	 */
174 
175 	proc[0].p_szpt = CLSIZE;
	/*
	 * Fork: the nonzero-return arm is the child (process 1, init).
	 * It grows its data segment to hold icode, copies icode out to
	 * user address 0, and the plain "return" resumes execution there.
	 */
176 	if (newproc(0)) {
177 		expand(clrnd((int)btoc(szicode)), 0);
178 		(void) swpexpand(u.u_dsize, (size_t)0, &u.u_dmap, &u.u_smap);
179 		(void) copyout((caddr_t)icode, (caddr_t)0, (unsigned)szicode);
180 		/*
181 		 * Return goes to loc. 0 of user init
182 		 * code just copied out.
183 		 */
184 		return;
185 	}
186 	/*
187 	 * make page-out daemon (process 2)
188 	 * the daemon has ctopt(nswbuf*CLSIZE*KLMAX) pages of page
189 	 * table so that it can map dirty pages into
190 	 * its address space during asynchronous pushes.
191 	 */
192 	proc[0].p_szpt = clrnd(ctopt(nswbuf*CLSIZE*KLMAX + UPAGES));
193 	if (newproc(0)) {
194 		proc[2].p_flag |= SLOAD|SSYS;
195 		proc[2].p_dsize = u.u_dsize = nswbuf*CLSIZE*KLMAX;
196 		pageout();
197 		/*NOTREACHED*/
198 	}
199 
200 	/*
201 	 * enter scheduling loop
202 	 */
203 	proc[0].p_szpt = 1;
204 	sched();
205 }
206 
207 /*
208  * Initialize hash links for buffers.
209  */
210 bhinit()
211 {
212 	register int i;
213 	register struct bufhd *bp;
214 
215 	for (bp = bufhash, i = 0; i < BUFHSZ; i++, bp++)
216 		bp->b_forw = bp->b_back = (struct buf *)bp;
217 }
218 
219 /*
220  * Initialize the buffer I/O system by freeing
221  * all buffers and setting all device buffer lists to empty.
222  */
223 binit()
224 {
225 	register struct buf *bp, *dp;
226 	register int i;
227 	int base, residual;
228 
229 	for (dp = bfreelist; dp < &bfreelist[BQUEUES]; dp++) {
230 		dp->b_forw = dp->b_back = dp->av_forw = dp->av_back = dp;
231 		dp->b_flags = B_HEAD;
232 	}
233 	base = bufpages / nbuf;
234 	residual = bufpages % nbuf;
235 	for (i = 0; i < nbuf; i++) {
236 		bp = &buf[i];
237 		bp->b_dev = NODEV;
238 		bp->b_bcount = 0;
239 		bp->b_un.b_addr = buffers + i * MAXBSIZE;
240 		if (i < residual)
241 			bp->b_bufsize = (base + 1) * CLBYTES;
242 		else
243 			bp->b_bufsize = base * CLBYTES;
244 		binshash(bp, &bfreelist[BQ_AGE]);
245 		bp->b_flags = B_BUSY|B_INVAL;
246 		brelse(bp);
247 	}
248 }
249 
250 /*
251  * Set up swap devices.
252  * Initialize linked list of free swap
253  * headers. These do not actually point
254  * to buffers, but rather to pages that
255  * are being swapped in and out.
256  */
257 swapinit()
258 {
259 	register int i;
260 	register struct buf *sp = swbuf;
261 	struct swdevt *swp;
262 
263 	/*
264 	 * Count swap devices, and adjust total swap space available.
265 	 * Some of this space will not be available until a swapon()
266 	 * system call is issued, usually when the system goes multi-user.
267 	 */
268 	nswdev = 0;
269 	nswap = 0;
	/* nswap tracks the LARGEST single device here, not the sum;
	 * total space is derived from it below. */
270 	for (swp = swdevt; swp->sw_dev; swp++) {
271 		nswdev++;
272 		if (swp->sw_nblks > nswap)
273 			nswap = swp->sw_nblks;
274 	}
275 	if (nswdev == 0)
276 		panic("swapinit");
	/*
	 * With several devices, round the per-device size up to a
	 * multiple of dmmax and scale by the device count — consistent
	 * with swap being spread over the devices in dmmax-block chunks
	 * (NOTE(review): the interleaving itself lives elsewhere; confirm).
	 */
277 	if (nswdev > 1)
278 		nswap = ((nswap + dmmax - 1) / dmmax) * dmmax;
279 	nswap *= nswdev;
280 	/*
281 	 * If there are multiple swap areas,
282 	 * allow more paging operations per second.
283 	 */
284 	if (nswdev > 1)
285 		maxpgio = (maxpgio * (2 * nswdev - 1)) / 2;
	/* Make the first device's swap space available now; the rest
	 * come on line via swapon() (see comment above). */
286 	swfree(0);
287 
288 	/*
289 	 * Now set up swap buffer headers.
290 	 */
	/* Chain all nswbuf headers into a NULL-terminated free list,
	 * headed by bswlist and linked through av_forw. */
291 	bswlist.av_forw = sp;
292 	for (i=0; i<nswbuf-1; i++, sp++)
293 		sp->av_forw = sp+1;
294 	sp->av_forw = NULL;
295 }
296 
297 /*
298  * Initialize clist by freeing all character blocks, then count
299  * number of character devices. (Once-only routine)
300  */
301 cinit()
302 {
303 	register int ccp;
304 	register struct cblock *cp;
305 
306 	ccp = (int)cfree;
307 	ccp = (ccp+CROUND) & ~CROUND;
308 	for(cp=(struct cblock *)ccp; cp < &cfree[nclist-1]; cp++) {
309 		cp->c_next = cfreelist;
310 		cfreelist = cp;
311 		cfreecount += CBSIZE;
312 	}
313 }
314