xref: /csrg-svn/sys/kern/init_main.c (revision 30256)
/*
 * Copyright (c) 1982, 1986 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)init_main.c	7.3 (Berkeley) 12/06/86
 */

#include "../machine/pte.h"

#include "param.h"
#include "systm.h"
#include "dir.h"
#include "user.h"
#include "kernel.h"
#include "fs.h"
#include "mount.h"
#include "map.h"
#include "proc.h"
#include "inode.h"
#include "seg.h"
#include "conf.h"
#include "buf.h"
#include "vm.h"
#include "cmap.h"
#include "text.h"
#include "clist.h"
#include "protosw.h"
#include "quota.h"
#include "../machine/reg.h"
#include "../machine/cpu.h"

int	cmask = CMASK;
/*
 * Initialization code.
 * Called from cold start routine as
 * soon as a stack and segmentation
 * have been established.
 * Functions:
 *	clear and free user core
 *	turn on clock
 *	hand craft 0th process
 *	call all initialization routines
 *	fork - process 0 to schedule
 *	     - process 1 to execute bootstrap
 *	     - process 2 to page out
 */
main(firstaddr)
	int firstaddr;
{
	register int i;
	register struct proc *p;
	struct fs *fs;
	int s;

	rqinit();
#include "loop.h"
	startup(firstaddr);

	/*
	 * set up system process 0 (swapper)
	 */
	p = &proc[0];
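	/*
	 * On the tahoe, hand process 0 its code and data cache keys.
	 */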
#if defined(tahoe)
#ifndef lint
#define	initkey(which, p, index) \
    which/**/_cache[index] = 1, which/**/_cnt[index] = 1; \
    p->p_/**/which = index;
	initkey(ckey, p, MAXCKEY);
	initkey(dkey, p, MAXDKEY);
#endif
#endif
	p->p_p0br = u.u_pcb.pcb_p0br;
	p->p_szpt = 1;
	p->p_addr = uaddr(p);
	p->p_stat = SRUN;
	p->p_flag |= SLOAD|SSYS;
	p->p_nice = NZERO;
	setredzone(p->p_addr, (caddr_t)&u);
	u.u_procp = p;
	/*
	 * These assume that the u. area is always mapped
	 * to the same virtual address.  Otherwise it must be
	 * handled when copying the u. area in newproc().
	 */
	u.u_nd.ni_iov = &u.u_nd.ni_iovec;
	u.u_ap = u.u_arg;
	u.u_nd.ni_iovcnt = 1;

	u.u_cmask = cmask;
	u.u_lastfile = -1;
	for (i = 1; i < NGROUPS; i++)
		u.u_groups[i] = NOGROUP;
	for (i = 0; i < sizeof(u.u_rlimit)/sizeof(u.u_rlimit[0]); i++)
		u.u_rlimit[i].rlim_cur = u.u_rlimit[i].rlim_max =
		    RLIM_INFINITY;
	/*
	 * configure virtual memory system,
	 * set vm rlimits
	 */
	vminit();

#if defined(QUOTA)
	qtinit();
	p->p_quota = u.u_quota = getquota(0, 0, Q_NDQ);
#endif
	startrtclock();
#if defined(vax)
#include "kg.h"
#if NKG > 0
	startkgclock();
#endif
#endif

	/*
	 * Initialize tables, protocols, and set up well-known inodes.
	 */
	mbinit();
	cinit();
#include "sl.h"
#if NSL > 0
	slattach();			/* XXX */
#endif
#if NLOOP > 0
	loattach();			/* XXX */
#endif
	/*
	 * Block reception of incoming packets
	 * until protocols have been initialized.
	 */
	s = splimp();
	ifinit();
	domaininit();
	splx(s);
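	/*
	 * Set up the remaining kernel tables: process queues (pqinit),
	 * the text table (xinit), the inode and buffer hash chains
	 * (ihinit, bhinit), the buffer cache (binit), the swap I/O
	 * headers (bswinit) and the name cache (nchinit).
	 */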
	pqinit();
	xinit();
	ihinit();
	bhinit();
	binit();
	bswinit();
	nchinit();
#ifdef GPROF
	kmstartup();
#endif

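	/*
	 * Mount the root file system and record "/" as its mount point.
	 */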
	fs = mountfs(rootdev, 0, (struct inode *)0);
	if (fs == 0)
		panic("iinit");
	bcopy("/", fs->fs_fsmnt, 2);

	inittodr(fs->fs_time);
	boottime = time;

/* kick off timeout driven events by calling them for the first time */
	roundrobin();
	schedcpu();
	schedpaging();

/* set up the root file system */
	rootdir = iget(rootdev, fs, (ino_t)ROOTINO);
	iunlock(rootdir);
	u.u_cdir = iget(rootdev, fs, (ino_t)ROOTINO);
	iunlock(u.u_cdir);
	u.u_rdir = NULL;

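	/* process 0 starts with the zero (empty) disk maps */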
	u.u_dmap = zdmap;
	u.u_smap = zdmap;

	enablertclock();		/* enable realtime clock interrupts */
#if defined(tahoe)
	clk_enable = 1;			/* enable clock interrupt */
#endif
	/*
	 * make init process
	 */

	proc[0].p_szpt = CLSIZE;
	if (newproc(0)) {
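		/* child: this is process 1; copy out icode and return to run it at location 0 */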
		expand(clrnd((int)btoc(szicode)), 0);
		(void) swpexpand(u.u_dsize, (size_t)0, &u.u_dmap, &u.u_smap);
		(void) copyout((caddr_t)icode, (caddr_t)0, (unsigned)szicode);
		/*
		 * Return goes to loc. 0 of user init
		 * code just copied out.
		 */
		return;
	}
	/*
	 * make page-out daemon (process 2).
	 * The daemon has ctopt(nswbuf*CLSIZE*KLMAX) pages of page
	 * table so that it can map dirty pages into
	 * its address space during asynchronous pushes.
	 */
	proc[0].p_szpt = clrnd(ctopt(nswbuf*CLSIZE*KLMAX + UPAGES));
	if (newproc(0)) {
		proc[2].p_flag |= SLOAD|SSYS;
		proc[2].p_dsize = u.u_dsize = nswbuf*CLSIZE*KLMAX;
		pageout();
		/*NOTREACHED*/
	}

	/*
	 * enter scheduling loop
	 */
	proc[0].p_szpt = 1;
	sched();
}

/*
 * Initialize hash links for buffers.
 */
bhinit()
{
	register int i;
	register struct bufhd *bp;

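	/* each hash header begins as an empty circular list pointing to itself */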
	for (bp = bufhash, i = 0; i < BUFHSZ; i++, bp++)
		bp->b_forw = bp->b_back = (struct buf *)bp;
}

/*
 * Initialize the buffer I/O system by freeing
 * all buffers and setting all device buffer lists to empty.
 */
binit()
{
	register struct buf *bp, *dp;
	register int i;
	struct swdevt *swp;
	int base, residual;

	for (dp = bfreelist; dp < &bfreelist[BQUEUES]; dp++) {
		dp->b_forw = dp->b_back = dp->av_forw = dp->av_back = dp;
		dp->b_flags = B_HEAD;
	}
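	/*
	 * Distribute bufpages among the nbuf buffer headers as evenly
	 * as possible; the first `residual' buffers each get one
	 * extra page.
	 */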
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bp->b_dev = NODEV;
		bp->b_bcount = 0;
		bp->b_un.b_addr = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * CLBYTES;
		else
			bp->b_bufsize = base * CLBYTES;
		binshash(bp, &bfreelist[BQ_AGE]);
		bp->b_flags = B_BUSY|B_INVAL;
		brelse(bp);
	}
	/*
	 * Count swap devices, and adjust total swap space available.
	 * Some of this space will not be available until a vswapon()
	 * system call is issued, usually when the system goes multi-user.
	 */
	nswdev = 0;
	nswap = 0;
	for (swp = swdevt; swp->sw_dev; swp++) {
		nswdev++;
		if (swp->sw_nblks > nswap)
			nswap = swp->sw_nblks;
	}
	if (nswdev == 0)
		panic("binit");
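	/*
	 * Swap space is interleaved across the devices in dmmax-block
	 * sections; round the largest device size up to a multiple of
	 * dmmax and scale by the number of devices.
	 */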
	if (nswdev > 1)
		nswap = ((nswap + dmmax - 1) / dmmax) * dmmax;
	nswap *= nswdev;
	/*
	 * If there are multiple swap areas,
	 * allow more paging operations per second.
	 */
	if (nswdev > 1)
		maxpgio = (maxpgio * (2 * nswdev - 1)) / 2;
	swfree(0);
}

/*
 * Initialize linked list of free swap
 * headers. These do not actually point
 * to buffers, but rather to pages that
 * are being swapped in and out.
 */
bswinit()
{
	register int i;
	register struct buf *sp = swbuf;

	bswlist.av_forw = sp;
	for (i=0; i<nswbuf-1; i++, sp++)
		sp->av_forw = sp+1;
	sp->av_forw = NULL;
}

/*
 * Initialize clist by freeing all character blocks onto the
 * free list. (Once-only routine)
 */
cinit()
{
	register int ccp;
	register struct cblock *cp;

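	/*
	 * Round the start of the cblock pool up to a cblock (CROUND)
	 * boundary, then thread the aligned blocks onto the free list.
	 */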
	ccp = (int)cfree;
	ccp = (ccp+CROUND) & ~CROUND;
	for(cp=(struct cblock *)ccp; cp < &cfree[nclist-1]; cp++) {
		cp->c_next = cfreelist;
		cfreelist = cp;
		cfreecount += CBSIZE;
	}
}