/*
 * Copyright (c) 1982, 1986 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)init_main.c	7.2 (Berkeley) 11/03/86
 */

#include "../machine/pte.h"

#include "param.h"
#include "systm.h"
#include "dir.h"
#include "user.h"
#include "kernel.h"
#include "fs.h"
#include "mount.h"
#include "map.h"
#include "proc.h"
#include "inode.h"
#include "seg.h"
#include "conf.h"
#include "buf.h"
#include "vm.h"
#include "cmap.h"
#include "text.h"
#include "clist.h"
#include "protosw.h"
#include "quota.h"
#include "../machine/reg.h"
#include "../machine/cpu.h"

int	cmask = CMASK;
/*
 * Initialization code.
 * Called from cold start routine as
 * soon as a stack and segmentation
 * have been established.
 * Functions:
 *	clear and free user core
 *	turn on clock
 *	hand craft 0th process
 *	call all initialization routines
 *	fork	- process 0 to schedule
 *		- process 1 execute bootstrap
 *		- process 2 to page out
 */
main(firstaddr)
        int firstaddr;
{
        register int i;
        register struct proc *p;
        struct fs *fs;
        int s;

        rqinit();
#include "loop.h"
        startup(firstaddr);

        /*
         * set up system process 0 (swapper)
         */
        p = &proc[0];
#if defined(tahoe)
#ifndef lint
        /* the comment splice below is the pre-ANSI equivalent of the ## paste operator */
#define initkey(which, p, index) \
        which/**/_cache[index] = 1, which/**/_cnt[index] = 1; \
        p->p_/**/which = index;
        initkey(ckey, p, MAXCKEY);
        initkey(dkey, p, MAXDKEY);
#endif
#endif
        p->p_p0br = u.u_pcb.pcb_p0br;
        p->p_szpt = 1;
        p->p_addr = uaddr(p);
        p->p_stat = SRUN;
        p->p_flag |= SLOAD|SSYS;
        p->p_nice = NZERO;
        setredzone(p->p_addr, (caddr_t)&u);
        u.u_procp = p;
        /*
         * These assume that the u. area is always mapped
         * to the same virtual address.  Otherwise they must be
         * handled when copying the u. area in newproc().
         */
        u.u_nd.ni_iov = &u.u_nd.ni_iovec;
        u.u_ap = u.u_arg;
        u.u_nd.ni_iovcnt = 1;

        u.u_cmask = cmask;
        u.u_lastfile = -1;
        for (i = 1; i < NGROUPS; i++)
                u.u_groups[i] = NOGROUP;
        for (i = 0; i < sizeof(u.u_rlimit)/sizeof(u.u_rlimit[0]); i++)
                u.u_rlimit[i].rlim_cur = u.u_rlimit[i].rlim_max =
                    RLIM_INFINITY;
        /*
         * configure virtual memory system,
         * set vm rlimits
         */
        vminit();

#if defined(QUOTA)
        qtinit();
        p->p_quota = u.u_quota = getquota(0, 0, Q_NDQ);
#endif
#if defined(vax)
        startrtclock();
#include "kg.h"
#if NKG > 0
        startkgclock();
#endif
#endif

        /*
         * Initialize tables, protocols, and set up well-known inodes.
         */
        mbinit();
        cinit();
#include "sl.h"
#if NSL > 0
        slattach();                     /* XXX */
#endif
#if NLOOP > 0
        loattach();                     /* XXX */
#endif
        /*
         * Block reception of incoming packets
         * until protocols have been initialized.
         */
        s = splimp();
        ifinit();
        domaininit();
        splx(s);
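        /*
         * The spl bracket above is the usual BSD idiom for a short
         * critical section against device interrupts.  A minimal sketch
         * of the pattern (illustration only, not extra initialization):
         *
         *	s = splimp();		raise ipl above network devices
         *	... set up data the interrupt side will use ...
         *	splx(s);		restore the saved priority
         *
         * splimp() returns the previous priority level, so brackets
         * like this nest safely; the exact level is machine-dependent.
         */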
        pqinit();
        xinit();
        ihinit();
        bhinit();
        binit();
        bswinit();
        nchinit();
#ifdef GPROF
        kmstartup();
#endif

        fs = mountfs(rootdev, 0, (struct inode *)0);
        if (fs == 0)
                panic("iinit");
        bcopy("/", fs->fs_fsmnt, 2);

        inittodr(fs->fs_time);
        boottime = time;

        /* kick off timeout driven events by calling first time */
        roundrobin();
        schedcpu();
        schedpaging();

        /* set up the root file system */
        rootdir = iget(rootdev, fs, (ino_t)ROOTINO);
        iunlock(rootdir);
        u.u_cdir = iget(rootdev, fs, (ino_t)ROOTINO);
        iunlock(u.u_cdir);
        u.u_rdir = NULL;

        u.u_dmap = zdmap;
        u.u_smap = zdmap;

#if defined(tahoe)
        clk_enable = 1;                 /* enable clock interrupt */
#endif
        /*
         * make init process
         */

        proc[0].p_szpt = CLSIZE;
        if (newproc(0)) {
                expand(clrnd((int)btoc(szicode)), 0);
                (void) swpexpand(u.u_dsize, (size_t)0, &u.u_dmap, &u.u_smap);
                (void) copyout((caddr_t)icode, (caddr_t)0, (unsigned)szicode);
                /*
                 * Return goes to loc. 0 of user init
                 * code just copied out.
                 */
                return;
        }
        /*
         * make page-out daemon (process 2)
         * the daemon has ctopt(nswbuf*CLSIZE*KLMAX) pages of page
         * table so that it can map dirty pages into
         * its address space during asynchronous pushes.
         */
        proc[0].p_szpt = clrnd(ctopt(nswbuf*CLSIZE*KLMAX + UPAGES));
        if (newproc(0)) {
                proc[2].p_flag |= SLOAD|SSYS;
                proc[2].p_dsize = u.u_dsize = nswbuf*CLSIZE*KLMAX;
                pageout();
                /*NOTREACHED*/
        }

        /*
         * enter scheduling loop
         */
        proc[0].p_szpt = 1;
        sched();
}

/*
 * Initialize hash links for buffers.
 */
bhinit()
{
        register int i;
        register struct bufhd *bp;

        for (bp = bufhash, i = 0; i < BUFHSZ; i++, bp++)
                bp->b_forw = bp->b_back = (struct buf *)bp;
}

/*
 * Initialize the buffer I/O system by freeing
 * all buffers and setting all device buffer lists to empty.
 */
binit()
{
        register struct buf *bp, *dp;
        register int i;
        struct swdevt *swp;
        int base, residual;

        for (dp = bfreelist; dp < &bfreelist[BQUEUES]; dp++) {
                dp->b_forw = dp->b_back = dp->av_forw = dp->av_back = dp;
                dp->b_flags = B_HEAD;
        }
        base = bufpages / nbuf;
        residual = bufpages % nbuf;
        for (i = 0; i < nbuf; i++) {
                bp = &buf[i];
                bp->b_dev = NODEV;
                bp->b_bcount = 0;
                bp->b_un.b_addr = buffers + i * MAXBSIZE;
                if (i < residual)
                        bp->b_bufsize = (base + 1) * CLBYTES;
                else
                        bp->b_bufsize = base * CLBYTES;
                binshash(bp, &bfreelist[BQ_AGE]);
                bp->b_flags = B_BUSY|B_INVAL;
                brelse(bp);
        }
        /*
         * Count swap devices, and adjust total swap space available.
         * Some of this space will not be available until a vswapon()
         * system call is issued, usually when the system goes multi-user.
         */
        nswdev = 0;
        nswap = 0;
        for (swp = swdevt; swp->sw_dev; swp++) {
                nswdev++;
                if (swp->sw_nblks > nswap)
                        nswap = swp->sw_nblks;
        }
        if (nswdev == 0)
                panic("binit");
        if (nswdev > 1)
                nswap = ((nswap + dmmax - 1) / dmmax) * dmmax;
        nswap *= nswdev;
        /*
         * If there are multiple swap areas,
         * allow more paging operations per second.
         */
        if (nswdev > 1)
                maxpgio = (maxpgio * (2 * nswdev - 1)) / 2;
        swfree(0);
}
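/*
 * A worked example of the arithmetic in binit() above, with
 * hypothetical values (not taken from any particular configuration):
 * if bufpages = 205 and nbuf = 64, then base = 205 / 64 = 3 and
 * residual = 205 % 64 = 13, so the first 13 buffers each get
 * (base + 1) * CLBYTES = 4 * CLBYTES bytes and the remaining 51 get
 * 3 * CLBYTES, accounting for all 13*4 + 51*3 = 205 CLBYTES-sized
 * chunks of buffer memory.
 *
 * Similarly for the swap sizing: with two hypothetical swap devices
 * of 33000 and 20000 blocks and dmmax = 2048, nswap starts as the
 * larger size (33000), is rounded up to a multiple of dmmax
 * (17 * 2048 = 34816) so interleaving works, and is then multiplied
 * by nswdev, giving nswap = 69632; maxpgio is scaled by
 * (2*2 - 1)/2, i.e. multiplied by 1.5.
 */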
/*
 * Initialize linked list of free swap
 * headers.  These do not actually point
 * to buffers, but rather to pages that
 * are being swapped in and out.
 */
bswinit()
{
        register int i;
        register struct buf *sp = swbuf;

        bswlist.av_forw = sp;
        for (i = 0; i < nswbuf - 1; i++, sp++)
                sp->av_forw = sp + 1;
        sp->av_forw = NULL;
}

/*
 * Initialize clist by freeing all character blocks.
 * (Once-only routine)
 */
cinit()
{
        register int ccp;
        register struct cblock *cp;

        ccp = (int)cfree;
        ccp = (ccp + CROUND) & ~CROUND;
        for (cp = (struct cblock *)ccp; cp < &cfree[nclist-1]; cp++) {
                cp->c_next = cfreelist;
                cfreelist = cp;
                cfreecount += CBSIZE;
        }
}
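/*
 * Note on the rounding idiom in cinit() above: (ccp + CROUND) & ~CROUND
 * rounds the address of cfree up to the next multiple of CROUND + 1,
 * which works only when CROUND + 1 is a power of two.  As a worked
 * example with hypothetical values, if CROUND were 0x1f (cblocks
 * aligned to 32 bytes) and cfree began at address 0x80001234, then
 * (0x80001234 + 0x1f) & ~0x1f = 0x80001240 is the first properly
 * aligned cblock; each block linked onto cfreelist then adds CBSIZE
 * usable characters to cfreecount.
 */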