/*	init_main.c	6.9	85/05/27	*/

#include "../machine/pte.h"

#include "param.h"
#include "systm.h"
#include "dir.h"
#include "user.h"
#include "kernel.h"
#include "fs.h"
#include "mount.h"
#include "map.h"
#include "proc.h"
#include "inode.h"
#include "seg.h"
#include "conf.h"
#include "buf.h"
#include "vm.h"
#include "cmap.h"
#include "text.h"
#include "clist.h"
#ifdef INET
#include "protosw.h"
#endif
#include "quota.h"
#include "../machine/reg.h"
#include "../machine/cpu.h"

int	cmask = CMASK;
/*
 * Initialization code.
 * Called from cold start routine as
 * soon as a stack and segmentation
 * have been established.
 * Functions:
 *	clear and free user core
 *	turn on clock
 *	hand craft 0th process
 *	call all initialization routines
 *	fork - process 0 to schedule
 *	     - process 1 execute bootstrap
 *	     - process 2 to page out
 */
main(firstaddr)
	int firstaddr;
{
	register int i;
	register struct proc *p;
	struct fs *fs;
	int s;

	rqinit();
#include "loop.h"		/* config-generated; defines NLOOP */
	startup(firstaddr);

	/*
	 * set up system process 0 (swapper)
	 */
	p = &proc[0];
	p->p_p0br = u.u_pcb.pcb_p0br;
	p->p_szpt = 1;
	p->p_addr = uaddr(p);
	p->p_stat = SRUN;
	p->p_flag |= SLOAD|SSYS;
	p->p_nice = NZERO;
	setredzone(p->p_addr, (caddr_t)&u);
	u.u_procp = p;
#ifdef vax
	/*
	 * These assume that the u. area is always mapped
	 * to the same virtual address. Otherwise must be
	 * handled when copying the u. area in newproc().
	 */
	u.u_nd.ni_iov = &u.u_nd.ni_iovec;
	u.u_ap = u.u_arg;
#endif
	u.u_nd.ni_iovcnt = 1;
	u.u_cmask = cmask;
	u.u_lastfile = -1;
	for (i = 1; i < NGROUPS; i++)
		u.u_groups[i] = NOGROUP;
	for (i = 0; i < sizeof(u.u_rlimit)/sizeof(u.u_rlimit[0]); i++)
		u.u_rlimit[i].rlim_cur = u.u_rlimit[i].rlim_max =
		    RLIM_INFINITY;
	/*
	 * Virtual memory limits get set in vminit().
	 */
	vminit();
#if defined(QUOTA)
	qtinit();
	p->p_quota = u.u_quota = getquota(0, 0, Q_NDQ);
#endif
	startrtclock();
#include "kg.h"			/* config-generated; defines NKG */
#if NKG > 0
	startkgclock();
#endif

	/*
	 * Initialize tables, protocols, and set up well-known inodes.
	 */
	mbinit();
	cinit();
#ifdef INET
#if NLOOP > 0
	loattach();			/* XXX */
#endif
	/*
	 * Block reception of incoming packets
	 * until protocols have been initialized.
	 */
	s = splimp();
	ifinit();
#endif
	domaininit();
#ifdef INET
	splx(s);
#endif
	pqinit();
	ihinit();
	bhinit();
	binit();
	bswinit();
	nchinit();
#ifdef GPROF
	kmstartup();
#endif

	fs = mountfs(rootdev, 0, (struct inode *)0);
	if (fs == 0)
		panic("iinit");
	bcopy("/", fs->fs_fsmnt, 2);

	inittodr(fs->fs_time);
	boottime = time;

	/* kick off timeout driven events by calling first time */
	roundrobin();
	schedcpu();
	schedpaging();

	/* set up the root file system */
	rootdir = iget(rootdev, fs, (ino_t)ROOTINO);
	iunlock(rootdir);
	u.u_cdir = iget(rootdev, fs, (ino_t)ROOTINO);
	iunlock(u.u_cdir);
	u.u_rdir = NULL;

	u.u_dmap = zdmap;
	u.u_smap = zdmap;

	/*
	 * make init process
	 */

	proc[0].p_szpt = CLSIZE;
	if (newproc(0)) {
		expand(clrnd((int)btoc(szicode)), 0);
		(void) swpexpand(u.u_dsize, 0, &u.u_dmap, &u.u_smap);
		(void) copyout((caddr_t)icode, (caddr_t)0, (unsigned)szicode);
		/*
		 * Return goes to loc. 0 of user init
		 * code just copied out.
		 */
		return;
	}
	/*
	 * make page-out daemon (process 2)
	 * the daemon has ctopt(nswbuf*CLSIZE*KLMAX) pages of page
	 * table so that it can map dirty pages into
	 * its address space during asynchronous pushes.
	 */
	proc[0].p_szpt = clrnd(ctopt(nswbuf*CLSIZE*KLMAX + UPAGES));
	if (newproc(0)) {
		proc[2].p_flag |= SLOAD|SSYS;
		proc[2].p_dsize = u.u_dsize = nswbuf*CLSIZE*KLMAX;
		pageout();
		/*NOTREACHED*/
	}

	/*
	 * enter scheduling loop
	 */
	proc[0].p_szpt = 1;
	sched();
}

/*
 * Initialize hash links for buffers.
 */
bhinit()
{
	register int i;
	register struct bufhd *bp;

	for (bp = bufhash, i = 0; i < BUFHSZ; i++, bp++)
		bp->b_forw = bp->b_back = (struct buf *)bp;
}

/*
 * Initialize the buffer I/O system by freeing
 * all buffers and setting all device buffer lists to empty.
 */
binit()
{
	register struct buf *bp, *dp;
	register int i;
	struct swdevt *swp;
	int base, residual;

	for (dp = bfreelist; dp < &bfreelist[BQUEUES]; dp++) {
		dp->b_forw = dp->b_back = dp->av_forw = dp->av_back = dp;
		dp->b_flags = B_HEAD;
	}
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bp->b_dev = NODEV;
		bp->b_bcount = 0;
		bp->b_un.b_addr = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * CLBYTES;
		else
			bp->b_bufsize = base * CLBYTES;
		binshash(bp, &bfreelist[BQ_AGE]);
		bp->b_flags = B_BUSY|B_INVAL;
		brelse(bp);
	}
	/*
	 * Count swap devices, and adjust total swap space available.
	 * Some of this space will not be available until a vswapon()
	 * system call is issued, usually when the system goes multi-user.
	 */
	nswdev = 0;
	nswap = 0;
	for (swp = swdevt; swp->sw_dev; swp++) {
		nswdev++;
		if (swp->sw_nblks > nswap)
			nswap = swp->sw_nblks;
	}
	if (nswdev == 0)
		panic("binit");
	if (nswdev > 1)
		nswap = ((nswap + dmmax - 1) / dmmax) * dmmax;
	nswap *= nswdev;
	maxpgio *= nswdev;
	swfree(0);
}

/*
 * Initialize linked list of free swap
 * headers. These do not actually point
 * to buffers, but rather to pages that
 * are being swapped in and out.
 */
bswinit()
{
	register int i;
	register struct buf *sp = swbuf;

	bswlist.av_forw = sp;
	for (i = 0; i < nswbuf - 1; i++, sp++)
		sp->av_forw = sp + 1;
	sp->av_forw = NULL;
}

/*
 * Initialize clist by freeing all character blocks, then count
 * number of character devices. (Once-only routine)
 */
cinit()
{
	register int ccp;
	register struct cblock *cp;

	ccp = (int)cfree;
	ccp = (ccp + CROUND) & ~CROUND;
	for (cp = (struct cblock *)ccp; cp < &cfree[nclist-1]; cp++) {
		cp->c_next = cfreelist;
		cfreelist = cp;
		cfreecount += CBSIZE;
	}
}
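
/*
 * Minimal stand-alone sketch of the base/residual split used by binit()
 * above: every buffer header gets base = bufpages / nbuf pages, and the
 * first residual = bufpages % nbuf headers get one page more, so the
 * whole of bufpages is handed out with nothing left over.  The figures
 * chosen for bufpages, nbuf and CLBYTES are made-up examples, and the
 * block is disabled with "#ifdef notdef" so it plays no part in a
 * kernel build; it can be lifted out and compiled as a user program.
 */
#ifdef notdef
#include <stdio.h>

#define CLBYTES	1024		/* assumed bytes per click, example only */

int
main(void)
{
	int bufpages = 205;		/* hypothetical total buffer pages */
	int nbuf = 32;			/* hypothetical number of headers */
	int base = bufpages / nbuf;	/* pages every header receives */
	int residual = bufpages % nbuf;	/* headers that get one extra page */
	int i, total = 0;

	for (i = 0; i < nbuf; i++) {
		int pages = (i < residual) ? base + 1 : base;

		total += pages;
		printf("buf[%2d]: b_bufsize = %d bytes\n",
		    i, pages * CLBYTES);
	}
	/* total equals bufpages: every page is assigned to some header */
	printf("distributed %d of %d pages\n", total, bufpages);
	return (0);
}
#endif /* notdef */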