/*	kern_synch.c	6.7	85/05/27	*/

#include "../machine/pte.h"

#include "param.h"
#include "systm.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "file.h"
#include "inode.h"
#include "vm.h"
#include "kernel.h"
#include "buf.h"

#ifdef vax
#include "../vax/mtpr.h"	/* XXX */
#endif

/*
 * Force switch among equal priority processes every 100ms.
 */
roundrobin()
{

	runrun++;
	aston();
	timeout(roundrobin, (caddr_t)0, hz / 10);
}

/* fraction for digital decay to forget 90% of usage in 5*loadav sec */
#define	filter(loadav)	((2 * (loadav)) / (2 * (loadav) + 1))

double	ccpu = 0.95122942450071400909;	/* exp(-1/20) */

/*
 * Recompute process priorities, once a second
 */
schedcpu()
{
	register double ccpu1 = (1.0 - ccpu) / (double)hz;
	register struct proc *p;
	register int s, a;
	float scale = filter(avenrun[0]);

	wakeup((caddr_t)&lbolt);
	for (p = allproc; p != NULL; p = p->p_nxt) {
		if (p->p_time != 127)
			p->p_time++;
		if (p->p_stat==SSLEEP || p->p_stat==SSTOP)
			if (p->p_slptime != 127)
				p->p_slptime++;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1) {
			p->p_pctcpu *= ccpu;
			continue;
		}
		/*
		 * p_pctcpu is only for ps.
		 */
		p->p_pctcpu = ccpu * p->p_pctcpu + ccpu1 * p->p_cpticks;
		p->p_cpticks = 0;
		a = (int) (scale * (p->p_cpu & 0377)) + p->p_nice;
		if (a < 0)
			a = 0;
		if (a > 255)
			a = 255;
		p->p_cpu = a;
		(void) setpri(p);
		s = splhigh();	/* prevent state changes */
		if (p->p_pri >= PUSER) {
#define	PPQ	(128 / NQS)
			if ((p != u.u_procp || noproc) &&
			    p->p_stat == SRUN &&
			    (p->p_flag & SLOAD) &&
			    (p->p_pri / PPQ) != (p->p_usrpri / PPQ)) {
				remrq(p);
				p->p_pri = p->p_usrpri;
				setrq(p);
			} else
				p->p_pri = p->p_usrpri;
		}
		splx(s);
	}
	vmmeter();
	if (runin!=0) {
		runin = 0;
		wakeup((caddr_t)&runin);
	}
	if (bclnlist != NULL)
		wakeup((caddr_t)&proc[2]);
	timeout(schedcpu, (caddr_t)0, hz);
}
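
/*
 * Worked example of the decay above (illustrative only, not in the
 * original source): with a load average of 1, scale = filter(1) = 2/3,
 * so with p_nice = 0 a p_cpu of 100 that accumulates no further ticks
 * decays second by second as
 *	100 -> 66 -> 44 -> 29 -> 19 -> 12
 * i.e. roughly 90% of the accumulated usage is forgotten after
 * 5*loadav seconds, as the comment on filter() claims.
 */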

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
updatepri(p)
	register struct proc *p;
{
	register int a = p->p_cpu & 0377;
	float scale = filter(avenrun[0]);

	p->p_slptime--;		/* the first time was done in schedcpu */
	while (a && --p->p_slptime)
		a = (int) (scale * a) /* + p->p_nice */;
	if (a < 0)
		a = 0;
	if (a > 255)
		a = 255;
	p->p_cpu = a;
	(void) setpri(p);
}

#define SQSIZE 0100	/* Must be power of 2 */
#define HASH(x)	(( (int) x >> 5) & (SQSIZE-1))
struct slpque {
	struct proc *sq_head;
	struct proc **sq_tailp;
} slpque[SQSIZE];

/*
 * Give up the processor till a wakeup occurs
 * on chan, at which time the process
 * enters the scheduling queue at priority pri.
 * The most important effect of pri is that when
 * pri<=PZERO a signal cannot disturb the sleep;
 * if pri>PZERO signals will be processed.
 * Callers of this routine must be prepared for
 * premature return, and check that the reason for
 * sleeping has gone away.
 */
sleep(chan, pri)
	caddr_t chan;
	int pri;
{
	register struct proc *rp;
	register struct slpque *qp;
	register s;

	rp = u.u_procp;
	s = splhigh();
	if (panicstr) {
		/*
		 * After a panic, just give interrupts a chance,
		 * then just return; don't run any other procs
		 * or panic below, in case this is the idle process
		 * and already asleep.
		 * The splnet should be spl0 if the network was being used
		 * by the filesystem, but for now avoid network interrupts
		 * that might cause another panic.
		 */
		(void) splnet();
		splx(s);
		return;
	}
	if (chan==0 || rp->p_stat != SRUN || rp->p_rlink)
		panic("sleep");
	rp->p_wchan = chan;
	rp->p_slptime = 0;
	rp->p_pri = pri;
	qp = &slpque[HASH(chan)];
	if (qp->sq_head == 0)
		qp->sq_head = rp;
	else
		*qp->sq_tailp = rp;
	*(qp->sq_tailp = &rp->p_link) = 0;
	if (pri > PZERO) {
		if (ISSIG(rp)) {
			if (rp->p_wchan)
				unsleep(rp);
			rp->p_stat = SRUN;
			(void) spl0();
			goto psig;
		}
		if (rp->p_wchan == 0)
			goto out;
		rp->p_stat = SSLEEP;
		(void) spl0();
		u.u_ru.ru_nvcsw++;
		swtch();
		if (ISSIG(rp))
			goto psig;
	} else {
		rp->p_stat = SSLEEP;
		(void) spl0();
		u.u_ru.ru_nvcsw++;
		swtch();
	}
	curpri = rp->p_usrpri;
out:
	splx(s);
	return;

	/*
	 * If priority was low (>PZERO) and
	 * there has been a signal, execute non-local goto through
	 * u.u_qsave, aborting the system call in progress (see trap.c)
	 */
psig:
	longjmp(&u.u_qsave);
	/*NOTREACHED*/
}

/*
 * Remove a process from its wait queue
 */
unsleep(p)
	register struct proc *p;
{
	register struct slpque *qp;
	register struct proc **hp;
	int s;

	s = splhigh();
	if (p->p_wchan) {
		hp = &(qp = &slpque[HASH(p->p_wchan)])->sq_head;
		while (*hp != p)
			hp = &(*hp)->p_link;
		*hp = p->p_link;
		if (qp->sq_tailp == &p->p_link)
			qp->sq_tailp = hp;
		p->p_wchan = 0;
	}
	splx(s);
}

/*
 * Wake up all processes sleeping on chan.
 */
wakeup(chan)
	register caddr_t chan;
{
	register struct slpque *qp;
	register struct proc *p, **q;
	int s;

	s = splhigh();
	qp = &slpque[HASH(chan)];
restart:
	for (q = &qp->sq_head; p = *q; ) {
		if (p->p_rlink || p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("wakeup");
		if (p->p_wchan==chan) {
			p->p_wchan = 0;
			*q = p->p_link;
			if (p->p_slptime > 1)
				updatepri(p);
			if (qp->sq_tailp == &p->p_link)
				qp->sq_tailp = q;
			p->p_slptime = 0;
			if (p->p_stat == SSLEEP) {
				/* OPTIMIZED INLINE EXPANSION OF setrun(p) */
				p->p_stat = SRUN;
				if (p->p_flag & SLOAD)
					setrq(p);
				/*
				 * Since curpri is a usrpri,
				 * p->p_pri is always better than curpri.
				 */
				runrun++;
				aston();
				if ((p->p_flag&SLOAD) == 0) {
					if (runout != 0) {
						runout = 0;
						wakeup((caddr_t)&runout);
					}
					wantin++;
				}
				/* END INLINE EXPANSION */
				goto restart;
			}
		} else
			q = &p->p_link;
	}
	splx(s);
}
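
/*
 * Illustrative caller pattern (a sketch only, not part of the original
 * file; the names "resource" and "resource_busy" are hypothetical).
 * Because a wakeup on a shared channel may be premature, callers raise
 * the processor priority, then re-test their condition in a loop, e.g.
 *
 *	s = splhigh();
 *	while (resource_busy)
 *		sleep((caddr_t)&resource, PZERO+1);
 *	resource_busy = 1;
 *	splx(s);
 *
 * with the matching wakeup((caddr_t)&resource) issued when the
 * resource is released.
 */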

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
rqinit()
{
	register int i;

	for (i = 0; i < NQS; i++)
		qs[i].ph_link = qs[i].ph_rlink = (struct proc *)&qs[i];
}

/*
 * Set the process running;
 * arrange for it to be swapped in if necessary.
 */
setrun(p)
	register struct proc *p;
{
	register int s;

	s = splhigh();
	switch (p->p_stat) {

	case 0:
	case SWAIT:
	case SRUN:
	case SZOMB:
	default:
		panic("setrun");

	case SSTOP:
	case SSLEEP:
		unsleep(p);		/* e.g. when sending signals */
		break;

	case SIDL:
		break;
	}
	if (p->p_slptime > 1)
		updatepri(p);
	p->p_stat = SRUN;
	if (p->p_flag & SLOAD)
		setrq(p);
	splx(s);
	if (p->p_pri < curpri) {
		runrun++;
		aston();
	}
	if ((p->p_flag&SLOAD) == 0) {
		if (runout != 0) {
			runout = 0;
			wakeup((caddr_t)&runout);
		}
		wantin++;
	}
}

/*
 * Set user priority.
 * The rescheduling flag (runrun)
 * is set if the priority is better
 * than the currently running process.
 */
setpri(pp)
	register struct proc *pp;
{
	register int p;

	p = (pp->p_cpu & 0377)/4;
	p += PUSER + 2 * pp->p_nice;
	if (pp->p_rssize > pp->p_maxrss && freemem < desfree)
		p += 2*4;	/* effectively, nice(4) */
	if (p > 127)
		p = 127;
	if (p < curpri) {
		runrun++;
		aston();
	}
	pp->p_usrpri = p;
	return (p);
}
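
/*
 * Worked example for setpri() above (illustrative only, not in the
 * original source; assumes PUSER is 50 as defined in param.h):
 * a process with p_cpu = 100 and p_nice = 0 gets p = 100/4 + 50 = 75;
 * at nice 4 the 2*p_nice term adds 8 (p = 83), and exceeding p_maxrss
 * while free memory is below desfree adds another 8 (p = 91).
 * Larger numbers are worse priorities; the result is clipped to 127.
 */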