/*	$NetBSD: kern_synch.c,v 1.50 1998/03/01 02:22:30 fvdl Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include "opt_uvm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <vm/vm.h>

#if defined(UVM)
#include <uvm/uvm_extern.h>
#endif

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

u_char	curpriority;		/* usrpri of curproc */
int	lbolt;			/* once a second sleep address */

void roundrobin __P((void *));
void schedcpu __P((void *));
void updatepri __P((struct proc *));
void endtsleep __P((void *));

/*
 * Force switch among equal priority processes every 100ms.
 */
/* ARGSUSED */
void
roundrobin(arg)
	void *arg;
{

	need_resched();
	timeout(roundrobin, NULL, hz / 10);
}
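
/*
 * Illustrative arithmetic (not in the original file): with the common
 * clock rate hz == 100, hz / 10 is 10 ticks, so roundrobin() re-arms
 * itself and requests a reschedule every 100ms.  On an hz == 1024
 * machine the quantum would be 1024 / 10 == 102 ticks, still roughly
 * 100ms.
 */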

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *	Note that, as ps(1) mentions, this can let percentages
 *	total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 *	for (i = 0; i < (5 * loadavg); i++)
 *		p_estcpu *= decay;
 * will compute
 *	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 *	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 *	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 *	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *	b = 2 * loadavg
 * then
 *	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *	    exp(x) = x**0/0! + x**1/1! + x**2/2! + ... .
 *	    therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *	    ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *	    therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *	Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	    solving for factor,
 *	    ln(factor) =~ (-2.30/5*loadav), or
 *	    factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *		exp(-1/b) =~ (b-1)/b =~ b/(b+1).			QED
 *
 * Proof of (2):
 *	Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	    solving for power,
 *	    power*ln(b/(b+1)) =~ -2.30, or
 *	    power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.	QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav:	1	2	3	4
 *	power:	5.68	10.32	14.94	19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
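
/*
 * Worked example (illustrative, not in the original file): with a load
 * average of 1.0, loadfactor() yields loadfac = 2 * FSCALE, so
 * decay_cpu() multiplies p_estcpu by 2*FSCALE / (2*FSCALE + FSCALE),
 * i.e. 2/3, each second.  After 5 * loadavg = 5 seconds,
 * (2/3)**5 =~ 0.13, so roughly 90% of the estimated CPU usage has been
 * forgotten, matching the proof above.  Likewise for p_pctcpu:
 * ccpu**60 = exp(-60/20) = exp(-3) =~ 0.05, so 95% of it decays away
 * in 60 seconds regardless of load.
 */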

/*
 * Recompute process priorities, every hz ticks.
 */
/* ARGSUSED */
void
schedcpu(arg)
	void *arg;
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	register struct proc *p;
	register int s;
	register unsigned int newcpu;

	wakeup((caddr_t)&lbolt);
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		p->p_swtime++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1)
			continue;
		s = splstatclock();	/* prevent state changes */
		/*
		 * p_pctcpu is only for ps.
		 */
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (hz == 100)?
			((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
			100 * (((fixpt_t) p->p_cpticks)
				<< (FSHIFT - CCPU_SHIFT)) / hz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
			(p->p_cpticks * FSCALE / hz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		newcpu = (u_int)decay_cpu(loadfac, p->p_estcpu)
		    + p->p_nice - NZERO;
		p->p_estcpu = min(newcpu, UCHAR_MAX);
		resetpriority(p);
		if (p->p_priority >= PUSER) {
#define	PPQ	(128 / NQS)		/* priorities per queue */
			if ((p != curproc) &&
			    p->p_stat == SRUN &&
			    (p->p_flag & P_INMEM) &&
			    (p->p_priority / PPQ) != (p->p_usrpri / PPQ)) {
				remrunqueue(p);
				p->p_priority = p->p_usrpri;
				setrunqueue(p);
			} else
				p->p_priority = p->p_usrpri;
		}
		splx(s);
	}
#if defined(UVM)
	uvm_meter();
#else
	vmmeter();
#endif
	timeout(schedcpu, (void *)0, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
void
updatepri(p)
	register struct proc *p;
{
	register unsigned int newcpu = p->p_estcpu;
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

	if (p->p_slptime > 5 * loadfac)
		p->p_estcpu = 0;
	else {
		p->p_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --p->p_slptime)
			newcpu = (int) decay_cpu(loadfac, newcpu);
		p->p_estcpu = min(newcpu, UCHAR_MAX);
	}
	resetpriority(p);
}

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
#define LOOKUP(x)	(((long)(x) >> 8) & (TABLESIZE - 1))
struct slpque {
	struct proc *sq_head;
	struct proc **sq_tailp;
} slpque[TABLESIZE];
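
/*
 * Example (illustrative): two sleep identifiers that differ only in
 * their low 8 bits, say 0x80123456 and 0x801234a0, both hash to
 * bucket (0x801234 & 127) == 0x34 == 52.  They therefore share one
 * slpque and are told apart by the p_wchan comparison in wakeup()
 * below; a collision costs a few extra list steps, never a missed
 * or spurious match.
 */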

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If priority includes the PCATCH flag, signals
 * are checked before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 */
int
tsleep(ident, priority, wmesg, timo)
	void *ident;
	int priority, timo;
	const char *wmesg;
{
	register struct proc *p = curproc;
	register struct slpque *qp;
	register int s;
	int sig, catch = priority & PCATCH;
	extern int cold;
	void endtsleep __P((void *));

	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		s = splhigh();
		splx(safepri);
		splx(s);
		return (0);
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 1, 0);
#endif
	s = splhigh();

#ifdef DIAGNOSTIC
	if (ident == NULL || p->p_stat != SRUN || p->p_back)
		panic("tsleep");
#endif
	p->p_wchan = ident;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_priority = priority & PRIMASK;
	qp = &slpque[LOOKUP(ident)];
	if (qp->sq_head == 0)
		qp->sq_head = p;
	else
		*qp->sq_tailp = p;
	*(qp->sq_tailp = &p->p_forw) = 0;
	if (timo)
		timeout(endtsleep, (void *)p, timo);
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, p->p_wchan will be 0 upon return from CURSIG.
	 */
	if (catch) {
		p->p_flag |= P_SINTR;
		if ((sig = CURSIG(p)) != 0) {
			if (p->p_wchan)
				unsleep(p);
			p->p_stat = SRUN;
			goto resume;
		}
		if (p->p_wchan == 0) {
			catch = 0;
			goto resume;
		}
	} else
		sig = 0;
	p->p_stat = SSLEEP;
	p->p_stats->p_ru.ru_nvcsw++;
	mi_switch();
#ifdef	DDB
	/* handy breakpoint location after process "wakes" */
	asm(".globl bpendtsleep ; bpendtsleep:");
#endif
resume:
	curpriority = p->p_usrpri;
	splx(s);
	p->p_flag &= ~P_SINTR;
	if (p->p_flag & P_TIMEOUT) {
		p->p_flag &= ~P_TIMEOUT;
		if (sig == 0) {
#ifdef KTRACE
			if (KTRPOINT(p, KTR_CSW))
				ktrcsw(p->p_tracep, 0, 0);
#endif
			return (EWOULDBLOCK);
		}
	} else if (timo)
		untimeout(endtsleep, (void *)p);
	if (catch && (sig != 0 || (sig = CURSIG(p)) != 0)) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_CSW))
			ktrcsw(p->p_tracep, 0, 0);
#endif
		if (p->p_sigacts->ps_sigintr & sigmask(sig))
			return (EINTR);
		return (ERESTART);
	}
#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 0, 0);
#endif
	return (0);
}
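
/*
 * Typical usage sketch (illustrative; `flag' and "flagwt" are made-up
 * names, not part of this file).  A consumer sleeps on the address of
 * the condition it is waiting for and re-checks the condition on every
 * wakeup:
 *
 *	s = splsoftclock();			(whatever level guards flag)
 *	while (flag == 0) {
 *		if (tsleep((caddr_t)&flag, PWAIT | PCATCH, "flagwt", 0))
 *			break;			(EINTR or ERESTART)
 *	}
 *	splx(s);
 *
 * The producer side simply sets the condition and wakes the sleepers:
 *
 *	flag = 1;
 *	wakeup((caddr_t)&flag);
 */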

/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
void
endtsleep(arg)
	void *arg;
{
	register struct proc *p;
	int s;

	p = (struct proc *)arg;
	s = splhigh();
	if (p->p_wchan) {
		if (p->p_stat == SSLEEP)
			setrunnable(p);
		else
			unsleep(p);
		p->p_flag |= P_TIMEOUT;
	}
	splx(s);
}

/*
 * Short-term, non-interruptible sleep.
 */
void
sleep(ident, priority)
	void *ident;
	int priority;
{
	register struct proc *p = curproc;
	register struct slpque *qp;
	register int s;
	extern int cold;

#ifdef DIAGNOSTIC
	if (priority > PZERO) {
		printf("sleep called with priority %d > PZERO, wchan: %p\n",
		    priority, ident);
		panic("old sleep");
	}
#endif
	s = splhigh();
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		splx(safepri);
		splx(s);
		return;
	}
#ifdef DIAGNOSTIC
	if (ident == NULL || p->p_stat != SRUN || p->p_back)
		panic("sleep");
#endif
	p->p_wchan = ident;
	p->p_wmesg = NULL;
	p->p_slptime = 0;
	p->p_priority = priority;
	qp = &slpque[LOOKUP(ident)];
	if (qp->sq_head == 0)
		qp->sq_head = p;
	else
		*qp->sq_tailp = p;
	*(qp->sq_tailp = &p->p_forw) = 0;
	p->p_stat = SSLEEP;
	p->p_stats->p_ru.ru_nvcsw++;
#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 1, 0);
#endif
	mi_switch();
#ifdef	DDB
	/* handy breakpoint location after process "wakes" */
	asm(".globl bpendsleep ; bpendsleep:");
#endif
#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 0, 0);
#endif
	curpriority = p->p_usrpri;
	splx(s);
}

/*
 * Remove a process from its wait queue.
 */
void
unsleep(p)
	register struct proc *p;
{
	register struct slpque *qp;
	register struct proc **hp;
	int s;

	s = splhigh();
	if (p->p_wchan) {
		hp = &(qp = &slpque[LOOKUP(p->p_wchan)])->sq_head;
		while (*hp != p)
			hp = &(*hp)->p_forw;
		*hp = p->p_forw;
		if (qp->sq_tailp == &p->p_forw)
			qp->sq_tailp = hp;
		p->p_wchan = 0;
	}
	splx(s);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(ident)
	register void *ident;
{
	register struct slpque *qp;
	register struct proc *p, **q;
	int s;

	s = splhigh();
	qp = &slpque[LOOKUP(ident)];
restart:
	for (q = &qp->sq_head; (p = *q) != NULL; ) {
#ifdef DIAGNOSTIC
		if (p->p_back || (p->p_stat != SSLEEP && p->p_stat != SSTOP))
			panic("wakeup");
#endif
		if (p->p_wchan == ident) {
			p->p_wchan = 0;
			*q = p->p_forw;
			if (qp->sq_tailp == &p->p_forw)
				qp->sq_tailp = q;
			if (p->p_stat == SSLEEP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;
				if (p->p_flag & P_INMEM)
					setrunqueue(p);
				/*
				 * Since curpriority is a user priority,
				 * p->p_priority is always better than
				 * curpriority.
				 */
				if ((p->p_flag & P_INMEM) == 0)
					wakeup((caddr_t)&proc0);
				else
					need_resched();
				/* END INLINE EXPANSION */
				goto restart;
			}
		} else
			q = &p->p_forw;
	}
	splx(s);
}
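
/*
 * Note (illustrative): wakeup() makes *every* process sleeping on
 * `ident' runnable; schedcpu() above, for example, wakes all sleepers
 * on &lbolt once per second.  A caller whose condition may still be
 * false when it next runs must therefore re-test the condition in a
 * loop around tsleep(), as in the sketch following tsleep() above.
 */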

/*
 * The machine independent parts of mi_switch().
 * Must be called at splstatclock() or higher.
 */
void
mi_switch()
{
	register struct proc *p = curproc;	/* XXX */
	register struct rlimit *rlim;
	register long s, u;
	struct timeval tv;

#ifdef DEBUG
	if (p->p_simple_locks)
		panic("sleep: holding simple lock");
#endif
	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	microtime(&tv);
	u = p->p_rtime.tv_usec + (tv.tv_usec - runtime.tv_usec);
	s = p->p_rtime.tv_sec + (tv.tv_sec - runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	p->p_rtime.tv_usec = u;
	p->p_rtime.tv_sec = s;

	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.  In any case, if it has run longer
	 * than the autonice threshold, renice it to give others
	 * a chance.
	 */
	rlim = &p->p_rlimit[RLIMIT_CPU];
	if (s >= rlim->rlim_cur) {
		if (s >= rlim->rlim_max)
			psignal(p, SIGKILL);
		else {
			psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max)
				rlim->rlim_cur += 5;
		}
	}
	if (autonicetime && s > autonicetime &&
	    p->p_ucred->cr_uid && p->p_nice == NZERO) {
		p->p_nice = autoniceval + NZERO;
		resetpriority(p);
	}

	/*
	 * Pick a new current process and record its start time.
	 */
#if defined(UVM)
	uvmexp.swtch++;
#else
	cnt.v_swtch++;
#endif
	cpu_switch(p);
	microtime(&runtime);
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
rqinit()
{
	register int i;

	for (i = 0; i < NQS; i++)
		qs[i].ph_link = qs[i].ph_rlink = (struct proc *)&qs[i];
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(p)
	register struct proc *p;
{
	register int s;

	s = splhigh();
	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SZOMB:
	default:
		panic("setrunnable");
	case SSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0)
			p->p_siglist |= sigmask(p->p_xstat);
		/* FALLTHROUGH */
	case SSLEEP:
		unsleep(p);		/* e.g. when sending signals */
		break;

	case SIDL:
		break;
	}
	p->p_stat = SRUN;
	if (p->p_flag & P_INMEM)
		setrunqueue(p);
	splx(s);
	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	if ((p->p_flag & P_INMEM) == 0)
		wakeup((caddr_t)&proc0);
	else if (p->p_priority < curpriority)
		need_resched();
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(p)
	register struct proc *p;
{
	register unsigned int newpriority;

	newpriority = PUSER + p->p_estcpu / 4 + 2 * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	p->p_usrpri = newpriority;
	if (newpriority < curpriority)
		need_resched();
}
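
/*
 * Worked example for resetpriority() (illustrative; PUSER is 50 on the
 * traditional BSD priority scale of 0..127, smaller == better): a
 * process at the default nice value (p_nice == NZERO) with
 * p_estcpu == 40 gets
 *
 *	newpriority = 50 + 40 / 4 + 2 * 0 = 60,
 *
 * and each nice level above the default adds another 2, so the same
 * process niced 10 levels down computes 60 + 20 = 80, a numerically
 * larger and therefore worse user priority.  A fully decayed process
 * (p_estcpu == 0) returns to PUSER itself.
 */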