/*
 * Copyright (c) 1982, 1986, 1990 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)kern_synch.c	7.14 (Berkeley) 03/17/91
 */

#include "param.h"
#include "systm.h"
#include "user.h"
#include "proc.h"
#include "kernel.h"
#include "buf.h"

#include "machine/cpu.h"

/*
 * Force switch among equal priority processes every 100ms.
 */
roundrobin()
{

        need_resched();
        timeout(roundrobin, (caddr_t)0, hz / 10);
}

/*
 * constants for digital decay and forget
 *	90% of (p_cpu) usage in 5*loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *	Note that, as ps(1) mentions, this can let percentages
 *	total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_cpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_cpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 *	for (i = 0; i < (5 * loadavg); i++)
 *		p_cpu *= decay;
 * will compute
 *	p_cpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 *	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 *	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 *	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *	b = 2 * loadavg
 * then
 *	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *		exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *		therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *		ln(1+x) = x - x**2/2 + x**3/3 - ...	-1 < x < 1
 *		therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *	Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *	ln(factor) =~ (-2.30/5*loadav), or
 *	factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *		exp(-1/b) =~ (b-1)/b =~ b/(b+1).		QED
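 *
 * (Illustrative check of (1), with concrete numbers: for loadav == 1,
 *  power == 5 and b == 2, so ln(factor) =~ -2.30/5 = -.46 and
 *  factor =~ exp(-.46) =~ .63, close to b/(b+1) == 2/3, the decay
 *  the system actually uses.)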
 *
 * Proof of (2):
 *	Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *	power*ln(b/(b+1)) =~ -2.30, or
 *	power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.	QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav:	1	2	3	4
 *	power:	5.68	10.32	14.94	19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
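
/*
 * Illustrative arithmetic behind the CCPU_SHIFT estimate above (a sketch
 * only, assuming hz == 100 and ccpu == exp(-1/20) scaled by FSCALE): the
 * general formula in schedcpu() below adds
 *	((FSCALE - ccpu) * (p_cpticks * FSCALE / hz)) >> FSHIFT
 *	    =~ 0.0488 * p_cpticks * FSCALE / 100
 * to p_pctcpu once a second, and 0.0488 / 100 =~ 1/2048 == 2**-11, so the
 * same increment can be computed with a single shift:
 *	p_cpticks * FSCALE / 2**11 == p_cpticks << (FSHIFT - CCPU_SHIFT).
 */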

/*
 * Recompute process priorities, once a second
 */
schedcpu()
{
        register fixpt_t loadfac = loadfactor(averunnable[0]);
        register struct proc *p;
        register int s;
        register unsigned int newcpu;

        wakeup((caddr_t)&lbolt);
        for (p = allproc; p != NULL; p = p->p_nxt) {
                /*
                 * Increment time in/out of memory and sleep time
                 * (if sleeping).  We ignore overflow; with 16-bit int's
                 * (remember them?) overflow takes 45 days.
                 */
                p->p_time++;
                if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
                        p->p_slptime++;
                p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
                /*
                 * If the process has slept the entire second,
                 * stop recalculating its priority until it wakes up.
                 */
                if (p->p_slptime > 1)
                        continue;
                /*
                 * p_pctcpu is only for ps.
                 */
#if	(FSHIFT >= CCPU_SHIFT)
                p->p_pctcpu += (hz == 100)?
                        ((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
                        100 * (((fixpt_t) p->p_cpticks)
                                << (FSHIFT - CCPU_SHIFT)) / hz;
#else
                p->p_pctcpu += ((FSCALE - ccpu) *
                        (p->p_cpticks * FSCALE / hz)) >> FSHIFT;
#endif
                p->p_cpticks = 0;
                newcpu = (u_int) decay_cpu(loadfac, p->p_cpu) + p->p_nice;
                p->p_cpu = min(newcpu, UCHAR_MAX);
                setpri(p);
                s = splhigh();	/* prevent state changes */
                if (p->p_pri >= PUSER) {
#define	PPQ	(128 / NQS)		/* priorities per queue */
                        if ((p != curproc || noproc) &&
                            p->p_stat == SRUN &&
                            (p->p_flag & SLOAD) &&
                            (p->p_pri / PPQ) != (p->p_usrpri / PPQ)) {
                                remrq(p);
                                p->p_pri = p->p_usrpri;
                                setrq(p);
                        } else
                                p->p_pri = p->p_usrpri;
                }
                splx(s);
        }
        vmmeter();
        if (bclnlist != NULL)
                wakeup((caddr_t)pageproc);
        timeout(schedcpu, (caddr_t)0, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_cpu of 255, sleeping for at least
 * six times the loadfactor will decay p_cpu to zero.
 */
updatepri(p)
        register struct proc *p;
{
        register unsigned int newcpu = p->p_cpu;
        register fixpt_t loadfac = loadfactor(averunnable[0]);

        if (p->p_slptime > 5 * loadfac)
                p->p_cpu = 0;
        else {
                p->p_slptime--;	/* the first time was done in schedcpu */
                while (newcpu && --p->p_slptime)
                        newcpu = (int) decay_cpu(loadfac, newcpu);
                p->p_cpu = min(newcpu, UCHAR_MAX);
        }
        setpri(p);
}
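
/*
 * Worked example of the decay loop above (numbers are illustrative only):
 * with a load average of 1.0 (averunnable[0] == FSCALE), loadfac is
 * 2 * FSCALE and decay_cpu() leaves 2/3 of the previous value, so a
 * process that slept 10 seconds with p_cpu == 255 is decayed through
 * 8 iterations, 255 -> 170 -> 113 -> ... -> 9.
 */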

#define SQSIZE 0100	/* Must be power of 2 */
#define HASH(x)	(( (int) x >> 5) & (SQSIZE-1))
struct slpque {
        struct proc *sq_head;
        struct proc **sq_tailp;
} slpque[SQSIZE];

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

/*
 * General sleep call.
 * Suspends current process until a wakeup is made on chan.
 * The process will then be made runnable with priority pri.
 * Sleeps at most timo/hz seconds (0 means no timeout).
 * If pri includes PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.
 * If PCATCH is set and a signal needs to be delivered,
 * ERESTART is returned if the current system call should be restarted
 * if possible, and EINTR is returned if the system call should
 * be interrupted by the signal (return EINTR).
 */
tsleep(chan, pri, wmesg, timo)
        caddr_t chan;
        int pri;
        char *wmesg;
        int timo;
{
        register struct proc *p = curproc;	/* XXX */
        register struct slpque *qp;
        register s;
        int sig, catch = pri & PCATCH;
        extern int cold;
        int endtsleep();

        s = splhigh();
        if (cold || panicstr) {
                /*
                 * After a panic, or during autoconfiguration,
                 * just give interrupts a chance, then just return;
                 * don't run any other procs or panic below,
                 * in case this is the idle process and already asleep.
                 */
                splx(safepri);
                splx(s);
                return (0);
        }
#ifdef DIAGNOSTIC
        if (chan == 0 || p->p_stat != SRUN || p->p_rlink)
                panic("tsleep");
#endif
        p->p_wchan = chan;
        p->p_wmesg = wmesg;
        p->p_slptime = 0;
        p->p_pri = pri & PRIMASK;
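        /*
         * Hash the wait channel to pick a sleep queue and append this
         * process at the tail, so wakeup() scans sleepers on the same
         * chan in the order in which they went to sleep.
         */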
        qp = &slpque[HASH(chan)];
        if (qp->sq_head == 0)
                qp->sq_head = p;
        else
                *qp->sq_tailp = p;
        *(qp->sq_tailp = &p->p_link) = 0;
        if (timo)
                timeout(endtsleep, (caddr_t)p, timo);
        /*
         * We put ourselves on the sleep queue and start our timeout
         * before calling CURSIG, as we could stop there, and a wakeup
         * or a SIGCONT (or both) could occur while we were stopped.
         * A SIGCONT would cause us to be marked as SSLEEP
         * without resuming us, thus we must be ready for sleep
         * when CURSIG is called.  If the wakeup happens while we're
         * stopped, p->p_wchan will be 0 upon return from CURSIG.
         */
        if (catch) {
                p->p_flag |= SSINTR;
                if (sig = CURSIG(p)) {
                        if (p->p_wchan)
                                unsleep(p);
                        p->p_stat = SRUN;
                        goto resume;
                }
                if (p->p_wchan == 0) {
                        catch = 0;
                        goto resume;
                }
        }
        p->p_stat = SSLEEP;
        (void) spl0();
        p->p_stats->p_ru.ru_nvcsw++;
        swtch();
resume:
        curpri = p->p_usrpri;
        splx(s);
        p->p_flag &= ~SSINTR;
        if (p->p_flag & STIMO) {
                p->p_flag &= ~STIMO;
                if (catch == 0 || sig == 0)
                        return (EWOULDBLOCK);
        } else if (timo)
                untimeout(endtsleep, (caddr_t)p);
        if (catch && (sig != 0 || (sig = CURSIG(p)))) {
                if (p->p_sigacts->ps_sigintr & sigmask(sig))
                        return (EINTR);
                return (ERESTART);
        }
        return (0);
}
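
/*
 * Example of the intended calling convention (a hypothetical caller,
 * not code from this file; the channel, priority, message and timeout
 * are placeholders):
 *
 *	error = tsleep((caddr_t)bp, (PZERO + 1) | PCATCH, "bufwait", 2 * hz);
 *	if (error == EWOULDBLOCK)
 *		... the two-second timeout expired ...
 *	else if (error)
 *		return (error);		... EINTR or ERESTART from a signal ...
 *
 * The matching wakeup((caddr_t)bp) is issued by whatever code satisfies
 * the condition being waited for.
 */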

/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
endtsleep(p)
        register struct proc *p;
{
        int s = splhigh();

        if (p->p_wchan) {
                if (p->p_stat == SSLEEP)
                        setrun(p);
                else
                        unsleep(p);
                p->p_flag |= STIMO;
        }
        splx(s);
}

/*
 * Short-term, non-interruptible sleep.
 */
sleep(chan, pri)
        caddr_t chan;
        int pri;
{
        register struct proc *p = curproc;	/* XXX */
        register struct slpque *qp;
        register s;
        extern int cold;

#ifdef DIAGNOSTIC
        if (pri > PZERO) {
                printf("sleep called with pri %d > PZERO, wchan: %x\n",
                    pri, chan);
                panic("old sleep");
        }
#endif
        s = splhigh();
        if (cold || panicstr) {
                /*
                 * After a panic, or during autoconfiguration,
                 * just give interrupts a chance, then just return;
                 * don't run any other procs or panic below,
                 * in case this is the idle process and already asleep.
                 */
                splx(safepri);
                splx(s);
                return;
        }
#ifdef DIAGNOSTIC
        if (chan == 0 || p->p_stat != SRUN || p->p_rlink)
                panic("sleep");
#endif
        p->p_wchan = chan;
        p->p_wmesg = NULL;
        p->p_slptime = 0;
        p->p_pri = pri;
        qp = &slpque[HASH(chan)];
        if (qp->sq_head == 0)
                qp->sq_head = p;
        else
                *qp->sq_tailp = p;
        *(qp->sq_tailp = &p->p_link) = 0;
        p->p_stat = SSLEEP;
        (void) spl0();
        p->p_stats->p_ru.ru_nvcsw++;
        swtch();
        curpri = p->p_usrpri;
        splx(s);
}

/*
 * Remove a process from its wait queue
 */
unsleep(p)
        register struct proc *p;
{
        register struct slpque *qp;
        register struct proc **hp;
        int s;

        s = splhigh();
        if (p->p_wchan) {
                hp = &(qp = &slpque[HASH(p->p_wchan)])->sq_head;
                while (*hp != p)
                        hp = &(*hp)->p_link;
                *hp = p->p_link;
                if (qp->sq_tailp == &p->p_link)
                        qp->sq_tailp = hp;
                p->p_wchan = 0;
        }
        splx(s);
}

/*
 * Wakeup on "chan"; set all processes
 * sleeping on chan to run state.
 */
wakeup(chan)
        register caddr_t chan;
{
        register struct slpque *qp;
        register struct proc *p, **q;
        int s;

        s = splhigh();
        qp = &slpque[HASH(chan)];
restart:
        for (q = &qp->sq_head; p = *q; ) {
#ifdef DIAGNOSTIC
                if (p->p_rlink || p->p_stat != SSLEEP && p->p_stat != SSTOP)
                        panic("wakeup");
#endif
                if (p->p_wchan == chan) {
                        p->p_wchan = 0;
                        *q = p->p_link;
                        if (qp->sq_tailp == &p->p_link)
                                qp->sq_tailp = q;
                        if (p->p_stat == SSLEEP) {
                                /* OPTIMIZED INLINE EXPANSION OF setrun(p) */
                                if (p->p_slptime > 1)
                                        updatepri(p);
                                p->p_slptime = 0;
                                p->p_stat = SRUN;
                                if (p->p_flag & SLOAD)
                                        setrq(p);
                                /*
                                 * Since curpri is a usrpri,
                                 * p->p_pri is always better than curpri.
                                 */
                                if ((p->p_flag&SLOAD) == 0)
                                        wakeup((caddr_t)&proc0);
                                else
                                        need_resched();
                                /* END INLINE EXPANSION */
                                goto restart;
                        }
                } else
                        q = &p->p_link;
        }
        splx(s);
}
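
/*
 * Note on usage (general convention rather than anything specific to this
 * file): since every process sleeping on chan is made runnable, a sleeper
 * should re-check the condition it slept on after being awakened; the
 * producer side of the hypothetical tsleep() example above is simply
 *	wakeup((caddr_t)bp);
 */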

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
rqinit()
{
        register int i;

        for (i = 0; i < NQS; i++)
                qs[i].ph_link = qs[i].ph_rlink = (struct proc *)&qs[i];
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
setrun(p)
        register struct proc *p;
{
        register int s;

        s = splhigh();
        switch (p->p_stat) {

        case 0:
        case SWAIT:
        case SRUN:
        case SZOMB:
        default:
                panic("setrun");

        case SSTOP:
        case SSLEEP:
                unsleep(p);		/* e.g. when sending signals */
                break;

        case SIDL:
                break;
        }
        p->p_stat = SRUN;
        if (p->p_flag & SLOAD)
                setrq(p);
        splx(s);
        if (p->p_slptime > 1)
                updatepri(p);
        p->p_slptime = 0;
        if ((p->p_flag&SLOAD) == 0)
                wakeup((caddr_t)&proc0);
        else if (p->p_pri < curpri)
                need_resched();
}

/*
 * Compute priority of process when running in user mode.
 * Arrange to reschedule if the resulting priority
 * is better than that of the current process.
 */
setpri(p)
        register struct proc *p;
{
        register unsigned int newpri;

        newpri = PUSER + p->p_cpu / 4 + 2 * p->p_nice;
        newpri = min(newpri, MAXPRI);
        p->p_usrpri = newpri;
        if (newpri < curpri)
                need_resched();
}
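
/*
 * Worked example of the setpri() formula (numbers are illustrative only):
 * every 4 units of accumulated p_cpu cost one priority slot and every
 * unit of p_nice costs two, so p_cpu == 40 and p_nice == 4 yield
 * PUSER + 18, clamped at MAXPRI; numerically larger priorities are
 * weaker, so heavy recent CPU use or a generous nice value pushes a
 * process behind its competitors.
 */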