xref: /csrg-svn/sys/kern/kern_synch.c (revision 52498)
149594Sbostic /*-
249594Sbostic  * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
349594Sbostic  * Copyright (c) 1991 The Regents of the University of California.
449594Sbostic  * All rights reserved.
523376Smckusick  *
649594Sbostic  * %sccs.include.redist.c%
749594Sbostic  *
8*52498Smarc  *	@(#)kern_synch.c	7.19 (Berkeley) 02/14/92
923376Smckusick  */
1033Sbill 
1117093Sbloom #include "param.h"
1217093Sbloom #include "systm.h"
1317093Sbloom #include "proc.h"
1417093Sbloom #include "kernel.h"
1517093Sbloom #include "buf.h"
1649095Skarels #include "signalvar.h"
1749095Skarels #include "resourcevar.h"
18*52498Smarc #ifdef KTRACE
19*52498Smarc #include "ktrace.h"
20*52498Smarc #endif
219756Ssam 
2247544Skarels #include "machine/cpu.h"
2345742Smckusick 
2449226Skarels u_char	curpri;			/* usrpri of curproc */
2549226Skarels 
268102Sroot /*
278102Sroot  * Force switch among equal priority processes every 100ms.
288102Sroot  */
298102Sroot roundrobin()
308102Sroot {
318102Sroot 
3247544Skarels 	need_resched();
338624Sroot 	timeout(roundrobin, (caddr_t)0, hz / 10);
348102Sroot }
358102Sroot 
3632908Smckusick /*
3732908Smckusick  * constants for digital decay and forget
3832908Smckusick  *	90% of (p_cpu) usage in 5*loadav time
3932908Smckusick  *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
4032908Smckusick  *          Note that, as ps(1) mentions, this can let percentages
4132908Smckusick  *          total over 100% (I've seen 137.9% for 3 processes).
4232908Smckusick  *
4332908Smckusick  * Note that hardclock updates p_cpu and p_cpticks independently.
4432908Smckusick  *
4532908Smckusick  * We wish to decay away 90% of p_cpu in (5 * loadavg) seconds.
4632908Smckusick  * That is, the system wants to compute a value of decay such
4732908Smckusick  * that the following for loop:
4832908Smckusick  * 	for (i = 0; i < (5 * loadavg); i++)
4932908Smckusick  * 		p_cpu *= decay;
5032908Smckusick  * will compute
5132908Smckusick  * 	p_cpu *= 0.1;
5232908Smckusick  * for all values of loadavg:
5332908Smckusick  *
5432908Smckusick  * Mathematically this loop can be expressed by saying:
5532908Smckusick  * 	decay ** (5 * loadavg) ~= .1
5632908Smckusick  *
5732908Smckusick  * The system computes decay as:
5832908Smckusick  * 	decay = (2 * loadavg) / (2 * loadavg + 1)
5932908Smckusick  *
6032908Smckusick  * We wish to prove that the system's computation of decay
6132908Smckusick  * will always fulfill the equation:
6232908Smckusick  * 	decay ** (5 * loadavg) ~= .1
6332908Smckusick  *
6432908Smckusick  * If we compute b as:
6532908Smckusick  * 	b = 2 * loadavg
6632908Smckusick  * then
6732908Smckusick  * 	decay = b / (b + 1)
6832908Smckusick  *
6932908Smckusick  * We now need to prove two things:
7032908Smckusick  *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
7132908Smckusick  *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
7232908Smckusick  *
7332908Smckusick  * Facts:
7432908Smckusick  *         For x close to zero, exp(x) =~ 1 + x, since
7532908Smckusick  *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
7632908Smckusick  *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
7732908Smckusick  *         For x close to zero, ln(1+x) =~ x, since
7832908Smckusick  *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
7932908Smckusick  *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
8032908Smckusick  *         ln(.1) =~ -2.30
8132908Smckusick  *
8232908Smckusick  * Proof of (1):
8332908Smckusick  *    Solve (factor)**(power) =~ .1 given power (5*loadav):
8432908Smckusick  *	solving for factor,
8532908Smckusick  *      ln(factor) =~ (-2.30/5*loadav), or
8647544Skarels  *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
8732908Smckusick  *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
8832908Smckusick  *
8932908Smckusick  * Proof of (2):
9032908Smckusick  *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
9132908Smckusick  *	solving for power,
9232908Smckusick  *      power*ln(b/(b+1)) =~ -2.30, or
9332908Smckusick  *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
9432908Smckusick  *
9532908Smckusick  * Actual power values for the implemented algorithm are as follows:
9632908Smckusick  *      loadav: 1       2       3       4
9732908Smckusick  *      power:  5.68    10.32   14.94   19.55
9832908Smckusick  */
9917541Skarels 
10038164Smckusick /* calculations for digital decay to forget 90% of usage in 5*loadav sec */
10147544Skarels #define	loadfactor(loadav)	(2 * (loadav))
10247544Skarels #define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
1038102Sroot 
10438164Smckusick /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
10538164Smckusick fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */
10638164Smckusick 
1078102Sroot /*
10838164Smckusick  * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
10938164Smckusick  * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
11038164Smckusick  * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
11138164Smckusick  *
11238164Smckusick  * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
11338164Smckusick  *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
11438164Smckusick  *
11538164Smckusick  * If you dont want to bother with the faster/more-accurate formula, you
11638164Smckusick  * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
11738164Smckusick  * (more general) method of calculating the %age of CPU used by a process.
11838164Smckusick  */
11938164Smckusick #define	CCPU_SHIFT	11
12038164Smckusick 
/*
 * Recompute process priorities, once a second.
 * Decays p_cpu and p_pctcpu for every process, recomputes the
 * user-mode scheduling priority, and requeues runnable processes
 * whose priority moved to a different run queue.  Reschedules
 * itself via timeout() to run again in hz ticks.
 */
schedcpu()
{
	/* loadfac scales the p_cpu decay rate with the load average */
	register fixpt_t loadfac = loadfactor(averunnable[0]);
	register struct proc *p;
	register int s;
	register unsigned int newcpu;

	/* awaken anyone sleeping on lbolt, the once-a-second wakeup channel */
	wakeup((caddr_t)&lbolt);
	for (p = allproc; p != NULL; p = p->p_nxt) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		p->p_time++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		/* decay the %cpu estimate: p_pctcpu *= ccpu, in fixed point */
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up
		 * (updatepri will catch it up at wakeup time).
		 */
		if (p->p_slptime > 1)
			continue;
		/*
		 * p_pctcpu is only for ps.
		 */
#if	(FSHIFT >= CCPU_SHIFT)
		/* shift-based form; see CCPU_SHIFT comment above for validity */
		p->p_pctcpu += (hz == 100)?
			((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
                	100 * (((fixpt_t) p->p_cpticks)
				<< (FSHIFT - CCPU_SHIFT)) / hz;
#else
		/* slower/more general form for arbitrary ccpu */
		p->p_pctcpu += ((FSCALE - ccpu) *
			(p->p_cpticks * FSCALE / hz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		/* digital decay of p_cpu, biased by nice; clamp to 8 bits */
		newcpu = (u_int) decay_cpu(loadfac, p->p_cpu) + p->p_nice;
		p->p_cpu = min(newcpu, UCHAR_MAX);
		setpri(p);
		s = splhigh();	/* prevent state changes */
		if (p->p_pri >= PUSER) {
#define	PPQ	(128 / NQS)		/* priorities per queue */
			/*
			 * Requeue only if the process is runnable, in core,
			 * not the current process, and would land on a
			 * different run queue; otherwise just record the
			 * new priority.
			 */
			if ((p != curproc) &&
			    p->p_stat == SRUN &&
			    (p->p_flag & SLOAD) &&
			    (p->p_pri / PPQ) != (p->p_usrpri / PPQ)) {
				remrq(p);
				p->p_pri = p->p_usrpri;
				setrq(p);
			} else
				p->p_pri = p->p_usrpri;
		}
		splx(s);
	}
	vmmeter();
	/* if there are cleaned buffers awaiting processing, wake pageout */
	if (bclnlist != NULL)
		wakeup((caddr_t)pageproc);
	timeout(schedcpu, (caddr_t)0, hz);
}
1848102Sroot 
18517541Skarels /*
18617541Skarels  * Recalculate the priority of a process after it has slept for a while.
18747544Skarels  * For all load averages >= 1 and max p_cpu of 255, sleeping for at least
18847544Skarels  * six times the loadfactor will decay p_cpu to zero.
18917541Skarels  */
19017541Skarels updatepri(p)
19117541Skarels 	register struct proc *p;
19217541Skarels {
19347544Skarels 	register unsigned int newcpu = p->p_cpu;
19447544Skarels 	register fixpt_t loadfac = loadfactor(averunnable[0]);
19517541Skarels 
19647544Skarels 	if (p->p_slptime > 5 * loadfac)
19747544Skarels 		p->p_cpu = 0;
19847544Skarels 	else {
19947544Skarels 		p->p_slptime--;	/* the first time was done in schedcpu */
20047544Skarels 		while (newcpu && --p->p_slptime)
20147544Skarels 			newcpu = (int) decay_cpu(loadfac, newcpu);
20247544Skarels 		p->p_cpu = min(newcpu, UCHAR_MAX);
20347544Skarels 	}
20447544Skarels 	setpri(p);
20517541Skarels }
20617541Skarels 
20733Sbill #define SQSIZE 0100	/* Must be power of 2 */
20833Sbill #define HASH(x)	(( (int) x >> 5) & (SQSIZE-1))
20921099Smckusick struct slpque {
21021099Smckusick 	struct proc *sq_head;
21121099Smckusick 	struct proc **sq_tailp;
21221099Smckusick } slpque[SQSIZE];
21333Sbill 
21433Sbill /*
21545671Skarels  * During autoconfiguration or after a panic, a sleep will simply
21645671Skarels  * lower the priority briefly to allow interrupts, then return.
21745671Skarels  * The priority to be used (safepri) is machine-dependent, thus this
21845671Skarels  * value is initialized and maintained in the machine-dependent layers.
21945671Skarels  * This priority will typically be 0, or the lowest priority
22045671Skarels  * that is safe for use on the interrupt stack; it can be made
22145671Skarels  * higher to block network software interrupts after panics.
22245671Skarels  */
22345671Skarels int safepri;
22445671Skarels 
/*
 * General sleep call.
 * Suspends current process until a wakeup is made on chan.
 * The process will then be made runnable with priority pri.
 * Sleeps at most timo/hz seconds (0 means no timeout).
 * If pri includes PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.
 * If PCATCH is set and a signal needs to be delivered,
 * ERESTART is returned if the current system call should be restarted
 * if possible, and EINTR is returned if the system call should
 * be interrupted by the signal (return EINTR).
 */
tsleep(chan, pri, wmesg, timo)
	caddr_t chan;
	int pri;
	char *wmesg;
	int timo;
{
	register struct proc *p = curproc;
	register struct slpque *qp;
	register s;
	int sig, catch = pri & PCATCH;
	extern int cold;
	int endtsleep();

#ifdef KTRACE
	/* record the context-switch point for ktrace(1) */
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 1, 0);
#endif
	s = splhigh();
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		splx(safepri);
		splx(s);
		return (0);
	}
#ifdef DIAGNOSTIC
	if (chan == 0 || p->p_stat != SRUN || p->p_rlink)
		panic("tsleep");
#endif
	/* record what we wait on, and append ourselves to chan's hash chain */
	p->p_wchan = chan;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_pri = pri & PRIMASK;
	qp = &slpque[HASH(chan)];
	if (qp->sq_head == 0)
		qp->sq_head = p;
	else
		*qp->sq_tailp = p;
	*(qp->sq_tailp = &p->p_link) = 0;
	if (timo)
		timeout(endtsleep, (caddr_t)p, timo);
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, p->p_wchan will be 0 upon return from CURSIG.
	 */
	if (catch) {
		p->p_flag |= SSINTR;
		if (sig = CURSIG(p)) {
			/* a signal is already pending; abort the sleep */
			if (p->p_wchan)
				unsleep(p);
			p->p_stat = SRUN;
			goto resume;
		}
		if (p->p_wchan == 0) {
			/* already awakened while stopped in CURSIG */
			catch = 0;
			goto resume;
		}
	}
	p->p_stat = SSLEEP;
	p->p_stats->p_ru.ru_nvcsw++;	/* count a voluntary context switch */
	swtch();
resume:
	curpri = p->p_usrpri;
	splx(s);
	p->p_flag &= ~SSINTR;
	if (p->p_flag & STIMO) {
		/* endtsleep fired: the timer, not a wakeup, roused us */
		p->p_flag &= ~STIMO;
		if (catch == 0 || sig == 0) {
#ifdef KTRACE
			if (KTRPOINT(p, KTR_CSW))
				ktrcsw(p->p_tracep, 0, 0);
#endif
			return (EWOULDBLOCK);
		}
	} else if (timo)
		untimeout(endtsleep, (caddr_t)p);	/* cancel unexpired timer */
	if (catch && (sig != 0 || (sig = CURSIG(p)))) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_CSW))
			ktrcsw(p->p_tracep, 0, 0);
#endif
		/* per-signal sigintr mask selects EINTR over ERESTART */
		if (p->p_sigacts->ps_sigintr & sigmask(sig))
			return (EINTR);
		return (ERESTART);
	}
#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 0, 0);
#endif
	return (0);
}
33840710Smarc 
33940710Smarc /*
34040710Smarc  * Implement timeout for tsleep.
34140710Smarc  * If process hasn't been awakened (wchan non-zero),
34240710Smarc  * set timeout flag and undo the sleep.  If proc
34340710Smarc  * is stopped, just unsleep so it will remain stopped.
34440710Smarc  */
34540710Smarc endtsleep(p)
34640710Smarc 	register struct proc *p;
34740710Smarc {
34840710Smarc 	int s = splhigh();
34940710Smarc 
35040710Smarc 	if (p->p_wchan) {
35140710Smarc 		if (p->p_stat == SSLEEP)
35240710Smarc 			setrun(p);
35340710Smarc 		else
35440710Smarc 			unsleep(p);
35540710Smarc 		p->p_flag |= STIMO;
35640710Smarc 	}
35740710Smarc 	splx(s);
35840710Smarc }
35940710Smarc 
/*
 * Short-term, non-interruptable sleep.
 * A stripped-down tsleep: no timeout, no wait message, and no
 * signal check; pri must be <= PZERO (an uninterruptible priority).
 */
sleep(chan, pri)
	caddr_t chan;
	int pri;
{
	register struct proc *p = curproc;
	register struct slpque *qp;
	register s;
	extern int cold;

#ifdef DIAGNOSTIC
	/* interruptible priorities must use tsleep, which handles PCATCH */
	if (pri > PZERO) {
		printf("sleep called with pri %d > PZERO, wchan: %x\n",
			pri, chan);
		panic("old sleep");
	}
#endif
	s = splhigh();
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		splx(safepri);
		splx(s);
		return;
	}
#ifdef DIAGNOSTIC
	if (chan==0 || p->p_stat != SRUN || p->p_rlink)
		panic("sleep");
#endif
	/* append ourselves to chan's sleep-queue hash chain and go to sleep */
	p->p_wchan = chan;
	p->p_wmesg = NULL;
	p->p_slptime = 0;
	p->p_pri = pri;
	qp = &slpque[HASH(chan)];
	if (qp->sq_head == 0)
		qp->sq_head = p;
	else
		*qp->sq_tailp = p;
	*(qp->sq_tailp = &p->p_link) = 0;
	p->p_stat = SSLEEP;
	p->p_stats->p_ru.ru_nvcsw++;	/* count a voluntary context switch */
#ifdef KTRACE
	/* record the context-switch point for ktrace(1) */
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 1, 0);
#endif
	swtch();
#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 0, 0);
#endif
	curpri = p->p_usrpri;
	splx(s);
}
41933Sbill 
42033Sbill /*
421181Sbill  * Remove a process from its wait queue
422181Sbill  */
423181Sbill unsleep(p)
4244826Swnj 	register struct proc *p;
425181Sbill {
42621099Smckusick 	register struct slpque *qp;
427181Sbill 	register struct proc **hp;
42821099Smckusick 	int s;
429181Sbill 
43017541Skarels 	s = splhigh();
431181Sbill 	if (p->p_wchan) {
43221099Smckusick 		hp = &(qp = &slpque[HASH(p->p_wchan)])->sq_head;
433181Sbill 		while (*hp != p)
434181Sbill 			hp = &(*hp)->p_link;
435181Sbill 		*hp = p->p_link;
43621099Smckusick 		if (qp->sq_tailp == &p->p_link)
43721099Smckusick 			qp->sq_tailp = hp;
438181Sbill 		p->p_wchan = 0;
439181Sbill 	}
440181Sbill 	splx(s);
441181Sbill }
442181Sbill 
/*
 * Wakeup on "chan"; set all processes
 * sleeping on chan to run state.
 */
wakeup(chan)
	register caddr_t chan;
{
	register struct slpque *qp;
	register struct proc *p, **q;
	int s;

	s = splhigh();
	qp = &slpque[HASH(chan)];
restart:
	for (q = &qp->sq_head; p = *q; ) {
#ifdef DIAGNOSTIC
		if (p->p_rlink || p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("wakeup");
#endif
		if (p->p_wchan == chan) {
			/* unchain p from the sleep queue */
			p->p_wchan = 0;
			*q = p->p_link;
			if (qp->sq_tailp == &p->p_link)
				qp->sq_tailp = q;
			/* stopped (SSTOP) procs are unslept but left stopped */
			if (p->p_stat == SSLEEP) {
				/* OPTIMIZED INLINE EXPANSION OF setrun(p) */
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;
				if (p->p_flag & SLOAD)
					setrq(p);
				/*
				 * Since curpri is a usrpri,
				 * p->p_pri is always better than curpri.
				 */
				if ((p->p_flag&SLOAD) == 0)
					wakeup((caddr_t)&proc0);	/* swapped out: rouse swapper */
				else
					need_resched();
				/* END INLINE EXPANSION */
				/*
				 * NOTE(review): rescan from the head --
				 * presumably because the recursive wakeup of
				 * proc0 above may have altered this chain;
				 * confirm before simplifying.
				 */
				goto restart;
			}
		} else
			q = &p->p_link;
	}
	splx(s);
}
49133Sbill 
49233Sbill /*
49333Sbill  * Initialize the (doubly-linked) run queues
49433Sbill  * to be empty.
49533Sbill  */
49633Sbill rqinit()
49733Sbill {
49833Sbill 	register int i;
49933Sbill 
50033Sbill 	for (i = 0; i < NQS; i++)
50133Sbill 		qs[i].ph_link = qs[i].ph_rlink = (struct proc *)&qs[i];
50233Sbill }
50333Sbill 
50433Sbill /*
50547544Skarels  * Change process state to be runnable,
50647544Skarels  * placing it on the run queue if it is in memory,
50747544Skarels  * and awakening the swapper if it isn't in memory.
50833Sbill  */
50933Sbill setrun(p)
5104826Swnj 	register struct proc *p;
51133Sbill {
5124826Swnj 	register int s;
51333Sbill 
51417541Skarels 	s = splhigh();
51533Sbill 	switch (p->p_stat) {
51633Sbill 
51733Sbill 	case 0:
51833Sbill 	case SWAIT:
51933Sbill 	case SRUN:
52033Sbill 	case SZOMB:
52133Sbill 	default:
52233Sbill 		panic("setrun");
52333Sbill 
524207Sbill 	case SSTOP:
52533Sbill 	case SSLEEP:
526181Sbill 		unsleep(p);		/* e.g. when sending signals */
52733Sbill 		break;
52833Sbill 
52933Sbill 	case SIDL:
53033Sbill 		break;
53133Sbill 	}
53233Sbill 	p->p_stat = SRUN;
53333Sbill 	if (p->p_flag & SLOAD)
53433Sbill 		setrq(p);
53533Sbill 	splx(s);
53630232Skarels 	if (p->p_slptime > 1)
53730232Skarels 		updatepri(p);
53847544Skarels 	p->p_slptime = 0;
53947544Skarels 	if ((p->p_flag&SLOAD) == 0)
54047544Skarels 		wakeup((caddr_t)&proc0);
54147544Skarels 	else if (p->p_pri < curpri)
54247544Skarels 		need_resched();
54333Sbill }
54433Sbill 
54533Sbill /*
54647544Skarels  * Compute priority of process when running in user mode.
54747544Skarels  * Arrange to reschedule if the resulting priority
54847544Skarels  * is better than that of the current process.
54933Sbill  */
55047544Skarels setpri(p)
55147544Skarels 	register struct proc *p;
55233Sbill {
55347544Skarels 	register unsigned int newpri;
55433Sbill 
55547544Skarels 	newpri = PUSER + p->p_cpu / 4 + 2 * p->p_nice;
55647544Skarels 	newpri = min(newpri, MAXPRI);
55747544Skarels 	p->p_usrpri = newpri;
55847544Skarels 	if (newpri < curpri)
55947544Skarels 		need_resched();
56033Sbill }
561