xref: /csrg-svn/sys/kern/kern_synch.c (revision 52686)
149594Sbostic /*-
249594Sbostic  * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
349594Sbostic  * Copyright (c) 1991 The Regents of the University of California.
449594Sbostic  * All rights reserved.
523376Smckusick  *
649594Sbostic  * %sccs.include.redist.c%
749594Sbostic  *
8*52686Ssklower  *	@(#)kern_synch.c	7.22 (Berkeley) 02/28/92
923376Smckusick  */
1033Sbill 
1117093Sbloom #include "param.h"
1217093Sbloom #include "systm.h"
1317093Sbloom #include "proc.h"
1417093Sbloom #include "kernel.h"
1517093Sbloom #include "buf.h"
1649095Skarels #include "signalvar.h"
1749095Skarels #include "resourcevar.h"
1852498Smarc #ifdef KTRACE
1952498Smarc #include "ktrace.h"
2052498Smarc #endif
219756Ssam 
2247544Skarels #include "machine/cpu.h"
2345742Smckusick 
2449226Skarels u_char	curpri;			/* usrpri of curproc */
25*52686Ssklower int	lbolt;			/* once a second sleep address */
2649226Skarels 
278102Sroot /*
288102Sroot  * Force switch among equal-priority processes every 100ms.
298102Sroot  */
308102Sroot roundrobin()
318102Sroot {
328102Sroot 
3347544Skarels 	need_resched();
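	/* hz / 10 ticks comes to roughly 100ms at any clock rate */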
348624Sroot 	timeout(roundrobin, (caddr_t)0, hz / 10);
358102Sroot }
368102Sroot 
3732908Smckusick /*
3832908Smckusick  * constants for digital decay and forget
3932908Smckusick  *	90% of (p_cpu) usage in 5*loadav time
4032908Smckusick  *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
4132908Smckusick  *          Note that, as ps(1) mentions, this can let percentages
4232908Smckusick  *          total over 100% (I've seen 137.9% for 3 processes).
4332908Smckusick  *
4432908Smckusick  * Note that hardclock updates p_cpu and p_cpticks independently.
4532908Smckusick  *
4632908Smckusick  * We wish to decay away 90% of p_cpu in (5 * loadavg) seconds.
4732908Smckusick  * That is, the system wants to compute a value of decay such
4832908Smckusick  * that the following for loop:
4932908Smckusick  * 	for (i = 0; i < (5 * loadavg); i++)
5032908Smckusick  * 		p_cpu *= decay;
5132908Smckusick  * will compute
5232908Smckusick  * 	p_cpu *= 0.1;
5332908Smckusick  * for all values of loadavg:
5432908Smckusick  *
5532908Smckusick  * Mathematically this loop can be expressed by saying:
5632908Smckusick  * 	decay ** (5 * loadavg) ~= .1
5732908Smckusick  *
5832908Smckusick  * The system computes decay as:
5932908Smckusick  * 	decay = (2 * loadavg) / (2 * loadavg + 1)
6032908Smckusick  *
6132908Smckusick  * We wish to prove that the system's computation of decay
6232908Smckusick  * will always fulfill the equation:
6332908Smckusick  * 	decay ** (5 * loadavg) ~= .1
6432908Smckusick  *
6532908Smckusick  * If we compute b as:
6632908Smckusick  * 	b = 2 * loadavg
6732908Smckusick  * then
6832908Smckusick  * 	decay = b / (b + 1)
6932908Smckusick  *
7032908Smckusick  * We now need to prove two things:
7132908Smckusick  *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
7232908Smckusick  *	2) Given (b/(b+1)) ** power ~= .1, prove power == (5 * loadavg)
7332908Smckusick  *
7432908Smckusick  * Facts:
7532908Smckusick  *         For x close to zero, exp(x) =~ 1 + x, since
7632908Smckusick  *              exp(x) = x**0/0! + x**1/1! + x**2/2! + ... .
7732908Smckusick  *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
7832908Smckusick  *         For x close to zero, ln(1+x) =~ x, since
7932908Smckusick  *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
8032908Smckusick  *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
8132908Smckusick  *         ln(.1) =~ -2.30
8232908Smckusick  *
8332908Smckusick  * Proof of (1):
8432908Smckusick  *    Solve (factor)**(power) =~ .1 given power (5*loadav):
8532908Smckusick  *	solving for factor,
8632908Smckusick  *      ln(factor) =~ -2.30 / (5 * loadav), or
8747544Skarels  *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
8832908Smckusick  *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
8932908Smckusick  *
9032908Smckusick  * Proof of (2):
9132908Smckusick  *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
9232908Smckusick  *	solving for power,
9332908Smckusick  *      power*ln(b/(b+1)) =~ -2.30, or
9432908Smckusick  *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
9532908Smckusick  *
9632908Smckusick  * Actual power values for the implemented algorithm are as follows:
9732908Smckusick  *      loadav: 1       2       3       4
9832908Smckusick  *      power:  5.68    10.32   14.94   19.55
9932908Smckusick  */
10017541Skarels 
10138164Smckusick /* calculations for digital decay to forget 90% of usage in 5*loadav sec */
10247544Skarels #define	loadfactor(loadav)	(2 * (loadav))
10347544Skarels #define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
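
/*
 * An illustrative user-space sketch (never compiled into the kernel,
 * hence the "notdef" guard): it checks numerically that the decay factor
 * derived above really loses about 90% of p_cpu within 5*loadav seconds,
 * using plain doubles in place of the fixed-point decay_cpu() macro.
 * It prints roughly .1 for each load average (loosest at a load of 1).
 */
#ifdef notdef
#include <stdio.h>

int
main()
{
	double loadav, decay, residue;
	int i;

	for (loadav = 1; loadav <= 4; loadav++) {
		/* decay = (2 * loadav) / (2 * loadav + 1), as derived above */
		decay = (2 * loadav) / (2 * loadav + 1);
		residue = 1.0;
		for (i = 0; i < 5 * loadav; i++)
			residue *= decay;
		printf("loadav %.0f: residue %.3f\n", loadav, residue);
	}
	return (0);
}
#endif /* notdef */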
1048102Sroot 
10538164Smckusick /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
10638164Smckusick fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */
10738164Smckusick 
1088102Sroot /*
10938164Smckusick  * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
11038164Smckusick  * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
11138164Smckusick  * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
11238164Smckusick  *
11338164Smckusick  * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
11438164Smckusick  *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 ~= 100/2**11, i.e. about 2**-11 per tick at hz == 100 (fixed pt, *11* bits).
11538164Smckusick  *
11638164Smckusick  * If you don't want to bother with the faster/more-accurate formula, you
11738164Smckusick  * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
11838164Smckusick  * (more general) method of calculating the %age of CPU used by a process.
11938164Smckusick  */
12038164Smckusick #define	CCPU_SHIFT	11
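
/*
 * A worked instance of the estimate above, assuming hz == 100 and
 * FSHIFT == 11 (FSCALE == 2048): a process that ran for 50 of the last
 * 100 ticks should gain (1 - exp(-1/20)) * 50/100 ~= 0.0244 of %cpu.
 * The fast formula below adds 50 << (FSHIFT - CCPU_SHIFT) == 50, which
 * is 50/2048 ~= 0.0244 in units of FSCALE; the slower #else formula,
 * ((FSCALE - ccpu) * (50 * FSCALE / hz)) >> FSHIFT == (100 * 1024) >> 11,
 * comes to the same 50.
 */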
12138164Smckusick 
12238164Smckusick /*
1238102Sroot  * Recompute process priorities, once a second
1248102Sroot  */
1258102Sroot schedcpu()
1268102Sroot {
12752667Smckusick 	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
1288102Sroot 	register struct proc *p;
12947544Skarels 	register int s;
13047544Skarels 	register unsigned int newcpu;
1318102Sroot 
1328102Sroot 	wakeup((caddr_t)&lbolt);
13316532Skarels 	for (p = allproc; p != NULL; p = p->p_nxt) {
13447544Skarels 		/*
13547544Skarels 		 * Increment time in/out of memory and sleep time
13647544Skarels 		 * (if sleeping).  We ignore overflow; with 16-bit int's
13747544Skarels 		 * (remember them?) overflow takes 45 days.
13847544Skarels 		 */
13947544Skarels 		p->p_time++;
14047544Skarels 		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
14147544Skarels 			p->p_slptime++;
14238164Smckusick 		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
14317541Skarels 		/*
14417541Skarels 		 * If the process has slept the entire second,
14517541Skarels 		 * stop recalculating its priority until it wakes up.
14617541Skarels 		 */
14738164Smckusick 		if (p->p_slptime > 1)
14817541Skarels 			continue;
14917541Skarels 		/*
15017541Skarels 		 * p_pctcpu is only for ps.
15117541Skarels 		 */
15238164Smckusick #if	(FSHIFT >= CCPU_SHIFT)
15338164Smckusick 		p->p_pctcpu += (hz == 100)?
15438164Smckusick 			((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
15538164Smckusick 			100 * (((fixpt_t) p->p_cpticks)
15638164Smckusick 				<< (FSHIFT - CCPU_SHIFT)) / hz;
15738164Smckusick #else
15838164Smckusick 		p->p_pctcpu += ((FSCALE - ccpu) *
15938164Smckusick 			(p->p_cpticks * FSCALE / hz)) >> FSHIFT;
16038164Smckusick #endif
1618102Sroot 		p->p_cpticks = 0;
16247544Skarels 		newcpu = (u_int) decay_cpu(loadfac, p->p_cpu) + p->p_nice;
16347544Skarels 		p->p_cpu = min(newcpu, UCHAR_MAX);
16447544Skarels 		setpri(p);
16517541Skarels 		s = splhigh();	/* prevent state changes */
1668102Sroot 		if (p->p_pri >= PUSER) {
16747544Skarels #define	PPQ	(128 / NQS)		/* priorities per queue */
16849095Skarels 			if ((p != curproc) &&
1698102Sroot 			    p->p_stat == SRUN &&
1708102Sroot 			    (p->p_flag & SLOAD) &&
17116795Skarels 			    (p->p_pri / PPQ) != (p->p_usrpri / PPQ)) {
1728102Sroot 				remrq(p);
1738102Sroot 				p->p_pri = p->p_usrpri;
1748102Sroot 				setrq(p);
1758102Sroot 			} else
1768102Sroot 				p->p_pri = p->p_usrpri;
1778102Sroot 		}
1788102Sroot 		splx(s);
1798102Sroot 	}
1808102Sroot 	vmmeter();
1818102Sroot 	if (bclnlist != NULL)
18247544Skarels 		wakeup((caddr_t)pageproc);
1838624Sroot 	timeout(schedcpu, (caddr_t)0, hz);
1848102Sroot }
1858102Sroot 
18617541Skarels /*
18717541Skarels  * Recalculate the priority of a process after it has slept for a while.
18847544Skarels  * For all load averages >= 1 and max p_cpu of 255, sleeping for at least
18947544Skarels  * six times the loadfactor will decay p_cpu to zero.
19017541Skarels  */
19117541Skarels updatepri(p)
19217541Skarels 	register struct proc *p;
19317541Skarels {
19447544Skarels 	register unsigned int newcpu = p->p_cpu;
19552667Smckusick 	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
19617541Skarels 
19747544Skarels 	if (p->p_slptime > 5 * loadfac)
19847544Skarels 		p->p_cpu = 0;
19947544Skarels 	else {
20047544Skarels 		p->p_slptime--;	/* the first time was done in schedcpu */
20147544Skarels 		while (newcpu && --p->p_slptime)
20247544Skarels 			newcpu = (int) decay_cpu(loadfac, newcpu);
20347544Skarels 		p->p_cpu = min(newcpu, UCHAR_MAX);
20447544Skarels 	}
20547544Skarels 	setpri(p);
20617541Skarels }
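
/*
 * For instance, with a load average of 1 the per-second decay factor is
 * 2/3, so a maximal p_cpu of 255 reaches zero after roughly a dozen
 * iterations of the loop above, consistent with the six-times-the-
 * loadfactor rule of thumb in the comment.
 */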
20717541Skarels 
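/*
 * The sleep queue hash: bits 5-10 of the wait channel address pick one
 * of the SQSIZE (64) buckets, so channels that lie within the same
 * 32-byte region always share a bucket.
 */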
20833Sbill #define SQSIZE 0100	/* Must be power of 2 */
20933Sbill #define HASH(x)	(( (int) x >> 5) & (SQSIZE-1))
21021099Smckusick struct slpque {
21121099Smckusick 	struct proc *sq_head;
21221099Smckusick 	struct proc **sq_tailp;
21321099Smckusick } slpque[SQSIZE];
21433Sbill 
21533Sbill /*
21645671Skarels  * During autoconfiguration or after a panic, a sleep will simply
21745671Skarels  * lower the priority briefly to allow interrupts, then return.
21845671Skarels  * The priority to be used (safepri) is machine-dependent, thus this
21945671Skarels  * value is initialized and maintained in the machine-dependent layers.
22045671Skarels  * This priority will typically be 0, or the lowest priority
22145671Skarels  * that is safe for use on the interrupt stack; it can be made
22245671Skarels  * higher to block network software interrupts after panics.
22345671Skarels  */
22445671Skarels int safepri;
22545671Skarels 
22645671Skarels /*
22740711Skarels  * General sleep call.
22840711Skarels  * Suspends current process until a wakeup is made on chan.
22940711Skarels  * The process will then be made runnable with priority pri.
23040711Skarels  * Sleeps at most timo/hz seconds (0 means no timeout).
23140711Skarels  * If pri includes PCATCH flag, signals are checked
23240711Skarels  * before and after sleeping, else signals are not checked.
23340711Skarels  * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.
23440711Skarels  * If PCATCH is set and a signal needs to be delivered, ERESTART is
23540711Skarels  * returned if the current system call should be restarted if
23640711Skarels  * possible, and EINTR is returned if the system call should be
23740711Skarels  * interrupted by the signal.
23833Sbill  */
23940711Skarels tsleep(chan, pri, wmesg, timo)
24040710Smarc 	caddr_t chan;
24140710Smarc 	int pri;
24240710Smarc 	char *wmesg;
24340710Smarc 	int timo;
24440710Smarc {
24549095Skarels 	register struct proc *p = curproc;
24640710Smarc 	register struct slpque *qp;
24740710Smarc 	register s;
24840711Skarels 	int sig, catch = pri & PCATCH;
24940710Smarc 	extern int cold;
25040710Smarc 	int endtsleep();
25140710Smarc 
25252498Smarc #ifdef KTRACE
25352498Smarc 	if (KTRPOINT(p, KTR_CSW))
25452498Smarc 		ktrcsw(p->p_tracep, 1, 0);
25552498Smarc #endif
25640710Smarc 	s = splhigh();
25740710Smarc 	if (cold || panicstr) {
25840710Smarc 		/*
25940710Smarc 		 * After a panic, or during autoconfiguration,
26040710Smarc 		 * just give interrupts a chance, then just return;
26140710Smarc 		 * don't run any other procs or panic below,
26240710Smarc 		 * in case this is the idle process and already asleep.
26340710Smarc 		 */
26445671Skarels 		splx(safepri);
26540710Smarc 		splx(s);
26640710Smarc 		return (0);
26740710Smarc 	}
26840710Smarc #ifdef DIAGNOSTIC
26947544Skarels 	if (chan == 0 || p->p_stat != SRUN || p->p_rlink)
27040711Skarels 		panic("tsleep");
27140710Smarc #endif
27247544Skarels 	p->p_wchan = chan;
27347544Skarels 	p->p_wmesg = wmesg;
27447544Skarels 	p->p_slptime = 0;
27547544Skarels 	p->p_pri = pri & PRIMASK;
27640710Smarc 	qp = &slpque[HASH(chan)];
27740710Smarc 	if (qp->sq_head == 0)
27847544Skarels 		qp->sq_head = p;
27940710Smarc 	else
28047544Skarels 		*qp->sq_tailp = p;
28147544Skarels 	*(qp->sq_tailp = &p->p_link) = 0;
28245671Skarels 	if (timo)
28347544Skarels 		timeout(endtsleep, (caddr_t)p, timo);
28440710Smarc 	/*
28547544Skarels 	 * We put ourselves on the sleep queue and start our timeout
28647544Skarels 	 * before calling CURSIG, as we could stop there, and a wakeup
28747544Skarels 	 * or a SIGCONT (or both) could occur while we were stopped.
28845671Skarels 	 * A SIGCONT would cause us to be marked as SSLEEP
28945671Skarels 	 * without resuming us, thus we must be ready for sleep
29045671Skarels 	 * when CURSIG is called.  If the wakeup happens while we're
29147544Skarels 	 * stopped, p->p_wchan will be 0 upon return from CURSIG.
29240710Smarc 	 */
29340711Skarels 	if (catch) {
29447544Skarels 		p->p_flag |= SSINTR;
29547544Skarels 		if (sig = CURSIG(p)) {
29647544Skarels 			if (p->p_wchan)
29747544Skarels 				unsleep(p);
29847544Skarels 			p->p_stat = SRUN;
29945671Skarels 			goto resume;
30040711Skarels 		}
30147544Skarels 		if (p->p_wchan == 0) {
30245671Skarels 			catch = 0;
30345671Skarels 			goto resume;
30440711Skarels 		}
30552499Storek 	} else
30652499Storek 		sig = 0;
30747544Skarels 	p->p_stat = SSLEEP;
30847544Skarels 	p->p_stats->p_ru.ru_nvcsw++;
30940710Smarc 	swtch();
31045671Skarels resume:
31147544Skarels 	curpri = p->p_usrpri;
31240710Smarc 	splx(s);
31347544Skarels 	p->p_flag &= ~SSINTR;
31447544Skarels 	if (p->p_flag & STIMO) {
31547544Skarels 		p->p_flag &= ~STIMO;
31652499Storek 		if (sig == 0) {
31752498Smarc #ifdef KTRACE
31852498Smarc 			if (KTRPOINT(p, KTR_CSW))
31952498Smarc 				ktrcsw(p->p_tracep, 0, 0);
32052498Smarc #endif
32145671Skarels 			return (EWOULDBLOCK);
32252498Smarc 		}
32345671Skarels 	} else if (timo)
32447544Skarels 		untimeout(endtsleep, (caddr_t)p);
32547544Skarels 	if (catch && (sig != 0 || (sig = CURSIG(p)))) {
32652498Smarc #ifdef KTRACE
32752498Smarc 		if (KTRPOINT(p, KTR_CSW))
32852498Smarc 			ktrcsw(p->p_tracep, 0, 0);
32952498Smarc #endif
33047544Skarels 		if (p->p_sigacts->ps_sigintr & sigmask(sig))
33140711Skarels 			return (EINTR);
33240711Skarels 		return (ERESTART);
33340711Skarels 	}
33452498Smarc #ifdef KTRACE
33552498Smarc 	if (KTRPOINT(p, KTR_CSW))
33652498Smarc 		ktrcsw(p->p_tracep, 0, 0);
33752498Smarc #endif
33840710Smarc 	return (0);
33940710Smarc }
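
/*
 * An illustrative sketch of how a caller typically uses the interface
 * above: sleep on a channel with tsleep() until an interrupt handler
 * marks the condition true and calls wakeup() on the same channel.
 * The xx_softc structure, the XX_READY flag and the function names are
 * made up for the example; only tsleep(), wakeup(), splhigh() and
 * splx() come from this file and the machine layer.  Guarded by
 * "notdef" so it is never compiled.
 */
#ifdef notdef
struct xx_softc {
	int	sc_flags;
#define	XX_READY	0x01
};

int
xx_wait(sc)
	register struct xx_softc *sc;
{
	int error, s;

	s = splhigh();
	while ((sc->sc_flags & XX_READY) == 0) {
		/* interruptible sleep with a five second timeout */
		error = tsleep((caddr_t)sc, (PZERO + 1) | PCATCH, "xxwait",
		    5 * hz);
		if (error) {		/* EWOULDBLOCK, EINTR or ERESTART */
			splx(s);
			return (error);
		}
	}
	sc->sc_flags &= ~XX_READY;
	splx(s);
	return (0);
}

/* Called from an interrupt handler when the awaited event arrives. */
xx_intr(sc)
	register struct xx_softc *sc;
{

	sc->sc_flags |= XX_READY;
	wakeup((caddr_t)sc);
}
#endif /* notdef */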
34040710Smarc 
34140710Smarc /*
34240710Smarc  * Implement timeout for tsleep.
34340710Smarc  * If process hasn't been awakened (wchan non-zero),
34440710Smarc  * set timeout flag and undo the sleep.  If proc
34540710Smarc  * is stopped, just unsleep so it will remain stopped.
34640710Smarc  */
34740710Smarc endtsleep(p)
34840710Smarc 	register struct proc *p;
34940710Smarc {
35040710Smarc 	int s = splhigh();
35140710Smarc 
35240710Smarc 	if (p->p_wchan) {
35340710Smarc 		if (p->p_stat == SSLEEP)
35440710Smarc 			setrun(p);
35540710Smarc 		else
35640710Smarc 			unsleep(p);
35740710Smarc 		p->p_flag |= STIMO;
35840710Smarc 	}
35940710Smarc 	splx(s);
36040710Smarc }
36140710Smarc 
36240711Skarels /*
36340711Skarels  * Short-term, non-interruptible sleep.
36440711Skarels  */
36533Sbill sleep(chan, pri)
3668033Sroot 	caddr_t chan;
3678033Sroot 	int pri;
36833Sbill {
36949095Skarels 	register struct proc *p = curproc;
37021099Smckusick 	register struct slpque *qp;
371207Sbill 	register s;
37230532Skarels 	extern int cold;
37333Sbill 
37440711Skarels #ifdef DIAGNOSTIC
37540711Skarels 	if (pri > PZERO) {
37640711Skarels 		printf("sleep called with pri %d > PZERO, wchan: %x\n",
37740711Skarels 			pri, chan);
37840711Skarels 		panic("old sleep");
37940711Skarels 	}
38040711Skarels #endif
38117541Skarels 	s = splhigh();
38230532Skarels 	if (cold || panicstr) {
38318363Skarels 		/*
38430532Skarels 		 * After a panic, or during autoconfiguration,
38530532Skarels 		 * just give interrupts a chance, then just return;
38630532Skarels 		 * don't run any other procs or panic below,
38730532Skarels 		 * in case this is the idle process and already asleep.
38818363Skarels 		 */
38945671Skarels 		splx(safepri);
39018363Skarels 		splx(s);
39118363Skarels 		return;
39218363Skarels 	}
39340710Smarc #ifdef DIAGNOSTIC
39447544Skarels 	if (chan == 0 || p->p_stat != SRUN || p->p_rlink)
39533Sbill 		panic("sleep");
39640710Smarc #endif
39747544Skarels 	p->p_wchan = chan;
39847544Skarels 	p->p_wmesg = NULL;
39947544Skarels 	p->p_slptime = 0;
40047544Skarels 	p->p_pri = pri;
40121099Smckusick 	qp = &slpque[HASH(chan)];
40221099Smckusick 	if (qp->sq_head == 0)
40347544Skarels 		qp->sq_head = p;
40421099Smckusick 	else
40547544Skarels 		*qp->sq_tailp = p;
40647544Skarels 	*(qp->sq_tailp = &p->p_link) = 0;
40747544Skarels 	p->p_stat = SSLEEP;
40847544Skarels 	p->p_stats->p_ru.ru_nvcsw++;
40952498Smarc #ifdef KTRACE
41052498Smarc 	if (KTRPOINT(p, KTR_CSW))
41152498Smarc 		ktrcsw(p->p_tracep, 1, 0);
41252498Smarc #endif
41340711Skarels 	swtch();
41452498Smarc #ifdef KTRACE
41552498Smarc 	if (KTRPOINT(p, KTR_CSW))
41652498Smarc 		ktrcsw(p->p_tracep, 0, 0);
41752498Smarc #endif
41847544Skarels 	curpri = p->p_usrpri;
41933Sbill 	splx(s);
42033Sbill }
42133Sbill 
42233Sbill /*
423181Sbill  * Remove a process from its wait queue
424181Sbill  */
425181Sbill unsleep(p)
4264826Swnj 	register struct proc *p;
427181Sbill {
42821099Smckusick 	register struct slpque *qp;
429181Sbill 	register struct proc **hp;
43021099Smckusick 	int s;
431181Sbill 
43217541Skarels 	s = splhigh();
433181Sbill 	if (p->p_wchan) {
43421099Smckusick 		hp = &(qp = &slpque[HASH(p->p_wchan)])->sq_head;
435181Sbill 		while (*hp != p)
436181Sbill 			hp = &(*hp)->p_link;
437181Sbill 		*hp = p->p_link;
43821099Smckusick 		if (qp->sq_tailp == &p->p_link)
43921099Smckusick 			qp->sq_tailp = hp;
440181Sbill 		p->p_wchan = 0;
441181Sbill 	}
442181Sbill 	splx(s);
443181Sbill }
444181Sbill 
445181Sbill /*
44647544Skarels  * Wakeup on "chan"; set all processes
44747544Skarels  * sleeping on chan to run state.
44833Sbill  */
44933Sbill wakeup(chan)
4504826Swnj 	register caddr_t chan;
45133Sbill {
45221099Smckusick 	register struct slpque *qp;
45321099Smckusick 	register struct proc *p, **q;
45433Sbill 	int s;
45533Sbill 
45617541Skarels 	s = splhigh();
45721099Smckusick 	qp = &slpque[HASH(chan)];
45833Sbill restart:
45921099Smckusick 	for (q = &qp->sq_head; p = *q; ) {
46040710Smarc #ifdef DIAGNOSTIC
461181Sbill 		if (p->p_rlink || p->p_stat != SSLEEP && p->p_stat != SSTOP)
46233Sbill 			panic("wakeup");
46340710Smarc #endif
46447544Skarels 		if (p->p_wchan == chan) {
46533Sbill 			p->p_wchan = 0;
466187Sbill 			*q = p->p_link;
46721099Smckusick 			if (qp->sq_tailp == &p->p_link)
46821099Smckusick 				qp->sq_tailp = q;
469181Sbill 			if (p->p_stat == SSLEEP) {
470181Sbill 				/* OPTIMIZED INLINE EXPANSION OF setrun(p) */
47121763Skarels 				if (p->p_slptime > 1)
47221763Skarels 					updatepri(p);
47347544Skarels 				p->p_slptime = 0;
474181Sbill 				p->p_stat = SRUN;
4752702Swnj 				if (p->p_flag & SLOAD)
476181Sbill 					setrq(p);
47716795Skarels 				/*
47816795Skarels 				 * Since curpri is a usrpri,
47916795Skarels 				 * p->p_pri is always better than curpri.
48016795Skarels 				 */
48147544Skarels 				if ((p->p_flag&SLOAD) == 0)
48247544Skarels 					wakeup((caddr_t)&proc0);
48347544Skarels 				else
48447544Skarels 					need_resched();
485181Sbill 				/* END INLINE EXPANSION */
486187Sbill 				goto restart;
48733Sbill 			}
488187Sbill 		} else
489187Sbill 			q = &p->p_link;
49033Sbill 	}
49133Sbill 	splx(s);
49233Sbill }
49333Sbill 
49433Sbill /*
49533Sbill  * Initialize the (doubly-linked) run queues
49633Sbill  * to be empty.
49733Sbill  */
49833Sbill rqinit()
49933Sbill {
50033Sbill 	register int i;
50133Sbill 
50233Sbill 	for (i = 0; i < NQS; i++)
50333Sbill 		qs[i].ph_link = qs[i].ph_rlink = (struct proc *)&qs[i];
50433Sbill }
50533Sbill 
50633Sbill /*
50747544Skarels  * Change process state to be runnable,
50847544Skarels  * placing it on the run queue if it is in memory,
50947544Skarels  * and awakening the swapper if it isn't in memory.
51033Sbill  */
51133Sbill setrun(p)
5124826Swnj 	register struct proc *p;
51333Sbill {
5144826Swnj 	register int s;
51533Sbill 
51617541Skarels 	s = splhigh();
51733Sbill 	switch (p->p_stat) {
51833Sbill 
51933Sbill 	case 0:
52033Sbill 	case SWAIT:
52133Sbill 	case SRUN:
52233Sbill 	case SZOMB:
52333Sbill 	default:
52433Sbill 		panic("setrun");
52533Sbill 
526207Sbill 	case SSTOP:
52733Sbill 	case SSLEEP:
528181Sbill 		unsleep(p);		/* e.g. when sending signals */
52933Sbill 		break;
53033Sbill 
53133Sbill 	case SIDL:
53233Sbill 		break;
53333Sbill 	}
53433Sbill 	p->p_stat = SRUN;
53533Sbill 	if (p->p_flag & SLOAD)
53633Sbill 		setrq(p);
53733Sbill 	splx(s);
53830232Skarels 	if (p->p_slptime > 1)
53930232Skarels 		updatepri(p);
54047544Skarels 	p->p_slptime = 0;
54147544Skarels 	if ((p->p_flag&SLOAD) == 0)
54247544Skarels 		wakeup((caddr_t)&proc0);
54347544Skarels 	else if (p->p_pri < curpri)
54447544Skarels 		need_resched();
54533Sbill }
54633Sbill 
54733Sbill /*
54847544Skarels  * Compute priority of process when running in user mode.
54947544Skarels  * Arrange to reschedule if the resulting priority
55047544Skarels  * is better than that of the current process.
55133Sbill  */
55247544Skarels setpri(p)
55347544Skarels 	register struct proc *p;
55433Sbill {
55547544Skarels 	register unsigned int newpri;
55633Sbill 
55747544Skarels 	newpri = PUSER + p->p_cpu / 4 + 2 * p->p_nice;
55847544Skarels 	newpri = min(newpri, MAXPRI);
55947544Skarels 	p->p_usrpri = newpri;
56047544Skarels 	if (newpri < curpri)
56147544Skarels 		need_resched();
56233Sbill }
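
/*
 * For example, assuming the traditional NZERO of 20: a process that has
 * accumulated p_cpu == 100 and runs at the default p_nice of 20 gets
 * p_usrpri == PUSER + 100/4 + 2*20 == PUSER + 65 (clamped to MAXPRI);
 * numerically smaller values denote stronger priority.
 */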
563