xref: /csrg-svn/sys/kern/kern_synch.c (revision 45671)
123376Smckusick /*
240711Skarels  * Copyright (c) 1982, 1986, 1990 Regents of the University of California.
323376Smckusick  * All rights reserved.  The Berkeley software License Agreement
423376Smckusick  * specifies the terms and conditions for redistribution.
523376Smckusick  *
6*45671Skarels  *	@(#)kern_synch.c	7.12 (Berkeley) 12/01/90
723376Smckusick  */
833Sbill 
937495Smckusick #include "machine/pte.h"
1037495Smckusick #include "machine/psl.h"
1137495Smckusick #include "machine/mtpr.h"
129756Ssam 
1317093Sbloom #include "param.h"
1417093Sbloom #include "systm.h"
1517093Sbloom #include "user.h"
1617093Sbloom #include "proc.h"
1717093Sbloom #include "vm.h"
1817093Sbloom #include "kernel.h"
1917093Sbloom #include "buf.h"
209756Ssam 
218102Sroot /*
228102Sroot  * Force switch among equal priority processes every 100ms.
238102Sroot  */
248102Sroot roundrobin()
258102Sroot {
268102Sroot 
278102Sroot 	runrun++;
288102Sroot 	aston();
298624Sroot 	timeout(roundrobin, (caddr_t)0, hz / 10);
308102Sroot }
318102Sroot 
3232908Smckusick /*
3332908Smckusick  * constants for digital decay and forget
3432908Smckusick  *	90% of (p_cpu) usage in 5*loadav time
3532908Smckusick  *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
3632908Smckusick  *          Note that, as ps(1) mentions, this can let percentages
3732908Smckusick  *          total over 100% (I've seen 137.9% for 3 processes).
3832908Smckusick  *
3932908Smckusick  * Note that hardclock updates p_cpu and p_cpticks independently.
4032908Smckusick  *
4132908Smckusick  * We wish to decay away 90% of p_cpu in (5 * loadavg) seconds.
4232908Smckusick  * That is, the system wants to compute a value of decay such
4332908Smckusick  * that the following for loop:
4432908Smckusick  * 	for (i = 0; i < (5 * loadavg); i++)
4532908Smckusick  * 		p_cpu *= decay;
4632908Smckusick  * will compute
4732908Smckusick  * 	p_cpu *= 0.1;
4832908Smckusick  * for all values of loadavg:
4932908Smckusick  *
5032908Smckusick  * Mathematically this loop can be expressed by saying:
5132908Smckusick  * 	decay ** (5 * loadavg) ~= .1
5232908Smckusick  *
5332908Smckusick  * The system computes decay as:
5432908Smckusick  * 	decay = (2 * loadavg) / (2 * loadavg + 1)
5532908Smckusick  *
5632908Smckusick  * We wish to prove that the system's computation of decay
5732908Smckusick  * will always fulfill the equation:
5832908Smckusick  * 	decay ** (5 * loadavg) ~= .1
5932908Smckusick  *
6032908Smckusick  * If we compute b as:
6132908Smckusick  * 	b = 2 * loadavg
6232908Smckusick  * then
6332908Smckusick  * 	decay = b / (b + 1)
6432908Smckusick  *
6532908Smckusick  * We now need to prove two things:
6632908Smckusick  *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
6732908Smckusick  *	2) Given (b/(b+1)) ** power ~= .1, prove power == (5 * loadavg)
6832908Smckusick  *
6932908Smckusick  * Facts:
7032908Smckusick  *         For x close to zero, exp(x) =~ 1 + x, since
7132908Smckusick  *              exp(x) = 1 + x**1/1! + x**2/2! + ... .
7232908Smckusick  *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
7332908Smckusick  *         For x close to zero, ln(1+x) =~ x, since
7432908Smckusick  *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
7532908Smckusick  *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
7632908Smckusick  *         ln(.1) =~ -2.30
7732908Smckusick  *
7832908Smckusick  * Proof of (1):
7932908Smckusick  *    Solve (factor)**(power) =~ .1 given power (5*loadav):
8032908Smckusick  *	solving for factor,
8132908Smckusick  *      ln(factor) =~ (-2.30/(5*loadav)), or
8232908Smckusick  *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
8332908Smckusick  *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
8432908Smckusick  *
8532908Smckusick  * Proof of (2):
8632908Smckusick  *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
8732908Smckusick  *	solving for power,
8832908Smckusick  *      power*ln(b/(b+1)) =~ -2.30, or
8932908Smckusick  *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
9032908Smckusick  *
9132908Smckusick  * Actual power values for the implemented algorithm are as follows:
9232908Smckusick  *      loadav: 1       2       3       4
9332908Smckusick  *      power:  5.68    10.32   14.94   19.55
9432908Smckusick  */
9517541Skarels 
9638164Smckusick /* calculations for digital decay to forget 90% of usage in 5*loadav sec */
9738164Smckusick #define	get_b(loadav)		(2 * (loadav))
9838164Smckusick #define	get_pcpu(b, cpu)	(((b) * ((cpu) & 0377)) / ((b) + FSCALE))
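
/*
 * Illustrative check of the derivation and table above (a sketch, not
 * part of the kernel): a hypothetical user-level program that, for each
 * loadav, solves decay ** power == .1 for power, with decay = b/(b+1)
 * and b = 2*loadav, and compares the result against 5*loadav.  The
 * get_pcpu() macro above applies the same decay factor to p_cpu, in
 * fixed point.
 */
#ifdef notdef
#include <math.h>
#include <stdio.h>

main()
{
	double loadav, b, decay, power;

	for (loadav = 1; loadav <= 4; loadav++) {
		b = 2 * loadav;
		decay = b / (b + 1);
		power = log(0.1) / log(decay);	/* decay ** power == .1 */
		printf("loadav %g: decay %.4f power %.2f (5*loadav = %g)\n",
		    loadav, decay, power, 5 * loadav);
	}
	/* prints powers 5.68, 10.32, 14.94 and 19.55, matching the table */
	return (0);
}
#endif /* notdef */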
998102Sroot 
10038164Smckusick /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
10138164Smckusick fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */
10238164Smckusick 
1038102Sroot /*
10438164Smckusick  * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
10538164Smckusick  * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
10638164Smckusick  * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
10738164Smckusick  *
10838164Smckusick  * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
10938164Smckusick  *	1 - exp(-1/20) ~= 0.0488; with hz == 100, each tick contributes
 *	(1 - exp(-1/20)) / hz ~= 0.000488 ~= 2**-11 == 1 (fixed pt, *11* bits).
11038164Smckusick  *
11138164Smckusick  * If you don't want to bother with the faster/more-accurate formula, you
11238164Smckusick  * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
11338164Smckusick  * (more general) method of calculating the %age of CPU used by a process.
11438164Smckusick  */
11538164Smckusick #define	CCPU_SHIFT	11
11638164Smckusick 
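/*
 * Illustrative check (a sketch, not part of the kernel): a hypothetical
 * user-level program showing that, with hz == 100 and FSHIFT == 11
 * (FSHIFT and FSCALE really live in "param.h"; the values here are
 * assumptions), the fast p_pctcpu formula in schedcpu() below and the
 * general one agree, since (FSCALE - ccpu) == 100 == hz in that case.
 * It relies on the CCPU_SHIFT definition above.  Note also that a fully
 * CPU-bound process gains about (1 - exp(-1/20)) * FSCALE per second
 * while the old value decays by exp(-1/20), so its p_pctcpu converges
 * to roughly FSCALE, i.e. 100%.
 */
#ifdef notdef
#include <stdio.h>

#define	FSHIFT	11			/* assumed; see "param.h" */
#define	FSCALE	(1 << FSHIFT)
typedef long fixpt_t;

main()
{
	fixpt_t ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
	fixpt_t fast, general;
	int hz = 100, cpticks;

	for (cpticks = 0; cpticks <= hz; cpticks += 25) {
		fast = ((fixpt_t)cpticks) << (FSHIFT - CCPU_SHIFT);
		general = ((FSCALE - ccpu) *
		    ((fixpt_t)cpticks * FSCALE / hz)) >> FSHIFT;
		printf("cpticks %3d: fast %3ld general %3ld\n",
		    cpticks, (long)fast, (long)general);
	}
	return (0);
}
#endif /* notdef */
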
11738164Smckusick /*
1188102Sroot  * Recompute process priorities, once a second
1198102Sroot  */
1208102Sroot schedcpu()
1218102Sroot {
12238164Smckusick 	register fixpt_t b = get_b(averunnable[0]);
1238102Sroot 	register struct proc *p;
1248102Sroot 	register int s, a;
1258102Sroot 
1268102Sroot 	wakeup((caddr_t)&lbolt);
12716532Skarels 	for (p = allproc; p != NULL; p = p->p_nxt) {
1288102Sroot 		if (p->p_time != 127)
1298102Sroot 			p->p_time++;
1308102Sroot 		if (p->p_stat==SSLEEP || p->p_stat==SSTOP)
1318102Sroot 			if (p->p_slptime != 127)
1328102Sroot 				p->p_slptime++;
13338164Smckusick 		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
13417541Skarels 		/*
13517541Skarels 		 * If the process has slept the entire second,
13617541Skarels 		 * stop recalculating its priority until it wakes up.
13717541Skarels 		 */
13838164Smckusick 		if (p->p_slptime > 1)
13917541Skarels 			continue;
14017541Skarels 		/*
14117541Skarels 		 * p_pctcpu is only for ps.
14217541Skarels 		 */
14338164Smckusick #if	(FSHIFT >= CCPU_SHIFT)
14438164Smckusick 		p->p_pctcpu += (hz == 100)?
14538164Smckusick 			((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
14638164Smckusick 			100 * (((fixpt_t) p->p_cpticks)
14738164Smckusick 				<< (FSHIFT - CCPU_SHIFT)) / hz;
14838164Smckusick #else
14938164Smckusick 		p->p_pctcpu += ((FSCALE - ccpu) *
15038164Smckusick 			(p->p_cpticks * FSCALE / hz)) >> FSHIFT;
15138164Smckusick #endif
1528102Sroot 		p->p_cpticks = 0;
15338164Smckusick 		a = (int) get_pcpu(b, p->p_cpu) + p->p_nice;
1548102Sroot 		if (a < 0)
1558102Sroot 			a = 0;
1568102Sroot 		if (a > 255)
1578102Sroot 			a = 255;
1588102Sroot 		p->p_cpu = a;
1598102Sroot 		(void) setpri(p);
16017541Skarels 		s = splhigh();	/* prevent state changes */
1618102Sroot 		if (p->p_pri >= PUSER) {
16216795Skarels #define	PPQ	(128 / NQS)
1638102Sroot 			if ((p != u.u_procp || noproc) &&
1648102Sroot 			    p->p_stat == SRUN &&
1658102Sroot 			    (p->p_flag & SLOAD) &&
16616795Skarels 			    (p->p_pri / PPQ) != (p->p_usrpri / PPQ)) {
1678102Sroot 				remrq(p);
1688102Sroot 				p->p_pri = p->p_usrpri;
1698102Sroot 				setrq(p);
1708102Sroot 			} else
1718102Sroot 				p->p_pri = p->p_usrpri;
1728102Sroot 		}
1738102Sroot 		splx(s);
1748102Sroot 	}
1758102Sroot 	vmmeter();
1768102Sroot 	if (runin!=0) {
1778102Sroot 		runin = 0;
1788102Sroot 		wakeup((caddr_t)&runin);
1798102Sroot 	}
1808102Sroot 	if (bclnlist != NULL)
1818102Sroot 		wakeup((caddr_t)&proc[2]);
1828624Sroot 	timeout(schedcpu, (caddr_t)0, hz);
1838102Sroot }
1848102Sroot 
18517541Skarels /*
18617541Skarels  * Recalculate the priority of a process after it has slept for a while.
18717541Skarels  */
18817541Skarels updatepri(p)
18917541Skarels 	register struct proc *p;
19017541Skarels {
19117541Skarels 	register int a = p->p_cpu & 0377;
19238164Smckusick 	register fixpt_t b = get_b(averunnable[0]);
19317541Skarels 
19417541Skarels 	p->p_slptime--;		/* the first time was done in schedcpu */
19517541Skarels 	while (a && --p->p_slptime)
19638164Smckusick 		a = (int) get_pcpu(b, a) /* + p->p_nice */;
19730232Skarels 	p->p_slptime = 0;
19817541Skarels 	if (a < 0)
19917541Skarels 		a = 0;
20017541Skarels 	if (a > 255)
20117541Skarels 		a = 255;
20217541Skarels 	p->p_cpu = a;
20317541Skarels 	(void) setpri(p);
20417541Skarels }
20517541Skarels 
20633Sbill #define SQSIZE 0100	/* Must be power of 2 */
20733Sbill #define HASH(x)	(( (int) x >> 5) & (SQSIZE-1))
20821099Smckusick struct slpque {
20921099Smckusick 	struct proc *sq_head;
21021099Smckusick 	struct proc **sq_tailp;
21121099Smckusick } slpque[SQSIZE];
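
/*
 * Illustrative example (not from the original source): HASH() picks one
 * of the SQSIZE (0100 == 64) sleep queues using bits 5-10 of the wait
 * channel address.  For a hypothetical channel address 0x800274e0:
 *	HASH(0x800274e0) == ((0x800274e0 >> 5) & 077) == 0x27 == 39,
 * so a process sleeping on that channel is queued on slpque[39], and
 * wakeup() need only search that one queue.
 */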
21233Sbill 
21333Sbill /*
214*45671Skarels  * During autoconfiguration or after a panic, a sleep will simply
215*45671Skarels  * lower the priority briefly to allow interrupts, then return.
216*45671Skarels  * The priority to be used (safepri) is machine-dependent, thus this
217*45671Skarels  * value is initialized and maintained in the machine-dependent layers.
218*45671Skarels  * This priority will typically be 0, or the lowest priority
219*45671Skarels  * that is safe for use on the interrupt stack; it can be made
220*45671Skarels  * higher to block network software interrupts after panics.
221*45671Skarels  */
222*45671Skarels int safepri;
223*45671Skarels 
224*45671Skarels /*
22540711Skarels  * General sleep call.
22640711Skarels  * Suspends current process until a wakeup is made on chan.
22740711Skarels  * The process will then be made runnable with priority pri.
22840711Skarels  * Sleeps at most timo/hz seconds (0 means no timeout).
22940711Skarels  * If pri includes PCATCH flag, signals are checked
23040711Skarels  * before and after sleeping, else signals are not checked.
23140711Skarels  * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.
23240711Skarels  * If PCATCH is set and a signal needs to be delivered,
23340711Skarels  * ERESTART is returned if the current system call should be restarted
23440711Skarels  * if possible, and EINTR is returned if the system call should
23540711Skarels  * be interrupted by the signal.
23633Sbill  */
23740711Skarels tsleep(chan, pri, wmesg, timo)
23840710Smarc 	caddr_t chan;
23940710Smarc 	int pri;
24040710Smarc 	char *wmesg;
24140710Smarc 	int timo;
24240710Smarc {
24340710Smarc 	register struct proc *rp;
24440710Smarc 	register struct slpque *qp;
24540710Smarc 	register s;
24640711Skarels 	int sig, catch = pri & PCATCH;
24740710Smarc 	extern int cold;
24840710Smarc 	int endtsleep();
24940710Smarc 
25040710Smarc 	rp = u.u_procp;
25140710Smarc 	s = splhigh();
25240710Smarc 	if (cold || panicstr) {
25340710Smarc 		/*
25440710Smarc 		 * After a panic, or during autoconfiguration,
25540710Smarc 		 * just give interrupts a chance, then return;
25640710Smarc 		 * don't run any other procs or panic below,
25740710Smarc 		 * in case this is the idle process and already asleep.
25840710Smarc 		 */
259*45671Skarels 		splx(safepri);
26040710Smarc 		splx(s);
26140710Smarc 		return (0);
26240710Smarc 	}
26340710Smarc #ifdef DIAGNOSTIC
26440711Skarels 	if (chan == 0 || rp->p_stat != SRUN || rp->p_rlink)
26540711Skarels 		panic("tsleep");
26640710Smarc #endif
26740710Smarc 	rp->p_wchan = chan;
26840710Smarc 	rp->p_wmesg = wmesg;
26940710Smarc 	rp->p_slptime = 0;
27040711Skarels 	rp->p_pri = pri & PRIMASK;
27140710Smarc 	qp = &slpque[HASH(chan)];
27240710Smarc 	if (qp->sq_head == 0)
27340710Smarc 		qp->sq_head = rp;
27440710Smarc 	else
27540710Smarc 		*qp->sq_tailp = rp;
27640710Smarc 	*(qp->sq_tailp = &rp->p_link) = 0;
277*45671Skarels 	if (timo)
278*45671Skarels 		timeout(endtsleep, (caddr_t)rp, timo);
27940710Smarc 	/*
280*45671Skarels 	 * If we stop in CURSIG/issig(), a wakeup or a SIGCONT
281*45671Skarels 	 * (or both) could occur while we were stopped.
282*45671Skarels 	 * A SIGCONT would cause us to be marked as SSLEEP
283*45671Skarels 	 * without resuming us, thus we must be ready for sleep
284*45671Skarels 	 * when CURSIG is called.  If the wakeup happens while we're
285*45671Skarels 	 * stopped, rp->p_wchan will be 0 upon return from CURSIG.
28640710Smarc 	 */
28740711Skarels 	if (catch) {
288*45671Skarels 		rp->p_flag |= SSINTR;
28940711Skarels 		if (sig = CURSIG(rp)) {
29040711Skarels 			if (rp->p_wchan)
29140711Skarels 				unsleep(rp);
29240711Skarels 			rp->p_stat = SRUN;
293*45671Skarels 			goto resume;
29440711Skarels 		}
29540711Skarels 		if (rp->p_wchan == 0) {
296*45671Skarels 			catch = 0;
297*45671Skarels 			goto resume;
29840711Skarels 		}
29940710Smarc 	}
30040710Smarc 	rp->p_stat = SSLEEP;
30140710Smarc 	(void) spl0();
30240710Smarc 	u.u_ru.ru_nvcsw++;
30340710Smarc 	swtch();
304*45671Skarels resume:
30540710Smarc 	curpri = rp->p_usrpri;
30640710Smarc 	splx(s);
30740711Skarels 	rp->p_flag &= ~SSINTR;
30840710Smarc 	if (rp->p_flag & STIMO) {
30940710Smarc 		rp->p_flag &= ~STIMO;
310*45671Skarels 		if (catch == 0 || sig == 0)
311*45671Skarels 			return (EWOULDBLOCK);
312*45671Skarels 	} else if (timo)
31340710Smarc 		untimeout(endtsleep, (caddr_t)rp);
314*45671Skarels 	if (catch && (sig != 0 || (sig = CURSIG(rp)))) {
31540711Skarels 		if (u.u_sigintr & sigmask(sig))
31640711Skarels 			return (EINTR);
31740711Skarels 		return (ERESTART);
31840711Skarels 	}
31940710Smarc 	return (0);
32040710Smarc }
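
#ifdef notdef
/*
 * Illustrative caller (hypothetical, not part of this file): sleep on a
 * made-up wait channel `sc_buf' at priority PZERO + 1, catching signals
 * (PCATCH), with a 5 second timeout.
 */
int sc_buf;

example_wait()
{
	int error;

	error = tsleep((caddr_t)&sc_buf, (PZERO + 1) | PCATCH, "bufwait",
	    5 * hz);
	if (error)			/* EWOULDBLOCK on timeout; EINTR or */
		return (error);		/* ERESTART if a signal arrived */
	return (0);			/* awakened by wakeup(&sc_buf) */
}
#endif /* notdef */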
32140710Smarc 
32240710Smarc /*
32340710Smarc  * Implement timeout for tsleep.
32440710Smarc  * If process hasn't been awakened (wchan non-zero),
32540710Smarc  * set timeout flag and undo the sleep.  If proc
32640710Smarc  * is stopped, just unsleep so it will remain stopped.
32740710Smarc  */
32840710Smarc endtsleep(p)
32940710Smarc 	register struct proc *p;
33040710Smarc {
33140710Smarc 	int s = splhigh();
33240710Smarc 
33340710Smarc 	if (p->p_wchan) {
33440710Smarc 		if (p->p_stat == SSLEEP)
33540710Smarc 			setrun(p);
33640710Smarc 		else
33740710Smarc 			unsleep(p);
33840710Smarc 		p->p_flag |= STIMO;
33940710Smarc 	}
34040710Smarc 	splx(s);
34140710Smarc }
34240710Smarc 
34340711Skarels /*
34440711Skarels  * Short-term, non-interruptible sleep.
34540711Skarels  */
34633Sbill sleep(chan, pri)
3478033Sroot 	caddr_t chan;
3488033Sroot 	int pri;
34933Sbill {
35021099Smckusick 	register struct proc *rp;
35121099Smckusick 	register struct slpque *qp;
352207Sbill 	register s;
35330532Skarels 	extern int cold;
35433Sbill 
35540711Skarels #ifdef DIAGNOSTIC
35640711Skarels 	if (pri > PZERO) {
35740711Skarels 		printf("sleep called with pri %d > PZERO, wchan: %x\n",
35840711Skarels 			pri, chan);
35940711Skarels 		panic("old sleep");
36040711Skarels 	}
36140711Skarels #endif
36233Sbill 	rp = u.u_procp;
36317541Skarels 	s = splhigh();
36430532Skarels 	if (cold || panicstr) {
36518363Skarels 		/*
36630532Skarels 		 * After a panic, or during autoconfiguration,
36730532Skarels 		 * just give interrupts a chance, then return;
36830532Skarels 		 * don't run any other procs or panic below,
36930532Skarels 		 * in case this is the idle process and already asleep.
37018363Skarels 		 */
371*45671Skarels 		splx(safepri);
37218363Skarels 		splx(s);
37318363Skarels 		return;
37418363Skarels 	}
37540710Smarc #ifdef DIAGNOSTIC
37618363Skarels 	if (chan==0 || rp->p_stat != SRUN || rp->p_rlink)
37733Sbill 		panic("sleep");
37840710Smarc #endif
37933Sbill 	rp->p_wchan = chan;
38040710Smarc 	rp->p_wmesg = NULL;
38133Sbill 	rp->p_slptime = 0;
38233Sbill 	rp->p_pri = pri;
38321099Smckusick 	qp = &slpque[HASH(chan)];
38421099Smckusick 	if (qp->sq_head == 0)
38521099Smckusick 		qp->sq_head = rp;
38621099Smckusick 	else
38721099Smckusick 		*qp->sq_tailp = rp;
38821099Smckusick 	*(qp->sq_tailp = &rp->p_link) = 0;
38940711Skarels 	rp->p_stat = SSLEEP;
39040711Skarels 	(void) spl0();
39140711Skarels 	u.u_ru.ru_nvcsw++;
39240711Skarels 	swtch();
39316795Skarels 	curpri = rp->p_usrpri;
39433Sbill 	splx(s);
39533Sbill }
39633Sbill 
39733Sbill /*
398181Sbill  * Remove a process from its wait queue
399181Sbill  */
400181Sbill unsleep(p)
4014826Swnj 	register struct proc *p;
402181Sbill {
40321099Smckusick 	register struct slpque *qp;
404181Sbill 	register struct proc **hp;
40521099Smckusick 	int s;
406181Sbill 
40717541Skarels 	s = splhigh();
408181Sbill 	if (p->p_wchan) {
40921099Smckusick 		hp = &(qp = &slpque[HASH(p->p_wchan)])->sq_head;
410181Sbill 		while (*hp != p)
411181Sbill 			hp = &(*hp)->p_link;
412181Sbill 		*hp = p->p_link;
41321099Smckusick 		if (qp->sq_tailp == &p->p_link)
41421099Smckusick 			qp->sq_tailp = hp;
415181Sbill 		p->p_wchan = 0;
416181Sbill 	}
417181Sbill 	splx(s);
418181Sbill }
419181Sbill 
420181Sbill /*
42133Sbill  * Wake up all processes sleeping on chan.
42233Sbill  */
42333Sbill wakeup(chan)
4244826Swnj 	register caddr_t chan;
42533Sbill {
42621099Smckusick 	register struct slpque *qp;
42721099Smckusick 	register struct proc *p, **q;
42833Sbill 	int s;
42933Sbill 
43017541Skarels 	s = splhigh();
43121099Smckusick 	qp = &slpque[HASH(chan)];
43233Sbill restart:
43321099Smckusick 	for (q = &qp->sq_head; p = *q; ) {
43440710Smarc #ifdef DIAGNOSTIC
435181Sbill 		if (p->p_rlink || p->p_stat != SSLEEP && p->p_stat != SSTOP)
43633Sbill 			panic("wakeup");
43740710Smarc #endif
438207Sbill 		if (p->p_wchan==chan) {
43933Sbill 			p->p_wchan = 0;
440187Sbill 			*q = p->p_link;
44121099Smckusick 			if (qp->sq_tailp == &p->p_link)
44221099Smckusick 				qp->sq_tailp = q;
443181Sbill 			if (p->p_stat == SSLEEP) {
444181Sbill 				/* OPTIMIZED INLINE EXPANSION OF setrun(p) */
44521763Skarels 				if (p->p_slptime > 1)
44621763Skarels 					updatepri(p);
447181Sbill 				p->p_stat = SRUN;
4482702Swnj 				if (p->p_flag & SLOAD)
449181Sbill 					setrq(p);
45016795Skarels 				/*
45116795Skarels 				 * Since curpri is a usrpri,
45216795Skarels 				 * p->p_pri is always better than curpri.
45316795Skarels 				 */
45416795Skarels 				runrun++;
45516795Skarels 				aston();
4563545Swnj 				if ((p->p_flag&SLOAD) == 0) {
4573545Swnj 					if (runout != 0) {
4583545Swnj 						runout = 0;
4593545Swnj 						wakeup((caddr_t)&runout);
4603545Swnj 					}
4613545Swnj 					wantin++;
462181Sbill 				}
463181Sbill 				/* END INLINE EXPANSION */
464187Sbill 				goto restart;
46533Sbill 			}
466187Sbill 		} else
467187Sbill 			q = &p->p_link;
46833Sbill 	}
46933Sbill 	splx(s);
47033Sbill }
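
#ifdef notdef
/*
 * Illustrative pairing of sleep() and wakeup() (hypothetical, not part
 * of this file): a consumer sleeps on the address of a made-up event
 * word `ev_ready' at an uninterruptible priority and re-checks the
 * condition after each wakeup, since wakeup() rouses every process
 * sleeping on the channel.  The spl protects the test-and-sleep against
 * a wakeup from interrupt level slipping in between.
 */
int ev_ready;

example_consume()
{
	int s;

	s = splhigh();
	while (ev_ready == 0)
		sleep((caddr_t)&ev_ready, PZERO - 1);
	ev_ready = 0;
	splx(s);
}

example_post()		/* called from interrupt level */
{

	ev_ready = 1;
	wakeup((caddr_t)&ev_ready);
}
#endif /* notdef */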
47133Sbill 
47233Sbill /*
47333Sbill  * Initialize the (doubly-linked) run queues
47433Sbill  * to be empty.
47533Sbill  */
47633Sbill rqinit()
47733Sbill {
47833Sbill 	register int i;
47933Sbill 
48033Sbill 	for (i = 0; i < NQS; i++)
48133Sbill 		qs[i].ph_link = qs[i].ph_rlink = (struct proc *)&qs[i];
48233Sbill }
48333Sbill 
48433Sbill /*
48533Sbill  * Set the process running;
48633Sbill  * arrange for it to be swapped in if necessary.
48733Sbill  */
48833Sbill setrun(p)
4894826Swnj 	register struct proc *p;
49033Sbill {
4914826Swnj 	register int s;
49233Sbill 
49317541Skarels 	s = splhigh();
49433Sbill 	switch (p->p_stat) {
49533Sbill 
49633Sbill 	case 0:
49733Sbill 	case SWAIT:
49833Sbill 	case SRUN:
49933Sbill 	case SZOMB:
50033Sbill 	default:
50133Sbill 		panic("setrun");
50233Sbill 
503207Sbill 	case SSTOP:
50433Sbill 	case SSLEEP:
505181Sbill 		unsleep(p);		/* e.g. when sending signals */
50633Sbill 		break;
50733Sbill 
50833Sbill 	case SIDL:
50933Sbill 		break;
51033Sbill 	}
51133Sbill 	p->p_stat = SRUN;
51233Sbill 	if (p->p_flag & SLOAD)
51333Sbill 		setrq(p);
51433Sbill 	splx(s);
51530232Skarels 	if (p->p_slptime > 1)
51630232Skarels 		updatepri(p);
5174826Swnj 	if (p->p_pri < curpri) {
51833Sbill 		runrun++;
5192443Swnj 		aston();
5202443Swnj 	}
5213545Swnj 	if ((p->p_flag&SLOAD) == 0) {
5224826Swnj 		if (runout != 0) {
5233545Swnj 			runout = 0;
5243545Swnj 			wakeup((caddr_t)&runout);
5253545Swnj 		}
5263545Swnj 		wantin++;
52733Sbill 	}
52833Sbill }
52933Sbill 
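/*
 * Illustrative arithmetic for setpri() below (not from the original
 * source), assuming PUSER == 50 as in the param.h of this era and
 * ignoring the nice term (p_nice == 0): a process with p_cpu == 80 gets
 *	p == 80/4 + 50 == 70,
 * while a fully CPU-bound process with p_cpu == 255 gets
 *	p == 63 + 50 == 113,
 * plus 2*4 == 8 (the `effectively, nice(4)' penalty) if it is over its
 * memory allowance while free memory is short.  All results stay between
 * PUSER and the clamp at 127, so heavier recent CPU use always maps to a
 * numerically larger, i.e. weaker, user priority.
 */
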
53033Sbill /*
53133Sbill  * Set user priority.
53233Sbill  * The rescheduling flag (runrun)
53333Sbill  * is set if the priority is better
53433Sbill  * than that of the currently running process.
53533Sbill  */
53633Sbill setpri(pp)
5374826Swnj 	register struct proc *pp;
53833Sbill {
5394826Swnj 	register int p;
54033Sbill 
5413875Swnj 	p = (pp->p_cpu & 0377)/4;
54217541Skarels 	p += PUSER + 2 * pp->p_nice;
5433530Swnj 	if (pp->p_rssize > pp->p_maxrss && freemem < desfree)
5443530Swnj 		p += 2*4;	/* effectively, nice(4) */
5454826Swnj 	if (p > 127)
54633Sbill 		p = 127;
5474826Swnj 	if (p < curpri) {
54833Sbill 		runrun++;
5492453Swnj 		aston();
5502453Swnj 	}
55133Sbill 	pp->p_usrpri = p;
5524826Swnj 	return (p);
55333Sbill }
554