/*	kern_clock.c	4.40	82/09/08	*/

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/dk.h"
#include "../h/callout.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/kernel.h"
#include "../h/proc.h"
#include "../h/psl.h"
#include "../h/vm.h"
#include "../h/text.h"
#ifdef MUSH
#include "../h/quota.h"
#include "../h/share.h"
#endif

/*
 * Clock handling routines.
 *
 * This code is written for a machine with only one interval timer,
 * and does timing and resource utilization estimation statistically
 * based on the state of the machine hz times a second.  A machine
 * with proper clocks (running separately in user state, system state,
 * interrupt state and idle state) as well as a time-of-day clock
 * would allow a non-approximate implementation.
 */

/*
 * TODO:
 *	* Keep more accurate statistics by simulating good interval timers.
 *	* Use the time-of-day clock on the VAX to keep more accurate time
 *	  than is possible by repeated use of the interval timer.
 *	* Allocate more timeout table slots when table overflows.
 */

/* bump a timeval by a small number of usec's */
#define	bumptime(tp, usec) \
	(tp)->tv_usec += usec; \
	if ((tp)->tv_usec >= 1000000) { \
		(tp)->tv_usec -= 1000000; \
		(tp)->tv_sec++; \
	}
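
/*
 * For example, with hz = 100 (tick = 10000 usec), bumping a timeval of
 * { 5 sec, 995000 usec } by one tick overflows the usec field
 * (1005000 >= 1000000), giving { 6 sec, 5000 usec }.
 *
 * Note that the expansion is two statements, not one, so a use as the
 * body of an if or else must be braced (as in hardclock below).
 */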

/*
 * The (single) hardware interval timer.
 * We update the events relating to real time, and then
 * make a gross assumption: that the system has been in the
 * state it is in (user state, kernel state, interrupt state,
 * or idle state) for the entire last time interval, and
 * update statistics accordingly.
 */
/*ARGSUSED*/
hardclock(pc, ps)
	caddr_t pc;
{
	register struct callout *p1;
	register struct proc *p;
	register int s, cpstate;
	extern double avenrun[];

	/*
	 * Update the real-time timeout queue.
	 * At the front of the queue are some number of events which
	 * are ``due''.  The time to these is <= 0 and if negative
	 * represents the number of ticks which have passed since the
	 * event was supposed to happen.  The rest of the queue elements
	 * (times > 0) are events yet to happen, where the time for each
	 * is given as a delta from the previous.  Decrementing just the
	 * first of these serves to decrement the time to all events.
	 */
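	/*
	 * For example, events due in 3, 7, 7 and 12 ticks sit on the
	 * queue with c_time deltas of 3, 4, 0 and 5; one decrement of
	 * the first pending (positive) entry ages every later event.
	 */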
	for (p1 = calltodo.c_next; p1 && p1->c_time <= 0; p1 = p1->c_next)
		--p1->c_time;
	if (p1)
		--p1->c_time;

	/*
	 * If the cpu is currently scheduled to a process, then
	 * charge it with resource utilization for a tick, updating
	 * statistics which run in (user+system) virtual time,
	 * such as the cpu time limit and profiling timers.
	 * This assumes that the current process has been running
	 * the entire last tick.
	 */
	if (!noproc) {
		s = u.u_procp->p_rssize;
		u.u_ru.ru_idrss += s; u.u_ru.ru_isrss += 0;	/* XXX */
		if (u.u_procp->p_textp) {
			register int xrss = u.u_procp->p_textp->x_rssize;

			s += xrss;
			u.u_ru.ru_ixrss += xrss;
		}
		if (s > u.u_ru.ru_maxrss)
			u.u_ru.ru_maxrss = s;
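		/*
		 * When accumulated user+system time passes the soft cpu
		 * limit, send SIGXCPU and advance the soft limit 5 seconds
		 * toward the hard limit, so the signal recurs every 5 cpu
		 * seconds until the hard limit is reached.
		 */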
		if ((u.u_ru.ru_utime.tv_sec+u.u_ru.ru_stime.tv_sec+1) >
		    u.u_rlimit[RLIMIT_CPU].rlim_cur) {
			psignal(u.u_procp, SIGXCPU);
			if (u.u_rlimit[RLIMIT_CPU].rlim_cur <
			    u.u_rlimit[RLIMIT_CPU].rlim_max)
				u.u_rlimit[RLIMIT_CPU].rlim_cur += 5;
		}
		if (timerisset(&u.u_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&u.u_timer[ITIMER_PROF], tick) == 0)
			psignal(u.u_procp, SIGPROF);
	}

	/*
	 * Charge the time out based on the mode the cpu is in.
	 * Here again we fudge for the lack of proper interval timers,
	 * assuming that the current state has been around for at least
	 * one tick.
	 */
	if (USERMODE(ps)) {
		/*
		 * CPU was in user state.  Increment
		 * the user time counter, and process the process-virtual
		 * time interval timer.
		 */
		bumptime(&u.u_ru.ru_utime, tick);
		if (timerisset(&u.u_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&u.u_timer[ITIMER_VIRTUAL], tick) == 0)
			psignal(u.u_procp, SIGVTALRM);
		if (u.u_procp->p_nice > NZERO)
			cpstate = CP_NICE;
		else
			cpstate = CP_USER;
	} else {
		/*
		 * CPU was in system state.  If profiling the kernel,
		 * increment a counter.  If no process is running
		 * then this is a system tick if we were running
		 * at a non-zero IPL (in a driver).  If a process is running,
		 * then we charge it with system time even if we were
		 * at a non-zero IPL, since the system often runs
		 * this way during processing of system calls.
		 * This is approximate, but the lack of true interval
		 * timers makes doing anything else difficult.
		 */
#ifdef GPROF
		int k = pc - s_lowpc;
		if (profiling < 2 && k < s_textsize)
			kcount[k / sizeof (*kcount)]++;
#endif
		cpstate = CP_SYS;
		if (noproc) {
			if ((ps&PSL_IPL) != 0)
				cpstate = CP_IDLE;
		} else {
			bumptime(&u.u_ru.ru_stime, tick);
		}
	}

	/*
	 * We maintain statistics shown by user-level statistics
	 * programs:  the amount of time in each cpu state, and
	 * the amount of time each of DK_NDRIVE ``drives'' is busy.
	 */
	cp_time[cpstate]++;
	for (s = 0; s < DK_NDRIVE; s++)
		if (dk_busy&(1<<s))
			dk_time[s]++;

	/*
	 * We adjust the priority of the current process.
	 * The priority of a process gets worse as it accumulates
	 * CPU time.  The cpu usage estimator (p_cpu) is increased here
	 * and the formula for computing priorities (in kern_synch.c)
	 * will compute a different value each time p_cpu increases
	 * by 4.  The cpu usage estimator ramps up quite quickly when
	 * the process is running (linearly), and decays away exponentially,
	 * at a rate which is proportionally slower when the system is
	 * busy.  The basic principle is that the system will 90% forget
	 * that a process used a lot of CPU time in 5*loadav seconds.
	 * This causes the system to favor processes which haven't run
	 * much recently, and to round-robin among other processes.
	 */
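	/*
	 * (For reference: in kern_synch.c of this era the once-a-second
	 * decay is approximately the digital filter
	 *
	 *	p_cpu = ((2 * loadav) / (2 * loadav + 1)) * p_cpu
	 *
	 * which discards about 90% of the accumulated usage over
	 * 5 * loadav seconds, matching the description above.)
	 */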
	if (!noproc) {
		p = u.u_procp;
		p->p_cpticks++;
		if (++p->p_cpu == 0)
			p->p_cpu--;
#ifdef MUSH
		p->p_quota->q_cost += (p->p_nice > NZERO ?
		    (shconsts.sc_tic * ((2*NZERO)-p->p_nice)) / NZERO :
		    shconsts.sc_tic) * (((int)avenrun[0]+2)/3);
#endif
		if ((p->p_cpu&3) == 0) {
			(void) setpri(p);
			if (p->p_pri >= PUSER)
				p->p_pri = p->p_usrpri;
		}
	}

	/*
	 * Increment the time-of-day, and schedule
	 * processing of the callouts at a very low cpu priority,
	 * so we don't keep the relatively high clock interrupt
	 * priority any longer than necessary.
	 */
	bumptime(&time, tick);
	setsoftclock();
}

/*
 * Software priority level clock interrupt.
 * Run periodic events from the timeout queue.
 */
/*ARGSUSED*/
softclock(pc, ps)
	caddr_t pc;
{

	for (;;) {
		register struct callout *p1;
		register caddr_t arg;
		register int (*func)();
		register int a, s;

		s = spl7();
		if ((p1 = calltodo.c_next) == 0 || p1->c_time > 0) {
			splx(s);
			break;
		}
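		/*
		 * Unlink the expired entry and return it to the free list
		 * while still at high priority; the handler is called only
		 * after splx, so it may itself call timeout().
		 */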
		arg = p1->c_arg; func = p1->c_func; a = p1->c_time;
		calltodo.c_next = p1->c_next;
		p1->c_next = callfree;
		callfree = p1;
		(void) splx(s);
		(*func)(arg, a);
	}
}

/*
 * Arrange that (*fun)(arg) is called in tim/hz seconds.
 */
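/*
 * For example (a hypothetical driver), a one-second watchdog might be
 * armed with
 *
 *	timeout(xxwatch, (caddr_t)&xx_sc, hz);
 *
 * and later cancelled with untimeout(xxwatch, (caddr_t)&xx_sc).
 */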
timeout(fun, arg, tim)
	int (*fun)();
	caddr_t arg;
	int tim;
{
	register struct callout *p1, *p2, *pnew;
	register int t;
	int s;

	t = tim;
	s = spl7();
	pnew = callfree;
	if (pnew == NULL)
		panic("timeout table overflow");
	callfree = pnew->c_next;
	pnew->c_arg = arg;
	pnew->c_func = fun;
	for (p1 = &calltodo; (p2 = p1->c_next) && p2->c_time < t; p1 = p2)
		t -= p2->c_time;
	p1->c_next = pnew;
	pnew->c_next = p2;
	pnew->c_time = t;
	if (p2)
		p2->c_time -= t;
	splx(s);
}

/*
 * untimeout is called to remove a function timeout call
 * from the callout structure.
 */
untimeout(fun, arg)
	int (*fun)();
	caddr_t arg;
{
	register struct callout *p1, *p2;
	register int s;

	s = spl7();
	for (p1 = &calltodo; (p2 = p1->c_next) != 0; p1 = p2) {
		if (p2->c_func == fun && p2->c_arg == arg) {
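			/*
			 * c_time values are deltas, so credit the removed
			 * entry's remaining time to its successor to keep
			 * later events on schedule.
			 */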
			if (p2->c_next && p2->c_time > 0)
				p2->c_next->c_time += p2->c_time;
			p1->c_next = p2->c_next;
			p2->c_next = callfree;
			callfree = p2;
			break;
		}
	}
	splx(s);
}

/*
 * Compute the number of hz until the specified time.
 * Used to compute the third argument to timeout() from an
 * absolute time.
 */
hzto(tv)
	struct timeval *tv;
{
	register long ticks;
	register long sec;
	int s = spl7();

	/*
	 * If the number of milliseconds will fit in 32-bit arithmetic,
	 * compute the number of milliseconds to the target time and
	 * scale to ticks.  Otherwise just compute the number of hz in
	 * the time, rounding times greater than representable to the
	 * maximum value.
	 *
	 * Delta times less than 25 days can be computed ``exactly''.
	 * The maximum value for any timeout in 10ms ticks is 250 days.
	 */
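	/*
	 * For example, a time 2.5 seconds from now with hz = 100
	 * (tick = 10000 usec) yields 2500 ms / 10 ms-per-tick = 250 ticks.
	 */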
	sec = tv->tv_sec - time.tv_sec;
	if (sec <= 0x7fffffff / 1000 - 1000)
		ticks = (sec * 1000 +
			(tv->tv_usec - time.tv_usec) / 1000) / (tick / 1000);
	else if (sec <= 0x7fffffff / hz)
		ticks = sec * hz;
	else
		ticks = 0x7fffffff;
	splx(s);
	return (ticks);
}
3218112Sroot }
322