xref: /csrg-svn/sys/kern/kern_clock.c (revision 10388)
/*	kern_clock.c	4.51	83/01/17	*/

#include "../machine/reg.h"
#include "../machine/psl.h"

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/dk.h"
#include "../h/callout.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/kernel.h"
#include "../h/proc.h"
#include "../h/vm.h"
#include "../h/text.h"
#ifdef MUSH
#include "../h/quota.h"
#include "../h/share.h"
#endif

#ifdef vax
#include "../vax/mtpr.h"
#endif

#ifdef GPROF
#include "../h/gprof.h"
#endif

#
/*
 * Clock handling routines.
 *
 * This code is written for a machine with only one interval timer,
 * and does timing and resource utilization estimation statistically
 * based on the state of the machine hz times a second.  A machine
 * with proper clocks (running separately in user state, system state,
 * interrupt state and idle state) as well as a time-of-day clock
 * would allow a non-approximate implementation.
 */

/*
 * TODO:
 *	* Keep more accurate statistics by simulating good interval timers.
 *	* Use the time-of-day clock on the VAX to keep more accurate time
 *	  than is possible by repeated use of the interval timer.
 *	* Allocate more timeout table slots when table overflows.
 */

/* bump a timeval by a small number of usec's */
#define	bumptime(tp, usec) \
	(tp)->tv_usec += usec; \
	if ((tp)->tv_usec >= 1000000) { \
		(tp)->tv_usec -= 1000000; \
		(tp)->tv_sec++; \
	}
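/*
 * Since bumptime expands to more than one statement, callers must
 * enclose it in braces when it forms the entire body of an if or
 * else, e.g.
 *
 *	} else {
 *		bumptime(&u.u_ru.ru_stime, tick);
 *	}
 *
 * as is done in hardclock() below.
 */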

/*
 * The (single) hardware interval timer.
 * We update the events relating to real time, and then
 * make a gross assumption: that the system has been in the
 * state it is in (user state, kernel state, interrupt state,
 * or idle state) for the entire last time interval, and
 * update statistics accordingly.
 */
/*ARGSUSED*/
#ifdef vax
hardclock(pc, ps)
	caddr_t pc;
	int ps;
{
#endif
#ifdef sun
hardclock(regs)
	struct regs regs;
{
	int ps = regs.r_sr;
	caddr_t pc = (caddr_t)regs.r_pc;
#endif
	register struct callout *p1;
	register struct proc *p;
	register int s, cpstate;

#ifdef sun
	if (USERMODE(ps))		/* aston needs ar0 */
		u.u_ar0 = &regs.r_r0;
#endif
	/*
	 * Update real-time timeout queue.
	 * At front of queue are some number of events which are ``due''.
	 * The time to these is <= 0 and if negative represents the
	 * number of ticks which have passed since it was supposed to happen.
	 * The rest of the q elements (times > 0) are events yet to happen,
	 * where the time for each is given as a delta from the previous.
	 * Decrementing just the first of these serves to decrement the time
	 * to all events.
	 */
	for (p1 = calltodo.c_next; p1 && p1->c_time <= 0; p1 = p1->c_next)
		--p1->c_time;
	if (p1)
		--p1->c_time;
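	/*
	 * For example, timeouts due 2, 5 and 8 ticks from now sit on the
	 * queue with c_time 2, 3, 3.  While nothing is due, each tick
	 * decrements only the head entry; once the head reaches zero it is
	 * ``due'', and further ticks drive it negative (recording how late
	 * it is until softclock() runs) while the next pending entry keeps
	 * counting down.
	 */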

	/*
	 * Charge the time out based on the mode the cpu is in.
	 * Here again we fudge for the lack of proper interval timers
	 * assuming that the current state has been around at least
	 * one tick.
	 */
	if (USERMODE(ps)) {
		/*
		 * CPU was in user state.  Increment
		 * user time counter, and process process-virtual time
		 * interval timer.
		 */
		bumptime(&u.u_ru.ru_utime, tick);
		if (timerisset(&u.u_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&u.u_timer[ITIMER_VIRTUAL], tick) == 0)
			psignal(u.u_procp, SIGVTALRM);
		if (u.u_procp->p_nice > NZERO)
			cpstate = CP_NICE;
		else
			cpstate = CP_USER;
	} else {
		/*
		 * CPU was in system state.  If profiling kernel
		 * increment a counter.  If no process is running
		 * then this is a system tick if we were running
		 * at a non-zero IPL (in a driver).  If a process is running,
		 * then we charge it with system time even if we were
		 * at a non-zero IPL, since the system often runs
		 * this way during processing of system calls.
		 * This is approximate, but the lack of true interval
		 * timers makes doing anything else difficult.
		 */
#ifdef GPROF
		int k = pc - s_lowpc;
		if (profiling < 2 && k < s_textsize)
			kcount[k / (HISTFRACTION * sizeof (*kcount))]++;
#endif
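		/*
		 * (The GPROF code above maintains the kernel pc-sampling
		 * histogram: the interrupted pc's offset from s_lowpc picks
		 * a kcount[] bucket, each bucket covering
		 * HISTFRACTION * sizeof (*kcount) bytes of kernel text.)
		 */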
		cpstate = CP_SYS;
		if (noproc) {
			if (BASEPRI(ps))
				cpstate = CP_IDLE;
		} else {
			bumptime(&u.u_ru.ru_stime, tick);
		}
	}

	/*
	 * If the cpu is currently scheduled to a process, then
	 * charge it with resource utilization for a tick, updating
	 * statistics which run in (user+system) virtual time,
	 * such as the cpu time limit and profiling timers.
	 * This assumes that the current process has been running
	 * the entire last tick.
	 */
	if (noproc == 0 && cpstate != CP_IDLE) {
		if ((u.u_ru.ru_utime.tv_sec+u.u_ru.ru_stime.tv_sec+1) >
		    u.u_rlimit[RLIMIT_CPU].rlim_cur) {
			psignal(u.u_procp, SIGXCPU);
			if (u.u_rlimit[RLIMIT_CPU].rlim_cur <
			    u.u_rlimit[RLIMIT_CPU].rlim_max)
				u.u_rlimit[RLIMIT_CPU].rlim_cur += 5;
		}
		if (timerisset(&u.u_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&u.u_timer[ITIMER_PROF], tick) == 0)
			psignal(u.u_procp, SIGPROF);
		s = u.u_procp->p_rssize;
		u.u_ru.ru_idrss += s; u.u_ru.ru_isrss += 0;	/* XXX */
		if (u.u_procp->p_textp) {
			register int xrss = u.u_procp->p_textp->x_rssize;

			s += xrss;
			u.u_ru.ru_ixrss += xrss;
		}
		if (s > u.u_ru.ru_maxrss)
			u.u_ru.ru_maxrss = s;
	}
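	/*
	 * Note that exceeding the cpu limit raises the soft limit by 5
	 * seconds (until it reaches the hard limit), so SIGXCPU recurs
	 * roughly every 5 seconds of additional cpu time rather than on
	 * every subsequent tick.
	 */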

	/*
	 * We maintain statistics shown by user-level statistics
	 * programs:  the amount of time in each cpu state, and
	 * the amount of time each of DK_NDRIVE ``drives'' is busy.
	 */
	cp_time[cpstate]++;
	for (s = 0; s < DK_NDRIVE; s++)
		if (dk_busy&(1<<s))
			dk_time[s]++;

	/*
	 * We adjust the priority of the current process.
	 * The priority of a process gets worse as it accumulates
	 * CPU time.  The cpu usage estimator (p_cpu) is increased here
	 * and the formula for computing priorities (in kern_synch.c)
	 * will compute a different value each time the p_cpu increases
	 * by 4.  The cpu usage estimator ramps up quite quickly when
	 * the process is running (linearly), and decays away exponentially,
	 * at a rate which is proportionally slower when the system is
	 * busy.  The basic principle is that the system will 90% forget
	 * that a process used a lot of CPU time in 5*loadav seconds.
	 * This causes the system to favor processes which haven't run
	 * much recently, and to round-robin among other processes.
	 */
	if (!noproc) {
		p = u.u_procp;
		p->p_cpticks++;
		if (++p->p_cpu == 0)
			p->p_cpu--;
#ifdef MUSH
		p->p_quota->q_cost += (p->p_nice > NZERO ?
		    (shconsts.sc_tic * ((2*NZERO)-p->p_nice)) / NZERO :
		    shconsts.sc_tic) * (((int)avenrun[0]+2)/3);
#endif
		if ((p->p_cpu&3) == 0) {
			(void) setpri(p);
			if (p->p_pri >= PUSER)
				p->p_pri = p->p_usrpri;
		}
	}
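	/*
	 * To see where the ``90% forgotten in 5*loadav seconds'' figure
	 * comes from (illustrative arithmetic only; the actual decay is
	 * applied in kern_synch.c): a once-per-second filter of the form
	 *
	 *	p_cpu = (2*loadav / (2*loadav + 1)) * p_cpu
	 *
	 * leaves roughly (2L/(2L+1))**(5L) ~ e**-2.5, about 8%, of the
	 * estimate after 5*loadav seconds, i.e. ~90% is forgotten.
	 */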

	/*
	 * Increment the time-of-day, and schedule
	 * processing of the callouts at a very low cpu priority,
	 * so we don't keep the relatively high clock interrupt
	 * priority any longer than necessary.
	 */
	bumptime(&time, tick);
	setsoftclock();
}

/*
 * Software priority level clock interrupt.
 * Run periodic events from timeout queue.
 */
/*ARGSUSED*/
#ifdef vax
softclock(pc, ps)
	caddr_t pc;
	int ps;
{
#endif
#ifdef sun
softclock()
{
	int ps = u.u_ar0[PS];
	caddr_t pc = (caddr_t)u.u_ar0[PC];
#endif

	for (;;) {
		register struct callout *p1;
		register caddr_t arg;
		register int (*func)();
		register int a, s;

		s = spl7();
		if ((p1 = calltodo.c_next) == 0 || p1->c_time > 0) {
			splx(s);
			break;
		}
		arg = p1->c_arg; func = p1->c_func; a = p1->c_time;
		calltodo.c_next = p1->c_next;
		p1->c_next = callfree;
		callfree = p1;
		splx(s);
		(*func)(arg, a);
	}
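	/*
	 * Each due entry is unlinked and put back on the free list with
	 * the queue protected at spl7(), but the function itself is only
	 * called after splx(), so slow timeout handlers run at softclock
	 * priority instead of holding the queue locked out.
	 */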
	/*
	 * If trapped user-mode, give it a profiling tick.
	 */
	if (USERMODE(ps) && u.u_prof.pr_scale) {
		u.u_procp->p_flag |= SOWEUPC;
		aston();
	}
}

/*
 * Arrange that (*fun)(arg) is called in tim/hz seconds.
 */
timeout(fun, arg, tim)
	int (*fun)();
	caddr_t arg;
	int tim;
{
	register struct callout *p1, *p2, *pnew;
	register int t;
	int s;

	t = tim;
	s = spl7();
	pnew = callfree;
	if (pnew == NULL)
		panic("timeout table overflow");
	callfree = pnew->c_next;
	pnew->c_arg = arg;
	pnew->c_func = fun;
	for (p1 = &calltodo; (p2 = p1->c_next) && p2->c_time < t; p1 = p2)
		if (p2->c_time > 0)
			t -= p2->c_time;
	p1->c_next = pnew;
	pnew->c_next = p2;
	pnew->c_time = t;
	if (p2)
		p2->c_time -= t;
	splx(s);
}

/*
 * untimeout is called to remove a function timeout call
 * from the callout structure.
 */
untimeout(fun, arg)
	int (*fun)();
	caddr_t arg;
{
	register struct callout *p1, *p2;
	register int s;

	s = spl7();
	for (p1 = &calltodo; (p2 = p1->c_next) != 0; p1 = p2) {
		if (p2->c_func == fun && p2->c_arg == arg) {
			if (p2->c_next && p2->c_time > 0)
				p2->c_next->c_time += p2->c_time;
			p1->c_next = p2->c_next;
			p2->c_next = callfree;
			callfree = p2;
			break;
		}
	}
	splx(s);
}
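/*
 * Usage sketch: to have (*fun)(arg) run about two seconds from now,
 * and to cancel the request if it is no longer wanted, a caller would do
 *
 *	timeout(fun, (caddr_t)arg, 2 * hz);
 *	...
 *	untimeout(fun, (caddr_t)arg);
 *
 * The third argument to timeout() is in clock ticks (hz per second),
 * and the (fun, arg) pair is what untimeout() matches on.
 */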

/*
 * Compute number of hz until specified time.
 * Used to compute third argument to timeout() from an
 * absolute time.
 */
hzto(tv)
	struct timeval *tv;
{
	register long ticks;
	register long sec;
	int s = spl7();

	/*
	 * If number of milliseconds will fit in 32 bit arithmetic,
	 * then compute number of milliseconds to time and scale to
	 * ticks.  Otherwise just compute number of hz in time, rounding
	 * times greater than representable to maximum value.
	 *
	 * Delta times less than 25 days can be computed ``exactly''.
	 * Maximum value for any timeout in 10ms ticks is 250 days.
	 */
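	/*
	 * (For illustration, assuming hz = 100 and tick = 10000 usec:
	 * the millisecond path below covers deltas up to about
	 * 0x7fffffff/1000 - 1000 = 2146483 seconds, just under 25 days;
	 * the sec * hz path covers up to 0x7fffffff/hz = 21474836
	 * seconds, about 248 days, hence the 250 day figure above.)
	 */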
	sec = tv->tv_sec - time.tv_sec;
	if (sec <= 0x7fffffff / 1000 - 1000)
		ticks = ((tv->tv_sec - time.tv_sec) * 1000 +
			(tv->tv_usec - time.tv_usec) / 1000) / (tick / 1000);
	else if (sec <= 0x7fffffff / hz)
		ticks = sec * hz;
	else
		ticks = 0x7fffffff;
	splx(s);
	return (ticks);
}