/*	$OpenBSD: kern_clock.c,v 1.45 2004/06/21 23:50:35 tholo Exp $	*/
/*	$NetBSD: kern_clock.c,v 1.34 1996/06/09 04:51:03 briggs Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dkstat.h>
#include <sys/timeout.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <uvm/uvm_extern.h>
#include <sys/sysctl.h>
#include <sys/sched.h>

#include <machine/cpu.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.  The main clock, running hz times per second, is used to keep
 * track of real time.  The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the cpu
 * just before its quantum expires.  Otherwise, it would never accumulate
 * cpu ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 */
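
/*
 * Illustrative numbers (not required by anything below): with
 * stathz = 128 and profhz = 1024, psratio is 8, so while profiling
 * only every 8th statistics tick is charged to cp_time and the
 * per-process counters; the other seven serve profiling alone.
 */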

/*
 * Bump a timeval by a small number of microseconds.
 */
#define BUMPTIME(t, usec) { \
	register volatile struct timeval *tp = (t); \
	register long us; \
 \
	tp->tv_usec = us = tp->tv_usec + (usec); \
	if (us >= 1000000) { \
		tp->tv_usec = us - 1000000; \
		tp->tv_sec++; \
	} \
}
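
/*
 * Example of the macro above, with made-up values: starting from
 * tv_sec = 10, tv_usec = 999990, BUMPTIME(&tv, 20) yields
 * tv_sec = 11, tv_usec = 10 -- the microsecond overflow is folded
 * into the seconds field.
 */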

int	stathz;
int	schedhz;
int	profhz;
int	profprocs;
int	ticks;
static int psdiv, pscnt;		/* prof => stat divider */
int	psratio;			/* ratio: prof / stat */
int	tickfix, tickfixinterval;	/* used if tick not really integral */
static int tickfixcnt;			/* accumulated fractional error */

long cp_time[CPUSTATES];

volatile time_t time_second;
volatile time_t time_uptime;

volatile struct	timeval time
	__attribute__((__aligned__(__alignof__(quad_t))));
volatile struct	timeval mono_time;

#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
void	*softclock_si;
void	generic_softclock(void *);

void
generic_softclock(void *ignore)
{
	/*
	 * XXX - don't commit; this is just a dummy wrapper until everyone
	 *       has been converted to the new softclock() prototype.
	 */
	softclock();
}
#endif

/*
 * Initialize clock frequencies and start both clocks running.
 */
void
initclocks()
{
	int i;

#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
	softclock_si = softintr_establish(IPL_SOFTCLOCK, generic_softclock, NULL);
	if (softclock_si == NULL)
		panic("initclocks: unable to register softclock intr");
#endif

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	psdiv = pscnt = 1;
	cpu_initclocks();

	/*
	 * Compute profhz/stathz, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;
}
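
/*
 * For example, if cpu_initclocks() leaves stathz and profhz at 0
 * (no separate statistics clock), the code above picks i = hz,
 * defaults profhz to hz, and psratio becomes 1: profiling and
 * statistics are then both driven from the main clock.
 */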

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(struct clockframe *frame)
{
	struct proc *p;
	int delta;
	extern int tickdelta;
	extern long timedelta;
#ifdef __HAVE_CPUINFO
	struct cpu_info *ci = curcpu();
#endif

	p = curproc;
	if (p) {
		register struct pstats *pstats;

		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		pstats = p->p_stats;
		if (CLKF_USERMODE(frame) &&
		    timerisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
			psignal(p, SIGVTALRM);
		if (timerisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
			psignal(p, SIGPROF);
	}

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);

#if defined(__HAVE_CPUINFO)
	if (--ci->ci_schedstate.spc_rrticks <= 0)
		roundrobin(ci);

	/*
	 * If we are not the primary CPU, we're not allowed to do
	 * any more work.
	 */
	if (CPU_IS_PRIMARY(ci) == 0)
		return;
#endif

	/*
	 * Increment the time-of-day.  The increment is normally just
	 * ``tick''.  If the machine is one whose clock frequency is
	 * such that ``hz'' does not evenly divide the second into
	 * microseconds (i.e. ``tick'' is not exact), a periodic
	 * adjustment must be applied.  Finally, if we are still
	 * adjusting the time (see adjtime()), ``tickdelta'' may also
	 * be added in.
	 */
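	/*
	 * Worked example (illustrative): with hz = 1024, tick is
	 * 1000000 / 1024 = 976 us, so hz * tick = 999424 us and the
	 * clock would lose 576 us per second.  If tickfix and
	 * tickfixinterval are set up (outside this file) to spread
	 * that shortfall out, the code below adds one extra
	 * microsecond to delta often enough to make up the
	 * difference; adjtime(2) corrections are folded in the same
	 * way through tickdelta/timedelta.
	 */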
	ticks++;
	delta = tick;

	if (tickfix) {
		tickfixcnt += tickfix;
		if (tickfixcnt >= tickfixinterval) {
			delta++;
			tickfixcnt -= tickfixinterval;
		}
	}
	/* Imprecise 4bsd adjtime() handling */
	if (timedelta != 0) {
		delta += tickdelta;
		timedelta -= tickdelta;
	}

#ifdef notyet
	microset();
#endif

	BUMPTIME(&time, delta);
	BUMPTIME(&mono_time, delta);
	time_second = time.tv_sec;
	time_uptime = mono_time.tv_sec;

#ifdef CPU_CLOCKUPDATE
	CPU_CLOCKUPDATE();
#endif

	/*
	 * Update real-time timeout queue.
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	if (timeout_hardclock_update()) {
#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
		softintr_schedule(softclock_si);
#else
		setsoftclock();
#endif
	}
}
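
/*
 * Sketch of how the timeout wheel advanced above is normally used
 * (illustrative only; my_callback and my_arg are placeholders, the
 * timeout_set()/timeout_add() calls are the standard timeout(9)
 * interface):
 *
 *	struct timeout to;
 *
 *	timeout_set(&to, my_callback, my_arg);
 *	timeout_add(&to, hz);		(fires roughly one second later)
 *
 * hardclock() moves the wheel forward once per tick and schedules
 * softclock(), which eventually invokes my_callback(my_arg).
 */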

/*
 * Compute the number of clock ticks until the specified time.  Used to
 * compute the second argument to timeout_add() from an absolute time.
 */
int
hzto(tv)
	struct timeval *tv;
{
	unsigned long ticks;
	long sec, usec;
	int s;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints have 32 bits, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
	s = splhigh();
	sec = tv->tv_sec - time.tv_sec;
	usec = tv->tv_usec - time.tv_usec;
	splx(s);
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0 || (sec == 0 && usec <= 0)) {
		ticks = 0;
	} else if (sec <= LONG_MAX / 1000000)
		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
		    / tick + 1;
	else if (sec <= LONG_MAX / hz)
		ticks = sec * hz
		    + ((unsigned long)usec + (tick - 1)) / tick + 1;
	else
		ticks = LONG_MAX;
	if (ticks > INT_MAX)
		ticks = INT_MAX;
	return ((int)ticks);
}
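
/*
 * Example (illustrative): with hz = 100 (tick = 10000 us), a target
 * 50000 us in the future gives (50000 + 9999) / 10000 + 1 = 6 ticks;
 * the extra tick allows for the partially elapsed current tick.  The
 * "248 days" figure above follows from INT_MAX ticks of 10 ms each:
 * 2147483647 / 100 seconds is a little under 249 days.
 */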

/*
 * Compute the number of clock ticks in the specified amount of time.
 */
int
tvtohz(struct timeval *tv)
{
	unsigned long ticks;
	long sec, usec;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time down to the maximum
	 * representable value.
	 *
	 * If ints have 32 bits, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (sec < 0 || (sec == 0 && usec <= 0))
		ticks = 0;
	else if (sec <= LONG_MAX / 1000000)
		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
		    / tick + 1;
	else if (sec <= LONG_MAX / hz)
		ticks = sec * hz
		    + ((unsigned long)usec + (tick - 1)) / tick + 1;
	else
		ticks = LONG_MAX;
	if (ticks > INT_MAX)
		ticks = INT_MAX;
	return ((int)ticks);
}
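
/*
 * Example (illustrative): with hz = 100, a timeval of 0.015 seconds
 * converts to (15000 + 9999) / 10000 + 1 = 3 ticks; the interval is
 * always rounded up and padded by one tick so a timeout never fires
 * early.
 */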

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(p)
	register struct proc *p;
{
	int s;

	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		if (++profprocs == 1 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = psratio;
			setstatclockrate(profhz);
			splx(s);
		}
	}
}
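
/*
 * Illustrative numbers: with stathz = 128 and profhz = 1024, the
 * first process to enable profiling switches the statistics clock to
 * 1024 Hz and sets psdiv = pscnt = psratio = 8, so statclock() keeps
 * charging statistics at the old 128 Hz rate while profiling samples
 * arrive at the full 1024 Hz.
 */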

/*
 * Stop profiling on a process.
 */
void
stopprofclock(p)
	register struct proc *p;
{
	int s;

	if (p->p_flag & P_PROFIL) {
		p->p_flag &= ~P_PROFIL;
		if (--profprocs == 0 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = 1;
			setstatclockrate(stathz);
			splx(s);
		}
	}
}

/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.
 */
void
statclock(struct clockframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	int i;
#endif
#ifdef __HAVE_CPUINFO
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc = &ci->ci_schedstate;
#else
	static int schedclk;
#endif
	struct proc *p = curproc;

#ifdef __HAVE_CPUINFO
	/*
	 * Notice changes in divisor frequency, and adjust clock
	 * frequency accordingly.
	 */
	if (spc->spc_psdiv != psdiv) {
		spc->spc_psdiv = psdiv;
		spc->spc_pscnt = psdiv;
		if (psdiv == 1) {
			setstatclockrate(stathz);
		} else {
			setstatclockrate(profhz);
		}
	}

/* XXX Kludgey */
#define pscnt spc->spc_pscnt
#define cp_time spc->spc_cp_time
#endif

	if (CLKF_USERMODE(frame)) {
		if (p->p_flag & P_PROFIL)
			addupc_intr(p, CLKF_PC(frame));
		if (--pscnt > 0)
			return;
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled, record the tick.
		 */
		p->p_uticks++;
		if (p->p_nice > NZERO)
			cp_time[CP_NICE]++;
		else
			cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
		if (--pscnt > 0)
			return;
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if (CLKF_INTR(frame)) {
			if (p != NULL)
				p->p_iticks++;
			cp_time[CP_INTR]++;
		} else if (p != NULL) {
			p->p_sticks++;
			cp_time[CP_SYS]++;
		} else
			cp_time[CP_IDLE]++;
	}
	pscnt = psdiv;

#ifdef __HAVE_CPUINFO
#undef pscnt
#undef cp_time
#endif

	if (p != NULL) {
		p->p_cpticks++;
		/*
		 * If no separate schedclock is provided, call it here
		 * at roughly 12-25 Hz; about 16 Hz is best.
		 */
		if (schedhz == 0) {
#ifdef __HAVE_CPUINFO
			if ((++curcpu()->ci_schedstate.spc_schedticks & 3) ==
			    0)
				schedclock(p);
#else
			if ((++schedclk & 3) == 0)
				schedclock(p);
#endif
		}
	}
}
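
/*
 * The "& 3" divider above runs schedclock() at roughly a quarter of
 * the effective statistics rate; e.g. with stathz = 100 that is about
 * 25 calls per second, within the 12-25 Hz range mentioned in the
 * comment.
 */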

/*
 * Return information about system clocks.
 */
int
sysctl_clockrate(where, sizep)
	register char *where;
	size_t *sizep;
{
	struct clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.tick = tick;
	clkinfo.tickadj = tickadj;
	clkinfo.hz = hz;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_rdstruct(where, sizep, NULL, &clkinfo, sizeof(clkinfo)));
}
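
/*
 * Userland view of the structure filled in above -- an illustrative
 * sketch (not part of this file) reading kern.clockrate with
 * sysctl(3):
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <sys/time.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct clockinfo ci;
 *		size_t len = sizeof(ci);
 *		int mib[2] = { CTL_KERN, KERN_CLOCKRATE };
 *
 *		if (sysctl(mib, 2, &ci, &len, NULL, 0) == -1)
 *			return (1);
 *		printf("hz %d stathz %d profhz %d tick %d\n",
 *		    ci.hz, ci.stathz, ci.profhz, ci.tick);
 *		return (0);
 *	}
 */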