/*	$OpenBSD: kern_clock.c,v 1.88 2015/06/11 16:03:04 mikeb Exp $	*/
/*	$NetBSD: kern_clock.c,v 1.34 1996/06/09 04:51:03 briggs Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/timeout.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sched.h>
#include <sys/timetc.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.  The main clock, running hz times per second, is used to keep
 * track of real time.  The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the cpu
 * just before its quantum expires.  Otherwise, it would never accumulate
 * cpu ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 */
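
/*
 * Worked example (an illustrative addition; the rates are assumed, typical
 * values, not ones set by this file): with hz == 100, stathz == 128 and
 * profhz == 1024, the ratio profhz / stathz == 8, so while profiling the
 * statistics code must act on only every 8th statclock() tick.
 */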

/*
 * Bump a timeval by a small number of usec's.
 */
#define BUMPTIME(t, usec) { \
	volatile struct timeval *tp = (t); \
	long us; \
 \
	tp->tv_usec = us = tp->tv_usec + (usec); \
	if (us >= 1000000) { \
		tp->tv_usec = us - 1000000; \
		tp->tv_sec++; \
	} \
}
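
/*
 * Usage sketch (illustrative only; BUMPTIME is not exercised here).  Note
 * the macro handles a single microsecond wrap, so the increment must stay
 * well below 1000000.
 */
#if 0
	struct timeval tv = { .tv_sec = 0, .tv_usec = 999900 };

	BUMPTIME(&tv, 200);	/* tv is now { .tv_sec = 1, .tv_usec = 100 } */
#endif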

int	stathz;
int	schedhz;
int	profhz;
int	profprocs;
int	ticks;
static int psdiv, pscnt;		/* prof => stat divider */
int	psratio;			/* ratio: prof / stat */

void	*softclock_si;

/*
 * Initialize clock frequencies and start both clocks running.
 */
void
initclocks(void)
{
	int i;

	softclock_si = softintr_establish(IPL_SOFTCLOCK, softclock, NULL);
	if (softclock_si == NULL)
		panic("initclocks: unable to register softclock intr");

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	psdiv = pscnt = 1;
	cpu_initclocks();

	/*
	 * Compute profhz/stathz, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;

	/* For very large HZ, ensure that division by 0 does not occur later */
	if (tickadj == 0)
		tickadj = 1;

	inittimecounter();
}
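
/*
 * Worked example (hedged; the rates below are assumptions, not values set
 * by this file): if cpu_initclocks() configured stathz == 128 and
 * profhz == 1024, then psratio == 1024 / 128 == 8.  If the machine left
 * profhz at 0, it is pinned to the statistics rate and psratio == 1, so
 * statclock() needs no extra division.
 */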

/*
 * hardclock does the accounting needed for ITIMER_PROF and ITIMER_VIRTUAL.
 * We don't want to send signals with psignal from hardclock because it
 * makes MULTIPROCESSOR locking very complicated.  Instead, borrowing an
 * idea from FreeBSD, we set a flag on the thread, and when it returns to
 * userspace it signals itself.
 */

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(struct clockframe *frame)
{
	struct proc *p;
	struct cpu_info *ci = curcpu();

	p = curproc;
	if (p && ((p->p_flag & (P_SYSTEM | P_WEXIT)) == 0)) {
		struct process *pr = p->p_p;

		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		if (CLKF_USERMODE(frame) &&
		    timerisset(&pr->ps_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pr->ps_timer[ITIMER_VIRTUAL], tick) == 0) {
			atomic_setbits_int(&p->p_flag, P_ALRMPEND);
			need_proftick(p);
		}
		if (timerisset(&pr->ps_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pr->ps_timer[ITIMER_PROF], tick) == 0) {
			atomic_setbits_int(&p->p_flag, P_PROFPEND);
			need_proftick(p);
		}
	}

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);

	if (--ci->ci_schedstate.spc_rrticks <= 0)
		roundrobin(ci);

	/*
	 * If we are not the primary CPU, we're not allowed to do
	 * any more work.
	 */
	if (CPU_IS_PRIMARY(ci) == 0)
		return;

	tc_ticktock();

	/*
	 * Update real-time timeout queue.
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	if (timeout_hardclock_update())
		softintr_schedule(softclock_si);
}
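
/*
 * Timing sketch (illustrative, assuming hz == 100, i.e. tick == 10000
 * usec): an ITIMER_VIRTUAL timer armed with 50 msec loses 10000 usec on
 * each user-mode hardclock() on the process's CPU, so itimerdecr()
 * returns 0 on the fifth tick.  The P_ALRMPEND flag set above is then
 * converted into a signal when the thread next returns to user space.
 */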

/*
 * Compute number of hz in the specified amount of time.
 */
int
tvtohz(const struct timeval *tv)
{
	unsigned long nticks;
	time_t sec;
	long usec;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time down to the maximum
	 * representable value.
	 *
	 * If ints have 32 bits, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (sec < 0 || (sec == 0 && usec <= 0))
		nticks = 0;
	else if (sec <= LONG_MAX / 1000000)
		nticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
		    / tick + 1;
	else if (sec <= LONG_MAX / hz)
		nticks = sec * hz
		    + ((unsigned long)usec + (tick - 1)) / tick + 1;
	else
		nticks = LONG_MAX;
	if (nticks > INT_MAX)
		nticks = INT_MAX;
	return ((int)nticks);
}
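
/*
 * Worked example (illustrative, assuming hz == 100 and tick == 10000):
 * for tv = { .tv_sec = 2, .tv_usec = 500000 } the first branch applies,
 * giving (2000000 + 500000 + 9999) / 10000 + 1 == 251 ticks: 2.5 seconds
 * rounded up, plus one tick for the current one already in progress.
 */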

int
tstohz(const struct timespec *ts)
{
	struct timeval tv;
	TIMESPEC_TO_TIMEVAL(&tv, ts);

	/* Round up. */
	if ((ts->tv_nsec % 1000) != 0) {
		tv.tv_usec += 1;
		if (tv.tv_usec >= 1000000) {
			tv.tv_usec -= 1000000;
			tv.tv_sec += 1;
		}
	}

	return (tvtohz(&tv));
}
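
/*
 * Example (illustrative): for ts = { .tv_sec = 0, .tv_nsec = 1500 },
 * TIMESPEC_TO_TIMEVAL() truncates to 1 usec; since 1500 % 1000 != 0 the
 * count is bumped to 2 usec before tvtohz() runs, so a sub-microsecond
 * remainder lengthens the timeout rather than silently shortening it.
 */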

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct process *pr)
{
	int s;

	if ((pr->ps_flags & PS_PROFIL) == 0) {
		atomic_setbits_int(&pr->ps_flags, PS_PROFIL);
		if (++profprocs == 1 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = psratio;
			setstatclockrate(profhz);
			splx(s);
		}
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(struct process *pr)
{
	int s;

	if (pr->ps_flags & PS_PROFIL) {
		atomic_clearbits_int(&pr->ps_flags, PS_PROFIL);
		if (--profprocs == 0 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = 1;
			setstatclockrate(stathz);
			splx(s);
		}
	}
}
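
/*
 * Illustrative pairing (hedged; the actual callers live elsewhere in the
 * kernel):
 *
 *	startprofclock(pr);	first profiled process: clock -> profhz
 *	...
 *	stopprofclock(pr);	last profiled process: clock -> stathz
 *
 * profprocs acts as a reference count, so the statclock rate is switched
 * only on the 0 -> 1 and 1 -> 0 transitions.
 */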

/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.
 */
void
statclock(struct clockframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	u_long i;
#endif
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	struct proc *p = curproc;
	struct process *pr;

	/*
	 * Notice changes in divisor frequency, and adjust clock
	 * frequency accordingly.
	 */
	if (spc->spc_psdiv != psdiv) {
		spc->spc_psdiv = psdiv;
		spc->spc_pscnt = psdiv;
		if (psdiv == 1) {
			setstatclockrate(stathz);
		} else {
			setstatclockrate(profhz);
		}
	}

	if (CLKF_USERMODE(frame)) {
		pr = p->p_p;
		if (pr->ps_flags & PS_PROFIL)
			addupc_intr(p, CLKF_PC(frame));
		if (--spc->spc_pscnt > 0)
			return;
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled record the tick.
		 */
		p->p_uticks++;
		if (pr->ps_nice > NZERO)
			spc->spc_cp_time[CP_NICE]++;
		else
			spc->spc_cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = ci->ci_gmon;
		if (g != NULL && g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
#if defined(PROC_PC)
		if (p != NULL && p->p_p->ps_flags & PS_PROFIL)
			addupc_intr(p, PROC_PC(p));
#endif
		if (--spc->spc_pscnt > 0)
			return;
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if (CLKF_INTR(frame)) {
			if (p != NULL)
				p->p_iticks++;
			spc->spc_cp_time[CP_INTR]++;
		} else if (p != NULL && p != spc->spc_idleproc) {
			p->p_sticks++;
			spc->spc_cp_time[CP_SYS]++;
		} else
			spc->spc_cp_time[CP_IDLE]++;
	}
	spc->spc_pscnt = psdiv;

	if (p != NULL) {
		p->p_cpticks++;
		/*
		 * If no schedclock is provided, call it here at ~12-25 Hz;
		 * ~16 Hz is best.
		 */
		if (schedhz == 0) {
			if ((++curcpu()->ci_schedstate.spc_schedticks & 3) ==
			    0)
				schedclock(p);
		}
	}
}
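
/*
 * Accounting sketch (illustrative, with the assumed rates stathz == 128
 * and psdiv == 8 while profiling): statclock() then fires at 1024 Hz and
 * takes a profile sample on every tick, but spc_pscnt reaches 0 only on
 * every 8th tick, so the CP_* time buckets still advance at ~128 Hz.
 */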

/*
 * Return information about system clocks.
 */
int
sysctl_clockrate(char *where, size_t *sizep, void *newp)
{
	struct clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.tick = tick;
	clkinfo.tickadj = tickadj;
	clkinfo.hz = hz;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_rdstruct(where, sizep, newp, &clkinfo, sizeof(clkinfo)));
}
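
/*
 * Userland sketch (an illustrative addition, not part of this file): the
 * clockinfo structure built above is what a program receives from
 * sysctl(3) with the { CTL_KERN, KERN_CLOCKRATE } MIB.  Wrapped in
 * "#if 0" so it cannot be compiled into the kernel.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <stdio.h>

int
main(void)
{
	struct clockinfo ci;
	size_t len = sizeof(ci);
	int mib[2] = { CTL_KERN, KERN_CLOCKRATE };

	/* Read-only query: oldp receives the struct filled in above. */
	if (sysctl(mib, 2, &ci, &len, NULL, 0) == -1)
		return (1);
	printf("hz=%d tick=%d stathz=%d profhz=%d\n",
	    ci.hz, ci.tick, ci.stathz, ci.profhz);
	return (0);
}
#endif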