/*	$NetBSD: kern_clock.c,v 1.77 2001/09/13 05:22:17 enami Exp $	*/

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include "opt_callout.h"
#include "opt_ntp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dkstat.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <uvm/uvm_extern.h>
#include <sys/sysctl.h>
#include <sys/timex.h>
#include <sys/sched.h>
#ifdef CALLWHEEL_STATS
#include <sys/device.h>
#endif

#include <machine/cpu.h>
#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
#include <machine/intr.h>
#endif

#ifdef GPROF
#include <sys/gmon.h>
#endif

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.  The main clock, running hz times per second, is used to keep
 * track of real time.  The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the cpu
 * just before its quantum expires.  Otherwise, it would never accumulate
 * cpu ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 */
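
/*
 * Illustrative example (not part of the code): with stathz = 128 and
 * profhz = 1024, psratio is 8.  While profiling, the statistics clock
 * runs at profhz, a profile sample is taken on every tick, and only
 * every 8th tick is charged to the usual cp_time statistics.
 */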

#ifdef NTP	/* NTP phase-locked loop in kernel */
/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The following variables are read and set by the ntp_adjtime() system
 * call.
 *
 * time_state shows the state of the system clock, with values defined
 * in the timex.h header file.
 *
 * time_status shows the status of the system clock, with bits defined
 * in the timex.h header file.
 *
 * time_offset is used by the PLL/FLL to adjust the system time in small
 * increments.
 *
 * time_constant determines the bandwidth or "stiffness" of the PLL.
 *
 * time_tolerance determines maximum frequency error or tolerance of the
 * CPU clock oscillator and is a property of the architecture; however,
 * in principle it could change as result of the presence of external
 * discipline signals, for instance.
 *
 * time_precision is usually equal to the kernel tick variable; however,
 * in cases where a precision clock counter or external clock is
 * available, the resolution can be much less than this and depend on
 * whether the external clock is working or not.
 *
 * time_maxerror is initialized by a ntp_adjtime() call and increased by
 * the kernel once each second to reflect the maximum error bound
 * growth.
 *
 * time_esterror is set and read by the ntp_adjtime() call, but
 * otherwise not used by the kernel.
 */
int time_state = TIME_OK;	/* clock state */
int time_status = STA_UNSYNC;	/* clock status bits */
long time_offset = 0;		/* time offset (us) */
long time_constant = 0;		/* pll time constant */
long time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
long time_precision = 1;	/* clock precision (us) */
long time_maxerror = MAXPHASE;	/* maximum error (us) */
long time_esterror = MAXPHASE;	/* estimated error (us) */

/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock. The scale
 * factors are defined in the timex.h header file.
 *
 * time_phase and time_freq are the phase increment and the frequency
 * increment, respectively, of the kernel time variable.
 *
 * time_freq is set via ntp_adjtime() from a value stored in a file when
 * the synchronization daemon is first started. Its value is retrieved
 * via ntp_adjtime() and written to the file about once per hour by the
 * daemon.
 *
 * time_adj is the adjustment added to the value of tick at each timer
 * interrupt and is recomputed from time_phase and time_freq at each
 * seconds rollover.
 *
 * time_reftime is the second's portion of the system time at the last
 * call to ntp_adjtime(). It is used to adjust the time_freq variable
 * and to increase the time_maxerror as the time since last update
 * increases.
 */
long time_phase = 0;		/* phase offset (scaled us) */
long time_freq = 0;		/* frequency offset (scaled ppm) */
long time_adj = 0;		/* tick adjust (scaled 1 / hz) */
long time_reftime = 0;		/* time at last adjustment (s) */

#ifdef PPS_SYNC
/*
 * The following variables are used only if the kernel PPS discipline
 * code is configured (PPS_SYNC). The scale factors are defined in the
 * timex.h header file.
 *
 * pps_time contains the time at each calibration interval, as read by
 * microtime(). pps_count counts the seconds of the calibration
 * interval, the duration of which is nominally pps_shift in powers of
 * two.
 *
 * pps_offset is the time offset produced by the time median filter
 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
 * this filter.
 *
 * pps_freq is the frequency offset produced by the frequency median
 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
 * by this filter.
 *
 * pps_usec is latched from a high resolution counter or external clock
 * at pps_time. Here we want the hardware counter contents only, not the
 * contents plus the time_tv.usec as usual.
 *
 * pps_valid counts the number of seconds since the last PPS update. It
 * is used as a watchdog timer to disable the PPS discipline should the
 * PPS signal be lost.
 *
 * pps_glitch counts the number of seconds since the beginning of an
 * offset burst more than tick/2 from current nominal offset. It is used
 * mainly to suppress error bursts due to priority conflicts between the
 * PPS interrupt and timer interrupt.
 *
 * pps_intcnt counts the calibration intervals for use in the interval-
 * adaptation algorithm. It's just too complicated for words.
 */
struct timeval pps_time;	/* kernel time at last interval */
long pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
long pps_offset = 0;		/* pps time offset (us) */
long pps_jitter = MAXTIME;	/* time dispersion (jitter) (us) */
long pps_ff[] = {0, 0, 0};	/* pps frequency offset median filter */
long pps_freq = 0;		/* frequency offset (scaled ppm) */
long pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
long pps_usec = 0;		/* microsec counter at last interval */
long pps_valid = PPS_VALID;	/* pps signal watchdog counter */
int pps_glitch = 0;		/* pps signal glitch counter */
int pps_count = 0;		/* calibration interval counter (s) */
int pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
int pps_intcnt = 0;		/* intervals at current duration */

/*
 * PPS signal quality monitors
 *
 * pps_jitcnt counts the seconds that have been discarded because the
 * jitter measured by the time median filter exceeds the limit MAXTIME
 * (100 us).
 *
 * pps_calcnt counts the frequency calibration intervals, which are
 * variable from 4 s to 256 s.
 *
 * pps_errcnt counts the calibration intervals which have been discarded
 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 * calibration interval jitter exceeds two ticks.
 *
 * pps_stbcnt counts the calibration intervals that have been discarded
 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
 */
long pps_jitcnt = 0;		/* jitter limit exceeded */
long pps_calcnt = 0;		/* calibration intervals */
long pps_errcnt = 0;		/* calibration errors */
long pps_stbcnt = 0;		/* stability limit exceeded */
#endif /* PPS_SYNC */

#ifdef EXT_CLOCK
/*
 * External clock definitions
 *
 * The following definitions and declarations are used only if an
 * external clock is configured on the system.
 */
#define CLOCK_INTERVAL 30	/* CPU clock update interval (s) */

/*
 * The clock_count variable is set to CLOCK_INTERVAL at each PPS
 * interrupt and decremented once each second.
 */
int clock_count = 0;		/* CPU clock counter */

#ifdef HIGHBALL
/*
 * The clock_offset and clock_cpu variables are used by the HIGHBALL
 * interface. The clock_offset variable defines the offset between
 * system time and the HIGHBALL counters. The clock_cpu variable contains
 * the offset between the system clock and the HIGHBALL clock for use in
 * disciplining the kernel time variable.
 */
extern struct timeval clock_offset; /* Highball clock offset */
long clock_cpu = 0;		/* CPU clock adjust */
#endif /* HIGHBALL */
#endif /* EXT_CLOCK */
#endif /* NTP */


/*
 * Bump a timeval by a small number of usec's.
 */
#define BUMPTIME(t, usec) { \
	volatile struct timeval *tp = (t); \
	long us; \
 \
	tp->tv_usec = us = tp->tv_usec + (usec); \
	if (us >= 1000000) { \
		tp->tv_usec = us - 1000000; \
		tp->tv_sec++; \
	} \
}
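
/*
 * Example (illustrative): if tp->tv_usec is 995000 and usec is 10000,
 * BUMPTIME leaves tv_usec at 5000 and increments tv_sec.  The macro
 * assumes at most one second of carry, so the increment must stay
 * well under 1000000 us.
 */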

int	stathz;
int	profhz;
int	schedhz;
int	profprocs;
int	softclock_running;		/* 1 => softclock() is running */
static int psdiv;			/* prof => stat divider */
int	psratio;			/* ratio: prof / stat */
int	tickfix, tickfixinterval;	/* used if tick not really integral */
#ifndef NTP
static int tickfixcnt;			/* accumulated fractional error */
#else
int	fixtick;			/* used by NTP for same */
int	shifthz;
#endif

/*
 * We might want ldd to load both words of time at once; to succeed,
 * the variable must be quadword aligned.  The sparc already does that,
 * and that it has worked so far is a fluke.
 */
volatile struct	timeval time  __attribute__((__aligned__(__alignof__(quad_t))));
volatile struct	timeval mono_time;

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities", and Justin Gibbs's subsequent
 * integration into FreeBSD, modified for NetBSD by Jason R. Thorpe.
 *
 * The original work on the data structures used in this implementation
 * was published by G. Varghese and A. Lauck in the paper "Hashed and
 * Hierarchical Timing Wheels: Data Structures for the Efficient
 * Implementation of a Timer Facility" in the Proceedings of the 11th
 * ACM Annual Symposium on Operating System Principles, Austin, Texas,
 * November 1987.
 */
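
/*
 * Sketch of the hashing used below (illustrative): with callwheelsize
 * a power of two, a callout due at absolute tick t hangs off bucket
 * callwheel[t & callwheelmask].  The wheel therefore wraps every
 * callwheelsize ticks, and a bucket may also hold callouts for later
 * revolutions; softclock() fires only entries whose c_time equals the
 * current tick and skips the rest.
 */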
struct callout_queue *callwheel;
int	callwheelsize, callwheelbits, callwheelmask;

static struct callout *nextsoftcheck;	/* next callout to be checked */

#ifdef CALLWHEEL_STATS
int	     *callwheel_sizes;		/* per-bucket length count */
struct evcnt callwheel_collisions;	/* number of hash collisions */
struct evcnt callwheel_maxlength;	/* length of the longest hash chain */
struct evcnt callwheel_count;		/* # callouts currently */
struct evcnt callwheel_established;	/* # callouts established */
struct evcnt callwheel_fired;		/* # callouts that fired */
struct evcnt callwheel_disestablished;	/* # callouts disestablished */
struct evcnt callwheel_changed;		/* # callouts changed */
struct evcnt callwheel_softclocks;	/* # times softclock() called */
struct evcnt callwheel_softchecks;	/* # checks per softclock() */
struct evcnt callwheel_softempty;	/* # empty buckets seen */
struct evcnt callwheel_hintworked;	/* # times hint saved scan */
#endif /* CALLWHEEL_STATS */

/*
 * This value indicates the number of consecutive callouts that
 * will be checked before we allow interrupts to have a chance
 * again.
 */
#ifndef MAX_SOFTCLOCK_STEPS
#define	MAX_SOFTCLOCK_STEPS	100
#endif

struct simplelock callwheel_slock;

#define	CALLWHEEL_LOCK(s)						\
do {									\
	s = splclock();							\
	simple_lock(&callwheel_slock);					\
} while (0)

#define	CALLWHEEL_UNLOCK(s)						\
do {									\
	simple_unlock(&callwheel_slock);				\
	splx(s);							\
} while (0)

static void callout_stop_locked(struct callout *);

/*
 * These are both protected by callwheel_slock.
 * XXX SHOULD BE STATIC!!
 */
u_int64_t hardclock_ticks, softclock_ticks;

#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
void	softclock(void *);
void	*softclock_si;
#endif

/*
 * Initialize clock frequencies and start both clocks running.
 */
void
initclocks(void)
{
	int i;

#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
	softclock_si = softintr_establish(IPL_SOFTCLOCK, softclock, NULL);
	if (softclock_si == NULL)
		panic("initclocks: unable to register softclock intr");
#endif

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	psdiv = 1;
	cpu_initclocks();

	/*
	 * Compute profhz/stathz/rrticks, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;
	rrticks = hz / 10;

#ifdef NTP
	switch (hz) {
	case 1:
		shifthz = SHIFT_SCALE - 0;
		break;
	case 2:
		shifthz = SHIFT_SCALE - 1;
		break;
	case 4:
		shifthz = SHIFT_SCALE - 2;
		break;
	case 8:
		shifthz = SHIFT_SCALE - 3;
		break;
	case 16:
		shifthz = SHIFT_SCALE - 4;
		break;
	case 32:
		shifthz = SHIFT_SCALE - 5;
		break;
	case 60:
	case 64:
		shifthz = SHIFT_SCALE - 6;
		break;
	case 96:
	case 100:
	case 128:
		shifthz = SHIFT_SCALE - 7;
		break;
	case 256:
		shifthz = SHIFT_SCALE - 8;
		break;
	case 512:
		shifthz = SHIFT_SCALE - 9;
		break;
	case 1000:
	case 1024:
		shifthz = SHIFT_SCALE - 10;
		break;
	case 1200:
	case 2048:
		shifthz = SHIFT_SCALE - 11;
		break;
	case 4096:
		shifthz = SHIFT_SCALE - 12;
		break;
	case 8192:
		shifthz = SHIFT_SCALE - 13;
		break;
	case 16384:
		shifthz = SHIFT_SCALE - 14;
		break;
	case 32768:
		shifthz = SHIFT_SCALE - 15;
		break;
	case 65536:
		shifthz = SHIFT_SCALE - 16;
		break;
	default:
		panic("weird hz");
	}
	if (fixtick == 0) {
		/*
		 * Give MD code a chance to set this to a better
		 * value; but, if it doesn't, we should.
		 */
		fixtick = (1000000 - (hz*tick));
	}
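	/*
	 * Worked example (illustrative): with hz = 1024, tick is
	 * 1000000 / 1024 = 976, so hz * tick = 999424 and fixtick
	 * becomes 576; NTP then redistributes those 576 us across each
	 * second.  With hz = 100, hz * tick is exactly 1000000 and
	 * fixtick stays 0.
	 */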
#endif
}

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(struct clockframe *frame)
{
	struct proc *p;
	int delta;
	extern int tickdelta;
	extern long timedelta;
	struct cpu_info *ci = curcpu();
#ifdef NTP
	int time_update;
	int ltemp;
#endif

	p = curproc;
	if (p) {
		struct pstats *pstats;

		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		pstats = p->p_stats;
		if (CLKF_USERMODE(frame) &&
		    timerisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
			psignal(p, SIGVTALRM);
		if (timerisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
			psignal(p, SIGPROF);
	}

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);
	if ((--ci->ci_schedstate.spc_rrticks) <= 0)
		roundrobin(ci);

#if defined(MULTIPROCESSOR)
	/*
	 * If we are not the primary CPU, we're not allowed to do
	 * any more work.
	 */
	if (CPU_IS_PRIMARY(ci) == 0)
		return;
#endif

	/*
	 * Increment the time-of-day.  The increment is normally just
	 * ``tick''.  If the machine is one which has a clock frequency
	 * such that ``hz'' would not divide the second evenly into
	 * milliseconds, a periodic adjustment must be applied.  Finally,
	 * if we are still adjusting the time (see adjtime()),
	 * ``tickdelta'' may also be added in.
	 */
	delta = tick;

#ifndef NTP
	if (tickfix) {
		tickfixcnt += tickfix;
		if (tickfixcnt >= tickfixinterval) {
			delta++;
			tickfixcnt -= tickfixinterval;
		}
	}
#endif /* !NTP */
	/* Imprecise 4bsd adjtime() handling */
	if (timedelta != 0) {
		delta += tickdelta;
		timedelta -= tickdelta;
	}

#ifdef notyet
	microset();
#endif

#ifndef NTP
	BUMPTIME(&time, delta);		/* XXX Now done using NTP code below */
#endif
	BUMPTIME(&mono_time, delta);

#ifdef NTP
	time_update = delta;

	/*
	 * Compute the phase adjustment. If the low-order bits
	 * (time_phase) of the update overflow, bump the high-order bits
	 * (time_update).
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		time_update -= ltemp;
	} else if (time_phase >= FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		time_update += ltemp;
	}

#ifdef HIGHBALL
	/*
	 * If the HIGHBALL board is installed, we need to adjust the
	 * external clock offset in order to close the hardware feedback
	 * loop. This will adjust the external clock phase and frequency
	 * in small amounts. The additional phase noise and frequency
	 * wander this causes should be minimal. We also need to
	 * discipline the kernel time variable, since the PLL is used to
	 * discipline the external clock. If the Highball board is not
	 * present, we discipline kernel time with the PLL as usual. We
	 * assume that the external clock phase adjustment (time_update)
	 * and kernel phase adjustment (clock_cpu) are less than the
	 * value of tick.
	 */
	clock_offset.tv_usec += time_update;
	if (clock_offset.tv_usec >= 1000000) {
		clock_offset.tv_sec++;
		clock_offset.tv_usec -= 1000000;
	}
	if (clock_offset.tv_usec < 0) {
		clock_offset.tv_sec--;
		clock_offset.tv_usec += 1000000;
	}
	time.tv_usec += clock_cpu;
	clock_cpu = 0;
#else
	time.tv_usec += time_update;
#endif /* HIGHBALL */

	/*
	 * On rollover of the second the phase adjustment to be used for
	 * the next second is calculated. Also, the maximum error is
	 * increased by the tolerance. If the PPS frequency discipline
	 * code is present, the phase is increased to compensate for the
	 * CPU clock oscillator frequency error.
	 *
	 * On a 32-bit machine and given parameters in the timex.h
	 * header file, the maximum phase adjustment is +-512 ms and
	 * maximum frequency offset is a tad less than +-512 ppm. On a
	 * 64-bit machine, you shouldn't need to ask.
	 */
	if (time.tv_usec >= 1000000) {
		time.tv_usec -= 1000000;
		time.tv_sec++;
		time_maxerror += time_tolerance >> SHIFT_USEC;

		/*
		 * Leap second processing. If in leap-insert state at
		 * the end of the day, the system clock is set back one
		 * second; if in leap-delete state, the system clock is
		 * set ahead one second. The microtime() routine or
		 * external clock driver will ensure that reported time
		 * is always monotonic. The ugly divides should be
		 * replaced.
		 */
		switch (time_state) {
		case TIME_OK:
			if (time_status & STA_INS)
				time_state = TIME_INS;
			else if (time_status & STA_DEL)
				time_state = TIME_DEL;
			break;

		case TIME_INS:
			if (time.tv_sec % 86400 == 0) {
				time.tv_sec--;
				time_state = TIME_OOP;
			}
			break;

		case TIME_DEL:
			if ((time.tv_sec + 1) % 86400 == 0) {
				time.tv_sec++;
				time_state = TIME_WAIT;
			}
			break;

		case TIME_OOP:
			time_state = TIME_WAIT;
			break;

		case TIME_WAIT:
			if (!(time_status & (STA_INS | STA_DEL)))
				time_state = TIME_OK;
			break;
		}

		/*
		 * Compute the phase adjustment for the next second. In
		 * PLL mode, the offset is reduced by a fixed factor
		 * times the time constant. In FLL mode the offset is
		 * used directly. In either mode, the maximum phase
		 * adjustment for each second is clamped so as to spread
		 * the adjustment over not more than the number of
		 * seconds between updates.
		 */
		if (time_offset < 0) {
			ltemp = -time_offset;
			if (!(time_status & STA_FLL))
				ltemp >>= SHIFT_KG + time_constant;
			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
				ltemp = (MAXPHASE / MINSEC) <<
				    SHIFT_UPDATE;
			time_offset += ltemp;
			time_adj = -ltemp << (shifthz - SHIFT_UPDATE);
		} else if (time_offset > 0) {
			ltemp = time_offset;
			if (!(time_status & STA_FLL))
				ltemp >>= SHIFT_KG + time_constant;
			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
				ltemp = (MAXPHASE / MINSEC) <<
				    SHIFT_UPDATE;
			time_offset -= ltemp;
			time_adj = ltemp << (shifthz - SHIFT_UPDATE);
		} else
			time_adj = 0;

		/*
		 * Compute the frequency estimate and additional phase
		 * adjustment due to frequency error for the next
		 * second. When the PPS signal is engaged, gnaw on the
		 * watchdog counter and update the frequency computed by
		 * the pll and the PPS signal.
		 */
#ifdef PPS_SYNC
		pps_valid++;
		if (pps_valid == PPS_VALID) {
			pps_jitter = MAXTIME;
			pps_stabil = MAXFREQ;
			time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
			    STA_PPSWANDER | STA_PPSERROR);
		}
		ltemp = time_freq + pps_freq;
#else
		ltemp = time_freq;
#endif /* PPS_SYNC */

		if (ltemp < 0)
			time_adj -= -ltemp >> (SHIFT_USEC - shifthz);
		else
			time_adj += ltemp >> (SHIFT_USEC - shifthz);
		time_adj += (long)fixtick << shifthz;

		/*
		 * When the CPU clock oscillator frequency is not a
		 * power of 2 in Hz, shifthz is only an approximate
		 * scale factor.
		 *
		 * To determine the adjustment, you can do the following:
		 *   bc -q
		 *   scale=24
		 *   obase=2
		 *   idealhz/realhz
		 * where `idealhz' is the next higher power of 2, and `realhz'
		 * is the actual value.  You may need to factor this result
		 * into a sequence of 2 multipliers to get better precision.
		 *
		 * Likewise, the error can be calculated with (e.g. for 100Hz):
		 *   bc -q
		 *   scale=24
		 *   ((1+2^-2+2^-5)*(1-2^-10)*realhz-idealhz)/idealhz
		 * (and then multiply by 1000000 to get ppm).
		 */
		switch (hz) {
		case 60:
			/* A factor of 1.000100010001 gives about 15ppm
			   error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 4);
				time_adj -= (-time_adj >> 8);
			} else {
				time_adj += (time_adj >> 4);
				time_adj += (time_adj >> 8);
			}
			break;

		case 96:
			/* A factor of 1.0101010101 gives about 244ppm error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 2);
				time_adj -= (-time_adj >> 4) + (-time_adj >> 8);
			} else {
				time_adj += (time_adj >> 2);
				time_adj += (time_adj >> 4) + (time_adj >> 8);
			}
			break;

		case 100:
			/* A factor of 1.010001111010111 gives about 1ppm
			   error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
				time_adj += (-time_adj >> 10);
			} else {
				time_adj += (time_adj >> 2) + (time_adj >> 5);
				time_adj -= (time_adj >> 10);
			}
			break;

		case 1000:
			/* A factor of 1.000001100010100001 gives about 50ppm
			   error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 6) + (-time_adj >> 11);
				time_adj -= (-time_adj >> 7);
			} else {
				time_adj += (time_adj >> 6) + (time_adj >> 11);
				time_adj += (time_adj >> 7);
			}
			break;

		case 1200:
			/* A factor of 1.1011010011100001 gives about 64ppm
			   error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 1) + (-time_adj >> 6);
				time_adj -= (-time_adj >> 3) + (-time_adj >> 10);
			} else {
				time_adj += (time_adj >> 1) + (time_adj >> 6);
				time_adj += (time_adj >> 3) + (time_adj >> 10);
			}
			break;
		}

#ifdef EXT_CLOCK
		/*
		 * If an external clock is present, it is necessary to
		 * discipline the kernel time variable anyway, since not
		 * all system components use the microtime() interface.
		 * Here, the time offset between the external clock and
		 * kernel time variable is computed every so often.
		 */
		clock_count++;
		if (clock_count > CLOCK_INTERVAL) {
			clock_count = 0;
			microtime(&clock_ext);
			delta.tv_sec = clock_ext.tv_sec - time.tv_sec;
			delta.tv_usec = clock_ext.tv_usec -
			    time.tv_usec;
			if (delta.tv_usec < 0)
				delta.tv_sec--;
			if (delta.tv_usec >= 500000) {
				delta.tv_usec -= 1000000;
				delta.tv_sec++;
			}
			if (delta.tv_usec < -500000) {
				delta.tv_usec += 1000000;
				delta.tv_sec--;
			}
			if (delta.tv_sec > 0 || (delta.tv_sec == 0 &&
			    delta.tv_usec > MAXPHASE) ||
			    delta.tv_sec < -1 || (delta.tv_sec == -1 &&
			    delta.tv_usec < -MAXPHASE)) {
				time = clock_ext;
				delta.tv_sec = 0;
				delta.tv_usec = 0;
			}
#ifdef HIGHBALL
			clock_cpu = delta.tv_usec;
#else /* HIGHBALL */
			hardupdate(delta.tv_usec);
#endif /* HIGHBALL */
		}
#endif /* EXT_CLOCK */
	}

#endif /* NTP */

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	simple_lock(&callwheel_slock);	/* already at splclock() */
	hardclock_ticks++;
	if (! TAILQ_EMPTY(&callwheel[hardclock_ticks & callwheelmask].cq_q)) {
		simple_unlock(&callwheel_slock);
		if (CLKF_BASEPRI(frame)) {
			/*
			 * Save the overhead of a software interrupt;
			 * it will happen as soon as we return, so do
			 * it now.
			 *
			 * NOTE: If we're at ``base priority'', softclock()
			 * was not already running.
			 */
			spllowersoftclock();
			KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
			softclock(NULL);
			KERNEL_UNLOCK();
		} else {
#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
			softintr_schedule(softclock_si);
#else
			setsoftclock();
#endif
		}
		return;
	} else if (softclock_running == 0 &&
		   (softclock_ticks + 1) == hardclock_ticks) {
		softclock_ticks++;
	}
	simple_unlock(&callwheel_slock);
}

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
/*ARGSUSED*/
void
softclock(void *v)
{
	struct callout_queue *bucket;
	struct callout *c;
	void (*func)(void *);
	void *arg;
	int s, idx;
	int steps = 0;

	CALLWHEEL_LOCK(s);

	softclock_running = 1;

#ifdef CALLWHEEL_STATS
	callwheel_softclocks.ev_count++;
#endif

	while (softclock_ticks != hardclock_ticks) {
		softclock_ticks++;
		idx = (int)(softclock_ticks & callwheelmask);
		bucket = &callwheel[idx];
		c = TAILQ_FIRST(&bucket->cq_q);
		if (c == NULL) {
#ifdef CALLWHEEL_STATS
			callwheel_softempty.ev_count++;
#endif
			continue;
		}
		if (softclock_ticks < bucket->cq_hint) {
#ifdef CALLWHEEL_STATS
			callwheel_hintworked.ev_count++;
#endif
			continue;
		}
		bucket->cq_hint = UQUAD_MAX;
		while (c != NULL) {
#ifdef CALLWHEEL_STATS
			callwheel_softchecks.ev_count++;
#endif
			if (c->c_time != softclock_ticks) {
				if (c->c_time < bucket->cq_hint)
					bucket->cq_hint = c->c_time;
				c = TAILQ_NEXT(c, c_link);
				if (++steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					CALLWHEEL_UNLOCK(s);
					CALLWHEEL_LOCK(s);
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				nextsoftcheck = TAILQ_NEXT(c, c_link);
				TAILQ_REMOVE(&bucket->cq_q, c, c_link);
#ifdef CALLWHEEL_STATS
				callwheel_sizes[idx]--;
				callwheel_fired.ev_count++;
				callwheel_count.ev_count--;
#endif
				func = c->c_func;
				arg = c->c_arg;
				c->c_func = NULL;
				c->c_flags &= ~CALLOUT_PENDING;
				CALLWHEEL_UNLOCK(s);
				(*func)(arg);
				CALLWHEEL_LOCK(s);
				steps = 0;
				c = nextsoftcheck;
			}
		}
		if (TAILQ_EMPTY(&bucket->cq_q))
			bucket->cq_hint = UQUAD_MAX;
	}
	nextsoftcheck = NULL;
	softclock_running = 0;
	CALLWHEEL_UNLOCK(s);
}

/*
 * callout_setsize:
 *
 *	Determine how many callwheels are necessary and
 *	set hash mask.  Called from allocsys().
 */
void
callout_setsize(void)
{

	for (callwheelsize = 1; callwheelsize < ncallout; callwheelsize <<= 1)
		/* loop */ ;
	callwheelmask = callwheelsize - 1;
}
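
/*
 * Example (illustrative): with ncallout = 200, the loop above leaves
 * callwheelsize = 256 (the next power of two) and callwheelmask = 0xff.
 */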

/*
 * callout_startup:
 *
 *	Initialize the callwheel buckets.
 */
void
callout_startup(void)
{
	int i;

	for (i = 0; i < callwheelsize; i++) {
		callwheel[i].cq_hint = UQUAD_MAX;
		TAILQ_INIT(&callwheel[i].cq_q);
	}

	simple_lock_init(&callwheel_slock);

#ifdef CALLWHEEL_STATS
	evcnt_attach_dynamic(&callwheel_collisions, EVCNT_TYPE_MISC,
	    NULL, "callwheel", "collisions");
	evcnt_attach_dynamic(&callwheel_maxlength, EVCNT_TYPE_MISC,
	    NULL, "callwheel", "maxlength");
	evcnt_attach_dynamic(&callwheel_count, EVCNT_TYPE_MISC,
	    NULL, "callwheel", "count");
	evcnt_attach_dynamic(&callwheel_established, EVCNT_TYPE_MISC,
	    NULL, "callwheel", "established");
	evcnt_attach_dynamic(&callwheel_fired, EVCNT_TYPE_MISC,
	    NULL, "callwheel", "fired");
	evcnt_attach_dynamic(&callwheel_disestablished, EVCNT_TYPE_MISC,
	    NULL, "callwheel", "disestablished");
	evcnt_attach_dynamic(&callwheel_changed, EVCNT_TYPE_MISC,
	    NULL, "callwheel", "changed");
	evcnt_attach_dynamic(&callwheel_softclocks, EVCNT_TYPE_MISC,
	    NULL, "callwheel", "softclocks");
	evcnt_attach_dynamic(&callwheel_softempty, EVCNT_TYPE_MISC,
	    NULL, "callwheel", "softempty");
	evcnt_attach_dynamic(&callwheel_hintworked, EVCNT_TYPE_MISC,
	    NULL, "callwheel", "hintworked");
#endif /* CALLWHEEL_STATS */
}

/*
 * callout_init:
 *
 *	Initialize a callout structure so that it can be used
 *	by callout_reset() and callout_stop().
 */
void
callout_init(struct callout *c)
{

	memset(c, 0, sizeof(*c));
}

/*
 * callout_reset:
 *
 *	Establish or change a timeout.
 */
void
callout_reset(struct callout *c, int ticks, void (*func)(void *), void *arg)
{
	struct callout_queue *bucket;
	int s;

	if (ticks <= 0)
		ticks = 1;

	CALLWHEEL_LOCK(s);

	/*
	 * If this callout's timer is already running, cancel it
	 * before we modify it.
	 */
	if (c->c_flags & CALLOUT_PENDING) {
		callout_stop_locked(c);	/* Already locked */
#ifdef CALLWHEEL_STATS
		callwheel_changed.ev_count++;
#endif
	}

	c->c_arg = arg;
	c->c_func = func;
	c->c_flags = CALLOUT_ACTIVE | CALLOUT_PENDING;
	c->c_time = hardclock_ticks + ticks;

	bucket = &callwheel[c->c_time & callwheelmask];

#ifdef CALLWHEEL_STATS
	if (! TAILQ_EMPTY(&bucket->cq_q))
		callwheel_collisions.ev_count++;
#endif

	TAILQ_INSERT_TAIL(&bucket->cq_q, c, c_link);
	if (c->c_time < bucket->cq_hint)
		bucket->cq_hint = c->c_time;

#ifdef CALLWHEEL_STATS
	callwheel_count.ev_count++;
	callwheel_established.ev_count++;
	if (++callwheel_sizes[c->c_time & callwheelmask] >
	    callwheel_maxlength.ev_count)
		callwheel_maxlength.ev_count =
		    callwheel_sizes[c->c_time & callwheelmask];
#endif

	CALLWHEEL_UNLOCK(s);
}
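
/*
 * Usage sketch (illustrative only; the "mydev" names are hypothetical,
 * not part of this file).  A driver typically arms a periodic timeout
 * by rescheduling from its own handler:
 *
 *	struct mydev_softc {
 *		struct callout sc_tick_ch;
 *	};
 *
 *	void
 *	mydev_attach(struct mydev_softc *sc)
 *	{
 *
 *		callout_init(&sc->sc_tick_ch);
 *		callout_reset(&sc->sc_tick_ch, hz, mydev_tick, sc);
 *	}
 *
 *	void
 *	mydev_tick(void *arg)
 *	{
 *		struct mydev_softc *sc = arg;
 *
 *		... do the periodic work ...
 *		callout_reset(&sc->sc_tick_ch, hz, mydev_tick, sc);
 *	}
 *
 * callout_stop(&sc->sc_tick_ch) cancels the pending timeout, e.g. at
 * detach time.
 */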

/*
 * callout_stop_locked:
 *
 *	Disestablish a timeout.  Callwheel is locked.
 */
static void
callout_stop_locked(struct callout *c)
{
	struct callout_queue *bucket;

	/*
	 * Don't attempt to delete a callout that's not on the queue.
	 */
	if ((c->c_flags & CALLOUT_PENDING) == 0) {
		c->c_flags &= ~CALLOUT_ACTIVE;
		return;
	}

	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (nextsoftcheck == c)
		nextsoftcheck = TAILQ_NEXT(c, c_link);

	bucket = &callwheel[c->c_time & callwheelmask];
	TAILQ_REMOVE(&bucket->cq_q, c, c_link);
	if (TAILQ_EMPTY(&bucket->cq_q))
		bucket->cq_hint = UQUAD_MAX;
#ifdef CALLWHEEL_STATS
	callwheel_count.ev_count--;
	callwheel_disestablished.ev_count++;
	callwheel_sizes[c->c_time & callwheelmask]--;
#endif

	c->c_func = NULL;
}

/*
 * callout_stop:
 *
 *	Disestablish a timeout.  Callwheel is unlocked.  This is
 *	the standard entry point.
 */
void
callout_stop(struct callout *c)
{
	int s;

	CALLWHEEL_LOCK(s);
	callout_stop_locked(c);
	CALLWHEEL_UNLOCK(s);
}

#ifdef CALLWHEEL_STATS
/*
 * callout_showstats:
 *
 *	Display callout statistics.  Call it from DDB.
 */
void
callout_showstats(void)
{
	u_int64_t curticks;
	int s;

	s = splclock();
	curticks = softclock_ticks;
	splx(s);

	printf("Callwheel statistics:\n");
	printf("\tCallouts currently queued: %llu\n",
	    (long long) callwheel_count.ev_count);
	printf("\tCallouts established: %llu\n",
	    (long long) callwheel_established.ev_count);
	printf("\tCallouts disestablished: %llu\n",
	    (long long) callwheel_disestablished.ev_count);
	if (callwheel_changed.ev_count != 0)
		printf("\t\tOf those, %llu were changes\n",
		    (long long) callwheel_changed.ev_count);
	printf("\tCallouts that fired: %llu\n",
	    (long long) callwheel_fired.ev_count);
	printf("\tNumber of buckets: %d\n", callwheelsize);
	printf("\tNumber of hash collisions: %llu\n",
	    (long long) callwheel_collisions.ev_count);
	printf("\tMaximum hash chain length: %llu\n",
	    (long long) callwheel_maxlength.ev_count);
	printf("\tSoftclocks: %llu, Softchecks: %llu\n",
	    (long long) callwheel_softclocks.ev_count,
	    (long long) callwheel_softchecks.ev_count);
	printf("\t\tEmpty buckets seen: %llu\n",
	    (long long) callwheel_softempty.ev_count);
	printf("\t\tTimes hint saved scan: %llu\n",
	    (long long) callwheel_hintworked.ev_count);
}
#endif

/*
 * Compute number of hz until specified time.  Used to compute second
 * argument to callout_reset() from an absolute time.
 */
int
hzto(struct timeval *tv)
{
	unsigned long ticks;
	long sec, usec;
	int s;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case, but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints are 32-bit, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
	s = splclock();
	sec = tv->tv_sec - time.tv_sec;
	usec = tv->tv_usec - time.tv_usec;
	splx(s);

	if (usec < 0) {
		sec--;
		usec += 1000000;
	}

	if (sec < 0 || (sec == 0 && usec <= 0)) {
		/*
		 * Would expire now or in the past.  Return 0 ticks.
		 * This is different from the legacy hzto() interface,
		 * and callers need to check for it.
		 */
		ticks = 0;
	} else if (sec <= (LONG_MAX / 1000000))
		ticks = (((sec * 1000000) + (unsigned long)usec + (tick - 1))
		    / tick) + 1;
	else if (sec <= (LONG_MAX / hz))
		ticks = (sec * hz) +
		    (((unsigned long)usec + (tick - 1)) / tick) + 1;
	else
		ticks = LONG_MAX;

	if (ticks > INT_MAX)
		ticks = INT_MAX;

	return ((int)ticks);
}
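
/*
 * Worked example (illustrative, hz = 100, tick = 10000): for a target
 * 2.5 s in the future, sec = 2 and usec = 500000, so the first branch
 * yields ((2000000 + 500000 + 9999) / 10000) + 1 = 251 ticks.
 */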

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{

	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		if (++profprocs == 1 && stathz != 0)
			psdiv = psratio;
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(struct proc *p)
{

	if (p->p_flag & P_PROFIL) {
		p->p_flag &= ~P_PROFIL;
		if (--profprocs == 0 && stathz != 0)
			psdiv = 1;
	}
}

/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.
 */
void
statclock(struct clockframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	intptr_t i;
#endif
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	struct proc *p;

	/*
	 * Notice changes in divisor frequency, and adjust clock
	 * frequency accordingly.
	 */
	if (spc->spc_psdiv != psdiv) {
		spc->spc_psdiv = psdiv;
		spc->spc_pscnt = psdiv;
		if (psdiv == 1) {
			setstatclockrate(stathz);
		} else {
			setstatclockrate(profhz);
		}
	}
	p = curproc;
	if (CLKF_USERMODE(frame)) {
		if (p->p_flag & P_PROFIL)
			addupc_intr(p, CLKF_PC(frame));
		if (--spc->spc_pscnt > 0)
			return;
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled record the tick.
		 */
		p->p_uticks++;
		if (p->p_nice > NZERO)
			spc->spc_cp_time[CP_NICE]++;
		else
			spc->spc_cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
#ifdef PROC_PC
		if (p && p->p_flag & P_PROFIL)
			addupc_intr(p, PROC_PC(p));
#endif
		if (--spc->spc_pscnt > 0)
			return;
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if (CLKF_INTR(frame)) {
			if (p != NULL)
				p->p_iticks++;
			spc->spc_cp_time[CP_INTR]++;
		} else if (p != NULL) {
			p->p_sticks++;
			spc->spc_cp_time[CP_SYS]++;
		} else
			spc->spc_cp_time[CP_IDLE]++;
	}
	spc->spc_pscnt = psdiv;

	if (p != NULL) {
		++p->p_cpticks;
		/*
		 * If no separate schedclock is provided, call it here
		 * at about 12-25 Hz; about 16 Hz is best.
		 */
		if (schedhz == 0)
			if ((++ci->ci_schedstate.spc_schedticks & 3) == 0)
				schedclock(p);
	}
}


#ifdef NTP	/* NTP phase-locked loop in kernel */

/*
 * hardupdate() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency. The implementation is of an adaptive-parameter,
 * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new
 * time and frequency offset estimates for each call. If the kernel PPS
 * discipline code is configured (PPS_SYNC), the PPS signal itself
 * determines the new time offset, instead of the calling argument.
 * Presumably, calls to ntp_adjtime() occur only when the caller
 * believes the local clock is valid within some bound (+-128 ms with
 * NTP). If the caller's time is far different than the PPS time, an
 * argument will ensue, and it's not clear who will lose.
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 1024 s, operation should be in phase-lock mode
 * (STA_FLL = 0), where the loop is disciplined to phase. For update
 * intervals greater than this, operation should be in frequency-lock
 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
 *
 * Note: splclock() is in effect.
 */
void
hardupdate(long offset)
{
	long ltemp, mtemp;

	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
		return;
	ltemp = offset;
#ifdef PPS_SYNC
	if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
		ltemp = pps_offset;
#endif /* PPS_SYNC */

	/*
	 * Scale the phase adjustment and clamp to the operating range.
	 */
	if (ltemp > MAXPHASE)
		time_offset = MAXPHASE << SHIFT_UPDATE;
	else if (ltemp < -MAXPHASE)
		time_offset = -(MAXPHASE << SHIFT_UPDATE);
	else
		time_offset = ltemp << SHIFT_UPDATE;

	/*
	 * Select whether the frequency is to be controlled and in which
	 * mode (PLL or FLL). Clamp to the operating range. Ugly
	 * multiply/divide should be replaced someday.
	 */
	if (time_status & STA_FREQHOLD || time_reftime == 0)
		time_reftime = time.tv_sec;
	mtemp = time.tv_sec - time_reftime;
	time_reftime = time.tv_sec;
	if (time_status & STA_FLL) {
		if (mtemp >= MINSEC) {
			ltemp = ((time_offset / mtemp) << (SHIFT_USEC -
			    SHIFT_UPDATE));
			if (ltemp < 0)
				time_freq -= -ltemp >> SHIFT_KH;
			else
				time_freq += ltemp >> SHIFT_KH;
		}
	} else {
		if (mtemp < MAXSEC) {
			ltemp *= mtemp;
			if (ltemp < 0)
				time_freq -= -ltemp >> (time_constant +
				    time_constant + SHIFT_KF -
				    SHIFT_USEC);
			else
				time_freq += ltemp >> (time_constant +
				    time_constant + SHIFT_KF -
				    SHIFT_USEC);
		}
	}
	if (time_freq > time_tolerance)
		time_freq = time_tolerance;
	else if (time_freq < -time_tolerance)
		time_freq = -time_tolerance;
}
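
/*
 * Illustrative summary of the two branches above (the shifts in the
 * code are authoritative): in FLL mode the frequency is nudged by the
 * average offset over the update interval, time_offset / mtemp,
 * attenuated by SHIFT_KH; in PLL mode it is nudged by offset * mtemp,
 * attenuated exponentially by the time constant (the shift count grows
 * by twice time_constant).  In both cases time_freq is clamped to
 * +-time_tolerance.
 */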

#ifdef PPS_SYNC
/*
 * hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal. It measures the PPS phase
 * and leaves it in a handy spot for the hardclock() routine. It
 * integrates successive PPS phase differences and calculates the
 * frequency offset. This is used in hardclock() to discipline the CPU
 * clock oscillator so that intrinsic frequency error is cancelled out.
 * The code requires the caller to capture the time and hardware counter
 * value at the on-time PPS signal transition.
 *
 * Note that, on some Unix systems, this routine runs at an interrupt
 * priority level higher than the timer interrupt routine hardclock().
 * Therefore, the variables used are distinct from the hardclock()
 * variables, except for certain exceptions: The PPS frequency pps_freq
 * and phase pps_offset variables are determined by this routine and
 * updated atomically. The time_tolerance variable can be considered a
 * constant, since it is infrequently changed, and then only when the
 * PPS signal is disabled. The watchdog counter pps_valid is updated
 * once per second by hardclock() and is atomically cleared in this
 * routine.
 */
void
hardpps(struct timeval *tvp,		/* time at PPS */
	long usec			/* hardware counter at PPS */)
{
	long u_usec, v_usec, bigtick;
	long cal_sec, cal_usec;

	/*
	 * An occasional glitch can be produced when the PPS interrupt
	 * occurs in the hardclock() routine before the time variable is
	 * updated. Here the offset is discarded when the difference
	 * between it and the last one is greater than tick/2, but not
	 * if the interval since the first discard exceeds 30 s.
	 */
	time_status |= STA_PPSSIGNAL;
	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
	pps_valid = 0;
	u_usec = -tvp->tv_usec;
	if (u_usec < -500000)
		u_usec += 1000000;
	v_usec = pps_offset - u_usec;
	if (v_usec < 0)
		v_usec = -v_usec;
	if (v_usec > (tick >> 1)) {
		if (pps_glitch > MAXGLITCH) {
			pps_glitch = 0;
			pps_tf[2] = u_usec;
			pps_tf[1] = u_usec;
		} else {
			pps_glitch++;
			u_usec = pps_offset;
		}
	} else
		pps_glitch = 0;

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * time. The median sample becomes the time offset estimate; the
	 * difference between the other two samples becomes the time
	 * dispersion (jitter) estimate.
	 */
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0] = u_usec;
	if (pps_tf[0] > pps_tf[1]) {
		if (pps_tf[1] > pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 0 1 2 */
			v_usec = pps_tf[0] - pps_tf[2];
		} else if (pps_tf[2] > pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 2 0 1 */
			v_usec = pps_tf[2] - pps_tf[1];
		} else {
			pps_offset = pps_tf[2];		/* 0 2 1 */
			v_usec = pps_tf[0] - pps_tf[1];
		}
	} else {
		if (pps_tf[1] < pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 2 1 0 */
			v_usec = pps_tf[2] - pps_tf[0];
		} else if (pps_tf[2] < pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 1 0 2 */
			v_usec = pps_tf[1] - pps_tf[2];
		} else {
			pps_offset = pps_tf[2];		/* 1 2 0 */
			v_usec = pps_tf[1] - pps_tf[0];
		}
	}
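	/*
	 * Worked example (illustrative): with pps_tf[] = { 5, -3, 2 }
	 * (newest first), the code above picks pps_offset = 2, the
	 * median, and v_usec = 5 - (-3) = 8, the spread of the two
	 * remaining samples.
	 */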
	if (v_usec > MAXTIME)
		pps_jitcnt++;
	v_usec = (v_usec << PPS_AVG) - pps_jitter;
	if (v_usec < 0)
		pps_jitter -= -v_usec >> PPS_AVG;
	else
		pps_jitter += v_usec >> PPS_AVG;
	if (pps_jitter > (MAXTIME >> 1))
		time_status |= STA_PPSJITTER;

	/*
	 * During the calibration interval adjust the starting time when
	 * the tick overflows. At the end of the interval compute the
	 * duration of the interval and the difference of the hardware
	 * counters at the beginning and end of the interval. This code
	 * is deliciously complicated by the fact that valid differences
	 * may exceed the value of tick when using long calibration
	 * intervals and small ticks. Note that the counter can be
	 * greater than tick if caught at just the wrong instant, but
	 * the values returned and used here are correct.
	 */
	bigtick = (long)tick << SHIFT_USEC;
	pps_usec -= pps_freq;
	if (pps_usec >= bigtick)
		pps_usec -= bigtick;
	if (pps_usec < 0)
		pps_usec += bigtick;
	pps_time.tv_sec++;
	pps_count++;
	if (pps_count < (1 << pps_shift))
		return;
	pps_count = 0;
	pps_calcnt++;
	u_usec = usec << SHIFT_USEC;
	v_usec = pps_usec - u_usec;
	if (v_usec >= bigtick >> 1)
		v_usec -= bigtick;
	if (v_usec < -(bigtick >> 1))
		v_usec += bigtick;
	if (v_usec < 0)
		v_usec = -(-v_usec >> pps_shift);
	else
		v_usec = v_usec >> pps_shift;
	pps_usec = u_usec;
	cal_sec = tvp->tv_sec;
	cal_usec = tvp->tv_usec;
	cal_sec -= pps_time.tv_sec;
	cal_usec -= pps_time.tv_usec;
	if (cal_usec < 0) {
		cal_usec += 1000000;
		cal_sec--;
	}
	pps_time = *tvp;

	/*
	 * Check for lost interrupts, noise, excessive jitter and
	 * excessive frequency error. The number of timer ticks during
	 * the interval may vary +-1 tick. Add to this a margin of one
	 * tick for the PPS signal jitter and maximum frequency
	 * deviation. If the limits are exceeded, the calibration
	 * interval is reset to the minimum and we start over.
	 */
	u_usec = (long)tick << 1;
	if (!((cal_sec == -1 && cal_usec > (1000000 - u_usec))
	    || (cal_sec == 0 && cal_usec < u_usec))
	    || v_usec > time_tolerance || v_usec < -time_tolerance) {
		pps_errcnt++;
		pps_shift = PPS_SHIFT;
		pps_intcnt = 0;
		time_status |= STA_PPSERROR;
		return;
	}

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * frequency. The median sample becomes the frequency offset
	 * estimate; the difference between the other two samples
	 * becomes the frequency dispersion (stability) estimate.
	 */
	pps_ff[2] = pps_ff[1];
	pps_ff[1] = pps_ff[0];
	pps_ff[0] = v_usec;
	if (pps_ff[0] > pps_ff[1]) {
		if (pps_ff[1] > pps_ff[2]) {
			u_usec = pps_ff[1];		/* 0 1 2 */
			v_usec = pps_ff[0] - pps_ff[2];
		} else if (pps_ff[2] > pps_ff[0]) {
			u_usec = pps_ff[0];		/* 2 0 1 */
			v_usec = pps_ff[2] - pps_ff[1];
		} else {
			u_usec = pps_ff[2];		/* 0 2 1 */
			v_usec = pps_ff[0] - pps_ff[1];
		}
	} else {
		if (pps_ff[1] < pps_ff[2]) {
			u_usec = pps_ff[1];		/* 2 1 0 */
			v_usec = pps_ff[2] - pps_ff[0];
		} else if (pps_ff[2] < pps_ff[0]) {
			u_usec = pps_ff[0];		/* 1 0 2 */
			v_usec = pps_ff[1] - pps_ff[2];
		} else {
			u_usec = pps_ff[2];		/* 1 2 0 */
			v_usec = pps_ff[1] - pps_ff[0];
		}
	}

	/*
	 * Here the frequency dispersion (stability) is updated. If it
	 * is less than one-fourth the maximum (MAXFREQ), the frequency
	 * offset is updated as well, but clamped to the tolerance. It
	 * will be processed later by the hardclock() routine.
	 */
	v_usec = (v_usec >> 1) - pps_stabil;
	if (v_usec < 0)
		pps_stabil -= -v_usec >> PPS_AVG;
	else
		pps_stabil += v_usec >> PPS_AVG;
	if (pps_stabil > MAXFREQ >> 2) {
		pps_stbcnt++;
		time_status |= STA_PPSWANDER;
		return;
	}
	if (time_status & STA_PPSFREQ) {
		if (u_usec < 0) {
			pps_freq -= -u_usec >> PPS_AVG;
			if (pps_freq < -time_tolerance)
				pps_freq = -time_tolerance;
			u_usec = -u_usec;
		} else {
			pps_freq += u_usec >> PPS_AVG;
			if (pps_freq > time_tolerance)
				pps_freq = time_tolerance;
		}
	}

	/*
	 * Here the calibration interval is adjusted. If the maximum
	 * time difference is greater than tick / 4, reduce the interval
	 * by half. If this is not the case for four consecutive
	 * intervals, double the interval.
	 */
	if (u_usec << pps_shift > bigtick >> 2) {
		pps_intcnt = 0;
		if (pps_shift > PPS_SHIFT)
			pps_shift--;
	} else if (pps_intcnt >= 4) {
		pps_intcnt = 0;
		if (pps_shift < PPS_SHIFTMAX)
			pps_shift++;
	} else
		pps_intcnt++;
}
#endif /* PPS_SYNC */
#endif /* NTP */

/*
 * Return information about system clocks.
 */
int
sysctl_clockrate(void *where, size_t *sizep)
{
	struct clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.tick = tick;
	clkinfo.tickadj = tickadj;
	clkinfo.hz = hz;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_rdstruct(where, sizep, NULL, &clkinfo, sizeof(clkinfo)));
}
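
/*
 * Userland reads this through sysctl(3) as kern.clockrate; a minimal
 * sketch (illustrative):
 *
 *	struct clockinfo ci;
 *	int mib[2] = { CTL_KERN, KERN_CLOCKRATE };
 *	size_t len = sizeof(ci);
 *
 *	if (sysctl(mib, 2, &ci, &len, NULL, 0) == 0)
 *		printf("hz=%d tick=%d stathz=%d profhz=%d\n",
 *		    ci.hz, ci.tick, ci.stathz, ci.profhz);
 */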