xref: /netbsd-src/sys/kern/kern_clock.c (revision 001c68bd94f75ce9270b69227c4199fbf34ee396)
1 /*	$NetBSD: kern_clock.c,v 1.86 2003/06/23 11:02:04 martin Exp $	*/
2 
3 /*-
4  * Copyright (c) 2000 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed by the NetBSD
22  *	Foundation, Inc. and its contributors.
23  * 4. Neither the name of The NetBSD Foundation nor the names of its
24  *    contributors may be used to endorse or promote products derived
25  *    from this software without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 /*-
41  * Copyright (c) 1982, 1986, 1991, 1993
42  *	The Regents of the University of California.  All rights reserved.
43  * (c) UNIX System Laboratories, Inc.
44  * All or some portions of this file are derived from material licensed
45  * to the University of California by American Telephone and Telegraph
46  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
47  * the permission of UNIX System Laboratories, Inc.
48  *
49  * Redistribution and use in source and binary forms, with or without
50  * modification, are permitted provided that the following conditions
51  * are met:
52  * 1. Redistributions of source code must retain the above copyright
53  *    notice, this list of conditions and the following disclaimer.
54  * 2. Redistributions in binary form must reproduce the above copyright
55  *    notice, this list of conditions and the following disclaimer in the
56  *    documentation and/or other materials provided with the distribution.
57  * 3. All advertising materials mentioning features or use of this software
58  *    must display the following acknowledgement:
59  *	This product includes software developed by the University of
60  *	California, Berkeley and its contributors.
61  * 4. Neither the name of the University nor the names of its contributors
62  *    may be used to endorse or promote products derived from this software
63  *    without specific prior written permission.
64  *
65  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
66  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
67  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
68  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
69  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
70  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
71  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
72  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
73  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
74  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
75  * SUCH DAMAGE.
76  *
77  *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
78  */
79 
80 #include <sys/cdefs.h>
81 __KERNEL_RCSID(0, "$NetBSD: kern_clock.c,v 1.86 2003/06/23 11:02:04 martin Exp $");
82 
83 #include "opt_ntp.h"
84 #include "opt_multiprocessor.h"
85 #include "opt_perfctrs.h"
86 
87 #include <sys/param.h>
88 #include <sys/systm.h>
89 #include <sys/callout.h>
90 #include <sys/kernel.h>
91 #include <sys/proc.h>
92 #include <sys/resourcevar.h>
93 #include <sys/signalvar.h>
94 #include <sys/sysctl.h>
95 #include <sys/timex.h>
96 #include <sys/sched.h>
97 #include <sys/time.h>
98 
99 #include <machine/cpu.h>
100 #ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
101 #include <machine/intr.h>
102 #endif
103 
104 #ifdef GPROF
105 #include <sys/gmon.h>
106 #endif
107 
108 /*
109  * Clock handling routines.
110  *
111  * This code is written to operate with two timers that run independently of
112  * each other.  The main clock, running hz times per second, is used to keep
113  * track of real time.  The second timer handles kernel and user profiling,
114  * and does resource use estimation.  If the second timer is programmable,
115  * it is randomized to avoid aliasing between the two clocks.  For example,
116  * the randomization prevents an adversary from always giving up the cpu
117  * just before its quantum expires.  Otherwise, it would never accumulate
118  * cpu ticks.  The mean frequency of the second timer is stathz.
119  *
120  * If no second timer exists, stathz will be zero; in this case we drive
121  * profiling and statistics off the main clock.  This WILL NOT be accurate;
122  * do not do it unless absolutely necessary.
123  *
124  * The statistics clock may (or may not) be run at a higher rate while
125  * profiling.  This profile clock runs at profhz.  We require that profhz
126  * be an integral multiple of stathz.
127  *
128  * If the statistics clock is running fast, it must be divided by the ratio
129  * profhz/stathz for statistics.  (For profiling, every tick counts.)
130  */
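/*
 * For example (illustrative rates): with stathz = 128 and profhz = 1024,
 * the ratio profhz/stathz is 8, so while any process is being profiled
 * the clock runs eight times faster and only every eighth tick is
 * charged to the statistics counters.
 */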
131 
132 #ifdef NTP	/* NTP phase-locked loop in kernel */
133 /*
134  * Phase/frequency-lock loop (PLL/FLL) definitions
135  *
136  * The following variables are read and set by the ntp_adjtime() system
137  * call.
138  *
139  * time_state shows the state of the system clock, with values defined
140  * in the timex.h header file.
141  *
142  * time_status shows the status of the system clock, with bits defined
143  * in the timex.h header file.
144  *
145  * time_offset is used by the PLL/FLL to adjust the system time in small
146  * increments.
147  *
148  * time_constant determines the bandwidth or "stiffness" of the PLL.
149  *
150  * time_tolerance determines maximum frequency error or tolerance of the
151  * CPU clock oscillator and is a property of the architecture; however,
152  * in principle it could change as a result of the presence of external
153  * discipline signals, for instance.
154  *
155  * time_precision is usually equal to the kernel tick variable; however,
156  * in cases where a precision clock counter or external clock is
157  * available, the resolution can be much less than this and depend on
158  * whether the external clock is working or not.
159  *
160  * time_maxerror is initialized by an ntp_adjtime() call and increased by
161  * the kernel once each second to reflect the maximum error bound
162  * growth.
163  *
164  * time_esterror is set and read by the ntp_adjtime() call, but
165  * otherwise not used by the kernel.
166  */
167 int time_state = TIME_OK;	/* clock state */
168 int time_status = STA_UNSYNC;	/* clock status bits */
169 long time_offset = 0;		/* time offset (us) */
170 long time_constant = 0;		/* pll time constant */
171 long time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
172 long time_precision = 1;	/* clock precision (us) */
173 long time_maxerror = MAXPHASE;	/* maximum error (us) */
174 long time_esterror = MAXPHASE;	/* estimated error (us) */
175 
176 /*
177  * The following variables establish the state of the PLL/FLL and the
178  * residual time and frequency offset of the local clock. The scale
179  * factors are defined in the timex.h header file.
180  *
181  * time_phase and time_freq are the phase increment and the frequency
182  * increment, respectively, of the kernel time variable.
183  *
184  * time_freq is set via ntp_adjtime() from a value stored in a file when
185  * the synchronization daemon is first started. Its value is retrieved
186  * via ntp_adjtime() and written to the file about once per hour by the
187  * daemon.
188  *
189  * time_adj is the adjustment added to the value of tick at each timer
190  * interrupt and is recomputed from time_phase and time_freq at each
191  * seconds rollover.
192  *
193  * time_reftime is the second's portion of the system time at the last
194  * call to ntp_adjtime(). It is used to adjust the time_freq variable
195  * and to increase the time_maxerror as the time since last update
196  * increases.
197  */
198 long time_phase = 0;		/* phase offset (scaled us) */
199 long time_freq = 0;		/* frequency offset (scaled ppm) */
200 long time_adj = 0;		/* tick adjust (scaled 1 / hz) */
201 long time_reftime = 0;		/* time at last adjustment (s) */
202 
203 #ifdef PPS_SYNC
204 /*
205  * The following variables are used only if the kernel PPS discipline
206  * code is configured (PPS_SYNC). The scale factors are defined in the
207  * timex.h header file.
208  *
209  * pps_time contains the time at each calibration interval, as read by
210  * microtime(). pps_count counts the seconds of the calibration
211  * interval, the duration of which is nominally 2^pps_shift seconds
212  * long.
213  *
214  * pps_offset is the time offset produced by the time median filter
215  * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
216  * this filter.
217  *
218  * pps_freq is the frequency offset produced by the frequency median
219  * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
220  * by this filter.
221  *
222  * pps_usec is latched from a high resolution counter or external clock
223  * at pps_time. Here we want the hardware counter contents only, not the
224  * contents plus the time_tv.usec as usual.
225  *
226  * pps_valid counts the number of seconds since the last PPS update. It
227  * is used as a watchdog timer to disable the PPS discipline should the
228  * PPS signal be lost.
229  *
230  * pps_glitch counts the number of seconds since the beginning of an
231  * offset burst more than tick/2 from current nominal offset. It is used
232  * mainly to suppress error bursts due to priority conflicts between the
233  * PPS interrupt and timer interrupt.
234  *
235  * pps_intcnt counts the calibration intervals for use in the interval-
236  * adaptation algorithm. It's just too complicated for words.
237  */
238 struct timeval pps_time;	/* kernel time at last interval */
239 long pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
240 long pps_offset = 0;		/* pps time offset (us) */
241 long pps_jitter = MAXTIME;	/* time dispersion (jitter) (us) */
242 long pps_ff[] = {0, 0, 0};	/* pps frequency offset median filter */
243 long pps_freq = 0;		/* frequency offset (scaled ppm) */
244 long pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
245 long pps_usec = 0;		/* microsec counter at last interval */
246 long pps_valid = PPS_VALID;	/* pps signal watchdog counter */
247 int pps_glitch = 0;		/* pps signal glitch counter */
248 int pps_count = 0;		/* calibration interval counter (s) */
249 int pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
250 int pps_intcnt = 0;		/* intervals at current duration */
251 
252 /*
253  * PPS signal quality monitors
254  *
255  * pps_jitcnt counts the seconds that have been discarded because the
256  * jitter measured by the time median filter exceeds the limit MAXTIME
257  * (100 us).
258  *
259  * pps_calcnt counts the frequency calibration intervals, which are
260  * variable from 4 s to 256 s.
261  *
262  * pps_errcnt counts the calibration intervals which have been discarded
263  * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
264  * calibration interval jitter exceeds two ticks.
265  *
266  * pps_stbcnt counts the calibration intervals that have been discarded
267  * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
268  */
269 long pps_jitcnt = 0;		/* jitter limit exceeded */
270 long pps_calcnt = 0;		/* calibration intervals */
271 long pps_errcnt = 0;		/* calibration errors */
272 long pps_stbcnt = 0;		/* stability limit exceeded */
273 #endif /* PPS_SYNC */
274 
275 #ifdef EXT_CLOCK
276 /*
277  * External clock definitions
278  *
279  * The following definitions and declarations are used only if an
280  * external clock is configured on the system.
281  */
282 #define CLOCK_INTERVAL 30	/* CPU clock update interval (s) */
283 
284 /*
285  * The clock_count variable is set to CLOCK_INTERVAL at each PPS
286  * interrupt and decremented once each second.
287  */
288 int clock_count = 0;		/* CPU clock counter */
289 
290 #ifdef HIGHBALL
291 /*
292  * The clock_offset and clock_cpu variables are used by the HIGHBALL
293  * interface. The clock_offset variable defines the offset between
294  * system time and the HIGHBALL counters. The clock_cpu variable contains
295  * the offset between the system clock and the HIGHBALL clock for use in
296  * disciplining the kernel time variable.
297  */
298 extern struct timeval clock_offset; /* Highball clock offset */
299 long clock_cpu = 0;		/* CPU clock adjust */
300 #endif /* HIGHBALL */
301 #endif /* EXT_CLOCK */
302 #endif /* NTP */
303 
304 
305 /*
306  * Bump a timeval by a small number of usec's.
307  */
308 #define BUMPTIME(t, usec) { \
309 	volatile struct timeval *tp = (t); \
310 	long us; \
311  \
312 	tp->tv_usec = us = tp->tv_usec + (usec); \
313 	if (us >= 1000000) { \
314 		tp->tv_usec = us - 1000000; \
315 		tp->tv_sec++; \
316 	} \
317 }
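/*
 * Usage sketch (illustrative values): starting from a timeval of
 * { 10, 999900 }, BUMPTIME(&tv, 200) leaves { 11, 100 }, since the
 * microsecond field wraps past 1000000.
 */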
318 
319 int	stathz;
320 int	profhz;
321 int	profsrc;
322 int	schedhz;
323 int	profprocs;
324 int	hardclock_ticks;
325 static int psdiv;			/* prof => stat divider */
326 int	psratio;			/* ratio: prof / stat */
327 int	tickfix, tickfixinterval;	/* used if tick not really integral */
328 #ifndef NTP
329 static int tickfixcnt;			/* accumulated fractional error */
330 #else
331 int	fixtick;			/* used by NTP for same */
332 int	shifthz;
333 #endif
334 
335 /*
336  * We might want ldd to load both words of time at once.
337  * For that to succeed, the structure must be quadword aligned.
338  * The sparc already does this; that it has worked so far is a fluke.
339  */
340 volatile struct	timeval time  __attribute__((__aligned__(__alignof__(quad_t))));
341 volatile struct	timeval mono_time;
342 
343 void	*softclock_si;
344 
345 /*
346  * Initialize clock frequencies and start both clocks running.
347  */
348 void
349 initclocks(void)
350 {
351 	int i;
352 
353 #ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
354 	softclock_si = softintr_establish(IPL_SOFTCLOCK, softclock, NULL);
355 	if (softclock_si == NULL)
356 		panic("initclocks: unable to register softclock intr");
357 #endif
358 
359 	/*
360 	 * Set divisors to 1 (normal case) and let the machine-specific
361 	 * code do its bit.
362 	 */
363 	psdiv = 1;
364 	cpu_initclocks();
365 
366 	/*
367 	 * Compute profhz/stathz/rrticks, and fix profhz if needed.
368 	 */
369 	i = stathz ? stathz : hz;
370 	if (profhz == 0)
371 		profhz = i;
372 	psratio = profhz / i;
373 	rrticks = hz / 10;
374 
375 #ifdef NTP
376 	switch (hz) {
377 	case 1:
378 		shifthz = SHIFT_SCALE - 0;
379 		break;
380 	case 2:
381 		shifthz = SHIFT_SCALE - 1;
382 		break;
383 	case 4:
384 		shifthz = SHIFT_SCALE - 2;
385 		break;
386 	case 8:
387 		shifthz = SHIFT_SCALE - 3;
388 		break;
389 	case 16:
390 		shifthz = SHIFT_SCALE - 4;
391 		break;
392 	case 32:
393 		shifthz = SHIFT_SCALE - 5;
394 		break;
395 	case 60:
396 	case 64:
397 		shifthz = SHIFT_SCALE - 6;
398 		break;
399 	case 96:
400 	case 100:
401 	case 128:
402 		shifthz = SHIFT_SCALE - 7;
403 		break;
404 	case 256:
405 		shifthz = SHIFT_SCALE - 8;
406 		break;
407 	case 512:
408 		shifthz = SHIFT_SCALE - 9;
409 		break;
410 	case 1000:
411 	case 1024:
412 		shifthz = SHIFT_SCALE - 10;
413 		break;
414 	case 1200:
415 	case 2048:
416 		shifthz = SHIFT_SCALE - 11;
417 		break;
418 	case 4096:
419 		shifthz = SHIFT_SCALE - 12;
420 		break;
421 	case 8192:
422 		shifthz = SHIFT_SCALE - 13;
423 		break;
424 	case 16384:
425 		shifthz = SHIFT_SCALE - 14;
426 		break;
427 	case 32768:
428 		shifthz = SHIFT_SCALE - 15;
429 		break;
430 	case 65536:
431 		shifthz = SHIFT_SCALE - 16;
432 		break;
433 	default:
434 		panic("weird hz");
435 	}
436 	if (fixtick == 0) {
437 		/*
438 		 * Give MD code a chance to set this to a better
439 		 * value; but, if it doesn't, we should.
440 		 */
441 		fixtick = (1000000 - (hz*tick));
442 	}
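	/*
	 * Worked example (illustrative): with hz = 60, tick is typically
	 * 1000000 / 60 = 16666 us, so hz * tick = 999960 and fixtick = 40;
	 * the NTP code below feeds those 40 us back in every second.  With
	 * hz = 100, hz * tick is exactly 1000000 and fixtick stays 0.
	 */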
443 #endif
444 }
445 
446 /*
447  * The real-time timer, interrupting hz times per second.
448  */
449 void
450 hardclock(struct clockframe *frame)
451 {
452 	struct lwp *l;
453 	struct proc *p;
454 	int delta;
455 	extern int tickdelta;
456 	extern long timedelta;
457 	struct cpu_info *ci = curcpu();
458 	struct ptimer *pt;
459 #ifdef NTP
460 	int time_update;
461 	int ltemp;
462 #endif
463 
464 	l = curlwp;
465 	if (l) {
466 		p = l->l_proc;
467 		/*
468 		 * Run current process's virtual and profile time, as needed.
469 		 */
470 		if (CLKF_USERMODE(frame) && p->p_timers &&
471 		    (pt = LIST_FIRST(&p->p_timers->pts_virtual)) != NULL)
472 			if (itimerdecr(pt, tick) == 0)
473 				itimerfire(pt);
474 		if (p->p_timers &&
475 		    (pt = LIST_FIRST(&p->p_timers->pts_prof)) != NULL)
476 			if (itimerdecr(pt, tick) == 0)
477 				itimerfire(pt);
478 	}
479 
480 	/*
481 	 * If no separate statistics clock is available, run it from here.
482 	 */
483 	if (stathz == 0)
484 		statclock(frame);
485 	if ((--ci->ci_schedstate.spc_rrticks) <= 0)
486 		roundrobin(ci);
487 
488 #if defined(MULTIPROCESSOR)
489 	/*
490 	 * If we are not the primary CPU, we're not allowed to do
491 	 * any more work.
492 	 */
493 	if (CPU_IS_PRIMARY(ci) == 0)
494 		return;
495 #endif
496 
497 	/*
498 	 * Increment the time-of-day.  The increment is normally just
499 	 * ``tick''.  If the machine has a clock frequency such that
500 	 * ``hz'' does not evenly divide one second (1000000 usec), a
501 	 * periodic adjustment must be applied.  Finally,
502 	 * if we are still adjusting the time (see adjtime()),
503 	 * ``tickdelta'' may also be added in.
504 	 */
505 	hardclock_ticks++;
506 	delta = tick;
507 
508 #ifndef NTP
509 	if (tickfix) {
510 		tickfixcnt += tickfix;
511 		if (tickfixcnt >= tickfixinterval) {
512 			delta++;
513 			tickfixcnt -= tickfixinterval;
514 		}
515 	}
516 #endif /* !NTP */
517 	/* Imprecise 4bsd adjtime() handling */
518 	if (timedelta != 0) {
519 		delta += tickdelta;
520 		timedelta -= tickdelta;
521 	}
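	/*
	 * For example (illustrative): if adjtime() left timedelta at
	 * 1000 us and tickdelta is 5 us, each of the next 200 hardclock
	 * ticks is stretched by 5 us until timedelta reaches zero.
	 */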
522 
523 #ifdef notyet
524 	microset();
525 #endif
526 
527 #ifndef NTP
528 	BUMPTIME(&time, delta);		/* XXX Now done using NTP code below */
529 #endif
530 	BUMPTIME(&mono_time, delta);
531 
532 #ifdef NTP
533 	time_update = delta;
534 
535 	/*
536 	 * Compute the phase adjustment. If the low-order bits
537 	 * (time_phase) of the update overflow, bump the high-order bits
538 	 * (time_update).
539 	 */
540 	time_phase += time_adj;
541 	if (time_phase <= -FINEUSEC) {
542 		ltemp = -time_phase >> SHIFT_SCALE;
543 		time_phase += ltemp << SHIFT_SCALE;
544 		time_update -= ltemp;
545 	} else if (time_phase >= FINEUSEC) {
546 		ltemp = time_phase >> SHIFT_SCALE;
547 		time_phase -= ltemp << SHIFT_SCALE;
548 		time_update += ltemp;
549 	}
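	/*
	 * Illustrative example: FINEUSEC is one microsecond in these
	 * scaled units, so if time_adj amounts to roughly 0.4 us per tick,
	 * time_phase crosses FINEUSEC about every third tick and one whole
	 * microsecond is carried into time_update; the remaining fraction
	 * stays behind in time_phase.
	 */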
550 
551 #ifdef HIGHBALL
552 	/*
553 	 * If the HIGHBALL board is installed, we need to adjust the
554 	 * external clock offset in order to close the hardware feedback
555 	 * loop. This will adjust the external clock phase and frequency
556 	 * in small amounts. The additional phase noise and frequency
557 	 * wander this causes should be minimal. We also need to
558 	 * discipline the kernel time variable, since the PLL is used to
559 	 * discipline the external clock. If the Highball board is not
560 	 * present, we discipline kernel time with the PLL as usual. We
561 	 * assume that the external clock phase adjustment (time_update)
562 	 * and kernel phase adjustment (clock_cpu) are less than the
563 	 * value of tick.
564 	 */
565 	clock_offset.tv_usec += time_update;
566 	if (clock_offset.tv_usec >= 1000000) {
567 		clock_offset.tv_sec++;
568 		clock_offset.tv_usec -= 1000000;
569 	}
570 	if (clock_offset.tv_usec < 0) {
571 		clock_offset.tv_sec--;
572 		clock_offset.tv_usec += 1000000;
573 	}
574 	time.tv_usec += clock_cpu;
575 	clock_cpu = 0;
576 #else
577 	time.tv_usec += time_update;
578 #endif /* HIGHBALL */
579 
580 	/*
581 	 * On rollover of the second the phase adjustment to be used for
582 	 * the next second is calculated. Also, the maximum error is
583 	 * increased by the tolerance. If the PPS frequency discipline
584 	 * code is present, the phase is increased to compensate for the
585 	 * CPU clock oscillator frequency error.
586 	 *
587 	 * On a 32-bit machine, given the parameters in the timex.h
588 	 * header file, the maximum phase adjustment is +-512 ms and the
589 	 * maximum frequency offset is (a tad less than) +-512 ppm. On a
590 	 * 64-bit machine, you shouldn't need to ask.
591 	 */
592 	if (time.tv_usec >= 1000000) {
593 		time.tv_usec -= 1000000;
594 		time.tv_sec++;
595 		time_maxerror += time_tolerance >> SHIFT_USEC;
596 
597 		/*
598 		 * Leap second processing. If in leap-insert state at
599 		 * the end of the day, the system clock is set back one
600 		 * second; if in leap-delete state, the system clock is
601 		 * set ahead one second. The microtime() routine or
602 		 * external clock driver will ensure that reported time
603 		 * is always monotonic. The ugly divides should be
604 		 * replaced.
605 		 */
606 		switch (time_state) {
607 		case TIME_OK:
608 			if (time_status & STA_INS)
609 				time_state = TIME_INS;
610 			else if (time_status & STA_DEL)
611 				time_state = TIME_DEL;
612 			break;
613 
614 		case TIME_INS:
615 			if (time.tv_sec % 86400 == 0) {
616 				time.tv_sec--;
617 				time_state = TIME_OOP;
618 			}
619 			break;
620 
621 		case TIME_DEL:
622 			if ((time.tv_sec + 1) % 86400 == 0) {
623 				time.tv_sec++;
624 				time_state = TIME_WAIT;
625 			}
626 			break;
627 
628 		case TIME_OOP:
629 			time_state = TIME_WAIT;
630 			break;
631 
632 		case TIME_WAIT:
633 			if (!(time_status & (STA_INS | STA_DEL)))
634 				time_state = TIME_OK;
635 			break;
636 		}
637 
638 		/*
639 		 * Compute the phase adjustment for the next second. In
640 		 * PLL mode, the offset is reduced by a fixed factor
641 		 * times the time constant. In FLL mode the offset is
642 		 * used directly. In either mode, the maximum phase
643 		 * adjustment for each second is clamped so as to spread
644 		 * the adjustment over not more than the number of
645 		 * seconds between updates.
646 		 */
647 		if (time_offset < 0) {
648 			ltemp = -time_offset;
649 			if (!(time_status & STA_FLL))
650 				ltemp >>= SHIFT_KG + time_constant;
651 			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
652 				ltemp = (MAXPHASE / MINSEC) <<
653 				    SHIFT_UPDATE;
654 			time_offset += ltemp;
655 			time_adj = -ltemp << (shifthz - SHIFT_UPDATE);
656 		} else if (time_offset > 0) {
657 			ltemp = time_offset;
658 			if (!(time_status & STA_FLL))
659 				ltemp >>= SHIFT_KG + time_constant;
660 			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
661 				ltemp = (MAXPHASE / MINSEC) <<
662 				    SHIFT_UPDATE;
663 			time_offset -= ltemp;
664 			time_adj = ltemp << (shifthz - SHIFT_UPDATE);
665 		} else
666 			time_adj = 0;
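		/*
		 * In PLL mode this means (illustratively) that each second
		 * 1/2^(SHIFT_KG + time_constant) of the remaining
		 * time_offset is scheduled for amortization over the next
		 * second, subject to the MAXPHASE / MINSEC clamp above.
		 */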
667 
668 		/*
669 		 * Compute the frequency estimate and additional phase
670 		 * adjustment due to frequency error for the next
671 		 * second. When the PPS signal is engaged, gnaw on the
672 		 * watchdog counter and update the frequency computed by
673 		 * the pll and the PPS signal.
674 		 */
675 #ifdef PPS_SYNC
676 		pps_valid++;
677 		if (pps_valid == PPS_VALID) {
678 			pps_jitter = MAXTIME;
679 			pps_stabil = MAXFREQ;
680 			time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
681 			    STA_PPSWANDER | STA_PPSERROR);
682 		}
683 		ltemp = time_freq + pps_freq;
684 #else
685 		ltemp = time_freq;
686 #endif /* PPS_SYNC */
687 
688 		if (ltemp < 0)
689 			time_adj -= -ltemp >> (SHIFT_USEC - shifthz);
690 		else
691 			time_adj += ltemp >> (SHIFT_USEC - shifthz);
692 		time_adj += (long)fixtick << shifthz;
693 
694 		/*
695 		 * When the CPU clock oscillator frequency is not a
696 		 * power of 2 in Hz, shifthz is only an approximate
697 		 * scale factor.
698 		 *
699 		 * To determine the adjustment, you can do the following:
700 		 *   bc -q
701 		 *   scale=24
702 		 *   obase=2
703 		 *   idealhz/realhz
704 		 * where `idealhz' is the next higher power of 2, and `realhz'
705 		 * is the actual value.  You may need to factor this result
706 		 * into a sequence of 2 multipliers to get better precision.
707 		 *
708 		 * Likewise, the error can be calculated with (e.g. for 100Hz):
709 		 *   bc -q
710 		 *   scale=24
711 		 *   ((1+2^-2+2^-5)*(1-2^-10)*realhz-idealhz)/idealhz
712 		 * (and then multiply by 1000000 to get ppm).
713 		 */
714 		switch (hz) {
715 		case 60:
716 			/* A factor of 1.000100010001 gives about 15ppm
717 			   error. */
718 			if (time_adj < 0) {
719 				time_adj -= (-time_adj >> 4);
720 				time_adj -= (-time_adj >> 8);
721 			} else {
722 				time_adj += (time_adj >> 4);
723 				time_adj += (time_adj >> 8);
724 			}
725 			break;
726 
727 		case 96:
728 			/* A factor of 1.0101010101 gives about 244ppm error. */
729 			if (time_adj < 0) {
730 				time_adj -= (-time_adj >> 2);
731 				time_adj -= (-time_adj >> 4) + (-time_adj >> 8);
732 			} else {
733 				time_adj += (time_adj >> 2);
734 				time_adj += (time_adj >> 4) + (time_adj >> 8);
735 			}
736 			break;
737 
738 		case 100:
739 			/* A factor of 1.010001111010111 gives about 1ppm
740 			   error. */
741 			if (time_adj < 0) {
742 				time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
743 				time_adj += (-time_adj >> 10);
744 			} else {
745 				time_adj += (time_adj >> 2) + (time_adj >> 5);
746 				time_adj -= (time_adj >> 10);
747 			}
748 			break;
749 
750 		case 1000:
751 			/* A factor of 1.000001100010100001 gives about 50ppm
752 			   error. */
753 			if (time_adj < 0) {
754 				time_adj -= (-time_adj >> 6) + (-time_adj >> 11);
755 				time_adj -= (-time_adj >> 7);
756 			} else {
757 				time_adj += (time_adj >> 6) + (time_adj >> 11);
758 				time_adj += (time_adj >> 7);
759 			}
760 			break;
761 
762 		case 1200:
763 			/* A factor of 1.1011010011100001 gives about 64ppm
764 			   error. */
765 			if (time_adj < 0) {
766 				time_adj -= (-time_adj >> 1) + (-time_adj >> 6);
767 				time_adj -= (-time_adj >> 3) + (-time_adj >> 10);
768 			} else {
769 				time_adj += (time_adj >> 1) + (time_adj >> 6);
770 				time_adj += (time_adj >> 3) + (time_adj >> 10);
771 			}
772 			break;
773 		}
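		/*
		 * Worked example for hz = 100 (illustrative): the next
		 * power of 2 is 128, and 128/100 = 1.28 is approximated
		 * above by (1 + 2^-2 + 2^-5) * (1 - 2^-10) = 1.2799988,
		 * i.e. the roughly 1ppm error quoted in that case.
		 */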
774 
775 #ifdef EXT_CLOCK
776 		/*
777 		 * If an external clock is present, it is necessary to
778 		 * discipline the kernel time variable anyway, since not
779 		 * all system components use the microtime() interface.
780 		 * Here, the time offset between the external clock and
781 		 * kernel time variable is computed every so often.
782 		 */
783 		clock_count++;
784 		if (clock_count > CLOCK_INTERVAL) {
785 			clock_count = 0;
786 			microtime(&clock_ext);
787 			delta.tv_sec = clock_ext.tv_sec - time.tv_sec;
788 			delta.tv_usec = clock_ext.tv_usec -
789 			    time.tv_usec;
790 			if (delta.tv_usec < 0)
791 				delta.tv_sec--;
792 			if (delta.tv_usec >= 500000) {
793 				delta.tv_usec -= 1000000;
794 				delta.tv_sec++;
795 			}
796 			if (delta.tv_usec < -500000) {
797 				delta.tv_usec += 1000000;
798 				delta.tv_sec--;
799 			}
800 			if (delta.tv_sec > 0 || (delta.tv_sec == 0 &&
801 			    delta.tv_usec > MAXPHASE) ||
802 			    delta.tv_sec < -1 || (delta.tv_sec == -1 &&
803 			    delta.tv_usec < -MAXPHASE)) {
804 				time = clock_ext;
805 				delta.tv_sec = 0;
806 				delta.tv_usec = 0;
807 			}
808 #ifdef HIGHBALL
809 			clock_cpu = delta.tv_usec;
810 #else /* HIGHBALL */
811 			hardupdate(delta.tv_usec);
812 #endif /* HIGHBALL */
813 		}
814 #endif /* EXT_CLOCK */
815 	}
816 
817 #endif /* NTP */
818 
819 	/*
820 	 * Update real-time timeout queue.
821 	 * Process callouts at a very low cpu priority, so we don't keep the
822 	 * relatively high clock interrupt priority any longer than necessary.
823 	 */
824 	if (callout_hardclock()) {
825 		if (CLKF_BASEPRI(frame)) {
826 			/*
827 			 * Save the overhead of a software interrupt;
828 			 * it will happen as soon as we return, so do
829 			 * it now.
830 			 */
831 			spllowersoftclock();
832 			KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
833 			softclock(NULL);
834 			KERNEL_UNLOCK();
835 		} else {
836 #ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
837 			softintr_schedule(softclock_si);
838 #else
839 			setsoftclock();
840 #endif
841 		}
842 	}
843 }
844 
845 /*
846  * Compute the number of clock ticks until the specified time.  Used to
847  * compute the second argument to callout_reset() from an absolute time.
848  */
849 int
850 hzto(struct timeval *tv)
851 {
852 	unsigned long ticks;
853 	long sec, usec;
854 	int s;
855 
856 	/*
857 	 * If the number of usecs in the whole seconds part of the time
858 	 * difference fits in a long, then the total number of usecs will
859 	 * fit in an unsigned long.  Compute the total and convert it to
860 	 * ticks, rounding up and adding 1 to allow for the current tick
861 	 * to expire.  Rounding also depends on unsigned long arithmetic
862 	 * to avoid overflow.
863 	 *
864 	 * Otherwise, if the number of ticks in the whole seconds part of
865 	 * the time difference fits in a long, then convert the parts to
866 	 * ticks separately and add, using similar rounding methods and
867 	 * overflow avoidance.  This method would work in the previous
868 	 * case, but it is slightly slower and assumes that hz is integral.
869 	 *
870 	 * Otherwise, round the time difference down to the maximum
871 	 * representable value.
872 	 *
873 	 * If ints are 32-bit, then the maximum value for any timeout in
874 	 * 10ms ticks is 248 days.
875 	 */
876 	s = splclock();
877 	sec = tv->tv_sec - time.tv_sec;
878 	usec = tv->tv_usec - time.tv_usec;
879 	splx(s);
880 
881 	if (usec < 0) {
882 		sec--;
883 		usec += 1000000;
884 	}
885 
886 	if (sec < 0 || (sec == 0 && usec <= 0)) {
887 		/*
888 		 * Would expire now or in the past.  Return 0 ticks.
889 		 * This is different from the legacy hzto() interface,
890 		 * and callers need to check for it.
891 		 */
892 		ticks = 0;
893 	} else if (sec <= (LONG_MAX / 1000000))
894 		ticks = (((sec * 1000000) + (unsigned long)usec + (tick - 1))
895 		    / tick) + 1;
896 	else if (sec <= (LONG_MAX / hz))
897 		ticks = (sec * hz) +
898 		    (((unsigned long)usec + (tick - 1)) / tick) + 1;
899 	else
900 		ticks = LONG_MAX;
901 
902 	if (ticks > INT_MAX)
903 		ticks = INT_MAX;
904 
905 	return ((int)ticks);
906 }
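/*
 * Worked example (illustrative): with hz = 100 (tick = 10000 us) and a
 * target 2.5 seconds in the future, the first branch above yields
 * ((2500000 + 9999) / 10000) + 1 = 251 ticks.
 */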
907 
908 /*
909  * Start profiling on a process.
910  *
911  * Kernel profiling passes proc0 which never exits and hence
912  * keeps the profile clock running constantly.
913  */
914 void
915 startprofclock(struct proc *p)
916 {
917 
918 	if ((p->p_flag & P_PROFIL) == 0) {
919 		p->p_flag |= P_PROFIL;
920 		/*
921 		 * This is only necessary if using the clock as the
922 		 * profiling source.
923 		 */
924 		if (++profprocs == 1 && stathz != 0)
925 			psdiv = psratio;
926 	}
927 }
928 
929 /*
930  * Stop profiling on a process.
931  */
932 void
933 stopprofclock(struct proc *p)
934 {
935 
936 	if (p->p_flag & P_PROFIL) {
937 		p->p_flag &= ~P_PROFIL;
938 		/*
939 		 * This is only necessary if using the clock as the
940 		 * profiling source.
941 		 */
942 		if (--profprocs == 0 && stathz != 0)
943 			psdiv = 1;
944 	}
945 }
946 
947 #if defined(PERFCTRS)
948 /*
949  * Independent profiling "tick" in case we're using a separate
950  * clock or profiling event source.  Currently, that's just
951  * performance counters--hence the wrapper.
952  */
953 void
954 proftick(struct clockframe *frame)
955 {
956 #ifdef GPROF
957         struct gmonparam *g;
958         intptr_t i;
959 #endif
960 	struct proc *p;
961 
962 	p = curproc;
963 	if (CLKF_USERMODE(frame)) {
964 		if (p->p_flag & P_PROFIL)
965 			addupc_intr(p, CLKF_PC(frame));
966 	} else {
967 #ifdef GPROF
968 		g = &_gmonparam;
969 		if (g->state == GMON_PROF_ON) {
970 			i = CLKF_PC(frame) - g->lowpc;
971 			if (i < g->textsize) {
972 				i /= HISTFRACTION * sizeof(*g->kcount);
973 				g->kcount[i]++;
974 			}
975 		}
976 #endif
977 #ifdef PROC_PC
978                 if (p && p->p_flag & P_PROFIL)
979                         addupc_intr(p, PROC_PC(p));
980 #endif
981 	}
982 }
983 #endif
984 
985 /*
986  * Statistics clock.  Grab profile sample, and if divider reaches 0,
987  * do process and kernel statistics.
988  */
989 void
990 statclock(struct clockframe *frame)
991 {
992 #ifdef GPROF
993 	struct gmonparam *g;
994 	intptr_t i;
995 #endif
996 	struct cpu_info *ci = curcpu();
997 	struct schedstate_percpu *spc = &ci->ci_schedstate;
998 	struct lwp *l;
999 	struct proc *p;
1000 
1001 	/*
1002 	 * Notice changes in divisor frequency, and adjust clock
1003 	 * frequency accordingly.
1004 	 */
1005 	if (spc->spc_psdiv != psdiv) {
1006 		spc->spc_psdiv = psdiv;
1007 		spc->spc_pscnt = psdiv;
1008 		if (psdiv == 1) {
1009 			setstatclockrate(stathz);
1010 		} else {
1011 			setstatclockrate(profhz);
1012 		}
1013 	}
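	/*
	 * For example (illustrative), if psratio is 8: starting profiling
	 * sets psdiv to 8, the hardware is reprogrammed here via
	 * setstatclockrate(profhz), and spc_pscnt then counts down from 8
	 * between statistics updates.
	 */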
1014 	l = curlwp;
1015 	p = (l ? l->l_proc : 0);
1016 	if (CLKF_USERMODE(frame)) {
1017 		if (p->p_flag & P_PROFIL && profsrc == PROFSRC_CLOCK)
1018 			addupc_intr(p, CLKF_PC(frame));
1019 		if (--spc->spc_pscnt > 0)
1020 			return;
1021 		/*
1022 		 * Came from user mode; CPU was in user state.
1023 		 * If this process is being profiled record the tick.
1024 		 */
1025 		p->p_uticks++;
1026 		if (p->p_nice > NZERO)
1027 			spc->spc_cp_time[CP_NICE]++;
1028 		else
1029 			spc->spc_cp_time[CP_USER]++;
1030 	} else {
1031 #ifdef GPROF
1032 		/*
1033 		 * Kernel statistics are just like addupc_intr, only easier.
1034 		 */
1035 		g = &_gmonparam;
1036 		if (profsrc == PROFSRC_CLOCK && g->state == GMON_PROF_ON) {
1037 			i = CLKF_PC(frame) - g->lowpc;
1038 			if (i < g->textsize) {
1039 				i /= HISTFRACTION * sizeof(*g->kcount);
1040 				g->kcount[i]++;
1041 			}
1042 		}
1043 #endif
1044 #ifdef LWP_PC
1045 		if (p && profsrc == PROFSRC_CLOCK && p->p_flag & P_PROFIL)
1046 			addupc_intr(p, LWP_PC(l));
1047 #endif
1048 		if (--spc->spc_pscnt > 0)
1049 			return;
1050 		/*
1051 		 * Came from kernel mode, so we were:
1052 		 * - handling an interrupt,
1053 		 * - doing syscall or trap work on behalf of the current
1054 		 *   user process, or
1055 		 * - spinning in the idle loop.
1056 		 * Whichever it is, charge the time as appropriate.
1057 		 * Note that we charge interrupts to the current process,
1058 		 * regardless of whether they are ``for'' that process,
1059 		 * so that we know how much of its real time was spent
1060 		 * in ``non-process'' (i.e., interrupt) work.
1061 		 */
1062 		if (CLKF_INTR(frame)) {
1063 			if (p != NULL)
1064 				p->p_iticks++;
1065 			spc->spc_cp_time[CP_INTR]++;
1066 		} else if (p != NULL) {
1067 			p->p_sticks++;
1068 			spc->spc_cp_time[CP_SYS]++;
1069 		} else
1070 			spc->spc_cp_time[CP_IDLE]++;
1071 	}
1072 	spc->spc_pscnt = psdiv;
1073 
1074 	if (l != NULL) {
1075 		++p->p_cpticks;
1076 		/*
1077 		 * If no separate schedclock is provided, call it here
1078 		 * at roughly 12-25 Hz; about 16 Hz is best.
1079 		 */
1080 		if (schedhz == 0)
1081 			if ((++ci->ci_schedstate.spc_schedticks & 3) == 0)
1082 				schedclock(l);
1083 	}
1084 }
1085 
1086 
1087 #ifdef NTP	/* NTP phase-locked loop in kernel */
1088 
1089 /*
1090  * hardupdate() - local clock update
1091  *
1092  * This routine is called by ntp_adjtime() to update the local clock
1093  * phase and frequency. The implementation is of an adaptive-parameter,
1094  * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new
1095  * time and frequency offset estimates for each call. If the kernel PPS
1096  * discipline code is configured (PPS_SYNC), the PPS signal itself
1097  * determines the new time offset, instead of the calling argument.
1098  * Presumably, calls to ntp_adjtime() occur only when the caller
1099  * believes the local clock is valid within some bound (+-128 ms with
1100  * NTP). If the caller's time is far different than the PPS time, an
1101  * argument will ensue, and it's not clear who will lose.
1102  *
1103  * For uncompensated quartz crystal oscillators and nominal update
1104  * intervals less than 1024 s, operation should be in phase-lock mode
1105  * (STA_FLL = 0), where the loop is disciplined to phase. For update
1106  * intervals greater than this, operation should be in frequency-lock
1107  * mode (STA_FLL = 1), where the loop is disciplined to frequency.
1108  *
1109  * Note: splclock() is in effect.
1110  */
1111 void
1112 hardupdate(long offset)
1113 {
1114 	long ltemp, mtemp;
1115 
1116 	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
1117 		return;
1118 	ltemp = offset;
1119 #ifdef PPS_SYNC
1120 	if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
1121 		ltemp = pps_offset;
1122 #endif /* PPS_SYNC */
1123 
1124 	/*
1125 	 * Scale the phase adjustment and clamp to the operating range.
1126 	 */
1127 	if (ltemp > MAXPHASE)
1128 		time_offset = MAXPHASE << SHIFT_UPDATE;
1129 	else if (ltemp < -MAXPHASE)
1130 		time_offset = -(MAXPHASE << SHIFT_UPDATE);
1131 	else
1132 		time_offset = ltemp << SHIFT_UPDATE;
1133 
1134 	/*
1135 	 * Select whether the frequency is to be controlled and in which
1136 	 * mode (PLL or FLL). Clamp to the operating range. Ugly
1137 	 * multiply/divide should be replaced someday.
1138 	 */
1139 	if (time_status & STA_FREQHOLD || time_reftime == 0)
1140 		time_reftime = time.tv_sec;
1141 	mtemp = time.tv_sec - time_reftime;
1142 	time_reftime = time.tv_sec;
1143 	if (time_status & STA_FLL) {
1144 		if (mtemp >= MINSEC) {
1145 			ltemp = ((time_offset / mtemp) << (SHIFT_USEC -
1146 			    SHIFT_UPDATE));
1147 			if (ltemp < 0)
1148 				time_freq -= -ltemp >> SHIFT_KH;
1149 			else
1150 				time_freq += ltemp >> SHIFT_KH;
1151 		}
1152 	} else {
1153 		if (mtemp < MAXSEC) {
1154 			ltemp *= mtemp;
1155 			if (ltemp < 0)
1156 				time_freq -= -ltemp >> (time_constant +
1157 				    time_constant + SHIFT_KF -
1158 				    SHIFT_USEC);
1159 			else
1160 				time_freq += ltemp >> (time_constant +
1161 				    time_constant + SHIFT_KF -
1162 				    SHIFT_USEC);
1163 		}
1164 	}
1165 	if (time_freq > time_tolerance)
1166 		time_freq = time_tolerance;
1167 	else if (time_freq < -time_tolerance)
1168 		time_freq = -time_tolerance;
1169 }
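/*
 * Usage sketch (illustrative): an ntp_adjtime() call reporting a +200 ms
 * phase error arrives here as offset = 200000; that exceeds MAXPHASE, so
 * only MAXPHASE << SHIFT_UPDATE is loaded into time_offset for
 * hardclock() to amortize.
 */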
1170 
1171 #ifdef PPS_SYNC
1172 /*
1173  * hardpps() - discipline CPU clock oscillator to external PPS signal
1174  *
1175  * This routine is called at each PPS interrupt in order to discipline
1176  * the CPU clock oscillator to the PPS signal. It measures the PPS phase
1177  * and leaves it in a handy spot for the hardclock() routine. It
1178  * integrates successive PPS phase differences and calculates the
1179  * frequency offset. This is used in hardclock() to discipline the CPU
1180  * clock oscillator so that intrinsic frequency error is cancelled out.
1181  * The code requires the caller to capture the time and hardware counter
1182  * value at the on-time PPS signal transition.
1183  *
1184  * Note that, on some Unix systems, this routine runs at an interrupt
1185  * priority level higher than the timer interrupt routine hardclock().
1186  * Therefore, the variables used are distinct from the hardclock()
1187  * variables, except for certain exceptions: The PPS frequency pps_freq
1188  * and phase pps_offset variables are determined by this routine and
1189  * updated atomically. The time_tolerance variable can be considered a
1190  * constant, since it is infrequently changed, and then only when the
1191  * PPS signal is disabled. The watchdog counter pps_valid is updated
1192  * once per second by hardclock() and is atomically cleared in this
1193  * routine.
1194  */
1195 void
1196 hardpps(struct timeval *tvp,		/* time at PPS */
1197 	long usec			/* hardware counter at PPS */)
1198 {
1199 	long u_usec, v_usec, bigtick;
1200 	long cal_sec, cal_usec;
1201 
1202 	/*
1203 	 * An occasional glitch can be produced when the PPS interrupt
1204 	 * occurs in the hardclock() routine before the time variable is
1205 	 * updated. Here the offset is discarded when the difference
1206 	 * between it and the last one is greater than tick/2, but not
1207 	 * if the interval since the first discard exceeds 30 s.
1208 	 */
1209 	time_status |= STA_PPSSIGNAL;
1210 	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
1211 	pps_valid = 0;
1212 	u_usec = -tvp->tv_usec;
1213 	if (u_usec < -500000)
1214 		u_usec += 1000000;
1215 	v_usec = pps_offset - u_usec;
1216 	if (v_usec < 0)
1217 		v_usec = -v_usec;
1218 	if (v_usec > (tick >> 1)) {
1219 		if (pps_glitch > MAXGLITCH) {
1220 			pps_glitch = 0;
1221 			pps_tf[2] = u_usec;
1222 			pps_tf[1] = u_usec;
1223 		} else {
1224 			pps_glitch++;
1225 			u_usec = pps_offset;
1226 		}
1227 	} else
1228 		pps_glitch = 0;
1229 
1230 	/*
1231 	 * A three-stage median filter is used to help deglitch the pps
1232 	 * time. The median sample becomes the time offset estimate; the
1233 	 * difference between the other two samples becomes the time
1234 	 * dispersion (jitter) estimate.
1235 	 */
1236 	pps_tf[2] = pps_tf[1];
1237 	pps_tf[1] = pps_tf[0];
1238 	pps_tf[0] = u_usec;
1239 	if (pps_tf[0] > pps_tf[1]) {
1240 		if (pps_tf[1] > pps_tf[2]) {
1241 			pps_offset = pps_tf[1];		/* 0 1 2 */
1242 			v_usec = pps_tf[0] - pps_tf[2];
1243 		} else if (pps_tf[2] > pps_tf[0]) {
1244 			pps_offset = pps_tf[0];		/* 2 0 1 */
1245 			v_usec = pps_tf[2] - pps_tf[1];
1246 		} else {
1247 			pps_offset = pps_tf[2];		/* 0 2 1 */
1248 			v_usec = pps_tf[0] - pps_tf[1];
1249 		}
1250 	} else {
1251 		if (pps_tf[1] < pps_tf[2]) {
1252 			pps_offset = pps_tf[1];		/* 2 1 0 */
1253 			v_usec = pps_tf[2] - pps_tf[0];
1254 		} else  if (pps_tf[2] < pps_tf[0]) {
1255 			pps_offset = pps_tf[0];		/* 1 0 2 */
1256 			v_usec = pps_tf[1] - pps_tf[2];
1257 		} else {
1258 			pps_offset = pps_tf[2];		/* 1 2 0 */
1259 			v_usec = pps_tf[1] - pps_tf[0];
1260 		}
1261 	}
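	/*
	 * Illustrative example: with samples pps_tf[] = { 40, 10, 25 }
	 * (newest first, in us), the code above selects the median 25 as
	 * pps_offset and 40 - 10 = 30 us as the jitter sample v_usec.
	 */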
1262 	if (v_usec > MAXTIME)
1263 		pps_jitcnt++;
1264 	v_usec = (v_usec << PPS_AVG) - pps_jitter;
1265 	if (v_usec < 0)
1266 		pps_jitter -= -v_usec >> PPS_AVG;
1267 	else
1268 		pps_jitter += v_usec >> PPS_AVG;
1269 	if (pps_jitter > (MAXTIME >> 1))
1270 		time_status |= STA_PPSJITTER;
1271 
1272 	/*
1273 	 * During the calibration interval adjust the starting time when
1274 	 * the tick overflows. At the end of the interval compute the
1275 	 * duration of the interval and the difference of the hardware
1276 	 * counters at the beginning and end of the interval. This code
1277 	 * is deliciously complicated by the fact that valid differences may
1278 	 * exceed the value of tick when using long calibration
1279 	 * intervals and small ticks. Note that the counter can be
1280 	 * greater than tick if caught at just the wrong instant, but
1281 	 * the values returned and used here are correct.
1282 	 */
1283 	bigtick = (long)tick << SHIFT_USEC;
1284 	pps_usec -= pps_freq;
1285 	if (pps_usec >= bigtick)
1286 		pps_usec -= bigtick;
1287 	if (pps_usec < 0)
1288 		pps_usec += bigtick;
1289 	pps_time.tv_sec++;
1290 	pps_count++;
1291 	if (pps_count < (1 << pps_shift))
1292 		return;
1293 	pps_count = 0;
1294 	pps_calcnt++;
1295 	u_usec = usec << SHIFT_USEC;
1296 	v_usec = pps_usec - u_usec;
1297 	if (v_usec >= bigtick >> 1)
1298 		v_usec -= bigtick;
1299 	if (v_usec < -(bigtick >> 1))
1300 		v_usec += bigtick;
1301 	if (v_usec < 0)
1302 		v_usec = -(-v_usec >> pps_shift);
1303 	else
1304 		v_usec = v_usec >> pps_shift;
1305 	pps_usec = u_usec;
1306 	cal_sec = tvp->tv_sec;
1307 	cal_usec = tvp->tv_usec;
1308 	cal_sec -= pps_time.tv_sec;
1309 	cal_usec -= pps_time.tv_usec;
1310 	if (cal_usec < 0) {
1311 		cal_usec += 1000000;
1312 		cal_sec--;
1313 	}
1314 	pps_time = *tvp;
1315 
1316 	/*
1317 	 * Check for lost interrupts, noise, excessive jitter and
1318 	 * excessive frequency error. The number of timer ticks during
1319 	 * the interval may vary +-1 tick. Add to this a margin of one
1320 	 * tick for the PPS signal jitter and maximum frequency
1321 	 * deviation. If the limits are exceeded, the calibration
1322 	 * interval is reset to the minimum and we start over.
1323 	 */
1324 	u_usec = (long)tick << 1;
1325 	if (!((cal_sec == -1 && cal_usec > (1000000 - u_usec))
1326 	    || (cal_sec == 0 && cal_usec < u_usec))
1327 	    || v_usec > time_tolerance || v_usec < -time_tolerance) {
1328 		pps_errcnt++;
1329 		pps_shift = PPS_SHIFT;
1330 		pps_intcnt = 0;
1331 		time_status |= STA_PPSERROR;
1332 		return;
1333 	}
1334 
1335 	/*
1336 	 * A three-stage median filter is used to help deglitch the pps
1337 	 * frequency. The median sample becomes the frequency offset
1338 	 * estimate; the difference between the other two samples
1339 	 * becomes the frequency dispersion (stability) estimate.
1340 	 */
1341 	pps_ff[2] = pps_ff[1];
1342 	pps_ff[1] = pps_ff[0];
1343 	pps_ff[0] = v_usec;
1344 	if (pps_ff[0] > pps_ff[1]) {
1345 		if (pps_ff[1] > pps_ff[2]) {
1346 			u_usec = pps_ff[1];		/* 0 1 2 */
1347 			v_usec = pps_ff[0] - pps_ff[2];
1348 		} else if (pps_ff[2] > pps_ff[0]) {
1349 			u_usec = pps_ff[0];		/* 2 0 1 */
1350 			v_usec = pps_ff[2] - pps_ff[1];
1351 		} else {
1352 			u_usec = pps_ff[2];		/* 0 2 1 */
1353 			v_usec = pps_ff[0] - pps_ff[1];
1354 		}
1355 	} else {
1356 		if (pps_ff[1] < pps_ff[2]) {
1357 			u_usec = pps_ff[1];		/* 2 1 0 */
1358 			v_usec = pps_ff[2] - pps_ff[0];
1359 		} else  if (pps_ff[2] < pps_ff[0]) {
1360 			u_usec = pps_ff[0];		/* 1 0 2 */
1361 			v_usec = pps_ff[1] - pps_ff[2];
1362 		} else {
1363 			u_usec = pps_ff[2];		/* 1 2 0 */
1364 			v_usec = pps_ff[1] - pps_ff[0];
1365 		}
1366 	}
1367 
1368 	/*
1369 	 * Here the frequency dispersion (stability) is updated. If it
1370 	 * is less than one-fourth the maximum (MAXFREQ), the frequency
1371 	 * offset is updated as well, but clamped to the tolerance. It
1372 	 * will be processed later by the hardclock() routine.
1373 	 */
1374 	v_usec = (v_usec >> 1) - pps_stabil;
1375 	if (v_usec < 0)
1376 		pps_stabil -= -v_usec >> PPS_AVG;
1377 	else
1378 		pps_stabil += v_usec >> PPS_AVG;
1379 	if (pps_stabil > MAXFREQ >> 2) {
1380 		pps_stbcnt++;
1381 		time_status |= STA_PPSWANDER;
1382 		return;
1383 	}
1384 	if (time_status & STA_PPSFREQ) {
1385 		if (u_usec < 0) {
1386 			pps_freq -= -u_usec >> PPS_AVG;
1387 			if (pps_freq < -time_tolerance)
1388 				pps_freq = -time_tolerance;
1389 			u_usec = -u_usec;
1390 		} else {
1391 			pps_freq += u_usec >> PPS_AVG;
1392 			if (pps_freq > time_tolerance)
1393 				pps_freq = time_tolerance;
1394 		}
1395 	}
1396 
1397 	/*
1398 	 * Here the calibration interval is adjusted. If the maximum
1399 	 * time difference is greater than tick / 4, reduce the interval
1400 	 * by half. If this is not the case for four consecutive
1401 	 * intervals, double the interval.
1402 	 */
1403 	if (u_usec << pps_shift > bigtick >> 2) {
1404 		pps_intcnt = 0;
1405 		if (pps_shift > PPS_SHIFT)
1406 			pps_shift--;
1407 	} else if (pps_intcnt >= 4) {
1408 		pps_intcnt = 0;
1409 		if (pps_shift < PPS_SHIFTMAX)
1410 			pps_shift++;
1411 	} else
1412 		pps_intcnt++;
1413 }
1414 #endif /* PPS_SYNC */
1415 #endif /* NTP  */
1416 
1417 /*
1418  * Return information about system clocks.
1419  */
1420 int
1421 sysctl_clockrate(void *where, size_t *sizep)
1422 {
1423 	struct clockinfo clkinfo;
1424 
1425 	/*
1426 	 * Construct clockinfo structure.
1427 	 */
1428 	clkinfo.tick = tick;
1429 	clkinfo.tickadj = tickadj;
1430 	clkinfo.hz = hz;
1431 	clkinfo.profhz = profhz;
1432 	clkinfo.stathz = stathz ? stathz : hz;
1433 	return (sysctl_rdstruct(where, sizep, NULL, &clkinfo, sizeof(clkinfo)));
1434 }
1435