1 /*	$NetBSD: kern_clock.c,v 1.104 2006/11/01 10:17:58 yamt Exp $	*/
2 
3 /*-
4  * Copyright (c) 2000, 2004 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center.
10  * This code is derived from software contributed to The NetBSD Foundation
11  * by Charles M. Hannum.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. All advertising materials mentioning features or use of this software
22  *    must display the following acknowledgement:
23  *	This product includes software developed by the NetBSD
24  *	Foundation, Inc. and its contributors.
25  * 4. Neither the name of The NetBSD Foundation nor the names of its
26  *    contributors may be used to endorse or promote products derived
27  *    from this software without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
30  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
31  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
32  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
33  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
36  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
37  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39  * POSSIBILITY OF SUCH DAMAGE.
40  */
41 
42 /*-
43  * Copyright (c) 1982, 1986, 1991, 1993
44  *	The Regents of the University of California.  All rights reserved.
45  * (c) UNIX System Laboratories, Inc.
46  * All or some portions of this file are derived from material licensed
47  * to the University of California by American Telephone and Telegraph
48  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
49  * the permission of UNIX System Laboratories, Inc.
50  *
51  * Redistribution and use in source and binary forms, with or without
52  * modification, are permitted provided that the following conditions
53  * are met:
54  * 1. Redistributions of source code must retain the above copyright
55  *    notice, this list of conditions and the following disclaimer.
56  * 2. Redistributions in binary form must reproduce the above copyright
57  *    notice, this list of conditions and the following disclaimer in the
58  *    documentation and/or other materials provided with the distribution.
59  * 3. Neither the name of the University nor the names of its contributors
60  *    may be used to endorse or promote products derived from this software
61  *    without specific prior written permission.
62  *
63  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
64  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
65  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
66  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
67  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
68  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
69  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
70  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
71  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
72  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73  * SUCH DAMAGE.
74  *
75  *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
76  */
77 
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: kern_clock.c,v 1.104 2006/11/01 10:17:58 yamt Exp $");
80 
81 #include "opt_ntp.h"
82 #include "opt_multiprocessor.h"
83 #include "opt_perfctrs.h"
84 
85 #include <sys/param.h>
86 #include <sys/systm.h>
87 #include <sys/callout.h>
88 #include <sys/kernel.h>
89 #include <sys/proc.h>
90 #include <sys/resourcevar.h>
91 #include <sys/signalvar.h>
92 #include <sys/sysctl.h>
93 #include <sys/timex.h>
94 #include <sys/sched.h>
95 #include <sys/time.h>
96 #ifdef __HAVE_TIMECOUNTER
97 #include <sys/timetc.h>
98 #endif
99 
100 #include <machine/cpu.h>
101 #ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
102 #include <machine/intr.h>
103 #endif
104 
105 #ifdef GPROF
106 #include <sys/gmon.h>
107 #endif
108 
109 /*
110  * Clock handling routines.
111  *
112  * This code is written to operate with two timers that run independently of
113  * each other.  The main clock, running hz times per second, is used to keep
114  * track of real time.  The second timer handles kernel and user profiling,
115  * and does resource use estimation.  If the second timer is programmable,
116  * it is randomized to avoid aliasing between the two clocks.  For example,
117  * the randomization prevents an adversary from always giving up the CPU
118  * just before its quantum expires.  Otherwise, it would never accumulate
119  * CPU ticks.  The mean frequency of the second timer is stathz.
120  *
121  * If no second timer exists, stathz will be zero; in this case we drive
122  * profiling and statistics off the main clock.  This WILL NOT be accurate;
123  * do not do it unless absolutely necessary.
124  *
125  * The statistics clock may (or may not) be run at a higher rate while
126  * profiling.  This profile clock runs at profhz.  We require that profhz
127  * be an integral multiple of stathz.
128  *
129  * If the statistics clock is running fast, it must be divided by the ratio
130  * profhz/stathz for statistics.  (For profiling, every tick counts.)
131  */
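
/*
 * Illustrative sketch (the numbers are examples, not requirements):
 * with stathz = 128 and profhz = 1024, psratio = profhz / stathz = 8.
 * While any process is being profiled, psdiv is set to psratio and
 * statclock() runs at profhz, but it only performs the statistics
 * work on every psdiv-th tick, as in the code below:
 *
 *	if (--spc->spc_pscnt > 0)
 *		return;			profile-only tick
 *	spc->spc_pscnt = psdiv;		full statistics tick
 */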
132 
133 #ifndef __HAVE_TIMECOUNTER
134 #ifdef NTP	/* NTP phase-locked loop in kernel */
135 /*
136  * Phase/frequency-lock loop (PLL/FLL) definitions
137  *
138  * The following variables are read and set by the ntp_adjtime() system
139  * call.
140  *
141  * time_state shows the state of the system clock, with values defined
142  * in the timex.h header file.
143  *
144  * time_status shows the status of the system clock, with bits defined
145  * in the timex.h header file.
146  *
147  * time_offset is used by the PLL/FLL to adjust the system time in small
148  * increments.
149  *
150  * time_constant determines the bandwidth or "stiffness" of the PLL.
151  *
152  * time_tolerance determines maximum frequency error or tolerance of the
153  * CPU clock oscillator and is a property of the architecture; however,
154  * in principle it could change as a result of the presence of external
155  * discipline signals, for instance.
156  *
157  * time_precision is usually equal to the kernel tick variable; however,
158  * in cases where a precision clock counter or external clock is
159  * available, the resolution can be much less than this and depend on
160  * whether the external clock is working or not.
161  *
162  * time_maxerror is initialized by an ntp_adjtime() call and increased by
163  * the kernel once each second to reflect the maximum error bound
164  * growth.
165  *
166  * time_esterror is set and read by the ntp_adjtime() call, but
167  * otherwise not used by the kernel.
168  */
169 int time_state = TIME_OK;	/* clock state */
170 int time_status = STA_UNSYNC;	/* clock status bits */
171 long time_offset = 0;		/* time offset (us) */
172 long time_constant = 0;		/* pll time constant */
173 long time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
174 long time_precision = 1;	/* clock precision (us) */
175 long time_maxerror = MAXPHASE;	/* maximum error (us) */
176 long time_esterror = MAXPHASE;	/* estimated error (us) */
177 
178 /*
179  * The following variables establish the state of the PLL/FLL and the
180  * residual time and frequency offset of the local clock. The scale
181  * factors are defined in the timex.h header file.
182  *
183  * time_phase and time_freq are the phase increment and the frequency
184  * increment, respectively, of the kernel time variable.
185  *
186  * time_freq is set via ntp_adjtime() from a value stored in a file when
187  * the synchronization daemon is first started. Its value is retrieved
188  * via ntp_adjtime() and written to the file about once per hour by the
189  * daemon.
190  *
191  * time_adj is the adjustment added to the value of tick at each timer
192  * interrupt and is recomputed from time_phase and time_freq at each
193  * seconds rollover.
194  *
195  * time_reftime is the second's portion of the system time at the last
196  * call to ntp_adjtime(). It is used to adjust the time_freq variable
197  * and to increase the time_maxerror as the time since last update
198  * increases.
199  */
200 long time_phase = 0;		/* phase offset (scaled us) */
201 long time_freq = 0;		/* frequency offset (scaled ppm) */
202 long time_adj = 0;		/* tick adjust (scaled 1 / hz) */
203 long time_reftime = 0;		/* time at last adjustment (s) */
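
/*
 * Worked example (assuming the classic timex.h scale factors, e.g.
 * SHIFT_SCALE = 22, so that FINEUSEC = 1 << 22 represents 1 us): with
 * time_adj = 1 << 20, i.e. a quarter of a microsecond per tick,
 * time_phase reaches FINEUSEC after four hardclock() ticks, at which
 * point one whole microsecond is moved into time_update and the
 * fractional remainder stays in time_phase.
 */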
204 
205 #ifdef PPS_SYNC
206 /*
207  * The following variables are used only if the kernel PPS discipline
208  * code is configured (PPS_SYNC). The scale factors are defined in the
209  * timex.h header file.
210  *
211  * pps_time contains the time at each calibration interval, as read by
212  * microtime(). pps_count counts the seconds of the calibration
213  * interval, the duration of which is nominally pps_shift in powers of
214  * two.
215  *
216  * pps_offset is the time offset produced by the time median filter
217  * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
218  * this filter.
219  *
220  * pps_freq is the frequency offset produced by the frequency median
221  * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
222  * by this filter.
223  *
224  * pps_usec is latched from a high resolution counter or external clock
225  * at pps_time. Here we want the hardware counter contents only, not the
226  * contents plus time.tv_usec as usual.
227  *
228  * pps_valid counts the number of seconds since the last PPS update. It
229  * is used as a watchdog timer to disable the PPS discipline should the
230  * PPS signal be lost.
231  *
232  * pps_glitch counts the number of seconds since the beginning of an
233  * offset burst more than tick/2 from current nominal offset. It is used
234  * mainly to suppress error bursts due to priority conflicts between the
235  * PPS interrupt and timer interrupt.
236  *
237  * pps_intcnt counts the calibration intervals for use in the interval-
238  * adaptation algorithm. It's just too complicated for words.
239  *
240  * pps_kc_hardpps_source contains an arbitrary value that uniquely
241  * identifies the currently bound source of the PPS signal, or NULL
242  * if no source is bound.
243  *
244  * pps_kc_hardpps_mode indicates which transitions, if any, of the PPS
245  * signal should be reported.
246  */
247 struct timeval pps_time;	/* kernel time at last interval */
248 long pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
249 long pps_offset = 0;		/* pps time offset (us) */
250 long pps_jitter = MAXTIME;	/* time dispersion (jitter) (us) */
251 long pps_ff[] = {0, 0, 0};	/* pps frequency offset median filter */
252 long pps_freq = 0;		/* frequency offset (scaled ppm) */
253 long pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
254 long pps_usec = 0;		/* microsec counter at last interval */
255 long pps_valid = PPS_VALID;	/* pps signal watchdog counter */
256 int pps_glitch = 0;		/* pps signal glitch counter */
257 int pps_count = 0;		/* calibration interval counter (s) */
258 int pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
259 int pps_intcnt = 0;		/* intervals at current duration */
260 void *pps_kc_hardpps_source = NULL; /* current PPS supplier's identifier */
261 int pps_kc_hardpps_mode = 0;	/* interesting edges of PPS signal */
262 
263 /*
264  * PPS signal quality monitors
265  *
266  * pps_jitcnt counts the seconds that have been discarded because the
267  * jitter measured by the time median filter exceeds the limit MAXTIME
268  * (100 us).
269  *
270  * pps_calcnt counts the frequency calibration intervals, which are
271  * variable from 4 s to 256 s.
272  *
273  * pps_errcnt counts the calibration intervals which have been discarded
274  * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
275  * calibration interval jitter exceeds two ticks.
276  *
277  * pps_stbcnt counts the calibration intervals that have been discarded
278  * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
279  */
280 long pps_jitcnt = 0;		/* jitter limit exceeded */
281 long pps_calcnt = 0;		/* calibration intervals */
282 long pps_errcnt = 0;		/* calibration errors */
283 long pps_stbcnt = 0;		/* stability limit exceeded */
284 #endif /* PPS_SYNC */
285 
286 #ifdef EXT_CLOCK
287 /*
288  * External clock definitions
289  *
290  * The following definitions and declarations are used only if an
291  * external clock is configured on the system.
292  */
293 #define CLOCK_INTERVAL 30	/* CPU clock update interval (s) */
294 
295 /*
296  * The clock_count variable is set to CLOCK_INTERVAL at each PPS
297  * interrupt and decremented once each second.
298  */
299 int clock_count = 0;		/* CPU clock counter */
300 
301 #ifdef HIGHBALL
302 /*
303  * The clock_offset and clock_cpu variables are used by the HIGHBALL
304  * interface. The clock_offset variable defines the offset between
305  * system time and the HIGHBALL counters. The clock_cpu variable contains
306  * the offset between the system clock and the HIGHBALL clock for use in
307  * disciplining the kernel time variable.
308  */
309 extern struct timeval clock_offset; /* Highball clock offset */
310 long clock_cpu = 0;		/* CPU clock adjust */
311 #endif /* HIGHBALL */
312 #endif /* EXT_CLOCK */
313 #endif /* NTP */
314 
315 /*
316  * Bump a timeval by a small number of usec's.
317  */
318 #define BUMPTIME(t, usec) { \
319 	volatile struct timeval *tp = (t); \
320 	long us; \
321  \
322 	tp->tv_usec = us = tp->tv_usec + (usec); \
323 	if (us >= 1000000) { \
324 		tp->tv_usec = us - 1000000; \
325 		tp->tv_sec++; \
326 	} \
327 }
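
/*
 * Usage sketch: hardclock() below advances the monotonic clock with
 *
 *	BUMPTIME(&mono_time, delta);
 *
 * The macro assumes the increment is less than one second, so a
 * single normalization step keeps tv_usec in [0, 1000000).
 */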
328 #endif /* !__HAVE_TIMECOUNTER */
329 
330 int	stathz;
331 int	profhz;
332 int	profsrc;
333 int	schedhz;
334 int	profprocs;
335 int	hardclock_ticks;
336 static int statscheddiv; /* stat => sched divider (used if schedhz == 0) */
337 static int psdiv;			/* prof => stat divider */
338 int	psratio;			/* ratio: prof / stat */
339 #ifndef __HAVE_TIMECOUNTER
340 int	tickfix, tickfixinterval;	/* used if tick not really integral */
341 #ifndef NTP
342 static int tickfixcnt;			/* accumulated fractional error */
343 #else
344 int	fixtick;			/* used by NTP for same */
345 int	shifthz;
346 #endif
347 
348 /*
349  * We might want ldd to load both words from time at once.
350  * To succeed we need to be quadword aligned.
351  * The sparc already does that, and that it has worked so far is a fluke.
352  */
353 volatile struct	timeval time  __attribute__((__aligned__(__alignof__(quad_t))));
354 volatile struct	timeval mono_time;
355 #endif /* !__HAVE_TIMECOUNTER */
356 
357 #ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
358 void	*softclock_si;
359 #endif
360 
361 #ifdef __HAVE_TIMECOUNTER
362 static u_int get_intr_timecount(struct timecounter *);
363 
364 static struct timecounter intr_timecounter = {
365 	get_intr_timecount,	/* get_timecount */
366 	0,			/* no poll_pps */
367 	~0u,			/* counter_mask */
368 	0,		        /* frequency */
369 	"clockinterrupt",	/* name */
370 	0,			/* quality - minimum implementation level for a clock */
371 	NULL,			/* prev */
372 	NULL,			/* next */
373 };
374 
375 static u_int
376 get_intr_timecount(struct timecounter *tc)
377 {
378 
379 	return (u_int)hardclock_ticks;
380 }
381 #endif
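
/*
 * The interrupt timecounter above advances only once per hardclock()
 * tick, so time read through it has 1/hz resolution.  Sketch of how
 * machine-dependent code might register a better counter (the names
 * md_get_cycles, md_counter_hz and "mdcounter" are hypothetical),
 * following the same positional layout as intr_timecounter:
 *
 *	static struct timecounter md_timecounter = {
 *		md_get_cycles,	get_timecount
 *		0,		no poll_pps
 *		~0u,		counter_mask
 *		0,		frequency, filled in below
 *		"mdcounter",	name
 *		100,		quality
 *		NULL,		prev
 *		NULL,		next
 *	};
 *
 *	md_timecounter.tc_frequency = md_counter_hz;
 *	tc_init(&md_timecounter);
 */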
382 
383 /*
384  * Initialize clock frequencies and start both clocks running.
385  */
386 void
387 initclocks(void)
388 {
389 	int i;
390 
391 #ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
392 	softclock_si = softintr_establish(IPL_SOFTCLOCK, softclock, NULL);
393 	if (softclock_si == NULL)
394 		panic("initclocks: unable to register softclock intr");
395 #endif
396 
397 	/*
398 	 * Set divisors to 1 (normal case) and let the machine-specific
399 	 * code do its bit.
400 	 */
401 	psdiv = 1;
402 #ifdef __HAVE_TIMECOUNTER
403 	/*
404 	 * Provide a minimum default time counter; it will only run at
405 	 * interrupt (1/hz) resolution.
406 	 */
407 	intr_timecounter.tc_frequency = hz;
408 	tc_init(&intr_timecounter);
409 #endif
410 	cpu_initclocks();
411 
412 	/*
413 	 * Compute profhz/stathz/rrticks, and fix profhz if needed.
414 	 */
415 	i = stathz ? stathz : hz;
416 	if (profhz == 0)
417 		profhz = i;
418 	psratio = profhz / i;
419 	rrticks = hz / 10;
420 	if (schedhz == 0) {
421 		/* 16Hz is best */
422 		statscheddiv = i / 16;
423 		if (statscheddiv <= 0)
424 			panic("statscheddiv");
425 	}
426 
427 #ifndef __HAVE_TIMECOUNTER
428 #ifdef NTP
429 	switch (hz) {
430 	case 1:
431 		shifthz = SHIFT_SCALE - 0;
432 		break;
433 	case 2:
434 		shifthz = SHIFT_SCALE - 1;
435 		break;
436 	case 4:
437 		shifthz = SHIFT_SCALE - 2;
438 		break;
439 	case 8:
440 		shifthz = SHIFT_SCALE - 3;
441 		break;
442 	case 16:
443 		shifthz = SHIFT_SCALE - 4;
444 		break;
445 	case 32:
446 		shifthz = SHIFT_SCALE - 5;
447 		break;
448 	case 50:
449 	case 60:
450 	case 64:
451 		shifthz = SHIFT_SCALE - 6;
452 		break;
453 	case 96:
454 	case 100:
455 	case 128:
456 		shifthz = SHIFT_SCALE - 7;
457 		break;
458 	case 256:
459 		shifthz = SHIFT_SCALE - 8;
460 		break;
461 	case 512:
462 		shifthz = SHIFT_SCALE - 9;
463 		break;
464 	case 1000:
465 	case 1024:
466 		shifthz = SHIFT_SCALE - 10;
467 		break;
468 	case 1200:
469 	case 2048:
470 		shifthz = SHIFT_SCALE - 11;
471 		break;
472 	case 4096:
473 		shifthz = SHIFT_SCALE - 12;
474 		break;
475 	case 8192:
476 		shifthz = SHIFT_SCALE - 13;
477 		break;
478 	case 16384:
479 		shifthz = SHIFT_SCALE - 14;
480 		break;
481 	case 32768:
482 		shifthz = SHIFT_SCALE - 15;
483 		break;
484 	case 65536:
485 		shifthz = SHIFT_SCALE - 16;
486 		break;
487 	default:
488 		panic("weird hz");
489 	}
490 	if (fixtick == 0) {
491 		/*
492 		 * Give MD code a chance to set this to a better
493 		 * value; but, if it doesn't, we should.
494 		 */
495 		fixtick = (1000000 - (hz*tick));
496 	}
497 #endif /* NTP */
498 #endif /* !__HAVE_TIMECOUNTER */
499 }
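
/*
 * Example of the divisors computed above (illustrative numbers): with
 * stathz = 128 and profhz = 1024, psratio = 8.  With schedhz = 0 and
 * stathz = 128, statscheddiv = 128 / 16 = 8, so statclock() calls
 * schedclock() on every 8th statistics tick, i.e. at 16 Hz.  With no
 * statistics clock (stathz = 0) and hz = 100, statscheddiv = 6 and
 * schedclock() runs at roughly 17 Hz.
 */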
500 
501 /*
502  * The real-time timer, interrupting hz times per second.
503  */
504 void
505 hardclock(struct clockframe *frame)
506 {
507 	struct lwp *l;
508 	struct proc *p;
509 	struct cpu_info *ci = curcpu();
510 	struct ptimer *pt;
511 #ifndef __HAVE_TIMECOUNTER
512 	int delta;
513 	extern int tickdelta;
514 	extern long timedelta;
515 #ifdef NTP
516 	int time_update;
517 	int ltemp;
518 #endif /* NTP */
519 #endif /* __HAVE_TIMECOUNTER */
520 
521 	l = curlwp;
522 	if (l) {
523 		p = l->l_proc;
524 		/*
525 		 * Run current process's virtual and profile time, as needed.
526 		 */
527 		if (CLKF_USERMODE(frame) && p->p_timers &&
528 		    (pt = LIST_FIRST(&p->p_timers->pts_virtual)) != NULL)
529 			if (itimerdecr(pt, tick) == 0)
530 				itimerfire(pt);
531 		if (p->p_timers &&
532 		    (pt = LIST_FIRST(&p->p_timers->pts_prof)) != NULL)
533 			if (itimerdecr(pt, tick) == 0)
534 				itimerfire(pt);
535 	}
536 
537 	/*
538 	 * If no separate statistics clock is available, run it from here.
539 	 */
540 	if (stathz == 0)
541 		statclock(frame);
542 	if ((--ci->ci_schedstate.spc_rrticks) <= 0)
543 		roundrobin(ci);
544 
545 #if defined(MULTIPROCESSOR)
546 	/*
547 	 * If we are not the primary CPU, we're not allowed to do
548 	 * any more work.
549 	 */
550 	if (CPU_IS_PRIMARY(ci) == 0)
551 		return;
552 #endif
553 
554 	hardclock_ticks++;
555 
556 #ifdef __HAVE_TIMECOUNTER
557 	tc_ticktock();
558 #else /* __HAVE_TIMECOUNTER */
559 	/*
560 	 * Increment the time-of-day.  The increment is normally just
561 	 * ``tick''.  If the machine is one which has a clock frequency
562 	 * such that ``hz'' would not divide the second evenly into
563 	 * milliseconds, a periodic adjustment must be applied.  Finally,
564 	 * if we are still adjusting the time (see adjtime()),
565 	 * ``tickdelta'' may also be added in.
566 	 */
567 	delta = tick;
568 
569 #ifndef NTP
570 	if (tickfix) {
571 		tickfixcnt += tickfix;
572 		if (tickfixcnt >= tickfixinterval) {
573 			delta++;
574 			tickfixcnt -= tickfixinterval;
575 		}
576 	}
577 #endif /* !NTP */
578 	/* Imprecise 4bsd adjtime() handling */
579 	if (timedelta != 0) {
580 		delta += tickdelta;
581 		timedelta -= tickdelta;
582 	}
583 
584 #ifdef notyet
585 	microset();
586 #endif
587 
588 #ifndef NTP
589 	BUMPTIME(&time, delta);		/* XXX Now done using NTP code below */
590 #endif
591 	BUMPTIME(&mono_time, delta);
592 
593 #ifdef NTP
594 	time_update = delta;
595 
596 	/*
597 	 * Compute the phase adjustment. If the low-order bits
598 	 * (time_phase) of the update overflow, bump the high-order bits
599 	 * (time_update).
600 	 */
601 	time_phase += time_adj;
602 	if (time_phase <= -FINEUSEC) {
603 		ltemp = -time_phase >> SHIFT_SCALE;
604 		time_phase += ltemp << SHIFT_SCALE;
605 		time_update -= ltemp;
606 	} else if (time_phase >= FINEUSEC) {
607 		ltemp = time_phase >> SHIFT_SCALE;
608 		time_phase -= ltemp << SHIFT_SCALE;
609 		time_update += ltemp;
610 	}
611 
612 #ifdef HIGHBALL
613 	/*
614 	 * If the HIGHBALL board is installed, we need to adjust the
615 	 * external clock offset in order to close the hardware feedback
616 	 * loop. This will adjust the external clock phase and frequency
617 	 * in small amounts. The additional phase noise and frequency
618 	 * wander this causes should be minimal. We also need to
619 	 * discipline the kernel time variable, since the PLL is used to
620 	 * discipline the external clock. If the Highball board is not
621 	 * present, we discipline kernel time with the PLL as usual. We
622 	 * assume that the external clock phase adjustment (time_update)
623 	 * and kernel phase adjustment (clock_cpu) are less than the
624 	 * value of tick.
625 	 */
626 	clock_offset.tv_usec += time_update;
627 	if (clock_offset.tv_usec >= 1000000) {
628 		clock_offset.tv_sec++;
629 		clock_offset.tv_usec -= 1000000;
630 	}
631 	if (clock_offset.tv_usec < 0) {
632 		clock_offset.tv_sec--;
633 		clock_offset.tv_usec += 1000000;
634 	}
635 	time.tv_usec += clock_cpu;
636 	clock_cpu = 0;
637 #else
638 	time.tv_usec += time_update;
639 #endif /* HIGHBALL */
640 
641 	/*
642 	 * On rollover of the second the phase adjustment to be used for
643 	 * the next second is calculated. Also, the maximum error is
644 	 * increased by the tolerance. If the PPS frequency discipline
645 	 * code is present, the phase is increased to compensate for the
646 	 * CPU clock oscillator frequency error.
647 	 *
648  	 * On a 32-bit machine and given parameters in the timex.h
649 	 * header file, the maximum phase adjustment is +-512 ms and
650 	 * maximum frequency offset is (a tad less than) +-512 ppm. On a
651 	 * 64-bit machine, you shouldn't need to ask.
652 	 */
653 	if (time.tv_usec >= 1000000) {
654 		time.tv_usec -= 1000000;
655 		time.tv_sec++;
656 		time_maxerror += time_tolerance >> SHIFT_USEC;
657 
658 		/*
659 		 * Leap second processing. If in leap-insert state at
660 		 * the end of the day, the system clock is set back one
661 		 * second; if in leap-delete state, the system clock is
662 		 * set ahead one second. The microtime() routine or
663 		 * external clock driver will ensure that reported time
664 		 * is always monotonic. The ugly divides should be
665 		 * replaced.
666 		 */
667 		switch (time_state) {
668 		case TIME_OK:
669 			if (time_status & STA_INS)
670 				time_state = TIME_INS;
671 			else if (time_status & STA_DEL)
672 				time_state = TIME_DEL;
673 			break;
674 
675 		case TIME_INS:
676 			if (time.tv_sec % 86400 == 0) {
677 				time.tv_sec--;
678 				time_state = TIME_OOP;
679 			}
680 			break;
681 
682 		case TIME_DEL:
683 			if ((time.tv_sec + 1) % 86400 == 0) {
684 				time.tv_sec++;
685 				time_state = TIME_WAIT;
686 			}
687 			break;
688 
689 		case TIME_OOP:
690 			time_state = TIME_WAIT;
691 			break;
692 
693 		case TIME_WAIT:
694 			if (!(time_status & (STA_INS | STA_DEL)))
695 				time_state = TIME_OK;
696 			break;
697 		}
698 
699 		/*
700 		 * Compute the phase adjustment for the next second. In
701 		 * PLL mode, the offset is reduced by a fixed factor
702 		 * times the time constant. In FLL mode the offset is
703 		 * used directly. In either mode, the maximum phase
704 		 * adjustment for each second is clamped so as to spread
705 		 * the adjustment over not more than the number of
706 		 * seconds between updates.
707 		 */
708 		if (time_offset < 0) {
709 			ltemp = -time_offset;
710 			if (!(time_status & STA_FLL))
711 				ltemp >>= SHIFT_KG + time_constant;
712 			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
713 				ltemp = (MAXPHASE / MINSEC) <<
714 				    SHIFT_UPDATE;
715 			time_offset += ltemp;
716 			time_adj = -ltemp << (shifthz - SHIFT_UPDATE);
717 		} else if (time_offset > 0) {
718 			ltemp = time_offset;
719 			if (!(time_status & STA_FLL))
720 				ltemp >>= SHIFT_KG + time_constant;
721 			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
722 				ltemp = (MAXPHASE / MINSEC) <<
723 				    SHIFT_UPDATE;
724 			time_offset -= ltemp;
725 			time_adj = ltemp << (shifthz - SHIFT_UPDATE);
726 		} else
727 			time_adj = 0;
728 
729 		/*
730 		 * Compute the frequency estimate and additional phase
731 		 * adjustment due to frequency error for the next
732 		 * second. When the PPS signal is engaged, gnaw on the
733 		 * watchdog counter and update the frequency computed by
734 		 * the pll and the PPS signal.
735 		 */
736 #ifdef PPS_SYNC
737 		pps_valid++;
738 		if (pps_valid == PPS_VALID) {
739 			pps_jitter = MAXTIME;
740 			pps_stabil = MAXFREQ;
741 			time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
742 			    STA_PPSWANDER | STA_PPSERROR);
743 		}
744 		ltemp = time_freq + pps_freq;
745 #else
746 		ltemp = time_freq;
747 #endif /* PPS_SYNC */
748 
749 		if (ltemp < 0)
750 			time_adj -= -ltemp >> (SHIFT_USEC - shifthz);
751 		else
752 			time_adj += ltemp >> (SHIFT_USEC - shifthz);
753 		time_adj += (long)fixtick << shifthz;
754 
755 		/*
756 		 * When the CPU clock oscillator frequency is not a
757 		 * power of 2 in Hz, shifthz is only an approximate
758 		 * scale factor.
759 		 *
760 		 * To determine the adjustment, you can do the following:
761 		 *   bc -q
762 		 *   scale=24
763 		 *   obase=2
764 		 *   idealhz/realhz
765 		 * where `idealhz' is the next higher power of 2, and `realhz'
766 		 * is the actual value.  You may need to factor this result
767 		 * into a sequence of 2 multipliers to get better precision.
768 		 *
769 		 * Likewise, the error can be calculated with (e.g. for 100Hz):
770 		 *   bc -q
771 		 *   scale=24
772 		 *   ((1+2^-2+2^-5)*(1-2^-10)*realhz-idealhz)/idealhz
773 		 * (and then multiply by 1000000 to get ppm).
774 		 */
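		/*
		 * Worked instance of the above for hz = 100: idealhz = 128,
		 * so the ideal scale is 128/100 = 1.28.  The hz == 100 case
		 * below applies (1 + 2^-2 + 2^-5) * (1 - 2^-10) ~= 1.2799988,
		 * which is about 1 ppm low, matching its comment.
		 */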
775 		switch (hz) {
776 		case 60:
777 			/* A factor of 1.000100010001 gives about 15ppm
778 			   error. */
779 			if (time_adj < 0) {
780 				time_adj -= (-time_adj >> 4);
781 				time_adj -= (-time_adj >> 8);
782 			} else {
783 				time_adj += (time_adj >> 4);
784 				time_adj += (time_adj >> 8);
785 			}
786 			break;
787 
788 		case 96:
789 			/* A factor of 1.0101010101 gives about 244ppm error. */
790 			if (time_adj < 0) {
791 				time_adj -= (-time_adj >> 2);
792 				time_adj -= (-time_adj >> 4) + (-time_adj >> 8);
793 			} else {
794 				time_adj += (time_adj >> 2);
795 				time_adj += (time_adj >> 4) + (time_adj >> 8);
796 			}
797 			break;
798 
799 		case 50:
800 		case 100:
801 			/* A factor of 1.010001111010111 gives about 1ppm
802 			   error. */
803 			if (time_adj < 0) {
804 				time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
805 				time_adj += (-time_adj >> 10);
806 			} else {
807 				time_adj += (time_adj >> 2) + (time_adj >> 5);
808 				time_adj -= (time_adj >> 10);
809 			}
810 			break;
811 
812 		case 1000:
813 			/* A factor of 1.000001100010100001 gives about 50ppm
814 			   error. */
815 			if (time_adj < 0) {
816 				time_adj -= (-time_adj >> 6) + (-time_adj >> 11);
817 				time_adj -= (-time_adj >> 7);
818 			} else {
819 				time_adj += (time_adj >> 6) + (time_adj >> 11);
820 				time_adj += (time_adj >> 7);
821 			}
822 			break;
823 
824 		case 1200:
825 			/* A factor of 1.1011010011100001 gives about 64ppm
826 			   error. */
827 			if (time_adj < 0) {
828 				time_adj -= (-time_adj >> 1) + (-time_adj >> 6);
829 				time_adj -= (-time_adj >> 3) + (-time_adj >> 10);
830 			} else {
831 				time_adj += (time_adj >> 1) + (time_adj >> 6);
832 				time_adj += (time_adj >> 3) + (time_adj >> 10);
833 			}
834 			break;
835 		}
836 
837 #ifdef EXT_CLOCK
838 		/*
839 		 * If an external clock is present, it is necessary to
840 		 * discipline the kernel time variable anyway, since not
841 		 * all system components use the microtime() interface.
842 		 * Here, the time offset between the external clock and
843 		 * kernel time variable is computed every so often.
844 		 */
845 		clock_count++;
846 		if (clock_count > CLOCK_INTERVAL) {
847 			clock_count = 0;
848 			microtime(&clock_ext);
849 			delta.tv_sec = clock_ext.tv_sec - time.tv_sec;
850 			delta.tv_usec = clock_ext.tv_usec -
851 			    time.tv_usec;
852 			if (delta.tv_usec < 0)
853 				delta.tv_sec--;
854 			if (delta.tv_usec >= 500000) {
855 				delta.tv_usec -= 1000000;
856 				delta.tv_sec++;
857 			}
858 			if (delta.tv_usec < -500000) {
859 				delta.tv_usec += 1000000;
860 				delta.tv_sec--;
861 			}
862 			if (delta.tv_sec > 0 || (delta.tv_sec == 0 &&
863 			    delta.tv_usec > MAXPHASE) ||
864 			    delta.tv_sec < -1 || (delta.tv_sec == -1 &&
865 			    delta.tv_usec < -MAXPHASE)) {
866 				time = clock_ext;
867 				delta.tv_sec = 0;
868 				delta.tv_usec = 0;
869 			}
870 #ifdef HIGHBALL
871 			clock_cpu = delta.tv_usec;
872 #else /* HIGHBALL */
873 			hardupdate(delta.tv_usec);
874 #endif /* HIGHBALL */
875 		}
876 #endif /* EXT_CLOCK */
877 	}
878 
879 #endif /* NTP */
880 #endif /* !__HAVE_TIMECOUNTER */
881 
882 	/*
883 	 * Update real-time timeout queue.
884 	 * Process callouts at a very low CPU priority, so we don't keep the
885 	 * relatively high clock interrupt priority any longer than necessary.
886 	 */
887 	if (callout_hardclock()) {
888 		if (CLKF_BASEPRI(frame)) {
889 			/*
890 			 * Save the overhead of a software interrupt;
891 			 * it will happen as soon as we return, so do
892 			 * it now.
893 			 */
894 			spllowersoftclock();
895 			KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
896 			softclock(NULL);
897 			KERNEL_UNLOCK();
898 		} else {
899 #ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
900 			softintr_schedule(softclock_si);
901 #else
902 			setsoftclock();
903 #endif
904 		}
905 	}
906 }
907 
908 #ifdef __HAVE_TIMECOUNTER
909 /*
910  * Compute number of hz until specified time.  Used to compute second
911  * argument to callout_reset() from an absolute time.
912  */
913 int
914 hzto(struct timeval *tvp)
915 {
916 	struct timeval now, tv;
917 
918 	tv = *tvp;	/* Don't modify original tvp. */
919 	getmicrotime(&now);
920 	timersub(&tv, &now, &tv);
921 	return tvtohz(&tv);
922 }
923 #endif /* __HAVE_TIMECOUNTER */
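
/*
 * Usage sketch (the callout and handler names are hypothetical):
 *
 *	extern struct callout md_callout;
 *	struct timeval deadline;	absolute time to fire at
 *	...
 *	callout_reset(&md_callout, hzto(&deadline), md_handler, NULL);
 */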
924 
925 /*
926  * Compute number of ticks in the specified amount of time.
927  */
928 int
929 tvtohz(struct timeval *tv)
930 {
931 	unsigned long ticks;
932 	long sec, usec;
933 
934 	/*
935 	 * If the number of usecs in the whole seconds part of the time
936 	 * difference fits in a long, then the total number of usecs will
937 	 * fit in an unsigned long.  Compute the total and convert it to
938 	 * ticks, rounding up and adding 1 to allow for the current tick
939 	 * to expire.  Rounding also depends on unsigned long arithmetic
940 	 * to avoid overflow.
941 	 *
942 	 * Otherwise, if the number of ticks in the whole seconds part of
943 	 * the time difference fits in a long, then convert the parts to
944 	 * ticks separately and add, using similar rounding methods and
945 	 * overflow avoidance.  This method would work in the previous
946 	 * case, but it is slightly slower and assumes that hz is integral.
947 	 *
948 	 * Otherwise, round the time difference down to the maximum
949 	 * representable value.
950 	 *
951 	 * If ints are 32-bit, then the maximum value for any timeout in
952 	 * 10ms ticks is 248 days.
953 	 */
954 	sec = tv->tv_sec;
955 	usec = tv->tv_usec;
956 
957 	if (usec < 0) {
958 		sec--;
959 		usec += 1000000;
960 	}
961 
962 	if (sec < 0 || (sec == 0 && usec <= 0)) {
963 		/*
964 		 * Would expire now or in the past.  Return 0 ticks.
965 		 * This is different from the legacy hzto() interface,
966 		 * and callers need to check for it.
967 		 */
968 		ticks = 0;
969 	} else if (sec <= (LONG_MAX / 1000000))
970 		ticks = (((sec * 1000000) + (unsigned long)usec + (tick - 1))
971 		    / tick) + 1;
972 	else if (sec <= (LONG_MAX / hz))
973 		ticks = (sec * hz) +
974 		    (((unsigned long)usec + (tick - 1)) / tick) + 1;
975 	else
976 		ticks = LONG_MAX;
977 
978 	if (ticks > INT_MAX)
979 		ticks = INT_MAX;
980 
981 	return ((int)ticks);
982 }
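
/*
 * Worked example: with hz = 100 (tick = 10000 us) and a difference of
 * 25 ms, tvtohz() returns (25000 + 9999) / 10000 + 1 = 4 ticks; the
 * round-up plus the extra tick guarantee the timeout does not expire
 * early even if the current tick is nearly over.
 */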
983 
984 #ifndef __HAVE_TIMECOUNTER
985 /*
986  * Compute number of hz until specified time.  Used to compute second
987  * argument to callout_reset() from an absolute time.
988  */
989 int
990 hzto(struct timeval *tv)
991 {
992 	unsigned long ticks;
993 	long sec, usec;
994 	int s;
995 
996 	/*
997 	 * If the number of usecs in the whole seconds part of the time
998 	 * difference fits in a long, then the total number of usecs will
999 	 * fit in an unsigned long.  Compute the total and convert it to
1000 	 * ticks, rounding up and adding 1 to allow for the current tick
1001 	 * to expire.  Rounding also depends on unsigned long arithmetic
1002 	 * to avoid overflow.
1003 	 *
1004 	 * Otherwise, if the number of ticks in the whole seconds part of
1005 	 * the time difference fits in a long, then convert the parts to
1006 	 * ticks separately and add, using similar rounding methods and
1007 	 * overflow avoidance.  This method would work in the previous
1008 	 * case, but it is slightly slower and assumes that hz is integral.
1009 	 *
1010 	 * Otherwise, round the time difference down to the maximum
1011 	 * representable value.
1012 	 *
1013 	 * If ints are 32-bit, then the maximum value for any timeout in
1014 	 * 10ms ticks is 248 days.
1015 	 */
1016 	s = splclock();
1017 	sec = tv->tv_sec - time.tv_sec;
1018 	usec = tv->tv_usec - time.tv_usec;
1019 	splx(s);
1020 
1021 	if (usec < 0) {
1022 		sec--;
1023 		usec += 1000000;
1024 	}
1025 
1026 	if (sec < 0 || (sec == 0 && usec <= 0)) {
1027 		/*
1028 		 * Would expire now or in the past.  Return 0 ticks.
1029 		 * This is different from the legacy hzto() interface,
1030 		 * and callers need to check for it.
1031 		 */
1032 		ticks = 0;
1033 	} else if (sec <= (LONG_MAX / 1000000))
1034 		ticks = (((sec * 1000000) + (unsigned long)usec + (tick - 1))
1035 		    / tick) + 1;
1036 	else if (sec <= (LONG_MAX / hz))
1037 		ticks = (sec * hz) +
1038 		    (((unsigned long)usec + (tick - 1)) / tick) + 1;
1039 	else
1040 		ticks = LONG_MAX;
1041 
1042 	if (ticks > INT_MAX)
1043 		ticks = INT_MAX;
1044 
1045 	return ((int)ticks);
1046 }
1047 #endif /* !__HAVE_TIMECOUNTER */
1048 
1049 /*
1050  * Compute number of ticks in the specified amount of time.
1051  */
1052 int
1053 tstohz(struct timespec *ts)
1054 {
1055 	struct timeval tv;
1056 
1057 	/*
1058 	 * usec has great enough resolution for hz, so convert to a
1059 	 * timeval and use tvtohz() above.
1060 	 */
1061 	TIMESPEC_TO_TIMEVAL(&tv, ts);
1062 	return tvtohz(&tv);
1063 }
1064 
1065 /*
1066  * Start profiling on a process.
1067  *
1068  * Kernel profiling passes proc0 which never exits and hence
1069  * keeps the profile clock running constantly.
1070  */
1071 void
1072 startprofclock(struct proc *p)
1073 {
1074 
1075 	if ((p->p_flag & P_PROFIL) == 0) {
1076 		p->p_flag |= P_PROFIL;
1077 		/*
1078 		 * This is only necessary if using the clock as the
1079 		 * profiling source.
1080 		 */
1081 		if (++profprocs == 1 && stathz != 0)
1082 			psdiv = psratio;
1083 	}
1084 }
1085 
1086 /*
1087  * Stop profiling on a process.
1088  */
1089 void
1090 stopprofclock(struct proc *p)
1091 {
1092 
1093 	if (p->p_flag & P_PROFIL) {
1094 		p->p_flag &= ~P_PROFIL;
1095 		/*
1096 		 * This is only necessary if using the clock as the
1097 		 * profiling source.
1098 		 */
1099 		if (--profprocs == 0 && stathz != 0)
1100 			psdiv = 1;
1101 	}
1102 }
1103 
1104 #if defined(PERFCTRS)
1105 /*
1106  * Independent profiling "tick" in case we're using a separate
1107  * clock or profiling event source.  Currently, that's just
1108  * performance counters--hence the wrapper.
1109  */
1110 void
1111 proftick(struct clockframe *frame)
1112 {
1113 #ifdef GPROF
1114 	struct gmonparam *g;
1115 	intptr_t i;
1116 #endif
1117 	struct proc *p;
1118 
1119 	p = curproc;
1120 	if (CLKF_USERMODE(frame)) {
1121 		if (p->p_flag & P_PROFIL)
1122 			addupc_intr(p, CLKF_PC(frame));
1123 	} else {
1124 #ifdef GPROF
1125 		g = &_gmonparam;
1126 		if (g->state == GMON_PROF_ON) {
1127 			i = CLKF_PC(frame) - g->lowpc;
1128 			if (i < g->textsize) {
1129 				i /= HISTFRACTION * sizeof(*g->kcount);
1130 				g->kcount[i]++;
1131 			}
1132 		}
1133 #endif
1134 #ifdef PROC_PC
1135 		if (p && (p->p_flag & P_PROFIL))
1136 			addupc_intr(p, PROC_PC(p));
1137 #endif
1138 	}
1139 }
1140 #endif
1141 
1142 /*
1143  * Statistics clock.  Grab profile sample, and if divider reaches 0,
1144  * do process and kernel statistics.
1145  */
1146 void
1147 statclock(struct clockframe *frame)
1148 {
1149 #ifdef GPROF
1150 	struct gmonparam *g;
1151 	intptr_t i;
1152 #endif
1153 	struct cpu_info *ci = curcpu();
1154 	struct schedstate_percpu *spc = &ci->ci_schedstate;
1155 	struct proc *p;
1156 	struct lwp *l;
1157 
1158 	/*
1159 	 * Notice changes in divisor frequency, and adjust clock
1160 	 * frequency accordingly.
1161 	 */
1162 	if (spc->spc_psdiv != psdiv) {
1163 		spc->spc_psdiv = psdiv;
1164 		spc->spc_pscnt = psdiv;
1165 		if (psdiv == 1) {
1166 			setstatclockrate(stathz);
1167 		} else {
1168 			setstatclockrate(profhz);
1169 		}
1170 	}
1171 	l = curlwp;
1172 	p = (l ? l->l_proc : NULL);
1173 	if (CLKF_USERMODE(frame)) {
1174 		KASSERT(p != NULL);
1175 
1176 		if ((p->p_flag & P_PROFIL) && profsrc == PROFSRC_CLOCK)
1177 			addupc_intr(p, CLKF_PC(frame));
1178 		if (--spc->spc_pscnt > 0)
1179 			return;
1180 		/*
1181 		 * Came from user mode; CPU was in user state.
1182 		 * If this process is being profiled record the tick.
1183 		 */
1184 		p->p_uticks++;
1185 		if (p->p_nice > NZERO)
1186 			spc->spc_cp_time[CP_NICE]++;
1187 		else
1188 			spc->spc_cp_time[CP_USER]++;
1189 	} else {
1190 #ifdef GPROF
1191 		/*
1192 		 * Kernel statistics are just like addupc_intr, only easier.
1193 		 */
1194 		g = &_gmonparam;
1195 		if (profsrc == PROFSRC_CLOCK && g->state == GMON_PROF_ON) {
1196 			i = CLKF_PC(frame) - g->lowpc;
1197 			if (i < g->textsize) {
1198 				i /= HISTFRACTION * sizeof(*g->kcount);
1199 				g->kcount[i]++;
1200 			}
1201 		}
1202 #endif
1203 #ifdef LWP_PC
1204 		if (p && profsrc == PROFSRC_CLOCK && (p->p_flag & P_PROFIL))
1205 			addupc_intr(p, LWP_PC(l));
1206 #endif
1207 		if (--spc->spc_pscnt > 0)
1208 			return;
1209 		/*
1210 		 * Came from kernel mode, so we were:
1211 		 * - handling an interrupt,
1212 		 * - doing syscall or trap work on behalf of the current
1213 		 *   user process, or
1214 		 * - spinning in the idle loop.
1215 		 * Whichever it is, charge the time as appropriate.
1216 		 * Note that we charge interrupts to the current process,
1217 		 * regardless of whether they are ``for'' that process,
1218 		 * so that we know how much of its real time was spent
1219 		 * in ``non-process'' (i.e., interrupt) work.
1220 		 */
1221 		if (CLKF_INTR(frame)) {
1222 			if (p != NULL)
1223 				p->p_iticks++;
1224 			spc->spc_cp_time[CP_INTR]++;
1225 		} else if (p != NULL) {
1226 			p->p_sticks++;
1227 			spc->spc_cp_time[CP_SYS]++;
1228 		} else
1229 			spc->spc_cp_time[CP_IDLE]++;
1230 	}
1231 	spc->spc_pscnt = psdiv;
1232 
1233 	if (p != NULL) {
1234 		++p->p_cpticks;
1235 		/*
1236 		 * If no separate schedclock is provided, call it here
1237 		 * at about 16 Hz.
1238 		 */
1239 		if (schedhz == 0)
1240 			if ((int)(--ci->ci_schedstate.spc_schedticks) <= 0) {
1241 				schedclock(l);
1242 				ci->ci_schedstate.spc_schedticks = statscheddiv;
1243 			}
1244 	}
1245 }
1246 
1247 #ifndef __HAVE_TIMECOUNTER
1248 #ifdef NTP	/* NTP phase-locked loop in kernel */
1249 /*
1250  * hardupdate() - local clock update
1251  *
1252  * This routine is called by ntp_adjtime() to update the local clock
1253  * phase and frequency. The implementation is of an adaptive-parameter,
1254  * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new
1255  * time and frequency offset estimates for each call. If the kernel PPS
1256  * discipline code is configured (PPS_SYNC), the PPS signal itself
1257  * determines the new time offset, instead of the calling argument.
1258  * Presumably, calls to ntp_adjtime() occur only when the caller
1259  * believes the local clock is valid within some bound (+-128 ms with
1260  * NTP). If the caller's time is far different than the PPS time, an
1261  * argument will ensue, and it's not clear who will lose.
1262  *
1263  * For uncompensated quartz crystal oscillators and nominal update
1264  * intervals less than 1024 s, operation should be in phase-lock mode
1265  * (STA_FLL = 0), where the loop is disciplined to phase. For update
1266  * intervals greater than this, operation should be in frequency-lock
1267  * mode (STA_FLL = 1), where the loop is disciplined to frequency.
1268  *
1269  * Note: splclock() is in effect.
1270  */
1271 void
1272 hardupdate(long offset)
1273 {
1274 	long ltemp, mtemp;
1275 
1276 	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
1277 		return;
1278 	ltemp = offset;
1279 #ifdef PPS_SYNC
1280 	if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
1281 		ltemp = pps_offset;
1282 #endif /* PPS_SYNC */
1283 
1284 	/*
1285 	 * Scale the phase adjustment and clamp to the operating range.
1286 	 */
1287 	if (ltemp > MAXPHASE)
1288 		time_offset = MAXPHASE << SHIFT_UPDATE;
1289 	else if (ltemp < -MAXPHASE)
1290 		time_offset = -(MAXPHASE << SHIFT_UPDATE);
1291 	else
1292 		time_offset = ltemp << SHIFT_UPDATE;
1293 
1294 	/*
1295 	 * Select whether the frequency is to be controlled and in which
1296 	 * mode (PLL or FLL). Clamp to the operating range. Ugly
1297 	 * multiply/divide should be replaced someday.
1298 	 */
1299 	if (time_status & STA_FREQHOLD || time_reftime == 0)
1300 		time_reftime = time.tv_sec;
1301 	mtemp = time.tv_sec - time_reftime;
1302 	time_reftime = time.tv_sec;
1303 	if (time_status & STA_FLL) {
1304 		if (mtemp >= MINSEC) {
1305 			ltemp = ((time_offset / mtemp) << (SHIFT_USEC -
1306 			    SHIFT_UPDATE));
1307 			if (ltemp < 0)
1308 				time_freq -= -ltemp >> SHIFT_KH;
1309 			else
1310 				time_freq += ltemp >> SHIFT_KH;
1311 		}
1312 	} else {
1313 		if (mtemp < MAXSEC) {
1314 			ltemp *= mtemp;
1315 			if (ltemp < 0)
1316 				time_freq -= -ltemp >> (time_constant +
1317 				    time_constant + SHIFT_KF -
1318 				    SHIFT_USEC);
1319 			else
1320 				time_freq += ltemp >> (time_constant +
1321 				    time_constant + SHIFT_KF -
1322 				    SHIFT_USEC);
1323 		}
1324 	}
1325 	if (time_freq > time_tolerance)
1326 		time_freq = time_tolerance;
1327 	else if (time_freq < -time_tolerance)
1328 		time_freq = -time_tolerance;
1329 }
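
/*
 * Worked PLL example (assuming the classic timex.h constants
 * SHIFT_UPDATE = 12 and SHIFT_KG = 6): a call hardupdate(1000) for a
 * 1 ms offset sets time_offset = 1000 << SHIFT_UPDATE.  At each
 * second's rollover hardclock() then amortizes
 * time_offset >> (SHIFT_KG + time_constant) of the remainder, so with
 * time_constant = 0 roughly 1000/64 ~= 16 us is slewed in the first
 * second and the residual offset decays geometrically from there.
 */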
1330 
1331 #ifdef PPS_SYNC
1332 /*
1333  * hardpps() - discipline CPU clock oscillator to external PPS signal
1334  *
1335  * This routine is called at each PPS interrupt in order to discipline
1336  * the CPU clock oscillator to the PPS signal. It measures the PPS phase
1337  * and leaves it in a handy spot for the hardclock() routine. It
1338  * integrates successive PPS phase differences and calculates the
1339  * frequency offset. This is used in hardclock() to discipline the CPU
1340  * clock oscillator so that intrinsic frequency error is cancelled out.
1341  * The code requires the caller to capture the time and hardware counter
1342  * value at the on-time PPS signal transition.
1343  *
1344  * Note that, on some Unix systems, this routine runs at an interrupt
1345  * priority level higher than the timer interrupt routine hardclock().
1346  * Therefore, the variables used are distinct from the hardclock()
1347  * variables, except for certain exceptions: The PPS frequency pps_freq
1348  * and phase pps_offset variables are determined by this routine and
1349  * updated atomically. The time_tolerance variable can be considered a
1350  * constant, since it is infrequently changed, and then only when the
1351  * PPS signal is disabled. The watchdog counter pps_valid is updated
1352  * once per second by hardclock() and is atomically cleared in this
1353  * routine.
1354  */
1355 void
1356 hardpps(struct timeval *tvp,		/* time at PPS */
1357 	long usec			/* hardware counter at PPS */)
1358 {
1359 	long u_usec, v_usec, bigtick;
1360 	long cal_sec, cal_usec;
1361 
1362 	/*
1363 	 * An occasional glitch can be produced when the PPS interrupt
1364 	 * occurs in the hardclock() routine before the time variable is
1365 	 * updated. Here the offset is discarded when the difference
1366 	 * between it and the last one is greater than tick/2, but not
1367 	 * if the interval since the first discard exceeds 30 s.
1368 	 */
1369 	time_status |= STA_PPSSIGNAL;
1370 	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
1371 	pps_valid = 0;
1372 	u_usec = -tvp->tv_usec;
1373 	if (u_usec < -500000)
1374 		u_usec += 1000000;
1375 	v_usec = pps_offset - u_usec;
1376 	if (v_usec < 0)
1377 		v_usec = -v_usec;
1378 	if (v_usec > (tick >> 1)) {
1379 		if (pps_glitch > MAXGLITCH) {
1380 			pps_glitch = 0;
1381 			pps_tf[2] = u_usec;
1382 			pps_tf[1] = u_usec;
1383 		} else {
1384 			pps_glitch++;
1385 			u_usec = pps_offset;
1386 		}
1387 	} else
1388 		pps_glitch = 0;
1389 
1390 	/*
1391 	 * A three-stage median filter is used to help deglitch the pps
1392 	 * time. The median sample becomes the time offset estimate; the
1393 	 * difference between the other two samples becomes the time
1394 	 * dispersion (jitter) estimate.
1395 	 */
1396 	pps_tf[2] = pps_tf[1];
1397 	pps_tf[1] = pps_tf[0];
1398 	pps_tf[0] = u_usec;
1399 	if (pps_tf[0] > pps_tf[1]) {
1400 		if (pps_tf[1] > pps_tf[2]) {
1401 			pps_offset = pps_tf[1];		/* 0 1 2 */
1402 			v_usec = pps_tf[0] - pps_tf[2];
1403 		} else if (pps_tf[2] > pps_tf[0]) {
1404 			pps_offset = pps_tf[0];		/* 2 0 1 */
1405 			v_usec = pps_tf[2] - pps_tf[1];
1406 		} else {
1407 			pps_offset = pps_tf[2];		/* 0 2 1 */
1408 			v_usec = pps_tf[0] - pps_tf[1];
1409 		}
1410 	} else {
1411 		if (pps_tf[1] < pps_tf[2]) {
1412 			pps_offset = pps_tf[1];		/* 2 1 0 */
1413 			v_usec = pps_tf[2] - pps_tf[0];
1414 		} else  if (pps_tf[2] < pps_tf[0]) {
1415 			pps_offset = pps_tf[0];		/* 1 0 2 */
1416 			v_usec = pps_tf[1] - pps_tf[2];
1417 		} else {
1418 			pps_offset = pps_tf[2];		/* 1 2 0 */
1419 			v_usec = pps_tf[1] - pps_tf[0];
1420 		}
1421 	}
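	/*
	 * Example: with samples pps_tf[] = {7, -2, 15} (newest first),
	 * the branch above picks pps_offset = 7, the median, and
	 * v_usec = 15 - (-2) = 17, the difference between the other two
	 * samples, which becomes the jitter estimate below.
	 */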
1422 	if (v_usec > MAXTIME)
1423 		pps_jitcnt++;
1424 	v_usec = (v_usec << PPS_AVG) - pps_jitter;
1425 	if (v_usec < 0)
1426 		pps_jitter -= -v_usec >> PPS_AVG;
1427 	else
1428 		pps_jitter += v_usec >> PPS_AVG;
1429 	if (pps_jitter > (MAXTIME >> 1))
1430 		time_status |= STA_PPSJITTER;
1431 
1432 	/*
1433 	 * During the calibration interval adjust the starting time when
1434 	 * the tick overflows. At the end of the interval compute the
1435 	 * duration of the interval and the difference of the hardware
1436 	 * counters at the beginning and end of the interval. This code
1437 	 * is deliciously complicated by the fact that valid differences may
1438 	 * exceed the value of tick when using long calibration
1439 	 * intervals and small ticks. Note that the counter can be
1440 	 * greater than tick if caught at just the wrong instant, but
1441 	 * the values returned and used here are correct.
1442 	 */
1443 	bigtick = (long)tick << SHIFT_USEC;
1444 	pps_usec -= pps_freq;
1445 	if (pps_usec >= bigtick)
1446 		pps_usec -= bigtick;
1447 	if (pps_usec < 0)
1448 		pps_usec += bigtick;
1449 	pps_time.tv_sec++;
1450 	pps_count++;
1451 	if (pps_count < (1 << pps_shift))
1452 		return;
1453 	pps_count = 0;
1454 	pps_calcnt++;
1455 	u_usec = usec << SHIFT_USEC;
1456 	v_usec = pps_usec - u_usec;
1457 	if (v_usec >= bigtick >> 1)
1458 		v_usec -= bigtick;
1459 	if (v_usec < -(bigtick >> 1))
1460 		v_usec += bigtick;
1461 	if (v_usec < 0)
1462 		v_usec = -(-v_usec >> pps_shift);
1463 	else
1464 		v_usec = v_usec >> pps_shift;
1465 	pps_usec = u_usec;
1466 	cal_sec = tvp->tv_sec;
1467 	cal_usec = tvp->tv_usec;
1468 	cal_sec -= pps_time.tv_sec;
1469 	cal_usec -= pps_time.tv_usec;
1470 	if (cal_usec < 0) {
1471 		cal_usec += 1000000;
1472 		cal_sec--;
1473 	}
1474 	pps_time = *tvp;
1475 
1476 	/*
1477 	 * Check for lost interrupts, noise, excessive jitter and
1478 	 * excessive frequency error. The number of timer ticks during
1479 	 * the interval may vary +-1 tick. Add to this a margin of one
1480 	 * tick for the PPS signal jitter and maximum frequency
1481 	 * deviation. If the limits are exceeded, the calibration
1482 	 * interval is reset to the minimum and we start over.
1483 	 */
1484 	u_usec = (long)tick << 1;
1485 	if (!((cal_sec == -1 && cal_usec > (1000000 - u_usec))
1486 	    || (cal_sec == 0 && cal_usec < u_usec))
1487 	    || v_usec > time_tolerance || v_usec < -time_tolerance) {
1488 		pps_errcnt++;
1489 		pps_shift = PPS_SHIFT;
1490 		pps_intcnt = 0;
1491 		time_status |= STA_PPSERROR;
1492 		return;
1493 	}
1494 
1495 	/*
1496 	 * A three-stage median filter is used to help deglitch the pps
1497 	 * frequency. The median sample becomes the frequency offset
1498 	 * estimate; the difference between the other two samples
1499 	 * becomes the frequency dispersion (stability) estimate.
1500 	 */
1501 	pps_ff[2] = pps_ff[1];
1502 	pps_ff[1] = pps_ff[0];
1503 	pps_ff[0] = v_usec;
1504 	if (pps_ff[0] > pps_ff[1]) {
1505 		if (pps_ff[1] > pps_ff[2]) {
1506 			u_usec = pps_ff[1];		/* 0 1 2 */
1507 			v_usec = pps_ff[0] - pps_ff[2];
1508 		} else if (pps_ff[2] > pps_ff[0]) {
1509 			u_usec = pps_ff[0];		/* 2 0 1 */
1510 			v_usec = pps_ff[2] - pps_ff[1];
1511 		} else {
1512 			u_usec = pps_ff[2];		/* 0 2 1 */
1513 			v_usec = pps_ff[0] - pps_ff[1];
1514 		}
1515 	} else {
1516 		if (pps_ff[1] < pps_ff[2]) {
1517 			u_usec = pps_ff[1];		/* 2 1 0 */
1518 			v_usec = pps_ff[2] - pps_ff[0];
1519 		} else  if (pps_ff[2] < pps_ff[0]) {
1520 			u_usec = pps_ff[0];		/* 1 0 2 */
1521 			v_usec = pps_ff[1] - pps_ff[2];
1522 		} else {
1523 			u_usec = pps_ff[2];		/* 1 2 0 */
1524 			v_usec = pps_ff[1] - pps_ff[0];
1525 		}
1526 	}
1527 
1528 	/*
1529 	 * Here the frequency dispersion (stability) is updated. If it
1530 	 * is less than one-fourth the maximum (MAXFREQ), the frequency
1531 	 * offset is updated as well, but clamped to the tolerance. It
1532 	 * will be processed later by the hardclock() routine.
1533 	 */
1534 	v_usec = (v_usec >> 1) - pps_stabil;
1535 	if (v_usec < 0)
1536 		pps_stabil -= -v_usec >> PPS_AVG;
1537 	else
1538 		pps_stabil += v_usec >> PPS_AVG;
1539 	if (pps_stabil > MAXFREQ >> 2) {
1540 		pps_stbcnt++;
1541 		time_status |= STA_PPSWANDER;
1542 		return;
1543 	}
1544 	if (time_status & STA_PPSFREQ) {
1545 		if (u_usec < 0) {
1546 			pps_freq -= -u_usec >> PPS_AVG;
1547 			if (pps_freq < -time_tolerance)
1548 				pps_freq = -time_tolerance;
1549 			u_usec = -u_usec;
1550 		} else {
1551 			pps_freq += u_usec >> PPS_AVG;
1552 			if (pps_freq > time_tolerance)
1553 				pps_freq = time_tolerance;
1554 		}
1555 	}
1556 
1557 	/*
1558 	 * Here the calibration interval is adjusted. If the maximum
1559 	 * time difference is greater than tick / 4, reduce the interval
1560 	 * by half. If this is not the case for four consecutive
1561 	 * intervals, double the interval.
1562 	 */
1563 	if (u_usec << pps_shift > bigtick >> 2) {
1564 		pps_intcnt = 0;
1565 		if (pps_shift > PPS_SHIFT)
1566 			pps_shift--;
1567 	} else if (pps_intcnt >= 4) {
1568 		pps_intcnt = 0;
1569 		if (pps_shift < PPS_SHIFTMAX)
1570 			pps_shift++;
1571 	} else
1572 		pps_intcnt++;
1573 }
1574 #endif /* PPS_SYNC */
1575 #endif /* NTP  */
1576 
1577 /* timecounter compat functions */
1578 void
1579 nanotime(struct timespec *ts)
1580 {
1581 	struct timeval tv;
1582 
1583 	microtime(&tv);
1584 	TIMEVAL_TO_TIMESPEC(&tv, ts);
1585 }
1586 
1587 void
1588 getbinuptime(struct bintime *bt)
1589 {
1590 	struct timeval tv;
1591 
1592 	microtime(&tv);
1593 	timeval2bintime(&tv, bt);
1594 }
1595 
1596 void
1597 nanouptime(struct timespec *tsp)
1598 {
1599 	int s;
1600 
1601 	s = splclock();
1602 	TIMEVAL_TO_TIMESPEC(&mono_time, tsp);
1603 	splx(s);
1604 }
1605 
1606 void
1607 getnanouptime(struct timespec *tsp)
1608 {
1609 	int s;
1610 
1611 	s = splclock();
1612 	TIMEVAL_TO_TIMESPEC(&mono_time, tsp);
1613 	splx(s);
1614 }
1615 
1616 void
1617 getmicrouptime(struct timeval *tvp)
1618 {
1619 	int s;
1620 
1621 	s = splclock();
1622 	*tvp = mono_time;
1623 	splx(s);
1624 }
1625 
1626 void
1627 getnanotime(struct timespec *tsp)
1628 {
1629 	int s;
1630 
1631 	s = splclock();
1632 	TIMEVAL_TO_TIMESPEC(&time, tsp);
1633 	splx(s);
1634 }
1635 
1636 void
1637 getmicrotime(struct timeval *tvp)
1638 {
1639 	int s;
1640 
1641 	s = splclock();
1642 	*tvp = time;
1643 	splx(s);
1644 }
1645 #endif /* !__HAVE_TIMECOUNTER */
1646