/*	$NetBSD: kern_clock.c,v 1.91 2004/07/01 12:36:57 yamt Exp $	*/

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_clock.c,v 1.91 2004/07/01 12:36:57 yamt Exp $");

#include "opt_ntp.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/timex.h>
#include <sys/sched.h>
#include <sys/time.h>

#include <machine/cpu.h>
#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
#include <machine/intr.h>
#endif

#ifdef GPROF
#include <sys/gmon.h>
#endif

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.  The main clock, running hz times per second, is used to keep
 * track of real time.  The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the CPU
 * just before its quantum expires.  Otherwise, it would never accumulate
 * CPU ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 */

#ifdef NTP	/* NTP phase-locked loop in kernel */
/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The following variables are read and set by the ntp_adjtime() system
 * call.
 *
 * time_state shows the state of the system clock, with values defined
 * in the timex.h header file.
 *
 * time_status shows the status of the system clock, with bits defined
 * in the timex.h header file.
 *
 * time_offset is used by the PLL/FLL to adjust the system time in small
 * increments.
 *
 * time_constant determines the bandwidth or "stiffness" of the PLL.
 *
 * time_tolerance determines maximum frequency error or tolerance of the
 * CPU clock oscillator and is a property of the architecture; however,
 * in principle it could change as a result of the presence of external
 * discipline signals, for instance.
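 *
 * (Illustrative note: frequency quantities such as time_tolerance and
 * time_freq are kept in the scaled-ppm units defined in timex.h;
 * assuming the usual SHIFT_USEC value of 16, a tolerance of 100 ppm
 * would be stored as 100 << 16.)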
 *
 * time_precision is usually equal to the kernel tick variable; however,
 * in cases where a precision clock counter or external clock is
 * available, the resolution can be much less than this and depend on
 * whether the external clock is working or not.
 *
 * time_maxerror is initialized by an ntp_adjtime() call and increased by
 * the kernel once each second to reflect the maximum error bound
 * growth.
 *
 * time_esterror is set and read by the ntp_adjtime() call, but
 * otherwise not used by the kernel.
 */
int	time_state = TIME_OK;		/* clock state */
int	time_status = STA_UNSYNC;	/* clock status bits */
long	time_offset = 0;		/* time offset (us) */
long	time_constant = 0;		/* pll time constant */
long	time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
long	time_precision = 1;		/* clock precision (us) */
long	time_maxerror = MAXPHASE;	/* maximum error (us) */
long	time_esterror = MAXPHASE;	/* estimated error (us) */

/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock.  The scale
 * factors are defined in the timex.h header file.
 *
 * time_phase and time_freq are the phase increment and the frequency
 * increment, respectively, of the kernel time variable.
 *
 * time_freq is set via ntp_adjtime() from a value stored in a file when
 * the synchronization daemon is first started.  Its value is retrieved
 * via ntp_adjtime() and written to the file about once per hour by the
 * daemon.
 *
 * time_adj is the adjustment added to the value of tick at each timer
 * interrupt and is recomputed from time_phase and time_freq at each
 * second's rollover.
 *
 * time_reftime is the second's portion of the system time at the last
 * call to ntp_adjtime().  It is used to adjust the time_freq variable
 * and to increase the time_maxerror as the time since last update
 * increases.
 */
long	time_phase = 0;			/* phase offset (scaled us) */
long	time_freq = 0;			/* frequency offset (scaled ppm) */
long	time_adj = 0;			/* tick adjust (scaled 1 / hz) */
long	time_reftime = 0;		/* time at last adjustment (s) */

#ifdef PPS_SYNC
/*
 * The following variables are used only if the kernel PPS discipline
 * code is configured (PPS_SYNC).  The scale factors are defined in the
 * timex.h header file.
 *
 * pps_time contains the time at each calibration interval, as read by
 * microtime().  pps_count counts the seconds of the calibration
 * interval, whose duration is nominally 2^pps_shift seconds.
 *
 * pps_offset is the time offset produced by the time median filter
 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
 * this filter.
 *
 * pps_freq is the frequency offset produced by the frequency median
 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
 * by this filter.
 *
 * pps_usec is latched from a high resolution counter or external clock
 * at pps_time.  Here we want the hardware counter contents only, not the
 * contents plus the time_tv.usec as usual.
 *
 * pps_valid counts the number of seconds since the last PPS update.  It
 * is used as a watchdog timer to disable the PPS discipline should the
 * PPS signal be lost.
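 * (If the signal disappears, hardclock() keeps incrementing pps_valid
 * once per second; once it reaches PPS_VALID, the jitter and stability
 * estimates are reset and the PPS status bits are cleared in the
 * seconds-rollover code below.)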
 *
 * pps_glitch counts the number of seconds since the beginning of an
 * offset burst more than tick/2 from the current nominal offset.  It is
 * used mainly to suppress error bursts due to priority conflicts between
 * the PPS interrupt and timer interrupt.
 *
 * pps_intcnt counts the calibration intervals for use in the interval-
 * adaptation algorithm.  It's just too complicated for words.
 *
 * pps_kc_hardpps_source contains an arbitrary value that uniquely
 * identifies the currently bound source of the PPS signal, or NULL
 * if no source is bound.
 *
 * pps_kc_hardpps_mode indicates which transitions, if any, of the PPS
 * signal should be reported.
 */
struct timeval pps_time;	/* kernel time at last interval */
long	pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
long	pps_offset = 0;		/* pps time offset (us) */
long	pps_jitter = MAXTIME;	/* time dispersion (jitter) (us) */
long	pps_ff[] = {0, 0, 0};	/* pps frequency offset median filter */
long	pps_freq = 0;		/* frequency offset (scaled ppm) */
long	pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
long	pps_usec = 0;		/* microsec counter at last interval */
long	pps_valid = PPS_VALID;	/* pps signal watchdog counter */
int	pps_glitch = 0;		/* pps signal glitch counter */
int	pps_count = 0;		/* calibration interval counter (s) */
int	pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
int	pps_intcnt = 0;		/* intervals at current duration */
void	*pps_kc_hardpps_source = NULL; /* current PPS supplier's identifier */
int	pps_kc_hardpps_mode = 0; /* interesting edges of PPS signal */

/*
 * PPS signal quality monitors
 *
 * pps_jitcnt counts the seconds that have been discarded because the
 * jitter measured by the time median filter exceeds the limit MAXTIME
 * (100 us).
 *
 * pps_calcnt counts the frequency calibration intervals, which are
 * variable from 4 s to 256 s.
 *
 * pps_errcnt counts the calibration intervals which have been discarded
 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 * calibration interval jitter exceeds two ticks.
 *
 * pps_stbcnt counts the calibration intervals that have been discarded
 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
 */
long	pps_jitcnt = 0;		/* jitter limit exceeded */
long	pps_calcnt = 0;		/* calibration intervals */
long	pps_errcnt = 0;		/* calibration errors */
long	pps_stbcnt = 0;		/* stability limit exceeded */
#endif /* PPS_SYNC */

#ifdef EXT_CLOCK
/*
 * External clock definitions
 *
 * The following definitions and declarations are used only if an
 * external clock is configured on the system.
 */
#define CLOCK_INTERVAL 30	/* CPU clock update interval (s) */

/*
 * The clock_count variable is set to CLOCK_INTERVAL at each PPS
 * interrupt and decremented once each second.
 */
int	clock_count = 0;	/* CPU clock counter */

#ifdef HIGHBALL
/*
 * The clock_offset and clock_cpu variables are used by the HIGHBALL
 * interface.  The clock_offset variable defines the offset between
 * system time and the HIGHBALL counters.  The clock_cpu variable
 * contains the offset between the system clock and the HIGHBALL clock
 * for use in disciplining the kernel time variable.
 */
extern struct timeval clock_offset; /* Highball clock offset */
long	clock_cpu = 0;		/* CPU clock adjust */
#endif /* HIGHBALL */
#endif /* EXT_CLOCK */
#endif /* NTP */


/*
 * Bump a timeval by a small number of usecs.
 */
#define BUMPTIME(t, usec) {						\
	volatile struct timeval *tp = (t);				\
	long us;							\
									\
	tp->tv_usec = us = tp->tv_usec + (usec);			\
	if (us >= 1000000) {						\
		tp->tv_usec = us - 1000000;				\
		tp->tv_sec++;						\
	}								\
}

int	stathz;
int	profhz;
int	profsrc;
int	schedhz;
int	profprocs;
int	hardclock_ticks;
static int statscheddiv; /* stat => sched divider (used if schedhz == 0) */
static int psdiv;			/* prof => stat divider */
int	psratio;			/* ratio: prof / stat */
int	tickfix, tickfixinterval;	/* used if tick not really integral */
#ifndef NTP
static int tickfixcnt;			/* accumulated fractional error */
#else
int	fixtick;			/* used by NTP for same */
int	shifthz;
#endif

/*
 * We might want ldd to load both words of time at once.
 * To succeed we need to be quadword aligned.
 * The sparc already does that, and that it has worked so far is a fluke.
 */
volatile struct	timeval time  __attribute__((__aligned__(__alignof__(quad_t))));
volatile struct	timeval mono_time;

void	*softclock_si;

/*
 * Initialize clock frequencies and start both clocks running.
 */
void
initclocks(void)
{
	int i;

#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
	softclock_si = softintr_establish(IPL_SOFTCLOCK, softclock, NULL);
	if (softclock_si == NULL)
		panic("initclocks: unable to register softclock intr");
#endif

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	psdiv = 1;
	cpu_initclocks();

	/*
	 * Compute profhz/stathz/rrticks, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;
	rrticks = hz / 10;
	if (schedhz == 0) {
		/* 16Hz is best */
		statscheddiv = i / 16;
		if (statscheddiv <= 0)
			panic("statscheddiv");
	}

#ifdef NTP
	switch (hz) {
	case 1:
		shifthz = SHIFT_SCALE - 0;
		break;
	case 2:
		shifthz = SHIFT_SCALE - 1;
		break;
	case 4:
		shifthz = SHIFT_SCALE - 2;
		break;
	case 8:
		shifthz = SHIFT_SCALE - 3;
		break;
	case 16:
		shifthz = SHIFT_SCALE - 4;
		break;
	case 32:
		shifthz = SHIFT_SCALE - 5;
		break;
	case 60:
	case 64:
		shifthz = SHIFT_SCALE - 6;
		break;
	case 96:
	case 100:
	case 128:
		shifthz = SHIFT_SCALE - 7;
		break;
	case 256:
		shifthz = SHIFT_SCALE - 8;
		break;
	case 512:
		shifthz = SHIFT_SCALE - 9;
		break;
	case 1000:
	case 1024:
		shifthz = SHIFT_SCALE - 10;
		break;
	case 1200:
	case 2048:
		shifthz = SHIFT_SCALE - 11;
		break;
	case 4096:
		shifthz = SHIFT_SCALE - 12;
		break;
	case 8192:
		shifthz = SHIFT_SCALE - 13;
		break;
	case 16384:
		shifthz = SHIFT_SCALE - 14;
		break;
	case 32768:
		shifthz = SHIFT_SCALE - 15;
		break;
	case 65536:
		shifthz = SHIFT_SCALE - 16;
		break;
	default:
		panic("weird hz");
	}
	if (fixtick == 0) {
		/*
		 * Give MD code a chance to set this to a better
		 * value; but, if it doesn't, we should.
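		 *
		 * (Worked example, assuming the usual tick = 1000000 / hz:
		 * with hz = 1024, tick = 976 and fixtick = 1000000 -
		 * 1024 * 976 = 576, i.e. 576 us per second must be made
		 * up; with hz = 100, tick = 10000 and fixtick = 0.)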
		 */
		fixtick = (1000000 - (hz*tick));
	}
#endif
}

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(struct clockframe *frame)
{
	struct lwp *l;
	struct proc *p;
	int delta;
	extern int tickdelta;
	extern long timedelta;
	struct cpu_info *ci = curcpu();
	struct ptimer *pt;
#ifdef NTP
	int time_update;
	int ltemp;
#endif

	l = curlwp;
	if (l) {
		p = l->l_proc;
		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		if (CLKF_USERMODE(frame) && p->p_timers &&
		    (pt = LIST_FIRST(&p->p_timers->pts_virtual)) != NULL)
			if (itimerdecr(pt, tick) == 0)
				itimerfire(pt);
		if (p->p_timers &&
		    (pt = LIST_FIRST(&p->p_timers->pts_prof)) != NULL)
			if (itimerdecr(pt, tick) == 0)
				itimerfire(pt);
	}

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);
	if ((--ci->ci_schedstate.spc_rrticks) <= 0)
		roundrobin(ci);

#if defined(MULTIPROCESSOR)
	/*
	 * If we are not the primary CPU, we're not allowed to do
	 * any more work.
	 */
	if (CPU_IS_PRIMARY(ci) == 0)
		return;
#endif

	/*
	 * Increment the time-of-day.  The increment is normally just
	 * ``tick''.  If the machine is one which has a clock frequency
	 * such that ``hz'' would not divide the second evenly into
	 * microseconds, a periodic adjustment must be applied.  Finally,
	 * if we are still adjusting the time (see adjtime()),
	 * ``tickdelta'' may also be added in.
	 */
	hardclock_ticks++;
	delta = tick;

#ifndef NTP
	if (tickfix) {
		tickfixcnt += tickfix;
		if (tickfixcnt >= tickfixinterval) {
			delta++;
			tickfixcnt -= tickfixinterval;
		}
	}
#endif /* !NTP */
	/* Imprecise 4BSD adjtime() handling */
	if (timedelta != 0) {
		delta += tickdelta;
		timedelta -= tickdelta;
	}

#ifdef notyet
	microset();
#endif

#ifndef NTP
	BUMPTIME(&time, delta);		/* XXX Now done using NTP code below */
#endif
	BUMPTIME(&mono_time, delta);

#ifdef NTP
	time_update = delta;

	/*
	 * Compute the phase adjustment.  If the low-order bits
	 * (time_phase) of the update overflow, bump the high-order bits
	 * (time_update).
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		time_update -= ltemp;
	} else if (time_phase >= FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		time_update += ltemp;
	}

#ifdef HIGHBALL
	/*
	 * If the HIGHBALL board is installed, we need to adjust the
	 * external clock offset in order to close the hardware feedback
	 * loop.  This will adjust the external clock phase and frequency
	 * in small amounts.  The additional phase noise and frequency
	 * wander this causes should be minimal.  We also need to
	 * discipline the kernel time variable, since the PLL is used to
	 * discipline the external clock.  If the Highball board is not
	 * present, we discipline kernel time with the PLL as usual.  We
	 * assume that the external clock phase adjustment (time_update)
	 * and kernel phase adjustment (clock_cpu) are less than the
	 * value of tick.
	 */
	clock_offset.tv_usec += time_update;
	if (clock_offset.tv_usec >= 1000000) {
		clock_offset.tv_sec++;
		clock_offset.tv_usec -= 1000000;
	}
	if (clock_offset.tv_usec < 0) {
		clock_offset.tv_sec--;
		clock_offset.tv_usec += 1000000;
	}
	time.tv_usec += clock_cpu;
	clock_cpu = 0;
#else
	time.tv_usec += time_update;
#endif /* HIGHBALL */

	/*
	 * On rollover of the second the phase adjustment to be used for
	 * the next second is calculated.  Also, the maximum error is
	 * increased by the tolerance.  If the PPS frequency discipline
	 * code is present, the phase is increased to compensate for the
	 * CPU clock oscillator frequency error.
	 *
	 * On a 32-bit machine and given parameters in the timex.h
	 * header file, the maximum phase adjustment is +-512 ms and
	 * the maximum frequency offset is a tad less than +-512 ppm.
	 * On a 64-bit machine, you shouldn't need to ask.
	 */
	if (time.tv_usec >= 1000000) {
		time.tv_usec -= 1000000;
		time.tv_sec++;
		time_maxerror += time_tolerance >> SHIFT_USEC;

		/*
		 * Leap second processing.  If in leap-insert state at
		 * the end of the day, the system clock is set back one
		 * second; if in leap-delete state, the system clock is
		 * set ahead one second.  The microtime() routine or
		 * external clock driver will ensure that reported time
		 * is always monotonic.  The ugly divides should be
		 * replaced.
		 */
		switch (time_state) {
		case TIME_OK:
			if (time_status & STA_INS)
				time_state = TIME_INS;
			else if (time_status & STA_DEL)
				time_state = TIME_DEL;
			break;

		case TIME_INS:
			if (time.tv_sec % 86400 == 0) {
				time.tv_sec--;
				time_state = TIME_OOP;
			}
			break;

		case TIME_DEL:
			if ((time.tv_sec + 1) % 86400 == 0) {
				time.tv_sec++;
				time_state = TIME_WAIT;
			}
			break;

		case TIME_OOP:
			time_state = TIME_WAIT;
			break;

		case TIME_WAIT:
			if (!(time_status & (STA_INS | STA_DEL)))
				time_state = TIME_OK;
			break;
		}

		/*
		 * Compute the phase adjustment for the next second.  In
		 * PLL mode, the offset is reduced by a fixed factor
		 * times the time constant.  In FLL mode the offset is
		 * used directly.  In either mode, the maximum phase
		 * adjustment for each second is clamped so as to spread
		 * the adjustment over not more than the number of
		 * seconds between updates.
		 */
		if (time_offset < 0) {
			ltemp = -time_offset;
			if (!(time_status & STA_FLL))
				ltemp >>= SHIFT_KG + time_constant;
			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
				ltemp = (MAXPHASE / MINSEC) <<
				    SHIFT_UPDATE;
			time_offset += ltemp;
			time_adj = -ltemp << (shifthz - SHIFT_UPDATE);
		} else if (time_offset > 0) {
			ltemp = time_offset;
			if (!(time_status & STA_FLL))
				ltemp >>= SHIFT_KG + time_constant;
			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
				ltemp = (MAXPHASE / MINSEC) <<
				    SHIFT_UPDATE;
			time_offset -= ltemp;
			time_adj = ltemp << (shifthz - SHIFT_UPDATE);
		} else
			time_adj = 0;

		/*
		 * Compute the frequency estimate and additional phase
		 * adjustment due to frequency error for the next
		 * second.  When the PPS signal is engaged, gnaw on the
		 * watchdog counter and update the frequency computed by
		 * the pll and the PPS signal.
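		 *
		 * (Worked example for the phase clamp in the block above:
		 * with the nominal timex.h values MAXPHASE = 512000 us
		 * and MINSEC = 16 s, the per-second slew is limited to
		 * 512000 / 16 = 32000 us, so a full 512 ms offset is
		 * amortized over at least 16 seconds.)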
		 */
#ifdef PPS_SYNC
		pps_valid++;
		if (pps_valid == PPS_VALID) {
			pps_jitter = MAXTIME;
			pps_stabil = MAXFREQ;
			time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
			    STA_PPSWANDER | STA_PPSERROR);
		}
		ltemp = time_freq + pps_freq;
#else
		ltemp = time_freq;
#endif /* PPS_SYNC */

		if (ltemp < 0)
			time_adj -= -ltemp >> (SHIFT_USEC - shifthz);
		else
			time_adj += ltemp >> (SHIFT_USEC - shifthz);
		time_adj += (long)fixtick << shifthz;

		/*
		 * When the CPU clock oscillator frequency is not a
		 * power of 2 in Hz, shifthz is only an approximate
		 * scale factor.
		 *
		 * To determine the adjustment, you can do the following:
		 *	bc -q
		 *	scale=24
		 *	obase=2
		 *	idealhz/realhz
		 * where `idealhz' is the next higher power of 2, and `realhz'
		 * is the actual value.  You may need to factor this result
		 * into a sequence of 2 multipliers to get better precision.
		 *
		 * Likewise, the error can be calculated with (e.g. for 100Hz):
		 *	bc -q
		 *	scale=24
		 *	((1+2^-2+2^-5)*(1-2^-10)*realhz-idealhz)/idealhz
		 * (and then multiply by 1000000 to get ppm).
		 */
		switch (hz) {
		case 60:
			/* A factor of 1.000100010001 gives about 15ppm
			   error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 4);
				time_adj -= (-time_adj >> 8);
			} else {
				time_adj += (time_adj >> 4);
				time_adj += (time_adj >> 8);
			}
			break;

		case 96:
			/* A factor of 1.0101010101 gives about 244ppm error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 2);
				time_adj -= (-time_adj >> 4) + (-time_adj >> 8);
			} else {
				time_adj += (time_adj >> 2);
				time_adj += (time_adj >> 4) + (time_adj >> 8);
			}
			break;

		case 100:
			/* A factor of 1.010001111010111 gives about 1ppm
			   error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
				time_adj += (-time_adj >> 10);
			} else {
				time_adj += (time_adj >> 2) + (time_adj >> 5);
				time_adj -= (time_adj >> 10);
			}
			break;

		case 1000:
			/* A factor of 1.000001100010100001 gives about 50ppm
			   error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 6) + (-time_adj >> 11);
				time_adj -= (-time_adj >> 7);
			} else {
				time_adj += (time_adj >> 6) + (time_adj >> 11);
				time_adj += (time_adj >> 7);
			}
			break;

		case 1200:
			/* A factor of 1.1011010011100001 gives about 64ppm
			   error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 1) + (-time_adj >> 6);
				time_adj -= (-time_adj >> 3) + (-time_adj >> 10);
			} else {
				time_adj += (time_adj >> 1) + (time_adj >> 6);
				time_adj += (time_adj >> 3) + (time_adj >> 10);
			}
			break;
		}

#ifdef EXT_CLOCK
		/*
		 * If an external clock is present, it is necessary to
		 * discipline the kernel time variable anyway, since not
		 * all system components use the microtime() interface.
		 * Here, the time offset between the external clock and
		 * kernel time variable is computed every so often.
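		 * (Specifically, roughly once every CLOCK_INTERVAL = 30
		 * seconds, as counted by clock_count below.)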
		 */
		clock_count++;
		if (clock_count > CLOCK_INTERVAL) {
			clock_count = 0;
			microtime(&clock_ext);
			delta.tv_sec = clock_ext.tv_sec - time.tv_sec;
			delta.tv_usec = clock_ext.tv_usec -
			    time.tv_usec;
			if (delta.tv_usec < 0)
				delta.tv_sec--;
			if (delta.tv_usec >= 500000) {
				delta.tv_usec -= 1000000;
				delta.tv_sec++;
			}
			if (delta.tv_usec < -500000) {
				delta.tv_usec += 1000000;
				delta.tv_sec--;
			}
			if (delta.tv_sec > 0 || (delta.tv_sec == 0 &&
			    delta.tv_usec > MAXPHASE) ||
			    delta.tv_sec < -1 || (delta.tv_sec == -1 &&
			    delta.tv_usec < -MAXPHASE)) {
				time = clock_ext;
				delta.tv_sec = 0;
				delta.tv_usec = 0;
			}
#ifdef HIGHBALL
			clock_cpu = delta.tv_usec;
#else /* HIGHBALL */
			hardupdate(delta.tv_usec);
#endif /* HIGHBALL */
		}
#endif /* EXT_CLOCK */
	}

#endif /* NTP */

	/*
	 * Update real-time timeout queue.
	 * Process callouts at a very low CPU priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	if (callout_hardclock()) {
		if (CLKF_BASEPRI(frame)) {
			/*
			 * Save the overhead of a software interrupt;
			 * it will happen as soon as we return, so do
			 * it now.
			 */
			spllowersoftclock();
			KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
			softclock(NULL);
			KERNEL_UNLOCK();
		} else {
#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
			softintr_schedule(softclock_si);
#else
			setsoftclock();
#endif
		}
	}
}

/*
 * Compute number of ticks until the specified time.  Used to compute the
 * second argument to callout_reset() from an absolute time.
 */
int
hzto(struct timeval *tv)
{
	unsigned long ticks;
	long sec, usec;
	int s;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case, but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints are 32-bit, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
	s = splclock();
	sec = tv->tv_sec - time.tv_sec;
	usec = tv->tv_usec - time.tv_usec;
	splx(s);

	if (usec < 0) {
		sec--;
		usec += 1000000;
	}

	if (sec < 0 || (sec == 0 && usec <= 0)) {
		/*
		 * Would expire now or in the past.  Return 0 ticks.
		 * This is different from the legacy hzto() interface,
		 * and callers need to check for it.
		 */
		ticks = 0;
	} else if (sec <= (LONG_MAX / 1000000))
		ticks = (((sec * 1000000) + (unsigned long)usec + (tick - 1))
		    / tick) + 1;
	else if (sec <= (LONG_MAX / hz))
		ticks = (sec * hz) +
		    (((unsigned long)usec + (tick - 1)) / tick) + 1;
	else
		ticks = LONG_MAX;

	if (ticks > INT_MAX)
		ticks = INT_MAX;

	return ((int)ticks);
}
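
/*
 * Worked example for hzto() above (illustrative): with hz = 100 and
 * tick = 10000, a target 1.5 s in the future gives sec = 1 and usec =
 * 500000, so ticks = ((1000000 + 500000 + 9999) / 10000) + 1 = 151.
 */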

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{

	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		/*
		 * This is only necessary if using the clock as the
		 * profiling source.
		 */
		if (++profprocs == 1 && stathz != 0)
			psdiv = psratio;
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(struct proc *p)
{

	if (p->p_flag & P_PROFIL) {
		p->p_flag &= ~P_PROFIL;
		/*
		 * This is only necessary if using the clock as the
		 * profiling source.
		 */
		if (--profprocs == 0 && stathz != 0)
			psdiv = 1;
	}
}

#if defined(PERFCTRS)
/*
 * Independent profiling "tick" in case we're using a separate
 * clock or profiling event source.  Currently, that's just
 * performance counters--hence the wrapper.
 */
void
proftick(struct clockframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	intptr_t i;
#endif
	struct proc *p;

	p = curproc;
	if (CLKF_USERMODE(frame)) {
		if (p->p_flag & P_PROFIL)
			addupc_intr(p, CLKF_PC(frame));
	} else {
#ifdef GPROF
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
#ifdef PROC_PC
		if (p && p->p_flag & P_PROFIL)
			addupc_intr(p, PROC_PC(p));
#endif
	}
}
#endif

/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.
 */
void
statclock(struct clockframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	intptr_t i;
#endif
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	struct lwp *l;
	struct proc *p;

	/*
	 * Notice changes in divisor frequency, and adjust clock
	 * frequency accordingly.
	 */
	if (spc->spc_psdiv != psdiv) {
		spc->spc_psdiv = psdiv;
		spc->spc_pscnt = psdiv;
		if (psdiv == 1) {
			setstatclockrate(stathz);
		} else {
			setstatclockrate(profhz);
		}
	}
	l = curlwp;
	p = (l ? l->l_proc : NULL);
	if (CLKF_USERMODE(frame)) {
		if (p->p_flag & P_PROFIL && profsrc == PROFSRC_CLOCK)
			addupc_intr(p, CLKF_PC(frame));
		if (--spc->spc_pscnt > 0)
			return;
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled, record the tick.
		 */
		p->p_uticks++;
		if (p->p_nice > NZERO)
			spc->spc_cp_time[CP_NICE]++;
		else
			spc->spc_cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (profsrc == PROFSRC_CLOCK && g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
#ifdef LWP_PC
		if (p && profsrc == PROFSRC_CLOCK && p->p_flag & P_PROFIL)
			addupc_intr(p, LWP_PC(l));
#endif
		if (--spc->spc_pscnt > 0)
			return;
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
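		 * (For example, a tick taken while servicing a disk
		 * interrupt is counted in CP_INTR and in the p_iticks of
		 * whichever process happened to be running.)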
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if (CLKF_INTR(frame)) {
			if (p != NULL)
				p->p_iticks++;
			spc->spc_cp_time[CP_INTR]++;
		} else if (p != NULL) {
			p->p_sticks++;
			spc->spc_cp_time[CP_SYS]++;
		} else
			spc->spc_cp_time[CP_IDLE]++;
	}
	spc->spc_pscnt = psdiv;

	if (l != NULL) {
		++p->p_cpticks;
		/*
		 * If no separate schedclock is provided, call it here
		 * at about 16 Hz.
		 */
		if (schedhz == 0)
			if ((int)(--ci->ci_schedstate.spc_schedticks) <= 0) {
				schedclock(l);
				ci->ci_schedstate.spc_schedticks = statscheddiv;
			}
	}
}


#ifdef NTP	/* NTP phase-locked loop in kernel */

/*
 * hardupdate() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency.  The implementation is of an adaptive-parameter,
 * hybrid phase/frequency-lock loop (PLL/FLL).  The routine computes new
 * time and frequency offset estimates for each call.  If the kernel PPS
 * discipline code is configured (PPS_SYNC), the PPS signal itself
 * determines the new time offset, instead of the calling argument.
 * Presumably, calls to ntp_adjtime() occur only when the caller
 * believes the local clock is valid within some bound (+-128 ms with
 * NTP).  If the caller's time is far different from the PPS time, an
 * argument will ensue, and it's not clear who will lose.
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 1024 s, operation should be in phase-lock mode
 * (STA_FLL = 0), where the loop is disciplined to phase.  For update
 * intervals greater than this, operation should be in frequency-lock
 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
 *
 * Note: splclock() is in effect.
 */
void
hardupdate(long offset)
{
	long ltemp, mtemp;

	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
		return;
	ltemp = offset;
#ifdef PPS_SYNC
	if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
		ltemp = pps_offset;
#endif /* PPS_SYNC */

	/*
	 * Scale the phase adjustment and clamp to the operating range.
	 */
	if (ltemp > MAXPHASE)
		time_offset = MAXPHASE << SHIFT_UPDATE;
	else if (ltemp < -MAXPHASE)
		time_offset = -(MAXPHASE << SHIFT_UPDATE);
	else
		time_offset = ltemp << SHIFT_UPDATE;

	/*
	 * Select whether the frequency is to be controlled and in which
	 * mode (PLL or FLL).  Clamp to the operating range.  Ugly
	 * multiply/divide should be replaced someday.
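	 * (For example, a requested offset of 700000 us exceeds the
	 * nominal MAXPHASE of 512000 us, so the clamp in the previous
	 * block pins time_offset to MAXPHASE << SHIFT_UPDATE and the
	 * surplus is discarded.)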
	 */
	if (time_status & STA_FREQHOLD || time_reftime == 0)
		time_reftime = time.tv_sec;
	mtemp = time.tv_sec - time_reftime;
	time_reftime = time.tv_sec;
	if (time_status & STA_FLL) {
		if (mtemp >= MINSEC) {
			ltemp = ((time_offset / mtemp) << (SHIFT_USEC -
			    SHIFT_UPDATE));
			if (ltemp < 0)
				time_freq -= -ltemp >> SHIFT_KH;
			else
				time_freq += ltemp >> SHIFT_KH;
		}
	} else {
		if (mtemp < MAXSEC) {
			ltemp *= mtemp;
			if (ltemp < 0)
				time_freq -= -ltemp >> (time_constant +
				    time_constant + SHIFT_KF -
				    SHIFT_USEC);
			else
				time_freq += ltemp >> (time_constant +
				    time_constant + SHIFT_KF -
				    SHIFT_USEC);
		}
	}
	if (time_freq > time_tolerance)
		time_freq = time_tolerance;
	else if (time_freq < -time_tolerance)
		time_freq = -time_tolerance;
}

#ifdef PPS_SYNC
/*
 * hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal.  It measures the PPS phase
 * and leaves it in a handy spot for the hardclock() routine.  It
 * integrates successive PPS phase differences and calculates the
 * frequency offset.  This is used in hardclock() to discipline the CPU
 * clock oscillator so that intrinsic frequency error is cancelled out.
 * The code requires the caller to capture the time and hardware counter
 * value at the on-time PPS signal transition.
 *
 * Note that, on some Unix systems, this routine runs at an interrupt
 * priority level higher than the timer interrupt routine hardclock().
 * Therefore, the variables used are distinct from the hardclock()
 * variables, with the following exceptions: the PPS frequency pps_freq
 * and phase pps_offset variables are determined by this routine and
 * updated atomically.  The time_tolerance variable can be considered a
 * constant, since it is infrequently changed, and then only when the
 * PPS signal is disabled.  The watchdog counter pps_valid is updated
 * once per second by hardclock() and is atomically cleared in this
 * routine.
 */
void
hardpps(struct timeval *tvp,	/* time at PPS */
	long usec		/* hardware counter at PPS */)
{
	long u_usec, v_usec, bigtick;
	long cal_sec, cal_usec;

	/*
	 * An occasional glitch can be produced when the PPS interrupt
	 * occurs in the hardclock() routine before the time variable is
	 * updated.  Here the offset is discarded when the difference
	 * between it and the last one is greater than tick/2, but not
	 * if the interval since the first discard exceeds 30 s.
	 */
	time_status |= STA_PPSSIGNAL;
	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
	pps_valid = 0;
	u_usec = -tvp->tv_usec;
	if (u_usec < -500000)
		u_usec += 1000000;
	v_usec = pps_offset - u_usec;
	if (v_usec < 0)
		v_usec = -v_usec;
	if (v_usec > (tick >> 1)) {
		if (pps_glitch > MAXGLITCH) {
			pps_glitch = 0;
			pps_tf[2] = u_usec;
			pps_tf[1] = u_usec;
		} else {
			pps_glitch++;
			u_usec = pps_offset;
		}
	} else
		pps_glitch = 0;

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * time.  The median sample becomes the time offset estimate; the
	 * difference between the other two samples becomes the time
	 * dispersion (jitter) estimate.
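	 *
	 * (Worked example: with samples pps_tf[] = {5, -3, 2}, newest
	 * first, the selection logic below picks the median, pps_tf[2] =
	 * 2, as pps_offset and the spread pps_tf[0] - pps_tf[1] = 5 -
	 * (-3) = 8 us as the jitter sample v_usec.)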
	 */
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0] = u_usec;
	if (pps_tf[0] > pps_tf[1]) {
		if (pps_tf[1] > pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 0 1 2 */
			v_usec = pps_tf[0] - pps_tf[2];
		} else if (pps_tf[2] > pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 2 0 1 */
			v_usec = pps_tf[2] - pps_tf[1];
		} else {
			pps_offset = pps_tf[2];		/* 0 2 1 */
			v_usec = pps_tf[0] - pps_tf[1];
		}
	} else {
		if (pps_tf[1] < pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 2 1 0 */
			v_usec = pps_tf[2] - pps_tf[0];
		} else if (pps_tf[2] < pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 1 0 2 */
			v_usec = pps_tf[1] - pps_tf[2];
		} else {
			pps_offset = pps_tf[2];		/* 1 2 0 */
			v_usec = pps_tf[1] - pps_tf[0];
		}
	}
	if (v_usec > MAXTIME)
		pps_jitcnt++;
	v_usec = (v_usec << PPS_AVG) - pps_jitter;
	if (v_usec < 0)
		pps_jitter -= -v_usec >> PPS_AVG;
	else
		pps_jitter += v_usec >> PPS_AVG;
	if (pps_jitter > (MAXTIME >> 1))
		time_status |= STA_PPSJITTER;

	/*
	 * During the calibration interval adjust the starting time when
	 * the tick overflows.  At the end of the interval compute the
	 * duration of the interval and the difference of the hardware
	 * counters at the beginning and end of the interval.  This code
	 * is deliciously complicated by the fact that valid differences
	 * may exceed the value of tick when using long calibration
	 * intervals and small ticks.  Note that the counter can be
	 * greater than tick if caught at just the wrong instant, but
	 * the values returned and used here are correct.
	 */
	bigtick = (long)tick << SHIFT_USEC;
	pps_usec -= pps_freq;
	if (pps_usec >= bigtick)
		pps_usec -= bigtick;
	if (pps_usec < 0)
		pps_usec += bigtick;
	pps_time.tv_sec++;
	pps_count++;
	if (pps_count < (1 << pps_shift))
		return;
	pps_count = 0;
	pps_calcnt++;
	u_usec = usec << SHIFT_USEC;
	v_usec = pps_usec - u_usec;
	if (v_usec >= bigtick >> 1)
		v_usec -= bigtick;
	if (v_usec < -(bigtick >> 1))
		v_usec += bigtick;
	if (v_usec < 0)
		v_usec = -(-v_usec >> pps_shift);
	else
		v_usec = v_usec >> pps_shift;
	pps_usec = u_usec;
	cal_sec = tvp->tv_sec;
	cal_usec = tvp->tv_usec;
	cal_sec -= pps_time.tv_sec;
	cal_usec -= pps_time.tv_usec;
	if (cal_usec < 0) {
		cal_usec += 1000000;
		cal_sec--;
	}
	pps_time = *tvp;

	/*
	 * Check for lost interrupts, noise, excessive jitter and
	 * excessive frequency error.  The number of timer ticks during
	 * the interval may vary +-1 tick.  Add to this a margin of one
	 * tick for the PPS signal jitter and maximum frequency
	 * deviation.  If the limits are exceeded, the calibration
	 * interval is reset to the minimum and we start over.
	 */
	u_usec = (long)tick << 1;
	if (!((cal_sec == -1 && cal_usec > (1000000 - u_usec))
	    || (cal_sec == 0 && cal_usec < u_usec))
	    || v_usec > time_tolerance || v_usec < -time_tolerance) {
		pps_errcnt++;
		pps_shift = PPS_SHIFT;
		pps_intcnt = 0;
		time_status |= STA_PPSERROR;
		return;
	}

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * frequency.  The median sample becomes the frequency offset
	 * estimate; the difference between the other two samples
	 * becomes the frequency dispersion (stability) estimate.
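	 * (The selection logic is the same as for the time filter above;
	 * both pps_jitter and pps_stabil are then smoothed as exponential
	 * averages with weight 2^-PPS_AVG, nominally 1/4.)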
	 */
	pps_ff[2] = pps_ff[1];
	pps_ff[1] = pps_ff[0];
	pps_ff[0] = v_usec;
	if (pps_ff[0] > pps_ff[1]) {
		if (pps_ff[1] > pps_ff[2]) {
			u_usec = pps_ff[1];		/* 0 1 2 */
			v_usec = pps_ff[0] - pps_ff[2];
		} else if (pps_ff[2] > pps_ff[0]) {
			u_usec = pps_ff[0];		/* 2 0 1 */
			v_usec = pps_ff[2] - pps_ff[1];
		} else {
			u_usec = pps_ff[2];		/* 0 2 1 */
			v_usec = pps_ff[0] - pps_ff[1];
		}
	} else {
		if (pps_ff[1] < pps_ff[2]) {
			u_usec = pps_ff[1];		/* 2 1 0 */
			v_usec = pps_ff[2] - pps_ff[0];
		} else if (pps_ff[2] < pps_ff[0]) {
			u_usec = pps_ff[0];		/* 1 0 2 */
			v_usec = pps_ff[1] - pps_ff[2];
		} else {
			u_usec = pps_ff[2];		/* 1 2 0 */
			v_usec = pps_ff[1] - pps_ff[0];
		}
	}

	/*
	 * Here the frequency dispersion (stability) is updated.  If it
	 * is less than one-fourth the maximum (MAXFREQ), the frequency
	 * offset is updated as well, but clamped to the tolerance.  It
	 * will be processed later by the hardclock() routine.
	 */
	v_usec = (v_usec >> 1) - pps_stabil;
	if (v_usec < 0)
		pps_stabil -= -v_usec >> PPS_AVG;
	else
		pps_stabil += v_usec >> PPS_AVG;
	if (pps_stabil > MAXFREQ >> 2) {
		pps_stbcnt++;
		time_status |= STA_PPSWANDER;
		return;
	}
	if (time_status & STA_PPSFREQ) {
		if (u_usec < 0) {
			pps_freq -= -u_usec >> PPS_AVG;
			if (pps_freq < -time_tolerance)
				pps_freq = -time_tolerance;
			u_usec = -u_usec;
		} else {
			pps_freq += u_usec >> PPS_AVG;
			if (pps_freq > time_tolerance)
				pps_freq = time_tolerance;
		}
	}

	/*
	 * Here the calibration interval is adjusted.  If the maximum
	 * time difference is greater than tick / 4, reduce the interval
	 * by half.  If this is not the case for four consecutive
	 * intervals, double the interval.
	 */
	if (u_usec << pps_shift > bigtick >> 2) {
		pps_intcnt = 0;
		if (pps_shift > PPS_SHIFT)
			pps_shift--;
	} else if (pps_intcnt >= 4) {
		pps_intcnt = 0;
		if (pps_shift < PPS_SHIFTMAX)
			pps_shift++;
	} else
		pps_intcnt++;
}
#endif /* PPS_SYNC */
#endif /* NTP */