/*	$NetBSD: kern_clock.c,v 1.90 2004/02/13 11:36:22 wiz Exp $	*/

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_clock.c,v 1.90 2004/02/13 11:36:22 wiz Exp $");

#include "opt_ntp.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/timex.h>
#include <sys/sched.h>
#include <sys/time.h>

#include <machine/cpu.h>
#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
#include <machine/intr.h>
#endif

#ifdef GPROF
#include <sys/gmon.h>
#endif

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.  The main clock, running hz times per second, is used to keep
 * track of real time.  The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the CPU
 * just before its quantum expires.  Otherwise, it would never accumulate
 * CPU ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 */

#ifdef NTP	/* NTP phase-locked loop in kernel */
/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The following variables are read and set by the ntp_adjtime() system
 * call.
 *
 * time_state shows the state of the system clock, with values defined
 * in the timex.h header file.
 *
 * time_status shows the status of the system clock, with bits defined
 * in the timex.h header file.
 *
 * time_offset is used by the PLL/FLL to adjust the system time in small
 * increments.
 *
 * time_constant determines the bandwidth or "stiffness" of the PLL.
 *
 * time_tolerance determines maximum frequency error or tolerance of the
 * CPU clock oscillator and is a property of the architecture; however,
 * in principle it could change as a result of the presence of external
 * discipline signals, for instance.
 *
 * time_precision is usually equal to the kernel tick variable; however,
 * in cases where a precision clock counter or external clock is
 * available, the resolution can be much less than this and depends on
 * whether the external clock is working or not.
 *
 * time_maxerror is initialized by an ntp_adjtime() call and increased by
 * the kernel once each second to reflect the maximum error bound
 * growth.
 *
 * time_esterror is set and read by the ntp_adjtime() call, but
 * otherwise not used by the kernel.
 */
int	time_state = TIME_OK;		/* clock state */
int	time_status = STA_UNSYNC;	/* clock status bits */
long	time_offset = 0;		/* time offset (us) */
long	time_constant = 0;		/* pll time constant */
long	time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
long	time_precision = 1;		/* clock precision (us) */
long	time_maxerror = MAXPHASE;	/* maximum error (us) */
long	time_esterror = MAXPHASE;	/* estimated error (us) */

/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock.  The scale
 * factors are defined in the timex.h header file.
 *
 * time_phase and time_freq are the phase increment and the frequency
 * increment, respectively, of the kernel time variable.
 *
 * time_freq is set via ntp_adjtime() from a value stored in a file when
 * the synchronization daemon is first started.  Its value is retrieved
 * via ntp_adjtime() and written to the file about once per hour by the
 * daemon.
 *
 * time_adj is the adjustment added to the value of tick at each timer
 * interrupt and is recomputed from time_phase and time_freq at each
 * seconds rollover.
 *
 * time_reftime is the second's portion of the system time at the last
 * call to ntp_adjtime().  It is used to adjust the time_freq variable
 * and to increase the time_maxerror as the time since last update
 * increases.
 */
long	time_phase = 0;			/* phase offset (scaled us) */
long	time_freq = 0;			/* frequency offset (scaled ppm) */
long	time_adj = 0;			/* tick adjust (scaled 1 / hz) */
long	time_reftime = 0;		/* time at last adjustment (s) */

#ifdef PPS_SYNC
/*
 * The following variables are used only if the kernel PPS discipline
 * code is configured (PPS_SYNC).  The scale factors are defined in the
 * timex.h header file.
 *
 * pps_time contains the time at each calibration interval, as read by
 * microtime().  pps_count counts the seconds of the calibration
 * interval, the duration of which is nominally pps_shift in powers of
 * two.
 *
 * pps_offset is the time offset produced by the time median filter
 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
 * this filter.
 *
 * pps_freq is the frequency offset produced by the frequency median
 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
 * by this filter.
 *
 * pps_usec is latched from a high resolution counter or external clock
 * at pps_time.  Here we want the hardware counter contents only, not the
 * contents plus the time_tv.usec as usual.
 *
 * pps_valid counts the number of seconds since the last PPS update.  It
 * is used as a watchdog timer to disable the PPS discipline should the
 * PPS signal be lost.
 *
 * pps_glitch counts the number of seconds since the beginning of an
 * offset burst more than tick/2 from current nominal offset.  It is used
 * mainly to suppress error bursts due to priority conflicts between the
 * PPS interrupt and timer interrupt.
 *
 * pps_intcnt counts the calibration intervals for use in the interval-
 * adaptation algorithm.  It's just too complicated for words.
 *
 * pps_kc_hardpps_source contains an arbitrary value that uniquely
 * identifies the currently bound source of the PPS signal, or NULL
 * if no source is bound.
 *
 * pps_kc_hardpps_mode indicates which transitions, if any, of the PPS
 * signal should be reported.
 */
struct timeval pps_time;	/* kernel time at last interval */
long	pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
long	pps_offset = 0;		/* pps time offset (us) */
long	pps_jitter = MAXTIME;	/* time dispersion (jitter) (us) */
long	pps_ff[] = {0, 0, 0};	/* pps frequency offset median filter */
long	pps_freq = 0;		/* frequency offset (scaled ppm) */
long	pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
long	pps_usec = 0;		/* microsec counter at last interval */
long	pps_valid = PPS_VALID;	/* pps signal watchdog counter */
int	pps_glitch = 0;		/* pps signal glitch counter */
int	pps_count = 0;		/* calibration interval counter (s) */
int	pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
int	pps_intcnt = 0;		/* intervals at current duration */
void	*pps_kc_hardpps_source = NULL; /* current PPS supplier's identifier */
int	pps_kc_hardpps_mode = 0; /* interesting edges of PPS signal */

/*
 * PPS signal quality monitors
 *
 * pps_jitcnt counts the seconds that have been discarded because the
 * jitter measured by the time median filter exceeds the limit MAXTIME
 * (100 us).
 *
 * pps_calcnt counts the frequency calibration intervals, which are
 * variable from 4 s to 256 s.
 *
 * pps_errcnt counts the calibration intervals which have been discarded
 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 * calibration interval jitter exceeds two ticks.
 *
 * pps_stbcnt counts the calibration intervals that have been discarded
 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
 */
long	pps_jitcnt = 0;		/* jitter limit exceeded */
long	pps_calcnt = 0;		/* calibration intervals */
long	pps_errcnt = 0;		/* calibration errors */
long	pps_stbcnt = 0;		/* stability limit exceeded */
#endif /* PPS_SYNC */

#ifdef EXT_CLOCK
/*
 * External clock definitions
 *
 * The following definitions and declarations are used only if an
 * external clock is configured on the system.
 */
#define	CLOCK_INTERVAL 30	/* CPU clock update interval (s) */

/*
 * The clock_count variable is set to CLOCK_INTERVAL at each PPS
 * interrupt and decremented once each second.
 */
int	clock_count = 0;	/* CPU clock counter */

#ifdef HIGHBALL
/*
 * The clock_offset and clock_cpu variables are used by the HIGHBALL
 * interface.  The clock_offset variable defines the offset between
 * system time and the HIGHBALL counters.  The clock_cpu variable contains
 * the offset between the system clock and the HIGHBALL clock for use in
 * disciplining the kernel time variable.
 */
extern struct timeval clock_offset;	/* Highball clock offset */
long	clock_cpu = 0;			/* CPU clock adjust */
#endif /* HIGHBALL */
#endif /* EXT_CLOCK */
#endif /* NTP */


/*
 * Bump a timeval by a small number of usec's.
 */
#define	BUMPTIME(t, usec) { \
	volatile struct timeval *tp = (t); \
	long us; \
 \
	tp->tv_usec = us = tp->tv_usec + (usec); \
	if (us >= 1000000) { \
		tp->tv_usec = us - 1000000; \
		tp->tv_sec++; \
	} \
}

int	stathz;
int	profhz;
int	profsrc;
int	schedhz;
int	profprocs;
int	hardclock_ticks;
static int psdiv;			/* prof => stat divider */
int	psratio;			/* ratio: prof / stat */
int	tickfix, tickfixinterval;	/* used if tick not really integral */
#ifndef NTP
static int tickfixcnt;			/* accumulated fractional error */
#else
int	fixtick;			/* used by NTP for same */
int	shifthz;
#endif

/*
 * We might want ldd to load both words from time at once.
 * To succeed we need to be quadword aligned.
 * The sparc already does that, and that it has worked so far is a fluke.
 */
volatile struct	timeval time  __attribute__((__aligned__(__alignof__(quad_t))));
volatile struct	timeval mono_time;

void	*softclock_si;

/*
 * Initialize clock frequencies and start both clocks running.
 */
void
initclocks(void)
{
	int i;

#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
	softclock_si = softintr_establish(IPL_SOFTCLOCK, softclock, NULL);
	if (softclock_si == NULL)
		panic("initclocks: unable to register softclock intr");
#endif

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	psdiv = 1;
	cpu_initclocks();

	/*
	 * Compute profhz/stathz/rrticks, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;
	rrticks = hz / 10;

#ifdef NTP
	switch (hz) {
	case 1:
		shifthz = SHIFT_SCALE - 0;
		break;
	case 2:
		shifthz = SHIFT_SCALE - 1;
		break;
	case 4:
		shifthz = SHIFT_SCALE - 2;
		break;
	case 8:
		shifthz = SHIFT_SCALE - 3;
		break;
	case 16:
		shifthz = SHIFT_SCALE - 4;
		break;
	case 32:
		shifthz = SHIFT_SCALE - 5;
		break;
	case 60:
	case 64:
		shifthz = SHIFT_SCALE - 6;
		break;
	case 96:
	case 100:
	case 128:
		shifthz = SHIFT_SCALE - 7;
		break;
	case 256:
		shifthz = SHIFT_SCALE - 8;
		break;
	case 512:
		shifthz = SHIFT_SCALE - 9;
		break;
	case 1000:
	case 1024:
		shifthz = SHIFT_SCALE - 10;
		break;
	case 1200:
	case 2048:
		shifthz = SHIFT_SCALE - 11;
		break;
	case 4096:
		shifthz = SHIFT_SCALE - 12;
		break;
	case 8192:
		shifthz = SHIFT_SCALE - 13;
		break;
	case 16384:
		shifthz = SHIFT_SCALE - 14;
		break;
	case 32768:
		shifthz = SHIFT_SCALE - 15;
		break;
	case 65536:
		shifthz = SHIFT_SCALE - 16;
		break;
	default:
		panic("weird hz");
	}
	if (fixtick == 0) {
		/*
		 * Give MD code a chance to set this to a better
		 * value; but, if it doesn't, we should.
		 */
		fixtick = (1000000 - (hz*tick));
	}
#endif
}

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(struct clockframe *frame)
{
	struct lwp *l;
	struct proc *p;
	int delta;
	extern int tickdelta;
	extern long timedelta;
	struct cpu_info *ci = curcpu();
	struct ptimer *pt;
#ifdef NTP
	int time_update;
	int ltemp;
#endif

	l = curlwp;
	if (l) {
		p = l->l_proc;
		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		if (CLKF_USERMODE(frame) && p->p_timers &&
		    (pt = LIST_FIRST(&p->p_timers->pts_virtual)) != NULL)
			if (itimerdecr(pt, tick) == 0)
				itimerfire(pt);
		if (p->p_timers &&
		    (pt = LIST_FIRST(&p->p_timers->pts_prof)) != NULL)
			if (itimerdecr(pt, tick) == 0)
				itimerfire(pt);
	}

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);
	if ((--ci->ci_schedstate.spc_rrticks) <= 0)
		roundrobin(ci);

#if defined(MULTIPROCESSOR)
	/*
	 * If we are not the primary CPU, we're not allowed to do
	 * any more work.
	 */
	if (CPU_IS_PRIMARY(ci) == 0)
		return;
#endif

	/*
	 * Increment the time-of-day.  The increment is normally just
	 * ``tick''.  If the machine is one which has a clock frequency
	 * such that ``hz'' would not divide the second evenly into
	 * milliseconds, a periodic adjustment must be applied.  Finally,
	 * if we are still adjusting the time (see adjtime()),
	 * ``tickdelta'' may also be added in.
	 */
	hardclock_ticks++;
	delta = tick;

#ifndef NTP
	if (tickfix) {
		tickfixcnt += tickfix;
		if (tickfixcnt >= tickfixinterval) {
			delta++;
			tickfixcnt -= tickfixinterval;
		}
	}
#endif /* !NTP */
	/* Imprecise 4bsd adjtime() handling */
	if (timedelta != 0) {
		delta += tickdelta;
		timedelta -= tickdelta;
	}

#ifdef notyet
	microset();
#endif

#ifndef NTP
	BUMPTIME(&time, delta);		/* XXX Now done using NTP code below */
#endif
	BUMPTIME(&mono_time, delta);

#ifdef NTP
	time_update = delta;

	/*
	 * Compute the phase adjustment.  If the low-order bits
	 * (time_phase) of the update overflow, bump the high-order bits
	 * (time_update).
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		time_update -= ltemp;
	} else if (time_phase >= FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		time_update += ltemp;
	}

#ifdef HIGHBALL
	/*
	 * If the HIGHBALL board is installed, we need to adjust the
	 * external clock offset in order to close the hardware feedback
	 * loop.  This will adjust the external clock phase and frequency
	 * in small amounts.  The additional phase noise and frequency
	 * wander this causes should be minimal.  We also need to
	 * discipline the kernel time variable, since the PLL is used to
	 * discipline the external clock.  If the Highball board is not
	 * present, we discipline kernel time with the PLL as usual.  We
	 * assume that the external clock phase adjustment (time_update)
	 * and kernel phase adjustment (clock_cpu) are less than the
	 * value of tick.
	 */
	clock_offset.tv_usec += time_update;
	if (clock_offset.tv_usec >= 1000000) {
		clock_offset.tv_sec++;
		clock_offset.tv_usec -= 1000000;
	}
	if (clock_offset.tv_usec < 0) {
		clock_offset.tv_sec--;
		clock_offset.tv_usec += 1000000;
	}
	time.tv_usec += clock_cpu;
	clock_cpu = 0;
#else
	time.tv_usec += time_update;
#endif /* HIGHBALL */

	/*
	 * On rollover of the second the phase adjustment to be used for
	 * the next second is calculated.  Also, the maximum error is
	 * increased by the tolerance.  If the PPS frequency discipline
	 * code is present, the phase is increased to compensate for the
	 * CPU clock oscillator frequency error.
	 *
	 * On a 32-bit machine and given parameters in the timex.h
	 * header file, the maximum phase adjustment is +-512 ms and
	 * maximum frequency offset is (a tad less than) +-512 ppm.  On a
	 * 64-bit machine, you shouldn't need to ask.
	 */
	if (time.tv_usec >= 1000000) {
		time.tv_usec -= 1000000;
		time.tv_sec++;
		time_maxerror += time_tolerance >> SHIFT_USEC;

		/*
		 * Leap second processing.  If in leap-insert state at
		 * the end of the day, the system clock is set back one
		 * second; if in leap-delete state, the system clock is
		 * set ahead one second.  The microtime() routine or
		 * external clock driver will ensure that reported time
		 * is always monotonic.  The ugly divides should be
		 * replaced.
		 */
		switch (time_state) {
		case TIME_OK:
			if (time_status & STA_INS)
				time_state = TIME_INS;
			else if (time_status & STA_DEL)
				time_state = TIME_DEL;
			break;

		case TIME_INS:
			if (time.tv_sec % 86400 == 0) {
				time.tv_sec--;
				time_state = TIME_OOP;
			}
			break;

		case TIME_DEL:
			if ((time.tv_sec + 1) % 86400 == 0) {
				time.tv_sec++;
				time_state = TIME_WAIT;
			}
			break;

		case TIME_OOP:
			time_state = TIME_WAIT;
			break;

		case TIME_WAIT:
			if (!(time_status & (STA_INS | STA_DEL)))
				time_state = TIME_OK;
			break;
		}

		/*
		 * Compute the phase adjustment for the next second.  In
		 * PLL mode, the offset is reduced by a fixed factor
		 * times the time constant.  In FLL mode the offset is
		 * used directly.  In either mode, the maximum phase
		 * adjustment for each second is clamped so as to spread
		 * the adjustment over not more than the number of
		 * seconds between updates.
		 */
		if (time_offset < 0) {
			ltemp = -time_offset;
			if (!(time_status & STA_FLL))
				ltemp >>= SHIFT_KG + time_constant;
			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
				ltemp = (MAXPHASE / MINSEC) <<
				    SHIFT_UPDATE;
			time_offset += ltemp;
			time_adj = -ltemp << (shifthz - SHIFT_UPDATE);
		} else if (time_offset > 0) {
			ltemp = time_offset;
			if (!(time_status & STA_FLL))
				ltemp >>= SHIFT_KG + time_constant;
			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
				ltemp = (MAXPHASE / MINSEC) <<
				    SHIFT_UPDATE;
			time_offset -= ltemp;
			time_adj = ltemp << (shifthz - SHIFT_UPDATE);
		} else
			time_adj = 0;

		/*
		 * Compute the frequency estimate and additional phase
		 * adjustment due to frequency error for the next
		 * second.  When the PPS signal is engaged, gnaw on the
		 * watchdog counter and update the frequency computed by
		 * the pll and the PPS signal.
		 */
#ifdef PPS_SYNC
		pps_valid++;
		if (pps_valid == PPS_VALID) {
			pps_jitter = MAXTIME;
			pps_stabil = MAXFREQ;
			time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
			    STA_PPSWANDER | STA_PPSERROR);
		}
		ltemp = time_freq + pps_freq;
#else
		ltemp = time_freq;
#endif /* PPS_SYNC */

		if (ltemp < 0)
			time_adj -= -ltemp >> (SHIFT_USEC - shifthz);
		else
			time_adj += ltemp >> (SHIFT_USEC - shifthz);
		time_adj += (long)fixtick << shifthz;

		/*
		 * When the CPU clock oscillator frequency is not a
		 * power of 2 in Hz, shifthz is only an approximate
		 * scale factor.
		 *
		 * To determine the adjustment, you can do the following:
		 *	bc -q
		 *	scale=24
		 *	obase=2
		 *	idealhz/realhz
		 * where `idealhz' is the next higher power of 2, and `realhz'
		 * is the actual value.  You may need to factor this result
		 * into a sequence of 2 multipliers to get better precision.
		 *
		 * Likewise, the error can be calculated with (e.g. for 100Hz):
		 *	bc -q
		 *	scale=24
		 *	((1+2^-2+2^-5)*(1-2^-10)*realhz-idealhz)/idealhz
		 * (and then multiply by 1000000 to get ppm).
		 */
		switch (hz) {
		case 60:
			/* A factor of 1.000100010001 gives about 15ppm
			   error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 4);
				time_adj -= (-time_adj >> 8);
			} else {
				time_adj += (time_adj >> 4);
				time_adj += (time_adj >> 8);
			}
			break;

		case 96:
			/* A factor of 1.0101010101 gives about 244ppm error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 2);
				time_adj -= (-time_adj >> 4) + (-time_adj >> 8);
			} else {
				time_adj += (time_adj >> 2);
				time_adj += (time_adj >> 4) + (time_adj >> 8);
			}
			break;

		case 100:
			/* A factor of 1.010001111010111 gives about 1ppm
			   error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
				time_adj += (-time_adj >> 10);
			} else {
				time_adj += (time_adj >> 2) + (time_adj >> 5);
				time_adj -= (time_adj >> 10);
			}
			break;

		case 1000:
			/* A factor of 1.000001100010100001 gives about 50ppm
			   error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 6) + (-time_adj >> 11);
				time_adj -= (-time_adj >> 7);
			} else {
				time_adj += (time_adj >> 6) + (time_adj >> 11);
				time_adj += (time_adj >> 7);
			}
			break;

		case 1200:
			/* A factor of 1.1011010011100001 gives about 64ppm
			   error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 1) + (-time_adj >> 6);
				time_adj -= (-time_adj >> 3) + (-time_adj >> 10);
			} else {
				time_adj += (time_adj >> 1) + (time_adj >> 6);
				time_adj += (time_adj >> 3) + (time_adj >> 10);
			}
			break;
		}

#ifdef EXT_CLOCK
		/*
		 * If an external clock is present, it is necessary to
		 * discipline the kernel time variable anyway, since not
		 * all system components use the microtime() interface.
		 * Here, the time offset between the external clock and
		 * kernel time variable is computed every so often.
		 */
		clock_count++;
		if (clock_count > CLOCK_INTERVAL) {
			clock_count = 0;
			microtime(&clock_ext);
			delta.tv_sec = clock_ext.tv_sec - time.tv_sec;
			delta.tv_usec = clock_ext.tv_usec -
			    time.tv_usec;
			if (delta.tv_usec < 0)
				delta.tv_sec--;
			if (delta.tv_usec >= 500000) {
				delta.tv_usec -= 1000000;
				delta.tv_sec++;
			}
			if (delta.tv_usec < -500000) {
				delta.tv_usec += 1000000;
				delta.tv_sec--;
			}
			if (delta.tv_sec > 0 || (delta.tv_sec == 0 &&
			    delta.tv_usec > MAXPHASE) ||
			    delta.tv_sec < -1 || (delta.tv_sec == -1 &&
			    delta.tv_usec < -MAXPHASE)) {
				time = clock_ext;
				delta.tv_sec = 0;
				delta.tv_usec = 0;
			}
#ifdef HIGHBALL
			clock_cpu = delta.tv_usec;
#else /* HIGHBALL */
			hardupdate(delta.tv_usec);
#endif /* HIGHBALL */
		}
#endif /* EXT_CLOCK */
	}

#endif /* NTP */

	/*
	 * Update real-time timeout queue.
	 * Process callouts at a very low CPU priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	if (callout_hardclock()) {
		if (CLKF_BASEPRI(frame)) {
			/*
			 * Save the overhead of a software interrupt;
			 * it will happen as soon as we return, so do
			 * it now.
			 */
			spllowersoftclock();
			KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
			softclock(NULL);
			KERNEL_UNLOCK();
		} else {
#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
			softintr_schedule(softclock_si);
#else
			setsoftclock();
#endif
		}
	}
}

/*
 * Compute number of hz until specified time.  Used to compute second
 * argument to callout_reset() from an absolute time.
 */
int
hzto(struct timeval *tv)
{
	unsigned long ticks;
	long sec, usec;
	int s;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case, but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints are 32-bit, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
	s = splclock();
	sec = tv->tv_sec - time.tv_sec;
	usec = tv->tv_usec - time.tv_usec;
	splx(s);

	if (usec < 0) {
		sec--;
		usec += 1000000;
	}

	if (sec < 0 || (sec == 0 && usec <= 0)) {
		/*
		 * Would expire now or in the past.  Return 0 ticks.
		 * This is different from the legacy hzto() interface,
		 * and callers need to check for it.
		 */
		ticks = 0;
	} else if (sec <= (LONG_MAX / 1000000))
		ticks = (((sec * 1000000) + (unsigned long)usec + (tick - 1))
		    / tick) + 1;
	else if (sec <= (LONG_MAX / hz))
		ticks = (sec * hz) +
		    (((unsigned long)usec + (tick - 1)) / tick) + 1;
	else
		ticks = LONG_MAX;

	if (ticks > INT_MAX)
		ticks = INT_MAX;

	return ((int)ticks);
}

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{

	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		/*
		 * This is only necessary if using the clock as the
		 * profiling source.
		 */
		if (++profprocs == 1 && stathz != 0)
			psdiv = psratio;
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(struct proc *p)
{

	if (p->p_flag & P_PROFIL) {
		p->p_flag &= ~P_PROFIL;
		/*
		 * This is only necessary if using the clock as the
		 * profiling source.
		 */
		if (--profprocs == 0 && stathz != 0)
			psdiv = 1;
	}
}

#if defined(PERFCTRS)
/*
 * Independent profiling "tick" in case we're using a separate
 * clock or profiling event source.  Currently, that's just
 * performance counters--hence the wrapper.
 */
void
proftick(struct clockframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	intptr_t i;
#endif
	struct proc *p;

	p = curproc;
	if (CLKF_USERMODE(frame)) {
		if (p->p_flag & P_PROFIL)
			addupc_intr(p, CLKF_PC(frame));
	} else {
#ifdef GPROF
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
#ifdef PROC_PC
		if (p && p->p_flag & P_PROFIL)
			addupc_intr(p, PROC_PC(p));
#endif
	}
}
#endif

/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.
 */
void
statclock(struct clockframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	intptr_t i;
#endif
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	struct lwp *l;
	struct proc *p;

	/*
	 * Notice changes in divisor frequency, and adjust clock
	 * frequency accordingly.
	 */
	if (spc->spc_psdiv != psdiv) {
		spc->spc_psdiv = psdiv;
		spc->spc_pscnt = psdiv;
		if (psdiv == 1) {
			setstatclockrate(stathz);
		} else {
			setstatclockrate(profhz);
		}
	}
	l = curlwp;
	p = (l ? l->l_proc : 0);
	if (CLKF_USERMODE(frame)) {
		if (p->p_flag & P_PROFIL && profsrc == PROFSRC_CLOCK)
			addupc_intr(p, CLKF_PC(frame));
		if (--spc->spc_pscnt > 0)
			return;
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled record the tick.
		 */
		p->p_uticks++;
		if (p->p_nice > NZERO)
			spc->spc_cp_time[CP_NICE]++;
		else
			spc->spc_cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (profsrc == PROFSRC_CLOCK && g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
#ifdef LWP_PC
		if (p && profsrc == PROFSRC_CLOCK && p->p_flag & P_PROFIL)
			addupc_intr(p, LWP_PC(l));
#endif
		if (--spc->spc_pscnt > 0)
			return;
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if (CLKF_INTR(frame)) {
			if (p != NULL)
				p->p_iticks++;
			spc->spc_cp_time[CP_INTR]++;
		} else if (p != NULL) {
			p->p_sticks++;
			spc->spc_cp_time[CP_SYS]++;
		} else
			spc->spc_cp_time[CP_IDLE]++;
	}
	spc->spc_pscnt = psdiv;

	if (l != NULL) {
		++p->p_cpticks;
		/*
		 * If no separate schedclock is provided, call it here
		 * at ~~12-25 Hz, ~~16 Hz is best
		 */
		if (schedhz == 0)
			if ((++ci->ci_schedstate.spc_schedticks & 3) == 0)
				schedclock(l);
	}
}


#ifdef NTP	/* NTP phase-locked loop in kernel */

/*
 * hardupdate() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency.  The implementation is of an adaptive-parameter,
 * hybrid phase/frequency-lock loop (PLL/FLL).  The routine computes new
 * time and frequency offset estimates for each call.  If the kernel PPS
 * discipline code is configured (PPS_SYNC), the PPS signal itself
 * determines the new time offset, instead of the calling argument.
 * Presumably, calls to ntp_adjtime() occur only when the caller
 * believes the local clock is valid within some bound (+-128 ms with
 * NTP).  If the caller's time is far different than the PPS time, an
 * argument will ensue, and it's not clear who will lose.
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 1024 s, operation should be in phase-lock mode
 * (STA_FLL = 0), where the loop is disciplined to phase.  For update
 * intervals greater than this, operation should be in frequency-lock
 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
 *
 * Note: splclock() is in effect.
 */
void
hardupdate(long offset)
{
	long ltemp, mtemp;

	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
		return;
	ltemp = offset;
#ifdef PPS_SYNC
	if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
		ltemp = pps_offset;
#endif /* PPS_SYNC */

	/*
	 * Scale the phase adjustment and clamp to the operating range.
	 */
	if (ltemp > MAXPHASE)
		time_offset = MAXPHASE << SHIFT_UPDATE;
	else if (ltemp < -MAXPHASE)
		time_offset = -(MAXPHASE << SHIFT_UPDATE);
	else
		time_offset = ltemp << SHIFT_UPDATE;

	/*
	 * Select whether the frequency is to be controlled and in which
	 * mode (PLL or FLL).  Clamp to the operating range.  Ugly
	 * multiply/divide should be replaced someday.
	 */
	if (time_status & STA_FREQHOLD || time_reftime == 0)
		time_reftime = time.tv_sec;
	mtemp = time.tv_sec - time_reftime;
	time_reftime = time.tv_sec;
	if (time_status & STA_FLL) {
		if (mtemp >= MINSEC) {
			ltemp = ((time_offset / mtemp) << (SHIFT_USEC -
			    SHIFT_UPDATE));
			if (ltemp < 0)
				time_freq -= -ltemp >> SHIFT_KH;
			else
				time_freq += ltemp >> SHIFT_KH;
		}
	} else {
		if (mtemp < MAXSEC) {
			ltemp *= mtemp;
			if (ltemp < 0)
				time_freq -= -ltemp >> (time_constant +
				    time_constant + SHIFT_KF -
				    SHIFT_USEC);
			else
				time_freq += ltemp >> (time_constant +
				    time_constant + SHIFT_KF -
				    SHIFT_USEC);
		}
	}
	if (time_freq > time_tolerance)
		time_freq = time_tolerance;
	else if (time_freq < -time_tolerance)
		time_freq = -time_tolerance;
}

#ifdef PPS_SYNC
/*
 * hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal.  It measures the PPS phase
 * and leaves it in a handy spot for the hardclock() routine.  It
 * integrates successive PPS phase differences and calculates the
 * frequency offset.  This is used in hardclock() to discipline the CPU
 * clock oscillator so that intrinsic frequency error is cancelled out.
 * The code requires the caller to capture the time and hardware counter
 * value at the on-time PPS signal transition.
 *
 * Note that, on some Unix systems, this routine runs at an interrupt
 * priority level higher than the timer interrupt routine hardclock().
 * Therefore, the variables used are distinct from the hardclock()
 * variables, with certain exceptions: The PPS frequency pps_freq
 * and phase pps_offset variables are determined by this routine and
 * updated atomically.  The time_tolerance variable can be considered a
 * constant, since it is infrequently changed, and then only when the
 * PPS signal is disabled.  The watchdog counter pps_valid is updated
 * once per second by hardclock() and is atomically cleared in this
 * routine.
 */
void
hardpps(struct timeval *tvp,		/* time at PPS */
	long usec			/* hardware counter at PPS */)
{
	long u_usec, v_usec, bigtick;
	long cal_sec, cal_usec;

	/*
	 * An occasional glitch can be produced when the PPS interrupt
	 * occurs in the hardclock() routine before the time variable is
	 * updated.  Here the offset is discarded when the difference
	 * between it and the last one is greater than tick/2, but not
	 * if the interval since the first discard exceeds 30 s.
	 */
	time_status |= STA_PPSSIGNAL;
	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
	pps_valid = 0;
	u_usec = -tvp->tv_usec;
	if (u_usec < -500000)
		u_usec += 1000000;
	v_usec = pps_offset - u_usec;
	if (v_usec < 0)
		v_usec = -v_usec;
	if (v_usec > (tick >> 1)) {
		if (pps_glitch > MAXGLITCH) {
			pps_glitch = 0;
			pps_tf[2] = u_usec;
			pps_tf[1] = u_usec;
		} else {
			pps_glitch++;
			u_usec = pps_offset;
		}
	} else
		pps_glitch = 0;

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * time.  The median sample becomes the time offset estimate; the
	 * difference between the other two samples becomes the time
	 * dispersion (jitter) estimate.
	 */
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0] = u_usec;
	if (pps_tf[0] > pps_tf[1]) {
		if (pps_tf[1] > pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 0 1 2 */
			v_usec = pps_tf[0] - pps_tf[2];
		} else if (pps_tf[2] > pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 2 0 1 */
			v_usec = pps_tf[2] - pps_tf[1];
		} else {
			pps_offset = pps_tf[2];		/* 0 2 1 */
			v_usec = pps_tf[0] - pps_tf[1];
		}
	} else {
		if (pps_tf[1] < pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 2 1 0 */
			v_usec = pps_tf[2] - pps_tf[0];
		} else if (pps_tf[2] < pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 1 0 2 */
			v_usec = pps_tf[1] - pps_tf[2];
		} else {
			pps_offset = pps_tf[2];		/* 1 2 0 */
			v_usec = pps_tf[1] - pps_tf[0];
		}
	}
	if (v_usec > MAXTIME)
		pps_jitcnt++;
	v_usec = (v_usec << PPS_AVG) - pps_jitter;
	if (v_usec < 0)
		pps_jitter -= -v_usec >> PPS_AVG;
	else
		pps_jitter += v_usec >> PPS_AVG;
	if (pps_jitter > (MAXTIME >> 1))
		time_status |= STA_PPSJITTER;

	/*
	 * During the calibration interval adjust the starting time when
	 * the tick overflows.  At the end of the interval compute the
	 * duration of the interval and the difference of the hardware
	 * counters at the beginning and end of the interval.  This code
	 * is deliciously complicated by the fact that valid differences may
	 * exceed the value of tick when using long calibration
	 * intervals and small ticks.  Note that the counter can be
	 * greater than tick if caught at just the wrong instant, but
	 * the values returned and used here are correct.
	 */
	bigtick = (long)tick << SHIFT_USEC;
	pps_usec -= pps_freq;
	if (pps_usec >= bigtick)
		pps_usec -= bigtick;
	if (pps_usec < 0)
		pps_usec += bigtick;
	pps_time.tv_sec++;
	pps_count++;
	if (pps_count < (1 << pps_shift))
		return;
	pps_count = 0;
	pps_calcnt++;
	u_usec = usec << SHIFT_USEC;
	v_usec = pps_usec - u_usec;
	if (v_usec >= bigtick >> 1)
		v_usec -= bigtick;
	if (v_usec < -(bigtick >> 1))
		v_usec += bigtick;
	if (v_usec < 0)
		v_usec = -(-v_usec >> pps_shift);
	else
		v_usec = v_usec >> pps_shift;
	pps_usec = u_usec;
	cal_sec = tvp->tv_sec;
	cal_usec = tvp->tv_usec;
	cal_sec -= pps_time.tv_sec;
	cal_usec -= pps_time.tv_usec;
	if (cal_usec < 0) {
		cal_usec += 1000000;
		cal_sec--;
	}
	pps_time = *tvp;

	/*
	 * Check for lost interrupts, noise, excessive jitter and
	 * excessive frequency error.  The number of timer ticks during
	 * the interval may vary +-1 tick.  Add to this a margin of one
	 * tick for the PPS signal jitter and maximum frequency
	 * deviation.  If the limits are exceeded, the calibration
	 * interval is reset to the minimum and we start over.
	 */
	u_usec = (long)tick << 1;
	if (!((cal_sec == -1 && cal_usec > (1000000 - u_usec))
	    || (cal_sec == 0 && cal_usec < u_usec))
	    || v_usec > time_tolerance || v_usec < -time_tolerance) {
		pps_errcnt++;
		pps_shift = PPS_SHIFT;
		pps_intcnt = 0;
		time_status |= STA_PPSERROR;
		return;
	}

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * frequency.  The median sample becomes the frequency offset
	 * estimate; the difference between the other two samples
	 * becomes the frequency dispersion (stability) estimate.
	 */
	pps_ff[2] = pps_ff[1];
	pps_ff[1] = pps_ff[0];
	pps_ff[0] = v_usec;
	if (pps_ff[0] > pps_ff[1]) {
		if (pps_ff[1] > pps_ff[2]) {
			u_usec = pps_ff[1];		/* 0 1 2 */
			v_usec = pps_ff[0] - pps_ff[2];
		} else if (pps_ff[2] > pps_ff[0]) {
			u_usec = pps_ff[0];		/* 2 0 1 */
			v_usec = pps_ff[2] - pps_ff[1];
		} else {
			u_usec = pps_ff[2];		/* 0 2 1 */
			v_usec = pps_ff[0] - pps_ff[1];
		}
	} else {
		if (pps_ff[1] < pps_ff[2]) {
			u_usec = pps_ff[1];		/* 2 1 0 */
			v_usec = pps_ff[2] - pps_ff[0];
		} else if (pps_ff[2] < pps_ff[0]) {
			u_usec = pps_ff[0];		/* 1 0 2 */
			v_usec = pps_ff[1] - pps_ff[2];
		} else {
			u_usec = pps_ff[2];		/* 1 2 0 */
			v_usec = pps_ff[1] - pps_ff[0];
		}
	}

	/*
	 * Here the frequency dispersion (stability) is updated.  If it
	 * is less than one-fourth the maximum (MAXFREQ), the frequency
	 * offset is updated as well, but clamped to the tolerance.  It
	 * will be processed later by the hardclock() routine.
	 */
	v_usec = (v_usec >> 1) - pps_stabil;
	if (v_usec < 0)
		pps_stabil -= -v_usec >> PPS_AVG;
	else
		pps_stabil += v_usec >> PPS_AVG;
	if (pps_stabil > MAXFREQ >> 2) {
		pps_stbcnt++;
		time_status |= STA_PPSWANDER;
		return;
	}
	if (time_status & STA_PPSFREQ) {
		if (u_usec < 0) {
			pps_freq -= -u_usec >> PPS_AVG;
			if (pps_freq < -time_tolerance)
				pps_freq = -time_tolerance;
			u_usec = -u_usec;
		} else {
			pps_freq += u_usec >> PPS_AVG;
			if (pps_freq > time_tolerance)
				pps_freq = time_tolerance;
		}
	}

	/*
	 * Here the calibration interval is adjusted.  If the maximum
	 * time difference is greater than tick / 4, reduce the interval
	 * by half.  If this is not the case for four consecutive
	 * intervals, double the interval.
	 */
	if (u_usec << pps_shift > bigtick >> 2) {
		pps_intcnt = 0;
		if (pps_shift > PPS_SHIFT)
			pps_shift--;
	} else if (pps_intcnt >= 4) {
		pps_intcnt = 0;
		if (pps_shift < PPS_SHIFTMAX)
			pps_shift++;
	} else
		pps_intcnt++;
}
#endif /* PPS_SYNC */
#endif /* NTP */
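
/*
 * Usage sketch for hzto() (illustrative only; this block is an editorial
 * assumption and not part of the original sources).  As the comment above
 * hzto() states, it converts an absolute timeval into the tick count used
 * as the second argument of callout_reset(), and unlike the legacy
 * interface it may return 0 when the time has already passed, so callers
 * need to handle that case themselves.  The names `sc', `sc_ch' and
 * `foo_timeout' below are hypothetical:
 *
 *	struct timeval deadline;
 *	int t;
 *
 *	t = hzto(&deadline);
 *	if (t == 0)
 *		foo_timeout(sc);	(deadline already expired)
 *	else
 *		callout_reset(&sc->sc_ch, t, foo_timeout, sc);
 */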