/*	$NetBSD: kern_clock.c,v 1.110 2007/08/09 07:36:18 pooka Exp $	*/

/*-
 * Copyright (c) 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_clock.c,v 1.110 2007/08/09 07:36:18 pooka Exp $");

#include "opt_ntp.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/timex.h>
#include <sys/sched.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/cpu.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.  The main clock, running hz times per second, is used to keep
 * track of real time.  The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the CPU
 * just before its quantum expires.  Otherwise, it would never accumulate
 * CPU ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 */
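
/*
 * Editor's illustration (not part of the kernel source): a minimal
 * userland sketch of how the prof/stat divider described above is
 * meant to behave.  The rates stathz = 100 and profhz = 1000 are
 * assumptions for the example only; psratio = profhz / stathz = 10,
 * so while profiling the clock ticks at profhz, but only every
 * psratio-th tick counts for statistics, keeping the effective
 * statistics rate at stathz.
 */
#if 0
#include <stdio.h>

int
main(void)
{
        int stathz = 100, profhz = 1000;        /* assumed example rates */
        int psratio = profhz / stathz;          /* prof => stat divider */
        int pscnt = psratio, stats = 0, tick;

        for (tick = 0; tick < profhz; tick++) { /* one second of prof ticks */
                if (--pscnt > 0)
                        continue;               /* profile-only tick */
                stats++;                        /* statistics tick */
                pscnt = psratio;
        }
        printf("%d statistics ticks in one second\n", stats);   /* 100 */
        return 0;
}
#endif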

#ifndef __HAVE_TIMECOUNTER
#ifdef NTP	/* NTP phase-locked loop in kernel */
/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The following variables are read and set by the ntp_adjtime() system
 * call.
 *
 * time_state shows the state of the system clock, with values defined
 * in the timex.h header file.
 *
 * time_status shows the status of the system clock, with bits defined
 * in the timex.h header file.
 *
 * time_offset is used by the PLL/FLL to adjust the system time in small
 * increments.
 *
 * time_constant determines the bandwidth or "stiffness" of the PLL.
 *
 * time_tolerance determines maximum frequency error or tolerance of the
 * CPU clock oscillator and is a property of the architecture; however,
 * in principle it could change as a result of the presence of external
 * discipline signals, for instance.
 *
 * time_precision is usually equal to the kernel tick variable; however,
 * in cases where a precision clock counter or external clock is
 * available, the resolution can be much less than this and depend on
 * whether the external clock is working or not.
 *
 * time_maxerror is initialized by an ntp_adjtime() call and increased by
 * the kernel once each second to reflect the maximum error bound
 * growth.
 *
 * time_esterror is set and read by the ntp_adjtime() call, but
 * otherwise not used by the kernel.
 */
int	time_state = TIME_OK;		/* clock state */
int	time_status = STA_UNSYNC;	/* clock status bits */
long	time_offset = 0;		/* time offset (us) */
long	time_constant = 0;		/* pll time constant */
long	time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
long	time_precision = 1;		/* clock precision (us) */
long	time_maxerror = MAXPHASE;	/* maximum error (us) */
long	time_esterror = MAXPHASE;	/* estimated error (us) */

/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock.  The scale
 * factors are defined in the timex.h header file.
 *
 * time_phase and time_freq are the phase increment and the frequency
 * increment, respectively, of the kernel time variable.
 *
 * time_freq is set via ntp_adjtime() from a value stored in a file when
 * the synchronization daemon is first started.  Its value is retrieved
 * via ntp_adjtime() and written to the file about once per hour by the
 * daemon.
 *
 * time_adj is the adjustment added to the value of tick at each timer
 * interrupt and is recomputed from time_phase and time_freq at each
 * seconds rollover.
 *
 * time_reftime is the second's portion of the system time at the last
 * call to ntp_adjtime().  It is used to adjust the time_freq variable
 * and to increase the time_maxerror as the time since last update
 * increases.
 */
long	time_phase = 0;			/* phase offset (scaled us) */
long	time_freq = 0;			/* frequency offset (scaled ppm) */
long	time_adj = 0;			/* tick adjust (scaled 1 / hz) */
long	time_reftime = 0;		/* time at last adjustment (s) */
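
/*
 * Editor's illustration (not part of the kernel source): the "scaled
 * ppm" and "scaled us" units above are plain fixed-point encodings.
 * Assuming SHIFT_USEC is 16 and SHIFT_SCALE is 22, as in the
 * traditional kernel-PLL timex.h, a frequency offset of +5 ppm is
 * stored as 5 << 16, and one whole microsecond of phase corresponds
 * to 1 << 22 in time_phase.
 */
#if 0
#include <stdio.h>

#define SHIFT_USEC	16	/* assumed, from the kernel-PLL timex.h */
#define SHIFT_SCALE	22	/* assumed, from the kernel-PLL timex.h */

int
main(void)
{
        long freq = 5L << SHIFT_USEC;   /* +5 ppm in scaled-ppm units */
        long phase = 3L << SHIFT_SCALE; /* 3 us in scaled-us units */

        printf("scaled freq %ld == %ld ppm\n", freq, freq >> SHIFT_USEC);
        printf("scaled phase %ld == %ld us\n", phase, phase >> SHIFT_SCALE);
        return 0;
}
#endif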

#ifdef PPS_SYNC
/*
 * The following variables are used only if the kernel PPS discipline
 * code is configured (PPS_SYNC).  The scale factors are defined in the
 * timex.h header file.
 *
 * pps_time contains the time at each calibration interval, as read by
 * microtime().  pps_count counts the seconds of the calibration
 * interval, the duration of which is nominally pps_shift in powers of
 * two.
 *
 * pps_offset is the time offset produced by the time median filter
 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
 * this filter.
 *
 * pps_freq is the frequency offset produced by the frequency median
 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
 * by this filter.
 *
 * pps_usec is latched from a high resolution counter or external clock
 * at pps_time.  Here we want the hardware counter contents only, not the
 * contents plus the time_tv.usec as usual.
 *
 * pps_valid counts the number of seconds since the last PPS update.  It
 * is used as a watchdog timer to disable the PPS discipline should the
 * PPS signal be lost.
 *
 * pps_glitch counts the number of seconds since the beginning of an
 * offset burst more than tick/2 from current nominal offset.  It is used
 * mainly to suppress error bursts due to priority conflicts between the
 * PPS interrupt and timer interrupt.
 *
 * pps_intcnt counts the calibration intervals for use in the interval-
 * adaptation algorithm.  It's just too complicated for words.
 *
 * pps_kc_hardpps_source contains an arbitrary value that uniquely
 * identifies the currently bound source of the PPS signal, or NULL
 * if no source is bound.
 *
 * pps_kc_hardpps_mode indicates which transitions, if any, of the PPS
 * signal should be reported.
 */
struct timeval pps_time;	/* kernel time at last interval */
long pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
long pps_offset = 0;		/* pps time offset (us) */
long pps_jitter = MAXTIME;	/* time dispersion (jitter) (us) */
long pps_ff[] = {0, 0, 0};	/* pps frequency offset median filter */
long pps_freq = 0;		/* frequency offset (scaled ppm) */
long pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
long pps_usec = 0;		/* microsec counter at last interval */
long pps_valid = PPS_VALID;	/* pps signal watchdog counter */
int pps_glitch = 0;		/* pps signal glitch counter */
int pps_count = 0;		/* calibration interval counter (s) */
int pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
int pps_intcnt = 0;		/* intervals at current duration */
void *pps_kc_hardpps_source = NULL; /* current PPS supplier's identifier */
int pps_kc_hardpps_mode = 0;	/* interesting edges of PPS signal */

/*
 * PPS signal quality monitors
 *
 * pps_jitcnt counts the seconds that have been discarded because the
 * jitter measured by the time median filter exceeds the limit MAXTIME
 * (100 us).
 *
 * pps_calcnt counts the frequency calibration intervals, which are
 * variable from 4 s to 256 s.
 *
 * pps_errcnt counts the calibration intervals which have been discarded
 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 * calibration interval jitter exceeds two ticks.
 *
 * pps_stbcnt counts the calibration intervals that have been discarded
 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
 */
long pps_jitcnt = 0;		/* jitter limit exceeded */
long pps_calcnt = 0;		/* calibration intervals */
long pps_errcnt = 0;		/* calibration errors */
long pps_stbcnt = 0;		/* stability limit exceeded */
#endif /* PPS_SYNC */

#ifdef EXT_CLOCK
/*
 * External clock definitions
 *
 * The following definitions and declarations are used only if an
 * external clock is configured on the system.
 */
#define CLOCK_INTERVAL 30	/* CPU clock update interval (s) */

/*
 * The clock_count variable is set to CLOCK_INTERVAL at each PPS
 * interrupt and decremented once each second.
 */
int clock_count = 0;		/* CPU clock counter */

#ifdef HIGHBALL
/*
 * The clock_offset and clock_cpu variables are used by the HIGHBALL
 * interface.  The clock_offset variable defines the offset between
 * system time and the HIGHBALL counters.  The clock_cpu variable contains
 * the offset between the system clock and the HIGHBALL clock for use in
 * disciplining the kernel time variable.
 */
extern struct timeval clock_offset;	/* Highball clock offset */
long clock_cpu = 0;		/* CPU clock adjust */
#endif /* HIGHBALL */
#endif /* EXT_CLOCK */
#endif /* NTP */

/*
 * Bump a timeval by a small number of usec's.
 */
#define BUMPTIME(t, usec) { \
	volatile struct timeval *tp = (t); \
	long us; \
 \
	tp->tv_usec = us = tp->tv_usec + (usec); \
	if (us >= 1000000) { \
		tp->tv_usec = us - 1000000; \
		tp->tv_sec++; \
	} \
}
#endif /* !__HAVE_TIMECOUNTER */
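
/*
 * Editor's illustration (not part of the kernel source): a minimal
 * sketch of the microsecond carry in BUMPTIME.  Note that the macro
 * handles at most one second of overflow, which is why it is only
 * safe for a "small number of usec's".
 */
#if 0
static void
bumptime_example(void)
{
	struct timeval tv = { 0, 999500 };

	BUMPTIME(&tv, 600);	/* 999500 + 600 = 1000100 >= 1000000, */
				/* so tv becomes { 1, 100 } */
}
#endif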

int	stathz;
int	profhz;
int	profsrc;
int	schedhz;
int	profprocs;
int	hardclock_ticks;
static int statscheddiv;	/* stat => sched divider (used if schedhz == 0) */
static int psdiv;		/* prof => stat divider */
int	psratio;		/* ratio: prof / stat */
#ifndef __HAVE_TIMECOUNTER
int	tickfix, tickfixinterval;	/* used if tick not really integral */
#ifndef NTP
static int tickfixcnt;		/* accumulated fractional error */
#else
int	fixtick;		/* used by NTP for same */
int	shifthz;
#endif

/*
 * We might want ldd to load both words from time at once.
 * To succeed we need to be quadword aligned.
 * The sparc already does that, and that it has worked so far is a fluke.
 */
volatile struct timeval time __attribute__((__aligned__(__alignof__(quad_t))));
volatile struct timeval mono_time;
#endif /* !__HAVE_TIMECOUNTER */

#ifdef __HAVE_TIMECOUNTER
static u_int get_intr_timecount(struct timecounter *);

static struct timecounter intr_timecounter = {
	get_intr_timecount,	/* get_timecount */
	0,			/* no poll_pps */
	~0u,			/* counter_mask */
	0,			/* frequency */
	"clockinterrupt",	/* name */
	0,			/* quality - minimum implementation level for a clock */
	NULL,			/* prev */
	NULL,			/* next */
};

static u_int
get_intr_timecount(struct timecounter *tc)
{

	return (u_int)hardclock_ticks;
}
#endif

/*
 * Initialize clock frequencies and start both clocks running.
 */
void
initclocks(void)
{
	int i;

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	psdiv = 1;
#ifdef __HAVE_TIMECOUNTER
	/*
	 * Provide a minimum default time counter that will only run
	 * at interrupt resolution.
	 */
	intr_timecounter.tc_frequency = hz;
	tc_init(&intr_timecounter);
#endif
	cpu_initclocks();

	/*
	 * Compute profhz and stathz, fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;
	if (schedhz == 0) {
		/* 16Hz is best */
		statscheddiv = i / 16;
		if (statscheddiv <= 0)
			panic("statscheddiv");
	}

#ifndef __HAVE_TIMECOUNTER
#ifdef NTP
	switch (hz) {
	case 1:
		shifthz = SHIFT_SCALE - 0;
		break;
	case 2:
		shifthz = SHIFT_SCALE - 1;
		break;
	case 4:
		shifthz = SHIFT_SCALE - 2;
		break;
	case 8:
		shifthz = SHIFT_SCALE - 3;
		break;
	case 16:
		shifthz = SHIFT_SCALE - 4;
		break;
	case 32:
		shifthz = SHIFT_SCALE - 5;
		break;
	case 50:
	case 60:
	case 64:
		shifthz = SHIFT_SCALE - 6;
		break;
	case 96:
	case 100:
	case 128:
		shifthz = SHIFT_SCALE - 7;
		break;
	case 256:
		shifthz = SHIFT_SCALE - 8;
		break;
	case 512:
		shifthz = SHIFT_SCALE - 9;
		break;
	case 1000:
	case 1024:
		shifthz = SHIFT_SCALE - 10;
		break;
	case 1200:
	case 2048:
		shifthz = SHIFT_SCALE - 11;
		break;
	case 4096:
		shifthz = SHIFT_SCALE - 12;
		break;
	case 8192:
		shifthz = SHIFT_SCALE - 13;
		break;
	case 16384:
		shifthz = SHIFT_SCALE - 14;
		break;
	case 32768:
		shifthz = SHIFT_SCALE - 15;
		break;
	case 65536:
		shifthz = SHIFT_SCALE - 16;
		break;
	default:
		panic("weird hz");
	}
	if (fixtick == 0) {
		/*
		 * Give MD code a chance to set this to a better
		 * value; but, if it doesn't, we should.
		 */
		fixtick = (1000000 - (hz*tick));
	}
#endif /* NTP */
#endif /* !__HAVE_TIMECOUNTER */
}
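
/*
 * Editor's illustration (not part of the kernel source): why fixtick
 * is needed.  With hz = 60, tick = 1000000 / 60 = 16666 us truncated,
 * so hz * tick only accounts for 999960 us per second; fixtick makes
 * up the missing 40 us.  With hz = 100, tick = 10000 and fixtick is 0.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	int hz = 60;			/* assumed example clock rate */
	int tick = 1000000 / hz;	/* 16666 us, truncated */
	int fixtick = 1000000 - hz * tick;

	printf("tick = %d us, fixtick = %d us/s\n", tick, fixtick); /* 16666, 40 */
	return 0;
}
#endif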

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(struct clockframe *frame)
{
	struct lwp *l;
	struct proc *p;
	struct cpu_info *ci = curcpu();
	struct ptimer *pt;
#ifndef __HAVE_TIMECOUNTER
	int delta;
	extern int tickdelta;
	extern long timedelta;
#ifdef NTP
	int time_update;
	int ltemp;
#endif /* NTP */
#endif /* !__HAVE_TIMECOUNTER */

	l = curlwp;
	if (!CURCPU_IDLE_P()) {
		p = l->l_proc;
		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		if (CLKF_USERMODE(frame) && p->p_timers &&
		    (pt = LIST_FIRST(&p->p_timers->pts_virtual)) != NULL)
			if (itimerdecr(pt, tick) == 0)
				itimerfire(pt);
		if (p->p_timers &&
		    (pt = LIST_FIRST(&p->p_timers->pts_prof)) != NULL)
			if (itimerdecr(pt, tick) == 0)
				itimerfire(pt);
	}

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);
	if ((--ci->ci_schedstate.spc_ticks) <= 0)
		sched_tick(ci);

#if defined(MULTIPROCESSOR)
	/*
	 * If we are not the primary CPU, we're not allowed to do
	 * any more work.
	 */
	if (CPU_IS_PRIMARY(ci) == 0)
		return;
#endif

	hardclock_ticks++;

#ifdef __HAVE_TIMECOUNTER
	tc_ticktock();
#else /* __HAVE_TIMECOUNTER */
	/*
	 * Increment the time-of-day.  The increment is normally just
	 * ``tick''.  If the machine is one which has a clock frequency
	 * such that ``hz'' would not divide the second evenly into
	 * milliseconds, a periodic adjustment must be applied.  Finally,
	 * if we are still adjusting the time (see adjtime()),
	 * ``tickdelta'' may also be added in.
	 */
	delta = tick;

#ifndef NTP
	if (tickfix) {
		tickfixcnt += tickfix;
		if (tickfixcnt >= tickfixinterval) {
			delta++;
			tickfixcnt -= tickfixinterval;
		}
	}
#endif /* !NTP */
	/* Imprecise 4bsd adjtime() handling */
	if (timedelta != 0) {
		delta += tickdelta;
		timedelta -= tickdelta;
	}

#ifdef notyet
	microset();
#endif

#ifndef NTP
	BUMPTIME(&time, delta);		/* XXX Now done using NTP code below */
#endif
	BUMPTIME(&mono_time, delta);

#ifdef NTP
	time_update = delta;

	/*
	 * Compute the phase adjustment.  If the low-order bits
	 * (time_phase) of the update overflow, bump the high-order bits
	 * (time_update).
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		time_update -= ltemp;
	} else if (time_phase >= FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		time_update += ltemp;
	}
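
	/*
	 * Editor's illustration: FINEUSEC is 1 << SHIFT_SCALE, i.e. one
	 * whole microsecond in scaled-us units (SHIFT_SCALE is assumed
	 * to be 22, as in the kernel-PLL timex.h).  If time_adj were
	 * 1 << 21 (half a microsecond per tick, scaled), time_phase
	 * reaches FINEUSEC after two ticks; the code above then carries
	 * ltemp = 1 whole microsecond into time_update and leaves the
	 * remainder (here 0) in time_phase.
	 */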

#ifdef HIGHBALL
	/*
	 * If the HIGHBALL board is installed, we need to adjust the
	 * external clock offset in order to close the hardware feedback
	 * loop.  This will adjust the external clock phase and frequency
	 * in small amounts.  The additional phase noise and frequency
	 * wander this causes should be minimal.  We also need to
	 * discipline the kernel time variable, since the PLL is used to
	 * discipline the external clock.  If the Highball board is not
	 * present, we discipline kernel time with the PLL as usual.  We
	 * assume that the external clock phase adjustment (time_update)
	 * and kernel phase adjustment (clock_cpu) are less than the
	 * value of tick.
	 */
	clock_offset.tv_usec += time_update;
	if (clock_offset.tv_usec >= 1000000) {
		clock_offset.tv_sec++;
		clock_offset.tv_usec -= 1000000;
	}
	if (clock_offset.tv_usec < 0) {
		clock_offset.tv_sec--;
		clock_offset.tv_usec += 1000000;
	}
	time.tv_usec += clock_cpu;
	clock_cpu = 0;
#else
	time.tv_usec += time_update;
#endif /* HIGHBALL */

	/*
	 * On rollover of the second the phase adjustment to be used for
	 * the next second is calculated.  Also, the maximum error is
	 * increased by the tolerance.  If the PPS frequency discipline
	 * code is present, the phase is increased to compensate for the
	 * CPU clock oscillator frequency error.
	 *
	 * On a 32-bit machine and given parameters in the timex.h
	 * header file, the maximum phase adjustment is +-512 ms and
	 * maximum frequency offset is (a tad less than) +-512 ppm.  On a
	 * 64-bit machine, you shouldn't need to ask.
	 */
	if (time.tv_usec >= 1000000) {
		time.tv_usec -= 1000000;
		time.tv_sec++;
		time_maxerror += time_tolerance >> SHIFT_USEC;

		/*
		 * Leap second processing.  If in leap-insert state at
		 * the end of the day, the system clock is set back one
		 * second; if in leap-delete state, the system clock is
		 * set ahead one second.  The microtime() routine or
		 * external clock driver will insure that reported time
		 * is always monotonic.  The ugly divides should be
		 * replaced.
		 */
		switch (time_state) {
		case TIME_OK:
			if (time_status & STA_INS)
				time_state = TIME_INS;
			else if (time_status & STA_DEL)
				time_state = TIME_DEL;
			break;

		case TIME_INS:
			if (time.tv_sec % 86400 == 0) {
				time.tv_sec--;
				time_state = TIME_OOP;
			}
			break;

		case TIME_DEL:
			if ((time.tv_sec + 1) % 86400 == 0) {
				time.tv_sec++;
				time_state = TIME_WAIT;
			}
			break;

		case TIME_OOP:
			time_state = TIME_WAIT;
			break;

		case TIME_WAIT:
			if (!(time_status & (STA_INS | STA_DEL)))
				time_state = TIME_OK;
			break;
		}

		/*
		 * Compute the phase adjustment for the next second.  In
		 * PLL mode, the offset is reduced by a fixed factor
		 * times the time constant.  In FLL mode the offset is
		 * used directly.  In either mode, the maximum phase
		 * adjustment for each second is clamped so as to spread
		 * the adjustment over not more than the number of
		 * seconds between updates.
		 */
		if (time_offset < 0) {
			ltemp = -time_offset;
			if (!(time_status & STA_FLL))
				ltemp >>= SHIFT_KG + time_constant;
			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
				ltemp = (MAXPHASE / MINSEC) <<
				    SHIFT_UPDATE;
			time_offset += ltemp;
			time_adj = -ltemp << (shifthz - SHIFT_UPDATE);
		} else if (time_offset > 0) {
			ltemp = time_offset;
			if (!(time_status & STA_FLL))
				ltemp >>= SHIFT_KG + time_constant;
			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
				ltemp = (MAXPHASE / MINSEC) <<
				    SHIFT_UPDATE;
			time_offset -= ltemp;
			time_adj = ltemp << (shifthz - SHIFT_UPDATE);
		} else
			time_adj = 0;

		/*
		 * Compute the frequency estimate and additional phase
		 * adjustment due to frequency error for the next
		 * second.  When the PPS signal is engaged, gnaw on the
		 * watchdog counter and update the frequency computed by
		 * the pll and the PPS signal.
		 */
#ifdef PPS_SYNC
		pps_valid++;
		if (pps_valid == PPS_VALID) {
			pps_jitter = MAXTIME;
			pps_stabil = MAXFREQ;
			time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
			    STA_PPSWANDER | STA_PPSERROR);
		}
		ltemp = time_freq + pps_freq;
#else
		ltemp = time_freq;
#endif /* PPS_SYNC */

		if (ltemp < 0)
			time_adj -= -ltemp >> (SHIFT_USEC - shifthz);
		else
			time_adj += ltemp >> (SHIFT_USEC - shifthz);
		time_adj += (long)fixtick << shifthz;

		/*
		 * When the CPU clock oscillator frequency is not a
		 * power of 2 in Hz, shifthz is only an approximate
		 * scale factor.
		 *
		 * To determine the adjustment, you can do the following:
		 *	bc -q
		 *	scale=24
		 *	obase=2
		 *	idealhz/realhz
		 * where `idealhz' is the next higher power of 2, and `realhz'
		 * is the actual value.  You may need to factor this result
		 * into a sequence of 2 multipliers to get better precision.
		 *
		 * Likewise, the error can be calculated with (e.g. for 100Hz):
		 *	bc -q
		 *	scale=24
		 *	((1+2^-2+2^-5)*(1-2^-10)*realhz-idealhz)/idealhz
		 * (and then multiply by 1000000 to get ppm).
		 */
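
		/*
		 * Editor's worked instance of the recipe above for
		 * hz = 100: idealhz = 128 and 128/100 = 1.28, which in
		 * binary is 1.0100011110101110...; truncated, this is
		 * the factor 1.010001111010111 quoted in the 100Hz case
		 * below, and it factors as roughly
		 * (1 + 2^-2 + 2^-5) * (1 - 2^-10), matching the two
		 * shift-and-add steps applied in that case.
		 */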
		switch (hz) {
		case 60:
			/* A factor of 1.000100010001 gives about 15ppm
			   error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 4);
				time_adj -= (-time_adj >> 8);
			} else {
				time_adj += (time_adj >> 4);
				time_adj += (time_adj >> 8);
			}
			break;

		case 96:
			/* A factor of 1.0101010101 gives about 244ppm error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 2);
				time_adj -= (-time_adj >> 4) + (-time_adj >> 8);
			} else {
				time_adj += (time_adj >> 2);
				time_adj += (time_adj >> 4) + (time_adj >> 8);
			}
			break;

		case 50:
		case 100:
			/* A factor of 1.010001111010111 gives about 1ppm
			   error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
				time_adj += (-time_adj >> 10);
			} else {
				time_adj += (time_adj >> 2) + (time_adj >> 5);
				time_adj -= (time_adj >> 10);
			}
			break;

		case 1000:
			/* A factor of 1.000001100010100001 gives about 50ppm
			   error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 6) + (-time_adj >> 11);
				time_adj -= (-time_adj >> 7);
			} else {
				time_adj += (time_adj >> 6) + (time_adj >> 11);
				time_adj += (time_adj >> 7);
			}
			break;

		case 1200:
			/* A factor of 1.1011010011100001 gives about 64ppm
			   error. */
			if (time_adj < 0) {
				time_adj -= (-time_adj >> 1) + (-time_adj >> 6);
				time_adj -= (-time_adj >> 3) + (-time_adj >> 10);
			} else {
				time_adj += (time_adj >> 1) + (time_adj >> 6);
				time_adj += (time_adj >> 3) + (time_adj >> 10);
			}
			break;
		}

#ifdef EXT_CLOCK
		/*
		 * If an external clock is present, it is necessary to
		 * discipline the kernel time variable anyway, since not
		 * all system components use the microtime() interface.
		 * Here, the time offset between the external clock and
		 * kernel time variable is computed every so often.
		 */
		clock_count++;
		if (clock_count > CLOCK_INTERVAL) {
			clock_count = 0;
			microtime(&clock_ext);
			delta.tv_sec = clock_ext.tv_sec - time.tv_sec;
			delta.tv_usec = clock_ext.tv_usec -
			    time.tv_usec;
			if (delta.tv_usec < 0)
				delta.tv_sec--;
			if (delta.tv_usec >= 500000) {
				delta.tv_usec -= 1000000;
				delta.tv_sec++;
			}
			if (delta.tv_usec < -500000) {
				delta.tv_usec += 1000000;
				delta.tv_sec--;
			}
			if (delta.tv_sec > 0 || (delta.tv_sec == 0 &&
			    delta.tv_usec > MAXPHASE) ||
			    delta.tv_sec < -1 || (delta.tv_sec == -1 &&
			    delta.tv_usec < -MAXPHASE)) {
				time = clock_ext;
				delta.tv_sec = 0;
				delta.tv_usec = 0;
			}
#ifdef HIGHBALL
			clock_cpu = delta.tv_usec;
#else /* HIGHBALL */
			hardupdate(delta.tv_usec);
#endif /* HIGHBALL */
		}
#endif /* EXT_CLOCK */
	}

#endif /* NTP */
#endif /* !__HAVE_TIMECOUNTER */

	/*
	 * Update real-time timeout queue.  Callouts are processed at a
	 * very low CPU priority, so we don't keep the relatively high
	 * clock interrupt priority any longer than necessary.
	 */
	callout_hardclock();
}

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{

	KASSERT(mutex_owned(&p->p_stmutex));

	if ((p->p_stflag & PST_PROFIL) == 0) {
		p->p_stflag |= PST_PROFIL;
		/*
		 * This is only necessary if using the clock as the
		 * profiling source.
		 */
		if (++profprocs == 1 && stathz != 0)
			psdiv = psratio;
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(struct proc *p)
{

	KASSERT(mutex_owned(&p->p_stmutex));

	if (p->p_stflag & PST_PROFIL) {
		p->p_stflag &= ~PST_PROFIL;
		/*
		 * This is only necessary if using the clock as the
		 * profiling source.
		 */
		if (--profprocs == 0 && stathz != 0)
			psdiv = 1;
	}
}

#if defined(PERFCTRS)
/*
 * Independent profiling "tick" in case we're using a separate
 * clock or profiling event source.  Currently, that's just
 * performance counters--hence the wrapper.
 */
void
proftick(struct clockframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	intptr_t i;
#endif
	struct lwp *l;
	struct proc *p;

	l = curlwp;
	p = (l ? l->l_proc : NULL);
	if (CLKF_USERMODE(frame)) {
		mutex_spin_enter(&p->p_stmutex);
		if (p->p_stflag & PST_PROFIL)
			addupc_intr(l, CLKF_PC(frame));
		mutex_spin_exit(&p->p_stmutex);
	} else {
#ifdef GPROF
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
#ifdef PROC_PC
		if (p != NULL) {
			mutex_spin_enter(&p->p_stmutex);
			if (p->p_stflag & PST_PROFIL)
				addupc_intr(l, PROC_PC(p));
			mutex_spin_exit(&p->p_stmutex);
		}
#endif
	}
}
#endif

void
schedclock(struct lwp *l)
{

	if ((l->l_flag & LW_IDLE) != 0)
		return;

	sched_schedclock(l);
}
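
/*
 * Editor's illustration (not part of the kernel source): when no
 * dedicated scheduler clock exists (schedhz == 0), statclock() below
 * calls schedclock() at about 16 Hz by dividing the statistics rate.
 * The rate stathz = 128 is an assumption for the example; initclocks()
 * then computes statscheddiv = 128 / 16 = 8, so every 8th statistics
 * tick drives the scheduler.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	int stathz = 128;			/* assumed example rate */
	int statscheddiv = stathz / 16;		/* as in initclocks() */
	int schedticks = statscheddiv, calls = 0, t;

	for (t = 0; t < stathz; t++)		/* one second of stat ticks */
		if (--schedticks <= 0) {
			calls++;		/* schedclock() would run */
			schedticks = statscheddiv;
		}
	printf("schedclock called %d times/s\n", calls);	/* 16 */
	return 0;
}
#endif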

/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.
 */
void
statclock(struct clockframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	intptr_t i;
#endif
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	struct proc *p;
	struct lwp *l;

	/*
	 * Notice changes in divisor frequency, and adjust clock
	 * frequency accordingly.
	 */
	if (spc->spc_psdiv != psdiv) {
		spc->spc_psdiv = psdiv;
		spc->spc_pscnt = psdiv;
		if (psdiv == 1) {
			setstatclockrate(stathz);
		} else {
			setstatclockrate(profhz);
		}
	}
	l = curlwp;
	if ((l->l_flag & LW_IDLE) != 0) {
		/*
		 * don't account idle lwps as swapper.
		 */
		p = NULL;
	} else {
		p = l->l_proc;
		mutex_spin_enter(&p->p_stmutex);
	}

	if (CLKF_USERMODE(frame)) {
		if ((p->p_stflag & PST_PROFIL) && profsrc == PROFSRC_CLOCK)
			addupc_intr(l, CLKF_PC(frame));
		if (--spc->spc_pscnt > 0) {
			mutex_spin_exit(&p->p_stmutex);
			return;
		}

		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled record the tick.
		 */
		p->p_uticks++;
		if (p->p_nice > NZERO)
			spc->spc_cp_time[CP_NICE]++;
		else
			spc->spc_cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (profsrc == PROFSRC_CLOCK && g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
#ifdef LWP_PC
		if (p != NULL && profsrc == PROFSRC_CLOCK &&
		    (p->p_stflag & PST_PROFIL)) {
			addupc_intr(l, LWP_PC(l));
		}
#endif
		if (--spc->spc_pscnt > 0) {
			if (p != NULL)
				mutex_spin_exit(&p->p_stmutex);
			return;
		}
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if (CLKF_INTR(frame) || (l->l_flag & LW_INTR) != 0) {
			if (p != NULL) {
				p->p_iticks++;
			}
			spc->spc_cp_time[CP_INTR]++;
		} else if (p != NULL) {
			p->p_sticks++;
			spc->spc_cp_time[CP_SYS]++;
		} else {
			spc->spc_cp_time[CP_IDLE]++;
		}
	}
	spc->spc_pscnt = psdiv;

	if (p != NULL) {
		++l->l_cpticks;
		mutex_spin_exit(&p->p_stmutex);
	}

	/*
	 * If no separate schedclock is provided, call it here
	 * at about 16 Hz.
	 */
	if (schedhz == 0) {
		if ((int)(--ci->ci_schedstate.spc_schedticks) <= 0) {
			schedclock(l);
			ci->ci_schedstate.spc_schedticks = statscheddiv;
		}
	}
}

#ifndef __HAVE_TIMECOUNTER
#ifdef NTP	/* NTP phase-locked loop in kernel */
/*
 * hardupdate() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency.  The implementation is of an adaptive-parameter,
 * hybrid phase/frequency-lock loop (PLL/FLL).  The routine computes new
 * time and frequency offset estimates for each call.  If the kernel PPS
 * discipline code is configured (PPS_SYNC), the PPS signal itself
 * determines the new time offset, instead of the calling argument.
 * Presumably, calls to ntp_adjtime() occur only when the caller
 * believes the local clock is valid within some bound (+-128 ms with
 * NTP).  If the caller's time is far different from the PPS time, an
 * argument will ensue, and it's not clear who will lose.
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 1024 s, operation should be in phase-lock mode
 * (STA_FLL = 0), where the loop is disciplined to phase.  For update
 * intervals greater than this, operation should be in frequency-lock
 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
 *
 * Note: splclock() is in effect.
 */
void
hardupdate(long offset)
{
	long ltemp, mtemp;

	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
		return;
	ltemp = offset;
#ifdef PPS_SYNC
	if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
		ltemp = pps_offset;
#endif /* PPS_SYNC */

	/*
	 * Scale the phase adjustment and clamp to the operating range.
	 */
	if (ltemp > MAXPHASE)
		time_offset = MAXPHASE << SHIFT_UPDATE;
	else if (ltemp < -MAXPHASE)
		time_offset = -(MAXPHASE << SHIFT_UPDATE);
	else
		time_offset = ltemp << SHIFT_UPDATE;

	/*
	 * Select whether the frequency is to be controlled and in which
	 * mode (PLL or FLL).  Clamp to the operating range.  Ugly
	 * multiply/divide should be replaced someday.
	 */
	if (time_status & STA_FREQHOLD || time_reftime == 0)
		time_reftime = time.tv_sec;
	mtemp = time.tv_sec - time_reftime;
	time_reftime = time.tv_sec;
	if (time_status & STA_FLL) {
		if (mtemp >= MINSEC) {
			ltemp = ((time_offset / mtemp) << (SHIFT_USEC -
			    SHIFT_UPDATE));
			if (ltemp < 0)
				time_freq -= -ltemp >> SHIFT_KH;
			else
				time_freq += ltemp >> SHIFT_KH;
		}
	} else {
		if (mtemp < MAXSEC) {
			ltemp *= mtemp;
			if (ltemp < 0)
				time_freq -= -ltemp >> (time_constant +
				    time_constant + SHIFT_KF -
				    SHIFT_USEC);
			else
				time_freq += ltemp >> (time_constant +
				    time_constant + SHIFT_KF -
				    SHIFT_USEC);
		}
	}
	if (time_freq > time_tolerance)
		time_freq = time_tolerance;
	else if (time_freq < -time_tolerance)
		time_freq = -time_tolerance;
}
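
/*
 * Editor's illustration (not part of the kernel source): the phase
 * clamp in hardupdate() above.  Assuming MAXPHASE is 128000 us and
 * SHIFT_UPDATE is 12, as in the traditional kernel-PLL timex.h, an
 * offset of +300000 us is clamped to 128000 << 12, while +50000 us
 * passes through as 50000 << 12 scaled units.
 */
#if 0
#include <stdio.h>

#define MAXPHASE	128000L	/* assumed, max phase error (us) */
#define SHIFT_UPDATE	12	/* assumed, time offset scale (shift) */

static long
clamp_offset(long ltemp)
{
	if (ltemp > MAXPHASE)
		return MAXPHASE << SHIFT_UPDATE;
	else if (ltemp < -MAXPHASE)
		return -(MAXPHASE << SHIFT_UPDATE);
	return ltemp << SHIFT_UPDATE;
}

int
main(void)
{
	printf("%ld %ld\n", clamp_offset(300000L) >> SHIFT_UPDATE,
	    clamp_offset(50000L) >> SHIFT_UPDATE);	/* 128000 50000 */
	return 0;
}
#endif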

#ifdef PPS_SYNC
/*
 * hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal.  It measures the PPS phase
 * and leaves it in a handy spot for the hardclock() routine.  It
 * integrates successive PPS phase differences and calculates the
 * frequency offset.  This is used in hardclock() to discipline the CPU
 * clock oscillator so that intrinsic frequency error is cancelled out.
 * The code requires the caller to capture the time and hardware counter
 * value at the on-time PPS signal transition.
 *
 * Note that, on some Unix systems, this routine runs at an interrupt
 * priority level higher than the timer interrupt routine hardclock().
 * Therefore, the variables used are distinct from the hardclock()
 * variables, with certain exceptions: The PPS frequency pps_freq
 * and phase pps_offset variables are determined by this routine and
 * updated atomically.  The time_tolerance variable can be considered a
 * constant, since it is infrequently changed, and then only when the
 * PPS signal is disabled.  The watchdog counter pps_valid is updated
 * once per second by hardclock() and is atomically cleared in this
 * routine.
 */
void
hardpps(struct timeval *tvp,		/* time at PPS */
	long usec			/* hardware counter at PPS */)
{
	long u_usec, v_usec, bigtick;
	long cal_sec, cal_usec;

	/*
	 * An occasional glitch can be produced when the PPS interrupt
	 * occurs in the hardclock() routine before the time variable is
	 * updated.  Here the offset is discarded when the difference
	 * between it and the last one is greater than tick/2, but not
	 * if the interval since the first discard exceeds 30 s.
	 */
	time_status |= STA_PPSSIGNAL;
	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
	pps_valid = 0;
	u_usec = -tvp->tv_usec;
	if (u_usec < -500000)
		u_usec += 1000000;
	v_usec = pps_offset - u_usec;
	if (v_usec < 0)
		v_usec = -v_usec;
	if (v_usec > (tick >> 1)) {
		if (pps_glitch > MAXGLITCH) {
			pps_glitch = 0;
			pps_tf[2] = u_usec;
			pps_tf[1] = u_usec;
		} else {
			pps_glitch++;
			u_usec = pps_offset;
		}
	} else
		pps_glitch = 0;

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * time.  The median sample becomes the time offset estimate; the
	 * difference between the other two samples becomes the time
	 * dispersion (jitter) estimate.
	 */
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0] = u_usec;
	if (pps_tf[0] > pps_tf[1]) {
		if (pps_tf[1] > pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 0 1 2 */
			v_usec = pps_tf[0] - pps_tf[2];
		} else if (pps_tf[2] > pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 2 0 1 */
			v_usec = pps_tf[2] - pps_tf[1];
		} else {
			pps_offset = pps_tf[2];		/* 0 2 1 */
			v_usec = pps_tf[0] - pps_tf[1];
		}
	} else {
		if (pps_tf[1] < pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 2 1 0 */
			v_usec = pps_tf[2] - pps_tf[0];
		} else if (pps_tf[2] < pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 1 0 2 */
			v_usec = pps_tf[1] - pps_tf[2];
		} else {
			pps_offset = pps_tf[2];		/* 1 2 0 */
			v_usec = pps_tf[1] - pps_tf[0];
		}
	}
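
	/*
	 * Editor's illustration: with samples pps_tf[] = { 40, 10, 30 }
	 * (newest first), the branch above takes the "0 2 1" case:
	 * pps_offset becomes the median, 30, and v_usec = 40 - 10 = 30
	 * becomes the jitter sample fed into pps_jitter below.
	 */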
	if (v_usec > MAXTIME)
		pps_jitcnt++;
	v_usec = (v_usec << PPS_AVG) - pps_jitter;
	if (v_usec < 0)
		pps_jitter -= -v_usec >> PPS_AVG;
	else
		pps_jitter += v_usec >> PPS_AVG;
	if (pps_jitter > (MAXTIME >> 1))
		time_status |= STA_PPSJITTER;

	/*
	 * During the calibration interval adjust the starting time when
	 * the tick overflows.  At the end of the interval compute the
	 * duration of the interval and the difference of the hardware
	 * counters at the beginning and end of the interval.  This code
	 * is deliciously complicated by the fact that valid differences
	 * may exceed the value of tick when using long calibration
	 * intervals and small ticks.  Note that the counter can be
	 * greater than tick if caught at just the wrong instant, but
	 * the values returned and used here are correct.
	 */
	bigtick = (long)tick << SHIFT_USEC;
	pps_usec -= pps_freq;
	if (pps_usec >= bigtick)
		pps_usec -= bigtick;
	if (pps_usec < 0)
		pps_usec += bigtick;
	pps_time.tv_sec++;
	pps_count++;
	if (pps_count < (1 << pps_shift))
		return;
	pps_count = 0;
	pps_calcnt++;
	u_usec = usec << SHIFT_USEC;
	v_usec = pps_usec - u_usec;
	if (v_usec >= bigtick >> 1)
		v_usec -= bigtick;
	if (v_usec < -(bigtick >> 1))
		v_usec += bigtick;
	if (v_usec < 0)
		v_usec = -(-v_usec >> pps_shift);
	else
		v_usec = v_usec >> pps_shift;
	pps_usec = u_usec;
	cal_sec = tvp->tv_sec;
	cal_usec = tvp->tv_usec;
	cal_sec -= pps_time.tv_sec;
	cal_usec -= pps_time.tv_usec;
	if (cal_usec < 0) {
		cal_usec += 1000000;
		cal_sec--;
	}
	pps_time = *tvp;

	/*
	 * Check for lost interrupts, noise, excessive jitter and
	 * excessive frequency error.  The number of timer ticks during
	 * the interval may vary +-1 tick.  Add to this a margin of one
	 * tick for the PPS signal jitter and maximum frequency
	 * deviation.  If the limits are exceeded, the calibration
	 * interval is reset to the minimum and we start over.
	 */
	u_usec = (long)tick << 1;
	if (!((cal_sec == -1 && cal_usec > (1000000 - u_usec))
	    || (cal_sec == 0 && cal_usec < u_usec))
	    || v_usec > time_tolerance || v_usec < -time_tolerance) {
		pps_errcnt++;
		pps_shift = PPS_SHIFT;
		pps_intcnt = 0;
		time_status |= STA_PPSERROR;
		return;
	}

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * frequency.  The median sample becomes the frequency offset
	 * estimate; the difference between the other two samples
	 * becomes the frequency dispersion (stability) estimate.
	 */
	pps_ff[2] = pps_ff[1];
	pps_ff[1] = pps_ff[0];
	pps_ff[0] = v_usec;
	if (pps_ff[0] > pps_ff[1]) {
		if (pps_ff[1] > pps_ff[2]) {
			u_usec = pps_ff[1];		/* 0 1 2 */
			v_usec = pps_ff[0] - pps_ff[2];
		} else if (pps_ff[2] > pps_ff[0]) {
			u_usec = pps_ff[0];		/* 2 0 1 */
			v_usec = pps_ff[2] - pps_ff[1];
		} else {
			u_usec = pps_ff[2];		/* 0 2 1 */
			v_usec = pps_ff[0] - pps_ff[1];
		}
	} else {
		if (pps_ff[1] < pps_ff[2]) {
			u_usec = pps_ff[1];		/* 2 1 0 */
			v_usec = pps_ff[2] - pps_ff[0];
		} else if (pps_ff[2] < pps_ff[0]) {
			u_usec = pps_ff[0];		/* 1 0 2 */
			v_usec = pps_ff[1] - pps_ff[2];
		} else {
			u_usec = pps_ff[2];		/* 1 2 0 */
			v_usec = pps_ff[1] - pps_ff[0];
		}
	}

	/*
	 * Here the frequency dispersion (stability) is updated.  If it
	 * is less than one-fourth the maximum (MAXFREQ), the frequency
	 * offset is updated as well, but clamped to the tolerance.  It
	 * will be processed later by the hardclock() routine.
	 */
	v_usec = (v_usec >> 1) - pps_stabil;
	if (v_usec < 0)
		pps_stabil -= -v_usec >> PPS_AVG;
	else
		pps_stabil += v_usec >> PPS_AVG;
	if (pps_stabil > MAXFREQ >> 2) {
		pps_stbcnt++;
		time_status |= STA_PPSWANDER;
		return;
	}
	if (time_status & STA_PPSFREQ) {
		if (u_usec < 0) {
			pps_freq -= -u_usec >> PPS_AVG;
			if (pps_freq < -time_tolerance)
				pps_freq = -time_tolerance;
			u_usec = -u_usec;
		} else {
			pps_freq += u_usec >> PPS_AVG;
			if (pps_freq > time_tolerance)
				pps_freq = time_tolerance;
		}
	}

	/*
	 * Here the calibration interval is adjusted.  If the maximum
	 * time difference is greater than tick / 4, reduce the interval
	 * by half.  If this is not the case for four consecutive
	 * intervals, double the interval.
	 */
	if (u_usec << pps_shift > bigtick >> 2) {
		pps_intcnt = 0;
		if (pps_shift > PPS_SHIFT)
			pps_shift--;
	} else if (pps_intcnt >= 4) {
		pps_intcnt = 0;
		if (pps_shift < PPS_SHIFTMAX)
			pps_shift++;
	} else
		pps_intcnt++;
}
#endif /* PPS_SYNC */
#endif /* NTP */

/* timecounter compat functions */
void
nanotime(struct timespec *ts)
{
	struct timeval tv;

	microtime(&tv);
	TIMEVAL_TO_TIMESPEC(&tv, ts);
}

void
getbinuptime(struct bintime *bt)
{
	struct timeval tv;

	microtime(&tv);
	timeval2bintime(&tv, bt);
}

void
nanouptime(struct timespec *tsp)
{
	int s;

	s = splclock();
	TIMEVAL_TO_TIMESPEC(&mono_time, tsp);
	splx(s);
}

void
getnanouptime(struct timespec *tsp)
{
	int s;

	s = splclock();
	TIMEVAL_TO_TIMESPEC(&mono_time, tsp);
	splx(s);
}

void
getmicrouptime(struct timeval *tvp)
{
	int s;

	s = splclock();
	*tvp = mono_time;
	splx(s);
}

void
getnanotime(struct timespec *tsp)
{
	int s;

	s = splclock();
	TIMEVAL_TO_TIMESPEC(&time, tsp);
	splx(s);
}

void
getmicrotime(struct timeval *tvp)
{
	int s;

	s = splclock();
	*tvp = time;
	splx(s);
}

u_int64_t
tc_getfrequency(void)
{
	return hz;
}
#endif /* !__HAVE_TIMECOUNTER */