1 /* $NetBSD: kern_clock.c,v 1.102 2006/09/02 06:21:32 christos Exp $ */ 2 3 /*- 4 * Copyright (c) 2000, 2004 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 9 * NASA Ames Research Center. 10 * This code is derived from software contributed to The NetBSD Foundation 11 * by Charles M. Hannum. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. All advertising materials mentioning features or use of this software 22 * must display the following acknowledgement: 23 * This product includes software developed by the NetBSD 24 * Foundation, Inc. and its contributors. 25 * 4. Neither the name of The NetBSD Foundation nor the names of its 26 * contributors may be used to endorse or promote products derived 27 * from this software without specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 30 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 31 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 32 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 33 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 36 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 37 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 39 * POSSIBILITY OF SUCH DAMAGE. 40 */ 41 42 /*- 43 * Copyright (c) 1982, 1986, 1991, 1993 44 * The Regents of the University of California. All rights reserved. 45 * (c) UNIX System Laboratories, Inc. 46 * All or some portions of this file are derived from material licensed 47 * to the University of California by American Telephone and Telegraph 48 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 49 * the permission of UNIX System Laboratories, Inc. 50 * 51 * Redistribution and use in source and binary forms, with or without 52 * modification, are permitted provided that the following conditions 53 * are met: 54 * 1. Redistributions of source code must retain the above copyright 55 * notice, this list of conditions and the following disclaimer. 56 * 2. Redistributions in binary form must reproduce the above copyright 57 * notice, this list of conditions and the following disclaimer in the 58 * documentation and/or other materials provided with the distribution. 59 * 3. Neither the name of the University nor the names of its contributors 60 * may be used to endorse or promote products derived from this software 61 * without specific prior written permission. 
62 * 63 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 64 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 65 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 66 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 67 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 68 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 69 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 70 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 71 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 72 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 73 * SUCH DAMAGE. 74 * 75 * @(#)kern_clock.c 8.5 (Berkeley) 1/21/94 76 */ 77 78 #include <sys/cdefs.h> 79 __KERNEL_RCSID(0, "$NetBSD: kern_clock.c,v 1.102 2006/09/02 06:21:32 christos Exp $"); 80 81 #include "opt_ntp.h" 82 #include "opt_multiprocessor.h" 83 #include "opt_perfctrs.h" 84 85 #include <sys/param.h> 86 #include <sys/systm.h> 87 #include <sys/callout.h> 88 #include <sys/kernel.h> 89 #include <sys/proc.h> 90 #include <sys/resourcevar.h> 91 #include <sys/signalvar.h> 92 #include <sys/sysctl.h> 93 #include <sys/timex.h> 94 #include <sys/sched.h> 95 #include <sys/time.h> 96 #ifdef __HAVE_TIMECOUNTER 97 #include <sys/timetc.h> 98 #endif 99 100 #include <machine/cpu.h> 101 #ifdef __HAVE_GENERIC_SOFT_INTERRUPTS 102 #include <machine/intr.h> 103 #endif 104 105 #ifdef GPROF 106 #include <sys/gmon.h> 107 #endif 108 109 /* 110 * Clock handling routines. 111 * 112 * This code is written to operate with two timers that run independently of 113 * each other. The main clock, running hz times per second, is used to keep 114 * track of real time. The second timer handles kernel and user profiling, 115 * and does resource use estimation. If the second timer is programmable, 116 * it is randomized to avoid aliasing between the two clocks. For example, 117 * the randomization prevents an adversary from always giving up the CPU 118 * just before its quantum expires. Otherwise, it would never accumulate 119 * CPU ticks. The mean frequency of the second timer is stathz. 120 * 121 * If no second timer exists, stathz will be zero; in this case we drive 122 * profiling and statistics off the main clock. This WILL NOT be accurate; 123 * do not do it unless absolutely necessary. 124 * 125 * The statistics clock may (or may not) be run at a higher rate while 126 * profiling. This profile clock runs at profhz. We require that profhz 127 * be an integral multiple of stathz. 128 * 129 * If the statistics clock is running fast, it must be divided by the ratio 130 * profhz/stathz for statistics. (For profiling, every tick counts.) 131 */ 132 133 #ifndef __HAVE_TIMECOUNTER 134 #ifdef NTP /* NTP phase-locked loop in kernel */ 135 /* 136 * Phase/frequency-lock loop (PLL/FLL) definitions 137 * 138 * The following variables are read and set by the ntp_adjtime() system 139 * call. 140 * 141 * time_state shows the state of the system clock, with values defined 142 * in the timex.h header file. 143 * 144 * time_status shows the status of the system clock, with bits defined 145 * in the timex.h header file. 146 * 147 * time_offset is used by the PLL/FLL to adjust the system time in small 148 * increments. 149 * 150 * time_constant determines the bandwidth or "stiffness" of the PLL. 
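 *
 * As an illustrative aside (not part of the original description), these
 * variables are normally examined from userland through ntp_adjtime(2);
 * a minimal sketch, with error handling omitted, might look like:
 *
 *	struct timex tx;
 *
 *	memset(&tx, 0, sizeof(tx));
 *	tx.modes = 0;			(read-only query)
 *	if (ntp_adjtime(&tx) == TIME_ERROR)
 *		...clock is unsynchronized...
 *	(tx.offset, tx.freq, tx.maxerror, tx.esterror and tx.status now
 *	 mirror the kernel variables documented here)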
151 * 152 * time_tolerance determines maximum frequency error or tolerance of the 153 * CPU clock oscillator and is a property of the architecture; however, 154 * in principle it could change as result of the presence of external 155 * discipline signals, for instance. 156 * 157 * time_precision is usually equal to the kernel tick variable; however, 158 * in cases where a precision clock counter or external clock is 159 * available, the resolution can be much less than this and depend on 160 * whether the external clock is working or not. 161 * 162 * time_maxerror is initialized by a ntp_adjtime() call and increased by 163 * the kernel once each second to reflect the maximum error bound 164 * growth. 165 * 166 * time_esterror is set and read by the ntp_adjtime() call, but 167 * otherwise not used by the kernel. 168 */ 169 int time_state = TIME_OK; /* clock state */ 170 int time_status = STA_UNSYNC; /* clock status bits */ 171 long time_offset = 0; /* time offset (us) */ 172 long time_constant = 0; /* pll time constant */ 173 long time_tolerance = MAXFREQ; /* frequency tolerance (scaled ppm) */ 174 long time_precision = 1; /* clock precision (us) */ 175 long time_maxerror = MAXPHASE; /* maximum error (us) */ 176 long time_esterror = MAXPHASE; /* estimated error (us) */ 177 178 /* 179 * The following variables establish the state of the PLL/FLL and the 180 * residual time and frequency offset of the local clock. The scale 181 * factors are defined in the timex.h header file. 182 * 183 * time_phase and time_freq are the phase increment and the frequency 184 * increment, respectively, of the kernel time variable. 185 * 186 * time_freq is set via ntp_adjtime() from a value stored in a file when 187 * the synchronization daemon is first started. Its value is retrieved 188 * via ntp_adjtime() and written to the file about once per hour by the 189 * daemon. 190 * 191 * time_adj is the adjustment added to the value of tick at each timer 192 * interrupt and is recomputed from time_phase and time_freq at each 193 * seconds rollover. 194 * 195 * time_reftime is the second's portion of the system time at the last 196 * call to ntp_adjtime(). It is used to adjust the time_freq variable 197 * and to increase the time_maxerror as the time since last update 198 * increases. 199 */ 200 long time_phase = 0; /* phase offset (scaled us) */ 201 long time_freq = 0; /* frequency offset (scaled ppm) */ 202 long time_adj = 0; /* tick adjust (scaled 1 / hz) */ 203 long time_reftime = 0; /* time at last adjustment (s) */ 204 205 #ifdef PPS_SYNC 206 /* 207 * The following variables are used only if the kernel PPS discipline 208 * code is configured (PPS_SYNC). The scale factors are defined in the 209 * timex.h header file. 210 * 211 * pps_time contains the time at each calibration interval, as read by 212 * microtime(). pps_count counts the seconds of the calibration 213 * interval, the duration of which is nominally pps_shift in powers of 214 * two. 215 * 216 * pps_offset is the time offset produced by the time median filter 217 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by 218 * this filter. 219 * 220 * pps_freq is the frequency offset produced by the frequency median 221 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured 222 * by this filter. 223 * 224 * pps_usec is latched from a high resolution counter or external clock 225 * at pps_time. Here we want the hardware counter contents only, not the 226 * contents plus the time_tv.usec as usual. 
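 *
 * For a concrete sense of scale (values taken from the description of
 * pps_calcnt below, not new requirements): the calibration interval ends
 * when pps_count reaches (1 << pps_shift) seconds, so
 *
 *	pps_shift = 2	->	  4 s interval
 *	pps_shift = 4	->	 16 s interval
 *	pps_shift = 8	->	256 s interval
 *
 * which corresponds to the 4 s to 256 s range of calibration intervals.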
227 * 228 * pps_valid counts the number of seconds since the last PPS update. It 229 * is used as a watchdog timer to disable the PPS discipline should the 230 * PPS signal be lost. 231 * 232 * pps_glitch counts the number of seconds since the beginning of an 233 * offset burst more than tick/2 from current nominal offset. It is used 234 * mainly to suppress error bursts due to priority conflicts between the 235 * PPS interrupt and timer interrupt. 236 * 237 * pps_intcnt counts the calibration intervals for use in the interval- 238 * adaptation algorithm. It's just too complicated for words. 239 * 240 * pps_kc_hardpps_source contains an arbitrary value that uniquely 241 * identifies the currently bound source of the PPS signal, or NULL 242 * if no source is bound. 243 * 244 * pps_kc_hardpps_mode indicates which transitions, if any, of the PPS 245 * signal should be reported. 246 */ 247 struct timeval pps_time; /* kernel time at last interval */ 248 long pps_tf[] = {0, 0, 0}; /* pps time offset median filter (us) */ 249 long pps_offset = 0; /* pps time offset (us) */ 250 long pps_jitter = MAXTIME; /* time dispersion (jitter) (us) */ 251 long pps_ff[] = {0, 0, 0}; /* pps frequency offset median filter */ 252 long pps_freq = 0; /* frequency offset (scaled ppm) */ 253 long pps_stabil = MAXFREQ; /* frequency dispersion (scaled ppm) */ 254 long pps_usec = 0; /* microsec counter at last interval */ 255 long pps_valid = PPS_VALID; /* pps signal watchdog counter */ 256 int pps_glitch = 0; /* pps signal glitch counter */ 257 int pps_count = 0; /* calibration interval counter (s) */ 258 int pps_shift = PPS_SHIFT; /* interval duration (s) (shift) */ 259 int pps_intcnt = 0; /* intervals at current duration */ 260 void *pps_kc_hardpps_source = NULL; /* current PPS supplier's identifier */ 261 int pps_kc_hardpps_mode = 0; /* interesting edges of PPS signal */ 262 263 /* 264 * PPS signal quality monitors 265 * 266 * pps_jitcnt counts the seconds that have been discarded because the 267 * jitter measured by the time median filter exceeds the limit MAXTIME 268 * (100 us). 269 * 270 * pps_calcnt counts the frequency calibration intervals, which are 271 * variable from 4 s to 256 s. 272 * 273 * pps_errcnt counts the calibration intervals which have been discarded 274 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the 275 * calibration interval jitter exceeds two ticks. 276 * 277 * pps_stbcnt counts the calibration intervals that have been discarded 278 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm). 279 */ 280 long pps_jitcnt = 0; /* jitter limit exceeded */ 281 long pps_calcnt = 0; /* calibration intervals */ 282 long pps_errcnt = 0; /* calibration errors */ 283 long pps_stbcnt = 0; /* stability limit exceeded */ 284 #endif /* PPS_SYNC */ 285 286 #ifdef EXT_CLOCK 287 /* 288 * External clock definitions 289 * 290 * The following definitions and declarations are used only if an 291 * external clock is configured on the system. 292 */ 293 #define CLOCK_INTERVAL 30 /* CPU clock update interval (s) */ 294 295 /* 296 * The clock_count variable is set to CLOCK_INTERVAL at each PPS 297 * interrupt and decremented once each second. 298 */ 299 int clock_count = 0; /* CPU clock counter */ 300 301 #ifdef HIGHBALL 302 /* 303 * The clock_offset and clock_cpu variables are used by the HIGHBALL 304 * interface. The clock_offset variable defines the offset between 305 * system time and the HIGHBALL counters.
The clock_cpu variable contains 306 * the offset between the system clock and the HIGHBALL clock for use in 307 * disciplining the kernel time variable. 308 */ 309 extern struct timeval clock_offset; /* Highball clock offset */ 310 long clock_cpu = 0; /* CPU clock adjust */ 311 #endif /* HIGHBALL */ 312 #endif /* EXT_CLOCK */ 313 #endif /* NTP */ 314 315 /* 316 * Bump a timeval by a small number of usec's. 317 */ 318 #define BUMPTIME(t, usec) { \ 319 volatile struct timeval *tp = (t); \ 320 long us; \ 321 \ 322 tp->tv_usec = us = tp->tv_usec + (usec); \ 323 if (us >= 1000000) { \ 324 tp->tv_usec = us - 1000000; \ 325 tp->tv_sec++; \ 326 } \ 327 } 328 #endif /* !__HAVE_TIMECOUNTER */ 329 330 int stathz; 331 int profhz; 332 int profsrc; 333 int schedhz; 334 int profprocs; 335 int hardclock_ticks; 336 static int statscheddiv; /* stat => sched divider (used if schedhz == 0) */ 337 static int psdiv; /* prof => stat divider */ 338 int psratio; /* ratio: prof / stat */ 339 #ifndef __HAVE_TIMECOUNTER 340 int tickfix, tickfixinterval; /* used if tick not really integral */ 341 #ifndef NTP 342 static int tickfixcnt; /* accumulated fractional error */ 343 #else 344 int fixtick; /* used by NTP for same */ 345 int shifthz; 346 #endif 347 348 /* 349 * We might want ldd to load the both words from time at once. 350 * To succeed we need to be quadword aligned. 351 * The sparc already does that, and that it has worked so far is a fluke. 352 */ 353 volatile struct timeval time __attribute__((__aligned__(__alignof__(quad_t)))); 354 volatile struct timeval mono_time; 355 #endif /* !__HAVE_TIMECOUNTER */ 356 357 #ifdef __HAVE_GENERIC_SOFT_INTERRUPTS 358 void *softclock_si; 359 #endif 360 361 #ifdef __HAVE_TIMECOUNTER 362 static u_int get_intr_timecount(struct timecounter *); 363 364 static struct timecounter intr_timecounter = { 365 get_intr_timecount, /* get_timecount */ 366 0, /* no poll_pps */ 367 ~0u, /* counter_mask */ 368 0, /* frequency */ 369 "clockinterrupt", /* name */ 370 0, /* quality - minimum implementation level for a clock */ 371 NULL, /* prev */ 372 NULL, /* next */ 373 }; 374 375 static u_int 376 get_intr_timecount(struct timecounter *tc) 377 { 378 return (u_int)hardclock_ticks; 379 } 380 #endif 381 382 /* 383 * Initialize clock frequencies and start both clocks running. 384 */ 385 void 386 initclocks(void) 387 { 388 int i; 389 390 #ifdef __HAVE_GENERIC_SOFT_INTERRUPTS 391 softclock_si = softintr_establish(IPL_SOFTCLOCK, softclock, NULL); 392 if (softclock_si == NULL) 393 panic("initclocks: unable to register softclock intr"); 394 #endif 395 396 /* 397 * Set divisors to 1 (normal case) and let the machine-specific 398 * code do its bit. 399 */ 400 psdiv = 1; 401 #ifdef __HAVE_TIMECOUNTER 402 /* 403 * provide minimum default time counter 404 * will only run at interrupt resolution 405 */ 406 intr_timecounter.tc_frequency = hz; 407 tc_init(&intr_timecounter); 408 #endif 409 cpu_initclocks(); 410 411 /* 412 * Compute profhz/stathz/rrticks, and fix profhz if needed. 413 */ 414 i = stathz ? 
stathz : hz; 415 if (profhz == 0) 416 profhz = i; 417 psratio = profhz / i; 418 rrticks = hz / 10; 419 if (schedhz == 0) { 420 /* 16Hz is best */ 421 statscheddiv = i / 16; 422 if (statscheddiv <= 0) 423 panic("statscheddiv"); 424 } 425 426 #ifndef __HAVE_TIMECOUNTER 427 #ifdef NTP 428 switch (hz) { 429 case 1: 430 shifthz = SHIFT_SCALE - 0; 431 break; 432 case 2: 433 shifthz = SHIFT_SCALE - 1; 434 break; 435 case 4: 436 shifthz = SHIFT_SCALE - 2; 437 break; 438 case 8: 439 shifthz = SHIFT_SCALE - 3; 440 break; 441 case 16: 442 shifthz = SHIFT_SCALE - 4; 443 break; 444 case 32: 445 shifthz = SHIFT_SCALE - 5; 446 break; 447 case 50: 448 case 60: 449 case 64: 450 shifthz = SHIFT_SCALE - 6; 451 break; 452 case 96: 453 case 100: 454 case 128: 455 shifthz = SHIFT_SCALE - 7; 456 break; 457 case 256: 458 shifthz = SHIFT_SCALE - 8; 459 break; 460 case 512: 461 shifthz = SHIFT_SCALE - 9; 462 break; 463 case 1000: 464 case 1024: 465 shifthz = SHIFT_SCALE - 10; 466 break; 467 case 1200: 468 case 2048: 469 shifthz = SHIFT_SCALE - 11; 470 break; 471 case 4096: 472 shifthz = SHIFT_SCALE - 12; 473 break; 474 case 8192: 475 shifthz = SHIFT_SCALE - 13; 476 break; 477 case 16384: 478 shifthz = SHIFT_SCALE - 14; 479 break; 480 case 32768: 481 shifthz = SHIFT_SCALE - 15; 482 break; 483 case 65536: 484 shifthz = SHIFT_SCALE - 16; 485 break; 486 default: 487 panic("weird hz"); 488 } 489 if (fixtick == 0) { 490 /* 491 * Give MD code a chance to set this to a better 492 * value; but, if it doesn't, we should. 493 */ 494 fixtick = (1000000 - (hz*tick)); 495 } 496 #endif /* NTP */ 497 #endif /* !__HAVE_TIMECOUNTER */ 498 } 499 500 /* 501 * The real-time timer, interrupting hz times per second. 502 */ 503 void 504 hardclock(struct clockframe *frame) 505 { 506 struct lwp *l; 507 struct proc *p; 508 struct cpu_info *ci = curcpu(); 509 struct ptimer *pt; 510 #ifndef __HAVE_TIMECOUNTER 511 int delta; 512 extern int tickdelta; 513 extern long timedelta; 514 #ifdef NTP 515 int time_update; 516 int ltemp; 517 #endif /* NTP */ 518 #endif /* __HAVE_TIMECOUNTER */ 519 520 l = curlwp; 521 if (l) { 522 p = l->l_proc; 523 /* 524 * Run current process's virtual and profile time, as needed. 525 */ 526 if (CLKF_USERMODE(frame) && p->p_timers && 527 (pt = LIST_FIRST(&p->p_timers->pts_virtual)) != NULL) 528 if (itimerdecr(pt, tick) == 0) 529 itimerfire(pt); 530 if (p->p_timers && 531 (pt = LIST_FIRST(&p->p_timers->pts_prof)) != NULL) 532 if (itimerdecr(pt, tick) == 0) 533 itimerfire(pt); 534 } 535 536 /* 537 * If no separate statistics clock is available, run it from here. 538 */ 539 if (stathz == 0) 540 statclock(frame); 541 if ((--ci->ci_schedstate.spc_rrticks) <= 0) 542 roundrobin(ci); 543 544 #if defined(MULTIPROCESSOR) 545 /* 546 * If we are not the primary CPU, we're not allowed to do 547 * any more work. 548 */ 549 if (CPU_IS_PRIMARY(ci) == 0) 550 return; 551 #endif 552 553 hardclock_ticks++; 554 555 #ifdef __HAVE_TIMECOUNTER 556 tc_ticktock(); 557 #else /* __HAVE_TIMECOUNTER */ 558 /* 559 * Increment the time-of-day. The increment is normally just 560 * ``tick''. If the machine is one which has a clock frequency 561 * such that ``hz'' would not divide the second evenly into 562 * milliseconds, a periodic adjustment must be applied. Finally, 563 * if we are still adjusting the time (see adjtime()), 564 * ``tickdelta'' may also be added in. 
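	 *
	 * Worked example (illustrative only): with hz = 1024, tick
	 * (1000000 / hz, truncated) is 976 us, so hz * tick accounts for
	 * only 999424 us and each second comes up 576 us short.  Without
	 * NTP that shortfall is what the tickfix/tickfixinterval
	 * mechanism just below is meant to repay; with NTP the same
	 * quantity is computed as fixtick in initclocks() above
	 * (1000000 - hz * tick) and folded into the per-second
	 * recomputation of time_adj.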
565 */ 566 delta = tick; 567 568 #ifndef NTP 569 if (tickfix) { 570 tickfixcnt += tickfix; 571 if (tickfixcnt >= tickfixinterval) { 572 delta++; 573 tickfixcnt -= tickfixinterval; 574 } 575 } 576 #endif /* !NTP */ 577 /* Imprecise 4bsd adjtime() handling */ 578 if (timedelta != 0) { 579 delta += tickdelta; 580 timedelta -= tickdelta; 581 } 582 583 #ifdef notyet 584 microset(); 585 #endif 586 587 #ifndef NTP 588 BUMPTIME(&time, delta); /* XXX Now done using NTP code below */ 589 #endif 590 BUMPTIME(&mono_time, delta); 591 592 #ifdef NTP 593 time_update = delta; 594 595 /* 596 * Compute the phase adjustment. If the low-order bits 597 * (time_phase) of the update overflow, bump the high-order bits 598 * (time_update). 599 */ 600 time_phase += time_adj; 601 if (time_phase <= -FINEUSEC) { 602 ltemp = -time_phase >> SHIFT_SCALE; 603 time_phase += ltemp << SHIFT_SCALE; 604 time_update -= ltemp; 605 } else if (time_phase >= FINEUSEC) { 606 ltemp = time_phase >> SHIFT_SCALE; 607 time_phase -= ltemp << SHIFT_SCALE; 608 time_update += ltemp; 609 } 610 611 #ifdef HIGHBALL 612 /* 613 * If the HIGHBALL board is installed, we need to adjust the 614 * external clock offset in order to close the hardware feedback 615 * loop. This will adjust the external clock phase and frequency 616 * in small amounts. The additional phase noise and frequency 617 * wander this causes should be minimal. We also need to 618 * discipline the kernel time variable, since the PLL is used to 619 * discipline the external clock. If the Highball board is not 620 * present, we discipline kernel time with the PLL as usual. We 621 * assume that the external clock phase adjustment (time_update) 622 * and kernel phase adjustment (clock_cpu) are less than the 623 * value of tick. 624 */ 625 clock_offset.tv_usec += time_update; 626 if (clock_offset.tv_usec >= 1000000) { 627 clock_offset.tv_sec++; 628 clock_offset.tv_usec -= 1000000; 629 } 630 if (clock_offset.tv_usec < 0) { 631 clock_offset.tv_sec--; 632 clock_offset.tv_usec += 1000000; 633 } 634 time.tv_usec += clock_cpu; 635 clock_cpu = 0; 636 #else 637 time.tv_usec += time_update; 638 #endif /* HIGHBALL */ 639 640 /* 641 * On rollover of the second the phase adjustment to be used for 642 * the next second is calculated. Also, the maximum error is 643 * increased by the tolerance. If the PPS frequency discipline 644 * code is present, the phase is increased to compensate for the 645 * CPU clock oscillator frequency error. 646 * 647 * On a 32-bit machine and given parameters in the timex.h 648 * header file, the maximum phase adjustment is +-512 ms and 649 * maximum frequency offset is (a tad less than) +-512 ppm. On a 650 * 64-bit machine, you shouldn't need to ask. 651 */ 652 if (time.tv_usec >= 1000000) { 653 time.tv_usec -= 1000000; 654 time.tv_sec++; 655 time_maxerror += time_tolerance >> SHIFT_USEC; 656 657 /* 658 * Leap second processing. If in leap-insert state at 659 * the end of the day, the system clock is set back one 660 * second; if in leap-delete state, the system clock is 661 * set ahead one second. The microtime() routine or 662 * external clock driver will ensure that reported time 663 * is always monotonic. The ugly divides should be 664 * replaced.
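		 *
		 * Illustrative walk-through of an inserted leap second,
		 * derived from the state machine below: with STA_INS set,
		 * time_state moves from TIME_OK to TIME_INS; when the
		 * incremented second reaches a multiple of 86400 the
		 * clock is stepped back one second and the state becomes
		 * TIME_OOP, so the last second of the day is repeated;
		 * one second later the state advances to TIME_WAIT, and
		 * once STA_INS is cleared it returns to TIME_OK.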
665 */ 666 switch (time_state) { 667 case TIME_OK: 668 if (time_status & STA_INS) 669 time_state = TIME_INS; 670 else if (time_status & STA_DEL) 671 time_state = TIME_DEL; 672 break; 673 674 case TIME_INS: 675 if (time.tv_sec % 86400 == 0) { 676 time.tv_sec--; 677 time_state = TIME_OOP; 678 } 679 break; 680 681 case TIME_DEL: 682 if ((time.tv_sec + 1) % 86400 == 0) { 683 time.tv_sec++; 684 time_state = TIME_WAIT; 685 } 686 break; 687 688 case TIME_OOP: 689 time_state = TIME_WAIT; 690 break; 691 692 case TIME_WAIT: 693 if (!(time_status & (STA_INS | STA_DEL))) 694 time_state = TIME_OK; 695 break; 696 } 697 698 /* 699 * Compute the phase adjustment for the next second. In 700 * PLL mode, the offset is reduced by a fixed factor 701 * times the time constant. In FLL mode the offset is 702 * used directly. In either mode, the maximum phase 703 * adjustment for each second is clamped so as to spread 704 * the adjustment over not more than the number of 705 * seconds between updates. 706 */ 707 if (time_offset < 0) { 708 ltemp = -time_offset; 709 if (!(time_status & STA_FLL)) 710 ltemp >>= SHIFT_KG + time_constant; 711 if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE) 712 ltemp = (MAXPHASE / MINSEC) << 713 SHIFT_UPDATE; 714 time_offset += ltemp; 715 time_adj = -ltemp << (shifthz - SHIFT_UPDATE); 716 } else if (time_offset > 0) { 717 ltemp = time_offset; 718 if (!(time_status & STA_FLL)) 719 ltemp >>= SHIFT_KG + time_constant; 720 if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE) 721 ltemp = (MAXPHASE / MINSEC) << 722 SHIFT_UPDATE; 723 time_offset -= ltemp; 724 time_adj = ltemp << (shifthz - SHIFT_UPDATE); 725 } else 726 time_adj = 0; 727 728 /* 729 * Compute the frequency estimate and additional phase 730 * adjustment due to frequency error for the next 731 * second. When the PPS signal is engaged, gnaw on the 732 * watchdog counter and update the frequency computed by 733 * the pll and the PPS signal. 734 */ 735 #ifdef PPS_SYNC 736 pps_valid++; 737 if (pps_valid == PPS_VALID) { 738 pps_jitter = MAXTIME; 739 pps_stabil = MAXFREQ; 740 time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER | 741 STA_PPSWANDER | STA_PPSERROR); 742 } 743 ltemp = time_freq + pps_freq; 744 #else 745 ltemp = time_freq; 746 #endif /* PPS_SYNC */ 747 748 if (ltemp < 0) 749 time_adj -= -ltemp >> (SHIFT_USEC - shifthz); 750 else 751 time_adj += ltemp >> (SHIFT_USEC - shifthz); 752 time_adj += (long)fixtick << shifthz; 753 754 /* 755 * When the CPU clock oscillator frequency is not a 756 * power of 2 in Hz, shifthz is only an approximate 757 * scale factor. 758 * 759 * To determine the adjustment, you can do the following: 760 * bc -q 761 * scale=24 762 * obase=2 763 * idealhz/realhz 764 * where `idealhz' is the next higher power of 2, and `realhz' 765 * is the actual value. You may need to factor this result 766 * into a sequence of 2 multipliers to get better precision. 767 * 768 * Likewise, the error can be calculated with (e.g. for 100Hz): 769 * bc -q 770 * scale=24 771 * ((1+2^-2+2^-5)*(1-2^-10)*realhz-idealhz)/idealhz 772 * (and then multiply by 1000000 to get ppm). 773 */ 774 switch (hz) { 775 case 60: 776 /* A factor of 1.000100010001 gives about 15ppm 777 error. */ 778 if (time_adj < 0) { 779 time_adj -= (-time_adj >> 4); 780 time_adj -= (-time_adj >> 8); 781 } else { 782 time_adj += (time_adj >> 4); 783 time_adj += (time_adj >> 8); 784 } 785 break; 786 787 case 96: 788 /* A factor of 1.0101010101 gives about 244ppm error. 
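			   In binary that factor is 1 + 2^-2 + 2^-4 + ...,
			   i.e. about 4/3 = 128/96 = idealhz/realhz from the
			   recipe above; the shifts below compose it as
			   (1 + 2^-2) * (1 + 2^-4 + 2^-8) ~= 1.3330, which
			   is roughly 244 ppm below 4/3.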
*/ 789 if (time_adj < 0) { 790 time_adj -= (-time_adj >> 2); 791 time_adj -= (-time_adj >> 4) + (-time_adj >> 8); 792 } else { 793 time_adj += (time_adj >> 2); 794 time_adj += (time_adj >> 4) + (time_adj >> 8); 795 } 796 break; 797 798 case 50: 799 case 100: 800 /* A factor of 1.010001111010111 gives about 1ppm 801 error. */ 802 if (time_adj < 0) { 803 time_adj -= (-time_adj >> 2) + (-time_adj >> 5); 804 time_adj += (-time_adj >> 10); 805 } else { 806 time_adj += (time_adj >> 2) + (time_adj >> 5); 807 time_adj -= (time_adj >> 10); 808 } 809 break; 810 811 case 1000: 812 /* A factor of 1.000001100010100001 gives about 50ppm 813 error. */ 814 if (time_adj < 0) { 815 time_adj -= (-time_adj >> 6) + (-time_adj >> 11); 816 time_adj -= (-time_adj >> 7); 817 } else { 818 time_adj += (time_adj >> 6) + (time_adj >> 11); 819 time_adj += (time_adj >> 7); 820 } 821 break; 822 823 case 1200: 824 /* A factor of 1.1011010011100001 gives about 64ppm 825 error. */ 826 if (time_adj < 0) { 827 time_adj -= (-time_adj >> 1) + (-time_adj >> 6); 828 time_adj -= (-time_adj >> 3) + (-time_adj >> 10); 829 } else { 830 time_adj += (time_adj >> 1) + (time_adj >> 6); 831 time_adj += (time_adj >> 3) + (time_adj >> 10); 832 } 833 break; 834 } 835 836 #ifdef EXT_CLOCK 837 /* 838 * If an external clock is present, it is necessary to 839 * discipline the kernel time variable anyway, since not 840 * all system components use the microtime() interface. 841 * Here, the time offset between the external clock and 842 * kernel time variable is computed every so often. 843 */ 844 clock_count++; 845 if (clock_count > CLOCK_INTERVAL) { 846 clock_count = 0; 847 microtime(&clock_ext); 848 delta.tv_sec = clock_ext.tv_sec - time.tv_sec; 849 delta.tv_usec = clock_ext.tv_usec - 850 time.tv_usec; 851 if (delta.tv_usec < 0) 852 delta.tv_sec--; 853 if (delta.tv_usec >= 500000) { 854 delta.tv_usec -= 1000000; 855 delta.tv_sec++; 856 } 857 if (delta.tv_usec < -500000) { 858 delta.tv_usec += 1000000; 859 delta.tv_sec--; 860 } 861 if (delta.tv_sec > 0 || (delta.tv_sec == 0 && 862 delta.tv_usec > MAXPHASE) || 863 delta.tv_sec < -1 || (delta.tv_sec == -1 && 864 delta.tv_usec < -MAXPHASE)) { 865 time = clock_ext; 866 delta.tv_sec = 0; 867 delta.tv_usec = 0; 868 } 869 #ifdef HIGHBALL 870 clock_cpu = delta.tv_usec; 871 #else /* HIGHBALL */ 872 hardupdate(delta.tv_usec); 873 #endif /* HIGHBALL */ 874 } 875 #endif /* EXT_CLOCK */ 876 } 877 878 #endif /* NTP */ 879 #endif /* !__HAVE_TIMECOUNTER */ 880 881 /* 882 * Update real-time timeout queue. 883 * Process callouts at a very low CPU priority, so we don't keep the 884 * relatively high clock interrupt priority any longer than necessary. 885 */ 886 if (callout_hardclock()) { 887 if (CLKF_BASEPRI(frame)) { 888 /* 889 * Save the overhead of a software interrupt; 890 * it will happen as soon as we return, so do 891 * it now. 892 */ 893 spllowersoftclock(); 894 KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE); 895 softclock(NULL); 896 KERNEL_UNLOCK(); 897 } else { 898 #ifdef __HAVE_GENERIC_SOFT_INTERRUPTS 899 softintr_schedule(softclock_si); 900 #else 901 setsoftclock(); 902 #endif 903 } 904 } 905 } 906 907 #ifdef __HAVE_TIMECOUNTER 908 /* 909 * Compute number of hz until specified time. Used to compute second 910 * argument to callout_reset() from an absolute time. 911 */ 912 int 913 hzto(struct timeval *tvp) 914 { 915 struct timeval now, tv; 916 917 tv = *tvp; /* Don't modify original tvp. 
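	 *
	 * Illustrative use (the callout, handler and softc names here are
	 * hypothetical, not part of this file): arming a callout to fire
	 * at an absolute time "deadline" could look like
	 *
	 *	callout_reset(&sc->sc_ch, hzto(&deadline), myhandler, sc);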
*/ 918 getmicrotime(&now); 919 timersub(&tv, &now, &tv); 920 return tvtohz(&tv); 921 } 922 #endif /* __HAVE_TIMECOUNTER */ 923 924 /* 925 * Compute number of ticks in the specified amount of time. 926 */ 927 int 928 tvtohz(struct timeval *tv) 929 { 930 unsigned long ticks; 931 long sec, usec; 932 933 /* 934 * If the number of usecs in the whole seconds part of the time 935 * difference fits in a long, then the total number of usecs will 936 * fit in an unsigned long. Compute the total and convert it to 937 * ticks, rounding up and adding 1 to allow for the current tick 938 * to expire. Rounding also depends on unsigned long arithmetic 939 * to avoid overflow. 940 * 941 * Otherwise, if the number of ticks in the whole seconds part of 942 * the time difference fits in a long, then convert the parts to 943 * ticks separately and add, using similar rounding methods and 944 * overflow avoidance. This method would work in the previous 945 * case, but it is slightly slower and assumes that hz is integral. 946 * 947 * Otherwise, round the time difference down to the maximum 948 * representable value. 949 * 950 * If ints are 32-bit, then the maximum value for any timeout in 951 * 10ms ticks is 248 days. 952 */ 953 sec = tv->tv_sec; 954 usec = tv->tv_usec; 955 956 if (usec < 0) { 957 sec--; 958 usec += 1000000; 959 } 960 961 if (sec < 0 || (sec == 0 && usec <= 0)) { 962 /* 963 * Would expire now or in the past. Return 0 ticks. 964 * This is different from the legacy hzto() interface, 965 * and callers need to check for it. 966 */ 967 ticks = 0; 968 } else if (sec <= (LONG_MAX / 1000000)) 969 ticks = (((sec * 1000000) + (unsigned long)usec + (tick - 1)) 970 / tick) + 1; 971 else if (sec <= (LONG_MAX / hz)) 972 ticks = (sec * hz) + 973 (((unsigned long)usec + (tick - 1)) / tick) + 1; 974 else 975 ticks = LONG_MAX; 976 977 if (ticks > INT_MAX) 978 ticks = INT_MAX; 979 980 return ((int)ticks); 981 } 982 983 #ifndef __HAVE_TIMECOUNTER 984 /* 985 * Compute number of hz until specified time. Used to compute second 986 * argument to callout_reset() from an absolute time. 987 */ 988 int 989 hzto(struct timeval *tv) 990 { 991 unsigned long ticks; 992 long sec, usec; 993 int s; 994 995 /* 996 * If the number of usecs in the whole seconds part of the time 997 * difference fits in a long, then the total number of usecs will 998 * fit in an unsigned long. Compute the total and convert it to 999 * ticks, rounding up and adding 1 to allow for the current tick 1000 * to expire. Rounding also depends on unsigned long arithmetic 1001 * to avoid overflow. 1002 * 1003 * Otherwise, if the number of ticks in the whole seconds part of 1004 * the time difference fits in a long, then convert the parts to 1005 * ticks separately and add, using similar rounding methods and 1006 * overflow avoidance. This method would work in the previous 1007 * case, but it is slightly slower and assume that hz is integral. 1008 * 1009 * Otherwise, round the time difference down to the maximum 1010 * representable value. 1011 * 1012 * If ints are 32-bit, then the maximum value for any timeout in 1013 * 10ms ticks is 248 days. 1014 */ 1015 s = splclock(); 1016 sec = tv->tv_sec - time.tv_sec; 1017 usec = tv->tv_usec - time.tv_usec; 1018 splx(s); 1019 1020 if (usec < 0) { 1021 sec--; 1022 usec += 1000000; 1023 } 1024 1025 if (sec < 0 || (sec == 0 && usec <= 0)) { 1026 /* 1027 * Would expire now or in the past. Return 0 ticks. 1028 * This is different from the legacy hzto() interface, 1029 * and callers need to check for it. 
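		 *
		 * Worked example of the rounding used below (illustrative,
		 * assuming hz = 100 so tick = 10000 us): a delta of
		 * 25000 us gives (25000 + 9999) / 10000 + 1 = 4 ticks,
		 * i.e. round up and then add one so that the partially
		 * elapsed current tick is not relied upon.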
1030 */ 1031 ticks = 0; 1032 } else if (sec <= (LONG_MAX / 1000000)) 1033 ticks = (((sec * 1000000) + (unsigned long)usec + (tick - 1)) 1034 / tick) + 1; 1035 else if (sec <= (LONG_MAX / hz)) 1036 ticks = (sec * hz) + 1037 (((unsigned long)usec + (tick - 1)) / tick) + 1; 1038 else 1039 ticks = LONG_MAX; 1040 1041 if (ticks > INT_MAX) 1042 ticks = INT_MAX; 1043 1044 return ((int)ticks); 1045 } 1046 #endif /* !__HAVE_TIMECOUNTER */ 1047 1048 /* 1049 * Compute number of ticks in the specified amount of time. 1050 */ 1051 int 1052 tstohz(struct timespec *ts) 1053 { 1054 struct timeval tv; 1055 1056 /* 1057 * usec has great enough resolution for hz, so convert to a 1058 * timeval and use tvtohz() above. 1059 */ 1060 TIMESPEC_TO_TIMEVAL(&tv, ts); 1061 return tvtohz(&tv); 1062 } 1063 1064 /* 1065 * Start profiling on a process. 1066 * 1067 * Kernel profiling passes proc0 which never exits and hence 1068 * keeps the profile clock running constantly. 1069 */ 1070 void 1071 startprofclock(struct proc *p) 1072 { 1073 1074 if ((p->p_flag & P_PROFIL) == 0) { 1075 p->p_flag |= P_PROFIL; 1076 /* 1077 * This is only necessary if using the clock as the 1078 * profiling source. 1079 */ 1080 if (++profprocs == 1 && stathz != 0) 1081 psdiv = psratio; 1082 } 1083 } 1084 1085 /* 1086 * Stop profiling on a process. 1087 */ 1088 void 1089 stopprofclock(struct proc *p) 1090 { 1091 1092 if (p->p_flag & P_PROFIL) { 1093 p->p_flag &= ~P_PROFIL; 1094 /* 1095 * This is only necessary if using the clock as the 1096 * profiling source. 1097 */ 1098 if (--profprocs == 0 && stathz != 0) 1099 psdiv = 1; 1100 } 1101 } 1102 1103 #if defined(PERFCTRS) 1104 /* 1105 * Independent profiling "tick" in case we're using a separate 1106 * clock or profiling event source. Currently, that's just 1107 * performance counters--hence the wrapper. 1108 */ 1109 void 1110 proftick(struct clockframe *frame) 1111 { 1112 #ifdef GPROF 1113 struct gmonparam *g; 1114 intptr_t i; 1115 #endif 1116 struct proc *p; 1117 1118 p = curproc; 1119 if (CLKF_USERMODE(frame)) { 1120 if (p->p_flag & P_PROFIL) 1121 addupc_intr(p, CLKF_PC(frame)); 1122 } else { 1123 #ifdef GPROF 1124 g = &_gmonparam; 1125 if (g->state == GMON_PROF_ON) { 1126 i = CLKF_PC(frame) - g->lowpc; 1127 if (i < g->textsize) { 1128 i /= HISTFRACTION * sizeof(*g->kcount); 1129 g->kcount[i]++; 1130 } 1131 } 1132 #endif 1133 #ifdef PROC_PC 1134 if (p && (p->p_flag & P_PROFIL)) 1135 addupc_intr(p, PROC_PC(p)); 1136 #endif 1137 } 1138 } 1139 #endif 1140 1141 /* 1142 * Statistics clock. Grab profile sample, and if divider reaches 0, 1143 * do process and kernel statistics. 1144 */ 1145 void 1146 statclock(struct clockframe *frame) 1147 { 1148 #ifdef GPROF 1149 struct gmonparam *g; 1150 intptr_t i; 1151 #endif 1152 struct cpu_info *ci = curcpu(); 1153 struct schedstate_percpu *spc = &ci->ci_schedstate; 1154 struct proc *p; 1155 struct lwp *l; 1156 1157 /* 1158 * Notice changes in divisor frequency, and adjust clock 1159 * frequency accordingly. 1160 */ 1161 if (spc->spc_psdiv != psdiv) { 1162 spc->spc_psdiv = psdiv; 1163 spc->spc_pscnt = psdiv; 1164 if (psdiv == 1) { 1165 setstatclockrate(stathz); 1166 } else { 1167 setstatclockrate(profhz); 1168 } 1169 } 1170 l = curlwp; 1171 p = (l ? l->l_proc : NULL); 1172 if (CLKF_USERMODE(frame)) { 1173 KASSERT(p != NULL); 1174 1175 if ((p->p_flag & P_PROFIL) && profsrc == PROFSRC_CLOCK) 1176 addupc_intr(p, CLKF_PC(frame)); 1177 if (--spc->spc_pscnt > 0) 1178 return; 1179 /* 1180 * Came from user mode; CPU was in user state. 
1181 * If this process is being profiled record the tick. 1182 */ 1183 p->p_uticks++; 1184 if (p->p_nice > NZERO) 1185 spc->spc_cp_time[CP_NICE]++; 1186 else 1187 spc->spc_cp_time[CP_USER]++; 1188 } else { 1189 #ifdef GPROF 1190 /* 1191 * Kernel statistics are just like addupc_intr, only easier. 1192 */ 1193 g = &_gmonparam; 1194 if (profsrc == PROFSRC_CLOCK && g->state == GMON_PROF_ON) { 1195 i = CLKF_PC(frame) - g->lowpc; 1196 if (i < g->textsize) { 1197 i /= HISTFRACTION * sizeof(*g->kcount); 1198 g->kcount[i]++; 1199 } 1200 } 1201 #endif 1202 #ifdef LWP_PC 1203 if (p && profsrc == PROFSRC_CLOCK && (p->p_flag & P_PROFIL)) 1204 addupc_intr(p, LWP_PC(l)); 1205 #endif 1206 if (--spc->spc_pscnt > 0) 1207 return; 1208 /* 1209 * Came from kernel mode, so we were: 1210 * - handling an interrupt, 1211 * - doing syscall or trap work on behalf of the current 1212 * user process, or 1213 * - spinning in the idle loop. 1214 * Whichever it is, charge the time as appropriate. 1215 * Note that we charge interrupts to the current process, 1216 * regardless of whether they are ``for'' that process, 1217 * so that we know how much of its real time was spent 1218 * in ``non-process'' (i.e., interrupt) work. 1219 */ 1220 if (CLKF_INTR(frame)) { 1221 if (p != NULL) 1222 p->p_iticks++; 1223 spc->spc_cp_time[CP_INTR]++; 1224 } else if (p != NULL) { 1225 p->p_sticks++; 1226 spc->spc_cp_time[CP_SYS]++; 1227 } else 1228 spc->spc_cp_time[CP_IDLE]++; 1229 } 1230 spc->spc_pscnt = psdiv; 1231 1232 if (p != NULL) { 1233 ++p->p_cpticks; 1234 /* 1235 * If no separate schedclock is provided, call it here 1236 * at about 16 Hz. 1237 */ 1238 if (schedhz == 0) 1239 if ((int)(--ci->ci_schedstate.spc_schedticks) <= 0) { 1240 schedclock(l); 1241 ci->ci_schedstate.spc_schedticks = statscheddiv; 1242 } 1243 } 1244 } 1245 1246 #ifndef __HAVE_TIMECOUNTER 1247 #ifdef NTP /* NTP phase-locked loop in kernel */ 1248 /* 1249 * hardupdate() - local clock update 1250 * 1251 * This routine is called by ntp_adjtime() to update the local clock 1252 * phase and frequency. The implementation is of an adaptive-parameter, 1253 * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new 1254 * time and frequency offset estimates for each call. If the kernel PPS 1255 * discipline code is configured (PPS_SYNC), the PPS signal itself 1256 * determines the new time offset, instead of the calling argument. 1257 * Presumably, calls to ntp_adjtime() occur only when the caller 1258 * believes the local clock is valid within some bound (+-128 ms with 1259 * NTP). If the caller's time is far different than the PPS time, an 1260 * argument will ensue, and it's not clear who will lose. 1261 * 1262 * For uncompensated quartz crystal oscillators and nominal update 1263 * intervals less than 1024 s, operation should be in phase-lock mode 1264 * (STA_FLL = 0), where the loop is disciplined to phase. For update 1265 * intervals greater than this, operation should be in frequency-lock 1266 * mode (STA_FLL = 1), where the loop is disciplined to frequency. 1267 * 1268 * Note: splclock() is in effect. 1269 */ 1270 void 1271 hardupdate(long offset) 1272 { 1273 long ltemp, mtemp; 1274 1275 if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME)) 1276 return; 1277 ltemp = offset; 1278 #ifdef PPS_SYNC 1279 if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL) 1280 ltemp = pps_offset; 1281 #endif /* PPS_SYNC */ 1282 1283 /* 1284 * Scale the phase adjustment and clamp to the operating range.
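	 *
	 * For illustration: an offset argument of +2000 us lies within
	 * the +-MAXPHASE window and is simply stored shifted left by
	 * SHIFT_UPDATE, while an offset of +700000 us would first be
	 * clamped to MAXPHASE (the +-512 ms bound noted in hardclock()
	 * above).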
1285 */ 1286 if (ltemp > MAXPHASE) 1287 time_offset = MAXPHASE << SHIFT_UPDATE; 1288 else if (ltemp < -MAXPHASE) 1289 time_offset = -(MAXPHASE << SHIFT_UPDATE); 1290 else 1291 time_offset = ltemp << SHIFT_UPDATE; 1292 1293 /* 1294 * Select whether the frequency is to be controlled and in which 1295 * mode (PLL or FLL). Clamp to the operating range. Ugly 1296 * multiply/divide should be replaced someday. 1297 */ 1298 if (time_status & STA_FREQHOLD || time_reftime == 0) 1299 time_reftime = time.tv_sec; 1300 mtemp = time.tv_sec - time_reftime; 1301 time_reftime = time.tv_sec; 1302 if (time_status & STA_FLL) { 1303 if (mtemp >= MINSEC) { 1304 ltemp = ((time_offset / mtemp) << (SHIFT_USEC - 1305 SHIFT_UPDATE)); 1306 if (ltemp < 0) 1307 time_freq -= -ltemp >> SHIFT_KH; 1308 else 1309 time_freq += ltemp >> SHIFT_KH; 1310 } 1311 } else { 1312 if (mtemp < MAXSEC) { 1313 ltemp *= mtemp; 1314 if (ltemp < 0) 1315 time_freq -= -ltemp >> (time_constant + 1316 time_constant + SHIFT_KF - 1317 SHIFT_USEC); 1318 else 1319 time_freq += ltemp >> (time_constant + 1320 time_constant + SHIFT_KF - 1321 SHIFT_USEC); 1322 } 1323 } 1324 if (time_freq > time_tolerance) 1325 time_freq = time_tolerance; 1326 else if (time_freq < -time_tolerance) 1327 time_freq = -time_tolerance; 1328 } 1329 1330 #ifdef PPS_SYNC 1331 /* 1332 * hardpps() - discipline CPU clock oscillator to external PPS signal 1333 * 1334 * This routine is called at each PPS interrupt in order to discipline 1335 * the CPU clock oscillator to the PPS signal. It measures the PPS phase 1336 * and leaves it in a handy spot for the hardclock() routine. It 1337 * integrates successive PPS phase differences and calculates the 1338 * frequency offset. This is used in hardclock() to discipline the CPU 1339 * clock oscillator so that intrinsic frequency error is cancelled out. 1340 * The code requires the caller to capture the time and hardware counter 1341 * value at the on-time PPS signal transition. 1342 * 1343 * Note that, on some Unix systems, this routine runs at an interrupt 1344 * priority level higher than the timer interrupt routine hardclock(). 1345 * Therefore, the variables used are distinct from the hardclock() 1346 * variables, except for certain exceptions: The PPS frequency pps_freq 1347 * and phase pps_offset variables are determined by this routine and 1348 * updated atomically. The time_tolerance variable can be considered a 1349 * constant, since it is infrequently changed, and then only when the 1350 * PPS signal is disabled. The watchdog counter pps_valid is updated 1351 * once per second by hardclock() and is atomically cleared in this 1352 * routine. 1353 */ 1354 void 1355 hardpps(struct timeval *tvp, /* time at PPS */ 1356 long usec /* hardware counter at PPS */) 1357 { 1358 long u_usec, v_usec, bigtick; 1359 long cal_sec, cal_usec; 1360 1361 /* 1362 * An occasional glitch can be produced when the PPS interrupt 1363 * occurs in the hardclock() routine before the time variable is 1364 * updated. Here the offset is discarded when the difference 1365 * between it and the last one is greater than tick/2, but not 1366 * if the interval since the first discard exceeds 30 s. 
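	 *
	 * Concretely (illustration only, assuming hz = 100): tick / 2 is
	 * 5000 us, so a sample more than 5 ms away from the current
	 * pps_offset is treated as a glitch and replaced by pps_offset;
	 * only after more than MAXGLITCH consecutive such seconds (the
	 * roughly 30 s mentioned above) is the median filter reseeded
	 * from the new value.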
1367 */ 1368 time_status |= STA_PPSSIGNAL; 1369 time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR); 1370 pps_valid = 0; 1371 u_usec = -tvp->tv_usec; 1372 if (u_usec < -500000) 1373 u_usec += 1000000; 1374 v_usec = pps_offset - u_usec; 1375 if (v_usec < 0) 1376 v_usec = -v_usec; 1377 if (v_usec > (tick >> 1)) { 1378 if (pps_glitch > MAXGLITCH) { 1379 pps_glitch = 0; 1380 pps_tf[2] = u_usec; 1381 pps_tf[1] = u_usec; 1382 } else { 1383 pps_glitch++; 1384 u_usec = pps_offset; 1385 } 1386 } else 1387 pps_glitch = 0; 1388 1389 /* 1390 * A three-stage median filter is used to help deglitch the pps 1391 * time. The median sample becomes the time offset estimate; the 1392 * difference between the other two samples becomes the time 1393 * dispersion (jitter) estimate. 1394 */ 1395 pps_tf[2] = pps_tf[1]; 1396 pps_tf[1] = pps_tf[0]; 1397 pps_tf[0] = u_usec; 1398 if (pps_tf[0] > pps_tf[1]) { 1399 if (pps_tf[1] > pps_tf[2]) { 1400 pps_offset = pps_tf[1]; /* 0 1 2 */ 1401 v_usec = pps_tf[0] - pps_tf[2]; 1402 } else if (pps_tf[2] > pps_tf[0]) { 1403 pps_offset = pps_tf[0]; /* 2 0 1 */ 1404 v_usec = pps_tf[2] - pps_tf[1]; 1405 } else { 1406 pps_offset = pps_tf[2]; /* 0 2 1 */ 1407 v_usec = pps_tf[0] - pps_tf[1]; 1408 } 1409 } else { 1410 if (pps_tf[1] < pps_tf[2]) { 1411 pps_offset = pps_tf[1]; /* 2 1 0 */ 1412 v_usec = pps_tf[2] - pps_tf[0]; 1413 } else if (pps_tf[2] < pps_tf[0]) { 1414 pps_offset = pps_tf[0]; /* 1 0 2 */ 1415 v_usec = pps_tf[1] - pps_tf[2]; 1416 } else { 1417 pps_offset = pps_tf[2]; /* 1 2 0 */ 1418 v_usec = pps_tf[1] - pps_tf[0]; 1419 } 1420 } 1421 if (v_usec > MAXTIME) 1422 pps_jitcnt++; 1423 v_usec = (v_usec << PPS_AVG) - pps_jitter; 1424 if (v_usec < 0) 1425 pps_jitter -= -v_usec >> PPS_AVG; 1426 else 1427 pps_jitter += v_usec >> PPS_AVG; 1428 if (pps_jitter > (MAXTIME >> 1)) 1429 time_status |= STA_PPSJITTER; 1430 1431 /* 1432 * During the calibration interval adjust the starting time when 1433 * the tick overflows. At the end of the interval compute the 1434 * duration of the interval and the difference of the hardware 1435 * counters at the beginning and end of the interval. This code 1436 * is deliciously complicated by the fact valid differences may 1437 * exceed the value of tick when using long calibration 1438 * intervals and small ticks. Note that the counter can be 1439 * greater than tick if caught at just the wrong instant, but 1440 * the values returned and used here are correct. 1441 */ 1442 bigtick = (long)tick << SHIFT_USEC; 1443 pps_usec -= pps_freq; 1444 if (pps_usec >= bigtick) 1445 pps_usec -= bigtick; 1446 if (pps_usec < 0) 1447 pps_usec += bigtick; 1448 pps_time.tv_sec++; 1449 pps_count++; 1450 if (pps_count < (1 << pps_shift)) 1451 return; 1452 pps_count = 0; 1453 pps_calcnt++; 1454 u_usec = usec << SHIFT_USEC; 1455 v_usec = pps_usec - u_usec; 1456 if (v_usec >= bigtick >> 1) 1457 v_usec -= bigtick; 1458 if (v_usec < -(bigtick >> 1)) 1459 v_usec += bigtick; 1460 if (v_usec < 0) 1461 v_usec = -(-v_usec >> pps_shift); 1462 else 1463 v_usec = v_usec >> pps_shift; 1464 pps_usec = u_usec; 1465 cal_sec = tvp->tv_sec; 1466 cal_usec = tvp->tv_usec; 1467 cal_sec -= pps_time.tv_sec; 1468 cal_usec -= pps_time.tv_usec; 1469 if (cal_usec < 0) { 1470 cal_usec += 1000000; 1471 cal_sec--; 1472 } 1473 pps_time = *tvp; 1474 1475 /* 1476 * Check for lost interrupts, noise, excessive jitter and 1477 * excessive frequency error. The number of timer ticks during 1478 * the interval may vary +-1 tick. 
Add to this a margin of one 1479 * tick for the PPS signal jitter and maximum frequency 1480 * deviation. If the limits are exceeded, the calibration 1481 * interval is reset to the minimum and we start over. 1482 */ 1483 u_usec = (long)tick << 1; 1484 if (!((cal_sec == -1 && cal_usec > (1000000 - u_usec)) 1485 || (cal_sec == 0 && cal_usec < u_usec)) 1486 || v_usec > time_tolerance || v_usec < -time_tolerance) { 1487 pps_errcnt++; 1488 pps_shift = PPS_SHIFT; 1489 pps_intcnt = 0; 1490 time_status |= STA_PPSERROR; 1491 return; 1492 } 1493 1494 /* 1495 * A three-stage median filter is used to help deglitch the pps 1496 * frequency. The median sample becomes the frequency offset 1497 * estimate; the difference between the other two samples 1498 * becomes the frequency dispersion (stability) estimate. 1499 */ 1500 pps_ff[2] = pps_ff[1]; 1501 pps_ff[1] = pps_ff[0]; 1502 pps_ff[0] = v_usec; 1503 if (pps_ff[0] > pps_ff[1]) { 1504 if (pps_ff[1] > pps_ff[2]) { 1505 u_usec = pps_ff[1]; /* 0 1 2 */ 1506 v_usec = pps_ff[0] - pps_ff[2]; 1507 } else if (pps_ff[2] > pps_ff[0]) { 1508 u_usec = pps_ff[0]; /* 2 0 1 */ 1509 v_usec = pps_ff[2] - pps_ff[1]; 1510 } else { 1511 u_usec = pps_ff[2]; /* 0 2 1 */ 1512 v_usec = pps_ff[0] - pps_ff[1]; 1513 } 1514 } else { 1515 if (pps_ff[1] < pps_ff[2]) { 1516 u_usec = pps_ff[1]; /* 2 1 0 */ 1517 v_usec = pps_ff[2] - pps_ff[0]; 1518 } else if (pps_ff[2] < pps_ff[0]) { 1519 u_usec = pps_ff[0]; /* 1 0 2 */ 1520 v_usec = pps_ff[1] - pps_ff[2]; 1521 } else { 1522 u_usec = pps_ff[2]; /* 1 2 0 */ 1523 v_usec = pps_ff[1] - pps_ff[0]; 1524 } 1525 } 1526 1527 /* 1528 * Here the frequency dispersion (stability) is updated. If it 1529 * is less than one-fourth the maximum (MAXFREQ), the frequency 1530 * offset is updated as well, but clamped to the tolerance. It 1531 * will be processed later by the hardclock() routine. 1532 */ 1533 v_usec = (v_usec >> 1) - pps_stabil; 1534 if (v_usec < 0) 1535 pps_stabil -= -v_usec >> PPS_AVG; 1536 else 1537 pps_stabil += v_usec >> PPS_AVG; 1538 if (pps_stabil > MAXFREQ >> 2) { 1539 pps_stbcnt++; 1540 time_status |= STA_PPSWANDER; 1541 return; 1542 } 1543 if (time_status & STA_PPSFREQ) { 1544 if (u_usec < 0) { 1545 pps_freq -= -u_usec >> PPS_AVG; 1546 if (pps_freq < -time_tolerance) 1547 pps_freq = -time_tolerance; 1548 u_usec = -u_usec; 1549 } else { 1550 pps_freq += u_usec >> PPS_AVG; 1551 if (pps_freq > time_tolerance) 1552 pps_freq = time_tolerance; 1553 } 1554 } 1555 1556 /* 1557 * Here the calibration interval is adjusted. If the maximum 1558 * time difference is greater than tick / 4, reduce the interval 1559 * by half. If this is not the case for four consecutive 1560 * intervals, double the interval. 
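	 *
	 * Example (illustrative): starting from an 8 s interval
	 * (pps_shift = 3), a single interval whose maximum time
	 * difference exceeds tick / 4 halves the interval to 4 s,
	 * whereas four consecutive quiet intervals double it to 16 s,
	 * and so on up to the 256 s maximum noted earlier.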
1561 */ 1562 if (u_usec << pps_shift > bigtick >> 2) { 1563 pps_intcnt = 0; 1564 if (pps_shift > PPS_SHIFT) 1565 pps_shift--; 1566 } else if (pps_intcnt >= 4) { 1567 pps_intcnt = 0; 1568 if (pps_shift < PPS_SHIFTMAX) 1569 pps_shift++; 1570 } else 1571 pps_intcnt++; 1572 } 1573 #endif /* PPS_SYNC */ 1574 #endif /* NTP */ 1575 1576 /* timecounter compat functions */ 1577 void 1578 nanotime(struct timespec *ts) 1579 { 1580 struct timeval tv; 1581 1582 microtime(&tv); 1583 TIMEVAL_TO_TIMESPEC(&tv, ts); 1584 } 1585 1586 void 1587 getbinuptime(struct bintime *bt) 1588 { 1589 struct timeval tv; 1590 1591 microtime(&tv); 1592 timeval2bintime(&tv, bt); 1593 } 1594 1595 void 1596 nanouptime(struct timespec *tsp) 1597 { 1598 int s; 1599 1600 s = splclock(); 1601 TIMEVAL_TO_TIMESPEC(&mono_time, tsp); 1602 splx(s); 1603 } 1604 1605 void 1606 getnanouptime(struct timespec *tsp) 1607 { 1608 int s; 1609 1610 s = splclock(); 1611 TIMEVAL_TO_TIMESPEC(&mono_time, tsp); 1612 splx(s); 1613 } 1614 1615 void 1616 getmicrouptime(struct timeval *tvp) 1617 { 1618 int s; 1619 1620 s = splclock(); 1621 *tvp = mono_time; 1622 splx(s); 1623 } 1624 1625 void 1626 getnanotime(struct timespec *tsp) 1627 { 1628 int s; 1629 1630 s = splclock(); 1631 TIMEVAL_TO_TIMESPEC(&time, tsp); 1632 splx(s); 1633 } 1634 1635 void 1636 getmicrotime(struct timeval *tvp) 1637 { 1638 int s; 1639 1640 s = splclock(); 1641 *tvp = time; 1642 splx(s); 1643 } 1644 #endif /* !__HAVE_TIMECOUNTER */ 1645
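/*
 * Illustrative (non-compiled) use of the compat interfaces above; the
 * variable names are examples only.  Timing an interval with the
 * monotonic getters:
 *
 *	struct timeval t0, t1, elapsed;
 *
 *	getmicrouptime(&t0);
 *	... work being timed ...
 *	getmicrouptime(&t1);
 *	timersub(&t1, &t0, &elapsed);
 */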