/*	$NetBSD: kern_clock.c,v 1.42 1997/05/21 19:55:45 gwr Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California. All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dkstat.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <vm/vm.h>
#include <sys/sysctl.h>
#include <sys/timex.h>

#include <machine/cpu.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other. The main clock, running hz times per second, is used to keep
 * track of real time. The second timer handles kernel and user profiling,
 * and does resource use estimation. If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks. For example,
 * the randomization prevents an adversary from always giving up the cpu
 * just before its quantum expires. Otherwise, it would never accumulate
 * cpu ticks. The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock. This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
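 *
 * (Illustrative note, not from the original author: with no separate
 * statistics timer, statclock() is simply called from hardclock() below,
 * so every statistics sample lands exactly on a hardclock tick and the
 * sampling can be gamed as described above.)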
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling. This profile clock runs at profhz. We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics. (For profiling, every tick counts.)
 */

/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */


#ifdef NTP	/* NTP phase-locked loop in kernel */
/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The following variables are read and set by the ntp_adjtime() system
 * call.
 *
 * time_state shows the state of the system clock, with values defined
 * in the timex.h header file.
 *
 * time_status shows the status of the system clock, with bits defined
 * in the timex.h header file.
 *
 * time_offset is used by the PLL/FLL to adjust the system time in small
 * increments.
 *
 * time_constant determines the bandwidth or "stiffness" of the PLL.
 *
 * time_tolerance determines maximum frequency error or tolerance of the
 * CPU clock oscillator and is a property of the architecture; however,
 * in principle it could change as result of the presence of external
 * discipline signals, for instance.
 *
 * time_precision is usually equal to the kernel tick variable; however,
 * in cases where a precision clock counter or external clock is
 * available, the resolution can be much less than this and depend on
 * whether the external clock is working or not.
 *
 * time_maxerror is initialized by an ntp_adjtime() call and increased by
 * the kernel once each second to reflect the maximum error bound
 * growth.
 *
 * time_esterror is set and read by the ntp_adjtime() call, but
 * otherwise not used by the kernel.
 */
int time_state = TIME_OK;	/* clock state */
int time_status = STA_UNSYNC;	/* clock status bits */
long time_offset = 0;		/* time offset (us) */
long time_constant = 0;		/* pll time constant */
long time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
long time_precision = 1;	/* clock precision (us) */
long time_maxerror = MAXPHASE;	/* maximum error (us) */
long time_esterror = MAXPHASE;	/* estimated error (us) */

/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock. The scale
 * factors are defined in the timex.h header file.
 *
 * time_phase and time_freq are the phase increment and the frequency
 * increment, respectively, of the kernel time variable.
 *
 * time_freq is set via ntp_adjtime() from a value stored in a file when
 * the synchronization daemon is first started. Its value is retrieved
 * via ntp_adjtime() and written to the file about once per hour by the
 * daemon.
 *
 * time_adj is the adjustment added to the value of tick at each timer
 * interrupt and is recomputed from time_phase and time_freq at each
 * second's rollover.
 *
 * time_reftime is the second's portion of the system time at the last
 * call to ntp_adjtime(). It is used to adjust the time_freq variable
 * and to increase the time_maxerror as the time since last update
 * increases.
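 *
 * (Illustrative note, not from the original author: with hz = 100 each
 * tick nominally advances the clock by tick = 10000 us, and time_adj is
 * the small signed correction folded into that advance; a sustained
 * correction of 1 us per tick slews the clock by 100 us per second,
 * i.e. 100 ppm.)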
 */
long time_phase = 0;		/* phase offset (scaled us) */
long time_freq = 0;		/* frequency offset (scaled ppm) */
long time_adj = 0;		/* tick adjust (scaled 1 / hz) */
long time_reftime = 0;		/* time at last adjustment (s) */

#ifdef PPS_SYNC
/*
 * The following variables are used only if the kernel PPS discipline
 * code is configured (PPS_SYNC). The scale factors are defined in the
 * timex.h header file.
 *
 * pps_time contains the time at each calibration interval, as read by
 * microtime(). pps_count counts the seconds of the calibration
 * interval, the duration of which is nominally pps_shift in powers of
 * two.
 *
 * pps_offset is the time offset produced by the time median filter
 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
 * this filter.
 *
 * pps_freq is the frequency offset produced by the frequency median
 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
 * by this filter.
 *
 * pps_usec is latched from a high resolution counter or external clock
 * at pps_time. Here we want the hardware counter contents only, not the
 * contents plus the time_tv.usec as usual.
 *
 * pps_valid counts the number of seconds since the last PPS update. It
 * is used as a watchdog timer to disable the PPS discipline should the
 * PPS signal be lost.
 *
 * pps_glitch counts the number of seconds since the beginning of an
 * offset burst more than tick/2 from current nominal offset. It is used
 * mainly to suppress error bursts due to priority conflicts between the
 * PPS interrupt and timer interrupt.
 *
 * pps_intcnt counts the calibration intervals for use in the interval-
 * adaptation algorithm. It's just too complicated for words.
 */
struct timeval pps_time;	/* kernel time at last interval */
long pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
long pps_offset = 0;		/* pps time offset (us) */
long pps_jitter = MAXTIME;	/* time dispersion (jitter) (us) */
long pps_ff[] = {0, 0, 0};	/* pps frequency offset median filter */
long pps_freq = 0;		/* frequency offset (scaled ppm) */
long pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
long pps_usec = 0;		/* microsec counter at last interval */
long pps_valid = PPS_VALID;	/* pps signal watchdog counter */
int pps_glitch = 0;		/* pps signal glitch counter */
int pps_count = 0;		/* calibration interval counter (s) */
int pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
int pps_intcnt = 0;		/* intervals at current duration */

/*
 * PPS signal quality monitors
 *
 * pps_jitcnt counts the seconds that have been discarded because the
 * jitter measured by the time median filter exceeds the limit MAXTIME
 * (100 us).
 *
 * pps_calcnt counts the frequency calibration intervals, which are
 * variable from 4 s to 256 s.
 *
 * pps_errcnt counts the calibration intervals which have been discarded
 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 * calibration interval jitter exceeds two ticks.
 *
 * pps_stbcnt counts the calibration intervals that have been discarded
 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
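 *
 * (Illustrative note, not from the original author: the 4 s to 256 s
 * range corresponds to pps_shift varying between PPS_SHIFT and
 * PPS_SHIFTMAX, the calibration interval being 1 << pps_shift seconds.)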
 */
long pps_jitcnt = 0;		/* jitter limit exceeded */
long pps_calcnt = 0;		/* calibration intervals */
long pps_errcnt = 0;		/* calibration errors */
long pps_stbcnt = 0;		/* stability limit exceeded */
#endif /* PPS_SYNC */

#ifdef EXT_CLOCK
/*
 * External clock definitions
 *
 * The following definitions and declarations are used only if an
 * external clock is configured on the system.
 */
#define CLOCK_INTERVAL 30	/* CPU clock update interval (s) */

/*
 * The clock_count variable is set to CLOCK_INTERVAL at each PPS
 * interrupt and decremented once each second.
 */
int clock_count = 0;		/* CPU clock counter */

#ifdef HIGHBALL
/*
 * The clock_offset and clock_cpu variables are used by the HIGHBALL
 * interface. The clock_offset variable defines the offset between
 * system time and the HIGHBALL counters. The clock_cpu variable contains
 * the offset between the system clock and the HIGHBALL clock for use in
 * disciplining the kernel time variable.
 */
extern struct timeval clock_offset;	/* Highball clock offset */
long clock_cpu = 0;		/* CPU clock adjust */
#endif /* HIGHBALL */
#endif /* EXT_CLOCK */
#endif /* NTP */


/*
 * Bump a timeval by a small number of usec's.
 */
#define BUMPTIME(t, usec) { \
	register volatile struct timeval *tp = (t); \
	register long us; \
 \
	tp->tv_usec = us = tp->tv_usec + (usec); \
	if (us >= 1000000) { \
		tp->tv_usec = us - 1000000; \
		tp->tv_sec++; \
	} \
}

int	stathz;
int	profhz;
int	profprocs;
int	ticks;
static int psdiv, pscnt;		/* prof => stat divider */
int	psratio;			/* ratio: prof / stat */
int	tickfix, tickfixinterval;	/* used if tick not really integral */
#ifndef NTP
static int tickfixcnt;			/* accumulated fractional error */
#else
int	fixtick;			/* used by NTP for same */
int	shifthz;
#endif

volatile struct timeval time;
volatile struct timeval mono_time;

/*
 * Initialize clock frequencies and start both clocks running.
 */
void
initclocks()
{
	register int i;

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	psdiv = pscnt = 1;
	cpu_initclocks();

	/*
	 * Compute profhz/stathz, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;

#ifdef NTP
	switch (hz) {
	case 60:
	case 64:
		shifthz = SHIFT_SCALE - 6;
		break;
	case 96:
	case 100:
	case 128:
		shifthz = SHIFT_SCALE - 7;
		break;
	case 256:
		shifthz = SHIFT_SCALE - 8;
		break;
	case 512:
		shifthz = SHIFT_SCALE - 9;
		break;
	case 1024:
		shifthz = SHIFT_SCALE - 10;
		break;
	default:
		panic("weird hz");
	}
#endif
}
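
/*
 * Illustrative note (not from the original author): the switch above
 * folds the clock frequency into the NTP scale factor; for hz = 100 it
 * picks shifthz = SHIFT_SCALE - 7 because 2^7 = 128 is the nearest power
 * of two, and the residual error of that approximation is compensated in
 * hardclock() below for the 60, 96 and 100 Hz cases.
 */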

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(frame)
	register struct clockframe *frame;
{
	register struct callout *p1;
	register struct proc *p;
	register int delta, needsoft;
	extern int tickdelta;
	extern long timedelta;
#ifdef NTP
	register int time_update;
	register int ltemp;
#endif

	/*
	 * Update real-time timeout queue.
	 * At front of queue are some number of events which are ``due''.
	 * The time to these is <= 0 and if negative represents the
	 * number of ticks which have passed since it was supposed to happen.
	 * The rest of the q elements (times > 0) are events yet to happen,
	 * where the time for each is given as a delta from the previous.
	 * Decrementing just the first of these serves to decrement the time
	 * to all events.
	 */
	needsoft = 0;
	for (p1 = calltodo.c_next; p1 != NULL; p1 = p1->c_next) {
		if (--p1->c_time > 0)
			break;
		needsoft = 1;
		if (p1->c_time == 0)
			break;
	}

	p = curproc;
	if (p) {
		register struct pstats *pstats;

		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		pstats = p->p_stats;
		if (CLKF_USERMODE(frame) &&
		    timerisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
			psignal(p, SIGVTALRM);
		if (timerisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
			psignal(p, SIGPROF);
	}

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);

	/*
	 * Increment the time-of-day. The increment is normally just
	 * ``tick''. If the machine is one which has a clock frequency
	 * such that ``hz'' would not divide the second evenly into
	 * milliseconds, a periodic adjustment must be applied. Finally,
	 * if we are still adjusting the time (see adjtime()),
	 * ``tickdelta'' may also be added in.
	 */
	ticks++;
	delta = tick;

#ifndef NTP
	if (tickfix) {
		tickfixcnt += tickfix;
		if (tickfixcnt >= tickfixinterval) {
			delta++;
			tickfixcnt -= tickfixinterval;
		}
	}
#endif /* !NTP */
	/* Imprecise 4bsd adjtime() handling */
	if (timedelta != 0) {
		delta += tickdelta;
		timedelta -= tickdelta;
	}

#ifdef notyet
	microset();
#endif

#ifndef NTP
	BUMPTIME(&time, delta);		/* XXX Now done using NTP code below */
#endif
	BUMPTIME(&mono_time, delta);

#ifdef NTP
	time_update = delta;

	/*
	 * Compute the phase adjustment. If the low-order bits
	 * (time_phase) of the update overflow, bump the high-order bits
	 * (time_update).
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		time_update -= ltemp;
	} else if (time_phase >= FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		time_update += ltemp;
	}

#ifdef HIGHBALL
	/*
	 * If the HIGHBALL board is installed, we need to adjust the
	 * external clock offset in order to close the hardware feedback
	 * loop. This will adjust the external clock phase and frequency
	 * in small amounts. The additional phase noise and frequency
	 * wander this causes should be minimal. We also need to
	 * discipline the kernel time variable, since the PLL is used to
	 * discipline the external clock. If the Highball board is not
	 * present, we discipline kernel time with the PLL as usual. We
	 * assume that the external clock phase adjustment (time_update)
	 * and kernel phase adjustment (clock_cpu) are less than the
	 * value of tick.
	 */
	clock_offset.tv_usec += time_update;
	if (clock_offset.tv_usec >= 1000000) {
		clock_offset.tv_sec++;
		clock_offset.tv_usec -= 1000000;
	}
	if (clock_offset.tv_usec < 0) {
		clock_offset.tv_sec--;
		clock_offset.tv_usec += 1000000;
	}
	time.tv_usec += clock_cpu;
	clock_cpu = 0;
#else
	time.tv_usec += time_update;
#endif /* HIGHBALL */

	/*
	 * On rollover of the second the phase adjustment to be used for
	 * the next second is calculated. Also, the maximum error is
	 * increased by the tolerance. If the PPS frequency discipline
	 * code is present, the phase is increased to compensate for the
	 * CPU clock oscillator frequency error.
	 *
	 * On a 32-bit machine and given parameters in the timex.h
	 * header file, the maximum phase adjustment is +-512 ms and
	 * maximum frequency offset is (a tad less than) +-512 ppm. On a
	 * 64-bit machine, you shouldn't need to ask.
	 */
	if (time.tv_usec >= 1000000) {
		time.tv_usec -= 1000000;
		time.tv_sec++;
		time_maxerror += time_tolerance >> SHIFT_USEC;

		/*
		 * Leap second processing. If in leap-insert state at
		 * the end of the day, the system clock is set back one
		 * second; if in leap-delete state, the system clock is
		 * set ahead one second. The microtime() routine or
		 * external clock driver will ensure that reported time
		 * is always monotonic. The ugly divides should be
		 * replaced.
		 */
		switch (time_state) {
		case TIME_OK:
			if (time_status & STA_INS)
				time_state = TIME_INS;
			else if (time_status & STA_DEL)
				time_state = TIME_DEL;
			break;

		case TIME_INS:
			if (time.tv_sec % 86400 == 0) {
				time.tv_sec--;
				time_state = TIME_OOP;
			}
			break;

		case TIME_DEL:
			if ((time.tv_sec + 1) % 86400 == 0) {
				time.tv_sec++;
				time_state = TIME_WAIT;
			}
			break;

		case TIME_OOP:
			time_state = TIME_WAIT;
			break;

		case TIME_WAIT:
			if (!(time_status & (STA_INS | STA_DEL)))
				time_state = TIME_OK;
			break;
		}

		/*
		 * Compute the phase adjustment for the next second. In
		 * PLL mode, the offset is reduced by a fixed factor
		 * times the time constant. In FLL mode the offset is
		 * used directly. In either mode, the maximum phase
		 * adjustment for each second is clamped so as to spread
		 * the adjustment over not more than the number of
		 * seconds between updates.
		 */
		if (time_offset < 0) {
			ltemp = -time_offset;
			if (!(time_status & STA_FLL))
				ltemp >>= SHIFT_KG + time_constant;
			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
				ltemp = (MAXPHASE / MINSEC) <<
				    SHIFT_UPDATE;
			time_offset += ltemp;
			time_adj = -ltemp << (shifthz - SHIFT_UPDATE);
		} else if (time_offset > 0) {
			ltemp = time_offset;
			if (!(time_status & STA_FLL))
				ltemp >>= SHIFT_KG + time_constant;
			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
				ltemp = (MAXPHASE / MINSEC) <<
				    SHIFT_UPDATE;
			time_offset -= ltemp;
			time_adj = ltemp << (shifthz - SHIFT_UPDATE);
		} else
			time_adj = 0;

		/*
		 * Compute the frequency estimate and additional phase
		 * adjustment due to frequency error for the next
		 * second. When the PPS signal is engaged, gnaw on the
		 * watchdog counter and update the frequency computed by
		 * the pll and the PPS signal.
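		 *
		 * (Illustrative note, not from the original author: the
		 * frequency terms are in scaled ppm, and at hz = 100 a
		 * steady 100 ppm error amounts to about 1 us per 10000 us
		 * tick, which is roughly the per-tick correction that the
		 * time_adj computed here must carry.)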
		 */
#ifdef PPS_SYNC
		pps_valid++;
		if (pps_valid == PPS_VALID) {
			pps_jitter = MAXTIME;
			pps_stabil = MAXFREQ;
			time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
			    STA_PPSWANDER | STA_PPSERROR);
		}
		ltemp = time_freq + pps_freq;
#else
		ltemp = time_freq;
#endif /* PPS_SYNC */

		if (ltemp < 0)
			time_adj -= -ltemp >> (SHIFT_USEC - shifthz);
		else
			time_adj += ltemp >> (SHIFT_USEC - shifthz);
		time_adj += (long)fixtick << shifthz;

		/*
		 * When the CPU clock oscillator frequency is not a
		 * power of 2 in Hz, shifthz is only an approximate
		 * scale factor.
		 */
		switch (hz) {
		case 96:
		case 100:
			/*
			 * In the following code the overall gain is increased
			 * by a factor of 1.25, which results in a residual
			 * error less than 3 percent.
			 */
			if (time_adj < 0)
				time_adj -= -time_adj >> 2;
			else
				time_adj += time_adj >> 2;
			break;
		case 60:
			/*
			 * 60 Hz m68k and vaxes have a PLL gain factor of
			 * 60/64 (15/16) of what it should be. In the
			 * following code the overall gain is increased by a
			 * factor of 1.0625 (17/16), which results in a
			 * residual error of just less than 0.4 percent.
			 */
			if (time_adj < 0)
				time_adj -= -time_adj >> 4;
			else
				time_adj += time_adj >> 4;
			break;
		}

#ifdef EXT_CLOCK
		/*
		 * If an external clock is present, it is necessary to
		 * discipline the kernel time variable anyway, since not
		 * all system components use the microtime() interface.
		 * Here, the time offset between the external clock and
		 * kernel time variable is computed every so often.
		 */
		clock_count++;
		if (clock_count > CLOCK_INTERVAL) {
			clock_count = 0;
			microtime(&clock_ext);
			delta.tv_sec = clock_ext.tv_sec - time.tv_sec;
			delta.tv_usec = clock_ext.tv_usec -
			    time.tv_usec;
			if (delta.tv_usec < 0)
				delta.tv_sec--;
			if (delta.tv_usec >= 500000) {
				delta.tv_usec -= 1000000;
				delta.tv_sec++;
			}
			if (delta.tv_usec < -500000) {
				delta.tv_usec += 1000000;
				delta.tv_sec--;
			}
			if (delta.tv_sec > 0 || (delta.tv_sec == 0 &&
			    delta.tv_usec > MAXPHASE) ||
			    delta.tv_sec < -1 || (delta.tv_sec == -1 &&
			    delta.tv_usec < -MAXPHASE)) {
				time = clock_ext;
				delta.tv_sec = 0;
				delta.tv_usec = 0;
			}
#ifdef HIGHBALL
			clock_cpu = delta.tv_usec;
#else /* HIGHBALL */
			hardupdate(delta.tv_usec);
#endif /* HIGHBALL */
		}
#endif /* EXT_CLOCK */
	}

#endif /* NTP */

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	if (needsoft) {
		if (CLKF_BASEPRI(frame)) {
			/*
			 * Save the overhead of a software interrupt;
			 * it will happen as soon as we return, so do it now.
			 */
			(void)splsoftclock();
			softclock();
		} else
			setsoftclock();
	}
}

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
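 *
 * (Illustrative note, not from the original author: because the queue
 * stores deltas, timeouts due in 2, 5 and 7 ticks are kept as c_time
 * values of 2, 3 and 2; hardclock() ages the whole queue by touching
 * only the entries at its head, and entries are run here once their
 * delta reaches zero or goes negative.)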
 */
/*ARGSUSED*/
void
softclock()
{
	register struct callout *c;
	register void *arg;
	register void (*func) __P((void *));
	register int s;

	s = splhigh();
	while ((c = calltodo.c_next) != NULL && c->c_time <= 0) {
		func = c->c_func;
		arg = c->c_arg;
		calltodo.c_next = c->c_next;
		c->c_next = callfree;
		callfree = c;
		splx(s);
		(*func)(arg);
		(void) splhigh();
	}
	splx(s);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * See AT&T BCI Driver Reference Manual for specification. This
 * implementation differs from that one in that no identification
 * value is returned from timeout, rather, the original arguments
 * to timeout are used to identify entries for untimeout.
 */
void
timeout(ftn, arg, ticks)
	void (*ftn) __P((void *));
	void *arg;
	register int ticks;
{
	register struct callout *new, *p, *t;
	register int s;

	if (ticks <= 0)
		ticks = 1;

	/* Lock out the clock. */
	s = splhigh();

	/* Fill in the next free callout structure. */
	if (callfree == NULL)
		panic("timeout table full");
	new = callfree;
	callfree = new->c_next;
	new->c_arg = arg;
	new->c_func = ftn;

	/*
	 * The time for each event is stored as a difference from the time
	 * of the previous event on the queue. Walk the queue, correcting
	 * the ticks argument for queue entries passed. Correct the ticks
	 * value for the queue entry immediately after the insertion point
	 * as well. Watch out for negative c_time values; these represent
	 * overdue events.
	 */
	for (p = &calltodo;
	    (t = p->c_next) != NULL && ticks > t->c_time; p = t)
		if (t->c_time > 0)
			ticks -= t->c_time;
	new->c_time = ticks;
	if (t != NULL)
		t->c_time -= ticks;

	/* Insert the new entry into the queue. */
	p->c_next = new;
	new->c_next = t;
	splx(s);
}

void
untimeout(ftn, arg)
	void (*ftn) __P((void *));
	void *arg;
{
	register struct callout *p, *t;
	register int s;

	s = splhigh();
	for (p = &calltodo; (t = p->c_next) != NULL; p = t)
		if (t->c_func == ftn && t->c_arg == arg) {
			/* Increment next entry's tick count. */
			if (t->c_next && t->c_time > 0)
				t->c_next->c_time += t->c_time;

			/* Move entry from callout queue to callfree queue. */
			p->c_next = t->c_next;
			t->c_next = callfree;
			callfree = t;
			break;
		}
	splx(s);
}

/*
 * Compute number of hz until specified time. Used to
 * compute third argument to timeout() from an absolute time.
 */
int
hzto(tv)
	struct timeval *tv;
{
	register long ticks, sec;
	int s;

	/*
	 * If number of microseconds will fit in 32 bit arithmetic,
	 * then compute number of microseconds to time and scale to
	 * ticks. Otherwise just compute number of hz in time, rounding
	 * times greater than representable to maximum value. (We must
	 * compute in microseconds, because hz can be greater than 1000,
	 * and thus tick can be less than one millisecond).
	 *
	 * Delta times less than 14 hours can be computed ``exactly''.
	 * (Note that if hz would yield a non-integral number of us per
	 * tick, i.e. tickfix is nonzero, timeouts can be a tick longer
	 * than they should be.) Maximum value for any timeout in 10ms
	 * ticks is 250 days.
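	 *
	 * (Illustrative note, not from the original author: with hz = 100
	 * and tick = 10000 us, a request 500 ms in the future comes back
	 * as 500000 / 10000 = 50 ticks.)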
	 */
	s = splclock();
	sec = tv->tv_sec - time.tv_sec;
	if (sec <= 0x7fffffff / 1000000 - 1)
		ticks = ((tv->tv_sec - time.tv_sec) * 1000000 +
		    (tv->tv_usec - time.tv_usec)) / tick;
	else if (sec <= 0x7fffffff / hz)
		ticks = sec * hz;
	else
		ticks = 0x7fffffff;
	splx(s);
	return (ticks);
}

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(p)
	register struct proc *p;
{
	int s;

	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		if (++profprocs == 1 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = psratio;
			setstatclockrate(profhz);
			splx(s);
		}
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(p)
	register struct proc *p;
{
	int s;

	if (p->p_flag & P_PROFIL) {
		p->p_flag &= ~P_PROFIL;
		if (--profprocs == 0 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = 1;
			setstatclockrate(stathz);
			splx(s);
		}
	}
}

/*
 * Statistics clock. Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.
 */
void
statclock(frame)
	register struct clockframe *frame;
{
#ifdef GPROF
	register struct gmonparam *g;
	register int i;
#endif
	register struct proc *p;

	if (CLKF_USERMODE(frame)) {
		p = curproc;
		if (p->p_flag & P_PROFIL)
			addupc_intr(p, CLKF_PC(frame), 1);
		if (--pscnt > 0)
			return;
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled record the tick.
		 */
		p->p_uticks++;
		if (p->p_nice > NZERO)
			cp_time[CP_NICE]++;
		else
			cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
		if (--pscnt > 0)
			return;
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		p = curproc;
		if (CLKF_INTR(frame)) {
			if (p != NULL)
				p->p_iticks++;
			cp_time[CP_INTR]++;
		} else if (p != NULL) {
			p->p_sticks++;
			cp_time[CP_SYS]++;
		} else
			cp_time[CP_IDLE]++;
	}
	pscnt = psdiv;

	/*
	 * We adjust the priority of the current process. The priority of
	 * a process gets worse as it accumulates CPU time. The cpu usage
	 * estimator (p_estcpu) is increased here. The formula for computing
	 * priorities (in kern_synch.c) will compute a different value each
	 * time p_estcpu increases by 4. The cpu usage estimator ramps up
	 * quite quickly when the process is running (linearly), and decays
	 * away exponentially, at a rate which is proportionally slower when
	 * the system is busy.
	 * The basic principle is that the system will
	 * 90% forget that the process used a lot of CPU time in 5 * loadav
	 * seconds. This causes the system to favor processes which haven't
	 * run much recently, and to round-robin among other processes.
	 */
	if (p != NULL) {
		p->p_cpticks++;
		if (++p->p_estcpu == 0)
			p->p_estcpu--;
		if ((p->p_estcpu & 3) == 0) {
			resetpriority(p);
			if (p->p_priority >= PUSER)
				p->p_priority = p->p_usrpri;
		}
	}
}


#ifdef NTP	/* NTP phase-locked loop in kernel */

/*
 * hardupdate() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency. The implementation is of an adaptive-parameter,
 * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new
 * time and frequency offset estimates for each call. If the kernel PPS
 * discipline code is configured (PPS_SYNC), the PPS signal itself
 * determines the new time offset, instead of the calling argument.
 * Presumably, calls to ntp_adjtime() occur only when the caller
 * believes the local clock is valid within some bound (+-128 ms with
 * NTP). If the caller's time is far different than the PPS time, an
 * argument will ensue, and it's not clear who will lose.
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 1024 s, operation should be in phase-lock mode
 * (STA_FLL = 0), where the loop is disciplined to phase. For update
 * intervals greater than this, operation should be in frequency-lock
 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
 *
 * Note: splclock() is in effect.
 */
void
hardupdate(offset)
	long offset;
{
	long ltemp, mtemp;

	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
		return;
	ltemp = offset;
#ifdef PPS_SYNC
	if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
		ltemp = pps_offset;
#endif /* PPS_SYNC */

	/*
	 * Scale the phase adjustment and clamp to the operating range.
	 */
	if (ltemp > MAXPHASE)
		time_offset = MAXPHASE << SHIFT_UPDATE;
	else if (ltemp < -MAXPHASE)
		time_offset = -(MAXPHASE << SHIFT_UPDATE);
	else
		time_offset = ltemp << SHIFT_UPDATE;

	/*
	 * Select whether the frequency is to be controlled and in which
	 * mode (PLL or FLL). Clamp to the operating range. Ugly
	 * multiply/divide should be replaced someday.
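	 *
	 * (Illustrative note, not from the original author: in FLL mode
	 * the correction applied to time_freq is based on offset/interval,
	 * i.e. the average frequency error over the interval, while in PLL
	 * mode it is based on offset * interval scaled down by the time
	 * constant; this is why FLL suits long update intervals.)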
	 */
	if (time_status & STA_FREQHOLD || time_reftime == 0)
		time_reftime = time.tv_sec;
	mtemp = time.tv_sec - time_reftime;
	time_reftime = time.tv_sec;
	if (time_status & STA_FLL) {
		if (mtemp >= MINSEC) {
			ltemp = ((time_offset / mtemp) << (SHIFT_USEC -
			    SHIFT_UPDATE));
			if (ltemp < 0)
				time_freq -= -ltemp >> SHIFT_KH;
			else
				time_freq += ltemp >> SHIFT_KH;
		}
	} else {
		if (mtemp < MAXSEC) {
			ltemp *= mtemp;
			if (ltemp < 0)
				time_freq -= -ltemp >> (time_constant +
				    time_constant + SHIFT_KF -
				    SHIFT_USEC);
			else
				time_freq += ltemp >> (time_constant +
				    time_constant + SHIFT_KF -
				    SHIFT_USEC);
		}
	}
	if (time_freq > time_tolerance)
		time_freq = time_tolerance;
	else if (time_freq < -time_tolerance)
		time_freq = -time_tolerance;
}

#ifdef PPS_SYNC
/*
 * hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal. It measures the PPS phase
 * and leaves it in a handy spot for the hardclock() routine. It
 * integrates successive PPS phase differences and calculates the
 * frequency offset. This is used in hardclock() to discipline the CPU
 * clock oscillator so that intrinsic frequency error is cancelled out.
 * The code requires the caller to capture the time and hardware counter
 * value at the on-time PPS signal transition.
 *
 * Note that, on some Unix systems, this routine runs at an interrupt
 * priority level higher than the timer interrupt routine hardclock().
 * Therefore, the variables used are distinct from the hardclock()
 * variables, with a few exceptions: the PPS frequency pps_freq
 * and phase pps_offset variables are determined by this routine and
 * updated atomically. The time_tolerance variable can be considered a
 * constant, since it is infrequently changed, and then only when the
 * PPS signal is disabled. The watchdog counter pps_valid is updated
 * once per second by hardclock() and is atomically cleared in this
 * routine.
 */
void
hardpps(tvp, usec)
	struct timeval *tvp;		/* time at PPS */
	long usec;			/* hardware counter at PPS */
{
	long u_usec, v_usec, bigtick;
	long cal_sec, cal_usec;

	/*
	 * An occasional glitch can be produced when the PPS interrupt
	 * occurs in the hardclock() routine before the time variable is
	 * updated. Here the offset is discarded when the difference
	 * between it and the last one is greater than tick/2, but not
	 * if the interval since the first discard exceeds 30 s.
	 */
	time_status |= STA_PPSSIGNAL;
	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
	pps_valid = 0;
	u_usec = -tvp->tv_usec;
	if (u_usec < -500000)
		u_usec += 1000000;
	v_usec = pps_offset - u_usec;
	if (v_usec < 0)
		v_usec = -v_usec;
	if (v_usec > (tick >> 1)) {
		if (pps_glitch > MAXGLITCH) {
			pps_glitch = 0;
			pps_tf[2] = u_usec;
			pps_tf[1] = u_usec;
		} else {
			pps_glitch++;
			u_usec = pps_offset;
		}
	} else
		pps_glitch = 0;

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * time.
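	 * (Illustrative note, not from the original author: a three-sample
	 * median rejects any single outlier, e.g. samples of 5, -40 and
	 * 7 us give an offset estimate of 5 us and a jitter sample of
	 * 7 - (-40) = 47 us.)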
	 * The median sample becomes the time offset estimate; the
	 * difference between the other two samples becomes the time
	 * dispersion (jitter) estimate.
	 */
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0] = u_usec;
	if (pps_tf[0] > pps_tf[1]) {
		if (pps_tf[1] > pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 0 1 2 */
			v_usec = pps_tf[0] - pps_tf[2];
		} else if (pps_tf[2] > pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 2 0 1 */
			v_usec = pps_tf[2] - pps_tf[1];
		} else {
			pps_offset = pps_tf[2];		/* 0 2 1 */
			v_usec = pps_tf[0] - pps_tf[1];
		}
	} else {
		if (pps_tf[1] < pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 2 1 0 */
			v_usec = pps_tf[2] - pps_tf[0];
		} else if (pps_tf[2] < pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 1 0 2 */
			v_usec = pps_tf[1] - pps_tf[2];
		} else {
			pps_offset = pps_tf[2];		/* 1 2 0 */
			v_usec = pps_tf[1] - pps_tf[0];
		}
	}
	if (v_usec > MAXTIME)
		pps_jitcnt++;
	v_usec = (v_usec << PPS_AVG) - pps_jitter;
	if (v_usec < 0)
		pps_jitter -= -v_usec >> PPS_AVG;
	else
		pps_jitter += v_usec >> PPS_AVG;
	if (pps_jitter > (MAXTIME >> 1))
		time_status |= STA_PPSJITTER;

	/*
	 * During the calibration interval adjust the starting time when
	 * the tick overflows. At the end of the interval compute the
	 * duration of the interval and the difference of the hardware
	 * counters at the beginning and end of the interval. This code
	 * is deliciously complicated by the fact that valid differences
	 * may exceed the value of tick when using long calibration
	 * intervals and small ticks. Note that the counter can be
	 * greater than tick if caught at just the wrong instant, but
	 * the values returned and used here are correct.
	 */
	bigtick = (long)tick << SHIFT_USEC;
	pps_usec -= pps_freq;
	if (pps_usec >= bigtick)
		pps_usec -= bigtick;
	if (pps_usec < 0)
		pps_usec += bigtick;
	pps_time.tv_sec++;
	pps_count++;
	if (pps_count < (1 << pps_shift))
		return;
	pps_count = 0;
	pps_calcnt++;
	u_usec = usec << SHIFT_USEC;
	v_usec = pps_usec - u_usec;
	if (v_usec >= bigtick >> 1)
		v_usec -= bigtick;
	if (v_usec < -(bigtick >> 1))
		v_usec += bigtick;
	if (v_usec < 0)
		v_usec = -(-v_usec >> pps_shift);
	else
		v_usec = v_usec >> pps_shift;
	pps_usec = u_usec;
	cal_sec = tvp->tv_sec;
	cal_usec = tvp->tv_usec;
	cal_sec -= pps_time.tv_sec;
	cal_usec -= pps_time.tv_usec;
	if (cal_usec < 0) {
		cal_usec += 1000000;
		cal_sec--;
	}
	pps_time = *tvp;

	/*
	 * Check for lost interrupts, noise, excessive jitter and
	 * excessive frequency error. The number of timer ticks during
	 * the interval may vary +-1 tick. Add to this a margin of one
	 * tick for the PPS signal jitter and maximum frequency
	 * deviation. If the limits are exceeded, the calibration
	 * interval is reset to the minimum and we start over.
	 */
	u_usec = (long)tick << 1;
	if (!((cal_sec == -1 && cal_usec > (1000000 - u_usec))
	    || (cal_sec == 0 && cal_usec < u_usec))
	    || v_usec > time_tolerance || v_usec < -time_tolerance) {
		pps_errcnt++;
		pps_shift = PPS_SHIFT;
		pps_intcnt = 0;
		time_status |= STA_PPSERROR;
		return;
	}

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * frequency.
	 * The median sample becomes the frequency offset
	 * estimate; the difference between the other two samples
	 * becomes the frequency dispersion (stability) estimate.
	 */
	pps_ff[2] = pps_ff[1];
	pps_ff[1] = pps_ff[0];
	pps_ff[0] = v_usec;
	if (pps_ff[0] > pps_ff[1]) {
		if (pps_ff[1] > pps_ff[2]) {
			u_usec = pps_ff[1];		/* 0 1 2 */
			v_usec = pps_ff[0] - pps_ff[2];
		} else if (pps_ff[2] > pps_ff[0]) {
			u_usec = pps_ff[0];		/* 2 0 1 */
			v_usec = pps_ff[2] - pps_ff[1];
		} else {
			u_usec = pps_ff[2];		/* 0 2 1 */
			v_usec = pps_ff[0] - pps_ff[1];
		}
	} else {
		if (pps_ff[1] < pps_ff[2]) {
			u_usec = pps_ff[1];		/* 2 1 0 */
			v_usec = pps_ff[2] - pps_ff[0];
		} else if (pps_ff[2] < pps_ff[0]) {
			u_usec = pps_ff[0];		/* 1 0 2 */
			v_usec = pps_ff[1] - pps_ff[2];
		} else {
			u_usec = pps_ff[2];		/* 1 2 0 */
			v_usec = pps_ff[1] - pps_ff[0];
		}
	}

	/*
	 * Here the frequency dispersion (stability) is updated. If it
	 * is less than one-fourth the maximum (MAXFREQ), the frequency
	 * offset is updated as well, but clamped to the tolerance. It
	 * will be processed later by the hardclock() routine.
	 */
	v_usec = (v_usec >> 1) - pps_stabil;
	if (v_usec < 0)
		pps_stabil -= -v_usec >> PPS_AVG;
	else
		pps_stabil += v_usec >> PPS_AVG;
	if (pps_stabil > MAXFREQ >> 2) {
		pps_stbcnt++;
		time_status |= STA_PPSWANDER;
		return;
	}
	if (time_status & STA_PPSFREQ) {
		if (u_usec < 0) {
			pps_freq -= -u_usec >> PPS_AVG;
			if (pps_freq < -time_tolerance)
				pps_freq = -time_tolerance;
			u_usec = -u_usec;
		} else {
			pps_freq += u_usec >> PPS_AVG;
			if (pps_freq > time_tolerance)
				pps_freq = time_tolerance;
		}
	}

	/*
	 * Here the calibration interval is adjusted. If the maximum
	 * time difference is greater than tick / 4, reduce the interval
	 * by half. If this is not the case for four consecutive
	 * intervals, double the interval.
	 */
	if (u_usec << pps_shift > bigtick >> 2) {
		pps_intcnt = 0;
		if (pps_shift > PPS_SHIFT)
			pps_shift--;
	} else if (pps_intcnt >= 4) {
		pps_intcnt = 0;
		if (pps_shift < PPS_SHIFTMAX)
			pps_shift++;
	} else
		pps_intcnt++;
}
#endif /* PPS_SYNC */
#endif /* NTP */


/*
 * Return information about system clocks.
 */
int
sysctl_clockrate(where, sizep)
	register char *where;
	size_t *sizep;
{
	struct clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.tick = tick;
	clkinfo.tickadj = tickadj;
	clkinfo.hz = hz;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_rdstruct(where, sizep, NULL, &clkinfo, sizeof(clkinfo)));
}