/*	$OpenBSD: kern_tc.c,v 1.52 2019/12/02 02:24:29 cheloha Exp $ */

/*
 * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * If we meet some day, and you think this stuff is worth it, you
 * can buy me a beer in return.  Poul-Henning Kamp
 */

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/stdint.h>
#include <sys/timeout.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/timetc.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <dev/rndvar.h>

/*
 * A large step happens on boot.  This constant detects such steps.
 * It is relatively small so that ntp_update_second gets called enough
 * in the typical 'missed a couple of seconds' case, but doesn't loop
 * forever when the time step is large.
 */
#define LARGE_STEP	200

u_int dummy_get_timecount(struct timecounter *);

int sysctl_tc_hardware(void *, size_t *, void *, size_t);
int sysctl_tc_choice(void *, size_t *, void *, size_t);

/*
 * Implement a dummy timecounter which we can use until we get a real one
 * in the air.  This allows the console and other early stuff to use
 * time services.
 */

u_int
dummy_get_timecount(struct timecounter *tc)
{
        static u_int now;

        return (++now);
}

static struct timecounter dummy_timecounter = {
        dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000
};

/*
 * Locks used to protect struct members, global variables in this file:
 *	I	immutable after initialization
 *	t	tc_lock
 *	w	windup_mtx
 */

struct timehands {
        /* These fields must be initialized by the driver. */
        struct timecounter      *th_counter;            /* [w] */
        int64_t                 th_adjtimedelta;        /* [tw] */
        int64_t                 th_adjustment;          /* [w] */
        u_int64_t               th_scale;               /* [w] */
        u_int                   th_offset_count;        /* [w] */
        struct bintime          th_boottime;            /* [tw] */
        struct bintime          th_offset;              /* [w] */
        struct timeval          th_microtime;           /* [w] */
        struct timespec         th_nanotime;            /* [w] */
        /* Fields not to be copied in tc_windup start with th_generation. */
        volatile u_int          th_generation;          /* [w] */
        struct timehands        *th_next;               /* [I] */
};

static struct timehands th0;
static struct timehands th1 = {
        .th_next = &th0
};
static struct timehands th0 = {
        .th_counter = &dummy_timecounter,
        .th_scale = UINT64_MAX / 1000000,
        .th_offset = { .sec = 1, .frac = 0 },
        .th_generation = 1,
        .th_next = &th1
};

struct rwlock tc_lock = RWLOCK_INITIALIZER("tc_lock");
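
/*
 * Readers of the timehands use a lockless generation protocol rather
 * than taking a lock: tc_windup() zeroes th_generation while it
 * rewrites the "next" timehands and publishes a new nonzero generation
 * when it is done.  A reader snapshots the generation, copies the
 * fields it needs, and retries if the generation was zero or changed
 * underneath it.  The pattern, as it appears throughout this file:
 *
 *	do {
 *		th = timehands;
 *		gen = th->th_generation;
 *		membar_consumer();
 *		(copy the fields of interest out of *th)
 *		membar_consumer();
 *	} while (gen == 0 || gen != th->th_generation);
 */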

/*
 * tc_windup() must be called before leaving this mutex.
 */
struct mutex windup_mtx = MUTEX_INITIALIZER(IPL_CLOCK);

static struct timehands *volatile timehands = &th0;	/* [w] */
struct timecounter *timecounter = &dummy_timecounter;	/* [t] */
static SLIST_HEAD(, timecounter) tc_list = SLIST_HEAD_INITIALIZER(tc_list);

volatile time_t time_second = 1;
volatile time_t time_uptime = 0;

struct bintime naptime;
static int timestepwarnings;

void ntp_update_second(struct timehands *);
void tc_windup(struct bintime *, struct bintime *, int64_t *);

/*
 * Return the difference between the timehands' counter value now and what
 * was when we copied it to the timehands' offset_count.
 */
static __inline u_int
tc_delta(struct timehands *th)
{
        struct timecounter *tc;

        tc = th->th_counter;
        return ((tc->tc_get_timecount(tc) - th->th_offset_count) &
            tc->tc_counter_mask);
}

/*
 * Functions for reading the time.  We have to loop until we are sure that
 * the timehands that we operated on was not updated under our feet.  See
 * the comment in <sys/time.h> for a description of these functions.
 */

void
binboottime(struct bintime *bt)
{
        struct timehands *th;
        u_int gen;

        do {
                th = timehands;
                gen = th->th_generation;
                membar_consumer();
                *bt = th->th_boottime;
                membar_consumer();
        } while (gen == 0 || gen != th->th_generation);
}

void
microboottime(struct timeval *tvp)
{
        struct bintime bt;

        binboottime(&bt);
        BINTIME_TO_TIMEVAL(&bt, tvp);
}

void
nanoboottime(struct timespec *tsp)
{
        struct bintime bt;

        binboottime(&bt);
        BINTIME_TO_TIMESPEC(&bt, tsp);
}

void
binuptime(struct bintime *bt)
{
        struct timehands *th;
        u_int gen;

        do {
                th = timehands;
                gen = th->th_generation;
                membar_consumer();
                *bt = th->th_offset;
                bintimeaddfrac(bt, th->th_scale * tc_delta(th), bt);
                membar_consumer();
        } while (gen == 0 || gen != th->th_generation);
}

void
nanouptime(struct timespec *tsp)
{
        struct bintime bt;

        binuptime(&bt);
        BINTIME_TO_TIMESPEC(&bt, tsp);
}

void
microuptime(struct timeval *tvp)
{
        struct bintime bt;

        binuptime(&bt);
        BINTIME_TO_TIMEVAL(&bt, tvp);
}

void
bintime(struct bintime *bt)
{
        struct timehands *th;
        u_int gen;

        do {
                th = timehands;
                gen = th->th_generation;
                membar_consumer();
                *bt = th->th_offset;
                bintimeaddfrac(bt, th->th_scale * tc_delta(th), bt);
                bintimeadd(bt, &th->th_boottime, bt);
                membar_consumer();
        } while (gen == 0 || gen != th->th_generation);
}

void
nanotime(struct timespec *tsp)
{
        struct bintime bt;

        bintime(&bt);
        BINTIME_TO_TIMESPEC(&bt, tsp);
}

void
microtime(struct timeval *tvp)
{
        struct bintime bt;

        bintime(&bt);
        BINTIME_TO_TIMEVAL(&bt, tvp);
}

void
getnanouptime(struct timespec *tsp)
{
        struct timehands *th;
        u_int gen;

        do {
                th = timehands;
                gen = th->th_generation;
                membar_consumer();
                BINTIME_TO_TIMESPEC(&th->th_offset, tsp);
                membar_consumer();
        } while (gen == 0 || gen != th->th_generation);
}

void
getmicrouptime(struct timeval *tvp)
{
        struct timehands *th;
        u_int gen;

        do {
                th = timehands;
                gen = th->th_generation;
                membar_consumer();
                BINTIME_TO_TIMEVAL(&th->th_offset, tvp);
                membar_consumer();
        } while (gen == 0 || gen != th->th_generation);
}
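
/*
 * A quick guide to the time-reading functions in this file: the prefix
 * picks the output format (bin = struct bintime, nano = struct
 * timespec, micro = struct timeval) and the rest picks the clock
 * (boottime, uptime, or absolute UTC time).  The get*() variants
 * return the value cached at the last tc_windup() call instead of
 * reading the hardware counter, so they are cheaper but only as
 * precise as the update frequency (see the comment above tc_tick
 * below).  An illustrative sketch:
 *
 *	struct timespec now, uptime;
 *
 *	nanotime(&now);		reads the hardware counter, UTC
 *	getnanouptime(&uptime);	cached uptime, cheap but coarse
 */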

void
getnanotime(struct timespec *tsp)
{
        struct timehands *th;
        u_int gen;

        do {
                th = timehands;
                gen = th->th_generation;
                membar_consumer();
                *tsp = th->th_nanotime;
                membar_consumer();
        } while (gen == 0 || gen != th->th_generation);
}

void
getmicrotime(struct timeval *tvp)
{
        struct timehands *th;
        u_int gen;

        do {
                th = timehands;
                gen = th->th_generation;
                membar_consumer();
                *tvp = th->th_microtime;
                membar_consumer();
        } while (gen == 0 || gen != th->th_generation);
}

/*
 * Initialize a new timecounter and possibly use it.
 */
void
tc_init(struct timecounter *tc)
{
        u_int64_t tmp;
        u_int u;

        u = tc->tc_frequency / tc->tc_counter_mask;
        /* XXX: We need some margin here, 10% is a guess */
        u *= 11;
        u /= 10;
        if (tc->tc_quality >= 0) {
                if (u > hz) {
                        tc->tc_quality = -2000;
                        printf("Timecounter \"%s\" frequency %lu Hz",
                            tc->tc_name, (unsigned long)tc->tc_frequency);
                        printf(" -- Insufficient hz, needs at least %u\n", u);
                }
        }

        /* Determine the counter's precision. */
        for (tmp = 1; (tmp & tc->tc_counter_mask) == 0; tmp <<= 1)
                continue;
        tc->tc_precision = tmp;

        SLIST_INSERT_HEAD(&tc_list, tc, tc_next);

        /*
         * Never automatically use a timecounter with negative quality.
         * Even though we run on the dummy counter, switching here may be
         * worse since this timecounter may not be monotonic.
         */
        if (tc->tc_quality < 0)
                return;
        if (tc->tc_quality < timecounter->tc_quality)
                return;
        if (tc->tc_quality == timecounter->tc_quality &&
            tc->tc_frequency < timecounter->tc_frequency)
                return;
        (void)tc->tc_get_timecount(tc);
        enqueue_randomness(tc->tc_get_timecount(tc));

        timecounter = tc;
}

/* Report the frequency of the current timecounter. */
u_int64_t
tc_getfrequency(void)
{
        return (timehands->th_counter->tc_frequency);
}

/* Report the precision of the current timecounter. */
u_int64_t
tc_getprecision(void)
{
        return (timehands->th_counter->tc_precision);
}

/*
 * Step our concept of UTC, aka the realtime clock.
 * This is done by modifying our estimate of when we booted.
 *
 * Any ongoing adjustment is meaningless after a clock jump,
 * so we zero adjtimedelta here as well.
 */
void
tc_setrealtimeclock(const struct timespec *ts)
{
        struct timespec ts2;
        struct bintime bt, bt2;
        int64_t zero = 0;

        rw_enter_write(&tc_lock);
        mtx_enter(&windup_mtx);
        binuptime(&bt2);
        TIMESPEC_TO_BINTIME(ts, &bt);
        bintimesub(&bt, &bt2, &bt);
        bintimeadd(&bt2, &timehands->th_boottime, &bt2);

        /* XXX fiddle all the little crinkly bits around the fiords... */
        tc_windup(&bt, NULL, &zero);
        mtx_leave(&windup_mtx);
        rw_exit_write(&tc_lock);

        enqueue_randomness(ts->tv_sec);

        if (timestepwarnings) {
                BINTIME_TO_TIMESPEC(&bt2, &ts2);
                log(LOG_INFO, "Time stepped from %lld.%09ld to %lld.%09ld\n",
                    (long long)ts2.tv_sec, ts2.tv_nsec,
                    (long long)ts->tv_sec, ts->tv_nsec);
        }
}
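
/*
 * A worked example for tc_setrealtimeclock() above: stepping the
 * realtime clock only moves our estimate of the boot time
 * (new boottime = new UTC time - uptime).  If the system has been up
 * 100 seconds and the clock is set to T, the new boot time is T - 100;
 * binuptime() is unaffected, while bintime() (uptime plus boot time)
 * immediately reports T.
 */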

/*
 * Step the monotonic and realtime clocks, triggering any timeouts that
 * should have occurred across the interval.
 */
void
tc_setclock(const struct timespec *ts)
{
        struct bintime bt, bt2;
        struct timespec earlier;
        static int first = 1;
        int rewind = 0;
#ifndef SMALL_KERNEL
        long long adj_ticks;
#endif

        /*
         * When we're called for the first time, during boot when
         * the root partition is mounted, we need to set boottime.
         */
        if (first) {
                tc_setrealtimeclock(ts);
                first = 0;
                return;
        }

        enqueue_randomness(ts->tv_sec);

        mtx_enter(&windup_mtx);
        TIMESPEC_TO_BINTIME(ts, &bt);
        bintimesub(&bt, &timehands->th_boottime, &bt);

        /*
         * Don't rewind the offset.
         */
        if (bintimecmp(&bt, &timehands->th_offset, <))
                rewind = 1;

        bt2 = timehands->th_offset;

        /* XXX fiddle all the little crinkly bits around the fiords... */
        tc_windup(NULL, rewind ? NULL : &bt, NULL);
        mtx_leave(&windup_mtx);

        if (rewind) {
                BINTIME_TO_TIMESPEC(&bt, &earlier);
                printf("%s: cannot rewind uptime to %lld.%09ld\n",
                    __func__, (long long)earlier.tv_sec, earlier.tv_nsec);
                return;
        }

#ifndef SMALL_KERNEL
        /* convert the bintime to ticks */
        bintimesub(&bt, &bt2, &bt);
        bintimeadd(&naptime, &bt, &naptime);
        adj_ticks = (uint64_t)hz * bt.sec +
            (((uint64_t)1000000 * (uint32_t)(bt.frac >> 32)) >> 32) / tick;
        if (adj_ticks > 0) {
                if (adj_ticks > INT_MAX)
                        adj_ticks = INT_MAX;
                ticks += adj_ticks;
        }
#endif
}

/*
 * Initialize the next struct timehands in the ring and make
 * it the active timehands.  Along the way we might switch to a different
 * timecounter and/or do seconds processing in NTP.  Slightly magic.
 */
void
tc_windup(struct bintime *new_boottime, struct bintime *new_offset,
    int64_t *new_adjtimedelta)
{
        struct bintime bt;
        struct timecounter *active_tc;
        struct timehands *th, *tho;
        int64_t counter_adjustment;
        u_int64_t scale;
        u_int delta, ncount, ogen;
        int i;

        if (new_boottime != NULL || new_adjtimedelta != NULL)
                rw_assert_wrlock(&tc_lock);
        MUTEX_ASSERT_LOCKED(&windup_mtx);

        active_tc = timecounter;

        /*
         * Make the next timehands a copy of the current one, but do not
         * overwrite the generation or next pointer.  While we update
         * the contents, the generation must be zero.
         */
        tho = timehands;
        th = tho->th_next;
        ogen = th->th_generation;
        th->th_generation = 0;
        membar_producer();
        memcpy(th, tho, offsetof(struct timehands, th_generation));

        /*
         * If changing the boot offset, do so before updating the
         * offset fields.
         */
        if (new_offset != NULL)
                th->th_offset = *new_offset;

        /*
         * Capture a timecounter delta on the current timecounter and if
         * changing timecounters, a counter value from the new timecounter.
         * Update the offset fields accordingly.
         */
        delta = tc_delta(th);
        if (th->th_counter != active_tc)
                ncount = active_tc->tc_get_timecount(active_tc);
        else
                ncount = 0;
        th->th_offset_count += delta;
        th->th_offset_count &= th->th_counter->tc_counter_mask;
        bintimeaddfrac(&th->th_offset, th->th_scale * delta, &th->th_offset);
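
        /*
         * A note on the arithmetic above: th_scale is the number of
         * 2^-64 fractions of a second per counter tick (roughly
         * 2^64 / tc_frequency; see the scale recalculation below), so
         * th_scale * delta is the elapsed time as a 64-bit binary
         * fraction of a second, which bintimeaddfrac() folds into
         * th_offset.  With a hypothetical 1 MHz counter, for example,
         * a delta of 500000 ticks advances th_offset by ~0.5 s.
         */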

#ifdef notyet
        /*
         * Hardware latching timecounters may not generate interrupts on
         * PPS events, so instead we poll them.  There is a finite risk that
         * the hardware might capture a count which is later than the one we
         * got above, and therefore possibly in the next NTP second which
         * might have a different rate than the current NTP second.  It
         * doesn't matter in practice.
         */
        if (tho->th_counter->tc_poll_pps)
                tho->th_counter->tc_poll_pps(tho->th_counter);
#endif

        /*
         * If changing the boot time or clock adjustment, do so before
         * NTP processing.
         */
        if (new_boottime != NULL)
                th->th_boottime = *new_boottime;
        if (new_adjtimedelta != NULL)
                th->th_adjtimedelta = *new_adjtimedelta;

        /*
         * Deal with NTP second processing.  The for loop normally
         * iterates at most once, but in extreme situations it might
         * keep NTP sane if timeouts are not run for several seconds.
         * At boot, the time step can be large when the TOD hardware
         * has been read, so on really large steps, we call
         * ntp_update_second only twice.  We need to call it twice in
         * case we missed a leap second.
         */
        bt = th->th_offset;
        bintimeadd(&bt, &th->th_boottime, &bt);
        i = bt.sec - tho->th_microtime.tv_sec;
        if (i > LARGE_STEP)
                i = 2;
        for (; i > 0; i--)
                ntp_update_second(th);

        /* Update the UTC timestamps used by the get*() functions. */
        /* XXX shouldn't do this here.  Should force non-`get' versions. */
        BINTIME_TO_TIMEVAL(&bt, &th->th_microtime);
        BINTIME_TO_TIMESPEC(&bt, &th->th_nanotime);

        /* Now is a good time to change timecounters. */
        if (th->th_counter != active_tc) {
                th->th_counter = active_tc;
                th->th_offset_count = ncount;
        }

        /*-
         * Recalculate the scaling factor.  We want the number of 1/2^64
         * fractions of a second per period of the hardware counter, taking
         * into account the th_adjustment factor which the NTP PLL/adjtime(2)
         * processing provides us with.
         *
         * The th_adjustment is nanoseconds per second with 32 bit binary
         * fraction and we want 64 bit binary fraction of second:
         *
         *	 x = a * 2^32 / 10^9 = a * 4.294967296
         *
         * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
         * we can only multiply by about 850 without overflowing, but that
         * leaves suitably precise fractions for multiply before divide.
         *
         * Divide before multiply with a fraction of 2199/512 results in a
         * systematic undercompensation of 10PPM of th_adjustment.  On a
         * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
         *
         * We happily sacrifice the lowest of the 64 bits of our result
         * to the goddess of code clarity.
         *
         */
        scale = (u_int64_t)1 << 63;
        counter_adjustment = th->th_counter->tc_freq_adj;
        scale += ((th->th_adjustment + counter_adjustment) / 1024) * 2199;
        scale /= th->th_counter->tc_frequency;
        th->th_scale = scale * 2;

        /*
         * Now that the struct timehands is again consistent, set the new
         * generation number, making sure to not make it zero.
         */
        if (++ogen == 0)
                ogen = 1;
        membar_producer();
        th->th_generation = ogen;

        /* Go live with the new struct timehands. */
        time_second = th->th_microtime.tv_sec;
        time_uptime = th->th_offset.sec;
        membar_producer();
        timehands = th;
}
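
/*
 * Checking the 2199/512 approximation used in tc_windup() above: the
 * code adds (adjustment / 1024) * 2199 and later doubles the scale,
 * for an effective factor of 2199/512 = 4.294921875 versus the exact
 * 2^32 / 10^9 = 4.294967296.  The shortfall is about 1.06e-5 of the
 * adjustment, i.e. the ~10PPM undercompensation the comment cites.
 */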

/* Report or change the active timecounter hardware. */
int
sysctl_tc_hardware(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
        char newname[32];
        struct timecounter *newtc, *tc;
        int error;

        tc = timecounter;
        strlcpy(newname, tc->tc_name, sizeof(newname));

        error = sysctl_string(oldp, oldlenp, newp, newlen, newname,
            sizeof(newname));
        if (error != 0 || strcmp(newname, tc->tc_name) == 0)
                return (error);
        SLIST_FOREACH(newtc, &tc_list, tc_next) {
                if (strcmp(newname, newtc->tc_name) != 0)
                        continue;

                /* Warm up new timecounter. */
                (void)newtc->tc_get_timecount(newtc);
                (void)newtc->tc_get_timecount(newtc);

                rw_enter_write(&tc_lock);
                timecounter = newtc;
                rw_exit_write(&tc_lock);

                return (0);
        }
        return (EINVAL);
}

/* Report the available timecounter hardware. */
int
sysctl_tc_choice(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
        char buf[32], *spc, *choices;
        struct timecounter *tc;
        int error, maxlen;

        if (SLIST_EMPTY(&tc_list))
                return (sysctl_rdstring(oldp, oldlenp, newp, ""));

        spc = "";
        maxlen = 0;
        SLIST_FOREACH(tc, &tc_list, tc_next)
                maxlen += sizeof(buf);
        choices = malloc(maxlen, M_TEMP, M_WAITOK);
        *choices = '\0';
        SLIST_FOREACH(tc, &tc_list, tc_next) {
                snprintf(buf, sizeof(buf), "%s%s(%d)",
                    spc, tc->tc_name, tc->tc_quality);
                spc = " ";
                strlcat(choices, buf, maxlen);
        }
        error = sysctl_rdstring(oldp, oldlenp, newp, choices);
        free(choices, M_TEMP, maxlen);
        return (error);
}

/*
 * Timecounters need to be updated every so often to prevent the hardware
 * counter from overflowing.  Updating also recalculates the cached values
 * used by the get*() family of functions, so their precision depends on
 * the update frequency.
 */
static int tc_tick;

void
tc_ticktock(void)
{
        static int count;

        if (++count < tc_tick)
                return;
        if (!mtx_enter_try(&windup_mtx))
                return;
        count = 0;
        tc_windup(NULL, NULL, NULL);
        mtx_leave(&windup_mtx);
}

void
inittimecounter(void)
{
#ifdef DEBUG
        u_int p;
#endif

        /*
         * Set the initial timeout to
         * max(1, <approx. number of hardclock ticks in a millisecond>).
         * People should probably not use the sysctl to set the timeout
         * to smaller than its initial value, since that value is the
         * smallest reasonable one.  If they want better timestamps they
         * should use the non-"get"* functions.
         */
        if (hz > 1000)
                tc_tick = (hz + 500) / 1000;
        else
                tc_tick = 1;
#ifdef DEBUG
        p = (tc_tick * 1000000) / hz;
        printf("Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000);
#endif

        /* warm up new timecounter (again) and get rolling. */
        (void)timecounter->tc_get_timecount(timecounter);
        (void)timecounter->tc_get_timecount(timecounter);
}
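
/*
 * The handlers above and below back the kern.timecounter.* sysctl(2)
 * nodes.  For example (output is illustrative; the counter names
 * depend on the hardware present):
 *
 *	$ sysctl kern.timecounter
 *	kern.timecounter.tick=1
 *	kern.timecounter.timestepwarnings=0
 *	kern.timecounter.hardware=tsc
 *	kern.timecounter.choice=i8254(0) tsc(2000) acpitimer0(1000)
 *
 *	# sysctl kern.timecounter.hardware=acpitimer0
 */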

/*
 * Return timecounter-related information.
 */
int
sysctl_tc(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
        if (namelen != 1)
                return (ENOTDIR);

        switch (name[0]) {
        case KERN_TIMECOUNTER_TICK:
                return (sysctl_rdint(oldp, oldlenp, newp, tc_tick));
        case KERN_TIMECOUNTER_TIMESTEPWARNINGS:
                return (sysctl_int(oldp, oldlenp, newp, newlen,
                    &timestepwarnings));
        case KERN_TIMECOUNTER_HARDWARE:
                return (sysctl_tc_hardware(oldp, oldlenp, newp, newlen));
        case KERN_TIMECOUNTER_CHOICE:
                return (sysctl_tc_choice(oldp, oldlenp, newp, newlen));
        default:
                return (EOPNOTSUPP);
        }
        /* NOTREACHED */
}

/*
 * Skew the timehands according to any adjtime(2) adjustment.
 */
void
ntp_update_second(struct timehands *th)
{
        int64_t adj;

        MUTEX_ASSERT_LOCKED(&windup_mtx);

        if (th->th_adjtimedelta > 0)
                adj = MIN(5000, th->th_adjtimedelta);
        else
                adj = MAX(-5000, th->th_adjtimedelta);
        th->th_adjtimedelta -= adj;
        th->th_adjustment = (adj * 1000) << 32;
}

void
tc_adjfreq(int64_t *old, int64_t *new)
{
        if (old != NULL) {
                rw_assert_anylock(&tc_lock);
                *old = timecounter->tc_freq_adj;
        }
        if (new != NULL) {
                rw_assert_wrlock(&tc_lock);
                mtx_enter(&windup_mtx);
                timecounter->tc_freq_adj = *new;
                tc_windup(NULL, NULL, NULL);
                mtx_leave(&windup_mtx);
        }
}

void
tc_adjtime(int64_t *old, int64_t *new)
{
        struct timehands *th;
        u_int gen;

        if (old != NULL) {
                do {
                        th = timehands;
                        gen = th->th_generation;
                        membar_consumer();
                        *old = th->th_adjtimedelta;
                        membar_consumer();
                } while (gen == 0 || gen != th->th_generation);
        }
        if (new != NULL) {
                rw_assert_wrlock(&tc_lock);
                mtx_enter(&windup_mtx);
                tc_windup(NULL, NULL, new);
                mtx_leave(&windup_mtx);
        }
}
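
/*
 * A worked example for ntp_update_second() above: th_adjtimedelta is
 * the outstanding adjtime(2) delta in microseconds.  Each second at
 * most 5000 microseconds of it are consumed, i.e. the clock slews at
 * up to 5000PPM (matching the th_adjustment range noted in
 * tc_windup()), so an adjtime(2) of a full second completes in about
 * 200 seconds.  The consumed slice becomes th_adjustment, nanoseconds
 * per second with a 32-bit binary fraction: (adj * 1000) << 32.
 */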