/*	$OpenBSD: kern_tc.c,v 1.55 2019/12/12 19:30:21 cheloha Exp $ */

/*
 * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * If we meet some day, and you think this stuff is worth it, you
 * can buy me a beer in return.  Poul-Henning Kamp
 */

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/stdint.h>
#include <sys/timeout.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/timetc.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <dev/rndvar.h>

/*
 * A large step happens on boot.  This constant detects such steps.
 * It is relatively small so that ntp_update_second gets called enough
 * in the typical 'missed a couple of seconds' case, but doesn't loop
 * forever when the time step is large.
 */
#define LARGE_STEP	200

u_int dummy_get_timecount(struct timecounter *);

int sysctl_tc_hardware(void *, size_t *, void *, size_t);
int sysctl_tc_choice(void *, size_t *, void *, size_t);

/*
 * Implement a dummy timecounter which we can use until we get a real one
 * in the air.  This allows the console and other early stuff to use
 * time services.
 */

u_int
dummy_get_timecount(struct timecounter *tc)
{
	static u_int now;

	return (++now);
}

static struct timecounter dummy_timecounter = {
	dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000
};

/*
 * Locks used to protect struct members, global variables in this file:
 *	I	immutable after initialization
 *	t	tc_lock
 *	w	windup_mtx
 */

struct timehands {
	/* These fields must be initialized by the driver. */
	struct timecounter	*th_counter;		/* [w] */
	int64_t			th_adjtimedelta;	/* [tw] */
	int64_t			th_adjustment;		/* [w] */
	u_int64_t		th_scale;		/* [w] */
	u_int			th_offset_count;	/* [w] */
	struct bintime		th_boottime;		/* [tw] */
	struct bintime		th_offset;		/* [w] */
	struct timeval		th_microtime;		/* [w] */
	struct timespec		th_nanotime;		/* [w] */
	/* Fields not to be copied in tc_windup start with th_generation. */
	volatile u_int		th_generation;		/* [w] */
	struct timehands	*th_next;		/* [I] */
};

static struct timehands th0;
static struct timehands th1 = {
	.th_next = &th0
};
static struct timehands th0 = {
	.th_counter = &dummy_timecounter,
	.th_scale = UINT64_MAX / 1000000,
	.th_offset = { .sec = 1, .frac = 0 },
	.th_generation = 1,
	.th_next = &th1
};

struct rwlock tc_lock = RWLOCK_INITIALIZER("tc_lock");

/*
 * tc_windup() must be called before leaving this mutex.
 */
struct mutex windup_mtx = MUTEX_INITIALIZER(IPL_CLOCK);
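/*
 * Illustrative sketch (not compiled in): code that steps the clock is
 * expected to take tc_lock before windup_mtx and to call tc_windup()
 * before releasing windup_mtx:
 *
 *	rw_enter_write(&tc_lock);
 *	mtx_enter(&windup_mtx);
 *	(compute the new boottime/offset/adjtimedelta)
 *	tc_windup(&new_boottime, NULL, &zero);
 *	mtx_leave(&windup_mtx);
 *	rw_exit_write(&tc_lock);
 *
 * tc_setrealtimeclock() below follows exactly this pattern; readers
 * never take either lock, they spin on th_generation instead.
 */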
static struct timehands *volatile timehands = &th0;		/* [w] */
struct timecounter *timecounter = &dummy_timecounter;		/* [t] */
static SLIST_HEAD(, timecounter) tc_list = SLIST_HEAD_INITIALIZER(tc_list);

volatile time_t time_second = 1;
volatile time_t time_uptime = 0;

struct bintime naptime;
static int timestepwarnings;

void ntp_update_second(struct timehands *);
void tc_windup(struct bintime *, struct bintime *, int64_t *);

/*
 * Return the difference between the timehands' counter value now and what
 * was when we copied it to the timehands' offset_count.
 */
static __inline u_int
tc_delta(struct timehands *th)
{
	struct timecounter *tc;

	tc = th->th_counter;
	return ((tc->tc_get_timecount(tc) - th->th_offset_count) &
	    tc->tc_counter_mask);
}

/*
 * Functions for reading the time.  We have to loop until we are sure that
 * the timehands that we operated on was not updated under our feet.  See
 * the comment in <sys/time.h> for a description of these functions.
 */

void
binboottime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		membar_consumer();
		*bt = th->th_boottime;
		membar_consumer();
	} while (gen == 0 || gen != th->th_generation);
}

void
microboottime(struct timeval *tvp)
{
	struct bintime bt;

	binboottime(&bt);
	BINTIME_TO_TIMEVAL(&bt, tvp);
}

void
nanoboottime(struct timespec *tsp)
{
	struct bintime bt;

	binboottime(&bt);
	BINTIME_TO_TIMESPEC(&bt, tsp);
}

void
binuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		membar_consumer();
		*bt = th->th_offset;
		bintimeaddfrac(bt, th->th_scale * tc_delta(th), bt);
		membar_consumer();
	} while (gen == 0 || gen != th->th_generation);
}

void
nanouptime(struct timespec *tsp)
{
	struct bintime bt;

	binuptime(&bt);
	BINTIME_TO_TIMESPEC(&bt, tsp);
}

void
microuptime(struct timeval *tvp)
{
	struct bintime bt;

	binuptime(&bt);
	BINTIME_TO_TIMEVAL(&bt, tvp);
}

void
bintime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		membar_consumer();
		*bt = th->th_offset;
		bintimeaddfrac(bt, th->th_scale * tc_delta(th), bt);
		bintimeadd(bt, &th->th_boottime, bt);
		membar_consumer();
	} while (gen == 0 || gen != th->th_generation);
}

void
nanotime(struct timespec *tsp)
{
	struct bintime bt;

	bintime(&bt);
	BINTIME_TO_TIMESPEC(&bt, tsp);
}

void
microtime(struct timeval *tvp)
{
	struct bintime bt;

	bintime(&bt);
	BINTIME_TO_TIMEVAL(&bt, tvp);
}

void
getnanouptime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		membar_consumer();
		BINTIME_TO_TIMESPEC(&th->th_offset, tsp);
		membar_consumer();
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrouptime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		membar_consumer();
		BINTIME_TO_TIMEVAL(&th->th_offset, tvp);
		membar_consumer();
	} while (gen == 0 || gen != th->th_generation);
}

void
getnanotime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		membar_consumer();
		*tsp = th->th_nanotime;
		membar_consumer();
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrotime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		membar_consumer();
		*tvp = th->th_microtime;
		membar_consumer();
	} while (gen == 0 || gen != th->th_generation);
}
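/*
 * Usage sketch (illustrative, not compiled in): the get*() variants
 * above return the value cached at the last tc_windup() call and only
 * pay for a generation-checked copy, while binuptime() and friends
 * also read the hardware counter.  Code timing a short interval wants
 * the precise variant:
 *
 *	struct timespec start, end, diff;
 *
 *	nanouptime(&start);
 *	(do the work being measured)
 *	nanouptime(&end);
 *	timespecsub(&end, &start, &diff);
 *
 * With getnanouptime() the result would be no finer than the
 * tc_windup() interval (see tc_tick below).
 */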
/*
 * Initialize a new timecounter and possibly use it.
 */
void
tc_init(struct timecounter *tc)
{
	u_int64_t tmp;
	u_int u;

	u = tc->tc_frequency / tc->tc_counter_mask;
	/* XXX: We need some margin here, 10% is a guess */
	u *= 11;
	u /= 10;
	if (tc->tc_quality >= 0) {
		if (u > hz) {
			tc->tc_quality = -2000;
			printf("Timecounter \"%s\" frequency %lu Hz",
			    tc->tc_name, (unsigned long)tc->tc_frequency);
			printf(" -- Insufficient hz, needs at least %u\n", u);
		}
	}

	/* Determine the counter's precision. */
	for (tmp = 1; (tmp & tc->tc_counter_mask) == 0; tmp <<= 1)
		continue;
	tc->tc_precision = tmp;

	SLIST_INSERT_HEAD(&tc_list, tc, tc_next);

	/*
	 * Never automatically use a timecounter with negative quality.
	 * Even though we run on the dummy counter, switching here may be
	 * worse since this timecounter may not be monotonic.
	 */
	if (tc->tc_quality < 0)
		return;
	if (tc->tc_quality < timecounter->tc_quality)
		return;
	if (tc->tc_quality == timecounter->tc_quality &&
	    tc->tc_frequency < timecounter->tc_frequency)
		return;
	(void)tc->tc_get_timecount(tc);
	enqueue_randomness(tc->tc_get_timecount(tc));

	timecounter = tc;
}
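/*
 * Registration sketch (hypothetical driver, not compiled in): a driver
 * exposing a free-running 32-bit counter at 10 MHz would hand it to
 * tc_init() roughly as follows.  "mydev" and mydev_get_timecount() are
 * made-up names; the initializer order matches dummy_timecounter above:
 *
 *	u_int
 *	mydev_get_timecount(struct timecounter *tc)
 *	{
 *		return (read of the hardware count register);
 *	}
 *
 *	static struct timecounter mydev_timecounter = {
 *		mydev_get_timecount,	(tc_get_timecount)
 *		0,			(tc_poll_pps, unused)
 *		0xffffffff,		(tc_counter_mask)
 *		10000000,		(tc_frequency)
 *		"mydev",		(tc_name)
 *		1000,			(tc_quality)
 *	};
 *
 *	tc_init(&mydev_timecounter);
 *
 * tc_init() then computes tc_precision and switches to the counter if
 * its quality beats the current one.
 */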
/* Report the frequency of the current timecounter. */
u_int64_t
tc_getfrequency(void)
{
	return (timehands->th_counter->tc_frequency);
}

/* Report the precision of the current timecounter. */
u_int64_t
tc_getprecision(void)
{
	return (timehands->th_counter->tc_precision);
}

/*
 * Step our concept of UTC, aka the realtime clock.
 * This is done by modifying our estimate of when we booted.
 *
 * Any ongoing adjustment is meaningless after a clock jump,
 * so we zero adjtimedelta here as well.
 */
void
tc_setrealtimeclock(const struct timespec *ts)
{
	struct timespec ts2;
	struct bintime bt, bt2;
	int64_t zero = 0;

	rw_enter_write(&tc_lock);
	mtx_enter(&windup_mtx);
	binuptime(&bt2);
	TIMESPEC_TO_BINTIME(ts, &bt);
	bintimesub(&bt, &bt2, &bt);
	bintimeadd(&bt2, &timehands->th_boottime, &bt2);

	/* XXX fiddle all the little crinkly bits around the fiords... */
	tc_windup(&bt, NULL, &zero);
	mtx_leave(&windup_mtx);
	rw_exit_write(&tc_lock);

	enqueue_randomness(ts->tv_sec);

	if (timestepwarnings) {
		BINTIME_TO_TIMESPEC(&bt2, &ts2);
		log(LOG_INFO, "Time stepped from %lld.%09ld to %lld.%09ld\n",
		    (long long)ts2.tv_sec, ts2.tv_nsec,
		    (long long)ts->tv_sec, ts->tv_nsec);
	}
}

/*
 * Step the monotonic and realtime clocks, triggering any timeouts that
 * should have occurred across the interval.
 */
void
tc_setclock(const struct timespec *ts)
{
	struct bintime bt, bt2;
	struct timespec earlier;
	static int first = 1;
	int rewind = 0;
#ifndef SMALL_KERNEL
	long long adj_ticks;
#endif

	/*
	 * When we're called for the first time, during boot when
	 * the root partition is mounted, we need to set boottime.
	 */
	if (first) {
		tc_setrealtimeclock(ts);
		first = 0;
		return;
	}

	enqueue_randomness(ts->tv_sec);

	mtx_enter(&windup_mtx);
	TIMESPEC_TO_BINTIME(ts, &bt);
	bintimesub(&bt, &timehands->th_boottime, &bt);

	/*
	 * Don't rewind the offset.
	 */
	if (bintimecmp(&bt, &timehands->th_offset, <))
		rewind = 1;

	bt2 = timehands->th_offset;

	/* XXX fiddle all the little crinkly bits around the fiords... */
	tc_windup(NULL, rewind ? NULL : &bt, NULL);
	mtx_leave(&windup_mtx);

	if (rewind) {
		BINTIME_TO_TIMESPEC(&bt, &earlier);
		printf("%s: cannot rewind uptime to %lld.%09ld\n",
		    __func__, (long long)earlier.tv_sec, earlier.tv_nsec);
		return;
	}

#ifndef SMALL_KERNEL
	/* convert the bintime to ticks */
	bintimesub(&bt, &bt2, &bt);
	bintimeadd(&naptime, &bt, &naptime);
	adj_ticks = (uint64_t)hz * bt.sec +
	    (((uint64_t)1000000 * (uint32_t)(bt.frac >> 32)) >> 32) / tick;
	if (adj_ticks > 0) {
		if (adj_ticks > INT_MAX)
			adj_ticks = INT_MAX;
		timeout_adjust_ticks(adj_ticks);
	}
#endif
}
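/*
 * Worked example for the tick conversion in tc_setclock() (assuming
 * hz = 100, so tick = 10000 microseconds): after a 2.5 second suspend,
 * bt.sec = 2 and the upper 32 bits of bt.frac encode 0.5 seconds, so
 *
 *	adj_ticks = 100 * 2 + (1000000 * 0.5) / 10000 = 200 + 50 = 250
 *
 * and timeout_adjust_ticks(250) expires everything that would have
 * fired during those 2.5 seconds.
 */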
/*
 * Initialize the next struct timehands in the ring and make
 * it the active timehands.  Along the way we might switch to a different
 * timecounter and/or do seconds processing in NTP.  Slightly magic.
 */
void
tc_windup(struct bintime *new_boottime, struct bintime *new_offset,
    int64_t *new_adjtimedelta)
{
	struct bintime bt;
	struct timecounter *active_tc;
	struct timehands *th, *tho;
	u_int64_t scale;
	u_int delta, ncount, ogen;
	int i;

	if (new_boottime != NULL || new_adjtimedelta != NULL)
		rw_assert_wrlock(&tc_lock);
	MUTEX_ASSERT_LOCKED(&windup_mtx);

	active_tc = timecounter;

	/*
	 * Make the next timehands a copy of the current one, but do not
	 * overwrite the generation or next pointer.  While we update
	 * the contents, the generation must be zero.
	 */
	tho = timehands;
	th = tho->th_next;
	ogen = th->th_generation;
	th->th_generation = 0;
	membar_producer();
	memcpy(th, tho, offsetof(struct timehands, th_generation));

	/*
	 * If changing the boot offset, do so before updating the
	 * offset fields.
	 */
	if (new_offset != NULL)
		th->th_offset = *new_offset;

	/*
	 * Capture a timecounter delta on the current timecounter and if
	 * changing timecounters, a counter value from the new timecounter.
	 * Update the offset fields accordingly.
	 */
	delta = tc_delta(th);
	if (th->th_counter != active_tc)
		ncount = active_tc->tc_get_timecount(active_tc);
	else
		ncount = 0;
	th->th_offset_count += delta;
	th->th_offset_count &= th->th_counter->tc_counter_mask;
	bintimeaddfrac(&th->th_offset, th->th_scale * delta, &th->th_offset);

#ifdef notyet
	/*
	 * Hardware latching timecounters may not generate interrupts on
	 * PPS events, so instead we poll them.  There is a finite risk that
	 * the hardware might capture a count which is later than the one we
	 * got above, and therefore possibly in the next NTP second which might
	 * have a different rate than the current NTP second.  It doesn't
	 * matter in practice.
	 */
	if (tho->th_counter->tc_poll_pps)
		tho->th_counter->tc_poll_pps(tho->th_counter);
#endif

	/*
	 * If changing the boot time or clock adjustment, do so before
	 * NTP processing.
	 */
	if (new_boottime != NULL)
		th->th_boottime = *new_boottime;
	if (new_adjtimedelta != NULL)
		th->th_adjtimedelta = *new_adjtimedelta;

	/*
	 * Deal with NTP second processing.  The for loop normally
	 * iterates at most once, but in extreme situations it might
	 * keep NTP sane if timeouts are not run for several seconds.
	 * At boot, the time step can be large when the TOD hardware
	 * has been read, so on really large steps, we call
	 * ntp_update_second only twice.  We need to call it twice in
	 * case we missed a leap second.
	 */
	bt = th->th_offset;
	bintimeadd(&bt, &th->th_boottime, &bt);
	i = bt.sec - tho->th_microtime.tv_sec;
	if (i > LARGE_STEP)
		i = 2;
	for (; i > 0; i--)
		ntp_update_second(th);

	/* Update the UTC timestamps used by the get*() functions. */
	/* XXX shouldn't do this here.  Should force non-`get' versions. */
	BINTIME_TO_TIMEVAL(&bt, &th->th_microtime);
	BINTIME_TO_TIMESPEC(&bt, &th->th_nanotime);

	/* Now is a good time to change timecounters. */
	if (th->th_counter != active_tc) {
		th->th_counter = active_tc;
		th->th_offset_count = ncount;
	}

	/*-
	 * Recalculate the scaling factor.  We want the number of 1/2^64
	 * fractions of a second per period of the hardware counter, taking
	 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
	 * processing provides us with.
	 *
	 * The th_adjustment is nanoseconds per second with 32 bit binary
	 * fraction and we want 64 bit binary fraction of second:
	 *
	 *	 x = a * 2^32 / 10^9 = a * 4.294967296
	 *
	 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
	 * we can only multiply by about 850 without overflowing, but that
	 * leaves suitably precise fractions for multiply before divide.
	 *
	 * Divide before multiply with a fraction of 2199/512 results in a
	 * systematic undercompensation of 10PPM of th_adjustment.  On a
	 * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
	 *
	 * We happily sacrifice the lowest of the 64 bits of our result
	 * to the goddess of code clarity.
	 */
	scale = (u_int64_t)1 << 63;
	scale +=
	    ((th->th_adjustment + th->th_counter->tc_freq_adj) / 1024) * 2199;
	scale /= th->th_counter->tc_frequency;
	th->th_scale = scale * 2;

	/*
	 * Now that the struct timehands is again consistent, set the new
	 * generation number, making sure to not make it zero.
	 */
	if (++ogen == 0)
		ogen = 1;
	membar_producer();
	th->th_generation = ogen;

	/* Go live with the new struct timehands. */
	time_second = th->th_microtime.tv_sec;
	time_uptime = th->th_offset.sec;
	membar_producer();
	timehands = th;
}
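/*
 * Worked numbers for the 2199/512 approximation in tc_windup(): the
 * exact conversion factor is 2^32 / 10^9 = 4.294967296, while dividing
 * by 1024, multiplying by 2199 and doubling at the end multiplies by
 * 2199/512 = 4.294921875.  The shortfall is about 1.06e-5 of the
 * adjustment -- the ~10PPM systematic undercompensation mentioned
 * above -- which at the +/-5000PPM limit is roughly 0.05PPM absolute.
 */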
/* Report or change the active timecounter hardware. */
int
sysctl_tc_hardware(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	char newname[32];
	struct timecounter *newtc, *tc;
	int error;

	tc = timecounter;
	strlcpy(newname, tc->tc_name, sizeof(newname));

	error = sysctl_string(oldp, oldlenp, newp, newlen, newname,
	    sizeof(newname));
	if (error != 0 || strcmp(newname, tc->tc_name) == 0)
		return (error);
	SLIST_FOREACH(newtc, &tc_list, tc_next) {
		if (strcmp(newname, newtc->tc_name) != 0)
			continue;

		/* Warm up new timecounter. */
		(void)newtc->tc_get_timecount(newtc);
		(void)newtc->tc_get_timecount(newtc);

		rw_enter_write(&tc_lock);
		timecounter = newtc;
		rw_exit_write(&tc_lock);

		return (0);
	}
	return (EINVAL);
}

/* Report the available timecounter hardware. */
int
sysctl_tc_choice(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	char buf[32], *spc, *choices;
	struct timecounter *tc;
	int error, maxlen;

	if (SLIST_EMPTY(&tc_list))
		return (sysctl_rdstring(oldp, oldlenp, newp, ""));

	spc = "";
	maxlen = 0;
	SLIST_FOREACH(tc, &tc_list, tc_next)
		maxlen += sizeof(buf);
	choices = malloc(maxlen, M_TEMP, M_WAITOK);
	*choices = '\0';
	SLIST_FOREACH(tc, &tc_list, tc_next) {
		snprintf(buf, sizeof(buf), "%s%s(%d)",
		    spc, tc->tc_name, tc->tc_quality);
		spc = " ";
		strlcat(choices, buf, maxlen);
	}
	error = sysctl_rdstring(oldp, oldlenp, newp, choices);
	free(choices, M_TEMP, maxlen);
	return (error);
}

/*
 * Timecounters need to be updated every so often to prevent the hardware
 * counter from overflowing.  Updating also recalculates the cached values
 * used by the get*() family of functions, so their precision depends on
 * the update frequency.
 */
static int tc_tick;

void
tc_ticktock(void)
{
	static int count;

	if (++count < tc_tick)
		return;
	if (!mtx_enter_try(&windup_mtx))
		return;
	count = 0;
	tc_windup(NULL, NULL, NULL);
	mtx_leave(&windup_mtx);
}

void
inittimecounter(void)
{
#ifdef DEBUG
	u_int p;
#endif

	/*
	 * Set the initial timeout to
	 * max(1, <approx. number of hardclock ticks in a millisecond>).
	 * People should probably not use the sysctl to set the timeout
	 * to smaller than its initial value, since that value is the
	 * smallest reasonable one.  If they want better timestamps they
	 * should use the non-"get"* functions.
	 */
	if (hz > 1000)
		tc_tick = (hz + 500) / 1000;
	else
		tc_tick = 1;
#ifdef DEBUG
	p = (tc_tick * 1000000) / hz;
	printf("Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000);
#endif

	/* warm up new timecounter (again) and get rolling. */
	(void)timecounter->tc_get_timecount(timecounter);
	(void)timecounter->tc_get_timecount(timecounter);
}
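/*
 * From userland these knobs appear under the kern.timecounter sysctl
 * node.  A typical session (illustrative; counter names and qualities
 * vary by machine) looks like:
 *
 *	$ sysctl kern.timecounter.choice
 *	kern.timecounter.choice=i8254(0) acpihpet0(1000) tsc(2000)
 *	$ sysctl kern.timecounter.hardware=acpihpet0
 *
 * The second command is served by sysctl_tc_hardware() above, which
 * warms the named counter up and makes it the active timecounter.
 */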
730 */ 731 int 732 sysctl_tc(int *name, u_int namelen, void *oldp, size_t *oldlenp, 733 void *newp, size_t newlen) 734 { 735 if (namelen != 1) 736 return (ENOTDIR); 737 738 switch (name[0]) { 739 case KERN_TIMECOUNTER_TICK: 740 return (sysctl_rdint(oldp, oldlenp, newp, tc_tick)); 741 case KERN_TIMECOUNTER_TIMESTEPWARNINGS: 742 return (sysctl_int(oldp, oldlenp, newp, newlen, 743 ×tepwarnings)); 744 case KERN_TIMECOUNTER_HARDWARE: 745 return (sysctl_tc_hardware(oldp, oldlenp, newp, newlen)); 746 case KERN_TIMECOUNTER_CHOICE: 747 return (sysctl_tc_choice(oldp, oldlenp, newp, newlen)); 748 default: 749 return (EOPNOTSUPP); 750 } 751 /* NOTREACHED */ 752 } 753 754 /* 755 * Skew the timehands according to any adjtime(2) adjustment. 756 */ 757 void 758 ntp_update_second(struct timehands *th) 759 { 760 int64_t adj; 761 762 MUTEX_ASSERT_LOCKED(&windup_mtx); 763 764 if (th->th_adjtimedelta > 0) 765 adj = MIN(5000, th->th_adjtimedelta); 766 else 767 adj = MAX(-5000, th->th_adjtimedelta); 768 th->th_adjtimedelta -= adj; 769 th->th_adjustment = (adj * 1000) << 32; 770 } 771 772 void 773 tc_adjfreq(int64_t *old, int64_t *new) 774 { 775 if (old != NULL) { 776 rw_assert_anylock(&tc_lock); 777 *old = timecounter->tc_freq_adj; 778 } 779 if (new != NULL) { 780 rw_assert_wrlock(&tc_lock); 781 mtx_enter(&windup_mtx); 782 timecounter->tc_freq_adj = *new; 783 tc_windup(NULL, NULL, NULL); 784 mtx_leave(&windup_mtx); 785 } 786 } 787 788 void 789 tc_adjtime(int64_t *old, int64_t *new) 790 { 791 struct timehands *th; 792 u_int gen; 793 794 if (old != NULL) { 795 do { 796 th = timehands; 797 gen = th->th_generation; 798 membar_consumer(); 799 *old = th->th_adjtimedelta; 800 membar_consumer(); 801 } while (gen == 0 || gen != th->th_generation); 802 } 803 if (new != NULL) { 804 rw_assert_wrlock(&tc_lock); 805 mtx_enter(&windup_mtx); 806 tc_windup(NULL, NULL, new); 807 mtx_leave(&windup_mtx); 808 } 809 } 810