/*	$OpenBSD: kern_tc.c,v 1.47 2019/05/22 19:59:37 cheloha Exp $ */

/*
 * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * If we meet some day, and you think this stuff is worth it, you
 * can buy me a beer in return.  Poul-Henning Kamp
 */

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/stdint.h>
#include <sys/timeout.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/timetc.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <dev/rndvar.h>

/*
 * A large step happens on boot.  This constant detects such steps.
 * It is relatively small so that ntp_update_second gets called enough
 * in the typical 'missed a couple of seconds' case, but doesn't loop
 * forever when the time step is large.
 */
#define LARGE_STEP	200

u_int dummy_get_timecount(struct timecounter *);

int sysctl_tc_hardware(void *, size_t *, void *, size_t);
int sysctl_tc_choice(void *, size_t *, void *, size_t);

/*
 * Implement a dummy timecounter which we can use until we get a real one
 * in the air.  This allows the console and other early stuff to use
 * time services.
 */

u_int
dummy_get_timecount(struct timecounter *tc)
{
	static u_int now;

	return (++now);
}

static struct timecounter dummy_timecounter = {
	dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000
};

/*
 * Locks used to protect struct members, global variables in this file:
 *	I	immutable after initialization
 *	t	tc_lock
 *	w	windup_mtx
 */

struct timehands {
	/* These fields must be initialized by the driver. */
	struct timecounter	*th_counter;		/* [w] */
	int64_t			th_adjtimedelta;	/* [tw] */
	int64_t			th_adjustment;		/* [w] */
	u_int64_t		th_scale;		/* [w] */
	u_int			th_offset_count;	/* [w] */
	struct bintime		th_boottime;		/* [tw] */
	struct bintime		th_offset;		/* [w] */
	struct timeval		th_microtime;		/* [w] */
	struct timespec		th_nanotime;		/* [w] */
	/* Fields not to be copied in tc_windup start with th_generation. */
	volatile u_int		th_generation;		/* [w] */
	struct timehands	*th_next;		/* [I] */
};

static struct timehands th0;
static struct timehands th1 = {
	.th_next = &th0
};
static struct timehands th0 = {
	.th_counter = &dummy_timecounter,
	.th_scale = UINT64_MAX / 1000000,
	.th_offset = { .sec = 1, .frac = 0 },
	.th_generation = 1,
	.th_next = &th1
};
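/*
 * Reads are lockless: tc_windup() publishes updates into the *next*
 * timehands in the two-element ring while readers keep using the
 * current one.  A reader snapshots th_generation, copies the fields it
 * needs, and retries if the generation changed underneath it (or was
 * zero, meaning an update was in progress).  Illustrative sketch of
 * the pattern used by every read function below:
 *
 *	do {
 *		th = timehands;
 *		gen = th->th_generation;
 *		membar_consumer();
 *		... copy fields from *th ...
 *		membar_consumer();
 *	} while (gen == 0 || gen != th->th_generation);
 */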
struct rwlock tc_lock = RWLOCK_INITIALIZER("tc_lock");

/*
 * tc_windup() must be called before leaving this mutex.
 */
struct mutex windup_mtx = MUTEX_INITIALIZER(IPL_CLOCK);

static struct timehands *volatile timehands = &th0;		/* [w] */
struct timecounter *timecounter = &dummy_timecounter;		/* [t] */
static SLIST_HEAD(, timecounter) tc_list = SLIST_HEAD_INITIALIZER(tc_list);

volatile time_t time_second = 1;
volatile time_t time_uptime = 0;

struct bintime naptime;
static int timestepwarnings;

void ntp_update_second(struct timehands *);
void tc_windup(struct bintime *, struct bintime *, int64_t *);

/*
 * Return the difference between the timehands' counter value now and what
 * was when we copied it to the timehands' offset_count.
 */
static __inline u_int
tc_delta(struct timehands *th)
{
	struct timecounter *tc;

	tc = th->th_counter;
	return ((tc->tc_get_timecount(tc) - th->th_offset_count) &
	    tc->tc_counter_mask);
}
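/*
 * The unsigned subtraction plus mask makes tc_delta() immune to counter
 * wraparound as long as tc_windup() runs at least once per counter
 * period.  Worked example (illustrative) with a 24-bit counter
 * (tc_counter_mask == 0xffffff): if th_offset_count is 0xfffffe and the
 * hardware now reads 0x000001, then
 *
 *	(0x000001 - 0xfffffe) & 0xffffff == 3
 *
 * i.e. three counter ticks elapsed across the wrap.
 */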
/*
 * Functions for reading the time.  We have to loop until we are sure that
 * the timehands that we operated on was not updated under our feet.  See
 * the comment in <sys/time.h> for a description of these functions.
 */

void
binboottime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		membar_consumer();
		*bt = th->th_boottime;
		membar_consumer();
	} while (gen == 0 || gen != th->th_generation);
}

void
microboottime(struct timeval *tvp)
{
	struct bintime bt;

	binboottime(&bt);
	bintime2timeval(&bt, tvp);
}

void
binuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		membar_consumer();
		*bt = th->th_offset;
		bintime_addx(bt, th->th_scale * tc_delta(th));
		membar_consumer();
	} while (gen == 0 || gen != th->th_generation);
}

void
nanouptime(struct timespec *tsp)
{
	struct bintime bt;

	binuptime(&bt);
	bintime2timespec(&bt, tsp);
}

void
microuptime(struct timeval *tvp)
{
	struct bintime bt;

	binuptime(&bt);
	bintime2timeval(&bt, tvp);
}

void
bintime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		membar_consumer();
		*bt = th->th_offset;
		bintime_addx(bt, th->th_scale * tc_delta(th));
		bintime_add(bt, &th->th_boottime);
		membar_consumer();
	} while (gen == 0 || gen != th->th_generation);
}

void
nanotime(struct timespec *tsp)
{
	struct bintime bt;

	bintime(&bt);
	bintime2timespec(&bt, tsp);
}

void
microtime(struct timeval *tvp)
{
	struct bintime bt;

	bintime(&bt);
	bintime2timeval(&bt, tvp);
}

void
getnanouptime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		membar_consumer();
		bintime2timespec(&th->th_offset, tsp);
		membar_consumer();
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrouptime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		membar_consumer();
		bintime2timeval(&th->th_offset, tvp);
		membar_consumer();
	} while (gen == 0 || gen != th->th_generation);
}

void
getnanotime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		membar_consumer();
		*tsp = th->th_nanotime;
		membar_consumer();
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrotime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		membar_consumer();
		*tvp = th->th_microtime;
		membar_consumer();
	} while (gen == 0 || gen != th->th_generation);
}
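/*
 * The get*() variants above return the timestamps cached at the last
 * tc_windup() call, so they are cheap but only as fresh as the last
 * windup (at most about a millisecond old, see tc_ticktock()); the
 * non-get versions read the hardware counter for full precision.
 * Illustrative interval measurement with the monotonic clock (a
 * sketch, not part of the original file):
 *
 *	struct timespec t0, t1, dt;
 *
 *	nanouptime(&t0);
 *	... do work ...
 *	nanouptime(&t1);
 *	timespecsub(&t1, &t0, &dt);
 */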
/*
 * Initialize a new timecounter and possibly use it.
 */
void
tc_init(struct timecounter *tc)
{
	u_int u;

	u = tc->tc_frequency / tc->tc_counter_mask;
	/* XXX: We need some margin here, 10% is a guess */
	u *= 11;
	u /= 10;
	if (tc->tc_quality >= 0) {
		if (u > hz) {
			tc->tc_quality = -2000;
			printf("Timecounter \"%s\" frequency %lu Hz",
			    tc->tc_name, (unsigned long)tc->tc_frequency);
			printf(" -- Insufficient hz, needs at least %u\n", u);
		}
	}

	SLIST_INSERT_HEAD(&tc_list, tc, tc_next);

	/*
	 * Never automatically use a timecounter with negative quality.
	 * Even though we run on the dummy counter, switching here may be
	 * worse since this timecounter may not be monotonic.
	 */
	if (tc->tc_quality < 0)
		return;
	if (tc->tc_quality < timecounter->tc_quality)
		return;
	if (tc->tc_quality == timecounter->tc_quality &&
	    tc->tc_frequency < timecounter->tc_frequency)
		return;
	(void)tc->tc_get_timecount(tc);
	enqueue_randomness(tc->tc_get_timecount(tc));

	timecounter = tc;
}

/* Report the frequency of the current timecounter. */
u_int64_t
tc_getfrequency(void)
{

	return (timehands->th_counter->tc_frequency);
}

/*
 * Step our concept of UTC, aka the realtime clock.
 * This is done by modifying our estimate of when we booted.
 *
 * Any ongoing adjustment is meaningless after a clock jump,
 * so we zero adjtimedelta here as well.
 */
void
tc_setrealtimeclock(const struct timespec *ts)
{
	struct timespec ts2;
	struct bintime bt, bt2;
	int64_t zero = 0;

	rw_enter_write(&tc_lock);
	mtx_enter(&windup_mtx);
	binuptime(&bt2);
	timespec2bintime(ts, &bt);
	bintime_sub(&bt, &bt2);
	bintime_add(&bt2, &timehands->th_boottime);

	/* XXX fiddle all the little crinkly bits around the fiords... */
	tc_windup(&bt, NULL, &zero);
	mtx_leave(&windup_mtx);
	rw_exit_write(&tc_lock);

	enqueue_randomness(ts->tv_sec);

	if (timestepwarnings) {
		bintime2timespec(&bt2, &ts2);
		log(LOG_INFO, "Time stepped from %lld.%09ld to %lld.%09ld\n",
		    (long long)ts2.tv_sec, ts2.tv_nsec,
		    (long long)ts->tv_sec, ts->tv_nsec);
	}
}

/*
 * Step the monotonic and realtime clocks, triggering any timeouts that
 * should have occurred across the interval.
 */
void
tc_setclock(const struct timespec *ts)
{
	struct bintime bt, bt2;
	struct timespec earlier;
	static int first = 1;
	int rewind = 0;
#ifndef SMALL_KERNEL
	long long adj_ticks;
#endif

	/*
	 * When we're called for the first time, during boot when
	 * the root partition is mounted, we need to set boottime.
	 */
	if (first) {
		tc_setrealtimeclock(ts);
		first = 0;
		return;
	}

	enqueue_randomness(ts->tv_sec);

	mtx_enter(&windup_mtx);
	timespec2bintime(ts, &bt);
	bintime_sub(&bt, &timehands->th_boottime);

	/*
	 * Don't rewind the offset.
	 */
	if (bt.sec < timehands->th_offset.sec ||
	    (bt.sec == timehands->th_offset.sec &&
	    bt.frac < timehands->th_offset.frac))
		rewind = 1;

	bt2 = timehands->th_offset;

	/* XXX fiddle all the little crinkly bits around the fiords... */
	tc_windup(NULL, rewind ? NULL : &bt, NULL);
	mtx_leave(&windup_mtx);

	if (rewind) {
		bintime2timespec(&bt, &earlier);
		printf("%s: cannot rewind uptime to %lld.%09ld\n",
		    __func__, (long long)earlier.tv_sec, earlier.tv_nsec);
		return;
	}

#ifndef SMALL_KERNEL
	/* convert the bintime to ticks */
	bintime_sub(&bt, &bt2);
	bintime_add(&naptime, &bt);
	adj_ticks = (uint64_t)hz * bt.sec +
	    (((uint64_t)1000000 * (uint32_t)(bt.frac >> 32)) >> 32) / tick;
	if (adj_ticks > 0) {
		if (adj_ticks > INT_MAX)
			adj_ticks = INT_MAX;
		timeout_adjust_ticks(adj_ticks);
	}
#endif
}
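/*
 * The tick conversion above takes the top 32 bits of the bintime
 * fraction and converts them to microseconds (the multiply by 1000000
 * and shift right by 32 turn 1/2^32 fractional units into us), then
 * divides by the tick length in microseconds.  Worked example
 * (illustrative): with hz == 100 (tick == 10000 us), stepping forward
 * by 2.5 seconds gives
 *
 *	adj_ticks = 100 * 2 + 500000 / 10000 = 250
 *
 * i.e. 250 hardclock ticks, which timeout_adjust_ticks() then replays.
 */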
/*
 * Initialize the next struct timehands in the ring and make
 * it the active timehands.  Along the way we might switch to a different
 * timecounter and/or do seconds processing in NTP.  Slightly magic.
 */
void
tc_windup(struct bintime *new_boottime, struct bintime *new_offset,
    int64_t *new_adjtimedelta)
{
	struct bintime bt;
	struct timecounter *active_tc;
	struct timehands *th, *tho;
	u_int64_t scale;
	u_int delta, ncount, ogen;
	int i;

	if (new_boottime != NULL || new_adjtimedelta != NULL)
		rw_assert_wrlock(&tc_lock);
	MUTEX_ASSERT_LOCKED(&windup_mtx);

	active_tc = timecounter;

	/*
	 * Make the next timehands a copy of the current one, but do not
	 * overwrite the generation or next pointer.  While we update
	 * the contents, the generation must be zero.
	 */
	tho = timehands;
	th = tho->th_next;
	ogen = th->th_generation;
	th->th_generation = 0;
	membar_producer();
	memcpy(th, tho, offsetof(struct timehands, th_generation));

	/*
	 * If changing the boot offset, do so before updating the
	 * offset fields.
	 */
	if (new_offset != NULL)
		th->th_offset = *new_offset;

	/*
	 * Capture a timecounter delta on the current timecounter and if
	 * changing timecounters, a counter value from the new timecounter.
	 * Update the offset fields accordingly.
	 */
	delta = tc_delta(th);
	if (th->th_counter != active_tc)
		ncount = active_tc->tc_get_timecount(active_tc);
	else
		ncount = 0;
	th->th_offset_count += delta;
	th->th_offset_count &= th->th_counter->tc_counter_mask;
	bintime_addx(&th->th_offset, th->th_scale * delta);

#ifdef notyet
	/*
	 * Hardware latching timecounters may not generate interrupts on
	 * PPS events, so instead we poll them.  There is a finite risk that
	 * the hardware might capture a count which is later than the one we
	 * got above, and therefore possibly in the next NTP second which
	 * might have a different rate than the current NTP second.  It
	 * doesn't matter in practice.
	 */
	if (tho->th_counter->tc_poll_pps)
		tho->th_counter->tc_poll_pps(tho->th_counter);
#endif

	/*
	 * If changing the boot time or clock adjustment, do so before
	 * NTP processing.
	 */
	if (new_boottime != NULL)
		th->th_boottime = *new_boottime;
	if (new_adjtimedelta != NULL)
		th->th_adjtimedelta = *new_adjtimedelta;

	/*
	 * Deal with NTP second processing.  The for loop normally
	 * iterates at most once, but in extreme situations it might
	 * keep NTP sane if timeouts are not run for several seconds.
	 * At boot, the time step can be large when the TOD hardware
	 * has been read, so on really large steps, we call
	 * ntp_update_second only twice.  We need to call it twice in
	 * case we missed a leap second.
	 */
	bt = th->th_offset;
	bintime_add(&bt, &th->th_boottime);
	i = bt.sec - tho->th_microtime.tv_sec;
	if (i > LARGE_STEP)
		i = 2;
	for (; i > 0; i--)
		ntp_update_second(th);

	/* Update the UTC timestamps used by the get*() functions. */
	/* XXX shouldn't do this here.  Should force non-`get' versions. */
	bintime2timeval(&bt, &th->th_microtime);
	bintime2timespec(&bt, &th->th_nanotime);

	/* Now is a good time to change timecounters. */
	if (th->th_counter != active_tc) {
		th->th_counter = active_tc;
		th->th_offset_count = ncount;
	}

	/*-
	 * Recalculate the scaling factor.  We want the number of 1/2^64
	 * fractions of a second per period of the hardware counter, taking
	 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
	 * processing provides us with.
	 *
	 * The th_adjustment is nanoseconds per second with 32 bit binary
	 * fraction and we want 64 bit binary fraction of second:
	 *
	 *	 x = a * 2^32 / 10^9 = a * 4.294967296
	 *
	 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
	 * we can only multiply by about 850 without overflowing, but that
	 * leaves suitably precise fractions for multiply before divide.
	 *
	 * Divide before multiply with a fraction of 2199/512 results in a
	 * systematic undercompensation of 10PPM of th_adjustment.  On a
	 * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
	 *
	 * We happily sacrifice the lowest of the 64 bits of our result
	 * to the goddess of code clarity.
	 */
	scale = (u_int64_t)1 << 63;
	scale += (th->th_adjustment / 1024) * 2199;
	scale /= th->th_counter->tc_frequency;
	th->th_scale = scale * 2;
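	/*
	 * Worked example (illustrative): for a 1 GHz counter with no
	 * pending adjustment,
	 *
	 *	th_scale = 2 * (2^63 / 10^9) = 2^64 / 10^9 ~= 18446744073
	 *
	 * so each counter tick adds very nearly 1 ns to th_offset (2^64
	 * fractions of a second is 1 s, so 2^64/10^9 of them is 1 ns).
	 * Note that 2199/512 == 4.294921875 approximates 2^32/10^9 ==
	 * 4.294967296, which is where the ~10PPM undercompensation
	 * mentioned above comes from.
	 */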

	/*
	 * Now that the struct timehands is again consistent, set the new
	 * generation number, making sure to not make it zero.
	 */
	if (++ogen == 0)
		ogen = 1;
	membar_producer();
	th->th_generation = ogen;

	/* Go live with the new struct timehands. */
	time_second = th->th_microtime.tv_sec;
	time_uptime = th->th_offset.sec;
	membar_producer();
	timehands = th;
}

/* Report or change the active timecounter hardware. */
int
sysctl_tc_hardware(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	char newname[32];
	struct timecounter *newtc, *tc;
	int error;

	tc = timecounter;
	strlcpy(newname, tc->tc_name, sizeof(newname));

	error = sysctl_string(oldp, oldlenp, newp, newlen, newname,
	    sizeof(newname));
	if (error != 0 || strcmp(newname, tc->tc_name) == 0)
		return (error);
	SLIST_FOREACH(newtc, &tc_list, tc_next) {
		if (strcmp(newname, newtc->tc_name) != 0)
			continue;

		/* Warm up new timecounter. */
		(void)newtc->tc_get_timecount(newtc);
		(void)newtc->tc_get_timecount(newtc);

		rw_enter_write(&tc_lock);
		timecounter = newtc;
		rw_exit_write(&tc_lock);

		return (0);
	}
	return (EINVAL);
}

/* Report the available timecounter hardware. */
int
sysctl_tc_choice(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	char buf[32], *spc, *choices;
	struct timecounter *tc;
	int error, maxlen;

	if (SLIST_EMPTY(&tc_list))
		return (sysctl_rdstring(oldp, oldlenp, newp, ""));

	spc = "";
	maxlen = 0;
	SLIST_FOREACH(tc, &tc_list, tc_next)
		maxlen += sizeof(buf);
	choices = malloc(maxlen, M_TEMP, M_WAITOK);
	*choices = '\0';
	SLIST_FOREACH(tc, &tc_list, tc_next) {
		snprintf(buf, sizeof(buf), "%s%s(%d)",
		    spc, tc->tc_name, tc->tc_quality);
		spc = " ";
		strlcat(choices, buf, maxlen);
	}
	error = sysctl_rdstring(oldp, oldlenp, newp, choices);
	free(choices, M_TEMP, maxlen);
	return (error);
}

/*
 * Timecounters need to be updated every so often to prevent the hardware
 * counter from overflowing.  Updating also recalculates the cached values
 * used by the get*() family of functions, so their precision depends on
 * the update frequency.
 */
static int tc_tick;

void
tc_ticktock(void)
{
	static int count;

	if (++count < tc_tick)
		return;
	if (!mtx_enter_try(&windup_mtx))
		return;
	count = 0;
	tc_windup(NULL, NULL, NULL);
	mtx_leave(&windup_mtx);
}

void
inittimecounter(void)
{
#ifdef DEBUG
	u_int p;
#endif

	/*
	 * Set the initial timeout to
	 * max(1, <approx. number of hardclock ticks in a millisecond>).
	 * People should probably not use the sysctl to set the timeout
	 * to smaller than its initial value, since that value is the
	 * smallest reasonable one.  If they want better timestamps they
	 * should use the non-"get"* functions.
	 */
	if (hz > 1000)
		tc_tick = (hz + 500) / 1000;
	else
		tc_tick = 1;
#ifdef DEBUG
	p = (tc_tick * 1000000) / hz;
	printf("Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000);
#endif

	/* warm up new timecounter (again) and get rolling. */
	(void)timecounter->tc_get_timecount(timecounter);
	(void)timecounter->tc_get_timecount(timecounter);
}
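/*
 * Worked example (illustrative): with hz == 100, tc_tick == 1 and
 * tc_windup() runs on every hardclock tick, i.e. every 10 ms.  With
 * hz == 8192, tc_tick == (8192 + 500) / 1000 == 8, so tc_windup()
 * runs every 8 ticks, i.e. roughly every millisecond, which bounds
 * how stale the get*() timestamps can be.
 */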
/*
 * Return timecounter-related information.
 */
int
sysctl_tc(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	if (namelen != 1)
		return (ENOTDIR);

	switch (name[0]) {
	case KERN_TIMECOUNTER_TICK:
		return (sysctl_rdint(oldp, oldlenp, newp, tc_tick));
	case KERN_TIMECOUNTER_TIMESTEPWARNINGS:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &timestepwarnings));
	case KERN_TIMECOUNTER_HARDWARE:
		return (sysctl_tc_hardware(oldp, oldlenp, newp, newlen));
	case KERN_TIMECOUNTER_CHOICE:
		return (sysctl_tc_choice(oldp, oldlenp, newp, newlen));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * Skew the timehands according to any adjfreq(2)/adjtime(2) adjustments.
 */
void
ntp_update_second(struct timehands *th)
{
	int64_t adj;

	MUTEX_ASSERT_LOCKED(&windup_mtx);

	if (th->th_adjtimedelta > 0)
		adj = MIN(5000, th->th_adjtimedelta);
	else
		adj = MAX(-5000, th->th_adjtimedelta);
	th->th_adjtimedelta -= adj;
	th->th_adjustment = (adj * 1000) << 32;
	th->th_adjustment += th->th_counter->tc_freq_adj;
}

void
tc_adjfreq(int64_t *old, int64_t *new)
{
	if (old != NULL) {
		rw_assert_anylock(&tc_lock);
		*old = timecounter->tc_freq_adj;
	}
	if (new != NULL) {
		rw_assert_wrlock(&tc_lock);
		mtx_enter(&windup_mtx);
		timecounter->tc_freq_adj = *new;
		tc_windup(NULL, NULL, NULL);
		mtx_leave(&windup_mtx);
	}
}

void
tc_adjtime(int64_t *old, int64_t *new)
{
	struct timehands *th;
	u_int gen;

	if (old != NULL) {
		do {
			th = timehands;
			gen = th->th_generation;
			membar_consumer();
			*old = th->th_adjtimedelta;
			membar_consumer();
		} while (gen == 0 || gen != th->th_generation);
	}
	if (new != NULL) {
		rw_assert_wrlock(&tc_lock);
		mtx_enter(&windup_mtx);
		tc_windup(NULL, NULL, new);
		mtx_leave(&windup_mtx);
	}
}
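/*
 * Illustrative note on the slew rate implied by ntp_update_second():
 * th_adjtimedelta holds the outstanding adjtime(2) offset in
 * microseconds, and at most 5000 us of it is consumed per second, so
 * the clock is slewed by no more than 5 ms/s (5000PPM, matching the
 * range quoted in tc_windup()).  The consumed amount is converted to
 * th_adjustment's units, nanoseconds per second with a 32-bit binary
 * fraction, by the (adj * 1000) << 32 above.  E.g. an adjtime(2)
 * delta of +1 second takes 200 seconds to slew away.
 */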