/*	$OpenBSD: kern_tc.c,v 1.48 2019/06/03 01:27:30 cheloha Exp $ */

/*
 * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * If we meet some day, and you think this stuff is worth it, you
 * can buy me a beer in return.  Poul-Henning Kamp
 */

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/stdint.h>
#include <sys/timeout.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/timetc.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <dev/rndvar.h>

/*
 * A large step happens on boot.  This constant detects such steps.
 * It is relatively small so that ntp_update_second gets called enough
 * in the typical 'missed a couple of seconds' case, but doesn't loop
 * forever when the time step is large.
 */
#define LARGE_STEP	200

u_int dummy_get_timecount(struct timecounter *);

int sysctl_tc_hardware(void *, size_t *, void *, size_t);
int sysctl_tc_choice(void *, size_t *, void *, size_t);

/*
 * Implement a dummy timecounter which we can use until we get a real one
 * in the air.  This allows the console and other early stuff to use
 * time services.
 */

u_int
dummy_get_timecount(struct timecounter *tc)
{
	static u_int now;

	return (++now);
}

static struct timecounter dummy_timecounter = {
	dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000
};

/*
 * Locks used to protect struct members, global variables in this file:
 *	I	immutable after initialization
 *	t	tc_lock
 *	w	windup_mtx
 */

struct timehands {
	/* These fields must be initialized by the driver. */
	struct timecounter	*th_counter;		/* [w] */
	int64_t			th_adjtimedelta;	/* [tw] */
	int64_t			th_adjustment;		/* [w] */
	u_int64_t		th_scale;		/* [w] */
	u_int			th_offset_count;	/* [w] */
	struct bintime		th_boottime;		/* [tw] */
	struct bintime		th_offset;		/* [w] */
	struct timeval		th_microtime;		/* [w] */
	struct timespec		th_nanotime;		/* [w] */
	/* Fields not to be copied in tc_windup start with th_generation. */
	volatile u_int		th_generation;		/* [w] */
	struct timehands	*th_next;		/* [I] */
};

static struct timehands th0;
static struct timehands th1 = {
	.th_next = &th0
};
static struct timehands th0 = {
	.th_counter = &dummy_timecounter,
	.th_scale = UINT64_MAX / 1000000,
	.th_offset = { .sec = 1, .frac = 0 },
	.th_generation = 1,
	.th_next = &th1
};
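/*
 * For illustration: th_scale is the number of 2^-64 fractions of a
 * second per counter tick (see the scale recalculation in tc_windup()
 * below).  The boot-time th0 above pairs the 1MHz dummy counter with
 * a scale of UINT64_MAX / 1000000, i.e. about 2^64 / 10^6, so each
 * dummy tick advances the clock by roughly one microsecond.
 */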
struct rwlock tc_lock = RWLOCK_INITIALIZER("tc_lock");

/*
 * tc_windup() must be called before leaving this mutex.
 */
struct mutex windup_mtx = MUTEX_INITIALIZER(IPL_CLOCK);

static struct timehands *volatile timehands = &th0;		/* [w] */
struct timecounter *timecounter = &dummy_timecounter;		/* [t] */
static SLIST_HEAD(, timecounter) tc_list = SLIST_HEAD_INITIALIZER(tc_list);

volatile time_t time_second = 1;
volatile time_t time_uptime = 0;

struct bintime naptime;
static int timestepwarnings;

void ntp_update_second(struct timehands *);
void tc_windup(struct bintime *, struct bintime *, int64_t *);

/*
 * Return the difference between the timehands' counter value now and what
 * was when we copied it to the timehands' offset_count.
 */
static __inline u_int
tc_delta(struct timehands *th)
{
	struct timecounter *tc;

	tc = th->th_counter;
	return ((tc->tc_get_timecount(tc) - th->th_offset_count) &
	    tc->tc_counter_mask);
}

/*
 * Functions for reading the time.  We have to loop until we are sure that
 * the timehands that we operated on was not updated under our feet.  See
 * the comment in <sys/time.h> for a description of these functions.
 */

void
binboottime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		membar_consumer();
		*bt = th->th_boottime;
		membar_consumer();
	} while (gen == 0 || gen != th->th_generation);
}

void
microboottime(struct timeval *tvp)
{
	struct bintime bt;

	binboottime(&bt);
	BINTIME_TO_TIMEVAL(&bt, tvp);
}

void
binuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		membar_consumer();
		*bt = th->th_offset;
		bintimeaddfrac(bt, th->th_scale * tc_delta(th), bt);
		membar_consumer();
	} while (gen == 0 || gen != th->th_generation);
}

void
nanouptime(struct timespec *tsp)
{
	struct bintime bt;

	binuptime(&bt);
	BINTIME_TO_TIMESPEC(&bt, tsp);
}

void
microuptime(struct timeval *tvp)
{
	struct bintime bt;

	binuptime(&bt);
	BINTIME_TO_TIMEVAL(&bt, tvp);
}

void
bintime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		membar_consumer();
		*bt = th->th_offset;
		bintimeaddfrac(bt, th->th_scale * tc_delta(th), bt);
		bintimeadd(bt, &th->th_boottime, bt);
		membar_consumer();
	} while (gen == 0 || gen != th->th_generation);
}

void
nanotime(struct timespec *tsp)
{
	struct bintime bt;

	bintime(&bt);
	BINTIME_TO_TIMESPEC(&bt, tsp);
}

void
microtime(struct timeval *tvp)
{
	struct bintime bt;

	bintime(&bt);
	BINTIME_TO_TIMEVAL(&bt, tvp);
}

void
getnanouptime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		membar_consumer();
		BINTIME_TO_TIMESPEC(&th->th_offset, tsp);
		membar_consumer();
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrouptime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		membar_consumer();
		BINTIME_TO_TIMEVAL(&th->th_offset, tvp);
		membar_consumer();
	} while (gen == 0 || gen != th->th_generation);
}

void
getnanotime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		membar_consumer();
		*tsp = th->th_nanotime;
		membar_consumer();
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrotime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		membar_consumer();
		*tvp = th->th_microtime;
		membar_consumer();
	} while (gen == 0 || gen != th->th_generation);
}
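/*
 * Usage sketch: the get*() variants above return the timestamps
 * cached at the last tc_windup() call, so they are cheap but can be
 * stale by up to a timecounter tick (see inittimecounter() below).
 * Code that needs full precision should use the non-"get" variants,
 * which read the hardware counter on every call.
 */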
/*
 * Initialize a new timecounter and possibly use it.
 */
void
tc_init(struct timecounter *tc)
{
	u_int u;

	u = tc->tc_frequency / tc->tc_counter_mask;
	/* XXX: We need some margin here, 10% is a guess */
	u *= 11;
	u /= 10;
	if (tc->tc_quality >= 0) {
		if (u > hz) {
			tc->tc_quality = -2000;
			printf("Timecounter \"%s\" frequency %lu Hz",
			    tc->tc_name, (unsigned long)tc->tc_frequency);
			printf(" -- Insufficient hz, needs at least %u\n", u);
		}
	}

	SLIST_INSERT_HEAD(&tc_list, tc, tc_next);

	/*
	 * Never automatically use a timecounter with negative quality.
	 * Even though we run on the dummy counter, switching here may be
	 * worse since this timecounter may not be monotonic.
	 */
	if (tc->tc_quality < 0)
		return;
	if (tc->tc_quality < timecounter->tc_quality)
		return;
	if (tc->tc_quality == timecounter->tc_quality &&
	    tc->tc_frequency < timecounter->tc_frequency)
		return;
	(void)tc->tc_get_timecount(tc);
	enqueue_randomness(tc->tc_get_timecount(tc));

	timecounter = tc;
}

/* Report the frequency of the current timecounter. */
u_int64_t
tc_getfrequency(void)
{

	return (timehands->th_counter->tc_frequency);
}

/*
 * Step our concept of UTC, aka the realtime clock.
 * This is done by modifying our estimate of when we booted.
 *
 * Any ongoing adjustment is meaningless after a clock jump,
 * so we zero adjtimedelta here as well.
 */
void
tc_setrealtimeclock(const struct timespec *ts)
{
	struct timespec ts2;
	struct bintime bt, bt2;
	int64_t zero = 0;

	rw_enter_write(&tc_lock);
	mtx_enter(&windup_mtx);
	binuptime(&bt2);
	TIMESPEC_TO_BINTIME(ts, &bt);
	bintimesub(&bt, &bt2, &bt);
	bintimeadd(&bt2, &timehands->th_boottime, &bt2);

	/* XXX fiddle all the little crinkly bits around the fiords... */
	tc_windup(&bt, NULL, &zero);
	mtx_leave(&windup_mtx);
	rw_exit_write(&tc_lock);

	enqueue_randomness(ts->tv_sec);

	if (timestepwarnings) {
		BINTIME_TO_TIMESPEC(&bt2, &ts2);
		log(LOG_INFO, "Time stepped from %lld.%09ld to %lld.%09ld\n",
		    (long long)ts2.tv_sec, ts2.tv_nsec,
		    (long long)ts->tv_sec, ts->tv_nsec);
	}
}
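/*
 * For illustration: stepping the realtime clock only moves our
 * estimate of when we booted.  If the uptime is 100 seconds and the
 * clock is stepped to UTC time T, the new boot time is T - 100;
 * UTC remains th_boottime + th_offset and the monotonic uptime is
 * untouched.
 */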
/*
 * Step the monotonic and realtime clocks, triggering any timeouts that
 * should have occurred across the interval.
 */
void
tc_setclock(const struct timespec *ts)
{
	struct bintime bt, bt2;
	struct timespec earlier;
	static int first = 1;
	int rewind = 0;
#ifndef SMALL_KERNEL
	long long adj_ticks;
#endif

	/*
	 * When we're called for the first time, during boot when
	 * the root partition is mounted, we need to set boottime.
	 */
	if (first) {
		tc_setrealtimeclock(ts);
		first = 0;
		return;
	}

	enqueue_randomness(ts->tv_sec);

	mtx_enter(&windup_mtx);
	TIMESPEC_TO_BINTIME(ts, &bt);
	bintimesub(&bt, &timehands->th_boottime, &bt);

	/*
	 * Don't rewind the offset.
	 */
	if (bintimecmp(&bt, &timehands->th_offset, <))
		rewind = 1;

	bt2 = timehands->th_offset;

	/* XXX fiddle all the little crinkly bits around the fiords... */
	tc_windup(NULL, rewind ? NULL : &bt, NULL);
	mtx_leave(&windup_mtx);

	if (rewind) {
		BINTIME_TO_TIMESPEC(&bt, &earlier);
		printf("%s: cannot rewind uptime to %lld.%09ld\n",
		    __func__, (long long)earlier.tv_sec, earlier.tv_nsec);
		return;
	}

#ifndef SMALL_KERNEL
	/*
	 * Convert the skipped interval to hardclock ticks: whole
	 * seconds contribute hz ticks apiece; the top 32 bits of the
	 * fraction are scaled to microseconds and divided by tick
	 * (microseconds per hardclock tick).
	 */
	bintimesub(&bt, &bt2, &bt);
	bintimeadd(&naptime, &bt, &naptime);
	adj_ticks = (uint64_t)hz * bt.sec +
	    (((uint64_t)1000000 * (uint32_t)(bt.frac >> 32)) >> 32) / tick;
	if (adj_ticks > 0) {
		if (adj_ticks > INT_MAX)
			adj_ticks = INT_MAX;
		timeout_adjust_ticks(adj_ticks);
	}
#endif
}
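/*
 * tc_windup() below is the write side of the lock-free scheme used by
 * the reading functions above: the next timehands' generation is
 * zeroed while its contents are rewritten, and a new non-zero
 * generation is published, with memory barriers ordering the stores,
 * only once the structure is consistent again.  This is why readers
 * retry on a zero or changed generation.
 */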
/*
 * Initialize the next struct timehands in the ring and make
 * it the active timehands.  Along the way we might switch to a different
 * timecounter and/or do seconds processing in NTP.  Slightly magic.
 */
void
tc_windup(struct bintime *new_boottime, struct bintime *new_offset,
    int64_t *new_adjtimedelta)
{
	struct bintime bt;
	struct timecounter *active_tc;
	struct timehands *th, *tho;
	u_int64_t scale;
	u_int delta, ncount, ogen;
	int i;

	if (new_boottime != NULL || new_adjtimedelta != NULL)
		rw_assert_wrlock(&tc_lock);
	MUTEX_ASSERT_LOCKED(&windup_mtx);

	active_tc = timecounter;

	/*
	 * Make the next timehands a copy of the current one, but do not
	 * overwrite the generation or next pointer.  While we update
	 * the contents, the generation must be zero.
	 */
	tho = timehands;
	th = tho->th_next;
	ogen = th->th_generation;
	th->th_generation = 0;
	membar_producer();
	memcpy(th, tho, offsetof(struct timehands, th_generation));

	/*
	 * If changing the boot offset, do so before updating the
	 * offset fields.
	 */
	if (new_offset != NULL)
		th->th_offset = *new_offset;

	/*
	 * Capture a timecounter delta on the current timecounter and if
	 * changing timecounters, a counter value from the new timecounter.
	 * Update the offset fields accordingly.
	 */
	delta = tc_delta(th);
	if (th->th_counter != active_tc)
		ncount = active_tc->tc_get_timecount(active_tc);
	else
		ncount = 0;
	th->th_offset_count += delta;
	th->th_offset_count &= th->th_counter->tc_counter_mask;
	bintimeaddfrac(&th->th_offset, th->th_scale * delta, &th->th_offset);

#ifdef notyet
	/*
	 * Hardware latching timecounters may not generate interrupts on
	 * PPS events, so instead we poll them.  There is a finite risk that
	 * the hardware might capture a count which is later than the one we
	 * got above, and therefore possibly in the next NTP second which might
	 * have a different rate than the current NTP second.  It doesn't
	 * matter in practice.
	 */
	if (tho->th_counter->tc_poll_pps)
		tho->th_counter->tc_poll_pps(tho->th_counter);
#endif

	/*
	 * If changing the boot time or clock adjustment, do so before
	 * NTP processing.
	 */
	if (new_boottime != NULL)
		th->th_boottime = *new_boottime;
	if (new_adjtimedelta != NULL)
		th->th_adjtimedelta = *new_adjtimedelta;

	/*
	 * Deal with NTP second processing.  The for loop normally
	 * iterates at most once, but in extreme situations it might
	 * keep NTP sane if timeouts are not run for several seconds.
	 * At boot, the time step can be large when the TOD hardware
	 * has been read, so on really large steps, we call
	 * ntp_update_second only twice.  We need to call it twice in
	 * case we missed a leap second.
	 */
	bt = th->th_offset;
	bintimeadd(&bt, &th->th_boottime, &bt);
	i = bt.sec - tho->th_microtime.tv_sec;
	if (i > LARGE_STEP)
		i = 2;
	for (; i > 0; i--)
		ntp_update_second(th);

	/* Update the UTC timestamps used by the get*() functions. */
	/* XXX shouldn't do this here.  Should force non-`get' versions. */
	BINTIME_TO_TIMEVAL(&bt, &th->th_microtime);
	BINTIME_TO_TIMESPEC(&bt, &th->th_nanotime);

	/* Now is a good time to change timecounters. */
	if (th->th_counter != active_tc) {
		th->th_counter = active_tc;
		th->th_offset_count = ncount;
	}

	/*-
	 * Recalculate the scaling factor.  We want the number of 1/2^64
	 * fractions of a second per period of the hardware counter, taking
	 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
	 * processing provides us with.
	 *
	 * The th_adjustment is nanoseconds per second with 32 bit binary
	 * fraction and we want 64 bit binary fraction of second:
	 *
	 *	 x = a * 2^32 / 10^9 = a * 4.294967296
	 *
	 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
	 * we can only multiply by about 850 without overflowing, but that
	 * leaves suitably precise fractions for multiply before divide.
	 *
	 * Divide before multiply with a fraction of 2199/512 results in a
	 * systematic undercompensation of 10PPM of th_adjustment.  On a
	 * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
	 *
	 * We happily sacrifice the lowest of the 64 bits of our result
	 * to the goddess of code clarity.
	 */
	scale = (u_int64_t)1 << 63;
	scale += (th->th_adjustment / 1024) * 2199;
	scale /= th->th_counter->tc_frequency;
	th->th_scale = scale * 2;

	/*
	 * Now that the struct timehands is again consistent, set the new
	 * generation number, making sure to not make it zero.
	 */
	if (++ogen == 0)
		ogen = 1;
	membar_producer();
	th->th_generation = ogen;

	/* Go live with the new struct timehands. */
	time_second = th->th_microtime.tv_sec;
	time_uptime = th->th_offset.sec;
	membar_producer();
	timehands = th;
}
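/*
 * A numeric check of the scale math above, for illustration:
 * 2199/512 = 4.294921875, about 10.6PPM below the exact
 * 2^32 / 10^9 = 4.294967296, which is the systematic undercompensation
 * the comment describes.  With a zero adjustment and a 1GHz counter,
 * th_scale works out to about 2^64 / 10^9, i.e. one nanosecond per
 * counter tick in 2^-64 units.
 */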
/* Report or change the active timecounter hardware. */
int
sysctl_tc_hardware(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	char newname[32];
	struct timecounter *newtc, *tc;
	int error;

	tc = timecounter;
	strlcpy(newname, tc->tc_name, sizeof(newname));

	error = sysctl_string(oldp, oldlenp, newp, newlen, newname,
	    sizeof(newname));
	if (error != 0 || strcmp(newname, tc->tc_name) == 0)
		return (error);
	SLIST_FOREACH(newtc, &tc_list, tc_next) {
		if (strcmp(newname, newtc->tc_name) != 0)
			continue;

		/* Warm up new timecounter. */
		(void)newtc->tc_get_timecount(newtc);
		(void)newtc->tc_get_timecount(newtc);

		rw_enter_write(&tc_lock);
		timecounter = newtc;
		rw_exit_write(&tc_lock);

		return (0);
	}
	return (EINVAL);
}

/* Report the available timecounter hardware. */
int
sysctl_tc_choice(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	char buf[32], *spc, *choices;
	struct timecounter *tc;
	int error, maxlen;

	if (SLIST_EMPTY(&tc_list))
		return (sysctl_rdstring(oldp, oldlenp, newp, ""));

	spc = "";
	maxlen = 0;
	SLIST_FOREACH(tc, &tc_list, tc_next)
		maxlen += sizeof(buf);
	choices = malloc(maxlen, M_TEMP, M_WAITOK);
	*choices = '\0';
	SLIST_FOREACH(tc, &tc_list, tc_next) {
		snprintf(buf, sizeof(buf), "%s%s(%d)",
		    spc, tc->tc_name, tc->tc_quality);
		spc = " ";
		strlcat(choices, buf, maxlen);
	}
	error = sysctl_rdstring(oldp, oldlenp, newp, choices);
	free(choices, M_TEMP, maxlen);
	return (error);
}

/*
 * Timecounters need to be updated every so often to prevent the hardware
 * counter from overflowing.  Updating also recalculates the cached values
 * used by the get*() family of functions, so their precision depends on
 * the update frequency.
 */
static int tc_tick;

void
tc_ticktock(void)
{
	static int count;

	if (++count < tc_tick)
		return;
	if (!mtx_enter_try(&windup_mtx))
		return;
	count = 0;
	tc_windup(NULL, NULL, NULL);
	mtx_leave(&windup_mtx);
}

void
inittimecounter(void)
{
#ifdef DEBUG
	u_int p;
#endif

	/*
	 * Set the initial timeout to
	 * max(1, <approx. number of hardclock ticks in a millisecond>).
	 * People should probably not use the sysctl to set the timeout
	 * to smaller than its initial value, since that value is the
	 * smallest reasonable one.  If they want better timestamps they
	 * should use the non-"get"* functions.
	 */
	if (hz > 1000)
		tc_tick = (hz + 500) / 1000;
	else
		tc_tick = 1;
#ifdef DEBUG
	p = (tc_tick * 1000000) / hz;
	printf("Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000);
#endif

	/* warm up new timecounter (again) and get rolling. */
	(void)timecounter->tc_get_timecount(timecounter);
	(void)timecounter->tc_get_timecount(timecounter);
}
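/*
 * For example, with hz=100 the initial tc_tick is 1 and tc_windup()
 * runs on every hardclock tick, i.e. every 10ms; with hz=8192 it is
 * (8192 + 500) / 1000 = 8, giving an update roughly every millisecond.
 * This bounds how stale the cached get*() timestamps can be.
 */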
/*
 * Return timecounter-related information.
 */
int
sysctl_tc(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	if (namelen != 1)
		return (ENOTDIR);

	switch (name[0]) {
	case KERN_TIMECOUNTER_TICK:
		return (sysctl_rdint(oldp, oldlenp, newp, tc_tick));
	case KERN_TIMECOUNTER_TIMESTEPWARNINGS:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &timestepwarnings));
	case KERN_TIMECOUNTER_HARDWARE:
		return (sysctl_tc_hardware(oldp, oldlenp, newp, newlen));
	case KERN_TIMECOUNTER_CHOICE:
		return (sysctl_tc_choice(oldp, oldlenp, newp, newlen));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * Skew the timehands according to any adjfreq(2)/adjtime(2) adjustments.
 */
void
ntp_update_second(struct timehands *th)
{
	int64_t adj;

	MUTEX_ASSERT_LOCKED(&windup_mtx);

	if (th->th_adjtimedelta > 0)
		adj = MIN(5000, th->th_adjtimedelta);
	else
		adj = MAX(-5000, th->th_adjtimedelta);
	th->th_adjtimedelta -= adj;
	th->th_adjustment = (adj * 1000) << 32;
	th->th_adjustment += th->th_counter->tc_freq_adj;
}

void
tc_adjfreq(int64_t *old, int64_t *new)
{
	if (old != NULL) {
		rw_assert_anylock(&tc_lock);
		*old = timecounter->tc_freq_adj;
	}
	if (new != NULL) {
		rw_assert_wrlock(&tc_lock);
		mtx_enter(&windup_mtx);
		timecounter->tc_freq_adj = *new;
		tc_windup(NULL, NULL, NULL);
		mtx_leave(&windup_mtx);
	}
}

void
tc_adjtime(int64_t *old, int64_t *new)
{
	struct timehands *th;
	u_int gen;

	if (old != NULL) {
		do {
			th = timehands;
			gen = th->th_generation;
			membar_consumer();
			*old = th->th_adjtimedelta;
			membar_consumer();
		} while (gen == 0 || gen != th->th_generation);
	}
	if (new != NULL) {
		rw_assert_wrlock(&tc_lock);
		mtx_enter(&windup_mtx);
		tc_windup(NULL, NULL, new);
		mtx_leave(&windup_mtx);
	}
}
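/*
 * For illustration: adjtime(2) deltas are in microseconds, and
 * ntp_update_second() above slews at most 5000us per second.  A
 * pending th_adjtimedelta of 12000us is therefore consumed over three
 * seconds, as 5000us + 5000us + 2000us; tc_adjtime() reports the
 * unconsumed remainder via `old'.
 */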