1 /* $NetBSD: kern_tc.c,v 1.28 2007/12/15 18:20:11 yamt Exp $ */ 2 3 /*- 4 * ---------------------------------------------------------------------------- 5 * "THE BEER-WARE LICENSE" (Revision 42): 6 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you 7 * can do whatever you want with this stuff. If we meet some day, and you think 8 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp 9 * --------------------------------------------------------------------------- 10 */ 11 12 #include <sys/cdefs.h> 13 /* __FBSDID("$FreeBSD: src/sys/kern/kern_tc.c,v 1.166 2005/09/19 22:16:31 andre Exp $"); */ 14 __KERNEL_RCSID(0, "$NetBSD: kern_tc.c,v 1.28 2007/12/15 18:20:11 yamt Exp $"); 15 16 #include "opt_ntp.h" 17 18 #include <sys/param.h> 19 #ifdef __HAVE_TIMECOUNTER /* XXX */ 20 #include <sys/kernel.h> 21 #include <sys/reboot.h> /* XXX just to get AB_VERBOSE */ 22 #include <sys/sysctl.h> 23 #include <sys/syslog.h> 24 #include <sys/systm.h> 25 #include <sys/timepps.h> 26 #include <sys/timetc.h> 27 #include <sys/timex.h> 28 #include <sys/evcnt.h> 29 #include <sys/kauth.h> 30 #include <sys/mutex.h> 31 #include <sys/atomic.h> 32 33 /* 34 * A large step happens on boot. This constant detects such steps. 35 * It is relatively small so that ntp_update_second gets called enough 36 * in the typical 'missed a couple of seconds' case, but doesn't loop 37 * forever when the time step is large. 38 */ 39 #define LARGE_STEP 200 40 41 /* 42 * Implement a dummy timecounter which we can use until we get a real one 43 * in the air. This allows the console and other early stuff to use 44 * time services. 45 */ 46 47 static u_int 48 dummy_get_timecount(struct timecounter *tc) 49 { 50 static u_int now; 51 52 return (++now); 53 } 54 55 static struct timecounter dummy_timecounter = { 56 dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000, NULL, NULL, 57 }; 58 59 struct timehands { 60 /* These fields must be initialized by the driver. */ 61 struct timecounter *th_counter; 62 int64_t th_adjustment; 63 u_int64_t th_scale; 64 u_int th_offset_count; 65 struct bintime th_offset; 66 struct timeval th_microtime; 67 struct timespec th_nanotime; 68 /* Fields not to be copied in tc_windup start with th_generation. */ 69 volatile u_int th_generation; 70 struct timehands *th_next; 71 }; 72 73 static struct timehands th0; 74 static struct timehands th9 = { .th_next = &th0, }; 75 static struct timehands th8 = { .th_next = &th9, }; 76 static struct timehands th7 = { .th_next = &th8, }; 77 static struct timehands th6 = { .th_next = &th7, }; 78 static struct timehands th5 = { .th_next = &th6, }; 79 static struct timehands th4 = { .th_next = &th5, }; 80 static struct timehands th3 = { .th_next = &th4, }; 81 static struct timehands th2 = { .th_next = &th3, }; 82 static struct timehands th1 = { .th_next = &th2, }; 83 static struct timehands th0 = { 84 .th_counter = &dummy_timecounter, 85 .th_scale = (uint64_t)-1 / 1000000, 86 .th_offset = { .sec = 1, .frac = 0 }, 87 .th_generation = 1, 88 .th_next = &th1, 89 }; 90 91 static struct timehands *volatile timehands = &th0; 92 struct timecounter *timecounter = &dummy_timecounter; 93 static struct timecounter *timecounters = &dummy_timecounter; 94 95 time_t time_second = 1; 96 time_t time_uptime = 1; 97 98 static struct bintime timebasebin; 99 100 static int timestepwarnings; 101 102 extern kmutex_t time_lock; 103 104 #ifdef __FreeBSD__ 105 SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW, 106 ×tepwarnings, 0, ""); 107 #endif /* __FreeBSD__ */ 108 109 /* 110 * sysctl helper routine for kern.timercounter.hardware 111 */ 112 static int 113 sysctl_kern_timecounter_hardware(SYSCTLFN_ARGS) 114 { 115 struct sysctlnode node; 116 int error; 117 char newname[MAX_TCNAMELEN]; 118 struct timecounter *newtc, *tc; 119 120 tc = timecounter; 121 122 strlcpy(newname, tc->tc_name, sizeof(newname)); 123 124 node = *rnode; 125 node.sysctl_data = newname; 126 node.sysctl_size = sizeof(newname); 127 128 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 129 130 if (error || 131 newp == NULL || 132 strncmp(newname, tc->tc_name, sizeof(newname)) == 0) 133 return error; 134 135 if (l != NULL && (error = kauth_authorize_system(l->l_cred, 136 KAUTH_SYSTEM_TIME, KAUTH_REQ_SYSTEM_TIME_TIMECOUNTERS, newname, 137 NULL, NULL)) != 0) 138 return (error); 139 140 if (!cold) 141 mutex_enter(&time_lock); 142 error = EINVAL; 143 for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) { 144 if (strcmp(newname, newtc->tc_name) != 0) 145 continue; 146 /* Warm up new timecounter. */ 147 (void)newtc->tc_get_timecount(newtc); 148 (void)newtc->tc_get_timecount(newtc); 149 timecounter = newtc; 150 error = 0; 151 break; 152 } 153 if (!cold) 154 mutex_exit(&time_lock); 155 return error; 156 } 157 158 static int 159 sysctl_kern_timecounter_choice(SYSCTLFN_ARGS) 160 { 161 char buf[MAX_TCNAMELEN+48]; 162 char *where = oldp; 163 const char *spc; 164 struct timecounter *tc; 165 size_t needed, left, slen; 166 int error; 167 168 if (newp != NULL) 169 return (EPERM); 170 if (namelen != 0) 171 return (EINVAL); 172 173 spc = ""; 174 error = 0; 175 needed = 0; 176 left = *oldlenp; 177 178 mutex_enter(&time_lock); 179 for (tc = timecounters; error == 0 && tc != NULL; tc = tc->tc_next) { 180 if (where == NULL) { 181 needed += sizeof(buf); /* be conservative */ 182 } else { 183 slen = snprintf(buf, sizeof(buf), "%s%s(q=%d, f=%" PRId64 184 " Hz)", spc, tc->tc_name, tc->tc_quality, 185 tc->tc_frequency); 186 if (left < slen + 1) 187 break; 188 /* XXX use sysctl_copyout? (from sysctl_hw_disknames) */ 189 /* XXX copyout with held lock. */ 190 error = copyout(buf, where, slen + 1); 191 spc = " "; 192 where += slen; 193 needed += slen; 194 left -= slen; 195 } 196 } 197 mutex_exit(&time_lock); 198 199 *oldlenp = needed; 200 return (error); 201 } 202 203 SYSCTL_SETUP(sysctl_timecounter_setup, "sysctl timecounter setup") 204 { 205 const struct sysctlnode *node; 206 207 sysctl_createv(clog, 0, NULL, &node, 208 CTLFLAG_PERMANENT, 209 CTLTYPE_NODE, "timecounter", 210 SYSCTL_DESCR("time counter information"), 211 NULL, 0, NULL, 0, 212 CTL_KERN, CTL_CREATE, CTL_EOL); 213 214 if (node != NULL) { 215 sysctl_createv(clog, 0, NULL, NULL, 216 CTLFLAG_PERMANENT, 217 CTLTYPE_STRING, "choice", 218 SYSCTL_DESCR("available counters"), 219 sysctl_kern_timecounter_choice, 0, NULL, 0, 220 CTL_KERN, node->sysctl_num, CTL_CREATE, CTL_EOL); 221 222 sysctl_createv(clog, 0, NULL, NULL, 223 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 224 CTLTYPE_STRING, "hardware", 225 SYSCTL_DESCR("currently active time counter"), 226 sysctl_kern_timecounter_hardware, 0, NULL, MAX_TCNAMELEN, 227 CTL_KERN, node->sysctl_num, CTL_CREATE, CTL_EOL); 228 229 sysctl_createv(clog, 0, NULL, NULL, 230 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 231 CTLTYPE_INT, "timestepwarnings", 232 SYSCTL_DESCR("log time steps"), 233 NULL, 0, ×tepwarnings, 0, 234 CTL_KERN, node->sysctl_num, CTL_CREATE, CTL_EOL); 235 } 236 } 237 238 #define TC_STATS(name) \ 239 static struct evcnt n##name = \ 240 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "timecounter", #name); \ 241 EVCNT_ATTACH_STATIC(n##name) 242 243 TC_STATS(binuptime); TC_STATS(nanouptime); TC_STATS(microuptime); 244 TC_STATS(bintime); TC_STATS(nanotime); TC_STATS(microtime); 245 TC_STATS(getbinuptime); TC_STATS(getnanouptime); TC_STATS(getmicrouptime); 246 TC_STATS(getbintime); TC_STATS(getnanotime); TC_STATS(getmicrotime); 247 TC_STATS(setclock); 248 249 #undef TC_STATS 250 251 static void tc_windup(void); 252 253 /* 254 * Return the difference between the timehands' counter value now and what 255 * was when we copied it to the timehands' offset_count. 256 */ 257 static __inline u_int 258 tc_delta(struct timehands *th) 259 { 260 struct timecounter *tc; 261 262 tc = th->th_counter; 263 return ((tc->tc_get_timecount(tc) - 264 th->th_offset_count) & tc->tc_counter_mask); 265 } 266 267 /* 268 * Functions for reading the time. We have to loop until we are sure that 269 * the timehands that we operated on was not updated under our feet. See 270 * the comment in <sys/timevar.h> for a description of these 12 functions. 271 */ 272 273 void 274 binuptime(struct bintime *bt) 275 { 276 struct timehands *th; 277 u_int gen; 278 279 nbinuptime.ev_count++; 280 do { 281 th = timehands; 282 gen = th->th_generation; 283 *bt = th->th_offset; 284 bintime_addx(bt, th->th_scale * tc_delta(th)); 285 } while (gen == 0 || gen != th->th_generation); 286 } 287 288 void 289 nanouptime(struct timespec *tsp) 290 { 291 struct bintime bt; 292 293 nnanouptime.ev_count++; 294 binuptime(&bt); 295 bintime2timespec(&bt, tsp); 296 } 297 298 void 299 microuptime(struct timeval *tvp) 300 { 301 struct bintime bt; 302 303 nmicrouptime.ev_count++; 304 binuptime(&bt); 305 bintime2timeval(&bt, tvp); 306 } 307 308 void 309 bintime(struct bintime *bt) 310 { 311 312 nbintime.ev_count++; 313 binuptime(bt); 314 bintime_add(bt, &timebasebin); 315 } 316 317 void 318 nanotime(struct timespec *tsp) 319 { 320 struct bintime bt; 321 322 nnanotime.ev_count++; 323 bintime(&bt); 324 bintime2timespec(&bt, tsp); 325 } 326 327 void 328 microtime(struct timeval *tvp) 329 { 330 struct bintime bt; 331 332 nmicrotime.ev_count++; 333 bintime(&bt); 334 bintime2timeval(&bt, tvp); 335 } 336 337 void 338 getbinuptime(struct bintime *bt) 339 { 340 struct timehands *th; 341 u_int gen; 342 343 ngetbinuptime.ev_count++; 344 do { 345 th = timehands; 346 gen = th->th_generation; 347 *bt = th->th_offset; 348 } while (gen == 0 || gen != th->th_generation); 349 } 350 351 void 352 getnanouptime(struct timespec *tsp) 353 { 354 struct timehands *th; 355 u_int gen; 356 357 ngetnanouptime.ev_count++; 358 do { 359 th = timehands; 360 gen = th->th_generation; 361 bintime2timespec(&th->th_offset, tsp); 362 } while (gen == 0 || gen != th->th_generation); 363 } 364 365 void 366 getmicrouptime(struct timeval *tvp) 367 { 368 struct timehands *th; 369 u_int gen; 370 371 ngetmicrouptime.ev_count++; 372 do { 373 th = timehands; 374 gen = th->th_generation; 375 bintime2timeval(&th->th_offset, tvp); 376 } while (gen == 0 || gen != th->th_generation); 377 } 378 379 void 380 getbintime(struct bintime *bt) 381 { 382 struct timehands *th; 383 u_int gen; 384 385 ngetbintime.ev_count++; 386 do { 387 th = timehands; 388 gen = th->th_generation; 389 *bt = th->th_offset; 390 } while (gen == 0 || gen != th->th_generation); 391 bintime_add(bt, &timebasebin); 392 } 393 394 void 395 getnanotime(struct timespec *tsp) 396 { 397 struct timehands *th; 398 u_int gen; 399 400 ngetnanotime.ev_count++; 401 do { 402 th = timehands; 403 gen = th->th_generation; 404 *tsp = th->th_nanotime; 405 } while (gen == 0 || gen != th->th_generation); 406 } 407 408 void 409 getmicrotime(struct timeval *tvp) 410 { 411 struct timehands *th; 412 u_int gen; 413 414 ngetmicrotime.ev_count++; 415 do { 416 th = timehands; 417 gen = th->th_generation; 418 *tvp = th->th_microtime; 419 } while (gen == 0 || gen != th->th_generation); 420 } 421 422 /* 423 * Initialize a new timecounter and possibly use it. 424 */ 425 void 426 tc_init(struct timecounter *tc) 427 { 428 u_int u; 429 int s; 430 431 u = tc->tc_frequency / tc->tc_counter_mask; 432 /* XXX: We need some margin here, 10% is a guess */ 433 u *= 11; 434 u /= 10; 435 if (u > hz && tc->tc_quality >= 0) { 436 tc->tc_quality = -2000; 437 aprint_verbose( 438 "timecounter: Timecounter \"%s\" frequency %ju Hz", 439 tc->tc_name, (uintmax_t)tc->tc_frequency); 440 aprint_verbose(" -- Insufficient hz, needs at least %u\n", u); 441 } else if (tc->tc_quality >= 0 || bootverbose) { 442 aprint_verbose( 443 "timecounter: Timecounter \"%s\" frequency %ju Hz " 444 "quality %d\n", tc->tc_name, (uintmax_t)tc->tc_frequency, 445 tc->tc_quality); 446 } 447 448 mutex_enter(&time_lock); 449 s = splsched(); 450 tc->tc_next = timecounters; 451 timecounters = tc; 452 /* 453 * Never automatically use a timecounter with negative quality. 454 * Even though we run on the dummy counter, switching here may be 455 * worse since this timecounter may not be monotonous. 456 */ 457 if (tc->tc_quality >= 0 && (tc->tc_quality > timecounter->tc_quality || 458 (tc->tc_quality == timecounter->tc_quality && 459 tc->tc_frequency > timecounter->tc_frequency))) { 460 (void)tc->tc_get_timecount(tc); 461 (void)tc->tc_get_timecount(tc); 462 timecounter = tc; 463 tc_windup(); 464 } 465 splx(s); 466 mutex_exit(&time_lock); 467 } 468 469 /* Report the frequency of the current timecounter. */ 470 u_int64_t 471 tc_getfrequency(void) 472 { 473 474 return (timehands->th_counter->tc_frequency); 475 } 476 477 /* 478 * Step our concept of UTC. This is done by modifying our estimate of 479 * when we booted. 480 * XXX: not locked. 481 */ 482 void 483 tc_setclock(struct timespec *ts) 484 { 485 struct timespec ts2; 486 struct bintime bt, bt2; 487 488 nsetclock.ev_count++; 489 binuptime(&bt2); 490 timespec2bintime(ts, &bt); 491 bintime_sub(&bt, &bt2); 492 bintime_add(&bt2, &timebasebin); 493 timebasebin = bt; 494 495 /* XXX fiddle all the little crinkly bits around the fiords... */ 496 tc_windup(); 497 if (timestepwarnings) { 498 bintime2timespec(&bt2, &ts2); 499 log(LOG_INFO, "Time stepped from %jd.%09ld to %jd.%09ld\n", 500 (intmax_t)ts2.tv_sec, ts2.tv_nsec, 501 (intmax_t)ts->tv_sec, ts->tv_nsec); 502 } 503 } 504 505 /* 506 * Initialize the next struct timehands in the ring and make 507 * it the active timehands. Along the way we might switch to a different 508 * timecounter and/or do seconds processing in NTP. Slightly magic. 509 */ 510 static void 511 tc_windup(void) 512 { 513 struct bintime bt; 514 struct timehands *th, *tho; 515 u_int64_t scale; 516 u_int delta, ncount, ogen; 517 int i, s_update; 518 time_t t; 519 520 s_update = 0; 521 522 /* 523 * Make the next timehands a copy of the current one, but do not 524 * overwrite the generation or next pointer. While we update 525 * the contents, the generation must be zero. Ensure global 526 * visibility of the generation before proceeding. 527 */ 528 tho = timehands; 529 th = tho->th_next; 530 ogen = th->th_generation; 531 th->th_generation = 0; 532 membar_producer(); 533 bcopy(tho, th, offsetof(struct timehands, th_generation)); 534 535 /* 536 * Capture a timecounter delta on the current timecounter and if 537 * changing timecounters, a counter value from the new timecounter. 538 * Update the offset fields accordingly. 539 */ 540 delta = tc_delta(th); 541 if (th->th_counter != timecounter) 542 ncount = timecounter->tc_get_timecount(timecounter); 543 else 544 ncount = 0; 545 th->th_offset_count += delta; 546 th->th_offset_count &= th->th_counter->tc_counter_mask; 547 bintime_addx(&th->th_offset, th->th_scale * delta); 548 549 /* 550 * Hardware latching timecounters may not generate interrupts on 551 * PPS events, so instead we poll them. There is a finite risk that 552 * the hardware might capture a count which is later than the one we 553 * got above, and therefore possibly in the next NTP second which might 554 * have a different rate than the current NTP second. It doesn't 555 * matter in practice. 556 */ 557 if (tho->th_counter->tc_poll_pps) 558 tho->th_counter->tc_poll_pps(tho->th_counter); 559 560 /* 561 * Deal with NTP second processing. The for loop normally 562 * iterates at most once, but in extreme situations it might 563 * keep NTP sane if timeouts are not run for several seconds. 564 * At boot, the time step can be large when the TOD hardware 565 * has been read, so on really large steps, we call 566 * ntp_update_second only twice. We need to call it twice in 567 * case we missed a leap second. 568 * If NTP is not compiled in ntp_update_second still calculates 569 * the adjustment resulting from adjtime() calls. 570 */ 571 bt = th->th_offset; 572 bintime_add(&bt, &timebasebin); 573 i = bt.sec - tho->th_microtime.tv_sec; 574 if (i > LARGE_STEP) 575 i = 2; 576 for (; i > 0; i--) { 577 t = bt.sec; 578 ntp_update_second(&th->th_adjustment, &bt.sec); 579 s_update = 1; 580 if (bt.sec != t) 581 timebasebin.sec += bt.sec - t; 582 } 583 584 /* Update the UTC timestamps used by the get*() functions. */ 585 /* XXX shouldn't do this here. Should force non-`get' versions. */ 586 bintime2timeval(&bt, &th->th_microtime); 587 bintime2timespec(&bt, &th->th_nanotime); 588 589 /* Now is a good time to change timecounters. */ 590 if (th->th_counter != timecounter) { 591 th->th_counter = timecounter; 592 th->th_offset_count = ncount; 593 s_update = 1; 594 } 595 596 /*- 597 * Recalculate the scaling factor. We want the number of 1/2^64 598 * fractions of a second per period of the hardware counter, taking 599 * into account the th_adjustment factor which the NTP PLL/adjtime(2) 600 * processing provides us with. 601 * 602 * The th_adjustment is nanoseconds per second with 32 bit binary 603 * fraction and we want 64 bit binary fraction of second: 604 * 605 * x = a * 2^32 / 10^9 = a * 4.294967296 606 * 607 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int 608 * we can only multiply by about 850 without overflowing, but that 609 * leaves suitably precise fractions for multiply before divide. 610 * 611 * Divide before multiply with a fraction of 2199/512 results in a 612 * systematic undercompensation of 10PPM of th_adjustment. On a 613 * 5000PPM adjustment this is a 0.05PPM error. This is acceptable. 614 * 615 * We happily sacrifice the lowest of the 64 bits of our result 616 * to the goddess of code clarity. 617 * 618 */ 619 if (s_update) { 620 scale = (u_int64_t)1 << 63; 621 scale += (th->th_adjustment / 1024) * 2199; 622 scale /= th->th_counter->tc_frequency; 623 th->th_scale = scale * 2; 624 } 625 /* 626 * Now that the struct timehands is again consistent, set the new 627 * generation number, making sure to not make it zero. Ensure 628 * changes are globally visible before changing. 629 */ 630 if (++ogen == 0) 631 ogen = 1; 632 membar_producer(); 633 th->th_generation = ogen; 634 635 /* 636 * Go live with the new struct timehands. Ensure changes are 637 * globally visible before changing. 638 */ 639 time_second = th->th_microtime.tv_sec; 640 time_uptime = th->th_offset.sec; 641 membar_producer(); 642 timehands = th; 643 644 /* 645 * Force users of the old timehand to move on. This is 646 * necessary for MP systems; we need to ensure that the 647 * consumers will move away from the old timehand before 648 * we begin updating it again when we eventually wrap 649 * around. 650 */ 651 if (++tho->th_generation == 0) 652 tho->th_generation = 1; 653 } 654 655 /* 656 * RFC 2783 PPS-API implementation. 657 */ 658 659 int 660 pps_ioctl(u_long cmd, void *data, struct pps_state *pps) 661 { 662 pps_params_t *app; 663 pps_info_t *pipi; 664 #ifdef PPS_SYNC 665 int *epi; 666 #endif 667 668 KASSERT(pps != NULL); /* XXX ("NULL pps pointer in pps_ioctl") */ 669 switch (cmd) { 670 case PPS_IOC_CREATE: 671 return (0); 672 case PPS_IOC_DESTROY: 673 return (0); 674 case PPS_IOC_SETPARAMS: 675 app = (pps_params_t *)data; 676 if (app->mode & ~pps->ppscap) 677 return (EINVAL); 678 pps->ppsparam = *app; 679 return (0); 680 case PPS_IOC_GETPARAMS: 681 app = (pps_params_t *)data; 682 *app = pps->ppsparam; 683 app->api_version = PPS_API_VERS_1; 684 return (0); 685 case PPS_IOC_GETCAP: 686 *(int*)data = pps->ppscap; 687 return (0); 688 case PPS_IOC_FETCH: 689 pipi = (pps_info_t *)data; 690 pps->ppsinfo.current_mode = pps->ppsparam.mode; 691 *pipi = pps->ppsinfo; 692 return (0); 693 case PPS_IOC_KCBIND: 694 #ifdef PPS_SYNC 695 epi = (int *)data; 696 /* XXX Only root should be able to do this */ 697 if (*epi & ~pps->ppscap) 698 return (EINVAL); 699 pps->kcmode = *epi; 700 return (0); 701 #else 702 return (EOPNOTSUPP); 703 #endif 704 default: 705 return (EPASSTHROUGH); 706 } 707 } 708 709 void 710 pps_init(struct pps_state *pps) 711 { 712 pps->ppscap |= PPS_TSFMT_TSPEC; 713 if (pps->ppscap & PPS_CAPTUREASSERT) 714 pps->ppscap |= PPS_OFFSETASSERT; 715 if (pps->ppscap & PPS_CAPTURECLEAR) 716 pps->ppscap |= PPS_OFFSETCLEAR; 717 } 718 719 void 720 pps_capture(struct pps_state *pps) 721 { 722 struct timehands *th; 723 724 KASSERT(pps != NULL); /* XXX ("NULL pps pointer in pps_capture") */ 725 th = timehands; 726 pps->capgen = th->th_generation; 727 pps->capth = th; 728 pps->capcount = th->th_counter->tc_get_timecount(th->th_counter); 729 if (pps->capgen != th->th_generation) 730 pps->capgen = 0; 731 } 732 733 void 734 pps_event(struct pps_state *pps, int event) 735 { 736 struct bintime bt; 737 struct timespec ts, *tsp, *osp; 738 u_int tcount, *pcount; 739 int foff, fhard; 740 pps_seq_t *pseq; 741 742 KASSERT(pps != NULL); /* XXX ("NULL pps pointer in pps_event") */ 743 /* If the timecounter was wound up underneath us, bail out. */ 744 if (pps->capgen == 0 || pps->capgen != pps->capth->th_generation) 745 return; 746 747 /* Things would be easier with arrays. */ 748 if (event == PPS_CAPTUREASSERT) { 749 tsp = &pps->ppsinfo.assert_timestamp; 750 osp = &pps->ppsparam.assert_offset; 751 foff = pps->ppsparam.mode & PPS_OFFSETASSERT; 752 fhard = pps->kcmode & PPS_CAPTUREASSERT; 753 pcount = &pps->ppscount[0]; 754 pseq = &pps->ppsinfo.assert_sequence; 755 } else { 756 tsp = &pps->ppsinfo.clear_timestamp; 757 osp = &pps->ppsparam.clear_offset; 758 foff = pps->ppsparam.mode & PPS_OFFSETCLEAR; 759 fhard = pps->kcmode & PPS_CAPTURECLEAR; 760 pcount = &pps->ppscount[1]; 761 pseq = &pps->ppsinfo.clear_sequence; 762 } 763 764 /* 765 * If the timecounter changed, we cannot compare the count values, so 766 * we have to drop the rest of the PPS-stuff until the next event. 767 */ 768 if (pps->ppstc != pps->capth->th_counter) { 769 pps->ppstc = pps->capth->th_counter; 770 *pcount = pps->capcount; 771 pps->ppscount[2] = pps->capcount; 772 return; 773 } 774 775 /* Convert the count to a timespec. */ 776 tcount = pps->capcount - pps->capth->th_offset_count; 777 tcount &= pps->capth->th_counter->tc_counter_mask; 778 bt = pps->capth->th_offset; 779 bintime_addx(&bt, pps->capth->th_scale * tcount); 780 bintime_add(&bt, &timebasebin); 781 bintime2timespec(&bt, &ts); 782 783 /* If the timecounter was wound up underneath us, bail out. */ 784 if (pps->capgen != pps->capth->th_generation) 785 return; 786 787 *pcount = pps->capcount; 788 (*pseq)++; 789 *tsp = ts; 790 791 if (foff) { 792 timespecadd(tsp, osp, tsp); 793 if (tsp->tv_nsec < 0) { 794 tsp->tv_nsec += 1000000000; 795 tsp->tv_sec -= 1; 796 } 797 } 798 #ifdef PPS_SYNC 799 if (fhard) { 800 u_int64_t scale; 801 802 /* 803 * Feed the NTP PLL/FLL. 804 * The FLL wants to know how many (hardware) nanoseconds 805 * elapsed since the previous event. 806 */ 807 tcount = pps->capcount - pps->ppscount[2]; 808 pps->ppscount[2] = pps->capcount; 809 tcount &= pps->capth->th_counter->tc_counter_mask; 810 scale = (u_int64_t)1 << 63; 811 scale /= pps->capth->th_counter->tc_frequency; 812 scale *= 2; 813 bt.sec = 0; 814 bt.frac = 0; 815 bintime_addx(&bt, scale * tcount); 816 bintime2timespec(&bt, &ts); 817 hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec); 818 } 819 #endif 820 } 821 822 /* 823 * Timecounters need to be updated every so often to prevent the hardware 824 * counter from overflowing. Updating also recalculates the cached values 825 * used by the get*() family of functions, so their precision depends on 826 * the update frequency. 827 */ 828 829 static int tc_tick; 830 831 void 832 tc_ticktock(void) 833 { 834 static int count; 835 836 if (++count < tc_tick) 837 return; 838 count = 0; 839 tc_windup(); 840 } 841 842 void 843 inittimecounter(void) 844 { 845 u_int p; 846 847 /* 848 * Set the initial timeout to 849 * max(1, <approx. number of hardclock ticks in a millisecond>). 850 * People should probably not use the sysctl to set the timeout 851 * to smaller than its inital value, since that value is the 852 * smallest reasonable one. If they want better timestamps they 853 * should use the non-"get"* functions. 854 */ 855 if (hz > 1000) 856 tc_tick = (hz + 500) / 1000; 857 else 858 tc_tick = 1; 859 p = (tc_tick * 1000000) / hz; 860 aprint_verbose("timecounter: Timecounters tick every %d.%03u msec\n", 861 p / 1000, p % 1000); 862 863 /* warm up new timecounter (again) and get rolling. */ 864 (void)timecounter->tc_get_timecount(timecounter); 865 (void)timecounter->tc_get_timecount(timecounter); 866 } 867 868 #endif /* __HAVE_TIMECOUNTER */ 869