1 /* $NetBSD: kern_tc.c,v 1.26 2007/11/23 16:03:48 elad Exp $ */ 2 3 /*- 4 * ---------------------------------------------------------------------------- 5 * "THE BEER-WARE LICENSE" (Revision 42): 6 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you 7 * can do whatever you want with this stuff. If we meet some day, and you think 8 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp 9 * --------------------------------------------------------------------------- 10 */ 11 12 #include <sys/cdefs.h> 13 /* __FBSDID("$FreeBSD: src/sys/kern/kern_tc.c,v 1.166 2005/09/19 22:16:31 andre Exp $"); */ 14 __KERNEL_RCSID(0, "$NetBSD: kern_tc.c,v 1.26 2007/11/23 16:03:48 elad Exp $"); 15 16 #include "opt_ntp.h" 17 18 #include <sys/param.h> 19 #ifdef __HAVE_TIMECOUNTER /* XXX */ 20 #include <sys/kernel.h> 21 #include <sys/reboot.h> /* XXX just to get AB_VERBOSE */ 22 #include <sys/sysctl.h> 23 #include <sys/syslog.h> 24 #include <sys/systm.h> 25 #include <sys/timepps.h> 26 #include <sys/timetc.h> 27 #include <sys/timex.h> 28 #include <sys/evcnt.h> 29 #include <sys/kauth.h> 30 #include <sys/mutex.h> 31 32 /* 33 * A large step happens on boot. This constant detects such steps. 34 * It is relatively small so that ntp_update_second gets called enough 35 * in the typical 'missed a couple of seconds' case, but doesn't loop 36 * forever when the time step is large. 37 */ 38 #define LARGE_STEP 200 39 40 /* 41 * Implement a dummy timecounter which we can use until we get a real one 42 * in the air. This allows the console and other early stuff to use 43 * time services. 44 */ 45 46 static u_int 47 dummy_get_timecount(struct timecounter *tc) 48 { 49 static u_int now; 50 51 return (++now); 52 } 53 54 static struct timecounter dummy_timecounter = { 55 dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000, NULL, NULL, 56 }; 57 58 struct timehands { 59 /* These fields must be initialized by the driver. */ 60 struct timecounter *th_counter; 61 int64_t th_adjustment; 62 u_int64_t th_scale; 63 u_int th_offset_count; 64 struct bintime th_offset; 65 struct timeval th_microtime; 66 struct timespec th_nanotime; 67 /* Fields not to be copied in tc_windup start with th_generation. */ 68 volatile u_int th_generation; 69 struct timehands *th_next; 70 }; 71 72 static struct timehands th0; 73 static struct timehands th9 = { .th_next = &th0, }; 74 static struct timehands th8 = { .th_next = &th9, }; 75 static struct timehands th7 = { .th_next = &th8, }; 76 static struct timehands th6 = { .th_next = &th7, }; 77 static struct timehands th5 = { .th_next = &th6, }; 78 static struct timehands th4 = { .th_next = &th5, }; 79 static struct timehands th3 = { .th_next = &th4, }; 80 static struct timehands th2 = { .th_next = &th3, }; 81 static struct timehands th1 = { .th_next = &th2, }; 82 static struct timehands th0 = { 83 .th_counter = &dummy_timecounter, 84 .th_scale = (uint64_t)-1 / 1000000, 85 .th_offset = { .sec = 1, .frac = 0 }, 86 .th_generation = 1, 87 .th_next = &th1, 88 }; 89 90 static struct timehands *volatile timehands = &th0; 91 struct timecounter *timecounter = &dummy_timecounter; 92 static struct timecounter *timecounters = &dummy_timecounter; 93 94 time_t time_second = 1; 95 time_t time_uptime = 1; 96 97 static struct bintime timebasebin; 98 99 static int timestepwarnings; 100 101 extern kmutex_t time_lock; 102 103 #ifdef __FreeBSD__ 104 SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW, 105 ×tepwarnings, 0, ""); 106 #endif /* __FreeBSD__ */ 107 108 /* 109 * sysctl helper routine for kern.timercounter.current 110 */ 111 static int 112 sysctl_kern_timecounter_hardware(SYSCTLFN_ARGS) 113 { 114 struct sysctlnode node; 115 int error; 116 char newname[MAX_TCNAMELEN]; 117 struct timecounter *newtc, *tc; 118 119 tc = timecounter; 120 121 strlcpy(newname, tc->tc_name, sizeof(newname)); 122 123 node = *rnode; 124 node.sysctl_data = newname; 125 node.sysctl_size = sizeof(newname); 126 127 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 128 129 if (error || 130 newp == NULL || 131 strncmp(newname, tc->tc_name, sizeof(newname)) == 0) 132 return error; 133 134 if (l != NULL && (error = kauth_authorize_system(l->l_cred, 135 KAUTH_SYSTEM_TIME, KAUTH_REQ_SYSTEM_TIME_TIMECOUNTERS, newname, 136 NULL, NULL)) != 0) 137 return (error); 138 139 if (!cold) 140 mutex_enter(&time_lock); 141 error = EINVAL; 142 for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) { 143 if (strcmp(newname, newtc->tc_name) != 0) 144 continue; 145 /* Warm up new timecounter. */ 146 (void)newtc->tc_get_timecount(newtc); 147 (void)newtc->tc_get_timecount(newtc); 148 timecounter = newtc; 149 error = 0; 150 break; 151 } 152 if (!cold) 153 mutex_exit(&time_lock); 154 return error; 155 } 156 157 static int 158 sysctl_kern_timecounter_choice(SYSCTLFN_ARGS) 159 { 160 char buf[MAX_TCNAMELEN+48]; 161 char *where = oldp; 162 const char *spc; 163 struct timecounter *tc; 164 size_t needed, left, slen; 165 int error; 166 167 if (newp != NULL) 168 return (EPERM); 169 if (namelen != 0) 170 return (EINVAL); 171 172 spc = ""; 173 error = 0; 174 needed = 0; 175 left = *oldlenp; 176 177 mutex_enter(&time_lock); 178 for (tc = timecounters; error == 0 && tc != NULL; tc = tc->tc_next) { 179 if (where == NULL) { 180 needed += sizeof(buf); /* be conservative */ 181 } else { 182 slen = snprintf(buf, sizeof(buf), "%s%s(q=%d, f=%" PRId64 183 " Hz)", spc, tc->tc_name, tc->tc_quality, 184 tc->tc_frequency); 185 if (left < slen + 1) 186 break; 187 /* XXX use sysctl_copyout? (from sysctl_hw_disknames) */ 188 /* XXX copyout with held lock. */ 189 error = copyout(buf, where, slen + 1); 190 spc = " "; 191 where += slen; 192 needed += slen; 193 left -= slen; 194 } 195 } 196 mutex_exit(&time_lock); 197 198 *oldlenp = needed; 199 return (error); 200 } 201 202 SYSCTL_SETUP(sysctl_timecounter_setup, "sysctl timecounter setup") 203 { 204 const struct sysctlnode *node; 205 206 sysctl_createv(clog, 0, NULL, &node, 207 CTLFLAG_PERMANENT, 208 CTLTYPE_NODE, "timecounter", 209 SYSCTL_DESCR("time counter information"), 210 NULL, 0, NULL, 0, 211 CTL_KERN, CTL_CREATE, CTL_EOL); 212 213 if (node != NULL) { 214 sysctl_createv(clog, 0, NULL, NULL, 215 CTLFLAG_PERMANENT, 216 CTLTYPE_STRING, "choice", 217 SYSCTL_DESCR("available counters"), 218 sysctl_kern_timecounter_choice, 0, NULL, 0, 219 CTL_KERN, node->sysctl_num, CTL_CREATE, CTL_EOL); 220 221 sysctl_createv(clog, 0, NULL, NULL, 222 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 223 CTLTYPE_STRING, "hardware", 224 SYSCTL_DESCR("currently active time counter"), 225 sysctl_kern_timecounter_hardware, 0, NULL, MAX_TCNAMELEN, 226 CTL_KERN, node->sysctl_num, CTL_CREATE, CTL_EOL); 227 228 sysctl_createv(clog, 0, NULL, NULL, 229 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 230 CTLTYPE_INT, "timestepwarnings", 231 SYSCTL_DESCR("log time steps"), 232 NULL, 0, ×tepwarnings, 0, 233 CTL_KERN, node->sysctl_num, CTL_CREATE, CTL_EOL); 234 } 235 } 236 237 #define TC_STATS(name) \ 238 static struct evcnt n##name = \ 239 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "timecounter", #name); \ 240 EVCNT_ATTACH_STATIC(n##name) 241 242 TC_STATS(binuptime); TC_STATS(nanouptime); TC_STATS(microuptime); 243 TC_STATS(bintime); TC_STATS(nanotime); TC_STATS(microtime); 244 TC_STATS(getbinuptime); TC_STATS(getnanouptime); TC_STATS(getmicrouptime); 245 TC_STATS(getbintime); TC_STATS(getnanotime); TC_STATS(getmicrotime); 246 TC_STATS(setclock); 247 248 #undef TC_STATS 249 250 static void tc_windup(void); 251 252 /* 253 * Return the difference between the timehands' counter value now and what 254 * was when we copied it to the timehands' offset_count. 255 */ 256 static __inline u_int 257 tc_delta(struct timehands *th) 258 { 259 struct timecounter *tc; 260 261 tc = th->th_counter; 262 return ((tc->tc_get_timecount(tc) - 263 th->th_offset_count) & tc->tc_counter_mask); 264 } 265 266 /* 267 * Functions for reading the time. We have to loop until we are sure that 268 * the timehands that we operated on was not updated under our feet. See 269 * the comment in <sys/timevar.h> for a description of these 12 functions. 270 */ 271 272 void 273 binuptime(struct bintime *bt) 274 { 275 struct timehands *th; 276 u_int gen; 277 278 nbinuptime.ev_count++; 279 do { 280 th = timehands; 281 gen = th->th_generation; 282 *bt = th->th_offset; 283 bintime_addx(bt, th->th_scale * tc_delta(th)); 284 } while (gen == 0 || gen != th->th_generation); 285 } 286 287 void 288 nanouptime(struct timespec *tsp) 289 { 290 struct bintime bt; 291 292 nnanouptime.ev_count++; 293 binuptime(&bt); 294 bintime2timespec(&bt, tsp); 295 } 296 297 void 298 microuptime(struct timeval *tvp) 299 { 300 struct bintime bt; 301 302 nmicrouptime.ev_count++; 303 binuptime(&bt); 304 bintime2timeval(&bt, tvp); 305 } 306 307 void 308 bintime(struct bintime *bt) 309 { 310 311 nbintime.ev_count++; 312 binuptime(bt); 313 bintime_add(bt, &timebasebin); 314 } 315 316 void 317 nanotime(struct timespec *tsp) 318 { 319 struct bintime bt; 320 321 nnanotime.ev_count++; 322 bintime(&bt); 323 bintime2timespec(&bt, tsp); 324 } 325 326 void 327 microtime(struct timeval *tvp) 328 { 329 struct bintime bt; 330 331 nmicrotime.ev_count++; 332 bintime(&bt); 333 bintime2timeval(&bt, tvp); 334 } 335 336 void 337 getbinuptime(struct bintime *bt) 338 { 339 struct timehands *th; 340 u_int gen; 341 342 ngetbinuptime.ev_count++; 343 do { 344 th = timehands; 345 gen = th->th_generation; 346 *bt = th->th_offset; 347 } while (gen == 0 || gen != th->th_generation); 348 } 349 350 void 351 getnanouptime(struct timespec *tsp) 352 { 353 struct timehands *th; 354 u_int gen; 355 356 ngetnanouptime.ev_count++; 357 do { 358 th = timehands; 359 gen = th->th_generation; 360 bintime2timespec(&th->th_offset, tsp); 361 } while (gen == 0 || gen != th->th_generation); 362 } 363 364 void 365 getmicrouptime(struct timeval *tvp) 366 { 367 struct timehands *th; 368 u_int gen; 369 370 ngetmicrouptime.ev_count++; 371 do { 372 th = timehands; 373 gen = th->th_generation; 374 bintime2timeval(&th->th_offset, tvp); 375 } while (gen == 0 || gen != th->th_generation); 376 } 377 378 void 379 getbintime(struct bintime *bt) 380 { 381 struct timehands *th; 382 u_int gen; 383 384 ngetbintime.ev_count++; 385 do { 386 th = timehands; 387 gen = th->th_generation; 388 *bt = th->th_offset; 389 } while (gen == 0 || gen != th->th_generation); 390 bintime_add(bt, &timebasebin); 391 } 392 393 void 394 getnanotime(struct timespec *tsp) 395 { 396 struct timehands *th; 397 u_int gen; 398 399 ngetnanotime.ev_count++; 400 do { 401 th = timehands; 402 gen = th->th_generation; 403 *tsp = th->th_nanotime; 404 } while (gen == 0 || gen != th->th_generation); 405 } 406 407 void 408 getmicrotime(struct timeval *tvp) 409 { 410 struct timehands *th; 411 u_int gen; 412 413 ngetmicrotime.ev_count++; 414 do { 415 th = timehands; 416 gen = th->th_generation; 417 *tvp = th->th_microtime; 418 } while (gen == 0 || gen != th->th_generation); 419 } 420 421 /* 422 * Initialize a new timecounter and possibly use it. 423 */ 424 void 425 tc_init(struct timecounter *tc) 426 { 427 u_int u; 428 int s; 429 430 u = tc->tc_frequency / tc->tc_counter_mask; 431 /* XXX: We need some margin here, 10% is a guess */ 432 u *= 11; 433 u /= 10; 434 if (u > hz && tc->tc_quality >= 0) { 435 tc->tc_quality = -2000; 436 aprint_verbose( 437 "timecounter: Timecounter \"%s\" frequency %ju Hz", 438 tc->tc_name, (uintmax_t)tc->tc_frequency); 439 aprint_verbose(" -- Insufficient hz, needs at least %u\n", u); 440 } else if (tc->tc_quality >= 0 || bootverbose) { 441 aprint_verbose( 442 "timecounter: Timecounter \"%s\" frequency %ju Hz " 443 "quality %d\n", tc->tc_name, (uintmax_t)tc->tc_frequency, 444 tc->tc_quality); 445 } 446 447 mutex_enter(&time_lock); 448 s = splsched(); 449 tc->tc_next = timecounters; 450 timecounters = tc; 451 /* 452 * Never automatically use a timecounter with negative quality. 453 * Even though we run on the dummy counter, switching here may be 454 * worse since this timecounter may not be monotonous. 455 */ 456 if (tc->tc_quality >= 0 && (tc->tc_quality > timecounter->tc_quality || 457 (tc->tc_quality == timecounter->tc_quality && 458 tc->tc_frequency > timecounter->tc_frequency))) { 459 (void)tc->tc_get_timecount(tc); 460 (void)tc->tc_get_timecount(tc); 461 timecounter = tc; 462 tc_windup(); 463 } 464 splx(s); 465 mutex_exit(&time_lock); 466 } 467 468 /* Report the frequency of the current timecounter. */ 469 u_int64_t 470 tc_getfrequency(void) 471 { 472 473 return (timehands->th_counter->tc_frequency); 474 } 475 476 /* 477 * Step our concept of UTC. This is done by modifying our estimate of 478 * when we booted. 479 * XXX: not locked. 480 */ 481 void 482 tc_setclock(struct timespec *ts) 483 { 484 struct timespec ts2; 485 struct bintime bt, bt2; 486 487 nsetclock.ev_count++; 488 binuptime(&bt2); 489 timespec2bintime(ts, &bt); 490 bintime_sub(&bt, &bt2); 491 bintime_add(&bt2, &timebasebin); 492 timebasebin = bt; 493 494 /* XXX fiddle all the little crinkly bits around the fiords... */ 495 tc_windup(); 496 if (timestepwarnings) { 497 bintime2timespec(&bt2, &ts2); 498 log(LOG_INFO, "Time stepped from %jd.%09ld to %jd.%09ld\n", 499 (intmax_t)ts2.tv_sec, ts2.tv_nsec, 500 (intmax_t)ts->tv_sec, ts->tv_nsec); 501 } 502 } 503 504 /* 505 * Initialize the next struct timehands in the ring and make 506 * it the active timehands. Along the way we might switch to a different 507 * timecounter and/or do seconds processing in NTP. Slightly magic. 508 */ 509 static void 510 tc_windup(void) 511 { 512 struct bintime bt; 513 struct timehands *th, *tho; 514 u_int64_t scale; 515 u_int delta, ncount, ogen; 516 int i, s_update; 517 time_t t; 518 519 s_update = 0; 520 521 /* 522 * Make the next timehands a copy of the current one, but do not 523 * overwrite the generation or next pointer. While we update 524 * the contents, the generation must be zero. Ensure global 525 * visibility of the generation before proceeding. 526 */ 527 tho = timehands; 528 th = tho->th_next; 529 ogen = th->th_generation; 530 th->th_generation = 0; 531 mb_write(); 532 bcopy(tho, th, offsetof(struct timehands, th_generation)); 533 534 /* 535 * Capture a timecounter delta on the current timecounter and if 536 * changing timecounters, a counter value from the new timecounter. 537 * Update the offset fields accordingly. 538 */ 539 delta = tc_delta(th); 540 if (th->th_counter != timecounter) 541 ncount = timecounter->tc_get_timecount(timecounter); 542 else 543 ncount = 0; 544 th->th_offset_count += delta; 545 th->th_offset_count &= th->th_counter->tc_counter_mask; 546 bintime_addx(&th->th_offset, th->th_scale * delta); 547 548 /* 549 * Hardware latching timecounters may not generate interrupts on 550 * PPS events, so instead we poll them. There is a finite risk that 551 * the hardware might capture a count which is later than the one we 552 * got above, and therefore possibly in the next NTP second which might 553 * have a different rate than the current NTP second. It doesn't 554 * matter in practice. 555 */ 556 if (tho->th_counter->tc_poll_pps) 557 tho->th_counter->tc_poll_pps(tho->th_counter); 558 559 /* 560 * Deal with NTP second processing. The for loop normally 561 * iterates at most once, but in extreme situations it might 562 * keep NTP sane if timeouts are not run for several seconds. 563 * At boot, the time step can be large when the TOD hardware 564 * has been read, so on really large steps, we call 565 * ntp_update_second only twice. We need to call it twice in 566 * case we missed a leap second. 567 * If NTP is not compiled in ntp_update_second still calculates 568 * the adjustment resulting from adjtime() calls. 569 */ 570 bt = th->th_offset; 571 bintime_add(&bt, &timebasebin); 572 i = bt.sec - tho->th_microtime.tv_sec; 573 if (i > LARGE_STEP) 574 i = 2; 575 for (; i > 0; i--) { 576 t = bt.sec; 577 ntp_update_second(&th->th_adjustment, &bt.sec); 578 s_update = 1; 579 if (bt.sec != t) 580 timebasebin.sec += bt.sec - t; 581 } 582 583 /* Update the UTC timestamps used by the get*() functions. */ 584 /* XXX shouldn't do this here. Should force non-`get' versions. */ 585 bintime2timeval(&bt, &th->th_microtime); 586 bintime2timespec(&bt, &th->th_nanotime); 587 588 /* Now is a good time to change timecounters. */ 589 if (th->th_counter != timecounter) { 590 th->th_counter = timecounter; 591 th->th_offset_count = ncount; 592 s_update = 1; 593 } 594 595 /*- 596 * Recalculate the scaling factor. We want the number of 1/2^64 597 * fractions of a second per period of the hardware counter, taking 598 * into account the th_adjustment factor which the NTP PLL/adjtime(2) 599 * processing provides us with. 600 * 601 * The th_adjustment is nanoseconds per second with 32 bit binary 602 * fraction and we want 64 bit binary fraction of second: 603 * 604 * x = a * 2^32 / 10^9 = a * 4.294967296 605 * 606 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int 607 * we can only multiply by about 850 without overflowing, but that 608 * leaves suitably precise fractions for multiply before divide. 609 * 610 * Divide before multiply with a fraction of 2199/512 results in a 611 * systematic undercompensation of 10PPM of th_adjustment. On a 612 * 5000PPM adjustment this is a 0.05PPM error. This is acceptable. 613 * 614 * We happily sacrifice the lowest of the 64 bits of our result 615 * to the goddess of code clarity. 616 * 617 */ 618 if (s_update) { 619 scale = (u_int64_t)1 << 63; 620 scale += (th->th_adjustment / 1024) * 2199; 621 scale /= th->th_counter->tc_frequency; 622 th->th_scale = scale * 2; 623 } 624 /* 625 * Now that the struct timehands is again consistent, set the new 626 * generation number, making sure to not make it zero. Ensure 627 * changes are globally visible before changing. 628 */ 629 if (++ogen == 0) 630 ogen = 1; 631 mb_write(); 632 th->th_generation = ogen; 633 634 /* 635 * Go live with the new struct timehands. Ensure changes are 636 * globally visible before changing. 637 */ 638 time_second = th->th_microtime.tv_sec; 639 time_uptime = th->th_offset.sec; 640 mb_write(); 641 timehands = th; 642 643 /* 644 * Force users of the old timehand to move on. This is 645 * necessary for MP systems; we need to ensure that the 646 * consumers will move away from the old timehand before 647 * we begin updating it again when we eventually wrap 648 * around. 649 */ 650 if (++tho->th_generation == 0) 651 tho->th_generation = 1; 652 } 653 654 /* 655 * RFC 2783 PPS-API implementation. 656 */ 657 658 int 659 pps_ioctl(u_long cmd, void *data, struct pps_state *pps) 660 { 661 pps_params_t *app; 662 pps_info_t *pipi; 663 #ifdef PPS_SYNC 664 int *epi; 665 #endif 666 667 KASSERT(pps != NULL); /* XXX ("NULL pps pointer in pps_ioctl") */ 668 switch (cmd) { 669 case PPS_IOC_CREATE: 670 return (0); 671 case PPS_IOC_DESTROY: 672 return (0); 673 case PPS_IOC_SETPARAMS: 674 app = (pps_params_t *)data; 675 if (app->mode & ~pps->ppscap) 676 return (EINVAL); 677 pps->ppsparam = *app; 678 return (0); 679 case PPS_IOC_GETPARAMS: 680 app = (pps_params_t *)data; 681 *app = pps->ppsparam; 682 app->api_version = PPS_API_VERS_1; 683 return (0); 684 case PPS_IOC_GETCAP: 685 *(int*)data = pps->ppscap; 686 return (0); 687 case PPS_IOC_FETCH: 688 pipi = (pps_info_t *)data; 689 pps->ppsinfo.current_mode = pps->ppsparam.mode; 690 *pipi = pps->ppsinfo; 691 return (0); 692 case PPS_IOC_KCBIND: 693 #ifdef PPS_SYNC 694 epi = (int *)data; 695 /* XXX Only root should be able to do this */ 696 if (*epi & ~pps->ppscap) 697 return (EINVAL); 698 pps->kcmode = *epi; 699 return (0); 700 #else 701 return (EOPNOTSUPP); 702 #endif 703 default: 704 return (EPASSTHROUGH); 705 } 706 } 707 708 void 709 pps_init(struct pps_state *pps) 710 { 711 pps->ppscap |= PPS_TSFMT_TSPEC; 712 if (pps->ppscap & PPS_CAPTUREASSERT) 713 pps->ppscap |= PPS_OFFSETASSERT; 714 if (pps->ppscap & PPS_CAPTURECLEAR) 715 pps->ppscap |= PPS_OFFSETCLEAR; 716 } 717 718 void 719 pps_capture(struct pps_state *pps) 720 { 721 struct timehands *th; 722 723 KASSERT(pps != NULL); /* XXX ("NULL pps pointer in pps_capture") */ 724 th = timehands; 725 pps->capgen = th->th_generation; 726 pps->capth = th; 727 pps->capcount = th->th_counter->tc_get_timecount(th->th_counter); 728 if (pps->capgen != th->th_generation) 729 pps->capgen = 0; 730 } 731 732 void 733 pps_event(struct pps_state *pps, int event) 734 { 735 struct bintime bt; 736 struct timespec ts, *tsp, *osp; 737 u_int tcount, *pcount; 738 int foff, fhard; 739 pps_seq_t *pseq; 740 741 KASSERT(pps != NULL); /* XXX ("NULL pps pointer in pps_event") */ 742 /* If the timecounter was wound up underneath us, bail out. */ 743 if (pps->capgen == 0 || pps->capgen != pps->capth->th_generation) 744 return; 745 746 /* Things would be easier with arrays. */ 747 if (event == PPS_CAPTUREASSERT) { 748 tsp = &pps->ppsinfo.assert_timestamp; 749 osp = &pps->ppsparam.assert_offset; 750 foff = pps->ppsparam.mode & PPS_OFFSETASSERT; 751 fhard = pps->kcmode & PPS_CAPTUREASSERT; 752 pcount = &pps->ppscount[0]; 753 pseq = &pps->ppsinfo.assert_sequence; 754 } else { 755 tsp = &pps->ppsinfo.clear_timestamp; 756 osp = &pps->ppsparam.clear_offset; 757 foff = pps->ppsparam.mode & PPS_OFFSETCLEAR; 758 fhard = pps->kcmode & PPS_CAPTURECLEAR; 759 pcount = &pps->ppscount[1]; 760 pseq = &pps->ppsinfo.clear_sequence; 761 } 762 763 /* 764 * If the timecounter changed, we cannot compare the count values, so 765 * we have to drop the rest of the PPS-stuff until the next event. 766 */ 767 if (pps->ppstc != pps->capth->th_counter) { 768 pps->ppstc = pps->capth->th_counter; 769 *pcount = pps->capcount; 770 pps->ppscount[2] = pps->capcount; 771 return; 772 } 773 774 /* Convert the count to a timespec. */ 775 tcount = pps->capcount - pps->capth->th_offset_count; 776 tcount &= pps->capth->th_counter->tc_counter_mask; 777 bt = pps->capth->th_offset; 778 bintime_addx(&bt, pps->capth->th_scale * tcount); 779 bintime_add(&bt, &timebasebin); 780 bintime2timespec(&bt, &ts); 781 782 /* If the timecounter was wound up underneath us, bail out. */ 783 if (pps->capgen != pps->capth->th_generation) 784 return; 785 786 *pcount = pps->capcount; 787 (*pseq)++; 788 *tsp = ts; 789 790 if (foff) { 791 timespecadd(tsp, osp, tsp); 792 if (tsp->tv_nsec < 0) { 793 tsp->tv_nsec += 1000000000; 794 tsp->tv_sec -= 1; 795 } 796 } 797 #ifdef PPS_SYNC 798 if (fhard) { 799 u_int64_t scale; 800 801 /* 802 * Feed the NTP PLL/FLL. 803 * The FLL wants to know how many (hardware) nanoseconds 804 * elapsed since the previous event. 805 */ 806 tcount = pps->capcount - pps->ppscount[2]; 807 pps->ppscount[2] = pps->capcount; 808 tcount &= pps->capth->th_counter->tc_counter_mask; 809 scale = (u_int64_t)1 << 63; 810 scale /= pps->capth->th_counter->tc_frequency; 811 scale *= 2; 812 bt.sec = 0; 813 bt.frac = 0; 814 bintime_addx(&bt, scale * tcount); 815 bintime2timespec(&bt, &ts); 816 hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec); 817 } 818 #endif 819 } 820 821 /* 822 * Timecounters need to be updated every so often to prevent the hardware 823 * counter from overflowing. Updating also recalculates the cached values 824 * used by the get*() family of functions, so their precision depends on 825 * the update frequency. 826 */ 827 828 static int tc_tick; 829 830 void 831 tc_ticktock(void) 832 { 833 static int count; 834 835 if (++count < tc_tick) 836 return; 837 count = 0; 838 tc_windup(); 839 } 840 841 void 842 inittimecounter(void) 843 { 844 u_int p; 845 846 /* 847 * Set the initial timeout to 848 * max(1, <approx. number of hardclock ticks in a millisecond>). 849 * People should probably not use the sysctl to set the timeout 850 * to smaller than its inital value, since that value is the 851 * smallest reasonable one. If they want better timestamps they 852 * should use the non-"get"* functions. 853 */ 854 if (hz > 1000) 855 tc_tick = (hz + 500) / 1000; 856 else 857 tc_tick = 1; 858 p = (tc_tick * 1000000) / hz; 859 aprint_verbose("timecounter: Timecounters tick every %d.%03u msec\n", 860 p / 1000, p % 1000); 861 862 /* warm up new timecounter (again) and get rolling. */ 863 (void)timecounter->tc_get_timecount(timecounter); 864 (void)timecounter->tc_get_timecount(timecounter); 865 } 866 867 #endif /* __HAVE_TIMECOUNTER */ 868