/*	$OpenBSD: ntp.c,v 1.165 2020/06/22 06:11:34 otto Exp $ */

/*
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 * Copyright (c) 2004 Alexander Guy <alexander.guy@andern.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <errno.h>
#include <fcntl.h>
#include <paths.h>
#include <poll.h>
#include <pwd.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <time.h>
#include <unistd.h>
#include <err.h>

#include "ntpd.h"

/* Fixed slots at the front of the pollfd array; dynamic fds follow. */
#define	PFD_PIPE_MAIN	0	/* imsg pipe to the privileged parent */
#define	PFD_PIPE_DNS	1	/* imsg pipe to the DNS child process */
#define	PFD_SOCK_CTL	2	/* listening control socket (ntpctl) */
#define	PFD_MAX		3

/* Set from the signal handler; checked by the main loop to exit cleanly. */
volatile sig_atomic_t	 ntp_quit = 0;
struct imsgbuf		*ibuf_main;	/* imsg channel to privileged parent */
static struct imsgbuf	*ibuf_dns;	/* imsg channel to DNS process */
struct ntpd_conf	*conf;		/* engine-wide configuration/state */
struct ctl_conns	 ctl_conns;	/* active ntpctl client connections */
u_int			 peer_cnt;	/* number of configured/discovered peers */
u_int			 sensors_cnt;	/* number of timedelta sensors in use */
extern u_int		 constraint_cnt;

void	 ntp_sighdlr(int);
int	 ntp_dispatch_imsg(void);
int	 ntp_dispatch_imsg_dns(void);
void	 peer_add(struct ntp_peer *);
void	 peer_remove(struct ntp_peer *);
int	 inpool(struct sockaddr_storage *,
	    struct sockaddr_storage[MAX_SERVERS_DNS], size_t);

/*
 * Signal handler: request a clean shutdown of the ntp engine.
 * Only sets the async-signal-safe flag; all real work happens in the
 * main loop.
 */
void
ntp_sighdlr(int sig)
{
	switch (sig) {
	case SIGINT:
	case SIGTERM:
		ntp_quit = 1;
		break;
	}
}

/*
 * Main entry point of the unprivileged "ntp engine" child process.
 *
 * Forks off the DNS helper process, chroots into pw->pw_dir, drops
 * privileges to pw, pledges "stdio inet", and then runs the poll loop
 * that drives peer queries, sensor scans, constraint checks, control
 * connections and the two imsg pipes until ntp_quit is set.
 * Does not return; exits the process when the loop terminates.
 */
void
ntp_main(struct ntpd_conf *nconf, struct passwd *pw, int argc, char **argv)
{
	int			 a, b, nfds, i, j, idx_peers, timeout;
	int			 nullfd, pipe_dns[2], idx_clients;
	int			 ctls;
	int			 fd_ctl;
	u_int			 pfd_elms = 0, idx2peer_elms = 0;
	u_int			 listener_cnt, new_cnt, sent_cnt, trial_cnt;
	u_int			 ctl_cnt;
	struct pollfd		*pfd = NULL;
	struct servent		*se;
	struct listen_addr	*la;
	struct ntp_peer		*p;
	struct ntp_peer		**idx2peer = NULL;
	struct ntp_sensor	*s, *next_s;
	struct constraint	*cstr;
	struct timespec		 tp;
	struct stat		 stb;
	struct ctl_conn		*cc;
	time_t			 nextaction, last_sensor_scan = 0, now;
	void			*newp;

	/* Socketpair for the imsg channel to the DNS helper process. */
	if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, PF_UNSPEC,
	    pipe_dns) == -1)
		fatal("socketpair");

	start_child(NTPDNS_PROC_NAME, pipe_dns[1], argc, argv);

	log_init(nconf->debug ? LOG_TO_STDERR : LOG_TO_SYSLOG, nconf->verbose,
	    LOG_DAEMON);
	if (!nconf->debug && setsid() == -1)
		fatal("setsid");
	log_procinit("ntp");

	if ((se = getservbyname("ntp", "udp")) == NULL)
		fatal("getservbyname");

	/* Start control socket. */
	if ((fd_ctl = control_init(CTLSOCKET)) == -1)
		fatalx("control socket init failed");
	if (control_listen(fd_ctl) == -1)
		fatalx("control socket listen failed");
	if ((nullfd = open("/dev/null", O_RDWR, 0)) == -1)
		fatal(NULL);

	/*
	 * Sanity-check the privsep directory before chrooting into it:
	 * it must be root-owned and not group/other-writable.
	 */
	if (stat(pw->pw_dir, &stb) == -1) {
		fatal("privsep dir %s could not be opened", pw->pw_dir);
	}
	if (stb.st_uid != 0 || (stb.st_mode & (S_IWGRP|S_IWOTH)) != 0) {
		fatalx("bad privsep dir %s permissions: %o",
		    pw->pw_dir, stb.st_mode);
	}
	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	if (!nconf->debug) {
		dup2(nullfd, STDIN_FILENO);
		dup2(nullfd, STDOUT_FILENO);
		dup2(nullfd, STDERR_FILENO);
	}
	close(nullfd);

	setproctitle("ntp engine");

	conf = nconf;
	/* Must run before privileges are dropped: binds listen sockets. */
	setup_listeners(se, conf, &listener_cnt);

	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	endservent();

	/* The ntp process will want to open NTP client sockets -> "inet" */
	if (pledge("stdio inet", NULL) == -1)
		err(1, "pledge");

	signal(SIGTERM, ntp_sighdlr);
	signal(SIGINT, ntp_sighdlr);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);
	signal(SIGCHLD, SIG_DFL);

	/* imsg channel to the privileged parent (fd inherited on fork). */
	if ((ibuf_main = malloc(sizeof(struct imsgbuf))) == NULL)
		fatal(NULL);
	imsg_init(ibuf_main, PARENT_SOCK_FILENO);
	if ((ibuf_dns = malloc(sizeof(struct imsgbuf))) == NULL)
		fatal(NULL);
	imsg_init(ibuf_dns, pipe_dns[0]);

	constraint_cnt = 0;
	conf->constraint_median = 0;
	conf->constraint_last = getmonotime();
	TAILQ_FOREACH(cstr, &conf->constraints, entry)
		constraint_cnt += constraint_init(cstr);

	TAILQ_FOREACH(p, &conf->ntp_peers, entry)
		client_peer_init(p);

	memset(&conf->status, 0, sizeof(conf->status));

	/* Reset the frequency-estimation regression accumulators. */
	conf->freq.num = 0;
	conf->freq.samples = 0;
	conf->freq.x = 0.0;
	conf->freq.xx = 0.0;
	conf->freq.xy = 0.0;
	conf->freq.y = 0.0;
	conf->freq.overall_offset = 0.0;

	conf->status.synced = 0;
	/*
	 * Derive NTP precision (log2 seconds, negative) from the
	 * realtime clock resolution: count how many halvings of the
	 * clock frequency in Hz it takes to reach 1.
	 */
	clock_getres(CLOCK_REALTIME, &tp);
	b = 1000000000 / tp.tv_nsec;	/* convert to Hz */
	for (a = 0; b > 1; a--, b >>= 1)
		;
	conf->status.precision = a;
	conf->scale = 1;

	TAILQ_INIT(&ctl_conns);
	sensor_init();

	log_info("ntp engine ready");

	ctl_cnt = 0;
	peer_cnt = 0;
	TAILQ_FOREACH(p, &conf->ntp_peers, entry)
		peer_cnt++;

	/* ----- main event loop ----- */
	while (ntp_quit == 0) {
		/*
		 * Grow (never shrink) the peer-index and pollfd arrays
		 * to fit the current number of peers, listeners and
		 * control connections.
		 */
		if (peer_cnt > idx2peer_elms) {
			if ((newp = reallocarray(idx2peer, peer_cnt,
			    sizeof(*idx2peer))) == NULL) {
				/* panic for now */
				log_warn("could not resize idx2peer from %u -> "
				    "%u entries", idx2peer_elms, peer_cnt);
				fatalx("exiting");
			}
			idx2peer = newp;
			idx2peer_elms = peer_cnt;
		}

		new_cnt = PFD_MAX +
		    peer_cnt + listener_cnt + ctl_cnt;
		if (new_cnt > pfd_elms) {
			if ((newp = reallocarray(pfd, new_cnt,
			    sizeof(*pfd))) == NULL) {
				/* panic for now */
				log_warn("could not resize pfd from %u -> "
				    "%u entries", pfd_elms, new_cnt);
				fatalx("exiting");
			}
			pfd = newp;
			pfd_elms = new_cnt;
		}

		memset(pfd, 0, sizeof(*pfd) * pfd_elms);
		memset(idx2peer, 0, sizeof(*idx2peer) * idx2peer_elms);
		/* Default wakeup in 900s; shortened below as needed. */
		nextaction = getmonotime() + 900;
		pfd[PFD_PIPE_MAIN].fd = ibuf_main->fd;
		pfd[PFD_PIPE_MAIN].events = POLLIN;
		pfd[PFD_PIPE_DNS].fd = ibuf_dns->fd;
		pfd[PFD_PIPE_DNS].events = POLLIN;
		pfd[PFD_SOCK_CTL].fd = fd_ctl;
		pfd[PFD_SOCK_CTL].events = POLLIN;

		/* Server-mode listening sockets: [PFD_MAX, idx_peers). */
		i = PFD_MAX;
		TAILQ_FOREACH(la, &conf->listen_addrs, entry) {
			pfd[i].fd = la->fd;
			pfd[i].events = POLLIN;
			i++;
		}

		idx_peers = i;
		sent_cnt = trial_cnt = 0;
		TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
			/*
			 * Untrusted peers are held back until at least
			 * one constraint has produced a median time.
			 */
			if (!p->trusted && constraint_cnt &&
			    conf->constraint_median == 0)
				continue;

			/* Time to (re)send a query to this peer? */
			if (p->next > 0 && p->next <= getmonotime()) {
				if (p->state > STATE_DNS_INPROGRESS)
					trial_cnt++;
				if (client_query(p) == 0)
					sent_cnt++;
			}
			/* Reply deadline passed: degrade trust, rotate addr. */
			if (p->deadline > 0 && p->deadline <= getmonotime()) {
				timeout = 300;
				log_debug("no reply from %s received in time, "
				    "next query %ds", log_sockaddr(
				    (struct sockaddr *)&p->addr->ss), timeout);
				/* Halve trust; log on crossing BADPEER. */
				if (p->trustlevel >= TRUSTLEVEL_BADPEER &&
				    (p->trustlevel /= 2) < TRUSTLEVEL_BADPEER)
					log_info("peer %s now invalid",
					    log_sockaddr(
					    (struct sockaddr *)&p->addr->ss));
				/*
				 * client_nextaddr() == 1 means we wrapped
				 * the address list: re-resolve via DNS and
				 * start over.
				 */
				if (client_nextaddr(p) == 1) {
					peer_addr_head_clear(p);
					client_nextaddr(p);
				}
				set_next(p, timeout);
			}
			/* Too many send failures: rotate address, back off. */
			if (p->senderrors > MAX_SEND_ERRORS) {
				log_debug("failed to send query to %s, "
				    "next query %ds", log_sockaddr(
				    (struct sockaddr *)&p->addr->ss),
				    INTERVAL_QUERY_PATHETIC);
				p->senderrors = 0;
				if (client_nextaddr(p) == 1) {
					peer_addr_head_clear(p);
					client_nextaddr(p);
				}
				set_next(p, INTERVAL_QUERY_PATHETIC);
			}
			if (p->next > 0 && p->next < nextaction)
				nextaction = p->next;
			if (p->deadline > 0 && p->deadline < nextaction)
				nextaction = p->deadline;

			/*
			 * Poll a peer's query socket only while a reply
			 * is outstanding; idx2peer maps the pfd slot
			 * back to the peer.
			 */
			if (p->state == STATE_QUERY_SENT &&
			    p->query->fd != -1) {
				pfd[i].fd = p->query->fd;
				pfd[i].events = POLLIN;
				idx2peer[i - idx_peers] = p;
				i++;
			}
		}
		idx_clients = i;

		/*
		 * Sensors are consulted only when trusted outright or
		 * when constraints (if any) have produced a median.
		 */
		if (!TAILQ_EMPTY(&conf->ntp_conf_sensors) &&
		    (conf->trusted_sensors || constraint_cnt == 0 ||
		    conf->constraint_median != 0)) {
			if (last_sensor_scan == 0 ||
			    last_sensor_scan + SENSOR_SCAN_INTERVAL <= getmonotime()) {
				sensors_cnt = sensor_scan();
				last_sensor_scan = getmonotime();
			}
			if (sensors_cnt == 0 &&
			    nextaction > last_sensor_scan + SENSOR_SCAN_INTERVAL)
				nextaction = last_sensor_scan + SENSOR_SCAN_INTERVAL;
			/* Recount sensors; settime from first valid offset. */
			sensors_cnt = 0;
			TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
				if (conf->settime && s->offsets[0].offset)
					priv_settime(s->offsets[0].offset, NULL);
				sensors_cnt++;
				if (s->next > 0 && s->next < nextaction)
					nextaction = s->next;
			}
		}

		/*
		 * In -s mode with nothing usable to sync from, give up
		 * on the initial time step so boot can proceed.
		 */
		if (conf->settime &&
		    ((trial_cnt > 0 && sent_cnt == 0) ||
		    (peer_cnt == 0 && sensors_cnt == 0)))
			priv_settime(0, "no valid peers configured");

		TAILQ_FOREACH(cstr, &conf->constraints, entry) {
			if (constraint_query(cstr) == -1)
				continue;
		}

		/* Flush pending imsg output when poll says we can write. */
		if (ibuf_main->w.queued > 0)
			pfd[PFD_PIPE_MAIN].events |= POLLOUT;
		if (ibuf_dns->w.queued > 0)
			pfd[PFD_PIPE_DNS].events |= POLLOUT;

		/* Control (ntpctl) connections: [idx_clients, ctls). */
		TAILQ_FOREACH(cc, &ctl_conns, entry) {
			pfd[i].fd = cc->ibuf.fd;
			pfd[i].events = POLLIN;
			if (cc->ibuf.w.queued > 0)
				pfd[i].events |= POLLOUT;
			i++;
		}
		ctls = i;

		now = getmonotime();
		timeout = nextaction - now;
		if (timeout < 0)
			timeout = 0;

		/* timeout is in seconds; 0 means "1ms", not busy-wait. */
		if ((nfds = poll(pfd, i, timeout ? timeout * 1000 : 1)) == -1)
			if (errno != EINTR) {
				log_warn("poll error");
				ntp_quit = 1;
			}

		if (nfds > 0 && (pfd[PFD_PIPE_MAIN].revents & POLLOUT))
			if (msgbuf_write(&ibuf_main->w) <= 0 &&
			    errno != EAGAIN) {
				log_warn("pipe write error (to parent)");
				ntp_quit = 1;
			}

		if (nfds > 0 && pfd[PFD_PIPE_MAIN].revents & (POLLIN|POLLERR)) {
			nfds--;
			if (ntp_dispatch_imsg() == -1) {
				log_debug("pipe read error (from main)");
				ntp_quit = 1;
			}
		}

		if (nfds > 0 && (pfd[PFD_PIPE_DNS].revents & POLLOUT))
			if (msgbuf_write(&ibuf_dns->w) <= 0 &&
			    errno != EAGAIN) {
				log_warn("pipe write error (to dns engine)");
				ntp_quit = 1;
			}

		if (nfds > 0 && pfd[PFD_PIPE_DNS].revents & (POLLIN|POLLERR)) {
			nfds--;
			if (ntp_dispatch_imsg_dns() == -1) {
				log_warn("pipe read error (from dns engine)");
				ntp_quit = 1;
			}
		}

		if (nfds > 0 && pfd[PFD_SOCK_CTL].revents & (POLLIN|POLLERR)) {
			nfds--;
			ctl_cnt += control_accept(fd_ctl);
		}

		/* Server sockets: answer incoming client queries. */
		for (j = PFD_MAX; nfds > 0 && j < idx_peers; j++)
			if (pfd[j].revents & (POLLIN|POLLERR)) {
				nfds--;
				if (server_dispatch(pfd[j].fd, conf) == -1) {
					log_warn("pipe write error (conf)");
					ntp_quit = 1;
				}
			}

		/* Peer query sockets: process replies from our servers. */
		for (; nfds > 0 && j < idx_clients; j++) {
			if (pfd[j].revents & (POLLIN|POLLERR)) {
				nfds--;
				if (client_dispatch(idx2peer[j - idx_peers],
				    conf->settime, conf->automatic) == -1) {
					log_warn("pipe write error (settime)");
					ntp_quit = 1;
				}
			}
		}

		/* ntpctl connections. */
		for (; nfds > 0 && j < ctls; j++) {
			nfds -= control_dispatch_msg(&pfd[j], &ctl_cnt);
		}

		/*
		 * Query due sensors.  _SAFE-style iteration: sensor_query()
		 * may remove the current sensor from the list.
		 */
		for (s = TAILQ_FIRST(&conf->ntp_sensors); s != NULL;
		    s = next_s) {
			next_s = TAILQ_NEXT(s, entry);
			if (s->next <= getmonotime())
				sensor_query(s);
		}
	}

	/* Shutdown: best-effort flush of both imsg channels. */
	msgbuf_write(&ibuf_main->w);
	msgbuf_clear(&ibuf_main->w);
	free(ibuf_main);
	msgbuf_write(&ibuf_dns->w);
	msgbuf_clear(&ibuf_dns->w);
	free(ibuf_dns);

	log_info("ntp engine exiting");
	exit(0);
}

/*
 * Drain and handle imsgs from the privileged parent.
 * Returns 0 on success, -1 on read/parse failure (caller shuts down).
 */
int
ntp_dispatch_imsg(void)
{
	struct imsg		 imsg;
	int			 n;

	if (((n = imsg_read(ibuf_main)) == -1 && errno != EAGAIN) || n == 0)
		return (-1);

	for (;;) {
		if ((n = imsg_get(ibuf_main, &imsg)) == -1)
			return (-1);

		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_ADJTIME:
			/* Parent reports sync state: 1 synced, 0 unsynced. */
			memcpy(&n, imsg.data, sizeof(n));
			if (n == 1 && !conf->status.synced) {
				log_info("clock is now synced");
				conf->status.synced = 1;
				priv_dns(IMSG_SYNCED, NULL, 0);
				constraint_reset();
			} else if (n == 0 && conf->status.synced) {
				log_info("clock is now unsynced");
				conf->status.synced = 0;
				priv_dns(IMSG_UNSYNCED, NULL, 0);
			}
			break;
		case IMSG_CONSTRAINT_RESULT:
			constraint_msg_result(imsg.hdr.peerid,
			    imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
			break;
		case IMSG_CONSTRAINT_CLOSE:
			constraint_msg_close(imsg.hdr.peerid,
			    imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
			break;
		default:
			break;
		}
		imsg_free(&imsg);
	}
	return (0);
}

/*
 * Return 1 if address *a already occurs among the first n entries of
 * old[] (used to avoid adding duplicate pool members), else 0.
 */
int
inpool(struct sockaddr_storage *a,
    struct sockaddr_storage old[MAX_SERVERS_DNS], size_t n)
{
	size_t	i;

	for (i = 0; i < n; i++) {
		if (a->ss_family != old[i].ss_family)
			continue;
		if (a->ss_family == AF_INET) {
			if (((struct sockaddr_in *)a)->sin_addr.s_addr ==
			    ((struct sockaddr_in *)&old[i])->sin_addr.s_addr)
				return 1;
		} else if (memcmp(&((struct sockaddr_in6 *)a)->sin6_addr,
		    &((struct sockaddr_in6 *)&old[i])->sin6_addr,
		    /*
		     * NOTE(review): length is sizeof(struct sockaddr_in6)
		     * but the compare starts at sin6_addr, so this reads
		     * past the member (still inside sockaddr_storage) and
		     * also compares scope id/padding bytes; presumably
		     * sizeof(struct in6_addr) was intended — confirm
		     * against upstream before changing.
		     */
		    sizeof(struct sockaddr_in6)) == 0) {
			return 1;
		}
	}
	return 0;
}

/*
 * Drain and handle imsgs from the DNS helper process.
 * IMSG_HOST_DNS carries resolved addresses for a peer: for a plain
 * server the address list is attached to the existing peer; for a pool
 * each new unique address becomes its own peer and the template peer is
 * removed.  Returns 0 on success, -1 on read failure.
 */
int
ntp_dispatch_imsg_dns(void)
{
	struct imsg		 imsg;
	struct sockaddr_storage	 existing[MAX_SERVERS_DNS];
	struct ntp_peer		*peer, *npeer, *tmp;
	u_int16_t		 dlen;
	u_char			*p;
	struct ntp_addr		*h;
	size_t			 addrcount, peercount;
	int			 n;

	if (((n = imsg_read(ibuf_dns)) == -1 && errno != EAGAIN) || n == 0)
		return (-1);

	for (;;) {
		if ((n = imsg_get(ibuf_dns, &imsg)) == -1)
			return (-1);

		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_HOST_DNS:
			/* Find the peer this lookup was issued for. */
			TAILQ_FOREACH(peer, &conf->ntp_peers, entry)
				if (peer->id == imsg.hdr.peerid)
					break;
			if (peer == NULL) {
				log_warnx("IMSG_HOST_DNS with invalid peerID");
				break;
			}
			if (peer->addr != NULL) {
				log_warnx("IMSG_HOST_DNS but addr != NULL!");
				break;
			}

			if (peer->addr_head.pool) {
				/*
				 * Collect addresses already used by other
				 * members of the same pool so we can skip
				 * duplicates below.  n doubles as the
				 * count of entries in existing[].
				 * NOTE(review): existing[] holds
				 * MAX_SERVERS_DNS entries; n is presumably
				 * bounded by that via how pool peers are
				 * created — verify.
				 */
				n = 0;
				peercount = 0;

				TAILQ_FOREACH_SAFE(npeer, &conf->ntp_peers,
				    entry, tmp) {
					if (npeer->addr_head.pool !=
					    peer->addr_head.pool)
						continue;
					peercount++;
					if (npeer->id == peer->id)
						continue;
					if (npeer->addr != NULL)
						existing[n++] = npeer->addr->ss;
				}
			}

			dlen = imsg.hdr.len - IMSG_HEADER_SIZE;
			if (dlen == 0) {	/* no data -> temp error */
				log_warnx("DNS lookup tempfail");
				peer->state = STATE_DNS_TEMPFAIL;
				if (conf->tmpfail++ == TRIES_AUTO_DNSFAIL)
					priv_settime(0, "of dns failures");
				break;
			}

			/*
			 * Payload is a sequence of
			 * (sockaddr_storage, int notauth) records.
			 */
			p = (u_char *)imsg.data;
			addrcount = dlen / (sizeof(struct sockaddr_storage) +
			    sizeof(int));

			while (dlen >= sizeof(struct sockaddr_storage) +
			    sizeof(int)) {
				if ((h = calloc(1, sizeof(struct ntp_addr))) ==
				    NULL)
					fatal(NULL);
				memcpy(&h->ss, p, sizeof(h->ss));
				p += sizeof(h->ss);
				dlen -= sizeof(h->ss);
				memcpy(&h->notauth, p, sizeof(int));
				p += sizeof(int);
				dlen -= sizeof(int);
				if (peer->addr_head.pool) {
					/* Pool already large enough? */
					if (peercount > addrcount) {
						free(h);
						continue;
					}
					/* Skip addresses already in use. */
					if (inpool(&h->ss, existing,
					    n)) {
						free(h);
						continue;
					}
					log_debug("Adding address %s to %s",
					    log_sockaddr((struct sockaddr *)
					    &h->ss), peer->addr_head.name);
					/*
					 * Clone the template peer's settings
					 * into a new single-address peer.
					 */
					npeer = new_peer();
					npeer->weight = peer->weight;
					npeer->query_addr4 = peer->query_addr4;
					npeer->query_addr6 = peer->query_addr6;
					h->next = NULL;
					npeer->addr = h;
					npeer->addr_head.a = h;
					npeer->addr_head.name =
					    peer->addr_head.name;
					npeer->addr_head.pool =
					    peer->addr_head.pool;
					client_peer_init(npeer);
					npeer->state = STATE_DNS_DONE;
					peer_add(npeer);
					peercount++;
				} else {
					/* Prepend to the peer's addr list. */
					h->next = peer->addr;
					peer->addr = h;
					peer->addr_head.a = peer->addr;
					peer->state = STATE_DNS_DONE;
				}
			}
			if (dlen != 0)
				fatalx("IMSG_HOST_DNS: dlen != 0");
			if (peer->addr_head.pool)
				/* Template peer served its purpose. */
				peer_remove(peer);
			else
				client_addr_init(peer);
			break;
		case IMSG_CONSTRAINT_DNS:
			constraint_msg_dns(imsg.hdr.peerid,
			    imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
			break;
		case IMSG_PROBE_ROOT:
			dlen = imsg.hdr.len - IMSG_HEADER_SIZE;
			if (dlen != sizeof(int))
				fatalx("IMSG_PROBE_ROOT");
			memcpy(&n, imsg.data, sizeof(int));
			if (n < 0)
				priv_settime(0, "dns probe failed");
			break;
		default:
			break;
		}
		imsg_free(&imsg);
	}
	return (0);
}
/* Append a peer to the global peer list and bump the peer counter. */
void
peer_add(struct ntp_peer *p)
{
	TAILQ_INSERT_TAIL(&conf->ntp_peers, p, entry);
	peer_cnt++;
}

/* Unlink a peer from the global list, free it, drop the counter. */
void
peer_remove(struct ntp_peer *p)
{
	TAILQ_REMOVE(&conf->ntp_peers, p, entry);
	free(p);
	peer_cnt--;
}

/*
 * Free a peer's resolved address list so it can be re-resolved.
 * p->addr pointed into the same list, so it is cleared as well.
 */
void
peer_addr_head_clear(struct ntp_peer *p)
{
	host_dns_free(p->addr_head.a);
	p->addr_head.a = NULL;
	p->addr = NULL;
}

/*
 * Feed one clock offset sample into the frequency estimator.
 *
 * Accumulates (time, cumulative offset) pairs and, every
 * FREQUENCY_SAMPLES samples, computes the drift as the slope of a
 * least-squares linear regression, clamps it to
 * +/-MAX_FREQUENCY_ADJUST, and sends it to the privileged parent via
 * IMSG_ADJFREQ.  Accumulators reset while the clock is unsynced.
 */
static void
priv_adjfreq(double offset)
{
	double curtime, freq;

	if (!conf->status.synced) {
		conf->freq.samples = 0;
		return;
	}

	conf->freq.samples++;

	if (conf->freq.samples <= 0)
		return;

	/* Regress the running sum of offsets against corrected time. */
	conf->freq.overall_offset += offset;
	offset = conf->freq.overall_offset;

	curtime = gettime_corrected();
	conf->freq.xy += offset * curtime;
	conf->freq.x += curtime;
	conf->freq.y += offset;
	conf->freq.xx += curtime * curtime;

	if (conf->freq.samples % FREQUENCY_SAMPLES != 0)
		return;

	/* Least-squares slope: drift in s/s over the sample window. */
	freq =
	    (conf->freq.xy - conf->freq.x * conf->freq.y / conf->freq.samples)
	    /
	    (conf->freq.xx - conf->freq.x * conf->freq.x / conf->freq.samples);

	if (freq > MAX_FREQUENCY_ADJUST)
		freq = MAX_FREQUENCY_ADJUST;
	else if (freq < -MAX_FREQUENCY_ADJUST)
		freq = -MAX_FREQUENCY_ADJUST;

	imsg_compose(ibuf_main, IMSG_ADJFREQ, 0, 0, -1, &freq, sizeof(freq));
	conf->filters |= FILTER_ADJFREQ;
	/* Start a fresh regression window. */
	conf->freq.xy = 0.0;
	conf->freq.x = 0.0;
	conf->freq.y = 0.0;
	conf->freq.xx = 0.0;
	conf->freq.samples = 0;
	conf->freq.overall_offset = 0.0;
	conf->freq.num++;
}

/*
 * Select the clock offset to apply and hand it to the parent.
 *
 * Builds a weight-expanded array of offset candidates from all valid
 * peers and sensors, sorts it, picks the median (preferring the lower
 * delay of the two middle entries when the count is even), sends it
 * via IMSG_ADJTIME, feeds the frequency estimator, and rebases all
 * stored peer/sensor offsets by the chosen amount.
 *
 * Returns 0 when an adjustment was issued, 1 when there was nothing
 * usable yet (no valid candidates, or a valid peer without a good
 * update pending).
 */
int
priv_adjtime(void)
{
	struct ntp_peer	 *p;
	struct ntp_sensor *s;
	int	 offset_cnt = 0, i = 0, j;
	struct ntp_offset **offsets;
	double	 offset_median;

	TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
		if (p->trustlevel < TRUSTLEVEL_BADPEER)
			continue;
		/* Wait until every valid peer has a fresh update. */
		if (!p->update.good)
			return (1);
		offset_cnt += p->weight;
	}

	TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
		if (!s->update.good)
			continue;
		offset_cnt += s->weight;
	}

	if (offset_cnt == 0)
		return (1);

	if ((offsets = calloc(offset_cnt, sizeof(struct ntp_offset *))) == NULL)
		fatal("calloc priv_adjtime");

	/* Each candidate appears `weight` times to bias the median. */
	TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
		if (p->trustlevel < TRUSTLEVEL_BADPEER)
			continue;
		for (j = 0; j < p->weight; j++)
			offsets[i++] = &p->update;
	}

	TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
		if (!s->update.good)
			continue;
		for (j = 0; j < s->weight; j++)
			offsets[i++] = &s->update;
	}

	qsort(offsets, offset_cnt, sizeof(struct ntp_offset *), offset_compare);

	i = offset_cnt / 2;
	if (offset_cnt % 2 == 0)
		/* Two middle entries: prefer the one with lower delay. */
		if (offsets[i - 1]->delay < offsets[i]->delay)
			i -= 1;
	offset_median = offsets[i]->offset;
	conf->status.rootdelay = offsets[i]->delay;
	conf->status.stratum = offsets[i]->status.stratum;
	conf->status.leap = offsets[i]->status.leap;

	imsg_compose(ibuf_main, IMSG_ADJTIME, 0, 0, -1,
	    &offset_median, sizeof(offset_median));

	priv_adjfreq(offset_median);

	conf->status.reftime = gettime();
	conf->status.stratum++;	/* one more than selected peer */
	if (conf->status.stratum > NTP_MAXSTRATUM)
		conf->status.stratum = NTP_MAXSTRATUM;
	update_scale(offset_median);

	conf->status.refid = offsets[i]->status.send_refid;

	free(offsets);

	/* Rebase stored samples so they are relative to the new clock. */
	TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
		for (i = 0; i < OFFSET_ARRAY_SIZE; i++)
			p->reply[i].offset -= offset_median;
		p->update.good = 0;
	}
	TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
		for (i = 0; i < SENSOR_OFFSETS; i++)
			s->offsets[i].offset -= offset_median;
		s->update.offset -= offset_median;
	}

	return (0);
}

/*
 * qsort(3) comparator: order struct ntp_offset pointers by ascending
 * offset value.
 */
int
offset_compare(const void *aa, const void *bb)
{
	const struct ntp_offset * const *a;
	const struct ntp_offset * const *b;

	a = aa;
	b = bb;

	if ((*a)->offset < (*b)->offset)
		return (-1);
	else if ((*a)->offset > (*b)->offset)
		return (1);
	else
		return (0);
}

/*
 * Ask the privileged parent to step the clock by `offset` (IMSG_SETTIME).
 * An offset of 0 means "give up on the initial settime"; msg explains
 * why.  One-shot: clears conf->settime either way.
 */
void
priv_settime(double offset, char *msg)
{
	if (offset == 0)
		log_info("cancel settime because %s", msg);
	imsg_compose(ibuf_main, IMSG_SETTIME, 0, 0, -1,
	    &offset, sizeof(offset));
	conf->settime = 0;
}

/*
 * Send a DNS request/notification to the DNS process.  `name` (may be
 * NULL) is passed including its NUL terminator; peerid identifies the
 * peer the answer belongs to.
 */
void
priv_dns(int cmd, char *name, u_int32_t peerid)
{
	u_int16_t	dlen = 0;

	if (name != NULL)
		dlen = strlen(name) + 1;
	imsg_compose(ibuf_dns, cmd, peerid, 0, -1, name, dlen);
}

/*
 * Recompute the query-interval scale factor from the absolute current
 * offset: large offsets (or an unsynced/young clock) poll at the base
 * rate (scale 1); the smaller the offset, the larger the scale, capped
 * at QSCALE_OFF_MAX/QSCALE_OFF_MIN.
 */
void
update_scale(double offset)
{
	offset += getoffset();
	if (offset < 0)
		offset = -offset;

	if (offset > QSCALE_OFF_MAX || !conf->status.synced ||
	    conf->freq.num < 3)
		conf->scale = 1;
	else if (offset < QSCALE_OFF_MIN)
		conf->scale = QSCALE_OFF_MAX / QSCALE_OFF_MIN;
	else
		conf->scale = QSCALE_OFF_MAX / offset;
}

/*
 * Scale a requested poll interval by the current scale factor and add
 * random jitter of up to max(5, interval/10) seconds to avoid
 * synchronized query bursts.
 */
time_t
scale_interval(time_t requested)
{
	time_t interval, r;

	interval = requested * conf->scale;
	r = arc4random_uniform(MAXIMUM(5, interval / 10));
	return (interval + r);
}

/*
 * Backoff interval used after errors: the pathetic query interval at
 * maximum scale, plus up to 10% random jitter.
 */
time_t
error_interval(void)
{
	time_t interval, r;

	interval = INTERVAL_QUERY_PATHETIC * QSCALE_OFF_MAX / QSCALE_OFF_MIN;
	r = arc4random_uniform(interval / 10);
	return (interval + r);
}