xref: /openbsd-src/usr.sbin/ntpd/ntp.c (revision 7350f337b9e3eb4461d99580e625c7ef148d107c)
1 /*	$OpenBSD: ntp.c,v 1.156 2019/06/20 07:28:18 otto Exp $ */
2 
3 /*
4  * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
5  * Copyright (c) 2004 Alexander Guy <alexander.guy@andern.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <sys/types.h>
21 #include <sys/time.h>
22 #include <sys/stat.h>
23 #include <errno.h>
24 #include <fcntl.h>
25 #include <paths.h>
26 #include <poll.h>
27 #include <pwd.h>
28 #include <signal.h>
29 #include <stdlib.h>
30 #include <string.h>
31 #include <syslog.h>
32 #include <time.h>
33 #include <unistd.h>
34 #include <err.h>
35 
36 #include "ntpd.h"
37 
38 #define	PFD_PIPE_MAIN	0
39 #define	PFD_PIPE_DNS	1
40 #define	PFD_SOCK_CTL	2
41 #define	PFD_MAX		3
42 
43 volatile sig_atomic_t	 ntp_quit = 0;
44 struct imsgbuf		*ibuf_main;
45 struct imsgbuf		*ibuf_dns;
46 struct ntpd_conf	*conf;
47 struct ctl_conns	 ctl_conns;
48 u_int			 peer_cnt;
49 u_int			 sensors_cnt;
50 extern u_int		 constraint_cnt;
51 
52 void	ntp_sighdlr(int);
53 int	ntp_dispatch_imsg(void);
54 int	ntp_dispatch_imsg_dns(void);
55 void	peer_add(struct ntp_peer *);
56 void	peer_remove(struct ntp_peer *);
57 
58 void
59 ntp_sighdlr(int sig)
60 {
61 	switch (sig) {
62 	case SIGINT:
63 	case SIGTERM:
64 		ntp_quit = 1;
65 		break;
66 	}
67 }
68 
/*
 * Entry point of the unprivileged "ntp engine" process.  Starts the
 * dns child, opens the control socket, chroots into the privsep
 * user's home directory, drops privileges, pledges "stdio inet",
 * then runs the main poll(2) event loop until ntp_quit is set by a
 * signal or a broken pipe.  Does not return; exits via exit(0).
 */
void
ntp_main(struct ntpd_conf *nconf, struct passwd *pw, int argc, char **argv)
{
	int			 a, b, nfds, i, j, idx_peers, timeout;
	int			 nullfd, pipe_dns[2], idx_clients;
	int			 ctls;
	int			 fd_ctl;
	u_int			 pfd_elms = 0, idx2peer_elms = 0;
	u_int			 listener_cnt, new_cnt, sent_cnt, trial_cnt;
	u_int			 ctl_cnt;
	struct pollfd		*pfd = NULL;
	struct servent		*se;
	struct listen_addr	*la;
	struct ntp_peer		*p;
	struct ntp_peer		**idx2peer = NULL;
	struct ntp_sensor	*s, *next_s;
	struct constraint	*cstr;
	struct timespec		 tp;
	struct stat		 stb;
	struct ctl_conn		*cc;
	time_t			 nextaction, last_sensor_scan = 0, now;
	void			*newp;

	/* pipe to the dns resolver process; CLOEXEC so further children
	 * do not inherit our end */
	if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, PF_UNSPEC,
	    pipe_dns) == -1)
		fatal("socketpair");

	start_child(NTPDNS_PROC_NAME, pipe_dns[1], argc, argv);

	log_init(nconf->debug, LOG_DAEMON);
	log_setverbose(nconf->verbose);
	if (!nconf->debug && setsid() == -1)
		fatal("setsid");
	log_procinit("ntp");

	/* look up the NTP port before chroot takes /etc/services away */
	if ((se = getservbyname("ntp", "udp")) == NULL)
		fatal("getservbyname");

	/* Start control socket. */
	if ((fd_ctl = control_init(CTLSOCKET)) == -1)
		fatalx("control socket init failed");
	if (control_listen(fd_ctl) == -1)
		fatalx("control socket listen failed");
	if ((nullfd = open("/dev/null", O_RDWR, 0)) == -1)
		fatal(NULL);

	/* refuse to chroot into a directory writable by non-root */
	if (stat(pw->pw_dir, &stb) == -1) {
		fatal("privsep dir %s could not be opened", pw->pw_dir);
	}
	if (stb.st_uid != 0 || (stb.st_mode & (S_IWGRP|S_IWOTH)) != 0) {
		fatalx("bad privsep dir %s permissions: %o",
		    pw->pw_dir, stb.st_mode);
	}
	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	/* detach stdio from the terminal when daemonizing */
	if (!nconf->debug) {
		dup2(nullfd, STDIN_FILENO);
		dup2(nullfd, STDOUT_FILENO);
		dup2(nullfd, STDERR_FILENO);
	}
	close(nullfd);

	setproctitle("ntp engine");

	conf = nconf;
	setup_listeners(se, conf, &listener_cnt);

	/* drop to the unprivileged ntpd user */
	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	endservent();

	/* The ntp process will want to open NTP client sockets -> "inet" */
	if (pledge("stdio inet", NULL) == -1)
		err(1, "pledge");

	signal(SIGTERM, ntp_sighdlr);
	signal(SIGINT, ntp_sighdlr);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);
	signal(SIGCHLD, SIG_DFL);

	/* imsg channels to the parent and to the dns engine */
	if ((ibuf_main = malloc(sizeof(struct imsgbuf))) == NULL)
		fatal(NULL);
	imsg_init(ibuf_main, PARENT_SOCK_FILENO);
	if ((ibuf_dns = malloc(sizeof(struct imsgbuf))) == NULL)
		fatal(NULL);
	imsg_init(ibuf_dns, pipe_dns[0]);

	constraint_cnt = 0;
	conf->constraint_median = 0;
	conf->constraint_last = getmonotime();
	TAILQ_FOREACH(cstr, &conf->constraints, entry)
		constraint_cnt += constraint_init(cstr);

	TAILQ_FOREACH(p, &conf->ntp_peers, entry)
		client_peer_init(p);

	memset(&conf->status, 0, sizeof(conf->status));

	/* reset the frequency-drift regression accumulators */
	conf->freq.num = 0;
	conf->freq.samples = 0;
	conf->freq.x = 0.0;
	conf->freq.xx = 0.0;
	conf->freq.xy = 0.0;
	conf->freq.y = 0.0;
	conf->freq.overall_offset = 0.0;

	conf->status.synced = 0;
	/* derive NTP "precision" (a power of two, in seconds) from the
	 * realtime clock resolution: a ends up as -log2(clock Hz) */
	clock_getres(CLOCK_REALTIME, &tp);
	b = 1000000000 / tp.tv_nsec;	/* convert to Hz */
	for (a = 0; b > 1; a--, b >>= 1)
		;
	conf->status.precision = a;
	conf->scale = 1;

	TAILQ_INIT(&ctl_conns);
	sensor_init();

	log_info("ntp engine ready");

	ctl_cnt = 0;
	peer_cnt = 0;
	TAILQ_FOREACH(p, &conf->ntp_peers, entry)
		peer_cnt++;

	while (ntp_quit == 0) {
		/* grow the peer-index map on demand; it maps a pollfd
		 * slot (offset by idx_peers) back to its ntp_peer */
		if (peer_cnt > idx2peer_elms) {
			if ((newp = reallocarray(idx2peer, peer_cnt,
			    sizeof(*idx2peer))) == NULL) {
				/* panic for now */
				log_warn("could not resize idx2peer from %u -> "
				    "%u entries", idx2peer_elms, peer_cnt);
				fatalx("exiting");
			}
			idx2peer = newp;
			idx2peer_elms = peer_cnt;
		}

		/* worst-case pollfd count: fixed slots + every peer,
		 * listener and control connection */
		new_cnt = PFD_MAX +
		    peer_cnt + listener_cnt + ctl_cnt;
		if (new_cnt > pfd_elms) {
			if ((newp = reallocarray(pfd, new_cnt,
			    sizeof(*pfd))) == NULL) {
				/* panic for now */
				log_warn("could not resize pfd from %u -> "
				    "%u entries", pfd_elms, new_cnt);
				fatalx("exiting");
			}
			pfd = newp;
			pfd_elms = new_cnt;
		}

		memset(pfd, 0, sizeof(*pfd) * pfd_elms);
		memset(idx2peer, 0, sizeof(*idx2peer) * idx2peer_elms);
		/* default wakeup: one hour from now; pulled earlier below */
		nextaction = getmonotime() + 3600;
		pfd[PFD_PIPE_MAIN].fd = ibuf_main->fd;
		pfd[PFD_PIPE_MAIN].events = POLLIN;
		pfd[PFD_PIPE_DNS].fd = ibuf_dns->fd;
		pfd[PFD_PIPE_DNS].events = POLLIN;
		pfd[PFD_SOCK_CTL].fd = fd_ctl;
		pfd[PFD_SOCK_CTL].events = POLLIN;

		/* server listening sockets follow the fixed slots */
		i = PFD_MAX;
		TAILQ_FOREACH(la, &conf->listen_addrs, entry) {
			pfd[i].fd = la->fd;
			pfd[i].events = POLLIN;
			i++;
		}

		idx_peers = i;
		sent_cnt = trial_cnt = 0;
		TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
			/* hold peer queries until at least one constraint
			 * result is in, if constraints are configured */
			if (constraint_cnt && conf->constraint_median == 0)
				continue;

			if (p->next > 0 && p->next <= getmonotime()) {
				if (p->state > STATE_DNS_INPROGRESS)
					trial_cnt++;
				if (client_query(p) == 0)
					sent_cnt++;
			}
			/* reply deadline passed: degrade trust, try the
			 * peer's next address */
			if (p->deadline > 0 && p->deadline <= getmonotime()) {
				timeout = 300;
				log_debug("no reply from %s received in time, "
				    "next query %ds", log_sockaddr(
				    (struct sockaddr *)&p->addr->ss), timeout);
				if (p->trustlevel >= TRUSTLEVEL_BADPEER &&
				    (p->trustlevel /= 2) < TRUSTLEVEL_BADPEER)
					log_info("peer %s now invalid",
					    log_sockaddr(
					    (struct sockaddr *)&p->addr->ss));
				client_nextaddr(p);
				set_next(p, timeout);
			}
			if (p->senderrors > MAX_SEND_ERRORS) {
				log_debug("failed to send query to %s, "
				    "next query %ds", log_sockaddr(
				    (struct sockaddr *)&p->addr->ss),
				    INTERVAL_QUERY_PATHETIC);
				p->senderrors = 0;
				client_nextaddr(p);
				set_next(p, INTERVAL_QUERY_PATHETIC);
			}
			/* wake up for the earliest pending peer event */
			if (p->next > 0 && p->next < nextaction)
				nextaction = p->next;
			if (p->deadline > 0 && p->deadline < nextaction)
				nextaction = p->deadline;

			/* poll for the reply on an outstanding query */
			if (p->state == STATE_QUERY_SENT &&
			    p->query->fd != -1) {
				pfd[i].fd = p->query->fd;
				pfd[i].events = POLLIN;
				idx2peer[i - idx_peers] = p;
				i++;
			}
		}
		idx_clients = i;

		if (!TAILQ_EMPTY(&conf->ntp_conf_sensors)) {
			/* rescan hardware sensors periodically */
			if (last_sensor_scan == 0 ||
			    last_sensor_scan + SENSOR_SCAN_INTERVAL <= getmonotime()) {
				sensors_cnt = sensor_scan();
				last_sensor_scan = getmonotime();
			}
			/* none found: make sure we wake up for the next scan */
			if (sensors_cnt == 0 &&
			    nextaction > last_sensor_scan + SENSOR_SCAN_INTERVAL)
				nextaction = last_sensor_scan + SENSOR_SCAN_INTERVAL;
			sensors_cnt = 0;
			TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
				if (conf->settime && s->offsets[0].offset)
					priv_settime(s->offsets[0].offset, NULL);
				sensors_cnt++;
				if (s->next > 0 && s->next < nextaction)
					nextaction = s->next;
			}
		}

		/* give up on the initial time step when nothing can
		 * possibly answer (offset 0 is the cancel sentinel) */
		if (conf->settime &&
		    ((trial_cnt > 0 && sent_cnt == 0) ||
		    (peer_cnt == 0 && sensors_cnt == 0)))
			priv_settime(0, "no valid peers configured");

		TAILQ_FOREACH(cstr, &conf->constraints, entry) {
			if (constraint_query(cstr) == -1)
				continue;
		}

		/* flush pending imsgs when the pipes become writable */
		if (ibuf_main->w.queued > 0)
			pfd[PFD_PIPE_MAIN].events |= POLLOUT;
		if (ibuf_dns->w.queued > 0)
			pfd[PFD_PIPE_DNS].events |= POLLOUT;

		TAILQ_FOREACH(cc, &ctl_conns, entry) {
			pfd[i].fd = cc->ibuf.fd;
			pfd[i].events = POLLIN;
			if (cc->ibuf.w.queued > 0)
				pfd[i].events |= POLLOUT;
			i++;
		}
		ctls = i;

		now = getmonotime();
		timeout = nextaction - now;
		if (timeout < 0)
			timeout = 0;

		/* on EINTR nfds stays -1, so all revents checks below
		 * are skipped and the loop re-evaluates ntp_quit */
		if ((nfds = poll(pfd, i, timeout * 1000)) == -1)
			if (errno != EINTR) {
				log_warn("poll error");
				ntp_quit = 1;
			}

		if (nfds > 0 && (pfd[PFD_PIPE_MAIN].revents & POLLOUT))
			if (msgbuf_write(&ibuf_main->w) <= 0 &&
			    errno != EAGAIN) {
				log_warn("pipe write error (to parent)");
				ntp_quit = 1;
			}

		if (nfds > 0 && pfd[PFD_PIPE_MAIN].revents & (POLLIN|POLLERR)) {
			nfds--;
			if (ntp_dispatch_imsg() == -1) {
				log_warn("pipe write error (from main)");
				ntp_quit = 1;
			}
		}

		if (nfds > 0 && (pfd[PFD_PIPE_DNS].revents & POLLOUT))
			if (msgbuf_write(&ibuf_dns->w) <= 0 &&
			    errno != EAGAIN) {
				log_warn("pipe write error (to dns engine)");
				ntp_quit = 1;
			}

		if (nfds > 0 && pfd[PFD_PIPE_DNS].revents & (POLLIN|POLLERR)) {
			nfds--;
			if (ntp_dispatch_imsg_dns() == -1) {
				log_warn("pipe write error (from dns engine)");
				ntp_quit = 1;
			}
		}

		if (nfds > 0 && pfd[PFD_SOCK_CTL].revents & (POLLIN|POLLERR)) {
			nfds--;
			ctl_cnt += control_accept(fd_ctl);
		}

		/* server listeners: answer client requests */
		for (j = PFD_MAX; nfds > 0 && j < idx_peers; j++)
			if (pfd[j].revents & (POLLIN|POLLERR)) {
				nfds--;
				if (server_dispatch(pfd[j].fd, conf) == -1) {
					log_warn("pipe write error (conf)");
					ntp_quit = 1;
				}
			}

		/* client query sockets: process replies from our peers */
		for (; nfds > 0 && j < idx_clients; j++) {
			if (pfd[j].revents & (POLLIN|POLLERR)) {
				nfds--;
				if (client_dispatch(idx2peer[j - idx_peers],
				    conf->settime, conf->automatic) == -1) {
					log_warn("pipe write error (settime)");
					ntp_quit = 1;
				}
			}
		}

		/* control connections (ntpctl) */
		for (; nfds > 0 && j < ctls; j++) {
			nfds -= control_dispatch_msg(&pfd[j], &ctl_cnt);
		}

		/* safe traversal: sensor_query may modify the list */
		for (s = TAILQ_FIRST(&conf->ntp_sensors); s != NULL;
		    s = next_s) {
			next_s = TAILQ_NEXT(s, entry);
			if (s->next <= getmonotime())
				sensor_query(s);
		}
	}

	/* best-effort flush of queued imsgs before exiting */
	msgbuf_write(&ibuf_main->w);
	msgbuf_clear(&ibuf_main->w);
	free(ibuf_main);
	msgbuf_write(&ibuf_dns->w);
	msgbuf_clear(&ibuf_dns->w);
	free(ibuf_dns);

	log_info("ntp engine exiting");
	exit(0);
}
424 
425 int
426 ntp_dispatch_imsg(void)
427 {
428 	struct imsg		 imsg;
429 	int			 n;
430 
431 	if (((n = imsg_read(ibuf_main)) == -1 && errno != EAGAIN) || n == 0)
432 		return (-1);
433 
434 	for (;;) {
435 		if ((n = imsg_get(ibuf_main, &imsg)) == -1)
436 			return (-1);
437 
438 		if (n == 0)
439 			break;
440 
441 		switch (imsg.hdr.type) {
442 		case IMSG_ADJTIME:
443 			memcpy(&n, imsg.data, sizeof(n));
444 			if (n == 1 && !conf->status.synced) {
445 				log_info("clock is now synced");
446 				conf->status.synced = 1;
447 				priv_dns(IMSG_SYNCED, NULL, 0);
448 			} else if (n == 0 && conf->status.synced) {
449 				log_info("clock is now unsynced");
450 				conf->status.synced = 0;
451 				priv_dns(IMSG_UNSYNCED, NULL, 0);
452 			}
453 			break;
454 		case IMSG_CONSTRAINT_RESULT:
455 			constraint_msg_result(imsg.hdr.peerid,
456 			    imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
457 			break;
458 		case IMSG_CONSTRAINT_CLOSE:
459 			constraint_msg_close(imsg.hdr.peerid,
460 			    imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
461 			break;
462 		default:
463 			break;
464 		}
465 		imsg_free(&imsg);
466 	}
467 	return (0);
468 }
469 
/*
 * Drain and handle imsgs arriving from the dns resolver process,
 * mainly completed hostname lookups for peers and constraints.
 * Returns -1 when the pipe is closed or broken, 0 otherwise.
 */
int
ntp_dispatch_imsg_dns(void)
{
	struct imsg		 imsg;
	struct ntp_peer		*peer, *npeer, *tmp;
	u_int16_t		 dlen;
	u_char			*p;
	struct ntp_addr		*h;
	int			 n;

	/* n == 0 means the dns engine closed its end of the pipe */
	if (((n = imsg_read(ibuf_dns)) == -1 && errno != EAGAIN) || n == 0)
		return (-1);

	for (;;) {
		if ((n = imsg_get(ibuf_dns, &imsg)) == -1)
			return (-1);

		if (n == 0)	/* no more complete messages queued */
			break;

		switch (imsg.hdr.type) {
		case IMSG_HOST_DNS:
			/* find the peer this lookup was started for;
			 * peer is NULL if the loop ran to completion */
			TAILQ_FOREACH(peer, &conf->ntp_peers, entry)
				if (peer->id == imsg.hdr.peerid)
					break;
			if (peer == NULL) {
				log_warnx("IMSG_HOST_DNS with invalid peerID");
				break;
			}
			if (peer->addr != NULL) {
				log_warnx("IMSG_HOST_DNS but addr != NULL!");
				break;
			}

			/*
			 * For the redo dns case we want to have only one clone
			 * of the pool peer, since it wil be cloned again
			 */
			if (peer->addr_head.pool) {
				TAILQ_FOREACH_SAFE(npeer, &conf->ntp_peers,
				    entry, tmp) {
					if (npeer->id == peer->id)
						continue;
					if (strcmp(npeer->addr_head.name,
					    peer->addr_head.name) == 0)
						peer_remove(npeer);
				}
			}

			dlen = imsg.hdr.len - IMSG_HEADER_SIZE;
			if (dlen == 0) {	/* no data -> temp error */
				log_warnx("DNS lookup tempfail");
				peer->state = STATE_DNS_TEMPFAIL;
				if (++conf->tmpfail > TRIES_AUTO_DNSFAIL)
					priv_settime(0, "of dns failures");
				break;
			}

			/* payload is a sequence of (sockaddr_storage,
			 * int notauth) pairs, one per resolved address */
			p = (u_char *)imsg.data;
			while (dlen >= sizeof(struct sockaddr_storage) +
			    sizeof(int)) {
				if ((h = calloc(1, sizeof(struct ntp_addr))) ==
				    NULL)
					fatal(NULL);
				memcpy(&h->ss, p, sizeof(h->ss));
				p += sizeof(h->ss);
				dlen -= sizeof(h->ss);
				memcpy(&h->notauth, p, sizeof(int));
				p += sizeof(int);
				dlen -= sizeof(int);
				if (peer->addr_head.pool) {
					/* pool: one new peer per address,
					 * cloned from the template peer */
					npeer = new_peer();
					npeer->weight = peer->weight;
					npeer->query_addr4 = peer->query_addr4;
					npeer->query_addr6 = peer->query_addr6;
					h->next = NULL;
					npeer->addr = h;
					npeer->addr_head.a = h;
					npeer->addr_head.name =
					    peer->addr_head.name;
					npeer->addr_head.pool = 1;
					client_peer_init(npeer);
					npeer->state = STATE_DNS_DONE;
					peer_add(npeer);
				} else {
					/* regular peer: prepend the address
					 * to its own list */
					h->next = peer->addr;
					peer->addr = h;
					peer->addr_head.a = peer->addr;
					peer->state = STATE_DNS_DONE;
				}
			}
			/* any leftover bytes mean a malformed message */
			if (dlen != 0)
				fatalx("IMSG_HOST_DNS: dlen != 0");
			/* pool template peer has served its purpose */
			if (peer->addr_head.pool)
				peer_remove(peer);
			else
				client_addr_init(peer);
			break;
		case IMSG_CONSTRAINT_DNS:
			constraint_msg_dns(imsg.hdr.peerid,
			    imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
			break;
		case IMSG_PROBE_ROOT:
			dlen = imsg.hdr.len - IMSG_HEADER_SIZE;
			if (dlen != sizeof(int))
				fatalx("IMSG_PROBE_ROOT");
			memcpy(&n, imsg.data, sizeof(int));
			/* negative probe result: dns is broken, cancel the
			 * initial settime instead of hanging */
			if (n < 0)
				priv_settime(0, "dns probe failed");
			break;
		default:
			break;
		}
		imsg_free(&imsg);
	}
	return (0);
}
587 
/*
 * Append a peer to the global peer list and keep the count in sync
 * (peer_cnt drives the pollfd array sizing in ntp_main()).
 */
void
peer_add(struct ntp_peer *p)
{
	TAILQ_INSERT_TAIL(&conf->ntp_peers, p, entry);
	peer_cnt++;
}
594 
/*
 * Unlink a peer from the global peer list, free it and keep the
 * count in sync.
 * NOTE(review): the peer's address list (p->addr chain) is not freed
 * here — confirm callers release it (cf. peer_addr_head_clear()) or
 * whether removed pool clones leak their addresses.
 */
void
peer_remove(struct ntp_peer *p)
{
	TAILQ_REMOVE(&conf->ntp_peers, p, entry);
	free(p);
	peer_cnt--;
}
602 
603 void
604 peer_addr_head_clear(struct ntp_peer *p)
605 {
606 	struct ntp_addr *a = p->addr_head.a;
607 	while (a) {
608 		struct ntp_addr *next = a->next;
609 		free(a);
610 		a = next;
611 	}
612 	p->addr_head.a = NULL;
613 	p->addr = NULL;
614 }
615 
616 static void
617 priv_adjfreq(double offset)
618 {
619 	double curtime, freq;
620 
621 	if (!conf->status.synced){
622 		conf->freq.samples = 0;
623 		return;
624 	}
625 
626 	conf->freq.samples++;
627 
628 	if (conf->freq.samples <= 0)
629 		return;
630 
631 	conf->freq.overall_offset += offset;
632 	offset = conf->freq.overall_offset;
633 
634 	curtime = gettime_corrected();
635 	conf->freq.xy += offset * curtime;
636 	conf->freq.x += curtime;
637 	conf->freq.y += offset;
638 	conf->freq.xx += curtime * curtime;
639 
640 	if (conf->freq.samples % FREQUENCY_SAMPLES != 0)
641 		return;
642 
643 	freq =
644 	    (conf->freq.xy - conf->freq.x * conf->freq.y / conf->freq.samples)
645 	    /
646 	    (conf->freq.xx - conf->freq.x * conf->freq.x / conf->freq.samples);
647 
648 	if (freq > MAX_FREQUENCY_ADJUST)
649 		freq = MAX_FREQUENCY_ADJUST;
650 	else if (freq < -MAX_FREQUENCY_ADJUST)
651 		freq = -MAX_FREQUENCY_ADJUST;
652 
653 	imsg_compose(ibuf_main, IMSG_ADJFREQ, 0, 0, -1, &freq, sizeof(freq));
654 	conf->filters |= FILTER_ADJFREQ;
655 	conf->freq.xy = 0.0;
656 	conf->freq.x = 0.0;
657 	conf->freq.y = 0.0;
658 	conf->freq.xx = 0.0;
659 	conf->freq.samples = 0;
660 	conf->freq.overall_offset = 0.0;
661 	conf->freq.num++;
662 }
663 
/*
 * Compute the weighted median offset over all trusted peers and good
 * sensors and ask the parent to adjust the clock by it.  Returns 1
 * when no adjustment could be made (no usable sources yet, or some
 * trusted peer has no fresh update), 0 on success.
 */
int
priv_adjtime(void)
{
	struct ntp_peer		 *p;
	struct ntp_sensor	 *s;
	int			  offset_cnt = 0, i = 0, j;
	struct ntp_offset	**offsets;
	double			  offset_median;

	/* count weighted slots; bail out early if any trusted peer has
	 * no good update yet, so we wait until all of them report */
	TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
		if (p->trustlevel < TRUSTLEVEL_BADPEER)
			continue;
		if (!p->update.good)
			return (1);
		offset_cnt += p->weight;
	}

	TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
		if (!s->update.good)
			continue;
		offset_cnt += s->weight;
	}

	if (offset_cnt == 0)
		return (1);

	if ((offsets = calloc(offset_cnt, sizeof(struct ntp_offset *))) == NULL)
		fatal("calloc priv_adjtime");

	/* weighting by duplication: each source appears "weight" times
	 * (trusted peers are all good here, per the early return above) */
	TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
		if (p->trustlevel < TRUSTLEVEL_BADPEER)
			continue;
		for (j = 0; j < p->weight; j++)
			offsets[i++] = &p->update;
	}

	TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
		if (!s->update.good)
			continue;
		for (j = 0; j < s->weight; j++)
			offsets[i++] = &s->update;
	}

	qsort(offsets, offset_cnt, sizeof(struct ntp_offset *), offset_compare);

	/* pick the median; for an even count prefer whichever of the
	 * two middle entries has the smaller delay */
	i = offset_cnt / 2;
	if (offset_cnt % 2 == 0)
		if (offsets[i - 1]->delay < offsets[i]->delay)
			i -= 1;
	offset_median = offsets[i]->offset;
	conf->status.rootdelay = offsets[i]->delay;
	conf->status.stratum = offsets[i]->status.stratum;
	conf->status.leap = offsets[i]->status.leap;

	imsg_compose(ibuf_main, IMSG_ADJTIME, 0, 0, -1,
	    &offset_median, sizeof(offset_median));

	priv_adjfreq(offset_median);

	conf->status.reftime = gettime();
	conf->status.stratum++;	/* one more than selected peer */
	if (conf->status.stratum > NTP_MAXSTRATUM)
		conf->status.stratum = NTP_MAXSTRATUM;
	update_scale(offset_median);

	conf->status.refid = offsets[i]->status.send_refid;

	free(offsets);

	/* rebase all stored offsets on the adjustment just made and
	 * mark peer updates as consumed */
	TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
		for (i = 0; i < OFFSET_ARRAY_SIZE; i++)
			p->reply[i].offset -= offset_median;
		p->update.good = 0;
	}
	TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
		for (i = 0; i < SENSOR_OFFSETS; i++)
			s->offsets[i].offset -= offset_median;
		s->update.offset -= offset_median;
	}

	return (0);
}
746 
747 int
748 offset_compare(const void *aa, const void *bb)
749 {
750 	const struct ntp_offset * const *a;
751 	const struct ntp_offset * const *b;
752 
753 	a = aa;
754 	b = bb;
755 
756 	if ((*a)->offset < (*b)->offset)
757 		return (-1);
758 	else if ((*a)->offset > (*b)->offset)
759 		return (1);
760 	else
761 		return (0);
762 }
763 
/*
 * Ask the privileged parent to step the clock by "offset" seconds.
 * An offset of exactly 0 is the sentinel for cancelling settime mode:
 * "msg" gives the reason, and the parent can finish daemonizing
 * without an initial time step.  Either way settime is cleared so
 * this happens at most once.
 */
void
priv_settime(double offset, char *msg)
{
	if (offset == 0)
		log_info("cancel settime because %s", msg);
	imsg_compose(ibuf_main, IMSG_SETTIME, 0, 0, -1,
	    &offset, sizeof(offset));
	conf->settime = 0;
}
773 
774 void
775 priv_dns(int cmd, char *name, u_int32_t peerid)
776 {
777 	u_int16_t	dlen = 0;
778 
779 	if (name != NULL)
780 		dlen = strlen(name) + 1;
781 	imsg_compose(ibuf_dns, cmd, peerid, 0, -1, name, dlen);
782 }
783 
784 void
785 update_scale(double offset)
786 {
787 	offset += getoffset();
788 	if (offset < 0)
789 		offset = -offset;
790 
791 	if (offset > QSCALE_OFF_MAX || !conf->status.synced ||
792 	    conf->freq.num < 3)
793 		conf->scale = 1;
794 	else if (offset < QSCALE_OFF_MIN)
795 		conf->scale = QSCALE_OFF_MAX / QSCALE_OFF_MIN;
796 	else
797 		conf->scale = QSCALE_OFF_MAX / offset;
798 }
799 
800 time_t
801 scale_interval(time_t requested)
802 {
803 	time_t interval, r;
804 
805 	interval = requested * conf->scale;
806 	r = arc4random_uniform(MAXIMUM(5, interval / 10));
807 	return (interval + r);
808 }
809 
810 time_t
811 error_interval(void)
812 {
813 	time_t interval, r;
814 
815 	interval = INTERVAL_QUERY_PATHETIC * QSCALE_OFF_MAX / QSCALE_OFF_MIN;
816 	r = arc4random_uniform(interval / 10);
817 	return (interval + r);
818 }
819