/*	$OpenBSD: ntp.c,v 1.143 2016/09/14 13:20:16 rzalamena Exp $ */

/*
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 * Copyright (c) 2004 Alexander Guy <alexander.guy@andern.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <errno.h>
#include <fcntl.h>
#include <paths.h>
#include <poll.h>
#include <pwd.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <time.h>
#include <unistd.h>
#include <err.h>

#include "ntpd.h"

#define	PFD_PIPE_MAIN	0
#define	PFD_PIPE_DNS	1
#define	PFD_SOCK_CTL	2
#define	PFD_MAX		3

volatile sig_atomic_t	 ntp_quit = 0;
struct imsgbuf		*ibuf_main;
struct imsgbuf		*ibuf_dns;
struct ntpd_conf	*conf;
struct ctl_conns	 ctl_conns;
u_int			 peer_cnt;
u_int			 sensors_cnt;
extern u_int		 constraint_cnt;

void	ntp_sighdlr(int);
int	ntp_dispatch_imsg(void);
int	ntp_dispatch_imsg_dns(void);
void	peer_add(struct ntp_peer *);
void	peer_remove(struct ntp_peer *);

void
ntp_sighdlr(int sig)
{
	switch (sig) {
	case SIGINT:
	case SIGTERM:
		ntp_quit = 1;
		break;
	}
}

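/*
 * Main entry point of the unprivileged NTP engine: start the DNS child
 * and the control socket, chroot to the privsep user's home directory,
 * drop privileges, pledge "stdio inet", and then service peers, sensors,
 * constraints and control clients from a single poll loop.
 */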
void
ntp_main(struct ntpd_conf *nconf, struct passwd *pw, int argc, char **argv)
{
	int			 a, b, nfds, i, j, idx_peers, timeout;
	int			 nullfd, pipe_dns[2], idx_clients;
	int			 ctls;
	int			 fd_ctl;
	u_int			 pfd_elms = 0, idx2peer_elms = 0;
	u_int			 listener_cnt, new_cnt, sent_cnt, trial_cnt;
	u_int			 ctl_cnt;
	struct pollfd		*pfd = NULL;
	struct servent		*se;
	struct listen_addr	*la;
	struct ntp_peer		*p;
	struct ntp_peer		**idx2peer = NULL;
	struct ntp_sensor	*s, *next_s;
	struct constraint	*cstr;
	struct timespec		 tp;
	struct stat		 stb;
	struct ctl_conn		*cc;
	time_t			 nextaction, last_sensor_scan = 0, now;
	void			*newp;

	if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, PF_UNSPEC,
	    pipe_dns) == -1)
		fatal("socketpair");

	start_child(NTPDNS_PROC_NAME, pipe_dns[1], argc, argv);

	/* in this case the parent didn't init logging and didn't daemonize */
	if (nconf->settime && !nconf->debug) {
		log_init(nconf->debug, LOG_DAEMON);
		if (setsid() == -1)
			fatal("setsid");
	}
	log_procinit("ntp");

	if ((se = getservbyname("ntp", "udp")) == NULL)
		fatal("getservbyname");

	/* Start control socket. */
	if ((fd_ctl = control_init(CTLSOCKET)) == -1)
		fatalx("control socket init failed");
	if (control_listen(fd_ctl) == -1)
		fatalx("control socket listen failed");
	if ((nullfd = open("/dev/null", O_RDWR, 0)) == -1)
		fatal(NULL);

	if (stat(pw->pw_dir, &stb) == -1) {
		fatal("privsep dir %s could not be opened", pw->pw_dir);
	}
	if (stb.st_uid != 0 || (stb.st_mode & (S_IWGRP|S_IWOTH)) != 0) {
		fatalx("bad privsep dir %s permissions: %o",
		    pw->pw_dir, stb.st_mode);
	}
	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	if (!nconf->debug) {
		dup2(nullfd, STDIN_FILENO);
		dup2(nullfd, STDOUT_FILENO);
		dup2(nullfd, STDERR_FILENO);
	}
	close(nullfd);

	setproctitle("ntp engine");

	conf = nconf;
	setup_listeners(se, conf, &listener_cnt);

	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	endservent();

	/* The ntp process will want to open NTP client sockets -> "inet" */
	if (pledge("stdio inet", NULL) == -1)
		err(1, "pledge");

	signal(SIGTERM, ntp_sighdlr);
	signal(SIGINT, ntp_sighdlr);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);
	signal(SIGCHLD, SIG_DFL);

	if ((ibuf_main = malloc(sizeof(struct imsgbuf))) == NULL)
		fatal(NULL);
	imsg_init(ibuf_main, PARENT_SOCK_FILENO);
	if ((ibuf_dns = malloc(sizeof(struct imsgbuf))) == NULL)
		fatal(NULL);
	imsg_init(ibuf_dns, pipe_dns[0]);

	constraint_cnt = 0;
	conf->constraint_median = 0;
	conf->constraint_last = getmonotime();
	TAILQ_FOREACH(cstr, &conf->constraints, entry)
		constraint_cnt += constraint_init(cstr);

	TAILQ_FOREACH(p, &conf->ntp_peers, entry)
		client_peer_init(p);

	memset(&conf->status, 0, sizeof(conf->status));

	conf->freq.num = 0;
	conf->freq.samples = 0;
	conf->freq.x = 0.0;
	conf->freq.xx = 0.0;
	conf->freq.xy = 0.0;
	conf->freq.y = 0.0;
	conf->freq.overall_offset = 0.0;

	conf->status.synced = 0;
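	/*
	 * Express the clock's resolution as an NTP precision value:
	 * the (negative) base-2 logarithm of the resolution in seconds.
	 */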
	clock_getres(CLOCK_REALTIME, &tp);
	b = 1000000000 / tp.tv_nsec;	/* convert to Hz */
	for (a = 0; b > 1; a--, b >>= 1)
		;
	conf->status.precision = a;
	conf->scale = 1;

	TAILQ_INIT(&ctl_conns);
	sensor_init();

	log_info("ntp engine ready");

	ctl_cnt = 0;
	peer_cnt = 0;
	TAILQ_FOREACH(p, &conf->ntp_peers, entry)
		peer_cnt++;

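	/*
	 * Main event loop: grow the pollfd and idx2peer arrays as needed,
	 * poll the parent and DNS pipes, the control socket, all listeners,
	 * outstanding client queries and control connections, and derive
	 * the poll timeout from the next scheduled peer, sensor or
	 * constraint action.
	 */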
	while (ntp_quit == 0) {
		if (peer_cnt > idx2peer_elms) {
			if ((newp = reallocarray(idx2peer, peer_cnt,
			    sizeof(*idx2peer))) == NULL) {
				/* panic for now */
				log_warn("could not resize idx2peer from %u -> "
				    "%u entries", idx2peer_elms, peer_cnt);
				fatalx("exiting");
			}
			idx2peer = newp;
			idx2peer_elms = peer_cnt;
		}

		new_cnt = PFD_MAX +
		    peer_cnt + listener_cnt + ctl_cnt;
		if (new_cnt > pfd_elms) {
			if ((newp = reallocarray(pfd, new_cnt,
			    sizeof(*pfd))) == NULL) {
				/* panic for now */
				log_warn("could not resize pfd from %u -> "
				    "%u entries", pfd_elms, new_cnt);
				fatalx("exiting");
			}
			pfd = newp;
			pfd_elms = new_cnt;
		}

		memset(pfd, 0, sizeof(*pfd) * pfd_elms);
		memset(idx2peer, 0, sizeof(*idx2peer) * idx2peer_elms);
		nextaction = getmonotime() + 3600;
		pfd[PFD_PIPE_MAIN].fd = ibuf_main->fd;
		pfd[PFD_PIPE_MAIN].events = POLLIN;
		pfd[PFD_PIPE_DNS].fd = ibuf_dns->fd;
		pfd[PFD_PIPE_DNS].events = POLLIN;
		pfd[PFD_SOCK_CTL].fd = fd_ctl;
		pfd[PFD_SOCK_CTL].events = POLLIN;

		i = PFD_MAX;
		TAILQ_FOREACH(la, &conf->listen_addrs, entry) {
			pfd[i].fd = la->fd;
			pfd[i].events = POLLIN;
			i++;
		}

		idx_peers = i;
		sent_cnt = trial_cnt = 0;
		TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
			if (constraint_cnt && conf->constraint_median == 0)
				continue;

			if (p->next > 0 && p->next <= getmonotime()) {
				if (p->state > STATE_DNS_INPROGRESS)
					trial_cnt++;
				if (client_query(p) == 0)
					sent_cnt++;
			}
			if (p->deadline > 0 && p->deadline <= getmonotime()) {
				timeout = 300;
				log_debug("no reply from %s received in time, "
				    "next query %ds", log_sockaddr(
				    (struct sockaddr *)&p->addr->ss), timeout);
				if (p->trustlevel >= TRUSTLEVEL_BADPEER &&
				    (p->trustlevel /= 2) < TRUSTLEVEL_BADPEER)
					log_info("peer %s now invalid",
					    log_sockaddr(
					    (struct sockaddr *)&p->addr->ss));
				client_nextaddr(p);
				set_next(p, timeout);
			}
			if (p->senderrors > MAX_SEND_ERRORS) {
				log_debug("failed to send query to %s, "
				    "next query %ds", log_sockaddr(
				    (struct sockaddr *)&p->addr->ss),
				    INTERVAL_QUERY_PATHETIC);
				p->senderrors = 0;
				client_nextaddr(p);
				set_next(p, INTERVAL_QUERY_PATHETIC);
			}
			if (p->next > 0 && p->next < nextaction)
				nextaction = p->next;
			if (p->deadline > 0 && p->deadline < nextaction)
				nextaction = p->deadline;

			if (p->state == STATE_QUERY_SENT &&
			    p->query->fd != -1) {
				pfd[i].fd = p->query->fd;
				pfd[i].events = POLLIN;
				idx2peer[i - idx_peers] = p;
				i++;
			}
		}
		idx_clients = i;

		if (!TAILQ_EMPTY(&conf->ntp_conf_sensors)) {
			if (last_sensor_scan == 0 ||
			    last_sensor_scan + SENSOR_SCAN_INTERVAL <= getmonotime()) {
				sensors_cnt = sensor_scan();
				last_sensor_scan = getmonotime();
			}
			if (sensors_cnt == 0 &&
			    nextaction > last_sensor_scan + SENSOR_SCAN_INTERVAL)
				nextaction = last_sensor_scan + SENSOR_SCAN_INTERVAL;
			sensors_cnt = 0;
			TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
				if (conf->settime && s->offsets[0].offset)
					priv_settime(s->offsets[0].offset);
				sensors_cnt++;
				if (s->next > 0 && s->next < nextaction)
					nextaction = s->next;
			}
		}

		if (conf->settime &&
		    ((trial_cnt > 0 && sent_cnt == 0) ||
		    (peer_cnt == 0 && sensors_cnt == 0)))
			priv_settime(0);	/* no good peers, don't wait */

		if (ibuf_main->w.queued > 0)
			pfd[PFD_PIPE_MAIN].events |= POLLOUT;
		if (ibuf_dns->w.queued > 0)
			pfd[PFD_PIPE_DNS].events |= POLLOUT;

		TAILQ_FOREACH(cc, &ctl_conns, entry) {
			pfd[i].fd = cc->ibuf.fd;
			pfd[i].events = POLLIN;
			if (cc->ibuf.w.queued > 0)
				pfd[i].events |= POLLOUT;
			i++;
		}
		ctls = i;

		TAILQ_FOREACH(cstr, &conf->constraints, entry) {
			if (constraint_query(cstr) == -1)
				continue;
		}

		now = getmonotime();
		if (constraint_cnt)
			nextaction = now + 1;

		timeout = nextaction - now;
		if (timeout < 0)
			timeout = 0;

		if ((nfds = poll(pfd, i, timeout * 1000)) == -1)
			if (errno != EINTR) {
				log_warn("poll error");
				ntp_quit = 1;
			}

		if (nfds > 0 && (pfd[PFD_PIPE_MAIN].revents & POLLOUT))
			if (msgbuf_write(&ibuf_main->w) <= 0 &&
			    errno != EAGAIN) {
				log_warn("pipe write error (to parent)");
				ntp_quit = 1;
			}

		if (nfds > 0 && pfd[PFD_PIPE_MAIN].revents & (POLLIN|POLLERR)) {
			nfds--;
			if (ntp_dispatch_imsg() == -1)
				ntp_quit = 1;
		}

		if (nfds > 0 && (pfd[PFD_PIPE_DNS].revents & POLLOUT))
			if (msgbuf_write(&ibuf_dns->w) <= 0 &&
			    errno != EAGAIN) {
				log_warn("pipe write error (to dns engine)");
				ntp_quit = 1;
			}

		if (nfds > 0 && pfd[PFD_PIPE_DNS].revents & (POLLIN|POLLERR)) {
			nfds--;
			if (ntp_dispatch_imsg_dns() == -1)
				ntp_quit = 1;
		}

		if (nfds > 0 && pfd[PFD_SOCK_CTL].revents & (POLLIN|POLLERR)) {
			nfds--;
			ctl_cnt += control_accept(fd_ctl);
		}

		for (j = PFD_MAX; nfds > 0 && j < idx_peers; j++)
			if (pfd[j].revents & (POLLIN|POLLERR)) {
				nfds--;
				if (server_dispatch(pfd[j].fd, conf) == -1)
					ntp_quit = 1;
			}

		for (; nfds > 0 && j < idx_clients; j++) {
			if (pfd[j].revents & (POLLIN|POLLERR)) {
				nfds--;
				if (client_dispatch(idx2peer[j - idx_peers],
				    conf->settime) == -1)
					ntp_quit = 1;
			}
		}

		for (; nfds > 0 && j < ctls; j++) {
			nfds -= control_dispatch_msg(&pfd[j], &ctl_cnt);
		}

		for (s = TAILQ_FIRST(&conf->ntp_sensors); s != NULL;
		    s = next_s) {
			next_s = TAILQ_NEXT(s, entry);
			if (s->next <= getmonotime())
				sensor_query(s);
		}
	}

	msgbuf_write(&ibuf_main->w);
	msgbuf_clear(&ibuf_main->w);
	free(ibuf_main);
	msgbuf_write(&ibuf_dns->w);
	msgbuf_clear(&ibuf_dns->w);
	free(ibuf_dns);

	log_info("ntp engine exiting");
	exit(0);
}

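/*
 * Read and handle imsgs from the privileged parent process: IMSG_ADJTIME
 * tracks whether the clock is currently synced, IMSG_CONSTRAINT_RESULT
 * and IMSG_CONSTRAINT_CLOSE are passed on to the constraint code.
 */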
int
ntp_dispatch_imsg(void)
{
	struct imsg		 imsg;
	int			 n;

	if ((n = imsg_read(ibuf_main)) == -1 && errno != EAGAIN)
		return (-1);

	if (n == 0) {	/* connection closed */
		log_warnx("ntp_dispatch_imsg in ntp engine: pipe closed");
		return (-1);
	}

	for (;;) {
		if ((n = imsg_get(ibuf_main, &imsg)) == -1)
			return (-1);

		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_ADJTIME:
			memcpy(&n, imsg.data, sizeof(n));
			if (n == 1 && !conf->status.synced) {
				log_info("clock is now synced");
				conf->status.synced = 1;
			} else if (n == 0 && conf->status.synced) {
				log_info("clock is now unsynced");
				conf->status.synced = 0;
			}
			break;
		case IMSG_CONSTRAINT_RESULT:
			constraint_msg_result(imsg.hdr.peerid,
			    imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
			break;
		case IMSG_CONSTRAINT_CLOSE:
			constraint_msg_close(imsg.hdr.peerid,
			    imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
			break;
		default:
			break;
		}
		imsg_free(&imsg);
	}
	return (0);
}

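/*
 * Read and handle imsgs from the DNS child: IMSG_HOST_DNS delivers the
 * resolved addresses for a peer. For a pool entry every address becomes a
 * peer of its own and the template peer is removed; otherwise the addresses
 * are prepended to the peer's address list.
 */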
int
ntp_dispatch_imsg_dns(void)
{
	struct imsg		 imsg;
	struct ntp_peer		*peer, *npeer;
	u_int16_t		 dlen;
	u_char			*p;
	struct ntp_addr		*h;
	int			 n;

	if ((n = imsg_read(ibuf_dns)) == -1)
		return (-1);

	if (n == 0) {	/* connection closed */
		log_warnx("ntp_dispatch_imsg_dns in ntp engine: pipe closed");
		return (-1);
	}

	for (;;) {
		if ((n = imsg_get(ibuf_dns, &imsg)) == -1)
			return (-1);

		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_HOST_DNS:
			TAILQ_FOREACH(peer, &conf->ntp_peers, entry)
				if (peer->id == imsg.hdr.peerid)
					break;
			if (peer == NULL) {
				log_warnx("IMSG_HOST_DNS with invalid peerID");
				break;
			}
			if (peer->addr != NULL) {
				log_warnx("IMSG_HOST_DNS but addr != NULL!");
				break;
			}

			dlen = imsg.hdr.len - IMSG_HEADER_SIZE;
			if (dlen == 0) {	/* no data -> temp error */
				peer->state = STATE_DNS_TEMPFAIL;
				break;
			}

			p = (u_char *)imsg.data;
			while (dlen >= sizeof(struct sockaddr_storage)) {
				if ((h = calloc(1, sizeof(struct ntp_addr))) ==
				    NULL)
					fatal(NULL);
				memcpy(&h->ss, p, sizeof(h->ss));
				p += sizeof(h->ss);
				dlen -= sizeof(h->ss);
				if (peer->addr_head.pool) {
					npeer = new_peer();
					npeer->weight = peer->weight;
					h->next = NULL;
					npeer->addr = h;
					npeer->addr_head.a = h;
					npeer->addr_head.name =
					    peer->addr_head.name;
					npeer->addr_head.pool = 1;
					client_peer_init(npeer);
					npeer->state = STATE_DNS_DONE;
					peer_add(npeer);
				} else {
					h->next = peer->addr;
					peer->addr = h;
					peer->addr_head.a = peer->addr;
					peer->state = STATE_DNS_DONE;
				}
			}
			if (dlen != 0)
				fatalx("IMSG_HOST_DNS: dlen != 0");
			if (peer->addr_head.pool)
				peer_remove(peer);
			else
				client_addr_init(peer);
			break;
		case IMSG_CONSTRAINT_DNS:
			constraint_msg_dns(imsg.hdr.peerid,
			    imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
			break;
		default:
			break;
		}
		imsg_free(&imsg);
	}
	return (0);
}

void
peer_add(struct ntp_peer *p)
{
	TAILQ_INSERT_TAIL(&conf->ntp_peers, p, entry);
	peer_cnt++;
}

void
peer_remove(struct ntp_peer *p)
{
	TAILQ_REMOVE(&conf->ntp_peers, p, entry);
	free(p);
	peer_cnt--;
}

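/*
 * Accumulate the applied offsets over time and, every FREQUENCY_SAMPLES
 * samples, estimate the clock's frequency error as the least-squares slope
 *
 *	freq = (sum(xy) - sum(x)*sum(y)/n) / (sum(xx) - sum(x)^2/n)
 *
 * where x is the corrected time and y the accumulated offset. The result
 * is clamped to +/- MAX_FREQUENCY_ADJUST and sent to the parent as
 * IMSG_ADJFREQ.
 */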
static void
priv_adjfreq(double offset)
{
	double curtime, freq;

	if (!conf->status.synced){
		conf->freq.samples = 0;
		return;
	}

	conf->freq.samples++;

	if (conf->freq.samples <= 0)
		return;

	conf->freq.overall_offset += offset;
	offset = conf->freq.overall_offset;

	curtime = gettime_corrected();
	conf->freq.xy += offset * curtime;
	conf->freq.x += curtime;
	conf->freq.y += offset;
	conf->freq.xx += curtime * curtime;

	if (conf->freq.samples % FREQUENCY_SAMPLES != 0)
		return;

	freq =
	    (conf->freq.xy - conf->freq.x * conf->freq.y / conf->freq.samples)
	    /
	    (conf->freq.xx - conf->freq.x * conf->freq.x / conf->freq.samples);

	if (freq > MAX_FREQUENCY_ADJUST)
		freq = MAX_FREQUENCY_ADJUST;
	else if (freq < -MAX_FREQUENCY_ADJUST)
		freq = -MAX_FREQUENCY_ADJUST;

	imsg_compose(ibuf_main, IMSG_ADJFREQ, 0, 0, -1, &freq, sizeof(freq));
	conf->filters |= FILTER_ADJFREQ;
	conf->freq.xy = 0.0;
	conf->freq.x = 0.0;
	conf->freq.y = 0.0;
	conf->freq.xx = 0.0;
	conf->freq.samples = 0;
	conf->freq.overall_offset = 0.0;
	conf->freq.num++;
}

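/*
 * Pick the offset to apply to the clock: collect one pointer per weight
 * unit from every trusted peer and every good sensor, sort them by offset
 * and take the median (on an even count the entry with the lower delay).
 * The median is handed to the parent as IMSG_ADJTIME and subtracted from
 * all stored peer and sensor offsets.
 */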
int
priv_adjtime(void)
{
	struct ntp_peer		 *p;
	struct ntp_sensor	 *s;
	int			  offset_cnt = 0, i = 0, j;
	struct ntp_offset	**offsets;
	double			  offset_median;

	TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
		if (p->trustlevel < TRUSTLEVEL_BADPEER)
			continue;
		if (!p->update.good)
			return (1);
		offset_cnt += p->weight;
	}

	TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
		if (!s->update.good)
			continue;
		offset_cnt += s->weight;
	}

	if (offset_cnt == 0)
		return (1);

	if ((offsets = calloc(offset_cnt, sizeof(struct ntp_offset *))) == NULL)
		fatal("calloc priv_adjtime");

	TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
		if (p->trustlevel < TRUSTLEVEL_BADPEER)
			continue;
		for (j = 0; j < p->weight; j++)
			offsets[i++] = &p->update;
	}

	TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
		if (!s->update.good)
			continue;
		for (j = 0; j < s->weight; j++)
			offsets[i++] = &s->update;
	}

	qsort(offsets, offset_cnt, sizeof(struct ntp_offset *), offset_compare);

	i = offset_cnt / 2;
	if (offset_cnt % 2 == 0)
		if (offsets[i - 1]->delay < offsets[i]->delay)
			i -= 1;
	offset_median = offsets[i]->offset;
	conf->status.rootdelay = offsets[i]->delay;
	conf->status.stratum = offsets[i]->status.stratum;
	conf->status.leap = offsets[i]->status.leap;

	imsg_compose(ibuf_main, IMSG_ADJTIME, 0, 0, -1,
	    &offset_median, sizeof(offset_median));

	priv_adjfreq(offset_median);

	conf->status.reftime = gettime();
	conf->status.stratum++;	/* one more than selected peer */
	if (conf->status.stratum > NTP_MAXSTRATUM)
		conf->status.stratum = NTP_MAXSTRATUM;
	update_scale(offset_median);

	conf->status.refid = offsets[i]->status.send_refid;

	free(offsets);

	TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
		for (i = 0; i < OFFSET_ARRAY_SIZE; i++)
			p->reply[i].offset -= offset_median;
		p->update.good = 0;
	}
	TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
		for (i = 0; i < SENSOR_OFFSETS; i++)
			s->offsets[i].offset -= offset_median;
		s->update.offset -= offset_median;
	}

	return (0);
}

int
offset_compare(const void *aa, const void *bb)
{
	const struct ntp_offset * const *a;
	const struct ntp_offset * const *b;

	a = aa;
	b = bb;

	if ((*a)->offset < (*b)->offset)
		return (-1);
	else if ((*a)->offset > (*b)->offset)
		return (1);
	else
		return (0);
}

void
priv_settime(double offset)
{
	imsg_compose(ibuf_main, IMSG_SETTIME, 0, 0, -1,
	    &offset, sizeof(offset));
	conf->settime = 0;
}

void
priv_dns(int cmd, char *name, u_int32_t peerid)
{
	u_int16_t	dlen;

	dlen = strlen(name) + 1;
	imsg_compose(ibuf_dns, cmd, peerid, 0, -1, name, dlen);
}

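/*
 * Scale the query interval with the quality of the last adjustment: while
 * unsynced, before three frequency adjustments or with an absolute offset
 * above QSCALE_OFF_MAX the scale stays at 1; smaller offsets stretch the
 * interval by up to QSCALE_OFF_MAX / QSCALE_OFF_MIN. scale_interval()
 * applies the current scale plus a random fuzz, error_interval() always
 * uses the maximum stretch.
 */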
void
update_scale(double offset)
{
	offset += getoffset();
	if (offset < 0)
		offset = -offset;

	if (offset > QSCALE_OFF_MAX || !conf->status.synced ||
	    conf->freq.num < 3)
		conf->scale = 1;
	else if (offset < QSCALE_OFF_MIN)
		conf->scale = QSCALE_OFF_MAX / QSCALE_OFF_MIN;
	else
		conf->scale = QSCALE_OFF_MAX / offset;
}

time_t
scale_interval(time_t requested)
{
	time_t interval, r;

	interval = requested * conf->scale;
	r = arc4random_uniform(MAXIMUM(5, interval / 10));
	return (interval + r);
}

time_t
error_interval(void)
{
	time_t interval, r;

	interval = INTERVAL_QUERY_PATHETIC * QSCALE_OFF_MAX / QSCALE_OFF_MIN;
	r = arc4random_uniform(interval / 10);
	return (interval + r);
}