xref: /netbsd-src/external/bsd/ntp/dist/sntp/main.c (revision 946379e7b37692fc43f68eb0d1c10daa0a7f3b6c)
1 /*	$NetBSD: main.c,v 1.14 2016/01/08 21:35:40 christos Exp $	*/
2 
3 #include <config.h>
4 
5 #include <event2/util.h>
6 #include <event2/event.h>
7 
8 #include "ntp_workimpl.h"
9 #ifdef WORK_THREAD
10 # include <event2/thread.h>
11 #endif
12 
13 #include "main.h"
14 #include "ntp_libopts.h"
15 #include "kod_management.h"
16 #include "networking.h"
17 #include "utilities.h"
18 #include "log.h"
19 #include "libntp.h"
20 
21 
22 int shutting_down;
23 int time_derived;
24 int time_adjusted;
25 int n_pending_dns = 0;
26 int n_pending_ntp = 0;
27 int ai_fam_pref = AF_UNSPEC;
28 int ntpver = 4;
29 double steplimit = -1;
30 SOCKET sock4 = -1;		/* Socket for IPv4 */
31 SOCKET sock6 = -1;		/* Socket for IPv6 */
32 /*
33 ** BCAST *must* listen on port 123 (by default), so we can only
34 ** use the UCST sockets (above) if they too are using port 123
35 */
36 SOCKET bsock4 = -1;		/* Broadcast Socket for IPv4 */
37 SOCKET bsock6 = -1;		/* Broadcast Socket for IPv6 */
38 struct event_base *base;
39 struct event *ev_sock4;
40 struct event *ev_sock6;
41 struct event *ev_worker_timeout;
42 struct event *ev_xmt_timer;
43 
44 struct dns_ctx {
45 	const char *	name;
46 	int		flags;
47 #define CTX_BCST	0x0001
48 #define CTX_UCST	0x0002
49 #define CTX_xCST	0x0003
50 #define CTX_CONC	0x0004
51 #define CTX_unused	0xfffd
52 	int		key_id;
53 	struct timeval	timeout;
54 	struct key *	key;
55 };
56 
57 typedef struct sent_pkt_tag sent_pkt;
58 struct sent_pkt_tag {
59 	sent_pkt *		link;
60 	struct dns_ctx *	dctx;
61 	sockaddr_u		addr;
62 	time_t			stime;
63 	int			done;
64 	struct pkt		x_pkt;
65 };
66 
67 typedef struct xmt_ctx_tag xmt_ctx;
68 struct xmt_ctx_tag {
69 	xmt_ctx *		link;
70 	SOCKET			sock;
71 	time_t			sched;
72 	sent_pkt *		spkt;
73 };
74 
75 struct timeval	gap;
76 xmt_ctx *	xmt_q;
77 struct key *	keys = NULL;
78 int		response_timeout;
79 struct timeval	response_tv;
80 struct timeval	start_tv;
81 /* check the timeout at least once per second */
82 struct timeval	wakeup_tv = { 0, 888888 };
83 
84 sent_pkt *	fam_listheads[2];
85 #define v4_pkts_list	(fam_listheads[0])
86 #define v6_pkts_list	(fam_listheads[1])
87 
88 static union {
89 	struct pkt pkt;
90 	char   buf[LEN_PKT_NOMAC + NTP_MAXEXTEN + MAX_MAC_LEN];
91 } rbuf;
92 
93 #define r_pkt  rbuf.pkt
94 
95 #ifdef HAVE_DROPROOT
96 int droproot;			/* intres imports these */
97 int root_dropped;
98 #endif
99 u_long current_time;		/* libntp/authkeys.c */
100 
101 void open_sockets(void);
102 void handle_lookup(const char *name, int flags);
103 void sntp_addremove_fd(int fd, int is_pipe, int remove_it);
104 void worker_timeout(evutil_socket_t, short, void *);
105 void worker_resp_cb(evutil_socket_t, short, void *);
106 void sntp_name_resolved(int, int, void *, const char *, const char *,
107 			const struct addrinfo *,
108 			const struct addrinfo *);
109 void queue_xmt(SOCKET sock, struct dns_ctx *dctx, sent_pkt *spkt,
110 	       u_int xmt_delay);
111 void xmt_timer_cb(evutil_socket_t, short, void *ptr);
112 void xmt(xmt_ctx *xctx);
113 int  check_kod(const struct addrinfo *ai);
114 void timeout_query(sent_pkt *);
115 void timeout_queries(void);
116 void sock_cb(evutil_socket_t, short, void *);
117 void check_exit_conditions(void);
118 void sntp_libevent_log_cb(int, const char *);
119 void set_li_vn_mode(struct pkt *spkt, char leap, char version, char mode);
120 int  set_time(double offset);
121 void dec_pending_ntp(const char *, sockaddr_u *);
122 int  libevent_version_ok(void);
123 int  gettimeofday_cached(struct event_base *b, struct timeval *tv);
124 
125 
126 /*
127  * The actual main function.
128  */
129 int
130 sntp_main (
131 	int argc,
132 	char **argv,
133 	const char *sntpVersion
134 	)
135 {
136 	int			i;
137 	int			exitcode;
138 	int			optct;
139 	struct event_config *	evcfg;
140 
141 	/* Initialize logging system - sets up progname */
142 	sntp_init_logging(argv[0]);
143 
144 	if (!libevent_version_ok())
145 		exit(EX_SOFTWARE);
146 
147 	init_lib();
148 	init_auth();
149 
150 	optct = ntpOptionProcess(&sntpOptions, argc, argv);
151 	argc -= optct;
152 	argv += optct;
153 
154 
155 	debug = OPT_VALUE_SET_DEBUG_LEVEL;
156 
157 	TRACE(2, ("init_lib() done, %s%s\n",
158 		  (ipv4_works)
159 		      ? "ipv4_works "
160 		      : "",
161 		  (ipv6_works)
162 		      ? "ipv6_works "
163 		      : ""));
164 	ntpver = OPT_VALUE_NTPVERSION;
165 	steplimit = OPT_VALUE_STEPLIMIT / 1e3;
166 	gap.tv_usec = max(0, OPT_VALUE_GAP * 1000);
167 	gap.tv_usec = min(gap.tv_usec, 999999);
168 
169 	if (HAVE_OPT(LOGFILE))
170 		open_logfile(OPT_ARG(LOGFILE));
171 
172 	msyslog(LOG_INFO, "%s", sntpVersion);
173 
174 	if (0 == argc && !HAVE_OPT(BROADCAST) && !HAVE_OPT(CONCURRENT)) {
175 		printf("%s: Must supply at least one of -b hostname, -c hostname, or hostname.\n",
176 		       progname);
177 		exit(EX_USAGE);
178 	}
179 
180 
181 	/*
182 	** Eventually, we probably want:
183 	** - separate bcst and ucst timeouts (why?)
184 	** - multiple --timeout values in the commandline
185 	*/
186 
187 	response_timeout = OPT_VALUE_TIMEOUT;
188 	response_tv.tv_sec = response_timeout;
189 	response_tv.tv_usec = 0;
190 
191 	/* IPv6 available? */
192 	if (isc_net_probeipv6() != ISC_R_SUCCESS) {
193 		ai_fam_pref = AF_INET;
194 		TRACE(1, ("No ipv6 support available, forcing ipv4\n"));
195 	} else {
196 		/* Check for options -4 and -6 */
197 		if (HAVE_OPT(IPV4))
198 			ai_fam_pref = AF_INET;
199 		else if (HAVE_OPT(IPV6))
200 			ai_fam_pref = AF_INET6;
201 	}
202 
203 	/* TODO: Parse config file if declared */
204 
205 	/*
206 	** Init the KOD system.
207 	** For embedded systems with no writable filesystem,
208 	** -K /dev/null can be used to disable KoD storage.
209 	*/
210 	kod_init_kod_db(OPT_ARG(KOD), FALSE);
211 
212 	// HMS: Should we use arg-defalt for this too?
213 	if (HAVE_OPT(KEYFILE))
214 		auth_init(OPT_ARG(KEYFILE), &keys);
215 
216 	/*
217 	** Considering employing a variable that prevents functions of doing
218 	** anything until everything is initialized properly
219 	**
220 	** HMS: What exactly does the above mean?
221 	*/
222 	event_set_log_callback(&sntp_libevent_log_cb);
223 	if (debug > 0)
224 		event_enable_debug_mode();
225 #ifdef WORK_THREAD
226 	evthread_use_pthreads();
227 	/* we use libevent from main thread only, locks should be academic */
228 	if (debug > 0)
229 		evthread_enable_lock_debuging();
230 #endif
231 	evcfg = event_config_new();
232 	if (NULL == evcfg) {
233 		printf("%s: event_config_new() failed!\n", progname);
234 		return -1;
235 	}
236 #ifndef HAVE_SOCKETPAIR
237 	event_config_require_features(evcfg, EV_FEATURE_FDS);
238 #endif
239 	/* all libevent calls are from main thread */
240 	/* event_config_set_flag(evcfg, EVENT_BASE_FLAG_NOLOCK); */
241 	base = event_base_new_with_config(evcfg);
242 	event_config_free(evcfg);
243 	if (NULL == base) {
244 		printf("%s: event_base_new() failed!\n", progname);
245 		return -1;
246 	}
247 
248 	/* wire into intres resolver */
249 	worker_per_query = TRUE;
250 	addremove_io_fd = &sntp_addremove_fd;
251 
252 	open_sockets();
253 
254 	if (HAVE_OPT(BROADCAST)) {
255 		int		cn = STACKCT_OPT(  BROADCAST );
256 		const char **	cp = STACKLST_OPT( BROADCAST );
257 
258 		while (cn-- > 0) {
259 			handle_lookup(*cp, CTX_BCST);
260 			cp++;
261 		}
262 	}
263 
264 	if (HAVE_OPT(CONCURRENT)) {
265 		int		cn = STACKCT_OPT( CONCURRENT );
266 		const char **	cp = STACKLST_OPT( CONCURRENT );
267 
268 		while (cn-- > 0) {
269 			handle_lookup(*cp, CTX_UCST | CTX_CONC);
270 			cp++;
271 		}
272 	}
273 
274 	for (i = 0; i < argc; ++i)
275 		handle_lookup(argv[i], CTX_UCST);
276 
277 	gettimeofday_cached(base, &start_tv);
278 	event_base_dispatch(base);
279 	event_base_free(base);
280 
281 	if (!time_adjusted &&
282 	    (ENABLED_OPT(STEP) || ENABLED_OPT(SLEW)))
283 		exitcode = 1;
284 	else
285 		exitcode = 0;
286 
287 	return exitcode;
288 }
289 
290 
291 /*
292 ** open sockets and make them non-blocking
293 */
294 void
295 open_sockets(
296 	void
297 	)
298 {
299 	sockaddr_u	name;
300 
301 	if (-1 == sock4) {
302 		sock4 = socket(PF_INET, SOCK_DGRAM, 0);
303 		if (-1 == sock4) {
304 			/* error getting a socket */
305 			msyslog(LOG_ERR, "open_sockets: socket(PF_INET) failed: %m");
306 			exit(1);
307 		}
308 		/* Make it non-blocking */
309 		make_socket_nonblocking(sock4);
310 
311 		/* Let's try using a wildcard... */
312 		ZERO(name);
313 		AF(&name) = AF_INET;
314 		SET_ADDR4N(&name, INADDR_ANY);
315 		SET_PORT(&name, (HAVE_OPT(USERESERVEDPORT) ? 123 : 0));
316 
317 		if (-1 == bind(sock4, &name.sa,
318 			       SOCKLEN(&name))) {
319 			msyslog(LOG_ERR, "open_sockets: bind(sock4) failed: %m");
320 			exit(1);
321 		}
322 
323 		/* Register an NTP callback for recv/timeout */
324 		ev_sock4 = event_new(base, sock4,
325 				     EV_TIMEOUT | EV_READ | EV_PERSIST,
326 				     &sock_cb, NULL);
327 		if (NULL == ev_sock4) {
328 			msyslog(LOG_ERR,
329 				"open_sockets: event_new(base, sock4) failed!");
330 		} else {
331 			event_add(ev_sock4, &wakeup_tv);
332 		}
333 	}
334 
335 	/* We may not always have IPv6... */
336 	if (-1 == sock6 && ipv6_works) {
337 		sock6 = socket(PF_INET6, SOCK_DGRAM, 0);
338 		if (-1 == sock6 && ipv6_works) {
339 			/* error getting a socket */
340 			msyslog(LOG_ERR, "open_sockets: socket(PF_INET6) failed: %m");
341 			exit(1);
342 		}
343 		/* Make it non-blocking */
344 		make_socket_nonblocking(sock6);
345 
346 		/* Let's try using a wildcard... */
347 		ZERO(name);
348 		AF(&name) = AF_INET6;
349 		SET_ADDR6N(&name, in6addr_any);
350 		SET_PORT(&name, (HAVE_OPT(USERESERVEDPORT) ? 123 : 0));
351 
352 		if (-1 == bind(sock6, &name.sa,
353 			       SOCKLEN(&name))) {
354 			msyslog(LOG_ERR, "open_sockets: bind(sock6) failed: %m");
355 			exit(1);
356 		}
357 		/* Register an NTP callback for recv/timeout */
358 		ev_sock6 = event_new(base, sock6,
359 				     EV_TIMEOUT | EV_READ | EV_PERSIST,
360 				     &sock_cb, NULL);
361 		if (NULL == ev_sock6) {
362 			msyslog(LOG_ERR,
363 				"open_sockets: event_new(base, sock6) failed!");
364 		} else {
365 			event_add(ev_sock6, &wakeup_tv);
366 		}
367 	}
368 
369 	return;
370 }
371 
372 
373 /*
374 ** handle_lookup
375 */
376 void
377 handle_lookup(
378 	const char *name,
379 	int flags
380 	)
381 {
382 	struct addrinfo	hints;	/* Local copy is OK */
383 	struct dns_ctx *ctx;
384 	long		l;
385 	char *		name_copy;
386 	size_t		name_sz;
387 	size_t		octets;
388 
389 	TRACE(1, ("handle_lookup(%s,%#x)\n", name, flags));
390 
391 	ZERO(hints);
392 	hints.ai_family = ai_fam_pref;
393 	hints.ai_flags = AI_CANONNAME | Z_AI_NUMERICSERV;
394 	/*
395 	** Unless we specify a socktype, we'll get at least two
396 	** entries for each address: one for TCP and one for
397 	** UDP. That's not what we want.
398 	*/
399 	hints.ai_socktype = SOCK_DGRAM;
400 	hints.ai_protocol = IPPROTO_UDP;
401 
402 	name_sz = 1 + strlen(name);
403 	octets = sizeof(*ctx) + name_sz;	// Space for a ctx and the name
404 	ctx = emalloc_zero(octets);		// ctx at ctx[0]
405 	name_copy = (char *)(ctx + 1);		// Put the name at ctx[1]
406 	memcpy(name_copy, name, name_sz);	// copy the name to ctx[1]
407 	ctx->name = name_copy;			// point to it...
408 	ctx->flags = flags;
409 	ctx->timeout = response_tv;
410 
411 	/* The following should arguably be passed in... */
412 	if (ENABLED_OPT(AUTHENTICATION) &&
413 	    atoint(OPT_ARG(AUTHENTICATION), &l)) {
414 		ctx->key_id = l;
415 		get_key(ctx->key_id, &ctx->key);
416 	} else {
417 		ctx->key_id = -1;
418 		ctx->key = NULL;
419 	}
420 
421 	++n_pending_dns;
422 	getaddrinfo_sometime(name, "123", &hints, 0,
423 			     &sntp_name_resolved, ctx);
424 }
425 
426 
427 /*
428 ** DNS Callback:
429 ** - For each IP:
430 ** - - open a socket
431 ** - - increment n_pending_ntp
432 ** - - send a request if this is a Unicast callback
433 ** - - queue wait for response
434 ** - decrement n_pending_dns
435 */
436 void
437 sntp_name_resolved(
438 	int			rescode,
439 	int			gai_errno,
440 	void *			context,
441 	const char *		name,
442 	const char *		service,
443 	const struct addrinfo *	hints,
444 	const struct addrinfo *	addr
445 	)
446 {
447 	struct dns_ctx *	dctx;
448 	sent_pkt *		spkt;
449 	const struct addrinfo *	ai;
450 	SOCKET			sock;
451 	u_int			xmt_delay_v4;
452 	u_int			xmt_delay_v6;
453 	u_int			xmt_delay;
454 	size_t			octets;
455 
456 	xmt_delay_v4 = 0;
457 	xmt_delay_v6 = 0;
458 	dctx = context;
459 	if (rescode) {
460 #ifdef EAI_SYSTEM
461 		if (EAI_SYSTEM == rescode) {
462 			errno = gai_errno;
463 			mfprintf(stderr, "%s lookup error %m\n",
464 				 dctx->name);
465 		} else
466 #endif
467 			fprintf(stderr, "%s lookup error %s\n",
468 				dctx->name, gai_strerror(rescode));
469 	} else {
470 		TRACE(3, ("%s [%s]\n", dctx->name,
471 			  (addr->ai_canonname != NULL)
472 			      ? addr->ai_canonname
473 			      : ""));
474 
475 		for (ai = addr; ai != NULL; ai = ai->ai_next) {
476 
477 			if (check_kod(ai))
478 				continue;
479 
480 			switch (ai->ai_family) {
481 
482 			case AF_INET:
483 				sock = sock4;
484 				xmt_delay = xmt_delay_v4;
485 				xmt_delay_v4++;
486 				break;
487 
488 			case AF_INET6:
489 				if (!ipv6_works)
490 					continue;
491 
492 				sock = sock6;
493 				xmt_delay = xmt_delay_v6;
494 				xmt_delay_v6++;
495 				break;
496 
497 			default:
498 				msyslog(LOG_ERR, "sntp_name_resolved: unexpected ai_family: %d",
499 					ai->ai_family);
500 				exit(1);
501 				break;
502 			}
503 
504 			/*
505 			** We're waiting for a response for either unicast
506 			** or broadcast, so...
507 			*/
508 			++n_pending_ntp;
509 
510 			/* If this is for a unicast IP, queue a request */
511 			if (dctx->flags & CTX_UCST) {
512 				spkt = emalloc_zero(sizeof(*spkt));
513 				spkt->dctx = dctx;
514 				octets = min(ai->ai_addrlen, sizeof(spkt->addr));
515 				memcpy(&spkt->addr, ai->ai_addr, octets);
516 				queue_xmt(sock, dctx, spkt, xmt_delay);
517 			}
518 		}
519 	}
520 	/* n_pending_dns really should be >0 here... */
521 	--n_pending_dns;
522 	check_exit_conditions();
523 }
524 
525 
526 /*
527 ** queue_xmt
528 */
529 void
530 queue_xmt(
531 	SOCKET			sock,
532 	struct dns_ctx *	dctx,
533 	sent_pkt *		spkt,
534 	u_int			xmt_delay
535 	)
536 {
537 	sockaddr_u *	dest;
538 	sent_pkt **	pkt_listp;
539 	sent_pkt *	match;
540 	xmt_ctx *	xctx;
541 	struct timeval	start_cb;
542 	struct timeval	delay;
543 
544 	dest = &spkt->addr;
545 	if (IS_IPV6(dest))
546 		pkt_listp = &v6_pkts_list;
547 	else
548 		pkt_listp = &v4_pkts_list;
549 
550 	/* reject attempts to add address already listed */
551 	for (match = *pkt_listp; match != NULL; match = match->link) {
552 		if (ADDR_PORT_EQ(&spkt->addr, &match->addr)) {
553 			if (strcasecmp(spkt->dctx->name,
554 				       match->dctx->name))
555 				printf("%s %s duplicate address from %s ignored.\n",
556 				       sptoa(&match->addr),
557 				       match->dctx->name,
558 				       spkt->dctx->name);
559 			else
560 				printf("%s %s, duplicate address ignored.\n",
561 				       sptoa(&match->addr),
562 				       match->dctx->name);
563 			dec_pending_ntp(spkt->dctx->name, &spkt->addr);
564 			free(spkt);
565 			return;
566 		}
567 	}
568 
569 	LINK_SLIST(*pkt_listp, spkt, link);
570 
571 	xctx = emalloc_zero(sizeof(*xctx));
572 	xctx->sock = sock;
573 	xctx->spkt = spkt;
574 	gettimeofday_cached(base, &start_cb);
575 	xctx->sched = start_cb.tv_sec + (2 * xmt_delay);
576 
577 	LINK_SORT_SLIST(xmt_q, xctx, (xctx->sched < L_S_S_CUR()->sched),
578 			link, xmt_ctx);
579 	if (xmt_q == xctx) {
580 		/*
581 		 * The new entry is the first scheduled.  The timer is
582 		 * either not active or is set for the second xmt
583 		 * context in xmt_q.
584 		 */
585 		if (NULL == ev_xmt_timer)
586 			ev_xmt_timer = event_new(base, INVALID_SOCKET,
587 						 EV_TIMEOUT,
588 						 &xmt_timer_cb, NULL);
589 		if (NULL == ev_xmt_timer) {
590 			msyslog(LOG_ERR,
591 				"queue_xmt: event_new(base, -1, EV_TIMEOUT) failed!");
592 			exit(1);
593 		}
594 		ZERO(delay);
595 		if (xctx->sched > start_cb.tv_sec)
596 			delay.tv_sec = xctx->sched - start_cb.tv_sec;
597 		event_add(ev_xmt_timer, &delay);
598 		TRACE(2, ("queue_xmt: xmt timer for %u usec\n",
599 			  (u_int)delay.tv_usec));
600 	}
601 }
602 
603 
604 /*
605 ** xmt_timer_cb
606 */
607 void
608 xmt_timer_cb(
609 	evutil_socket_t	fd,
610 	short		what,
611 	void *		ctx
612 	)
613 {
614 	struct timeval	start_cb;
615 	struct timeval	delay;
616 	xmt_ctx *	x;
617 
618 	UNUSED_ARG(fd);
619 	UNUSED_ARG(ctx);
620 	DEBUG_INSIST(EV_TIMEOUT == what);
621 
622 	if (NULL == xmt_q || shutting_down)
623 		return;
624 	gettimeofday_cached(base, &start_cb);
625 	if (xmt_q->sched <= start_cb.tv_sec) {
626 		UNLINK_HEAD_SLIST(x, xmt_q, link);
627 		TRACE(2, ("xmt_timer_cb: at .%6.6u -> %s\n",
628 			  (u_int)start_cb.tv_usec, stoa(&x->spkt->addr)));
629 		xmt(x);
630 		free(x);
631 		if (NULL == xmt_q)
632 			return;
633 	}
634 	if (xmt_q->sched <= start_cb.tv_sec) {
635 		event_add(ev_xmt_timer, &gap);
636 		TRACE(2, ("xmt_timer_cb: at .%6.6u gap %6.6u\n",
637 			  (u_int)start_cb.tv_usec,
638 			  (u_int)gap.tv_usec));
639 	} else {
640 		delay.tv_sec = xmt_q->sched - start_cb.tv_sec;
641 		delay.tv_usec = 0;
642 		event_add(ev_xmt_timer, &delay);
643 		TRACE(2, ("xmt_timer_cb: at .%6.6u next %ld seconds\n",
644 			  (u_int)start_cb.tv_usec,
645 			  (long)delay.tv_sec));
646 	}
647 }
648 
649 
650 /*
651 ** xmt()
652 */
653 void
654 xmt(
655 	xmt_ctx *	xctx
656 	)
657 {
658 	SOCKET		sock = xctx->sock;
659 	struct dns_ctx *dctx = xctx->spkt->dctx;
660 	sent_pkt *	spkt = xctx->spkt;
661 	sockaddr_u *	dst = &spkt->addr;
662 	struct timeval	tv_xmt;
663 	struct pkt	x_pkt;
664 	size_t		pkt_len;
665 	int		sent;
666 
667 	if (0 != gettimeofday(&tv_xmt, NULL)) {
668 		msyslog(LOG_ERR,
669 			"xmt: gettimeofday() failed: %m");
670 		exit(1);
671 	}
672 	tv_xmt.tv_sec += JAN_1970;
673 
674 	pkt_len = generate_pkt(&x_pkt, &tv_xmt, dctx->key_id,
675 			       dctx->key);
676 
677 	sent = sendpkt(sock, dst, &x_pkt, pkt_len);
678 	if (sent) {
679 		/* Save the packet we sent... */
680 		memcpy(&spkt->x_pkt, &x_pkt, min(sizeof(spkt->x_pkt),
681 		       pkt_len));
682 		spkt->stime = tv_xmt.tv_sec - JAN_1970;
683 
684 		TRACE(2, ("xmt: %lx.%6.6u %s %s\n", (u_long)tv_xmt.tv_sec,
685 			  (u_int)tv_xmt.tv_usec, dctx->name, stoa(dst)));
686 	} else {
687 		dec_pending_ntp(dctx->name, dst);
688 	}
689 
690 	return;
691 }
692 
693 
694 /*
695  * timeout_queries() -- give up on unrequited NTP queries
696  */
697 void
698 timeout_queries(void)
699 {
700 	struct timeval	start_cb;
701 	u_int		idx;
702 	sent_pkt *	head;
703 	sent_pkt *	spkt;
704 	sent_pkt *	spkt_next;
705 	long		age;
706 	int didsomething = 0;
707 
708 	TRACE(3, ("timeout_queries: called to check %u items\n",
709 		  (unsigned)COUNTOF(fam_listheads)));
710 
711 	gettimeofday_cached(base, &start_cb);
712 	for (idx = 0; idx < COUNTOF(fam_listheads); idx++) {
713 		head = fam_listheads[idx];
714 		for (spkt = head; spkt != NULL; spkt = spkt_next) {
715 			char xcst;
716 
717 			didsomething = 1;
718 			switch (spkt->dctx->flags & CTX_xCST) {
719 			    case CTX_BCST:
720 				xcst = 'B';
721 				break;
722 
723 			    case CTX_UCST:
724 				xcst = 'U';
725 				break;
726 
727 			    default:
728 				INSIST(!"spkt->dctx->flags neither UCST nor BCST");
729 				break;
730 			}
731 
732 			spkt_next = spkt->link;
733 			if (0 == spkt->stime || spkt->done)
734 				continue;
735 			age = start_cb.tv_sec - spkt->stime;
736 			TRACE(3, ("%s %s %cCST age %ld\n",
737 				  stoa(&spkt->addr),
738 				  spkt->dctx->name, xcst, age));
739 			if (age > response_timeout)
740 				timeout_query(spkt);
741 		}
742 	}
743 	// Do we care about didsomething?
744 	TRACE(3, ("timeout_queries: didsomething is %d, age is %ld\n",
745 		  didsomething, (long) (start_cb.tv_sec - start_tv.tv_sec)));
746 	if (start_cb.tv_sec - start_tv.tv_sec > response_timeout) {
747 		TRACE(3, ("timeout_queries: bail!\n"));
748 		event_base_loopexit(base, NULL);
749 		shutting_down = TRUE;
750 	}
751 }
752 
753 
754 void dec_pending_ntp(
755 	const char *	name,
756 	sockaddr_u *	server
757 	)
758 {
759 	if (n_pending_ntp > 0) {
760 		--n_pending_ntp;
761 		check_exit_conditions();
762 	} else {
763 		INSIST(0 == n_pending_ntp);
764 		TRACE(1, ("n_pending_ntp was zero before decrement for %s\n",
765 			  hostnameaddr(name, server)));
766 	}
767 }
768 
769 
770 void timeout_query(
771 	sent_pkt *	spkt
772 	)
773 {
774 	sockaddr_u *	server;
775 	char		xcst;
776 
777 
778 	switch (spkt->dctx->flags & CTX_xCST) {
779 	    case CTX_BCST:
780 		xcst = 'B';
781 		break;
782 
783 	    case CTX_UCST:
784 		xcst = 'U';
785 		break;
786 
787 	    default:
788 		INSIST(!"spkt->dctx->flags neither UCST nor BCST");
789 		break;
790 	}
791 	spkt->done = TRUE;
792 	server = &spkt->addr;
793 	msyslog(LOG_INFO, "%s no %cCST response after %d seconds",
794 		hostnameaddr(spkt->dctx->name, server), xcst,
795 		response_timeout);
796 	dec_pending_ntp(spkt->dctx->name, server);
797 	return;
798 }
799 
800 
801 /*
802 ** check_kod
803 */
804 int
805 check_kod(
806 	const struct addrinfo *	ai
807 	)
808 {
809 	char *hostname;
810 	struct kod_entry *reason;
811 
812 	/* Is there a KoD on file for this address? */
813 	hostname = addrinfo_to_str(ai);
814 	TRACE(2, ("check_kod: checking <%s>\n", hostname));
815 	if (search_entry(hostname, &reason)) {
816 		printf("prior KoD for %s, skipping.\n",
817 			hostname);
818 		free(reason);
819 		free(hostname);
820 
821 		return 1;
822 	}
823 	free(hostname);
824 
825 	return 0;
826 }
827 
828 
829 /*
830 ** Socket readable/timeout Callback:
831 ** Read in the packet
832 ** Unicast:
833 ** - close socket
834 ** - decrement n_pending_ntp
835 ** - If packet is good, set the time and "exit"
836 ** Broadcast:
837 ** - If packet is good, set the time and "exit"
838 */
839 void
840 sock_cb(
841 	evutil_socket_t fd,
842 	short what,
843 	void *ptr
844 	)
845 {
846 	sockaddr_u	sender;
847 	sockaddr_u *	psau;
848 	sent_pkt **	p_pktlist;
849 	sent_pkt *	spkt;
850 	int		rpktl;
851 	int		rc;
852 
853 	INSIST(sock4 == fd || sock6 == fd);
854 
855 	TRACE(3, ("sock_cb: event on sock%s:%s%s%s%s\n",
856 		  (fd == sock6)
857 		      ? "6"
858 		      : "4",
859 		  (what & EV_TIMEOUT) ? " timeout" : "",
860 		  (what & EV_READ)    ? " read" : "",
861 		  (what & EV_WRITE)   ? " write" : "",
862 		  (what & EV_SIGNAL)  ? " signal" : ""));
863 
864 	if (!(EV_READ & what)) {
865 		if (EV_TIMEOUT & what)
866 			timeout_queries();
867 
868 		return;
869 	}
870 
871 	/* Read in the packet */
872 	rpktl = recvdata(fd, &sender, &rbuf, sizeof(rbuf));
873 	if (rpktl < 0) {
874 		msyslog(LOG_DEBUG, "recvfrom error %m");
875 		return;
876 	}
877 
878 	if (sock6 == fd)
879 		p_pktlist = &v6_pkts_list;
880 	else
881 		p_pktlist = &v4_pkts_list;
882 
883 	for (spkt = *p_pktlist; spkt != NULL; spkt = spkt->link) {
884 		psau = &spkt->addr;
885 		if (SOCK_EQ(&sender, psau))
886 			break;
887 	}
888 	if (NULL == spkt) {
889 		msyslog(LOG_WARNING,
890 			"Packet from unexpected source %s dropped",
891 			sptoa(&sender));
892 		return;
893 	}
894 
895 	TRACE(1, ("sock_cb: %s %s\n", spkt->dctx->name,
896 		  sptoa(&sender)));
897 
898 	rpktl = process_pkt(&r_pkt, &sender, rpktl, MODE_SERVER,
899 			    &spkt->x_pkt, "sock_cb");
900 
901 	TRACE(2, ("sock_cb: process_pkt returned %d\n", rpktl));
902 
903 	/* If this is a Unicast packet, one down ... */
904 	if (!spkt->done && (CTX_UCST & spkt->dctx->flags)) {
905 		dec_pending_ntp(spkt->dctx->name, &spkt->addr);
906 		spkt->done = TRUE;
907 	}
908 
909 
910 	/* If the packet is good, set the time and we're all done */
911 	rc = handle_pkt(rpktl, &r_pkt, &spkt->addr, spkt->dctx->name);
912 	if (0 != rc)
913 		TRACE(1, ("sock_cb: handle_pkt() returned %d\n", rc));
914 	check_exit_conditions();
915 }
916 
917 
918 /*
919  * check_exit_conditions()
920  *
921  * If sntp has a reply, ask the event loop to stop after this round of
922  * callbacks, unless --wait was used.
923  */
924 void
925 check_exit_conditions(void)
926 {
927 	if ((0 == n_pending_ntp && 0 == n_pending_dns) ||
928 	    (time_derived && !HAVE_OPT(WAIT))) {
929 		event_base_loopexit(base, NULL);
930 		shutting_down = TRUE;
931 	} else {
932 		TRACE(2, ("%d NTP and %d name queries pending\n",
933 			  n_pending_ntp, n_pending_dns));
934 	}
935 }
936 
937 
938 /*
939  * sntp_addremove_fd() is invoked by the intres blocking worker code
940  * to read from a pipe, or to stop same.
941  */
942 void sntp_addremove_fd(
943 	int	fd,
944 	int	is_pipe,
945 	int	remove_it
946 	)
947 {
948 	u_int		idx;
949 	blocking_child *c;
950 	struct event *	ev;
951 
952 #ifdef HAVE_SOCKETPAIR
953 	if (is_pipe) {
954 		/* sntp only asks for EV_FEATURE_FDS without HAVE_SOCKETPAIR */
955 		msyslog(LOG_ERR, "fatal: pipes not supported on systems with socketpair()");
956 		exit(1);
957 	}
958 #endif
959 
960 	c = NULL;
961 	for (idx = 0; idx < blocking_children_alloc; idx++) {
962 		c = blocking_children[idx];
963 		if (NULL == c)
964 			continue;
965 		if (fd == c->resp_read_pipe)
966 			break;
967 	}
968 	if (idx == blocking_children_alloc)
969 		return;
970 
971 	if (remove_it) {
972 		ev = c->resp_read_ctx;
973 		c->resp_read_ctx = NULL;
974 		event_del(ev);
975 		event_free(ev);
976 
977 		return;
978 	}
979 
980 	ev = event_new(base, fd, EV_READ | EV_PERSIST,
981 		       &worker_resp_cb, c);
982 	if (NULL == ev) {
983 		msyslog(LOG_ERR,
984 			"sntp_addremove_fd: event_new(base, fd) failed!");
985 		return;
986 	}
987 	c->resp_read_ctx = ev;
988 	event_add(ev, NULL);
989 }
990 
991 
992 /* called by forked intres child to close open descriptors */
993 #ifdef WORK_FORK
994 void
995 kill_asyncio(
996 	int	startfd
997 	)
998 {
999 	if (INVALID_SOCKET != sock4) {
1000 		closesocket(sock4);
1001 		sock4 = INVALID_SOCKET;
1002 	}
1003 	if (INVALID_SOCKET != sock6) {
1004 		closesocket(sock6);
1005 		sock6 = INVALID_SOCKET;
1006 	}
1007 	if (INVALID_SOCKET != bsock4) {
1008 		closesocket(sock4);
1009 		sock4 = INVALID_SOCKET;
1010 	}
1011 	if (INVALID_SOCKET != bsock6) {
1012 		closesocket(sock6);
1013 		sock6 = INVALID_SOCKET;
1014 	}
1015 }
1016 #endif
1017 
1018 
1019 /*
1020  * worker_resp_cb() is invoked when resp_read_pipe is readable.
1021  */
1022 void
1023 worker_resp_cb(
1024 	evutil_socket_t	fd,
1025 	short		what,
1026 	void *		ctx	/* blocking_child * */
1027 	)
1028 {
1029 	blocking_child *	c;
1030 
1031 	DEBUG_INSIST(EV_READ & what);
1032 	c = ctx;
1033 	DEBUG_INSIST(fd == c->resp_read_pipe);
1034 	process_blocking_resp(c);
1035 }
1036 
1037 
1038 /*
1039  * intres_timeout_req(s) is invoked in the parent to schedule an idle
1040  * timeout to fire in s seconds, if not reset earlier by a call to
1041  * intres_timeout_req(0), which clears any pending timeout.  When the
1042  * timeout expires, worker_idle_timer_fired() is invoked (again, in the
1043  * parent).
1044  *
1045  * sntp and ntpd each provide implementations adapted to their timers.
1046  */
1047 void
1048 intres_timeout_req(
1049 	u_int	seconds		/* 0 cancels */
1050 	)
1051 {
1052 	struct timeval	tv_to;
1053 
1054 	if (NULL == ev_worker_timeout) {
1055 		ev_worker_timeout = event_new(base, -1,
1056 					      EV_TIMEOUT | EV_PERSIST,
1057 					      &worker_timeout, NULL);
1058 		DEBUG_INSIST(NULL != ev_worker_timeout);
1059 	} else {
1060 		event_del(ev_worker_timeout);
1061 	}
1062 	if (0 == seconds)
1063 		return;
1064 	tv_to.tv_sec = seconds;
1065 	tv_to.tv_usec = 0;
1066 	event_add(ev_worker_timeout, &tv_to);
1067 }
1068 
1069 
1070 void
1071 worker_timeout(
1072 	evutil_socket_t	fd,
1073 	short		what,
1074 	void *		ctx
1075 	)
1076 {
1077 	UNUSED_ARG(fd);
1078 	UNUSED_ARG(ctx);
1079 
1080 	DEBUG_REQUIRE(EV_TIMEOUT & what);
1081 	worker_idle_timer_fired();
1082 }
1083 
1084 
1085 void
1086 sntp_libevent_log_cb(
1087 	int		severity,
1088 	const char *	msg
1089 	)
1090 {
1091 	int		level;
1092 
1093 	switch (severity) {
1094 
1095 	default:
1096 	case _EVENT_LOG_DEBUG:
1097 		level = LOG_DEBUG;
1098 		break;
1099 
1100 	case _EVENT_LOG_MSG:
1101 		level = LOG_NOTICE;
1102 		break;
1103 
1104 	case _EVENT_LOG_WARN:
1105 		level = LOG_WARNING;
1106 		break;
1107 
1108 	case _EVENT_LOG_ERR:
1109 		level = LOG_ERR;
1110 		break;
1111 	}
1112 
1113 	msyslog(level, "%s", msg);
1114 }
1115 
1116 
1117 int
1118 generate_pkt (
1119 	struct pkt *x_pkt,
1120 	const struct timeval *tv_xmt,
1121 	int key_id,
1122 	struct key *pkt_key
1123 	)
1124 {
1125 	l_fp	xmt_fp;
1126 	int	pkt_len;
1127 	int	mac_size;
1128 
1129 	pkt_len = LEN_PKT_NOMAC;
1130 	ZERO(*x_pkt);
1131 	TVTOTS(tv_xmt, &xmt_fp);
1132 	HTONL_FP(&xmt_fp, &x_pkt->xmt);
1133 	x_pkt->stratum = STRATUM_TO_PKT(STRATUM_UNSPEC);
1134 	x_pkt->ppoll = 8;
1135 	/* FIXME! Modus broadcast + adr. check -> bdr. pkt */
1136 	set_li_vn_mode(x_pkt, LEAP_NOTINSYNC, ntpver, 3);
1137 	if (pkt_key != NULL) {
1138 		x_pkt->exten[0] = htonl(key_id);
1139 		mac_size = 20; /* max room for MAC */
1140 		mac_size = make_mac((char *)x_pkt, pkt_len, mac_size,
1141 				    pkt_key, (char *)&x_pkt->exten[1]);
1142 		if (mac_size > 0)
1143 			pkt_len += mac_size + 4;
1144 	}
1145 	return pkt_len;
1146 }
1147 
1148 
1149 int
1150 handle_pkt(
1151 	int		rpktl,
1152 	struct pkt *	rpkt,
1153 	sockaddr_u *	host,
1154 	const char *	hostname
1155 	)
1156 {
1157 	char		disptxt[32];
1158 	const char *	addrtxt;
1159 	struct timeval	tv_dst;
1160 	int		cnt;
1161 	int		sw_case;
1162 	int		digits;
1163 	int		stratum;
1164 	char *		ref;
1165 	char *		ts_str;
1166 	const char *	leaptxt;
1167 	double		offset;
1168 	double		precision;
1169 	double		synch_distance;
1170 	char *		p_SNTP_PRETEND_TIME;
1171 	time_t		pretend_time;
1172 #if SIZEOF_TIME_T == 8
1173 	long long	ll;
1174 #else
1175 	long		l;
1176 #endif
1177 
1178 	ts_str = NULL;
1179 
1180 	if (rpktl > 0)
1181 		sw_case = 1;
1182 	else
1183 		sw_case = rpktl;
1184 
1185 	switch (sw_case) {
1186 
1187 	case SERVER_UNUSEABLE:
1188 		return -1;
1189 		break;
1190 
1191 	case PACKET_UNUSEABLE:
1192 		break;
1193 
1194 	case SERVER_AUTH_FAIL:
1195 		break;
1196 
1197 	case KOD_DEMOBILIZE:
1198 		/* Received a DENY or RESTR KOD packet */
1199 		addrtxt = stoa(host);
1200 		ref = (char *)&rpkt->refid;
1201 		add_entry(addrtxt, ref);
1202 		msyslog(LOG_WARNING, "KOD code %c%c%c%c from %s %s",
1203 			ref[0], ref[1], ref[2], ref[3], addrtxt, hostname);
1204 		break;
1205 
1206 	case KOD_RATE:
1207 		/*
1208 		** Hmm...
1209 		** We should probably call add_entry() with an
1210 		** expiration timestamp of several seconds in the future,
1211 		** and back-off even more if we get more RATE responses.
1212 		*/
1213 		break;
1214 
1215 	case 1:
1216 		TRACE(3, ("handle_pkt: %d bytes from %s %s\n",
1217 			  rpktl, stoa(host), hostname));
1218 
1219 		gettimeofday_cached(base, &tv_dst);
1220 
1221 		p_SNTP_PRETEND_TIME = getenv("SNTP_PRETEND_TIME");
1222 		if (p_SNTP_PRETEND_TIME) {
1223 			pretend_time = 0;
1224 #if SIZEOF_TIME_T == 4
1225 			if (1 == sscanf(p_SNTP_PRETEND_TIME, "%ld", &l))
1226 				pretend_time = (time_t)l;
1227 #elif SIZEOF_TIME_T == 8
1228 			if (1 == sscanf(p_SNTP_PRETEND_TIME, "%lld", &ll))
1229 				pretend_time = (time_t)ll;
1230 #else
1231 # include "GRONK: unexpected value for SIZEOF_TIME_T"
1232 #endif
1233 			if (0 != pretend_time)
1234 				tv_dst.tv_sec = pretend_time;
1235 		}
1236 
1237 		offset_calculation(rpkt, rpktl, &tv_dst, &offset,
1238 				   &precision, &synch_distance);
1239 		time_derived = TRUE;
1240 
1241 		for (digits = 0; (precision *= 10.) < 1.; ++digits)
1242 			/* empty */ ;
1243 		if (digits > 6)
1244 			digits = 6;
1245 
1246 		ts_str = tv_to_str(&tv_dst);
1247 		stratum = rpkt->stratum;
1248 		if (0 == stratum)
1249 				stratum = 16;
1250 
1251 		if (synch_distance > 0.) {
1252 			cnt = snprintf(disptxt, sizeof(disptxt),
1253 				       " +/- %f", synch_distance);
1254 			if ((size_t)cnt >= sizeof(disptxt))
1255 				snprintf(disptxt, sizeof(disptxt),
1256 					 "ERROR %d >= %d", cnt,
1257 					 (int)sizeof(disptxt));
1258 		} else {
1259 			disptxt[0] = '\0';
1260 		}
1261 
1262 		switch (PKT_LEAP(rpkt->li_vn_mode)) {
1263 		    case LEAP_NOWARNING:
1264 		    	leaptxt = "no-leap";
1265 			break;
1266 		    case LEAP_ADDSECOND:
1267 		    	leaptxt = "add-leap";
1268 			break;
1269 		    case LEAP_DELSECOND:
1270 		    	leaptxt = "del-leap";
1271 			break;
1272 		    case LEAP_NOTINSYNC:
1273 		    	leaptxt = "unsync";
1274 			break;
1275 		    default:
1276 		    	leaptxt = "LEAP-ERROR";
1277 			break;
1278 		}
1279 
1280 		msyslog(LOG_INFO, "%s %+.*f%s %s s%d %s%s", ts_str,
1281 			digits, offset, disptxt,
1282 			hostnameaddr(hostname, host), stratum,
1283 			leaptxt,
1284 			(time_adjusted)
1285 			    ? " [excess]"
1286 			    : "");
1287 		free(ts_str);
1288 
1289 		if (p_SNTP_PRETEND_TIME)
1290 			return 0;
1291 
1292 		if (!time_adjusted &&
1293 		    (ENABLED_OPT(STEP) || ENABLED_OPT(SLEW)))
1294 			return set_time(offset);
1295 
1296 		return EX_OK;
1297 	}
1298 
1299 	return 1;
1300 }
1301 
1302 
1303 void
1304 offset_calculation(
1305 	struct pkt *rpkt,
1306 	int rpktl,
1307 	struct timeval *tv_dst,
1308 	double *offset,
1309 	double *precision,
1310 	double *synch_distance
1311 	)
1312 {
1313 	l_fp p_rec, p_xmt, p_ref, p_org, tmp, dst;
1314 	u_fp p_rdly, p_rdsp;
1315 	double t21, t34, delta;
1316 
1317 	/* Convert timestamps from network to host byte order */
1318 	p_rdly = NTOHS_FP(rpkt->rootdelay);
1319 	p_rdsp = NTOHS_FP(rpkt->rootdisp);
1320 	NTOHL_FP(&rpkt->reftime, &p_ref);
1321 	NTOHL_FP(&rpkt->org, &p_org);
1322 	NTOHL_FP(&rpkt->rec, &p_rec);
1323 	NTOHL_FP(&rpkt->xmt, &p_xmt);
1324 
1325 	*precision = LOGTOD(rpkt->precision);
1326 
1327 	TRACE(3, ("offset_calculation: LOGTOD(rpkt->precision): %f\n", *precision));
1328 
1329 	/* Compute offset etc. */
1330 	tmp = p_rec;
1331 	L_SUB(&tmp, &p_org);
1332 	LFPTOD(&tmp, t21);
1333 	TVTOTS(tv_dst, &dst);
1334 	dst.l_ui += JAN_1970;
1335 	tmp = p_xmt;
1336 	L_SUB(&tmp, &dst);
1337 	LFPTOD(&tmp, t34);
1338 	*offset = (t21 + t34) / 2.;
1339 	delta = t21 - t34;
1340 
1341 	// synch_distance is:
1342 	// (peer->delay + peer->rootdelay) / 2 + peer->disp
1343 	// + peer->rootdisp + clock_phi * (current_time - peer->update)
1344 	// + peer->jitter;
1345 	//
1346 	// and peer->delay = fabs(peer->offset - p_offset) * 2;
1347 	// and peer->offset needs history, so we're left with
1348 	// p_offset = (t21 + t34) / 2.;
1349 	// peer->disp = 0; (we have no history to augment this)
1350 	// clock_phi = 15e-6;
1351 	// peer->jitter = LOGTOD(sys_precision); (we have no history to augment this)
1352 	// and ntp_proto.c:set_sys_tick_precision() should get us sys_precision.
1353 	//
1354 	// so our answer seems to be:
1355 	//
1356 	// (fabs(t21 + t34) + peer->rootdelay) / 3.
1357 	// + 0 (peer->disp)
1358 	// + peer->rootdisp
1359 	// + 15e-6 (clock_phi)
1360 	// + LOGTOD(sys_precision)
1361 
1362 	INSIST( FPTOD(p_rdly) >= 0. );
1363 #if 1
1364 	*synch_distance = (fabs(t21 + t34) + FPTOD(p_rdly)) / 3.
1365 		+ 0.
1366 		+ FPTOD(p_rdsp)
1367 		+ 15e-6
1368 		+ 0.	/* LOGTOD(sys_precision) when we can get it */
1369 		;
1370 	INSIST( *synch_distance >= 0. );
1371 #else
1372 	*synch_distance = (FPTOD(p_rdly) + FPTOD(p_rdsp))/2.0;
1373 #endif
1374 
1375 #ifdef DEBUG
1376 	if (debug > 3) {
1377 		printf("sntp rootdelay: %f\n", FPTOD(p_rdly));
1378 		printf("sntp rootdisp: %f\n", FPTOD(p_rdsp));
1379 		printf("sntp syncdist: %f\n", *synch_distance);
1380 
1381 		pkt_output(rpkt, rpktl, stdout);
1382 
1383 		printf("sntp offset_calculation: rpkt->reftime:\n");
1384 		l_fp_output(&p_ref, stdout);
1385 		printf("sntp offset_calculation: rpkt->org:\n");
1386 		l_fp_output(&p_org, stdout);
1387 		printf("sntp offset_calculation: rpkt->rec:\n");
1388 		l_fp_output(&p_rec, stdout);
1389 		printf("sntp offset_calculation: rpkt->xmt:\n");
1390 		l_fp_output(&p_xmt, stdout);
1391 	}
1392 #endif
1393 
1394 	TRACE(3, ("sntp offset_calculation:\trec - org t21: %.6f\n"
1395 		  "\txmt - dst t34: %.6f\tdelta: %.6f\toffset: %.6f\n",
1396 		  t21, t34, delta, *offset));
1397 
1398 	return;
1399 }
1400 
1401 
1402 
1403 /* Compute the 8 bits for li_vn_mode */
1404 void
1405 set_li_vn_mode (
1406 	struct pkt *spkt,
1407 	char leap,
1408 	char version,
1409 	char mode
1410 	)
1411 {
1412 	if (leap > 3) {
1413 		msyslog(LOG_DEBUG, "set_li_vn_mode: leap > 3, using max. 3");
1414 		leap = 3;
1415 	}
1416 
1417 	if ((unsigned char)version > 7) {
1418 		msyslog(LOG_DEBUG, "set_li_vn_mode: version < 0 or > 7, using 4");
1419 		version = 4;
1420 	}
1421 
1422 	if (mode > 7) {
1423 		msyslog(LOG_DEBUG, "set_li_vn_mode: mode > 7, using client mode 3");
1424 		mode = 3;
1425 	}
1426 
1427 	spkt->li_vn_mode  = leap << 6;
1428 	spkt->li_vn_mode |= version << 3;
1429 	spkt->li_vn_mode |= mode;
1430 }
1431 
1432 
1433 /*
1434 ** set_time applies 'offset' to the local clock.
1435 */
1436 int
1437 set_time(
1438 	double offset
1439 	)
1440 {
1441 	int rc;
1442 
1443 	if (time_adjusted)
1444 		return EX_OK;
1445 
1446 	/*
1447 	** If we can step but we cannot slew, then step.
1448 	** If we can step or slew and and |offset| > steplimit, then step.
1449 	*/
1450 	if (ENABLED_OPT(STEP) &&
1451 	    (   !ENABLED_OPT(SLEW)
1452 	     || (ENABLED_OPT(SLEW) && (fabs(offset) > steplimit))
1453 	    )) {
1454 		rc = step_systime(offset);
1455 
1456 		/* If there was a problem, can we rely on errno? */
1457 		if (1 == rc)
1458 			time_adjusted = TRUE;
1459 		return (time_adjusted)
1460 			   ? EX_OK
1461 			   : 1;
1462 		/*
1463 		** In case of error, what should we use?
1464 		** EX_UNAVAILABLE?
1465 		** EX_OSERR?
1466 		** EX_NOPERM?
1467 		*/
1468 	}
1469 
1470 	if (ENABLED_OPT(SLEW)) {
1471 		rc = adj_systime(offset);
1472 
1473 		/* If there was a problem, can we rely on errno? */
1474 		if (1 == rc)
1475 			time_adjusted = TRUE;
1476 		return (time_adjusted)
1477 			   ? EX_OK
1478 			   : 1;
1479 		/*
1480 		** In case of error, what should we use?
1481 		** EX_UNAVAILABLE?
1482 		** EX_OSERR?
1483 		** EX_NOPERM?
1484 		*/
1485 	}
1486 
1487 	return EX_SOFTWARE;
1488 }
1489 
1490 
1491 int
1492 libevent_version_ok(void)
1493 {
1494 	ev_uint32_t v_compile_maj;
1495 	ev_uint32_t v_run_maj;
1496 
1497 	v_compile_maj = LIBEVENT_VERSION_NUMBER & 0xffff0000;
1498 	v_run_maj = event_get_version_number() & 0xffff0000;
1499 	if (v_compile_maj != v_run_maj) {
1500 		fprintf(stderr,
1501 			"Incompatible libevent versions: have %s, built with %s\n",
1502 			event_get_version(),
1503 			LIBEVENT_VERSION);
1504 		return 0;
1505 	}
1506 	return 1;
1507 }
1508 
1509 /*
1510  * gettimeofday_cached()
1511  *
1512  * Clones the event_base_gettimeofday_cached() interface but ensures the
1513  * times are always on the gettimeofday() 1970 scale.  Older libevent 2
1514  * sometimes used gettimeofday(), sometimes the since-system-start
1515  * clock_gettime(CLOCK_MONOTONIC), depending on the platform.
1516  *
1517  * It is not cleanly possible to tell which timescale older libevent is
1518  * using.
1519  *
1520  * The strategy involves 1 hour thresholds chosen to be far longer than
1521  * the duration of a round of libevent callbacks, which share a cached
1522  * start-of-round time.  First compare the last cached time with the
1523  * current gettimeofday() time.  If they are within one hour, libevent
1524  * is using the proper timescale so leave the offset 0.  Otherwise,
1525  * compare libevent's cached time and the current time on the monotonic
1526  * scale.  If they are within an hour, libevent is using the monotonic
1527  * scale so calculate the offset to add to such times to bring them to
1528  * gettimeofday()'s scale.
1529  */
1530 int
1531 gettimeofday_cached(
1532 	struct event_base *	b,
1533 	struct timeval *	caller_tv
1534 	)
1535 {
1536 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1537 	static struct event_base *	cached_b;
1538 	static struct timeval		cached;
1539 	static struct timeval		adj_cached;
1540 	static struct timeval		offset;
1541 	static int			offset_ready;
1542 	struct timeval			latest;
1543 	struct timeval			systemt;
1544 	struct timespec			ts;
1545 	struct timeval			mono;
1546 	struct timeval			diff;
1547 	int				cgt_rc;
1548 	int				gtod_rc;
1549 
1550 	event_base_gettimeofday_cached(b, &latest);
1551 	if (b == cached_b &&
1552 	    !memcmp(&latest, &cached, sizeof(latest))) {
1553 		*caller_tv = adj_cached;
1554 		return 0;
1555 	}
1556 	cached = latest;
1557 	cached_b = b;
1558 	if (!offset_ready) {
1559 		cgt_rc = clock_gettime(CLOCK_MONOTONIC, &ts);
1560 		gtod_rc = gettimeofday(&systemt, NULL);
1561 		if (0 != gtod_rc) {
1562 			msyslog(LOG_ERR,
1563 				"%s: gettimeofday() error %m",
1564 				progname);
1565 			exit(1);
1566 		}
1567 		diff = sub_tval(systemt, latest);
1568 		if (debug > 1)
1569 			printf("system minus cached %+ld.%06ld\n",
1570 			       (long)diff.tv_sec, (long)diff.tv_usec);
1571 		if (0 != cgt_rc || labs((long)diff.tv_sec) < 3600) {
1572 			/*
1573 			 * Either use_monotonic == 0, or this libevent
1574 			 * has been repaired.  Leave offset at zero.
1575 			 */
1576 		} else {
1577 			mono.tv_sec = ts.tv_sec;
1578 			mono.tv_usec = ts.tv_nsec / 1000;
1579 			diff = sub_tval(latest, mono);
1580 			if (debug > 1)
1581 				printf("cached minus monotonic %+ld.%06ld\n",
1582 				       (long)diff.tv_sec, (long)diff.tv_usec);
1583 			if (labs((long)diff.tv_sec) < 3600) {
1584 				/* older libevent2 using monotonic */
1585 				offset = sub_tval(systemt, mono);
1586 				TRACE(1, ("%s: Offsetting libevent CLOCK_MONOTONIC times  by %+ld.%06ld\n",
1587 					 "gettimeofday_cached",
1588 					 (long)offset.tv_sec,
1589 					 (long)offset.tv_usec));
1590 			}
1591 		}
1592 		offset_ready = TRUE;
1593 	}
1594 	adj_cached = add_tval(cached, offset);
1595 	*caller_tv = adj_cached;
1596 
1597 	return 0;
1598 #else
1599 	return event_base_gettimeofday_cached(b, caller_tv);
1600 #endif
1601 }
1602 
1603