xref: /netbsd-src/external/bsd/ntp/dist/ntpd/ntp_request.c (revision bdc22b2e01993381dcefeff2bc9b56ca75a4235c)
1 /*	$NetBSD: ntp_request.c,v 1.15 2018/04/07 00:19:53 christos Exp $	*/
2 
3 /*
4  * ntp_request.c - respond to information requests
5  */
6 
7 #ifdef HAVE_CONFIG_H
8 # include <config.h>
9 #endif
10 
11 #include "ntpd.h"
12 #include "ntp_io.h"
13 #include "ntp_request.h"
14 #include "ntp_control.h"
15 #include "ntp_refclock.h"
16 #include "ntp_if.h"
17 #include "ntp_stdlib.h"
18 #include "ntp_assert.h"
19 
20 #include <stdio.h>
21 #include <stddef.h>
22 #include <signal.h>
23 #ifdef HAVE_NETINET_IN_H
24 #include <netinet/in.h>
25 #endif
26 #include <arpa/inet.h>
27 
28 #include "recvbuff.h"
29 
30 #ifdef KERNEL_PLL
31 #include "ntp_syscall.h"
32 #endif /* KERNEL_PLL */
33 
34 /*
35  * Structure to hold request procedure information
36  */
37 #define	NOAUTH	0
38 #define	AUTH	1
39 
40 #define	NO_REQUEST	(-1)
41 /*
42  * Because we now have v6 addresses in the messages, we need to compensate
43  * for the larger size.  Therefore, we introduce the alternate size to
44  * keep us friendly with older implementations.  A little ugly.
45  */
46 static int client_v6_capable = 0;   /* the client can handle longer messages */
47 
48 #define v6sizeof(type)	(client_v6_capable ? sizeof(type) : v4sizeof(type))
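/*
 * Illustrative note (editorial, not part of the original file):
 * v4sizeof() -- presumably defined in ntp_request.h as the size of a
 * reply structure up to, but not including, its IPv6-specific members
 * -- gives the shorter layout a legacy IMPL_XNTPD_OLD client expects,
 * while a v6-capable IMPL_XNTPD client is handed the full sizeof().
 * For example, an info_peer_list item is truncated just before its
 * v6_flag/addr6 members when client_v6_capable is zero.
 */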
49 
50 struct req_proc {
51 	short request_code;	/* defined request code */
52 	short needs_auth;	/* true when authentication needed */
53 	short sizeofitem;	/* size of request data item (older size)*/
54 	short v6_sizeofitem;	/* size of request data item (new size)*/
55 	void (*handler) (sockaddr_u *, endpt *,
56 			   struct req_pkt *);	/* routine to handle request */
57 };
58 
59 /*
60  * Universal request codes
61  */
62 static const struct req_proc univ_codes[] = {
63 	{ NO_REQUEST,		NOAUTH,	 0,	0, NULL }
64 };
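/*
 * Illustrative note (editorial): each request table ends with a
 * NO_REQUEST sentinel entry.  process_private() below walks a table
 * until it either matches the incoming request code or hits this
 * sentinel, in which case it answers with INFO_ERR_REQ.
 */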
65 
66 static	void	req_ack	(sockaddr_u *, endpt *, struct req_pkt *, int);
67 static	void *	prepare_pkt	(sockaddr_u *, endpt *,
68 				 struct req_pkt *, size_t);
69 static	void *	more_pkt	(void);
70 static	void	flush_pkt	(void);
71 static	void	list_peers	(sockaddr_u *, endpt *, struct req_pkt *);
72 static	void	list_peers_sum	(sockaddr_u *, endpt *, struct req_pkt *);
73 static	void	peer_info	(sockaddr_u *, endpt *, struct req_pkt *);
74 static	void	peer_stats	(sockaddr_u *, endpt *, struct req_pkt *);
75 static	void	sys_info	(sockaddr_u *, endpt *, struct req_pkt *);
76 static	void	sys_stats	(sockaddr_u *, endpt *, struct req_pkt *);
77 static	void	mem_stats	(sockaddr_u *, endpt *, struct req_pkt *);
78 static	void	io_stats	(sockaddr_u *, endpt *, struct req_pkt *);
79 static	void	timer_stats	(sockaddr_u *, endpt *, struct req_pkt *);
80 static	void	loop_info	(sockaddr_u *, endpt *, struct req_pkt *);
81 static	void	do_conf		(sockaddr_u *, endpt *, struct req_pkt *);
82 static	void	do_unconf	(sockaddr_u *, endpt *, struct req_pkt *);
83 static	void	set_sys_flag	(sockaddr_u *, endpt *, struct req_pkt *);
84 static	void	clr_sys_flag	(sockaddr_u *, endpt *, struct req_pkt *);
85 static	void	setclr_flags	(sockaddr_u *, endpt *, struct req_pkt *, u_long);
86 static	void	list_restrict4	(const restrict_u *, struct info_restrict **);
87 static	void	list_restrict6	(const restrict_u *, struct info_restrict **);
88 static	void	list_restrict	(sockaddr_u *, endpt *, struct req_pkt *);
89 static	void	do_resaddflags	(sockaddr_u *, endpt *, struct req_pkt *);
90 static	void	do_ressubflags	(sockaddr_u *, endpt *, struct req_pkt *);
91 static	void	do_unrestrict	(sockaddr_u *, endpt *, struct req_pkt *);
92 static	void	do_restrict	(sockaddr_u *, endpt *, struct req_pkt *, restrict_op);
93 static	void	mon_getlist	(sockaddr_u *, endpt *, struct req_pkt *);
94 static	void	reset_stats	(sockaddr_u *, endpt *, struct req_pkt *);
95 static	void	reset_peer	(sockaddr_u *, endpt *, struct req_pkt *);
96 static	void	do_key_reread	(sockaddr_u *, endpt *, struct req_pkt *);
97 static	void	trust_key	(sockaddr_u *, endpt *, struct req_pkt *);
98 static	void	untrust_key	(sockaddr_u *, endpt *, struct req_pkt *);
99 static	void	do_trustkey	(sockaddr_u *, endpt *, struct req_pkt *, u_long);
100 static	void	get_auth_info	(sockaddr_u *, endpt *, struct req_pkt *);
101 static	void	req_get_traps	(sockaddr_u *, endpt *, struct req_pkt *);
102 static	void	req_set_trap	(sockaddr_u *, endpt *, struct req_pkt *);
103 static	void	req_clr_trap	(sockaddr_u *, endpt *, struct req_pkt *);
104 static	void	do_setclr_trap	(sockaddr_u *, endpt *, struct req_pkt *, int);
105 static	void	set_request_keyid (sockaddr_u *, endpt *, struct req_pkt *);
106 static	void	set_control_keyid (sockaddr_u *, endpt *, struct req_pkt *);
107 static	void	get_ctl_stats   (sockaddr_u *, endpt *, struct req_pkt *);
108 static	void	get_if_stats    (sockaddr_u *, endpt *, struct req_pkt *);
109 static	void	do_if_reload    (sockaddr_u *, endpt *, struct req_pkt *);
110 #ifdef KERNEL_PLL
111 static	void	get_kernel_info (sockaddr_u *, endpt *, struct req_pkt *);
112 #endif /* KERNEL_PLL */
113 #ifdef REFCLOCK
114 static	void	get_clock_info (sockaddr_u *, endpt *, struct req_pkt *);
115 static	void	set_clock_fudge (sockaddr_u *, endpt *, struct req_pkt *);
116 #endif	/* REFCLOCK */
117 #ifdef REFCLOCK
118 static	void	get_clkbug_info (sockaddr_u *, endpt *, struct req_pkt *);
119 #endif	/* REFCLOCK */
120 
121 /*
122  * ntpd request codes
123  */
124 static const struct req_proc ntp_codes[] = {
125 	{ REQ_PEER_LIST,	NOAUTH,	0, 0,	list_peers },
126 	{ REQ_PEER_LIST_SUM,	NOAUTH,	0, 0,	list_peers_sum },
127 	{ REQ_PEER_INFO,    NOAUTH, v4sizeof(struct info_peer_list),
128 				sizeof(struct info_peer_list), peer_info},
129 	{ REQ_PEER_STATS,   NOAUTH, v4sizeof(struct info_peer_list),
130 				sizeof(struct info_peer_list), peer_stats},
131 	{ REQ_SYS_INFO,		NOAUTH,	0, 0,	sys_info },
132 	{ REQ_SYS_STATS,	NOAUTH,	0, 0,	sys_stats },
133 	{ REQ_IO_STATS,		NOAUTH,	0, 0,	io_stats },
134 	{ REQ_MEM_STATS,	NOAUTH,	0, 0,	mem_stats },
135 	{ REQ_LOOP_INFO,	NOAUTH,	0, 0,	loop_info },
136 	{ REQ_TIMER_STATS,	NOAUTH,	0, 0,	timer_stats },
137 	{ REQ_CONFIG,	    AUTH, v4sizeof(struct conf_peer),
138 				sizeof(struct conf_peer), do_conf },
139 	{ REQ_UNCONFIG,	    AUTH, v4sizeof(struct conf_unpeer),
140 				sizeof(struct conf_unpeer), do_unconf },
141 	{ REQ_SET_SYS_FLAG, AUTH, sizeof(struct conf_sys_flags),
142 				sizeof(struct conf_sys_flags), set_sys_flag },
143 	{ REQ_CLR_SYS_FLAG, AUTH, sizeof(struct conf_sys_flags),
144 				sizeof(struct conf_sys_flags),  clr_sys_flag },
145 	{ REQ_GET_RESTRICT,	NOAUTH,	0, 0,	list_restrict },
146 	{ REQ_RESADDFLAGS, AUTH, v4sizeof(struct conf_restrict),
147 				sizeof(struct conf_restrict), do_resaddflags },
148 	{ REQ_RESSUBFLAGS, AUTH, v4sizeof(struct conf_restrict),
149 				sizeof(struct conf_restrict), do_ressubflags },
150 	{ REQ_UNRESTRICT, AUTH, v4sizeof(struct conf_restrict),
151 				sizeof(struct conf_restrict), do_unrestrict },
152 	{ REQ_MON_GETLIST,	NOAUTH,	0, 0,	mon_getlist },
153 	{ REQ_MON_GETLIST_1,	NOAUTH,	0, 0,	mon_getlist },
154 	{ REQ_RESET_STATS, AUTH, sizeof(struct reset_flags), 0, reset_stats },
155 	{ REQ_RESET_PEER,  AUTH, v4sizeof(struct conf_unpeer),
156 				sizeof(struct conf_unpeer), reset_peer },
157 	{ REQ_REREAD_KEYS,	AUTH,	0, 0,	do_key_reread },
158 	{ REQ_TRUSTKEY,   AUTH, sizeof(u_long), sizeof(u_long), trust_key },
159 	{ REQ_UNTRUSTKEY, AUTH, sizeof(u_long), sizeof(u_long), untrust_key },
160 	{ REQ_AUTHINFO,		NOAUTH,	0, 0,	get_auth_info },
161 	{ REQ_TRAPS,		NOAUTH, 0, 0,	req_get_traps },
162 	{ REQ_ADD_TRAP,	AUTH, v4sizeof(struct conf_trap),
163 				sizeof(struct conf_trap), req_set_trap },
164 	{ REQ_CLR_TRAP,	AUTH, v4sizeof(struct conf_trap),
165 				sizeof(struct conf_trap), req_clr_trap },
166 	{ REQ_REQUEST_KEY, AUTH, sizeof(u_long), sizeof(u_long),
167 				set_request_keyid },
168 	{ REQ_CONTROL_KEY, AUTH, sizeof(u_long), sizeof(u_long),
169 				set_control_keyid },
170 	{ REQ_GET_CTLSTATS,	NOAUTH,	0, 0,	get_ctl_stats },
171 #ifdef KERNEL_PLL
172 	{ REQ_GET_KERNEL,	NOAUTH,	0, 0,	get_kernel_info },
173 #endif
174 #ifdef REFCLOCK
175 	{ REQ_GET_CLOCKINFO, NOAUTH, sizeof(u_int32), sizeof(u_int32),
176 				get_clock_info },
177 	{ REQ_SET_CLKFUDGE, AUTH, sizeof(struct conf_fudge),
178 				sizeof(struct conf_fudge), set_clock_fudge },
179 	{ REQ_GET_CLKBUGINFO, NOAUTH, sizeof(u_int32), sizeof(u_int32),
180 				get_clkbug_info },
181 #endif
182 	{ REQ_IF_STATS,		AUTH, 0, 0,	get_if_stats },
183 	{ REQ_IF_RELOAD,	AUTH, 0, 0,	do_if_reload },
184 
185 	{ NO_REQUEST,		NOAUTH,	0, 0,	0 }
186 };
187 
188 
189 /*
190  * Authentication keyid used to authenticate requests.  Zero means we
191  * don't allow writing anything.
192  */
193 keyid_t info_auth_keyid;
194 
195 /*
196  * Statistic counters to keep track of requests and responses.
197  */
198 u_long numrequests;		/* number of requests we've received */
199 u_long numresppkts;		/* number of resp packets sent with data */
200 
201 /*
202  * lazy way to count errors, indexed by the error code
203  */
204 u_long errorcounter[MAX_INFO_ERR + 1];
205 
206 /*
207  * A hack.  To keep the authentication module clear of ntp-isms, we
208  * include a time reset variable for its stats here.
209  */
210 u_long auth_timereset;
211 
212 /*
213  * Response packet used by these routines.  Also some state information
214  * so that we can handle packet formatting within a common set of
215  * subroutines.  Note we try to enter data in place whenever possible,
216  * but the need to set the more bit correctly means we occasionally
217  * use the extra buffer and copy.
218  */
219 static struct resp_pkt rpkt;
220 static int reqver;
221 static int seqno;
222 static int nitems;
223 static int itemsize;
224 static int databytes;
225 static char exbuf[RESP_DATA_SIZE];
226 static int usingexbuf;
227 static sockaddr_u *toaddr;
228 static endpt *frominter;
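/*
 * Illustrative sketch (editorial, not part of the original file) of how
 * the handlers below drive this shared state.  A typical list-style
 * handler calls prepare_pkt() once, fills in one item per loop pass,
 * asks more_pkt() for the next slot, and finally lets flush_pkt() send
 * whatever is left (names like have_more()/fill_in_item() are
 * placeholders, not real functions in this file):
 *
 *	item = prepare_pkt(srcadr, inter, inpkt, v6sizeof(item_type));
 *	while (have_more() && item != NULL) {
 *		fill_in_item(item);
 *		item = more_pkt();
 *	}
 *	flush_pkt();
 *
 * While the current packet has room, more_pkt() hands out space inside
 * rpkt; when it fills up, the overflow item goes into exbuf, and the
 * next call transmits the full packet (with the MORE bit set) before
 * copying the exbuf item into a fresh packet.  more_pkt() returns NULL
 * once MAXSEQ packets have been sent.
 */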
229 
230 /*
231  * init_request - initialize request data
232  */
233 void
234 init_request (void)
235 {
236 	size_t i;
237 
238 	numrequests = 0;
239 	numresppkts = 0;
240 	auth_timereset = 0;
241 	info_auth_keyid = 0;	/* by default, can't do this */
242 
243 	for (i = 0; i < sizeof(errorcounter)/sizeof(errorcounter[0]); i++)
244 	    errorcounter[i] = 0;
245 }
246 
247 
248 /*
249  * req_ack - acknowledge request with no data
250  */
251 static void
252 req_ack(
253 	sockaddr_u *srcadr,
254 	endpt *inter,
255 	struct req_pkt *inpkt,
256 	int errcode
257 	)
258 {
259 	/*
260 	 * fill in the fields
261 	 */
262 	rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, 0, reqver);
263 	rpkt.auth_seq = AUTH_SEQ(0, 0);
264 	rpkt.implementation = inpkt->implementation;
265 	rpkt.request = inpkt->request;
266 	rpkt.err_nitems = ERR_NITEMS(errcode, 0);
267 	rpkt.mbz_itemsize = MBZ_ITEMSIZE(0);
268 
269 	/*
270 	 * send packet and bump counters
271 	 */
272 	sendpkt(srcadr, inter, -1, (struct pkt *)&rpkt, RESP_HEADER_SIZE);
273 	errorcounter[errcode]++;
274 }
275 
276 
277 /*
278  * prepare_pkt - prepare response packet for transmission, return pointer
279  *		 to storage for data item.
280  */
281 static void *
282 prepare_pkt(
283 	sockaddr_u *srcadr,
284 	endpt *inter,
285 	struct req_pkt *pkt,
286 	size_t structsize
287 	)
288 {
289 	DPRINTF(4, ("request: preparing pkt\n"));
290 
291 	/*
292 	 * Fill in the implementation, request and itemsize fields
293 	 * since these won't change.
294 	 */
295 	rpkt.implementation = pkt->implementation;
296 	rpkt.request = pkt->request;
297 	rpkt.mbz_itemsize = MBZ_ITEMSIZE(structsize);
298 
299 	/*
300 	 * Compute the static data needed to carry on.
301 	 */
302 	toaddr = srcadr;
303 	frominter = inter;
304 	seqno = 0;
305 	nitems = 0;
306 	itemsize = structsize;
307 	databytes = 0;
308 	usingexbuf = 0;
309 
310 	/*
311 	 * return the beginning of the packet buffer.
312 	 */
313 	return &rpkt.u;
314 }
315 
316 
317 /*
318  * more_pkt - return a data pointer for a new item.
319  */
320 static void *
321 more_pkt(void)
322 {
323 	/*
324 	 * If we were using the extra buffer, send the packet.
325 	 */
326 	if (usingexbuf) {
327 		DPRINTF(3, ("request: sending pkt\n"));
328 		rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, MORE_BIT, reqver);
329 		rpkt.auth_seq = AUTH_SEQ(0, seqno);
330 		rpkt.err_nitems = htons((u_short)nitems);
331 		sendpkt(toaddr, frominter, -1, (struct pkt *)&rpkt,
332 			RESP_HEADER_SIZE + databytes);
333 		numresppkts++;
334 
335 		/*
336 		 * Copy data out of exbuf into the packet.
337 		 */
338 		memcpy(&rpkt.u.data[0], exbuf, (unsigned)itemsize);
339 		seqno++;
340 		databytes = 0;
341 		nitems = 0;
342 		usingexbuf = 0;
343 	}
344 
345 	databytes += itemsize;
346 	nitems++;
347 	if (databytes + itemsize <= RESP_DATA_SIZE) {
348 		DPRINTF(4, ("request: giving him more data\n"));
349 		/*
350 		 * More room in packet.  Give him the
351 		 * next address.
352 		 */
353 		return &rpkt.u.data[databytes];
354 	} else {
355 		/*
356 		 * No room in packet.  Give him the extra
357 		 * buffer unless this was the last in the sequence.
358 		 */
359 		DPRINTF(4, ("request: into extra buffer\n"));
360 		if (seqno == MAXSEQ)
361 			return NULL;
362 		else {
363 			usingexbuf = 1;
364 			return exbuf;
365 		}
366 	}
367 }
368 
369 
370 /*
371  * flush_pkt - we're done, return remaining information.
372  */
373 static void
374 flush_pkt(void)
375 {
376 	DPRINTF(3, ("request: flushing packet, %d items\n", nitems));
377 	/*
378 	 * Must send the last packet.  If nothing in here and nothing
379 	 * has been sent, send an error saying no data to be found.
380 	 */
381 	if (seqno == 0 && nitems == 0)
382 		req_ack(toaddr, frominter, (struct req_pkt *)&rpkt,
383 			INFO_ERR_NODATA);
384 	else {
385 		rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, 0, reqver);
386 		rpkt.auth_seq = AUTH_SEQ(0, seqno);
387 		rpkt.err_nitems = htons((u_short)nitems);
388 		sendpkt(toaddr, frominter, -1, (struct pkt *)&rpkt,
389 			RESP_HEADER_SIZE+databytes);
390 		numresppkts++;
391 	}
392 }
393 
394 
395 
396 /*
397  * Given a buffer, return the packet mode
398  */
399 int
400 get_packet_mode(struct recvbuf *rbufp)
401 {
402 	struct req_pkt *inpkt = (struct req_pkt *)&rbufp->recv_pkt;
403 	return (INFO_MODE(inpkt->rm_vn_mode));
404 }
405 
406 
407 /*
408  * process_private - process private mode (7) packets
409  */
410 void
411 process_private(
412 	struct recvbuf *rbufp,
413 	int mod_okay
414 	)
415 {
416 	static u_long quiet_until;
417 	struct req_pkt *inpkt;
418 	struct req_pkt_tail *tailinpkt;
419 	sockaddr_u *srcadr;
420 	endpt *inter;
421 	const struct req_proc *proc;
422 	int ec;
423 	short temp_size;
424 	l_fp ftmp;
425 	double dtemp;
426 	size_t recv_len;
427 	size_t noslop_len;
428 	size_t mac_len;
429 
430 	/*
431 	 * Initialize pointers, for convenience
432 	 */
433 	recv_len = rbufp->recv_length;
434 	inpkt = (struct req_pkt *)&rbufp->recv_pkt;
435 	srcadr = &rbufp->recv_srcadr;
436 	inter = rbufp->dstadr;
437 
438 	DPRINTF(3, ("process_private: impl %d req %d\n",
439 		    inpkt->implementation, inpkt->request));
440 
441 	/*
442 	 * Do some sanity checks on the packet.  Return a format
443 	 * error if it fails.
444 	 */
445 	ec = 0;
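	/*
	 * Illustrative note (editorial): each check below is written as
	 * "(++ec, <test>)", so the comma operator bumps ec before the
	 * test is evaluated.  When a check fails, ec holds the 1-based
	 * index of the failing test, which is what the "drop test %d
	 * failed" message logs.
	 */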
446 	if (   (++ec, ISRESPONSE(inpkt->rm_vn_mode))
447 	    || (++ec, ISMORE(inpkt->rm_vn_mode))
448 	    || (++ec, INFO_VERSION(inpkt->rm_vn_mode) > NTP_VERSION)
449 	    || (++ec, INFO_VERSION(inpkt->rm_vn_mode) < NTP_OLDVERSION)
450 	    || (++ec, INFO_SEQ(inpkt->auth_seq) != 0)
451 	    || (++ec, INFO_ERR(inpkt->err_nitems) != 0)
452 	    || (++ec, INFO_MBZ(inpkt->mbz_itemsize) != 0)
453 	    || (++ec, rbufp->recv_length < (int)REQ_LEN_HDR)
454 		) {
455 		NLOG(NLOG_SYSEVENT)
456 			if (current_time >= quiet_until) {
457 				msyslog(LOG_ERR,
458 					"process_private: drop test %d"
459 					" failed, pkt from %s",
460 					ec, stoa(srcadr));
461 				quiet_until = current_time + 60;
462 			}
463 		return;
464 	}
465 
466 	reqver = INFO_VERSION(inpkt->rm_vn_mode);
467 
468 	/*
469 	 * Get the appropriate procedure list to search.
470 	 */
471 	if (inpkt->implementation == IMPL_UNIV)
472 		proc = univ_codes;
473 	else if ((inpkt->implementation == IMPL_XNTPD) ||
474 		 (inpkt->implementation == IMPL_XNTPD_OLD))
475 		proc = ntp_codes;
476 	else {
477 		req_ack(srcadr, inter, inpkt, INFO_ERR_IMPL);
478 		return;
479 	}
480 
481 	/*
482 	 * Search the list for the request codes.  If it isn't one
483 	 * we know, return an error.
484 	 */
485 	while (proc->request_code != NO_REQUEST) {
486 		if (proc->request_code == (short) inpkt->request)
487 			break;
488 		proc++;
489 	}
490 	if (proc->request_code == NO_REQUEST) {
491 		req_ack(srcadr, inter, inpkt, INFO_ERR_REQ);
492 		return;
493 	}
494 
495 	DPRINTF(4, ("found request in tables\n"));
496 
497 	/*
498 	 * If we need data, check to see if we have some.  If we
499 	 * don't, check to see that there is none (picky, picky).
500 	 */
501 
502 	/* This part is a bit tricky: we want to be sure that the size
503 	 * returned is either the old or the new size.  This also lets us
504 	 * find out whether the client can accept both types of messages.
505 	 *
506 	 * Handle the exception of REQ_CONFIG. It can have two data sizes.
507 	 */
508 	temp_size = INFO_ITEMSIZE(inpkt->mbz_itemsize);
509 	if ((temp_size != proc->sizeofitem &&
510 	     temp_size != proc->v6_sizeofitem) &&
511 	    !(inpkt->implementation == IMPL_XNTPD &&
512 	      inpkt->request == REQ_CONFIG &&
513 	      temp_size == sizeof(struct old_conf_peer))) {
514 		DPRINTF(3, ("process_private: wrong item size, received %d, should be %d or %d\n",
515 			    temp_size, proc->sizeofitem, proc->v6_sizeofitem));
516 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
517 		return;
518 	}
519 	if ((proc->sizeofitem != 0) &&
520 	    ((size_t)(temp_size * INFO_NITEMS(inpkt->err_nitems)) >
521 	     (recv_len - REQ_LEN_HDR))) {
522 		DPRINTF(3, ("process_private: not enough data\n"));
523 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
524 		return;
525 	}
526 
527 	switch (inpkt->implementation) {
528 	case IMPL_XNTPD:
529 		client_v6_capable = 1;
530 		break;
531 	case IMPL_XNTPD_OLD:
532 		client_v6_capable = 0;
533 		break;
534 	default:
535 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
536 		return;
537 	}
538 
539 	/*
540 	 * If we need to authenticate, do so.  Note that an
541 	 * authenticatable packet must include a MAC field, must
542 	 * have used the key info_auth_keyid, and must have included
543 	 * a time stamp in the appropriate field.  The time stamp
544 	 * must be within INFO_TS_MAXSKEW of the receive
545 	 * time stamp.
546 	 */
547 	if (proc->needs_auth && sys_authenticate) {
548 
549 		if (recv_len < (REQ_LEN_HDR +
550 		    (INFO_ITEMSIZE(inpkt->mbz_itemsize) *
551 		    INFO_NITEMS(inpkt->err_nitems)) +
552 		    REQ_TAIL_MIN)) {
553 			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
554 			return;
555 		}
556 
557 		/*
558 		 * For 16-octet digests, regardless of itemsize and
559 		 * nitems, authenticated requests are a fixed size
560 		 * with the timestamp, key ID, and digest located
561 		 * at the end of the packet.  Because the key ID
562 		 * determining the digest size precedes the digest,
563 		 * for larger digests the fixed size request scheme
564 		 * is abandoned and the timestamp, key ID, and digest
565 		 * are located relative to the start of the packet,
566 		 * with the digest size determined by the packet size.
567 		 */
568 		noslop_len = REQ_LEN_HDR
569 			     + INFO_ITEMSIZE(inpkt->mbz_itemsize) *
570 			       INFO_NITEMS(inpkt->err_nitems)
571 			     + sizeof(inpkt->tstamp);
572 		/* 32-bit alignment */
573 		noslop_len = (noslop_len + 3) & ~3;
574 		if (recv_len > (noslop_len + MAX_MAC_LEN))
575 			mac_len = 20;
576 		else
577 			mac_len = recv_len - noslop_len;
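		/*
		 * Illustrative note (editorial): "(noslop_len + 3) & ~3"
		 * above rounds the pre-MAC length up to the next multiple
		 * of four octets.  If the packet is longer than that plus
		 * the largest supported MAC, it is taken to be an old
		 * fixed-size request and the MAC is assumed to be 20
		 * octets -- a 4-octet key ID plus a 16-octet (MD5-sized)
		 * digest; otherwise the MAC length is simply whatever is
		 * left after noslop_len.
		 */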
578 
579 		tailinpkt = (void *)((char *)inpkt + recv_len -
580 			    (mac_len + sizeof(inpkt->tstamp)));
581 
582 		/*
583 		 * If this guy is restricted from doing this, don't let
584 		 * him.  If the wrong key was used, or the packet doesn't
585 		 * have a MAC, return.
586 		 */
587 		/* XXX: Use authistrustedip(), or equivalent. */
588 		if (!INFO_IS_AUTH(inpkt->auth_seq) || !info_auth_keyid
589 		    || ntohl(tailinpkt->keyid) != info_auth_keyid) {
590 			DPRINTF(5, ("failed auth %d info_auth_keyid %u pkt keyid %u maclen %lu\n",
591 				    INFO_IS_AUTH(inpkt->auth_seq),
592 				    info_auth_keyid,
593 				    ntohl(tailinpkt->keyid), (u_long)mac_len));
594 #ifdef DEBUG
595 			msyslog(LOG_DEBUG,
596 				"process_private: failed auth %d info_auth_keyid %u pkt keyid %u maclen %lu\n",
597 				INFO_IS_AUTH(inpkt->auth_seq),
598 				info_auth_keyid,
599 				ntohl(tailinpkt->keyid), (u_long)mac_len);
600 #endif
601 			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
602 			return;
603 		}
604 		if (recv_len > REQ_LEN_NOMAC + MAX_MAC_LEN) {
605 			DPRINTF(5, ("bad pkt length %zu\n", recv_len));
606 			msyslog(LOG_ERR,
607 				"process_private: bad pkt length %zu",
608 				recv_len);
609 			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
610 			return;
611 		}
612 		if (!mod_okay || !authhavekey(info_auth_keyid)) {
613 			DPRINTF(5, ("failed auth mod_okay %d\n",
614 				    mod_okay));
615 #ifdef DEBUG
616 			msyslog(LOG_DEBUG,
617 				"process_private: failed auth mod_okay %d\n",
618 				mod_okay);
619 #endif
620 			if (!mod_okay) {
621 				sys_restricted++;
622 			}
623 			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
624 			return;
625 		}
626 
627 		/*
628 		 * calculate absolute time difference between xmit time stamp
629 		 * and receive time stamp.  If too large, too bad.
630 		 */
631 		NTOHL_FP(&tailinpkt->tstamp, &ftmp);
632 		L_SUB(&ftmp, &rbufp->recv_time);
633 		LFPTOD(&ftmp, dtemp);
634 		if (fabs(dtemp) > INFO_TS_MAXSKEW) {
635 			/*
636 			 * He's a loser.  Tell him.
637 			 */
638 			DPRINTF(5, ("xmit/rcv timestamp delta %g > INFO_TS_MAXSKEW %g\n",
639 				    dtemp, INFO_TS_MAXSKEW));
640 			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
641 			return;
642 		}
643 
644 		/*
645 		 * So far so good.  See if decryption works out okay.
646 		 */
647 		if (!authdecrypt(info_auth_keyid, (u_int32 *)inpkt,
648 				 recv_len - mac_len, mac_len)) {
649 			DPRINTF(5, ("authdecrypt failed\n"));
650 			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
651 			return;
652 		}
653 	}
654 
655 	DPRINTF(3, ("process_private: all okay, into handler\n"));
656 	/*
657 	 * Packet is okay.  Call the handler to send him data.
658 	 */
659 	(proc->handler)(srcadr, inter, inpkt);
660 }
661 
662 
663 /*
664  * list_peers - send a list of the peers
665  */
666 static void
667 list_peers(
668 	sockaddr_u *srcadr,
669 	endpt *inter,
670 	struct req_pkt *inpkt
671 	)
672 {
673 	struct info_peer_list *	ip;
674 	const struct peer *	pp;
675 
676 	ip = (struct info_peer_list *)prepare_pkt(srcadr, inter, inpkt,
677 	    v6sizeof(struct info_peer_list));
678 	for (pp = peer_list; pp != NULL && ip != NULL; pp = pp->p_link) {
679 		if (IS_IPV6(&pp->srcadr)) {
680 			if (!client_v6_capable)
681 				continue;
682 			ip->addr6 = SOCK_ADDR6(&pp->srcadr);
683 			ip->v6_flag = 1;
684 		} else {
685 			ip->addr = NSRCADR(&pp->srcadr);
686 			if (client_v6_capable)
687 				ip->v6_flag = 0;
688 		}
689 
690 		ip->port = NSRCPORT(&pp->srcadr);
691 		ip->hmode = pp->hmode;
692 		ip->flags = 0;
693 		if (pp->flags & FLAG_CONFIG)
694 			ip->flags |= INFO_FLAG_CONFIG;
695 		if (pp == sys_peer)
696 			ip->flags |= INFO_FLAG_SYSPEER;
697 		if (pp->status == CTL_PST_SEL_SYNCCAND)
698 			ip->flags |= INFO_FLAG_SEL_CANDIDATE;
699 		if (pp->status >= CTL_PST_SEL_SYSPEER)
700 			ip->flags |= INFO_FLAG_SHORTLIST;
701 		ip = (struct info_peer_list *)more_pkt();
702 	}	/* for pp */
703 
704 	flush_pkt();
705 }
706 
707 
708 /*
709  * list_peers_sum - return extended peer list
710  */
711 static void
712 list_peers_sum(
713 	sockaddr_u *srcadr,
714 	endpt *inter,
715 	struct req_pkt *inpkt
716 	)
717 {
718 	struct info_peer_summary *	ips;
719 	const struct peer *		pp;
720 	l_fp 				ltmp;
721 
722 	DPRINTF(3, ("wants peer list summary\n"));
723 
724 	ips = (struct info_peer_summary *)prepare_pkt(srcadr, inter, inpkt,
725 	    v6sizeof(struct info_peer_summary));
726 	for (pp = peer_list; pp != NULL && ips != NULL; pp = pp->p_link) {
727 		DPRINTF(4, ("sum: got one\n"));
728 		/*
729 		 * Be careful here not to return v6 peers when we
730 		 * want only v4.
731 		 */
732 		if (IS_IPV6(&pp->srcadr)) {
733 			if (!client_v6_capable)
734 				continue;
735 			ips->srcadr6 = SOCK_ADDR6(&pp->srcadr);
736 			ips->v6_flag = 1;
737 			if (pp->dstadr)
738 				ips->dstadr6 = SOCK_ADDR6(&pp->dstadr->sin);
739 			else
740 				ZERO(ips->dstadr6);
741 		} else {
742 			ips->srcadr = NSRCADR(&pp->srcadr);
743 			if (client_v6_capable)
744 				ips->v6_flag = 0;
745 
746 			if (pp->dstadr) {
747 				if (!pp->processed)
748 					ips->dstadr = NSRCADR(&pp->dstadr->sin);
749 				else {
750 					if (MDF_BCAST == pp->cast_flags)
751 						ips->dstadr = NSRCADR(&pp->dstadr->bcast);
752 					else if (pp->cast_flags) {
753 						ips->dstadr = NSRCADR(&pp->dstadr->sin);
754 						if (!ips->dstadr)
755 							ips->dstadr = NSRCADR(&pp->dstadr->bcast);
756 					}
757 				}
758 			} else {
759 				ips->dstadr = 0;
760 			}
761 		}
762 
763 		ips->srcport = NSRCPORT(&pp->srcadr);
764 		ips->stratum = pp->stratum;
765 		ips->hpoll = pp->hpoll;
766 		ips->ppoll = pp->ppoll;
767 		ips->reach = pp->reach;
768 		ips->flags = 0;
769 		if (pp == sys_peer)
770 			ips->flags |= INFO_FLAG_SYSPEER;
771 		if (pp->flags & FLAG_CONFIG)
772 			ips->flags |= INFO_FLAG_CONFIG;
773 		if (pp->flags & FLAG_REFCLOCK)
774 			ips->flags |= INFO_FLAG_REFCLOCK;
775 		if (pp->flags & FLAG_PREFER)
776 			ips->flags |= INFO_FLAG_PREFER;
777 		if (pp->flags & FLAG_BURST)
778 			ips->flags |= INFO_FLAG_BURST;
779 		if (pp->status == CTL_PST_SEL_SYNCCAND)
780 			ips->flags |= INFO_FLAG_SEL_CANDIDATE;
781 		if (pp->status >= CTL_PST_SEL_SYSPEER)
782 			ips->flags |= INFO_FLAG_SHORTLIST;
783 		ips->hmode = pp->hmode;
784 		ips->delay = HTONS_FP(DTOFP(pp->delay));
785 		DTOLFP(pp->offset, &ltmp);
786 		HTONL_FP(&ltmp, &ips->offset);
787 		ips->dispersion = HTONS_FP(DTOUFP(SQRT(pp->disp)));
788 
789 		ips = (struct info_peer_summary *)more_pkt();
790 	}	/* for pp */
791 
792 	flush_pkt();
793 }
794 
795 
796 /*
797  * peer_info - send information for one or more peers
798  */
799 static void
800 peer_info (
801 	sockaddr_u *srcadr,
802 	endpt *inter,
803 	struct req_pkt *inpkt
804 	)
805 {
806 	u_short			items;
807 	size_t			item_sz;
808 	char *			datap;
809 	struct info_peer_list	ipl;
810 	struct peer *		pp;
811 	struct info_peer *	ip;
812 	int			i;
813 	int			j;
814 	sockaddr_u		addr;
815 	l_fp			ltmp;
816 
817 	items = INFO_NITEMS(inpkt->err_nitems);
818 	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
819 	datap = inpkt->u.data;
820 	if (item_sz != sizeof(ipl)) {
821 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
822 		return;
823 	}
824 	ip = prepare_pkt(srcadr, inter, inpkt,
825 			 v6sizeof(struct info_peer));
826 	while (items-- > 0 && ip != NULL) {
827 		ZERO(ipl);
828 		memcpy(&ipl, datap, item_sz);
829 		ZERO_SOCK(&addr);
830 		NSRCPORT(&addr) = ipl.port;
831 		if (client_v6_capable && ipl.v6_flag) {
832 			AF(&addr) = AF_INET6;
833 			SOCK_ADDR6(&addr) = ipl.addr6;
834 		} else {
835 			AF(&addr) = AF_INET;
836 			NSRCADR(&addr) = ipl.addr;
837 		}
838 #ifdef ISC_PLATFORM_HAVESALEN
839 		addr.sa.sa_len = SOCKLEN(&addr);
840 #endif
841 		datap += item_sz;
842 
843 		pp = findexistingpeer(&addr, NULL, NULL, -1, 0, NULL);
844 		if (NULL == pp)
845 			continue;
846 		if (IS_IPV6(srcadr)) {
847 			if (pp->dstadr)
848 				ip->dstadr6 =
849 				    (MDF_BCAST == pp->cast_flags)
850 					? SOCK_ADDR6(&pp->dstadr->bcast)
851 					: SOCK_ADDR6(&pp->dstadr->sin);
852 			else
853 				ZERO(ip->dstadr6);
854 
855 			ip->srcadr6 = SOCK_ADDR6(&pp->srcadr);
856 			ip->v6_flag = 1;
857 		} else {
858 			if (pp->dstadr) {
859 				if (!pp->processed)
860 					ip->dstadr = NSRCADR(&pp->dstadr->sin);
861 				else {
862 					if (MDF_BCAST == pp->cast_flags)
863 						ip->dstadr = NSRCADR(&pp->dstadr->bcast);
864 					else if (pp->cast_flags) {
865 						ip->dstadr = NSRCADR(&pp->dstadr->sin);
866 						if (!ip->dstadr)
867 							ip->dstadr = NSRCADR(&pp->dstadr->bcast);
868 					}
869 				}
870 			} else
871 				ip->dstadr = 0;
872 
873 			ip->srcadr = NSRCADR(&pp->srcadr);
874 			if (client_v6_capable)
875 				ip->v6_flag = 0;
876 		}
877 		ip->srcport = NSRCPORT(&pp->srcadr);
878 		ip->flags = 0;
879 		if (pp == sys_peer)
880 			ip->flags |= INFO_FLAG_SYSPEER;
881 		if (pp->flags & FLAG_CONFIG)
882 			ip->flags |= INFO_FLAG_CONFIG;
883 		if (pp->flags & FLAG_REFCLOCK)
884 			ip->flags |= INFO_FLAG_REFCLOCK;
885 		if (pp->flags & FLAG_PREFER)
886 			ip->flags |= INFO_FLAG_PREFER;
887 		if (pp->flags & FLAG_BURST)
888 			ip->flags |= INFO_FLAG_BURST;
889 		if (pp->status == CTL_PST_SEL_SYNCCAND)
890 			ip->flags |= INFO_FLAG_SEL_CANDIDATE;
891 		if (pp->status >= CTL_PST_SEL_SYSPEER)
892 			ip->flags |= INFO_FLAG_SHORTLIST;
893 		ip->leap = pp->leap;
894 		ip->hmode = pp->hmode;
895 		ip->keyid = pp->keyid;
896 		ip->stratum = pp->stratum;
897 		ip->ppoll = pp->ppoll;
898 		ip->hpoll = pp->hpoll;
899 		ip->precision = pp->precision;
900 		ip->version = pp->version;
901 		ip->reach = pp->reach;
902 		ip->unreach = (u_char)pp->unreach;
903 		ip->flash = (u_char)pp->flash;
904 		ip->flash2 = (u_short)pp->flash;
905 		ip->estbdelay = HTONS_FP(DTOFP(pp->delay));
906 		ip->ttl = (u_char)pp->ttl;
907 		ip->associd = htons(pp->associd);
908 		ip->rootdelay = HTONS_FP(DTOUFP(pp->rootdelay));
909 		ip->rootdispersion = HTONS_FP(DTOUFP(pp->rootdisp));
910 		ip->refid = pp->refid;
911 		HTONL_FP(&pp->reftime, &ip->reftime);
912 		HTONL_FP(&pp->aorg, &ip->org);
913 		HTONL_FP(&pp->rec, &ip->rec);
914 		HTONL_FP(&pp->xmt, &ip->xmt);
915 		j = pp->filter_nextpt - 1;
916 		for (i = 0; i < NTP_SHIFT; i++, j--) {
917 			if (j < 0)
918 				j = NTP_SHIFT-1;
919 			ip->filtdelay[i] = HTONS_FP(DTOFP(pp->filter_delay[j]));
920 			DTOLFP(pp->filter_offset[j], &ltmp);
921 			HTONL_FP(&ltmp, &ip->filtoffset[i]);
922 			ip->order[i] = (u_char)((pp->filter_nextpt +
923 						 NTP_SHIFT - 1) -
924 						pp->filter_order[i]);
925 			if (ip->order[i] >= NTP_SHIFT)
926 				ip->order[i] -= NTP_SHIFT;
927 		}
928 		DTOLFP(pp->offset, &ltmp);
929 		HTONL_FP(&ltmp, &ip->offset);
930 		ip->delay = HTONS_FP(DTOFP(pp->delay));
931 		ip->dispersion = HTONS_FP(DTOUFP(SQRT(pp->disp)));
932 		ip->selectdisp = HTONS_FP(DTOUFP(SQRT(pp->jitter)));
933 		ip = more_pkt();
934 	}
935 	flush_pkt();
936 }
937 
938 
939 /*
940  * peer_stats - send statistics for one or more peers
941  */
942 static void
943 peer_stats (
944 	sockaddr_u *srcadr,
945 	endpt *inter,
946 	struct req_pkt *inpkt
947 	)
948 {
949 	u_short			items;
950 	size_t			item_sz;
951 	char *			datap;
952 	struct info_peer_list	ipl;
953 	struct peer *		pp;
954 	struct info_peer_stats *ip;
955 	sockaddr_u addr;
956 
957 	DPRINTF(1, ("peer_stats: called\n"));
958 	items = INFO_NITEMS(inpkt->err_nitems);
959 	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
960 	datap = inpkt->u.data;
961 	if (item_sz > sizeof(ipl)) {
962 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
963 		return;
964 	}
965 	ip = prepare_pkt(srcadr, inter, inpkt,
966 			 v6sizeof(struct info_peer_stats));
967 	while (items-- > 0 && ip != NULL) {
968 		ZERO(ipl);
969 		memcpy(&ipl, datap, item_sz);
970 		ZERO(addr);
971 		NSRCPORT(&addr) = ipl.port;
972 		if (client_v6_capable && ipl.v6_flag) {
973 			AF(&addr) = AF_INET6;
974 			SOCK_ADDR6(&addr) = ipl.addr6;
975 		} else {
976 			AF(&addr) = AF_INET;
977 			NSRCADR(&addr) = ipl.addr;
978 		}
979 #ifdef ISC_PLATFORM_HAVESALEN
980 		addr.sa.sa_len = SOCKLEN(&addr);
981 #endif
982 		DPRINTF(1, ("peer_stats: looking for %s, %d, %d\n",
983 			    stoa(&addr), ipl.port, NSRCPORT(&addr)));
984 
985 		datap += item_sz;
986 
987 		pp = findexistingpeer(&addr, NULL, NULL, -1, 0, NULL);
988 		if (NULL == pp)
989 			continue;
990 
991 		DPRINTF(1, ("peer_stats: found %s\n", stoa(&addr)));
992 
993 		if (IS_IPV4(&pp->srcadr)) {
994 			if (pp->dstadr) {
995 				if (!pp->processed)
996 					ip->dstadr = NSRCADR(&pp->dstadr->sin);
997 				else {
998 					if (MDF_BCAST == pp->cast_flags)
999 						ip->dstadr = NSRCADR(&pp->dstadr->bcast);
1000 					else if (pp->cast_flags) {
1001 						ip->dstadr = NSRCADR(&pp->dstadr->sin);
1002 						if (!ip->dstadr)
1003 							ip->dstadr = NSRCADR(&pp->dstadr->bcast);
1004 					}
1005 				}
1006 			} else
1007 				ip->dstadr = 0;
1008 
1009 			ip->srcadr = NSRCADR(&pp->srcadr);
1010 			if (client_v6_capable)
1011 				ip->v6_flag = 0;
1012 		} else {
1013 			if (pp->dstadr)
1014 				ip->dstadr6 =
1015 				    (MDF_BCAST == pp->cast_flags)
1016 					? SOCK_ADDR6(&pp->dstadr->bcast)
1017 					: SOCK_ADDR6(&pp->dstadr->sin);
1018 			else
1019 				ZERO(ip->dstadr6);
1020 
1021 			ip->srcadr6 = SOCK_ADDR6(&pp->srcadr);
1022 			ip->v6_flag = 1;
1023 		}
1024 		ip->srcport = NSRCPORT(&pp->srcadr);
1025 		ip->flags = 0;
1026 		if (pp == sys_peer)
1027 		    ip->flags |= INFO_FLAG_SYSPEER;
1028 		if (pp->flags & FLAG_CONFIG)
1029 		    ip->flags |= INFO_FLAG_CONFIG;
1030 		if (pp->flags & FLAG_REFCLOCK)
1031 		    ip->flags |= INFO_FLAG_REFCLOCK;
1032 		if (pp->flags & FLAG_PREFER)
1033 		    ip->flags |= INFO_FLAG_PREFER;
1034 		if (pp->flags & FLAG_BURST)
1035 		    ip->flags |= INFO_FLAG_BURST;
1036 		if (pp->flags & FLAG_IBURST)
1037 		    ip->flags |= INFO_FLAG_IBURST;
1038 		if (pp->status == CTL_PST_SEL_SYNCCAND)
1039 		    ip->flags |= INFO_FLAG_SEL_CANDIDATE;
1040 		if (pp->status >= CTL_PST_SEL_SYSPEER)
1041 		    ip->flags |= INFO_FLAG_SHORTLIST;
1042 		ip->flags = htons(ip->flags);
1043 		ip->timereceived = htonl((u_int32)(current_time - pp->timereceived));
1044 		ip->timetosend = htonl(pp->nextdate - current_time);
1045 		ip->timereachable = htonl((u_int32)(current_time - pp->timereachable));
1046 		ip->sent = htonl((u_int32)(pp->sent));
1047 		ip->processed = htonl((u_int32)(pp->processed));
1048 		ip->badauth = htonl((u_int32)(pp->badauth));
1049 		ip->bogusorg = htonl((u_int32)(pp->bogusorg));
1050 		ip->oldpkt = htonl((u_int32)(pp->oldpkt));
1051 		ip->seldisp = htonl((u_int32)(pp->seldisptoolarge));
1052 		ip->selbroken = htonl((u_int32)(pp->selbroken));
1053 		ip->candidate = pp->status;
1054 		ip = (struct info_peer_stats *)more_pkt();
1055 	}
1056 	flush_pkt();
1057 }
1058 
1059 
1060 /*
1061  * sys_info - return system info
1062  */
1063 static void
1064 sys_info(
1065 	sockaddr_u *srcadr,
1066 	endpt *inter,
1067 	struct req_pkt *inpkt
1068 	)
1069 {
1070 	register struct info_sys *is;
1071 
1072 	is = (struct info_sys *)prepare_pkt(srcadr, inter, inpkt,
1073 	    v6sizeof(struct info_sys));
1074 
1075 	if (sys_peer) {
1076 		if (IS_IPV4(&sys_peer->srcadr)) {
1077 			is->peer = NSRCADR(&sys_peer->srcadr);
1078 			if (client_v6_capable)
1079 				is->v6_flag = 0;
1080 		} else if (client_v6_capable) {
1081 			is->peer6 = SOCK_ADDR6(&sys_peer->srcadr);
1082 			is->v6_flag = 1;
1083 		}
1084 		is->peer_mode = sys_peer->hmode;
1085 	} else {
1086 		is->peer = 0;
1087 		if (client_v6_capable) {
1088 			is->v6_flag = 0;
1089 		}
1090 		is->peer_mode = 0;
1091 	}
1092 
1093 	is->leap = sys_leap;
1094 	is->stratum = sys_stratum;
1095 	is->precision = sys_precision;
1096 	is->rootdelay = htonl(DTOFP(sys_rootdelay));
1097 	is->rootdispersion = htonl(DTOUFP(sys_rootdisp));
1098 	is->frequency = htonl(DTOFP(sys_jitter));
1099 	is->stability = htonl(DTOUFP(clock_stability * 1e6));
1100 	is->refid = sys_refid;
1101 	HTONL_FP(&sys_reftime, &is->reftime);
1102 
1103 	is->poll = sys_poll;
1104 
1105 	is->flags = 0;
1106 	if (sys_authenticate)
1107 		is->flags |= INFO_FLAG_AUTHENTICATE;
1108 	if (sys_bclient)
1109 		is->flags |= INFO_FLAG_BCLIENT;
1110 #ifdef REFCLOCK
1111 	if (cal_enable)
1112 		is->flags |= INFO_FLAG_CAL;
1113 #endif /* REFCLOCK */
1114 	if (kern_enable)
1115 		is->flags |= INFO_FLAG_KERNEL;
1116 	if (mon_enabled != MON_OFF)
1117 		is->flags |= INFO_FLAG_MONITOR;
1118 	if (ntp_enable)
1119 		is->flags |= INFO_FLAG_NTP;
1120 	if (hardpps_enable)
1121 		is->flags |= INFO_FLAG_PPS_SYNC;
1122 	if (stats_control)
1123 		is->flags |= INFO_FLAG_FILEGEN;
1124 	is->bdelay = HTONS_FP(DTOFP(sys_bdelay));
1125 	HTONL_UF(sys_authdelay.l_uf, &is->authdelay);
1126 	(void) more_pkt();
1127 	flush_pkt();
1128 }
1129 
1130 
1131 /*
1132  * sys_stats - return system statistics
1133  */
1134 static void
1135 sys_stats(
1136 	sockaddr_u *srcadr,
1137 	endpt *inter,
1138 	struct req_pkt *inpkt
1139 	)
1140 {
1141 	register struct info_sys_stats *ss;
1142 
1143 	ss = (struct info_sys_stats *)prepare_pkt(srcadr, inter, inpkt,
1144 		sizeof(struct info_sys_stats));
1145 	ss->timeup = htonl((u_int32)current_time);
1146 	ss->timereset = htonl((u_int32)(current_time - sys_stattime));
1147 	ss->denied = htonl((u_int32)sys_restricted);
1148 	ss->oldversionpkt = htonl((u_int32)sys_oldversion);
1149 	ss->newversionpkt = htonl((u_int32)sys_newversion);
1150 	ss->unknownversion = htonl((u_int32)sys_declined);
1151 	ss->badlength = htonl((u_int32)sys_badlength);
1152 	ss->processed = htonl((u_int32)sys_processed);
1153 	ss->badauth = htonl((u_int32)sys_badauth);
1154 	ss->limitrejected = htonl((u_int32)sys_limitrejected);
1155 	ss->received = htonl((u_int32)sys_received);
1156 	ss->lamport = htonl((u_int32)sys_lamport);
1157 	ss->tsrounding = htonl((u_int32)sys_tsrounding);
1158 	(void) more_pkt();
1159 	flush_pkt();
1160 }
1161 
1162 
1163 /*
1164  * mem_stats - return memory statistics
1165  */
1166 static void
1167 mem_stats(
1168 	sockaddr_u *srcadr,
1169 	endpt *inter,
1170 	struct req_pkt *inpkt
1171 	)
1172 {
1173 	register struct info_mem_stats *ms;
1174 	register int i;
1175 
1176 	ms = (struct info_mem_stats *)prepare_pkt(srcadr, inter, inpkt,
1177 						  sizeof(struct info_mem_stats));
1178 
1179 	ms->timereset = htonl((u_int32)(current_time - peer_timereset));
1180 	ms->totalpeermem = htons((u_short)total_peer_structs);
1181 	ms->freepeermem = htons((u_short)peer_free_count);
1182 	ms->findpeer_calls = htonl((u_int32)findpeer_calls);
1183 	ms->allocations = htonl((u_int32)peer_allocations);
1184 	ms->demobilizations = htonl((u_int32)peer_demobilizations);
1185 
1186 	for (i = 0; i < NTP_HASH_SIZE; i++)
1187 		ms->hashcount[i] = (u_char)
1188 		    max((u_int)peer_hash_count[i], UCHAR_MAX);
1189 
1190 	(void) more_pkt();
1191 	flush_pkt();
1192 }
1193 
1194 
1195 /*
1196  * io_stats - return io statistics
1197  */
1198 static void
1199 io_stats(
1200 	sockaddr_u *srcadr,
1201 	endpt *inter,
1202 	struct req_pkt *inpkt
1203 	)
1204 {
1205 	struct info_io_stats *io;
1206 
1207 	io = (struct info_io_stats *)prepare_pkt(srcadr, inter, inpkt,
1208 						 sizeof(struct info_io_stats));
1209 
1210 	io->timereset = htonl((u_int32)(current_time - io_timereset));
1211 	io->totalrecvbufs = htons((u_short) total_recvbuffs());
1212 	io->freerecvbufs = htons((u_short) free_recvbuffs());
1213 	io->fullrecvbufs = htons((u_short) full_recvbuffs());
1214 	io->lowwater = htons((u_short) lowater_additions());
1215 	io->dropped = htonl((u_int32)packets_dropped);
1216 	io->ignored = htonl((u_int32)packets_ignored);
1217 	io->received = htonl((u_int32)packets_received);
1218 	io->sent = htonl((u_int32)packets_sent);
1219 	io->notsent = htonl((u_int32)packets_notsent);
1220 	io->interrupts = htonl((u_int32)handler_calls);
1221 	io->int_received = htonl((u_int32)handler_pkts);
1222 
1223 	(void) more_pkt();
1224 	flush_pkt();
1225 }
1226 
1227 
1228 /*
1229  * timer_stats - return timer statistics
1230  */
1231 static void
1232 timer_stats(
1233 	sockaddr_u *		srcadr,
1234 	endpt *			inter,
1235 	struct req_pkt *	inpkt
1236 	)
1237 {
1238 	struct info_timer_stats *	ts;
1239 	u_long				sincereset;
1240 
1241 	ts = (struct info_timer_stats *)prepare_pkt(srcadr, inter,
1242 						    inpkt, sizeof(*ts));
1243 
1244 	sincereset = current_time - timer_timereset;
1245 	ts->timereset = htonl((u_int32)sincereset);
1246 	ts->alarms = ts->timereset;
1247 	ts->overflows = htonl((u_int32)alarm_overflow);
1248 	ts->xmtcalls = htonl((u_int32)timer_xmtcalls);
1249 
1250 	(void) more_pkt();
1251 	flush_pkt();
1252 }
1253 
1254 
1255 /*
1256  * loop_info - return the current state of the loop filter
1257  */
1258 static void
1259 loop_info(
1260 	sockaddr_u *srcadr,
1261 	endpt *inter,
1262 	struct req_pkt *inpkt
1263 	)
1264 {
1265 	struct info_loop *li;
1266 	l_fp ltmp;
1267 
1268 	li = (struct info_loop *)prepare_pkt(srcadr, inter, inpkt,
1269 	    sizeof(struct info_loop));
1270 
1271 	DTOLFP(last_offset, &ltmp);
1272 	HTONL_FP(&ltmp, &li->last_offset);
1273 	DTOLFP(drift_comp * 1e6, &ltmp);
1274 	HTONL_FP(&ltmp, &li->drift_comp);
1275 	li->compliance = htonl((u_int32)(tc_counter));
1276 	li->watchdog_timer = htonl((u_int32)(current_time - sys_epoch));
1277 
1278 	(void) more_pkt();
1279 	flush_pkt();
1280 }
1281 
1282 
1283 /*
1284  * do_conf - add a peer to the configuration list
1285  */
1286 static void
1287 do_conf(
1288 	sockaddr_u *srcadr,
1289 	endpt *inter,
1290 	struct req_pkt *inpkt
1291 	)
1292 {
1293 	u_short			items;
1294 	size_t			item_sz;
1295 	u_int			fl;
1296 	char *			datap;
1297 	struct conf_peer	temp_cp;
1298 	sockaddr_u		peeraddr;
1299 
1300 	/*
1301 	 * Do a check of everything to see that it looks
1302 	 * okay.  If not, complain about it.  Note we are
1303 	 * very picky here.
1304 	 */
1305 	items = INFO_NITEMS(inpkt->err_nitems);
1306 	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1307 	datap = inpkt->u.data;
1308 	if (item_sz > sizeof(temp_cp)) {
1309 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1310 		return;
1311 	}
1312 
1313 	while (items-- > 0) {
1314 		ZERO(temp_cp);
1315 		memcpy(&temp_cp, datap, item_sz);
1316 		ZERO_SOCK(&peeraddr);
1317 
1318 		fl = 0;
1319 		if (temp_cp.flags & CONF_FLAG_PREFER)
1320 			fl |= FLAG_PREFER;
1321 		if (temp_cp.flags & CONF_FLAG_BURST)
1322 			fl |= FLAG_BURST;
1323 		if (temp_cp.flags & CONF_FLAG_IBURST)
1324 			fl |= FLAG_IBURST;
1325 #ifdef AUTOKEY
1326 		if (temp_cp.flags & CONF_FLAG_SKEY)
1327 			fl |= FLAG_SKEY;
1328 #endif	/* AUTOKEY */
1329 		if (client_v6_capable && temp_cp.v6_flag) {
1330 			AF(&peeraddr) = AF_INET6;
1331 			SOCK_ADDR6(&peeraddr) = temp_cp.peeraddr6;
1332 		} else {
1333 			AF(&peeraddr) = AF_INET;
1334 			NSRCADR(&peeraddr) = temp_cp.peeraddr;
1335 			/*
1336 			 * Make sure the address is valid
1337 			 */
1338 			if (!ISREFCLOCKADR(&peeraddr) &&
1339 			    ISBADADR(&peeraddr)) {
1340 				req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1341 				return;
1342 			}
1343 
1344 		}
1345 		NSRCPORT(&peeraddr) = htons(NTP_PORT);
1346 #ifdef ISC_PLATFORM_HAVESALEN
1347 		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
1348 #endif
1349 
1350 		/* check mode value: 0 <= hmode <= 6
1351 		 *
1352 		 * There's no good global define for that limit, and
1353 		 * using a magic define is as good (or bad, actually) as
1354 		 * a magic number. So we use the highest possible peer
1355 		 * mode, and that is MODE_BCLIENT.
1356 		 *
1357 		 * [Bug 3009] claims that a problem occurs for hmode > 7,
1358 		 * but the code in ntp_peer.c indicates trouble for any
1359 		 * hmode > 6 ( --> MODE_BCLIENT).
1360 		 */
1361 		if (temp_cp.hmode > MODE_BCLIENT) {
1362 			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1363 			return;
1364 		}
1365 
1366 		/* Any more checks on the values? Unchecked at this
1367 		 * point:
1368 		 *   - version
1369 		 *   - ttl
1370 		 *   - keyid
1371 		 *
1372 		 *   - minpoll/maxpoll, but they are treated properly
1373 		 *     for all cases internally. Checking not necessary.
1374 		 *
1375 		 * Note that we ignore any previously-specified ippeerlimit.
1376 		 * If we're told to create the peer, we create the peer.
1377 		 */
1378 
1379 		/* finally create the peer */
1380 		if (peer_config(&peeraddr, NULL, NULL, -1,
1381 		    temp_cp.hmode, temp_cp.version, temp_cp.minpoll,
1382 		    temp_cp.maxpoll, fl, temp_cp.ttl, temp_cp.keyid,
1383 		    NULL) == 0)
1384 		{
1385 			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
1386 			return;
1387 		}
1388 
1389 		datap += item_sz;
1390 	}
1391 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1392 }
1393 
1394 
1395 /*
1396  * do_unconf - remove a peer from the configuration list
1397  */
1398 static void
1399 do_unconf(
1400 	sockaddr_u *	srcadr,
1401 	endpt *		inter,
1402 	struct req_pkt *inpkt
1403 	)
1404 {
1405 	u_short			items;
1406 	size_t			item_sz;
1407 	char *			datap;
1408 	struct conf_unpeer	temp_cp;
1409 	struct peer *		p;
1410 	sockaddr_u		peeraddr;
1411 	int			loops;
1412 
1413 	/*
1414 	 * This is a bit unstructured, but I like to be careful.
1415 	 * We check to see that every peer exists and is actually
1416 	 * configured.  If so, we remove them.  If not, we return
1417 	 * an error.
1418 	 *
1419 	 * [Bug 3011] Even if we checked all peers given in the request
1420 	 * in a dry run, there's still a chance that the caller played
1421 	 * unfairly and gave the same peer multiple times. So we still
1422 	 * have to be prepared for nasty surprises in the second run ;)
1423 	 */
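	/*
	 * Illustrative note (editorial): the dry run only guarantees that
	 * every listed peer existed at that moment.  A request naming the
	 * same configured peer twice still passes the dry run, but after
	 * the first removal in the busy run the second lookup finds
	 * nothing -- hence the "loops && p" test below rather than
	 * assuming every busy-run lookup succeeds.
	 */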
1424 
1425 	/* basic consistency checks */
1426 	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1427 	if (item_sz > sizeof(temp_cp)) {
1428 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1429 		return;
1430 	}
1431 
1432 	/* now do two runs: first a dry run, then a busy one */
1433 	for (loops = 0; loops != 2; ++loops) {
1434 		items = INFO_NITEMS(inpkt->err_nitems);
1435 		datap = inpkt->u.data;
1436 		while (items-- > 0) {
1437 			/* copy from request to local */
1438 			ZERO(temp_cp);
1439 			memcpy(&temp_cp, datap, item_sz);
1440 			/* get address structure */
1441 			ZERO_SOCK(&peeraddr);
1442 			if (client_v6_capable && temp_cp.v6_flag) {
1443 				AF(&peeraddr) = AF_INET6;
1444 				SOCK_ADDR6(&peeraddr) = temp_cp.peeraddr6;
1445 			} else {
1446 				AF(&peeraddr) = AF_INET;
1447 				NSRCADR(&peeraddr) = temp_cp.peeraddr;
1448 			}
1449 			SET_PORT(&peeraddr, NTP_PORT);
1450 #ifdef ISC_PLATFORM_HAVESALEN
1451 			peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
1452 #endif
1453 			DPRINTF(1, ("searching for %s\n",
1454 				    stoa(&peeraddr)));
1455 
1456 			/* search for matching configured(!) peer */
1457 			p = NULL;
1458 			do {
1459 				p = findexistingpeer(
1460 					&peeraddr, NULL, p, -1, 0, NULL);
1461 			} while (p && !(FLAG_CONFIG & p->flags));
1462 
1463 			if (!loops && !p) {
1464 				/* Item not found in dry run -- bail! */
1465 				req_ack(srcadr, inter, inpkt,
1466 					INFO_ERR_NODATA);
1467 				return;
1468 			} else if (loops && p) {
1469 				/* Item found in busy run -- remove! */
1470 				peer_clear(p, "GONE");
1471 				unpeer(p);
1472 			}
1473 			datap += item_sz;
1474 		}
1475 	}
1476 
1477 	/* report success */
1478 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1479 }
1480 
1481 
1482 /*
1483  * set_sys_flag - set system flags
1484  */
1485 static void
1486 set_sys_flag(
1487 	sockaddr_u *srcadr,
1488 	endpt *inter,
1489 	struct req_pkt *inpkt
1490 	)
1491 {
1492 	setclr_flags(srcadr, inter, inpkt, 1);
1493 }
1494 
1495 
1496 /*
1497  * clr_sys_flag - clear system flags
1498  */
1499 static void
1500 clr_sys_flag(
1501 	sockaddr_u *srcadr,
1502 	endpt *inter,
1503 	struct req_pkt *inpkt
1504 	)
1505 {
1506 	setclr_flags(srcadr, inter, inpkt, 0);
1507 }
1508 
1509 
1510 /*
1511  * setclr_flags - do the grunge work of flag setting/clearing
1512  */
1513 static void
1514 setclr_flags(
1515 	sockaddr_u *srcadr,
1516 	endpt *inter,
1517 	struct req_pkt *inpkt,
1518 	u_long set
1519 	)
1520 {
1521 	struct conf_sys_flags *sf;
1522 	u_int32 flags;
1523 
1524 	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
1525 		msyslog(LOG_ERR, "setclr_flags: err_nitems > 1");
1526 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1527 		return;
1528 	}
1529 
1530 	sf = (struct conf_sys_flags *)&inpkt->u;
1531 	flags = ntohl(sf->flags);
1532 
1533 	if (flags & ~(SYS_FLAG_BCLIENT | SYS_FLAG_PPS |
1534 		      SYS_FLAG_NTP | SYS_FLAG_KERNEL | SYS_FLAG_MONITOR |
1535 		      SYS_FLAG_FILEGEN | SYS_FLAG_AUTH | SYS_FLAG_CAL)) {
1536 		msyslog(LOG_ERR, "setclr_flags: extra flags: %#x",
1537 			flags & ~(SYS_FLAG_BCLIENT | SYS_FLAG_PPS |
1538 				  SYS_FLAG_NTP | SYS_FLAG_KERNEL |
1539 				  SYS_FLAG_MONITOR | SYS_FLAG_FILEGEN |
1540 				  SYS_FLAG_AUTH | SYS_FLAG_CAL));
1541 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1542 		return;
1543 	}
1544 
1545 	if (flags & SYS_FLAG_BCLIENT)
1546 		proto_config(PROTO_BROADCLIENT, set, 0., NULL);
1547 	if (flags & SYS_FLAG_PPS)
1548 		proto_config(PROTO_PPS, set, 0., NULL);
1549 	if (flags & SYS_FLAG_NTP)
1550 		proto_config(PROTO_NTP, set, 0., NULL);
1551 	if (flags & SYS_FLAG_KERNEL)
1552 		proto_config(PROTO_KERNEL, set, 0., NULL);
1553 	if (flags & SYS_FLAG_MONITOR)
1554 		proto_config(PROTO_MONITOR, set, 0., NULL);
1555 	if (flags & SYS_FLAG_FILEGEN)
1556 		proto_config(PROTO_FILEGEN, set, 0., NULL);
1557 	if (flags & SYS_FLAG_AUTH)
1558 		proto_config(PROTO_AUTHENTICATE, set, 0., NULL);
1559 	if (flags & SYS_FLAG_CAL)
1560 		proto_config(PROTO_CAL, set, 0., NULL);
1561 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1562 }
1563 
1564 /* There have been some issues with the restrict list processing,
1565  * ranging from deep recursion (resulting in stack
1566  * overflows) to overfull reply buffers.
1567  *
1568  * To avoid this trouble the list reversal is done iteratively using a
1569  * scratch pad.
1570  */
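/*
 * Illustrative note (editorial): the scratch pad is a simple LIFO of
 * fixed-size sheets.  Pushing every entry of a list and then popping
 * until the stack is empty yields the entries in reverse order, which
 * is what list_restrict4()/list_restrict6() below rely on -- without
 * one level of recursion per restriction entry.
 */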
1571 typedef struct RestrictStack RestrictStackT;
1572 struct RestrictStack {
1573 	RestrictStackT   *link;
1574 	size_t            fcnt;
1575 	const restrict_u *pres[63];
1576 };
1577 
1578 static size_t
1579 getStackSheetSize(
1580 	RestrictStackT *sp
1581 	)
1582 {
1583 	if (sp)
1584 		return sizeof(sp->pres)/sizeof(sp->pres[0]);
1585 	return 0u;
1586 }
1587 
1588 static int/*BOOL*/
1589 pushRestriction(
1590 	RestrictStackT  **spp,
1591 	const restrict_u *ptr
1592 	)
1593 {
1594 	RestrictStackT *sp;
1595 
1596 	if (NULL == (sp = *spp) || 0 == sp->fcnt) {
1597 		/* need another sheet in the scratch pad */
1598 		sp = emalloc(sizeof(*sp));
1599 		sp->link = *spp;
1600 		sp->fcnt = getStackSheetSize(sp);
1601 		*spp = sp;
1602 	}
1603 	sp->pres[--sp->fcnt] = ptr;
1604 	return TRUE;
1605 }
1606 
1607 static int/*BOOL*/
1608 popRestriction(
1609 	RestrictStackT   **spp,
1610 	const restrict_u **opp
1611 	)
1612 {
1613 	RestrictStackT *sp;
1614 
1615 	if (NULL == (sp = *spp) || sp->fcnt >= getStackSheetSize(sp))
1616 		return FALSE;
1617 
1618 	*opp = sp->pres[sp->fcnt++];
1619 	if (sp->fcnt >= getStackSheetSize(sp)) {
1620 		/* discard sheet from scratch pad */
1621 		*spp = sp->link;
1622 		free(sp);
1623 	}
1624 	return TRUE;
1625 }
1626 
1627 static void
1628 flushRestrictionStack(
1629 	RestrictStackT **spp
1630 	)
1631 {
1632 	RestrictStackT *sp;
1633 
1634 	while (NULL != (sp = *spp)) {
1635 		*spp = sp->link;
1636 		free(sp);
1637 	}
1638 }
1639 
1640 /*
1641  * list_restrict4 - iterative helper for list_restrict dumps IPv4
1642  *		    restriction list in reverse order.
1643  */
1644 static void
1645 list_restrict4(
1646 	const restrict_u *	res,
1647 	struct info_restrict **	ppir
1648 	)
1649 {
1650 	RestrictStackT *	rpad;
1651 	struct info_restrict *	pir;
1652 
1653 	pir = *ppir;
1654 	for (rpad = NULL; res; res = res->link)
1655 		if (!pushRestriction(&rpad, res))
1656 			break;
1657 
1658 	while (pir && popRestriction(&rpad, &res)) {
1659 		pir->addr = htonl(res->u.v4.addr);
1660 		if (client_v6_capable)
1661 			pir->v6_flag = 0;
1662 		pir->mask = htonl(res->u.v4.mask);
1663 		pir->count = htonl(res->count);
1664 		pir->rflags = htons(res->rflags);
1665 		pir->mflags = htons(res->mflags);
1666 		pir = (struct info_restrict *)more_pkt();
1667 	}
1668 	flushRestrictionStack(&rpad);
1669 	*ppir = pir;
1670 }
1671 
1672 /*
1673  * list_restrict6 - iterative helper for list_restrict dumps IPv6
1674  *		    restriction list in reverse order.
1675  */
1676 static void
1677 list_restrict6(
1678 	const restrict_u *	res,
1679 	struct info_restrict **	ppir
1680 	)
1681 {
1682 	RestrictStackT *	rpad;
1683 	struct info_restrict *	pir;
1684 
1685 	pir = *ppir;
1686 	for (rpad = NULL; res; res = res->link)
1687 		if (!pushRestriction(&rpad, res))
1688 			break;
1689 
1690 	while (pir && popRestriction(&rpad, &res)) {
1691 		pir->addr6 = res->u.v6.addr;
1692 		pir->mask6 = res->u.v6.mask;
1693 		pir->v6_flag = 1;
1694 		pir->count = htonl(res->count);
1695 		pir->rflags = htons(res->rflags);
1696 		pir->mflags = htons(res->mflags);
1697 		pir = (struct info_restrict *)more_pkt();
1698 	}
1699 	flushRestrictionStack(&rpad);
1700 	*ppir = pir;
1701 }
1702 
1703 
1704 /*
1705  * list_restrict - return the restrict list
1706  */
1707 static void
1708 list_restrict(
1709 	sockaddr_u *srcadr,
1710 	endpt *inter,
1711 	struct req_pkt *inpkt
1712 	)
1713 {
1714 	struct info_restrict *ir;
1715 
1716 	DPRINTF(3, ("wants restrict list summary\n"));
1717 
1718 	ir = (struct info_restrict *)prepare_pkt(srcadr, inter, inpkt,
1719 	    v6sizeof(struct info_restrict));
1720 
1721 	/*
1722 	 * The restriction lists are kept sorted in the reverse of their
1723 	 * original order.  To preserve the output semantics, dump each
1724 	 * list in reverse order; the workers take care of that.
1725 	 */
1726 	list_restrict4(restrictlist4, &ir);
1727 	if (client_v6_capable)
1728 		list_restrict6(restrictlist6, &ir);
1729 	flush_pkt();
1730 }
1731 
1732 
1733 /*
1734  * do_resaddflags - add flags to a restrict entry (or create one)
1735  */
1736 static void
1737 do_resaddflags(
1738 	sockaddr_u *srcadr,
1739 	endpt *inter,
1740 	struct req_pkt *inpkt
1741 	)
1742 {
1743 	do_restrict(srcadr, inter, inpkt, RESTRICT_FLAGS);
1744 }
1745 
1746 
1747 
1748 /*
1749  * do_ressubflags - remove flags from a restrict entry
1750  */
1751 static void
1752 do_ressubflags(
1753 	sockaddr_u *srcadr,
1754 	endpt *inter,
1755 	struct req_pkt *inpkt
1756 	)
1757 {
1758 	do_restrict(srcadr, inter, inpkt, RESTRICT_UNFLAG);
1759 }
1760 
1761 
1762 /*
1763  * do_unrestrict - remove a restrict entry from the list
1764  */
1765 static void
1766 do_unrestrict(
1767 	sockaddr_u *srcadr,
1768 	endpt *inter,
1769 	struct req_pkt *inpkt
1770 	)
1771 {
1772 	do_restrict(srcadr, inter, inpkt, RESTRICT_REMOVE);
1773 }
1774 
1775 
1776 /*
1777  * do_restrict - do the dirty stuff of dealing with restrictions
1778  */
1779 static void
1780 do_restrict(
1781 	sockaddr_u *srcadr,
1782 	endpt *inter,
1783 	struct req_pkt *inpkt,
1784 	restrict_op op
1785 	)
1786 {
1787 	char *			datap;
1788 	struct conf_restrict	cr;
1789 	u_short			items;
1790 	size_t			item_sz;
1791 	sockaddr_u		matchaddr;
1792 	sockaddr_u		matchmask;
1793 	int			bad;
1794 
1795 	switch(op) {
1796 	    case RESTRICT_FLAGS:
1797 	    case RESTRICT_UNFLAG:
1798 	    case RESTRICT_REMOVE:
1799 	    case RESTRICT_REMOVEIF:
1800 	    	break;
1801 
1802 	    default:
1803 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1804 		return;
1805 	}
1806 
1807 	/*
1808 	 * Do a check of the flags to make sure that only
1809 	 * the NTPPORT flag is set, if any.  If not, complain
1810 	 * about it.  Note we are very picky here.
1811 	 */
1812 	items = INFO_NITEMS(inpkt->err_nitems);
1813 	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1814 	datap = inpkt->u.data;
1815 	if (item_sz > sizeof(cr)) {
1816 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1817 		return;
1818 	}
1819 
1820 	bad = 0;
1821 	while (items-- > 0 && !bad) {
1822 		memcpy(&cr, datap, item_sz);
1823 		cr.flags = ntohs(cr.flags);
1824 		cr.mflags = ntohs(cr.mflags);
1825 		if (~RESM_NTPONLY & cr.mflags)
1826 			bad |= 1;
1827 		if (~RES_ALLFLAGS & cr.flags)
1828 			bad |= 2;
1829 		if (INADDR_ANY != cr.mask) {
1830 			if (client_v6_capable && cr.v6_flag) {
1831 				if (IN6_IS_ADDR_UNSPECIFIED(&cr.addr6))
1832 					bad |= 4;
1833 			} else {
1834 				if (INADDR_ANY == cr.addr)
1835 					bad |= 8;
1836 			}
1837 		}
1838 		datap += item_sz;
1839 	}
1840 
1841 	if (bad) {
1842 		msyslog(LOG_ERR, "do_restrict: bad = %#x", bad);
1843 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1844 		return;
1845 	}
1846 
1847 	/*
1848 	 * Looks okay, try it out.  Needs to reload data pointer and
1849 	 * item counter. (Talos-CAN-0052)
1850 	 */
1851 	ZERO_SOCK(&matchaddr);
1852 	ZERO_SOCK(&matchmask);
1853 	items = INFO_NITEMS(inpkt->err_nitems);
1854 	datap = inpkt->u.data;
1855 
1856 	while (items-- > 0) {
1857 		memcpy(&cr, datap, item_sz);
1858 		cr.flags = ntohs(cr.flags);
1859 		cr.mflags = ntohs(cr.mflags);
1860 		cr.ippeerlimit = ntohs(cr.ippeerlimit);
1861 		if (client_v6_capable && cr.v6_flag) {
1862 			AF(&matchaddr) = AF_INET6;
1863 			AF(&matchmask) = AF_INET6;
1864 			SOCK_ADDR6(&matchaddr) = cr.addr6;
1865 			SOCK_ADDR6(&matchmask) = cr.mask6;
1866 		} else {
1867 			AF(&matchaddr) = AF_INET;
1868 			AF(&matchmask) = AF_INET;
1869 			NSRCADR(&matchaddr) = cr.addr;
1870 			NSRCADR(&matchmask) = cr.mask;
1871 		}
1872 		hack_restrict(op, &matchaddr, &matchmask, cr.mflags,
1873 			      cr.ippeerlimit, cr.flags, 0);
1874 		datap += item_sz;
1875 	}
1876 
1877 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1878 }
1879 
1880 
1881 /*
1882  * mon_getlist - return monitor data
1883  */
1884 static void
1885 mon_getlist(
1886 	sockaddr_u *srcadr,
1887 	endpt *inter,
1888 	struct req_pkt *inpkt
1889 	)
1890 {
1891 	req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
1892 }
1893 
1894 
1895 /*
1896  * Module entry points and the flags they correspond with
1897  */
1898 struct reset_entry {
1899 	int flag;		/* flag this corresponds to */
1900 	void (*handler)(void);	/* routine to handle request */
1901 };
1902 
1903 struct reset_entry reset_entries[] = {
1904 	{ RESET_FLAG_ALLPEERS,	peer_all_reset },
1905 	{ RESET_FLAG_IO,	io_clr_stats },
1906 	{ RESET_FLAG_SYS,	proto_clr_stats },
1907 	{ RESET_FLAG_MEM,	peer_clr_stats },
1908 	{ RESET_FLAG_TIMER,	timer_clr_stats },
1909 	{ RESET_FLAG_AUTH,	reset_auth_stats },
1910 	{ RESET_FLAG_CTL,	ctl_clr_stats },
1911 	{ 0,			0 }
1912 };
1913 
1914 /*
1915  * reset_stats - reset statistic counters here and there
1916  */
1917 static void
1918 reset_stats(
1919 	sockaddr_u *srcadr,
1920 	endpt *inter,
1921 	struct req_pkt *inpkt
1922 	)
1923 {
1924 	struct reset_flags *rflags;
1925 	u_long flags;
1926 	struct reset_entry *rent;
1927 
1928 	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
1929 		msyslog(LOG_ERR, "reset_stats: err_nitems > 1");
1930 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1931 		return;
1932 	}
1933 
1934 	rflags = (struct reset_flags *)&inpkt->u;
1935 	flags = ntohl(rflags->flags);
1936 
1937 	if (flags & ~RESET_ALLFLAGS) {
1938 		msyslog(LOG_ERR, "reset_stats: reset leaves %#lx",
1939 			flags & ~RESET_ALLFLAGS);
1940 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1941 		return;
1942 	}
1943 
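	/* call the reset routine of every subsystem whose flag is set */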
1944 	for (rent = reset_entries; rent->flag != 0; rent++) {
1945 		if (flags & rent->flag)
1946 			(*rent->handler)();
1947 	}
1948 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1949 }
1950 
1951 
1952 /*
1953  * reset_peer - clear a peer's statistics
1954  */
1955 static void
1956 reset_peer(
1957 	sockaddr_u *srcadr,
1958 	endpt *inter,
1959 	struct req_pkt *inpkt
1960 	)
1961 {
1962 	u_short			items;
1963 	size_t			item_sz;
1964 	char *			datap;
1965 	struct conf_unpeer	cp;
1966 	struct peer *		p;
1967 	sockaddr_u		peeraddr;
1968 	int			bad;
1969 
1970 	/*
1971 	 * We check first to see that every peer exists.  If not,
1972 	 * we return an error.
1973 	 */
1974 
1975 	items = INFO_NITEMS(inpkt->err_nitems);
1976 	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1977 	datap = inpkt->u.data;
1978 	if (item_sz > sizeof(cp)) {
1979 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1980 		return;
1981 	}
1982 
1983 	bad = FALSE;
1984 	while (items-- > 0 && !bad) {
1985 		ZERO(cp);
1986 		memcpy(&cp, datap, item_sz);
1987 		ZERO_SOCK(&peeraddr);
1988 		if (client_v6_capable && cp.v6_flag) {
1989 			AF(&peeraddr) = AF_INET6;
1990 			SOCK_ADDR6(&peeraddr) = cp.peeraddr6;
1991 		} else {
1992 			AF(&peeraddr) = AF_INET;
1993 			NSRCADR(&peeraddr) = cp.peeraddr;
1994 		}
1995 
1996 #ifdef ISC_PLATFORM_HAVESALEN
1997 		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
1998 #endif
1999 		p = findexistingpeer(&peeraddr, NULL, NULL, -1, 0, NULL);
2000 		if (NULL == p)
2001 			bad++;
2002 		datap += item_sz;
2003 	}
2004 
2005 	if (bad) {
2006 		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2007 		return;
2008 	}
2009 
2010 	/*
2011 	 * Now do it in earnest.  The data pointer and item counter
2012 	 * must be reloaded for this second pass. (Talos-CAN-0052)
2013 	 */
2014 
2015 	items = INFO_NITEMS(inpkt->err_nitems);
2016 	datap = inpkt->u.data;
2017 	while (items-- > 0) {
2018 		ZERO(cp);
2019 		memcpy(&cp, datap, item_sz);
2020 		ZERO_SOCK(&peeraddr);
2021 		if (client_v6_capable && cp.v6_flag) {
2022 			AF(&peeraddr) = AF_INET6;
2023 			SOCK_ADDR6(&peeraddr) = cp.peeraddr6;
2024 		} else {
2025 			AF(&peeraddr) = AF_INET;
2026 			NSRCADR(&peeraddr) = cp.peeraddr;
2027 		}
2028 		SET_PORT(&peeraddr, NTP_PORT);
2029 #ifdef ISC_PLATFORM_HAVESALEN
2030 		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
2031 #endif
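		/* reset every association that matches this address */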
2032 		p = findexistingpeer(&peeraddr, NULL, NULL, -1, 0, NULL);
2033 		while (p != NULL) {
2034 			peer_reset(p);
2035 			p = findexistingpeer(&peeraddr, NULL, p, -1, 0, NULL);
2036 		}
2037 		datap += item_sz;
2038 	}
2039 
2040 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2041 }
2042 
2043 
2044 /*
2045  * do_key_reread - reread the encryption key file
2046  */
2047 static void
2048 do_key_reread(
2049 	sockaddr_u *srcadr,
2050 	endpt *inter,
2051 	struct req_pkt *inpkt
2052 	)
2053 {
2054 	rereadkeys();
2055 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2056 }
2057 
2058 
2059 /*
2060  * trust_key - make one or more keys trusted
2061  */
2062 static void
2063 trust_key(
2064 	sockaddr_u *srcadr,
2065 	endpt *inter,
2066 	struct req_pkt *inpkt
2067 	)
2068 {
2069 	do_trustkey(srcadr, inter, inpkt, 1);
2070 }
2071 
2072 
2073 /*
2074  * untrust_key - make one or more keys untrusted
2075  */
2076 static void
2077 untrust_key(
2078 	sockaddr_u *srcadr,
2079 	endpt *inter,
2080 	struct req_pkt *inpkt
2081 	)
2082 {
2083 	do_trustkey(srcadr, inter, inpkt, 0);
2084 }
2085 
2086 
2087 /*
2088  * do_trustkey - make keys either trustable or untrustable
2089  */
2090 static void
2091 do_trustkey(
2092 	sockaddr_u *srcadr,
2093 	endpt *inter,
2094 	struct req_pkt *inpkt,
2095 	u_long trust
2096 	)
2097 {
2098 	register uint32_t *kp;
2099 	register int items;
2100 
2101 	items = INFO_NITEMS(inpkt->err_nitems);
2102 	kp = (uint32_t *)&inpkt->u;
2103 	while (items-- > 0) {
2104 		authtrust(*kp, trust);
2105 		kp++;
2106 	}
2107 
2108 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2109 }
2110 
2111 
2112 /*
2113  * get_auth_info - return some stats concerning the authentication module
2114  */
2115 static void
2116 get_auth_info(
2117 	sockaddr_u *srcadr,
2118 	endpt *inter,
2119 	struct req_pkt *inpkt
2120 	)
2121 {
2122 	register struct info_auth *ia;
2123 
2124 	ia = (struct info_auth *)prepare_pkt(srcadr, inter, inpkt,
2125 					     sizeof(struct info_auth));
2126 
2127 	ia->numkeys = htonl((u_int32)authnumkeys);
2128 	ia->numfreekeys = htonl((u_int32)authnumfreekeys);
2129 	ia->keylookups = htonl((u_int32)authkeylookups);
2130 	ia->keynotfound = htonl((u_int32)authkeynotfound);
2131 	ia->encryptions = htonl((u_int32)authencryptions);
2132 	ia->decryptions = htonl((u_int32)authdecryptions);
2133 	ia->keyuncached = htonl((u_int32)authkeyuncached);
2134 	ia->expired = htonl((u_int32)authkeyexpired);
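	/* report counter age as seconds since the last reset */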
2135 	ia->timereset = htonl((u_int32)(current_time - auth_timereset));
2136 
2137 	(void) more_pkt();
2138 	flush_pkt();
2139 }
2140 
2141 
2142 
2143 /*
2144  * reset_auth_stats - reset the authentication stat counters.  Done here
2145  *		      to keep ntp-isms out of the authentication module
2146  */
2147 void
2148 reset_auth_stats(void)
2149 {
2150 	authkeylookups = 0;
2151 	authkeynotfound = 0;
2152 	authencryptions = 0;
2153 	authdecryptions = 0;
2154 	authkeyuncached = 0;
2155 	auth_timereset = current_time;
2156 }
2157 
2158 
2159 /*
2160  * req_get_traps - return information about current trap holders
2161  */
2162 static void
2163 req_get_traps(
2164 	sockaddr_u *srcadr,
2165 	endpt *inter,
2166 	struct req_pkt *inpkt
2167 	)
2168 {
2169 	struct info_trap *it;
2170 	struct ctl_trap *tr;
2171 	size_t i;
2172 
2173 	if (num_ctl_traps == 0) {
2174 		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2175 		return;
2176 	}
2177 
2178 	it = (struct info_trap *)prepare_pkt(srcadr, inter, inpkt,
2179 	    v6sizeof(struct info_trap));
2180 
2181 	for (i = 0, tr = ctl_traps; it && i < COUNTOF(ctl_traps); i++, tr++) {
2182 		if (tr->tr_flags & TRAP_INUSE) {
2183 			if (IS_IPV4(&tr->tr_addr)) {
2184 				if (tr->tr_localaddr == any_interface)
2185 					it->local_address = 0;
2186 				else
2187 					it->local_address
2188 					    = NSRCADR(&tr->tr_localaddr->sin);
2189 				it->trap_address = NSRCADR(&tr->tr_addr);
2190 				if (client_v6_capable)
2191 					it->v6_flag = 0;
2192 			} else {
2193 				if (!client_v6_capable)
2194 					continue;
2195 				it->local_address6
2196 				    = SOCK_ADDR6(&tr->tr_localaddr->sin);
2197 				it->trap_address6 = SOCK_ADDR6(&tr->tr_addr);
2198 				it->v6_flag = 1;
2199 			}
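			/* fields common to both address families */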
2200 			it->trap_port = NSRCPORT(&tr->tr_addr);
2201 			it->sequence = htons(tr->tr_sequence);
2202 			it->settime = htonl((u_int32)(current_time - tr->tr_settime));
2203 			it->origtime = htonl((u_int32)(current_time - tr->tr_origtime));
2204 			it->resets = htonl((u_int32)tr->tr_resets);
2205 			it->flags = htonl((u_int32)tr->tr_flags);
2206 			it = (struct info_trap *)more_pkt();
2207 		}
2208 	}
2209 	flush_pkt();
2210 }
2211 
2212 
2213 /*
2214  * req_set_trap - configure a trap
2215  */
2216 static void
2217 req_set_trap(
2218 	sockaddr_u *srcadr,
2219 	endpt *inter,
2220 	struct req_pkt *inpkt
2221 	)
2222 {
2223 	do_setclr_trap(srcadr, inter, inpkt, 1);
2224 }
2225 
2226 
2227 
2228 /*
2229  * req_clr_trap - unconfigure a trap
2230  */
2231 static void
2232 req_clr_trap(
2233 	sockaddr_u *srcadr,
2234 	endpt *inter,
2235 	struct req_pkt *inpkt
2236 	)
2237 {
2238 	do_setclr_trap(srcadr, inter, inpkt, 0);
2239 }
2240 
2241 
2242 
2243 /*
2244  * do_setclr_trap - do the grunge work of (un)configuring a trap
2245  */
2246 static void
2247 do_setclr_trap(
2248 	sockaddr_u *srcadr,
2249 	endpt *inter,
2250 	struct req_pkt *inpkt,
2251 	int set
2252 	)
2253 {
2254 	register struct conf_trap *ct;
2255 	register endpt *linter;
2256 	int res;
2257 	sockaddr_u laddr;
2258 
2259 	/*
2260 	 * Prepare sockaddr
2261 	 */
2262 	ZERO_SOCK(&laddr);
2263 	AF(&laddr) = AF(srcadr);
2264 	SET_PORT(&laddr, NTP_PORT);
2265 
2266 	/*
2267 	 * Allow only one item per request; with a single item there is
2268 	 * no ambiguity about which entry a failure refers to.
2269 	 */
2270 	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
2271 		msyslog(LOG_ERR, "do_setclr_trap: err_nitems > 1");
2272 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
2273 		return;
2274 	}
2275 	ct = (struct conf_trap *)&inpkt->u;
2276 
2277 	/*
2278 	 * Look for the local interface.  If none, use the default.
2279 	 */
2280 	if (ct->local_address == 0) {
2281 		linter = any_interface;
2282 	} else {
2283 		if (IS_IPV4(&laddr))
2284 			NSRCADR(&laddr) = ct->local_address;
2285 		else
2286 			SOCK_ADDR6(&laddr) = ct->local_address6;
2287 		linter = findinterface(&laddr);
2288 		if (NULL == linter) {
2289 			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2290 			return;
2291 		}
2292 	}
2293 
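	/* laddr is now reused to hold the trap address itself */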
2294 	if (IS_IPV4(&laddr))
2295 		NSRCADR(&laddr) = ct->trap_address;
2296 	else
2297 		SOCK_ADDR6(&laddr) = ct->trap_address6;
2298 	if (ct->trap_port)
2299 		NSRCPORT(&laddr) = ct->trap_port;
2300 	else
2301 		SET_PORT(&laddr, TRAPPORT);
2302 
2303 	if (set) {
2304 		res = ctlsettrap(&laddr, linter, 0,
2305 				 INFO_VERSION(inpkt->rm_vn_mode));
2306 	} else {
2307 		res = ctlclrtrap(&laddr, linter, 0);
2308 	}
2309 
2310 	if (!res) {
2311 		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2312 	} else {
2313 		req_ack(srcadr, inter, inpkt, INFO_OKAY);
2314 	}
2315 	return;
2316 }
2317 
2318 /*
2319  * Validate a request packet for a new request or control key:
2320  *  - only one item allowed
2321  *  - key must be valid (that is, known, and not in the autokey range)
2322  */
2323 static void
2324 set_keyid_checked(
2325 	keyid_t        *into,
2326 	const char     *what,
2327 	sockaddr_u     *srcadr,
2328 	endpt          *inter,
2329 	struct req_pkt *inpkt
2330 	)
2331 {
2332 	keyid_t *pkeyid;
2333 	keyid_t  tmpkey;
2334 
2335 	/* restrict ourselves to one item only */
2336 	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
2337 		msyslog(LOG_ERR, "set_keyid_checked[%s]: err_nitems > 1",
2338 			what);
2339 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
2340 		return;
2341 	}
2342 
2343 	/* plug the new key from the packet */
2344 	pkeyid = (keyid_t *)&inpkt->u;
2345 	tmpkey = ntohl(*pkeyid);
2346 
2347 	/* validate the new key id, claim data error on failure */
2348 	if (tmpkey < 1 || tmpkey > NTP_MAXKEY || !auth_havekey(tmpkey)) {
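	/* ids above NTP_MAXKEY fall in the Autokey range (see above) */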
2349 		msyslog(LOG_ERR, "set_keyid_checked[%s]: invalid key id: %ld",
2350 			what, (long)tmpkey);
2351 		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2352 		return;
2353 	}
2354 
2355 	/* if we arrive here, the key is good -- use it */
2356 	*into = tmpkey;
2357 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2358 }
2359 
2360 /*
2361  * set_request_keyid - set the keyid used to authenticate requests
2362  */
2363 static void
2364 set_request_keyid(
2365 	sockaddr_u *srcadr,
2366 	endpt *inter,
2367 	struct req_pkt *inpkt
2368 	)
2369 {
2370 	set_keyid_checked(&info_auth_keyid, "request",
2371 			  srcadr, inter, inpkt);
2372 }
2373 
2374 
2375 
2376 /*
2377  * set_control_keyid - set the keyid used to authenticate control messages
2378  */
2379 static void
2380 set_control_keyid(
2381 	sockaddr_u *srcadr,
2382 	endpt *inter,
2383 	struct req_pkt *inpkt
2384 	)
2385 {
2386 	set_keyid_checked(&ctl_auth_keyid, "control",
2387 			  srcadr, inter, inpkt);
2388 }
2389 
2390 
2391 
2392 /*
2393  * get_ctl_stats - return some stats concerning the control message module
2394  */
2395 static void
2396 get_ctl_stats(
2397 	sockaddr_u *srcadr,
2398 	endpt *inter,
2399 	struct req_pkt *inpkt
2400 	)
2401 {
2402 	register struct info_control *ic;
2403 
2404 	ic = (struct info_control *)prepare_pkt(srcadr, inter, inpkt,
2405 						sizeof(struct info_control));
2406 
2407 	ic->ctltimereset = htonl((u_int32)(current_time - ctltimereset));
2408 	ic->numctlreq = htonl((u_int32)numctlreq);
2409 	ic->numctlbadpkts = htonl((u_int32)numctlbadpkts);
2410 	ic->numctlresponses = htonl((u_int32)numctlresponses);
2411 	ic->numctlfrags = htonl((u_int32)numctlfrags);
2412 	ic->numctlerrors = htonl((u_int32)numctlerrors);
2413 	ic->numctltooshort = htonl((u_int32)numctltooshort);
2414 	ic->numctlinputresp = htonl((u_int32)numctlinputresp);
2415 	ic->numctlinputfrag = htonl((u_int32)numctlinputfrag);
2416 	ic->numctlinputerr = htonl((u_int32)numctlinputerr);
2417 	ic->numctlbadoffset = htonl((u_int32)numctlbadoffset);
2418 	ic->numctlbadversion = htonl((u_int32)numctlbadversion);
2419 	ic->numctldatatooshort = htonl((u_int32)numctldatatooshort);
2420 	ic->numctlbadop = htonl((u_int32)numctlbadop);
2421 	ic->numasyncmsgs = htonl((u_int32)numasyncmsgs);
2422 
2423 	(void) more_pkt();
2424 	flush_pkt();
2425 }
2426 
2427 
2428 #ifdef KERNEL_PLL
2429 /*
2430  * get_kernel_info - get kernel pll/pps information
2431  */
2432 static void
2433 get_kernel_info(
2434 	sockaddr_u *srcadr,
2435 	endpt *inter,
2436 	struct req_pkt *inpkt
2437 	)
2438 {
2439 	register struct info_kernel *ik;
2440 	struct timex ntx;
2441 
2442 	if (!pll_control) {
2443 		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2444 		return;
2445 	}
2446 
2447 	ZERO(ntx);
2448 	if (ntp_adjtime(&ntx) < 0)
2449 		msyslog(LOG_ERR, "get_kernel_info: ntp_adjtime() failed: %m");
2450 	ik = (struct info_kernel *)prepare_pkt(srcadr, inter, inpkt,
2451 	    sizeof(struct info_kernel));
2452 
2453 	/*
2454 	 * pll variables
2455 	 */
2456 	ik->offset = htonl((u_int32)ntx.offset);
2457 	ik->freq = htonl((u_int32)ntx.freq);
2458 	ik->maxerror = htonl((u_int32)ntx.maxerror);
2459 	ik->esterror = htonl((u_int32)ntx.esterror);
2460 	ik->status = htons(ntx.status);
2461 	ik->constant = htonl((u_int32)ntx.constant);
2462 	ik->precision = htonl((u_int32)ntx.precision);
2463 	ik->tolerance = htonl((u_int32)ntx.tolerance);
2464 
2465 	/*
2466 	 * pps variables
2467 	 */
2468 	ik->ppsfreq = htonl((u_int32)ntx.ppsfreq);
2469 	ik->jitter = htonl((u_int32)ntx.jitter);
2470 	ik->shift = htons(ntx.shift);
2471 	ik->stabil = htonl((u_int32)ntx.stabil);
2472 	ik->jitcnt = htonl((u_int32)ntx.jitcnt);
2473 	ik->calcnt = htonl((u_int32)ntx.calcnt);
2474 	ik->errcnt = htonl((u_int32)ntx.errcnt);
2475 	ik->stbcnt = htonl((u_int32)ntx.stbcnt);
2476 
2477 	(void) more_pkt();
2478 	flush_pkt();
2479 }
2480 #endif /* KERNEL_PLL */
2481 
2482 
2483 #ifdef REFCLOCK
2484 /*
2485  * get_clock_info - get info about a clock
2486  */
2487 static void
2488 get_clock_info(
2489 	sockaddr_u *srcadr,
2490 	endpt *inter,
2491 	struct req_pkt *inpkt
2492 	)
2493 {
2494 	register struct info_clock *ic;
2495 	register u_int32 *clkaddr;
2496 	register int items;
2497 	struct refclockstat clock_stat;
2498 	sockaddr_u addr;
2499 	l_fp ltmp;
2500 
2501 	ZERO_SOCK(&addr);
2502 	AF(&addr) = AF_INET;
2503 #ifdef ISC_PLATFORM_HAVESALEN
2504 	addr.sa.sa_len = SOCKLEN(&addr);
2505 #endif
2506 	SET_PORT(&addr, NTP_PORT);
2507 	items = INFO_NITEMS(inpkt->err_nitems);
2508 	clkaddr = &inpkt->u.u32[0];
2509 
2510 	ic = (struct info_clock *)prepare_pkt(srcadr, inter, inpkt,
2511 					      sizeof(struct info_clock));
2512 
2513 	while (items-- > 0 && ic) {
2514 		NSRCADR(&addr) = *clkaddr++;
2515 		if (!ISREFCLOCKADR(&addr) || NULL ==
2516 		    findexistingpeer(&addr, NULL, NULL, -1, 0, NULL)) {
2517 			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2518 			return;
2519 		}
2520 
2521 		clock_stat.kv_list = (struct ctl_var *)0;
2522 
2523 		refclock_control(&addr, NULL, &clock_stat);
2524 
2525 		ic->clockadr = NSRCADR(&addr);
2526 		ic->type = clock_stat.type;
2527 		ic->flags = clock_stat.flags;
2528 		ic->lastevent = clock_stat.lastevent;
2529 		ic->currentstatus = clock_stat.currentstatus;
2530 		ic->polls = htonl((u_int32)clock_stat.polls);
2531 		ic->noresponse = htonl((u_int32)clock_stat.noresponse);
2532 		ic->badformat = htonl((u_int32)clock_stat.badformat);
2533 		ic->baddata = htonl((u_int32)clock_stat.baddata);
2534 		ic->timestarted = htonl((u_int32)clock_stat.timereset);
2535 		DTOLFP(clock_stat.fudgetime1, &ltmp);
2536 		HTONL_FP(&ltmp, &ic->fudgetime1);
2537 		DTOLFP(clock_stat.fudgetime2, &ltmp);
2538 		HTONL_FP(&ltmp, &ic->fudgetime2);
2539 		ic->fudgeval1 = htonl((u_int32)clock_stat.fudgeval1);
2540 		ic->fudgeval2 = htonl(clock_stat.fudgeval2);
2541 
2542 		free_varlist(clock_stat.kv_list);
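		/* any variable list refclock_control() may have attached
		 * is not used here; release it */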
2543 
2544 		ic = (struct info_clock *)more_pkt();
2545 	}
2546 	flush_pkt();
2547 }
2548 
2549 
2550 
2551 /*
2552  * set_clock_fudge - set a clock's fudge factors
2553  */
2554 static void
2555 set_clock_fudge(
2556 	sockaddr_u *srcadr,
2557 	endpt *inter,
2558 	struct req_pkt *inpkt
2559 	)
2560 {
2561 	register struct conf_fudge *cf;
2562 	register int items;
2563 	struct refclockstat clock_stat;
2564 	sockaddr_u addr;
2565 	l_fp ltmp;
2566 
2567 	ZERO(addr);
2568 	ZERO(clock_stat);
2569 	items = INFO_NITEMS(inpkt->err_nitems);
2570 	cf = (struct conf_fudge *)&inpkt->u;
2571 
2572 	while (items-- > 0) {
2573 		AF(&addr) = AF_INET;
2574 		NSRCADR(&addr) = cf->clockadr;
2575 #ifdef ISC_PLATFORM_HAVESALEN
2576 		addr.sa.sa_len = SOCKLEN(&addr);
2577 #endif
2578 		SET_PORT(&addr, NTP_PORT);
2579 		if (!ISREFCLOCKADR(&addr) || NULL ==
2580 		    findexistingpeer(&addr, NULL, NULL, -1, 0, NULL)) {
2581 			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2582 			return;
2583 		}
2584 
2585 		switch(ntohl(cf->which)) {
2586 		    case FUDGE_TIME1:
2587 			NTOHL_FP(&cf->fudgetime, &ltmp);
2588 			LFPTOD(&ltmp, clock_stat.fudgetime1);
2589 			clock_stat.haveflags = CLK_HAVETIME1;
2590 			break;
2591 		    case FUDGE_TIME2:
2592 			NTOHL_FP(&cf->fudgetime, &ltmp);
2593 			LFPTOD(&ltmp, clock_stat.fudgetime2);
2594 			clock_stat.haveflags = CLK_HAVETIME2;
2595 			break;
2596 		    case FUDGE_VAL1:
2597 			clock_stat.fudgeval1 = ntohl(cf->fudgeval_flags);
2598 			clock_stat.haveflags = CLK_HAVEVAL1;
2599 			break;
2600 		    case FUDGE_VAL2:
2601 			clock_stat.fudgeval2 = ntohl(cf->fudgeval_flags);
2602 			clock_stat.haveflags = CLK_HAVEVAL2;
2603 			break;
2604 		    case FUDGE_FLAGS:
2605 			clock_stat.flags = (u_char) (ntohl(cf->fudgeval_flags) & 0xf);
2606 			clock_stat.haveflags =
2607 				(CLK_HAVEFLAG1|CLK_HAVEFLAG2|CLK_HAVEFLAG3|CLK_HAVEFLAG4);
2608 			break;
2609 		    default:
2610 			msyslog(LOG_ERR, "set_clock_fudge: default!");
2611 			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
2612 			return;
2613 		}
2614 
2615 		refclock_control(&addr, &clock_stat, (struct refclockstat *)0);
2616 	}
2617 
2618 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2619 }
2620 #endif
2621 
2622 #ifdef REFCLOCK
2623 /*
2624  * get_clkbug_info - get debugging info about a clock
2625  */
2626 static void
2627 get_clkbug_info(
2628 	sockaddr_u *srcadr,
2629 	endpt *inter,
2630 	struct req_pkt *inpkt
2631 	)
2632 {
2633 	register int i;
2634 	register struct info_clkbug *ic;
2635 	register u_int32 *clkaddr;
2636 	register int items;
2637 	struct refclockbug bug;
2638 	sockaddr_u addr;
2639 
2640 	ZERO_SOCK(&addr);
2641 	AF(&addr) = AF_INET;
2642 #ifdef ISC_PLATFORM_HAVESALEN
2643 	addr.sa.sa_len = SOCKLEN(&addr);
2644 #endif
2645 	SET_PORT(&addr, NTP_PORT);
2646 	items = INFO_NITEMS(inpkt->err_nitems);
2647 	clkaddr = (u_int32 *)&inpkt->u;
2648 
2649 	ic = (struct info_clkbug *)prepare_pkt(srcadr, inter, inpkt,
2650 					       sizeof(struct info_clkbug));
2651 
2652 	while (items-- > 0 && ic) {
2653 		NSRCADR(&addr) = *clkaddr++;
2654 		if (!ISREFCLOCKADR(&addr) || NULL ==
2655 		    findexistingpeer(&addr, NULL, NULL, -1, 0, NULL)) {
2656 			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2657 			return;
2658 		}
2659 
2660 		ZERO(bug);
2661 		refclock_buginfo(&addr, &bug);
2662 		if (bug.nvalues == 0 && bug.ntimes == 0) {
2663 			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2664 			return;
2665 		}
2666 
2667 		ic->clockadr = NSRCADR(&addr);
2668 		i = bug.nvalues;
2669 		if (i > NUMCBUGVALUES)
2670 		    i = NUMCBUGVALUES;
2671 		ic->nvalues = (u_char)i;
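		/* keep only the status bits for the values actually returned */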
2672 		ic->svalues = htons((u_short) (bug.svalues & ((1<<i)-1)));
2673 		while (--i >= 0)
2674 		    ic->values[i] = htonl(bug.values[i]);
2675 
2676 		i = bug.ntimes;
2677 		if (i > NUMCBUGTIMES)
2678 		    i = NUMCBUGTIMES;
2679 		ic->ntimes = (u_char)i;
2680 		ic->stimes = htonl(bug.stimes);
2681 		while (--i >= 0) {
2682 			HTONL_FP(&bug.times[i], &ic->times[i]);
2683 		}
2684 
2685 		ic = (struct info_clkbug *)more_pkt();
2686 	}
2687 	flush_pkt();
2688 }
2689 #endif
2690 
2691 /*
2692  * fill_info_if_stats - interface enumeration callback; copies one endpt's statistics into the response
2693  */
2694 static void
2695 fill_info_if_stats(void *data, interface_info_t *interface_info)
2696 {
2697 	struct info_if_stats **ifsp = (struct info_if_stats **)data;
2698 	struct info_if_stats *ifs = *ifsp;
2699 	endpt *ep = interface_info->ep;
2700 
2701 	if (NULL == ifs)
2702 		return;
2703 
2704 	ZERO(*ifs);
2705 
2706 	if (IS_IPV6(&ep->sin)) {
2707 		if (!client_v6_capable)
2708 			return;
2709 		ifs->v6_flag = 1;
2710 		ifs->unaddr.addr6 = SOCK_ADDR6(&ep->sin);
2711 		ifs->unbcast.addr6 = SOCK_ADDR6(&ep->bcast);
2712 		ifs->unmask.addr6 = SOCK_ADDR6(&ep->mask);
2713 	} else {
2714 		ifs->v6_flag = 0;
2715 		ifs->unaddr.addr = SOCK_ADDR4(&ep->sin);
2716 		ifs->unbcast.addr = SOCK_ADDR4(&ep->bcast);
2717 		ifs->unmask.addr = SOCK_ADDR4(&ep->mask);
2718 	}
2719 	ifs->v6_flag = htonl(ifs->v6_flag);
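	/* multi-byte fields are converted to network byte order below */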
2720 	strlcpy(ifs->name, ep->name, sizeof(ifs->name));
2721 	ifs->family = htons(ep->family);
2722 	ifs->flags = htonl(ep->flags);
2723 	ifs->last_ttl = htonl(ep->last_ttl);
2724 	ifs->num_mcast = htonl(ep->num_mcast);
2725 	ifs->received = htonl(ep->received);
2726 	ifs->sent = htonl(ep->sent);
2727 	ifs->notsent = htonl(ep->notsent);
2728 	ifs->ifindex = htonl(ep->ifindex);
2729 	/* scope no longer in endpt, in in6_addr typically */
2730 	/* the scope id no longer lives in the endpt; report the ifindex instead */
2731 	ifs->ifnum = htonl(ep->ifnum);
2732 	ifs->uptime = htonl(current_time - ep->starttime);
2733 	ifs->ignore_packets = ep->ignore_packets;
2734 	ifs->peercnt = htonl(ep->peercnt);
2735 	ifs->action = interface_info->action;
2736 
2737 	*ifsp = (struct info_if_stats *)more_pkt();
2738 }
2739 
2740 /*
2741  * get_if_stats - get interface statistics
2742  */
2743 static void
2744 get_if_stats(
2745 	sockaddr_u *srcadr,
2746 	endpt *inter,
2747 	struct req_pkt *inpkt
2748 	)
2749 {
2750 	struct info_if_stats *ifs;
2751 
2752 	DPRINTF(3, ("wants interface statistics\n"));
2753 
2754 	ifs = (struct info_if_stats *)prepare_pkt(srcadr, inter, inpkt,
2755 	    v6sizeof(struct info_if_stats));
2756 
2757 	interface_enumerate(fill_info_if_stats, &ifs);
2758 
2759 	flush_pkt();
2760 }
2761 
2762 static void
2763 do_if_reload(
2764 	sockaddr_u *srcadr,
2765 	endpt *inter,
2766 	struct req_pkt *inpkt
2767 	)
2768 {
2769 	struct info_if_stats *ifs;
2770 
2771 	DPRINTF(3, ("wants interface reload\n"));
2772 
2773 	ifs = (struct info_if_stats *)prepare_pkt(srcadr, inter, inpkt,
2774 	    v6sizeof(struct info_if_stats));
2775 
2776 	interface_update(fill_info_if_stats, &ifs);
2777 
2778 	flush_pkt();
2779 }
2780 
2781