xref: /netbsd-src/external/bsd/ntp/dist/ntpd/ntp_request.c (revision 9e9fa66ad400fe02341b89216c5270e23a212d33)
1 /*	$NetBSD: ntp_request.c,v 1.20 2024/10/01 20:59:51 christos Exp $	*/
2 
3 /*
4  * ntp_request.c - respond to information requests
5  */
6 
7 #ifdef HAVE_CONFIG_H
8 # include <config.h>
9 #endif
10 
11 #include "ntpd.h"
12 #include "ntp_io.h"
13 #include "ntp_request.h"
14 #include "ntp_control.h"
15 #include "ntp_refclock.h"
16 #include "ntp_if.h"
17 #include "ntp_stdlib.h"
18 #include "ntp_assert.h"
19 
20 #include <stdio.h>
21 #include <stddef.h>
22 #include <signal.h>
23 #ifdef HAVE_NETINET_IN_H
24 #include <netinet/in.h>
25 #endif
26 #include <arpa/inet.h>
27 
28 #include "recvbuff.h"
29 
30 #ifdef KERNEL_PLL
31 #include "ntp_syscall.h"
32 #endif /* KERNEL_PLL */
33 
34 /*
35  * Structure to hold request procedure information
36  */
37 #define	NOAUTH	0
38 #define	AUTH	1
39 
40 #define	NO_REQUEST	(-1)
41 /*
42  * Because we now have v6 addresses in the messages, we need to compensate
43  * for the larger size.  Therefore, we introduce the alternate size to
44  * keep us friendly with older implementations.  A little ugly.
45  */
46 static int client_v6_capable = 0;   /* the client can handle longer messages */
47 
48 #define v6sizeof(type)	(client_v6_capable ? sizeof(type) : v4sizeof(type))
49 
50 struct req_proc {
51 	short request_code;	/* defined request code */
52 	short needs_auth;	/* true when authentication needed */
53 	short sizeofitem;	/* size of request data item (older size)*/
54 	short v6_sizeofitem;	/* size of request data item (new size)*/
55 	void (*handler) (sockaddr_u *, endpt *,
56 			   struct req_pkt *);	/* routine to handle request */
57 };
58 
59 /*
60  * Universal request codes
61  */
62 static const struct req_proc univ_codes[] = {
63 	{ NO_REQUEST,		NOAUTH,	 0,	0, NULL }
64 };
65 
66 static	void	req_ack	(sockaddr_u *, endpt *, struct req_pkt *, int);
67 static	void *	prepare_pkt	(sockaddr_u *, endpt *,
68 				 struct req_pkt *, size_t);
69 static	void *	more_pkt	(void);
70 static	void	flush_pkt	(void);
71 static	void	list_peers	(sockaddr_u *, endpt *, struct req_pkt *);
72 static	void	list_peers_sum	(sockaddr_u *, endpt *, struct req_pkt *);
73 static	void	peer_info	(sockaddr_u *, endpt *, struct req_pkt *);
74 static	void	peer_stats	(sockaddr_u *, endpt *, struct req_pkt *);
75 static	void	sys_info	(sockaddr_u *, endpt *, struct req_pkt *);
76 static	void	sys_stats	(sockaddr_u *, endpt *, struct req_pkt *);
77 static	void	mem_stats	(sockaddr_u *, endpt *, struct req_pkt *);
78 static	void	io_stats	(sockaddr_u *, endpt *, struct req_pkt *);
79 static	void	timer_stats	(sockaddr_u *, endpt *, struct req_pkt *);
80 static	void	loop_info	(sockaddr_u *, endpt *, struct req_pkt *);
81 static	void	do_conf		(sockaddr_u *, endpt *, struct req_pkt *);
82 static	void	do_unconf	(sockaddr_u *, endpt *, struct req_pkt *);
83 static	void	set_sys_flag	(sockaddr_u *, endpt *, struct req_pkt *);
84 static	void	clr_sys_flag	(sockaddr_u *, endpt *, struct req_pkt *);
85 static	void	setclr_flags	(sockaddr_u *, endpt *, struct req_pkt *, u_long);
86 static	void	list_restrict4	(const struct restrict_4 *, struct info_restrict **);
87 static	void	list_restrict6	(const struct restrict_6 *, struct info_restrict **);
88 static	void	list_restrict	(sockaddr_u *, endpt *, struct req_pkt *);
89 static	void	do_resaddflags	(sockaddr_u *, endpt *, struct req_pkt *);
90 static	void	do_ressubflags	(sockaddr_u *, endpt *, struct req_pkt *);
91 static	void	do_unrestrict	(sockaddr_u *, endpt *, struct req_pkt *);
92 static	void	do_restrict	(sockaddr_u *, endpt *, struct req_pkt *, restrict_op);
93 static	void	mon_getlist	(sockaddr_u *, endpt *, struct req_pkt *);
94 static	void	reset_stats	(sockaddr_u *, endpt *, struct req_pkt *);
95 static	void	reset_peer	(sockaddr_u *, endpt *, struct req_pkt *);
96 static	void	do_key_reread	(sockaddr_u *, endpt *, struct req_pkt *);
97 static	void	trust_key	(sockaddr_u *, endpt *, struct req_pkt *);
98 static	void	untrust_key	(sockaddr_u *, endpt *, struct req_pkt *);
99 static	void	do_trustkey	(sockaddr_u *, endpt *, struct req_pkt *, u_long);
100 static	void	get_auth_info	(sockaddr_u *, endpt *, struct req_pkt *);
101 static	void	req_get_traps	(sockaddr_u *, endpt *, struct req_pkt *);
102 static	void	req_set_trap	(sockaddr_u *, endpt *, struct req_pkt *);
103 static	void	req_clr_trap	(sockaddr_u *, endpt *, struct req_pkt *);
104 static	void	do_setclr_trap	(sockaddr_u *, endpt *, struct req_pkt *, int);
105 static	void	set_request_keyid (sockaddr_u *, endpt *, struct req_pkt *);
106 static	void	set_control_keyid (sockaddr_u *, endpt *, struct req_pkt *);
107 static	void	get_ctl_stats   (sockaddr_u *, endpt *, struct req_pkt *);
108 static	void	get_if_stats    (sockaddr_u *, endpt *, struct req_pkt *);
109 static	void	do_if_reload    (sockaddr_u *, endpt *, struct req_pkt *);
110 #ifdef KERNEL_PLL
111 static	void	get_kernel_info (sockaddr_u *, endpt *, struct req_pkt *);
112 #endif /* KERNEL_PLL */
113 #ifdef REFCLOCK
114 static	void	get_clock_info (sockaddr_u *, endpt *, struct req_pkt *);
115 static	void	set_clock_fudge (sockaddr_u *, endpt *, struct req_pkt *);
116 #endif	/* REFCLOCK */
117 #ifdef REFCLOCK
118 static	void	get_clkbug_info (sockaddr_u *, endpt *, struct req_pkt *);
119 #endif	/* REFCLOCK */
120 
121 /*
122  * ntpd request codes
123  */
124 static const struct req_proc ntp_codes[] = {
125 	{ REQ_PEER_LIST,	NOAUTH,	0, 0,	list_peers },
126 	{ REQ_PEER_LIST_SUM,	NOAUTH,	0, 0,	list_peers_sum },
127 	{ REQ_PEER_INFO,    NOAUTH, v4sizeof(struct info_peer_list),
128 				sizeof(struct info_peer_list), peer_info},
129 	{ REQ_PEER_STATS,   NOAUTH, v4sizeof(struct info_peer_list),
130 				sizeof(struct info_peer_list), peer_stats},
131 	{ REQ_SYS_INFO,		NOAUTH,	0, 0,	sys_info },
132 	{ REQ_SYS_STATS,	NOAUTH,	0, 0,	sys_stats },
133 	{ REQ_IO_STATS,		NOAUTH,	0, 0,	io_stats },
134 	{ REQ_MEM_STATS,	NOAUTH,	0, 0,	mem_stats },
135 	{ REQ_LOOP_INFO,	NOAUTH,	0, 0,	loop_info },
136 	{ REQ_TIMER_STATS,	NOAUTH,	0, 0,	timer_stats },
137 	{ REQ_CONFIG,	    AUTH, v4sizeof(struct conf_peer),
138 				sizeof(struct conf_peer), do_conf },
139 	{ REQ_UNCONFIG,	    AUTH, v4sizeof(struct conf_unpeer),
140 				sizeof(struct conf_unpeer), do_unconf },
141 	{ REQ_SET_SYS_FLAG, AUTH, sizeof(struct conf_sys_flags),
142 				sizeof(struct conf_sys_flags), set_sys_flag },
143 	{ REQ_CLR_SYS_FLAG, AUTH, sizeof(struct conf_sys_flags),
144 				sizeof(struct conf_sys_flags),  clr_sys_flag },
145 	{ REQ_GET_RESTRICT,	NOAUTH,	0, 0,	list_restrict },
146 	{ REQ_RESADDFLAGS, AUTH, v4sizeof(struct conf_restrict),
147 				sizeof(struct conf_restrict), do_resaddflags },
148 	{ REQ_RESSUBFLAGS, AUTH, v4sizeof(struct conf_restrict),
149 				sizeof(struct conf_restrict), do_ressubflags },
150 	{ REQ_UNRESTRICT, AUTH, v4sizeof(struct conf_restrict),
151 				sizeof(struct conf_restrict), do_unrestrict },
152 	{ REQ_MON_GETLIST,	NOAUTH,	0, 0,	mon_getlist },
153 	{ REQ_MON_GETLIST_1,	NOAUTH,	0, 0,	mon_getlist },
154 	{ REQ_RESET_STATS, AUTH, sizeof(struct reset_flags), 0, reset_stats },
155 	{ REQ_RESET_PEER,  AUTH, v4sizeof(struct conf_unpeer),
156 				sizeof(struct conf_unpeer), reset_peer },
157 	{ REQ_REREAD_KEYS,	AUTH,	0, 0,	do_key_reread },
158 	{ REQ_TRUSTKEY,   AUTH, sizeof(u_long), sizeof(u_long), trust_key },
159 	{ REQ_UNTRUSTKEY, AUTH, sizeof(u_long), sizeof(u_long), untrust_key },
160 	{ REQ_AUTHINFO,		NOAUTH,	0, 0,	get_auth_info },
161 	{ REQ_TRAPS,		NOAUTH, 0, 0,	req_get_traps },
162 	{ REQ_ADD_TRAP,	AUTH, v4sizeof(struct conf_trap),
163 				sizeof(struct conf_trap), req_set_trap },
164 	{ REQ_CLR_TRAP,	AUTH, v4sizeof(struct conf_trap),
165 				sizeof(struct conf_trap), req_clr_trap },
166 	{ REQ_REQUEST_KEY, AUTH, sizeof(u_long), sizeof(u_long),
167 				set_request_keyid },
168 	{ REQ_CONTROL_KEY, AUTH, sizeof(u_long), sizeof(u_long),
169 				set_control_keyid },
170 	{ REQ_GET_CTLSTATS,	NOAUTH,	0, 0,	get_ctl_stats },
171 #ifdef KERNEL_PLL
172 	{ REQ_GET_KERNEL,	NOAUTH,	0, 0,	get_kernel_info },
173 #endif
174 #ifdef REFCLOCK
175 	{ REQ_GET_CLOCKINFO, NOAUTH, sizeof(u_int32), sizeof(u_int32),
176 				get_clock_info },
177 	{ REQ_SET_CLKFUDGE, AUTH, sizeof(struct conf_fudge),
178 				sizeof(struct conf_fudge), set_clock_fudge },
179 	{ REQ_GET_CLKBUGINFO, NOAUTH, sizeof(u_int32), sizeof(u_int32),
180 				get_clkbug_info },
181 #endif
182 	{ REQ_IF_STATS,		AUTH, 0, 0,	get_if_stats },
183 	{ REQ_IF_RELOAD,	AUTH, 0, 0,	do_if_reload },
184 
185 	{ NO_REQUEST,		NOAUTH,	0, 0,	0 }
186 };
187 
188 
189 /*
190  * Authentication keyid used to authenticate requests.  Zero means we
191  * don't allow writing anything.
192  */
193 keyid_t info_auth_keyid;
194 
195 /*
196  * Statistic counters to keep track of requests and responses.
197  */
198 u_long numrequests;		/* number of requests we've received */
199 u_long numresppkts;		/* number of resp packets sent with data */
200 
201 /*
202  * lazy way to count errors, indexed by the error code
203  */
204 u_long errorcounter[MAX_INFO_ERR + 1];
205 
206 /*
207  * A hack.  To keep the authentication module clear of ntp-ism's, we
208  * include a time reset variable for its stats here.
209  */
210 u_long auth_timereset;
211 
212 /*
213  * Response packet used by these routines.  Also some state information
214  * so that we can handle packet formatting within a common set of
215  * subroutines.  Note we try to enter data in place whenever possible,
216  * but the need to set the more bit correctly means we occasionally
217  * use the extra buffer and copy.
218  */
219 static struct resp_pkt rpkt;
220 static int reqver;
221 static int seqno;
222 static int nitems;
223 static int itemsize;
224 static int databytes;
225 static char exbuf[RESP_DATA_SIZE];
226 static int usingexbuf;
227 static sockaddr_u *toaddr;
228 static endpt *frominter;
229 
230 /*
231  * init_request - initialize request data
232  */
233 void
234 init_request (void)
235 {
236 	size_t i;
237 
238 	numrequests = 0;
239 	numresppkts = 0;
240 	auth_timereset = 0;
241 	info_auth_keyid = 0;	/* by default, can't do this */
242 
243 	for (i = 0; i < sizeof(errorcounter)/sizeof(errorcounter[0]); i++)
244 	    errorcounter[i] = 0;
245 }
246 
247 
248 /*
249  * req_ack - acknowledge request with no data
250  */
251 static void
252 req_ack(
253 	sockaddr_u *srcadr,
254 	endpt *inter,
255 	struct req_pkt *inpkt,
256 	int errcode
257 	)
258 {
259 	/*
260 	 * fill in the fields
261 	 */
262 	rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, 0, reqver);
263 	rpkt.auth_seq = AUTH_SEQ(0, 0);
264 	rpkt.implementation = inpkt->implementation;
265 	rpkt.request = inpkt->request;
266 	rpkt.err_nitems = ERR_NITEMS(errcode, 0);
267 	rpkt.mbz_itemsize = MBZ_ITEMSIZE(0);
268 
269 	/*
270 	 * send packet and bump counters
271 	 */
272 	sendpkt(srcadr, inter, -1, (struct pkt *)&rpkt, RESP_HEADER_SIZE);
273 	errorcounter[errcode]++;
274 }
275 
276 
277 /*
278  * prepare_pkt - prepare response packet for transmission, return pointer
279  *		 to storage for data item.
280  */
281 static void *
282 prepare_pkt(
283 	sockaddr_u *srcadr,
284 	endpt *inter,
285 	struct req_pkt *pkt,
286 	size_t structsize
287 	)
288 {
289 	DPRINTF(4, ("request: preparing pkt\n"));
290 
291 	/*
292 	 * Fill in the implementation, request and itemsize fields
293 	 * since these won't change.
294 	 */
295 	rpkt.implementation = pkt->implementation;
296 	rpkt.request = pkt->request;
297 	rpkt.mbz_itemsize = MBZ_ITEMSIZE(structsize);
298 
299 	/*
300 	 * Compute the static data needed to carry on.
301 	 */
302 	toaddr = srcadr;
303 	frominter = inter;
304 	seqno = 0;
305 	nitems = 0;
306 	itemsize = structsize;
307 	databytes = 0;
308 	usingexbuf = 0;
309 
310 	/*
311 	 * return the beginning of the packet buffer.
312 	 */
313 	return &rpkt.u;
314 }
315 
316 
317 /*
318  * more_pkt - return a data pointer for a new item.
319  */
320 static void *
321 more_pkt(void)
322 {
323 	/*
324 	 * If we were using the extra buffer, send the packet.
325 	 */
326 	if (usingexbuf) {
327 		DPRINTF(3, ("request: sending pkt\n"));
328 		rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, MORE_BIT, reqver);
329 		rpkt.auth_seq = AUTH_SEQ(0, seqno);
330 		rpkt.err_nitems = htons((u_short)nitems);
331 		sendpkt(toaddr, frominter, -1, (struct pkt *)&rpkt,
332 			RESP_HEADER_SIZE + databytes);
333 		numresppkts++;
334 
335 		/*
336 		 * Copy data out of exbuf into the packet.
337 		 */
338 		memcpy(&rpkt.u.data[0], exbuf, (unsigned)itemsize);
339 		seqno++;
340 		databytes = 0;
341 		nitems = 0;
342 		usingexbuf = 0;
343 	}
344 
345 	databytes += itemsize;
346 	nitems++;
347 	if (databytes + itemsize <= RESP_DATA_SIZE) {
348 		DPRINTF(4, ("request: giving him more data\n"));
349 		/*
350 		 * More room in packet.  Give him the
351 		 * next address.
352 		 */
353 		return &rpkt.u.data[databytes];
354 	} else {
355 		/*
356 		 * No room in packet.  Give him the extra
357 		 * buffer unless this was the last in the sequence.
358 		 */
359 		DPRINTF(4, ("request: into extra buffer\n"));
360 		if (seqno == MAXSEQ)
361 			return NULL;
362 		else {
363 			usingexbuf = 1;
364 			return exbuf;
365 		}
366 	}
367 }
368 
369 
370 /*
371  * flush_pkt - we're done, return remaining information.
372  */
373 static void
374 flush_pkt(void)
375 {
376 	DPRINTF(3, ("request: flushing packet, %d items\n", nitems));
377 	/*
378 	 * Must send the last packet.  If nothing in here and nothing
379 	 * has been sent, send an error saying no data to be found.
380 	 */
381 	if (seqno == 0 && nitems == 0)
382 		req_ack(toaddr, frominter, (struct req_pkt *)&rpkt,
383 			INFO_ERR_NODATA);
384 	else {
385 		rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, 0, reqver);
386 		rpkt.auth_seq = AUTH_SEQ(0, seqno);
387 		rpkt.err_nitems = htons((u_short)nitems);
388 		sendpkt(toaddr, frominter, -1, (struct pkt *)&rpkt,
389 			RESP_HEADER_SIZE+databytes);
390 		numresppkts++;
391 	}
392 }
393 
394 
395 
396 /*
397  * Given a buffer, return the packet mode
398  */
399 int
400 get_packet_mode(struct recvbuf *rbufp)
401 {
402 	struct req_pkt *inpkt = (struct req_pkt *)&rbufp->recv_pkt;
403 	return (INFO_MODE(inpkt->rm_vn_mode));
404 }
405 
406 
407 /*
408  * process_private - process private mode (7) packets
409  */
410 void
411 process_private(
412 	struct recvbuf *rbufp,
413 	int mod_okay
414 	)
415 {
416 	static u_long quiet_until;
417 	struct req_pkt *inpkt;
418 	struct req_pkt_tail *tailinpkt;
419 	sockaddr_u *srcadr;
420 	endpt *inter;
421 	const struct req_proc *proc;
422 	int ec;
423 	short temp_size;
424 	l_fp ftmp;
425 	double dtemp;
426 	size_t recv_len;
427 	size_t noslop_len;
428 	size_t mac_len;
429 
430 	/*
431 	 * Initialize pointers, for convenience
432 	 */
433 	recv_len = rbufp->recv_length;
434 	inpkt = (struct req_pkt *)&rbufp->recv_pkt;
435 	srcadr = &rbufp->recv_srcadr;
436 	inter = rbufp->dstadr;
437 
438 	DPRINTF(3, ("process_private: impl %d req %d\n",
439 		    inpkt->implementation, inpkt->request));
440 
441 	/*
442 	 * Do some sanity checks on the packet.  Return a format
443 	 * error if it fails.
444 	 */
445 	ec = 0;
446 	if (   (++ec, ISRESPONSE(inpkt->rm_vn_mode))
447 	    || (++ec, ISMORE(inpkt->rm_vn_mode))
448 	    || (++ec, INFO_VERSION(inpkt->rm_vn_mode) > NTP_VERSION)
449 	    || (++ec, INFO_VERSION(inpkt->rm_vn_mode) < NTP_OLDVERSION)
450 	    || (++ec, INFO_SEQ(inpkt->auth_seq) != 0)
451 	    || (++ec, INFO_ERR(inpkt->err_nitems) != 0)
452 	    || (++ec, INFO_MBZ(inpkt->mbz_itemsize) != 0)
453 	    || (++ec, rbufp->recv_length < (int)REQ_LEN_HDR)
454 		) {
455 		NLOG(NLOG_SYSEVENT)
456 			if (current_time >= quiet_until) {
457 				msyslog(LOG_ERR,
458 					"process_private: drop test %d"
459 					" failed, pkt from %s",
460 					ec, stoa(srcadr));
461 				quiet_until = current_time + 60;
462 			}
463 		return;
464 	}
465 
466 	reqver = INFO_VERSION(inpkt->rm_vn_mode);
467 
468 	/*
469 	 * Get the appropriate procedure list to search.
470 	 */
471 	if (inpkt->implementation == IMPL_UNIV)
472 		proc = univ_codes;
473 	else if ((inpkt->implementation == IMPL_XNTPD) ||
474 		 (inpkt->implementation == IMPL_XNTPD_OLD))
475 		proc = ntp_codes;
476 	else {
477 		req_ack(srcadr, inter, inpkt, INFO_ERR_IMPL);
478 		return;
479 	}
480 
481 	/*
482 	 * Search the list for the request codes.  If it isn't one
483 	 * we know, return an error.
484 	 */
485 	while (proc->request_code != NO_REQUEST) {
486 		if (proc->request_code == (short) inpkt->request)
487 			break;
488 		proc++;
489 	}
490 	if (proc->request_code == NO_REQUEST) {
491 		req_ack(srcadr, inter, inpkt, INFO_ERR_REQ);
492 		return;
493 	}
494 
495 	DPRINTF(4, ("found request in tables\n"));
496 
497 	/*
498 	 * If we need data, check to see if we have some.  If we
499 	 * don't, check to see that there is none (picky, picky).
500 	 */
501 
502 	/* This part is a bit tricky, we want to be sure that the size
503 	 * returned is either the old or the new size.  We also can find
504 	 * out if the client can accept both types of messages this way.
505 	 *
506 	 * Handle the exception of REQ_CONFIG. It can have two data sizes.
507 	 */
508 	temp_size = INFO_ITEMSIZE(inpkt->mbz_itemsize);
509 	if ((temp_size != proc->sizeofitem &&
510 	     temp_size != proc->v6_sizeofitem) &&
511 	    !(inpkt->implementation == IMPL_XNTPD &&
512 	      inpkt->request == REQ_CONFIG &&
513 	      temp_size == sizeof(struct old_conf_peer))) {
514 		DPRINTF(3, ("process_private: wrong item size, received %d, should be %d or %d\n",
515 			    temp_size, proc->sizeofitem, proc->v6_sizeofitem));
516 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
517 		return;
518 	}
519 	if ((proc->sizeofitem != 0) &&
520 	    ((size_t)(temp_size * INFO_NITEMS(inpkt->err_nitems)) >
521 	     (recv_len - REQ_LEN_HDR))) {
522 		DPRINTF(3, ("process_private: not enough data\n"));
523 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
524 		return;
525 	}
526 
527 	switch (inpkt->implementation) {
528 	case IMPL_XNTPD:
529 		client_v6_capable = 1;
530 		break;
531 	case IMPL_XNTPD_OLD:
532 		client_v6_capable = 0;
533 		break;
534 	default:
535 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
536 		return;
537 	}
538 
539 	/*
540 	 * If we need to authenticate, do so.  Note that an
541 	 * authenticatable packet must include a mac field, must
542 	 * have used key info_auth_keyid and must have included
543 	 * a time stamp in the appropriate field.  The time stamp
544 	 * must be within INFO_TS_MAXSKEW of the receive
545 	 * time stamp.
546 	 */
547 	if (proc->needs_auth && sys_authenticate) {
548 
549 		if (recv_len < (REQ_LEN_HDR +
550 		    (INFO_ITEMSIZE(inpkt->mbz_itemsize) *
551 		    INFO_NITEMS(inpkt->err_nitems)) +
552 		    REQ_TAIL_MIN)) {
553 			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
554 			return;
555 		}
556 
557 		/*
558 		 * For 16-octet digests, regardless of itemsize and
559 		 * nitems, authenticated requests are a fixed size
560 		 * with the timestamp, key ID, and digest located
561 		 * at the end of the packet.  Because the key ID
562 		 * determining the digest size precedes the digest,
563 		 * for larger digests the fixed size request scheme
564 		 * is abandoned and the timestamp, key ID, and digest
565 		 * are located relative to the start of the packet,
566 		 * with the digest size determined by the packet size.
567 		 */
568 		noslop_len = REQ_LEN_HDR
569 			     + INFO_ITEMSIZE(inpkt->mbz_itemsize) *
570 			       INFO_NITEMS(inpkt->err_nitems)
571 			     + sizeof(inpkt->tstamp);
572 		/* 32-bit alignment */
573 		noslop_len = (noslop_len + 3) & ~3;
574 		if (recv_len > (noslop_len + MAX_MAC_LEN))
575 			mac_len = 20;
576 		else
577 			mac_len = recv_len - noslop_len;
578 
579 		tailinpkt = (void *)((char *)inpkt + recv_len -
580 			    (mac_len + sizeof(inpkt->tstamp)));
581 
582 		/*
583 		 * If this guy is restricted from doing this, don't let
584 		 * him.  If the wrong key was used, or packet doesn't
585 		 * have mac, return.
586 		 */
587 		/* XXX: Use authistrustedip(), or equivalent. */
588 		if (!INFO_IS_AUTH(inpkt->auth_seq) || !info_auth_keyid
589 		    || ntohl(tailinpkt->keyid) != info_auth_keyid) {
590 			DPRINTF(5, ("failed auth %d info_auth_keyid %u pkt keyid %u maclen %lu\n",
591 				    INFO_IS_AUTH(inpkt->auth_seq),
592 				    info_auth_keyid,
593 				    ntohl(tailinpkt->keyid), (u_long)mac_len));
594 #ifdef DEBUG
595 			msyslog(LOG_DEBUG,
596 				"process_private: failed auth %d info_auth_keyid %u pkt keyid %u maclen %lu\n",
597 				INFO_IS_AUTH(inpkt->auth_seq),
598 				info_auth_keyid,
599 				ntohl(tailinpkt->keyid), (u_long)mac_len);
600 #endif
601 			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
602 			return;
603 		}
604 		if (recv_len > REQ_LEN_NOMAC + MAX_MAC_LEN) {
605 			DPRINTF(5, ("bad pkt length %zu\n", recv_len));
606 			msyslog(LOG_ERR,
607 				"process_private: bad pkt length %zu",
608 				recv_len);
609 			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
610 			return;
611 		}
612 		if (!mod_okay || !authhavekey(info_auth_keyid)) {
613 			DPRINTF(5, ("failed auth mod_okay %d\n",
614 				    mod_okay));
615 #ifdef DEBUG
616 			msyslog(LOG_DEBUG,
617 				"process_private: failed auth mod_okay %d\n",
618 				mod_okay);
619 #endif
620 			if (!mod_okay) {
621 				sys_restricted++;
622 			}
623 			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
624 			return;
625 		}
626 
627 		/*
628 		 * calculate absolute time difference between xmit time stamp
629 		 * and receive time stamp.  If too large, too bad.
630 		 */
631 		NTOHL_FP(&tailinpkt->tstamp, &ftmp);
632 		L_SUB(&ftmp, &rbufp->recv_time);
633 		LFPTOD(&ftmp, dtemp);
634 		if (fabs(dtemp) > INFO_TS_MAXSKEW) {
635 			/*
636 			 * He's a loser.  Tell him.
637 			 */
638 			DPRINTF(5, ("xmit/rcv timestamp delta %g > INFO_TS_MAXSKEW %g\n",
639 				    dtemp, INFO_TS_MAXSKEW));
640 			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
641 			return;
642 		}
643 
644 		/*
645 		 * So far so good.  See if decryption works out okay.
646 		 */
647 		if (!authdecrypt(info_auth_keyid, (u_int32 *)inpkt,
648 				 recv_len - mac_len, mac_len)) {
649 			DPRINTF(5, ("authdecrypt failed\n"));
650 			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
651 			return;
652 		}
653 	}
654 
655 	DPRINTF(3, ("process_private: all okay, into handler\n"));
656 	/*
657 	 * Packet is okay.  Call the handler to send him data.
658 	 */
659 	(proc->handler)(srcadr, inter, inpkt);
660 }
661 
662 
663 /*
664  * list_peers - send a list of the peers
665  */
666 static void
667 list_peers(
668 	sockaddr_u *srcadr,
669 	endpt *inter,
670 	struct req_pkt *inpkt
671 	)
672 {
673 	struct info_peer_list *	ip;
674 	const struct peer *	pp;
675 
676 	ip = (struct info_peer_list *)prepare_pkt(srcadr, inter, inpkt,
677 	    v6sizeof(struct info_peer_list));
678 	for (pp = peer_list; pp != NULL && ip != NULL; pp = pp->p_link) {
679 		if (IS_IPV6(&pp->srcadr)) {
680 			if (!client_v6_capable)
681 				continue;
682 			ip->addr6 = SOCK_ADDR6(&pp->srcadr);
683 			ip->v6_flag = 1;
684 		} else {
685 			ip->addr = NSRCADR(&pp->srcadr);
686 			if (client_v6_capable)
687 				ip->v6_flag = 0;
688 		}
689 
690 		ip->port = NSRCPORT(&pp->srcadr);
691 		ip->hmode = pp->hmode;
692 		ip->flags = 0;
693 		if (pp->flags & FLAG_CONFIG)
694 			ip->flags |= INFO_FLAG_CONFIG;
695 		if (pp == sys_peer)
696 			ip->flags |= INFO_FLAG_SYSPEER;
697 		if (pp->status == CTL_PST_SEL_SYNCCAND)
698 			ip->flags |= INFO_FLAG_SEL_CANDIDATE;
699 		if (pp->status >= CTL_PST_SEL_SYSPEER)
700 			ip->flags |= INFO_FLAG_SHORTLIST;
701 		ip = (struct info_peer_list *)more_pkt();
702 	}	/* for pp */
703 
704 	flush_pkt();
705 }
706 
707 
708 /*
709  * list_peers_sum - return extended peer list
710  */
711 static void
712 list_peers_sum(
713 	sockaddr_u *srcadr,
714 	endpt *inter,
715 	struct req_pkt *inpkt
716 	)
717 {
718 	struct info_peer_summary *	ips;
719 	const struct peer *		pp;
720 	l_fp 				ltmp;
721 
722 	DPRINTF(3, ("wants peer list summary\n"));
723 
724 	ips = (struct info_peer_summary *)prepare_pkt(srcadr, inter, inpkt,
725 	    v6sizeof(struct info_peer_summary));
726 	for (pp = peer_list; pp != NULL && ips != NULL; pp = pp->p_link) {
727 		DPRINTF(4, ("sum: got one\n"));
728 		/*
729 		 * Be careful here not to return v6 peers when we
730 		 * want only v4.
731 		 */
732 		if (IS_IPV6(&pp->srcadr)) {
733 			if (!client_v6_capable)
734 				continue;
735 			ips->srcadr6 = SOCK_ADDR6(&pp->srcadr);
736 			ips->v6_flag = 1;
737 			if (pp->dstadr)
738 				ips->dstadr6 = SOCK_ADDR6(&pp->dstadr->sin);
739 			else
740 				ZERO(ips->dstadr6);
741 		} else {
742 			ips->srcadr = NSRCADR(&pp->srcadr);
743 			if (client_v6_capable)
744 				ips->v6_flag = 0;
745 
746 			if (pp->dstadr) {
747 				if (!pp->processed)
748 					ips->dstadr = NSRCADR(&pp->dstadr->sin);
749 				else {
750 					if (MDF_BCAST == pp->cast_flags)
751 						ips->dstadr = NSRCADR(&pp->dstadr->bcast);
752 					else if (pp->cast_flags) {
753 						ips->dstadr = NSRCADR(&pp->dstadr->sin);
754 						if (!ips->dstadr)
755 							ips->dstadr = NSRCADR(&pp->dstadr->bcast);
756 					}
757 				}
758 			} else {
759 				ips->dstadr = 0;
760 			}
761 		}
762 
763 		ips->srcport = NSRCPORT(&pp->srcadr);
764 		ips->stratum = pp->stratum;
765 		ips->hpoll = pp->hpoll;
766 		ips->ppoll = pp->ppoll;
767 		ips->reach = pp->reach;
768 		ips->flags = 0;
769 		if (pp == sys_peer)
770 			ips->flags |= INFO_FLAG_SYSPEER;
771 		if (pp->flags & FLAG_CONFIG)
772 			ips->flags |= INFO_FLAG_CONFIG;
773 		if (pp->flags & FLAG_REFCLOCK)
774 			ips->flags |= INFO_FLAG_REFCLOCK;
775 		if (pp->flags & FLAG_PREFER)
776 			ips->flags |= INFO_FLAG_PREFER;
777 		if (pp->flags & FLAG_BURST)
778 			ips->flags |= INFO_FLAG_BURST;
779 		if (pp->status == CTL_PST_SEL_SYNCCAND)
780 			ips->flags |= INFO_FLAG_SEL_CANDIDATE;
781 		if (pp->status >= CTL_PST_SEL_SYSPEER)
782 			ips->flags |= INFO_FLAG_SHORTLIST;
783 		ips->hmode = pp->hmode;
784 		ips->delay = HTONS_FP(DTOFP(pp->delay));
785 		DTOLFP(pp->offset, &ltmp);
786 		HTONL_FP(&ltmp, &ips->offset);
787 		ips->dispersion = HTONS_FP(DTOUFP(SQRT(pp->disp)));
788 
789 		ips = (struct info_peer_summary *)more_pkt();
790 	}	/* for pp */
791 
792 	flush_pkt();
793 }
794 
795 
796 /*
797  * peer_info - send information for one or more peers
798  */
799 static void
800 peer_info (
801 	sockaddr_u *srcadr,
802 	endpt *inter,
803 	struct req_pkt *inpkt
804 	)
805 {
806 	u_short			items;
807 	size_t			item_sz;
808 	char *			datap;
809 	struct info_peer_list	ipl;
810 	struct peer *		pp;
811 	struct info_peer *	ip;
812 	int			i;
813 	int			j;
814 	sockaddr_u		addr;
815 	l_fp			ltmp;
816 
817 	items = INFO_NITEMS(inpkt->err_nitems);
818 	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
819 	datap = inpkt->u.data;
820 	if (item_sz != sizeof(ipl)) {
821 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
822 		return;
823 	}
824 	ip = prepare_pkt(srcadr, inter, inpkt,
825 			 v6sizeof(struct info_peer));
826 	while (items-- > 0 && ip != NULL) {
827 		ZERO(ipl);
828 		memcpy(&ipl, datap, item_sz);
829 		ZERO_SOCK(&addr);
830 		NSRCPORT(&addr) = ipl.port;
831 		if (client_v6_capable && ipl.v6_flag) {
832 			AF(&addr) = AF_INET6;
833 			SOCK_ADDR6(&addr) = ipl.addr6;
834 		} else {
835 			AF(&addr) = AF_INET;
836 			NSRCADR(&addr) = ipl.addr;
837 		}
838 #ifdef ISC_PLATFORM_HAVESALEN
839 		addr.sa.sa_len = SOCKLEN(&addr);
840 #endif
841 		datap += item_sz;
842 
843 		pp = findexistingpeer(&addr, NULL, NULL, -1, 0, NULL);
844 		if (NULL == pp)
845 			continue;
846 		if (IS_IPV6(&pp->srcadr)) {
847 			if (pp->dstadr)
848 				ip->dstadr6 =
849 				    (MDF_BCAST == pp->cast_flags)
850 					? SOCK_ADDR6(&pp->dstadr->bcast)
851 					: SOCK_ADDR6(&pp->dstadr->sin);
852 			else
853 				ZERO(ip->dstadr6);
854 
855 			ip->srcadr6 = SOCK_ADDR6(&pp->srcadr);
856 			ip->v6_flag = 1;
857 		} else {
858 			if (pp->dstadr) {
859 				if (!pp->processed)
860 					ip->dstadr = NSRCADR(&pp->dstadr->sin);
861 				else {
862 					if (MDF_BCAST == pp->cast_flags)
863 						ip->dstadr = NSRCADR(&pp->dstadr->bcast);
864 					else if (pp->cast_flags) {
865 						ip->dstadr = NSRCADR(&pp->dstadr->sin);
866 						if (!ip->dstadr)
867 							ip->dstadr = NSRCADR(&pp->dstadr->bcast);
868 					}
869 				}
870 			} else
871 				ip->dstadr = 0;
872 
873 			ip->srcadr = NSRCADR(&pp->srcadr);
874 			if (client_v6_capable)
875 				ip->v6_flag = 0;
876 		}
877 		ip->srcport = NSRCPORT(&pp->srcadr);
878 		ip->flags = 0;
879 		if (pp == sys_peer)
880 			ip->flags |= INFO_FLAG_SYSPEER;
881 		if (pp->flags & FLAG_CONFIG)
882 			ip->flags |= INFO_FLAG_CONFIG;
883 		if (pp->flags & FLAG_REFCLOCK)
884 			ip->flags |= INFO_FLAG_REFCLOCK;
885 		if (pp->flags & FLAG_PREFER)
886 			ip->flags |= INFO_FLAG_PREFER;
887 		if (pp->flags & FLAG_BURST)
888 			ip->flags |= INFO_FLAG_BURST;
889 		if (pp->status == CTL_PST_SEL_SYNCCAND)
890 			ip->flags |= INFO_FLAG_SEL_CANDIDATE;
891 		if (pp->status >= CTL_PST_SEL_SYSPEER)
892 			ip->flags |= INFO_FLAG_SHORTLIST;
893 		ip->leap = pp->leap;
894 		ip->hmode = pp->hmode;
895 		ip->pmode = pp->pmode;
896 		ip->keyid = pp->keyid;
897 		ip->stratum = pp->stratum;
898 		ip->ppoll = pp->ppoll;
899 		ip->hpoll = pp->hpoll;
900 		ip->precision = pp->precision;
901 		ip->version = pp->version;
902 		ip->reach = pp->reach;
903 		ip->unreach = (u_char)pp->unreach;
904 		ip->flash = (u_char)pp->flash;
905 		ip->flash2 = (u_short)pp->flash;
906 		ip->estbdelay = HTONS_FP(DTOFP(pp->delay));
907 		ip->ttl = (u_char)pp->ttl;
908 		ip->associd = htons(pp->associd);
909 		ip->rootdelay = HTONS_FP(DTOUFP(pp->rootdelay));
910 		ip->rootdispersion = HTONS_FP(DTOUFP(pp->rootdisp));
911 		ip->refid = pp->refid;
912 		HTONL_FP(&pp->reftime, &ip->reftime);
913 		HTONL_FP(&pp->aorg, &ip->org);
914 		HTONL_FP(&pp->rec, &ip->rec);
915 		HTONL_FP(&pp->xmt, &ip->xmt);
916 		j = pp->filter_nextpt - 1;
917 		for (i = 0; i < NTP_SHIFT; i++, j--) {
918 			if (j < 0)
919 				j = NTP_SHIFT-1;
920 			ip->filtdelay[i] = HTONS_FP(DTOFP(pp->filter_delay[j]));
921 			DTOLFP(pp->filter_offset[j], &ltmp);
922 			HTONL_FP(&ltmp, &ip->filtoffset[i]);
923 			ip->order[i] = (u_char)((pp->filter_nextpt +
924 						 NTP_SHIFT - 1) -
925 						pp->filter_order[i]);
926 			if (ip->order[i] >= NTP_SHIFT)
927 				ip->order[i] -= NTP_SHIFT;
928 		}
929 		DTOLFP(pp->offset, &ltmp);
930 		HTONL_FP(&ltmp, &ip->offset);
931 		ip->delay = HTONS_FP(DTOFP(pp->delay));
932 		ip->dispersion = HTONS_FP(DTOUFP(SQRT(pp->disp)));
933 		ip->selectdisp = HTONS_FP(DTOUFP(SQRT(pp->jitter)));
934 		ip = more_pkt();
935 	}
936 	flush_pkt();
937 }
938 
939 
940 /*
941  * peer_stats - send statistics for one or more peers
942  */
943 static void
944 peer_stats (
945 	sockaddr_u *srcadr,
946 	endpt *inter,
947 	struct req_pkt *inpkt
948 	)
949 {
950 	u_short			items;
951 	size_t			item_sz;
952 	char *			datap;
953 	struct info_peer_list	ipl;
954 	struct peer *		pp;
955 	struct info_peer_stats *ip;
956 	sockaddr_u addr;
957 
958 	DPRINTF(1, ("peer_stats: called\n"));
959 	items = INFO_NITEMS(inpkt->err_nitems);
960 	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
961 	datap = inpkt->u.data;
962 	if (item_sz > sizeof(ipl)) {
963 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
964 		return;
965 	}
966 	ip = prepare_pkt(srcadr, inter, inpkt,
967 			 v6sizeof(struct info_peer_stats));
968 	while (items-- > 0 && ip != NULL) {
969 		ZERO(ipl);
970 		memcpy(&ipl, datap, item_sz);
971 		ZERO(addr);
972 		NSRCPORT(&addr) = ipl.port;
973 		if (client_v6_capable && ipl.v6_flag) {
974 			AF(&addr) = AF_INET6;
975 			SOCK_ADDR6(&addr) = ipl.addr6;
976 		} else {
977 			AF(&addr) = AF_INET;
978 			NSRCADR(&addr) = ipl.addr;
979 		}
980 #ifdef ISC_PLATFORM_HAVESALEN
981 		addr.sa.sa_len = SOCKLEN(&addr);
982 #endif
983 		DPRINTF(1, ("peer_stats: looking for %s, %d, %d\n",
984 			    stoa(&addr), ipl.port, NSRCPORT(&addr)));
985 
986 		datap += item_sz;
987 
988 		pp = findexistingpeer(&addr, NULL, NULL, -1, 0, NULL);
989 		if (NULL == pp)
990 			continue;
991 
992 		DPRINTF(1, ("peer_stats: found %s\n", stoa(&addr)));
993 
994 		if (IS_IPV4(&pp->srcadr)) {
995 			if (pp->dstadr) {
996 				if (!pp->processed)
997 					ip->dstadr = NSRCADR(&pp->dstadr->sin);
998 				else {
999 					if (MDF_BCAST == pp->cast_flags)
1000 						ip->dstadr = NSRCADR(&pp->dstadr->bcast);
1001 					else if (pp->cast_flags) {
1002 						ip->dstadr = NSRCADR(&pp->dstadr->sin);
1003 						if (!ip->dstadr)
1004 							ip->dstadr = NSRCADR(&pp->dstadr->bcast);
1005 					}
1006 				}
1007 			} else
1008 				ip->dstadr = 0;
1009 
1010 			ip->srcadr = NSRCADR(&pp->srcadr);
1011 			if (client_v6_capable)
1012 				ip->v6_flag = 0;
1013 		} else {
1014 			if (pp->dstadr)
1015 				ip->dstadr6 =
1016 				    (MDF_BCAST == pp->cast_flags)
1017 					? SOCK_ADDR6(&pp->dstadr->bcast)
1018 					: SOCK_ADDR6(&pp->dstadr->sin);
1019 			else
1020 				ZERO(ip->dstadr6);
1021 
1022 			ip->srcadr6 = SOCK_ADDR6(&pp->srcadr);
1023 			ip->v6_flag = 1;
1024 		}
1025 		ip->srcport = NSRCPORT(&pp->srcadr);
1026 		ip->flags = 0;
1027 		if (pp == sys_peer)
1028 		    ip->flags |= INFO_FLAG_SYSPEER;
1029 		if (pp->flags & FLAG_CONFIG)
1030 		    ip->flags |= INFO_FLAG_CONFIG;
1031 		if (pp->flags & FLAG_REFCLOCK)
1032 		    ip->flags |= INFO_FLAG_REFCLOCK;
1033 		if (pp->flags & FLAG_PREFER)
1034 		    ip->flags |= INFO_FLAG_PREFER;
1035 		if (pp->flags & FLAG_BURST)
1036 		    ip->flags |= INFO_FLAG_BURST;
1037 		if (pp->flags & FLAG_IBURST)
1038 		    ip->flags |= INFO_FLAG_IBURST;
1039 		if (pp->status == CTL_PST_SEL_SYNCCAND)
1040 		    ip->flags |= INFO_FLAG_SEL_CANDIDATE;
1041 		if (pp->status >= CTL_PST_SEL_SYSPEER)
1042 		    ip->flags |= INFO_FLAG_SHORTLIST;
1043 		ip->flags = htons(ip->flags);
1044 		ip->timereceived = htonl((u_int32)(current_time - pp->timereceived));
1045 		ip->timetosend = htonl(pp->nextdate - current_time);
1046 		ip->timereachable = htonl((u_int32)(current_time - pp->timereachable));
1047 		ip->sent = htonl((u_int32)(pp->sent));
1048 		ip->processed = htonl((u_int32)(pp->processed));
1049 		ip->badauth = htonl((u_int32)(pp->badauth));
1050 		ip->bogusorg = htonl((u_int32)(pp->bogusorg));
1051 		ip->oldpkt = htonl((u_int32)(pp->oldpkt));
1052 		ip->seldisp = htonl((u_int32)(pp->seldisptoolarge));
1053 		ip->selbroken = htonl((u_int32)(pp->selbroken));
1054 		ip->candidate = pp->status;
1055 		ip = (struct info_peer_stats *)more_pkt();
1056 	}
1057 	flush_pkt();
1058 }
1059 
1060 
1061 /*
1062  * sys_info - return system info
1063  */
1064 static void
1065 sys_info(
1066 	sockaddr_u *srcadr,
1067 	endpt *inter,
1068 	struct req_pkt *inpkt
1069 	)
1070 {
1071 	register struct info_sys *is;
1072 
1073 	is = (struct info_sys *)prepare_pkt(srcadr, inter, inpkt,
1074 	    v6sizeof(struct info_sys));
1075 
1076 	if (sys_peer) {
1077 		if (IS_IPV4(&sys_peer->srcadr)) {
1078 			is->peer = NSRCADR(&sys_peer->srcadr);
1079 			if (client_v6_capable)
1080 				is->v6_flag = 0;
1081 		} else if (client_v6_capable) {
1082 			is->peer6 = SOCK_ADDR6(&sys_peer->srcadr);
1083 			is->v6_flag = 1;
1084 		}
1085 		is->peer_mode = sys_peer->hmode;
1086 	} else {
1087 		is->peer = 0;
1088 		if (client_v6_capable) {
1089 			is->v6_flag = 0;
1090 		}
1091 		is->peer_mode = 0;
1092 	}
1093 
1094 	is->leap = sys_leap;
1095 	is->stratum = sys_stratum;
1096 	is->precision = sys_precision;
1097 	is->rootdelay = htonl(DTOFP(sys_rootdelay));
1098 	is->rootdispersion = htonl(DTOUFP(sys_rootdisp));
1099 	is->frequency = htonl(DTOFP(sys_jitter));
1100 	is->stability = htonl(DTOUFP(clock_stability * 1e6));
1101 	is->refid = sys_refid;
1102 	HTONL_FP(&sys_reftime, &is->reftime);
1103 
1104 	is->poll = sys_poll;
1105 
1106 	is->flags = 0;
1107 	if (sys_authenticate)
1108 		is->flags |= INFO_FLAG_AUTHENTICATE;
1109 	if (sys_bclient || sys_mclient)
1110 		is->flags |= INFO_FLAG_BCLIENT;
1111 #ifdef REFCLOCK
1112 	if (cal_enable)
1113 		is->flags |= INFO_FLAG_CAL;
1114 #endif /* REFCLOCK */
1115 	if (kern_enable)
1116 		is->flags |= INFO_FLAG_KERNEL;
1117 	if (mon_enabled != MON_OFF)
1118 		is->flags |= INFO_FLAG_MONITOR;
1119 	if (ntp_enable)
1120 		is->flags |= INFO_FLAG_NTP;
1121 	if (hardpps_enable)
1122 		is->flags |= INFO_FLAG_PPS_SYNC;
1123 	if (stats_control)
1124 		is->flags |= INFO_FLAG_FILEGEN;
1125 	is->bdelay = HTONS_FP(DTOFP(sys_bdelay));
1126 	HTONL_UF(sys_authdelay.l_uf, &is->authdelay);
1127 	(void) more_pkt();
1128 	flush_pkt();
1129 }
1130 
1131 
1132 /*
1133  * sys_stats - return system statistics
1134  */
1135 static void
1136 sys_stats(
1137 	sockaddr_u *srcadr,
1138 	endpt *inter,
1139 	struct req_pkt *inpkt
1140 	)
1141 {
1142 	register struct info_sys_stats *ss;
1143 
1144 	ss = (struct info_sys_stats *)prepare_pkt(srcadr, inter, inpkt,
1145 		sizeof(struct info_sys_stats));
1146 	ss->timeup = htonl((u_int32)current_time);
1147 	ss->timereset = htonl((u_int32)(current_time - sys_stattime));
1148 	ss->denied = htonl((u_int32)sys_restricted);
1149 	ss->oldversionpkt = htonl((u_int32)sys_oldversion);
1150 	ss->newversionpkt = htonl((u_int32)sys_newversion);
1151 	ss->unknownversion = htonl((u_int32)sys_declined);
1152 	ss->badlength = htonl((u_int32)sys_badlength);
1153 	ss->processed = htonl((u_int32)sys_processed);
1154 	ss->badauth = htonl((u_int32)sys_badauth);
1155 	ss->limitrejected = htonl((u_int32)sys_limitrejected);
1156 	ss->received = htonl((u_int32)sys_received);
1157 	ss->lamport = htonl((u_int32)sys_lamport);
1158 	ss->tsrounding = htonl((u_int32)sys_tsrounding);
1159 	(void) more_pkt();
1160 	flush_pkt();
1161 }
1162 
1163 
1164 /*
1165  * mem_stats - return memory statistics
1166  */
1167 static void
1168 mem_stats(
1169 	sockaddr_u *srcadr,
1170 	endpt *inter,
1171 	struct req_pkt *inpkt
1172 	)
1173 {
1174 	register struct info_mem_stats *ms;
1175 	register int i;
1176 
1177 	ms = (struct info_mem_stats *)prepare_pkt(srcadr, inter, inpkt,
1178 						  sizeof(struct info_mem_stats));
1179 
1180 	ms->timereset = htonl((u_int32)(current_time - peer_timereset));
1181 	ms->totalpeermem = htons((u_short)total_peer_structs);
1182 	ms->freepeermem = htons((u_short)peer_free_count);
1183 	ms->findpeer_calls = htonl((u_int32)findpeer_calls);
1184 	ms->allocations = htonl((u_int32)peer_allocations);
1185 	ms->demobilizations = htonl((u_int32)peer_demobilizations);
1186 
1187 	for (i = 0; i < NTP_HASH_SIZE; i++)
1188 		ms->hashcount[i] = (u_char)
1189 		    min((u_int)peer_hash_count[i], UCHAR_MAX);
1190 
1191 	(void) more_pkt();
1192 	flush_pkt();
1193 }
1194 
1195 
1196 /*
1197  * io_stats - return io statistics
1198  */
1199 static void
1200 io_stats(
1201 	sockaddr_u *srcadr,
1202 	endpt *inter,
1203 	struct req_pkt *inpkt
1204 	)
1205 {
1206 	struct info_io_stats *io;
1207 
1208 	io = (struct info_io_stats *)prepare_pkt(srcadr, inter, inpkt,
1209 						 sizeof(struct info_io_stats));
1210 
1211 	io->timereset = htonl((u_int32)(current_time - io_timereset));
1212 	io->totalrecvbufs = htons((u_short) total_recvbuffs());
1213 	io->freerecvbufs = htons((u_short) free_recvbuffs());
1214 	io->fullrecvbufs = htons((u_short) full_recvbuffs());
1215 	io->lowwater = htons((u_short) lowater_additions());
1216 	io->dropped = htonl((u_int32)packets_dropped);
1217 	io->ignored = htonl((u_int32)packets_ignored);
1218 	io->received = htonl((u_int32)packets_received);
1219 	io->sent = htonl((u_int32)packets_sent);
1220 	io->notsent = htonl((u_int32)packets_notsent);
1221 	io->interrupts = htonl((u_int32)handler_calls);
1222 	io->int_received = htonl((u_int32)handler_pkts);
1223 
1224 	(void) more_pkt();
1225 	flush_pkt();
1226 }
1227 
1228 
1229 /*
1230  * timer_stats - return timer statistics
1231  */
1232 static void
1233 timer_stats(
1234 	sockaddr_u *		srcadr,
1235 	endpt *			inter,
1236 	struct req_pkt *	inpkt
1237 	)
1238 {
1239 	struct info_timer_stats *	ts;
1240 	u_long				sincereset;
1241 
1242 	ts = (struct info_timer_stats *)prepare_pkt(srcadr, inter,
1243 						    inpkt, sizeof(*ts));
1244 
1245 	sincereset = current_time - timer_timereset;
1246 	ts->timereset = htonl((u_int32)sincereset);
1247 	ts->alarms = ts->timereset;
1248 	ts->overflows = htonl((u_int32)alarm_overflow);
1249 	ts->xmtcalls = htonl((u_int32)timer_xmtcalls);
1250 
1251 	(void) more_pkt();
1252 	flush_pkt();
1253 }
1254 
1255 
1256 /*
1257  * loop_info - return the current state of the loop filter
1258  */
1259 static void
1260 loop_info(
1261 	sockaddr_u *srcadr,
1262 	endpt *inter,
1263 	struct req_pkt *inpkt
1264 	)
1265 {
1266 	struct info_loop *li;
1267 	l_fp ltmp;
1268 
1269 	li = (struct info_loop *)prepare_pkt(srcadr, inter, inpkt,
1270 	    sizeof(struct info_loop));
1271 
1272 	DTOLFP(last_offset, &ltmp);
1273 	HTONL_FP(&ltmp, &li->last_offset);
1274 	DTOLFP(drift_comp * 1e6, &ltmp);
1275 	HTONL_FP(&ltmp, &li->drift_comp);
1276 	li->compliance = htonl((u_int32)(tc_counter));
1277 	li->watchdog_timer = htonl((u_int32)(current_time - sys_epoch));
1278 
1279 	(void) more_pkt();
1280 	flush_pkt();
1281 }
1282 
1283 
1284 /*
1285  * do_conf - add a peer to the configuration list
1286  */
1287 static void
1288 do_conf(
1289 	sockaddr_u *srcadr,
1290 	endpt *inter,
1291 	struct req_pkt *inpkt
1292 	)
1293 {
1294 	u_short			items;
1295 	size_t			item_sz;
1296 	u_int			fl;
1297 	char *			datap;
1298 	struct conf_peer	temp_cp;
1299 	sockaddr_u		peeraddr;
1300 
1301 	/*
1302 	 * Do a check of everything to see that it looks
1303 	 * okay.  If not, complain about it.  Note we are
1304 	 * very picky here.
1305 	 */
1306 	items = INFO_NITEMS(inpkt->err_nitems);
1307 	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1308 	datap = inpkt->u.data;
1309 	if (item_sz > sizeof(temp_cp)) {
1310 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1311 		return;
1312 	}
1313 
1314 	while (items-- > 0) {
1315 		ZERO(temp_cp);
1316 		memcpy(&temp_cp, datap, item_sz);
1317 		ZERO_SOCK(&peeraddr);
1318 
1319 		fl = 0;
1320 		if (temp_cp.flags & CONF_FLAG_PREFER)
1321 			fl |= FLAG_PREFER;
1322 		if (temp_cp.flags & CONF_FLAG_BURST)
1323 			fl |= FLAG_BURST;
1324 		if (temp_cp.flags & CONF_FLAG_IBURST)
1325 			fl |= FLAG_IBURST;
1326 #ifdef AUTOKEY
1327 		if (temp_cp.flags & CONF_FLAG_SKEY)
1328 			fl |= FLAG_SKEY;
1329 #endif	/* AUTOKEY */
1330 		if (client_v6_capable && temp_cp.v6_flag) {
1331 			AF(&peeraddr) = AF_INET6;
1332 			SOCK_ADDR6(&peeraddr) = temp_cp.peeraddr6;
1333 		} else {
1334 			AF(&peeraddr) = AF_INET;
1335 			NSRCADR(&peeraddr) = temp_cp.peeraddr;
1336 			/*
1337 			 * Make sure the address is valid
1338 			 */
1339 			if (!ISREFCLOCKADR(&peeraddr) &&
1340 			    ISBADADR(&peeraddr)) {
1341 				req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1342 				return;
1343 			}
1344 
1345 		}
1346 		NSRCPORT(&peeraddr) = htons(NTP_PORT);
1347 #ifdef ISC_PLATFORM_HAVESALEN
1348 		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
1349 #endif
1350 
1351 		/* check mode value: 0 <= hmode <= 6
1352 		 *
1353 		 * There's no good global define for that limit, and
1354 		 * using a magic define is as good (or bad, actually) as
1355 		 * a magic number. So we use the highest possible peer
1356 		 * mode, and that is MODE_BCLIENT.
1357 		 *
1358 		 * [Bug 3009] claims that a problem occurs for hmode > 7,
1359 		 * but the code in ntp_peer.c indicates trouble for any
1360 		 * hmode > 6 ( --> MODE_BCLIENT).
1361 		 */
1362 		if (temp_cp.hmode > MODE_BCLIENT) {
1363 			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1364 			return;
1365 		}
1366 
1367 		/* Any more checks on the values? Unchecked at this
1368 		 * point:
1369 		 *   - version
1370 		 *   - ttl
1371 		 *   - keyid
1372 		 *
1373 		 *   - minpoll/maxpoll, but they are treated properly
1374 		 *     for all cases internally. Checking not necessary.
1375 		 *
1376 		 * Note that we ignore any previously-specified ippeerlimit.
1377 		 * If we're told to create the peer, we create the peer.
1378 		 */
1379 
1380 		/* finally create the peer */
1381 		if (peer_config(&peeraddr, NULL, NULL, -1,
1382 		    temp_cp.hmode, temp_cp.version, temp_cp.minpoll,
1383 		    temp_cp.maxpoll, fl, temp_cp.ttl, temp_cp.keyid,
1384 		    NULL) == 0)
1385 		{
1386 			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
1387 			return;
1388 		}
1389 
1390 		datap += item_sz;
1391 	}
1392 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1393 }
1394 
1395 
1396 /*
1397  * do_unconf - remove a peer from the configuration list
1398  */
1399 static void
1400 do_unconf(
1401 	sockaddr_u *	srcadr,
1402 	endpt *		inter,
1403 	struct req_pkt *inpkt
1404 	)
1405 {
1406 	u_short			items;
1407 	size_t			item_sz;
1408 	char *			datap;
1409 	struct conf_unpeer	temp_cp;
1410 	struct peer *		p;
1411 	sockaddr_u		peeraddr;
1412 	int			loops;
1413 
1414 	/*
1415 	 * This is a bit unstructured, but I like to be careful.
1416 	 * We check to see that every peer exists and is actually
1417 	 * configured.  If so, we remove them.  If not, we return
1418 	 * an error.
1419 	 *
1420 	 * [Bug 3011] Even if we checked all peers given in the request
1421 	 * in a dry run, there's still a chance that the caller played
1422 	 * unfair and gave the same peer multiple times. So we still
1423 	 * have to be prepared for nasty surprises in the second run ;)
1424 	 */
1425 
1426 	/* basic consistency checks */
1427 	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1428 	if (item_sz > sizeof(temp_cp)) {
1429 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1430 		return;
1431 	}
1432 
1433 	/* now do two runs: first a dry run, then a busy one */
1434 	for (loops = 0; loops != 2; ++loops) {
1435 		items = INFO_NITEMS(inpkt->err_nitems);
1436 		datap = inpkt->u.data;
1437 		while (items-- > 0) {
1438 			/* copy from request to local */
1439 			ZERO(temp_cp);
1440 			memcpy(&temp_cp, datap, item_sz);
1441 			/* get address structure */
1442 			ZERO_SOCK(&peeraddr);
1443 			if (client_v6_capable && temp_cp.v6_flag) {
1444 				AF(&peeraddr) = AF_INET6;
1445 				SOCK_ADDR6(&peeraddr) = temp_cp.peeraddr6;
1446 			} else {
1447 				AF(&peeraddr) = AF_INET;
1448 				NSRCADR(&peeraddr) = temp_cp.peeraddr;
1449 			}
1450 			SET_PORT(&peeraddr, NTP_PORT);
1451 #ifdef ISC_PLATFORM_HAVESALEN
1452 			peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
1453 #endif
1454 			DPRINTF(1, ("searching for %s\n",
1455 				    stoa(&peeraddr)));
1456 
1457 			/* search for matching configred(!) peer */
1458 			p = NULL;
1459 			do {
1460 				p = findexistingpeer(
1461 					&peeraddr, NULL, p, -1, 0, NULL);
1462 			} while (p && !(FLAG_CONFIG & p->flags));
1463 
1464 			if (!loops && !p) {
1465 				/* Item not found in dry run -- bail! */
1466 				req_ack(srcadr, inter, inpkt,
1467 					INFO_ERR_NODATA);
1468 				return;
1469 			} else if (loops && p) {
1470 				/* Item found in busy run -- remove! */
1471 				peer_clear(p, "GONE");
1472 				unpeer(p);
1473 			}
1474 			datap += item_sz;
1475 		}
1476 	}
1477 
1478 	/* report success */
1479 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1480 }
1481 
1482 
1483 /*
1484  * set_sys_flag - set system flags
1485  */
1486 static void
1487 set_sys_flag(
1488 	sockaddr_u *srcadr,
1489 	endpt *inter,
1490 	struct req_pkt *inpkt
1491 	)
1492 {
1493 	setclr_flags(srcadr, inter, inpkt, 1);
1494 }
1495 
1496 
1497 /*
1498  * clr_sys_flag - clear system flags
1499  */
1500 static void
1501 clr_sys_flag(
1502 	sockaddr_u *srcadr,
1503 	endpt *inter,
1504 	struct req_pkt *inpkt
1505 	)
1506 {
1507 	setclr_flags(srcadr, inter, inpkt, 0);
1508 }
1509 
1510 
1511 /*
1512  * setclr_flags - do the grunge work of flag setting/clearing
1513  */
1514 static void
1515 setclr_flags(
1516 	sockaddr_u *srcadr,
1517 	endpt *inter,
1518 	struct req_pkt *inpkt,
1519 	u_long set
1520 	)
1521 {
1522 	struct conf_sys_flags *sf;
1523 	u_int32 flags;
1524 
1525 	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
1526 		msyslog(LOG_ERR, "setclr_flags: err_nitems > 1");
1527 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1528 		return;
1529 	}
1530 
1531 	sf = (struct conf_sys_flags *)&inpkt->u;
1532 	flags = ntohl(sf->flags);
1533 
1534 	if (flags & ~(SYS_FLAG_BCLIENT | SYS_FLAG_PPS |
1535 		      SYS_FLAG_NTP | SYS_FLAG_KERNEL | SYS_FLAG_MONITOR |
1536 		      SYS_FLAG_FILEGEN | SYS_FLAG_AUTH | SYS_FLAG_CAL)) {
1537 		msyslog(LOG_ERR, "setclr_flags: extra flags: %#x",
1538 			flags & ~(SYS_FLAG_BCLIENT | SYS_FLAG_PPS |
1539 				  SYS_FLAG_NTP | SYS_FLAG_KERNEL |
1540 				  SYS_FLAG_MONITOR | SYS_FLAG_FILEGEN |
1541 				  SYS_FLAG_AUTH | SYS_FLAG_CAL));
1542 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1543 		return;
1544 	}
1545 
1546 	if (flags & SYS_FLAG_BCLIENT)
1547 		proto_config(PROTO_BROADCLIENT, set, 0., NULL);
1548 	if (flags & SYS_FLAG_PPS)
1549 		proto_config(PROTO_PPS, set, 0., NULL);
1550 	if (flags & SYS_FLAG_NTP)
1551 		proto_config(PROTO_NTP, set, 0., NULL);
1552 	if (flags & SYS_FLAG_KERNEL)
1553 		proto_config(PROTO_KERNEL, set, 0., NULL);
1554 	if (flags & SYS_FLAG_MONITOR)
1555 		proto_config(PROTO_MONITOR, set, 0., NULL);
1556 	if (flags & SYS_FLAG_FILEGEN)
1557 		proto_config(PROTO_FILEGEN, set, 0., NULL);
1558 	if (flags & SYS_FLAG_AUTH)
1559 		proto_config(PROTO_AUTHENTICATE, set, 0., NULL);
1560 	if (flags & SYS_FLAG_CAL)
1561 		proto_config(PROTO_CAL, set, 0., NULL);
1562 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1563 }
1564 
1565 /* There have been some issues with the restrict list processing,
1566  * ranging from problems with deep recursion (resulting in stack
1567  * overflows) and overfull reply buffers.
1568  *
1569  * To avoid this trouble the list reversal is done iteratively using a
1570  * scratch pad.
1571  */
1572 typedef struct RestrictStack4 RestrictStack4T;
1573 struct RestrictStack4 {
1574 	RestrictStack4T   *link;
1575 	size_t            fcnt;
1576 	const struct restrict_4 *pres[63];
1577 };
1578 
1579 static size_t
1580 getStackSheetSize4(
1581 	RestrictStack4T *sp
1582 	)
1583 {
1584 	if (sp)
1585 		return sizeof(sp->pres)/sizeof(sp->pres[0]);
1586 	return 0u;
1587 }
1588 
1589 static int/*BOOL*/
1590 pushRestriction4(
1591 	RestrictStack4T  **spp,
1592 	const struct restrict_4 *ptr
1593 	)
1594 {
1595 	RestrictStack4T *sp;
1596 
1597 	if (NULL == (sp = *spp) || 0 == sp->fcnt) {
1598 		/* need another sheet in the scratch pad */
1599 		sp = emalloc(sizeof(*sp));
1600 		sp->link = *spp;
1601 		sp->fcnt = getStackSheetSize4(sp);
1602 		*spp = sp;
1603 	}
1604 	sp->pres[--sp->fcnt] = ptr;
1605 	return TRUE;
1606 }
1607 
1608 static int/*BOOL*/
1609 popRestriction4(
1610 	RestrictStack4T   **spp,
1611 	const struct restrict_4 **opp
1612 	)
1613 {
1614 	RestrictStack4T *sp;
1615 
1616 	if (NULL == (sp = *spp) || sp->fcnt >= getStackSheetSize4(sp))
1617 		return FALSE;
1618 
1619 	*opp = sp->pres[sp->fcnt++];
1620 	if (sp->fcnt >= getStackSheetSize4(sp)) {
1621 		/* discard sheet from scratch pad */
1622 		*spp = sp->link;
1623 		free(sp);
1624 	}
1625 	return TRUE;
1626 }
1627 
1628 static void
1629 flushRestrictionStack4(
1630 	RestrictStack4T **spp
1631 	)
1632 {
1633 	RestrictStack4T *sp;
1634 
1635 	while (NULL != (sp = *spp)) {
1636 		*spp = sp->link;
1637 		free(sp);
1638 	}
1639 }
1640 
1641 /*
1642  * list_restrict4 - iterative helper for list_restrict dumps IPv4
1643  *		    restriction list in reverse order.
1644  */
1645 static void
1646 list_restrict4(
1647 	const struct restrict_4 *	res,
1648 	struct info_restrict **	ppir
1649 	)
1650 {
1651 	RestrictStack4T *	rpad;
1652 	struct info_restrict *	pir;
1653 
1654 	pir = *ppir;
1655 	for (rpad = NULL; res; res = res->link)
1656 		if (!pushRestriction4(&rpad, res))
1657 			break;
1658 
1659 	while (pir && popRestriction4(&rpad, &res)) {
1660 		pir->addr = htonl(res->v4.addr);
1661 		if (client_v6_capable)
1662 			pir->v6_flag = 0;
1663 		pir->mask = htonl(res->v4.mask);
1664 		pir->count = htonl(res->ri.count);
1665 		pir->rflags = htons(res->ri.rflags);
1666 		pir->mflags = htons(res->ri.mflags);
1667 		pir = (struct info_restrict *)more_pkt();
1668 	}
1669 	flushRestrictionStack4(&rpad);
1670 	*ppir = pir;
1671 }
1672 
1673 typedef struct RestrictStack6 RestrictStack6T;
1674 struct RestrictStack6 {
1675 	RestrictStack6T   *link;
1676 	size_t            fcnt;
1677 	const struct restrict_6 *pres[63];
1678 };
1679 
1680 static size_t
1681 getStackSheetSize6(
1682 	RestrictStack6T *sp
1683 	)
1684 {
1685 	if (sp)
1686 		return sizeof(sp->pres)/sizeof(sp->pres[0]);
1687 	return 0u;
1688 }
1689 
1690 static int/*BOOL*/
1691 pushRestriction6(
1692 	RestrictStack6T  **spp,
1693 	const struct restrict_6 *ptr
1694 	)
1695 {
1696 	RestrictStack6T *sp;
1697 
1698 	if (NULL == (sp = *spp) || 0 == sp->fcnt) {
1699 		/* need another sheet in the scratch pad */
1700 		sp = emalloc(sizeof(*sp));
1701 		sp->link = *spp;
1702 		sp->fcnt = getStackSheetSize6(sp);
1703 		*spp = sp;
1704 	}
1705 	sp->pres[--sp->fcnt] = ptr;
1706 	return TRUE;
1707 }
1708 
1709 static int/*BOOL*/
1710 popRestriction6(
1711 	RestrictStack6T   **spp,
1712 	const struct restrict_6 **opp
1713 	)
1714 {
1715 	RestrictStack6T *sp;
1716 
1717 	if (NULL == (sp = *spp) || sp->fcnt >= getStackSheetSize6(sp))
1718 		return FALSE;
1719 
1720 	*opp = sp->pres[sp->fcnt++];
1721 	if (sp->fcnt >= getStackSheetSize6(sp)) {
1722 		/* discard sheet from scratch pad */
1723 		*spp = sp->link;
1724 		free(sp);
1725 	}
1726 	return TRUE;
1727 }
1728 
1729 static void
1730 flushRestrictionStack6(
1731 	RestrictStack6T **spp
1732 	)
1733 {
1734 	RestrictStack6T *sp;
1735 
1736 	while (NULL != (sp = *spp)) {
1737 		*spp = sp->link;
1738 		free(sp);
1739 	}
1740 }
1741 
1742 /*
1743  * list_restrict6 - iterative helper for list_restrict dumps IPv6
1744  *		    restriction list in reverse order.
1745  */
1746 static void
1747 list_restrict6(
1748 	const struct restrict_6 *	res,
1749 	struct info_restrict **	ppir
1750 	)
1751 {
1752 	RestrictStack6T *	rpad;
1753 	struct info_restrict *	pir;
1754 
1755 	pir = *ppir;
1756 	for (rpad = NULL; res; res = res->link)
1757 		if (!pushRestriction6(&rpad, res))
1758 			break;
1759 
1760 	while (pir && popRestriction6(&rpad, &res)) {
1761 		pir->addr6 = res->v6.addr;
1762 		pir->mask6 = res->v6.mask;
1763 		pir->v6_flag = 1;
1764 		pir->count = htonl(res->ri.count);
1765 		pir->rflags = htons(res->ri.rflags);
1766 		pir->mflags = htons(res->ri.mflags);
1767 		pir = (struct info_restrict *)more_pkt();
1768 	}
1769 	flushRestrictionStack6(&rpad);
1770 	*ppir = pir;
1771 }
1772 
1773 
1774 /*
1775  * list_restrict - return the restrict list
1776  */
1777 static void
1778 list_restrict(
1779 	sockaddr_u *srcadr,
1780 	endpt *inter,
1781 	struct req_pkt *inpkt
1782 	)
1783 {
1784 	struct info_restrict *ir;
1785 
1786 	DPRINTF(3, ("wants restrict list summary\n"));
1787 
1788 	ir = (struct info_restrict *)prepare_pkt(srcadr, inter, inpkt,
1789 	    v6sizeof(struct info_restrict));
1790 
1791 	/*
1792 	 * The restriction lists are kept sorted in the reverse order
1793 	 * than they were originally.  To preserve the output semantics,
1794 	 * dump each list in reverse order. The workers take care of that.
1795 	 */
1796 	list_restrict4(restrictlist4, &ir);
1797 	if (client_v6_capable)
1798 		list_restrict6(restrictlist6, &ir);
1799 	flush_pkt();
1800 }
1801 
1802 
1803 /*
1804  * do_resaddflags - add flags to a restrict entry (or create one)
1805  */
1806 static void
1807 do_resaddflags(
1808 	sockaddr_u *srcadr,
1809 	endpt *inter,
1810 	struct req_pkt *inpkt
1811 	)
1812 {
1813 	do_restrict(srcadr, inter, inpkt, RESTRICT_FLAGS);
1814 }
1815 
1816 
1817 
1818 /*
1819  * do_ressubflags - remove flags from a restrict entry
1820  */
1821 static void
1822 do_ressubflags(
1823 	sockaddr_u *srcadr,
1824 	endpt *inter,
1825 	struct req_pkt *inpkt
1826 	)
1827 {
1828 	do_restrict(srcadr, inter, inpkt, RESTRICT_UNFLAG);
1829 }
1830 
1831 
1832 /*
1833  * do_unrestrict - remove a restrict entry from the list
1834  */
1835 static void
1836 do_unrestrict(
1837 	sockaddr_u *srcadr,
1838 	endpt *inter,
1839 	struct req_pkt *inpkt
1840 	)
1841 {
1842 	do_restrict(srcadr, inter, inpkt, RESTRICT_REMOVE);
1843 }
1844 
1845 
1846 /*
1847  * do_restrict - do the dirty stuff of dealing with restrictions
1848  */
1849 static void
1850 do_restrict(
1851 	sockaddr_u *srcadr,
1852 	endpt *inter,
1853 	struct req_pkt *inpkt,
1854 	restrict_op op
1855 	)
1856 {
1857 	char *			datap;
1858 	struct conf_restrict	cr;
1859 	u_short			items;
1860 	size_t			item_sz;
1861 	sockaddr_u		matchaddr;
1862 	sockaddr_u		matchmask;
1863 	int			bad;
1864 	int/*BOOL*/		success;
1865 
1866 	switch(op) {
1867 	    case RESTRICT_FLAGS:
1868 	    case RESTRICT_UNFLAG:
1869 	    case RESTRICT_REMOVE:
1870 	    case RESTRICT_REMOVEIF:
1871 	    	break;
1872 
1873 	    default:
1874 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1875 		return;
1876 	}
1877 
1878 	/*
1879 	 * Do a check of the flags to make sure that only
1880 	 * the NTPPORT flag is set, if any.  If not, complain
1881 	 * about it.  Note we are very picky here.
1882 	 */
1883 	items = INFO_NITEMS(inpkt->err_nitems);
1884 	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1885 	datap = inpkt->u.data;
1886 	if (item_sz > sizeof(cr)) {
1887 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1888 		return;
1889 	}
1890 
1891 	bad = 0;
1892 	while (items-- > 0 && !bad) {
1893 		memcpy(&cr, datap, item_sz);
1894 		cr.flags = ntohs(cr.flags);	/* XXX */
1895 		cr.mflags = ntohs(cr.mflags);
1896 		if (~RESM_NTPONLY & cr.mflags)
1897 			bad |= 1;
1898 		if (~RES_ALLFLAGS & cr.flags)
1899 			bad |= 2;
1900 		if (INADDR_ANY != cr.mask) {
1901 			if (client_v6_capable && cr.v6_flag) {
1902 				if (IN6_IS_ADDR_UNSPECIFIED(&cr.addr6))
1903 					bad |= 4;
1904 			} else {
1905 				if (INADDR_ANY == cr.addr)
1906 					bad |= 8;
1907 			}
1908 		}
1909 		datap += item_sz;
1910 	}
1911 
1912 	if (bad) {
1913 		msyslog(LOG_ERR, "%s: bad = 0x%x", __func__, bad);
1914 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1915 		return;
1916 	}
1917 
1918 	/*
1919 	 * Looks okay, try it out.  Needs to reload data pointer and
1920 	 * item counter. (Talos-CAN-0052)
1921 	 */
1922 	ZERO_SOCK(&matchaddr);
1923 	ZERO_SOCK(&matchmask);
1924 	items = INFO_NITEMS(inpkt->err_nitems);
1925 	datap = inpkt->u.data;
1926 
1927 	while (items-- > 0) {
1928 		memcpy(&cr, datap, item_sz);
1929 		cr.flags = ntohs(cr.flags);	/* XXX: size */
1930 		cr.mflags = ntohs(cr.mflags);
1931 		cr.ippeerlimit = ntohs(cr.ippeerlimit);
1932 		if (client_v6_capable && cr.v6_flag) {
1933 			AF(&matchaddr) = AF_INET6;
1934 			AF(&matchmask) = AF_INET6;
1935 			SOCK_ADDR6(&matchaddr) = cr.addr6;
1936 			SOCK_ADDR6(&matchmask) = cr.mask6;
1937 		} else {
1938 			AF(&matchaddr) = AF_INET;
1939 			AF(&matchmask) = AF_INET;
1940 			NSRCADR(&matchaddr) = cr.addr;
1941 			NSRCADR(&matchmask) = cr.mask;
1942 		}
1943 		success =  hack_restrict(op, &matchaddr, &matchmask,
1944 					 cr.ippeerlimit, cr.mflags,
1945 					 cr.flags, 0);
1946 		if (!success) {
1947 			DPRINTF(1, ("%s: %s %s mask %s ippeerlimit %hd %s %s failed",
1948 				    __func__, resop_str(op),
1949 				    stoa(&matchaddr), stoa(&matchmask),
1950 				    cr.ippeerlimit, mflags_str(cr.mflags),
1951 				    rflags_str(cr.flags)));
1952 		}
1953 		datap += item_sz;
1954 	}
1955 
1956 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1957 }
1958 
1959 
1960 /*
1961  * mon_getlist - return monitor data
1962  */
1963 static void
1964 mon_getlist(
1965 	sockaddr_u *srcadr,
1966 	endpt *inter,
1967 	struct req_pkt *inpkt
1968 	)
1969 {
1970 	req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
1971 }
1972 
1973 
1974 /*
1975  * Module entry points and the flags they correspond with
1976  */
1977 struct reset_entry {
1978 	int flag;		/* flag this corresponds to */
1979 	void (*handler)(void);	/* routine to handle request */
1980 };
1981 
1982 struct reset_entry reset_entries[] = {
1983 	{ RESET_FLAG_ALLPEERS,	peer_all_reset },
1984 	{ RESET_FLAG_IO,	io_clr_stats },
1985 	{ RESET_FLAG_SYS,	proto_clr_stats },
1986 	{ RESET_FLAG_MEM,	peer_clr_stats },
1987 	{ RESET_FLAG_TIMER,	timer_clr_stats },
1988 	{ RESET_FLAG_AUTH,	reset_auth_stats },
1989 	{ RESET_FLAG_CTL,	ctl_clr_stats },
1990 	{ 0,			0 }
1991 };
1992 
1993 /*
1994  * reset_stats - reset statistic counters here and there
1995  */
1996 static void
1997 reset_stats(
1998 	sockaddr_u *srcadr,
1999 	endpt *inter,
2000 	struct req_pkt *inpkt
2001 	)
2002 {
2003 	struct reset_flags *rflags;
2004 	u_long flags;
2005 	struct reset_entry *rent;
2006 
2007 	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
2008 		msyslog(LOG_ERR, "reset_stats: err_nitems > 1");
2009 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
2010 		return;
2011 	}
2012 
2013 	rflags = (struct reset_flags *)&inpkt->u;
2014 	flags = ntohl(rflags->flags);
2015 
2016 	if (flags & ~RESET_ALLFLAGS) {
2017 		msyslog(LOG_ERR, "reset_stats: reset leaves %#lx",
2018 			flags & ~RESET_ALLFLAGS);
2019 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
2020 		return;
2021 	}
2022 
2023 	for (rent = reset_entries; rent->flag != 0; rent++) {
2024 		if (flags & rent->flag)
2025 			(*rent->handler)();
2026 	}
2027 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2028 }
2029 
2030 
2031 /*
2032  * reset_peer - clear a peer's statistics
2033  */
2034 static void
2035 reset_peer(
2036 	sockaddr_u *srcadr,
2037 	endpt *inter,
2038 	struct req_pkt *inpkt
2039 	)
2040 {
2041 	u_short			items;
2042 	size_t			item_sz;
2043 	char *			datap;
2044 	struct conf_unpeer	cp;
2045 	struct peer *		p;
2046 	sockaddr_u		peeraddr;
2047 	int			bad;
2048 
2049 	/*
2050 	 * We check first to see that every peer exists.  If not,
2051 	 * we return an error.
2052 	 */
2053 
2054 	items = INFO_NITEMS(inpkt->err_nitems);
2055 	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
2056 	datap = inpkt->u.data;
2057 	if (item_sz > sizeof(cp)) {
2058 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
2059 		return;
2060 	}
2061 
2062 	bad = FALSE;
2063 	while (items-- > 0 && !bad) {
2064 		ZERO(cp);
2065 		memcpy(&cp, datap, item_sz);
2066 		ZERO_SOCK(&peeraddr);
2067 		if (client_v6_capable && cp.v6_flag) {
2068 			AF(&peeraddr) = AF_INET6;
2069 			SOCK_ADDR6(&peeraddr) = cp.peeraddr6;
2070 		} else {
2071 			AF(&peeraddr) = AF_INET;
2072 			NSRCADR(&peeraddr) = cp.peeraddr;
2073 		}
2074 
2075 #ifdef ISC_PLATFORM_HAVESALEN
2076 		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
2077 #endif
2078 		p = findexistingpeer(&peeraddr, NULL, NULL, -1, 0, NULL);
2079 		if (NULL == p)
2080 			bad++;
2081 		datap += item_sz;
2082 	}
2083 
2084 	if (bad) {
2085 		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2086 		return;
2087 	}
2088 
2089 	/*
2090 	 * Now do it in earnest. Needs to reload data pointer and item
2091 	 * counter. (Talos-CAN-0052)
2092 	 */
2093 
2094 	items = INFO_NITEMS(inpkt->err_nitems);
2095 	datap = inpkt->u.data;
2096 	while (items-- > 0) {
2097 		ZERO(cp);
2098 		memcpy(&cp, datap, item_sz);
2099 		ZERO_SOCK(&peeraddr);
2100 		if (client_v6_capable && cp.v6_flag) {
2101 			AF(&peeraddr) = AF_INET6;
2102 			SOCK_ADDR6(&peeraddr) = cp.peeraddr6;
2103 		} else {
2104 			AF(&peeraddr) = AF_INET;
2105 			NSRCADR(&peeraddr) = cp.peeraddr;
2106 		}
2107 		SET_PORT(&peeraddr, 123);
2108 #ifdef ISC_PLATFORM_HAVESALEN
2109 		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
2110 #endif
2111 		p = findexistingpeer(&peeraddr, NULL, NULL, -1, 0, NULL);
2112 		while (p != NULL) {
2113 			peer_reset(p);
2114 			p = findexistingpeer(&peeraddr, NULL, p, -1, 0, NULL);
2115 		}
2116 		datap += item_sz;
2117 	}
2118 
2119 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2120 }
2121 
2122 
2123 /*
2124  * do_key_reread - reread the encryption key file
2125  */
2126 static void
2127 do_key_reread(
2128 	sockaddr_u *srcadr,
2129 	endpt *inter,
2130 	struct req_pkt *inpkt
2131 	)
2132 {
2133 	rereadkeys();
2134 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2135 }
2136 
2137 
2138 /*
2139  * trust_key - make one or more keys trusted
2140  */
2141 static void
2142 trust_key(
2143 	sockaddr_u *srcadr,
2144 	endpt *inter,
2145 	struct req_pkt *inpkt
2146 	)
2147 {
2148 	do_trustkey(srcadr, inter, inpkt, 1);
2149 }
2150 
2151 
2152 /*
2153  * untrust_key - make one or more keys untrusted
2154  */
2155 static void
2156 untrust_key(
2157 	sockaddr_u *srcadr,
2158 	endpt *inter,
2159 	struct req_pkt *inpkt
2160 	)
2161 {
2162 	do_trustkey(srcadr, inter, inpkt, 0);
2163 }
2164 
2165 
2166 /*
2167  * do_trustkey - make keys either trustable or untrustable
2168  */
2169 static void
2170 do_trustkey(
2171 	sockaddr_u *srcadr,
2172 	endpt *inter,
2173 	struct req_pkt *inpkt,
2174 	u_long trust
2175 	)
2176 {
2177 	register uint32_t *kp;
2178 	register int items;
2179 
2180 	items = INFO_NITEMS(inpkt->err_nitems);
2181 	kp = (uint32_t *)&inpkt->u;
2182 	while (items-- > 0) {
2183 		authtrust(*kp, trust);
2184 		kp++;
2185 	}
2186 
2187 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2188 }
2189 
2190 
2191 /*
2192  * get_auth_info - return some stats concerning the authentication module
2193  */
2194 static void
2195 get_auth_info(
2196 	sockaddr_u *srcadr,
2197 	endpt *inter,
2198 	struct req_pkt *inpkt
2199 	)
2200 {
2201 	register struct info_auth *ia;
2202 
2203 	ia = (struct info_auth *)prepare_pkt(srcadr, inter, inpkt,
2204 					     sizeof(struct info_auth));
2205 
2206 	ia->numkeys = htonl((u_int32)authnumkeys);
2207 	ia->numfreekeys = htonl((u_int32)authnumfreekeys);
2208 	ia->keylookups = htonl((u_int32)authkeylookups);
2209 	ia->keynotfound = htonl((u_int32)authkeynotfound);
2210 	ia->encryptions = htonl((u_int32)authencryptions);
2211 	ia->decryptions = htonl((u_int32)authdecryptions);
2212 	ia->keyuncached = htonl((u_int32)authkeyuncached);
2213 	ia->expired = htonl((u_int32)authkeyexpired);
2214 	ia->timereset = htonl((u_int32)(current_time - auth_timereset));
2215 
2216 	(void) more_pkt();
2217 	flush_pkt();
2218 }
2219 
2220 
2221 
2222 /*
2223  * reset_auth_stats - reset the authentication stat counters.  Done here
2224  *		      to keep ntp-isms out of the authentication module
2225  */
2226 void
2227 reset_auth_stats(void)
2228 {
2229 	authkeylookups = 0;
2230 	authkeynotfound = 0;
2231 	authencryptions = 0;
2232 	authdecryptions = 0;
2233 	authkeyuncached = 0;
2234 	auth_timereset = current_time;
2235 }
2236 
2237 
2238 /*
2239  * req_get_traps - return information about current trap holders
2240  */
2241 static void
2242 req_get_traps(
2243 	sockaddr_u *srcadr,
2244 	endpt *inter,
2245 	struct req_pkt *inpkt
2246 	)
2247 {
2248 	struct info_trap *it;
2249 	struct ctl_trap *tr;
2250 	size_t i;
2251 
2252 	if (num_ctl_traps == 0) {
2253 		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2254 		return;
2255 	}
2256 
2257 	it = (struct info_trap *)prepare_pkt(srcadr, inter, inpkt,
2258 	    v6sizeof(struct info_trap));
2259 
2260 	for (i = 0, tr = ctl_traps; it && i < COUNTOF(ctl_traps); i++, tr++) {
2261 		if (tr->tr_flags & TRAP_INUSE) {
2262 			if (IS_IPV4(&tr->tr_addr)) {
2263 				if (tr->tr_localaddr == any_interface)
2264 					it->local_address = 0;
2265 				else
2266 					it->local_address
2267 					    = NSRCADR(&tr->tr_localaddr->sin);
2268 				it->trap_address = NSRCADR(&tr->tr_addr);
2269 				if (client_v6_capable)
2270 					it->v6_flag = 0;
2271 			} else {
2272 				if (!client_v6_capable)
2273 					continue;
2274 				it->local_address6
2275 				    = SOCK_ADDR6(&tr->tr_localaddr->sin);
2276 				it->trap_address6 = SOCK_ADDR6(&tr->tr_addr);
2277 				it->v6_flag = 1;
2278 			}
2279 			it->trap_port = NSRCPORT(&tr->tr_addr);
2280 			it->sequence = htons(tr->tr_sequence);
2281 			it->settime = htonl((u_int32)(current_time - tr->tr_settime));
2282 			it->origtime = htonl((u_int32)(current_time - tr->tr_origtime));
2283 			it->resets = htonl((u_int32)tr->tr_resets);
2284 			it->flags = htonl((u_int32)tr->tr_flags);
2285 			it = (struct info_trap *)more_pkt();
2286 		}
2287 	}
2288 	flush_pkt();
2289 }
2290 
2291 
2292 /*
2293  * req_set_trap - configure a trap
2294  */
2295 static void
2296 req_set_trap(
2297 	sockaddr_u *srcadr,
2298 	endpt *inter,
2299 	struct req_pkt *inpkt
2300 	)
2301 {
2302 	do_setclr_trap(srcadr, inter, inpkt, 1);
2303 }
2304 
2305 
2306 
2307 /*
2308  * req_clr_trap - unconfigure a trap
2309  */
2310 static void
2311 req_clr_trap(
2312 	sockaddr_u *srcadr,
2313 	endpt *inter,
2314 	struct req_pkt *inpkt
2315 	)
2316 {
2317 	do_setclr_trap(srcadr, inter, inpkt, 0);
2318 }
2319 
2320 
2321 
2322 /*
2323  * do_setclr_trap - do the grunge work of (un)configuring a trap
2324  */
2325 static void
2326 do_setclr_trap(
2327 	sockaddr_u *srcadr,
2328 	endpt *inter,
2329 	struct req_pkt *inpkt,
2330 	int set
2331 	)
2332 {
2333 	register struct conf_trap *ct;
2334 	register endpt *linter;
2335 	int res;
2336 	sockaddr_u laddr;
2337 
2338 	/*
2339 	 * Prepare sockaddr
2340 	 */
2341 	ZERO_SOCK(&laddr);
2342 	AF(&laddr) = AF(srcadr);
2343 	SET_PORT(&laddr, NTP_PORT);
2344 
2345 	/*
2346 	 * Restrict ourselves to one item only.  This eliminates
2347 	 * the error reporting problem.
2348 	 */
2349 	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
2350 		msyslog(LOG_ERR, "do_setclr_trap: err_nitems > 1");
2351 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
2352 		return;
2353 	}
2354 	ct = (struct conf_trap *)&inpkt->u;
2355 
2356 	/*
2357 	 * Look for the local interface.  If none, use the default.
2358 	 */
2359 	if (ct->local_address == 0) {
2360 		linter = any_interface;
2361 	} else {
2362 		if (IS_IPV4(&laddr))
2363 			NSRCADR(&laddr) = ct->local_address;
2364 		else
2365 			SOCK_ADDR6(&laddr) = ct->local_address6;
2366 		linter = findinterface(&laddr);
2367 		if (NULL == linter) {
2368 			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2369 			return;
2370 		}
2371 	}
2372 
2373 	if (IS_IPV4(&laddr))
2374 		NSRCADR(&laddr) = ct->trap_address;
2375 	else
2376 		SOCK_ADDR6(&laddr) = ct->trap_address6;
2377 	if (ct->trap_port)
2378 		NSRCPORT(&laddr) = ct->trap_port;
2379 	else
2380 		SET_PORT(&laddr, TRAPPORT);
2381 
2382 	if (set) {
2383 		res = ctlsettrap(&laddr, linter, 0,
2384 				 INFO_VERSION(inpkt->rm_vn_mode));
2385 	} else {
2386 		res = ctlclrtrap(&laddr, linter, 0);
2387 	}
2388 
2389 	if (!res) {
2390 		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2391 	} else {
2392 		req_ack(srcadr, inter, inpkt, INFO_OKAY);
2393 	}
2394 	return;
2395 }
2396 
2397 /*
2398  * Validate a request packet for a new request or control key:
2399  *  - only one item allowed
2400  *  - key must be valid (that is, known, and not in the autokey range)
2401  */
2402 static void
2403 set_keyid_checked(
2404 	keyid_t        *into,
2405 	const char     *what,
2406 	sockaddr_u     *srcadr,
2407 	endpt          *inter,
2408 	struct req_pkt *inpkt
2409 	)
2410 {
2411 	keyid_t *pkeyid;
2412 	keyid_t  tmpkey;
2413 
2414 	/* restrict ourselves to one item only */
2415 	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
2416 		msyslog(LOG_ERR, "set_keyid_checked[%s]: err_nitems > 1",
2417 			what);
2418 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
2419 		return;
2420 	}
2421 
2422 	/* plug the new key from the packet */
2423 	pkeyid = (keyid_t *)&inpkt->u;
2424 	tmpkey = ntohl(*pkeyid);
2425 
2426 	/* validate the new key id, claim data error on failure */
2427 	if (tmpkey < 1 || tmpkey > NTP_MAXKEY || !auth_havekey(tmpkey)) {
2428 		msyslog(LOG_ERR, "set_keyid_checked[%s]: invalid key id: %ld",
2429 			what, (long)tmpkey);
2430 		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2431 		return;
2432 	}
2433 
2434 	/* if we arrive here, the key is good -- use it */
2435 	*into = tmpkey;
2436 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2437 }
2438 
2439 /*
2440  * set_request_keyid - set the keyid used to authenticate requests
2441  */
2442 static void
2443 set_request_keyid(
2444 	sockaddr_u *srcadr,
2445 	endpt *inter,
2446 	struct req_pkt *inpkt
2447 	)
2448 {
2449 	set_keyid_checked(&info_auth_keyid, "request",
2450 			  srcadr, inter, inpkt);
2451 }
2452 
2453 
2454 
2455 /*
2456  * set_control_keyid - set the keyid used to authenticate requests
2457  */
2458 static void
2459 set_control_keyid(
2460 	sockaddr_u *srcadr,
2461 	endpt *inter,
2462 	struct req_pkt *inpkt
2463 	)
2464 {
2465 	set_keyid_checked(&ctl_auth_keyid, "control",
2466 			  srcadr, inter, inpkt);
2467 }
2468 
2469 
2470 
2471 /*
2472  * get_ctl_stats - return some stats concerning the control message module
2473  */
2474 static void
2475 get_ctl_stats(
2476 	sockaddr_u *srcadr,
2477 	endpt *inter,
2478 	struct req_pkt *inpkt
2479 	)
2480 {
2481 	register struct info_control *ic;
2482 
2483 	ic = (struct info_control *)prepare_pkt(srcadr, inter, inpkt,
2484 						sizeof(struct info_control));
2485 
2486 	ic->ctltimereset = htonl((u_int32)(current_time - ctltimereset));
2487 	ic->numctlreq = htonl((u_int32)numctlreq);
2488 	ic->numctlbadpkts = htonl((u_int32)numctlbadpkts);
2489 	ic->numctlresponses = htonl((u_int32)numctlresponses);
2490 	ic->numctlfrags = htonl((u_int32)numctlfrags);
2491 	ic->numctlerrors = htonl((u_int32)numctlerrors);
2492 	ic->numctltooshort = htonl((u_int32)numctltooshort);
2493 	ic->numctlinputresp = htonl((u_int32)numctlinputresp);
2494 	ic->numctlinputfrag = htonl((u_int32)numctlinputfrag);
2495 	ic->numctlinputerr = htonl((u_int32)numctlinputerr);
2496 	ic->numctlbadoffset = htonl((u_int32)numctlbadoffset);
2497 	ic->numctlbadversion = htonl((u_int32)numctlbadversion);
2498 	ic->numctldatatooshort = htonl((u_int32)numctldatatooshort);
2499 	ic->numctlbadop = htonl((u_int32)numctlbadop);
2500 	ic->numasyncmsgs = htonl((u_int32)numasyncmsgs);
2501 
2502 	(void) more_pkt();
2503 	flush_pkt();
2504 }
2505 
2506 
2507 #ifdef KERNEL_PLL
2508 /*
2509  * get_kernel_info - get kernel pll/pps information
2510  */
2511 static void
2512 get_kernel_info(
2513 	sockaddr_u *srcadr,
2514 	endpt *inter,
2515 	struct req_pkt *inpkt
2516 	)
2517 {
2518 	register struct info_kernel *ik;
2519 	struct timex ntx;
2520 
2521 	if (!pll_control) {
2522 		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2523 		return;
2524 	}
2525 
2526 	ZERO(ntx);
2527 	if (ntp_adjtime(&ntx) < 0)
2528 		msyslog(LOG_ERR, "get_kernel_info: ntp_adjtime() failed: %m");
2529 	ik = (struct info_kernel *)prepare_pkt(srcadr, inter, inpkt,
2530 	    sizeof(struct info_kernel));
2531 
2532 	/*
2533 	 * pll variables
2534 	 */
2535 	ik->offset = htonl((u_int32)ntx.offset);
2536 	ik->freq = htonl((u_int32)ntx.freq);
2537 	ik->maxerror = htonl((u_int32)ntx.maxerror);
2538 	ik->esterror = htonl((u_int32)ntx.esterror);
2539 	ik->status = htons(ntx.status);
2540 	ik->constant = htonl((u_int32)ntx.constant);
2541 	ik->precision = htonl((u_int32)ntx.precision);
2542 	ik->tolerance = htonl((u_int32)ntx.tolerance);
2543 
2544 	/*
2545 	 * pps variables
2546 	 */
2547 	ik->ppsfreq = htonl((u_int32)ntx.ppsfreq);
2548 	ik->jitter = htonl((u_int32)ntx.jitter);
2549 	ik->shift = htons(ntx.shift);
2550 	ik->stabil = htonl((u_int32)ntx.stabil);
2551 	ik->jitcnt = htonl((u_int32)ntx.jitcnt);
2552 	ik->calcnt = htonl((u_int32)ntx.calcnt);
2553 	ik->errcnt = htonl((u_int32)ntx.errcnt);
2554 	ik->stbcnt = htonl((u_int32)ntx.stbcnt);
2555 
2556 	(void) more_pkt();
2557 	flush_pkt();
2558 }
2559 #endif /* KERNEL_PLL */
2560 
2561 
2562 #ifdef REFCLOCK
2563 /*
2564  * get_clock_info - get info about a clock
2565  */
2566 static void
2567 get_clock_info(
2568 	sockaddr_u *srcadr,
2569 	endpt *inter,
2570 	struct req_pkt *inpkt
2571 	)
2572 {
2573 	register struct info_clock *ic;
2574 	register u_int32 *clkaddr;
2575 	register int items;
2576 	struct refclockstat clock_stat;
2577 	sockaddr_u addr;
2578 	l_fp ltmp;
2579 
2580 	ZERO_SOCK(&addr);
2581 	AF(&addr) = AF_INET;
2582 #ifdef ISC_PLATFORM_HAVESALEN
2583 	addr.sa.sa_len = SOCKLEN(&addr);
2584 #endif
2585 	SET_PORT(&addr, NTP_PORT);
2586 	items = INFO_NITEMS(inpkt->err_nitems);
2587 	clkaddr = &inpkt->u.u32[0];
2588 
2589 	ic = (struct info_clock *)prepare_pkt(srcadr, inter, inpkt,
2590 					      sizeof(struct info_clock));
2591 
2592 	while (items-- > 0 && ic) {
2593 		NSRCADR(&addr) = *clkaddr++;
2594 		if (!ISREFCLOCKADR(&addr) || NULL ==
2595 		    findexistingpeer(&addr, NULL, NULL, -1, 0, NULL)) {
2596 			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2597 			return;
2598 		}
2599 
2600 		clock_stat.kv_list = (struct ctl_var *)0;
2601 
2602 		refclock_control(&addr, NULL, &clock_stat);
2603 
2604 		ic->clockadr = NSRCADR(&addr);
2605 		ic->type = clock_stat.type;
2606 		ic->flags = clock_stat.flags;
2607 		ic->lastevent = clock_stat.lastevent;
2608 		ic->currentstatus = clock_stat.currentstatus;
2609 		ic->polls = htonl((u_int32)clock_stat.polls);
2610 		ic->noresponse = htonl((u_int32)clock_stat.noresponse);
2611 		ic->badformat = htonl((u_int32)clock_stat.badformat);
2612 		ic->baddata = htonl((u_int32)clock_stat.baddata);
2613 		ic->timestarted = htonl((u_int32)clock_stat.timereset);
2614 		DTOLFP(clock_stat.fudgetime1, &ltmp);
2615 		HTONL_FP(&ltmp, &ic->fudgetime1);
2616 		DTOLFP(clock_stat.fudgetime2, &ltmp);
2617 		HTONL_FP(&ltmp, &ic->fudgetime2);
2618 		ic->fudgeval1 = htonl((u_int32)clock_stat.fudgeval1);
2619 		/* [Bug3527] Backward Incompatible: ic->fudgeval2 is
2620 		 * a string, instantiated via memcpy() so there is no
2621 		 * endian issue to correct.
2622 		 */
2623 #ifdef DISABLE_BUG3527_FIX
2624 		ic->fudgeval2 = htonl(clock_stat.fudgeval2);
2625 #else
2626 		ic->fudgeval2 = clock_stat.fudgeval2;
2627 #endif
2628 
2629 		free_varlist(clock_stat.kv_list);
2630 
2631 		ic = (struct info_clock *)more_pkt();
2632 	}
2633 	flush_pkt();
2634 }
2635 
2636 
2637 
2638 /*
2639  * set_clock_fudge - get a clock's fudge factors
2640  */
2641 static void
2642 set_clock_fudge(
2643 	sockaddr_u *srcadr,
2644 	endpt *inter,
2645 	struct req_pkt *inpkt
2646 	)
2647 {
2648 	register struct conf_fudge *cf;
2649 	register int items;
2650 	struct refclockstat clock_stat;
2651 	sockaddr_u addr;
2652 	l_fp ltmp;
2653 
2654 	ZERO(addr);
2655 	ZERO(clock_stat);
2656 	items = INFO_NITEMS(inpkt->err_nitems);
2657 	cf = (struct conf_fudge *)&inpkt->u;
2658 
2659 	while (items-- > 0) {
2660 		AF(&addr) = AF_INET;
2661 		NSRCADR(&addr) = cf->clockadr;
2662 #ifdef ISC_PLATFORM_HAVESALEN
2663 		addr.sa.sa_len = SOCKLEN(&addr);
2664 #endif
2665 		SET_PORT(&addr, NTP_PORT);
2666 		if (!ISREFCLOCKADR(&addr) || NULL ==
2667 		    findexistingpeer(&addr, NULL, NULL, -1, 0, NULL)) {
2668 			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2669 			return;
2670 		}
2671 
2672 		switch(ntohl(cf->which)) {
2673 		    case FUDGE_TIME1:
2674 			NTOHL_FP(&cf->fudgetime, &ltmp);
2675 			LFPTOD(&ltmp, clock_stat.fudgetime1);
2676 			clock_stat.haveflags = CLK_HAVETIME1;
2677 			break;
2678 		    case FUDGE_TIME2:
2679 			NTOHL_FP(&cf->fudgetime, &ltmp);
2680 			LFPTOD(&ltmp, clock_stat.fudgetime2);
2681 			clock_stat.haveflags = CLK_HAVETIME2;
2682 			break;
2683 		    case FUDGE_VAL1:
2684 			clock_stat.fudgeval1 = ntohl(cf->fudgeval_flags);
2685 			clock_stat.haveflags = CLK_HAVEVAL1;
2686 			break;
2687 		    case FUDGE_VAL2:
2688 			clock_stat.fudgeval2 = ntohl(cf->fudgeval_flags);
2689 			clock_stat.haveflags = CLK_HAVEVAL2;
2690 			break;
2691 		    case FUDGE_FLAGS:
2692 			clock_stat.flags = (u_char) (ntohl(cf->fudgeval_flags) & 0xf);
2693 			clock_stat.haveflags =
2694 				(CLK_HAVEFLAG1|CLK_HAVEFLAG2|CLK_HAVEFLAG3|CLK_HAVEFLAG4);
2695 			break;
2696 		    default:
2697 			msyslog(LOG_ERR, "set_clock_fudge: default!");
2698 			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
2699 			return;
2700 		}
2701 
2702 		refclock_control(&addr, &clock_stat, (struct refclockstat *)0);
2703 	}
2704 
2705 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2706 }
2707 #endif
2708 
2709 #ifdef REFCLOCK
2710 /*
2711  * get_clkbug_info - get debugging info about a clock
2712  */
2713 static void
2714 get_clkbug_info(
2715 	sockaddr_u *srcadr,
2716 	endpt *inter,
2717 	struct req_pkt *inpkt
2718 	)
2719 {
2720 	register int i;
2721 	register struct info_clkbug *ic;
2722 	register u_int32 *clkaddr;
2723 	register int items;
2724 	struct refclockbug bug;
2725 	sockaddr_u addr;
2726 
2727 	ZERO_SOCK(&addr);
2728 	AF(&addr) = AF_INET;
2729 #ifdef ISC_PLATFORM_HAVESALEN
2730 	addr.sa.sa_len = SOCKLEN(&addr);
2731 #endif
2732 	SET_PORT(&addr, NTP_PORT);
2733 	items = INFO_NITEMS(inpkt->err_nitems);
2734 	clkaddr = (u_int32 *)&inpkt->u;
2735 
2736 	ic = (struct info_clkbug *)prepare_pkt(srcadr, inter, inpkt,
2737 					       sizeof(struct info_clkbug));
2738 
2739 	while (items-- > 0 && ic) {
2740 		NSRCADR(&addr) = *clkaddr++;
2741 		if (!ISREFCLOCKADR(&addr) || NULL ==
2742 		    findexistingpeer(&addr, NULL, NULL, -1, 0, NULL)) {
2743 			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2744 			return;
2745 		}
2746 
2747 		ZERO(bug);
2748 		refclock_buginfo(&addr, &bug);
2749 		if (bug.nvalues == 0 && bug.ntimes == 0) {
2750 			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2751 			return;
2752 		}
2753 
2754 		ic->clockadr = NSRCADR(&addr);
2755 		i = bug.nvalues;
2756 		if (i > NUMCBUGVALUES)
2757 		    i = NUMCBUGVALUES;
2758 		ic->nvalues = (u_char)i;
2759 		ic->svalues = htons((u_short) (bug.svalues & ((1<<i)-1)));
2760 		while (--i >= 0)
2761 		    ic->values[i] = htonl(bug.values[i]);
2762 
2763 		i = bug.ntimes;
2764 		if (i > NUMCBUGTIMES)
2765 		    i = NUMCBUGTIMES;
2766 		ic->ntimes = (u_char)i;
2767 		ic->stimes = htonl(bug.stimes);
2768 		while (--i >= 0) {
2769 			HTONL_FP(&bug.times[i], &ic->times[i]);
2770 		}
2771 
2772 		ic = (struct info_clkbug *)more_pkt();
2773 	}
2774 	flush_pkt();
2775 }
2776 #endif
2777 
2778 /*
2779  * receiver of interface structures
2780  */
2781 static void
2782 fill_info_if_stats(void *data, interface_info_t *interface_info)
2783 {
2784 	struct info_if_stats **ifsp = (struct info_if_stats **)data;
2785 	struct info_if_stats *ifs = *ifsp;
2786 	endpt *ep = interface_info->ep;
2787 
2788 	if (NULL == ifs)
2789 		return;
2790 
2791 	ZERO(*ifs);
2792 
2793 	if (IS_IPV6(&ep->sin)) {
2794 		if (!client_v6_capable)
2795 			return;
2796 		ifs->v6_flag = 1;
2797 		ifs->unaddr.addr6 = SOCK_ADDR6(&ep->sin);
2798 		ifs->unbcast.addr6 = SOCK_ADDR6(&ep->bcast);
2799 		ifs->unmask.addr6 = SOCK_ADDR6(&ep->mask);
2800 	} else {
2801 		ifs->v6_flag = 0;
2802 		ifs->unaddr.addr = SOCK_ADDR4(&ep->sin);
2803 		ifs->unbcast.addr = SOCK_ADDR4(&ep->bcast);
2804 		ifs->unmask.addr = SOCK_ADDR4(&ep->mask);
2805 	}
2806 	ifs->v6_flag = htonl(ifs->v6_flag);
2807 	strlcpy(ifs->name, ep->name, sizeof(ifs->name));
2808 	ifs->family = htons(ep->family);
2809 	ifs->flags = htonl(ep->flags);
2810 	ifs->last_ttl = htonl(ep->last_ttl);
2811 	ifs->num_mcast = htonl(ep->num_mcast);
2812 	ifs->received = htonl(ep->received);
2813 	ifs->sent = htonl(ep->sent);
2814 	ifs->notsent = htonl(ep->notsent);
2815 	ifs->ifindex = htonl(ep->ifindex);
2816 	/* scope no longer in endpt, in in6_addr typically */
2817 	ifs->scopeid = ifs->ifindex;
2818 	ifs->ifnum = htonl(ep->ifnum);
2819 	ifs->uptime = htonl(current_time - ep->starttime);
2820 	ifs->ignore_packets = ep->ignore_packets;
2821 	ifs->peercnt = htonl(ep->peercnt);
2822 	ifs->action = interface_info->action;
2823 
2824 	*ifsp = (struct info_if_stats *)more_pkt();
2825 }
2826 
2827 /*
2828  * get_if_stats - get interface statistics
2829  */
2830 static void
2831 get_if_stats(
2832 	sockaddr_u *srcadr,
2833 	endpt *inter,
2834 	struct req_pkt *inpkt
2835 	)
2836 {
2837 	struct info_if_stats *ifs;
2838 
2839 	DPRINTF(3, ("wants interface statistics\n"));
2840 
2841 	ifs = (struct info_if_stats *)prepare_pkt(srcadr, inter, inpkt,
2842 	    v6sizeof(struct info_if_stats));
2843 
2844 	interface_enumerate(fill_info_if_stats, &ifs);
2845 
2846 	flush_pkt();
2847 }
2848 
2849 static void
2850 do_if_reload(
2851 	sockaddr_u *srcadr,
2852 	endpt *inter,
2853 	struct req_pkt *inpkt
2854 	)
2855 {
2856 	struct info_if_stats *ifs;
2857 
2858 	DPRINTF(3, ("wants interface reload\n"));
2859 
2860 	ifs = (struct info_if_stats *)prepare_pkt(srcadr, inter, inpkt,
2861 	    v6sizeof(struct info_if_stats));
2862 
2863 	interface_update(fill_info_if_stats, &ifs);
2864 
2865 	flush_pkt();
2866 }
2867 
2868