xref: /netbsd-src/external/bsd/ntp/dist/ntpd/ntp_restrict.c (revision 3117ece4fc4a4ca4489ba793710b60b0d26bab6c)
1 /*	$NetBSD: ntp_restrict.c,v 1.13 2024/10/01 20:59:51 christos Exp $	*/
2 
3 /*
4  * ntp_restrict.c - determine host restrictions
5  */
6 #ifdef HAVE_CONFIG_H
7 #include <config.h>
8 #endif
9 
10 #include <stdio.h>
11 #include <sys/types.h>
12 
13 #include "ntpd.h"
14 #include "ntp_if.h"
15 #include "ntp_lists.h"
16 #include "ntp_stdlib.h"
17 #include "ntp_assert.h"
18 
19 /*
20  * This code keeps a simple address-and-mask list of addresses we want
21  * to place restrictions on (or remove them from). The restrictions are
22  * implemented as a set of flags which tell you what matching addresses
23  * can't do.  The list is sorted to retrieve the restrictions most
24  * specific to the address.
25  *
26  * This was originally intended to restrict you from sync'ing to your
27  * own broadcasts when you are doing that, by restricting yourself from
28  * your own interfaces. It was also thought it would sometimes be useful
29  * to keep a misbehaving host or two from abusing your primary clock. It
30  * has been expanded, however, to suit the needs of those with more
31  * restrictive access policies.
32  */
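/*
 * Purely illustrative sketch (not from this file): with a
 * configuration such as
 *
 *	restrict default kod limited noquery
 *	restrict 192.168.0.0 mask 255.255.0.0 kod limited
 *	restrict 192.168.0.0 mask 255.255.255.0 kod limited noquery nopeer
 *
 * a packet from 192.168.0.10 picks up the /24 entry's flags, one from
 * 192.168.5.1 the /16 entry's, and everything else the default's.
 * The same example appears, in sorted-list form, in init_restrict().
 */

/*
 * MASK_IPV6_ADDR - byte-wise AND of an IPv6 address with a mask
 * (dst = src & msk), used to reduce an address to the prefix that is
 * compared against a restriction entry.
 */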
33 #define MASK_IPV6_ADDR(dst, src, msk)					\
34 	do {								\
35 		int x;							\
36 									\
37 		for (x = 0; x < (int)COUNTOF((dst)->s6_addr); x++) {	\
38 			(dst)->s6_addr[x] =   (src)->s6_addr[x]		\
39 					    & (msk)->s6_addr[x];	\
40 		}							\
41 	} while (FALSE)
42 
43 /*
44  * We allocate INC_RESLIST{4|6} entries to the free list whenever it is empty.
45  * Auto-tune these to be just less than 1KB (leaving at least 32 bytes
46  * for allocator overhead).
47  */
48 #define	INC_RESLIST4	((1024 - 32) / sizeof(struct restrict_4))
49 #define	INC_RESLIST6	((1024 - 32) / sizeof(struct restrict_6))
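/*
 * Worked example (the actual struct size is platform-dependent; 48
 * bytes is only an assumption): with sizeof(struct restrict_4) == 48,
 * INC_RESLIST4 is (1024 - 32) / 48 == 20 entries, about 960 bytes per
 * batch.
 */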
50 
51 /*
52  * The restriction list
53  */
54 struct restrict_4 *restrictlist4;
55 struct restrict_6 *restrictlist6;
56 static size_t restrictcount;	/* count in the restrict lists */
57 
58 /*
59  * The free list and associated counters.  Also some uninteresting
60  * stat counters.
61  */
62 static struct restrict_4 *resfree4;	/* available entries (free list) */
63 static struct restrict_6 *resfree6;
64 
65 static u_long res_calls;
66 static u_long res_found;
67 static u_long res_not_found;
68 
69 /*
70  * Count number of restriction entries referring to RES_LIMITED, to
71  * control implicit activation/deactivation of the MRU monlist.
72  */
73 static	u_long res_limited_refcnt;
74 
75 /*
76  * Our default entries.
77  *
78  * We can make this cleaner with C99 support: see init_restrict().
79  */
80 static	struct restrict_4	restrict_def4;
81 static	struct restrict_6	restrict_def6;
82 
83 /*
84  * "restrict source ..." enabled knob and restriction bits.
85  */
86 static	int		restrict_source_enabled;
87 static	u_int32		restrict_source_rflags;
88 static	u_short		restrict_source_mflags;
89 static	short		restrict_source_ippeerlimit;
90 
91 /*
92  * private functions
93  */
94 static	struct restrict_4 *	alloc_res4(void);
95 static	struct restrict_6 *	alloc_res6(void);
96 static	void		free_res4(struct restrict_4 *);
97 static	void		free_res6(struct restrict_6 *);
98 static	inline void	inc_res_limited(void);
99 static	inline void	dec_res_limited(void);
100 static	struct restrict_4 *	match_restrict4_addr(u_int32, u_short);
101 static	struct restrict_6 *	match_restrict6_addr(const struct in6_addr *,
102 					     u_short);
103 static inline int/*BOOL*/	mflags_sorts_before(u_short, u_short);
104 static	int/*BOOL*/	res_sorts_before4(struct restrict_4 *,
105 					struct restrict_4 *);
106 static	int/*BOOL*/	res_sorts_before6(struct restrict_6 *,
107 					struct restrict_6 *);
108 
109 #ifdef DEBUG
110 /* dump_restrict() & dump_restricts() are DEBUG-only */
111 
112 static void
113 dump_restrict(const struct restrict_info *ri, const char *as, const char *ms)
114 {
115 	printf("%s/%s: hits %u ippeerlimit %hd mflags %s rflags %s",
116 		as, ms, ri->count, ri->ippeerlimit,
117 		mflags_str(ri->mflags),
118 		rflags_str(ri->rflags));
119 	if (ri->expire > 0) {
120 		printf(" expire %u\n", ri->expire);
121 	} else {
122 		printf("\n");
123 	}
124 }
125 
126 /*
127  * dump_restrict4, dump_restrict6 - spit out a single restriction entry
128  */
129 static void
130 dump_restrict4(
131 	struct restrict_4 *	res)
132 {
133 	char as[INET6_ADDRSTRLEN];
134 	char ms[INET6_ADDRSTRLEN];
135 
136 	struct in_addr	sia, sim;
137 
138 	sia.s_addr = htonl(res->v4.addr);
139 	sim.s_addr = htonl(res->v4.mask);
140 	inet_ntop(AF_INET, &sia, as, sizeof as);
141 	inet_ntop(AF_INET, &sim, ms, sizeof ms);
142 
143 	dump_restrict(&res->ri, as, ms);
144 }
145 
146 static void
147 dump_restrict6(
148 	struct restrict_6 *	res)
149 {
150 	char as[INET6_ADDRSTRLEN];
151 	char ms[INET6_ADDRSTRLEN];
152 
153 	inet_ntop(AF_INET6, &res->v6.addr, as, sizeof as);
154 	inet_ntop(AF_INET6, &res->v6.mask, ms, sizeof ms);
155 
156 	dump_restrict(&res->ri, as, ms);
157 }
158 
159 
160 /*
161  * dump_restricts - spit out the 'restrict' entries
162  */
163 void
164 dump_restricts(void)
165 {
166 	struct restrict_4 *	res4;
167 	struct restrict_6 *	res6;
168 
169 	/* Spit out the IPv4 list */
170 	printf("dump_restricts: restrictlist4: %p\n", restrictlist4);
171 	for (res4 = restrictlist4; res4 != NULL; res4 = res4->link) {
172 		dump_restrict4(res4);
173 	}
174 
175 	/* Spit out the IPv6 list */
176 	printf("dump_restricts: restrictlist6: %p\n", restrictlist6);
177 	for (res6 = restrictlist6; res6 != NULL; res6 = res6->link) {
178 		dump_restrict6(res6);
179 	}
180 }
181 #endif /* DEBUG - dump_restrict() / dump_restricts() */
182 
183 
184 /*
185  * init_restrict - initialize the restriction data structures
186  */
187 void
188 init_restrict(void)
189 {
190 	/*
191 	 * The restriction lists end with a default entry with address
192 	 * and mask 0, which will match any entry.  The lists are kept
193 	 * sorted by descending address followed by descending mask:
194 	 *
195 	 *   address	  mask
196 	 * 192.168.0.0	255.255.255.0	kod limited noquery nopeer
197 	 * 192.168.0.0	255.255.0.0	kod limited
198 	 * 0.0.0.0	0.0.0.0		kod limited noquery
199 	 *
200 	 * The first entry which matches an address is used.  With the
201 	 * example restrictions above, 192.168.0.0/24 matches the first
202 	 * entry, the rest of 192.168.0.0/16 matches the second, and
203 	 * everything else matches the third (default).
204 	 *
205 	 * Note this achieves the same result a little more efficiently
206 	 * than the documented behavior, which is to keep the lists
207 	 * sorted by ascending address followed by ascending mask, with
208 	 * the _last_ matching entry used.
209 	 *
210 	 * An additional wrinkle is that we may have multiple entries with
211 	 * the same address and mask but differing match flags (mflags).
212 	 * We want to never talk to ourself, so RES_IGNORE entries for
213 	 * each local address are added by ntp_io.c with a host mask and
214 	 * both RESM_INTERFACE and RESM_NTPONLY set.  We sort those
215 	 * entries before entries without those flags to achieve this.
216 	 * The remaining match flag is RESM_SOURCE, used to dynamically
217 	 * set restrictions for each peer based on the prototype set by
218 	 * "restrict source" in the configuration.  We want those entries
219 	 * to be considered only when there is not a static host
220 	 * restriction for the address in the configuration, to allow
221 	 * operators to blacklist pool and manycast servers at runtime as
222 	 * desired using ntpq runtime configuration.  Such static entries
223 	 * have no RESM_ bits set, so the sort order for mflags is first
224 	 * RESM_INTERFACE, then entries without RESM_SOURCE, finally the
225 	 * remaining.
226 	 */
227 
228 	restrict_def4.ri.ippeerlimit = -1;	/* Cleaner if we have C99 */
229 	restrict_def6.ri.ippeerlimit = -1;	/* Cleaner if we have C99 */
230 
231 	LINK_SLIST(restrictlist4, &restrict_def4, link);
232 	LINK_SLIST(restrictlist6, &restrict_def6, link);
233 	restrictcount = 2;
234 }
235 
236 
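/*
 * alloc_res4 - hand out an IPv4 restriction entry from the free list,
 * refilling the list with a batch of INC_RESLIST4 entries carved from
 * a single allocation whenever it runs empty.  alloc_res6() below is
 * the IPv6 counterpart.
 */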
237 static struct restrict_4 *
238 alloc_res4(void)
239 {
240 	const size_t	count = INC_RESLIST4;
241 	struct restrict_4*	rl;
242 	struct restrict_4*	res;
243 	const size_t	cb = sizeof(*rl);
244 	size_t		i;
245 
246 	UNLINK_HEAD_SLIST(res, resfree4, link);
247 	if (res != NULL) {
248 		return res;
249 	}
250 	rl = eallocarray(count, cb);
251 	/* link all but the first onto free list */
252 	res = (void *)((char *)rl + (count - 1) * cb);
253 	for (i = count - 1; i > 0; i--) {
254 		LINK_SLIST(resfree4, res, link);
255 		res = (void *)((char *)res - cb);
256 	}
257 	DEBUG_INSIST(rl == res);
258 	/* allocate the first */
259 	return res;
260 }
261 
262 
263 static struct restrict_6 *
264 alloc_res6(void)
265 {
266 	const size_t	count = INC_RESLIST6;
267 	struct restrict_6 *	rl;
268 	struct restrict_6 *	res;
269 	const size_t	cb = sizeof(*rl);
270 	size_t		i;
271 
272 	UNLINK_HEAD_SLIST(res, resfree6, link);
273 	if (res != NULL) {
274 		return res;
275 	}
276 	rl = eallocarray(count, cb);
277 	/* link all but the first onto free list */
278 	res = (void *)((char *)rl + (count - 1) * cb);
279 	for (i = count - 1; i > 0; i--) {
280 		LINK_SLIST(resfree6, res, link);
281 		res = (void *)((char *)res - cb);
282 	}
283 	DEBUG_INSIST(rl == res);
284 	/* allocate the first */
285 	return res;
286 }
287 
288 
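/*
 * free_res6 / free_res4 - unlink an entry from the active restriction
 * list, drop the RES_LIMITED refcount if it held that flag, scrub the
 * contents, and put the entry back on the corresponding free list.
 */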
289 static void
290 free_res6(struct restrict_6 *	res)
291 {
292 	struct restrict_6 *	unlinked;
293 
294 	restrictcount--;
295 	if (RES_LIMITED & res->ri.rflags) {
296 		dec_res_limited();
297 	}
298 	UNLINK_SLIST(unlinked, restrictlist6, res, link, struct restrict_6);
299 	INSIST(unlinked == res);
300 	zero_mem(res, sizeof(*res));
301 	LINK_SLIST(resfree6, res, link);
302 }
303 
304 static void
305 free_res4(struct restrict_4 *	res)
306 {
307 	struct restrict_4 *	unlinked;
308 
309 	restrictcount--;
310 	if (RES_LIMITED & res->ri.rflags) {
311 		dec_res_limited();
312 	}
313 	UNLINK_SLIST(unlinked, restrictlist4, res, link, struct restrict_4);
314 	INSIST(unlinked == res);
315 	zero_mem(res, sizeof(*res));
316 	LINK_SLIST(resfree4, res, link);
317 }
318 
319 static inline void
320 inc_res_limited(void)
321 {
322 	if (0 == res_limited_refcnt) {
323 		mon_start(MON_RES);
324 	}
325 	res_limited_refcnt++;
326 }
327 
328 
329 static inline void
330 dec_res_limited(void)
331 {
332 	res_limited_refcnt--;
333 	if (0 == res_limited_refcnt) {
334 		mon_stop(MON_RES);
335 	}
336 }
337 
338 
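/*
 * match_restrict4_addr - walk the sorted IPv4 list and return the
 * first (most specific) entry whose masked address matches, honoring
 * RESM_NTPONLY, retiring expired entries along the way.  The terminal
 * default entry means the result is never NULL.
 */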
339 static struct restrict_4 *
340 match_restrict4_addr(
341 	u_int32	addr,
342 	u_short	port
343 	)
344 {
345 	struct restrict_4 *	res;
346 	struct restrict_4 *	next;
347 
348 	for (res = restrictlist4; res != NULL; res = next) {
349 		next = res->link;
350 		if (res->ri.expire && res->ri.expire <= current_time) {
351 			free_res4(res);	/* zeroes the contents */
352 		}
353 		if (   res->v4.addr == (addr & res->v4.mask)
354 		    && (   !(RESM_NTPONLY & res->ri.mflags)
355 			|| NTP_PORT == port)) {
356 
357 			break;
358 		}
359 	}
360 	return res;
361 }
362 
363 
364 static struct restrict_6 *
365 match_restrict6_addr(
366 	const struct in6_addr *	addr,
367 	u_short			port
368 	)
369 {
370 	struct restrict_6 *	res;
371 	struct restrict_6 *	next;
372 	struct in6_addr	masked;
373 
374 	for (res = restrictlist6; res != NULL; res = next) {
375 		next = res->link;
376 		if (res->ri.expire && res->ri.expire <= current_time) {
377 			free_res6(res);
378 		}
379 		MASK_IPV6_ADDR(&masked, addr, &res->v6.mask);
380 		if (ADDR6_EQ(&masked, &res->v6.addr)
381 		    && (   !(RESM_NTPONLY & res->ri.mflags)
382 			|| NTP_PORT == (int)port)) {
383 
384 			break;
385 		}
386 	}
387 	return res;
388 }
389 
390 
391 /*
392  * match_restrict{4,6}_entry - find an exact match on a restrict list.
393  *
394  * Exact match is addr, mask, and mflags all equal.
395  * In order to use more common code for IPv4 and IPv6, these routines
396  * require the caller to populate a restrict_4 or restrict_6 with
397  * mflags and the address and mask as appropriate.  Other fields in
398  * the input entry are ignored.
399  */
400 static struct restrict_4 *
401 match_restrict4_entry(
402 	const struct restrict_4 *	pmatch)
403 {
404 	struct restrict_4 *res;
405 
406 	for (res = restrictlist4; res != NULL; res = res->link) {
407 		if (res->ri.mflags == pmatch->ri.mflags &&
408 		    !memcmp(&res->v4, &pmatch->v4, sizeof(res->v4))) {
409 			break;
410 		}
411 	}
412 	return res;
413 }
414 
415 static struct restrict_6 *
416 match_restrict6_entry(
417 	const struct restrict_6 *	pmatch)
418 {
419 	struct restrict_6 *res;
420 
421 	for (res = restrictlist6; res != NULL; res = res->link) {
422 		if (res->ri.mflags == pmatch->ri.mflags &&
423 		    !memcmp(&res->v6, &pmatch->v6, sizeof(res->v6))) {
424 			break;
425 		}
426 	}
427 	return res;
428 }
429 
430 /*
431  * mflags_sorts_before - common mflags sorting code
432  *
433  * See block comment in init_restrict() above for rationale.
434  */
435 static inline int/*BOOL*/
436 mflags_sorts_before(
437 	u_short	m1,
438 	u_short	m2
439 	)
440 {
441 	if (    (RESM_INTERFACE & m1)
442 	    && !(RESM_INTERFACE & m2)) {
443 		return TRUE;
444 	} else if (   !(RESM_SOURCE & m1)
445 		   &&  (RESM_SOURCE & m2)) {
446 		return TRUE;
447 	} else {
448 		return FALSE;
449 	}
450 }
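/*
 * Net effect for entries with identical address and mask: interface
 * entries (RESM_INTERFACE) sort first, then static configuration
 * entries with no RESM_ bits, then dynamic "restrict source" entries
 * (RESM_SOURCE), matching the precedence laid out in init_restrict().
 */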
451 
452 
453 /*
454  * res_sorts_before4 - compare IPv4 restriction entries
455  *
456  * Returns nonzero if r1 sorts before r2.  We sort by descending
457  * address, then descending mask, then an intricate mflags sort
458  * order explained in a block comment near the top of this file.
459  */
460 static int/*BOOL*/
461 res_sorts_before4(
462 	struct restrict_4 *r1,
463 	struct restrict_4 *r2
464 	)
465 {
466 	int r1_before_r2;
467 
468 	if (r1->v4.addr > r2->v4.addr) {
469 		r1_before_r2 = TRUE;
470 	} else if (r1->v4.addr < r2->v4.addr) {
471 		r1_before_r2 = FALSE;
472 	} else if (r1->v4.mask > r2->v4.mask) {
473 		r1_before_r2 = TRUE;
474 	} else if (r1->v4.mask < r2->v4.mask) {
475 		r1_before_r2 = FALSE;
476 	} else {
477 		r1_before_r2 = mflags_sorts_before(r1->ri.mflags, r2->ri.mflags);
478 	}
479 
480 	return r1_before_r2;
481 }
482 
483 
484 /*
485  * res_sorts_before6 - compare IPv6 restriction entries
486  *
487  * Returns nonzero if r1 sorts before r2.  We sort by descending
488  * address, then descending mask, then an intricate mflags sort
489  * order explained in a block comment near the top of this file.
490  */
491 static int/*BOOL*/
492 res_sorts_before6(
493 	struct restrict_6* r1,
494 	struct restrict_6* r2
495 )
496 {
497 	int r1_before_r2;
498 	int cmp;
499 
500 	cmp = ADDR6_CMP(&r1->v6.addr, &r2->v6.addr);
501 	if (cmp > 0) {		/* r1->addr > r2->addr */
502 		r1_before_r2 = TRUE;
503 	} else if (cmp < 0) {	/* r2->addr > r1->addr */
504 		r1_before_r2 = FALSE;
505 	} else {
506 		cmp = ADDR6_CMP(&r1->v6.mask, &r2->v6.mask);
507 		if (cmp > 0) {		/* r1->mask > r2->mask */
508 			r1_before_r2 = TRUE;
509 		} else if (cmp < 0) {	/* r2->mask > r1->mask */
510 			r1_before_r2 = FALSE;
511 		} else {
512 			r1_before_r2 = mflags_sorts_before(r1->ri.mflags,
513 							   r2->ri.mflags);
514 		}
515 	}
516 
517 	return r1_before_r2;
518 }
519 
520 
521 /*
522  * restrictions - return restrictions for this host in *r4a
523  */
524 void
525 restrictions(
526 	sockaddr_u *srcadr,
527 	r4addr *r4a
528 	)
529 {
530 	struct in6_addr *pin6;
531 
532 	DEBUG_REQUIRE(NULL != r4a);
533 
534 	res_calls++;
535 
536 	if (IS_IPV4(srcadr)) {
537 		struct restrict_4 *match;
538 		/*
539 		 * Ignore any packets with a multicast source address
540 		 * (this should be done early in the receive process,
541 		 * not later!)
542 		 */
543 		if (IN_CLASSD(SRCADR(srcadr))) {
544 			goto multicast;
545 		}
546 
547 		match = match_restrict4_addr(SRCADR(srcadr),
548 					     SRCPORT(srcadr));
549 		DEBUG_INSIST(match != NULL);
550 		match->ri.count++;
551 		/*
552 		 * res_not_found counts only use of the final default
553 		 * entry, not any "restrict default ntpport ...", which
554 		 * would be just before the final default.
555 		 */
556 		if (&restrict_def4 == match)
557 			res_not_found++;
558 		else
559 			res_found++;
560 		r4a->rflags = match->ri.rflags;
561 		r4a->ippeerlimit = match->ri.ippeerlimit;
562 	} else {
563 		struct restrict_6 *match;
564 		DEBUG_REQUIRE(IS_IPV6(srcadr));
565 
566 		pin6 = PSOCK_ADDR6(srcadr);
567 
568 		/*
569 		 * Ignore any packets with a multicast source address
570 		 * (this should be done early in the receive process,
571 		 * not later!)
572 		 */
573 		if (IN6_IS_ADDR_MULTICAST(pin6)) {
574 			goto multicast;
575 		}
576 		match = match_restrict6_addr(pin6, SRCPORT(srcadr));
577 		DEBUG_INSIST(match != NULL);
578 		match->ri.count++;
579 		if (&restrict_def6 == match)
580 			res_not_found++;
581 		else
582 			res_found++;
583 		r4a->rflags = match->ri.rflags;
584 		r4a->ippeerlimit = match->ri.ippeerlimit;
585 	}
586 
587 	return;
588 
589     multicast:
590 	r4a->rflags = RES_IGNORE;
591 	r4a->ippeerlimit = 0;
592 }
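/*
 * Typical use, as an illustrative sketch rather than a verbatim copy
 * of the caller in the receive path:
 *
 *	r4addr	r4a;
 *
 *	restrictions(&rbufp->recv_srcadr, &r4a);
 *	if (RES_IGNORE & r4a.rflags)
 *		return;		(drop the packet without replying)
 */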
593 
594 
595 #ifdef DEBUG
596 /* display string for restrict_op */
597 const char *
598 resop_str(restrict_op op)
599 {
600 	switch (op) {
601 	    case RESTRICT_FLAGS:	return "RESTRICT_FLAGS";
602 	    case RESTRICT_UNFLAG:	return "RESTRICT_UNFLAG";
603 	    case RESTRICT_REMOVE:	return "RESTRICT_REMOVE";
604 	    case RESTRICT_REMOVEIF:	return "RESTRICT_REMOVEIF";
605 	}
606 	DEBUG_INVARIANT(!"bad restrict_op in resop_str");
607 	return "";	/* silence not all paths return value warning */
608 }
609 #endif	/* DEBUG */
610 
611 
612 /*
613  * hack_restrict - add/subtract/manipulate entries on the restrict list
614  */
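/*
 * A NULL resaddr with RESTRICT_FLAGS and RESM_SOURCE does not touch
 * the lists at all; it only saves rflags, mflags and ippeerlimit as
 * the template later applied per peer by restrict_source().  Returns
 * TRUE on success, FALSE when there is no matching entry to unflag or
 * remove.
 */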
615 int/*BOOL*/
616 hack_restrict(
617 	restrict_op	op,
618 	sockaddr_u *	resaddr,
619 	sockaddr_u *	resmask,
620 	short		ippeerlimit,
621 	u_short		mflags,
622 	u_short		rflags,
623 	u_int32		expire
624 	)
625 {
626 	int		bump_res_limited = FALSE;
627 	struct restrict_4	match4, *res4 = NULL;
628 	struct restrict_6	match6, *res6 = NULL;
629 	struct restrict_info *ri;
630 
631 #ifdef DEBUG
632 	if (debug > 0) {
633 		printf("hack_restrict: op %s addr %s mask %s",
634 			resop_str(op), stoa(resaddr), stoa(resmask));
635 		if (ippeerlimit >= 0) {
636 			printf(" ippeerlimit %d", ippeerlimit);
637 		}
638 		printf(" mflags %s rflags %s", mflags_str(mflags),
639 		       rflags_str(rflags));
640 		if (expire) {
641 			printf(" lifetime %u\n",
642 			       expire - (u_int32)current_time);
643 		} else {
644 			printf("\n");
645 		}
646 	}
647 #endif
648 
649 	if (NULL == resaddr) {
650 		DEBUG_REQUIRE(NULL == resmask);
651 		DEBUG_REQUIRE(RESTRICT_FLAGS == op);
652 		DEBUG_REQUIRE(RESM_SOURCE & mflags);
653 		restrict_source_rflags = rflags;
654 		restrict_source_mflags = mflags;
655 		restrict_source_ippeerlimit = ippeerlimit;
656 		restrict_source_enabled = TRUE;
657 		DPRINTF(1, ("restrict source template saved\n"));
658 		return TRUE;
659 	}
660 
661 
662 	if (IS_IPV4(resaddr)) {
663 		DEBUG_INVARIANT(IS_IPV4(resmask));
664 		/*
665 		 * Get address and mask in host byte order for easy
666 		 * comparison as u_int32
667 		 */
668 		ZERO(match4);
669 		match4.v4.addr = SRCADR(resaddr);
670 		match4.v4.mask = SRCADR(resmask);
671 		match4.v4.addr &= match4.v4.mask;
672 		match4.ri.mflags = mflags;
673 		res4 = match_restrict4_entry(&match4);
674 		ri = res4 ? &res4->ri : NULL;
675 	} else {
676 		DEBUG_INVARIANT(IS_IPV6(resaddr));
677 		DEBUG_INVARIANT(IS_IPV6(resmask));
678 		/*
679 		 * Get address and mask in network byte order for easy
680 		 * comparison as byte sequences (e.g. memcmp())
681 		 */
682 		ZERO(match6);
683 		match6.v6.mask = SOCK_ADDR6(resmask);
684 		MASK_IPV6_ADDR(&match6.v6.addr, PSOCK_ADDR6(resaddr),
685 			       &match6.v6.mask);
686 		match6.ri.mflags = mflags;
687 		res6 = match_restrict6_entry(&match6);
688 		ri = res6 ? &res6->ri : NULL;
689 	}
690 
691 
692 	switch (op) {
693 
694 	case RESTRICT_FLAGS:
695 		/*
696 		 * Here we add bits to the rflags. If we already have
697 		 * this restriction modify it.
698 		 */
699 		if (NULL != ri) {
700 			if (    (RES_LIMITED & rflags)
701 			    && !(RES_LIMITED & ri->rflags)) {
702 
703 				bump_res_limited = TRUE;
704 			}
705 			ri->rflags |= rflags;
706 			ri->expire = expire;
707 		} else {
708 			if (IS_IPV4(resaddr)) {
709 				match4.ri.rflags = rflags;
710 				match4.ri.expire = expire;
711 				match4.ri.ippeerlimit = ippeerlimit;
712 				res4 = alloc_res4();
713 				memcpy(res4, &match4, sizeof(*res4));
714 				LINK_SORT_SLIST(
715 				    restrictlist4, res4,
716 				    res_sorts_before4(res4, L_S_S_CUR()),
717 				    link, struct restrict_4);
718 			} else {
719 				match6.ri.rflags = rflags;
720 				match6.ri.expire = expire;
721 				match6.ri.ippeerlimit = ippeerlimit;
722 				res6 = alloc_res6();
723 				memcpy(res6, &match6, sizeof(*res6));
724 				LINK_SORT_SLIST(
725 				    restrictlist6, res6,
726 				    res_sorts_before6(res6, L_S_S_CUR()),
727 				    link, struct restrict_6);
728 			}
729 			restrictcount++;
730 			if (RES_LIMITED & rflags) {
731 				bump_res_limited = TRUE;
732 			}
733 		}
734 		if (bump_res_limited) {
735 			inc_res_limited();
736 		}
737 		return TRUE;
738 
739 	case RESTRICT_UNFLAG:
740 		/*
741 		 * Remove some bits from the rflags. If we didn't
742 		 * find this one, just return.
743 		 */
744 		if (NULL == ri) {
745 			DPRINTF(1, ("No match for %s %s removing rflags %s\n",
746 				    stoa(resaddr), stoa(resmask),
747 				    rflags_str(rflags)));
748 			return FALSE;
749 		}
750 		if (   (RES_LIMITED & ri->rflags)
751 		    && (RES_LIMITED & rflags)) {
752 			dec_res_limited();
753 		}
754 		ri->rflags &= ~rflags;
755 		return TRUE;
756 
757 	case RESTRICT_REMOVE:
758 	case RESTRICT_REMOVEIF:
759 		/*
760 		 * Remove an entry from the table entirely if we
761 		 * found one. Don't remove the default entry and
762 		 * don't remove an interface entry unless asked.
763 		 */
764 		if (   ri != NULL
765 		    && (   RESTRICT_REMOVEIF == op
766 			|| !(RESM_INTERFACE & ri->mflags))) {
767 			if (res4 && res4 != &restrict_def4) {
768 				free_res4(res4);
769 				return TRUE;
770 			}
771 			if (res6 && res6 != &restrict_def6) {
772 				free_res6(res6);
773 				return TRUE;
774 			}
775 		}
776 		DPRINTF(1, ("No match removing %s %s restriction\n",
777 			    stoa(resaddr), stoa(resmask)));
778 		return FALSE;
779 	}
780 	/* notreached */
781 	return FALSE;
782 }
783 
784 
785 /*
786  * restrict_source - maintains dynamic "restrict source ..." entries as
787  *		     peers come and go.
788  */
789 void
790 restrict_source(
791 	sockaddr_u *	addr,
792 	int		farewell,	/* TRUE to remove */
793 	u_int32		lifetime	/* seconds, 0 forever */
794 	)
795 {
796 	sockaddr_u	onesmask;
797 	int/*BOOL*/	success;
798 
799 	if (   !restrict_source_enabled || SOCK_UNSPEC(addr)
800 	    || IS_MCAST(addr) || ISREFCLOCKADR(addr)) {
801 		return;
802 	}
803 
804 	REQUIRE(AF_INET == AF(addr) || AF_INET6 == AF(addr));
805 
806 	SET_HOSTMASK(&onesmask, AF(addr));
807 	if (farewell) {
808 		success = hack_restrict(RESTRICT_REMOVE, addr, &onesmask,
809 					0, RESM_SOURCE, 0, 0);
810 		if (success) {
811 			DPRINTF(1, ("%s %s removed\n", __func__,
812 				    stoa(addr)));
813 		} else {
814 			msyslog(LOG_ERR, "%s remove %s failed",
815 					 __func__, stoa(addr));
816 		}
817 		return;
818 	}
819 
820 	success = hack_restrict(RESTRICT_FLAGS, addr, &onesmask,
821 				restrict_source_ippeerlimit,
822 				restrict_source_mflags,
823 				restrict_source_rflags,
824 				lifetime > 0
825 				    ? lifetime + current_time
826 				    : 0);
827 	if (success) {
828 		DPRINTF(1, ("%s %s add/upd\n", __func__,
829 			    stoa(addr)));
830 	} else {
831 		msyslog(LOG_ERR, "%s %s failed", __func__, stoa(addr));
832 	}
833 }
834 
835 
836 #ifdef DEBUG
837 /* Convert restriction RES_ flag bits into a display string */
838 const char *
839 rflags_str(
840 	u_short rflags
841 	)
842 {
843 	const size_t	sz = LIB_BUFLENGTH;
844 	char *		rfs;
845 
846 	LIB_GETBUF(rfs);
847 	rfs[0] = '\0';
848 
849 	if (rflags & RES_FLAKE) {
850 		CLEAR_BIT_IF_DEBUG(RES_FLAKE, rflags);
851 		append_flagstr(rfs, sz, "flake");
852 	}
853 
854 	if (rflags & RES_IGNORE) {
855 		CLEAR_BIT_IF_DEBUG(RES_IGNORE, rflags);
856 		append_flagstr(rfs, sz, "ignore");
857 	}
858 
859 	if (rflags & RES_KOD) {
860 		CLEAR_BIT_IF_DEBUG(RES_KOD, rflags);
861 		append_flagstr(rfs, sz, "kod");
862 	}
863 
864 	if (rflags & RES_MSSNTP) {
865 		CLEAR_BIT_IF_DEBUG(RES_MSSNTP, rflags);
866 		append_flagstr(rfs, sz, "mssntp");
867 	}
868 
869 	if (rflags & RES_LIMITED) {
870 		CLEAR_BIT_IF_DEBUG(RES_LIMITED, rflags);
871 		append_flagstr(rfs, sz, "limited");
872 	}
873 
874 	if (rflags & RES_LPTRAP) {
875 		CLEAR_BIT_IF_DEBUG(RES_LPTRAP, rflags);
876 		append_flagstr(rfs, sz, "lptrap");
877 	}
878 
879 	if (rflags & RES_NOMODIFY) {
880 		CLEAR_BIT_IF_DEBUG(RES_NOMODIFY, rflags);
881 		append_flagstr(rfs, sz, "nomodify");
882 	}
883 
884 	if (rflags & RES_NOMRULIST) {
885 		CLEAR_BIT_IF_DEBUG(RES_NOMRULIST, rflags);
886 		append_flagstr(rfs, sz, "nomrulist");
887 	}
888 
889 	if (rflags & RES_NOEPEER) {
890 		CLEAR_BIT_IF_DEBUG(RES_NOEPEER, rflags);
891 		append_flagstr(rfs, sz, "noepeer");
892 	}
893 
894 	if (rflags & RES_NOPEER) {
895 		CLEAR_BIT_IF_DEBUG(RES_NOPEER, rflags);
896 		append_flagstr(rfs, sz, "nopeer");
897 	}
898 
899 	if (rflags & RES_NOQUERY) {
900 		CLEAR_BIT_IF_DEBUG(RES_NOQUERY, rflags);
901 		append_flagstr(rfs, sz, "noquery");
902 	}
903 
904 	if (rflags & RES_DONTSERVE) {
905 		CLEAR_BIT_IF_DEBUG(RES_DONTSERVE, rflags);
906 		append_flagstr(rfs, sz, "dontserve");
907 	}
908 
909 	if (rflags & RES_NOTRAP) {
910 		CLEAR_BIT_IF_DEBUG(RES_NOTRAP, rflags);
911 		append_flagstr(rfs, sz, "notrap");
912 	}
913 
914 	if (rflags & RES_DONTTRUST) {
915 		CLEAR_BIT_IF_DEBUG(RES_DONTTRUST, rflags);
916 		append_flagstr(rfs, sz, "notrust");
917 	}
918 
919 	if (rflags & RES_SRVRSPFUZ) {
920 		CLEAR_BIT_IF_DEBUG(RES_SRVRSPFUZ, rflags);
921 		append_flagstr(rfs, sz, "srvrspfuz");
922 	}
923 
924 	if (rflags & RES_VERSION) {
925 		CLEAR_BIT_IF_DEBUG(RES_VERSION, rflags);
926 		append_flagstr(rfs, sz, "version");
927 	}
928 
929 	DEBUG_INVARIANT(!rflags);
930 
931 	if ('\0' == rfs[0]) {
932 		append_flagstr(rfs, sz, "(none)");
933 	}
934 
935 	return rfs;
936 }
937 
938 
939 /* Convert restriction match RESM_ flag bits into a display string */
940 const char *
941 mflags_str(
942 	u_short mflags
943 	)
944 {
945 	const size_t	sz = LIB_BUFLENGTH;
946 	char *		mfs;
947 
948 	LIB_GETBUF(mfs);
949 	mfs[0] = '\0';
950 
951 	if (mflags & RESM_NTPONLY) {
952 		CLEAR_BIT_IF_DEBUG(RESM_NTPONLY, mflags);
953 		append_flagstr(mfs, sz, "ntponly");
954 	}
955 
956 	if (mflags & RESM_SOURCE) {
957 		CLEAR_BIT_IF_DEBUG(RESM_SOURCE, mflags);
958 		append_flagstr(mfs, sz, "source");
959 	}
960 
961 	if (mflags & RESM_INTERFACE) {
962 		CLEAR_BIT_IF_DEBUG(RESM_INTERFACE, mflags);
963 		append_flagstr(mfs, sz, "interface");
964 	}
965 
966 	DEBUG_INVARIANT(!mflags);
967 
968 	return mfs;
969 }
970 #endif	/* DEBUG */
971