xref: /netbsd-src/external/bsd/ntp/dist/ntpd/ntp_restrict.c (revision 6a493d6bc668897c91594964a732d38505b70cbb)
1 /*	$NetBSD: ntp_restrict.c,v 1.3 2013/12/28 03:20:14 christos Exp $	*/
2 
3 /*
4  * ntp_restrict.c - determine host restrictions
5  */
6 #ifdef HAVE_CONFIG_H
7 #include <config.h>
8 #endif
9 
10 #include <stdio.h>
11 #include <sys/types.h>
12 
13 #include "ntpd.h"
14 #include "ntp_if.h"
15 #include "ntp_lists.h"
16 #include "ntp_stdlib.h"
17 #include "ntp_assert.h"
18 
19 /*
20  * This code keeps a simple address-and-mask list of hosts we want
21  * to place restrictions on (or remove them from). The restrictions
22  * are implemented as a set of flags which tell you what the host
23  * can't do. There is a subroutine entry to return the flags. The
24  * list is kept sorted to reduce the average number of comparisons
25  * and make sure you get the set of restrictions most specific to
26  * the address.
27  *
28  * The algorithm is that, when looking up a host, it is first assumed
29  * that the default set of restrictions will apply. It then searches
30  * down through the list. Whenever it finds a match it adopts the
31  * match's flags instead. When you hit the point where the sorted
32  * address is greater than the target, you return with the last set of
33  * flags you found. Because of the ordering of the list, the most
34  * specific match will provide the final set of flags.
35  *
36  * This was originally intended to restrict you from sync'ing to your
37  * own broadcasts when you are doing that, by restricting yourself from
38  * your own interfaces. It was also thought it would sometimes be useful
39  * to keep a misbehaving host or two from abusing your primary clock. It
40  * has been expanded, however, to suit the needs of those with more
41  * restrictive access policies.
42  */
43 /*
44  * We will use two lists, one for IPv4 addresses and one for IPv6
45  * addresses. This is not protocol-independent but for now I can't
46  * find a way to respect this. We'll check this later... JFB 07/2001
47  */
/*
 * MASK_IPV6_ADDR - byte-wise AND of an IPv6 address with a mask:
 * stores (src & msk) into dst, one s6_addr octet at a time.
 */
#define MASK_IPV6_ADDR(dst, src, msk)					\
	do {								\
		int idx;						\
		for (idx = 0; idx < (int)COUNTOF((dst)->s6_addr); idx++) { \
			(dst)->s6_addr[idx] = (src)->s6_addr[idx]	\
					      & (msk)->s6_addr[idx];	\
		}							\
	} while (0)
56 
57 /*
58  * We allocate INC_RESLIST{4|6} entries to the free list whenever empty.
59  * Auto-tune these to be just less than 1KB (leaving at least 16 bytes
60  * for allocator overhead).
61  */
62 #define	INC_RESLIST4	((1024 - 16) / V4_SIZEOF_RESTRICT_U)
63 #define	INC_RESLIST6	((1024 - 16) / V6_SIZEOF_RESTRICT_U)
64 
65 /*
66  * The restriction list
67  */
68 restrict_u *restrictlist4;
69 restrict_u *restrictlist6;
70 static int restrictcount;	/* count in the restrict lists */
71 
72 /*
73  * The free list and associated counters.  Also some uninteresting
74  * stat counters.
75  */
76 static restrict_u *resfree4;	/* available entries (free list) */
77 static restrict_u *resfree6;
78 
79 static u_long res_calls;
80 static u_long res_found;
81 static u_long res_not_found;
82 
83 /*
84  * Count number of restriction entries referring to RES_LIMITED, to
85  * control implicit activation/deactivation of the MRU monlist.
86  */
87 static	u_long res_limited_refcnt;
88 
89 /*
90  * Our default entries.
91  */
92 static	restrict_u	restrict_def4;
93 static	restrict_u	restrict_def6;
94 
95 /*
96  * "restrict source ..." enabled knob and restriction bits.
97  */
98 static	int		restrict_source_enabled;
99 static	u_short		restrict_source_flags;
100 static	u_short		restrict_source_mflags;
101 
102 /*
103  * private functions
104  */
105 static restrict_u *	alloc_res4(void);
106 static restrict_u *	alloc_res6(void);
107 static void		free_res(restrict_u *, int);
108 static void		inc_res_limited(void);
109 static void		dec_res_limited(void);
110 static restrict_u *	match_restrict4_addr(u_int32, u_short);
111 static restrict_u *	match_restrict6_addr(const struct in6_addr *,
112 					     u_short);
113 static restrict_u *	match_restrict_entry(const restrict_u *, int);
114 static int		res_sorts_before4(restrict_u *, restrict_u *);
115 static int		res_sorts_before6(restrict_u *, restrict_u *);
116 
117 
118 /*
119  * init_restrict - initialize the restriction data structures
120  */
121 void
122 init_restrict(void)
123 {
124 	/*
125 	 * The restriction lists begin with a default entry with address
126 	 * and mask 0, which will match any entry.  The lists are kept
127 	 * sorted by descending address followed by descending mask:
128 	 *
129 	 *   address	  mask
130 	 * 192.168.0.0	255.255.255.0	kod limited noquery nopeer
131 	 * 192.168.0.0	255.255.0.0	kod limited
132 	 * 0.0.0.0	0.0.0.0		kod limited noquery
133 	 *
134 	 * The first entry which matches an address is used.  With the
135 	 * example restrictions above, 192.168.0.0/24 matches the first
136 	 * entry, the rest of 192.168.0.0/16 matches the second, and
137 	 * everything else matches the third (default).
138 	 *
139 	 * Note this achieves the same result a little more efficiently
140 	 * than the documented behavior, which is to keep the lists
141 	 * sorted by ascending address followed by ascending mask, with
142 	 * the _last_ matching entry used.
143 	 *
144 	 * An additional wrinkle is we may have multiple entries with
145 	 * the same address and mask but differing match flags (mflags).
146 	 * At present there is only one, RESM_NTPONLY.  Entries with
147 	 * RESM_NTPONLY are sorted earlier so they take precedence over
148 	 * any otherwise similar entry without.  Again, this is the same
149 	 * behavior as but reversed implementation compared to the docs.
150 	 *
151 	 */
152 	LINK_SLIST(restrictlist4, &restrict_def4, link);
153 	LINK_SLIST(restrictlist6, &restrict_def6, link);
154 	restrictcount = 2;
155 }
156 
157 
158 static restrict_u *
159 alloc_res4(void)
160 {
161 	const size_t	cb = V4_SIZEOF_RESTRICT_U;
162 	const size_t	count = INC_RESLIST4;
163 	restrict_u *	rl;
164 	restrict_u *	res;
165 	int		i;
166 
167 	UNLINK_HEAD_SLIST(res, resfree4, link);
168 	if (res != NULL)
169 		return res;
170 
171 	rl = emalloc_zero(count * cb);
172 	/* link all but the first onto free list */
173 	res = (void *)((char *)rl + (count - 1) * cb);
174 	for (i = count - 1; i > 0; i--) {
175 		LINK_SLIST(resfree4, res, link);
176 		res = (void *)((char *)res - cb);
177 	}
178 	NTP_INSIST(rl == res);
179 	/* allocate the first */
180 	return res;
181 }
182 
183 
184 static restrict_u *
185 alloc_res6(void)
186 {
187 	const size_t	cb = V6_SIZEOF_RESTRICT_U;
188 	const size_t	count = INC_RESLIST6;
189 	restrict_u *	rl;
190 	restrict_u *	res;
191 	int		i;
192 
193 	UNLINK_HEAD_SLIST(res, resfree6, link);
194 	if (res != NULL)
195 		return res;
196 
197 	rl = emalloc_zero(count * cb);
198 	/* link all but the first onto free list */
199 	res = (void *)((char *)rl + (count - 1) * cb);
200 	for (i = count - 1; i > 0; i--) {
201 		LINK_SLIST(resfree6, res, link);
202 		res = (void *)((char *)res - cb);
203 	}
204 	NTP_INSIST(rl == res);
205 	/* allocate the first */
206 	return res;
207 }
208 
209 
210 static void
211 free_res(
212 	restrict_u *	res,
213 	int		v6
214 	)
215 {
216 	restrict_u **	plisthead;
217 	restrict_u *	unlinked;
218 
219 	restrictcount--;
220 	if (RES_LIMITED & res->flags)
221 		dec_res_limited();
222 
223 	if (v6)
224 		plisthead = &restrictlist6;
225 	else
226 		plisthead = &restrictlist4;
227 	UNLINK_SLIST(unlinked, *plisthead, res, link, restrict_u);
228 	NTP_INSIST(unlinked == res);
229 
230 	if (v6) {
231 		zero_mem(res, V6_SIZEOF_RESTRICT_U);
232 		plisthead = &resfree6;
233 	} else {
234 		zero_mem(res, V4_SIZEOF_RESTRICT_U);
235 		plisthead = &resfree4;
236 	}
237 	LINK_SLIST(*plisthead, res, link);
238 }
239 
240 
241 static void
242 inc_res_limited(void)
243 {
244 	if (!res_limited_refcnt)
245 		mon_start(MON_RES);
246 	res_limited_refcnt++;
247 }
248 
249 
250 static void
251 dec_res_limited(void)
252 {
253 	res_limited_refcnt--;
254 	if (!res_limited_refcnt)
255 		mon_stop(MON_RES);
256 }
257 
258 
259 static restrict_u *
260 match_restrict4_addr(
261 	u_int32	addr,
262 	u_short	port
263 	)
264 {
265 	const int	v6 = 0;
266 	restrict_u *	res;
267 	restrict_u *	next;
268 
269 	for (res = restrictlist4; res != NULL; res = next) {
270 		next = res->link;
271 		if (res->expire &&
272 		    res->expire <= current_time)
273 			free_res(res, v6);
274 		if (res->u.v4.addr == (addr & res->u.v4.mask)
275 		    && (!(RESM_NTPONLY & res->mflags)
276 			|| NTP_PORT == port))
277 			break;
278 	}
279 	return res;
280 }
281 
282 
283 static restrict_u *
284 match_restrict6_addr(
285 	const struct in6_addr *	addr,
286 	u_short			port
287 	)
288 {
289 	const int	v6 = 1;
290 	restrict_u *	res;
291 	restrict_u *	next;
292 	struct in6_addr	masked;
293 
294 	for (res = restrictlist6; res != NULL; res = next) {
295 		next = res->link;
296 		NTP_INSIST(next != res);
297 		if (res->expire &&
298 		    res->expire <= current_time)
299 			free_res(res, v6);
300 		MASK_IPV6_ADDR(&masked, addr, &res->u.v6.mask);
301 		if (ADDR6_EQ(&masked, &res->u.v6.addr)
302 		    && (!(RESM_NTPONLY & res->mflags)
303 			|| NTP_PORT == (int)port))
304 			break;
305 	}
306 	return res;
307 }
308 
309 
310 /*
311  * match_restrict_entry - find an exact match on a restrict list.
312  *
313  * Exact match is addr, mask, and mflags all equal.
314  * In order to use more common code for IPv4 and IPv6, this routine
315  * requires the caller to populate a restrict_u with mflags and either
316  * the v4 or v6 address and mask as appropriate.  Other fields in the
317  * input restrict_u are ignored.
318  */
319 static restrict_u *
320 match_restrict_entry(
321 	const restrict_u *	pmatch,
322 	int			v6
323 	)
324 {
325 	restrict_u *res;
326 	restrict_u *rlist;
327 	size_t cb;
328 
329 	if (v6) {
330 		rlist = restrictlist6;
331 		cb = sizeof(pmatch->u.v6);
332 	} else {
333 		rlist = restrictlist4;
334 		cb = sizeof(pmatch->u.v4);
335 	}
336 
337 	for (res = rlist; res != NULL; res = res->link)
338 		if (res->mflags == pmatch->mflags &&
339 		    !memcmp(&res->u, &pmatch->u, cb))
340 			break;
341 	return res;
342 }
343 
344 
345 /*
346  * res_sorts_before4 - compare two restrict4 entries
347  *
348  * Returns nonzero if r1 sorts before r2.  We sort by descending
349  * address, then descending mask, then descending mflags, so sorting
350  * before means having a higher value.
351  */
352 static int
353 res_sorts_before4(
354 	restrict_u *r1,
355 	restrict_u *r2
356 	)
357 {
358 	int r1_before_r2;
359 
360 	if (r1->u.v4.addr > r2->u.v4.addr)
361 		r1_before_r2 = 1;
362 	else if (r1->u.v4.addr < r2->u.v4.addr)
363 		r1_before_r2 = 0;
364 	else if (r1->u.v4.mask > r2->u.v4.mask)
365 		r1_before_r2 = 1;
366 	else if (r1->u.v4.mask < r2->u.v4.mask)
367 		r1_before_r2 = 0;
368 	else if (r1->mflags > r2->mflags)
369 		r1_before_r2 = 1;
370 	else
371 		r1_before_r2 = 0;
372 
373 	return r1_before_r2;
374 }
375 
376 
377 /*
378  * res_sorts_before6 - compare two restrict6 entries
379  *
380  * Returns nonzero if r1 sorts before r2.  We sort by descending
381  * address, then descending mask, then descending mflags, so sorting
382  * before means having a higher value.
383  */
384 static int
385 res_sorts_before6(
386 	restrict_u *r1,
387 	restrict_u *r2
388 	)
389 {
390 	int r1_before_r2;
391 	int cmp;
392 
393 	cmp = ADDR6_CMP(&r1->u.v6.addr, &r2->u.v6.addr);
394 	if (cmp > 0)		/* r1->addr > r2->addr */
395 		r1_before_r2 = 1;
396 	else if (cmp < 0)	/* r2->addr > r1->addr */
397 		r1_before_r2 = 0;
398 	else {
399 		cmp = ADDR6_CMP(&r1->u.v6.mask, &r2->u.v6.mask);
400 		if (cmp > 0)		/* r1->mask > r2->mask*/
401 			r1_before_r2 = 1;
402 		else if (cmp < 0)	/* r2->mask > r1->mask */
403 			r1_before_r2 = 0;
404 		else if (r1->mflags > r2->mflags)
405 			r1_before_r2 = 1;
406 		else
407 			r1_before_r2 = 0;
408 	}
409 
410 	return r1_before_r2;
411 }
412 
413 
414 /*
415  * restrictions - return restrictions for this host
416  */
417 u_short
418 restrictions(
419 	sockaddr_u *srcadr
420 	)
421 {
422 	restrict_u *match;
423 	struct in6_addr *pin6;
424 	u_short flags;
425 
426 	res_calls++;
427 	flags = 0;
428 	/* IPv4 source address */
429 	if (IS_IPV4(srcadr)) {
430 		/*
431 		 * Ignore any packets with a multicast source address
432 		 * (this should be done early in the receive process,
433 		 * not later!)
434 		 */
435 		if (IN_CLASSD(SRCADR(srcadr)))
436 			return (int)RES_IGNORE;
437 
438 		match = match_restrict4_addr(SRCADR(srcadr),
439 					     SRCPORT(srcadr));
440 		match->count++;
441 		/*
442 		 * res_not_found counts only use of the final default
443 		 * entry, not any "restrict default ntpport ...", which
444 		 * would be just before the final default.
445 		 */
446 		if (&restrict_def4 == match)
447 			res_not_found++;
448 		else
449 			res_found++;
450 		flags = match->flags;
451 	}
452 
453 	/* IPv6 source address */
454 	if (IS_IPV6(srcadr)) {
455 		pin6 = PSOCK_ADDR6(srcadr);
456 
457 		/*
458 		 * Ignore any packets with a multicast source address
459 		 * (this should be done early in the receive process,
460 		 * not later!)
461 		 */
462 		if (IN6_IS_ADDR_MULTICAST(pin6))
463 			return (int)RES_IGNORE;
464 
465 		match = match_restrict6_addr(pin6, SRCPORT(srcadr));
466 		match->count++;
467 		if (&restrict_def6 == match)
468 			res_not_found++;
469 		else
470 			res_found++;
471 		flags = match->flags;
472 	}
473 	return (flags);
474 }
475 
476 
/*
 * hack_restrict - add/subtract/manipulate entries on the restrict list
 *
 * op is RESTRICT_FLAGS (add flag bits, creating the entry if needed),
 * RESTRICT_UNFLAG (clear flag bits of an existing entry), or
 * RESTRICT_REMOVE / RESTRICT_REMOVEIF (delete the entry; REMOVEIF may
 * also delete RESM_INTERFACE entries).  resaddr/resmask identify the
 * entry; a NULL resaddr instead records flags/mflags for future
 * "restrict source ..." entries (see restrict_source()).  expire is
 * 0 for a permanent entry, else a current_time deadline.
 */
void
hack_restrict(
	int		op,
	sockaddr_u *	resaddr,
	sockaddr_u *	resmask,
	u_short		mflags,
	u_short		flags,
	u_long		expire
	)
{
	int		v6;
	restrict_u	match;	/* stack template used to find/create the entry */
	restrict_u *	res;
	restrict_u **	plisthead;

	DPRINTF(1, ("restrict: op %d addr %s mask %s mflags %08x flags %08x\n",
		    op, stoa(resaddr), stoa(resmask), mflags, flags));

	if (NULL == resaddr) {
		/* no address: this call configures "restrict source" */
		NTP_REQUIRE(NULL == resmask);
		NTP_REQUIRE(RESTRICT_FLAGS == op);
		restrict_source_flags = flags;
		restrict_source_mflags = mflags;
		restrict_source_enabled = 1;
		return;
	}

	ZERO(match);
	/* silence VC9 potentially uninit warnings */
	res = NULL;
	v6 = 0;

	if (IS_IPV4(resaddr)) {
		v6 = 0;
		/*
		 * Get address and mask in host byte order for easy
		 * comparison as u_int32
		 */
		match.u.v4.addr = SRCADR(resaddr);
		match.u.v4.mask = SRCADR(resmask);
		match.u.v4.addr &= match.u.v4.mask;

	} else if (IS_IPV6(resaddr)) {
		v6 = 1;
		/*
		 * Get address and mask in network byte order for easy
		 * comparison as byte sequences (e.g. memcmp())
		 */
		match.u.v6.mask = SOCK_ADDR6(resmask);
		MASK_IPV6_ADDR(&match.u.v6.addr, PSOCK_ADDR6(resaddr),
			       &match.u.v6.mask);

	} else	/* not IPv4 nor IPv6 */
		NTP_REQUIRE(0);

	match.flags = flags;
	match.mflags = mflags;
	match.expire = expire;
	/* exact match is on mflags plus address/mask only */
	res = match_restrict_entry(&match, v6);

	switch (op) {

	case RESTRICT_FLAGS:
		/*
		 * Here we add bits to the flags. If this is a
		 * new restriction add it.
		 */
		if (NULL == res) {
			if (v6) {
				res = alloc_res6();
				memcpy(res, &match,
				       V6_SIZEOF_RESTRICT_U);
				plisthead = &restrictlist6;
			} else {
				res = alloc_res4();
				memcpy(res, &match,
				       V4_SIZEOF_RESTRICT_U);
				plisthead = &restrictlist4;
			}
			/* insert preserving descending sort order */
			LINK_SORT_SLIST(
				*plisthead, res,
				(v6)
				  ? res_sorts_before6(res, L_S_S_CUR())
				  : res_sorts_before4(res, L_S_S_CUR()),
				link, restrict_u);
			restrictcount++;
			if (RES_LIMITED & flags)
				inc_res_limited();
		} else {
			/* existing entry: OR in the new flag bits */
			if ((RES_LIMITED & flags) &&
			    !(RES_LIMITED & res->flags))
				inc_res_limited();
			res->flags |= flags;
		}
		break;

	case RESTRICT_UNFLAG:
		/*
		 * Remove some bits from the flags. If we didn't
		 * find this one, just return.
		 */
		if (res != NULL) {
			if ((RES_LIMITED & res->flags)
			    && (RES_LIMITED & flags))
				dec_res_limited();
			res->flags &= ~flags;
		}
		break;

	case RESTRICT_REMOVE:
	case RESTRICT_REMOVEIF:
		/*
		 * Remove an entry from the table entirely if we
		 * found one. Don't remove the default entry and
		 * don't remove an interface entry.
		 */
		if (res != NULL
		    && (RESTRICT_REMOVEIF == op
			|| !(RESM_INTERFACE & res->mflags))
		    && res != &restrict_def4
		    && res != &restrict_def6)
			free_res(res, v6);
		break;

	default:	/* unknown op */
		NTP_INSIST(0);
		break;
	}

}
610 
611 
612 /*
613  * restrict_source - maintains dynamic "restrict source ..." entries as
614  *		     peers come and go.
615  */
616 void
617 restrict_source(
618 	sockaddr_u *	addr,
619 	int		farewell,	/* 0 to add, 1 to remove */
620 	u_long		expire		/* 0 is infinite, valid until */
621 	)
622 {
623 	sockaddr_u	onesmask;
624 	restrict_u *	res;
625 	int		found_specific;
626 
627 	if (!restrict_source_enabled || SOCK_UNSPEC(addr) ||
628 	    IS_MCAST(addr) || ISREFCLOCKADR(addr))
629 		return;
630 
631 	NTP_REQUIRE(AF_INET == AF(addr) || AF_INET6 == AF(addr));
632 
633 	SET_HOSTMASK(&onesmask, AF(addr));
634 	if (farewell) {
635 		hack_restrict(RESTRICT_REMOVE, addr, &onesmask,
636 			      0, 0, 0);
637 		DPRINTF(1, ("restrict_source: %s removed", stoa(addr)));
638 		return;
639 	}
640 
641 	/*
642 	 * If there is a specific entry for this address, hands
643 	 * off, as it is condidered more specific than "restrict
644 	 * server ...".
645 	 * However, if the specific entry found is a fleeting one
646 	 * added by pool_xmit() before soliciting, replace it
647 	 * immediately regardless of the expire value to make way
648 	 * for the more persistent entry.
649 	 */
650 	if (IS_IPV4(addr)) {
651 		res = match_restrict4_addr(SRCADR(addr), SRCPORT(addr));
652 		found_specific = (SRCADR(&onesmask) == res->u.v4.mask);
653 	} else {
654 		res = match_restrict6_addr(&SOCK_ADDR6(addr),
655 					   SRCPORT(addr));
656 		found_specific = ADDR6_EQ(&res->u.v6.mask,
657 					  &SOCK_ADDR6(&onesmask));
658 	}
659 	if (!expire && found_specific && res->expire) {
660 		found_specific = 0;
661 		free_res(res, IS_IPV6(addr));
662 	}
663 	if (found_specific)
664 		return;
665 
666 	hack_restrict(RESTRICT_FLAGS, addr, &onesmask,
667 		      restrict_source_mflags, restrict_source_flags,
668 		      expire);
669 	DPRINTF(1, ("restrict_source: %s host restriction added\n",
670 		    stoa(addr)));
671 }
672