xref: /netbsd-src/external/bsd/ntp/dist/ntpd/ntp_restrict.c (revision b757af438b42b93f8c6571f026d8b8ef3eaf5fc9)
1 /*	$NetBSD: ntp_restrict.c,v 1.2 2012/02/01 20:48:01 kardel Exp $	*/
2 
3 /*
4  * ntp_restrict.c - determine host restrictions
5  */
6 #ifdef HAVE_CONFIG_H
7 #include <config.h>
8 #endif
9 
10 #include <stdio.h>
11 #include <sys/types.h>
12 
13 #include "ntpd.h"
14 #include "ntp_if.h"
15 #include "ntp_lists.h"
16 #include "ntp_stdlib.h"
17 #include "ntp_assert.h"
18 
19 /*
20  * This code keeps a simple address-and-mask list of hosts we want
21  * to place restrictions on (or remove them from). The restrictions
22  * are implemented as a set of flags which tell you what the host
23  * can't do. There is a subroutine entry to return the flags. The
24  * list is kept sorted to reduce the average number of comparisons
25  * and make sure you get the set of restrictions most specific to
26  * the address.
27  *
28  * The algorithm is that, when looking up a host, it is first assumed
29  * that the default set of restrictions will apply. It then searches
30  * down through the list. Whenever it finds a match it adopts the
31  * match's flags instead. When you hit the point where the sorted
32  * address is greater than the target, you return with the last set of
33  * flags you found. Because of the ordering of the list, the most
34  * specific match will provide the final set of flags.
35  *
36  * This was originally intended to restrict you from sync'ing to your
37  * own broadcasts when you are doing that, by restricting yourself from
38  * your own interfaces. It was also thought it would sometimes be useful
39  * to keep a misbehaving host or two from abusing your primary clock. It
40  * has been expanded, however, to suit the needs of those with more
41  * restrictive access policies.
42  */
43 /*
44  * We will use two lists, one for IPv4 addresses and one for IPv6
 * addresses. This is not protocol-independent but for now I can't
46  * find a way to respect this. We'll check this later... JFB 07/2001
47  */
/*
 * MASK_IPV6_ADDR(dst, src, msk) - apply an IPv6 netmask: each of the
 * 16 bytes of dst->s6_addr is set to the corresponding byte of src
 * ANDed with the corresponding byte of msk.  Wrapped in
 * do { } while (0) so the multi-statement body acts as one statement.
 */
#define MASK_IPV6_ADDR(dst, src, msk)					\
	do {								\
		int idx;						\
		for (idx = 0; idx < (int)COUNTOF((dst)->s6_addr); idx++) { \
			(dst)->s6_addr[idx] = (src)->s6_addr[idx]	\
					      & (msk)->s6_addr[idx];	\
		}							\
	} while (0)
56 
57 /*
58  * We allocate INC_RESLIST{4|6} entries to the free list whenever empty.
59  * Auto-tune these to be just less than 1KB (leaving at least 16 bytes
60  * for allocator overhead).
61  */
62 #define	INC_RESLIST4	((1024 - 16) / V4_SIZEOF_RESTRICT_U)
63 #define	INC_RESLIST6	((1024 - 16) / V6_SIZEOF_RESTRICT_U)
64 
65 /*
66  * The restriction list
67  */
68 restrict_u *restrictlist4;
69 restrict_u *restrictlist6;
70 static int restrictcount;	/* count in the restrict lists */
71 
72 /*
73  * The free list and associated counters.  Also some uninteresting
74  * stat counters.
75  */
76 static restrict_u *resfree4;	/* available entries (free list) */
77 static restrict_u *resfree6;
78 
79 static u_long res_calls;
80 static u_long res_found;
81 static u_long res_not_found;
82 
83 /*
84  * Count number of restriction entries referring to RES_LIMITED, to
85  * control implicit activation/deactivation of the MRU monlist.
86  */
87 static	u_long res_limited_refcnt;
88 
89 /*
90  * Our default entries.
91  */
92 static	restrict_u	restrict_def4;
93 static	restrict_u	restrict_def6;
94 
95 /*
96  * "restrict source ..." enabled knob and restriction bits.
97  */
98 static	int		restrict_source_enabled;
99 static	u_short		restrict_source_flags;
100 static	u_short		restrict_source_mflags;
101 
102 /*
103  * private functions
104  */
105 static restrict_u *	alloc_res4(void);
106 static restrict_u *	alloc_res6(void);
107 static void		free_res(restrict_u *, int);
108 static void		inc_res_limited(void);
109 static void		dec_res_limited(void);
110 static restrict_u *	match_restrict4_addr(u_int32, u_short);
111 static restrict_u *	match_restrict6_addr(const struct in6_addr *,
112 					     u_short);
113 static restrict_u *	match_restrict_entry(const restrict_u *, int);
114 static int		res_sorts_before4(restrict_u *, restrict_u *);
115 static int		res_sorts_before6(restrict_u *, restrict_u *);
116 
117 
118 /*
119  * init_restrict - initialize the restriction data structures
120  */
121 void
122 init_restrict(void)
123 {
124 	/*
125 	 * The restriction lists begin with a default entry with address
126 	 * and mask 0, which will match any entry.  The lists are kept
127 	 * sorted by descending address followed by descending mask:
128 	 *
129 	 *   address	  mask
130 	 * 192.168.0.0	255.255.255.0	kod limited noquery nopeer
131 	 * 192.168.0.0	255.255.0.0	kod limited
132 	 * 0.0.0.0	0.0.0.0		kod limited noquery
133 	 *
134 	 * The first entry which matches an address is used.  With the
135 	 * example restrictions above, 192.168.0.0/24 matches the first
136 	 * entry, the rest of 192.168.0.0/16 matches the second, and
137 	 * everything else matches the third (default).
138 	 *
139 	 * Note this achieves the same result a little more efficiently
140 	 * than the documented behavior, which is to keep the lists
141 	 * sorted by ascending address followed by ascending mask, with
142 	 * the _last_ matching entry used.
143 	 *
144 	 * An additional wrinkle is we may have multiple entries with
145 	 * the same address and mask but differing match flags (mflags).
146 	 * At present there is only one, RESM_NTPONLY.  Entries with
147 	 * RESM_NTPONLY are sorted earlier so they take precedence over
148 	 * any otherwise similar entry without.  Again, this is the same
149 	 * behavior as but reversed implementation compared to the docs.
150 	 *
151 	 */
152 	LINK_SLIST(restrictlist4, &restrict_def4, link);
153 	LINK_SLIST(restrictlist6, &restrict_def6, link);
154 	restrictcount = 2;
155 }
156 
157 
158 static restrict_u *
159 alloc_res4(void)
160 {
161 	const size_t	cb = V4_SIZEOF_RESTRICT_U;
162 	const size_t	count = INC_RESLIST4;
163 	restrict_u *	rl;
164 	restrict_u *	res;
165 	int		i;
166 
167 	UNLINK_HEAD_SLIST(res, resfree4, link);
168 	if (res != NULL)
169 		return res;
170 
171 	rl = emalloc(count * cb);
172 	memset(rl, 0, count * cb);
173 	/* link all but the first onto free list */
174 	res = (void *)((char *)rl + (count - 1) * cb);
175 	for (i = count - 1; i > 0; i--) {
176 		LINK_SLIST(resfree4, res, link);
177 		res = (void *)((char *)res - cb);
178 	}
179 	NTP_INSIST(rl == res);
180 	/* allocate the first */
181 	return res;
182 }
183 
184 
185 static restrict_u *
186 alloc_res6(void)
187 {
188 	const size_t	cb = V6_SIZEOF_RESTRICT_U;
189 	const size_t	count = INC_RESLIST6;
190 	restrict_u *	rl;
191 	restrict_u *	res;
192 	int		i;
193 
194 	UNLINK_HEAD_SLIST(res, resfree6, link);
195 	if (res != NULL)
196 		return res;
197 
198 	rl = emalloc(count * cb);
199 	memset(rl, 0, count * cb);
200 	/* link all but the first onto free list */
201 	res = (void *)((char *)rl + (count - 1) * cb);
202 	for (i = count - 1; i > 0; i--) {
203 		LINK_SLIST(resfree6, res, link);
204 		res = (void *)((char *)res - cb);
205 	}
206 	NTP_INSIST(rl == res);
207 	/* allocate the first */
208 	return res;
209 }
210 
211 
212 static void
213 free_res(
214 	restrict_u *	res,
215 	int		v6
216 	)
217 {
218 	restrict_u **	plisthead;
219 	restrict_u *	unlinked;
220 
221 	restrictcount--;
222 	if (RES_LIMITED && res->flags)
223 		dec_res_limited();
224 
225 	if (v6)
226 		plisthead = &restrictlist6;
227 	else
228 		plisthead = &restrictlist4;
229 	UNLINK_SLIST(unlinked, *plisthead, res, link, restrict_u);
230 	NTP_INSIST(unlinked == res);
231 
232 	if (v6) {
233 		memset(res, 0, V6_SIZEOF_RESTRICT_U);
234 		plisthead = &resfree6;
235 	} else {
236 		memset(res, 0, V4_SIZEOF_RESTRICT_U);
237 		plisthead = &resfree4;
238 	}
239 	LINK_SLIST(*plisthead, res, link);
240 }
241 
242 
243 static void
244 inc_res_limited(void)
245 {
246 	if (!res_limited_refcnt)
247 		mon_start(MON_RES);
248 	res_limited_refcnt++;
249 }
250 
251 
252 static void
253 dec_res_limited(void)
254 {
255 	res_limited_refcnt--;
256 	if (!res_limited_refcnt)
257 		mon_stop(MON_RES);
258 }
259 
260 
261 static restrict_u *
262 match_restrict4_addr(
263 	u_int32	addr,
264 	u_short	port
265 	)
266 {
267 	restrict_u *	res;
268 	restrict_u *	next;
269 
270 	for (res = restrictlist4; res != NULL; res = next) {
271 		next = res->link;
272 		if (res->u.v4.addr == (addr & res->u.v4.mask)
273 		    && (!(RESM_NTPONLY & res->mflags)
274 			|| NTP_PORT == port))
275 			break;
276 	}
277 	return res;
278 }
279 
280 
281 static restrict_u *
282 match_restrict6_addr(
283 	const struct in6_addr *	addr,
284 	u_short			port
285 	)
286 {
287 	restrict_u *	res;
288 	restrict_u *	next;
289 	struct in6_addr	masked;
290 
291 	for (res = restrictlist6; res != NULL; res = next) {
292 		next = res->link;
293 		NTP_INSIST(next != res);
294 		MASK_IPV6_ADDR(&masked, addr, &res->u.v6.mask);
295 		if (ADDR6_EQ(&masked, &res->u.v6.addr)
296 		    && (!(RESM_NTPONLY & res->mflags)
297 			|| NTP_PORT == (int)port))
298 			break;
299 	}
300 	return res;
301 }
302 
303 
304 /*
305  * match_restrict_entry - find an exact match on a restrict list.
306  *
307  * Exact match is addr, mask, and mflags all equal.
308  * In order to use more common code for IPv4 and IPv6, this routine
309  * requires the caller to populate a restrict_u with mflags and either
310  * the v4 or v6 address and mask as appropriate.  Other fields in the
311  * input restrict_u are ignored.
312  */
313 static restrict_u *
314 match_restrict_entry(
315 	const restrict_u *	pmatch,
316 	int			v6
317 	)
318 {
319 	restrict_u *res;
320 	restrict_u *rlist;
321 	size_t cb;
322 
323 	if (v6) {
324 		rlist = restrictlist6;
325 		cb = sizeof(pmatch->u.v6);
326 	} else {
327 		rlist = restrictlist4;
328 		cb = sizeof(pmatch->u.v4);
329 	}
330 
331 	for (res = rlist; res != NULL; res = res->link)
332 		if (res->mflags == pmatch->mflags &&
333 		    !memcmp(&res->u, &pmatch->u, cb))
334 			break;
335 	return res;
336 }
337 
338 
339 /*
340  * res_sorts_before4 - compare two restrict4 entries
341  *
342  * Returns nonzero if r1 sorts before r2.  We sort by descending
343  * address, then descending mask, then descending mflags, so sorting
344  * before means having a higher value.
345  */
346 static int
347 res_sorts_before4(
348 	restrict_u *r1,
349 	restrict_u *r2
350 	)
351 {
352 	int r1_before_r2;
353 
354 	if (r1->u.v4.addr > r2->u.v4.addr)
355 		r1_before_r2 = 1;
356 	else if (r1->u.v4.addr < r2->u.v4.addr)
357 		r1_before_r2 = 0;
358 	else if (r1->u.v4.mask > r2->u.v4.mask)
359 		r1_before_r2 = 1;
360 	else if (r1->u.v4.mask < r2->u.v4.mask)
361 		r1_before_r2 = 0;
362 	else if (r1->mflags > r2->mflags)
363 		r1_before_r2 = 1;
364 	else
365 		r1_before_r2 = 0;
366 
367 	return r1_before_r2;
368 }
369 
370 
371 /*
372  * res_sorts_before6 - compare two restrict6 entries
373  *
374  * Returns nonzero if r1 sorts before r2.  We sort by descending
375  * address, then descending mask, then descending mflags, so sorting
376  * before means having a higher value.
377  */
378 static int
379 res_sorts_before6(
380 	restrict_u *r1,
381 	restrict_u *r2
382 	)
383 {
384 	int r1_before_r2;
385 	int cmp;
386 
387 	cmp = ADDR6_CMP(&r1->u.v6.addr, &r2->u.v6.addr);
388 	if (cmp > 0)		/* r1->addr > r2->addr */
389 		r1_before_r2 = 1;
390 	else if (cmp < 0)	/* r2->addr > r1->addr */
391 		r1_before_r2 = 0;
392 	else {
393 		cmp = ADDR6_CMP(&r1->u.v6.mask, &r2->u.v6.mask);
394 		if (cmp > 0)		/* r1->mask > r2->mask*/
395 			r1_before_r2 = 1;
396 		else if (cmp < 0)	/* r2->mask > r1->mask */
397 			r1_before_r2 = 0;
398 		else if (r1->mflags > r2->mflags)
399 			r1_before_r2 = 1;
400 		else
401 			r1_before_r2 = 0;
402 	}
403 
404 	return r1_before_r2;
405 }
406 
407 
408 /*
409  * restrictions - return restrictions for this host
410  */
411 u_short
412 restrictions(
413 	sockaddr_u *srcadr
414 	)
415 {
416 	restrict_u *match;
417 	struct in6_addr *pin6;
418 	u_short flags;
419 
420 	res_calls++;
421 	flags = 0;
422 	/* IPv4 source address */
423 	if (IS_IPV4(srcadr)) {
424 		/*
425 		 * Ignore any packets with a multicast source address
426 		 * (this should be done early in the receive process,
427 		 * not later!)
428 		 */
429 		if (IN_CLASSD(SRCADR(srcadr)))
430 			return (int)RES_IGNORE;
431 
432 		match = match_restrict4_addr(SRCADR(srcadr),
433 					     SRCPORT(srcadr));
434 		match->count++;
435 		/*
436 		 * res_not_found counts only use of the final default
437 		 * entry, not any "restrict default ntpport ...", which
438 		 * would be just before the final default.
439 		 */
440 		if (&restrict_def4 == match)
441 			res_not_found++;
442 		else
443 			res_found++;
444 		flags = match->flags;
445 	}
446 
447 	/* IPv6 source address */
448 	if (IS_IPV6(srcadr)) {
449 		pin6 = PSOCK_ADDR6(srcadr);
450 
451 		/*
452 		 * Ignore any packets with a multicast source address
453 		 * (this should be done early in the receive process,
454 		 * not later!)
455 		 */
456 		if (IN6_IS_ADDR_MULTICAST(pin6))
457 			return (int)RES_IGNORE;
458 
459 		match = match_restrict6_addr(pin6, SRCPORT(srcadr));
460 		match->count++;
461 		if (&restrict_def6 == match)
462 			res_not_found++;
463 		else
464 			res_found++;
465 		flags = match->flags;
466 	}
467 	return (flags);
468 }
469 
470 
/*
 * hack_restrict - add/subtract/manipulate entries on the restrict list
 *
 * op is one of RESTRICT_FLAGS (add flags, creating the entry if
 * needed), RESTRICT_UNFLAG (clear flags), RESTRICT_REMOVE or
 * RESTRICT_REMOVEIF (delete the entry).  resaddr/resmask identify the
 * entry; a NULL resaddr selects the "restrict source" configuration
 * form instead.  mflags are match qualifiers (e.g. RESM_NTPONLY),
 * flags are the restriction bits (RES_*).
 */
void
hack_restrict(
	int		op,
	sockaddr_u *	resaddr,
	sockaddr_u *	resmask,
	u_short		mflags,
	u_short		flags
	)
{
	int		v6;
	restrict_u	match;
	restrict_u *	res;
	restrict_u **	plisthead;

	DPRINTF(1, ("restrict: op %d addr %s mask %s mflags %08x flags %08x\n",
		    op, stoa(resaddr), stoa(resmask), mflags, flags));

	/*
	 * NULL resaddr means "restrict source ...": just record the
	 * flags to apply to dynamically-added source entries later.
	 */
	if (NULL == resaddr) {
		NTP_REQUIRE(NULL == resmask);
		NTP_REQUIRE(RESTRICT_FLAGS == op);
		restrict_source_flags = flags;
		restrict_source_mflags = mflags;
		restrict_source_enabled = 1;
		return;
	}

	memset(&match, 0, sizeof(match));
	/* silence VC9 potentially uninit warnings */
	res = NULL;
	v6 = 0;

	if (IS_IPV4(resaddr)) {
		v6 = 0;
		/*
		 * Get address and mask in host byte order for easy
		 * comparison as u_int32
		 */
		match.u.v4.addr = SRCADR(resaddr);
		match.u.v4.mask = SRCADR(resmask);
		match.u.v4.addr &= match.u.v4.mask;

	} else if (IS_IPV6(resaddr)) {
		v6 = 1;
		/*
		 * Get address and mask in network byte order for easy
		 * comparison as byte sequences (e.g. memcmp())
		 */
		match.u.v6.mask = SOCK_ADDR6(resmask);
		MASK_IPV6_ADDR(&match.u.v6.addr, PSOCK_ADDR6(resaddr),
			       &match.u.v6.mask);

	} else	/* not IPv4 nor IPv6 */
		NTP_REQUIRE(0);

	match.flags = flags;
	match.mflags = mflags;
	/* look for an entry with identical addr, mask, and mflags */
	res = match_restrict_entry(&match, v6);

	switch (op) {

	case RESTRICT_FLAGS:
		/*
		 * Here we add bits to the flags. If this is a
		 * new restriction add it.
		 */
		if (NULL == res) {
			if (v6) {
				res = alloc_res6();
				memcpy(res, &match,
				       V6_SIZEOF_RESTRICT_U);
				plisthead = &restrictlist6;
			} else {
				res = alloc_res4();
				memcpy(res, &match,
				       V4_SIZEOF_RESTRICT_U);
				plisthead = &restrictlist4;
			}
			/*
			 * Insert in sorted position (descending addr,
			 * mask, mflags) so lookups can stop at the
			 * first match.
			 */
			LINK_SORT_SLIST(
				*plisthead, res,
				(v6)
				  ? res_sorts_before6(res, L_S_S_CUR())
				  : res_sorts_before4(res, L_S_S_CUR()),
				link, restrict_u);
			restrictcount++;
			/* track RES_LIMITED use for the MRU monlist */
			if (RES_LIMITED & flags)
				inc_res_limited();
		} else {
			/* existing entry: count a 0->1 RES_LIMITED
			 * transition before OR-ing in the new bits */
			if ((RES_LIMITED & flags) &&
			    !(RES_LIMITED & res->flags))
				inc_res_limited();
			res->flags |= flags;
		}
		break;

	case RESTRICT_UNFLAG:
		/*
		 * Remove some bits from the flags. If we didn't
		 * find this one, just return.
		 */
		if (res != NULL) {
			/* count a 1->0 RES_LIMITED transition */
			if ((RES_LIMITED & res->flags)
			    && (RES_LIMITED & flags))
				dec_res_limited();
			res->flags &= ~flags;
		}
		break;

	case RESTRICT_REMOVE:
	case RESTRICT_REMOVEIF:
		/*
		 * Remove an entry from the table entirely if we
		 * found one. Don't remove the default entry and
		 * don't remove an interface entry.
		 */
		if (res != NULL
		    && (RESTRICT_REMOVEIF == op
			|| !(RESM_INTERFACE & res->mflags))
		    && res != &restrict_def4
		    && res != &restrict_def6)
			free_res(res, v6);
		break;

	default:	/* unknown op */
		NTP_INSIST(0);
		break;
	}

}
602 
603