/*	$NetBSD: ntp_restrict.c,v 1.8 2016/01/08 21:35:39 christos Exp $	*/

/*
 * ntp_restrict.c - determine host restrictions
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <sys/types.h>

#include "ntpd.h"
#include "ntp_if.h"
#include "ntp_lists.h"
#include "ntp_stdlib.h"
#include "ntp_assert.h"

/*
 * This code keeps a simple address-and-mask list of hosts we want
 * to place restrictions on (or remove them from).  The restrictions
 * are implemented as a set of flags which tell you what the host
 * can't do.  There is a subroutine entry to return the flags.  The
 * list is kept sorted to reduce the average number of comparisons
 * and to make sure you get the set of restrictions most specific to
 * the address.
 *
 * The algorithm is that, when looking up a host, it is first assumed
 * that the default set of restrictions will apply.  The code then
 * searches down through the list.  Whenever it finds a match it
 * adopts the match's flags instead.  When you hit the point where
 * the sorted address is greater than the target, you return with the
 * last set of flags you found.  Because of the ordering of the list,
 * the most specific match provides the final set of flags.
 *
 * This was originally intended to keep you from sync'ing to your own
 * broadcasts when you are doing that, by restricting yourself from
 * your own interfaces.  It was also thought it would sometimes be
 * useful to keep a misbehaving host or two from abusing your primary
 * clock.  It has been expanded, however, to suit the needs of those
 * with more restrictive access policies.
 */
/*
 * We will use two lists, one for IPv4 addresses and one for IPv6
 * addresses.  This is not protocol-independent, but for now I can't
 * find a way around it.  We'll check this later... JFB 07/2001
 */
#define MASK_IPV6_ADDR(dst, src, msk)					\
	do {								\
		int idx;						\
		for (idx = 0; idx < (int)COUNTOF((dst)->s6_addr); idx++) { \
			(dst)->s6_addr[idx] = (src)->s6_addr[idx]	\
					      & (msk)->s6_addr[idx];	\
		}							\
	} while (0)
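/*
 * Illustrative sketch, not part of the original module: byte-wise
 * masking with MASK_IPV6_ADDR.  Given the address 2001:db8::1 and a
 * /32 mask (ffff:ffff::), dst comes out as the prefix 2001:db8::.
 * The snippet assumes inet_pton() from <arpa/inet.h> purely for
 * readable constants, and is guarded out so the file builds
 * unchanged.
 */
#if 0
#include <arpa/inet.h>

static void
mask_ipv6_example(void)
{
	struct in6_addr addr, mask, dst;

	inet_pton(AF_INET6, "2001:db8::1", &addr);
	inet_pton(AF_INET6, "ffff:ffff::", &mask);
	MASK_IPV6_ADDR(&dst, &addr, &mask);
	/* dst now holds 2001:db8:: */
}
#endif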
/*
 * We allocate INC_RESLIST{4|6} entries to the free list whenever empty.
 * Auto-tune these to be just less than 1KB (leaving at least 16 bytes
 * for allocator overhead).
 */
#define	INC_RESLIST4	((1024 - 16) / V4_SIZEOF_RESTRICT_U)
#define	INC_RESLIST6	((1024 - 16) / V6_SIZEOF_RESTRICT_U)

/*
 * The restriction list
 */
restrict_u *restrictlist4;
restrict_u *restrictlist6;
static int restrictcount;	/* count in the restrict lists */

/*
 * The free list and associated counters.  Also some uninteresting
 * stat counters.
 */
static restrict_u *resfree4;	/* available entries (free list) */
static restrict_u *resfree6;

static u_long res_calls;
static u_long res_found;
static u_long res_not_found;

/*
 * Count number of restriction entries referring to RES_LIMITED, to
 * control implicit activation/deactivation of the MRU monlist.
 */
static u_long res_limited_refcnt;

/*
 * Our default entries.
 */
static restrict_u	restrict_def4;
static restrict_u	restrict_def6;

/*
 * "restrict source ..." enabled knob and restriction bits.
 */
static int		restrict_source_enabled;
static u_short		restrict_source_flags;
static u_short		restrict_source_mflags;

/*
 * private functions
 */
static restrict_u *	alloc_res4(void);
static restrict_u *	alloc_res6(void);
static void		free_res(restrict_u *, int);
static void		inc_res_limited(void);
static void		dec_res_limited(void);
static restrict_u *	match_restrict4_addr(u_int32, u_short);
static restrict_u *	match_restrict6_addr(const struct in6_addr *,
					     u_short);
static restrict_u *	match_restrict_entry(const restrict_u *, int);
static int		res_sorts_before4(restrict_u *, restrict_u *);
static int		res_sorts_before6(restrict_u *, restrict_u *);
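/*
 * Illustrative sketch, not part of the original module: the sort
 * predicates order more-specific entries first, so the first match
 * found while walking a list is the most specific one.  Here a /24
 * entry for 192.168.0.0 sorts before a /16 entry for the same
 * address.  Guarded out; for exposition only.
 */
#if 0
static void
sort_order_example(void)
{
	restrict_u narrow, wide;

	ZERO(narrow);
	ZERO(wide);
	narrow.u.v4.addr = 0xc0a80000;	/* 192.168.0.0 */
	narrow.u.v4.mask = 0xffffff00;	/* /24 */
	wide.u.v4.addr = 0xc0a80000;	/* 192.168.0.0 */
	wide.u.v4.mask = 0xffff0000;	/* /16 */

	INSIST(res_sorts_before4(&narrow, &wide));
}
#endif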
/*
 * init_restrict - initialize the restriction data structures
 */
void
init_restrict(void)
{
	/*
	 * The restriction lists begin with a default entry with address
	 * and mask 0, which will match any address.  The lists are kept
	 * sorted by descending address followed by descending mask:
	 *
	 *   address	    mask
	 *   192.168.0.0    255.255.255.0	kod limited noquery nopeer
	 *   192.168.0.0    255.255.0.0		kod limited
	 *   0.0.0.0	    0.0.0.0		kod limited noquery
	 *
	 * The first entry which matches an address is used.  With the
	 * example restrictions above, 192.168.0.0/24 matches the first
	 * entry, the rest of 192.168.0.0/16 matches the second, and
	 * everything else matches the third (default).
	 *
	 * Note this achieves the same result a little more efficiently
	 * than the documented behavior, which is to keep the lists
	 * sorted by ascending address followed by ascending mask, with
	 * the _last_ matching entry used.
	 *
	 * An additional wrinkle is we may have multiple entries with
	 * the same address and mask but differing match flags (mflags).
	 * At present there is only one, RESM_NTPONLY.  Entries with
	 * RESM_NTPONLY are sorted earlier so they take precedence over
	 * any otherwise similar entry without.  Again, this is the same
	 * behavior as the docs describe, but with the implementation
	 * reversed.
	 */
	LINK_SLIST(restrictlist4, &restrict_def4, link);
	LINK_SLIST(restrictlist6, &restrict_def6, link);
	restrictcount = 2;
}


static restrict_u *
alloc_res4(void)
{
	const size_t	cb = V4_SIZEOF_RESTRICT_U;
	const size_t	count = INC_RESLIST4;
	restrict_u *	rl;
	restrict_u *	res;
	size_t		i;

	UNLINK_HEAD_SLIST(res, resfree4, link);
	if (res != NULL)
		return res;

	rl = emalloc_zero(count * cb);
	/* link all but the first onto free list */
	res = (void *)((char *)rl + (count - 1) * cb);
	for (i = count - 1; i > 0; i--) {
		LINK_SLIST(resfree4, res, link);
		res = (void *)((char *)res - cb);
	}
	INSIST(rl == res);
	/* allocate the first */
	return res;
}


static restrict_u *
alloc_res6(void)
{
	const size_t	cb = V6_SIZEOF_RESTRICT_U;
	const size_t	count = INC_RESLIST6;
	restrict_u *	rl;
	restrict_u *	res;
	size_t		i;

	UNLINK_HEAD_SLIST(res, resfree6, link);
	if (res != NULL)
		return res;

	rl = emalloc_zero(count * cb);
	/* link all but the first onto free list */
	res = (void *)((char *)rl + (count - 1) * cb);
	for (i = count - 1; i > 0; i--) {
		LINK_SLIST(resfree6, res, link);
		res = (void *)((char *)res - cb);
	}
	INSIST(rl == res);
	/* allocate the first */
	return res;
}


static void
free_res(
	restrict_u *	res,
	int		v6
	)
{
	restrict_u **	plisthead;
	restrict_u *	unlinked;

	restrictcount--;
	if (RES_LIMITED & res->flags)
		dec_res_limited();

	if (v6)
		plisthead = &restrictlist6;
	else
		plisthead = &restrictlist4;
	UNLINK_SLIST(unlinked, *plisthead, res, link, restrict_u);
	INSIST(unlinked == res);

	if (v6) {
		zero_mem(res, V6_SIZEOF_RESTRICT_U);
		plisthead = &resfree6;
	} else {
		zero_mem(res, V4_SIZEOF_RESTRICT_U);
		plisthead = &resfree4;
	}
	LINK_SLIST(*plisthead, res, link);
}


static void
inc_res_limited(void)
{
	if (!res_limited_refcnt)
		mon_start(MON_RES);
	res_limited_refcnt++;
}


static void
dec_res_limited(void)
{
	res_limited_refcnt--;
	if (!res_limited_refcnt)
		mon_stop(MON_RES);
}


static restrict_u *
match_restrict4_addr(
	u_int32		addr,
	u_short		port
	)
{
	const int	v6 = 0;
	restrict_u *	res;
	restrict_u *	next;

	for (res = restrictlist4; res != NULL; res = next) {
		next = res->link;
		if (res->expire &&
		    res->expire <= current_time) {
			free_res(res, v6);	/* zeroes and frees res */
			continue;		/* skip the freed entry */
		}
		if (res->u.v4.addr == (addr & res->u.v4.mask)
		    && (!(RESM_NTPONLY & res->mflags)
			|| NTP_PORT == port))
			break;
	}
	return res;
}


static restrict_u *
match_restrict6_addr(
	const struct in6_addr *	addr,
	u_short			port
	)
{
	const int	v6 = 1;
	restrict_u *	res;
	restrict_u *	next;
	struct in6_addr	masked;

	for (res = restrictlist6; res != NULL; res = next) {
		next = res->link;
		INSIST(next != res);
		if (res->expire &&
		    res->expire <= current_time) {
			free_res(res, v6);	/* zeroes and frees res */
			continue;		/* skip the freed entry */
		}
		MASK_IPV6_ADDR(&masked, addr, &res->u.v6.mask);
		if (ADDR6_EQ(&masked, &res->u.v6.addr)
		    && (!(RESM_NTPONLY & res->mflags)
			|| NTP_PORT == (int)port))
			break;
	}
	return res;
}
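/*
 * Illustrative sketch, not part of the original module: the v4 match
 * above is a mask-and-compare in host byte order.  An entry for
 * 192.168.0.0 with mask 255.255.0.0 matches source 192.168.5.1
 * because (0xc0a80501 & 0xffff0000) == 0xc0a80000.  Guarded out.
 */
#if 0
static void
v4_match_example(void)
{
	u_int32 entry_addr = 0xc0a80000;	/* 192.168.0.0 */
	u_int32 entry_mask = 0xffff0000;	/* 255.255.0.0 */
	u_int32 src = 0xc0a80501;		/* 192.168.5.1 */

	INSIST(entry_addr == (src & entry_mask));
}
#endif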
/*
 * match_restrict_entry - find an exact match on a restrict list.
 *
 * Exact match is addr, mask, and mflags all equal.
 * In order to use more common code for IPv4 and IPv6, this routine
 * requires the caller to populate a restrict_u with mflags and either
 * the v4 or v6 address and mask as appropriate.  Other fields in the
 * input restrict_u are ignored.
 */
static restrict_u *
match_restrict_entry(
	const restrict_u *	pmatch,
	int			v6
	)
{
	restrict_u *res;
	restrict_u *rlist;
	size_t cb;

	if (v6) {
		rlist = restrictlist6;
		cb = sizeof(pmatch->u.v6);
	} else {
		rlist = restrictlist4;
		cb = sizeof(pmatch->u.v4);
	}

	for (res = rlist; res != NULL; res = res->link)
		if (res->mflags == pmatch->mflags &&
		    !memcmp(&res->u, &pmatch->u, cb))
			break;
	return res;
}


/*
 * res_sorts_before4 - compare two restrict4 entries
 *
 * Returns nonzero if r1 sorts before r2.  We sort by descending
 * address, then descending mask, then descending mflags, so sorting
 * before means having a higher value.
 */
static int
res_sorts_before4(
	restrict_u *r1,
	restrict_u *r2
	)
{
	int r1_before_r2;

	if (r1->u.v4.addr > r2->u.v4.addr)
		r1_before_r2 = 1;
	else if (r1->u.v4.addr < r2->u.v4.addr)
		r1_before_r2 = 0;
	else if (r1->u.v4.mask > r2->u.v4.mask)
		r1_before_r2 = 1;
	else if (r1->u.v4.mask < r2->u.v4.mask)
		r1_before_r2 = 0;
	else if (r1->mflags > r2->mflags)
		r1_before_r2 = 1;
	else
		r1_before_r2 = 0;

	return r1_before_r2;
}


/*
 * res_sorts_before6 - compare two restrict6 entries
 *
 * Returns nonzero if r1 sorts before r2.  We sort by descending
 * address, then descending mask, then descending mflags, so sorting
 * before means having a higher value.
 */
static int
res_sorts_before6(
	restrict_u *r1,
	restrict_u *r2
	)
{
	int r1_before_r2;
	int cmp;

	cmp = ADDR6_CMP(&r1->u.v6.addr, &r2->u.v6.addr);
	if (cmp > 0)		/* r1->addr > r2->addr */
		r1_before_r2 = 1;
	else if (cmp < 0)	/* r2->addr > r1->addr */
		r1_before_r2 = 0;
	else {
		cmp = ADDR6_CMP(&r1->u.v6.mask, &r2->u.v6.mask);
		if (cmp > 0)		/* r1->mask > r2->mask */
			r1_before_r2 = 1;
		else if (cmp < 0)	/* r2->mask > r1->mask */
			r1_before_r2 = 0;
		else if (r1->mflags > r2->mflags)
			r1_before_r2 = 1;
		else
			r1_before_r2 = 0;
	}

	return r1_before_r2;
}
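/*
 * Illustrative sketch, not part of the original module: callers of
 * match_restrict_entry() populate only mflags and the v4 or v6
 * address and mask of a stack restrict_u, exactly as hack_restrict()
 * does below.  find_v4_entry_example() is hypothetical; note the
 * address is pre-masked the same way hack_restrict() canonicalizes
 * it.  Guarded out.
 */
#if 0
static restrict_u *
find_v4_entry_example(u_int32 addr, u_int32 mask)
{
	restrict_u match;

	ZERO(match);			/* mflags 0: no RESM_NTPONLY */
	match.u.v4.addr = addr & mask;
	match.u.v4.mask = mask;

	return match_restrict_entry(&match, 0 /* !v6 */);
}
#endif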
/*
 * restrictions - return restrictions for this host
 */
u_short
restrictions(
	sockaddr_u *srcadr
	)
{
	restrict_u *match;
	struct in6_addr *pin6;
	u_short flags;

	res_calls++;
	flags = 0;
	/* IPv4 source address */
	if (IS_IPV4(srcadr)) {
		/*
		 * Ignore any packets with a multicast source address
		 * (this should be done early in the receive process,
		 * not later!)
		 */
		if (IN_CLASSD(SRCADR(srcadr)))
			return (int)RES_IGNORE;

		match = match_restrict4_addr(SRCADR(srcadr),
					     SRCPORT(srcadr));

		INSIST(match != NULL);

		match->count++;
		/*
		 * res_not_found counts only use of the final default
		 * entry, not any "restrict default ntpport ...", which
		 * would be just before the final default.
		 */
		if (&restrict_def4 == match)
			res_not_found++;
		else
			res_found++;
		flags = match->flags;
	}

	/* IPv6 source address */
	if (IS_IPV6(srcadr)) {
		pin6 = PSOCK_ADDR6(srcadr);

		/*
		 * Ignore any packets with a multicast source address
		 * (this should be done early in the receive process,
		 * not later!)
		 */
		if (IN6_IS_ADDR_MULTICAST(pin6))
			return (int)RES_IGNORE;

		match = match_restrict6_addr(pin6, SRCPORT(srcadr));
		INSIST(match != NULL);
		match->count++;
		if (&restrict_def6 == match)
			res_not_found++;
		else
			res_found++;
		flags = match->flags;
	}
	return (flags);
}
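/*
 * Illustrative sketch, not part of the original module: a receive
 * path consults restrictions() once per packet and then tests the
 * returned flag bits.  receive_example() is hypothetical; the flag
 * names are the RES_* bits from ntp.h.  Guarded out.
 */
#if 0
static int
receive_example(sockaddr_u *src)
{
	u_short restrict_mask = restrictions(src);

	if (RES_IGNORE & restrict_mask)
		return 0;	/* drop silently */
	if (RES_DONTSERVE & restrict_mask)
		return 0;	/* no time service for this source */

	return 1;		/* continue processing */
}
#endif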
/*
 * hack_restrict - add/subtract/manipulate entries on the restrict list
 */
void
hack_restrict(
	int		op,
	sockaddr_u *	resaddr,
	sockaddr_u *	resmask,
	u_short		mflags,
	u_short		flags,
	u_long		expire
	)
{
	int		v6;
	restrict_u	match;
	restrict_u *	res;
	restrict_u **	plisthead;

	DPRINTF(1, ("restrict: op %d addr %s mask %s mflags %08x flags %08x\n",
		    op, stoa(resaddr), stoa(resmask), mflags, flags));

	if (NULL == resaddr) {
		REQUIRE(NULL == resmask);
		REQUIRE(RESTRICT_FLAGS == op);
		restrict_source_flags = flags;
		restrict_source_mflags = mflags;
		restrict_source_enabled = 1;
		return;
	}

	ZERO(match);

#if 0
	/* silence VC9 potentially uninit warnings */
	// HMS: let's use a compiler-specific "enable" for this.
	res = NULL;
	v6 = 0;
#endif

	if (IS_IPV4(resaddr)) {
		v6 = 0;
		/*
		 * Get address and mask in host byte order for easy
		 * comparison as u_int32
		 */
		match.u.v4.addr = SRCADR(resaddr);
		match.u.v4.mask = SRCADR(resmask);
		match.u.v4.addr &= match.u.v4.mask;

	} else if (IS_IPV6(resaddr)) {
		v6 = 1;
		/*
		 * Get address and mask in network byte order for easy
		 * comparison as byte sequences (e.g. memcmp())
		 */
		match.u.v6.mask = SOCK_ADDR6(resmask);
		MASK_IPV6_ADDR(&match.u.v6.addr, PSOCK_ADDR6(resaddr),
			       &match.u.v6.mask);

	} else	/* neither IPv4 nor IPv6 */
		REQUIRE(0);

	match.flags = flags;
	match.mflags = mflags;
	match.expire = expire;
	res = match_restrict_entry(&match, v6);

	switch (op) {

	case RESTRICT_FLAGS:
		/*
		 * Here we add bits to the flags.  If this is a
		 * new restriction add it.
		 */
		if (NULL == res) {
			if (v6) {
				res = alloc_res6();
				memcpy(res, &match,
				       V6_SIZEOF_RESTRICT_U);
				plisthead = &restrictlist6;
			} else {
				res = alloc_res4();
				memcpy(res, &match,
				       V4_SIZEOF_RESTRICT_U);
				plisthead = &restrictlist4;
			}
			LINK_SORT_SLIST(
				*plisthead, res,
				(v6)
				    ? res_sorts_before6(res, L_S_S_CUR())
				    : res_sorts_before4(res, L_S_S_CUR()),
				link, restrict_u);
			restrictcount++;
			if (RES_LIMITED & flags)
				inc_res_limited();
		} else {
			if ((RES_LIMITED & flags) &&
			    !(RES_LIMITED & res->flags))
				inc_res_limited();
			res->flags |= flags;
		}
		break;

	case RESTRICT_UNFLAG:
		/*
		 * Remove some bits from the flags.  If we didn't
		 * find this one, just return.
		 */
		if (res != NULL) {
			if ((RES_LIMITED & res->flags)
			    && (RES_LIMITED & flags))
				dec_res_limited();
			res->flags &= ~flags;
		}
		break;

	case RESTRICT_REMOVE:
	case RESTRICT_REMOVEIF:
		/*
		 * Remove an entry from the table entirely if we
		 * found one.  Don't remove the default entry and
		 * don't remove an interface entry.
		 */
		if (res != NULL
		    && (RESTRICT_REMOVEIF == op
			|| !(RESM_INTERFACE & res->mflags))
		    && res != &restrict_def4
		    && res != &restrict_def6)
			free_res(res, v6);
		break;

	default:	/* unknown op */
		INSIST(0);
		break;
	}
}


/*
 * restrict_source - maintains dynamic "restrict source ..." entries as
 * peers come and go.
 */
void
restrict_source(
	sockaddr_u *	addr,
	int		farewell,	/* 0 to add, 1 to remove */
	u_long		expire		/* 0 is infinite, valid until */
	)
{
	sockaddr_u	onesmask;
	restrict_u *	res;
	int		found_specific;

	if (!restrict_source_enabled || SOCK_UNSPEC(addr) ||
	    IS_MCAST(addr) || ISREFCLOCKADR(addr))
		return;

	REQUIRE(AF_INET == AF(addr) || AF_INET6 == AF(addr));

	SET_HOSTMASK(&onesmask, AF(addr));
	if (farewell) {
		hack_restrict(RESTRICT_REMOVE, addr, &onesmask,
			      0, 0, 0);
		DPRINTF(1, ("restrict_source: %s removed\n", stoa(addr)));
		return;
	}

	/*
	 * If there is a specific entry for this address, hands
	 * off, as it is considered more specific than "restrict
	 * source ...".
	 * However, if the specific entry found is a fleeting one
	 * added by pool_xmit() before soliciting, replace it
	 * immediately regardless of the expire value to make way
	 * for the more persistent entry.
	 */
	if (IS_IPV4(addr)) {
		res = match_restrict4_addr(SRCADR(addr), SRCPORT(addr));
		INSIST(res != NULL);
		found_specific = (SRCADR(&onesmask) == res->u.v4.mask);
	} else {
		res = match_restrict6_addr(&SOCK_ADDR6(addr),
					   SRCPORT(addr));
		INSIST(res != NULL);
		found_specific = ADDR6_EQ(&res->u.v6.mask,
					  &SOCK_ADDR6(&onesmask));
	}
	if (!expire && found_specific && res->expire) {
		found_specific = 0;
		free_res(res, IS_IPV6(addr));
	}
	if (found_specific)
		return;

	hack_restrict(RESTRICT_FLAGS, addr, &onesmask,
		      restrict_source_mflags, restrict_source_flags,
		      expire);
	DPRINTF(1, ("restrict_source: %s host restriction added\n",
		    stoa(addr)));
}
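/*
 * Illustrative sketch, not part of the original module: how a caller
 * might maintain a dynamic entry across a peer's lifetime when
 * "restrict source ..." is configured.  peer_lifecycle_example() and
 * the 60 s expiry are hypothetical, shown only to pair the add and
 * remove calls.  Guarded out.
 */
#if 0
static void
peer_lifecycle_example(sockaddr_u *srcadr)
{
	/* on mobilize: add an entry, expiring if not refreshed */
	restrict_source(srcadr, 0, current_time + 60);

	/* ... later, on demobilize: remove the entry ... */
	restrict_source(srcadr, 1, 0);
}
#endif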