1 /* $NetBSD: ntp_restrict.c,v 1.12 2024/08/18 20:47:18 christos Exp $ */ 2 3 /* 4 * ntp_restrict.c - determine host restrictions 5 */ 6 #ifdef HAVE_CONFIG_H 7 #include <config.h> 8 #endif 9 10 #include <stdio.h> 11 #include <sys/types.h> 12 13 #include "ntpd.h" 14 #include "ntp_if.h" 15 #include "ntp_lists.h" 16 #include "ntp_stdlib.h" 17 #include "ntp_assert.h" 18 19 /* 20 * This code keeps a simple address-and-mask list of addressses we want 21 * to place restrictions on (or remove them from). The restrictions are 22 * implemented as a set of flags which tell you what matching addresses 23 * can't do. The list is sorted retrieve the restrictions most specific 24 * to the address. 25 * 26 * This was originally intended to restrict you from sync'ing to your 27 * own broadcasts when you are doing that, by restricting yourself from 28 * your own interfaces. It was also thought it would sometimes be useful 29 * to keep a misbehaving host or two from abusing your primary clock. It 30 * has been expanded, however, to suit the needs of those with more 31 * restrictive access policies. 32 */ 33 #define MASK_IPV6_ADDR(dst, src, msk) \ 34 do { \ 35 int x; \ 36 \ 37 for (x = 0; x < (int)COUNTOF((dst)->s6_addr); x++) { \ 38 (dst)->s6_addr[x] = (src)->s6_addr[x] \ 39 & (msk)->s6_addr[x]; \ 40 } \ 41 } while (FALSE) 42 43 /* 44 * We allocate INC_RESLIST{4|6} entries to the free list whenever empty. 45 * Auto-tune these to be just less than 1KB (leaving at least 32 bytes 46 * for allocator overhead). 47 */ 48 #define INC_RESLIST4 ((1024 - 32) / V4_SIZEOF_RESTRICT_U) 49 #define INC_RESLIST6 ((1024 - 32) / V6_SIZEOF_RESTRICT_U) 50 51 /* 52 * The restriction list 53 */ 54 restrict_u *restrictlist4; 55 restrict_u *restrictlist6; 56 static int restrictcount; /* count in the restrict lists */ 57 58 /* 59 * The free list and associated counters. Also some uninteresting 60 * stat counters. 61 */ 62 static restrict_u *resfree4; /* available entries (free list) */ 63 static restrict_u *resfree6; 64 65 static u_long res_calls; 66 static u_long res_found; 67 static u_long res_not_found; 68 69 /* 70 * Count number of restriction entries referring to RES_LIMITED, to 71 * control implicit activation/deactivation of the MRU monlist. 72 */ 73 static u_long res_limited_refcnt; 74 75 /* 76 * Our default entries. 77 * 78 * We can make this cleaner with c99 support: see init_restrict(). 79 */ 80 static restrict_u restrict_def4; 81 static restrict_u restrict_def6; 82 83 /* 84 * "restrict source ..." enabled knob and restriction bits. 85 */ 86 static int restrict_source_enabled; 87 static u_int32 restrict_source_rflags; 88 static u_short restrict_source_mflags; 89 static short restrict_source_ippeerlimit; 90 91 /* 92 * private functions 93 */ 94 static restrict_u * alloc_res4(void); 95 static restrict_u * alloc_res6(void); 96 static void free_res(restrict_u *, int); 97 static inline void inc_res_limited(void); 98 static inline void dec_res_limited(void); 99 static restrict_u * match_restrict4_addr(u_int32, u_short); 100 static restrict_u * match_restrict6_addr(const struct in6_addr *, 101 u_short); 102 static restrict_u * match_restrict_entry(const restrict_u *, int); 103 static inline int/*BOOL*/ mflags_sorts_before(u_short, u_short); 104 static int/*BOOL*/ res_sorts_before4(restrict_u *, restrict_u *); 105 static int/*BOOL*/ res_sorts_before6(restrict_u *, restrict_u *); 106 107 typedef int (*res_sort_fn)(restrict_u *, restrict_u *); 108 109 110 /* dump_restrict() & dump_restricts() are DEBUG-only */ 111 #ifdef DEBUG 112 static void dump_restrict(restrict_u *, int); 113 114 115 /* 116 * dump_restrict - spit out a single restriction entry 117 */ 118 static void 119 dump_restrict( 120 restrict_u * res, 121 int is_ipv6 122 ) 123 { 124 char as[INET6_ADDRSTRLEN]; 125 char ms[INET6_ADDRSTRLEN]; 126 127 if (is_ipv6) { 128 inet_ntop(AF_INET6, &res->u.v6.addr, as, sizeof as); 129 inet_ntop(AF_INET6, &res->u.v6.mask, ms, sizeof ms); 130 } else { 131 struct in_addr sia, sim; 132 133 sia.s_addr = htonl(res->u.v4.addr); 134 sim.s_addr = htonl(res->u.v4.addr); 135 inet_ntop(AF_INET, &sia, as, sizeof as); 136 inet_ntop(AF_INET, &sim, ms, sizeof ms); 137 } 138 printf("%s/%s: hits %u ippeerlimit %hd mflags %s rflags %s", 139 as, ms, res->count, res->ippeerlimit, 140 mflags_str(res->mflags), 141 rflags_str(res->rflags)); 142 if (res->expire > 0) { 143 printf(" expire %u\n", res->expire); 144 } else { 145 printf("\n"); 146 } 147 } 148 149 150 /* 151 * dump_restricts - spit out the 'restrict' entries 152 */ 153 void 154 dump_restricts(void) 155 { 156 restrict_u * res; 157 158 /* Spit out the IPv4 list */ 159 printf("dump_restricts: restrictlist4: %p\n", restrictlist4); 160 for (res = restrictlist4; res != NULL; res = res->link) { 161 dump_restrict(res, 0); 162 } 163 164 /* Spit out the IPv6 list */ 165 printf("dump_restricts: restrictlist6: %p\n", restrictlist6); 166 for (res = restrictlist6; res != NULL; res = res->link) { 167 dump_restrict(res, 1); 168 } 169 } 170 #endif /* DEBUG - dump_restrict() / dump_restricts() */ 171 172 173 /* 174 * init_restrict - initialize the restriction data structures 175 */ 176 void 177 init_restrict(void) 178 { 179 /* 180 * The restriction lists end with a default entry with address 181 * and mask 0, which will match any entry. The lists are kept 182 * sorted by descending address followed by descending mask: 183 * 184 * address mask 185 * 192.168.0.0 255.255.255.0 kod limited noquery nopeer 186 * 192.168.0.0 255.255.0.0 kod limited 187 * 0.0.0.0 0.0.0.0 kod limited noquery 188 * 189 * The first entry which matches an address is used. With the 190 * example restrictions above, 192.168.0.0/24 matches the first 191 * entry, the rest of 192.168.0.0/16 matches the second, and 192 * everything else matches the third (default). 193 * 194 * Note this achieves the same result a little more efficiently 195 * than the documented behavior, which is to keep the lists 196 * sorted by ascending address followed by ascending mask, with 197 * the _last_ matching entry used. 198 * 199 * An additional wrinkle is we may have multiple entries with 200 * the same address and mask but differing match flags (mflags). 201 * We want to never talk to ourself, so RES_IGNORE entries for 202 * each local address are added by ntp_io.c with a host mask and 203 * both RESM_INTERFACE and RESM_NTPONLY set. We sort those 204 * entries before entries without those flags to achieve this. 205 * The remaining match flag is RESM_SOURCE, used to dynamically 206 * set restrictions for each peer based on the prototype set by 207 * "restrict source" in the configuration. We want those entries 208 * to be considered only when there is not a static host 209 * restriction for the address in the configuration, to allow 210 * operators to blacklist pool and manycast servers at runtime as 211 * desired using ntpq runtime configuration. Such static entries 212 * have no RESM_ bits set, so the sort order for mflags is first 213 * RESM_INTERFACE, then entries without RESM_SOURCE, finally the 214 * remaining. 215 */ 216 217 restrict_def4.ippeerlimit = -1; /* Cleaner if we have C99 */ 218 restrict_def6.ippeerlimit = -1; /* Cleaner if we have C99 */ 219 220 LINK_SLIST(restrictlist4, &restrict_def4, link); 221 LINK_SLIST(restrictlist6, &restrict_def6, link); 222 restrictcount = 2; 223 } 224 225 226 static restrict_u * 227 alloc_res4(void) 228 { 229 const size_t cb = V4_SIZEOF_RESTRICT_U; 230 const size_t count = INC_RESLIST4; 231 restrict_u* rl; 232 restrict_u* res; 233 size_t i; 234 235 UNLINK_HEAD_SLIST(res, resfree4, link); 236 if (res != NULL) { 237 return res; 238 } 239 rl = eallocarray(count, cb); 240 /* link all but the first onto free list */ 241 res = (void *)((char *)rl + (count - 1) * cb); 242 for (i = count - 1; i > 0; i--) { 243 LINK_SLIST(resfree4, res, link); 244 res = (void *)((char *)res - cb); 245 } 246 DEBUG_INSIST(rl == res); 247 /* allocate the first */ 248 return res; 249 } 250 251 252 static restrict_u * 253 alloc_res6(void) 254 { 255 const size_t cb = V6_SIZEOF_RESTRICT_U; 256 const size_t count = INC_RESLIST6; 257 restrict_u * rl; 258 restrict_u * res; 259 size_t i; 260 261 UNLINK_HEAD_SLIST(res, resfree6, link); 262 if (res != NULL) { 263 return res; 264 } 265 rl = eallocarray(count, cb); 266 /* link all but the first onto free list */ 267 res = (void *)((char *)rl + (count - 1) * cb); 268 for (i = count - 1; i > 0; i--) { 269 LINK_SLIST(resfree6, res, link); 270 res = (void *)((char *)res - cb); 271 } 272 DEBUG_INSIST(rl == res); 273 /* allocate the first */ 274 return res; 275 } 276 277 278 static void 279 free_res( 280 restrict_u * res, 281 int v6 282 ) 283 { 284 restrict_u ** rlisthead_ptr; 285 restrict_u ** flisthead_ptr; 286 restrict_u * unlinked; 287 size_t sz; 288 289 restrictcount--; 290 if (RES_LIMITED & res->rflags) { 291 dec_res_limited(); 292 } 293 if (v6) { 294 rlisthead_ptr = &restrictlist6; 295 flisthead_ptr = &resfree6; 296 sz = V6_SIZEOF_RESTRICT_U; 297 } else { 298 rlisthead_ptr = &restrictlist4; 299 flisthead_ptr = &resfree4; 300 sz = V4_SIZEOF_RESTRICT_U; 301 } 302 UNLINK_SLIST(unlinked, *rlisthead_ptr, res, link, restrict_u); 303 INSIST(unlinked == res); 304 zero_mem(res, sz); 305 LINK_SLIST(*flisthead_ptr, res, link); 306 } 307 308 309 static inline void 310 inc_res_limited(void) 311 { 312 if (0 == res_limited_refcnt) { 313 mon_start(MON_RES); 314 } 315 res_limited_refcnt++; 316 } 317 318 319 static inline void 320 dec_res_limited(void) 321 { 322 res_limited_refcnt--; 323 if (0 == res_limited_refcnt) { 324 mon_stop(MON_RES); 325 } 326 } 327 328 329 static restrict_u * 330 match_restrict4_addr( 331 u_int32 addr, 332 u_short port 333 ) 334 { 335 const int v6 = FALSE; 336 restrict_u * res; 337 restrict_u * next; 338 339 for (res = restrictlist4; res != NULL; res = next) { 340 next = res->link; 341 if (res->expire && res->expire <= current_time) { 342 free_res(res, v6); /* zeroes the contents */ 343 } 344 if ( res->u.v4.addr == (addr & res->u.v4.mask) 345 && ( !(RESM_NTPONLY & res->mflags) 346 || NTP_PORT == port)) { 347 348 break; 349 } 350 } 351 return res; 352 } 353 354 355 static restrict_u * 356 match_restrict6_addr( 357 const struct in6_addr * addr, 358 u_short port 359 ) 360 { 361 const int v6 = TRUE; 362 restrict_u * res; 363 restrict_u * next; 364 struct in6_addr masked; 365 366 for (res = restrictlist6; res != NULL; res = next) { 367 next = res->link; 368 if (res->expire && res->expire <= current_time) { 369 free_res(res, v6); 370 } 371 MASK_IPV6_ADDR(&masked, addr, &res->u.v6.mask); 372 if (ADDR6_EQ(&masked, &res->u.v6.addr) 373 && ( !(RESM_NTPONLY & res->mflags) 374 || NTP_PORT == (int)port)) { 375 376 break; 377 } 378 } 379 return res; 380 } 381 382 383 /* 384 * match_restrict_entry - find an exact match on a restrict list. 385 * 386 * Exact match is addr, mask, and mflags all equal. 387 * In order to use more common code for IPv4 and IPv6, this routine 388 * requires the caller to populate a restrict_u with mflags and either 389 * the v4 or v6 address and mask as appropriate. Other fields in the 390 * input restrict_u are ignored. 391 */ 392 static restrict_u * 393 match_restrict_entry( 394 const restrict_u * pmatch, 395 int v6 396 ) 397 { 398 restrict_u *res; 399 restrict_u *rlist; 400 size_t cb; 401 402 if (v6) { 403 rlist = restrictlist6; 404 cb = sizeof(pmatch->u.v6); 405 } else { 406 rlist = restrictlist4; 407 cb = sizeof(pmatch->u.v4); 408 } 409 410 for (res = rlist; res != NULL; res = res->link) { 411 if (res->mflags == pmatch->mflags && 412 !memcmp(&res->u, &pmatch->u, cb)) { 413 break; 414 } 415 } 416 return res; 417 } 418 419 420 /* 421 * mflags_sorts_before - common mflags sorting code 422 * 423 * See block comment in init_restrict() above for rationale. 424 */ 425 static inline int/*BOOL*/ 426 mflags_sorts_before( 427 u_short m1, 428 u_short m2 429 ) 430 { 431 if ( (RESM_INTERFACE & m1) 432 && !(RESM_INTERFACE & m2)) { 433 return TRUE; 434 } else if ( !(RESM_SOURCE & m1) 435 && (RESM_SOURCE & m2)) { 436 return TRUE; 437 } else { 438 return FALSE; 439 } 440 } 441 442 443 /* 444 * res_sorts_before4 - compare IPv4 restriction entries 445 * 446 * Returns nonzero if r1 sorts before r2. We sort by descending 447 * address, then descending mask, then an intricate mflags sort 448 * order explained in a block comment near the top of this file. 449 */ 450 static int/*BOOL*/ 451 res_sorts_before4( 452 restrict_u *r1, 453 restrict_u *r2 454 ) 455 { 456 int r1_before_r2; 457 458 if (r1->u.v4.addr > r2->u.v4.addr) { 459 r1_before_r2 = TRUE; 460 } else if (r1->u.v4.addr < r2->u.v4.addr) { 461 r1_before_r2 = FALSE; 462 } else if (r1->u.v4.mask > r2->u.v4.mask) { 463 r1_before_r2 = TRUE; 464 } else if (r1->u.v4.mask < r2->u.v4.mask) { 465 r1_before_r2 = FALSE; 466 } else { 467 r1_before_r2 = mflags_sorts_before(r1->mflags, r2->mflags); 468 } 469 470 return r1_before_r2; 471 } 472 473 474 /* 475 * res_sorts_before6 - compare IPv6 restriction entries 476 * 477 * Returns nonzero if r1 sorts before r2. We sort by descending 478 * address, then descending mask, then an intricate mflags sort 479 * order explained in a block comment near the top of this file. 480 */ 481 static int/*BOOL*/ 482 res_sorts_before6( 483 restrict_u* r1, 484 restrict_u* r2 485 ) 486 { 487 int r1_before_r2; 488 int cmp; 489 490 cmp = ADDR6_CMP(&r1->u.v6.addr, &r2->u.v6.addr); 491 if (cmp > 0) { /* r1->addr > r2->addr */ 492 r1_before_r2 = TRUE; 493 } else if (cmp < 0) { /* r2->addr > r1->addr */ 494 r1_before_r2 = FALSE; 495 } else { 496 cmp = ADDR6_CMP(&r1->u.v6.mask, &r2->u.v6.mask); 497 if (cmp > 0) { /* r1->mask > r2->mask*/ 498 r1_before_r2 = TRUE; 499 } else if (cmp < 0) { /* r2->mask > r1->mask */ 500 r1_before_r2 = FALSE; 501 } else { 502 r1_before_r2 = mflags_sorts_before(r1->mflags, 503 r2->mflags); 504 } 505 } 506 507 return r1_before_r2; 508 } 509 510 511 /* 512 * restrictions - return restrictions for this host in *r4a 513 */ 514 void 515 restrictions( 516 sockaddr_u *srcadr, 517 r4addr *r4a 518 ) 519 { 520 restrict_u *match; 521 struct in6_addr *pin6; 522 523 DEBUG_REQUIRE(NULL != r4a); 524 525 res_calls++; 526 527 if (IS_IPV4(srcadr)) { 528 /* 529 * Ignore any packets with a multicast source address 530 * (this should be done early in the receive process, 531 * not later!) 532 */ 533 if (IN_CLASSD(SRCADR(srcadr))) { 534 goto multicast; 535 } 536 537 match = match_restrict4_addr(SRCADR(srcadr), 538 SRCPORT(srcadr)); 539 DEBUG_INSIST(match != NULL); 540 match->count++; 541 /* 542 * res_not_found counts only use of the final default 543 * entry, not any "restrict default ntpport ...", which 544 * would be just before the final default. 545 */ 546 if (&restrict_def4 == match) 547 res_not_found++; 548 else 549 res_found++; 550 r4a->rflags = match->rflags; 551 r4a->ippeerlimit = match->ippeerlimit; 552 } else { 553 DEBUG_REQUIRE(IS_IPV6(srcadr)); 554 555 pin6 = PSOCK_ADDR6(srcadr); 556 557 /* 558 * Ignore any packets with a multicast source address 559 * (this should be done early in the receive process, 560 * not later!) 561 */ 562 if (IN6_IS_ADDR_MULTICAST(pin6)) { 563 goto multicast; 564 } 565 match = match_restrict6_addr(pin6, SRCPORT(srcadr)); 566 DEBUG_INSIST(match != NULL); 567 match->count++; 568 if (&restrict_def6 == match) 569 res_not_found++; 570 else 571 res_found++; 572 r4a->rflags = match->rflags; 573 r4a->ippeerlimit = match->ippeerlimit; 574 } 575 576 return; 577 578 multicast: 579 r4a->rflags = RES_IGNORE; 580 r4a->ippeerlimit = 0; 581 } 582 583 584 #ifdef DEBUG 585 /* display string for restrict_op */ 586 const char * 587 resop_str(restrict_op op) 588 { 589 switch (op) { 590 case RESTRICT_FLAGS: return "RESTRICT_FLAGS"; 591 case RESTRICT_UNFLAG: return "RESTRICT_UNFLAG"; 592 case RESTRICT_REMOVE: return "RESTRICT_REMOVE"; 593 case RESTRICT_REMOVEIF: return "RESTRICT_REMOVEIF"; 594 } 595 DEBUG_INVARIANT(!"bad restrict_op in resop_str"); 596 return ""; /* silence not all paths return value warning */ 597 } 598 #endif /* DEBUG */ 599 600 601 /* 602 * hack_restrict - add/subtract/manipulate entries on the restrict list 603 */ 604 int/*BOOL*/ 605 hack_restrict( 606 restrict_op op, 607 sockaddr_u * resaddr, 608 sockaddr_u * resmask, 609 short ippeerlimit, 610 u_short mflags, 611 u_short rflags, 612 u_int32 expire 613 ) 614 { 615 int v6; 616 int bump_res_limited = FALSE; 617 restrict_u match; 618 restrict_u * res; 619 restrict_u ** plisthead; 620 res_sort_fn pfn_sort; 621 622 #ifdef DEBUG 623 if (debug > 0) { 624 printf("hack_restrict: op %s addr %s mask %s", 625 resop_str(op), stoa(resaddr), stoa(resmask)); 626 if (ippeerlimit >= 0) { 627 printf(" ippeerlimit %d", ippeerlimit); 628 } 629 printf(" mflags %s rflags %s", mflags_str(mflags), 630 rflags_str(rflags)); 631 if (expire) { 632 printf("lifetime %u\n", 633 expire - (u_int32)current_time); 634 } else { 635 printf("\n"); 636 } 637 } 638 #endif 639 640 if (NULL == resaddr) { 641 DEBUG_REQUIRE(NULL == resmask); 642 DEBUG_REQUIRE(RESTRICT_FLAGS == op); 643 DEBUG_REQUIRE(RESM_SOURCE & mflags); 644 restrict_source_rflags = rflags; 645 restrict_source_mflags = mflags; 646 restrict_source_ippeerlimit = ippeerlimit; 647 restrict_source_enabled = TRUE; 648 DPRINTF(1, ("restrict source template saved\n")); 649 return TRUE; 650 } 651 652 ZERO(match); 653 654 if (IS_IPV4(resaddr)) { 655 DEBUG_INVARIANT(IS_IPV4(resmask)); 656 v6 = FALSE; 657 /* 658 * Get address and mask in host byte order for easy 659 * comparison as u_int32 660 */ 661 match.u.v4.addr = SRCADR(resaddr); 662 match.u.v4.mask = SRCADR(resmask); 663 match.u.v4.addr &= match.u.v4.mask; 664 } else { 665 DEBUG_INVARIANT(IS_IPV6(resaddr)); 666 DEBUG_INVARIANT(IS_IPV6(resmask)); 667 v6 = TRUE; 668 /* 669 * Get address and mask in network byte order for easy 670 * comparison as byte sequences (e.g. memcmp()) 671 */ 672 match.u.v6.mask = SOCK_ADDR6(resmask); 673 MASK_IPV6_ADDR(&match.u.v6.addr, PSOCK_ADDR6(resaddr), 674 &match.u.v6.mask); 675 } 676 677 match.mflags = mflags; 678 res = match_restrict_entry(&match, v6); 679 680 switch (op) { 681 682 case RESTRICT_FLAGS: 683 /* 684 * Here we add bits to the rflags. If we already have 685 * this restriction modify it. 686 */ 687 if (NULL != res) { 688 if ( (RES_LIMITED & rflags) 689 && !(RES_LIMITED & res->rflags)) { 690 691 bump_res_limited = TRUE; 692 } 693 res->rflags |= rflags; 694 res->expire = expire; 695 } else { 696 match.rflags = rflags; 697 match.expire = expire; 698 match.ippeerlimit = ippeerlimit; 699 if (v6) { 700 res = alloc_res6(); 701 memcpy(res, &match, V6_SIZEOF_RESTRICT_U); 702 plisthead = &restrictlist6; 703 pfn_sort = &res_sorts_before6; 704 } else { 705 res = alloc_res4(); 706 memcpy(res, &match, V4_SIZEOF_RESTRICT_U); 707 plisthead = &restrictlist4; 708 pfn_sort = &res_sorts_before4; 709 } 710 LINK_SORT_SLIST( 711 *plisthead, res, 712 (*pfn_sort)(res, L_S_S_CUR()), 713 link, restrict_u); 714 restrictcount++; 715 if (RES_LIMITED & rflags) { 716 bump_res_limited = TRUE; 717 } 718 } 719 if (bump_res_limited) { 720 inc_res_limited(); 721 } 722 return TRUE; 723 724 case RESTRICT_UNFLAG: 725 /* 726 * Remove some bits from the rflags. If we didn't 727 * find this one, just return. 728 */ 729 if (NULL == res) { 730 DPRINTF(1, ("No match for %s %s removing rflags %s\n", 731 stoa(resaddr), stoa(resmask), 732 rflags_str(rflags))); 733 return FALSE; 734 } 735 if ( (RES_LIMITED & res->rflags) 736 && (RES_LIMITED & rflags)) { 737 dec_res_limited(); 738 } 739 res->rflags &= ~rflags; 740 return TRUE; 741 742 case RESTRICT_REMOVE: 743 case RESTRICT_REMOVEIF: 744 /* 745 * Remove an entry from the table entirely if we 746 * found one. Don't remove the default entry and 747 * don't remove an interface entry unless asked. 748 */ 749 if ( res != NULL 750 && ( RESTRICT_REMOVEIF == op 751 || !(RESM_INTERFACE & res->mflags)) 752 && res != &restrict_def4 753 && res != &restrict_def6) { 754 755 free_res(res, v6); 756 return TRUE; 757 } 758 DPRINTF(1, ("No match removing %s %s restriction\n", 759 stoa(resaddr), stoa(resmask))); 760 return FALSE; 761 } 762 /* notreached */ 763 return FALSE; 764 } 765 766 767 /* 768 * restrict_source - maintains dynamic "restrict source ..." entries as 769 * peers come and go. 770 */ 771 void 772 restrict_source( 773 sockaddr_u * addr, 774 int farewell, /* TRUE to remove */ 775 u_int32 lifetime /* seconds, 0 forever */ 776 ) 777 { 778 sockaddr_u onesmask; 779 int/*BOOL*/ success; 780 781 if ( !restrict_source_enabled || SOCK_UNSPEC(addr) 782 || IS_MCAST(addr) || ISREFCLOCKADR(addr)) { 783 return; 784 } 785 786 REQUIRE(AF_INET == AF(addr) || AF_INET6 == AF(addr)); 787 788 SET_HOSTMASK(&onesmask, AF(addr)); 789 if (farewell) { 790 success = hack_restrict(RESTRICT_REMOVE, addr, &onesmask, 791 0, RESM_SOURCE, 0, 0); 792 if (success) { 793 DPRINTF(1, ("%s %s removed", __func__, 794 stoa(addr))); 795 } else { 796 msyslog(LOG_ERR, "%s remove %s failed", 797 __func__, stoa(addr)); 798 } 799 return; 800 } 801 802 success = hack_restrict(RESTRICT_FLAGS, addr, &onesmask, 803 restrict_source_ippeerlimit, 804 restrict_source_mflags, 805 restrict_source_rflags, 806 lifetime > 0 807 ? lifetime + current_time 808 : 0); 809 if (success) { 810 DPRINTF(1, ("%s %s add/upd\n", __func__, 811 stoa(addr))); 812 } else { 813 msyslog(LOG_ERR, "%s %s failed", __func__, stoa(addr)); 814 } 815 } 816 817 818 #ifdef DEBUG 819 /* Convert restriction RES_ flag bits into a display string */ 820 const char * 821 rflags_str( 822 u_short rflags 823 ) 824 { 825 const size_t sz = LIB_BUFLENGTH; 826 char * rfs; 827 828 LIB_GETBUF(rfs); 829 rfs[0] = '\0'; 830 831 if (rflags & RES_FLAKE) { 832 CLEAR_BIT_IF_DEBUG(RES_FLAKE, rflags); 833 append_flagstr(rfs, sz, "flake"); 834 } 835 836 if (rflags & RES_IGNORE) { 837 CLEAR_BIT_IF_DEBUG(RES_IGNORE, rflags); 838 append_flagstr(rfs, sz, "ignore"); 839 } 840 841 if (rflags & RES_KOD) { 842 CLEAR_BIT_IF_DEBUG(RES_KOD, rflags); 843 append_flagstr(rfs, sz, "kod"); 844 } 845 846 if (rflags & RES_MSSNTP) { 847 CLEAR_BIT_IF_DEBUG(RES_MSSNTP, rflags); 848 append_flagstr(rfs, sz, "mssntp"); 849 } 850 851 if (rflags & RES_LIMITED) { 852 CLEAR_BIT_IF_DEBUG(RES_LIMITED, rflags); 853 append_flagstr(rfs, sz, "limited"); 854 } 855 856 if (rflags & RES_LPTRAP) { 857 CLEAR_BIT_IF_DEBUG(RES_LPTRAP, rflags); 858 append_flagstr(rfs, sz, "lptrap"); 859 } 860 861 if (rflags & RES_NOMODIFY) { 862 CLEAR_BIT_IF_DEBUG(RES_NOMODIFY, rflags); 863 append_flagstr(rfs, sz, "nomodify"); 864 } 865 866 if (rflags & RES_NOMRULIST) { 867 CLEAR_BIT_IF_DEBUG(RES_NOMRULIST, rflags); 868 append_flagstr(rfs, sz, "nomrulist"); 869 } 870 871 if (rflags & RES_NOEPEER) { 872 CLEAR_BIT_IF_DEBUG(RES_NOEPEER, rflags); 873 append_flagstr(rfs, sz, "noepeer"); 874 } 875 876 if (rflags & RES_NOPEER) { 877 CLEAR_BIT_IF_DEBUG(RES_NOPEER, rflags); 878 append_flagstr(rfs, sz, "nopeer"); 879 } 880 881 if (rflags & RES_NOQUERY) { 882 CLEAR_BIT_IF_DEBUG(RES_NOQUERY, rflags); 883 append_flagstr(rfs, sz, "noquery"); 884 } 885 886 if (rflags & RES_DONTSERVE) { 887 CLEAR_BIT_IF_DEBUG(RES_DONTSERVE, rflags); 888 append_flagstr(rfs, sz, "dontserve"); 889 } 890 891 if (rflags & RES_NOTRAP) { 892 CLEAR_BIT_IF_DEBUG(RES_NOTRAP, rflags); 893 append_flagstr(rfs, sz, "notrap"); 894 } 895 896 if (rflags & RES_DONTTRUST) { 897 CLEAR_BIT_IF_DEBUG(RES_DONTTRUST, rflags); 898 append_flagstr(rfs, sz, "notrust"); 899 } 900 901 if (rflags & RES_SRVRSPFUZ) { 902 CLEAR_BIT_IF_DEBUG(RES_SRVRSPFUZ, rflags); 903 append_flagstr(rfs, sz, "srvrspfuz"); 904 } 905 906 if (rflags & RES_VERSION) { 907 CLEAR_BIT_IF_DEBUG(RES_VERSION, rflags); 908 append_flagstr(rfs, sz, "version"); 909 } 910 911 DEBUG_INVARIANT(!rflags); 912 913 if ('\0' == rfs[0]) { 914 append_flagstr(rfs, sz, "(none)"); 915 } 916 917 return rfs; 918 } 919 920 921 /* Convert restriction match RESM_ flag bits into a display string */ 922 const char * 923 mflags_str( 924 u_short mflags 925 ) 926 { 927 const size_t sz = LIB_BUFLENGTH; 928 char * mfs; 929 930 LIB_GETBUF(mfs); 931 mfs[0] = '\0'; 932 933 if (mflags & RESM_NTPONLY) { 934 CLEAR_BIT_IF_DEBUG(RESM_NTPONLY, mflags); 935 append_flagstr(mfs, sz, "ntponly"); 936 } 937 938 if (mflags & RESM_SOURCE) { 939 CLEAR_BIT_IF_DEBUG(RESM_SOURCE, mflags); 940 append_flagstr(mfs, sz, "source"); 941 } 942 943 if (mflags & RESM_INTERFACE) { 944 CLEAR_BIT_IF_DEBUG(RESM_INTERFACE, mflags); 945 append_flagstr(mfs, sz, "interface"); 946 } 947 948 DEBUG_INVARIANT(!mflags); 949 950 return mfs; 951 } 952 #endif /* DEBUG */ 953