/* $NetBSD: ntp_restrict.c,v 1.11 2020/05/25 20:47:25 christos Exp $ */

/*
 * ntp_restrict.c - determine host restrictions
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <sys/types.h>

#include "ntpd.h"
#include "ntp_if.h"
#include "ntp_lists.h"
#include "ntp_stdlib.h"
#include "ntp_assert.h"

/*
 * This code keeps a simple address-and-mask list of hosts we want
 * to place restrictions on (or remove them from). The restrictions
 * are implemented as a set of flags which tell you what the host
 * can't do. There is a subroutine entry to return the flags. The
 * list is kept sorted to reduce the average number of comparisons
 * and make sure you get the set of restrictions most specific to
 * the address.
 *
 * The algorithm is that, when looking up a host, it is first assumed
 * that the default set of restrictions will apply. It then searches
 * down through the list. Whenever it finds a match it adopts the
 * match's flags instead. When you hit the point where the sorted
 * address is greater than the target, you return with the last set of
 * flags you found. Because of the ordering of the list, the most
 * specific match will provide the final set of flags.
 *
 * This was originally intended to restrict you from sync'ing to your
 * own broadcasts when you are doing that, by restricting yourself from
 * your own interfaces. It was also thought it would sometimes be useful
 * to keep a misbehaving host or two from abusing your primary clock. It
 * has been expanded, however, to suit the needs of those with more
 * restrictive access policies.
 */
/*
 * We will use two lists, one for IPv4 addresses and one for IPv6
 * addresses. This is not protocol-independant but for now I can't
 * find a way to respect this. We'll check this later... JFB 07/2001
 */

/* Apply an IPv6 netmask byte by byte: dst = src & msk. */
#define MASK_IPV6_ADDR(dst, src, msk)					\
	do {								\
		int idx;						\
		for (idx = 0; idx < (int)COUNTOF((dst)->s6_addr); idx++) { \
			(dst)->s6_addr[idx] = (src)->s6_addr[idx]	\
					      & (msk)->s6_addr[idx];	\
		}							\
	} while (0)

/*
 * We allocate INC_RESLIST{4|6} entries to the free list whenever empty.
 * Auto-tune these to be just less than 1KB (leaving at least 16 bytes
 * for allocator overhead).
 */
#define	INC_RESLIST4	((1024 - 16) / V4_SIZEOF_RESTRICT_U)
#define	INC_RESLIST6	((1024 - 16) / V6_SIZEOF_RESTRICT_U)

/*
 * The restriction list heads, one per address family.
 */
restrict_u *restrictlist4;
restrict_u *restrictlist6;
static int restrictcount;	/* count in the restrict lists */

/*
 * The free list and associated counters.  Also some uninteresting
 * stat counters.
 */
static restrict_u *resfree4;	/* available entries (free list) */
static restrict_u *resfree6;

static u_long res_calls;
static u_long res_found;
static u_long res_not_found;

/*
 * Count number of restriction entries referring to RES_LIMITED, to
 * control implicit activation/deactivation of the MRU monlist.
 */
static u_long res_limited_refcnt;

/*
 * Our default entries.
 *
 * We can make this cleaner with c99 support: see init_restrict().
 */
static restrict_u	restrict_def4;
static restrict_u	restrict_def6;

/*
 * "restrict source ..." enabled knob and restriction bits.
99 */ 100 static int restrict_source_enabled; 101 static u_int32 restrict_source_rflags; 102 static u_short restrict_source_mflags; 103 static short restrict_source_ippeerlimit; 104 105 /* 106 * private functions 107 */ 108 static restrict_u * alloc_res4(void); 109 static restrict_u * alloc_res6(void); 110 static void free_res(restrict_u *, int); 111 static void inc_res_limited(void); 112 static void dec_res_limited(void); 113 static restrict_u * match_restrict4_addr(u_int32, u_short); 114 static restrict_u * match_restrict6_addr(const struct in6_addr *, 115 u_short); 116 static restrict_u * match_restrict_entry(const restrict_u *, int); 117 static int res_sorts_before4(restrict_u *, restrict_u *); 118 static int res_sorts_before6(restrict_u *, restrict_u *); 119 static const char * roptoa(restrict_op op); 120 121 122 void dump_restricts(void); 123 124 /* 125 * dump_restrict - spit out a restrict_u 126 */ 127 static void 128 dump_restrict( 129 restrict_u * res, 130 int is_ipv6 131 ) 132 { 133 char as[INET6_ADDRSTRLEN]; 134 char ms[INET6_ADDRSTRLEN]; 135 136 if (is_ipv6) { 137 inet_ntop(AF_INET6, &res->u.v6.addr, as, sizeof as); 138 inet_ntop(AF_INET6, &res->u.v6.mask, ms, sizeof ms); 139 } else { 140 struct in_addr sia = { htonl(res->u.v4.addr) }; 141 struct in_addr sim = { htonl(res->u.v4.mask) }; 142 143 inet_ntop(AF_INET, &sia, as, sizeof as); 144 inet_ntop(AF_INET, &sim, ms, sizeof ms); 145 } 146 mprintf("restrict node at %p: %s/%s count %d, rflags %08x, mflags %04x, ippeerlimit %d, expire %lu, next %p\n", 147 res, as, ms, res->count, res->rflags, res->mflags, 148 res->ippeerlimit, res->expire, res->link); 149 return; 150 } 151 152 153 /* 154 * dump_restricts - spit out the 'restrict' lines 155 */ 156 void 157 dump_restricts(void) 158 { 159 restrict_u * res; 160 restrict_u * next; 161 162 mprintf("dump_restrict: restrict_def4: %p\n", &restrict_def4); 163 /* Spit out 'restrict {,-4,-6} default ...' 
lines, if needed */ 164 for (res = &restrict_def4; res != NULL; res = next) { 165 dump_restrict(res, 0); 166 next = res->link; 167 } 168 169 mprintf("dump_restrict: restrict_def6: %p\n", &restrict_def6); 170 for (res = &restrict_def6; res != NULL; res = next) { 171 dump_restrict(res, 1); 172 next = res->link; 173 } 174 175 /* Spit out the IPv4 list */ 176 mprintf("dump_restrict: restrictlist4: %p\n", &restrictlist4); 177 for (res = restrictlist4; res != NULL; res = next) { 178 dump_restrict(res, 0); 179 next = res->link; 180 } 181 182 /* Spit out the IPv6 list */ 183 mprintf("dump_restrict: restrictlist6: %p\n", &restrictlist6); 184 for (res = restrictlist6; res != NULL; res = next) { 185 dump_restrict(res, 1); 186 next = res->link; 187 } 188 189 return; 190 } 191 192 /* 193 * init_restrict - initialize the restriction data structures 194 */ 195 void 196 init_restrict(void) 197 { 198 /* 199 * The restriction lists begin with a default entry with address 200 * and mask 0, which will match any entry. The lists are kept 201 * sorted by descending address followed by descending mask: 202 * 203 * address mask 204 * 192.168.0.0 255.255.255.0 kod limited noquery nopeer 205 * 192.168.0.0 255.255.0.0 kod limited 206 * 0.0.0.0 0.0.0.0 kod limited noquery 207 * 208 * The first entry which matches an address is used. With the 209 * example restrictions above, 192.168.0.0/24 matches the first 210 * entry, the rest of 192.168.0.0/16 matches the second, and 211 * everything else matches the third (default). 212 * 213 * Note this achieves the same result a little more efficiently 214 * than the documented behavior, which is to keep the lists 215 * sorted by ascending address followed by ascending mask, with 216 * the _last_ matching entry used. 217 * 218 * An additional wrinkle is we may have multiple entries with 219 * the same address and mask but differing match flags (mflags). 220 * At present there is only one, RESM_NTPONLY. 
Entries with 221 * RESM_NTPONLY are sorted earlier so they take precedence over 222 * any otherwise similar entry without. Again, this is the same 223 * behavior as but reversed implementation compared to the docs. 224 * 225 */ 226 227 restrict_def4.ippeerlimit = -1; /* Cleaner if we have C99 */ 228 restrict_def6.ippeerlimit = -1; /* Cleaner if we have C99 */ 229 230 LINK_SLIST(restrictlist4, &restrict_def4, link); 231 LINK_SLIST(restrictlist6, &restrict_def6, link); 232 restrictcount = 2; 233 } 234 235 236 static restrict_u * 237 alloc_res4(void) 238 { 239 const size_t cb = V4_SIZEOF_RESTRICT_U; 240 const size_t count = INC_RESLIST4; 241 restrict_u * rl; 242 restrict_u * res; 243 size_t i; 244 245 UNLINK_HEAD_SLIST(res, resfree4, link); 246 if (res != NULL) 247 return res; 248 249 rl = eallocarray(count, cb); 250 /* link all but the first onto free list */ 251 res = (void *)((char *)rl + (count - 1) * cb); 252 for (i = count - 1; i > 0; i--) { 253 LINK_SLIST(resfree4, res, link); 254 res = (void *)((char *)res - cb); 255 } 256 INSIST(rl == res); 257 /* allocate the first */ 258 return res; 259 } 260 261 262 static restrict_u * 263 alloc_res6(void) 264 { 265 const size_t cb = V6_SIZEOF_RESTRICT_U; 266 const size_t count = INC_RESLIST6; 267 restrict_u * rl; 268 restrict_u * res; 269 size_t i; 270 271 UNLINK_HEAD_SLIST(res, resfree6, link); 272 if (res != NULL) 273 return res; 274 275 rl = eallocarray(count, cb); 276 /* link all but the first onto free list */ 277 res = (void *)((char *)rl + (count - 1) * cb); 278 for (i = count - 1; i > 0; i--) { 279 LINK_SLIST(resfree6, res, link); 280 res = (void *)((char *)res - cb); 281 } 282 INSIST(rl == res); 283 /* allocate the first */ 284 return res; 285 } 286 287 288 static void 289 free_res( 290 restrict_u * res, 291 int v6 292 ) 293 { 294 restrict_u ** plisthead; 295 restrict_u * unlinked; 296 297 restrictcount--; 298 if (RES_LIMITED & res->rflags) 299 dec_res_limited(); 300 301 if (v6) 302 plisthead = &restrictlist6; 
303 else 304 plisthead = &restrictlist4; 305 UNLINK_SLIST(unlinked, *plisthead, res, link, restrict_u); 306 INSIST(unlinked == res); 307 308 if (v6) { 309 zero_mem(res, V6_SIZEOF_RESTRICT_U); 310 plisthead = &resfree6; 311 } else { 312 zero_mem(res, V4_SIZEOF_RESTRICT_U); 313 plisthead = &resfree4; 314 } 315 LINK_SLIST(*plisthead, res, link); 316 } 317 318 319 static void 320 inc_res_limited(void) 321 { 322 if (!res_limited_refcnt) 323 mon_start(MON_RES); 324 res_limited_refcnt++; 325 } 326 327 328 static void 329 dec_res_limited(void) 330 { 331 res_limited_refcnt--; 332 if (!res_limited_refcnt) 333 mon_stop(MON_RES); 334 } 335 336 337 static restrict_u * 338 match_restrict4_addr( 339 u_int32 addr, 340 u_short port 341 ) 342 { 343 const int v6 = 0; 344 restrict_u * res; 345 restrict_u * next; 346 347 for (res = restrictlist4; res != NULL; res = next) { 348 struct in_addr sia = { htonl(res->u.v4.addr) }; 349 350 next = res->link; 351 DPRINTF(2, ("match_restrict4_addr: Checking %s, port %d ... 
", 352 inet_ntoa(sia), port)); 353 if ( res->expire 354 && res->expire <= current_time) 355 free_res(res, v6); /* zeroes the contents */ 356 if ( res->u.v4.addr == (addr & res->u.v4.mask) 357 && ( !(RESM_NTPONLY & res->mflags) 358 || NTP_PORT == port)) { 359 DPRINTF(2, ("MATCH: ippeerlimit %d\n", res->ippeerlimit)); 360 break; 361 } 362 DPRINTF(2, ("doesn't match: ippeerlimit %d\n", res->ippeerlimit)); 363 } 364 return res; 365 } 366 367 368 static restrict_u * 369 match_restrict6_addr( 370 const struct in6_addr * addr, 371 u_short port 372 ) 373 { 374 const int v6 = 1; 375 restrict_u * res; 376 restrict_u * next; 377 struct in6_addr masked; 378 379 for (res = restrictlist6; res != NULL; res = next) { 380 next = res->link; 381 INSIST(next != res); 382 if (res->expire && 383 res->expire <= current_time) 384 free_res(res, v6); 385 MASK_IPV6_ADDR(&masked, addr, &res->u.v6.mask); 386 if (ADDR6_EQ(&masked, &res->u.v6.addr) 387 && (!(RESM_NTPONLY & res->mflags) 388 || NTP_PORT == (int)port)) 389 break; 390 } 391 return res; 392 } 393 394 395 /* 396 * match_restrict_entry - find an exact match on a restrict list. 397 * 398 * Exact match is addr, mask, and mflags all equal. 399 * In order to use more common code for IPv4 and IPv6, this routine 400 * requires the caller to populate a restrict_u with mflags and either 401 * the v4 or v6 address and mask as appropriate. Other fields in the 402 * input restrict_u are ignored. 
403 */ 404 static restrict_u * 405 match_restrict_entry( 406 const restrict_u * pmatch, 407 int v6 408 ) 409 { 410 restrict_u *res; 411 restrict_u *rlist; 412 size_t cb; 413 414 if (v6) { 415 rlist = restrictlist6; 416 cb = sizeof(pmatch->u.v6); 417 } else { 418 rlist = restrictlist4; 419 cb = sizeof(pmatch->u.v4); 420 } 421 422 for (res = rlist; res != NULL; res = res->link) 423 if (res->mflags == pmatch->mflags && 424 !memcmp(&res->u, &pmatch->u, cb)) 425 break; 426 return res; 427 } 428 429 430 /* 431 * res_sorts_before4 - compare two restrict4 entries 432 * 433 * Returns nonzero if r1 sorts before r2. We sort by descending 434 * address, then descending mask, then descending mflags, so sorting 435 * before means having a higher value. 436 */ 437 static int 438 res_sorts_before4( 439 restrict_u *r1, 440 restrict_u *r2 441 ) 442 { 443 int r1_before_r2; 444 445 if (r1->u.v4.addr > r2->u.v4.addr) 446 r1_before_r2 = 1; 447 else if (r1->u.v4.addr < r2->u.v4.addr) 448 r1_before_r2 = 0; 449 else if (r1->u.v4.mask > r2->u.v4.mask) 450 r1_before_r2 = 1; 451 else if (r1->u.v4.mask < r2->u.v4.mask) 452 r1_before_r2 = 0; 453 else if (r1->mflags > r2->mflags) 454 r1_before_r2 = 1; 455 else 456 r1_before_r2 = 0; 457 458 return r1_before_r2; 459 } 460 461 462 /* 463 * res_sorts_before6 - compare two restrict6 entries 464 * 465 * Returns nonzero if r1 sorts before r2. We sort by descending 466 * address, then descending mask, then descending mflags, so sorting 467 * before means having a higher value. 
 */
static int
res_sorts_before6(
	restrict_u *r1,
	restrict_u *r2
	)
{
	int r1_before_r2;
	int cmp;

	cmp = ADDR6_CMP(&r1->u.v6.addr, &r2->u.v6.addr);
	if (cmp > 0)		/* r1->addr > r2->addr */
		r1_before_r2 = 1;
	else if (cmp < 0)	/* r2->addr > r1->addr */
		r1_before_r2 = 0;
	else {
		cmp = ADDR6_CMP(&r1->u.v6.mask, &r2->u.v6.mask);
		if (cmp > 0)		/* r1->mask > r2->mask */
			r1_before_r2 = 1;
		else if (cmp < 0)	/* r2->mask > r1->mask */
			r1_before_r2 = 0;
		else if (r1->mflags > r2->mflags)
			r1_before_r2 = 1;
		else
			r1_before_r2 = 0;
	}

	return r1_before_r2;
}


/*
 * restrictions - return restrictions for this host in *r4a
 *
 * Starts from RES_IGNORE and replaces it with the flags of the most
 * specific matching entry, bumping that entry's hit count and the
 * found/not-found stats.  Multicast sources are rejected early.
 */
void
restrictions(
	sockaddr_u *srcadr,
	r4addr *r4a
	)
{
	restrict_u *match;
	struct in6_addr *pin6;

	REQUIRE(NULL != r4a);

	res_calls++;
	r4a->rflags = RES_IGNORE;
	r4a->ippeerlimit = 0;

	DPRINTF(1, ("restrictions: looking up %s\n", stoa(srcadr)));

	/* IPv4 source address */
	if (IS_IPV4(srcadr)) {
		/*
		 * Ignore any packets with a multicast source address
		 * (this should be done early in the receive process,
		 * not later!)
		 */
		if (IN_CLASSD(SRCADR(srcadr))) {
			DPRINTF(1, ("restrictions: srcadr %s is multicast\n", stoa(srcadr)));
			r4a->ippeerlimit = 2;	/* XXX: we should use a better value */
			return;
		}

		match = match_restrict4_addr(SRCADR(srcadr),
					     SRCPORT(srcadr));

		INSIST(match != NULL);

		match->count++;
		/*
		 * res_not_found counts only use of the final default
		 * entry, not any "restrict default ntpport ...", which
		 * would be just before the final default.
		 */
		if (&restrict_def4 == match)
			res_not_found++;
		else
			res_found++;
		r4a->rflags = match->rflags;
		r4a->ippeerlimit = match->ippeerlimit;
	}

	/* IPv6 source address */
	if (IS_IPV6(srcadr)) {
		pin6 = PSOCK_ADDR6(srcadr);

		/*
		 * Ignore any packets with a multicast source address
		 * (this should be done early in the receive process,
		 * not later!)
		 */
		if (IN6_IS_ADDR_MULTICAST(pin6))
			return;

		match = match_restrict6_addr(pin6, SRCPORT(srcadr));
		INSIST(match != NULL);
		match->count++;
		if (&restrict_def6 == match)
			res_not_found++;
		else
			res_found++;
		r4a->rflags = match->rflags;
		r4a->ippeerlimit = match->ippeerlimit;
	}

	return;
}


/*
 * roptoa - convert a restrict_op to a string
 *
 * NOTE(review): RESTRICT_UNFLAG maps to the string "RESTRICT_UNFLAGS"
 * (trailing S) — looks like a typo, but it is debug output only;
 * confirm before changing.
 */
const char *
roptoa(restrict_op op) {
	static char sb[30];

	switch(op) {
	    case RESTRICT_FLAGS:	return "RESTRICT_FLAGS";
	    case RESTRICT_UNFLAG:	return "RESTRICT_UNFLAGS";
	    case RESTRICT_REMOVE:	return "RESTRICT_REMOVE";
	    case RESTRICT_REMOVEIF:	return "RESTRICT_REMOVEIF";
	    default:
		snprintf(sb, sizeof sb, "**RESTRICT_#%d**", op);
		return sb;
	}
}


/*
 * hack_restrict - add/subtract/manipulate entries on the restrict list
 *
 * A NULL resaddr (with RESTRICT_FLAGS) arms the "restrict source"
 * template instead of touching the lists.  Otherwise the routine
 * normalizes addr/mask into a probe entry, finds any exact match,
 * and applies op to it.
 */
void
hack_restrict(
	restrict_op	op,
	sockaddr_u *	resaddr,
	sockaddr_u *	resmask,
	short		ippeerlimit,
	u_short		mflags,
	u_short		rflags,
	u_long		expire
	)
{
	int		v6;
	restrict_u	match;
	restrict_u *	res;
	restrict_u **	plisthead;

	DPRINTF(1, ("hack_restrict: op %s addr %s mask %s ippeerlimit %d mflags %08x rflags %08x\n",
		    roptoa(op), stoa(resaddr), stoa(resmask), ippeerlimit, mflags, rflags));

	if (NULL == resaddr) {
		/* restrict source */
		REQUIRE(NULL == resmask);
		REQUIRE(RESTRICT_FLAGS == op);
		restrict_source_rflags = rflags;
		restrict_source_mflags = mflags;
		restrict_source_ippeerlimit = ippeerlimit;
		restrict_source_enabled = 1;
		return;
	}

	ZERO(match);

#if 0
	/* silence VC9 potentially uninit warnings */
	// HMS: let's use a compiler-specific "enable" for this.
	res = NULL;
	v6 = 0;
#endif

	if (IS_IPV4(resaddr)) {
		v6 = 0;
		/*
		 * Get address and mask in host byte order for easy
		 * comparison as u_int32
		 */
		match.u.v4.addr = SRCADR(resaddr);
		match.u.v4.mask = SRCADR(resmask);
		match.u.v4.addr &= match.u.v4.mask;

	} else if (IS_IPV6(resaddr)) {
		v6 = 1;
		/*
		 * Get address and mask in network byte order for easy
		 * comparison as byte sequences (e.g. memcmp())
		 */
		match.u.v6.mask = SOCK_ADDR6(resmask);
		MASK_IPV6_ADDR(&match.u.v6.addr, PSOCK_ADDR6(resaddr),
			       &match.u.v6.mask);

	} else	/* not IPv4 nor IPv6 */
		REQUIRE(0);

	match.rflags = rflags;
	match.mflags = mflags;
	match.ippeerlimit = ippeerlimit;
	match.expire = expire;
	res = match_restrict_entry(&match, v6);

	switch (op) {

	case RESTRICT_FLAGS:
		/*
		 * Here we add bits to the rflags. If this is a
		 * new restriction add it.
		 */
		if (NULL == res) {
			if (v6) {
				res = alloc_res6();
				memcpy(res, &match,
				       V6_SIZEOF_RESTRICT_U);
				plisthead = &restrictlist6;
			} else {
				res = alloc_res4();
				memcpy(res, &match,
				       V4_SIZEOF_RESTRICT_U);
				plisthead = &restrictlist4;
			}
			/* keep the list sorted most-specific-first */
			LINK_SORT_SLIST(
				*plisthead, res,
				(v6)
				    ? res_sorts_before6(res, L_S_S_CUR())
				    : res_sorts_before4(res, L_S_S_CUR()),
				link, restrict_u);
			restrictcount++;
			if (RES_LIMITED & rflags)
				inc_res_limited();
		} else {
			if (   (RES_LIMITED & rflags)
			    && !(RES_LIMITED & res->rflags))
				inc_res_limited();
			res->rflags |= rflags;
		}

		res->ippeerlimit = match.ippeerlimit;

		break;

	case RESTRICT_UNFLAG:
		/*
		 * Remove some bits from the rflags. If we didn't
		 * find this one, just return.
		 */
		if (res != NULL) {
			if (   (RES_LIMITED & res->rflags)
			    && (RES_LIMITED & rflags))
				dec_res_limited();
			res->rflags &= ~rflags;
		}
		break;

	case RESTRICT_REMOVE:
	case RESTRICT_REMOVEIF:
		/*
		 * Remove an entry from the table entirely if we
		 * found one. Don't remove the default entry and
		 * don't remove an interface entry.
		 */
		if (res != NULL
		    && (RESTRICT_REMOVEIF == op
			|| !(RESM_INTERFACE & res->mflags))
		    && res != &restrict_def4
		    && res != &restrict_def6)
			free_res(res, v6);
		break;

	default:	/* unknown op */
		INSIST(0);
		break;
	}

}


/*
 * restrict_source - maintains dynamic "restrict source ..." entries as
 * peers come and go.
 */
void
restrict_source(
	sockaddr_u *	addr,
	int		farewell,	/* 0 to add, 1 to remove */
	u_long		expire		/* 0 is infinite, valid until */
	)
{
	sockaddr_u	onesmask;
	restrict_u *	res;
	int		found_specific;

	/* nothing to do unless armed, and never for non-unicast peers */
	if (!restrict_source_enabled || SOCK_UNSPEC(addr) ||
	    IS_MCAST(addr) || ISREFCLOCKADR(addr))
		return;

	REQUIRE(AF_INET == AF(addr) || AF_INET6 == AF(addr));

	SET_HOSTMASK(&onesmask, AF(addr));
	if (farewell) {
		hack_restrict(RESTRICT_REMOVE, addr, &onesmask,
			      -2, 0, 0, 0);
		/* NOTE(review): debug message lacks a trailing \n */
		DPRINTF(1, ("restrict_source: %s removed", stoa(addr)));
		return;
	}

	/*
	 * If there is a specific entry for this address, hands
	 * off, as it is condidered more specific than "restrict
	 * server ...".
	 * However, if the specific entry found is a fleeting one
	 * added by pool_xmit() before soliciting, replace it
	 * immediately regardless of the expire value to make way
	 * for the more persistent entry.
	 */
	if (IS_IPV4(addr)) {
		res = match_restrict4_addr(SRCADR(addr), SRCPORT(addr));
		INSIST(res != NULL);
		/* host-specific iff the mask is all-ones */
		found_specific = (SRCADR(&onesmask) == res->u.v4.mask);
	} else {
		res = match_restrict6_addr(&SOCK_ADDR6(addr),
					   SRCPORT(addr));
		INSIST(res != NULL);
		found_specific = ADDR6_EQ(&res->u.v6.mask,
					  &SOCK_ADDR6(&onesmask));
	}
	if (!expire && found_specific && res->expire) {
		found_specific = 0;
		free_res(res, IS_IPV6(addr));
	}
	if (found_specific)
		return;

	hack_restrict(RESTRICT_FLAGS, addr, &onesmask,
		      restrict_source_ippeerlimit,
		      restrict_source_mflags, restrict_source_rflags, expire);
	DPRINTF(1, ("restrict_source: %s host restriction added\n",
		    stoa(addr)));
}